Dataset columns (string-length ranges):
  hip_filename    string, 5 to 84 characters
  hip_content     string, 79 to 9.69M characters
  cuda_filename   string, 4 to 83 characters
  cuda_content    string, 19 to 9.69M characters
4f6ddffd0d8887b15495fbbd9aa9c35fb69c4ace.hip
// !!! This is a file automatically generated by hipify!!! #include <cudf/cudf.h> #include <rmm/rmm.h> #include <utilities/cudf_utils.h> #include <utilities/error_utils.hpp> #include <cudf/utilities/legacy/type_dispatcher.hpp> #include <cudf/utilities/legacy/wrapper_types.hpp> #include <cub/device/device_segmented_radix_sort.cuh> struct SegmentedRadixSortPlan{ const gdf_size_type num_items; // temporary storage void *storage; size_t storage_bytes; void *back_key, *back_val; size_t back_key_size, back_val_size; hipStream_t stream; int descending; unsigned begin_bit, end_bit; SegmentedRadixSortPlan(size_t num_items, int descending, unsigned begin_bit, unsigned end_bit) : num_items(num_items), storage(nullptr), storage_bytes(0), back_key(nullptr), back_val(nullptr), back_key_size(0), back_val_size(0), stream(0), descending(descending), begin_bit(begin_bit), end_bit(end_bit) {} gdf_error setup(size_t sizeof_key, size_t sizeof_val) { back_key_size = num_items * sizeof_key; back_val_size = num_items * sizeof_val; RMM_TRY( RMM_ALLOC(&back_key, back_key_size, stream) ); // TODO: non-default stream RMM_TRY( RMM_ALLOC(&back_val, back_val_size, stream) ); return GDF_SUCCESS; } gdf_error teardown() { RMM_TRY(RMM_FREE(back_key, stream)); RMM_TRY(RMM_FREE(back_val, stream)); RMM_TRY(RMM_FREE(storage, stream)); return GDF_SUCCESS; } }; template <typename Tk, typename Tv> struct SegmentedRadixSort { static gdf_error sort( SegmentedRadixSortPlan *plan, Tk *d_key_buf, Tv *d_value_buf, unsigned num_segments, unsigned *d_begin_offsets, unsigned *d_end_offsets) { unsigned num_items = plan->num_items; Tk *d_key_alt_buf = (Tk*)plan->back_key; Tv *d_value_alt_buf = (Tv*)plan->back_val; hipStream_t stream = plan->stream; int descending = plan->descending; unsigned begin_bit = plan->begin_bit; unsigned end_bit = plan->end_bit; cub::DoubleBuffer<Tk> d_keys(d_key_buf, d_key_alt_buf); typedef hipcub::DeviceSegmentedRadixSort Sorter; if (d_value_buf) { // Sort KeyValue pairs cub::DoubleBuffer<Tv> d_values(d_value_buf, d_value_alt_buf); if (descending) { Sorter::SortPairsDescending(plan->storage, plan->storage_bytes, d_keys, d_values, num_items, num_segments, d_begin_offsets, d_end_offsets, begin_bit, end_bit, stream); } else { Sorter::SortPairs( plan->storage, plan->storage_bytes, d_keys, d_values, num_items, num_segments, d_begin_offsets, d_end_offsets, begin_bit, end_bit, stream ); } CUDA_CHECK_LAST(); if (plan->storage && d_value_buf != d_values.Current()){ hipMemcpyAsync(d_value_buf, d_value_alt_buf, num_items * sizeof(Tv), hipMemcpyDeviceToDevice, stream); CUDA_CHECK_LAST(); } } else { // Sort Keys only if (descending) { Sorter::SortKeysDescending( plan->storage, plan->storage_bytes, d_keys, num_items, num_segments, d_begin_offsets, d_end_offsets, begin_bit, end_bit, stream ); CUDA_CHECK_LAST() } else { Sorter::SortKeys( plan->storage, plan->storage_bytes, d_keys, num_items, num_segments, d_begin_offsets, d_end_offsets, begin_bit, end_bit, stream ); } CUDA_CHECK_LAST(); } if ( plan->storage ) { // We have operated and the result is not in front buffer if (d_key_buf != d_keys.Current()){ hipMemcpyAsync(d_key_buf, d_key_alt_buf, num_items * sizeof(Tk), hipMemcpyDeviceToDevice, stream); CUDA_CHECK_LAST(); } } else { // We have not operated. // Just checking for temporary storage requirement RMM_TRY( RMM_ALLOC(&plan->storage, plan->storage_bytes, plan->stream) ); // TODO: non-default stream CUDA_CHECK_LAST(); // Now that we have allocated, do real work. 
return sort(plan, d_key_buf, d_value_buf, num_segments, d_begin_offsets, d_end_offsets); } return GDF_SUCCESS; } }; gdf_segmented_radixsort_plan_type* cffi_wrap(SegmentedRadixSortPlan* obj){ return reinterpret_cast<gdf_segmented_radixsort_plan_type*>(obj); } SegmentedRadixSortPlan* cffi_unwrap(gdf_segmented_radixsort_plan_type* hdl){ return reinterpret_cast<SegmentedRadixSortPlan*>(hdl); } gdf_segmented_radixsort_plan_type* gdf_segmented_radixsort_plan( size_t num_items, int descending, unsigned begin_bit, unsigned end_bit) { return cffi_wrap(new SegmentedRadixSortPlan(num_items, descending, begin_bit, end_bit)); } gdf_error gdf_segmented_radixsort_plan_setup( gdf_segmented_radixsort_plan_type *hdl, size_t sizeof_key, size_t sizeof_val) { return cffi_unwrap(hdl)->setup(sizeof_key, sizeof_val); } gdf_error gdf_segmented_radixsort_plan_free(gdf_segmented_radixsort_plan_type *hdl) { auto plan = cffi_unwrap(hdl); gdf_error status = plan->teardown(); delete plan; return status; } template <typename Tv> struct gdf_segmented_radixsort_functor { template <typename Tk> gdf_error operator()( gdf_segmented_radixsort_plan_type *hdl, gdf_column *keycol, gdf_column *valcol, unsigned num_segments, unsigned *d_begin_offsets, unsigned *d_end_offsets) { /* validity mask must be empty */ GDF_REQUIRE(!keycol->valid || !keycol->null_count, GDF_VALIDITY_UNSUPPORTED); GDF_REQUIRE(!valcol->valid || !valcol->null_count, GDF_VALIDITY_UNSUPPORTED); /* size of columns must match */ GDF_REQUIRE(keycol->size == valcol->size, GDF_COLUMN_SIZE_MISMATCH); SegmentedRadixSortPlan *plan = cffi_unwrap(hdl); /* num_items must match */ GDF_REQUIRE(plan->num_items == keycol->size, GDF_COLUMN_SIZE_MISMATCH); /* back buffer size must match */ GDF_REQUIRE(sizeof(Tk) * plan->num_items == plan->back_key_size, GDF_COLUMN_SIZE_MISMATCH); GDF_REQUIRE(sizeof(Tv) * plan->num_items == plan->back_val_size, GDF_COLUMN_SIZE_MISMATCH); /* Do sort */ return SegmentedRadixSort<Tk, Tv>::sort(plan, (Tk*)keycol->data, (Tv*)valcol->data, num_segments, d_begin_offsets, d_end_offsets); } }; gdf_error gdf_segmented_radixsort(gdf_segmented_radixsort_plan_type *hdl, gdf_column *keycol, gdf_column *valcol, unsigned num_segments, unsigned *d_begin_offsets, unsigned *d_end_offsets) { GDF_REQUIRE(valcol->dtype == GDF_INT64, GDF_UNSUPPORTED_DTYPE); return cudf::type_dispatcher(keycol->dtype, gdf_segmented_radixsort_functor<int64_t>{}, hdl, keycol, valcol, num_segments, d_begin_offsets, d_end_offsets); }
4f6ddffd0d8887b15495fbbd9aa9c35fb69c4ace.cu
#include <cudf/cudf.h> #include <rmm/rmm.h> #include <utilities/cudf_utils.h> #include <utilities/error_utils.hpp> #include <cudf/utilities/legacy/type_dispatcher.hpp> #include <cudf/utilities/legacy/wrapper_types.hpp> #include <cub/device/device_segmented_radix_sort.cuh> struct SegmentedRadixSortPlan{ const gdf_size_type num_items; // temporary storage void *storage; size_t storage_bytes; void *back_key, *back_val; size_t back_key_size, back_val_size; cudaStream_t stream; int descending; unsigned begin_bit, end_bit; SegmentedRadixSortPlan(size_t num_items, int descending, unsigned begin_bit, unsigned end_bit) : num_items(num_items), storage(nullptr), storage_bytes(0), back_key(nullptr), back_val(nullptr), back_key_size(0), back_val_size(0), stream(0), descending(descending), begin_bit(begin_bit), end_bit(end_bit) {} gdf_error setup(size_t sizeof_key, size_t sizeof_val) { back_key_size = num_items * sizeof_key; back_val_size = num_items * sizeof_val; RMM_TRY( RMM_ALLOC(&back_key, back_key_size, stream) ); // TODO: non-default stream RMM_TRY( RMM_ALLOC(&back_val, back_val_size, stream) ); return GDF_SUCCESS; } gdf_error teardown() { RMM_TRY(RMM_FREE(back_key, stream)); RMM_TRY(RMM_FREE(back_val, stream)); RMM_TRY(RMM_FREE(storage, stream)); return GDF_SUCCESS; } }; template <typename Tk, typename Tv> struct SegmentedRadixSort { static gdf_error sort( SegmentedRadixSortPlan *plan, Tk *d_key_buf, Tv *d_value_buf, unsigned num_segments, unsigned *d_begin_offsets, unsigned *d_end_offsets) { unsigned num_items = plan->num_items; Tk *d_key_alt_buf = (Tk*)plan->back_key; Tv *d_value_alt_buf = (Tv*)plan->back_val; cudaStream_t stream = plan->stream; int descending = plan->descending; unsigned begin_bit = plan->begin_bit; unsigned end_bit = plan->end_bit; cub::DoubleBuffer<Tk> d_keys(d_key_buf, d_key_alt_buf); typedef cub::DeviceSegmentedRadixSort Sorter; if (d_value_buf) { // Sort KeyValue pairs cub::DoubleBuffer<Tv> d_values(d_value_buf, d_value_alt_buf); if (descending) { Sorter::SortPairsDescending(plan->storage, plan->storage_bytes, d_keys, d_values, num_items, num_segments, d_begin_offsets, d_end_offsets, begin_bit, end_bit, stream); } else { Sorter::SortPairs( plan->storage, plan->storage_bytes, d_keys, d_values, num_items, num_segments, d_begin_offsets, d_end_offsets, begin_bit, end_bit, stream ); } CUDA_CHECK_LAST(); if (plan->storage && d_value_buf != d_values.Current()){ cudaMemcpyAsync(d_value_buf, d_value_alt_buf, num_items * sizeof(Tv), cudaMemcpyDeviceToDevice, stream); CUDA_CHECK_LAST(); } } else { // Sort Keys only if (descending) { Sorter::SortKeysDescending( plan->storage, plan->storage_bytes, d_keys, num_items, num_segments, d_begin_offsets, d_end_offsets, begin_bit, end_bit, stream ); CUDA_CHECK_LAST() } else { Sorter::SortKeys( plan->storage, plan->storage_bytes, d_keys, num_items, num_segments, d_begin_offsets, d_end_offsets, begin_bit, end_bit, stream ); } CUDA_CHECK_LAST(); } if ( plan->storage ) { // We have operated and the result is not in front buffer if (d_key_buf != d_keys.Current()){ cudaMemcpyAsync(d_key_buf, d_key_alt_buf, num_items * sizeof(Tk), cudaMemcpyDeviceToDevice, stream); CUDA_CHECK_LAST(); } } else { // We have not operated. // Just checking for temporary storage requirement RMM_TRY( RMM_ALLOC(&plan->storage, plan->storage_bytes, plan->stream) ); // TODO: non-default stream CUDA_CHECK_LAST(); // Now that we have allocated, do real work. 
return sort(plan, d_key_buf, d_value_buf, num_segments, d_begin_offsets, d_end_offsets); } return GDF_SUCCESS; } }; gdf_segmented_radixsort_plan_type* cffi_wrap(SegmentedRadixSortPlan* obj){ return reinterpret_cast<gdf_segmented_radixsort_plan_type*>(obj); } SegmentedRadixSortPlan* cffi_unwrap(gdf_segmented_radixsort_plan_type* hdl){ return reinterpret_cast<SegmentedRadixSortPlan*>(hdl); } gdf_segmented_radixsort_plan_type* gdf_segmented_radixsort_plan( size_t num_items, int descending, unsigned begin_bit, unsigned end_bit) { return cffi_wrap(new SegmentedRadixSortPlan(num_items, descending, begin_bit, end_bit)); } gdf_error gdf_segmented_radixsort_plan_setup( gdf_segmented_radixsort_plan_type *hdl, size_t sizeof_key, size_t sizeof_val) { return cffi_unwrap(hdl)->setup(sizeof_key, sizeof_val); } gdf_error gdf_segmented_radixsort_plan_free(gdf_segmented_radixsort_plan_type *hdl) { auto plan = cffi_unwrap(hdl); gdf_error status = plan->teardown(); delete plan; return status; } template <typename Tv> struct gdf_segmented_radixsort_functor { template <typename Tk> gdf_error operator()( gdf_segmented_radixsort_plan_type *hdl, gdf_column *keycol, gdf_column *valcol, unsigned num_segments, unsigned *d_begin_offsets, unsigned *d_end_offsets) { /* validity mask must be empty */ GDF_REQUIRE(!keycol->valid || !keycol->null_count, GDF_VALIDITY_UNSUPPORTED); GDF_REQUIRE(!valcol->valid || !valcol->null_count, GDF_VALIDITY_UNSUPPORTED); /* size of columns must match */ GDF_REQUIRE(keycol->size == valcol->size, GDF_COLUMN_SIZE_MISMATCH); SegmentedRadixSortPlan *plan = cffi_unwrap(hdl); /* num_items must match */ GDF_REQUIRE(plan->num_items == keycol->size, GDF_COLUMN_SIZE_MISMATCH); /* back buffer size must match */ GDF_REQUIRE(sizeof(Tk) * plan->num_items == plan->back_key_size, GDF_COLUMN_SIZE_MISMATCH); GDF_REQUIRE(sizeof(Tv) * plan->num_items == plan->back_val_size, GDF_COLUMN_SIZE_MISMATCH); /* Do sort */ return SegmentedRadixSort<Tk, Tv>::sort(plan, (Tk*)keycol->data, (Tv*)valcol->data, num_segments, d_begin_offsets, d_end_offsets); } }; gdf_error gdf_segmented_radixsort(gdf_segmented_radixsort_plan_type *hdl, gdf_column *keycol, gdf_column *valcol, unsigned num_segments, unsigned *d_begin_offsets, unsigned *d_end_offsets) { GDF_REQUIRE(valcol->dtype == GDF_INT64, GDF_UNSUPPORTED_DTYPE); return cudf::type_dispatcher(keycol->dtype, gdf_segmented_radixsort_functor<int64_t>{}, hdl, keycol, valcol, num_segments, d_begin_offsets, d_end_offsets); }
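The record pair above exercises CUB's segmented radix sort, whose calling convention is the one thing both versions share: a first call with a null temporary-storage pointer only reports the required byte count, and the sort is issued again after allocation (the code above does this by recursing into sort()). The sketch below is illustrative only, not taken from the dataset; it uses the simpler key-in/key-out overload rather than the cub::DoubleBuffer overload used above, hypothetical buffer names, and plain cudaMalloc instead of RMM. hipify maps cudaMalloc/cudaFree/cudaStream_t to their hip equivalents and leaves the cub:: call to be served by hipCUB on ROCm.

#include <cub/device/device_segmented_radix_sort.cuh>
#include <cuda_runtime.h>

// Sort int keys within each segment [d_begin_offsets[i], d_end_offsets[i]).
// Hypothetical helper mirroring the query-then-run idiom used by the plan above.
void segmented_sort_keys(const int *d_keys_in, int *d_keys_out,
                         int num_items, int num_segments,
                         const int *d_begin_offsets, const int *d_end_offsets,
                         cudaStream_t stream)
{
    void *d_temp_storage = nullptr;
    size_t temp_storage_bytes = 0;

    // Pass 1: d_temp_storage == nullptr, so CUB only writes temp_storage_bytes.
    cub::DeviceSegmentedRadixSort::SortKeys(
        d_temp_storage, temp_storage_bytes,
        d_keys_in, d_keys_out, num_items,
        num_segments, d_begin_offsets, d_end_offsets,
        0, sizeof(int) * 8, stream);

    cudaMalloc(&d_temp_storage, temp_storage_bytes);

    // Pass 2: same arguments with real storage, so the sort actually runs.
    cub::DeviceSegmentedRadixSort::SortKeys(
        d_temp_storage, temp_storage_bytes,
        d_keys_in, d_keys_out, num_items,
        num_segments, d_begin_offsets, d_end_offsets,
        0, sizeof(int) * 8, stream);

    cudaFree(d_temp_storage);
}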
84519e8bf19da0b07f4029343d4161f5a9e85393.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Created by kindr on 2021/4/29. // #include "zeroCopyMemory.cuh" #include "../../common/utils.cuh" #include <cstdio> #include <vector> __global__ void addOne(float *vec, size_t N) { unsigned idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < N) vec[idx] = vec[idx] + 1.f; } void zeroCopyMemory(size_t nElement, size_t nThread) { float *vec; size_t nBytes = nElement * sizeof(float); hipHostMalloc(&vec, nBytes, hipHostMallocMapped); CHECK(hipGetLastError()); memset(vec, 0, nBytes); size_t nBlock = (nElement + nThread - 1) / nThread; hipLaunchKernelGGL(( addOne), dim3(nBlock), dim3(nThread), 0, 0, vec, nElement); hipDeviceSynchronize(); CHECK(hipGetLastError()); bool isSame = true; for (size_t i = 0; i < nElement; ++i) { if (vec[i] != 1.f) { isSame = false; } } printf("isSame?: %s", isSame ? "true" : "false"); hipHostFree(vec); }
84519e8bf19da0b07f4029343d4161f5a9e85393.cu
// // Created by kindr on 2021/4/29. // #include "zeroCopyMemory.cuh" #include "../../common/utils.cuh" #include <cstdio> #include <vector> __global__ void addOne(float *vec, size_t N) { unsigned idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < N) vec[idx] = vec[idx] + 1.f; } void zeroCopyMemory(size_t nElement, size_t nThread) { float *vec; size_t nBytes = nElement * sizeof(float); cudaHostAlloc(&vec, nBytes, cudaHostAllocMapped); CHECK(cudaGetLastError()); memset(vec, 0, nBytes); size_t nBlock = (nElement + nThread - 1) / nThread; addOne<<<nBlock, nThread>>>(vec, nElement); cudaDeviceSynchronize(); CHECK(cudaGetLastError()); bool isSame = true; for (size_t i = 0; i < nElement; ++i) { if (vec[i] != 1.f) { isSame = false; } } printf("isSame?: %s", isSame ? "true" : "false"); cudaFreeHost(vec); }
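The zeroCopyMemory pair above is the shortest illustration of the column mapping in this dataset: hipify rewrites cudaHostAlloc/cudaHostAllocMapped to hipHostMalloc/hipHostMallocMapped, the triple-chevron launch to hipLaunchKernelGGL, and cudaFreeHost to hipHostFree, while the kernel body is untouched. A self-contained CUDA-side sketch of the same mapped host-memory idiom (illustrative only, not part of the dataset):

#include <cstdio>
#include <cstring>
#include <cuda_runtime.h>

__global__ void addOne(float *vec, size_t n) {
    size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < n) vec[idx] += 1.f;
}

int main() {
    const size_t n = 1 << 20;
    float *vec = nullptr;

    // Mapped (zero-copy) pinned host memory; hipify turns this into
    // hipHostMalloc(&vec, n * sizeof(float), hipHostMallocMapped).
    cudaHostAlloc(&vec, n * sizeof(float), cudaHostAllocMapped);
    memset(vec, 0, n * sizeof(float));

    const unsigned threads = 256;
    const unsigned blocks  = (n + threads - 1) / threads;
    // Triple-chevron launch; hipify rewrites it as
    // hipLaunchKernelGGL(addOne, dim3(blocks), dim3(threads), 0, 0, vec, n).
    addOne<<<blocks, threads>>>(vec, n);
    cudaDeviceSynchronize();

    printf("vec[0] after kernel: %f\n", vec[0]);
    cudaFreeHost(vec);  // becomes hipHostFree(vec)
    return 0;
}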
53bb89dbd7b8ead4e9575360c8dec8e5d61f7e1e.hip
// !!! This is a file automatically generated by hipify!!! #include "luaT.h" #include "THH.h" #include "hip/hip_runtime.h" #include "aux.cuh" #include <thrust/transform.h> #include <thrust/device_ptr.h> #include <thrust/reduce.h> #include <thrust/transform_reduce.h> #include <thrust/functional.h> #define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit //no-overlap __global__ void output_kernel(float *input, float *output, float *output_dx, float *output_dy, int input_n, int input_h, int input_w, int output_h, int output_w, int kH, int kW){ float* ptr_input_plane = input + blockIdx.x * input_w * input_h; float* ptr_output_plane = output + blockIdx.x * output_w * output_h; float* ptr_output_plane_dx = output_dx + blockIdx.x * output_w * output_h; float* ptr_output_plane_dy = output_dy + blockIdx.x * output_w * output_h; int xout = threadIdx.x; int yout = threadIdx.y; const int xout_step = blockDim.x; const int yout_step = blockDim.y; int xin_start = threadIdx.x * kW; int yin_start = threadIdx.y * kH; const int xin_step = blockDim.x * kW; const int yin_step = blockDim.y * kH; int xin_end = (input_w/kW) * kW; //TODO could this be right? int yin_end = (input_h/kH) * kH; for (int yin = yin_start; yin < yin_end; yin += yin_step){ for (int xin = xin_start; xin < xin_end; xin += xin_step){ float* ptr_input = ptr_input_plane + xin + yin * input_w; float* ptr_output = ptr_output_plane + xout + yout * output_w; float* ptr_output_dx = ptr_output_plane_dx + xout + yout * output_w; float* ptr_output_dy = ptr_output_plane_dy + xout + yout * output_w; float poolMax = 0; float dx = 0; float dy = 0; if (xout < output_w && yout < output_h){ for (int ky = 0; ky < kH && yin + ky < input_h; ky++){ for (int kx = 0; kx < kW && xin + kx < input_w; kx++){ float* ptr_input_pool = ptr_input + kx + ky * input_w; if (*ptr_input_pool > poolMax){ poolMax = *ptr_input_pool; dx = kx; dy = ky; } } } *ptr_output = poolMax; *ptr_output_dx = dx + 1; *ptr_output_dy = dy + 1; } // endif xout += xout_step; } //end for xin yout += yout_step; } //end for yin } __global__ void gradInput_kernel(float* gradInput, float* gradOutput, float* output_dx, float* output_dy, int input_n, int input_h, int input_w, int output_h, int output_w, int kH, int kW){ float* ptr_gradInput_plane = gradInput + blockIdx.x * input_w * input_h; float* ptr_gradOutput_plane = gradOutput + blockIdx.x * output_w * output_h; float* ptr_output_plane_dx = output_dx + blockIdx.x * output_w * output_h; float* ptr_output_plane_dy = output_dy + blockIdx.x * output_w * output_h; int xout = threadIdx.x; int yout = threadIdx.y; const int xout_step = blockDim.x; const int yout_step = blockDim.y; int xin_start = threadIdx.x * kW; int yin_start = threadIdx.y * kH; const int xin_step = blockDim.x * kW; const int yin_step = blockDim.y * kH; int xin_end = (input_w/kW) * kW; //TODO could this be right? 
int yin_end = (input_h/kH) * kH; for (int yin = yin_start; yin < yin_end; yin += yin_step){ for (int xin = xin_start; xin < xin_end; xin += xin_step){ float* ptr_gradInput = ptr_gradInput_plane + xin + yin * input_w; float* ptr_gradOutput = ptr_gradOutput_plane + xout + yout * output_w; float* ptr_output_dx = ptr_output_plane_dx + xout + yout * output_w; float* ptr_output_dy = ptr_output_plane_dy + xout + yout * output_w; for (int ky = 0; ky < kH && yin + ky < input_h; ky++){ for (int kx = 0; kx < kW && xin + kx < input_w; kx++){ float* ptr_gradInput_pool = ptr_gradInput + kx + ky * input_w; if(kx == *ptr_output_dx-1 && ky == *ptr_output_dy-1) *ptr_gradInput_pool = *ptr_gradOutput; else *ptr_gradInput_pool = 0; } // end for kx } // end for ky xout += xout_step; } // end for xin yout += yout_step; } // end for yin } static int cunn_SpatialMaxPoolingPos_updateOutput(lua_State *L){ THCState* state = getCutorchState(L); THCudaTensor* input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor* output = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "output_p", "torch.CudaTensor"); THCudaTensor* dx = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "output_dx", "torch.CudaTensor"); THCudaTensor* dy = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "output_dy", "torch.CudaTensor"); int kW = luaT_getfieldcheckint(L, 1, "kW"); int kH = luaT_getfieldcheckint(L, 1, "kH"); float* output_data; float* output_dx; float* output_dy; float* input_data; long nInputCols = input -> size[3]; long nInputRows = input -> size[2]; long nInputPlane = input -> size[1]; long nBatch = input -> size[0]; long nOutputCols = nInputCols / kW; long nOutputRows = nInputRows / kH; luaL_argcheck(L, input->size[1] == nInputPlane, 2, "invalid number of input planes"); luaL_argcheck(L, nInputCols >= kW && nInputRows >= kH, 2, "input image smaller than kernel size"); input = THCudaTensor_newContiguous(state, input); input_data = THCudaTensor_data(state, input); THCudaTensor_resize4d(state, output, nBatch, nInputPlane, nOutputRows, nOutputCols); THCudaTensor_resize4d(state, dx, nBatch, nInputPlane, nOutputRows, nOutputCols); THCudaTensor_resize4d(state, dy, nBatch, nInputPlane, nOutputRows, nOutputCols); output_data = THCudaTensor_data(state, output); output_dx = THCudaTensor_data(state, dx); output_dy = THCudaTensor_data(state, dy); dim3 blocks(nInputPlane*nBatch, 1); dim3 threads(32,8); hipLaunchKernelGGL(( output_kernel) , dim3(blocks), dim3(threads), 0, 0, input_data, output_data, output_dx, output_dy, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols, kH, kW); THCudaTensor_free(state, input); hipError_t err = hipGetLastError(); if (err != hipSuccess){ printf("error in SpatialMaxPoolingPos.updateOutput: %s\n", hipGetErrorString(err)); THError("aborting"); } return 1; } static int cunn_SpatialMaxPoolingPos_updateGradInput(lua_State *L){ THCState* state = getCutorchState(L); THCudaTensor* gradOutput = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); THCudaTensor* gradInput = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor"); THCudaTensor* input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor* output_dx = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "output_dx", "torch.CudaTensor"); THCudaTensor* output_dy = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "output_dy", "torch.CudaTensor"); int kW = luaT_getfieldcheckint(L, 1, "kW"); int kH = luaT_getfieldcheckint(L, 1, "kH"); float* gradInput_data; float* gradOutput_data; float* output_dx_data; float* 
output_dy_data; long nInputCols = input->size[3]; long nInputRows = input->size[2]; long nInputPlane = input->size[1]; long nbatch = input->size[0]; long nOutputCols = gradOutput->size[3]; long nOutputRows = gradOutput->size[2]; THCudaTensor_resizeAs(state, gradInput, input); THCudaTensor_zero(state, gradInput); output_dx = THCudaTensor_newContiguous(state, output_dx); output_dy = THCudaTensor_newContiguous(state, output_dy); gradOutput = THCudaTensor_newContiguous(state, gradOutput); gradOutput_data = THCudaTensor_data(state, gradOutput); output_dx_data = THCudaTensor_data(state, output_dx); output_dy_data = THCudaTensor_data(state, output_dy); gradInput_data = THCudaTensor_data(state, gradInput); dim3 blocks(nInputPlane*nbatch, 1); dim3 threads(32,8); hipLaunchKernelGGL(( gradInput_kernel), dim3(blocks), dim3(threads), 0, 0, gradInput_data, gradOutput_data, output_dx_data, output_dy_data, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols, kH, kW); hipError_t err = hipGetLastError(); if (err != hipSuccess){ printf("error in SSMPoolingOffsets_updateGradInput: %s\n", hipGetErrorString(err)); THError("aborting"); } THCudaTensor_free(state, gradOutput); THCudaTensor_free(state, output_dx); THCudaTensor_free(state, output_dy); return 1; } static const struct luaL_Reg cunn_SpatialMaxPoolingPos__ [] = { {"SpatialMaxPoolingPos_updateOutput", cunn_SpatialMaxPoolingPos_updateOutput}, {"SpatialMaxPoolingPos_updateGradInput", cunn_SpatialMaxPoolingPos_updateGradInput}, {NULL, NULL} }; void cunn_SpatialMaxPoolingPos_init(lua_State* L){ luaL_openlib(L, "jz", cunn_SpatialMaxPoolingPos__, 0); }
53bb89dbd7b8ead4e9575360c8dec8e5d61f7e1e.cu
#include "luaT.h" #include "THC.h" #include "cuda.h" #include "aux.cuh" #include <thrust/transform.h> #include <thrust/device_ptr.h> #include <thrust/reduce.h> #include <thrust/transform_reduce.h> #include <thrust/functional.h> #define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit //no-overlap __global__ void output_kernel(float *input, float *output, float *output_dx, float *output_dy, int input_n, int input_h, int input_w, int output_h, int output_w, int kH, int kW){ float* ptr_input_plane = input + blockIdx.x * input_w * input_h; float* ptr_output_plane = output + blockIdx.x * output_w * output_h; float* ptr_output_plane_dx = output_dx + blockIdx.x * output_w * output_h; float* ptr_output_plane_dy = output_dy + blockIdx.x * output_w * output_h; int xout = threadIdx.x; int yout = threadIdx.y; const int xout_step = blockDim.x; const int yout_step = blockDim.y; int xin_start = threadIdx.x * kW; int yin_start = threadIdx.y * kH; const int xin_step = blockDim.x * kW; const int yin_step = blockDim.y * kH; int xin_end = (input_w/kW) * kW; //TODO could this be right? int yin_end = (input_h/kH) * kH; for (int yin = yin_start; yin < yin_end; yin += yin_step){ for (int xin = xin_start; xin < xin_end; xin += xin_step){ float* ptr_input = ptr_input_plane + xin + yin * input_w; float* ptr_output = ptr_output_plane + xout + yout * output_w; float* ptr_output_dx = ptr_output_plane_dx + xout + yout * output_w; float* ptr_output_dy = ptr_output_plane_dy + xout + yout * output_w; float poolMax = 0; float dx = 0; float dy = 0; if (xout < output_w && yout < output_h){ for (int ky = 0; ky < kH && yin + ky < input_h; ky++){ for (int kx = 0; kx < kW && xin + kx < input_w; kx++){ float* ptr_input_pool = ptr_input + kx + ky * input_w; if (*ptr_input_pool > poolMax){ poolMax = *ptr_input_pool; dx = kx; dy = ky; } } } *ptr_output = poolMax; *ptr_output_dx = dx + 1; *ptr_output_dy = dy + 1; } // endif xout += xout_step; } //end for xin yout += yout_step; } //end for yin } __global__ void gradInput_kernel(float* gradInput, float* gradOutput, float* output_dx, float* output_dy, int input_n, int input_h, int input_w, int output_h, int output_w, int kH, int kW){ float* ptr_gradInput_plane = gradInput + blockIdx.x * input_w * input_h; float* ptr_gradOutput_plane = gradOutput + blockIdx.x * output_w * output_h; float* ptr_output_plane_dx = output_dx + blockIdx.x * output_w * output_h; float* ptr_output_plane_dy = output_dy + blockIdx.x * output_w * output_h; int xout = threadIdx.x; int yout = threadIdx.y; const int xout_step = blockDim.x; const int yout_step = blockDim.y; int xin_start = threadIdx.x * kW; int yin_start = threadIdx.y * kH; const int xin_step = blockDim.x * kW; const int yin_step = blockDim.y * kH; int xin_end = (input_w/kW) * kW; //TODO could this be right? 
int yin_end = (input_h/kH) * kH; for (int yin = yin_start; yin < yin_end; yin += yin_step){ for (int xin = xin_start; xin < xin_end; xin += xin_step){ float* ptr_gradInput = ptr_gradInput_plane + xin + yin * input_w; float* ptr_gradOutput = ptr_gradOutput_plane + xout + yout * output_w; float* ptr_output_dx = ptr_output_plane_dx + xout + yout * output_w; float* ptr_output_dy = ptr_output_plane_dy + xout + yout * output_w; for (int ky = 0; ky < kH && yin + ky < input_h; ky++){ for (int kx = 0; kx < kW && xin + kx < input_w; kx++){ float* ptr_gradInput_pool = ptr_gradInput + kx + ky * input_w; if(kx == *ptr_output_dx-1 && ky == *ptr_output_dy-1) *ptr_gradInput_pool = *ptr_gradOutput; else *ptr_gradInput_pool = 0; } // end for kx } // end for ky xout += xout_step; } // end for xin yout += yout_step; } // end for yin } static int cunn_SpatialMaxPoolingPos_updateOutput(lua_State *L){ THCState* state = getCutorchState(L); THCudaTensor* input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor* output = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "output_p", "torch.CudaTensor"); THCudaTensor* dx = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "output_dx", "torch.CudaTensor"); THCudaTensor* dy = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "output_dy", "torch.CudaTensor"); int kW = luaT_getfieldcheckint(L, 1, "kW"); int kH = luaT_getfieldcheckint(L, 1, "kH"); float* output_data; float* output_dx; float* output_dy; float* input_data; long nInputCols = input -> size[3]; long nInputRows = input -> size[2]; long nInputPlane = input -> size[1]; long nBatch = input -> size[0]; long nOutputCols = nInputCols / kW; long nOutputRows = nInputRows / kH; luaL_argcheck(L, input->size[1] == nInputPlane, 2, "invalid number of input planes"); luaL_argcheck(L, nInputCols >= kW && nInputRows >= kH, 2, "input image smaller than kernel size"); input = THCudaTensor_newContiguous(state, input); input_data = THCudaTensor_data(state, input); THCudaTensor_resize4d(state, output, nBatch, nInputPlane, nOutputRows, nOutputCols); THCudaTensor_resize4d(state, dx, nBatch, nInputPlane, nOutputRows, nOutputCols); THCudaTensor_resize4d(state, dy, nBatch, nInputPlane, nOutputRows, nOutputCols); output_data = THCudaTensor_data(state, output); output_dx = THCudaTensor_data(state, dx); output_dy = THCudaTensor_data(state, dy); dim3 blocks(nInputPlane*nBatch, 1); dim3 threads(32,8); output_kernel <<<blocks, threads>>> (input_data, output_data, output_dx, output_dy, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols, kH, kW); THCudaTensor_free(state, input); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess){ printf("error in SpatialMaxPoolingPos.updateOutput: %s\n", cudaGetErrorString(err)); THError("aborting"); } return 1; } static int cunn_SpatialMaxPoolingPos_updateGradInput(lua_State *L){ THCState* state = getCutorchState(L); THCudaTensor* gradOutput = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); THCudaTensor* gradInput = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor"); THCudaTensor* input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor* output_dx = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "output_dx", "torch.CudaTensor"); THCudaTensor* output_dy = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "output_dy", "torch.CudaTensor"); int kW = luaT_getfieldcheckint(L, 1, "kW"); int kH = luaT_getfieldcheckint(L, 1, "kH"); float* gradInput_data; float* gradOutput_data; float* output_dx_data; float* output_dy_data; long nInputCols 
= input->size[3]; long nInputRows = input->size[2]; long nInputPlane = input->size[1]; long nbatch = input->size[0]; long nOutputCols = gradOutput->size[3]; long nOutputRows = gradOutput->size[2]; THCudaTensor_resizeAs(state, gradInput, input); THCudaTensor_zero(state, gradInput); output_dx = THCudaTensor_newContiguous(state, output_dx); output_dy = THCudaTensor_newContiguous(state, output_dy); gradOutput = THCudaTensor_newContiguous(state, gradOutput); gradOutput_data = THCudaTensor_data(state, gradOutput); output_dx_data = THCudaTensor_data(state, output_dx); output_dy_data = THCudaTensor_data(state, output_dy); gradInput_data = THCudaTensor_data(state, gradInput); dim3 blocks(nInputPlane*nbatch, 1); dim3 threads(32,8); gradInput_kernel<<<blocks, threads>>> (gradInput_data, gradOutput_data, output_dx_data, output_dy_data, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols, kH, kW); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess){ printf("error in SSMPoolingOffsets_updateGradInput: %s\n", cudaGetErrorString(err)); THError("aborting"); } THCudaTensor_free(state, gradOutput); THCudaTensor_free(state, output_dx); THCudaTensor_free(state, output_dy); return 1; } static const struct luaL_Reg cunn_SpatialMaxPoolingPos__ [] = { {"SpatialMaxPoolingPos_updateOutput", cunn_SpatialMaxPoolingPos_updateOutput}, {"SpatialMaxPoolingPos_updateGradInput", cunn_SpatialMaxPoolingPos_updateGradInput}, {NULL, NULL} }; void cunn_SpatialMaxPoolingPos_init(lua_State* L){ luaL_openlib(L, "jz", cunn_SpatialMaxPoolingPos__, 0); }
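In the SpatialMaxPoolingPos pair above, the kernel bodies and the Torch/Lua glue are identical; the differences are confined to the headers (THC.h and cuda.h become THH.h and hip/hip_runtime.h), the launch syntax, and the error-handling calls. Below is a minimal, self-contained sketch of that launch-plus-error-check idiom on the CUDA side, with a hypothetical kernel that is not taken from the dataset; hipify would rewrite the launch as hipLaunchKernelGGL and map cudaError_t, cudaGetLastError, and cudaGetErrorString to their hip counterparts.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void scale_kernel(float *data, int n, float s) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) data[i] *= s;
}

// Launch the kernel, then check for launch/runtime errors the same way
// updateOutput/updateGradInput do above.
void scale_on_device(float *d_data, int n, float s) {
    const int threads = 256;
    const int blocks = (n + threads - 1) / threads;
    scale_kernel<<<blocks, threads>>>(d_data, n, s);

    cudaError_t err = cudaGetLastError();  // becomes hipGetLastError() after hipify
    if (err != cudaSuccess) {
        printf("kernel launch failed: %s\n", cudaGetErrorString(err));
    }
}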
conv_bias_int8_implicit_gemm_imma16x16x16_cdiv4hwn4_reorder_filter_per_chan_relu.hip
// !!! This is a file automatically generated by hipify!!! /** * \file dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma16x16x16_cdiv4hwn4_reorder_filter_per_chan_relu.cu * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2020 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ // generated by gen_cuda_conv_bias_kern_impls.py #include "../conv_bias_int8_implicit_gemm_imma16x16x16_cdiv4hwn4_reorder_filter.cuinl" template void megdnn::cuda::conv_bias_int8::do_conv_bias_int8_implicit_gemm_imma16x16x16_cdiv4hwn4_reorder_filter<PerChannelBiasVisitor, IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::RELU>>>( const int8_t* d_src, const int8_t* d_filter, PerChannelBiasVisitor bias, IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::RELU>> epilogue, const ConvParam& param, float alpha, float beta, hipStream_t stream);
conv_bias_int8_implicit_gemm_imma16x16x16_cdiv4hwn4_reorder_filter_per_chan_relu.cu
/** * \file dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma16x16x16_cdiv4hwn4_reorder_filter_per_chan_relu.cu * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2020 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ // generated by gen_cuda_conv_bias_kern_impls.py #include "../conv_bias_int8_implicit_gemm_imma16x16x16_cdiv4hwn4_reorder_filter.cuinl" template void megdnn::cuda::conv_bias_int8::do_conv_bias_int8_implicit_gemm_imma16x16x16_cdiv4hwn4_reorder_filter<PerChannelBiasVisitor, IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::RELU>>>( const int8_t* d_src, const int8_t* d_filter, PerChannelBiasVisitor bias, IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::RELU>> epilogue, const ConvParam& param, float alpha, float beta, cudaStream_t stream);
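The conv_bias pair above is a generated one-instantiation-per-file kernel impl: apart from the hipify banner, the only change is the final parameter type, cudaStream_t becoming hipStream_t. A stripped-down sketch of that file layout with hypothetical names (the real template body lives in the included .cuinl and is stubbed out here):

#include <cstdint>
#include <cuda_runtime.h>

// Stand-in for the template normally pulled in from the shared .cuinl header.
template <typename Epilogue>
void do_conv_bias_int8(const int8_t *d_src, const int8_t *d_filter,
                       Epilogue epilogue, float alpha, float beta,
                       cudaStream_t stream) {
    // Real kernel dispatch omitted; this stub only illustrates the file structure.
    (void)d_src; (void)d_filter; (void)epilogue;
    (void)alpha; (void)beta; (void)stream;
}

struct ReluEpilogue {};  // stand-in for IConvEpilogue<Activation<...RELU>>

// The generated kimpl file consists of exactly one such explicit instantiation;
// hipify only has to rewrite cudaStream_t -> hipStream_t here.
template void do_conv_bias_int8<ReluEpilogue>(const int8_t *, const int8_t *,
                                              ReluEpilogue, float, float,
                                              cudaStream_t);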
a086b9137dbd3c65ff2011b3ada1330415283020.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #pragma once #include "FCG.h" #define triple_innerproduct_blocksize 1024 __global__ void _triple_innerproduct(itype n, vtype *r, vtype *w, vtype *q, vtype *v, vtype *alpha_beta_gamma){ __shared__ vtype alpha_shared[FULL_WARP]; __shared__ vtype beta_shared[FULL_WARP]; __shared__ vtype gamma_shared[FULL_WARP]; itype tid = blockDim.x * blockIdx.x + threadIdx.x; int warp = threadIdx.x / FULL_WARP; int lane = tid % FULL_WARP; int i = tid; if(i >= n){ if(lane == 0){ alpha_shared[warp] = 0.; beta_shared[warp] = 0.; gamma_shared[warp] = 0.; } return; } vtype v_i = v[i]; vtype alpha_i = r[i] * v_i; vtype beta_i = w[i] * v_i; vtype gamma_i = q[i] * v_i; #pragma unroll for(int k=FULL_WARP >> 1; k > 0; k = k >> 1){ alpha_i += __shfl_down_sync(FULL_MASK, alpha_i, k); beta_i += __shfl_down_sync(FULL_MASK, beta_i, k); gamma_i += __shfl_down_sync(FULL_MASK, gamma_i, k); } if(lane == 0){ alpha_shared[warp] = alpha_i; beta_shared[warp] = beta_i; gamma_shared[warp] = gamma_i; } __syncthreads(); if(warp == 0){ #pragma unroll for(int k=FULL_WARP >> 1; k > 0; k = k >> 1){ alpha_shared[lane] += __shfl_down_sync(FULL_MASK, alpha_shared[lane], k); beta_shared[lane] += __shfl_down_sync(FULL_MASK, beta_shared[lane], k); gamma_shared[lane] += __shfl_down_sync(FULL_MASK, gamma_shared[lane], k); } if(lane == 0){ atomicAdd(&alpha_beta_gamma[0], alpha_shared[0]); atomicAdd(&alpha_beta_gamma[1], beta_shared[0]); atomicAdd(&alpha_beta_gamma[2], gamma_shared[0]); } } } void triple_innerproduct(vector<vtype> *r, vector<vtype> *w, vector<vtype> *q, vector<vtype> *v, vtype *alpha, vtype *beta, vtype *gamma){ assert(r->n == w->n && r->n == q->n && r->n == v->n); vector<vtype> *alpha_beta_gamma = Vector::init<vtype>(3, true, true); Vector::fillWithValue(alpha_beta_gamma, 0.); gridblock gb = gb1d(r->n, triple_innerproduct_blocksize); hipLaunchKernelGGL(( _triple_innerproduct), dim3(gb.g), dim3(gb.b), 0, 0, r->n, r->val, w->val, q->val, v->val, alpha_beta_gamma->val); vector<vtype> *alpha_beta_gamma_host = Vector::copyToHost(alpha_beta_gamma); *alpha = alpha_beta_gamma_host->val[0]; *beta = alpha_beta_gamma_host->val[1]; *gamma = alpha_beta_gamma_host->val[2]; Vector::free(alpha_beta_gamma); } //########################################################################################################### #define double_merged_axpy_blocksize 1024 __global__ void _double_merged_axpy(itype n, vtype *x0, vtype *x1, vtype *x2, vtype alpha_0, vtype alpha_1){ int i = blockDim.x * blockIdx.x + threadIdx.x; if(i >= n) return; vtype xi1_local = alpha_0 * x0[i] + x1[i]; x2[i] = alpha_1 * xi1_local + x2[i]; x1[i] = xi1_local; } void double_merged_axpy(vector<vtype> *x0, vector<vtype> *x1, vector<vtype> *y, vtype alpha_0, vtype alpha_1){ gridblock gb = gb1d(y->n, double_merged_axpy_blocksize); hipLaunchKernelGGL(( _double_merged_axpy), dim3(gb.g), dim3(gb.b), 0, 0, y->n, x0->val, x1->val, y->val, alpha_0, alpha_1); } //########################################################################################################### // bcm_PrecApply SRC\BOOTAMG\bcm_boot_prec.c void preconditionApply(handles *h, bootBuildData *bootamg_data, boot *boot_amg, applyData *amg_cycle, vector<vtype> *rhs, vector<vtype> *x){ vectorCollection<vtype> *RHS = FCG::context.RHS_buffer; vectorCollection<vtype> *Xtent = FCG::context.Xtent_buffer; if(bootamg_data->solver_type == 0){ // multiplicative for(int k=0; k<boot_amg->n_hrc; k++){ FCG::setHrrchBufferSize(boot_amg->H_array[k]); 
Vector::copyTo(RHS->val[0], rhs); Vector::copyTo(Xtent->val[0], x); GAMG_cycle(h, k, bootamg_data, boot_amg, amg_cycle, RHS, Xtent, 1); Vector::copyTo(x, Xtent->val[0]); } }else if(bootamg_data->solver_type == 1){ // symmetrized multiplicative for(int k=0; k<boot_amg->n_hrc; k++){ FCG::setHrrchBufferSize(boot_amg->H_array[k]); Vector::copyTo(RHS->val[0], rhs); Vector::copyTo(Xtent->val[0], x); GAMG_cycle(h, k, bootamg_data, boot_amg, amg_cycle, RHS, Xtent, 1); Vector::copyTo(x, Xtent->val[0]); } for(int k=boot_amg->n_hrc-1; k>=0; k--){ FCG::setHrrchBufferSize(boot_amg->H_array[k]); /* int num_levels = boot_amg->H_array[k]->num_levels; for(int i=1; i<num_levels; i++){ Vector::fillWithValue(RHS->val[i], 0.); Vector::fillWithValue(Xtent->val[i], 0.); } */ Vector::copyTo(Xtent->val[0], x); Vector::copyTo(RHS->val[0], rhs); GAMG_cycle(h, k, bootamg_data, boot_amg, amg_cycle, RHS, Xtent, 1); Vector::copyTo(x, Xtent->val[0]); } }else if(bootamg_data->solver_type == 2){ // additive itype n = boot_amg->H_array[0]->A_array[0]->n; vector<vtype> *xadd = Vector::init<vtype>(n, true, true); Vector::fillWithValue(xadd, 0.); for(int k=0; k<boot_amg->n_hrc; k++){ FCG::setHrrchBufferSize(boot_amg->H_array[k]); Vector::copyTo(Xtent->val[0], x); Vector::copyTo(RHS->val[0], rhs); GAMG_cycle(h, k, bootamg_data, boot_amg, amg_cycle, RHS, Xtent, 1); Vector::axpy(h->cublas_h, Xtent->val[0], xadd, 1.); } vtype alpha = 1.0 / (vtype) boot_amg->n_hrc; Vector::scale(h->cublas_h, xadd, alpha); Vector::copyTo(x, xadd); Vector::free(xadd); } } vtype flexibileConjugateGradients(handles *h, vector<vtype> *x, vector<vtype> *rhs, bootBuildData *bootamg_data, boot *boot_amg, applyData *amg_cycle, int precon, int max_iter, double rtol, int *num_iter){ buildData *amg_data = bootamg_data->amg_data; CSR *A = amg_data->A; itype n = A->n; vector<vtype> *v = NULL; vector<vtype> *w = NULL; vectorCollection<vtype> *d = Vector::Collection::init<vtype>(2); d->val[0] = Vector::init<vtype>(n, true, true); Vector::fillWithValue(d->val[0], 0.); d->val[1] = Vector::init<vtype>(n, true, true); Vector::fillWithValue(d->val[1], 0.); #if CSR_VECTOR_MUL_GENERAL_TYPE == 0 v = CSRm::CSRVector_product_CUSPARSE(h->cusparse_h0, A, x, NULL, false, 1., 0.); #elif CSR_VECTOR_MUL_GENERAL_TYPE == 1 v = CSRm::CSRVector_product_adaptive_miniwarp(h->cusparse_h0, A, x, NULL, 1., 0.); #endif w = Vector::clone(rhs); Vector::axpy(h->cublas_h, v, w, -1.); vtype delta0 = Vector::norm(h->cublas_h, w); vtype rhs_norm = Vector::norm(h->cublas_h, rhs); if(delta0 <= DBL_EPSILON * rhs_norm){ *num_iter = 0; exit(1); } if(precon){ /* apply preconditioner to w */ preconditionApply(h, bootamg_data, boot_amg, amg_cycle, w, d->val[0]); } vtype delta_old = Vector::dot(h->cublas_h, w, d->val[0]); if(delta_old <= 0.){ std::cout << "\n ERROR1: indefinite preconditioner in cg_iter_coarse: " << delta_old << "\n"; exit(-1); } int idx = 0, iter = 0; vtype l2_norm; do{ #if CSR_VECTOR_MUL_GENERAL_TYPE == 0 CSRm::CSRVector_product_CUSPARSE(h->cusparse_h0, A, d->val[idx], v, false, 1., 0.); #elif CSR_VECTOR_MUL_GENERAL_TYPE == 1 CSRm::CSRVector_product_adaptive_miniwarp(h->cusparse_h0, A, d->val[idx], v, 1., 0.); #endif vtype tau = Vector::dot(h->cublas_h, d->val[idx], v); if (tau <= 0.){ std::cout << "\n ERROR2: indefinite matrix in cg_iter_coarse: " << tau << "\n"; exit(-2); } vtype alpha = delta_old / tau; // update solution Vector::axpy(h->cublas_h, d->val[idx], x, alpha); // update residual Vector::axpy(h->cublas_h, v, w, -alpha); l2_norm = Vector::norm(h->cublas_h, w); iter++; idx = 
iter % 2; Vector::fillWithValue(d->val[idx], 0.); if(precon){ /* apply preconditioner to w */ preconditionApply(h, bootamg_data, boot_amg, amg_cycle, w, d->val[idx]); } //update direction vtype tau1 = Vector::dot(h->cublas_h, d->val[idx], v); vtype beta = tau1 / tau; if(idx == 1) Vector::axpy(h->cublas_h, d->val[0], d->val[1], -beta); else Vector::axpy(h->cublas_h, d->val[1], d->val[0], -beta); delta_old = Vector::dot(h->cublas_h, w, d->val[idx]); if(VERBOSE > 0) std::cout << "bootpcg iteration: " << iter << " residual: " << l2_norm << " relative residual: " << l2_norm / delta0 << "\n"; }while(l2_norm > rtol * delta0 && iter < max_iter); assert( std::isfinite(l2_norm) ); *num_iter = iter; if(precon){ FCG::freePreconditionContext(); } if(amg_cycle->cycle_type == 3) FCGK::freePreconditionContext(); Vector::free(w); Vector::free(v); Vector::Collection::free(d); return l2_norm; } //############################################################################################### vtype flexibileConjugateGradients_v2(handles *h, vector<vtype> *x, vector<vtype> *rhs, bootBuildData *bootamg_data, boot *boot_amg, applyData *amg_cycle, int precon, int max_iter, double rtol, int *num_iter){ buildData *amg_data = bootamg_data->amg_data; CSR *A = amg_data->A; itype n = A->n; vector<vtype> *v = Vector::init<vtype>(n, true, true); Vector::fillWithValue(v, 0.); vector<vtype> *w = NULL; vector<vtype> *r = NULL; vector<vtype> *d = NULL; vector<vtype> *q = NULL; r = Vector::clone(rhs); #if CSR_VECTOR_MUL_GENERAL_TYPE == 0 w = CSRm::CSRVector_product_CUSPARSE(h->cusparse_h0, A, x, NULL, false, 1., 0.); #elif CSR_VECTOR_MUL_GENERAL_TYPE == 1 w = CSRm::CSRVector_product_adaptive_miniwarp(h->cusparse_h0, A, x, NULL, 1., 0.); #endif Vector::axpy(h->cublas_h, w, r, -1.); vtype delta0 = Vector::norm(h->cublas_h, r); vtype rhs_norm = Vector::norm(h->cublas_h, rhs); if(delta0 <= DBL_EPSILON * rhs_norm){ *num_iter = 0; exit(1); } if(precon){ /* apply preconditioner to w */ preconditionApply(h, bootamg_data, boot_amg, amg_cycle, r, v); } #if CSR_VECTOR_MUL_GENERAL_TYPE == 0 CSRm::CSRVector_product_CUSPARSE(h->cusparse_h0, A, v, w, false, 1., 0.); #elif CSR_VECTOR_MUL_GENERAL_TYPE == 1 CSRm::CSRVector_product_adaptive_miniwarp(h->cusparse_h0, A, v, w, 1., 0.); #endif vtype alpha = Vector::dot(h->cublas_h, r, v); vtype beta = Vector::dot(h->cublas_h, w, v); vtype delta = beta; vtype theta = alpha / delta; vtype gamma; // update solution Vector::axpy(h->cublas_h, v, x, theta); // update residual Vector::axpy(h->cublas_h, w, r, -theta); vtype l2_norm = Vector::norm(h->cublas_h, r); if (l2_norm <= rtol * delta0){ *num_iter = 1; } int iter = 1; d = Vector::clone(v); q = Vector::clone(w); //d1 = Vector::init(n, true, true); //q1 = Vector::init(n, true, true); do{ iter++; Vector::fillWithValue(v, 0.); if(precon){ /* apply preconditioner to w */ preconditionApply(h, bootamg_data, boot_amg, amg_cycle, r, v); } #if CSR_VECTOR_MUL_GENERAL_TYPE == 0 CSRm::CSRVector_product_CUSPARSE(h->cusparse_h0, A, v, w, false, 1., 0.); #elif CSR_VECTOR_MUL_GENERAL_TYPE == 1 CSRm::CSRVector_product_adaptive_miniwarp(h->cusparse_h0, A, v, w, 1., 0.); #endif triple_innerproduct(r, w, q, v, &alpha, &beta, &gamma); theta = gamma / delta; delta = beta - pow(gamma, 2) / delta; vtype theta_2 = alpha / delta; Vector::axpy(h->cublas_h, d, v, -theta); Vector::copyTo(d, v); // update solution Vector::axpy(h->cublas_h, d, x, theta_2); Vector::axpy(h->cublas_h, q, w, -theta); Vector::copyTo(q, w); // update residual Vector::axpy(h->cublas_h, q, r, -theta_2); 
l2_norm = Vector::norm(h->cublas_h, r); if(VERBOSE > 0) std::cout << "bootpcg iteration: " << iter << " residual: " << l2_norm << " relative residual: " << l2_norm / delta0 << "\n"; }while(l2_norm > rtol * delta0 && iter < max_iter); assert( std::isfinite(l2_norm) ); *num_iter = iter; if(precon){ FCG::freePreconditionContext(); } if(amg_cycle->cycle_type == 3) FCGK::freePreconditionContext(); Vector::free(w); Vector::free(v); Vector::free(d); Vector::free(q); Vector::free(r); return l2_norm; } //############################################################################################### vtype flexibileConjugateGradients_v3(handles *h, vector<vtype> *x, vector<vtype> *rhs, bootBuildData *bootamg_data, boot *boot_amg, applyData *amg_cycle, int precon, int max_iter, double rtol, int *num_iter){ buildData *amg_data = bootamg_data->amg_data; CSR *A = amg_data->A; itype n = A->n; vector<vtype> *v = Vector::init<vtype>(n, true, true); Vector::fillWithValue(v, 0.); vector<vtype> *w = NULL; vector<vtype> *r = NULL; vector<vtype> *d = NULL; vector<vtype> *q = NULL; r = Vector::clone(rhs); #if CSR_VECTOR_MUL_GENERAL_TYPE == 0 w = CSRm::CSRVector_product_CUSPARSE(h->cusparse_h0, A, x, NULL, false, 1., 0.); #elif CSR_VECTOR_MUL_GENERAL_TYPE == 1 w = CSRm::CSRVector_product_adaptive_miniwarp(h->cusparse_h0, A, x, NULL, 1., 0.); #endif Vector::axpy(h->cublas_h, w, r, -1.); vtype delta0 = Vector::norm(h->cublas_h, r); vtype rhs_norm = Vector::norm(h->cublas_h, rhs); if(delta0 <= DBL_EPSILON * rhs_norm){ *num_iter = 0; exit(1); } if(precon){ /* apply preconditioner to w */ preconditionApply(h, bootamg_data, boot_amg, amg_cycle, r, v); } #if CSR_VECTOR_MUL_GENERAL_TYPE == 0 CSRm::CSRVector_product_CUSPARSE(h->cusparse_h0, A, v, w, false, 1., 0.); #elif CSR_VECTOR_MUL_GENERAL_TYPE == 1 CSRm::CSRVector_product_adaptive_miniwarp(h->cusparse_h0, A, v, w, 1., 0.); #endif vtype alpha = Vector::dot(h->cublas_h, r, v); vtype beta = Vector::dot(h->cublas_h, w, v); vtype delta = beta; vtype theta = alpha / delta; vtype gamma; // update solution Vector::axpy(h->cublas_h, v, x, theta); // update residual Vector::axpy(h->cublas_h, w, r, -theta); vtype l2_norm = Vector::norm(h->cublas_h, r); if (l2_norm <= rtol * delta0){ *num_iter = 1; } int iter = 0; d = Vector::clone(v); q = Vector::clone(w); do{ int idx = iter % 2; if(idx == 0){ Vector::fillWithValue(v, 0.); if(precon){ preconditionApply(h, bootamg_data, boot_amg, amg_cycle, r, v); } #if CSR_VECTOR_MUL_GENERAL_TYPE == 0 CSRm::CSRVector_product_CUSPARSE(h->cusparse_h0, A, v, w, false, 1., 0.); #elif CSR_VECTOR_MUL_GENERAL_TYPE == 1 CSRm::CSRVector_product_adaptive_miniwarp(h->cusparse_h0, A, v, w, 1., 0.); #endif triple_innerproduct(r, w, q, v, &alpha, &beta, &gamma); }else{ Vector::fillWithValue(d, 0.); if(precon){ preconditionApply(h, bootamg_data, boot_amg, amg_cycle, r, d); } #if CSR_VECTOR_MUL_GENERAL_TYPE == 0 CSRm::CSRVector_product_CUSPARSE(h->cusparse_h0, A, d, q, false, 1., 0.); #elif CSR_VECTOR_MUL_GENERAL_TYPE == 1 CSRm::CSRVector_product_adaptive_miniwarp(h->cusparse_h0, A, d, q, 1., 0.); #endif triple_innerproduct(r, q, w, d, &alpha, &beta, &gamma); } theta = gamma / delta; delta = beta - pow(gamma, 2) / delta; vtype theta_2 = alpha / delta; if(idx == 0){ //Vector::axpy(h->cublas_h, d, v, -theta); // update solution //Vector::axpy(h->cublas_h, v, x, theta_2); double_merged_axpy(d, v, x, -theta, theta_2); //Vector::axpy(h->cublas_h, q, w, -theta); // update residual //Vector::axpy(h->cublas_h, w, r, -theta_2); double_merged_axpy(q, w, r, -theta, 
-theta_2); }else{ //Vector::axpy(h->cublas_h, v, d, -theta); // update solution //Vector::axpy(h->cublas_h, d, x, theta_2); double_merged_axpy(v, d, x, -theta, theta_2); //Vector::axpy(h->cublas_h, w, q, -theta); // update residual //Vector::axpy(h->cublas_h, q, r, -theta_2); double_merged_axpy(w, q, r, -theta, -theta_2); } l2_norm = Vector::norm(h->cublas_h, r); if(VERBOSE > 0) std::cout << "bootpcg iteration: " << iter << " residual: " << l2_norm << " relative residual: " << l2_norm / delta0 << "\n"; iter++; }while(l2_norm > rtol * delta0 && iter < max_iter); assert( std::isfinite(l2_norm) ); *num_iter = iter + 1; if(precon){ FCG::freePreconditionContext(); } if(amg_cycle->cycle_type == 3) FCGK::freePreconditionContext(); Vector::free(w); Vector::free(v); Vector::free(d); Vector::free(q); Vector::free(r); return l2_norm; } //############################################################################################### void preconditionApplyK(handles *h, int kk, bootBuildData *bootamg_data, boot *boot_amg, applyData *amg_cycle, int l, vector<vtype> *rhs, vector<vtype> *x){ hierarchy *hrrch = boot_amg->H_array[kk]; /* vectorCollection<vtype> *RHS = FCGK::context.RHS_buffer; vectorCollection<vtype> *Xtent = FCGK::context.Xtent_buffer; FCGK::setHrrchBufferSize(hrrch); Vector::fillWithValue(Xtent->val[l-1], 0.); */ int num_levels = hrrch->num_levels; vectorCollection<vtype> *RHS = Vector::Collection::init<vtype>(num_levels); vectorCollection<vtype> *Xtent = Vector::Collection::init<vtype>(num_levels); // !skip the first for(int i=l-1; i<num_levels; i++){ itype n_i = hrrch->A_array[i]->n; RHS->val[i] = Vector::init<vtype>(n_i, true, true); Vector::fillWithValue(RHS->val[i], 0.); Xtent->val[i] = Vector::init<vtype>(n_i, true, true); Vector::fillWithValue(Xtent->val[i], 0.); } Vector::copyTo(RHS->val[l-1], rhs); GAMG_cycle(h, kk, bootamg_data, boot_amg, amg_cycle, RHS, Xtent, l); Vector::copyTo(x, Xtent->val[l-1]); Vector::Collection::free(RHS); Vector::Collection::free(Xtent); } //########################################################################### void inneritkcycle(handles *h, int kh, vector<vtype> *x, vector<vtype> *rhs, bootBuildData *bootamg_data, boot * boot_amg, applyData *amg_cycle, double rtol, int l){ hierarchy *hrrc = boot_amg->H_array[kh]; CSR *A = hrrc->A_array[l]; if(VERBOSE > 0) std::cout << "Start inneritkcyle level: " << l+1 << "\n"; vectorCollection<vtype> *d = Vector::Collection::init<vtype>(2); d->val[0] = Vector::init<vtype>(A->n, true, true); Vector::fillWithValue(d->val[0], 0.); d->val[1] = Vector::init<vtype>(A->n, true, true); Vector::fillWithValue(d->val[1], 0.); vector<vtype> *w = Vector::clone(rhs); vtype delta0 = Vector::norm(h->cublas_h, w); if(VERBOSE > 0) std::cout << "Level " << l+1 << " delta0 " << delta0 << "\n"; // apply preconditioner to w preconditionApplyK(h, kh, bootamg_data, boot_amg, amg_cycle, l+1, w, d->val[0]); vtype delta_old = Vector::dot(h->cublas_h, w, d->val[0]); if (VERBOSE > 1){ vtype tnrm = Vector::norm(h->cublas_h, w); fprintf(stderr,"level %d recursion output W nrm %g \n",l+1,tnrm); tnrm = Vector::norm(h->cublas_h, d->val[0]); fprintf(stderr,"level %d recursion output nrm %g \n",l+1,tnrm); } if(delta_old <= 0.){ std::cout << "\n ERROR1: indefinite preconditioner in inner_iter: " << delta_old << "\n"; exit(-1); } // parse-matrix vector product #if CSR_VECTOR_MUL_GENERAL_TYPE == 0 vector<vtype> *v = CSRm::CSRVector_product_CUSPARSE(h->cusparse_h0, A, d->val[0], NULL, false, 1., 0.); #elif CSR_VECTOR_MUL_GENERAL_TYPE == 1 vector<vtype> 
*v = CSRm::CSRVector_product_adaptive_miniwarp(h->cusparse_h0, A, d->val[0], NULL, 1., 0.); #endif vtype tau = Vector::dot(h->cublas_h, d->val[0], v); if (tau <= 0.){ std::cout << "\n ERROR2: indefinite matrix in inner_iter: " << tau << "\n"; exit(-2); } vtype alpha = delta_old / tau; // update residual Vector::axpy(h->cublas_h, v, w, -alpha); vtype l2_norm = Vector::norm(h->cublas_h, w); if(VERBOSE > 0) fprintf(stderr,"level %d alpha %g l2_n %g rtol*delta0 %g \n", l+1, alpha, l2_norm, rtol * delta0); if(l2_norm <= rtol * delta0){ // update solution Vector::axpy(h->cublas_h, d->val[0], x, alpha); }else{ //apply preconditioner to w preconditionApplyK(h, kh, bootamg_data, boot_amg, amg_cycle, l+1, w, d->val[1]); vector<vtype> *v1 = CSRm::CSRVector_product_CUSPARSE(h->cusparse_h0, A, d->val[1], NULL, false, 1., 0.); vtype tau1, tau2, tau3, tau4; tau1 = Vector::dot(h->cublas_h, d->val[1], v); /* gamma of Notay algorithm */ tau2 = Vector::dot(h->cublas_h, d->val[1], v1);/* beta of Notay algorithm */ tau3 = Vector::dot(h->cublas_h, d->val[1], w); /* alpha 2 of Notay algorithm */ tau4 = tau2 - pow(tau1, 2) / tau; /* rho2 of Notay algorihtm */ if(VERBOSE > 0) fprintf(stderr,"tau 1:4 %g %g %g %g \n", tau1, tau2, tau3, tau4); // update solution alpha = alpha - (tau1 * tau3) / (tau * tau4); Vector::axpy(h->cublas_h, d->val[0], x, alpha); alpha = tau3 / tau4; Vector::axpy(h->cublas_h, d->val[1], x, alpha); Vector::free(v1); } if(VERBOSE > 0) fprintf(stderr,"End inneritkcyle level %d\n", l); Vector::free(v); Vector::free(w); Vector::Collection::free(d); }
a086b9137dbd3c65ff2011b3ada1330415283020.cu
#pragma once #include "FCG.h" #define triple_innerproduct_blocksize 1024 __global__ void _triple_innerproduct(itype n, vtype *r, vtype *w, vtype *q, vtype *v, vtype *alpha_beta_gamma){ __shared__ vtype alpha_shared[FULL_WARP]; __shared__ vtype beta_shared[FULL_WARP]; __shared__ vtype gamma_shared[FULL_WARP]; itype tid = blockDim.x * blockIdx.x + threadIdx.x; int warp = threadIdx.x / FULL_WARP; int lane = tid % FULL_WARP; int i = tid; if(i >= n){ if(lane == 0){ alpha_shared[warp] = 0.; beta_shared[warp] = 0.; gamma_shared[warp] = 0.; } return; } vtype v_i = v[i]; vtype alpha_i = r[i] * v_i; vtype beta_i = w[i] * v_i; vtype gamma_i = q[i] * v_i; #pragma unroll for(int k=FULL_WARP >> 1; k > 0; k = k >> 1){ alpha_i += __shfl_down_sync(FULL_MASK, alpha_i, k); beta_i += __shfl_down_sync(FULL_MASK, beta_i, k); gamma_i += __shfl_down_sync(FULL_MASK, gamma_i, k); } if(lane == 0){ alpha_shared[warp] = alpha_i; beta_shared[warp] = beta_i; gamma_shared[warp] = gamma_i; } __syncthreads(); if(warp == 0){ #pragma unroll for(int k=FULL_WARP >> 1; k > 0; k = k >> 1){ alpha_shared[lane] += __shfl_down_sync(FULL_MASK, alpha_shared[lane], k); beta_shared[lane] += __shfl_down_sync(FULL_MASK, beta_shared[lane], k); gamma_shared[lane] += __shfl_down_sync(FULL_MASK, gamma_shared[lane], k); } if(lane == 0){ atomicAdd(&alpha_beta_gamma[0], alpha_shared[0]); atomicAdd(&alpha_beta_gamma[1], beta_shared[0]); atomicAdd(&alpha_beta_gamma[2], gamma_shared[0]); } } } void triple_innerproduct(vector<vtype> *r, vector<vtype> *w, vector<vtype> *q, vector<vtype> *v, vtype *alpha, vtype *beta, vtype *gamma){ assert(r->n == w->n && r->n == q->n && r->n == v->n); vector<vtype> *alpha_beta_gamma = Vector::init<vtype>(3, true, true); Vector::fillWithValue(alpha_beta_gamma, 0.); gridblock gb = gb1d(r->n, triple_innerproduct_blocksize); _triple_innerproduct<<<gb.g, gb.b>>>(r->n, r->val, w->val, q->val, v->val, alpha_beta_gamma->val); vector<vtype> *alpha_beta_gamma_host = Vector::copyToHost(alpha_beta_gamma); *alpha = alpha_beta_gamma_host->val[0]; *beta = alpha_beta_gamma_host->val[1]; *gamma = alpha_beta_gamma_host->val[2]; Vector::free(alpha_beta_gamma); } //########################################################################################################### #define double_merged_axpy_blocksize 1024 __global__ void _double_merged_axpy(itype n, vtype *x0, vtype *x1, vtype *x2, vtype alpha_0, vtype alpha_1){ int i = blockDim.x * blockIdx.x + threadIdx.x; if(i >= n) return; vtype xi1_local = alpha_0 * x0[i] + x1[i]; x2[i] = alpha_1 * xi1_local + x2[i]; x1[i] = xi1_local; } void double_merged_axpy(vector<vtype> *x0, vector<vtype> *x1, vector<vtype> *y, vtype alpha_0, vtype alpha_1){ gridblock gb = gb1d(y->n, double_merged_axpy_blocksize); _double_merged_axpy<<<gb.g, gb.b>>>(y->n, x0->val, x1->val, y->val, alpha_0, alpha_1); } //########################################################################################################### // bcm_PrecApply SRC\BOOTAMG\bcm_boot_prec.c void preconditionApply(handles *h, bootBuildData *bootamg_data, boot *boot_amg, applyData *amg_cycle, vector<vtype> *rhs, vector<vtype> *x){ vectorCollection<vtype> *RHS = FCG::context.RHS_buffer; vectorCollection<vtype> *Xtent = FCG::context.Xtent_buffer; if(bootamg_data->solver_type == 0){ // multiplicative for(int k=0; k<boot_amg->n_hrc; k++){ FCG::setHrrchBufferSize(boot_amg->H_array[k]); Vector::copyTo(RHS->val[0], rhs); Vector::copyTo(Xtent->val[0], x); GAMG_cycle(h, k, bootamg_data, boot_amg, amg_cycle, RHS, Xtent, 1); Vector::copyTo(x, 
Xtent->val[0]); } }else if(bootamg_data->solver_type == 1){ // symmetrized multiplicative for(int k=0; k<boot_amg->n_hrc; k++){ FCG::setHrrchBufferSize(boot_amg->H_array[k]); Vector::copyTo(RHS->val[0], rhs); Vector::copyTo(Xtent->val[0], x); GAMG_cycle(h, k, bootamg_data, boot_amg, amg_cycle, RHS, Xtent, 1); Vector::copyTo(x, Xtent->val[0]); } for(int k=boot_amg->n_hrc-1; k>=0; k--){ FCG::setHrrchBufferSize(boot_amg->H_array[k]); /* int num_levels = boot_amg->H_array[k]->num_levels; for(int i=1; i<num_levels; i++){ Vector::fillWithValue(RHS->val[i], 0.); Vector::fillWithValue(Xtent->val[i], 0.); } */ Vector::copyTo(Xtent->val[0], x); Vector::copyTo(RHS->val[0], rhs); GAMG_cycle(h, k, bootamg_data, boot_amg, amg_cycle, RHS, Xtent, 1); Vector::copyTo(x, Xtent->val[0]); } }else if(bootamg_data->solver_type == 2){ // additive itype n = boot_amg->H_array[0]->A_array[0]->n; vector<vtype> *xadd = Vector::init<vtype>(n, true, true); Vector::fillWithValue(xadd, 0.); for(int k=0; k<boot_amg->n_hrc; k++){ FCG::setHrrchBufferSize(boot_amg->H_array[k]); Vector::copyTo(Xtent->val[0], x); Vector::copyTo(RHS->val[0], rhs); GAMG_cycle(h, k, bootamg_data, boot_amg, amg_cycle, RHS, Xtent, 1); Vector::axpy(h->cublas_h, Xtent->val[0], xadd, 1.); } vtype alpha = 1.0 / (vtype) boot_amg->n_hrc; Vector::scale(h->cublas_h, xadd, alpha); Vector::copyTo(x, xadd); Vector::free(xadd); } } vtype flexibileConjugateGradients(handles *h, vector<vtype> *x, vector<vtype> *rhs, bootBuildData *bootamg_data, boot *boot_amg, applyData *amg_cycle, int precon, int max_iter, double rtol, int *num_iter){ buildData *amg_data = bootamg_data->amg_data; CSR *A = amg_data->A; itype n = A->n; vector<vtype> *v = NULL; vector<vtype> *w = NULL; vectorCollection<vtype> *d = Vector::Collection::init<vtype>(2); d->val[0] = Vector::init<vtype>(n, true, true); Vector::fillWithValue(d->val[0], 0.); d->val[1] = Vector::init<vtype>(n, true, true); Vector::fillWithValue(d->val[1], 0.); #if CSR_VECTOR_MUL_GENERAL_TYPE == 0 v = CSRm::CSRVector_product_CUSPARSE(h->cusparse_h0, A, x, NULL, false, 1., 0.); #elif CSR_VECTOR_MUL_GENERAL_TYPE == 1 v = CSRm::CSRVector_product_adaptive_miniwarp(h->cusparse_h0, A, x, NULL, 1., 0.); #endif w = Vector::clone(rhs); Vector::axpy(h->cublas_h, v, w, -1.); vtype delta0 = Vector::norm(h->cublas_h, w); vtype rhs_norm = Vector::norm(h->cublas_h, rhs); if(delta0 <= DBL_EPSILON * rhs_norm){ *num_iter = 0; exit(1); } if(precon){ /* apply preconditioner to w */ preconditionApply(h, bootamg_data, boot_amg, amg_cycle, w, d->val[0]); } vtype delta_old = Vector::dot(h->cublas_h, w, d->val[0]); if(delta_old <= 0.){ std::cout << "\n ERROR1: indefinite preconditioner in cg_iter_coarse: " << delta_old << "\n"; exit(-1); } int idx = 0, iter = 0; vtype l2_norm; do{ #if CSR_VECTOR_MUL_GENERAL_TYPE == 0 CSRm::CSRVector_product_CUSPARSE(h->cusparse_h0, A, d->val[idx], v, false, 1., 0.); #elif CSR_VECTOR_MUL_GENERAL_TYPE == 1 CSRm::CSRVector_product_adaptive_miniwarp(h->cusparse_h0, A, d->val[idx], v, 1., 0.); #endif vtype tau = Vector::dot(h->cublas_h, d->val[idx], v); if (tau <= 0.){ std::cout << "\n ERROR2: indefinite matrix in cg_iter_coarse: " << tau << "\n"; exit(-2); } vtype alpha = delta_old / tau; // update solution Vector::axpy(h->cublas_h, d->val[idx], x, alpha); // update residual Vector::axpy(h->cublas_h, v, w, -alpha); l2_norm = Vector::norm(h->cublas_h, w); iter++; idx = iter % 2; Vector::fillWithValue(d->val[idx], 0.); if(precon){ /* apply preconditioner to w */ preconditionApply(h, bootamg_data, boot_amg, amg_cycle, w, 
d->val[idx]); } //update direction vtype tau1 = Vector::dot(h->cublas_h, d->val[idx], v); vtype beta = tau1 / tau; if(idx == 1) Vector::axpy(h->cublas_h, d->val[0], d->val[1], -beta); else Vector::axpy(h->cublas_h, d->val[1], d->val[0], -beta); delta_old = Vector::dot(h->cublas_h, w, d->val[idx]); if(VERBOSE > 0) std::cout << "bootpcg iteration: " << iter << " residual: " << l2_norm << " relative residual: " << l2_norm / delta0 << "\n"; }while(l2_norm > rtol * delta0 && iter < max_iter); assert( std::isfinite(l2_norm) ); *num_iter = iter; if(precon){ FCG::freePreconditionContext(); } if(amg_cycle->cycle_type == 3) FCGK::freePreconditionContext(); Vector::free(w); Vector::free(v); Vector::Collection::free(d); return l2_norm; } //############################################################################################### vtype flexibileConjugateGradients_v2(handles *h, vector<vtype> *x, vector<vtype> *rhs, bootBuildData *bootamg_data, boot *boot_amg, applyData *amg_cycle, int precon, int max_iter, double rtol, int *num_iter){ buildData *amg_data = bootamg_data->amg_data; CSR *A = amg_data->A; itype n = A->n; vector<vtype> *v = Vector::init<vtype>(n, true, true); Vector::fillWithValue(v, 0.); vector<vtype> *w = NULL; vector<vtype> *r = NULL; vector<vtype> *d = NULL; vector<vtype> *q = NULL; r = Vector::clone(rhs); #if CSR_VECTOR_MUL_GENERAL_TYPE == 0 w = CSRm::CSRVector_product_CUSPARSE(h->cusparse_h0, A, x, NULL, false, 1., 0.); #elif CSR_VECTOR_MUL_GENERAL_TYPE == 1 w = CSRm::CSRVector_product_adaptive_miniwarp(h->cusparse_h0, A, x, NULL, 1., 0.); #endif Vector::axpy(h->cublas_h, w, r, -1.); vtype delta0 = Vector::norm(h->cublas_h, r); vtype rhs_norm = Vector::norm(h->cublas_h, rhs); if(delta0 <= DBL_EPSILON * rhs_norm){ *num_iter = 0; exit(1); } if(precon){ /* apply preconditioner to w */ preconditionApply(h, bootamg_data, boot_amg, amg_cycle, r, v); } #if CSR_VECTOR_MUL_GENERAL_TYPE == 0 CSRm::CSRVector_product_CUSPARSE(h->cusparse_h0, A, v, w, false, 1., 0.); #elif CSR_VECTOR_MUL_GENERAL_TYPE == 1 CSRm::CSRVector_product_adaptive_miniwarp(h->cusparse_h0, A, v, w, 1., 0.); #endif vtype alpha = Vector::dot(h->cublas_h, r, v); vtype beta = Vector::dot(h->cublas_h, w, v); vtype delta = beta; vtype theta = alpha / delta; vtype gamma; // update solution Vector::axpy(h->cublas_h, v, x, theta); // update residual Vector::axpy(h->cublas_h, w, r, -theta); vtype l2_norm = Vector::norm(h->cublas_h, r); if (l2_norm <= rtol * delta0){ *num_iter = 1; } int iter = 1; d = Vector::clone(v); q = Vector::clone(w); //d1 = Vector::init(n, true, true); //q1 = Vector::init(n, true, true); do{ iter++; Vector::fillWithValue(v, 0.); if(precon){ /* apply preconditioner to w */ preconditionApply(h, bootamg_data, boot_amg, amg_cycle, r, v); } #if CSR_VECTOR_MUL_GENERAL_TYPE == 0 CSRm::CSRVector_product_CUSPARSE(h->cusparse_h0, A, v, w, false, 1., 0.); #elif CSR_VECTOR_MUL_GENERAL_TYPE == 1 CSRm::CSRVector_product_adaptive_miniwarp(h->cusparse_h0, A, v, w, 1., 0.); #endif triple_innerproduct(r, w, q, v, &alpha, &beta, &gamma); theta = gamma / delta; delta = beta - pow(gamma, 2) / delta; vtype theta_2 = alpha / delta; Vector::axpy(h->cublas_h, d, v, -theta); Vector::copyTo(d, v); // update solution Vector::axpy(h->cublas_h, d, x, theta_2); Vector::axpy(h->cublas_h, q, w, -theta); Vector::copyTo(q, w); // update residual Vector::axpy(h->cublas_h, q, r, -theta_2); l2_norm = Vector::norm(h->cublas_h, r); if(VERBOSE > 0) std::cout << "bootpcg iteration: " << iter << " residual: " << l2_norm << " relative residual: " << 
l2_norm / delta0 << "\n"; }while(l2_norm > rtol * delta0 && iter < max_iter); assert( std::isfinite(l2_norm) ); *num_iter = iter; if(precon){ FCG::freePreconditionContext(); } if(amg_cycle->cycle_type == 3) FCGK::freePreconditionContext(); Vector::free(w); Vector::free(v); Vector::free(d); Vector::free(q); Vector::free(r); return l2_norm; } //############################################################################################### vtype flexibileConjugateGradients_v3(handles *h, vector<vtype> *x, vector<vtype> *rhs, bootBuildData *bootamg_data, boot *boot_amg, applyData *amg_cycle, int precon, int max_iter, double rtol, int *num_iter){ buildData *amg_data = bootamg_data->amg_data; CSR *A = amg_data->A; itype n = A->n; vector<vtype> *v = Vector::init<vtype>(n, true, true); Vector::fillWithValue(v, 0.); vector<vtype> *w = NULL; vector<vtype> *r = NULL; vector<vtype> *d = NULL; vector<vtype> *q = NULL; r = Vector::clone(rhs); #if CSR_VECTOR_MUL_GENERAL_TYPE == 0 w = CSRm::CSRVector_product_CUSPARSE(h->cusparse_h0, A, x, NULL, false, 1., 0.); #elif CSR_VECTOR_MUL_GENERAL_TYPE == 1 w = CSRm::CSRVector_product_adaptive_miniwarp(h->cusparse_h0, A, x, NULL, 1., 0.); #endif Vector::axpy(h->cublas_h, w, r, -1.); vtype delta0 = Vector::norm(h->cublas_h, r); vtype rhs_norm = Vector::norm(h->cublas_h, rhs); if(delta0 <= DBL_EPSILON * rhs_norm){ *num_iter = 0; exit(1); } if(precon){ /* apply preconditioner to w */ preconditionApply(h, bootamg_data, boot_amg, amg_cycle, r, v); } #if CSR_VECTOR_MUL_GENERAL_TYPE == 0 CSRm::CSRVector_product_CUSPARSE(h->cusparse_h0, A, v, w, false, 1., 0.); #elif CSR_VECTOR_MUL_GENERAL_TYPE == 1 CSRm::CSRVector_product_adaptive_miniwarp(h->cusparse_h0, A, v, w, 1., 0.); #endif vtype alpha = Vector::dot(h->cublas_h, r, v); vtype beta = Vector::dot(h->cublas_h, w, v); vtype delta = beta; vtype theta = alpha / delta; vtype gamma; // update solution Vector::axpy(h->cublas_h, v, x, theta); // update residual Vector::axpy(h->cublas_h, w, r, -theta); vtype l2_norm = Vector::norm(h->cublas_h, r); if (l2_norm <= rtol * delta0){ *num_iter = 1; } int iter = 0; d = Vector::clone(v); q = Vector::clone(w); do{ int idx = iter % 2; if(idx == 0){ Vector::fillWithValue(v, 0.); if(precon){ preconditionApply(h, bootamg_data, boot_amg, amg_cycle, r, v); } #if CSR_VECTOR_MUL_GENERAL_TYPE == 0 CSRm::CSRVector_product_CUSPARSE(h->cusparse_h0, A, v, w, false, 1., 0.); #elif CSR_VECTOR_MUL_GENERAL_TYPE == 1 CSRm::CSRVector_product_adaptive_miniwarp(h->cusparse_h0, A, v, w, 1., 0.); #endif triple_innerproduct(r, w, q, v, &alpha, &beta, &gamma); }else{ Vector::fillWithValue(d, 0.); if(precon){ preconditionApply(h, bootamg_data, boot_amg, amg_cycle, r, d); } #if CSR_VECTOR_MUL_GENERAL_TYPE == 0 CSRm::CSRVector_product_CUSPARSE(h->cusparse_h0, A, d, q, false, 1., 0.); #elif CSR_VECTOR_MUL_GENERAL_TYPE == 1 CSRm::CSRVector_product_adaptive_miniwarp(h->cusparse_h0, A, d, q, 1., 0.); #endif triple_innerproduct(r, q, w, d, &alpha, &beta, &gamma); } theta = gamma / delta; delta = beta - pow(gamma, 2) / delta; vtype theta_2 = alpha / delta; if(idx == 0){ //Vector::axpy(h->cublas_h, d, v, -theta); // update solution //Vector::axpy(h->cublas_h, v, x, theta_2); double_merged_axpy(d, v, x, -theta, theta_2); //Vector::axpy(h->cublas_h, q, w, -theta); // update residual //Vector::axpy(h->cublas_h, w, r, -theta_2); double_merged_axpy(q, w, r, -theta, -theta_2); }else{ //Vector::axpy(h->cublas_h, v, d, -theta); // update solution //Vector::axpy(h->cublas_h, d, x, theta_2); double_merged_axpy(v, d, x, -theta, 
theta_2); //Vector::axpy(h->cublas_h, w, q, -theta); // update residual //Vector::axpy(h->cublas_h, q, r, -theta_2); double_merged_axpy(w, q, r, -theta, -theta_2); } l2_norm = Vector::norm(h->cublas_h, r); if(VERBOSE > 0) std::cout << "bootpcg iteration: " << iter << " residual: " << l2_norm << " relative residual: " << l2_norm / delta0 << "\n"; iter++; }while(l2_norm > rtol * delta0 && iter < max_iter); assert( std::isfinite(l2_norm) ); *num_iter = iter + 1; if(precon){ FCG::freePreconditionContext(); } if(amg_cycle->cycle_type == 3) FCGK::freePreconditionContext(); Vector::free(w); Vector::free(v); Vector::free(d); Vector::free(q); Vector::free(r); return l2_norm; } //############################################################################################### void preconditionApplyK(handles *h, int kk, bootBuildData *bootamg_data, boot *boot_amg, applyData *amg_cycle, int l, vector<vtype> *rhs, vector<vtype> *x){ hierarchy *hrrch = boot_amg->H_array[kk]; /* vectorCollection<vtype> *RHS = FCGK::context.RHS_buffer; vectorCollection<vtype> *Xtent = FCGK::context.Xtent_buffer; FCGK::setHrrchBufferSize(hrrch); Vector::fillWithValue(Xtent->val[l-1], 0.); */ int num_levels = hrrch->num_levels; vectorCollection<vtype> *RHS = Vector::Collection::init<vtype>(num_levels); vectorCollection<vtype> *Xtent = Vector::Collection::init<vtype>(num_levels); // !skip the first for(int i=l-1; i<num_levels; i++){ itype n_i = hrrch->A_array[i]->n; RHS->val[i] = Vector::init<vtype>(n_i, true, true); Vector::fillWithValue(RHS->val[i], 0.); Xtent->val[i] = Vector::init<vtype>(n_i, true, true); Vector::fillWithValue(Xtent->val[i], 0.); } Vector::copyTo(RHS->val[l-1], rhs); GAMG_cycle(h, kk, bootamg_data, boot_amg, amg_cycle, RHS, Xtent, l); Vector::copyTo(x, Xtent->val[l-1]); Vector::Collection::free(RHS); Vector::Collection::free(Xtent); } //########################################################################### void inneritkcycle(handles *h, int kh, vector<vtype> *x, vector<vtype> *rhs, bootBuildData *bootamg_data, boot * boot_amg, applyData *amg_cycle, double rtol, int l){ hierarchy *hrrc = boot_amg->H_array[kh]; CSR *A = hrrc->A_array[l]; if(VERBOSE > 0) std::cout << "Start inneritkcyle level: " << l+1 << "\n"; vectorCollection<vtype> *d = Vector::Collection::init<vtype>(2); d->val[0] = Vector::init<vtype>(A->n, true, true); Vector::fillWithValue(d->val[0], 0.); d->val[1] = Vector::init<vtype>(A->n, true, true); Vector::fillWithValue(d->val[1], 0.); vector<vtype> *w = Vector::clone(rhs); vtype delta0 = Vector::norm(h->cublas_h, w); if(VERBOSE > 0) std::cout << "Level " << l+1 << " delta0 " << delta0 << "\n"; // apply preconditioner to w preconditionApplyK(h, kh, bootamg_data, boot_amg, amg_cycle, l+1, w, d->val[0]); vtype delta_old = Vector::dot(h->cublas_h, w, d->val[0]); if (VERBOSE > 1){ vtype tnrm = Vector::norm(h->cublas_h, w); fprintf(stderr,"level %d recursion output W nrm %g \n",l+1,tnrm); tnrm = Vector::norm(h->cublas_h, d->val[0]); fprintf(stderr,"level %d recursion output nrm %g \n",l+1,tnrm); } if(delta_old <= 0.){ std::cout << "\n ERROR1: indefinite preconditioner in inner_iter: " << delta_old << "\n"; exit(-1); } // parse-matrix vector product #if CSR_VECTOR_MUL_GENERAL_TYPE == 0 vector<vtype> *v = CSRm::CSRVector_product_CUSPARSE(h->cusparse_h0, A, d->val[0], NULL, false, 1., 0.); #elif CSR_VECTOR_MUL_GENERAL_TYPE == 1 vector<vtype> *v = CSRm::CSRVector_product_adaptive_miniwarp(h->cusparse_h0, A, d->val[0], NULL, 1., 0.); #endif vtype tau = Vector::dot(h->cublas_h, d->val[0], v); if (tau 
<= 0.){ std::cout << "\n ERROR2: indefinite matrix in inner_iter: " << tau << "\n"; exit(-2); } vtype alpha = delta_old / tau; // update residual Vector::axpy(h->cublas_h, v, w, -alpha); vtype l2_norm = Vector::norm(h->cublas_h, w); if(VERBOSE > 0) fprintf(stderr,"level %d alpha %g l2_n %g rtol*delta0 %g \n", l+1, alpha, l2_norm, rtol * delta0); if(l2_norm <= rtol * delta0){ // update solution Vector::axpy(h->cublas_h, d->val[0], x, alpha); }else{ //apply preconditioner to w preconditionApplyK(h, kh, bootamg_data, boot_amg, amg_cycle, l+1, w, d->val[1]); vector<vtype> *v1 = CSRm::CSRVector_product_CUSPARSE(h->cusparse_h0, A, d->val[1], NULL, false, 1., 0.); vtype tau1, tau2, tau3, tau4; tau1 = Vector::dot(h->cublas_h, d->val[1], v); /* gamma of Notay algorithm */ tau2 = Vector::dot(h->cublas_h, d->val[1], v1);/* beta of Notay algorithm */ tau3 = Vector::dot(h->cublas_h, d->val[1], w); /* alpha 2 of Notay algorithm */ tau4 = tau2 - pow(tau1, 2) / tau; /* rho2 of Notay algorihtm */ if(VERBOSE > 0) fprintf(stderr,"tau 1:4 %g %g %g %g \n", tau1, tau2, tau3, tau4); // update solution alpha = alpha - (tau1 * tau3) / (tau * tau4); Vector::axpy(h->cublas_h, d->val[0], x, alpha); alpha = tau3 / tau4; Vector::axpy(h->cublas_h, d->val[1], x, alpha); Vector::free(v1); } if(VERBOSE > 0) fprintf(stderr,"End inneritkcyle level %d\n", l); Vector::free(v); Vector::free(w); Vector::Collection::free(d); }
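For orientation: the _double_merged_axpy kernel above fuses two AXPY updates so each element is read and written once per pass. The following host-side reference (a sketch with a hypothetical name, not part of the source above) spells out the same per-element arithmetic; in flexibileConjugateGradients_v3 the roles of x0, x1, x2 are played by d, v, x (or w, q, r).

// Reference only: per-element arithmetic of _double_merged_axpy, as plain host code.
void double_merged_axpy_reference(int n, const double *x0, double *x1, double *x2,
                                  double alpha_0, double alpha_1) {
    for (int i = 0; i < n; ++i) {
        double x1_new = alpha_0 * x0[i] + x1[i];  // first axpy, kept in a register
        x2[i] += alpha_1 * x1_new;                // second axpy consumes the updated value
        x1[i] = x1_new;                           // write the fused intermediate back to x1
    }
}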
37641b8305611641af99782c25f553cd5a29e2a0.hip
// !!! This is a file automatically generated by hipify!!!
#include "matrix_hip.cuh"

int main(){
    Matrix * m1 = new Matrix(5,5,1);
}
37641b8305611641af99782c25f553cd5a29e2a0.cu
#include "matrix.cuh" int main(){ Matrix * m1 = new Matrix(5,5,1); }
46ab16acbb889abf434f76841ec9b34c2e212958.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorMathReduce.cu" #else THC_API void THCTensor_(sum)(THCState* state, THCTensor *self, THCTensor *src, long dimension) { THAssert(THCTensor_(checkGPU)(state, 2, self, src)); if (!THC_reduceDim(state, self, src, thrust::identity<real>(), ReduceAdd<real, real>(), ScalarConvert<int, real>::to(0), dimension)) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCudaCheck(hipGetLastError()); } THC_API void THCTensor_(prod)(THCState* state, THCTensor *self, THCTensor *src, long dimension) { THAssert(THCTensor_(checkGPU)(state, 2, self, src)); if (!THC_reduceDim(state, self, src, thrust::identity<real>(), ReduceMultiply<real, real>(), ScalarConvert<int, real>::to(1), dimension)) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCudaCheck(hipGetLastError()); } THC_API void THCTensor_(mean)(THCState *state, THCTensor *self, THCTensor *src, long dim) { THAssert(THCTensor_(checkGPU)(state, 2, self, src)); THCTensor_(sum)(state, self, src, dim); THCTensor_(div)(state, self, self, ScalarConvert<long, real>::to(THCTensor_(size)(state, src, dim))); } #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) THC_API void THCTensor_(renorm)(THCState *state, THCTensor* self, THCTensor* src, real value, long dimension, real maxnorm) { THAssert(THCTensor_(checkGPU)(state, 2, self, src)); THCTensor *self_; THCTensor *src_ = THCTensor_(newTranspose)(state, src, dimension, 0); THCTensor *data = THCTensor_(newClone)(state, src_); ptrdiff_t size = THCTensor_(nElement)(state, data)/data->size[0]; THArgCheck(dimension >= 0 && dimension < THCTensor_(nDimension)(state, src), 3, "invalid dimension"); THArgCheck(THCNumerics<real>::gt(value, ScalarConvert<int, real>::to(0)), 2, "non-positive-norm not supported"); THArgCheck(THCTensor_(nDimension)(state, src) > 1, 1, "need at least 2 dimensions"); dim3 grid(data->size[0]); dim3 threads(32); hipLaunchKernelGGL(( THCTensor_kernel_renorm<real>), dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state), THCTensor_(data)(state, data), value, size, maxnorm); hipError_t errcode = hipGetLastError(); if(errcode != hipSuccess) THError(hipGetErrorString(errcode)); THCTensor_(free)(state, src_); self_ = THCTensor_(newTranspose)(state, data, dimension, 0); THCTensor_(resizeAs)(state, self, self_); THCTensor_(freeCopyTo)(state, self_, self); THCTensor_(free)(state, data); } THC_API void THCTensor_(std)(THCState *state, THCTensor *self_, THCTensor *src, long dimension, int flag) { THAssert(THCTensor_(checkGPU)(state, 2, self_, src)); THLongStorage *dim = THCTensor_(newSizeOf)(state, src); THLongStorage_set(dim, dimension, 1); THCTensor_(resize)(state, self_, dim, NULL); THLongStorage_free(dim); THCTensor *self = THCTensor_(newContiguous)(state, self_); src = THCTensor_(newContiguous)(state, src); if (dimension == THCTensor_(nDimension)(state, src) - 1) { THCTensor_varInnermostDim<THCTensor, real, true>(state, self, src, flag); } else { THCTensor_varOuterDim<THCTensor, real, true>(state, self, src, dimension, flag); } THCTensor_(free)(state, src); THCTensor_(freeCopyTo)(state, self, self_); } THC_API void THCTensor_(var)(THCState *state, THCTensor *self_, THCTensor *src, long dimension, int flag) { THAssert(THCTensor_(checkGPU)(state, 2, self_, src)); THLongStorage *dim = THCTensor_(newSizeOf)(state, src); THLongStorage_set(dim, dimension, 1); THCTensor_(resize)(state, self_, dim, NULL); 
THLongStorage_free(dim); THCTensor *self = THCTensor_(newContiguous)(state, self_); src = THCTensor_(newContiguous)(state, src); if (dimension == THCTensor_(nDimension)(state, src) - 1) { THCTensor_varInnermostDim<THCTensor, real, false>(state, self, src, flag); } else { THCTensor_varOuterDim<THCTensor, real, false>(state, self, src, dimension, flag); } THCTensor_(free)(state, src); THCTensor_(freeCopyTo)(state, self, self_); } THC_API accreal THCTensor_(stdall)(THCState *state, THCTensor *self) { THAssert(THCTensor_(checkGPU)(state, 1, self)); return THCNumerics<accreal>::sqrt((THCTensor_(varall)(state, self))); } THC_API accreal THCTensor_(varall)(THCState *state, THCTensor *self) { THAssert(THCTensor_(checkGPU)(state, 1, self)); accreal mean = THCTensor_(meanall)(state, self); accreal val; if (!THC_reduceAll(state, self, SquareFunctor<accreal, real>(mean), ReduceAdd<accreal, accreal>(), ReduceAdd<accreal, accreal>(), ScalarConvert<int, accreal>::to(0), &val, 0)) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } val = THCNumerics<accreal>::div( val, ScalarConvert<ptrdiff_t, accreal>::to(THCTensor_(nElement)(state, self) - 1) ); THCudaCheck(hipGetLastError()); return val; } THC_API void THCTensor_(norm)(THCState *state, THCTensor* self, THCTensor* src, real value, long dimension) { THAssert(THCTensor_(checkGPU)(state, 2, self, src)); if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(0.0))) { THC_reduceDim(state, self, src, TensorNonZeroOp<real>(), ReduceAdd<real, real>(), ScalarConvert<float, real>::to(0.0), dimension); } else if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(1.0))) { THC_reduceDim(state, self, src, TensorNormOp<real, 1>(value), ReduceAdd<real, real>(), ScalarConvert<float, real>::to(0.0), dimension); } else if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(2.0))) { THC_reduceDim(state, self, src, TensorNormOp<real, 2>(value), ReduceAdd<real, real>(), ScalarConvert<float, real>::to(0.0), dimension); THCTensor_(pow)(state, self, self, ScalarConvert<float, real>::to(0.5)); } else { THC_reduceDim(state, self, src, TensorNormOp<real, -1>(value), ReduceAdd<real, real>(), ScalarConvert<float, real>::to(0.0), dimension); THCTensor_(pow)(state, self, self, THCNumerics<real>::cinv(value)); } THCudaCheck(hipGetLastError()); } THC_API accreal THCTensor_(normall)(THCState *state, THCTensor *self, real value) { THAssert(THCTensor_(checkGPU)(state, 1, self)); accreal result; if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(0.0))) { THC_reduceAll(state, self, TensorNonZeroOp<real>(), ReduceAdd<real, accreal>(), ReduceAdd<accreal, accreal>(), ScalarConvert<float, accreal>::to(0.0f), &result, 0); } else if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(1.0))) { THC_reduceAll(state, self, TensorNormOp<real, 1>(value), ReduceAdd<real, accreal>(), ReduceAdd<accreal, accreal>(), ScalarConvert<float, accreal>::to(0.0f), &result, 0); } else if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(2.0))) { THC_reduceAll(state, self, TensorNormOp<real, 2>(value), ReduceAdd<real, accreal>(), ReduceAdd<accreal, accreal>(), ScalarConvert<float, accreal>::to(0.0f), &result, 0); result = THCNumerics<accreal>::sqrt(result); } else { THC_reduceAll(state, self, TensorNormOp<real, -1>(value), ReduceAdd<real, accreal>(), ReduceAdd<accreal, accreal>(), ScalarConvert<float, accreal>::to(0.0f), &result, 0); result = THCNumerics<accreal>::pow( result, ScalarConvert<real, accreal>::to(THCNumerics<real>::cinv(value)) ); } 
THCudaCheck(hipGetLastError()); return result; } accreal THCTensor_(dist)(THCState *state, THCTensor *self, THCTensor *src, real value) { THAssert(THCTensor_(checkGPU)(state, 2, self, src)); self = THCTensor_(newContiguous)(state, self); ptrdiff_t size = THCTensor_(nElement)(state, self); src = THCTensor_(newContiguous)(state, src); thrust::device_ptr<real> self_data(THCTensor_(data)(state, self)); thrust::device_ptr<real> src_data(THCTensor_(data)(state, src)); accreal result = thrust::inner_product( #if TORCH_HIP_VERSION >= 7000 thrust::hip::par.on(THCState_getCurrentStream(state)), #endif self_data, self_data+size, src_data, ScalarConvert<int, accreal>::to(0), thrust::plus<accreal>(), TensorDistOp<accreal, real>(ScalarConvert<real, accreal>::to(value))); THCTensor_(free)(state, src); THCTensor_(free)(state, self); return THCNumerics<accreal>::pow(result, 1.0 / ScalarConvert<real, accreal>::to(value)); } #endif THC_API accreal THCTensor_(sumall)(THCState *state, THCTensor *self) { THAssert(THCTensor_(checkGPU)(state, 1, self)); accreal val; if (!THC_reduceAll(state, self, thrust::identity<real>(), ReduceAdd<real, accreal>(), ReduceAdd<accreal, accreal>(), ScalarConvert<int, accreal>::to(0), &val, 0)) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } THCudaCheck(hipGetLastError()); return val; } THC_API accreal THCTensor_(prodall)(THCState *state, THCTensor *self) { THAssert(THCTensor_(checkGPU)(state, 1, self)); accreal val; if (!THC_reduceAll(state, self, thrust::identity<real>(), ReduceMultiply<real, accreal>(), ReduceMultiply<accreal, accreal>(), ScalarConvert<int, accreal>::to(1), &val, 0)) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } val = THCNumerics<accreal>::div( val, ScalarConvert<long, accreal>::to(THCTensor_(nElement)(state, self)) - 1 ); THCudaCheck(hipGetLastError()); return val; } THC_API accreal THCTensor_(meanall)(THCState *state, THCTensor *self) { THAssert(THCTensor_(checkGPU)(state, 1, self)); THArgCheck(self->nDimension > 0, 1, "empty Tensor"); return THCTensor_(sumall)(state, self)/THCTensor_(nElement)(state, self); } THC_API real THCTensor_(minall)(THCState *state, THCTensor *self) { THAssert(THCTensor_(checkGPU)(state, 1, self)); real val; if (!THC_reduceAll(state, self, thrust::identity<real>(), ReduceMin<real>(), ReduceMin<real>(), THCNumerics<real>::max(), &val, 0)) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } THCudaCheck(hipGetLastError()); return val; } THC_API real THCTensor_(maxall)(THCState *state, THCTensor *self) { THAssert(THCTensor_(checkGPU)(state, 1, self)); real val; if (!THC_reduceAll(state, self, thrust::identity<real>(), ReduceMax<real>(), ReduceMax<real>(), THCNumerics<real>::min(), &val, 0)) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } THCudaCheck(hipGetLastError()); return val; } THC_API void THCTensor_(max)(THCState *state, THCTensor *values, THCudaLongTensor *indices, THCTensor *src, long dimension) { THAssert(THCTensor_(checkGPU)(state, 3, values, indices, src)); thrust::pair<typename TensorUtils<THCTensor>::DataType, long> init = thrust::make_pair<typename TensorUtils<THCTensor>::DataType, long>( THCNumerics<typename TensorUtils<THCTensor>::DataType>::min(), 1); return THC_reduceDimIndex( state, values, indices, src, dimension, init, MaxValuePair<typename TensorUtils<THCTensor>::DataType, long>()); } THC_API void THCTensor_(min)(THCState *state, THCTensor *values, THCudaLongTensor *indices, THCTensor *src, long dimension) { THAssert(THCTensor_(checkGPU)(state, 3, values, indices, src)); thrust::pair<typename 
TensorUtils<THCTensor>::DataType, long> init = thrust::make_pair<typename TensorUtils<THCTensor>::DataType, long>( THCNumerics<typename TensorUtils<THCTensor>::DataType>::max(), 1); return THC_reduceDimIndex( state, values, indices, src, dimension, init, MinValuePair<typename TensorUtils<THCTensor>::DataType, long>()); } #endif
46ab16acbb889abf434f76841ec9b34c2e212958.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorMathReduce.cu" #else THC_API void THCTensor_(sum)(THCState* state, THCTensor *self, THCTensor *src, long dimension) { THAssert(THCTensor_(checkGPU)(state, 2, self, src)); if (!THC_reduceDim(state, self, src, thrust::identity<real>(), ReduceAdd<real, real>(), ScalarConvert<int, real>::to(0), dimension)) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCudaCheck(cudaGetLastError()); } THC_API void THCTensor_(prod)(THCState* state, THCTensor *self, THCTensor *src, long dimension) { THAssert(THCTensor_(checkGPU)(state, 2, self, src)); if (!THC_reduceDim(state, self, src, thrust::identity<real>(), ReduceMultiply<real, real>(), ScalarConvert<int, real>::to(1), dimension)) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCudaCheck(cudaGetLastError()); } THC_API void THCTensor_(mean)(THCState *state, THCTensor *self, THCTensor *src, long dim) { THAssert(THCTensor_(checkGPU)(state, 2, self, src)); THCTensor_(sum)(state, self, src, dim); THCTensor_(div)(state, self, self, ScalarConvert<long, real>::to(THCTensor_(size)(state, src, dim))); } #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) THC_API void THCTensor_(renorm)(THCState *state, THCTensor* self, THCTensor* src, real value, long dimension, real maxnorm) { THAssert(THCTensor_(checkGPU)(state, 2, self, src)); THCTensor *self_; THCTensor *src_ = THCTensor_(newTranspose)(state, src, dimension, 0); THCTensor *data = THCTensor_(newClone)(state, src_); ptrdiff_t size = THCTensor_(nElement)(state, data)/data->size[0]; THArgCheck(dimension >= 0 && dimension < THCTensor_(nDimension)(state, src), 3, "invalid dimension"); THArgCheck(THCNumerics<real>::gt(value, ScalarConvert<int, real>::to(0)), 2, "non-positive-norm not supported"); THArgCheck(THCTensor_(nDimension)(state, src) > 1, 1, "need at least 2 dimensions"); dim3 grid(data->size[0]); dim3 threads(32); THCTensor_kernel_renorm<real><<<grid, threads, 0, THCState_getCurrentStream(state)>>>(THCTensor_(data)(state, data), value, size, maxnorm); cudaError errcode = cudaGetLastError(); if(errcode != cudaSuccess) THError(cudaGetErrorString(errcode)); THCTensor_(free)(state, src_); self_ = THCTensor_(newTranspose)(state, data, dimension, 0); THCTensor_(resizeAs)(state, self, self_); THCTensor_(freeCopyTo)(state, self_, self); THCTensor_(free)(state, data); } THC_API void THCTensor_(std)(THCState *state, THCTensor *self_, THCTensor *src, long dimension, int flag) { THAssert(THCTensor_(checkGPU)(state, 2, self_, src)); THLongStorage *dim = THCTensor_(newSizeOf)(state, src); THLongStorage_set(dim, dimension, 1); THCTensor_(resize)(state, self_, dim, NULL); THLongStorage_free(dim); THCTensor *self = THCTensor_(newContiguous)(state, self_); src = THCTensor_(newContiguous)(state, src); if (dimension == THCTensor_(nDimension)(state, src) - 1) { THCTensor_varInnermostDim<THCTensor, real, true>(state, self, src, flag); } else { THCTensor_varOuterDim<THCTensor, real, true>(state, self, src, dimension, flag); } THCTensor_(free)(state, src); THCTensor_(freeCopyTo)(state, self, self_); } THC_API void THCTensor_(var)(THCState *state, THCTensor *self_, THCTensor *src, long dimension, int flag) { THAssert(THCTensor_(checkGPU)(state, 2, self_, src)); THLongStorage *dim = THCTensor_(newSizeOf)(state, src); THLongStorage_set(dim, dimension, 1); THCTensor_(resize)(state, self_, dim, NULL); THLongStorage_free(dim); THCTensor *self = THCTensor_(newContiguous)(state, self_); src = THCTensor_(newContiguous)(state, src); if 
(dimension == THCTensor_(nDimension)(state, src) - 1) { THCTensor_varInnermostDim<THCTensor, real, false>(state, self, src, flag); } else { THCTensor_varOuterDim<THCTensor, real, false>(state, self, src, dimension, flag); } THCTensor_(free)(state, src); THCTensor_(freeCopyTo)(state, self, self_); } THC_API accreal THCTensor_(stdall)(THCState *state, THCTensor *self) { THAssert(THCTensor_(checkGPU)(state, 1, self)); return THCNumerics<accreal>::sqrt((THCTensor_(varall)(state, self))); } THC_API accreal THCTensor_(varall)(THCState *state, THCTensor *self) { THAssert(THCTensor_(checkGPU)(state, 1, self)); accreal mean = THCTensor_(meanall)(state, self); accreal val; if (!THC_reduceAll(state, self, SquareFunctor<accreal, real>(mean), ReduceAdd<accreal, accreal>(), ReduceAdd<accreal, accreal>(), ScalarConvert<int, accreal>::to(0), &val, 0)) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } val = THCNumerics<accreal>::div( val, ScalarConvert<ptrdiff_t, accreal>::to(THCTensor_(nElement)(state, self) - 1) ); THCudaCheck(cudaGetLastError()); return val; } THC_API void THCTensor_(norm)(THCState *state, THCTensor* self, THCTensor* src, real value, long dimension) { THAssert(THCTensor_(checkGPU)(state, 2, self, src)); if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(0.0))) { THC_reduceDim(state, self, src, TensorNonZeroOp<real>(), ReduceAdd<real, real>(), ScalarConvert<float, real>::to(0.0), dimension); } else if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(1.0))) { THC_reduceDim(state, self, src, TensorNormOp<real, 1>(value), ReduceAdd<real, real>(), ScalarConvert<float, real>::to(0.0), dimension); } else if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(2.0))) { THC_reduceDim(state, self, src, TensorNormOp<real, 2>(value), ReduceAdd<real, real>(), ScalarConvert<float, real>::to(0.0), dimension); THCTensor_(pow)(state, self, self, ScalarConvert<float, real>::to(0.5)); } else { THC_reduceDim(state, self, src, TensorNormOp<real, -1>(value), ReduceAdd<real, real>(), ScalarConvert<float, real>::to(0.0), dimension); THCTensor_(pow)(state, self, self, THCNumerics<real>::cinv(value)); } THCudaCheck(cudaGetLastError()); } THC_API accreal THCTensor_(normall)(THCState *state, THCTensor *self, real value) { THAssert(THCTensor_(checkGPU)(state, 1, self)); accreal result; if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(0.0))) { THC_reduceAll(state, self, TensorNonZeroOp<real>(), ReduceAdd<real, accreal>(), ReduceAdd<accreal, accreal>(), ScalarConvert<float, accreal>::to(0.0f), &result, 0); } else if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(1.0))) { THC_reduceAll(state, self, TensorNormOp<real, 1>(value), ReduceAdd<real, accreal>(), ReduceAdd<accreal, accreal>(), ScalarConvert<float, accreal>::to(0.0f), &result, 0); } else if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(2.0))) { THC_reduceAll(state, self, TensorNormOp<real, 2>(value), ReduceAdd<real, accreal>(), ReduceAdd<accreal, accreal>(), ScalarConvert<float, accreal>::to(0.0f), &result, 0); result = THCNumerics<accreal>::sqrt(result); } else { THC_reduceAll(state, self, TensorNormOp<real, -1>(value), ReduceAdd<real, accreal>(), ReduceAdd<accreal, accreal>(), ScalarConvert<float, accreal>::to(0.0f), &result, 0); result = THCNumerics<accreal>::pow( result, ScalarConvert<real, accreal>::to(THCNumerics<real>::cinv(value)) ); } THCudaCheck(cudaGetLastError()); return result; } accreal THCTensor_(dist)(THCState *state, THCTensor *self, THCTensor *src, real value) { 
THAssert(THCTensor_(checkGPU)(state, 2, self, src)); self = THCTensor_(newContiguous)(state, self); ptrdiff_t size = THCTensor_(nElement)(state, self); src = THCTensor_(newContiguous)(state, src); thrust::device_ptr<real> self_data(THCTensor_(data)(state, self)); thrust::device_ptr<real> src_data(THCTensor_(data)(state, src)); accreal result = thrust::inner_product( #if CUDA_VERSION >= 7000 thrust::cuda::par.on(THCState_getCurrentStream(state)), #endif self_data, self_data+size, src_data, ScalarConvert<int, accreal>::to(0), thrust::plus<accreal>(), TensorDistOp<accreal, real>(ScalarConvert<real, accreal>::to(value))); THCTensor_(free)(state, src); THCTensor_(free)(state, self); return THCNumerics<accreal>::pow(result, 1.0 / ScalarConvert<real, accreal>::to(value)); } #endif THC_API accreal THCTensor_(sumall)(THCState *state, THCTensor *self) { THAssert(THCTensor_(checkGPU)(state, 1, self)); accreal val; if (!THC_reduceAll(state, self, thrust::identity<real>(), ReduceAdd<real, accreal>(), ReduceAdd<accreal, accreal>(), ScalarConvert<int, accreal>::to(0), &val, 0)) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } THCudaCheck(cudaGetLastError()); return val; } THC_API accreal THCTensor_(prodall)(THCState *state, THCTensor *self) { THAssert(THCTensor_(checkGPU)(state, 1, self)); accreal val; if (!THC_reduceAll(state, self, thrust::identity<real>(), ReduceMultiply<real, accreal>(), ReduceMultiply<accreal, accreal>(), ScalarConvert<int, accreal>::to(1), &val, 0)) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } val = THCNumerics<accreal>::div( val, ScalarConvert<long, accreal>::to(THCTensor_(nElement)(state, self)) - 1 ); THCudaCheck(cudaGetLastError()); return val; } THC_API accreal THCTensor_(meanall)(THCState *state, THCTensor *self) { THAssert(THCTensor_(checkGPU)(state, 1, self)); THArgCheck(self->nDimension > 0, 1, "empty Tensor"); return THCTensor_(sumall)(state, self)/THCTensor_(nElement)(state, self); } THC_API real THCTensor_(minall)(THCState *state, THCTensor *self) { THAssert(THCTensor_(checkGPU)(state, 1, self)); real val; if (!THC_reduceAll(state, self, thrust::identity<real>(), ReduceMin<real>(), ReduceMin<real>(), THCNumerics<real>::max(), &val, 0)) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } THCudaCheck(cudaGetLastError()); return val; } THC_API real THCTensor_(maxall)(THCState *state, THCTensor *self) { THAssert(THCTensor_(checkGPU)(state, 1, self)); real val; if (!THC_reduceAll(state, self, thrust::identity<real>(), ReduceMax<real>(), ReduceMax<real>(), THCNumerics<real>::min(), &val, 0)) { THArgCheck(false, 1, CUTORCH_DIM_WARNING); } THCudaCheck(cudaGetLastError()); return val; } THC_API void THCTensor_(max)(THCState *state, THCTensor *values, THCudaLongTensor *indices, THCTensor *src, long dimension) { THAssert(THCTensor_(checkGPU)(state, 3, values, indices, src)); thrust::pair<typename TensorUtils<THCTensor>::DataType, long> init = thrust::make_pair<typename TensorUtils<THCTensor>::DataType, long>( THCNumerics<typename TensorUtils<THCTensor>::DataType>::min(), 1); return THC_reduceDimIndex( state, values, indices, src, dimension, init, MaxValuePair<typename TensorUtils<THCTensor>::DataType, long>()); } THC_API void THCTensor_(min)(THCState *state, THCTensor *values, THCudaLongTensor *indices, THCTensor *src, long dimension) { THAssert(THCTensor_(checkGPU)(state, 3, values, indices, src)); thrust::pair<typename TensorUtils<THCTensor>::DataType, long> init = thrust::make_pair<typename TensorUtils<THCTensor>::DataType, long>( THCNumerics<typename 
TensorUtils<THCTensor>::DataType>::max(), 1); return THC_reduceDimIndex( state, values, indices, src, dimension, init, MinValuePair<typename TensorUtils<THCTensor>::DataType, long>()); } #endif
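The norm paths above (THCTensor_(norm) and THCTensor_(normall)) special-case p = 0, 1, 2 and otherwise reduce |x|^p before finishing with a 1/p power. As orientation only, a plain host-side sketch of roughly that recipe; pnorm_reference is a hypothetical helper, not THC code.

#include <cmath>

// Orientation only: reduce |x|^p, then take the 1/p power; p == 0 counts nonzeros.
double pnorm_reference(const double *x, int n, double p) {
    double acc = 0.0;
    if (p == 0.0) {                              // "0-norm": count of nonzero entries
        for (int i = 0; i < n; ++i) acc += (x[i] != 0.0) ? 1.0 : 0.0;
        return acc;
    }
    for (int i = 0; i < n; ++i) acc += std::pow(std::fabs(x[i]), p);  // reduce |x|^p
    return std::pow(acc, 1.0 / p);                                    // then the 1/p power
}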
3612bd95551ad2b9e78c753b9afc51def81b063c.hip
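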
// !!! This is a file automatically generated by hipify!!!
#include <torch/extension.h>

#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>

#include <vector>

template <typename scalar_t>
__global__ void test_cpp_cuda_kernel(
    torch::PackedTensorAccessor<scalar_t,1,torch::RestrictPtrTraits,size_t> input) {
  const int c = blockIdx.x * blockDim.x + threadIdx.x;
  // Guard against the rounded-up grid: threads past the end must not write out of bounds.
  if (c < (int)input.size(0)) {
    input[c] += 1;
  }
}

std::vector<torch::Tensor> test_cpp_cuda(torch::Tensor input) {
  const int input_size = input.size(0);
  const int threads = 256;
  const int batch_size = 1;
  const dim3 blocks((input_size + threads - 1) / threads, batch_size);

  AT_DISPATCH_FLOATING_TYPES(input.type(), "test_cpp_cuda", ([&] {
    hipLaunchKernelGGL((test_cpp_cuda_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
        input.packed_accessor<scalar_t,1,torch::RestrictPtrTraits,size_t>());
  }));

  return {input};
}
3612bd95551ad2b9e78c753b9afc51def81b063c.cu
#include <torch/extension.h>

#include <cuda.h>
#include <cuda_runtime.h>

#include <vector>

template <typename scalar_t>
__global__ void test_cpp_cuda_kernel(
    torch::PackedTensorAccessor<scalar_t,1,torch::RestrictPtrTraits,size_t> input) {
  const int c = blockIdx.x * blockDim.x + threadIdx.x;
  // Guard against the rounded-up grid: threads past the end must not write out of bounds.
  if (c < (int)input.size(0)) {
    input[c] += 1;
  }
}

std::vector<torch::Tensor> test_cpp_cuda(torch::Tensor input) {
  const int input_size = input.size(0);
  const int threads = 256;
  const int batch_size = 1;
  const dim3 blocks((input_size + threads - 1) / threads, batch_size);

  AT_DISPATCH_FLOATING_TYPES(input.type(), "test_cpp_cuda", ([&] {
    test_cpp_cuda_kernel<scalar_t><<<blocks, threads>>>(
        input.packed_accessor<scalar_t,1,torch::RestrictPtrTraits,size_t>());
  }));

  return {input};
}
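Extensions like this pair are normally exposed to Python through a pybind11 module block, which is not part of the files above. A sketch under that assumption (TORCH_EXTENSION_NAME is supplied by torch's build tooling; the docstring is illustrative):

#include <torch/extension.h>
#include <vector>

// Declaration of the wrapper defined in the .cu/.hip file above.
std::vector<torch::Tensor> test_cpp_cuda(torch::Tensor input);

// Hypothetical binding sketch: exposes the wrapper as module.test_cpp_cuda in Python.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("test_cpp_cuda", &test_cpp_cuda, "Add 1 to every element of a 1-D tensor (CUDA)");
}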
dea2e4a7c3acfc23317cff7042089bcffa951b0b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hip_runtime.h"
/*********************************************************************************
Implementing Breadth first search on CUDA using algorithm given in HiPC'07
paper "Accelerating Large Graph Algorithms on the GPU using CUDA"

Copyright (c) 2008 International Institute of Information Technology - Hyderabad.
All rights reserved.

Permission to use, copy, modify and distribute this software and its documentation for
educational purpose is hereby granted without fee, provided that the above copyright
notice and this permission notice appear in all copies of this software and that you
do not sell the software.

THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS,
IMPLIED OR OTHERWISE.

The CUDA Kernel for Applying BFS on a loaded Graph. Created By Pawan Harish
**********************************************************************************/
#ifndef _KERNEL2_H_
#define _KERNEL2_H_

// Second phase of each BFS level: promote vertices flagged in the previous phase into
// the active frontier and report (via *g_over) that another level is needed.
__global__ void Kernel2(hipLaunchParm lp, bool* g_graph_mask, bool *g_updating_graph_mask,
                        bool* g_graph_visited, bool *g_over, int no_of_nodes)
{
    KERNELBEGIN;

    int tid = hipBlockIdx_x*MAX_THREADS_PER_BLOCK + hipThreadIdx_x;
    if( tid<no_of_nodes && g_updating_graph_mask[tid])
    {
        g_graph_mask[tid]=true;
        g_graph_visited[tid]=true;
        *g_over=true;
        g_updating_graph_mask[tid]=false;
    }

    KERNELEND;
}
#endif
dea2e4a7c3acfc23317cff7042089bcffa951b0b.cu
#include "hip_runtime.h" /********************************************************************************* Implementing Breadth first search on CUDA using algorithm given in HiPC'07 paper "Accelerating Large Graph Algorithms on the GPU using CUDA" Copyright (c) 2008 International Institute of Information Technology - Hyderabad. All rights reserved. Permission to use, copy, modify and distribute this software and its documentation for educational purpose is hereby granted without fee, provided that the above copyright notice and this permission notice appear in all copies of this software and that you do not sell the software. THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR OTHERWISE. The CUDA Kernel for Applying BFS on a loaded Graph. Created By Pawan Harish **********************************************************************************/ #ifndef _KERNEL2_H_ #define _KERNEL2_H_ __global__ void Kernel2(hipLaunchParm lp, bool* g_graph_mask, bool *g_updating_graph_mask, bool* g_graph_visited, bool *g_over, int no_of_nodes) { KERNELBEGIN; int tid = hipBlockIdx_x*MAX_THREADS_PER_BLOCK + hipThreadIdx_x; if( tid<no_of_nodes && g_updating_graph_mask[tid]) { g_graph_mask[tid]=true; g_graph_visited[tid]=true; *g_over=true; g_updating_graph_mask[tid]=false; } KERNELEND; } #endif
70c98b4c7944e1d5cc1f716573614da2fea7a628.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "THHTensorRandom.h" #include "THHDeviceUtils.cuh" #include "THHGeneral.h" #include "THHTensorCopy.h" #include "THHTensorMath.h" #include "THHReduceApplyUtils.cuh" #include "THHTensorRandom.cuh" #include "THHGenerator.hpp" #include <thrust/functional.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <hiprand/hiprand_mtgp32_host.h> #include <rocrand/rocrand_mtgp32_11213.h> #define MAX_NUM_BLOCKS 64 #define BLOCK_SIZE 256 THCGenerator* THCRandom_getGenerator(THCState* state); /* Sets up generator. Allocates but does not create the generator states. Not thread-safe. */ __host__ void initializeGenerator(THCState *state, THCGenerator* gen) { gen->state.gen_states = static_cast<struct hiprandStateMtgp32_t*>(THCudaMalloc(state, MAX_NUM_BLOCKS * sizeof(hiprandStateMtgp32_t))); gen->state.kernel_params = static_cast<mtgp32_kernel_params_t*>(THCudaMalloc(state, sizeof(mtgp32_kernel_params_t))); } /* Creates a new generator state given the seed. Not thread-safe. */ __host__ void createGeneratorState(THCGenerator* gen, uint64_t seed) { if (hiprandMakeMTGP32Constants(mtgp32dc_params_fast_11213, gen->state.kernel_params) != HIPRAND_STATUS_SUCCESS) { THError("Creating MTGP constants failed."); } if (hiprandMakeMTGP32KernelState(gen->state.gen_states, mtgp32dc_params_fast_11213, gen->state.kernel_params, MAX_NUM_BLOCKS, seed) != HIPRAND_STATUS_SUCCESS) { THError("Creating MTGP kernel state failed."); } } __host__ void THCRandom_getRNGState(THCState* state, THByteTensor *rng_state) { THCGenerator* gen = THCRandom_getGenerator(state); std::lock_guard<std::mutex> lock(gen->mutex); // The RNG state comprises the MTPG32 states, the seed, and an offset used for Philox static const size_t states_size = MAX_NUM_BLOCKS * sizeof(hiprandStateMtgp32_t); static const size_t seed_size = sizeof(gen->state.initial_seed); static const size_t offset_size = sizeof(gen->state.philox_seed_offset); static const size_t total_size = states_size + seed_size + offset_size; THByteTensor_resize1d(rng_state, total_size); THArgCheck(THByteTensor_nElement(rng_state) == total_size, 1, "RNG state is wrong size"); THArgCheck(THByteTensor_isContiguous(rng_state), 1, "RNG state must be contiguous"); THCudaCheck(hipMemcpy(THByteTensor_data(rng_state), gen->state.gen_states, states_size, hipMemcpyDeviceToHost)); memcpy(THByteTensor_data(rng_state) + states_size, &gen->state.initial_seed, seed_size); memcpy(THByteTensor_data(rng_state) + states_size + seed_size, &gen->state.philox_seed_offset, offset_size); } __global__ void set_rngstate_kernel(hiprandStateMtgp32_t *state, mtgp32_kernel_params_t *kernel) { state[threadIdx.x].k = kernel; } __host__ void THCRandom_setRNGState(THCState* state, THByteTensor *rng_state) { THCGenerator* gen = THCRandom_getGenerator(state); std::lock_guard<std::mutex> lock(gen->mutex); static const size_t states_size = MAX_NUM_BLOCKS * sizeof(hiprandStateMtgp32_t); static const size_t seed_size = sizeof(gen->state.initial_seed); static const size_t offset_size = sizeof(gen->state.philox_seed_offset); static const size_t total_size = states_size + seed_size + offset_size; bool no_philox_seed = false; if (THByteTensor_nElement(rng_state) == total_size - offset_size) { no_philox_seed = true; } else { THArgCheck(THByteTensor_nElement(rng_state) == total_size, 1, "RNG state is wrong size"); } THArgCheck(THByteTensor_isContiguous(rng_state), 1, "RNG state must be contiguous"); 
THCudaCheck(hipMemcpy(gen->state.gen_states, THByteTensor_data(rng_state), states_size, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( set_rngstate_kernel), dim3(1), dim3(MAX_NUM_BLOCKS), 0, THCState_getCurrentStream(state), gen->state.gen_states, gen->state.kernel_params); memcpy(&gen->state.initial_seed, THByteTensor_data(rng_state) + states_size, seed_size); if (!no_philox_seed) { memcpy(&gen->state.philox_seed_offset, THByteTensor_data(rng_state) + states_size + seed_size, offset_size); } else { gen->state.philox_seed_offset = 0; } } // Goes from (0, 1] to [0, 1). Note 1-x is not sufficient since for some floats // eps near 0, 1-eps will round to 1. template <typename T> __device__ inline T reverse_bounds(T value) { if (THCNumerics<T>::eq(value, ScalarConvert<int, T>::to(1))) { return ScalarConvert<int, T>::to(0); } return value; } __device__ inline half half_uniform_scale_and_shift(float x, double a, double b) { half width = ScalarConvert<double, half>::to(b - a); half start = ScalarConvert<double, half>::to(a); half scaled = THCNumerics<half>::mul(reverse_bounds(ScalarConvert<float, half>::to(x)), width); return THCNumerics<half>::add(scaled, start); } #define GENERATE_KERNEL1(NAME, T, ARG1, CURAND_T, CURAND_FUNC, TRANSFORM) \ __global__ void NAME(hiprandStateMtgp32_t *state, int size, T *result, ARG1) \ { \ int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; \ int rounded_size = THCCeilDiv(size, BLOCK_SIZE) * BLOCK_SIZE; \ for (int i = idx; i < rounded_size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) { \ CURAND_T x = CURAND_FUNC(&state[blockIdx.x]); \ if (i < size) { \ T y = TRANSFORM; \ result[i] = y; \ } \ } \ } #define GENERATE_KERNEL2(NAME, T, ARG1, ARG2, CURAND_T, CURAND_FUNC, TRANSFORM) \ __global__ void NAME(hiprandStateMtgp32_t *state, int size, T *result, ARG1, ARG2) \ { \ int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; \ int rounded_size = THCCeilDiv(size, BLOCK_SIZE) * BLOCK_SIZE; \ for (int i = idx; i < rounded_size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) { \ CURAND_T x = CURAND_FUNC(&state[blockIdx.x]); \ if (i < size) { \ T y = TRANSFORM; \ result[i] = y; \ } \ } \ } template<typename T, typename U> struct is_same { static const bool value = false; }; template<typename T> struct is_same<T, T> { static const bool value = true; }; template<typename T, typename prob_type> __global__ void generate_bernoulli_tensor(hiprandStateMtgp32_t *state, int size, T *result, prob_type *probs) { int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; int rounded_size = THCCeilDiv(size, BLOCK_SIZE) * BLOCK_SIZE; for (int i = idx; i < rounded_size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) { if (is_same<prob_type, double>::value) { double x = hiprand_uniform_double(&state[blockIdx.x]); if (i < size) result[i] = ScalarConvert<bool, T>::to(x <= probs[i]); } else { float x = hiprand_uniform(&state[blockIdx.x]); if (i < size) result[i] = ScalarConvert<bool, T>::to(x <= probs[i]); } } } // NOTE: hiprand_uniform is (0, 1] and we want [a, b) GENERATE_KERNEL2(generate_uniform, float, float a, float b, float, hiprand_uniform, reverse_bounds(x) * (b-a) + a) GENERATE_KERNEL2(generate_uniform, float, double a, double b, float, hiprand_uniform, reverse_bounds(x) * (b-a) + a) GENERATE_KERNEL2(generate_uniform, double, double a, double b, double, hiprand_uniform_double, reverse_bounds(x) * (b-a) + a) GENERATE_KERNEL2(generate_normal, float, double mean, double stdv, float, hiprand_normal, (x * stdv) + mean) GENERATE_KERNEL2(generate_normal, double, double mean, double stdv, double, hiprand_normal_double, (x * stdv) + mean) 
GENERATE_KERNEL1(generate_exponential, float, double lambda, float, hiprand_uniform, (float)(-1. / lambda * log(x))) GENERATE_KERNEL1(generate_exponential, double, double lambda, double, hiprand_uniform_double, (double)(-1. / lambda * log(x))) GENERATE_KERNEL2(generate_cauchy, float, double median, double sigma, float, hiprand_uniform, (float)(median + sigma * tan(M_PI*(x-0.5)))) GENERATE_KERNEL2(generate_cauchy, double, double median, double sigma, double, hiprand_uniform_double, (double)(median + sigma * tan(M_PI*(x-0.5)))) GENERATE_KERNEL2(generate_uniform, half, double a, double b, float, hiprand_uniform, (half_uniform_scale_and_shift(x, a, b))) GENERATE_KERNEL2(generate_normal, half, double mean, double stdv, float, hiprand_normal, (ScalarConvert<float, half>::to((x * stdv) + mean))) GENERATE_KERNEL1(generate_exponential, half, double lambda, float, hiprand_uniform, (ScalarConvert<float, half>::to((float)(-1. / lambda * log(x))))) GENERATE_KERNEL2(generate_cauchy, half, double median, double sigma, float, hiprand_uniform, (ScalarConvert<float, half>::to((float)(median + sigma * tan(M_PI*(x-0.5)))))) #include "generic/THCTensorRandom.cu" #include "THHGenerateAllTypes.h" #undef GENERATE_KERNEL1 #undef GENERATE_KERNEL2
70c98b4c7944e1d5cc1f716573614da2fea7a628.cu
#include "THCTensorRandom.h" #include "THCDeviceUtils.cuh" #include "THCGeneral.h" #include "THCTensorCopy.h" #include "THCTensorMath.h" #include "THCReduceApplyUtils.cuh" #include "THCTensorRandom.cuh" #include "THCGenerator.hpp" #include <thrust/functional.h> #include <curand.h> #include <curand_kernel.h> #include <curand_mtgp32_host.h> #include <curand_mtgp32dc_p_11213.h> #define MAX_NUM_BLOCKS 200 #define BLOCK_SIZE 256 THCGenerator* THCRandom_getGenerator(THCState* state); /* Sets up generator. Allocates but does not create the generator states. Not thread-safe. */ __host__ void initializeGenerator(THCState *state, THCGenerator* gen) { gen->state.gen_states = static_cast<struct curandStateMtgp32*>(THCudaMalloc(state, MAX_NUM_BLOCKS * sizeof(curandStateMtgp32))); gen->state.kernel_params = static_cast<mtgp32_kernel_params*>(THCudaMalloc(state, sizeof(mtgp32_kernel_params))); } /* Creates a new generator state given the seed. Not thread-safe. */ __host__ void createGeneratorState(THCGenerator* gen, uint64_t seed) { if (curandMakeMTGP32Constants(mtgp32dc_params_fast_11213, gen->state.kernel_params) != CURAND_STATUS_SUCCESS) { THError("Creating MTGP constants failed."); } if (curandMakeMTGP32KernelState(gen->state.gen_states, mtgp32dc_params_fast_11213, gen->state.kernel_params, MAX_NUM_BLOCKS, seed) != CURAND_STATUS_SUCCESS) { THError("Creating MTGP kernel state failed."); } } __host__ void THCRandom_getRNGState(THCState* state, THByteTensor *rng_state) { THCGenerator* gen = THCRandom_getGenerator(state); std::lock_guard<std::mutex> lock(gen->mutex); // The RNG state comprises the MTPG32 states, the seed, and an offset used for Philox static const size_t states_size = MAX_NUM_BLOCKS * sizeof(curandStateMtgp32); static const size_t seed_size = sizeof(gen->state.initial_seed); static const size_t offset_size = sizeof(gen->state.philox_seed_offset); static const size_t total_size = states_size + seed_size + offset_size; THByteTensor_resize1d(rng_state, total_size); THArgCheck(THByteTensor_nElement(rng_state) == total_size, 1, "RNG state is wrong size"); THArgCheck(THByteTensor_isContiguous(rng_state), 1, "RNG state must be contiguous"); THCudaCheck(cudaMemcpy(THByteTensor_data(rng_state), gen->state.gen_states, states_size, cudaMemcpyDeviceToHost)); memcpy(THByteTensor_data(rng_state) + states_size, &gen->state.initial_seed, seed_size); memcpy(THByteTensor_data(rng_state) + states_size + seed_size, &gen->state.philox_seed_offset, offset_size); } __global__ void set_rngstate_kernel(curandStateMtgp32 *state, mtgp32_kernel_params *kernel) { state[threadIdx.x].k = kernel; } __host__ void THCRandom_setRNGState(THCState* state, THByteTensor *rng_state) { THCGenerator* gen = THCRandom_getGenerator(state); std::lock_guard<std::mutex> lock(gen->mutex); static const size_t states_size = MAX_NUM_BLOCKS * sizeof(curandStateMtgp32); static const size_t seed_size = sizeof(gen->state.initial_seed); static const size_t offset_size = sizeof(gen->state.philox_seed_offset); static const size_t total_size = states_size + seed_size + offset_size; bool no_philox_seed = false; if (THByteTensor_nElement(rng_state) == total_size - offset_size) { no_philox_seed = true; } else { THArgCheck(THByteTensor_nElement(rng_state) == total_size, 1, "RNG state is wrong size"); } THArgCheck(THByteTensor_isContiguous(rng_state), 1, "RNG state must be contiguous"); THCudaCheck(cudaMemcpy(gen->state.gen_states, THByteTensor_data(rng_state), states_size, cudaMemcpyHostToDevice)); set_rngstate_kernel<<<1, MAX_NUM_BLOCKS, 0, 
THCState_getCurrentStream(state)>>>( gen->state.gen_states, gen->state.kernel_params); memcpy(&gen->state.initial_seed, THByteTensor_data(rng_state) + states_size, seed_size); if (!no_philox_seed) { memcpy(&gen->state.philox_seed_offset, THByteTensor_data(rng_state) + states_size + seed_size, offset_size); } else { gen->state.philox_seed_offset = 0; } } // Goes from (0, 1] to [0, 1). Note 1-x is not sufficient since for some floats // eps near 0, 1-eps will round to 1. template <typename T> __device__ inline T reverse_bounds(T value) { if (THCNumerics<T>::eq(value, ScalarConvert<int, T>::to(1))) { return ScalarConvert<int, T>::to(0); } return value; } __device__ inline half half_uniform_scale_and_shift(float x, double a, double b) { half width = ScalarConvert<double, half>::to(b - a); half start = ScalarConvert<double, half>::to(a); half scaled = THCNumerics<half>::mul(reverse_bounds(ScalarConvert<float, half>::to(x)), width); return THCNumerics<half>::add(scaled, start); } #define GENERATE_KERNEL1(NAME, T, ARG1, CURAND_T, CURAND_FUNC, TRANSFORM) \ __global__ void NAME(curandStateMtgp32 *state, int size, T *result, ARG1) \ { \ int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; \ int rounded_size = THCCeilDiv(size, BLOCK_SIZE) * BLOCK_SIZE; \ for (int i = idx; i < rounded_size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) { \ CURAND_T x = CURAND_FUNC(&state[blockIdx.x]); \ if (i < size) { \ T y = TRANSFORM; \ result[i] = y; \ } \ } \ } #define GENERATE_KERNEL2(NAME, T, ARG1, ARG2, CURAND_T, CURAND_FUNC, TRANSFORM) \ __global__ void NAME(curandStateMtgp32 *state, int size, T *result, ARG1, ARG2) \ { \ int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; \ int rounded_size = THCCeilDiv(size, BLOCK_SIZE) * BLOCK_SIZE; \ for (int i = idx; i < rounded_size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) { \ CURAND_T x = CURAND_FUNC(&state[blockIdx.x]); \ if (i < size) { \ T y = TRANSFORM; \ result[i] = y; \ } \ } \ } template<typename T, typename U> struct is_same { static const bool value = false; }; template<typename T> struct is_same<T, T> { static const bool value = true; }; template<typename T, typename prob_type> __global__ void generate_bernoulli_tensor(curandStateMtgp32 *state, int size, T *result, prob_type *probs) { int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; int rounded_size = THCCeilDiv(size, BLOCK_SIZE) * BLOCK_SIZE; for (int i = idx; i < rounded_size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) { if (is_same<prob_type, double>::value) { double x = curand_uniform_double(&state[blockIdx.x]); if (i < size) result[i] = ScalarConvert<bool, T>::to(x <= probs[i]); } else { float x = curand_uniform(&state[blockIdx.x]); if (i < size) result[i] = ScalarConvert<bool, T>::to(x <= probs[i]); } } } // NOTE: curand_uniform is (0, 1] and we want [a, b) GENERATE_KERNEL2(generate_uniform, float, float a, float b, float, curand_uniform, reverse_bounds(x) * (b-a) + a) GENERATE_KERNEL2(generate_uniform, float, double a, double b, float, curand_uniform, reverse_bounds(x) * (b-a) + a) GENERATE_KERNEL2(generate_uniform, double, double a, double b, double, curand_uniform_double, reverse_bounds(x) * (b-a) + a) GENERATE_KERNEL2(generate_normal, float, double mean, double stdv, float, curand_normal, (x * stdv) + mean) GENERATE_KERNEL2(generate_normal, double, double mean, double stdv, double, curand_normal_double, (x * stdv) + mean) GENERATE_KERNEL1(generate_exponential, float, double lambda, float, curand_uniform, (float)(-1. 
/ lambda * log(x))) GENERATE_KERNEL1(generate_exponential, double, double lambda, double, curand_uniform_double, (double)(-1. / lambda * log(x))) GENERATE_KERNEL2(generate_cauchy, float, double median, double sigma, float, curand_uniform, (float)(median + sigma * tan(M_PI*(x-0.5)))) GENERATE_KERNEL2(generate_cauchy, double, double median, double sigma, double, curand_uniform_double, (double)(median + sigma * tan(M_PI*(x-0.5)))) GENERATE_KERNEL2(generate_uniform, half, double a, double b, float, curand_uniform, (half_uniform_scale_and_shift(x, a, b))) GENERATE_KERNEL2(generate_normal, half, double mean, double stdv, float, curand_normal, (ScalarConvert<float, half>::to((x * stdv) + mean))) GENERATE_KERNEL1(generate_exponential, half, double lambda, float, curand_uniform, (ScalarConvert<float, half>::to((float)(-1. / lambda * log(x))))) GENERATE_KERNEL2(generate_cauchy, half, double median, double sigma, float, curand_uniform, (ScalarConvert<float, half>::to((float)(median + sigma * tan(M_PI*(x-0.5)))))) #include "generic/THCTensorRandom.cu" #include "THCGenerateAllTypes.h" #undef GENERATE_KERNEL1 #undef GENERATE_KERNEL2
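All of the samplers in this file are generated by the GENERATE_KERNEL1/2 macros. For readability only, expanding GENERATE_KERNEL2(generate_uniform, double, double a, double b, double, curand_uniform_double, reverse_bounds(x) * (b-a) + a) by hand gives a kernel of the following shape; this is a manual expansion using names already defined above, not additional source.

__global__ void generate_uniform(curandStateMtgp32 *state, int size, double *result,
                                 double a, double b)
{
    int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    int rounded_size = THCCeilDiv(size, BLOCK_SIZE) * BLOCK_SIZE;
    for (int i = idx; i < rounded_size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) {
        double x = curand_uniform_double(&state[blockIdx.x]);  // curand returns (0, 1]
        if (i < size) {
            double y = reverse_bounds(x) * (b - a) + a;        // shifted/scaled to [a, b)
            result[i] = y;
        }
    }
}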
828b2f450f1e164dddf1e4b40bbc00bf5c587557.hip
// !!! This is a file automatically generated by hipify!!! #include <string.h> #include <stdlib.h> #include <stdio.h> #include <math.h> #include <time.h> #include <hip/hip_runtime.h> #include "rocblas.h" // nvcc 036sgemm.c -lcublas #define DGEMM dgemm_ #define DSPEV dspev_ #define PRINTF printf #define EXIT exit #define CLOCKS_PER_SEC_C 1000000 #define MAXTIME 2147.48 void cputime(double *); double get_iter_Tmat(double *,double *,int ); void get_diag_Tmat(double *,double *,int ); void get_unit_Tmat(double *,int ); extern "C" { void DGEMM (char *, char *, int *, int *, int *,double *,double *, int *, double *, int *, double *, double *, int * ); } int matmul(double *X, int *LDX, int *ITYPE_X, double *Y, int *LDY, int *ITYPE_Y, double *Z, int *LDZ, int *NRZ, int *NCZ, int *NXY, double *ALPHA, double *BETA) { int m = *NRZ; int n = *NCZ; int k = *NXY; //char MATX=(ITYPE_X) ? 'N' : 'T'; //char MATY=(ITYPE_Y) ? 'N' : 'T'; // DGEMM(&MATX,&MATY,NRZ,NCZ,NXY,ALPHA,X,LDX,Y,LDY,BETA,Z,LDZ); hipblasOperation_t MATX = (ITYPE_X) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t MATY = (ITYPE_Y) ? HIPBLAS_OP_N : HIPBLAS_OP_T; // hipError_t cudaStat; // hipMalloc status // hipblasStatus_t stat; // CUBLAS functions status hipblasHandle_t handle; // CUBLAS context // Step 1: Allocate memory on the device: double *d_X, *d_Y, *d_Z; hipMalloc(&d_X, (m*k)*sizeof(double)); // X is an m x k matrix hipMalloc(&d_Y, (k*n)*sizeof(double)); // Y is a k X n matix hipMalloc(&d_Z, (m*n)*sizeof(double)); // Z is an m x n matix hipblasCreate(&handle); // initialize CUBLAS context // Step 2: Initailize device memory from host: hipblasSetMatrix(m, k, sizeof(double), X, m, d_X, m); hipblasSetMatrix(k, n, sizeof(double), Y, k, d_Y, k); hipblasSetMatrix(m, n, sizeof(double), Z, m, d_Z, m); // Step 3: Perform operation, function launches kernel on GPU itself hipblasDgemm(handle, MATX, MATY, m, n, k, ALPHA, d_X, m, d_Y, k, BETA, d_Z, m); // Step 4: Copy the result back to the host: hipblasGetMatrix(m, n, sizeof(double), d_Z, m, Z, m); // Step 5: Clean up hipFree(d_X); hipFree(d_Y); hipFree(d_Z); hipblasDestroy(handle); } //DGEMM ( TRANSA, TRANSB, M, N, K, ALPHA, A, LDA, B, LDB, BETA, C, LDC ) /* hipblasStatus_t hipblasDgemm(hipblasHandle_t handle, hipblasOperation_t transa, hipblasOperation_t transb, int m, int n, int k, const double *alpha, const double *A, int lda, const double *B, int ldb, const double *beta, double *C, int ldc) */ int device_matmul(double *d_X, int *LDX, int *ITYPE_X, double *d_Y, int *LDY, int *ITYPE_Y, double *d_Z, int *LDZ, int *NRZ, int *NCZ, int *NXY, double *ALPHA, double *BETA, hipblasHandle_t handle) { int m = *NRZ; int n = *NCZ; int k = *NXY; hipblasOperation_t MATX = (ITYPE_X) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t MATY = (ITYPE_Y) ? 
HIPBLAS_OP_N : HIPBLAS_OP_T;
  //hipblasHandle_t handle; // CUBLAS context
  hipblasDgemm(handle, MATX, MATY, m, n, k, ALPHA, d_X, m, d_Y, k, BETA, d_Z, m);
}

#define _USE_LAPACK_
#ifdef _USE_LAPACK_
extern "C" {void DSPEV(char *, char *, int *, double [], double [], double [], int *, double [], int *);}
#endif

//=======================================================================
//ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc
//=======================================================================
int main()
//=======================================================================
{// begin routine
//=======================================================================
// I) Set up the problem

  int nstate;
  PRINTF("\n============================================\n");
  PRINTF("Enter the matrix size : ");scanf("%d",&nstate);

  int nstate_sq = nstate*nstate;
  double *S     = new double[nstate_sq];
  double *Tunit = new double[nstate_sq];
  double *Tdiag = new double[nstate_sq];
  double *Titer = new double[nstate_sq];

  PRINTF("Using random input\n\n");
  for(int i=0;i<nstate_sq;i++){S[i]=0.0;}
  for(int i=0;i<nstate;i++){int ind =i+nstate*i;S[ind]=2.0;}

  double seed=14571.0;
  srand48((long) seed);
  for(int i=0;i<nstate;i++){
  for(int j=i;j<nstate;j++){
    int ind  = i+nstate*j;
    int indt = j+nstate*i;
    int n=1,ierr=0;
    double rand=drand48();
    S[ind] += (rand-0.5)*2.0e-3;
    S[indt] = S[ind];
  }}//endfor

//=======================================================================
// II) Try three methods

  //get_unit_Tmat(Tunit,nstate);
  //get_diag_Tmat(S,Tdiag,nstate);
  get_iter_Tmat(S,Titer,nstate);
  get_iter_Tmat(S,Titer,nstate);
  get_iter_Tmat(S,Titer,nstate);
  double sum = 0;
  for(int i = 0; i < 1000; i++){
    sum += get_iter_Tmat(S,Titer,nstate);
  }
  printf("The average time taken is %g s for Update-2.0\n\n", sum/1000.0); // mean of the 1000 timed calls, in seconds

//=======================================================================
// III) Check the error of the iterative method
//
//  double err=0.0;
//  for(int i=0;i<nstate_sq;i++){
//    double tmp=Tdiag[i]-Titer[i];
//    tmp = tmp*tmp;
//    err = (err > tmp ? err : tmp);
//  }//endfor
//  err = sqrt(err);
//  PRINTF("Maximum error in any element : %g\n",err);
//
//  err=0.0;
//  for(int i=0;i<nstate;i++){
//  for(int j=i;j<nstate;j++){
//    int ind  = i + j*nstate;
//    int indt = j + i*nstate;
//    double tmp=Titer[ind]-Titer[indt];
//    tmp = tmp*tmp;
//    err = (err > tmp ?
err : tmp); // }}//endfor // err = sqrt(err); // PRINTF("Deviation from symmetric : %g\n",err); // PRINTF("============================================\n\n"); //======================================================================= }//end routine //======================================================================= //============================================================================ //cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc //============================================================================ // Diagonalize S and construct T=S^{-1/2} using eigenvalues and eigenvectors //============================================================================ void get_diag_Tmat(double *S,double *T,int nstate) //============================================================================ {//begin routine //============================================================================ // I) Get some scratch double cpu1,cpu2; cputime(&cpu1); int nstate_sq = nstate*nstate; double *umat = new double[nstate_sq]; double *scr_mat1 = new double[nstate_sq]; double *scr_mat2 = new double[nstate_sq]; double *s_eigs = new double[nstate]; double *scr1 = new double[3*nstate]; double *scr2 = new double[3*nstate]; //========================================================================== // II. Diagonalize S using rs_ FORTRAN diagonalization routine int ifound = 0; int ierr = 0; //---------------------------------------------------------------------- // Use LAPACK : Captain Jack is Happy. #ifdef _USE_LAPACK_ ifound ++; for(int i = 1; i <= nstate; i++){ for(int j = 1; j <= i; j++){ int ind = (i-1) + (j-1)*nstate; int ind2 = (i-1) + (j-1)*(2*nstate-j)/2; scr_mat1[ind2] = S[ind]; }}//endfor char Vstuff ='V'; char Lstuff ='L'; DSPEV(&Vstuff,&Lstuff,&nstate,scr_mat1,s_eigs,umat,&nstate,scr1,&ierr); #endif if(ifound!=1 || ierr != 0){ PRINTF("@@@@@@@@@@@@@@@@@@@@_error_@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n"); PRINTF("Error trying to diagonalize S : %d %d\n",ifound,ierr); PRINTF("@@@@@@@@@@@@@@@@@@@@_error_@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n"); EXIT(1); }//endif //========================================================================== // III. Compute inverse square root of eigenvalues: Occupation numbers // are HACKED!!!!! 
//---------------------------------------------------------------------- // A) Construct diagonal matrix using eigenvalues : sqrt(2/lamba) for(int i = 0; i < nstate; i++){s_eigs[i] = sqrt(2.0/s_eigs[i]);} memset(scr_mat1,0,sizeof(double)*nstate_sq); for(int i = 0; i < nstate; i++){ int ind = i*nstate+i; scr_mat1[ind]=s_eigs[i]; }/* endfor */ //------------------------------------------------------------------------ // B) Transform matrix back to original representation using eigenvectors double alpha = 1.0; double beta = 0.0; int itransp = 0; int inorm = 1; matmul(scr_mat1,&nstate,&inorm,umat,&nstate,&itransp,scr_mat2, &nstate,&nstate,&nstate,&nstate,&alpha,&beta); matmul(umat,&nstate,&inorm,scr_mat2,&nstate,&inorm,T, &nstate,&nstate,&nstate,&nstate,&alpha,&beta); //============================================================================ // IV) Free allocated temporary memory delete [] umat; delete [] scr_mat1; delete [] scr_mat2; delete [] s_eigs; delete [] scr1; delete [] scr2; cputime(&cpu2); PRINTF("nstate %d : cpu time diag : %g\n\n",nstate,cpu2-cpu1); //============================================================================ } /* End function */ //============================================================================ //============================================================================ //cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc //============================================================================ // Set Tmax to the Unit matrix : remove cputime overhead of diag to test // parallel performance //============================================================================ void get_unit_Tmat(double *Tunit,int nstate){ int nstate_sq = nstate*nstate; memset(Tunit,0,nstate_sq*sizeof(double)); for(int i=0;i<nstate;i++){int ind = i+i*nstate;Tunit[ind] = 1.0;} } //============================================================================ //============================================================================ //cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc //============================================================================ // Schulz iteration for inverse sqrt root : quadratic convergence! 
//============================================================================ double get_iter_Tmat(double *S,double *Titer,int nstate) //============================================================================ {//begin routine //============================================================================ // I) Get some scratch double cpu1,cpu2; cputime(&cpu1); int nstate_sq = nstate*nstate; double *scr_mat1 = new double[nstate_sq]; double *scr_mat2 = new double[nstate_sq]; double *scr_mat3 = new double[nstate_sq]; //============================================================================ // II) Set up CUBLAS context // hipError_t cudaStat; // hipMalloc status // hipblasStatus_t stat; // CUBLAS functions status hipblasHandle_t handle; // CUBLAS context //============================================================================ // III) Allocate memory on the device double *d_Titer, *d_mat1, *d_mat2, *d_mat3; hipMalloc(&d_Titer, nstate_sq*sizeof(double)); hipMalloc(&d_mat1, nstate_sq*sizeof(double)); hipMalloc(&d_mat2, nstate_sq*sizeof(double)); hipMalloc(&d_mat3, nstate_sq*sizeof(double)); hipblasCreate(&handle); // initialize CUBLAS context //============================================================================ // IV) Schulz iteration //-------------------------------------------------------------------- // A) Initialize scr_mat1 and Titer on host // scr_mat1 = S/2 for(int i=0;i<nstate_sq;i++){scr_mat1[i] = S[i]/2.0;} // Titer = I = unit matrix memset(Titer,0,nstate_sq*sizeof(double)); for(int i=0;i<nstate;i++){int ind = i+i*nstate;Titer[ind] = 1.0;} //-------------------------------------------------------------------- // B) Initailize d_mat1 and d_Titer on device hipblasSetMatrix(nstate, nstate, sizeof(double), scr_mat1, nstate, d_mat1, nstate); hipblasSetMatrix(nstate, nstate, sizeof(double), Titer, nstate, d_Titer, nstate); //hipblasSetMatrix(m, n, sizeof(double), Z, m, d_Z, m); //-------------------------------------------------------------------- // C) Iterate int iter = 0; //double tol_now = 1.0; while (iter < 4){ //(tol_now > 1.0e-15 && iter<10){ iter++; //-------------------------------- // scr_mat2 = 3*I - Titer*scr_mat1 int itransp = 0; int inorm = 1; double alpha = -1.0; double beta = 1.0; memset(scr_mat2,0,nstate_sq*sizeof(double)); for(int i=0;i<nstate;i++){int ind = i+i*nstate;scr_mat2[ind]=3.0;} hipblasSetMatrix(nstate, nstate, sizeof(double), scr_mat2, nstate, d_mat2, nstate); device_matmul(d_Titer,&nstate,&inorm,d_mat1,&nstate,&itransp,d_mat2, &nstate,&nstate,&nstate,&nstate,&alpha,&beta,handle); //-------------------------------- // scr_mat1 = 0.5*scr_mat1*scr_mat2 = 0.5*scr_mat3*scr_mat2 alpha = 0.5; beta = 0.0; hipMemcpy(d_mat3,d_mat1,nstate_sq*sizeof(double),hipMemcpyDeviceToDevice); device_matmul(d_mat3,&nstate,&inorm,d_mat2,&nstate,&itransp,d_mat1, &nstate,&nstate,&nstate,&nstate,&alpha,&beta,handle); //-------------------------------- // Titer = 0.5*scr_mat2*Titer = 0.5*scr_mat2*scr_mat3 alpha = 0.5; beta = 0.0; hipMemcpy(d_mat3,d_Titer,nstate_sq*sizeof(double),hipMemcpyDeviceToDevice); device_matmul(d_mat2,&nstate,&inorm,d_mat3,&nstate,&itransp,d_Titer, &nstate,&nstate,&nstate,&nstate,&alpha,&beta,handle); //-------------------------------- // tolerence check // hipblasGetMatrix(nstate, nstate, sizeof(double), d_mat3, nstate, scr_mat3, nstate); // hipblasGetMatrix(nstate, nstate, sizeof(double), d_Titer, nstate, Titer, nstate); // tol_now = 0.0; // for(int i=0;i<nstate_sq;i++){ // double tmp=scr_mat3[i]-Titer[i]; // tol_now += tmp*tmp; // 
}//endfor // tol_now /= ((double)nstate_sq); // tol_now = sqrt(tol_now); //PRINTF("iter %d : tol %g\n",iter,tol_now); // PRINTF("iter %d : Skipping tolerence check, guessing 4 iterations should be done\n", iter); }//endwhile //if(tol_now>1.0e-15){ // PRINTF("@@@@@@@@@@@@@@@@@@@@_error_@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n"); // PRINTF("Iterative computation of S^{-1/2} failed\n"); // PRINTF("@@@@@@@@@@@@@@@@@@@@_error_@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n"); // EXIT(1); //}//endif /*==========================================================================*/ // V) Copy the result back to the host hipblasGetMatrix(nstate, nstate, sizeof(double), d_Titer, nstate, Titer, nstate); /*==========================================================================*/ // VI) Clean up device hipFree(d_Titer); hipFree(d_mat1); hipFree(d_mat2); hipFree(d_mat3); hipblasDestroy(handle); // VII) Clean up host delete [] scr_mat1; delete [] scr_mat2; delete [] scr_mat3; cputime(&cpu2); // PRINTF("nstate %d : cpu time iter : %g\n\n",nstate,cpu2-cpu1); return cpu2-cpu1; /*==========================================================================*/ }//end routine /*==========================================================================*/ /*==========================================================================*/ /*cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc*/ /*==========================================================================*/ /* subroutine to time processes */ /*==========================================================================*/ void cputime(double *time) /*==========================================================================*/ { int itime; static double to=0.,tn=0.; itime = clock(); tn = (double)((double)itime/(double)CLOCKS_PER_SEC_C); *time = tn; if(tn >= 0 && to >= 0){*time=tn;} if(tn < 0 && to >= 0){*time=MAXTIME*2.0+tn;} if(tn >= 0 && to < 0){*time=tn+MAXTIME;} if(tn < 0 && to < 0){*time=MAXTIME+tn;} to = tn; } /*==========================================================================*/
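// get_iter_Tmat() above is the coupled Newton-Schulz (Schulz) iteration for the matrix inverse
// square root: with A = S/2, Y_0 = A, Z_0 = I, it repeats
//     P = 3*I - Z_k*Y_k,   Y_{k+1} = 0.5*Y_k*P,   Z_{k+1} = 0.5*P*Z_k,
// and Z_k -> A^{-1/2} = sqrt(2)*S^{-1/2}, which matches the sqrt(2/lambda) scaling used in
// get_diag_Tmat(). The sketch below is a plain CPU reference of that recurrence (column-major,
// naive O(n^3) products, fixed iteration count); it is only meant for checking the GPU path on
// small matrices and is not part of the original program.
#include <vector>
#include <cstring>

static void ns_gemm_ref(int n, const double *X, const double *Y, double *Z)
{
  // Z = X * Y, column-major
  for (int j = 0; j < n; j++){
    for (int i = 0; i < n; i++){
      double s = 0.0;
      for (int k = 0; k < n; k++){ s += X[k*n + i] * Y[j*n + k]; }
      Z[j*n + i] = s;
    }
  }
}

void newton_schulz_ref(const double *S, double *T, int n, int iters)
{
  std::vector<double> Y(n*n), Z(n*n, 0.0), P(n*n), Yn(n*n), Zn(n*n);
  for (int i = 0; i < n*n; i++){ Y[i] = 0.5 * S[i]; }       // Y_0 = S/2
  for (int i = 0; i < n;   i++){ Z[i*n + i] = 1.0; }        // Z_0 = I
  for (int it = 0; it < iters; it++){
    ns_gemm_ref(n, Z.data(), Y.data(), P.data());           // P = Z*Y
    for (int i = 0; i < n*n; i++){ P[i] = -P[i]; }
    for (int i = 0; i < n;   i++){ P[i*n + i] += 3.0; }     // P = 3I - Z*Y
    ns_gemm_ref(n, Y.data(), P.data(), Yn.data());
    ns_gemm_ref(n, P.data(), Z.data(), Zn.data());
    for (int i = 0; i < n*n; i++){ Y[i] = 0.5*Yn[i]; Z[i] = 0.5*Zn[i]; }
  }
  std::memcpy(T, Z.data(), sizeof(double)*n*n);             // T ~ (S/2)^{-1/2}
}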
828b2f450f1e164dddf1e4b40bbc00bf5c587557.cu
#include <string.h> #include <stdlib.h> #include <stdio.h> #include <math.h> #include <time.h> #include <cuda_runtime.h> #include "cublas_v2.h" // nvcc 036sgemm.c -lcublas #define DGEMM dgemm_ #define DSPEV dspev_ #define PRINTF printf #define EXIT exit #define CLOCKS_PER_SEC_C 1000000 #define MAXTIME 2147.48 void cputime(double *); double get_iter_Tmat(double *,double *,int ); void get_diag_Tmat(double *,double *,int ); void get_unit_Tmat(double *,int ); extern "C" { void DGEMM (char *, char *, int *, int *, int *,double *,double *, int *, double *, int *, double *, double *, int * ); } int matmul(double *X, int *LDX, int *ITYPE_X, double *Y, int *LDY, int *ITYPE_Y, double *Z, int *LDZ, int *NRZ, int *NCZ, int *NXY, double *ALPHA, double *BETA) { int m = *NRZ; int n = *NCZ; int k = *NXY; //char MATX=(ITYPE_X) ? 'N' : 'T'; //char MATY=(ITYPE_Y) ? 'N' : 'T'; // DGEMM(&MATX,&MATY,NRZ,NCZ,NXY,ALPHA,X,LDX,Y,LDY,BETA,Z,LDZ); cublasOperation_t MATX = (ITYPE_X) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t MATY = (ITYPE_Y) ? CUBLAS_OP_N : CUBLAS_OP_T; // cudaError_t cudaStat; // cudaMalloc status // cublasStatus_t stat; // CUBLAS functions status cublasHandle_t handle; // CUBLAS context // Step 1: Allocate memory on the device: double *d_X, *d_Y, *d_Z; cudaMalloc(&d_X, (m*k)*sizeof(double)); // X is an m x k matrix cudaMalloc(&d_Y, (k*n)*sizeof(double)); // Y is a k X n matix cudaMalloc(&d_Z, (m*n)*sizeof(double)); // Z is an m x n matix cublasCreate(&handle); // initialize CUBLAS context // Step 2: Initailize device memory from host: cublasSetMatrix(m, k, sizeof(double), X, m, d_X, m); cublasSetMatrix(k, n, sizeof(double), Y, k, d_Y, k); cublasSetMatrix(m, n, sizeof(double), Z, m, d_Z, m); // Step 3: Perform operation, function launches kernel on GPU itself cublasDgemm(handle, MATX, MATY, m, n, k, ALPHA, d_X, m, d_Y, k, BETA, d_Z, m); // Step 4: Copy the result back to the host: cublasGetMatrix(m, n, sizeof(double), d_Z, m, Z, m); // Step 5: Clean up cudaFree(d_X); cudaFree(d_Y); cudaFree(d_Z); cublasDestroy(handle); } //DGEMM ( TRANSA, TRANSB, M, N, K, ALPHA, A, LDA, B, LDB, BETA, C, LDC ) /* cublasStatus_t cublasDgemm(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, const double *alpha, const double *A, int lda, const double *B, int ldb, const double *beta, double *C, int ldc) */ int device_matmul(double *d_X, int *LDX, int *ITYPE_X, double *d_Y, int *LDY, int *ITYPE_Y, double *d_Z, int *LDZ, int *NRZ, int *NCZ, int *NXY, double *ALPHA, double *BETA, cublasHandle_t handle) { int m = *NRZ; int n = *NCZ; int k = *NXY; cublasOperation_t MATX = (ITYPE_X) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t MATY = (ITYPE_Y) ? 
CUBLAS_OP_N : CUBLAS_OP_T;
  //cublasHandle_t handle; // CUBLAS context
  cublasDgemm(handle, MATX, MATY, m, n, k, ALPHA, d_X, m, d_Y, k, BETA, d_Z, m);
}

#define _USE_LAPACK_
#ifdef _USE_LAPACK_
extern "C" {void DSPEV(char *, char *, int *, double [], double [], double [], int *, double [], int *);}
#endif

//=======================================================================
//ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc
//=======================================================================
int main()
//=======================================================================
{// begin routine
//=======================================================================
// I) Set up the problem

  int nstate;
  PRINTF("\n============================================\n");
  PRINTF("Enter the matrix size : ");scanf("%d",&nstate);

  int nstate_sq = nstate*nstate;
  double *S     = new double[nstate_sq];
  double *Tunit = new double[nstate_sq];
  double *Tdiag = new double[nstate_sq];
  double *Titer = new double[nstate_sq];

  PRINTF("Using random input\n\n");
  for(int i=0;i<nstate_sq;i++){S[i]=0.0;}
  for(int i=0;i<nstate;i++){int ind =i+nstate*i;S[ind]=2.0;}

  double seed=14571.0;
  srand48((long) seed);
  for(int i=0;i<nstate;i++){
  for(int j=i;j<nstate;j++){
    int ind  = i+nstate*j;
    int indt = j+nstate*i;
    int n=1,ierr=0;
    double rand=drand48();
    S[ind] += (rand-0.5)*2.0e-3;
    S[indt] = S[ind];
  }}//endfor

//=======================================================================
// II) Try three methods

  //get_unit_Tmat(Tunit,nstate);
  //get_diag_Tmat(S,Tdiag,nstate);
  get_iter_Tmat(S,Titer,nstate);
  get_iter_Tmat(S,Titer,nstate);
  get_iter_Tmat(S,Titer,nstate);
  double sum = 0;
  for(int i = 0; i < 1000; i++){
    sum += get_iter_Tmat(S,Titer,nstate);
  }
  printf("The average time taken is %g s for Update-2.0\n\n", sum/1000.0); // mean of the 1000 timed calls, in seconds

//=======================================================================
// III) Check the error of the iterative method
//
//  double err=0.0;
//  for(int i=0;i<nstate_sq;i++){
//    double tmp=Tdiag[i]-Titer[i];
//    tmp = tmp*tmp;
//    err = (err > tmp ? err : tmp);
//  }//endfor
//  err = sqrt(err);
//  PRINTF("Maximum error in any element : %g\n",err);
//
//  err=0.0;
//  for(int i=0;i<nstate;i++){
//  for(int j=i;j<nstate;j++){
//    int ind  = i + j*nstate;
//    int indt = j + i*nstate;
//    double tmp=Titer[ind]-Titer[indt];
//    tmp = tmp*tmp;
//    err = (err > tmp ?
err : tmp); // }}//endfor // err = sqrt(err); // PRINTF("Deviation from symmetric : %g\n",err); // PRINTF("============================================\n\n"); //======================================================================= }//end routine //======================================================================= //============================================================================ //cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc //============================================================================ // Diagonalize S and construct T=S^{-1/2} using eigenvalues and eigenvectors //============================================================================ void get_diag_Tmat(double *S,double *T,int nstate) //============================================================================ {//begin routine //============================================================================ // I) Get some scratch double cpu1,cpu2; cputime(&cpu1); int nstate_sq = nstate*nstate; double *umat = new double[nstate_sq]; double *scr_mat1 = new double[nstate_sq]; double *scr_mat2 = new double[nstate_sq]; double *s_eigs = new double[nstate]; double *scr1 = new double[3*nstate]; double *scr2 = new double[3*nstate]; //========================================================================== // II. Diagonalize S using rs_ FORTRAN diagonalization routine int ifound = 0; int ierr = 0; //---------------------------------------------------------------------- // Use LAPACK : Captain Jack is Happy. #ifdef _USE_LAPACK_ ifound ++; for(int i = 1; i <= nstate; i++){ for(int j = 1; j <= i; j++){ int ind = (i-1) + (j-1)*nstate; int ind2 = (i-1) + (j-1)*(2*nstate-j)/2; scr_mat1[ind2] = S[ind]; }}//endfor char Vstuff ='V'; char Lstuff ='L'; DSPEV(&Vstuff,&Lstuff,&nstate,scr_mat1,s_eigs,umat,&nstate,scr1,&ierr); #endif if(ifound!=1 || ierr != 0){ PRINTF("@@@@@@@@@@@@@@@@@@@@_error_@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n"); PRINTF("Error trying to diagonalize S : %d %d\n",ifound,ierr); PRINTF("@@@@@@@@@@@@@@@@@@@@_error_@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n"); EXIT(1); }//endif //========================================================================== // III. Compute inverse square root of eigenvalues: Occupation numbers // are HACKED!!!!! 
//---------------------------------------------------------------------- // A) Construct diagonal matrix using eigenvalues : sqrt(2/lamba) for(int i = 0; i < nstate; i++){s_eigs[i] = sqrt(2.0/s_eigs[i]);} memset(scr_mat1,0,sizeof(double)*nstate_sq); for(int i = 0; i < nstate; i++){ int ind = i*nstate+i; scr_mat1[ind]=s_eigs[i]; }/* endfor */ //------------------------------------------------------------------------ // B) Transform matrix back to original representation using eigenvectors double alpha = 1.0; double beta = 0.0; int itransp = 0; int inorm = 1; matmul(scr_mat1,&nstate,&inorm,umat,&nstate,&itransp,scr_mat2, &nstate,&nstate,&nstate,&nstate,&alpha,&beta); matmul(umat,&nstate,&inorm,scr_mat2,&nstate,&inorm,T, &nstate,&nstate,&nstate,&nstate,&alpha,&beta); //============================================================================ // IV) Free allocated temporary memory delete [] umat; delete [] scr_mat1; delete [] scr_mat2; delete [] s_eigs; delete [] scr1; delete [] scr2; cputime(&cpu2); PRINTF("nstate %d : cpu time diag : %g\n\n",nstate,cpu2-cpu1); //============================================================================ } /* End function */ //============================================================================ //============================================================================ //cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc //============================================================================ // Set Tmax to the Unit matrix : remove cputime overhead of diag to test // parallel performance //============================================================================ void get_unit_Tmat(double *Tunit,int nstate){ int nstate_sq = nstate*nstate; memset(Tunit,0,nstate_sq*sizeof(double)); for(int i=0;i<nstate;i++){int ind = i+i*nstate;Tunit[ind] = 1.0;} } //============================================================================ //============================================================================ //cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc //============================================================================ // Schulz iteration for inverse sqrt root : quadratic convergence! 
//============================================================================ double get_iter_Tmat(double *S,double *Titer,int nstate) //============================================================================ {//begin routine //============================================================================ // I) Get some scratch double cpu1,cpu2; cputime(&cpu1); int nstate_sq = nstate*nstate; double *scr_mat1 = new double[nstate_sq]; double *scr_mat2 = new double[nstate_sq]; double *scr_mat3 = new double[nstate_sq]; //============================================================================ // II) Set up CUBLAS context // cudaError_t cudaStat; // cudaMalloc status // cublasStatus_t stat; // CUBLAS functions status cublasHandle_t handle; // CUBLAS context //============================================================================ // III) Allocate memory on the device double *d_Titer, *d_mat1, *d_mat2, *d_mat3; cudaMalloc(&d_Titer, nstate_sq*sizeof(double)); cudaMalloc(&d_mat1, nstate_sq*sizeof(double)); cudaMalloc(&d_mat2, nstate_sq*sizeof(double)); cudaMalloc(&d_mat3, nstate_sq*sizeof(double)); cublasCreate(&handle); // initialize CUBLAS context //============================================================================ // IV) Schulz iteration //-------------------------------------------------------------------- // A) Initialize scr_mat1 and Titer on host // scr_mat1 = S/2 for(int i=0;i<nstate_sq;i++){scr_mat1[i] = S[i]/2.0;} // Titer = I = unit matrix memset(Titer,0,nstate_sq*sizeof(double)); for(int i=0;i<nstate;i++){int ind = i+i*nstate;Titer[ind] = 1.0;} //-------------------------------------------------------------------- // B) Initailize d_mat1 and d_Titer on device cublasSetMatrix(nstate, nstate, sizeof(double), scr_mat1, nstate, d_mat1, nstate); cublasSetMatrix(nstate, nstate, sizeof(double), Titer, nstate, d_Titer, nstate); //cublasSetMatrix(m, n, sizeof(double), Z, m, d_Z, m); //-------------------------------------------------------------------- // C) Iterate int iter = 0; //double tol_now = 1.0; while (iter < 4){ //(tol_now > 1.0e-15 && iter<10){ iter++; //-------------------------------- // scr_mat2 = 3*I - Titer*scr_mat1 int itransp = 0; int inorm = 1; double alpha = -1.0; double beta = 1.0; memset(scr_mat2,0,nstate_sq*sizeof(double)); for(int i=0;i<nstate;i++){int ind = i+i*nstate;scr_mat2[ind]=3.0;} cublasSetMatrix(nstate, nstate, sizeof(double), scr_mat2, nstate, d_mat2, nstate); device_matmul(d_Titer,&nstate,&inorm,d_mat1,&nstate,&itransp,d_mat2, &nstate,&nstate,&nstate,&nstate,&alpha,&beta,handle); //-------------------------------- // scr_mat1 = 0.5*scr_mat1*scr_mat2 = 0.5*scr_mat3*scr_mat2 alpha = 0.5; beta = 0.0; cudaMemcpy(d_mat3,d_mat1,nstate_sq*sizeof(double),cudaMemcpyDeviceToDevice); device_matmul(d_mat3,&nstate,&inorm,d_mat2,&nstate,&itransp,d_mat1, &nstate,&nstate,&nstate,&nstate,&alpha,&beta,handle); //-------------------------------- // Titer = 0.5*scr_mat2*Titer = 0.5*scr_mat2*scr_mat3 alpha = 0.5; beta = 0.0; cudaMemcpy(d_mat3,d_Titer,nstate_sq*sizeof(double),cudaMemcpyDeviceToDevice); device_matmul(d_mat2,&nstate,&inorm,d_mat3,&nstate,&itransp,d_Titer, &nstate,&nstate,&nstate,&nstate,&alpha,&beta,handle); //-------------------------------- // tolerence check // cublasGetMatrix(nstate, nstate, sizeof(double), d_mat3, nstate, scr_mat3, nstate); // cublasGetMatrix(nstate, nstate, sizeof(double), d_Titer, nstate, Titer, nstate); // tol_now = 0.0; // for(int i=0;i<nstate_sq;i++){ // double tmp=scr_mat3[i]-Titer[i]; // tol_now += tmp*tmp; // 
}//endfor // tol_now /= ((double)nstate_sq); // tol_now = sqrt(tol_now); //PRINTF("iter %d : tol %g\n",iter,tol_now); // PRINTF("iter %d : Skipping tolerence check, guessing 4 iterations should be done\n", iter); }//endwhile //if(tol_now>1.0e-15){ // PRINTF("@@@@@@@@@@@@@@@@@@@@_error_@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n"); // PRINTF("Iterative computation of S^{-1/2} failed\n"); // PRINTF("@@@@@@@@@@@@@@@@@@@@_error_@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n"); // EXIT(1); //}//endif /*==========================================================================*/ // V) Copy the result back to the host cublasGetMatrix(nstate, nstate, sizeof(double), d_Titer, nstate, Titer, nstate); /*==========================================================================*/ // VI) Clean up device cudaFree(d_Titer); cudaFree(d_mat1); cudaFree(d_mat2); cudaFree(d_mat3); cublasDestroy(handle); // VII) Clean up host delete [] scr_mat1; delete [] scr_mat2; delete [] scr_mat3; cputime(&cpu2); // PRINTF("nstate %d : cpu time iter : %g\n\n",nstate,cpu2-cpu1); return cpu2-cpu1; /*==========================================================================*/ }//end routine /*==========================================================================*/ /*==========================================================================*/ /*cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc*/ /*==========================================================================*/ /* subroutine to time processes */ /*==========================================================================*/ void cputime(double *time) /*==========================================================================*/ { int itime; static double to=0.,tn=0.; itime = clock(); tn = (double)((double)itime/(double)CLOCKS_PER_SEC_C); *time = tn; if(tn >= 0 && to >= 0){*time=tn;} if(tn < 0 && to >= 0){*time=MAXTIME*2.0+tn;} if(tn >= 0 && to < 0){*time=tn+MAXTIME;} if(tn < 0 && to < 0){*time=MAXTIME+tn;} to = tn; } /*==========================================================================*/
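// Since get_iter_Tmat() returns T ~ (S/2)^{-1/2}, a cheap host-side sanity check is that
// T*S*T ~ 2*I. The helper below is a sketch of such a check; it reuses the matmul() wrapper
// defined above (which round-trips through the GPU) and is not part of the original program.
void check_Tmat(double *S, double *T, int nstate)
{
  int n = nstate;
  int inorm = 1;
  double alpha = 1.0, beta = 0.0;
  double *tmp1 = new double[n*n];
  double *tmp2 = new double[n*n];

  matmul(T,   &n,&inorm, S, &n,&inorm, tmp1, &n,&n,&n,&n, &alpha,&beta);   // tmp1 = T*S
  matmul(tmp1,&n,&inorm, T, &n,&inorm, tmp2, &n,&n,&n,&n, &alpha,&beta);   // tmp2 = T*S*T

  double err = 0.0;
  for(int j = 0; j < n; j++){
    for(int i = 0; i < n; i++){
      double target = (i == j) ? 2.0 : 0.0;
      double d = fabs(tmp2[j*n + i] - target);
      err = (err > d ? err : d);
    }
  }
  PRINTF("max deviation of T*S*T from 2I : %g\n", err);

  delete [] tmp1;
  delete [] tmp2;
}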
69dbca0ed4d718846f130fde27f0fb81113a665c.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <time.h> #include <rocblas.h> #include <hip/hip_runtime_api.h> #include "common.h" const double one = 1.0; const double minus_one = -1.0; /// /// @brief Forms a LU decomposition in a scalar manner. /// /// @param[in] matrix size /// @param[in] ldA leading dimension /// @param[inout] A in: matrix, out: LU decomposition /// __global__ void simple_lu(int n, int ldA, double *A) { for (int i = 0; i < n; i++) { for (int j = i+1; j < n; j++) { A[i*ldA+j] /= A[i*ldA+i]; for (int k = i+1; k < n; k++) A[k*ldA+j] -= A[i*ldA+j] * A[k*ldA+i]; } } } /// /// @brief Forms a LU decomposition in a blocked manner. /// /// @param[in] handle cuBLAS handle /// @param[in] block_size block size /// @param[in] n matrix dimension /// @param[in] ldA leading dimension /// @param[inout] A in: matrix, out: LU decomposition /// void blocked_lu( hipblasHandle_t handle, int block_size, int n, int ldA, double *A) { int block_count = DIVCEIL(n, block_size); // allocate and fill an array that stores the block pointers double ***blocks = (double ***) malloc(block_count*sizeof(double**)); for (int i = 0; i < block_count; i++) { blocks[i] = (double **) malloc(block_count*sizeof(double*)); for (int j = 0; j < block_count; j++) blocks[i][j] = A+(j*ldA+i)*block_size; } // // iterate through the diagonal blocks // // +--+--+--+--+ // | 0| | | | // +--+--+--+--+ // | | 1| | | // +--+--+--+--+ // | | | 2| | // +--+--+--+--+ // | | | | 3| // +--+--+--+--+ // for (int i = 0; i < block_count; i++) { // calculate diagonal block size int dsize = min(block_size, n-i*block_size); // calculate trailing matrix size int tsize = n-(i+1)*block_size; // // compute the LU decomposition of the diagonal block // // +--+--+--+--+ // | | | | | // +--+--+--+--+ ## - process (read-write) // | |##| | | // +--+--+--+--+ // | | | | | // +--+--+--+--+ // | | | | | // +--+--+--+--+ // hipLaunchKernelGGL(( simple_lu), dim3(1),dim3(1), 0, 0, dsize, ldA, blocks[i][i]); if (0 < tsize) { // // blocks[i][i+1:] <- L1(blocks[i][i]) \ blocks[i][i+1:] // // +--+--+--+--+ // | | | | | // +--+--+--+--+ // | |rr|##|##| ## - process (read-write) // +--+--+--+--+ rr - read // | | | | | // +--+--+--+--+ // | | | | | // +--+--+--+--+ // CHECK_CUBLAS_ERROR(hipblasDtrsm(handle, HIPBLAS_SIDE_LEFT, HIPBLAS_FILL_MODE_LOWER, HIPBLAS_OP_N, HIPBLAS_DIAG_UNIT, dsize, tsize, &one, blocks[i][i], ldA, blocks[i][i+1], ldA)); // // blocks[i+1:][i] <- U(blocks[i][i]) / blocks[i+1:][i] // // +--+--+--+--+ // | | | | | // +--+--+--+--+ // | |rr| | | ## - process (read-write) // +--+--+--+--+ rr - read // | |##| | | // +--+--+--+--+ // | |##| | | // +--+--+--+--+ // CHECK_CUBLAS_ERROR(hipblasDtrsm(handle, HIPBLAS_SIDE_RIGHT, HIPBLAS_FILL_MODE_UPPER, HIPBLAS_OP_N, HIPBLAS_DIAG_NON_UNIT, tsize, dsize, &one, blocks[i][i], ldA, blocks[i+1][i], ldA)); // // blocks[i+1:][i+1:] <- blocks[i+1:][i+1:] - // blocks[i+1:][i] * blocks[i][i+1:] // // +--+--+--+--+ // | | | | | // +--+--+--+--+ // | | |rr|rr| ## - process (read-write) // +--+--+--+--+ rr - read // | |rr|##|##| // +--+--+--+--+ // | |rr|##|##| // +--+--+--+--+ // CHECK_CUBLAS_ERROR(hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, tsize, tsize, dsize, &minus_one, blocks[i+1][i], ldA, blocks[i][i+1], ldA, &one, blocks[i+1][i+1], ldA)); } } // free allocated resources for (int i = 0; i < block_count; i++) free(blocks[i]); free(blocks); } int main(int argc, char **argv) { // // check arguments // if (argc != 3) { fprintf(stderr, "[error] 
Incorrect arguments. Use %s (n) (block size)\n", argv[0]); return EXIT_FAILURE; } int n = atoi(argv[1]); if (n < 1) { fprintf(stderr, "[error] Invalid matrix dimension.\n"); return EXIT_FAILURE; } int block_size = atoi(argv[2]); if (block_size < 2) { fprintf(stderr, "[error] Invalid block size.\n"); return EXIT_FAILURE; } // // Initialize matrix A and store a duplicate to matrix B. Matrix C is for // validation. // srand(time(NULL)); hipblasHandle_t handle; CHECK_CUBLAS_ERROR(hipblasCreate(&handle)); double *A; int ldA = DIVCEIL(n, 32)*32; // align to 256 bytes CHECK_CUDA_ERROR(hipMallocManaged(&A, n*ldA*sizeof(double))); int ldB, ldC; ldB = ldC = DIVCEIL(n, 8)*8; // align to 64 bytes double *B = (double *) aligned_alloc(8, n*ldB*sizeof(double)); double *C = (double *) aligned_alloc(8, n*ldC*sizeof(double)); if (B == NULL || C == NULL) { fprintf(stderr, "[error] Failed to allocate memory.\n"); return EXIT_FAILURE; } // A <- random diagonally dominant matrix for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) A[i*ldA+j] = B[i*ldB+j] = 2.0*rand()/RAND_MAX - 1.0; A[i*ldA+i] = B[i*ldB+i] = 1.0*rand()/RAND_MAX + n; } // // compute // struct timespec ts_start; clock_gettime(CLOCK_MONOTONIC, &ts_start); hipProfilerStart(); // A <- (L,U) blocked_lu(handle, block_size, n, ldA, A); CHECK_CUDA_ERROR(hipDeviceSynchronize()); hipProfilerStop(); struct timespec ts_stop; clock_gettime(CLOCK_MONOTONIC, &ts_stop); printf("Time = %f s\n", ts_stop.tv_sec - ts_start.tv_sec + 1.0E-9*(ts_stop.tv_nsec - ts_start.tv_nsec)); // C <- L * U mul_lu(n, ldA, ldC, A, C); // // validate // // C <- L * U - B for (int i = 0; i < n; i++) for (int j = 0; j < n; j++) C[i*ldC+j] -= B[i*ldB+j]; // compute || C ||_F / || B ||_F = || L * U - B ||_F / || B ||_F double residual = dlange_("Frobenius", &n, &n, C, &ldC, NULL) / dlange_("Frobenius", &n, &n, B, &ldB, NULL); printf("Residual = %E\n", residual); int ret = EXIT_SUCCESS; if (1.0E-12 < residual) { fprintf(stderr, "The residual is too large.\n"); ret = EXIT_FAILURE; } // // cleanup // CHECK_CUBLAS_ERROR(hipblasDestroy(handle)); CHECK_CUDA_ERROR(hipFree(A)); free(B); free(C); return ret; }
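// In matrix terms, one step of blocked_lu() above factors the diagonal block and updates the
// trailing matrix as follows (reading of the three BLAS calls, with A11 the dsize x dsize
// diagonal block and A22 the tsize x tsize trailing block):
//
//   A11            -> L11 * U11                 (simple_lu on the diagonal block)
//   A12 (U12 part) <- inv(L11) * A12            (Dtrsm: left, lower, unit-diagonal)
//   A21 (L21 part) <- A21 * inv(U11)            (Dtrsm: right, upper, non-unit diagonal)
//   A22            <- A22 - L21 * U12           (Dgemm)
//
// so after the last step A holds L (unit lower triangle, implicit ones) and U (upper triangle)
// in place. No pivoting is done, which is safe here because the driver builds a diagonally
// dominant test matrix.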
69dbca0ed4d718846f130fde27f0fb81113a665c.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <cublas_v2.h> #include <cuda_profiler_api.h> #include "common.h" const double one = 1.0; const double minus_one = -1.0; /// /// @brief Forms a LU decomposition in a scalar manner. /// /// @param[in] matrix size /// @param[in] ldA leading dimension /// @param[inout] A in: matrix, out: LU decomposition /// __global__ void simple_lu(int n, int ldA, double *A) { for (int i = 0; i < n; i++) { for (int j = i+1; j < n; j++) { A[i*ldA+j] /= A[i*ldA+i]; for (int k = i+1; k < n; k++) A[k*ldA+j] -= A[i*ldA+j] * A[k*ldA+i]; } } } /// /// @brief Forms a LU decomposition in a blocked manner. /// /// @param[in] handle cuBLAS handle /// @param[in] block_size block size /// @param[in] n matrix dimension /// @param[in] ldA leading dimension /// @param[inout] A in: matrix, out: LU decomposition /// void blocked_lu( cublasHandle_t handle, int block_size, int n, int ldA, double *A) { int block_count = DIVCEIL(n, block_size); // allocate and fill an array that stores the block pointers double ***blocks = (double ***) malloc(block_count*sizeof(double**)); for (int i = 0; i < block_count; i++) { blocks[i] = (double **) malloc(block_count*sizeof(double*)); for (int j = 0; j < block_count; j++) blocks[i][j] = A+(j*ldA+i)*block_size; } // // iterate through the diagonal blocks // // +--+--+--+--+ // | 0| | | | // +--+--+--+--+ // | | 1| | | // +--+--+--+--+ // | | | 2| | // +--+--+--+--+ // | | | | 3| // +--+--+--+--+ // for (int i = 0; i < block_count; i++) { // calculate diagonal block size int dsize = min(block_size, n-i*block_size); // calculate trailing matrix size int tsize = n-(i+1)*block_size; // // compute the LU decomposition of the diagonal block // // +--+--+--+--+ // | | | | | // +--+--+--+--+ ## - process (read-write) // | |##| | | // +--+--+--+--+ // | | | | | // +--+--+--+--+ // | | | | | // +--+--+--+--+ // simple_lu<<<1,1>>>(dsize, ldA, blocks[i][i]); if (0 < tsize) { // // blocks[i][i+1:] <- L1(blocks[i][i]) \ blocks[i][i+1:] // // +--+--+--+--+ // | | | | | // +--+--+--+--+ // | |rr|##|##| ## - process (read-write) // +--+--+--+--+ rr - read // | | | | | // +--+--+--+--+ // | | | | | // +--+--+--+--+ // CHECK_CUBLAS_ERROR(cublasDtrsm(handle, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, CUBLAS_DIAG_UNIT, dsize, tsize, &one, blocks[i][i], ldA, blocks[i][i+1], ldA)); // // blocks[i+1:][i] <- U(blocks[i][i]) / blocks[i+1:][i] // // +--+--+--+--+ // | | | | | // +--+--+--+--+ // | |rr| | | ## - process (read-write) // +--+--+--+--+ rr - read // | |##| | | // +--+--+--+--+ // | |##| | | // +--+--+--+--+ // CHECK_CUBLAS_ERROR(cublasDtrsm(handle, CUBLAS_SIDE_RIGHT, CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_N, CUBLAS_DIAG_NON_UNIT, tsize, dsize, &one, blocks[i][i], ldA, blocks[i+1][i], ldA)); // // blocks[i+1:][i+1:] <- blocks[i+1:][i+1:] - // blocks[i+1:][i] * blocks[i][i+1:] // // +--+--+--+--+ // | | | | | // +--+--+--+--+ // | | |rr|rr| ## - process (read-write) // +--+--+--+--+ rr - read // | |rr|##|##| // +--+--+--+--+ // | |rr|##|##| // +--+--+--+--+ // CHECK_CUBLAS_ERROR(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, tsize, tsize, dsize, &minus_one, blocks[i+1][i], ldA, blocks[i][i+1], ldA, &one, blocks[i+1][i+1], ldA)); } } // free allocated resources for (int i = 0; i < block_count; i++) free(blocks[i]); free(blocks); } int main(int argc, char **argv) { // // check arguments // if (argc != 3) { fprintf(stderr, "[error] Incorrect arguments. 
Use %s (n) (block size)\n", argv[0]); return EXIT_FAILURE; } int n = atoi(argv[1]); if (n < 1) { fprintf(stderr, "[error] Invalid matrix dimension.\n"); return EXIT_FAILURE; } int block_size = atoi(argv[2]); if (block_size < 2) { fprintf(stderr, "[error] Invalid block size.\n"); return EXIT_FAILURE; } // // Initialize matrix A and store a duplicate to matrix B. Matrix C is for // validation. // srand(time(NULL)); cublasHandle_t handle; CHECK_CUBLAS_ERROR(cublasCreate(&handle)); double *A; int ldA = DIVCEIL(n, 32)*32; // align to 256 bytes CHECK_CUDA_ERROR(cudaMallocManaged(&A, n*ldA*sizeof(double))); int ldB, ldC; ldB = ldC = DIVCEIL(n, 8)*8; // align to 64 bytes double *B = (double *) aligned_alloc(8, n*ldB*sizeof(double)); double *C = (double *) aligned_alloc(8, n*ldC*sizeof(double)); if (B == NULL || C == NULL) { fprintf(stderr, "[error] Failed to allocate memory.\n"); return EXIT_FAILURE; } // A <- random diagonally dominant matrix for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) A[i*ldA+j] = B[i*ldB+j] = 2.0*rand()/RAND_MAX - 1.0; A[i*ldA+i] = B[i*ldB+i] = 1.0*rand()/RAND_MAX + n; } // // compute // struct timespec ts_start; clock_gettime(CLOCK_MONOTONIC, &ts_start); cudaProfilerStart(); // A <- (L,U) blocked_lu(handle, block_size, n, ldA, A); CHECK_CUDA_ERROR(cudaDeviceSynchronize()); cudaProfilerStop(); struct timespec ts_stop; clock_gettime(CLOCK_MONOTONIC, &ts_stop); printf("Time = %f s\n", ts_stop.tv_sec - ts_start.tv_sec + 1.0E-9*(ts_stop.tv_nsec - ts_start.tv_nsec)); // C <- L * U mul_lu(n, ldA, ldC, A, C); // // validate // // C <- L * U - B for (int i = 0; i < n; i++) for (int j = 0; j < n; j++) C[i*ldC+j] -= B[i*ldB+j]; // compute || C ||_F / || B ||_F = || L * U - B ||_F / || B ||_F double residual = dlange_("Frobenius", &n, &n, C, &ldC, NULL) / dlange_("Frobenius", &n, &n, B, &ldB, NULL); printf("Residual = %E\n", residual); int ret = EXIT_SUCCESS; if (1.0E-12 < residual) { fprintf(stderr, "The residual is too large.\n"); ret = EXIT_FAILURE; } // // cleanup // CHECK_CUBLAS_ERROR(cublasDestroy(handle)); CHECK_CUDA_ERROR(cudaFree(A)); free(B); free(C); return ret; }
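// main() above validates the factorization with mul_lu(), which comes from "common.h" and is
// not shown in this file. The routine below is only a plausible reference implementation, an
// assumption about its behaviour rather than the actual one: it rebuilds C = L*U from the
// packed in-place factors (unit lower L below the diagonal, U on and above it, column-major).
void mul_lu_ref(int n, int ldA, int ldC, const double *A, double *C)
{
    for (int j = 0; j < n; j++) {
        for (int i = 0; i < n; i++) {
            // C(i,j) = sum_k L(i,k) * U(k,j); L(i,k) = 0 for k > i, U(k,j) = 0 for k > j
            double s = 0.0;
            int kmax = (i < j) ? i : j;
            for (int k = 0; k <= kmax; k++) {
                double l = (k == i) ? 1.0 : A[k*ldA + i];   // unit diagonal of L
                double u = A[j*ldA + k];
                s += l * u;
            }
            C[j*ldC + i] = s;
        }
    }
}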
9c61b889aacbf05326f9c3ef813d76169865ede2.hip
// !!! This is a file automatically generated by hipify!!! /************************************************************************************************ * Implementing Singular Value Decomposition on GPU using CUDA using algorithm * * given in IPDPS '09 paper "Singular Value Decomposition on GPU using CUDA" * * * * Copyright (c) 2009 International Institute of Information Technology, Hyderabad. * * All rights reserved. * * * * Permission to use, copy, modify and distribute this software and its documentation for * * educational purpose is hereby granted without fee, provided that the above copyright * * notice and this permission notice appear in all copies of this software and that you do * * not sell the software. * * * * THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND, EXPRESS, IMPLIED OR * * OTHERWISE. * * * * Created by Sheetal Lahabar. * * Tested on CUDA 2.0 * ************************************************************************************************/ #ifndef _EXAMPLE_CU_ #define _EXAMPLE_CU_ #include "example.h" //Include the below file in your main program #include "cusvd.cu" float *initialize(int ind) { int i = 0, j = 0, l = 0; float *temp = (float*)malloc(sizeof(float) * ind * ind); for(i=0 ; i < ind ; i++) { for(j=0 ; j < ind ; j++) { if(i==j) temp[l++] = 1; else temp[l++] = 0; } } return temp; } int main(int argc, char** argv) { bool result; double *Sigma; //M>=N and M and N are a multiple of 32 int M = 512, N = 512; float *A, *U, *VT, *d_A, *d_U, *d_VT; //Step 1 - Read A in column major order A = (float*)malloc(sizeof(float) * M * N); FILE *fp = fopen("data", "r"); for(i=0 ; i < M * N ; i++) { fscanf(fp,"%f", &A[i]); } fclose(fp); //Step 2 Sigma = (double*)malloc(sizeof(double)*N); //Step 3 CUT_DEVICE_INIT(argc, argv); status = hipblasInit(); if(status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "Error in initialization"); return EXIT_FAILURE; } //Step 4 status = CUDA_SAFE_CALL(hipblasAlloc(M*N*sizeof(float), sizeof(float), (void**)&d_A)); status = CUDA_SAFE_CALL(hipblasAlloc(M*M*sizeof(float), sizeof(float), (void**)&d_U)); status = CUDA_SAFE_CALL(hipblasAlloc(N*N*sizeof(float), sizeof(float), (void**)&d_VT)); //Step 5 U = initialize(M); VT = initialize(N); status = CUDA_SAFE_CALL(hipblasSetMatrix(M, N, sizeof(float), A, M, d_A, M)); status = CUDA_SAFE_CALL(hipblasSetMatrix(M, N, sizeof(float), U, M, d_U, M)); status = CUDA_SAFE_CALL(hipblasSetMatrix(M, N, sizeof(float), VT, M, d_VT, M)); //Step 6 timer = 0; CUT_SAFE_CALL(cutCreateTimer(&timer)); CUT_SAFE_CALL(cutStartTimer(timer)); result = cusvd(M, N, d_A, d_U, d_VT, Sigma); CUT_SAFE_CALL(cutStopTimer(timer)); printf("SVD processing time: %f (ms)\n", cutGetTimerValue(timer)); CUT_SAFE_CALL(cutDeleteTimer(timer)); /* printf("Copy and print VT matrix\n"); CUDA_SAFE_CALL(hipMemcpy(VT, d_VT, sizeof(float)*N*N, hipMemcpyDeviceToHost)); for(int i=0; i < N; i++) for(int j=0; j < N; j++) printf("%f\n", check2[i*N+j]); */ //Step 7 free(A); CUDA_SAFE_CALL(hipFree(d_A)); CUDA_SAFE_CALL(hipFree(d_U)); CUDA_SAFE_CALL(hipFree(d_VT)); CUT_EXIT(argc, argv); return 0; } #endif
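// The example above reads its input matrix from a plain-text file named "data" holding M*N
// whitespace-separated floats, interpreted in column-major order. The original data file is
// not provided; the small generator below is only a sketch that fills it with arbitrary
// uniform noise so the example can be exercised.
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    const int M = 512, N = 512;
    FILE *fp = fopen("data", "w");
    if (fp == NULL) { return 1; }
    for (int i = 0; i < M * N; i++) {
        fprintf(fp, "%f\n", (float)rand() / (float)RAND_MAX);
    }
    fclose(fp);
    return 0;
}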
9c61b889aacbf05326f9c3ef813d76169865ede2.cu
/************************************************************************************************ * Implementing Singular Value Decomposition on GPU using CUDA using algorithm * * given in IPDPS '09 paper "Singular Value Decomposition on GPU using CUDA" * * * * Copyright (c) 2009 International Institute of Information Technology, Hyderabad. * * All rights reserved. * * * * Permission to use, copy, modify and distribute this software and its documentation for * * educational purpose is hereby granted without fee, provided that the above copyright * * notice and this permission notice appear in all copies of this software and that you do * * not sell the software. * * * * THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND, EXPRESS, IMPLIED OR * * OTHERWISE. * * * * Created by Sheetal Lahabar. * * Tested on CUDA 2.0 * ************************************************************************************************/ #ifndef _EXAMPLE_CU_ #define _EXAMPLE_CU_ #include "example.h" //Include the below file in your main program #include "cusvd.cu" float *initialize(int ind) { int i = 0, j = 0, l = 0; float *temp = (float*)malloc(sizeof(float) * ind * ind); for(i=0 ; i < ind ; i++) { for(j=0 ; j < ind ; j++) { if(i==j) temp[l++] = 1; else temp[l++] = 0; } } return temp; } int main(int argc, char** argv) { bool result; double *Sigma; //M>=N and M and N are a multiple of 32 int M = 512, N = 512; float *A, *U, *VT, *d_A, *d_U, *d_VT; //Step 1 - Read A in column major order A = (float*)malloc(sizeof(float) * M * N); FILE *fp = fopen("data", "r"); for(i=0 ; i < M * N ; i++) { fscanf(fp,"%f", &A[i]); } fclose(fp); //Step 2 Sigma = (double*)malloc(sizeof(double)*N); //Step 3 CUT_DEVICE_INIT(argc, argv); status = cublasInit(); if(status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "Error in initialization"); return EXIT_FAILURE; } //Step 4 status = CUDA_SAFE_CALL(cublasAlloc(M*N*sizeof(float), sizeof(float), (void**)&d_A)); status = CUDA_SAFE_CALL(cublasAlloc(M*M*sizeof(float), sizeof(float), (void**)&d_U)); status = CUDA_SAFE_CALL(cublasAlloc(N*N*sizeof(float), sizeof(float), (void**)&d_VT)); //Step 5 U = initialize(M); VT = initialize(N); status = CUDA_SAFE_CALL(cublasSetMatrix(M, N, sizeof(float), A, M, d_A, M)); status = CUDA_SAFE_CALL(cublasSetMatrix(M, N, sizeof(float), U, M, d_U, M)); status = CUDA_SAFE_CALL(cublasSetMatrix(M, N, sizeof(float), VT, M, d_VT, M)); //Step 6 timer = 0; CUT_SAFE_CALL(cutCreateTimer(&timer)); CUT_SAFE_CALL(cutStartTimer(timer)); result = cusvd(M, N, d_A, d_U, d_VT, Sigma); CUT_SAFE_CALL(cutStopTimer(timer)); printf("SVD processing time: %f (ms)\n", cutGetTimerValue(timer)); CUT_SAFE_CALL(cutDeleteTimer(timer)); /* printf("Copy and print VT matrix\n"); CUDA_SAFE_CALL(cudaMemcpy(VT, d_VT, sizeof(float)*N*N, cudaMemcpyDeviceToHost)); for(int i=0; i < N; i++) for(int j=0; j < N; j++) printf("%f\n", check2[i*N+j]); */ //Step 7 free(A); CUDA_SAFE_CALL(cudaFree(d_A)); CUDA_SAFE_CALL(cudaFree(d_U)); CUDA_SAFE_CALL(cudaFree(d_VT)); CUT_EXIT(argc, argv); return 0; } #endif
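// The example above uses the legacy cuBLAS helper API (cublasInit/cublasAlloc) together with
// the old cutil timer macros. For reference, the same allocate-and-upload step written against
// the cuBLAS v2 API would look roughly like the sketch below (error handling trimmed; note
// that U is M x M and VT is N x N, so each upload uses its own dimensions):
#include <cuda_runtime.h>
#include <cublas_v2.h>

static int setup_svd_buffers_v2(int M, int N, const float *A, const float *U, const float *VT,
                                float **d_A, float **d_U, float **d_VT, cublasHandle_t *handle)
{
    if (cublasCreate(handle) != CUBLAS_STATUS_SUCCESS) return -1;
    if (cudaMalloc((void**)d_A,  (size_t)M * N * sizeof(float)) != cudaSuccess) return -1;
    if (cudaMalloc((void**)d_U,  (size_t)M * M * sizeof(float)) != cudaSuccess) return -1;
    if (cudaMalloc((void**)d_VT, (size_t)N * N * sizeof(float)) != cudaSuccess) return -1;
    cublasSetMatrix(M, N, sizeof(float), A,  M, *d_A,  M);
    cublasSetMatrix(M, M, sizeof(float), U,  M, *d_U,  M);
    cublasSetMatrix(N, N, sizeof(float), VT, N, *d_VT, N);
    return 0;
}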
567229fcfd5670e0b10af3fe53a71513e7851811.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <assert.h> #include <THH/THH.h> #include <vector> #include <torch/torch.h> #include <torch/extension.h> #define eps 1e-10 #define SCALE 1.0 #define MAX_DIS 9999999999.0 #include <sys/time.h> static void HandleError( hipError_t err, const char *file, int line ) { if (err != hipSuccess) { printf( "%s in %s at line %d\n", hipGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) //extern THCState * state; template<typename scalar_t> __host__ __device__ scalar_t line_variance_parallel_cuda_abs(scalar_t a){ if (a > 0.0){ return a; } else{ return -a; } } template<typename scalar_t> __host__ __device__ scalar_t line_variance_parallel_cuda_square(scalar_t a){ return a * a; } template<typename scalar_t> __host__ __device__ scalar_t line_variance_parallel_cuda_divide_non_zero(scalar_t a){ if (a == 0){ return eps; } if (a < 0){ return a - eps; } if (a > 0){ return a + eps; } return eps; } template<typename scalar_t> __host__ __device__ scalar_t line_variance_parallel_cuda_min_dis(scalar_t a, scalar_t b, scalar_t c){ scalar_t min_d = a; if (b < min_d){ min_d = b; } if (c < min_d){ min_d = c; } return min_d; } template<typename scalar_t> __host__ __device__ scalar_t line_variance_parallel_cuda_min_dis_idx(scalar_t a, scalar_t b, scalar_t c){ scalar_t min_d = a; int min_idx = 0; if (b < min_d){ min_d = b; min_idx = 1; } if (c < min_d){ min_d = c; min_idx = 2; } return min_idx; } template <typename scalar_t> __host__ __device__ scalar_t distance_line(scalar_t x1, scalar_t y1, scalar_t x2, scalar_t y2, scalar_t x, scalar_t y){ scalar_t dx1x2 = -x1 + x2; scalar_t dy1y2 = -y1 + y2; scalar_t dx1x = x - x1; scalar_t dy1y = y - y1; scalar_t c1 = - x * x1 + x * x2 + x1 * x1 - x1 * x2 - y * y1 + y * y2 + y1 * y1 - y1 * y2; scalar_t c2 = x1 * x1 - 2 * x1 * x2 + x2 * x2 + y1 * y1 - 2 * y1 * y2 + y2 * y2; scalar_t d1 = -dx1x + dx1x2 * c1 / line_variance_parallel_cuda_divide_non_zero(c2); scalar_t d2 = -dy1y + dy1y2 * c1 / line_variance_parallel_cuda_divide_non_zero(c2); scalar_t dis = line_variance_parallel_cuda_abs(d1) + line_variance_parallel_cuda_abs(d2); return dis; } template <typename scalar_t> __host__ __device__ scalar_t distance_point(scalar_t x1, scalar_t y1, scalar_t x, scalar_t y){ return line_variance_parallel_cuda_abs(x - x1) + line_variance_parallel_cuda_abs(y - y1); } template <typename scalar_t> __host__ __device__ void distance(scalar_t* ret, scalar_t x1, scalar_t y1, scalar_t x2, scalar_t y2, scalar_t x3, scalar_t y3, scalar_t x, scalar_t y) { //https://en.wikipedia.org/wiki/Barycentric_coordinate_system scalar_t x1_x2 = x1 - x2; scalar_t y1_y2 = y1 - y2; scalar_t x1_x3 = x1 - x3; scalar_t y1_y3 = y1 - y3; scalar_t x2_x3 = x2 - x3; scalar_t y2_y3 = y2 - y3; scalar_t x_x1 = x - x1; scalar_t y_y1 = y - y1; scalar_t x_x2 = x - x2; scalar_t y_y2 = y - y2; scalar_t x_x3 = x - x3; scalar_t y_y3 = y - y3; scalar_t k1 = y2_y3 * x_x3 - x2_x3 * y_y3; scalar_t k2 = x1_x3 * y_y3 - y1_y3 * x_x3; scalar_t k3 = y2_y3 * x1_x3 - x2_x3 * y1_y3; if(k3 == 0){ // not a legal triangle ret[0] = -2; return; } if(k3 > 0){ // clock-wise triangle ret[0] = -1; return; } //scalar_t l1 = k1 / line_variance_parallel_cuda_divide_non_zero(k3); //scalar_t l2 = k2 / line_variance_parallel_cuda_divide_non_zero(k3); scalar_t l1 = k1 / k3; scalar_t l2 = k2 / k3; scalar_t l3 = 1 - l1 - l2; scalar_t dis12 = 
distance_line(x1, y1, x2, y2, x, y); scalar_t dis23 = distance_line(x2, y2, x3, y3, x, y); scalar_t dis13 = distance_line(x1, y1, x3, y3, x, y); if (l1 >= 0 && l2 >= 0 && l3 >= 0){ // lie inside or on the boundary scalar_t min_dis_line = line_variance_parallel_cuda_min_dis(dis12, dis23, dis13); scalar_t min_dis_line_idx = line_variance_parallel_cuda_min_dis_idx(dis12, dis23, dis13); ret[0] = 0; ret[1] = min_dis_line; ret[2] = min_dis_line_idx; return; } // whether point can calculate distance to certain line bool within12 = ((y1_y2 * y_y1 + x_x1 * x1_x2) * (y1_y2 * y_y2 + x_x2 * x1_x2)) <= 0; bool within23 = ((y2_y3 * y_y3 + x_x3 * x2_x3) * (y2_y3 * y_y2 + x_x2 * x2_x3)) <= 0; bool within13 = ((y1_y3 * y_y1 + x_x1 * x1_x3) * (y1_y3 * y_y3 + x_x3 * x1_x3)) <= 0; dis12 = within12 ? dis12 : MAX_DIS; dis23 = within23 ? dis23 : MAX_DIS; dis13 = within13 ? dis13 : MAX_DIS; scalar_t min_dis_line = line_variance_parallel_cuda_min_dis(dis12, dis23, dis13); scalar_t min_dis_line_idx = line_variance_parallel_cuda_min_dis_idx(dis12, dis23, dis13); scalar_t d1 = distance_point(x1, y1, x, y); scalar_t d2 = distance_point(x2, y2, x, y); scalar_t d3 = distance_point(x3, y3, x, y); scalar_t min_dis_point = line_variance_parallel_cuda_min_dis(d1, d2, d3); scalar_t min_dis_point_idx = line_variance_parallel_cuda_min_dis_idx(d1, d2, d3); if (min_dis_line < min_dis_point){ ret[0] = 1; ret[1] = min_dis_line; ret[2] = min_dis_line_idx; } else{ ret[0] = 2; ret[1] = min_dis_point; ret[2] = min_dis_point_idx; } } template <typename scalar_t> __global__ void line_variance_parallel_cuda_forward_kernel_batch( const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> img_fea_bxnxd, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> grid_fea_bxkxd, const torch::PackedTensorAccessor<scalar_t, 4, torch::RestrictPtrTraits, size_t> grid_bxkx3x2, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> img_pos_bxnx2, torch::PackedTensorAccessor<scalar_t, 2, torch::RestrictPtrTraits, size_t> variance_bxn, torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> reconstruct_bxnxd, torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> buffer_bxnxk, int bnum, int n_pixel, int n_grid, int d_fea, float sigma ) { // bidx * height + heiidx int presentthread = blockIdx.x * blockDim.x + threadIdx.x; int pixel_idx = presentthread % n_pixel; int bidx = (presentthread - pixel_idx) / n_pixel; if (bidx >= bnum || pixel_idx >= n_pixel) { return; } ///////////////////////////////////////////////////////////////// // which pixel it belongs to int total_idx = bidx * n_pixel * n_grid + pixel_idx * n_grid; scalar_t pixel_x = img_pos_bxnx2[bidx][pixel_idx][0]; scalar_t pixel_y = img_pos_bxnx2[bidx][pixel_idx][1]; scalar_t x0 = pixel_x * SCALE; scalar_t y0 = pixel_y * SCALE; scalar_t min_distance = 0.0; scalar_t find_sign = 0.0; scalar_t sum_exp = 0.0; scalar_t max_dist = -MAX_DIS; scalar_t ax, ay, bx, by, cx, cy; scalar_t condition; int img_pos_total_idx = bidx * n_pixel * 2 + pixel_idx * 2; scalar_t ret[3] = {0}; for (int grididx = 0; grididx < n_grid; grididx++){ ax = grid_bxkx3x2[bidx][grididx][0][0] * SCALE; ay = grid_bxkx3x2[bidx][grididx][0][1] * SCALE; bx = grid_bxkx3x2[bidx][grididx][1][0] * SCALE; by = grid_bxkx3x2[bidx][grididx][1][1] * SCALE; cx = grid_bxkx3x2[bidx][grididx][2][0] * SCALE; cy = grid_bxkx3x2[bidx][grididx][2][1] * SCALE; distance(ret, ax, ay, bx, by, cx, cy, x0, y0); condition = ret[0]; min_distance = 
ret[1]; if (condition < 0) { min_distance = - MAX_DIS; } else if (condition == 0 && find_sign == 0){ min_distance = min_distance / sigma; find_sign == 1; } else{ min_distance = - min_distance / sigma; } max_dist = max_dist > min_distance ? max_dist : min_distance; buffer_bxnxk[bidx][pixel_idx][grididx] = min_distance; } for (int grididx = 0; grididx < n_grid; grididx++){ buffer_bxnxk[bidx][pixel_idx][grididx] = expf(buffer_bxnxk[bidx][pixel_idx][grididx] - max_dist); sum_exp += buffer_bxnxk[bidx][pixel_idx][grididx]; } scalar_t variance = 0.0; scalar_t grid_f = 0.0; scalar_t pixel_f = 0.0; scalar_t diff = 0.0; scalar_t w = 0.0; scalar_t difference = 0.0; for (int grididx = 0; grididx < n_grid; grididx++){ int in_sign = 0; if(buffer_bxnxk[bidx][pixel_idx][grididx] == 1){ in_sign = 1; } buffer_bxnxk[bidx][pixel_idx][grididx] = buffer_bxnxk[bidx][pixel_idx][grididx] / (sum_exp + 1e-15); w = buffer_bxnxk[bidx][pixel_idx][grididx]; difference = 0.0; for (int d = 0; d < d_fea; d++){ grid_f = grid_fea_bxkxd[bidx][grididx][d]; pixel_f = img_fea_bxnxd[bidx][pixel_idx][d]; reconstruct_bxnxd[bidx][pixel_idx][d] += w * grid_f; diff = line_variance_parallel_cuda_square(grid_f - pixel_f); difference = difference + diff; } variance = variance + w * difference; if(in_sign == 1){ //hard variance for upsample buffer_bxnxk[bidx][pixel_idx][grididx] = difference; } else{ buffer_bxnxk[bidx][pixel_idx][grididx] = 0; } } variance_bxn[bidx][pixel_idx] = variance; } template <typename scalar_t> __global__ void line_variance_parallel_cuda_forward_kernel_batch_calc_buffer( const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> img_fea_bxnxd, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> grid_fea_bxkxd, const torch::PackedTensorAccessor<scalar_t, 4, torch::RestrictPtrTraits, size_t> grid_bxkx3x2, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> img_pos_bxnx2, torch::PackedTensorAccessor<scalar_t, 2, torch::RestrictPtrTraits, size_t> variance_bxn, torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> reconstruct_bxnxd, torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> buffer_bxnxk, int bnum, int n_pixel, int n_grid, int d_fea, float sigma ) { // bidx * height + heiidx int presentthread = blockIdx.x * blockDim.x + threadIdx.x; int grididx = presentthread % n_grid; int pixel_idx = (presentthread - grididx) / n_grid; int bidx = 0; if (bidx >= bnum || pixel_idx >= n_pixel || grididx >= n_grid) { return; } ///////////////////////////////////////////////////////////////// // which pixel it belongs to scalar_t pixel_x = img_pos_bxnx2[bidx][pixel_idx][0]; scalar_t pixel_y = img_pos_bxnx2[bidx][pixel_idx][1]; scalar_t x0 = pixel_x * SCALE; scalar_t y0 = pixel_y * SCALE; scalar_t min_distance = 0.0; scalar_t ax, ay, bx, by, cx, cy; scalar_t condition; scalar_t ret[3] = {0}; ax = grid_bxkx3x2[bidx][grididx][0][0] * SCALE; ay = grid_bxkx3x2[bidx][grididx][0][1] * SCALE; bx = grid_bxkx3x2[bidx][grididx][1][0] * SCALE; by = grid_bxkx3x2[bidx][grididx][1][1] * SCALE; cx = grid_bxkx3x2[bidx][grididx][2][0] * SCALE; cy = grid_bxkx3x2[bidx][grididx][2][1] * SCALE; distance(ret, ax, ay, bx, by, cx, cy, x0, y0); condition = ret[0]; min_distance = ret[1]; if (condition < 0) { min_distance = - MAX_DIS; } else if (condition == 0){ min_distance = min_distance / sigma; } else{ min_distance = - min_distance / sigma; } buffer_bxnxk[bidx][pixel_idx][grididx] = min_distance; } template <typename scalar_t> 
__global__ void line_variance_parallel_cuda_forward_kernel_batch_max_sum( const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> img_fea_bxnxd, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> grid_fea_bxkxd, const torch::PackedTensorAccessor<scalar_t, 4, torch::RestrictPtrTraits, size_t> grid_bxkx3x2, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> img_pos_bxnx2, torch::PackedTensorAccessor<scalar_t, 2, torch::RestrictPtrTraits, size_t> variance_bxn, torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> reconstruct_bxnxd, torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> buffer_bxnxk, scalar_t* __restrict__ buffer_bxn, int bnum, int n_pixel, int n_grid, int d_fea, float sigma ) { // bidx * height + heiidx int presentthread = blockIdx.x * blockDim.x + threadIdx.x; int pixel_idx = presentthread % n_pixel; int bidx = (presentthread - pixel_idx) / n_pixel; if (bidx >= bnum || pixel_idx >= n_pixel) { return; } scalar_t min_distance = 0.0; scalar_t find_sign = 0.0; scalar_t sum_exp = 0.0; scalar_t max_dist = -MAX_DIS; for (int grididx = 0; grididx < n_grid; grididx++){ min_distance = buffer_bxnxk[bidx][pixel_idx][grididx]; // if (find_sign == 1 && min_distance > 0){ // min_distance = -min_distance; // } // if (min_distance > 0){ // find_sign = 1; // } max_dist = max_dist > min_distance ? max_dist : min_distance; buffer_bxnxk[bidx][pixel_idx][grididx] = min_distance; } for (int grididx = 0; grididx < n_grid; grididx++){ buffer_bxnxk[bidx][pixel_idx][grididx] = expf(buffer_bxnxk[bidx][pixel_idx][grididx] - max_dist); sum_exp += buffer_bxnxk[bidx][pixel_idx][grididx]; } buffer_bxn[bidx * n_pixel + pixel_idx] = sum_exp; } #define BLOCK_SIZE 1024 #define WARP_SIZE 32 template <typename scalar_t> __inline__ __device__ scalar_t warpReduceSum(scalar_t val) { for (int offset = WARP_SIZE/2; offset > 0; offset /= 2){ val += __shfl_down(val, offset); } return val; } template <typename scalar_t> __global__ void blockReduceSum( scalar_t* __restrict__ buffer_bxnxk, scalar_t* __restrict__ buffer_bxnx4, scalar_t* __restrict__ max_dist_bxn, int bnum, int n_pixel, int n_grid, int split_size) { static __shared__ scalar_t shared[BLOCK_SIZE / WARP_SIZE]; // Shared mem for 32 partial sums int lane = threadIdx.x % WARP_SIZE; int wid = threadIdx.x / WARP_SIZE; // each thread loads one element from global to local register int presentthread = blockIdx.x*blockDim.x + threadIdx.x; int tid = threadIdx.x; int pixel_idx = (presentthread - tid) / (BLOCK_SIZE * split_size); int block_idx = blockIdx.x; int split = block_idx % split_size; int grididx = split * BLOCK_SIZE + tid; int bidx = 0; if (tid == 0 && pixel_idx < n_pixel) buffer_bxnx4[bidx * n_pixel * split_size + pixel_idx * split_size + split] = 0; scalar_t val = 0.0; if (bidx < bnum && pixel_idx < n_pixel && grididx < n_grid){ scalar_t max_dist = max_dist_bxn[bidx * n_pixel + pixel_idx]; buffer_bxnxk[bidx * n_pixel * n_grid + pixel_idx * n_grid + grididx] = expf(buffer_bxnxk[bidx * n_pixel * n_grid + pixel_idx * n_grid + grididx] - max_dist); val = buffer_bxnxk[bidx * n_pixel * n_grid + pixel_idx * n_grid + grididx]; } val = warpReduceSum(val); // Each warp performs partial reduction if (lane==0) shared[wid]=val; // Write reduced value to shared memory __syncthreads(); // Wait for all partial reductions //read from shared memory only if that warp existed val = (threadIdx.x < blockDim.x / WARP_SIZE) ? 
shared[lane] : 0; if (wid==0){ val = warpReduceSum(val); //Final reduce within first warp if (tid == 0 && pixel_idx < n_pixel){ buffer_bxnx4[bidx * n_pixel * split_size + pixel_idx * split_size + split] = val; } } } template <typename scalar_t> __inline__ __device__ scalar_t warpReduceMax(scalar_t val) { for (int offset = WARP_SIZE/2; offset > 0; offset /= 2){ val = max(val, __shfl_down(val, offset)); } return val; } template <typename scalar_t> __global__ void blockReduceMax( scalar_t* __restrict__ buffer_bxnxk, scalar_t* __restrict__ buffer_bxnx4, int bnum, int n_pixel, int n_grid, int split_size) { static __shared__ scalar_t shared[BLOCK_SIZE / WARP_SIZE]; // Shared mem for 32 partial sums int lane = threadIdx.x % WARP_SIZE; int wid = threadIdx.x / WARP_SIZE; // each thread loads one element from global to local register int presentthread = blockIdx.x*blockDim.x + threadIdx.x; int tid = threadIdx.x; int pixel_idx = (presentthread - tid) / (BLOCK_SIZE * split_size); int block_idx = blockIdx.x; int split = block_idx % split_size; int grididx = split * BLOCK_SIZE + tid; int bidx = 0; if (tid == 0 && pixel_idx < n_pixel) buffer_bxnx4[bidx * n_pixel * split_size + pixel_idx * split_size + split] = -MAX_DIS; scalar_t val = -MAX_DIS; if (bidx < bnum && pixel_idx < n_pixel && grididx < n_grid){ val = buffer_bxnxk[bidx * n_pixel * n_grid + pixel_idx * n_grid + grididx]; } val = warpReduceMax(val); // Each warp performs partial reduction if (lane==0) shared[wid]=val; // Write reduced value to shared memory __syncthreads(); // Wait for all partial reductions //read from shared memory only if that warp existed val = (threadIdx.x < blockDim.x / WARP_SIZE) ? shared[lane] : -MAX_DIS; if (wid==0){ val = warpReduceMax(val); //Final reduce within first warp if (tid == 0 && pixel_idx < n_pixel){ buffer_bxnx4[bidx * n_pixel * split_size + pixel_idx * split_size + split] = val; } } } template <typename scalar_t> __global__ void line_variance_parallel_cuda_forward_kernel_batch_max_reduce_last_step( scalar_t* __restrict__ buffer_bxnx4, scalar_t* __restrict__ buffer_bxn, int bnum, int n_pixel, int n_grid, int split_size) { // bidx * height + heiidx int presentthread = blockIdx.x * blockDim.x + threadIdx.x; int pixel_idx = presentthread % n_pixel; int bidx = (presentthread - pixel_idx) / n_pixel; if (bidx >= bnum || pixel_idx >= n_pixel) { return; } int base_idx = bidx * n_pixel * split_size + pixel_idx * split_size; scalar_t max_v = buffer_bxnx4[base_idx + 0]; for (int t=1; t < split_size; t++){ if(buffer_bxnx4[base_idx + t] > max_v){ max_v = buffer_bxnx4[base_idx + t]; } } buffer_bxn[bidx * n_pixel + pixel_idx] = max_v; } template <typename scalar_t> __global__ void line_variance_parallel_cuda_forward_kernel_batch_sum_reduce_last_step( scalar_t* __restrict__ buffer_bxnx4, scalar_t* __restrict__ buffer_bxn, int bnum, int n_pixel, int n_grid, int split_size) { // bidx * height + heiidx int presentthread = blockIdx.x * blockDim.x + threadIdx.x; int pixel_idx = presentthread % n_pixel; int bidx = (presentthread - pixel_idx) / n_pixel; if (bidx >= bnum || pixel_idx >= n_pixel) { return; } int base_idx = bidx * n_pixel * split_size + pixel_idx * split_size; scalar_t sum_v = buffer_bxnx4[base_idx + 0]; for (int t=1; t < split_size; t++){ sum_v += buffer_bxnx4[base_idx + t]; } buffer_bxn[bidx * n_pixel + pixel_idx] = sum_v; } template <typename scalar_t> __global__ void line_variance_parallel_cuda_forward_kernel_batch_final_1( const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> 
img_fea_bxnxd, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> grid_fea_bxkxd, const torch::PackedTensorAccessor<scalar_t, 4, torch::RestrictPtrTraits, size_t> grid_bxkx3x2, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> img_pos_bxnx2, torch::PackedTensorAccessor<scalar_t, 2, torch::RestrictPtrTraits, size_t> variance_bxn, torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> buffer_bxnxk, scalar_t* __restrict__ buffer_bxn, scalar_t* __restrict__ reconstruct_buffer_bxnxdx4, int bnum, int n_pixel, int n_grid, int d_fea, int fea_idx, int split_size) { static __shared__ scalar_t shared[BLOCK_SIZE / WARP_SIZE]; // Shared mem for 32 partial sums int lane = threadIdx.x % WARP_SIZE; int wid = threadIdx.x / WARP_SIZE; int presentthread = blockIdx.x*blockDim.x + threadIdx.x; int tid = threadIdx.x; int pixel_idx = (presentthread - tid) / (BLOCK_SIZE * split_size); // N_allthread = n_pixel * 1 * BLOCK_SIZE * split_size int block_idx = blockIdx.x; int split_idx = block_idx % split_size; int grididx = split_idx * BLOCK_SIZE + tid; int bidx = 0; scalar_t grid_f = 0.0; scalar_t pixel_f = 0.0; scalar_t diff = 0.0; scalar_t w = 0.0; scalar_t difference = 0.0; scalar_t sum_exp = 0.0; // We first calculate the difference for the buffer: if (grididx < n_grid && fea_idx == 0 && pixel_idx < n_pixel){ // first run calculate the buffer (soft assignment sum_exp = buffer_bxn[bidx * n_pixel + pixel_idx]; buffer_bxnxk[bidx][pixel_idx][grididx] = buffer_bxnxk[bidx][pixel_idx][grididx] / (sum_exp + 1e-15); } // shared add for reconstruct if (tid == 0 && pixel_idx < n_pixel && fea_idx < d_fea) reconstruct_buffer_bxnxdx4[pixel_idx * d_fea * split_size + fea_idx * split_size + split_idx] = 0; scalar_t val = 0.0; if (pixel_idx < n_pixel && grididx < n_grid){ w = buffer_bxnxk[bidx][pixel_idx][grididx]; grid_f = grid_fea_bxkxd[bidx][grididx][fea_idx]; val = w * grid_f; } val = warpReduceSum(val); // Each warp performs partial reduction if (lane==0) shared[wid]=val; // Write reduced value to shared memory __syncthreads(); // Wait for all partial reductions //read from shared memory only if that warp existed val = (threadIdx.x < blockDim.x / WARP_SIZE) ? 
shared[lane] : 0.0; if (wid==0){ val = warpReduceSum(val); // Final reduce within first warp if (tid == 0 && pixel_idx < n_pixel && fea_idx < d_fea){ reconstruct_buffer_bxnxdx4[pixel_idx * d_fea * split_size + fea_idx * split_size + split_idx] = val; } } if (grididx < n_grid && fea_idx == (d_fea - 1) && pixel_idx < n_pixel){ w = buffer_bxnxk[bidx][pixel_idx][grididx]; difference = 0.0; for (int d = 0; d < d_fea; d++){ grid_f = grid_fea_bxkxd[bidx][grididx][d]; pixel_f = img_fea_bxnxd[bidx][pixel_idx][d]; diff = line_variance_parallel_cuda_square(grid_f - pixel_f); difference = difference + diff; } buffer_bxnxk[bidx][pixel_idx][grididx] = w * difference; // this should be changed at the last step, other wise } } template <typename scalar_t> __global__ void line_variance_parallel_cuda_forward_kernel_batch_sum_reduce_last_step_reconstruct( scalar_t* __restrict__ buffer_bxnxdx4, scalar_t* __restrict__ buffer_bxnxd, int bnum, int n_pixel, int n_grid, int d_fea, int split_size) { // bidx * height + heiidx int presentthread = blockIdx.x * blockDim.x + threadIdx.x; int fea_idx = presentthread % d_fea; int pixel_idx = (presentthread - fea_idx) / d_fea; int bidx = 0; if (bidx >= bnum || pixel_idx >= n_pixel || fea_idx >= d_fea) { return; } int base_idx = bidx * n_pixel * d_fea * split_size + pixel_idx * d_fea * split_size + fea_idx * split_size; scalar_t sum_v = buffer_bxnxdx4[base_idx + 0]; for (int t=1; t < split_size; t++){ sum_v += buffer_bxnxdx4[base_idx + t]; } buffer_bxnxd[bidx * n_pixel * d_fea + pixel_idx * d_fea + fea_idx] = sum_v; } void line_variance_parallel_cuda_forward_batch(at::Tensor img_fea_bxnxd, at::Tensor grid_fea_bxkxd, at::Tensor grid_bxkx3x2, at::Tensor img_pos_bxnx2, at::Tensor variance_bxn, float sigma, at::Tensor reconstruct_bxnxd, at::Tensor buffer_bxnxk, at::Tensor buffer_bxn, at::Tensor buffer_bxnx4, at::Tensor buffer_bxnxdx4, int split_size){ int bnum = grid_bxkx3x2.size(0); int n_grid = grid_bxkx3x2.size(1); int n_pixel = img_pos_bxnx2.size(1); int d_fea = img_fea_bxnxd.size(2); // struct timeval t1, t2; // gettimeofday(&t1, 0); // for fxbxhxw image size // calculate the initi buffer const int threadnum = BLOCK_SIZE; const int totalthread_1 = bnum * n_pixel * n_grid; const int blocknum_1 = totalthread_1 / threadnum + 1; const dim3 threads(threadnum, 1, 1); const dim3 blocks_1(blocknum_1, 1, 1); AT_DISPATCH_FLOATING_TYPES(grid_bxkx3x2.type(), "line_variance_parallel_cuda_forward_batch_calc_buffer", ([&] { hipLaunchKernelGGL(( line_variance_parallel_cuda_forward_kernel_batch_calc_buffer<scalar_t>), dim3(blocks_1), dim3(threads), 0, 0, img_fea_bxnxd.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), grid_fea_bxkxd.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), grid_bxkx3x2.packed_accessor<scalar_t, 4, torch::RestrictPtrTraits, size_t>(), img_pos_bxnx2.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), variance_bxn.packed_accessor<scalar_t, 2, torch::RestrictPtrTraits, size_t>(), reconstruct_bxnxd.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), buffer_bxnxk.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), bnum, n_pixel, n_grid, d_fea, sigma); })); // find the maximum value in the buffer const int totalthread_3 = bnum * n_pixel * BLOCK_SIZE * split_size; const int blocknum_3 = totalthread_3 / threadnum + 1; const dim3 blocks_3(blocknum_3, 1, 1); AT_DISPATCH_FLOATING_TYPES(grid_bxkx3x2.type(), "line_variance_parallel_cuda_forward_batch_final", ([&] { hipLaunchKernelGGL(( 
blockReduceMax<scalar_t>), dim3(blocks_3), dim3(threads), 0, 0, buffer_bxnxk.data<scalar_t>(), buffer_bxnx4.data<scalar_t>(), bnum, n_pixel, n_grid, split_size); })); const int totalthread_4 = bnum * n_pixel; const int blocknum_4 = totalthread_4 / threadnum + 1; const dim3 blocks_4(blocknum_4, 1, 1); AT_DISPATCH_FLOATING_TYPES(grid_bxkx3x2.type(), "line_variance_parallel_cuda_forward_batch_final", ([&] { hipLaunchKernelGGL(( line_variance_parallel_cuda_forward_kernel_batch_max_reduce_last_step<scalar_t>), dim3(blocks_4), dim3(threads), 0, 0, buffer_bxnx4.data<scalar_t>(), buffer_bxn.data<scalar_t>(), bnum, n_pixel, n_grid, split_size); })); // find the sum of the buffer const int totalthread_5 = bnum * n_pixel * BLOCK_SIZE * split_size; const int blocknum_5 = totalthread_5 / threadnum + 1; const dim3 blocks_5(blocknum_5, 1, 1); AT_DISPATCH_FLOATING_TYPES(grid_bxkx3x2.type(), "line_variance_parallel_cuda_forward_batch_final", ([&] { hipLaunchKernelGGL(( blockReduceSum<scalar_t>), dim3(blocks_5), dim3(threads), 0, 0, buffer_bxnxk.data<scalar_t>(), buffer_bxnx4.data<scalar_t>(), buffer_bxn.data<scalar_t>(), bnum, n_pixel, n_grid, split_size); })); const int totalthread_6 = bnum * n_pixel; const int blocknum_6 = totalthread_6 / threadnum + 1; const dim3 blocks_6(blocknum_6, 1, 1); AT_DISPATCH_FLOATING_TYPES(grid_bxkx3x2.type(), "line_variance_parallel_cuda_forward_batch_final", ([&] { hipLaunchKernelGGL(( line_variance_parallel_cuda_forward_kernel_batch_sum_reduce_last_step<scalar_t>), dim3(blocks_6), dim3(threads), 0, 0, buffer_bxnx4.data<scalar_t>(), buffer_bxn.data<scalar_t>(), bnum, n_pixel, n_grid, split_size); })); const int totalthread_7 = bnum * n_pixel * BLOCK_SIZE * split_size; const int blocknum_7 = totalthread_7 / threadnum + 1; const dim3 blocks_7(blocknum_7, 1, 1); for (int fea_idx = 0; fea_idx < d_fea; fea_idx++){ AT_DISPATCH_FLOATING_TYPES(grid_bxkx3x2.type(), "line_variance_parallel_cuda_forward_batch_final", ([&] { hipLaunchKernelGGL(( line_variance_parallel_cuda_forward_kernel_batch_final_1<scalar_t>), dim3(blocks_7), dim3(threads), 0, 0, img_fea_bxnxd.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), grid_fea_bxkxd.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), grid_bxkx3x2.packed_accessor<scalar_t, 4, torch::RestrictPtrTraits, size_t>(), img_pos_bxnx2.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), variance_bxn.packed_accessor<scalar_t, 2, torch::RestrictPtrTraits, size_t>(), buffer_bxnxk.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), buffer_bxn.data<scalar_t>(), buffer_bxnxdx4.data<scalar_t>(), bnum, n_pixel, n_grid, d_fea, fea_idx, split_size); })); } const int totalthread_8 = bnum * n_pixel * d_fea; const int blocknum_8 = totalthread_8 / threadnum + 1; const dim3 blocks_8(blocknum_8, 1, 1); AT_DISPATCH_FLOATING_TYPES(grid_bxkx3x2.type(), "line_variance_parallel_cuda_forward_batch_final", ([&] { hipLaunchKernelGGL(( line_variance_parallel_cuda_forward_kernel_batch_sum_reduce_last_step_reconstruct<scalar_t>), dim3(blocks_8), dim3(threads), 0, 0, buffer_bxnxdx4.data<scalar_t>(), reconstruct_bxnxd.data<scalar_t>(), bnum, n_pixel, n_grid, d_fea, split_size); })); }
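Note on the softmax weighting used by the forward kernels above: each pixel's signed distances are turned into weights by subtracting the per-pixel maximum before expf, so every exponent is non-positive and nothing overflows, and the shift cancels after normalization. The sketch below shows that max-subtraction pattern in isolation; stable_softmax, logit, w and k are illustrative names, not part of the file above, and the 1e-15 guard mirrors the epsilon already used there.

__device__ void stable_softmax(const float *logit, float *w, int k) {
    // 1) find the maximum logit
    float m = logit[0];
    for (int i = 1; i < k; ++i) m = fmaxf(m, logit[i]);
    // 2) exponentiate the shifted logits and accumulate the normalizer
    float s = 0.0f;
    for (int i = 0; i < k; ++i) { w[i] = expf(logit[i] - m); s += w[i]; }
    // 3) normalize; the shift by m cancels out, so the weights are unchanged
    for (int i = 0; i < k; ++i) w[i] /= (s + 1e-15f);
}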
567229fcfd5670e0b10af3fe53a71513e7851811.cu
#include <ATen/ATen.h> #include <cuda.h> #include <cuda_runtime.h> #include <assert.h> #include <THC/THC.h> #include <vector> #include <torch/torch.h> #include <torch/extension.h> #define eps 1e-10 #define SCALE 1.0 #define MAX_DIS 9999999999.0 #include <sys/time.h> static void HandleError( cudaError_t err, const char *file, int line ) { if (err != cudaSuccess) { printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) //extern THCState * state; template<typename scalar_t> __host__ __device__ scalar_t line_variance_parallel_cuda_abs(scalar_t a){ if (a > 0.0){ return a; } else{ return -a; } } template<typename scalar_t> __host__ __device__ scalar_t line_variance_parallel_cuda_square(scalar_t a){ return a * a; } template<typename scalar_t> __host__ __device__ scalar_t line_variance_parallel_cuda_divide_non_zero(scalar_t a){ if (a == 0){ return eps; } if (a < 0){ return a - eps; } if (a > 0){ return a + eps; } return eps; } template<typename scalar_t> __host__ __device__ scalar_t line_variance_parallel_cuda_min_dis(scalar_t a, scalar_t b, scalar_t c){ scalar_t min_d = a; if (b < min_d){ min_d = b; } if (c < min_d){ min_d = c; } return min_d; } template<typename scalar_t> __host__ __device__ scalar_t line_variance_parallel_cuda_min_dis_idx(scalar_t a, scalar_t b, scalar_t c){ scalar_t min_d = a; int min_idx = 0; if (b < min_d){ min_d = b; min_idx = 1; } if (c < min_d){ min_d = c; min_idx = 2; } return min_idx; } template <typename scalar_t> __host__ __device__ scalar_t distance_line(scalar_t x1, scalar_t y1, scalar_t x2, scalar_t y2, scalar_t x, scalar_t y){ scalar_t dx1x2 = -x1 + x2; scalar_t dy1y2 = -y1 + y2; scalar_t dx1x = x - x1; scalar_t dy1y = y - y1; scalar_t c1 = - x * x1 + x * x2 + x1 * x1 - x1 * x2 - y * y1 + y * y2 + y1 * y1 - y1 * y2; scalar_t c2 = x1 * x1 - 2 * x1 * x2 + x2 * x2 + y1 * y1 - 2 * y1 * y2 + y2 * y2; scalar_t d1 = -dx1x + dx1x2 * c1 / line_variance_parallel_cuda_divide_non_zero(c2); scalar_t d2 = -dy1y + dy1y2 * c1 / line_variance_parallel_cuda_divide_non_zero(c2); scalar_t dis = line_variance_parallel_cuda_abs(d1) + line_variance_parallel_cuda_abs(d2); return dis; } template <typename scalar_t> __host__ __device__ scalar_t distance_point(scalar_t x1, scalar_t y1, scalar_t x, scalar_t y){ return line_variance_parallel_cuda_abs(x - x1) + line_variance_parallel_cuda_abs(y - y1); } template <typename scalar_t> __host__ __device__ void distance(scalar_t* ret, scalar_t x1, scalar_t y1, scalar_t x2, scalar_t y2, scalar_t x3, scalar_t y3, scalar_t x, scalar_t y) { //https://en.wikipedia.org/wiki/Barycentric_coordinate_system scalar_t x1_x2 = x1 - x2; scalar_t y1_y2 = y1 - y2; scalar_t x1_x3 = x1 - x3; scalar_t y1_y3 = y1 - y3; scalar_t x2_x3 = x2 - x3; scalar_t y2_y3 = y2 - y3; scalar_t x_x1 = x - x1; scalar_t y_y1 = y - y1; scalar_t x_x2 = x - x2; scalar_t y_y2 = y - y2; scalar_t x_x3 = x - x3; scalar_t y_y3 = y - y3; scalar_t k1 = y2_y3 * x_x3 - x2_x3 * y_y3; scalar_t k2 = x1_x3 * y_y3 - y1_y3 * x_x3; scalar_t k3 = y2_y3 * x1_x3 - x2_x3 * y1_y3; if(k3 == 0){ // not a legal triangle ret[0] = -2; return; } if(k3 > 0){ // clock-wise triangle ret[0] = -1; return; } //scalar_t l1 = k1 / line_variance_parallel_cuda_divide_non_zero(k3); //scalar_t l2 = k2 / line_variance_parallel_cuda_divide_non_zero(k3); scalar_t l1 = k1 / k3; scalar_t l2 = k2 / k3; scalar_t l3 = 1 - l1 - l2; scalar_t dis12 = distance_line(x1, y1, x2, y2, x, y); scalar_t dis23 = distance_line(x2, y2, 
x3, y3, x, y); scalar_t dis13 = distance_line(x1, y1, x3, y3, x, y); if (l1 >= 0 && l2 >= 0 && l3 >= 0){ // lie inside or on the boundary scalar_t min_dis_line = line_variance_parallel_cuda_min_dis(dis12, dis23, dis13); scalar_t min_dis_line_idx = line_variance_parallel_cuda_min_dis_idx(dis12, dis23, dis13); ret[0] = 0; ret[1] = min_dis_line; ret[2] = min_dis_line_idx; return; } // whether point can calculate distance to certain line bool within12 = ((y1_y2 * y_y1 + x_x1 * x1_x2) * (y1_y2 * y_y2 + x_x2 * x1_x2)) <= 0; bool within23 = ((y2_y3 * y_y3 + x_x3 * x2_x3) * (y2_y3 * y_y2 + x_x2 * x2_x3)) <= 0; bool within13 = ((y1_y3 * y_y1 + x_x1 * x1_x3) * (y1_y3 * y_y3 + x_x3 * x1_x3)) <= 0; dis12 = within12 ? dis12 : MAX_DIS; dis23 = within23 ? dis23 : MAX_DIS; dis13 = within13 ? dis13 : MAX_DIS; scalar_t min_dis_line = line_variance_parallel_cuda_min_dis(dis12, dis23, dis13); scalar_t min_dis_line_idx = line_variance_parallel_cuda_min_dis_idx(dis12, dis23, dis13); scalar_t d1 = distance_point(x1, y1, x, y); scalar_t d2 = distance_point(x2, y2, x, y); scalar_t d3 = distance_point(x3, y3, x, y); scalar_t min_dis_point = line_variance_parallel_cuda_min_dis(d1, d2, d3); scalar_t min_dis_point_idx = line_variance_parallel_cuda_min_dis_idx(d1, d2, d3); if (min_dis_line < min_dis_point){ ret[0] = 1; ret[1] = min_dis_line; ret[2] = min_dis_line_idx; } else{ ret[0] = 2; ret[1] = min_dis_point; ret[2] = min_dis_point_idx; } } template <typename scalar_t> __global__ void line_variance_parallel_cuda_forward_kernel_batch( const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> img_fea_bxnxd, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> grid_fea_bxkxd, const torch::PackedTensorAccessor<scalar_t, 4, torch::RestrictPtrTraits, size_t> grid_bxkx3x2, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> img_pos_bxnx2, torch::PackedTensorAccessor<scalar_t, 2, torch::RestrictPtrTraits, size_t> variance_bxn, torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> reconstruct_bxnxd, torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> buffer_bxnxk, int bnum, int n_pixel, int n_grid, int d_fea, float sigma ) { // bidx * height + heiidx int presentthread = blockIdx.x * blockDim.x + threadIdx.x; int pixel_idx = presentthread % n_pixel; int bidx = (presentthread - pixel_idx) / n_pixel; if (bidx >= bnum || pixel_idx >= n_pixel) { return; } ///////////////////////////////////////////////////////////////// // which pixel it belongs to int total_idx = bidx * n_pixel * n_grid + pixel_idx * n_grid; scalar_t pixel_x = img_pos_bxnx2[bidx][pixel_idx][0]; scalar_t pixel_y = img_pos_bxnx2[bidx][pixel_idx][1]; scalar_t x0 = pixel_x * SCALE; scalar_t y0 = pixel_y * SCALE; scalar_t min_distance = 0.0; scalar_t find_sign = 0.0; scalar_t sum_exp = 0.0; scalar_t max_dist = -MAX_DIS; scalar_t ax, ay, bx, by, cx, cy; scalar_t condition; int img_pos_total_idx = bidx * n_pixel * 2 + pixel_idx * 2; scalar_t ret[3] = {0}; for (int grididx = 0; grididx < n_grid; grididx++){ ax = grid_bxkx3x2[bidx][grididx][0][0] * SCALE; ay = grid_bxkx3x2[bidx][grididx][0][1] * SCALE; bx = grid_bxkx3x2[bidx][grididx][1][0] * SCALE; by = grid_bxkx3x2[bidx][grididx][1][1] * SCALE; cx = grid_bxkx3x2[bidx][grididx][2][0] * SCALE; cy = grid_bxkx3x2[bidx][grididx][2][1] * SCALE; distance(ret, ax, ay, bx, by, cx, cy, x0, y0); condition = ret[0]; min_distance = ret[1]; if (condition < 0) { min_distance = - MAX_DIS; } else if (condition == 0 
&& find_sign == 0){ min_distance = min_distance / sigma; find_sign == 1; } else{ min_distance = - min_distance / sigma; } max_dist = max_dist > min_distance ? max_dist : min_distance; buffer_bxnxk[bidx][pixel_idx][grididx] = min_distance; } for (int grididx = 0; grididx < n_grid; grididx++){ buffer_bxnxk[bidx][pixel_idx][grididx] = expf(buffer_bxnxk[bidx][pixel_idx][grididx] - max_dist); sum_exp += buffer_bxnxk[bidx][pixel_idx][grididx]; } scalar_t variance = 0.0; scalar_t grid_f = 0.0; scalar_t pixel_f = 0.0; scalar_t diff = 0.0; scalar_t w = 0.0; scalar_t difference = 0.0; for (int grididx = 0; grididx < n_grid; grididx++){ int in_sign = 0; if(buffer_bxnxk[bidx][pixel_idx][grididx] == 1){ in_sign = 1; } buffer_bxnxk[bidx][pixel_idx][grididx] = buffer_bxnxk[bidx][pixel_idx][grididx] / (sum_exp + 1e-15); w = buffer_bxnxk[bidx][pixel_idx][grididx]; difference = 0.0; for (int d = 0; d < d_fea; d++){ grid_f = grid_fea_bxkxd[bidx][grididx][d]; pixel_f = img_fea_bxnxd[bidx][pixel_idx][d]; reconstruct_bxnxd[bidx][pixel_idx][d] += w * grid_f; diff = line_variance_parallel_cuda_square(grid_f - pixel_f); difference = difference + diff; } variance = variance + w * difference; if(in_sign == 1){ //hard variance for upsample buffer_bxnxk[bidx][pixel_idx][grididx] = difference; } else{ buffer_bxnxk[bidx][pixel_idx][grididx] = 0; } } variance_bxn[bidx][pixel_idx] = variance; } template <typename scalar_t> __global__ void line_variance_parallel_cuda_forward_kernel_batch_calc_buffer( const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> img_fea_bxnxd, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> grid_fea_bxkxd, const torch::PackedTensorAccessor<scalar_t, 4, torch::RestrictPtrTraits, size_t> grid_bxkx3x2, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> img_pos_bxnx2, torch::PackedTensorAccessor<scalar_t, 2, torch::RestrictPtrTraits, size_t> variance_bxn, torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> reconstruct_bxnxd, torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> buffer_bxnxk, int bnum, int n_pixel, int n_grid, int d_fea, float sigma ) { // bidx * height + heiidx int presentthread = blockIdx.x * blockDim.x + threadIdx.x; int grididx = presentthread % n_grid; int pixel_idx = (presentthread - grididx) / n_grid; int bidx = 0; if (bidx >= bnum || pixel_idx >= n_pixel || grididx >= n_grid) { return; } ///////////////////////////////////////////////////////////////// // which pixel it belongs to scalar_t pixel_x = img_pos_bxnx2[bidx][pixel_idx][0]; scalar_t pixel_y = img_pos_bxnx2[bidx][pixel_idx][1]; scalar_t x0 = pixel_x * SCALE; scalar_t y0 = pixel_y * SCALE; scalar_t min_distance = 0.0; scalar_t ax, ay, bx, by, cx, cy; scalar_t condition; scalar_t ret[3] = {0}; ax = grid_bxkx3x2[bidx][grididx][0][0] * SCALE; ay = grid_bxkx3x2[bidx][grididx][0][1] * SCALE; bx = grid_bxkx3x2[bidx][grididx][1][0] * SCALE; by = grid_bxkx3x2[bidx][grididx][1][1] * SCALE; cx = grid_bxkx3x2[bidx][grididx][2][0] * SCALE; cy = grid_bxkx3x2[bidx][grididx][2][1] * SCALE; distance(ret, ax, ay, bx, by, cx, cy, x0, y0); condition = ret[0]; min_distance = ret[1]; if (condition < 0) { min_distance = - MAX_DIS; } else if (condition == 0){ min_distance = min_distance / sigma; } else{ min_distance = - min_distance / sigma; } buffer_bxnxk[bidx][pixel_idx][grididx] = min_distance; } template <typename scalar_t> __global__ void line_variance_parallel_cuda_forward_kernel_batch_max_sum( const 
torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> img_fea_bxnxd, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> grid_fea_bxkxd, const torch::PackedTensorAccessor<scalar_t, 4, torch::RestrictPtrTraits, size_t> grid_bxkx3x2, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> img_pos_bxnx2, torch::PackedTensorAccessor<scalar_t, 2, torch::RestrictPtrTraits, size_t> variance_bxn, torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> reconstruct_bxnxd, torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> buffer_bxnxk, scalar_t* __restrict__ buffer_bxn, int bnum, int n_pixel, int n_grid, int d_fea, float sigma ) { // bidx * height + heiidx int presentthread = blockIdx.x * blockDim.x + threadIdx.x; int pixel_idx = presentthread % n_pixel; int bidx = (presentthread - pixel_idx) / n_pixel; if (bidx >= bnum || pixel_idx >= n_pixel) { return; } scalar_t min_distance = 0.0; scalar_t find_sign = 0.0; scalar_t sum_exp = 0.0; scalar_t max_dist = -MAX_DIS; for (int grididx = 0; grididx < n_grid; grididx++){ min_distance = buffer_bxnxk[bidx][pixel_idx][grididx]; // if (find_sign == 1 && min_distance > 0){ // min_distance = -min_distance; // } // if (min_distance > 0){ // find_sign = 1; // } max_dist = max_dist > min_distance ? max_dist : min_distance; buffer_bxnxk[bidx][pixel_idx][grididx] = min_distance; } for (int grididx = 0; grididx < n_grid; grididx++){ buffer_bxnxk[bidx][pixel_idx][grididx] = expf(buffer_bxnxk[bidx][pixel_idx][grididx] - max_dist); sum_exp += buffer_bxnxk[bidx][pixel_idx][grididx]; } buffer_bxn[bidx * n_pixel + pixel_idx] = sum_exp; } #define BLOCK_SIZE 1024 #define WARP_SIZE 32 template <typename scalar_t> __inline__ __device__ scalar_t warpReduceSum(scalar_t val) { for (int offset = WARP_SIZE/2; offset > 0; offset /= 2){ val += __shfl_down(val, offset); } return val; } template <typename scalar_t> __global__ void blockReduceSum( scalar_t* __restrict__ buffer_bxnxk, scalar_t* __restrict__ buffer_bxnx4, scalar_t* __restrict__ max_dist_bxn, int bnum, int n_pixel, int n_grid, int split_size) { static __shared__ scalar_t shared[BLOCK_SIZE / WARP_SIZE]; // Shared mem for 32 partial sums int lane = threadIdx.x % WARP_SIZE; int wid = threadIdx.x / WARP_SIZE; // each thread loads one element from global to local register int presentthread = blockIdx.x*blockDim.x + threadIdx.x; int tid = threadIdx.x; int pixel_idx = (presentthread - tid) / (BLOCK_SIZE * split_size); int block_idx = blockIdx.x; int split = block_idx % split_size; int grididx = split * BLOCK_SIZE + tid; int bidx = 0; if (tid == 0 && pixel_idx < n_pixel) buffer_bxnx4[bidx * n_pixel * split_size + pixel_idx * split_size + split] = 0; scalar_t val = 0.0; if (bidx < bnum && pixel_idx < n_pixel && grididx < n_grid){ scalar_t max_dist = max_dist_bxn[bidx * n_pixel + pixel_idx]; buffer_bxnxk[bidx * n_pixel * n_grid + pixel_idx * n_grid + grididx] = expf(buffer_bxnxk[bidx * n_pixel * n_grid + pixel_idx * n_grid + grididx] - max_dist); val = buffer_bxnxk[bidx * n_pixel * n_grid + pixel_idx * n_grid + grididx]; } val = warpReduceSum(val); // Each warp performs partial reduction if (lane==0) shared[wid]=val; // Write reduced value to shared memory __syncthreads(); // Wait for all partial reductions //read from shared memory only if that warp existed val = (threadIdx.x < blockDim.x / WARP_SIZE) ? 
shared[lane] : 0; if (wid==0){ val = warpReduceSum(val); //Final reduce within first warp if (tid == 0 && pixel_idx < n_pixel){ buffer_bxnx4[bidx * n_pixel * split_size + pixel_idx * split_size + split] = val; } } } template <typename scalar_t> __inline__ __device__ scalar_t warpReduceMax(scalar_t val) { for (int offset = WARP_SIZE/2; offset > 0; offset /= 2){ val = max(val, __shfl_down(val, offset)); } return val; } template <typename scalar_t> __global__ void blockReduceMax( scalar_t* __restrict__ buffer_bxnxk, scalar_t* __restrict__ buffer_bxnx4, int bnum, int n_pixel, int n_grid, int split_size) { static __shared__ scalar_t shared[BLOCK_SIZE / WARP_SIZE]; // Shared mem for 32 partial sums int lane = threadIdx.x % WARP_SIZE; int wid = threadIdx.x / WARP_SIZE; // each thread loads one element from global to local register int presentthread = blockIdx.x*blockDim.x + threadIdx.x; int tid = threadIdx.x; int pixel_idx = (presentthread - tid) / (BLOCK_SIZE * split_size); int block_idx = blockIdx.x; int split = block_idx % split_size; int grididx = split * BLOCK_SIZE + tid; int bidx = 0; if (tid == 0 && pixel_idx < n_pixel) buffer_bxnx4[bidx * n_pixel * split_size + pixel_idx * split_size + split] = -MAX_DIS; scalar_t val = -MAX_DIS; if (bidx < bnum && pixel_idx < n_pixel && grididx < n_grid){ val = buffer_bxnxk[bidx * n_pixel * n_grid + pixel_idx * n_grid + grididx]; } val = warpReduceMax(val); // Each warp performs partial reduction if (lane==0) shared[wid]=val; // Write reduced value to shared memory __syncthreads(); // Wait for all partial reductions //read from shared memory only if that warp existed val = (threadIdx.x < blockDim.x / WARP_SIZE) ? shared[lane] : -MAX_DIS; if (wid==0){ val = warpReduceMax(val); //Final reduce within first warp if (tid == 0 && pixel_idx < n_pixel){ buffer_bxnx4[bidx * n_pixel * split_size + pixel_idx * split_size + split] = val; } } } template <typename scalar_t> __global__ void line_variance_parallel_cuda_forward_kernel_batch_max_reduce_last_step( scalar_t* __restrict__ buffer_bxnx4, scalar_t* __restrict__ buffer_bxn, int bnum, int n_pixel, int n_grid, int split_size) { // bidx * height + heiidx int presentthread = blockIdx.x * blockDim.x + threadIdx.x; int pixel_idx = presentthread % n_pixel; int bidx = (presentthread - pixel_idx) / n_pixel; if (bidx >= bnum || pixel_idx >= n_pixel) { return; } int base_idx = bidx * n_pixel * split_size + pixel_idx * split_size; scalar_t max_v = buffer_bxnx4[base_idx + 0]; for (int t=1; t < split_size; t++){ if(buffer_bxnx4[base_idx + t] > max_v){ max_v = buffer_bxnx4[base_idx + t]; } } buffer_bxn[bidx * n_pixel + pixel_idx] = max_v; } template <typename scalar_t> __global__ void line_variance_parallel_cuda_forward_kernel_batch_sum_reduce_last_step( scalar_t* __restrict__ buffer_bxnx4, scalar_t* __restrict__ buffer_bxn, int bnum, int n_pixel, int n_grid, int split_size) { // bidx * height + heiidx int presentthread = blockIdx.x * blockDim.x + threadIdx.x; int pixel_idx = presentthread % n_pixel; int bidx = (presentthread - pixel_idx) / n_pixel; if (bidx >= bnum || pixel_idx >= n_pixel) { return; } int base_idx = bidx * n_pixel * split_size + pixel_idx * split_size; scalar_t sum_v = buffer_bxnx4[base_idx + 0]; for (int t=1; t < split_size; t++){ sum_v += buffer_bxnx4[base_idx + t]; } buffer_bxn[bidx * n_pixel + pixel_idx] = sum_v; } template <typename scalar_t> __global__ void line_variance_parallel_cuda_forward_kernel_batch_final_1( const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> 
img_fea_bxnxd, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> grid_fea_bxkxd, const torch::PackedTensorAccessor<scalar_t, 4, torch::RestrictPtrTraits, size_t> grid_bxkx3x2, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> img_pos_bxnx2, torch::PackedTensorAccessor<scalar_t, 2, torch::RestrictPtrTraits, size_t> variance_bxn, torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> buffer_bxnxk, scalar_t* __restrict__ buffer_bxn, scalar_t* __restrict__ reconstruct_buffer_bxnxdx4, int bnum, int n_pixel, int n_grid, int d_fea, int fea_idx, int split_size) { static __shared__ scalar_t shared[BLOCK_SIZE / WARP_SIZE]; // Shared mem for 32 partial sums int lane = threadIdx.x % WARP_SIZE; int wid = threadIdx.x / WARP_SIZE; int presentthread = blockIdx.x*blockDim.x + threadIdx.x; int tid = threadIdx.x; int pixel_idx = (presentthread - tid) / (BLOCK_SIZE * split_size); // N_allthread = n_pixel * 1 * BLOCK_SIZE * split_size int block_idx = blockIdx.x; int split_idx = block_idx % split_size; int grididx = split_idx * BLOCK_SIZE + tid; int bidx = 0; scalar_t grid_f = 0.0; scalar_t pixel_f = 0.0; scalar_t diff = 0.0; scalar_t w = 0.0; scalar_t difference = 0.0; scalar_t sum_exp = 0.0; // We first calculate the difference for the buffer: if (grididx < n_grid && fea_idx == 0 && pixel_idx < n_pixel){ // first run calculate the buffer (soft assignment sum_exp = buffer_bxn[bidx * n_pixel + pixel_idx]; buffer_bxnxk[bidx][pixel_idx][grididx] = buffer_bxnxk[bidx][pixel_idx][grididx] / (sum_exp + 1e-15); } // shared add for reconstruct if (tid == 0 && pixel_idx < n_pixel && fea_idx < d_fea) reconstruct_buffer_bxnxdx4[pixel_idx * d_fea * split_size + fea_idx * split_size + split_idx] = 0; scalar_t val = 0.0; if (pixel_idx < n_pixel && grididx < n_grid){ w = buffer_bxnxk[bidx][pixel_idx][grididx]; grid_f = grid_fea_bxkxd[bidx][grididx][fea_idx]; val = w * grid_f; } val = warpReduceSum(val); // Each warp performs partial reduction if (lane==0) shared[wid]=val; // Write reduced value to shared memory __syncthreads(); // Wait for all partial reductions //read from shared memory only if that warp existed val = (threadIdx.x < blockDim.x / WARP_SIZE) ? 
shared[lane] : 0.0; if (wid==0){ val = warpReduceSum(val); // Final reduce within first warp if (tid == 0 && pixel_idx < n_pixel && fea_idx < d_fea){ reconstruct_buffer_bxnxdx4[pixel_idx * d_fea * split_size + fea_idx * split_size + split_idx] = val; } } if (grididx < n_grid && fea_idx == (d_fea - 1) && pixel_idx < n_pixel){ w = buffer_bxnxk[bidx][pixel_idx][grididx]; difference = 0.0; for (int d = 0; d < d_fea; d++){ grid_f = grid_fea_bxkxd[bidx][grididx][d]; pixel_f = img_fea_bxnxd[bidx][pixel_idx][d]; diff = line_variance_parallel_cuda_square(grid_f - pixel_f); difference = difference + diff; } buffer_bxnxk[bidx][pixel_idx][grididx] = w * difference; // this should be changed at the last step, other wise } } template <typename scalar_t> __global__ void line_variance_parallel_cuda_forward_kernel_batch_sum_reduce_last_step_reconstruct( scalar_t* __restrict__ buffer_bxnxdx4, scalar_t* __restrict__ buffer_bxnxd, int bnum, int n_pixel, int n_grid, int d_fea, int split_size) { // bidx * height + heiidx int presentthread = blockIdx.x * blockDim.x + threadIdx.x; int fea_idx = presentthread % d_fea; int pixel_idx = (presentthread - fea_idx) / d_fea; int bidx = 0; if (bidx >= bnum || pixel_idx >= n_pixel || fea_idx >= d_fea) { return; } int base_idx = bidx * n_pixel * d_fea * split_size + pixel_idx * d_fea * split_size + fea_idx * split_size; scalar_t sum_v = buffer_bxnxdx4[base_idx + 0]; for (int t=1; t < split_size; t++){ sum_v += buffer_bxnxdx4[base_idx + t]; } buffer_bxnxd[bidx * n_pixel * d_fea + pixel_idx * d_fea + fea_idx] = sum_v; } void line_variance_parallel_cuda_forward_batch(at::Tensor img_fea_bxnxd, at::Tensor grid_fea_bxkxd, at::Tensor grid_bxkx3x2, at::Tensor img_pos_bxnx2, at::Tensor variance_bxn, float sigma, at::Tensor reconstruct_bxnxd, at::Tensor buffer_bxnxk, at::Tensor buffer_bxn, at::Tensor buffer_bxnx4, at::Tensor buffer_bxnxdx4, int split_size){ int bnum = grid_bxkx3x2.size(0); int n_grid = grid_bxkx3x2.size(1); int n_pixel = img_pos_bxnx2.size(1); int d_fea = img_fea_bxnxd.size(2); // struct timeval t1, t2; // gettimeofday(&t1, 0); // for fxbxhxw image size // calculate the initi buffer const int threadnum = BLOCK_SIZE; const int totalthread_1 = bnum * n_pixel * n_grid; const int blocknum_1 = totalthread_1 / threadnum + 1; const dim3 threads(threadnum, 1, 1); const dim3 blocks_1(blocknum_1, 1, 1); AT_DISPATCH_FLOATING_TYPES(grid_bxkx3x2.type(), "line_variance_parallel_cuda_forward_batch_calc_buffer", ([&] { line_variance_parallel_cuda_forward_kernel_batch_calc_buffer<scalar_t><<<blocks_1, threads>>>( img_fea_bxnxd.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), grid_fea_bxkxd.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), grid_bxkx3x2.packed_accessor<scalar_t, 4, torch::RestrictPtrTraits, size_t>(), img_pos_bxnx2.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), variance_bxn.packed_accessor<scalar_t, 2, torch::RestrictPtrTraits, size_t>(), reconstruct_bxnxd.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), buffer_bxnxk.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), bnum, n_pixel, n_grid, d_fea, sigma); })); // find the maximum value in the buffer const int totalthread_3 = bnum * n_pixel * BLOCK_SIZE * split_size; const int blocknum_3 = totalthread_3 / threadnum + 1; const dim3 blocks_3(blocknum_3, 1, 1); AT_DISPATCH_FLOATING_TYPES(grid_bxkx3x2.type(), "line_variance_parallel_cuda_forward_batch_final", ([&] { blockReduceMax<scalar_t><<<blocks_3, threads>>>( 
buffer_bxnxk.data<scalar_t>(), buffer_bxnx4.data<scalar_t>(), bnum, n_pixel, n_grid, split_size); })); const int totalthread_4 = bnum * n_pixel; const int blocknum_4 = totalthread_4 / threadnum + 1; const dim3 blocks_4(blocknum_4, 1, 1); AT_DISPATCH_FLOATING_TYPES(grid_bxkx3x2.type(), "line_variance_parallel_cuda_forward_batch_final", ([&] { line_variance_parallel_cuda_forward_kernel_batch_max_reduce_last_step<scalar_t><<<blocks_4, threads>>>( buffer_bxnx4.data<scalar_t>(), buffer_bxn.data<scalar_t>(), bnum, n_pixel, n_grid, split_size); })); // find the sum of the buffer const int totalthread_5 = bnum * n_pixel * BLOCK_SIZE * split_size; const int blocknum_5 = totalthread_5 / threadnum + 1; const dim3 blocks_5(blocknum_5, 1, 1); AT_DISPATCH_FLOATING_TYPES(grid_bxkx3x2.type(), "line_variance_parallel_cuda_forward_batch_final", ([&] { blockReduceSum<scalar_t><<<blocks_5, threads>>>( buffer_bxnxk.data<scalar_t>(), buffer_bxnx4.data<scalar_t>(), buffer_bxn.data<scalar_t>(), bnum, n_pixel, n_grid, split_size); })); const int totalthread_6 = bnum * n_pixel; const int blocknum_6 = totalthread_6 / threadnum + 1; const dim3 blocks_6(blocknum_6, 1, 1); AT_DISPATCH_FLOATING_TYPES(grid_bxkx3x2.type(), "line_variance_parallel_cuda_forward_batch_final", ([&] { line_variance_parallel_cuda_forward_kernel_batch_sum_reduce_last_step<scalar_t><<<blocks_6, threads>>>( buffer_bxnx4.data<scalar_t>(), buffer_bxn.data<scalar_t>(), bnum, n_pixel, n_grid, split_size); })); const int totalthread_7 = bnum * n_pixel * BLOCK_SIZE * split_size; const int blocknum_7 = totalthread_7 / threadnum + 1; const dim3 blocks_7(blocknum_7, 1, 1); for (int fea_idx = 0; fea_idx < d_fea; fea_idx++){ AT_DISPATCH_FLOATING_TYPES(grid_bxkx3x2.type(), "line_variance_parallel_cuda_forward_batch_final", ([&] { line_variance_parallel_cuda_forward_kernel_batch_final_1<scalar_t><<<blocks_7, threads>>>( img_fea_bxnxd.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), grid_fea_bxkxd.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), grid_bxkx3x2.packed_accessor<scalar_t, 4, torch::RestrictPtrTraits, size_t>(), img_pos_bxnx2.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), variance_bxn.packed_accessor<scalar_t, 2, torch::RestrictPtrTraits, size_t>(), buffer_bxnxk.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), buffer_bxn.data<scalar_t>(), buffer_bxnxdx4.data<scalar_t>(), bnum, n_pixel, n_grid, d_fea, fea_idx, split_size); })); } const int totalthread_8 = bnum * n_pixel * d_fea; const int blocknum_8 = totalthread_8 / threadnum + 1; const dim3 blocks_8(blocknum_8, 1, 1); AT_DISPATCH_FLOATING_TYPES(grid_bxkx3x2.type(), "line_variance_parallel_cuda_forward_batch_final", ([&] { line_variance_parallel_cuda_forward_kernel_batch_sum_reduce_last_step_reconstruct<scalar_t><<<blocks_8, threads>>>( buffer_bxnxdx4.data<scalar_t>(), reconstruct_bxnxd.data<scalar_t>(), bnum, n_pixel, n_grid, d_fea, split_size); })); }
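Note on the warp reductions in the file above: warpReduceSum and warpReduceMax use the two-argument __shfl_down intrinsic, which CUDA 9 deprecated in favor of __shfl_down_sync with an explicit lane mask. A minimal sum reduction over one 32-lane warp in the current style is sketched below; warp_sum is an illustrative name, not part of the file.

__inline__ __device__ float warp_sum(float val) {
    // shift-down tree: after the loop, lane 0 holds the warp total
    for (int offset = 16; offset > 0; offset >>= 1)
        val += __shfl_down_sync(0xffffffff, val, offset);
    return val;
}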
464ffbd8e5bd687a297a092ff30769da8e251f3f.hip
// !!! This is a file automatically generated by hipify!!!
#ifndef __GDD_SQRT_CU__
#define __GDD_SQRT_CU__

#include "common.hip"

/* Computes the square root of the double-double number dd.
   NOTE: dd must be a non-negative number. */
__device__ gdd_real sqrt(const gdd_real &a) {
    if (is_zero(a))
        return make_dd(0.0);

    //TODO: should make an error
    if (is_negative(a)) {
        //return _nan;
        return make_dd(0.0);
    }

    double x = 1.0 / sqrt(a.x);
    double ax = a.x * x;

    return dd_add(ax, (a - sqr(ax)).x * (x * 0.5));
    //return a - sqr(ax);
}

#endif /* __GDD_SQRT_CU__ */
464ffbd8e5bd687a297a092ff30769da8e251f3f.cu
#ifndef __GDD_SQRT_CU__
#define __GDD_SQRT_CU__

#include "common.cu"

/* Computes the square root of the double-double number dd.
   NOTE: dd must be a non-negative number. */
__device__ gdd_real sqrt(const gdd_real &a) {
    if (is_zero(a))
        return make_dd(0.0);

    //TODO: should make an error
    if (is_negative(a)) {
        //return _nan;
        return make_dd(0.0);
    }

    double x = 1.0 / sqrt(a.x);
    double ax = a.x * x;

    return dd_add(ax, (a - sqr(ax)).x * (x * 0.5));
    //return a - sqr(ax);
}

#endif /* __GDD_SQRT_CU__ */
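Both versions of this file build sqrt(a) from a reciprocal-square-root seed x of the high double a.x: ax = a.x*x approximates the root, and the correction (a - ax*ax)*(x/2) is in effect one Newton step that needs no division. The host-side sketch below demonstrates the same refinement on plain doubles; refine_sqrt, the float seed and the main driver are illustrative only, not part of the gdd library.

#include <cstdio>
#include <cmath>

// One division-free Newton refinement of sqrt(a), given x ~ 1/sqrt(a):
//   ax = a*x ~ sqrt(a);  ax + (a - ax*ax) * (x/2) is the refined root.
static double refine_sqrt(double a, double x) {
    double ax = a * x;
    return ax + (a - ax * ax) * (x * 0.5);
}

int main() {
    double a = 2.0;
    double x = 1.0 / (double)sqrtf((float)a);   // deliberately coarse seed
    printf("seed error    = %.3e\n", fabs(a * x - sqrt(a)));
    printf("refined error = %.3e\n", fabs(refine_sqrt(a, x) - sqrt(a)));
    return 0;
}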
8eb794c6be5ad1273d64ea94f8daf0fee208b405.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}

__global__ void vector_cbrt (const int n,
                             const REAL* x, const int offset_x, const int stride_x,
                             REAL* y, const int offset_y, const int stride_y) {

    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid < n) {
        y[offset_y + gid * stride_y] = CAST(cbrt)(x[offset_x + gid * stride_x]);
    }
}
8eb794c6be5ad1273d64ea94f8daf0fee208b405.cu
#include "includes.h" extern "C" { #ifndef REAL #define REAL float #endif #ifndef CAST #define CAST(fun) fun ## f #endif #ifndef REAL2o3 #define REAL2o3 (REAL)0.6666666666666667 #endif #ifndef REAL3o2 #define REAL3o2 (REAL)1.5 #endif } __global__ void vector_cbrt (const int n, const REAL* x, const int offset_x, const int stride_x, REAL* y, const int offset_y, const int stride_y) { const int gid = blockIdx.x * blockDim.x + threadIdx.x; if (gid < n) { y[offset_y + gid * stride_y] = CAST(cbrt)(x[offset_x + gid * stride_x]); } }
50b57d13490fe04f406ee22d0543c5aa517b39c9.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <hip/hip_runtime.h> #include <time.h> #include <sys/types.h> #include <sys/time.h> #define VERBOSE 1 __global__ void checkMatchOnDevice(char *fileBuffer, char* searchString, int* matchArray, int numBytes,size_t searchSize,int* matchStartArray, int* matchEndArray) { extern __shared__ int sdata[]; int idx = blockIdx.x*blockDim.x + threadIdx.x; int rangeStart; int rangeEnd; rangeStart = idx*numBytes; rangeEnd = rangeStart + numBytes; int i,j; int numMatches; int foundMatch; int firstMatch = 1; int firstMatchIndex = -1; int lastMatchIndex = -1; unsigned int tid = threadIdx.x; for(numMatches=0, i = rangeStart; i < rangeEnd; i++) { foundMatch = 1; for(j = 0; j < searchSize; j++) { int index = i+j; if(fileBuffer[index] != searchString[j]) { foundMatch = 0; break; } } if(foundMatch) { numMatches++; if(firstMatch) { firstMatchIndex = i; firstMatch = 0; } lastMatchIndex = i+searchSize; i+=searchSize-1; } } matchStartArray[idx] = firstMatchIndex; matchEndArray[idx] = lastMatchIndex; //matchArray[idx] = numMatches; sdata[tid] = numMatches; __syncthreads(); for(unsigned int s=1;s<blockDim.x;s *= 2){ int index = s*2*tid; if((index+s) < blockDim.x){ sdata[index] += sdata[index + s]; } __syncthreads(); } if(tid==0) matchArray[blockIdx.x]=sdata[0]; } __global__ void cumulateOnDevice(int* matchArray, int noOfThreads, int* outArray) { extern __shared__ int sdata2[]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if(i<noOfThreads){ sdata2[tid] = matchArray[i]; //__syncthreads(); for(unsigned int s=1;s<blockDim.x;s*=2){ int index = s*2*tid; __syncthreads(); if((index+s) < noOfThreads){ sdata2[index] += sdata2[index+s]; } // __syncthreads(); } if(tid == 0) matchArray[blockIdx.x] = sdata2[0]; } } int main(int argc, char *argv[]) { struct timeval cpuStart,cpuEnd; char* searchString = (char*)malloc(sizeof(char*)*80); char* fileBuffer = (char*)malloc(sizeof(char*)*10000000); int nBlocks; int threadsPerBlock; if(argc != 4) { printf("Usage: stringSearch \"Key\" numBlocks threadsPerBlock < inputFile\n"); exit(0); } else { searchString = argv[1]; nBlocks = atoi(argv[2]); threadsPerBlock = atoi(argv[3]); #ifdef VERBOSE printf("Search String: %s\n",searchString); #endif int ptr; for(ptr = 0; !feof(stdin);) { ptr+= fread(&(fileBuffer[ptr]),1,1,stdin); } char *deviceFileBuffer; // pointer to device memory char *deviceSearchBuffer; // pointer to device memory int *matchArray; int *outArray; int *hostMatchArray; int *hostMatchStartArray; int *hostMatchEndArray; int *matchStartArray; int *matchEndArray; int fileSize = ptr; //printf("FileSize: %d",strlen(fileBuffer)); size_t searchSize = strlen(searchString); hipEvent_t start, stop; float time; hipEventCreate(&start); hipEventCreate(&stop); hipDeviceSynchronize(); gettimeofday(&cpuStart, NULL); // allocate file buffer space on device hipMalloc((void **) &deviceFileBuffer, fileSize); // allocate search string space on device hipMalloc((void **) &deviceSearchBuffer, searchSize); // copy data from host to device hipMemcpy(deviceFileBuffer, fileBuffer, fileSize, hipMemcpyHostToDevice); hipMemcpy(deviceSearchBuffer, searchString, searchSize, hipMemcpyHostToDevice); // do calculation on device: // Part 1 of 2. 
Compute execution configuration size_t numThreads = nBlocks*threadsPerBlock; int numBytesPerThread = fileSize/numThreads; //Allocate match array space on device hipMalloc((void **) &matchArray, sizeof(int)*numThreads); hipMalloc((void **) &outArray, sizeof(int)*numThreads); hostMatchArray = (int*)malloc(sizeof(int)*numThreads); hipMalloc((void **) &matchStartArray, sizeof(int)*numThreads); hostMatchStartArray = (int*)malloc(sizeof(int)*numThreads); hipMalloc((void **) &matchEndArray, sizeof(int)*numThreads); hostMatchEndArray = (int*)malloc(sizeof(int)*numThreads); //Init array to 0 int i; for(i = 0; i < numThreads; i++) { hostMatchArray[i] = 0; hostMatchStartArray[i] = -1; hostMatchEndArray[i] = -1; } hipMemcpy(matchArray, hostMatchArray, numThreads, hipMemcpyHostToDevice); hipMemcpy(matchStartArray, hostMatchStartArray, numThreads, hipMemcpyHostToDevice); hipMemcpy(matchEndArray, hostMatchEndArray, numThreads, hipMemcpyHostToDevice); //printf("Number of threads:%d, Number of blocks:%d, Num Threads Per Block:%d, Num Bytes Per Thread:%d\n",numThreads,nBlocks,threadsPerBlock,numBytesPerThread); // Part 2 of 2. Call call checkMatchOnDevice kernel hipEventRecord( start, 0 ); hipLaunchKernelGGL(( checkMatchOnDevice) , dim3(nBlocks), dim3(threadsPerBlock) , threadsPerBlock*sizeof(int), 0, deviceFileBuffer, deviceSearchBuffer, matchArray,numBytesPerThread,searchSize,matchStartArray,matchEndArray); int newNBlocks=nBlocks,newNThreads;//printf("\nNew Blocks:%d",nBlocks); hipDeviceSynchronize(); while(newNBlocks > 1){ newNThreads = newNBlocks; newNBlocks = (newNBlocks/threadsPerBlock)+1; hipLaunchKernelGGL(( cumulateOnDevice) , dim3(newNBlocks), dim3(threadsPerBlock) ,threadsPerBlock * sizeof(int), 0, matchArray,newNThreads,outArray); } hipEventRecord( stop, 0 ); hipEventSynchronize( stop ); hipEventElapsedTime( &time, start, stop ); hipEventDestroy( start ); hipEventDestroy( stop ); hipDeviceSynchronize(); // Retrieve result from device and store in host array hipMemcpy(hostMatchArray, matchArray, sizeof(int)*numThreads, hipMemcpyDeviceToHost); hipMemcpy(hostMatchStartArray, matchStartArray, sizeof(int)*numThreads, hipMemcpyDeviceToHost); hipMemcpy(hostMatchEndArray, matchEndArray, sizeof(int)*numThreads, hipMemcpyDeviceToHost); int total = 0; //for(i = 0; i < numThreads; i++) //{ //total += hostMatchArray[i]; //printf("%d)%d\n",i,hostMatchArray[i]); //} total = hostMatchArray [0]; //Overlap check, commented out for hw2 /* for(i = 0; i < numThreads; i++) { if(hostMatchEndArray[i] != -1 && hostMatchStartArray[i+1] != -1) { if(hostMatchEndArray[i] - hostMatchStartArray[i+1] < 0) total--; } //printf("%d)%d\n",i,hostMatchStartArray[i]); //printf("start:%d,end:%d\n",hostMatchStartArray[i],hostMatchEndArray[i]); }*/ gettimeofday(&cpuEnd, NULL); // float totalTime = (cpuEnd - cpuStart); //printf("Number of threads:%d, Number of blocks:%d, Num Threads Per Block:%d, Num Bytes Per Thread:%d\n",numThreads,nBlocks,threadsPerBlock,numBytesPerThread); //printf("numOfThread: %4d matchCount: %4d CPUrunningTime: %8ld\n", blocksize, thread_num, sum, q.tv_usec - p.tv_usec + (q.tv_sec-p.tv_sec)*1000000); #ifdef VERBOSE printf("Completed Successfully! 
Number of blocks:%d Number of threads per block:%d Num Threads: %d Matches:%d CPU Time:%8ld GPU Time:%f\n\n",nBlocks,threadsPerBlock,numThreads,total,cpuEnd.tv_usec - cpuStart.tv_usec + (cpuEnd.tv_sec - cpuStart.tv_sec ),time); #else printf("%d %f\n\n",numThreads,time); //printf("%d %8ld\n\n",numThreads,cpuEnd.tv_usec - cpuStart.tv_usec + (cpuEnd.tv_usec - cpuStart.tv_usec) ); #endif hipFree(matchArray); hipFree(outArray); } }
50b57d13490fe04f406ee22d0543c5aa517b39c9.cu
#include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <cuda.h> #include <time.h> #include <sys/types.h> #include <sys/time.h> #define VERBOSE 1 __global__ void checkMatchOnDevice(char *fileBuffer, char* searchString, int* matchArray, int numBytes,size_t searchSize,int* matchStartArray, int* matchEndArray) { extern __shared__ int sdata[]; int idx = blockIdx.x*blockDim.x + threadIdx.x; int rangeStart; int rangeEnd; rangeStart = idx*numBytes; rangeEnd = rangeStart + numBytes; int i,j; int numMatches; int foundMatch; int firstMatch = 1; int firstMatchIndex = -1; int lastMatchIndex = -1; unsigned int tid = threadIdx.x; for(numMatches=0, i = rangeStart; i < rangeEnd; i++) { foundMatch = 1; for(j = 0; j < searchSize; j++) { int index = i+j; if(fileBuffer[index] != searchString[j]) { foundMatch = 0; break; } } if(foundMatch) { numMatches++; if(firstMatch) { firstMatchIndex = i; firstMatch = 0; } lastMatchIndex = i+searchSize; i+=searchSize-1; } } matchStartArray[idx] = firstMatchIndex; matchEndArray[idx] = lastMatchIndex; //matchArray[idx] = numMatches; sdata[tid] = numMatches; __syncthreads(); for(unsigned int s=1;s<blockDim.x;s *= 2){ int index = s*2*tid; if((index+s) < blockDim.x){ sdata[index] += sdata[index + s]; } __syncthreads(); } if(tid==0) matchArray[blockIdx.x]=sdata[0]; } __global__ void cumulateOnDevice(int* matchArray, int noOfThreads, int* outArray) { extern __shared__ int sdata2[]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if(i<noOfThreads){ sdata2[tid] = matchArray[i]; //__syncthreads(); for(unsigned int s=1;s<blockDim.x;s*=2){ int index = s*2*tid; __syncthreads(); if((index+s) < noOfThreads){ sdata2[index] += sdata2[index+s]; } // __syncthreads(); } if(tid == 0) matchArray[blockIdx.x] = sdata2[0]; } } int main(int argc, char *argv[]) { struct timeval cpuStart,cpuEnd; char* searchString = (char*)malloc(sizeof(char*)*80); char* fileBuffer = (char*)malloc(sizeof(char*)*10000000); int nBlocks; int threadsPerBlock; if(argc != 4) { printf("Usage: stringSearch \"Key\" numBlocks threadsPerBlock < inputFile\n"); exit(0); } else { searchString = argv[1]; nBlocks = atoi(argv[2]); threadsPerBlock = atoi(argv[3]); #ifdef VERBOSE printf("Search String: %s\n",searchString); #endif int ptr; for(ptr = 0; !feof(stdin);) { ptr+= fread(&(fileBuffer[ptr]),1,1,stdin); } char *deviceFileBuffer; // pointer to device memory char *deviceSearchBuffer; // pointer to device memory int *matchArray; int *outArray; int *hostMatchArray; int *hostMatchStartArray; int *hostMatchEndArray; int *matchStartArray; int *matchEndArray; int fileSize = ptr; //printf("FileSize: %d",strlen(fileBuffer)); size_t searchSize = strlen(searchString); cudaEvent_t start, stop; float time; cudaEventCreate(&start); cudaEventCreate(&stop); cudaThreadSynchronize(); gettimeofday(&cpuStart, NULL); // allocate file buffer space on device cudaMalloc((void **) &deviceFileBuffer, fileSize); // allocate search string space on device cudaMalloc((void **) &deviceSearchBuffer, searchSize); // copy data from host to device cudaMemcpy(deviceFileBuffer, fileBuffer, fileSize, cudaMemcpyHostToDevice); cudaMemcpy(deviceSearchBuffer, searchString, searchSize, cudaMemcpyHostToDevice); // do calculation on device: // Part 1 of 2. 
Compute execution configuration size_t numThreads = nBlocks*threadsPerBlock; int numBytesPerThread = fileSize/numThreads; //Allocate match array space on device cudaMalloc((void **) &matchArray, sizeof(int)*numThreads); cudaMalloc((void **) &outArray, sizeof(int)*numThreads); hostMatchArray = (int*)malloc(sizeof(int)*numThreads); cudaMalloc((void **) &matchStartArray, sizeof(int)*numThreads); hostMatchStartArray = (int*)malloc(sizeof(int)*numThreads); cudaMalloc((void **) &matchEndArray, sizeof(int)*numThreads); hostMatchEndArray = (int*)malloc(sizeof(int)*numThreads); //Init array to 0 int i; for(i = 0; i < numThreads; i++) { hostMatchArray[i] = 0; hostMatchStartArray[i] = -1; hostMatchEndArray[i] = -1; } cudaMemcpy(matchArray, hostMatchArray, numThreads, cudaMemcpyHostToDevice); cudaMemcpy(matchStartArray, hostMatchStartArray, numThreads, cudaMemcpyHostToDevice); cudaMemcpy(matchEndArray, hostMatchEndArray, numThreads, cudaMemcpyHostToDevice); //printf("Number of threads:%d, Number of blocks:%d, Num Threads Per Block:%d, Num Bytes Per Thread:%d\n",numThreads,nBlocks,threadsPerBlock,numBytesPerThread); // Part 2 of 2. Call call checkMatchOnDevice kernel cudaEventRecord( start, 0 ); checkMatchOnDevice <<< nBlocks, threadsPerBlock , threadsPerBlock*sizeof(int)>>> (deviceFileBuffer, deviceSearchBuffer, matchArray,numBytesPerThread,searchSize,matchStartArray,matchEndArray); int newNBlocks=nBlocks,newNThreads;//printf("\nNew Blocks:%d",nBlocks); cudaThreadSynchronize(); while(newNBlocks > 1){ newNThreads = newNBlocks; newNBlocks = (newNBlocks/threadsPerBlock)+1; cumulateOnDevice <<< newNBlocks, threadsPerBlock ,threadsPerBlock * sizeof(int)>>> (matchArray,newNThreads,outArray); } cudaEventRecord( stop, 0 ); cudaEventSynchronize( stop ); cudaEventElapsedTime( &time, start, stop ); cudaEventDestroy( start ); cudaEventDestroy( stop ); cudaThreadSynchronize(); // Retrieve result from device and store in host array cudaMemcpy(hostMatchArray, matchArray, sizeof(int)*numThreads, cudaMemcpyDeviceToHost); cudaMemcpy(hostMatchStartArray, matchStartArray, sizeof(int)*numThreads, cudaMemcpyDeviceToHost); cudaMemcpy(hostMatchEndArray, matchEndArray, sizeof(int)*numThreads, cudaMemcpyDeviceToHost); int total = 0; //for(i = 0; i < numThreads; i++) //{ //total += hostMatchArray[i]; //printf("%d)%d\n",i,hostMatchArray[i]); //} total = hostMatchArray [0]; //Overlap check, commented out for hw2 /* for(i = 0; i < numThreads; i++) { if(hostMatchEndArray[i] != -1 && hostMatchStartArray[i+1] != -1) { if(hostMatchEndArray[i] - hostMatchStartArray[i+1] < 0) total--; } //printf("%d)%d\n",i,hostMatchStartArray[i]); //printf("start:%d,end:%d\n",hostMatchStartArray[i],hostMatchEndArray[i]); }*/ gettimeofday(&cpuEnd, NULL); // float totalTime = (cpuEnd - cpuStart); //printf("Number of threads:%d, Number of blocks:%d, Num Threads Per Block:%d, Num Bytes Per Thread:%d\n",numThreads,nBlocks,threadsPerBlock,numBytesPerThread); //printf("numOfThread: %4d matchCount: %4d CPUrunningTime: %8ld\n", blocksize, thread_num, sum, q.tv_usec - p.tv_usec + (q.tv_sec-p.tv_sec)*1000000); #ifdef VERBOSE printf("Completed Successfully! 
Number of blocks:%d Number of threads per block:%d Num Threads: %d Matches:%d CPU Time:%8ld GPU Time:%f\n\n",nBlocks,threadsPerBlock,numThreads,total,cpuEnd.tv_usec - cpuStart.tv_usec + (cpuEnd.tv_sec - cpuStart.tv_sec ),time); #else printf("%d %f\n\n",numThreads,time); //printf("%d %8ld\n\n",numThreads,cpuEnd.tv_usec - cpuStart.tv_usec + (cpuEnd.tv_usec - cpuStart.tv_usec) ); #endif cudaFree(matchArray); cudaFree(outArray); } }
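Both kernels in the string-search file above accumulate per-thread match counts in shared memory with interleaved addressing (index = s*2*tid). The same block-level sum is more often written with sequential addressing, which keeps the active threads contiguous and avoids shared-memory bank conflicts; a minimal, self-contained version is sketched below. block_sum is an illustrative name, and the launch needs blockDim.x * sizeof(int) bytes of dynamic shared memory.

__global__ void block_sum(const int *in, int *block_sums, int n) {
    extern __shared__ int sdata[];
    const unsigned int tid = threadIdx.x;
    const unsigned int i   = blockIdx.x * blockDim.x + threadIdx.x;

    sdata[tid] = (i < n) ? in[i] : 0;   // pad the tail of the input with zeros
    __syncthreads();

    // halve the active range each step until one value remains
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s) sdata[tid] += sdata[tid + s];
        __syncthreads();
    }
    if (tid == 0) block_sums[blockIdx.x] = sdata[0];   // one partial sum per block
}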
1b58c37c8d227e72d66b52a62bd0aa81cce55d69.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************** * * Copyright (C) 2015 Culham Centre for Fusion Energy, * United Kingdom Atomic Energy Authority, Oxfordshire OX14 3DB, UK * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ******************************************************************************** * * Program: SPILADY - A Spin-Lattice Dynamics Simulation Program * Version: 1.0 * Date: Aug 2015 * Author: Pui-Wai (Leo) MA * Contact: [email protected] * Address: Culham Centre for Fusion Energy, OX14 3DB, United Kingdom * ********************************************************************************/ #if (defined MD || defined SLDH || defined SLDHL || defined SLDNC) && defined GPU #include "spilady.h" #include "prototype_GPU.h" __global__ void LP1ChPr_part1(struct varGPU *var_ptr_d, struct atom_struct *first_atom_ptr_d, double *sum_ke_ptr_d); __global__ void LP1ChPr_part2(double *sum_ke_ptr_d); void check_pressure_GPU(int current_step){ double sum_ke; double *sum_ke_ptr_d; hipMalloc((void**)&sum_ke_ptr_d, no_of_MP*no_of_threads*sizeof(double)); hipLaunchKernelGGL(( LP1ChPr_part1), dim3(no_of_MP), dim3(no_of_threads), 0, 0, var_ptr_d, first_atom_ptr_d, sum_ke_ptr_d); hipLaunchKernelGGL(( LP1ChPr_part2), dim3(no_of_MP), dim3(no_of_threads), 0, 0, sum_ke_ptr_d); hipMemcpy(&sum_ke, sum_ke_ptr_d, sizeof(double), hipMemcpyDeviceToHost); double tmp = 2e0/3e0*sum_ke/natom; pressure0 = density*(tmp-virial/(3e0*natom)); pressure0 *=160.217653e0; char out_prs_front[] = "prs-"; char out_prs[256]; strcpy(out_prs,out_prs_front); strcat(out_prs,out_body); strcat(out_prs,".dat"); ofstream out_file(out_prs,ios::app); out_file << setiosflags(ios::scientific) << setprecision(15); out_file << current_step << " " << total_time << " " << d.xx << " " << d.yx << " " << d.yy << " " << d.zx << " " << d.zy << " " << d.zz << " " << density << " " << pressure0 << '\n'; out_file.close(); hipFree(sum_ke_ptr_d); } void check_pressure(int current_step){ check_pressure_GPU(current_step); } __global__ void LP1ChPr_part1(struct varGPU *var_ptr_d, struct atom_struct *first_atom_ptr_d, double *sum_ke_ptr_d){ int i = blockIdx.x*blockDim.x + threadIdx.x; *(sum_ke_ptr_d + i) = 0.0; int area = blockDim.x*gridDim.x; int k = (var_ptr_d->natom - 1)/area + 1; for (int j = 0; j < k; ++j){ int m = i + j*area; if (m < var_ptr_d->natom) { *(sum_ke_ptr_d + i) += (first_atom_ptr_d + m)->ke; } } __syncthreads(); } __global__ void LP1ChPr_part2(double *sum_ke_ptr_d){ int depth = blockIdx.x*blockDim.x; if (threadIdx.x == 0){ for (int j = 1; j < blockDim.x; ++j) *(sum_ke_ptr_d + depth) += *(sum_ke_ptr_d + depth + j); } __threadfence(); if (blockIdx.x == 0 && threadIdx.x == 0){ for (int j = 1; j < gridDim.x; ++j) *sum_ke_ptr_d += *(sum_ke_ptr_d + j*blockDim.x); } } #endif
1b58c37c8d227e72d66b52a62bd0aa81cce55d69.cu
/******************************************************************************** * * Copyright (C) 2015 Culham Centre for Fusion Energy, * United Kingdom Atomic Energy Authority, Oxfordshire OX14 3DB, UK * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ******************************************************************************** * * Program: SPILADY - A Spin-Lattice Dynamics Simulation Program * Version: 1.0 * Date: Aug 2015 * Author: Pui-Wai (Leo) MA * Contact: [email protected] * Address: Culham Centre for Fusion Energy, OX14 3DB, United Kingdom * ********************************************************************************/ #if (defined MD || defined SLDH || defined SLDHL || defined SLDNC) && defined GPU #include "spilady.h" #include "prototype_GPU.h" __global__ void LP1ChPr_part1(struct varGPU *var_ptr_d, struct atom_struct *first_atom_ptr_d, double *sum_ke_ptr_d); __global__ void LP1ChPr_part2(double *sum_ke_ptr_d); void check_pressure_GPU(int current_step){ double sum_ke; double *sum_ke_ptr_d; cudaMalloc((void**)&sum_ke_ptr_d, no_of_MP*no_of_threads*sizeof(double)); LP1ChPr_part1<<<no_of_MP, no_of_threads>>>(var_ptr_d, first_atom_ptr_d, sum_ke_ptr_d); LP1ChPr_part2<<<no_of_MP, no_of_threads>>>(sum_ke_ptr_d); cudaMemcpy(&sum_ke, sum_ke_ptr_d, sizeof(double), cudaMemcpyDeviceToHost); double tmp = 2e0/3e0*sum_ke/natom; pressure0 = density*(tmp-virial/(3e0*natom)); pressure0 *=160.217653e0; char out_prs_front[] = "prs-"; char out_prs[256]; strcpy(out_prs,out_prs_front); strcat(out_prs,out_body); strcat(out_prs,".dat"); ofstream out_file(out_prs,ios::app); out_file << setiosflags(ios::scientific) << setprecision(15); out_file << current_step << " " << total_time << " " << d.xx << " " << d.yx << " " << d.yy << " " << d.zx << " " << d.zy << " " << d.zz << " " << density << " " << pressure0 << '\n'; out_file.close(); cudaFree(sum_ke_ptr_d); } void check_pressure(int current_step){ check_pressure_GPU(current_step); } __global__ void LP1ChPr_part1(struct varGPU *var_ptr_d, struct atom_struct *first_atom_ptr_d, double *sum_ke_ptr_d){ int i = blockIdx.x*blockDim.x + threadIdx.x; *(sum_ke_ptr_d + i) = 0.0; int area = blockDim.x*gridDim.x; int k = (var_ptr_d->natom - 1)/area + 1; for (int j = 0; j < k; ++j){ int m = i + j*area; if (m < var_ptr_d->natom) { *(sum_ke_ptr_d + i) += (first_atom_ptr_d + m)->ke; } } __syncthreads(); } __global__ void LP1ChPr_part2(double *sum_ke_ptr_d){ int depth = blockIdx.x*blockDim.x; if (threadIdx.x == 0){ for (int j = 1; j < blockDim.x; ++j) *(sum_ke_ptr_d + depth) += *(sum_ke_ptr_d + depth + j); } __threadfence(); if (blockIdx.x == 0 && threadIdx.x == 0){ for (int j = 1; j < gridDim.x; ++j) *sum_ke_ptr_d += *(sum_ke_ptr_d + j*blockDim.x); } } #endif
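In LP1ChPr_part2 above, block 0 reads the other blocks' partial sums after only a __threadfence(), which orders memory traffic but does not wait for other blocks to finish their serial sums. A hedged sketch of the same reduction split into two launches so no block depends on another inside one kernel (kernel and parameter names here are illustrative, not SPILADY's):

/* Hedged sketch, not the SPILADY implementation: partial sums live in one slot
 * per thread (blockDim.x * gridDim.x entries), matching the layout filled by
 * LP1ChPr_part1 above. */
__global__ void reduce_per_block(double *partial)
{
    int base = blockIdx.x * blockDim.x;
    if (threadIdx.x == 0)
        for (int j = 1; j < blockDim.x; ++j)
            partial[base] += partial[base + j];
}

__global__ void reduce_across_blocks(double *partial, int stride, int nblocks)
{
    if (blockIdx.x == 0 && threadIdx.x == 0)
        for (int j = 1; j < nblocks; ++j)
            partial[0] += partial[j * stride];
}

/* usage:
 *   reduce_per_block<<<no_of_MP, no_of_threads>>>(sum_ke_ptr_d);
 *   reduce_across_blocks<<<1, 1>>>(sum_ke_ptr_d, no_of_threads, no_of_MP);
 */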
9cfd18a51258912d6748d5045a5c2fb247c151ad.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation. * Any use, reproduction, disclosure, or distribution of this software * and related documentation without an express license agreement from * NVIDIA Corporation is strictly prohibited. * * Please refer to the applicable NVIDIA end user license agreement (EULA) * associated with this source code for terms and conditions that govern * your use of this NVIDIA software. * */ #define BLOCKDIM 16 __global__ void transpose_kernel(Real_t * idata, Real_t * odata, int width, int height) { __shared__ Real_t block[BLOCKDIM][BLOCKDIM+1]; // read the matrix tile into shared memory unsigned int xIndex = blockIdx.x * BLOCKDIM + threadIdx.x; unsigned int yIndex = blockIdx.y * BLOCKDIM + threadIdx.y; if((xIndex < width) && (yIndex < height)) { unsigned int index_in = yIndex * width + xIndex; block[threadIdx.y][threadIdx.x] = idata[index_in]; } __syncthreads(); // write the transposed matrix tile to global memory xIndex = blockIdx.y * BLOCKDIM + threadIdx.x; yIndex = blockIdx.x * BLOCKDIM + threadIdx.y; if((xIndex < height) && (yIndex < width)) { unsigned int index_out = yIndex * height + xIndex; odata[index_out] = block[threadIdx.x][threadIdx.y]; } }
9cfd18a51258912d6748d5045a5c2fb247c151ad.cu
/* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation. * Any use, reproduction, disclosure, or distribution of this software * and related documentation without an express license agreement from * NVIDIA Corporation is strictly prohibited. * * Please refer to the applicable NVIDIA end user license agreement (EULA) * associated with this source code for terms and conditions that govern * your use of this NVIDIA software. * */ #define BLOCKDIM 16 __global__ void transpose_kernel(Real_t * idata, Real_t * odata, int width, int height) { __shared__ Real_t block[BLOCKDIM][BLOCKDIM+1]; // read the matrix tile into shared memory unsigned int xIndex = blockIdx.x * BLOCKDIM + threadIdx.x; unsigned int yIndex = blockIdx.y * BLOCKDIM + threadIdx.y; if((xIndex < width) && (yIndex < height)) { unsigned int index_in = yIndex * width + xIndex; block[threadIdx.y][threadIdx.x] = idata[index_in]; } __syncthreads(); // write the transposed matrix tile to global memory xIndex = blockIdx.y * BLOCKDIM + threadIdx.x; yIndex = blockIdx.x * BLOCKDIM + threadIdx.y; if((xIndex < height) && (yIndex < width)) { unsigned int index_out = yIndex * height + xIndex; odata[index_out] = block[threadIdx.x][threadIdx.y]; } }
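The BLOCKDIM+1 padding on the shared tile above avoids shared-memory bank conflicts when the tile is read column-wise during the write-out. A hedged usage sketch for the launch configuration (the wrapper name and the assumption that Real_t is supplied by the including translation unit are mine):

/* Hedged sketch, not part of the original file: host-side launch wrapper for
 * transpose_kernel; Real_t is assumed to be defined by the including code. */
static void transpose(Real_t *d_in, Real_t *d_out, int width, int height)
{
    dim3 block(BLOCKDIM, BLOCKDIM);
    /* round up so partial tiles at the right and bottom edges are covered;
     * the kernel's bounds checks handle the out-of-range threads */
    dim3 grid((width + BLOCKDIM - 1) / BLOCKDIM,
              (height + BLOCKDIM - 1) / BLOCKDIM);
    transpose_kernel<<<grid, block>>>(d_in, d_out, width, height);
}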
b9a88fab2926ed1b36a719a169a4fd1caf846c55.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>

__global__ void test(int* dataD, int* sumD)
{
    for (int i = 0; i < 1000000; i++) {
        int x = dataD[0];
        int y = dataD[1];
        int z = dataD[2];
        int sum = x+y+z;
        *sumD += sum;
    }
}

int main()
{
    int* dataH = (int*)malloc(sizeof(int)*10);
    for (int i = 0; i < 10; i++) {
        dataH[i] = i;
    }
    int* dataD;
    hipMalloc((void**)&dataD, sizeof(int)*10);
    hipMemcpy(dataD, dataH, sizeof(int)*10, hipMemcpyHostToDevice);
    int* sumH = (int*)malloc(sizeof(int));
    int* sumD;
    hipMalloc((void**)&sumD, sizeof(int));
    hipMemset(sumD, 0, sizeof(int)); // zero the accumulator before the kernel adds to it
    hipLaunchKernelGGL(( test), dim3(1),dim3(1), 0, 0, dataD, sumD);
    hipMemcpy(sumH, sumD, sizeof(int), hipMemcpyDeviceToHost);
    printf("%d\n", *sumH);
    hipFree(dataD);
    hipFree(sumD);
    free(dataH);
    free(sumH);
}
b9a88fab2926ed1b36a719a169a4fd1caf846c55.cu
#include <stdio.h>
#include <stdlib.h>

__global__ void test(int* dataD, int* sumD)
{
    for (int i = 0; i < 1000000; i++) {
        int x = dataD[0];
        int y = dataD[1];
        int z = dataD[2];
        int sum = x+y+z;
        *sumD += sum;
    }
}

int main()
{
    int* dataH = (int*)malloc(sizeof(int)*10);
    for (int i = 0; i < 10; i++) {
        dataH[i] = i;
    }
    int* dataD;
    cudaMalloc((void**)&dataD, sizeof(int)*10);
    cudaMemcpy(dataD, dataH, sizeof(int)*10, cudaMemcpyHostToDevice);
    int* sumH = (int*)malloc(sizeof(int));
    int* sumD;
    cudaMalloc((void**)&sumD, sizeof(int));
    cudaMemset(sumD, 0, sizeof(int)); // zero the accumulator before the kernel adds to it
    test<<<1,1>>>(dataD, sumD);
    cudaMemcpy(sumH, sumD, sizeof(int), cudaMemcpyDeviceToHost);
    printf("%d\n", *sumH);
    cudaFree(dataD);
    cudaFree(sumD);
    free(dataH);
    free(sumH);
}
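The toy program above checks none of its runtime calls. A minimal error-check macro, kept as a hedged sketch (the macro name is mine, not part of the file):

#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

/* Hedged sketch, not part of the original program: abort with a readable
 * message when a CUDA runtime call fails. */
#define CUDA_CHECK(call)                                                    \
    do {                                                                    \
        cudaError_t err_ = (call);                                          \
        if (err_ != cudaSuccess) {                                          \
            fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,              \
                    cudaGetErrorString(err_));                              \
            exit(EXIT_FAILURE);                                             \
        }                                                                   \
    } while (0)

/* usage: CUDA_CHECK(cudaMemcpy(dataD, dataH, sizeof(int)*10, cudaMemcpyHostToDevice)); */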
dc7cbf0fdac319948111843e2d0d779a6e88bfcd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * simulate a cellular automaton with periodic boundaries (torus-like) * serial version * * (c) 2016 Felix Kubicek (Cuda port) * (c) 2016 Steffen Christgau (C99 port, modularization) * (c) 1996,1997 Peter Sanders, Ingo Boesnach (original source) * * command line arguments: * #1: Number of lines * #2: Number of iterations to be simulated * */ #include <stdio.h> #include <stdlib.h> #include <assert.h> #include "ca_common.h" #ifdef USE_2D_MAPPING #define BLOCK_SIZE_X 32 #define BLOCK_SIZE_Y 4 #else #define BLOCK_SIZE 128 #endif /* --------------------- CA simulation -------------------------------- */ /* annealing rule from ChoDro96 page 34 * the table is used to map the number of nonzero * states in the neighborhood to the new state */ __constant__ static const cell_state_t anneal[10] = {0, 0, 0, 0, 1, 0, 1, 1, 1, 1}; /* make one simulation iteration with lines lines. * old configuration is in from, new one is written to to. */ __global__ static void simulate(line_t_cuda *from, line_t_cuda *to, int lines) { #ifdef USE_2D_MAPPING int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int x_prev = ((x - 1) + XSIZE) % XSIZE; int y_prev = ((y - 1) + lines) % lines; int x_next = (x + 1) % XSIZE; int y_next = (y + 1) % lines; to[y][x] = transition_cuda(from, x_prev, y_prev, x, y, x_next, y_next); #else int gid = threadIdx.x + blockIdx.x * blockDim.x; int grid_size = blockDim.x * gridDim.x; for (int i = gid; i < lines * XSIZE; i+= grid_size) { int x = i % XSIZE; int y = i / XSIZE; int x_prev = ((x - 1) + XSIZE) % XSIZE; int y_prev = ((y - 1) + lines) % lines; int x_next = (x + 1) % XSIZE; int y_next = (y + 1) % lines; to[y][x] = transition_cuda(from, x_prev, y_prev, x, y, x_next, y_next); } #endif } /* --------------------- measurement ---------------------------------- */ int main(int argc, char** argv) { int lines, its; ca_init(argc, argv, &lines, &its); line_t_cuda *from, *to, *from_d, *to_d; line_t *verify_field; MALLOC_ERROR_CHECK(from = (line_t_cuda *) calloc(lines, sizeof(line_t_cuda))); MALLOC_ERROR_CHECK(to = (line_t_cuda *) calloc(lines, sizeof(line_t_cuda))); MALLOC_ERROR_CHECK(verify_field = (line_t *) calloc((lines + 2), sizeof(line_t))); CUDA_ERROR_CHECK(hipMalloc((void **) &from_d, lines * sizeof(line_t_cuda))); CUDA_ERROR_CHECK(hipMalloc((void **) &to_d, lines * sizeof(line_t_cuda))); CUDA_ERROR_CHECK(hipMalloc((void **) &to_d, lines * sizeof(line_t_cuda))); ca_init_config_cuda(from, lines, 0); CUDA_ERROR_CHECK(hipMemcpy((void *) from_d, (void *) from, lines * sizeof(line_t_cuda), hipMemcpyHostToDevice)); CUDA_ERROR_CHECK(hipMemcpy((void *) to_d, (void *) to, lines * sizeof(line_t_cuda), hipMemcpyHostToDevice)); /* int numBlocks; int blockSize; #ifdef USE_2D_MAPPING blockSize = BLOCK_SIZE_X * BLOCK_SIZE_Y; #else blockSize = BLOCK_SIZE; #endif hipOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks, simulate, blockSize, 0); printf("Occupancy: %d\n", numBlocks); */ #ifdef USE_2D_MAPPING //assert cuda mapping fits (otherwise wrong hash) assert((XSIZE % BLOCK_SIZE_X) == 0); assert((lines % BLOCK_SIZE_Y) == 0); dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y); dim3 dimGrid(XSIZE/dimBlock.x, lines/dimBlock.y); #endif TIME_GET(sim_start); for (int i = 0; i < its; i++) { #ifdef USE_2D_MAPPING hipLaunchKernelGGL(( simulate) , dim3(dimGrid), dim3(dimBlock), 0, 0, from_d, to_d, lines); #else hipLaunchKernelGGL(( simulate) , dim3(lines), dim3(BLOCK_SIZE), 0, 0, from_d, to_d, 
lines); #endif line_t_cuda *temp = from_d; from_d = to_d; to_d = temp; } hipDeviceSynchronize(); TIME_GET(sim_stop); CUDA_ERROR_CHECK(hipPeekAtLastError()); CUDA_ERROR_CHECK(hipMemcpy((void *) from, (void *) from_d, lines * sizeof(line_t_cuda), hipMemcpyDeviceToHost)); for(int y = 1; y <= lines; y++) { memcpy((void *) &verify_field[y][1], (void *) &from[y-1][0], XSIZE); } ca_hash_and_report(verify_field + 1, lines, TIME_DIFF(sim_start, sim_stop)); free(from); free(to); free(verify_field); CUDA_ERROR_CHECK(hipFree(from_d)); CUDA_ERROR_CHECK(hipFree(to_d)); return EXIT_SUCCESS; }
dc7cbf0fdac319948111843e2d0d779a6e88bfcd.cu
/* * simulate a cellular automaton with periodic boundaries (torus-like) * serial version * * (c) 2016 Felix Kubicek (Cuda port) * (c) 2016 Steffen Christgau (C99 port, modularization) * (c) 1996,1997 Peter Sanders, Ingo Boesnach (original source) * * command line arguments: * #1: Number of lines * #2: Number of iterations to be simulated * */ #include <stdio.h> #include <stdlib.h> #include <assert.h> #include "ca_common.h" #ifdef USE_2D_MAPPING #define BLOCK_SIZE_X 32 #define BLOCK_SIZE_Y 4 #else #define BLOCK_SIZE 128 #endif /* --------------------- CA simulation -------------------------------- */ /* annealing rule from ChoDro96 page 34 * the table is used to map the number of nonzero * states in the neighborhood to the new state */ __constant__ static const cell_state_t anneal[10] = {0, 0, 0, 0, 1, 0, 1, 1, 1, 1}; /* make one simulation iteration with lines lines. * old configuration is in from, new one is written to to. */ __global__ static void simulate(line_t_cuda *from, line_t_cuda *to, int lines) { #ifdef USE_2D_MAPPING int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int x_prev = ((x - 1) + XSIZE) % XSIZE; int y_prev = ((y - 1) + lines) % lines; int x_next = (x + 1) % XSIZE; int y_next = (y + 1) % lines; to[y][x] = transition_cuda(from, x_prev, y_prev, x, y, x_next, y_next); #else int gid = threadIdx.x + blockIdx.x * blockDim.x; int grid_size = blockDim.x * gridDim.x; for (int i = gid; i < lines * XSIZE; i+= grid_size) { int x = i % XSIZE; int y = i / XSIZE; int x_prev = ((x - 1) + XSIZE) % XSIZE; int y_prev = ((y - 1) + lines) % lines; int x_next = (x + 1) % XSIZE; int y_next = (y + 1) % lines; to[y][x] = transition_cuda(from, x_prev, y_prev, x, y, x_next, y_next); } #endif } /* --------------------- measurement ---------------------------------- */ int main(int argc, char** argv) { int lines, its; ca_init(argc, argv, &lines, &its); line_t_cuda *from, *to, *from_d, *to_d; line_t *verify_field; MALLOC_ERROR_CHECK(from = (line_t_cuda *) calloc(lines, sizeof(line_t_cuda))); MALLOC_ERROR_CHECK(to = (line_t_cuda *) calloc(lines, sizeof(line_t_cuda))); MALLOC_ERROR_CHECK(verify_field = (line_t *) calloc((lines + 2), sizeof(line_t))); CUDA_ERROR_CHECK(cudaMalloc((void **) &from_d, lines * sizeof(line_t_cuda))); CUDA_ERROR_CHECK(cudaMalloc((void **) &to_d, lines * sizeof(line_t_cuda))); CUDA_ERROR_CHECK(cudaMalloc((void **) &to_d, lines * sizeof(line_t_cuda))); ca_init_config_cuda(from, lines, 0); CUDA_ERROR_CHECK(cudaMemcpy((void *) from_d, (void *) from, lines * sizeof(line_t_cuda), cudaMemcpyHostToDevice)); CUDA_ERROR_CHECK(cudaMemcpy((void *) to_d, (void *) to, lines * sizeof(line_t_cuda), cudaMemcpyHostToDevice)); /* int numBlocks; int blockSize; #ifdef USE_2D_MAPPING blockSize = BLOCK_SIZE_X * BLOCK_SIZE_Y; #else blockSize = BLOCK_SIZE; #endif cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks, simulate, blockSize, 0); printf("Occupancy: %d\n", numBlocks); */ #ifdef USE_2D_MAPPING //assert cuda mapping fits (otherwise wrong hash) assert((XSIZE % BLOCK_SIZE_X) == 0); assert((lines % BLOCK_SIZE_Y) == 0); dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y); dim3 dimGrid(XSIZE/dimBlock.x, lines/dimBlock.y); #endif TIME_GET(sim_start); for (int i = 0; i < its; i++) { #ifdef USE_2D_MAPPING simulate <<<dimGrid, dimBlock>>> (from_d, to_d, lines); #else simulate <<<lines, BLOCK_SIZE>>> (from_d, to_d, lines); #endif line_t_cuda *temp = from_d; from_d = to_d; to_d = temp; } cudaDeviceSynchronize(); TIME_GET(sim_stop); 
CUDA_ERROR_CHECK(cudaPeekAtLastError()); CUDA_ERROR_CHECK(cudaMemcpy((void *) from, (void *) from_d, lines * sizeof(line_t_cuda), cudaMemcpyDeviceToHost)); for(int y = 1; y <= lines; y++) { memcpy((void *) &verify_field[y][1], (void *) &from[y-1][0], XSIZE); } ca_hash_and_report(verify_field + 1, lines, TIME_DIFF(sim_start, sim_stop)); free(from); free(to); free(verify_field); CUDA_ERROR_CHECK(cudaFree(from_d)); CUDA_ERROR_CHECK(cudaFree(to_d)); return EXIT_SUCCESS; }
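Two notes on the pair above: to_d is allocated twice in both versions, so only the second allocation is ever used, and the occupancy query is left commented out. A hedged sketch of that query as a compilable helper for the 1D-mapping build, where BLOCK_SIZE is defined (the helper name is mine; the API call is the one already referenced in the commented block above):

/* Hedged sketch, not part of the original file: report how many blocks of
 * BLOCK_SIZE threads fit on one multiprocessor for the simulate kernel
 * defined above (1D mapping build only). */
static void report_occupancy(void)
{
    int max_blocks_per_sm = 0;
    cudaOccupancyMaxActiveBlocksPerMultiprocessor(&max_blocks_per_sm,
                                                  simulate, BLOCK_SIZE, 0);
    printf("Occupancy: %d active blocks per SM\n", max_blocks_per_sm);
}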
8dd5f016b9e46272440b8a372329ab59863a5e60.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<cuda.h> #include<string.h> #include <stdint.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/binary_search.h> int null = INT_MAX; int ninf = INT_MIN; typedef struct Node{ Node* parent; thrust :: host_vector<int> keys; thrust :: host_vector<Node*> pointer; bool isLeaf; bool isDead; Node* buffer; }Node; typedef struct dNode{ int keys[7]; Node* pointer[8]; bool isLeaf; int no_keys=0; int *data_pointer[7]; }dNode; __global__ void range(int *prefix_sum , dNode *nodes,int **result,int* count ,int n , int d , int tree_size , int *ab) { __shared__ int node_idx; node_idx=0; int idx = threadIdx.x ; bool flag = true; __shared__ int a; __shared__ int b; dNode curr; __shared__ int c; c=0; __shared__ int level; level=0; a=ab[blockIdx.x*2 + 0]; b=ab[blockIdx.x*2 + 1]; __syncthreads(); if(a!=-1 && b!=-1) { while(true) { curr = nodes[node_idx]; if(level >= n || node_idx>=tree_size) { break; } if(curr.isLeaf) { flag=true; break; } int diff=INT_MAX; __shared__ int min_idx,min_diff; min_idx=0; min_diff=INT_MAX; if(idx < curr.no_keys ) diff = abs(a - curr.keys[idx]); __syncthreads(); atomicMin(&min_diff,diff); //printf("min_diff : %d\n",min_diff); if(min_diff == diff) { min_idx = idx ; if(min_idx == 0 ) { if(a<curr.keys[0]) { node_idx = prefix_sum[node_idx] ; } else node_idx = prefix_sum[node_idx]+1; } else if(min_idx == d-1 ) { if(a<curr.keys[d-1]) node_idx = prefix_sum[node_idx] + d-1; else node_idx = prefix_sum[node_idx] + d ; } else { if(a<curr.keys[min_idx]) { node_idx = prefix_sum[node_idx] + min_idx; } else if(a>=curr.keys[min_idx]) { node_idx = prefix_sum[node_idx] + min_idx + 1; } } } } __shared__ int ele_count; ele_count=0; bool flag=false; int ite=node_idx; __syncthreads(); while(ele_count < n && idx==0) { for(int i=0;i<curr.no_keys;i++) { if( a <= curr.keys[i] && curr.keys[i] <= b ) { c++; result[blockIdx.x * n + ele_count]=curr.data_pointer[i]; ele_count++; } else if (curr.keys[i] > b) flag=true; } if(flag) break; ite++; if(ite >= tree_size) break; curr = nodes[ite]; } // printf("Block %d : %d\n",blockIdx.x,c); count[blockIdx.x] = c; //printf("Block %d : %d\n",blockIdx.x,c); //printf("%d ::: %d\n",blockIdx.x, count[blockIdx.x]); } } __global__ void find(int *prefix_sum , dNode *nodes , int **result , int* found , int tree_size , int n , int d , int *keys) { int idx = threadIdx.x ; __shared__ int node_idx; node_idx=0; __shared__ bool flag; flag=false; dNode curr; __shared__ int key; key=keys[blockIdx.x]; __shared__ int level; level=0; __syncthreads(); if(key!=-1) { while(true) { //printf("\n"); level++; if(level >= n || node_idx>=tree_size) { break; } curr = nodes[node_idx]; if(curr.isLeaf ) { flag=true; break; } int diff=INT_MAX; __shared__ int min_idx,min_diff; min_idx=0; min_diff=INT_MAX; if(idx < curr.no_keys ) diff = abs(key - curr.keys[idx]); atomicMin(&min_diff,diff); __syncthreads(); //printf("min_diff : %d\n",min_diff); if(min_diff == diff) { min_idx = idx ; if(min_idx == 0 ) { if(key<curr.keys[0]) { node_idx = prefix_sum[node_idx] ; } else node_idx = prefix_sum[node_idx]+1; } else if(min_idx == d-1 ) { if(key<curr.keys[d-1]) node_idx = prefix_sum[node_idx] + d-1; else node_idx = prefix_sum[node_idx] + d ; } else { if(key<curr.keys[min_idx]) { node_idx = prefix_sum[node_idx] + min_idx; } else if(key>=curr.keys[min_idx]) { node_idx = prefix_sum[node_idx] + min_idx + 1; } } } __syncthreads(); } } if(flag) { if(curr.keys[idx] == key) { 
//printf("FOUND THE KEY : %d.\n",key); found[blockIdx.x] = 1; result[blockIdx.x] = curr.data_pointer[idx]; //printf("AAAA\n"); } } } __global__ void path_trace(int *prefix_sum , dNode *nodes,int* keys, int *count , int tree_size , int n , int d,int k) { int idx = threadIdx.x ; __shared__ int node_idx; node_idx=0; __shared__ int it; it=0; bool flag=false; dNode curr; __shared__ int key; key=k; __shared__ int level; level = 0; while(true) { //printf("\n"); level++; if(level >= n || node_idx>=tree_size) { break; } curr = nodes[node_idx]; if(idx==0) { keys[it]=curr.keys[0]; it++; ++*count; } if(curr.isLeaf ) { flag=true; break; } int diff=INT_MAX; __shared__ int min_idx,min_diff; min_idx=0; min_diff=INT_MAX; if(idx < curr.no_keys ) diff = abs(key - curr.keys[idx]); atomicMin(&min_diff,diff); __syncthreads(); //printf("min_diff : %d\n",min_diff); if(min_diff == diff) { min_idx = idx ; if(min_idx == 0 ) { if(key<curr.keys[0]) { node_idx = prefix_sum[node_idx] ; } else node_idx = prefix_sum[node_idx]+1; } else if(min_idx == d-1 ) { if(key<curr.keys[d-1]) node_idx = prefix_sum[node_idx] + d-1; else node_idx = prefix_sum[node_idx] + d ; } else { if(key<curr.keys[min_idx]) { node_idx = prefix_sum[node_idx] + min_idx; } else if(key>=curr.keys[min_idx]) { node_idx = prefix_sum[node_idx] + min_idx + 1; } } } __syncthreads(); } } Node* init_node(int n, bool flag) { Node* node = new Node; node->parent = NULL; node->keys = thrust :: host_vector<int>(n, null); node->pointer = thrust :: host_vector<Node*>(n+1); node->isLeaf = flag; node->isDead = false; node->buffer = NULL; return node; } void unMark(Node* parent, Node* child, int value) { if(parent != NULL) { bool flag = false; for (int i = 1; i < parent->pointer.size(); ++i) { if(parent->pointer[i] == child) { flag = true; parent->keys[i - 1] = value; } } if(parent->isDead && flag) unMark(parent->parent, parent, value); } } Node* insert(Node* node, int value) { Node* root = NULL; int node_size = node->keys.size(); bool full_flag = false; if(node->keys[node_size - 1] != null) full_flag = true; if(full_flag) { thrust :: host_vector<int> tempKeys = node->keys; thrust :: host_vector<Node*> tempPointers = node->pointer; int tempIndex = thrust :: upper_bound(tempKeys.begin(), tempKeys.end(), value) - tempKeys.begin(); int ubp, newVal; tempKeys.insert(tempKeys.begin() + tempIndex, value); if(!node->isLeaf) tempPointers.insert(tempPointers.begin() + tempIndex + 1, node->buffer); Node* new_node = init_node(node_size, node->isLeaf); new_node->parent = node->parent; if(node->isLeaf) { new_node->pointer[node_size] = node->pointer[node_size]; node->pointer[node_size] = new_node; double tempFloat = node_size + 1; if(node_size % 2 == 1) ubp = (int)ceil(tempFloat/2); else ubp = (int)ceil(tempFloat/2)-1; } else { double tempFloat = node_size + 2; if(node_size % 2 == 1) ubp = (int)ceil((tempFloat)/2); else ubp = (int)ceil(tempFloat/2)-1; for (int i = 0; i < tempPointers.size(); ++i) { if(i <= ubp) node->pointer[i] = tempPointers[i]; else { new_node->pointer[i - ubp-1] = tempPointers[i]; new_node->pointer[i - ubp-1]->parent = new_node; if(i <= node_size) node->pointer[i] = NULL; } } newVal = tempKeys[ubp]; tempKeys.erase(tempKeys.begin() + ubp); } for (int i = 0; i < tempKeys.size(); ++i) { if(i < ubp) node->keys[i] = tempKeys[i]; else { new_node->keys[i - ubp] = tempKeys[i]; if(i < node_size) node->keys[i] = null; } } if(node->isDead && value != node->keys[0] && tempIndex < ubp) { node->isDead = false; unMark(node->parent, node, value); } tempIndex = 
upper_bound(new_node->keys.begin(), new_node->keys.end(), node->keys[ubp - 1]) - new_node->keys.begin(); if(new_node->keys[tempIndex] == null) { newVal = new_node->keys[0]; new_node->isDead = true; } else if(node->isLeaf) newVal = new_node->keys[tempIndex]; if(node->parent != NULL) { node->parent->buffer = new_node; root = insert(node->parent, newVal); } else { root = init_node(node_size, false); root->keys[0] = newVal; root->pointer[0] = node; root->pointer[1] = new_node; node->parent = root; new_node->parent = root; } } else { bool insert_flag = false; int tempKey = null; Node* tempPointer = NULL; for (int i = 0; i < node_size; i++) { if(insert_flag) { int temp = node->keys[i] ; node->keys[i]=tempKey ; tempKey = temp ; if(!node->isLeaf) { Node* temp = node->pointer[i + 1]; node->pointer[i + 1] = tempPointer ; tempPointer = temp; //swap(node->pointer[i + 1], tempPointer); } } else { if(value < node->keys[i] || node->keys[i] == null) { insert_flag = true; tempKey = node->keys[i]; node->keys[i] = value; if(!node->isLeaf) { tempPointer = node->pointer[i + 1]; node->pointer[i + 1] = node->buffer; } } if(value != node->keys[0] && node->isDead) { node->isDead = false; unMark(node->parent, node, value); } } } } return root; } Node* find_pos(Node* node, int value, bool up) { while(!node->isLeaf) { int lb = ninf, ub, node_size = node->keys.size(), index; for (int i = 0; i < node_size; i++) { if(node->keys[i] == null) { index = i; break; } ub = node->keys[i]; if(lb <= value && value < ub) { index = i; break; } else if(lb <= value && value == ub && !up && node->pointer[i + 1]->isDead) { index = i; break; } else index = i + 1; lb = ub; } node = node->pointer[index]; } return node; } Node* insert_(Node* root, int value) { Node* temp = root; temp = insert(find_pos(root, value, true), value); if(temp != NULL) root = temp; return root; } int main(int argc,char **argv) { int n,m; FILE *inputfilepointer; char *inputfilename = argv[1]; inputfilepointer = fopen( inputfilename , "r"); if ( inputfilepointer == NULL ) { printf( "input.txt file failed to open." 
); return 0; } fscanf( inputfilepointer, "%d", &n ); fscanf( inputfilepointer, "%d", &m ); int arr[n][m]; for(int i=0;i<n;i++) { for(int j=0;j<m;j++) { fscanf( inputfilepointer, "%d", &arr[i][j] ); } } int d=7; int keys[n]; int min_key = INT_MAX; for(int i=0;i<n;i++) { keys[i]=arr[i][0]; if(min_key > keys[i]) min_key=keys[i]; } Node *root=init_node(d,true); for(int i=0;i<n;i++) { root = insert_(root , keys[i]); } int idx = 0 ; thrust :: host_vector<int>t; Node *node = root ; thrust :: host_vector<Node*>tree; tree.push_back(node); t.push_back(1); while (idx < tree.size()) { int count=0; Node *temp = tree[idx]; idx++; if(!temp->isLeaf) { for(int i=0;i<=d;i++) { if(temp->pointer[i] != NULL) { count++; tree.push_back(temp->pointer[i]); } } t.push_back(count); } } dNode* dtree=(dNode*)malloc(tree.size()*sizeof(dNode)); for(int i=0;i<tree.size();i++) { Node *curr=tree[i]; dNode new_curr; new_curr.isLeaf = curr->isLeaf; for(int j=0;j<d;j++) { new_curr.keys[j] = curr->keys[j]; new_curr.pointer[j] = curr->pointer[j]; } new_curr.pointer[d]=curr->pointer[d]; dtree[i]=new_curr; } for(int i=0;i<tree.size();i++) { int count=0; Node* curr = tree[i]; for(int j=0;j<d;j++) { if(curr->keys[j]!=null ) count++; } dtree[i].no_keys=count; if(curr->isLeaf) { for(int j=0;j<dtree[i].no_keys;j++) { int val = curr->keys[j]; for(int k=0;k<n;k++) { if(val == arr[k][0]) { dtree[i].data_pointer[j]=&arr[k][0]; break; } } } } } int prefix_sum[t.size()-1]; prefix_sum[0]=1; for(int i=1;i<t.size()-1;i++) { prefix_sum[i]=t[i]+prefix_sum[i-1]; } hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float milliseconds = 0; hipEventRecord(start,0); dNode* d_tree ; hipMalloc(&d_tree , tree.size()*sizeof(dNode)) ; hipMemcpy(d_tree , dtree, tree.size()*sizeof(dNode), hipMemcpyHostToDevice); int * d_prefix_sum ; hipMalloc(&d_prefix_sum,(t.size()-1)*sizeof(int)); hipMemcpy(d_prefix_sum , prefix_sum, (t.size()-1)*sizeof(int), hipMemcpyHostToDevice); char *outputfilename = argv[2]; FILE *outputfilepointer; outputfilepointer = fopen(outputfilename,"w"); int q; //scanf("%d",&q); fscanf( inputfilepointer, "%d", &q ); while(q--) { int type; fscanf( inputfilepointer, "%d", &type ); if(type == 1) { int p; fscanf( inputfilepointer, "%d", &p ); int find_keys[p]; for(int i=0;i<p;i++) { fscanf( inputfilepointer, "%d", &find_keys[i] ); } int no_calls=ceil(float(p)/float(100)); int extra = p%100; int idx=0; int *h_result[100]; if(extra == 0) { for(int i=0;i<(no_calls)*100;i+=100) { idx=i; int h_keys[100]; int ite=0; for(int x=i;x<i+100;x++) { h_keys[ite]=find_keys[x]; ite++; } int *d_keys; hipMalloc(&d_keys,100*sizeof(int)); hipMemcpy(d_keys,h_keys,100*sizeof(int),hipMemcpyHostToDevice); int found[100]; for(int y=0;y<100;y++) found[y]=0; int *d_found; hipMalloc(&d_found , 100*sizeof(int)); hipMemcpy(d_found , found, 100*sizeof(int) , hipMemcpyHostToDevice); int **d_result; hipMalloc(&d_result,100*sizeof(int*)); hipLaunchKernelGGL(( find), dim3(100),dim3(7), 0, 0, d_prefix_sum , d_tree , d_result , d_found , tree.size() , n , d , d_keys); hipMemcpy(h_result,d_result,100*sizeof(int*),hipMemcpyDeviceToHost); hipMemcpy(found , d_found , 100*(sizeof(int)) , hipMemcpyDeviceToHost); hipDeviceSynchronize(); for(int j=0;j<100;j++) { if(found[j]) { int * addr = h_result[j]; for(int k=0;k<m;k++) { fprintf( outputfilepointer, "%d ", addr[k]); } fprintf( outputfilepointer, "\n"); } else { fprintf( outputfilepointer, "-1\n"); } } } } if(extra!=0) { for(int i=0;i<(no_calls-1)*100;i+=100) { idx=i; int h_keys[100]; int ite=0; for(int 
x=i;x<i+100;x++) { h_keys[ite]=find_keys[x]; ite++; } int *d_keys; hipMalloc(&d_keys,100*sizeof(int)); hipMemcpy(d_keys,h_keys,100*sizeof(int),hipMemcpyHostToDevice); int found[100]; for(int y=0;y<100;y++) found[y]=0; int *d_found; hipMalloc(&d_found , 100*sizeof(int)); hipMemcpy(d_found , found, 100*sizeof(int) , hipMemcpyHostToDevice); int **d_result; hipMalloc(&d_result,100*sizeof(int*)); hipLaunchKernelGGL(( find), dim3(100),dim3(7), 0, 0, d_prefix_sum , d_tree , d_result , d_found , tree.size() , n , d , d_keys); hipMemcpy(h_result,d_result,100*sizeof(int*),hipMemcpyDeviceToHost); hipMemcpy(found , d_found , 100*(sizeof(int)) , hipMemcpyDeviceToHost); hipDeviceSynchronize(); for(int j=0;j<100;j++) { if(found[j]) { int * addr = h_result[j]; for(int k=0;k<m;k++) { fprintf( outputfilepointer, "%d ", addr[k]); } fprintf( outputfilepointer, "\n"); } else { fprintf( outputfilepointer,"-1\n" ); } } } int h_keys[100]={-1}; idx=0; for(int i=(no_calls-1)*100;i<p;i++) { h_keys[idx]=find_keys[i]; idx++; } int *d_keys; hipMalloc(&d_keys,100*sizeof(int)); hipMemcpy(d_keys,h_keys,100*sizeof(int),hipMemcpyHostToDevice); int found[100]; for(int y=0;y<100;y++) found[y]=0; int *d_found; hipMalloc(&d_found , 100*sizeof(int)); hipMemcpy(d_found , found, 100*sizeof(int) , hipMemcpyHostToDevice); int **d_result; hipMalloc(&d_result,100*sizeof(int*)); hipLaunchKernelGGL(( find), dim3(100),dim3(7), 0, 0, d_prefix_sum , d_tree , d_result , d_found , tree.size() , n , d , d_keys); hipMemcpy(h_result,d_result , 100*sizeof(int*) ,hipMemcpyDeviceToHost); hipMemcpy(found , d_found , 100*(sizeof(int)) , hipMemcpyDeviceToHost); hipDeviceSynchronize(); for(int i=0;i<extra;i++) { if(found[i]) { int * addr = h_result[i]; for(int k=0;k<m;k++) { fprintf( outputfilepointer, "%d ", addr[k]); //printf("%d ",addr[k]); } //printf("\n"); fprintf( outputfilepointer, "\n"); } else { //printf("-1\n"); fprintf( outputfilepointer, "-1\n"); } } } } else if(type == 2) { int p; fscanf( inputfilepointer, "%d", &p ); int points[p][2]; for(int i=0;i<p;i++) { fscanf( inputfilepointer, "%d", &points[i][0] ); //scaning for toll tax zone passing time fscanf( inputfilepointer, "%d", &points[i][1] ); //scaning for toll tax zone passing time } int no_calls=ceil(float(p)/float(100)); int extra = p%100; if(extra == 0) { for(int i=0;i<(no_calls)*100;i+=100) { idx=0; int ab[100][2]; for(int x=i;x<i+100;x++) { ab[idx][0]=points[x][0]; ab[idx][1]=points[x][1]; idx++; } int *d_ab; hipMalloc(&d_ab,200*sizeof(int)); hipMemcpy(d_ab,ab,200*sizeof(int),hipMemcpyHostToDevice); int **h_result; h_result = (int**)malloc(100*n*sizeof(int*)); int **d_result; hipMalloc(&d_result,100*n*sizeof(int*)); int count[100]; for(int y=0;y<100;y++) count[y]=-1; int *d_count; hipMalloc(&d_count,100*sizeof(int)); hipMemcpy(d_count ,count , 100*(sizeof(int)) , hipMemcpyHostToDevice); hipLaunchKernelGGL(( range), dim3(100),dim3(7), 0, 0, d_prefix_sum , d_tree , d_result , d_count , n , d , tree.size(), d_ab); hipMemcpy(h_result,d_result,100*n*sizeof(int*),hipMemcpyDeviceToHost); hipMemcpy(count , d_count , 100*(sizeof(int)) , hipMemcpyDeviceToHost); hipDeviceSynchronize(); for(int l=0;l<100;l++) { if(count[l] > 0) { for(int j=0;j<count[l];j++) { int *addr = h_result[l*n + j]; for(int k=0;k<m;k++) { fprintf( outputfilepointer, "%d " , addr[k]); //printf("%d ",addr[k]); } //printf("\n"); fprintf( outputfilepointer, "\n"); } } else if(count[l]==0) { fprintf( outputfilepointer, "-1\n" ); //printf("-1\n"); } } } } if(extra!=0) { for(int i=0;i<(no_calls-1)*100;i+=100) { idx=0; int 
ab[100][2]; for(int x=i;x<i+100;x++) { printf("%d & %d \n", idx , x); ab[idx][0]=points[x][0]; ab[idx][1]=points[x][1]; idx++; } int *d_ab; hipMalloc(&d_ab,200*sizeof(int)); hipMemcpy(d_ab,ab,200*sizeof(int),hipMemcpyHostToDevice); int **h_result; h_result = (int**)malloc(100*n*sizeof(int*)); int **d_result; hipMalloc(&d_result,100*n*sizeof(int*)); int count[100]; for(int y=0;y<100;y++) count[y]=-1; int *d_count; hipMalloc(&d_count,100*sizeof(int)); hipMemcpy(d_count ,count , 100*(sizeof(int)) , hipMemcpyHostToDevice); hipLaunchKernelGGL(( range), dim3(100),dim3(7), 0, 0, d_prefix_sum , d_tree , d_result , d_count , n , d , tree.size(), d_ab); hipMemcpy(h_result,d_result,100*n*sizeof(int*),hipMemcpyDeviceToHost); hipMemcpy(count , d_count , 100*(sizeof(int)) , hipMemcpyDeviceToHost); hipDeviceSynchronize(); for(int l=0;l<100;l++) { if(count[l] > 0) { for(int j=0;j<count[l];j++) { int *addr = h_result[l*n + j]; for(int k=0;k<m;k++) { fprintf( outputfilepointer, "%d " , addr[k]); //printf("%d ",addr[k]); } //printf("\n"); fprintf( outputfilepointer, "\n"); } } else if(count[l]==0) { fprintf( outputfilepointer, "-1\n" ); //printf("-1\n"); } } } int ab[100][2]; for(int x=0;x<100;x++) { ab[x][0]=-1; ab[x][1]=-1; } idx=0; for(int i=(no_calls-1)*100;i<p;i++) { ab[idx][0]=points[i][0]; ab[idx][1]=points[i][1]; idx++; } int *d_ab; hipMalloc(&d_ab,200*sizeof(int)); hipMemcpy(d_ab,ab,200*sizeof(int),hipMemcpyHostToDevice); int **h_result; h_result = (int**)malloc(100*n*sizeof(int*)); int **d_result; hipMalloc(&d_result,100*n*sizeof(int*)); int count[100]; for(int y=0;y<100;y++) count[y]=-1; int *d_count; hipMalloc(&d_count,100*sizeof(int)); hipMemcpy(d_count ,count , 100*(sizeof(int)) , hipMemcpyHostToDevice); hipLaunchKernelGGL(( range), dim3(100),dim3(7), 0, 0, d_prefix_sum , d_tree , d_result , d_count , n , d , tree.size(), d_ab); hipDeviceSynchronize(); hipMemcpy(h_result,d_result,100*n*sizeof(int*),hipMemcpyDeviceToHost); hipMemcpy(count , d_count , 100*(sizeof(int)) , hipMemcpyDeviceToHost); for(int l=0;l<extra;l++) { if(count[l] > 0) { for(int j=0;j<count[l];j++) { int *addr = h_result[l*n + j]; for(int k=0;k<m;k++) { fprintf( outputfilepointer, "%d " , addr[k]); //printf("%d ",addr[k]); } //printf("\n"); fprintf( outputfilepointer, "\n"); } } else if(count[l]==0) { fprintf( outputfilepointer, "-1\n" ); //printf("-1\n"); } } } } else if(type == 3) { //int p=3; int p; //scanf("%d",&p); fscanf( inputfilepointer, "%d", &p ); //scaning for toll tax zone passing time int find_keys[p][3]; //int find_keys[p][3]={{21,4,987},{18,3,143},{6,2,100}}; for(int i=0;i<p;i++) { //scanf("%d",&find_keys[i][0]); //scanf("%d",&find_keys[i][1]); //scanf("%d",&find_keys[i][2]); fscanf( inputfilepointer, "%d", &find_keys[i][0] ); //scaning for toll tax zone passing time fscanf( inputfilepointer, "%d", &find_keys[i][1] ); //scaning for toll tax zone passing time fscanf( inputfilepointer, "%d", &find_keys[i][2] ); //scaning for toll tax zone passing time } int no_calls=ceil(float(p)/float(100)); int extra = p%100; int idx=0; if(extra == 0) { for(int i=0;i<(no_calls)*100;i+=100) { idx=i; int h_keys[100]; int ite=0; for(int x=i;x<i+100;x++) { h_keys[ite]=find_keys[x][0]; ite++; } int *d_keys; hipMalloc(&d_keys,100*sizeof(int)); hipMemcpy(d_keys,h_keys,100*sizeof(int),hipMemcpyHostToDevice); int found[100]; for(int y=0;y<100;y++) found[y]=0; int *d_found; hipMalloc(&d_found , 100*sizeof(int)); hipMemcpy(d_found , found, 100*sizeof(int) , hipMemcpyHostToDevice); int *h_result[100]; int **d_result; 
hipMalloc(&d_result,100*sizeof(int*)); hipLaunchKernelGGL(( find), dim3(100),dim3(7), 0, 0, d_prefix_sum , d_tree , d_result , d_found , tree.size(), n , d , d_keys); hipDeviceSynchronize(); hipMemcpy(h_result,d_result,100*sizeof(int*),hipMemcpyDeviceToHost); hipMemcpy(found , d_found , 100*(sizeof(int)) , hipMemcpyDeviceToHost); for(int j=0;j<100;j++) { if(found[j]) { int * addr = h_result[j]; addr[find_keys[i+j][1]-1] = addr[find_keys[i+j][1]-1] + find_keys[i+j][2]; } } } } if(extra!=0) { for(int i=0;i<(no_calls-1)*100;i+=100) { //printf("Inside type 3 : %d\n",i); idx=i; int h_keys[100]; int ite=0; for(int x=i;x<i+100;x++) { h_keys[ite]=find_keys[x][0]; ite++; } int *d_keys; hipMalloc(&d_keys,100*sizeof(int)); hipMemcpy(d_keys,h_keys,100*sizeof(int),hipMemcpyHostToDevice); int found[100]; for(int y=0;y<100;y++) found[y]=0; int *d_found; hipMalloc(&d_found , 100*sizeof(int)); hipMemcpy(d_found , found, 100*sizeof(int) , hipMemcpyHostToDevice); int *h_result[100]; int **d_result; hipMalloc(&d_result,100*sizeof(int*)); hipLaunchKernelGGL(( find), dim3(100),dim3(7), 0, 0, d_prefix_sum , d_tree , d_result , d_found , tree.size() , n , d , d_keys); hipDeviceSynchronize(); hipMemcpy(h_result,d_result,100*sizeof(int*),hipMemcpyDeviceToHost); hipMemcpy(found , d_found , 100*(sizeof(int)) , hipMemcpyDeviceToHost); for(int j=0;j<100;j++) { if(found[j]) { int * addr = h_result[j]; addr[find_keys[i+j][1]-1] = addr[find_keys[i+j][1]-1] + find_keys[i+j][2]; } else { //printf("-1\n"); } } } int h_keys[100]; for(int y=0;y<100;y++) { h_keys[y]=-1; } idx=0; for(int i=(no_calls-1)*100;i<p;i++) { h_keys[idx]=find_keys[i][0]; idx++; } int *d_keys; hipMalloc(&d_keys,100*sizeof(int)); hipMemcpy(d_keys,h_keys,100*sizeof(int),hipMemcpyHostToDevice); int found[100]; for(int y=0;y<100;y++) found[y]=0; int *d_found; hipMalloc(&d_found , 100*sizeof(int)); hipMemcpy(d_found , found, 100*sizeof(int) , hipMemcpyHostToDevice); int *h_result[100]; int **d_result; hipMalloc(&d_result,100*sizeof(int*)); hipLaunchKernelGGL(( find), dim3(100),dim3(7), 0, 0, d_prefix_sum , d_tree , d_result , d_found , tree.size(), n , d , d_keys); hipDeviceSynchronize(); hipMemcpy(h_result,d_result , 100*sizeof(int*) ,hipMemcpyDeviceToHost); hipMemcpy(found , d_found , 100*(sizeof(int)) , hipMemcpyDeviceToHost); idx = (no_calls-1)*100; for(int i=0;i<extra;i++) { if(found[i]) { int * addr = h_result[i]; addr[find_keys[i+idx][1] - 1] = addr[find_keys[i+idx][1] - 1] + find_keys[i+idx][2]; //printf("\n"); //fprintf( outputfilepointer, "\n"); } else { //printf("-1\n"); //fprintf( outputfilepointer, "-1\n"); } } } } else { int key_; fscanf( inputfilepointer, "%d", &key_ ); int kk[n]; int *k; hipMalloc(&k,n*sizeof(int)); int h_count=0; int *d_count; hipMalloc(&d_count,sizeof(int)); hipMemcpy(d_count,&h_count,sizeof(int),hipMemcpyHostToDevice); hipLaunchKernelGGL(( path_trace), dim3(1),dim3(7), 0, 0, d_prefix_sum , d_tree , k , d_count , tree.size() , n , d , key_); hipMemcpy(&h_count,d_count,sizeof(int),hipMemcpyDeviceToHost); hipMemcpy(kk,k,n*sizeof(int),hipMemcpyDeviceToHost); for(int i=0;i<h_count;i++) { fprintf( outputfilepointer, "%d ",kk[i]); //printf("%d ",kk[i]); } //printf("\n"); fprintf( outputfilepointer, "\n"); } } hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds, start, stop); printf("Time taken by function to execute is: %.6f ms\n", milliseconds); fclose( outputfilepointer ); fclose( inputfilepointer ); }
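main() above issues queries in chunks of up to 100, one block of 7 threads per query, and the kernels skip any slot whose key is the -1 sentinel. The chunking arithmetic, factored into a hedged sketch (helper names are mine, not part of the program):

#define BATCH 100

/* Hedged sketch, not part of the original program. */
static int num_batches(int p)
{
    return (p + BATCH - 1) / BATCH;               /* same as ceil(p / 100.0) */
}

static void fill_batch(int *h_keys, const int *all_keys, int p, int batch)
{
    for (int i = 0; i < BATCH; i++) {
        int src = batch * BATCH + i;
        h_keys[i] = (src < p) ? all_keys[src] : -1;   /* -1 marks an empty slot */
    }
}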
8dd5f016b9e46272440b8a372329ab59863a5e60.cu
#include<stdio.h> #include<cuda.h> #include<string.h> #include <stdint.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/binary_search.h> int null = INT_MAX; int ninf = INT_MIN; typedef struct Node{ Node* parent; thrust :: host_vector<int> keys; thrust :: host_vector<Node*> pointer; bool isLeaf; bool isDead; Node* buffer; }Node; typedef struct dNode{ int keys[7]; Node* pointer[8]; bool isLeaf; int no_keys=0; int *data_pointer[7]; }dNode; __global__ void range(int *prefix_sum , dNode *nodes,int **result,int* count ,int n , int d , int tree_size , int *ab) { __shared__ int node_idx; node_idx=0; int idx = threadIdx.x ; bool flag = true; __shared__ int a; __shared__ int b; dNode curr; __shared__ int c; c=0; __shared__ int level; level=0; a=ab[blockIdx.x*2 + 0]; b=ab[blockIdx.x*2 + 1]; __syncthreads(); if(a!=-1 && b!=-1) { while(true) { curr = nodes[node_idx]; if(level >= n || node_idx>=tree_size) { break; } if(curr.isLeaf) { flag=true; break; } int diff=INT_MAX; __shared__ int min_idx,min_diff; min_idx=0; min_diff=INT_MAX; if(idx < curr.no_keys ) diff = abs(a - curr.keys[idx]); __syncthreads(); atomicMin(&min_diff,diff); //printf("min_diff : %d\n",min_diff); if(min_diff == diff) { min_idx = idx ; if(min_idx == 0 ) { if(a<curr.keys[0]) { node_idx = prefix_sum[node_idx] ; } else node_idx = prefix_sum[node_idx]+1; } else if(min_idx == d-1 ) { if(a<curr.keys[d-1]) node_idx = prefix_sum[node_idx] + d-1; else node_idx = prefix_sum[node_idx] + d ; } else { if(a<curr.keys[min_idx]) { node_idx = prefix_sum[node_idx] + min_idx; } else if(a>=curr.keys[min_idx]) { node_idx = prefix_sum[node_idx] + min_idx + 1; } } } } __shared__ int ele_count; ele_count=0; bool flag=false; int ite=node_idx; __syncthreads(); while(ele_count < n && idx==0) { for(int i=0;i<curr.no_keys;i++) { if( a <= curr.keys[i] && curr.keys[i] <= b ) { c++; result[blockIdx.x * n + ele_count]=curr.data_pointer[i]; ele_count++; } else if (curr.keys[i] > b) flag=true; } if(flag) break; ite++; if(ite >= tree_size) break; curr = nodes[ite]; } // printf("Block %d : %d\n",blockIdx.x,c); count[blockIdx.x] = c; //printf("Block %d : %d\n",blockIdx.x,c); //printf("%d ::: %d\n",blockIdx.x, count[blockIdx.x]); } } __global__ void find(int *prefix_sum , dNode *nodes , int **result , int* found , int tree_size , int n , int d , int *keys) { int idx = threadIdx.x ; __shared__ int node_idx; node_idx=0; __shared__ bool flag; flag=false; dNode curr; __shared__ int key; key=keys[blockIdx.x]; __shared__ int level; level=0; __syncthreads(); if(key!=-1) { while(true) { //printf("\n"); level++; if(level >= n || node_idx>=tree_size) { break; } curr = nodes[node_idx]; if(curr.isLeaf ) { flag=true; break; } int diff=INT_MAX; __shared__ int min_idx,min_diff; min_idx=0; min_diff=INT_MAX; if(idx < curr.no_keys ) diff = abs(key - curr.keys[idx]); atomicMin(&min_diff,diff); __syncthreads(); //printf("min_diff : %d\n",min_diff); if(min_diff == diff) { min_idx = idx ; if(min_idx == 0 ) { if(key<curr.keys[0]) { node_idx = prefix_sum[node_idx] ; } else node_idx = prefix_sum[node_idx]+1; } else if(min_idx == d-1 ) { if(key<curr.keys[d-1]) node_idx = prefix_sum[node_idx] + d-1; else node_idx = prefix_sum[node_idx] + d ; } else { if(key<curr.keys[min_idx]) { node_idx = prefix_sum[node_idx] + min_idx; } else if(key>=curr.keys[min_idx]) { node_idx = prefix_sum[node_idx] + min_idx + 1; } } } __syncthreads(); } } if(flag) { if(curr.keys[idx] == key) { //printf("FOUND THE KEY : %d.\n",key); found[blockIdx.x] = 1; result[blockIdx.x] = 
curr.data_pointer[idx]; //printf("AAAA\n"); } } } __global__ void path_trace(int *prefix_sum , dNode *nodes,int* keys, int *count , int tree_size , int n , int d,int k) { int idx = threadIdx.x ; __shared__ int node_idx; node_idx=0; __shared__ int it; it=0; bool flag=false; dNode curr; __shared__ int key; key=k; __shared__ int level; level = 0; while(true) { //printf("\n"); level++; if(level >= n || node_idx>=tree_size) { break; } curr = nodes[node_idx]; if(idx==0) { keys[it]=curr.keys[0]; it++; ++*count; } if(curr.isLeaf ) { flag=true; break; } int diff=INT_MAX; __shared__ int min_idx,min_diff; min_idx=0; min_diff=INT_MAX; if(idx < curr.no_keys ) diff = abs(key - curr.keys[idx]); atomicMin(&min_diff,diff); __syncthreads(); //printf("min_diff : %d\n",min_diff); if(min_diff == diff) { min_idx = idx ; if(min_idx == 0 ) { if(key<curr.keys[0]) { node_idx = prefix_sum[node_idx] ; } else node_idx = prefix_sum[node_idx]+1; } else if(min_idx == d-1 ) { if(key<curr.keys[d-1]) node_idx = prefix_sum[node_idx] + d-1; else node_idx = prefix_sum[node_idx] + d ; } else { if(key<curr.keys[min_idx]) { node_idx = prefix_sum[node_idx] + min_idx; } else if(key>=curr.keys[min_idx]) { node_idx = prefix_sum[node_idx] + min_idx + 1; } } } __syncthreads(); } } Node* init_node(int n, bool flag) { Node* node = new Node; node->parent = NULL; node->keys = thrust :: host_vector<int>(n, null); node->pointer = thrust :: host_vector<Node*>(n+1); node->isLeaf = flag; node->isDead = false; node->buffer = NULL; return node; } void unMark(Node* parent, Node* child, int value) { if(parent != NULL) { bool flag = false; for (int i = 1; i < parent->pointer.size(); ++i) { if(parent->pointer[i] == child) { flag = true; parent->keys[i - 1] = value; } } if(parent->isDead && flag) unMark(parent->parent, parent, value); } } Node* insert(Node* node, int value) { Node* root = NULL; int node_size = node->keys.size(); bool full_flag = false; if(node->keys[node_size - 1] != null) full_flag = true; if(full_flag) { thrust :: host_vector<int> tempKeys = node->keys; thrust :: host_vector<Node*> tempPointers = node->pointer; int tempIndex = thrust :: upper_bound(tempKeys.begin(), tempKeys.end(), value) - tempKeys.begin(); int ubp, newVal; tempKeys.insert(tempKeys.begin() + tempIndex, value); if(!node->isLeaf) tempPointers.insert(tempPointers.begin() + tempIndex + 1, node->buffer); Node* new_node = init_node(node_size, node->isLeaf); new_node->parent = node->parent; if(node->isLeaf) { new_node->pointer[node_size] = node->pointer[node_size]; node->pointer[node_size] = new_node; double tempFloat = node_size + 1; if(node_size % 2 == 1) ubp = (int)ceil(tempFloat/2); else ubp = (int)ceil(tempFloat/2)-1; } else { double tempFloat = node_size + 2; if(node_size % 2 == 1) ubp = (int)ceil((tempFloat)/2); else ubp = (int)ceil(tempFloat/2)-1; for (int i = 0; i < tempPointers.size(); ++i) { if(i <= ubp) node->pointer[i] = tempPointers[i]; else { new_node->pointer[i - ubp-1] = tempPointers[i]; new_node->pointer[i - ubp-1]->parent = new_node; if(i <= node_size) node->pointer[i] = NULL; } } newVal = tempKeys[ubp]; tempKeys.erase(tempKeys.begin() + ubp); } for (int i = 0; i < tempKeys.size(); ++i) { if(i < ubp) node->keys[i] = tempKeys[i]; else { new_node->keys[i - ubp] = tempKeys[i]; if(i < node_size) node->keys[i] = null; } } if(node->isDead && value != node->keys[0] && tempIndex < ubp) { node->isDead = false; unMark(node->parent, node, value); } tempIndex = upper_bound(new_node->keys.begin(), new_node->keys.end(), node->keys[ubp - 1]) - new_node->keys.begin(); 
if(new_node->keys[tempIndex] == null) { newVal = new_node->keys[0]; new_node->isDead = true; } else if(node->isLeaf) newVal = new_node->keys[tempIndex]; if(node->parent != NULL) { node->parent->buffer = new_node; root = insert(node->parent, newVal); } else { root = init_node(node_size, false); root->keys[0] = newVal; root->pointer[0] = node; root->pointer[1] = new_node; node->parent = root; new_node->parent = root; } } else { bool insert_flag = false; int tempKey = null; Node* tempPointer = NULL; for (int i = 0; i < node_size; i++) { if(insert_flag) { int temp = node->keys[i] ; node->keys[i]=tempKey ; tempKey = temp ; if(!node->isLeaf) { Node* temp = node->pointer[i + 1]; node->pointer[i + 1] = tempPointer ; tempPointer = temp; //swap(node->pointer[i + 1], tempPointer); } } else { if(value < node->keys[i] || node->keys[i] == null) { insert_flag = true; tempKey = node->keys[i]; node->keys[i] = value; if(!node->isLeaf) { tempPointer = node->pointer[i + 1]; node->pointer[i + 1] = node->buffer; } } if(value != node->keys[0] && node->isDead) { node->isDead = false; unMark(node->parent, node, value); } } } } return root; } Node* find_pos(Node* node, int value, bool up) { while(!node->isLeaf) { int lb = ninf, ub, node_size = node->keys.size(), index; for (int i = 0; i < node_size; i++) { if(node->keys[i] == null) { index = i; break; } ub = node->keys[i]; if(lb <= value && value < ub) { index = i; break; } else if(lb <= value && value == ub && !up && node->pointer[i + 1]->isDead) { index = i; break; } else index = i + 1; lb = ub; } node = node->pointer[index]; } return node; } Node* insert_(Node* root, int value) { Node* temp = root; temp = insert(find_pos(root, value, true), value); if(temp != NULL) root = temp; return root; } int main(int argc,char **argv) { int n,m; FILE *inputfilepointer; char *inputfilename = argv[1]; inputfilepointer = fopen( inputfilename , "r"); if ( inputfilepointer == NULL ) { printf( "input.txt file failed to open." 
); return 0; } fscanf( inputfilepointer, "%d", &n ); fscanf( inputfilepointer, "%d", &m ); int arr[n][m]; for(int i=0;i<n;i++) { for(int j=0;j<m;j++) { fscanf( inputfilepointer, "%d", &arr[i][j] ); } } int d=7; int keys[n]; int min_key = INT_MAX; for(int i=0;i<n;i++) { keys[i]=arr[i][0]; if(min_key > keys[i]) min_key=keys[i]; } Node *root=init_node(d,true); for(int i=0;i<n;i++) { root = insert_(root , keys[i]); } int idx = 0 ; thrust :: host_vector<int>t; Node *node = root ; thrust :: host_vector<Node*>tree; tree.push_back(node); t.push_back(1); while (idx < tree.size()) { int count=0; Node *temp = tree[idx]; idx++; if(!temp->isLeaf) { for(int i=0;i<=d;i++) { if(temp->pointer[i] != NULL) { count++; tree.push_back(temp->pointer[i]); } } t.push_back(count); } } dNode* dtree=(dNode*)malloc(tree.size()*sizeof(dNode)); for(int i=0;i<tree.size();i++) { Node *curr=tree[i]; dNode new_curr; new_curr.isLeaf = curr->isLeaf; for(int j=0;j<d;j++) { new_curr.keys[j] = curr->keys[j]; new_curr.pointer[j] = curr->pointer[j]; } new_curr.pointer[d]=curr->pointer[d]; dtree[i]=new_curr; } for(int i=0;i<tree.size();i++) { int count=0; Node* curr = tree[i]; for(int j=0;j<d;j++) { if(curr->keys[j]!=null ) count++; } dtree[i].no_keys=count; if(curr->isLeaf) { for(int j=0;j<dtree[i].no_keys;j++) { int val = curr->keys[j]; for(int k=0;k<n;k++) { if(val == arr[k][0]) { dtree[i].data_pointer[j]=&arr[k][0]; break; } } } } } int prefix_sum[t.size()-1]; prefix_sum[0]=1; for(int i=1;i<t.size()-1;i++) { prefix_sum[i]=t[i]+prefix_sum[i-1]; } cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float milliseconds = 0; cudaEventRecord(start,0); dNode* d_tree ; cudaMalloc(&d_tree , tree.size()*sizeof(dNode)) ; cudaMemcpy(d_tree , dtree, tree.size()*sizeof(dNode), cudaMemcpyHostToDevice); int * d_prefix_sum ; cudaMalloc(&d_prefix_sum,(t.size()-1)*sizeof(int)); cudaMemcpy(d_prefix_sum , prefix_sum, (t.size()-1)*sizeof(int), cudaMemcpyHostToDevice); char *outputfilename = argv[2]; FILE *outputfilepointer; outputfilepointer = fopen(outputfilename,"w"); int q; //scanf("%d",&q); fscanf( inputfilepointer, "%d", &q ); while(q--) { int type; fscanf( inputfilepointer, "%d", &type ); if(type == 1) { int p; fscanf( inputfilepointer, "%d", &p ); int find_keys[p]; for(int i=0;i<p;i++) { fscanf( inputfilepointer, "%d", &find_keys[i] ); } int no_calls=ceil(float(p)/float(100)); int extra = p%100; int idx=0; int *h_result[100]; if(extra == 0) { for(int i=0;i<(no_calls)*100;i+=100) { idx=i; int h_keys[100]; int ite=0; for(int x=i;x<i+100;x++) { h_keys[ite]=find_keys[x]; ite++; } int *d_keys; cudaMalloc(&d_keys,100*sizeof(int)); cudaMemcpy(d_keys,h_keys,100*sizeof(int),cudaMemcpyHostToDevice); int found[100]; for(int y=0;y<100;y++) found[y]=0; int *d_found; cudaMalloc(&d_found , 100*sizeof(int)); cudaMemcpy(d_found , found, 100*sizeof(int) , cudaMemcpyHostToDevice); int **d_result; cudaMalloc(&d_result,100*sizeof(int*)); find<<<100,7>>>(d_prefix_sum , d_tree , d_result , d_found , tree.size() , n , d , d_keys); cudaMemcpy(h_result,d_result,100*sizeof(int*),cudaMemcpyDeviceToHost); cudaMemcpy(found , d_found , 100*(sizeof(int)) , cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); for(int j=0;j<100;j++) { if(found[j]) { int * addr = h_result[j]; for(int k=0;k<m;k++) { fprintf( outputfilepointer, "%d ", addr[k]); } fprintf( outputfilepointer, "\n"); } else { fprintf( outputfilepointer, "-1\n"); } } } } if(extra!=0) { for(int i=0;i<(no_calls-1)*100;i+=100) { idx=i; int h_keys[100]; int ite=0; for(int x=i;x<i+100;x++) { 
h_keys[ite]=find_keys[x]; ite++; } int *d_keys; cudaMalloc(&d_keys,100*sizeof(int)); cudaMemcpy(d_keys,h_keys,100*sizeof(int),cudaMemcpyHostToDevice); int found[100]; for(int y=0;y<100;y++) found[y]=0; int *d_found; cudaMalloc(&d_found , 100*sizeof(int)); cudaMemcpy(d_found , found, 100*sizeof(int) , cudaMemcpyHostToDevice); int **d_result; cudaMalloc(&d_result,100*sizeof(int*)); find<<<100,7>>>(d_prefix_sum , d_tree , d_result , d_found , tree.size() , n , d , d_keys); cudaMemcpy(h_result,d_result,100*sizeof(int*),cudaMemcpyDeviceToHost); cudaMemcpy(found , d_found , 100*(sizeof(int)) , cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); for(int j=0;j<100;j++) { if(found[j]) { int * addr = h_result[j]; for(int k=0;k<m;k++) { fprintf( outputfilepointer, "%d ", addr[k]); } fprintf( outputfilepointer, "\n"); } else { fprintf( outputfilepointer,"-1\n" ); } } } int h_keys[100]={-1}; idx=0; for(int i=(no_calls-1)*100;i<p;i++) { h_keys[idx]=find_keys[i]; idx++; } int *d_keys; cudaMalloc(&d_keys,100*sizeof(int)); cudaMemcpy(d_keys,h_keys,100*sizeof(int),cudaMemcpyHostToDevice); int found[100]; for(int y=0;y<100;y++) found[y]=0; int *d_found; cudaMalloc(&d_found , 100*sizeof(int)); cudaMemcpy(d_found , found, 100*sizeof(int) , cudaMemcpyHostToDevice); int **d_result; cudaMalloc(&d_result,100*sizeof(int*)); find<<<100,7>>>(d_prefix_sum , d_tree , d_result , d_found , tree.size() , n , d , d_keys); cudaMemcpy(h_result,d_result , 100*sizeof(int*) ,cudaMemcpyDeviceToHost); cudaMemcpy(found , d_found , 100*(sizeof(int)) , cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); for(int i=0;i<extra;i++) { if(found[i]) { int * addr = h_result[i]; for(int k=0;k<m;k++) { fprintf( outputfilepointer, "%d ", addr[k]); //printf("%d ",addr[k]); } //printf("\n"); fprintf( outputfilepointer, "\n"); } else { //printf("-1\n"); fprintf( outputfilepointer, "-1\n"); } } } } else if(type == 2) { int p; fscanf( inputfilepointer, "%d", &p ); int points[p][2]; for(int i=0;i<p;i++) { fscanf( inputfilepointer, "%d", &points[i][0] ); //scaning for toll tax zone passing time fscanf( inputfilepointer, "%d", &points[i][1] ); //scaning for toll tax zone passing time } int no_calls=ceil(float(p)/float(100)); int extra = p%100; if(extra == 0) { for(int i=0;i<(no_calls)*100;i+=100) { idx=0; int ab[100][2]; for(int x=i;x<i+100;x++) { ab[idx][0]=points[x][0]; ab[idx][1]=points[x][1]; idx++; } int *d_ab; cudaMalloc(&d_ab,200*sizeof(int)); cudaMemcpy(d_ab,ab,200*sizeof(int),cudaMemcpyHostToDevice); int **h_result; h_result = (int**)malloc(100*n*sizeof(int*)); int **d_result; cudaMalloc(&d_result,100*n*sizeof(int*)); int count[100]; for(int y=0;y<100;y++) count[y]=-1; int *d_count; cudaMalloc(&d_count,100*sizeof(int)); cudaMemcpy(d_count ,count , 100*(sizeof(int)) , cudaMemcpyHostToDevice); range<<<100,7>>>(d_prefix_sum , d_tree , d_result , d_count , n , d , tree.size(), d_ab); cudaMemcpy(h_result,d_result,100*n*sizeof(int*),cudaMemcpyDeviceToHost); cudaMemcpy(count , d_count , 100*(sizeof(int)) , cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); for(int l=0;l<100;l++) { if(count[l] > 0) { for(int j=0;j<count[l];j++) { int *addr = h_result[l*n + j]; for(int k=0;k<m;k++) { fprintf( outputfilepointer, "%d " , addr[k]); //printf("%d ",addr[k]); } //printf("\n"); fprintf( outputfilepointer, "\n"); } } else if(count[l]==0) { fprintf( outputfilepointer, "-1\n" ); //printf("-1\n"); } } } } if(extra!=0) { for(int i=0;i<(no_calls-1)*100;i+=100) { idx=0; int ab[100][2]; for(int x=i;x<i+100;x++) { printf("%d & %d \n", idx , x); ab[idx][0]=points[x][0]; 
ab[idx][1]=points[x][1]; idx++; } int *d_ab; cudaMalloc(&d_ab,200*sizeof(int)); cudaMemcpy(d_ab,ab,200*sizeof(int),cudaMemcpyHostToDevice); int **h_result; h_result = (int**)malloc(100*n*sizeof(int*)); int **d_result; cudaMalloc(&d_result,100*n*sizeof(int*)); int count[100]; for(int y=0;y<100;y++) count[y]=-1; int *d_count; cudaMalloc(&d_count,100*sizeof(int)); cudaMemcpy(d_count ,count , 100*(sizeof(int)) , cudaMemcpyHostToDevice); range<<<100,7>>>(d_prefix_sum , d_tree , d_result , d_count , n , d , tree.size(), d_ab); cudaMemcpy(h_result,d_result,100*n*sizeof(int*),cudaMemcpyDeviceToHost); cudaMemcpy(count , d_count , 100*(sizeof(int)) , cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); for(int l=0;l<100;l++) { if(count[l] > 0) { for(int j=0;j<count[l];j++) { int *addr = h_result[l*n + j]; for(int k=0;k<m;k++) { fprintf( outputfilepointer, "%d " , addr[k]); //printf("%d ",addr[k]); } //printf("\n"); fprintf( outputfilepointer, "\n"); } } else if(count[l]==0) { fprintf( outputfilepointer, "-1\n" ); //printf("-1\n"); } } } int ab[100][2]; for(int x=0;x<100;x++) { ab[x][0]=-1; ab[x][1]=-1; } idx=0; for(int i=(no_calls-1)*100;i<p;i++) { ab[idx][0]=points[i][0]; ab[idx][1]=points[i][1]; idx++; } int *d_ab; cudaMalloc(&d_ab,200*sizeof(int)); cudaMemcpy(d_ab,ab,200*sizeof(int),cudaMemcpyHostToDevice); int **h_result; h_result = (int**)malloc(100*n*sizeof(int*)); int **d_result; cudaMalloc(&d_result,100*n*sizeof(int*)); int count[100]; for(int y=0;y<100;y++) count[y]=-1; int *d_count; cudaMalloc(&d_count,100*sizeof(int)); cudaMemcpy(d_count ,count , 100*(sizeof(int)) , cudaMemcpyHostToDevice); range<<<100,7>>>(d_prefix_sum , d_tree , d_result , d_count , n , d , tree.size(), d_ab); cudaDeviceSynchronize(); cudaMemcpy(h_result,d_result,100*n*sizeof(int*),cudaMemcpyDeviceToHost); cudaMemcpy(count , d_count , 100*(sizeof(int)) , cudaMemcpyDeviceToHost); for(int l=0;l<extra;l++) { if(count[l] > 0) { for(int j=0;j<count[l];j++) { int *addr = h_result[l*n + j]; for(int k=0;k<m;k++) { fprintf( outputfilepointer, "%d " , addr[k]); //printf("%d ",addr[k]); } //printf("\n"); fprintf( outputfilepointer, "\n"); } } else if(count[l]==0) { fprintf( outputfilepointer, "-1\n" ); //printf("-1\n"); } } } } else if(type == 3) { //int p=3; int p; //scanf("%d",&p); fscanf( inputfilepointer, "%d", &p ); //scaning for toll tax zone passing time int find_keys[p][3]; //int find_keys[p][3]={{21,4,987},{18,3,143},{6,2,100}}; for(int i=0;i<p;i++) { //scanf("%d",&find_keys[i][0]); //scanf("%d",&find_keys[i][1]); //scanf("%d",&find_keys[i][2]); fscanf( inputfilepointer, "%d", &find_keys[i][0] ); //scaning for toll tax zone passing time fscanf( inputfilepointer, "%d", &find_keys[i][1] ); //scaning for toll tax zone passing time fscanf( inputfilepointer, "%d", &find_keys[i][2] ); //scaning for toll tax zone passing time } int no_calls=ceil(float(p)/float(100)); int extra = p%100; int idx=0; if(extra == 0) { for(int i=0;i<(no_calls)*100;i+=100) { idx=i; int h_keys[100]; int ite=0; for(int x=i;x<i+100;x++) { h_keys[ite]=find_keys[x][0]; ite++; } int *d_keys; cudaMalloc(&d_keys,100*sizeof(int)); cudaMemcpy(d_keys,h_keys,100*sizeof(int),cudaMemcpyHostToDevice); int found[100]; for(int y=0;y<100;y++) found[y]=0; int *d_found; cudaMalloc(&d_found , 100*sizeof(int)); cudaMemcpy(d_found , found, 100*sizeof(int) , cudaMemcpyHostToDevice); int *h_result[100]; int **d_result; cudaMalloc(&d_result,100*sizeof(int*)); find<<<100,7>>>(d_prefix_sum , d_tree , d_result , d_found , tree.size(), n , d , d_keys); cudaDeviceSynchronize(); 
cudaMemcpy(h_result,d_result,100*sizeof(int*),cudaMemcpyDeviceToHost); cudaMemcpy(found , d_found , 100*(sizeof(int)) , cudaMemcpyDeviceToHost); for(int j=0;j<100;j++) { if(found[j]) { int * addr = h_result[j]; addr[find_keys[i+j][1]-1] = addr[find_keys[i+j][1]-1] + find_keys[i+j][2]; } } } } if(extra!=0) { for(int i=0;i<(no_calls-1)*100;i+=100) { //printf("Inside type 3 : %d\n",i); idx=i; int h_keys[100]; int ite=0; for(int x=i;x<i+100;x++) { h_keys[ite]=find_keys[x][0]; ite++; } int *d_keys; cudaMalloc(&d_keys,100*sizeof(int)); cudaMemcpy(d_keys,h_keys,100*sizeof(int),cudaMemcpyHostToDevice); int found[100]; for(int y=0;y<100;y++) found[y]=0; int *d_found; cudaMalloc(&d_found , 100*sizeof(int)); cudaMemcpy(d_found , found, 100*sizeof(int) , cudaMemcpyHostToDevice); int *h_result[100]; int **d_result; cudaMalloc(&d_result,100*sizeof(int*)); find<<<100,7>>>(d_prefix_sum , d_tree , d_result , d_found , tree.size() , n , d , d_keys); cudaDeviceSynchronize(); cudaMemcpy(h_result,d_result,100*sizeof(int*),cudaMemcpyDeviceToHost); cudaMemcpy(found , d_found , 100*(sizeof(int)) , cudaMemcpyDeviceToHost); for(int j=0;j<100;j++) { if(found[j]) { int * addr = h_result[j]; addr[find_keys[i+j][1]-1] = addr[find_keys[i+j][1]-1] + find_keys[i+j][2]; } else { //printf("-1\n"); } } } int h_keys[100]; for(int y=0;y<100;y++) { h_keys[y]=-1; } idx=0; for(int i=(no_calls-1)*100;i<p;i++) { h_keys[idx]=find_keys[i][0]; idx++; } int *d_keys; cudaMalloc(&d_keys,100*sizeof(int)); cudaMemcpy(d_keys,h_keys,100*sizeof(int),cudaMemcpyHostToDevice); int found[100]; for(int y=0;y<100;y++) found[y]=0; int *d_found; cudaMalloc(&d_found , 100*sizeof(int)); cudaMemcpy(d_found , found, 100*sizeof(int) , cudaMemcpyHostToDevice); int *h_result[100]; int **d_result; cudaMalloc(&d_result,100*sizeof(int*)); find<<<100,7>>>(d_prefix_sum , d_tree , d_result , d_found , tree.size(), n , d , d_keys); cudaDeviceSynchronize(); cudaMemcpy(h_result,d_result , 100*sizeof(int*) ,cudaMemcpyDeviceToHost); cudaMemcpy(found , d_found , 100*(sizeof(int)) , cudaMemcpyDeviceToHost); idx = (no_calls-1)*100; for(int i=0;i<extra;i++) { if(found[i]) { int * addr = h_result[i]; addr[find_keys[i+idx][1] - 1] = addr[find_keys[i+idx][1] - 1] + find_keys[i+idx][2]; //printf("\n"); //fprintf( outputfilepointer, "\n"); } else { //printf("-1\n"); //fprintf( outputfilepointer, "-1\n"); } } } } else { int key_; fscanf( inputfilepointer, "%d", &key_ ); int kk[n]; int *k; cudaMalloc(&k,n*sizeof(int)); int h_count=0; int *d_count; cudaMalloc(&d_count,sizeof(int)); cudaMemcpy(d_count,&h_count,sizeof(int),cudaMemcpyHostToDevice); path_trace<<<1,7>>>(d_prefix_sum , d_tree , k , d_count , tree.size() , n , d , key_); cudaMemcpy(&h_count,d_count,sizeof(int),cudaMemcpyDeviceToHost); cudaMemcpy(kk,k,n*sizeof(int),cudaMemcpyDeviceToHost); for(int i=0;i<h_count;i++) { fprintf( outputfilepointer, "%d ",kk[i]); //printf("%d ",kk[i]); } //printf("\n"); fprintf( outputfilepointer, "\n"); } } cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("Time taken by function to execute is: %.6f ms\n", milliseconds); fclose( outputfilepointer ); fclose( inputfilepointer ); }
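One aside on the batching code that ends here: the declaration int h_keys[100]={-1}; sets only h_keys[0] to -1 and zero-initializes the remaining 99 elements, so if every unused slot is meant to carry the -1 sentinel (as the explicit fill loops used elsewhere in this block suggest), an aggregate initializer with a single -1 is not enough. A minimal standalone illustration of that C++ rule (not part of the dataset entry):

#include <cstdio>

int main() {
  int a[5] = {-1};                          // a == {-1, 0, 0, 0, 0}
  int b[5];
  for (int i = 0; i < 5; ++i) b[i] = -1;    // b == {-1, -1, -1, -1, -1}
  printf("a[1]=%d b[1]=%d\n", a[1], b[1]);  // prints a[1]=0 b[1]=-1
  return 0;
}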
2878d3b88df7a66e2b59de171c1364663bda43ef.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <hip/hip_runtime.h> #define N 10000 #define M 10000 #define K 10000 #define tile_size 16 __global__ void matrix_mul_shared_coal(int *a, int *b, int *c) { __shared__ int a_tile[tile_size][tile_size]; //define shared memory tile for matrix a __shared__ int b_tile[tile_size][tile_size]; //define shared memory tile for matrix b int row = blockIdx.y * tile_size + threadIdx.y; int col = blockIdx.x * tile_size + threadIdx.x; int temp = 0; int tileIdx; //Load one tile into shared memory for (int s = 0; s < gridDim.x; s++) { tileIdx = row * K + s * tile_size + threadIdx.x; if(tileIdx >= K*K) a_tile[threadIdx.y][threadIdx.x] = 0; //check if K is divisible by tile size for a_tile else a_tile[threadIdx.y][threadIdx.x] = a[tileIdx]; tileIdx = (s * tile_size + threadIdx.y) * K + col; if(tileIdx >= K*K) b_tile[threadIdx.y][threadIdx.x] = 0; //check if K is divisible by tile size for b_tile else b_tile[threadIdx.y][threadIdx.x] = b[tileIdx]; __syncthreads(); for (int j = 0; j < tile_size; j++) temp += a_tile[threadIdx.y][j] * b_tile[j][threadIdx.x]; //perform addition and multiply __syncthreads(); } if(row < K && col < K) c[row * K + col] = temp; //store result } //Function to initialize matrices with random values void randomInit (int *data, int size) { for (int i = 0; i < size; i++) for (int j = 0; j < size; j++) *(data + i * size + j) = rand() % 1024; } int main() { int *a, *b, *c, *bt; //CPU copies int *d_a, *d_b, *d_c; //GPU copies int matrix_size = N * M * sizeof(int); hipEvent_t start, stop,start1,stop1, start2,stop2; int time,time1,time2; //Start the cuda timer hipEventCreate(&start); hipEventCreate(&start1); hipEventCreate(&start2); hipEventCreate(&stop); hipEventCreate(&stop1); hipEventCreate(&stop2); //Allocate CPU memory a = (int *) malloc(matrix_size); randomInit(a, N); b = (int *) malloc(matrix_size); randomInit(b, M); bt = (int *) malloc(matrix_size); c = (int *) malloc(matrix_size); for (int i = 0; i < M; i++) for (int j = 0; j < M; j++) *(bt + i * M + j) = *(b + j * M + i); //Allocate GPU memory hipMalloc((void **) &d_a, matrix_size); hipMalloc((void **) &d_b, matrix_size); hipMalloc((void **) &d_c, matrix_size); //Copy from CPU memory to GPU memory hipEventRecord( start1, 0 ); hipMemcpy( d_a, a, matrix_size, hipMemcpyHostToDevice); hipMemcpy( d_b, bt, matrix_size, hipMemcpyHostToDevice); hipEventRecord( stop1, 0 ); hipEventSynchronize(stop1); hipEventElapsedTime( &time1, start1, stop1 ); hipEventDestroy( start1 ); hipEventDestroy( stop1 ); //Set thread and grid dimensions dim3 tBlock(16, 16); dim3 Grid((N + 16 - 1)/tBlock.x, (M + 16 - 1)/tBlock.y); hipEventRecord( start, 0 ); //Call kernels hipLaunchKernelGGL(( matrix_mul_shared_coal), dim3(Grid),dim3(tBlock) , 0, 0, d_a,d_b,d_c); hipEventRecord( stop, 0 ); hipEventSynchronize(stop); hipEventElapsedTime( &time, start, stop ); hipEventDestroy( start ); hipEventDestroy( stop ); printf("GPU Execution Time without Memory Transfer= %f\n",time); //Copy from device to host hipEventRecord( start2, 0 ); hipMemcpy( c, d_c, matrix_size, hipMemcpyDeviceToHost); hipEventRecord( stop2, 0 ); hipEventSynchronize(stop2); hipEventElapsedTime( &time2, start2, stop2 ); hipEventDestroy( start2 ); hipEventDestroy( stop2 ); float tTime=time+time1+time2; printf("GPU Execution Time with Memory Transfer: %f\n",tTime); free(a); free(b); free(c); hipFree(d_a); hipFree(d_b); hipFree(d_c); return 0; }
2878d3b88df7a66e2b59de171c1364663bda43ef.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <cuda.h> #define N 10000 #define M 10000 #define K 10000 #define tile_size 16 __global__ void matrix_mul_shared_coal(int *a, int *b, int *c) { __shared__ int a_tile[tile_size][tile_size]; //define shared memory tile for matrix a __shared__ int b_tile[tile_size][tile_size]; //define shared memory tile for matrix b int row = blockIdx.y * tile_size + threadIdx.y; int col = blockIdx.x * tile_size + threadIdx.x; int temp = 0; int tileIdx; //Load one tile into shared memory for (int s = 0; s < gridDim.x; s++) { tileIdx = row * K + s * tile_size + threadIdx.x; if(tileIdx >= K*K) a_tile[threadIdx.y][threadIdx.x] = 0; //check if K is divisible by tile size for a_tile else a_tile[threadIdx.y][threadIdx.x] = a[tileIdx]; tileIdx = (s * tile_size + threadIdx.y) * K + col; if(tileIdx >= K*K) b_tile[threadIdx.y][threadIdx.x] = 0; //check if K is divisible by tile size for b_tile else b_tile[threadIdx.y][threadIdx.x] = b[tileIdx]; __syncthreads(); for (int j = 0; j < tile_size; j++) temp += a_tile[threadIdx.y][j] * b_tile[j][threadIdx.x]; //perform addition and multiply __syncthreads(); } if(row < K && col < K) c[row * K + col] = temp; //store result } //Function to initialize matrices with random values void randomInit (int *data, int size) { for (int i = 0; i < size; i++) for (int j = 0; j < size; j++) *(data + i * size + j) = rand() % 1024; } int main() { int *a, *b, *c, *bt; //CPU copies int *d_a, *d_b, *d_c; //GPU copies int matrix_size = N * M * sizeof(int); cudaEvent_t start, stop,start1,stop1, start2,stop2; int time,time1,time2; //Start the cuda timer cudaEventCreate(&start); cudaEventCreate(&start1); cudaEventCreate(&start2); cudaEventCreate(&stop); cudaEventCreate(&stop1); cudaEventCreate(&stop2); //Allocate CPU memory a = (int *) malloc(matrix_size); randomInit(a, N); b = (int *) malloc(matrix_size); randomInit(b, M); bt = (int *) malloc(matrix_size); c = (int *) malloc(matrix_size); for (int i = 0; i < M; i++) for (int j = 0; j < M; j++) *(bt + i * M + j) = *(b + j * M + i); //Allocate GPU memory cudaMalloc((void **) &d_a, matrix_size); cudaMalloc((void **) &d_b, matrix_size); cudaMalloc((void **) &d_c, matrix_size); //Copy from CPU memory to GPU memory cudaEventRecord( start1, 0 ); cudaMemcpy( d_a, a, matrix_size, cudaMemcpyHostToDevice); cudaMemcpy( d_b, bt, matrix_size, cudaMemcpyHostToDevice); cudaEventRecord( stop1, 0 ); cudaEventSynchronize(stop1); cudaEventElapsedTime( &time1, start1, stop1 ); cudaEventDestroy( start1 ); cudaEventDestroy( stop1 ); //Set thread and grid dimensions dim3 tBlock(16, 16); dim3 Grid((N + 16 - 1)/tBlock.x, (M + 16 - 1)/tBlock.y); cudaEventRecord( start, 0 ); //Call kernels matrix_mul_shared_coal<<< Grid,tBlock >>> (d_a,d_b,d_c); cudaEventRecord( stop, 0 ); cudaEventSynchronize(stop); cudaEventElapsedTime( &time, start, stop ); cudaEventDestroy( start ); cudaEventDestroy( stop ); printf("GPU Execution Time without Memory Transfer= %f\n",time); //Copy from device to host cudaEventRecord( start2, 0 ); cudaMemcpy( c, d_c, matrix_size, cudaMemcpyDeviceToHost); cudaEventRecord( stop2, 0 ); cudaEventSynchronize(stop2); cudaEventElapsedTime( &time2, start2, stop2 ); cudaEventDestroy( start2 ); cudaEventDestroy( stop2 ); float tTime=time+time1+time2; printf("GPU Execution Time with Memory Transfer: %f\n",tTime); free(a); free(b); free(c); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); return 0; }
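A note on the timing in this pair: both the .hip and .cu versions declare the elapsed-time variables as int (int time,time1,time2;) and pass their addresses to hipEventElapsedTime / cudaEventElapsedTime, which take a float*, and then print them with %f; the declarations were presumably meant to be float. A minimal, self-contained event-timing sketch with the expected types (an illustration, not a patch to the entry above):

#include <cstdio>
#include <cuda_runtime.h>

__global__ void noop() {}

int main() {
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  float ms = 0.0f;                      // cudaEventElapsedTime() writes a float
  cudaEventRecord(start, 0);
  noop<<<1, 1>>>();
  cudaEventRecord(stop, 0);
  cudaEventSynchronize(stop);           // make sure 'stop' has actually been reached
  cudaEventElapsedTime(&ms, start, stop);
  printf("kernel time: %f ms\n", ms);
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  return 0;
}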
3d29db941c958095bf2acd1ec3bf98bb27d2596b.hip
// !!! This is a file automatically generated by hipify!!! // SPDX-FileCopyrightText: 2021 Benjamin Brock // // SPDX-License-Identifier: BSD-3-Clause #include <bcl/bcl.hpp> #include <bcl/backends/experimental/nvshmem/backend.hpp> #include <bcl/containers/experimental/cuda/DuplQueue.hpp> #include <bcl/containers/experimental/cuda/launch_kernel.cuh> #include <chrono> int main(int argc, char** argv) { BCL::init(16); printf("Hello, world! I am rank %lu/%lu\n", BCL::rank(), BCL::nprocs()); BCL::cuda::init(8*1024); size_t num_inserts = 2*1024; size_t insert_size = 1024; size_t total_inserts = num_inserts*insert_size; BCL::cuda::DuplQueue<int> queue(0, total_inserts); BCL::cuda::device_vector<int, BCL::cuda::bcl_allocator<int>> values(insert_size); // BCL::cuda::device_vector<int> values(insert_size); std::vector<int> values_local(insert_size, BCL::rank()); values.assign(values_local.begin(), values_local.end()); BCL::cuda::barrier(); auto begin = std::chrono::high_resolution_clock::now(); BCL::cuda::global_launch(num_inserts, [] __device__ (size_t idx, BCL::cuda::DuplQueue<int>& queue, BCL::cuda::device_vector<int, BCL::cuda::bcl_allocator<int>>& values) { // BCL::cuda::device_vector<int>& values) { bool success = queue.push(values.data(), values.size()); if (!success) { printf("AGH! I have failed!\n"); } }, queue, values); hipDeviceSynchronize(); BCL::cuda::barrier(); auto end = std::chrono::high_resolution_clock::now(); double duration = std::chrono::duration<double>(end - begin).count(); double data_moved = total_inserts*sizeof(int); double bw = data_moved / duration; double bw_gb = bw*1e-9; BCL::print("Total %lf s (%lf GB/s)\n", duration, bw_gb); if (BCL::rank() == 0) { std::vector<int> histogram_local(BCL::nprocs(), 0); BCL::cuda::device_vector<int> histogram(BCL::nprocs()); histogram.assign(histogram_local.begin(), histogram_local.end()); BCL::cuda::launch(total_inserts, [] __device__ (size_t idx, BCL::cuda::DuplQueue<int>& queue, BCL::cuda::device_vector<int>& histogram) { int value = 12; bool success = queue.local_pop(value); if (success && value >= 0 && value < BCL::cuda::nprocs()) { atomicAdd(&histogram.data()[value], 1); } else { printf("Error! Missing values in the queue (%lu)\n", idx); } }, queue, histogram); hipDeviceSynchronize(); size_t total_counted = 0; for (size_t i = 0; i < histogram.size(); i++) { int hval = histogram[i]; printf("%lu: %d\n", i, hval); total_counted += hval; } assert(total_counted == total_inserts); } BCL::cuda::barrier(); BCL::print("Finished...\n"); BCL::finalize(); return 0; }
3d29db941c958095bf2acd1ec3bf98bb27d2596b.cu
// SPDX-FileCopyrightText: 2021 Benjamin Brock // // SPDX-License-Identifier: BSD-3-Clause #include <bcl/bcl.hpp> #include <bcl/backends/experimental/nvshmem/backend.hpp> #include <bcl/containers/experimental/cuda/DuplQueue.hpp> #include <bcl/containers/experimental/cuda/launch_kernel.cuh> #include <chrono> int main(int argc, char** argv) { BCL::init(16); printf("Hello, world! I am rank %lu/%lu\n", BCL::rank(), BCL::nprocs()); BCL::cuda::init(8*1024); size_t num_inserts = 2*1024; size_t insert_size = 1024; size_t total_inserts = num_inserts*insert_size; BCL::cuda::DuplQueue<int> queue(0, total_inserts); BCL::cuda::device_vector<int, BCL::cuda::bcl_allocator<int>> values(insert_size); // BCL::cuda::device_vector<int> values(insert_size); std::vector<int> values_local(insert_size, BCL::rank()); values.assign(values_local.begin(), values_local.end()); BCL::cuda::barrier(); auto begin = std::chrono::high_resolution_clock::now(); BCL::cuda::global_launch(num_inserts, [] __device__ (size_t idx, BCL::cuda::DuplQueue<int>& queue, BCL::cuda::device_vector<int, BCL::cuda::bcl_allocator<int>>& values) { // BCL::cuda::device_vector<int>& values) { bool success = queue.push(values.data(), values.size()); if (!success) { printf("AGH! I have failed!\n"); } }, queue, values); cudaDeviceSynchronize(); BCL::cuda::barrier(); auto end = std::chrono::high_resolution_clock::now(); double duration = std::chrono::duration<double>(end - begin).count(); double data_moved = total_inserts*sizeof(int); double bw = data_moved / duration; double bw_gb = bw*1e-9; BCL::print("Total %lf s (%lf GB/s)\n", duration, bw_gb); if (BCL::rank() == 0) { std::vector<int> histogram_local(BCL::nprocs(), 0); BCL::cuda::device_vector<int> histogram(BCL::nprocs()); histogram.assign(histogram_local.begin(), histogram_local.end()); BCL::cuda::launch(total_inserts, [] __device__ (size_t idx, BCL::cuda::DuplQueue<int>& queue, BCL::cuda::device_vector<int>& histogram) { int value = 12; bool success = queue.local_pop(value); if (success && value >= 0 && value < BCL::cuda::nprocs()) { atomicAdd(&histogram.data()[value], 1); } else { printf("Error! Missing values in the queue (%lu)\n", idx); } }, queue, histogram); cudaDeviceSynchronize(); size_t total_counted = 0; for (size_t i = 0; i < histogram.size(); i++) { int hval = histogram[i]; printf("%lu: %d\n", i, hval); total_counted += hval; } assert(total_counted == total_inserts); } BCL::cuda::barrier(); BCL::print("Finished...\n"); BCL::finalize(); return 0; }
cf0dd929fe8f6d81876165a0bf064df3ee5942c1.hip
// !!! This is a file automatically generated by hipify!!! // ------------------------------------------------------------------------ // File: utility.h // S-BLAS: A Scalable Sparse-BLAS Kernel Library for Multi-GPUs. // This file defines the unit_test functions. // ------------------------------------------------------------------------ // Ang Li, Scientist, Pacific Northwest National Laboratory(PNNL), U.S. // Homepage: http://www.angliphd.com // Other PNNL Developers: Chenhao Xie, Jieyang Chen, Jiajia Li, Jesun Firoz // and Linghao Song // GitHub repo: http://www.github.com/uuudown/S-BLAS // PNNL-IPID: 31803-E, IR: PNNL-31803 // MIT Lincese. // ------------------------------------------------------------------------ #include "matrix.h" #include "sblas.h" #include "spmm.h" /* This unit test-function tests COO Matrix */ bool cooMatrixTest() { //============= COO ============= //default construction function CooSparseMatrix<int,double> cooMtx1; //load from file CooSparseMatrix<int,double> cooMtx2("./ash85.mtx"); return true; } /* This unit test-function tests CSR Matrix */ bool csrMatrixTest() { //============= CSR ============= //default construction function CsrSparseMatrix<unsigned,double> csrMtx1; //load from file CsrSparseMatrix<unsigned,double> csrMtx2("./ash85.mtx"); return true; } /* This unit test-function tests CSC Matrix */ bool cscMatrixTest() { //============= CSC ============= //default construction function CscSparseMatrix<unsigned,double> cscMtx1; //obtain from csr matrix CsrSparseMatrix<unsigned,double> csrMtx1("./ash85.mtx"); CscSparseMatrix<unsigned,double> cscMtx2(&csrMtx1); return true; } /* This unit test-function tests Dense Matrix */ bool denseMatrixTest() { //============= Dense ============= //default construction function DenseMatrix<unsigned,double> denseMtx1; //random generate DenseMatrix<unsigned,double> denseMtx2(2048, 2048, row_major); DenseMatrix<unsigned,double>* p_denseMtx3 = NULL; p_denseMtx3 = denseMtx2.transpose(); return true; } bool spmmCsrTest(unsigned b_width, double alpha, double beta, unsigned n_gpu) { cpu_timer load_timer, run_timer, run_cpu_timer; load_timer.start_timer(); CsrSparseMatrix<int, double> A("./ash85.mtx"); DenseMatrix<int,double> B(A.width, b_width, col_major); DenseMatrix<int,double> C(A.height, b_width, 1, col_major); DenseMatrix<int,double> C_cpu(A.height, b_width, 1, col_major); //Partition and Distribute A.sync2gpu(n_gpu, replicate); B.sync2gpu(n_gpu, segment); C.sync2gpu(n_gpu, segment); CUDA_SAFE_CALL( hipDeviceSynchronize() ); load_timer.stop_timer(); run_timer.start_timer(); sblas_spmm_csr_v1<int, double>(&A, &B, &C, alpha, beta, n_gpu); CUDA_CHECK_ERROR(); CUDA_SAFE_CALL( hipDeviceSynchronize() ); run_timer.stop_timer(); run_cpu_timer.start_timer(); sblas_spmm_csr_cpu<int, double>(&A, &B, &C_cpu, alpha, beta); CUDA_CHECK_ERROR(); CUDA_SAFE_CALL( hipDeviceSynchronize() ); run_cpu_timer.stop_timer(); //print_1d_array(C_cpu.val,C_cpu.get_mtx_num()); //print_1d_array(C.val,C.get_mtx_num()); bool correct = check_equal(C_cpu.val, C.val, C.get_mtx_num()); cout << "Validation = " << (correct?"True":"False") << endl; cout << "Load Time: " << load_timer.measure() << "ms." << endl; cout << n_gpu << "-GPUs Run Time: " << run_timer.measure() << " ms." << endl; cout << "CPU Run Time: " << run_cpu_timer.measure() << " ms." 
<< endl; return correct; } bool spmmCsrTest2(unsigned b_width, double alpha, double beta, unsigned n_gpu) { cpu_timer load_timer, run_timer, run_cpu_timer; load_timer.start_timer(); CsrSparseMatrix<int, double> A("./ash85.mtx"); DenseMatrix<int,double> B(A.width, b_width, col_major); DenseMatrix<int,double> C(A.height, b_width, 1, col_major); DenseMatrix<int,double> C_cpu(A.height, b_width, 1, col_major); //Partition and Distribute A.sync2gpu(n_gpu, segment); B.sync2gpu(n_gpu, replicate); C.sync2gpu(n_gpu, replicate); CUDA_SAFE_CALL( hipDeviceSynchronize() ); load_timer.stop_timer(); run_timer.start_timer(); sblas_spmm_csr_v2<int, double>(&A, &B, &C, alpha, beta, n_gpu); CUDA_CHECK_ERROR(); CUDA_SAFE_CALL( hipDeviceSynchronize() ); run_timer.stop_timer(); run_cpu_timer.start_timer(); sblas_spmm_csr_cpu<int, double>(&A, &B, &C_cpu, alpha, beta); CUDA_CHECK_ERROR(); CUDA_SAFE_CALL( hipDeviceSynchronize() ); run_cpu_timer.stop_timer(); //get data back to CPU C.sync2cpu(0); //print_1d_array(C.val,C.get_mtx_num()); //print_1d_array(C_cpu.val,C_cpu.get_mtx_num()); bool correct = check_equal(C_cpu.val, C.val, C.get_mtx_num()); cout << "Validation = " << (correct?"True":"False") << endl; cout << "Load Time: " << load_timer.measure() << "ms." << endl; cout << n_gpu << "-GPUs Run Time: " << run_timer.measure() << " ms." << endl; cout << "CPU Run Time: " << run_cpu_timer.measure() << " ms." << endl; return correct; } bool spmvCsrTest(double alpha, double beta, unsigned n_gpu) { cpu_timer load_timer, run_timer, run_cpu_timer; load_timer.start_timer(); //Correct CsrSparseMatrix<int, double> A("./ash85.mtx"); DenseVector<int,double> B(A.width,1.); DenseVector<int,double> C(A.height,1.); DenseVector<int,double> C_cpu(A.height,1.); //Partition and Distribute A.sync2gpu(n_gpu, segment); B.sync2gpu(n_gpu, replicate); C.sync2gpu(n_gpu, replicate); CUDA_SAFE_CALL( hipDeviceSynchronize() ); load_timer.stop_timer(); //CPU Baseline run_cpu_timer.start_timer(); sblas_spmv_csr_cpu<int, double>(&A, &B, &C_cpu, alpha, beta); CUDA_CHECK_ERROR(); CUDA_SAFE_CALL( hipDeviceSynchronize() ); run_cpu_timer.stop_timer(); run_timer.start_timer(); sblas_spmv_csr_v1<int, double>(&A, &B, &C, alpha, beta, n_gpu); CUDA_CHECK_ERROR(); CUDA_SAFE_CALL( hipDeviceSynchronize() ); run_timer.stop_timer(); //get data back to CPU C.sync2cpu(0); //print_1d_array(C.val,C.get_vec_length()); //print_1d_array(C_cpu.val,C_cpu.get_vec_length()); bool correct = check_equal(C_cpu.val, C.val, C.get_vec_length()); cout << "Validation = " << (correct?"True":"False") << endl; cout << "Load Time: " << load_timer.measure() << "ms." << endl; cout << "CPU Run Time: " << run_cpu_timer.measure() << " ms." << endl; cout << n_gpu << "-GPUs Run Time: " << run_timer.measure() << " ms." << endl; return correct; } int main() { cooMatrixTest(); csrMatrixTest(); cscMatrixTest(); denseMatrixTest(); spmmCsrTest(256,3.0,4.0,4); spmmCsrTest2(256,3.0,4.0,4); spmvCsrTest(3.0,4.0,4); return 0; }
cf0dd929fe8f6d81876165a0bf064df3ee5942c1.cu
// ------------------------------------------------------------------------ // File: utility.h // S-BLAS: A Scalable Sparse-BLAS Kernel Library for Multi-GPUs. // This file defines the unit_test functions. // ------------------------------------------------------------------------ // Ang Li, Scientist, Pacific Northwest National Laboratory(PNNL), U.S. // Homepage: http://www.angliphd.com // Other PNNL Developers: Chenhao Xie, Jieyang Chen, Jiajia Li, Jesun Firoz // and Linghao Song // GitHub repo: http://www.github.com/uuudown/S-BLAS // PNNL-IPID: 31803-E, IR: PNNL-31803 // MIT Lincese. // ------------------------------------------------------------------------ #include "matrix.h" #include "sblas.h" #include "spmm.h" /* This unit test-function tests COO Matrix */ bool cooMatrixTest() { //============= COO ============= //default construction function CooSparseMatrix<int,double> cooMtx1; //load from file CooSparseMatrix<int,double> cooMtx2("./ash85.mtx"); return true; } /* This unit test-function tests CSR Matrix */ bool csrMatrixTest() { //============= CSR ============= //default construction function CsrSparseMatrix<unsigned,double> csrMtx1; //load from file CsrSparseMatrix<unsigned,double> csrMtx2("./ash85.mtx"); return true; } /* This unit test-function tests CSC Matrix */ bool cscMatrixTest() { //============= CSC ============= //default construction function CscSparseMatrix<unsigned,double> cscMtx1; //obtain from csr matrix CsrSparseMatrix<unsigned,double> csrMtx1("./ash85.mtx"); CscSparseMatrix<unsigned,double> cscMtx2(&csrMtx1); return true; } /* This unit test-function tests Dense Matrix */ bool denseMatrixTest() { //============= Dense ============= //default construction function DenseMatrix<unsigned,double> denseMtx1; //random generate DenseMatrix<unsigned,double> denseMtx2(2048, 2048, row_major); DenseMatrix<unsigned,double>* p_denseMtx3 = NULL; p_denseMtx3 = denseMtx2.transpose(); return true; } bool spmmCsrTest(unsigned b_width, double alpha, double beta, unsigned n_gpu) { cpu_timer load_timer, run_timer, run_cpu_timer; load_timer.start_timer(); CsrSparseMatrix<int, double> A("./ash85.mtx"); DenseMatrix<int,double> B(A.width, b_width, col_major); DenseMatrix<int,double> C(A.height, b_width, 1, col_major); DenseMatrix<int,double> C_cpu(A.height, b_width, 1, col_major); //Partition and Distribute A.sync2gpu(n_gpu, replicate); B.sync2gpu(n_gpu, segment); C.sync2gpu(n_gpu, segment); CUDA_SAFE_CALL( cudaDeviceSynchronize() ); load_timer.stop_timer(); run_timer.start_timer(); sblas_spmm_csr_v1<int, double>(&A, &B, &C, alpha, beta, n_gpu); CUDA_CHECK_ERROR(); CUDA_SAFE_CALL( cudaDeviceSynchronize() ); run_timer.stop_timer(); run_cpu_timer.start_timer(); sblas_spmm_csr_cpu<int, double>(&A, &B, &C_cpu, alpha, beta); CUDA_CHECK_ERROR(); CUDA_SAFE_CALL( cudaDeviceSynchronize() ); run_cpu_timer.stop_timer(); //print_1d_array(C_cpu.val,C_cpu.get_mtx_num()); //print_1d_array(C.val,C.get_mtx_num()); bool correct = check_equal(C_cpu.val, C.val, C.get_mtx_num()); cout << "Validation = " << (correct?"True":"False") << endl; cout << "Load Time: " << load_timer.measure() << "ms." << endl; cout << n_gpu << "-GPUs Run Time: " << run_timer.measure() << " ms." << endl; cout << "CPU Run Time: " << run_cpu_timer.measure() << " ms." 
<< endl; return correct; } bool spmmCsrTest2(unsigned b_width, double alpha, double beta, unsigned n_gpu) { cpu_timer load_timer, run_timer, run_cpu_timer; load_timer.start_timer(); CsrSparseMatrix<int, double> A("./ash85.mtx"); DenseMatrix<int,double> B(A.width, b_width, col_major); DenseMatrix<int,double> C(A.height, b_width, 1, col_major); DenseMatrix<int,double> C_cpu(A.height, b_width, 1, col_major); //Partition and Distribute A.sync2gpu(n_gpu, segment); B.sync2gpu(n_gpu, replicate); C.sync2gpu(n_gpu, replicate); CUDA_SAFE_CALL( cudaDeviceSynchronize() ); load_timer.stop_timer(); run_timer.start_timer(); sblas_spmm_csr_v2<int, double>(&A, &B, &C, alpha, beta, n_gpu); CUDA_CHECK_ERROR(); CUDA_SAFE_CALL( cudaDeviceSynchronize() ); run_timer.stop_timer(); run_cpu_timer.start_timer(); sblas_spmm_csr_cpu<int, double>(&A, &B, &C_cpu, alpha, beta); CUDA_CHECK_ERROR(); CUDA_SAFE_CALL( cudaDeviceSynchronize() ); run_cpu_timer.stop_timer(); //get data back to CPU C.sync2cpu(0); //print_1d_array(C.val,C.get_mtx_num()); //print_1d_array(C_cpu.val,C_cpu.get_mtx_num()); bool correct = check_equal(C_cpu.val, C.val, C.get_mtx_num()); cout << "Validation = " << (correct?"True":"False") << endl; cout << "Load Time: " << load_timer.measure() << "ms." << endl; cout << n_gpu << "-GPUs Run Time: " << run_timer.measure() << " ms." << endl; cout << "CPU Run Time: " << run_cpu_timer.measure() << " ms." << endl; return correct; } bool spmvCsrTest(double alpha, double beta, unsigned n_gpu) { cpu_timer load_timer, run_timer, run_cpu_timer; load_timer.start_timer(); //Correct CsrSparseMatrix<int, double> A("./ash85.mtx"); DenseVector<int,double> B(A.width,1.); DenseVector<int,double> C(A.height,1.); DenseVector<int,double> C_cpu(A.height,1.); //Partition and Distribute A.sync2gpu(n_gpu, segment); B.sync2gpu(n_gpu, replicate); C.sync2gpu(n_gpu, replicate); CUDA_SAFE_CALL( cudaDeviceSynchronize() ); load_timer.stop_timer(); //CPU Baseline run_cpu_timer.start_timer(); sblas_spmv_csr_cpu<int, double>(&A, &B, &C_cpu, alpha, beta); CUDA_CHECK_ERROR(); CUDA_SAFE_CALL( cudaDeviceSynchronize() ); run_cpu_timer.stop_timer(); run_timer.start_timer(); sblas_spmv_csr_v1<int, double>(&A, &B, &C, alpha, beta, n_gpu); CUDA_CHECK_ERROR(); CUDA_SAFE_CALL( cudaDeviceSynchronize() ); run_timer.stop_timer(); //get data back to CPU C.sync2cpu(0); //print_1d_array(C.val,C.get_vec_length()); //print_1d_array(C_cpu.val,C_cpu.get_vec_length()); bool correct = check_equal(C_cpu.val, C.val, C.get_vec_length()); cout << "Validation = " << (correct?"True":"False") << endl; cout << "Load Time: " << load_timer.measure() << "ms." << endl; cout << "CPU Run Time: " << run_cpu_timer.measure() << " ms." << endl; cout << n_gpu << "-GPUs Run Time: " << run_timer.measure() << " ms." << endl; return correct; } int main() { cooMatrixTest(); csrMatrixTest(); cscMatrixTest(); denseMatrixTest(); spmmCsrTest(256,3.0,4.0,4); spmmCsrTest2(256,3.0,4.0,4); spmvCsrTest(3.0,4.0,4); return 0; }
87d41819da5a21909043c460b8c99ef1df4d1fdd.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <stddef.h> #include <string.h> #include "CudaDevInfo.h" #include "cuda_vec_op_unary.h" #include "cuda_vec_op_scalar.h" #include "cuda_sum_mean_var.h" #include "cc_vec_op_unary.h" #include "cc_vec_op_scalar.h" #include "cc_sum_mean_var.h" #include "fast_heap.h" #include "cuda_vec_do.h" template <class T> T max_abs_vec_diff(size_t n_vec, //!< no of elements const T *vec1, //!< The vector const T *vec2 //!< The vector ) { T mx = fabs(double(vec1[0]) - double(vec2[0])); size_t i; for(i=1; i<n_vec; i++) { T df = fabs(vec1[i] - vec2[i]); if(df > mx) mx = df; } return mx; } static void help(const char *cmd) { fprintf (stderr, "USAGE:\n" " %s [-v] <N> <rng0> <rng1>\n" "where <N> is a positive integer and <rng0>, <rng1> are floating point numbers.\n" "The program creates a scalar and vector of size <N>, both random in the range\n" "between <rng0> and <rng1>. Then various tests are run and the error is reported.\n" "This is repeated for double and float computation\n", cmd); exit(EXIT_FAILURE); } template<class T> void run_tests(size_t n_vec, T sclr, const T*vec, const char *type_name) { const size_t sz = n_vec * sizeof(T); const size_t sz1 = (n_vec+1) * sizeof(T); GenericHeapElement &ph_res_gpu = h_fast_heap->get(sz); T *h_res_gpu = static_cast<T *>(*ph_res_gpu); GenericHeapElement &pd_vec = d_fast_heap->get(sz); T *d_vec = static_cast<T *>(*pd_vec); GenericHeapElement &pd_res_gpu = d_fast_heap->get(sz1); T *d_res_gpu = static_cast<T *>(*ph_res_gpu); GenericHeapElement &pres_cpu = fast_heap->get(sz1); T *res_cpu = static_cast<T *>(*pres_cpu); gpuErrChk(hipMemcpy(d_vec, vec, sz, hipMemcpyHostToDevice),"cuda_tst_do:cuda_error", ""); // Test sum res_cpu[0] = c_sum_vec(n_vec, vec); h_sum_vec(n_vec, d_vec, d_res_gpu); gpuErrChk(hipMemcpy(h_res_gpu, d_res_gpu, sizeof(T), hipMemcpyDeviceToHost),"cuda_tst_do:cuda_error", ""); printf("%s sum test: %g\n", type_name, max_abs_vec_diff(1, h_res_gpu, res_cpu)); // Test Mean and standard deviation res_cpu[0] = c_mean_vec(n_vec, vec); res_cpu[1] = c_stdv_vec(n_vec, vec, res_cpu[0]); h_mean_vec(n_vec, d_vec, d_res_gpu); gpuErrChk(hipMemcpy(h_res_gpu, d_res_gpu, sizeof(T), hipMemcpyDeviceToHost),"cuda_tst_do:cuda_error", ""); printf("%s mean test: %g\n", type_name, max_abs_vec_diff(1, h_res_gpu, res_cpu)); if(n_vec > 1) { h_stdv_vec(n_vec, d_vec, d_res_gpu, d_res_gpu+1); gpuErrChk(hipMemcpy(h_res_gpu+1, d_res_gpu+1, sizeof(T), hipMemcpyDeviceToHost),"cuda_tst_do:cuda_error", ""); printf("%s stdv test: %g\n", type_name, max_abs_vec_diff(1, h_res_gpu+1, res_cpu+1)); h_mean_stdv_vec(n_vec, d_vec, d_res_gpu); gpuErrChk(hipMemcpy(h_res_gpu, d_res_gpu, 2*sizeof(T), hipMemcpyDeviceToHost),"cuda_tst_do:cuda_error", ""); printf("%s combined mean test: %g\n", type_name, max_abs_vec_diff(1, h_res_gpu, res_cpu)); printf("%s combined stdv test: %g\n", type_name, max_abs_vec_diff(1, h_res_gpu+1, res_cpu+1)); } // Test sqrt and abs c_vec_abs(vec, n_vec, res_cpu); c_vec_sqrt(res_cpu, n_vec, res_cpu); h_vec_abs(d_vec, n_vec, d_res_gpu); h_vec_sqrt(d_res_gpu, n_vec, d_res_gpu); gpuErrChk(hipMemcpy(h_res_gpu, d_res_gpu, sz, hipMemcpyDeviceToHost),"cuda_tst_do:cuda_error", ""); printf("%s sqrt(abs()) test: %g\n", type_name, max_abs_vec_diff(n_vec, h_res_gpu, res_cpu)); // Test subtract scalar c_vec_sub_scalar(sclr, vec, n_vec, res_cpu); h_vec_sub_scalar(sclr, d_vec, n_vec, d_res_gpu); gpuErrChk(hipMemcpy(h_res_gpu, d_res_gpu, sz, hipMemcpyDeviceToHost),"cuda_tst_do:cuda_error", 
""); printf("%s sub_scalar test: %g\n", type_name, max_abs_vec_diff(n_vec, h_res_gpu, res_cpu)); // Test add scalar c_vec_add_scalar(sclr, vec, n_vec, res_cpu); h_vec_add_scalar(sclr, d_vec, n_vec, d_res_gpu); gpuErrChk(hipMemcpy(h_res_gpu, d_res_gpu, sz, hipMemcpyDeviceToHost),"cuda_tst_do:cuda_error", ""); printf("%s add_scalar test: %g\n", type_name, max_abs_vec_diff(n_vec, h_res_gpu, res_cpu)); // Test multiply by scalar c_vec_mlt_scalar(sclr, vec, n_vec, res_cpu); h_vec_mlt_scalar(sclr, d_vec, n_vec, d_res_gpu); gpuErrChk(hipMemcpy(h_res_gpu, d_res_gpu, sz, hipMemcpyDeviceToHost),"cuda_tst_do:cuda_error", ""); printf("%s mlt_scalar test: %g\n", type_name, max_abs_vec_diff(n_vec, h_res_gpu, res_cpu)); // Test divide by scalar if(sclr != 0) { c_vec_div_scalar(sclr, vec, n_vec, res_cpu); h_vec_div_scalar(sclr, d_vec, n_vec, d_res_gpu); gpuErrChk(hipMemcpy(h_res_gpu, d_res_gpu, sz, hipMemcpyDeviceToHost),"cuda_tst_do:cuda_error", ""); printf("%s div_scalar test: %g\n", type_name, max_abs_vec_diff(n_vec, h_res_gpu, res_cpu)); } pres_cpu.discard(); pd_res_gpu.discard(); pd_vec.discard(); ph_res_gpu.discard(); } int cuda_vec_do(int argc, const char *argv[]) { int nv; double rng0, rng1; size_t i; int verbose; if(argc < 4) help(argv[0]); verbose = !strcmp(argv[1],"-v"); if(argc-verbose != 4 || (sscanf(argv[verbose+1],"%d",&nv) + sscanf(argv[verbose+2],"%lg",&rng0) + sscanf(argv[verbose+3],"%lg",&rng1) != 3) || nv <= 0 ) help(argv[0]); // Memory allocation and initialize random data const size_t n_vec = nv; const double dbl_sclr = rng0 + ((rng1-rng0)/double(RAND_MAX))*double(rand()); const float flt_sclr = float(dbl_sclr); double *dbl = (double *) malloc(n_vec*sizeof(double)); float *flt = (float *) malloc(n_vec*sizeof(float)); // Initialize random data for(i=0; i<n_vec; i++) { dbl[i] = rng0 + ((rng1-rng0)/double(RAND_MAX))*double(rand()); flt[i] = float(dbl[i]); } if(verbose) { printf("scalar is: %g\n" "vector is:", dbl_sclr); for(i=0; i<n_vec; i++) printf("%s%g", (i%8)?" ":"\n", dbl[i]); printf("\n++++++++++++++++++\n"); } run_tests(n_vec, dbl_sclr, dbl, "double"); run_tests(n_vec, flt_sclr, flt, "float"); free(flt); free(dbl); return 0; }
87d41819da5a21909043c460b8c99ef1df4d1fdd.cu
#include <stdio.h> #include <stdlib.h> #include <stddef.h> #include <string.h> #include "CudaDevInfo.h" #include "cuda_vec_op_unary.h" #include "cuda_vec_op_scalar.h" #include "cuda_sum_mean_var.h" #include "cc_vec_op_unary.h" #include "cc_vec_op_scalar.h" #include "cc_sum_mean_var.h" #include "fast_heap.h" #include "cuda_vec_do.h" template <class T> T max_abs_vec_diff(size_t n_vec, //!< no of elements const T *vec1, //!< The vector const T *vec2 //!< The vector ) { T mx = fabs(double(vec1[0]) - double(vec2[0])); size_t i; for(i=1; i<n_vec; i++) { T df = fabs(vec1[i] - vec2[i]); if(df > mx) mx = df; } return mx; } static void help(const char *cmd) { fprintf (stderr, "USAGE:\n" " %s [-v] <N> <rng0> <rng1>\n" "where <N> is a positive integer and <rng0>, <rng1> are floating point numbers.\n" "The program creates a scalar and vector of size <N>, both random in the range\n" "between <rng0> and <rng1>. Then various tests are run and the error is reported.\n" "This is repeated for double and float computation\n", cmd); exit(EXIT_FAILURE); } template<class T> void run_tests(size_t n_vec, T sclr, const T*vec, const char *type_name) { const size_t sz = n_vec * sizeof(T); const size_t sz1 = (n_vec+1) * sizeof(T); GenericHeapElement &ph_res_gpu = h_fast_heap->get(sz); T *h_res_gpu = static_cast<T *>(*ph_res_gpu); GenericHeapElement &pd_vec = d_fast_heap->get(sz); T *d_vec = static_cast<T *>(*pd_vec); GenericHeapElement &pd_res_gpu = d_fast_heap->get(sz1); T *d_res_gpu = static_cast<T *>(*ph_res_gpu); GenericHeapElement &pres_cpu = fast_heap->get(sz1); T *res_cpu = static_cast<T *>(*pres_cpu); gpuErrChk(cudaMemcpy(d_vec, vec, sz, cudaMemcpyHostToDevice),"cuda_tst_do:cuda_error", ""); // Test sum res_cpu[0] = c_sum_vec(n_vec, vec); h_sum_vec(n_vec, d_vec, d_res_gpu); gpuErrChk(cudaMemcpy(h_res_gpu, d_res_gpu, sizeof(T), cudaMemcpyDeviceToHost),"cuda_tst_do:cuda_error", ""); printf("%s sum test: %g\n", type_name, max_abs_vec_diff(1, h_res_gpu, res_cpu)); // Test Mean and standard deviation res_cpu[0] = c_mean_vec(n_vec, vec); res_cpu[1] = c_stdv_vec(n_vec, vec, res_cpu[0]); h_mean_vec(n_vec, d_vec, d_res_gpu); gpuErrChk(cudaMemcpy(h_res_gpu, d_res_gpu, sizeof(T), cudaMemcpyDeviceToHost),"cuda_tst_do:cuda_error", ""); printf("%s mean test: %g\n", type_name, max_abs_vec_diff(1, h_res_gpu, res_cpu)); if(n_vec > 1) { h_stdv_vec(n_vec, d_vec, d_res_gpu, d_res_gpu+1); gpuErrChk(cudaMemcpy(h_res_gpu+1, d_res_gpu+1, sizeof(T), cudaMemcpyDeviceToHost),"cuda_tst_do:cuda_error", ""); printf("%s stdv test: %g\n", type_name, max_abs_vec_diff(1, h_res_gpu+1, res_cpu+1)); h_mean_stdv_vec(n_vec, d_vec, d_res_gpu); gpuErrChk(cudaMemcpy(h_res_gpu, d_res_gpu, 2*sizeof(T), cudaMemcpyDeviceToHost),"cuda_tst_do:cuda_error", ""); printf("%s combined mean test: %g\n", type_name, max_abs_vec_diff(1, h_res_gpu, res_cpu)); printf("%s combined stdv test: %g\n", type_name, max_abs_vec_diff(1, h_res_gpu+1, res_cpu+1)); } // Test sqrt and abs c_vec_abs(vec, n_vec, res_cpu); c_vec_sqrt(res_cpu, n_vec, res_cpu); h_vec_abs(d_vec, n_vec, d_res_gpu); h_vec_sqrt(d_res_gpu, n_vec, d_res_gpu); gpuErrChk(cudaMemcpy(h_res_gpu, d_res_gpu, sz, cudaMemcpyDeviceToHost),"cuda_tst_do:cuda_error", ""); printf("%s sqrt(abs()) test: %g\n", type_name, max_abs_vec_diff(n_vec, h_res_gpu, res_cpu)); // Test subtract scalar c_vec_sub_scalar(sclr, vec, n_vec, res_cpu); h_vec_sub_scalar(sclr, d_vec, n_vec, d_res_gpu); gpuErrChk(cudaMemcpy(h_res_gpu, d_res_gpu, sz, cudaMemcpyDeviceToHost),"cuda_tst_do:cuda_error", ""); printf("%s sub_scalar test: %g\n", 
type_name, max_abs_vec_diff(n_vec, h_res_gpu, res_cpu)); // Test add scalar c_vec_add_scalar(sclr, vec, n_vec, res_cpu); h_vec_add_scalar(sclr, d_vec, n_vec, d_res_gpu); gpuErrChk(cudaMemcpy(h_res_gpu, d_res_gpu, sz, cudaMemcpyDeviceToHost),"cuda_tst_do:cuda_error", ""); printf("%s add_scalar test: %g\n", type_name, max_abs_vec_diff(n_vec, h_res_gpu, res_cpu)); // Test multiply by scalar c_vec_mlt_scalar(sclr, vec, n_vec, res_cpu); h_vec_mlt_scalar(sclr, d_vec, n_vec, d_res_gpu); gpuErrChk(cudaMemcpy(h_res_gpu, d_res_gpu, sz, cudaMemcpyDeviceToHost),"cuda_tst_do:cuda_error", ""); printf("%s mlt_scalar test: %g\n", type_name, max_abs_vec_diff(n_vec, h_res_gpu, res_cpu)); // Test divide by scalar if(sclr != 0) { c_vec_div_scalar(sclr, vec, n_vec, res_cpu); h_vec_div_scalar(sclr, d_vec, n_vec, d_res_gpu); gpuErrChk(cudaMemcpy(h_res_gpu, d_res_gpu, sz, cudaMemcpyDeviceToHost),"cuda_tst_do:cuda_error", ""); printf("%s div_scalar test: %g\n", type_name, max_abs_vec_diff(n_vec, h_res_gpu, res_cpu)); } pres_cpu.discard(); pd_res_gpu.discard(); pd_vec.discard(); ph_res_gpu.discard(); } int cuda_vec_do(int argc, const char *argv[]) { int nv; double rng0, rng1; size_t i; int verbose; if(argc < 4) help(argv[0]); verbose = !strcmp(argv[1],"-v"); if(argc-verbose != 4 || (sscanf(argv[verbose+1],"%d",&nv) + sscanf(argv[verbose+2],"%lg",&rng0) + sscanf(argv[verbose+3],"%lg",&rng1) != 3) || nv <= 0 ) help(argv[0]); // Memory allocation and initialize random data const size_t n_vec = nv; const double dbl_sclr = rng0 + ((rng1-rng0)/double(RAND_MAX))*double(rand()); const float flt_sclr = float(dbl_sclr); double *dbl = (double *) malloc(n_vec*sizeof(double)); float *flt = (float *) malloc(n_vec*sizeof(float)); // Initialize random data for(i=0; i<n_vec; i++) { dbl[i] = rng0 + ((rng1-rng0)/double(RAND_MAX))*double(rand()); flt[i] = float(dbl[i]); } if(verbose) { printf("scalar is: %g\n" "vector is:", dbl_sclr); for(i=0; i<n_vec; i++) printf("%s%g", (i%8)?" ":"\n", dbl[i]); printf("\n++++++++++++++++++\n"); } run_tests(n_vec, dbl_sclr, dbl, "double"); run_tests(n_vec, flt_sclr, flt, "float"); free(flt); free(dbl); return 0; }
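One detail worth flagging in this pair: in both versions of run_tests, d_res_gpu is initialized from *ph_res_gpu (the host-side heap element) rather than from *pd_res_gpu, so the device result pointer presumably does not point at the freshly obtained device block. The fast_heap API (GenericHeapElement, d_fast_heap) is not included in this collection, so the sketch below shows the same host/device staging pattern with plain cudaMalloc only, as an assumption-level illustration:

#include <cmath>
#include <cstdio>
#include <cuda_runtime.h>

int main() {
  const size_t n = 8, sz = n * sizeof(double);
  double h_vec[8], h_res[8];
  for (size_t i = 0; i < n; ++i) h_vec[i] = std::sin(double(i));

  double *d_vec = nullptr, *d_res = nullptr;
  cudaMalloc(&d_vec, sz);
  cudaMalloc(&d_res, sz);               // device result buffer the kernels should write to
  cudaMemcpy(d_vec, h_vec, sz, cudaMemcpyHostToDevice);

  // ... device kernels such as h_vec_abs(d_vec, n, d_res) would run here ...
  cudaMemcpy(d_res, d_vec, sz, cudaMemcpyDeviceToDevice);   // stand-in for a kernel

  cudaMemcpy(h_res, d_res, sz, cudaMemcpyDeviceToHost);
  printf("h_res[0] = %g\n", h_res[0]);
  cudaFree(d_res);
  cudaFree(d_vec);
  return 0;
}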
d1b81c1e260548c7902dacde262948cf1109b3df.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "kernel_test0_write.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; char *_ptr = NULL; hipMalloc(&_ptr, XSIZE*YSIZE); char *end_ptr = NULL; hipMalloc(&end_ptr, XSIZE*YSIZE); unsigned int pattern = 1; unsigned int *err = NULL; hipMalloc(&err, XSIZE*YSIZE); unsigned long *err_addr = NULL; hipMalloc(&err_addr, XSIZE*YSIZE); unsigned long *err_expect = NULL; hipMalloc(&err_expect, XSIZE*YSIZE); unsigned long *err_current = NULL; hipMalloc(&err_current, XSIZE*YSIZE); unsigned long *err_second_read = NULL; hipMalloc(&err_second_read, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( kernel_test0_write), dim3(gridBlock),dim3(threadBlock), 0, 0, _ptr,end_ptr,pattern,err,err_addr,err_expect,err_current,err_second_read); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( kernel_test0_write), dim3(gridBlock),dim3(threadBlock), 0, 0, _ptr,end_ptr,pattern,err,err_addr,err_expect,err_current,err_second_read); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( kernel_test0_write), dim3(gridBlock),dim3(threadBlock), 0, 0, _ptr,end_ptr,pattern,err,err_addr,err_expect,err_current,err_second_read); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
d1b81c1e260548c7902dacde262948cf1109b3df.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "kernel_test0_write.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; char *_ptr = NULL; cudaMalloc(&_ptr, XSIZE*YSIZE); char *end_ptr = NULL; cudaMalloc(&end_ptr, XSIZE*YSIZE); unsigned int pattern = 1; unsigned int *err = NULL; cudaMalloc(&err, XSIZE*YSIZE); unsigned long *err_addr = NULL; cudaMalloc(&err_addr, XSIZE*YSIZE); unsigned long *err_expect = NULL; cudaMalloc(&err_expect, XSIZE*YSIZE); unsigned long *err_current = NULL; cudaMalloc(&err_current, XSIZE*YSIZE); unsigned long *err_second_read = NULL; cudaMalloc(&err_second_read, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); kernel_test0_write<<<gridBlock,threadBlock>>>(_ptr,end_ptr,pattern,err,err_addr,err_expect,err_current,err_second_read); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { kernel_test0_write<<<gridBlock,threadBlock>>>(_ptr,end_ptr,pattern,err,err_addr,err_expect,err_current,err_second_read); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { kernel_test0_write<<<gridBlock,threadBlock>>>(_ptr,end_ptr,pattern,err,err_addr,err_expect,err_current,err_second_read); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
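A note on how this pair measures time: kernel launches are asynchronous, and the steady_clock end timestamp is taken immediately after the 1000-launch loop with no device synchronization, so the reported microseconds can largely reflect launch/enqueue overhead rather than kernel execution. If execution time is what is wanted, synchronizing before the second timestamp is the usual fix; a standalone sketch (with a placeholder kernel, not the kernel_test0_write kernel from the entry):

#include <chrono>
#include <cstdio>
#include <cuda_runtime.h>
using namespace std::chrono;

__global__ void placeholder_kernel() {}

int main() {
  auto start = steady_clock::now();
  for (int i = 0; i < 1000; ++i) placeholder_kernel<<<1, 64>>>();
  cudaDeviceSynchronize();              // wait for all queued kernels before stopping the clock
  auto end = steady_clock::now();
  auto usecs = duration_cast<duration<float, microseconds::period>>(end - start);
  printf("%f us for 1000 launches\n", usecs.count());
  return 0;
}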
9fb677244bcdf15169c447610cfaaa9ac1772d2d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* # * Parallel Graph Coloring: # * Author: Kartik Mankad # * Email: [email protected] # * Description: A parallel implementation of the FirstFit algorithm # */ #include "FirstFitCUDA.h" // Init the ColorValid array // TODO: Replace with one thrust::fill call __global__ void InitializeColorVector(int d_NumVertices, bool* d_ColorValid, int* d_ColorVector){ int threadID = blockIdx.x * blockDim.x + threadIdx.x; if (threadID < d_NumVertices){ d_ColorValid[threadID] = false; d_ColorVector[threadID] = NO_COLOR; } }// // Actual Graph Coloring kernel __global__ void ColorGraph(int d_NumVertices, int d_NNZ, int* d_ColIdx, int* d_RowPtr, int* d_ColorVector, bool* d_changed){ int threadID = blockIdx.x * blockDim.x + threadIdx.x; // Temp storage to store the neighbors' colors. int NeighborColors[MAX_DEGREE]; // Set the default value of changed to false *d_changed = false; if (threadID < d_NumVertices) { // So that we dont walk over the edge of the d_RowPtr array if (d_ColorVector[threadID] == NO_COLOR){ // if the vertex is not colored // Iterate over its neighbors int NumNeighbors = 0; for (int CurrNodeOffset=d_RowPtr[threadID]; CurrNodeOffset<d_RowPtr[threadID+1] ; CurrNodeOffset++){ // Mark the neighbor's colors unavailable by // pushing them into the NeighborColors vector int NodeIndex = d_ColIdx[CurrNodeOffset]; int NodeColor = d_ColorVector[NodeIndex]; NeighborColors[NumNeighbors++] = NodeColor; } // Here, we have the neighbor's colors // as first NumNeighbors elements of the NeighborColors array // We go over that array to find the first possible color we can assign // Now that we know what colors _cant_ be used, // lets find the first color that fits bool VertexColored = false; int VertexColor = 1; // We start our attempt from Color#1 bool IsNeighborColor; while(VertexColored != true){ IsNeighborColor = false; // Check if the color we're attempting to assign // is available for (int Neighbor=0; Neighbor < NumNeighbors; Neighbor++){ if (NeighborColors[Neighbor] == VertexColor){ IsNeighborColor = true; break; } } // If the color we're attempting is not already // assigned to one of the neighbors... if (IsNeighborColor == false){ // This is a valid color to assign d_ColorVector[threadID] = VertexColor; // Indicate that we colored a vertex, so the graph state has changed *d_changed = true; // Set the VertexColored flag and break out of the while loop VertexColored = true; break; } else { // Try with the next color VertexColor++; } } // end of while(VertexColored !=true) } // end if d_ColorVector[threadID] == NO_COLOR } // end if (threadID < d_NNZ) } __global__ void ResolveBadColoring(int d_NumVertices, int* d_ColIdx, int* d_RowPtr, int* d_ColorVector, bool* d_ColorValid){ int threadID = blockIdx.x * blockDim.x + threadIdx.x; bool ColorValid = true; if ((threadID < d_NumVertices) && (d_ColorValid[threadID]==false)){ // Iterate over the neighbors and check if the coloring is valid for (int CurrNodeOffset=d_RowPtr[threadID]; CurrNodeOffset<d_RowPtr[threadID+1] ; CurrNodeOffset++){ int NeighborColor = d_ColorVector[d_ColIdx[CurrNodeOffset]]; if ((NeighborColor == d_ColorVector[threadID]) && (threadID<d_ColIdx[CurrNodeOffset])){ // If the color matches with any one neighbor // its not valid, and we must recolor ColorValid=false; d_ColorVector[threadID] = NO_COLOR; break; } // if (NeighborColor == d_ColorVector... 
} // end of for loop that goes over neighbors // Update the vertex's coloring status d_ColorValid[threadID] = ColorValid; }// end of if ((threadID < d_NumVertices) && (d_ColorValid[threadID]==false)){ }
9fb677244bcdf15169c447610cfaaa9ac1772d2d.cu
/* # * Parallel Graph Coloring: # * Author: Kartik Mankad # * Email: [email protected] # * Description: A parallel implementation of the FirstFit algorithm # */ #include "FirstFitCUDA.h" // Init the ColorValid array // TODO: Replace with one thrust::fill call __global__ void InitializeColorVector(int d_NumVertices, bool* d_ColorValid, int* d_ColorVector){ int threadID = blockIdx.x * blockDim.x + threadIdx.x; if (threadID < d_NumVertices){ d_ColorValid[threadID] = false; d_ColorVector[threadID] = NO_COLOR; } }//初始化 // Actual Graph Coloring kernel __global__ void ColorGraph(int d_NumVertices, int d_NNZ, int* d_ColIdx, int* d_RowPtr, int* d_ColorVector, bool* d_changed){ int threadID = blockIdx.x * blockDim.x + threadIdx.x; // Temp storage to store the neighbors' colors. int NeighborColors[MAX_DEGREE]; // Set the default value of changed to false *d_changed = false; if (threadID < d_NumVertices) { // So that we dont walk over the edge of the d_RowPtr array if (d_ColorVector[threadID] == NO_COLOR){ // if the vertex is not colored // Iterate over its neighbors int NumNeighbors = 0; for (int CurrNodeOffset=d_RowPtr[threadID]; CurrNodeOffset<d_RowPtr[threadID+1] ; CurrNodeOffset++){ // Mark the neighbor's colors unavailable by // pushing them into the NeighborColors vector int NodeIndex = d_ColIdx[CurrNodeOffset]; int NodeColor = d_ColorVector[NodeIndex]; NeighborColors[NumNeighbors++] = NodeColor; } // Here, we have the neighbor's colors // as first NumNeighbors elements of the NeighborColors array // We go over that array to find the first possible color we can assign // Now that we know what colors _cant_ be used, // lets find the first color that fits bool VertexColored = false; int VertexColor = 1; // We start our attempt from Color#1 bool IsNeighborColor; while(VertexColored != true){ IsNeighborColor = false; // Check if the color we're attempting to assign // is available for (int Neighbor=0; Neighbor < NumNeighbors; Neighbor++){ if (NeighborColors[Neighbor] == VertexColor){ IsNeighborColor = true; break; } } // If the color we're attempting is not already // assigned to one of the neighbors... if (IsNeighborColor == false){ // This is a valid color to assign d_ColorVector[threadID] = VertexColor; // Indicate that we colored a vertex, so the graph state has changed *d_changed = true; // Set the VertexColored flag and break out of the while loop VertexColored = true; break; } else { // Try with the next color VertexColor++; } } // end of while(VertexColored !=true) } // end if d_ColorVector[threadID] == NO_COLOR } // end if (threadID < d_NNZ) } __global__ void ResolveBadColoring(int d_NumVertices, int* d_ColIdx, int* d_RowPtr, int* d_ColorVector, bool* d_ColorValid){ int threadID = blockIdx.x * blockDim.x + threadIdx.x; bool ColorValid = true; if ((threadID < d_NumVertices) && (d_ColorValid[threadID]==false)){ // Iterate over the neighbors and check if the coloring is valid for (int CurrNodeOffset=d_RowPtr[threadID]; CurrNodeOffset<d_RowPtr[threadID+1] ; CurrNodeOffset++){ int NeighborColor = d_ColorVector[d_ColIdx[CurrNodeOffset]]; if ((NeighborColor == d_ColorVector[threadID]) && (threadID<d_ColIdx[CurrNodeOffset])){ // If the color matches with any one neighbor // its not valid, and we must recolor ColorValid=false; d_ColorVector[threadID] = NO_COLOR; break; } // if (NeighborColor == d_ColorVector... 
} // end of for loop that goes over neighbors // Update the vertex's coloring status d_ColorValid[threadID] = ColorValid; }// end of if ((threadID < d_NumVertices) && (d_ColorValid[threadID]==false)){ }
3bfe910a6f080844d9eff88f52a3ff2aae3ad970.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include "hip/hip_runtime.h" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "mex.h" #include "Utilities.cuh" #define BLOCKSIZE 512 /*******************/ /* SQUARING KERNEL */ /*******************/ __global__ void squareKernel(double * __restrict__ d_vec, const int N) { const int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid >= N) return; d_vec[tid] = d_vec[tid] * d_vec[tid]; } /****************/ /* MEX FUNCTION */ /****************/ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { // --- Recovering the pointer to the input host variable double *h_input = mxGetPr(prhs[0]); // --- Recovering the number of elements of the input variable (the input variable can be also a matrix) int numElements = mxGetN(prhs[0]) * mxGetM(prhs[0]); // --- Allocating space for the input/output device variable double *d_vec; gpuErrchk(hipMalloc(&d_vec, numElements * sizeof(double))); // --- Moving the input from host to device gpuErrchk(hipMemcpy(d_vec, h_input, numElements * sizeof(double), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( squareKernel), dim3(iDivUp(numElements, BLOCKSIZE)), dim3(BLOCKSIZE), 0, 0, d_vec, numElements); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); // --- Allocating space for the output output host variable plhs[0] = mxCreateDoubleMatrix(1, numElements, mxREAL); // --- Recovering the pointer to the output host variable double *h_output = mxGetPr(plhs[0]); gpuErrchk(hipMemcpy(h_output, d_vec, numElements * sizeof(double), hipMemcpyDeviceToHost)); gpuErrchk(hipFree(d_vec)); }
3bfe910a6f080844d9eff88f52a3ff2aae3ad970.cu
#include <stdio.h> #include "cuda.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "mex.h" #include "Utilities.cuh" #define BLOCKSIZE 512 /*******************/ /* SQUARING KERNEL */ /*******************/ __global__ void squareKernel(double * __restrict__ d_vec, const int N) { const int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid >= N) return; d_vec[tid] = d_vec[tid] * d_vec[tid]; } /****************/ /* MEX FUNCTION */ /****************/ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { // --- Recovering the pointer to the input host variable double *h_input = mxGetPr(prhs[0]); // --- Recovering the number of elements of the input variable (the input variable can be also a matrix) int numElements = mxGetN(prhs[0]) * mxGetM(prhs[0]); // --- Allocating space for the input/output device variable double *d_vec; gpuErrchk(cudaMalloc(&d_vec, numElements * sizeof(double))); // --- Moving the input from host to device gpuErrchk(cudaMemcpy(d_vec, h_input, numElements * sizeof(double), cudaMemcpyHostToDevice)); squareKernel<<<iDivUp(numElements, BLOCKSIZE), BLOCKSIZE>>>(d_vec, numElements); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); // --- Allocating space for the output output host variable plhs[0] = mxCreateDoubleMatrix(1, numElements, mxREAL); // --- Recovering the pointer to the output host variable double *h_output = mxGetPr(plhs[0]); gpuErrchk(cudaMemcpy(h_output, d_vec, numElements * sizeof(double), cudaMemcpyDeviceToHost)); gpuErrchk(cudaFree(d_vec)); }
c7d6c839f5e076f387c57908134817870e84a646.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

__global__ void kernel(float *a, float *b, int n) { *a = *b; }

int main(void) {
  float *a, *b;
  hipMalloc(&a, 10 * sizeof(float));
  hipMalloc(&b, 10 * sizeof(float));
  hipLaunchKernelGGL((kernel), dim3(1), dim3(1), 0, 0, a, b, 10);
  hipDeviceSynchronize();
}
c7d6c839f5e076f387c57908134817870e84a646.cu
__global__ void kernel(float *a, float *b, int n) { *a = *b; }

int main(void) {
  float *a, *b;
  cudaMalloc(&a, 10 * sizeof(float));
  cudaMalloc(&b, 10 * sizeof(float));
  kernel<<<1, 1>>>(a, b, 10);
  cudaDeviceSynchronize();
}
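This pair is the smallest CUDA-to-HIP example in the set, so it is a convenient place to spell out the launch mapping hipify applies: the triple-chevron arguments <<<grid, block>>> become the second and third arguments of hipLaunchKernelGGL, followed by the dynamic shared-memory size and the stream (both 0 above), and then the kernel arguments. A standalone side-by-side sketch with the HIP form shown in comments (illustration only, not another dataset entry):

#include <cuda_runtime.h>                     // HIP: #include "hip/hip_runtime.h"

__global__ void kernel(float *a, float *b, int n) { *a = *b; }

int main(void) {
  float *a, *b;
  cudaMalloc(&a, 10 * sizeof(float));         // HIP: hipMalloc(&a, 10 * sizeof(float));
  cudaMalloc(&b, 10 * sizeof(float));         // HIP: hipMalloc(&b, 10 * sizeof(float));
  // CUDA launch: grid and block; shared-mem bytes and stream default to 0.
  kernel<<<1, 1>>>(a, b, 10);
  // HIP launch emitted by hipify, with shared-mem bytes and stream explicit:
  //   hipLaunchKernelGGL(kernel, dim3(1), dim3(1), 0, 0, a, b, 10);
  cudaDeviceSynchronize();                    // HIP: hipDeviceSynchronize();
  cudaFree(a);
  cudaFree(b);
  return 0;
}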
36838d54e0bd336f18ca2a7ccd0466c332f35a1f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "camera.h" #include "hittable.h" #include "image.h" #include "ray.h" #include "util.h" #include <cstdlib> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> __device__ glm::vec3 solarAttn(const glm::vec3& vec) { auto len = glm::length(vec); // return glm::max(len * 100.0f - 99.0f, 0.0f); return len < 0.3f ? glm::vec3(3.0f) : glm::vec3(0.5f, 0.7f, 1.0f); } __device__ glm::vec3 rayColor(hiprandState_t& rand_state, const cudaray::Ray& ray, const cudaray::Hittable& world, int depth) { glm::vec3 color(0.0f, 0.0f, 0.0f); cudaray::Ray current_ray = cudaray::Ray(ray.origin, ray.direction); glm::vec3 factor = glm::vec3(1.0f); int i = 0; while (i < depth) { i++; cudaray::Hit rec; if (world.hit(current_ray, 0.001, infinity, rec)) { // Ray scattered; glm::vec3 attenuation; if (rec.mat->scatter(current_ray, rec, attenuation, current_ray, rand_state)) { factor *= attenuation; } else { color = rec.mat->emitted(glm::vec3()); break; } } else { // #define SUN_ENABLED #ifdef SUN_ENABLED glm::vec3 unit_direction = glm::normalize(current_ray.direction); glm::vec3 cross_d_sun = glm::cross(glm::normalize(glm::vec3(-1)), unit_direction); // if (almostZero(cross_d_sun)) { color = solarAttn(cross_d_sun); #else color = glm::vec3(0.03f); #endif // } else { // auto t = 0.5f * (unit_direction.y + 1.0f); // color = 0.3f * // ((1.0f - t) * glm::vec3(1.0, 1.0, 1.0) + t * glm::vec3(0.5, 0.7, 1.0)); // } break; } } return color * factor; } __global__ void computeRays(const cudaray::Camera* camera, const cudaray::Hittable* world, int width, int height, glm::vec3* image_data) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; hiprandState_t rand_state; hiprand_init(hash(x + y + hash(x) + hash(y) + hash(blockIdx.z) + blockIdx.z), 0, 0, &rand_state); cudaray::Image image(image_data, width, height); auto pixel = glm::ivec2(x, y); auto uv = image.getUVCoord(pixel, glm::vec2(random_float(rand_state) / 2.0f, random_float(rand_state) / 2.0f)); uv.y = 1.0f - uv.y; // flip vertical auto ray = camera->getRay(uv); glm::vec3 out_color = rayColor(rand_state, ray, *world, 100); image.setPixel(pixel, image.getPixel(pixel) + out_color); }
36838d54e0bd336f18ca2a7ccd0466c332f35a1f.cu
#include "camera.h" #include "hittable.h" #include "image.h" #include "ray.h" #include "util.h" #include <cstdlib> #include <curand.h> #include <curand_kernel.h> __device__ glm::vec3 solarAttn(const glm::vec3& vec) { auto len = glm::length(vec); // return glm::max(len * 100.0f - 99.0f, 0.0f); return len < 0.3f ? glm::vec3(3.0f) : glm::vec3(0.5f, 0.7f, 1.0f); } __device__ glm::vec3 rayColor(curandState_t& rand_state, const cudaray::Ray& ray, const cudaray::Hittable& world, int depth) { glm::vec3 color(0.0f, 0.0f, 0.0f); cudaray::Ray current_ray = cudaray::Ray(ray.origin, ray.direction); glm::vec3 factor = glm::vec3(1.0f); int i = 0; while (i < depth) { i++; cudaray::Hit rec; if (world.hit(current_ray, 0.001, infinity, rec)) { // Ray scattered; glm::vec3 attenuation; if (rec.mat->scatter(current_ray, rec, attenuation, current_ray, rand_state)) { factor *= attenuation; } else { color = rec.mat->emitted(glm::vec3()); break; } } else { // #define SUN_ENABLED #ifdef SUN_ENABLED glm::vec3 unit_direction = glm::normalize(current_ray.direction); glm::vec3 cross_d_sun = glm::cross(glm::normalize(glm::vec3(-1)), unit_direction); // if (almostZero(cross_d_sun)) { color = solarAttn(cross_d_sun); #else color = glm::vec3(0.03f); #endif // } else { // auto t = 0.5f * (unit_direction.y + 1.0f); // color = 0.3f * // ((1.0f - t) * glm::vec3(1.0, 1.0, 1.0) + t * glm::vec3(0.5, 0.7, 1.0)); // } break; } } return color * factor; } __global__ void computeRays(const cudaray::Camera* camera, const cudaray::Hittable* world, int width, int height, glm::vec3* image_data) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; curandState_t rand_state; curand_init(hash(x + y + hash(x) + hash(y) + hash(blockIdx.z) + blockIdx.z), 0, 0, &rand_state); cudaray::Image image(image_data, width, height); auto pixel = glm::ivec2(x, y); auto uv = image.getUVCoord(pixel, glm::vec2(random_float(rand_state) / 2.0f, random_float(rand_state) / 2.0f)); uv.y = 1.0f - uv.y; // flip vertical auto ray = camera->getRay(uv); glm::vec3 out_color = rayColor(rand_state, ray, *world, 100); image.setPixel(pixel, image.getPixel(pixel) + out_color); }
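computeRays above gives every thread its own curandState_t, seeded from a pixel-derived hash, and draws uniform samples for sub-pixel jitter. The fragment below is a self-contained sketch of that per-thread RNG pattern together with a 2D launch sized to the image; the kernel name, seed arithmetic, and image size are illustrative assumptions, not code from the ray tracer.

#include <cstdio>
#include "cuda_runtime.h"
#include <curand_kernel.h>

__global__ void rngDemo(float *out, int width, int height)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width || y >= height) return;

    curandState_t rand_state;
    curand_init(1234ULL + y * width + x, 0, 0, &rand_state);  // simple per-pixel seed (assumption)
    out[y * width + x] = curand_uniform(&rand_state);         // uniform float in (0, 1]
}

int main(void)
{
    const int width = 64, height = 64;
    float *d_out;
    cudaMalloc(&d_out, width * height * sizeof(float));

    dim3 block(16, 16);
    dim3 grid((width + block.x - 1) / block.x, (height + block.y - 1) / block.y);
    rngDemo<<<grid, block>>>(d_out, width, height);
    cudaDeviceSynchronize();

    float first;
    cudaMemcpy(&first, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    printf("first sample: %f\n", first);
    cudaFree(d_out);
    return 0;
}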
1f958643401ae0cd1b4be64afee319f55b78a134.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" // Jim Samson // CSF441 Computer Architecture // Assignment 4 // Most code is written by Dr. Mock // This HW Assignment uses cuda and the Sobel filter to convert an image. /*********************************************************************** * sobel-cpu.cu * * Implements a Sobel filter on the image that is hard-coded in main. * You might add the image name as a command line option if you were * to use this more than as a one-off assignment. * * See https://stackoverflow.com/questions/17815687/image-processing-implementing-sobel-filter * or https://blog.saush.com/2011/04/20/edge-detection-with-the-sobel-operator-in-ruby/ * for info on how the filter is implemented. * * Compile/run with: nvcc sobel-cpu.cu -lfreeimage * ***********************************************************************/ #define threadsPerBlock 22 // Returns the index into the 1d pixel array // Given te desired x,y, and image width __device__ int pixelIndex(int x, int y, int width) { return (y*width + x); } __global__ void sobel(char *returnPixels, int width, char *pixels) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int x00 = -1; int x20 = 1; int x01 = -2; int x21 = 2; int x02 = -1; int x22 = 1; x00 *= pixels[pixelIndex(x-1,y-1,width)]; x01 *= pixels[pixelIndex(x-1,y,width)]; x02 *= pixels[pixelIndex(x-1,y+1,width)]; x20 *= pixels[pixelIndex(x+1,y-1,width)]; x21 *= pixels[pixelIndex(x+1,y,width)]; x22 *= pixels[pixelIndex(x+1,y+1,width)]; int y00 = -1; int y10 = -2; int y20 = -1; int y02 = 1; int y12 = 2; int y22 = 1; y00 *= pixels[pixelIndex(x-1,y-1,width)]; y10 *= pixels[pixelIndex(x,y-1,width)]; y20 *= pixels[pixelIndex(x+1,y-1,width)]; y02 *= pixels[pixelIndex(x-1,y+1,width)]; y12 *= pixels[pixelIndex(x,y+1,width)]; y22 *= pixels[pixelIndex(x+1,y+1,width)]; int px = x00 + x01 + x02 + x20 + x21 + x22; int py = y00 + y10 + y20 + y02 + y12 + y22; returnPixels[pixelIndex(x,y,width)] = sqrt(float(px*px + py*py)); }
1f958643401ae0cd1b4be64afee319f55b78a134.cu
#include "includes.h" // Jim Samson // CSF441 Computer Architecture // Assignment 4 // Most code is written by Dr. Mock // This HW Assignment uses cuda and the Sobel filter to convert an image. /*********************************************************************** * sobel-cpu.cu * * Implements a Sobel filter on the image that is hard-coded in main. * You might add the image name as a command line option if you were * to use this more than as a one-off assignment. * * See https://stackoverflow.com/questions/17815687/image-processing-implementing-sobel-filter * or https://blog.saush.com/2011/04/20/edge-detection-with-the-sobel-operator-in-ruby/ * for info on how the filter is implemented. * * Compile/run with: nvcc sobel-cpu.cu -lfreeimage * ***********************************************************************/ #define threadsPerBlock 22 // Returns the index into the 1d pixel array // Given te desired x,y, and image width __device__ int pixelIndex(int x, int y, int width) { return (y*width + x); } __global__ void sobel(char *returnPixels, int width, char *pixels) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int x00 = -1; int x20 = 1; int x01 = -2; int x21 = 2; int x02 = -1; int x22 = 1; x00 *= pixels[pixelIndex(x-1,y-1,width)]; x01 *= pixels[pixelIndex(x-1,y,width)]; x02 *= pixels[pixelIndex(x-1,y+1,width)]; x20 *= pixels[pixelIndex(x+1,y-1,width)]; x21 *= pixels[pixelIndex(x+1,y,width)]; x22 *= pixels[pixelIndex(x+1,y+1,width)]; int y00 = -1; int y10 = -2; int y20 = -1; int y02 = 1; int y12 = 2; int y22 = 1; y00 *= pixels[pixelIndex(x-1,y-1,width)]; y10 *= pixels[pixelIndex(x,y-1,width)]; y20 *= pixels[pixelIndex(x+1,y-1,width)]; y02 *= pixels[pixelIndex(x-1,y+1,width)]; y12 *= pixels[pixelIndex(x,y+1,width)]; y22 *= pixels[pixelIndex(x+1,y+1,width)]; int px = x00 + x01 + x02 + x20 + x21 + x22; int py = y00 + y10 + y20 + y02 + y12 + y22; returnPixels[pixelIndex(x,y,width)] = sqrt(float(px*px + py*py)); }
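The sobel kernel above reads the x±1 / y±1 neighbours without a bounds check, so the caller is expected to keep threads away from the image border. The sketch below is a hypothetical driver, not part of the assignment: it adds that guard in a wrapper kernel with the same Sobel weights, fills a dummy image instead of loading one with FreeImage, and launches with the 22×22 block size defined by threadsPerBlock.

#include <cstdio>
#include <cmath>
#include "cuda_runtime.h"

#define threadsPerBlock 22

__device__ int pixelIndex(int x, int y, int width) { return y * width + x; }

// Guarded variant: skip the one-pixel border so the neighbour reads stay in bounds.
__global__ void sobelGuarded(char *out, int width, int height, const char *in)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    if (x < 1 || y < 1 || x >= width - 1 || y >= height - 1) return;

    int px = -in[pixelIndex(x-1,y-1,width)] - 2*in[pixelIndex(x-1,y,width)] - in[pixelIndex(x-1,y+1,width)]
             + in[pixelIndex(x+1,y-1,width)] + 2*in[pixelIndex(x+1,y,width)] + in[pixelIndex(x+1,y+1,width)];
    int py = -in[pixelIndex(x-1,y-1,width)] - 2*in[pixelIndex(x,y-1,width)] - in[pixelIndex(x+1,y-1,width)]
             + in[pixelIndex(x-1,y+1,width)] + 2*in[pixelIndex(x,y+1,width)] + in[pixelIndex(x+1,y+1,width)];
    out[pixelIndex(x,y,width)] = (char)sqrtf(float(px*px + py*py));
}

int main(void)
{
    const int width = 128, height = 128;
    char *h_in = new char[width * height];
    for (int i = 0; i < width * height; ++i) h_in[i] = (char)(i % 64);  // dummy gradient image

    char *d_in, *d_out;
    cudaMalloc(&d_in,  width * height);
    cudaMalloc(&d_out, width * height);
    cudaMemcpy(d_in, h_in, width * height, cudaMemcpyHostToDevice);
    cudaMemset(d_out, 0, width * height);

    dim3 block(threadsPerBlock, threadsPerBlock);
    dim3 grid((width + block.x - 1) / block.x, (height + block.y - 1) / block.y);
    sobelGuarded<<<grid, block>>>(d_out, width, height, d_in);
    cudaDeviceSynchronize();

    cudaFree(d_in);
    cudaFree(d_out);
    delete[] h_in;
    return 0;
}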
2b6e5347a9f000f51f59f1f6780421e1d500a367.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** @file fil.cu implements forest inference */ #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <treelite/c_api.h> #include <treelite/tree.h> #include <algorithm> #include <cmath> #include <limits> #include <stack> #include <utility> #include <cuml/fil/fil.h> #include <raft/cudart_utils.h> #include <cuml/common/cuml_allocator.hpp> #include "common_hip.cuh" namespace ML { namespace fil { using namespace MLCommon; namespace tl = treelite; void node_init(dense_node_t* n, val_t output, float thresh, int fid, bool def_left, bool is_leaf) { *n = dense_node(output, thresh, fid, def_left, is_leaf); } void node_decode(const dense_node_t* n, val_t* output, float* thresh, int* fid, bool* def_left, bool* is_leaf) { dense_node dn(*n); *output = dn.output<val_t>(); *thresh = dn.thresh(); *fid = dn.fid(); *def_left = dn.def_left(); *is_leaf = dn.is_leaf(); } inline void node_init_inline(sparse_node16_t* node, val_t output, float thresh, int fid, bool def_left, bool is_leaf, int left_index) { sparse_node16 n(output, thresh, fid, def_left, is_leaf, left_index); *node = sparse_node16_t(n, n); } void node_init(sparse_node16_t* node, val_t output, float thresh, int fid, bool def_left, bool is_leaf, int left_index) { node_init_inline(node, output, thresh, fid, def_left, is_leaf, left_index); } void node_decode(const sparse_node16_t* node, val_t* output, float* thresh, int* fid, bool* def_left, bool* is_leaf, int* left_index) { node_decode((const dense_node_t*)node, output, thresh, fid, def_left, is_leaf); *left_index = sparse_node16(*node).left_index(); } inline void node_init_inline(sparse_node8_t* node, val_t output, float thresh, int fid, bool def_left, bool is_leaf, int left_index) { sparse_node8 n(output, thresh, fid, def_left, is_leaf, left_index); *node = sparse_node8_t(n); } void node_init(sparse_node8_t* node, val_t output, float thresh, int fid, bool def_left, bool is_leaf, int left_index) { node_init_inline(node, output, thresh, fid, def_left, is_leaf, left_index); } void node_decode(const sparse_node8_t* node, val_t* output, float* thresh, int* fid, bool* def_left, bool* is_leaf, int* left_index) { node_decode((const dense_node_t*)node, output, thresh, fid, def_left, is_leaf); *left_index = sparse_node8(*node).left_index(); } __host__ __device__ float sigmoid(float x) { return 1.0f / (1.0f + expf(-x)); } /** performs additional transformations on the array of forest predictions (preds) of size n; the transformations are defined by output, and include averaging (multiplying by inv_num_trees), adding global_bias (always done), sigmoid and applying threshold. 
in case of complement_proba, fills in the complement probability */ __global__ void transform_k(float* preds, size_t n, output_t output, float inv_num_trees, float threshold, float global_bias, bool complement_proba) { size_t i = threadIdx.x + size_t(blockIdx.x) * blockDim.x; if (i >= n) return; if (complement_proba && i % 2 != 0) return; float result = preds[i]; if ((output & output_t::AVG) != 0) result *= inv_num_trees; result += global_bias; if ((output & output_t::SIGMOID) != 0) result = sigmoid(result); // will not be done on CATEGORICAL_LEAF because the whole kernel will not run if ((output & output_t::CLASS) != 0) { result = result > threshold ? 1.0f : 0.0f; } // sklearn outputs numpy array in 'C' order, with the number of classes being last dimension // that is also the default order, so we should use the same one if (complement_proba) { preds[i] = 1.0f - result; preds[i + 1] = result; } else preds[i] = result; } struct forest { void init_max_shm() { int max_shm_std = 48 * 1024; // 48 KiB int device = 0; // TODO(canonizer): use raft::handle_t for this CUDA_CHECK(hipGetDevice(&device)); CUDA_CHECK(hipDeviceGetAttribute( &max_shm_, hipDeviceAttributeSharedMemPerBlockOptin, device)); // TODO(canonizer): use >48KiB shared memory if available max_shm_ = ::min(max_shm_, max_shm_std); } void init_fixed_block_count(const raft::handle_t& h, int blocks_per_sm) { int max_threads_per_sm, sm_count; CUDA_CHECK(hipDeviceGetAttribute(&max_threads_per_sm, hipDeviceAttributeMaxThreadsPerMultiProcessor, h.get_device())); int max_blocks_per_sm = max_threads_per_sm / FIL_TPB; ASSERT(blocks_per_sm <= max_blocks_per_sm, "on this GPU, FIL blocks_per_sm cannot exceed %d", max_blocks_per_sm); CUDA_CHECK(hipDeviceGetAttribute(&sm_count, hipDeviceAttributeMultiprocessorCount, h.get_device())); fixed_block_count_ = blocks_per_sm * sm_count; } void init_common(const raft::handle_t& h, const forest_params_t* params) { depth_ = params->depth; num_trees_ = params->num_trees; num_cols_ = params->num_cols; algo_ = params->algo; output_ = params->output; threshold_ = params->threshold; global_bias_ = params->global_bias; leaf_algo_ = params->leaf_algo; num_classes_ = params->num_classes; init_max_shm(); init_fixed_block_count(h, params->blocks_per_sm); } virtual void infer(predict_params params, hipStream_t stream) = 0; void predict(const raft::handle_t& h, float* preds, const float* data, size_t num_rows, bool predict_proba) { // Initialize prediction parameters. 
predict_params params; params.num_cols = num_cols_; params.algo = algo_; params.preds = preds; params.data = data; params.num_rows = num_rows; params.max_shm = max_shm_; params.num_classes = num_classes_; params.leaf_algo = leaf_algo_; // fixed_block_count_ == 0 means the number of thread blocks is // proportional to the number of rows params.num_blocks = fixed_block_count_; /** The binary classification / regression (FLOAT_UNARY_BINARY) predict_proba() works as follows (always 2 outputs): RAW: output the sum of tree predictions AVG is set: divide by the number of trees (averaging) SIGMOID is set: apply sigmoid CLASS is set: ignored write the output of the previous stages and its complement The binary classification / regression (FLOAT_UNARY_BINARY) predict() works as follows (always 1 output): RAW (no values set): output the sum of tree predictions AVG is set: divide by the number of trees (averaging) SIGMOID is set: apply sigmoid CLASS is set: apply threshold (equivalent to choosing best class) The multi-class classification / regression (CATEGORICAL_LEAF) predict_proba() works as follows (always num_classes outputs): RAW (no values set): output class votes AVG is set: divide by the number of trees (averaging, output class probability) SIGMOID is set: apply sigmoid CLASS is set: ignored The multi-class classification / regression (CATEGORICAL_LEAF) predict() works as follows (always 1 output): RAW (no values set): output the label of the class with highest probability, else output label 0. All other flags (AVG, SIGMOID, CLASS) are ignored The multi-class classification / regression (GROVE_PER_CLASS) predict_proba() is not implemented The multi-class classification / regression (GROVE_PER_CLASS) predict() works as follows (always 1 output): RAW (no values set): output the label of the class with highest margin, equal margins resolved in favor of smaller label integer All other flags (AVG, SIGMOID, CLASS) are ignored */ output_t ot = output_; bool complement_proba = false, do_transform; if (predict_proba) { // no threshold on probabilities ot = output_t(ot & ~output_t::CLASS); switch (leaf_algo_) { case leaf_algo_t::FLOAT_UNARY_BINARY: params.num_outputs = 2; complement_proba = true; do_transform = true; break; case leaf_algo_t::GROVE_PER_CLASS: // TODO(levsnv): add softmax to implement predict_proba ASSERT( false, "predict_proba not supported for multi-class gradient boosted " "decision trees (encountered in xgboost, scikit-learn, lightgbm)"); case leaf_algo_t::CATEGORICAL_LEAF: params.num_outputs = num_classes_; do_transform = ot != output_t::RAW || global_bias_ != 0.0f; break; default: ASSERT(false, "internal error: invalid leaf_algo_"); } } else { if (leaf_algo_ == leaf_algo_t::FLOAT_UNARY_BINARY) { do_transform = ot != output_t::RAW || global_bias_ != 0.0f; } else { // GROVE_PER_CLASS, CATEGORICAL_LEAF: moot since choosing best class and // all transforms are monotonic. also, would break current code do_transform = false; } params.num_outputs = 1; } // Predict using the forest. hipStream_t stream = h.get_stream(); infer(params, stream); if (do_transform) { size_t num_values_to_transform = (size_t)num_rows * (size_t)params.num_outputs; hipLaunchKernelGGL(( transform_k), dim3(raft::ceildiv(num_values_to_transform, (size_t)FIL_TPB)), dim3(FIL_TPB), 0, stream, preds, num_values_to_transform, ot, num_trees_ > 0 ? 
(1.0f / num_trees_) : 1.0f, threshold_, global_bias_, complement_proba); CUDA_CHECK(hipPeekAtLastError()); } } virtual void free(const raft::handle_t& h) = 0; virtual ~forest() {} int num_trees_ = 0; int depth_ = 0; int num_cols_ = 0; algo_t algo_ = algo_t::NAIVE; int max_shm_ = 0; output_t output_ = output_t::RAW; float threshold_ = 0.5; float global_bias_ = 0; leaf_algo_t leaf_algo_ = leaf_algo_t::FLOAT_UNARY_BINARY; int num_classes_ = 1; int fixed_block_count_ = 0; }; struct dense_forest : forest { void transform_trees(const dense_node_t* nodes) { /* Populate node information: For each tree, the nodes are still stored in the breadth-first, left-to-right order. However, instead of storing the nodes of the same tree adjacently, it uses a different layout. In this layout, the roots of all trees (node 0) are stored first, followed by left children of the roots of all trees (node 1), followed by the right children of the roots of all trees (node 2), and so on. */ int global_node = 0; for (int tree = 0; tree < num_trees_; ++tree) { int tree_node = 0; // the counters `level` and `branch` are not used for computing node // indices, they are only here to highlight the node ordering within // each tree for (int level = 0; level <= depth_; ++level) { for (int branch = 0; branch < 1 << level; ++branch) { h_nodes_[tree_node * num_trees_ + tree] = dense_node(nodes[global_node]); ++tree_node; ++global_node; } } } } void init(const raft::handle_t& h, const dense_node_t* nodes, const forest_params_t* params) { init_common(h, params); if (algo_ == algo_t::NAIVE) algo_ = algo_t::BATCH_TREE_REORG; int num_nodes = forest_num_nodes(num_trees_, depth_); nodes_ = (dense_node*)h.get_device_allocator()->allocate( sizeof(dense_node) * num_nodes, h.get_stream()); h_nodes_.resize(num_nodes); if (algo_ == algo_t::NAIVE) { std::copy(nodes, nodes + num_nodes, h_nodes_.begin()); } else { transform_trees(nodes); } CUDA_CHECK(hipMemcpyAsync(nodes_, h_nodes_.data(), num_nodes * sizeof(dense_node), hipMemcpyHostToDevice, h.get_stream())); // copy must be finished before freeing the host data CUDA_CHECK(hipStreamSynchronize(h.get_stream())); h_nodes_.clear(); h_nodes_.shrink_to_fit(); } virtual void infer(predict_params params, hipStream_t stream) override { dense_storage forest(nodes_, num_trees_, algo_ == algo_t::NAIVE ? tree_num_nodes(depth_) : 1, algo_ == algo_t::NAIVE ? 
1 : num_trees_); fil::infer(forest, params, stream); } virtual void free(const raft::handle_t& h) override { int num_nodes = forest_num_nodes(num_trees_, depth_); h.get_device_allocator()->deallocate(nodes_, sizeof(dense_node) * num_nodes, h.get_stream()); } dense_node* nodes_ = nullptr; thrust::host_vector<dense_node> h_nodes_; }; template <typename node_t> struct external_node {}; template <> struct external_node<sparse_node16> { typedef sparse_node16_t t; }; template <> struct external_node<sparse_node8> { typedef sparse_node8_t t; }; template <typename node_t> struct sparse_forest : forest { typedef typename external_node<node_t>::t external_node_t; void init(const raft::handle_t& h, const int* trees, const external_node_t* nodes, const forest_params_t* params) { init_common(h, params); if (algo_ == algo_t::ALGO_AUTO) algo_ = algo_t::NAIVE; depth_ = 0; // a placeholder value num_nodes_ = params->num_nodes; // trees trees_ = (int*)h.get_device_allocator()->allocate(sizeof(int) * num_trees_, h.get_stream()); CUDA_CHECK(hipMemcpyAsync(trees_, trees, sizeof(int) * num_trees_, hipMemcpyHostToDevice, h.get_stream())); // nodes nodes_ = (node_t*)h.get_device_allocator()->allocate( sizeof(node_t) * num_nodes_, h.get_stream()); CUDA_CHECK(hipMemcpyAsync(nodes_, nodes, sizeof(node_t) * num_nodes_, hipMemcpyHostToDevice, h.get_stream())); } virtual void infer(predict_params params, hipStream_t stream) override { sparse_storage<node_t> forest(trees_, nodes_, num_trees_); fil::infer(forest, params, stream); } void free(const raft::handle_t& h) override { h.get_device_allocator()->deallocate(trees_, sizeof(int) * num_trees_, h.get_stream()); h.get_device_allocator()->deallocate(nodes_, sizeof(node_t) * num_nodes_, h.get_stream()); } int num_nodes_ = 0; int* trees_ = nullptr; node_t* nodes_ = nullptr; }; void check_params(const forest_params_t* params, bool dense) { if (dense) { ASSERT(params->depth >= 0, "depth must be non-negative for dense forests"); } else { ASSERT(params->num_nodes >= 0, "num_nodes must be non-negative for sparse forests"); ASSERT(params->algo == algo_t::NAIVE || params->algo == algo_t::ALGO_AUTO, "only ALGO_AUTO and NAIVE algorithms are supported " "for sparse forests"); } ASSERT(params->num_trees >= 0, "num_trees must be non-negative"); ASSERT(params->num_cols >= 0, "num_cols must be non-negative"); switch (params->algo) { case algo_t::ALGO_AUTO: case algo_t::NAIVE: case algo_t::TREE_REORG: case algo_t::BATCH_TREE_REORG: break; default: ASSERT(false, "algo should be ALGO_AUTO, NAIVE, TREE_REORG or BATCH_TREE_REORG"); } switch (params->leaf_algo) { case leaf_algo_t::FLOAT_UNARY_BINARY: if ((params->output & output_t::CLASS) != 0) { ASSERT(params->num_classes == 2, "only supporting binary" " classification using FLOAT_UNARY_BINARY"); } else { ASSERT(params->num_classes == 1, "num_classes must be 1 for " "regression"); } break; case leaf_algo_t::GROVE_PER_CLASS: ASSERT(params->num_classes > 2, "num_classes > 2 is required for leaf_algo == GROVE_PER_CLASS"); ASSERT(params->num_trees % params->num_classes == 0, "num_classes must divide num_trees evenly for GROVE_PER_CLASS"); break; case leaf_algo_t::CATEGORICAL_LEAF: ASSERT(params->num_classes >= 2, "num_classes >= 2 is required for " "leaf_algo == CATEGORICAL_LEAF"); break; default: ASSERT(false, "leaf_algo must be FLOAT_UNARY_BINARY, CATEGORICAL_LEAF" " or GROVE_PER_CLASS"); } // output_t::RAW == 0, and doesn't have a separate flag output_t all_set = output_t(output_t::AVG | output_t::SIGMOID | output_t::CLASS); if 
((params->output & ~all_set) != 0) { ASSERT(false, "output should be a combination of RAW, AVG, SIGMOID and CLASS"); } ASSERT(params->blocks_per_sm >= 0, "blocks_per_sm must be nonnegative"); } int tree_root(const tl::Tree& tree) { return 0; // Treelite format assumes that the root is 0 } int max_depth_helper(const tl::Tree& tree, int node_id, int limit) { if (tree.IsLeaf(node_id)) return 0; ASSERT(limit > 0, "recursion depth limit reached, might be a cycle in the tree"); return 1 + ::max(max_depth_helper(tree, tree.LeftChild(node_id), limit - 1), max_depth_helper(tree, tree.RightChild(node_id), limit - 1)); } inline int max_depth(const tl::Tree& tree) { // trees of this depth aren't used, so it most likely means bad input data, // e.g. cycles in the forest const int DEPTH_LIMIT = 500; int root_index = tree_root(tree); typedef std::pair<int, int> pair_t; std::stack<pair_t> stack; stack.push(pair_t(root_index, 0)); int max_depth = 0; while (!stack.empty()) { const pair_t& pair = stack.top(); int node_id = pair.first; int depth = pair.second; stack.pop(); while (!tree.IsLeaf(node_id)) { stack.push(pair_t(tree.LeftChild(node_id), depth + 1)); node_id = tree.RightChild(node_id); depth++; ASSERT(depth < DEPTH_LIMIT, "depth limit reached, might be a cycle in the tree"); } // only need to update depth for leaves max_depth = ::max(max_depth, depth); } return max_depth; } int max_depth(const tl::Model& model) { int depth = 0; for (const auto& tree : model.trees) depth = ::max(depth, max_depth(tree)); return depth; } inline void adjust_threshold(float* pthreshold, int* tl_left, int* tl_right, bool* default_left, tl::Operator comparison_op) { // in treelite (take left node if val [op] threshold), // the meaning of the condition is reversed compared to FIL; // thus, "<" in treelite corresonds to comparison ">=" used by FIL // https://github.com/dmlc/treelite/blob/master/include/treelite/tree.h#L243 switch (comparison_op) { case tl::Operator::kLT: break; case tl::Operator::kLE: // x <= y is equivalent to x < y', where y' is the next representable float *pthreshold = std::nextafterf(*pthreshold, std::numeric_limits<float>::infinity()); break; case tl::Operator::kGT: // x > y is equivalent to x >= y', where y' is the next representable float // left and right still need to be swapped *pthreshold = std::nextafterf(*pthreshold, std::numeric_limits<float>::infinity()); case tl::Operator::kGE: // swap left and right std::swap(*tl_left, *tl_right); *default_left = !*default_left; break; default: ASSERT(false, "only <, >, <= and >= comparisons are supported"); } } /** if the vector consists of zeros and a single one, return the position for the one (assumed class label). Else, asserts false. 
If the vector contains a NAN, asserts false */ int find_class_label_from_one_hot(tl::tl_float* vector, int len) { bool found_label = false; int out; for (int i = 0; i < len; ++i) { if (vector[i] == 1.0f) { ASSERT(!found_label, "label vector contains multiple 1.0f"); out = i; found_label = true; } else { ASSERT(vector[i] == 0.0f, "label vector contains values other than 0.0 and 1.0"); } } ASSERT(found_label, "did not find 1.0f in vector"); return out; } template <typename fil_node_t> void tl2fil_leaf_payload(fil_node_t* fil_node, const tl::Tree& tl_tree, int tl_node_id, const forest_params_t& forest_params) { auto vec = tl_tree.LeafVector(tl_node_id); switch (forest_params.leaf_algo) { case leaf_algo_t::CATEGORICAL_LEAF: ASSERT(vec.size() == forest_params.num_classes, "inconsistent number of classes in treelite leaves"); fil_node->val.idx = find_class_label_from_one_hot(&vec[0], vec.size()); break; case leaf_algo_t::FLOAT_UNARY_BINARY: case leaf_algo_t::GROVE_PER_CLASS: fil_node->val.f = tl_tree.LeafValue(tl_node_id); ASSERT(!tl_tree.HasLeafVector(tl_node_id), "some but not all treelite leaves have leaf_vector()"); break; default: ASSERT(false, "internal error: invalid leaf_algo"); }; } void node2fil_dense(std::vector<dense_node_t>* pnodes, int root, int cur, const tl::Tree& tree, int node_id, const forest_params_t& forest_params) { if (tree.IsLeaf(node_id)) { node_init(&(*pnodes)[root + cur], val_t{.f = NAN}, NAN, 0, false, true); tl2fil_leaf_payload(&(*pnodes)[root + cur], tree, node_id, forest_params); return; } // inner node ASSERT(tree.SplitType(node_id) == tl::SplitFeatureType::kNumerical, "only numerical split nodes are supported"); int tl_left = tree.LeftChild(node_id), tl_right = tree.RightChild(node_id); bool default_left = tree.DefaultLeft(node_id); float threshold = tree.Threshold(node_id); adjust_threshold(&threshold, &tl_left, &tl_right, &default_left, tree.ComparisonOp(node_id)); node_init(&(*pnodes)[root + cur], val_t{.f = 0}, threshold, tree.SplitIndex(node_id), default_left, false); int left = 2 * cur + 1; node2fil_dense(pnodes, root, left, tree, tl_left, forest_params); node2fil_dense(pnodes, root, left + 1, tree, tl_right, forest_params); } void tree2fil_dense(std::vector<dense_node_t>* pnodes, int root, const tl::Tree& tree, const forest_params_t& forest_params) { node2fil_dense(pnodes, root, 0, tree, tree_root(tree), forest_params); } template <typename fil_node_t> int tree2fil_sparse(std::vector<fil_node_t>* pnodes, const tl::Tree& tree, const forest_params_t& forest_params) { typedef std::pair<int, int> pair_t; std::stack<pair_t> stack; int root = pnodes->size(); pnodes->push_back(fil_node_t()); stack.push(pair_t(tree_root(tree), 0)); while (!stack.empty()) { const pair_t& top = stack.top(); int node_id = top.first; int cur = top.second; stack.pop(); while (!tree.IsLeaf(node_id)) { // inner node ASSERT(tree.SplitType(node_id) == tl::SplitFeatureType::kNumerical, "only numerical split nodes are supported"); // tl_left and tl_right are indices of the children in the treelite tree // (stored as an array of nodes) int tl_left = tree.LeftChild(node_id), tl_right = tree.RightChild(node_id); bool default_left = tree.DefaultLeft(node_id); float threshold = tree.Threshold(node_id); adjust_threshold(&threshold, &tl_left, &tl_right, &default_left, tree.ComparisonOp(node_id)); // reserve space for child nodes // left is the offset of the left child node relative to the tree root // in the array of all nodes of the FIL sparse forest int left = pnodes->size() - root; 
pnodes->push_back(fil_node_t()); pnodes->push_back(fil_node_t()); node_init_inline(&(*pnodes)[root + cur], val_t{.f = 0}, threshold, tree.SplitIndex(node_id), default_left, false, left); // push child nodes into the stack stack.push(pair_t(tl_right, left + 1)); //stack.push(pair_t(tl_left, left)); node_id = tl_left; cur = left; } // leaf node node_init_inline(&(*pnodes)[root + cur], val_t{.f = NAN}, NAN, 0, false, true, 0); tl2fil_leaf_payload(&(*pnodes)[root + cur], tree, node_id, forest_params); } return root; } size_t tl_leaf_vector_size(const tl::Model& model) { const tl::Tree& tree = model.trees[0]; int node_key; for (node_key = tree_root(tree); !tree.IsLeaf(node_key); node_key = tree.RightChild(node_key)) ; if (tree.HasLeafVector(node_key)) return tree.LeafVector(node_key).size(); return 0; } // tl2fil_common is the part of conversion from a treelite model // common for dense and sparse forests void tl2fil_common(forest_params_t* params, const tl::Model& model, const treelite_params_t* tl_params) { // fill in forest-indendent params params->algo = tl_params->algo; params->threshold = tl_params->threshold; // fill in forest-dependent params params->depth = max_depth(model); // also checks for cycles const tl::ModelParam& param = model.param; // assuming either all leaves use the .leaf_vector() or all leaves use .leaf_value() size_t leaf_vec_size = tl_leaf_vector_size(model); std::string pred_transform(param.pred_transform); if (leaf_vec_size > 0) { ASSERT(leaf_vec_size == model.num_output_group, "treelite model inconsistent"); params->num_classes = leaf_vec_size; params->leaf_algo = leaf_algo_t::CATEGORICAL_LEAF; ASSERT(tl_params->output_class, "output_class==true is required for multi-class models"); ASSERT( pred_transform == "max_index" || pred_transform == "identity_multiclass", "only max_index and identity_multiclass values of pred_transform " "are supported for multi-class models"); } else { if (model.num_output_group > 1) { params->num_classes = model.num_output_group; ASSERT(tl_params->output_class, "output_class==true is required for multi-class models"); ASSERT(pred_transform == "sigmoid" || pred_transform == "identity" || pred_transform == "max_index" || pred_transform == "softmax" || pred_transform == "multiclass_ova", "only sigmoid, identity, max_index, multiclass_ova and softmax " "values of pred_transform are supported for xgboost-style " "multi-class classification models."); // this function should not know how many threads per block will be used params->leaf_algo = leaf_algo_t::GROVE_PER_CLASS; } else { params->num_classes = tl_params->output_class ? 2 : 1; ASSERT(pred_transform == "sigmoid" || pred_transform == "identity", "only sigmoid and identity values of pred_transform " "are supported for binary classification and regression models."); params->leaf_algo = leaf_algo_t::FLOAT_UNARY_BINARY; } } params->num_cols = model.num_feature; ASSERT(param.sigmoid_alpha == 1.0f, "sigmoid_alpha not supported"); params->global_bias = param.global_bias; params->output = output_t::RAW; /** output_t::CLASS denotes using a threshold in FIL, when predict_proba == false. For all multiclass models, the best class is selected using argmax instead. This happens when either leaf_algo == CATEGORICAL_LEAF or num_classes > 2. 
**/ if (tl_params->output_class && params->leaf_algo != CATEGORICAL_LEAF && params->num_classes <= 2) { params->output = output_t(params->output | output_t::CLASS); } // "random forest" in treelite means tree output averaging if (model.random_forest_flag) { params->output = output_t(params->output | output_t::AVG); } if (std::string(param.pred_transform) == "sigmoid") { params->output = output_t(params->output | output_t::SIGMOID); } params->num_trees = model.trees.size(); params->blocks_per_sm = tl_params->blocks_per_sm; } // uses treelite model with additional tl_params to initialize FIL params // and dense nodes (stored in *pnodes) void tl2fil_dense(std::vector<dense_node_t>* pnodes, forest_params_t* params, const tl::Model& model, const treelite_params_t* tl_params) { tl2fil_common(params, model, tl_params); // convert the nodes int num_nodes = forest_num_nodes(params->num_trees, params->depth); pnodes->resize(num_nodes, dense_node_t{0, 0}); for (int i = 0; i < model.trees.size(); ++i) { tree2fil_dense(pnodes, i * tree_num_nodes(params->depth), model.trees[i], *params); } } template <typename fil_node_t> struct tl2fil_sparse_check_t { static void check(const tl::Model& model) { ASSERT(false, "internal error: " "only a specialization of this tempalte should be used"); } }; template <> struct tl2fil_sparse_check_t<sparse_node16_t> { // no extra check for 16-byte sparse nodes static void check(const tl::Model& model) {} }; template <> struct tl2fil_sparse_check_t<sparse_node8_t> { static const int MAX_FEATURES = 1 << sparse_node8::FID_NUM_BITS; static const int MAX_TREE_NODES = (1 << sparse_node8::LEFT_NUM_BITS) - 1; static void check(const tl::Model& model) { // check the number of features int num_features = model.num_feature; ASSERT(num_features <= MAX_FEATURES, "model has %d features, " "but only %d supported for 8-byte sparse nodes", num_features, MAX_FEATURES); // check the number of tree nodes const std::vector<tl::Tree>& trees = model.trees; for (int i = 0; i < trees.size(); ++i) { int num_nodes = trees[i].num_nodes; ASSERT(num_nodes <= MAX_TREE_NODES, "tree %d has %d nodes, " "but only %d supported for 8-byte sparse nodes", i, num_nodes, MAX_TREE_NODES); } } }; // uses treelite model with additional tl_params to initialize FIL params, // trees (stored in *ptrees) and sparse nodes (stored in *pnodes) template <typename fil_node_t> void tl2fil_sparse(std::vector<int>* ptrees, std::vector<fil_node_t>* pnodes, forest_params_t* params, const tl::Model& model, const treelite_params_t* tl_params) { tl2fil_common(params, model, tl_params); tl2fil_sparse_check_t<fil_node_t>::check(model); // convert the nodes for (int i = 0; i < model.trees.size(); ++i) { int root = tree2fil_sparse(pnodes, model.trees[i], *params); ptrees->push_back(root); } params->num_nodes = pnodes->size(); } void init_dense(const raft::handle_t& h, forest_t* pf, const dense_node_t* nodes, const forest_params_t* params) { check_params(params, true); dense_forest* f = new dense_forest; f->init(h, nodes, params); *pf = f; } template <typename fil_node_t> void init_sparse(const raft::handle_t& h, forest_t* pf, const int* trees, const typename external_node<fil_node_t>::t* nodes, const forest_params_t* params) { check_params(params, false); sparse_forest<fil_node_t>* f = new sparse_forest<fil_node_t>; f->init(h, trees, nodes, params); *pf = f; } void init_sparse(const raft::handle_t& h, forest_t* pf, const int* trees, const sparse_node16_t* nodes, const forest_params_t* params) { init_sparse<sparse_node16>(h, pf, trees, 
nodes, params); } void init_sparse(const raft::handle_t& h, forest_t* pf, const int* trees, const sparse_node8_t* nodes, const forest_params_t* params) { init_sparse<sparse_node8>(h, pf, trees, nodes, params); } void from_treelite(const raft::handle_t& handle, forest_t* pforest, ModelHandle model, const treelite_params_t* tl_params) { storage_type_t storage_type = tl_params->storage_type; // build dense trees by default const tl::Model& model_ref = *(tl::Model*)model; if (storage_type == storage_type_t::AUTO) { if (tl_params->algo == algo_t::ALGO_AUTO || tl_params->algo == algo_t::NAIVE) { int depth = max_depth(model_ref); // max 2**25 dense nodes, 256 MiB dense model size const int LOG2_MAX_DENSE_NODES = 25; int log2_num_dense_nodes = depth + 1 + int(ceil(std::log2(model_ref.trees.size()))); storage_type = log2_num_dense_nodes > LOG2_MAX_DENSE_NODES ? storage_type_t::SPARSE : storage_type_t::DENSE; } else { // only dense storage is supported for other algorithms storage_type = storage_type_t::DENSE; } } forest_params_t params; switch (storage_type) { case storage_type_t::DENSE: { std::vector<dense_node_t> nodes; tl2fil_dense(&nodes, &params, model_ref, tl_params); init_dense(handle, pforest, nodes.data(), &params); // sync is necessary as nodes is used in init_dense(), // but destructed at the end of this function CUDA_CHECK(hipStreamSynchronize(handle.get_stream())); break; } case storage_type_t::SPARSE: { std::vector<int> trees; std::vector<sparse_node16_t> nodes; tl2fil_sparse(&trees, &nodes, &params, model_ref, tl_params); init_sparse<sparse_node16>(handle, pforest, trees.data(), nodes.data(), &params); CUDA_CHECK(hipStreamSynchronize(handle.get_stream())); break; } case storage_type_t::SPARSE8: { std::vector<int> trees; std::vector<sparse_node8_t> nodes; tl2fil_sparse(&trees, &nodes, &params, model_ref, tl_params); init_sparse<sparse_node8>(handle, pforest, trees.data(), nodes.data(), &params); CUDA_CHECK(hipStreamSynchronize(handle.get_stream())); break; } default: ASSERT(false, "tl_params->sparse must be one of AUTO, DENSE or SPARSE"); } } void free(const raft::handle_t& h, forest_t f) { f->free(h); delete f; } void predict(const raft::handle_t& h, forest_t f, float* preds, const float* data, size_t num_rows, bool predict_proba) { f->predict(h, preds, data, num_rows, predict_proba); } } // namespace fil } // namespace ML
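dense_forest::transform_trees in the file above interleaves trees for BATCH_TREE_REORG so that node k of tree t ends up at index k * num_trees + t: all roots first, then all left children of the roots, and so on. The toy host program below, which is not part of fil.cu, simply prints that mapping for three trees of depth 1 to make the layout concrete.

#include <cstdio>
#include <vector>

int main(void)
{
    const int num_trees = 3, nodes_per_tree = 3;   // depth 1 -> 2^(1+1) - 1 = 3 nodes per tree
    std::vector<int> reorg(num_trees * nodes_per_tree);

    // Same index mapping as transform_trees: h_nodes_[tree_node * num_trees_ + tree]
    int global_node = 0;
    for (int tree = 0; tree < num_trees; ++tree)
        for (int tree_node = 0; tree_node < nodes_per_tree; ++tree_node)
            reorg[tree_node * num_trees + tree] = global_node++;

    // Prints: 0 3 6 1 4 7 2 5 8
    // i.e. the roots of trees 0..2, then their left children, then their right children.
    for (size_t i = 0; i < reorg.size(); ++i) printf("%d ", reorg[i]);
    printf("\n");
    return 0;
}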
2b6e5347a9f000f51f59f1f6780421e1d500a367.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** @file fil.cu implements forest inference */ #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <treelite/c_api.h> #include <treelite/tree.h> #include <algorithm> #include <cmath> #include <limits> #include <stack> #include <utility> #include <cuml/fil/fil.h> #include <raft/cudart_utils.h> #include <cuml/common/cuml_allocator.hpp> #include "common.cuh" namespace ML { namespace fil { using namespace MLCommon; namespace tl = treelite; void node_init(dense_node_t* n, val_t output, float thresh, int fid, bool def_left, bool is_leaf) { *n = dense_node(output, thresh, fid, def_left, is_leaf); } void node_decode(const dense_node_t* n, val_t* output, float* thresh, int* fid, bool* def_left, bool* is_leaf) { dense_node dn(*n); *output = dn.output<val_t>(); *thresh = dn.thresh(); *fid = dn.fid(); *def_left = dn.def_left(); *is_leaf = dn.is_leaf(); } inline void node_init_inline(sparse_node16_t* node, val_t output, float thresh, int fid, bool def_left, bool is_leaf, int left_index) { sparse_node16 n(output, thresh, fid, def_left, is_leaf, left_index); *node = sparse_node16_t(n, n); } void node_init(sparse_node16_t* node, val_t output, float thresh, int fid, bool def_left, bool is_leaf, int left_index) { node_init_inline(node, output, thresh, fid, def_left, is_leaf, left_index); } void node_decode(const sparse_node16_t* node, val_t* output, float* thresh, int* fid, bool* def_left, bool* is_leaf, int* left_index) { node_decode((const dense_node_t*)node, output, thresh, fid, def_left, is_leaf); *left_index = sparse_node16(*node).left_index(); } inline void node_init_inline(sparse_node8_t* node, val_t output, float thresh, int fid, bool def_left, bool is_leaf, int left_index) { sparse_node8 n(output, thresh, fid, def_left, is_leaf, left_index); *node = sparse_node8_t(n); } void node_init(sparse_node8_t* node, val_t output, float thresh, int fid, bool def_left, bool is_leaf, int left_index) { node_init_inline(node, output, thresh, fid, def_left, is_leaf, left_index); } void node_decode(const sparse_node8_t* node, val_t* output, float* thresh, int* fid, bool* def_left, bool* is_leaf, int* left_index) { node_decode((const dense_node_t*)node, output, thresh, fid, def_left, is_leaf); *left_index = sparse_node8(*node).left_index(); } __host__ __device__ float sigmoid(float x) { return 1.0f / (1.0f + expf(-x)); } /** performs additional transformations on the array of forest predictions (preds) of size n; the transformations are defined by output, and include averaging (multiplying by inv_num_trees), adding global_bias (always done), sigmoid and applying threshold. 
in case of complement_proba, fills in the complement probability */ __global__ void transform_k(float* preds, size_t n, output_t output, float inv_num_trees, float threshold, float global_bias, bool complement_proba) { size_t i = threadIdx.x + size_t(blockIdx.x) * blockDim.x; if (i >= n) return; if (complement_proba && i % 2 != 0) return; float result = preds[i]; if ((output & output_t::AVG) != 0) result *= inv_num_trees; result += global_bias; if ((output & output_t::SIGMOID) != 0) result = sigmoid(result); // will not be done on CATEGORICAL_LEAF because the whole kernel will not run if ((output & output_t::CLASS) != 0) { result = result > threshold ? 1.0f : 0.0f; } // sklearn outputs numpy array in 'C' order, with the number of classes being last dimension // that is also the default order, so we should use the same one if (complement_proba) { preds[i] = 1.0f - result; preds[i + 1] = result; } else preds[i] = result; } struct forest { void init_max_shm() { int max_shm_std = 48 * 1024; // 48 KiB int device = 0; // TODO(canonizer): use raft::handle_t for this CUDA_CHECK(cudaGetDevice(&device)); CUDA_CHECK(cudaDeviceGetAttribute( &max_shm_, cudaDevAttrMaxSharedMemoryPerBlockOptin, device)); // TODO(canonizer): use >48KiB shared memory if available max_shm_ = std::min(max_shm_, max_shm_std); } void init_fixed_block_count(const raft::handle_t& h, int blocks_per_sm) { int max_threads_per_sm, sm_count; CUDA_CHECK(cudaDeviceGetAttribute(&max_threads_per_sm, cudaDevAttrMaxThreadsPerMultiProcessor, h.get_device())); int max_blocks_per_sm = max_threads_per_sm / FIL_TPB; ASSERT(blocks_per_sm <= max_blocks_per_sm, "on this GPU, FIL blocks_per_sm cannot exceed %d", max_blocks_per_sm); CUDA_CHECK(cudaDeviceGetAttribute(&sm_count, cudaDevAttrMultiProcessorCount, h.get_device())); fixed_block_count_ = blocks_per_sm * sm_count; } void init_common(const raft::handle_t& h, const forest_params_t* params) { depth_ = params->depth; num_trees_ = params->num_trees; num_cols_ = params->num_cols; algo_ = params->algo; output_ = params->output; threshold_ = params->threshold; global_bias_ = params->global_bias; leaf_algo_ = params->leaf_algo; num_classes_ = params->num_classes; init_max_shm(); init_fixed_block_count(h, params->blocks_per_sm); } virtual void infer(predict_params params, cudaStream_t stream) = 0; void predict(const raft::handle_t& h, float* preds, const float* data, size_t num_rows, bool predict_proba) { // Initialize prediction parameters. 
predict_params params; params.num_cols = num_cols_; params.algo = algo_; params.preds = preds; params.data = data; params.num_rows = num_rows; params.max_shm = max_shm_; params.num_classes = num_classes_; params.leaf_algo = leaf_algo_; // fixed_block_count_ == 0 means the number of thread blocks is // proportional to the number of rows params.num_blocks = fixed_block_count_; /** The binary classification / regression (FLOAT_UNARY_BINARY) predict_proba() works as follows (always 2 outputs): RAW: output the sum of tree predictions AVG is set: divide by the number of trees (averaging) SIGMOID is set: apply sigmoid CLASS is set: ignored write the output of the previous stages and its complement The binary classification / regression (FLOAT_UNARY_BINARY) predict() works as follows (always 1 output): RAW (no values set): output the sum of tree predictions AVG is set: divide by the number of trees (averaging) SIGMOID is set: apply sigmoid CLASS is set: apply threshold (equivalent to choosing best class) The multi-class classification / regression (CATEGORICAL_LEAF) predict_proba() works as follows (always num_classes outputs): RAW (no values set): output class votes AVG is set: divide by the number of trees (averaging, output class probability) SIGMOID is set: apply sigmoid CLASS is set: ignored The multi-class classification / regression (CATEGORICAL_LEAF) predict() works as follows (always 1 output): RAW (no values set): output the label of the class with highest probability, else output label 0. All other flags (AVG, SIGMOID, CLASS) are ignored The multi-class classification / regression (GROVE_PER_CLASS) predict_proba() is not implemented The multi-class classification / regression (GROVE_PER_CLASS) predict() works as follows (always 1 output): RAW (no values set): output the label of the class with highest margin, equal margins resolved in favor of smaller label integer All other flags (AVG, SIGMOID, CLASS) are ignored */ output_t ot = output_; bool complement_proba = false, do_transform; if (predict_proba) { // no threshold on probabilities ot = output_t(ot & ~output_t::CLASS); switch (leaf_algo_) { case leaf_algo_t::FLOAT_UNARY_BINARY: params.num_outputs = 2; complement_proba = true; do_transform = true; break; case leaf_algo_t::GROVE_PER_CLASS: // TODO(levsnv): add softmax to implement predict_proba ASSERT( false, "predict_proba not supported for multi-class gradient boosted " "decision trees (encountered in xgboost, scikit-learn, lightgbm)"); case leaf_algo_t::CATEGORICAL_LEAF: params.num_outputs = num_classes_; do_transform = ot != output_t::RAW || global_bias_ != 0.0f; break; default: ASSERT(false, "internal error: invalid leaf_algo_"); } } else { if (leaf_algo_ == leaf_algo_t::FLOAT_UNARY_BINARY) { do_transform = ot != output_t::RAW || global_bias_ != 0.0f; } else { // GROVE_PER_CLASS, CATEGORICAL_LEAF: moot since choosing best class and // all transforms are monotonic. also, would break current code do_transform = false; } params.num_outputs = 1; } // Predict using the forest. cudaStream_t stream = h.get_stream(); infer(params, stream); if (do_transform) { size_t num_values_to_transform = (size_t)num_rows * (size_t)params.num_outputs; transform_k<<<raft::ceildiv(num_values_to_transform, (size_t)FIL_TPB), FIL_TPB, 0, stream>>>( preds, num_values_to_transform, ot, num_trees_ > 0 ? 
(1.0f / num_trees_) : 1.0f, threshold_, global_bias_, complement_proba); CUDA_CHECK(cudaPeekAtLastError()); } } virtual void free(const raft::handle_t& h) = 0; virtual ~forest() {} int num_trees_ = 0; int depth_ = 0; int num_cols_ = 0; algo_t algo_ = algo_t::NAIVE; int max_shm_ = 0; output_t output_ = output_t::RAW; float threshold_ = 0.5; float global_bias_ = 0; leaf_algo_t leaf_algo_ = leaf_algo_t::FLOAT_UNARY_BINARY; int num_classes_ = 1; int fixed_block_count_ = 0; }; struct dense_forest : forest { void transform_trees(const dense_node_t* nodes) { /* Populate node information: For each tree, the nodes are still stored in the breadth-first, left-to-right order. However, instead of storing the nodes of the same tree adjacently, it uses a different layout. In this layout, the roots of all trees (node 0) are stored first, followed by left children of the roots of all trees (node 1), followed by the right children of the roots of all trees (node 2), and so on. */ int global_node = 0; for (int tree = 0; tree < num_trees_; ++tree) { int tree_node = 0; // the counters `level` and `branch` are not used for computing node // indices, they are only here to highlight the node ordering within // each tree for (int level = 0; level <= depth_; ++level) { for (int branch = 0; branch < 1 << level; ++branch) { h_nodes_[tree_node * num_trees_ + tree] = dense_node(nodes[global_node]); ++tree_node; ++global_node; } } } } void init(const raft::handle_t& h, const dense_node_t* nodes, const forest_params_t* params) { init_common(h, params); if (algo_ == algo_t::NAIVE) algo_ = algo_t::BATCH_TREE_REORG; int num_nodes = forest_num_nodes(num_trees_, depth_); nodes_ = (dense_node*)h.get_device_allocator()->allocate( sizeof(dense_node) * num_nodes, h.get_stream()); h_nodes_.resize(num_nodes); if (algo_ == algo_t::NAIVE) { std::copy(nodes, nodes + num_nodes, h_nodes_.begin()); } else { transform_trees(nodes); } CUDA_CHECK(cudaMemcpyAsync(nodes_, h_nodes_.data(), num_nodes * sizeof(dense_node), cudaMemcpyHostToDevice, h.get_stream())); // copy must be finished before freeing the host data CUDA_CHECK(cudaStreamSynchronize(h.get_stream())); h_nodes_.clear(); h_nodes_.shrink_to_fit(); } virtual void infer(predict_params params, cudaStream_t stream) override { dense_storage forest(nodes_, num_trees_, algo_ == algo_t::NAIVE ? tree_num_nodes(depth_) : 1, algo_ == algo_t::NAIVE ? 
1 : num_trees_); fil::infer(forest, params, stream); } virtual void free(const raft::handle_t& h) override { int num_nodes = forest_num_nodes(num_trees_, depth_); h.get_device_allocator()->deallocate(nodes_, sizeof(dense_node) * num_nodes, h.get_stream()); } dense_node* nodes_ = nullptr; thrust::host_vector<dense_node> h_nodes_; }; template <typename node_t> struct external_node {}; template <> struct external_node<sparse_node16> { typedef sparse_node16_t t; }; template <> struct external_node<sparse_node8> { typedef sparse_node8_t t; }; template <typename node_t> struct sparse_forest : forest { typedef typename external_node<node_t>::t external_node_t; void init(const raft::handle_t& h, const int* trees, const external_node_t* nodes, const forest_params_t* params) { init_common(h, params); if (algo_ == algo_t::ALGO_AUTO) algo_ = algo_t::NAIVE; depth_ = 0; // a placeholder value num_nodes_ = params->num_nodes; // trees trees_ = (int*)h.get_device_allocator()->allocate(sizeof(int) * num_trees_, h.get_stream()); CUDA_CHECK(cudaMemcpyAsync(trees_, trees, sizeof(int) * num_trees_, cudaMemcpyHostToDevice, h.get_stream())); // nodes nodes_ = (node_t*)h.get_device_allocator()->allocate( sizeof(node_t) * num_nodes_, h.get_stream()); CUDA_CHECK(cudaMemcpyAsync(nodes_, nodes, sizeof(node_t) * num_nodes_, cudaMemcpyHostToDevice, h.get_stream())); } virtual void infer(predict_params params, cudaStream_t stream) override { sparse_storage<node_t> forest(trees_, nodes_, num_trees_); fil::infer(forest, params, stream); } void free(const raft::handle_t& h) override { h.get_device_allocator()->deallocate(trees_, sizeof(int) * num_trees_, h.get_stream()); h.get_device_allocator()->deallocate(nodes_, sizeof(node_t) * num_nodes_, h.get_stream()); } int num_nodes_ = 0; int* trees_ = nullptr; node_t* nodes_ = nullptr; }; void check_params(const forest_params_t* params, bool dense) { if (dense) { ASSERT(params->depth >= 0, "depth must be non-negative for dense forests"); } else { ASSERT(params->num_nodes >= 0, "num_nodes must be non-negative for sparse forests"); ASSERT(params->algo == algo_t::NAIVE || params->algo == algo_t::ALGO_AUTO, "only ALGO_AUTO and NAIVE algorithms are supported " "for sparse forests"); } ASSERT(params->num_trees >= 0, "num_trees must be non-negative"); ASSERT(params->num_cols >= 0, "num_cols must be non-negative"); switch (params->algo) { case algo_t::ALGO_AUTO: case algo_t::NAIVE: case algo_t::TREE_REORG: case algo_t::BATCH_TREE_REORG: break; default: ASSERT(false, "algo should be ALGO_AUTO, NAIVE, TREE_REORG or BATCH_TREE_REORG"); } switch (params->leaf_algo) { case leaf_algo_t::FLOAT_UNARY_BINARY: if ((params->output & output_t::CLASS) != 0) { ASSERT(params->num_classes == 2, "only supporting binary" " classification using FLOAT_UNARY_BINARY"); } else { ASSERT(params->num_classes == 1, "num_classes must be 1 for " "regression"); } break; case leaf_algo_t::GROVE_PER_CLASS: ASSERT(params->num_classes > 2, "num_classes > 2 is required for leaf_algo == GROVE_PER_CLASS"); ASSERT(params->num_trees % params->num_classes == 0, "num_classes must divide num_trees evenly for GROVE_PER_CLASS"); break; case leaf_algo_t::CATEGORICAL_LEAF: ASSERT(params->num_classes >= 2, "num_classes >= 2 is required for " "leaf_algo == CATEGORICAL_LEAF"); break; default: ASSERT(false, "leaf_algo must be FLOAT_UNARY_BINARY, CATEGORICAL_LEAF" " or GROVE_PER_CLASS"); } // output_t::RAW == 0, and doesn't have a separate flag output_t all_set = output_t(output_t::AVG | output_t::SIGMOID | output_t::CLASS); if 
((params->output & ~all_set) != 0) { ASSERT(false, "output should be a combination of RAW, AVG, SIGMOID and CLASS"); } ASSERT(params->blocks_per_sm >= 0, "blocks_per_sm must be nonnegative"); } int tree_root(const tl::Tree& tree) { return 0; // Treelite format assumes that the root is 0 } int max_depth_helper(const tl::Tree& tree, int node_id, int limit) { if (tree.IsLeaf(node_id)) return 0; ASSERT(limit > 0, "recursion depth limit reached, might be a cycle in the tree"); return 1 + std::max(max_depth_helper(tree, tree.LeftChild(node_id), limit - 1), max_depth_helper(tree, tree.RightChild(node_id), limit - 1)); } inline int max_depth(const tl::Tree& tree) { // trees of this depth aren't used, so it most likely means bad input data, // e.g. cycles in the forest const int DEPTH_LIMIT = 500; int root_index = tree_root(tree); typedef std::pair<int, int> pair_t; std::stack<pair_t> stack; stack.push(pair_t(root_index, 0)); int max_depth = 0; while (!stack.empty()) { const pair_t& pair = stack.top(); int node_id = pair.first; int depth = pair.second; stack.pop(); while (!tree.IsLeaf(node_id)) { stack.push(pair_t(tree.LeftChild(node_id), depth + 1)); node_id = tree.RightChild(node_id); depth++; ASSERT(depth < DEPTH_LIMIT, "depth limit reached, might be a cycle in the tree"); } // only need to update depth for leaves max_depth = std::max(max_depth, depth); } return max_depth; } int max_depth(const tl::Model& model) { int depth = 0; for (const auto& tree : model.trees) depth = std::max(depth, max_depth(tree)); return depth; } inline void adjust_threshold(float* pthreshold, int* tl_left, int* tl_right, bool* default_left, tl::Operator comparison_op) { // in treelite (take left node if val [op] threshold), // the meaning of the condition is reversed compared to FIL; // thus, "<" in treelite corresonds to comparison ">=" used by FIL // https://github.com/dmlc/treelite/blob/master/include/treelite/tree.h#L243 switch (comparison_op) { case tl::Operator::kLT: break; case tl::Operator::kLE: // x <= y is equivalent to x < y', where y' is the next representable float *pthreshold = std::nextafterf(*pthreshold, std::numeric_limits<float>::infinity()); break; case tl::Operator::kGT: // x > y is equivalent to x >= y', where y' is the next representable float // left and right still need to be swapped *pthreshold = std::nextafterf(*pthreshold, std::numeric_limits<float>::infinity()); case tl::Operator::kGE: // swap left and right std::swap(*tl_left, *tl_right); *default_left = !*default_left; break; default: ASSERT(false, "only <, >, <= and >= comparisons are supported"); } } /** if the vector consists of zeros and a single one, return the position for the one (assumed class label). Else, asserts false. 
If the vector contains a NAN, asserts false */ int find_class_label_from_one_hot(tl::tl_float* vector, int len) { bool found_label = false; int out; for (int i = 0; i < len; ++i) { if (vector[i] == 1.0f) { ASSERT(!found_label, "label vector contains multiple 1.0f"); out = i; found_label = true; } else { ASSERT(vector[i] == 0.0f, "label vector contains values other than 0.0 and 1.0"); } } ASSERT(found_label, "did not find 1.0f in vector"); return out; } template <typename fil_node_t> void tl2fil_leaf_payload(fil_node_t* fil_node, const tl::Tree& tl_tree, int tl_node_id, const forest_params_t& forest_params) { auto vec = tl_tree.LeafVector(tl_node_id); switch (forest_params.leaf_algo) { case leaf_algo_t::CATEGORICAL_LEAF: ASSERT(vec.size() == forest_params.num_classes, "inconsistent number of classes in treelite leaves"); fil_node->val.idx = find_class_label_from_one_hot(&vec[0], vec.size()); break; case leaf_algo_t::FLOAT_UNARY_BINARY: case leaf_algo_t::GROVE_PER_CLASS: fil_node->val.f = tl_tree.LeafValue(tl_node_id); ASSERT(!tl_tree.HasLeafVector(tl_node_id), "some but not all treelite leaves have leaf_vector()"); break; default: ASSERT(false, "internal error: invalid leaf_algo"); }; } void node2fil_dense(std::vector<dense_node_t>* pnodes, int root, int cur, const tl::Tree& tree, int node_id, const forest_params_t& forest_params) { if (tree.IsLeaf(node_id)) { node_init(&(*pnodes)[root + cur], val_t{.f = NAN}, NAN, 0, false, true); tl2fil_leaf_payload(&(*pnodes)[root + cur], tree, node_id, forest_params); return; } // inner node ASSERT(tree.SplitType(node_id) == tl::SplitFeatureType::kNumerical, "only numerical split nodes are supported"); int tl_left = tree.LeftChild(node_id), tl_right = tree.RightChild(node_id); bool default_left = tree.DefaultLeft(node_id); float threshold = tree.Threshold(node_id); adjust_threshold(&threshold, &tl_left, &tl_right, &default_left, tree.ComparisonOp(node_id)); node_init(&(*pnodes)[root + cur], val_t{.f = 0}, threshold, tree.SplitIndex(node_id), default_left, false); int left = 2 * cur + 1; node2fil_dense(pnodes, root, left, tree, tl_left, forest_params); node2fil_dense(pnodes, root, left + 1, tree, tl_right, forest_params); } void tree2fil_dense(std::vector<dense_node_t>* pnodes, int root, const tl::Tree& tree, const forest_params_t& forest_params) { node2fil_dense(pnodes, root, 0, tree, tree_root(tree), forest_params); } template <typename fil_node_t> int tree2fil_sparse(std::vector<fil_node_t>* pnodes, const tl::Tree& tree, const forest_params_t& forest_params) { typedef std::pair<int, int> pair_t; std::stack<pair_t> stack; int root = pnodes->size(); pnodes->push_back(fil_node_t()); stack.push(pair_t(tree_root(tree), 0)); while (!stack.empty()) { const pair_t& top = stack.top(); int node_id = top.first; int cur = top.second; stack.pop(); while (!tree.IsLeaf(node_id)) { // inner node ASSERT(tree.SplitType(node_id) == tl::SplitFeatureType::kNumerical, "only numerical split nodes are supported"); // tl_left and tl_right are indices of the children in the treelite tree // (stored as an array of nodes) int tl_left = tree.LeftChild(node_id), tl_right = tree.RightChild(node_id); bool default_left = tree.DefaultLeft(node_id); float threshold = tree.Threshold(node_id); adjust_threshold(&threshold, &tl_left, &tl_right, &default_left, tree.ComparisonOp(node_id)); // reserve space for child nodes // left is the offset of the left child node relative to the tree root // in the array of all nodes of the FIL sparse forest int left = pnodes->size() - root; 
pnodes->push_back(fil_node_t()); pnodes->push_back(fil_node_t()); node_init_inline(&(*pnodes)[root + cur], val_t{.f = 0}, threshold, tree.SplitIndex(node_id), default_left, false, left); // push child nodes into the stack stack.push(pair_t(tl_right, left + 1)); //stack.push(pair_t(tl_left, left)); node_id = tl_left; cur = left; } // leaf node node_init_inline(&(*pnodes)[root + cur], val_t{.f = NAN}, NAN, 0, false, true, 0); tl2fil_leaf_payload(&(*pnodes)[root + cur], tree, node_id, forest_params); } return root; } size_t tl_leaf_vector_size(const tl::Model& model) { const tl::Tree& tree = model.trees[0]; int node_key; for (node_key = tree_root(tree); !tree.IsLeaf(node_key); node_key = tree.RightChild(node_key)) ; if (tree.HasLeafVector(node_key)) return tree.LeafVector(node_key).size(); return 0; } // tl2fil_common is the part of conversion from a treelite model // common for dense and sparse forests void tl2fil_common(forest_params_t* params, const tl::Model& model, const treelite_params_t* tl_params) { // fill in forest-indendent params params->algo = tl_params->algo; params->threshold = tl_params->threshold; // fill in forest-dependent params params->depth = max_depth(model); // also checks for cycles const tl::ModelParam& param = model.param; // assuming either all leaves use the .leaf_vector() or all leaves use .leaf_value() size_t leaf_vec_size = tl_leaf_vector_size(model); std::string pred_transform(param.pred_transform); if (leaf_vec_size > 0) { ASSERT(leaf_vec_size == model.num_output_group, "treelite model inconsistent"); params->num_classes = leaf_vec_size; params->leaf_algo = leaf_algo_t::CATEGORICAL_LEAF; ASSERT(tl_params->output_class, "output_class==true is required for multi-class models"); ASSERT( pred_transform == "max_index" || pred_transform == "identity_multiclass", "only max_index and identity_multiclass values of pred_transform " "are supported for multi-class models"); } else { if (model.num_output_group > 1) { params->num_classes = model.num_output_group; ASSERT(tl_params->output_class, "output_class==true is required for multi-class models"); ASSERT(pred_transform == "sigmoid" || pred_transform == "identity" || pred_transform == "max_index" || pred_transform == "softmax" || pred_transform == "multiclass_ova", "only sigmoid, identity, max_index, multiclass_ova and softmax " "values of pred_transform are supported for xgboost-style " "multi-class classification models."); // this function should not know how many threads per block will be used params->leaf_algo = leaf_algo_t::GROVE_PER_CLASS; } else { params->num_classes = tl_params->output_class ? 2 : 1; ASSERT(pred_transform == "sigmoid" || pred_transform == "identity", "only sigmoid and identity values of pred_transform " "are supported for binary classification and regression models."); params->leaf_algo = leaf_algo_t::FLOAT_UNARY_BINARY; } } params->num_cols = model.num_feature; ASSERT(param.sigmoid_alpha == 1.0f, "sigmoid_alpha not supported"); params->global_bias = param.global_bias; params->output = output_t::RAW; /** output_t::CLASS denotes using a threshold in FIL, when predict_proba == false. For all multiclass models, the best class is selected using argmax instead. This happens when either leaf_algo == CATEGORICAL_LEAF or num_classes > 2. 
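E.g. for a binary classifier with output_class == true, random_forest_flag set and
pred_transform == "sigmoid", the CLASS, AVG and SIGMOID bits all get OR'ed into output below.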
**/ if (tl_params->output_class && params->leaf_algo != CATEGORICAL_LEAF && params->num_classes <= 2) { params->output = output_t(params->output | output_t::CLASS); } // "random forest" in treelite means tree output averaging if (model.random_forest_flag) { params->output = output_t(params->output | output_t::AVG); } if (std::string(param.pred_transform) == "sigmoid") { params->output = output_t(params->output | output_t::SIGMOID); } params->num_trees = model.trees.size(); params->blocks_per_sm = tl_params->blocks_per_sm; } // uses treelite model with additional tl_params to initialize FIL params // and dense nodes (stored in *pnodes) void tl2fil_dense(std::vector<dense_node_t>* pnodes, forest_params_t* params, const tl::Model& model, const treelite_params_t* tl_params) { tl2fil_common(params, model, tl_params); // convert the nodes int num_nodes = forest_num_nodes(params->num_trees, params->depth); pnodes->resize(num_nodes, dense_node_t{0, 0}); for (int i = 0; i < model.trees.size(); ++i) { tree2fil_dense(pnodes, i * tree_num_nodes(params->depth), model.trees[i], *params); } } template <typename fil_node_t> struct tl2fil_sparse_check_t { static void check(const tl::Model& model) { ASSERT(false, "internal error: " "only a specialization of this tempalte should be used"); } }; template <> struct tl2fil_sparse_check_t<sparse_node16_t> { // no extra check for 16-byte sparse nodes static void check(const tl::Model& model) {} }; template <> struct tl2fil_sparse_check_t<sparse_node8_t> { static const int MAX_FEATURES = 1 << sparse_node8::FID_NUM_BITS; static const int MAX_TREE_NODES = (1 << sparse_node8::LEFT_NUM_BITS) - 1; static void check(const tl::Model& model) { // check the number of features int num_features = model.num_feature; ASSERT(num_features <= MAX_FEATURES, "model has %d features, " "but only %d supported for 8-byte sparse nodes", num_features, MAX_FEATURES); // check the number of tree nodes const std::vector<tl::Tree>& trees = model.trees; for (int i = 0; i < trees.size(); ++i) { int num_nodes = trees[i].num_nodes; ASSERT(num_nodes <= MAX_TREE_NODES, "tree %d has %d nodes, " "but only %d supported for 8-byte sparse nodes", i, num_nodes, MAX_TREE_NODES); } } }; // uses treelite model with additional tl_params to initialize FIL params, // trees (stored in *ptrees) and sparse nodes (stored in *pnodes) template <typename fil_node_t> void tl2fil_sparse(std::vector<int>* ptrees, std::vector<fil_node_t>* pnodes, forest_params_t* params, const tl::Model& model, const treelite_params_t* tl_params) { tl2fil_common(params, model, tl_params); tl2fil_sparse_check_t<fil_node_t>::check(model); // convert the nodes for (int i = 0; i < model.trees.size(); ++i) { int root = tree2fil_sparse(pnodes, model.trees[i], *params); ptrees->push_back(root); } params->num_nodes = pnodes->size(); } void init_dense(const raft::handle_t& h, forest_t* pf, const dense_node_t* nodes, const forest_params_t* params) { check_params(params, true); dense_forest* f = new dense_forest; f->init(h, nodes, params); *pf = f; } template <typename fil_node_t> void init_sparse(const raft::handle_t& h, forest_t* pf, const int* trees, const typename external_node<fil_node_t>::t* nodes, const forest_params_t* params) { check_params(params, false); sparse_forest<fil_node_t>* f = new sparse_forest<fil_node_t>; f->init(h, trees, nodes, params); *pf = f; } void init_sparse(const raft::handle_t& h, forest_t* pf, const int* trees, const sparse_node16_t* nodes, const forest_params_t* params) { init_sparse<sparse_node16>(h, pf, trees, 
nodes, params); } void init_sparse(const raft::handle_t& h, forest_t* pf, const int* trees, const sparse_node8_t* nodes, const forest_params_t* params) { init_sparse<sparse_node8>(h, pf, trees, nodes, params); } void from_treelite(const raft::handle_t& handle, forest_t* pforest, ModelHandle model, const treelite_params_t* tl_params) { storage_type_t storage_type = tl_params->storage_type; // build dense trees by default const tl::Model& model_ref = *(tl::Model*)model; if (storage_type == storage_type_t::AUTO) { if (tl_params->algo == algo_t::ALGO_AUTO || tl_params->algo == algo_t::NAIVE) { int depth = max_depth(model_ref); // max 2**25 dense nodes, 256 MiB dense model size const int LOG2_MAX_DENSE_NODES = 25; int log2_num_dense_nodes = depth + 1 + int(ceil(std::log2(model_ref.trees.size()))); storage_type = log2_num_dense_nodes > LOG2_MAX_DENSE_NODES ? storage_type_t::SPARSE : storage_type_t::DENSE; } else { // only dense storage is supported for other algorithms storage_type = storage_type_t::DENSE; } } forest_params_t params; switch (storage_type) { case storage_type_t::DENSE: { std::vector<dense_node_t> nodes; tl2fil_dense(&nodes, &params, model_ref, tl_params); init_dense(handle, pforest, nodes.data(), &params); // sync is necessary as nodes is used in init_dense(), // but destructed at the end of this function CUDA_CHECK(cudaStreamSynchronize(handle.get_stream())); break; } case storage_type_t::SPARSE: { std::vector<int> trees; std::vector<sparse_node16_t> nodes; tl2fil_sparse(&trees, &nodes, &params, model_ref, tl_params); init_sparse<sparse_node16>(handle, pforest, trees.data(), nodes.data(), &params); CUDA_CHECK(cudaStreamSynchronize(handle.get_stream())); break; } case storage_type_t::SPARSE8: { std::vector<int> trees; std::vector<sparse_node8_t> nodes; tl2fil_sparse(&trees, &nodes, &params, model_ref, tl_params); init_sparse<sparse_node8>(handle, pforest, trees.data(), nodes.data(), &params); CUDA_CHECK(cudaStreamSynchronize(handle.get_stream())); break; } default: ASSERT(false, "tl_params->sparse must be one of AUTO, DENSE or SPARSE"); } } void free(const raft::handle_t& h, forest_t f) { f->free(h); delete f; } void predict(const raft::handle_t& h, forest_t f, float* preds, const float* data, size_t num_rows, bool predict_proba) { f->predict(h, preds, data, num_rows, predict_proba); } } // namespace fil } // namespace ML
4a9b3d24177357a1c3d508a5e226435ab129da32.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "WarpField.h" #include "GpuMesh.h" #include "device_utils.h" #include "TsdfVolume.h" #include "cudpp\thrust_wrapper.h" #include "cudpp\ModerGpuWrapper.h" #include "GpuKdTree.h" #include <set> #include <algorithm> #include <queue> namespace dfusion { #pragma region --warpmesh struct MeshWarper { const GpuMesh::PointType* vsrc; const GpuMesh::PointType* nsrc; const GpuMesh::PointType* csrc; hipTextureObject_t knnTex; hipTextureObject_t nodesDqVwTex; GpuMesh::PointType* vdst; GpuMesh::PointType* ndst; GpuMesh::PointType* cdst; int num; Tbx::Mat3 R; float3 t; float3 origion; float invVoxelSize; __device__ __forceinline__ void operator()(int tid) { float3 p = GpuMesh::from_point(vsrc[tid]); float3 n = GpuMesh::from_point(nsrc[tid]); Tbx::Dual_quat_cu dq_blend = WarpField::calc_dual_quat_blend_on_p(knnTex, nodesDqVwTex, p, origion, invVoxelSize); Tbx::Point3 dq_p = dq_blend.transform(Tbx::Point3(convert(p))); Tbx::Vec3 dq_n = dq_blend.rotate(convert(n)); //vdst[tid] = GpuMesh::to_point(convert(R.rotate(dq_p)) + t); //ndst[tid] = GpuMesh::to_point(convert(R.rotate(dq_n))); vdst[tid] = GpuMesh::to_point(convert(R*dq_p) + t); ndst[tid] = GpuMesh::to_point(convert(R*dq_n)); cdst[tid] = csrc[tid]; } }; __global__ void warp_mesh_kernel(MeshWarper warper) { unsigned int i = blockIdx.x * (blockDim.x << 3) + threadIdx.x; #pragma unroll for (int k = 0; k < 8; k++) { if (i < warper.num) { warper(i); } i += blockDim.x; } } struct MapWarper { PtrStep<float4> vsrc; PtrStep<float4> nsrc; hipTextureObject_t knnTex; hipTextureObject_t nodesDqVwTex; PtrStep<float4> vdst; PtrStep<float4> ndst; int w; int h; Tbx::Mat3 R; float3 t; float3 origion; float invVoxelSize; __device__ __forceinline__ void operator()(int x, int y) { float3 p = GpuMesh::from_point(vsrc(y,x)); float3 n = GpuMesh::from_point(nsrc(y,x)); Tbx::Dual_quat_cu dq_blend = WarpField::calc_dual_quat_blend_on_p(knnTex, nodesDqVwTex, p, origion, invVoxelSize); Tbx::Point3 dq_p = dq_blend.transform(Tbx::Point3(convert(p))); Tbx::Vec3 dq_n = dq_blend.rotate(convert(n)); //vdst(y, x) = GpuMesh::to_point(convert(R.rotate(dq_p)) + t); //ndst(y, x) = GpuMesh::to_point(convert(R.rotate(dq_n))); vdst(y, x) = GpuMesh::to_point(convert(R*dq_p) + t); ndst(y, x) = GpuMesh::to_point(convert(R*dq_n)); } }; __global__ void warp_map_kernel(MapWarper warper) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < warper.w && y < warper.h) warper(x, y); } void WarpField::warp(GpuMesh& src, GpuMesh& dst) { if (src.num() == 0) return; dst.create(src.num()); src.lockVertsNormals(); dst.lockVertsNormals(); MeshWarper warper; warper.t = convert(m_rigidTransform.get_translation()); warper.R = m_rigidTransform.get_mat3();// Tbx::Quat_cu(m_rigidTransform); warper.knnTex = getKnnFieldTexture(); warper.nodesDqVwTex = getNodesDqVwTexture(); warper.vsrc = src.verts(); warper.nsrc = src.normals(); warper.csrc = src.colors(); warper.vdst = dst.verts(); warper.ndst = dst.normals(); warper.cdst = dst.colors(); warper.num = src.num(); warper.origion = m_volume->getOrigion(); warper.invVoxelSize = 1.f / m_volume->getVoxelSize(); dim3 block(512); dim3 grid(1, 1, 1); grid.x = divUp(dst.num(), block.x << 3); warp_mesh_kernel << <grid, block >> >(warper); cudaSafeCall(hipGetLastError(), "warp mesh"); dst.unlockVertsNormals(); src.unlockVertsNormals(); } void WarpField::warp(const MapArr& srcVmap, const MapArr& srcNmap, MapArr& dstVmap, MapArr& 
dstNmap) { const int w = srcVmap.cols(); const int h = srcNmap.rows(); dstVmap.create(h, w); dstNmap.create(h, w); MapWarper warper; warper.t = convert(m_rigidTransform.get_translation()); warper.R = m_rigidTransform.get_mat3();// Tbx::Quat_cu(m_rigidTransform); warper.knnTex = getKnnFieldTexture(); warper.nodesDqVwTex = getNodesDqVwTexture(); warper.vsrc = srcVmap; warper.nsrc = srcNmap; warper.vdst = dstVmap; warper.ndst = dstNmap; warper.w = w; warper.h = h; warper.origion = m_volume->getOrigion(); warper.invVoxelSize = 1.f / m_volume->getVoxelSize(); dim3 block(32, 8); dim3 grid(divUp(w, block.x), divUp(h, block.y), 1); warp_map_kernel << <grid, block >> >(warper); cudaSafeCall(hipGetLastError(), "warp map"); } #pragma endregion #pragma region --init knn field __global__ void initKnnFieldKernel(hipSurfaceObject_t knnSurf, int3 resolution) { int ix = blockDim.x*blockIdx.x + threadIdx.x; int iy = blockDim.y*blockIdx.y + threadIdx.y; int iz = blockDim.z*blockIdx.z + threadIdx.z; if (ix < resolution.x && iy < resolution.y && iz < resolution.z) write_knn(make_knn(WarpField::MaxNodeNum), knnSurf, ix, iy, iz); } __global__ void initKnnFieldKernel1(KnnIdx* knnPtr, int n) { int ix = blockDim.x*blockIdx.x + threadIdx.x; if (ix < n) knnPtr[ix] = make_knn(WarpField::MaxNodeNum); } void WarpField::initKnnField() { int3 res = m_volume->getResolution(); dim3 block(32, 8, 2); dim3 grid(divUp(res.x, block.x), divUp(res.y, block.y), divUp(res.z, block.z)); hipSurfaceObject_t surf = getKnnFieldSurface(); initKnnFieldKernel << <grid, block >> >(surf, res); cudaSafeCall(hipGetLastError(), "initKnnFieldKernel"); dim3 block1(256); dim3 grid1(divUp(m_nodesGraph.size(), block1.x)); initKnnFieldKernel1 << <grid, block >> >(m_nodesGraph.ptr(), m_nodesGraph.size()); cudaSafeCall(hipGetLastError(), "initKnnFieldKernel1"); } #pragma endregion #pragma region --update nodes __device__ int newPoints_global_count = 0; __device__ int newPoints_output_count; __device__ unsigned int newPoints_blocks_done = 0; struct NewPointsCounter { enum { CTA_SIZE = 256, WARPS_COUNT = CTA_SIZE / Warp::WARP_SIZE }; mutable int* out_keys; mutable float4* out_points; GpuMesh::PointType* input_points; hipTextureObject_t knnTex; hipTextureObject_t nodesDqVwTex; float4* nodesDqVw; int n; int step; float3 origion; int numNodes; float inv_search_radius_sqr; // for volume index float vol_invVoxelSize; int3 vol_res; // for key generation float key_invGridSize; int3 key_gridRes; __device__ __forceinline__ void operator () () const { int tid = threadIdx.x + blockIdx.x * CTA_SIZE; if (__all(tid >= n)) return; int warp_id = Warp::id(); int lane_id = Warp::laneId(); volatile __shared__ int warps_buffer[WARPS_COUNT]; int flag = 0; int key = 0; float4 p4; if (tid < n) { float3 p = GpuMesh::from_point(input_points[tid*step]); p4 = GpuMesh::to_point(p, 1.f); // generating key float3 p1 = (p - origion)*key_invGridSize; int x = int(p1.x); int y = int(p1.y); int z = int(p1.z); key = (z*key_gridRes.y + y)*key_gridRes.x + x; // identify voxel p1 = (p - origion)*vol_invVoxelSize; x = int(p1.x); y = int(p1.y); z = int(p1.z); // assert knnIdx sorted, thus the 1st should be the nearest KnnIdx knnIdx = read_knn_tex(knnTex, x, y, z); if (knn_k(knnIdx, 0) < numNodes) { float4 nearestVw = make_float4(0, 0, 0, 1); tex1Dfetch(&nearestVw, nodesDqVwTex, knn_k(knnIdx, 0) * 3 + 2); // [q0-q1-vw] memory stored float3 nearestV = make_float3(nearestVw.x, nearestVw.y, nearestVw.z); // DIFFERENT from the paper ldp: // here we insert a node if the point is outside the search 
radius, // but NOT 1/dw // note .w store 1/radius float dif = dot(nearestV - p, nearestV - p) * inv_search_radius_sqr; flag = (dif > 1.f); } else flag = 1.f; } int total = __popc(__ballot(flag>0)); if (total) { if (lane_id == 0) { int old = atomicAdd(&newPoints_global_count, total); warps_buffer[warp_id] = old; } int old_global_voxels_count = warps_buffer[warp_id]; int offs = Warp::binaryExclScan(__ballot(flag>0)); if (old_global_voxels_count + offs < n && flag) { out_keys[old_global_voxels_count + offs] = key; out_points[old_global_voxels_count + offs] = p4; } }// end if total if (Block::flattenedThreadId() == 0) { unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z; unsigned int value = atomicInc(&newPoints_blocks_done, total_blocks); //last block if (value == total_blocks - 1) { newPoints_output_count = newPoints_global_count; newPoints_blocks_done = 0; newPoints_global_count = 0; } } } /* operator () */ }; __global__ void get_newPoints_kernel(NewPointsCounter counter) { counter(); } __global__ void pointToKey_kernel( const GpuMesh::PointType* points, int* key, float4* copypoints, int n, int step, float invGridSize, float3 origion, int3 gridRes) { unsigned int blockId = __mul24(blockIdx.y, gridDim.x) + blockIdx.x; unsigned int threadId = __mul24(blockId, blockDim.x << 3) + threadIdx.x; #pragma unroll for (int k = 0; k < 8; k++, threadId += blockDim.x) { if (threadId < n) { float3 p = GpuMesh::from_point(points[threadId*step]); float3 p1 = (p- origion)*invGridSize; int x = int(p1.x); int y = int(p1.y); int z = int(p1.z); key[threadId] = (z*gridRes.y + y)*gridRes.x + x; copypoints[threadId] = GpuMesh::to_point(p, 1.f); } } } __device__ int validVoxel_global_count = 0; __device__ int validVoxel_output_count; __device__ unsigned int validVoxel_blocks_done = 0; struct ValidVoxelCounter { enum { CTA_SIZE = 256, WARPS_COUNT = CTA_SIZE / Warp::WARP_SIZE }; mutable int* key_sorted; mutable int* counts; const float4* points_scaned; float weight_thre; int n; __device__ __forceinline__ void operator () () const { int tid = threadIdx.x + blockIdx.x * CTA_SIZE; if (__all(tid >= n)) return; int warp_id = Warp::id(); int lane_id = Warp::laneId(); volatile __shared__ int warps_buffer[WARPS_COUNT]; int flag = 0; if (tid < n) flag = (points_scaned[tid].w > weight_thre) && (key_sorted[tid] != key_sorted[tid + 1] || tid == n - 1); int total = __popc(__ballot(flag>0)); if (total) { if (lane_id == 0) { int old = atomicAdd(&validVoxel_global_count, total); warps_buffer[warp_id] = old; } int old_global_voxels_count = warps_buffer[warp_id]; int offs = Warp::binaryExclScan(__ballot(flag>0)); if (old_global_voxels_count + offs < n && flag) counts[old_global_voxels_count + offs] = tid; }// end if total if (Block::flattenedThreadId() == 0) { unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z; unsigned int value = atomicInc(&validVoxel_blocks_done, total_blocks); //last block if (value == total_blocks - 1) { validVoxel_output_count = validVoxel_global_count; validVoxel_blocks_done = 0; validVoxel_global_count = 0; } } } /* operator () */ }; __global__ void get_validVoxel_kernel(ValidVoxelCounter counter) { counter(); } struct NodesWriter { const float4* points_not_compact; const int* index; float4* nodesDqVw; float inv_weight_radius; int num; hipTextureObject_t knnTex; hipTextureObject_t nodesDqVwTex; float3 origion; float invVoxelSize; __device__ __forceinline__ void operator()(int threadId) { int idx = index[threadId]; float4 p = points_not_compact[idx]; float inv_w = 1.f / p.w; p.x *= inv_w; 
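// p arrives here as the per-cell running sum (sum_x, sum_y, sum_z, count) produced by the
// segmented scan in insertNewNodes(), so scaling by 1/count recovers the average position
// of the mesh points that fell into this grid cell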
p.y *= inv_w; p.z *= inv_w; p.w = inv_weight_radius; nodesDqVw[threadId * 3 + 2] = p; Tbx::Dual_quat_cu dq_blend = WarpField::calc_dual_quat_blend_on_p(knnTex, nodesDqVwTex, make_float3(p.x, p.y, p.z), origion, invVoxelSize); unpack_dual_quat(dq_blend, nodesDqVw[threadId * 3], nodesDqVw[threadId * 3 + 1]); } __device__ __forceinline__ void update_nodes_dq_assume_compact_nodes(int threadId) { float4 p = nodesDqVw[threadId * 3 + 2]; Tbx::Dual_quat_cu dq_blend = WarpField::calc_dual_quat_blend_on_p(knnTex, nodesDqVwTex, make_float3(p.x, p.y, p.z), origion, invVoxelSize); unpack_dual_quat(dq_blend, nodesDqVw[threadId * 3], nodesDqVw[threadId * 3 + 1]); } }; __global__ void write_nodes_kernel(NodesWriter nw) { int threadId = blockIdx.x * blockDim.x + threadIdx.x; if (threadId < nw.num) { nw(threadId); } } __global__ void update_nodes_dq_assume_compact_nodes_kernel(NodesWriter nw) { int threadId = blockIdx.x * blockDim.x + threadIdx.x; if (threadId < nw.num) { nw.update_nodes_dq_assume_compact_nodes(threadId); } } void WarpField::insertNewNodes(GpuMesh& src) { // make a larger buffer to prevent allocation each time int step = m_param.warp_point_step_before_update_node; int num_points = src.num() / step; if (num_points == 0) return; if (num_points > m_current_point_buffer_size) { m_current_point_buffer_size = num_points * 1.5; m_meshPointsSorted.create(m_current_point_buffer_size); m_meshPointsKey.create(m_current_point_buffer_size); m_meshPointsFlags.create(m_current_point_buffer_size); m_tmpBuffer.create(m_current_point_buffer_size); hipMemset(m_meshPointsSorted.ptr(), 0, m_meshPointsSorted.size()*m_meshPointsSorted.elem_size); hipMemset(m_meshPointsKey.ptr(), 0, m_meshPointsKey.size()*m_meshPointsKey.elem_size); hipMemset(m_meshPointsFlags.ptr(), 0, m_meshPointsFlags.size()*m_meshPointsFlags.elem_size); hipMemset(m_tmpBuffer.ptr(), 0, m_tmpBuffer.size()*m_tmpBuffer.elem_size); } // reset symbols int zero_mem_symbol = 0; hipMemcpyToSymbol(newPoints_global_count, &zero_mem_symbol, sizeof(int)); hipMemcpyToSymbol(newPoints_blocks_done, &zero_mem_symbol, sizeof(int)); hipMemcpyToSymbol(validVoxel_global_count, &zero_mem_symbol, sizeof(int)); hipMemcpyToSymbol(validVoxel_blocks_done, &zero_mem_symbol, sizeof(int)); cudaSafeCall(hipDeviceSynchronize(), "set zero: new point"); // if 1st in, then collect all points if (m_lastNumNodes[0] == 0) { dim3 block(256); dim3 grid(1, 1, 1); grid.x = divUp(num_points, block.x << 3); // copy to new buffer and generate sort key src.lockVertsNormals(); pointToKey_kernel << <grid, block >> >( src.verts(), m_meshPointsKey.ptr(), m_meshPointsSorted.ptr(), num_points, step, 1.f / m_param.warp_radius_search_epsilon, m_volume->getOrigion(), m_nodesGridSize); cudaSafeCall(hipGetLastError(), "pointToKey_kernel"); src.unlockVertsNormals(); } // else, collect non-covered points else { src.lockVertsNormals(); NewPointsCounter counter; counter.n = num_points; counter.step = step; counter.origion = m_volume->getOrigion(); counter.key_gridRes = m_nodesGridSize; counter.key_invGridSize = 1.f / m_param.warp_radius_search_epsilon; counter.vol_invVoxelSize = 1.f / m_volume->getVoxelSize(); counter.vol_res = m_volume->getResolution(); counter.inv_search_radius_sqr = 1.f / (m_param.warp_radius_search_epsilon * m_param.warp_radius_search_epsilon); counter.input_points = src.verts(); counter.out_points = m_meshPointsSorted.ptr(); counter.out_keys = m_meshPointsKey.ptr(); counter.knnTex = getKnnFieldTexture(); counter.nodesDqVwTex = getNodesDqVwTexture(); counter.nodesDqVw = 
getNodesDqVwPtr(0); counter.numNodes = m_numNodes[0]; dim3 block1(NewPointsCounter::CTA_SIZE); dim3 grid1(divUp(num_points, block1.x)); get_newPoints_kernel << <grid1, block1 >> >(counter); cudaSafeCall(hipGetLastError(), "get_newPoints_kernel"); cudaSafeCall(hipDeviceSynchronize(), "get_newPoints_kernel sync"); cudaSafeCall(hipMemcpyFromSymbol(&num_points, newPoints_output_count, sizeof(int)), "get_newPoints_kernel memcpy from symbol"); src.unlockVertsNormals(); }// end else if (num_points == 0) return; // sort thrust_wrapper::sort_by_key(m_meshPointsKey.ptr(), m_meshPointsSorted.ptr(), num_points); // segment scan thrust_wrapper::inclusive_scan_by_key(m_meshPointsKey.ptr(), m_meshPointsSorted.ptr(), m_meshPointsSorted.ptr(), num_points); // compact ValidVoxelCounter counter; counter.counts = m_meshPointsFlags.ptr(); counter.key_sorted = m_meshPointsKey.ptr(); counter.n = num_points; counter.weight_thre = m_param.warp_valid_point_num_each_node; counter.points_scaned = m_meshPointsSorted.ptr(); { dim3 block1(ValidVoxelCounter::CTA_SIZE); dim3 grid1(divUp(num_points, block1.x)); get_validVoxel_kernel << <grid1, block1 >> >(counter); cudaSafeCall(hipGetLastError(), "get_validVoxel_kernel"); cudaSafeCall(hipDeviceSynchronize(), "get_validVoxel_kernel sync"); } int num_after_compact = 0; cudaSafeCall(hipMemcpyFromSymbol(&num_after_compact, validVoxel_output_count, sizeof(int)), "copy voxel count from symbol"); if (num_after_compact == 0 && m_lastNumNodes[0] == 0) num_after_compact = 1; // at least one point needed. m_numNodes[0] = min(m_lastNumNodes[0] + num_after_compact, MaxNodeNum); if (num_after_compact + m_lastNumNodes[0] > MaxNodeNum) printf("warning: too many nodes %d vs %d\n", num_after_compact + m_lastNumNodes[0], MaxNodeNum); if (m_numNodes[0] > m_lastNumNodes[0]) { dim3 block(256); dim3 grid(1, 1, 1); grid.x = divUp(m_numNodes[0] - m_lastNumNodes[0], block.x); NodesWriter nw; nw.points_not_compact = m_meshPointsSorted.ptr(); nw.index = m_meshPointsFlags.ptr(); nw.nodesDqVw = getNodesDqVwPtr(0) + m_lastNumNodes[0] * 3; nw.num = m_numNodes[0] - m_lastNumNodes[0]; nw.inv_weight_radius = 1.f / m_param.warp_param_dw; nw.origion = m_volume->getOrigion(); nw.invVoxelSize = 1.f / m_volume->getVoxelSize(); nw.knnTex = getKnnFieldTexture(); nw.nodesDqVwTex = getNodesDqVwTexture(); write_nodes_kernel << <grid, block >> >(nw); cudaSafeCall(hipGetLastError(), "write_nodes_kernel"); } } #pragma endregion #pragma region --update ann field __global__ void seperate_xyz_nodes(const float4* nodesDqVw, float* x, float* y, float* z, int n) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid < n) { float4 dqVw = nodesDqVw[tid * 3 + 2]; x[tid] = dqVw.x; y[tid] = dqVw.y; z[tid] = dqVw.z; } } __global__ void collect_aabb_box_kernel(float4* aabb_min, float4* aabb_max, const float* x, const float* y, const float* z, int n) { int tid = threadIdx.x + blockIdx.x*blockDim.x; if (tid == 0) { aabb_min[0] = make_float4(x[0], y[0], z[0], 0); aabb_max[0] = make_float4(x[n-1], y[n - 1], z[n - 1], 0); } } __global__ void bruteforce_updateKnn_kernel(hipTextureObject_t nodesDqVwTex, hipSurfaceObject_t knnSurf, int3 res, int newNodesBegin, int newNodesEnd, float3 origion, float voxelSize, int maxK) { int x = threadIdx.x + blockIdx.x*blockDim.x; int y = threadIdx.y + blockIdx.y*blockDim.y; int z = threadIdx.z + blockIdx.z*blockDim.z; if (x < res.x && y < res.y && z < res.z) { // compute all 4 dists stored KnnIdx knn = read_knn_surf(knnSurf, x, y, z); float3 voxelPos = origion + voxelSize*make_float3(x, y, z); float 
oldDists2[KnnK]; for (int k = 0; k < maxK; k++) { float4 p; tex1Dfetch(&p, nodesDqVwTex, knn_k(knn, k)*3 + 2); oldDists2[k] = norm2(make_float3(p.x, p.y, p.z) - voxelPos); } // update new nodes for (int iNode = newNodesBegin; iNode < newNodesEnd; iNode++) { float4 p; tex1Dfetch(&p, nodesDqVwTex, iNode * 3 + 2); float newDist2 = norm2(make_float3(p.x, p.y, p.z) - voxelPos); // we swap the farest nodes out // note that the knn is kept sorted int swapPos = maxK; for (int k = 0; k < maxK; k++) { if (newDist2 < oldDists2[k]) { swapPos = k; break; } } if (swapPos < maxK) { KnnIdx newKnn = knn; knn_k(newKnn, swapPos) = iNode; for (int k = swapPos + 1; k < maxK; k++) knn_k(newKnn, k) = knn_k(knn, k - 1); write_knn(newKnn, knnSurf, x, y, z); } }// end for iNode } } void WarpField::updateAnnField() { float3 origion = m_volume->getOrigion(); int3 res = m_volume->getResolution(); float vsz = m_volume->getVoxelSize(); // if 1st frame, then perform whole-volume search, which is slow if (m_lastNumNodes[0] == 0) { m_nodeTree[0]->buildTree(m_nodesQuatTransVw.ptr() + 2, m_numNodes[0], 3); hipSurfaceObject_t surf = getKnnFieldSurface(); m_nodeTree[0]->knnSearchGpu(surf, make_int3(0, 0, 0), res, origion, vsz, m_param.warp_knn_k_eachlevel[0]); } // else, collect voxels around the new added node and then perform sub-volume searching else { int nNewNodes = m_numNodes[0] - m_lastNumNodes[0]; #if 0 // 1st step, collect bounding box of new nodes to avoid additional computation float* xptr = m_tmpBuffer.ptr() + nNewNodes; float* yptr = xptr + nNewNodes; float* zptr = yptr + nNewNodes; if (nNewNodes) { dim3 block(32); dim3 grid(divUp(nNewNodes, block.x)); seperate_xyz_nodes << <grid, block >> >(getNodesDqVwPtr(0) + m_lastNumNodes[0] * 3, xptr, yptr, zptr, nNewNodes); cudaSafeCall(hipGetLastError(), "seperate_xyz_nodes"); } modergpu_wrapper::mergesort(xptr, nNewNodes); modergpu_wrapper::mergesort(yptr, nNewNodes); modergpu_wrapper::mergesort(zptr, nNewNodes); // bounding box info float4 box[2]; { dim3 block(1); dim3 grid(1); collect_aabb_box_kernel << <grid, block >> >( m_meshPointsSorted.ptr(), m_meshPointsSorted.ptr() + 1, xptr, yptr, zptr, nNewNodes); cudaSafeCall(hipGetLastError(), "collect_aabb_box_kernel"); cudaSafeCall(hipMemcpy(box, m_meshPointsSorted.ptr(), 2 * sizeof(float4), hipMemcpyDeviceToHost)); } // convert to volume index int3 begin = make_int3((box[0].x - origion.x) / vsz, (box[0].y - origion.y) / vsz, (box[0].z - origion.z) / vsz); int3 end = make_int3((box[1].x - origion.x) / vsz + 1, (box[1].y - origion.y) / vsz + 1, (box[1].z - origion.z) / vsz + 1); int ext = ceil(m_param.warp_param_dw / vsz); begin.x = min(res.x - 1, max(0, begin.x - ext)); begin.y = min(res.y - 1, max(0, begin.y - ext)); begin.z = min(res.z - 1, max(0, begin.z - ext)); end.x = max(1, min(res.x, end.x + ext)); end.y = max(1, min(res.y, end.y + ext)); end.z = max(1, min(res.z, end.z + ext)); // perform knn search on the sub volume m_nodeTree[0]->buildTree(m_nodesQuatTransVw.ptr() + 2, m_numNodes[0], 3); hipSurfaceObject_t surf = bindKnnFieldSurface(); m_nodeTree[0]->knnSearchGpu(surf, begin, end, origion, vsz, KnnK); //m_nodeTree[0]->knnSearchGpu(surf, make_int3(0,0,0), res, origion, vsz, KnnK); unBindKnnFieldSurface(surf); #else //tranverse each voxel to update if (nNewNodes > 0) { int3 res = m_volume->getResolution(); float3 origion = m_volume->getOrigion(); float vsz = m_volume->getVoxelSize(); dim3 block(32, 8, 2); dim3 grid(divUp(res.x, block.x), divUp(res.y, block.y), divUp(res.z, block.z)); hipSurfaceObject_t surf = 
getKnnFieldSurface(); hipTextureObject_t tex = getNodesDqVwTexture(); bruteforce_updateKnn_kernel << <grid, block >> >( tex, surf, res, m_lastNumNodes[0], m_numNodes[0], origion, vsz, m_param.warp_knn_k_eachlevel[0]); cudaSafeCall(hipGetLastError(), "bruteforce_updateKnn_kernel"); } #endif } } #pragma endregion #pragma region remove small graph components struct sort_int2_less { bool operator()(const int2& left, const int2& right)const { return (left.x < right.x) || (left.x == right.x && left.y < right.y); } }; __global__ void copy_nodes_kernel(float4* dst, const float4* src, const int* idxMap, int nSrc) { int iSrc = threadIdx.x + blockIdx.x * blockDim.x; if (iSrc < nSrc) { int iDst = idxMap[iSrc]; if (iDst >= 0) { for (int k = 0; k < 3; k++) dst[iDst * 3 + k] = src[iSrc * 3 + k]; } } } void WarpField::remove_small_graph_components() { // we only perform removal for single-level graph if (!m_param.graph_single_level || m_numNodes[0] <= 1 || m_param.graph_remove_small_components_ratio >= 1.f || m_numNodes[0] == m_lastNumNodes[0]) return; std::vector<KnnIdx> knnGraph(m_numNodes[0]); cudaSafeCall(hipMemcpy(knnGraph.data(), m_nodesGraph.ptr(), m_numNodes[0] * sizeof(KnnIdx), hipMemcpyDeviceToHost), "WarpField::remove_small_graph_components, cudaMemcpy1"); std::vector<int2> edges; edges.reserve(knnGraph.size() * KnnK); for (int i = 0; i < knnGraph.size(); i++) { KnnIdx knn = knnGraph[i]; for (int k = 0; k < KnnK; k++) { int nb = knn_k(knn, k); if (nb < m_numNodes[0]) { edges.push_back(make_int2(i, nb)); edges.push_back(make_int2(nb, i)); } }// k }// i std::sort(edges.begin(), edges.end(), sort_int2_less()); std::vector<int> edgeHeader(m_numNodes[0] + 1, 0); for (int i = 1; i < edges.size(); i++) { if (edges[i].x != edges[i - 1].x) edgeHeader[edges[i].x] = i; } edgeHeader[m_numNodes[0]] = edges.size(); // find indepedent components std::set<int> verts; for (int i = 0; i < m_numNodes[0]; i++) verts.insert(i); std::vector<int> componentsSize; std::vector<int> componentsFlag(m_numNodes[0], -1); while (!verts.empty()) { componentsSize.push_back(0); int& cpSz = componentsSize.back(); auto set_iter = verts.begin(); std::queue<int> queue; queue.push(*set_iter); verts.erase(set_iter); while (!queue.empty()) { const int v = queue.front(); queue.pop(); cpSz++; componentsFlag[v] = componentsSize.size() - 1; for (int i = edgeHeader[v]; i < edgeHeader[v + 1]; i++) { const int v1 = edges[i].y; set_iter = verts.find(v1); if (set_iter != verts.end()) { queue.push(v1); verts.erase(set_iter); } }// end for i }// end while }// end while verts // if only one components, then nothing to remove if (componentsSize.size() <= 1) return; // find idx that map origional nodes to removed nodes set const int thre = std::lroundf(m_param.graph_remove_small_components_ratio * m_numNodes[0]); std::set<int> componentsToRemove; for (int i = 0; i < componentsSize.size(); i++) if (componentsSize[i] < thre) componentsToRemove.insert(i); if (componentsToRemove.size() == 0) return; int totalIdx = 0; std::vector<int> idxMap(componentsFlag.size()); for (int i = 0; i < componentsFlag.size(); i++) { if (componentsToRemove.find(componentsFlag[i]) != componentsToRemove.end()) { idxMap[i] = -1; if (i < m_lastNumNodes[0]) { //printf("illegal: %d < %d, current: %d\n", i, m_lastNumNodes[0], m_numNodes[0]); //throw std::exception("error in removing small components, last nodes not illegal!"); idxMap[i] = totalIdx++; } } else idxMap[i] = totalIdx++; } // if (m_meshPointsKey.size() < m_numNodes[0]) m_meshPointsKey.create(m_numNodes[0] * 1.5); if 
(m_meshPointsSorted.size() < m_numNodes[0] * 3) m_meshPointsSorted.create(m_numNodes[0] * 3 * 1.5); cudaSafeCall(hipMemcpy(m_meshPointsSorted, m_nodesQuatTransVw, m_numNodes[0] * sizeof(float4)* 3, hipMemcpyDeviceToDevice), "WarpField::remove_small_graph_components, cudaMemcpy2"); cudaSafeCall(hipMemcpy(m_meshPointsKey, idxMap.data(), m_numNodes[0] * sizeof(int), hipMemcpyHostToDevice), "WarpField::remove_small_graph_components, cudaMemcpy3"); copy_nodes_kernel << <divUp(m_numNodes[0], 256), 256 >> >(m_nodesQuatTransVw, m_meshPointsSorted, m_meshPointsKey, m_numNodes[0]); cudaSafeCall(hipGetLastError(), "WarpField::remove_small_graph_components, copy nodes"); printf("Nodes Removal: %d -> %d, last=%d\n", m_numNodes[0], totalIdx, m_lastNumNodes[0]); m_numNodes[0] = totalIdx; updateGraph_singleLevel(); } #pragma endregion #pragma region --update graph void WarpField::updateGraph(int level) { if (level == 0) throw std::exception("called an invalid level function\n"); int num_points = m_numNodes[level - 1]; if (num_points == 0) { m_numNodes[level] = 0; return; } // re-define structure only if lv0 structure changed=============================== if (m_lastNumNodes[0] != m_numNodes[0]) { // reset symbols int zero_mem_symbol = 0; hipMemcpyToSymbol(newPoints_global_count, &zero_mem_symbol, sizeof(int)); hipMemcpyToSymbol(newPoints_blocks_done, &zero_mem_symbol, sizeof(int)); hipMemcpyToSymbol(validVoxel_global_count, &zero_mem_symbol, sizeof(int)); hipMemcpyToSymbol(validVoxel_blocks_done, &zero_mem_symbol, sizeof(int)); cudaSafeCall(hipDeviceSynchronize(), "set zero: new point"); float radius = m_param.warp_radius_search_epsilon * pow(m_param.warp_radius_search_beta, level); { dim3 block(32); dim3 grid(1, 1, 1); grid.x = divUp(num_points, block.x << 3); // copy to new buffer and generate sort key pointToKey_kernel << <grid, block >> >( getNodesDqVwPtr(level - 1) + 2, m_meshPointsKey.ptr(), m_meshPointsSorted.ptr(), num_points, 3, 1.f / radius, m_volume->getOrigion(), m_nodesGridSize); cudaSafeCall(hipGetLastError(), "pointToKey_kernel lv"); } if (num_points == 0) return; // sort thrust_wrapper::sort_by_key(m_meshPointsKey.ptr(), m_meshPointsSorted.ptr(), num_points); // segment scan thrust_wrapper::inclusive_scan_by_key(m_meshPointsKey.ptr(), m_meshPointsSorted.ptr(), m_meshPointsSorted.ptr(), num_points); // compact ValidVoxelCounter counter; counter.counts = m_meshPointsFlags.ptr(); counter.key_sorted = m_meshPointsKey.ptr(); counter.n = num_points; counter.weight_thre = 1; counter.points_scaned = m_meshPointsSorted.ptr(); if (num_points) { dim3 block1(ValidVoxelCounter::CTA_SIZE); dim3 grid1(divUp(num_points, block1.x)); get_validVoxel_kernel << <grid1, block1 >> >(counter); cudaSafeCall(hipGetLastError(), "get_validVoxel_kernel lv"); cudaSafeCall(hipDeviceSynchronize(), "get_validVoxel_kernel lv sync"); } int num_after_compact = 0; cudaSafeCall(hipMemcpyFromSymbol(&num_after_compact, validVoxel_output_count, sizeof(int)), "copy voxel count from symbol"); m_numNodes[level] = min(num_after_compact, MaxNodeNum); if (num_after_compact > MaxNodeNum) printf("warning: too many nodes %d vs %d in level\n", num_after_compact + m_lastNumNodes[0], MaxNodeNum, level); // write level nodes if (m_numNodes[level] > 0) { dim3 block(32); dim3 grid(1, 1, 1); grid.x = divUp(m_numNodes[level], block.x); NodesWriter nw; nw.points_not_compact = m_meshPointsSorted.ptr(); nw.index = m_meshPointsFlags.ptr(); nw.nodesDqVw = getNodesDqVwPtr(level); nw.num = m_numNodes[level]; nw.inv_weight_radius = 1.f / 
(m_param.warp_param_dw*pow(m_param.warp_radius_search_beta, level)); nw.origion = m_volume->getOrigion(); nw.invVoxelSize = 1.f / m_volume->getVoxelSize(); nw.knnTex = getKnnFieldTexture(); nw.nodesDqVwTex = getNodesDqVwTexture(); write_nodes_kernel << <grid, block >> >(nw); cudaSafeCall(hipGetLastError(), "write_nodes_kernel"); } // build graph if (m_numNodes[level] > 0) { m_nodeTree[level]->buildTree(getNodesDqVwPtr(level) + 2, m_numNodes[level], 3); dim3 block1(256); dim3 grid1(divUp(getNumNodesInLevel(level-1)*KnnK, block1.x)); initKnnFieldKernel1 << <grid1, block1 >> >(getNodesEdgesPtr(level - 1), getNumNodesInLevel(level - 1)*KnnK); cudaSafeCall(hipGetLastError(), "initKnnFieldKernel1-1"); m_nodeTree[level]->knnSearchGpu(getNodesDqVwPtr(level - 1) + 2, 3, (KnnIdxType*)getNodesEdgesPtr(level - 1), nullptr, m_param.warp_knn_k_eachlevel[level], getNumNodesInLevel(level - 1), KnnK); } }// end if (m_lastNumNodes[0] != m_numNodes[0]) else if (m_numNodes[level])// else we only update the graph quaternions { dim3 block(32); dim3 grid(1, 1, 1); grid.x = divUp(m_numNodes[level], block.x); NodesWriter nw; nw.nodesDqVw = getNodesDqVwPtr(level); nw.num = m_numNodes[level]; nw.inv_weight_radius = 1.f / (m_param.warp_param_dw*pow(m_param.warp_param_dw_lvup_scale, level)); nw.origion = m_volume->getOrigion(); nw.invVoxelSize = 1.f / m_volume->getVoxelSize(); nw.knnTex = getKnnFieldTexture(); nw.nodesDqVwTex = getNodesDqVwTexture(); update_nodes_dq_assume_compact_nodes_kernel << <grid, block >> >(nw); cudaSafeCall(hipGetLastError(), "update_nodes_dq_assume_compact_nodes_kernel"); }// end else (m_lastNumNodes[0] == m_numNodes[0]) } void WarpField::updateGraph_singleLevel() { // build graph if (m_lastNumNodes[0] != m_numNodes[0]) { m_nodeTree[0]->buildTree(getNodesDqVwPtr(0) + 2, m_numNodes[0], 3); dim3 block1(256); dim3 grid1(divUp(getNumNodesInLevel(0)*KnnK, block1.x)); initKnnFieldKernel1 << <grid1, block1 >> >(getNodesEdgesPtr(0), getNumNodesInLevel(0)*KnnK); cudaSafeCall(hipGetLastError(), "initKnnFieldKernel1-1"); m_nodeTree[0]->knnSearchGpu(getNodesDqVwPtr(0) + 2, 3, (KnnIdxType*)getNodesEdgesPtr(0), nullptr, m_param.warp_knn_k_eachlevel[1], getNumNodesInLevel(0), KnnK, m_param.graph_single_level); } else if (m_numNodes[0])// else we only update the graph quaternions { dim3 block(32); dim3 grid(1, 1, 1); grid.x = divUp(m_numNodes[0], block.x); NodesWriter nw; nw.nodesDqVw = getNodesDqVwPtr(0); nw.num = m_numNodes[0]; nw.inv_weight_radius = 1.f / m_param.warp_param_dw; nw.origion = m_volume->getOrigion(); nw.invVoxelSize = 1.f / m_volume->getVoxelSize(); nw.knnTex = getKnnFieldTexture(); nw.nodesDqVwTex = getNodesDqVwTexture(); update_nodes_dq_assume_compact_nodes_kernel << <grid, block >> >(nw); cudaSafeCall(hipGetLastError(), "update_nodes_dq_assume_compact_nodes_kernel"); }// end else (m_lastNumNodes[0] == m_numNodes[0]) } #pragma endregion #pragma region --extract_for_vmap struct IdxContainter { int id[WarpField::GraphLevelNum+1]; __device__ __host__ int& operator [](int i) { return id[i]; } }; __global__ void extract_knn_for_vmap_kernel(PtrStepSz<float4> vmap, PtrStepSz<KnnIdx> vmapKnn, float3 origion, float invVoxelSize, hipTextureObject_t knnTex, IdxContainter ic) { int u = blockIdx.x * blockDim.x + threadIdx.x; int v = blockIdx.y * blockDim.y + threadIdx.y; if (u < vmap.cols && v < vmap.rows) { float3 p = GpuMesh::from_point(vmap(v, u)); KnnIdx knnIdx = make_knn(ic[WarpField::GraphLevelNum]); if (!isnan(p.x)) { float3 p1 = (p - origion)*invVoxelSize; int x = int(p1.x); int y = int(p1.y); 
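// the vertex is mapped to its containing voxel so the volume's precomputed knn field
// can be looked up directly instead of searching the nodes again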
int z = int(p1.z); knnIdx = read_knn_tex(knnTex, x, y, z); for (int k = 0; k < KnnK; k++) { if (knn_k(knnIdx, k) >= WarpField::MaxNodeNum) knn_k(knnIdx, k) = ic[WarpField::GraphLevelNum]; } } vmapKnn(v, u) = knnIdx; } } void WarpField::extract_knn_for_vmap(const MapArr& vmap, DeviceArray2D<KnnIdx>& vmapKnn)const { IdxContainter ic; ic[0] = 0; for (int k = 0; k < GraphLevelNum; k++) ic[k + 1] = ic[k] + m_numNodes[k]; vmapKnn.create(vmap.rows(), vmap.cols()); dim3 block(32, 8); dim3 grid(divUp(vmap.cols(), block.x), divUp(vmap.rows(), block.y)); hipTextureObject_t knnTex = getKnnFieldTexture(); extract_knn_for_vmap_kernel << <grid, block >> >(vmap, vmapKnn, m_volume->getOrigion(), 1.f / m_volume->getVoxelSize(), knnTex, ic); cudaSafeCall(hipGetLastError(), "extract_knn_for_vmap_kernel"); } __global__ void extract_nodes_info_kernel(const float4* nodesDqVw, float* twist, float4* vw, const KnnIdx* nodesKnnIn, KnnIdx* nodesKnnOut, IdxContainter ic, bool single_graph_level) { int iout = blockIdx.x * blockDim.x + threadIdx.x; if (iout >= ic[WarpField::GraphLevelNum]) return; int level = 0; for (int k = 0; k < WarpField::GraphLevelNum; k++) if (iout >= ic[k] && iout < ic[k + 1]) { level = k; break; } int iin = level*WarpField::MaxNodeNum + iout - ic[level]; // write twist Tbx::Dual_quat_cu dq = pack_dual_quat(nodesDqVw[iin * 3], nodesDqVw[iin * 3 + 1]); Tbx::Vec3 r, t; dq.to_twist(r, t); twist[iout * 6 + 0] = r.x; twist[iout * 6 + 1] = r.y; twist[iout * 6 + 2] = r.z; twist[iout * 6 + 3] = t.x; twist[iout * 6 + 4] = t.y; twist[iout * 6 + 5] = t.z; vw[iout] = nodesDqVw[iin * 3 + 2]; // write knn KnnIdx kid = nodesKnnIn[iin]; for (int k = 0; k < KnnK; k++) { if (!single_graph_level) knn_k(kid, k) = (knn_k(kid, k) < ic[level + 1] - ic[level] ? knn_k(kid, k) + ic[level + 1] : ic[WarpField::GraphLevelNum]); else knn_k(kid, k) = (knn_k(kid, k) < WarpField::MaxNodeNum ? 
knn_k(kid, k) : ic[WarpField::GraphLevelNum]); } nodesKnnOut[iout] = kid; } void WarpField::extract_nodes_info(DeviceArray<KnnIdx>& nodesKnn, DeviceArray<float>& twist, DeviceArray<float4>& vw)const { IdxContainter ic; ic[0] = 0; for (int k = 0; k < GraphLevelNum; k++) ic[k + 1] = ic[k] + m_numNodes[k]; if (ic[GraphLevelNum] == 0) return; nodesKnn.create(ic[GraphLevelNum]); twist.create(ic[GraphLevelNum] * 6); vw.create(ic[GraphLevelNum]); extract_nodes_info_no_allocation(nodesKnn, twist, vw); } void WarpField::extract_nodes_info_no_allocation( DeviceArray<KnnIdx>& nodesKnn, DeviceArray<float>& twist, DeviceArray<float4>& vw)const { IdxContainter ic; ic[0] = 0; for (int k = 0; k < GraphLevelNum; k++) ic[k + 1] = ic[k] + m_numNodes[k]; if (ic[GraphLevelNum] == 0) return; dim3 block(256); dim3 grid(divUp(ic[GraphLevelNum], block.x)); extract_nodes_info_kernel << <grid, block >> >(getNodesDqVwPtr(0), twist.ptr(), vw.ptr(), getNodesEdgesPtr(0), nodesKnn.ptr(), ic, m_param.graph_single_level); cudaSafeCall(hipGetLastError(), "extract_nodes_info_kernel"); } __global__ void update_nodes_via_twist_kernel(float4* nodesDqVw, const float* twist, IdxContainter ic) { int iout = blockIdx.x * blockDim.x + threadIdx.x; if (iout >= ic[WarpField::GraphLevelNum]) return; int level = 0; for (int k = 0; k < WarpField::GraphLevelNum; k++) if (iout >= ic[k] && iout < ic[k + 1]) { level = k; break; } int iin = level*WarpField::MaxNodeNum + iout - ic[level]; // write twist Tbx::Vec3 r, t; r.x = twist[iout * 6 + 0]; r.y = twist[iout * 6 + 1]; r.z = twist[iout * 6 + 2]; t.x = twist[iout * 6 + 3]; t.y = twist[iout * 6 + 4]; t.z = twist[iout * 6 + 5]; Tbx::Dual_quat_cu dq; dq.from_twist(r, t); unpack_dual_quat(dq, nodesDqVw[iin * 3], nodesDqVw[iin * 3 + 1]); } void WarpField::update_nodes_via_twist(const DeviceArray<float>& twist) { IdxContainter ic; ic[0] = 0; for (int k = 0; k < GraphLevelNum; k++) ic[k + 1] = ic[k] + m_numNodes[k]; if (twist.size() < ic[GraphLevelNum]*6) throw std::exception("size not matched in WarpField::update_nodes_via_twist()"); dim3 block(256); dim3 grid(divUp(ic[GraphLevelNum], block.x)); update_nodes_via_twist_kernel << <grid, block >> >(getNodesDqVwPtr(0), twist.ptr(), ic); cudaSafeCall(hipGetLastError(), "update_nodes_via_twist"); } #pragma endregion #pragma region --getKnnAt __global__ void getKnnAtKernel(KnnIdx* data, int3 p, hipTextureObject_t tex) { data[0] = read_knn_tex(tex, p.x, p.y, p.z); } KnnIdx WarpField::getKnnAt(float3 volumePos)const { if (m_volume == nullptr) throw std::exception("WarpField::getKnnAt(): null pointer"); float3 ori = m_volume->getOrigion(); float vsz = m_volume->getVoxelSize(); float3 p = (volumePos - ori) / vsz; return getKnnAt(make_int3(p.x, p.y, p.z)); } KnnIdx WarpField::getKnnAt(int3 gridXYZ)const { if (m_volume == nullptr) throw std::exception("WarpField::getKnnAt(): null pointer"); int3 res = m_volume->getResolution(); int x = gridXYZ.x, y = gridXYZ.y, z = gridXYZ.z; if (x < 0 || y < 0 || z < 0 || x >= res.x || y >= res.y || z >= res.z) return make_knn(MaxNodeNum); static DeviceArray<KnnIdx> knn; knn.create(1); hipTextureObject_t tex = getKnnFieldTexture(); getKnnAtKernel << <dim3(1), dim3(1) >> >(knn.ptr(), gridXYZ, tex); cudaSafeCall(hipGetLastError(), "WarpField::getKnnAtKernel"); KnnIdx host; cudaSafeCall(hipMemcpy(&host, knn.ptr(), sizeof(KnnIdx), hipMemcpyDeviceToHost), "WarpField::getKnnAtKernel, post copy"); return host; } #pragma endregion }
4a9b3d24177357a1c3d508a5e226435ab129da32.cu
#include "WarpField.h" #include "GpuMesh.h" #include "device_utils.h" #include "TsdfVolume.h" #include "cudpp\thrust_wrapper.h" #include "cudpp\ModerGpuWrapper.h" #include "GpuKdTree.h" #include <set> #include <algorithm> #include <queue> namespace dfusion { #pragma region --warpmesh struct MeshWarper { const GpuMesh::PointType* vsrc; const GpuMesh::PointType* nsrc; const GpuMesh::PointType* csrc; cudaTextureObject_t knnTex; cudaTextureObject_t nodesDqVwTex; GpuMesh::PointType* vdst; GpuMesh::PointType* ndst; GpuMesh::PointType* cdst; int num; Tbx::Mat3 R; float3 t; float3 origion; float invVoxelSize; __device__ __forceinline__ void operator()(int tid) { float3 p = GpuMesh::from_point(vsrc[tid]); float3 n = GpuMesh::from_point(nsrc[tid]); Tbx::Dual_quat_cu dq_blend = WarpField::calc_dual_quat_blend_on_p(knnTex, nodesDqVwTex, p, origion, invVoxelSize); Tbx::Point3 dq_p = dq_blend.transform(Tbx::Point3(convert(p))); Tbx::Vec3 dq_n = dq_blend.rotate(convert(n)); //vdst[tid] = GpuMesh::to_point(convert(R.rotate(dq_p)) + t); //ndst[tid] = GpuMesh::to_point(convert(R.rotate(dq_n))); vdst[tid] = GpuMesh::to_point(convert(R*dq_p) + t); ndst[tid] = GpuMesh::to_point(convert(R*dq_n)); cdst[tid] = csrc[tid]; } }; __global__ void warp_mesh_kernel(MeshWarper warper) { unsigned int i = blockIdx.x * (blockDim.x << 3) + threadIdx.x; #pragma unroll for (int k = 0; k < 8; k++) { if (i < warper.num) { warper(i); } i += blockDim.x; } } struct MapWarper { PtrStep<float4> vsrc; PtrStep<float4> nsrc; cudaTextureObject_t knnTex; cudaTextureObject_t nodesDqVwTex; PtrStep<float4> vdst; PtrStep<float4> ndst; int w; int h; Tbx::Mat3 R; float3 t; float3 origion; float invVoxelSize; __device__ __forceinline__ void operator()(int x, int y) { float3 p = GpuMesh::from_point(vsrc(y,x)); float3 n = GpuMesh::from_point(nsrc(y,x)); Tbx::Dual_quat_cu dq_blend = WarpField::calc_dual_quat_blend_on_p(knnTex, nodesDqVwTex, p, origion, invVoxelSize); Tbx::Point3 dq_p = dq_blend.transform(Tbx::Point3(convert(p))); Tbx::Vec3 dq_n = dq_blend.rotate(convert(n)); //vdst(y, x) = GpuMesh::to_point(convert(R.rotate(dq_p)) + t); //ndst(y, x) = GpuMesh::to_point(convert(R.rotate(dq_n))); vdst(y, x) = GpuMesh::to_point(convert(R*dq_p) + t); ndst(y, x) = GpuMesh::to_point(convert(R*dq_n)); } }; __global__ void warp_map_kernel(MapWarper warper) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < warper.w && y < warper.h) warper(x, y); } void WarpField::warp(GpuMesh& src, GpuMesh& dst) { if (src.num() == 0) return; dst.create(src.num()); src.lockVertsNormals(); dst.lockVertsNormals(); MeshWarper warper; warper.t = convert(m_rigidTransform.get_translation()); warper.R = m_rigidTransform.get_mat3();// Tbx::Quat_cu(m_rigidTransform); warper.knnTex = getKnnFieldTexture(); warper.nodesDqVwTex = getNodesDqVwTexture(); warper.vsrc = src.verts(); warper.nsrc = src.normals(); warper.csrc = src.colors(); warper.vdst = dst.verts(); warper.ndst = dst.normals(); warper.cdst = dst.colors(); warper.num = src.num(); warper.origion = m_volume->getOrigion(); warper.invVoxelSize = 1.f / m_volume->getVoxelSize(); dim3 block(512); dim3 grid(1, 1, 1); grid.x = divUp(dst.num(), block.x << 3); warp_mesh_kernel << <grid, block >> >(warper); cudaSafeCall(cudaGetLastError(), "warp mesh"); dst.unlockVertsNormals(); src.unlockVertsNormals(); } void WarpField::warp(const MapArr& srcVmap, const MapArr& srcNmap, MapArr& dstVmap, MapArr& dstNmap) { const int w = srcVmap.cols(); const int h = srcNmap.rows(); dstVmap.create(h, 
w); dstNmap.create(h, w); MapWarper warper; warper.t = convert(m_rigidTransform.get_translation()); warper.R = m_rigidTransform.get_mat3();// Tbx::Quat_cu(m_rigidTransform); warper.knnTex = getKnnFieldTexture(); warper.nodesDqVwTex = getNodesDqVwTexture(); warper.vsrc = srcVmap; warper.nsrc = srcNmap; warper.vdst = dstVmap; warper.ndst = dstNmap; warper.w = w; warper.h = h; warper.origion = m_volume->getOrigion(); warper.invVoxelSize = 1.f / m_volume->getVoxelSize(); dim3 block(32, 8); dim3 grid(divUp(w, block.x), divUp(h, block.y), 1); warp_map_kernel << <grid, block >> >(warper); cudaSafeCall(cudaGetLastError(), "warp map"); } #pragma endregion #pragma region --init knn field __global__ void initKnnFieldKernel(cudaSurfaceObject_t knnSurf, int3 resolution) { int ix = blockDim.x*blockIdx.x + threadIdx.x; int iy = blockDim.y*blockIdx.y + threadIdx.y; int iz = blockDim.z*blockIdx.z + threadIdx.z; if (ix < resolution.x && iy < resolution.y && iz < resolution.z) write_knn(make_knn(WarpField::MaxNodeNum), knnSurf, ix, iy, iz); } __global__ void initKnnFieldKernel1(KnnIdx* knnPtr, int n) { int ix = blockDim.x*blockIdx.x + threadIdx.x; if (ix < n) knnPtr[ix] = make_knn(WarpField::MaxNodeNum); } void WarpField::initKnnField() { int3 res = m_volume->getResolution(); dim3 block(32, 8, 2); dim3 grid(divUp(res.x, block.x), divUp(res.y, block.y), divUp(res.z, block.z)); cudaSurfaceObject_t surf = getKnnFieldSurface(); initKnnFieldKernel << <grid, block >> >(surf, res); cudaSafeCall(cudaGetLastError(), "initKnnFieldKernel"); dim3 block1(256); dim3 grid1(divUp(m_nodesGraph.size(), block1.x)); initKnnFieldKernel1 << <grid, block >> >(m_nodesGraph.ptr(), m_nodesGraph.size()); cudaSafeCall(cudaGetLastError(), "initKnnFieldKernel1"); } #pragma endregion #pragma region --update nodes __device__ int newPoints_global_count = 0; __device__ int newPoints_output_count; __device__ unsigned int newPoints_blocks_done = 0; struct NewPointsCounter { enum { CTA_SIZE = 256, WARPS_COUNT = CTA_SIZE / Warp::WARP_SIZE }; mutable int* out_keys; mutable float4* out_points; GpuMesh::PointType* input_points; cudaTextureObject_t knnTex; cudaTextureObject_t nodesDqVwTex; float4* nodesDqVw; int n; int step; float3 origion; int numNodes; float inv_search_radius_sqr; // for volume index float vol_invVoxelSize; int3 vol_res; // for key generation float key_invGridSize; int3 key_gridRes; __device__ __forceinline__ void operator () () const { int tid = threadIdx.x + blockIdx.x * CTA_SIZE; if (__all(tid >= n)) return; int warp_id = Warp::id(); int lane_id = Warp::laneId(); volatile __shared__ int warps_buffer[WARPS_COUNT]; int flag = 0; int key = 0; float4 p4; if (tid < n) { float3 p = GpuMesh::from_point(input_points[tid*step]); p4 = GpuMesh::to_point(p, 1.f); // generating key float3 p1 = (p - origion)*key_invGridSize; int x = int(p1.x); int y = int(p1.y); int z = int(p1.z); key = (z*key_gridRes.y + y)*key_gridRes.x + x; // identify voxel p1 = (p - origion)*vol_invVoxelSize; x = int(p1.x); y = int(p1.y); z = int(p1.z); // assert knnIdx sorted, thus the 1st should be the nearest KnnIdx knnIdx = read_knn_tex(knnTex, x, y, z); if (knn_k(knnIdx, 0) < numNodes) { float4 nearestVw = make_float4(0, 0, 0, 1); tex1Dfetch(&nearestVw, nodesDqVwTex, knn_k(knnIdx, 0) * 3 + 2); // [q0-q1-vw] memory stored float3 nearestV = make_float3(nearestVw.x, nearestVw.y, nearestVw.z); // DIFFERENT from the paper ldp: // here we insert a node if the point is outside the search radius, // but NOT 1/dw // note .w store 1/radius float dif = dot(nearestV - p, 
nearestV - p) * inv_search_radius_sqr; flag = (dif > 1.f); } else flag = 1.f; } int total = __popc(__ballot(flag>0)); if (total) { if (lane_id == 0) { int old = atomicAdd(&newPoints_global_count, total); warps_buffer[warp_id] = old; } int old_global_voxels_count = warps_buffer[warp_id]; int offs = Warp::binaryExclScan(__ballot(flag>0)); if (old_global_voxels_count + offs < n && flag) { out_keys[old_global_voxels_count + offs] = key; out_points[old_global_voxels_count + offs] = p4; } }// end if total if (Block::flattenedThreadId() == 0) { unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z; unsigned int value = atomicInc(&newPoints_blocks_done, total_blocks); //last block if (value == total_blocks - 1) { newPoints_output_count = newPoints_global_count; newPoints_blocks_done = 0; newPoints_global_count = 0; } } } /* operator () */ }; __global__ void get_newPoints_kernel(NewPointsCounter counter) { counter(); } __global__ void pointToKey_kernel( const GpuMesh::PointType* points, int* key, float4* copypoints, int n, int step, float invGridSize, float3 origion, int3 gridRes) { unsigned int blockId = __mul24(blockIdx.y, gridDim.x) + blockIdx.x; unsigned int threadId = __mul24(blockId, blockDim.x << 3) + threadIdx.x; #pragma unroll for (int k = 0; k < 8; k++, threadId += blockDim.x) { if (threadId < n) { float3 p = GpuMesh::from_point(points[threadId*step]); float3 p1 = (p- origion)*invGridSize; int x = int(p1.x); int y = int(p1.y); int z = int(p1.z); key[threadId] = (z*gridRes.y + y)*gridRes.x + x; copypoints[threadId] = GpuMesh::to_point(p, 1.f); } } } __device__ int validVoxel_global_count = 0; __device__ int validVoxel_output_count; __device__ unsigned int validVoxel_blocks_done = 0; struct ValidVoxelCounter { enum { CTA_SIZE = 256, WARPS_COUNT = CTA_SIZE / Warp::WARP_SIZE }; mutable int* key_sorted; mutable int* counts; const float4* points_scaned; float weight_thre; int n; __device__ __forceinline__ void operator () () const { int tid = threadIdx.x + blockIdx.x * CTA_SIZE; if (__all(tid >= n)) return; int warp_id = Warp::id(); int lane_id = Warp::laneId(); volatile __shared__ int warps_buffer[WARPS_COUNT]; int flag = 0; if (tid < n) flag = (points_scaned[tid].w > weight_thre) && (key_sorted[tid] != key_sorted[tid + 1] || tid == n - 1); int total = __popc(__ballot(flag>0)); if (total) { if (lane_id == 0) { int old = atomicAdd(&validVoxel_global_count, total); warps_buffer[warp_id] = old; } int old_global_voxels_count = warps_buffer[warp_id]; int offs = Warp::binaryExclScan(__ballot(flag>0)); if (old_global_voxels_count + offs < n && flag) counts[old_global_voxels_count + offs] = tid; }// end if total if (Block::flattenedThreadId() == 0) { unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z; unsigned int value = atomicInc(&validVoxel_blocks_done, total_blocks); //last block if (value == total_blocks - 1) { validVoxel_output_count = validVoxel_global_count; validVoxel_blocks_done = 0; validVoxel_global_count = 0; } } } /* operator () */ }; __global__ void get_validVoxel_kernel(ValidVoxelCounter counter) { counter(); } struct NodesWriter { const float4* points_not_compact; const int* index; float4* nodesDqVw; float inv_weight_radius; int num; cudaTextureObject_t knnTex; cudaTextureObject_t nodesDqVwTex; float3 origion; float invVoxelSize; __device__ __forceinline__ void operator()(int threadId) { int idx = index[threadId]; float4 p = points_not_compact[idx]; float inv_w = 1.f / p.w; p.x *= inv_w; p.y *= inv_w; p.z *= inv_w; p.w = inv_weight_radius; nodesDqVw[threadId * 3 + 
2] = p; Tbx::Dual_quat_cu dq_blend = WarpField::calc_dual_quat_blend_on_p(knnTex, nodesDqVwTex, make_float3(p.x, p.y, p.z), origion, invVoxelSize); unpack_dual_quat(dq_blend, nodesDqVw[threadId * 3], nodesDqVw[threadId * 3 + 1]); } __device__ __forceinline__ void update_nodes_dq_assume_compact_nodes(int threadId) { float4 p = nodesDqVw[threadId * 3 + 2]; Tbx::Dual_quat_cu dq_blend = WarpField::calc_dual_quat_blend_on_p(knnTex, nodesDqVwTex, make_float3(p.x, p.y, p.z), origion, invVoxelSize); unpack_dual_quat(dq_blend, nodesDqVw[threadId * 3], nodesDqVw[threadId * 3 + 1]); } }; __global__ void write_nodes_kernel(NodesWriter nw) { int threadId = blockIdx.x * blockDim.x + threadIdx.x; if (threadId < nw.num) { nw(threadId); } } __global__ void update_nodes_dq_assume_compact_nodes_kernel(NodesWriter nw) { int threadId = blockIdx.x * blockDim.x + threadIdx.x; if (threadId < nw.num) { nw.update_nodes_dq_assume_compact_nodes(threadId); } } void WarpField::insertNewNodes(GpuMesh& src) { // make a larger buffer to prevent allocation each time int step = m_param.warp_point_step_before_update_node; int num_points = src.num() / step; if (num_points == 0) return; if (num_points > m_current_point_buffer_size) { m_current_point_buffer_size = num_points * 1.5; m_meshPointsSorted.create(m_current_point_buffer_size); m_meshPointsKey.create(m_current_point_buffer_size); m_meshPointsFlags.create(m_current_point_buffer_size); m_tmpBuffer.create(m_current_point_buffer_size); cudaMemset(m_meshPointsSorted.ptr(), 0, m_meshPointsSorted.size()*m_meshPointsSorted.elem_size); cudaMemset(m_meshPointsKey.ptr(), 0, m_meshPointsKey.size()*m_meshPointsKey.elem_size); cudaMemset(m_meshPointsFlags.ptr(), 0, m_meshPointsFlags.size()*m_meshPointsFlags.elem_size); cudaMemset(m_tmpBuffer.ptr(), 0, m_tmpBuffer.size()*m_tmpBuffer.elem_size); } // reset symbols int zero_mem_symbol = 0; cudaMemcpyToSymbol(newPoints_global_count, &zero_mem_symbol, sizeof(int)); cudaMemcpyToSymbol(newPoints_blocks_done, &zero_mem_symbol, sizeof(int)); cudaMemcpyToSymbol(validVoxel_global_count, &zero_mem_symbol, sizeof(int)); cudaMemcpyToSymbol(validVoxel_blocks_done, &zero_mem_symbol, sizeof(int)); cudaSafeCall(cudaDeviceSynchronize(), "set zero: new point"); // if 1st in, then collect all points if (m_lastNumNodes[0] == 0) { dim3 block(256); dim3 grid(1, 1, 1); grid.x = divUp(num_points, block.x << 3); // copy to new buffer and generate sort key src.lockVertsNormals(); pointToKey_kernel << <grid, block >> >( src.verts(), m_meshPointsKey.ptr(), m_meshPointsSorted.ptr(), num_points, step, 1.f / m_param.warp_radius_search_epsilon, m_volume->getOrigion(), m_nodesGridSize); cudaSafeCall(cudaGetLastError(), "pointToKey_kernel"); src.unlockVertsNormals(); } // else, collect non-covered points else { src.lockVertsNormals(); NewPointsCounter counter; counter.n = num_points; counter.step = step; counter.origion = m_volume->getOrigion(); counter.key_gridRes = m_nodesGridSize; counter.key_invGridSize = 1.f / m_param.warp_radius_search_epsilon; counter.vol_invVoxelSize = 1.f / m_volume->getVoxelSize(); counter.vol_res = m_volume->getResolution(); counter.inv_search_radius_sqr = 1.f / (m_param.warp_radius_search_epsilon * m_param.warp_radius_search_epsilon); counter.input_points = src.verts(); counter.out_points = m_meshPointsSorted.ptr(); counter.out_keys = m_meshPointsKey.ptr(); counter.knnTex = getKnnFieldTexture(); counter.nodesDqVwTex = getNodesDqVwTexture(); counter.nodesDqVw = getNodesDqVwPtr(0); counter.numNodes = m_numNodes[0]; dim3 
block1(NewPointsCounter::CTA_SIZE); dim3 grid1(divUp(num_points, block1.x)); get_newPoints_kernel << <grid1, block1 >> >(counter); cudaSafeCall(cudaGetLastError(), "get_newPoints_kernel"); cudaSafeCall(cudaDeviceSynchronize(), "get_newPoints_kernel sync"); cudaSafeCall(cudaMemcpyFromSymbol(&num_points, newPoints_output_count, sizeof(int)), "get_newPoints_kernel memcpy from symbol"); src.unlockVertsNormals(); }// end else if (num_points == 0) return; // sort thrust_wrapper::sort_by_key(m_meshPointsKey.ptr(), m_meshPointsSorted.ptr(), num_points); // segment scan thrust_wrapper::inclusive_scan_by_key(m_meshPointsKey.ptr(), m_meshPointsSorted.ptr(), m_meshPointsSorted.ptr(), num_points); // compact ValidVoxelCounter counter; counter.counts = m_meshPointsFlags.ptr(); counter.key_sorted = m_meshPointsKey.ptr(); counter.n = num_points; counter.weight_thre = m_param.warp_valid_point_num_each_node; counter.points_scaned = m_meshPointsSorted.ptr(); { dim3 block1(ValidVoxelCounter::CTA_SIZE); dim3 grid1(divUp(num_points, block1.x)); get_validVoxel_kernel << <grid1, block1 >> >(counter); cudaSafeCall(cudaGetLastError(), "get_validVoxel_kernel"); cudaSafeCall(cudaDeviceSynchronize(), "get_validVoxel_kernel sync"); } int num_after_compact = 0; cudaSafeCall(cudaMemcpyFromSymbol(&num_after_compact, validVoxel_output_count, sizeof(int)), "copy voxel count from symbol"); if (num_after_compact == 0 && m_lastNumNodes[0] == 0) num_after_compact = 1; // at least one point needed. m_numNodes[0] = min(m_lastNumNodes[0] + num_after_compact, MaxNodeNum); if (num_after_compact + m_lastNumNodes[0] > MaxNodeNum) printf("warning: too many nodes %d vs %d\n", num_after_compact + m_lastNumNodes[0], MaxNodeNum); if (m_numNodes[0] > m_lastNumNodes[0]) { dim3 block(256); dim3 grid(1, 1, 1); grid.x = divUp(m_numNodes[0] - m_lastNumNodes[0], block.x); NodesWriter nw; nw.points_not_compact = m_meshPointsSorted.ptr(); nw.index = m_meshPointsFlags.ptr(); nw.nodesDqVw = getNodesDqVwPtr(0) + m_lastNumNodes[0] * 3; nw.num = m_numNodes[0] - m_lastNumNodes[0]; nw.inv_weight_radius = 1.f / m_param.warp_param_dw; nw.origion = m_volume->getOrigion(); nw.invVoxelSize = 1.f / m_volume->getVoxelSize(); nw.knnTex = getKnnFieldTexture(); nw.nodesDqVwTex = getNodesDqVwTexture(); write_nodes_kernel << <grid, block >> >(nw); cudaSafeCall(cudaGetLastError(), "write_nodes_kernel"); } } #pragma endregion #pragma region --update ann field __global__ void seperate_xyz_nodes(const float4* nodesDqVw, float* x, float* y, float* z, int n) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid < n) { float4 dqVw = nodesDqVw[tid * 3 + 2]; x[tid] = dqVw.x; y[tid] = dqVw.y; z[tid] = dqVw.z; } } __global__ void collect_aabb_box_kernel(float4* aabb_min, float4* aabb_max, const float* x, const float* y, const float* z, int n) { int tid = threadIdx.x + blockIdx.x*blockDim.x; if (tid == 0) { aabb_min[0] = make_float4(x[0], y[0], z[0], 0); aabb_max[0] = make_float4(x[n-1], y[n - 1], z[n - 1], 0); } } __global__ void bruteforce_updateKnn_kernel(cudaTextureObject_t nodesDqVwTex, cudaSurfaceObject_t knnSurf, int3 res, int newNodesBegin, int newNodesEnd, float3 origion, float voxelSize, int maxK) { int x = threadIdx.x + blockIdx.x*blockDim.x; int y = threadIdx.y + blockIdx.y*blockDim.y; int z = threadIdx.z + blockIdx.z*blockDim.z; if (x < res.x && y < res.y && z < res.z) { // compute all 4 dists stored KnnIdx knn = read_knn_surf(knnSurf, x, y, z); float3 voxelPos = origion + voxelSize*make_float3(x, y, z); float oldDists2[KnnK]; for (int k = 0; k < maxK; k++) { 
float4 p; tex1Dfetch(&p, nodesDqVwTex, knn_k(knn, k)*3 + 2); oldDists2[k] = norm2(make_float3(p.x, p.y, p.z) - voxelPos); } // update new nodes for (int iNode = newNodesBegin; iNode < newNodesEnd; iNode++) { float4 p; tex1Dfetch(&p, nodesDqVwTex, iNode * 3 + 2); float newDist2 = norm2(make_float3(p.x, p.y, p.z) - voxelPos); // we swap the farest nodes out // note that the knn is kept sorted int swapPos = maxK; for (int k = 0; k < maxK; k++) { if (newDist2 < oldDists2[k]) { swapPos = k; break; } } if (swapPos < maxK) { KnnIdx newKnn = knn; knn_k(newKnn, swapPos) = iNode; for (int k = swapPos + 1; k < maxK; k++) knn_k(newKnn, k) = knn_k(knn, k - 1); write_knn(newKnn, knnSurf, x, y, z); } }// end for iNode } } void WarpField::updateAnnField() { float3 origion = m_volume->getOrigion(); int3 res = m_volume->getResolution(); float vsz = m_volume->getVoxelSize(); // if 1st frame, then perform whole-volume search, which is slow if (m_lastNumNodes[0] == 0) { m_nodeTree[0]->buildTree(m_nodesQuatTransVw.ptr() + 2, m_numNodes[0], 3); cudaSurfaceObject_t surf = getKnnFieldSurface(); m_nodeTree[0]->knnSearchGpu(surf, make_int3(0, 0, 0), res, origion, vsz, m_param.warp_knn_k_eachlevel[0]); } // else, collect voxels around the new added node and then perform sub-volume searching else { int nNewNodes = m_numNodes[0] - m_lastNumNodes[0]; #if 0 // 1st step, collect bounding box of new nodes to avoid additional computation float* xptr = m_tmpBuffer.ptr() + nNewNodes; float* yptr = xptr + nNewNodes; float* zptr = yptr + nNewNodes; if (nNewNodes) { dim3 block(32); dim3 grid(divUp(nNewNodes, block.x)); seperate_xyz_nodes << <grid, block >> >(getNodesDqVwPtr(0) + m_lastNumNodes[0] * 3, xptr, yptr, zptr, nNewNodes); cudaSafeCall(cudaGetLastError(), "seperate_xyz_nodes"); } modergpu_wrapper::mergesort(xptr, nNewNodes); modergpu_wrapper::mergesort(yptr, nNewNodes); modergpu_wrapper::mergesort(zptr, nNewNodes); // bounding box info float4 box[2]; { dim3 block(1); dim3 grid(1); collect_aabb_box_kernel << <grid, block >> >( m_meshPointsSorted.ptr(), m_meshPointsSorted.ptr() + 1, xptr, yptr, zptr, nNewNodes); cudaSafeCall(cudaGetLastError(), "collect_aabb_box_kernel"); cudaSafeCall(cudaMemcpy(box, m_meshPointsSorted.ptr(), 2 * sizeof(float4), cudaMemcpyDeviceToHost)); } // convert to volume index int3 begin = make_int3((box[0].x - origion.x) / vsz, (box[0].y - origion.y) / vsz, (box[0].z - origion.z) / vsz); int3 end = make_int3((box[1].x - origion.x) / vsz + 1, (box[1].y - origion.y) / vsz + 1, (box[1].z - origion.z) / vsz + 1); int ext = ceil(m_param.warp_param_dw / vsz); begin.x = min(res.x - 1, max(0, begin.x - ext)); begin.y = min(res.y - 1, max(0, begin.y - ext)); begin.z = min(res.z - 1, max(0, begin.z - ext)); end.x = max(1, min(res.x, end.x + ext)); end.y = max(1, min(res.y, end.y + ext)); end.z = max(1, min(res.z, end.z + ext)); // perform knn search on the sub volume m_nodeTree[0]->buildTree(m_nodesQuatTransVw.ptr() + 2, m_numNodes[0], 3); cudaSurfaceObject_t surf = bindKnnFieldSurface(); m_nodeTree[0]->knnSearchGpu(surf, begin, end, origion, vsz, KnnK); //m_nodeTree[0]->knnSearchGpu(surf, make_int3(0,0,0), res, origion, vsz, KnnK); unBindKnnFieldSurface(surf); #else //tranverse each voxel to update if (nNewNodes > 0) { int3 res = m_volume->getResolution(); float3 origion = m_volume->getOrigion(); float vsz = m_volume->getVoxelSize(); dim3 block(32, 8, 2); dim3 grid(divUp(res.x, block.x), divUp(res.y, block.y), divUp(res.z, block.z)); cudaSurfaceObject_t surf = getKnnFieldSurface(); cudaTextureObject_t tex = 
getNodesDqVwTexture(); bruteforce_updateKnn_kernel << <grid, block >> >( tex, surf, res, m_lastNumNodes[0], m_numNodes[0], origion, vsz, m_param.warp_knn_k_eachlevel[0]); cudaSafeCall(cudaGetLastError(), "bruteforce_updateKnn_kernel"); } #endif } } #pragma endregion #pragma region remove small graph components struct sort_int2_less { bool operator()(const int2& left, const int2& right)const { return (left.x < right.x) || (left.x == right.x && left.y < right.y); } }; __global__ void copy_nodes_kernel(float4* dst, const float4* src, const int* idxMap, int nSrc) { int iSrc = threadIdx.x + blockIdx.x * blockDim.x; if (iSrc < nSrc) { int iDst = idxMap[iSrc]; if (iDst >= 0) { for (int k = 0; k < 3; k++) dst[iDst * 3 + k] = src[iSrc * 3 + k]; } } } void WarpField::remove_small_graph_components() { // we only perform removal for single-level graph if (!m_param.graph_single_level || m_numNodes[0] <= 1 || m_param.graph_remove_small_components_ratio >= 1.f || m_numNodes[0] == m_lastNumNodes[0]) return; std::vector<KnnIdx> knnGraph(m_numNodes[0]); cudaSafeCall(cudaMemcpy(knnGraph.data(), m_nodesGraph.ptr(), m_numNodes[0] * sizeof(KnnIdx), cudaMemcpyDeviceToHost), "WarpField::remove_small_graph_components, cudaMemcpy1"); std::vector<int2> edges; edges.reserve(knnGraph.size() * KnnK); for (int i = 0; i < knnGraph.size(); i++) { KnnIdx knn = knnGraph[i]; for (int k = 0; k < KnnK; k++) { int nb = knn_k(knn, k); if (nb < m_numNodes[0]) { edges.push_back(make_int2(i, nb)); edges.push_back(make_int2(nb, i)); } }// k }// i std::sort(edges.begin(), edges.end(), sort_int2_less()); std::vector<int> edgeHeader(m_numNodes[0] + 1, 0); for (int i = 1; i < edges.size(); i++) { if (edges[i].x != edges[i - 1].x) edgeHeader[edges[i].x] = i; } edgeHeader[m_numNodes[0]] = edges.size(); // find indepedent components std::set<int> verts; for (int i = 0; i < m_numNodes[0]; i++) verts.insert(i); std::vector<int> componentsSize; std::vector<int> componentsFlag(m_numNodes[0], -1); while (!verts.empty()) { componentsSize.push_back(0); int& cpSz = componentsSize.back(); auto set_iter = verts.begin(); std::queue<int> queue; queue.push(*set_iter); verts.erase(set_iter); while (!queue.empty()) { const int v = queue.front(); queue.pop(); cpSz++; componentsFlag[v] = componentsSize.size() - 1; for (int i = edgeHeader[v]; i < edgeHeader[v + 1]; i++) { const int v1 = edges[i].y; set_iter = verts.find(v1); if (set_iter != verts.end()) { queue.push(v1); verts.erase(set_iter); } }// end for i }// end while }// end while verts // if only one components, then nothing to remove if (componentsSize.size() <= 1) return; // find idx that map origional nodes to removed nodes set const int thre = std::lroundf(m_param.graph_remove_small_components_ratio * m_numNodes[0]); std::set<int> componentsToRemove; for (int i = 0; i < componentsSize.size(); i++) if (componentsSize[i] < thre) componentsToRemove.insert(i); if (componentsToRemove.size() == 0) return; int totalIdx = 0; std::vector<int> idxMap(componentsFlag.size()); for (int i = 0; i < componentsFlag.size(); i++) { if (componentsToRemove.find(componentsFlag[i]) != componentsToRemove.end()) { idxMap[i] = -1; if (i < m_lastNumNodes[0]) { //printf("illegal: %d < %d, current: %d\n", i, m_lastNumNodes[0], m_numNodes[0]); //throw std::exception("error in removing small components, last nodes not illegal!"); idxMap[i] = totalIdx++; } } else idxMap[i] = totalIdx++; } // if (m_meshPointsKey.size() < m_numNodes[0]) m_meshPointsKey.create(m_numNodes[0] * 1.5); if (m_meshPointsSorted.size() < m_numNodes[0] * 
3) m_meshPointsSorted.create(m_numNodes[0] * 3 * 1.5); cudaSafeCall(cudaMemcpy(m_meshPointsSorted, m_nodesQuatTransVw, m_numNodes[0] * sizeof(float4)* 3, cudaMemcpyDeviceToDevice), "WarpField::remove_small_graph_components, cudaMemcpy2"); cudaSafeCall(cudaMemcpy(m_meshPointsKey, idxMap.data(), m_numNodes[0] * sizeof(int), cudaMemcpyHostToDevice), "WarpField::remove_small_graph_components, cudaMemcpy3"); copy_nodes_kernel << <divUp(m_numNodes[0], 256), 256 >> >(m_nodesQuatTransVw, m_meshPointsSorted, m_meshPointsKey, m_numNodes[0]); cudaSafeCall(cudaGetLastError(), "WarpField::remove_small_graph_components, copy nodes"); printf("Nodes Removal: %d -> %d, last=%d\n", m_numNodes[0], totalIdx, m_lastNumNodes[0]); m_numNodes[0] = totalIdx; updateGraph_singleLevel(); } #pragma endregion #pragma region --update graph void WarpField::updateGraph(int level) { if (level == 0) throw std::exception("called an invalid level function\n"); int num_points = m_numNodes[level - 1]; if (num_points == 0) { m_numNodes[level] = 0; return; } // re-define structure only if lv0 structure changed=============================== if (m_lastNumNodes[0] != m_numNodes[0]) { // reset symbols int zero_mem_symbol = 0; cudaMemcpyToSymbol(newPoints_global_count, &zero_mem_symbol, sizeof(int)); cudaMemcpyToSymbol(newPoints_blocks_done, &zero_mem_symbol, sizeof(int)); cudaMemcpyToSymbol(validVoxel_global_count, &zero_mem_symbol, sizeof(int)); cudaMemcpyToSymbol(validVoxel_blocks_done, &zero_mem_symbol, sizeof(int)); cudaSafeCall(cudaDeviceSynchronize(), "set zero: new point"); float radius = m_param.warp_radius_search_epsilon * pow(m_param.warp_radius_search_beta, level); { dim3 block(32); dim3 grid(1, 1, 1); grid.x = divUp(num_points, block.x << 3); // copy to new buffer and generate sort key pointToKey_kernel << <grid, block >> >( getNodesDqVwPtr(level - 1) + 2, m_meshPointsKey.ptr(), m_meshPointsSorted.ptr(), num_points, 3, 1.f / radius, m_volume->getOrigion(), m_nodesGridSize); cudaSafeCall(cudaGetLastError(), "pointToKey_kernel lv"); } if (num_points == 0) return; // sort thrust_wrapper::sort_by_key(m_meshPointsKey.ptr(), m_meshPointsSorted.ptr(), num_points); // segment scan thrust_wrapper::inclusive_scan_by_key(m_meshPointsKey.ptr(), m_meshPointsSorted.ptr(), m_meshPointsSorted.ptr(), num_points); // compact ValidVoxelCounter counter; counter.counts = m_meshPointsFlags.ptr(); counter.key_sorted = m_meshPointsKey.ptr(); counter.n = num_points; counter.weight_thre = 1; counter.points_scaned = m_meshPointsSorted.ptr(); if (num_points) { dim3 block1(ValidVoxelCounter::CTA_SIZE); dim3 grid1(divUp(num_points, block1.x)); get_validVoxel_kernel << <grid1, block1 >> >(counter); cudaSafeCall(cudaGetLastError(), "get_validVoxel_kernel lv"); cudaSafeCall(cudaDeviceSynchronize(), "get_validVoxel_kernel lv sync"); } int num_after_compact = 0; cudaSafeCall(cudaMemcpyFromSymbol(&num_after_compact, validVoxel_output_count, sizeof(int)), "copy voxel count from symbol"); m_numNodes[level] = min(num_after_compact, MaxNodeNum); if (num_after_compact > MaxNodeNum) printf("warning: too many nodes %d vs %d in level\n", num_after_compact + m_lastNumNodes[0], MaxNodeNum, level); // write level nodes if (m_numNodes[level] > 0) { dim3 block(32); dim3 grid(1, 1, 1); grid.x = divUp(m_numNodes[level], block.x); NodesWriter nw; nw.points_not_compact = m_meshPointsSorted.ptr(); nw.index = m_meshPointsFlags.ptr(); nw.nodesDqVw = getNodesDqVwPtr(level); nw.num = m_numNodes[level]; nw.inv_weight_radius = 1.f / 
(m_param.warp_param_dw*pow(m_param.warp_radius_search_beta, level)); nw.origion = m_volume->getOrigion(); nw.invVoxelSize = 1.f / m_volume->getVoxelSize(); nw.knnTex = getKnnFieldTexture(); nw.nodesDqVwTex = getNodesDqVwTexture(); write_nodes_kernel << <grid, block >> >(nw); cudaSafeCall(cudaGetLastError(), "write_nodes_kernel"); } // build graph if (m_numNodes[level] > 0) { m_nodeTree[level]->buildTree(getNodesDqVwPtr(level) + 2, m_numNodes[level], 3); dim3 block1(256); dim3 grid1(divUp(getNumNodesInLevel(level-1)*KnnK, block1.x)); initKnnFieldKernel1 << <grid1, block1 >> >(getNodesEdgesPtr(level - 1), getNumNodesInLevel(level - 1)*KnnK); cudaSafeCall(cudaGetLastError(), "initKnnFieldKernel1-1"); m_nodeTree[level]->knnSearchGpu(getNodesDqVwPtr(level - 1) + 2, 3, (KnnIdxType*)getNodesEdgesPtr(level - 1), nullptr, m_param.warp_knn_k_eachlevel[level], getNumNodesInLevel(level - 1), KnnK); } }// end if (m_lastNumNodes[0] != m_numNodes[0]) else if (m_numNodes[level])// else we only update the graph quaternions { dim3 block(32); dim3 grid(1, 1, 1); grid.x = divUp(m_numNodes[level], block.x); NodesWriter nw; nw.nodesDqVw = getNodesDqVwPtr(level); nw.num = m_numNodes[level]; nw.inv_weight_radius = 1.f / (m_param.warp_param_dw*pow(m_param.warp_param_dw_lvup_scale, level)); nw.origion = m_volume->getOrigion(); nw.invVoxelSize = 1.f / m_volume->getVoxelSize(); nw.knnTex = getKnnFieldTexture(); nw.nodesDqVwTex = getNodesDqVwTexture(); update_nodes_dq_assume_compact_nodes_kernel << <grid, block >> >(nw); cudaSafeCall(cudaGetLastError(), "update_nodes_dq_assume_compact_nodes_kernel"); }// end else (m_lastNumNodes[0] == m_numNodes[0]) } void WarpField::updateGraph_singleLevel() { // build graph if (m_lastNumNodes[0] != m_numNodes[0]) { m_nodeTree[0]->buildTree(getNodesDqVwPtr(0) + 2, m_numNodes[0], 3); dim3 block1(256); dim3 grid1(divUp(getNumNodesInLevel(0)*KnnK, block1.x)); initKnnFieldKernel1 << <grid1, block1 >> >(getNodesEdgesPtr(0), getNumNodesInLevel(0)*KnnK); cudaSafeCall(cudaGetLastError(), "initKnnFieldKernel1-1"); m_nodeTree[0]->knnSearchGpu(getNodesDqVwPtr(0) + 2, 3, (KnnIdxType*)getNodesEdgesPtr(0), nullptr, m_param.warp_knn_k_eachlevel[1], getNumNodesInLevel(0), KnnK, m_param.graph_single_level); } else if (m_numNodes[0])// else we only update the graph quaternions { dim3 block(32); dim3 grid(1, 1, 1); grid.x = divUp(m_numNodes[0], block.x); NodesWriter nw; nw.nodesDqVw = getNodesDqVwPtr(0); nw.num = m_numNodes[0]; nw.inv_weight_radius = 1.f / m_param.warp_param_dw; nw.origion = m_volume->getOrigion(); nw.invVoxelSize = 1.f / m_volume->getVoxelSize(); nw.knnTex = getKnnFieldTexture(); nw.nodesDqVwTex = getNodesDqVwTexture(); update_nodes_dq_assume_compact_nodes_kernel << <grid, block >> >(nw); cudaSafeCall(cudaGetLastError(), "update_nodes_dq_assume_compact_nodes_kernel"); }// end else (m_lastNumNodes[0] == m_numNodes[0]) } #pragma endregion #pragma region --extract_for_vmap struct IdxContainter { int id[WarpField::GraphLevelNum+1]; __device__ __host__ int& operator [](int i) { return id[i]; } }; __global__ void extract_knn_for_vmap_kernel(PtrStepSz<float4> vmap, PtrStepSz<KnnIdx> vmapKnn, float3 origion, float invVoxelSize, cudaTextureObject_t knnTex, IdxContainter ic) { int u = blockIdx.x * blockDim.x + threadIdx.x; int v = blockIdx.y * blockDim.y + threadIdx.y; if (u < vmap.cols && v < vmap.rows) { float3 p = GpuMesh::from_point(vmap(v, u)); KnnIdx knnIdx = make_knn(ic[WarpField::GraphLevelNum]); if (!isnan(p.x)) { float3 p1 = (p - origion)*invVoxelSize; int x = int(p1.x); int y = 
int(p1.y); int z = int(p1.z); knnIdx = read_knn_tex(knnTex, x, y, z); for (int k = 0; k < KnnK; k++) { if (knn_k(knnIdx, k) >= WarpField::MaxNodeNum) knn_k(knnIdx, k) = ic[WarpField::GraphLevelNum]; } } vmapKnn(v, u) = knnIdx; } } void WarpField::extract_knn_for_vmap(const MapArr& vmap, DeviceArray2D<KnnIdx>& vmapKnn)const { IdxContainter ic; ic[0] = 0; for (int k = 0; k < GraphLevelNum; k++) ic[k + 1] = ic[k] + m_numNodes[k]; vmapKnn.create(vmap.rows(), vmap.cols()); dim3 block(32, 8); dim3 grid(divUp(vmap.cols(), block.x), divUp(vmap.rows(), block.y)); cudaTextureObject_t knnTex = getKnnFieldTexture(); extract_knn_for_vmap_kernel << <grid, block >> >(vmap, vmapKnn, m_volume->getOrigion(), 1.f / m_volume->getVoxelSize(), knnTex, ic); cudaSafeCall(cudaGetLastError(), "extract_knn_for_vmap_kernel"); } __global__ void extract_nodes_info_kernel(const float4* nodesDqVw, float* twist, float4* vw, const KnnIdx* nodesKnnIn, KnnIdx* nodesKnnOut, IdxContainter ic, bool single_graph_level) { int iout = blockIdx.x * blockDim.x + threadIdx.x; if (iout >= ic[WarpField::GraphLevelNum]) return; int level = 0; for (int k = 0; k < WarpField::GraphLevelNum; k++) if (iout >= ic[k] && iout < ic[k + 1]) { level = k; break; } int iin = level*WarpField::MaxNodeNum + iout - ic[level]; // write twist Tbx::Dual_quat_cu dq = pack_dual_quat(nodesDqVw[iin * 3], nodesDqVw[iin * 3 + 1]); Tbx::Vec3 r, t; dq.to_twist(r, t); twist[iout * 6 + 0] = r.x; twist[iout * 6 + 1] = r.y; twist[iout * 6 + 2] = r.z; twist[iout * 6 + 3] = t.x; twist[iout * 6 + 4] = t.y; twist[iout * 6 + 5] = t.z; vw[iout] = nodesDqVw[iin * 3 + 2]; // write knn KnnIdx kid = nodesKnnIn[iin]; for (int k = 0; k < KnnK; k++) { if (!single_graph_level) knn_k(kid, k) = (knn_k(kid, k) < ic[level + 1] - ic[level] ? knn_k(kid, k) + ic[level + 1] : ic[WarpField::GraphLevelNum]); else knn_k(kid, k) = (knn_k(kid, k) < WarpField::MaxNodeNum ? 
knn_k(kid, k) : ic[WarpField::GraphLevelNum]); } nodesKnnOut[iout] = kid; } void WarpField::extract_nodes_info(DeviceArray<KnnIdx>& nodesKnn, DeviceArray<float>& twist, DeviceArray<float4>& vw)const { IdxContainter ic; ic[0] = 0; for (int k = 0; k < GraphLevelNum; k++) ic[k + 1] = ic[k] + m_numNodes[k]; if (ic[GraphLevelNum] == 0) return; nodesKnn.create(ic[GraphLevelNum]); twist.create(ic[GraphLevelNum] * 6); vw.create(ic[GraphLevelNum]); extract_nodes_info_no_allocation(nodesKnn, twist, vw); } void WarpField::extract_nodes_info_no_allocation( DeviceArray<KnnIdx>& nodesKnn, DeviceArray<float>& twist, DeviceArray<float4>& vw)const { IdxContainter ic; ic[0] = 0; for (int k = 0; k < GraphLevelNum; k++) ic[k + 1] = ic[k] + m_numNodes[k]; if (ic[GraphLevelNum] == 0) return; dim3 block(256); dim3 grid(divUp(ic[GraphLevelNum], block.x)); extract_nodes_info_kernel << <grid, block >> >(getNodesDqVwPtr(0), twist.ptr(), vw.ptr(), getNodesEdgesPtr(0), nodesKnn.ptr(), ic, m_param.graph_single_level); cudaSafeCall(cudaGetLastError(), "extract_nodes_info_kernel"); } __global__ void update_nodes_via_twist_kernel(float4* nodesDqVw, const float* twist, IdxContainter ic) { int iout = blockIdx.x * blockDim.x + threadIdx.x; if (iout >= ic[WarpField::GraphLevelNum]) return; int level = 0; for (int k = 0; k < WarpField::GraphLevelNum; k++) if (iout >= ic[k] && iout < ic[k + 1]) { level = k; break; } int iin = level*WarpField::MaxNodeNum + iout - ic[level]; // write twist Tbx::Vec3 r, t; r.x = twist[iout * 6 + 0]; r.y = twist[iout * 6 + 1]; r.z = twist[iout * 6 + 2]; t.x = twist[iout * 6 + 3]; t.y = twist[iout * 6 + 4]; t.z = twist[iout * 6 + 5]; Tbx::Dual_quat_cu dq; dq.from_twist(r, t); unpack_dual_quat(dq, nodesDqVw[iin * 3], nodesDqVw[iin * 3 + 1]); } void WarpField::update_nodes_via_twist(const DeviceArray<float>& twist) { IdxContainter ic; ic[0] = 0; for (int k = 0; k < GraphLevelNum; k++) ic[k + 1] = ic[k] + m_numNodes[k]; if (twist.size() < ic[GraphLevelNum]*6) throw std::exception("size not matched in WarpField::update_nodes_via_twist()"); dim3 block(256); dim3 grid(divUp(ic[GraphLevelNum], block.x)); update_nodes_via_twist_kernel << <grid, block >> >(getNodesDqVwPtr(0), twist.ptr(), ic); cudaSafeCall(cudaGetLastError(), "update_nodes_via_twist"); } #pragma endregion #pragma region --getKnnAt __global__ void getKnnAtKernel(KnnIdx* data, int3 p, cudaTextureObject_t tex) { data[0] = read_knn_tex(tex, p.x, p.y, p.z); } KnnIdx WarpField::getKnnAt(float3 volumePos)const { if (m_volume == nullptr) throw std::exception("WarpField::getKnnAt(): null pointer"); float3 ori = m_volume->getOrigion(); float vsz = m_volume->getVoxelSize(); float3 p = (volumePos - ori) / vsz; return getKnnAt(make_int3(p.x, p.y, p.z)); } KnnIdx WarpField::getKnnAt(int3 gridXYZ)const { if (m_volume == nullptr) throw std::exception("WarpField::getKnnAt(): null pointer"); int3 res = m_volume->getResolution(); int x = gridXYZ.x, y = gridXYZ.y, z = gridXYZ.z; if (x < 0 || y < 0 || z < 0 || x >= res.x || y >= res.y || z >= res.z) return make_knn(MaxNodeNum); static DeviceArray<KnnIdx> knn; knn.create(1); cudaTextureObject_t tex = getKnnFieldTexture(); getKnnAtKernel << <dim3(1), dim3(1) >> >(knn.ptr(), gridXYZ, tex); cudaSafeCall(cudaGetLastError(), "WarpField::getKnnAtKernel"); KnnIdx host; cudaSafeCall(cudaMemcpy(&host, knn.ptr(), sizeof(KnnIdx), cudaMemcpyDeviceToHost), "WarpField::getKnnAtKernel, post copy"); return host; } #pragma endregion }
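// Aside: the NewPointsCounter / ValidVoxelCounter structs above compact surviving
// elements with a warp-level pattern (__ballot and __popc to count survivors, one
// atomicAdd per warp to reserve output slots, then a per-lane exclusive rank to
// scatter). Below is a minimal standalone sketch of that idiom using the modern
// *_sync intrinsics; warpCompact, g_outCount and the predicate are illustrative
// names, not taken from the file above.
#include <cuda_runtime.h>

__device__ int g_outCount = 0;                     // analogous to newPoints_global_count

__global__ void warpCompact(const int* in, int* out, int n)
{
    int tid  = blockIdx.x * blockDim.x + threadIdx.x;
    int flag = (tid < n && (in[tid] & 1)) ? 1 : 0;            // example predicate: keep odd values

    unsigned ballot = __ballot_sync(0xffffffffu, flag);       // bitmask of surviving lanes
    int lane  = threadIdx.x & 31;
    int total = __popc(ballot);                               // survivors in this warp
    int base  = 0;
    if (lane == 0 && total > 0)
        base = atomicAdd(&g_outCount, total);                 // one global reservation per warp
    base = __shfl_sync(0xffffffffu, base, 0);                 // broadcast the warp's base offset
    int rank = __popc(ballot & ((1u << lane) - 1u));          // exclusive rank among survivors
    if (flag)
        out[base + rank] = in[tid];
}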
af933b5a202b029adb25df26a44ded9874bd5065.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // #include <hip/hip_vector_types.h> #include <optix_device.h> #include "optixWhitted.h" #include "random.h" #include "helpers.h" #include <cuda/helpers.h> extern "C" { __constant__ Params params; } extern "C" __global__ void __raygen__pinhole_camera() { const uint3 idx = optixGetLaunchIndex(); const uint3 dim = optixGetLaunchDimensions(); const CameraData* camera = (CameraData*) optixGetSbtDataPointer(); const unsigned int image_index = params.width * idx.y + idx.x; unsigned int seed = tea<16>( image_index, params.subframe_index ); // Subpixel jitter: send the ray through a different position inside the pixel each time, // to provide antialiasing. The center of each pixel is at fraction (0.5,0.5) float2 subpixel_jitter = params.subframe_index == 0 ? make_float2(0.5f, 0.5f) : make_float2(rnd( seed ), rnd( seed )); float2 d = ((make_float2(idx.x, idx.y) + subpixel_jitter) / make_float2(params.width, params.height)) * 2.f - 1.f; float3 ray_origin = camera->eye; float3 ray_direction = normalize(d.x*camera->U + d.y*camera->V + camera->W); RadiancePRD prd; prd.importance = 1.f; prd.depth = 0; optixTrace( params.handle, ray_origin, ray_direction, params.scene_epsilon, 1e16f, 0.0f, OptixVisibilityMask( 1 ), OPTIX_RAY_FLAG_NONE, RAY_TYPE_RADIANCE, RAY_TYPE_COUNT, RAY_TYPE_RADIANCE, float3_as_args(prd.result), reinterpret_cast<unsigned int&>(prd.importance), reinterpret_cast<unsigned int&>(prd.depth) ); float4 acc_val = params.accum_buffer[image_index]; if( params.subframe_index > 0 ) { acc_val = lerp( acc_val, make_float4( prd.result, 0.f), 1.0f / static_cast<float>( params.subframe_index+1 ) ); } else { acc_val = make_float4(prd.result, 0.f); } params.frame_buffer[image_index] = make_color( acc_val ); params.accum_buffer[image_index] = acc_val; }
af933b5a202b029adb25df26a44ded9874bd5065.cu
// // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // #include <vector_types.h> #include <optix_device.h> #include "optixWhitted.h" #include "random.h" #include "helpers.h" #include <cuda/helpers.h> extern "C" { __constant__ Params params; } extern "C" __global__ void __raygen__pinhole_camera() { const uint3 idx = optixGetLaunchIndex(); const uint3 dim = optixGetLaunchDimensions(); const CameraData* camera = (CameraData*) optixGetSbtDataPointer(); const unsigned int image_index = params.width * idx.y + idx.x; unsigned int seed = tea<16>( image_index, params.subframe_index ); // Subpixel jitter: send the ray through a different position inside the pixel each time, // to provide antialiasing. The center of each pixel is at fraction (0.5,0.5) float2 subpixel_jitter = params.subframe_index == 0 ? make_float2(0.5f, 0.5f) : make_float2(rnd( seed ), rnd( seed )); float2 d = ((make_float2(idx.x, idx.y) + subpixel_jitter) / make_float2(params.width, params.height)) * 2.f - 1.f; float3 ray_origin = camera->eye; float3 ray_direction = normalize(d.x*camera->U + d.y*camera->V + camera->W); RadiancePRD prd; prd.importance = 1.f; prd.depth = 0; optixTrace( params.handle, ray_origin, ray_direction, params.scene_epsilon, 1e16f, 0.0f, OptixVisibilityMask( 1 ), OPTIX_RAY_FLAG_NONE, RAY_TYPE_RADIANCE, RAY_TYPE_COUNT, RAY_TYPE_RADIANCE, float3_as_args(prd.result), reinterpret_cast<unsigned int&>(prd.importance), reinterpret_cast<unsigned int&>(prd.depth) ); float4 acc_val = params.accum_buffer[image_index]; if( params.subframe_index > 0 ) { acc_val = lerp( acc_val, make_float4( prd.result, 0.f), 1.0f / static_cast<float>( params.subframe_index+1 ) ); } else { acc_val = make_float4(prd.result, 0.f); } params.frame_buffer[image_index] = make_color( acc_val ); params.accum_buffer[image_index] = acc_val; }
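// Aside: the only substantive difference between the two files in the pair above
// is the include set: hipify prepends hip/hip_runtime.h and maps <vector_types.h>
// to <hip/hip_vector_types.h>. A hedged single-source sketch of the same mapping;
// the macro guard shown is one common choice, not something taken from this project.
#if defined(__HIP__) || defined(__HIP_PLATFORM_AMD__)
  #include <hip/hip_runtime.h>
  #include <hip/hip_vector_types.h>
#else
  #include <cuda_runtime.h>
  #include <vector_types.h>
#endif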
045cf30c95c1e0b7d4cfb8094e05966e83b31614.hip
// !!! This is a file automatically generated by hipify!!! #include <windows.h> #include "avisynth.h" #include <hip/hip_runtime_api.h> #include <cuda_device_runtime_api.h> int nblocks(int n, int block) { return (n + block - 1) / block; } class InvertNeg : public GenericVideoFilter { public: InvertNeg(PClip _child, IScriptEnvironment* env); PVideoFrame __stdcall GetFrame(int n, IScriptEnvironment* env); int __stdcall SetCacheHints(int cachehints, int frame_range); }; InvertNeg::InvertNeg(PClip _child, IScriptEnvironment* env) : GenericVideoFilter(_child) { if (!vi.IsPlanar() || !vi.IsYUV()) { env->ThrowError("InvertNeg: planar YUV data only!"); } } __global__ void InvertNegKernel( const unsigned char* srcp, unsigned char* dstp, int src_pitch, int dst_pitch, int row_size, int height) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < row_size && y < height) { dstp[x + y * dst_pitch] = srcp[x + y * src_pitch] ^ 255; } } PVideoFrame __stdcall InvertNeg::GetFrame(int n, IScriptEnvironment* env_) { IScriptEnvironment2* env = static_cast<IScriptEnvironment2*>(env_); if (env->GetProperty(AEP_DEVICE_TYPE) != DEV_TYPE_CUDA) { env->ThrowError("InvertNeg: Only CUDA frame is supported."); } PVideoFrame src = child->GetFrame(n, env); PVideoFrame dst = env->NewVideoFrame(vi); int planes[] = { PLANAR_Y, PLANAR_V, PLANAR_U }; for (int p = 0; p<3; p++) { const unsigned char* srcp = src->GetReadPtr(planes[p]); unsigned char* dstp = dst->GetWritePtr(planes[p]); int src_pitch = src->GetPitch(planes[p]); int dst_pitch = dst->GetPitch(planes[p]); int row_size = dst->GetRowSize(planes[p]); int height = dst->GetHeight(planes[p]); dim3 threads(32, 16); dim3 blocks(nblocks(row_size, threads.x), nblocks(height, threads.y)); InvertNegKernel << <blocks, threads >> >(srcp, dstp, src_pitch, dst_pitch, row_size, height); } return dst; } int __stdcall InvertNeg::SetCacheHints(int cachehints, int frame_range) { if (cachehints == CACHE_GET_MTMODE) return MT_NICE_FILTER; if (cachehints == CACHE_GET_DEV_TYPE) return DEV_TYPE_CUDA; // Only CUDA is supported return 0; } AVSValue __cdecl Create_InvertNeg(AVSValue args, void* user_data, IScriptEnvironment* env) { return new InvertNeg(args[0].AsClip(), env); } const AVS_Linkage *AVS_linkage = 0; extern "C" __declspec(dllexport) const char* __stdcall AvisynthPluginInit3(IScriptEnvironment* env, const AVS_Linkage* const vectors) { AVS_linkage = vectors; env->AddFunction("InvertNeg", "c", Create_InvertNeg, 0); return "CUDA InvertNeg sample plugin"; }
045cf30c95c1e0b7d4cfb8094e05966e83b31614.cu
#include <windows.h> #include "avisynth.h" #include <cuda_runtime_api.h> #include <cuda_device_runtime_api.h> int nblocks(int n, int block) { return (n + block - 1) / block; } class InvertNeg : public GenericVideoFilter { public: InvertNeg(PClip _child, IScriptEnvironment* env); PVideoFrame __stdcall GetFrame(int n, IScriptEnvironment* env); int __stdcall SetCacheHints(int cachehints, int frame_range); }; InvertNeg::InvertNeg(PClip _child, IScriptEnvironment* env) : GenericVideoFilter(_child) { if (!vi.IsPlanar() || !vi.IsYUV()) { env->ThrowError("InvertNeg: planar YUV data only!"); } } __global__ void InvertNegKernel( const unsigned char* srcp, unsigned char* dstp, int src_pitch, int dst_pitch, int row_size, int height) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < row_size && y < height) { dstp[x + y * dst_pitch] = srcp[x + y * src_pitch] ^ 255; } } PVideoFrame __stdcall InvertNeg::GetFrame(int n, IScriptEnvironment* env_) { IScriptEnvironment2* env = static_cast<IScriptEnvironment2*>(env_); if (env->GetProperty(AEP_DEVICE_TYPE) != DEV_TYPE_CUDA) { env->ThrowError("InvertNeg: Only CUDA frame is supported."); } PVideoFrame src = child->GetFrame(n, env); PVideoFrame dst = env->NewVideoFrame(vi); int planes[] = { PLANAR_Y, PLANAR_V, PLANAR_U }; for (int p = 0; p<3; p++) { const unsigned char* srcp = src->GetReadPtr(planes[p]); unsigned char* dstp = dst->GetWritePtr(planes[p]); int src_pitch = src->GetPitch(planes[p]); int dst_pitch = dst->GetPitch(planes[p]); int row_size = dst->GetRowSize(planes[p]); int height = dst->GetHeight(planes[p]); dim3 threads(32, 16); dim3 blocks(nblocks(row_size, threads.x), nblocks(height, threads.y)); InvertNegKernel << <blocks, threads >> >(srcp, dstp, src_pitch, dst_pitch, row_size, height); } return dst; } int __stdcall InvertNeg::SetCacheHints(int cachehints, int frame_range) { if (cachehints == CACHE_GET_MTMODE) return MT_NICE_FILTER; if (cachehints == CACHE_GET_DEV_TYPE) return DEV_TYPE_CUDA; // Only CUDA is supported return 0; } AVSValue __cdecl Create_InvertNeg(AVSValue args, void* user_data, IScriptEnvironment* env) { return new InvertNeg(args[0].AsClip(), env); } const AVS_Linkage *AVS_linkage = 0; extern "C" __declspec(dllexport) const char* __stdcall AvisynthPluginInit3(IScriptEnvironment* env, const AVS_Linkage* const vectors) { AVS_linkage = vectors; env->AddFunction("InvertNeg", "c", Create_InvertNeg, 0); return "CUDA InvertNeg sample plugin"; }
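// Aside: GetFrame above launches InvertNegKernel once per plane without inspecting
// the launch result. Purely as a hedged sketch (checkLaunch is an illustrative
// helper, not part of the plugin), a debug build could surface launch-configuration
// failures like this:
#include <cuda_runtime_api.h>
#include <cstdio>

inline bool checkLaunch(const char* what)
{
    cudaError_t err = cudaGetLastError();          // reports invalid grid/block configs, etc.
    if (err != cudaSuccess) {
        fprintf(stderr, "%s: %s\n", what, cudaGetErrorString(err));
        return false;
    }
    return true;
}
// Usage sketch: InvertNegKernel<<<blocks, threads>>>(...); checkLaunch("InvertNegKernel");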
927626216c4155276219c4b27228f2019c63004c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // This file is part of the JetBrains-Research/CFPQ-on-GPGPU project. // Project link https://github.com/JetBrains-Research/CFPQ-on-GPGPU // Project is licensed under MIT license. // License link https://github.com/JetBrains-Research/CFPQ-on-GPGPU/blob/master/LICENSE // // This source code files are used to compare the performance of the // boolean matrix multiplication between two dense matrix multiplications implementations. #include <iostream> #include <naive-gpu-shared/Multiplication.h> #include <naive-gpu-shared/Parameters.h> #define cuda_handle_error(ans) { gpuAssert((ans), __FILE__, __LINE__); } namespace naive_gpu_shared { using namespace gpu_lib; inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true) { if (code != hipSuccess) { std::cout << "GPU assert: " << hipGetErrorString(code) << " " << file << " " << line << std::endl; if (abort) { exit(code); } } } __device__ bool matrix_was_changed; int gpu_lib::rows(int N) { return N / TYPE_SIZE + (N % TYPE_SIZE ? 1 : 0); } int gpu_lib::cols(int N) { return N; } inline size_t matrix_memsize(int N) { return rows(N) * cols(N) * sizeof(TYPE); } __device__ TYPE row_column_product(TYPE *A, TYPE *B, int cols) { int x = blockIdx.x * blockDim.x + threadIdx.x; int rows = cols / TYPE_SIZE + (cols % TYPE_SIZE ? 1 : 0); int row_start = blockIdx.y * cols; __shared__ TYPE A_shared[THREADS_PER_BLOCK]; TYPE acc = 0; TYPE b_el; for (TYPE i = 0; i < rows; ++i) { if (i == (rows - 1) && x >= cols) { return 0; } if ((i % (THREADS_PER_BLOCK / TYPE_SIZE)) == 0) { A_shared[threadIdx.x] = A[row_start + i * TYPE_SIZE + threadIdx.x]; if (THREADS_PER_BLOCK > 32) { __syncthreads(); } } __syncthreads(); b_el = B[i * cols + x]; #pragma unroll for (TYPE b = 0; b < TYPE_SIZE; ++b) { if (b_el & 1) { acc |= A_shared[(i % (THREADS_PER_BLOCK / TYPE_SIZE)) * TYPE_SIZE + b]; } b_el >>= 1; } } return acc; } __device__ void or_value(TYPE *M, TYPE val) { TYPE old_value = *M; if (old_value != (val | old_value)) { matrix_was_changed = true; *M = val | old_value; } } __global__ void matrix_product_add(TYPE *A, TYPE *B, TYPE *C, int cols) { int x = blockIdx.x * blockDim.x + threadIdx.x; int row_start = blockIdx.y * cols; TYPE acc = row_column_product(A, B, cols); if (acc == 0) return; or_value(&C[row_start + x], acc); } __global__ void matrix_product(TYPE *A, TYPE *B, TYPE *C, int cols) { int x = blockIdx.x * blockDim.x + threadIdx.x; int row_start = blockIdx.y * cols; TYPE acc = row_column_product(A, B, cols); if (acc == 0) return; C[row_start + x] = acc; } __global__ void matrix_add_to_left(TYPE *A, TYPE *B, int cols) { int index = blockIdx.y * cols + blockIdx.x * blockDim.x + threadIdx.x; if ((blockIdx.x * blockDim.x + threadIdx.x) >= cols) return; or_value(&A[index], B[index]); } void gpu_lib::synchronize() { hipDeviceSynchronize(); } void set_value(int N, TYPE *d_M, int val) { cuda_handle_error(hipMemset(d_M, val, matrix_memsize(N))); } TYPE *gpu_lib::device_matrix_alloc(int N) { TYPE *d_M; cuda_handle_error(hipMalloc(reinterpret_cast<void **>(&d_M), matrix_memsize(N))); return d_M; } void gpu_lib::device_matrix_dealloc(TYPE *M) { cuda_handle_error(hipFree(M)); } TYPE *gpu_lib::host_matrix_calloc(int N) { TYPE *M; cuda_handle_error(hipHostMalloc(reinterpret_cast<void **>(&M), matrix_memsize(N))); set_value(N, M, 0); return M; } void gpu_lib::host_matrix_dealloc(TYPE *M) { cuda_handle_error(hipHostFree(M)); } void gpu_lib::gpu_to_cpu_transfer_async(int N, TYPE 
*d_M, TYPE *h_M) { cuda_handle_error(hipMemcpyAsync(h_M, d_M, matrix_memsize(N), hipMemcpyDeviceToHost)); } void gpu_lib::cpu_to_gpu_transfer_async(int N, TYPE *h_M, TYPE *d_M) { cuda_handle_error(hipMemcpyAsync(d_M, h_M, matrix_memsize(N), hipMemcpyHostToDevice)); } void set_flag() { bool flag = false; cuda_handle_error(hipMemcpyToSymbol(matrix_was_changed, &flag, sizeof(bool))); } bool get_flag() { bool flag; cuda_handle_error(hipMemcpyFromSymbol(&flag, matrix_was_changed, sizeof(bool))); return flag; } bool gpu_lib::matrix_product_add_wrapper(TYPE *A, TYPE *B, TYPE *C, int N, TYPE *tmp_matrix) { bool safe = (A == C) || (B == C); dim3 threads(THREADS_PER_BLOCK); dim3 blocks(cols(N) / THREADS_PER_BLOCK + (cols(N) % THREADS_PER_BLOCK ? 1 : 0), rows(N)); set_flag(); if (safe) { hipLaunchKernelGGL(( matrix_product) , dim3(blocks), dim3(threads), 0, 0, A, B, tmp_matrix, cols(N)); synchronize(); cuda_handle_error(hipGetLastError()); hipLaunchKernelGGL(( matrix_add_to_left) , dim3(blocks), dim3(threads), 0, 0, C, tmp_matrix, cols(N)); synchronize(); cuda_handle_error(hipGetLastError()); } else { hipLaunchKernelGGL(( matrix_product_add) , dim3(blocks), dim3(threads), 0, 0, A, B, C, cols(N)); synchronize(); cuda_handle_error(hipGetLastError()); } return get_flag(); } }
927626216c4155276219c4b27228f2019c63004c.cu
// This file is part of the JetBrains-Research/CFPQ-on-GPGPU project. // Project link https://github.com/JetBrains-Research/CFPQ-on-GPGPU // Project is licensed under MIT license. // License link https://github.com/JetBrains-Research/CFPQ-on-GPGPU/blob/master/LICENSE // // This source code files are used to compare the performance of the // boolean matrix multiplication between two dense matrix multiplications implementations. #include <iostream> #include <naive-gpu-shared/Multiplication.h> #include <naive-gpu-shared/Parameters.h> #define cuda_handle_error(ans) { gpuAssert((ans), __FILE__, __LINE__); } namespace naive_gpu_shared { using namespace gpu_lib; inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) { if (code != cudaSuccess) { std::cout << "GPU assert: " << cudaGetErrorString(code) << " " << file << " " << line << std::endl; if (abort) { exit(code); } } } __device__ bool matrix_was_changed; int gpu_lib::rows(int N) { return N / TYPE_SIZE + (N % TYPE_SIZE ? 1 : 0); } int gpu_lib::cols(int N) { return N; } inline size_t matrix_memsize(int N) { return rows(N) * cols(N) * sizeof(TYPE); } __device__ TYPE row_column_product(TYPE *A, TYPE *B, int cols) { int x = blockIdx.x * blockDim.x + threadIdx.x; int rows = cols / TYPE_SIZE + (cols % TYPE_SIZE ? 1 : 0); int row_start = blockIdx.y * cols; __shared__ TYPE A_shared[THREADS_PER_BLOCK]; TYPE acc = 0; TYPE b_el; for (TYPE i = 0; i < rows; ++i) { if (i == (rows - 1) && x >= cols) { return 0; } if ((i % (THREADS_PER_BLOCK / TYPE_SIZE)) == 0) { A_shared[threadIdx.x] = A[row_start + i * TYPE_SIZE + threadIdx.x]; if (THREADS_PER_BLOCK > 32) { __syncthreads(); } } __syncthreads(); b_el = B[i * cols + x]; #pragma unroll for (TYPE b = 0; b < TYPE_SIZE; ++b) { if (b_el & 1) { acc |= A_shared[(i % (THREADS_PER_BLOCK / TYPE_SIZE)) * TYPE_SIZE + b]; } b_el >>= 1; } } return acc; } __device__ void or_value(TYPE *M, TYPE val) { TYPE old_value = *M; if (old_value != (val | old_value)) { matrix_was_changed = true; *M = val | old_value; } } __global__ void matrix_product_add(TYPE *A, TYPE *B, TYPE *C, int cols) { int x = blockIdx.x * blockDim.x + threadIdx.x; int row_start = blockIdx.y * cols; TYPE acc = row_column_product(A, B, cols); if (acc == 0) return; or_value(&C[row_start + x], acc); } __global__ void matrix_product(TYPE *A, TYPE *B, TYPE *C, int cols) { int x = blockIdx.x * blockDim.x + threadIdx.x; int row_start = blockIdx.y * cols; TYPE acc = row_column_product(A, B, cols); if (acc == 0) return; C[row_start + x] = acc; } __global__ void matrix_add_to_left(TYPE *A, TYPE *B, int cols) { int index = blockIdx.y * cols + blockIdx.x * blockDim.x + threadIdx.x; if ((blockIdx.x * blockDim.x + threadIdx.x) >= cols) return; or_value(&A[index], B[index]); } void gpu_lib::synchronize() { cudaDeviceSynchronize(); } void set_value(int N, TYPE *d_M, int val) { cuda_handle_error(cudaMemset(d_M, val, matrix_memsize(N))); } TYPE *gpu_lib::device_matrix_alloc(int N) { TYPE *d_M; cuda_handle_error(cudaMalloc(reinterpret_cast<void **>(&d_M), matrix_memsize(N))); return d_M; } void gpu_lib::device_matrix_dealloc(TYPE *M) { cuda_handle_error(cudaFree(M)); } TYPE *gpu_lib::host_matrix_calloc(int N) { TYPE *M; cuda_handle_error(cudaMallocHost(reinterpret_cast<void **>(&M), matrix_memsize(N))); set_value(N, M, 0); return M; } void gpu_lib::host_matrix_dealloc(TYPE *M) { cuda_handle_error(cudaFreeHost(M)); } void gpu_lib::gpu_to_cpu_transfer_async(int N, TYPE *d_M, TYPE *h_M) { cuda_handle_error(cudaMemcpyAsync(h_M, d_M, matrix_memsize(N), 
cudaMemcpyDeviceToHost)); } void gpu_lib::cpu_to_gpu_transfer_async(int N, TYPE *h_M, TYPE *d_M) { cuda_handle_error(cudaMemcpyAsync(d_M, h_M, matrix_memsize(N), cudaMemcpyHostToDevice)); } void set_flag() { bool flag = false; cuda_handle_error(cudaMemcpyToSymbol(matrix_was_changed, &flag, sizeof(bool))); } bool get_flag() { bool flag; cuda_handle_error(cudaMemcpyFromSymbol(&flag, matrix_was_changed, sizeof(bool))); return flag; } bool gpu_lib::matrix_product_add_wrapper(TYPE *A, TYPE *B, TYPE *C, int N, TYPE *tmp_matrix) { bool safe = (A == C) || (B == C); dim3 threads(THREADS_PER_BLOCK); dim3 blocks(cols(N) / THREADS_PER_BLOCK + (cols(N) % THREADS_PER_BLOCK ? 1 : 0), rows(N)); set_flag(); if (safe) { matrix_product <<<blocks, threads>>>(A, B, tmp_matrix, cols(N)); synchronize(); cuda_handle_error(cudaGetLastError()); matrix_add_to_left <<<blocks, threads>>>(C, tmp_matrix, cols(N)); synchronize(); cuda_handle_error(cudaGetLastError()); } else { matrix_product_add <<<blocks, threads>>>(A, B, C, cols(N)); synchronize(); cuda_handle_error(cudaGetLastError()); } return get_flag(); } }
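// Aside: this pair shows the two launch notations side by side: the .cu file keeps
// the chevron syntax, while the hipified file rewrites each launch as
// hipLaunchKernelGGL(kernel, grid, block, sharedMemBytes, stream, args...).
// A minimal sketch of that mapping with an illustrative kernel; scale is not from
// the file above.
#if defined(__HIP__)
  #include <hip/hip_runtime.h>
#else
  #include <cuda_runtime.h>
#endif

__global__ void scale(float* v, float s, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) v[i] *= s;
}

void launchScale(float* d_v, float s, int n)
{
    dim3 threads(256);
    dim3 blocks((n + threads.x - 1) / threads.x);
#if defined(__HIP__)
    // HIP macro form: grid, block, dynamic shared memory bytes, stream, then kernel arguments.
    hipLaunchKernelGGL(scale, blocks, threads, 0, 0, d_v, s, n);
#else
    scale<<<blocks, threads>>>(d_v, s, n);         // CUDA chevron form, as in the .cu file
#endif
}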
7c4ccb8971e23c56aa1ef06476fc0b0515599510.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //This is our CUDA thread //d_a is the word list array //maxidx is the maximum index in the array (if there are more threads than words) //v1 through v4 are the uint values of the correct md5 hash __device__ void IncrementBruteGPU(unsigned char* ourBrute, uint charSetLen, uint bruteLength, uint incrementBy) { int i = 0; while(incrementBy > 0 && i < bruteLength) { int add = incrementBy + ourBrute[i]; ourBrute[i] = add % charSetLen; incrementBy = add / charSetLen; i++; } } __global__ void crack(uint numThreads, uint charSetLen, uint bruteLength, uint v1, uint v2, uint v3, uint v4) { //compute our index number uint idx = (blockIdx.x*blockDim.x + threadIdx.x); int totalLen = 0; int bruteStart = 0; unsigned char word[MAX_TOTAL]; unsigned char ourBrute[MAX_BRUTE_LENGTH]; int i = 0; for(i = 0; i < MAX_BRUTE_LENGTH; i++) { ourBrute[i] = cudaBrute[i]; } i = 0; int ary_i = 0; unsigned char tmp = 0; while((tmp = cudaLeftSalt[ary_i]) != 0) { word[i] = tmp; i++; ary_i++; } bruteStart = i; i+= bruteLength; ary_i = 0; while((tmp = cudaRightSalt[ary_i]) != 0) { word[i] = tmp; i++; ary_i++; } totalLen = i; IncrementBruteGPU(ourBrute, charSetLen, bruteLength, idx); int timer = 0; for(timer = 0; timer < MD5_PER_KERNEL; timer++) { //Now, substitute the values into the string for(i = 0; i < bruteLength; i++) { word[i+bruteStart] = cudaCharSet[ourBrute[i]]; } uint c1 = 0, c2 = 0, c3 = 0, c4 = 0; //get the md5 hash of the word md5_vfy(word,totalLen, &c1, &c2, &c3, &c4); //compare hash with correct hash if(c1 == v1 && c2 == v2 && c3 == v3 && c4 == v4) { //put the correct password in the first indexes of the array, right after the sentinal int j; for(j= 0; j < MAX_TOTAL; j++) { correctPass[j] = word[j]; } correctPass[totalLen] = 0; } IncrementBruteGPU(ourBrute, charSetLen, bruteLength, numThreads); } }
7c4ccb8971e23c56aa1ef06476fc0b0515599510.cu
//This is our CUDA thread //d_a is the word list array //maxidx is the maximum index in the array (if there are more threads than words) //v1 through v4 are the uint values of the correct md5 hash __device__ void IncrementBruteGPU(unsigned char* ourBrute, uint charSetLen, uint bruteLength, uint incrementBy) { int i = 0; while(incrementBy > 0 && i < bruteLength) { int add = incrementBy + ourBrute[i]; ourBrute[i] = add % charSetLen; incrementBy = add / charSetLen; i++; } } __global__ void crack(uint numThreads, uint charSetLen, uint bruteLength, uint v1, uint v2, uint v3, uint v4) { //compute our index number uint idx = (blockIdx.x*blockDim.x + threadIdx.x); int totalLen = 0; int bruteStart = 0; unsigned char word[MAX_TOTAL]; unsigned char ourBrute[MAX_BRUTE_LENGTH]; int i = 0; for(i = 0; i < MAX_BRUTE_LENGTH; i++) { ourBrute[i] = cudaBrute[i]; } i = 0; int ary_i = 0; unsigned char tmp = 0; while((tmp = cudaLeftSalt[ary_i]) != 0) { word[i] = tmp; i++; ary_i++; } bruteStart = i; i+= bruteLength; ary_i = 0; while((tmp = cudaRightSalt[ary_i]) != 0) { word[i] = tmp; i++; ary_i++; } totalLen = i; IncrementBruteGPU(ourBrute, charSetLen, bruteLength, idx); int timer = 0; for(timer = 0; timer < MD5_PER_KERNEL; timer++) { //Now, substitute the values into the string for(i = 0; i < bruteLength; i++) { word[i+bruteStart] = cudaCharSet[ourBrute[i]]; } uint c1 = 0, c2 = 0, c3 = 0, c4 = 0; //get the md5 hash of the word md5_vfy(word,totalLen, &c1, &c2, &c3, &c4); //compare hash with correct hash if(c1 == v1 && c2 == v2 && c3 == v3 && c4 == v4) { //put the correct password in the first indexes of the array, right after the sentinal int j; for(j= 0; j < MAX_TOTAL; j++) { correctPass[j] = word[j]; } correctPass[totalLen] = 0; } IncrementBruteGPU(ourBrute, charSetLen, bruteLength, numThreads); } }
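// Aside: the crack kernel above reads cudaBrute, cudaCharSet, cudaLeftSalt,
// cudaRightSalt and correctPass, which are defined elsewhere in the original
// project along with md5_vfy and the MAX_* constants. Purely as a hedged sketch
// of how such device symbols are typically declared and filled; every name and
// size below is an assumption, not taken from that project.
#include <cuda_runtime.h>

#define MAX_BRUTE_LENGTH 16                        // assumed value, for illustration only
__constant__ unsigned char cudaBrute[MAX_BRUTE_LENGTH];
__constant__ unsigned char cudaCharSet[96];

// Copy host-side brute-force state into the constant symbols the kernel reads.
void uploadBruteState(const unsigned char* brute, const unsigned char* charset, size_t charsetLen)
{
    cudaMemcpyToSymbol(cudaBrute, brute, MAX_BRUTE_LENGTH);
    cudaMemcpyToSymbol(cudaCharSet, charset, charsetLen);
}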
eaeb3297e6588c6478b999aa78214254fa6e6c84.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** renderbox2 - a physically based gpu renderer for research purposes Copyright (C) - 2014 - Srinath Ravichandran This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ // Application specific headers. #include <accelerators/sbvh/cudatracer.h> #include <core/intersection.h> #include <integrators/integrator_raycast.h> #include <util/cudatimer.h> // Cuda specific headers. // Standard c++ headers. namespace renderbox2 { // // Ray Cast Integrator update kernel. // __global__ void kernel_ray_cast_update( CameraSampleBuffer sample_buffer, IntersectionBuffer isect_buffer, MaterialBuffer material_buffer, const uint32_t* material_ids, RayCastShade shade_mode ) { for (GIndexType tidx = threadIdx.x + blockIdx.x * blockDim.x; tidx < sample_buffer.m_size; tidx += gridDim.x * blockDim.x) { float4 color = make_float4(0.0f); float4 colors[15] = { make_float4(1.0f, 0.0f, 0.0f, 0.0f), // red make_float4(0.0f, 1.0f, 0.0f, 0.0f), // green make_float4(0.0f, 0.0f, 1.0f, 0.0f), // blue make_float4(0.0f, 1.0f, 1.0f, 0.0f), // yellow make_float4(1.0f, 0.0f, 1.0f, 0.0f), // magenta make_float4(1.0f, 1.0f, 0.0f, 0.0f), // cyan make_float4(0.1f, 0.1f, 0.1f, 0.0f), // dull gray make_float4(0.5f, 0.5f, 0.5f, 0.0f), // medium gray make_float4(1.0f, 1.0f, 1.0f, 0.0f), // full white make_float4(0.0f, 0.25f, 0.5f, 0.0f), // dark greenish make_float4(1.0f, 0.5f, 0.0f, 0.0f), // orangeish make_float4(0.5f, 0.25f, 0.0f, 0.0f), // brown make_float4(0.2f, 0.9f, 1.0f, 0.0f), make_float4(0.7f, 0.3f, 0.1f, 0.0f), make_float4(0.34f, 0.89f, 0.45f, 0.0f)}; if (shade_mode == RayCastShade::SHADE_PRIMITIVE_ID) { if (isect_buffer.m_intersected[tidx] == 1) color = colors[isect_buffer.m_primitive_id[tidx] % 15]; else color = make_float4(0.0f); } else if (shade_mode == RayCastShade::SHADE_MATERIAL_ID) { if (isect_buffer.m_intersected[tidx] == 1) { const uint32_t primitive_id = isect_buffer.m_primitive_id[tidx]; const uint32_t mid = material_ids[primitive_id]; const Material& material = material_buffer.m_materials[mid]; const BsdfType type = material.layer_bsdf_type[0]; // assuming zeroth layer color only. 
const uint32_t bsdf_param_index = material.layer_bsdf_id[0]; const uint32_t emitter = material.m_emitter; if (type == BSDF_LAMBERTIAN && emitter != 1) { const LambertianBsdfParams& params = material_buffer.m_lambertian_bsdfs[bsdf_param_index]; color = params.color; } else if (type == BSDF_LAMBERTIAN && emitter == 1) { const DiffuseEmitterParams& params = material_buffer.m_diffuse_emitter_bsdfs[bsdf_param_index]; color = params.color; } else if (type == BSDF_GLASS) { const GlassBsdfParams& params = material_buffer.m_glass_bsdfs[bsdf_param_index]; color = params.color; } else if (type == BSDF_MIRROR) { const MirrorBsdfParams& params = material_buffer.m_mirror_bsdfs[bsdf_param_index]; color = params.color; } else if (type == BSDF_BLINN_MICROFACET) { const MicrofacetBsdfParams& params = material_buffer.m_microfacet_bsdfs[bsdf_param_index]; color = params.R; } } } else if (shade_mode == RayCastShade::SHADE_NORMALS) { if (isect_buffer.m_intersected[tidx] == 1) color = make_float4(isect_buffer.m_shading_normal[0][tidx], isect_buffer.m_shading_normal[1][tidx], isect_buffer.m_shading_normal[2][tidx], 0.0f); else color = make_float4(0.0f); } else if (shade_mode == RayCastShade::SHADE_UVS) { } else if (shade_mode == RayCastShade::SHADE_UNIFORM) { color = isect_buffer.m_intersected[tidx] == 1 ? make_float4(1.0f) : make_float4(0.0f); } sample_buffer.m_contribution[tidx] = color; } } // // Ray Cast Integrator's compute method calls the tracer kernels and computes all the results. // NOTE: Number of elements in the camera sample buffer and ray buffer are the same. One to One correspondence. // void IntegratorRayCast::compute(CameraSampleBuffer* csb, RayBuffer* rb) { IntersectionBufferClass ibc(m_allocator); ibc.allocate(rb->m_size); IntersectionBuffer isect = ibc.get_buffer(); SceneBuffer sb = m_scene->gpu_get_buffer(); MaterialBuffer mb = m_scene->gpu_get_material_buffer(); const uint32_t* material_ids = m_scene->gpu_get_tri_material_ids(); m_tracer->trace(*rb, isect, sb, NULL, false); dim3 grid_size(256, 1, 1); dim3 block_size(256, 1, 1); CudaTimer t1("update kernel timer"); t1.start(); hipLaunchKernelGGL(( kernel_ray_cast_update), dim3(grid_size), dim3(block_size), 0, 0, *csb, isect, mb, material_ids, m_params.m_shade); t1.stop(); } }
eaeb3297e6588c6478b999aa78214254fa6e6c84.cu
/** renderbox2 - a physically based gpu renderer for research purposes Copyright (C) - 2014 - Srinath Ravichandran This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ // Application specific headers. #include <accelerators/sbvh/cudatracer.h> #include <core/intersection.h> #include <integrators/integrator_raycast.h> #include <util/cudatimer.h> // Cuda specific headers. // Standard c++ headers. namespace renderbox2 { // // Ray Cast Integrator update kernel. // __global__ void kernel_ray_cast_update( CameraSampleBuffer sample_buffer, IntersectionBuffer isect_buffer, MaterialBuffer material_buffer, const uint32_t* material_ids, RayCastShade shade_mode ) { for (GIndexType tidx = threadIdx.x + blockIdx.x * blockDim.x; tidx < sample_buffer.m_size; tidx += gridDim.x * blockDim.x) { float4 color = make_float4(0.0f); float4 colors[15] = { make_float4(1.0f, 0.0f, 0.0f, 0.0f), // red make_float4(0.0f, 1.0f, 0.0f, 0.0f), // green make_float4(0.0f, 0.0f, 1.0f, 0.0f), // blue make_float4(0.0f, 1.0f, 1.0f, 0.0f), // yellow make_float4(1.0f, 0.0f, 1.0f, 0.0f), // magenta make_float4(1.0f, 1.0f, 0.0f, 0.0f), // cyan make_float4(0.1f, 0.1f, 0.1f, 0.0f), // dull gray make_float4(0.5f, 0.5f, 0.5f, 0.0f), // medium gray make_float4(1.0f, 1.0f, 1.0f, 0.0f), // full white make_float4(0.0f, 0.25f, 0.5f, 0.0f), // dark greenish make_float4(1.0f, 0.5f, 0.0f, 0.0f), // orangeish make_float4(0.5f, 0.25f, 0.0f, 0.0f), // brown make_float4(0.2f, 0.9f, 1.0f, 0.0f), make_float4(0.7f, 0.3f, 0.1f, 0.0f), make_float4(0.34f, 0.89f, 0.45f, 0.0f)}; if (shade_mode == RayCastShade::SHADE_PRIMITIVE_ID) { if (isect_buffer.m_intersected[tidx] == 1) color = colors[isect_buffer.m_primitive_id[tidx] % 15]; else color = make_float4(0.0f); } else if (shade_mode == RayCastShade::SHADE_MATERIAL_ID) { if (isect_buffer.m_intersected[tidx] == 1) { const uint32_t primitive_id = isect_buffer.m_primitive_id[tidx]; const uint32_t mid = material_ids[primitive_id]; const Material& material = material_buffer.m_materials[mid]; const BsdfType type = material.layer_bsdf_type[0]; // assuming zeroth layer color only. 
const uint32_t bsdf_param_index = material.layer_bsdf_id[0]; const uint32_t emitter = material.m_emitter; if (type == BSDF_LAMBERTIAN && emitter != 1) { const LambertianBsdfParams& params = material_buffer.m_lambertian_bsdfs[bsdf_param_index]; color = params.color; } else if (type == BSDF_LAMBERTIAN && emitter == 1) { const DiffuseEmitterParams& params = material_buffer.m_diffuse_emitter_bsdfs[bsdf_param_index]; color = params.color; } else if (type == BSDF_GLASS) { const GlassBsdfParams& params = material_buffer.m_glass_bsdfs[bsdf_param_index]; color = params.color; } else if (type == BSDF_MIRROR) { const MirrorBsdfParams& params = material_buffer.m_mirror_bsdfs[bsdf_param_index]; color = params.color; } else if (type == BSDF_BLINN_MICROFACET) { const MicrofacetBsdfParams& params = material_buffer.m_microfacet_bsdfs[bsdf_param_index]; color = params.R; } } } else if (shade_mode == RayCastShade::SHADE_NORMALS) { if (isect_buffer.m_intersected[tidx] == 1) color = make_float4(isect_buffer.m_shading_normal[0][tidx], isect_buffer.m_shading_normal[1][tidx], isect_buffer.m_shading_normal[2][tidx], 0.0f); else color = make_float4(0.0f); } else if (shade_mode == RayCastShade::SHADE_UVS) { } else if (shade_mode == RayCastShade::SHADE_UNIFORM) { color = isect_buffer.m_intersected[tidx] == 1 ? make_float4(1.0f) : make_float4(0.0f); } sample_buffer.m_contribution[tidx] = color; } } // // Ray Cast Integrator's compute method calls the tracer kernels and computes all the results. // NOTE: Number of elements in the camera sample buffer and ray buffer are the same. One to One correspondence. // void IntegratorRayCast::compute(CameraSampleBuffer* csb, RayBuffer* rb) { IntersectionBufferClass ibc(m_allocator); ibc.allocate(rb->m_size); IntersectionBuffer isect = ibc.get_buffer(); SceneBuffer sb = m_scene->gpu_get_buffer(); MaterialBuffer mb = m_scene->gpu_get_material_buffer(); const uint32_t* material_ids = m_scene->gpu_get_tri_material_ids(); m_tracer->trace(*rb, isect, sb, NULL, false); dim3 grid_size(256, 1, 1); dim3 block_size(256, 1, 1); CudaTimer t1("update kernel timer"); t1.start(); kernel_ray_cast_update<<<grid_size, block_size>>>(*csb, isect, mb, material_ids, m_params.m_shade); t1.stop(); } }
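Comparing this .cu with the .hip version above, the only structural change hipify makes is at the launch site: the triple-chevron kernel<<<grid, block>>>(...) call becomes hipLaunchKernelGGL(kernel, grid, block, sharedMemBytes, stream, ...). A minimal sketch of the two spellings follows; the fill kernel and launch geometry are illustrative, not taken from renderbox2.

#include <hip/hip_runtime.h>

__global__ void fill(float* out, int n, float v)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < n) out[i] = v;
}

void launch_fill(float* d_out, int n, hipStream_t stream)
{
    dim3 grid((n + 255) / 256), block(256);
    // CUDA source form:
    //   fill<<<grid, block, 0, stream>>>(d_out, n, 1.0f);
    // hipify-generated form (shared-memory bytes and stream become explicit arguments):
    hipLaunchKernelGGL(fill, grid, block, 0, stream, d_out, n, 1.0f);
}

int main()
{
    const int n = 1024;
    float* d = nullptr;
    hipMalloc((void**)&d, n * sizeof(float));
    launch_fill(d, n, 0);
    hipDeviceSynchronize();
    hipFree(d);
    return 0;
}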
eff854a7d8fcb801173822e97862950e0491037c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "luaT.h" #include "THH.h" #include "utils.h" #include <thrust/transform.h> #include <thrust/reduce.h> #include <thrust/transform_reduce.h> #include <thrust/functional.h> /* * Description: */ __device__ int translate_idx(int ii, int d1, int d2, int d3, int scale_factor) { int x, y, z, w; w = ii % d3; ii = ii/d3; z = ii % d2; ii = ii/d2; y = ii % d1; ii = ii/d1; x = ii; w = w/scale_factor; z = z/scale_factor; d2 /= scale_factor; d3 /= scale_factor; return (((x*d1+y)*d2)+z)*d3+w; } __device__ int translate_idx_inv(int ii, int d1, int d2, int d3, int scale_factor, int off_x, int off_y) { int x, y, z, w; w = ii % d3; ii = ii/d3; z = ii % d2; ii = ii/d2; y = ii % d1; ii = ii/d1; x = ii; w = w*scale_factor+off_x; z = z*scale_factor+off_y; d2 *= scale_factor; d3 *= scale_factor; return (((x*d1+y)*d2)+z)*d3+w; } __global__ void upscale(float *input, float *output, long no_elements, int scale_factor, int d1, int d2, int d3) { // output offset: long ii = threadIdx.x + blockDim.x * blockIdx.x; ii += threadIdx.y + blockDim.y * (blockDim.x * gridDim.x) * blockIdx.y; if (ii >= no_elements) return; int ipidx = translate_idx(ii, d1, d2, d3, scale_factor); output[ii]=input[ipidx]; } static int cunn_SpatialUpSamplingNearest_updateOutput(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *output = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor"); THCudaTensor_zero(state, output); int scale_factor = luaT_getfieldcheckint(L, 1, "scale_factor"); THAssert(THCudaTensor_checkGPU(state, 2, input, output)); input = THCudaTensor_newContiguous(state, input); // This is for allocating output Tensor long no_elements = 1; for(int i = 0; i < input->nDimension; i++){ no_elements *= input->size[i]; } no_elements *= scale_factor * scale_factor; int d1; int d2; int d3; if (input->nDimension == 3) { d1 = output->size[0]; d2 = output->size[1]; d3 = output->size[2]; } else { d1 = output->size[1]; d2 = output->size[2]; d3 = output->size[3]; } float *input_data = THCudaTensor_data(state, input); float *output_data = THCudaTensor_data(state, output); // cuda blocks & threads: long nthreads = 256; // Max number of blocks: http://en.wikipedia.org/wiki/CUDA // 65535 for SM 2.x, 2^32 -1 for >= 3.0 // TODO: When we move to SM 3.5 we should update this long n_xblocks = min(max((int)ceil((float)no_elements / nthreads), 1), 65535); long n_yblocks = (long)ceil((float)no_elements / (float)(n_xblocks * nthreads)); if (n_yblocks > 65535) { THError("Input size is too large! 
aborting"); } dim3 blocks(n_xblocks, n_yblocks); dim3 threads(nthreads); // kernel: hipLaunchKernelGGL(( upscale), dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), input_data, output_data, no_elements, scale_factor, d1, d2, d3); // check for errors hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in SpatialUpSamplingNearest.updateOutput: %s\n", hipGetErrorString(err)); THError("aborting"); } // final cut: THCudaTensor_free(state, input); return 1; } /* * Description: */ __global__ void downscale(float *gradInput_data, float *gradOutput_data, long no_elements, int scale_factor, int d1, int d2, int d3) { // output offset: long ii = threadIdx.x + blockDim.x * blockIdx.x; ii += threadIdx.y + blockDim.y * (blockDim.x * gridDim.x) * blockIdx.y; if (ii >= no_elements) return; for (int i=0; i < scale_factor; i++){ for(int j=0; j < scale_factor; j++){ int ipidx = translate_idx_inv(ii, d1, d2, d3, scale_factor, i, j); gradInput_data[ii] += gradOutput_data[ipidx]; } } } static int cunn_SpatialUpSamplingNearest_updateGradInput(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor"); THCudaTensor *gradInput = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor"); int scale_factor = luaT_getfieldcheckint(L, 1, "scale_factor"); THAssert(THCudaTensor_checkGPU(state, 2, gradOutput, gradInput)); THCudaTensor_zero(state, gradInput); float *gradInput_data = THCudaTensor_data(state, gradInput); float *gradOutput_data = THCudaTensor_data(state, gradOutput); long no_elements = 1; for(int i = 0; i < gradInput->nDimension; i++){ no_elements *= gradInput->size[i]; } int d1; int d2; int d3; if (gradInput->nDimension == 3) { d1 = gradInput->size[0]; d2 = gradInput->size[1]; d3 = gradInput->size[2]; } else { d1 = gradInput->size[1]; d2 = gradInput->size[2]; d3 = gradInput->size[3]; } // cuda blocks & threads: long nthreads = 256; // Max number of blocks: http://en.wikipedia.org/wiki/CUDA // 65535 for SM 2.x, 2^32 -1 for >= 3.0 // TODO: When we move to SM 3.5 we should update this long n_xblocks = min(max((int)ceil((float)no_elements / nthreads), 1), 65535); long n_yblocks = (long)ceil((float)no_elements / (float)(n_xblocks * nthreads)); if (n_yblocks > 65535) { THError("Input size is too large! aborting"); } dim3 blocks(n_xblocks, n_yblocks); dim3 threads(nthreads); // kernel: hipLaunchKernelGGL(( downscale), dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), gradInput_data, gradOutput_data, no_elements, scale_factor, d1, d2, d3); // check for errors hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in SpatialUpSamplingNearest.updateOutput: %s\n", hipGetErrorString(err)); THError("aborting"); } return 1; } static const struct luaL_Reg cunn_SpatialUpSamplingNearest__ [] = { {"SpatialUpSamplingNearest_updateOutput", cunn_SpatialUpSamplingNearest_updateOutput}, {"SpatialUpSamplingNearest_updateGradInput", cunn_SpatialUpSamplingNearest_updateGradInput}, {NULL, NULL} }; void cunn_SpatialUpSamplingNearest_init(lua_State *L) { luaT_pushmetatable(L, "torch.CudaTensor"); luaT_registeratname(L, cunn_SpatialUpSamplingNearest__, "nn"); lua_pop(L,1); }
eff854a7d8fcb801173822e97862950e0491037c.cu
#include "luaT.h" #include "THC.h" #include "utils.h" #include <thrust/transform.h> #include <thrust/reduce.h> #include <thrust/transform_reduce.h> #include <thrust/functional.h> /* * Description: */ __device__ int translate_idx(int ii, int d1, int d2, int d3, int scale_factor) { int x, y, z, w; w = ii % d3; ii = ii/d3; z = ii % d2; ii = ii/d2; y = ii % d1; ii = ii/d1; x = ii; w = w/scale_factor; z = z/scale_factor; d2 /= scale_factor; d3 /= scale_factor; return (((x*d1+y)*d2)+z)*d3+w; } __device__ int translate_idx_inv(int ii, int d1, int d2, int d3, int scale_factor, int off_x, int off_y) { int x, y, z, w; w = ii % d3; ii = ii/d3; z = ii % d2; ii = ii/d2; y = ii % d1; ii = ii/d1; x = ii; w = w*scale_factor+off_x; z = z*scale_factor+off_y; d2 *= scale_factor; d3 *= scale_factor; return (((x*d1+y)*d2)+z)*d3+w; } __global__ void upscale(float *input, float *output, long no_elements, int scale_factor, int d1, int d2, int d3) { // output offset: long ii = threadIdx.x + blockDim.x * blockIdx.x; ii += threadIdx.y + blockDim.y * (blockDim.x * gridDim.x) * blockIdx.y; if (ii >= no_elements) return; int ipidx = translate_idx(ii, d1, d2, d3, scale_factor); output[ii]=input[ipidx]; } static int cunn_SpatialUpSamplingNearest_updateOutput(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *output = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor"); THCudaTensor_zero(state, output); int scale_factor = luaT_getfieldcheckint(L, 1, "scale_factor"); THAssert(THCudaTensor_checkGPU(state, 2, input, output)); input = THCudaTensor_newContiguous(state, input); // This is for allocating output Tensor long no_elements = 1; for(int i = 0; i < input->nDimension; i++){ no_elements *= input->size[i]; } no_elements *= scale_factor * scale_factor; int d1; int d2; int d3; if (input->nDimension == 3) { d1 = output->size[0]; d2 = output->size[1]; d3 = output->size[2]; } else { d1 = output->size[1]; d2 = output->size[2]; d3 = output->size[3]; } float *input_data = THCudaTensor_data(state, input); float *output_data = THCudaTensor_data(state, output); // cuda blocks & threads: long nthreads = 256; // Max number of blocks: http://en.wikipedia.org/wiki/CUDA // 65535 for SM 2.x, 2^32 -1 for >= 3.0 // TODO: When we move to SM 3.5 we should update this long n_xblocks = min(max((int)ceil((float)no_elements / nthreads), 1), 65535); long n_yblocks = (long)ceil((float)no_elements / (float)(n_xblocks * nthreads)); if (n_yblocks > 65535) { THError("Input size is too large! 
aborting"); } dim3 blocks(n_xblocks, n_yblocks); dim3 threads(nthreads); // kernel: upscale<<<blocks, threads, 0, THCState_getCurrentStream(state)>>> (input_data, output_data, no_elements, scale_factor, d1, d2, d3); // check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in SpatialUpSamplingNearest.updateOutput: %s\n", cudaGetErrorString(err)); THError("aborting"); } // final cut: THCudaTensor_free(state, input); return 1; } /* * Description: */ __global__ void downscale(float *gradInput_data, float *gradOutput_data, long no_elements, int scale_factor, int d1, int d2, int d3) { // output offset: long ii = threadIdx.x + blockDim.x * blockIdx.x; ii += threadIdx.y + blockDim.y * (blockDim.x * gridDim.x) * blockIdx.y; if (ii >= no_elements) return; for (int i=0; i < scale_factor; i++){ for(int j=0; j < scale_factor; j++){ int ipidx = translate_idx_inv(ii, d1, d2, d3, scale_factor, i, j); gradInput_data[ii] += gradOutput_data[ipidx]; } } } static int cunn_SpatialUpSamplingNearest_updateGradInput(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor"); THCudaTensor *gradInput = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor"); int scale_factor = luaT_getfieldcheckint(L, 1, "scale_factor"); THAssert(THCudaTensor_checkGPU(state, 2, gradOutput, gradInput)); THCudaTensor_zero(state, gradInput); float *gradInput_data = THCudaTensor_data(state, gradInput); float *gradOutput_data = THCudaTensor_data(state, gradOutput); long no_elements = 1; for(int i = 0; i < gradInput->nDimension; i++){ no_elements *= gradInput->size[i]; } int d1; int d2; int d3; if (gradInput->nDimension == 3) { d1 = gradInput->size[0]; d2 = gradInput->size[1]; d3 = gradInput->size[2]; } else { d1 = gradInput->size[1]; d2 = gradInput->size[2]; d3 = gradInput->size[3]; } // cuda blocks & threads: long nthreads = 256; // Max number of blocks: http://en.wikipedia.org/wiki/CUDA // 65535 for SM 2.x, 2^32 -1 for >= 3.0 // TODO: When we move to SM 3.5 we should update this long n_xblocks = min(max((int)ceil((float)no_elements / nthreads), 1), 65535); long n_yblocks = (long)ceil((float)no_elements / (float)(n_xblocks * nthreads)); if (n_yblocks > 65535) { THError("Input size is too large! aborting"); } dim3 blocks(n_xblocks, n_yblocks); dim3 threads(nthreads); // kernel: downscale<<<blocks, threads, 0, THCState_getCurrentStream(state)>>> (gradInput_data, gradOutput_data, no_elements, scale_factor, d1, d2, d3); // check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in SpatialUpSamplingNearest.updateOutput: %s\n", cudaGetErrorString(err)); THError("aborting"); } return 1; } static const struct luaL_Reg cunn_SpatialUpSamplingNearest__ [] = { {"SpatialUpSamplingNearest_updateOutput", cunn_SpatialUpSamplingNearest_updateOutput}, {"SpatialUpSamplingNearest_updateGradInput", cunn_SpatialUpSamplingNearest_updateGradInput}, {NULL, NULL} }; void cunn_SpatialUpSamplingNearest_init(lua_State *L) { luaT_pushmetatable(L, "torch.CudaTensor"); luaT_registeratname(L, cunn_SpatialUpSamplingNearest__, "nn"); lua_pop(L,1); }
78136c22ddd0c8e92edd9aae9bc86e366ec6528b.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <assert.h> #include <hip/hip_runtime.h> void DisplayProperties( hipDeviceProp_t* pDeviceProp ) { if( !pDeviceProp ) return; printf( "\nDevice Name \t - %s ", pDeviceProp->name ); printf( "\n**************************************"); printf( "\nTotal Global Memory\t\t -%d KB", pDeviceProp->totalGlobalMem/1024 ); printf( "\nShared memory available per block \t - %d KB", pDeviceProp->sharedMemPerBlock/1024 ); printf( "\nNumber of registers per thread block \t - %d", pDeviceProp->regsPerBlock ); printf( "\nWarp size in threads \t - %d", pDeviceProp->warpSize ); printf( "\nMemory Pitch \t - %d bytes", pDeviceProp->memPitch ); printf( "\nMaximum threads per block \t - %d", pDeviceProp->maxThreadsPerBlock ); printf( "\nMaximum Thread Dimension (block) \t - %d %d %d", pDeviceProp->maxThreadsDim[0], pDeviceProp->maxThreadsDim[1], pDeviceProp->maxThreadsDim[2] ); printf( "\nMaximum Thread Dimension (grid) \t - %d %d %d", pDeviceProp->maxGridSize[0], pDeviceProp->maxGridSize[1], pDeviceProp->maxGridSize[2] ); printf( "\nTotal constant memory \t - %d bytes", pDeviceProp->totalConstMem ); printf( "\nCUDA ver \t - %d.%d", pDeviceProp->major, pDeviceProp->minor ); printf( "\nClock rate \t - %d KHz", pDeviceProp->clockRate ); printf( "\nTexture Alignment \t - %d bytes", pDeviceProp->textureAlignment ); printf( "\nDevice Overlap \t - %s", pDeviceProp-> deviceOverlap?"Allowed":"Not Allowed" ); printf( "\nNumber of Multi processors \t - %d", pDeviceProp->multiProcessorCount ); } int main(void) { hipDeviceProp_t deviceProp; int nDevCount = 0; hipGetDeviceCount( &nDevCount ); printf( "Total Device found: %d", nDevCount ); for (int nDeviceIdx = 0; nDeviceIdx < nDevCount; ++nDeviceIdx ) { memset( &deviceProp, 0, sizeof(deviceProp)); if( hipSuccess == hipGetDeviceProperties(&deviceProp, nDeviceIdx)) DisplayProperties( &deviceProp ); else printf( "\n%s", hipGetErrorString(hipGetLastError())); } }
78136c22ddd0c8e92edd9aae9bc86e366ec6528b.cu
#include <stdio.h> #include <assert.h> #include <cuda.h> void DisplayProperties( cudaDeviceProp* pDeviceProp ) { if( !pDeviceProp ) return; printf( "\nDevice Name \t - %s ", pDeviceProp->name ); printf( "\n**************************************"); printf( "\nTotal Global Memory\t\t -%d KB", pDeviceProp->totalGlobalMem/1024 ); printf( "\nShared memory available per block \t - %d KB", pDeviceProp->sharedMemPerBlock/1024 ); printf( "\nNumber of registers per thread block \t - %d", pDeviceProp->regsPerBlock ); printf( "\nWarp size in threads \t - %d", pDeviceProp->warpSize ); printf( "\nMemory Pitch \t - %d bytes", pDeviceProp->memPitch ); printf( "\nMaximum threads per block \t - %d", pDeviceProp->maxThreadsPerBlock ); printf( "\nMaximum Thread Dimension (block) \t - %d %d %d", pDeviceProp->maxThreadsDim[0], pDeviceProp->maxThreadsDim[1], pDeviceProp->maxThreadsDim[2] ); printf( "\nMaximum Thread Dimension (grid) \t - %d %d %d", pDeviceProp->maxGridSize[0], pDeviceProp->maxGridSize[1], pDeviceProp->maxGridSize[2] ); printf( "\nTotal constant memory \t - %d bytes", pDeviceProp->totalConstMem ); printf( "\nCUDA ver \t - %d.%d", pDeviceProp->major, pDeviceProp->minor ); printf( "\nClock rate \t - %d KHz", pDeviceProp->clockRate ); printf( "\nTexture Alignment \t - %d bytes", pDeviceProp->textureAlignment ); printf( "\nDevice Overlap \t - %s", pDeviceProp-> deviceOverlap?"Allowed":"Not Allowed" ); printf( "\nNumber of Multi processors \t - %d", pDeviceProp->multiProcessorCount ); } int main(void) { cudaDeviceProp deviceProp; int nDevCount = 0; cudaGetDeviceCount( &nDevCount ); printf( "Total Device found: %d", nDevCount ); for (int nDeviceIdx = 0; nDeviceIdx < nDevCount; ++nDeviceIdx ) { memset( &deviceProp, 0, sizeof(deviceProp)); if( cudaSuccess == cudaGetDeviceProperties(&deviceProp, nDeviceIdx)) DisplayProperties( &deviceProp ); else printf( "\n%s", cudaGetErrorString(cudaGetLastError())); } }
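This pair is close to a line-for-line rename of the device-query API: cudaDeviceProp, cudaGetDeviceCount and cudaGetDeviceProperties become hipDeviceProp_t (note the added _t on the struct), hipGetDeviceCount and hipGetDeviceProperties, and the error helpers are renamed the same way. A minimal query loop in the hipified spelling:

#include <hip/hip_runtime.h>
#include <cstdio>

int main()
{
    int count = 0;
    if (hipGetDeviceCount(&count) != hipSuccess) return 1;
    printf("devices found: %d\n", count);
    for (int i = 0; i < count; ++i) {
        hipDeviceProp_t prop;                                // CUDA: cudaDeviceProp
        if (hipGetDeviceProperties(&prop, i) == hipSuccess)  // CUDA: cudaGetDeviceProperties
            printf("device %d: %s, %d multiprocessors\n",
                   i, prop.name, prop.multiProcessorCount);
        else
            printf("device %d: %s\n", i, hipGetErrorString(hipGetLastError()));
    }
    return 0;
}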
1ce4b2e8e9849be15373fd3513337e937885d7de.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright (c) 2020 by Contributors * \file array/cuda/spmm.cu * \brief SPMM C APIs and definitions. */ #include <dgl/array.h> #include "./spmm.cuh" #include "functor.cuh" #include "../../runtime/cuda/cuda_common.h" namespace dgl { using namespace cuda; namespace aten { namespace { /*! \brief Fill the vector started from ptr of size length with val */ template <typename DType> void _Fill(DType* ptr, size_t length, DType val) { auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); int nt = FindNumThreads(length); int nb = (length + nt - 1) / nt; // on x-axis, no need to worry about upperbound. CUDA_KERNEL_CALL(cuda::_FillKernel, nb, nt, 0, thr_entry->stream, ptr, length, val); } } // namespace namespace cusparse { #if CUDART_VERSION < 11000 template <typename DType> hipsparseStatus_t Xcsrmm2(hipsparseHandle_t handle, hipsparseOperation_t transA, hipsparseOperation_t transB, int m, int n, int k, int nnz, const DType* alpha, const hipsparseMatDescr_t descrA, const DType* csrValA, const int* csrRowPtrA, const int* csrColIndA, const DType* B, int ldb, const DType* beta, DType* C, int ldc) { LOG(INFO) << "Not supported dtype"; return HIPSPARSE_STATUS_EXECUTION_FAILED; } template <> hipsparseStatus_t Xcsrmm2<float>(hipsparseHandle_t handle, hipsparseOperation_t transA, hipsparseOperation_t transB, int m, int n, int k, int nnz, const float* alpha, const hipsparseMatDescr_t descrA, const float* csrValA, const int* csrRowPtrA, const int* csrColIndA, const float* B, int ldb, const float* beta, float* C, int ldc) { return hipsparseScsrmm2(handle, transA, transB, m, n, k, nnz, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, B, ldb, beta, C, ldc); } template <> hipsparseStatus_t Xcsrmm2<double>(hipsparseHandle_t handle, hipsparseOperation_t transA, hipsparseOperation_t transB, int m, int n, int k, int nnz, const double* alpha, const hipsparseMatDescr_t descrA, const double* csrValA, const int* csrRowPtrA, const int* csrColIndA, const double* B, int ldb, const double* beta, double* C, int ldc) { return hipsparseDcsrmm2(handle, transA, transB, m, n, k, nnz, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, B, ldb, beta, C, ldc); } #endif template <typename DType> hipblasStatus_t Xgeam(hipblasHandle_t handle, hipblasOperation_t transa, hipblasOperation_t transb, int m, int n, const DType* alpha, const DType* A, int lda, const DType* beta, const DType* B, int ldb, DType* C, int ldc) { LOG(INFO) << "Not supported dtype"; return HIPBLAS_STATUS_EXECUTION_FAILED; } template <> hipblasStatus_t Xgeam<float>(hipblasHandle_t handle, hipblasOperation_t transa, hipblasOperation_t transb, int m, int n, const float* alpha, const float* A, int lda, const float* beta, const float* B, int ldb, float* C, int ldc) { return hipblasSgeam(handle, transa, transb, m, n, alpha, A, lda, beta, B, ldb, C, ldc); } template <> hipblasStatus_t Xgeam<double>(hipblasHandle_t handle, hipblasOperation_t transa, hipblasOperation_t transb, int m, int n, const double* alpha, const double* A, int lda, const double* beta, const double* B, int ldb, double* C, int ldc) { return hipblasDgeam(handle, transa, transb, m, n, alpha, A, lda, beta, B, ldb, C, ldc); } /*! Cusparse implementation of SpMM on Csr format. 
*/ template <typename DType> void CusparseCsrmm2( const DLContext& ctx, const CSRMatrix& csr, const DType* B_data, const DType* A_data, DType* C_data, int x_length) { // We use csrmm2 to perform following operation: // C = A x B, where A is a sparse matrix in csr format, B is the dense matrix for node // feature tensor. However, since cusparse only supports column-major, while our tensor // is stored in row-major, the actual computation is: // C = trans(A x trans(B)). // Currently, we use cublasXgeam to implement transposition and allocate intermediate // workspace memory for this. const int m = csr.num_rows; const int n = x_length; const int k = csr.num_cols; const int nnz = csr.indices->shape[0]; const DType alpha = 1.0; const DType beta = 0.0; // device auto device = runtime::DeviceAPI::Get(ctx); auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); // allocate cusparse handle if needed if (!thr_entry->cusparse_handle) { CUSPARSE_CALL(hipsparseCreate(&(thr_entry->cusparse_handle))); } CUSPARSE_CALL(hipsparseSetStream(thr_entry->cusparse_handle, thr_entry->stream)); // allocate matrix for temporary transposed output DType* trans_out = static_cast<DType*>(device->AllocWorkspace(ctx, m * n * sizeof(DType))); // all one data array DType* valptr = nullptr; if (!A_data) { valptr = static_cast<DType*>(device->AllocWorkspace(ctx, nnz * sizeof(DType))); _Fill(valptr, nnz, static_cast<DType>(1.)); } #if CUDART_VERSION >= 11000 hipsparseSpMatDescr_t matA; hipsparseDnMatDescr_t matB, matC; constexpr auto cuda_dtype = std::is_same<DType, float>::value ? HIP_R_32F: HIP_R_64F; CUSPARSE_CALL(hipsparseCreateCsr(&matA, m, k, nnz, static_cast<int32_t*>(csr.indptr->data), static_cast<int32_t*>(csr.indices->data), const_cast<DType*>(valptr? valptr : A_data), HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ZERO, cuda_dtype)); CUSPARSE_CALL(hipsparseCreateDnMat(&matB, n, k, n, const_cast<DType*>(B_data), cuda_dtype, HIPSPARSE_ORDER_COL)); CUSPARSE_CALL(hipsparseCreateDnMat(&matC, m, n, m, trans_out, cuda_dtype, HIPSPARSE_ORDER_COL)); auto transA = HIPSPARSE_OPERATION_NON_TRANSPOSE; auto transB = HIPSPARSE_OPERATION_TRANSPOSE; size_t workspace_size; CUSPARSE_CALL(hipsparseSpMM_bufferSize( thr_entry->cusparse_handle, transA, transB, &alpha, matA, matB, &beta, matC, cuda_dtype, HIPSPARSE_CSRMM_ALG1, &workspace_size)); void* workspace = device->AllocWorkspace(ctx, workspace_size); CUSPARSE_CALL(hipsparseSpMM( thr_entry->cusparse_handle, transA, transB, &alpha, matA, matB, &beta, matC, cuda_dtype, HIPSPARSE_CSRMM_ALG1, workspace)); device->FreeWorkspace(ctx, workspace); CUSPARSE_CALL(hipsparseDestroySpMat(matA)); CUSPARSE_CALL(hipsparseDestroyDnMat(matB)); CUSPARSE_CALL(hipsparseDestroyDnMat(matC)); #else hipsparseMatDescr_t descr; CUSPARSE_CALL(hipsparseCreateMatDescr(&descr)); CUSPARSE_CALL(hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL)); CUSPARSE_CALL(hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO)); CUSPARSE_CALL(Xcsrmm2<DType>( thr_entry->cusparse_handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_TRANSPOSE, m, n, k, nnz, &alpha, descr, (valptr)? 
valptr : A_data, static_cast<int32_t*>(csr.indptr->data), static_cast<int32_t*>(csr.indices->data), B_data, n, &beta, trans_out, m)); CUSPARSE_CALL(hipsparseDestroyMatDescr(descr)); #endif if (valptr) device->FreeWorkspace(ctx, valptr); // transpose the output matrix if (!thr_entry->cublas_handle) CUBLAS_CALL(hipblasCreate(&(thr_entry->cublas_handle))); CUBLAS_CALL(hipblasSetStream(thr_entry->cublas_handle, thr_entry->stream)); CUBLAS_CALL(Xgeam<DType>( thr_entry->cublas_handle, HIPBLAS_OP_T, HIPBLAS_OP_N, n, m, &alpha, trans_out, m, &beta, nullptr, n, C_data, n)); device->FreeWorkspace(ctx, trans_out); } } // namespace cusparse #define SWITCH_OP(op, Op, ...) \ do { \ if ((op) == "add") { \ typedef cuda::binary::Add<DType> Op; \ { __VA_ARGS__ } \ } else if ((op) == "sub") { \ typedef cuda::binary::Sub<DType> Op; \ { __VA_ARGS__ } \ } else if ((op) == "mul") { \ typedef cuda::binary::Mul<DType> Op; \ { __VA_ARGS__ } \ } else if ((op) == "div") { \ typedef cuda::binary::Div<DType> Op; \ { __VA_ARGS__ } \ } else if ((op) == "copy_lhs") { \ typedef cuda::binary::CopyLhs<DType> Op; \ { __VA_ARGS__ } \ } else if ((op) == "copy_rhs") { \ typedef cuda::binary::CopyRhs<DType> Op; \ { __VA_ARGS__ } \ } else { \ LOG(FATAL) << "Unsupported SpMM binary operator: " << op; \ } \ } while (0) /*! * \brief CUDA implementation of g-SpMM on Csr format. * \note use cusparse if the reduce operator is `sum` and there is * no broadcast, use dgl's kernel in other cases. */ template <int XPU, typename IdType, typename DType> void SpMMCsr(const std::string& op, const std::string& reduce, const BcastOff& bcast, const CSRMatrix& csr, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux) { if (reduce == "sum") { if (sizeof(IdType) == 4 && op == "copy_lhs") { int64_t x_length = 1; for (int i = 1; i < ufeat->ndim; ++i) x_length *= ufeat->shape[i]; cusparse::CusparseCsrmm2<DType>( ufeat->ctx, csr, static_cast<DType*>(ufeat->data), nullptr, static_cast<DType*>(out->data), x_length); } else if (sizeof(IdType) == 4 && op == "mul" && efeat.NumElements() == csr.indices->shape[0]) { int64_t x_length = 1; for (int i = 1; i < ufeat->ndim; ++i) x_length *= ufeat->shape[i]; if (!IsNullArray(csr.data)) efeat = IndexSelect(efeat, csr.data); cusparse::CusparseCsrmm2<DType>( ufeat->ctx, csr, static_cast<DType*>(ufeat->data), static_cast<DType*>(efeat->data), static_cast<DType*>(out->data), x_length); } else { SWITCH_OP(op, Op, { cuda::SpMMCsr<IdType, DType, Op, cuda::reduce::Sum<IdType, DType> >( bcast, csr, ufeat, efeat, out, NullArray(), NullArray()); }); } } else if (reduce == "max") { SWITCH_OP(op, Op, { cuda::SpMMCsr<IdType, DType, Op, cuda::reduce::Max<IdType, DType> >( bcast, csr, ufeat, efeat, out, out_aux[0], out_aux[1]); }); } else if (reduce == "min") { SWITCH_OP(op, Op, { cuda::SpMMCsr<IdType, DType, Op, cuda::reduce::Min<IdType, DType> >( bcast, csr, ufeat, efeat, out, out_aux[0], out_aux[1]); }); } else { LOG(FATAL) << "Not implemented"; } } /*! * \brief CUDA implementation of g-SpMM on Coo format. 
*/ template <int XPU, typename IdType, typename DType> void SpMMCoo(const std::string& op, const std::string& reduce, const BcastOff& bcast, const COOMatrix& coo, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux) { if (reduce == "sum") { SWITCH_OP(op, Op, { cuda::SpMMCoo<IdType, DType, Op, cuda::reduce::Sum<IdType, DType, true> > ( bcast, coo, ufeat, efeat, out, NullArray(), NullArray()); }); } else if (reduce == "max") { SWITCH_OP(op, Op, { cuda::SpMMCoo<IdType, DType, Op, cuda::reduce::Max<IdType, DType, true> > ( bcast, coo, ufeat, efeat, out, out_aux[0], out_aux[1]); }); } else if (reduce == "min") { SWITCH_OP(op, Op, { cuda::SpMMCoo<IdType, DType, Op, cuda::reduce::Min<IdType, DType, true> > ( bcast, coo, ufeat, efeat, out, out_aux[0], out_aux[1]); }); } else { LOG(FATAL) << "Not implemented"; } } template void SpMMCsr<kDLGPU, int32_t, float>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const CSRMatrix& csr, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux); template void SpMMCsr<kDLGPU, int64_t, float>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const CSRMatrix& csr, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux); template void SpMMCsr<kDLGPU, int32_t, double>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const CSRMatrix& csr, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux); template void SpMMCsr<kDLGPU, int64_t, double>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const CSRMatrix& csr, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux); template void SpMMCoo<kDLGPU, int32_t, float>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const COOMatrix& coo, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux); template void SpMMCoo<kDLGPU, int64_t, float>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const COOMatrix& coo, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux); template void SpMMCoo<kDLGPU, int32_t, double>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const COOMatrix& coo, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux); template void SpMMCoo<kDLGPU, int64_t, double>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const COOMatrix& coo, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux); } // namespace aten } // namespace dgl
1ce4b2e8e9849be15373fd3513337e937885d7de.cu
/*! * Copyright (c) 2020 by Contributors * \file array/cuda/spmm.cu * \brief SPMM C APIs and definitions. */ #include <dgl/array.h> #include "./spmm.cuh" #include "./functor.cuh" #include "../../runtime/cuda/cuda_common.h" namespace dgl { using namespace cuda; namespace aten { namespace { /*! \brief Fill the vector started from ptr of size length with val */ template <typename DType> void _Fill(DType* ptr, size_t length, DType val) { auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); int nt = FindNumThreads(length); int nb = (length + nt - 1) / nt; // on x-axis, no need to worry about upperbound. CUDA_KERNEL_CALL(cuda::_FillKernel, nb, nt, 0, thr_entry->stream, ptr, length, val); } } // namespace namespace cusparse { #if CUDART_VERSION < 11000 template <typename DType> cusparseStatus_t Xcsrmm2(cusparseHandle_t handle, cusparseOperation_t transA, cusparseOperation_t transB, int m, int n, int k, int nnz, const DType* alpha, const cusparseMatDescr_t descrA, const DType* csrValA, const int* csrRowPtrA, const int* csrColIndA, const DType* B, int ldb, const DType* beta, DType* C, int ldc) { LOG(INFO) << "Not supported dtype"; return CUSPARSE_STATUS_EXECUTION_FAILED; } template <> cusparseStatus_t Xcsrmm2<float>(cusparseHandle_t handle, cusparseOperation_t transA, cusparseOperation_t transB, int m, int n, int k, int nnz, const float* alpha, const cusparseMatDescr_t descrA, const float* csrValA, const int* csrRowPtrA, const int* csrColIndA, const float* B, int ldb, const float* beta, float* C, int ldc) { return cusparseScsrmm2(handle, transA, transB, m, n, k, nnz, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, B, ldb, beta, C, ldc); } template <> cusparseStatus_t Xcsrmm2<double>(cusparseHandle_t handle, cusparseOperation_t transA, cusparseOperation_t transB, int m, int n, int k, int nnz, const double* alpha, const cusparseMatDescr_t descrA, const double* csrValA, const int* csrRowPtrA, const int* csrColIndA, const double* B, int ldb, const double* beta, double* C, int ldc) { return cusparseDcsrmm2(handle, transA, transB, m, n, k, nnz, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, B, ldb, beta, C, ldc); } #endif template <typename DType> cublasStatus_t Xgeam(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, const DType* alpha, const DType* A, int lda, const DType* beta, const DType* B, int ldb, DType* C, int ldc) { LOG(INFO) << "Not supported dtype"; return CUBLAS_STATUS_EXECUTION_FAILED; } template <> cublasStatus_t Xgeam<float>(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, const float* alpha, const float* A, int lda, const float* beta, const float* B, int ldb, float* C, int ldc) { return cublasSgeam(handle, transa, transb, m, n, alpha, A, lda, beta, B, ldb, C, ldc); } template <> cublasStatus_t Xgeam<double>(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, const double* alpha, const double* A, int lda, const double* beta, const double* B, int ldb, double* C, int ldc) { return cublasDgeam(handle, transa, transb, m, n, alpha, A, lda, beta, B, ldb, C, ldc); } /*! Cusparse implementation of SpMM on Csr format. */ template <typename DType> void CusparseCsrmm2( const DLContext& ctx, const CSRMatrix& csr, const DType* B_data, const DType* A_data, DType* C_data, int x_length) { // We use csrmm2 to perform following operation: // C = A x B, where A is a sparse matrix in csr format, B is the dense matrix for node // feature tensor. 
However, since cusparse only supports column-major, while our tensor // is stored in row-major, the actual computation is: // C = trans(A x trans(B)). // Currently, we use cublasXgeam to implement transposition and allocate intermediate // workspace memory for this. const int m = csr.num_rows; const int n = x_length; const int k = csr.num_cols; const int nnz = csr.indices->shape[0]; const DType alpha = 1.0; const DType beta = 0.0; // device auto device = runtime::DeviceAPI::Get(ctx); auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); // allocate cusparse handle if needed if (!thr_entry->cusparse_handle) { CUSPARSE_CALL(cusparseCreate(&(thr_entry->cusparse_handle))); } CUSPARSE_CALL(cusparseSetStream(thr_entry->cusparse_handle, thr_entry->stream)); // allocate matrix for temporary transposed output DType* trans_out = static_cast<DType*>(device->AllocWorkspace(ctx, m * n * sizeof(DType))); // all one data array DType* valptr = nullptr; if (!A_data) { valptr = static_cast<DType*>(device->AllocWorkspace(ctx, nnz * sizeof(DType))); _Fill(valptr, nnz, static_cast<DType>(1.)); } #if CUDART_VERSION >= 11000 cusparseSpMatDescr_t matA; cusparseDnMatDescr_t matB, matC; constexpr auto cuda_dtype = std::is_same<DType, float>::value ? CUDA_R_32F: CUDA_R_64F; CUSPARSE_CALL(cusparseCreateCsr(&matA, m, k, nnz, static_cast<int32_t*>(csr.indptr->data), static_cast<int32_t*>(csr.indices->data), const_cast<DType*>(valptr? valptr : A_data), CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, cuda_dtype)); CUSPARSE_CALL(cusparseCreateDnMat(&matB, n, k, n, const_cast<DType*>(B_data), cuda_dtype, CUSPARSE_ORDER_COL)); CUSPARSE_CALL(cusparseCreateDnMat(&matC, m, n, m, trans_out, cuda_dtype, CUSPARSE_ORDER_COL)); auto transA = CUSPARSE_OPERATION_NON_TRANSPOSE; auto transB = CUSPARSE_OPERATION_TRANSPOSE; size_t workspace_size; CUSPARSE_CALL(cusparseSpMM_bufferSize( thr_entry->cusparse_handle, transA, transB, &alpha, matA, matB, &beta, matC, cuda_dtype, CUSPARSE_CSRMM_ALG1, &workspace_size)); void* workspace = device->AllocWorkspace(ctx, workspace_size); CUSPARSE_CALL(cusparseSpMM( thr_entry->cusparse_handle, transA, transB, &alpha, matA, matB, &beta, matC, cuda_dtype, CUSPARSE_CSRMM_ALG1, workspace)); device->FreeWorkspace(ctx, workspace); CUSPARSE_CALL(cusparseDestroySpMat(matA)); CUSPARSE_CALL(cusparseDestroyDnMat(matB)); CUSPARSE_CALL(cusparseDestroyDnMat(matC)); #else cusparseMatDescr_t descr; CUSPARSE_CALL(cusparseCreateMatDescr(&descr)); CUSPARSE_CALL(cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL)); CUSPARSE_CALL(cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO)); CUSPARSE_CALL(Xcsrmm2<DType>( thr_entry->cusparse_handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_TRANSPOSE, m, n, k, nnz, &alpha, descr, (valptr)? valptr : A_data, static_cast<int32_t*>(csr.indptr->data), static_cast<int32_t*>(csr.indices->data), B_data, n, &beta, trans_out, m)); CUSPARSE_CALL(cusparseDestroyMatDescr(descr)); #endif if (valptr) device->FreeWorkspace(ctx, valptr); // transpose the output matrix if (!thr_entry->cublas_handle) CUBLAS_CALL(cublasCreate(&(thr_entry->cublas_handle))); CUBLAS_CALL(cublasSetStream(thr_entry->cublas_handle, thr_entry->stream)); CUBLAS_CALL(Xgeam<DType>( thr_entry->cublas_handle, CUBLAS_OP_T, CUBLAS_OP_N, n, m, &alpha, trans_out, m, &beta, nullptr, n, C_data, n)); device->FreeWorkspace(ctx, trans_out); } } // namespace cusparse #define SWITCH_OP(op, Op, ...) 
\ do { \ if ((op) == "add") { \ typedef cuda::binary::Add<DType> Op; \ { __VA_ARGS__ } \ } else if ((op) == "sub") { \ typedef cuda::binary::Sub<DType> Op; \ { __VA_ARGS__ } \ } else if ((op) == "mul") { \ typedef cuda::binary::Mul<DType> Op; \ { __VA_ARGS__ } \ } else if ((op) == "div") { \ typedef cuda::binary::Div<DType> Op; \ { __VA_ARGS__ } \ } else if ((op) == "copy_lhs") { \ typedef cuda::binary::CopyLhs<DType> Op; \ { __VA_ARGS__ } \ } else if ((op) == "copy_rhs") { \ typedef cuda::binary::CopyRhs<DType> Op; \ { __VA_ARGS__ } \ } else { \ LOG(FATAL) << "Unsupported SpMM binary operator: " << op; \ } \ } while (0) /*! * \brief CUDA implementation of g-SpMM on Csr format. * \note use cusparse if the reduce operator is `sum` and there is * no broadcast, use dgl's kernel in other cases. */ template <int XPU, typename IdType, typename DType> void SpMMCsr(const std::string& op, const std::string& reduce, const BcastOff& bcast, const CSRMatrix& csr, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux) { if (reduce == "sum") { if (sizeof(IdType) == 4 && op == "copy_lhs") { int64_t x_length = 1; for (int i = 1; i < ufeat->ndim; ++i) x_length *= ufeat->shape[i]; cusparse::CusparseCsrmm2<DType>( ufeat->ctx, csr, static_cast<DType*>(ufeat->data), nullptr, static_cast<DType*>(out->data), x_length); } else if (sizeof(IdType) == 4 && op == "mul" && efeat.NumElements() == csr.indices->shape[0]) { int64_t x_length = 1; for (int i = 1; i < ufeat->ndim; ++i) x_length *= ufeat->shape[i]; if (!IsNullArray(csr.data)) efeat = IndexSelect(efeat, csr.data); cusparse::CusparseCsrmm2<DType>( ufeat->ctx, csr, static_cast<DType*>(ufeat->data), static_cast<DType*>(efeat->data), static_cast<DType*>(out->data), x_length); } else { SWITCH_OP(op, Op, { cuda::SpMMCsr<IdType, DType, Op, cuda::reduce::Sum<IdType, DType> >( bcast, csr, ufeat, efeat, out, NullArray(), NullArray()); }); } } else if (reduce == "max") { SWITCH_OP(op, Op, { cuda::SpMMCsr<IdType, DType, Op, cuda::reduce::Max<IdType, DType> >( bcast, csr, ufeat, efeat, out, out_aux[0], out_aux[1]); }); } else if (reduce == "min") { SWITCH_OP(op, Op, { cuda::SpMMCsr<IdType, DType, Op, cuda::reduce::Min<IdType, DType> >( bcast, csr, ufeat, efeat, out, out_aux[0], out_aux[1]); }); } else { LOG(FATAL) << "Not implemented"; } } /*! * \brief CUDA implementation of g-SpMM on Coo format. 
*/ template <int XPU, typename IdType, typename DType> void SpMMCoo(const std::string& op, const std::string& reduce, const BcastOff& bcast, const COOMatrix& coo, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux) { if (reduce == "sum") { SWITCH_OP(op, Op, { cuda::SpMMCoo<IdType, DType, Op, cuda::reduce::Sum<IdType, DType, true> > ( bcast, coo, ufeat, efeat, out, NullArray(), NullArray()); }); } else if (reduce == "max") { SWITCH_OP(op, Op, { cuda::SpMMCoo<IdType, DType, Op, cuda::reduce::Max<IdType, DType, true> > ( bcast, coo, ufeat, efeat, out, out_aux[0], out_aux[1]); }); } else if (reduce == "min") { SWITCH_OP(op, Op, { cuda::SpMMCoo<IdType, DType, Op, cuda::reduce::Min<IdType, DType, true> > ( bcast, coo, ufeat, efeat, out, out_aux[0], out_aux[1]); }); } else { LOG(FATAL) << "Not implemented"; } } template void SpMMCsr<kDLGPU, int32_t, float>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const CSRMatrix& csr, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux); template void SpMMCsr<kDLGPU, int64_t, float>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const CSRMatrix& csr, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux); template void SpMMCsr<kDLGPU, int32_t, double>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const CSRMatrix& csr, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux); template void SpMMCsr<kDLGPU, int64_t, double>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const CSRMatrix& csr, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux); template void SpMMCoo<kDLGPU, int32_t, float>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const COOMatrix& coo, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux); template void SpMMCoo<kDLGPU, int64_t, float>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const COOMatrix& coo, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux); template void SpMMCoo<kDLGPU, int32_t, double>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const COOMatrix& coo, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux); template void SpMMCoo<kDLGPU, int64_t, double>( const std::string& op, const std::string& reduce, const BcastOff& bcast, const COOMatrix& coo, NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux); } // namespace aten } // namespace dgl
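Beyond the runtime API, this pair shows hipify renaming whole vendor libraries: cusparse*/cublas* handles, calls and enums become hipsparse*/hipblas*, and datatype or layout tags such as CUDA_R_32F, CUSPARSE_ORDER_COL and CUBLAS_OP_T pick up HIP prefixes. Below is a minimal handle-lifecycle sketch in the hipified spelling; the header paths are an assumption (they differ across ROCm releases), and the actual SpMM/geam calls are left as comments.

#include <hip/hip_runtime.h>
#include <hipsparse.h>   // assumed include path; some releases use <hipsparse/hipsparse.h>
#include <hipblas.h>     // assumed include path; some releases use <hipblas/hipblas.h>

int main()
{
    hipStream_t stream = 0;

    hipsparseHandle_t sp = nullptr;   // CUDA: cusparseHandle_t
    hipsparseCreate(&sp);             // CUDA: cusparseCreate
    hipsparseSetStream(sp, stream);   // CUDA: cusparseSetStream

    hipblasHandle_t bl = nullptr;     // CUDA: cublasHandle_t
    hipblasCreate(&bl);               // CUDA: cublasCreate
    hipblasSetStream(bl, stream);     // CUDA: cublasSetStream

    // The SpMM and transpose steps from spmm.cu would go here, with HIP_R_32F,
    // HIPSPARSE_ORDER_COL and HIPBLAS_OP_T in place of CUDA_R_32F,
    // CUSPARSE_ORDER_COL and CUBLAS_OP_T.

    hipblasDestroy(bl);
    hipsparseDestroy(sp);
    return 0;
}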
3b8d21ea3b257e08ede0a54b2c55df05ba842c63.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //////////////////////////////////////////////////////////////////////////// // // Copyright 1993-2014 NVIDIA Corporation. All rights reserved. // // Please refer to the NVIDIA end user license agreement (EULA) associated // with this source code for terms and conditions that govern your use of // this software. Any use, reproduction, disclosure, or distribution of // this software and related documentation outside the terms of the EULA // is strictly prohibited. // //////////////////////////////////////////////////////////////////////////// // ---------------------------------------------------------------------------------------- // Transpose // // This file contains both device and host code for transposing a floating-point // matrix. It performs several transpose kernels, which incrementally improve performance // through coalescing, removing shared memory bank conflicts, and eliminating partition // camping. Several of the kernels perform a copy, used to represent the best case // performance that a transpose can achieve. // // Please see the whitepaper in the docs folder of the transpose project for a detailed // description of this performance study. // ---------------------------------------------------------------------------------------- // Utilities and system includes #include <helper_string.h> // helper for string parsing #include <helper_image.h> // helper for image and data compariosn #include <helper_cuda.h> // helper for cuda error checking functions const char *sSDKsample = "Transpose"; // Each block transposes/copies a tile of TILE_DIM x TILE_DIM elements // using TILE_DIM x BLOCK_ROWS threads, so that each thread transposes // TILE_DIM/BLOCK_ROWS elements. TILE_DIM must be an integral multiple of BLOCK_ROWS #define TILE_DIM 16 #define BLOCK_ROWS 16 // This sample assumes that MATRIX_SIZE_X = MATRIX_SIZE_Y int MATRIX_SIZE_X = 1024; int MATRIX_SIZE_Y = 1024; int MUL_FACTOR = TILE_DIM; #define FLOOR(a,b) (a-(a%b)) // Compute the tile size necessary to illustrate performance cases for SM12+ hardware int MAX_TILES_SM12 = (FLOOR(MATRIX_SIZE_X,512) * FLOOR(MATRIX_SIZE_Y,512)) / (TILE_DIM *TILE_DIM); // Compute the tile size necessary to illustrate performance cases for SM10,SM11 hardware int MAX_TILES_SM10 = (FLOOR(MATRIX_SIZE_X,384) * FLOOR(MATRIX_SIZE_Y,384)) / (TILE_DIM *TILE_DIM); // Number of repetitions used for timing. 
Two sets of repetitions are performed: // 1) over kernel launches and 2) inside the kernel over just the loads and stores #define NUM_REPS 1 // ------------------------------------------------------- // Copies // width and height must be integral multiples of TILE_DIM // ------------------------------------------------------- __global__ void copy(float *odata, float *idata, int width, int height) { int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; int yIndex = blockIdx.y * TILE_DIM + threadIdx.y; int index = xIndex + width*yIndex; for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { odata[index+i*width] = idata[index+i*width]; } } __global__ void copySharedMem(float *odata, float *idata, int width, int height) { __shared__ float tile[TILE_DIM][TILE_DIM]; int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; int yIndex = blockIdx.y * TILE_DIM + threadIdx.y; int index = xIndex + width*yIndex; for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { if (xIndex < width && yIndex < height) { tile[threadIdx.y][threadIdx.x] = idata[index]; } } __syncthreads(); for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { if (xIndex < height && yIndex < width) { odata[index] = tile[threadIdx.y][threadIdx.x]; } } } // ------------------------------------------------------- // Transposes // width and height must be integral multiples of TILE_DIM // ------------------------------------------------------- __global__ void transposeNaive(float *odata, float *idata, int width, int height) { int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; int yIndex = blockIdx.y * TILE_DIM + threadIdx.y; int index_in = xIndex + width * yIndex; int index_out = yIndex + height * xIndex; for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { odata[index_out+i] = idata[index_in+i*width]; } } // coalesced transpose (with bank conflicts) __global__ void transposeCoalesced(float *odata, float *idata, int width, int height) { __shared__ float tile[TILE_DIM][TILE_DIM]; int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; int yIndex = blockIdx.y * TILE_DIM + threadIdx.y; int index_in = xIndex + (yIndex)*width; xIndex = blockIdx.y * TILE_DIM + threadIdx.x; yIndex = blockIdx.x * TILE_DIM + threadIdx.y; int index_out = xIndex + (yIndex)*height; for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width]; } __syncthreads(); for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i]; } } // Coalesced transpose with no bank conflicts __global__ void transposeNoBankConflicts(float *odata, float *idata, int width, int height) { __shared__ float tile[TILE_DIM][TILE_DIM+1]; int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; int yIndex = blockIdx.y * TILE_DIM + threadIdx.y; int index_in = xIndex + (yIndex)*width; xIndex = blockIdx.y * TILE_DIM + threadIdx.x; yIndex = blockIdx.x * TILE_DIM + threadIdx.y; int index_out = xIndex + (yIndex)*height; for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width]; } __syncthreads(); for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i]; } } // Transpose that effectively reorders execution of thread blocks along diagonals of the // matrix (also coalesced and has no bank conflicts) // // Here blockIdx.x is interpreted as the distance along a diagonal and blockIdx.y as // corresponding to different diagonals // // blockIdx_x and blockIdx_y expressions map the diagonal coordinates to the more commonly // used cartesian coordinates so that the only changes to the code from the coalesced 
version // are the calculation of the blockIdx_x and blockIdx_y and replacement of blockIdx.x and // bloclIdx.y with the subscripted versions in the remaining code __global__ void transposeDiagonal(float *odata, float *idata, int width, int height) { __shared__ float tile[TILE_DIM][TILE_DIM+1]; int blockIdx_x, blockIdx_y; // do diagonal reordering if (width == height) { blockIdx_y = blockIdx.x; blockIdx_x = (blockIdx.x+blockIdx.y)%gridDim.x; } else { int bid = blockIdx.x + gridDim.x*blockIdx.y; blockIdx_y = bid%gridDim.y; blockIdx_x = ((bid/gridDim.y)+blockIdx_y)%gridDim.x; } // from here on the code is same as previous kernel except blockIdx_x replaces blockIdx.x // and similarly for y int xIndex = blockIdx_x * TILE_DIM + threadIdx.x; int yIndex = blockIdx_y * TILE_DIM + threadIdx.y; int index_in = xIndex + (yIndex)*width; xIndex = blockIdx_y * TILE_DIM + threadIdx.x; yIndex = blockIdx_x * TILE_DIM + threadIdx.y; int index_out = xIndex + (yIndex)*height; for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width]; } __syncthreads(); for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i]; } } // -------------------------------------------------------------------- // Partial transposes // NB: the coarse- and fine-grained routines only perform part of a // transpose and will fail the test against the reference solution // // They are used to assess performance characteristics of different // components of a full transpose // -------------------------------------------------------------------- __global__ void transposeFineGrained(float *odata, float *idata, int width, int height) { __shared__ float block[TILE_DIM][TILE_DIM+1]; int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; int yIndex = blockIdx.y * TILE_DIM + threadIdx.y; int index = xIndex + (yIndex)*width; for (int i=0; i < TILE_DIM; i += BLOCK_ROWS) { block[threadIdx.y+i][threadIdx.x] = idata[index+i*width]; } __syncthreads(); for (int i=0; i < TILE_DIM; i += BLOCK_ROWS) { odata[index+i*height] = block[threadIdx.x][threadIdx.y+i]; } } __global__ void transposeCoarseGrained(float *odata, float *idata, int width, int height) { __shared__ float block[TILE_DIM][TILE_DIM+1]; int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; int yIndex = blockIdx.y * TILE_DIM + threadIdx.y; int index_in = xIndex + (yIndex)*width; xIndex = blockIdx.y * TILE_DIM + threadIdx.x; yIndex = blockIdx.x * TILE_DIM + threadIdx.y; int index_out = xIndex + (yIndex)*height; for (int i=0; i<TILE_DIM; i += BLOCK_ROWS) { block[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width]; } __syncthreads(); for (int i=0; i<TILE_DIM; i += BLOCK_ROWS) { odata[index_out+i*height] = block[threadIdx.y+i][threadIdx.x]; } } // --------------------- // host utility routines // --------------------- void computeTransposeGold(float *gold, float *idata, const int size_x, const int size_y) { for (int y = 0; y < size_y; ++y) { for (int x = 0; x < size_x; ++x) { gold[(x * size_y) + y] = idata[(y * size_x) + x]; } } } void getParams(int argc, char **argv, hipDeviceProp_t &deviceProp, int &size_x, int &size_y, int max_tile_dim) { // set matrix size (if (x,y) dim of matrix is not square, then this will have to be modified if (checkCmdLineFlag(argc, (const char **)argv, "dimX")) { size_x = getCmdLineArgumentInt(argc, (const char **) argv, "dimX"); if (size_x > max_tile_dim) { printf("> MatrixSize X = %d is greater than the recommended size = %d\n", size_x, max_tile_dim); } else { printf("> MatrixSize X = %d\n", 
size_x); } } else { size_x = max_tile_dim; // If this is SM12 hardware, we want to round down to a multiple of 512 if ((deviceProp.major == 1 && deviceProp.minor >= 2) || deviceProp.major > 1) { size_x = FLOOR(size_x, 512); } else // else for SM10,SM11 we round down to a multiple of 384 { size_x = FLOOR(size_x, 384); } } if (checkCmdLineFlag(argc, (const char **)argv, "dimY")) { size_y = getCmdLineArgumentInt(argc, (const char **) argv, "dimY"); if (size_y > max_tile_dim) { printf("> MatrixSize Y = %d is greater than the recommended size = %d\n", size_y, max_tile_dim); } else { printf("> MatrixSize Y = %d\n", size_y); } } else { size_y = max_tile_dim; // If this is SM12 hardware, we want to round down to a multiple of 512 if ((deviceProp.major == 1 && deviceProp.minor >= 2) || deviceProp.major > 1) { size_y = FLOOR(size_y, 512); } else // else for SM10,SM11 we round down to a multiple of 384 { size_y = FLOOR(size_y, 384); } } } void showHelp() { printf("\n%s : Command line options\n", sSDKsample); printf("\t-device=n (where n=0,1,2.... for the GPU device)\n\n"); printf("> The default matrix size can be overridden with these parameters\n"); printf("\t-dimX=row_dim_size (matrix row dimensions)\n"); printf("\t-dimY=col_dim_size (matrix column dimensions)\n"); } // ---- // main // ---- int main(int argc, char **argv) { // Start logs printf("%s Starting...\n\n", sSDKsample); if (checkCmdLineFlag(argc, (const char **)argv, "help")) { showHelp(); return 0; } int devID = findCudaDevice(argc, (const char **)argv); hipDeviceProp_t deviceProp; // get number of SMs on this GPU checkCudaErrors(hipGetDevice(&devID)); checkCudaErrors(hipGetDeviceProperties(&deviceProp, devID)); // compute the scaling factor (for GPUs with fewer MPs) float scale_factor, total_tiles; scale_factor = max((192.0f / (_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * (float)deviceProp.multiProcessorCount)), 1.0f); printf("> Device %d: \"%s\"\n", devID, deviceProp.name); printf("> SM Capability %d.%d detected:\n", deviceProp.major, deviceProp.minor); // Calculate number of tiles we will run for the Matrix Transpose performance tests int size_x, size_y, max_matrix_dim, matrix_size_test; if ((deviceProp.major == 1 && deviceProp.minor >= 2) || deviceProp.major > 1) { matrix_size_test = 512; // we round down max_matrix_dim for this perf test total_tiles = (float)MAX_TILES_SM12 / scale_factor; } else { matrix_size_test = 384; // we round down max_matrix_dim for this perf test total_tiles = (float)MAX_TILES_SM10 / scale_factor; } max_matrix_dim = FLOOR((int)(floor(sqrt(total_tiles))* TILE_DIM), matrix_size_test); // This is the minimum size allowed if (max_matrix_dim == 0) { max_matrix_dim = matrix_size_test; } printf("> [%s] has %d MP(s) x %d (Cores/MP) = %d (Cores)\n", deviceProp.name, deviceProp.multiProcessorCount, _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor), _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount); printf("> Compute performance scaling factor = %4.2f\n", scale_factor); // Extract parameters if there are any, command line -dimx and -dimy can override // any of these settings getParams(argc, argv, deviceProp, size_x, size_y, max_matrix_dim); // Kepler Size size_x = 256; size_y = 256; if (size_x != size_y) { printf("\n[%s] does not support non-square matrices (row_dim_size(%d) != col_dim_size(%d))\nExiting...\n\n", sSDKsample, size_x, size_y); // hipDeviceReset causes the driver to clean up all state. 
While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling hipDeviceReset causes all profile data to be // flushed before the application exits hipDeviceReset(); exit(EXIT_FAILURE); } if (size_x%TILE_DIM != 0 || size_y%TILE_DIM != 0) { printf("[%s] Matrix size must be integral multiple of tile size\nExiting...\n\n", sSDKsample); // hipDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling hipDeviceReset causes all profile data to be // flushed before the application exits hipDeviceReset(); exit(EXIT_FAILURE); } // kernel pointer and descriptor void (*kernel)(float *, float *, int, int); const char *kernelName; // execution configuration parameters dim3 grid(size_x/TILE_DIM, size_y/TILE_DIM), threads(TILE_DIM,BLOCK_ROWS); if (grid.x < 1 || grid.y < 1) { printf("[%s] grid size computation incorrect in test \nExiting...\n\n", sSDKsample); // hipDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling hipDeviceReset causes all profile data to be // flushed before the application exits hipDeviceReset(); exit(EXIT_FAILURE); } // CUDA events hipEvent_t start, stop; // size of memory required to store the matrix const int mem_size = sizeof(float) * size_x*size_y; if (2*mem_size > deviceProp.totalGlobalMem) { printf("Input matrix size is larger than the available device memory!\n"); printf("Please choose a smaller size matrix\n"); // hipDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. 
Calling hipDeviceReset causes all profile data to be // flushed before the application exits hipDeviceReset(); exit(EXIT_FAILURE); } // allocate host memory float *h_idata = (float *) malloc(mem_size); float *h_odata = (float *) malloc(mem_size); float *transposeGold = (float *) malloc(mem_size); float *gold; // allocate device memory float *d_idata, *d_odata; checkCudaErrors(hipMalloc((void **) &d_idata, mem_size)); checkCudaErrors(hipMalloc((void **) &d_odata, mem_size)); // initalize host data for (int i = 0; i < (size_x*size_y); ++i) { h_idata[i] = (float) i; } // copy host data to device checkCudaErrors(hipMemcpy(d_idata, h_idata, mem_size, hipMemcpyHostToDevice)); // Compute reference transpose solution computeTransposeGold(transposeGold, h_idata, size_x, size_y); // print out common data for all kernels printf("\nMatrix size: %dx%d (%dx%d tiles), tile size: %dx%d, block size: %dx%d\n\n", size_x, size_y, size_x/TILE_DIM, size_y/TILE_DIM, TILE_DIM, TILE_DIM, TILE_DIM, BLOCK_ROWS); // initialize events checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); // // loop over different kernels // bool success = true; for (int k = 7; k<8; k = k+8) { // set kernel pointer switch (k) { case 0: kernel = &copy; kernelName = "simple copy "; break; case 1: kernel = &copySharedMem; kernelName = "shared memory copy"; break; case 2: kernel = &transposeNaive; kernelName = "naive "; break; case 3: kernel = &transposeCoalesced; kernelName = "coalesced "; break; case 4: kernel = &transposeNoBankConflicts; kernelName = "optimized "; break; case 5: kernel = &transposeCoarseGrained; kernelName = "coarse-grained "; break; case 6: kernel = &transposeFineGrained; kernelName = "fine-grained "; break; case 7: kernel = &transposeDiagonal; kernelName = "diagonal "; break; } // set reference solution if (kernel == &copy || kernel == &copySharedMem) { gold = h_idata; } else if (kernel == &transposeCoarseGrained || kernel == &transposeFineGrained) { gold = h_odata; // fine- and coarse-grained kernels are not full transposes, so bypass check } else { gold = transposeGold; } // Clear error status checkCudaErrors(hipGetLastError()); // warmup to avoid timing startup hipLaunchKernelGGL(( kernel), dim3(grid), dim3(threads), 0, 0, d_odata, d_idata, size_x, size_y); // take measurements for loop over kernel launches checkCudaErrors(hipEventRecord(start, 0)); for (int i=0; i < NUM_REPS; i++) { hipLaunchKernelGGL(( kernel), dim3(grid), dim3(threads), 0, 0, d_odata, d_idata, size_x, size_y); // Ensure no launch failure checkCudaErrors(hipGetLastError()); } checkCudaErrors(hipEventRecord(stop, 0)); checkCudaErrors(hipEventSynchronize(stop)); float kernelTime; checkCudaErrors(hipEventElapsedTime(&kernelTime, start, stop)); checkCudaErrors(hipMemcpy(h_odata, d_odata, mem_size, hipMemcpyDeviceToHost)); bool res = compareData(gold, h_odata, size_x*size_y, 0.01f, 0.0f); if (res == false) { printf("*** %s kernel FAILED ***\n", kernelName); success = false; } // take measurements for loop inside kernel checkCudaErrors(hipMemcpy(h_odata, d_odata, mem_size, hipMemcpyDeviceToHost)); res = compareData(gold, h_odata, size_x*size_y, 0.01f, 0.0f); if (res == false) { printf("*** %s kernel FAILED ***\n", kernelName); success = false; } // report effective bandwidths float kernelBandwidth = 2.0f * 1000.0f * mem_size/(1024*1024*1024)/(kernelTime/NUM_REPS); printf("transpose %s, Throughput = %.4f GB/s, Time = %.5f ms, Size = %u fp32 elements, NumDevsUsed = %u, Workgroup = %u\n", kernelName, kernelBandwidth, 
               kernelTime/NUM_REPS, (size_x *size_y), 1, TILE_DIM *BLOCK_ROWS);
    }

    // cleanup
    free(h_idata);
    free(h_odata);
    free(transposeGold);

    hipFree(d_idata);
    hipFree(d_odata);

    checkCudaErrors(hipEventDestroy(start));
    checkCudaErrors(hipEventDestroy(stop));

    // hipDeviceReset causes the driver to clean up all state. While
    // not mandatory in normal operation, it is good practice. It is also
    // needed to ensure correct operation when the application is being
    // profiled. Calling hipDeviceReset causes all profile data to be
    // flushed before the application exits
    hipDeviceReset();

    if (!success)
    {
        printf("Test failed!\n");
        exit(EXIT_FAILURE);
    }

    printf("Test passed\n");
    exit(EXIT_SUCCESS);
}
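// --------------------------------------------------------------------------
// Hedged illustration (not part of the original sample): a tiny host-only
// C++ check of the diagonal block reordering used in transposeDiagonal above
// for the square-matrix case. `gridDimX` is a made-up name for this sketch.
// It verifies that the mapping
//     blockIdx_y = bx;  blockIdx_x = (bx + by) % gridDimX;
// visits every (x, y) tile exactly once, which is why the reordered kernel
// still performs a complete transpose while spreading blocks across
// partitions.
// --------------------------------------------------------------------------
#include <cstdio>
#include <vector>

int main()
{
    const int gridDimX = 8;   // e.g. a 128x128 matrix with TILE_DIM = 16
    std::vector<int> hits(gridDimX * gridDimX, 0);

    for (int by = 0; by < gridDimX; by++)
    {
        for (int bx = 0; bx < gridDimX; bx++)
        {
            int blockIdx_y = bx;                    // bx: distance along a diagonal
            int blockIdx_x = (bx + by) % gridDimX;  // by: selects the diagonal
            hits[blockIdx_y * gridDimX + blockIdx_x]++;
        }
    }

    for (int i = 0; i < gridDimX * gridDimX; i++)
    {
        if (hits[i] != 1)
        {
            printf("tile %d visited %d times -- mapping is not a permutation\n", i, hits[i]);
            return 1;
        }
    }

    printf("diagonal reordering covers every tile exactly once\n");
    return 0;
}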
3b8d21ea3b257e08ede0a54b2c55df05ba842c63.cu
//////////////////////////////////////////////////////////////////////////// // // Copyright 1993-2014 NVIDIA Corporation. All rights reserved. // // Please refer to the NVIDIA end user license agreement (EULA) associated // with this source code for terms and conditions that govern your use of // this software. Any use, reproduction, disclosure, or distribution of // this software and related documentation outside the terms of the EULA // is strictly prohibited. // //////////////////////////////////////////////////////////////////////////// // ---------------------------------------------------------------------------------------- // Transpose // // This file contains both device and host code for transposing a floating-point // matrix. It performs several transpose kernels, which incrementally improve performance // through coalescing, removing shared memory bank conflicts, and eliminating partition // camping. Several of the kernels perform a copy, used to represent the best case // performance that a transpose can achieve. // // Please see the whitepaper in the docs folder of the transpose project for a detailed // description of this performance study. // ---------------------------------------------------------------------------------------- // Utilities and system includes #include <helper_string.h> // helper for string parsing #include <helper_image.h> // helper for image and data compariosn #include <helper_cuda.h> // helper for cuda error checking functions const char *sSDKsample = "Transpose"; // Each block transposes/copies a tile of TILE_DIM x TILE_DIM elements // using TILE_DIM x BLOCK_ROWS threads, so that each thread transposes // TILE_DIM/BLOCK_ROWS elements. TILE_DIM must be an integral multiple of BLOCK_ROWS #define TILE_DIM 16 #define BLOCK_ROWS 16 // This sample assumes that MATRIX_SIZE_X = MATRIX_SIZE_Y int MATRIX_SIZE_X = 1024; int MATRIX_SIZE_Y = 1024; int MUL_FACTOR = TILE_DIM; #define FLOOR(a,b) (a-(a%b)) // Compute the tile size necessary to illustrate performance cases for SM12+ hardware int MAX_TILES_SM12 = (FLOOR(MATRIX_SIZE_X,512) * FLOOR(MATRIX_SIZE_Y,512)) / (TILE_DIM *TILE_DIM); // Compute the tile size necessary to illustrate performance cases for SM10,SM11 hardware int MAX_TILES_SM10 = (FLOOR(MATRIX_SIZE_X,384) * FLOOR(MATRIX_SIZE_Y,384)) / (TILE_DIM *TILE_DIM); // Number of repetitions used for timing. 
Two sets of repetitions are performed: // 1) over kernel launches and 2) inside the kernel over just the loads and stores #define NUM_REPS 1 // ------------------------------------------------------- // Copies // width and height must be integral multiples of TILE_DIM // ------------------------------------------------------- __global__ void copy(float *odata, float *idata, int width, int height) { int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; int yIndex = blockIdx.y * TILE_DIM + threadIdx.y; int index = xIndex + width*yIndex; for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { odata[index+i*width] = idata[index+i*width]; } } __global__ void copySharedMem(float *odata, float *idata, int width, int height) { __shared__ float tile[TILE_DIM][TILE_DIM]; int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; int yIndex = blockIdx.y * TILE_DIM + threadIdx.y; int index = xIndex + width*yIndex; for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { if (xIndex < width && yIndex < height) { tile[threadIdx.y][threadIdx.x] = idata[index]; } } __syncthreads(); for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { if (xIndex < height && yIndex < width) { odata[index] = tile[threadIdx.y][threadIdx.x]; } } } // ------------------------------------------------------- // Transposes // width and height must be integral multiples of TILE_DIM // ------------------------------------------------------- __global__ void transposeNaive(float *odata, float *idata, int width, int height) { int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; int yIndex = blockIdx.y * TILE_DIM + threadIdx.y; int index_in = xIndex + width * yIndex; int index_out = yIndex + height * xIndex; for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { odata[index_out+i] = idata[index_in+i*width]; } } // coalesced transpose (with bank conflicts) __global__ void transposeCoalesced(float *odata, float *idata, int width, int height) { __shared__ float tile[TILE_DIM][TILE_DIM]; int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; int yIndex = blockIdx.y * TILE_DIM + threadIdx.y; int index_in = xIndex + (yIndex)*width; xIndex = blockIdx.y * TILE_DIM + threadIdx.x; yIndex = blockIdx.x * TILE_DIM + threadIdx.y; int index_out = xIndex + (yIndex)*height; for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width]; } __syncthreads(); for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i]; } } // Coalesced transpose with no bank conflicts __global__ void transposeNoBankConflicts(float *odata, float *idata, int width, int height) { __shared__ float tile[TILE_DIM][TILE_DIM+1]; int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; int yIndex = blockIdx.y * TILE_DIM + threadIdx.y; int index_in = xIndex + (yIndex)*width; xIndex = blockIdx.y * TILE_DIM + threadIdx.x; yIndex = blockIdx.x * TILE_DIM + threadIdx.y; int index_out = xIndex + (yIndex)*height; for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width]; } __syncthreads(); for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i]; } } // Transpose that effectively reorders execution of thread blocks along diagonals of the // matrix (also coalesced and has no bank conflicts) // // Here blockIdx.x is interpreted as the distance along a diagonal and blockIdx.y as // corresponding to different diagonals // // blockIdx_x and blockIdx_y expressions map the diagonal coordinates to the more commonly // used cartesian coordinates so that the only changes to the code from the coalesced 
version // are the calculation of the blockIdx_x and blockIdx_y and replacement of blockIdx.x and // bloclIdx.y with the subscripted versions in the remaining code __global__ void transposeDiagonal(float *odata, float *idata, int width, int height) { __shared__ float tile[TILE_DIM][TILE_DIM+1]; int blockIdx_x, blockIdx_y; // do diagonal reordering if (width == height) { blockIdx_y = blockIdx.x; blockIdx_x = (blockIdx.x+blockIdx.y)%gridDim.x; } else { int bid = blockIdx.x + gridDim.x*blockIdx.y; blockIdx_y = bid%gridDim.y; blockIdx_x = ((bid/gridDim.y)+blockIdx_y)%gridDim.x; } // from here on the code is same as previous kernel except blockIdx_x replaces blockIdx.x // and similarly for y int xIndex = blockIdx_x * TILE_DIM + threadIdx.x; int yIndex = blockIdx_y * TILE_DIM + threadIdx.y; int index_in = xIndex + (yIndex)*width; xIndex = blockIdx_y * TILE_DIM + threadIdx.x; yIndex = blockIdx_x * TILE_DIM + threadIdx.y; int index_out = xIndex + (yIndex)*height; for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width]; } __syncthreads(); for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i]; } } // -------------------------------------------------------------------- // Partial transposes // NB: the coarse- and fine-grained routines only perform part of a // transpose and will fail the test against the reference solution // // They are used to assess performance characteristics of different // components of a full transpose // -------------------------------------------------------------------- __global__ void transposeFineGrained(float *odata, float *idata, int width, int height) { __shared__ float block[TILE_DIM][TILE_DIM+1]; int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; int yIndex = blockIdx.y * TILE_DIM + threadIdx.y; int index = xIndex + (yIndex)*width; for (int i=0; i < TILE_DIM; i += BLOCK_ROWS) { block[threadIdx.y+i][threadIdx.x] = idata[index+i*width]; } __syncthreads(); for (int i=0; i < TILE_DIM; i += BLOCK_ROWS) { odata[index+i*height] = block[threadIdx.x][threadIdx.y+i]; } } __global__ void transposeCoarseGrained(float *odata, float *idata, int width, int height) { __shared__ float block[TILE_DIM][TILE_DIM+1]; int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; int yIndex = blockIdx.y * TILE_DIM + threadIdx.y; int index_in = xIndex + (yIndex)*width; xIndex = blockIdx.y * TILE_DIM + threadIdx.x; yIndex = blockIdx.x * TILE_DIM + threadIdx.y; int index_out = xIndex + (yIndex)*height; for (int i=0; i<TILE_DIM; i += BLOCK_ROWS) { block[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width]; } __syncthreads(); for (int i=0; i<TILE_DIM; i += BLOCK_ROWS) { odata[index_out+i*height] = block[threadIdx.y+i][threadIdx.x]; } } // --------------------- // host utility routines // --------------------- void computeTransposeGold(float *gold, float *idata, const int size_x, const int size_y) { for (int y = 0; y < size_y; ++y) { for (int x = 0; x < size_x; ++x) { gold[(x * size_y) + y] = idata[(y * size_x) + x]; } } } void getParams(int argc, char **argv, cudaDeviceProp &deviceProp, int &size_x, int &size_y, int max_tile_dim) { // set matrix size (if (x,y) dim of matrix is not square, then this will have to be modified if (checkCmdLineFlag(argc, (const char **)argv, "dimX")) { size_x = getCmdLineArgumentInt(argc, (const char **) argv, "dimX"); if (size_x > max_tile_dim) { printf("> MatrixSize X = %d is greater than the recommended size = %d\n", size_x, max_tile_dim); } else { printf("> MatrixSize X = %d\n", 
size_x); } } else { size_x = max_tile_dim; // If this is SM12 hardware, we want to round down to a multiple of 512 if ((deviceProp.major == 1 && deviceProp.minor >= 2) || deviceProp.major > 1) { size_x = FLOOR(size_x, 512); } else // else for SM10,SM11 we round down to a multiple of 384 { size_x = FLOOR(size_x, 384); } } if (checkCmdLineFlag(argc, (const char **)argv, "dimY")) { size_y = getCmdLineArgumentInt(argc, (const char **) argv, "dimY"); if (size_y > max_tile_dim) { printf("> MatrixSize Y = %d is greater than the recommended size = %d\n", size_y, max_tile_dim); } else { printf("> MatrixSize Y = %d\n", size_y); } } else { size_y = max_tile_dim; // If this is SM12 hardware, we want to round down to a multiple of 512 if ((deviceProp.major == 1 && deviceProp.minor >= 2) || deviceProp.major > 1) { size_y = FLOOR(size_y, 512); } else // else for SM10,SM11 we round down to a multiple of 384 { size_y = FLOOR(size_y, 384); } } } void showHelp() { printf("\n%s : Command line options\n", sSDKsample); printf("\t-device=n (where n=0,1,2.... for the GPU device)\n\n"); printf("> The default matrix size can be overridden with these parameters\n"); printf("\t-dimX=row_dim_size (matrix row dimensions)\n"); printf("\t-dimY=col_dim_size (matrix column dimensions)\n"); } // ---- // main // ---- int main(int argc, char **argv) { // Start logs printf("%s Starting...\n\n", sSDKsample); if (checkCmdLineFlag(argc, (const char **)argv, "help")) { showHelp(); return 0; } int devID = findCudaDevice(argc, (const char **)argv); cudaDeviceProp deviceProp; // get number of SMs on this GPU checkCudaErrors(cudaGetDevice(&devID)); checkCudaErrors(cudaGetDeviceProperties(&deviceProp, devID)); // compute the scaling factor (for GPUs with fewer MPs) float scale_factor, total_tiles; scale_factor = max((192.0f / (_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * (float)deviceProp.multiProcessorCount)), 1.0f); printf("> Device %d: \"%s\"\n", devID, deviceProp.name); printf("> SM Capability %d.%d detected:\n", deviceProp.major, deviceProp.minor); // Calculate number of tiles we will run for the Matrix Transpose performance tests int size_x, size_y, max_matrix_dim, matrix_size_test; if ((deviceProp.major == 1 && deviceProp.minor >= 2) || deviceProp.major > 1) { matrix_size_test = 512; // we round down max_matrix_dim for this perf test total_tiles = (float)MAX_TILES_SM12 / scale_factor; } else { matrix_size_test = 384; // we round down max_matrix_dim for this perf test total_tiles = (float)MAX_TILES_SM10 / scale_factor; } max_matrix_dim = FLOOR((int)(floor(sqrt(total_tiles))* TILE_DIM), matrix_size_test); // This is the minimum size allowed if (max_matrix_dim == 0) { max_matrix_dim = matrix_size_test; } printf("> [%s] has %d MP(s) x %d (Cores/MP) = %d (Cores)\n", deviceProp.name, deviceProp.multiProcessorCount, _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor), _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount); printf("> Compute performance scaling factor = %4.2f\n", scale_factor); // Extract parameters if there are any, command line -dimx and -dimy can override // any of these settings getParams(argc, argv, deviceProp, size_x, size_y, max_matrix_dim); // Kepler Size size_x = 256; size_y = 256; if (size_x != size_y) { printf("\n[%s] does not support non-square matrices (row_dim_size(%d) != col_dim_size(%d))\nExiting...\n\n", sSDKsample, size_x, size_y); // cudaDeviceReset causes the driver to clean up all state. 
While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling cudaDeviceReset causes all profile data to be // flushed before the application exits cudaDeviceReset(); exit(EXIT_FAILURE); } if (size_x%TILE_DIM != 0 || size_y%TILE_DIM != 0) { printf("[%s] Matrix size must be integral multiple of tile size\nExiting...\n\n", sSDKsample); // cudaDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling cudaDeviceReset causes all profile data to be // flushed before the application exits cudaDeviceReset(); exit(EXIT_FAILURE); } // kernel pointer and descriptor void (*kernel)(float *, float *, int, int); const char *kernelName; // execution configuration parameters dim3 grid(size_x/TILE_DIM, size_y/TILE_DIM), threads(TILE_DIM,BLOCK_ROWS); if (grid.x < 1 || grid.y < 1) { printf("[%s] grid size computation incorrect in test \nExiting...\n\n", sSDKsample); // cudaDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling cudaDeviceReset causes all profile data to be // flushed before the application exits cudaDeviceReset(); exit(EXIT_FAILURE); } // CUDA events cudaEvent_t start, stop; // size of memory required to store the matrix const int mem_size = sizeof(float) * size_x*size_y; if (2*mem_size > deviceProp.totalGlobalMem) { printf("Input matrix size is larger than the available device memory!\n"); printf("Please choose a smaller size matrix\n"); // cudaDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. 
Calling cudaDeviceReset causes all profile data to be // flushed before the application exits cudaDeviceReset(); exit(EXIT_FAILURE); } // allocate host memory float *h_idata = (float *) malloc(mem_size); float *h_odata = (float *) malloc(mem_size); float *transposeGold = (float *) malloc(mem_size); float *gold; // allocate device memory float *d_idata, *d_odata; checkCudaErrors(cudaMalloc((void **) &d_idata, mem_size)); checkCudaErrors(cudaMalloc((void **) &d_odata, mem_size)); // initalize host data for (int i = 0; i < (size_x*size_y); ++i) { h_idata[i] = (float) i; } // copy host data to device checkCudaErrors(cudaMemcpy(d_idata, h_idata, mem_size, cudaMemcpyHostToDevice)); // Compute reference transpose solution computeTransposeGold(transposeGold, h_idata, size_x, size_y); // print out common data for all kernels printf("\nMatrix size: %dx%d (%dx%d tiles), tile size: %dx%d, block size: %dx%d\n\n", size_x, size_y, size_x/TILE_DIM, size_y/TILE_DIM, TILE_DIM, TILE_DIM, TILE_DIM, BLOCK_ROWS); // initialize events checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); // // loop over different kernels // bool success = true; for (int k = 7; k<8; k = k+8) { // set kernel pointer switch (k) { case 0: kernel = &copy; kernelName = "simple copy "; break; case 1: kernel = &copySharedMem; kernelName = "shared memory copy"; break; case 2: kernel = &transposeNaive; kernelName = "naive "; break; case 3: kernel = &transposeCoalesced; kernelName = "coalesced "; break; case 4: kernel = &transposeNoBankConflicts; kernelName = "optimized "; break; case 5: kernel = &transposeCoarseGrained; kernelName = "coarse-grained "; break; case 6: kernel = &transposeFineGrained; kernelName = "fine-grained "; break; case 7: kernel = &transposeDiagonal; kernelName = "diagonal "; break; } // set reference solution if (kernel == &copy || kernel == &copySharedMem) { gold = h_idata; } else if (kernel == &transposeCoarseGrained || kernel == &transposeFineGrained) { gold = h_odata; // fine- and coarse-grained kernels are not full transposes, so bypass check } else { gold = transposeGold; } // Clear error status checkCudaErrors(cudaGetLastError()); // warmup to avoid timing startup kernel<<<grid, threads>>>(d_odata, d_idata, size_x, size_y); // take measurements for loop over kernel launches checkCudaErrors(cudaEventRecord(start, 0)); for (int i=0; i < NUM_REPS; i++) { kernel<<<grid, threads>>>(d_odata, d_idata, size_x, size_y); // Ensure no launch failure checkCudaErrors(cudaGetLastError()); } checkCudaErrors(cudaEventRecord(stop, 0)); checkCudaErrors(cudaEventSynchronize(stop)); float kernelTime; checkCudaErrors(cudaEventElapsedTime(&kernelTime, start, stop)); checkCudaErrors(cudaMemcpy(h_odata, d_odata, mem_size, cudaMemcpyDeviceToHost)); bool res = compareData(gold, h_odata, size_x*size_y, 0.01f, 0.0f); if (res == false) { printf("*** %s kernel FAILED ***\n", kernelName); success = false; } // take measurements for loop inside kernel checkCudaErrors(cudaMemcpy(h_odata, d_odata, mem_size, cudaMemcpyDeviceToHost)); res = compareData(gold, h_odata, size_x*size_y, 0.01f, 0.0f); if (res == false) { printf("*** %s kernel FAILED ***\n", kernelName); success = false; } // report effective bandwidths float kernelBandwidth = 2.0f * 1000.0f * mem_size/(1024*1024*1024)/(kernelTime/NUM_REPS); printf("transpose %s, Throughput = %.4f GB/s, Time = %.5f ms, Size = %u fp32 elements, NumDevsUsed = %u, Workgroup = %u\n", kernelName, kernelBandwidth, kernelTime/NUM_REPS, (size_x *size_y), 1, TILE_DIM *BLOCK_ROWS); } 
    // cleanup
    free(h_idata);
    free(h_odata);
    free(transposeGold);

    cudaFree(d_idata);
    cudaFree(d_odata);

    checkCudaErrors(cudaEventDestroy(start));
    checkCudaErrors(cudaEventDestroy(stop));

    // cudaDeviceReset causes the driver to clean up all state. While
    // not mandatory in normal operation, it is good practice. It is also
    // needed to ensure correct operation when the application is being
    // profiled. Calling cudaDeviceReset causes all profile data to be
    // flushed before the application exits
    cudaDeviceReset();

    if (!success)
    {
        printf("Test failed!\n");
        exit(EXIT_FAILURE);
    }

    printf("Test passed\n");
    exit(EXIT_SUCCESS);
}
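// --------------------------------------------------------------------------
// Hedged sketch (illustrative only) of the effective-bandwidth formula the
// sample prints: a transpose reads the matrix once and writes it once
// (factor 2), mem_size is in bytes, the event time is in milliseconds for
// NUM_REPS launches, and the 1000 / 1024^3 factors convert bytes per
// millisecond to GB/s. The function and variable names below are local to
// this sketch.
// --------------------------------------------------------------------------
#include <cstdio>

static float effectiveBandwidthGBs(float memSizeBytes, float totalTimeMs, int numReps)
{
    float timePerLaunchMs = totalTimeMs / numReps;
    float bytesMoved      = 2.0f * memSizeBytes;         // one read + one write of the matrix
    return bytesMoved / (1024.0f * 1024.0f * 1024.0f)    // bytes -> GB
           / (timePerLaunchMs / 1000.0f);                // ms -> s
}

int main()
{
    // e.g. a 256x256 float matrix moved in 0.05 ms per launch, 10 launches timed
    printf("%.4f GB/s\n",
           effectiveBandwidthGBs(256 * 256 * sizeof(float), 0.05f * 10, 10));
    return 0;
}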
f4fada3b4a99c15a185aa461913ea3331313316b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 1993-2015, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <stdio.h> #include <assert.h> // Convenience function for checking CUDA runtime API results // can be wrapped around any runtime API call. No-op in release builds. 
inline hipError_t checkCuda(hipError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != hipSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result)); assert(result == hipSuccess); } #endif return result; } float fx = 1.0f, fy = 1.0f, fz = 1.0f; const int mx = 64, my = 64, mz = 64; // shared memory tiles will be m*-by-*Pencils // sPencils is used when each thread calculates the derivative at one point // lPencils is used for coalescing in y and z where each thread has to // calculate the derivative at mutiple points const int sPencils = 4; // small # pencils const int lPencils = 32; // large # pencils dim3 grid[3][2], block[3][2]; // stencil coefficients __constant__ float c_ax, c_bx, c_cx, c_dx; __constant__ float c_ay, c_by, c_cy, c_dy; __constant__ float c_az, c_bz, c_cz, c_dz; // host routine to set constant data void setDerivativeParameters() { // check to make sure dimensions are integral multiples of sPencils if ((mx % sPencils != 0) || (my %sPencils != 0) || (mz % sPencils != 0)) { printf("'mx', 'my', and 'mz' must be integral multiples of sPencils\n"); exit(1); } if ((mx % lPencils != 0) || (my % lPencils != 0)) { printf("'mx' and 'my' must be multiples of lPencils\n"); exit(1); } // stencil weights (for unit length problem) float dsinv = mx-1.f; float ax = 4.f / 5.f * dsinv; float bx = -1.f / 5.f * dsinv; float cx = 4.f / 105.f * dsinv; float dx = -1.f / 280.f * dsinv; checkCuda( hipMemcpyToSymbol(c_ax, &ax, sizeof(float), 0, hipMemcpyHostToDevice) ); checkCuda( hipMemcpyToSymbol(c_bx, &bx, sizeof(float), 0, hipMemcpyHostToDevice) ); checkCuda( hipMemcpyToSymbol(c_cx, &cx, sizeof(float), 0, hipMemcpyHostToDevice) ); checkCuda( hipMemcpyToSymbol(c_dx, &dx, sizeof(float), 0, hipMemcpyHostToDevice) ); dsinv = my-1.f; float ay = 4.f / 5.f * dsinv; float by = -1.f / 5.f * dsinv; float cy = 4.f / 105.f * dsinv; float dy = -1.f / 280.f * dsinv; checkCuda( hipMemcpyToSymbol(c_ay, &ay, sizeof(float), 0, hipMemcpyHostToDevice) ); checkCuda( hipMemcpyToSymbol(c_by, &by, sizeof(float), 0, hipMemcpyHostToDevice) ); checkCuda( hipMemcpyToSymbol(c_cy, &cy, sizeof(float), 0, hipMemcpyHostToDevice) ); checkCuda( hipMemcpyToSymbol(c_dy, &dy, sizeof(float), 0, hipMemcpyHostToDevice) ); dsinv = mz-1.f; float az = 4.f / 5.f * dsinv; float bz = -1.f / 5.f * dsinv; float cz = 4.f / 105.f * dsinv; float dz = -1.f / 280.f * dsinv; checkCuda( hipMemcpyToSymbol(c_az, &az, sizeof(float), 0, hipMemcpyHostToDevice) ); checkCuda( hipMemcpyToSymbol(c_bz, &bz, sizeof(float), 0, hipMemcpyHostToDevice) ); checkCuda( hipMemcpyToSymbol(c_cz, &cz, sizeof(float), 0, hipMemcpyHostToDevice) ); checkCuda( hipMemcpyToSymbol(c_dz, &dz, sizeof(float), 0, hipMemcpyHostToDevice) ); // Execution configurations for small and large pencil tiles grid[0][0] = dim3(my / sPencils, mz, 1); block[0][0] = dim3(mx, sPencils, 1); grid[0][1] = dim3(my / lPencils, mz, 1); block[0][1] = dim3(mx, sPencils, 1); grid[1][0] = dim3(mx / sPencils, mz, 1); block[1][0] = dim3(sPencils, my, 1); grid[1][1] = dim3(mx / lPencils, mz, 1); // we want to use the same number of threads as above, // so when we use lPencils instead of sPencils in one // dimension, we multiply the other by sPencils/lPencils block[1][1] = dim3(lPencils, my * sPencils / lPencils, 1); grid[2][0] = dim3(mx / sPencils, my, 1); block[2][0] = dim3(sPencils, mz, 1); grid[2][1] = dim3(mx / lPencils, my, 1); block[2][1] = dim3(lPencils, mz * sPencils / lPencils, 1); } void initInput(float *f, int dim) { const float twopi = 8.f * (float)atan(1.0); for 
(int k = 0; k < mz; k++) { for (int j = 0; j < my; j++) { for (int i = 0; i < mx; i++) { switch (dim) { case 0: f[k*mx*my+j*mx+i] = cos(fx*twopi*(i-1.f)/(mx-1.f)); break; case 1: f[k*mx*my+j*mx+i] = cos(fy*twopi*(j-1.f)/(my-1.f)); break; case 2: f[k*mx*my+j*mx+i] = cos(fz*twopi*(k-1.f)/(mz-1.f)); break; } } } } } void initSol(float *sol, int dim) { const float twopi = 8.f * (float)atan(1.0); for (int k = 0; k < mz; k++) { for (int j = 0; j < my; j++) { for (int i = 0; i < mx; i++) { switch (dim) { case 0: sol[k*mx*my+j*mx+i] = -fx*twopi*sin(fx*twopi*(i-1.f)/(mx-1.f)); break; case 1: sol[k*mx*my+j*mx+i] = -fy*twopi*sin(fy*twopi*(j-1.f)/(my-1.f)); break; case 2: sol[k*mx*my+j*mx+i] = -fz*twopi*sin(fz*twopi*(k-1.f)/(mz-1.f)); break; } } } } } void checkResults(double &error, double &maxError, float *sol, float *df) { // error = sqrt(sum((sol-df)**2)/(mx*my*mz)) // maxError = maxval(abs(sol-df)) maxError = 0; error = 0; for (int k = 0; k < mz; k++) { for (int j = 0; j < my; j++) { for (int i = 0; i < mx; i++) { float s = sol[k*mx*my+j*mx+i]; float f = df[k*mx*my+j*mx+i]; //printf("%d %d %d: %f %f\n", i, j, k, s, f); error += (s-f)*(s-f); if (fabs(s-f) > maxError) maxError = fabs(s-f); } } } error = sqrt(error / (mx*my*mz)); } // ------------- // x derivatives // ------------- __global__ void derivative_x(float *f, float *df) { __shared__ float s_f[sPencils][mx+8]; // 4-wide halo int i = threadIdx.x; int j = blockIdx.x*blockDim.y + threadIdx.y; int k = blockIdx.y; int si = i + 4; // local i for shared memory access + halo offset int sj = threadIdx.y; // local j for shared memory access int globalIdx = k * mx * my + j * mx + i; s_f[sj][si] = f[globalIdx]; __syncthreads(); // fill in periodic images in shared memory array if (i < 4) { s_f[sj][si-4] = s_f[sj][si+mx-5]; s_f[sj][si+mx] = s_f[sj][si+1]; } __syncthreads(); df[globalIdx] = ( c_ax * ( s_f[sj][si+1] - s_f[sj][si-1] ) + c_bx * ( s_f[sj][si+2] - s_f[sj][si-2] ) + c_cx * ( s_f[sj][si+3] - s_f[sj][si-3] ) + c_dx * ( s_f[sj][si+4] - s_f[sj][si-4] ) ); } // this version uses a 64x32 shared memory tile, // still with 64*sPencils threads __global__ void derivative_x_lPencils(float *f, float *df) { __shared__ float s_f[lPencils][mx+8]; // 4-wide halo int i = threadIdx.x; int jBase = blockIdx.x*lPencils; int k = blockIdx.y; int si = i + 4; // local i for shared memory access + halo offset for (int sj = threadIdx.y; sj < lPencils; sj += blockDim.y) { int globalIdx = k * mx * my + (jBase + sj) * mx + i; s_f[sj][si] = f[globalIdx]; } __syncthreads(); // fill in periodic images in shared memory array if (i < 4) { for (int sj = threadIdx.y; sj < lPencils; sj += blockDim.y) { s_f[sj][si-4] = s_f[sj][si+mx-5]; s_f[sj][si+mx] = s_f[sj][si+1]; } } __syncthreads(); for (int sj = threadIdx.y; sj < lPencils; sj += blockDim.y) { int globalIdx = k * mx * my + (jBase + sj) * mx + i; df[globalIdx] = ( c_ax * ( s_f[sj][si+1] - s_f[sj][si-1] ) + c_bx * ( s_f[sj][si+2] - s_f[sj][si-2] ) + c_cx * ( s_f[sj][si+3] - s_f[sj][si-3] ) + c_dx * ( s_f[sj][si+4] - s_f[sj][si-4] ) ); } } // ------------- // y derivatives // ------------- __global__ void derivative_y(float *f, float *df) { __shared__ float s_f[my+8][sPencils]; int i = blockIdx.x*blockDim.x + threadIdx.x; int j = threadIdx.y; int k = blockIdx.y; int si = threadIdx.x; int sj = j + 4; int globalIdx = k * mx * my + j * mx + i; s_f[sj][si] = f[globalIdx]; __syncthreads(); if (j < 4) { s_f[sj-4][si] = s_f[sj+my-5][si]; s_f[sj+my][si] = s_f[sj+1][si]; } __syncthreads(); df[globalIdx] = ( c_ay * ( s_f[sj+1][si] - 
s_f[sj-1][si] ) + c_by * ( s_f[sj+2][si] - s_f[sj-2][si] ) + c_cy * ( s_f[sj+3][si] - s_f[sj-3][si] ) + c_dy * ( s_f[sj+4][si] - s_f[sj-4][si] ) ); } // y derivative using a tile of 32x64, // launch with thread block of 32x8 __global__ void derivative_y_lPencils(float *f, float *df) { __shared__ float s_f[my+8][lPencils]; int i = blockIdx.x*blockDim.x + threadIdx.x; int k = blockIdx.y; int si = threadIdx.x; for (int j = threadIdx.y; j < my; j += blockDim.y) { int globalIdx = k * mx * my + j * mx + i; int sj = j + 4; s_f[sj][si] = f[globalIdx]; } __syncthreads(); int sj = threadIdx.y + 4; if (sj < 8) { s_f[sj-4][si] = s_f[sj+my-5][si]; s_f[sj+my][si] = s_f[sj+1][si]; } __syncthreads(); for (int j = threadIdx.y; j < my; j += blockDim.y) { int globalIdx = k * mx * my + j * mx + i; int sj = j + 4; df[globalIdx] = ( c_ay * ( s_f[sj+1][si] - s_f[sj-1][si] ) + c_by * ( s_f[sj+2][si] - s_f[sj-2][si] ) + c_cy * ( s_f[sj+3][si] - s_f[sj-3][si] ) + c_dy * ( s_f[sj+4][si] - s_f[sj-4][si] ) ); } } // ------------ // z derivative // ------------ __global__ void derivative_z(float *f, float *df) { __shared__ float s_f[mz+8][sPencils]; int i = blockIdx.x*blockDim.x + threadIdx.x; int j = blockIdx.y; int k = threadIdx.y; int si = threadIdx.x; int sk = k + 4; // halo offset int globalIdx = k * mx * my + j * mx + i; s_f[sk][si] = f[globalIdx]; __syncthreads(); if (k < 4) { s_f[sk-4][si] = s_f[sk+mz-5][si]; s_f[sk+mz][si] = s_f[sk+1][si]; } __syncthreads(); df[globalIdx] = ( c_az * ( s_f[sk+1][si] - s_f[sk-1][si] ) + c_bz * ( s_f[sk+2][si] - s_f[sk-2][si] ) + c_cz * ( s_f[sk+3][si] - s_f[sk-3][si] ) + c_dz * ( s_f[sk+4][si] - s_f[sk-4][si] ) ); } __global__ void derivative_z_lPencils(float *f, float *df) { __shared__ float s_f[mz+8][lPencils]; int i = blockIdx.x*blockDim.x + threadIdx.x; int j = blockIdx.y; int si = threadIdx.x; for (int k = threadIdx.y; k < mz; k += blockDim.y) { int globalIdx = k * mx * my + j * mx + i; int sk = k + 4; s_f[sk][si] = f[globalIdx]; } __syncthreads(); int k = threadIdx.y + 4; if (k < 8) { s_f[k-4][si] = s_f[k+mz-5][si]; s_f[k+mz][si] = s_f[k+1][si]; } __syncthreads(); for (int k = threadIdx.y; k < mz; k += blockDim.y) { int globalIdx = k * mx * my + j * mx + i; int sk = k + 4; df[globalIdx] = ( c_az * ( s_f[sk+1][si] - s_f[sk-1][si] ) + c_bz * ( s_f[sk+2][si] - s_f[sk-2][si] ) + c_cz * ( s_f[sk+3][si] - s_f[sk-3][si] ) + c_dz * ( s_f[sk+4][si] - s_f[sk-4][si] ) ); } } // Run the kernels for a given dimension. 
One for sPencils, one for lPencils void runTest(int dimension) { void (*fpDeriv[2])(float*, float*); switch(dimension) { case 0: fpDeriv[0] = derivative_x; fpDeriv[1] = derivative_x_lPencils; break; case 1: fpDeriv[0] = derivative_y; fpDeriv[1] = derivative_y_lPencils; break; case 2: fpDeriv[0] = derivative_z; fpDeriv[1] = derivative_z_lPencils; break; } int sharedDims[3][2][2] = { mx, sPencils, mx, lPencils, sPencils, my, lPencils, my, sPencils, mz, lPencils, mz }; float *f = new float[mx*my*mz]; float *df = new float[mx*my*mz]; float *sol = new float[mx*my*mz]; initInput(f, dimension); initSol(sol, dimension); // device arrays int bytes = mx*my*mz * sizeof(float); float *d_f, *d_df; checkCuda( hipMalloc((void**)&d_f, bytes) ); checkCuda( hipMalloc((void**)&d_df, bytes) ); const int nReps = 20; float milliseconds; hipEvent_t startEvent, stopEvent; checkCuda( hipEventCreate(&startEvent) ); checkCuda( hipEventCreate(&stopEvent) ); double error, maxError; printf("%c derivatives\n\n", (char)(0x58 + dimension)); for (int fp = 0; fp < 2; fp++) { checkCuda( hipMemcpy(d_f, f, bytes, hipMemcpyHostToDevice) ); checkCuda( hipMemset(d_df, 0, bytes) ); fpDerivhipLaunchKernelGGL(([fp)], dim3(grid[dimension]([fp)]),dim3(block[dimension]([fp)]), 0, 0, d_f, d_df); // warm up checkCuda( hipEventRecord(startEvent, 0) ); for (int i = 0; i < nReps; i++) fpDerivhipLaunchKernelGGL(([fp)], dim3(grid[dimension]([fp)]),dim3(block[dimension]([fp)]), 0, 0, d_f, d_df); checkCuda( hipEventRecord(stopEvent, 0) ); checkCuda( hipEventSynchronize(stopEvent) ); checkCuda( hipEventElapsedTime(&milliseconds, startEvent, stopEvent) ); checkCuda( hipMemcpy(df, d_df, bytes, hipMemcpyDeviceToHost) ); checkResults(error, maxError, sol, df); printf(" Using shared memory tile of %d x %d\n", sharedDims[dimension][fp][0], sharedDims[dimension][fp][1]); printf(" RMS error: %e\n", error); printf(" MAX error: %e\n", maxError); printf(" Average time (ms): %f\n", milliseconds / nReps); printf(" Average Bandwidth (GB/s): %f\n\n", 2.f * 1e-6 * mx * my * mz * nReps * sizeof(float) / milliseconds); } checkCuda( hipEventDestroy(startEvent) ); checkCuda( hipEventDestroy(stopEvent) ); checkCuda( hipFree(d_f) ); checkCuda( hipFree(d_df) ); delete [] f; delete [] df; delete [] sol; } // This the main host code for the finite difference // example. The kernels are contained in the derivative_m module int main(void) { // Print device and precision hipDeviceProp_t prop; checkCuda( hipGetDeviceProperties(&prop, 0) ); printf("\nDevice Name: %s\n", prop.name); printf("Compute Capability: %d.%d\n\n", prop.major, prop.minor); setDerivativeParameters(); // initialize runTest(0); // x derivative runTest(1); // y derivative runTest(2); // z derivative return 0; }
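// --------------------------------------------------------------------------
// Hedged sketch (not part of the original file): the hipified launches inside
// runTest above appear garbled ("fpDerivhipLaunchKernelGGL(([fp)], ...").
// Judging from the CUDA counterpart, which launches fpDeriv[fp] with
// <<<grid[dimension][fp], block[dimension][fp]>>>, the intended HIP form is
// presumably the one below. The placeholder kernel and the grid/block shapes
// exist only so this fragment is self-contained.
// --------------------------------------------------------------------------
#include <hip/hip_runtime.h>

__global__ void sketchDeriv(float *f, float *df) { /* placeholder body */ }

void launchSketch(float *d_f, float *d_df)
{
    void (*fpDeriv[2])(float *, float *) = { sketchDeriv, sketchDeriv };
    dim3 grid(16, 64, 1), block(64, 4, 1);   // illustrative shapes only

    for (int fp = 0; fp < 2; fp++)
    {
        // kernel, grid dim, block dim, dynamic shared mem, stream, kernel args...
        hipLaunchKernelGGL(fpDeriv[fp], grid, block, 0, 0, d_f, d_df);
    }
}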
f4fada3b4a99c15a185aa461913ea3331313316b.cu
/* Copyright (c) 1993-2015, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <stdio.h> #include <assert.h> // Convenience function for checking CUDA runtime API results // can be wrapped around any runtime API call. No-op in release builds. inline cudaError_t checkCuda(cudaError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != cudaSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result)); assert(result == cudaSuccess); } #endif return result; } float fx = 1.0f, fy = 1.0f, fz = 1.0f; const int mx = 64, my = 64, mz = 64; // shared memory tiles will be m*-by-*Pencils // sPencils is used when each thread calculates the derivative at one point // lPencils is used for coalescing in y and z where each thread has to // calculate the derivative at mutiple points const int sPencils = 4; // small # pencils const int lPencils = 32; // large # pencils dim3 grid[3][2], block[3][2]; // stencil coefficients __constant__ float c_ax, c_bx, c_cx, c_dx; __constant__ float c_ay, c_by, c_cy, c_dy; __constant__ float c_az, c_bz, c_cz, c_dz; // host routine to set constant data void setDerivativeParameters() { // check to make sure dimensions are integral multiples of sPencils if ((mx % sPencils != 0) || (my %sPencils != 0) || (mz % sPencils != 0)) { printf("'mx', 'my', and 'mz' must be integral multiples of sPencils\n"); exit(1); } if ((mx % lPencils != 0) || (my % lPencils != 0)) { printf("'mx' and 'my' must be multiples of lPencils\n"); exit(1); } // stencil weights (for unit length problem) float dsinv = mx-1.f; float ax = 4.f / 5.f * dsinv; float bx = -1.f / 5.f * dsinv; float cx = 4.f / 105.f * dsinv; float dx = -1.f / 280.f * dsinv; checkCuda( cudaMemcpyToSymbol(c_ax, &ax, sizeof(float), 0, cudaMemcpyHostToDevice) ); checkCuda( cudaMemcpyToSymbol(c_bx, &bx, sizeof(float), 0, cudaMemcpyHostToDevice) ); checkCuda( cudaMemcpyToSymbol(c_cx, &cx, sizeof(float), 0, cudaMemcpyHostToDevice) ); checkCuda( cudaMemcpyToSymbol(c_dx, &dx, sizeof(float), 0, cudaMemcpyHostToDevice) ); dsinv = my-1.f; float ay = 4.f / 5.f * dsinv; float by = -1.f / 5.f * 
dsinv; float cy = 4.f / 105.f * dsinv; float dy = -1.f / 280.f * dsinv; checkCuda( cudaMemcpyToSymbol(c_ay, &ay, sizeof(float), 0, cudaMemcpyHostToDevice) ); checkCuda( cudaMemcpyToSymbol(c_by, &by, sizeof(float), 0, cudaMemcpyHostToDevice) ); checkCuda( cudaMemcpyToSymbol(c_cy, &cy, sizeof(float), 0, cudaMemcpyHostToDevice) ); checkCuda( cudaMemcpyToSymbol(c_dy, &dy, sizeof(float), 0, cudaMemcpyHostToDevice) ); dsinv = mz-1.f; float az = 4.f / 5.f * dsinv; float bz = -1.f / 5.f * dsinv; float cz = 4.f / 105.f * dsinv; float dz = -1.f / 280.f * dsinv; checkCuda( cudaMemcpyToSymbol(c_az, &az, sizeof(float), 0, cudaMemcpyHostToDevice) ); checkCuda( cudaMemcpyToSymbol(c_bz, &bz, sizeof(float), 0, cudaMemcpyHostToDevice) ); checkCuda( cudaMemcpyToSymbol(c_cz, &cz, sizeof(float), 0, cudaMemcpyHostToDevice) ); checkCuda( cudaMemcpyToSymbol(c_dz, &dz, sizeof(float), 0, cudaMemcpyHostToDevice) ); // Execution configurations for small and large pencil tiles grid[0][0] = dim3(my / sPencils, mz, 1); block[0][0] = dim3(mx, sPencils, 1); grid[0][1] = dim3(my / lPencils, mz, 1); block[0][1] = dim3(mx, sPencils, 1); grid[1][0] = dim3(mx / sPencils, mz, 1); block[1][0] = dim3(sPencils, my, 1); grid[1][1] = dim3(mx / lPencils, mz, 1); // we want to use the same number of threads as above, // so when we use lPencils instead of sPencils in one // dimension, we multiply the other by sPencils/lPencils block[1][1] = dim3(lPencils, my * sPencils / lPencils, 1); grid[2][0] = dim3(mx / sPencils, my, 1); block[2][0] = dim3(sPencils, mz, 1); grid[2][1] = dim3(mx / lPencils, my, 1); block[2][1] = dim3(lPencils, mz * sPencils / lPencils, 1); } void initInput(float *f, int dim) { const float twopi = 8.f * (float)atan(1.0); for (int k = 0; k < mz; k++) { for (int j = 0; j < my; j++) { for (int i = 0; i < mx; i++) { switch (dim) { case 0: f[k*mx*my+j*mx+i] = cos(fx*twopi*(i-1.f)/(mx-1.f)); break; case 1: f[k*mx*my+j*mx+i] = cos(fy*twopi*(j-1.f)/(my-1.f)); break; case 2: f[k*mx*my+j*mx+i] = cos(fz*twopi*(k-1.f)/(mz-1.f)); break; } } } } } void initSol(float *sol, int dim) { const float twopi = 8.f * (float)atan(1.0); for (int k = 0; k < mz; k++) { for (int j = 0; j < my; j++) { for (int i = 0; i < mx; i++) { switch (dim) { case 0: sol[k*mx*my+j*mx+i] = -fx*twopi*sin(fx*twopi*(i-1.f)/(mx-1.f)); break; case 1: sol[k*mx*my+j*mx+i] = -fy*twopi*sin(fy*twopi*(j-1.f)/(my-1.f)); break; case 2: sol[k*mx*my+j*mx+i] = -fz*twopi*sin(fz*twopi*(k-1.f)/(mz-1.f)); break; } } } } } void checkResults(double &error, double &maxError, float *sol, float *df) { // error = sqrt(sum((sol-df)**2)/(mx*my*mz)) // maxError = maxval(abs(sol-df)) maxError = 0; error = 0; for (int k = 0; k < mz; k++) { for (int j = 0; j < my; j++) { for (int i = 0; i < mx; i++) { float s = sol[k*mx*my+j*mx+i]; float f = df[k*mx*my+j*mx+i]; //printf("%d %d %d: %f %f\n", i, j, k, s, f); error += (s-f)*(s-f); if (fabs(s-f) > maxError) maxError = fabs(s-f); } } } error = sqrt(error / (mx*my*mz)); } // ------------- // x derivatives // ------------- __global__ void derivative_x(float *f, float *df) { __shared__ float s_f[sPencils][mx+8]; // 4-wide halo int i = threadIdx.x; int j = blockIdx.x*blockDim.y + threadIdx.y; int k = blockIdx.y; int si = i + 4; // local i for shared memory access + halo offset int sj = threadIdx.y; // local j for shared memory access int globalIdx = k * mx * my + j * mx + i; s_f[sj][si] = f[globalIdx]; __syncthreads(); // fill in periodic images in shared memory array if (i < 4) { s_f[sj][si-4] = s_f[sj][si+mx-5]; s_f[sj][si+mx] = s_f[sj][si+1]; } 
__syncthreads(); df[globalIdx] = ( c_ax * ( s_f[sj][si+1] - s_f[sj][si-1] ) + c_bx * ( s_f[sj][si+2] - s_f[sj][si-2] ) + c_cx * ( s_f[sj][si+3] - s_f[sj][si-3] ) + c_dx * ( s_f[sj][si+4] - s_f[sj][si-4] ) ); } // this version uses a 64x32 shared memory tile, // still with 64*sPencils threads __global__ void derivative_x_lPencils(float *f, float *df) { __shared__ float s_f[lPencils][mx+8]; // 4-wide halo int i = threadIdx.x; int jBase = blockIdx.x*lPencils; int k = blockIdx.y; int si = i + 4; // local i for shared memory access + halo offset for (int sj = threadIdx.y; sj < lPencils; sj += blockDim.y) { int globalIdx = k * mx * my + (jBase + sj) * mx + i; s_f[sj][si] = f[globalIdx]; } __syncthreads(); // fill in periodic images in shared memory array if (i < 4) { for (int sj = threadIdx.y; sj < lPencils; sj += blockDim.y) { s_f[sj][si-4] = s_f[sj][si+mx-5]; s_f[sj][si+mx] = s_f[sj][si+1]; } } __syncthreads(); for (int sj = threadIdx.y; sj < lPencils; sj += blockDim.y) { int globalIdx = k * mx * my + (jBase + sj) * mx + i; df[globalIdx] = ( c_ax * ( s_f[sj][si+1] - s_f[sj][si-1] ) + c_bx * ( s_f[sj][si+2] - s_f[sj][si-2] ) + c_cx * ( s_f[sj][si+3] - s_f[sj][si-3] ) + c_dx * ( s_f[sj][si+4] - s_f[sj][si-4] ) ); } } // ------------- // y derivatives // ------------- __global__ void derivative_y(float *f, float *df) { __shared__ float s_f[my+8][sPencils]; int i = blockIdx.x*blockDim.x + threadIdx.x; int j = threadIdx.y; int k = blockIdx.y; int si = threadIdx.x; int sj = j + 4; int globalIdx = k * mx * my + j * mx + i; s_f[sj][si] = f[globalIdx]; __syncthreads(); if (j < 4) { s_f[sj-4][si] = s_f[sj+my-5][si]; s_f[sj+my][si] = s_f[sj+1][si]; } __syncthreads(); df[globalIdx] = ( c_ay * ( s_f[sj+1][si] - s_f[sj-1][si] ) + c_by * ( s_f[sj+2][si] - s_f[sj-2][si] ) + c_cy * ( s_f[sj+3][si] - s_f[sj-3][si] ) + c_dy * ( s_f[sj+4][si] - s_f[sj-4][si] ) ); } // y derivative using a tile of 32x64, // launch with thread block of 32x8 __global__ void derivative_y_lPencils(float *f, float *df) { __shared__ float s_f[my+8][lPencils]; int i = blockIdx.x*blockDim.x + threadIdx.x; int k = blockIdx.y; int si = threadIdx.x; for (int j = threadIdx.y; j < my; j += blockDim.y) { int globalIdx = k * mx * my + j * mx + i; int sj = j + 4; s_f[sj][si] = f[globalIdx]; } __syncthreads(); int sj = threadIdx.y + 4; if (sj < 8) { s_f[sj-4][si] = s_f[sj+my-5][si]; s_f[sj+my][si] = s_f[sj+1][si]; } __syncthreads(); for (int j = threadIdx.y; j < my; j += blockDim.y) { int globalIdx = k * mx * my + j * mx + i; int sj = j + 4; df[globalIdx] = ( c_ay * ( s_f[sj+1][si] - s_f[sj-1][si] ) + c_by * ( s_f[sj+2][si] - s_f[sj-2][si] ) + c_cy * ( s_f[sj+3][si] - s_f[sj-3][si] ) + c_dy * ( s_f[sj+4][si] - s_f[sj-4][si] ) ); } } // ------------ // z derivative // ------------ __global__ void derivative_z(float *f, float *df) { __shared__ float s_f[mz+8][sPencils]; int i = blockIdx.x*blockDim.x + threadIdx.x; int j = blockIdx.y; int k = threadIdx.y; int si = threadIdx.x; int sk = k + 4; // halo offset int globalIdx = k * mx * my + j * mx + i; s_f[sk][si] = f[globalIdx]; __syncthreads(); if (k < 4) { s_f[sk-4][si] = s_f[sk+mz-5][si]; s_f[sk+mz][si] = s_f[sk+1][si]; } __syncthreads(); df[globalIdx] = ( c_az * ( s_f[sk+1][si] - s_f[sk-1][si] ) + c_bz * ( s_f[sk+2][si] - s_f[sk-2][si] ) + c_cz * ( s_f[sk+3][si] - s_f[sk-3][si] ) + c_dz * ( s_f[sk+4][si] - s_f[sk-4][si] ) ); } __global__ void derivative_z_lPencils(float *f, float *df) { __shared__ float s_f[mz+8][lPencils]; int i = blockIdx.x*blockDim.x + threadIdx.x; int j = blockIdx.y; int si = 
threadIdx.x; for (int k = threadIdx.y; k < mz; k += blockDim.y) { int globalIdx = k * mx * my + j * mx + i; int sk = k + 4; s_f[sk][si] = f[globalIdx]; } __syncthreads(); int k = threadIdx.y + 4; if (k < 8) { s_f[k-4][si] = s_f[k+mz-5][si]; s_f[k+mz][si] = s_f[k+1][si]; } __syncthreads(); for (int k = threadIdx.y; k < mz; k += blockDim.y) { int globalIdx = k * mx * my + j * mx + i; int sk = k + 4; df[globalIdx] = ( c_az * ( s_f[sk+1][si] - s_f[sk-1][si] ) + c_bz * ( s_f[sk+2][si] - s_f[sk-2][si] ) + c_cz * ( s_f[sk+3][si] - s_f[sk-3][si] ) + c_dz * ( s_f[sk+4][si] - s_f[sk-4][si] ) ); } } // Run the kernels for a given dimension. One for sPencils, one for lPencils void runTest(int dimension) { void (*fpDeriv[2])(float*, float*); switch(dimension) { case 0: fpDeriv[0] = derivative_x; fpDeriv[1] = derivative_x_lPencils; break; case 1: fpDeriv[0] = derivative_y; fpDeriv[1] = derivative_y_lPencils; break; case 2: fpDeriv[0] = derivative_z; fpDeriv[1] = derivative_z_lPencils; break; } int sharedDims[3][2][2] = { mx, sPencils, mx, lPencils, sPencils, my, lPencils, my, sPencils, mz, lPencils, mz }; float *f = new float[mx*my*mz]; float *df = new float[mx*my*mz]; float *sol = new float[mx*my*mz]; initInput(f, dimension); initSol(sol, dimension); // device arrays int bytes = mx*my*mz * sizeof(float); float *d_f, *d_df; checkCuda( cudaMalloc((void**)&d_f, bytes) ); checkCuda( cudaMalloc((void**)&d_df, bytes) ); const int nReps = 20; float milliseconds; cudaEvent_t startEvent, stopEvent; checkCuda( cudaEventCreate(&startEvent) ); checkCuda( cudaEventCreate(&stopEvent) ); double error, maxError; printf("%c derivatives\n\n", (char)(0x58 + dimension)); for (int fp = 0; fp < 2; fp++) { checkCuda( cudaMemcpy(d_f, f, bytes, cudaMemcpyHostToDevice) ); checkCuda( cudaMemset(d_df, 0, bytes) ); fpDeriv[fp]<<<grid[dimension][fp],block[dimension][fp]>>>(d_f, d_df); // warm up checkCuda( cudaEventRecord(startEvent, 0) ); for (int i = 0; i < nReps; i++) fpDeriv[fp]<<<grid[dimension][fp],block[dimension][fp]>>>(d_f, d_df); checkCuda( cudaEventRecord(stopEvent, 0) ); checkCuda( cudaEventSynchronize(stopEvent) ); checkCuda( cudaEventElapsedTime(&milliseconds, startEvent, stopEvent) ); checkCuda( cudaMemcpy(df, d_df, bytes, cudaMemcpyDeviceToHost) ); checkResults(error, maxError, sol, df); printf(" Using shared memory tile of %d x %d\n", sharedDims[dimension][fp][0], sharedDims[dimension][fp][1]); printf(" RMS error: %e\n", error); printf(" MAX error: %e\n", maxError); printf(" Average time (ms): %f\n", milliseconds / nReps); printf(" Average Bandwidth (GB/s): %f\n\n", 2.f * 1e-6 * mx * my * mz * nReps * sizeof(float) / milliseconds); } checkCuda( cudaEventDestroy(startEvent) ); checkCuda( cudaEventDestroy(stopEvent) ); checkCuda( cudaFree(d_f) ); checkCuda( cudaFree(d_df) ); delete [] f; delete [] df; delete [] sol; } // This the main host code for the finite difference // example. The kernels are contained in the derivative_m module int main(void) { // Print device and precision cudaDeviceProp prop; checkCuda( cudaGetDeviceProperties(&prop, 0) ); printf("\nDevice Name: %s\n", prop.name); printf("Compute Capability: %d.%d\n\n", prop.major, prop.minor); setDerivativeParameters(); // initialize runTest(0); // x derivative runTest(1); // y derivative runTest(2); // z derivative return 0; }
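// --------------------------------------------------------------------------
// Hedged 1-D host sketch (plain C++, illustrative only) of the stencil the
// derivative kernels above apply in each direction:
//     f'_i ~ [ a*(f[i+1]-f[i-1]) + b*(f[i+2]-f[i-2])
//            + c*(f[i+3]-f[i-3]) + d*(f[i+4]-f[i-4]) ] / h
// with a = 4/5, b = -1/5, c = 4/105, d = -1/280 and a periodic grid on which
// point N-1 coincides with point 0 (matching the shared-memory halo wrap).
// The sample folds the 1/h factor into the constant-memory coefficients
// (dsinv); this sketch keeps it explicit. All names here are local.
// --------------------------------------------------------------------------
#include <math.h>
#include <stdio.h>

// periodic index with the given period (grid point N-1 duplicates point 0)
static int wrap(int idx, int period) { return (idx % period + period) % period; }

int main(void)
{
    const int   N = 64;                   // same as mx/my/mz in the sample
    const float h = 1.0f / (N - 1);
    const float a = 4.f / 5.f, b = -1.f / 5.f, c = 4.f / 105.f, d = -1.f / 280.f;
    const float twopi = 8.f * (float)atan(1.0);

    float f[N];
    for (int i = 0; i < N; i++) f[i] = cosf(twopi * i * h);

    double maxErr = 0.0;
    for (int i = 0; i < N; i++)
    {
        float df = ( a * (f[wrap(i + 1, N - 1)] - f[wrap(i - 1, N - 1)])
                   + b * (f[wrap(i + 2, N - 1)] - f[wrap(i - 2, N - 1)])
                   + c * (f[wrap(i + 3, N - 1)] - f[wrap(i - 3, N - 1)])
                   + d * (f[wrap(i + 4, N - 1)] - f[wrap(i - 4, N - 1)]) ) / h;
        double exact = -twopi * sinf(twopi * i * h);
        if (fabs(df - exact) > maxErr) maxErr = fabs(df - exact);
    }
    printf("max stencil error on %d points: %e\n", N, maxErr);
    return 0;
}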
df5c6678ec8de60614cd545da3e9f6eb6f93bae2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*-----------------------------------------------------------------------*/ /* Program: STREAM */ /* Revision: $Id: stream.c,v 5.10 2013/01/17 16:01:06 mccalpin Exp mccalpin $ */ /* Original code developed by John D. McCalpin */ /* Programmers: John D. McCalpin */ /* Joe R. Zagar */ /* */ /* This program measures memory transfer rates in MB/s for simple */ /* computational kernels coded in C. */ /*-----------------------------------------------------------------------*/ /* Copyright 1991-2013: John D. McCalpin */ /*-----------------------------------------------------------------------*/ /* License: */ /* 1. You are free to use this program and/or to redistribute */ /* this program. */ /* 2. You are free to modify this program for your own use, */ /* including commercial use, subject to the publication */ /* restrictions in item 3. */ /* 3. You are free to publish results obtained from running this */ /* program, or from works that you derive from this program, */ /* with the following limitations: */ /* 3a. In order to be referred to as "STREAM benchmark results", */ /* published results must be in conformance to the STREAM */ /* Run Rules, (briefly reviewed below) published at */ /* http://www.cs.virginia.edu/stream/ref.html */ /* and incorporated herein by reference. */ /* As the copyright holder, John McCalpin retains the */ /* right to determine conformity with the Run Rules. */ /* 3b. Results based on modified source code or on runs not in */ /* accordance with the STREAM Run Rules must be clearly */ /* labelled whenever they are published. Examples of */ /* proper labelling include: */ /* "tuned STREAM benchmark results" */ /* "based on a variant of the STREAM benchmark code" */ /* Other comparable, clear, and reasonable labelling is */ /* acceptable. */ /* 3c. Submission of results to the STREAM benchmark web site */ /* is encouraged, but not required. */ /* 4. Use of this program or creation of derived works based on this */ /* program constitutes acceptance of these licensing restrictions. */ /* 5. Absolutely no warranty is expressed or implied. */ /*-----------------------------------------------------------------------*/ #include <sys/time.h> #include <cfloat> #include <chrono> #include <climits> #include <cstddef> #include <cstdint> #include <cstdio> #include <cstdlib> /*----------------------------------------------------------------------- * INSTRUCTIONS: * * 1) STREAM requires different amounts of memory to run on different * systems, depending on both the system cache size(s) and the * granularity of the system timer. * You should adjust the value of 'STREAM_ARRAY_SIZE' (below) * to meet *both* of the following criteria: * (a) Each array must be at least 4 times the size of the * available cache memory. I don't worry about the difference * between 10^6 and 2^20, so in practice the minimum array size * is about 3.8 times the cache size. * Example 1: One Xeon E3 with 8 MB L3 cache * STREAM_ARRAY_SIZE should be >= 4 million, giving * an array size of 30.5 MB and a total memory requirement * of 91.5 MB. * Example 2: Two Xeon E5's with 20 MB L3 cache each (using OpenMP) * STREAM_ARRAY_SIZE should be >= 20 million, giving * an array size of 153 MB and a total memory requirement * of 458 MB. * (b) The size should be large enough so that the 'timing calibration' * output by the program is at least 20 clock-ticks. * Example: most versions of Windows have a 10 millisecond timer * granularity. 
20 "ticks" at 10 ms/tic is 200 milliseconds. * If the chip is capable of 10 GB/s, it moves 2 GB in 200 msec. * This means the each array must be at least 1 GB, or 128M *elements. * * Version 5.10 increases the default array size from 2 million * elements to 10 million elements in response to the increasing * size of L3 caches. The new default size is large enough for caches * up to 20 MB. * Version 5.10 changes the loop index variables from "register int" * to "ssize_t", which allows array indices >2^32 (4 billion) * on properly configured 64-bit systems. Additional compiler options * (such as "-mcmodel=medium") may be required for large memory runs. * * Array size can be set at compile time without modifying the source * code for the (many) compilers that support preprocessor definitions * on the compile line. E.g., * gcc -O -DSTREAM_ARRAY_SIZE=100000000 stream.c -o stream.100M * will override the default size of 10M with a new size of 100M *elements * per array. */ #ifndef STREAM_ARRAY_SIZE #define STREAM_ARRAY_SIZE 10000000 #endif /* 2) STREAM runs each kernel "NTIMES" times and reports the *best* result * for any iteration after the first, therefore the minimum value * for NTIMES is 2. * There are no rules on maximum allowable values for NTIMES, but * values larger than the default are unlikely to noticeably * increase the reported performance. * NTIMES can also be set on the compile line without changing the source * code using, for example, "-DNTIMES=7". */ #ifdef NTIMES #if NTIMES <= 1 #define NTIMES 10 #endif #endif #ifndef NTIMES #define NTIMES 10 #endif /* Users are allowed to modify the "OFFSET" variable, which *may* change the * relative alignment of the arrays (though compilers may change the * effective offset by making the arrays non-contiguous on some * systems). * Use of non-zero values for OFFSET can be especially helpful if the * STREAM_ARRAY_SIZE is set to a value close to a large power of 2. * OFFSET can also be set on the compile line without changing the source * code using, for example, "-DOFFSET=56". */ #ifndef OFFSET #define OFFSET 0 #endif /* * 3) Compile the code with optimization. Many compilers generate * unreasonably bad code before the optimizer tightens things up. * If the results are unreasonably good, on the other hand, the * optimizer might be too smart for me! * * For a simple single-core version, try compiling with: * cc -O stream.c -o stream * This is known to work on many, many systems.... * * To use multiple cores, you need to tell the compiler to obey the OpenMP * directives in the code. This varies by compiler, but a common example *is * gcc -O -fopenmp stream.c -o stream_omp * The environment variable OMP_NUM_THREADS allows runtime control of the * number of threads/cores used when the resulting "stream_omp" program * is executed. * * To run with single-precision variables and arithmetic, simply add * -DSTREAM_TYPE=float * to the compile line. * Note that this changes the minimum array sizes required --- see (1) *above. * * The preprocessor directive "TUNED" does not do much -- it simply causes *the * code to call separate functions to execute each kernel. Trivial *versions * of these functions are provided, but they are *not* tuned -- they just * provide predefined interfaces to be replaced with tuned code. 
* * * 4) Optional: Mail the results to [email protected] * Be sure to include info that will help me understand: * a) the computer hardware configuration (e.g., processor model, *memory type) * b) the compiler name/version and compilation flags * c) any run-time information (such as OMP_NUM_THREADS) * d) all of the output from the test case. * * Thanks! * *-----------------------------------------------------------------------*/ #define HLINE "-------------------------------------------------------------\n" #ifndef MIN #define MIN(x, y) ((x) < (y) ? (x) : (y)) #endif #ifndef MAX #define MAX(x, y) ((x) > (y) ? (x) : (y)) #endif #ifndef STREAM_TYPE #define STREAM_TYPE double #endif #ifndef TEAM_NUM #define TEAM_NUM 4 #endif #ifndef THREAD_LIMIT #define THREAD_LIMIT 512 #endif #ifndef WINDOW_SIZE #define WINDOW_SIZE 4 #endif #ifndef DEVICE_ID #define DEVICE_ID 0 #endif #define cudaErrorCheck(call) \ do { \ hipError_t cuErr = call; \ if (hipSuccess != cuErr) { \ printf("CUDA Error - %s:%d: '%s'\n", __FILE__, __LINE__, \ hipGetErrorString(cuErr)); \ exit(0); \ } \ } while (0) // static long thread_limit_in_team = ((long)STREAM_ARRAY_SIZE / (long)TEAM_NUM) // > 1024l ? 1024l : ((long)STREAM_ARRAY_SIZE / (long)TEAM_NUM); static STREAM_TYPE a[STREAM_ARRAY_SIZE + OFFSET], b[STREAM_ARRAY_SIZE + OFFSET], c[STREAM_ARRAY_SIZE + OFFSET]; static double avgtime[4] = {0}, maxtime[4] = {0}, mintime[4] = {FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX}; static char* label[4] = {"Copy: ", "Scale: ", "Add: ", "Triad: "}; static double bytes[4] = {2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE, 2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE, 3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE, 3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE}; extern double mysecond(); extern void checkSTREAMresults(); #ifdef SHADOW_MEMORY #define EMPTY 0x0000000000000000 class ShadowMemory { private: static unsigned int offsetPatterns[4]; unsigned long long int bits[WINDOW_SIZE]; public: ShadowMemory() { for (unsigned i = 0; i < WINDOW_SIZE; i++) { bits[i] = EMPTY; } } friend __device__ __noinline__ void insertSM(ShadowMemory* const s, ptrdiff_t address, unsigned int threadID, bool isWrite, unsigned int size); unsigned int getThreadID(unsigned index) { return (unsigned int)(this->bits[index] >> 48); } unsigned long long int getClock(unsigned index) { return (this->bits[index] >> 6) & 0x000003FFFFFFFFFF; } bool isWrite(unsigned index) { return (unsigned int)((this->bits[index] >> 5) & 0x0000000000000001) == 0x0000000000000001 ? true : false; } unsigned int getAccessSize(unsigned index) { unsigned long patternIndex = (this->bits[index] >> 3) & 0x0000000000000003; return offsetPatterns[patternIndex]; } unsigned int getAddressOffset(unsigned index) { return (unsigned int)(this->bits[index] & 0x0000000000000007); } void outputSM() { printf(HLINE); for (unsigned i = 0; i < WINDOW_SIZE; i++) { printf( "Thread ID = %d, Clock = %lld, Access mode = %s, Access size = " "%d, Offset = %d\n", getThreadID(i), getClock(i), isWrite(i) ? 
"write" : "read", getAccessSize(i), getAddressOffset(i)); } printf(HLINE); } }; unsigned int ShadowMemory::offsetPatterns[] = {1, 2, 4, 8}; unsigned int smSize = ((unsigned int)STREAM_ARRAY_SIZE * sizeof(STREAM_TYPE) + 7) / 8; ShadowMemory* sa = new ShadowMemory[smSize]; ShadowMemory* sb = new ShadowMemory[smSize]; ShadowMemory* sc = new ShadowMemory[smSize]; // omp_lock_t lock_sa, lock_sb, lock_sc; __device__ __noinline__ void insertSM(ShadowMemory* const s, ptrdiff_t address, unsigned int threadID, bool isWrite, unsigned int size) { unsigned int index = address / 8; unsigned int offset = address % 8; unsigned int clock = 0xC0DA; unsigned int encodedSize = 0; while (!(size & 0x0000000000000001)) { encodedSize++; size >>= 1; } unsigned long long int bit = 0x0000000000000000; bit |= (threadID & 0x000000000000FFFF); bit <<= 42; bit |= (clock & 0x000003FFFFFFFFFF); bit <<= 1; bit |= (isWrite ? 0x0000000000000001 : 0x0000000000000000); bit <<= 2; bit |= encodedSize; bit <<= 3; bit |= (offset & 0x0000000000000007); // unsigned int nextAvail; // // nextAvail = s[index].nextAvail; // s[index].bits[nextAvail] = bit; // nextAvail = (nextAvail + 1) % WINDOW_SIZE; // s[index].nextAvail = nextAvail; #ifdef USE_CAS unsigned nextIndex = WINDOW_SIZE; for (unsigned i = 0; i < WINDOW_SIZE; i++) { unsigned long long int temp; temp = s[index].bits[i]; if (temp == EMPTY && nextIndex == WINDOW_SIZE) { nextIndex = i; } } if (nextIndex == WINDOW_SIZE) { nextIndex = (address >> 3) % WINDOW_SIZE; } atomicExch(&s[index].bits[nextIndex], bit); #else unsigned nextIndex = WINDOW_SIZE; for (unsigned i = 0; i < WINDOW_SIZE; i++) { unsigned long long int temp; temp = s[index].bits[i]; if (temp == EMPTY && nextIndex == WINDOW_SIZE) { nextIndex = i; } } if (nextIndex == WINDOW_SIZE) { nextIndex = (address >> 3) % WINDOW_SIZE; } *(volatile long long int*)(&s[index].bits[nextIndex])=bit; #endif } #endif int checktick(); #ifdef SHADOW_MEMORY void checkShadowMemory(); #endif STREAM_TYPE scalar = 3.0; #ifdef SHADOW_MEMORY __global__ void stream_copy(STREAM_TYPE* dst, STREAM_TYPE* src, ShadowMemory* dstSM, ShadowMemory* srcSM, unsigned size) { #else __global__ void stream_copy(STREAM_TYPE* dst, STREAM_TYPE* src, unsigned size) { #endif unsigned chunk = size / gridDim.x; unsigned remain = size % gridDim.x; unsigned start = chunk * blockIdx.x; unsigned end = start + chunk; if (blockIdx.x < remain) { start += blockIdx.x; end = start + chunk + 1; } unsigned index = start + threadIdx.x; while (index < end) { dst[index] = src[index]; #ifdef SHADOW_MEMORY unsigned tid = blockDim.x * blockIdx.x + threadIdx.x; insertSM(srcSM, index * sizeof(STREAM_TYPE), tid, false, sizeof(STREAM_TYPE)); insertSM(dstSM, index * sizeof(STREAM_TYPE), tid, true, sizeof(STREAM_TYPE)); #endif index += blockDim.x; } // unsigned tid = blockDim.x * blockIdx.x + threadIdx.x; // while(tid < size) { // dst[tid] = src[tid]; // tid += gridDim.x * blockDim.x; //} } #ifdef SHADOW_MEMORY __global__ void stream_scale(STREAM_TYPE* dst, STREAM_TYPE* src, ShadowMemory* dstSM, ShadowMemory* srcSM, STREAM_TYPE scalar, unsigned size) { #else __global__ void stream_scale(STREAM_TYPE* dst, STREAM_TYPE* src, STREAM_TYPE scalar, unsigned size) { #endif unsigned chunk = size / gridDim.x; unsigned remain = size % gridDim.x; unsigned start = chunk * blockIdx.x; unsigned end = start + chunk; if (blockIdx.x < remain) { start += blockIdx.x; end = start + chunk + 1; } unsigned index = start + threadIdx.x; while (index < end) { dst[index] = scalar * src[index]; #ifdef SHADOW_MEMORY 
unsigned tid = blockDim.x * blockIdx.x + threadIdx.x; insertSM(srcSM, index * sizeof(STREAM_TYPE), tid, false, sizeof(STREAM_TYPE)); insertSM(dstSM, index * sizeof(STREAM_TYPE), tid, true, sizeof(STREAM_TYPE)); #endif index += blockDim.x; } // unsigned tid = blockDim.x * blockIdx.x + threadIdx.x; // while (tid < size) { // dst[tid] = scalar * src[tid]; // tid += gridDim.x * blockDim.x; //} } #ifdef SHADOW_MEMORY __global__ void stream_add(STREAM_TYPE* dst, STREAM_TYPE* op1, STREAM_TYPE* op2, ShadowMemory* dstSM, ShadowMemory* op1SM, ShadowMemory* op2SM, unsigned size) { #else __global__ void stream_add(STREAM_TYPE* dst, STREAM_TYPE* op1, STREAM_TYPE* op2, unsigned size) { #endif unsigned chunk = size / gridDim.x; unsigned remain = size % gridDim.x; unsigned start = chunk * blockIdx.x; unsigned end = start + chunk; if (blockIdx.x < remain) { start += blockIdx.x; end = start + chunk + 1; } unsigned index = start + threadIdx.x; while (index < end) { dst[index] = op1[index] + op2[index]; #ifdef SHADOW_MEMORY unsigned tid = blockDim.x * blockIdx.x + threadIdx.x; insertSM(op1SM, index * sizeof(STREAM_TYPE), tid, false, sizeof(STREAM_TYPE)); insertSM(op2SM, index * sizeof(STREAM_TYPE), tid, false, sizeof(STREAM_TYPE)); insertSM(dstSM, index * sizeof(STREAM_TYPE), tid, true, sizeof(STREAM_TYPE)); #endif index += blockDim.x; } // unsigned tid = blockDim.x * blockIdx.x + threadIdx.x; // while (tid < size) { // dst[tid] = op1[tid] + op2[tid]; // tid += gridDim.x * blockDim.x; //} } #ifdef SHADOW_MEMORY __global__ void stream_triad(STREAM_TYPE* dst, STREAM_TYPE* op1, STREAM_TYPE* op2, ShadowMemory* dstSM, ShadowMemory* op1SM, ShadowMemory* op2SM, STREAM_TYPE scalar, unsigned size) { #else __global__ void stream_triad(STREAM_TYPE* dst, STREAM_TYPE* op1, STREAM_TYPE* op2, STREAM_TYPE scalar, unsigned size) { #endif unsigned chunk = size / gridDim.x; unsigned remain = size % gridDim.x; unsigned start = chunk * blockIdx.x; unsigned end = start + chunk; if (blockIdx.x < remain) { start += blockIdx.x; end = start + chunk + 1; } unsigned index = start + threadIdx.x; while (index < end) { dst[index] = op1[index] + scalar * op2[index]; #ifdef SHADOW_MEMORY unsigned tid = blockDim.x * blockIdx.x + threadIdx.x; insertSM(op1SM, index * sizeof(STREAM_TYPE), tid, false, sizeof(STREAM_TYPE)); insertSM(op2SM, index * sizeof(STREAM_TYPE), tid, false, sizeof(STREAM_TYPE)); insertSM(dstSM, index * sizeof(STREAM_TYPE), tid, true, sizeof(STREAM_TYPE)); #endif index += blockDim.x; } // unsigned tid = blockDim.x * blockIdx.x + threadIdx.x; // while (tid < size) { // dst[tid] = op1[tid] + scalar * op2[tid]; // tid += gridDim.x * blockDim.x; //} } int main() { int quantum; int BytesPerWord; int k; ssize_t j; double t, times[4][NTIMES]; // if (sizeof(STREAM_TYPE) < 8) { // printf( //"Due to the limitation on GPU, we currently only support 64 bit " //"STREAM_TYPE"); // exit(1); //} /* --- SETUP --- determine precision and check timing --- */ printf(HLINE); printf("STREAM version $Revision: 5.10 $\n"); printf(HLINE); BytesPerWord = sizeof(STREAM_TYPE); printf("This system uses %d bytes per array element.\n", BytesPerWord); printf(HLINE); #ifdef N printf("***** WARNING: ******\n"); printf( " It appears that you set the preprocessor variable N when " "compiling this code.\n"); printf( " This version of the code uses the preprocesor variable " "STREAM_ARRAY_SIZE to control the array size\n"); printf(" Reverting to default value of STREAM_ARRAY_SIZE=%llu\n", (unsigned long long)STREAM_ARRAY_SIZE); printf("***** WARNING: ******\n"); 
#endif printf("Array size = %llu (elements), Offset = %d (elements)\n", (unsigned long long)STREAM_ARRAY_SIZE, OFFSET); printf( "Memory per array = %.1f MiB (= %.1f GiB).\n", BytesPerWord * ((double)STREAM_ARRAY_SIZE / 1024.0 / 1024.0), BytesPerWord * ((double)STREAM_ARRAY_SIZE / 1024.0 / 1024.0 / 1024.0)); printf("Total memory required = %.1f MiB (= %.1f GiB).\n", (3.0 * BytesPerWord) * ((double)STREAM_ARRAY_SIZE / 1024.0 / 1024.), (3.0 * BytesPerWord) * ((double)STREAM_ARRAY_SIZE / 1024.0 / 1024. / 1024.)); printf("Each kernel will be executed %d times.\n", NTIMES); printf( " The *best* time for each kernel (excluding the first iteration)\n"); printf(" will be used to compute the reported bandwidth.\n"); int device_num; cudaErrorCheck(hipGetDeviceCount(&device_num)); printf("Number of Device = %d\n", device_num); if (DEVICE_ID >= device_num) { printf("Invalid device index, vaild index range is 0 - %d.\n", device_num - 1); exit(-1); } cudaErrorCheck(hipSetDevice(DEVICE_ID)); /* Get initial value for system clock. */ for (j = 0; j < STREAM_ARRAY_SIZE; j++) { a[j] = 1.0; b[j] = 2.0; c[j] = 0.0; } printf(HLINE); if ((quantum = checktick()) >= 1) printf( "Your clock granularity/precision appears to be " "%d microseconds.\n", quantum); else { printf( "Your clock granularity appears to be " "less than one microsecond.\n"); quantum = 1; } t = mysecond(); for (j = 0; j < STREAM_ARRAY_SIZE; j++) a[j] = 2.0E0 * a[j]; t = 1.0E6 * (mysecond() - t); printf("Each test below will take on the order of %d microseconds.\n", (int)t); printf(" (= %d clock ticks)\n", (int)(t / quantum)); printf("Increase the size of the arrays if this shows that\n"); printf("you are not getting at least 20 clock ticks per test.\n"); printf(HLINE); printf("WARNING -- The above is only a rough guideline.\n"); printf("For best results, please be sure you know the\n"); printf("precision of your system timer.\n"); printf(HLINE); /* --- MAIN LOOP --- repeat test cases NTIMES times --- */ STREAM_TYPE *da, *db, *dc; unsigned size_in_byte = sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE; cudaErrorCheck(hipMalloc(&da, size_in_byte)); cudaErrorCheck(hipMalloc(&db, size_in_byte)); cudaErrorCheck(hipMalloc(&dc, size_in_byte)); cudaErrorCheck(hipMemcpy(da, a, size_in_byte, hipMemcpyHostToDevice)); cudaErrorCheck(hipMemcpy(db, b, size_in_byte, hipMemcpyHostToDevice)); cudaErrorCheck(hipMemcpy(dc, c, size_in_byte, hipMemcpyHostToDevice)); #ifdef SHADOW_MEMORY ShadowMemory *dsa, *dsb, *dsc; unsigned smSizeInByte = sizeof(ShadowMemory) * smSize; cudaErrorCheck(hipMalloc(&dsa, smSizeInByte)); cudaErrorCheck(hipMalloc(&dsb, smSizeInByte)); cudaErrorCheck(hipMalloc(&dsc, smSizeInByte)); cudaErrorCheck(hipMemcpy(dsa, sa, smSizeInByte, hipMemcpyHostToDevice)); cudaErrorCheck(hipMemcpy(dsb, sb, smSizeInByte, hipMemcpyHostToDevice)); cudaErrorCheck(hipMemcpy(dsc, sc, smSizeInByte, hipMemcpyHostToDevice)); #endif #ifdef EXECUTION_TIME auto start_time = std::chrono::high_resolution_clock::now(); #endif for (k = 0; k < NTIMES; k++) { times[0][k] = mysecond(); #ifdef SHADOW_MEMORY hipLaunchKernelGGL(( stream_copy), dim3(TEAM_NUM), dim3(THREAD_LIMIT), 0, 0, dc, da, dsc, dsa, STREAM_ARRAY_SIZE); #else hipLaunchKernelGGL(( stream_copy), dim3(TEAM_NUM), dim3(THREAD_LIMIT), 0, 0, dc, da, STREAM_ARRAY_SIZE); #endif cudaErrorCheck(hipGetLastError()); cudaErrorCheck(hipDeviceSynchronize()); times[0][k] = mysecond() - times[0][k]; times[1][k] = mysecond(); #ifdef SHADOW_MEMORY hipLaunchKernelGGL(( stream_scale), dim3(TEAM_NUM), dim3(THREAD_LIMIT), 0, 0, db, dc, dsb, 
dsc, scalar, STREAM_ARRAY_SIZE); #else hipLaunchKernelGGL(( stream_scale), dim3(TEAM_NUM), dim3(THREAD_LIMIT), 0, 0, db, dc, scalar, STREAM_ARRAY_SIZE); #endif cudaErrorCheck(hipGetLastError()); cudaErrorCheck(hipDeviceSynchronize()); times[1][k] = mysecond() - times[1][k]; times[2][k] = mysecond(); #ifdef SHADOW_MEMORY hipLaunchKernelGGL(( stream_add), dim3(TEAM_NUM), dim3(THREAD_LIMIT), 0, 0, dc, da, db, dsc, dsa, dsb, STREAM_ARRAY_SIZE); #else hipLaunchKernelGGL(( stream_add), dim3(TEAM_NUM), dim3(THREAD_LIMIT), 0, 0, dc, da, db, STREAM_ARRAY_SIZE); #endif cudaErrorCheck(hipGetLastError()); cudaErrorCheck(hipDeviceSynchronize()); times[2][k] = mysecond() - times[2][k]; times[3][k] = mysecond(); #ifdef SHADOW_MEMORY hipLaunchKernelGGL(( stream_triad), dim3(TEAM_NUM), dim3(THREAD_LIMIT), 0, 0, da, db, dc, dsa, dsb, dsc, scalar, STREAM_ARRAY_SIZE); #else hipLaunchKernelGGL(( stream_triad), dim3(TEAM_NUM), dim3(THREAD_LIMIT), 0, 0, da, db, dc, scalar, STREAM_ARRAY_SIZE); #endif cudaErrorCheck(hipGetLastError()); cudaErrorCheck(hipDeviceSynchronize()); times[3][k] = mysecond() - times[3][k]; } #ifdef EXECUTION_TIME auto end_time = std::chrono::high_resolution_clock::now(); std::chrono::duration<double> elapsed_time = end_time - start_time; printf("overall execution time = %f seconds\n", elapsed_time.count()); #endif cudaErrorCheck(hipMemcpy(a, da, size_in_byte, hipMemcpyDeviceToHost)); cudaErrorCheck(hipMemcpy(b, db, size_in_byte, hipMemcpyDeviceToHost)); cudaErrorCheck(hipMemcpy(c, dc, size_in_byte, hipMemcpyDeviceToHost)); #ifdef SHADOW_MEMORY cudaErrorCheck(hipMemcpy(sa, dsa, smSizeInByte, hipMemcpyDeviceToHost)); cudaErrorCheck(hipMemcpy(sb, dsb, smSizeInByte, hipMemcpyDeviceToHost)); cudaErrorCheck(hipMemcpy(sc, dsc, smSizeInByte, hipMemcpyDeviceToHost)); #endif /* --- SUMMARY --- */ for (k = 1; k < NTIMES; k++) /* note -- skip first iteration */ { for (j = 0; j < 4; j++) { avgtime[j] = avgtime[j] + times[j][k]; mintime[j] = MIN(mintime[j], times[j][k]); maxtime[j] = MAX(maxtime[j], times[j][k]); } } printf("Function Best Rate MB/s Avg time Min time Max time\n"); for (j = 0; j < 4; j++) { avgtime[j] = avgtime[j] / (double)(NTIMES - 1); printf("%s%12.1f %11.6f %11.6f %11.6f\n", label[j], 1.0E-06 * bytes[j] / mintime[j], avgtime[j], mintime[j], maxtime[j]); } printf(HLINE); /* --- Check Results --- */ checkSTREAMresults(); printf(HLINE); #ifdef SHADOW_MEMORY checkShadowMemory(); #endif cudaErrorCheck(hipFree(da)); cudaErrorCheck(hipFree(db)); cudaErrorCheck(hipFree(dc)); #ifdef SHADOW_MEMORY cudaErrorCheck(hipFree(dsa)); cudaErrorCheck(hipFree(dsb)); cudaErrorCheck(hipFree(dsc)); #endif return 0; } #define M 20 int checktick() { int i, minDelta, Delta; double t1, t2, timesfound[M]; /* Collect a sequence of M unique time values from the system. */ for (i = 0; i < M; i++) { t1 = mysecond(); while (((t2 = mysecond()) - t1) < 1.0E-6) ; timesfound[i] = t1 = t2; } /* * Determine the minimum difference between these M values. * This result will be our estimate (in microseconds) for the * clock granularity. */ minDelta = 1000000; for (i = 1; i < M; i++) { Delta = (int)(1.0E6 * (timesfound[i] - timesfound[i - 1])); minDelta = MIN(minDelta, MAX(Delta, 0)); } return (minDelta); } /* A gettimeofday routine to give access to the wall clock timer on most UNIX-like systems. 
*/ #include <sys/time.h> double mysecond() { struct timeval tp; struct timezone tzp; int i; i = gettimeofday(&tp, &tzp); return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6); } // long getTime() { // struct timeval tp; // struct timezone tzp; // int i; // i = gettimeofday(&tp, &tzp); // return ((long)tp.tv_sec * 1000000 + (long)tp.tv_usec); //} #ifndef abs #define abs(a) ((a) >= 0 ? (a) : -(a)) #endif void checkSTREAMresults() { STREAM_TYPE aj, bj, cj, scalar; STREAM_TYPE aSumErr, bSumErr, cSumErr; STREAM_TYPE aAvgErr, bAvgErr, cAvgErr; double epsilon; ssize_t j; int k, ierr, err; /* reproduce initialization */ aj = 1.0; bj = 2.0; cj = 0.0; /* a[] is modified during timing check */ aj = 2.0E0 * aj; /* now execute timing loop */ scalar = 3.0; for (k = 0; k < NTIMES; k++) { cj = aj; bj = scalar * cj; cj = aj + bj; aj = bj + scalar * cj; } /* accumulate deltas between observed and expected results */ aSumErr = 0.0; bSumErr = 0.0; cSumErr = 0.0; for (j = 0; j < STREAM_ARRAY_SIZE; j++) { aSumErr += abs(a[j] - aj); bSumErr += abs(b[j] - bj); cSumErr += abs(c[j] - cj); // if (j == 417) printf("Index 417: c[j]: %f, cj: %f\n",c[j],cj); // // // MCCALPIN } aAvgErr = aSumErr / (STREAM_TYPE)STREAM_ARRAY_SIZE; bAvgErr = bSumErr / (STREAM_TYPE)STREAM_ARRAY_SIZE; cAvgErr = cSumErr / (STREAM_TYPE)STREAM_ARRAY_SIZE; if (sizeof(STREAM_TYPE) == 4) { epsilon = 1.e-6; } else if (sizeof(STREAM_TYPE) == 8) { epsilon = 1.e-13; } else { printf("WEIRD: sizeof(STREAM_TYPE) = %lu\n", sizeof(STREAM_TYPE)); epsilon = 1.e-6; } err = 0; if (abs(aAvgErr / aj) > epsilon) { err++; printf( "Failed Vac++ multilinelidation on array a[], AvgRelAbsErr > " "epsilon (%e)\n", epsilon); printf(" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n", aj, aAvgErr, abs(aAvgErr) / aj); ierr = 0; for (j = 0; j < STREAM_ARRAY_SIZE; j++) { if (abs(a[j] / aj - 1.0) > epsilon) { ierr++; #ifdef VERBOSE if (ierr < 10) { printf( " array a: index: %ld, expected: %e, observed: " "%e, relative error: %e\n", j, aj, a[j], abs((aj - a[j]) / aAvgErr)); } #endif } } printf(" For array a[], %d errors were found.\n", ierr); } if (abs(bAvgErr / bj) > epsilon) { err++; printf("Failed Validation on array b[], AvgRelAbsErr > epsilon (%e)\n", epsilon); printf(" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n", bj, bAvgErr, abs(bAvgErr) / bj); printf(" AvgRelAbsErr > Epsilon (%e)\n", epsilon); ierr = 0; for (j = 0; j < STREAM_ARRAY_SIZE; j++) { if (abs(b[j] / bj - 1.0) > epsilon) { ierr++; #ifdef VERBOSE if (ierr < 10) { printf( " array b: index: %ld, expected: %e, observed: " "%e, relative error: %e\n", j, bj, b[j], abs((bj - b[j]) / bAvgErr)); } #endif } } printf(" For array b[], %d errors were found.\n", ierr); } if (abs(cAvgErr / cj) > epsilon) { err++; printf("Failed Validation on array c[], AvgRelAbsErr > epsilon (%e)\n", epsilon); printf(" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n", cj, cAvgErr, abs(cAvgErr) / cj); printf(" AvgRelAbsErr > Epsilon (%e)\n", epsilon); ierr = 0; for (j = 0; j < STREAM_ARRAY_SIZE; j++) { if (abs(c[j] / cj - 1.0) > epsilon) { ierr++; #ifdef VERBOSE if (ierr < 10) { printf( " array c: index: %ld, expected: %e, observed: " "%e, relative error: %e\n", j, cj, c[j], abs((cj - c[j]) / cAvgErr)); } #endif } } printf(" For array c[], %d errors were found.\n", ierr); } if (err == 0) { printf( "Solution Validates: avg error less than %e on all three arrays\n", epsilon); } #ifdef VERBOSE printf("Results Validation Verbose Results: \n"); printf(" Expected a(1), b(1), c(1): %f %f %f \n", aj, bj, cj); printf(" 
Observed a(1), b(1), c(1): %f %f %f \n", a[1], b[1], c[1]);
  printf(" Rel Errors on a, b, c: %e %e %e \n", abs(aAvgErr / aj),
         abs(bAvgErr / bj), abs(cAvgErr / cj));
#endif
}

#ifdef SHADOW_MEMORY
void checkShadowMemory() {
  printf(HLINE);
  unsigned limit = 5;
  unsigned stripe = smSize / limit;
  printf("sa:\n");
  for (unsigned i = 0; i < smSize; i += stripe) {
    sa[i].outputSM();
  }
  printf("sb:\n");
  for (unsigned i = 0; i < smSize; i += stripe) {
    sb[i].outputSM();
  }
  printf("sc:\n");
  for (unsigned i = 0; i < smSize; i += stripe) {
    sc[i].outputSM();
  }
  printf(HLINE);
}
#endif
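The SHADOW_MEMORY path above packs each recorded access into a single 64-bit word: a 16-bit thread id, a 42-bit clock, a 1-bit read/write flag, a 2-bit encoded access size (log2 of a 1/2/4/8-byte access), and a 3-bit byte offset within the 8-byte granule; the getters recover the fields with the matching shifts and masks. The standalone host-side sketch below mirrors that layout so the encoding can be checked outside the kernel. pack_record, the sizes table, and main are illustrative names introduced for this sketch, not code from either file.

#include <cassert>
#include <cstdint>
#include <cstdio>

// Pack one shadow-memory record the way insertSM builds `bit`, then unpack it
// the way getThreadID/getClock/isWrite/getAccessSize/getAddressOffset read it.
static uint64_t pack_record(uint64_t threadID, uint64_t clock, bool isWrite,
                            uint64_t encodedSize, uint64_t offset) {
  uint64_t bits = 0;
  bits |= (threadID & 0xFFFFull);                    // 16-bit thread id
  bits <<= 42; bits |= (clock & 0x3FFFFFFFFFFull);   // 42-bit clock
  bits <<= 1;  bits |= (isWrite ? 1ull : 0ull);      // 1 = write, 0 = read
  bits <<= 2;  bits |= (encodedSize & 0x3ull);       // log2 of the access size
  bits <<= 3;  bits |= (offset & 0x7ull);            // offset inside the 8-byte granule
  return bits;
}

int main() {
  const unsigned sizes[4] = {1, 2, 4, 8};            // mirrors offsetPatterns[]
  // An 8-byte write by thread 7 at granule offset 0, with the fixed clock 0xC0DA.
  uint64_t bits = pack_record(7, 0xC0DA, true, 3, 0);

  unsigned threadID = (unsigned)(bits >> 48);
  uint64_t clock    = (bits >> 6) & 0x3FFFFFFFFFFull;
  bool     isWrite  = ((bits >> 5) & 1ull) != 0;
  unsigned size     = sizes[(bits >> 3) & 0x3ull];
  unsigned offset   = (unsigned)(bits & 0x7ull);

  assert(threadID == 7 && clock == 0xC0DA && isWrite && size == 8 && offset == 0);
  printf("tid=%u clock=%llu mode=%s size=%u offset=%u\n", threadID,
         (unsigned long long)clock, isWrite ? "write" : "read", size, offset);
  return 0;
}

Compiled with any host C++ compiler and run, the decoded fields should come back unchanged, which is a quick way to confirm the 16+42+1+2+3 = 64-bit split before trusting outputSM's report.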
df5c6678ec8de60614cd545da3e9f6eb6f93bae2.cu
/*-----------------------------------------------------------------------*/ /* Program: STREAM */ /* Revision: $Id: stream.c,v 5.10 2013/01/17 16:01:06 mccalpin Exp mccalpin $ */ /* Original code developed by John D. McCalpin */ /* Programmers: John D. McCalpin */ /* Joe R. Zagar */ /* */ /* This program measures memory transfer rates in MB/s for simple */ /* computational kernels coded in C. */ /*-----------------------------------------------------------------------*/ /* Copyright 1991-2013: John D. McCalpin */ /*-----------------------------------------------------------------------*/ /* License: */ /* 1. You are free to use this program and/or to redistribute */ /* this program. */ /* 2. You are free to modify this program for your own use, */ /* including commercial use, subject to the publication */ /* restrictions in item 3. */ /* 3. You are free to publish results obtained from running this */ /* program, or from works that you derive from this program, */ /* with the following limitations: */ /* 3a. In order to be referred to as "STREAM benchmark results", */ /* published results must be in conformance to the STREAM */ /* Run Rules, (briefly reviewed below) published at */ /* http://www.cs.virginia.edu/stream/ref.html */ /* and incorporated herein by reference. */ /* As the copyright holder, John McCalpin retains the */ /* right to determine conformity with the Run Rules. */ /* 3b. Results based on modified source code or on runs not in */ /* accordance with the STREAM Run Rules must be clearly */ /* labelled whenever they are published. Examples of */ /* proper labelling include: */ /* "tuned STREAM benchmark results" */ /* "based on a variant of the STREAM benchmark code" */ /* Other comparable, clear, and reasonable labelling is */ /* acceptable. */ /* 3c. Submission of results to the STREAM benchmark web site */ /* is encouraged, but not required. */ /* 4. Use of this program or creation of derived works based on this */ /* program constitutes acceptance of these licensing restrictions. */ /* 5. Absolutely no warranty is expressed or implied. */ /*-----------------------------------------------------------------------*/ #include <sys/time.h> #include <cfloat> #include <chrono> #include <climits> #include <cstddef> #include <cstdint> #include <cstdio> #include <cstdlib> /*----------------------------------------------------------------------- * INSTRUCTIONS: * * 1) STREAM requires different amounts of memory to run on different * systems, depending on both the system cache size(s) and the * granularity of the system timer. * You should adjust the value of 'STREAM_ARRAY_SIZE' (below) * to meet *both* of the following criteria: * (a) Each array must be at least 4 times the size of the * available cache memory. I don't worry about the difference * between 10^6 and 2^20, so in practice the minimum array size * is about 3.8 times the cache size. * Example 1: One Xeon E3 with 8 MB L3 cache * STREAM_ARRAY_SIZE should be >= 4 million, giving * an array size of 30.5 MB and a total memory requirement * of 91.5 MB. * Example 2: Two Xeon E5's with 20 MB L3 cache each (using OpenMP) * STREAM_ARRAY_SIZE should be >= 20 million, giving * an array size of 153 MB and a total memory requirement * of 458 MB. * (b) The size should be large enough so that the 'timing calibration' * output by the program is at least 20 clock-ticks. * Example: most versions of Windows have a 10 millisecond timer * granularity. 20 "ticks" at 10 ms/tic is 200 milliseconds. 
* If the chip is capable of 10 GB/s, it moves 2 GB in 200 msec. * This means the each array must be at least 1 GB, or 128M *elements. * * Version 5.10 increases the default array size from 2 million * elements to 10 million elements in response to the increasing * size of L3 caches. The new default size is large enough for caches * up to 20 MB. * Version 5.10 changes the loop index variables from "register int" * to "ssize_t", which allows array indices >2^32 (4 billion) * on properly configured 64-bit systems. Additional compiler options * (such as "-mcmodel=medium") may be required for large memory runs. * * Array size can be set at compile time without modifying the source * code for the (many) compilers that support preprocessor definitions * on the compile line. E.g., * gcc -O -DSTREAM_ARRAY_SIZE=100000000 stream.c -o stream.100M * will override the default size of 10M with a new size of 100M *elements * per array. */ #ifndef STREAM_ARRAY_SIZE #define STREAM_ARRAY_SIZE 10000000 #endif /* 2) STREAM runs each kernel "NTIMES" times and reports the *best* result * for any iteration after the first, therefore the minimum value * for NTIMES is 2. * There are no rules on maximum allowable values for NTIMES, but * values larger than the default are unlikely to noticeably * increase the reported performance. * NTIMES can also be set on the compile line without changing the source * code using, for example, "-DNTIMES=7". */ #ifdef NTIMES #if NTIMES <= 1 #define NTIMES 10 #endif #endif #ifndef NTIMES #define NTIMES 10 #endif /* Users are allowed to modify the "OFFSET" variable, which *may* change the * relative alignment of the arrays (though compilers may change the * effective offset by making the arrays non-contiguous on some * systems). * Use of non-zero values for OFFSET can be especially helpful if the * STREAM_ARRAY_SIZE is set to a value close to a large power of 2. * OFFSET can also be set on the compile line without changing the source * code using, for example, "-DOFFSET=56". */ #ifndef OFFSET #define OFFSET 0 #endif /* * 3) Compile the code with optimization. Many compilers generate * unreasonably bad code before the optimizer tightens things up. * If the results are unreasonably good, on the other hand, the * optimizer might be too smart for me! * * For a simple single-core version, try compiling with: * cc -O stream.c -o stream * This is known to work on many, many systems.... * * To use multiple cores, you need to tell the compiler to obey the OpenMP * directives in the code. This varies by compiler, but a common example *is * gcc -O -fopenmp stream.c -o stream_omp * The environment variable OMP_NUM_THREADS allows runtime control of the * number of threads/cores used when the resulting "stream_omp" program * is executed. * * To run with single-precision variables and arithmetic, simply add * -DSTREAM_TYPE=float * to the compile line. * Note that this changes the minimum array sizes required --- see (1) *above. * * The preprocessor directive "TUNED" does not do much -- it simply causes *the * code to call separate functions to execute each kernel. Trivial *versions * of these functions are provided, but they are *not* tuned -- they just * provide predefined interfaces to be replaced with tuned code. 
* * * 4) Optional: Mail the results to [email protected] * Be sure to include info that will help me understand: * a) the computer hardware configuration (e.g., processor model, *memory type) * b) the compiler name/version and compilation flags * c) any run-time information (such as OMP_NUM_THREADS) * d) all of the output from the test case. * * Thanks! * *-----------------------------------------------------------------------*/ #define HLINE "-------------------------------------------------------------\n" #ifndef MIN #define MIN(x, y) ((x) < (y) ? (x) : (y)) #endif #ifndef MAX #define MAX(x, y) ((x) > (y) ? (x) : (y)) #endif #ifndef STREAM_TYPE #define STREAM_TYPE double #endif #ifndef TEAM_NUM #define TEAM_NUM 4 #endif #ifndef THREAD_LIMIT #define THREAD_LIMIT 512 #endif #ifndef WINDOW_SIZE #define WINDOW_SIZE 4 #endif #ifndef DEVICE_ID #define DEVICE_ID 0 #endif #define cudaErrorCheck(call) \ do { \ cudaError_t cuErr = call; \ if (cudaSuccess != cuErr) { \ printf("CUDA Error - %s:%d: '%s'\n", __FILE__, __LINE__, \ cudaGetErrorString(cuErr)); \ exit(0); \ } \ } while (0) // static long thread_limit_in_team = ((long)STREAM_ARRAY_SIZE / (long)TEAM_NUM) // > 1024l ? 1024l : ((long)STREAM_ARRAY_SIZE / (long)TEAM_NUM); static STREAM_TYPE a[STREAM_ARRAY_SIZE + OFFSET], b[STREAM_ARRAY_SIZE + OFFSET], c[STREAM_ARRAY_SIZE + OFFSET]; static double avgtime[4] = {0}, maxtime[4] = {0}, mintime[4] = {FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX}; static char* label[4] = {"Copy: ", "Scale: ", "Add: ", "Triad: "}; static double bytes[4] = {2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE, 2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE, 3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE, 3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE}; extern double mysecond(); extern void checkSTREAMresults(); #ifdef SHADOW_MEMORY #define EMPTY 0x0000000000000000 class ShadowMemory { private: static unsigned int offsetPatterns[4]; unsigned long long int bits[WINDOW_SIZE]; public: ShadowMemory() { for (unsigned i = 0; i < WINDOW_SIZE; i++) { bits[i] = EMPTY; } } friend __device__ __noinline__ void insertSM(ShadowMemory* const s, ptrdiff_t address, unsigned int threadID, bool isWrite, unsigned int size); unsigned int getThreadID(unsigned index) { return (unsigned int)(this->bits[index] >> 48); } unsigned long long int getClock(unsigned index) { return (this->bits[index] >> 6) & 0x000003FFFFFFFFFF; } bool isWrite(unsigned index) { return (unsigned int)((this->bits[index] >> 5) & 0x0000000000000001) == 0x0000000000000001 ? true : false; } unsigned int getAccessSize(unsigned index) { unsigned long patternIndex = (this->bits[index] >> 3) & 0x0000000000000003; return offsetPatterns[patternIndex]; } unsigned int getAddressOffset(unsigned index) { return (unsigned int)(this->bits[index] & 0x0000000000000007); } void outputSM() { printf(HLINE); for (unsigned i = 0; i < WINDOW_SIZE; i++) { printf( "Thread ID = %d, Clock = %lld, Access mode = %s, Access size = " "%d, Offset = %d\n", getThreadID(i), getClock(i), isWrite(i) ? 
"write" : "read", getAccessSize(i), getAddressOffset(i)); } printf(HLINE); } }; unsigned int ShadowMemory::offsetPatterns[] = {1, 2, 4, 8}; unsigned int smSize = ((unsigned int)STREAM_ARRAY_SIZE * sizeof(STREAM_TYPE) + 7) / 8; ShadowMemory* sa = new ShadowMemory[smSize]; ShadowMemory* sb = new ShadowMemory[smSize]; ShadowMemory* sc = new ShadowMemory[smSize]; // omp_lock_t lock_sa, lock_sb, lock_sc; __device__ __noinline__ void insertSM(ShadowMemory* const s, ptrdiff_t address, unsigned int threadID, bool isWrite, unsigned int size) { unsigned int index = address / 8; unsigned int offset = address % 8; unsigned int clock = 0xC0DA; unsigned int encodedSize = 0; while (!(size & 0x0000000000000001)) { encodedSize++; size >>= 1; } unsigned long long int bit = 0x0000000000000000; bit |= (threadID & 0x000000000000FFFF); bit <<= 42; bit |= (clock & 0x000003FFFFFFFFFF); bit <<= 1; bit |= (isWrite ? 0x0000000000000001 : 0x0000000000000000); bit <<= 2; bit |= encodedSize; bit <<= 3; bit |= (offset & 0x0000000000000007); // unsigned int nextAvail; // // nextAvail = s[index].nextAvail; // s[index].bits[nextAvail] = bit; // nextAvail = (nextAvail + 1) % WINDOW_SIZE; // s[index].nextAvail = nextAvail; #ifdef USE_CAS unsigned nextIndex = WINDOW_SIZE; for (unsigned i = 0; i < WINDOW_SIZE; i++) { unsigned long long int temp; temp = s[index].bits[i]; if (temp == EMPTY && nextIndex == WINDOW_SIZE) { nextIndex = i; } } if (nextIndex == WINDOW_SIZE) { nextIndex = (address >> 3) % WINDOW_SIZE; } atomicExch(&s[index].bits[nextIndex], bit); #else unsigned nextIndex = WINDOW_SIZE; for (unsigned i = 0; i < WINDOW_SIZE; i++) { unsigned long long int temp; temp = s[index].bits[i]; if (temp == EMPTY && nextIndex == WINDOW_SIZE) { nextIndex = i; } } if (nextIndex == WINDOW_SIZE) { nextIndex = (address >> 3) % WINDOW_SIZE; } *(volatile long long int*)(&s[index].bits[nextIndex])=bit; #endif } #endif int checktick(); #ifdef SHADOW_MEMORY void checkShadowMemory(); #endif STREAM_TYPE scalar = 3.0; #ifdef SHADOW_MEMORY __global__ void stream_copy(STREAM_TYPE* dst, STREAM_TYPE* src, ShadowMemory* dstSM, ShadowMemory* srcSM, unsigned size) { #else __global__ void stream_copy(STREAM_TYPE* dst, STREAM_TYPE* src, unsigned size) { #endif unsigned chunk = size / gridDim.x; unsigned remain = size % gridDim.x; unsigned start = chunk * blockIdx.x; unsigned end = start + chunk; if (blockIdx.x < remain) { start += blockIdx.x; end = start + chunk + 1; } unsigned index = start + threadIdx.x; while (index < end) { dst[index] = src[index]; #ifdef SHADOW_MEMORY unsigned tid = blockDim.x * blockIdx.x + threadIdx.x; insertSM(srcSM, index * sizeof(STREAM_TYPE), tid, false, sizeof(STREAM_TYPE)); insertSM(dstSM, index * sizeof(STREAM_TYPE), tid, true, sizeof(STREAM_TYPE)); #endif index += blockDim.x; } // unsigned tid = blockDim.x * blockIdx.x + threadIdx.x; // while(tid < size) { // dst[tid] = src[tid]; // tid += gridDim.x * blockDim.x; //} } #ifdef SHADOW_MEMORY __global__ void stream_scale(STREAM_TYPE* dst, STREAM_TYPE* src, ShadowMemory* dstSM, ShadowMemory* srcSM, STREAM_TYPE scalar, unsigned size) { #else __global__ void stream_scale(STREAM_TYPE* dst, STREAM_TYPE* src, STREAM_TYPE scalar, unsigned size) { #endif unsigned chunk = size / gridDim.x; unsigned remain = size % gridDim.x; unsigned start = chunk * blockIdx.x; unsigned end = start + chunk; if (blockIdx.x < remain) { start += blockIdx.x; end = start + chunk + 1; } unsigned index = start + threadIdx.x; while (index < end) { dst[index] = scalar * src[index]; #ifdef SHADOW_MEMORY 
unsigned tid = blockDim.x * blockIdx.x + threadIdx.x; insertSM(srcSM, index * sizeof(STREAM_TYPE), tid, false, sizeof(STREAM_TYPE)); insertSM(dstSM, index * sizeof(STREAM_TYPE), tid, true, sizeof(STREAM_TYPE)); #endif index += blockDim.x; } // unsigned tid = blockDim.x * blockIdx.x + threadIdx.x; // while (tid < size) { // dst[tid] = scalar * src[tid]; // tid += gridDim.x * blockDim.x; //} } #ifdef SHADOW_MEMORY __global__ void stream_add(STREAM_TYPE* dst, STREAM_TYPE* op1, STREAM_TYPE* op2, ShadowMemory* dstSM, ShadowMemory* op1SM, ShadowMemory* op2SM, unsigned size) { #else __global__ void stream_add(STREAM_TYPE* dst, STREAM_TYPE* op1, STREAM_TYPE* op2, unsigned size) { #endif unsigned chunk = size / gridDim.x; unsigned remain = size % gridDim.x; unsigned start = chunk * blockIdx.x; unsigned end = start + chunk; if (blockIdx.x < remain) { start += blockIdx.x; end = start + chunk + 1; } unsigned index = start + threadIdx.x; while (index < end) { dst[index] = op1[index] + op2[index]; #ifdef SHADOW_MEMORY unsigned tid = blockDim.x * blockIdx.x + threadIdx.x; insertSM(op1SM, index * sizeof(STREAM_TYPE), tid, false, sizeof(STREAM_TYPE)); insertSM(op2SM, index * sizeof(STREAM_TYPE), tid, false, sizeof(STREAM_TYPE)); insertSM(dstSM, index * sizeof(STREAM_TYPE), tid, true, sizeof(STREAM_TYPE)); #endif index += blockDim.x; } // unsigned tid = blockDim.x * blockIdx.x + threadIdx.x; // while (tid < size) { // dst[tid] = op1[tid] + op2[tid]; // tid += gridDim.x * blockDim.x; //} } #ifdef SHADOW_MEMORY __global__ void stream_triad(STREAM_TYPE* dst, STREAM_TYPE* op1, STREAM_TYPE* op2, ShadowMemory* dstSM, ShadowMemory* op1SM, ShadowMemory* op2SM, STREAM_TYPE scalar, unsigned size) { #else __global__ void stream_triad(STREAM_TYPE* dst, STREAM_TYPE* op1, STREAM_TYPE* op2, STREAM_TYPE scalar, unsigned size) { #endif unsigned chunk = size / gridDim.x; unsigned remain = size % gridDim.x; unsigned start = chunk * blockIdx.x; unsigned end = start + chunk; if (blockIdx.x < remain) { start += blockIdx.x; end = start + chunk + 1; } unsigned index = start + threadIdx.x; while (index < end) { dst[index] = op1[index] + scalar * op2[index]; #ifdef SHADOW_MEMORY unsigned tid = blockDim.x * blockIdx.x + threadIdx.x; insertSM(op1SM, index * sizeof(STREAM_TYPE), tid, false, sizeof(STREAM_TYPE)); insertSM(op2SM, index * sizeof(STREAM_TYPE), tid, false, sizeof(STREAM_TYPE)); insertSM(dstSM, index * sizeof(STREAM_TYPE), tid, true, sizeof(STREAM_TYPE)); #endif index += blockDim.x; } // unsigned tid = blockDim.x * blockIdx.x + threadIdx.x; // while (tid < size) { // dst[tid] = op1[tid] + scalar * op2[tid]; // tid += gridDim.x * blockDim.x; //} } int main() { int quantum; int BytesPerWord; int k; ssize_t j; double t, times[4][NTIMES]; // if (sizeof(STREAM_TYPE) < 8) { // printf( //"Due to the limitation on GPU, we currently only support 64 bit " //"STREAM_TYPE"); // exit(1); //} /* --- SETUP --- determine precision and check timing --- */ printf(HLINE); printf("STREAM version $Revision: 5.10 $\n"); printf(HLINE); BytesPerWord = sizeof(STREAM_TYPE); printf("This system uses %d bytes per array element.\n", BytesPerWord); printf(HLINE); #ifdef N printf("***** WARNING: ******\n"); printf( " It appears that you set the preprocessor variable N when " "compiling this code.\n"); printf( " This version of the code uses the preprocesor variable " "STREAM_ARRAY_SIZE to control the array size\n"); printf(" Reverting to default value of STREAM_ARRAY_SIZE=%llu\n", (unsigned long long)STREAM_ARRAY_SIZE); printf("***** WARNING: ******\n"); 
#endif printf("Array size = %llu (elements), Offset = %d (elements)\n", (unsigned long long)STREAM_ARRAY_SIZE, OFFSET); printf( "Memory per array = %.1f MiB (= %.1f GiB).\n", BytesPerWord * ((double)STREAM_ARRAY_SIZE / 1024.0 / 1024.0), BytesPerWord * ((double)STREAM_ARRAY_SIZE / 1024.0 / 1024.0 / 1024.0)); printf("Total memory required = %.1f MiB (= %.1f GiB).\n", (3.0 * BytesPerWord) * ((double)STREAM_ARRAY_SIZE / 1024.0 / 1024.), (3.0 * BytesPerWord) * ((double)STREAM_ARRAY_SIZE / 1024.0 / 1024. / 1024.)); printf("Each kernel will be executed %d times.\n", NTIMES); printf( " The *best* time for each kernel (excluding the first iteration)\n"); printf(" will be used to compute the reported bandwidth.\n"); int device_num; cudaErrorCheck(cudaGetDeviceCount(&device_num)); printf("Number of Device = %d\n", device_num); if (DEVICE_ID >= device_num) { printf("Invalid device index, vaild index range is 0 - %d.\n", device_num - 1); exit(-1); } cudaErrorCheck(cudaSetDevice(DEVICE_ID)); /* Get initial value for system clock. */ for (j = 0; j < STREAM_ARRAY_SIZE; j++) { a[j] = 1.0; b[j] = 2.0; c[j] = 0.0; } printf(HLINE); if ((quantum = checktick()) >= 1) printf( "Your clock granularity/precision appears to be " "%d microseconds.\n", quantum); else { printf( "Your clock granularity appears to be " "less than one microsecond.\n"); quantum = 1; } t = mysecond(); for (j = 0; j < STREAM_ARRAY_SIZE; j++) a[j] = 2.0E0 * a[j]; t = 1.0E6 * (mysecond() - t); printf("Each test below will take on the order of %d microseconds.\n", (int)t); printf(" (= %d clock ticks)\n", (int)(t / quantum)); printf("Increase the size of the arrays if this shows that\n"); printf("you are not getting at least 20 clock ticks per test.\n"); printf(HLINE); printf("WARNING -- The above is only a rough guideline.\n"); printf("For best results, please be sure you know the\n"); printf("precision of your system timer.\n"); printf(HLINE); /* --- MAIN LOOP --- repeat test cases NTIMES times --- */ STREAM_TYPE *da, *db, *dc; unsigned size_in_byte = sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE; cudaErrorCheck(cudaMalloc(&da, size_in_byte)); cudaErrorCheck(cudaMalloc(&db, size_in_byte)); cudaErrorCheck(cudaMalloc(&dc, size_in_byte)); cudaErrorCheck(cudaMemcpy(da, a, size_in_byte, cudaMemcpyHostToDevice)); cudaErrorCheck(cudaMemcpy(db, b, size_in_byte, cudaMemcpyHostToDevice)); cudaErrorCheck(cudaMemcpy(dc, c, size_in_byte, cudaMemcpyHostToDevice)); #ifdef SHADOW_MEMORY ShadowMemory *dsa, *dsb, *dsc; unsigned smSizeInByte = sizeof(ShadowMemory) * smSize; cudaErrorCheck(cudaMalloc(&dsa, smSizeInByte)); cudaErrorCheck(cudaMalloc(&dsb, smSizeInByte)); cudaErrorCheck(cudaMalloc(&dsc, smSizeInByte)); cudaErrorCheck(cudaMemcpy(dsa, sa, smSizeInByte, cudaMemcpyHostToDevice)); cudaErrorCheck(cudaMemcpy(dsb, sb, smSizeInByte, cudaMemcpyHostToDevice)); cudaErrorCheck(cudaMemcpy(dsc, sc, smSizeInByte, cudaMemcpyHostToDevice)); #endif #ifdef EXECUTION_TIME auto start_time = std::chrono::high_resolution_clock::now(); #endif for (k = 0; k < NTIMES; k++) { times[0][k] = mysecond(); #ifdef SHADOW_MEMORY stream_copy<<<TEAM_NUM, THREAD_LIMIT>>>(dc, da, dsc, dsa, STREAM_ARRAY_SIZE); #else stream_copy<<<TEAM_NUM, THREAD_LIMIT>>>(dc, da, STREAM_ARRAY_SIZE); #endif cudaErrorCheck(cudaGetLastError()); cudaErrorCheck(cudaDeviceSynchronize()); times[0][k] = mysecond() - times[0][k]; times[1][k] = mysecond(); #ifdef SHADOW_MEMORY stream_scale<<<TEAM_NUM, THREAD_LIMIT>>>(db, dc, dsb, dsc, scalar, STREAM_ARRAY_SIZE); #else stream_scale<<<TEAM_NUM, THREAD_LIMIT>>>(db, dc, 
scalar, STREAM_ARRAY_SIZE); #endif cudaErrorCheck(cudaGetLastError()); cudaErrorCheck(cudaDeviceSynchronize()); times[1][k] = mysecond() - times[1][k]; times[2][k] = mysecond(); #ifdef SHADOW_MEMORY stream_add<<<TEAM_NUM, THREAD_LIMIT>>>(dc, da, db, dsc, dsa, dsb, STREAM_ARRAY_SIZE); #else stream_add<<<TEAM_NUM, THREAD_LIMIT>>>(dc, da, db, STREAM_ARRAY_SIZE); #endif cudaErrorCheck(cudaGetLastError()); cudaErrorCheck(cudaDeviceSynchronize()); times[2][k] = mysecond() - times[2][k]; times[3][k] = mysecond(); #ifdef SHADOW_MEMORY stream_triad<<<TEAM_NUM, THREAD_LIMIT>>>(da, db, dc, dsa, dsb, dsc, scalar, STREAM_ARRAY_SIZE); #else stream_triad<<<TEAM_NUM, THREAD_LIMIT>>>(da, db, dc, scalar, STREAM_ARRAY_SIZE); #endif cudaErrorCheck(cudaGetLastError()); cudaErrorCheck(cudaDeviceSynchronize()); times[3][k] = mysecond() - times[3][k]; } #ifdef EXECUTION_TIME auto end_time = std::chrono::high_resolution_clock::now(); std::chrono::duration<double> elapsed_time = end_time - start_time; printf("overall execution time = %f seconds\n", elapsed_time.count()); #endif cudaErrorCheck(cudaMemcpy(a, da, size_in_byte, cudaMemcpyDeviceToHost)); cudaErrorCheck(cudaMemcpy(b, db, size_in_byte, cudaMemcpyDeviceToHost)); cudaErrorCheck(cudaMemcpy(c, dc, size_in_byte, cudaMemcpyDeviceToHost)); #ifdef SHADOW_MEMORY cudaErrorCheck(cudaMemcpy(sa, dsa, smSizeInByte, cudaMemcpyDeviceToHost)); cudaErrorCheck(cudaMemcpy(sb, dsb, smSizeInByte, cudaMemcpyDeviceToHost)); cudaErrorCheck(cudaMemcpy(sc, dsc, smSizeInByte, cudaMemcpyDeviceToHost)); #endif /* --- SUMMARY --- */ for (k = 1; k < NTIMES; k++) /* note -- skip first iteration */ { for (j = 0; j < 4; j++) { avgtime[j] = avgtime[j] + times[j][k]; mintime[j] = MIN(mintime[j], times[j][k]); maxtime[j] = MAX(maxtime[j], times[j][k]); } } printf("Function Best Rate MB/s Avg time Min time Max time\n"); for (j = 0; j < 4; j++) { avgtime[j] = avgtime[j] / (double)(NTIMES - 1); printf("%s%12.1f %11.6f %11.6f %11.6f\n", label[j], 1.0E-06 * bytes[j] / mintime[j], avgtime[j], mintime[j], maxtime[j]); } printf(HLINE); /* --- Check Results --- */ checkSTREAMresults(); printf(HLINE); #ifdef SHADOW_MEMORY checkShadowMemory(); #endif cudaErrorCheck(cudaFree(da)); cudaErrorCheck(cudaFree(db)); cudaErrorCheck(cudaFree(dc)); #ifdef SHADOW_MEMORY cudaErrorCheck(cudaFree(dsa)); cudaErrorCheck(cudaFree(dsb)); cudaErrorCheck(cudaFree(dsc)); #endif return 0; } #define M 20 int checktick() { int i, minDelta, Delta; double t1, t2, timesfound[M]; /* Collect a sequence of M unique time values from the system. */ for (i = 0; i < M; i++) { t1 = mysecond(); while (((t2 = mysecond()) - t1) < 1.0E-6) ; timesfound[i] = t1 = t2; } /* * Determine the minimum difference between these M values. * This result will be our estimate (in microseconds) for the * clock granularity. */ minDelta = 1000000; for (i = 1; i < M; i++) { Delta = (int)(1.0E6 * (timesfound[i] - timesfound[i - 1])); minDelta = MIN(minDelta, MAX(Delta, 0)); } return (minDelta); } /* A gettimeofday routine to give access to the wall clock timer on most UNIX-like systems. */ #include <sys/time.h> double mysecond() { struct timeval tp; struct timezone tzp; int i; i = gettimeofday(&tp, &tzp); return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6); } // long getTime() { // struct timeval tp; // struct timezone tzp; // int i; // i = gettimeofday(&tp, &tzp); // return ((long)tp.tv_sec * 1000000 + (long)tp.tv_usec); //} #ifndef abs #define abs(a) ((a) >= 0 ? 
(a) : -(a)) #endif void checkSTREAMresults() { STREAM_TYPE aj, bj, cj, scalar; STREAM_TYPE aSumErr, bSumErr, cSumErr; STREAM_TYPE aAvgErr, bAvgErr, cAvgErr; double epsilon; ssize_t j; int k, ierr, err; /* reproduce initialization */ aj = 1.0; bj = 2.0; cj = 0.0; /* a[] is modified during timing check */ aj = 2.0E0 * aj; /* now execute timing loop */ scalar = 3.0; for (k = 0; k < NTIMES; k++) { cj = aj; bj = scalar * cj; cj = aj + bj; aj = bj + scalar * cj; } /* accumulate deltas between observed and expected results */ aSumErr = 0.0; bSumErr = 0.0; cSumErr = 0.0; for (j = 0; j < STREAM_ARRAY_SIZE; j++) { aSumErr += abs(a[j] - aj); bSumErr += abs(b[j] - bj); cSumErr += abs(c[j] - cj); // if (j == 417) printf("Index 417: c[j]: %f, cj: %f\n",c[j],cj); // // // MCCALPIN } aAvgErr = aSumErr / (STREAM_TYPE)STREAM_ARRAY_SIZE; bAvgErr = bSumErr / (STREAM_TYPE)STREAM_ARRAY_SIZE; cAvgErr = cSumErr / (STREAM_TYPE)STREAM_ARRAY_SIZE; if (sizeof(STREAM_TYPE) == 4) { epsilon = 1.e-6; } else if (sizeof(STREAM_TYPE) == 8) { epsilon = 1.e-13; } else { printf("WEIRD: sizeof(STREAM_TYPE) = %lu\n", sizeof(STREAM_TYPE)); epsilon = 1.e-6; } err = 0; if (abs(aAvgErr / aj) > epsilon) { err++; printf( "Failed Vac++ multilinelidation on array a[], AvgRelAbsErr > " "epsilon (%e)\n", epsilon); printf(" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n", aj, aAvgErr, abs(aAvgErr) / aj); ierr = 0; for (j = 0; j < STREAM_ARRAY_SIZE; j++) { if (abs(a[j] / aj - 1.0) > epsilon) { ierr++; #ifdef VERBOSE if (ierr < 10) { printf( " array a: index: %ld, expected: %e, observed: " "%e, relative error: %e\n", j, aj, a[j], abs((aj - a[j]) / aAvgErr)); } #endif } } printf(" For array a[], %d errors were found.\n", ierr); } if (abs(bAvgErr / bj) > epsilon) { err++; printf("Failed Validation on array b[], AvgRelAbsErr > epsilon (%e)\n", epsilon); printf(" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n", bj, bAvgErr, abs(bAvgErr) / bj); printf(" AvgRelAbsErr > Epsilon (%e)\n", epsilon); ierr = 0; for (j = 0; j < STREAM_ARRAY_SIZE; j++) { if (abs(b[j] / bj - 1.0) > epsilon) { ierr++; #ifdef VERBOSE if (ierr < 10) { printf( " array b: index: %ld, expected: %e, observed: " "%e, relative error: %e\n", j, bj, b[j], abs((bj - b[j]) / bAvgErr)); } #endif } } printf(" For array b[], %d errors were found.\n", ierr); } if (abs(cAvgErr / cj) > epsilon) { err++; printf("Failed Validation on array c[], AvgRelAbsErr > epsilon (%e)\n", epsilon); printf(" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n", cj, cAvgErr, abs(cAvgErr) / cj); printf(" AvgRelAbsErr > Epsilon (%e)\n", epsilon); ierr = 0; for (j = 0; j < STREAM_ARRAY_SIZE; j++) { if (abs(c[j] / cj - 1.0) > epsilon) { ierr++; #ifdef VERBOSE if (ierr < 10) { printf( " array c: index: %ld, expected: %e, observed: " "%e, relative error: %e\n", j, cj, c[j], abs((cj - c[j]) / cAvgErr)); } #endif } } printf(" For array c[], %d errors were found.\n", ierr); } if (err == 0) { printf( "Solution Validates: avg error less than %e on all three arrays\n", epsilon); } #ifdef VERBOSE printf("Results Validation Verbose Results: \n"); printf(" Expected a(1), b(1), c(1): %f %f %f \n", aj, bj, cj); printf(" Observed a(1), b(1), c(1): %f %f %f \n", a[1], b[1], c[1]); printf(" Rel Errors on a, b, c: %e %e %e \n", abs(aAvgErr / aj), abs(bAvgErr / bj), abs(cAvgErr / cj)); #endif } #ifdef SHADOW_MEMORY void checkShadowMemory() { printf(HLINE); unsigned limit = 5; unsigned stripe = smSize / limit; printf("sa:\n"); for (unsigned i = 0; i < smSize; i += stripe) { sa[i].outputSM(); } printf("sb:\n"); for 
(unsigned i = 0; i < smSize; i += stripe) {
    sb[i].outputSM();
  }
  printf("sc:\n");
  for (unsigned i = 0; i < smSize; i += stripe) {
    sc[i].outputSM();
  }
  printf(HLINE);
}
#endif
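All four kernels in both files above split the array into gridDim.x contiguous chunks, hand one extra element to each of the first size % gridDim.x blocks, and let the threads of a block stride through their chunk by blockDim.x. The host-side sketch below replays that index arithmetic and counts how often each element is touched; check_partition is an illustrative helper, not code taken from the benchmark. With the defaults (STREAM_ARRAY_SIZE = 10000000, TEAM_NUM = 4) the remainder is zero and every element is covered exactly once; when the size is not divisible by the grid size, the blocks with index >= remain do not shift their start past the extra elements given to earlier blocks, so the sketch reports elements visited twice near the chunk boundaries and a missed tail.

#include <cstdio>
#include <vector>

// Replays the block/thread partitioning used by stream_copy/scale/add/triad
// on the host and counts visits per element.
static unsigned check_partition(unsigned size, unsigned gridDim, unsigned blockDim) {
  std::vector<unsigned> visits(size, 0);
  unsigned chunk  = size / gridDim;
  unsigned remain = size % gridDim;
  for (unsigned block = 0; block < gridDim; block++) {
    // Same arithmetic as the kernels.
    unsigned start = chunk * block;
    unsigned end   = start + chunk;
    if (block < remain) {
      start += block;
      end = start + chunk + 1;
    }
    for (unsigned thread = 0; thread < blockDim; thread++) {
      for (unsigned index = start + thread; index < end; index += blockDim) {
        visits[index]++;
      }
    }
  }
  unsigned bad = 0;
  for (unsigned i = 0; i < size; i++) {
    if (visits[i] != 1) bad++;
  }
  printf("size=%u grid=%u block=%u -> %u element(s) not visited exactly once\n",
         size, gridDim, blockDim, bad);
  return bad;
}

int main() {
  check_partition(10000000, 4, 512);  // the defaults: remainder 0, exact cover
  check_partition(10, 4, 2);          // remainder 2: overlap near the front, missed tail
  return 0;
}

The second call prints a non-zero count, which is worth keeping in mind if TEAM_NUM or STREAM_ARRAY_SIZE is changed so that the division is no longer exact.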
7a70b2c9678eb109c870cfba923aa21bfa76269d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cudart/hip/hip_vector_types.h" #include "cudart/cuda_math.h" #include "Common.h" #define INFINITY ((float)(1e+300 * 1e+300)) __device__ __constant__ int screen_width; __device__ __constant__ int screen_pitch; __device__ __constant__ int screen_height; #include "Util.h" #include "Material.h" #include "Sky.h" #include "Random.h" #include "RayCone.h" __device__ __constant__ Settings settings; // Frame Buffers __device__ __constant__ float4 * frame_buffer_albedo; __device__ __constant__ float4 * frame_buffer_direct; __device__ __constant__ float4 * frame_buffer_indirect; // Final Frame Buffer, shared with OpenGL __device__ __constant__ Surface<float4> accumulator; #include "Raytracing/BVH.h" #include "SVGF/SVGF.h" #include "SVGF/TAA.h" struct Camera { float3 position; float3 bottom_left_corner; float3 x_axis; float3 y_axis; float pixel_spread_angle; float aperture_radius; float focal_distance; } __device__ __constant__ camera; __device__ PixelQuery pixel_query = { INVALID, INVALID, INVALID, INVALID }; extern "C" __global__ void kernel_generate( int rand_seed, int sample_index, int pixel_offset, int pixel_count ) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= pixel_count) return; int index_offset = index + pixel_offset; int x = index_offset % screen_width; int y = index_offset / screen_width; unsigned seed = wang_hash(index_offset ^ rand_seed); int pixel_index = x + y * screen_pitch; ASSERT(pixel_index < screen_pitch * screen_height, "Pixel should fit inside the buffer"); float u0 = random_float_xorshift(seed); float u1 = random_float_xorshift(seed); float u2 = random_float_heitz(x, y, sample_index, 0, 0, seed); float u3 = random_float_heitz(x, y, sample_index, 0, 1, seed); float2 jitter; if (settings.enable_svgf) { jitter.x = taa_halton_x[sample_index & (TAA_HALTON_NUM_SAMPLES-1)]; jitter.y = taa_halton_y[sample_index & (TAA_HALTON_NUM_SAMPLES-1)]; } else { switch (settings.reconstruction_filter) { case ReconstructionFilter::BOX: { jitter.x = u1; jitter.y = u2; break; } case ReconstructionFilter::GAUSSIAN: { float2 gaussians = box_muller(u1, u2); jitter.x = 0.5f + 0.5f * gaussians.x; jitter.y = 0.5f + 0.5f * gaussians.y; break; } } } float x_jittered = float(x) + jitter.x; float y_jittered = float(y) + jitter.y; float3 focal_point = camera.focal_distance * normalize(camera.bottom_left_corner + x_jittered * camera.x_axis + y_jittered * camera.y_axis); float2 lens_point = camera.aperture_radius * random_point_in_regular_n_gon<5>(u2, u3); float3 offset = camera.x_axis * lens_point.x + camera.y_axis * lens_point.y; float3 direction = normalize(focal_point - offset); // Create primary Ray that starts at the Camera's position and goes through the current pixel ray_buffer_trace.origin .set(index, camera.position + offset); ray_buffer_trace.direction.set(index, direction); ray_buffer_trace.pixel_index_and_mis_eligable[index] = pixel_index | (false << 31); } extern "C" __global__ void kernel_trace(int bounce) { bvh_trace(buffer_sizes.trace[bounce], &buffer_sizes.rays_retired[bounce]); } extern "C" __global__ void kernel_sort(int rand_seed, int bounce) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= buffer_sizes.trace[bounce]) return; float3 ray_origin = ray_buffer_trace.origin .get(index); float3 ray_direction = ray_buffer_trace.direction.get(index); RayHit hit = ray_buffer_trace.hits.get(index); unsigned ray_pixel_index_and_mis_eligable = 
ray_buffer_trace.pixel_index_and_mis_eligable[index]; int ray_pixel_index = ray_pixel_index_and_mis_eligable & ~(0b11 << 31); int x = ray_pixel_index % screen_pitch; int y = ray_pixel_index / screen_pitch; bool mis_eligable = ray_pixel_index_and_mis_eligable >> 31; float3 ray_throughput; if (bounce <= 1) { ray_throughput = make_float3(1.0f); // Throughput is known to be (1,1,1) still, skip the global memory load } else { ray_throughput = ray_buffer_trace.throughput.get(index); } // If we didn't hit anything, sample the Sky if (hit.triangle_id == INVALID) { float3 illumination = ray_throughput * sample_sky(ray_direction); if (bounce == 0) { if (settings.modulate_albedo || settings.enable_svgf) { frame_buffer_albedo[ray_pixel_index] = make_float4(1.0f); } frame_buffer_direct[ray_pixel_index] = make_float4(illumination); } else if (bounce == 1) { frame_buffer_direct[ray_pixel_index] += make_float4(illumination); } else { frame_buffer_indirect[ray_pixel_index] += make_float4(illumination); } return; } // Get the Material of the Mesh we hit int material_id = mesh_get_material_id(hit.mesh_id); MaterialType material_type = material_get_type(material_id); if (bounce == 0 && pixel_query.pixel_index == ray_pixel_index) { pixel_query.mesh_id = hit.mesh_id; pixel_query.triangle_id = hit.triangle_id; pixel_query.material_id = material_id; } if (material_type == MaterialType::LIGHT) { // Obtain the Light's position and normal TrianglePosNor light = triangle_get_positions_and_normals(hit.triangle_id); float3 light_point; float3 light_normal; triangle_barycentric(light, hit.u, hit.v, light_point, light_normal); float3 light_point_prev = light_point; // Transform into world space Matrix3x4 world = mesh_get_transform(hit.mesh_id); matrix3x4_transform_position (world, light_point); matrix3x4_transform_direction(world, light_normal); light_normal = normalize(light_normal); if (bounce == 0 && settings.enable_svgf) { Matrix3x4 world_prev = mesh_get_transform_prev(hit.mesh_id); matrix3x4_transform_position(world_prev, light_point_prev); svgf_set_gbuffers(x, y, hit, light_point, light_normal, light_point_prev); } MaterialLight material_light = material_as_light(material_id); bool should_count_light_contribution = settings.enable_next_event_estimation ? 
!mis_eligable : true; if (should_count_light_contribution) { float3 illumination = ray_throughput * material_light.emission; if (bounce == 0) { if (settings.modulate_albedo || settings.enable_svgf) { frame_buffer_albedo[ray_pixel_index] = make_float4(1.0f); } frame_buffer_direct[ray_pixel_index] = make_float4(material_light.emission); } else if (bounce == 1) { frame_buffer_direct[ray_pixel_index] += make_float4(illumination); } else { frame_buffer_indirect[ray_pixel_index] += make_float4(illumination); } return; } if (settings.enable_multiple_importance_sampling) { float3 to_light = light_point - ray_origin;; float distance_to_light_squared = dot(to_light, to_light); float distance_to_light = sqrtf(distance_to_light_squared); to_light /= distance_to_light; // Normalize float cos_o = fabsf(dot(to_light, light_normal)); // if (cos_o <= 0.0f) return; float power = material_light.emission.x + material_light.emission.y + material_light.emission.z; float brdf_pdf = ray_buffer_trace.last_pdf[index]; float light_pdf = power * distance_to_light_squared / (cos_o * lights_total_power); float mis_pdf = brdf_pdf + light_pdf; float3 illumination = ray_throughput * material_light.emission * brdf_pdf / mis_pdf; if (bounce == 1) { frame_buffer_direct[ray_pixel_index] += make_float4(illumination); } else { frame_buffer_indirect[ray_pixel_index] += make_float4(illumination); } } return; } unsigned seed = wang_hash(index ^ rand_seed); // Russian Roulette if (bounce > 0) { // Throughput does not include albedo so it doesn't need to be demodulated by SVGF (causing precision issues) // This deteriorates Russian Roulette performance, so albedo is included here float3 throughput_with_albedo = ray_throughput * make_float3(frame_buffer_albedo[ray_pixel_index]); float survival_probability = saturate(vmax_max(throughput_with_albedo.x, throughput_with_albedo.y, throughput_with_albedo.z)); if (random_float_xorshift(seed) > survival_probability) { return; } ray_throughput /= survival_probability; } switch (material_type) { case MaterialType::DIFFUSE: { int index_out = atomic_agg_inc(&buffer_sizes.diffuse[bounce]); ray_buffer_shade_diffuse.direction.set(index_out, ray_direction); #if ENABLE_MIPMAPPING if (bounce > 0) ray_buffer_shade_diffuse.cone[index_out] = ray_buffer_trace.cone[index]; #endif ray_buffer_shade_diffuse.hits.set(index_out, hit); ray_buffer_shade_diffuse.pixel_index[index_out] = ray_pixel_index; if (bounce > 0) ray_buffer_shade_diffuse.throughput.set(index_out, ray_throughput); break; } case MaterialType::DIELECTRIC: { int index_out = atomic_agg_inc(&buffer_sizes.dielectric[bounce]); ray_buffer_shade_dielectric_and_glossy.direction.set(index_out, ray_direction); #if ENABLE_MIPMAPPING if (bounce > 0) ray_buffer_shade_dielectric_and_glossy.cone[index_out] = ray_buffer_trace.cone[index]; #endif ray_buffer_shade_dielectric_and_glossy.hits.set(index_out, hit); ray_buffer_shade_dielectric_and_glossy.pixel_index[index_out] = ray_pixel_index; if (bounce > 0) ray_buffer_shade_dielectric_and_glossy.throughput.set(index_out, ray_throughput); break; } case MaterialType::GLOSSY: { // Glossy Material buffer is shared with Dielectric Material buffer but grows in the opposite direction int index_out = (BATCH_SIZE - 1) - atomic_agg_inc(&buffer_sizes.glossy[bounce]); ray_buffer_shade_dielectric_and_glossy.direction.set(index_out, ray_direction); #if ENABLE_MIPMAPPING if (bounce > 0) ray_buffer_shade_dielectric_and_glossy.cone[index_out] = ray_buffer_trace.cone[index]; #endif 
ray_buffer_shade_dielectric_and_glossy.hits.set(index_out, hit); ray_buffer_shade_dielectric_and_glossy.pixel_index[index_out] = ray_pixel_index; if (bounce > 0) ray_buffer_shade_dielectric_and_glossy.throughput.set(index_out, ray_throughput); break; } } } #if ENABLE_MIPMAPPING __device__ inline float3 sample_albedo( int bounce, const float3 & material_diffuse, int material_texture_id, const RayHit & hit, const TrianglePosNorTex & hit_triangle, const float3 & hit_point_local, const float3 & hit_normal, const float2 & hit_tex_coord, const float3 & ray_direction, const float2 * cone_buffer, int cone_buffer_index, float & cone_angle, float & cone_width ) { float3 albedo; float3 geometric_normal = cross(hit_triangle.position_edge_1, hit_triangle.position_edge_2); float triangle_area_inv = 1.0f / length(geometric_normal); geometric_normal *= triangle_area_inv; // Normalize float mesh_scale = mesh_get_scale(hit.mesh_id); if (bounce == 0) { cone_angle = camera.pixel_spread_angle; cone_width = cone_angle * hit.t; float3 ellipse_axis_1, ellipse_axis_2; ray_cone_get_ellipse_axes(ray_direction, geometric_normal, cone_width, ellipse_axis_1, ellipse_axis_2); float2 gradient_1, gradient_2; ray_cone_get_texture_gradients( mesh_scale, geometric_normal, triangle_area_inv, hit_triangle.position_0, hit_triangle.position_edge_1, hit_triangle.position_edge_2, hit_triangle.tex_coord_0, hit_triangle.tex_coord_edge_1, hit_triangle.tex_coord_edge_2, hit_point_local, hit_tex_coord, ellipse_axis_1, ellipse_axis_2, gradient_1, gradient_2 ); // Anisotropic sampling albedo = material_get_albedo(material_diffuse, material_texture_id, hit_tex_coord.x, hit_tex_coord.y, gradient_1, gradient_2); } else { float2 cone = cone_buffer[cone_buffer_index]; cone_angle = cone.x; cone_width = cone.y + cone_angle * hit.t; float2 tex_size = texture_get_size(material_texture_id); float lod_triangle = sqrtf(tex_size.x * tex_size.y * triangle_get_lod(mesh_scale, triangle_area_inv, hit_triangle.tex_coord_edge_1, hit_triangle.tex_coord_edge_2)); float lod_ray_cone = ray_cone_get_lod(ray_direction, hit_normal, cone_width); float lod = log2f(lod_triangle * lod_ray_cone); // Trilinear sampling albedo = material_get_albedo(material_diffuse, material_texture_id, hit_tex_coord.x, hit_tex_coord.y, lod); } float curvature = triangle_get_curvature( hit_triangle.position_edge_1, hit_triangle.position_edge_2, hit_triangle.normal_edge_1, hit_triangle.normal_edge_2 ) / mesh_scale; cone_angle += -2.0f * curvature * fabsf(cone_width / dot(hit_normal, ray_direction)); // Eq. 
5 (Akenine-Mller 2021) return albedo; } #endif template<typename BRDFEvaluator> __device__ inline void nee_sample( int x, int y, int bounce, int sample_index, unsigned & seed, const float3 & hit_point, const float3 & hit_normal, const float3 & throughput, BRDFEvaluator brdf_evaluator ) { // Pick random point on random Light float light_u, light_v; int light_mesh_id; int light_id = random_point_on_random_light(x, y, sample_index, bounce, seed, light_u, light_v, light_mesh_id); // Obtain the Light's position and normal TrianglePosNor light = triangle_get_positions_and_normals(light_id); float3 light_point; float3 light_normal; triangle_barycentric(light, light_u, light_v, light_point, light_normal); // Transform into world space Matrix3x4 light_world = mesh_get_transform(light_mesh_id); matrix3x4_transform_position (light_world, light_point); matrix3x4_transform_direction(light_world, light_normal); light_normal = normalize(light_normal); float3 to_light = light_point - hit_point; float distance_to_light_squared = dot(to_light, to_light); float distance_to_light = sqrtf(distance_to_light_squared); // Normalize the vector to the light to_light /= distance_to_light; float cos_o = -dot(to_light, light_normal); float cos_i = dot(to_light, hit_normal); // Only trace Shadow Ray if light transport is possible given the normals if (cos_o > 0.0f && cos_i > 0.0f) { int light_material_id = mesh_get_material_id(light_mesh_id); MaterialLight material_light = material_as_light(light_material_id); float power = material_light.emission.x + material_light.emission.y + material_light.emission.z; float brdf_pdf; float brdf = brdf_evaluator(to_light, brdf_pdf); float light_pdf = power * distance_to_light_squared / (cos_o * lights_total_power); float pdf; if (settings.enable_multiple_importance_sampling) { pdf = brdf_pdf + light_pdf; } else { pdf = light_pdf; } float3 illumination = throughput * brdf * material_light.emission * cos_i / pdf; int shadow_ray_index = atomic_agg_inc(&buffer_sizes.shadow[bounce]); ray_buffer_shadow.ray_origin .set(shadow_ray_index, hit_point); ray_buffer_shadow.ray_direction.set(shadow_ray_index, to_light); ray_buffer_shadow.max_distance[shadow_ray_index] = distance_to_light - EPSILON; ray_buffer_shadow.illumination_and_pixel_index[shadow_ray_index] = make_float4( illumination.x, illumination.y, illumination.z, __int_as_float(x + y * screen_pitch) ); } } extern "C" __global__ void kernel_shade_diffuse(int rand_seed, int bounce, int sample_index) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= buffer_sizes.diffuse[bounce]) return; float3 ray_direction = ray_buffer_shade_diffuse.direction.get(index); RayHit hit = ray_buffer_shade_diffuse.hits .get(index); int ray_pixel_index = ray_buffer_shade_diffuse.pixel_index[index]; int x = ray_pixel_index % screen_pitch; int y = ray_pixel_index / screen_pitch; float3 ray_throughput; if (bounce == 0) { ray_throughput = make_float3(1.0f); // Throughput is known to be (1,1,1) still, skip the global memory load } else { ray_throughput = ray_buffer_shade_diffuse.throughput.get(index); } unsigned seed = wang_hash(index ^ rand_seed); int material_id = mesh_get_material_id(hit.mesh_id); MaterialDiffuse material = material_as_diffuse(material_id); // Obtain hit Triangle position, normal, and texture coordinates TrianglePosNorTex hit_triangle = triangle_get_positions_normals_and_tex_coords(hit.triangle_id); float3 hit_point; float3 hit_normal; float2 hit_tex_coord; triangle_barycentric(hit_triangle, hit.u, hit.v, hit_point, hit_normal, 
hit_tex_coord); float3 hit_point_local = hit_point; // Keep copy of the untransformed hit point in local space // Transform into world space Matrix3x4 world = mesh_get_transform(hit.mesh_id); matrix3x4_transform_position (world, hit_point); matrix3x4_transform_direction(world, hit_normal); hit_normal = normalize(hit_normal); if (dot(ray_direction, hit_normal) > 0.0f) hit_normal = -hit_normal; // Sample albedo #if ENABLE_MIPMAPPING float cone_angle; float cone_width; float3 albedo = sample_albedo( bounce, material.diffuse, material.texture_id, hit, hit_triangle, hit_point_local, hit_normal, hit_tex_coord, ray_direction, ray_buffer_shade_diffuse.cone, index, cone_angle, cone_width ); #else float3 albedo = material_get_albedo(material.diffuse, material.texture_id, hit_tex_coord.x, hit_tex_coord.y); #endif float3 throughput = ray_throughput; if (bounce > 0) { throughput *= albedo; } else if (settings.modulate_albedo || settings.enable_svgf) { frame_buffer_albedo[ray_pixel_index] = make_float4(albedo); } if (bounce == 0 && settings.enable_svgf) { float3 hit_point_prev = hit_point_local; Matrix3x4 world_prev = mesh_get_transform_prev(hit.mesh_id); matrix3x4_transform_position(world_prev, hit_point_prev); svgf_set_gbuffers(x, y, hit, hit_point, hit_normal, hit_point_prev); } if (settings.enable_next_event_estimation && lights_total_power > 0.0f) { nee_sample(x, y, bounce, sample_index, seed, hit_point, hit_normal, throughput, [&](const float3 & to_light, float & pdf) { pdf = dot(to_light, hit_normal) * ONE_OVER_PI; return ONE_OVER_PI; }); } if (bounce == settings.num_bounces - 1) return; int index_out = atomic_agg_inc(&buffer_sizes.trace[bounce + 1]); float3 tangent, binormal; orthonormal_basis(hit_normal, tangent, binormal); float3 direction_local = random_cosine_weighted_direction(x, y, sample_index, bounce, seed); float3 direction_world = local_to_world(direction_local, tangent, binormal, hit_normal); ray_buffer_trace.origin .set(index_out, hit_point); ray_buffer_trace.direction.set(index_out, direction_world); #if ENABLE_MIPMAPPING ray_buffer_trace.cone[index_out] = make_float2(cone_angle, cone_width); #endif ray_buffer_trace.pixel_index_and_mis_eligable[index_out] = ray_pixel_index | (true << 31); if (bounce > 0) ray_buffer_trace.throughput.set(index_out, throughput); ray_buffer_trace.last_pdf[index_out] = fabsf(dot(direction_world, hit_normal)) * ONE_OVER_PI; } extern "C" __global__ void kernel_shade_dielectric(int rand_seed, int bounce) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= buffer_sizes.dielectric[bounce] || bounce == settings.num_bounces - 1) return; float3 ray_direction = ray_buffer_shade_dielectric_and_glossy.direction.get(index); RayHit hit = ray_buffer_shade_dielectric_and_glossy.hits .get(index); int ray_pixel_index = ray_buffer_shade_dielectric_and_glossy.pixel_index[index]; float3 ray_throughput; if (bounce == 0) { ray_throughput = make_float3(1.0f); // Throughput is known to be (1,1,1) still, skip the global memory load } else { ray_throughput = ray_buffer_shade_dielectric_and_glossy.throughput.get(index); } ASSERT(hit.triangle_id != -1, "Ray must have hit something for this Kernel to be invoked!"); unsigned seed = wang_hash(index ^ rand_seed); int material_id = mesh_get_material_id(hit.mesh_id); MaterialDielectric material = material_as_dielectric(material_id); // Obtain hit Triangle position, normal, and texture coordinates TrianglePosNor hit_triangle = triangle_get_positions_and_normals(hit.triangle_id); float3 hit_point; float3 hit_normal; 
triangle_barycentric(hit_triangle, hit.u, hit.v, hit_point, hit_normal); // Transform into world space Matrix3x4 world = mesh_get_transform(hit.mesh_id); matrix3x4_transform_position (world, hit_point); matrix3x4_transform_direction(world, hit_normal); hit_normal = normalize(hit_normal); int index_out = atomic_agg_inc(&buffer_sizes.trace[bounce + 1]); // Calculate proper facing normal and determine indices of refraction float3 normal; float cos_theta; float eta_1; float eta_2; float dir_dot_normal = dot(ray_direction, hit_normal); if (dir_dot_normal < 0.0f) { // Entering material eta_1 = 1.0f; eta_2 = material.index_of_refraction; normal = hit_normal; cos_theta = -dir_dot_normal; } else { // Leaving material eta_1 = material.index_of_refraction; eta_2 = 1.0f; normal = -hit_normal; cos_theta = dir_dot_normal; // Lambert-Beer Law // NOTE: does not take into account nested dielectrics! ray_throughput.x *= expf(material.negative_absorption.x * hit.t); ray_throughput.y *= expf(material.negative_absorption.y * hit.t); ray_throughput.z *= expf(material.negative_absorption.z * hit.t); } float eta = eta_1 / eta_2; float k = 1.0f - eta*eta * (1.0f - cos_theta*cos_theta); float3 ray_direction_reflected = reflect(ray_direction, hit_normal); float3 direction_out; if (k < 0.0f) { // Total Internal Reflection direction_out = ray_direction_reflected; } else { float3 ray_direction_refracted = normalize(eta * ray_direction + (eta * cos_theta - sqrtf(k)) * hit_normal); float f = fresnel(eta_1, eta_2, cos_theta, -dot(ray_direction_refracted, normal)); if (random_float_xorshift(seed) < f) { direction_out = ray_direction_reflected; } else { direction_out = ray_direction_refracted; } } if (bounce == 0 && (settings.modulate_albedo || settings.enable_svgf)) { frame_buffer_albedo[ray_pixel_index] = make_float4(1.0f); } ray_buffer_trace.origin .set(index_out, hit_point); ray_buffer_trace.direction.set(index_out, direction_out); #if ENABLE_MIPMAPPING float2 cone = ray_buffer_shade_dielectric_and_glossy.cone[index]; float cone_angle = cone.x; float cone_width = cone.y + cone_angle * hit.t; float mesh_scale = mesh_get_scale(hit.mesh_id); float curvature = triangle_get_curvature( hit_triangle.position_edge_1, hit_triangle.position_edge_2, hit_triangle.normal_edge_1, hit_triangle.normal_edge_2 ) / mesh_scale; cone_angle += -2.0f * curvature * fabsf(cone_width) / dot(hit_normal, ray_direction); // Eq. 
5 (Akenine-Mller 2021) ray_buffer_trace.cone[index_out] = make_float2(cone_angle, cone_width); #endif ray_buffer_trace.pixel_index_and_mis_eligable[index_out] = ray_pixel_index | (false << 31); if (bounce > 0) ray_buffer_trace.throughput.set(index_out, ray_throughput); } extern "C" __global__ void kernel_shade_glossy(int rand_seed, int bounce, int sample_index) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= buffer_sizes.glossy[bounce]) return; index = (BATCH_SIZE - 1) - index; float3 ray_direction = ray_buffer_shade_dielectric_and_glossy.direction.get(index); RayHit hit = ray_buffer_shade_dielectric_and_glossy.hits.get(index); int ray_pixel_index = ray_buffer_shade_dielectric_and_glossy.pixel_index[index]; int x = ray_pixel_index % screen_pitch; int y = ray_pixel_index / screen_pitch; float3 ray_throughput; if (bounce == 0) { ray_throughput = make_float3(1.0f); // Throughput is known to be (1,1,1) still, skip the global memory load } else { ray_throughput = ray_buffer_shade_dielectric_and_glossy.throughput.get(index); } ASSERT(hit.triangle_id != -1, "Ray must have hit something for this Kernel to be invoked!"); unsigned seed = wang_hash(index ^ rand_seed); int material_id = mesh_get_material_id(hit.mesh_id); MaterialGlossy material = material_as_glossy(material_id); // Obtain hit Triangle position, normal, and texture coordinates TrianglePosNorTex hit_triangle = triangle_get_positions_normals_and_tex_coords(hit.triangle_id); float3 hit_point; float3 hit_normal; float2 hit_tex_coord; triangle_barycentric(hit_triangle, hit.u, hit.v, hit_point, hit_normal, hit_tex_coord); float3 hit_point_local = hit_point; // Keep copy of the untransformed hit point in local space // Transform into world space Matrix3x4 world = mesh_get_transform(hit.mesh_id); matrix3x4_transform_position (world, hit_point); matrix3x4_transform_direction(world, hit_normal); hit_normal = normalize(hit_normal); if (dot(ray_direction, hit_normal) > 0.0f) hit_normal = -hit_normal; // Sample albedo #if ENABLE_MIPMAPPING float cone_angle; float cone_width; float3 albedo = sample_albedo( bounce, material.diffuse, material.texture_id, hit, hit_triangle, hit_point_local, hit_normal, hit_tex_coord, ray_direction, ray_buffer_shade_dielectric_and_glossy.cone, index, cone_angle, cone_width ); #else float3 albedo = material_get_albedo(material.diffuse, material.texture_id, hit_tex_coord.x, hit_tex_coord.y); #endif float3 throughput = ray_throughput; if (bounce > 0) { throughput *= albedo; } else if (settings.modulate_albedo || settings.enable_svgf) { frame_buffer_albedo[ray_pixel_index] = make_float4(albedo); } if (bounce == 0 && settings.enable_svgf) { float3 hit_point_prev = hit_point_local; Matrix3x4 world_prev = mesh_get_transform_prev(hit.mesh_id); matrix3x4_transform_position(world_prev, hit_point_prev); svgf_set_gbuffers(x, y, hit, hit_point, hit_normal, hit_point_prev); } // Slightly widen the distribution to prevent the weights from becoming too large (see Walter et al. 
2007) float alpha = material.roughness; // (1.2f - 0.2f * sqrtf(-dot(ray_direction, hit_normal))) * material.roughness; float alpha2 = alpha * alpha; // Construct orthonormal basis float3 hit_tangent, hit_binormal; orthonormal_basis(hit_normal, hit_tangent, hit_binormal); float3 omega_i = world_to_local(-ray_direction, hit_tangent, hit_binormal, hit_normal); if (settings.enable_next_event_estimation && lights_total_power > 0.0f && material.roughness >= ROUGHNESS_CUTOFF) { nee_sample(x, y, bounce, sample_index, seed, hit_point, hit_normal, throughput, [&](const float3 & to_light, float & pdf) { float3 omega_o = world_to_local(to_light, hit_tangent, hit_binormal, hit_normal); return ggx_eval(omega_o, omega_i, material.index_of_refraction, alpha, alpha, pdf); }); } if (bounce == settings.num_bounces - 1) return; // Importance sample distribution of normals float u1 = random_float_heitz(x, y, sample_index, bounce, 2, seed); float u2 = random_float_heitz(x, y, sample_index, bounce, 3, seed); float3 micro_normal_local = ggx_sample_distribution_of_normals(omega_i, alpha, alpha, u1, u2); float3 omega_o = reflect(-omega_i, micro_normal_local); float3 half_vector = normalize(omega_o + omega_i); float mu = fmaxf(0.0, dot(omega_o, half_vector)); float F = fresnel_schlick(material.index_of_refraction, 1.0f, mu); float D = ggx_D(half_vector, alpha, alpha); // Masking/shadowing using two monodirectional Smith terms float G1_o = ggx_G1(omega_o, alpha2, alpha2); float G1_i = ggx_G1(omega_i, alpha2, alpha2); float G2 = G1_o * G1_i; float pdf = G1_o * D * mu / (4.0f * omega_i.z * omega_o.z); throughput *= F * G2 / (G1_o * mu); float3 direction_out = local_to_world(omega_o, hit_tangent, hit_binormal, hit_normal); int index_out = atomic_agg_inc(&buffer_sizes.trace[bounce + 1]); ray_buffer_trace.origin .set(index_out, hit_point); ray_buffer_trace.direction.set(index_out, direction_out); #if ENABLE_MIPMAPPING ray_buffer_trace.cone[index_out] = make_float2(cone_angle, cone_width); #endif ray_buffer_trace.pixel_index_and_mis_eligable[index_out] = ray_pixel_index | ((material.roughness >= ROUGHNESS_CUTOFF) << 31); if (bounce > 0) ray_buffer_trace.throughput.set(index_out, throughput); ray_buffer_trace.last_pdf[index_out] = pdf; } extern "C" __global__ void kernel_trace_shadow(int bounce) { bvh_trace_shadow(buffer_sizes.shadow[bounce], &buffer_sizes.rays_retired_shadow[bounce], bounce); } extern "C" __global__ void kernel_accumulate(float frames_accumulated) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= screen_width || y >= screen_height) return; int pixel_index = x + y * screen_pitch; float4 direct = frame_buffer_direct [pixel_index]; float4 indirect = frame_buffer_indirect[pixel_index]; float4 colour = direct + indirect; if (settings.modulate_albedo) { colour *= frame_buffer_albedo[pixel_index]; } if (frames_accumulated > 0.0f) { float4 colour_prev = accumulator.get(x, y); colour = colour_prev + (colour - colour_prev) / frames_accumulated; // Online average } accumulator.set(x, y, colour); }
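The accumulator update at the end of kernel_accumulate, colour_prev + (colour - colour_prev) / frames_accumulated, is the standard incremental (online) mean. Below is a minimal standalone sketch, with hypothetical names and sample values that are not taken from the renderer, showing that this recurrence reproduces the arithmetic mean of the accumulated samples:

// Minimal sketch (hypothetical helper, not part of the renderer): the incremental
// mean used by kernel_accumulate. After processing sample x_n with n frames
// accumulated, 'mean' equals (x_1 + ... + x_n) / n.
#include <cstdio>

float accumulate_online(const float* samples, int count) {
    float mean = 0.0f;
    for (int n = 1; n <= count; n++) {
        float x = samples[n - 1];
        mean = mean + (x - mean) / float(n); // same form as the accumulator update
    }
    return mean;
}

int main() {
    float samples[] = { 1.0f, 2.0f, 6.0f, 3.0f };
    printf("online mean = %f (expected 3.0)\n", accumulate_online(samples, 4));
    return 0;
}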
7a70b2c9678eb109c870cfba923aa21bfa76269d.cu
#include "cudart/vector_types.h" #include "cudart/cuda_math.h" #include "Common.h" #define INFINITY ((float)(1e+300 * 1e+300)) __device__ __constant__ int screen_width; __device__ __constant__ int screen_pitch; __device__ __constant__ int screen_height; #include "Util.h" #include "Material.h" #include "Sky.h" #include "Random.h" #include "RayCone.h" __device__ __constant__ Settings settings; // Frame Buffers __device__ __constant__ float4 * frame_buffer_albedo; __device__ __constant__ float4 * frame_buffer_direct; __device__ __constant__ float4 * frame_buffer_indirect; // Final Frame Buffer, shared with OpenGL __device__ __constant__ Surface<float4> accumulator; #include "Raytracing/BVH.h" #include "SVGF/SVGF.h" #include "SVGF/TAA.h" struct Camera { float3 position; float3 bottom_left_corner; float3 x_axis; float3 y_axis; float pixel_spread_angle; float aperture_radius; float focal_distance; } __device__ __constant__ camera; __device__ PixelQuery pixel_query = { INVALID, INVALID, INVALID, INVALID }; extern "C" __global__ void kernel_generate( int rand_seed, int sample_index, int pixel_offset, int pixel_count ) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= pixel_count) return; int index_offset = index + pixel_offset; int x = index_offset % screen_width; int y = index_offset / screen_width; unsigned seed = wang_hash(index_offset ^ rand_seed); int pixel_index = x + y * screen_pitch; ASSERT(pixel_index < screen_pitch * screen_height, "Pixel should fit inside the buffer"); float u0 = random_float_xorshift(seed); float u1 = random_float_xorshift(seed); float u2 = random_float_heitz(x, y, sample_index, 0, 0, seed); float u3 = random_float_heitz(x, y, sample_index, 0, 1, seed); float2 jitter; if (settings.enable_svgf) { jitter.x = taa_halton_x[sample_index & (TAA_HALTON_NUM_SAMPLES-1)]; jitter.y = taa_halton_y[sample_index & (TAA_HALTON_NUM_SAMPLES-1)]; } else { switch (settings.reconstruction_filter) { case ReconstructionFilter::BOX: { jitter.x = u1; jitter.y = u2; break; } case ReconstructionFilter::GAUSSIAN: { float2 gaussians = box_muller(u1, u2); jitter.x = 0.5f + 0.5f * gaussians.x; jitter.y = 0.5f + 0.5f * gaussians.y; break; } } } float x_jittered = float(x) + jitter.x; float y_jittered = float(y) + jitter.y; float3 focal_point = camera.focal_distance * normalize(camera.bottom_left_corner + x_jittered * camera.x_axis + y_jittered * camera.y_axis); float2 lens_point = camera.aperture_radius * random_point_in_regular_n_gon<5>(u2, u3); float3 offset = camera.x_axis * lens_point.x + camera.y_axis * lens_point.y; float3 direction = normalize(focal_point - offset); // Create primary Ray that starts at the Camera's position and goes through the current pixel ray_buffer_trace.origin .set(index, camera.position + offset); ray_buffer_trace.direction.set(index, direction); ray_buffer_trace.pixel_index_and_mis_eligable[index] = pixel_index | (false << 31); } extern "C" __global__ void kernel_trace(int bounce) { bvh_trace(buffer_sizes.trace[bounce], &buffer_sizes.rays_retired[bounce]); } extern "C" __global__ void kernel_sort(int rand_seed, int bounce) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= buffer_sizes.trace[bounce]) return; float3 ray_origin = ray_buffer_trace.origin .get(index); float3 ray_direction = ray_buffer_trace.direction.get(index); RayHit hit = ray_buffer_trace.hits.get(index); unsigned ray_pixel_index_and_mis_eligable = ray_buffer_trace.pixel_index_and_mis_eligable[index]; int ray_pixel_index = ray_pixel_index_and_mis_eligable & ~(0b11 << 31); 
int x = ray_pixel_index % screen_pitch; int y = ray_pixel_index / screen_pitch; bool mis_eligable = ray_pixel_index_and_mis_eligable >> 31; float3 ray_throughput; if (bounce <= 1) { ray_throughput = make_float3(1.0f); // Throughput is known to be (1,1,1) still, skip the global memory load } else { ray_throughput = ray_buffer_trace.throughput.get(index); } // If we didn't hit anything, sample the Sky if (hit.triangle_id == INVALID) { float3 illumination = ray_throughput * sample_sky(ray_direction); if (bounce == 0) { if (settings.modulate_albedo || settings.enable_svgf) { frame_buffer_albedo[ray_pixel_index] = make_float4(1.0f); } frame_buffer_direct[ray_pixel_index] = make_float4(illumination); } else if (bounce == 1) { frame_buffer_direct[ray_pixel_index] += make_float4(illumination); } else { frame_buffer_indirect[ray_pixel_index] += make_float4(illumination); } return; } // Get the Material of the Mesh we hit int material_id = mesh_get_material_id(hit.mesh_id); MaterialType material_type = material_get_type(material_id); if (bounce == 0 && pixel_query.pixel_index == ray_pixel_index) { pixel_query.mesh_id = hit.mesh_id; pixel_query.triangle_id = hit.triangle_id; pixel_query.material_id = material_id; } if (material_type == MaterialType::LIGHT) { // Obtain the Light's position and normal TrianglePosNor light = triangle_get_positions_and_normals(hit.triangle_id); float3 light_point; float3 light_normal; triangle_barycentric(light, hit.u, hit.v, light_point, light_normal); float3 light_point_prev = light_point; // Transform into world space Matrix3x4 world = mesh_get_transform(hit.mesh_id); matrix3x4_transform_position (world, light_point); matrix3x4_transform_direction(world, light_normal); light_normal = normalize(light_normal); if (bounce == 0 && settings.enable_svgf) { Matrix3x4 world_prev = mesh_get_transform_prev(hit.mesh_id); matrix3x4_transform_position(world_prev, light_point_prev); svgf_set_gbuffers(x, y, hit, light_point, light_normal, light_point_prev); } MaterialLight material_light = material_as_light(material_id); bool should_count_light_contribution = settings.enable_next_event_estimation ? 
!mis_eligable : true; if (should_count_light_contribution) { float3 illumination = ray_throughput * material_light.emission; if (bounce == 0) { if (settings.modulate_albedo || settings.enable_svgf) { frame_buffer_albedo[ray_pixel_index] = make_float4(1.0f); } frame_buffer_direct[ray_pixel_index] = make_float4(material_light.emission); } else if (bounce == 1) { frame_buffer_direct[ray_pixel_index] += make_float4(illumination); } else { frame_buffer_indirect[ray_pixel_index] += make_float4(illumination); } return; } if (settings.enable_multiple_importance_sampling) { float3 to_light = light_point - ray_origin;; float distance_to_light_squared = dot(to_light, to_light); float distance_to_light = sqrtf(distance_to_light_squared); to_light /= distance_to_light; // Normalize float cos_o = fabsf(dot(to_light, light_normal)); // if (cos_o <= 0.0f) return; float power = material_light.emission.x + material_light.emission.y + material_light.emission.z; float brdf_pdf = ray_buffer_trace.last_pdf[index]; float light_pdf = power * distance_to_light_squared / (cos_o * lights_total_power); float mis_pdf = brdf_pdf + light_pdf; float3 illumination = ray_throughput * material_light.emission * brdf_pdf / mis_pdf; if (bounce == 1) { frame_buffer_direct[ray_pixel_index] += make_float4(illumination); } else { frame_buffer_indirect[ray_pixel_index] += make_float4(illumination); } } return; } unsigned seed = wang_hash(index ^ rand_seed); // Russian Roulette if (bounce > 0) { // Throughput does not include albedo so it doesn't need to be demodulated by SVGF (causing precision issues) // This deteriorates Russian Roulette performance, so albedo is included here float3 throughput_with_albedo = ray_throughput * make_float3(frame_buffer_albedo[ray_pixel_index]); float survival_probability = saturate(vmax_max(throughput_with_albedo.x, throughput_with_albedo.y, throughput_with_albedo.z)); if (random_float_xorshift(seed) > survival_probability) { return; } ray_throughput /= survival_probability; } switch (material_type) { case MaterialType::DIFFUSE: { int index_out = atomic_agg_inc(&buffer_sizes.diffuse[bounce]); ray_buffer_shade_diffuse.direction.set(index_out, ray_direction); #if ENABLE_MIPMAPPING if (bounce > 0) ray_buffer_shade_diffuse.cone[index_out] = ray_buffer_trace.cone[index]; #endif ray_buffer_shade_diffuse.hits.set(index_out, hit); ray_buffer_shade_diffuse.pixel_index[index_out] = ray_pixel_index; if (bounce > 0) ray_buffer_shade_diffuse.throughput.set(index_out, ray_throughput); break; } case MaterialType::DIELECTRIC: { int index_out = atomic_agg_inc(&buffer_sizes.dielectric[bounce]); ray_buffer_shade_dielectric_and_glossy.direction.set(index_out, ray_direction); #if ENABLE_MIPMAPPING if (bounce > 0) ray_buffer_shade_dielectric_and_glossy.cone[index_out] = ray_buffer_trace.cone[index]; #endif ray_buffer_shade_dielectric_and_glossy.hits.set(index_out, hit); ray_buffer_shade_dielectric_and_glossy.pixel_index[index_out] = ray_pixel_index; if (bounce > 0) ray_buffer_shade_dielectric_and_glossy.throughput.set(index_out, ray_throughput); break; } case MaterialType::GLOSSY: { // Glossy Material buffer is shared with Dielectric Material buffer but grows in the opposite direction int index_out = (BATCH_SIZE - 1) - atomic_agg_inc(&buffer_sizes.glossy[bounce]); ray_buffer_shade_dielectric_and_glossy.direction.set(index_out, ray_direction); #if ENABLE_MIPMAPPING if (bounce > 0) ray_buffer_shade_dielectric_and_glossy.cone[index_out] = ray_buffer_trace.cone[index]; #endif 
ray_buffer_shade_dielectric_and_glossy.hits.set(index_out, hit); ray_buffer_shade_dielectric_and_glossy.pixel_index[index_out] = ray_pixel_index; if (bounce > 0) ray_buffer_shade_dielectric_and_glossy.throughput.set(index_out, ray_throughput); break; } } } #if ENABLE_MIPMAPPING __device__ inline float3 sample_albedo( int bounce, const float3 & material_diffuse, int material_texture_id, const RayHit & hit, const TrianglePosNorTex & hit_triangle, const float3 & hit_point_local, const float3 & hit_normal, const float2 & hit_tex_coord, const float3 & ray_direction, const float2 * cone_buffer, int cone_buffer_index, float & cone_angle, float & cone_width ) { float3 albedo; float3 geometric_normal = cross(hit_triangle.position_edge_1, hit_triangle.position_edge_2); float triangle_area_inv = 1.0f / length(geometric_normal); geometric_normal *= triangle_area_inv; // Normalize float mesh_scale = mesh_get_scale(hit.mesh_id); if (bounce == 0) { cone_angle = camera.pixel_spread_angle; cone_width = cone_angle * hit.t; float3 ellipse_axis_1, ellipse_axis_2; ray_cone_get_ellipse_axes(ray_direction, geometric_normal, cone_width, ellipse_axis_1, ellipse_axis_2); float2 gradient_1, gradient_2; ray_cone_get_texture_gradients( mesh_scale, geometric_normal, triangle_area_inv, hit_triangle.position_0, hit_triangle.position_edge_1, hit_triangle.position_edge_2, hit_triangle.tex_coord_0, hit_triangle.tex_coord_edge_1, hit_triangle.tex_coord_edge_2, hit_point_local, hit_tex_coord, ellipse_axis_1, ellipse_axis_2, gradient_1, gradient_2 ); // Anisotropic sampling albedo = material_get_albedo(material_diffuse, material_texture_id, hit_tex_coord.x, hit_tex_coord.y, gradient_1, gradient_2); } else { float2 cone = cone_buffer[cone_buffer_index]; cone_angle = cone.x; cone_width = cone.y + cone_angle * hit.t; float2 tex_size = texture_get_size(material_texture_id); float lod_triangle = sqrtf(tex_size.x * tex_size.y * triangle_get_lod(mesh_scale, triangle_area_inv, hit_triangle.tex_coord_edge_1, hit_triangle.tex_coord_edge_2)); float lod_ray_cone = ray_cone_get_lod(ray_direction, hit_normal, cone_width); float lod = log2f(lod_triangle * lod_ray_cone); // Trilinear sampling albedo = material_get_albedo(material_diffuse, material_texture_id, hit_tex_coord.x, hit_tex_coord.y, lod); } float curvature = triangle_get_curvature( hit_triangle.position_edge_1, hit_triangle.position_edge_2, hit_triangle.normal_edge_1, hit_triangle.normal_edge_2 ) / mesh_scale; cone_angle += -2.0f * curvature * fabsf(cone_width / dot(hit_normal, ray_direction)); // Eq. 
5 (Akenine-Möller 2021) return albedo; } #endif template<typename BRDFEvaluator> __device__ inline void nee_sample( int x, int y, int bounce, int sample_index, unsigned & seed, const float3 & hit_point, const float3 & hit_normal, const float3 & throughput, BRDFEvaluator brdf_evaluator ) { // Pick random point on random Light float light_u, light_v; int light_mesh_id; int light_id = random_point_on_random_light(x, y, sample_index, bounce, seed, light_u, light_v, light_mesh_id); // Obtain the Light's position and normal TrianglePosNor light = triangle_get_positions_and_normals(light_id); float3 light_point; float3 light_normal; triangle_barycentric(light, light_u, light_v, light_point, light_normal); // Transform into world space Matrix3x4 light_world = mesh_get_transform(light_mesh_id); matrix3x4_transform_position (light_world, light_point); matrix3x4_transform_direction(light_world, light_normal); light_normal = normalize(light_normal); float3 to_light = light_point - hit_point; float distance_to_light_squared = dot(to_light, to_light); float distance_to_light = sqrtf(distance_to_light_squared); // Normalize the vector to the light to_light /= distance_to_light; float cos_o = -dot(to_light, light_normal); float cos_i = dot(to_light, hit_normal); // Only trace Shadow Ray if light transport is possible given the normals if (cos_o > 0.0f && cos_i > 0.0f) { int light_material_id = mesh_get_material_id(light_mesh_id); MaterialLight material_light = material_as_light(light_material_id); float power = material_light.emission.x + material_light.emission.y + material_light.emission.z; float brdf_pdf; float brdf = brdf_evaluator(to_light, brdf_pdf); float light_pdf = power * distance_to_light_squared / (cos_o * lights_total_power); float pdf; if (settings.enable_multiple_importance_sampling) { pdf = brdf_pdf + light_pdf; } else { pdf = light_pdf; } float3 illumination = throughput * brdf * material_light.emission * cos_i / pdf; int shadow_ray_index = atomic_agg_inc(&buffer_sizes.shadow[bounce]); ray_buffer_shadow.ray_origin .set(shadow_ray_index, hit_point); ray_buffer_shadow.ray_direction.set(shadow_ray_index, to_light); ray_buffer_shadow.max_distance[shadow_ray_index] = distance_to_light - EPSILON; ray_buffer_shadow.illumination_and_pixel_index[shadow_ray_index] = make_float4( illumination.x, illumination.y, illumination.z, __int_as_float(x + y * screen_pitch) ); } } extern "C" __global__ void kernel_shade_diffuse(int rand_seed, int bounce, int sample_index) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= buffer_sizes.diffuse[bounce]) return; float3 ray_direction = ray_buffer_shade_diffuse.direction.get(index); RayHit hit = ray_buffer_shade_diffuse.hits .get(index); int ray_pixel_index = ray_buffer_shade_diffuse.pixel_index[index]; int x = ray_pixel_index % screen_pitch; int y = ray_pixel_index / screen_pitch; float3 ray_throughput; if (bounce == 0) { ray_throughput = make_float3(1.0f); // Throughput is known to be (1,1,1) still, skip the global memory load } else { ray_throughput = ray_buffer_shade_diffuse.throughput.get(index); } unsigned seed = wang_hash(index ^ rand_seed); int material_id = mesh_get_material_id(hit.mesh_id); MaterialDiffuse material = material_as_diffuse(material_id); // Obtain hit Triangle position, normal, and texture coordinates TrianglePosNorTex hit_triangle = triangle_get_positions_normals_and_tex_coords(hit.triangle_id); float3 hit_point; float3 hit_normal; float2 hit_tex_coord; triangle_barycentric(hit_triangle, hit.u, hit.v, hit_point, hit_normal, 
hit_tex_coord); float3 hit_point_local = hit_point; // Keep copy of the untransformed hit point in local space // Transform into world space Matrix3x4 world = mesh_get_transform(hit.mesh_id); matrix3x4_transform_position (world, hit_point); matrix3x4_transform_direction(world, hit_normal); hit_normal = normalize(hit_normal); if (dot(ray_direction, hit_normal) > 0.0f) hit_normal = -hit_normal; // Sample albedo #if ENABLE_MIPMAPPING float cone_angle; float cone_width; float3 albedo = sample_albedo( bounce, material.diffuse, material.texture_id, hit, hit_triangle, hit_point_local, hit_normal, hit_tex_coord, ray_direction, ray_buffer_shade_diffuse.cone, index, cone_angle, cone_width ); #else float3 albedo = material_get_albedo(material.diffuse, material.texture_id, hit_tex_coord.x, hit_tex_coord.y); #endif float3 throughput = ray_throughput; if (bounce > 0) { throughput *= albedo; } else if (settings.modulate_albedo || settings.enable_svgf) { frame_buffer_albedo[ray_pixel_index] = make_float4(albedo); } if (bounce == 0 && settings.enable_svgf) { float3 hit_point_prev = hit_point_local; Matrix3x4 world_prev = mesh_get_transform_prev(hit.mesh_id); matrix3x4_transform_position(world_prev, hit_point_prev); svgf_set_gbuffers(x, y, hit, hit_point, hit_normal, hit_point_prev); } if (settings.enable_next_event_estimation && lights_total_power > 0.0f) { nee_sample(x, y, bounce, sample_index, seed, hit_point, hit_normal, throughput, [&](const float3 & to_light, float & pdf) { pdf = dot(to_light, hit_normal) * ONE_OVER_PI; return ONE_OVER_PI; }); } if (bounce == settings.num_bounces - 1) return; int index_out = atomic_agg_inc(&buffer_sizes.trace[bounce + 1]); float3 tangent, binormal; orthonormal_basis(hit_normal, tangent, binormal); float3 direction_local = random_cosine_weighted_direction(x, y, sample_index, bounce, seed); float3 direction_world = local_to_world(direction_local, tangent, binormal, hit_normal); ray_buffer_trace.origin .set(index_out, hit_point); ray_buffer_trace.direction.set(index_out, direction_world); #if ENABLE_MIPMAPPING ray_buffer_trace.cone[index_out] = make_float2(cone_angle, cone_width); #endif ray_buffer_trace.pixel_index_and_mis_eligable[index_out] = ray_pixel_index | (true << 31); if (bounce > 0) ray_buffer_trace.throughput.set(index_out, throughput); ray_buffer_trace.last_pdf[index_out] = fabsf(dot(direction_world, hit_normal)) * ONE_OVER_PI; } extern "C" __global__ void kernel_shade_dielectric(int rand_seed, int bounce) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= buffer_sizes.dielectric[bounce] || bounce == settings.num_bounces - 1) return; float3 ray_direction = ray_buffer_shade_dielectric_and_glossy.direction.get(index); RayHit hit = ray_buffer_shade_dielectric_and_glossy.hits .get(index); int ray_pixel_index = ray_buffer_shade_dielectric_and_glossy.pixel_index[index]; float3 ray_throughput; if (bounce == 0) { ray_throughput = make_float3(1.0f); // Throughput is known to be (1,1,1) still, skip the global memory load } else { ray_throughput = ray_buffer_shade_dielectric_and_glossy.throughput.get(index); } ASSERT(hit.triangle_id != -1, "Ray must have hit something for this Kernel to be invoked!"); unsigned seed = wang_hash(index ^ rand_seed); int material_id = mesh_get_material_id(hit.mesh_id); MaterialDielectric material = material_as_dielectric(material_id); // Obtain hit Triangle position, normal, and texture coordinates TrianglePosNor hit_triangle = triangle_get_positions_and_normals(hit.triangle_id); float3 hit_point; float3 hit_normal; 
triangle_barycentric(hit_triangle, hit.u, hit.v, hit_point, hit_normal); // Transform into world space Matrix3x4 world = mesh_get_transform(hit.mesh_id); matrix3x4_transform_position (world, hit_point); matrix3x4_transform_direction(world, hit_normal); hit_normal = normalize(hit_normal); int index_out = atomic_agg_inc(&buffer_sizes.trace[bounce + 1]); // Calculate proper facing normal and determine indices of refraction float3 normal; float cos_theta; float eta_1; float eta_2; float dir_dot_normal = dot(ray_direction, hit_normal); if (dir_dot_normal < 0.0f) { // Entering material eta_1 = 1.0f; eta_2 = material.index_of_refraction; normal = hit_normal; cos_theta = -dir_dot_normal; } else { // Leaving material eta_1 = material.index_of_refraction; eta_2 = 1.0f; normal = -hit_normal; cos_theta = dir_dot_normal; // Lambert-Beer Law // NOTE: does not take into account nested dielectrics! ray_throughput.x *= expf(material.negative_absorption.x * hit.t); ray_throughput.y *= expf(material.negative_absorption.y * hit.t); ray_throughput.z *= expf(material.negative_absorption.z * hit.t); } float eta = eta_1 / eta_2; float k = 1.0f - eta*eta * (1.0f - cos_theta*cos_theta); float3 ray_direction_reflected = reflect(ray_direction, hit_normal); float3 direction_out; if (k < 0.0f) { // Total Internal Reflection direction_out = ray_direction_reflected; } else { float3 ray_direction_refracted = normalize(eta * ray_direction + (eta * cos_theta - sqrtf(k)) * hit_normal); float f = fresnel(eta_1, eta_2, cos_theta, -dot(ray_direction_refracted, normal)); if (random_float_xorshift(seed) < f) { direction_out = ray_direction_reflected; } else { direction_out = ray_direction_refracted; } } if (bounce == 0 && (settings.modulate_albedo || settings.enable_svgf)) { frame_buffer_albedo[ray_pixel_index] = make_float4(1.0f); } ray_buffer_trace.origin .set(index_out, hit_point); ray_buffer_trace.direction.set(index_out, direction_out); #if ENABLE_MIPMAPPING float2 cone = ray_buffer_shade_dielectric_and_glossy.cone[index]; float cone_angle = cone.x; float cone_width = cone.y + cone_angle * hit.t; float mesh_scale = mesh_get_scale(hit.mesh_id); float curvature = triangle_get_curvature( hit_triangle.position_edge_1, hit_triangle.position_edge_2, hit_triangle.normal_edge_1, hit_triangle.normal_edge_2 ) / mesh_scale; cone_angle += -2.0f * curvature * fabsf(cone_width) / dot(hit_normal, ray_direction); // Eq. 
5 (Akenine-Möller 2021) ray_buffer_trace.cone[index_out] = make_float2(cone_angle, cone_width); #endif ray_buffer_trace.pixel_index_and_mis_eligable[index_out] = ray_pixel_index | (false << 31); if (bounce > 0) ray_buffer_trace.throughput.set(index_out, ray_throughput); } extern "C" __global__ void kernel_shade_glossy(int rand_seed, int bounce, int sample_index) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= buffer_sizes.glossy[bounce]) return; index = (BATCH_SIZE - 1) - index; float3 ray_direction = ray_buffer_shade_dielectric_and_glossy.direction.get(index); RayHit hit = ray_buffer_shade_dielectric_and_glossy.hits.get(index); int ray_pixel_index = ray_buffer_shade_dielectric_and_glossy.pixel_index[index]; int x = ray_pixel_index % screen_pitch; int y = ray_pixel_index / screen_pitch; float3 ray_throughput; if (bounce == 0) { ray_throughput = make_float3(1.0f); // Throughput is known to be (1,1,1) still, skip the global memory load } else { ray_throughput = ray_buffer_shade_dielectric_and_glossy.throughput.get(index); } ASSERT(hit.triangle_id != -1, "Ray must have hit something for this Kernel to be invoked!"); unsigned seed = wang_hash(index ^ rand_seed); int material_id = mesh_get_material_id(hit.mesh_id); MaterialGlossy material = material_as_glossy(material_id); // Obtain hit Triangle position, normal, and texture coordinates TrianglePosNorTex hit_triangle = triangle_get_positions_normals_and_tex_coords(hit.triangle_id); float3 hit_point; float3 hit_normal; float2 hit_tex_coord; triangle_barycentric(hit_triangle, hit.u, hit.v, hit_point, hit_normal, hit_tex_coord); float3 hit_point_local = hit_point; // Keep copy of the untransformed hit point in local space // Transform into world space Matrix3x4 world = mesh_get_transform(hit.mesh_id); matrix3x4_transform_position (world, hit_point); matrix3x4_transform_direction(world, hit_normal); hit_normal = normalize(hit_normal); if (dot(ray_direction, hit_normal) > 0.0f) hit_normal = -hit_normal; // Sample albedo #if ENABLE_MIPMAPPING float cone_angle; float cone_width; float3 albedo = sample_albedo( bounce, material.diffuse, material.texture_id, hit, hit_triangle, hit_point_local, hit_normal, hit_tex_coord, ray_direction, ray_buffer_shade_dielectric_and_glossy.cone, index, cone_angle, cone_width ); #else float3 albedo = material_get_albedo(material.diffuse, material.texture_id, hit_tex_coord.x, hit_tex_coord.y); #endif float3 throughput = ray_throughput; if (bounce > 0) { throughput *= albedo; } else if (settings.modulate_albedo || settings.enable_svgf) { frame_buffer_albedo[ray_pixel_index] = make_float4(albedo); } if (bounce == 0 && settings.enable_svgf) { float3 hit_point_prev = hit_point_local; Matrix3x4 world_prev = mesh_get_transform_prev(hit.mesh_id); matrix3x4_transform_position(world_prev, hit_point_prev); svgf_set_gbuffers(x, y, hit, hit_point, hit_normal, hit_point_prev); } // Slightly widen the distribution to prevent the weights from becoming too large (see Walter et al. 
2007) float alpha = material.roughness; // (1.2f - 0.2f * sqrtf(-dot(ray_direction, hit_normal))) * material.roughness; float alpha2 = alpha * alpha; // Construct orthonormal basis float3 hit_tangent, hit_binormal; orthonormal_basis(hit_normal, hit_tangent, hit_binormal); float3 omega_i = world_to_local(-ray_direction, hit_tangent, hit_binormal, hit_normal); if (settings.enable_next_event_estimation && lights_total_power > 0.0f && material.roughness >= ROUGHNESS_CUTOFF) { nee_sample(x, y, bounce, sample_index, seed, hit_point, hit_normal, throughput, [&](const float3 & to_light, float & pdf) { float3 omega_o = world_to_local(to_light, hit_tangent, hit_binormal, hit_normal); return ggx_eval(omega_o, omega_i, material.index_of_refraction, alpha, alpha, pdf); }); } if (bounce == settings.num_bounces - 1) return; // Importance sample distribution of normals float u1 = random_float_heitz(x, y, sample_index, bounce, 2, seed); float u2 = random_float_heitz(x, y, sample_index, bounce, 3, seed); float3 micro_normal_local = ggx_sample_distribution_of_normals(omega_i, alpha, alpha, u1, u2); float3 omega_o = reflect(-omega_i, micro_normal_local); float3 half_vector = normalize(omega_o + omega_i); float mu = fmaxf(0.0, dot(omega_o, half_vector)); float F = fresnel_schlick(material.index_of_refraction, 1.0f, mu); float D = ggx_D(half_vector, alpha, alpha); // Masking/shadowing using two monodirectional Smith terms float G1_o = ggx_G1(omega_o, alpha2, alpha2); float G1_i = ggx_G1(omega_i, alpha2, alpha2); float G2 = G1_o * G1_i; float pdf = G1_o * D * mu / (4.0f * omega_i.z * omega_o.z); throughput *= F * G2 / (G1_o * mu); float3 direction_out = local_to_world(omega_o, hit_tangent, hit_binormal, hit_normal); int index_out = atomic_agg_inc(&buffer_sizes.trace[bounce + 1]); ray_buffer_trace.origin .set(index_out, hit_point); ray_buffer_trace.direction.set(index_out, direction_out); #if ENABLE_MIPMAPPING ray_buffer_trace.cone[index_out] = make_float2(cone_angle, cone_width); #endif ray_buffer_trace.pixel_index_and_mis_eligable[index_out] = ray_pixel_index | ((material.roughness >= ROUGHNESS_CUTOFF) << 31); if (bounce > 0) ray_buffer_trace.throughput.set(index_out, throughput); ray_buffer_trace.last_pdf[index_out] = pdf; } extern "C" __global__ void kernel_trace_shadow(int bounce) { bvh_trace_shadow(buffer_sizes.shadow[bounce], &buffer_sizes.rays_retired_shadow[bounce], bounce); } extern "C" __global__ void kernel_accumulate(float frames_accumulated) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= screen_width || y >= screen_height) return; int pixel_index = x + y * screen_pitch; float4 direct = frame_buffer_direct [pixel_index]; float4 indirect = frame_buffer_indirect[pixel_index]; float4 colour = direct + indirect; if (settings.modulate_albedo) { colour *= frame_buffer_albedo[pixel_index]; } if (frames_accumulated > 0.0f) { float4 colour_prev = accumulator.get(x, y); colour = colour_prev + (colour - colour_prev) / frames_accumulated; // Online average } accumulator.set(x, y, colour); }
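kernel_generate and kernel_sort above pack the pixel index and the MIS-eligibility flag into one 32-bit word stored in ray_buffer_trace.pixel_index_and_mis_eligable: the pixel index sits in the low 31 bits and the flag in bit 31. A minimal sketch of that packing scheme with hypothetical helper names, written with explicit unsigned masks rather than the kernels' inline expressions (in the renderer these would be __device__ functions):

// Hypothetical helpers illustrating the pixel-index / MIS-flag packing scheme.
#include <cassert>
#include <cstdio>

inline unsigned pack_pixel_and_flag(int pixel_index, bool mis_eligible) {
    return (unsigned)pixel_index | ((unsigned)mis_eligible << 31);
}

inline void unpack_pixel_and_flag(unsigned packed, int & pixel_index, bool & mis_eligible) {
    pixel_index  = (int)(packed & 0x7fffffffu); // clear the flag bit
    mis_eligible = (packed >> 31) != 0u;
}

int main() {
    unsigned packed = pack_pixel_and_flag(123456, true);
    int pixel_index; bool mis_eligible;
    unpack_pixel_and_flag(packed, pixel_index, mis_eligible);
    assert(pixel_index == 123456 && mis_eligible);
    printf("pixel %d, mis_eligible %d\n", pixel_index, (int)mis_eligible);
    return 0;
}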
d5f73c022b2929fab89ac77b45e5950c92024f11.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /************************************************************************* * Copyright (C) 2011 by Saleh Dindar and the Swarm-NG Development Team * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 3 of the License. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program; if not, write to the * * Free Software Foundation, Inc., * * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * ************************************************************************/ #include "swarm/common.hpp" #include "swarm/gpu/bppt.hpp" #include "monitors/stop_on_ejection.hpp" #include "monitors/composites.hpp" #include "swarm/gpu/gravitation_accjerk.hpp" #include "monitors/log_transit.hpp" namespace swarm { namespace gpu { namespace bppt { /*! GPU implementation of PEC2 Hermite integrator w/ adaptive time step * \ingroup integrators * */ template< class Monitor > class hermite_adap: public integrator { typedef integrator base; typedef Monitor monitor_t; typedef typename monitor_t::params mon_params_t; private: double _time_step_factor, _min_time_step; mon_params_t _mon_params; public: hermite_adap(const config& cfg): base(cfg),_time_step_factor(0.001),_min_time_step(0.001), _mon_params(cfg) { _time_step_factor = cfg.require("time_step_factor", 0.0); _min_time_step = cfg.require("min_time_step", 0.0); } template<class T> static GENERIC const int shmem_per_system(T compile_time_param){ return sizeof(SystemSharedData<T>)/SHMEM_CHUNK_SIZE; } virtual void launch_integrator() { launch_templatized_integrator(this); } template<class T> struct SystemSharedData { typedef GravitationAccJerk<T> Grav; typename Grav::shared_data gravitation; DoubleCoalescedStruct<SHMEM_CHUNK_SIZE> time_step_factor[T::n]; }; GPUAPI void convert_internal_to_std_coord() {} GPUAPI void convert_std_to_internal_coord() {} template<class T> __device__ double calc_adaptive_time_step(T compile_time_param, SystemSharedData<T>& shared, const double acc, const double jerk) { // Body number int b = thread_body_idx(T::n); // Component number int c = thread_component_idx(T::n); // Put accelerations and jerks for each body and component into shared memory if( (b < T::n) && (c < 3) ) { shared.gravitation[b][c].acc() = acc*acc; shared.gravitation[b][c].jerk() = jerk*jerk; } __syncthreads(); // calculate sum of squares of each component for each body // store ratio in shared memory if( (b < T::n) && (c==0) ) { double acc_mag_sq = shared.gravitation[b][0].acc()+shared.gravitation[b][1].acc()+shared.gravitation[b][2].acc(); double jerk_mag_sq = shared.gravitation[b][0].jerk()+shared.gravitation[b][1].jerk()+shared.gravitation[b][2].jerk(); shared.time_step_factor[b].value() = jerk_mag_sq/acc_mag_sq; } __syncthreads(); if( thread_in_system() == 0 ) { double tf = shared.time_step_factor[0].value(); for(int bb=1;bb<T::n;++bb) tf += shared.time_step_factor[bb].value(); shared.time_step_factor[0].value() = rsqrt(tf)*_time_step_factor+_min_time_step; } __syncthreads(); return shared.time_step_factor[0].value(); } template<class T> 
__device__ void kernel(T compile_time_param){ if(sysid()>=_dens.nsys()) return; // References to Ensemble and Shared Memory typedef GravitationAccJerk<T> Grav; ensemble::SystemRef sys = _dens[sysid()]; SystemSharedData<T>& shared_data = *(SystemSharedData<T>*) system_shared_data_pointer(this, compile_time_param); Grav calcForces(sys, shared_data.gravitation ); // Local variables const int nbod = T::n; // Body number const int b = thread_body_idx(nbod); // Component number const int c = thread_component_idx(nbod); const bool body_component_grid = (b < nbod) && (c < 3); // local variables monitor_t montest(_mon_params,sys,*_log) ; // local information per component per body double pos = 0.0, vel = 0.0 , acc0 = 0.0, jerk0 = 0.0; if( (b < T::n) && (c < 3) ) pos = sys[b][c].pos() , vel = sys[b][c].vel(); montest( thread_in_system() ); ////////// INTEGRATION ////////////////////// // Calculate acceleration and jerk calcForces(thread_in_system(),b,c,pos,vel,acc0,jerk0); for(int iter = 0 ; (iter < _max_iterations) && sys.is_active() ; iter ++ ) { // Since h is the time step that is used for the step it makes more sense to // to calculate time step and assign it to h double h = calc_adaptive_time_step(compile_time_param, shared_data ,acc0,jerk0); if( sys.time() + h > _destination_time ) { h = _destination_time - sys.time(); } // Initial Evaluation, it can be omitted for faster computation ///calcForces(thread_in_system(),b,c,pos,vel,acc0,jerk0); // Predict pos = pos + h*(vel+(h*0.5)*(acc0+(h/3.0)*jerk0)); vel = vel + h*(acc0+(h*0.5)*jerk0); double pre_pos = pos, pre_vel = vel; double acc1,jerk1; { // Evaluation calcForces(thread_in_system(),b,c,pos,vel,acc1,jerk1); // Correct #if 0 // OLD pos = pre_pos + (0.1-0.25) * (acc0 - acc1) * h * h - 1.0/60.0 * ( 7.0 * jerk0 + 2.0 * jerk1 ) * h * h * h; vel = pre_vel + ( -0.5 ) * (acc0 - acc1 ) * h - 1.0/12.0 * ( 5.0 * jerk0 + jerk1 ) * h * h; #endif pos = pre_pos + ( (0.1-0.25) * (acc0 - acc1) - 1.0/60.0 * ( 7.0 * jerk0 + 2.0 * jerk1 ) * h) * h * h; vel = pre_vel + (( -0.5 ) * (acc0 - acc1 ) - 1.0/12.0 * ( 5.0 * jerk0 + jerk1 ) * h )* h ; } { // Evaluation calcForces(thread_in_system(),b,c,pos,vel,acc1,jerk1); // Correct #if 0 // OLD pos = pre_pos + (0.1-0.25) * (acc0 - acc1) * h * h - 1.0/60.0 * ( 7.0 * jerk0 + 2.0 * jerk1 ) * h * h * h; vel = pre_vel + ( -0.5 ) * (acc0 - acc1 ) * h - 1.0/12.0 * ( 5.0 * jerk0 + jerk1 ) * h * h; #endif pos = pre_pos + ((0.1-0.25) * (acc0 - acc1) - 1.0/60.0 * ( 7.0 * jerk0 + 2.0 * jerk1 ) * h )* h * h ; vel = pre_vel + (( -0.5 ) * (acc0 - acc1 ) - 1.0/12.0 * ( 5.0 * jerk0 + jerk1 ) * h ) * h ; } acc0 = acc1, jerk0 = jerk1; // Finalize the step if( (b < T::n) && (c < 3) ) sys[b][c].pos() = pos , sys[b][c].vel() = vel; if( thread_in_system()==0 ) sys.time() += h; montest( thread_in_system() ); if( sys.is_active() && thread_in_system()==0 ) { if( sys.time() >= _destination_time ) { sys.set_inactive(); } } __syncthreads(); } } }; typedef gpulog::device_log L; using namespace monitors; integrator_plugin_initializer< hermite_adap< stop_on_ejection<L> > > hermite_adap_plugin("hermite_adap"); integrator_plugin_initializer< hermite_adap< stop_on_ejection_or_close_encounter<L> > > hermite_adap_close_encounter_plugin("hermite_adap_close_encounter"); integrator_plugin_initializer<hermite_adap< log_transit<L> > > hermite_adap_log_plugin("hermite_adap_transit"); } } } // end namespace bppt :: integrators :: swarm
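calc_adaptive_time_step above accumulates, over all bodies of a system, the ratio of squared jerk magnitude to squared acceleration magnitude and maps the sum to h = time_step_factor / sqrt(sum) + min_time_step. A minimal host-side sketch of the same formula for a single system, with hypothetical names and dummy values, and without the shared-memory reduction used on the GPU:

// Hypothetical host-side illustration of the adaptive time step formula.
#include <cmath>
#include <cstdio>

double adaptive_time_step(const double (*acc)[3], const double (*jerk)[3], int nbod,
                          double time_step_factor, double min_time_step) {
    double sum = 0.0;
    for (int b = 0; b < nbod; b++) {
        double acc_mag_sq  = acc[b][0]*acc[b][0]   + acc[b][1]*acc[b][1]   + acc[b][2]*acc[b][2];
        double jerk_mag_sq = jerk[b][0]*jerk[b][0] + jerk[b][1]*jerk[b][1] + jerk[b][2]*jerk[b][2];
        sum += jerk_mag_sq / acc_mag_sq; // same per-body ratio as the kernel
    }
    return time_step_factor / std::sqrt(sum) + min_time_step; // rsqrt(sum)*factor + min
}

int main() {
    const double acc[2][3]  = { {1.0, 0.0, 0.0}, {0.0, 2.0, 0.0} }; // dummy data
    const double jerk[2][3] = { {0.1, 0.0, 0.0}, {0.0, 0.1, 0.0} };
    std::printf("h = %g\n", adaptive_time_step(acc, jerk, 2, 0.001, 0.001));
    return 0;
}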
d5f73c022b2929fab89ac77b45e5950c92024f11.cu
/************************************************************************* * Copyright (C) 2011 by Saleh Dindar and the Swarm-NG Development Team * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 3 of the License. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program; if not, write to the * * Free Software Foundation, Inc., * * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * ************************************************************************/ #include "swarm/common.hpp" #include "swarm/gpu/bppt.hpp" #include "monitors/stop_on_ejection.hpp" #include "monitors/composites.hpp" #include "swarm/gpu/gravitation_accjerk.hpp" #include "monitors/log_transit.hpp" namespace swarm { namespace gpu { namespace bppt { /*! GPU implementation of PEC2 Hermite integrator w/ adaptive time step * \ingroup integrators * */ template< class Monitor > class hermite_adap: public integrator { typedef integrator base; typedef Monitor monitor_t; typedef typename monitor_t::params mon_params_t; private: double _time_step_factor, _min_time_step; mon_params_t _mon_params; public: hermite_adap(const config& cfg): base(cfg),_time_step_factor(0.001),_min_time_step(0.001), _mon_params(cfg) { _time_step_factor = cfg.require("time_step_factor", 0.0); _min_time_step = cfg.require("min_time_step", 0.0); } template<class T> static GENERIC const int shmem_per_system(T compile_time_param){ return sizeof(SystemSharedData<T>)/SHMEM_CHUNK_SIZE; } virtual void launch_integrator() { launch_templatized_integrator(this); } template<class T> struct SystemSharedData { typedef GravitationAccJerk<T> Grav; typename Grav::shared_data gravitation; DoubleCoalescedStruct<SHMEM_CHUNK_SIZE> time_step_factor[T::n]; }; GPUAPI void convert_internal_to_std_coord() {} GPUAPI void convert_std_to_internal_coord() {} template<class T> __device__ double calc_adaptive_time_step(T compile_time_param, SystemSharedData<T>& shared, const double acc, const double jerk) { // Body number int b = thread_body_idx(T::n); // Component number int c = thread_component_idx(T::n); // Put accelerations and jerks for each body and component into shared memory if( (b < T::n) && (c < 3) ) { shared.gravitation[b][c].acc() = acc*acc; shared.gravitation[b][c].jerk() = jerk*jerk; } __syncthreads(); // calculate sum of squares of each component for each body // store ratio in shared memory if( (b < T::n) && (c==0) ) { double acc_mag_sq = shared.gravitation[b][0].acc()+shared.gravitation[b][1].acc()+shared.gravitation[b][2].acc(); double jerk_mag_sq = shared.gravitation[b][0].jerk()+shared.gravitation[b][1].jerk()+shared.gravitation[b][2].jerk(); shared.time_step_factor[b].value() = jerk_mag_sq/acc_mag_sq; } __syncthreads(); if( thread_in_system() == 0 ) { double tf = shared.time_step_factor[0].value(); for(int bb=1;bb<T::n;++bb) tf += shared.time_step_factor[bb].value(); shared.time_step_factor[0].value() = rsqrt(tf)*_time_step_factor+_min_time_step; } __syncthreads(); return shared.time_step_factor[0].value(); } template<class T> __device__ void kernel(T compile_time_param){ if(sysid()>=_dens.nsys()) return; // References to 
Ensemble and Shared Memory typedef GravitationAccJerk<T> Grav; ensemble::SystemRef sys = _dens[sysid()]; SystemSharedData<T>& shared_data = *(SystemSharedData<T>*) system_shared_data_pointer(this, compile_time_param); Grav calcForces(sys, shared_data.gravitation ); // Local variables const int nbod = T::n; // Body number const int b = thread_body_idx(nbod); // Component number const int c = thread_component_idx(nbod); const bool body_component_grid = (b < nbod) && (c < 3); // local variables monitor_t montest(_mon_params,sys,*_log) ; // local information per component per body double pos = 0.0, vel = 0.0 , acc0 = 0.0, jerk0 = 0.0; if( (b < T::n) && (c < 3) ) pos = sys[b][c].pos() , vel = sys[b][c].vel(); montest( thread_in_system() ); ////////// INTEGRATION ////////////////////// // Calculate acceleration and jerk calcForces(thread_in_system(),b,c,pos,vel,acc0,jerk0); for(int iter = 0 ; (iter < _max_iterations) && sys.is_active() ; iter ++ ) { // Since h is the time step that is used for the step it makes more sense to // to calculate time step and assign it to h double h = calc_adaptive_time_step(compile_time_param, shared_data ,acc0,jerk0); if( sys.time() + h > _destination_time ) { h = _destination_time - sys.time(); } // Initial Evaluation, it can be omitted for faster computation ///calcForces(thread_in_system(),b,c,pos,vel,acc0,jerk0); // Predict pos = pos + h*(vel+(h*0.5)*(acc0+(h/3.0)*jerk0)); vel = vel + h*(acc0+(h*0.5)*jerk0); double pre_pos = pos, pre_vel = vel; double acc1,jerk1; { // Evaluation calcForces(thread_in_system(),b,c,pos,vel,acc1,jerk1); // Correct #if 0 // OLD pos = pre_pos + (0.1-0.25) * (acc0 - acc1) * h * h - 1.0/60.0 * ( 7.0 * jerk0 + 2.0 * jerk1 ) * h * h * h; vel = pre_vel + ( -0.5 ) * (acc0 - acc1 ) * h - 1.0/12.0 * ( 5.0 * jerk0 + jerk1 ) * h * h; #endif pos = pre_pos + ( (0.1-0.25) * (acc0 - acc1) - 1.0/60.0 * ( 7.0 * jerk0 + 2.0 * jerk1 ) * h) * h * h; vel = pre_vel + (( -0.5 ) * (acc0 - acc1 ) - 1.0/12.0 * ( 5.0 * jerk0 + jerk1 ) * h )* h ; } { // Evaluation calcForces(thread_in_system(),b,c,pos,vel,acc1,jerk1); // Correct #if 0 // OLD pos = pre_pos + (0.1-0.25) * (acc0 - acc1) * h * h - 1.0/60.0 * ( 7.0 * jerk0 + 2.0 * jerk1 ) * h * h * h; vel = pre_vel + ( -0.5 ) * (acc0 - acc1 ) * h - 1.0/12.0 * ( 5.0 * jerk0 + jerk1 ) * h * h; #endif pos = pre_pos + ((0.1-0.25) * (acc0 - acc1) - 1.0/60.0 * ( 7.0 * jerk0 + 2.0 * jerk1 ) * h )* h * h ; vel = pre_vel + (( -0.5 ) * (acc0 - acc1 ) - 1.0/12.0 * ( 5.0 * jerk0 + jerk1 ) * h ) * h ; } acc0 = acc1, jerk0 = jerk1; // Finalize the step if( (b < T::n) && (c < 3) ) sys[b][c].pos() = pos , sys[b][c].vel() = vel; if( thread_in_system()==0 ) sys.time() += h; montest( thread_in_system() ); if( sys.is_active() && thread_in_system()==0 ) { if( sys.time() >= _destination_time ) { sys.set_inactive(); } } __syncthreads(); } } }; typedef gpulog::device_log L; using namespace monitors; integrator_plugin_initializer< hermite_adap< stop_on_ejection<L> > > hermite_adap_plugin("hermite_adap"); integrator_plugin_initializer< hermite_adap< stop_on_ejection_or_close_encounter<L> > > hermite_adap_close_encounter_plugin("hermite_adap_close_encounter"); integrator_plugin_initializer<hermite_adap< log_transit<L> > > hermite_adap_log_plugin("hermite_adap_transit"); } } } // end namespace bppt :: integrators :: swarm
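The adaptive step computed in calc_adaptive_time_step above boils down to a short scalar formula. The host-side sketch below (a hypothetical reference helper, not part of the plugin) restates it, assuming the per-body squared acceleration and jerk magnitudes have already been accumulated.

#include <cmath>
#include <cstddef>
#include <vector>

// Hypothetical CPU reference of the rule used by calc_adaptive_time_step():
//   h = time_step_factor * rsqrt( sum_b |jerk_b|^2 / |acc_b|^2 ) + min_time_step
double adaptive_time_step_ref(const std::vector<double>& acc_mag_sq,
                              const std::vector<double>& jerk_mag_sq,
                              double time_step_factor, double min_time_step)
{
  double tf = 0.0;
  for (std::size_t b = 0; b < acc_mag_sq.size(); ++b)
    tf += jerk_mag_sq[b] / acc_mag_sq[b];                    // per-body ratio, summed over bodies
  return time_step_factor / std::sqrt(tf) + min_time_step;   // rsqrt(tf) * factor + floor
}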
2939cb39f71c2309ac33b02d29ea2039cde04fdb.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

// ERROR CHECKING MACROS //////////////////////////////////////////////////////

__global__ void roadCrossingsKernel(int rows, int segs, int* adjacency, int* cross)
{
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (idx < rows) {
        cross[idx] = 0;
        for (int ii = 0; ii < segs; ii++) {
            cross[idx] += adjacency[idx*segs + ii];
        }
    }
}
2939cb39f71c2309ac33b02d29ea2039cde04fdb.cu
#include "includes.h" // ERROR CHECKING MACROS ////////////////////////////////////////////////////// __global__ void roadCrossingsKernel(int rows, int segs, int* adjacency, int* cross) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx < rows) { cross[idx] = 0; for (int ii = 0; ii < segs; ii++) { cross[idx] += adjacency[idx*segs + ii]; } } }
4b308fb1a5d5951875d8ce8161a6979b382f5541.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Tests for TensorReduce family of device-wide operators */ #include <iostream> #include <limits> #include "../../common/cutlass_unit_test.h" #include "cutlass/cutlass.h" #include "cutlass/complex.h" #include "cutlass/reduction/thread/reduction_operators.h" #include "cutlass/reduction/device/tensor_reduce.h" #include "cutlass/functional.h" #include "cutlass/layout/tensor.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/host/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/device/tensor_fill.h" #include "cutlass/util/reference/host/tensor_norm.h" #include "cutlass/util/tensor_view_io.h" ///////////////////////////////////////////////////////////////////////////////////////////////// /// This reduces the W dimension, transforming an NHWC tensor into NHWC with W=1. 
template < typename TensorReduction, typename ElementCompute = typename TensorReduction::ElementCompute > bool TestAllReduction_NHWC_reduce_w(ElementCompute reduction_identity = ElementCompute()) { using Layout = typename TensorReduction::Layout; using ElementOutput = typename TensorReduction::ElementOutput; using ElementSource = typename TensorReduction::ElementSource; int const kV = TensorReduction::kVectorLength; int const N_indices[] = {1, 2, 5, 10}; int const H_indices[] = {1, 3, 9 }; int const W_indices[] = {1, 5, 19, 40, 224}; int const C_indices[] = { kV, 2 * kV, 5 * kV, 9 * kV, 17 * kV, 39 * kV, 257 * kV, kV * 760 }; using Element = int; for (int N : N_indices) { for (int H : H_indices) { for (int W : W_indices) { for (int C : C_indices) { cutlass::HostTensor<ElementSource, Layout> src_tensor({N, H, W, C}); cutlass::HostTensor<ElementOutput, Layout> dst_tensor({N, H, 1, C}); cutlass::reference::host::TensorFillRandomUniform( src_tensor.host_view(), 17, 10, -10, 0); cutlass::reference::host::BlockFillSequential( dst_tensor.host_data(), dst_tensor.capacity()); dst_tensor.sync_device(); src_tensor.sync_device(); // Execute a tensor reduction over rank 2 (the 'W' dimension is reduced; NHWC => NHC) TensorReduction reduction(src_tensor.extent(), 2); cutlass::DeviceAllocation<uint8_t> device_workspace(reduction.workspace_size()); cutlass::Status status = reduction.reduce( dst_tensor.device_ref(), src_tensor.device_ref(), device_workspace.get(), reduction_identity ); EXPECT_EQ(status, cutlass::Status::kSuccess); EXPECT_EQ(hipDeviceSynchronize(), hipSuccess); // Reference check dst_tensor.sync_host(); typename TensorReduction::ReductionOp reduction_op; for (int n = 0; n < src_tensor.extent().n(); ++n) { for (int h = 0; h < src_tensor.extent().h(); ++h) { for (int c = 0; c < src_tensor.extent().c(); ++c) { ElementCompute w_accum = reduction_identity; for (int w = 0; w < src_tensor.extent().w(); ++w) { w_accum = reduction_op(w_accum, ElementCompute(src_tensor.at({n, h, w, c}))); } ElementCompute got = ElementCompute(dst_tensor.at({n, h, 0, c})); bool equal = (w_accum == got); EXPECT_TRUE(equal); if (!equal) { std::cerr << "Error at location (" << n << ", " << h << ", 0, " << c << ")" << std::endl; std::cerr << " expected: " << w_accum << std::endl << " got: " << got << std::endl; std::cerr << "Problem: " << src_tensor.extent() << " -> " << dst_tensor.extent() << std::endl; std::cerr << " Grid: " << reduction.reduction_strided.grid_shape << "\n Block: " << reduction.reduction_strided.threadblock_shape << std::endl << " Final: " << reduction.reduction_strided.grid_final << "\n Block: " << reduction.reduction_strided.threadblock_final << "\n"; return false; } } } } } } } } return true; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_reduce_w_f32x8_f16x8) { int const kV = 8; using ElementOutput = float; using ElementSource = cutlass::half_t; using ElementCompute = float; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::plus<ElementCompute>; using TensorReduction = cutlass::reduction::device::TensorReduction< ElementOutput, ElementSource, Layout, Functor, kV, ElementCompute >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>()); } /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_reduce_w_f32x2_f16x2) { int const kV = 2; using ElementOutput = float; using ElementSource = cutlass::half_t; 
using ElementCompute = float; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::plus<ElementCompute>; using TensorReduction = cutlass::reduction::device::TensorReduction< ElementOutput, ElementSource, Layout, Functor, kV, ElementCompute >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>()); } /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_reduce_w_f32x1_f16x1) { int const kV = 1; using ElementOutput = float; using ElementSource = cutlass::half_t; using ElementCompute = float; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::plus<ElementCompute>; using TensorReduction = cutlass::reduction::device::TensorReduction< ElementOutput, ElementSource, Layout, Functor, kV, ElementCompute >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>()); } /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_reduce_w_s32x4) { int const kV = 4; using Element = int; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::plus<Element>; using TensorReduction = cutlass::reduction::device::TensorReduction< Element, Element, Layout, Functor, kV, Element >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>()); } /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_reduce_w_cf32) { int const kV = 1; using ElementOutput = cutlass::complex<float>; using ElementSource = cutlass::complex<float>; using ElementCompute = cutlass::complex<float>; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::plus<ElementCompute>; using TensorReduction = cutlass::reduction::device::TensorReduction< ElementOutput, ElementSource, Layout, Functor, kV, ElementCompute >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>()); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_maximum_w_cf32) { int const kV = 1; using ElementOutput = float; using ElementSource = float; using ElementCompute = float; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::maximum<ElementCompute>; using TensorReduction = cutlass::reduction::device::TensorReduction< ElementOutput, ElementSource, Layout, Functor, kV, ElementCompute >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>( -std::numeric_limits<float>::max() )); } /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_minimum_w_cf32) { int const kV = 1; using ElementOutput = float; using ElementSource = float; using ElementCompute = float; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::minimum<ElementCompute>; using TensorReduction = cutlass::reduction::device::TensorReduction< ElementOutput, ElementSource, Layout, Functor, kV, ElementCompute >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>(std::numeric_limits<float>::max())); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_XOR_w_u32) { int const kV = 1; using ElementOutput = int; using ElementSource = int; using ElementCompute = int; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::bit_xor<ElementCompute>; using TensorReduction = 
cutlass::reduction::device::TensorReduction< ElementOutput, ElementSource, Layout, Functor, kV, ElementCompute >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>()); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_AND_w_s32) { int const kV = 1; using ElementOutput = unsigned; using ElementSource = unsigned; using ElementCompute = unsigned; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::bit_and<ElementCompute>; using TensorReduction = cutlass::reduction::device::TensorReduction< ElementOutput, ElementSource, Layout, Functor, kV, ElementCompute >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>(0xffffffff)); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_OR_w_u32) { int const kV = 1; using ElementOutput = int; using ElementSource = int; using ElementCompute = int; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::bit_or<ElementCompute>; using TensorReduction = cutlass::reduction::device::TensorReduction< ElementOutput, ElementSource, Layout, Functor, kV, ElementCompute >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>()); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_ANY_w_s32) { int const kV = 1; using ElementOutput = int; using ElementSource = int; using ElementCompute = int; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::logical_or<ElementCompute>; using TensorReduction = cutlass::reduction::device::TensorReduction< ElementOutput, ElementSource, Layout, Functor, kV, ElementCompute >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>(ElementCompute(0))); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_ALL_w_s32) { int const kV = 1; using ElementOutput = int; using ElementSource = int; using ElementCompute = int; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::logical_and<ElementCompute>; using TensorReduction = cutlass::reduction::device::TensorReduction< ElementOutput, ElementSource, Layout, Functor, kV, ElementCompute >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>(ElementCompute(1))); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_ANY_w_f32) { int const kV = 1; using ElementOutput = float; using ElementSource = float; using ElementCompute = float; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::logical_or<ElementCompute>; using TensorReduction = cutlass::reduction::device::TensorReduction< ElementOutput, ElementSource, Layout, Functor, kV, ElementCompute >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>(ElementCompute(0))); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_ALL_w_f32) { int const kV = 1; using ElementOutput = float; using 
ElementSource = float; using ElementCompute = float; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::logical_and<ElementCompute>; using TensorReduction = cutlass::reduction::device::TensorReduction< ElementOutput, ElementSource, Layout, Functor, kV, ElementCompute >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>(ElementCompute(1))); } /////////////////////////////////////////////////////////////////////////////////////////////////
4b308fb1a5d5951875d8ce8161a6979b382f5541.cu
/*************************************************************************************************** * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Tests for TensorReduce family of device-wide operators */ #include <iostream> #include <limits> #include "../../common/cutlass_unit_test.h" #include "cutlass/cutlass.h" #include "cutlass/complex.h" #include "cutlass/reduction/thread/reduction_operators.h" #include "cutlass/reduction/device/tensor_reduce.h" #include "cutlass/functional.h" #include "cutlass/layout/tensor.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/host/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/device/tensor_fill.h" #include "cutlass/util/reference/host/tensor_norm.h" #include "cutlass/util/tensor_view_io.h" ///////////////////////////////////////////////////////////////////////////////////////////////// /// This reduces the W dimension, transforming an NHWC tensor into NHWC with W=1. 
template < typename TensorReduction, typename ElementCompute = typename TensorReduction::ElementCompute > bool TestAllReduction_NHWC_reduce_w(ElementCompute reduction_identity = ElementCompute()) { using Layout = typename TensorReduction::Layout; using ElementOutput = typename TensorReduction::ElementOutput; using ElementSource = typename TensorReduction::ElementSource; int const kV = TensorReduction::kVectorLength; int const N_indices[] = {1, 2, 5, 10}; int const H_indices[] = {1, 3, 9 }; int const W_indices[] = {1, 5, 19, 40, 224}; int const C_indices[] = { kV, 2 * kV, 5 * kV, 9 * kV, 17 * kV, 39 * kV, 257 * kV, kV * 760 }; using Element = int; for (int N : N_indices) { for (int H : H_indices) { for (int W : W_indices) { for (int C : C_indices) { cutlass::HostTensor<ElementSource, Layout> src_tensor({N, H, W, C}); cutlass::HostTensor<ElementOutput, Layout> dst_tensor({N, H, 1, C}); cutlass::reference::host::TensorFillRandomUniform( src_tensor.host_view(), 17, 10, -10, 0); cutlass::reference::host::BlockFillSequential( dst_tensor.host_data(), dst_tensor.capacity()); dst_tensor.sync_device(); src_tensor.sync_device(); // Execute a tensor reduction over rank 2 (the 'W' dimension is reduced; NHWC => NHC) TensorReduction reduction(src_tensor.extent(), 2); cutlass::DeviceAllocation<uint8_t> device_workspace(reduction.workspace_size()); cutlass::Status status = reduction.reduce( dst_tensor.device_ref(), src_tensor.device_ref(), device_workspace.get(), reduction_identity ); EXPECT_EQ(status, cutlass::Status::kSuccess); EXPECT_EQ(cudaDeviceSynchronize(), cudaSuccess); // Reference check dst_tensor.sync_host(); typename TensorReduction::ReductionOp reduction_op; for (int n = 0; n < src_tensor.extent().n(); ++n) { for (int h = 0; h < src_tensor.extent().h(); ++h) { for (int c = 0; c < src_tensor.extent().c(); ++c) { ElementCompute w_accum = reduction_identity; for (int w = 0; w < src_tensor.extent().w(); ++w) { w_accum = reduction_op(w_accum, ElementCompute(src_tensor.at({n, h, w, c}))); } ElementCompute got = ElementCompute(dst_tensor.at({n, h, 0, c})); bool equal = (w_accum == got); EXPECT_TRUE(equal); if (!equal) { std::cerr << "Error at location (" << n << ", " << h << ", 0, " << c << ")" << std::endl; std::cerr << " expected: " << w_accum << std::endl << " got: " << got << std::endl; std::cerr << "Problem: " << src_tensor.extent() << " -> " << dst_tensor.extent() << std::endl; std::cerr << " Grid: " << reduction.reduction_strided.grid_shape << "\n Block: " << reduction.reduction_strided.threadblock_shape << std::endl << " Final: " << reduction.reduction_strided.grid_final << "\n Block: " << reduction.reduction_strided.threadblock_final << "\n"; return false; } } } } } } } } return true; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_reduce_w_f32x8_f16x8) { int const kV = 8; using ElementOutput = float; using ElementSource = cutlass::half_t; using ElementCompute = float; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::plus<ElementCompute>; using TensorReduction = cutlass::reduction::device::TensorReduction< ElementOutput, ElementSource, Layout, Functor, kV, ElementCompute >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>()); } /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_reduce_w_f32x2_f16x2) { int const kV = 2; using ElementOutput = float; using ElementSource = cutlass::half_t; 
using ElementCompute = float; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::plus<ElementCompute>; using TensorReduction = cutlass::reduction::device::TensorReduction< ElementOutput, ElementSource, Layout, Functor, kV, ElementCompute >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>()); } /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_reduce_w_f32x1_f16x1) { int const kV = 1; using ElementOutput = float; using ElementSource = cutlass::half_t; using ElementCompute = float; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::plus<ElementCompute>; using TensorReduction = cutlass::reduction::device::TensorReduction< ElementOutput, ElementSource, Layout, Functor, kV, ElementCompute >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>()); } /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_reduce_w_s32x4) { int const kV = 4; using Element = int; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::plus<Element>; using TensorReduction = cutlass::reduction::device::TensorReduction< Element, Element, Layout, Functor, kV, Element >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>()); } /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_reduce_w_cf32) { int const kV = 1; using ElementOutput = cutlass::complex<float>; using ElementSource = cutlass::complex<float>; using ElementCompute = cutlass::complex<float>; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::plus<ElementCompute>; using TensorReduction = cutlass::reduction::device::TensorReduction< ElementOutput, ElementSource, Layout, Functor, kV, ElementCompute >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>()); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_maximum_w_cf32) { int const kV = 1; using ElementOutput = float; using ElementSource = float; using ElementCompute = float; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::maximum<ElementCompute>; using TensorReduction = cutlass::reduction::device::TensorReduction< ElementOutput, ElementSource, Layout, Functor, kV, ElementCompute >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>( -std::numeric_limits<float>::max() )); } /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_minimum_w_cf32) { int const kV = 1; using ElementOutput = float; using ElementSource = float; using ElementCompute = float; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::minimum<ElementCompute>; using TensorReduction = cutlass::reduction::device::TensorReduction< ElementOutput, ElementSource, Layout, Functor, kV, ElementCompute >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>(std::numeric_limits<float>::max())); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_XOR_w_u32) { int const kV = 1; using ElementOutput = int; using ElementSource = int; using ElementCompute = int; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::bit_xor<ElementCompute>; using TensorReduction = 
cutlass::reduction::device::TensorReduction< ElementOutput, ElementSource, Layout, Functor, kV, ElementCompute >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>()); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_AND_w_s32) { int const kV = 1; using ElementOutput = unsigned; using ElementSource = unsigned; using ElementCompute = unsigned; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::bit_and<ElementCompute>; using TensorReduction = cutlass::reduction::device::TensorReduction< ElementOutput, ElementSource, Layout, Functor, kV, ElementCompute >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>(0xffffffff)); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_OR_w_u32) { int const kV = 1; using ElementOutput = int; using ElementSource = int; using ElementCompute = int; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::bit_or<ElementCompute>; using TensorReduction = cutlass::reduction::device::TensorReduction< ElementOutput, ElementSource, Layout, Functor, kV, ElementCompute >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>()); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_ANY_w_s32) { int const kV = 1; using ElementOutput = int; using ElementSource = int; using ElementCompute = int; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::logical_or<ElementCompute>; using TensorReduction = cutlass::reduction::device::TensorReduction< ElementOutput, ElementSource, Layout, Functor, kV, ElementCompute >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>(ElementCompute(0))); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_ALL_w_s32) { int const kV = 1; using ElementOutput = int; using ElementSource = int; using ElementCompute = int; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::logical_and<ElementCompute>; using TensorReduction = cutlass::reduction::device::TensorReduction< ElementOutput, ElementSource, Layout, Functor, kV, ElementCompute >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>(ElementCompute(1))); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_ANY_w_f32) { int const kV = 1; using ElementOutput = float; using ElementSource = float; using ElementCompute = float; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::logical_or<ElementCompute>; using TensorReduction = cutlass::reduction::device::TensorReduction< ElementOutput, ElementSource, Layout, Functor, kV, ElementCompute >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>(ElementCompute(0))); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Test tensor reduction from NHWC to NHC TEST(Reduction_TensorReduce, nhwc_ALL_w_f32) { int const kV = 1; using ElementOutput = float; using 
ElementSource = float; using ElementCompute = float; using Layout = cutlass::layout::TensorNHWC; // Define the functor using Functor = cutlass::logical_and<ElementCompute>; using TensorReduction = cutlass::reduction::device::TensorReduction< ElementOutput, ElementSource, Layout, Functor, kV, ElementCompute >; EXPECT_TRUE(TestAllReduction_NHWC_reduce_w<TensorReduction>(ElementCompute(1))); } /////////////////////////////////////////////////////////////////////////////////////////////////
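Outside the test harness, the TensorReduction pattern exercised above can be driven roughly as in this sketch; the extents and vector length are made up, the source data is left unfilled, and a real caller would check the returned cutlass::Status.

#include <cstdint>
#include "cutlass/functional.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/numeric_types.h"
#include "cutlass/reduction/device/tensor_reduce.h"
#include "cutlass/util/device_memory.h"
#include "cutlass/util/host_tensor.h"

// Sum over the W dimension of an NHWC tensor (rank 2), mirroring the tests above.
void example_nhwc_reduce_w() {
  using Layout  = cutlass::layout::TensorNHWC;
  using Functor = cutlass::plus<float>;
  using TensorReduction = cutlass::reduction::device::TensorReduction<
      float, cutlass::half_t, Layout, Functor, 8, float>;

  cutlass::HostTensor<cutlass::half_t, Layout> src({2, 9, 224, 64});  // N, H, W, C (assumed)
  cutlass::HostTensor<float, Layout> dst({2, 9, 1, 64});              // W reduced to 1
  src.sync_device();
  dst.sync_device();

  TensorReduction reduction(src.extent(), 2);                         // reduce rank 2 == W
  cutlass::DeviceAllocation<uint8_t> workspace(reduction.workspace_size());

  cutlass::Status status =
      reduction.reduce(dst.device_ref(), src.device_ref(), workspace.get(), 0.0f);
  (void)status;  // compare against cutlass::Status::kSuccess in real code
}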
712089924e04bb42b4d308dadbbc77767a5d6a4b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <hip/hip_fp16.h> #include <vector> #include "utils/checks.h" #include "utils/cuda.cuh" #include "inplace_abn.h" #include <ATen/hip/HIPContext.h> // Operations for reduce struct SumOpH { __device__ SumOpH(const half *t, int c, int s) : tensor(t), chn(c), sp(s) {} __device__ __forceinline__ float operator()(int batch, int plane, int n) { return __half2float(tensor[(batch * chn + plane) * sp + n]); } const half *tensor; const int chn; const int sp; }; struct VarOpH { __device__ VarOpH(float m, const half *t, int c, int s) : mean(m), tensor(t), chn(c), sp(s) {} __device__ __forceinline__ float operator()(int batch, int plane, int n) { const auto t = __half2float(tensor[(batch * chn + plane) * sp + n]); return (t - mean) * (t - mean); } const float mean; const half *tensor; const int chn; const int sp; }; struct GradOpH { __device__ GradOpH(float _weight, float _bias, const half *_z, const half *_dz, int c, int s) : weight(_weight), bias(_bias), z(_z), dz(_dz), chn(c), sp(s) {} __device__ __forceinline__ Pair<float> operator()(int batch, int plane, int n) { float _y = (__half2float(z[(batch * chn + plane) * sp + n]) - bias) / weight; float _dz = __half2float(dz[(batch * chn + plane) * sp + n]); return Pair<float>(_dz, _y * _dz); } const float weight; const float bias; const half *z; const half *dz; const int chn; const int sp; }; /*********** * mean_var ***********/ __global__ void mean_var_kernel_h(const half *x, float *mean, float *var, int num, int chn, int sp) { int plane = blockIdx.x; float norm = 1.f / static_cast<float>(num * sp); float _mean = reduce<float, SumOpH>(SumOpH(x, chn, sp), plane, num, sp) * norm; __syncthreads(); float _var = reduce<float, VarOpH>(VarOpH(_mean, x, chn, sp), plane, num, sp) * norm; if (threadIdx.x == 0) { mean[plane] = _mean; var[plane] = _var; } } std::vector<at::Tensor> mean_var_cuda_h(at::Tensor x) { CHECK_CUDA_INPUT(x); // Extract dimensions int64_t num, chn, sp; get_dims(x, num, chn, sp); // Prepare output tensors auto mean = at::empty({chn},x.options().dtype(at::kFloat)); auto var = at::empty({chn},x.options().dtype(at::kFloat)); // Run kernel dim3 blocks(chn); dim3 threads(getNumThreads(sp)); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( mean_var_kernel_h), dim3(blocks), dim3(threads), 0, stream, reinterpret_cast<half*>(x.data<at::Half>()), mean.data<float>(), var.data<float>(), num, chn, sp); return {mean, var}; } /********** * forward **********/ __global__ void forward_kernel_h(half *x, const float *mean, const float *var, const float *weight, const float *bias, bool affine, float eps, int num, int chn, int sp) { int plane = blockIdx.x; const float _mean = mean[plane]; const float _var = var[plane]; const float _weight = affine ? abs(weight[plane]) + eps : 1.f; const float _bias = affine ? 
bias[plane] : 0.f; const float mul = rsqrt(_var + eps) * _weight; for (int batch = 0; batch < num; ++batch) { for (int n = threadIdx.x; n < sp; n += blockDim.x) { half *x_ptr = x + (batch * chn + plane) * sp + n; float _x = __half2float(*x_ptr); float _y = (_x - _mean) * mul + _bias; *x_ptr = __float2half(_y); } } } at::Tensor forward_cuda_h(at::Tensor x, at::Tensor mean, at::Tensor var, at::Tensor weight, at::Tensor bias, bool affine, float eps) { CHECK_CUDA_INPUT(x); CHECK_CUDA_INPUT(mean); CHECK_CUDA_INPUT(var); CHECK_CUDA_INPUT(weight); CHECK_CUDA_INPUT(bias); // Extract dimensions int64_t num, chn, sp; get_dims(x, num, chn, sp); // Run kernel dim3 blocks(chn); dim3 threads(getNumThreads(sp)); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( forward_kernel_h), dim3(blocks), dim3(threads), 0, stream, reinterpret_cast<half*>(x.data<at::Half>()), mean.data<float>(), var.data<float>(), weight.data<float>(), bias.data<float>(), affine, eps, num, chn, sp); return x; } __global__ void edz_eydz_kernel_h(const half *z, const half *dz, const float *weight, const float *bias, float *edz, float *eydz, bool affine, float eps, int num, int chn, int sp) { int plane = blockIdx.x; float _weight = affine ? abs(weight[plane]) + eps : 1.f; float _bias = affine ? bias[plane] : 0.f; Pair<float> res = reduce<Pair<float>, GradOpH>(GradOpH(_weight, _bias, z, dz, chn, sp), plane, num, sp); __syncthreads(); if (threadIdx.x == 0) { edz[plane] = res.v1; eydz[plane] = res.v2; } } std::vector<at::Tensor> edz_eydz_cuda_h(at::Tensor z, at::Tensor dz, at::Tensor weight, at::Tensor bias, bool affine, float eps) { CHECK_CUDA_INPUT(z); CHECK_CUDA_INPUT(dz); CHECK_CUDA_INPUT(weight); CHECK_CUDA_INPUT(bias); // Extract dimensions int64_t num, chn, sp; get_dims(z, num, chn, sp); auto edz = at::empty({chn},z.options().dtype(at::kFloat)); auto eydz = at::empty({chn},z.options().dtype(at::kFloat)); // Run kernel dim3 blocks(chn); dim3 threads(getNumThreads(sp)); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( edz_eydz_kernel_h), dim3(blocks), dim3(threads), 0, stream, reinterpret_cast<half*>(z.data<at::Half>()), reinterpret_cast<half*>(dz.data<at::Half>()), weight.data<float>(), bias.data<float>(), edz.data<float>(), eydz.data<float>(), affine, eps, num, chn, sp); return {edz, eydz}; } __global__ void backward_kernel_h(const half *z, const half *dz, const float *var, const float *weight, const float *bias, const float *edz, const float *eydz, half *dx, bool affine, float eps, int num, int chn, int sp) { int plane = blockIdx.x; float _weight = affine ? abs(weight[plane]) + eps : 1.f; float _bias = affine ? 
bias[plane] : 0.f; float _var = var[plane]; float _edz = edz[plane]; float _eydz = eydz[plane]; float _mul = _weight * rsqrt(_var + eps); float count = float(num * sp); for (int batch = 0; batch < num; ++batch) { for (int n = threadIdx.x; n < sp; n += blockDim.x) { float _dz = __half2float(dz[(batch * chn + plane) * sp + n]); float _y = (__half2float(z[(batch * chn + plane) * sp + n]) - _bias) / _weight; dx[(batch * chn + plane) * sp + n] = __float2half((_dz - _edz / count - _y * _eydz / count) * _mul); } } } at::Tensor backward_cuda_h(at::Tensor z, at::Tensor dz, at::Tensor var, at::Tensor weight, at::Tensor bias, at::Tensor edz, at::Tensor eydz, bool affine, float eps) { CHECK_CUDA_INPUT(z); CHECK_CUDA_INPUT(dz); CHECK_CUDA_INPUT(var); CHECK_CUDA_INPUT(weight); CHECK_CUDA_INPUT(bias); CHECK_CUDA_INPUT(edz); CHECK_CUDA_INPUT(eydz); // Extract dimensions int64_t num, chn, sp; get_dims(z, num, chn, sp); auto dx = at::zeros_like(z); // Run kernel dim3 blocks(chn); dim3 threads(getNumThreads(sp)); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( backward_kernel_h), dim3(blocks), dim3(threads), 0, stream, reinterpret_cast<half*>(z.data<at::Half>()), reinterpret_cast<half*>(dz.data<at::Half>()), var.data<float>(), weight.data<float>(), bias.data<float>(), edz.data<float>(), eydz.data<float>(), reinterpret_cast<half*>(dx.data<at::Half>()), affine, eps, num, chn, sp); return dx; } __global__ void leaky_relu_backward_impl_h(half *z, half *dz, float slope, int64_t count) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x){ float _z = __half2float(z[i]); if (_z < 0) { dz[i] = __float2half(__half2float(dz[i]) * slope); z[i] = __float2half(_z / slope); } } } void leaky_relu_backward_cuda_h(at::Tensor z, at::Tensor dz, float slope) { CHECK_CUDA_INPUT(z); CHECK_CUDA_INPUT(dz); int64_t count = z.numel(); dim3 threads(getNumThreads(count)); dim3 blocks = (count + threads.x - 1) / threads.x; auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( leaky_relu_backward_impl_h), dim3(blocks), dim3(threads), 0, stream, reinterpret_cast<half*>(z.data<at::Half>()), reinterpret_cast<half*>(dz.data<at::Half>()), slope, count); }
712089924e04bb42b4d308dadbbc77767a5d6a4b.cu
#include <ATen/ATen.h> #include <cuda_fp16.h> #include <vector> #include "utils/checks.h" #include "utils/cuda.cuh" #include "inplace_abn.h" #include <ATen/cuda/CUDAContext.h> // Operations for reduce struct SumOpH { __device__ SumOpH(const half *t, int c, int s) : tensor(t), chn(c), sp(s) {} __device__ __forceinline__ float operator()(int batch, int plane, int n) { return __half2float(tensor[(batch * chn + plane) * sp + n]); } const half *tensor; const int chn; const int sp; }; struct VarOpH { __device__ VarOpH(float m, const half *t, int c, int s) : mean(m), tensor(t), chn(c), sp(s) {} __device__ __forceinline__ float operator()(int batch, int plane, int n) { const auto t = __half2float(tensor[(batch * chn + plane) * sp + n]); return (t - mean) * (t - mean); } const float mean; const half *tensor; const int chn; const int sp; }; struct GradOpH { __device__ GradOpH(float _weight, float _bias, const half *_z, const half *_dz, int c, int s) : weight(_weight), bias(_bias), z(_z), dz(_dz), chn(c), sp(s) {} __device__ __forceinline__ Pair<float> operator()(int batch, int plane, int n) { float _y = (__half2float(z[(batch * chn + plane) * sp + n]) - bias) / weight; float _dz = __half2float(dz[(batch * chn + plane) * sp + n]); return Pair<float>(_dz, _y * _dz); } const float weight; const float bias; const half *z; const half *dz; const int chn; const int sp; }; /*********** * mean_var ***********/ __global__ void mean_var_kernel_h(const half *x, float *mean, float *var, int num, int chn, int sp) { int plane = blockIdx.x; float norm = 1.f / static_cast<float>(num * sp); float _mean = reduce<float, SumOpH>(SumOpH(x, chn, sp), plane, num, sp) * norm; __syncthreads(); float _var = reduce<float, VarOpH>(VarOpH(_mean, x, chn, sp), plane, num, sp) * norm; if (threadIdx.x == 0) { mean[plane] = _mean; var[plane] = _var; } } std::vector<at::Tensor> mean_var_cuda_h(at::Tensor x) { CHECK_CUDA_INPUT(x); // Extract dimensions int64_t num, chn, sp; get_dims(x, num, chn, sp); // Prepare output tensors auto mean = at::empty({chn},x.options().dtype(at::kFloat)); auto var = at::empty({chn},x.options().dtype(at::kFloat)); // Run kernel dim3 blocks(chn); dim3 threads(getNumThreads(sp)); auto stream = at::cuda::getCurrentCUDAStream(); mean_var_kernel_h<<<blocks, threads, 0, stream>>>( reinterpret_cast<half*>(x.data<at::Half>()), mean.data<float>(), var.data<float>(), num, chn, sp); return {mean, var}; } /********** * forward **********/ __global__ void forward_kernel_h(half *x, const float *mean, const float *var, const float *weight, const float *bias, bool affine, float eps, int num, int chn, int sp) { int plane = blockIdx.x; const float _mean = mean[plane]; const float _var = var[plane]; const float _weight = affine ? abs(weight[plane]) + eps : 1.f; const float _bias = affine ? 
bias[plane] : 0.f; const float mul = rsqrt(_var + eps) * _weight; for (int batch = 0; batch < num; ++batch) { for (int n = threadIdx.x; n < sp; n += blockDim.x) { half *x_ptr = x + (batch * chn + plane) * sp + n; float _x = __half2float(*x_ptr); float _y = (_x - _mean) * mul + _bias; *x_ptr = __float2half(_y); } } } at::Tensor forward_cuda_h(at::Tensor x, at::Tensor mean, at::Tensor var, at::Tensor weight, at::Tensor bias, bool affine, float eps) { CHECK_CUDA_INPUT(x); CHECK_CUDA_INPUT(mean); CHECK_CUDA_INPUT(var); CHECK_CUDA_INPUT(weight); CHECK_CUDA_INPUT(bias); // Extract dimensions int64_t num, chn, sp; get_dims(x, num, chn, sp); // Run kernel dim3 blocks(chn); dim3 threads(getNumThreads(sp)); auto stream = at::cuda::getCurrentCUDAStream(); forward_kernel_h<<<blocks, threads, 0, stream>>>( reinterpret_cast<half*>(x.data<at::Half>()), mean.data<float>(), var.data<float>(), weight.data<float>(), bias.data<float>(), affine, eps, num, chn, sp); return x; } __global__ void edz_eydz_kernel_h(const half *z, const half *dz, const float *weight, const float *bias, float *edz, float *eydz, bool affine, float eps, int num, int chn, int sp) { int plane = blockIdx.x; float _weight = affine ? abs(weight[plane]) + eps : 1.f; float _bias = affine ? bias[plane] : 0.f; Pair<float> res = reduce<Pair<float>, GradOpH>(GradOpH(_weight, _bias, z, dz, chn, sp), plane, num, sp); __syncthreads(); if (threadIdx.x == 0) { edz[plane] = res.v1; eydz[plane] = res.v2; } } std::vector<at::Tensor> edz_eydz_cuda_h(at::Tensor z, at::Tensor dz, at::Tensor weight, at::Tensor bias, bool affine, float eps) { CHECK_CUDA_INPUT(z); CHECK_CUDA_INPUT(dz); CHECK_CUDA_INPUT(weight); CHECK_CUDA_INPUT(bias); // Extract dimensions int64_t num, chn, sp; get_dims(z, num, chn, sp); auto edz = at::empty({chn},z.options().dtype(at::kFloat)); auto eydz = at::empty({chn},z.options().dtype(at::kFloat)); // Run kernel dim3 blocks(chn); dim3 threads(getNumThreads(sp)); auto stream = at::cuda::getCurrentCUDAStream(); edz_eydz_kernel_h<<<blocks, threads, 0, stream>>>( reinterpret_cast<half*>(z.data<at::Half>()), reinterpret_cast<half*>(dz.data<at::Half>()), weight.data<float>(), bias.data<float>(), edz.data<float>(), eydz.data<float>(), affine, eps, num, chn, sp); return {edz, eydz}; } __global__ void backward_kernel_h(const half *z, const half *dz, const float *var, const float *weight, const float *bias, const float *edz, const float *eydz, half *dx, bool affine, float eps, int num, int chn, int sp) { int plane = blockIdx.x; float _weight = affine ? abs(weight[plane]) + eps : 1.f; float _bias = affine ? 
bias[plane] : 0.f; float _var = var[plane]; float _edz = edz[plane]; float _eydz = eydz[plane]; float _mul = _weight * rsqrt(_var + eps); float count = float(num * sp); for (int batch = 0; batch < num; ++batch) { for (int n = threadIdx.x; n < sp; n += blockDim.x) { float _dz = __half2float(dz[(batch * chn + plane) * sp + n]); float _y = (__half2float(z[(batch * chn + plane) * sp + n]) - _bias) / _weight; dx[(batch * chn + plane) * sp + n] = __float2half((_dz - _edz / count - _y * _eydz / count) * _mul); } } } at::Tensor backward_cuda_h(at::Tensor z, at::Tensor dz, at::Tensor var, at::Tensor weight, at::Tensor bias, at::Tensor edz, at::Tensor eydz, bool affine, float eps) { CHECK_CUDA_INPUT(z); CHECK_CUDA_INPUT(dz); CHECK_CUDA_INPUT(var); CHECK_CUDA_INPUT(weight); CHECK_CUDA_INPUT(bias); CHECK_CUDA_INPUT(edz); CHECK_CUDA_INPUT(eydz); // Extract dimensions int64_t num, chn, sp; get_dims(z, num, chn, sp); auto dx = at::zeros_like(z); // Run kernel dim3 blocks(chn); dim3 threads(getNumThreads(sp)); auto stream = at::cuda::getCurrentCUDAStream(); backward_kernel_h<<<blocks, threads, 0, stream>>>( reinterpret_cast<half*>(z.data<at::Half>()), reinterpret_cast<half*>(dz.data<at::Half>()), var.data<float>(), weight.data<float>(), bias.data<float>(), edz.data<float>(), eydz.data<float>(), reinterpret_cast<half*>(dx.data<at::Half>()), affine, eps, num, chn, sp); return dx; } __global__ void leaky_relu_backward_impl_h(half *z, half *dz, float slope, int64_t count) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x){ float _z = __half2float(z[i]); if (_z < 0) { dz[i] = __float2half(__half2float(dz[i]) * slope); z[i] = __float2half(_z / slope); } } } void leaky_relu_backward_cuda_h(at::Tensor z, at::Tensor dz, float slope) { CHECK_CUDA_INPUT(z); CHECK_CUDA_INPUT(dz); int64_t count = z.numel(); dim3 threads(getNumThreads(count)); dim3 blocks = (count + threads.x - 1) / threads.x; auto stream = at::cuda::getCurrentCUDAStream(); leaky_relu_backward_impl_h<<<blocks, threads, 0, stream>>>( reinterpret_cast<half*>(z.data<at::Half>()), reinterpret_cast<half*>(dz.data<at::Half>()), slope, count); }
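For checking the half-precision kernels above, the per-element transform applied by forward_kernel_h can be restated as a small CPU reference; the function name is illustrative and the math simply mirrors the mul/_bias computation in the kernel.

#include <cmath>

// y = (x - mean) * (|weight| + eps) / sqrt(var + eps) + bias   (affine case),
// or  y = (x - mean) / sqrt(var + eps)                         (non-affine case),
// matching forward_kernel_h above.
inline float abn_forward_ref(float x, float mean, float var,
                             float weight, float bias, bool affine, float eps)
{
  const float w   = affine ? std::fabs(weight) + eps : 1.f;
  const float b   = affine ? bias : 0.f;
  const float mul = w / std::sqrt(var + eps);   // kernel uses rsqrt(var + eps) * w
  return (x - mean) * mul + b;
}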
ad62db5f79d245d4fb859d363bf791cc91275807.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cfloat>
#include <cmath>
//#include <cstdio>
#include "caffe/fast_rcnn_layers.hpp"

using std::max;
using std::min;

namespace caffe {

// Forward pass: scale the roi (given in original-image coordinates) onto the feature map
// and rotate it at the same time, producing an oriented roi on the feature map
// (both steps are carried out by the affine matrix M).
template <typename Dtype>
__global__ void RotateROIPoolForward(const int nthreads, const Dtype* bottom_data,
    const Dtype spatial_scale, const int channels, const int height, const int width,
    const int pooled_height, const int pooled_width, const Dtype* bottom_rois,
    Dtype* top_data, int* argmax_data, const Dtype* info) {
  // Remove 0.5 shift for height & width
  int imageWidth = int(info[1]*spatial_scale);
  int imageHeight = int(info[0]*spatial_scale);

  CUDA_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is an element in the pooled output.
    // From the parallel thread index, recover the batch, channel, w, h position this thread
    // handles; each thread produces exactly one pixel of the pooled 7*7 output.
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;

    // The following are roi coordinates (float proposal bounds, relative to the original
    // image) stored in bottom_rois; mapping them onto the feature map needs spatial_scale.
    bottom_rois += n * 6;
    int roi_batch_ind = bottom_rois[0];
    Dtype cx = bottom_rois[1];  // note: this is the center point
    Dtype cy = bottom_rois[2];
    Dtype h = bottom_rois[3];
    Dtype w = bottom_rois[4];
    Dtype angle = bottom_rois[5]/180.0*3.1415926535;

    //TransformPrepare
    // Scaling the roi (to feature-map coordinates) and rotating it are both affine
    // transforms, so one matrix does the job; it also folds in the extra factor that
    // scales directly down to the pooled 7*7 grid.
    Dtype dx = -pooled_width/2.0;
    Dtype dy = -pooled_height/2.0;
    Dtype Sx = w*spatial_scale/pooled_width;   // scale factor down to the 7*7 pooled output
    Dtype Sy = h*spatial_scale/pooled_height;
    Dtype Alpha = cos(angle);
    Dtype Beta = sin(angle);
    Dtype Dx = cx*spatial_scale;  // map the proposal (roi) center onto the feature map
    Dtype Dy = cy*spatial_scale;

    Dtype M[2][3];
    M[0][0] = Alpha*Sx;
    M[0][1] = Beta*Sy;   // positive here, so the angle convention is clockwise-positive
    M[0][2] = Alpha*Sx*dx+Beta*Sy*dy+Dx;
    M[1][0] = -Beta*Sx;
    M[1][1] = Alpha*Sy;
    M[1][2] = -Beta*Sx*dx+Alpha*Sy*dy+Dy;

    // Affine coordinates of the four corners of this (now tilted) pooling bin;
    // effectively the final 7*7 roi output grid has been rotated.
    Dtype P[8];
    P[0] = M[0][0]*pw+M[0][1]*ph+M[0][2];
    P[1] = M[1][0]*pw+M[1][1]*ph+M[1][2];
    P[2] = M[0][0]*pw+M[0][1]*(ph+1)+M[0][2];
    P[3] = M[1][0]*pw+M[1][1]*(ph+1)+M[1][2];
    P[4] = M[0][0]*(pw+1)+M[0][1]*ph+M[0][2];
    P[5] = M[1][0]*(pw+1)+M[1][1]*ph+M[1][2];
    P[6] = M[0][0]*(pw+1)+M[0][1]*(ph+1)+M[0][2];
    P[7] = M[1][0]*(pw+1)+M[1][1]*(ph+1)+M[1][2];

    // pw/ph are not feature-map coordinates themselves; they just select one tilted bin.
    // Extremal left/right/top/bottom coordinates, rounded and clamped to the image.
    int leftMost = int(max(round(min(min(P[0],P[2]),min(P[4],P[6]))),0.0));
    int rightMost= int(min(round(max(max(P[0],P[2]),max(P[4],P[6]))),imageWidth-1.0));
    int topMost= int(max(round(min(min(P[1],P[3]),min(P[5],P[7]))),0.0));
    int bottomMost= int(min(round(max(max(P[1],P[3]),max(P[5],P[7]))),imageHeight-1.0));

    Dtype maxval = 0;   // max pooling: running maximum while scanning the bin
    int maxidx = -1;    // max pooling records the argmax index for the backward pass

    // bottom_data: pointer to the start of the feature-map slice (depth = 1) being pooled;
    // the map is a flat (h*w) array.
    bottom_data += (roi_batch_ind * channels + c) * height * width;

    Dtype AB[2];
    AB[0] = P[2] - P[0];
    AB[1] = P[3] - P[1];
    Dtype ABAB = AB[0]*AB[0] + AB[1]*AB[1];
    Dtype AC[2];
    AC[0] = P[4] - P[0];
    AC[1] = P[5] - P[1];
    Dtype ACAC = AC[0]*AC[0] + AC[1]*AC[1];
    for (int h = topMost; h < bottomMost+1; ++h) {
      for (int w = leftMost; w < rightMost+1; ++w) {
        Dtype AP[2];
        AP[0] = w - P[0];
        AP[1] = h - P[1];
        Dtype ABAP = AB[0]*AP[0] + AB[1]*AP[1];
        Dtype ACAP = AC[0]*AP[0] + AC[1]*AP[1];
        // bottom_index is the position being visited in the original feature map
        if(ABAB>ABAP&&ABAP>=0&&ACAC>ACAP&&ACAP>=0){
          int bottom_index = h * width + w;
          // compare each element inside the current bin and keep the maximum
          if (bottom_data[bottom_index] > maxval) {
            maxval = bottom_data[bottom_index];
            maxidx = bottom_index;
          }
        }
      }
    }
    // Write out the maximum value and, separately, its index.
    top_data[index] = maxval;
    argmax_data[index] = maxidx;
  }
}

template <typename Dtype>
void RotateROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  const Dtype* bottom_rois = bottom[1]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  int* argmax_data = max_idx_.mutable_gpu_data();
  const Dtype* image_info = bottom[2]->gpu_data();
  int count = top[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( RotateROIPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      count, bottom_data, spatial_scale_, channels_, height_, width_, pooled_height_,
      pooled_width_, bottom_rois, top_data, argmax_data, image_info);
  CUDA_POST_KERNEL_CHECK;
}

template <typename Dtype>
__global__ void RotateROIPoolBackward(const int nthreads, const Dtype* top_diff,
    const int* argmax_data, const int num_rois, const Dtype spatial_scale,
    const int channels, const int height, const int width, const int pooled_height,
    const int pooled_width, Dtype* bottom_diff, const Dtype* bottom_rois) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;
    bottom_rois += n * 6;
    int roi_batch_ind = bottom_rois[0];
    bottom_diff += (roi_batch_ind * channels + c) * height * width;
    int bottom_index = argmax_data[index];
    if(bottom_index!=-1)
      bottom_diff[bottom_index] += top_diff[index];
  }
}

template <typename Dtype>
void RotateROIPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (!propagate_down[0]) {
    return;
  }
  const Dtype* bottom_rois = bottom[1]->gpu_data();
  const Dtype* top_diff = top[0]->gpu_diff();
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
  const int count = bottom[0]->count();
  caffe_gpu_set(count, Dtype(0.), bottom_diff);
  const int* argmax_data = max_idx_.gpu_data();
  int counter = top[0]->count();
  //NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( RotateROIPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(counter)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      count, top_diff, argmax_data, top[0]->num(), spatial_scale_, channels_, height_,
      width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois);
  CUDA_POST_KERNEL_CHECK;
  //std::cout<<top_diff[0]<<std::endl;
}

INSTANTIATE_LAYER_GPU_FUNCS(RotateROIPoolingLayer);

}  // namespace caffe
ad62db5f79d245d4fb859d363bf791cc91275807.cu
#include <cfloat>
#include <cmath>
//#include <cstdio>

#include "caffe/fast_rcnn_layers.hpp"

using std::max;
using std::min;

namespace caffe {

// Forward pass: scale the ROI (given relative to the original image) onto the feature map while
// rotating it, so an oriented ROI is obtained on the feature map (done through the transform matrix M).
template <typename Dtype>
__global__ void RotateROIPoolForward(const int nthreads, const Dtype* bottom_data,
    const Dtype spatial_scale, const int channels, const int height,
    const int width, const int pooled_height, const int pooled_width,
    const Dtype* bottom_rois, Dtype* top_data, int* argmax_data, const Dtype* info) {
  // Remove 0.5 shift for height & width
  int imageWidth = int(info[1]*spatial_scale);
  int imageHeight = int(info[0]*spatial_scale);
  CUDA_KERNEL_LOOP(index, nthreads) {
    // (n, c, ph, pw) is an element in the pooled output
    // From the parallel thread index, recover this thread's (batch, c, w, h) position in the ROI output.
    // Work inside the loop: one thread produces one pixel of the 7*7 pooled output, one value at a time.
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;

    // The values below are the ROI coordinates (the proposal's float boundary, relative to the
    // original image) stored in bottom_rois; mapping them onto the feature map involves a scale factor.
    bottom_rois += n * 6;
    int roi_batch_ind = bottom_rois[0];
    Dtype cx = bottom_rois[1]; // note: this is the center point
    Dtype cy = bottom_rois[2];
    Dtype h = bottom_rois[3];
    Dtype w = bottom_rois[4];
    Dtype angle = bottom_rois[5]/180.0*3.1415926535;

    //TransformPrepare
    // Scaling the ROI (to feature-map coordinates) and rotating it are both affine transforms,
    // so a single matrix is enough.
    // It does not stop at the feature map: an extra factor scales directly down to the 7*7
    // ROI pooling output in one step.
    Dtype dx = -pooled_width/2.0;
    Dtype dy = -pooled_height/2.0;
    Dtype Sx = w*spatial_scale/pooled_width;  // scale factor down to the 7*7 ROI pooling output
    Dtype Sy = h*spatial_scale/pooled_height;
    Dtype Alpha = cos(angle);
    Dtype Beta = sin(angle);
    Dtype Dx = cx*spatial_scale;  // map the proposal (ROI) center (original-image coordinates) onto the feature map for interpolation
    Dtype Dy = cy*spatial_scale;

    Dtype M[2][3];
    M[0][0] = Alpha*Sx;
    M[0][1] = Beta*Sy;  // positive sign here, so the angle is defined as positive clockwise
    M[0][2] = Alpha*Sx*dx+Beta*Sy*dy+Dx;
    M[1][0] = -Beta*Sx;
    M[1][1] = Alpha*Sy;
    M[1][2] = -Beta*Sx*dx+Alpha*Sy*dy+Dy;

    // Affine coordinates of the four corners, now tilted; the final 7*7 ROI output grid is effectively rotated.
    Dtype P[8];
    P[0] = M[0][0]*pw+M[0][1]*ph+M[0][2];
    P[1] = M[1][0]*pw+M[1][1]*ph+M[1][2];
    P[2] = M[0][0]*pw+M[0][1]*(ph+1)+M[0][2];
    P[3] = M[1][0]*pw+M[1][1]*(ph+1)+M[1][2];
    P[4] = M[0][0]*(pw+1)+M[0][1]*ph+M[0][2];
    P[5] = M[1][0]*(pw+1)+M[1][1]*ph+M[1][2];
    P[6] = M[0][0]*(pw+1)+M[0][1]*(ph+1)+M[0][2];
    P[7] = M[1][0]*(pw+1)+M[1][1]*(ph+1)+M[1][2];

    // (pw, ph) is not an absolute coordinate here; it only carves out one tilted bin.
    // Get the extreme left/right/top/bottom coordinates, rounded to non-negative integers.
    int leftMost = int(max(round(min(min(P[0],P[2]),min(P[4],P[6]))),0.0));
    int rightMost= int(min(round(max(max(P[0],P[2]),max(P[4],P[6]))),imageWidth-1.0));
    int topMost= int(max(round(min(min(P[1],P[3]),min(P[5],P[7]))),0.0));
    int bottomMost= int(min(round(max(max(P[1],P[3]),max(P[5],P[7]))),imageHeight-1.0));

    Dtype maxval = 0;  // max pooling: keeps the running maximum while scanning the bin
    int maxidx = -1;   // max pooling must record the argmax index for the backward pass; average pooling would not need it
    // bottom_data: pointer to the start of the feature-map channel to pool (depth = 1); the feature map is a flat (h*w) array.
    bottom_data += (roi_batch_ind * channels + c) * height * width;

    Dtype AB[2];
    AB[0] = P[2] - P[0];
    AB[1] = P[3] - P[1];
    Dtype ABAB = AB[0]*AB[0] +AB[1]*AB[1];
    Dtype AC[2];
    AC[0] = P[4] - P[0];
    AC[1] = P[5] - P[1];
    Dtype ACAC = AC[0]*AC[0] + AC[1]*AC[1];
    for (int h = topMost; h < bottomMost+1; ++h) {
      for (int w = leftMost; w < rightMost+1; ++w) {
        Dtype AP[2];
        AP[0] = w - P[0];
        AP[1] = h - P[1];
        Dtype ABAP = AB[0]*AP[0] +AB[1]*AP[1];
        Dtype ACAP = AC[0]*AP[0] + AC[1]*AP[1];
        // bottom_index is the position currently visited on the original feature map
        if(ABAB>ABAP&&ABAP>=0&&ACAC>ACAP&&ACAP>=0){
          int bottom_index = h * width + w;
          // Compare the elements inside the current bin one by one and keep the maximum.
          if (bottom_data[bottom_index] > maxval) {
            maxval = bottom_data[bottom_index];
            maxidx = bottom_index;
          }
        }
      }
    }
    // Write the maximum value to the output and store its index alongside it
    // (pointer arithmetic makes addressing and writing in one step convenient).
    top_data[index] = maxval;
    argmax_data[index] = maxidx;
  }
}

template <typename Dtype>
void RotateROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  const Dtype* bottom_rois = bottom[1]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  int* argmax_data = max_idx_.mutable_gpu_data();
  const Dtype* image_info = bottom[2]->gpu_data();
  int count = top[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  RotateROIPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
      count, bottom_data, spatial_scale_, channels_, height_, width_, pooled_height_,
      pooled_width_, bottom_rois, top_data, argmax_data, image_info);
  CUDA_POST_KERNEL_CHECK;
}

template <typename Dtype>
__global__ void RotateROIPoolBackward(const int nthreads, const Dtype* top_diff,
    const int* argmax_data, const int num_rois, const Dtype spatial_scale,
    const int channels, const int height, const int width,
    const int pooled_height, const int pooled_width, Dtype* bottom_diff,
    const Dtype* bottom_rois) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;
    bottom_rois += n * 6;
    int roi_batch_ind = bottom_rois[0];
    bottom_diff += (roi_batch_ind * channels + c) * height * width;
    int bottom_index = argmax_data[index];
    if(bottom_index!=-1)
      bottom_diff[bottom_index] += top_diff[index];
  }
}

template <typename Dtype>
void RotateROIPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (!propagate_down[0]) {
    return;
  }
  const Dtype* bottom_rois = bottom[1]->gpu_data();
  const Dtype* top_diff = top[0]->gpu_diff();
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
  const int count = bottom[0]->count();
  caffe_gpu_set(count, Dtype(0.), bottom_diff);
  const int* argmax_data = max_idx_.gpu_data();
  int counter = top[0]->count();
  //NOLINT_NEXT_LINE(whitespace/operators)
  RotateROIPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(counter), CAFFE_CUDA_NUM_THREADS>>>(
      counter, top_diff, argmax_data, top[0]->num(), spatial_scale_, channels_,
      height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois);
  CUDA_POST_KERNEL_CHECK;
  //std::cout<<top_diff[0]<<std::endl;
}

INSTANTIATE_LAYER_GPU_FUNCS(RotateROIPoolingLayer);

}  // namespace caffe
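The comments in RotateROIPoolForward describe how a single 2x3 affine matrix M both rescales an ROI from original-image coordinates down to the pooled 7*7 grid and rotates it about the ROI centre. The host-only sketch below recomputes that matrix and maps the four corners of one pooled bin, which can help when checking the geometry off-GPU; the ROI values, scale and bin index are made-up illustration inputs, not anything taken from the layer.

```cpp
// Host-side sketch of the affine mapping used in RotateROIPoolForward.
// All inputs below (ROI, spatial_scale, pooled size, bin index) are illustrative only.
#include <cmath>
#include <cstdio>

int main() {
  // ROI given as (cx, cy, h, w, angle in degrees), in original-image coordinates.
  const double cx = 120.0, cy = 80.0, h = 64.0, w = 32.0, angle_deg = 30.0;
  const double spatial_scale = 1.0 / 16.0;      // image -> feature-map scale
  const int pooled_w = 7, pooled_h = 7;         // output grid
  const int pw = 3, ph = 2;                     // which bin to map (example)

  const double angle = angle_deg / 180.0 * 3.1415926535;
  const double dx = -pooled_w / 2.0, dy = -pooled_h / 2.0;
  const double Sx = w * spatial_scale / pooled_w;   // bin width on the feature map
  const double Sy = h * spatial_scale / pooled_h;   // bin height on the feature map
  const double Alpha = std::cos(angle), Beta = std::sin(angle);
  const double Dx = cx * spatial_scale, Dy = cy * spatial_scale;

  // Same 2x3 matrix as in the kernel: scale to the pooled grid, rotate, recentre.
  const double M[2][3] = {
    {  Alpha * Sx, Beta * Sy,   Alpha * Sx * dx + Beta * Sy * dy + Dx },
    { -Beta * Sx,  Alpha * Sy, -Beta * Sx * dx + Alpha * Sy * dy + Dy }
  };

  // Map the four corners of bin (pw, ph) onto the feature map.
  const double corners[4][2] = {
    { double(pw),     double(ph)     }, { double(pw),     double(ph + 1) },
    { double(pw + 1), double(ph)     }, { double(pw + 1), double(ph + 1) }
  };
  for (int i = 0; i < 4; ++i) {
    double x = M[0][0] * corners[i][0] + M[0][1] * corners[i][1] + M[0][2];
    double y = M[1][0] * corners[i][0] + M[1][1] * corners[i][1] + M[1][2];
    printf("corner %d -> (%.3f, %.3f) on the feature map\n", i, x, y);
  }
  return 0;
}
```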
8a5125eafa05897a55ff99bfddc0b7fb478641fc.hip
// !!! This is a file automatically generated by hipify!!! #include "mtbs_cu.h" #include <pthread.h> #include "tbs_sd.h" #include "sched_pagoda.h" #define MAX_ENTRIES_PER_POOL 64 static pthread_spinlock_t lock; static tentry_t *taskentries; static BOOL *lock_taskcolumns; static unsigned last_column; static unsigned n_tasktables; static tentry_t *g_taskentries; static unsigned numEntriesPerPool; static __device__ tentry_t *d_taskentries; static __device__ int *d_readytable; static __device__ unsigned d_numEntriesPerPool; static int *ready_table; static int *prev_table; static int *next_table; static hipStream_t strm_submit; #define NEXT_COLUMN(col) do { (col) = ((col) + 1) % n_tasktables; } while (0) /* in usec */ static unsigned long long last_tick_submitted; #include "sched_pagoda.cuh" #define N_QUEUED_TASKID_PREV 256 static int queued_taskid_prevs[N_QUEUED_TASKID_PREV]; static int qtp_start, qtp_end; #define NEXT_QTP(qtp) (((qtp) + 1) % N_QUEUED_TASKID_PREV) static void push_prev_taskid(int taskid_prev) { pthread_spin_lock(&lock); if (NEXT_QTP(qtp_start) == qtp_end) { pthread_spin_unlock(&lock); error("queued previous taskid full"); exit(14); } queued_taskid_prevs[qtp_start] = taskid_prev; qtp_start = NEXT_QTP(qtp_start); pthread_spin_unlock(&lock); } static int pop_prev_taskid(int taskid_next) { int taskid_prev; pthread_spin_lock(&lock); if (qtp_start == qtp_end) { pthread_spin_unlock(&lock); return -1; } taskid_prev = queued_taskid_prevs[qtp_end]; if (taskid_next < 0) { if (prev_table[taskid_prev - 2] > 0) { pthread_spin_unlock(&lock); return 0; } } qtp_end = NEXT_QTP(qtp_end); if (taskid_next > 0) { prev_table[taskid_next - 2] = taskid_prev; next_table[taskid_prev - 2] = taskid_next; } pthread_spin_unlock(&lock); return taskid_prev; } static unsigned lock_table(void) { unsigned start_col, col; again: pthread_spin_lock(&lock); start_col = last_column; while (lock_taskcolumns[last_column]) { NEXT_COLUMN(last_column); if (last_column == start_col) { pthread_spin_unlock(&lock); usleep(0); goto again; } } lock_taskcolumns[last_column] = TRUE; col = last_column; NEXT_COLUMN(last_column); pthread_spin_unlock(&lock); return col; } static void unlock_table(unsigned col) { pthread_spin_lock(&lock); lock_taskcolumns[col] = FALSE; pthread_spin_unlock(&lock); } static void start_dummy_submitter(void); static void mark_prev_task_ready(void) { while (TRUE) { tentry_t *d_tentry; int taskid_prev = pop_prev_taskid(-1); if (taskid_prev <= 0) { if (taskid_prev == 0) start_dummy_submitter(); return; } d_tentry = g_taskentries + taskid_prev - 2; cuMemcpyHtoDAsync((hipDeviceptr_t)&d_tentry->ready, &taskid_prev, sizeof(int), strm_submit); hipStreamSynchronize(strm_submit); } } static void * dummy_submitter_func(void *ctx) { while (TRUE) { unsigned long long ticks, ticks_end; ticks = get_ticks(); pthread_spin_lock(&lock); ticks_end = last_tick_submitted + 30000; if (ticks_end > ticks) { pthread_spin_unlock(&lock); usleep(ticks_end - ticks); } else { last_tick_submitted = 0; pthread_spin_unlock(&lock); mark_prev_task_ready(); break; } } return NULL; } static void start_dummy_submitter(void) { unsigned long long ticks; ticks = get_ticks(); pthread_spin_lock(&lock); if (last_tick_submitted == 0) { pthread_t dummy_submitter; last_tick_submitted = ticks; pthread_spin_unlock(&lock); pthread_create(&dummy_submitter, NULL, dummy_submitter_func, NULL); pthread_detach(dummy_submitter); } else { last_tick_submitted = ticks; pthread_spin_unlock(&lock); } } static tentry_t * find_empty_tentry(unsigned col) { tentry_t 
*tentry; unsigned row; tentry = taskentries + numEntriesPerPool * col; for (row = 0; row < numEntriesPerPool; row++, tentry++) { if (tentry->ready == 0) return tentry; } return NULL; } static sk_t submit_skrun_pagoda(vstream_t vstream, skrun_t *skr) { tentry_t *tentry; unsigned col; unsigned offset; again: col = lock_table(); tentry = find_empty_tentry(col); if (tentry == NULL) { unlock_table(col); goto again; } offset = tentry - taskentries; tentry->ready = pop_prev_taskid(offset + 2); unlock_table(col); memcpy(&tentry->skrun, skr, sizeof(skrun_t)); cuMemcpyHtoDAsync((hipDeviceptr_t)(g_taskentries + offset), tentry, sizeof(tentry_t), strm_submit); hipStreamSynchronize(strm_submit); ready_table[offset] = -1; push_prev_taskid(offset + 2); start_dummy_submitter(); return (sk_t)tentry; } static void wait_skrun_pagoda(sk_t sk, vstream_t vstream, int *pres) { tentry_t *tentry = (tentry_t *)sk; tentry_t *d_tentry; unsigned offset; offset = tentry - taskentries; d_tentry = g_taskentries + offset; while (TRUE) { if (ready_table[offset] == 0) { break; } usleep(100); } cuMemcpyDtoHAsync(pres, (hipDeviceptr_t)&d_tentry->skrun.res, sizeof(int), strm_submit); hipStreamSynchronize(strm_submit); pthread_spin_lock(&lock); if (next_table[offset] > 0) { prev_table[next_table[offset] - 2] = 0; } prev_table[offset] = 0; pthread_spin_unlock(&lock); tentry->ready = 0; } static void init_skrun_pagoda(void) { void *params[4]; unsigned n_tentries; unsigned i; hipStreamCreate__(&strm_submit, hipStreamNonBlocking); numEntriesPerPool = n_queued_kernels; if (numEntriesPerPool > MAX_ENTRIES_PER_POOL) numEntriesPerPool = MAX_ENTRIES_PER_POOL; n_tasktables = n_sm_count * n_MTBs_per_sm; n_tentries = numEntriesPerPool * n_tasktables; taskentries = (tentry_t *)malloc(sizeof(tentry_t) * n_tentries); lock_taskcolumns = (BOOL *)calloc(n_tasktables, sizeof(BOOL)); pthread_spin_init(&lock, 0); g_taskentries = (tentry_t *)mtbs_cudaMalloc(sizeof(tentry_t) * n_tentries); hipMemAllocHost((void **)&ready_table, sizeof(int) * n_tentries); prev_table = (int *)malloc(sizeof(int) * n_tentries); next_table = (int *)malloc(sizeof(int) * n_tentries); for (i = 0; i < n_tentries; i++) { taskentries[i].ready = 0; taskentries[i].sched = 0; ready_table[i] = 0; prev_table[i] = 0; next_table[i] = 0; } params[0] = &g_taskentries; params[1] = &ready_table; params[2] = &numEntriesPerPool; params[3] = &n_tasktables; invoke_kernel_func("func_init_pagoda", params); } static void fini_skrun_pagoda(void) { hipHostFree(ready_table); mtbs_cudaFree(g_taskentries); } sched_t sched_sd_pagoda = { "pagoda", TBS_TYPE_SD_PAGODA, "pagoda_master_kernel", init_skrun_pagoda, fini_skrun_pagoda, submit_skrun_pagoda, wait_skrun_pagoda, };
8a5125eafa05897a55ff99bfddc0b7fb478641fc.cu
#include "mtbs_cu.h" #include <pthread.h> #include "tbs_sd.h" #include "sched_pagoda.h" #define MAX_ENTRIES_PER_POOL 64 static pthread_spinlock_t lock; static tentry_t *taskentries; static BOOL *lock_taskcolumns; static unsigned last_column; static unsigned n_tasktables; static tentry_t *g_taskentries; static unsigned numEntriesPerPool; static __device__ tentry_t *d_taskentries; static __device__ int *d_readytable; static __device__ unsigned d_numEntriesPerPool; static int *ready_table; static int *prev_table; static int *next_table; static CUstream strm_submit; #define NEXT_COLUMN(col) do { (col) = ((col) + 1) % n_tasktables; } while (0) /* in usec */ static unsigned long long last_tick_submitted; #include "sched_pagoda.cuh" #define N_QUEUED_TASKID_PREV 256 static int queued_taskid_prevs[N_QUEUED_TASKID_PREV]; static int qtp_start, qtp_end; #define NEXT_QTP(qtp) (((qtp) + 1) % N_QUEUED_TASKID_PREV) static void push_prev_taskid(int taskid_prev) { pthread_spin_lock(&lock); if (NEXT_QTP(qtp_start) == qtp_end) { pthread_spin_unlock(&lock); error("queued previous taskid full"); exit(14); } queued_taskid_prevs[qtp_start] = taskid_prev; qtp_start = NEXT_QTP(qtp_start); pthread_spin_unlock(&lock); } static int pop_prev_taskid(int taskid_next) { int taskid_prev; pthread_spin_lock(&lock); if (qtp_start == qtp_end) { pthread_spin_unlock(&lock); return -1; } taskid_prev = queued_taskid_prevs[qtp_end]; if (taskid_next < 0) { if (prev_table[taskid_prev - 2] > 0) { pthread_spin_unlock(&lock); return 0; } } qtp_end = NEXT_QTP(qtp_end); if (taskid_next > 0) { prev_table[taskid_next - 2] = taskid_prev; next_table[taskid_prev - 2] = taskid_next; } pthread_spin_unlock(&lock); return taskid_prev; } static unsigned lock_table(void) { unsigned start_col, col; again: pthread_spin_lock(&lock); start_col = last_column; while (lock_taskcolumns[last_column]) { NEXT_COLUMN(last_column); if (last_column == start_col) { pthread_spin_unlock(&lock); usleep(0); goto again; } } lock_taskcolumns[last_column] = TRUE; col = last_column; NEXT_COLUMN(last_column); pthread_spin_unlock(&lock); return col; } static void unlock_table(unsigned col) { pthread_spin_lock(&lock); lock_taskcolumns[col] = FALSE; pthread_spin_unlock(&lock); } static void start_dummy_submitter(void); static void mark_prev_task_ready(void) { while (TRUE) { tentry_t *d_tentry; int taskid_prev = pop_prev_taskid(-1); if (taskid_prev <= 0) { if (taskid_prev == 0) start_dummy_submitter(); return; } d_tentry = g_taskentries + taskid_prev - 2; cuMemcpyHtoDAsync((CUdeviceptr)&d_tentry->ready, &taskid_prev, sizeof(int), strm_submit); cuStreamSynchronize(strm_submit); } } static void * dummy_submitter_func(void *ctx) { while (TRUE) { unsigned long long ticks, ticks_end; ticks = get_ticks(); pthread_spin_lock(&lock); ticks_end = last_tick_submitted + 30000; if (ticks_end > ticks) { pthread_spin_unlock(&lock); usleep(ticks_end - ticks); } else { last_tick_submitted = 0; pthread_spin_unlock(&lock); mark_prev_task_ready(); break; } } return NULL; } static void start_dummy_submitter(void) { unsigned long long ticks; ticks = get_ticks(); pthread_spin_lock(&lock); if (last_tick_submitted == 0) { pthread_t dummy_submitter; last_tick_submitted = ticks; pthread_spin_unlock(&lock); pthread_create(&dummy_submitter, NULL, dummy_submitter_func, NULL); pthread_detach(dummy_submitter); } else { last_tick_submitted = ticks; pthread_spin_unlock(&lock); } } static tentry_t * find_empty_tentry(unsigned col) { tentry_t *tentry; unsigned row; tentry = taskentries + numEntriesPerPool * col; 
for (row = 0; row < numEntriesPerPool; row++, tentry++) { if (tentry->ready == 0) return tentry; } return NULL; } static sk_t submit_skrun_pagoda(vstream_t vstream, skrun_t *skr) { tentry_t *tentry; unsigned col; unsigned offset; again: col = lock_table(); tentry = find_empty_tentry(col); if (tentry == NULL) { unlock_table(col); goto again; } offset = tentry - taskentries; tentry->ready = pop_prev_taskid(offset + 2); unlock_table(col); memcpy(&tentry->skrun, skr, sizeof(skrun_t)); cuMemcpyHtoDAsync((CUdeviceptr)(g_taskentries + offset), tentry, sizeof(tentry_t), strm_submit); cuStreamSynchronize(strm_submit); ready_table[offset] = -1; push_prev_taskid(offset + 2); start_dummy_submitter(); return (sk_t)tentry; } static void wait_skrun_pagoda(sk_t sk, vstream_t vstream, int *pres) { tentry_t *tentry = (tentry_t *)sk; tentry_t *d_tentry; unsigned offset; offset = tentry - taskentries; d_tentry = g_taskentries + offset; while (TRUE) { if (ready_table[offset] == 0) { break; } usleep(100); } cuMemcpyDtoHAsync(pres, (CUdeviceptr)&d_tentry->skrun.res, sizeof(int), strm_submit); cuStreamSynchronize(strm_submit); pthread_spin_lock(&lock); if (next_table[offset] > 0) { prev_table[next_table[offset] - 2] = 0; } prev_table[offset] = 0; pthread_spin_unlock(&lock); tentry->ready = 0; } static void init_skrun_pagoda(void) { void *params[4]; unsigned n_tentries; unsigned i; cuStreamCreate(&strm_submit, CU_STREAM_NON_BLOCKING); numEntriesPerPool = n_queued_kernels; if (numEntriesPerPool > MAX_ENTRIES_PER_POOL) numEntriesPerPool = MAX_ENTRIES_PER_POOL; n_tasktables = n_sm_count * n_MTBs_per_sm; n_tentries = numEntriesPerPool * n_tasktables; taskentries = (tentry_t *)malloc(sizeof(tentry_t) * n_tentries); lock_taskcolumns = (BOOL *)calloc(n_tasktables, sizeof(BOOL)); pthread_spin_init(&lock, 0); g_taskentries = (tentry_t *)mtbs_cudaMalloc(sizeof(tentry_t) * n_tentries); cuMemAllocHost((void **)&ready_table, sizeof(int) * n_tentries); prev_table = (int *)malloc(sizeof(int) * n_tentries); next_table = (int *)malloc(sizeof(int) * n_tentries); for (i = 0; i < n_tentries; i++) { taskentries[i].ready = 0; taskentries[i].sched = 0; ready_table[i] = 0; prev_table[i] = 0; next_table[i] = 0; } params[0] = &g_taskentries; params[1] = &ready_table; params[2] = &numEntriesPerPool; params[3] = &n_tasktables; invoke_kernel_func("func_init_pagoda", params); } static void fini_skrun_pagoda(void) { cuMemFreeHost(ready_table); mtbs_cudaFree(g_taskentries); } sched_t sched_sd_pagoda = { "pagoda", TBS_TYPE_SD_PAGODA, "pagoda_master_kernel", init_skrun_pagoda, fini_skrun_pagoda, submit_skrun_pagoda, wait_skrun_pagoda, };
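Both versions of this scheduler keep pending predecessor task ids in a fixed-size ring buffer (queued_taskid_prevs, indexed by qtp_start and qtp_end through NEXT_QTP): the buffer reports full when advancing the head would land on the tail, and empty when head equals tail, so one slot always stays unused. The stand-alone sketch below reproduces only that buffer convention, without the spinlock or the task-table chaining; the capacity, names and demo values are illustrative.

```cpp
// Stand-alone sketch of the ring-buffer convention used for queued_taskid_prevs.
// Capacity, names and demo values are illustrative; locking is omitted.
#include <cstdio>

#define N_SLOTS 8
#define NEXT(i) (((i) + 1) % N_SLOTS)

static int slots[N_SLOTS];
static int start_idx, end_idx;   // push at start_idx, pop at end_idx

static bool push(int v) {
  if (NEXT(start_idx) == end_idx) return false;   // full: one slot is kept unused
  slots[start_idx] = v;
  start_idx = NEXT(start_idx);
  return true;
}

static bool pop(int *v) {
  if (start_idx == end_idx) return false;         // empty
  *v = slots[end_idx];
  end_idx = NEXT(end_idx);
  return true;
}

int main() {
  for (int i = 2; i < 12; ++i)                    // the first N_SLOTS-1 pushes succeed
    printf("push %d -> %s\n", i, push(i) ? "ok" : "full");
  int v;
  while (pop(&v)) printf("pop %d\n", v);
  return 0;
}
```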
abff2a39ea996c4e58f6258197c4b9465da2ffa7.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* Recursive Gaussian filter sgreen 8/1/08 This code sample implements a Gaussian blur using Deriche's recursive method: http://citeseer.ist.psu.edu/deriche93recursively.html This is similar to the box filter sample in the SDK, but it uses the previous outputs of the filter as well as the previous inputs. This is also known as an IIR (infinite impulse response) filter, since its response to an input impulse can last forever. The main advantage of this method is that the execution time is independent of the filter width. The GPU processes columns of the image in parallel. To avoid uncoalesced reads for the row pass we transpose the image and then transpose it back again afterwards. The implementation is based on code from the CImg library: http://cimg.sourceforge.net/ Thanks to David Tschumperl and all the CImg contributors! */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <hip/hip_runtime.h> #include <helper_cuda.h> #include <helper_math.h> #include "recursiveGaussian_kernel.cuh" #define USE_SIMPLE_FILTER 0 //Round a / b to nearest higher integer value int iDivUp(int a, int b) { return (a % b != 0) ? (a / b + 1) : (a / b); } /* Transpose a 2D array (see SDK transpose example) */ extern "C" void transpose(uint *d_src, uint *d_dest, uint width, int height) { dim3 grid(iDivUp(width, BLOCK_DIM), iDivUp(height, BLOCK_DIM), 1); dim3 threads(BLOCK_DIM, BLOCK_DIM, 1); hipLaunchKernelGGL(( d_transpose), dim3(grid), dim3(threads) , 0, 0, d_dest, d_src, width, height); getLastCudaError("Kernel execution failed"); } /* Perform Gaussian filter on a 2D image using CUDA Parameters: d_src - pointer to input image in device memory d_dest - pointer to destination image in device memory d_temp - pointer to temporary storage in device memory width - image width height - image height sigma - sigma of Gaussian order - filter order (0, 1 or 2) */ // 8-bit RGBA version extern "C" void gaussianFilterRGBA(uint *d_src, uint *d_dest, uint *d_temp, int width, int height, float sigma, int order, int nthreads) { // compute filter coefficients const float nsigma = sigma < 0.1f ? 
0.1f : sigma, alpha = 1.695f / nsigma, ema = (float)::exp(-alpha), ema2 = (float)::exp(-2*alpha), b1 = -2*ema, b2 = ema2; float a0 = 0, a1 = 0, a2 = 0, a3 = 0, coefp = 0, coefn = 0; switch (order) { case 0: { const float k = (1-ema)*(1-ema)/(1+2*alpha*ema-ema2); a0 = k; a1 = k*(alpha-1)*ema; a2 = k*(alpha+1)*ema; a3 = -k*ema2; } break; case 1: { const float k = (1-ema)*(1-ema)/ema; a0 = k*ema; a1 = a3 = 0; a2 = -a0; } break; case 2: { const float ea = (float)::exp(-alpha), k = -(ema2-1)/(2*alpha*ema), kn = (-2*(-1+3*ea-3*ea*ea+ea*ea*ea)/(3*ea+1+3*ea*ea+ea*ea*ea)); a0 = kn; a1 = -kn*(1+k*alpha)*ema; a2 = kn*(1-k*alpha)*ema; a3 = -kn*ema2; } break; default: fprintf(stderr, "gaussianFilter: invalid order parameter!\n"); return; } coefp = (a0+a1)/(1+b1+b2); coefn = (a2+a3)/(1+b1+b2); // process columns #if USE_SIMPLE_FILTER hipLaunchKernelGGL(( d_simpleRecursive_rgba), dim3(iDivUp(width, nthreads)), dim3(nthreads) , 0, 0, d_src, d_temp, width, height, ema); #else hipLaunchKernelGGL(( d_recursiveGaussian_rgba), dim3(iDivUp(width, nthreads)), dim3(nthreads), 3072 , 0, d_src, d_temp, width, height, a0, a1, a2, a3, b1, b2, coefp, coefn); //d_recursiveGaussian_rgba<<< iDivUp(width, nthreads), nthreads, 3328>>>(d_src, d_temp, width, height, a0, a1, a2, a3, b1, b2, coefp, coefn); //d_recursiveGaussian_rgba<<< iDivUp(width, nthreads), nthreads>>>(d_src, d_temp, width, height, a0, a1, a2, a3, b1, b2, coefp, coefn); #endif getLastCudaError("Kernel execution failed"); transpose(d_temp, d_dest, width, height); getLastCudaError("transpose: Kernel execution failed"); // process rows #if USE_SIMPLE_FILTER hipLaunchKernelGGL(( d_simpleRecursive_rgba), dim3(iDivUp(height, nthreads)), dim3(nthreads) , 0, 0, d_dest, d_temp, height, width, ema); #else hipLaunchKernelGGL(( d_recursiveGaussian_rgba), dim3(iDivUp(height, nthreads)), dim3(nthreads), 3072 , 0, d_dest, d_temp, height, width, a0, a1, a2, a3, b1, b2, coefp, coefn); //d_recursiveGaussian_rgba<<< iDivUp(height, nthreads), nthreads, 3328 >>>(d_dest, d_temp, height, width, a0, a1, a2, a3, b1, b2, coefp, coefn); #endif getLastCudaError("Kernel execution failed"); transpose(d_temp, d_dest, height, width); }
abff2a39ea996c4e58f6258197c4b9465da2ffa7.cu
/* * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* Recursive Gaussian filter sgreen 8/1/08 This code sample implements a Gaussian blur using Deriche's recursive method: http://citeseer.ist.psu.edu/deriche93recursively.html This is similar to the box filter sample in the SDK, but it uses the previous outputs of the filter as well as the previous inputs. This is also known as an IIR (infinite impulse response) filter, since its response to an input impulse can last forever. The main advantage of this method is that the execution time is independent of the filter width. The GPU processes columns of the image in parallel. To avoid uncoalesced reads for the row pass we transpose the image and then transpose it back again afterwards. The implementation is based on code from the CImg library: http://cimg.sourceforge.net/ Thanks to David Tschumperl� and all the CImg contributors! */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <cuda_runtime.h> #include <helper_cuda.h> #include <helper_math.h> #include "recursiveGaussian_kernel.cuh" #define USE_SIMPLE_FILTER 0 //Round a / b to nearest higher integer value int iDivUp(int a, int b) { return (a % b != 0) ? (a / b + 1) : (a / b); } /* Transpose a 2D array (see SDK transpose example) */ extern "C" void transpose(uint *d_src, uint *d_dest, uint width, int height) { dim3 grid(iDivUp(width, BLOCK_DIM), iDivUp(height, BLOCK_DIM), 1); dim3 threads(BLOCK_DIM, BLOCK_DIM, 1); d_transpose<<< grid, threads >>>(d_dest, d_src, width, height); getLastCudaError("Kernel execution failed"); } /* Perform Gaussian filter on a 2D image using CUDA Parameters: d_src - pointer to input image in device memory d_dest - pointer to destination image in device memory d_temp - pointer to temporary storage in device memory width - image width height - image height sigma - sigma of Gaussian order - filter order (0, 1 or 2) */ // 8-bit RGBA version extern "C" void gaussianFilterRGBA(uint *d_src, uint *d_dest, uint *d_temp, int width, int height, float sigma, int order, int nthreads) { // compute filter coefficients const float nsigma = sigma < 0.1f ? 
0.1f : sigma, alpha = 1.695f / nsigma, ema = (float)std::exp(-alpha), ema2 = (float)std::exp(-2*alpha), b1 = -2*ema, b2 = ema2; float a0 = 0, a1 = 0, a2 = 0, a3 = 0, coefp = 0, coefn = 0; switch (order) { case 0: { const float k = (1-ema)*(1-ema)/(1+2*alpha*ema-ema2); a0 = k; a1 = k*(alpha-1)*ema; a2 = k*(alpha+1)*ema; a3 = -k*ema2; } break; case 1: { const float k = (1-ema)*(1-ema)/ema; a0 = k*ema; a1 = a3 = 0; a2 = -a0; } break; case 2: { const float ea = (float)std::exp(-alpha), k = -(ema2-1)/(2*alpha*ema), kn = (-2*(-1+3*ea-3*ea*ea+ea*ea*ea)/(3*ea+1+3*ea*ea+ea*ea*ea)); a0 = kn; a1 = -kn*(1+k*alpha)*ema; a2 = kn*(1-k*alpha)*ema; a3 = -kn*ema2; } break; default: fprintf(stderr, "gaussianFilter: invalid order parameter!\n"); return; } coefp = (a0+a1)/(1+b1+b2); coefn = (a2+a3)/(1+b1+b2); // process columns #if USE_SIMPLE_FILTER d_simpleRecursive_rgba<<< iDivUp(width, nthreads), nthreads >>>(d_src, d_temp, width, height, ema); #else d_recursiveGaussian_rgba<<< iDivUp(width, nthreads), nthreads, 3072 >>>(d_src, d_temp, width, height, a0, a1, a2, a3, b1, b2, coefp, coefn); //d_recursiveGaussian_rgba<<< iDivUp(width, nthreads), nthreads, 3328>>>(d_src, d_temp, width, height, a0, a1, a2, a3, b1, b2, coefp, coefn); //d_recursiveGaussian_rgba<<< iDivUp(width, nthreads), nthreads>>>(d_src, d_temp, width, height, a0, a1, a2, a3, b1, b2, coefp, coefn); #endif getLastCudaError("Kernel execution failed"); transpose(d_temp, d_dest, width, height); getLastCudaError("transpose: Kernel execution failed"); // process rows #if USE_SIMPLE_FILTER d_simpleRecursive_rgba<<< iDivUp(height, nthreads), nthreads >>>(d_dest, d_temp, height, width, ema); #else d_recursiveGaussian_rgba<<< iDivUp(height, nthreads), nthreads, 3072 >>>(d_dest, d_temp, height, width, a0, a1, a2, a3, b1, b2, coefp, coefn); //d_recursiveGaussian_rgba<<< iDivUp(height, nthreads), nthreads, 3328 >>>(d_dest, d_temp, height, width, a0, a1, a2, a3, b1, b2, coefp, coefn); #endif getLastCudaError("Kernel execution failed"); transpose(d_temp, d_dest, height, width); }
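The header comment explains that Deriche's recursive method implements the Gaussian as an IIR filter whose cost does not depend on the kernel width, and gaussianFilterRGBA derives the feedforward coefficients a0..a3 and feedback coefficients b1, b2 from sigma. The CPU sketch below computes the order-0 coefficients the same way and runs a forward and a backward recurrence of the standard Deriche form (y[n] = a0*x[n] + a1*x[n-1] - b1*y[n-1] - b2*y[n-2], plus the mirrored pass with a2, a3) over a 1-D impulse. The kernel itself lives in recursiveGaussian_kernel.cuh, which is not shown here, and this sketch uses zero initial conditions instead of the coefp/coefn edge handling, so treat it as an assumed illustration rather than an exact reference.

```cpp
// CPU sketch of the order-0 Deriche recurrence behind the recursive Gaussian sample.
// Boundary handling is simplified (zero initial conditions); sigma is illustrative.
#include <cmath>
#include <cstdio>
#include <vector>

int main() {
  const float sigma = 4.0f;
  const float nsigma = sigma < 0.1f ? 0.1f : sigma;
  const float alpha = 1.695f / nsigma;
  const float ema   = std::exp(-alpha);
  const float ema2  = std::exp(-2.0f * alpha);
  const float b1 = -2.0f * ema, b2 = ema2;

  // order == 0 branch of gaussianFilterRGBA
  const float k  = (1 - ema) * (1 - ema) / (1 + 2 * alpha * ema - ema2);
  const float a0 = k;
  const float a1 = k * (alpha - 1) * ema;
  const float a2 = k * (alpha + 1) * ema;
  const float a3 = -k * ema2;

  std::vector<float> x(64, 0.0f), fwd(64, 0.0f), out(64, 0.0f);
  x[32] = 1.0f;                                   // impulse input

  // forward pass: y[n] = a0*x[n] + a1*x[n-1] - b1*y[n-1] - b2*y[n-2]
  float xp = 0, yp = 0, yb = 0;
  for (size_t n = 0; n < x.size(); ++n) {
    fwd[n] = a0 * x[n] + a1 * xp - b1 * yp - b2 * yb;
    xp = x[n]; yb = yp; yp = fwd[n];
  }
  // backward pass, accumulated into the forward result
  float xn = 0, xa = 0, yn = 0, ya = 0;
  for (int n = (int)x.size() - 1; n >= 0; --n) {
    float yc = a2 * xn + a3 * xa - b1 * yn - b2 * ya;
    xa = xn; xn = x[n]; ya = yn; yn = yc;
    out[n] = fwd[n] + yc;
  }
  for (int n = 28; n <= 36; ++n) printf("out[%d] = %f\n", n, out[n]);  // Gaussian-like bump around 32
  return 0;
}
```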
a2d1c64cb094eabd2b82bfd881d4e65612df67e3.hip
// !!! This is a file automatically generated by hipify!!! #include<cuda.h> #include<stdio.h> #include<stdlib.h> #include <hip/hip_runtime.h> hipEvent_t start, stop; float elapsed_time_ms; __global__ void MatrixMulKernel(float *Md, float *Nd, float *Pd, int Width) { int tx = threadIdx.x + blockIdx.x*blockDim.x; int ty = threadIdx.y + blockIdx.y*blockDim.y; float Pvalue = 0; for(int k = 0; k < Width ; ++k){ float Mdelement = Md[ty*Width + k]; float Ndelement = Nd[k*Width + tx]; Pvalue += (Mdelement*Ndelement); } Pd[ty*Width + tx] = Pvalue; } void MatrixMultiplication(float *M, float *N, float *P, int Width) { int size = Width*Width*sizeof(float); float *Md, *Nd, *Pd; int k = 100; int l = 100; hipEventCreate(&start); hipEventCreate(&stop); hipMalloc((void**)&Md, size); hipMemcpy(Md,M,size,hipMemcpyHostToDevice); hipMalloc((void**)&Nd, size); hipMemcpy(Nd,N,size,hipMemcpyHostToDevice); hipMalloc((void**)&Pd,size); dim3 dimBlock((k-1)/Width+1,(l-1)/Width+1); dim3 dimGrid(Width,Width); hipEventRecord(start, 0); hipLaunchKernelGGL(( MatrixMulKernel), dim3(dimGrid),dim3(dimBlock), 0, 0, Md,Nd,Pd,Width); hipMemcpy(P,Pd,size,hipMemcpyDeviceToHost); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_time_ms, start, stop ); hipFree(Md); hipFree(Nd); hipFree(Pd); } int main(void) { void MatrixMultiplication(float *, float *, float *, int); const int Width= 100; float M[Width*Width], N[Width*Width], P[Width*Width]; for(int i = 0; i < (Width*Width) ; i++){ M[i] = 5; N[i] = 5; P[i] = 0; } MatrixMultiplication(M, N, P, Width); for(int i = 0; i < (Width*Width) ; i++){ printf("%f \t", P[i]); } printf("Computation time of GPU: %f ms.\n This is a change", elapsed_time_ms); // exe. time return 0; }
a2d1c64cb094eabd2b82bfd881d4e65612df67e3.cu
#include<cuda.h> #include<stdio.h> #include<stdlib.h> #include <cuda_runtime.h> cudaEvent_t start, stop; float elapsed_time_ms; __global__ void MatrixMulKernel(float *Md, float *Nd, float *Pd, int Width) { int tx = threadIdx.x + blockIdx.x*blockDim.x; int ty = threadIdx.y + blockIdx.y*blockDim.y; float Pvalue = 0; for(int k = 0; k < Width ; ++k){ float Mdelement = Md[ty*Width + k]; float Ndelement = Nd[k*Width + tx]; Pvalue += (Mdelement*Ndelement); } Pd[ty*Width + tx] = Pvalue; } void MatrixMultiplication(float *M, float *N, float *P, int Width) { int size = Width*Width*sizeof(float); float *Md, *Nd, *Pd; int k = 100; int l = 100; cudaEventCreate(&start); cudaEventCreate(&stop); cudaMalloc((void**)&Md, size); cudaMemcpy(Md,M,size,cudaMemcpyHostToDevice); cudaMalloc((void**)&Nd, size); cudaMemcpy(Nd,N,size,cudaMemcpyHostToDevice); cudaMalloc((void**)&Pd,size); dim3 dimBlock((k-1)/Width+1,(l-1)/Width+1); dim3 dimGrid(Width,Width); cudaEventRecord(start, 0); MatrixMulKernel<<<dimGrid,dimBlock>>>(Md,Nd,Pd,Width); cudaMemcpy(P,Pd,size,cudaMemcpyDeviceToHost); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_time_ms, start, stop ); cudaFree(Md); cudaFree(Nd); cudaFree(Pd); } int main(void) { void MatrixMultiplication(float *, float *, float *, int); const int Width= 100; float M[Width*Width], N[Width*Width], P[Width*Width]; for(int i = 0; i < (Width*Width) ; i++){ M[i] = 5; N[i] = 5; P[i] = 0; } MatrixMultiplication(M, N, P, Width); for(int i = 0; i < (Width*Width) ; i++){ printf("%f \t", P[i]); } printf("Computation time of GPU: %f ms.\n This is a change", elapsed_time_ms); // exe. time return 0; }
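In both copies the kernel derives each output element from threadIdx + blockIdx*blockDim, and the launch uses dimBlock = (1,1) with dimGrid = (Width, Width), i.e. one single-thread block per output element; that covers the 100x100 problem exactly but leaves most of each warp idle. The sketch below shows a more conventional configuration for the same indexing scheme, assuming square Width x Width matrices: 16x16-thread blocks, a rounded-up grid, and a bounds check. It is an illustrative variant, not the launch the file itself uses.

```cuda
// Illustrative alternative launch configuration for the same global-index scheme.
// Assumes square Width x Width matrices; TILE and the names are made up for the sketch.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define TILE 16

__global__ void MatMulBounded(const float *Md, const float *Nd, float *Pd, int Width) {
    int tx = threadIdx.x + blockIdx.x * blockDim.x;   // output column
    int ty = threadIdx.y + blockIdx.y * blockDim.y;   // output row
    if (tx >= Width || ty >= Width) return;           // guard the ragged edge
    float Pvalue = 0.0f;
    for (int k = 0; k < Width; ++k)
        Pvalue += Md[ty * Width + k] * Nd[k * Width + tx];
    Pd[ty * Width + tx] = Pvalue;
}

int main() {
    const int Width = 100;
    const int size = Width * Width * sizeof(float);
    float *M = (float*)malloc(size), *N = (float*)malloc(size), *P = (float*)malloc(size);
    for (int i = 0; i < Width * Width; ++i) { M[i] = 5.0f; N[i] = 5.0f; }

    float *Md, *Nd, *Pd;
    cudaMalloc((void**)&Md, size); cudaMalloc((void**)&Nd, size); cudaMalloc((void**)&Pd, size);
    cudaMemcpy(Md, M, size, cudaMemcpyHostToDevice);
    cudaMemcpy(Nd, N, size, cudaMemcpyHostToDevice);

    dim3 block(TILE, TILE);                                        // 256 threads per block
    dim3 grid((Width + TILE - 1) / TILE, (Width + TILE - 1) / TILE);
    MatMulBounded<<<grid, block>>>(Md, Nd, Pd, Width);
    cudaMemcpy(P, Pd, size, cudaMemcpyDeviceToHost);

    printf("P[0] = %f (expected %f)\n", P[0], 5.0f * 5.0f * Width);
    cudaFree(Md); cudaFree(Nd); cudaFree(Pd);
    free(M); free(N); free(P);
    return 0;
}
```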
663156e62c0c074883b74d0528692f7568b01396.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Assignment: ECE 451 Programming Assignment 2 Code: GpuSumPrimes.cu Group: David Swanson, Daniel Caballero, Michael Wilder Description: This code adds all the prime numbers up to a certain number input by the user. This code takes one parameter (SIZE) from the user and uses the CUDA library to run the calculations needed in parallel on 1024 threads. */ #include <stdio.h> #define BLOCK_SIZE 1024 /* SumPrimes the function on the device that calculates if a number is prime. It takes a pointer to the allocated array on the GPU and the size of the array. */ __global__ void SumPrimes (int *device_array, int SIZE) { // Index is calculated based on which block and thread is being worked. int index = threadIdx.x + blockIdx.x * blockDim.x; int i; int Prime = 1; // If the index is valid, then we need to check if it is prime. if (index < SIZE) { if ((index) == 0 || (index) == 1) { device_array[index] = 0; } // If the number is not prime, the value in the array is set to 0 else { for (i=2; i*i <= index; i++) { if (index % i == 0) { Prime = 0; device_array[index] = 0; break; } } // if the number is prime, the value in the array is set to the number. if (Prime) device_array[index] = index; } } } /* The main function of the code allocates memory on the host and device, transfers data between the two, and calls the SumPrimes function. */ int main(int argc, char* argv []){ int SIZE = atoi(argv[1]) + 1; int i; long int sum; int *host_array; int *device_array; sum = 0; // Allocate memory for host array and device array then copy host array to device array. host_array = (int *)malloc(SIZE*sizeof(int)); hipMalloc(&device_array, SIZE*sizeof(int)); hipMemcpy(device_array, host_array, SIZE*sizeof(int), hipMemcpyHostToDevice); // Define how many blocks and threads that need to be used when calling SumPrimes. // A 1D array is used. The size of blocksPerGrid is set in a way to prevent overflow. dim3 blocksPerGrid((SIZE + BLOCK_SIZE - 1)/BLOCK_SIZE,1,1); dim3 threadsPerBlock(BLOCK_SIZE,1,1); hipLaunchKernelGGL(( SumPrimes) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, device_array, SIZE); // Copy final array from device to host then clear memory in the device. hipMemcpy(host_array, device_array, SIZE*sizeof(int), hipMemcpyDeviceToHost); hipFree(device_array); // Testing print statement. printf("I am adding: "); // Add all the elements in the array. Only prime numbers will be non-zero. for (i = 0; i < SIZE; i++) { if (host_array[i] != 0) printf("%d ", host_array[i]); sum += host_array[i]; } printf("\nSum = %ld \n", sum); return 0; }
663156e62c0c074883b74d0528692f7568b01396.cu
/* Assignment: ECE 451 Programming Assignment 2 Code: GpuSumPrimes.cu Group: David Swanson, Daniel Caballero, Michael Wilder Description: This code adds all the prime numbers up to a certain number input by the user. This code takes one parameter (SIZE) from the user and uses the CUDA library to run the calculations needed in parallel on 1024 threads. */ #include <stdio.h> #define BLOCK_SIZE 1024 /* SumPrimes the function on the device that calculates if a number is prime. It takes a pointer to the allocated array on the GPU and the size of the array. */ __global__ void SumPrimes (int *device_array, int SIZE) { // Index is calculated based on which block and thread is being worked. int index = threadIdx.x + blockIdx.x * blockDim.x; int i; int Prime = 1; // If the index is valid, then we need to check if it is prime. if (index < SIZE) { if ((index) == 0 || (index) == 1) { device_array[index] = 0; } // If the number is not prime, the value in the array is set to 0 else { for (i=2; i*i <= index; i++) { if (index % i == 0) { Prime = 0; device_array[index] = 0; break; } } // if the number is prime, the value in the array is set to the number. if (Prime) device_array[index] = index; } } } /* The main function of the code allocates memory on the host and device, transfers data between the two, and calls the SumPrimes function. */ int main(int argc, char* argv []){ int SIZE = atoi(argv[1]) + 1; int i; long int sum; int *host_array; int *device_array; sum = 0; // Allocate memory for host array and device array then copy host array to device array. host_array = (int *)malloc(SIZE*sizeof(int)); cudaMalloc(&device_array, SIZE*sizeof(int)); cudaMemcpy(device_array, host_array, SIZE*sizeof(int), cudaMemcpyHostToDevice); // Define how many blocks and threads that need to be used when calling SumPrimes. // A 1D array is used. The size of blocksPerGrid is set in a way to prevent overflow. dim3 blocksPerGrid((SIZE + BLOCK_SIZE - 1)/BLOCK_SIZE,1,1); dim3 threadsPerBlock(BLOCK_SIZE,1,1); SumPrimes <<<blocksPerGrid, threadsPerBlock>>>(device_array, SIZE); // Copy final array from device to host then clear memory in the device. cudaMemcpy(host_array, device_array, SIZE*sizeof(int), cudaMemcpyDeviceToHost); cudaFree(device_array); // Testing print statement. printf("I am adding: "); // Add all the elements in the array. Only prime numbers will be non-zero. for (i = 0; i < SIZE; i++) { if (host_array[i] != 0) printf("%d ", host_array[i]); sum += host_array[i]; } printf("\nSum = %ld \n", sum); return 0; }
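The comments in this pair explain how each thread maps its global index to one candidate number, stores the number itself when it is prime and 0 otherwise, and how blocksPerGrid is rounded up so every index below SIZE is covered before the host sums the array. The host-only sketch below reproduces that sum on the CPU, which can serve as a quick cross-check of the GPU result; the limit value is illustrative and stands in for the command-line argument.

```cpp
// CPU reference for the prime sum computed by the SumPrimes kernel (illustrative).
#include <cstdio>

// Same primality rule as the kernel: 0 and 1 are not prime, otherwise trial-divide up to sqrt(n).
static bool is_prime(int n) {
    if (n < 2) return false;
    for (int i = 2; i * i <= n; ++i)
        if (n % i == 0) return false;
    return true;
}

int main() {
    const int limit = 100;          // stands in for the user argument; illustrative value
    long int sum = 0;
    for (int n = 0; n <= limit; ++n)
        if (is_prime(n)) sum += n;
    printf("Sum of primes up to %d = %ld\n", limit, sum);   // 1060 for limit = 100
    return 0;
}
```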
0222fb228557029210a34fb9efed990cf9f308a9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 @precisions normal d */ /* blk_M=64 blk_N=64 blk_K=16 nthd_x=64 nthd_y=4 */ #include "common_magma.h" #include "commonblas_d.h" #define magmablas_dgemm_fermi magmablas_dgemm texture<int2,1> tex_x_double_A; texture<int2,1> tex_x_double_B; static __inline__ __device__ double fetch_x_A(const int& i) { register int2 v = tex1Dfetch(tex_x_double_A, i); return __hiloint2double(v.y, v.x); } static __inline__ __device__ double fetch_x_B(const int& i) { register int2 v = tex1Dfetch(tex_x_double_B, i); return __hiloint2double(v.y, v.x); } extern "C" __global__ void fermiDgemm_v2_kernel_NN(double *C, const double *A, const double *B, int m, int n, int k, int lda, int ldb, int ldc, double alpha, double beta, int offsetA, int offsetB) { const int tx = threadIdx.x; const int ty = threadIdx.y; const int iby = blockIdx.y * 64; const int ibx = blockIdx.x * 64; const int idt = ty * 64 + tx; const int tx2 = idt%16; const int ty2 = idt/16; __shared__ double Abs[64][17]; __shared__ double Bb[16][65]; int tll = ty2; double xxA[4]; double xxB[4]; //int trackA = offsetA + ibx +__mul24( ty2, lda) + tx2 ; //A += trackA; A += (offsetA + ibx +__mul24( ty2, lda) + tx2); //int trackB = offsetB + tx2+ __mul24(iby + ty2 * 4, ldb ); //B += trackB; B += (offsetB + tx2+ __mul24(iby + ty2 * 4, ldb )); #pragma unroll for(int y=0; y<4; y++) Abs[tx2+ y*16][ty2] = /* (tll<k)* */ A[y*16] ; //Abs[tx2+ y*16][ty2] = /* (tll<k)* */ fetch_x_A(trackA + y*16) ; #pragma unroll for(int y=0; y<4; y++) Bb[tx2][ty2*4+y] = B[y * ldb] ; // Bb[tx2][ty2*4+y] = fetch_x_B( trackB + y * ldb) ; __syncthreads(); double Axs[4]; double Bxp[4]; double Cb[16] = {0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0}; int k1; for(k1=0; k1<(k-16); k1+=16) { tll+=16; A += lda *16 ; B += 16; //trackA += 16*lda ; //trackB += 16; #pragma unroll for( int y=0; y<4; y++) xxA[y] = /* (tll<k)* */ A[y*16]; // xxA[y] = /* (tll<k)* */ fetch_x_A(trackA + y*16); #pragma unroll for( int y=0; y<4; y++) xxB[y] = B[y*ldb]; // xxB[y] = fetch_x_B( trackB + y*ldb); #pragma unroll for( int j1=0;j1<16;j1++) { #pragma unroll for( int y=0; y<4; y++) Axs[y] = Abs[tx2+y*16][j1] ; #pragma unroll for( int y=0; y<4; y++) Bxp[y]= Bb[j1][ty2+y*16]; #pragma unroll for( int x=0; x<4; x++) { #pragma unroll for( int y=0; y<4; y++) { Cb[x*4+y] += Axs[x]*Bxp[y]; } } } __syncthreads(); #pragma unroll for(int y=0; y<4; y++) Abs[tx2+y*16][ty2] = xxA[y]; #pragma unroll for(int y=0; y<4; y++) Bb[tx2][ty2*4 + y] = xxB[y]; __syncthreads(); } C += tx2 + ibx + __mul24 (ty2 + iby ,ldc); #pragma unroll for(int j1=0;j1<16;j1++) { #pragma unroll for( int y=0; y<4; y++) Axs[y] = Abs[tx2 + y*16][j1] ; #pragma unroll for( int y=0; y<4; y++) Bxp[y]= Bb[j1][ty2 + y*16]; #pragma unroll for( int x=0; x<4; x++) { #pragma unroll for( int y=0;y<4; y++) { Cb[x*4 + y] += Axs[x]*Bxp[y]; } } } int gy = iby + ty2; #pragma unroll for( int y=0;y<4;y++, gy+=16) { int gx = ibx + tx2; #pragma unroll for(int x=0;x<4;x++, gx+=16) { if (gx < m && gy < n) C[x*16] = alpha*Cb[y+x*4] + beta * C[x*16]; } C += ldc*16; } } extern "C" __global__ void fermiDgemm_v2_kernel_TN(double *C, const double *A, const double *B, int m, int n, int k, int lda, int ldb, int ldc, double alpha, double beta, int offsetA, int offsetB) { const int tx = threadIdx.x; const int ty = threadIdx.y; const int iby = blockIdx.y * 64; const int ibx = blockIdx.x * 64; 
const int idt = ty * 64 + tx; const int tx2 = idt%16; const int ty2 = idt/16; __shared__ double Bb[16][65]; __shared__ double Abs[64][17]; double xxA[4]; double xxB[4]; int trackA = offsetA + tx2 + __mul24( ibx + ty2*4, lda ); int trackB = offsetB + tx2 + __mul24( iby + ty2*4, ldb ); A+= trackA; B+= trackB; int tll = tx2; #pragma unroll for(int y=0; y<4; y++) Abs[ty2*4+y][tx2] = (tll<k)* fetch_x_A(trackA + y*lda); #pragma unroll for(int y=0; y<4; y++) Bb[tx2][ty2*4+y] = /* (tll<k)* */ fetch_x_B( trackB + y*ldb ); __syncthreads(); double Axs[4]; double Bxp[4]; double Cb[16] = {0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0}; int k1; for(k1=0; k1<(k-16); k1+=16) { tll +=16; B += 16; A += 16 ; trackA+=16 ; trackB+=16; #pragma unroll for(int y=0; y<4; y++) xxA[y] = (tll<k)* fetch_x_A(trackA + y*lda); #pragma unroll for(int y=0; y<4; y++) xxB[y] = /* (tll<k)* */ fetch_x_B(trackB + y*ldb); #pragma unroll for(int j1=0;j1<16;j1++) { #pragma unroll for(int y=0; y<4; y++) Axs[y] = Abs[tx2+y*16][j1]; #pragma unroll for(int y=0; y<4; y++) Bxp[y]= Bb[j1][ty2+y*16]; #pragma unroll for(int x=0; x<4; x++) { #pragma unroll for(int y=0; y<4; y++) { Cb[x*4+y] += Axs[x]*Bxp[y]; } } } __syncthreads(); #pragma unroll for(int y=0; y<4; y++) Abs[ty2*4+y][tx2] = xxA[y]; #pragma unroll for(int y=0; y<4; y++) Bb[tx2][ty2*4+y] =xxB[y]; __syncthreads(); } C += tx2 + ibx + __mul24 (ty2 + iby ,ldc); #pragma unroll for(int j1=0; j1<16; j1++) { #pragma unroll for(int y=0; y<4; y++) Axs[y] = Abs[tx2+y*16][j1]; #pragma unroll for(int y=0; y<4; y++) Bxp[y]= Bb[j1][ty2+y*16]; #pragma unroll for(int x=0; x<4; x++) { #pragma unroll for(int y=0; y<4; y++) { Cb[x*4+y] += Axs[x]*Bxp[y]; } } } int gy = iby+ty2; #pragma unroll for(int y=0;y<4;y++, gy+=16) { int gx = ibx+tx2; #pragma unroll for(int x=0;x<4;x++, gx+=16) { if (gx < m && gy < n) C[x*16] =alpha*Cb[y+x*4] + beta * C[x*16]; } C+=ldc*16; } } extern "C" __global__ void fermiDgemm_v2_kernel_TT(double *C, const double *A, const double *B, int m, int n, int k, int lda, int ldb, int ldc, double alpha, double beta, int offsetA, int offsetB) { const int tx = threadIdx.x; const int ty = threadIdx.y; const int iby = blockIdx.y * 64; const int ibx = blockIdx.x * 64; const int idt = ty * 64 + tx; const int tx2 = idt%16; const int ty2 = idt/16; __shared__ double Bb[16][65]; __shared__ double Abs[64][17]; double xxA[4]; double xxB[4]; int trackA = offsetA + __mul24( ibx + ty2, lda) + tx2; int trackB = offsetB + iby+ tx2 + __mul24(ty2, ldb); A += trackA; B += trackB; int tll = tx2; #pragma unroll for(int y=0; y<4; y++) Abs[ty2+16*y][tx2] = /* (tll<k)* */ fetch_x_A(trackA + lda*16*y); #pragma unroll for(int y=0; y<4; y++) Bb[ty2][tx2+16*y] = fetch_x_B(trackB+16*y); __syncthreads(); double Axs[4]; double Bxp[4]; double Cb[16] = {0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0}; int k1; for(k1=0; k1<(k-16); k1+=16) { tll+=16; A += 16; B += 16*ldb; trackA+=16; trackB+=16*ldb; #pragma unroll for( int y=0; y<4; y++) xxA[y] = /* (tll<k)* */ fetch_x_A(trackA + lda*y*16); #pragma unroll for( int y=0; y<4; y++) xxB[y] = fetch_x_B(trackB + 16*y); #pragma unroll for( int j1=0;j1<16;j1++) { #pragma unroll for( int y=0; y<4; y++) Axs[y] = Abs[tx2 + y*16][j1]; #pragma unroll for( int y=0; y<4; y++) Bxp[y]= Bb[j1][ty2 + y*16]; #pragma unroll for( int x=0; x<4; x++) #pragma unroll for( int y=0;y<4;y++) Cb[x*4+y] += Axs[x]*Bxp[y]; } __syncthreads(); #pragma unroll for( int y=0; y<4; y++) Abs[ty2 + 16*y][tx2] = xxA[y]; #pragma unroll for( int y=0; y<4; y++) Bb[ty2][tx2+y*16] = xxB[y]; __syncthreads(); } C += tx2 + ibx + __mul24 
(ty2 + iby ,ldc); #pragma unroll for( int j1=0; j1<16; j1++) { #pragma unroll for( int y=0; y<4; y++) Axs[y] = Abs[tx2 + y*16][j1]; #pragma unroll for( int y=0; y<4; y++) Bxp[y]= Bb[j1][ty2 + y*16]; #pragma unroll for( int x=0; x<4; x++) #pragma unroll for( int y=0; y<4; y++) Cb[x*4+y] += Axs[x]*Bxp[y]; } int gy = iby + ty2; #pragma unroll for( int y=0; y<4; y++, gy+=16) { int gx = ibx + tx2; #pragma unroll for(int x=0; x<4; x++, gx+=16) { if (gx < m && gy < n) C[x*16] = alpha*Cb[y+x*4] + beta * C[x*16]; } C+=ldc*16; } } extern "C" __global__ void fermiDgemm_v2_kernel_NT(double *C, const double *A, const double *B, int m, int n, int k, int lda, int ldb, int ldc, double alpha, double beta, int offsetA, int offsetB) { const int tx = threadIdx.x; const int ty = threadIdx.y; const int iby = blockIdx.y * 64; const int ibx = blockIdx.x * 64; const int idt = ty * 64 + tx; const int tx2= idt%16; const int ty2= idt/16; __shared__ double Bb[16][65]; __shared__ double Abs[64][17]; double xxA[4]; double xxB[4]; int trackA = offsetA + ibx +__mul24(ty2, lda) + tx2 ; int trackB = offsetB + iby + tx2 + __mul24(ty2, ldb); A+= trackA; B += trackB; int tll = ty2; #pragma unroll for(int y=0; y<4; y++) Abs[tx2+ y*16][ty2] = /* (tll<k)* */ fetch_x_A(trackA + y*16); #pragma unroll for(int y=0; y<4; y++) Bb[ty2][tx2+16*y] = /* (tll<k)* */ fetch_x_B(trackB+16*y); __syncthreads(); double Axs[4]; double Bxp[4]; double Cb[16] = {0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0}; int k1; for(k1=0; k1<(k-16); k1+=16) { tll += 16; A += lda *16 ; B += 16*ldb; trackA+=16*lda ; trackB+=16*ldb; #pragma unroll for( int y=0; y<4; y++) xxA[y] = /* (tll<k)* */ fetch_x_A(trackA + y*16); #pragma unroll for( int y=0; y<4; y++) xxB[y] = /* (tll<k)* */ fetch_x_B( trackB + 16*y); #pragma unroll for( int j1=0;j1<16;j1++) { #pragma unroll for( int y=0; y<4; y++) Bxp[y]= Bb[j1][ty2 + y*16]; #pragma unroll for( int y=0; y<4; y++) Axs[y] = Abs[tx2 + y*16][j1] ; #pragma unroll for( int x=0; x<4; x++) #pragma unroll for( int y=0; y<4; y++) Cb[x*4+y] += Axs[x]*Bxp[y]; } __syncthreads(); #pragma unroll for( int y=0; y<4; y++) Abs[tx2 + y*16][ty2] = xxA[y]; #pragma unroll for( int y=0; y<4; y++) Bb[ty2][tx2+y*16] = xxB[y]; __syncthreads(); } C += tx2 + ibx + __mul24(ty2 + iby ,ldc); #pragma unroll for(int j1=0; j1<16; j1++) { #pragma unroll for( int y=0; y<4; y++) Bxp[y] = Bb[j1][ty2 + y*16]; #pragma unroll for( int y=0; y<4; y++) Axs[y] = Abs[tx2 + y*16][j1] ; #pragma unroll for( int x=0; x<4; x++) #pragma unroll for( int y=0;y<4;y++) Cb[x*4+y] += Axs[x]*Bxp[y]; } int gy = iby + ty2; #pragma unroll for( int y=0; y<4; y++, gy+=16) { int gx = ibx + tx2; #pragma unroll for(int x=0; x<4; x++, gx+=16) { if (gx < m && gy < n) C[x*16] = alpha*Cb[y + x*4] + beta * C[x*16]; } C+=ldc*16; } } extern "C" void magmablas_dgemm_fermi( char TRANSA, char TRANSB, int m , int n , int k , double alpha, const double *A, int lda, const double *B, int ldb, double beta, double *C, int ldc ) { /* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 Purpose ======= DGEMM performs one of the matrix-matrix operations C := alpha*op( A )*op( B ) + beta*C, where op( X ) is one of op( X ) = X or op( X ) = X', alpha and beta are scalars, and A, B and C are matrices, with op( A ) an m by k matrix, op( B ) a k by n matrix and C an m by n matrix. Parameters ========== TRANSA - CHARACTER*1. On entry, TRANSA specifies the form of op( A ) to be used in the matrix multiplication as follows: TRANSA = 'N' or 'n', op( A ) = A. 
TRANSA = 'T' or 't', op( A ) = A'. TRANSA = 'C' or 'c', op( A ) = A'. Unchanged on exit. TRANSB - CHARACTER*1. On entry, TRANSB specifies the form of op( B ) to be used in the matrix multiplication as follows: TRANSB = 'N' or 'n', op( B ) = B. TRANSB = 'T' or 't', op( B ) = B'. TRANSB = 'C' or 'c', op( B ) = B'. Unchanged on exit. M - INTEGER. On entry, M specifies the number of rows of the matrix op( A ) and of the matrix C. M must be at least zero. Unchanged on exit. N - INTEGER. On entry, N specifies the number of columns of the matrix op( B ) and the number of columns of the matrix C. N must be at least zero. Unchanged on exit. K - INTEGER. On entry, K specifies the number of columns of the matrix op( A ) and the number of rows of the matrix op( B ). K must be at least zero. Unchanged on exit. ALPHA - DOUBLE PRECISION. On entry, ALPHA specifies the scalar alpha. Unchanged on exit. A - DOUBLE PRECISION array of DIMENSION ( LDA, ka ), where ka is k when TRANSA = 'N' or 'n', and is m otherwise. Before entry with TRANSA = 'N' or 'n', the leading m by k part of the array A must contain the matrix A, otherwise the leading k by m part of the array A must contain the matrix A. Unchanged on exit. LDA - INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. When TRANSA = 'N' or 'n' then LDA must be at least max( 1, m ), otherwise LDA must be at least max( 1, k ). Unchanged on exit. B - DOUBLE PRECISION array of DIMENSION ( LDB, kb ), where kb is n when TRANSB = 'N' or 'n', and is k otherwise. Before entry with TRANSB = 'N' or 'n', the leading k by n part of the array B must contain the matrix B, otherwise the leading n by k part of the array B must contain the matrix B. Unchanged on exit. LDB - INTEGER. On entry, LDB specifies the first dimension of B as declared in the calling (sub) program. When TRANSB = 'N' or 'n' then LDB must be at least max( 1, k ), otherwise LDB must be at least max( 1, n ). Unchanged on exit. BETA - DOUBLE PRECISION. On entry, BETA specifies the scalar beta. When BETA is supplied as zero then C need not be set on input. Unchanged on exit. C - DOUBLE PRECISION array of DIMENSION ( LDC, n ). Before entry, the leading m by n part of the array C must contain the matrix C, except when beta is zero, in which case C need not be set on entry. On exit, the array C is overwritten by the m by n matrix ( alpha*op( A )*op( B ) + beta*C ). LDC - INTEGER. On entry, LDC specifies the first dimension of C as declared in the calling (sub) program. LDC must be at least max( 1, m ). Unchanged on exit. ===================================================================== */ if (m<=0 || n<=0 || k<=0) return; size_t offsetA = 0; size_t offsetB = 0; int TransA = 1, TransB = 1; if (TRANSA == 'N' || TRANSA == 'n') TransA = 0; if (TRANSB == 'N' || TRANSB == 'n') TransB = 0; size_t sizeA = (size_t) lda * (size_t) (!TransA ? k : m); size_t sizeB = (size_t) ldb * (size_t) (!TransB ? 
n : k); // size_t CUBLAS_MAX_1DBUF_SIZE = ((1 << 27) - 512) / 2; size_t CUBLAS_MAX_1DBUF_SIZE = ((1 << 27) - 512); if (sizeA>=CUBLAS_MAX_1DBUF_SIZE || sizeB>=CUBLAS_MAX_1DBUF_SIZE ) { // printf("Exceeding texuture limit (CUBLAS_MAX_1DBUF_SIZE=%ld), using hipblasSgemm\n", CUBLAS_MAX_1DBUF_SIZE); hipblasDgemm(TRANSA, TRANSB, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc); return; } hipError_t errt; errt = hipBindTexture(&offsetA, tex_x_double_A, (int2 *)A, sizeA * sizeof(A[0])); if( errt != hipSuccess) { printf("cannot bind to texture\n"); return; } errt = hipBindTexture(&offsetB, tex_x_double_B, (int2 *)B, sizeB * sizeof(B[0])); if( errt != hipSuccess) { printf("cannot bind to texture\n"); return; } dim3 threads( 64, 4 ); dim3 grid(m/(64)+(m%(64)!=0),n/(64)+(n%(64)!=0)); offsetA = offsetA/sizeof(A[0]); offsetB = offsetB/sizeof(B[0]); if ( TransB ) if ( !TransA ) hipLaunchKernelGGL(( fermiDgemm_v2_kernel_NT), dim3(grid), dim3(threads), 0, magma_stream , C, A, B, m, n, k, lda, ldb, ldc, alpha, beta, (int)offsetA, (int)offsetB); else hipLaunchKernelGGL(( fermiDgemm_v2_kernel_TT), dim3(grid), dim3(threads), 0, magma_stream , C, A, B, m, n, k, lda, ldb, ldc, alpha, beta, (int)offsetA, (int)offsetB); else if ( !TransA ) hipLaunchKernelGGL(( fermiDgemm_v2_kernel_NN), dim3(grid), dim3(threads), 0, magma_stream , C, A, B, m, n, k, lda, ldb, ldc, alpha, beta, (int)offsetA, (int)offsetB); else hipLaunchKernelGGL(( fermiDgemm_v2_kernel_TN), dim3(grid), dim3(threads), 0, magma_stream , C, A, B, m, n, k, lda, ldb, ldc, alpha, beta, (int)offsetA, (int)offsetB); hipUnbindTexture ( tex_x_double_A ) ; hipUnbindTexture ( tex_x_double_B ) ; }
0222fb228557029210a34fb9efed990cf9f308a9.cu
/* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 @precisions normal d */ /* blk_M=64 blk_N=64 blk_K=16 nthd_x=64 nthd_y=4 */ #include "common_magma.h" #include "commonblas_d.h" #define magmablas_dgemm_fermi magmablas_dgemm texture<int2,1> tex_x_double_A; texture<int2,1> tex_x_double_B; static __inline__ __device__ double fetch_x_A(const int& i) { register int2 v = tex1Dfetch(tex_x_double_A, i); return __hiloint2double(v.y, v.x); } static __inline__ __device__ double fetch_x_B(const int& i) { register int2 v = tex1Dfetch(tex_x_double_B, i); return __hiloint2double(v.y, v.x); } extern "C" __global__ void fermiDgemm_v2_kernel_NN(double *C, const double *A, const double *B, int m, int n, int k, int lda, int ldb, int ldc, double alpha, double beta, int offsetA, int offsetB) { const int tx = threadIdx.x; const int ty = threadIdx.y; const int iby = blockIdx.y * 64; const int ibx = blockIdx.x * 64; const int idt = ty * 64 + tx; const int tx2 = idt%16; const int ty2 = idt/16; __shared__ double Abs[64][17]; __shared__ double Bb[16][65]; int tll = ty2; double xxA[4]; double xxB[4]; //int trackA = offsetA + ibx +__mul24( ty2, lda) + tx2 ; //A += trackA; A += (offsetA + ibx +__mul24( ty2, lda) + tx2); //int trackB = offsetB + tx2+ __mul24(iby + ty2 * 4, ldb ); //B += trackB; B += (offsetB + tx2+ __mul24(iby + ty2 * 4, ldb )); #pragma unroll for(int y=0; y<4; y++) Abs[tx2+ y*16][ty2] = /* (tll<k)* */ A[y*16] ; //Abs[tx2+ y*16][ty2] = /* (tll<k)* */ fetch_x_A(trackA + y*16) ; #pragma unroll for(int y=0; y<4; y++) Bb[tx2][ty2*4+y] = B[y * ldb] ; // Bb[tx2][ty2*4+y] = fetch_x_B( trackB + y * ldb) ; __syncthreads(); double Axs[4]; double Bxp[4]; double Cb[16] = {0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0}; int k1; for(k1=0; k1<(k-16); k1+=16) { tll+=16; A += lda *16 ; B += 16; //trackA += 16*lda ; //trackB += 16; #pragma unroll for( int y=0; y<4; y++) xxA[y] = /* (tll<k)* */ A[y*16]; // xxA[y] = /* (tll<k)* */ fetch_x_A(trackA + y*16); #pragma unroll for( int y=0; y<4; y++) xxB[y] = B[y*ldb]; // xxB[y] = fetch_x_B( trackB + y*ldb); #pragma unroll for( int j1=0;j1<16;j1++) { #pragma unroll for( int y=0; y<4; y++) Axs[y] = Abs[tx2+y*16][j1] ; #pragma unroll for( int y=0; y<4; y++) Bxp[y]= Bb[j1][ty2+y*16]; #pragma unroll for( int x=0; x<4; x++) { #pragma unroll for( int y=0; y<4; y++) { Cb[x*4+y] += Axs[x]*Bxp[y]; } } } __syncthreads(); #pragma unroll for(int y=0; y<4; y++) Abs[tx2+y*16][ty2] = xxA[y]; #pragma unroll for(int y=0; y<4; y++) Bb[tx2][ty2*4 + y] = xxB[y]; __syncthreads(); } C += tx2 + ibx + __mul24 (ty2 + iby ,ldc); #pragma unroll for(int j1=0;j1<16;j1++) { #pragma unroll for( int y=0; y<4; y++) Axs[y] = Abs[tx2 + y*16][j1] ; #pragma unroll for( int y=0; y<4; y++) Bxp[y]= Bb[j1][ty2 + y*16]; #pragma unroll for( int x=0; x<4; x++) { #pragma unroll for( int y=0;y<4; y++) { Cb[x*4 + y] += Axs[x]*Bxp[y]; } } } int gy = iby + ty2; #pragma unroll for( int y=0;y<4;y++, gy+=16) { int gx = ibx + tx2; #pragma unroll for(int x=0;x<4;x++, gx+=16) { if (gx < m && gy < n) C[x*16] = alpha*Cb[y+x*4] + beta * C[x*16]; } C += ldc*16; } } extern "C" __global__ void fermiDgemm_v2_kernel_TN(double *C, const double *A, const double *B, int m, int n, int k, int lda, int ldb, int ldc, double alpha, double beta, int offsetA, int offsetB) { const int tx = threadIdx.x; const int ty = threadIdx.y; const int iby = blockIdx.y * 64; const int ibx = blockIdx.x * 64; const int idt = ty * 64 + tx; const int tx2 = idt%16; const int ty2 = idt/16; __shared__ 
double Bb[16][65]; __shared__ double Abs[64][17]; double xxA[4]; double xxB[4]; int trackA = offsetA + tx2 + __mul24( ibx + ty2*4, lda ); int trackB = offsetB + tx2 + __mul24( iby + ty2*4, ldb ); A+= trackA; B+= trackB; int tll = tx2; #pragma unroll for(int y=0; y<4; y++) Abs[ty2*4+y][tx2] = (tll<k)* fetch_x_A(trackA + y*lda); #pragma unroll for(int y=0; y<4; y++) Bb[tx2][ty2*4+y] = /* (tll<k)* */ fetch_x_B( trackB + y*ldb ); __syncthreads(); double Axs[4]; double Bxp[4]; double Cb[16] = {0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0}; int k1; for(k1=0; k1<(k-16); k1+=16) { tll +=16; B += 16; A += 16 ; trackA+=16 ; trackB+=16; #pragma unroll for(int y=0; y<4; y++) xxA[y] = (tll<k)* fetch_x_A(trackA + y*lda); #pragma unroll for(int y=0; y<4; y++) xxB[y] = /* (tll<k)* */ fetch_x_B(trackB + y*ldb); #pragma unroll for(int j1=0;j1<16;j1++) { #pragma unroll for(int y=0; y<4; y++) Axs[y] = Abs[tx2+y*16][j1]; #pragma unroll for(int y=0; y<4; y++) Bxp[y]= Bb[j1][ty2+y*16]; #pragma unroll for(int x=0; x<4; x++) { #pragma unroll for(int y=0; y<4; y++) { Cb[x*4+y] += Axs[x]*Bxp[y]; } } } __syncthreads(); #pragma unroll for(int y=0; y<4; y++) Abs[ty2*4+y][tx2] = xxA[y]; #pragma unroll for(int y=0; y<4; y++) Bb[tx2][ty2*4+y] =xxB[y]; __syncthreads(); } C += tx2 + ibx + __mul24 (ty2 + iby ,ldc); #pragma unroll for(int j1=0; j1<16; j1++) { #pragma unroll for(int y=0; y<4; y++) Axs[y] = Abs[tx2+y*16][j1]; #pragma unroll for(int y=0; y<4; y++) Bxp[y]= Bb[j1][ty2+y*16]; #pragma unroll for(int x=0; x<4; x++) { #pragma unroll for(int y=0; y<4; y++) { Cb[x*4+y] += Axs[x]*Bxp[y]; } } } int gy = iby+ty2; #pragma unroll for(int y=0;y<4;y++, gy+=16) { int gx = ibx+tx2; #pragma unroll for(int x=0;x<4;x++, gx+=16) { if (gx < m && gy < n) C[x*16] =alpha*Cb[y+x*4] + beta * C[x*16]; } C+=ldc*16; } } extern "C" __global__ void fermiDgemm_v2_kernel_TT(double *C, const double *A, const double *B, int m, int n, int k, int lda, int ldb, int ldc, double alpha, double beta, int offsetA, int offsetB) { const int tx = threadIdx.x; const int ty = threadIdx.y; const int iby = blockIdx.y * 64; const int ibx = blockIdx.x * 64; const int idt = ty * 64 + tx; const int tx2 = idt%16; const int ty2 = idt/16; __shared__ double Bb[16][65]; __shared__ double Abs[64][17]; double xxA[4]; double xxB[4]; int trackA = offsetA + __mul24( ibx + ty2, lda) + tx2; int trackB = offsetB + iby+ tx2 + __mul24(ty2, ldb); A += trackA; B += trackB; int tll = tx2; #pragma unroll for(int y=0; y<4; y++) Abs[ty2+16*y][tx2] = /* (tll<k)* */ fetch_x_A(trackA + lda*16*y); #pragma unroll for(int y=0; y<4; y++) Bb[ty2][tx2+16*y] = fetch_x_B(trackB+16*y); __syncthreads(); double Axs[4]; double Bxp[4]; double Cb[16] = {0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0}; int k1; for(k1=0; k1<(k-16); k1+=16) { tll+=16; A += 16; B += 16*ldb; trackA+=16; trackB+=16*ldb; #pragma unroll for( int y=0; y<4; y++) xxA[y] = /* (tll<k)* */ fetch_x_A(trackA + lda*y*16); #pragma unroll for( int y=0; y<4; y++) xxB[y] = fetch_x_B(trackB + 16*y); #pragma unroll for( int j1=0;j1<16;j1++) { #pragma unroll for( int y=0; y<4; y++) Axs[y] = Abs[tx2 + y*16][j1]; #pragma unroll for( int y=0; y<4; y++) Bxp[y]= Bb[j1][ty2 + y*16]; #pragma unroll for( int x=0; x<4; x++) #pragma unroll for( int y=0;y<4;y++) Cb[x*4+y] += Axs[x]*Bxp[y]; } __syncthreads(); #pragma unroll for( int y=0; y<4; y++) Abs[ty2 + 16*y][tx2] = xxA[y]; #pragma unroll for( int y=0; y<4; y++) Bb[ty2][tx2+y*16] = xxB[y]; __syncthreads(); } C += tx2 + ibx + __mul24 (ty2 + iby ,ldc); #pragma unroll for( int j1=0; j1<16; j1++) { #pragma unroll for( int y=0; 
y<4; y++) Axs[y] = Abs[tx2 + y*16][j1]; #pragma unroll for( int y=0; y<4; y++) Bxp[y]= Bb[j1][ty2 + y*16]; #pragma unroll for( int x=0; x<4; x++) #pragma unroll for( int y=0; y<4; y++) Cb[x*4+y] += Axs[x]*Bxp[y]; } int gy = iby + ty2; #pragma unroll for( int y=0; y<4; y++, gy+=16) { int gx = ibx + tx2; #pragma unroll for(int x=0; x<4; x++, gx+=16) { if (gx < m && gy < n) C[x*16] = alpha*Cb[y+x*4] + beta * C[x*16]; } C+=ldc*16; } } extern "C" __global__ void fermiDgemm_v2_kernel_NT(double *C, const double *A, const double *B, int m, int n, int k, int lda, int ldb, int ldc, double alpha, double beta, int offsetA, int offsetB) { const int tx = threadIdx.x; const int ty = threadIdx.y; const int iby = blockIdx.y * 64; const int ibx = blockIdx.x * 64; const int idt = ty * 64 + tx; const int tx2= idt%16; const int ty2= idt/16; __shared__ double Bb[16][65]; __shared__ double Abs[64][17]; double xxA[4]; double xxB[4]; int trackA = offsetA + ibx +__mul24(ty2, lda) + tx2 ; int trackB = offsetB + iby + tx2 + __mul24(ty2, ldb); A+= trackA; B += trackB; int tll = ty2; #pragma unroll for(int y=0; y<4; y++) Abs[tx2+ y*16][ty2] = /* (tll<k)* */ fetch_x_A(trackA + y*16); #pragma unroll for(int y=0; y<4; y++) Bb[ty2][tx2+16*y] = /* (tll<k)* */ fetch_x_B(trackB+16*y); __syncthreads(); double Axs[4]; double Bxp[4]; double Cb[16] = {0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0}; int k1; for(k1=0; k1<(k-16); k1+=16) { tll += 16; A += lda *16 ; B += 16*ldb; trackA+=16*lda ; trackB+=16*ldb; #pragma unroll for( int y=0; y<4; y++) xxA[y] = /* (tll<k)* */ fetch_x_A(trackA + y*16); #pragma unroll for( int y=0; y<4; y++) xxB[y] = /* (tll<k)* */ fetch_x_B( trackB + 16*y); #pragma unroll for( int j1=0;j1<16;j1++) { #pragma unroll for( int y=0; y<4; y++) Bxp[y]= Bb[j1][ty2 + y*16]; #pragma unroll for( int y=0; y<4; y++) Axs[y] = Abs[tx2 + y*16][j1] ; #pragma unroll for( int x=0; x<4; x++) #pragma unroll for( int y=0; y<4; y++) Cb[x*4+y] += Axs[x]*Bxp[y]; } __syncthreads(); #pragma unroll for( int y=0; y<4; y++) Abs[tx2 + y*16][ty2] = xxA[y]; #pragma unroll for( int y=0; y<4; y++) Bb[ty2][tx2+y*16] = xxB[y]; __syncthreads(); } C += tx2 + ibx + __mul24(ty2 + iby ,ldc); #pragma unroll for(int j1=0; j1<16; j1++) { #pragma unroll for( int y=0; y<4; y++) Bxp[y] = Bb[j1][ty2 + y*16]; #pragma unroll for( int y=0; y<4; y++) Axs[y] = Abs[tx2 + y*16][j1] ; #pragma unroll for( int x=0; x<4; x++) #pragma unroll for( int y=0;y<4;y++) Cb[x*4+y] += Axs[x]*Bxp[y]; } int gy = iby + ty2; #pragma unroll for( int y=0; y<4; y++, gy+=16) { int gx = ibx + tx2; #pragma unroll for(int x=0; x<4; x++, gx+=16) { if (gx < m && gy < n) C[x*16] = alpha*Cb[y + x*4] + beta * C[x*16]; } C+=ldc*16; } } extern "C" void magmablas_dgemm_fermi( char TRANSA, char TRANSB, int m , int n , int k , double alpha, const double *A, int lda, const double *B, int ldb, double beta, double *C, int ldc ) { /* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 Purpose ======= DGEMM performs one of the matrix-matrix operations C := alpha*op( A )*op( B ) + beta*C, where op( X ) is one of op( X ) = X or op( X ) = X', alpha and beta are scalars, and A, B and C are matrices, with op( A ) an m by k matrix, op( B ) a k by n matrix and C an m by n matrix. Parameters ========== TRANSA - CHARACTER*1. On entry, TRANSA specifies the form of op( A ) to be used in the matrix multiplication as follows: TRANSA = 'N' or 'n', op( A ) = A. TRANSA = 'T' or 't', op( A ) = A'. TRANSA = 'C' or 'c', op( A ) = A'. Unchanged on exit. 
TRANSB - CHARACTER*1. On entry, TRANSB specifies the form of op( B ) to be used in the matrix multiplication as follows: TRANSB = 'N' or 'n', op( B ) = B. TRANSB = 'T' or 't', op( B ) = B'. TRANSB = 'C' or 'c', op( B ) = B'. Unchanged on exit. M - INTEGER. On entry, M specifies the number of rows of the matrix op( A ) and of the matrix C. M must be at least zero. Unchanged on exit. N - INTEGER. On entry, N specifies the number of columns of the matrix op( B ) and the number of columns of the matrix C. N must be at least zero. Unchanged on exit. K - INTEGER. On entry, K specifies the number of columns of the matrix op( A ) and the number of rows of the matrix op( B ). K must be at least zero. Unchanged on exit. ALPHA - DOUBLE PRECISION. On entry, ALPHA specifies the scalar alpha. Unchanged on exit. A - DOUBLE PRECISION array of DIMENSION ( LDA, ka ), where ka is k when TRANSA = 'N' or 'n', and is m otherwise. Before entry with TRANSA = 'N' or 'n', the leading m by k part of the array A must contain the matrix A, otherwise the leading k by m part of the array A must contain the matrix A. Unchanged on exit. LDA - INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. When TRANSA = 'N' or 'n' then LDA must be at least max( 1, m ), otherwise LDA must be at least max( 1, k ). Unchanged on exit. B - DOUBLE PRECISION array of DIMENSION ( LDB, kb ), where kb is n when TRANSB = 'N' or 'n', and is k otherwise. Before entry with TRANSB = 'N' or 'n', the leading k by n part of the array B must contain the matrix B, otherwise the leading n by k part of the array B must contain the matrix B. Unchanged on exit. LDB - INTEGER. On entry, LDB specifies the first dimension of B as declared in the calling (sub) program. When TRANSB = 'N' or 'n' then LDB must be at least max( 1, k ), otherwise LDB must be at least max( 1, n ). Unchanged on exit. BETA - DOUBLE PRECISION. On entry, BETA specifies the scalar beta. When BETA is supplied as zero then C need not be set on input. Unchanged on exit. C - DOUBLE PRECISION array of DIMENSION ( LDC, n ). Before entry, the leading m by n part of the array C must contain the matrix C, except when beta is zero, in which case C need not be set on entry. On exit, the array C is overwritten by the m by n matrix ( alpha*op( A )*op( B ) + beta*C ). LDC - INTEGER. On entry, LDC specifies the first dimension of C as declared in the calling (sub) program. LDC must be at least max( 1, m ). Unchanged on exit. ===================================================================== */ if (m<=0 || n<=0 || k<=0) return; size_t offsetA = 0; size_t offsetB = 0; int TransA = 1, TransB = 1; if (TRANSA == 'N' || TRANSA == 'n') TransA = 0; if (TRANSB == 'N' || TRANSB == 'n') TransB = 0; size_t sizeA = (size_t) lda * (size_t) (!TransA ? k : m); size_t sizeB = (size_t) ldb * (size_t) (!TransB ? 
n : k); // size_t CUBLAS_MAX_1DBUF_SIZE = ((1 << 27) - 512) / 2; size_t CUBLAS_MAX_1DBUF_SIZE = ((1 << 27) - 512); if (sizeA>=CUBLAS_MAX_1DBUF_SIZE || sizeB>=CUBLAS_MAX_1DBUF_SIZE ) { // printf("Exceeding texuture limit (CUBLAS_MAX_1DBUF_SIZE=%ld), using cublasSgemm\n", CUBLAS_MAX_1DBUF_SIZE); cublasDgemm(TRANSA, TRANSB, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc); return; } cudaError_t errt; errt = cudaBindTexture(&offsetA, tex_x_double_A, (int2 *)A, sizeA * sizeof(A[0])); if( errt != cudaSuccess) { printf("cannot bind to texture\n"); return; } errt = cudaBindTexture(&offsetB, tex_x_double_B, (int2 *)B, sizeB * sizeof(B[0])); if( errt != cudaSuccess) { printf("cannot bind to texture\n"); return; } dim3 threads( 64, 4 ); dim3 grid(m/(64)+(m%(64)!=0),n/(64)+(n%(64)!=0)); offsetA = offsetA/sizeof(A[0]); offsetB = offsetB/sizeof(B[0]); if ( TransB ) if ( !TransA ) fermiDgemm_v2_kernel_NT<<< grid, threads, 0, magma_stream >>>(C, A, B, m, n, k, lda, ldb, ldc, alpha, beta, (int)offsetA, (int)offsetB); else fermiDgemm_v2_kernel_TT<<< grid, threads, 0, magma_stream >>>(C, A, B, m, n, k, lda, ldb, ldc, alpha, beta, (int)offsetA, (int)offsetB); else if ( !TransA ) fermiDgemm_v2_kernel_NN<<< grid, threads, 0, magma_stream >>>(C, A, B, m, n, k, lda, ldb, ldc, alpha, beta, (int)offsetA, (int)offsetB); else fermiDgemm_v2_kernel_TN<<< grid, threads, 0, magma_stream >>>(C, A, B, m, n, k, lda, ldb, ldc, alpha, beta, (int)offsetA, (int)offsetB); cudaUnbindTexture ( tex_x_double_A ) ; cudaUnbindTexture ( tex_x_double_B ) ; }
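// ----------------------------------------------------------------------------
// Illustrative sketch: launch geometry of the fermiDgemm_v2 kernels above.
// Each block covers a 64x64 tile of C with dim3(64,4) = 256 threads; idt =
// ty*64 + tx is split into tx2 = idt%16 and ty2 = idt/16, so the threads form
// a 16x16 grid and each thread accumulates a 4x4 register tile (Cb[16]) with a
// stride of 16 in both directions.  The helper below is hypothetical (not part
// of MAGMA); it only restates that arithmetic on the host side.
#include <cstdio>

static void sketch_fermi_dgemm_geometry(int m, int n)
{
    const int TILE = 64;                          // C tile per thread block
    const int grid_x = (m + TILE - 1) / TILE;     // same as m/64 + (m%64 != 0)
    const int grid_y = (n + TILE - 1) / TILE;
    const int threads_per_block = 64 * 4;         // dim3 threads(64, 4)
    const int c_elems_per_thread = 4 * 4;         // 16x16 threads x 4x4 tile = 64x64
    std::printf("grid %dx%d, %d threads/block, %d C elements/thread\n",
                grid_x, grid_y, threads_per_block, c_elems_per_thread);
}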
2662d01f1e41d77890073bc6852885e829e819cd.hip
// !!! This is a file automatically generated by hipify!!! /* * This program reads a matrix with Matrix Market format, * then it turns it into 2 objects : * - one matrix in CSR format * - one matrix in ELLPACK format * Then it proceeds the mulctiplication of this matrix with a vector using cuda */ #include <stdio.h> #include <stdlib.h> #include <stdbool.h> #include <string.h> #include <iostream> #include <string.h> #include <ctype.h> #include <hip/hip_runtime.h> // For CUDA runtime API #include <helper_cuda.h> // For checkCudaError macro #include <helper_timer.h> // For CUDA SDK timers //Simple dimension: define a 1D block structure #define BD 256 const dim3 BLOCK_DIM(BD); #define MM_MAX_LINE_LENGTH 1025 #define MatrixMarketBanner "%%MatrixMarket" #define MM_MAX_TOKEN_LENGTH 64 typedef char MM_typecode[4]; /********************* MM_typecode query fucntions ***************************/ #define mm_is_matrix(typecode) ((typecode)[0] == 'M') #define mm_is_sparse(typecode) ((typecode)[1] == 'C') #define mm_is_coordinate(typecode) ((typecode)[1] == 'C') #define mm_is_dense(typecode) ((typecode)[1] == 'A') #define mm_is_array(typecode) ((typecode)[1] == 'A') #define mm_is_complex(typecode) ((typecode)[2] == 'C') #define mm_is_real(typecode) ((typecode)[2] == 'R') #define mm_is_pattern(typecode) ((typecode)[2] == 'P') #define mm_is_integer(typecode) ((typecode)[2] == 'I') #define mm_is_symmetric(typecode) ((typecode)[3] == 'S') #define mm_is_general(typecode) ((typecode)[3] == 'G') #define mm_is_skew(typecode) ((typecode)[3] == 'K') #define mm_is_hermitian(typecode) ((typecode)[3] == 'H') int mm_is_valid(MM_typecode matcode); /* too complex for a macro */ /********************* MM_typecode modify fucntions ***************************/ #define mm_set_matrix(typecode) ((*typecode)[0] = 'M') #define mm_set_coordinate(typecode) ((*typecode)[1] = 'C') #define mm_set_array(typecode) ((*typecode)[1] = 'A') #define mm_set_dense(typecode) mm_set_array(typecode) #define mm_set_sparse(typecode) mm_set_coordinate(typecode) #define mm_set_complex(typecode) ((*typecode)[2] = 'C') #define mm_set_real(typecode) ((*typecode)[2] = 'R') #define mm_set_pattern(typecode) ((*typecode)[2] = 'P') #define mm_set_integer(typecode) ((*typecode)[2] = 'I') #define mm_set_symmetric(typecode) ((*typecode)[3] = 'S') #define mm_set_general(typecode) ((*typecode)[3] = 'G') #define mm_set_skew(typecode) ((*typecode)[3] = 'K') #define mm_set_hermitian(typecode) ((*typecode)[3] = 'H') #define mm_clear_typecode(typecode) ((*typecode)[0] = (*typecode)[1] = \ (*typecode)[2] = ' ', \ (*typecode)[3] = 'G') #define mm_initialize_typecode(typecode) mm_clear_typecode(typecode) /********************* Matrix Market error codes ***************************/ #define MM_COULD_NOT_READ_FILE 11 #define MM_PREMATURE_EOF 12 #define MM_NOT_MTX 13 #define MM_NO_HEADER 14 #define MM_UNSUPPORTED_TYPE 15 #define MM_LINE_TOO_LONG 16 #define MM_COULD_NOT_WRITE_FILE 17 /******************** Matrix Market internal definitions ******************** MM_matrix_typecode: 4-character sequence ojbect sparse/ data storage dense type scheme string position: [0] [1] [2] [3] Matrix typecode: M(atrix) C(oord) R(eal) G(eneral) A(array) C(omplex) H(ermitian) P(attern) S(ymmetric) I(nteger) K(kew) ***********************************************************************/ #define MM_MTX_STR "matrix" #define MM_ARRAY_STR "array" #define MM_DENSE_STR "array" #define MM_COORDINATE_STR "coordinate" #define MM_SPARSE_STR "coordinate" #define MM_COMPLEX_STR "complex" #define 
MM_REAL_STR "real" #define MM_INT_STR "integer" #define MM_GENERAL_STR "general" #define MM_SYMM_STR "symmetric" #define MM_HERM_STR "hermitian" #define MM_SKEW_STR "skew-symmetric" #define MM_PATTERN_STR "pattern" int mm_read_mtx_crd_size(FILE *f, int *M, int *N, int *nz) { char line[MM_MAX_LINE_LENGTH]; int num_items_read; /* set return null parameter values, in case we exit with errors */ *M = *N = *nz = 0; /* now continue scanning until you reach the end-of-comments */ do { if (fgets(line, MM_MAX_LINE_LENGTH, f) == NULL) return MM_PREMATURE_EOF; } while (line[0] == '%'); /* line[] is either blank or has M,N, nz */ if (sscanf(line, "%d %d %d", M, N, nz) == 3) return 0; else do { num_items_read = fscanf(f, "%d %d %d", M, N, nz); if (num_items_read == EOF) return MM_PREMATURE_EOF; } while (num_items_read != 3); return 0; } char *mm_typecode_to_str(MM_typecode matcode) { char buffer[MM_MAX_LINE_LENGTH]; char *types[4]; char *mm_strdup(const char *); int error = 0; /* check for MTX type */ if (mm_is_matrix(matcode)) types[0] = MM_MTX_STR; else error = 1; /* check for CRD or ARR matrix */ if (mm_is_sparse(matcode)) types[1] = MM_SPARSE_STR; else if (mm_is_dense(matcode)) types[1] = MM_DENSE_STR; else return NULL; /* check for element data type */ if (mm_is_real(matcode)) types[2] = MM_REAL_STR; else if (mm_is_complex(matcode)) types[2] = MM_COMPLEX_STR; else if (mm_is_pattern(matcode)) types[2] = MM_PATTERN_STR; else if (mm_is_integer(matcode)) types[2] = MM_INT_STR; else return NULL; /* check for symmetry type */ if (mm_is_general(matcode)) types[3] = MM_GENERAL_STR; else if (mm_is_symmetric(matcode)) types[3] = MM_SYMM_STR; else if (mm_is_hermitian(matcode)) types[3] = MM_HERM_STR; else if (mm_is_skew(matcode)) types[3] = MM_SKEW_STR; else return NULL; sprintf(buffer, "%s %s %s %s", types[0], types[1], types[2], types[3]); return mm_strdup(buffer); } int mm_read_banner(FILE *f, MM_typecode *matcode) { char line[MM_MAX_LINE_LENGTH]; char banner[MM_MAX_TOKEN_LENGTH]; char mtx[MM_MAX_TOKEN_LENGTH]; char crd[MM_MAX_TOKEN_LENGTH]; char data_type[MM_MAX_TOKEN_LENGTH]; char storage_scheme[MM_MAX_TOKEN_LENGTH]; char *p; mm_clear_typecode(matcode); if (fgets(line, MM_MAX_LINE_LENGTH, f) == NULL) return MM_PREMATURE_EOF; if (sscanf(line, "%s %s %s %s %s", banner, mtx, crd, data_type, storage_scheme) != 5) return MM_PREMATURE_EOF; for (p = mtx; *p != '\0'; *p = tolower(*p), p++) ; /* convert to lower case */ for (p = crd; *p != '\0'; *p = tolower(*p), p++) ; for (p = data_type; *p != '\0'; *p = tolower(*p), p++) ; for (p = storage_scheme; *p != '\0'; *p = tolower(*p), p++) ; /* check for banner */ if (strncmp(banner, MatrixMarketBanner, strlen(MatrixMarketBanner)) != 0) return MM_NO_HEADER; /* first field should be "mtx" */ if (strcmp(mtx, MM_MTX_STR) != 0) return MM_UNSUPPORTED_TYPE; mm_set_matrix(matcode); /* second field describes whether this is a sparse matrix (in coordinate storgae) or a dense array */ if (strcmp(crd, MM_SPARSE_STR) == 0) mm_set_sparse(matcode); else if (strcmp(crd, MM_DENSE_STR) == 0) mm_set_dense(matcode); else return MM_UNSUPPORTED_TYPE; /* third field */ if (strcmp(data_type, MM_REAL_STR) == 0) mm_set_real(matcode); else if (strcmp(data_type, MM_COMPLEX_STR) == 0) mm_set_complex(matcode); else if (strcmp(data_type, MM_PATTERN_STR) == 0) mm_set_pattern(matcode); else if (strcmp(data_type, MM_INT_STR) == 0) mm_set_integer(matcode); else return MM_UNSUPPORTED_TYPE; /* fourth field */ if (strcmp(storage_scheme, MM_GENERAL_STR) == 0) mm_set_general(matcode); else if 
(strcmp(storage_scheme, MM_SYMM_STR) == 0) mm_set_symmetric(matcode); else if (strcmp(storage_scheme, MM_HERM_STR) == 0) mm_set_hermitian(matcode); else if (strcmp(storage_scheme, MM_SKEW_STR) == 0) mm_set_skew(matcode); else return MM_UNSUPPORTED_TYPE; return 0; } int mm_read_unsymmetric_sparse(const char *fname, int *M_, int *N_, int *nz_, double **val_, int **I_, int **J_) { FILE *f; MM_typecode matcode; int M, N, nz; int i; double *val; int *I, *J; if ((f = fopen(fname, "r")) == NULL) return -1; if (mm_read_banner(f, &matcode) != 0) { printf("mm_read_unsymetric: Could not process Matrix Market banner "); printf(" in file [%s]\n", fname); return -1; } if (!(mm_is_real(matcode) && mm_is_matrix(matcode) && mm_is_sparse(matcode))) { fprintf(stderr, "Sorry, this application does not support "); fprintf(stderr, "Market Market type: [%s]\n", mm_typecode_to_str(matcode)); return -1; } /* find out size of sparse matrix: M, N, nz .... */ if (mm_read_mtx_crd_size(f, &M, &N, &nz) != 0) { fprintf(stderr, "read_unsymmetric_sparse(): could not parse matrix size.\n"); return -1; } *M_ = M; *N_ = N; *nz_ = nz; /* reseve memory for matrices */ I = (int *)malloc(nz * sizeof(int)); J = (int *)malloc(nz * sizeof(int)); val = (double *)malloc(nz * sizeof(double)); *val_ = val; *I_ = I; *J_ = J; /* NOTE: when reading in doubles, ANSI C requires the use of the "l" */ /* specifier as in "%lg", "%lf", "%le", otherwise errors will occur */ /* (ANSI C X3.159-1989, Sec. 4.9.6.2, p. 136 lines 13-15) */ for (i = 0; i < nz; i++) { fscanf(f, "%d %d %lg\n", &I[i], &J[i], &val[i]); I[i]--; /* adjust from 1-based to 0-based */ J[i]--; } fclose(f); return 0; } int mm_is_valid(MM_typecode matcode) { if (!mm_is_matrix(matcode)) return 0; if (mm_is_dense(matcode) && mm_is_pattern(matcode)) return 0; if (mm_is_real(matcode) && mm_is_hermitian(matcode)) return 0; if (mm_is_pattern(matcode) && (mm_is_hermitian(matcode) || mm_is_skew(matcode))) return 0; return 1; } int mm_write_mtx_crd_size(FILE *f, int M, int N, int nz) { if (fprintf(f, "%d %d %d\n", M, N, nz) != 3) return MM_COULD_NOT_WRITE_FILE; else return 0; } int mm_read_mtx_array_size(FILE *f, int *M, int *N) { char line[MM_MAX_LINE_LENGTH]; int num_items_read; /* set return null parameter values, in case we exit with errors */ *M = *N = 0; /* now continue scanning until you reach the end-of-comments */ do { if (fgets(line, MM_MAX_LINE_LENGTH, f) == NULL) return MM_PREMATURE_EOF; } while (line[0] == '%'); /* line[] is either blank or has M,N, nz */ if (sscanf(line, "%d %d", M, N) == 2) return 0; else /* we have a blank line */ do { num_items_read = fscanf(f, "%d %d", M, N); if (num_items_read == EOF) return MM_PREMATURE_EOF; } while (num_items_read != 2); return 0; } int mm_write_mtx_array_size(FILE *f, int M, int N) { if (fprintf(f, "%d %d\n", M, N) != 2) return MM_COULD_NOT_WRITE_FILE; else return 0; } /*-------------------------------------------------------------------------*/ /******************************************************************/ /* use when I[], J[], and val[]J, and val[] are already allocated */ /******************************************************************/ int mm_read_mtx_crd_data(FILE *f, int M, int N, int nz, int I[], int J[], double val[], MM_typecode matcode) { int i; if (mm_is_complex(matcode)) { for (i = 0; i < nz; i++) if (fscanf(f, "%d %d %lg %lg", &I[i], &J[i], &val[2 * i], &val[2 * i + 1]) != 4) return MM_PREMATURE_EOF; } else if (mm_is_real(matcode)) { for (i = 0; i < nz; i++) { if (fscanf(f, "%d %d %lg\n", &I[i], &J[i], &val[i]) != 
3) return MM_PREMATURE_EOF; } } else if (mm_is_pattern(matcode)) { for (i = 0; i < nz; i++) if (fscanf(f, "%d %d", &I[i], &J[i]) != 2) return MM_PREMATURE_EOF; } else return MM_UNSUPPORTED_TYPE; return 0; } int mm_read_mtx_crd_entry(FILE *f, int *I, int *J, double *real, double *imag, MM_typecode matcode) { if (mm_is_complex(matcode)) { if (fscanf(f, "%d %d %lg %lg", I, J, real, imag) != 4) return MM_PREMATURE_EOF; } else if (mm_is_real(matcode)) { if (fscanf(f, "%d %d %lg\n", I, J, real) != 3) return MM_PREMATURE_EOF; } else if (mm_is_pattern(matcode)) { if (fscanf(f, "%d %d", I, J) != 2) return MM_PREMATURE_EOF; } else return MM_UNSUPPORTED_TYPE; return 0; } /************************************************************************ mm_read_mtx_crd() fills M, N, nz, array of values, and return type code, e.g. 'MCRS' if matrix is complex, values[] is of size 2*nz, (nz pairs of real/imaginary values) ************************************************************************/ int mm_read_mtx_crd(char *fname, int *M, int *N, int *nz, int **I, int **J, double **val, MM_typecode *matcode) { int ret_code; FILE *f; if (strcmp(fname, "stdin") == 0) f = stdin; else if ((f = fopen(fname, "r")) == NULL) return MM_COULD_NOT_READ_FILE; if ((ret_code = mm_read_banner(f, matcode)) != 0) return ret_code; if (!(mm_is_valid(*matcode) && mm_is_sparse(*matcode) && mm_is_matrix(*matcode))) return MM_UNSUPPORTED_TYPE; if ((ret_code = mm_read_mtx_crd_size(f, M, N, nz)) != 0) return ret_code; *I = (int *)malloc(*nz * sizeof(int)); *J = (int *)malloc(*nz * sizeof(int)); *val = NULL; if (mm_is_complex(*matcode)) { *val = (double *)malloc(*nz * 2 * sizeof(double)); ret_code = mm_read_mtx_crd_data(f, *M, *N, *nz, *I, *J, *val, *matcode); if (ret_code != 0) return ret_code; } else if (mm_is_real(*matcode)) { *val = (double *)malloc(*nz * sizeof(double)); ret_code = mm_read_mtx_crd_data(f, *M, *N, *nz, *I, *J, *val, *matcode); if (ret_code != 0) return ret_code; } else if (mm_is_pattern(*matcode)) { ret_code = mm_read_mtx_crd_data(f, *M, *N, *nz, *I, *J, *val, *matcode); if (ret_code != 0) return ret_code; } if (f != stdin) fclose(f); return 0; } int mm_write_banner(FILE *f, MM_typecode matcode) { char *str = mm_typecode_to_str(matcode); int ret_code; ret_code = fprintf(f, "%s %s\n", MatrixMarketBanner, str); free(str); if (ret_code != 2) return MM_COULD_NOT_WRITE_FILE; else return 0; } int mm_write_mtx_crd(char fname[], int M, int N, int nz, int I[], int J[], double val[], MM_typecode matcode) { FILE *f; int i; if (strcmp(fname, "stdout") == 0) f = stdout; else if ((f = fopen(fname, "w")) == NULL) return MM_COULD_NOT_WRITE_FILE; /* print banner followed by typecode */ fprintf(f, "%s ", MatrixMarketBanner); fprintf(f, "%s\n", mm_typecode_to_str(matcode)); /* print matrix sizes and nonzeros */ fprintf(f, "%d %d %d\n", M, N, nz); /* print values */ if (mm_is_pattern(matcode)) for (i = 0; i < nz; i++) fprintf(f, "%d %d\n", I[i], J[i]); else if (mm_is_real(matcode)) for (i = 0; i < nz; i++) fprintf(f, "%d %d %20.16g\n", I[i], J[i], val[i]); else if (mm_is_complex(matcode)) for (i = 0; i < nz; i++) fprintf(f, "%d %d %20.16g %20.16g\n", I[i], J[i], val[2 * i], val[2 * i + 1]); else { if (f != stdout) fclose(f); return MM_UNSUPPORTED_TYPE; } if (f != stdout) fclose(f); return 0; } /** * Create a new copy of a string s. mm_strdup() is a common routine, but * not part of ANSI C, so it is included here. Used by mm_typecode_to_str(). 
* */ char *mm_strdup(const char *s) { int len = strlen(s); char *s2 = (char *)malloc((len + 1) * sizeof(char)); return strcpy(s2, s); } // GPU implementation of matrix_vector product: see if you can use // one thread per row. You'll need to get the addressing right! // each block of rows. __global__ void gpuMatrixVector(int rows, int cols, const float *A, const float *x, float *y) { int tr = threadIdx.x; int row = blockIdx.x * blockDim.x + tr; if (row < rows) { // Starting address of indexing within matrix A int idxm = row * cols; float t = 0.0; for (int ic = 0; ic < cols; ic++) { t += A[idxm] * x[ic]; idxm++; } y[row] = t; } } // Simple CPU implementation of matrix-vector product void MatrixVectorCSR(int M, const int *IRP, const int *JA, const double *AS, const double *x, double *y) { int i, j; double t; for (i = 0; i < M; ++i) { t = 0.0; for (j = IRP[i]; j < IRP[i + 1]; ++j) { t += AS[j] * x[JA[j]]; } y[i] = t; } } __global__ void MatrixVectorCSRParallel(int M, const int *IRP, const int *JA, const double *AS, const double *x, double *y) { int j; int tr = threadIdx.x; int m = blockIdx.x * blockDim.x + tr; if (m < M) { double sum = 0.0; for (j = IRP[m]; j < IRP[m + 1]; ++j) { sum += AS[j] * x[JA[j]]; } y[m] = sum; } } int main(int argc, char *argv[]) { int ret_code; MM_typecode matcode; FILE *f; int M, N, nz; int i, *I, *J; double *val; bool isCsrFormat; if (argc < 3) { fprintf(stderr, "Usage: %s [martix-market-filename] [1 for CSR, 2 for Ellpack]\n", argv[0]); exit(1); } else { if ((f = fopen(argv[1], "r")) == NULL) exit(1); if (strcmp(argv[2], "1") == 0) { isCsrFormat = true; } else if (strcmp(argv[2], "2") == 0) { isCsrFormat = false; } else { printf("Second argument should be 1 for CSR or 2 for ELLPACK\n"); exit(1); } } if (mm_read_banner(f, &matcode) != 0) { printf("Could not process Matrix Market banner.\n"); exit(1); } /* This is how one can screen matrix types if their application */ /* only supports a subset of the Matrix Market data types. */ if (mm_is_complex(matcode) && mm_is_matrix(matcode) && mm_is_sparse(matcode)) { printf("Sorry, this application does not support "); printf("Market Market type: [%s]\n", mm_typecode_to_str(matcode)); exit(1); } /* find out size of sparse matrix .... */ if ((ret_code = mm_read_mtx_crd_size(f, &M, &N, &nz)) != 0) exit(1); /* reseve memory for matrices */ I = (int *)malloc(nz * sizeof(int)); J = (int *)malloc(nz * sizeof(int)); val = (double *)malloc(nz * sizeof(double)); /* NOTE: when reading in doubles, ANSI C requires the use of the "l" */ /* specifier as in "%lg", "%lf", "%le", otherwise errors will occur */ /* (ANSI C X3.159-1989, Sec. 4.9.6.2, p. 
136 lines 13-15) */ for (i = 0; i < nz; i++) { fscanf(f, "%d %d %lg\n", &I[i], &J[i], &val[i]); I[i]--; /* adjust from 1-based to 0-based */ J[i]--; } if (f != stdin) fclose(f); /************************/ /* now write out matrix */ /************************/ mm_write_banner(stdout, matcode); mm_write_mtx_crd_size(stdout, M, N, nz); for (i = 0; i < nz; i++) fprintf(stdout, "%d %d %20.19g\n", I[i], J[i], val[i]); printf("Columns : %d, rows : %d, non-zeros : %d\n\n", M, N, nz); // CREATING VECTORS double *x = (double *)malloc(sizeof(double) * M); double *y = (double *)malloc(sizeof(double) * M); for (i = 0; i < M; i++) { x[i] = 100.0f * ((double)rand()) / RAND_MAX; } if (isCsrFormat == true) { /*************************/ /* CSR FORMAT CALCULATION*/ /*************************/ int *IRP = (int *)malloc((M + 1) * sizeof(int)); // ASSUMING MATLAB FIRST COLUMN INDEXING IRP[0] = 1; int index = 0; int local_row_nz = 1; for (i = 0; i < nz; i++) { if (I[i] == I[i + 1]) { local_row_nz++; } else { if (index <= M) { IRP[index + 1] = IRP[index] + local_row_nz; local_row_nz = 1; index++; } } } // ----------------------- Host memory initialisation ----------------------- // double *h_y_d = new double[M]; std::cout << "Matrix-vector product: single thread per row version " << std::endl; std::cout << "Test case: " << M << " x " << M << std::endl; // ---------------------- Device memory initialisation ---------------------- // // Allocate memory space on the device. int *d_IRP, *d_J; double *d_val, *d_x, *d_y; checkCudaErrors(hipMalloc((void **)&d_IRP, (M+1) * sizeof(int))); checkCudaErrors(hipMalloc((void **)&d_J, nz * sizeof(int))); checkCudaErrors(hipMalloc((void **)&d_val, nz * sizeof(double))); checkCudaErrors(hipMalloc((void **)&d_x, M * sizeof(double))); checkCudaErrors(hipMalloc((void **)&d_y, M * sizeof(double))); // Copy matrices from the host (CPU) to the device (GPU). checkCudaErrors(hipMemcpy(d_IRP, IRP, (M+1) * sizeof(int), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_J, J, nz * sizeof(int), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_val, val, nz * sizeof(double), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_x, x, M * sizeof(double), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_y, y, M * sizeof(double), hipMemcpyHostToDevice)); // ------------------------ Calculations on the CPU ------------------------- // float flopcnt = 2.e-6 * M; // Create the CUDA SDK timer. StopWatchInterface *timer = 0; sdkCreateTimer(&timer); timer->start(); // CpuMatrixVector(nrows, ncols, h_A, h_x, h_y); MatrixVectorCSR(M, IRP, J, val, x, y); timer->stop(); float cpuflops = flopcnt / timer->getTime(); std::cout << " CPU time: " << timer->getTime() << " ms." << " GFLOPS " << cpuflops << std::endl; // ------------------------ Calculations on the GPU ------------------------- // // Calculate the dimension of the grid of blocks (1D) necessary to cover // all rows. const dim3 GRID_DIM((nrows - 1 + BLOCK_DIM.x) / BLOCK_DIM.x, 1); timer->reset(); timer->start(); hipLaunchKernelGGL(( MatrixVectorCSRParallel), dim3(GRID_DIM), dim3(BLOCK_DIM), 0, 0, M, d_IRP, d_J, d_val, d_x, d_y); checkCudaErrors(hipDeviceSynchronize()); timer->stop(); float gpuflops = flopcnt / timer->getTime(); std::cout << " GPU time: " << timer->getTime() << " ms." << " GFLOPS " << gpuflops << std::endl; // Download the resulting vector d_y from the device and store it in h_y_d. // checkCudaErrors(hipMemcpy(h_y_d, d_y, nrows * sizeof(float), hipMemcpyDeviceToHost)); // // Now let's check if the results are the same. 
// float reldiff = 0.0f; // float diff = 0.0f; // for (int i = 0; i < M; ++i) // { // float maxabs = ::max(std::abs(y[i]), std::abs(h_y_d[i])); // if (maxabs == 0.0) // maxabs = 1.0; // reldiff = ::max(reldiff, std::abs(y[i] - h_y_d[i]) / maxabs); // diff = ::max(diff, std::abs(y[i] - h_y_d[i])); // } // std::cout << "Max diff = " << diff << " Max rel diff = " << reldiff << std::endl; // Rel diff should be as close as possible to unit roundoff; float // corresponds to IEEE single precision, so unit roundoff is // 1.19e-07 // // ------------------------------- Cleaning up ------------------------------ // delete timer; checkCudaErrors(hipFree(d_IRP)); checkCudaErrors(hipFree(d_J)); checkCudaErrors(hipFree(d_val)); checkCudaErrors(hipFree(d_x)); checkCudaErrors(hipFree(d_y)); delete[] h_A; delete[] h_x; delete[] h_y; delete[] h_y_d; } return 0; }
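// ----------------------------------------------------------------------------
// Illustrative sketch: building the CSR row pointer from the COO triplets.
// The IRP construction above assumes the entries are sorted by row, starts
// from a MATLAB-style 1-based IRP[0] = 1, and reads I[i + 1] on the final
// iteration (i == nz - 1).  A conventional 0-based build that tolerates
// unsorted input is a counting pass followed by a prefix sum, as sketched
// below; coo_rowptr_0based is a hypothetical name, not part of this file.
#include <stdlib.h>

static int *coo_rowptr_0based(int M, int nz, const int *I /* 0-based row ids */)
{
    int *IRP = (int *)calloc((size_t)M + 1, sizeof(int));
    if (IRP == NULL) return NULL;
    for (int e = 0; e < nz; e++)      /* count nonzeros in each row           */
        IRP[I[e] + 1]++;
    for (int r = 0; r < M; r++)       /* exclusive prefix sum -> row pointers */
        IRP[r + 1] += IRP[r];
    return IRP;                       /* IRP[0] == 0 and IRP[M] == nz         */
}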
2662d01f1e41d77890073bc6852885e829e819cd.cu
/* * This program reads a matrix with Matrix Market format, * then it turns it into 2 objects : * - one matrix in CSR format * - one matrix in ELLPACK format * Then it proceeds the mulctiplication of this matrix with a vector using cuda */ #include <stdio.h> #include <stdlib.h> #include <stdbool.h> #include <string.h> #include <iostream> #include <string.h> #include <ctype.h> #include <cuda_runtime.h> // For CUDA runtime API #include <helper_cuda.h> // For checkCudaError macro #include <helper_timer.h> // For CUDA SDK timers //Simple dimension: define a 1D block structure #define BD 256 const dim3 BLOCK_DIM(BD); #define MM_MAX_LINE_LENGTH 1025 #define MatrixMarketBanner "%%MatrixMarket" #define MM_MAX_TOKEN_LENGTH 64 typedef char MM_typecode[4]; /********************* MM_typecode query fucntions ***************************/ #define mm_is_matrix(typecode) ((typecode)[0] == 'M') #define mm_is_sparse(typecode) ((typecode)[1] == 'C') #define mm_is_coordinate(typecode) ((typecode)[1] == 'C') #define mm_is_dense(typecode) ((typecode)[1] == 'A') #define mm_is_array(typecode) ((typecode)[1] == 'A') #define mm_is_complex(typecode) ((typecode)[2] == 'C') #define mm_is_real(typecode) ((typecode)[2] == 'R') #define mm_is_pattern(typecode) ((typecode)[2] == 'P') #define mm_is_integer(typecode) ((typecode)[2] == 'I') #define mm_is_symmetric(typecode) ((typecode)[3] == 'S') #define mm_is_general(typecode) ((typecode)[3] == 'G') #define mm_is_skew(typecode) ((typecode)[3] == 'K') #define mm_is_hermitian(typecode) ((typecode)[3] == 'H') int mm_is_valid(MM_typecode matcode); /* too complex for a macro */ /********************* MM_typecode modify fucntions ***************************/ #define mm_set_matrix(typecode) ((*typecode)[0] = 'M') #define mm_set_coordinate(typecode) ((*typecode)[1] = 'C') #define mm_set_array(typecode) ((*typecode)[1] = 'A') #define mm_set_dense(typecode) mm_set_array(typecode) #define mm_set_sparse(typecode) mm_set_coordinate(typecode) #define mm_set_complex(typecode) ((*typecode)[2] = 'C') #define mm_set_real(typecode) ((*typecode)[2] = 'R') #define mm_set_pattern(typecode) ((*typecode)[2] = 'P') #define mm_set_integer(typecode) ((*typecode)[2] = 'I') #define mm_set_symmetric(typecode) ((*typecode)[3] = 'S') #define mm_set_general(typecode) ((*typecode)[3] = 'G') #define mm_set_skew(typecode) ((*typecode)[3] = 'K') #define mm_set_hermitian(typecode) ((*typecode)[3] = 'H') #define mm_clear_typecode(typecode) ((*typecode)[0] = (*typecode)[1] = \ (*typecode)[2] = ' ', \ (*typecode)[3] = 'G') #define mm_initialize_typecode(typecode) mm_clear_typecode(typecode) /********************* Matrix Market error codes ***************************/ #define MM_COULD_NOT_READ_FILE 11 #define MM_PREMATURE_EOF 12 #define MM_NOT_MTX 13 #define MM_NO_HEADER 14 #define MM_UNSUPPORTED_TYPE 15 #define MM_LINE_TOO_LONG 16 #define MM_COULD_NOT_WRITE_FILE 17 /******************** Matrix Market internal definitions ******************** MM_matrix_typecode: 4-character sequence ojbect sparse/ data storage dense type scheme string position: [0] [1] [2] [3] Matrix typecode: M(atrix) C(oord) R(eal) G(eneral) A(array) C(omplex) H(ermitian) P(attern) S(ymmetric) I(nteger) K(kew) ***********************************************************************/ #define MM_MTX_STR "matrix" #define MM_ARRAY_STR "array" #define MM_DENSE_STR "array" #define MM_COORDINATE_STR "coordinate" #define MM_SPARSE_STR "coordinate" #define MM_COMPLEX_STR "complex" #define MM_REAL_STR "real" #define MM_INT_STR "integer" #define MM_GENERAL_STR 
"general" #define MM_SYMM_STR "symmetric" #define MM_HERM_STR "hermitian" #define MM_SKEW_STR "skew-symmetric" #define MM_PATTERN_STR "pattern" int mm_read_mtx_crd_size(FILE *f, int *M, int *N, int *nz) { char line[MM_MAX_LINE_LENGTH]; int num_items_read; /* set return null parameter values, in case we exit with errors */ *M = *N = *nz = 0; /* now continue scanning until you reach the end-of-comments */ do { if (fgets(line, MM_MAX_LINE_LENGTH, f) == NULL) return MM_PREMATURE_EOF; } while (line[0] == '%'); /* line[] is either blank or has M,N, nz */ if (sscanf(line, "%d %d %d", M, N, nz) == 3) return 0; else do { num_items_read = fscanf(f, "%d %d %d", M, N, nz); if (num_items_read == EOF) return MM_PREMATURE_EOF; } while (num_items_read != 3); return 0; } char *mm_typecode_to_str(MM_typecode matcode) { char buffer[MM_MAX_LINE_LENGTH]; char *types[4]; char *mm_strdup(const char *); int error = 0; /* check for MTX type */ if (mm_is_matrix(matcode)) types[0] = MM_MTX_STR; else error = 1; /* check for CRD or ARR matrix */ if (mm_is_sparse(matcode)) types[1] = MM_SPARSE_STR; else if (mm_is_dense(matcode)) types[1] = MM_DENSE_STR; else return NULL; /* check for element data type */ if (mm_is_real(matcode)) types[2] = MM_REAL_STR; else if (mm_is_complex(matcode)) types[2] = MM_COMPLEX_STR; else if (mm_is_pattern(matcode)) types[2] = MM_PATTERN_STR; else if (mm_is_integer(matcode)) types[2] = MM_INT_STR; else return NULL; /* check for symmetry type */ if (mm_is_general(matcode)) types[3] = MM_GENERAL_STR; else if (mm_is_symmetric(matcode)) types[3] = MM_SYMM_STR; else if (mm_is_hermitian(matcode)) types[3] = MM_HERM_STR; else if (mm_is_skew(matcode)) types[3] = MM_SKEW_STR; else return NULL; sprintf(buffer, "%s %s %s %s", types[0], types[1], types[2], types[3]); return mm_strdup(buffer); } int mm_read_banner(FILE *f, MM_typecode *matcode) { char line[MM_MAX_LINE_LENGTH]; char banner[MM_MAX_TOKEN_LENGTH]; char mtx[MM_MAX_TOKEN_LENGTH]; char crd[MM_MAX_TOKEN_LENGTH]; char data_type[MM_MAX_TOKEN_LENGTH]; char storage_scheme[MM_MAX_TOKEN_LENGTH]; char *p; mm_clear_typecode(matcode); if (fgets(line, MM_MAX_LINE_LENGTH, f) == NULL) return MM_PREMATURE_EOF; if (sscanf(line, "%s %s %s %s %s", banner, mtx, crd, data_type, storage_scheme) != 5) return MM_PREMATURE_EOF; for (p = mtx; *p != '\0'; *p = tolower(*p), p++) ; /* convert to lower case */ for (p = crd; *p != '\0'; *p = tolower(*p), p++) ; for (p = data_type; *p != '\0'; *p = tolower(*p), p++) ; for (p = storage_scheme; *p != '\0'; *p = tolower(*p), p++) ; /* check for banner */ if (strncmp(banner, MatrixMarketBanner, strlen(MatrixMarketBanner)) != 0) return MM_NO_HEADER; /* first field should be "mtx" */ if (strcmp(mtx, MM_MTX_STR) != 0) return MM_UNSUPPORTED_TYPE; mm_set_matrix(matcode); /* second field describes whether this is a sparse matrix (in coordinate storgae) or a dense array */ if (strcmp(crd, MM_SPARSE_STR) == 0) mm_set_sparse(matcode); else if (strcmp(crd, MM_DENSE_STR) == 0) mm_set_dense(matcode); else return MM_UNSUPPORTED_TYPE; /* third field */ if (strcmp(data_type, MM_REAL_STR) == 0) mm_set_real(matcode); else if (strcmp(data_type, MM_COMPLEX_STR) == 0) mm_set_complex(matcode); else if (strcmp(data_type, MM_PATTERN_STR) == 0) mm_set_pattern(matcode); else if (strcmp(data_type, MM_INT_STR) == 0) mm_set_integer(matcode); else return MM_UNSUPPORTED_TYPE; /* fourth field */ if (strcmp(storage_scheme, MM_GENERAL_STR) == 0) mm_set_general(matcode); else if (strcmp(storage_scheme, MM_SYMM_STR) == 0) mm_set_symmetric(matcode); else if 
(strcmp(storage_scheme, MM_HERM_STR) == 0) mm_set_hermitian(matcode); else if (strcmp(storage_scheme, MM_SKEW_STR) == 0) mm_set_skew(matcode); else return MM_UNSUPPORTED_TYPE; return 0; } int mm_read_unsymmetric_sparse(const char *fname, int *M_, int *N_, int *nz_, double **val_, int **I_, int **J_) { FILE *f; MM_typecode matcode; int M, N, nz; int i; double *val; int *I, *J; if ((f = fopen(fname, "r")) == NULL) return -1; if (mm_read_banner(f, &matcode) != 0) { printf("mm_read_unsymetric: Could not process Matrix Market banner "); printf(" in file [%s]\n", fname); return -1; } if (!(mm_is_real(matcode) && mm_is_matrix(matcode) && mm_is_sparse(matcode))) { fprintf(stderr, "Sorry, this application does not support "); fprintf(stderr, "Market Market type: [%s]\n", mm_typecode_to_str(matcode)); return -1; } /* find out size of sparse matrix: M, N, nz .... */ if (mm_read_mtx_crd_size(f, &M, &N, &nz) != 0) { fprintf(stderr, "read_unsymmetric_sparse(): could not parse matrix size.\n"); return -1; } *M_ = M; *N_ = N; *nz_ = nz; /* reseve memory for matrices */ I = (int *)malloc(nz * sizeof(int)); J = (int *)malloc(nz * sizeof(int)); val = (double *)malloc(nz * sizeof(double)); *val_ = val; *I_ = I; *J_ = J; /* NOTE: when reading in doubles, ANSI C requires the use of the "l" */ /* specifier as in "%lg", "%lf", "%le", otherwise errors will occur */ /* (ANSI C X3.159-1989, Sec. 4.9.6.2, p. 136 lines 13-15) */ for (i = 0; i < nz; i++) { fscanf(f, "%d %d %lg\n", &I[i], &J[i], &val[i]); I[i]--; /* adjust from 1-based to 0-based */ J[i]--; } fclose(f); return 0; } int mm_is_valid(MM_typecode matcode) { if (!mm_is_matrix(matcode)) return 0; if (mm_is_dense(matcode) && mm_is_pattern(matcode)) return 0; if (mm_is_real(matcode) && mm_is_hermitian(matcode)) return 0; if (mm_is_pattern(matcode) && (mm_is_hermitian(matcode) || mm_is_skew(matcode))) return 0; return 1; } int mm_write_mtx_crd_size(FILE *f, int M, int N, int nz) { if (fprintf(f, "%d %d %d\n", M, N, nz) != 3) return MM_COULD_NOT_WRITE_FILE; else return 0; } int mm_read_mtx_array_size(FILE *f, int *M, int *N) { char line[MM_MAX_LINE_LENGTH]; int num_items_read; /* set return null parameter values, in case we exit with errors */ *M = *N = 0; /* now continue scanning until you reach the end-of-comments */ do { if (fgets(line, MM_MAX_LINE_LENGTH, f) == NULL) return MM_PREMATURE_EOF; } while (line[0] == '%'); /* line[] is either blank or has M,N, nz */ if (sscanf(line, "%d %d", M, N) == 2) return 0; else /* we have a blank line */ do { num_items_read = fscanf(f, "%d %d", M, N); if (num_items_read == EOF) return MM_PREMATURE_EOF; } while (num_items_read != 2); return 0; } int mm_write_mtx_array_size(FILE *f, int M, int N) { if (fprintf(f, "%d %d\n", M, N) != 2) return MM_COULD_NOT_WRITE_FILE; else return 0; } /*-------------------------------------------------------------------------*/ /******************************************************************/ /* use when I[], J[], and val[]J, and val[] are already allocated */ /******************************************************************/ int mm_read_mtx_crd_data(FILE *f, int M, int N, int nz, int I[], int J[], double val[], MM_typecode matcode) { int i; if (mm_is_complex(matcode)) { for (i = 0; i < nz; i++) if (fscanf(f, "%d %d %lg %lg", &I[i], &J[i], &val[2 * i], &val[2 * i + 1]) != 4) return MM_PREMATURE_EOF; } else if (mm_is_real(matcode)) { for (i = 0; i < nz; i++) { if (fscanf(f, "%d %d %lg\n", &I[i], &J[i], &val[i]) != 3) return MM_PREMATURE_EOF; } } else if (mm_is_pattern(matcode)) { for (i = 
0; i < nz; i++) if (fscanf(f, "%d %d", &I[i], &J[i]) != 2) return MM_PREMATURE_EOF; } else return MM_UNSUPPORTED_TYPE; return 0; } int mm_read_mtx_crd_entry(FILE *f, int *I, int *J, double *real, double *imag, MM_typecode matcode) { if (mm_is_complex(matcode)) { if (fscanf(f, "%d %d %lg %lg", I, J, real, imag) != 4) return MM_PREMATURE_EOF; } else if (mm_is_real(matcode)) { if (fscanf(f, "%d %d %lg\n", I, J, real) != 3) return MM_PREMATURE_EOF; } else if (mm_is_pattern(matcode)) { if (fscanf(f, "%d %d", I, J) != 2) return MM_PREMATURE_EOF; } else return MM_UNSUPPORTED_TYPE; return 0; } /************************************************************************ mm_read_mtx_crd() fills M, N, nz, array of values, and return type code, e.g. 'MCRS' if matrix is complex, values[] is of size 2*nz, (nz pairs of real/imaginary values) ************************************************************************/ int mm_read_mtx_crd(char *fname, int *M, int *N, int *nz, int **I, int **J, double **val, MM_typecode *matcode) { int ret_code; FILE *f; if (strcmp(fname, "stdin") == 0) f = stdin; else if ((f = fopen(fname, "r")) == NULL) return MM_COULD_NOT_READ_FILE; if ((ret_code = mm_read_banner(f, matcode)) != 0) return ret_code; if (!(mm_is_valid(*matcode) && mm_is_sparse(*matcode) && mm_is_matrix(*matcode))) return MM_UNSUPPORTED_TYPE; if ((ret_code = mm_read_mtx_crd_size(f, M, N, nz)) != 0) return ret_code; *I = (int *)malloc(*nz * sizeof(int)); *J = (int *)malloc(*nz * sizeof(int)); *val = NULL; if (mm_is_complex(*matcode)) { *val = (double *)malloc(*nz * 2 * sizeof(double)); ret_code = mm_read_mtx_crd_data(f, *M, *N, *nz, *I, *J, *val, *matcode); if (ret_code != 0) return ret_code; } else if (mm_is_real(*matcode)) { *val = (double *)malloc(*nz * sizeof(double)); ret_code = mm_read_mtx_crd_data(f, *M, *N, *nz, *I, *J, *val, *matcode); if (ret_code != 0) return ret_code; } else if (mm_is_pattern(*matcode)) { ret_code = mm_read_mtx_crd_data(f, *M, *N, *nz, *I, *J, *val, *matcode); if (ret_code != 0) return ret_code; } if (f != stdin) fclose(f); return 0; } int mm_write_banner(FILE *f, MM_typecode matcode) { char *str = mm_typecode_to_str(matcode); int ret_code; ret_code = fprintf(f, "%s %s\n", MatrixMarketBanner, str); free(str); if (ret_code != 2) return MM_COULD_NOT_WRITE_FILE; else return 0; } int mm_write_mtx_crd(char fname[], int M, int N, int nz, int I[], int J[], double val[], MM_typecode matcode) { FILE *f; int i; if (strcmp(fname, "stdout") == 0) f = stdout; else if ((f = fopen(fname, "w")) == NULL) return MM_COULD_NOT_WRITE_FILE; /* print banner followed by typecode */ fprintf(f, "%s ", MatrixMarketBanner); fprintf(f, "%s\n", mm_typecode_to_str(matcode)); /* print matrix sizes and nonzeros */ fprintf(f, "%d %d %d\n", M, N, nz); /* print values */ if (mm_is_pattern(matcode)) for (i = 0; i < nz; i++) fprintf(f, "%d %d\n", I[i], J[i]); else if (mm_is_real(matcode)) for (i = 0; i < nz; i++) fprintf(f, "%d %d %20.16g\n", I[i], J[i], val[i]); else if (mm_is_complex(matcode)) for (i = 0; i < nz; i++) fprintf(f, "%d %d %20.16g %20.16g\n", I[i], J[i], val[2 * i], val[2 * i + 1]); else { if (f != stdout) fclose(f); return MM_UNSUPPORTED_TYPE; } if (f != stdout) fclose(f); return 0; } /** * Create a new copy of a string s. mm_strdup() is a common routine, but * not part of ANSI C, so it is included here. Used by mm_typecode_to_str(). 
* */ char *mm_strdup(const char *s) { int len = strlen(s); char *s2 = (char *)malloc((len + 1) * sizeof(char)); return strcpy(s2, s); } // GPU implementation of matrix_vector product: see if you can use // one thread per row. You'll need to get the addressing right! // each block of rows. __global__ void gpuMatrixVector(int rows, int cols, const float *A, const float *x, float *y) { int tr = threadIdx.x; int row = blockIdx.x * blockDim.x + tr; if (row < rows) { // Starting address of indexing within matrix A int idxm = row * cols; float t = 0.0; for (int ic = 0; ic < cols; ic++) { t += A[idxm] * x[ic]; idxm++; } y[row] = t; } } // Simple CPU implementation of matrix-vector product void MatrixVectorCSR(int M, const int *IRP, const int *JA, const double *AS, const double *x, double *y) { int i, j; double t; for (i = 0; i < M; ++i) { t = 0.0; for (j = IRP[i]; j < IRP[i + 1]; ++j) { t += AS[j] * x[JA[j]]; } y[i] = t; } } __global__ void MatrixVectorCSRParallel(int M, const int *IRP, const int *JA, const double *AS, const double *x, double *y) { int j; int tr = threadIdx.x; int m = blockIdx.x * blockDim.x + tr; if (m < M) { double sum = 0.0; for (j = IRP[m]; j < IRP[m + 1]; ++j) { sum += AS[j] * x[JA[j]]; } y[m] = sum; } } int main(int argc, char *argv[]) { int ret_code; MM_typecode matcode; FILE *f; int M, N, nz; int i, *I, *J; double *val; bool isCsrFormat; if (argc < 3) { fprintf(stderr, "Usage: %s [martix-market-filename] [1 for CSR, 2 for Ellpack]\n", argv[0]); exit(1); } else { if ((f = fopen(argv[1], "r")) == NULL) exit(1); if (strcmp(argv[2], "1") == 0) { isCsrFormat = true; } else if (strcmp(argv[2], "2") == 0) { isCsrFormat = false; } else { printf("Second argument should be 1 for CSR or 2 for ELLPACK\n"); exit(1); } } if (mm_read_banner(f, &matcode) != 0) { printf("Could not process Matrix Market banner.\n"); exit(1); } /* This is how one can screen matrix types if their application */ /* only supports a subset of the Matrix Market data types. */ if (mm_is_complex(matcode) && mm_is_matrix(matcode) && mm_is_sparse(matcode)) { printf("Sorry, this application does not support "); printf("Market Market type: [%s]\n", mm_typecode_to_str(matcode)); exit(1); } /* find out size of sparse matrix .... */ if ((ret_code = mm_read_mtx_crd_size(f, &M, &N, &nz)) != 0) exit(1); /* reseve memory for matrices */ I = (int *)malloc(nz * sizeof(int)); J = (int *)malloc(nz * sizeof(int)); val = (double *)malloc(nz * sizeof(double)); /* NOTE: when reading in doubles, ANSI C requires the use of the "l" */ /* specifier as in "%lg", "%lf", "%le", otherwise errors will occur */ /* (ANSI C X3.159-1989, Sec. 4.9.6.2, p. 
136 lines 13-15) */ for (i = 0; i < nz; i++) { fscanf(f, "%d %d %lg\n", &I[i], &J[i], &val[i]); I[i]--; /* adjust from 1-based to 0-based */ J[i]--; } if (f != stdin) fclose(f); /************************/ /* now write out matrix */ /************************/ mm_write_banner(stdout, matcode); mm_write_mtx_crd_size(stdout, M, N, nz); for (i = 0; i < nz; i++) fprintf(stdout, "%d %d %20.19g\n", I[i], J[i], val[i]); printf("Columns : %d, rows : %d, non-zeros : %d\n\n", M, N, nz); // CREATING VECTORS double *x = (double *)malloc(sizeof(double) * M); double *y = (double *)malloc(sizeof(double) * M); for (i = 0; i < M; i++) { x[i] = 100.0f * ((double)rand()) / RAND_MAX; } if (isCsrFormat == true) { /*************************/ /* CSR FORMAT CALCULATION*/ /*************************/ int *IRP = (int *)malloc((M + 1) * sizeof(int)); // ASSUMING MATLAB FIRST COLUMN INDEXING IRP[0] = 1; int index = 0; int local_row_nz = 1; for (i = 0; i < nz; i++) { if (I[i] == I[i + 1]) { local_row_nz++; } else { if (index <= M) { IRP[index + 1] = IRP[index] + local_row_nz; local_row_nz = 1; index++; } } } // ----------------------- Host memory initialisation ----------------------- // double *h_y_d = new double[M]; std::cout << "Matrix-vector product: single thread per row version " << std::endl; std::cout << "Test case: " << M << " x " << M << std::endl; // ---------------------- Device memory initialisation ---------------------- // // Allocate memory space on the device. int *d_IRP, *d_J; double *d_val, *d_x, *d_y; checkCudaErrors(cudaMalloc((void **)&d_IRP, (M+1) * sizeof(int))); checkCudaErrors(cudaMalloc((void **)&d_J, nz * sizeof(int))); checkCudaErrors(cudaMalloc((void **)&d_val, nz * sizeof(double))); checkCudaErrors(cudaMalloc((void **)&d_x, M * sizeof(double))); checkCudaErrors(cudaMalloc((void **)&d_y, M * sizeof(double))); // Copy matrices from the host (CPU) to the device (GPU). checkCudaErrors(cudaMemcpy(d_IRP, IRP, (M+1) * sizeof(int), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_J, J, nz * sizeof(int), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_val, val, nz * sizeof(double), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_x, x, M * sizeof(double), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_y, y, M * sizeof(double), cudaMemcpyHostToDevice)); // ------------------------ Calculations on the CPU ------------------------- // float flopcnt = 2.e-6 * M; // Create the CUDA SDK timer. StopWatchInterface *timer = 0; sdkCreateTimer(&timer); timer->start(); // CpuMatrixVector(nrows, ncols, h_A, h_x, h_y); MatrixVectorCSR(M, IRP, J, val, x, y); timer->stop(); float cpuflops = flopcnt / timer->getTime(); std::cout << " CPU time: " << timer->getTime() << " ms." << " GFLOPS " << cpuflops << std::endl; // ------------------------ Calculations on the GPU ------------------------- // // Calculate the dimension of the grid of blocks (1D) necessary to cover // all rows. const dim3 GRID_DIM((nrows - 1 + BLOCK_DIM.x) / BLOCK_DIM.x, 1); timer->reset(); timer->start(); MatrixVectorCSRParallel<<<GRID_DIM, BLOCK_DIM>>>(M, d_IRP, d_J, d_val, d_x, d_y); checkCudaErrors(cudaDeviceSynchronize()); timer->stop(); float gpuflops = flopcnt / timer->getTime(); std::cout << " GPU time: " << timer->getTime() << " ms." << " GFLOPS " << gpuflops << std::endl; // Download the resulting vector d_y from the device and store it in h_y_d. // checkCudaErrors(cudaMemcpy(h_y_d, d_y, nrows * sizeof(float), cudaMemcpyDeviceToHost)); // // Now let's check if the results are the same. 
// float reldiff = 0.0f; // float diff = 0.0f; // for (int i = 0; i < M; ++i) // { // float maxabs = std::max(std::abs(y[i]), std::abs(h_y_d[i])); // if (maxabs == 0.0) // maxabs = 1.0; // reldiff = std::max(reldiff, std::abs(y[i] - h_y_d[i]) / maxabs); // diff = std::max(diff, std::abs(y[i] - h_y_d[i])); // } // std::cout << "Max diff = " << diff << " Max rel diff = " << reldiff << std::endl; // Rel diff should be as close as possible to unit roundoff; float // corresponds to IEEE single precision, so unit roundoff is // 1.19e-07 // // ------------------------------- Cleaning up ------------------------------ // delete timer; checkCudaErrors(cudaFree(d_IRP)); checkCudaErrors(cudaFree(d_J)); checkCudaErrors(cudaFree(d_val)); checkCudaErrors(cudaFree(d_x)); checkCudaErrors(cudaFree(d_y)); delete[] h_A; delete[] h_x; delete[] h_y; delete[] h_y_d; } return 0; }
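// ----------------------------------------------------------------------------
// Illustrative sketch: the GPU launch configuration in both versions above
// computes GRID_DIM from `nrows`, and the cleanup frees h_A/h_x/h_y, none of
// which are declared in this file (they appear to be leftovers from the dense
// sample the code was adapted from).  With one thread per CSR row the grid
// only depends on M and the block size BD; csr_spmv_grid below is a
// hypothetical helper, not part of the original program.
#include <cuda_runtime.h>

static dim3 csr_spmv_grid(int M, int block_dim /* e.g. BD == 256 */)
{
    /* ceil(M / block_dim) blocks of block_dim threads, 1-D grid */
    return dim3((unsigned int)((M + block_dim - 1) / block_dim), 1, 1);
}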
0295a52446d28350c583f5cad22d409e8fffb0ce.hip
// !!! This is a file automatically generated by hipify!!!
#include "hb_math_functions.h"

void hb_gpu_memcpy(const size_t N, const void *X, void *Y) {
  if (X != Y) {
    CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault));
  }
}
0295a52446d28350c583f5cad22d409e8fffb0ce.cu
#include "hb_math_functions.h" void hb_gpu_memcpy(const size_t N, const void *X, void *Y) { if (X != Y) { CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); } }
bb163c16cf0f2853b12374a4c0d6e9cbc63a7191.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Cuckoo Cycle, a memory-hard proof-of-work // Copyright (c) 2013-2016 John Tromp // The edge-trimming memory optimization is due to Dave Andersen // http://da-data.blogspot.com/2014/03/a-public-review-of-cuckoo-cycle.html #include <stdint.h> #include <string.h> #include "cuckoo.h" #ifndef MAXSOLS #define MAXSOLS 1 #endif #define MAXINT (1<<31-1) #if SIZESHIFT <= 32 typedef u32 nonce_t; typedef u32 node_t; typedef uint2 edge_t; #define make_edge make_uint2 #else typedef u64 nonce_t; typedef u64 node_t; typedef ulong2 edge_t; #define make_edge make_ulong2 #endif #include <openssl/sha.h> typedef unsigned long long ull; static __device__ __forceinline__ bool operator== (edge_t a, edge_t b) { return a.x == b.x && a.y == b.y; } // d(evice s)ipnode #if (__CUDA_ARCH__ >= 320) // redefine ROTL to use funnel shifter, 3% speed gain static __device__ __forceinline__ uint2 operator^ (uint2 a, uint2 b) { return make_uint2(a.x ^ b.x, a.y ^ b.y); } static __device__ __forceinline__ void operator^= (uint2 &a, uint2 b) { a.x ^= b.x, a.y ^= b.y; } static __device__ __forceinline__ void operator+= (uint2 &a, uint2 b) { asm("{\n\tadd.cc.u32 %0,%2,%4;\n\taddc.u32 %1,%3,%5;\n\t}\n\t" : "=r"(a.x), "=r"(a.y) : "r"(a.x), "r"(a.y), "r"(b.x), "r"(b.y)); } #undef ROTL __inline__ __device__ uint2 ROTL(const uint2 a, const int offset) { uint2 result; if (offset >= 32) { asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.x) : "r"(a.x), "r"(a.y), "r"(offset)); asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.y) : "r"(a.y), "r"(a.x), "r"(offset)); } else { asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.x) : "r"(a.y), "r"(a.x), "r"(offset)); asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.y) : "r"(a.x), "r"(a.y), "r"(offset)); } return result; } __device__ __forceinline__ uint2 vectorize(const uint64_t x) { uint2 result; asm("mov.b64 {%0,%1},%2; \n\t" : "=r"(result.x), "=r"(result.y) : "l"(x)); return result; } __device__ __forceinline__ uint64_t devectorize(uint2 x) { uint64_t result; asm("mov.b64 %0,{%1,%2}; \n\t" : "=l"(result) : "r"(x.x), "r"(x.y)); return result; } __device__ node_t dipnode(siphash_ctx &ctx, nonce_t nce, u32 uorv) { uint2 nonce = vectorize(2*nce + uorv); uint2 v0 = ctx.v2[0], v1 = ctx.v2[1], v2 = ctx.v2[2], v3 = ctx.v2[3] ^ nonce; SIPROUND; SIPROUND; v0 ^= nonce; v2 ^= vectorize(0xff); SIPROUND; SIPROUND; SIPROUND; SIPROUND; return devectorize(v0 ^ v1 ^ v2 ^ v3) & EDGEMASK; } #else __device__ node_t dipnode(siphash_ctx &ctx, nonce_t nce, u32 uorv) { u64 nonce = 2*nce + uorv; u64 v0 = ctx.v[0], v1 = ctx.v[1], v2 = ctx.v[2], v3 = ctx.v[3] ^ nonce; SIPROUND; SIPROUND; v0 ^= nonce; v2 ^= 0xff; SIPROUND; SIPROUND; SIPROUND; SIPROUND; return (v0 ^ v1 ^ v2 ^ v3) & EDGEMASK; } #endif #include <stdio.h> #include <stdlib.h> #include <assert.h> #include <set> // algorithm parameters #ifndef PART_BITS // #bits used to partition edge set processing to save memory // a value of 0 does no partitioning and is fastest // a value of 1 partitions in two, making twice_set the // same size as shrinkingset at about 33% slowdown // higher values are not that interesting #define PART_BITS 0 #endif #ifndef IDXSHIFT // we want sizeof(cuckoo_hash) == sizeof(twice_set), so // CUCKOO_SIZE * sizeof(u64) == TWICE_WORDS * sizeof(u32) // CUCKOO_SIZE * 2 == TWICE_WORDS // (SIZE >> IDXSHIFT) * 2 == 2 * ONCE_BITS / 32 // SIZE >> IDXSHIFT == HALFSIZE >> PART_BITS >> 5 // IDXSHIFT == 1 + PART_BITS + 5 #define IDXSHIFT (PART_BITS + 6) #endif 
// grow with cube root of size, hardly affected by trimming #ifndef MAXPATHLEN #define MAXPATHLEN (8 << (SIZESHIFT/3)) #endif #define checkCudaErrors(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } // set that starts out full and gets reset by threads on disjoint words class shrinkingset { public: u64 *bits; __device__ void reset(nonce_t n) { bits[n/64] |= 1LL << (n%64); } __device__ bool test(node_t n) const { return !((bits[n/64] >> (n%64)) & 1); } __device__ u64 block(node_t n) const { return ~bits[n/64]; } }; #define PART_MASK ((1 << PART_BITS) - 1) #define ONCE_BITS (HALFSIZE >> PART_BITS) #define TWICE_WORDS ((2 * ONCE_BITS) / 32) class twice_set { public: u32 *bits; __device__ void reset() { memset(bits, 0, TWICE_WORDS * sizeof(u32)); } __device__ void set(node_t u) { node_t idx = u/16; u32 bit = 1 << (2 * (u%16)); u32 old = atomicOr(&bits[idx], bit); u32 bit2 = bit<<1; if ((old & (bit2|bit)) == bit) atomicOr(&bits[idx], bit2); } __device__ u32 test(node_t u) const { return (bits[u/16] >> (2 * (u%16))) & 2; } }; #define CUCKOO_SIZE (SIZE >> IDXSHIFT) #define CUCKOO_MASK (CUCKOO_SIZE - 1) // number of (least significant) key bits that survives leftshift by SIZESHIFT #define KEYBITS (64-SIZESHIFT) #define KEYMASK ((1L << KEYBITS) - 1) #define MAXDRIFT (1L << (KEYBITS - IDXSHIFT)) class cuckoo_hash { public: u64 *cuckoo; u32 nset; void set(node_t u, node_t oldv, node_t newv) { u64 niew = (u64)u << SIZESHIFT | newv; for (node_t ui = u >> IDXSHIFT; ; ui = (ui+1) & CUCKOO_MASK) { u64 old = cuckoo[ui]; if (old == 0 || (old >> SIZESHIFT) == (u & KEYMASK)) { cuckoo[ui] = niew; return; } } } __device__ bool dset(node_t u, node_t oldv, node_t newv) { u64 old, exp = (oldv ? 
(u64)u << SIZESHIFT | oldv : 0), nuw = (u64)u << SIZESHIFT | newv; for (node_t ui = u >> IDXSHIFT; ; ui = (ui+1) & CUCKOO_MASK) { old = atomicCAS((ull *)&cuckoo[ui], (ull)exp, (ull)nuw); if (old == exp) { return true; } if ((old >> SIZESHIFT) == (u & KEYMASK)) { return false; } } } node_t operator[](node_t u) const { for (node_t ui = u >> IDXSHIFT; ; ui = (ui+1) & CUCKOO_MASK) { u64 cu = cuckoo[ui]; if (!cu) return 0; if ((cu >> SIZESHIFT) == (u & KEYMASK)) { assert(((ui - (u >> IDXSHIFT)) & CUCKOO_MASK) < MAXDRIFT); return (node_t)(cu & (SIZE-1)); } } } __device__ node_t node(node_t u) const { for (node_t ui = u >> IDXSHIFT; ; ui = (ui+1) & CUCKOO_MASK) { u64 cu = cuckoo[ui]; if (!cu) return 0; if ((cu >> SIZESHIFT) == (u & KEYMASK)) { assert(((ui - (u >> IDXSHIFT)) & CUCKOO_MASK) < MAXDRIFT); return (node_t)(cu & (SIZE-1)); } } } }; struct noncedge_t { nonce_t nonce; edge_t edge; }; class cuckoo_ctx { public: siphash_ctx sip_ctx; shrinkingset alive; twice_set nonleaf; cuckoo_hash cuckoo; noncedge_t sols[MAXSOLS][PROOFSIZE]; u32 nsols; nonce_t gpu_nonce_lim; u32 nthreads; cuckoo_ctx(const char* header, nonce_t gpulim, u32 n_threads) { setheader(&sip_ctx, header); gpu_nonce_lim = gpulim & ~0x3f; // need multiple of 64 nthreads = n_threads; nsols = 0; } }; __global__ void count_node_deg(cuckoo_ctx *ctx, u32 uorv, u32 part) { shrinkingset &alive = ctx->alive; twice_set &nonleaf = ctx->nonleaf; siphash_ctx sip_ctx = ctx->sip_ctx; // local copy sip context; 2.5% speed gain int id = blockIdx.x * blockDim.x + threadIdx.x; for (nonce_t block = id*64; block < HALFSIZE; block += ctx->nthreads*64) { u64 alive64 = alive.block(block); for (nonce_t nonce = block-1; alive64; ) { // -1 compensates for 1-based ffs u32 ffs = __ffsll(alive64); nonce += ffs; alive64 >>= ffs; node_t u = dipnode(sip_ctx, nonce, uorv); if ((u & PART_MASK) == part) { nonleaf.set(u >> PART_BITS); } } } } __global__ void kill_leaf_edges(cuckoo_ctx *ctx, u32 uorv, u32 part) { shrinkingset &alive = ctx->alive; twice_set &nonleaf = ctx->nonleaf; siphash_ctx sip_ctx = ctx->sip_ctx; int id = blockIdx.x * blockDim.x + threadIdx.x; for (nonce_t block = id*64; block < HALFSIZE; block += ctx->nthreads*64) { u64 alive64 = alive.block(block); for (nonce_t nonce = block-1; alive64; ) { // -1 compensates for 1-based ffs u32 ffs = __ffsll(alive64); nonce += ffs; alive64 >>= ffs; node_t u = dipnode(sip_ctx, nonce, uorv); if ((u & PART_MASK) == part) { if (!nonleaf.test(u >> PART_BITS)) { alive.reset(nonce); } } } } } __device__ u32 dpath(cuckoo_hash &cuckoo, node_t u, node_t *us) { u32 nu; for (nu = 0; u; u = cuckoo.node(u)) { if (nu++ >= MAXPATHLEN) { while (nu-- && us[nu] != u) ; if (nu == ~0) printf("maximum path length exceeded\n"); else printf("illegal % 4d-cycle\n", MAXPATHLEN-nu); return ~0; } us[nu] = u; if (nu>=2 && u==us[nu-2]) return ~0; } us[nu+1] = 0; return nu; } __global__ void find_cycles(cuckoo_ctx *ctx) { int id = blockIdx.x * blockDim.x + threadIdx.x; node_t us[MAXPATHLEN+2], vs[MAXPATHLEN+2]; shrinkingset &alive = ctx->alive; siphash_ctx sip_ctx = ctx->sip_ctx; cuckoo_hash &cuckoo = ctx->cuckoo; for (nonce_t block = id*64; block < ctx->gpu_nonce_lim; block += ctx->nthreads*64) { u64 alive64 = alive.block(block); for (nonce_t nonce = block-1; alive64; ) { // -1 compensates for 1-based ffs u32 ffs = __ffsll(alive64); nonce += ffs; alive64 >>= ffs; node_t u0 = dipnode(sip_ctx, nonce, 0)<<1, v0 = dipnode(sip_ctx, nonce, 1)<<1|1; if (u0 == 0) // ignore vertex 0 so it can be used as nil for cuckoo[] continue; us[0] = u0; vs[0] = 
v0; int nredo = 0; redo: if (nredo++) printf("redo\n"); node_t u1 = cuckoo.node(u0), v1 = cuckoo.node(v0); u32 nu, nv; nonce_t u=u0; for (nu = 0; u; u = cuckoo.node(u)) { if (nu++ >= MAXPATHLEN) { while (nu-- && us[nu] != u) ; if (nu == ~0) printf("maximum path length exceeded\n"); else printf("illegal % 4d-cycle\n", MAXPATHLEN-nu); break; } us[nu] = u; if (nu>=2 && u==us[nu-2]) break; } if (u) { //printf("oops\n"); continue; } us[nu+1] = 0; nonce_t v=v0; for (nv = 0; v; v = cuckoo.node(v)) { if (nv++ >= MAXPATHLEN) { while (nv-- && vs[nv] != v) ; if (nv == ~0) printf("maximum path length exceeded\n"); else printf("illegal % 4d-cycle\n", MAXPATHLEN-nu); break; } vs[nv] = v; if (nv>=2 && v==vs[nv-2]) break; } if (v) { //printf("oops\n"); continue; } vs[nv+1] = 0; // u32 nu = dpath(cuckoo, u1, us), nv = dpath(cuckoo, v1, vs); if (nu==~0 || nv==~0) continue; if (us[nu] == vs[nv]) { u32 min = nu < nv ? nu : nv; for (nu -= min, nv -= min; us[nu] != vs[nv]; nu++, nv++) ; u32 len = nu + nv + 1; printf("% 4d-cycle found at %d:%d%%\n", len, id, (u32)(nonce*100L/HALFSIZE)); if (len == PROOFSIZE) { u32 slot = atomicInc(&ctx->nsols, MAXINT); if (slot < MAXSOLS) { noncedge_t *ne = &ctx->sols[slot][0]; ne++->edge = make_edge(*us, *vs); while (nu--) ne++->edge = make_edge(us[(nu + 1)&~1], us[nu | 1]); // u's in even position; v's in odd while (nv--) ne++->edge = make_edge(vs[nv | 1], vs[(nv + 1)&~1]); // u's in odd position; v's in even } } continue; } if (nu < nv) { while (nu--) if (!cuckoo.dset(us[nu+1], us[nu+2], us[nu])) goto redo; if (!cuckoo.dset(u0, u1, v0)) goto redo; } else { while (nv--) if (!cuckoo.dset(vs[nv+1], vs[nv+2], vs[nv])) goto redo; if (!cuckoo.dset(v0, v1, u0)) goto redo; } } } } u32 path(cuckoo_hash &cuckoo, node_t u, node_t *us) { u32 nu; for (nu = 0; u; u = cuckoo[u]) { if (nu++ >= MAXPATHLEN) { while (nu-- && us[nu] != u) ; if (nu == ~0) printf("maximum path length exceeded\n"); else printf("illegal % 4d-cycle\n", MAXPATHLEN-nu); return ~0; } us[nu] = u; if (nu>=2 && u==us[nu-2]) return ~0; } us[nu+1] = 0; return nu; } void find_more_cycles(cuckoo_ctx *ctx, cuckoo_hash &cuckoo, u64 *bits) { node_t us[MAXPATHLEN+2], vs[MAXPATHLEN+2]; for (nonce_t block = ctx->gpu_nonce_lim; block < HALFSIZE; block += 64) { u64 alive64 = ~bits[block/64]; for (nonce_t nonce = block-1; alive64; ) { // -1 compensates for 1-based ffs // printf("nonce %d\n", nonce); u32 ffs = __builtin_ffsll(alive64); nonce += ffs; alive64 >>= ffs; node_t u0=sipnode(&ctx->sip_ctx, nonce, 0), v0=sipnode(&ctx->sip_ctx, nonce, 1); if (u0 == 0) // ignore vertex 0 so it can be used as nil for cuckoo[] continue; us[0] = u0; vs[0] = v0; node_t u1 = cuckoo[u0], v1 = cuckoo[v0]; u32 nu = path(cuckoo, u1, us), nv = path(cuckoo, v1, vs); if (nu==~0 || nv==~0) continue; if (us[nu] == vs[nv]) { u32 min = nu < nv ? 
nu : nv; for (nu -= min, nv -= min; us[nu] != vs[nv]; nu++, nv++) ; u32 len = nu + nv + 1; printf("% 4d-cycle found at 0:%d%%\n", len, (u32)(nonce*100L/HALFSIZE)); if (len == PROOFSIZE) { u32 slot = ctx->nsols++; if (slot < MAXSOLS) { noncedge_t *ne = &ctx->sols[slot][0]; ne++->edge = make_edge(*us, *vs); while (nu--) ne++->edge = make_edge(us[(nu + 1)&~1], us[nu | 1]); // u's in even position; v's in odd while (nv--) ne++->edge = make_edge(vs[nv | 1], vs[(nv + 1)&~1]); // u's in odd position; v's in even } } continue; } if (nu < nv) { while (nu--) cuckoo.set(us[nu+1], us[nu+2], us[nu]); cuckoo.set(u0, u1, v0); } else { while (nv--) cuckoo.set(vs[nv+1], vs[nv+2], vs[nv]); cuckoo.set(v0, v1, u0); } if (ffs & 64) break; // can't shift by 64 } } } __global__ void find_nonces(cuckoo_ctx *ctx) { int id = blockIdx.x * blockDim.x + threadIdx.x; shrinkingset &alive = ctx->alive; siphash_ctx sip_ctx = ctx->sip_ctx; for (nonce_t block = id * 64; block < HALFSIZE; block += ctx->nthreads * 64) { u64 alive64 = alive.block(block); for (nonce_t nonce = block - 1; alive64;) { // -1 compensates for 1-based ffs u32 ffs = __ffsll(alive64); nonce += ffs; alive64 >>= ffs; edge_t edge = make_edge(dipnode(sip_ctx,nonce,0)<<1, dipnode(sip_ctx,nonce,1)<<1|1); for (u32 i = 0; i < ctx->nsols; i++) { noncedge_t *sol = ctx->sols[i]; for (u32 j = 0; j < PROOFSIZE; j++) { if (sol[j].edge == edge) sol[j].nonce = nonce; } } } } } int noncedge_cmp(const void *a, const void *b) { return ((noncedge_t *)a)->nonce - ((noncedge_t *)b)->nonce; } #include <unistd.h> int main(int argc, char **argv) { int gpu_pct = 50; int nthreads = 1; int ntrims = 1 + (PART_BITS+3)*(PART_BITS+4)/2; int tpb = 0; const char *header = ""; int c; while ((c = getopt (argc, argv, "h:m:n:g:t:p:")) != -1) { switch (c) { case 'h': header = optarg; break; case 'n': ntrims = atoi(optarg); break; case 'g': gpu_pct = atoi(optarg); break; case 't': nthreads = atoi(optarg); break; case 'p': tpb = atoi(optarg); break; } } if (!tpb) // if not set, then default threads per block to roughly square root of threads for (tpb = 1; tpb*tpb < nthreads; tpb *= 2) ; printf("Looking for %d-cycle on cuckoo%d(\"%s\") with 50%% edges, %d trims, %d%% gpu, %d threads %d per block\n", PROOFSIZE, SIZESHIFT, header, ntrims, gpu_pct, nthreads, tpb); u64 edgeBytes = HALFSIZE/8, nodeBytes = TWICE_WORDS*sizeof(u32); nonce_t gpu_lim = HALFSIZE*gpu_pct/100 & ~0x3f; cuckoo_ctx ctx(header, gpu_lim, nthreads); checkCudaErrors(hipMalloc((void**)&ctx.alive.bits, edgeBytes)); checkCudaErrors(hipMemset(ctx.alive.bits, 0, edgeBytes)); checkCudaErrors(hipMalloc((void**)&ctx.nonleaf.bits, nodeBytes)); int edgeUnit=0, nodeUnit=0; u64 eb = edgeBytes, nb = nodeBytes; for (; eb >= 1024; eb>>=10) edgeUnit++; for (; nb >= 1024; nb>>=10) nodeUnit++; printf("Using %d%cB edge and %d%cB node memory.\n", (int)eb, " KMGT"[edgeUnit], (int)nb, " KMGT"[nodeUnit]); cuckoo_ctx *device_ctx; checkCudaErrors(hipMalloc((void**)&device_ctx, sizeof(cuckoo_ctx))); hipMemcpy(device_ctx, &ctx, sizeof(cuckoo_ctx), hipMemcpyHostToDevice); for (u32 round=0; round < ntrims; round++) { for (u32 uorv = 0; uorv < 2; uorv++) { for (u32 part = 0; part <= PART_MASK; part++) { checkCudaErrors(hipMemset(ctx.nonleaf.bits, 0, nodeBytes)); hipLaunchKernelGGL(( count_node_deg), dim3(nthreads/tpb),dim3(tpb), 0, 0, device_ctx, uorv, part); hipLaunchKernelGGL(( kill_leaf_edges), dim3(nthreads/tpb),dim3(tpb), 0, 0, device_ctx, uorv, part); } } } u64 *bits; bits = (u64 *)calloc(HALFSIZE/64, sizeof(u64)); assert(bits != 0); hipMemcpy(bits, 
ctx.alive.bits, (HALFSIZE/64) * sizeof(u64), hipMemcpyDeviceToHost); u64 cnt = 0; for (int i = 0; i < HALFSIZE/64; i++) cnt += __builtin_popcountll(~bits[i]); u32 load = (u32)(100 * cnt / CUCKOO_SIZE); printf("final load %d%%\n", load); if (load >= 90) { printf("overloaded! exiting..."); exit(0); } checkCudaErrors(hipFree(ctx.nonleaf.bits)); u32 cuckooBytes = CUCKOO_SIZE * sizeof(u64); checkCudaErrors(hipMalloc((void**)&ctx.cuckoo.cuckoo, cuckooBytes)); checkCudaErrors(hipMemset(ctx.cuckoo.cuckoo, 0, cuckooBytes)); hipMemcpy(device_ctx, &ctx, sizeof(cuckoo_ctx), hipMemcpyHostToDevice); hipLaunchKernelGGL(( find_cycles), dim3(nthreads/tpb),dim3(tpb), 0, 0, device_ctx); hipMemcpy(&ctx, device_ctx, sizeof(cuckoo_ctx), hipMemcpyDeviceToHost); cuckoo_hash *cuckoo = new cuckoo_hash(); cuckoo->cuckoo = (u64 *)calloc(CUCKOO_SIZE, sizeof(u64)); assert(cuckoo->cuckoo != 0); hipMemcpy(cuckoo->cuckoo, ctx.cuckoo.cuckoo, cuckooBytes, hipMemcpyDeviceToHost); cnt = 0; for (int i = 0; i < CUCKOO_SIZE; i++) cnt += (cuckoo->cuckoo[i] != 0); printf("%lu gpu edges\n", cnt); find_more_cycles(&ctx, *cuckoo, bits); free(cuckoo->cuckoo); if (ctx.nsols) { hipMemcpy(device_ctx, &ctx, sizeof(cuckoo_ctx), hipMemcpyHostToDevice); hipLaunchKernelGGL(( find_nonces), dim3(nthreads/tpb), dim3(tpb), 0, 0, device_ctx); hipMemcpy(&ctx, device_ctx, sizeof(cuckoo_ctx), hipMemcpyDeviceToHost); for (u32 i = 0; i < ctx.nsols; i++) { printf("Solution"); qsort(ctx.sols[i], PROOFSIZE, sizeof(noncedge_t), noncedge_cmp); for (u32 j = 0; j < PROOFSIZE; j++) printf(" %jx", (uintmax_t)ctx.sols[i][j].nonce); printf("\n"); } } checkCudaErrors(hipFree(ctx.cuckoo.cuckoo)); checkCudaErrors(hipFree(ctx.alive.bits)); return 0; }
bb163c16cf0f2853b12374a4c0d6e9cbc63a7191.cu
// Cuckoo Cycle, a memory-hard proof-of-work // Copyright (c) 2013-2016 John Tromp // The edge-trimming memory optimization is due to Dave Andersen // http://da-data.blogspot.com/2014/03/a-public-review-of-cuckoo-cycle.html #include <stdint.h> #include <string.h> #include "cuckoo.h" #ifndef MAXSOLS #define MAXSOLS 1 #endif #define MAXINT (1<<31-1) #if SIZESHIFT <= 32 typedef u32 nonce_t; typedef u32 node_t; typedef uint2 edge_t; #define make_edge make_uint2 #else typedef u64 nonce_t; typedef u64 node_t; typedef ulong2 edge_t; #define make_edge make_ulong2 #endif #include <openssl/sha.h> typedef unsigned long long ull; static __device__ __forceinline__ bool operator== (edge_t a, edge_t b) { return a.x == b.x && a.y == b.y; } // d(evice s)ipnode #if (__CUDA_ARCH__ >= 320) // redefine ROTL to use funnel shifter, 3% speed gain static __device__ __forceinline__ uint2 operator^ (uint2 a, uint2 b) { return make_uint2(a.x ^ b.x, a.y ^ b.y); } static __device__ __forceinline__ void operator^= (uint2 &a, uint2 b) { a.x ^= b.x, a.y ^= b.y; } static __device__ __forceinline__ void operator+= (uint2 &a, uint2 b) { asm("{\n\tadd.cc.u32 %0,%2,%4;\n\taddc.u32 %1,%3,%5;\n\t}\n\t" : "=r"(a.x), "=r"(a.y) : "r"(a.x), "r"(a.y), "r"(b.x), "r"(b.y)); } #undef ROTL __inline__ __device__ uint2 ROTL(const uint2 a, const int offset) { uint2 result; if (offset >= 32) { asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.x) : "r"(a.x), "r"(a.y), "r"(offset)); asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.y) : "r"(a.y), "r"(a.x), "r"(offset)); } else { asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.x) : "r"(a.y), "r"(a.x), "r"(offset)); asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.y) : "r"(a.x), "r"(a.y), "r"(offset)); } return result; } __device__ __forceinline__ uint2 vectorize(const uint64_t x) { uint2 result; asm("mov.b64 {%0,%1},%2; \n\t" : "=r"(result.x), "=r"(result.y) : "l"(x)); return result; } __device__ __forceinline__ uint64_t devectorize(uint2 x) { uint64_t result; asm("mov.b64 %0,{%1,%2}; \n\t" : "=l"(result) : "r"(x.x), "r"(x.y)); return result; } __device__ node_t dipnode(siphash_ctx &ctx, nonce_t nce, u32 uorv) { uint2 nonce = vectorize(2*nce + uorv); uint2 v0 = ctx.v2[0], v1 = ctx.v2[1], v2 = ctx.v2[2], v3 = ctx.v2[3] ^ nonce; SIPROUND; SIPROUND; v0 ^= nonce; v2 ^= vectorize(0xff); SIPROUND; SIPROUND; SIPROUND; SIPROUND; return devectorize(v0 ^ v1 ^ v2 ^ v3) & EDGEMASK; } #else __device__ node_t dipnode(siphash_ctx &ctx, nonce_t nce, u32 uorv) { u64 nonce = 2*nce + uorv; u64 v0 = ctx.v[0], v1 = ctx.v[1], v2 = ctx.v[2], v3 = ctx.v[3] ^ nonce; SIPROUND; SIPROUND; v0 ^= nonce; v2 ^= 0xff; SIPROUND; SIPROUND; SIPROUND; SIPROUND; return (v0 ^ v1 ^ v2 ^ v3) & EDGEMASK; } #endif #include <stdio.h> #include <stdlib.h> #include <assert.h> #include <set> // algorithm parameters #ifndef PART_BITS // #bits used to partition edge set processing to save memory // a value of 0 does no partitioning and is fastest // a value of 1 partitions in two, making twice_set the // same size as shrinkingset at about 33% slowdown // higher values are not that interesting #define PART_BITS 0 #endif #ifndef IDXSHIFT // we want sizeof(cuckoo_hash) == sizeof(twice_set), so // CUCKOO_SIZE * sizeof(u64) == TWICE_WORDS * sizeof(u32) // CUCKOO_SIZE * 2 == TWICE_WORDS // (SIZE >> IDXSHIFT) * 2 == 2 * ONCE_BITS / 32 // SIZE >> IDXSHIFT == HALFSIZE >> PART_BITS >> 5 // IDXSHIFT == 1 + PART_BITS + 5 #define IDXSHIFT (PART_BITS + 6) #endif // grow with cube root of size, hardly affected by trimming #ifndef MAXPATHLEN #define 
MAXPATHLEN (8 << (SIZESHIFT/3)) #endif #define checkCudaErrors(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } // set that starts out full and gets reset by threads on disjoint words class shrinkingset { public: u64 *bits; __device__ void reset(nonce_t n) { bits[n/64] |= 1LL << (n%64); } __device__ bool test(node_t n) const { return !((bits[n/64] >> (n%64)) & 1); } __device__ u64 block(node_t n) const { return ~bits[n/64]; } }; #define PART_MASK ((1 << PART_BITS) - 1) #define ONCE_BITS (HALFSIZE >> PART_BITS) #define TWICE_WORDS ((2 * ONCE_BITS) / 32) class twice_set { public: u32 *bits; __device__ void reset() { memset(bits, 0, TWICE_WORDS * sizeof(u32)); } __device__ void set(node_t u) { node_t idx = u/16; u32 bit = 1 << (2 * (u%16)); u32 old = atomicOr(&bits[idx], bit); u32 bit2 = bit<<1; if ((old & (bit2|bit)) == bit) atomicOr(&bits[idx], bit2); } __device__ u32 test(node_t u) const { return (bits[u/16] >> (2 * (u%16))) & 2; } }; #define CUCKOO_SIZE (SIZE >> IDXSHIFT) #define CUCKOO_MASK (CUCKOO_SIZE - 1) // number of (least significant) key bits that survives leftshift by SIZESHIFT #define KEYBITS (64-SIZESHIFT) #define KEYMASK ((1L << KEYBITS) - 1) #define MAXDRIFT (1L << (KEYBITS - IDXSHIFT)) class cuckoo_hash { public: u64 *cuckoo; u32 nset; void set(node_t u, node_t oldv, node_t newv) { u64 niew = (u64)u << SIZESHIFT | newv; for (node_t ui = u >> IDXSHIFT; ; ui = (ui+1) & CUCKOO_MASK) { u64 old = cuckoo[ui]; if (old == 0 || (old >> SIZESHIFT) == (u & KEYMASK)) { cuckoo[ui] = niew; return; } } } __device__ bool dset(node_t u, node_t oldv, node_t newv) { u64 old, exp = (oldv ? 
(u64)u << SIZESHIFT | oldv : 0), nuw = (u64)u << SIZESHIFT | newv; for (node_t ui = u >> IDXSHIFT; ; ui = (ui+1) & CUCKOO_MASK) { old = atomicCAS((ull *)&cuckoo[ui], (ull)exp, (ull)nuw); if (old == exp) { return true; } if ((old >> SIZESHIFT) == (u & KEYMASK)) { return false; } } } node_t operator[](node_t u) const { for (node_t ui = u >> IDXSHIFT; ; ui = (ui+1) & CUCKOO_MASK) { u64 cu = cuckoo[ui]; if (!cu) return 0; if ((cu >> SIZESHIFT) == (u & KEYMASK)) { assert(((ui - (u >> IDXSHIFT)) & CUCKOO_MASK) < MAXDRIFT); return (node_t)(cu & (SIZE-1)); } } } __device__ node_t node(node_t u) const { for (node_t ui = u >> IDXSHIFT; ; ui = (ui+1) & CUCKOO_MASK) { u64 cu = cuckoo[ui]; if (!cu) return 0; if ((cu >> SIZESHIFT) == (u & KEYMASK)) { assert(((ui - (u >> IDXSHIFT)) & CUCKOO_MASK) < MAXDRIFT); return (node_t)(cu & (SIZE-1)); } } } }; struct noncedge_t { nonce_t nonce; edge_t edge; }; class cuckoo_ctx { public: siphash_ctx sip_ctx; shrinkingset alive; twice_set nonleaf; cuckoo_hash cuckoo; noncedge_t sols[MAXSOLS][PROOFSIZE]; u32 nsols; nonce_t gpu_nonce_lim; u32 nthreads; cuckoo_ctx(const char* header, nonce_t gpulim, u32 n_threads) { setheader(&sip_ctx, header); gpu_nonce_lim = gpulim & ~0x3f; // need multiple of 64 nthreads = n_threads; nsols = 0; } }; __global__ void count_node_deg(cuckoo_ctx *ctx, u32 uorv, u32 part) { shrinkingset &alive = ctx->alive; twice_set &nonleaf = ctx->nonleaf; siphash_ctx sip_ctx = ctx->sip_ctx; // local copy sip context; 2.5% speed gain int id = blockIdx.x * blockDim.x + threadIdx.x; for (nonce_t block = id*64; block < HALFSIZE; block += ctx->nthreads*64) { u64 alive64 = alive.block(block); for (nonce_t nonce = block-1; alive64; ) { // -1 compensates for 1-based ffs u32 ffs = __ffsll(alive64); nonce += ffs; alive64 >>= ffs; node_t u = dipnode(sip_ctx, nonce, uorv); if ((u & PART_MASK) == part) { nonleaf.set(u >> PART_BITS); } } } } __global__ void kill_leaf_edges(cuckoo_ctx *ctx, u32 uorv, u32 part) { shrinkingset &alive = ctx->alive; twice_set &nonleaf = ctx->nonleaf; siphash_ctx sip_ctx = ctx->sip_ctx; int id = blockIdx.x * blockDim.x + threadIdx.x; for (nonce_t block = id*64; block < HALFSIZE; block += ctx->nthreads*64) { u64 alive64 = alive.block(block); for (nonce_t nonce = block-1; alive64; ) { // -1 compensates for 1-based ffs u32 ffs = __ffsll(alive64); nonce += ffs; alive64 >>= ffs; node_t u = dipnode(sip_ctx, nonce, uorv); if ((u & PART_MASK) == part) { if (!nonleaf.test(u >> PART_BITS)) { alive.reset(nonce); } } } } } __device__ u32 dpath(cuckoo_hash &cuckoo, node_t u, node_t *us) { u32 nu; for (nu = 0; u; u = cuckoo.node(u)) { if (nu++ >= MAXPATHLEN) { while (nu-- && us[nu] != u) ; if (nu == ~0) printf("maximum path length exceeded\n"); else printf("illegal % 4d-cycle\n", MAXPATHLEN-nu); return ~0; } us[nu] = u; if (nu>=2 && u==us[nu-2]) return ~0; } us[nu+1] = 0; return nu; } __global__ void find_cycles(cuckoo_ctx *ctx) { int id = blockIdx.x * blockDim.x + threadIdx.x; node_t us[MAXPATHLEN+2], vs[MAXPATHLEN+2]; shrinkingset &alive = ctx->alive; siphash_ctx sip_ctx = ctx->sip_ctx; cuckoo_hash &cuckoo = ctx->cuckoo; for (nonce_t block = id*64; block < ctx->gpu_nonce_lim; block += ctx->nthreads*64) { u64 alive64 = alive.block(block); for (nonce_t nonce = block-1; alive64; ) { // -1 compensates for 1-based ffs u32 ffs = __ffsll(alive64); nonce += ffs; alive64 >>= ffs; node_t u0 = dipnode(sip_ctx, nonce, 0)<<1, v0 = dipnode(sip_ctx, nonce, 1)<<1|1; if (u0 == 0) // ignore vertex 0 so it can be used as nil for cuckoo[] continue; us[0] = u0; vs[0] = 
v0; int nredo = 0; redo: if (nredo++) printf("redo\n"); node_t u1 = cuckoo.node(u0), v1 = cuckoo.node(v0); u32 nu, nv; nonce_t u=u0; for (nu = 0; u; u = cuckoo.node(u)) { if (nu++ >= MAXPATHLEN) { while (nu-- && us[nu] != u) ; if (nu == ~0) printf("maximum path length exceeded\n"); else printf("illegal % 4d-cycle\n", MAXPATHLEN-nu); break; } us[nu] = u; if (nu>=2 && u==us[nu-2]) break; } if (u) { //printf("oops\n"); continue; } us[nu+1] = 0; nonce_t v=v0; for (nv = 0; v; v = cuckoo.node(v)) { if (nv++ >= MAXPATHLEN) { while (nv-- && vs[nv] != v) ; if (nv == ~0) printf("maximum path length exceeded\n"); else printf("illegal % 4d-cycle\n", MAXPATHLEN-nu); break; } vs[nv] = v; if (nv>=2 && v==vs[nv-2]) break; } if (v) { //printf("oops\n"); continue; } vs[nv+1] = 0; // u32 nu = dpath(cuckoo, u1, us), nv = dpath(cuckoo, v1, vs); if (nu==~0 || nv==~0) continue; if (us[nu] == vs[nv]) { u32 min = nu < nv ? nu : nv; for (nu -= min, nv -= min; us[nu] != vs[nv]; nu++, nv++) ; u32 len = nu + nv + 1; printf("% 4d-cycle found at %d:%d%%\n", len, id, (u32)(nonce*100L/HALFSIZE)); if (len == PROOFSIZE) { u32 slot = atomicInc(&ctx->nsols, MAXINT); if (slot < MAXSOLS) { noncedge_t *ne = &ctx->sols[slot][0]; ne++->edge = make_edge(*us, *vs); while (nu--) ne++->edge = make_edge(us[(nu + 1)&~1], us[nu | 1]); // u's in even position; v's in odd while (nv--) ne++->edge = make_edge(vs[nv | 1], vs[(nv + 1)&~1]); // u's in odd position; v's in even } } continue; } if (nu < nv) { while (nu--) if (!cuckoo.dset(us[nu+1], us[nu+2], us[nu])) goto redo; if (!cuckoo.dset(u0, u1, v0)) goto redo; } else { while (nv--) if (!cuckoo.dset(vs[nv+1], vs[nv+2], vs[nv])) goto redo; if (!cuckoo.dset(v0, v1, u0)) goto redo; } } } } u32 path(cuckoo_hash &cuckoo, node_t u, node_t *us) { u32 nu; for (nu = 0; u; u = cuckoo[u]) { if (nu++ >= MAXPATHLEN) { while (nu-- && us[nu] != u) ; if (nu == ~0) printf("maximum path length exceeded\n"); else printf("illegal % 4d-cycle\n", MAXPATHLEN-nu); return ~0; } us[nu] = u; if (nu>=2 && u==us[nu-2]) return ~0; } us[nu+1] = 0; return nu; } void find_more_cycles(cuckoo_ctx *ctx, cuckoo_hash &cuckoo, u64 *bits) { node_t us[MAXPATHLEN+2], vs[MAXPATHLEN+2]; for (nonce_t block = ctx->gpu_nonce_lim; block < HALFSIZE; block += 64) { u64 alive64 = ~bits[block/64]; for (nonce_t nonce = block-1; alive64; ) { // -1 compensates for 1-based ffs // printf("nonce %d\n", nonce); u32 ffs = __builtin_ffsll(alive64); nonce += ffs; alive64 >>= ffs; node_t u0=sipnode(&ctx->sip_ctx, nonce, 0), v0=sipnode(&ctx->sip_ctx, nonce, 1); if (u0 == 0) // ignore vertex 0 so it can be used as nil for cuckoo[] continue; us[0] = u0; vs[0] = v0; node_t u1 = cuckoo[u0], v1 = cuckoo[v0]; u32 nu = path(cuckoo, u1, us), nv = path(cuckoo, v1, vs); if (nu==~0 || nv==~0) continue; if (us[nu] == vs[nv]) { u32 min = nu < nv ? 
nu : nv; for (nu -= min, nv -= min; us[nu] != vs[nv]; nu++, nv++) ; u32 len = nu + nv + 1; printf("% 4d-cycle found at 0:%d%%\n", len, (u32)(nonce*100L/HALFSIZE)); if (len == PROOFSIZE) { u32 slot = ctx->nsols++; if (slot < MAXSOLS) { noncedge_t *ne = &ctx->sols[slot][0]; ne++->edge = make_edge(*us, *vs); while (nu--) ne++->edge = make_edge(us[(nu + 1)&~1], us[nu | 1]); // u's in even position; v's in odd while (nv--) ne++->edge = make_edge(vs[nv | 1], vs[(nv + 1)&~1]); // u's in odd position; v's in even } } continue; } if (nu < nv) { while (nu--) cuckoo.set(us[nu+1], us[nu+2], us[nu]); cuckoo.set(u0, u1, v0); } else { while (nv--) cuckoo.set(vs[nv+1], vs[nv+2], vs[nv]); cuckoo.set(v0, v1, u0); } if (ffs & 64) break; // can't shift by 64 } } } __global__ void find_nonces(cuckoo_ctx *ctx) { int id = blockIdx.x * blockDim.x + threadIdx.x; shrinkingset &alive = ctx->alive; siphash_ctx sip_ctx = ctx->sip_ctx; for (nonce_t block = id * 64; block < HALFSIZE; block += ctx->nthreads * 64) { u64 alive64 = alive.block(block); for (nonce_t nonce = block - 1; alive64;) { // -1 compensates for 1-based ffs u32 ffs = __ffsll(alive64); nonce += ffs; alive64 >>= ffs; edge_t edge = make_edge(dipnode(sip_ctx,nonce,0)<<1, dipnode(sip_ctx,nonce,1)<<1|1); for (u32 i = 0; i < ctx->nsols; i++) { noncedge_t *sol = ctx->sols[i]; for (u32 j = 0; j < PROOFSIZE; j++) { if (sol[j].edge == edge) sol[j].nonce = nonce; } } } } } int noncedge_cmp(const void *a, const void *b) { return ((noncedge_t *)a)->nonce - ((noncedge_t *)b)->nonce; } #include <unistd.h> int main(int argc, char **argv) { int gpu_pct = 50; int nthreads = 1; int ntrims = 1 + (PART_BITS+3)*(PART_BITS+4)/2; int tpb = 0; const char *header = ""; int c; while ((c = getopt (argc, argv, "h:m:n:g:t:p:")) != -1) { switch (c) { case 'h': header = optarg; break; case 'n': ntrims = atoi(optarg); break; case 'g': gpu_pct = atoi(optarg); break; case 't': nthreads = atoi(optarg); break; case 'p': tpb = atoi(optarg); break; } } if (!tpb) // if not set, then default threads per block to roughly square root of threads for (tpb = 1; tpb*tpb < nthreads; tpb *= 2) ; printf("Looking for %d-cycle on cuckoo%d(\"%s\") with 50%% edges, %d trims, %d%% gpu, %d threads %d per block\n", PROOFSIZE, SIZESHIFT, header, ntrims, gpu_pct, nthreads, tpb); u64 edgeBytes = HALFSIZE/8, nodeBytes = TWICE_WORDS*sizeof(u32); nonce_t gpu_lim = HALFSIZE*gpu_pct/100 & ~0x3f; cuckoo_ctx ctx(header, gpu_lim, nthreads); checkCudaErrors(cudaMalloc((void**)&ctx.alive.bits, edgeBytes)); checkCudaErrors(cudaMemset(ctx.alive.bits, 0, edgeBytes)); checkCudaErrors(cudaMalloc((void**)&ctx.nonleaf.bits, nodeBytes)); int edgeUnit=0, nodeUnit=0; u64 eb = edgeBytes, nb = nodeBytes; for (; eb >= 1024; eb>>=10) edgeUnit++; for (; nb >= 1024; nb>>=10) nodeUnit++; printf("Using %d%cB edge and %d%cB node memory.\n", (int)eb, " KMGT"[edgeUnit], (int)nb, " KMGT"[nodeUnit]); cuckoo_ctx *device_ctx; checkCudaErrors(cudaMalloc((void**)&device_ctx, sizeof(cuckoo_ctx))); cudaMemcpy(device_ctx, &ctx, sizeof(cuckoo_ctx), cudaMemcpyHostToDevice); for (u32 round=0; round < ntrims; round++) { for (u32 uorv = 0; uorv < 2; uorv++) { for (u32 part = 0; part <= PART_MASK; part++) { checkCudaErrors(cudaMemset(ctx.nonleaf.bits, 0, nodeBytes)); count_node_deg<<<nthreads/tpb,tpb>>>(device_ctx, uorv, part); kill_leaf_edges<<<nthreads/tpb,tpb>>>(device_ctx, uorv, part); } } } u64 *bits; bits = (u64 *)calloc(HALFSIZE/64, sizeof(u64)); assert(bits != 0); cudaMemcpy(bits, ctx.alive.bits, (HALFSIZE/64) * sizeof(u64), cudaMemcpyDeviceToHost); 
u64 cnt = 0; for (int i = 0; i < HALFSIZE/64; i++) cnt += __builtin_popcountll(~bits[i]); u32 load = (u32)(100 * cnt / CUCKOO_SIZE); printf("final load %d%%\n", load); if (load >= 90) { printf("overloaded! exiting..."); exit(0); } checkCudaErrors(cudaFree(ctx.nonleaf.bits)); u32 cuckooBytes = CUCKOO_SIZE * sizeof(u64); checkCudaErrors(cudaMalloc((void**)&ctx.cuckoo.cuckoo, cuckooBytes)); checkCudaErrors(cudaMemset(ctx.cuckoo.cuckoo, 0, cuckooBytes)); cudaMemcpy(device_ctx, &ctx, sizeof(cuckoo_ctx), cudaMemcpyHostToDevice); find_cycles<<<nthreads/tpb,tpb>>>(device_ctx); cudaMemcpy(&ctx, device_ctx, sizeof(cuckoo_ctx), cudaMemcpyDeviceToHost); cuckoo_hash *cuckoo = new cuckoo_hash(); cuckoo->cuckoo = (u64 *)calloc(CUCKOO_SIZE, sizeof(u64)); assert(cuckoo->cuckoo != 0); cudaMemcpy(cuckoo->cuckoo, ctx.cuckoo.cuckoo, cuckooBytes, cudaMemcpyDeviceToHost); cnt = 0; for (int i = 0; i < CUCKOO_SIZE; i++) cnt += (cuckoo->cuckoo[i] != 0); printf("%lu gpu edges\n", cnt); find_more_cycles(&ctx, *cuckoo, bits); free(cuckoo->cuckoo); if (ctx.nsols) { cudaMemcpy(device_ctx, &ctx, sizeof(cuckoo_ctx), cudaMemcpyHostToDevice); find_nonces<<<nthreads/tpb, tpb>>>(device_ctx); cudaMemcpy(&ctx, device_ctx, sizeof(cuckoo_ctx), cudaMemcpyDeviceToHost); for (u32 i = 0; i < ctx.nsols; i++) { printf("Solution"); qsort(ctx.sols[i], PROOFSIZE, sizeof(noncedge_t), noncedge_cmp); for (u32 j = 0; j < PROOFSIZE; j++) printf(" %jx", (uintmax_t)ctx.sols[i][j].nonce); printf("\n"); } } checkCudaErrors(cudaFree(ctx.cuckoo.cuckoo)); checkCudaErrors(cudaFree(ctx.alive.bits)); return 0; }
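Illustrative note, not part of either file above: every kernel in this pair walks the 64-bit words of the alive bitmap with the same idiom (start the nonce one below the block base because ffs is 1-based, then advance by the ffs result and shift the word down). The host-side sketch below isolates that idiom; the function name and the printf are mine, and the ffs == 64 guard mirrors the one used in find_more_cycles above.

#include <stdio.h>
#include <stdint.h>

// Sketch of the ffs-driven bitmap walk used by the kernels above
// (assumes a GCC/Clang-style __builtin_ffsll; names are illustrative only).
static void visit_live_nonces(uint64_t alive64, uint64_t block) {
    uint64_t nonce = block - 1;               // -1 compensates for 1-based ffs
    while (alive64) {
        int ffs = __builtin_ffsll(alive64);   // 1-based index of lowest set bit
        nonce += ffs;                         // nonce of that live edge
        printf("live nonce %llu\n", (unsigned long long)nonce);
        if (ffs & 64)
            break;                            // can't shift a 64-bit word by 64
        alive64 >>= ffs;
    }
}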
8e7f9cf7cb947e7df2e9561df35835fb48a796b1.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
// CUDA runtime
#include <hip/hip_runtime.h>
#include <rocblas.h>

#include "dense_help_func.hpp"

// cal offset from row col and ld , in row-major matrix, ld is the width of the matrix
#define OFFSET(row, col, ld) ((row) * (ld) + (col))

// transfer float4
#define FETCH_FLOAT4(pointer) (reinterpret_cast<float4*>(&(pointer))[0])

template <
    const int BLOCK_SIZE_M,  // width of block of C that each thread block calculate
    const int BLOCK_SIZE_K,  // height of block of A that each thread block load into shared memory
    const int BLOCK_SIZE_N,  // height of block of C that each thread block calculate
    const int THREAD_SIZE_Y, // height of block of C that each thread calculate
    const int THREAD_SIZE_X, // width of block of C that each thread calculate
    const bool ENABLE_DOUBLE_BUFFER // whether enable double buffering or not
    >
__global__ void DoubleBufferMatMul(
    float * __restrict__ A,
    float * __restrict__ B,
    float * __restrict__ C,
    const int K,
    const int N) {
    // Block index
    int bx = blockIdx.x;
    int by = blockIdx.y;

    // Thread index
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    // size of thread block
    const int bszx = BLOCK_SIZE_N / THREAD_SIZE_X;
    const int bszy = BLOCK_SIZE_M / THREAD_SIZE_Y;
    const int THREAD_NUM_PER_BLOCK = bszy * bszx;

    // thread id
    const int tid = ty * bszx + tx;

    // shared memory
    __shared__ float As[BLOCK_SIZE_M * 2][BLOCK_SIZE_K];
    __shared__ float Bs[BLOCK_SIZE_K * 2][BLOCK_SIZE_N];
    // registers for C
    float accum[THREAD_SIZE_Y][THREAD_SIZE_X] = {0};
    // registers for A and B
    float frag_a[THREAD_SIZE_Y];
    float frag_b[THREAD_SIZE_X];

    // threads needed to load one row of tile
    // / 4 is because float4 is used
    const int A_TILE_THREAD_PER_ROW = BLOCK_SIZE_K / 4;
    const int B_TILE_THREAD_PER_ROW = BLOCK_SIZE_N / 4;

    // row number and col number that needs to be loaded by this thread
    const int A_TILE_ROW_START = tid / A_TILE_THREAD_PER_ROW;
    const int B_TILE_ROW_START = tid / B_TILE_THREAD_PER_ROW;

    const int A_TILE_COL = tid % A_TILE_THREAD_PER_ROW * 4;
    const int B_TILE_COL = tid % B_TILE_THREAD_PER_ROW * 4;

    // row stride that thread uses to load multiple rows of a tile
    const int A_TILE_ROW_STRIDE = THREAD_NUM_PER_BLOCK / A_TILE_THREAD_PER_ROW;
    const int B_TILE_ROW_STRIDE = THREAD_NUM_PER_BLOCK / B_TILE_THREAD_PER_ROW;

    // buffer offsets toggle between the two halves of As/Bs on every tile,
    // so they must be mutable (not const)
    int A_SM_OFFSET = 0;
    int B_SM_OFFSET = 0;

    // can not unroll since K can not be determined at this point
    for (int tile_idx = 0 ; tile_idx < K ; tile_idx += BLOCK_SIZE_K) {
        // load A from global memory to shared memory
        #pragma unroll
        for ( int i = 0 ; i < BLOCK_SIZE_M ; i += A_TILE_ROW_STRIDE) {
            FETCH_FLOAT4(As[A_TILE_ROW_START + i + A_SM_OFFSET][A_TILE_COL]) = FETCH_FLOAT4(A[OFFSET(
                BLOCK_SIZE_M * by + A_TILE_ROW_START + i, // row
                A_TILE_COL + tile_idx, // col
                K )]);
        }

        // load B from global memory to shared memory
        #pragma unroll
        for ( int i = 0 ; i < BLOCK_SIZE_K; i += B_TILE_ROW_STRIDE) {
            FETCH_FLOAT4(Bs[B_TILE_ROW_START + i + B_SM_OFFSET][B_TILE_COL]) = FETCH_FLOAT4(B[OFFSET(
                tile_idx + B_TILE_ROW_START + i, // row
                B_TILE_COL + BLOCK_SIZE_N * bx, // col
                N )]);
        }

        __syncthreads();

        // compute c
        #pragma unroll
        for (int k = 0; k < BLOCK_SIZE_K; ++ k) {
            // load A from shared memory to register
            #pragma unroll
            for (int thread_y = 0; thread_y < THREAD_SIZE_Y; ++thread_y) {
                frag_a[thread_y] = As[ty * THREAD_SIZE_Y + thread_y + A_SM_OFFSET][k];
            }

            // load B from shared memory to register
            #pragma unroll
            for (int thread_x = 0; thread_x < THREAD_SIZE_X; thread_x += 4) {
                FETCH_FLOAT4(frag_b[thread_x]) = FETCH_FLOAT4(Bs[k + B_SM_OFFSET][THREAD_SIZE_X * tx + thread_x]);
            }

            #pragma unroll
            for (int thread_y = 0; thread_y < THREAD_SIZE_Y; ++thread_y) {
                #pragma unroll
                for (int thread_x = 0; thread_x < THREAD_SIZE_X; ++thread_x) {
                    accum[thread_y][thread_x] += frag_a[thread_y] * frag_b[thread_x];
                }
            }
        }

        // change offset so new block of A and B will not override the previous data
        // therefore __syncthreads() is not needed;
        A_SM_OFFSET ^= BLOCK_SIZE_M;
        B_SM_OFFSET ^= BLOCK_SIZE_K;
    }

    // store back to C
    #pragma unroll
    for (int thread_y = 0; thread_y < THREAD_SIZE_Y; ++thread_y) {
        #pragma unroll
        for (int thread_x = 0; thread_x < THREAD_SIZE_X; ++thread_x) {
            C[OFFSET(
                BLOCK_SIZE_M * by + ty * THREAD_SIZE_Y + thread_y,
                BLOCK_SIZE_N * bx + tx * THREAD_SIZE_X + thread_x,
                N)] = accum[thread_y][thread_x];
        }
    }
}
8e7f9cf7cb947e7df2e9561df35835fb48a796b1.cu
#include <stdio.h>
#include <stdlib.h>
// CUDA runtime
#include <cuda_runtime.h>
#include <cublas_v2.h>

#include "dense_help_func.hpp"

// cal offset from row col and ld , in row-major matrix, ld is the width of the matrix
#define OFFSET(row, col, ld) ((row) * (ld) + (col))

// transfer float4
#define FETCH_FLOAT4(pointer) (reinterpret_cast<float4*>(&(pointer))[0])

template <
    const int BLOCK_SIZE_M,  // width of block of C that each thread block calculate
    const int BLOCK_SIZE_K,  // height of block of A that each thread block load into shared memory
    const int BLOCK_SIZE_N,  // height of block of C that each thread block calculate
    const int THREAD_SIZE_Y, // height of block of C that each thread calculate
    const int THREAD_SIZE_X, // width of block of C that each thread calculate
    const bool ENABLE_DOUBLE_BUFFER // whether enable double buffering or not
    >
__global__ void DoubleBufferMatMul(
    float * __restrict__ A,
    float * __restrict__ B,
    float * __restrict__ C,
    const int K,
    const int N) {
    // Block index
    int bx = blockIdx.x;
    int by = blockIdx.y;

    // Thread index
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    // size of thread block
    const int bszx = BLOCK_SIZE_N / THREAD_SIZE_X;
    const int bszy = BLOCK_SIZE_M / THREAD_SIZE_Y;
    const int THREAD_NUM_PER_BLOCK = bszy * bszx;

    // thread id
    const int tid = ty * bszx + tx;

    // shared memory
    __shared__ float As[BLOCK_SIZE_M * 2][BLOCK_SIZE_K];
    __shared__ float Bs[BLOCK_SIZE_K * 2][BLOCK_SIZE_N];
    // registers for C
    float accum[THREAD_SIZE_Y][THREAD_SIZE_X] = {0};
    // registers for A and B
    float frag_a[THREAD_SIZE_Y];
    float frag_b[THREAD_SIZE_X];

    // threads needed to load one row of tile
    // / 4 is because float4 is used
    const int A_TILE_THREAD_PER_ROW = BLOCK_SIZE_K / 4;
    const int B_TILE_THREAD_PER_ROW = BLOCK_SIZE_N / 4;

    // row number and col number that needs to be loaded by this thread
    const int A_TILE_ROW_START = tid / A_TILE_THREAD_PER_ROW;
    const int B_TILE_ROW_START = tid / B_TILE_THREAD_PER_ROW;

    const int A_TILE_COL = tid % A_TILE_THREAD_PER_ROW * 4;
    const int B_TILE_COL = tid % B_TILE_THREAD_PER_ROW * 4;

    // row stride that thread uses to load multiple rows of a tile
    const int A_TILE_ROW_STRIDE = THREAD_NUM_PER_BLOCK / A_TILE_THREAD_PER_ROW;
    const int B_TILE_ROW_STRIDE = THREAD_NUM_PER_BLOCK / B_TILE_THREAD_PER_ROW;

    // buffer offsets toggle between the two halves of As/Bs on every tile,
    // so they must be mutable (not const)
    int A_SM_OFFSET = 0;
    int B_SM_OFFSET = 0;

    // can not unroll since K can not be determined at this point
    for (int tile_idx = 0 ; tile_idx < K ; tile_idx += BLOCK_SIZE_K) {
        // load A from global memory to shared memory
        #pragma unroll
        for ( int i = 0 ; i < BLOCK_SIZE_M ; i += A_TILE_ROW_STRIDE) {
            FETCH_FLOAT4(As[A_TILE_ROW_START + i + A_SM_OFFSET][A_TILE_COL]) = FETCH_FLOAT4(A[OFFSET(
                BLOCK_SIZE_M * by + A_TILE_ROW_START + i, // row
                A_TILE_COL + tile_idx, // col
                K )]);
        }

        // load B from global memory to shared memory
        #pragma unroll
        for ( int i = 0 ; i < BLOCK_SIZE_K; i += B_TILE_ROW_STRIDE) {
            FETCH_FLOAT4(Bs[B_TILE_ROW_START + i + B_SM_OFFSET][B_TILE_COL]) = FETCH_FLOAT4(B[OFFSET(
                tile_idx + B_TILE_ROW_START + i, // row
                B_TILE_COL + BLOCK_SIZE_N * bx, // col
                N )]);
        }

        __syncthreads();

        // compute c
        #pragma unroll
        for (int k = 0; k < BLOCK_SIZE_K; ++ k) {
            // load A from shared memory to register
            #pragma unroll
            for (int thread_y = 0; thread_y < THREAD_SIZE_Y; ++thread_y) {
                frag_a[thread_y] = As[ty * THREAD_SIZE_Y + thread_y + A_SM_OFFSET][k];
            }

            // load B from shared memory to register
            #pragma unroll
            for (int thread_x = 0; thread_x < THREAD_SIZE_X; thread_x += 4) {
                FETCH_FLOAT4(frag_b[thread_x]) = FETCH_FLOAT4(Bs[k + B_SM_OFFSET][THREAD_SIZE_X * tx + thread_x]);
            }

            #pragma unroll
            for (int thread_y = 0; thread_y < THREAD_SIZE_Y; ++thread_y) {
                #pragma unroll
                for (int thread_x = 0; thread_x < THREAD_SIZE_X; ++thread_x) {
                    accum[thread_y][thread_x] += frag_a[thread_y] * frag_b[thread_x];
                }
            }
        }

        // change offset so new block of A and B will not override the previous data
        // therefore __syncthreads() is not needed;
        A_SM_OFFSET ^= BLOCK_SIZE_M;
        B_SM_OFFSET ^= BLOCK_SIZE_K;
    }

    // store back to C
    #pragma unroll
    for (int thread_y = 0; thread_y < THREAD_SIZE_Y; ++thread_y) {
        #pragma unroll
        for (int thread_x = 0; thread_x < THREAD_SIZE_X; ++thread_x) {
            C[OFFSET(
                BLOCK_SIZE_M * by + ty * THREAD_SIZE_Y + thread_y,
                BLOCK_SIZE_N * bx + tx * THREAD_SIZE_X + thread_x,
                N)] = accum[thread_y][thread_x];
        }
    }
}
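A possible host-side instantiation of the kernel above, for orientation only: the wrapper name and the tile sizes BM/BK/BN/TY/TX are example values I chose (they do not appear in either file) and must respect the kernel's assumptions (tile widths and THREAD_SIZE_X multiples of 4 for the float4 loads, matrix dimensions divisible by the block tiles).

#include <cuda_runtime.h>

// Hypothetical launch sketch; assumes DoubleBufferMatMul (defined above) is
// visible in this translation unit and dA/dB/dC are device pointers to
// row-major M x K, K x N and M x N matrices.
void launch_double_buffer_matmul(float *dA, float *dB, float *dC,
                                 int M, int N, int K) {
    constexpr int BM = 128, BK = 8, BN = 128, TY = 8, TX = 8;  // assumed tile sizes
    dim3 block(BN / TX, BM / TY);   // bszx x bszy threads, as computed in the kernel
    dim3 grid(N / BN, M / BM);      // one block per BM x BN tile of C
    DoubleBufferMatMul<BM, BK, BN, TY, TX, true>
        <<<grid, block>>>(dA, dB, dC, K, N);
}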
6a279dd94e01af91c56d5b1441e5a1ea5161ab8e.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

extern "C" {

texture<unsigned char, 2> uchar_tex;
texture<uchar2, 2> uchar2_tex;
texture<uchar4, 2> uchar4_tex;
texture<unsigned short, 2> ushort_tex;
texture<ushort2, 2> ushort2_tex;
texture<ushort4, 2> ushort4_tex;

}

__global__ void Subsample_Bilinear_uchar2(uchar2 *dst, int dst_width, int dst_height, int dst_pitch2, int src_width, int src_height)
{
    int xo = blockIdx.x * blockDim.x + threadIdx.x;
    int yo = blockIdx.y * blockDim.y + threadIdx.y;

    if (yo < dst_height && xo < dst_width)
    {
        float hscale = (float)src_width / (float)dst_width;
        float vscale = (float)src_height / (float)dst_height;
        float xi = (xo + 0.5f) * hscale;
        float yi = (yo + 0.5f) * vscale;
        // 3-tap filter weights are {wh,1.0,wh} and {wv,1.0,wv}
        float wh = min(max(0.5f * (hscale - 1.0f), 0.0f), 1.0f);
        float wv = min(max(0.5f * (vscale - 1.0f), 0.0f), 1.0f);
        // Convert weights to two bilinear weights -> {wh,1.0,wh} -> {wh,0.5,0} + {0,0.5,wh}
        float dx = wh / (0.5f + wh);
        float dy = wv / (0.5f + wv);
        uchar2 c0 = tex2D(uchar2_tex, xi-dx, yi-dy);
        uchar2 c1 = tex2D(uchar2_tex, xi+dx, yi-dy);
        uchar2 c2 = tex2D(uchar2_tex, xi-dx, yi+dy);
        uchar2 c3 = tex2D(uchar2_tex, xi+dx, yi+dy);
        int2 uv;
        uv.x = ((int)c0.x+(int)c1.x+(int)c2.x+(int)c3.x+2) >> 2;
        uv.y = ((int)c0.y+(int)c1.y+(int)c2.y+(int)c3.y+2) >> 2;
        dst[yo*dst_pitch2+xo] = make_uchar2((unsigned char)uv.x, (unsigned char)uv.y);
    }
}
6a279dd94e01af91c56d5b1441e5a1ea5161ab8e.cu
#include "includes.h"
/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

extern "C" {

texture<unsigned char, 2> uchar_tex;
texture<uchar2, 2> uchar2_tex;
texture<uchar4, 2> uchar4_tex;
texture<unsigned short, 2> ushort_tex;
texture<ushort2, 2> ushort2_tex;
texture<ushort4, 2> ushort4_tex;

}

__global__ void Subsample_Bilinear_uchar2(uchar2 *dst, int dst_width, int dst_height, int dst_pitch2, int src_width, int src_height)
{
    int xo = blockIdx.x * blockDim.x + threadIdx.x;
    int yo = blockIdx.y * blockDim.y + threadIdx.y;

    if (yo < dst_height && xo < dst_width)
    {
        float hscale = (float)src_width / (float)dst_width;
        float vscale = (float)src_height / (float)dst_height;
        float xi = (xo + 0.5f) * hscale;
        float yi = (yo + 0.5f) * vscale;
        // 3-tap filter weights are {wh,1.0,wh} and {wv,1.0,wv}
        float wh = min(max(0.5f * (hscale - 1.0f), 0.0f), 1.0f);
        float wv = min(max(0.5f * (vscale - 1.0f), 0.0f), 1.0f);
        // Convert weights to two bilinear weights -> {wh,1.0,wh} -> {wh,0.5,0} + {0,0.5,wh}
        float dx = wh / (0.5f + wh);
        float dy = wv / (0.5f + wv);
        uchar2 c0 = tex2D(uchar2_tex, xi-dx, yi-dy);
        uchar2 c1 = tex2D(uchar2_tex, xi+dx, yi-dy);
        uchar2 c2 = tex2D(uchar2_tex, xi-dx, yi+dy);
        uchar2 c3 = tex2D(uchar2_tex, xi+dx, yi+dy);
        int2 uv;
        uv.x = ((int)c0.x+(int)c1.x+(int)c2.x+(int)c3.x+2) >> 2;
        uv.y = ((int)c0.y+(int)c1.y+(int)c2.y+(int)c3.y+2) >> 2;
        dst[yo*dst_pitch2+xo] = make_uchar2((unsigned char)uv.x, (unsigned char)uv.y);
    }
}
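The weight conversion commented inside the kernel above can be checked on the host. The tiny sketch below is mine (not part of either file); it only re-evaluates the kernel's wh and dx formulas so the sampling offsets can be inspected, e.g. a 2x downscale (hscale = 2) gives wh = 0.5 and dx = 0.5.

#include <stdio.h>

// Host-side evaluation of the kernel's 3-tap -> two-bilinear-tap weight
// conversion: wh = clamp(0.5*(scale-1), 0, 1), dx = wh / (0.5 + wh).
static float clampf(float x, float lo, float hi) {
    return x < lo ? lo : (x > hi ? hi : x);
}

int main(void) {
    for (float hscale = 1.0f; hscale <= 4.0f; hscale += 1.0f) {
        float wh = clampf(0.5f * (hscale - 1.0f), 0.0f, 1.0f);
        float dx = wh / (0.5f + wh);
        printf("hscale=%.1f  wh=%.3f  dx=%.3f\n", hscale, wh, dx);
    }
    return 0;
}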
c94ffc8bd7faf0d03f30c9be3ed90844982be44b.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/Context.h> #include <ATen/Dispatch.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/DispatchStub.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/Pow.h> namespace at { namespace native { namespace { // SFINAE doesn't work well with NVCC under Windows for math functions like pow and sqrt. // So we need to define the functions with the explicit function signatures. // As for pow, the following signatures are defined as the device function: // pow(float, int) // pow(double, int) // pow(float, float) // pow(double, double) // As for sqrt, the following signatures are defined as the device function: // sqrt(float) // sqrt(double) // As for inverse sqrt, we must define it explicitly in MSVC, otherwise the static cast will be // applied to the result of the inline function, and thus the result is incorrect. // e.g. if we use 1.0 / sqrt(2) for 2 ^ (-0.5) in MSVC, we get // int(2 ^ (-0.5)) = int(1.0 / sqrt(2)) = int(1.0 / int(1.414)) = int(1.0 / 1) = 1 // However, the correct result is // int(2 ^ (-0.5)) = int(1.0 / 1.414) = 0 #ifdef _MSC_VER // Functions for pow // pow for at::Half static inline __host__ __device__ at::Half pow_(at::Half base, at::Half exp) { return static_cast<at::Half>(::pow(static_cast<float>(base), static_cast<float>(exp))); } // pow (floating, floating/int) template <typename Base_type, typename Exp_type> static inline __host__ __device__ typename std::enable_if<std::is_floating_point<Base_type>::value && (std::is_same<Base_type, Exp_type>::value || std::is_same<Exp_type, int>::value), Base_type>::type pow_(Base_type base, Exp_type exp) { return ::pow(base, exp); } // pow (integral, integral) template <typename Base_type, typename Exp_type> static inline __host__ __device__ typename std::enable_if<std::is_integral<Base_type>::value && std::is_same<Base_type, Exp_type>::value, Base_type>::type pow_(Base_type base, Exp_type exp) { return native::powi(base, exp); } // pow (Otherwise) template <typename Base_type, typename Exp_type> static inline __host__ __device__ typename std::enable_if<!std::is_same<Base_type, Exp_type>::value && !std::is_same<Exp_type, int>::value, Base_type>::type pow_(Base_type base, Exp_type exp) { return static_cast<Base_type>(::pow(static_cast<double>(base), static_cast<double>(exp))); } // pow (Complex) template<typename B, typename E> static inline __host__ __device__ B complex_pow_(B base, E exp) { return ::pow(base, exp); } // Functions for sqrt // sqrt (floating) template <typename T> static inline __host__ __device__ typename std::enable_if<std::is_floating_point<T>::value, T>::type sqrt_(T x) { return std::sqrt(x); } // sqrt (integral) template <typename T> static inline __host__ __device__ typename std::enable_if<!std::is_floating_point<T>::value, T>::type sqrt_(T x) { return static_cast<T>(std::sqrt(static_cast<double>(x))); } // Function for inverse sqrt // invsqrt (floating) template <typename T> static inline __host__ __device__ typename std::enable_if<std::is_floating_point<T>::value, T>::type invsqrt_(T x) { return 1.0 / std::sqrt(x); } // invsqrt (integral) template <typename T> static inline __host__ __device__ typename std::enable_if<!std::is_floating_point<T>::value, T>::type invsqrt_(T x) { return static_cast<T>(1.0 / std::sqrt(static_cast<double>(x))); } #else template <typename Base_type, typename Exp_type> static inline __host__ __device__ Base_type pow_(Base_type base, Exp_type exp) { return ::pow(base, exp); } template <typename 
T> static inline __host__ __device__ T sqrt_(T x) { return ::sqrt(x); } template <typename T> static inline __host__ __device__ T invsqrt_(T x) { return 1.0 / ::sqrt(x); } // pow (Otherwise) template<typename B, typename E> static inline __host__ __device__ B complex_pow_(B base, E exp) { return ::pow(base, exp); } #endif void pow_tensor_tensor_kernel(TensorIterator& iter) { if (isComplexType(iter.dtype())) { AT_DISPATCH_COMPLEX_TYPES(iter.dtype(), "pow_cuda", [&]() { gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t base, scalar_t exp) -> scalar_t { return complex_pow_(base, exp); }); }); } else if (isFloatingType(iter.dtype())) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "pow_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t base, scalar_t exp) -> scalar_t { return pow_(base, exp); }); }); } else { AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "pow_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t base, scalar_t exp) -> scalar_t { return native::powi(base, exp); }); }); } } template<typename Base_type, typename Exp_type> void pow_tensor_scalar_kernel_impl(TensorIterator& iter, Exp_type exp) { const auto d_exp = static_cast<double>(exp); if (d_exp == 0.5) { gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type { return sqrt_(base); }); } else if (d_exp == 2) { gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type { return base * base; }); } else if (d_exp == 3) { gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type { return base * base * base; }); } else if (d_exp == -0.5) { gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type { return invsqrt_(base); }); } else if (d_exp == -1) { gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type { return 1.0 / base; }); } else if (d_exp == -2) { gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type { return 1.0 / (base * base); }); } else { gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type { return pow_(base, exp); }); } } void pow_tensor_scalar_kernel(TensorIterator& iter, Scalar exp_scalar) { if (isComplexType(iter.dtype()) || exp_scalar.isComplex()) { AT_DISPATCH_COMPLEX_TYPES(iter.dtype(), "pow_cuda", [&]() { const auto exp = exp_scalar.to<scalar_t>(); gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t base) -> scalar_t { return complex_pow_(base, exp); }); }); } else if (isFloatingType(iter.dtype()) || exp_scalar.isIntegral(false)) { AT_DISPATCH_ALL_TYPES_AND(kHalf, iter.dtype(), "pow_cuda", [&]() { const auto exp = exp_scalar.to<scalar_t>(); pow_tensor_scalar_kernel_impl<scalar_t>(iter, exp); }); } else { const auto exp = exp_scalar.to<float>(); AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "pow_cuda", [&]() { pow_tensor_scalar_kernel_impl<scalar_t>(iter, exp); }); } } } // anonymous namespace REGISTER_DISPATCH(pow_tensor_tensor_stub, &pow_tensor_tensor_kernel); REGISTER_DISPATCH(pow_tensor_scalar_stub, &pow_tensor_scalar_kernel); }} // namespace at::native
c94ffc8bd7faf0d03f30c9be3ed90844982be44b.cu
#include <ATen/Context.h> #include <ATen/Dispatch.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/DispatchStub.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/Pow.h> namespace at { namespace native { namespace { // SFINAE doesn't work well with NVCC under Windows for math functions like pow and sqrt. // So we need to define the functions with the explicit function signatures. // As for pow, the following signatures are defined as the device function: // pow(float, int) // pow(double, int) // pow(float, float) // pow(double, double) // As for sqrt, the following signatures are defined as the device function: // sqrt(float) // sqrt(double) // As for inverse sqrt, we must define it explicitly in MSVC, otherwise the static cast will be // applied to the result of the inline function, and thus the result is incorrect. // e.g. if we use 1.0 / sqrt(2) for 2 ^ (-0.5) in MSVC, we get // int(2 ^ (-0.5)) = int(1.0 / sqrt(2)) = int(1.0 / int(1.414)) = int(1.0 / 1) = 1 // However, the correct result is // int(2 ^ (-0.5)) = int(1.0 / 1.414) = 0 #ifdef _MSC_VER // Functions for pow // pow for at::Half static inline __host__ __device__ at::Half pow_(at::Half base, at::Half exp) { return static_cast<at::Half>(std::pow(static_cast<float>(base), static_cast<float>(exp))); } // pow (floating, floating/int) template <typename Base_type, typename Exp_type> static inline __host__ __device__ typename std::enable_if<std::is_floating_point<Base_type>::value && (std::is_same<Base_type, Exp_type>::value || std::is_same<Exp_type, int>::value), Base_type>::type pow_(Base_type base, Exp_type exp) { return std::pow(base, exp); } // pow (integral, integral) template <typename Base_type, typename Exp_type> static inline __host__ __device__ typename std::enable_if<std::is_integral<Base_type>::value && std::is_same<Base_type, Exp_type>::value, Base_type>::type pow_(Base_type base, Exp_type exp) { return native::powi(base, exp); } // pow (Otherwise) template <typename Base_type, typename Exp_type> static inline __host__ __device__ typename std::enable_if<!std::is_same<Base_type, Exp_type>::value && !std::is_same<Exp_type, int>::value, Base_type>::type pow_(Base_type base, Exp_type exp) { return static_cast<Base_type>(std::pow(static_cast<double>(base), static_cast<double>(exp))); } // pow (Complex) template<typename B, typename E> static inline __host__ __device__ B complex_pow_(B base, E exp) { return std::pow(base, exp); } // Functions for sqrt // sqrt (floating) template <typename T> static inline __host__ __device__ typename std::enable_if<std::is_floating_point<T>::value, T>::type sqrt_(T x) { return std::sqrt(x); } // sqrt (integral) template <typename T> static inline __host__ __device__ typename std::enable_if<!std::is_floating_point<T>::value, T>::type sqrt_(T x) { return static_cast<T>(std::sqrt(static_cast<double>(x))); } // Function for inverse sqrt // invsqrt (floating) template <typename T> static inline __host__ __device__ typename std::enable_if<std::is_floating_point<T>::value, T>::type invsqrt_(T x) { return 1.0 / std::sqrt(x); } // invsqrt (integral) template <typename T> static inline __host__ __device__ typename std::enable_if<!std::is_floating_point<T>::value, T>::type invsqrt_(T x) { return static_cast<T>(1.0 / std::sqrt(static_cast<double>(x))); } #else template <typename Base_type, typename Exp_type> static inline __host__ __device__ Base_type pow_(Base_type base, Exp_type exp) { return ::pow(base, exp); } template <typename T> static inline __host__ __device__ T 
sqrt_(T x) { return ::sqrt(x); } template <typename T> static inline __host__ __device__ T invsqrt_(T x) { return 1.0 / ::sqrt(x); } // pow (Otherwise) template<typename B, typename E> static inline __host__ __device__ B complex_pow_(B base, E exp) { return std::pow(base, exp); } #endif void pow_tensor_tensor_kernel(TensorIterator& iter) { if (isComplexType(iter.dtype())) { AT_DISPATCH_COMPLEX_TYPES(iter.dtype(), "pow_cuda", [&]() { gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t base, scalar_t exp) -> scalar_t { return complex_pow_(base, exp); }); }); } else if (isFloatingType(iter.dtype())) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "pow_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t base, scalar_t exp) -> scalar_t { return pow_(base, exp); }); }); } else { AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "pow_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t base, scalar_t exp) -> scalar_t { return native::powi(base, exp); }); }); } } template<typename Base_type, typename Exp_type> void pow_tensor_scalar_kernel_impl(TensorIterator& iter, Exp_type exp) { const auto d_exp = static_cast<double>(exp); if (d_exp == 0.5) { gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type { return sqrt_(base); }); } else if (d_exp == 2) { gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type { return base * base; }); } else if (d_exp == 3) { gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type { return base * base * base; }); } else if (d_exp == -0.5) { gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type { return invsqrt_(base); }); } else if (d_exp == -1) { gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type { return 1.0 / base; }); } else if (d_exp == -2) { gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type { return 1.0 / (base * base); }); } else { gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type { return pow_(base, exp); }); } } void pow_tensor_scalar_kernel(TensorIterator& iter, Scalar exp_scalar) { if (isComplexType(iter.dtype()) || exp_scalar.isComplex()) { AT_DISPATCH_COMPLEX_TYPES(iter.dtype(), "pow_cuda", [&]() { const auto exp = exp_scalar.to<scalar_t>(); gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t base) -> scalar_t { return complex_pow_(base, exp); }); }); } else if (isFloatingType(iter.dtype()) || exp_scalar.isIntegral(false)) { AT_DISPATCH_ALL_TYPES_AND(kHalf, iter.dtype(), "pow_cuda", [&]() { const auto exp = exp_scalar.to<scalar_t>(); pow_tensor_scalar_kernel_impl<scalar_t>(iter, exp); }); } else { const auto exp = exp_scalar.to<float>(); AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "pow_cuda", [&]() { pow_tensor_scalar_kernel_impl<scalar_t>(iter, exp); }); } } } // anonymous namespace REGISTER_DISPATCH(pow_tensor_tensor_stub, &pow_tensor_tensor_kernel); REGISTER_DISPATCH(pow_tensor_scalar_stub, &pow_tensor_scalar_kernel); }} // namespace at::native
b50ba5c4cc65a66273dcc016e069bffc2d5c283a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <string.h> #include <stdlib.h> #include "colour-convert.h" // Empy kernel to fire up GPU __global__ void emptyKernel(void) { } // RGB 2 YUV kernel __global__ void rgb2yuvKernel(unsigned char *r, unsigned char *g, unsigned char *b, unsigned char *y, unsigned char *u, unsigned char *v, int *n) { int ind = threadIdx.x + blockIdx.x*blockDim.x; unsigned char ny, cb, cr; if (ind < *n) { ny = (unsigned char)( 0.299*r[ind] + 0.587*g[ind] + 0.114*b[ind]); cb = (unsigned char)(-0.169*r[ind] - 0.331*g[ind] + 0.499*b[ind] + 128); cr = (unsigned char)( 0.499*r[ind] - 0.418*g[ind] - 0.0813*b[ind] + 128); y[ind] = ny; u[ind] = cb; v[ind] = cr; } } // YUV 2 RGB kernel __global__ void yuv2rgbKernel(unsigned char *r, unsigned char *g, unsigned char *b, unsigned char *y, unsigned char *u, unsigned char *v, int *n) { int ind = threadIdx.x + blockIdx.x*blockDim.x; if (ind < *n) { int ny = (int)y[ind]; int cb = (int)u[ind] - 128; int cr = (int)v[ind] - 128; int rt = (int)(ny + 1.402*cr); int gt = (int)(ny - 0.344*cb - 0.714*cr); int bt = (int)(ny + 1.772*cb); rt = (rt < 255) ? rt: 255; rt = (rt > 0) ? rt: 0; gt = (gt < 255) ? gt: 255; gt = (gt > 0) ? gt : 0; bt = (bt < 255 )? bt: 255; bt = (bt > 0) ? bt : 0; r[ind] = rt; g[ind] = gt; b[ind] = bt; } } void launchEmptyKernel() { hipLaunchKernelGGL(( emptyKernel), dim3(1), dim3(1), 0, 0, ); } void copyToDevice(PPM_IMG img_in) { // Allocate memory for the PPM_IMG unsigned char * img_r_d; unsigned char * img_g_d; unsigned char * img_b_d; int size = img_in.w * img_in.h * sizeof(unsigned char); hipMalloc((void **) &img_r_d, size); hipMalloc((void **) &img_g_d, size); hipMalloc((void **) &img_b_d, size); // Copy PPM to device hipMemcpy(img_r_d, img_in.img_r, size, hipMemcpyHostToDevice); hipMemcpy(img_g_d, img_in.img_g, size, hipMemcpyHostToDevice); hipMemcpy(img_b_d, img_in.img_b, size, hipMemcpyHostToDevice); hipFree(img_r_d); hipFree(img_g_d); hipFree(img_b_d); } void copyToDeviceAndBack(PPM_IMG img_in) { // Allocate memory for the PPM_IMG on the device unsigned char * img_r_d; unsigned char * img_g_d; unsigned char * img_b_d; int size = img_in.w * img_in.h * sizeof(unsigned char); hipMalloc((void **) &img_r_d, size); hipMalloc((void **) &img_g_d, size); hipMalloc((void **) &img_b_d, size); // Copy PPM to device hipMemcpy(img_r_d, img_in.img_r, size, hipMemcpyHostToDevice); hipMemcpy(img_g_d, img_in.img_g, size, hipMemcpyHostToDevice); hipMemcpy(img_b_d, img_in.img_b, size, hipMemcpyHostToDevice); // Copy from device to host hipMemcpy(img_in.img_r, img_r_d, size, hipMemcpyDeviceToHost); hipMemcpy(img_in.img_g, img_g_d, size, hipMemcpyDeviceToHost); hipMemcpy(img_in.img_b, img_b_d, size, hipMemcpyDeviceToHost); hipFree(img_r_d); hipFree(img_g_d); hipFree(img_b_d); } YUV_IMG rgb2yuvGPU(PPM_IMG img_in) { YUV_IMG img_out; img_out.w = img_in.w; img_out.h = img_in.h; img_out.img_y = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); img_out.img_u = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); img_out.img_v = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); // Allocate memory for the PPM_IMG & YUV_IMG on the device unsigned char * img_r_d; unsigned char * img_g_d; unsigned char * img_b_d; unsigned char * img_y_d; unsigned char * img_u_d; unsigned char * img_v_d; int * N_d; int size = img_in.w * img_in.h * sizeof(unsigned char); hipMalloc((void **) &img_r_d, size); hipMalloc((void **) &img_g_d, size); 
hipMalloc((void **) &img_b_d, size); hipMalloc((void **) &img_y_d, size); hipMalloc((void **) &img_u_d, size); hipMalloc((void **) &img_v_d, size); hipMalloc((void **) &N_d, sizeof(int)); // Copy PPM to device hipMemcpy(img_r_d, img_in.img_r, size, hipMemcpyHostToDevice); hipMemcpy(img_g_d, img_in.img_g, size, hipMemcpyHostToDevice); hipMemcpy(img_b_d, img_in.img_b, size, hipMemcpyHostToDevice); int N = img_in.w*img_in.h; int M = 512; // number of threads hipMemcpy(N_d, &N, sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( rgb2yuvKernel), dim3((N+M-1)/M),dim3(M), 0, 0, img_r_d, img_g_d, img_b_d, img_y_d, img_u_d, img_v_d, N_d);//Launch the Kernel // Copy from device to host hipMemcpy(img_out.img_y, img_y_d, size, hipMemcpyDeviceToHost); hipMemcpy(img_out.img_u, img_u_d, size, hipMemcpyDeviceToHost); hipMemcpy(img_out.img_v, img_v_d, size, hipMemcpyDeviceToHost); hipFree(img_r_d); hipFree(img_g_d); hipFree(img_b_d); hipFree(img_y_d); hipFree(img_u_d); hipFree(img_v_d); return img_out; } PPM_IMG yuv2rgbGPU(YUV_IMG img_in) { PPM_IMG img_out; img_out.w = img_in.w; img_out.h = img_in.h; img_out.img_r = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); img_out.img_g = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); img_out.img_b = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); // Allocate memory for the PPM_IMG & YUV_IMG on the device unsigned char * img_r_d; unsigned char * img_g_d; unsigned char * img_b_d; unsigned char * img_y_d; unsigned char * img_u_d; unsigned char * img_v_d; int * N_d; int size = img_in.w * img_in.h * sizeof(unsigned char); hipMalloc((void **) &img_r_d, size); hipMalloc((void **) &img_g_d, size); hipMalloc((void **) &img_b_d, size); hipMalloc((void **) &img_y_d, size); hipMalloc((void **) &img_u_d, size); hipMalloc((void **) &img_v_d, size); hipMalloc((void **) &N_d, sizeof(int)); // Copy YUV to device hipMemcpy(img_y_d, img_in.img_y, size, hipMemcpyHostToDevice); hipMemcpy(img_u_d, img_in.img_u, size, hipMemcpyHostToDevice); hipMemcpy(img_v_d, img_in.img_v, size, hipMemcpyHostToDevice); int N = img_in.w*img_in.h; int M = 512; hipMemcpy(N_d, &N, sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( yuv2rgbKernel), dim3((N+M-1)/M),dim3(M), 0, 0, img_r_d, img_g_d, img_b_d, img_y_d, img_u_d, img_v_d, N_d);//Launch the Kernel // Copy from device to host hipMemcpy(img_out.img_r, img_r_d, size, hipMemcpyDeviceToHost); hipMemcpy(img_out.img_g, img_g_d, size, hipMemcpyDeviceToHost); hipMemcpy(img_out.img_b, img_b_d, size, hipMemcpyDeviceToHost); hipFree(img_r_d); hipFree(img_g_d); hipFree(img_b_d); hipFree(img_y_d); hipFree(img_u_d); hipFree(img_v_d); return img_out; } //Convert RGB to YUV444, all components in [0, 255] YUV_IMG rgb2yuv(PPM_IMG img_in) { YUV_IMG img_out; int i;//, j; unsigned char r, g, b; unsigned char y, cb, cr; img_out.w = img_in.w; img_out.h = img_in.h; img_out.img_y = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); img_out.img_u = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); img_out.img_v = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); for(i = 0; i < img_out.w*img_out.h; i ++){ r = img_in.img_r[i]; g = img_in.img_g[i]; b = img_in.img_b[i]; y = (unsigned char)( 0.299*r + 0.587*g + 0.114*b); cb = (unsigned char)(-0.169*r - 0.331*g + 0.499*b + 128); cr = (unsigned char)( 0.499*r - 0.418*g - 0.0813*b + 128); img_out.img_y[i] = y; img_out.img_u[i] = cb; img_out.img_v[i] = cr; } return img_out; } unsigned char clip_rgb(int x) { 
if(x > 255) return 255; if(x < 0) return 0; return (unsigned char)x; } //Convert YUV to RGB, all components in [0, 255] PPM_IMG yuv2rgb(YUV_IMG img_in) { PPM_IMG img_out; int i; int rt,gt,bt; int y, cb, cr; img_out.w = img_in.w; img_out.h = img_in.h; img_out.img_r = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); img_out.img_g = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); img_out.img_b = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); for(i = 0; i < img_out.w*img_out.h; i ++){ y = (int)img_in.img_y[i]; cb = (int)img_in.img_u[i] - 128; cr = (int)img_in.img_v[i] - 128; rt = (int)( y + 1.402*cr); gt = (int)( y - 0.344*cb - 0.714*cr); bt = (int)( y + 1.772*cb); img_out.img_r[i] = clip_rgb(rt); img_out.img_g[i] = clip_rgb(gt); img_out.img_b[i] = clip_rgb(bt); } return img_out; }
b50ba5c4cc65a66273dcc016e069bffc2d5c283a.cu
#include <stdio.h> #include <string.h> #include <stdlib.h> #include "colour-convert.h" // Empy kernel to fire up GPU __global__ void emptyKernel(void) { } // RGB 2 YUV kernel __global__ void rgb2yuvKernel(unsigned char *r, unsigned char *g, unsigned char *b, unsigned char *y, unsigned char *u, unsigned char *v, int *n) { int ind = threadIdx.x + blockIdx.x*blockDim.x; unsigned char ny, cb, cr; if (ind < *n) { ny = (unsigned char)( 0.299*r[ind] + 0.587*g[ind] + 0.114*b[ind]); cb = (unsigned char)(-0.169*r[ind] - 0.331*g[ind] + 0.499*b[ind] + 128); cr = (unsigned char)( 0.499*r[ind] - 0.418*g[ind] - 0.0813*b[ind] + 128); y[ind] = ny; u[ind] = cb; v[ind] = cr; } } // YUV 2 RGB kernel __global__ void yuv2rgbKernel(unsigned char *r, unsigned char *g, unsigned char *b, unsigned char *y, unsigned char *u, unsigned char *v, int *n) { int ind = threadIdx.x + blockIdx.x*blockDim.x; if (ind < *n) { int ny = (int)y[ind]; int cb = (int)u[ind] - 128; int cr = (int)v[ind] - 128; int rt = (int)(ny + 1.402*cr); int gt = (int)(ny - 0.344*cb - 0.714*cr); int bt = (int)(ny + 1.772*cb); rt = (rt < 255) ? rt: 255; rt = (rt > 0) ? rt: 0; gt = (gt < 255) ? gt: 255; gt = (gt > 0) ? gt : 0; bt = (bt < 255 )? bt: 255; bt = (bt > 0) ? bt : 0; r[ind] = rt; g[ind] = gt; b[ind] = bt; } } void launchEmptyKernel() { emptyKernel<<<1, 1>>>(); } void copyToDevice(PPM_IMG img_in) { // Allocate memory for the PPM_IMG unsigned char * img_r_d; unsigned char * img_g_d; unsigned char * img_b_d; int size = img_in.w * img_in.h * sizeof(unsigned char); cudaMalloc((void **) &img_r_d, size); cudaMalloc((void **) &img_g_d, size); cudaMalloc((void **) &img_b_d, size); // Copy PPM to device cudaMemcpy(img_r_d, img_in.img_r, size, cudaMemcpyHostToDevice); cudaMemcpy(img_g_d, img_in.img_g, size, cudaMemcpyHostToDevice); cudaMemcpy(img_b_d, img_in.img_b, size, cudaMemcpyHostToDevice); cudaFree(img_r_d); cudaFree(img_g_d); cudaFree(img_b_d); } void copyToDeviceAndBack(PPM_IMG img_in) { // Allocate memory for the PPM_IMG on the device unsigned char * img_r_d; unsigned char * img_g_d; unsigned char * img_b_d; int size = img_in.w * img_in.h * sizeof(unsigned char); cudaMalloc((void **) &img_r_d, size); cudaMalloc((void **) &img_g_d, size); cudaMalloc((void **) &img_b_d, size); // Copy PPM to device cudaMemcpy(img_r_d, img_in.img_r, size, cudaMemcpyHostToDevice); cudaMemcpy(img_g_d, img_in.img_g, size, cudaMemcpyHostToDevice); cudaMemcpy(img_b_d, img_in.img_b, size, cudaMemcpyHostToDevice); // Copy from device to host cudaMemcpy(img_in.img_r, img_r_d, size, cudaMemcpyDeviceToHost); cudaMemcpy(img_in.img_g, img_g_d, size, cudaMemcpyDeviceToHost); cudaMemcpy(img_in.img_b, img_b_d, size, cudaMemcpyDeviceToHost); cudaFree(img_r_d); cudaFree(img_g_d); cudaFree(img_b_d); } YUV_IMG rgb2yuvGPU(PPM_IMG img_in) { YUV_IMG img_out; img_out.w = img_in.w; img_out.h = img_in.h; img_out.img_y = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); img_out.img_u = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); img_out.img_v = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); // Allocate memory for the PPM_IMG & YUV_IMG on the device unsigned char * img_r_d; unsigned char * img_g_d; unsigned char * img_b_d; unsigned char * img_y_d; unsigned char * img_u_d; unsigned char * img_v_d; int * N_d; int size = img_in.w * img_in.h * sizeof(unsigned char); cudaMalloc((void **) &img_r_d, size); cudaMalloc((void **) &img_g_d, size); cudaMalloc((void **) &img_b_d, size); cudaMalloc((void **) &img_y_d, size); cudaMalloc((void 
**) &img_u_d, size); cudaMalloc((void **) &img_v_d, size); cudaMalloc((void **) &N_d, sizeof(int)); // Copy PPM to device cudaMemcpy(img_r_d, img_in.img_r, size, cudaMemcpyHostToDevice); cudaMemcpy(img_g_d, img_in.img_g, size, cudaMemcpyHostToDevice); cudaMemcpy(img_b_d, img_in.img_b, size, cudaMemcpyHostToDevice); int N = img_in.w*img_in.h; int M = 512; // number of threads cudaMemcpy(N_d, &N, sizeof(int), cudaMemcpyHostToDevice); rgb2yuvKernel<<<(N+M-1)/M,M>>>(img_r_d, img_g_d, img_b_d, img_y_d, img_u_d, img_v_d, N_d);//Launch the Kernel // Copy from device to host cudaMemcpy(img_out.img_y, img_y_d, size, cudaMemcpyDeviceToHost); cudaMemcpy(img_out.img_u, img_u_d, size, cudaMemcpyDeviceToHost); cudaMemcpy(img_out.img_v, img_v_d, size, cudaMemcpyDeviceToHost); cudaFree(img_r_d); cudaFree(img_g_d); cudaFree(img_b_d); cudaFree(img_y_d); cudaFree(img_u_d); cudaFree(img_v_d); return img_out; } PPM_IMG yuv2rgbGPU(YUV_IMG img_in) { PPM_IMG img_out; img_out.w = img_in.w; img_out.h = img_in.h; img_out.img_r = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); img_out.img_g = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); img_out.img_b = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); // Allocate memory for the PPM_IMG & YUV_IMG on the device unsigned char * img_r_d; unsigned char * img_g_d; unsigned char * img_b_d; unsigned char * img_y_d; unsigned char * img_u_d; unsigned char * img_v_d; int * N_d; int size = img_in.w * img_in.h * sizeof(unsigned char); cudaMalloc((void **) &img_r_d, size); cudaMalloc((void **) &img_g_d, size); cudaMalloc((void **) &img_b_d, size); cudaMalloc((void **) &img_y_d, size); cudaMalloc((void **) &img_u_d, size); cudaMalloc((void **) &img_v_d, size); cudaMalloc((void **) &N_d, sizeof(int)); // Copy YUV to device cudaMemcpy(img_y_d, img_in.img_y, size, cudaMemcpyHostToDevice); cudaMemcpy(img_u_d, img_in.img_u, size, cudaMemcpyHostToDevice); cudaMemcpy(img_v_d, img_in.img_v, size, cudaMemcpyHostToDevice); int N = img_in.w*img_in.h; int M = 512; cudaMemcpy(N_d, &N, sizeof(int), cudaMemcpyHostToDevice); yuv2rgbKernel<<<(N+M-1)/M,M>>>(img_r_d, img_g_d, img_b_d, img_y_d, img_u_d, img_v_d, N_d);//Launch the Kernel // Copy from device to host cudaMemcpy(img_out.img_r, img_r_d, size, cudaMemcpyDeviceToHost); cudaMemcpy(img_out.img_g, img_g_d, size, cudaMemcpyDeviceToHost); cudaMemcpy(img_out.img_b, img_b_d, size, cudaMemcpyDeviceToHost); cudaFree(img_r_d); cudaFree(img_g_d); cudaFree(img_b_d); cudaFree(img_y_d); cudaFree(img_u_d); cudaFree(img_v_d); return img_out; } //Convert RGB to YUV444, all components in [0, 255] YUV_IMG rgb2yuv(PPM_IMG img_in) { YUV_IMG img_out; int i;//, j; unsigned char r, g, b; unsigned char y, cb, cr; img_out.w = img_in.w; img_out.h = img_in.h; img_out.img_y = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); img_out.img_u = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); img_out.img_v = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); for(i = 0; i < img_out.w*img_out.h; i ++){ r = img_in.img_r[i]; g = img_in.img_g[i]; b = img_in.img_b[i]; y = (unsigned char)( 0.299*r + 0.587*g + 0.114*b); cb = (unsigned char)(-0.169*r - 0.331*g + 0.499*b + 128); cr = (unsigned char)( 0.499*r - 0.418*g - 0.0813*b + 128); img_out.img_y[i] = y; img_out.img_u[i] = cb; img_out.img_v[i] = cr; } return img_out; } unsigned char clip_rgb(int x) { if(x > 255) return 255; if(x < 0) return 0; return (unsigned char)x; } //Convert YUV to RGB, all components in [0, 
255] PPM_IMG yuv2rgb(YUV_IMG img_in) { PPM_IMG img_out; int i; int rt,gt,bt; int y, cb, cr; img_out.w = img_in.w; img_out.h = img_in.h; img_out.img_r = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); img_out.img_g = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); img_out.img_b = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h); for(i = 0; i < img_out.w*img_out.h; i ++){ y = (int)img_in.img_y[i]; cb = (int)img_in.img_u[i] - 128; cr = (int)img_in.img_v[i] - 128; rt = (int)( y + 1.402*cr); gt = (int)( y - 0.344*cb - 0.714*cr); bt = (int)( y + 1.772*cb); img_out.img_r[i] = clip_rgb(rt); img_out.img_g[i] = clip_rgb(gt); img_out.img_b[i] = clip_rgb(bt); } return img_out; }
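In both rgb2yuvGPU and yuv2rgbGPU above (HIP and CUDA versions alike), the pixel count is copied into a device allocation N_d that is never freed before the function returns. Since scalar kernel arguments are passed by value at launch, the extra allocation, copy, and leak can be avoided entirely; a minimal sketch of that alternative follows (the kernel name is illustrative, not the repository's):

// Hypothetical variant: the pixel count travels as a by-value kernel argument.
__global__ void rgb2yuvByValue(const unsigned char *r, const unsigned char *g,
                               const unsigned char *b, unsigned char *y,
                               unsigned char *u, unsigned char *v, int n)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < n) {
        y[i] = (unsigned char)( 0.299f*r[i] + 0.587f*g[i] + 0.114f*b[i]);
        u[i] = (unsigned char)(-0.169f*r[i] - 0.331f*g[i] + 0.499f*b[i] + 128);
        v[i] = (unsigned char)( 0.499f*r[i] - 0.418f*g[i] - 0.0813f*b[i] + 128);
    }
}

// Launch with the same 1-D grid as above, but with no N_d allocation, copy, or free:
// rgb2yuvByValue<<<(N + M - 1) / M, M>>>(img_r_d, img_g_d, img_b_d,
//                                        img_y_d, img_u_d, img_v_d, N);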
5a329141a53710796dac22c7e4b58d42c4aca847.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright 2016, National University of Defense Technology // Author: Xuhao Chen <[email protected]> #define BFS_VARIANT "linear_lb" #include "bfs.h" #include "worklistc.h" #include "cuda_launch_config.hpp" #include "cutil_subset.h" #include <hipcub/hipcub.hpp> #include "timer.h" typedef hipcub::BlockScan<int, BLOCK_SIZE> BlockScan; __device__ __forceinline__ void process_edge(int depth, int edge, int *column_indices, DistT *dist, Worklist2 &out_queue) { int dst = column_indices[edge]; //if (dist[dst] > depth) { if (dist[dst] == MYINFINITY) { dist[dst] = depth; out_queue.push(dst); } } __device__ void expandByCta(int m, int *row_offsets, int *column_indices, DistT *dist, Worklist2 &in_queue, Worklist2 &out_queue, int depth) { int id = blockIdx.x * blockDim.x + threadIdx.x; int vertex; __shared__ int owner; __shared__ int sh_vertex; owner = -1; int size = 0; if(in_queue.pop_id(id, vertex)) { size = row_offsets[vertex+1] - row_offsets[vertex]; } while(true) { if(size > BLOCK_SIZE) owner = threadIdx.x; __syncthreads(); if(owner == -1) break; __syncthreads(); if(owner == threadIdx.x) { sh_vertex = vertex; in_queue.d_queue[id] = -1; owner = -1; size = 0; } __syncthreads(); int row_begin = row_offsets[sh_vertex]; int row_end = row_offsets[sh_vertex+1]; int neighbor_size = row_end - row_begin; int num = ((neighbor_size + blockDim.x - 1) / blockDim.x) * blockDim.x; for(int i = threadIdx.x; i < num; i += blockDim.x) { int edge = row_begin + i; int dst = 0; int ncnt = 0; if(i < neighbor_size) { // TODO: push() doesn't work for expandByCta //process_edge(depth, edge, column_indices, dist, out_queue); ///* dst = column_indices[edge]; //assert(dst < m); if(dist[dst] == MYINFINITY) { dist[dst] = depth; ncnt = 1; } //*/ } out_queue.push_1item<BlockScan>(ncnt, dst, BLOCK_SIZE); } } } __device__ __forceinline__ unsigned LaneId() { unsigned ret; asm("mov.u32 %0, %laneid;" : "=r"(ret)); return ret; } __device__ __forceinline__ void expandByWarp(int m, int *row_offsets, int *column_indices, DistT *dist, Worklist2 &in_queue, Worklist2 &out_queue, int depth) { unsigned id = blockIdx.x * blockDim.x + threadIdx.x; unsigned warp_id = threadIdx.x >> LOG_WARP_SIZE; unsigned lane_id = LaneId(); __shared__ int owner[NUM_WARPS]; __shared__ int sh_vertex[NUM_WARPS]; owner[warp_id] = -1; int size = 0; int vertex; if(in_queue.pop_id(id, vertex)) { if (vertex != -1) size = row_offsets[vertex+1] - row_offsets[vertex]; } while(__any(size) >= WARP_SIZE) { if(size >= WARP_SIZE) owner[warp_id] = lane_id; if(owner[warp_id] == lane_id) { sh_vertex[warp_id] = vertex; in_queue.d_queue[id] = -1; owner[warp_id] = -1; size = 0; } int winner = sh_vertex[warp_id]; int row_begin = row_offsets[winner]; int row_end = row_offsets[winner+1]; int neighbor_size = row_end - row_begin; int num = ((neighbor_size + WARP_SIZE - 1) / WARP_SIZE) * WARP_SIZE; for(int i = lane_id; i < num; i+= WARP_SIZE) { //int ncnt = 0; //int dst = 0; int edge = row_begin + i; if(i < neighbor_size) { process_edge(depth, edge, column_indices, dist, out_queue); /* dst = column_indices[edge]; //assert(dst < m); if(dist[dst] == MYINFINITY) { dist[dst] = depth; ncnt = 1; } //*/ } //out_queue.push_1item<BlockScan>(ncnt, dst, BLOCK_SIZE); } } } __global__ void bfs_kernel(int m, int *row_offsets, int *column_indices, DistT *dist, Worklist2 in_queue, Worklist2 out_queue, int depth) { expandByCta(m, row_offsets, column_indices, dist, in_queue, out_queue, depth); expandByWarp(m, 
row_offsets, column_indices, dist, in_queue, out_queue, depth); int id = blockIdx.x * blockDim.x + threadIdx.x; int vertex; const int SCRATCHSIZE = BLOCK_SIZE; __shared__ BlockScan::TempStorage temp_storage; __shared__ int gather_offsets[SCRATCHSIZE]; gather_offsets[threadIdx.x] = 0; int neighbor_size = 0; int neighbor_offset = 0; int scratch_offset = 0; int total_edges = 0; if(in_queue.pop_id(id, vertex)) { if(vertex != -1) { neighbor_offset = row_offsets[vertex]; neighbor_size = row_offsets[vertex+1] - neighbor_offset; } } BlockScan(temp_storage).ExclusiveSum(neighbor_size, scratch_offset, total_edges); int done = 0; int neighbors_done = 0; while(total_edges > 0) { __syncthreads(); int i; for(i = 0; neighbors_done + i < neighbor_size && (scratch_offset + i - done) < SCRATCHSIZE; i++) { gather_offsets[scratch_offset + i - done] = neighbor_offset + neighbors_done + i; } neighbors_done += i; scratch_offset += i; __syncthreads(); //int ncnt = 0; //int dst = 0; int edge = gather_offsets[threadIdx.x]; if(threadIdx.x < total_edges) { process_edge(depth, edge, column_indices, dist, out_queue); /* dst = column_indices[edge]; //assert(dst < m); if(dist[dst] == MYINFINITY) { dist[dst] = depth; ncnt = 1; } //*/ } //out_queue.push_1item<BlockScan>(ncnt, dst, BLOCK_SIZE); total_edges -= BLOCK_SIZE; done += BLOCK_SIZE; } } __global__ void insert(int source, Worklist2 in_queue) { int id = blockIdx.x * blockDim.x + threadIdx.x; if(id == 0) in_queue.push(source); return; } void BFSSolver(int m, int nnz, int source, int *in_row_offsets, int *in_column_indices, int *h_row_offsets, int *h_column_indices, int *in_degree, int *h_degree, DistT *h_dist) { //print_device_info(0); DistT zero = 0; int *d_row_offsets, *d_column_indices; CUDA_SAFE_CALL(hipMalloc((void **)&d_row_offsets, (m + 1) * sizeof(int))); CUDA_SAFE_CALL(hipMalloc((void **)&d_column_indices, nnz * sizeof(int))); CUDA_SAFE_CALL(hipMemcpy(d_row_offsets, h_row_offsets, (m + 1) * sizeof(int), hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_column_indices, h_column_indices, nnz * sizeof(int), hipMemcpyHostToDevice)); DistT * d_dist; CUDA_SAFE_CALL(hipMalloc((void **)&d_dist, m * sizeof(DistT))); CUDA_SAFE_CALL(hipMemcpy(d_dist, h_dist, m * sizeof(DistT), hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(&d_dist[source], &zero, sizeof(zero), hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipDeviceSynchronize()); Worklist2 queue1(nnz), queue2(nnz); Worklist2 *in_frontier = &queue1, *out_frontier = &queue2; int iter = 0; int nitems = 1; int nthreads = BLOCK_SIZE; int nblocks = (m - 1) / nthreads + 1; printf("Launching CUDA BFS solver (%d threads/CTA) ...\n", nthreads); Timer t; t.Start(); hipLaunchKernelGGL(( insert), dim3(1), dim3(nthreads), 0, 0, source, *in_frontier); nitems = in_frontier->nitems(); do { ++ iter; nblocks = (nitems + nthreads - 1) / nthreads; hipLaunchKernelGGL(( bfs_kernel), dim3(nblocks), dim3(nthreads), 0, 0, m, d_row_offsets, d_column_indices, d_dist, *in_frontier, *out_frontier, iter); CudaTest("solving failed"); nitems = out_frontier->nitems(); //printf("iteration=%d, frontier_size=%d\n", iter, nitems); Worklist2 *tmp = in_frontier; in_frontier = out_frontier; out_frontier = tmp; out_frontier->reset(); } while(nitems > 0); CUDA_SAFE_CALL(hipDeviceSynchronize()); t.Stop(); printf("\titerations = %d.\n", iter); printf("\truntime [%s] = %f ms.\n", BFS_VARIANT, t.Millisecs()); CUDA_SAFE_CALL(hipMemcpy(h_dist, d_dist, m * sizeof(DistT), hipMemcpyDeviceToHost)); CUDA_SAFE_CALL(hipFree(d_row_offsets)); 
CUDA_SAFE_CALL(hipFree(d_column_indices)); CUDA_SAFE_CALL(hipFree(d_dist)); return; }
5a329141a53710796dac22c7e4b58d42c4aca847.cu
// Copyright 2016, National University of Defense Technology // Author: Xuhao Chen <[email protected]> #define BFS_VARIANT "linear_lb" #include "bfs.h" #include "worklistc.h" #include "cuda_launch_config.hpp" #include "cutil_subset.h" #include <cub/cub.cuh> #include "timer.h" typedef cub::BlockScan<int, BLOCK_SIZE> BlockScan; __device__ __forceinline__ void process_edge(int depth, int edge, int *column_indices, DistT *dist, Worklist2 &out_queue) { int dst = column_indices[edge]; //if (dist[dst] > depth) { if (dist[dst] == MYINFINITY) { dist[dst] = depth; out_queue.push(dst); } } __device__ void expandByCta(int m, int *row_offsets, int *column_indices, DistT *dist, Worklist2 &in_queue, Worklist2 &out_queue, int depth) { int id = blockIdx.x * blockDim.x + threadIdx.x; int vertex; __shared__ int owner; __shared__ int sh_vertex; owner = -1; int size = 0; if(in_queue.pop_id(id, vertex)) { size = row_offsets[vertex+1] - row_offsets[vertex]; } while(true) { if(size > BLOCK_SIZE) owner = threadIdx.x; __syncthreads(); if(owner == -1) break; __syncthreads(); if(owner == threadIdx.x) { sh_vertex = vertex; in_queue.d_queue[id] = -1; owner = -1; size = 0; } __syncthreads(); int row_begin = row_offsets[sh_vertex]; int row_end = row_offsets[sh_vertex+1]; int neighbor_size = row_end - row_begin; int num = ((neighbor_size + blockDim.x - 1) / blockDim.x) * blockDim.x; for(int i = threadIdx.x; i < num; i += blockDim.x) { int edge = row_begin + i; int dst = 0; int ncnt = 0; if(i < neighbor_size) { // TODO: push() doesn't work for expandByCta //process_edge(depth, edge, column_indices, dist, out_queue); ///* dst = column_indices[edge]; //assert(dst < m); if(dist[dst] == MYINFINITY) { dist[dst] = depth; ncnt = 1; } //*/ } out_queue.push_1item<BlockScan>(ncnt, dst, BLOCK_SIZE); } } } __device__ __forceinline__ unsigned LaneId() { unsigned ret; asm("mov.u32 %0, %laneid;" : "=r"(ret)); return ret; } __device__ __forceinline__ void expandByWarp(int m, int *row_offsets, int *column_indices, DistT *dist, Worklist2 &in_queue, Worklist2 &out_queue, int depth) { unsigned id = blockIdx.x * blockDim.x + threadIdx.x; unsigned warp_id = threadIdx.x >> LOG_WARP_SIZE; unsigned lane_id = LaneId(); __shared__ int owner[NUM_WARPS]; __shared__ int sh_vertex[NUM_WARPS]; owner[warp_id] = -1; int size = 0; int vertex; if(in_queue.pop_id(id, vertex)) { if (vertex != -1) size = row_offsets[vertex+1] - row_offsets[vertex]; } while(__any(size) >= WARP_SIZE) { if(size >= WARP_SIZE) owner[warp_id] = lane_id; if(owner[warp_id] == lane_id) { sh_vertex[warp_id] = vertex; in_queue.d_queue[id] = -1; owner[warp_id] = -1; size = 0; } int winner = sh_vertex[warp_id]; int row_begin = row_offsets[winner]; int row_end = row_offsets[winner+1]; int neighbor_size = row_end - row_begin; int num = ((neighbor_size + WARP_SIZE - 1) / WARP_SIZE) * WARP_SIZE; for(int i = lane_id; i < num; i+= WARP_SIZE) { //int ncnt = 0; //int dst = 0; int edge = row_begin + i; if(i < neighbor_size) { process_edge(depth, edge, column_indices, dist, out_queue); /* dst = column_indices[edge]; //assert(dst < m); if(dist[dst] == MYINFINITY) { dist[dst] = depth; ncnt = 1; } //*/ } //out_queue.push_1item<BlockScan>(ncnt, dst, BLOCK_SIZE); } } } __global__ void bfs_kernel(int m, int *row_offsets, int *column_indices, DistT *dist, Worklist2 in_queue, Worklist2 out_queue, int depth) { expandByCta(m, row_offsets, column_indices, dist, in_queue, out_queue, depth); expandByWarp(m, row_offsets, column_indices, dist, in_queue, out_queue, depth); int id = blockIdx.x * blockDim.x + 
threadIdx.x; int vertex; const int SCRATCHSIZE = BLOCK_SIZE; __shared__ BlockScan::TempStorage temp_storage; __shared__ int gather_offsets[SCRATCHSIZE]; gather_offsets[threadIdx.x] = 0; int neighbor_size = 0; int neighbor_offset = 0; int scratch_offset = 0; int total_edges = 0; if(in_queue.pop_id(id, vertex)) { if(vertex != -1) { neighbor_offset = row_offsets[vertex]; neighbor_size = row_offsets[vertex+1] - neighbor_offset; } } BlockScan(temp_storage).ExclusiveSum(neighbor_size, scratch_offset, total_edges); int done = 0; int neighbors_done = 0; while(total_edges > 0) { __syncthreads(); int i; for(i = 0; neighbors_done + i < neighbor_size && (scratch_offset + i - done) < SCRATCHSIZE; i++) { gather_offsets[scratch_offset + i - done] = neighbor_offset + neighbors_done + i; } neighbors_done += i; scratch_offset += i; __syncthreads(); //int ncnt = 0; //int dst = 0; int edge = gather_offsets[threadIdx.x]; if(threadIdx.x < total_edges) { process_edge(depth, edge, column_indices, dist, out_queue); /* dst = column_indices[edge]; //assert(dst < m); if(dist[dst] == MYINFINITY) { dist[dst] = depth; ncnt = 1; } //*/ } //out_queue.push_1item<BlockScan>(ncnt, dst, BLOCK_SIZE); total_edges -= BLOCK_SIZE; done += BLOCK_SIZE; } } __global__ void insert(int source, Worklist2 in_queue) { int id = blockIdx.x * blockDim.x + threadIdx.x; if(id == 0) in_queue.push(source); return; } void BFSSolver(int m, int nnz, int source, int *in_row_offsets, int *in_column_indices, int *h_row_offsets, int *h_column_indices, int *in_degree, int *h_degree, DistT *h_dist) { //print_device_info(0); DistT zero = 0; int *d_row_offsets, *d_column_indices; CUDA_SAFE_CALL(cudaMalloc((void **)&d_row_offsets, (m + 1) * sizeof(int))); CUDA_SAFE_CALL(cudaMalloc((void **)&d_column_indices, nnz * sizeof(int))); CUDA_SAFE_CALL(cudaMemcpy(d_row_offsets, h_row_offsets, (m + 1) * sizeof(int), cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_column_indices, h_column_indices, nnz * sizeof(int), cudaMemcpyHostToDevice)); DistT * d_dist; CUDA_SAFE_CALL(cudaMalloc((void **)&d_dist, m * sizeof(DistT))); CUDA_SAFE_CALL(cudaMemcpy(d_dist, h_dist, m * sizeof(DistT), cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(&d_dist[source], &zero, sizeof(zero), cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaDeviceSynchronize()); Worklist2 queue1(nnz), queue2(nnz); Worklist2 *in_frontier = &queue1, *out_frontier = &queue2; int iter = 0; int nitems = 1; int nthreads = BLOCK_SIZE; int nblocks = (m - 1) / nthreads + 1; printf("Launching CUDA BFS solver (%d threads/CTA) ...\n", nthreads); Timer t; t.Start(); insert<<<1, nthreads>>>(source, *in_frontier); nitems = in_frontier->nitems(); do { ++ iter; nblocks = (nitems + nthreads - 1) / nthreads; bfs_kernel<<<nblocks, nthreads>>>(m, d_row_offsets, d_column_indices, d_dist, *in_frontier, *out_frontier, iter); CudaTest("solving failed"); nitems = out_frontier->nitems(); //printf("iteration=%d, frontier_size=%d\n", iter, nitems); Worklist2 *tmp = in_frontier; in_frontier = out_frontier; out_frontier = tmp; out_frontier->reset(); } while(nitems > 0); CUDA_SAFE_CALL(cudaDeviceSynchronize()); t.Stop(); printf("\titerations = %d.\n", iter); printf("\truntime [%s] = %f ms.\n", BFS_VARIANT, t.Millisecs()); CUDA_SAFE_CALL(cudaMemcpy(h_dist, d_dist, m * sizeof(DistT), cudaMemcpyDeviceToHost)); CUDA_SAFE_CALL(cudaFree(d_row_offsets)); CUDA_SAFE_CALL(cudaFree(d_column_indices)); CUDA_SAFE_CALL(cudaFree(d_dist)); return; }
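The bfs_kernel above (in both the HIP and CUDA variants) relies on cub::BlockScan's ExclusiveSum overload that also returns the block-wide aggregate, turning per-vertex neighbour counts into scratch offsets plus the total number of edges the block must gather. A minimal standalone sketch of just that primitive, with made-up buffer names and a fixed block size:

#include <cub/cub.cuh>

// Standalone illustration of the block-wide exclusive prefix sum used for
// load-balanced edge gathering: per-thread counts -> per-thread offsets + block total.
template <int BLOCK_THREADS>
__global__ void neighbour_offsets(const int *counts, int *offsets, int *block_total)
{
    typedef cub::BlockScan<int, BLOCK_THREADS> BlockScan;
    __shared__ typename BlockScan::TempStorage temp_storage;

    int count  = counts[threadIdx.x];  // e.g. this thread's vertex degree
    int offset = 0;
    int total  = 0;
    BlockScan(temp_storage).ExclusiveSum(count, offset, total);

    offsets[threadIdx.x] = offset;     // where this thread's edges start in scratch
    if (threadIdx.x == 0)
        *block_total = total;          // edges the whole block must process
}

// Launched with exactly BLOCK_THREADS threads per block, e.g.
// neighbour_offsets<256><<<1, 256>>>(d_counts, d_offsets, d_total);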
9207091e198afc5978b87ec71e038519e56554fc.hip
// !!! This is a file automatically generated by hipify!!! #include <cusp/csr_matrix.h> #include <cusp/multiply.h> #include "cuspAdapter.hu" using namespace thundercat; void CuspAdapter::preprocess( int m, int n, int nnz, int * rowPtr, int * colIndx, double * values) { M = m; N = n; NNZ = nnz; int *devRowPtr; int *devColIndx; double *devValues; hipMalloc(&devRowPtr, (N+1) * sizeof(int)); hipMalloc(&devColIndx, NNZ * sizeof(int)); hipMalloc(&devValues, NNZ * sizeof(double)); hipMalloc(&devX, M * sizeof(double)); hipMalloc(&devY, N * sizeof(double)); hipMemcpy(devRowPtr, rowPtr, (N+1) * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(devColIndx, colIndx, NNZ * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(devValues, values, NNZ * sizeof(double), hipMemcpyHostToDevice); // *NOTE* raw pointers must be wrapped with thrust::device_ptr! thrust::device_ptr<int> wrapped_device_Ap(devRowPtr); thrust::device_ptr<int> wrapped_device_Aj(devColIndx); thrust::device_ptr<double> wrapped_device_Ax(devValues); thrust::device_ptr<double> wrapped_device_x(devX); thrust::device_ptr<double> wrapped_device_y(devY); DeviceIndexArrayView row_offsets(wrapped_device_Ap, wrapped_device_Ap + N + 1); DeviceIndexArrayView column_indices(wrapped_device_Aj, wrapped_device_Aj + NNZ); DeviceValueArrayView values_array (wrapped_device_Ax, wrapped_device_Ax + NNZ); DeviceValueArrayView x_local(wrapped_device_x, wrapped_device_x + M); DeviceValueArrayView y_local(wrapped_device_y, wrapped_device_y + N); DeviceView A_local(M, N, NNZ, row_offsets, column_indices, values_array); A = A_local; x = x_local; y = y_local; } void CuspAdapter::setX(double * v) { hipMemcpy(devX, v, M * sizeof(double), hipMemcpyHostToDevice); hipDeviceSynchronize(); } void CuspAdapter::getY(double * w) { hipMemcpy(w, devY, N * sizeof(double), hipMemcpyDeviceToHost); hipDeviceSynchronize(); } void CuspAdapter::spmv() { cusp::multiply(A, x, y); hipDeviceSynchronize(); } CuspAdapter* thundercat::newCuspAdapter() { return new CuspAdapter(); } void thundercat::deleteCuspAdapter(CuspAdapter* handle) { delete handle; }
9207091e198afc5978b87ec71e038519e56554fc.cu
#include <cusp/csr_matrix.h> #include <cusp/multiply.h> #include "cuspAdapter.hu" using namespace thundercat; void CuspAdapter::preprocess( int m, int n, int nnz, int * rowPtr, int * colIndx, double * values) { M = m; N = n; NNZ = nnz; int *devRowPtr; int *devColIndx; double *devValues; cudaMalloc(&devRowPtr, (N+1) * sizeof(int)); cudaMalloc(&devColIndx, NNZ * sizeof(int)); cudaMalloc(&devValues, NNZ * sizeof(double)); cudaMalloc(&devX, M * sizeof(double)); cudaMalloc(&devY, N * sizeof(double)); cudaMemcpy(devRowPtr, rowPtr, (N+1) * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(devColIndx, colIndx, NNZ * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(devValues, values, NNZ * sizeof(double), cudaMemcpyHostToDevice); // *NOTE* raw pointers must be wrapped with thrust::device_ptr! thrust::device_ptr<int> wrapped_device_Ap(devRowPtr); thrust::device_ptr<int> wrapped_device_Aj(devColIndx); thrust::device_ptr<double> wrapped_device_Ax(devValues); thrust::device_ptr<double> wrapped_device_x(devX); thrust::device_ptr<double> wrapped_device_y(devY); DeviceIndexArrayView row_offsets(wrapped_device_Ap, wrapped_device_Ap + N + 1); DeviceIndexArrayView column_indices(wrapped_device_Aj, wrapped_device_Aj + NNZ); DeviceValueArrayView values_array (wrapped_device_Ax, wrapped_device_Ax + NNZ); DeviceValueArrayView x_local(wrapped_device_x, wrapped_device_x + M); DeviceValueArrayView y_local(wrapped_device_y, wrapped_device_y + N); DeviceView A_local(M, N, NNZ, row_offsets, column_indices, values_array); A = A_local; x = x_local; y = y_local; } void CuspAdapter::setX(double * v) { cudaMemcpy(devX, v, M * sizeof(double), cudaMemcpyHostToDevice); cudaThreadSynchronize(); } void CuspAdapter::getY(double * w) { cudaMemcpy(w, devY, N * sizeof(double), cudaMemcpyDeviceToHost); cudaThreadSynchronize(); } void CuspAdapter::spmv() { cusp::multiply(A, x, y); cudaThreadSynchronize(); } CuspAdapter* thundercat::newCuspAdapter() { return new CuspAdapter(); } void thundercat::deleteCuspAdapter(CuspAdapter* handle) { delete handle; }
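The adapter's NOTE comment is the crux of this file: CUSP's device views are built on Thrust iterators, so raw cudaMalloc pointers must be wrapped in thrust::device_ptr before Thrust will dispatch work to the device backend. A small hedged sketch of the same idiom outside CUSP (the helper name and the fill operation are illustrative, not part of the adapter):

#include <thrust/device_ptr.h>
#include <thrust/fill.h>

// Raw cudaMalloc pointers carry no "device" tag, so Thrust would treat them as
// host iterators; wrapping them in device_ptr restores the device dispatch.
void zero_device_vector(double *raw, int n)   // illustrative helper
{
    thrust::device_ptr<double> wrapped(raw);
    thrust::fill(wrapped, wrapped + n, 0.0);  // runs on the device backend
}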
82784e620b3a8b3e766490e9af992afd0ddcd31b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe2/core/context_gpu.h" #include "caffe2/operators/boolean_mask_ops.h" #include <hipcub/hipcub.hpp> namespace caffe2 { namespace { __global__ void BooleanMaskCopyKernel( const int64_t numOfOutput, const int64_t numBytes, const int64_t* indices, const uint8_t* src, uint8_t* dest) { for (int64_t i = blockIdx.x; i < numOfOutput; i += gridDim.x) { const auto srcBase = indices[i] * numBytes; const auto destBase = i * numBytes; for (int64_t j = threadIdx.x; j < numBytes; j += blockDim.x) { dest[destBase + j] = src[srcBase + j]; } } } } template <> class BooleanMaskOp<CUDAContext> final : public Operator<CUDAContext> { public: BooleanMaskOp(const OperatorDef& operator_def, Workspace* ws) : Operator<CUDAContext>(operator_def, ws) {} bool RunOnDevice() override { const auto& src = Input(0); const auto& mask = Input(1); auto* dest = Output(0); CAFFE_ENFORCE(src.ndim() >= 1); CAFFE_ENFORCE_EQ(mask.ndim(), 1); CAFFE_ENFORCE(src.dims()[0] == mask.dims()[0]); const auto* maskData = mask.data<bool>(); const auto outerSize = mask.dims()[0]; indices_.Resize(outerSize); auto* indicesData = indices_.mutable_data<int64_t>(); size_t numBytes = 0; hipcub::CountingInputIterator<int> itr(0); hipcub::DeviceSelect::Flagged( nullptr, numBytes, itr, maskData, indicesData, static_cast<int64_t*>(nullptr), outerSize, context_.cuda_stream()); auto numint64_t = static_cast<int64_t>((numBytes + sizeof(int64_t) - 1) / sizeof(int64_t)); // allocate one more int64_t at the end of scratch for storing numOfOutput scratch_.Resize(numint64_t + 1); auto* scratchData = scratch_.mutable_data<int64_t>(); auto* numOfOutputData = scratchData + numint64_t; hipcub::DeviceSelect::Flagged( static_cast<void*>(scratchData), numBytes, itr, maskData, indicesData, numOfOutputData, outerSize, context_.cuda_stream()); // Copy numOfOutput from gpu to cpu int64_t numOfOutput; context_.CopyToCPU(1, numOfOutputData, &numOfOutput); indices_.Resize(numOfOutput); std::vector<int64_t> dims = src.dims().vec(); dims[0] = numOfOutput; dest->Resize(dims); auto* destData = (uint8_t*)dest->raw_mutable_data(src.meta()); const auto* srcData = (uint8_t*)src.raw_data(); if (OutputSize() == 2) { auto* indicesOut = Output(1); indicesOut->Resize(numOfOutput); indicesOut->template mutable_data<int64_t>(); } if (numOfOutput > 0) { hipLaunchKernelGGL(( BooleanMaskCopyKernel), dim3(min(numOfOutput, static_cast<int64_t>(CAFFE_MAXIMUM_NUM_BLOCKS))), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), numOfOutput, src.size_from_dim(1) * src.meta().itemsize(), indicesData, srcData, destData); if (OutputSize() == 2) { Output(1)->CopyFrom(indices_, &context_); } } return true; } private: Tensor indices_{CUDA}; Tensor scratch_{CUDA}; }; REGISTER_CUDA_OPERATOR(BooleanMask, BooleanMaskOp<CUDAContext>); namespace { #define minf (-1.0f * std::numeric_limits<float>::infinity()) template <typename T> __global__ void sequenceMaskKernel( int N, int M, int B, const T* in, const int* seq_lengths, T fill_val, T* out) { if (B >= 0) { CUDA_1D_KERNEL_LOOP(index, B * N * M) { int k = index % M; int j = (index - k) / M % N; int i = (index - M * j - k) / (N * M); int ind = N * M * i + M * j + k; out[ind] = (k >= seq_lengths[j] ? fill_val : in[ind]); } } else { CUDA_1D_KERNEL_LOOP(index, N * M) { int i = index / M; int j = index % M; out[index] = (j >= seq_lengths[i] ? 
fill_val : in[index]); } } } template <typename T> __global__ void repeatedSequenceMaskKernel( int N, int M, int D, const T* in, const int* seq_lengths, T fill_val, T* out) { CUDA_1D_KERNEL_LOOP(index, N * M * D) { int i = index / (D * M); int j = (index / D) % M; out[index] = (j >= seq_lengths[i] ? fill_val : in[index]); } } template <typename T> __global__ void windowMaskKernel( int N, int M, int B, const T* in, const int* window_centers, const int radius, T fill_val, T* out) { if (B >= 0) { CUDA_1D_KERNEL_LOOP(index, B * N * M) { int k = index % M; int j = (index - k) / M % N; int i = (index - M * j - k) / (N * M); int ind = N * M * i + M * j + k; out[ind] = (k < window_centers[j] - radius || k > window_centers[j] + radius ? fill_val : in[ind]); } } else { CUDA_1D_KERNEL_LOOP(index, N * M) { int i = index / M; int j = index % M; out[index] = (j < window_centers[i] - radius || j > window_centers[i] + radius ? fill_val : in[index]); } } } template <typename T> __global__ void upperMaskKernel(int N, int M, int B, const T* in, T fill_val, T* out) { if (B >= 0) { CUDA_1D_KERNEL_LOOP(index, B * N * M) { int k = index % M; int j = (index - k) / M % N; int i = (index - M * j - k) / (N * M); int ind = N * M * i + M * j + k; out[ind] = (k > j ? fill_val : in[ind]); } } else { CUDA_1D_KERNEL_LOOP(index, N * M) { int i = index / M; int j = index % M; out[index] = (j > i ? fill_val : in[index]); } } } template <typename T> __global__ void lowerMaskKernel(int N, int M, int B, const T* in, T fill_val, T* out) { if (B >= 0) { CUDA_1D_KERNEL_LOOP(index, B * N * M) { int k = index % M; int j = (index - k) / M % N; int i = (index - M * j - k) / (N * M); int ind = N * M * i + M * j + k; out[ind] = (k < j ? fill_val : in[ind]); } } else { CUDA_1D_KERNEL_LOOP(index, N * M) { int i = index / M; int j = index % M; out[index] = (j < i ? fill_val : in[index]); } } } template <typename T> __global__ void upperDiagMaskKernel(int N, int M, int B, const T* in, T fill_val, T* out) { if (B >= 0) { CUDA_1D_KERNEL_LOOP(index, B * N * M) { int k = index % M; int j = (index - k) / M % N; int i = (index - M * j - k) / (N * M); int ind = N * M * i + M * j + k; out[ind] = (k >= j ? fill_val : in[ind]); } } else { CUDA_1D_KERNEL_LOOP(index, N * M) { int i = index / M; int j = index % M; out[index] = (j >= i ? fill_val : in[index]); } } } template <typename T> __global__ void lowerDiagMaskKernel(int N, int M, int B, const T* in, T fill_val, T* out) { if (B >= 0) { CUDA_1D_KERNEL_LOOP(index, B * N * M) { int k = index % M; int j = (index - k) / M % N; int i = (index - M * j - k) / (N * M); int ind = N * M * i + M * j + k; out[ind] = (k <= j ? fill_val : in[ind]); } } else { CUDA_1D_KERNEL_LOOP(index, N * M) { int i = index / M; int j = index % M; out[index] = (j <= i ? 
fill_val : in[index]); } } } } // namespace template <> bool SequenceMaskOp<CUDAContext>::RunOnDevice() { return DispatchHelper<TensorTypes<at::Half, float>>::call(this, Input(0)); } template <> template <class T> bool SequenceMaskOp<CUDAContext>::DoRunWithType() { const Tensor* input = &Input(0); const Tensor* sequence_lengths = nullptr; const Tensor* window_centers = nullptr; if (mode_ == "sequence") { sequence_lengths = &Input(1); } else if (mode_ == "window") { window_centers = &Input(1); } auto* output = Output(0); output->ResizeLike(*input); const auto canonical_axis = input->canonical_axis_index(axis_); // canonical_batch is non-negative if batching, -1 otherwise int canonical_batch = -1; if ((HasArgument("batch"))) { canonical_batch = input->canonical_axis_index(batch_); } // make sure batch < axis if (canonical_batch >= 0) { CAFFE_ENFORCE_LT(canonical_batch, canonical_axis); } // if no batch, then left is product of dims up to axis // otherwise, left is product of dims between batch and axis const int left = (canonical_batch >= 0 ? input->size_between_dim(canonical_batch, canonical_axis) : input->size_to_dim(canonical_axis)); const int right = input->size_from_dim(canonical_axis); // product of dims from 1 to batch const int batch_dim = (canonical_batch >= 0 ? input->size_to_dim(canonical_batch) * input->dim(canonical_batch) : -1); T fill_val = convert::To<float, T>(grad_ ? 0.0f : fill_val_); if (mode_ == "sequence") { if (HasArgument("repeat_from_axis")) { const int canonical_repeat_from = input->canonical_axis_index(repeat_from_); const int repeated_dims = input->size_from_dim(canonical_repeat_from); const int masked_dims = right / repeated_dims; hipLaunchKernelGGL(( repeatedSequenceMaskKernel), dim3(CAFFE_GET_BLOCKS(left * right)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), left, masked_dims, repeated_dims, input->data<T>(), sequence_lengths->data<int>(), fill_val, output->template mutable_data<T>()); } else { hipLaunchKernelGGL(( sequenceMaskKernel), dim3(CAFFE_GET_BLOCKS(left * right)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), left, right, batch_dim, input->data<T>(), sequence_lengths->data<int>(), fill_val, output->template mutable_data<T>()); } } else if (mode_ == "window") { hipLaunchKernelGGL(( windowMaskKernel), dim3(CAFFE_GET_BLOCKS(left * right)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), left, right, batch_dim, input->data<T>(), window_centers->data<int>(), radius_, fill_val, output->template mutable_data<T>()); } else if (mode_ == "upper") { hipLaunchKernelGGL(( upperMaskKernel), dim3(CAFFE_GET_BLOCKS(left * right)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), left, right, batch_dim, input->data<T>(), fill_val, output->template mutable_data<T>()); } else if (mode_ == "lower") { hipLaunchKernelGGL(( lowerMaskKernel), dim3(CAFFE_GET_BLOCKS(left * right)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), left, right, batch_dim, input->data<T>(), fill_val, output->template mutable_data<T>()); } else if (mode_ == "upperdiag") { hipLaunchKernelGGL(( upperDiagMaskKernel), dim3(CAFFE_GET_BLOCKS(left * right)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), left, right, batch_dim, input->data<T>(), fill_val, output->template mutable_data<T>()); } else if (mode_ == "lowerdiag") { hipLaunchKernelGGL(( lowerDiagMaskKernel), dim3(CAFFE_GET_BLOCKS(left * right)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), left, right, batch_dim, input->data<T>(), fill_val, output->template mutable_data<T>()); 
} else { CAFFE_ENFORCE(false, "Unsupported mode for SequenceMaskOp!"); } return true; } REGISTER_CUDA_OPERATOR(SequenceMask, SequenceMaskOp<CUDAContext>); } // namespace caffe2
82784e620b3a8b3e766490e9af992afd0ddcd31b.cu
#include "caffe2/core/context_gpu.h" #include "caffe2/operators/boolean_mask_ops.h" #include <cub/cub.cuh> namespace caffe2 { namespace { __global__ void BooleanMaskCopyKernel( const int64_t numOfOutput, const int64_t numBytes, const int64_t* indices, const uint8_t* src, uint8_t* dest) { for (int64_t i = blockIdx.x; i < numOfOutput; i += gridDim.x) { const auto srcBase = indices[i] * numBytes; const auto destBase = i * numBytes; for (int64_t j = threadIdx.x; j < numBytes; j += blockDim.x) { dest[destBase + j] = src[srcBase + j]; } } } } template <> class BooleanMaskOp<CUDAContext> final : public Operator<CUDAContext> { public: BooleanMaskOp(const OperatorDef& operator_def, Workspace* ws) : Operator<CUDAContext>(operator_def, ws) {} bool RunOnDevice() override { const auto& src = Input(0); const auto& mask = Input(1); auto* dest = Output(0); CAFFE_ENFORCE(src.ndim() >= 1); CAFFE_ENFORCE_EQ(mask.ndim(), 1); CAFFE_ENFORCE(src.dims()[0] == mask.dims()[0]); const auto* maskData = mask.data<bool>(); const auto outerSize = mask.dims()[0]; indices_.Resize(outerSize); auto* indicesData = indices_.mutable_data<int64_t>(); size_t numBytes = 0; cub::CountingInputIterator<int> itr(0); cub::DeviceSelect::Flagged( nullptr, numBytes, itr, maskData, indicesData, static_cast<int64_t*>(nullptr), outerSize, context_.cuda_stream()); auto numint64_t = static_cast<int64_t>((numBytes + sizeof(int64_t) - 1) / sizeof(int64_t)); // allocate one more int64_t at the end of scratch for storing numOfOutput scratch_.Resize(numint64_t + 1); auto* scratchData = scratch_.mutable_data<int64_t>(); auto* numOfOutputData = scratchData + numint64_t; cub::DeviceSelect::Flagged( static_cast<void*>(scratchData), numBytes, itr, maskData, indicesData, numOfOutputData, outerSize, context_.cuda_stream()); // Copy numOfOutput from gpu to cpu int64_t numOfOutput; context_.CopyToCPU(1, numOfOutputData, &numOfOutput); indices_.Resize(numOfOutput); std::vector<int64_t> dims = src.dims().vec(); dims[0] = numOfOutput; dest->Resize(dims); auto* destData = (uint8_t*)dest->raw_mutable_data(src.meta()); const auto* srcData = (uint8_t*)src.raw_data(); if (OutputSize() == 2) { auto* indicesOut = Output(1); indicesOut->Resize(numOfOutput); indicesOut->template mutable_data<int64_t>(); } if (numOfOutput > 0) { BooleanMaskCopyKernel<<< min(numOfOutput, static_cast<int64_t>(CAFFE_MAXIMUM_NUM_BLOCKS)), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( numOfOutput, src.size_from_dim(1) * src.meta().itemsize(), indicesData, srcData, destData); if (OutputSize() == 2) { Output(1)->CopyFrom(indices_, &context_); } } return true; } private: Tensor indices_{CUDA}; Tensor scratch_{CUDA}; }; REGISTER_CUDA_OPERATOR(BooleanMask, BooleanMaskOp<CUDAContext>); namespace { #define minf (-1.0f * std::numeric_limits<float>::infinity()) template <typename T> __global__ void sequenceMaskKernel( int N, int M, int B, const T* in, const int* seq_lengths, T fill_val, T* out) { if (B >= 0) { CUDA_1D_KERNEL_LOOP(index, B * N * M) { int k = index % M; int j = (index - k) / M % N; int i = (index - M * j - k) / (N * M); int ind = N * M * i + M * j + k; out[ind] = (k >= seq_lengths[j] ? fill_val : in[ind]); } } else { CUDA_1D_KERNEL_LOOP(index, N * M) { int i = index / M; int j = index % M; out[index] = (j >= seq_lengths[i] ? 
fill_val : in[index]); } } } template <typename T> __global__ void repeatedSequenceMaskKernel( int N, int M, int D, const T* in, const int* seq_lengths, T fill_val, T* out) { CUDA_1D_KERNEL_LOOP(index, N * M * D) { int i = index / (D * M); int j = (index / D) % M; out[index] = (j >= seq_lengths[i] ? fill_val : in[index]); } } template <typename T> __global__ void windowMaskKernel( int N, int M, int B, const T* in, const int* window_centers, const int radius, T fill_val, T* out) { if (B >= 0) { CUDA_1D_KERNEL_LOOP(index, B * N * M) { int k = index % M; int j = (index - k) / M % N; int i = (index - M * j - k) / (N * M); int ind = N * M * i + M * j + k; out[ind] = (k < window_centers[j] - radius || k > window_centers[j] + radius ? fill_val : in[ind]); } } else { CUDA_1D_KERNEL_LOOP(index, N * M) { int i = index / M; int j = index % M; out[index] = (j < window_centers[i] - radius || j > window_centers[i] + radius ? fill_val : in[index]); } } } template <typename T> __global__ void upperMaskKernel(int N, int M, int B, const T* in, T fill_val, T* out) { if (B >= 0) { CUDA_1D_KERNEL_LOOP(index, B * N * M) { int k = index % M; int j = (index - k) / M % N; int i = (index - M * j - k) / (N * M); int ind = N * M * i + M * j + k; out[ind] = (k > j ? fill_val : in[ind]); } } else { CUDA_1D_KERNEL_LOOP(index, N * M) { int i = index / M; int j = index % M; out[index] = (j > i ? fill_val : in[index]); } } } template <typename T> __global__ void lowerMaskKernel(int N, int M, int B, const T* in, T fill_val, T* out) { if (B >= 0) { CUDA_1D_KERNEL_LOOP(index, B * N * M) { int k = index % M; int j = (index - k) / M % N; int i = (index - M * j - k) / (N * M); int ind = N * M * i + M * j + k; out[ind] = (k < j ? fill_val : in[ind]); } } else { CUDA_1D_KERNEL_LOOP(index, N * M) { int i = index / M; int j = index % M; out[index] = (j < i ? fill_val : in[index]); } } } template <typename T> __global__ void upperDiagMaskKernel(int N, int M, int B, const T* in, T fill_val, T* out) { if (B >= 0) { CUDA_1D_KERNEL_LOOP(index, B * N * M) { int k = index % M; int j = (index - k) / M % N; int i = (index - M * j - k) / (N * M); int ind = N * M * i + M * j + k; out[ind] = (k >= j ? fill_val : in[ind]); } } else { CUDA_1D_KERNEL_LOOP(index, N * M) { int i = index / M; int j = index % M; out[index] = (j >= i ? fill_val : in[index]); } } } template <typename T> __global__ void lowerDiagMaskKernel(int N, int M, int B, const T* in, T fill_val, T* out) { if (B >= 0) { CUDA_1D_KERNEL_LOOP(index, B * N * M) { int k = index % M; int j = (index - k) / M % N; int i = (index - M * j - k) / (N * M); int ind = N * M * i + M * j + k; out[ind] = (k <= j ? fill_val : in[ind]); } } else { CUDA_1D_KERNEL_LOOP(index, N * M) { int i = index / M; int j = index % M; out[index] = (j <= i ? 
fill_val : in[index]); } } } } // namespace template <> bool SequenceMaskOp<CUDAContext>::RunOnDevice() { return DispatchHelper<TensorTypes<at::Half, float>>::call(this, Input(0)); } template <> template <class T> bool SequenceMaskOp<CUDAContext>::DoRunWithType() { const Tensor* input = &Input(0); const Tensor* sequence_lengths = nullptr; const Tensor* window_centers = nullptr; if (mode_ == "sequence") { sequence_lengths = &Input(1); } else if (mode_ == "window") { window_centers = &Input(1); } auto* output = Output(0); output->ResizeLike(*input); const auto canonical_axis = input->canonical_axis_index(axis_); // canonical_batch is non-negative if batching, -1 otherwise int canonical_batch = -1; if ((HasArgument("batch"))) { canonical_batch = input->canonical_axis_index(batch_); } // make sure batch < axis if (canonical_batch >= 0) { CAFFE_ENFORCE_LT(canonical_batch, canonical_axis); } // if no batch, then left is product of dims up to axis // otherwise, left is product of dims between batch and axis const int left = (canonical_batch >= 0 ? input->size_between_dim(canonical_batch, canonical_axis) : input->size_to_dim(canonical_axis)); const int right = input->size_from_dim(canonical_axis); // product of dims from 1 to batch const int batch_dim = (canonical_batch >= 0 ? input->size_to_dim(canonical_batch) * input->dim(canonical_batch) : -1); T fill_val = convert::To<float, T>(grad_ ? 0.0f : fill_val_); if (mode_ == "sequence") { if (HasArgument("repeat_from_axis")) { const int canonical_repeat_from = input->canonical_axis_index(repeat_from_); const int repeated_dims = input->size_from_dim(canonical_repeat_from); const int masked_dims = right / repeated_dims; repeatedSequenceMaskKernel<<< CAFFE_GET_BLOCKS(left * right), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( left, masked_dims, repeated_dims, input->data<T>(), sequence_lengths->data<int>(), fill_val, output->template mutable_data<T>()); } else { sequenceMaskKernel<<< CAFFE_GET_BLOCKS(left * right), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( left, right, batch_dim, input->data<T>(), sequence_lengths->data<int>(), fill_val, output->template mutable_data<T>()); } } else if (mode_ == "window") { windowMaskKernel<<< CAFFE_GET_BLOCKS(left * right), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( left, right, batch_dim, input->data<T>(), window_centers->data<int>(), radius_, fill_val, output->template mutable_data<T>()); } else if (mode_ == "upper") { upperMaskKernel<<< CAFFE_GET_BLOCKS(left * right), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( left, right, batch_dim, input->data<T>(), fill_val, output->template mutable_data<T>()); } else if (mode_ == "lower") { lowerMaskKernel<<< CAFFE_GET_BLOCKS(left * right), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( left, right, batch_dim, input->data<T>(), fill_val, output->template mutable_data<T>()); } else if (mode_ == "upperdiag") { upperDiagMaskKernel<<< CAFFE_GET_BLOCKS(left * right), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( left, right, batch_dim, input->data<T>(), fill_val, output->template mutable_data<T>()); } else if (mode_ == "lowerdiag") { lowerDiagMaskKernel<<< CAFFE_GET_BLOCKS(left * right), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( left, right, batch_dim, input->data<T>(), fill_val, output->template mutable_data<T>()); } else { CAFFE_ENFORCE(false, "Unsupported mode for SequenceMaskOp!"); } return true; } REGISTER_CUDA_OPERATOR(SequenceMask, SequenceMaskOp<CUDAContext>); } // namespace caffe2
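BooleanMaskOp above follows CUB's standard two-phase protocol for cub::DeviceSelect::Flagged: a first call with a null temporary-storage pointer only reports the scratch size, and a second call performs the actual compaction. A self-contained sketch of that protocol (function and buffer names are assumptions, not Caffe2 code):

#include <cub/cub.cuh>
#include <cuda_runtime.h>
#include <cstdint>

// Two-phase CUB pattern: first call sizes the scratch buffer, second call runs.
void flagged_indices(const bool *d_flags, int64_t *d_out_indices,
                     int64_t *d_num_selected, int num_items, cudaStream_t stream)
{
    cub::CountingInputIterator<int> counting(0);   // 0, 1, 2, ... as the "input"

    void  *d_temp    = nullptr;
    size_t temp_bytes = 0;
    // Pass 1: d_temp == nullptr, so only temp_bytes is written.
    cub::DeviceSelect::Flagged(d_temp, temp_bytes, counting, d_flags,
                               d_out_indices, d_num_selected, num_items, stream);

    cudaMalloc(&d_temp, temp_bytes);
    // Pass 2: the real selection of indices whose flag is set.
    cub::DeviceSelect::Flagged(d_temp, temp_bytes, counting, d_flags,
                               d_out_indices, d_num_selected, num_items, stream);

    cudaStreamSynchronize(stream);  // make sure the scratch is no longer in use
    cudaFree(d_temp);
}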
a3e2186752e250063bcc99c42a2cde5ee212b2c9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "ConvolutionLayer.h" ConvolutionLayer::ConvolutionLayer(cudnnHandle_t handle, float* data): handle(handle), input_data(data) { CUDNN_CALL(cudnnCreateTensorDescriptor(&input_descriptor)); CUDNN_CALL(cudnnCreateFilterDescriptor(&filter_descriptor)); CUDNN_CALL(cudnnCreateTensorDescriptor(&output_descriptor)); CUDNN_CALL(cudnnCreateConvolutionDescriptor(&convolution_descriptor)); } void ConvolutionLayer::SetInputDescriptor(int N, int C, int H, int W) { input_n = N; input_c = C; input_h = H; input_w = W; CUDNN_CALL(cudnnSetTensor4dDescriptor(input_descriptor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_n, input_c, input_h, input_w)); #if DEBUG printf("Convolution Input Shape (NCHW) => N: %d, C: %d, H: %d, W: %d\n", input_n, input_c, input_h, input_w); #endif } void ConvolutionLayer::SetFilterDescriptor(int N, int C, int H, int W) { filter_n = N; filter_c = C; filter_h = H; filter_w = W; CUDNN_CALL(cudnnSetFilter4dDescriptor(filter_descriptor, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, filter_n, filter_c, filter_h, filter_w)); #if DEBUG printf("Convolution Filter Shape (NCHW) => N: %d, C: %d, H: %d, W: %d\n", filter_n, filter_c, filter_h, filter_w); #endif } void ConvolutionLayer::SetOutputDescriptor() { CUDNN_CALL(cudnnGetConvolution2dForwardOutputDim(convolution_descriptor, input_descriptor, filter_descriptor, &output_n, &output_c, &output_h, &output_w)); CUDNN_CALL(cudnnSetTensor4dDescriptor(output_descriptor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, output_n, output_c, output_h, output_w)); #if DEBUG printf("Convolution Output Shape (NCHW) => N: %d, C: %d, H: %d, W: %d\n", output_n, output_c, output_h, output_w); #endif } cudnnTensorDescriptor_t ConvolutionLayer::GetOutputDescriptor() { return output_descriptor; } float* ConvolutionLayer::GetOutputData() { return output_data; } void ConvolutionLayer::SetConvolutionDescriptor(int H_padding, int W_padding, int H_stride, int W_stride, int H_dilation, int W_dilation) { padding_h = H_padding; padding_w = W_padding; stride_h = H_stride; stride_w = W_stride; dilation_h = H_dilation; dilation_w = W_dilation; CUDNN_CALL(cudnnSetConvolution2dDescriptor(convolution_descriptor, padding_h, padding_w, stride_h, stride_w, dilation_h, dilation_w, CUDNN_CONVOLUTION, CUDNN_DATA_FLOAT)); #if DEBUG printf("Convolution parameters => Padding h: %d, Padding w: %d, Stride h: %d, Stride w: %d, Dilation h: %d, Dilation w: %d\n", padding_h, padding_w, stride_h, stride_w, dilation_h, dilation_w); #endif } void ConvolutionLayer::SetAlgorithm() { cudnnConvolutionFwdAlgoPerf_t convolution_algo_perf; int algo_count; cudnnGetConvolutionForwardAlgorithm_v7(handle, input_descriptor, filter_descriptor, convolution_descriptor, output_descriptor, /*requested algo count*/1, /*returned algo count*/&algo_count, &convolution_algo_perf); algorithm = convolution_algo_perf.algo; //algorithm = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM; } void ConvolutionLayer::AllocateMemory() { CUDA_CALL(hipMalloc(&filter_data, filter_n * filter_c * filter_h * filter_w * sizeof(float))); CUDA_CALL(hipMalloc(&output_data, output_n * output_c * output_h * output_w * sizeof(float))); hipLaunchKernelGGL(( fill_constant), dim3(filter_w * filter_h), dim3(filter_n * filter_c), 0, 0, filter_data, 1.f); hipDeviceSynchronize(); } void ConvolutionLayer::AllocateWorkspace() { CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(handle, input_descriptor, filter_descriptor, convolution_descriptor, output_descriptor, algorithm, 
&workspace_size)); CUDA_CALL(hipMalloc(&workspace_data, workspace_size)); #if DEBUG printf("Workspace allocated: %ld bytes\n", workspace_size); #endif } void ConvolutionLayer::Forward() { hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); CUDNN_CALL(cudnnConvolutionForward(handle, &alpha, input_descriptor, input_data, filter_descriptor, filter_data, convolution_descriptor, algorithm, workspace_data, workspace_size, &beta, output_descriptor, output_data)); hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("%f,", milliseconds); } void ConvolutionLayer::Free() { CUDNN_CALL(cudnnDestroyTensorDescriptor(input_descriptor)); CUDNN_CALL(cudnnDestroyConvolutionDescriptor(convolution_descriptor)); CUDNN_CALL(cudnnDestroyFilterDescriptor(filter_descriptor)); CUDNN_CALL(cudnnDestroyTensorDescriptor(output_descriptor)); CUDA_CALL(hipFree(input_data)); CUDA_CALL(hipFree(filter_data)); CUDA_CALL(hipFree(workspace_data)); }
a3e2186752e250063bcc99c42a2cde5ee212b2c9.cu
#include "ConvolutionLayer.h" ConvolutionLayer::ConvolutionLayer(cudnnHandle_t handle, float* data): handle(handle), input_data(data) { CUDNN_CALL(cudnnCreateTensorDescriptor(&input_descriptor)); CUDNN_CALL(cudnnCreateFilterDescriptor(&filter_descriptor)); CUDNN_CALL(cudnnCreateTensorDescriptor(&output_descriptor)); CUDNN_CALL(cudnnCreateConvolutionDescriptor(&convolution_descriptor)); } void ConvolutionLayer::SetInputDescriptor(int N, int C, int H, int W) { input_n = N; input_c = C; input_h = H; input_w = W; CUDNN_CALL(cudnnSetTensor4dDescriptor(input_descriptor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_n, input_c, input_h, input_w)); #if DEBUG printf("Convolution Input Shape (NCHW) => N: %d, C: %d, H: %d, W: %d\n", input_n, input_c, input_h, input_w); #endif } void ConvolutionLayer::SetFilterDescriptor(int N, int C, int H, int W) { filter_n = N; filter_c = C; filter_h = H; filter_w = W; CUDNN_CALL(cudnnSetFilter4dDescriptor(filter_descriptor, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, filter_n, filter_c, filter_h, filter_w)); #if DEBUG printf("Convolution Filter Shape (NCHW) => N: %d, C: %d, H: %d, W: %d\n", filter_n, filter_c, filter_h, filter_w); #endif } void ConvolutionLayer::SetOutputDescriptor() { CUDNN_CALL(cudnnGetConvolution2dForwardOutputDim(convolution_descriptor, input_descriptor, filter_descriptor, &output_n, &output_c, &output_h, &output_w)); CUDNN_CALL(cudnnSetTensor4dDescriptor(output_descriptor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, output_n, output_c, output_h, output_w)); #if DEBUG printf("Convolution Output Shape (NCHW) => N: %d, C: %d, H: %d, W: %d\n", output_n, output_c, output_h, output_w); #endif } cudnnTensorDescriptor_t ConvolutionLayer::GetOutputDescriptor() { return output_descriptor; } float* ConvolutionLayer::GetOutputData() { return output_data; } void ConvolutionLayer::SetConvolutionDescriptor(int H_padding, int W_padding, int H_stride, int W_stride, int H_dilation, int W_dilation) { padding_h = H_padding; padding_w = W_padding; stride_h = H_stride; stride_w = W_stride; dilation_h = H_dilation; dilation_w = W_dilation; CUDNN_CALL(cudnnSetConvolution2dDescriptor(convolution_descriptor, padding_h, padding_w, stride_h, stride_w, dilation_h, dilation_w, CUDNN_CONVOLUTION, CUDNN_DATA_FLOAT)); #if DEBUG printf("Convolution parameters => Padding h: %d, Padding w: %d, Stride h: %d, Stride w: %d, Dilation h: %d, Dilation w: %d\n", padding_h, padding_w, stride_h, stride_w, dilation_h, dilation_w); #endif } void ConvolutionLayer::SetAlgorithm() { cudnnConvolutionFwdAlgoPerf_t convolution_algo_perf; int algo_count; cudnnGetConvolutionForwardAlgorithm_v7(handle, input_descriptor, filter_descriptor, convolution_descriptor, output_descriptor, /*requested algo count*/1, /*returned algo count*/&algo_count, &convolution_algo_perf); algorithm = convolution_algo_perf.algo; //algorithm = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM; } void ConvolutionLayer::AllocateMemory() { CUDA_CALL(cudaMalloc(&filter_data, filter_n * filter_c * filter_h * filter_w * sizeof(float))); CUDA_CALL(cudaMalloc(&output_data, output_n * output_c * output_h * output_w * sizeof(float))); fill_constant<<<filter_w * filter_h, filter_n * filter_c>>>(filter_data, 1.f); cudaDeviceSynchronize(); } void ConvolutionLayer::AllocateWorkspace() { CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(handle, input_descriptor, filter_descriptor, convolution_descriptor, output_descriptor, algorithm, &workspace_size)); CUDA_CALL(cudaMalloc(&workspace_data, workspace_size)); #if DEBUG printf("Workspace allocated: %ld bytes\n", 
workspace_size); #endif } void ConvolutionLayer::Forward() { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); CUDNN_CALL(cudnnConvolutionForward(handle, &alpha, input_descriptor, input_data, filter_descriptor, filter_data, convolution_descriptor, algorithm, workspace_data, workspace_size, &beta, output_descriptor, output_data)); cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("%f,", milliseconds); } void ConvolutionLayer::Free() { CUDNN_CALL(cudnnDestroyTensorDescriptor(input_descriptor)); CUDNN_CALL(cudnnDestroyConvolutionDescriptor(convolution_descriptor)); CUDNN_CALL(cudnnDestroyFilterDescriptor(filter_descriptor)); CUDNN_CALL(cudnnDestroyTensorDescriptor(output_descriptor)); CUDA_CALL(cudaFree(input_data)); CUDA_CALL(cudaFree(filter_data)); CUDA_CALL(cudaFree(workspace_data)); }
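SetAlgorithm above requests a single candidate from cudnnGetConvolutionForwardAlgorithm_v7 and takes it unconditionally. The same query can return several candidates ordered by expected runtime, which allows filtering by a workspace budget; a hedged sketch of that variation (the helper name and the fallback choice are assumptions, not this layer's code):

#include <cudnn.h>

// Ask for all candidates, then pick the fastest one that fits the workspace limit.
cudnnConvolutionFwdAlgo_t pick_fwd_algo(cudnnHandle_t handle,
                                        cudnnTensorDescriptor_t x,
                                        cudnnFilterDescriptor_t w,
                                        cudnnConvolutionDescriptor_t conv,
                                        cudnnTensorDescriptor_t y,
                                        size_t workspace_limit)
{
    cudnnConvolutionFwdAlgoPerf_t perf[CUDNN_CONVOLUTION_FWD_ALGO_COUNT];
    int returned = 0;
    cudnnGetConvolutionForwardAlgorithm_v7(handle, x, w, conv, y,
                                           CUDNN_CONVOLUTION_FWD_ALGO_COUNT,
                                           &returned, perf);
    for (int i = 0; i < returned; ++i) {
        if (perf[i].status == CUDNN_STATUS_SUCCESS && perf[i].memory <= workspace_limit)
            return perf[i].algo;   // candidates are ordered by expected time
    }
    return CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM;  // conservative fallback
}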
2d511198e15e09e1305b6a3c77aa018cf3fca357.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <pthread.h> #include "manager.h" #include "common.h" #define cudaCheck(ans) { cudaAssert((ans), __FILE__, __LINE__); } inline void cudaAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"CUDA: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) { exit(code); } } } __global__ void vectorAdd(const float *A, float *C, int size, int inset) { int i = blockDim.x * blockIdx.x + threadIdx.x; if(i < size) { C[i] = A[i + inset] + A[i - 1 + inset] + A[i + 1 + inset]; } } // Allocate cuda memory and pin host memory (required for async stream). void alloc_cuda(task_t* task) { hipSetDevice(task->cuda.id); // Allocate the device vectors cudaCheck(hipMalloc((void **)&task->cuda.A, (task->size + 2) * sizeof(float))); // Plus 'imported' neighbours. cudaCheck(hipMalloc((void **)&task->cuda.C, task->size * sizeof(float))); } // Deallocate cuda memory and unpin host memory. void dealloc_cuda(task_t* task) { hipSetDevice(task->cuda.id); // Free device global memory cudaCheck(hipFree(task->cuda.A)); cudaCheck(hipFree(task->cuda.C)); } // Run the cuda task (on the 'thread stream'). void* run_cuda(void* v_task) { task_t* task = (task_t*) v_task; int iteration = task->start_iteration; int rank; MPI_Comm_rank(MPI_COMM_WORLD, &rank); printf("Setting device: %d\n", task->cuda.id); hipSetDevice(task->cuda.id); printf("(%d) Waiting start cud barrier: size: %d\n", rank, task->start_barrier->get_size()); task->start_barrier->wait(); for(; iteration < CYCLES; iteration++) { // Copy the host input vectors A and B H2D. printf("(%d:%d) Task offset: %d, size: %d\n", rank, task->id, task->offset, task->size); int inset = 0; cudaCheck(hipMemcpy(task->cuda.A, &task->A[-1], (task->size + 2) * sizeof(float), hipMemcpyHostToDevice)); inset = 1; // Launch the vector-add CUDA Kernel int threadsPerBlock = 256; int blocksPerGrid = (task->size + threadsPerBlock - 1) / threadsPerBlock; hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, task->cuda.A, task->cuda.C, task->size, inset); // Copy the device result vector D2H. cudaCheck(hipMemcpy(task->C, task->cuda.C, task->size * sizeof(float), hipMemcpyDeviceToHost)); cudaCheck(hipDeviceSynchronize()); printf("cuda wait\n"); task->barrier->wait(); // Switch buffers for(int j = 0; j < task->size; j++) { printf("C%d: (%d) [%d] %d: %f\n", iteration, rank, task->id, j, task->C[j]); task->A[j] = task->C[j]; } bool will_split = (iteration == 6 && rank == 0 && task->id == 1) || (iteration == 6 && rank == 0 && task->id == 2); printf("(%d) Updating neighbours\n", rank); std::vector<MPI_Receive_req> requests; std::vector<int> types; fetch_and_update_neighbours(rank, task, requests, types, will_split); MPI_Status statuses[requests.size()]; if(!requests.empty()) { MPI_Recv_all(requests, MPI_COMM_WORLD, statuses); } for(int j = -1; j < task->size + 1; j++) { printf("A @ C%d: (%d) [%d] %d: %f\n", iteration, rank, task->id, j, task->A[j]); } // Split if(will_split) { // Arbitrarily (as a test) decide to split. split(task, rank); } task->barrier->wait(); // MPI wait task->barrier->wait(); for(int i = 0; i < requests.size(); i++) { if(match_tag(-1, -1, WILL_SPLIT, statuses[i].MPI_TAG)) { int tag = construct_tag(task->id, 0, LOOKUP); // Received notification of split of target. Will update refs. 
if(types[i] == NEXT_TYPE) { printf("(%d:%d) Update nextref\n", rank, task->id); int start = task->offset + task->size; MPI_Send(&start, 1, MPI_INT, MANAGER_RANK, tag, *task->manager); int package[2]; MPI_Recv(&package, 2, MPI_INT, MANAGER_RANK, tag, *task->manager, MPI_STATUS_IGNORE); task->next.rank = package[0]; task->next.id = package[1]; printf("(%d:%d) New next: %d:%d\n", rank, task->id, task->next.rank, task->next.id); } else if(types[i] == PREV_TYPE) { printf("(%d:%d) Update prevref\n", rank, task->id); int start = task->offset - 1; MPI_Send(&start, 1, MPI_INT, MANAGER_RANK, tag, *task->manager); int package[2]; MPI_Recv(&package, 2, MPI_INT, MANAGER_RANK, tag, *task->manager, MPI_STATUS_IGNORE); task->prev.rank = package[0]; task->prev.id = package[1]; printf("(%d:%d) New prev: %d:%d\n", rank, task->id, task->prev.rank, task->prev.id); } else { throw std::runtime_error("Invalid SPLIT type received."); } } } printf("(%d) Waiting end cuda start barrier: size: %d\n", rank, task->start_barrier->get_size()); task->start_barrier->wait(); } printf("cuda done\n"); pthread_exit(NULL); } // Get the number of available GPUs. int init_cuda() { int gpu_count; hipError_t cerr = hipGetDeviceCount(&gpu_count); if(cerr == hipErrorNoDevice) { gpu_count = 0; } else { cudaCheck(cerr); } return gpu_count; }
2d511198e15e09e1305b6a3c77aa018cf3fca357.cu
#include <pthread.h> #include "manager.h" #include "common.h" #define cudaCheck(ans) { cudaAssert((ans), __FILE__, __LINE__); } inline void cudaAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"CUDA: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) { exit(code); } } } __global__ void vectorAdd(const float *A, float *C, int size, int inset) { int i = blockDim.x * blockIdx.x + threadIdx.x; if(i < size) { C[i] = A[i + inset] + A[i - 1 + inset] + A[i + 1 + inset]; } } // Allocate cuda memory and pin host memory (required for async stream). void alloc_cuda(task_t* task) { cudaSetDevice(task->cuda.id); // Allocate the device vectors cudaCheck(cudaMalloc((void **)&task->cuda.A, (task->size + 2) * sizeof(float))); // Plus 'imported' neighbours. cudaCheck(cudaMalloc((void **)&task->cuda.C, task->size * sizeof(float))); } // Deallocate cuda memory and unpin host memory. void dealloc_cuda(task_t* task) { cudaSetDevice(task->cuda.id); // Free device global memory cudaCheck(cudaFree(task->cuda.A)); cudaCheck(cudaFree(task->cuda.C)); } // Run the cuda task (on the 'thread stream'). void* run_cuda(void* v_task) { task_t* task = (task_t*) v_task; int iteration = task->start_iteration; int rank; MPI_Comm_rank(MPI_COMM_WORLD, &rank); printf("Setting device: %d\n", task->cuda.id); cudaSetDevice(task->cuda.id); printf("(%d) Waiting start cud barrier: size: %d\n", rank, task->start_barrier->get_size()); task->start_barrier->wait(); for(; iteration < CYCLES; iteration++) { // Copy the host input vectors A and B H2D. printf("(%d:%d) Task offset: %d, size: %d\n", rank, task->id, task->offset, task->size); int inset = 0; cudaCheck(cudaMemcpy(task->cuda.A, &task->A[-1], (task->size + 2) * sizeof(float), cudaMemcpyHostToDevice)); inset = 1; // Launch the vector-add CUDA Kernel int threadsPerBlock = 256; int blocksPerGrid = (task->size + threadsPerBlock - 1) / threadsPerBlock; vectorAdd<<<blocksPerGrid, threadsPerBlock, 0>>>(task->cuda.A, task->cuda.C, task->size, inset); // Copy the device result vector D2H. cudaCheck(cudaMemcpy(task->C, task->cuda.C, task->size * sizeof(float), cudaMemcpyDeviceToHost)); cudaCheck(cudaDeviceSynchronize()); printf("cuda wait\n"); task->barrier->wait(); // Switch buffers for(int j = 0; j < task->size; j++) { printf("C%d: (%d) [%d] %d: %f\n", iteration, rank, task->id, j, task->C[j]); task->A[j] = task->C[j]; } bool will_split = (iteration == 6 && rank == 0 && task->id == 1) || (iteration == 6 && rank == 0 && task->id == 2); printf("(%d) Updating neighbours\n", rank); std::vector<MPI_Receive_req> requests; std::vector<int> types; fetch_and_update_neighbours(rank, task, requests, types, will_split); MPI_Status statuses[requests.size()]; if(!requests.empty()) { MPI_Recv_all(requests, MPI_COMM_WORLD, statuses); } for(int j = -1; j < task->size + 1; j++) { printf("A @ C%d: (%d) [%d] %d: %f\n", iteration, rank, task->id, j, task->A[j]); } // Split if(will_split) { // Arbitrarily (as a test) decide to split. split(task, rank); } task->barrier->wait(); // MPI wait task->barrier->wait(); for(int i = 0; i < requests.size(); i++) { if(match_tag(-1, -1, WILL_SPLIT, statuses[i].MPI_TAG)) { int tag = construct_tag(task->id, 0, LOOKUP); // Received notification of split of target. Will update refs. 
if(types[i] == NEXT_TYPE) { printf("(%d:%d) Update nextref\n", rank, task->id); int start = task->offset + task->size; MPI_Send(&start, 1, MPI_INT, MANAGER_RANK, tag, *task->manager); int package[2]; MPI_Recv(&package, 2, MPI_INT, MANAGER_RANK, tag, *task->manager, MPI_STATUS_IGNORE); task->next.rank = package[0]; task->next.id = package[1]; printf("(%d:%d) New next: %d:%d\n", rank, task->id, task->next.rank, task->next.id); } else if(types[i] == PREV_TYPE) { printf("(%d:%d) Update prevref\n", rank, task->id); int start = task->offset - 1; MPI_Send(&start, 1, MPI_INT, MANAGER_RANK, tag, *task->manager); int package[2]; MPI_Recv(&package, 2, MPI_INT, MANAGER_RANK, tag, *task->manager, MPI_STATUS_IGNORE); task->prev.rank = package[0]; task->prev.id = package[1]; printf("(%d:%d) New prev: %d:%d\n", rank, task->id, task->prev.rank, task->prev.id); } else { throw std::runtime_error("Invalid SPLIT type received."); } } } printf("(%d) Waiting end cuda start barrier: size: %d\n", rank, task->start_barrier->get_size()); task->start_barrier->wait(); } printf("cuda done\n"); pthread_exit(NULL); } // Get the number of available GPUs. int init_cuda() { int gpu_count; cudaError_t cerr = cudaGetDeviceCount(&gpu_count); if(cerr == cudaErrorNoDevice) { gpu_count = 0; } else { cudaCheck(cerr); } return gpu_count; }
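The .hip and .cu versions of this pair differ only in mechanical renames: cuda* runtime calls become hip* calls, and the triple-chevron launch becomes hipLaunchKernelGGL. A small self-contained sketch of that launch translation, written against the CUDA runtime with the hipify output shown in comments (the demo kernel and wrapper names are mine, not from the file):

#include <cuda_runtime.h>

// Same 3-point stencil as vectorAdd in the pair above.
__global__ void vectorAddDemo(const float *A, float *C, int size, int inset) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < size) {
        C[i] = A[i + inset] + A[i - 1 + inset] + A[i + 1 + inset];
    }
}

void launchDemo(const float *d_A, float *d_C, int size, int inset) {
    int threadsPerBlock = 256;
    int blocksPerGrid = (size + threadsPerBlock - 1) / threadsPerBlock;

    // CUDA launch syntax (the .cu side of the pair):
    vectorAddDemo<<<blocksPerGrid, threadsPerBlock, 0>>>(d_A, d_C, size, inset);
    // hipify rewrites the line above to:
    //   hipLaunchKernelGGL(vectorAddDemo, dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
    //                      d_A, d_C, size, inset);
    // and maps cudaMalloc/cudaMemcpy/cudaFree/cudaDeviceSynchronize to their hip* counterparts.
    cudaDeviceSynchronize();
}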
8590271fd93c18dfb1acc06d14e6eb176bb986c5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <gauge_field_order.h> #include <quda_matrix.h> #include <index_helper.cuh> #include <generics/ldg.h> #include <tune_quda.h> namespace quda { #ifdef GPU_GAUGE_FORCE template <typename Mom, typename Gauge> struct GaugeForceArg { Mom mom; const Gauge u; int threads; int X[4]; // the regular volume parameters int E[4]; // the extended volume parameters int border[4]; // radius of border int num_paths; int path_max_length; double coeff; const int *input_path_d[4]; const int *length_d; const double *path_coeff_d; int count; // equal to sum of all path lengths. Used a convenience for computing perf GaugeForceArg(Mom &mom, const Gauge &u, int num_paths, int path_max_length, double coeff, int **input_path_d, const int *length_d, const double* path_coeff_d, int count, const GaugeField &meta_mom, const GaugeField &meta_u) : mom(mom), u(u), threads(meta_mom.VolumeCB()), num_paths(num_paths), path_max_length(path_max_length), coeff(coeff), input_path_d{ input_path_d[0], input_path_d[1], input_path_d[2], input_path_d[3] }, length_d(length_d), path_coeff_d(path_coeff_d), count(count) { for(int i=0; i<4; i++) { X[i] = meta_mom.X()[i]; E[i] = meta_u.X()[i]; border[i] = (E[i] - X[i])/2; } } virtual ~GaugeForceArg() { } }; __device__ __host__ inline static int flipDir(int dir) { return (7-dir); } __device__ __host__ inline static bool isForwards(int dir) { return (dir <= 3); } // this ensures that array elements are held in cache template <typename T> __device__ __host__ inline static T cache(const T *ptr, int idx) { #ifdef __CUDA_ARCH__ return __ldg(ptr+idx); #else return ptr[idx]; #endif } template<typename Float, typename Arg, int dir> __device__ __host__ inline void GaugeForceKernel(Arg &arg, int idx, int parity) { typedef Matrix<complex<Float>,3> Link; int x[4] = {0, 0, 0, 0}; getCoords(x, idx, arg.X, parity); for (int dr=0; dr<4; ++dr) x[dr] += arg.border[dr]; // extended grid coordinates //linkA: current matrix //linkB: the loaded matrix in this round Link linkA, linkB, staple; #ifdef __CUDA_ARCH__ extern __shared__ int s[]; int tid = (threadIdx.z*blockDim.y + threadIdx.y)*blockDim.x + threadIdx.x; s[tid] = 0; signed char *dx = (signed char*)&s[tid]; #else int dx[4] = {0, 0, 0, 0}; #endif for (int i=0; i<arg.num_paths; i++) { Float coeff = cache(arg.path_coeff_d,i); if (coeff == 0) continue; const int* path = arg.input_path_d[dir] + i*arg.path_max_length; // start from end of link in direction dir int nbr_oddbit = (parity^1); dx[dir]++; int path0 = cache(path,0); int lnkdir = isForwards(path0) ? path0 : flipDir(path0); if (isForwards(path0)) { linkB = arg.u(lnkdir, linkIndexShift(x,dx,arg.E), nbr_oddbit); linkA = linkB; dx[lnkdir]++; // now have to update location nbr_oddbit = nbr_oddbit^1; } else { dx[lnkdir]--; // if we are going backwards the link is on the adjacent site nbr_oddbit = nbr_oddbit^1; linkB = arg.u(lnkdir, linkIndexShift(x,dx,arg.E), nbr_oddbit); linkA = conj(linkB); } for (int j=1; j<cache(arg.length_d,i); j++) { int pathj = cache(path,j); int lnkdir = isForwards(pathj) ? 
pathj : flipDir(pathj); if (isForwards(pathj)) { linkB = arg.u(lnkdir, linkIndexShift(x,dx,arg.E), nbr_oddbit); linkA = linkA * linkB; dx[lnkdir]++; // now have to update to new location nbr_oddbit = nbr_oddbit^1; } else { dx[lnkdir]--; // if we are going backwards the link is on the adjacent site nbr_oddbit = nbr_oddbit^1; linkB = arg.u(lnkdir, linkIndexShift(x,dx,arg.E), nbr_oddbit); linkA = linkA * conj(linkB); } } //j staple = staple + coeff*linkA; } //i // multiply by U(x) linkA = arg.u(dir, linkIndex(x,arg.E), parity); linkA = linkA * staple; // update mom(x) Link mom = arg.mom(dir, idx, parity); mom = mom - arg.coeff * linkA; makeAntiHerm(mom); arg.mom(dir, idx, parity) = mom; return; } template <typename Float, typename Arg> void GaugeForceCPU(Arg &arg) { for (int dir=0; dir<4; dir++) { for (int parity=0; parity<2; parity++) { for (int idx=0; idx<arg.threads; idx++) { switch(dir) { case 0: GaugeForceKernel<Float,Arg,0>(arg, idx, parity); break; case 1: GaugeForceKernel<Float,Arg,1>(arg, idx, parity); break; case 2: GaugeForceKernel<Float,Arg,2>(arg, idx, parity); break; case 3: GaugeForceKernel<Float,Arg,3>(arg, idx, parity); break; } } } } return; } template <typename Float, typename Arg> __global__ void GaugeForceGPU(Arg arg) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= arg.threads) return; int parity = blockIdx.y * blockDim.y + threadIdx.y; int dir = blockIdx.z * blockDim.z + threadIdx.z; switch(dir) { case 0: GaugeForceKernel<Float,Arg,0>(arg, idx, parity); break; case 1: GaugeForceKernel<Float,Arg,1>(arg, idx, parity); break; case 2: GaugeForceKernel<Float,Arg,2>(arg, idx, parity); break; case 3: GaugeForceKernel<Float,Arg,3>(arg, idx, parity); break; } return; } template <typename Float, typename Arg> class GaugeForce : public TunableVectorY { private: Arg &arg; QudaFieldLocation location; const char *vol_str; unsigned int sharedBytesPerThread() const { return 4; } // for dynamic indexing array unsigned int minThreads() const { return arg.threads; } bool tuneGridDim() const { return false; } // don't tune the grid dimension public: GaugeForce(Arg &arg, const GaugeField &meta_mom, const GaugeField &meta_u) : TunableVectorY(2), arg(arg), location(meta_mom.Location()), vol_str(meta_mom.VolString()) { } virtual ~GaugeForce() { } void apply(const hipStream_t &stream) { if (location == QUDA_CUDA_FIELD_LOCATION) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); hipLaunchKernelGGL(( GaugeForceGPU<Float,Arg>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes, 0, arg); } else { GaugeForceCPU<Float,Arg>(arg); } } void preTune() { arg.mom.save(); } void postTune() { arg.mom.load(); } long long flops() const { return (arg.count - arg.num_paths + 1) * 198ll * 2 * arg.mom.volumeCB * 4; } long long bytes() const { return ((arg.count + 1ll) * arg.u.Bytes() + 2ll*arg.mom.Bytes()) * 2 * arg.mom.volumeCB * 4; } TuneKey tuneKey() const { std::stringstream aux; char comm[5]; comm[0] = (commDimPartitioned(0) ? '1' : '0'); comm[1] = (commDimPartitioned(1) ? '1' : '0'); comm[2] = (commDimPartitioned(2) ? '1' : '0'); comm[3] = (commDimPartitioned(3) ? 
'1' : '0'); comm[4] = '\0'; aux << "comm=" << comm << ",threads=" << arg.threads << ",num_paths=" << arg.num_paths; return TuneKey(vol_str, typeid(*this).name(), aux.str().c_str()); } bool advanceBlockDim(TuneParam &param) const { dim3 block = param.block; dim3 grid = param.grid; bool rtn = TunableVectorY::advanceBlockDim(param); param.block.z = block.z; param.grid.z = grid.z; if (!rtn) { if (param.block.z < 4) { param.block.z *= 2; param.grid.z = 4 / param.block.z; rtn = true; } else { param.block.z = 1; param.grid.z = 4; rtn = false; } } return rtn; } void initTuneParam(TuneParam &param) const { TunableVectorY::initTuneParam(param); param.block.z = 1; param.grid.z = 4; } void defaultTuneParam(TuneParam &param) const { TunableVectorY::defaultTuneParam(param); param.block.z = 1; param.grid.z = 4; } }; template <typename Float, typename Mom, typename Gauge> void gaugeForce(Mom mom, const Gauge &u, GaugeField& meta_mom, const GaugeField& meta_u, const double coeff, int ***input_path, const int* length_h, const double* path_coeff_h, const int num_paths, const int path_max_length) { size_t bytes = num_paths*path_max_length*sizeof(int); int *input_path_d[4]; int count = 0; for (int dir=0; dir<4; dir++) { input_path_d[dir] = (int*)pool_device_malloc(bytes); hipMemset(input_path_d[dir], 0, bytes); int* input_path_h = (int*)safe_malloc(bytes); memset(input_path_h, 0, bytes); // flatten the input_path array for copying to the device for (int i=0; i < num_paths; i++) { for (int j=0; j < length_h[i]; j++) { input_path_h[i*path_max_length + j] = input_path[dir][i][j]; if (dir==0) count++; } } qudaMemcpy(input_path_d[dir], input_path_h, bytes, hipMemcpyHostToDevice); host_free(input_path_h); } //length int* length_d = (int*)pool_device_malloc(num_paths*sizeof(int)); qudaMemcpy(length_d, length_h, num_paths*sizeof(int), hipMemcpyHostToDevice); //path_coeff double* path_coeff_d = (double*)pool_device_malloc(num_paths*sizeof(double)); qudaMemcpy(path_coeff_d, path_coeff_h, num_paths*sizeof(double), hipMemcpyHostToDevice); GaugeForceArg<Mom,Gauge> arg(mom, u, num_paths, path_max_length, coeff, input_path_d, length_d, path_coeff_d, count, meta_mom, meta_u); GaugeForce<Float,GaugeForceArg<Mom,Gauge> > gauge_force(arg, meta_mom, meta_u); gauge_force.apply(0); checkCudaError(); pool_device_free(length_d); pool_device_free(path_coeff_d); for (int dir=0; dir<4; dir++) pool_device_free(input_path_d[dir]); qudaDeviceSynchronize(); } template <typename Float> void gaugeForce(GaugeField& mom, const GaugeField& u, const double coeff, int ***input_path, const int* length, const double* path_coeff, const int num_paths, const int max_length) { if (mom.Reconstruct() != QUDA_RECONSTRUCT_10) errorQuda("Reconstruction type %d not supported", mom.Reconstruct()); if (mom.Order() == QUDA_FLOAT2_GAUGE_ORDER) { typedef typename gauge::FloatNOrder<Float,18,2,11> M; if (u.Reconstruct() == QUDA_RECONSTRUCT_NO) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type G; gaugeForce<Float,M,G>(M(mom), G(u), mom, u, coeff, input_path, length, path_coeff, num_paths, max_length); } else if (u.Reconstruct() == QUDA_RECONSTRUCT_12) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type G; gaugeForce<Float,M,G>(M(mom), G(u), mom, u, coeff, input_path, length, path_coeff, num_paths, max_length); } else { errorQuda("Reconstruction type %d not supported", u.Reconstruct()); } } else { errorQuda("Gauge Field order %d not supported", mom.Order()); } } #endif // GPU_GAUGE_FORCE void gaugeForce(GaugeField& mom, const GaugeField& 
u, double coeff, int ***input_path, int *length, double *path_coeff, int num_paths, int max_length) { #ifdef GPU_GAUGE_FORCE if (mom.Precision() != u.Precision()) errorQuda("Mixed precision not supported"); if (mom.Location() != u.Location()) errorQuda("Mixed field locations not supported"); switch(mom.Precision()) { case QUDA_DOUBLE_PRECISION: gaugeForce<double>(mom, u, coeff, input_path, length, path_coeff, num_paths, max_length); break; case QUDA_SINGLE_PRECISION: gaugeForce<float>(mom, u, coeff, input_path, length, path_coeff, num_paths, max_length); break; default: errorQuda("Unsupported precision %d", mom.Precision()); } #else errorQuda("Gauge force has not been built"); #endif // GPU_GAUGE_FORCE } } // namespace quda
8590271fd93c18dfb1acc06d14e6eb176bb986c5.cu
#include <gauge_field_order.h> #include <quda_matrix.h> #include <index_helper.cuh> #include <generics/ldg.h> #include <tune_quda.h> namespace quda { #ifdef GPU_GAUGE_FORCE template <typename Mom, typename Gauge> struct GaugeForceArg { Mom mom; const Gauge u; int threads; int X[4]; // the regular volume parameters int E[4]; // the extended volume parameters int border[4]; // radius of border int num_paths; int path_max_length; double coeff; const int *input_path_d[4]; const int *length_d; const double *path_coeff_d; int count; // equal to sum of all path lengths. Used a convenience for computing perf GaugeForceArg(Mom &mom, const Gauge &u, int num_paths, int path_max_length, double coeff, int **input_path_d, const int *length_d, const double* path_coeff_d, int count, const GaugeField &meta_mom, const GaugeField &meta_u) : mom(mom), u(u), threads(meta_mom.VolumeCB()), num_paths(num_paths), path_max_length(path_max_length), coeff(coeff), input_path_d{ input_path_d[0], input_path_d[1], input_path_d[2], input_path_d[3] }, length_d(length_d), path_coeff_d(path_coeff_d), count(count) { for(int i=0; i<4; i++) { X[i] = meta_mom.X()[i]; E[i] = meta_u.X()[i]; border[i] = (E[i] - X[i])/2; } } virtual ~GaugeForceArg() { } }; __device__ __host__ inline static int flipDir(int dir) { return (7-dir); } __device__ __host__ inline static bool isForwards(int dir) { return (dir <= 3); } // this ensures that array elements are held in cache template <typename T> __device__ __host__ inline static T cache(const T *ptr, int idx) { #ifdef __CUDA_ARCH__ return __ldg(ptr+idx); #else return ptr[idx]; #endif } template<typename Float, typename Arg, int dir> __device__ __host__ inline void GaugeForceKernel(Arg &arg, int idx, int parity) { typedef Matrix<complex<Float>,3> Link; int x[4] = {0, 0, 0, 0}; getCoords(x, idx, arg.X, parity); for (int dr=0; dr<4; ++dr) x[dr] += arg.border[dr]; // extended grid coordinates //linkA: current matrix //linkB: the loaded matrix in this round Link linkA, linkB, staple; #ifdef __CUDA_ARCH__ extern __shared__ int s[]; int tid = (threadIdx.z*blockDim.y + threadIdx.y)*blockDim.x + threadIdx.x; s[tid] = 0; signed char *dx = (signed char*)&s[tid]; #else int dx[4] = {0, 0, 0, 0}; #endif for (int i=0; i<arg.num_paths; i++) { Float coeff = cache(arg.path_coeff_d,i); if (coeff == 0) continue; const int* path = arg.input_path_d[dir] + i*arg.path_max_length; // start from end of link in direction dir int nbr_oddbit = (parity^1); dx[dir]++; int path0 = cache(path,0); int lnkdir = isForwards(path0) ? path0 : flipDir(path0); if (isForwards(path0)) { linkB = arg.u(lnkdir, linkIndexShift(x,dx,arg.E), nbr_oddbit); linkA = linkB; dx[lnkdir]++; // now have to update location nbr_oddbit = nbr_oddbit^1; } else { dx[lnkdir]--; // if we are going backwards the link is on the adjacent site nbr_oddbit = nbr_oddbit^1; linkB = arg.u(lnkdir, linkIndexShift(x,dx,arg.E), nbr_oddbit); linkA = conj(linkB); } for (int j=1; j<cache(arg.length_d,i); j++) { int pathj = cache(path,j); int lnkdir = isForwards(pathj) ? 
pathj : flipDir(pathj); if (isForwards(pathj)) { linkB = arg.u(lnkdir, linkIndexShift(x,dx,arg.E), nbr_oddbit); linkA = linkA * linkB; dx[lnkdir]++; // now have to update to new location nbr_oddbit = nbr_oddbit^1; } else { dx[lnkdir]--; // if we are going backwards the link is on the adjacent site nbr_oddbit = nbr_oddbit^1; linkB = arg.u(lnkdir, linkIndexShift(x,dx,arg.E), nbr_oddbit); linkA = linkA * conj(linkB); } } //j staple = staple + coeff*linkA; } //i // multiply by U(x) linkA = arg.u(dir, linkIndex(x,arg.E), parity); linkA = linkA * staple; // update mom(x) Link mom = arg.mom(dir, idx, parity); mom = mom - arg.coeff * linkA; makeAntiHerm(mom); arg.mom(dir, idx, parity) = mom; return; } template <typename Float, typename Arg> void GaugeForceCPU(Arg &arg) { for (int dir=0; dir<4; dir++) { for (int parity=0; parity<2; parity++) { for (int idx=0; idx<arg.threads; idx++) { switch(dir) { case 0: GaugeForceKernel<Float,Arg,0>(arg, idx, parity); break; case 1: GaugeForceKernel<Float,Arg,1>(arg, idx, parity); break; case 2: GaugeForceKernel<Float,Arg,2>(arg, idx, parity); break; case 3: GaugeForceKernel<Float,Arg,3>(arg, idx, parity); break; } } } } return; } template <typename Float, typename Arg> __global__ void GaugeForceGPU(Arg arg) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= arg.threads) return; int parity = blockIdx.y * blockDim.y + threadIdx.y; int dir = blockIdx.z * blockDim.z + threadIdx.z; switch(dir) { case 0: GaugeForceKernel<Float,Arg,0>(arg, idx, parity); break; case 1: GaugeForceKernel<Float,Arg,1>(arg, idx, parity); break; case 2: GaugeForceKernel<Float,Arg,2>(arg, idx, parity); break; case 3: GaugeForceKernel<Float,Arg,3>(arg, idx, parity); break; } return; } template <typename Float, typename Arg> class GaugeForce : public TunableVectorY { private: Arg &arg; QudaFieldLocation location; const char *vol_str; unsigned int sharedBytesPerThread() const { return 4; } // for dynamic indexing array unsigned int minThreads() const { return arg.threads; } bool tuneGridDim() const { return false; } // don't tune the grid dimension public: GaugeForce(Arg &arg, const GaugeField &meta_mom, const GaugeField &meta_u) : TunableVectorY(2), arg(arg), location(meta_mom.Location()), vol_str(meta_mom.VolString()) { } virtual ~GaugeForce() { } void apply(const cudaStream_t &stream) { if (location == QUDA_CUDA_FIELD_LOCATION) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); GaugeForceGPU<Float,Arg><<<tp.grid,tp.block,tp.shared_bytes>>>(arg); } else { GaugeForceCPU<Float,Arg>(arg); } } void preTune() { arg.mom.save(); } void postTune() { arg.mom.load(); } long long flops() const { return (arg.count - arg.num_paths + 1) * 198ll * 2 * arg.mom.volumeCB * 4; } long long bytes() const { return ((arg.count + 1ll) * arg.u.Bytes() + 2ll*arg.mom.Bytes()) * 2 * arg.mom.volumeCB * 4; } TuneKey tuneKey() const { std::stringstream aux; char comm[5]; comm[0] = (commDimPartitioned(0) ? '1' : '0'); comm[1] = (commDimPartitioned(1) ? '1' : '0'); comm[2] = (commDimPartitioned(2) ? '1' : '0'); comm[3] = (commDimPartitioned(3) ? 
'1' : '0'); comm[4] = '\0'; aux << "comm=" << comm << ",threads=" << arg.threads << ",num_paths=" << arg.num_paths; return TuneKey(vol_str, typeid(*this).name(), aux.str().c_str()); } bool advanceBlockDim(TuneParam &param) const { dim3 block = param.block; dim3 grid = param.grid; bool rtn = TunableVectorY::advanceBlockDim(param); param.block.z = block.z; param.grid.z = grid.z; if (!rtn) { if (param.block.z < 4) { param.block.z *= 2; param.grid.z = 4 / param.block.z; rtn = true; } else { param.block.z = 1; param.grid.z = 4; rtn = false; } } return rtn; } void initTuneParam(TuneParam &param) const { TunableVectorY::initTuneParam(param); param.block.z = 1; param.grid.z = 4; } void defaultTuneParam(TuneParam &param) const { TunableVectorY::defaultTuneParam(param); param.block.z = 1; param.grid.z = 4; } }; template <typename Float, typename Mom, typename Gauge> void gaugeForce(Mom mom, const Gauge &u, GaugeField& meta_mom, const GaugeField& meta_u, const double coeff, int ***input_path, const int* length_h, const double* path_coeff_h, const int num_paths, const int path_max_length) { size_t bytes = num_paths*path_max_length*sizeof(int); int *input_path_d[4]; int count = 0; for (int dir=0; dir<4; dir++) { input_path_d[dir] = (int*)pool_device_malloc(bytes); cudaMemset(input_path_d[dir], 0, bytes); int* input_path_h = (int*)safe_malloc(bytes); memset(input_path_h, 0, bytes); // flatten the input_path array for copying to the device for (int i=0; i < num_paths; i++) { for (int j=0; j < length_h[i]; j++) { input_path_h[i*path_max_length + j] = input_path[dir][i][j]; if (dir==0) count++; } } qudaMemcpy(input_path_d[dir], input_path_h, bytes, cudaMemcpyHostToDevice); host_free(input_path_h); } //length int* length_d = (int*)pool_device_malloc(num_paths*sizeof(int)); qudaMemcpy(length_d, length_h, num_paths*sizeof(int), cudaMemcpyHostToDevice); //path_coeff double* path_coeff_d = (double*)pool_device_malloc(num_paths*sizeof(double)); qudaMemcpy(path_coeff_d, path_coeff_h, num_paths*sizeof(double), cudaMemcpyHostToDevice); GaugeForceArg<Mom,Gauge> arg(mom, u, num_paths, path_max_length, coeff, input_path_d, length_d, path_coeff_d, count, meta_mom, meta_u); GaugeForce<Float,GaugeForceArg<Mom,Gauge> > gauge_force(arg, meta_mom, meta_u); gauge_force.apply(0); checkCudaError(); pool_device_free(length_d); pool_device_free(path_coeff_d); for (int dir=0; dir<4; dir++) pool_device_free(input_path_d[dir]); qudaDeviceSynchronize(); } template <typename Float> void gaugeForce(GaugeField& mom, const GaugeField& u, const double coeff, int ***input_path, const int* length, const double* path_coeff, const int num_paths, const int max_length) { if (mom.Reconstruct() != QUDA_RECONSTRUCT_10) errorQuda("Reconstruction type %d not supported", mom.Reconstruct()); if (mom.Order() == QUDA_FLOAT2_GAUGE_ORDER) { typedef typename gauge::FloatNOrder<Float,18,2,11> M; if (u.Reconstruct() == QUDA_RECONSTRUCT_NO) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type G; gaugeForce<Float,M,G>(M(mom), G(u), mom, u, coeff, input_path, length, path_coeff, num_paths, max_length); } else if (u.Reconstruct() == QUDA_RECONSTRUCT_12) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type G; gaugeForce<Float,M,G>(M(mom), G(u), mom, u, coeff, input_path, length, path_coeff, num_paths, max_length); } else { errorQuda("Reconstruction type %d not supported", u.Reconstruct()); } } else { errorQuda("Gauge Field order %d not supported", mom.Order()); } } #endif // GPU_GAUGE_FORCE void gaugeForce(GaugeField& mom, const 
GaugeField& u, double coeff, int ***input_path, int *length, double *path_coeff, int num_paths, int max_length) { #ifdef GPU_GAUGE_FORCE if (mom.Precision() != u.Precision()) errorQuda("Mixed precision not supported"); if (mom.Location() != u.Location()) errorQuda("Mixed field locations not supported"); switch(mom.Precision()) { case QUDA_DOUBLE_PRECISION: gaugeForce<double>(mom, u, coeff, input_path, length, path_coeff, num_paths, max_length); break; case QUDA_SINGLE_PRECISION: gaugeForce<float>(mom, u, coeff, input_path, length, path_coeff, num_paths, max_length); break; default: errorQuda("Unsupported precision %d", mom.Precision()); } #else errorQuda("Gauge force has not been built"); #endif // GPU_GAUGE_FORCE } } // namespace quda
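Both versions of the gauge-force kernel read the per-path coefficient and length tables through the small cache() helper, which uses __ldg on the device so those loads go through the read-only data cache, and falls back to a plain load on the host path. A stripped-down sketch of that idiom, assuming compute capability 3.5 or newer (the kernel below is illustrative, not part of QUDA):

#include <cuda_runtime.h>

// Read-only cached load on the device, plain load on the host (same idiom as cache() above).
template <typename T>
__device__ __host__ inline T cached_read(const T *ptr, int idx) {
#ifdef __CUDA_ARCH__
    return __ldg(ptr + idx);
#else
    return ptr[idx];
#endif
}

__global__ void sum_path_coeffs(const double *coeffs, int num_paths, double *out) {
    double sum = 0.0;
    for (int i = 0; i < num_paths; ++i) {
        sum += cached_read(coeffs, i);  // each read goes through the read-only cache
    }
    if (threadIdx.x == 0 && blockIdx.x == 0) {
        *out = sum;
    }
}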
2f4b49033e7896491219182331ee7a8aefc336a4.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #define GIGABYTE 1000000000 struct entry { int origIndex; float xValue, yValue; };//entry int h_binarySearchLB(entry * data, float val, int n) { //return index of greatest leftmost xValue that is greater than val int left = 0; int right = n; int mid; while (left != right) { mid = (left+right)/2; if (data[mid].xValue <= val) left = mid + 1; else right = mid; }//while return left; }//binarySearchLB int h_binarySearchUB(entry * data, float val, int n) { //return index of greatest leftmost xValue that is greater than val int left = 0; int right = n; int mid; while (left != right) { mid = (left+right)/2; if (data[mid].xValue >= val) right = mid; else left = mid + 1; }//while return left; }//binarySearchUB __device__ int binarySearchLB(entry * data, float val, int n)//val is x val +/- tuning parameter { //return index of greatest leftmost xValue that is greater than val int left = 0; int right = n; int mid; while (left != right) { mid = (left+right)/2; if (data[mid].xValue <= val) left = mid + 1; else right = mid; }//while return left; }//binarySearchLB __device__ int binarySearchUB(entry * data, float val, int n) { //return index of greatest leftmost xValue that is greater than val int left = 0; int right = n; int mid; while (left != right) { mid = (left+right)/2; if (data[mid].xValue >= val) right = mid; else left = mid + 1; }//while return left; }//binarySearchUB __global__ void kernel1(entry * array, int n, float h) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int lowerBound = binarySearchLB(array, array[idx].xValue-h, n);//binsearchlb device func int upperBound = binarySearchUB(array, array[idx].xValue+h, n);//ub is device func float avg = 0; //calculate y average for (int i=lowerBound; i<upperBound; i++) avg += array[i].yValue; avg = avg/((float) (upperBound-lowerBound)); //yValue stores the avg array[idx].yValue = avg; }//kernel1 __global__ void kernel2(entry * array, int n) { float avg = 0; for (int i=0; i<n; i++) avg += array[i].yValue; avg = avg / (float) n; array[0].yValue = avg; }//kernel2 void merge(entry * a, int low, int high) { int pivot = (low+high)/2; int i = 0; int j = low; int k = pivot+1; entry * temp = new entry[high-low+1]; while ((j <= pivot) && (k <= high)) { if (a[j].xValue < a[k].xValue) temp[i++] = a[j++]; else temp[i++] = a[k++]; }//while while (j <= pivot) temp[i++] = a[j++]; while (k <= high) temp[i++] = a[k++]; for (int h=low; h<= high; h++) a[h] = temp[h-low]; delete [] temp; }//merge void mergeSort(entry * a, int low, int high) { int pivot; if (low < high) { pivot = (low+high)/2; mergeSort(a, low, pivot); mergeSort(a, pivot+1, high); merge(a, low, high); }//if }//mergeSort void smoothc(float * x, float * y, float * m, int n, float h) { entry * array = new entry[n]; entry * deviceArray; int * countArray = new int[n];// should not be there, mem leak int blockSize = 1024;//num thread per block //creat array of structs for (int i=0; i<n; i++) { entry temp; temp.origIndex = i; temp.xValue = x[i]; temp.yValue = y[i]; array[i] = temp; }//for //sort by xValue mergeSort(array, 0, n-1); if (n < GIGABYTE/sizeof(entry))// if fits into 1 gig of mem hard code in line 5 { //put array onto device array hipMalloc(&deviceArray, sizeof(entry) * n); hipMemcpy(deviceArray, array, sizeof(entry) * n, hipMemcpyHostToDevice); dim3 dimBlock(blockSize); dim3 dimGrid(ceil(n/blockSize)); //stores smoothed average in yValue hipLaunchKernelGGL(( kernel1) , 
dim3(dimGrid), dim3(dimBlock) , 0, 0, deviceArray, n, h);//send to line 96 hipMemcpy(array, deviceArray, sizeof(entry) * n, hipMemcpyDeviceToHost); //rearrange array in original order for (int i=0; i<n; i++) m[array[i].origIndex] = array[i].yValue; hipFree(deviceArray); }//if else //have to chunk up data { int lb, ub; for (int i=0; i<n; i++) { lb = h_binarySearchLB(array, array[i].xValue-h, n); ub = h_binarySearchUB(array, array[i].xValue+h, n); entry * chunkArray = new entry[ub-lb]; for (int j=0; j<ub-lb; j++) chunkArray[j] = array[lb+j]; hipMalloc(&deviceArray, sizeof(entry) * (ub-lb)); hipMemcpy(deviceArray, chunkArray, sizeof(entry) * (ub-lb), hipMemcpyHostToDevice); hipLaunchKernelGGL(( kernel2) , dim3(1), dim3(1) , 0, 0, deviceArray, ub-lb); hipMemcpy(chunkArray, deviceArray, sizeof(entry) * (ub-lb), hipMemcpyDeviceToHost); m[array[i].origIndex] = chunkArray[0].yValue;//store y avg hipFree(deviceArray); delete [] chunkArray; }//for }//else delete [] array; }//smoothc /* int main() { int n = 200; float * x = new float[n]; float * y = new float[n]; float * m = new float[n]; float h = 2; for (int i=0; i<n; i++) { x[i] = rand() % 100; y[i] = rand() % 100; }//for float x[20] = {1, 1,2,2, 3,3, 4,4, 5,5, 6,6, 7,7, 8,8, 9,9, 10,10}; float y[20] = {11,11, 12,12, 13,13, 14,14, 15,15, 16,16, 17,17, 18,18, 19,19, 20,20}; float m[20]; int n = 20; float h = 2; smoothc(x, y, m, n, h); // delete [] x; // delete [] y; // delete [] m; }//main */
2f4b49033e7896491219182331ee7a8aefc336a4.cu
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #define GIGABYTE 1000000000 struct entry { int origIndex; float xValue, yValue; };//entry int h_binarySearchLB(entry * data, float val, int n) { //return index of greatest leftmost xValue that is greater than val int left = 0; int right = n; int mid; while (left != right) { mid = (left+right)/2; if (data[mid].xValue <= val) left = mid + 1; else right = mid; }//while return left; }//binarySearchLB int h_binarySearchUB(entry * data, float val, int n) { //return index of greatest leftmost xValue that is greater than val int left = 0; int right = n; int mid; while (left != right) { mid = (left+right)/2; if (data[mid].xValue >= val) right = mid; else left = mid + 1; }//while return left; }//binarySearchUB __device__ int binarySearchLB(entry * data, float val, int n)//val is x val +/- tuning parameter { //return index of greatest leftmost xValue that is greater than val int left = 0; int right = n; int mid; while (left != right) { mid = (left+right)/2; if (data[mid].xValue <= val) left = mid + 1; else right = mid; }//while return left; }//binarySearchLB __device__ int binarySearchUB(entry * data, float val, int n) { //return index of greatest leftmost xValue that is greater than val int left = 0; int right = n; int mid; while (left != right) { mid = (left+right)/2; if (data[mid].xValue >= val) right = mid; else left = mid + 1; }//while return left; }//binarySearchUB __global__ void kernel1(entry * array, int n, float h) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int lowerBound = binarySearchLB(array, array[idx].xValue-h, n);//binsearchlb device func int upperBound = binarySearchUB(array, array[idx].xValue+h, n);//ub is device func float avg = 0; //calculate y average for (int i=lowerBound; i<upperBound; i++) avg += array[i].yValue; avg = avg/((float) (upperBound-lowerBound)); //yValue stores the avg array[idx].yValue = avg; }//kernel1 __global__ void kernel2(entry * array, int n) { float avg = 0; for (int i=0; i<n; i++) avg += array[i].yValue; avg = avg / (float) n; array[0].yValue = avg; }//kernel2 void merge(entry * a, int low, int high) { int pivot = (low+high)/2; int i = 0; int j = low; int k = pivot+1; entry * temp = new entry[high-low+1]; while ((j <= pivot) && (k <= high)) { if (a[j].xValue < a[k].xValue) temp[i++] = a[j++]; else temp[i++] = a[k++]; }//while while (j <= pivot) temp[i++] = a[j++]; while (k <= high) temp[i++] = a[k++]; for (int h=low; h<= high; h++) a[h] = temp[h-low]; delete [] temp; }//merge void mergeSort(entry * a, int low, int high) { int pivot; if (low < high) { pivot = (low+high)/2; mergeSort(a, low, pivot); mergeSort(a, pivot+1, high); merge(a, low, high); }//if }//mergeSort void smoothc(float * x, float * y, float * m, int n, float h) { entry * array = new entry[n]; entry * deviceArray; int * countArray = new int[n];// should not be there, mem leak int blockSize = 1024;//num thread per block //creat array of structs for (int i=0; i<n; i++) { entry temp; temp.origIndex = i; temp.xValue = x[i]; temp.yValue = y[i]; array[i] = temp; }//for //sort by xValue mergeSort(array, 0, n-1); if (n < GIGABYTE/sizeof(entry))// if fits into 1 gig of mem hard code in line 5 { //put array onto device array cudaMalloc(&deviceArray, sizeof(entry) * n); cudaMemcpy(deviceArray, array, sizeof(entry) * n, cudaMemcpyHostToDevice); dim3 dimBlock(blockSize); dim3 dimGrid(ceil(n/blockSize)); //stores smoothed average in yValue kernel1 <<< dimGrid, dimBlock >>> (deviceArray, n, h);//send to line 96 cudaMemcpy(array, deviceArray, 
sizeof(entry) * n, cudaMemcpyDeviceToHost); //rearrange array in original order for (int i=0; i<n; i++) m[array[i].origIndex] = array[i].yValue; cudaFree(deviceArray); }//if else //have to chunk up data { int lb, ub; for (int i=0; i<n; i++) { lb = h_binarySearchLB(array, array[i].xValue-h, n); ub = h_binarySearchUB(array, array[i].xValue+h, n); entry * chunkArray = new entry[ub-lb]; for (int j=0; j<ub-lb; j++) chunkArray[j] = array[lb+j]; cudaMalloc(&deviceArray, sizeof(entry) * (ub-lb)); cudaMemcpy(deviceArray, chunkArray, sizeof(entry) * (ub-lb), cudaMemcpyHostToDevice); kernel2 <<< 1, 1 >>> (deviceArray, ub-lb); cudaMemcpy(chunkArray, deviceArray, sizeof(entry) * (ub-lb), cudaMemcpyDeviceToHost); m[array[i].origIndex] = chunkArray[0].yValue;//store y avg cudaFree(deviceArray); delete [] chunkArray; }//for }//else delete [] array; }//smoothc /* int main() { int n = 200; float * x = new float[n]; float * y = new float[n]; float * m = new float[n]; float h = 2; for (int i=0; i<n; i++) { x[i] = rand() % 100; y[i] = rand() % 100; }//for float x[20] = {1, 1,2,2, 3,3, 4,4, 5,5, 6,6, 7,7, 8,8, 9,9, 10,10}; float y[20] = {11,11, 12,12, 13,13, 14,14, 15,15, 16,16, 17,17, 18,18, 19,19, 20,20}; float m[20]; int n = 20; float h = 2; smoothc(x, y, m, n, h); // delete [] x; // delete [] y; // delete [] m; }//main */
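One detail worth flagging in both versions of smoothc: the launch configuration dimGrid(ceil(n/blockSize)) divides two ints before ceil ever runs, so the result is floor(n/blockSize) and a trailing partial block is dropped whenever n is not a multiple of 1024; kernel1 also has no idx-bounds guard, which such a partial block would require. A hedged sketch of the usual round-up pattern (the fill kernel and wrapper here are illustrative stand-ins, not a rewrite of smoothc):

#include <cuda_runtime.h>

__global__ void fillValue(float *data, int n, float value) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;  // bounds guard for the last, partially filled block
    data[idx] = value;
}

void launchFill(float *d_data, int n, float value) {
    int blockSize = 1024;
    // Integer round-up: (n + blockSize - 1) / blockSize covers every element,
    // unlike ceil(n / blockSize), where n / blockSize truncates first.
    int gridSize = (n + blockSize - 1) / blockSize;
    fillValue<<<gridSize, blockSize>>>(d_data, n, value);
}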
4a859833c7984acef1bb66970eb8fc6141f60814.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Software License Agreement (BSD License) * * Point Cloud Library (PCL) - www.pointclouds.org * Copyright (c) 2010-2012, Willow Garage, Inc. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $Id: $ * @authors: Cedric Cagniart, Koen Buys, Anatoly Baksheev * */ #include <pcl/gpu/people/tree.h> #include <pcl/gpu/people/label_common.h> #include <pcl/gpu/utils/safe_call.hpp> #include <pcl/gpu/utils/texture_binder.hpp> #include <stdio.h> #include <limits> #include <assert.h> #include "internal.h" using pcl::gpu::people::trees::Node; using pcl::gpu::people::trees::Label; using pcl::gpu::people::trees::AttribLocation; using pcl::gpu::people::trees::Attrib; using pcl::gpu::people::trees::focal; using pcl::gpu::people::trees::NUM_LABELS; using namespace std; using uint = unsigned int; #ifdef __CDT_PARSER__ // This is an eclipse specific hack, does nothing to the code #define __global__ #define __device__ #define __shared__ #define __forceinline__ #define __constant__ #define __float2int_rn #endif namespace pcl { namespace device { /** * \brief This combines two probabilities into a single according to their weight = p(d|{I,x}) **/ __global__ void KernelCUDA_CombineProb (PtrStepSz<prob_histogram> probIn1, float weight1, PtrStepSz<prob_histogram> probIn2, float weight2, PtrStepSz<prob_histogram> probOut) { // map block and thread onto image coordinates int u = blockIdx.x * blockDim.x + threadIdx.x; int v = blockIdx.y * blockDim.y + threadIdx.y; if( u >= probIn1.cols || v >= probIn1.rows ) return; for(int l = 0; l < NUM_LABELS; ++l) { // TODO: replace this with a histogram copy probOut.ptr(v)[u].probs[l] = weight1 * probIn1.ptr(v)[u].probs[l] + weight2 * probIn2.ptr(v)[u].probs[l]; } } /** * \brief This sums a probabilities into a another according to its weight = p(d|{I,x}) **/ __global__ void KernelCUDA_WeightedSumProb (PtrStepSz<prob_histogram> probIn, float weight, PtrStepSz<prob_histogram> probOut) { // map block and thread onto image coordinates int u = blockIdx.x * blockDim.x + 
threadIdx.x; int v = blockIdx.y * blockDim.y + threadIdx.y; if( u >= probIn.cols || v >= probIn.rows ) return; for(int l = 0; l < NUM_LABELS; ++l) { // TODO: replace this with a histogram copy probOut.ptr(v)[u].probs[l] += weight * probIn.ptr(v)[u].probs[l]; } } /** \brief This merges the histogram of probabilities into a final label **/ __global__ void KernelCUDA_SelectLabel (PtrStepSz<Label> labels, PtrStepSz<prob_histogram> Prob) { // map block and thread onto image coordinates int u = blockIdx.x * blockDim.x + threadIdx.x; int v = blockIdx.y * blockDim.y + threadIdx.y; if( u >= labels.cols || v >= labels.rows ) return; float maxValue = 0; int maxID = 31; // 31 equals to NOLABEL in label_common.h for some reason not resolved here prob_histogram p = Prob.ptr(v)[u]; for(int l = 0; l < NUM_LABELS; ++l) { // TODO: problem is that this one doesn't handle ties very well if(maxValue < p.probs[l]) { maxValue = p.probs[l]; maxID = l; } if(maxValue == p.probs[l]) { //DAMN WE HAVE A TIE //TODO: solve this //Workflow // 1) test if this is actually the largest value in the histogram // 2a) if not take the other one and continue // 2b) if it is, take the 1 neighbourhood } } labels.ptr(v)[u] = maxID; } /** * \brief Does Gaussian Blur in the horizontal row direction * \param[in] kernelSize needs to be odd! This should be fetched in the calling function before calling this method * TODO: replace this with OpenCV or NPP implementation **/ __global__ void KernelCUDA_GaussianBlurHor (PtrStepSz<prob_histogram> probIn, const float* kernel, const int kernelSize, PtrStepSz<prob_histogram> probOut) { // map block and thread onto image coordinates a single pixel for each thread int u = blockIdx.x * blockDim.x + threadIdx.x; int v = blockIdx.y * blockDim.y + threadIdx.y; // Skip when outside the image if( u >= probIn.cols || v >= probIn.rows ) return; // Do this for all the labels of this pixel for(int l = 0; l< NUM_LABELS; l++) { float sum = 0; // This contains the kernel convolution int j = 0; // This contains the offset in the kernel // KernelSize needs to be odd! This should be fetched in the calling function before calling this method for(int i = -__float2int_rn(kernelSize/2); i < __float2int_rn(kernelSize/2); i++) { // check if index goes outside image, pixels are skipped if((u+i) < 0 || (u+i) > probIn.cols) { j++; // skip to the next point } else { //int k = u+i; // This line fails, why?? sum += probIn.ptr(v)[u+i].probs[l] * kernel[j]; j++; } } probOut.ptr(v)[u].probs[l] = sum; //probOut.ptr(v)[u].probs[l] = probIn.ptr(v)[u].probs[l]; } } /** * \brief Does Gaussian Blur in the horizontal row direction * \param[in] kernelSize needs to be odd! This should be fetched in the calling function before calling this method * TODO: replace this with OpenCV or NPP implementation * **/ __global__ void KernelCUDA_GaussianBlurVer (PtrStepSz<prob_histogram> probIn, const float* kernel, const int kernelSize, PtrStepSz<prob_histogram> probOut) { // map block and thread onto image coordinates a single pixel for each thread int u = blockIdx.x * blockDim.x + threadIdx.x; int v = blockIdx.y * blockDim.y + threadIdx.y; // Skip when outside the image if( u >= probIn.cols || v >= probIn.rows ) return; // Do this for all the labels of this pixel for(int l = 0; l< NUM_LABELS; l++) { float sum = 0; // This contains the kernel convolution int j = 0; // This contains the offset in the kernel // KernelSize needs to be odd! 
This should be fetched in the calling function before calling this method for(int i = -__float2int_rn(kernelSize/2); i < __float2int_rn(kernelSize/2); i++) { // check if index goes outside image, pixels are skipped if((v+i) < 0 || (v+i) > probIn.rows) { j++; // skip to the next point } else { sum += probIn.ptr(v+i)[u].probs[l] * kernel[j]; j++; } } probOut.ptr(v)[u].probs[l] = sum; } } /** \brief This will merge the votes from the different trees into one final vote, including probabilistic's **/ void ProbabilityProc::CUDA_SelectLabel ( const Depth& depth, Labels& labels, LabelProbability& probabilities) { std::cout << "[pcl::device::ProbabilityProc::CUDA_SelectLabel] : (I) : Called" << std::endl; //labels.create(depth.rows(), depth.cols()); //probabilities.create(depth.rows(), depth.cols()); dim3 block(32, 8); dim3 grid(divUp(depth.cols(), block.x), divUp(depth.rows(), block.y) ); hipLaunchKernelGGL(( KernelCUDA_SelectLabel), dim3(grid), dim3(block) , 0, 0, labels, probabilities ); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); } /** \brief This will combine two probabilities according their weight **/ void ProbabilityProc::CUDA_CombineProb ( const Depth& depth, LabelProbability& probIn1, float weight1, LabelProbability& probIn2, float weight2, LabelProbability& probOut) { dim3 block(32, 8); dim3 grid(divUp(depth.cols(), block.x), divUp(depth.rows(), block.y) ); // CUDA kernel call hipLaunchKernelGGL(( KernelCUDA_CombineProb), dim3(grid), dim3(block) , 0, 0, probIn1, weight1, probIn2, weight2, probOut ); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); } /** \brief This will combine two probabilities according their weight **/ void ProbabilityProc::CUDA_WeightedSumProb ( const Depth& depth, LabelProbability& probIn, float weight, LabelProbability& probOut) { dim3 block(32, 8); dim3 grid(divUp(depth.cols(), block.x), divUp(depth.rows(), block.y) ); // CUDA kernel call hipLaunchKernelGGL(( KernelCUDA_WeightedSumProb), dim3(grid), dim3(block) , 0, 0, probIn, weight, probOut ); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); } /** \brief This will blur the input labelprobability with the given kernel **/ int ProbabilityProc::CUDA_GaussianBlur( const Depth& depth, LabelProbability& probIn, DeviceArray<float>& kernel, LabelProbability& probOut) { // Allocate the memory LabelProbability probTemp(depth.rows(), depth.cols()); // Call the method return CUDA_GaussianBlur(depth, probIn, kernel, probTemp, probOut); } /** \brief This will blur the input labelprobability with the given kernel, this version avoids extended allocation **/ int ProbabilityProc::CUDA_GaussianBlur( const Depth& depth, LabelProbability& probIn, DeviceArray<float>& kernel, LabelProbability& probTemp, LabelProbability& probOut) { dim3 block(32, 8); dim3 grid(divUp(depth.cols(), block.x), divUp(depth.rows(), block.y) ); if(kernel.size()/sizeof(float) % 2 == 0) //kernelSize is even, should be odd return -1; std::cout << "[pcl::device::ProbabilityProc::CUDA_GaussianBlur] : (I) : called c: " << probIn.cols() << " r: " << probIn.rows() << std::endl; //PCL_INFO("[pcl::device::ProbabilityProc::CUDA_GaussianBlur] : (I) : called c: %d r: %d\n", probIn.cols(), probIn.rows()); // CUDA kernel call Vertical hipLaunchKernelGGL(( KernelCUDA_GaussianBlurVer), dim3(grid), dim3(block) , 0, 0, probIn, kernel, kernel.size(), probTemp ); //KernelCUDA_GaussianBlurVer<<< grid, block >>>( probIn, kernel, kernel.size(), probOut ); cudaSafeCall( hipGetLastError() ); 
cudaSafeCall( hipDeviceSynchronize() ); // CUDA kernel call Horizontal hipLaunchKernelGGL(( KernelCUDA_GaussianBlurHor), dim3(grid), dim3(block) , 0, 0, probTemp, kernel, kernel.size(), probOut ); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); return 1; } } }
4a859833c7984acef1bb66970eb8fc6141f60814.cu
/* * Software License Agreement (BSD License) * * Point Cloud Library (PCL) - www.pointclouds.org * Copyright (c) 2010-2012, Willow Garage, Inc. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $Id: $ * @authors: Cedric Cagniart, Koen Buys, Anatoly Baksheev * */ #include <pcl/gpu/people/tree.h> #include <pcl/gpu/people/label_common.h> #include <pcl/gpu/utils/safe_call.hpp> #include <pcl/gpu/utils/texture_binder.hpp> #include <stdio.h> #include <limits> #include <assert.h> #include "internal.h" using pcl::gpu::people::trees::Node; using pcl::gpu::people::trees::Label; using pcl::gpu::people::trees::AttribLocation; using pcl::gpu::people::trees::Attrib; using pcl::gpu::people::trees::focal; using pcl::gpu::people::trees::NUM_LABELS; using namespace std; using uint = unsigned int; #ifdef __CDT_PARSER__ // This is an eclipse specific hack, does nothing to the code #define __global__ #define __device__ #define __shared__ #define __forceinline__ #define __constant__ #define __float2int_rn #endif namespace pcl { namespace device { /** * \brief This combines two probabilities into a single according to their weight = p(d|{I,x}) **/ __global__ void KernelCUDA_CombineProb (PtrStepSz<prob_histogram> probIn1, float weight1, PtrStepSz<prob_histogram> probIn2, float weight2, PtrStepSz<prob_histogram> probOut) { // map block and thread onto image coordinates int u = blockIdx.x * blockDim.x + threadIdx.x; int v = blockIdx.y * blockDim.y + threadIdx.y; if( u >= probIn1.cols || v >= probIn1.rows ) return; for(int l = 0; l < NUM_LABELS; ++l) { // TODO: replace this with a histogram copy probOut.ptr(v)[u].probs[l] = weight1 * probIn1.ptr(v)[u].probs[l] + weight2 * probIn2.ptr(v)[u].probs[l]; } } /** * \brief This sums a probabilities into a another according to its weight = p(d|{I,x}) **/ __global__ void KernelCUDA_WeightedSumProb (PtrStepSz<prob_histogram> probIn, float weight, PtrStepSz<prob_histogram> probOut) { // map block and thread onto image coordinates int u = blockIdx.x * blockDim.x + threadIdx.x; int v = blockIdx.y * blockDim.y + threadIdx.y; if( u >= probIn.cols || v >= 
probIn.rows ) return; for(int l = 0; l < NUM_LABELS; ++l) { // TODO: replace this with a histogram copy probOut.ptr(v)[u].probs[l] += weight * probIn.ptr(v)[u].probs[l]; } } /** \brief This merges the histogram of probabilities into a final label **/ __global__ void KernelCUDA_SelectLabel (PtrStepSz<Label> labels, PtrStepSz<prob_histogram> Prob) { // map block and thread onto image coordinates int u = blockIdx.x * blockDim.x + threadIdx.x; int v = blockIdx.y * blockDim.y + threadIdx.y; if( u >= labels.cols || v >= labels.rows ) return; float maxValue = 0; int maxID = 31; // 31 equals to NOLABEL in label_common.h for some reason not resolved here prob_histogram p = Prob.ptr(v)[u]; for(int l = 0; l < NUM_LABELS; ++l) { // TODO: problem is that this one doesn't handle ties very well if(maxValue < p.probs[l]) { maxValue = p.probs[l]; maxID = l; } if(maxValue == p.probs[l]) { //DAMN WE HAVE A TIE //TODO: solve this //Workflow // 1) test if this is actually the largest value in the histogram // 2a) if not take the other one and continue // 2b) if it is, take the 1 neighbourhood } } labels.ptr(v)[u] = maxID; } /** * \brief Does Gaussian Blur in the horizontal row direction * \param[in] kernelSize needs to be odd! This should be fetched in the calling function before calling this method * TODO: replace this with OpenCV or NPP implementation **/ __global__ void KernelCUDA_GaussianBlurHor (PtrStepSz<prob_histogram> probIn, const float* kernel, const int kernelSize, PtrStepSz<prob_histogram> probOut) { // map block and thread onto image coordinates a single pixel for each thread int u = blockIdx.x * blockDim.x + threadIdx.x; int v = blockIdx.y * blockDim.y + threadIdx.y; // Skip when outside the image if( u >= probIn.cols || v >= probIn.rows ) return; // Do this for all the labels of this pixel for(int l = 0; l< NUM_LABELS; l++) { float sum = 0; // This contains the kernel convolution int j = 0; // This contains the offset in the kernel // KernelSize needs to be odd! This should be fetched in the calling function before calling this method for(int i = -__float2int_rn(kernelSize/2); i < __float2int_rn(kernelSize/2); i++) { // check if index goes outside image, pixels are skipped if((u+i) < 0 || (u+i) > probIn.cols) { j++; // skip to the next point } else { //int k = u+i; // This line fails, why?? sum += probIn.ptr(v)[u+i].probs[l] * kernel[j]; j++; } } probOut.ptr(v)[u].probs[l] = sum; //probOut.ptr(v)[u].probs[l] = probIn.ptr(v)[u].probs[l]; } } /** * \brief Does Gaussian Blur in the horizontal row direction * \param[in] kernelSize needs to be odd! This should be fetched in the calling function before calling this method * TODO: replace this with OpenCV or NPP implementation * **/ __global__ void KernelCUDA_GaussianBlurVer (PtrStepSz<prob_histogram> probIn, const float* kernel, const int kernelSize, PtrStepSz<prob_histogram> probOut) { // map block and thread onto image coordinates a single pixel for each thread int u = blockIdx.x * blockDim.x + threadIdx.x; int v = blockIdx.y * blockDim.y + threadIdx.y; // Skip when outside the image if( u >= probIn.cols || v >= probIn.rows ) return; // Do this for all the labels of this pixel for(int l = 0; l< NUM_LABELS; l++) { float sum = 0; // This contains the kernel convolution int j = 0; // This contains the offset in the kernel // KernelSize needs to be odd! 
This should be fetched in the calling function before calling this method for(int i = -__float2int_rn(kernelSize/2); i < __float2int_rn(kernelSize/2); i++) { // check if index goes outside image, pixels are skipped if((v+i) < 0 || (v+i) > probIn.rows) { j++; // skip to the next point } else { sum += probIn.ptr(v+i)[u].probs[l] * kernel[j]; j++; } } probOut.ptr(v)[u].probs[l] = sum; } } /** \brief This will merge the votes from the different trees into one final vote, including probabilistic's **/ void ProbabilityProc::CUDA_SelectLabel ( const Depth& depth, Labels& labels, LabelProbability& probabilities) { std::cout << "[pcl::device::ProbabilityProc::CUDA_SelectLabel] : (I) : Called" << std::endl; //labels.create(depth.rows(), depth.cols()); //probabilities.create(depth.rows(), depth.cols()); dim3 block(32, 8); dim3 grid(divUp(depth.cols(), block.x), divUp(depth.rows(), block.y) ); KernelCUDA_SelectLabel<<< grid, block >>>( labels, probabilities ); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } /** \brief This will combine two probabilities according their weight **/ void ProbabilityProc::CUDA_CombineProb ( const Depth& depth, LabelProbability& probIn1, float weight1, LabelProbability& probIn2, float weight2, LabelProbability& probOut) { dim3 block(32, 8); dim3 grid(divUp(depth.cols(), block.x), divUp(depth.rows(), block.y) ); // CUDA kernel call KernelCUDA_CombineProb<<< grid, block >>>( probIn1, weight1, probIn2, weight2, probOut ); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } /** \brief This will combine two probabilities according their weight **/ void ProbabilityProc::CUDA_WeightedSumProb ( const Depth& depth, LabelProbability& probIn, float weight, LabelProbability& probOut) { dim3 block(32, 8); dim3 grid(divUp(depth.cols(), block.x), divUp(depth.rows(), block.y) ); // CUDA kernel call KernelCUDA_WeightedSumProb<<< grid, block >>>( probIn, weight, probOut ); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } /** \brief This will blur the input labelprobability with the given kernel **/ int ProbabilityProc::CUDA_GaussianBlur( const Depth& depth, LabelProbability& probIn, DeviceArray<float>& kernel, LabelProbability& probOut) { // Allocate the memory LabelProbability probTemp(depth.rows(), depth.cols()); // Call the method return CUDA_GaussianBlur(depth, probIn, kernel, probTemp, probOut); } /** \brief This will blur the input labelprobability with the given kernel, this version avoids extended allocation **/ int ProbabilityProc::CUDA_GaussianBlur( const Depth& depth, LabelProbability& probIn, DeviceArray<float>& kernel, LabelProbability& probTemp, LabelProbability& probOut) { dim3 block(32, 8); dim3 grid(divUp(depth.cols(), block.x), divUp(depth.rows(), block.y) ); if(kernel.size()/sizeof(float) % 2 == 0) //kernelSize is even, should be odd return -1; std::cout << "[pcl::device::ProbabilityProc::CUDA_GaussianBlur] : (I) : called c: " << probIn.cols() << " r: " << probIn.rows() << std::endl; //PCL_INFO("[pcl::device::ProbabilityProc::CUDA_GaussianBlur] : (I) : called c: %d r: %d\n", probIn.cols(), probIn.rows()); // CUDA kernel call Vertical KernelCUDA_GaussianBlurVer<<< grid, block >>>( probIn, kernel, kernel.size(), probTemp ); //KernelCUDA_GaussianBlurVer<<< grid, block >>>( probIn, kernel, kernel.size(), probOut ); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); // CUDA kernel call Horizontal KernelCUDA_GaussianBlurHor<<< grid, block >>>( probTemp, kernel, 
kernel.size(), probOut ); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); return 1; } } }
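// Both Gaussian-blur kernels above require an odd kernelSize and a 1-D kernel
// already resident on the device (CUDA_GaussianBlur rejects even sizes). A
// minimal host-side sketch of building such a kernel; the helper name
// make_gaussian_kernel and the use of std::vector are illustrative assumptions,
// not part of the library.
#include <cmath>
#include <vector>

std::vector<float> make_gaussian_kernel(int radius, float sigma)
{
    // kernelSize = 2*radius + 1, so the size is always odd as the kernels require.
    std::vector<float> k(2 * radius + 1);
    float sum = 0.f;
    for (int i = -radius; i <= radius; ++i)
    {
        k[i + radius] = std::exp(-(float)(i * i) / (2.f * sigma * sigma));
        sum += k[i + radius];
    }
    for (std::size_t i = 0; i < k.size(); ++i)
        k[i] /= sum; // normalize so blurring preserves the total probability mass
    return k;
}
// The result would then be uploaded before calling CUDA_GaussianBlur, e.g. with
// the PCL-style DeviceArray<float>::upload(k.data(), k.size()) (assumed here).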
a7cb0b20041c23b7224ed2e79e06dabb9142db5b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*******************************<GINKGO LICENSE>****************************** Copyright (c) 2017-2021, the Ginkgo authors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************<GINKGO LICENSE>*******************************/ #include "cuda/components/sorting.cuh" #include <memory> #include <random> #include <gtest/gtest.h> #include <ginkgo/core/base/array.hpp> #include <ginkgo/core/base/executor.hpp> #include "cuda/test/utils.hpp" namespace { using gko::kernels::cuda::bitonic_sort; using gko::kernels::cuda::config; constexpr int num_elements = 2048; constexpr int num_local = 4; constexpr auto num_threads = num_elements / num_local; __global__ void test_sort_shared(gko::int32* data) { gko::int32 local[num_local]; __shared__ gko::int32 sh_local[num_elements]; for (int i = 0; i < num_local; ++i) { local[i] = data[threadIdx.x * num_local + i]; } bitonic_sort<num_elements, num_local>(local, sh_local); for (int i = 0; i < num_local; ++i) { data[threadIdx.x * num_local + i] = local[i]; } } __global__ void test_sort_warp(gko::int32* data) { gko::int32 local[num_local]; for (int i = 0; i < num_local; ++i) { local[i] = data[threadIdx.x * num_local + i]; } bitonic_sort<config::warp_size * num_local, num_local>( local, static_cast<gko::int32*>(nullptr)); for (int i = 0; i < num_local; ++i) { data[threadIdx.x * num_local + i] = local[i]; } } class Sorting : public ::testing::Test { protected: Sorting() : ref(gko::ReferenceExecutor::create()), cuda(gko::CudaExecutor::create(0, ref)), rng(123456), ref_shared(ref, num_elements), ref_warp(ref), ddata(cuda) { // we want some duplicate elements std::uniform_int_distribution<gko::int32> dist(0, num_elements / 2); for (int i = 0; i < num_elements; ++i) { ref_shared.get_data()[i] = dist(rng); } ddata = gko::Array<gko::int32>{cuda, ref_shared}; ref_warp = ref_shared; std::sort(ref_shared.get_data(), ref_shared.get_data() + num_elements); std::sort(ref_warp.get_data(), ref_warp.get_data() + (config::warp_size * num_local)); } std::shared_ptr<gko::ReferenceExecutor> ref; 
std::shared_ptr<gko::CudaExecutor> cuda; std::default_random_engine rng; gko::Array<gko::int32> ref_shared; gko::Array<gko::int32> ref_warp; gko::Array<gko::int32> ddata; }; TEST_F(Sorting, CudaBitonicSortWarp) { hipLaunchKernelGGL(( test_sort_warp), dim3(1), dim3(config::warp_size), 0, 0, ddata.get_data()); ddata.set_executor(ref); auto data_ptr = ddata.get_const_data(); auto ref_ptr = ref_warp.get_const_data(); ASSERT_TRUE(std::equal(data_ptr, data_ptr + (num_local * config::warp_size), ref_ptr)); } TEST_F(Sorting, CudaBitonicSortShared) { hipLaunchKernelGGL(( test_sort_shared), dim3(1), dim3(num_threads), 0, 0, ddata.get_data()); ddata.set_executor(ref); auto data_ptr = ddata.get_const_data(); auto ref_ptr = ref_shared.get_const_data(); ASSERT_TRUE(std::equal(data_ptr, data_ptr + num_elements, ref_ptr)); } } // namespace
a7cb0b20041c23b7224ed2e79e06dabb9142db5b.cu
/*******************************<GINKGO LICENSE>****************************** Copyright (c) 2017-2021, the Ginkgo authors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************<GINKGO LICENSE>*******************************/ #include "cuda/components/sorting.cuh" #include <memory> #include <random> #include <gtest/gtest.h> #include <ginkgo/core/base/array.hpp> #include <ginkgo/core/base/executor.hpp> #include "cuda/test/utils.hpp" namespace { using gko::kernels::cuda::bitonic_sort; using gko::kernels::cuda::config; constexpr int num_elements = 2048; constexpr int num_local = 4; constexpr auto num_threads = num_elements / num_local; __global__ void test_sort_shared(gko::int32* data) { gko::int32 local[num_local]; __shared__ gko::int32 sh_local[num_elements]; for (int i = 0; i < num_local; ++i) { local[i] = data[threadIdx.x * num_local + i]; } bitonic_sort<num_elements, num_local>(local, sh_local); for (int i = 0; i < num_local; ++i) { data[threadIdx.x * num_local + i] = local[i]; } } __global__ void test_sort_warp(gko::int32* data) { gko::int32 local[num_local]; for (int i = 0; i < num_local; ++i) { local[i] = data[threadIdx.x * num_local + i]; } bitonic_sort<config::warp_size * num_local, num_local>( local, static_cast<gko::int32*>(nullptr)); for (int i = 0; i < num_local; ++i) { data[threadIdx.x * num_local + i] = local[i]; } } class Sorting : public ::testing::Test { protected: Sorting() : ref(gko::ReferenceExecutor::create()), cuda(gko::CudaExecutor::create(0, ref)), rng(123456), ref_shared(ref, num_elements), ref_warp(ref), ddata(cuda) { // we want some duplicate elements std::uniform_int_distribution<gko::int32> dist(0, num_elements / 2); for (int i = 0; i < num_elements; ++i) { ref_shared.get_data()[i] = dist(rng); } ddata = gko::Array<gko::int32>{cuda, ref_shared}; ref_warp = ref_shared; std::sort(ref_shared.get_data(), ref_shared.get_data() + num_elements); std::sort(ref_warp.get_data(), ref_warp.get_data() + (config::warp_size * num_local)); } std::shared_ptr<gko::ReferenceExecutor> ref; std::shared_ptr<gko::CudaExecutor> cuda; std::default_random_engine rng; gko::Array<gko::int32> 
ref_shared; gko::Array<gko::int32> ref_warp; gko::Array<gko::int32> ddata; }; TEST_F(Sorting, CudaBitonicSortWarp) { test_sort_warp<<<1, config::warp_size>>>(ddata.get_data()); ddata.set_executor(ref); auto data_ptr = ddata.get_const_data(); auto ref_ptr = ref_warp.get_const_data(); ASSERT_TRUE(std::equal(data_ptr, data_ptr + (num_local * config::warp_size), ref_ptr)); } TEST_F(Sorting, CudaBitonicSortShared) { test_sort_shared<<<1, num_threads>>>(ddata.get_data()); ddata.set_executor(ref); auto data_ptr = ddata.get_const_data(); auto ref_ptr = ref_shared.get_const_data(); ASSERT_TRUE(std::equal(data_ptr, data_ptr + num_elements, ref_ptr)); } } // namespace
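// The tests above exercise bitonic_sort from cuda/components/sorting.cuh as a
// black box (shared-memory and warp variants, num_local elements per thread).
// The sketch below only illustrates the warp-level compare-exchange pattern such
// a sort is built from, for the simplest case of one int per lane on a 32-lane
// warp; it is a toy stand-in under those assumptions, not Ginkgo's implementation.
__device__ int warp_bitonic_sort_lane(int value)
{
    const unsigned full_mask = 0xffffffffu;
    const int lane = threadIdx.x & 31;
    for (int k = 2; k <= 32; k <<= 1) {        // length of the bitonic sequences
        for (int j = k >> 1; j > 0; j >>= 1) { // compare-exchange distance
            int partner = __shfl_xor_sync(full_mask, value, j);
            bool ascending = (lane & k) == 0;
            bool keep_min = ((lane & j) == 0) == ascending;
            value = keep_min ? min(value, partner) : max(value, partner);
        }
    }
    return value; // lane i now holds the i-th smallest of the 32 input values
}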
90daad2ae350ccfdf00a4345248964fb4b0ab7a5.hip
// !!! This is a file automatically generated by hipify!!! #include "head.h" int k_t = (tfinal/dt); float *Ax, *Ay, *V1, *V2, *W, *F, *V_tmp, *W_tmp, *ue; float *b, *x, *y, *ut, *Vt; float *t; //GPU variable float *d_Ax, *d_Ay, *d_V1, *d_V2; float *d_b, *d_ut, *d_Vt; //GPU tam variable float *d_a_tam, *d_b_tam,*d_c_tam; float *d_c_new_tam,*d_d_new_tam; //GPU RK variable float *d_F, *d_x, *d_y, *d_V_tmp; float *d_t; void Allocate(){ size_t size; size = Np*Np*sizeof(float); Ax = (float*)malloc(size); Ay = (float*)malloc(size); V1 = (float*)malloc(size); V2 = (float*)malloc(size); W = (float*)malloc(size); F = (float*)malloc(size); V_tmp = (float*)malloc(size); W_tmp = (float*)malloc(size); ue = (float*)malloc(size); hipError_t Error; Error = hipMalloc((void**)&d_Ax, size); if(Error != hipSuccess) printf("CUDA error(malloc d_Ax) = %s\n", hipGetErrorString(Error)); Error = hipMalloc((void**)&d_Ay, size); if(Error != hipSuccess) printf("CUDA error(malloc d_Ay) = %s\n", hipGetErrorString(Error)); Error = hipMalloc((void**)&d_V1, size); if(Error != hipSuccess) printf("CUDA error(malloc d_V1) = %s\n", hipGetErrorString(Error)); Error = hipMalloc((void**)&d_V2, size); if(Error != hipSuccess) printf("CUDA error(malloc d_V2) = %s\n", hipGetErrorString(Error)); Error = hipMalloc((void**)&d_V_tmp, size); if(Error != hipSuccess) printf("CUDA error(malloc d_V_tmp) = %s\n", hipGetErrorString(Error)); Error = hipMalloc((void**)&d_F, size); if(Error != hipSuccess) printf("CUDA error(malloc d_F) = %s\n", hipGetErrorString(Error)); size = Np*sizeof(float); b = (float*)malloc(size); x = (float*)malloc(size); y = (float*)malloc(size); ut = (float*)malloc(size); Vt = (float*)malloc(size); Error = hipMalloc((void**)&d_b, size); if(Error != hipSuccess) printf("CUDA error(malloc d_b) = %s\n", hipGetErrorString(Error)); Error = hipMalloc((void**)&d_x, size); if(Error != hipSuccess) printf("CUDA error(malloc d_x) = %s\n", hipGetErrorString(Error)); Error = hipMalloc((void**)&d_y, size); if(Error != hipSuccess) printf("CUDA error(malloc d_y) = %s\n", hipGetErrorString(Error)); Error = hipMalloc((void**)&d_ut, size); if(Error != hipSuccess) printf("CUDA error(malloc d_ut) = %s\n", hipGetErrorString(Error)); Error = hipMalloc((void**)&d_Vt, size); if(Error != hipSuccess) printf("CUDA error(malloc d_Vt) = %s\n", hipGetErrorString(Error)); Error = hipMalloc((void**)&d_a_tam, size); if(Error != hipSuccess) printf("CUDA error(malloc d_a_tam) = %s\n", hipGetErrorString(Error)); Error = hipMalloc((void**)&d_b_tam, size); if(Error != hipSuccess) printf("CUDA error(malloc d_b_tam) = %s\n", hipGetErrorString(Error)); Error = hipMalloc((void**)&d_c_tam, size); if(Error != hipSuccess) printf("CUDA error(malloc d_c_tam) = %s\n", hipGetErrorString(Error)); Error = hipMalloc((void**)&d_c_new_tam, size); if(Error != hipSuccess) printf("CUDA error(malloc d_c_new_tam) = %s\n", hipGetErrorString(Error)); Error = hipMalloc((void**)&d_d_new_tam, size); if(Error != hipSuccess) printf("CUDA error(malloc d_d_new_tam) = %s\n", hipGetErrorString(Error)); size = 1*sizeof(float); t = (float*)malloc(size); Error = hipMalloc((void**)&d_t, size); if(Error != hipSuccess) printf("CUDA error(malloc d_t) = %s\n", hipGetErrorString(Error)); } void Save_Result(){ FILE *pFile; int i,j; int index; int n; n = Np; pFile = fopen("V1.txt","w+"); // Save the matrix A for (i = 0; i < n; i++) { for (j = 0; j < n; j++) { index = i*n + j; fprintf(pFile, "%g", V1[index]); if (j == (n-1)) { fprintf(pFile, "\n"); }else{ fprintf(pFile, "\t"); } } } fclose(pFile); pFile 
= fopen("V2.txt","w+"); // Save the matrix A for (i = 0; i < n; i++) { for (j = 0; j < n; j++) { index = i*n + j; fprintf(pFile, "%g", V2[index]); if (j == (n-1)) { fprintf(pFile, "\n"); }else{ fprintf(pFile, "\t"); } } } fclose(pFile); /* pFile = fopen("V_tmp.txt","w+"); // Save the matrix A for (i = 0; i < n; i++) { for (j = 0; j < n; j++) { index = i*n + j; fprintf(pFile, "%g", V_tmp[index]); if (j == (n-1)) { fprintf(pFile, "\n"); }else{ fprintf(pFile, "\t"); } } } fclose(pFile); pFile = fopen("b.txt","w+"); for (i = 0; i < n; i++) { fprintf(pFile, "%g", b[i]); fprintf(pFile, "\t"); } fclose(pFile); pFile = fopen("x.txt","w+"); for (i = 0; i < n; i++) { fprintf(pFile, "%g", x[i]); fprintf(pFile, "\t"); } fclose(pFile); */ } void Free(){ free(Ax);free(Ay);free(V1);free(V2); free(W);free(F);free(V_tmp);free(W_tmp);free(ue); free(b);free(x);free(y);free(ut);free(Vt); free(t); hipFree(d_Ax);hipFree(d_Ay);hipFree(d_V1);hipFree(d_V2); hipFree(d_V_tmp);hipFree(d_F); hipFree(d_t); hipFree(d_b);hipFree(d_x);hipFree(d_y);hipFree(d_Vt);hipFree(d_ut); hipFree(d_a_tam);hipFree(d_b_tam);hipFree(d_c_tam); hipFree(d_c_new_tam);hipFree(d_d_new_tam); } void Send_to_Device(){ hipError_t Error; size_t size; size = Np*Np*sizeof(float); Error = hipMemcpy(d_V1, V1, size, hipMemcpyHostToDevice); if (Error != hipSuccess) printf("CUDA error(copy V1->d_V1) = %s\n",hipGetErrorString(Error)); /* Error = hipMemcpy(d_V2, V2, size, hipMemcpyHostToDevice); if (Error != hipSuccess) printf("CUDA error(copy V2->d_V2) = %s\n",hipGetErrorString(Error)); Error = hipMemcpy(d_Ax, Ax, size, hipMemcpyHostToDevice); if (Error != hipSuccess) printf("CUDA error(copy Ax->d_Ax) = %s\n",hipGetErrorString(Error)); Error = hipMemcpy(d_Ay, Ay, size, hipMemcpyHostToDevice); if (Error != hipSuccess) printf("CUDA error(copy Ay->d_Ay) = %s\n",hipGetErrorString(Error)); Error = hipMemcpy(d_F, F, size, hipMemcpyHostToDevice); if (Error != hipSuccess) printf("CUDA error(copy F->d_F) = %s\n",hipGetErrorString(Error)); */ size = Np*sizeof(float); Error = hipMemcpy(d_x, x, size, hipMemcpyHostToDevice); if (Error != hipSuccess) printf("CUDA error(copy x->d_x) = %s\n",hipGetErrorString(Error)); Error = hipMemcpy(d_y, y, size, hipMemcpyHostToDevice); if (Error != hipSuccess) printf("CUDA error(copy y->d_y) = %s\n",hipGetErrorString(Error)); size = 1*sizeof(float); Error = hipMemcpy(d_t, t, size, hipMemcpyHostToDevice); if (Error != hipSuccess) printf("CUDA error(copy t->d_t) = %s\n",hipGetErrorString(Error)); /* size = Np*sizeof(float); Error = hipMemcpy(d_b, b, size, hipMemcpyHostToDevice); if (Error != hipSuccess) printf("CUDA error(copy b->d_b) = %s\n",hipGetErrorString(Error)); Error = hipMemcpy(d_ut, ut, size, hipMemcpyHostToDevice); if (Error != hipSuccess) printf("CUDA error(copy ut->d_ut) = %s\n",hipGetErrorString(Error)); Error = hipMemcpy(d_Vt, Vt, size, hipMemcpyHostToDevice); if (Error != hipSuccess) printf("CUDA error(copy Vt->d_Vt) = %s\n",hipGetErrorString(Error)); */ } void Send_to_Host(){ hipError_t Error; size_t size; size = Np*Np*sizeof(float); Error = hipMemcpy(V1, d_V1, size, hipMemcpyDeviceToHost); if (Error != hipSuccess) printf("CUDA error(copy d_V1->V1) = %s\n",hipGetErrorString(Error)); Error = hipMemcpy(V2, d_V2, size, hipMemcpyDeviceToHost); if (Error != hipSuccess) printf("CUDA error(copy d_V2->V2) = %s\n",hipGetErrorString(Error)); /* Error = hipMemcpy(F, d_F, size, hipMemcpyDeviceToHost); if (Error != hipSuccess) printf("CUDA error(copy d_F->F) = %s\n",hipGetErrorString(Error)); size = 1*sizeof(float); Error = 
hipMemcpy(t, d_t, size, hipMemcpyDeviceToHost); if (Error != hipSuccess) printf("CUDA error(copy d_t->t) = %s\n",hipGetErrorString(Error)); Error = hipMemcpy(V_tmp, d_V_tmp, size, hipMemcpyDeviceToHost); if (Error != hipSuccess) printf("CUDA error(copy d_V_tmp->V_tmp) = %s\n",hipGetErrorString(Error)); Error = hipMemcpy(Ax, d_Ax, size, hipMemcpyDeviceToHost); if (Error != hipSuccess) printf("CUDA error(copy d_Ax->Ax) = %s\n",hipGetErrorString(Error)); Error = hipMemcpy(Ay, d_Ay, size, hipMemcpyDeviceToHost); if (Error != hipSuccess) printf("CUDA error(copy d_Ay->Ay) = %s\n",hipGetErrorString(Error)); size = Np*sizeof(float); Error = hipMemcpy(b, d_b, size, hipMemcpyDeviceToHost); if (Error != hipSuccess) printf("CUDA error(copy d_b->b) = %s\n",hipGetErrorString(Error)); Error = hipMemcpy(ut, d_ut, size, hipMemcpyDeviceToHost); if (Error != hipSuccess) printf("CUDA error(copy d_ut->ut) = %s\n",hipGetErrorString(Error)); Error = hipMemcpy(Vt, d_Vt, size, hipMemcpyDeviceToHost); if (Error != hipSuccess) printf("CUDA error(copy d_Vt->Vt) = %s\n",hipGetErrorString(Error)); */ }
90daad2ae350ccfdf00a4345248964fb4b0ab7a5.cu
#include "head.h" int k_t = (tfinal/dt); float *Ax, *Ay, *V1, *V2, *W, *F, *V_tmp, *W_tmp, *ue; float *b, *x, *y, *ut, *Vt; float *t; //GPU variable float *d_Ax, *d_Ay, *d_V1, *d_V2; float *d_b, *d_ut, *d_Vt; //GPU tam variable float *d_a_tam, *d_b_tam,*d_c_tam; float *d_c_new_tam,*d_d_new_tam; //GPU RK variable float *d_F, *d_x, *d_y, *d_V_tmp; float *d_t; void Allocate(){ size_t size; size = Np*Np*sizeof(float); Ax = (float*)malloc(size); Ay = (float*)malloc(size); V1 = (float*)malloc(size); V2 = (float*)malloc(size); W = (float*)malloc(size); F = (float*)malloc(size); V_tmp = (float*)malloc(size); W_tmp = (float*)malloc(size); ue = (float*)malloc(size); cudaError_t Error; Error = cudaMalloc((void**)&d_Ax, size); if(Error != cudaSuccess) printf("CUDA error(malloc d_Ax) = %s\n", cudaGetErrorString(Error)); Error = cudaMalloc((void**)&d_Ay, size); if(Error != cudaSuccess) printf("CUDA error(malloc d_Ay) = %s\n", cudaGetErrorString(Error)); Error = cudaMalloc((void**)&d_V1, size); if(Error != cudaSuccess) printf("CUDA error(malloc d_V1) = %s\n", cudaGetErrorString(Error)); Error = cudaMalloc((void**)&d_V2, size); if(Error != cudaSuccess) printf("CUDA error(malloc d_V2) = %s\n", cudaGetErrorString(Error)); Error = cudaMalloc((void**)&d_V_tmp, size); if(Error != cudaSuccess) printf("CUDA error(malloc d_V_tmp) = %s\n", cudaGetErrorString(Error)); Error = cudaMalloc((void**)&d_F, size); if(Error != cudaSuccess) printf("CUDA error(malloc d_F) = %s\n", cudaGetErrorString(Error)); size = Np*sizeof(float); b = (float*)malloc(size); x = (float*)malloc(size); y = (float*)malloc(size); ut = (float*)malloc(size); Vt = (float*)malloc(size); Error = cudaMalloc((void**)&d_b, size); if(Error != cudaSuccess) printf("CUDA error(malloc d_b) = %s\n", cudaGetErrorString(Error)); Error = cudaMalloc((void**)&d_x, size); if(Error != cudaSuccess) printf("CUDA error(malloc d_x) = %s\n", cudaGetErrorString(Error)); Error = cudaMalloc((void**)&d_y, size); if(Error != cudaSuccess) printf("CUDA error(malloc d_y) = %s\n", cudaGetErrorString(Error)); Error = cudaMalloc((void**)&d_ut, size); if(Error != cudaSuccess) printf("CUDA error(malloc d_ut) = %s\n", cudaGetErrorString(Error)); Error = cudaMalloc((void**)&d_Vt, size); if(Error != cudaSuccess) printf("CUDA error(malloc d_Vt) = %s\n", cudaGetErrorString(Error)); Error = cudaMalloc((void**)&d_a_tam, size); if(Error != cudaSuccess) printf("CUDA error(malloc d_a_tam) = %s\n", cudaGetErrorString(Error)); Error = cudaMalloc((void**)&d_b_tam, size); if(Error != cudaSuccess) printf("CUDA error(malloc d_b_tam) = %s\n", cudaGetErrorString(Error)); Error = cudaMalloc((void**)&d_c_tam, size); if(Error != cudaSuccess) printf("CUDA error(malloc d_c_tam) = %s\n", cudaGetErrorString(Error)); Error = cudaMalloc((void**)&d_c_new_tam, size); if(Error != cudaSuccess) printf("CUDA error(malloc d_c_new_tam) = %s\n", cudaGetErrorString(Error)); Error = cudaMalloc((void**)&d_d_new_tam, size); if(Error != cudaSuccess) printf("CUDA error(malloc d_d_new_tam) = %s\n", cudaGetErrorString(Error)); size = 1*sizeof(float); t = (float*)malloc(size); Error = cudaMalloc((void**)&d_t, size); if(Error != cudaSuccess) printf("CUDA error(malloc d_t) = %s\n", cudaGetErrorString(Error)); } void Save_Result(){ FILE *pFile; int i,j; int index; int n; n = Np; pFile = fopen("V1.txt","w+"); // Save the matrix A for (i = 0; i < n; i++) { for (j = 0; j < n; j++) { index = i*n + j; fprintf(pFile, "%g", V1[index]); if (j == (n-1)) { fprintf(pFile, "\n"); }else{ fprintf(pFile, "\t"); } } } fclose(pFile); pFile = 
fopen("V2.txt","w+"); // Save the matrix A for (i = 0; i < n; i++) { for (j = 0; j < n; j++) { index = i*n + j; fprintf(pFile, "%g", V2[index]); if (j == (n-1)) { fprintf(pFile, "\n"); }else{ fprintf(pFile, "\t"); } } } fclose(pFile); /* pFile = fopen("V_tmp.txt","w+"); // Save the matrix A for (i = 0; i < n; i++) { for (j = 0; j < n; j++) { index = i*n + j; fprintf(pFile, "%g", V_tmp[index]); if (j == (n-1)) { fprintf(pFile, "\n"); }else{ fprintf(pFile, "\t"); } } } fclose(pFile); pFile = fopen("b.txt","w+"); for (i = 0; i < n; i++) { fprintf(pFile, "%g", b[i]); fprintf(pFile, "\t"); } fclose(pFile); pFile = fopen("x.txt","w+"); for (i = 0; i < n; i++) { fprintf(pFile, "%g", x[i]); fprintf(pFile, "\t"); } fclose(pFile); */ } void Free(){ free(Ax);free(Ay);free(V1);free(V2); free(W);free(F);free(V_tmp);free(W_tmp);free(ue); free(b);free(x);free(y);free(ut);free(Vt); free(t); cudaFree(d_Ax);cudaFree(d_Ay);cudaFree(d_V1);cudaFree(d_V2); cudaFree(d_V_tmp);cudaFree(d_F); cudaFree(d_t); cudaFree(d_b);cudaFree(d_x);cudaFree(d_y);cudaFree(d_Vt);cudaFree(d_ut); cudaFree(d_a_tam);cudaFree(d_b_tam);cudaFree(d_c_tam); cudaFree(d_c_new_tam);cudaFree(d_d_new_tam); } void Send_to_Device(){ cudaError_t Error; size_t size; size = Np*Np*sizeof(float); Error = cudaMemcpy(d_V1, V1, size, cudaMemcpyHostToDevice); if (Error != cudaSuccess) printf("CUDA error(copy V1->d_V1) = %s\n",cudaGetErrorString(Error)); /* Error = cudaMemcpy(d_V2, V2, size, cudaMemcpyHostToDevice); if (Error != cudaSuccess) printf("CUDA error(copy V2->d_V2) = %s\n",cudaGetErrorString(Error)); Error = cudaMemcpy(d_Ax, Ax, size, cudaMemcpyHostToDevice); if (Error != cudaSuccess) printf("CUDA error(copy Ax->d_Ax) = %s\n",cudaGetErrorString(Error)); Error = cudaMemcpy(d_Ay, Ay, size, cudaMemcpyHostToDevice); if (Error != cudaSuccess) printf("CUDA error(copy Ay->d_Ay) = %s\n",cudaGetErrorString(Error)); Error = cudaMemcpy(d_F, F, size, cudaMemcpyHostToDevice); if (Error != cudaSuccess) printf("CUDA error(copy F->d_F) = %s\n",cudaGetErrorString(Error)); */ size = Np*sizeof(float); Error = cudaMemcpy(d_x, x, size, cudaMemcpyHostToDevice); if (Error != cudaSuccess) printf("CUDA error(copy x->d_x) = %s\n",cudaGetErrorString(Error)); Error = cudaMemcpy(d_y, y, size, cudaMemcpyHostToDevice); if (Error != cudaSuccess) printf("CUDA error(copy y->d_y) = %s\n",cudaGetErrorString(Error)); size = 1*sizeof(float); Error = cudaMemcpy(d_t, t, size, cudaMemcpyHostToDevice); if (Error != cudaSuccess) printf("CUDA error(copy t->d_t) = %s\n",cudaGetErrorString(Error)); /* size = Np*sizeof(float); Error = cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice); if (Error != cudaSuccess) printf("CUDA error(copy b->d_b) = %s\n",cudaGetErrorString(Error)); Error = cudaMemcpy(d_ut, ut, size, cudaMemcpyHostToDevice); if (Error != cudaSuccess) printf("CUDA error(copy ut->d_ut) = %s\n",cudaGetErrorString(Error)); Error = cudaMemcpy(d_Vt, Vt, size, cudaMemcpyHostToDevice); if (Error != cudaSuccess) printf("CUDA error(copy Vt->d_Vt) = %s\n",cudaGetErrorString(Error)); */ } void Send_to_Host(){ cudaError_t Error; size_t size; size = Np*Np*sizeof(float); Error = cudaMemcpy(V1, d_V1, size, cudaMemcpyDeviceToHost); if (Error != cudaSuccess) printf("CUDA error(copy d_V1->V1) = %s\n",cudaGetErrorString(Error)); Error = cudaMemcpy(V2, d_V2, size, cudaMemcpyDeviceToHost); if (Error != cudaSuccess) printf("CUDA error(copy d_V2->V2) = %s\n",cudaGetErrorString(Error)); /* Error = cudaMemcpy(F, d_F, size, cudaMemcpyDeviceToHost); if (Error != cudaSuccess) printf("CUDA error(copy d_F->F) = 
%s\n",cudaGetErrorString(Error)); size = 1*sizeof(float); Error = cudaMemcpy(t, d_t, size, cudaMemcpyDeviceToHost); if (Error != cudaSuccess) printf("CUDA error(copy d_t->t) = %s\n",cudaGetErrorString(Error)); Error = cudaMemcpy(V_tmp, d_V_tmp, size, cudaMemcpyDeviceToHost); if (Error != cudaSuccess) printf("CUDA error(copy d_V_tmp->V_tmp) = %s\n",cudaGetErrorString(Error)); Error = cudaMemcpy(Ax, d_Ax, size, cudaMemcpyDeviceToHost); if (Error != cudaSuccess) printf("CUDA error(copy d_Ax->Ax) = %s\n",cudaGetErrorString(Error)); Error = cudaMemcpy(Ay, d_Ay, size, cudaMemcpyDeviceToHost); if (Error != cudaSuccess) printf("CUDA error(copy d_Ay->Ay) = %s\n",cudaGetErrorString(Error)); size = Np*sizeof(float); Error = cudaMemcpy(b, d_b, size, cudaMemcpyDeviceToHost); if (Error != cudaSuccess) printf("CUDA error(copy d_b->b) = %s\n",cudaGetErrorString(Error)); Error = cudaMemcpy(ut, d_ut, size, cudaMemcpyDeviceToHost); if (Error != cudaSuccess) printf("CUDA error(copy d_ut->ut) = %s\n",cudaGetErrorString(Error)); Error = cudaMemcpy(Vt, d_Vt, size, cudaMemcpyDeviceToHost); if (Error != cudaSuccess) printf("CUDA error(copy d_Vt->Vt) = %s\n",cudaGetErrorString(Error)); */ }
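// Allocate(), Send_to_Device() and Send_to_Host() above repeat the same
// "call, compare against cudaSuccess, printf" pattern for every buffer. One
// common way to keep that behavior with less repetition is a small checking
// macro; the macro name CUDA_CHECK and the choice to only print (not abort)
// are assumptions picked to match what the original code does.
#include <cstdio>
#include <cuda_runtime.h>

#define CUDA_CHECK(call)                                                   \
    do {                                                                   \
        cudaError_t err_ = (call);                                         \
        if (err_ != cudaSuccess)                                           \
            printf("CUDA error at %s:%d: %s\n", __FILE__, __LINE__,        \
                   cudaGetErrorString(err_));                              \
    } while (0)

// Usage sketch, equivalent to one entry of Allocate()/Send_to_Device():
//   CUDA_CHECK(cudaMalloc((void**)&d_V1, Np * Np * sizeof(float)));
//   CUDA_CHECK(cudaMemcpy(d_V1, V1, Np * Np * sizeof(float), cudaMemcpyHostToDevice));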
f805ab78c6974c01324c690ccf6a5c0000a46a9b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdio.h> #include <GL/glut.h> #include <stdlib.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "kernel.h" #include "util.h" #include "gui.h" extern struct options opt; extern bool *map; extern bool *map_d; extern bool *updated_map_d; // Globales declaradas en game.cpp __device__ unsigned int mod(int a, int m) { int r = a % m; while (r < 0) { r += m; } return r; } // Actualizacion del mapa en la GPU. Tiene dos fases, separadas por syncthreads: // 1 - Copiar en una matriz compartida por bloque las celdas correspondientes a cada // hilo y el "marco" que envuelve al bloque. As evitamos leer repetidamente de // memoria global. // 2 - Cada hilo, usando la copia en memoria compartida, analiza lo que tiene alrededor // y guarda su estado en la matriz de resultados. // No podemos sincronizar entre bloques, y un bloque podra pisar el mapa antes de que // los hilos de otro pudiesen calcular su estado, por lo que usamos una segunda matriz // para guardar el resultado. __global__ void update_map_d(bool *map_d, bool *updated_map_d, const unsigned int rows, const unsigned int cols, const unsigned int block_width, const unsigned int block_height) { unsigned int alive = 0; bool status; int i, j; const unsigned int shared_rows = block_height + 2; const unsigned int shared_cols = block_width + 2; extern __shared__ float shared_tile_ptr[]; bool *shared_tile = (bool *)shared_tile_ptr; status = map_d[mod((blockIdx.y * block_height + threadIdx.y), rows) * cols + mod(blockIdx.x * block_width + threadIdx.x, cols)]; shared_tile[(threadIdx.y + 1) * shared_cols + threadIdx.x + 1] = status; // Copiamos a la matriz compartida las celdas correspondientes al bloque y su entorno if (threadIdx.x == 0) { shared_tile[(threadIdx.y + 1) * shared_cols] = map_d[mod((blockIdx.y * block_height + threadIdx.y), rows) * cols + mod(blockIdx.x * block_width + threadIdx.x - 1, cols)]; } else if (threadIdx.x == block_width - 1) { shared_tile[(threadIdx.y + 2) * shared_cols - 1] = map_d[mod((blockIdx.y * block_height + threadIdx.y), rows) * cols + mod(blockIdx.x * block_width + threadIdx.x + 1, cols)]; } // Columna izquierda o derecha del borde del bloque? if (threadIdx.y == 0) { shared_tile[threadIdx.x + 1] = map_d[mod((blockIdx.y * block_height + threadIdx.y - 1), rows) * cols + mod(blockIdx.x * block_width + threadIdx.x, cols)]; } else if (threadIdx.y == block_height - 1) { shared_tile[(threadIdx.y + 2) * shared_cols + threadIdx.x + 1] = map_d[mod((blockIdx.y * block_height + threadIdx.y + 1), rows) * cols + mod(blockIdx.x * block_width + threadIdx.x, cols)]; } // Fila superior o inferior del borde del bloque? 
if (threadIdx.x == 0 && threadIdx.y == 0) { shared_tile[0] = map_d[mod((blockIdx.y * block_height + threadIdx.y - 1), rows) * cols + mod(blockIdx.x * block_width + threadIdx.x - 1, cols)]; } else if (threadIdx.x == 0 && threadIdx.y == block_height - 1) { shared_tile[shared_cols * (shared_rows - 1)] = map_d[mod((blockIdx.y * block_height + threadIdx.y + 1), rows) * cols + mod(blockIdx.x * block_width + threadIdx.x - 1, cols)]; } else if (threadIdx.x == block_width - 1 && threadIdx.y == 0) { shared_tile[threadIdx.x + 2] = map_d[mod((blockIdx.y * block_height + threadIdx.y - 1), rows) * cols + mod(blockIdx.x * block_width + threadIdx.x + 1, cols)]; } else if (threadIdx.x == block_width - 1 && threadIdx.y == block_height - 1) { shared_tile[shared_rows * shared_cols - 1] = map_d[mod((blockIdx.y * block_height + threadIdx.y + 1), rows) * cols + mod(blockIdx.x * block_width + threadIdx.x + 1, cols)]; } // Alguna de las esquinas del bloque? __syncthreads(); for (i = -1; i < 2; i++) { for (j = -1; j < 2; j++) { if (shared_tile[(threadIdx.y + 1 + j) * shared_cols + threadIdx.x + 1 + i] == true) { alive++; } } } if (status == true) { // Una celda viva sobrevive si no tiene 2 o 3 vivas alrededor. alive--; // Habiamos contado la propia celda if (alive != 2 && alive != 3) { status = false; } } else { // Una celda muerta revive si tiene 3 vivas alrededor if (alive == 3) { status = true; } } updated_map_d[mod((blockIdx.y * block_height + threadIdx.y), rows) * cols + mod(blockIdx.x * block_width + threadIdx.x, cols)] = status; } // Actualizar el mapa desde la CPU, llama a la GPU. void update_map(int foo) { hipError_t ret; bool *tmp; if (opt.paused == false) { // En modo GUI esta funcion se llama cada cierto tiempo, por lo que hay que ver si se // esta pausado para no hacer nada. update_map_d << <opt.gridSize, opt.blockSize, (opt.blockSize.x + 2) * (opt.blockSize.y + 2) * sizeof(bool) >> >( map_d, updated_map_d, opt.rows, opt.cols, opt.blockSize.x, opt.blockSize.y); ret = hipDeviceSynchronize(); checkCudaRet(ret, "update_map_d launch", __FILE__, __LINE__); ret = hipMemcpy(map, updated_map_d, opt.rows * opt.cols * sizeof(bool), hipMemcpyDeviceToHost); checkCudaRet(ret, "cudaMemCpy", __FILE__, __LINE__); tmp = map_d; map_d = updated_map_d; updated_map_d = tmp; // Intercambiamos map_d y updated_map_d, el mapa generado en una ronda sera la base de la siguiente. if (opt.gui == true) { glutPostRedisplay(); } // Tras actualizar la matriz, refrescamos el lienzo en modo grafico. if (opt.automatic == true) { glutTimerFunc(opt.period, update_map, 0); } // Si estamos en modo automatico, hacemos que se vuelva a llamar pasado el periodo } }
f805ab78c6974c01324c690ccf6a5c0000a46a9b.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdio.h> #include <GL/glut.h> #include <stdlib.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "kernel.h" #include "util.h" #include "gui.h" extern struct options opt; extern bool *map; extern bool *map_d; extern bool *updated_map_d; // Globales declaradas en game.cpp __device__ unsigned int mod(int a, int m) { int r = a % m; while (r < 0) { r += m; } return r; } // Actualizacion del mapa en la GPU. Tiene dos fases, separadas por syncthreads: // 1 - Copiar en una matriz compartida por bloque las celdas correspondientes a cada // hilo y el "marco" que envuelve al bloque. Así evitamos leer repetidamente de // memoria global. // 2 - Cada hilo, usando la copia en memoria compartida, analiza lo que tiene alrededor // y guarda su estado en la matriz de resultados. // No podemos sincronizar entre bloques, y un bloque podría pisar el mapa antes de que // los hilos de otro pudiesen calcular su estado, por lo que usamos una segunda matriz // para guardar el resultado. __global__ void update_map_d(bool *map_d, bool *updated_map_d, const unsigned int rows, const unsigned int cols, const unsigned int block_width, const unsigned int block_height) { unsigned int alive = 0; bool status; int i, j; const unsigned int shared_rows = block_height + 2; const unsigned int shared_cols = block_width + 2; extern __shared__ float shared_tile_ptr[]; bool *shared_tile = (bool *)shared_tile_ptr; status = map_d[mod((blockIdx.y * block_height + threadIdx.y), rows) * cols + mod(blockIdx.x * block_width + threadIdx.x, cols)]; shared_tile[(threadIdx.y + 1) * shared_cols + threadIdx.x + 1] = status; // Copiamos a la matriz compartida las celdas correspondientes al bloque y su entorno if (threadIdx.x == 0) { shared_tile[(threadIdx.y + 1) * shared_cols] = map_d[mod((blockIdx.y * block_height + threadIdx.y), rows) * cols + mod(blockIdx.x * block_width + threadIdx.x - 1, cols)]; } else if (threadIdx.x == block_width - 1) { shared_tile[(threadIdx.y + 2) * shared_cols - 1] = map_d[mod((blockIdx.y * block_height + threadIdx.y), rows) * cols + mod(blockIdx.x * block_width + threadIdx.x + 1, cols)]; } // Columna izquierda o derecha del borde del bloque? if (threadIdx.y == 0) { shared_tile[threadIdx.x + 1] = map_d[mod((blockIdx.y * block_height + threadIdx.y - 1), rows) * cols + mod(blockIdx.x * block_width + threadIdx.x, cols)]; } else if (threadIdx.y == block_height - 1) { shared_tile[(threadIdx.y + 2) * shared_cols + threadIdx.x + 1] = map_d[mod((blockIdx.y * block_height + threadIdx.y + 1), rows) * cols + mod(blockIdx.x * block_width + threadIdx.x, cols)]; } // Fila superior o inferior del borde del bloque? 
if (threadIdx.x == 0 && threadIdx.y == 0) { shared_tile[0] = map_d[mod((blockIdx.y * block_height + threadIdx.y - 1), rows) * cols + mod(blockIdx.x * block_width + threadIdx.x - 1, cols)]; } else if (threadIdx.x == 0 && threadIdx.y == block_height - 1) { shared_tile[shared_cols * (shared_rows - 1)] = map_d[mod((blockIdx.y * block_height + threadIdx.y + 1), rows) * cols + mod(blockIdx.x * block_width + threadIdx.x - 1, cols)]; } else if (threadIdx.x == block_width - 1 && threadIdx.y == 0) { shared_tile[threadIdx.x + 2] = map_d[mod((blockIdx.y * block_height + threadIdx.y - 1), rows) * cols + mod(blockIdx.x * block_width + threadIdx.x + 1, cols)]; } else if (threadIdx.x == block_width - 1 && threadIdx.y == block_height - 1) { shared_tile[shared_rows * shared_cols - 1] = map_d[mod((blockIdx.y * block_height + threadIdx.y + 1), rows) * cols + mod(blockIdx.x * block_width + threadIdx.x + 1, cols)]; } // Alguna de las esquinas del bloque? __syncthreads(); for (i = -1; i < 2; i++) { for (j = -1; j < 2; j++) { if (shared_tile[(threadIdx.y + 1 + j) * shared_cols + threadIdx.x + 1 + i] == true) { alive++; } } } if (status == true) { // Una celda viva sobrevive si no tiene 2 o 3 vivas alrededor. alive--; // Habiamos contado la propia celda if (alive != 2 && alive != 3) { status = false; } } else { // Una celda muerta revive si tiene 3 vivas alrededor if (alive == 3) { status = true; } } updated_map_d[mod((blockIdx.y * block_height + threadIdx.y), rows) * cols + mod(blockIdx.x * block_width + threadIdx.x, cols)] = status; } // Actualizar el mapa desde la CPU, llama a la GPU. void update_map(int foo) { cudaError_t ret; bool *tmp; if (opt.paused == false) { // En modo GUI esta funcion se llama cada cierto tiempo, por lo que hay que ver si se // esta pausado para no hacer nada. update_map_d << <opt.gridSize, opt.blockSize, (opt.blockSize.x + 2) * (opt.blockSize.y + 2) * sizeof(bool) >> >( map_d, updated_map_d, opt.rows, opt.cols, opt.blockSize.x, opt.blockSize.y); ret = cudaDeviceSynchronize(); checkCudaRet(ret, "update_map_d launch", __FILE__, __LINE__); ret = cudaMemcpy(map, updated_map_d, opt.rows * opt.cols * sizeof(bool), cudaMemcpyDeviceToHost); checkCudaRet(ret, "cudaMemCpy", __FILE__, __LINE__); tmp = map_d; map_d = updated_map_d; updated_map_d = tmp; // Intercambiamos map_d y updated_map_d, el mapa generado en una ronda sera la base de la siguiente. if (opt.gui == true) { glutPostRedisplay(); } // Tras actualizar la matriz, refrescamos el lienzo en modo grafico. if (opt.automatic == true) { glutTimerFunc(opt.period, update_map, 0); } // Si estamos en modo automatico, hacemos que se vuelva a llamar pasado el periodo } }
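// update_map() above sizes the dynamic shared memory as one bool per cell of a
// (blockDim.x + 2) x (blockDim.y + 2) tile: the block's own cells plus a
// one-cell halo on every side, since the grid wraps around through mod(). A
// standalone sketch of that launch arithmetic, assuming the update_map_d kernel
// above is in scope; the 16x16 block size and the function name are illustrative
// choices, not values taken from struct options.
#include <cuda_runtime.h>

void launch_life_step(bool *map_d, bool *updated_map_d,
                      unsigned int rows, unsigned int cols)
{
    dim3 block(16, 16);
    dim3 grid((cols + block.x - 1) / block.x,
              (rows + block.y - 1) / block.y);
    // Halo tile: interior cells plus a one-cell border in x and y.
    size_t shmem = (block.x + 2) * (block.y + 2) * sizeof(bool);
    update_map_d<<<grid, block, shmem>>>(map_d, updated_map_d,
                                         rows, cols, block.x, block.y);
}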
10d56cf545c069c2c283bd0b5159910794a63973.hip
// !!! This is a file automatically generated by hipify!!! // ---------------------------------------------------------------- // Gunrock -- Fast and Efficient GPU Graph Library // ---------------------------------------------------------------- // This source code is distributed under the terms of LICENSE.TXT // in the root directory of this source distribution. // ---------------------------------------------------------------- /** * @file * test_unitests.cu * * @brief Main test driver for all googletests. * @source * https://github.com/google/googletest/blob/master/googletest/docs/Primer.md */ #include <stdio.h> #include <gunrock/gunrock.h> #include <gunrock/app/hello/hello_app.cu> #include <gunrock/app/test_base.cuh> #include <gtest/gtest.h> /** * @brief: Gunrock: Google tests -- list of tests * found in this directory, testing core functionality * of gunrock: primitives, operators, device intrinsics, * etc. * */ // bug:: malloc_consolidate(): invalid chunk size //#include "test_lib_pr.h" // Tests Subgraph Matching #include "test_lib_sm.h" // Tests the RepeatFor Operator #include "test_repeatfor.h" // Tests Segmented Reduction (device) #include "test_segreduce.h" // Tests Binary Search #include "test_binarysearch.h" #include "test_pointer_location.h" using namespace gunrock; hipError_t UseParameters(util::Parameters &parameters) { hipError_t retval = hipSuccess; GUARD_CU(parameters.Use<bool>("googletest", util::OPTIONAL_PARAMETER, true, "Example parameter for googletest", __FILE__, __LINE__)); return retval; } /****************************************************************************** * Main ******************************************************************************/ /** * @brief Enclosure to the main function */ struct main_struct { /** * @brief the actual main function, after type switching * @tparam VertexT Type of vertex identifier * @tparam SizeT Type of graph size, i.e. type of edge identifier * @tparam ValueT Type of edge values * @param parameters Command line parameters * @param v,s,val Place holders for type deduction * \return hipError_t error message(s), if any */ template <typename VertexT, // Use int as the vertex identifier typename SizeT, // Use int as the graph size type typename ValueT> // Use int as the value type hipError_t operator()(util::Parameters &parameters, VertexT v, SizeT s, ValueT val) { // CLI parameters bool quick = parameters.Get<bool>("quick"); bool quiet = parameters.Get<bool>("quiet"); hipError_t retval = hipSuccess; return retval; } }; int main(int argc, char **argv) { hipError_t retval = hipSuccess; util::Parameters parameters("test unittests"); GUARD_CU(graphio::UseParameters(parameters)); GUARD_CU(app::UseParameters_test(parameters)); GUARD_CU(UseParameters(parameters)); GUARD_CU(parameters.Parse_CommandLine(argc, argv)); if (parameters.Get<bool>("help")) { parameters.Print_Help(); return hipSuccess; } // Run all tests using the google tests // framework. ::testing::InitGoogleTest(&argc, argv); RUN_ALL_TESTS(); return app::Switch_Types<app::VERTEXT_U32B | app::VERTEXT_U64B | app::SIZET_U32B | // app::SIZET_U64B | app::VALUET_F32B | // app::VALUET_F64B | app::DIRECTED | app::UNDIRECTED>(parameters, main_struct()); }
10d56cf545c069c2c283bd0b5159910794a63973.cu
// ---------------------------------------------------------------- // Gunrock -- Fast and Efficient GPU Graph Library // ---------------------------------------------------------------- // This source code is distributed under the terms of LICENSE.TXT // in the root directory of this source distribution. // ---------------------------------------------------------------- /** * @file * test_unitests.cu * * @brief Main test driver for all googletests. * @source * https://github.com/google/googletest/blob/master/googletest/docs/Primer.md */ #include <stdio.h> #include <gunrock/gunrock.h> #include <gunrock/app/hello/hello_app.cu> #include <gunrock/app/test_base.cuh> #include <gtest/gtest.h> /** * @brief: Gunrock: Google tests -- list of tests * found in this directory, testing core functionality * of gunrock: primitives, operators, device intrinsics, * etc. * */ // bug:: malloc_consolidate(): invalid chunk size //#include "test_lib_pr.h" // Tests Subgraph Matching #include "test_lib_sm.h" // Tests the RepeatFor Operator #include "test_repeatfor.h" // Tests Segmented Reduction (device) #include "test_segreduce.h" // Tests Binary Search #include "test_binarysearch.h" #include "test_pointer_location.h" using namespace gunrock; cudaError_t UseParameters(util::Parameters &parameters) { cudaError_t retval = cudaSuccess; GUARD_CU(parameters.Use<bool>("googletest", util::OPTIONAL_PARAMETER, true, "Example parameter for googletest", __FILE__, __LINE__)); return retval; } /****************************************************************************** * Main ******************************************************************************/ /** * @brief Enclosure to the main function */ struct main_struct { /** * @brief the actual main function, after type switching * @tparam VertexT Type of vertex identifier * @tparam SizeT Type of graph size, i.e. type of edge identifier * @tparam ValueT Type of edge values * @param parameters Command line parameters * @param v,s,val Place holders for type deduction * \return cudaError_t error message(s), if any */ template <typename VertexT, // Use int as the vertex identifier typename SizeT, // Use int as the graph size type typename ValueT> // Use int as the value type cudaError_t operator()(util::Parameters &parameters, VertexT v, SizeT s, ValueT val) { // CLI parameters bool quick = parameters.Get<bool>("quick"); bool quiet = parameters.Get<bool>("quiet"); cudaError_t retval = cudaSuccess; return retval; } }; int main(int argc, char **argv) { cudaError_t retval = cudaSuccess; util::Parameters parameters("test unittests"); GUARD_CU(graphio::UseParameters(parameters)); GUARD_CU(app::UseParameters_test(parameters)); GUARD_CU(UseParameters(parameters)); GUARD_CU(parameters.Parse_CommandLine(argc, argv)); if (parameters.Get<bool>("help")) { parameters.Print_Help(); return cudaSuccess; } // Run all tests using the google tests // framework. ::testing::InitGoogleTest(&argc, argv); RUN_ALL_TESTS(); return app::Switch_Types<app::VERTEXT_U32B | app::VERTEXT_U64B | app::SIZET_U32B | // app::SIZET_U64B | app::VALUET_F32B | // app::VALUET_F64B | app::DIRECTED | app::UNDIRECTED>(parameters, main_struct()); }
calculate_energy.hip
// !!! This is a file automatically generated by hipify!!! #include "calculate_energy.cuh" #include <numeric> #include <vector> #include <hip/hip_runtime.h> #include <iostream> #include <cmath> #include "cuda_utils.cuh" #include "rocblas.h" /** * Calculates the energy of Phi, and reduces it along the gamma axis. Result is stored as a 2D image. */ __global__ void computeenergykernel(const View3D<float> phi, const View3D<float> rho, float step_gamma, float *en) { int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; auto dims = phi.dims(); int w = dims.x; int h = dims.y; int nblayers = dims.z; if (x < w && y < h) { float xgrad = 0; float ygrad = 0; float ggrad = 0; int ix = x + 1; int iy = y + 1; int iz; float tphi; float sum = 0; int temp; temp = min(x, nblayers); for (int z = 0; z < temp; z++) { iz = z + 1; tphi = phi(x, y, z); if (x < w - 1) { xgrad = phi(ix, y, z) - tphi; } if (y < h - 1) { ygrad = phi(x, iy, z) - tphi; } if (z < nblayers - 1) { ggrad = (phi(x, y, iz) - tphi) / step_gamma; } sum = sum + sqrt(xgrad * xgrad + ygrad * ygrad) + rho(x, y, z) * fabs(ggrad); xgrad = 0; ygrad = 0; ggrad = 0; } en[x + w * y] = sum; } } /** * Calculates the energy of Phi and scales it by the number of pixels of Phi. * The scaling makes the resulting energy independent of the image size and value of gamma max. */ float calculate_energy(const View3D<float> &phi, const View3D<float> &rho, float step_gamma) { auto dims = phi.dims(); size_t w = dims.x; size_t h = dims.y; float *d_energy = NULL; hipMalloc(&d_energy, w * h * sizeof(float)); CUDA_CHECK; dim3 block(32, 8, 1); auto grid = computeGrid2D(block, w, h); hipLaunchKernelGGL(( computeenergykernel), dim3(grid), dim3(block), 0, 0, phi, rho, step_gamma, d_energy); CUDA_CHECK; hipDeviceSynchronize(); // Reduction of the remaining 2D image using CublasSaSum float sum = 0; int n = w * h; hipblasHandle_t handle; hipblasStatus_t s1; s1 = hipblasCreate(&handle); float b1; s1 = hipblasSasum(handle, n, d_energy, 1, &b1); sum = b1; hipFree(d_energy); return sum / (dims.x * dims.y * dims.z); }
calculate_energy.cu
#include "calculate_energy.cuh" #include <numeric> #include <vector> #include <cuda_runtime.h> #include <iostream> #include <cmath> #include "cuda_utils.cuh" #include "cublas_v2.h" /** * Calculates the energy of Phi, and reduces it along the gamma axis. Result is stored as a 2D image. */ __global__ void computeenergykernel(const View3D<float> phi, const View3D<float> rho, float step_gamma, float *en) { int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; auto dims = phi.dims(); int w = dims.x; int h = dims.y; int nblayers = dims.z; if (x < w && y < h) { float xgrad = 0; float ygrad = 0; float ggrad = 0; int ix = x + 1; int iy = y + 1; int iz; float tphi; float sum = 0; int temp; temp = min(x, nblayers); for (int z = 0; z < temp; z++) { iz = z + 1; tphi = phi(x, y, z); if (x < w - 1) { xgrad = phi(ix, y, z) - tphi; } if (y < h - 1) { ygrad = phi(x, iy, z) - tphi; } if (z < nblayers - 1) { ggrad = (phi(x, y, iz) - tphi) / step_gamma; } sum = sum + sqrt(xgrad * xgrad + ygrad * ygrad) + rho(x, y, z) * fabs(ggrad); xgrad = 0; ygrad = 0; ggrad = 0; } en[x + w * y] = sum; } } /** * Calculates the energy of Phi and scales it by the number of pixels of Phi. * The scaling makes the resulting energy independent of the image size and value of gamma max. */ float calculate_energy(const View3D<float> &phi, const View3D<float> &rho, float step_gamma) { auto dims = phi.dims(); size_t w = dims.x; size_t h = dims.y; float *d_energy = NULL; cudaMalloc(&d_energy, w * h * sizeof(float)); CUDA_CHECK; dim3 block(32, 8, 1); auto grid = computeGrid2D(block, w, h); computeenergykernel<<<grid, block>>>(phi, rho, step_gamma, d_energy); CUDA_CHECK; cudaDeviceSynchronize(); // Reduction of the remaining 2D image using CublasSaSum float sum = 0; int n = w * h; cublasHandle_t handle; cublasStatus_t s1; s1 = cublasCreate(&handle); float b1; s1 = cublasSasum(handle, n, d_energy, 1, &b1); sum = b1; cudaFree(d_energy); return sum / (dims.x * dims.y * dims.z); }
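// calculate_energy() above reduces the per-pixel energy image with cublasSasum,
// which sums absolute values; since every entry written by computeenergykernel
// is a sum of non-negative terms, that equals a plain sum. A sketch of the same
// reduction with Thrust, which needs no cuBLAS handle (the code above creates
// one per call and never destroys it); the function name is an assumption.
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>

float sum_energy_image(const float *d_energy, size_t w, size_t h)
{
    thrust::device_ptr<const float> p(d_energy);
    return thrust::reduce(p, p + w * h, 0.0f);
}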
2d22da66800672d1dad0ea2d5b518e6f69ed78ee.hip
// !!! This is a file automatically generated by hipify!!! #include"cpml.h" #include"../gpu.h" #include<cstdio> CPML::CPML(int deviceid, CPML &cpml) { npml=cpml.npml; nx=cpml.nx; nz=cpml.nz; pml_dt=cpml.pml_dt; pml_r=cpml.pml_r; pml_v=cpml.pml_v; pml_fc=cpml.pml_fc; #define ALLOC_CPML(psi,comp,n)\ safecall(hipMalloc((void**)&psi.comp,sizeof(double)*n));\ safecall(hipMemcpy(psi.comp,cpml.psi.comp,sizeof(double)*n,hipMemcpyHostToDevice)); ALLOC_CPML(psi,Txx_x,2*npml*nz); ALLOC_CPML(psi,Txz_x,2*npml*nz); ALLOC_CPML(psi, U_x,2*npml*nz); ALLOC_CPML(psi, W_x,2*npml*nz); ALLOC_CPML(psi,Tzz_z,2*npml*nx); ALLOC_CPML(psi,Txz_z,2*npml*nx); ALLOC_CPML(psi, U_z,2*npml*nx); ALLOC_CPML(psi, W_z,2*npml*nx); ALLOC_CPML(b,Txx_x,nx); ALLOC_CPML(b,Txz_x,nx); ALLOC_CPML(b, U_x,nx); ALLOC_CPML(b, W_x,nx); ALLOC_CPML(b,Tzz_z,nz); ALLOC_CPML(b,Txz_z,nz); ALLOC_CPML(b, U_z,nz); ALLOC_CPML(b, W_z,nz); ALLOC_CPML(c,Txx_x,nx); ALLOC_CPML(c,Txz_x,nx); ALLOC_CPML(c, U_x,nx); ALLOC_CPML(c, W_x,nx); ALLOC_CPML(c,Tzz_z,nz); ALLOC_CPML(c,Txz_z,nz); ALLOC_CPML(c, U_z,nz); ALLOC_CPML(c, W_z,nz); ALLOC_CPML(k,Txx_x,nx); ALLOC_CPML(k,Txz_x,nx); ALLOC_CPML(k, U_x,nx); ALLOC_CPML(k, W_x,nx); ALLOC_CPML(k,Tzz_z,nz); ALLOC_CPML(k,Txz_z,nz); ALLOC_CPML(k, U_z,nz); ALLOC_CPML(k, W_z,nz); /* P is the position of \partial{U}/\partial{x}, U_x, P-------U P------U-------- | | | | | | | | 0:pml_pos nx-1-pml_pos:nx-1 distance from boundary pml_pos-i i-(nx-1-pml_pos) for example, nx=100 looks from U grid, pml boundary is at position 11, 0 : 11, 100-1-11 : 100-1 looks from P grid, pml boundary is at position 11.5, 0 : 11, W---------Txz | | | | | | Txx(Tzz)---U ------ pml boundary | | | | pml boundary 0------npml-0.5 */ }
2d22da66800672d1dad0ea2d5b518e6f69ed78ee.cu
#include"cpml.h" #include"../gpu.h" #include<cstdio> CPML::CPML(int deviceid, CPML &cpml) { npml=cpml.npml; nx=cpml.nx; nz=cpml.nz; pml_dt=cpml.pml_dt; pml_r=cpml.pml_r; pml_v=cpml.pml_v; pml_fc=cpml.pml_fc; #define ALLOC_CPML(psi,comp,n)\ safecall(cudaMalloc((void**)&psi.comp,sizeof(double)*n));\ safecall(cudaMemcpy(psi.comp,cpml.psi.comp,sizeof(double)*n,cudaMemcpyHostToDevice)); ALLOC_CPML(psi,Txx_x,2*npml*nz); ALLOC_CPML(psi,Txz_x,2*npml*nz); ALLOC_CPML(psi, U_x,2*npml*nz); ALLOC_CPML(psi, W_x,2*npml*nz); ALLOC_CPML(psi,Tzz_z,2*npml*nx); ALLOC_CPML(psi,Txz_z,2*npml*nx); ALLOC_CPML(psi, U_z,2*npml*nx); ALLOC_CPML(psi, W_z,2*npml*nx); ALLOC_CPML(b,Txx_x,nx); ALLOC_CPML(b,Txz_x,nx); ALLOC_CPML(b, U_x,nx); ALLOC_CPML(b, W_x,nx); ALLOC_CPML(b,Tzz_z,nz); ALLOC_CPML(b,Txz_z,nz); ALLOC_CPML(b, U_z,nz); ALLOC_CPML(b, W_z,nz); ALLOC_CPML(c,Txx_x,nx); ALLOC_CPML(c,Txz_x,nx); ALLOC_CPML(c, U_x,nx); ALLOC_CPML(c, W_x,nx); ALLOC_CPML(c,Tzz_z,nz); ALLOC_CPML(c,Txz_z,nz); ALLOC_CPML(c, U_z,nz); ALLOC_CPML(c, W_z,nz); ALLOC_CPML(k,Txx_x,nx); ALLOC_CPML(k,Txz_x,nx); ALLOC_CPML(k, U_x,nx); ALLOC_CPML(k, W_x,nx); ALLOC_CPML(k,Tzz_z,nz); ALLOC_CPML(k,Txz_z,nz); ALLOC_CPML(k, U_z,nz); ALLOC_CPML(k, W_z,nz); /* P is the position of \partial{U}/\partial{x}, U_x, P-------U P------U-------- | | | | | | | | 0:pml_pos nx-1-pml_pos:nx-1 distance from boundary pml_pos-i i-(nx-1-pml_pos) for example, nx=100 looks from U grid, pml boundary is at position 11, 0 : 11, 100-1-11 : 100-1 looks from P grid, pml boundary is at position 11.5, 0 : 11, W---------Txz | | | | | | Txx(Tzz)---U ------ pml boundary | | | | pml boundary 0------npml-0.5 */ }
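// For readability, this is what a single ALLOC_CPML(psi, Txx_x, 2*npml*nz)
// invocation in the .cu constructor above expands to (written out by hand,
// whitespace added; safecall is the project's own error-checking wrapper):
safecall(cudaMalloc((void**)&psi.Txx_x, sizeof(double) * 2 * npml * nz));
safecall(cudaMemcpy(psi.Txx_x, cpml.psi.Txx_x, sizeof(double) * 2 * npml * nz,
                    cudaMemcpyHostToDevice));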
bdd43f0ce452bc720b356be0ce6aefe0197442d8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * @file phi3D_fsm.c * @brief Source file for 3D Phi Function that implements the parallel fast * sweeping method for solving the Eikonal equation in CUDA The * algorithm implemented for parallel fast sweeping method is from a * paper in the Journal of Computational Physics titled "A parallel * fast sweeping method for the Eikonal Equation" by Miles Detrixhe, * Federic Gibou, and Chohong Min. * * @author Shrestha, Anup * @date 09 OCT 2015 * * * * @see http://www.sciencedirect.com/science/article/pii/S002199911200722X * * Copyright (c) 2016 * Mechanical and Bio-medical Engineering Department * Boise State University * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "phi3D.h" #include "phi3D_fsm.h" #define max(a, b) ((a > b) ? a : b) #define min(a, b) ((a < b) ? a : b) // Private method definitions static void fast_sweep(Phi *p, int itr, hipPitchedPtr dPitchPtr); static void _cudaMemcpy3D(hipPitchedPtr src, hipPitchedPtr dst, hipExtent dExt, hipMemcpyKind kind); static int iDivUp(int a, int b); // CUDA functions __global__ void fast_sweep_kernel(hipPitchedPtr dPitchPtr, SweepInfo s); __device__ double solve_eikonal(double cur_dist, double minX, double minY, double minZ, double dx, double dy, double dz); /** * @brief Calls the fast sweeping method a number of times specified by * the iterations argument. * * @param[in,out] pf Pointer to phi function. * @param[in] iterations Max iterations. 
*/ void run_fsm(Phi *pf, int iterations) { int max_x = pf->x + 2; int max_y = pf->y + 2; int max_z = pf->z + 2; /*********************** CUDA ***********************/ // time cuda code hipEvent_t start, stop; float elapsedTime; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); hipPitchedPtr hostPtr = make_hipPitchedPtr(pf->distance, max_x * sizeof(double), max_x, max_y); hipPitchedPtr devicePtr; hipExtent dExt = make_hipExtent(max_x * sizeof(double), max_y, max_z); cudaCheckError(); hipMalloc3D(&devicePtr, dExt); cudaCheckError(); _cudaMemcpy3D(hostPtr, devicePtr, dExt, hipMemcpyHostToDevice); cudaCheckError(); fast_sweep(pf, iterations, devicePtr); _cudaMemcpy3D(devicePtr, hostPtr, dExt, hipMemcpyDeviceToHost); cudaCheckError(); hipFree(devicePtr.ptr); hipFree(hostPtr.ptr); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); printf("Parallel FSM time: %f s.\n", elapsedTime / 1000.0); /****************************************************/ } /** * @brief Calculates the distance field for a 3D grid by solving the * Eikonal equation at each grid point using the parallel Fast * Sweeping Method. * * Sweeping Directions: * (1) i = 1:I, j = 1:J, k = 1:K * (2) i = I:1, j = 1:J, k = K:1 * (3) i = I:1, j = 1:J, k = 1:K * (4) i = 1:I, j = 1:J, k = K:1 * (5) i = I:1, j = J:1, k = K:1 * (6) i = 1:I, j = J:1, k = 1:K * (7) i = 1:I, j = J:1, k = K:1 * (8) i = I:1, j = J:1, k = 1:K * * @param[in.out] p Pointer to phi function. * @param[in] itr Max iterations. * @param[in,out] dPitchPtr Pointer to distance array in device memory. */ static void fast_sweep(Phi *p, int itr, hipPitchedPtr dPitchPtr) { // Information regarding sweeping and linear indexing int meshDim = 3; SweepInfo sw; sw.xDim = p->x; sw.dx = p->dx; sw.yDim = p->y; sw.dy = p->dy; sw.zDim = p->z; sw.dz = p->dz; int totalLevels = sw.xDim + sw.yDim + sw.zDim; // loop till the number of times to sweep int loop = 1; while (loop <= itr) { printf("Please wait. Sweeping...[%d/%d]\n", loop, itr); for (int swCount = 1; swCount <= 8; ++swCount) { int start = (swCount == 2 || swCount == 5 || swCount == 7 || swCount == 8) ? totalLevels : meshDim; int end = (start == meshDim) ? totalLevels + 1 : meshDim - 1; int incr = (start == meshDim) ? true : false; // sweep offset is used for translating the 3D coordinates // to perform sweeps from different directions sw.xSweepOff = (swCount == 4 || swCount == 8) ? sw.xDim + 1 : 0; sw.ySweepOff = (swCount == 2 || swCount == 6) ? sw.yDim + 1 : 0; sw.zSweepOff = (swCount == 3 || swCount == 7) ? sw.zDim + 1 : 0; for (int level = start; level != end; level = (incr) ? level + 1 : level - 1) { int xs = max(1, level - (sw.yDim + sw.zDim)), ys = max(1, level - (sw.xDim + sw.zDim)); int xe = min(sw.xDim, level - (meshDim - 1)), ye = min(sw.yDim, level - (meshDim - 1)); int xr = xe - xs + 1, yr = ye - ys + 1; int tth = xr * yr; // Total number of threads needed dim3 bs(16, 16, 1); if (tth < 256) { bs.x = xr; bs.y = yr; } dim3 gs(iDivUp(xr, bs.x), iDivUp(yr, bs.y), 1); sw.level = level; sw.xOffSet = xs; sw.yOffset = ys; fast_sweep_kernel << <gs, bs>>> (dPitchPtr, sw); hipDeviceSynchronize(); cudaCheckError(); } } printf("Sweeping finished!......[%d/%d]\n", loop, itr); ++loop; } } /** * @brief CUDA kernel for the fast sweeping method. * * @param[in,out] dPitchPtr Pointer to distance array in device memory. * @param[in] s Sweep information. 
*/ __global__ void fast_sweep_kernel(hipPitchedPtr dPitchPtr, SweepInfo s) { int x = (blockIdx.x * blockDim.x + threadIdx.x) + s.xOffSet; int y = (blockIdx.y * blockDim.y + threadIdx.y) + s.yOffset; if (x <= s.xDim && y <= s.yDim) { int z = s.level - (x + y); if (z > 0 && z <= s.zDim) { int i = abs(z - s.zSweepOff); int j = abs(y - s.ySweepOff); int k = abs(x - s.xSweepOff); char *devPtr = (char *)dPitchPtr.ptr; size_t pitch = dPitchPtr.pitch; size_t slicePitch = pitch * (s.yDim + 2); double *c_row = (double *)((devPtr + i * slicePitch) + j * pitch); // center row double center = c_row[k]; // center distance double left = c_row[k - 1]; // left distance double right = c_row[k + 1]; // right distance double up = ((double *)((devPtr + i * slicePitch) + (j - 1) * pitch))[k]; // upper distance double down = ((double *)((devPtr + i * slicePitch) + (j + 1) * pitch))[k]; // lower distance double front = ((double *)((devPtr + (i - 1) * slicePitch) + j * pitch))[k]; // front distance double back = ((double *)((devPtr + (i + 1) * slicePitch) + j * pitch))[k]; // back distance double minX = min(left, right); double minY = min(up, down); double minZ = min(front, back); c_row[k] = solve_eikonal(center, minX, minY, minZ, s.dx, s.dy, s.dz); } } } /** * @brief Solves Eikonal equation at linearized 3D index. Returns the * minimum of calculated and old distance values. * * @param[in] cur_dist Current distance value. * @param[in] minX Minimum distance in the x-direction. * @param[in] minY Minimum distance in the y-direction. * @param[in] minZ Minimum distance in the z-direction. * @param[in] dx Spacing in the x-direction. * @param[in] dy Spacing in the y-direction. * @param[in] dz Spacing in the z-direction. * * @return Minimum value of the solution at given index. */ __device__ double solve_eikonal(double cur_dist, double minX, double minY, double minZ, double dx, double dy, double dz) { double dist_new = 0; double m[] = { minX, minY, minZ }; double d[] = { dx, dy, dz }; // sort the mins for (int i = 1; i < 3; i++) { for (int j = 0; j < 3 - i; j++) { if (m[j] > m[j + 1]) { double tmp_m = m[j]; double tmp_d = d[j]; m[j] = m[j + 1]; d[j] = d[j + 1]; m[j + 1] = tmp_m; d[j + 1] = tmp_d; } } } // simplifying the variables double m_0 = m[0], m_1 = m[1], m_2 = m[2]; double d_0 = d[0], d_1 = d[1], d_2 = d[2]; double m2_0 = m_0 * m_0, m2_1 = m_1 * m_1, m2_2 = m_2 * m_2; double d2_0 = d_0 * d_0, d2_1 = d_1 * d_1, d2_2 = d_2 * d_2; dist_new = m_0 + d_0; if (dist_new > m_1) { double s = sqrt(-m2_0 + 2 * m_0 * m_1 - m2_1 + d2_0 + d2_1); dist_new = (m_1 * d2_0 + m_0 * d2_1 + d_0 * d_1 * s) / (d2_0 + d2_1); if (dist_new > m_2) { double a = sqrt(-m2_0 * d2_1 - m2_0 * d2_2 + 2 * m_0 * m_1 * d2_2 - m2_1 * d2_0 - m2_1 * d2_2 + 2 * m_0 * m_2 * d2_1 - m2_2 * d2_0 - m2_2 * d2_1 + 2 * m_1 * m_2 * d2_0 + d2_0 * d2_1 + d2_0 * d2_2 + d2_1 * d2_2); dist_new = (m_2 * d2_0 * d2_1 + m_1 * d2_0 * d2_2 + m_0 * d2_1 * d2_2 + d_0 * d_1 * d_2 * a) / (d2_0 * d2_1 + d2_0 * d2_2 + d2_1 * d2_2); } } return min(cur_dist, dist_new); } /* * Copies 3D memory from host to device and device to host. * * Arguments: * hipPitchedPtr [in] - pointer to distance array * hipPitchedPtr [out] - pointer to distance array * hipMemcpyKind [in] - specifies the direction of copy * Returns: * */ /** * @brief Copies 3D memory from host to device and device to host. * * @param[in] src Pointer to source distance array. * @param[out] dst Pointer to destination disance array * @param[in] dExt Cuda extent. * @param[in] kind Specifies the direction of copy. 
*/ static void _cudaMemcpy3D(hipPitchedPtr src, hipPitchedPtr dst, hipExtent dExt, hipMemcpyKind kind) { hipMemcpy3DParms mcp = { 0 }; mcp.kind = kind; mcp.extent = dExt; mcp.srcPtr = src; mcp.dstPtr = dst; hipMemcpy3D(&mcp); cudaCheckError(); } /** * @brief Calculates number of threads in each dimension of a thread block * * @param[in] a * @param[in] b * * @return { description_of_the_return_value } */ static int iDivUp(int a, int b) { return ((a % b) != 0) ? (a / b + 1) : (a / b); }
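/*
 * Host-side sketch of the same closed-form Eikonal update implemented by the
 * solve_eikonal device function above, handy for spot-checking a few kernel
 * results on the CPU. cpu_solve_eikonal is an illustrative name, not part of
 * the original sources; the arithmetic mirrors the device code one-to-one.
 */
#include <math.h>

static double cpu_solve_eikonal(double cur_dist, double mins[3], double spacing[3])
{
    double m[3] = { mins[0], mins[1], mins[2] };
    double d[3] = { spacing[0], spacing[1], spacing[2] };

    /* sort the neighbor minima (and their spacings) in ascending order */
    for (int i = 0; i < 2; i++) {
        for (int j = 0; j < 2 - i; j++) {
            if (m[j] > m[j + 1]) {
                double tm = m[j]; m[j] = m[j + 1]; m[j + 1] = tm;
                double td = d[j]; d[j] = d[j + 1]; d[j + 1] = td;
            }
        }
    }

    /* one-axis update, falling through to the two- and three-axis forms */
    double dist = m[0] + d[0];
    if (dist > m[1]) {
        double s = sqrt(-m[0]*m[0] + 2*m[0]*m[1] - m[1]*m[1] + d[0]*d[0] + d[1]*d[1]);
        dist = (m[1]*d[0]*d[0] + m[0]*d[1]*d[1] + d[0]*d[1]*s) / (d[0]*d[0] + d[1]*d[1]);
        if (dist > m[2]) {
            double a = sqrt(-m[0]*m[0]*d[1]*d[1] - m[0]*m[0]*d[2]*d[2] + 2*m[0]*m[1]*d[2]*d[2]
                            - m[1]*m[1]*d[0]*d[0] - m[1]*m[1]*d[2]*d[2] + 2*m[0]*m[2]*d[1]*d[1]
                            - m[2]*m[2]*d[0]*d[0] - m[2]*m[2]*d[1]*d[1] + 2*m[1]*m[2]*d[0]*d[0]
                            + d[0]*d[0]*d[1]*d[1] + d[0]*d[0]*d[2]*d[2] + d[1]*d[1]*d[2]*d[2]);
            dist = (m[2]*d[0]*d[0]*d[1]*d[1] + m[1]*d[0]*d[0]*d[2]*d[2] + m[0]*d[1]*d[1]*d[2]*d[2]
                    + d[0]*d[1]*d[2]*a) / (d[0]*d[0]*d[1]*d[1] + d[0]*d[0]*d[2]*d[2] + d[1]*d[1]*d[2]*d[2]);
        }
    }
    return (cur_dist < dist) ? cur_dist : dist;
}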
bdd43f0ce452bc720b356be0ce6aefe0197442d8.cu
/** * @file phi3D_fsm.c * @brief Source file for 3D Phi Function that implements the parallel fast * sweeping method for solving the Eikonal equation in CUDA The * algorithm implemented for parallel fast sweeping method is from a * paper in the Journal of Computational Physics titled "A parallel * fast sweeping method for the Eikonal Equation" by Miles Detrixhe, * Federic Gibou, and Chohong Min. * * @author Shrestha, Anup * @date 09 OCT 2015 * * * * @see http://www.sciencedirect.com/science/article/pii/S002199911200722X * * Copyright (c) 2016 * Mechanical and Bio-medical Engineering Department * Boise State University * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "phi3D.h" #include "phi3D_fsm.h" #define max(a, b) ((a > b) ? a : b) #define min(a, b) ((a < b) ? a : b) // Private method definitions static void fast_sweep(Phi *p, int itr, cudaPitchedPtr dPitchPtr); static void _cudaMemcpy3D(cudaPitchedPtr src, cudaPitchedPtr dst, cudaExtent dExt, cudaMemcpyKind kind); static int iDivUp(int a, int b); // CUDA functions __global__ void fast_sweep_kernel(cudaPitchedPtr dPitchPtr, SweepInfo s); __device__ double solve_eikonal(double cur_dist, double minX, double minY, double minZ, double dx, double dy, double dz); /** * @brief Calls the fast sweeping method a number of times specified by * the iterations argument. * * @param[in,out] pf Pointer to phi function. * @param[in] iterations Max iterations. 
*/ void run_fsm(Phi *pf, int iterations) { int max_x = pf->x + 2; int max_y = pf->y + 2; int max_z = pf->z + 2; /*********************** CUDA ***********************/ // time cuda code cudaEvent_t start, stop; float elapsedTime; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); cudaPitchedPtr hostPtr = make_cudaPitchedPtr(pf->distance, max_x * sizeof(double), max_x, max_y); cudaPitchedPtr devicePtr; cudaExtent dExt = make_cudaExtent(max_x * sizeof(double), max_y, max_z); cudaCheckError(); cudaMalloc3D(&devicePtr, dExt); cudaCheckError(); _cudaMemcpy3D(hostPtr, devicePtr, dExt, cudaMemcpyHostToDevice); cudaCheckError(); fast_sweep(pf, iterations, devicePtr); _cudaMemcpy3D(devicePtr, hostPtr, dExt, cudaMemcpyDeviceToHost); cudaCheckError(); cudaFree(devicePtr.ptr); cudaFree(hostPtr.ptr); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); printf("Parallel FSM time: %f s.\n", elapsedTime / 1000.0); /****************************************************/ } /** * @brief Calculates the distance field for a 3D grid by solving the * Eikonal equation at each grid point using the parallel Fast * Sweeping Method. * * Sweeping Directions: * (1) i = 1:I, j = 1:J, k = 1:K * (2) i = I:1, j = 1:J, k = K:1 * (3) i = I:1, j = 1:J, k = 1:K * (4) i = 1:I, j = 1:J, k = K:1 * (5) i = I:1, j = J:1, k = K:1 * (6) i = 1:I, j = J:1, k = 1:K * (7) i = 1:I, j = J:1, k = K:1 * (8) i = I:1, j = J:1, k = 1:K * * @param[in.out] p Pointer to phi function. * @param[in] itr Max iterations. * @param[in,out] dPitchPtr Pointer to distance array in device memory. */ static void fast_sweep(Phi *p, int itr, cudaPitchedPtr dPitchPtr) { // Information regarding sweeping and linear indexing int meshDim = 3; SweepInfo sw; sw.xDim = p->x; sw.dx = p->dx; sw.yDim = p->y; sw.dy = p->dy; sw.zDim = p->z; sw.dz = p->dz; int totalLevels = sw.xDim + sw.yDim + sw.zDim; // loop till the number of times to sweep int loop = 1; while (loop <= itr) { printf("Please wait. Sweeping...[%d/%d]\n", loop, itr); for (int swCount = 1; swCount <= 8; ++swCount) { int start = (swCount == 2 || swCount == 5 || swCount == 7 || swCount == 8) ? totalLevels : meshDim; int end = (start == meshDim) ? totalLevels + 1 : meshDim - 1; int incr = (start == meshDim) ? true : false; // sweep offset is used for translating the 3D coordinates // to perform sweeps from different directions sw.xSweepOff = (swCount == 4 || swCount == 8) ? sw.xDim + 1 : 0; sw.ySweepOff = (swCount == 2 || swCount == 6) ? sw.yDim + 1 : 0; sw.zSweepOff = (swCount == 3 || swCount == 7) ? sw.zDim + 1 : 0; for (int level = start; level != end; level = (incr) ? level + 1 : level - 1) { int xs = max(1, level - (sw.yDim + sw.zDim)), ys = max(1, level - (sw.xDim + sw.zDim)); int xe = min(sw.xDim, level - (meshDim - 1)), ye = min(sw.yDim, level - (meshDim - 1)); int xr = xe - xs + 1, yr = ye - ys + 1; int tth = xr * yr; // Total number of threads needed dim3 bs(16, 16, 1); if (tth < 256) { bs.x = xr; bs.y = yr; } dim3 gs(iDivUp(xr, bs.x), iDivUp(yr, bs.y), 1); sw.level = level; sw.xOffSet = xs; sw.yOffset = ys; fast_sweep_kernel << <gs, bs>>> (dPitchPtr, sw); cudaThreadSynchronize(); cudaCheckError(); } } printf("Sweeping finished!......[%d/%d]\n", loop, itr); ++loop; } } /** * @brief CUDA kernel for the fast sweeping method. * * @param[in,out] dPitchPtr Pointer to distance array in device memory. * @param[in] s Sweep information. 
*/ __global__ void fast_sweep_kernel(cudaPitchedPtr dPitchPtr, SweepInfo s) { int x = (blockIdx.x * blockDim.x + threadIdx.x) + s.xOffSet; int y = (blockIdx.y * blockDim.y + threadIdx.y) + s.yOffset; if (x <= s.xDim && y <= s.yDim) { int z = s.level - (x + y); if (z > 0 && z <= s.zDim) { int i = abs(z - s.zSweepOff); int j = abs(y - s.ySweepOff); int k = abs(x - s.xSweepOff); char *devPtr = (char *)dPitchPtr.ptr; size_t pitch = dPitchPtr.pitch; size_t slicePitch = pitch * (s.yDim + 2); double *c_row = (double *)((devPtr + i * slicePitch) + j * pitch); // center row double center = c_row[k]; // center distance double left = c_row[k - 1]; // left distance double right = c_row[k + 1]; // right distance double up = ((double *)((devPtr + i * slicePitch) + (j - 1) * pitch))[k]; // upper distance double down = ((double *)((devPtr + i * slicePitch) + (j + 1) * pitch))[k]; // lower distance double front = ((double *)((devPtr + (i - 1) * slicePitch) + j * pitch))[k]; // front distance double back = ((double *)((devPtr + (i + 1) * slicePitch) + j * pitch))[k]; // back distance double minX = min(left, right); double minY = min(up, down); double minZ = min(front, back); c_row[k] = solve_eikonal(center, minX, minY, minZ, s.dx, s.dy, s.dz); } } } /** * @brief Solves Eikonal equation at linearized 3D index. Returns the * minimum of calculated and old distance values. * * @param[in] cur_dist Current distance value. * @param[in] minX Minimum distance in the x-direction. * @param[in] minY Minimum distance in the y-direction. * @param[in] minZ Minimum distance in the z-direction. * @param[in] dx Spacing in the x-direction. * @param[in] dy Spacing in the y-direction. * @param[in] dz Spacing in the z-direction. * * @return Minimum value of the solution at given index. */ __device__ double solve_eikonal(double cur_dist, double minX, double minY, double minZ, double dx, double dy, double dz) { double dist_new = 0; double m[] = { minX, minY, minZ }; double d[] = { dx, dy, dz }; // sort the mins for (int i = 1; i < 3; i++) { for (int j = 0; j < 3 - i; j++) { if (m[j] > m[j + 1]) { double tmp_m = m[j]; double tmp_d = d[j]; m[j] = m[j + 1]; d[j] = d[j + 1]; m[j + 1] = tmp_m; d[j + 1] = tmp_d; } } } // simplifying the variables double m_0 = m[0], m_1 = m[1], m_2 = m[2]; double d_0 = d[0], d_1 = d[1], d_2 = d[2]; double m2_0 = m_0 * m_0, m2_1 = m_1 * m_1, m2_2 = m_2 * m_2; double d2_0 = d_0 * d_0, d2_1 = d_1 * d_1, d2_2 = d_2 * d_2; dist_new = m_0 + d_0; if (dist_new > m_1) { double s = sqrt(-m2_0 + 2 * m_0 * m_1 - m2_1 + d2_0 + d2_1); dist_new = (m_1 * d2_0 + m_0 * d2_1 + d_0 * d_1 * s) / (d2_0 + d2_1); if (dist_new > m_2) { double a = sqrt(-m2_0 * d2_1 - m2_0 * d2_2 + 2 * m_0 * m_1 * d2_2 - m2_1 * d2_0 - m2_1 * d2_2 + 2 * m_0 * m_2 * d2_1 - m2_2 * d2_0 - m2_2 * d2_1 + 2 * m_1 * m_2 * d2_0 + d2_0 * d2_1 + d2_0 * d2_2 + d2_1 * d2_2); dist_new = (m_2 * d2_0 * d2_1 + m_1 * d2_0 * d2_2 + m_0 * d2_1 * d2_2 + d_0 * d_1 * d_2 * a) / (d2_0 * d2_1 + d2_0 * d2_2 + d2_1 * d2_2); } } return min(cur_dist, dist_new); } /* * Copies 3D memory from host to device and device to host. * * Arguments: * cudaPitchedPtr [in] - pointer to distance array * cudaPitchedPtr [out] - pointer to distance array * cudaMemcpyKind [in] - specifies the direction of copy * Returns: * */ /** * @brief Copies 3D memory from host to device and device to host. * * @param[in] src Pointer to source distance array. * @param[out] dst Pointer to destination disance array * @param[in] dExt Cuda extent. * @param[in] kind Specifies the direction of copy. 
*/ static void _cudaMemcpy3D(cudaPitchedPtr src, cudaPitchedPtr dst, cudaExtent dExt, cudaMemcpyKind kind) { cudaMemcpy3DParms mcp = { 0 }; mcp.kind = kind; mcp.extent = dExt; mcp.srcPtr = src; mcp.dstPtr = dst; cudaMemcpy3D(&mcp); cudaCheckError(); } /** * @brief Calculates number of threads in each dimension of a thread block * * @param[in] a * @param[in] b * * @return { description_of_the_return_value } */ static int iDivUp(int a, int b) { return ((a % b) != 0) ? (a / b + 1) : (a / b); }
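/*
 * The kernel above indexes the pitched 3D allocation by hand
 * (devPtr + i * slicePitch + j * pitch, element k), so the standalone sketch
 * below shows how that arithmetic maps logical (i, j, k) coordinates onto
 * memory from cudaMalloc3D and back to a flat host array via cudaMemcpy3D.
 * It is a minimal illustration, not part of the original program; fill_pitched
 * is an illustrative name and error handling is reduced to one
 * cudaGetLastError() call. (Note also that cudaFree expects a device pointer,
 * so run_fsm's cudaFree(hostPtr.ptr), which wraps the host array pf->distance,
 * reports an error rather than releasing that memory.)
 */
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>

__global__ void fill_pitched(cudaPitchedPtr p, int nx, int ny, int nz)
{
    int k = blockIdx.x * blockDim.x + threadIdx.x;   // fastest-varying (x)
    int j = blockIdx.y * blockDim.y + threadIdx.y;   // row within a slice (y)
    int i = blockIdx.z;                              // slice (z)
    if (k < nx && j < ny && i < nz) {
        char  *base       = (char *)p.ptr;
        size_t slicePitch = p.pitch * ny;
        double *row = (double *)(base + i * slicePitch + j * p.pitch);
        row[k] = i * 10000 + j * 100 + k;            // encode the coordinates
    }
}

int main(void)
{
    const int nx = 8, ny = 6, nz = 4;
    cudaExtent ext = make_cudaExtent(nx * sizeof(double), ny, nz);

    cudaPitchedPtr dptr;
    cudaMalloc3D(&dptr, ext);

    dim3 bs(8, 8, 1), gs((nx + 7) / 8, (ny + 7) / 8, nz);
    fill_pitched<<<gs, bs>>>(dptr, nx, ny, nz);

    double *h = (double *)malloc(nx * ny * nz * sizeof(double));
    cudaPitchedPtr hptr = make_cudaPitchedPtr(h, nx * sizeof(double), nx, ny);

    cudaMemcpy3DParms mcp = { 0 };
    mcp.srcPtr = dptr; mcp.dstPtr = hptr; mcp.extent = ext;
    mcp.kind = cudaMemcpyDeviceToHost;
    cudaMemcpy3D(&mcp);

    printf("h[i=2][j=3][k=5] = %.0f (expect 20305)\n", h[(2 * ny + 3) * nx + 5]);
    printf("last CUDA error: %d\n", (int)cudaGetLastError());

    cudaFree(dptr.ptr);
    free(h);
    return 0;
}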
83d0580bdb8a0d59dceb5047e3b06b0535869891.hip
// !!! This is a file automatically generated by hipify!!! /* K Nearest Neighbours using CUDA Authors: Harshul Gupta; Utkarsh Singh */ #include <hip/hip_fp16.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <helper_cuda.h> #include <helper_functions.h> #include<cmath> #include<float.h> #include "config.h" #include <chrono> int* d_train; int* d_test; int* pixelnorm; int* sum; int *d_norm_array; int *d_k_norms; int *d_labels; int dim = DIM; int *d_odata; int *h_odata = (int*)malloc(sizeof(int) * SUM_OUT); int *norm_array = (int*)malloc(sizeof(int) * WSIZE); __shared__ int s_sum[SUM_BLK_SIZE]; __shared__ int radix[WSIZE]; __shared__ int p_dist[WSIZE]; /*Initialize CUDA memories*/ void knn_init(int K) { checkCudaErrors(hipMalloc((void **)&d_train, (sizeof(int) * DIM * DIM))); checkCudaErrors(hipMalloc((void **)&d_test, (sizeof(int) * DIM * DIM))); checkCudaErrors(hipMalloc((void **)&pixelnorm, (sizeof(int) * DIM * DIM))); checkCudaErrors(hipMalloc((void **)&sum, (sizeof(int) * 2))); checkCudaErrors(hipMalloc((void **)&d_norm_array, (sizeof(int) * WSIZE))); checkCudaErrors(hipMalloc((void **)&d_k_norms, (sizeof(int) * NUMCLASSES * K))); checkCudaErrors(hipMalloc((void **)&d_labels, (sizeof(int) * NUMCLASSES * K))); checkCudaErrors(hipMalloc((void **)&d_odata, (sizeof(int) * SUM_OUT))); } /*one time transfer of test image to GPU*/ void transfer_testimage(int *test_image) { checkCudaErrors(hipMemcpy(d_test, test_image, ((sizeof(int)) * DIM * DIM), hipMemcpyHostToDevice)); } /*Radix Sort Unrolled by 2*/ __global__ void RadixSort(int *arr, int nx) { int idx = threadIdx.x + blockDim.x*blockIdx.x; int value, zeros, scan, final_sum, temp, out; radix[idx] = arr[idx]; if (idx < nx) { for (int bit = LOWER_BIT; bit < UPPER_BIT; bit+=2) { value = radix[idx]; zeros = __ballot(!((value >> bit) & 0x1)); scan = __popc(zeros&((1 << idx) - 1)); final_sum = (zeros >> (nx - 1) & 0x1) + __shfl(scan, (nx - 1)); temp = (idx - scan + final_sum); out = ((value >> bit) & 0x1) ? temp : scan; radix[out] = value; value = radix[idx]; zeros = __ballot(!((value >> (bit+1)) & 0x1)); scan = __popc(zeros&((1 << idx) - 1)); final_sum = (zeros >> (nx - 1) & 0x1) + __shfl(scan, (nx - 1)); temp = (idx - scan + final_sum); out = ((value >> (bit+1)) & 0x1) ? temp : scan; radix[out] = value; } } arr[idx] = radix[idx]; } __global__ void RadixSortLabels(int *arr, int *labels, int nx) { int idx = threadIdx.x + blockDim.x*blockIdx.x; int value, zeros, scan, final_sum, temp, out, lbl; if (idx < nx) { for (int bit = LOWER_BIT; bit < UPPER_BIT; bit++) { value = arr[idx]; lbl = labels[idx]; zeros = __ballot(!((value >> bit) & 0x1)); scan = __popc(zeros&((1 << idx) - 1)); final_sum = (zeros >> (nx - 1) & 0x1) + __shfl(scan, (nx - 1)); temp = (idx - scan + final_sum); out = ((value >> (bit)) & 0x1) ? 
temp : scan; arr[out] = value; labels[out] = lbl; } } } /*Insertion Sort for Comparison Purposes*/ __global__ void InsertionSort(int* arr, int nx) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < nx) { p_dist[idx] = arr[idx]; #pragma unroll for (int i = 1; i<nx; i++) { int curr_dist = p_dist[i]; int j = i-1; while (j >= 0 && p_dist[(j)] > curr_dist) { p_dist[j+1] = p_dist[(j)]; --j; } p_dist[j+1] = curr_dist; } } arr[idx] = p_dist[idx]; } /*Calculate L1 distance*/ __global__ void knn_distance_L1(int *d_train, int *d_test, int *pixelnorm, int nx, int ny) { int ix = threadIdx.x + blockDim.x*blockIdx.x; if (ix < nx*ny) { pixelnorm[ix] = fabsf(d_test[ix] - d_train[ix]); } } /*Calculate L2 distance*/ __global__ void knn_distance_L2(int *d_train, int *d_test, int *pixelnorm, int nx, int ny) { int ix = threadIdx.x + blockDim.x*blockIdx.x; pixelnorm[ix] = (d_test[ix] - d_train[ix]) * (d_test[ix] - d_train[ix]); } /*Calculate Warp Level Sum*/ template <unsigned int iBlockSize> __global__ void WarpSumUnroll8(int *pixels, int *output, unsigned int n) { unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x; int localSum = 0; if (idx + 7 * blockDim.x < n) { int a1 = pixels[idx]; int a2 = pixels[idx + blockDim.x]; int a3 = pixels[idx + 2 * blockDim.x]; int a4 = pixels[idx + 3 * blockDim.x]; int b1 = pixels[idx + 4 * blockDim.x]; int b2 = pixels[idx + 5 * blockDim.x]; int b3 = pixels[idx + 6 * blockDim.x]; int b4 = pixels[idx + 7 * blockDim.x]; localSum = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4; } s_sum[tid] = localSum; __syncthreads(); if (iBlockSize >= 1024 && tid < 512) s_sum[tid] += s_sum[tid + 512]; __syncthreads(); if (iBlockSize >= 512 && tid < 256) s_sum[tid] += s_sum[tid + 256]; __syncthreads(); if (iBlockSize >= 256 && tid < 128) s_sum[tid] += s_sum[tid + 128]; __syncthreads(); if (iBlockSize >= 128 && tid < 64) s_sum[tid] += s_sum[tid + 64]; __syncthreads(); if (tid < 32) { volatile int *vsmem = s_sum; vsmem[tid] += vsmem[tid + 32]; vsmem[tid] += vsmem[tid + 16]; vsmem[tid] += vsmem[tid + 8]; vsmem[tid] += vsmem[tid + 4]; vsmem[tid] += vsmem[tid + 2]; vsmem[tid] += vsmem[tid + 1]; } if (tid == 0) output[blockIdx.x] = s_sum[0]; } /*Labels*/ __global__ void add_labels(int *d_labels, int lbl, int index) { int idx = threadIdx.x + blockDim.x*blockIdx.x; d_labels[idx + index] = lbl; } void knn_sort(int K, int &index, int lbl) { checkCudaErrors(hipMemcpy(d_norm_array, norm_array, ((sizeof(int))*WSIZE), hipMemcpyHostToDevice)); #if(SORTING == RADIX) RadixSort << <1, WSIZE >> >(d_norm_array, WSIZE); #elif(SORTING == INSERTION) InsertionSort << <1, WSIZE >> >(d_norm_array, WSIZE); #endif hipMemcpy((d_k_norms + index), d_norm_array, (sizeof(int)*K), hipMemcpyDeviceToDevice); add_labels << <1, K >> > (d_labels, lbl, index); } int perform_classification(int K, int num_classes) { int *cpu_labels = (int*)calloc(num_classes*K, sizeof(int)); RadixSortLabels << <1, num_classes*K >> >(d_k_norms, d_labels, num_classes*K); checkCudaErrors(hipMemcpy(cpu_labels, d_labels, ((sizeof(int)) * num_classes*K), hipMemcpyDeviceToHost)); int *count = (int*)calloc(num_classes, sizeof(int)); for (int i = 0; i < K; i++) { count[cpu_labels[i]] += 1; } int max = 0, f_lbl = 0; for (int i = 0; i < num_classes; i++) { if (count[i] > max) { max = count[i]; f_lbl = i; } } return f_lbl; } void knn_cuda(int *train_image, int dist_index) { checkCudaErrors(hipMemcpy(d_train, train_image, ((sizeof(int))*DIM * DIM), hipMemcpyHostToDevice)); dim3 block(THREADS, 1); dim3 grid((DIM * DIM + 
block.x - 1) / block.x, 1); auto start = std::chrono::high_resolution_clock::now(); #if(METRIC == L1) knn_distance_L1 << < grid, block>> >(d_train, d_test, pixelnorm, DIM, DIM); #elif(METRIC == L2) knn_distance_L2 << < grid, block >> >(d_train, d_test, pixelnorm, DIM, DIM); #endif block.x = SUM_BLK_SIZE; grid.x = ((DIM * DIM) + block.x - 1) / block.x; WarpSumUnroll8<SUM_BLK_SIZE> << <grid.x / 8, block >> >(pixelnorm, d_odata, DIM * DIM); checkCudaErrors(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int), hipMemcpyDeviceToHost)); int gpu_sum = 0; for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i]; #if(METRIC == L1) norm_array[dist_index] = gpu_sum; #elif(METRIC == L2) norm_array[dist_index] = sqrt(gpu_sum); #endif } void cuda_deallocation() { hipFree(d_train); hipFree(d_test); hipFree(pixelnorm); hipFree(sum); hipFree(norm_array); hipFree(d_k_norms); hipFree(d_labels); }
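/*
 * Host sketch of one bit-step of the warp-level radix sort above, spelling
 * out what the __ballot / __popc / __shfl combination computes for each lane:
 * a mask of lanes whose current bit is zero, each lane's exclusive rank among
 * those lanes, and the total zero count taken from the last lane. The helper
 * names are illustrative (not from this file) and a fixed 32-element "warp"
 * is assumed, whereas the device code uses WSIZE from config.h.
 */
#include <stdio.h>

static int popcount32(unsigned int x) { int c = 0; while (x) { c += (int)(x & 1u); x >>= 1; } return c; }

static void radix_bit_step(unsigned int v[32], int bit)
{
    unsigned int zeros = 0;                                   /* __ballot(!((v >> bit) & 1)) */
    for (int lane = 0; lane < 32; lane++)
        if (!((v[lane] >> bit) & 1u)) zeros |= 1u << lane;

    int total_zeros = popcount32(zeros);                      /* the kernel rebuilds this via __shfl from the last lane */
    unsigned int out[32];
    for (int lane = 0; lane < 32; lane++) {
        int scan = popcount32(zeros & ((1u << lane) - 1u));   /* zero-bit lanes below this one */
        int dest = ((v[lane] >> bit) & 1u) ? (lane - scan + total_zeros) : scan;
        out[dest] = v[lane];                                  /* stable partition on this bit */
    }
    for (int lane = 0; lane < 32; lane++) v[lane] = out[lane];
}

int main(void)
{
    unsigned int v[32];
    for (int i = 0; i < 32; i++) v[i] = (unsigned int)((31 - i) * 7 % 64);  /* arbitrary values < 64 */
    for (int bit = 0; bit < 6; bit++) radix_bit_step(v, bit);               /* 6 bits cover values < 64 */
    for (int i = 0; i < 32; i++) printf("%u ", v[i]);
    printf("\n");
    return 0;
}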
83d0580bdb8a0d59dceb5047e3b06b0535869891.cu
/* K Nearest Neighbours using CUDA Authors: Harshul Gupta; Utkarsh Singh */ #include <cuda_fp16.h> #include <cuda.h> #include <cuda_runtime.h> #include <helper_cuda.h> #include <helper_functions.h> #include<cmath> #include<float.h> #include "config.h" #include <chrono> int* d_train; int* d_test; int* pixelnorm; int* sum; int *d_norm_array; int *d_k_norms; int *d_labels; int dim = DIM; int *d_odata; int *h_odata = (int*)malloc(sizeof(int) * SUM_OUT); int *norm_array = (int*)malloc(sizeof(int) * WSIZE); __shared__ int s_sum[SUM_BLK_SIZE]; __shared__ int radix[WSIZE]; __shared__ int p_dist[WSIZE]; /*Initialize CUDA memories*/ void knn_init(int K) { checkCudaErrors(cudaMalloc((void **)&d_train, (sizeof(int) * DIM * DIM))); checkCudaErrors(cudaMalloc((void **)&d_test, (sizeof(int) * DIM * DIM))); checkCudaErrors(cudaMalloc((void **)&pixelnorm, (sizeof(int) * DIM * DIM))); checkCudaErrors(cudaMalloc((void **)&sum, (sizeof(int) * 2))); checkCudaErrors(cudaMalloc((void **)&d_norm_array, (sizeof(int) * WSIZE))); checkCudaErrors(cudaMalloc((void **)&d_k_norms, (sizeof(int) * NUMCLASSES * K))); checkCudaErrors(cudaMalloc((void **)&d_labels, (sizeof(int) * NUMCLASSES * K))); checkCudaErrors(cudaMalloc((void **)&d_odata, (sizeof(int) * SUM_OUT))); } /*one time transfer of test image to GPU*/ void transfer_testimage(int *test_image) { checkCudaErrors(cudaMemcpy(d_test, test_image, ((sizeof(int)) * DIM * DIM), cudaMemcpyHostToDevice)); } /*Radix Sort Unrolled by 2*/ __global__ void RadixSort(int *arr, int nx) { int idx = threadIdx.x + blockDim.x*blockIdx.x; int value, zeros, scan, final_sum, temp, out; radix[idx] = arr[idx]; if (idx < nx) { for (int bit = LOWER_BIT; bit < UPPER_BIT; bit+=2) { value = radix[idx]; zeros = __ballot(!((value >> bit) & 0x1)); scan = __popc(zeros&((1 << idx) - 1)); final_sum = (zeros >> (nx - 1) & 0x1) + __shfl(scan, (nx - 1)); temp = (idx - scan + final_sum); out = ((value >> bit) & 0x1) ? temp : scan; radix[out] = value; value = radix[idx]; zeros = __ballot(!((value >> (bit+1)) & 0x1)); scan = __popc(zeros&((1 << idx) - 1)); final_sum = (zeros >> (nx - 1) & 0x1) + __shfl(scan, (nx - 1)); temp = (idx - scan + final_sum); out = ((value >> (bit+1)) & 0x1) ? temp : scan; radix[out] = value; } } arr[idx] = radix[idx]; } __global__ void RadixSortLabels(int *arr, int *labels, int nx) { int idx = threadIdx.x + blockDim.x*blockIdx.x; int value, zeros, scan, final_sum, temp, out, lbl; if (idx < nx) { for (int bit = LOWER_BIT; bit < UPPER_BIT; bit++) { value = arr[idx]; lbl = labels[idx]; zeros = __ballot(!((value >> bit) & 0x1)); scan = __popc(zeros&((1 << idx) - 1)); final_sum = (zeros >> (nx - 1) & 0x1) + __shfl(scan, (nx - 1)); temp = (idx - scan + final_sum); out = ((value >> (bit)) & 0x1) ? 
temp : scan; arr[out] = value; labels[out] = lbl; } } } /*Insertion Sort for Comparison Purposes*/ __global__ void InsertionSort(int* arr, int nx) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < nx) { p_dist[idx] = arr[idx]; #pragma unroll for (int i = 1; i<nx; i++) { int curr_dist = p_dist[i]; int j = i-1; while (j >= 0 && p_dist[(j)] > curr_dist) { p_dist[j+1] = p_dist[(j)]; --j; } p_dist[j+1] = curr_dist; } } arr[idx] = p_dist[idx]; } /*Calculate L1 distance*/ __global__ void knn_distance_L1(int *d_train, int *d_test, int *pixelnorm, int nx, int ny) { int ix = threadIdx.x + blockDim.x*blockIdx.x; if (ix < nx*ny) { pixelnorm[ix] = fabsf(d_test[ix] - d_train[ix]); } } /*Calculate L2 distance*/ __global__ void knn_distance_L2(int *d_train, int *d_test, int *pixelnorm, int nx, int ny) { int ix = threadIdx.x + blockDim.x*blockIdx.x; pixelnorm[ix] = (d_test[ix] - d_train[ix]) * (d_test[ix] - d_train[ix]); } /*Calculate Warp Level Sum*/ template <unsigned int iBlockSize> __global__ void WarpSumUnroll8(int *pixels, int *output, unsigned int n) { unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x; int localSum = 0; if (idx + 7 * blockDim.x < n) { int a1 = pixels[idx]; int a2 = pixels[idx + blockDim.x]; int a3 = pixels[idx + 2 * blockDim.x]; int a4 = pixels[idx + 3 * blockDim.x]; int b1 = pixels[idx + 4 * blockDim.x]; int b2 = pixels[idx + 5 * blockDim.x]; int b3 = pixels[idx + 6 * blockDim.x]; int b4 = pixels[idx + 7 * blockDim.x]; localSum = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4; } s_sum[tid] = localSum; __syncthreads(); if (iBlockSize >= 1024 && tid < 512) s_sum[tid] += s_sum[tid + 512]; __syncthreads(); if (iBlockSize >= 512 && tid < 256) s_sum[tid] += s_sum[tid + 256]; __syncthreads(); if (iBlockSize >= 256 && tid < 128) s_sum[tid] += s_sum[tid + 128]; __syncthreads(); if (iBlockSize >= 128 && tid < 64) s_sum[tid] += s_sum[tid + 64]; __syncthreads(); if (tid < 32) { volatile int *vsmem = s_sum; vsmem[tid] += vsmem[tid + 32]; vsmem[tid] += vsmem[tid + 16]; vsmem[tid] += vsmem[tid + 8]; vsmem[tid] += vsmem[tid + 4]; vsmem[tid] += vsmem[tid + 2]; vsmem[tid] += vsmem[tid + 1]; } if (tid == 0) output[blockIdx.x] = s_sum[0]; } /*Labels*/ __global__ void add_labels(int *d_labels, int lbl, int index) { int idx = threadIdx.x + blockDim.x*blockIdx.x; d_labels[idx + index] = lbl; } void knn_sort(int K, int &index, int lbl) { checkCudaErrors(cudaMemcpy(d_norm_array, norm_array, ((sizeof(int))*WSIZE), cudaMemcpyHostToDevice)); #if(SORTING == RADIX) RadixSort << <1, WSIZE >> >(d_norm_array, WSIZE); #elif(SORTING == INSERTION) InsertionSort << <1, WSIZE >> >(d_norm_array, WSIZE); #endif cudaMemcpy((d_k_norms + index), d_norm_array, (sizeof(int)*K), cudaMemcpyDeviceToDevice); add_labels << <1, K >> > (d_labels, lbl, index); } int perform_classification(int K, int num_classes) { int *cpu_labels = (int*)calloc(num_classes*K, sizeof(int)); RadixSortLabels << <1, num_classes*K >> >(d_k_norms, d_labels, num_classes*K); checkCudaErrors(cudaMemcpy(cpu_labels, d_labels, ((sizeof(int)) * num_classes*K), cudaMemcpyDeviceToHost)); int *count = (int*)calloc(num_classes, sizeof(int)); for (int i = 0; i < K; i++) { count[cpu_labels[i]] += 1; } int max = 0, f_lbl = 0; for (int i = 0; i < num_classes; i++) { if (count[i] > max) { max = count[i]; f_lbl = i; } } return f_lbl; } void knn_cuda(int *train_image, int dist_index) { checkCudaErrors(cudaMemcpy(d_train, train_image, ((sizeof(int))*DIM * DIM), cudaMemcpyHostToDevice)); dim3 block(THREADS, 1); dim3 grid((DIM * DIM 
+ block.x - 1) / block.x, 1); auto start = std::chrono::high_resolution_clock::now(); #if(METRIC == L1) knn_distance_L1 << < grid, block>> >(d_train, d_test, pixelnorm, DIM, DIM); #elif(METRIC == L2) knn_distance_L2 << < grid, block >> >(d_train, d_test, pixelnorm, DIM, DIM); #endif block.x = SUM_BLK_SIZE; grid.x = ((DIM * DIM) + block.x - 1) / block.x; WarpSumUnroll8<SUM_BLK_SIZE> << <grid.x / 8, block >> >(pixelnorm, d_odata, DIM * DIM); checkCudaErrors(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int), cudaMemcpyDeviceToHost)); int gpu_sum = 0; for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i]; #if(METRIC == L1) norm_array[dist_index] = gpu_sum; #elif(METRIC == L2) norm_array[dist_index] = sqrt(gpu_sum); #endif } void cuda_deallocation() { cudaFree(d_train); cudaFree(d_test); cudaFree(pixelnorm); cudaFree(sum); cudaFree(norm_array); cudaFree(d_k_norms); cudaFree(d_labels); }
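/*
 * Host reference for the distance path above, useful for validating a single
 * norm_array entry produced by knn_cuda: it recomputes the elementwise L1 or
 * L2 contribution and the final (optionally square-rooted) norm on the CPU.
 * A sketch only; cpu_image_norm is an illustrative name and dim/metric are
 * plain parameters here rather than the DIM and METRIC macros from config.h.
 * (As an aside, norm_array itself is a malloc'd host buffer, so the matching
 * release call in cuda_deallocation would be free() rather than cudaFree().)
 */
#include <math.h>
#include <stdlib.h>

/* metric: 0 = L1 (sum of |a - b|), 1 = L2 (sqrt of the sum of squared differences) */
static int cpu_image_norm(const int *test_img, const int *train_img, int dim, int metric)
{
    long long acc = 0;
    for (int i = 0; i < dim * dim; i++) {
        int diff = test_img[i] - train_img[i];
        acc += metric ? (long long)diff * diff : (long long)abs(diff);
    }
    return metric ? (int)sqrt((double)acc) : (int)acc;
}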
ae7c730f68678425a98032387f82039af40e108e.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "FullyConnectedAdjustMemoryKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *weightsGradPtr = NULL; hipMalloc(&weightsGradPtr, XSIZE*YSIZE); float *biasGradPtr = NULL; hipMalloc(&biasGradPtr, XSIZE*YSIZE); float *weightGradCurvePtr = NULL; hipMalloc(&weightGradCurvePtr, XSIZE*YSIZE); float *biasGradCurvePtr = NULL; hipMalloc(&biasGradCurvePtr, XSIZE*YSIZE); float *avgWeightGradPtr = NULL; hipMalloc(&avgWeightGradPtr, XSIZE*YSIZE); float *avgBiasGradPtr = NULL; hipMalloc(&avgBiasGradPtr, XSIZE*YSIZE); float *avgWeightGradVarPtr = NULL; hipMalloc(&avgWeightGradVarPtr, XSIZE*YSIZE); float *avgBiasGradVarPtr = NULL; hipMalloc(&avgBiasGradVarPtr, XSIZE*YSIZE); float *avgWeightGradCurvePtr = NULL; hipMalloc(&avgWeightGradCurvePtr, XSIZE*YSIZE); float *avgBiasGradCurvePtr = NULL; hipMalloc(&avgBiasGradCurvePtr, XSIZE*YSIZE); float *avgWeightGradCurveVarPtr = NULL; hipMalloc(&avgWeightGradCurveVarPtr, XSIZE*YSIZE); float *avgBiasGradCurveVarPtr = NULL; hipMalloc(&avgBiasGradCurveVarPtr, XSIZE*YSIZE); float *weightMemorySizePtr = NULL; hipMalloc(&weightMemorySizePtr, XSIZE*YSIZE); float *biasMemorySizePtr = NULL; hipMalloc(&biasMemorySizePtr, XSIZE*YSIZE); float *dropoutMaskPtr = NULL; hipMalloc(&dropoutMaskPtr, XSIZE*YSIZE); int prevLayerSize = XSIZE*YSIZE; int thisLayerSize = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( FullyConnectedAdjustMemoryKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, weightsGradPtr,biasGradPtr,weightGradCurvePtr,biasGradCurvePtr,avgWeightGradPtr,avgBiasGradPtr,avgWeightGradVarPtr,avgBiasGradVarPtr,avgWeightGradCurvePtr,avgBiasGradCurvePtr,avgWeightGradCurveVarPtr,avgBiasGradCurveVarPtr,weightMemorySizePtr,biasMemorySizePtr,dropoutMaskPtr,prevLayerSize,thisLayerSize); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( FullyConnectedAdjustMemoryKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, weightsGradPtr,biasGradPtr,weightGradCurvePtr,biasGradCurvePtr,avgWeightGradPtr,avgBiasGradPtr,avgWeightGradVarPtr,avgBiasGradVarPtr,avgWeightGradCurvePtr,avgBiasGradCurvePtr,avgWeightGradCurveVarPtr,avgBiasGradCurveVarPtr,weightMemorySizePtr,biasMemorySizePtr,dropoutMaskPtr,prevLayerSize,thisLayerSize); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( FullyConnectedAdjustMemoryKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, 
weightsGradPtr,biasGradPtr,weightGradCurvePtr,biasGradCurvePtr,avgWeightGradPtr,avgBiasGradPtr,avgWeightGradVarPtr,avgBiasGradVarPtr,avgWeightGradCurvePtr,avgBiasGradCurvePtr,avgWeightGradCurveVarPtr,avgBiasGradCurveVarPtr,weightMemorySizePtr,biasMemorySizePtr,dropoutMaskPtr,prevLayerSize,thisLayerSize); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
ae7c730f68678425a98032387f82039af40e108e.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "FullyConnectedAdjustMemoryKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *weightsGradPtr = NULL; cudaMalloc(&weightsGradPtr, XSIZE*YSIZE); float *biasGradPtr = NULL; cudaMalloc(&biasGradPtr, XSIZE*YSIZE); float *weightGradCurvePtr = NULL; cudaMalloc(&weightGradCurvePtr, XSIZE*YSIZE); float *biasGradCurvePtr = NULL; cudaMalloc(&biasGradCurvePtr, XSIZE*YSIZE); float *avgWeightGradPtr = NULL; cudaMalloc(&avgWeightGradPtr, XSIZE*YSIZE); float *avgBiasGradPtr = NULL; cudaMalloc(&avgBiasGradPtr, XSIZE*YSIZE); float *avgWeightGradVarPtr = NULL; cudaMalloc(&avgWeightGradVarPtr, XSIZE*YSIZE); float *avgBiasGradVarPtr = NULL; cudaMalloc(&avgBiasGradVarPtr, XSIZE*YSIZE); float *avgWeightGradCurvePtr = NULL; cudaMalloc(&avgWeightGradCurvePtr, XSIZE*YSIZE); float *avgBiasGradCurvePtr = NULL; cudaMalloc(&avgBiasGradCurvePtr, XSIZE*YSIZE); float *avgWeightGradCurveVarPtr = NULL; cudaMalloc(&avgWeightGradCurveVarPtr, XSIZE*YSIZE); float *avgBiasGradCurveVarPtr = NULL; cudaMalloc(&avgBiasGradCurveVarPtr, XSIZE*YSIZE); float *weightMemorySizePtr = NULL; cudaMalloc(&weightMemorySizePtr, XSIZE*YSIZE); float *biasMemorySizePtr = NULL; cudaMalloc(&biasMemorySizePtr, XSIZE*YSIZE); float *dropoutMaskPtr = NULL; cudaMalloc(&dropoutMaskPtr, XSIZE*YSIZE); int prevLayerSize = XSIZE*YSIZE; int thisLayerSize = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); FullyConnectedAdjustMemoryKernel<<<gridBlock,threadBlock>>>(weightsGradPtr,biasGradPtr,weightGradCurvePtr,biasGradCurvePtr,avgWeightGradPtr,avgBiasGradPtr,avgWeightGradVarPtr,avgBiasGradVarPtr,avgWeightGradCurvePtr,avgBiasGradCurvePtr,avgWeightGradCurveVarPtr,avgBiasGradCurveVarPtr,weightMemorySizePtr,biasMemorySizePtr,dropoutMaskPtr,prevLayerSize,thisLayerSize); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { FullyConnectedAdjustMemoryKernel<<<gridBlock,threadBlock>>>(weightsGradPtr,biasGradPtr,weightGradCurvePtr,biasGradCurvePtr,avgWeightGradPtr,avgBiasGradPtr,avgWeightGradVarPtr,avgBiasGradVarPtr,avgWeightGradCurvePtr,avgBiasGradCurvePtr,avgWeightGradCurveVarPtr,avgBiasGradCurveVarPtr,weightMemorySizePtr,biasMemorySizePtr,dropoutMaskPtr,prevLayerSize,thisLayerSize); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { 
FullyConnectedAdjustMemoryKernel<<<gridBlock,threadBlock>>>(weightsGradPtr,biasGradPtr,weightGradCurvePtr,biasGradCurvePtr,avgWeightGradPtr,avgBiasGradPtr,avgWeightGradVarPtr,avgBiasGradVarPtr,avgWeightGradCurvePtr,avgBiasGradCurvePtr,avgWeightGradCurveVarPtr,avgBiasGradCurveVarPtr,weightMemorySizePtr,biasMemorySizePtr,dropoutMaskPtr,prevLayerSize,thisLayerSize); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
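/*
 * The timing loop above reads steady_clock right after the last launch;
 * because kernel launches are asynchronous, the host clock can stop before
 * the device has drained the queued work, so the reported microseconds may
 * under-count execution time. A hedged alternative is to bracket the loop
 * with CUDA events and synchronize on the stop event. time_kernel_loop is an
 * illustrative helper, not part of this benchmark.
 */
#include <cuda_runtime.h>

template <typename LaunchFn>
float time_kernel_loop(LaunchFn launch, int iters)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, 0);
    for (int i = 0; i < iters; i++) launch();   // e.g. a lambda wrapping the kernel launch
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);                 // wait until every iteration has finished

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms * 1000.0f / iters;                // mean microseconds per launch
}

// usage sketch (grid/block/argument names as in the loop above):
//   float usecs = time_kernel_loop([&] {
//       FullyConnectedAdjustMemoryKernel<<<gridBlock, threadBlock>>>(weightsGradPtr, biasGradPtr,
//           weightGradCurvePtr, biasGradCurvePtr, avgWeightGradPtr, avgBiasGradPtr,
//           avgWeightGradVarPtr, avgBiasGradVarPtr, avgWeightGradCurvePtr, avgBiasGradCurvePtr,
//           avgWeightGradCurveVarPtr, avgBiasGradCurveVarPtr, weightMemorySizePtr,
//           biasMemorySizePtr, dropoutMaskPtr, prevLayerSize, thisLayerSize);
//   }, 1000);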
080bc2627d1481518cb7b87fa157fb663940f97e.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>

#define N (1024*1024)
#define FULL_DATA_SIZE (N*20) // split into 20 chunks

__global__ void kernel(int *a, int *b, int *c)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < N) {
        int idx1 = (idx + 1) % 256;
        int idx2 = (idx + 2) % 256;
        float as = (a[idx] + a[idx1] + a[idx2]) / 3.0f;
        float bs = (b[idx] + b[idx1] + b[idx2]) / 3.0f;
        c[idx] = (as + bs) / 2;
    }
}

int main()
{
    hipDeviceProp_t prop;
    int whichDevice;
    hipGetDevice(&whichDevice);
    hipGetDeviceProperties(&prop, whichDevice);
    if (!prop.deviceOverlap) {
        printf("Device will not handle overlaps, so no speed up from streams\n");
        return 0;
    }

    hipEvent_t start, stop;
    float elapsedTime;

    // start the timers
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);

    // initialize the streams
    hipStream_t stream0, stream1;
    hipStreamCreate(&stream0);
    hipStreamCreate(&stream1);

    int *host_a, *host_b, *host_c;
    int *dev_a0, *dev_b0, *dev_c0;
    int *dev_a1, *dev_b1, *dev_c1;

    hipMalloc((void**)&dev_a0, N * sizeof(int));
    hipMalloc((void**)&dev_b0, N * sizeof(int));
    hipMalloc((void**)&dev_c0, N * sizeof(int));
    hipMalloc((void**)&dev_a1, N * sizeof(int));
    hipMalloc((void**)&dev_b1, N * sizeof(int));
    hipMalloc((void**)&dev_c1, N * sizeof(int));

    hipHostMalloc((void**)&host_a, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault);
    hipHostMalloc((void**)&host_b, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault);
    hipHostMalloc((void**)&host_c, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault);

    for (int i = 0; i < FULL_DATA_SIZE; i++) {
        host_a[i] = rand();
        host_b[i] = rand();
    }

#if 0
    for (int i = 0; i < FULL_DATA_SIZE; i += N * 2) {
        hipMemcpyAsync(dev_a0, host_a + i, N * sizeof(int), hipMemcpyHostToDevice, stream0);
        hipMemcpyAsync(dev_b0, host_b + i, N * sizeof(int), hipMemcpyHostToDevice, stream0);
        kernel << <N / 256, 256, 0, stream0 >> > (dev_a0, dev_b0, dev_c0);
        hipMemcpyAsync(host_c + i, dev_c0, N * sizeof(int), hipMemcpyDeviceToHost, stream0);

        hipMemcpyAsync(dev_a1, host_a + i + N, N * sizeof(int), hipMemcpyHostToDevice, stream1);
        hipMemcpyAsync(dev_b1, host_b + i + N, N * sizeof(int), hipMemcpyHostToDevice, stream1);
        kernel << <N / 256, 256, 0, stream1 >> > (dev_a1, dev_b1, dev_c1);
        hipMemcpyAsync(host_c + i + N, dev_c1, N*sizeof(int), hipMemcpyDeviceToHost, stream1);
    }
#else
    for (int i = 0; i < FULL_DATA_SIZE; i += N * 2) {
        hipMemcpyAsync(dev_a0, host_a + i, N * sizeof(int), hipMemcpyHostToDevice, stream0);
        hipMemcpyAsync(dev_a1, host_a + i + N, N * sizeof(int), hipMemcpyHostToDevice, stream1);
        hipMemcpyAsync(dev_b0, host_b + i, N * sizeof(int), hipMemcpyHostToDevice, stream0);
        hipMemcpyAsync(dev_b1, host_b + i + N, N * sizeof(int), hipMemcpyHostToDevice, stream1);
        kernel << <N / 256, 256, 0, stream0 >> > (dev_a0, dev_b0, dev_c0);
        kernel << <N / 256, 256, 0, stream1 >> > (dev_a1, dev_b1, dev_c1);
        hipMemcpyAsync(host_c + i, dev_c0, N * sizeof(int), hipMemcpyDeviceToHost, stream0);
        hipMemcpyAsync(host_c + i + N, dev_c1, N * sizeof(int), hipMemcpyDeviceToHost, stream1);
    }
#endif

    hipStreamSynchronize(stream0);
    hipStreamSynchronize(stream1);

    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&elapsedTime, start, stop);
    printf("Time taken: %3.1f ms\n", elapsedTime);

    // cleanup the streams and memory
    hipHostFree(host_a);
    hipHostFree(host_b);
    hipHostFree(host_c);
    hipFree(dev_a0);
    hipFree(dev_a1);
    hipFree(dev_b0);
    hipFree(dev_b1);
    hipFree(dev_c0);
    hipFree(dev_c1);
hipStreamDestroy(stream0); hipStreamDestroy(stream1); return 0; }
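/*
 * Host reference for the chunked kernel above: within each N-element chunk
 * the kernel averages an element with neighbours (idx+1)%256 and (idx+2)%256
 * taken from the chunk-local view of a and b, then truncates to int. The
 * sketch below (verify_chunked_average is an illustrative name) recomputes
 * that on the CPU; calling verify_chunked_average(host_a, host_b, host_c,
 * FULL_DATA_SIZE, N) after the two stream synchronizations, and before the
 * buffers are freed, should return 0.
 */
static int verify_chunked_average(const int *a, const int *b, const int *c,
                                  int full_size, int chunk)
{
    int mismatches = 0;
    for (int off = 0; off < full_size; off += chunk) {
        for (int idx = 0; idx < chunk; idx++) {
            int i1 = (idx + 1) % 256, i2 = (idx + 2) % 256;
            float as = (a[off + idx] + a[off + i1] + a[off + i2]) / 3.0f;
            float bs = (b[off + idx] + b[off + i1] + b[off + i2]) / 3.0f;
            if (c[off + idx] != (int)((as + bs) / 2)) mismatches++;
        }
    }
    return mismatches;   /* 0 means host_c matches the kernel's formula */
}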
080bc2627d1481518cb7b87fa157fb663940f97e.cu
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>

#define N (1024*1024)
#define FULL_DATA_SIZE (N*20) // split into 20 chunks

__global__ void kernel(int *a, int *b, int *c)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < N) {
        int idx1 = (idx + 1) % 256;
        int idx2 = (idx + 2) % 256;
        float as = (a[idx] + a[idx1] + a[idx2]) / 3.0f;
        float bs = (b[idx] + b[idx1] + b[idx2]) / 3.0f;
        c[idx] = (as + bs) / 2;
    }
}

int main()
{
    cudaDeviceProp prop;
    int whichDevice;
    cudaGetDevice(&whichDevice);
    cudaGetDeviceProperties(&prop, whichDevice);
    if (!prop.deviceOverlap) {
        printf("Device will not handle overlaps, so no speed up from streams\n");
        return 0;
    }

    cudaEvent_t start, stop;
    float elapsedTime;

    // start the timers
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    // initialize the streams
    cudaStream_t stream0, stream1;
    cudaStreamCreate(&stream0);
    cudaStreamCreate(&stream1);

    int *host_a, *host_b, *host_c;
    int *dev_a0, *dev_b0, *dev_c0;
    int *dev_a1, *dev_b1, *dev_c1;

    cudaMalloc((void**)&dev_a0, N * sizeof(int));
    cudaMalloc((void**)&dev_b0, N * sizeof(int));
    cudaMalloc((void**)&dev_c0, N * sizeof(int));
    cudaMalloc((void**)&dev_a1, N * sizeof(int));
    cudaMalloc((void**)&dev_b1, N * sizeof(int));
    cudaMalloc((void**)&dev_c1, N * sizeof(int));

    cudaHostAlloc((void**)&host_a, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault);
    cudaHostAlloc((void**)&host_b, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault);
    cudaHostAlloc((void**)&host_c, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault);

    for (int i = 0; i < FULL_DATA_SIZE; i++) {
        host_a[i] = rand();
        host_b[i] = rand();
    }

#if 0
    for (int i = 0; i < FULL_DATA_SIZE; i += N * 2) {
        cudaMemcpyAsync(dev_a0, host_a + i, N * sizeof(int), cudaMemcpyHostToDevice, stream0);
        cudaMemcpyAsync(dev_b0, host_b + i, N * sizeof(int), cudaMemcpyHostToDevice, stream0);
        kernel << <N / 256, 256, 0, stream0 >> > (dev_a0, dev_b0, dev_c0);
        cudaMemcpyAsync(host_c + i, dev_c0, N * sizeof(int), cudaMemcpyDeviceToHost, stream0);

        cudaMemcpyAsync(dev_a1, host_a + i + N, N * sizeof(int), cudaMemcpyHostToDevice, stream1);
        cudaMemcpyAsync(dev_b1, host_b + i + N, N * sizeof(int), cudaMemcpyHostToDevice, stream1);
        kernel << <N / 256, 256, 0, stream1 >> > (dev_a1, dev_b1, dev_c1);
        cudaMemcpyAsync(host_c + i + N, dev_c1, N*sizeof(int), cudaMemcpyDeviceToHost, stream1);
    }
#else
    for (int i = 0; i < FULL_DATA_SIZE; i += N * 2) {
        cudaMemcpyAsync(dev_a0, host_a + i, N * sizeof(int), cudaMemcpyHostToDevice, stream0);
        cudaMemcpyAsync(dev_a1, host_a + i + N, N * sizeof(int), cudaMemcpyHostToDevice, stream1);
        cudaMemcpyAsync(dev_b0, host_b + i, N * sizeof(int), cudaMemcpyHostToDevice, stream0);
        cudaMemcpyAsync(dev_b1, host_b + i + N, N * sizeof(int), cudaMemcpyHostToDevice, stream1);
        kernel << <N / 256, 256, 0, stream0 >> > (dev_a0, dev_b0, dev_c0);
        kernel << <N / 256, 256, 0, stream1 >> > (dev_a1, dev_b1, dev_c1);
        cudaMemcpyAsync(host_c + i, dev_c0, N * sizeof(int), cudaMemcpyDeviceToHost, stream0);
        cudaMemcpyAsync(host_c + i + N, dev_c1, N * sizeof(int), cudaMemcpyDeviceToHost, stream1);
    }
#endif

    cudaStreamSynchronize(stream0);
    cudaStreamSynchronize(stream1);

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("Time taken: %3.1f ms\n", elapsedTime);

    // cleanup the streams and memory
    cudaFreeHost(host_a);
    cudaFreeHost(host_b);
    cudaFreeHost(host_c);
    cudaFree(dev_a0);
    cudaFree(dev_a1);
    cudaFree(dev_b0);
    cudaFree(dev_b1);
    cudaFree(dev_c0);
    cudaFree(dev_c1);
cudaStreamDestroy(stream0); cudaStreamDestroy(stream1); return 0; }
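/*
 * Sketch generalizing the two hard-coded streams above to an array of streams
 * with per-stream staging buffers, keeping the same breadth-first issue order
 * (all H2D copies, then all kernels, then all D2H copies per pass). Names such
 * as process_with_streams, chunked_average and nStreams are illustrative, not
 * part of the original program; it assumes the host buffers come from
 * cudaHostAlloc (as above) so the async copies can overlap, that full_size is
 * a multiple of nStreams * CHUNK, and error checking is omitted for brevity.
 */
#include <cuda_runtime.h>

#define CHUNK (1024 * 1024)

__global__ void chunked_average(const int *a, const int *b, int *c)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < CHUNK) {
        int i1 = (idx + 1) % 256, i2 = (idx + 2) % 256;
        float as = (a[idx] + a[i1] + a[i2]) / 3.0f;
        float bs = (b[idx] + b[i1] + b[i2]) / 3.0f;
        c[idx] = (as + bs) / 2;
    }
}

void process_with_streams(const int *host_a, const int *host_b, int *host_c,
                          int full_size, int nStreams)
{
    cudaStream_t *streams = new cudaStream_t[nStreams];
    int **devA = new int*[nStreams], **devB = new int*[nStreams], **devC = new int*[nStreams];
    for (int s = 0; s < nStreams; s++) {
        cudaStreamCreate(&streams[s]);
        cudaMalloc(&devA[s], CHUNK * sizeof(int));
        cudaMalloc(&devB[s], CHUNK * sizeof(int));
        cudaMalloc(&devC[s], CHUNK * sizeof(int));
    }

    for (int base = 0; base < full_size; base += nStreams * CHUNK) {
        for (int s = 0; s < nStreams; s++) {   // breadth-first: all H2D copies first
            int off = base + s * CHUNK;
            cudaMemcpyAsync(devA[s], host_a + off, CHUNK * sizeof(int), cudaMemcpyHostToDevice, streams[s]);
            cudaMemcpyAsync(devB[s], host_b + off, CHUNK * sizeof(int), cudaMemcpyHostToDevice, streams[s]);
        }
        for (int s = 0; s < nStreams; s++)     // then all kernels
            chunked_average<<<CHUNK / 256, 256, 0, streams[s]>>>(devA[s], devB[s], devC[s]);
        for (int s = 0; s < nStreams; s++)     // then all D2H copies
            cudaMemcpyAsync(host_c + base + s * CHUNK, devC[s], CHUNK * sizeof(int), cudaMemcpyDeviceToHost, streams[s]);
    }

    for (int s = 0; s < nStreams; s++) {
        cudaStreamSynchronize(streams[s]);
        cudaStreamDestroy(streams[s]);
        cudaFree(devA[s]); cudaFree(devB[s]); cudaFree(devC[s]);
    }
    delete[] streams; delete[] devA; delete[] devB; delete[] devC;
}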
4b4355c8c227fe5147111ba474c4ff4513e5d115.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Udacity Homework 3 HDR Tone-mapping Background HDR ============== A High Definition Range (HDR) image contains a wider variation of intensity and color than is allowed by the RGB format with 1 byte per channel that we have used in the previous assignment. To store this extra information we use single precision floating point for each channel. This allows for an extremely wide range of intensity values. In the image for this assignment, the inside of church with light coming in through stained glass windows, the raw input floating point values for the channels range from 0 to 275. But the mean is .41 and 98% of the values are less than 3! This means that certain areas (the windows) are extremely bright compared to everywhere else. If we linearly map this [0-275] range into the [0-255] range that we have been using then most values will be mapped to zero! The only thing we will be able to see are the very brightest areas - the windows - everything else will appear pitch black. The problem is that although we have cameras capable of recording the wide range of intensity that exists in the real world our monitors are not capable of displaying them. Our eyes are also quite capable of observing a much wider range of intensities than our image formats / monitors are capable of displaying. Tone-mapping is a process that transforms the intensities in the image so that the brightest values aren't nearly so far away from the mean. That way when we transform the values into [0-255] we can actually see the entire image. There are many ways to perform this process and it is as much an art as a science - there is no single "right" answer. In this homework we will implement one possible technique. Background Chrominance-Luminance ================================ The RGB space that we have been using to represent images can be thought of as one possible set of axes spanning a three dimensional space of color. We sometimes choose other axes to represent this space because they make certain operations more convenient. Another possible way of representing a color image is to separate the color information (chromaticity) from the brightness information. There are multiple different methods for doing this - a common one during the analog television days was known as Chrominance-Luminance or YUV. We choose to represent the image in this way so that we can remap only the intensity channel and then recombine the new intensity values with the color information to form the final image. Old TV signals used to be transmitted in this way so that black & white televisions could display the luminance channel while color televisions would display all three of the channels. Tone-mapping ============ In this assignment we are going to transform the luminance channel (actually the log of the luminance, but this is unimportant for the parts of the algorithm that you will be implementing) by compressing its range to [0, 1]. To do this we need the cumulative distribution of the luminance values. Example ------- input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2] min / max / range: 0 / 9 / 9 histo with 3 bins: [4 7 3] cdf : [4 11 14] Your task is to calculate this cumulative distribution by following these steps. 
*/ #include "reference_calc.cpp" #include "utils.h" #include <cstdio> __global__ void ReduceOnce(float* const d, const size_t n, const size_t w, const int op) { int tidx = blockIdx.x * blockDim.x + threadIdx.x; if (tidx >= w) return; float a = d[tidx]; if (tidx + w < n) { float b = d[tidx + w]; a = (op == 0) ? min(a, b) : max(a, b); } d[tidx] = a; } __global__ void BuildHistogram(const float* const d_val, const size_t num_val, unsigned int* const d_bin, const size_t num_bin, const float min_val, const float range) { int tidx = blockIdx.x * blockDim.x + threadIdx.x; if (tidx >= num_val) return; unsigned int bin_idx = min((unsigned int)(num_bin - 1), (unsigned int)((d_val[tidx] - min_val) / range * num_bin)); atomicAdd(&d_bin[bin_idx], 1); } __global__ void ExclusivePrefixSum(const unsigned int* const d_histo, unsigned int* const d_cdf, const size_t num_bin) { __shared__ unsigned int bin[1024]; int tidx = threadIdx.x; bin[tidx] = d_histo[tidx]; __syncthreads(); // Reduce for (size_t w = 2; w <= 1024; w <<= 1) { if (tidx % w == w - 1) bin[tidx] += bin[tidx - w / 2]; __syncthreads(); } // Downsweep if (tidx == 1023) bin[1023] = 0; __syncthreads(); for (size_t w = 1024; w >= 2; w >>= 1) { if (tidx % w == w - 1) { unsigned int s = bin[tidx - w / 2] + bin[tidx]; bin[tidx - w / 2] = bin[tidx]; bin[tidx] = s; } __syncthreads(); } d_cdf[tidx] = bin[tidx]; } void your_histogram_and_prefixsum(const float* const d_logLuminance, unsigned int* const d_cdf, float &min_logLum, float &max_logLum, const size_t numRows, const size_t numCols, const size_t numBins) { /*Here are the steps you need to implement 1) find the minimum and maximum value in the input logLuminance channel store in min_logLum and max_logLum 2) subtract them to find the range 3) generate a histogram of all the values in the logLuminance channel using the formula: bin = (lum[i] - lumMin) / lumRange * numBins 4) Perform an exclusive scan (prefix sum) on the histogram to get the cumulative distribution of luminance values (this should go in the incoming d_cdf pointer which already has been allocated for you) */ const size_t MAX_NUM_THREADS_PER_BLOCK = 1024; const size_t numPixels = numRows * numCols; int exponent = 0; while ((1 << (exponent + 1)) < numPixels) ++exponent; float *d_aux; checkCudaErrors(hipMalloc(&d_aux, sizeof(float) * numPixels)); // Find min_logLum checkCudaErrors(hipMemcpy(d_aux, d_logLuminance, sizeof(float) * numPixels, hipMemcpyDeviceToDevice)); for (size_t width = 1 << exponent; width >= 1; width >>= 1) { const dim3 blockSize(MAX_NUM_THREADS_PER_BLOCK); const dim3 gridSize((width + MAX_NUM_THREADS_PER_BLOCK - 1) / MAX_NUM_THREADS_PER_BLOCK); hipLaunchKernelGGL(( ReduceOnce), dim3(gridSize), dim3(blockSize), 0, 0, d_aux, numPixels, width, 0); // 0 - min } float h_min_logLum; checkCudaErrors(hipMemcpy(&h_min_logLum, d_aux, sizeof(float), hipMemcpyDeviceToHost)); min_logLum = h_min_logLum; // Find max_logLum checkCudaErrors(hipMemcpy(d_aux, d_logLuminance, sizeof(float) * numPixels, hipMemcpyDeviceToDevice)); for (size_t width = 1 << exponent; width >= 1; width >>= 1) { const dim3 blockSize(MAX_NUM_THREADS_PER_BLOCK); const dim3 gridSize((width + MAX_NUM_THREADS_PER_BLOCK - 1) / MAX_NUM_THREADS_PER_BLOCK); hipLaunchKernelGGL(( ReduceOnce), dim3(gridSize), dim3(blockSize), 0, 0, d_aux, numPixels, width, 1); // 1 - max } float h_max_logLum; checkCudaErrors(hipMemcpy(&h_max_logLum, d_aux, sizeof(float), hipMemcpyDeviceToHost)); max_logLum = h_max_logLum; checkCudaErrors(hipFree(d_aux)); // Find the range float logLumRange = 
max_logLum - min_logLum; // Build histogram unsigned int *d_histo; checkCudaErrors(hipMalloc(&d_histo, sizeof(unsigned int) * numBins)); checkCudaErrors(hipMemset(d_histo, 0, sizeof(unsigned int) * numBins)); { const dim3 blockSize(MAX_NUM_THREADS_PER_BLOCK); const dim3 gridSize((numPixels + MAX_NUM_THREADS_PER_BLOCK - 1) / MAX_NUM_THREADS_PER_BLOCK); hipLaunchKernelGGL(( BuildHistogram), dim3(gridSize), dim3(blockSize), 0, 0, d_logLuminance, numPixels, d_histo, numBins, min_logLum, logLumRange); } // Calculate the cumulative distribution { const dim3 blockSize(MAX_NUM_THREADS_PER_BLOCK); const dim3 gridSize(1); // numBins = 1024 = MAX_NUM_THREADS_PER_BLOCK const size_t sharedMemoryBytes = sizeof(unsigned int) * numBins; hipLaunchKernelGGL(( ExclusivePrefixSum), dim3(gridSize), dim3(blockSize), sharedMemoryBytes, 0, d_histo, d_cdf, numBins); } checkCudaErrors(hipFree(d_histo)); /**************************************************************************** * You can use the code below to help with debugging, but make sure to * * comment it out again before submitting your assignment for grading, * * otherwise this code will take too much time and make it seem like your * * GPU implementation isn't fast enough. * * * * This code generates a reference cdf on the host by running the * * reference calculation we have given you. It then copies your GPU * * generated cdf back to the host and calls a function that compares the * * the two and will output the first location they differ. * * ************************************************************************* */ /* float *h_logLuminance = new float[numRows * numCols]; unsigned int *h_cdf = new unsigned int[numBins]; unsigned int *h_your_cdf = new unsigned int[numBins]; checkCudaErrors(hipMemcpy(h_logLuminance, d_logLuminance, numCols * numRows * sizeof(float), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_your_cdf, d_cdf, numBins * sizeof(unsigned int), hipMemcpyDeviceToHost)); referenceCalculation(h_logLuminance, h_cdf, numRows, numCols, numBins); //compare the results of the CDF checkResultsExact(h_cdf, h_your_cdf, numBins); delete[] h_logLuminance; delete[] h_cdf; delete[] h_your_cdf; */ }
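/*
 * Compact host reference for the four steps listed above (min/max of the
 * luminance, range, histogram, exclusive scan), useful for reasoning about
 * what the kernels must produce. cpu_histogram_and_cdf is an illustrative
 * name and this is a sketch rather than the course's reference_calc
 * implementation.
 */
#include <stddef.h>

static void cpu_histogram_and_cdf(const float *logLum, size_t numPixels,
                                  unsigned int *cdf, size_t numBins,
                                  float *minOut, float *maxOut)
{
    /* step 1: minimum and maximum of the luminance channel */
    float mn = logLum[0], mx = logLum[0];
    for (size_t i = 1; i < numPixels; i++) {
        if (logLum[i] < mn) mn = logLum[i];
        if (logLum[i] > mx) mx = logLum[i];
    }
    *minOut = mn; *maxOut = mx;

    /* step 2: range; step 3: histogram with bin = (lum - min) / range * numBins */
    float range = mx - mn;
    for (size_t b = 0; b < numBins; b++) cdf[b] = 0;
    for (size_t i = 0; i < numPixels; i++) {
        size_t bin = (size_t)((logLum[i] - mn) / range * numBins);
        if (bin >= numBins) bin = numBins - 1;
        cdf[bin]++;
    }

    /* step 4: exclusive prefix sum of the histogram, done in place */
    unsigned int running = 0;
    for (size_t b = 0; b < numBins; b++) {
        unsigned int count = cdf[b];
        cdf[b] = running;
        running += count;
    }
}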
4b4355c8c227fe5147111ba474c4ff4513e5d115.cu
/* Udacity Homework 3 HDR Tone-mapping Background HDR ============== A High Definition Range (HDR) image contains a wider variation of intensity and color than is allowed by the RGB format with 1 byte per channel that we have used in the previous assignment. To store this extra information we use single precision floating point for each channel. This allows for an extremely wide range of intensity values. In the image for this assignment, the inside of church with light coming in through stained glass windows, the raw input floating point values for the channels range from 0 to 275. But the mean is .41 and 98% of the values are less than 3! This means that certain areas (the windows) are extremely bright compared to everywhere else. If we linearly map this [0-275] range into the [0-255] range that we have been using then most values will be mapped to zero! The only thing we will be able to see are the very brightest areas - the windows - everything else will appear pitch black. The problem is that although we have cameras capable of recording the wide range of intensity that exists in the real world our monitors are not capable of displaying them. Our eyes are also quite capable of observing a much wider range of intensities than our image formats / monitors are capable of displaying. Tone-mapping is a process that transforms the intensities in the image so that the brightest values aren't nearly so far away from the mean. That way when we transform the values into [0-255] we can actually see the entire image. There are many ways to perform this process and it is as much an art as a science - there is no single "right" answer. In this homework we will implement one possible technique. Background Chrominance-Luminance ================================ The RGB space that we have been using to represent images can be thought of as one possible set of axes spanning a three dimensional space of color. We sometimes choose other axes to represent this space because they make certain operations more convenient. Another possible way of representing a color image is to separate the color information (chromaticity) from the brightness information. There are multiple different methods for doing this - a common one during the analog television days was known as Chrominance-Luminance or YUV. We choose to represent the image in this way so that we can remap only the intensity channel and then recombine the new intensity values with the color information to form the final image. Old TV signals used to be transmitted in this way so that black & white televisions could display the luminance channel while color televisions would display all three of the channels. Tone-mapping ============ In this assignment we are going to transform the luminance channel (actually the log of the luminance, but this is unimportant for the parts of the algorithm that you will be implementing) by compressing its range to [0, 1]. To do this we need the cumulative distribution of the luminance values. Example ------- input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2] min / max / range: 0 / 9 / 9 histo with 3 bins: [4 7 3] cdf : [4 11 14] Your task is to calculate this cumulative distribution by following these steps. */ #include "reference_calc.cpp" #include "utils.h" #include <cstdio> __global__ void ReduceOnce(float* const d, const size_t n, const size_t w, const int op) { int tidx = blockIdx.x * blockDim.x + threadIdx.x; if (tidx >= w) return; float a = d[tidx]; if (tidx + w < n) { float b = d[tidx + w]; a = (op == 0) ? 
min(a, b) : max(a, b); } d[tidx] = a; } __global__ void BuildHistogram(const float* const d_val, const size_t num_val, unsigned int* const d_bin, const size_t num_bin, const float min_val, const float range) { int tidx = blockIdx.x * blockDim.x + threadIdx.x; if (tidx >= num_val) return; unsigned int bin_idx = min((unsigned int)(num_bin - 1), (unsigned int)((d_val[tidx] - min_val) / range * num_bin)); atomicAdd(&d_bin[bin_idx], 1); } __global__ void ExclusivePrefixSum(const unsigned int* const d_histo, unsigned int* const d_cdf, const size_t num_bin) { __shared__ unsigned int bin[1024]; int tidx = threadIdx.x; bin[tidx] = d_histo[tidx]; __syncthreads(); // Reduce for (size_t w = 2; w <= 1024; w <<= 1) { if (tidx % w == w - 1) bin[tidx] += bin[tidx - w / 2]; __syncthreads(); } // Downsweep if (tidx == 1023) bin[1023] = 0; __syncthreads(); for (size_t w = 1024; w >= 2; w >>= 1) { if (tidx % w == w - 1) { unsigned int s = bin[tidx - w / 2] + bin[tidx]; bin[tidx - w / 2] = bin[tidx]; bin[tidx] = s; } __syncthreads(); } d_cdf[tidx] = bin[tidx]; } void your_histogram_and_prefixsum(const float* const d_logLuminance, unsigned int* const d_cdf, float &min_logLum, float &max_logLum, const size_t numRows, const size_t numCols, const size_t numBins) { /*Here are the steps you need to implement 1) find the minimum and maximum value in the input logLuminance channel store in min_logLum and max_logLum 2) subtract them to find the range 3) generate a histogram of all the values in the logLuminance channel using the formula: bin = (lum[i] - lumMin) / lumRange * numBins 4) Perform an exclusive scan (prefix sum) on the histogram to get the cumulative distribution of luminance values (this should go in the incoming d_cdf pointer which already has been allocated for you) */ const size_t MAX_NUM_THREADS_PER_BLOCK = 1024; const size_t numPixels = numRows * numCols; int exponent = 0; while ((1 << (exponent + 1)) < numPixels) ++exponent; float *d_aux; checkCudaErrors(cudaMalloc(&d_aux, sizeof(float) * numPixels)); // Find min_logLum checkCudaErrors(cudaMemcpy(d_aux, d_logLuminance, sizeof(float) * numPixels, cudaMemcpyDeviceToDevice)); for (size_t width = 1 << exponent; width >= 1; width >>= 1) { const dim3 blockSize(MAX_NUM_THREADS_PER_BLOCK); const dim3 gridSize((width + MAX_NUM_THREADS_PER_BLOCK - 1) / MAX_NUM_THREADS_PER_BLOCK); ReduceOnce<<<gridSize, blockSize>>>(d_aux, numPixels, width, 0); // 0 - min } float h_min_logLum; checkCudaErrors(cudaMemcpy(&h_min_logLum, d_aux, sizeof(float), cudaMemcpyDeviceToHost)); min_logLum = h_min_logLum; // Find max_logLum checkCudaErrors(cudaMemcpy(d_aux, d_logLuminance, sizeof(float) * numPixels, cudaMemcpyDeviceToDevice)); for (size_t width = 1 << exponent; width >= 1; width >>= 1) { const dim3 blockSize(MAX_NUM_THREADS_PER_BLOCK); const dim3 gridSize((width + MAX_NUM_THREADS_PER_BLOCK - 1) / MAX_NUM_THREADS_PER_BLOCK); ReduceOnce<<<gridSize, blockSize>>>(d_aux, numPixels, width, 1); // 1 - max } float h_max_logLum; checkCudaErrors(cudaMemcpy(&h_max_logLum, d_aux, sizeof(float), cudaMemcpyDeviceToHost)); max_logLum = h_max_logLum; checkCudaErrors(cudaFree(d_aux)); // Find the range float logLumRange = max_logLum - min_logLum; // Build histogram unsigned int *d_histo; checkCudaErrors(cudaMalloc(&d_histo, sizeof(unsigned int) * numBins)); checkCudaErrors(cudaMemset(d_histo, 0, sizeof(unsigned int) * numBins)); { const dim3 blockSize(MAX_NUM_THREADS_PER_BLOCK); const dim3 gridSize((numPixels + MAX_NUM_THREADS_PER_BLOCK - 1) / MAX_NUM_THREADS_PER_BLOCK); 
BuildHistogram<<<gridSize, blockSize>>>(d_logLuminance, numPixels, d_histo, numBins, min_logLum, logLumRange); } // Calculate the cumulative distribution { const dim3 blockSize(MAX_NUM_THREADS_PER_BLOCK); const dim3 gridSize(1); // numBins = 1024 = MAX_NUM_THREADS_PER_BLOCK const size_t sharedMemoryBytes = sizeof(unsigned int) * numBins; ExclusivePrefixSum<<<gridSize, blockSize, sharedMemoryBytes>>>(d_histo, d_cdf, numBins); } checkCudaErrors(cudaFree(d_histo)); /**************************************************************************** * You can use the code below to help with debugging, but make sure to * * comment it out again before submitting your assignment for grading, * * otherwise this code will take too much time and make it seem like your * * GPU implementation isn't fast enough. * * * * This code generates a reference cdf on the host by running the * * reference calculation we have given you. It then copies your GPU * * generated cdf back to the host and calls a function that compares the * * the two and will output the first location they differ. * * ************************************************************************* */ /* float *h_logLuminance = new float[numRows * numCols]; unsigned int *h_cdf = new unsigned int[numBins]; unsigned int *h_your_cdf = new unsigned int[numBins]; checkCudaErrors(cudaMemcpy(h_logLuminance, d_logLuminance, numCols * numRows * sizeof(float), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_your_cdf, d_cdf, numBins * sizeof(unsigned int), cudaMemcpyDeviceToHost)); referenceCalculation(h_logLuminance, h_cdf, numRows, numCols, numBins); //compare the results of the CDF checkResultsExact(h_cdf, h_your_cdf, numBins); delete[] h_logLuminance; delete[] h_cdf; delete[] h_your_cdf; */ }
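/*
 * Host mirror of the Blelloch scan used by ExclusivePrefixSum above: an
 * up-sweep (reduce) followed by a down-sweep, using the same
 * (i % w == w - 1) / (i - w/2) index pattern as the kernel.
 * cpu_blelloch_exclusive_scan is an illustrative name; n is assumed to be a
 * power of two, just as the kernel assumes 1024 bins.
 */
static void cpu_blelloch_exclusive_scan(unsigned int *bin, unsigned int n)
{
    /* up-sweep: each node accumulates its left child */
    for (unsigned int w = 2; w <= n; w <<= 1)
        for (unsigned int i = w - 1; i < n; i += w)
            bin[i] += bin[i - w / 2];

    /* down-sweep: clear the root, then push partial sums back down */
    bin[n - 1] = 0;
    for (unsigned int w = n; w >= 2; w >>= 1)
        for (unsigned int i = w - 1; i < n; i += w) {
            unsigned int s = bin[i - w / 2] + bin[i];
            bin[i - w / 2] = bin[i];
            bin[i] = s;
        }
}
/* e.g. scanning {1, 2, 3, 4} in place yields {0, 1, 3, 6}, matching what the
 * kernel writes to d_cdf for a 4-bin histogram. */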
e6a3c5656bb0a26976a6d5008b8d1c44465e4bff.hip
// !!! This is a file automatically generated by hipify!!! /****************************************************************************** * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ #include "matrix.h" #include "nv_wavenet.cuh" #include "nv_wavenet_util.cuh" #include "nv_wavenet_reference.h" #include <assert.h> #include <stdio.h> #include <vector> // initialize to identity matrix void id(Matrix& mat) { mat.randomize(0,0,100); for (int i = 0; i < ::min(mat.rows(), mat.cols()); i++) { mat.set(i, i, 0.f); } } Matrix* createMatrix(int r, int c) { float mean = 0.0; //float scale = 0.5 / r; float scale = 0.5; Matrix* m = new Matrix(r,c,false); m->randomize(mean,scale); //id(*m); //m->randomize(0,0); return m; } template <typename T_weight, typename T_data, int R, int S, int A> void runTest(int num_layers, int max_dilation, int batch_size, int num_iterations, int samples_per_iteration, int impl, bool testStreaming, bool inputsFromDevice=false, bool weightsFromDevice=false) { float mean = 0.0; float scale = 0.5 * 1; //float scale = 0.5 / R; // Just encode one-hot vector as an integer std::vector<int> yInPrev(batch_size); std::vector<int> yInCur(batch_size); for (int b=0; b<batch_size; b++) { yInPrev[b] = rand() % A; yInCur[b] = rand() % A; } std::vector<int> yOut(batch_size); Matrix outputSelectors(batch_size,samples_per_iteration); outputSelectors.randomize(0.5,1.0); Matrix embeddingsPrev(R,A,false); Matrix embeddingsCur(R,A,false); embeddingsPrev.randomize(mean,scale); embeddingsCur.randomize(mean,scale); //embeddingsPrev.randomize(0,0); //embeddingsCur.randomize(0,0); std::vector<Matrix*> Wprev(num_layers); std::vector<Matrix*> Wcur(num_layers); std::vector<Matrix*> Bh(num_layers); std::vector<Matrix*> Wres(num_layers); std::vector<Matrix*> Bres(num_layers); std::vector<Matrix*> Wskip(num_layers); std::vector<Matrix*> Bskip(num_layers); std::vector<Matrix*> skipOut(num_layers+1); // Retain results for dilated inputs std::vector<std::vector<Matrix*>> 
Xt(samples_per_iteration); for (int sample=0; sample<samples_per_iteration; sample++) { Xt[sample].resize(num_layers+1); } for (int l=0; l<num_layers; l++) { // Weights Wprev[l] = createMatrix(2*R,R); Wcur[l] = createMatrix(2*R,R); Bh[l] = createMatrix(2*R,1); Wres[l] = createMatrix(R,R); Bres[l] = createMatrix(R,1); Wskip[l] = createMatrix(S,R); Bskip[l] = createMatrix(S,1); /*id(*Wprev[l]); id(*Wcur[l]); id(*Bh[l]); //id(*Bres[l]); id(*Wskip[l]);*/ //Bres[l]->randomize(0,0); //Wres[l]->randomize(0,0); //Wprev[l]->randomize(0,0); //Wcur[l]->randomize(0,0); // Activations skipOut[l] = createMatrix(S,batch_size); } for (int sample=0; sample<samples_per_iteration; sample++) { for (int layer=0; layer<num_layers+1; layer++) { Xt[sample][layer] = createMatrix(R, batch_size); } } Matrix WskipInit(S,R,false); WskipInit.randomize(mean,scale); //WskipInit.randomize(0,0,100); Matrix BskipInit(S,1,false); BskipInit.randomize(mean, scale); //BskipInit.randomize(0,0,100); //BskipInit.set(S-1,0,-1.f); Matrix WskipOut(S,S,false); WskipOut.randomize(mean,scale); //WskipOut.randomize(0,0); //id(WskipOut); Matrix BskipOut(S,1,false); BskipOut.randomize(mean, scale); //BskipOut.randomize(0, 0); Matrix Wout(A,S,false); Wout.randomize(mean,scale); //Wout.randomize(0,0); Matrix Bout(A,1,false); Bout.randomize(mean,scale); //Bout.randomize(0,0); Matrix skipOutFinal(A,batch_size,false); Matrix out(A,batch_size,false); Matrix p(A,batch_size,false); Matrix zero(S,batch_size,false); for (int row = 0; row < S; row++) { for (int col = 0; col < batch_size; col++) { zero.set(row,col,0.f); } } nvWavenetReference ref(num_layers, batch_size, samples_per_iteration, R, S, A, max_dilation); nvWavenetInfer<T_weight,T_data,R,S,A>* infer = new nvWavenetInfer<T_weight,T_data,R,S,A>(num_layers, max_dilation, batch_size, testStreaming ? 
samples_per_iteration/2 : samples_per_iteration, impl); ref.setEmbeddings(embeddingsPrev.data(), embeddingsCur.data()); for (int l=0; l<num_layers; l++) { ref.setLayerWeights(l, Wprev[l]->data(), Wcur[l]->data(), Bh[l]->data(), Wres[l]->data(), Bres[l]->data(), Wskip[l]->data(), Bskip[l]->data()); } ref.setOutWeights(WskipOut.data(), BskipOut.data(), Wout.data(), Bout.data(), WskipInit.data(), BskipInit.data()); if (weightsFromDevice) { float* d_embeddingsPrev; float* d_embeddingsCur; gpuErrChk(hipMalloc(&d_embeddingsPrev, R*A*sizeof(float))); gpuErrChk(hipMemcpy(d_embeddingsPrev, embeddingsPrev.data(), R*A*sizeof(float), hipMemcpyHostToDevice)); gpuErrChk(hipMalloc(&d_embeddingsCur, R*A*sizeof(float))); gpuErrChk(hipMemcpy(d_embeddingsCur, embeddingsCur.data(), R*A*sizeof(float), hipMemcpyHostToDevice)); infer->setEmbeddings(d_embeddingsPrev, d_embeddingsCur); gpuErrChk(hipFree(d_embeddingsPrev)); gpuErrChk(hipFree(d_embeddingsCur)); float* d_Wprev; float* d_Wcur; float* d_Bh; float* d_Wres; float* d_Bres; float* d_Wskip; float* d_Bskip; for (int l=0; l<num_layers; l++) { gpuErrChk(hipMalloc(&d_Wprev, 2*R*R*sizeof(float))); gpuErrChk(hipMemcpy(d_Wprev, Wprev[l]->data(), 2*R*R*sizeof(float), hipMemcpyHostToDevice)); gpuErrChk(hipMalloc(&d_Wcur, 2*R*R*sizeof(float))); gpuErrChk(hipMemcpy(d_Wcur, Wcur[l]->data(), 2*R*R*sizeof(float), hipMemcpyHostToDevice)); gpuErrChk(hipMalloc(&d_Bh, 2*R*sizeof(float))); gpuErrChk(hipMemcpy(d_Bh, Bh[l]->data(), 2*R*sizeof(float), hipMemcpyHostToDevice)); gpuErrChk(hipMalloc(&d_Wres, R*R*sizeof(float))); gpuErrChk(hipMemcpy(d_Wres, Wres[l]->data(), R*R*sizeof(float), hipMemcpyHostToDevice)); gpuErrChk(hipMalloc(&d_Bres, R*sizeof(float))); gpuErrChk(hipMemcpy(d_Bres, Bres[l]->data(), R*sizeof(float), hipMemcpyHostToDevice)); gpuErrChk(hipMalloc(&d_Wskip, S*R*sizeof(float))); gpuErrChk(hipMemcpy(d_Wskip, Wskip[l]->data(), S*R*sizeof(float), hipMemcpyHostToDevice)); gpuErrChk(hipMalloc(&d_Bskip, S*sizeof(float))); gpuErrChk(hipMemcpy(d_Bskip, Bskip[l]->data(), S*sizeof(float), hipMemcpyHostToDevice)); infer->setLayerWeights(l, d_Wprev, d_Wcur, d_Bh, d_Wres, d_Bres, d_Wskip, d_Bskip); gpuErrChk(hipFree(d_Wprev)); gpuErrChk(hipFree(d_Wcur)); gpuErrChk(hipFree(d_Bh)); gpuErrChk(hipFree(d_Wres)); gpuErrChk(hipFree(d_Bres)); gpuErrChk(hipFree(d_Wskip)); gpuErrChk(hipFree(d_Bskip)); } float* d_WskipInit; float* d_BskipInit; float* d_WskipOut; float* d_BskipOut; float* d_Wout; float* d_Bout; // init gpuErrChk(hipMalloc(&d_WskipInit, S*R*sizeof(float))); gpuErrChk(hipMemcpy(d_WskipInit, WskipInit.data(), S*R*sizeof(float), hipMemcpyHostToDevice)); gpuErrChk(hipMalloc(&d_BskipInit, S*sizeof(float))); gpuErrChk(hipMemcpy(d_BskipInit, BskipInit.data(), S*sizeof(float), hipMemcpyHostToDevice)); // skip gpuErrChk(hipMalloc(&d_WskipOut, S*S*sizeof(float))); gpuErrChk(hipMemcpy(d_WskipOut, WskipOut.data(), S*S*sizeof(float), hipMemcpyHostToDevice)); gpuErrChk(hipMalloc(&d_BskipOut, S*sizeof(float))); gpuErrChk(hipMemcpy(d_BskipOut, BskipOut.data(), S*sizeof(float), hipMemcpyHostToDevice)); // out gpuErrChk(hipMalloc(&d_Wout, A*S*sizeof(float))); gpuErrChk(hipMemcpy(d_Wout, Wout.data(), A*S*sizeof(float), hipMemcpyHostToDevice)); gpuErrChk(hipMalloc(&d_Bout, A*sizeof(float))); gpuErrChk(hipMemcpy(d_Bout, Bout.data(), A*sizeof(float), hipMemcpyHostToDevice)); infer->setOutWeights(d_WskipInit, d_BskipInit, d_WskipOut, d_BskipOut, d_Wout, d_Bout); gpuErrChk(hipFree(d_WskipOut)); gpuErrChk(hipFree(d_BskipOut)); gpuErrChk(hipFree(d_Wout)); gpuErrChk(hipFree(d_Bout)); } else { 
infer->setEmbeddings(embeddingsPrev.data(), embeddingsCur.data()); for (int l=0; l<num_layers; l++) { infer->setLayerWeights(l, Wprev[l]->data(), Wcur[l]->data(), Bh[l]->data(), Wres[l]->data(), Bres[l]->data(), Wskip[l]->data(), Bskip[l]->data()); } infer->setOutWeights(WskipInit.data(), BskipInit.data(), WskipOut.data(), BskipOut.data(), Wout.data(), Bout.data()); } Matrix zeroMatrix(R,batch_size,false); for (int row=0; row<R; row++) { for (int col=0; col<batch_size; col++) { zeroMatrix.set(row,col,0.f); } } Matrix Lh(2*R,samples_per_iteration*num_layers*batch_size); assert(Lh.data()); Lh.randomize(mean,scale); //Lh.randomize(0,0,100); Matrix LhFinal(S,samples_per_iteration*batch_size); assert(LhFinal.data()); LhFinal.randomize(mean,scale); //LhFinal.randomize(0,0,100); ref.setInputs(Lh.data(), LhFinal.data(), outputSelectors.data()); if (inputsFromDevice) { float* d_Lh; gpuErrChk(hipMalloc(&d_Lh, 2*R*samples_per_iteration*num_layers*batch_size*sizeof(float))); float* d_LhFinal; gpuErrChk(hipMalloc(&d_LhFinal, S*samples_per_iteration*batch_size*sizeof(float))); float* d_outputSelectors; gpuErrChk(hipMalloc(&d_outputSelectors,samples_per_iteration*batch_size*sizeof(float))); gpuErrChk(hipMemcpy(d_Lh, Lh.data(), 2*R*samples_per_iteration*num_layers*batch_size*sizeof(float), hipMemcpyHostToDevice)); gpuErrChk(hipMemcpy(d_LhFinal, LhFinal.data(), S*samples_per_iteration*batch_size*sizeof(float), hipMemcpyHostToDevice)); gpuErrChk(hipMemcpy(d_outputSelectors, outputSelectors.data(), samples_per_iteration*batch_size*sizeof(float), hipMemcpyHostToDevice)); infer->setInputs(d_Lh, d_LhFinal, d_outputSelectors); gpuErrChk(hipFree(d_Lh)); gpuErrChk(hipFree(d_LhFinal)); gpuErrChk(hipFree(d_outputSelectors)); } else { infer->setInputs(Lh.data(), LhFinal.data(), outputSelectors.data()); } for (int i=0; i<num_iterations; i++) { printf("Iteration: %d\n", i); // Run reference implementation infer->reset(false); int batch_size_per_block = ((batch_size % 4) == 0) ? 4 : ((batch_size % 2) == 0) ? 2 : 1; int* refYout = (int*)malloc(samples_per_iteration*batch_size*sizeof(int)); int* mcYout = (int*)malloc(samples_per_iteration*batch_size*sizeof(int)); ref.run(samples_per_iteration, batch_size, refYout); if (!testStreaming) { assert(infer->run(samples_per_iteration, batch_size, mcYout, batch_size_per_block, true)); gpuErrChk(hipDeviceSynchronize()); } else { int* mcYoutTemp = (int*)malloc((samples_per_iteration+1)*batch_size*sizeof(int)); infer->setInputs(Lh.data(), LhFinal.data(), outputSelectors.data()); assert(infer->run(samples_per_iteration/2, batch_size, mcYoutTemp, batch_size_per_block, true)); gpuErrChk(hipDeviceSynchronize()); int shift1 = Lh.rows()*Lh.cols()/2; int shift2 = outputSelectors.rows()*outputSelectors.cols()/2; int shift3 = samples_per_iteration*batch_size/2; int shift4 = LhFinal.rows()*LhFinal.cols()/2; infer->setInputs(Lh.data()+shift1, LhFinal.data() + shift4, outputSelectors.data()+shift2); assert(infer->run(samples_per_iteration/2, batch_size, mcYoutTemp+shift3, batch_size_per_block, true)); gpuErrChk(hipDeviceSynchronize()); // mcYoutTemp now has outputs in the form: [run1_batch1_sample_1, ... run1_batch1_sample_N/2, r1_b2_s1,..., r1_bM_sN/2,..., r2_b1_sN/2+1,...] 
// change order to be by batch, not by batch per call to run() for (int b = 0; b < batch_size; b++) { for (int sample = 0; sample < samples_per_iteration/2; sample++) { mcYout[b*samples_per_iteration + sample] = mcYoutTemp[b*(samples_per_iteration/2) + sample]; mcYout[b*samples_per_iteration + samples_per_iteration/2 + sample] = mcYoutTemp[(b + batch_size)*(samples_per_iteration/2) + sample]; } } free(mcYoutTemp); } // Check results for (int l=0; l<num_layers; l++) { printf("Checking layer %d\n", l); Matrix refXout(R,batch_size); Matrix refSkipOut(S, batch_size); ref.getXtOut(l, refXout.data()); ref.getSkipOut(l, refSkipOut.data()); Matrix mcXout(R,batch_size,false); Matrix mcSkipOut(S,batch_size,false); infer->getXtOut(l, mcXout.data()); infer->getSkipOut(l, mcSkipOut.data()); matrix_compare("Xout", refXout, mcXout, 1.e-3); matrix_compare("skipOut", refSkipOut, mcSkipOut, 1.e-2, true); } Matrix refSkipOutFinal(S,batch_size); ref.getZs(refSkipOutFinal.data()); Matrix mcSkipOutFinal(S,batch_size,false); infer->getZs(mcSkipOutFinal.data()); matrix_compare("Zs", refSkipOutFinal, mcSkipOutFinal, 1.e-4, true); Matrix refOut(A,batch_size); ref.getZa(refOut.data()); Matrix mcOut(A,batch_size,false); infer->getZa(mcOut.data()); matrix_compare("Za", refOut, mcOut, 1.e-4); Matrix refP(A,batch_size); ref.getP(refP.data()); Matrix mcP(A,batch_size,false); infer->getP(mcP.data()); matrix_compare("p",refP,mcP,1.e-3); Matrix refSkip2(S,batch_size); ref.getSkip2(refSkip2.data()); Matrix mcSkip2(S,batch_size,false); infer->getSkipOut(num_layers, mcSkip2.data()); matrix_compare("Skip2", refSkip2, mcSkip2, 1.e-4, true); printf("Comparing yOut\n"); for (int i=0; i<samples_per_iteration*batch_size; i++) { printf("%d ", refYout[i]); assert(refYout[i] == mcYout[i]); printf("\n"); } printf("SUCCESS!\n"); } // Clean up delete infer; for (int l=0; l<num_layers; l++) { delete Wprev[l]; delete Wcur[l]; delete Bh[l]; delete Wres[l]; delete Bres[l]; delete Wskip[l]; delete Bskip[l]; for (int sample=0; sample<samples_per_iteration;sample++) { delete Xt[sample][l]; } delete skipOut[l]; } } int main(int argc, char* argv[]) { int num_layers = 20; int batch_size = 16; int test = 2; int num_samples = 1; bool test_streaming = false; if (argc > 1) num_layers = atoi(argv[1]); if (argc > 2) batch_size = atoi(argv[2]); if (argc > 3) test = atoi(argv[3]); if (argc > 4) num_samples = atoi(argv[4]); if (argc > 5) test_streaming = true; // How many samples to generate each time we invoke the kernel const int SAMPLES_PER_ITERATION = num_samples; const int MAX_DILATION = SAMPLES_PER_ITERATION; if (test_streaming) { assert((num_samples % 2) == 0); } srand(3); /*printf("Testing R=32, S=128\n"); printf(" Testing Single-Block\n"); runTest<float,float,32,128, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 1); printf(" Testing Dual-Block\n"); runTest<float,float,32,128, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 2); printf(" Testing Persistent\n"); runTest<float,float,32,128, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 3); printf("Testing R=64, S=128\n"); printf(" Testing Single-Block\n"); runTest<float,float,64,128, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 1, true, false); printf(" Testing Dual-Block\n"); runTest<float,float,64,128, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 2, false, true); printf(" Testing Persistent\n"); runTest<float,float,64,128, 256>(num_layers, MAX_DILATION, batch_size, 2, 
SAMPLES_PER_ITERATION, 3, true, true); printf("Testing R=64, S=256\n"); printf(" Testing Single-Block\n"); runTest<float,float,64,256, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 1); printf(" Testing Dual-Block\n"); runTest<float,float,64,256, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 2); printf(" Testing Persistent\n"); runTest<float,float,64,256, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 3); */ printf("Testing R=128, S=128\n"); switch(test) { case 1: printf(" Testing Single-Block\n"); runTest<float,float,128,128, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 1, test_streaming); break; case 2: printf(" Testing Dual-Block\n"); runTest<float,float,128,128, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 2, test_streaming); break; case 3: printf(" Testing Persistent\n"); runTest<float,float,128,128, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 3, test_streaming); break; } }
e6a3c5656bb0a26976a6d5008b8d1c44465e4bff.cu
/****************************************************************************** * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ #include "matrix.h" #include "nv_wavenet.cuh" #include "nv_wavenet_util.cuh" #include "nv_wavenet_reference.h" #include <assert.h> #include <stdio.h> #include <vector> // initialize to identity matrix void id(Matrix& mat) { mat.randomize(0,0,100); for (int i = 0; i < std::min(mat.rows(), mat.cols()); i++) { mat.set(i, i, 0.f); } } Matrix* createMatrix(int r, int c) { float mean = 0.0; //float scale = 0.5 / r; float scale = 0.5; Matrix* m = new Matrix(r,c,false); m->randomize(mean,scale); //id(*m); //m->randomize(0,0); return m; } template <typename T_weight, typename T_data, int R, int S, int A> void runTest(int num_layers, int max_dilation, int batch_size, int num_iterations, int samples_per_iteration, int impl, bool testStreaming, bool inputsFromDevice=false, bool weightsFromDevice=false) { float mean = 0.0; float scale = 0.5 * 1; //float scale = 0.5 / R; // Just encode one-hot vector as an integer std::vector<int> yInPrev(batch_size); std::vector<int> yInCur(batch_size); for (int b=0; b<batch_size; b++) { yInPrev[b] = rand() % A; yInCur[b] = rand() % A; } std::vector<int> yOut(batch_size); Matrix outputSelectors(batch_size,samples_per_iteration); outputSelectors.randomize(0.5,1.0); Matrix embeddingsPrev(R,A,false); Matrix embeddingsCur(R,A,false); embeddingsPrev.randomize(mean,scale); embeddingsCur.randomize(mean,scale); //embeddingsPrev.randomize(0,0); //embeddingsCur.randomize(0,0); std::vector<Matrix*> Wprev(num_layers); std::vector<Matrix*> Wcur(num_layers); std::vector<Matrix*> Bh(num_layers); std::vector<Matrix*> Wres(num_layers); std::vector<Matrix*> Bres(num_layers); std::vector<Matrix*> Wskip(num_layers); std::vector<Matrix*> Bskip(num_layers); std::vector<Matrix*> skipOut(num_layers+1); // Retain results for dilated inputs std::vector<std::vector<Matrix*>> Xt(samples_per_iteration); for (int sample=0; sample<samples_per_iteration; 
sample++) { Xt[sample].resize(num_layers+1); } for (int l=0; l<num_layers; l++) { // Weights Wprev[l] = createMatrix(2*R,R); Wcur[l] = createMatrix(2*R,R); Bh[l] = createMatrix(2*R,1); Wres[l] = createMatrix(R,R); Bres[l] = createMatrix(R,1); Wskip[l] = createMatrix(S,R); Bskip[l] = createMatrix(S,1); /*id(*Wprev[l]); id(*Wcur[l]); id(*Bh[l]); //id(*Bres[l]); id(*Wskip[l]);*/ //Bres[l]->randomize(0,0); //Wres[l]->randomize(0,0); //Wprev[l]->randomize(0,0); //Wcur[l]->randomize(0,0); // Activations skipOut[l] = createMatrix(S,batch_size); } for (int sample=0; sample<samples_per_iteration; sample++) { for (int layer=0; layer<num_layers+1; layer++) { Xt[sample][layer] = createMatrix(R, batch_size); } } Matrix WskipInit(S,R,false); WskipInit.randomize(mean,scale); //WskipInit.randomize(0,0,100); Matrix BskipInit(S,1,false); BskipInit.randomize(mean, scale); //BskipInit.randomize(0,0,100); //BskipInit.set(S-1,0,-1.f); Matrix WskipOut(S,S,false); WskipOut.randomize(mean,scale); //WskipOut.randomize(0,0); //id(WskipOut); Matrix BskipOut(S,1,false); BskipOut.randomize(mean, scale); //BskipOut.randomize(0, 0); Matrix Wout(A,S,false); Wout.randomize(mean,scale); //Wout.randomize(0,0); Matrix Bout(A,1,false); Bout.randomize(mean,scale); //Bout.randomize(0,0); Matrix skipOutFinal(A,batch_size,false); Matrix out(A,batch_size,false); Matrix p(A,batch_size,false); Matrix zero(S,batch_size,false); for (int row = 0; row < S; row++) { for (int col = 0; col < batch_size; col++) { zero.set(row,col,0.f); } } nvWavenetReference ref(num_layers, batch_size, samples_per_iteration, R, S, A, max_dilation); nvWavenetInfer<T_weight,T_data,R,S,A>* infer = new nvWavenetInfer<T_weight,T_data,R,S,A>(num_layers, max_dilation, batch_size, testStreaming ? samples_per_iteration/2 : samples_per_iteration, impl); ref.setEmbeddings(embeddingsPrev.data(), embeddingsCur.data()); for (int l=0; l<num_layers; l++) { ref.setLayerWeights(l, Wprev[l]->data(), Wcur[l]->data(), Bh[l]->data(), Wres[l]->data(), Bres[l]->data(), Wskip[l]->data(), Bskip[l]->data()); } ref.setOutWeights(WskipOut.data(), BskipOut.data(), Wout.data(), Bout.data(), WskipInit.data(), BskipInit.data()); if (weightsFromDevice) { float* d_embeddingsPrev; float* d_embeddingsCur; gpuErrChk(cudaMalloc(&d_embeddingsPrev, R*A*sizeof(float))); gpuErrChk(cudaMemcpy(d_embeddingsPrev, embeddingsPrev.data(), R*A*sizeof(float), cudaMemcpyHostToDevice)); gpuErrChk(cudaMalloc(&d_embeddingsCur, R*A*sizeof(float))); gpuErrChk(cudaMemcpy(d_embeddingsCur, embeddingsCur.data(), R*A*sizeof(float), cudaMemcpyHostToDevice)); infer->setEmbeddings(d_embeddingsPrev, d_embeddingsCur); gpuErrChk(cudaFree(d_embeddingsPrev)); gpuErrChk(cudaFree(d_embeddingsCur)); float* d_Wprev; float* d_Wcur; float* d_Bh; float* d_Wres; float* d_Bres; float* d_Wskip; float* d_Bskip; for (int l=0; l<num_layers; l++) { gpuErrChk(cudaMalloc(&d_Wprev, 2*R*R*sizeof(float))); gpuErrChk(cudaMemcpy(d_Wprev, Wprev[l]->data(), 2*R*R*sizeof(float), cudaMemcpyHostToDevice)); gpuErrChk(cudaMalloc(&d_Wcur, 2*R*R*sizeof(float))); gpuErrChk(cudaMemcpy(d_Wcur, Wcur[l]->data(), 2*R*R*sizeof(float), cudaMemcpyHostToDevice)); gpuErrChk(cudaMalloc(&d_Bh, 2*R*sizeof(float))); gpuErrChk(cudaMemcpy(d_Bh, Bh[l]->data(), 2*R*sizeof(float), cudaMemcpyHostToDevice)); gpuErrChk(cudaMalloc(&d_Wres, R*R*sizeof(float))); gpuErrChk(cudaMemcpy(d_Wres, Wres[l]->data(), R*R*sizeof(float), cudaMemcpyHostToDevice)); gpuErrChk(cudaMalloc(&d_Bres, R*sizeof(float))); gpuErrChk(cudaMemcpy(d_Bres, Bres[l]->data(), R*sizeof(float), 
cudaMemcpyHostToDevice)); gpuErrChk(cudaMalloc(&d_Wskip, S*R*sizeof(float))); gpuErrChk(cudaMemcpy(d_Wskip, Wskip[l]->data(), S*R*sizeof(float), cudaMemcpyHostToDevice)); gpuErrChk(cudaMalloc(&d_Bskip, S*sizeof(float))); gpuErrChk(cudaMemcpy(d_Bskip, Bskip[l]->data(), S*sizeof(float), cudaMemcpyHostToDevice)); infer->setLayerWeights(l, d_Wprev, d_Wcur, d_Bh, d_Wres, d_Bres, d_Wskip, d_Bskip); gpuErrChk(cudaFree(d_Wprev)); gpuErrChk(cudaFree(d_Wcur)); gpuErrChk(cudaFree(d_Bh)); gpuErrChk(cudaFree(d_Wres)); gpuErrChk(cudaFree(d_Bres)); gpuErrChk(cudaFree(d_Wskip)); gpuErrChk(cudaFree(d_Bskip)); } float* d_WskipInit; float* d_BskipInit; float* d_WskipOut; float* d_BskipOut; float* d_Wout; float* d_Bout; // init gpuErrChk(cudaMalloc(&d_WskipInit, S*R*sizeof(float))); gpuErrChk(cudaMemcpy(d_WskipInit, WskipInit.data(), S*R*sizeof(float), cudaMemcpyHostToDevice)); gpuErrChk(cudaMalloc(&d_BskipInit, S*sizeof(float))); gpuErrChk(cudaMemcpy(d_BskipInit, BskipInit.data(), S*sizeof(float), cudaMemcpyHostToDevice)); // skip gpuErrChk(cudaMalloc(&d_WskipOut, S*S*sizeof(float))); gpuErrChk(cudaMemcpy(d_WskipOut, WskipOut.data(), S*S*sizeof(float), cudaMemcpyHostToDevice)); gpuErrChk(cudaMalloc(&d_BskipOut, S*sizeof(float))); gpuErrChk(cudaMemcpy(d_BskipOut, BskipOut.data(), S*sizeof(float), cudaMemcpyHostToDevice)); // out gpuErrChk(cudaMalloc(&d_Wout, A*S*sizeof(float))); gpuErrChk(cudaMemcpy(d_Wout, Wout.data(), A*S*sizeof(float), cudaMemcpyHostToDevice)); gpuErrChk(cudaMalloc(&d_Bout, A*sizeof(float))); gpuErrChk(cudaMemcpy(d_Bout, Bout.data(), A*sizeof(float), cudaMemcpyHostToDevice)); infer->setOutWeights(d_WskipInit, d_BskipInit, d_WskipOut, d_BskipOut, d_Wout, d_Bout); gpuErrChk(cudaFree(d_WskipOut)); gpuErrChk(cudaFree(d_BskipOut)); gpuErrChk(cudaFree(d_Wout)); gpuErrChk(cudaFree(d_Bout)); } else { infer->setEmbeddings(embeddingsPrev.data(), embeddingsCur.data()); for (int l=0; l<num_layers; l++) { infer->setLayerWeights(l, Wprev[l]->data(), Wcur[l]->data(), Bh[l]->data(), Wres[l]->data(), Bres[l]->data(), Wskip[l]->data(), Bskip[l]->data()); } infer->setOutWeights(WskipInit.data(), BskipInit.data(), WskipOut.data(), BskipOut.data(), Wout.data(), Bout.data()); } Matrix zeroMatrix(R,batch_size,false); for (int row=0; row<R; row++) { for (int col=0; col<batch_size; col++) { zeroMatrix.set(row,col,0.f); } } Matrix Lh(2*R,samples_per_iteration*num_layers*batch_size); assert(Lh.data()); Lh.randomize(mean,scale); //Lh.randomize(0,0,100); Matrix LhFinal(S,samples_per_iteration*batch_size); assert(LhFinal.data()); LhFinal.randomize(mean,scale); //LhFinal.randomize(0,0,100); ref.setInputs(Lh.data(), LhFinal.data(), outputSelectors.data()); if (inputsFromDevice) { float* d_Lh; gpuErrChk(cudaMalloc(&d_Lh, 2*R*samples_per_iteration*num_layers*batch_size*sizeof(float))); float* d_LhFinal; gpuErrChk(cudaMalloc(&d_LhFinal, S*samples_per_iteration*batch_size*sizeof(float))); float* d_outputSelectors; gpuErrChk(cudaMalloc(&d_outputSelectors,samples_per_iteration*batch_size*sizeof(float))); gpuErrChk(cudaMemcpy(d_Lh, Lh.data(), 2*R*samples_per_iteration*num_layers*batch_size*sizeof(float), cudaMemcpyHostToDevice)); gpuErrChk(cudaMemcpy(d_LhFinal, LhFinal.data(), S*samples_per_iteration*batch_size*sizeof(float), cudaMemcpyHostToDevice)); gpuErrChk(cudaMemcpy(d_outputSelectors, outputSelectors.data(), samples_per_iteration*batch_size*sizeof(float), cudaMemcpyHostToDevice)); infer->setInputs(d_Lh, d_LhFinal, d_outputSelectors); gpuErrChk(cudaFree(d_Lh)); gpuErrChk(cudaFree(d_LhFinal)); 
gpuErrChk(cudaFree(d_outputSelectors)); } else { infer->setInputs(Lh.data(), LhFinal.data(), outputSelectors.data()); } for (int i=0; i<num_iterations; i++) { printf("Iteration: %d\n", i); // Run reference implementation infer->reset(false); int batch_size_per_block = ((batch_size % 4) == 0) ? 4 : ((batch_size % 2) == 0) ? 2 : 1; int* refYout = (int*)malloc(samples_per_iteration*batch_size*sizeof(int)); int* mcYout = (int*)malloc(samples_per_iteration*batch_size*sizeof(int)); ref.run(samples_per_iteration, batch_size, refYout); if (!testStreaming) { assert(infer->run(samples_per_iteration, batch_size, mcYout, batch_size_per_block, true)); gpuErrChk(cudaDeviceSynchronize()); } else { int* mcYoutTemp = (int*)malloc((samples_per_iteration+1)*batch_size*sizeof(int)); infer->setInputs(Lh.data(), LhFinal.data(), outputSelectors.data()); assert(infer->run(samples_per_iteration/2, batch_size, mcYoutTemp, batch_size_per_block, true)); gpuErrChk(cudaDeviceSynchronize()); int shift1 = Lh.rows()*Lh.cols()/2; int shift2 = outputSelectors.rows()*outputSelectors.cols()/2; int shift3 = samples_per_iteration*batch_size/2; int shift4 = LhFinal.rows()*LhFinal.cols()/2; infer->setInputs(Lh.data()+shift1, LhFinal.data() + shift4, outputSelectors.data()+shift2); assert(infer->run(samples_per_iteration/2, batch_size, mcYoutTemp+shift3, batch_size_per_block, true)); gpuErrChk(cudaDeviceSynchronize()); // mcYoutTemp now has outputs in the form: [run1_batch1_sample_1, ... run1_batch1_sample_N/2, r1_b2_s1,..., r1_bM_sN/2,..., r2_b1_sN/2+1,...] // change order to be by batch, not by batch per call to run() for (int b = 0; b < batch_size; b++) { for (int sample = 0; sample < samples_per_iteration/2; sample++) { mcYout[b*samples_per_iteration + sample] = mcYoutTemp[b*(samples_per_iteration/2) + sample]; mcYout[b*samples_per_iteration + samples_per_iteration/2 + sample] = mcYoutTemp[(b + batch_size)*(samples_per_iteration/2) + sample]; } } free(mcYoutTemp); } // Check results for (int l=0; l<num_layers; l++) { printf("Checking layer %d\n", l); Matrix refXout(R,batch_size); Matrix refSkipOut(S, batch_size); ref.getXtOut(l, refXout.data()); ref.getSkipOut(l, refSkipOut.data()); Matrix mcXout(R,batch_size,false); Matrix mcSkipOut(S,batch_size,false); infer->getXtOut(l, mcXout.data()); infer->getSkipOut(l, mcSkipOut.data()); matrix_compare("Xout", refXout, mcXout, 1.e-3); matrix_compare("skipOut", refSkipOut, mcSkipOut, 1.e-2, true); } Matrix refSkipOutFinal(S,batch_size); ref.getZs(refSkipOutFinal.data()); Matrix mcSkipOutFinal(S,batch_size,false); infer->getZs(mcSkipOutFinal.data()); matrix_compare("Zs", refSkipOutFinal, mcSkipOutFinal, 1.e-4, true); Matrix refOut(A,batch_size); ref.getZa(refOut.data()); Matrix mcOut(A,batch_size,false); infer->getZa(mcOut.data()); matrix_compare("Za", refOut, mcOut, 1.e-4); Matrix refP(A,batch_size); ref.getP(refP.data()); Matrix mcP(A,batch_size,false); infer->getP(mcP.data()); matrix_compare("p",refP,mcP,1.e-3); Matrix refSkip2(S,batch_size); ref.getSkip2(refSkip2.data()); Matrix mcSkip2(S,batch_size,false); infer->getSkipOut(num_layers, mcSkip2.data()); matrix_compare("Skip2", refSkip2, mcSkip2, 1.e-4, true); printf("Comparing yOut\n"); for (int i=0; i<samples_per_iteration*batch_size; i++) { printf("%d ", refYout[i]); assert(refYout[i] == mcYout[i]); printf("\n"); } printf("SUCCESS!\n"); } // Clean up delete infer; for (int l=0; l<num_layers; l++) { delete Wprev[l]; delete Wcur[l]; delete Bh[l]; delete Wres[l]; delete Bres[l]; delete Wskip[l]; delete Bskip[l]; for (int sample=0; 
sample<samples_per_iteration;sample++) { delete Xt[sample][l]; } delete skipOut[l]; } } int main(int argc, char* argv[]) { int num_layers = 20; int batch_size = 16; int test = 2; int num_samples = 1; bool test_streaming = false; if (argc > 1) num_layers = atoi(argv[1]); if (argc > 2) batch_size = atoi(argv[2]); if (argc > 3) test = atoi(argv[3]); if (argc > 4) num_samples = atoi(argv[4]); if (argc > 5) test_streaming = true; // How many samples to generate each time we invoke the kernel const int SAMPLES_PER_ITERATION = num_samples; const int MAX_DILATION = SAMPLES_PER_ITERATION; if (test_streaming) { assert((num_samples % 2) == 0); } srand(3); /*printf("Testing R=32, S=128\n"); printf(" Testing Single-Block\n"); runTest<float,float,32,128, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 1); printf(" Testing Dual-Block\n"); runTest<float,float,32,128, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 2); printf(" Testing Persistent\n"); runTest<float,float,32,128, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 3); printf("Testing R=64, S=128\n"); printf(" Testing Single-Block\n"); runTest<float,float,64,128, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 1, true, false); printf(" Testing Dual-Block\n"); runTest<float,float,64,128, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 2, false, true); printf(" Testing Persistent\n"); runTest<float,float,64,128, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 3, true, true); printf("Testing R=64, S=256\n"); printf(" Testing Single-Block\n"); runTest<float,float,64,256, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 1); printf(" Testing Dual-Block\n"); runTest<float,float,64,256, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 2); printf(" Testing Persistent\n"); runTest<float,float,64,256, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 3); */ printf("Testing R=128, S=128\n"); switch(test) { case 1: printf(" Testing Single-Block\n"); runTest<float,float,128,128, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 1, test_streaming); break; case 2: printf(" Testing Dual-Block\n"); runTest<float,float,128,128, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 2, test_streaming); break; case 3: printf(" Testing Persistent\n"); runTest<float,float,128,128, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 3, test_streaming); break; } }
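The streaming test above runs each iteration as two half-length run() calls and then re-interleaves mcYoutTemp into per-batch order. The standalone sketch below mirrors that index arithmetic; reorderStreamedOutputs is a hypothetical helper name, and the worked layout in the comments is only illustrative.

// Gathers the two half-length run() outputs back into per-batch order.
// With batch_size = 2 and samples_per_iteration = 4, the temp layout
//   [b0s0 b0s1 | b1s0 b1s1 | b0s2 b0s3 | b1s2 b1s3]
// becomes
//   [b0s0 b0s1 b0s2 b0s3 | b1s0 b1s1 b1s2 b1s3].
static void reorderStreamedOutputs(const int* yTemp, int* yOut, int batch_size, int samples_per_iteration)
{
    const int half = samples_per_iteration / 2;   // each run() produced `half` samples per batch entry
    for (int b = 0; b < batch_size; b++) {
        for (int s = 0; s < half; s++) {
            yOut[b * samples_per_iteration + s]        = yTemp[b * half + s];                 // first run()
            yOut[b * samples_per_iteration + half + s] = yTemp[(b + batch_size) * half + s];  // second run()
        }
    }
}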
d518838c5d936cdf07e45c04d7d619233058aaa6.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>

// __device__ - runs on the GPU, callable from GPU code
// __global__ - runs on the GPU, launched from the CPU (a kernel)
// __host__   - runs on the CPU
__global__ void add(int a, int b, int *c)
{
    *c = a + b;
}

int main()
{
    int c;
    int *dev_c;
    hipMalloc((void**)&dev_c, sizeof(int));

    hipLaunchKernelGGL(add, dim3(1), dim3(1), 0, 0, 1, 2, dev_c);

    hipMemcpy(&c, dev_c, sizeof(int), hipMemcpyDeviceToHost);
    printf("%i\n", c);

    hipFree(dev_c);
    return 0;
}
d518838c5d936cdf07e45c04d7d619233058aaa6.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> // __device__ - GPU // __global__ - GPU // __host__ - CPU __global__ void add( int a, int b, int *c) { *c = a + b; } int main() { int c; int *dev_c; cudaMalloc( (void**)&dev_c, sizeof(int)); add<<<1,1>>> (1, 2, dev_c); cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost); printf("%i\n", c); cudaFree(dev_c); return 0; }
2177f84164b32d160294daa095568caa9605d533.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * jacobi2D.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <[email protected]> * Will Killian <[email protected]> * Louis-Noel Pouchet <[email protected]> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU */ #include <stdio.h> #include <unistd.h> #include <time.h> #include <sys/time.h> #include <string.h> #include <stdlib.h> #include <stdarg.h> #include <math.h> #define POLYBENCH_TIME 1 #include "../../../utilities/remapping.h" #include "../../../utilities/remapping_mode.h" #include "jacobi2D.cuh" #include <polybench.h> #include <polybenchUtilFuncts.h> //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.05 #define RUN_ON_CPU void init_array(int n, DATA_TYPE POLYBENCH_2D(A,N,N,n,n), DATA_TYPE POLYBENCH_2D(B,N,N,n,n)) { int i, j; for (i = 0; i < n; i++) { for (j = 0; j < n; j++) { A[i][j] = ((DATA_TYPE) i*(j+2) + 10) / N; B[i][j] = ((DATA_TYPE) (i-4)*(j-1) + 11) / N; } } } void runJacobi2DCpu(int tsteps, int n, DATA_TYPE POLYBENCH_2D(A,N,N,n,n), DATA_TYPE POLYBENCH_2D(B,N,N,n,n)) { for (int t = 0; t < _PB_TSTEPS; t++) { for (int i = 1; i < _PB_N - 1; i++) { for (int j = 1; j < _PB_N - 1; j++) { B[i][j] = 0.2f * (A[i][j] + A[i][(j-1)] + A[i][(1+j)] + A[(1+i)][j] + A[(i-1)][j]); } } for (int i = 1; i < _PB_N-1; i++) { for (int j = 1; j < _PB_N-1; j++) { A[i][j] = B[i][j]; } } } } __global__ void runJacobiCUDA_kernel1(int n, DATA_TYPE* A, DATA_TYPE* B) { int i = remappingBlockIDy(blockIdx.y, BLOCKYMODE) * blockDim.y + remappingThreadIDy(threadIdx.y, THREADYMODE); int j = remappingBlockIDx(blockIdx.x, BLOCKXMODE) * blockDim.x + remappingThreadIDx(threadIdx.x, THREADXMODE); if ((i >= 1) && (i < (_PB_N-1)) && (j >= 1) && (j < (_PB_N-1))) { B[i*N + j] = 0.2f * (A[i*N + j] + A[i*N + (j-1)] + A[i*N + (1 + j)] + A[(1 + i)*N + j] + A[(i-1)*N + j]); } } __global__ void runJacobiCUDA_kernel2(int n, DATA_TYPE* A, DATA_TYPE* B) { int i = remappingBlockIDy(blockIdx.y, BLOCKYMODE) * blockDim.y + remappingThreadIDy(threadIdx.y, THREADYMODE); int j = remappingBlockIDx(blockIdx.x, BLOCKXMODE) * blockDim.x + remappingThreadIDx(threadIdx.x, THREADXMODE); if ((i >= 1) && (i < (_PB_N-1)) && (j >= 1) && (j < (_PB_N-1))) { A[i*N + j] = B[i*N + j]; } } void compareResults(int n, DATA_TYPE POLYBENCH_2D(a,N,N,n,n), DATA_TYPE POLYBENCH_2D(a_outputFromGpu,N,N,n,n), DATA_TYPE POLYBENCH_2D(b,N,N,n,n), DATA_TYPE POLYBENCH_2D(b_outputFromGpu,N,N,n,n)) { int i, j, fail; fail = 0; // Compare output from CPU and GPU for (i=0; i<n; i++) { for (j=0; j<n; j++) { if (percentDiff(a[i][j], a_outputFromGpu[i][j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } for (i=0; i<n; i++) { for (j=0; j<n; j++) { if (percentDiff(b[i][j], b_outputFromGpu[i][j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } // Print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void runJacobi2DCUDA(int tsteps, int n, DATA_TYPE POLYBENCH_2D(A,N,N,n,n), DATA_TYPE POLYBENCH_2D(B,N,N,n,n), DATA_TYPE POLYBENCH_2D(A_outputFromGpu,N,N,n,n), DATA_TYPE POLYBENCH_2D(B_outputFromGpu,N,N,n,n)) { DATA_TYPE* Agpu; DATA_TYPE* Bgpu; hipMalloc(&Agpu, N * N * sizeof(DATA_TYPE)); hipMalloc(&Bgpu, N * N * sizeof(DATA_TYPE)); hipMemcpy(Agpu, A, N * N * sizeof(DATA_TYPE), hipMemcpyHostToDevice); hipMemcpy(Bgpu, B, N * N * sizeof(DATA_TYPE), hipMemcpyHostToDevice); dim3 
block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y); dim3 grid((unsigned int)ceil( ((float)N) / ((float)block.x) ), (unsigned int)ceil( ((float)N) / ((float)block.y) )); /* Start timer. */ polybench_start_instruments; for (int t = 0; t < _PB_TSTEPS; t++) { hipLaunchKernelGGL(( runJacobiCUDA_kernel1), dim3(grid),dim3(block), 0, 0, n, Agpu, Bgpu); hipDeviceSynchronize(); hipLaunchKernelGGL(( runJacobiCUDA_kernel2), dim3(grid),dim3(block), 0, 0, n, Agpu, Bgpu); hipDeviceSynchronize(); } /* Stop and print timer. */ printf("GPU Time in seconds:\n"); polybench_stop_instruments; polybench_print_instruments; hipMemcpy(A_outputFromGpu, Agpu, sizeof(DATA_TYPE) * N * N, hipMemcpyDeviceToHost); hipMemcpy(B_outputFromGpu, Bgpu, sizeof(DATA_TYPE) * N * N, hipMemcpyDeviceToHost); hipFree(Agpu); hipFree(Bgpu); } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int n, DATA_TYPE POLYBENCH_2D(A,N,N,n,n)) { int i, j; for (i = 0; i < n; i++) for (j = 0; j < n; j++) { fprintf(stderr, DATA_PRINTF_MODIFIER, A[i][j]); if ((i * n + j) % 20 == 0) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; int tsteps = TSTEPS; POLYBENCH_2D_ARRAY_DECL(a,DATA_TYPE,N,N,n,n); POLYBENCH_2D_ARRAY_DECL(b,DATA_TYPE,N,N,n,n); POLYBENCH_2D_ARRAY_DECL(a_outputFromGpu,DATA_TYPE,N,N,n,n); POLYBENCH_2D_ARRAY_DECL(b_outputFromGpu,DATA_TYPE,N,N,n,n); init_array(n, POLYBENCH_ARRAY(a), POLYBENCH_ARRAY(b)); runJacobi2DCUDA(tsteps, n, POLYBENCH_ARRAY(a), POLYBENCH_ARRAY(b), POLYBENCH_ARRAY(a_outputFromGpu), POLYBENCH_ARRAY(b_outputFromGpu)); #ifdef RUN_ON_CPU /* Start timer. */ polybench_start_instruments; runJacobi2DCpu(tsteps, n, POLYBENCH_ARRAY(a), POLYBENCH_ARRAY(b)); /* Stop and print timer. */ printf("CPU Time in seconds:\n"); polybench_stop_instruments; polybench_print_instruments; compareResults(n, POLYBENCH_ARRAY(a), POLYBENCH_ARRAY(a_outputFromGpu), POLYBENCH_ARRAY(b), POLYBENCH_ARRAY(b_outputFromGpu)); #else //prevent dead code elimination polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(a_outputFromGpu))); #endif //RUN_ON_CPU POLYBENCH_FREE_ARRAY(a); POLYBENCH_FREE_ARRAY(a_outputFromGpu); POLYBENCH_FREE_ARRAY(b); POLYBENCH_FREE_ARRAY(b_outputFromGpu); return 0; } #include <polybench.c>
2177f84164b32d160294daa095568caa9605d533.cu
/** * jacobi2D.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <[email protected]> * Will Killian <[email protected]> * Louis-Noel Pouchet <[email protected]> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU */ #include <stdio.h> #include <unistd.h> #include <time.h> #include <sys/time.h> #include <string.h> #include <stdlib.h> #include <stdarg.h> #include <math.h> #define POLYBENCH_TIME 1 #include "../../../utilities/remapping.h" #include "../../../utilities/remapping_mode.h" #include "jacobi2D.cuh" #include <polybench.h> #include <polybenchUtilFuncts.h> //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.05 #define RUN_ON_CPU void init_array(int n, DATA_TYPE POLYBENCH_2D(A,N,N,n,n), DATA_TYPE POLYBENCH_2D(B,N,N,n,n)) { int i, j; for (i = 0; i < n; i++) { for (j = 0; j < n; j++) { A[i][j] = ((DATA_TYPE) i*(j+2) + 10) / N; B[i][j] = ((DATA_TYPE) (i-4)*(j-1) + 11) / N; } } } void runJacobi2DCpu(int tsteps, int n, DATA_TYPE POLYBENCH_2D(A,N,N,n,n), DATA_TYPE POLYBENCH_2D(B,N,N,n,n)) { for (int t = 0; t < _PB_TSTEPS; t++) { for (int i = 1; i < _PB_N - 1; i++) { for (int j = 1; j < _PB_N - 1; j++) { B[i][j] = 0.2f * (A[i][j] + A[i][(j-1)] + A[i][(1+j)] + A[(1+i)][j] + A[(i-1)][j]); } } for (int i = 1; i < _PB_N-1; i++) { for (int j = 1; j < _PB_N-1; j++) { A[i][j] = B[i][j]; } } } } __global__ void runJacobiCUDA_kernel1(int n, DATA_TYPE* A, DATA_TYPE* B) { int i = remappingBlockIDy(blockIdx.y, BLOCKYMODE) * blockDim.y + remappingThreadIDy(threadIdx.y, THREADYMODE); int j = remappingBlockIDx(blockIdx.x, BLOCKXMODE) * blockDim.x + remappingThreadIDx(threadIdx.x, THREADXMODE); if ((i >= 1) && (i < (_PB_N-1)) && (j >= 1) && (j < (_PB_N-1))) { B[i*N + j] = 0.2f * (A[i*N + j] + A[i*N + (j-1)] + A[i*N + (1 + j)] + A[(1 + i)*N + j] + A[(i-1)*N + j]); } } __global__ void runJacobiCUDA_kernel2(int n, DATA_TYPE* A, DATA_TYPE* B) { int i = remappingBlockIDy(blockIdx.y, BLOCKYMODE) * blockDim.y + remappingThreadIDy(threadIdx.y, THREADYMODE); int j = remappingBlockIDx(blockIdx.x, BLOCKXMODE) * blockDim.x + remappingThreadIDx(threadIdx.x, THREADXMODE); if ((i >= 1) && (i < (_PB_N-1)) && (j >= 1) && (j < (_PB_N-1))) { A[i*N + j] = B[i*N + j]; } } void compareResults(int n, DATA_TYPE POLYBENCH_2D(a,N,N,n,n), DATA_TYPE POLYBENCH_2D(a_outputFromGpu,N,N,n,n), DATA_TYPE POLYBENCH_2D(b,N,N,n,n), DATA_TYPE POLYBENCH_2D(b_outputFromGpu,N,N,n,n)) { int i, j, fail; fail = 0; // Compare output from CPU and GPU for (i=0; i<n; i++) { for (j=0; j<n; j++) { if (percentDiff(a[i][j], a_outputFromGpu[i][j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } for (i=0; i<n; i++) { for (j=0; j<n; j++) { if (percentDiff(b[i][j], b_outputFromGpu[i][j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } // Print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void runJacobi2DCUDA(int tsteps, int n, DATA_TYPE POLYBENCH_2D(A,N,N,n,n), DATA_TYPE POLYBENCH_2D(B,N,N,n,n), DATA_TYPE POLYBENCH_2D(A_outputFromGpu,N,N,n,n), DATA_TYPE POLYBENCH_2D(B_outputFromGpu,N,N,n,n)) { DATA_TYPE* Agpu; DATA_TYPE* Bgpu; cudaMalloc(&Agpu, N * N * sizeof(DATA_TYPE)); cudaMalloc(&Bgpu, N * N * sizeof(DATA_TYPE)); cudaMemcpy(Agpu, A, N * N * sizeof(DATA_TYPE), cudaMemcpyHostToDevice); cudaMemcpy(Bgpu, B, N * N * sizeof(DATA_TYPE), cudaMemcpyHostToDevice); dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y); dim3 grid((unsigned int)ceil( ((float)N) / ((float)block.x) 
), (unsigned int)ceil( ((float)N) / ((float)block.y) )); /* Start timer. */ polybench_start_instruments; for (int t = 0; t < _PB_TSTEPS; t++) { runJacobiCUDA_kernel1<<<grid,block>>>(n, Agpu, Bgpu); cudaThreadSynchronize(); runJacobiCUDA_kernel2<<<grid,block>>>(n, Agpu, Bgpu); cudaThreadSynchronize(); } /* Stop and print timer. */ printf("GPU Time in seconds:\n"); polybench_stop_instruments; polybench_print_instruments; cudaMemcpy(A_outputFromGpu, Agpu, sizeof(DATA_TYPE) * N * N, cudaMemcpyDeviceToHost); cudaMemcpy(B_outputFromGpu, Bgpu, sizeof(DATA_TYPE) * N * N, cudaMemcpyDeviceToHost); cudaFree(Agpu); cudaFree(Bgpu); } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int n, DATA_TYPE POLYBENCH_2D(A,N,N,n,n)) { int i, j; for (i = 0; i < n; i++) for (j = 0; j < n; j++) { fprintf(stderr, DATA_PRINTF_MODIFIER, A[i][j]); if ((i * n + j) % 20 == 0) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; int tsteps = TSTEPS; POLYBENCH_2D_ARRAY_DECL(a,DATA_TYPE,N,N,n,n); POLYBENCH_2D_ARRAY_DECL(b,DATA_TYPE,N,N,n,n); POLYBENCH_2D_ARRAY_DECL(a_outputFromGpu,DATA_TYPE,N,N,n,n); POLYBENCH_2D_ARRAY_DECL(b_outputFromGpu,DATA_TYPE,N,N,n,n); init_array(n, POLYBENCH_ARRAY(a), POLYBENCH_ARRAY(b)); runJacobi2DCUDA(tsteps, n, POLYBENCH_ARRAY(a), POLYBENCH_ARRAY(b), POLYBENCH_ARRAY(a_outputFromGpu), POLYBENCH_ARRAY(b_outputFromGpu)); #ifdef RUN_ON_CPU /* Start timer. */ polybench_start_instruments; runJacobi2DCpu(tsteps, n, POLYBENCH_ARRAY(a), POLYBENCH_ARRAY(b)); /* Stop and print timer. */ printf("CPU Time in seconds:\n"); polybench_stop_instruments; polybench_print_instruments; compareResults(n, POLYBENCH_ARRAY(a), POLYBENCH_ARRAY(a_outputFromGpu), POLYBENCH_ARRAY(b), POLYBENCH_ARRAY(b_outputFromGpu)); #else //prevent dead code elimination polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(a_outputFromGpu))); #endif //RUN_ON_CPU POLYBENCH_FREE_ARRAY(a); POLYBENCH_FREE_ARRAY(a_outputFromGpu); POLYBENCH_FREE_ARRAY(b); POLYBENCH_FREE_ARRAY(b_outputFromGpu); return 0; } #include <polybench.c>
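The launches above size the grid with a float ceil(); the same block count can be computed with the usual integer ceiling-division idiom, sketched below with a hypothetical divUp helper.

// Integer grid sizing equivalent to the float ceil() used above.
// Example: N = 1000, 32-wide block -> (1000 + 31) / 32 = 32 blocks, same as ceil(1000 / 32.0).
static inline unsigned int divUp(unsigned int n, unsigned int block)
{
    return (n + block - 1) / block;   // integer ceiling division
}

// usage sketch: dim3 grid(divUp(N, block.x), divUp(N, block.y));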
4df30476e3cb27279685df2dbecef3647b015f11.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cudaRTCommon.h" #include <thrust/sort.h> #include <thrust/reduce.h> #include <thrust/remove.h> #include <thrust/execution_policy.h> #include "conffilehelper.h" #define BLOCK_SIZE 16 #define NORMALRAY_BOUND_MAX 5 #define PATHSTREAM_SIZE 1E4*64 #define LIGHTRAY_BOUND_MAX 5 #define LIGHTVERTEX_N 640 namespace cudaRTBDPTStreamAdapCheatSort { const char* g_enumAdapModeName[] = {"PDF", "Const"}; NPAttrHelper::Attrib g_enumAdapMode("Adaptive Mode", g_enumAdapModeName, 2, 0); NPAttrHelper::Attrib g_uiDesiredMaxAdaptiveSampling("DesiredMaxAdaptiveSampling", 5); NPAttrHelper::Attrib g_fMinTraceProb("MinTraceProb", 0.f); NPAttrHelper::Attrib g_uiDesiredTraceTimes("DesiredTraceTimes", 5); const char* g_enumDebugModeName[] = { "None", "Traced", "Prob", "Prob With Limit" }; NPAttrHelper::Attrib g_enumDebugMode("Debug Mode", g_enumDebugModeName, 4, 0); CUDA_RT_COMMON_ATTRIBS_N(5) CUDA_RT_COMMON_ATTRIBS_BGN CUDA_RT_COMMON_ATTRIB_DECLARE(0, Adaptive Mode, g_enumAdapMode) CUDA_RT_COMMON_ATTRIB_DECLARE(1, Desired Max Sampling, g_uiDesiredMaxAdaptiveSampling) CUDA_RT_COMMON_ATTRIB_DECLARE(2, Min Trace Probability, g_fMinTraceProb) CUDA_RT_COMMON_ATTRIB_DECLARE(3, Desired Trace Time, g_uiDesiredTraceTimes) CUDA_RT_COMMON_ATTRIB_DECLARE(4, Debug Mode, g_enumDebugMode) CUDA_RT_COMMON_ATTRIBS_END float* g_fConvergedResult = nullptr; struct LightVertex { float3 pos; float3 norm; float3 irrad; float3 irradDir; float3 diff; float3 emissive; float specular; float metallic; float roughness; float pathPotential; __hd__ LightVertex() { pos = norm = irrad = irradDir = make_float3(0.f, 0.f, 0.f); pathPotential = 1.f; } }; LightVertex* g_devLightVertices = nullptr; uint g_uLightVerticesSize = 0; uint* g_devLightTri = nullptr; uint g_lightTriN = 0; void freeLightPathMem() { g_uLightVerticesSize = 0; g_lightTriN = 0; CUFREE(g_devLightVertices); CUFREE(g_devLightTri); } void allocateLightPathMem() { HANDLE_ERROR(hipMalloc((void**)&g_devLightVertices, sizeof(LightVertex) * LIGHTVERTEX_N)); HANDLE_ERROR(hipMemset((void*)g_devLightVertices, 0, sizeof(LightVertex) * LIGHTVERTEX_N)); } void updateLightTriCudaMem(RTScene* scene) { g_lightTriN = 0; CUFREE(g_devLightTri); std::vector<uint> lightTri; for (uint i = 0; i < scene->m_pTriangles.size(); i++) { if (NPMathHelper::Vec3::length(scene->m_pMaterials[scene->m_pTriangles[i].matInd].emissive) > 0.f) lightTri.push_back(i); } uint* tempLightTri = new uint[lightTri.size()]; for (uint i = 0; i < lightTri.size(); i++) { tempLightTri[i] = lightTri[i]; } g_lightTriN = lightTri.size(); HANDLE_ERROR(hipMalloc((void**)&g_devLightTri, sizeof(uint) * g_lightTriN)); HANDLE_ERROR(hipMemcpy(g_devLightTri, tempLightTri, sizeof(uint) * g_lightTriN, hipMemcpyHostToDevice)); DEL_ARRAY(tempLightTri); } enum RAYTYPE { RAYTYPE_EYE = 0, RAYTYPE_DIFF = 1, RAYTYPE_SPEC = 2, RAYTYPE_LIGHT = 3 }; struct PTPathVertex { uint isTerminated; uint2 pathPixel; float3 pathOutDir; float3 pathVertexPos; float3 pathOutMulTerm; RAYTYPE pathType; float3 pathSample; float3 pathAccumSample; uint pathSampleN; uint pathSampleDepth; hiprandState_t randState; // for connecting light path float3 pathInMulTerm; float3 pathInDir; float3 origNorm; float3 origDiff; float origMetallic; float origRoughness; float origSpecular; float origTrans; // for adaptive sampling float pathPotential; float pathAccumPotential; __device__ PTPathVertex() : isTerminated(true) , pathPixel(make_uint2(0,0)) , pathOutDir(make_float3(0.f, 1.f, 0.f)) , 
pathVertexPos(make_float3(0.f, 0.f, 0.f)) , pathOutMulTerm(make_float3(1.f, 1.f, 1.f)) , pathType(RAYTYPE_EYE) , pathSample(make_float3(0.f, 0.f, 0.f)) , pathAccumSample(make_float3(0.f, 0.f, 0.f)) , pathSampleN(0) , pathSampleDepth(0) , randState() , pathInMulTerm(make_float3(0.f, 0.f, 0.f)) , pathInDir(make_float3(0.f, 0.f, 0.f)) , origNorm(make_float3(0.f, 1.f, 0.f)) , origDiff(make_float3(0.f, 0.f, 0.f)) , origMetallic(0.f) , origRoughness(0.f) , origSpecular(0.f) , origTrans(0.f) , pathPotential(1.f) , pathAccumPotential(0.f) {} __device__ PTPathVertex(uint _isTerminated, uint2 _pathPixel, float3 _pathOutDir, float3 _pathVertexPos, RAYTYPE _pathType, hiprandState_t _randState) : isTerminated(_isTerminated) , pathPixel(_pathPixel) , pathOutDir(_pathOutDir) , pathVertexPos(_pathVertexPos) , pathOutMulTerm(make_float3(1.f, 1.f, 1.f)) , pathType(_pathType) , pathSample(make_float3(0.f, 0.f, 0.f)) , pathAccumSample(make_float3(0.f, 0.f, 0.f)) , pathSampleN(0) , pathSampleDepth(0) , randState(_randState) , pathInMulTerm(make_float3(0.f, 0.f, 0.f)) , pathInDir(make_float3(0.f, 0.f, 0.f)) , origNorm(make_float3(0.f, 1.f, 0.f)) , origDiff(make_float3(0.f, 0.f, 0.f)) , origMetallic(0.f) , origRoughness(0.f) , origSpecular(0.f) , origTrans(0.f) , pathPotential(1.f) , pathAccumPotential(0.f) {} }; uint* g_devTempPathQueue = nullptr; PTPathVertex* g_devPathQueue = nullptr; uint g_uPathQueueCur = 0; uint g_uPathQueueSize = 0; PTPathVertex** g_devPathStream = nullptr; PTPathVertex** g_devEyeLightConPathStream = nullptr; uint g_uPathStreamSize = PATHSTREAM_SIZE; void freeStreamMem() { g_uPathQueueCur = g_uPathQueueSize = 0; CUFREE(g_devTempPathQueue); CUFREE(g_devPathQueue); CUFREE(g_devPathStream); CUFREE(g_devEyeLightConPathStream); } void allocateStreamMem(uint queueSize = 480000) { g_uPathQueueSize = queueSize; HANDLE_ERROR(hipMalloc((void**)&g_devPathQueue, sizeof(PTPathVertex) * g_uPathQueueSize)); HANDLE_ERROR(hipMemset((void*)g_devPathQueue, 0, sizeof(PTPathVertex) * g_uPathQueueSize)); HANDLE_ERROR(hipMalloc((void**)&g_devTempPathQueue, sizeof(uint) * g_uPathQueueSize * 2)); HANDLE_ERROR(hipMemset((void*)g_devTempPathQueue, 0, sizeof(uint) * g_uPathQueueSize * 2)); HANDLE_ERROR(hipMalloc((void**)&g_devPathStream, sizeof(PTPathVertex*) * g_uPathStreamSize)); HANDLE_ERROR(hipMemset((void*)g_devPathStream, 0, sizeof(PTPathVertex*) * g_uPathStreamSize)); HANDLE_ERROR(hipMalloc((void**)&g_devEyeLightConPathStream, sizeof(PTPathVertex*) * g_uPathStreamSize)); HANDLE_ERROR(hipMemset((void*)g_devEyeLightConPathStream, 0, sizeof(PTPathVertex*) * g_uPathStreamSize)); } float* g_devResultData = nullptr; float* g_devAccResultData = nullptr; float* g_devResultVarKeyData = nullptr; uint* g_devPixelVarData = nullptr; float* g_devConvergedData = nullptr; uint* g_devSampleResultN = nullptr; NPMathHelper::Mat4x4 g_matLastCamMat; NPMathHelper::Mat4x4 g_matCurCamMat; uint32 g_uCurFrameN = 0; size_t g_resultDataSize = 0; uint32 WangHash(uint32 a) { a = (a ^ 61) ^ (a >> 16); a = a + (a << 3); a = a ^ (a >> 4); a = a * 0x27d4eb2d; a = a ^ (a >> 15); return a; } __global__ void pt_traceLight_kernel(RTVertex* vertices, RTTriangle* triangles, RTMaterial* materials, CURTTexture* textures, PTPathVertex** pathStream, uint activePathStreamSize, LightVertex* lightVertices, uint curLightVerticesSize) { uint x = blockIdx.x * blockDim.x + threadIdx.x; if (x >= activePathStreamSize || pathStream[x]->isTerminated) return; PTPathVertex* procVertex = pathStream[x]; CURay ray = CURay(procVertex->pathVertexPos, 
procVertex->pathOutDir); TracePrimitiveResult traceResult; if (TracePrimitive(ray, traceResult, M_INF, M_FLT_BIAS_EPSILON, false)) { RTTriangle* tri = &triangles[traceResult.triId]; RTMaterial* mat = &materials[tri->matInd]; RTVertex* v0 = &vertices[tri->vertInd0]; RTVertex* v1 = &vertices[tri->vertInd1]; RTVertex* v2 = &vertices[tri->vertInd2]; float2 uv0 = make_float2(v0->tex._x, v0->tex._y); float2 uv1 = make_float2(v1->tex._x, v1->tex._y); float2 uv2 = make_float2(v2->tex._x, v2->tex._y); float2 uv = uv0 * traceResult.w + uv1 * traceResult.u + uv2 * traceResult.v; float3 n0 = V32F3(v0->norm); float3 n1 = V32F3(v1->norm); float3 n2 = V32F3(v2->norm); float3 norm = n0 * traceResult.w + n1 * traceResult.u + n2 * traceResult.v; float3 triPos = V32F3(v0->pos) * traceResult.w + V32F3(v1->pos) * traceResult.u + V32F3(v2->pos) * traceResult.v; float3 diff; float3 emissive; float trans; float specular; float metallic; float roughness; float anisotropic; float sheen; float sheenTint; float clearcoat; float clearcoatGloss; GetMaterialColors(mat, uv, textures, diff, norm, emissive, trans, specular, metallic, roughness , anisotropic, sheen, sheenTint, clearcoat, clearcoatGloss); float3 nl = vecDot(norm, ray.dir) < 0.f ? norm : -1 * norm; lightVertices[curLightVerticesSize + x].irrad = procVertex->pathSample; lightVertices[curLightVerticesSize + x].irradDir = -1 * ray.dir; lightVertices[curLightVerticesSize + x].norm = nl; lightVertices[curLightVerticesSize + x].pos = triPos; lightVertices[curLightVerticesSize + x].diff = diff; lightVertices[curLightVerticesSize + x].emissive = emissive; lightVertices[curLightVerticesSize + x].specular = specular; lightVertices[curLightVerticesSize + x].metallic = metallic; lightVertices[curLightVerticesSize + x].roughness = roughness; lightVertices[curLightVerticesSize + x].pathPotential = procVertex->pathPotential; { // Get some random microfacet float3 hDir = ImportanceSampleGGX(make_float2(hiprand_uniform(&procVertex->randState), hiprand_uniform(&procVertex->randState)), roughness, nl); // Calculate flesnel float voH = vecDot(-1 * ray.dir, hDir); float3 f0 = vecLerp(0.08 * make_float3(specular, specular, specular), diff, metallic); float3 brdf_f = Fresnel(f0, voH); // PDF float NoH = vecDot(nl, hDir); float VoH = vecDot(-1 * ray.dir, hDir); float pdf = D_GGX(roughness, NoH) * NoH / (4.f * VoH); // Reflected or Refracted float reflProb = lerp(length(brdf_f), 1.0f, metallic); float refrProb = trans; float3 reflDir; float3 refrDir; CURay nextRay = ray; float3 lightMulTerm; RAYTYPE nextRayType = procVertex->pathType; if (refrProb > 0) { bool into = vecDot(nl, norm) > 0.f; float nt = specular * 0.8f + 1.f; float nc = 1.0f; float nnt = into ? 
nc / nt : nt / nc; float ddn = vecDot(hDir, ray.dir); float cos2t = 1.f - nnt * nnt *(1.f - ddn * ddn); if (cos2t < 0.f) { reflProb = 1.0f;//refrProb = 0.f; } else { refrDir = normalize(ray.dir * nnt - hDir * (ddn*nnt + sqrtf(cos2t))); } } if (reflProb > 0) { reflDir = normalize(ray.dir - hDir * 2 * vecDot(hDir, ray.dir)); if (vecDot(reflDir, nl) < 0.f) reflProb = 0.f; } // Reflected if (ProbabilityRand(&procVertex->randState, reflProb)) { nextRay = CURay(ray.orig + (traceResult.dist - M_FLT_BIAS_EPSILON) * ray.dir, reflDir); // ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(nextRay, vertices, triangles, materials, textures, randstate); // Microfacet specular = D*G*F / (4*NoL*NoV) // pdf = D * NoH / (4 * VoH) // (G * F * VoH) / (NoV * NoH) float NoV = vecDot(nl, -1 * ray.dir); float NoL = vecDot(nl, reflDir); float G = GeometricVisibility(roughness, NoV, NoL, VoH); //shadeResult = vecMul((brdf_f * G * VoH) / (NoV * NoH * reflProb) , nextRayResult.light) + emissive; lightMulTerm = (brdf_f * G * VoH) / (NoV * NoH * reflProb); nextRayType = RAYTYPE_SPEC; pdf *= reflProb; } // Diffused or Transmited else { // Transmited if (ProbabilityRand(&procVertex->randState, refrProb)) { nextRay = CURay(ray.orig + (traceResult.dist + M_FLT_BIAS_EPSILON) * ray.dir + refrDir * M_FLT_BIAS_EPSILON, refrDir); //ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(nextRay, vertices, triangles, materials, textures, randstate); float cosine = vecDot(-1 * nl, refrDir); //shadeResult = (cosine * vecMul(diff, nextRayResult.light)) / (refrProb * (1 - reflProb)) + emissive; lightMulTerm = cosine * diff / (refrProb * (1 - reflProb)); nextRayType = RAYTYPE_SPEC; pdf *= (refrProb * (1.f - reflProb)); } // Diffused else { float3 w = nl; float3 u = normalize(vecCross((fabs(w.x) > .1 ? 
make_float3(0, 1, 0) : make_float3(1, 0, 0)), w)); float3 v = vecCross(w, u); u = vecCross(v, w); float r1 = 2.f * M_PI * hiprand_uniform(&procVertex->randState); float r2cos = sqrtf(hiprand_uniform(&procVertex->randState)); float r2sin = 1.f - r2cos*r2cos; float3 diffDir = normalize(w * r2cos + u * r2sin * cosf(r1) + v * r2sin * sinf(r1)); nextRay = CURay(ray.orig + traceResult.dist * ray.dir + diffDir * M_FLT_BIAS_EPSILON, diffDir); //ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(nextRay, vertices, triangles, materials, textures, randstate); float VoH = vecDot(-1 * ray.dir, hDir); float NoV = vecDot(nl, -1 * ray.dir); float NoL = vecDot(nl, diffDir); //shadeResult = (M_PI * vecMul(Diffuse(diff, roughness, NoV, NoL, VoH), nextRayResult.light)) / ((1 - refrProb) * (1 - reflProb)) + emissive; lightMulTerm = M_PI * Diffuse(diff, roughness, NoV, NoL, VoH) / ((1 - refrProb) * (1 - reflProb)); nextRayType = RAYTYPE_DIFF; pdf *= ((1.f - refrProb) * (1.f - reflProb)) * vecDot(diffDir, nl); } } procVertex->pathSample = emissive + vecMul(procVertex->pathSample, lightMulTerm); procVertex->pathPotential *= pdf; float pixelContrib = length(procVertex->pathOutMulTerm) * length(lightMulTerm); if ((procVertex->pathType == RAYTYPE_DIFF && nextRayType == RAYTYPE_SPEC) || length(emissive) > 0.f) pixelContrib = 0.f; if (hiprand_uniform(&procVertex->randState) > pixelContrib || procVertex->pathSampleDepth + 1 >= NORMALRAY_BOUND_MAX) { procVertex->isTerminated = true; } else { procVertex->pathOutMulTerm = vecMul(procVertex->pathOutMulTerm, lightMulTerm); procVertex->pathOutDir = nextRay.dir; procVertex->pathVertexPos = nextRay.orig; procVertex->pathType = nextRayType; procVertex->pathSampleDepth++; } } } else { lightVertices[curLightVerticesSize + x] = lightVertices[procVertex->pathPixel.x]; procVertex->isTerminated = true; } } __global__ void pt_traceSample_kernel(RTVertex* vertices, RTTriangle* triangles, RTMaterial* materials, CURTTexture* textures, PTPathVertex** pathStream, uint activePathStreamSize) { uint x = blockIdx.x * blockDim.x + threadIdx.x; if (x >= activePathStreamSize || pathStream[x]->isTerminated) return; PTPathVertex* procVertex = pathStream[x]; CURay ray = CURay(procVertex->pathVertexPos, procVertex->pathOutDir); TracePrimitiveResult traceResult; if (TracePrimitive(ray, traceResult, M_INF, M_FLT_BIAS_EPSILON, false)) { RTTriangle* tri = &triangles[traceResult.triId]; RTMaterial* mat = &materials[tri->matInd]; RTVertex* v0 = &vertices[tri->vertInd0]; RTVertex* v1 = &vertices[tri->vertInd1]; RTVertex* v2 = &vertices[tri->vertInd2]; float2 uv0 = make_float2(v0->tex._x, v0->tex._y); float2 uv1 = make_float2(v1->tex._x, v1->tex._y); float2 uv2 = make_float2(v2->tex._x, v2->tex._y); float2 uv = uv0 * traceResult.w + uv1 * traceResult.u + uv2 * traceResult.v; float3 n0 = V32F3(v0->norm); float3 n1 = V32F3(v1->norm); float3 n2 = V32F3(v2->norm); float3 norm = n0 * traceResult.w + n1 * traceResult.u + n2 * traceResult.v; float3 diff; float3 emissive; float trans; float specular; float metallic; float roughness; float anisotropic; float sheen; float sheenTint; float clearcoat; float clearcoatGloss; GetMaterialColors(mat, uv, textures, diff, norm, emissive, trans, specular, metallic, roughness , anisotropic, sheen, sheenTint, clearcoat, clearcoatGloss); float3 nl = vecDot(norm, ray.dir) < 0.f ? 
norm : -1 * norm; { // Get some random microfacet float3 hDir = ImportanceSampleGGX(make_float2(hiprand_uniform(&procVertex->randState), hiprand_uniform(&procVertex->randState)), roughness, nl); // Calculate flesnel float voH = vecDot(-1 * ray.dir, hDir); float3 f0 = vecLerp(0.08 * make_float3(specular, specular, specular), diff, metallic); float3 brdf_f = Fresnel(f0, voH); // PDF float NoH = vecDot(nl, hDir); float VoH = vecDot(-1 * ray.dir, hDir); float pdf = D_GGX(roughness, NoH) * NoH / (4.f * VoH); // Reflected or Refracted float reflProb = lerp(length(brdf_f), 1.0f, metallic); float refrProb = trans; float3 reflDir; float3 refrDir; CURay nextRay = ray; float3 lightMulTerm; RAYTYPE nextRayType = procVertex->pathType; if (refrProb > 0) { bool into = vecDot(nl, norm) > 0.f; float nt = specular * 0.8f + 1.f; float nc = 1.0f; float nnt = into ? nc / nt : nt / nc; float ddn = vecDot(hDir, ray.dir); float cos2t = 1.f - nnt * nnt *(1.f - ddn * ddn); if (cos2t < 0.f) { reflProb = 1.0f;//refrProb = 0.f; } else { refrDir = normalize(ray.dir * nnt - hDir * (ddn*nnt + sqrtf(cos2t))); } } if (reflProb > 0) { reflDir = normalize(ray.dir - hDir * 2 * vecDot(hDir, ray.dir)); if (vecDot(reflDir, nl) < 0.f) reflProb = 0.f; } // Reflected if (ProbabilityRand(&procVertex->randState, reflProb)) { nextRay = CURay(ray.orig + (traceResult.dist - M_FLT_BIAS_EPSILON) * ray.dir, reflDir); // ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(nextRay, vertices, triangles, materials, textures, randstate); // Microfacet specular = D*G*F / (4*NoL*NoV) // pdf = D * NoH / (4 * VoH) // (G * F * VoH) / (NoV * NoH) float NoV = vecDot(nl, -1 * ray.dir); float NoL = vecDot(nl, reflDir); float G = GeometricVisibility(roughness, NoV, NoL, VoH); //shadeResult = vecMul((brdf_f * G * VoH) / (NoV * NoH * reflProb) , nextRayResult.light) + emissive; lightMulTerm = (brdf_f * G * VoH) / (NoV * NoH * reflProb); nextRayType = RAYTYPE_SPEC; pdf *= reflProb; } // Diffused or Transmited else { // Transmited if (ProbabilityRand(&procVertex->randState, refrProb)) { nextRay = CURay(ray.orig + (traceResult.dist + M_FLT_BIAS_EPSILON) * ray.dir + refrDir * M_FLT_BIAS_EPSILON, refrDir); //ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(nextRay, vertices, triangles, materials, textures, randstate); float cosine = vecDot(-1 * nl, refrDir); //shadeResult = (cosine * vecMul(diff, nextRayResult.light)) / (refrProb * (1 - reflProb)) + emissive; lightMulTerm = cosine * diff / (refrProb * (1 - reflProb)); nextRayType = RAYTYPE_SPEC; pdf *= (refrProb * (1.f - reflProb)); } // Diffused else { float3 w = nl; float3 u = normalize(vecCross((fabs(w.x) > .1 ? 
make_float3(0, 1, 0) : make_float3(1, 0, 0)), w)); float3 v = vecCross(w, u); u = vecCross(v, w); float r1 = 2.f * M_PI * hiprand_uniform(&procVertex->randState); float r2cos = sqrtf(hiprand_uniform(&procVertex->randState)); float r2sin = 1.f - r2cos*r2cos; float3 diffDir = normalize(w * r2cos + u * r2sin * cosf(r1) + v * r2sin * sinf(r1)); nextRay = CURay(ray.orig + traceResult.dist * ray.dir + diffDir * M_FLT_BIAS_EPSILON, diffDir); //ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(nextRay, vertices, triangles, materials, textures, randstate); float VoH = vecDot(-1 * ray.dir, hDir); float NoV = vecDot(nl, -1 * ray.dir); float NoL = vecDot(nl, diffDir); //shadeResult = (M_PI * vecMul(Diffuse(diff, roughness, NoV, NoL, VoH), nextRayResult.light)) / ((1 - refrProb) * (1 - reflProb)) + emissive; lightMulTerm = M_PI * Diffuse(diff, roughness, NoV, NoL, VoH) / ((1 - refrProb) * (1 - reflProb)); nextRayType = RAYTYPE_DIFF; pdf *= ((1.f - refrProb) * (1.f - reflProb)) * vecDot(diffDir, nl); } } procVertex->pathSample = procVertex->pathSample + vecMul(emissive , procVertex->pathOutMulTerm); procVertex->origDiff = diff; procVertex->pathInDir = -1 * ray.dir; procVertex->origNorm = nl; procVertex->origRoughness = roughness; procVertex->origMetallic = metallic; procVertex->origSpecular = specular; procVertex->origTrans = trans; procVertex->pathInMulTerm = procVertex->pathOutMulTerm; procVertex->pathPotential *= pdf; float pixelContrib = length(procVertex->pathOutMulTerm) * length(lightMulTerm); if ((procVertex->pathType == RAYTYPE_DIFF && nextRayType == RAYTYPE_SPEC) || length(emissive) > 0.f) pixelContrib = 0.f; if (hiprand_uniform(&procVertex->randState) > pixelContrib || procVertex->pathSampleDepth + 1 >= NORMALRAY_BOUND_MAX) { procVertex->pathAccumSample = procVertex->pathAccumSample + procVertex->pathSample; procVertex->pathAccumPotential = procVertex->pathAccumPotential + procVertex->pathPotential; procVertex->pathSampleN++; procVertex->isTerminated = true; } else { procVertex->pathOutMulTerm = vecMul(procVertex->pathOutMulTerm, lightMulTerm); procVertex->pathOutDir = nextRay.dir; procVertex->pathSampleDepth++; } procVertex->pathVertexPos = nextRay.orig; procVertex->pathType = nextRayType; } } else { procVertex->pathAccumSample = procVertex->pathAccumSample + procVertex->pathSample; procVertex->pathAccumPotential = procVertex->pathAccumPotential + procVertex->pathPotential; procVertex->pathSampleN++; procVertex->isTerminated = true; } } __global__ void pt_genLightPathQueue_kernel(uint32 frameN, uint32 hashedFrameN, uint* lightTri, uint lightTriN, RTVertex* vertices, RTTriangle* triangles, RTMaterial* materials, CURTTexture* textures, PTPathVertex* pathQueue, uint pathQueueCap, LightVertex* lightVertices, uint curLightVerticesSize) { uint x = blockIdx.x * blockDim.x + threadIdx.x; if (x > pathQueueCap) return; hiprandState_t randstate; hiprand_init(hashedFrameN + x, 0, 0, &randstate); uint lightSourceId = hiprand_uniform(&randstate) * lightTriN; float lightW = hiprand_uniform(&randstate); float lightU = hiprand_uniform(&randstate); if (lightW + lightU > 1.0f) { lightW = 1.f - lightW; lightU = 1.f - lightU; } float lightV = 1.f - lightW - lightU; uint triId = lightTri[lightSourceId]; RTTriangle* tri = &triangles[triId]; RTMaterial* mat = &materials[tri->matInd]; RTVertex* v0 = &vertices[tri->vertInd0]; RTVertex* v1 = &vertices[tri->vertInd1]; RTVertex* v2 = &vertices[tri->vertInd2]; float2 uv0 = make_float2(v0->tex._x, v0->tex._y); float2 uv1 = make_float2(v1->tex._x, v1->tex._y); float2 
uv2 = make_float2(v2->tex._x, v2->tex._y); float2 uv = uv0 * lightW + uv1 * lightU + uv2 * lightV; float3 n0 = V32F3(v0->norm); float3 n1 = V32F3(v1->norm); float3 n2 = V32F3(v2->norm); float3 triNorm = n0 * lightW + n1 * lightU + n2 * lightV; float3 triPos = V32F3(v0->pos) * lightW + V32F3(v1->pos) * lightU + V32F3(v2->pos) * lightV; float3 diff; float3 emissive; float trans; float specular; float metallic; float roughness; float anisotropic; float sheen; float sheenTint; float clearcoat; float clearcoatGloss; GetMaterialColors(mat, uv, textures, diff, triNorm, emissive, trans, specular, metallic, roughness , anisotropic, sheen, sheenTint, clearcoat, clearcoatGloss); float3 w = triNorm; float3 u = normalize(vecCross((fabs(w.x) > .1 ? make_float3(0, 1, 0) : make_float3(1, 0, 0)), w)); float3 v = vecCross(w, u); u = vecCross(v, w); float r1 = 2.f * M_PI * hiprand_uniform(&randstate); float r2cos = sqrtf(hiprand_uniform(&randstate)); float r2sin = 1.f - r2cos*r2cos; float3 diffDir = normalize(w * r2cos + u * r2sin * cosf(r1) + v * r2sin * sinf(r1)); pathQueue[x] = PTPathVertex(false, make_uint2(curLightVerticesSize + x, 0), diffDir , triPos + M_FLT_BIAS_EPSILON * triNorm, RAYTYPE_LIGHT, randstate); pathQueue[x].pathSample = emissive; lightVertices[curLightVerticesSize + x].irrad = emissive; lightVertices[curLightVerticesSize + x].irradDir = make_float3(0.f, 0.f, 0.f); lightVertices[curLightVerticesSize + x].norm = triNorm; lightVertices[curLightVerticesSize + x].pos = triPos; lightVertices[curLightVerticesSize + x].diff = diff; lightVertices[curLightVerticesSize + x].emissive = emissive; lightVertices[curLightVerticesSize + x].specular = specular; lightVertices[curLightVerticesSize + x].metallic = metallic; lightVertices[curLightVerticesSize + x].roughness = roughness; } __global__ void pt_genPathQueue_kernel(float3 camPos, float3 camDir, float3 camUp, float3 camRight, float fov, float width, float height, uint32 frameN, uint32 hashedFrameN, PTPathVertex* pathQueue) { uint x = blockIdx.x * blockDim.x + threadIdx.x; uint y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) return; uint ind = (y * width + x); float u = (2.f * ((float)x + 0.5f) / width - 1.f) * tan(fov * 0.5f) * width / height; float v = (2.f * ((float)y + 0.5f) / height - 1.f) * tan(fov * 0.5f); hiprandState_t randstate; hiprand_init(hashedFrameN + ind, 0, 0, &randstate); float au = u + (hiprand_uniform(&randstate) - 0.5f) / height * tan(fov * 0.5f); float av = v + (hiprand_uniform(&randstate) - 0.5f) / height * tan(fov * 0.5f); float3 dir = normalize(camRight * au + camUp * av + camDir); pathQueue[ind] = PTPathVertex(false, make_uint2(x,y), dir, camPos, RAYTYPE_EYE, randstate); } __global__ void pt_fillTempAdapPathQueue_kernel(uint* pathQueue, uint fillSize) { uint ind = blockIdx.x * blockDim.x + threadIdx.x; if (ind >= fillSize) return; pathQueue[ind] = ind; } __global__ void pt_genTempAdapPathQueue_kernel(float width, float height, uint32 hashedFrameN, uint32 seedoffset , float* genChance, uint* pathQueue, float minProb = 0.f, float mulRand = 1.f) { uint x = blockIdx.x * blockDim.x + threadIdx.x; uint y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) return; uint ind = (y * width + x); hiprandState_t randstate; hiprand_init(hashedFrameN + ind + seedoffset, 0, 0, &randstate); pathQueue[ind] = x + y * width; //float modChance = 1.f - expf(-genChance[ind]); if (hiprand_uniform(&randstate)*mulRand > fmaxf(genChance[ind], minProb)) { pathQueue[ind] = 0 - 1; } } __global__ void 
pt_genTempAdapPathQueueByKey_kernel(uint size, uint32 hashedFrameN, uint32 seedoffset, uint genSize, float* genChance, uint* genChancePixel, uint* pathQueue, float minProb = 0.f, float mulRand = 1.f) { uint x = blockIdx.x * blockDim.x + threadIdx.x; if (x >= size) return; uint genInd = size - 1 - x % genSize; //hiprandState_t randstate; //hiprand_init(hashedFrameN + x + seedoffset, 0, 0, &randstate); uint ind = genChancePixel[genInd]; pathQueue[x] = ind; //float modChance = 1.f - expf(-genChance[ind]); //if (hiprand_uniform(&randstate)*mulRand > fmaxf(genChance[ind], minProb)) //{ // pathQueue[x] = 0 - 1; //} } __global__ void pt_convTempPathQueue_kernel(float3 camPos, float3 camDir, float3 camUp, float3 camRight, float fov, float width, float height, uint32 frameN, uint32 hashedFrameN, uint* tempPathQueue, uint tempPathQueueSize, PTPathVertex* pathQueue) { uint ind = blockIdx.x * blockDim.x + threadIdx.x; if (ind >= tempPathQueueSize) return; uint pathInd = tempPathQueue[ind]; uint y = pathInd / width; uint x = pathInd - y * width; float u = (2.f * ((float)x + 0.5f) / width - 1.f) * tan(fov * 0.5f) * width / height; float v = (2.f * ((float)y + 0.5f) / height - 1.f) * tan(fov * 0.5f); hiprandState_t randstate; hiprand_init(hashedFrameN + ind, 0, 0, &randstate); float au = u + (hiprand_uniform(&randstate) - 0.5f) / height * tan(fov * 0.5f); float av = v + (hiprand_uniform(&randstate) - 0.5f) / height * tan(fov * 0.5f); float3 dir = normalize(camRight * au + camUp * av + camDir); pathQueue[ind] = PTPathVertex(false, make_uint2(x, y), dir, camPos, RAYTYPE_EYE, randstate); } __device__ float3 GetShadingResult(const float3& lightOutDir, const float3& lightInDir, const float3& lightInIrrad, const float3& norm, const float3& diff, const float metallic, const float roughness, const float specular, const float2 diffspec) { if (vecDot(norm, lightInDir) <= 0.f) return make_float3(0.f, 0.f, 0.f); float3 h = normalize(lightOutDir + lightInDir); float voH = vecDot(lightOutDir, h); float noV = vecDot(norm, lightOutDir); float noH = vecDot(norm, h); float noL = vecDot(norm, lightInDir); float3 f0 = vecLerp(0.08f * specular * make_float3(1.f, 1.f, 1.f), diff, metallic); float3 brdf_f = Fresnel(f0, voH); //float g = GeometricVisibility(roughness, noV, noL, voH); float d = D_GGX(roughness, noH); float v = Vis_SmithJointApprox(roughness, noV, noL); // Microfacet specular = D*G*F / (4*NoL*NoV) float3 specIrrad = d*v*brdf_f;// vecMul(d*g*brdf_f / (4.f * noV), lightInIrrad); float3 diffIrrad = vecMul((make_float3(1.f, 1.f, 1.f) - brdf_f), Diffuse(diff, roughness, noV, noL, voH));//vecMul((make_float3(1.f, 1.f, 1.f) - brdf_f), diff / M_PI); return vecMul(lightInIrrad*noL, diffspec.y*specIrrad + diffspec.x*diffIrrad); } __device__ void GetLightFromRandLightVertices(float3 pos, float3 norm, LightVertex* lightVertices, uint lightVerticesSize, hiprandState_t* randstate, float3& irrad, float3& irradDir, float& pathPotential) { //LightVertex dummy; //dummy.diff = make_float3(1.f, 1.f, 1.f); //dummy.irrad = make_float3(1.f, 0.f, 0.f); //dummy.pos = make_float3(0.f, 0.f, 0.f); //dummy.norm = dummy.irradDir = normalize(pos - dummy.pos); //dummy.roughness = 0.5f; //dummy.specular = 0.5f; //dummy.metallic = 0.f; irrad = make_float3(0.f, 0.f, 0.f); uint lightVert = hiprand_uniform(randstate) * lightVerticesSize; LightVertex* lightVertex = &lightVertices[lightVert]; float3 toLightVertexDir = normalize(lightVertex->pos - pos); float toLightVertexDist = length(lightVertex->pos - pos); CURay toLightVertex(pos, 
toLightVertexDir); TracePrimitiveResult traceResult; if (length(lightVertex->irrad) > 0.f && vecDot(norm, toLightVertexDir) > 0.f && !TracePrimitive(toLightVertex, traceResult, toLightVertexDist - M_FLT_BIAS_EPSILON, M_FLT_BIAS_EPSILON, false)) { if (length(lightVertex->irradDir) > M_FLT_EPSILON) irrad = GetShadingResult(-1 * toLightVertexDir, lightVertex->irradDir, lightVertex->irrad, lightVertex->norm , lightVertex->diff, lightVertex->metallic, lightVertex->roughness, lightVertex->specular, make_float2(1.f, 1.f)) + lightVertex->emissive; else irrad = lightVertex->irrad; irradDir = toLightVertexDir; pathPotential = lightVertex->pathPotential; } } __global__ void pt_connectEyeLightPath_kernel(PTPathVertex** eyeStream, uint eyeStreamSize, LightVertex* lightVertices, uint lightVerticesSize) { uint ind = blockIdx.x * blockDim.x + threadIdx.x; if (ind >= eyeStreamSize) return; PTPathVertex* eyePath = eyeStream[ind]; float3 lightFromLightVertex = make_float3(0.f, 0.f, 0.f); float3 toLightVertexDir = make_float3(0.f, 0.f, 0.f); float lightPathPotential = 1.f; GetLightFromRandLightVertices(eyePath->pathVertexPos + eyePath->origNorm * M_FLT_BIAS_EPSILON, eyePath->origNorm , lightVertices, lightVerticesSize, &eyePath->randState, lightFromLightVertex, toLightVertexDir, lightPathPotential); float3 lightContribFromLightVertex = vecMax(make_float3(0.f, 0.f, 0.f) , GetShadingResult(eyePath->pathInDir, toLightVertexDir, lightFromLightVertex, eyePath->origNorm , eyePath->origDiff, eyePath->origMetallic, eyePath->origRoughness, eyePath->origSpecular , make_float2(1.f - eyePath->origTrans, 1.f))); if (length(lightContribFromLightVertex) > 0.f) { eyePath->pathAccumSample = eyePath->pathAccumSample + vecMul(lightContribFromLightVertex, eyePath->pathInMulTerm); eyePath->pathSampleN += 4; eyePath->pathPotential *= lightPathPotential; } } __global__ void pt_assignPathStream_kernel(PTPathVertex** pathStream, uint pathStreamSize, PTPathVertex* pathQueue, uint pathQueueCur, uint pathQueueSize, uint assignableSlot) { uint ind = blockIdx.x * blockDim.x + threadIdx.x; if (ind < assignableSlot) { int pathStreamInd = pathStreamSize + ind; int pathQueueInd = pathQueueCur + ind; PTPathVertex* assignSample = nullptr; if (pathQueueInd < pathQueueSize) { assignSample = &pathQueue[pathQueueInd]; } pathStream[pathStreamInd] = assignSample; } } __global__ void pt_applyPixelProbToResult_kernel(uint width, uint height, float* result, float* varResult, float minProb = 0.f) { uint x = blockIdx.x * blockDim.x + threadIdx.x; uint y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) return; uint ind = (y * width + x); result[ind * 3] = result[ind * 3 + 1] = result[ind * 3 + 2] = fmaxf(minProb, varResult[ind]); } __global__ void pt_debugTracedPathQueueResult_kernel(PTPathVertex* pathQueue, uint pathQueueSize, uint width, uint height, uint frameN, float* result, float* accResult, float* varResult, uint* sampleResultN) { uint x = blockIdx.x * blockDim.x + threadIdx.x; if (x >= pathQueueSize) return; // add calculating sample to the result if (!pathQueue[x].isTerminated) { pathQueue[x].pathAccumSample = pathQueue[x].pathAccumSample + pathQueue[x].pathSample; pathQueue[x].pathAccumPotential = pathQueue[x].pathAccumPotential + pathQueue[x].pathPotential; pathQueue[x].pathSampleN++; } if (pathQueue[x].pathSampleN > 0) { uint ind = pathQueue[x].pathPixel.y * width + pathQueue[x].pathPixel.x; if (!frameN) { sampleResultN[ind] = 0; } uint tempNextSampleResultN = sampleResultN[ind] + pathQueue[x].pathSampleN; float3 
sampleResult = make_float3(1.f,1.f,1.f); float potentialResult = 1.f - pathQueue[x].pathAccumPotential; float resultInf = 1.f / (float)(tempNextSampleResultN); float oldInf = sampleResultN[ind] * resultInf; result[ind * 3] = max(resultInf * sampleResult.x + oldInf * result[ind * 3], 0.f); result[ind * 3 + 1] = max(resultInf * sampleResult.y + oldInf * result[ind * 3 + 1], 0.f); result[ind * 3 + 2] = max(resultInf * sampleResult.z + oldInf * result[ind * 3 + 2], 0.f); //varResult[ind] = max(resultInf * potentialResult + oldInf * varResult[ind], 0.f); sampleResultN[ind] = tempNextSampleResultN; } } __global__ void pt_applyPathQueueResult_kernel(PTPathVertex* pathQueue, uint pathQueueSize, uint width, uint height, uint frameN, float* result, float* accResult, float* varResult, uint* sampleResultN) { uint x = blockIdx.x * blockDim.x + threadIdx.x; if (x >= pathQueueSize) return; // add calculating sample to the result if (!pathQueue[x].isTerminated) { pathQueue[x].pathAccumSample = pathQueue[x].pathAccumSample + pathQueue[x].pathSample; pathQueue[x].pathAccumPotential = pathQueue[x].pathAccumPotential + pathQueue[x].pathPotential; pathQueue[x].pathSampleN++; } if (pathQueue[x].pathSampleN > 0) { uint ind = pathQueue[x].pathPixel.y * width + pathQueue[x].pathPixel.x; if (!frameN) { sampleResultN[ind] = 0; } uint tempNextSampleResultN = sampleResultN[ind] + pathQueue[x].pathSampleN; if (tempNextSampleResultN > sampleResultN[ind]) { float3 sampleResult = pathQueue[x].pathAccumSample; float potentialResult = 1.f - pathQueue[x].pathAccumPotential; float resultInf = 1.f / (float)(tempNextSampleResultN); float oldInf = sampleResultN[ind] * resultInf; result[ind * 3] = max(resultInf * sampleResult.x + oldInf * result[ind * 3], 0.f); result[ind * 3 + 1] = max(resultInf * sampleResult.y + oldInf * result[ind * 3 + 1], 0.f); result[ind * 3 + 2] = max(resultInf * sampleResult.z + oldInf * result[ind * 3 + 2], 0.f); //varResult[ind] = max(resultInf * potentialResult + oldInf * varResult[ind], 0.f); sampleResultN[ind] = tempNextSampleResultN; } } } __global__ void pt_calculateSquareError_kernel(float* correctData, float* sampleData, float* resultData, uint* resultPixel, uint dataSize) { uint x = blockIdx.x * blockDim.x + threadIdx.x; if (x >= dataSize) return; resultData[x] = /*fminf(*/((correctData[x * 3] - sampleData[x * 3]) * (correctData[x * 3] - sampleData[x * 3]) + (correctData[x * 3 + 1] - sampleData[x * 3 + 1]) * (correctData[x * 3 + 1] - sampleData[x * 3 + 1]) + (correctData[x * 3 + 2] - sampleData[x * 3 + 2]) * (correctData[x * 3 + 2] - sampleData[x * 3 + 2]) ) / 3.f/*, 1.f)*/; resultPixel[x] = x; } void CleanMem() { freeLightPathMem(); freeStreamMem(); freeAllBVHCudaMem(); CUFREE(g_devConvergedData); CUFREE(g_devSampleResultN); CUFREE(g_devPixelVarData); CUFREE(g_devResultVarKeyData); CUFREE(g_devResultData); CUFREE(g_devAccResultData); } //struct ray_greater_compare //{ // __hd__ bool operator()(const PTPathVertex* vert1, const PTPathVertex* vert2) // { // int vert1Score = (vert1->pathOutDir.x > 0) + (vert1->pathOutDir.y > 0) + (vert1->pathOutDir.z > 0); // int vert2Score = (vert2->pathOutDir.x > 0) + (vert2->pathOutDir.y > 0) + (vert2->pathOutDir.z > 0); // return vert1Score > vert2Score; // } //}; struct is_temppathqueue_terminated { __hd__ bool operator()(const uint& vert) { return (vert+1 == 0); } }; struct is_terminated { __hd__ bool operator()(const PTPathVertex* vert) { return vert->isTerminated; } }; struct is_connectToLightPath { __hd__ bool operator()(const PTPathVertex* vert) { return 
vert->pathType == RAYTYPE_DIFF;
	}
};

void TracePathQueue(uint pathQueueSize)
{
	dim3 block1(BLOCK_SIZE*BLOCK_SIZE, 1, 1);
	dim3 block2(BLOCK_SIZE, BLOCK_SIZE, 1);

	uint activePathStreamSize = 0;
	g_uPathQueueCur = 0;
	while (g_uPathQueueCur < pathQueueSize || activePathStreamSize > 0)
	{
		uint tempActivePathStreamSize = activePathStreamSize;
		int assignableStreamSlot = min((uint)PATHSTREAM_SIZE - activePathStreamSize, pathQueueSize - g_uPathQueueCur);
		if (assignableStreamSlot > 0)
			pt_assignPathStream_kernel << < dim3(ceil((float)assignableStreamSlot / (float)block1.x), 1, 1), block1 >> >(g_devPathStream, activePathStreamSize, g_devPathQueue, g_uPathQueueCur
				, pathQueueSize, assignableStreamSlot);
		//readjust activePathStreamSize
		activePathStreamSize += assignableStreamSlot;
		g_uPathQueueCur += assignableStreamSlot;

		//tracing process
		pt_traceSample_kernel << < dim3(ceil((float)activePathStreamSize / (float)block1.x), 1, 1), block1 >> >
			(g_devVertices, g_devTriangles, g_devMaterials, g_devTextures, g_devPathStream, activePathStreamSize);

		//compact pathstream and find activePathStreamSize value
		PTPathVertex** compactedStreamEndItr = thrust::remove_if(thrust::device, g_devPathStream, g_devPathStream + activePathStreamSize, is_terminated());
		activePathStreamSize = compactedStreamEndItr - g_devPathStream;

		//gen connectionpathstream
		PTPathVertex** conPathStreamEndItr = thrust::copy_if(thrust::device, g_devPathStream, g_devPathStream + activePathStreamSize, g_devEyeLightConPathStream, is_connectToLightPath());
		uint activeConPathStreamSize = conPathStreamEndItr - g_devEyeLightConPathStream;

		//connect eye and light path stream
		if (activeConPathStreamSize > 0)
		{
			pt_connectEyeLightPath_kernel << < dim3(ceil((float)activeConPathStreamSize / (float)block1.x), 1, 1), block1 >> >
				(g_devEyeLightConPathStream, activeConPathStreamSize, g_devLightVertices, g_uLightVerticesSize);
		}
	}
}

bool Render(NPMathHelper::Vec3 camPos, NPMathHelper::Vec3 camDir, NPMathHelper::Vec3 camUp, float fov, RTScene* scene
	, float width, float height, float* result)
{
	// Check and allocate everything
	if (!scene || !scene->GetCompactBVH()->IsValid() || !g_fConvergedResult)
		return false;
	NPMathHelper::Vec3 camRight = camDir.cross(camUp).normalize();
	camUp = camRight.cross(camDir).normalize();
	g_matLastCamMat = g_matCurCamMat;
	g_matCurCamMat = NPMathHelper::Mat4x4::lookAt(camPos, camPos + camDir, camUp);
	g_uCurFrameN = (g_matLastCamMat != g_matCurCamMat) ?
0 : g_uCurFrameN + 1; if (!g_bIsCudaInit || scene->GetIsCudaDirty()) { CleanMem(); g_matLastCamMat = g_matCurCamMat; g_uCurFrameN = 0; initAllSceneCudaMem(scene); allocateStreamMem(width * height); allocateLightPathMem(); updateLightTriCudaMem(scene); size_t mem_tot; size_t mem_free; hipMemGetInfo(&mem_free, &mem_tot); std::cout << "Memory Used : " << mem_tot-mem_free << "/" << mem_tot << " -> Free " << mem_free << std::endl; } else if (scene->GetIsCudaMaterialDirty()) { updateAllSceneMaterialsCudaMem(scene); updateLightTriCudaMem(scene); g_uCurFrameN = 0; } if (!g_bIsCudaInit) return false; if (!g_devResultData || !g_devAccResultData || g_resultDataSize != (sizeof(float) * 3 * width * height) || !g_devConvergedData || !g_devPixelVarData) { g_resultDataSize = sizeof(float) * 3 * width * height; CUFREE(g_devResultData); HANDLE_ERROR(hipMalloc((void**)&g_devResultData, g_resultDataSize)); CUFREE(g_devAccResultData); HANDLE_ERROR(hipMalloc((void**)&g_devAccResultData, g_resultDataSize)); CUFREE(g_devPixelVarData); HANDLE_ERROR(hipMalloc((void**)&g_devPixelVarData, sizeof(uint) * width * height)); CUFREE(g_devResultVarKeyData); HANDLE_ERROR(hipMalloc((void**)&g_devResultVarKeyData, sizeof(float) * width * height)); CUFREE(g_devSampleResultN); HANDLE_ERROR(hipMalloc((void**)&g_devSampleResultN, sizeof(uint) * width * height)); CUFREE(g_devConvergedData); HANDLE_ERROR(hipMalloc((void**)&g_devConvergedData, g_resultDataSize)); } float3 f3CamPos = V32F3(camPos); float3 f3CamUp = V32F3(camUp); float3 f3CamDir = V32F3(camDir); float3 f3CamRight = V32F3(camRight); dim3 block1(BLOCK_SIZE*BLOCK_SIZE, 1, 1); dim3 block2(BLOCK_SIZE, BLOCK_SIZE, 1); dim3 renderGrid(ceil(width / (float)block2.x), ceil(height / (float)block2.y), 1); // light paths if (g_uCurFrameN % 3 == 0) { uint lightPathStreamSizeCap = min((uint)PATHSTREAM_SIZE, (uint)(LIGHTVERTEX_N / LIGHTRAY_BOUND_MAX)); pt_genLightPathQueue_kernel << < dim3(ceil((float)lightPathStreamSizeCap / (float)block1.x), 1, 1), block1 >> > (g_uCurFrameN, WangHash(g_uCurFrameN), g_devLightTri, g_lightTriN, g_devVertices, g_devTriangles, g_devMaterials, g_devTextures, g_devPathQueue, lightPathStreamSizeCap , g_devLightVertices, 0); HANDLE_KERNEL_ERROR(); uint activePathStreamSize = 0; g_uLightVerticesSize = lightPathStreamSizeCap; g_uPathQueueCur = 0; while (g_uPathQueueCur < lightPathStreamSizeCap || activePathStreamSize > 0) { uint tempActivePathStreamSize = activePathStreamSize; int assignableStreamSlot = min(lightPathStreamSizeCap - activePathStreamSize, lightPathStreamSizeCap - g_uPathQueueCur); if (assignableStreamSlot > 0) { pt_assignPathStream_kernel << < dim3(ceil((float)assignableStreamSlot / (float)block1.x), 1, 1), block1 >> >(g_devPathStream, activePathStreamSize, g_devPathQueue, g_uPathQueueCur , g_uLightVerticesSize, assignableStreamSlot); HANDLE_KERNEL_ERROR(); } //readjust activePathStreamSize activePathStreamSize += assignableStreamSlot; g_uPathQueueCur += assignableStreamSlot; pt_traceLight_kernel << < dim3(ceil((float)activePathStreamSize / (float)block1.x), 1, 1), block1 >> > (g_devVertices, g_devTriangles, g_devMaterials, g_devTextures, g_devPathStream, activePathStreamSize , g_devLightVertices, g_uLightVerticesSize); HANDLE_KERNEL_ERROR(); g_uLightVerticesSize += activePathStreamSize; //compact pathstream and find activePathStreamSize value PTPathVertex** compactedStreamEndItr = thrust::remove_if(thrust::device, g_devPathStream, g_devPathStream + activePathStreamSize, is_terminated()); activePathStreamSize = compactedStreamEndItr - 
g_devPathStream; } //std::cout << "Generated light vertices size: " << g_uLightVerticesSize << std::endl; } if (g_uCurFrameN == 0) { hipMemcpy(g_devConvergedData, g_fConvergedResult, sizeof(float) * 3 * (uint)width * (uint)height, hipMemcpyHostToDevice); //float time; //hipEvent_t start, stop; //HANDLE_ERROR(hipEventCreate(&start)); //HANDLE_ERROR(hipEventCreate(&stop)); uint useQueueSize = width * height; //HANDLE_ERROR(hipEventRecord(start, 0)); // eye paths pt_genPathQueue_kernel << < renderGrid, block2 >> > (f3CamPos, f3CamDir, f3CamUp, f3CamRight, fov, width, height , g_uCurFrameN, WangHash(g_uCurFrameN), g_devPathQueue); HANDLE_KERNEL_ERROR(); //HANDLE_ERROR(hipEventRecord(stop, 0)); //HANDLE_ERROR(hipEventSynchronize(stop)); //HANDLE_ERROR(hipEventElapsedTime(&time, start, stop)); //std::cout << "gen path: " << time << std::endl; //HANDLE_ERROR(hipEventRecord(start, 0)); // trace path queue TracePathQueue(useQueueSize); //HANDLE_ERROR(hipEventRecord(stop, 0)); //HANDLE_ERROR(hipEventSynchronize(stop)); //HANDLE_ERROR(hipEventElapsedTime(&time, start, stop)); //std::cout << "trace path: " << time << std::endl; //HANDLE_ERROR(hipEventRecord(start, 0)); pt_applyPathQueueResult_kernel << < dim3(ceil((float)useQueueSize / (float)block1.x), 1, 1), block1 >> > (g_devPathQueue, useQueueSize, width, height, g_uCurFrameN, g_devResultData, g_devAccResultData, g_devResultVarKeyData, g_devSampleResultN); HANDLE_KERNEL_ERROR(); //HANDLE_ERROR(hipEventRecord(stop, 0)); //HANDLE_ERROR(hipEventSynchronize(stop)); //HANDLE_ERROR(hipEventElapsedTime(&time, start, stop)); //std::cout << "accum path: " << time << std::endl; } else { //float time; //hipEvent_t start, stop; //HANDLE_ERROR(hipEventCreate(&start)); //HANDLE_ERROR(hipEventCreate(&stop)); // calculate sampling map from converged result pt_calculateSquareError_kernel << < dim3(ceil((float)(width * height) / (float)block1.x), 1, 1), block1 >> > (g_devConvergedData, g_devResultData, g_devResultVarKeyData, g_devPixelVarData, (uint)(width * height)); HANDLE_KERNEL_ERROR(); //thrust::sort(thrust::device, g_devResultVarKeyData, g_devResultVarKeyData + (uint)(width * height)); thrust::sort_by_key(thrust::device, g_devResultVarKeyData, g_devResultVarKeyData + (uint)(width * height), g_devPixelVarData); float sumMSE = thrust::reduce(thrust::device, g_devResultVarKeyData, g_devResultVarKeyData + (uint)(width * height), 0.f, thrust::plus<float>()); float maxMSE = thrust::reduce(thrust::device, g_devResultVarKeyData, g_devResultVarKeyData + (uint)(width * height), 0.f, thrust::maximum<float>()); float meanMSE = sumMSE / (width * height); std::cout << "maxMSE: " << maxMSE << "\n"; std::cout << "meanMSE: " << meanMSE << "\n"; //if (g_uCurFrameN == 1) //{ // float* tempDiffData = new float[(uint)width * (uint)height]; // hipMemcpy(tempDiffData, g_devResultVarKeyData, (uint)(width * height) * sizeof(float), hipMemcpyDeviceToHost); // NPConfFileHelper::txtConfFile conf("adapCheat_diffData.txt"); // for (uint j = 0; j < width * height; j++) // { // conf.WriteRaw<float>(tempDiffData[j]); // conf.WriteRaw("\n"); // } // conf.SyncDataToFile(); // DELETE_ARRAY(tempDiffData); //} //HANDLE_ERROR(hipEventRecord(start, 0)); // gen adaptive eye paths //std::vector<uint> pathQueuesSize; uint accumPathQueueSize = 0; uint genSize = width * height; //uint debugLoopTime = 0; uint selectSize = ceil((float)(width * height) / (float)(*g_uiDesiredTraceTimes.GetUint())); //std::cout << "selectSize : " << selectSize << std::endl; pt_genTempAdapPathQueueByKey_kernel << < 
dim3(ceil(genSize / (float)block1.x), 1, 1), block1 >> > (genSize , WangHash(g_uCurFrameN), accumPathQueueSize, selectSize, g_devResultVarKeyData, g_devPixelVarData, g_devTempPathQueue + accumPathQueueSize , *g_fMinTraceProb.GetFloat(), maxMSE); HANDLE_KERNEL_ERROR(); accumPathQueueSize = genSize; //while (accumPathQueueSize < genSize) //{ // // generate path into temp path // uint iterGenSize = ceil((float)(width * height) / (float)(*g_uiDesiredMaxAdaptiveSampling.GetFloat())); // pt_genTempAdapPathQueueByKey_kernel << < dim3(ceil(iterGenSize / (float)block1.x), 1, 1), block1 >> > (genSize // , WangHash(g_uCurFrameN), accumPathQueueSize, iterGenSize, g_devResultVarKeyData, g_devPixelVarData, g_devTempPathQueue + accumPathQueueSize // , *g_fMinTraceProb.GetFloat(), maxMSE); // HANDLE_KERNEL_ERROR(); // uint* pathQueueEndItr = thrust::remove_if(thrust::device, g_devTempPathQueue + accumPathQueueSize // , g_devTempPathQueue + accumPathQueueSize + iterGenSize, is_temppathqueue_terminated()); // uint compactedGenSize = min(genSize - accumPathQueueSize, (uint)(pathQueueEndItr - (g_devTempPathQueue + accumPathQueueSize))); // pathQueuesSize.push_back(compactedGenSize); // accumPathQueueSize += compactedGenSize; // if (compactedGenSize == 0) break; // //std::cout << "Gened: " << compactedGenSize << std::endl << "Accum: " << accumPathQueueSize << std::endl; // //debugLoopTime++; //} //std::cout << "Debug Loop Time: " << debugLoopTime << "\n"; // fill temp path int unfilledPathQueueSize = genSize - accumPathQueueSize; if (unfilledPathQueueSize > 0) { pt_fillTempAdapPathQueue_kernel << < dim3(ceil((float)unfilledPathQueueSize / (float)block1.x), 1, 1), block1 >> > (g_devTempPathQueue + accumPathQueueSize, unfilledPathQueueSize); HANDLE_KERNEL_ERROR(); //pathQueuesSize.push_back(unfilledPathQueueSize); accumPathQueueSize += unfilledPathQueueSize; } // generate real path from temp path pt_convTempPathQueue_kernel << < dim3(ceil((float)accumPathQueueSize/ (float)block1.x), 1, 1), block1 >> > (f3CamPos, f3CamDir, f3CamUp, f3CamRight, fov, width, height , g_uCurFrameN, WangHash(g_uCurFrameN), g_devTempPathQueue, accumPathQueueSize, g_devPathQueue); HANDLE_KERNEL_ERROR(); //HANDLE_ERROR(hipEventRecord(stop, 0)); //HANDLE_ERROR(hipEventSynchronize(stop)); //HANDLE_ERROR(hipEventElapsedTime(&time, start, stop)); //std::cout << "gen path: " << time << std::endl; //HANDLE_ERROR(hipEventRecord(start, 0)); TracePathQueue(genSize); //HANDLE_ERROR(hipEventRecord(stop, 0)); //HANDLE_ERROR(hipEventSynchronize(stop)); //HANDLE_ERROR(hipEventElapsedTime(&time, start, stop)); //std::cout << "trace path: " << time << std::endl; //HANDLE_ERROR(hipEventRecord(start, 0)); for (uint accumStart = 0; accumStart < genSize; accumStart += selectSize) { uint procSize = min(selectSize, genSize - accumStart); if (*g_enumDebugMode.GetUint() == 1) { pt_debugTracedPathQueueResult_kernel << < dim3(ceil((float)procSize / (float)block1.x), 1, 1), block1 >> > (g_devPathQueue + accumStart, procSize, width, height, g_uCurFrameN, g_devResultData, g_devAccResultData, g_devResultVarKeyData, g_devSampleResultN); HANDLE_KERNEL_ERROR(); } else { pt_applyPathQueueResult_kernel << < dim3(ceil((float)procSize / (float)block1.x), 1, 1), block1 >> > (g_devPathQueue + accumStart, procSize, width, height, g_uCurFrameN, g_devResultData, g_devAccResultData, g_devResultVarKeyData, g_devSampleResultN); HANDLE_KERNEL_ERROR(); } } //accumPathQueueSize = 0; //for (auto pathQueueSize : pathQueuesSize) //{ // if (*g_enumDebugMode.GetUint() == 1) // { // 
pt_debugTracedPathQueueResult_kernel << < dim3(ceil((float)pathQueueSize / (float)block1.x), 1, 1), block1 >> > // (g_devPathQueue + accumPathQueueSize, pathQueueSize, width, height, g_uCurFrameN, g_devResultData, g_devAccResultData, g_devResultVarKeyData, g_devSampleResultN); // HANDLE_KERNEL_ERROR(); // } // else // { // pt_applyPathQueueResult_kernel << < dim3(ceil((float)pathQueueSize / (float)block1.x), 1, 1), block1 >> > // (g_devPathQueue + accumPathQueueSize, pathQueueSize, width, height, g_uCurFrameN, g_devResultData, g_devAccResultData, g_devResultVarKeyData, g_devSampleResultN); // HANDLE_KERNEL_ERROR(); // } // accumPathQueueSize += pathQueueSize; //} //HANDLE_ERROR(hipEventRecord(stop, 0)); //HANDLE_ERROR(hipEventSynchronize(stop)); //HANDLE_ERROR(hipEventElapsedTime(&time, start, stop)); //std::cout << "accum path: " << time << std::endl; } if (*g_enumDebugMode.GetUint() == 2 || *g_enumDebugMode.GetUint() == 3) { pt_applyPixelProbToResult_kernel << < renderGrid, block2 >> >(width, height, g_devResultData, g_devResultVarKeyData, (*g_enumDebugMode.GetUint() == 3) ? *g_fMinTraceProb.GetFloat() : 0.f); HANDLE_KERNEL_ERROR(); } // Copy result to host hipMemcpy(result, g_devResultData, g_resultDataSize, hipMemcpyDeviceToHost); return true; } }
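A minimal host-side sketch of how this renderer's entry points might be driven, for orientation only. It is not part of the original file: the function name ExampleRenderLoop, its arguments, the camera values, the three-float NPMathHelper::Vec3 constructor, and the use of the project's uint32 typedef are all assumptions, and it further assumes the hipified file keeps the cudaRTBDPTStreamAdapCheatSort namespace of the CUDA original below. The only grounded requirements are that the scene's compact BVH must be valid and that g_fConvergedResult must point at a 3*width*height reference image before the first frame, since Render() rejects a null pointer and uploads the reference on frame 0.

#include <vector>

// Hypothetical driver (editorial sketch); assumes the project headers that
// declare RTScene, NPMathHelper and uint32 are already included in this TU.
void ExampleRenderLoop(RTScene* scene, uint32 width, uint32 height, const float* referenceImage /* 3*w*h floats */)
{
	using namespace cudaRTBDPTStreamAdapCheatSort;

	std::vector<float> result(3 * width * height, 0.f);
	std::vector<float> converged(referenceImage, referenceImage + 3 * width * height);
	g_fConvergedResult = converged.data();	// adaptive-sampling reference; uploaded to the device on frame 0

	NPMathHelper::Vec3 camPos(0.f, 1.f, 5.f);	// hypothetical camera setup
	NPMathHelper::Vec3 camDir(0.f, 0.f, -1.f);
	NPMathHelper::Vec3 camUp(0.f, 1.f, 0.f);
	const float fov = 1.0472f;			// ~60 degrees, in radians

	for (uint32 frame = 0; frame < 64; frame++)
	{
		// Each call traces one more pass and accumulates into `result`;
		// frame counting and adaptive path generation are handled internally.
		if (!Render(camPos, camDir, camUp, fov, scene, (float)width, (float)height, result.data()))
			break;	// scene/BVH not ready or converged reference missing
	}

	g_fConvergedResult = nullptr;	// `converged` goes out of scope when this function returns
	CleanMem();			// release device buffers
}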
4df30476e3cb27279685df2dbecef3647b015f11.cu
#include "cudaRTCommon.h" #include <thrust/sort.h> #include <thrust/reduce.h> #include <thrust/remove.h> #include <thrust/execution_policy.h> #include "conffilehelper.h" #define BLOCK_SIZE 16 #define NORMALRAY_BOUND_MAX 5 #define PATHSTREAM_SIZE 1E4*64 #define LIGHTRAY_BOUND_MAX 5 #define LIGHTVERTEX_N 640 namespace cudaRTBDPTStreamAdapCheatSort { const char* g_enumAdapModeName[] = {"PDF", "Const"}; NPAttrHelper::Attrib g_enumAdapMode("Adaptive Mode", g_enumAdapModeName, 2, 0); NPAttrHelper::Attrib g_uiDesiredMaxAdaptiveSampling("DesiredMaxAdaptiveSampling", 5); NPAttrHelper::Attrib g_fMinTraceProb("MinTraceProb", 0.f); NPAttrHelper::Attrib g_uiDesiredTraceTimes("DesiredTraceTimes", 5); const char* g_enumDebugModeName[] = { "None", "Traced", "Prob", "Prob With Limit" }; NPAttrHelper::Attrib g_enumDebugMode("Debug Mode", g_enumDebugModeName, 4, 0); CUDA_RT_COMMON_ATTRIBS_N(5) CUDA_RT_COMMON_ATTRIBS_BGN CUDA_RT_COMMON_ATTRIB_DECLARE(0, Adaptive Mode, g_enumAdapMode) CUDA_RT_COMMON_ATTRIB_DECLARE(1, Desired Max Sampling, g_uiDesiredMaxAdaptiveSampling) CUDA_RT_COMMON_ATTRIB_DECLARE(2, Min Trace Probability, g_fMinTraceProb) CUDA_RT_COMMON_ATTRIB_DECLARE(3, Desired Trace Time, g_uiDesiredTraceTimes) CUDA_RT_COMMON_ATTRIB_DECLARE(4, Debug Mode, g_enumDebugMode) CUDA_RT_COMMON_ATTRIBS_END float* g_fConvergedResult = nullptr; struct LightVertex { float3 pos; float3 norm; float3 irrad; float3 irradDir; float3 diff; float3 emissive; float specular; float metallic; float roughness; float pathPotential; __hd__ LightVertex() { pos = norm = irrad = irradDir = make_float3(0.f, 0.f, 0.f); pathPotential = 1.f; } }; LightVertex* g_devLightVertices = nullptr; uint g_uLightVerticesSize = 0; uint* g_devLightTri = nullptr; uint g_lightTriN = 0; void freeLightPathMem() { g_uLightVerticesSize = 0; g_lightTriN = 0; CUFREE(g_devLightVertices); CUFREE(g_devLightTri); } void allocateLightPathMem() { HANDLE_ERROR(cudaMalloc((void**)&g_devLightVertices, sizeof(LightVertex) * LIGHTVERTEX_N)); HANDLE_ERROR(cudaMemset((void*)g_devLightVertices, 0, sizeof(LightVertex) * LIGHTVERTEX_N)); } void updateLightTriCudaMem(RTScene* scene) { g_lightTriN = 0; CUFREE(g_devLightTri); std::vector<uint> lightTri; for (uint i = 0; i < scene->m_pTriangles.size(); i++) { if (NPMathHelper::Vec3::length(scene->m_pMaterials[scene->m_pTriangles[i].matInd].emissive) > 0.f) lightTri.push_back(i); } uint* tempLightTri = new uint[lightTri.size()]; for (uint i = 0; i < lightTri.size(); i++) { tempLightTri[i] = lightTri[i]; } g_lightTriN = lightTri.size(); HANDLE_ERROR(cudaMalloc((void**)&g_devLightTri, sizeof(uint) * g_lightTriN)); HANDLE_ERROR(cudaMemcpy(g_devLightTri, tempLightTri, sizeof(uint) * g_lightTriN, cudaMemcpyHostToDevice)); DEL_ARRAY(tempLightTri); } enum RAYTYPE { RAYTYPE_EYE = 0, RAYTYPE_DIFF = 1, RAYTYPE_SPEC = 2, RAYTYPE_LIGHT = 3 }; struct PTPathVertex { uint isTerminated; uint2 pathPixel; float3 pathOutDir; float3 pathVertexPos; float3 pathOutMulTerm; RAYTYPE pathType; float3 pathSample; float3 pathAccumSample; uint pathSampleN; uint pathSampleDepth; curandState randState; // for connecting light path float3 pathInMulTerm; float3 pathInDir; float3 origNorm; float3 origDiff; float origMetallic; float origRoughness; float origSpecular; float origTrans; // for adaptive sampling float pathPotential; float pathAccumPotential; __device__ PTPathVertex() : isTerminated(true) , pathPixel(make_uint2(0,0)) , pathOutDir(make_float3(0.f, 1.f, 0.f)) , pathVertexPos(make_float3(0.f, 0.f, 0.f)) , pathOutMulTerm(make_float3(1.f, 1.f, 1.f)) , 
pathType(RAYTYPE_EYE) , pathSample(make_float3(0.f, 0.f, 0.f)) , pathAccumSample(make_float3(0.f, 0.f, 0.f)) , pathSampleN(0) , pathSampleDepth(0) , randState() , pathInMulTerm(make_float3(0.f, 0.f, 0.f)) , pathInDir(make_float3(0.f, 0.f, 0.f)) , origNorm(make_float3(0.f, 1.f, 0.f)) , origDiff(make_float3(0.f, 0.f, 0.f)) , origMetallic(0.f) , origRoughness(0.f) , origSpecular(0.f) , origTrans(0.f) , pathPotential(1.f) , pathAccumPotential(0.f) {} __device__ PTPathVertex(uint _isTerminated, uint2 _pathPixel, float3 _pathOutDir, float3 _pathVertexPos, RAYTYPE _pathType, curandState _randState) : isTerminated(_isTerminated) , pathPixel(_pathPixel) , pathOutDir(_pathOutDir) , pathVertexPos(_pathVertexPos) , pathOutMulTerm(make_float3(1.f, 1.f, 1.f)) , pathType(_pathType) , pathSample(make_float3(0.f, 0.f, 0.f)) , pathAccumSample(make_float3(0.f, 0.f, 0.f)) , pathSampleN(0) , pathSampleDepth(0) , randState(_randState) , pathInMulTerm(make_float3(0.f, 0.f, 0.f)) , pathInDir(make_float3(0.f, 0.f, 0.f)) , origNorm(make_float3(0.f, 1.f, 0.f)) , origDiff(make_float3(0.f, 0.f, 0.f)) , origMetallic(0.f) , origRoughness(0.f) , origSpecular(0.f) , origTrans(0.f) , pathPotential(1.f) , pathAccumPotential(0.f) {} }; uint* g_devTempPathQueue = nullptr; PTPathVertex* g_devPathQueue = nullptr; uint g_uPathQueueCur = 0; uint g_uPathQueueSize = 0; PTPathVertex** g_devPathStream = nullptr; PTPathVertex** g_devEyeLightConPathStream = nullptr; uint g_uPathStreamSize = PATHSTREAM_SIZE; void freeStreamMem() { g_uPathQueueCur = g_uPathQueueSize = 0; CUFREE(g_devTempPathQueue); CUFREE(g_devPathQueue); CUFREE(g_devPathStream); CUFREE(g_devEyeLightConPathStream); } void allocateStreamMem(uint queueSize = 480000) { g_uPathQueueSize = queueSize; HANDLE_ERROR(cudaMalloc((void**)&g_devPathQueue, sizeof(PTPathVertex) * g_uPathQueueSize)); HANDLE_ERROR(cudaMemset((void*)g_devPathQueue, 0, sizeof(PTPathVertex) * g_uPathQueueSize)); HANDLE_ERROR(cudaMalloc((void**)&g_devTempPathQueue, sizeof(uint) * g_uPathQueueSize * 2)); HANDLE_ERROR(cudaMemset((void*)g_devTempPathQueue, 0, sizeof(uint) * g_uPathQueueSize * 2)); HANDLE_ERROR(cudaMalloc((void**)&g_devPathStream, sizeof(PTPathVertex*) * g_uPathStreamSize)); HANDLE_ERROR(cudaMemset((void*)g_devPathStream, 0, sizeof(PTPathVertex*) * g_uPathStreamSize)); HANDLE_ERROR(cudaMalloc((void**)&g_devEyeLightConPathStream, sizeof(PTPathVertex*) * g_uPathStreamSize)); HANDLE_ERROR(cudaMemset((void*)g_devEyeLightConPathStream, 0, sizeof(PTPathVertex*) * g_uPathStreamSize)); } float* g_devResultData = nullptr; float* g_devAccResultData = nullptr; float* g_devResultVarKeyData = nullptr; uint* g_devPixelVarData = nullptr; float* g_devConvergedData = nullptr; uint* g_devSampleResultN = nullptr; NPMathHelper::Mat4x4 g_matLastCamMat; NPMathHelper::Mat4x4 g_matCurCamMat; uint32 g_uCurFrameN = 0; size_t g_resultDataSize = 0; uint32 WangHash(uint32 a) { a = (a ^ 61) ^ (a >> 16); a = a + (a << 3); a = a ^ (a >> 4); a = a * 0x27d4eb2d; a = a ^ (a >> 15); return a; } __global__ void pt_traceLight_kernel(RTVertex* vertices, RTTriangle* triangles, RTMaterial* materials, CURTTexture* textures, PTPathVertex** pathStream, uint activePathStreamSize, LightVertex* lightVertices, uint curLightVerticesSize) { uint x = blockIdx.x * blockDim.x + threadIdx.x; if (x >= activePathStreamSize || pathStream[x]->isTerminated) return; PTPathVertex* procVertex = pathStream[x]; CURay ray = CURay(procVertex->pathVertexPos, procVertex->pathOutDir); TracePrimitiveResult traceResult; if (TracePrimitive(ray, traceResult, M_INF, 
M_FLT_BIAS_EPSILON, false)) { RTTriangle* tri = &triangles[traceResult.triId]; RTMaterial* mat = &materials[tri->matInd]; RTVertex* v0 = &vertices[tri->vertInd0]; RTVertex* v1 = &vertices[tri->vertInd1]; RTVertex* v2 = &vertices[tri->vertInd2]; float2 uv0 = make_float2(v0->tex._x, v0->tex._y); float2 uv1 = make_float2(v1->tex._x, v1->tex._y); float2 uv2 = make_float2(v2->tex._x, v2->tex._y); float2 uv = uv0 * traceResult.w + uv1 * traceResult.u + uv2 * traceResult.v; float3 n0 = V32F3(v0->norm); float3 n1 = V32F3(v1->norm); float3 n2 = V32F3(v2->norm); float3 norm = n0 * traceResult.w + n1 * traceResult.u + n2 * traceResult.v; float3 triPos = V32F3(v0->pos) * traceResult.w + V32F3(v1->pos) * traceResult.u + V32F3(v2->pos) * traceResult.v; float3 diff; float3 emissive; float trans; float specular; float metallic; float roughness; float anisotropic; float sheen; float sheenTint; float clearcoat; float clearcoatGloss; GetMaterialColors(mat, uv, textures, diff, norm, emissive, trans, specular, metallic, roughness , anisotropic, sheen, sheenTint, clearcoat, clearcoatGloss); float3 nl = vecDot(norm, ray.dir) < 0.f ? norm : -1 * norm; lightVertices[curLightVerticesSize + x].irrad = procVertex->pathSample; lightVertices[curLightVerticesSize + x].irradDir = -1 * ray.dir; lightVertices[curLightVerticesSize + x].norm = nl; lightVertices[curLightVerticesSize + x].pos = triPos; lightVertices[curLightVerticesSize + x].diff = diff; lightVertices[curLightVerticesSize + x].emissive = emissive; lightVertices[curLightVerticesSize + x].specular = specular; lightVertices[curLightVerticesSize + x].metallic = metallic; lightVertices[curLightVerticesSize + x].roughness = roughness; lightVertices[curLightVerticesSize + x].pathPotential = procVertex->pathPotential; { // Get some random microfacet float3 hDir = ImportanceSampleGGX(make_float2(curand_uniform(&procVertex->randState), curand_uniform(&procVertex->randState)), roughness, nl); // Calculate flesnel float voH = vecDot(-1 * ray.dir, hDir); float3 f0 = vecLerp(0.08 * make_float3(specular, specular, specular), diff, metallic); float3 brdf_f = Fresnel(f0, voH); // PDF float NoH = vecDot(nl, hDir); float VoH = vecDot(-1 * ray.dir, hDir); float pdf = D_GGX(roughness, NoH) * NoH / (4.f * VoH); // Reflected or Refracted float reflProb = lerp(length(brdf_f), 1.0f, metallic); float refrProb = trans; float3 reflDir; float3 refrDir; CURay nextRay = ray; float3 lightMulTerm; RAYTYPE nextRayType = procVertex->pathType; if (refrProb > 0) { bool into = vecDot(nl, norm) > 0.f; float nt = specular * 0.8f + 1.f; float nc = 1.0f; float nnt = into ? 
nc / nt : nt / nc; float ddn = vecDot(hDir, ray.dir); float cos2t = 1.f - nnt * nnt *(1.f - ddn * ddn); if (cos2t < 0.f) { reflProb = 1.0f;//refrProb = 0.f; } else { refrDir = normalize(ray.dir * nnt - hDir * (ddn*nnt + sqrtf(cos2t))); } } if (reflProb > 0) { reflDir = normalize(ray.dir - hDir * 2 * vecDot(hDir, ray.dir)); if (vecDot(reflDir, nl) < 0.f) reflProb = 0.f; } // Reflected if (ProbabilityRand(&procVertex->randState, reflProb)) { nextRay = CURay(ray.orig + (traceResult.dist - M_FLT_BIAS_EPSILON) * ray.dir, reflDir); // ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(nextRay, vertices, triangles, materials, textures, randstate); // Microfacet specular = D*G*F / (4*NoL*NoV) // pdf = D * NoH / (4 * VoH) // (G * F * VoH) / (NoV * NoH) float NoV = vecDot(nl, -1 * ray.dir); float NoL = vecDot(nl, reflDir); float G = GeometricVisibility(roughness, NoV, NoL, VoH); //shadeResult = vecMul((brdf_f * G * VoH) / (NoV * NoH * reflProb) , nextRayResult.light) + emissive; lightMulTerm = (brdf_f * G * VoH) / (NoV * NoH * reflProb); nextRayType = RAYTYPE_SPEC; pdf *= reflProb; } // Diffused or Transmited else { // Transmited if (ProbabilityRand(&procVertex->randState, refrProb)) { nextRay = CURay(ray.orig + (traceResult.dist + M_FLT_BIAS_EPSILON) * ray.dir + refrDir * M_FLT_BIAS_EPSILON, refrDir); //ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(nextRay, vertices, triangles, materials, textures, randstate); float cosine = vecDot(-1 * nl, refrDir); //shadeResult = (cosine * vecMul(diff, nextRayResult.light)) / (refrProb * (1 - reflProb)) + emissive; lightMulTerm = cosine * diff / (refrProb * (1 - reflProb)); nextRayType = RAYTYPE_SPEC; pdf *= (refrProb * (1.f - reflProb)); } // Diffused else { float3 w = nl; float3 u = normalize(vecCross((fabs(w.x) > .1 ? 
make_float3(0, 1, 0) : make_float3(1, 0, 0)), w)); float3 v = vecCross(w, u); u = vecCross(v, w); float r1 = 2.f * M_PI * curand_uniform(&procVertex->randState); float r2cos = sqrtf(curand_uniform(&procVertex->randState)); float r2sin = 1.f - r2cos*r2cos; float3 diffDir = normalize(w * r2cos + u * r2sin * cosf(r1) + v * r2sin * sinf(r1)); nextRay = CURay(ray.orig + traceResult.dist * ray.dir + diffDir * M_FLT_BIAS_EPSILON, diffDir); //ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(nextRay, vertices, triangles, materials, textures, randstate); float VoH = vecDot(-1 * ray.dir, hDir); float NoV = vecDot(nl, -1 * ray.dir); float NoL = vecDot(nl, diffDir); //shadeResult = (M_PI * vecMul(Diffuse(diff, roughness, NoV, NoL, VoH), nextRayResult.light)) / ((1 - refrProb) * (1 - reflProb)) + emissive; lightMulTerm = M_PI * Diffuse(diff, roughness, NoV, NoL, VoH) / ((1 - refrProb) * (1 - reflProb)); nextRayType = RAYTYPE_DIFF; pdf *= ((1.f - refrProb) * (1.f - reflProb)) * vecDot(diffDir, nl); } } procVertex->pathSample = emissive + vecMul(procVertex->pathSample, lightMulTerm); procVertex->pathPotential *= pdf; float pixelContrib = length(procVertex->pathOutMulTerm) * length(lightMulTerm); if ((procVertex->pathType == RAYTYPE_DIFF && nextRayType == RAYTYPE_SPEC) || length(emissive) > 0.f) pixelContrib = 0.f; if (curand_uniform(&procVertex->randState) > pixelContrib || procVertex->pathSampleDepth + 1 >= NORMALRAY_BOUND_MAX) { procVertex->isTerminated = true; } else { procVertex->pathOutMulTerm = vecMul(procVertex->pathOutMulTerm, lightMulTerm); procVertex->pathOutDir = nextRay.dir; procVertex->pathVertexPos = nextRay.orig; procVertex->pathType = nextRayType; procVertex->pathSampleDepth++; } } } else { lightVertices[curLightVerticesSize + x] = lightVertices[procVertex->pathPixel.x]; procVertex->isTerminated = true; } } __global__ void pt_traceSample_kernel(RTVertex* vertices, RTTriangle* triangles, RTMaterial* materials, CURTTexture* textures, PTPathVertex** pathStream, uint activePathStreamSize) { uint x = blockIdx.x * blockDim.x + threadIdx.x; if (x >= activePathStreamSize || pathStream[x]->isTerminated) return; PTPathVertex* procVertex = pathStream[x]; CURay ray = CURay(procVertex->pathVertexPos, procVertex->pathOutDir); TracePrimitiveResult traceResult; if (TracePrimitive(ray, traceResult, M_INF, M_FLT_BIAS_EPSILON, false)) { RTTriangle* tri = &triangles[traceResult.triId]; RTMaterial* mat = &materials[tri->matInd]; RTVertex* v0 = &vertices[tri->vertInd0]; RTVertex* v1 = &vertices[tri->vertInd1]; RTVertex* v2 = &vertices[tri->vertInd2]; float2 uv0 = make_float2(v0->tex._x, v0->tex._y); float2 uv1 = make_float2(v1->tex._x, v1->tex._y); float2 uv2 = make_float2(v2->tex._x, v2->tex._y); float2 uv = uv0 * traceResult.w + uv1 * traceResult.u + uv2 * traceResult.v; float3 n0 = V32F3(v0->norm); float3 n1 = V32F3(v1->norm); float3 n2 = V32F3(v2->norm); float3 norm = n0 * traceResult.w + n1 * traceResult.u + n2 * traceResult.v; float3 diff; float3 emissive; float trans; float specular; float metallic; float roughness; float anisotropic; float sheen; float sheenTint; float clearcoat; float clearcoatGloss; GetMaterialColors(mat, uv, textures, diff, norm, emissive, trans, specular, metallic, roughness , anisotropic, sheen, sheenTint, clearcoat, clearcoatGloss); float3 nl = vecDot(norm, ray.dir) < 0.f ? 
norm : -1 * norm; { // Get some random microfacet float3 hDir = ImportanceSampleGGX(make_float2(curand_uniform(&procVertex->randState), curand_uniform(&procVertex->randState)), roughness, nl); // Calculate flesnel float voH = vecDot(-1 * ray.dir, hDir); float3 f0 = vecLerp(0.08 * make_float3(specular, specular, specular), diff, metallic); float3 brdf_f = Fresnel(f0, voH); // PDF float NoH = vecDot(nl, hDir); float VoH = vecDot(-1 * ray.dir, hDir); float pdf = D_GGX(roughness, NoH) * NoH / (4.f * VoH); // Reflected or Refracted float reflProb = lerp(length(brdf_f), 1.0f, metallic); float refrProb = trans; float3 reflDir; float3 refrDir; CURay nextRay = ray; float3 lightMulTerm; RAYTYPE nextRayType = procVertex->pathType; if (refrProb > 0) { bool into = vecDot(nl, norm) > 0.f; float nt = specular * 0.8f + 1.f; float nc = 1.0f; float nnt = into ? nc / nt : nt / nc; float ddn = vecDot(hDir, ray.dir); float cos2t = 1.f - nnt * nnt *(1.f - ddn * ddn); if (cos2t < 0.f) { reflProb = 1.0f;//refrProb = 0.f; } else { refrDir = normalize(ray.dir * nnt - hDir * (ddn*nnt + sqrtf(cos2t))); } } if (reflProb > 0) { reflDir = normalize(ray.dir - hDir * 2 * vecDot(hDir, ray.dir)); if (vecDot(reflDir, nl) < 0.f) reflProb = 0.f; } // Reflected if (ProbabilityRand(&procVertex->randState, reflProb)) { nextRay = CURay(ray.orig + (traceResult.dist - M_FLT_BIAS_EPSILON) * ray.dir, reflDir); // ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(nextRay, vertices, triangles, materials, textures, randstate); // Microfacet specular = D*G*F / (4*NoL*NoV) // pdf = D * NoH / (4 * VoH) // (G * F * VoH) / (NoV * NoH) float NoV = vecDot(nl, -1 * ray.dir); float NoL = vecDot(nl, reflDir); float G = GeometricVisibility(roughness, NoV, NoL, VoH); //shadeResult = vecMul((brdf_f * G * VoH) / (NoV * NoH * reflProb) , nextRayResult.light) + emissive; lightMulTerm = (brdf_f * G * VoH) / (NoV * NoH * reflProb); nextRayType = RAYTYPE_SPEC; pdf *= reflProb; } // Diffused or Transmited else { // Transmited if (ProbabilityRand(&procVertex->randState, refrProb)) { nextRay = CURay(ray.orig + (traceResult.dist + M_FLT_BIAS_EPSILON) * ray.dir + refrDir * M_FLT_BIAS_EPSILON, refrDir); //ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(nextRay, vertices, triangles, materials, textures, randstate); float cosine = vecDot(-1 * nl, refrDir); //shadeResult = (cosine * vecMul(diff, nextRayResult.light)) / (refrProb * (1 - reflProb)) + emissive; lightMulTerm = cosine * diff / (refrProb * (1 - reflProb)); nextRayType = RAYTYPE_SPEC; pdf *= (refrProb * (1.f - reflProb)); } // Diffused else { float3 w = nl; float3 u = normalize(vecCross((fabs(w.x) > .1 ? 
make_float3(0, 1, 0) : make_float3(1, 0, 0)), w)); float3 v = vecCross(w, u); u = vecCross(v, w); float r1 = 2.f * M_PI * curand_uniform(&procVertex->randState); float r2cos = sqrtf(curand_uniform(&procVertex->randState)); float r2sin = 1.f - r2cos*r2cos; float3 diffDir = normalize(w * r2cos + u * r2sin * cosf(r1) + v * r2sin * sinf(r1)); nextRay = CURay(ray.orig + traceResult.dist * ray.dir + diffDir * M_FLT_BIAS_EPSILON, diffDir); //ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(nextRay, vertices, triangles, materials, textures, randstate); float VoH = vecDot(-1 * ray.dir, hDir); float NoV = vecDot(nl, -1 * ray.dir); float NoL = vecDot(nl, diffDir); //shadeResult = (M_PI * vecMul(Diffuse(diff, roughness, NoV, NoL, VoH), nextRayResult.light)) / ((1 - refrProb) * (1 - reflProb)) + emissive; lightMulTerm = M_PI * Diffuse(diff, roughness, NoV, NoL, VoH) / ((1 - refrProb) * (1 - reflProb)); nextRayType = RAYTYPE_DIFF; pdf *= ((1.f - refrProb) * (1.f - reflProb)) * vecDot(diffDir, nl); } } procVertex->pathSample = procVertex->pathSample + vecMul(emissive , procVertex->pathOutMulTerm); procVertex->origDiff = diff; procVertex->pathInDir = -1 * ray.dir; procVertex->origNorm = nl; procVertex->origRoughness = roughness; procVertex->origMetallic = metallic; procVertex->origSpecular = specular; procVertex->origTrans = trans; procVertex->pathInMulTerm = procVertex->pathOutMulTerm; procVertex->pathPotential *= pdf; float pixelContrib = length(procVertex->pathOutMulTerm) * length(lightMulTerm); if ((procVertex->pathType == RAYTYPE_DIFF && nextRayType == RAYTYPE_SPEC) || length(emissive) > 0.f) pixelContrib = 0.f; if (curand_uniform(&procVertex->randState) > pixelContrib || procVertex->pathSampleDepth + 1 >= NORMALRAY_BOUND_MAX) { procVertex->pathAccumSample = procVertex->pathAccumSample + procVertex->pathSample; procVertex->pathAccumPotential = procVertex->pathAccumPotential + procVertex->pathPotential; procVertex->pathSampleN++; procVertex->isTerminated = true; } else { procVertex->pathOutMulTerm = vecMul(procVertex->pathOutMulTerm, lightMulTerm); procVertex->pathOutDir = nextRay.dir; procVertex->pathSampleDepth++; } procVertex->pathVertexPos = nextRay.orig; procVertex->pathType = nextRayType; } } else { procVertex->pathAccumSample = procVertex->pathAccumSample + procVertex->pathSample; procVertex->pathAccumPotential = procVertex->pathAccumPotential + procVertex->pathPotential; procVertex->pathSampleN++; procVertex->isTerminated = true; } } __global__ void pt_genLightPathQueue_kernel(uint32 frameN, uint32 hashedFrameN, uint* lightTri, uint lightTriN, RTVertex* vertices, RTTriangle* triangles, RTMaterial* materials, CURTTexture* textures, PTPathVertex* pathQueue, uint pathQueueCap, LightVertex* lightVertices, uint curLightVerticesSize) { uint x = blockIdx.x * blockDim.x + threadIdx.x; if (x > pathQueueCap) return; curandState randstate; curand_init(hashedFrameN + x, 0, 0, &randstate); uint lightSourceId = curand_uniform(&randstate) * lightTriN; float lightW = curand_uniform(&randstate); float lightU = curand_uniform(&randstate); if (lightW + lightU > 1.0f) { lightW = 1.f - lightW; lightU = 1.f - lightU; } float lightV = 1.f - lightW - lightU; uint triId = lightTri[lightSourceId]; RTTriangle* tri = &triangles[triId]; RTMaterial* mat = &materials[tri->matInd]; RTVertex* v0 = &vertices[tri->vertInd0]; RTVertex* v1 = &vertices[tri->vertInd1]; RTVertex* v2 = &vertices[tri->vertInd2]; float2 uv0 = make_float2(v0->tex._x, v0->tex._y); float2 uv1 = make_float2(v1->tex._x, v1->tex._y); float2 uv2 = 
make_float2(v2->tex._x, v2->tex._y); float2 uv = uv0 * lightW + uv1 * lightU + uv2 * lightV; float3 n0 = V32F3(v0->norm); float3 n1 = V32F3(v1->norm); float3 n2 = V32F3(v2->norm); float3 triNorm = n0 * lightW + n1 * lightU + n2 * lightV; float3 triPos = V32F3(v0->pos) * lightW + V32F3(v1->pos) * lightU + V32F3(v2->pos) * lightV; float3 diff; float3 emissive; float trans; float specular; float metallic; float roughness; float anisotropic; float sheen; float sheenTint; float clearcoat; float clearcoatGloss; GetMaterialColors(mat, uv, textures, diff, triNorm, emissive, trans, specular, metallic, roughness , anisotropic, sheen, sheenTint, clearcoat, clearcoatGloss); float3 w = triNorm; float3 u = normalize(vecCross((fabs(w.x) > .1 ? make_float3(0, 1, 0) : make_float3(1, 0, 0)), w)); float3 v = vecCross(w, u); u = vecCross(v, w); float r1 = 2.f * M_PI * curand_uniform(&randstate); float r2cos = sqrtf(curand_uniform(&randstate)); float r2sin = 1.f - r2cos*r2cos; float3 diffDir = normalize(w * r2cos + u * r2sin * cosf(r1) + v * r2sin * sinf(r1)); pathQueue[x] = PTPathVertex(false, make_uint2(curLightVerticesSize + x, 0), diffDir , triPos + M_FLT_BIAS_EPSILON * triNorm, RAYTYPE_LIGHT, randstate); pathQueue[x].pathSample = emissive; lightVertices[curLightVerticesSize + x].irrad = emissive; lightVertices[curLightVerticesSize + x].irradDir = make_float3(0.f, 0.f, 0.f); lightVertices[curLightVerticesSize + x].norm = triNorm; lightVertices[curLightVerticesSize + x].pos = triPos; lightVertices[curLightVerticesSize + x].diff = diff; lightVertices[curLightVerticesSize + x].emissive = emissive; lightVertices[curLightVerticesSize + x].specular = specular; lightVertices[curLightVerticesSize + x].metallic = metallic; lightVertices[curLightVerticesSize + x].roughness = roughness; } __global__ void pt_genPathQueue_kernel(float3 camPos, float3 camDir, float3 camUp, float3 camRight, float fov, float width, float height, uint32 frameN, uint32 hashedFrameN, PTPathVertex* pathQueue) { uint x = blockIdx.x * blockDim.x + threadIdx.x; uint y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) return; uint ind = (y * width + x); float u = (2.f * ((float)x + 0.5f) / width - 1.f) * tan(fov * 0.5f) * width / height; float v = (2.f * ((float)y + 0.5f) / height - 1.f) * tan(fov * 0.5f); curandState randstate; curand_init(hashedFrameN + ind, 0, 0, &randstate); float au = u + (curand_uniform(&randstate) - 0.5f) / height * tan(fov * 0.5f); float av = v + (curand_uniform(&randstate) - 0.5f) / height * tan(fov * 0.5f); float3 dir = normalize(camRight * au + camUp * av + camDir); pathQueue[ind] = PTPathVertex(false, make_uint2(x,y), dir, camPos, RAYTYPE_EYE, randstate); } __global__ void pt_fillTempAdapPathQueue_kernel(uint* pathQueue, uint fillSize) { uint ind = blockIdx.x * blockDim.x + threadIdx.x; if (ind >= fillSize) return; pathQueue[ind] = ind; } __global__ void pt_genTempAdapPathQueue_kernel(float width, float height, uint32 hashedFrameN, uint32 seedoffset , float* genChance, uint* pathQueue, float minProb = 0.f, float mulRand = 1.f) { uint x = blockIdx.x * blockDim.x + threadIdx.x; uint y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) return; uint ind = (y * width + x); curandState randstate; curand_init(hashedFrameN + ind + seedoffset, 0, 0, &randstate); pathQueue[ind] = x + y * width; //float modChance = 1.f - expf(-genChance[ind]); if (curand_uniform(&randstate)*mulRand > fmaxf(genChance[ind], minProb)) { pathQueue[ind] = 0 - 1; } } __global__ void 
pt_genTempAdapPathQueueByKey_kernel(uint size, uint32 hashedFrameN, uint32 seedoffset, uint genSize, float* genChance, uint* genChancePixel, uint* pathQueue, float minProb = 0.f, float mulRand = 1.f) { uint x = blockIdx.x * blockDim.x + threadIdx.x; if (x >= size) return; uint genInd = size - 1 - x % genSize; //curandState randstate; //curand_init(hashedFrameN + x + seedoffset, 0, 0, &randstate); uint ind = genChancePixel[genInd]; pathQueue[x] = ind; //float modChance = 1.f - expf(-genChance[ind]); //if (curand_uniform(&randstate)*mulRand > fmaxf(genChance[ind], minProb)) //{ // pathQueue[x] = 0 - 1; //} } __global__ void pt_convTempPathQueue_kernel(float3 camPos, float3 camDir, float3 camUp, float3 camRight, float fov, float width, float height, uint32 frameN, uint32 hashedFrameN, uint* tempPathQueue, uint tempPathQueueSize, PTPathVertex* pathQueue) { uint ind = blockIdx.x * blockDim.x + threadIdx.x; if (ind >= tempPathQueueSize) return; uint pathInd = tempPathQueue[ind]; uint y = pathInd / width; uint x = pathInd - y * width; float u = (2.f * ((float)x + 0.5f) / width - 1.f) * tan(fov * 0.5f) * width / height; float v = (2.f * ((float)y + 0.5f) / height - 1.f) * tan(fov * 0.5f); curandState randstate; curand_init(hashedFrameN + ind, 0, 0, &randstate); float au = u + (curand_uniform(&randstate) - 0.5f) / height * tan(fov * 0.5f); float av = v + (curand_uniform(&randstate) - 0.5f) / height * tan(fov * 0.5f); float3 dir = normalize(camRight * au + camUp * av + camDir); pathQueue[ind] = PTPathVertex(false, make_uint2(x, y), dir, camPos, RAYTYPE_EYE, randstate); } __device__ float3 GetShadingResult(const float3& lightOutDir, const float3& lightInDir, const float3& lightInIrrad, const float3& norm, const float3& diff, const float metallic, const float roughness, const float specular, const float2 diffspec) { if (vecDot(norm, lightInDir) <= 0.f) return make_float3(0.f, 0.f, 0.f); float3 h = normalize(lightOutDir + lightInDir); float voH = vecDot(lightOutDir, h); float noV = vecDot(norm, lightOutDir); float noH = vecDot(norm, h); float noL = vecDot(norm, lightInDir); float3 f0 = vecLerp(0.08f * specular * make_float3(1.f, 1.f, 1.f), diff, metallic); float3 brdf_f = Fresnel(f0, voH); //float g = GeometricVisibility(roughness, noV, noL, voH); float d = D_GGX(roughness, noH); float v = Vis_SmithJointApprox(roughness, noV, noL); // Microfacet specular = D*G*F / (4*NoL*NoV) float3 specIrrad = d*v*brdf_f;// vecMul(d*g*brdf_f / (4.f * noV), lightInIrrad); float3 diffIrrad = vecMul((make_float3(1.f, 1.f, 1.f) - brdf_f), Diffuse(diff, roughness, noV, noL, voH));//vecMul((make_float3(1.f, 1.f, 1.f) - brdf_f), diff / M_PI); return vecMul(lightInIrrad*noL, diffspec.y*specIrrad + diffspec.x*diffIrrad); } __device__ void GetLightFromRandLightVertices(float3 pos, float3 norm, LightVertex* lightVertices, uint lightVerticesSize, curandState* randstate, float3& irrad, float3& irradDir, float& pathPotential) { //LightVertex dummy; //dummy.diff = make_float3(1.f, 1.f, 1.f); //dummy.irrad = make_float3(1.f, 0.f, 0.f); //dummy.pos = make_float3(0.f, 0.f, 0.f); //dummy.norm = dummy.irradDir = normalize(pos - dummy.pos); //dummy.roughness = 0.5f; //dummy.specular = 0.5f; //dummy.metallic = 0.f; irrad = make_float3(0.f, 0.f, 0.f); uint lightVert = curand_uniform(randstate) * lightVerticesSize; LightVertex* lightVertex = &lightVertices[lightVert]; float3 toLightVertexDir = normalize(lightVertex->pos - pos); float toLightVertexDist = length(lightVertex->pos - pos); CURay toLightVertex(pos, toLightVertexDir); 
TracePrimitiveResult traceResult; if (length(lightVertex->irrad) > 0.f && vecDot(norm, toLightVertexDir) > 0.f && !TracePrimitive(toLightVertex, traceResult, toLightVertexDist - M_FLT_BIAS_EPSILON, M_FLT_BIAS_EPSILON, false)) { if (length(lightVertex->irradDir) > M_FLT_EPSILON) irrad = GetShadingResult(-1 * toLightVertexDir, lightVertex->irradDir, lightVertex->irrad, lightVertex->norm , lightVertex->diff, lightVertex->metallic, lightVertex->roughness, lightVertex->specular, make_float2(1.f, 1.f)) + lightVertex->emissive; else irrad = lightVertex->irrad; irradDir = toLightVertexDir; pathPotential = lightVertex->pathPotential; } } __global__ void pt_connectEyeLightPath_kernel(PTPathVertex** eyeStream, uint eyeStreamSize, LightVertex* lightVertices, uint lightVerticesSize) { uint ind = blockIdx.x * blockDim.x + threadIdx.x; if (ind >= eyeStreamSize) return; PTPathVertex* eyePath = eyeStream[ind]; float3 lightFromLightVertex = make_float3(0.f, 0.f, 0.f); float3 toLightVertexDir = make_float3(0.f, 0.f, 0.f); float lightPathPotential = 1.f; GetLightFromRandLightVertices(eyePath->pathVertexPos + eyePath->origNorm * M_FLT_BIAS_EPSILON, eyePath->origNorm , lightVertices, lightVerticesSize, &eyePath->randState, lightFromLightVertex, toLightVertexDir, lightPathPotential); float3 lightContribFromLightVertex = vecMax(make_float3(0.f, 0.f, 0.f) , GetShadingResult(eyePath->pathInDir, toLightVertexDir, lightFromLightVertex, eyePath->origNorm , eyePath->origDiff, eyePath->origMetallic, eyePath->origRoughness, eyePath->origSpecular , make_float2(1.f - eyePath->origTrans, 1.f))); if (length(lightContribFromLightVertex) > 0.f) { eyePath->pathAccumSample = eyePath->pathAccumSample + vecMul(lightContribFromLightVertex, eyePath->pathInMulTerm); eyePath->pathSampleN += 4; eyePath->pathPotential *= lightPathPotential; } } __global__ void pt_assignPathStream_kernel(PTPathVertex** pathStream, uint pathStreamSize, PTPathVertex* pathQueue, uint pathQueueCur, uint pathQueueSize, uint assignableSlot) { uint ind = blockIdx.x * blockDim.x + threadIdx.x; if (ind < assignableSlot) { int pathStreamInd = pathStreamSize + ind; int pathQueueInd = pathQueueCur + ind; PTPathVertex* assignSample = nullptr; if (pathQueueInd < pathQueueSize) { assignSample = &pathQueue[pathQueueInd]; } pathStream[pathStreamInd] = assignSample; } } __global__ void pt_applyPixelProbToResult_kernel(uint width, uint height, float* result, float* varResult, float minProb = 0.f) { uint x = blockIdx.x * blockDim.x + threadIdx.x; uint y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) return; uint ind = (y * width + x); result[ind * 3] = result[ind * 3 + 1] = result[ind * 3 + 2] = fmaxf(minProb, varResult[ind]); } __global__ void pt_debugTracedPathQueueResult_kernel(PTPathVertex* pathQueue, uint pathQueueSize, uint width, uint height, uint frameN, float* result, float* accResult, float* varResult, uint* sampleResultN) { uint x = blockIdx.x * blockDim.x + threadIdx.x; if (x >= pathQueueSize) return; // add calculating sample to the result if (!pathQueue[x].isTerminated) { pathQueue[x].pathAccumSample = pathQueue[x].pathAccumSample + pathQueue[x].pathSample; pathQueue[x].pathAccumPotential = pathQueue[x].pathAccumPotential + pathQueue[x].pathPotential; pathQueue[x].pathSampleN++; } if (pathQueue[x].pathSampleN > 0) { uint ind = pathQueue[x].pathPixel.y * width + pathQueue[x].pathPixel.x; if (!frameN) { sampleResultN[ind] = 0; } uint tempNextSampleResultN = sampleResultN[ind] + pathQueue[x].pathSampleN; float3 sampleResult = 
make_float3(1.f,1.f,1.f); float potentialResult = 1.f - pathQueue[x].pathAccumPotential; float resultInf = 1.f / (float)(tempNextSampleResultN); float oldInf = sampleResultN[ind] * resultInf; result[ind * 3] = max(resultInf * sampleResult.x + oldInf * result[ind * 3], 0.f); result[ind * 3 + 1] = max(resultInf * sampleResult.y + oldInf * result[ind * 3 + 1], 0.f); result[ind * 3 + 2] = max(resultInf * sampleResult.z + oldInf * result[ind * 3 + 2], 0.f); //varResult[ind] = max(resultInf * potentialResult + oldInf * varResult[ind], 0.f); sampleResultN[ind] = tempNextSampleResultN; } } __global__ void pt_applyPathQueueResult_kernel(PTPathVertex* pathQueue, uint pathQueueSize, uint width, uint height, uint frameN, float* result, float* accResult, float* varResult, uint* sampleResultN) { uint x = blockIdx.x * blockDim.x + threadIdx.x; if (x >= pathQueueSize) return; // add calculating sample to the result if (!pathQueue[x].isTerminated) { pathQueue[x].pathAccumSample = pathQueue[x].pathAccumSample + pathQueue[x].pathSample; pathQueue[x].pathAccumPotential = pathQueue[x].pathAccumPotential + pathQueue[x].pathPotential; pathQueue[x].pathSampleN++; } if (pathQueue[x].pathSampleN > 0) { uint ind = pathQueue[x].pathPixel.y * width + pathQueue[x].pathPixel.x; if (!frameN) { sampleResultN[ind] = 0; } uint tempNextSampleResultN = sampleResultN[ind] + pathQueue[x].pathSampleN; if (tempNextSampleResultN > sampleResultN[ind]) { float3 sampleResult = pathQueue[x].pathAccumSample; float potentialResult = 1.f - pathQueue[x].pathAccumPotential; float resultInf = 1.f / (float)(tempNextSampleResultN); float oldInf = sampleResultN[ind] * resultInf; result[ind * 3] = max(resultInf * sampleResult.x + oldInf * result[ind * 3], 0.f); result[ind * 3 + 1] = max(resultInf * sampleResult.y + oldInf * result[ind * 3 + 1], 0.f); result[ind * 3 + 2] = max(resultInf * sampleResult.z + oldInf * result[ind * 3 + 2], 0.f); //varResult[ind] = max(resultInf * potentialResult + oldInf * varResult[ind], 0.f); sampleResultN[ind] = tempNextSampleResultN; } } } __global__ void pt_calculateSquareError_kernel(float* correctData, float* sampleData, float* resultData, uint* resultPixel, uint dataSize) { uint x = blockIdx.x * blockDim.x + threadIdx.x; if (x >= dataSize) return; resultData[x] = /*fminf(*/((correctData[x * 3] - sampleData[x * 3]) * (correctData[x * 3] - sampleData[x * 3]) + (correctData[x * 3 + 1] - sampleData[x * 3 + 1]) * (correctData[x * 3 + 1] - sampleData[x * 3 + 1]) + (correctData[x * 3 + 2] - sampleData[x * 3 + 2]) * (correctData[x * 3 + 2] - sampleData[x * 3 + 2]) ) / 3.f/*, 1.f)*/; resultPixel[x] = x; } void CleanMem() { freeLightPathMem(); freeStreamMem(); freeAllBVHCudaMem(); CUFREE(g_devConvergedData); CUFREE(g_devSampleResultN); CUFREE(g_devPixelVarData); CUFREE(g_devResultVarKeyData); CUFREE(g_devResultData); CUFREE(g_devAccResultData); } //struct ray_greater_compare //{ // __hd__ bool operator()(const PTPathVertex* vert1, const PTPathVertex* vert2) // { // int vert1Score = (vert1->pathOutDir.x > 0) + (vert1->pathOutDir.y > 0) + (vert1->pathOutDir.z > 0); // int vert2Score = (vert2->pathOutDir.x > 0) + (vert2->pathOutDir.y > 0) + (vert2->pathOutDir.z > 0); // return vert1Score > vert2Score; // } //}; struct is_temppathqueue_terminated { __hd__ bool operator()(const uint& vert) { return (vert+1 == 0); } }; struct is_terminated { __hd__ bool operator()(const PTPathVertex* vert) { return vert->isTerminated; } }; struct is_connectToLightPath { __hd__ bool operator()(const PTPathVertex* vert) { return vert->pathType 
== RAYTYPE_DIFF; } }; void TracePathQueue(uint pathQueueSize) { dim3 block1(BLOCK_SIZE*BLOCK_SIZE, 1, 1); dim3 block2(BLOCK_SIZE, BLOCK_SIZE, 1); uint activePathStreamSize = 0; g_uPathQueueCur = 0; while (g_uPathQueueCur < pathQueueSize || activePathStreamSize > 0) { uint tempActivePathStreamSize = activePathStreamSize; int assignableStreamSlot = min((uint)PATHSTREAM_SIZE - activePathStreamSize, pathQueueSize - g_uPathQueueCur); if (assignableStreamSlot > 0) pt_assignPathStream_kernel << < dim3(ceil((float)assignableStreamSlot / (float)block1.x), 1, 1), block1 >> >(g_devPathStream, activePathStreamSize, g_devPathQueue, g_uPathQueueCur , pathQueueSize, assignableStreamSlot); //readjust activePathStreamSize activePathStreamSize += assignableStreamSlot; g_uPathQueueCur += assignableStreamSlot; //tracing process pt_traceSample_kernel << < dim3(ceil((float)activePathStreamSize / (float)block1.x), 1, 1), block1 >> > (g_devVertices, g_devTriangles, g_devMaterials, g_devTextures, g_devPathStream, activePathStreamSize); //compact pathstream and find activePathStreamSize value PTPathVertex** compactedStreamEndItr = thrust::remove_if(thrust::device, g_devPathStream, g_devPathStream + activePathStreamSize, is_terminated()); activePathStreamSize = compactedStreamEndItr - g_devPathStream; //gen connectionpathstream PTPathVertex** conPathStreamEndItr = thrust::copy_if(thrust::device, g_devPathStream, g_devPathStream + activePathStreamSize, g_devEyeLightConPathStream, is_connectToLightPath()); uint activeConPathStreamSize = conPathStreamEndItr - g_devEyeLightConPathStream; //connect eye and light path stream if (activeConPathStreamSize > 0) { pt_connectEyeLightPath_kernel << < dim3(ceil((float)activeConPathStreamSize / (float)block1.x), 1, 1), block1 >> > (g_devEyeLightConPathStream, activeConPathStreamSize, g_devLightVertices, g_uLightVerticesSize); } } } bool Render(NPMathHelper::Vec3 camPos, NPMathHelper::Vec3 camDir, NPMathHelper::Vec3 camUp, float fov, RTScene* scene , float width, float height, float* result) { // Check and allocate everything if (!scene || !scene->GetCompactBVH()->IsValid() || !g_fConvergedResult) return false; NPMathHelper::Vec3 camRight = camDir.cross(camUp).normalize(); camUp = camRight.cross(camDir).normalize(); g_matLastCamMat = g_matCurCamMat; g_matCurCamMat = NPMathHelper::Mat4x4::lookAt(camPos, camPos + camDir, camUp); g_uCurFrameN = (g_matLastCamMat != g_matCurCamMat) ? 
0 : g_uCurFrameN + 1; if (!g_bIsCudaInit || scene->GetIsCudaDirty()) { CleanMem(); g_matLastCamMat = g_matCurCamMat; g_uCurFrameN = 0; initAllSceneCudaMem(scene); allocateStreamMem(width * height); allocateLightPathMem(); updateLightTriCudaMem(scene); size_t mem_tot; size_t mem_free; cudaMemGetInfo(&mem_free, &mem_tot); std::cout << "Memory Used : " << mem_tot-mem_free << "/" << mem_tot << " -> Free " << mem_free << std::endl; } else if (scene->GetIsCudaMaterialDirty()) { updateAllSceneMaterialsCudaMem(scene); updateLightTriCudaMem(scene); g_uCurFrameN = 0; } if (!g_bIsCudaInit) return false; if (!g_devResultData || !g_devAccResultData || g_resultDataSize != (sizeof(float) * 3 * width * height) || !g_devConvergedData || !g_devPixelVarData) { g_resultDataSize = sizeof(float) * 3 * width * height; CUFREE(g_devResultData); HANDLE_ERROR(cudaMalloc((void**)&g_devResultData, g_resultDataSize)); CUFREE(g_devAccResultData); HANDLE_ERROR(cudaMalloc((void**)&g_devAccResultData, g_resultDataSize)); CUFREE(g_devPixelVarData); HANDLE_ERROR(cudaMalloc((void**)&g_devPixelVarData, sizeof(uint) * width * height)); CUFREE(g_devResultVarKeyData); HANDLE_ERROR(cudaMalloc((void**)&g_devResultVarKeyData, sizeof(float) * width * height)); CUFREE(g_devSampleResultN); HANDLE_ERROR(cudaMalloc((void**)&g_devSampleResultN, sizeof(uint) * width * height)); CUFREE(g_devConvergedData); HANDLE_ERROR(cudaMalloc((void**)&g_devConvergedData, g_resultDataSize)); } float3 f3CamPos = V32F3(camPos); float3 f3CamUp = V32F3(camUp); float3 f3CamDir = V32F3(camDir); float3 f3CamRight = V32F3(camRight); dim3 block1(BLOCK_SIZE*BLOCK_SIZE, 1, 1); dim3 block2(BLOCK_SIZE, BLOCK_SIZE, 1); dim3 renderGrid(ceil(width / (float)block2.x), ceil(height / (float)block2.y), 1); // light paths if (g_uCurFrameN % 3 == 0) { uint lightPathStreamSizeCap = min((uint)PATHSTREAM_SIZE, (uint)(LIGHTVERTEX_N / LIGHTRAY_BOUND_MAX)); pt_genLightPathQueue_kernel << < dim3(ceil((float)lightPathStreamSizeCap / (float)block1.x), 1, 1), block1 >> > (g_uCurFrameN, WangHash(g_uCurFrameN), g_devLightTri, g_lightTriN, g_devVertices, g_devTriangles, g_devMaterials, g_devTextures, g_devPathQueue, lightPathStreamSizeCap , g_devLightVertices, 0); HANDLE_KERNEL_ERROR(); uint activePathStreamSize = 0; g_uLightVerticesSize = lightPathStreamSizeCap; g_uPathQueueCur = 0; while (g_uPathQueueCur < lightPathStreamSizeCap || activePathStreamSize > 0) { uint tempActivePathStreamSize = activePathStreamSize; int assignableStreamSlot = min(lightPathStreamSizeCap - activePathStreamSize, lightPathStreamSizeCap - g_uPathQueueCur); if (assignableStreamSlot > 0) { pt_assignPathStream_kernel << < dim3(ceil((float)assignableStreamSlot / (float)block1.x), 1, 1), block1 >> >(g_devPathStream, activePathStreamSize, g_devPathQueue, g_uPathQueueCur , g_uLightVerticesSize, assignableStreamSlot); HANDLE_KERNEL_ERROR(); } //readjust activePathStreamSize activePathStreamSize += assignableStreamSlot; g_uPathQueueCur += assignableStreamSlot; pt_traceLight_kernel << < dim3(ceil((float)activePathStreamSize / (float)block1.x), 1, 1), block1 >> > (g_devVertices, g_devTriangles, g_devMaterials, g_devTextures, g_devPathStream, activePathStreamSize , g_devLightVertices, g_uLightVerticesSize); HANDLE_KERNEL_ERROR(); g_uLightVerticesSize += activePathStreamSize; //compact pathstream and find activePathStreamSize value PTPathVertex** compactedStreamEndItr = thrust::remove_if(thrust::device, g_devPathStream, g_devPathStream + activePathStreamSize, is_terminated()); activePathStreamSize = compactedStreamEndItr - 
g_devPathStream; } //std::cout << "Generated light vertices size: " << g_uLightVerticesSize << std::endl; } if (g_uCurFrameN == 0) { cudaMemcpy(g_devConvergedData, g_fConvergedResult, sizeof(float) * 3 * (uint)width * (uint)height, cudaMemcpyHostToDevice); //float time; //cudaEvent_t start, stop; //HANDLE_ERROR(cudaEventCreate(&start)); //HANDLE_ERROR(cudaEventCreate(&stop)); uint useQueueSize = width * height; //HANDLE_ERROR(cudaEventRecord(start, 0)); // eye paths pt_genPathQueue_kernel << < renderGrid, block2 >> > (f3CamPos, f3CamDir, f3CamUp, f3CamRight, fov, width, height , g_uCurFrameN, WangHash(g_uCurFrameN), g_devPathQueue); HANDLE_KERNEL_ERROR(); //HANDLE_ERROR(cudaEventRecord(stop, 0)); //HANDLE_ERROR(cudaEventSynchronize(stop)); //HANDLE_ERROR(cudaEventElapsedTime(&time, start, stop)); //std::cout << "gen path: " << time << std::endl; //HANDLE_ERROR(cudaEventRecord(start, 0)); // trace path queue TracePathQueue(useQueueSize); //HANDLE_ERROR(cudaEventRecord(stop, 0)); //HANDLE_ERROR(cudaEventSynchronize(stop)); //HANDLE_ERROR(cudaEventElapsedTime(&time, start, stop)); //std::cout << "trace path: " << time << std::endl; //HANDLE_ERROR(cudaEventRecord(start, 0)); pt_applyPathQueueResult_kernel << < dim3(ceil((float)useQueueSize / (float)block1.x), 1, 1), block1 >> > (g_devPathQueue, useQueueSize, width, height, g_uCurFrameN, g_devResultData, g_devAccResultData, g_devResultVarKeyData, g_devSampleResultN); HANDLE_KERNEL_ERROR(); //HANDLE_ERROR(cudaEventRecord(stop, 0)); //HANDLE_ERROR(cudaEventSynchronize(stop)); //HANDLE_ERROR(cudaEventElapsedTime(&time, start, stop)); //std::cout << "accum path: " << time << std::endl; } else { //float time; //cudaEvent_t start, stop; //HANDLE_ERROR(cudaEventCreate(&start)); //HANDLE_ERROR(cudaEventCreate(&stop)); // calculate sampling map from converged result pt_calculateSquareError_kernel << < dim3(ceil((float)(width * height) / (float)block1.x), 1, 1), block1 >> > (g_devConvergedData, g_devResultData, g_devResultVarKeyData, g_devPixelVarData, (uint)(width * height)); HANDLE_KERNEL_ERROR(); //thrust::sort(thrust::device, g_devResultVarKeyData, g_devResultVarKeyData + (uint)(width * height)); thrust::sort_by_key(thrust::device, g_devResultVarKeyData, g_devResultVarKeyData + (uint)(width * height), g_devPixelVarData); float sumMSE = thrust::reduce(thrust::device, g_devResultVarKeyData, g_devResultVarKeyData + (uint)(width * height), 0.f, thrust::plus<float>()); float maxMSE = thrust::reduce(thrust::device, g_devResultVarKeyData, g_devResultVarKeyData + (uint)(width * height), 0.f, thrust::maximum<float>()); float meanMSE = sumMSE / (width * height); std::cout << "maxMSE: " << maxMSE << "\n"; std::cout << "meanMSE: " << meanMSE << "\n"; //if (g_uCurFrameN == 1) //{ // float* tempDiffData = new float[(uint)width * (uint)height]; // cudaMemcpy(tempDiffData, g_devResultVarKeyData, (uint)(width * height) * sizeof(float), cudaMemcpyDeviceToHost); // NPConfFileHelper::txtConfFile conf("adapCheat_diffData.txt"); // for (uint j = 0; j < width * height; j++) // { // conf.WriteRaw<float>(tempDiffData[j]); // conf.WriteRaw("\n"); // } // conf.SyncDataToFile(); // DELETE_ARRAY(tempDiffData); //} //HANDLE_ERROR(cudaEventRecord(start, 0)); // gen adaptive eye paths //std::vector<uint> pathQueuesSize; uint accumPathQueueSize = 0; uint genSize = width * height; //uint debugLoopTime = 0; uint selectSize = ceil((float)(width * height) / (float)(*g_uiDesiredTraceTimes.GetUint())); //std::cout << "selectSize : " << selectSize << std::endl; 
pt_genTempAdapPathQueueByKey_kernel << < dim3(ceil(genSize / (float)block1.x), 1, 1), block1 >> > (genSize , WangHash(g_uCurFrameN), accumPathQueueSize, selectSize, g_devResultVarKeyData, g_devPixelVarData, g_devTempPathQueue + accumPathQueueSize , *g_fMinTraceProb.GetFloat(), maxMSE); HANDLE_KERNEL_ERROR(); accumPathQueueSize = genSize; //while (accumPathQueueSize < genSize) //{ // // generate path into temp path // uint iterGenSize = ceil((float)(width * height) / (float)(*g_uiDesiredMaxAdaptiveSampling.GetFloat())); // pt_genTempAdapPathQueueByKey_kernel << < dim3(ceil(iterGenSize / (float)block1.x), 1, 1), block1 >> > (genSize // , WangHash(g_uCurFrameN), accumPathQueueSize, iterGenSize, g_devResultVarKeyData, g_devPixelVarData, g_devTempPathQueue + accumPathQueueSize // , *g_fMinTraceProb.GetFloat(), maxMSE); // HANDLE_KERNEL_ERROR(); // uint* pathQueueEndItr = thrust::remove_if(thrust::device, g_devTempPathQueue + accumPathQueueSize // , g_devTempPathQueue + accumPathQueueSize + iterGenSize, is_temppathqueue_terminated()); // uint compactedGenSize = min(genSize - accumPathQueueSize, (uint)(pathQueueEndItr - (g_devTempPathQueue + accumPathQueueSize))); // pathQueuesSize.push_back(compactedGenSize); // accumPathQueueSize += compactedGenSize; // if (compactedGenSize == 0) break; // //std::cout << "Gened: " << compactedGenSize << std::endl << "Accum: " << accumPathQueueSize << std::endl; // //debugLoopTime++; //} //std::cout << "Debug Loop Time: " << debugLoopTime << "\n"; // fill temp path int unfilledPathQueueSize = genSize - accumPathQueueSize; if (unfilledPathQueueSize > 0) { pt_fillTempAdapPathQueue_kernel << < dim3(ceil((float)unfilledPathQueueSize / (float)block1.x), 1, 1), block1 >> > (g_devTempPathQueue + accumPathQueueSize, unfilledPathQueueSize); HANDLE_KERNEL_ERROR(); //pathQueuesSize.push_back(unfilledPathQueueSize); accumPathQueueSize += unfilledPathQueueSize; } // generate real path from temp path pt_convTempPathQueue_kernel << < dim3(ceil((float)accumPathQueueSize/ (float)block1.x), 1, 1), block1 >> > (f3CamPos, f3CamDir, f3CamUp, f3CamRight, fov, width, height , g_uCurFrameN, WangHash(g_uCurFrameN), g_devTempPathQueue, accumPathQueueSize, g_devPathQueue); HANDLE_KERNEL_ERROR(); //HANDLE_ERROR(cudaEventRecord(stop, 0)); //HANDLE_ERROR(cudaEventSynchronize(stop)); //HANDLE_ERROR(cudaEventElapsedTime(&time, start, stop)); //std::cout << "gen path: " << time << std::endl; //HANDLE_ERROR(cudaEventRecord(start, 0)); TracePathQueue(genSize); //HANDLE_ERROR(cudaEventRecord(stop, 0)); //HANDLE_ERROR(cudaEventSynchronize(stop)); //HANDLE_ERROR(cudaEventElapsedTime(&time, start, stop)); //std::cout << "trace path: " << time << std::endl; //HANDLE_ERROR(cudaEventRecord(start, 0)); for (uint accumStart = 0; accumStart < genSize; accumStart += selectSize) { uint procSize = min(selectSize, genSize - accumStart); if (*g_enumDebugMode.GetUint() == 1) { pt_debugTracedPathQueueResult_kernel << < dim3(ceil((float)procSize / (float)block1.x), 1, 1), block1 >> > (g_devPathQueue + accumStart, procSize, width, height, g_uCurFrameN, g_devResultData, g_devAccResultData, g_devResultVarKeyData, g_devSampleResultN); HANDLE_KERNEL_ERROR(); } else { pt_applyPathQueueResult_kernel << < dim3(ceil((float)procSize / (float)block1.x), 1, 1), block1 >> > (g_devPathQueue + accumStart, procSize, width, height, g_uCurFrameN, g_devResultData, g_devAccResultData, g_devResultVarKeyData, g_devSampleResultN); HANDLE_KERNEL_ERROR(); } } //accumPathQueueSize = 0; //for (auto pathQueueSize : pathQueuesSize) //{ // if 
(*g_enumDebugMode.GetUint() == 1) // { // pt_debugTracedPathQueueResult_kernel << < dim3(ceil((float)pathQueueSize / (float)block1.x), 1, 1), block1 >> > // (g_devPathQueue + accumPathQueueSize, pathQueueSize, width, height, g_uCurFrameN, g_devResultData, g_devAccResultData, g_devResultVarKeyData, g_devSampleResultN); // HANDLE_KERNEL_ERROR(); // } // else // { // pt_applyPathQueueResult_kernel << < dim3(ceil((float)pathQueueSize / (float)block1.x), 1, 1), block1 >> > // (g_devPathQueue + accumPathQueueSize, pathQueueSize, width, height, g_uCurFrameN, g_devResultData, g_devAccResultData, g_devResultVarKeyData, g_devSampleResultN); // HANDLE_KERNEL_ERROR(); // } // accumPathQueueSize += pathQueueSize; //} //HANDLE_ERROR(cudaEventRecord(stop, 0)); //HANDLE_ERROR(cudaEventSynchronize(stop)); //HANDLE_ERROR(cudaEventElapsedTime(&time, start, stop)); //std::cout << "accum path: " << time << std::endl; } if (*g_enumDebugMode.GetUint() == 2 || *g_enumDebugMode.GetUint() == 3) { pt_applyPixelProbToResult_kernel << < renderGrid, block2 >> >(width, height, g_devResultData, g_devResultVarKeyData, (*g_enumDebugMode.GetUint() == 3) ? *g_fMinTraceProb.GetFloat() : 0.f); HANDLE_KERNEL_ERROR(); } // Copy result to host cudaMemcpy(result, g_devResultData, g_resultDataSize, cudaMemcpyDeviceToHost); return true; } }
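The TracePathQueue loop above implements a wavefront-style tracer: a fixed-capacity path stream is refilled from the path queue, traced for one bounce, and then compacted with thrust::remove_if so terminated paths release their stream slots. A minimal standalone sketch of that refill/trace/compact loop follows; Path, assign_kernel, trace_kernel and trace_queue are simplified stand-ins, not the project's types.

#include <algorithm>
#include <thrust/execution_policy.h>
#include <thrust/remove.h>

struct Path { bool isTerminated; };

struct is_done {
    __host__ __device__ bool operator()(const Path* p) const { return p->isTerminated; }
};

// fill free stream slots with pointers to fresh paths from the queue
__global__ void assign_kernel(Path** stream, unsigned base, Path* queue, unsigned queueCur, unsigned slots) {
    unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= slots) return;
    stream[base + i] = &queue[queueCur + i];
}

__global__ void trace_kernel(Path** stream, unsigned n) {
    unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    // ... intersect the scene, shade, decide Russian roulette ...
    stream[i]->isTerminated = true;   // stand-in: the real tracer terminates probabilistically
}

void trace_queue(Path** d_stream, Path* d_queue, unsigned queueSize, unsigned streamCap) {
    unsigned queueCur = 0, active = 0;
    while (queueCur < queueSize || active > 0) {
        // refill free stream slots from the queue
        unsigned slots = std::min(streamCap - active, queueSize - queueCur);
        if (slots)
            assign_kernel<<<(slots + 255) / 256, 256>>>(d_stream, active, d_queue, queueCur, slots);
        active += slots;
        queueCur += slots;

        if (active)
            trace_kernel<<<(active + 255) / 256, 256>>>(d_stream, active);

        // compact: keep only paths that are still alive, packed at the front of the stream
        Path** end = thrust::remove_if(thrust::device, d_stream, d_stream + active, is_done());
        active = static_cast<unsigned>(end - d_stream);
    }
}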
631d9fc053036c5b99e195b6a1bd7375b7baab76.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // @author Yurii Shyrma ([email protected]) // #include <op_boilerplate.h> #include <loops/reduce_same.h> #include <loops/legacy_ops.h> #include <helpers/DebugHelper.h> #include <types/types.h> using namespace simdOps; //////////////////////////////////////////////////////////////////////// template <typename X, typename OpType> __global__ void simpleReduce(void *x, Nd4jLong *xShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) { functions::reduce::ReduceSameFunction<X>::template transformCudaXD<OpType>(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo, tadOffsets); } //////////////////////////////////////////////////////////////////////// template <typename X, typename OpType> __global__ void simpleScalar(void *x, Nd4jLong *xShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo) { functions::reduce::ReduceSameFunction<X>::template execScalarCuda<OpType>(x, xShapeInfo, extraParams, z, zShapeInfo, reductionBuffer, tadOnlyShapeInfo); } namespace functions { namespace reduce { //////////////////////////////////////////////////////////////////////// template <typename X> template <typename OpType> __device__ void ReduceSameFunction<X>::aggregatePartials(void *vsPartials, Nd4jLong tid, Nd4jLong numItems, void *vextraParams) { // start the shared memory loop on the next power of 2 less // than the block size. If block size is not a power of 2, // accumulate the intermediate sums in the remainder range. 
auto sPartials = static_cast<X*>(vsPartials); auto extraParams = static_cast<X*>(vextraParams); Nd4jLong floorPow2 = numItems; if (floorPow2 & (floorPow2 - 1)) { while (floorPow2 & (floorPow2 - 1)) floorPow2 &= floorPow2 - 1; if (tid >= floorPow2) sPartials[tid - floorPow2] = OpType::update(sPartials[tid - floorPow2], sPartials[tid], extraParams); __syncthreads(); } for (Nd4jLong activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) { if (tid < activeThreads && tid + activeThreads < numItems) sPartials[tid] = OpType::update(sPartials[tid], sPartials[tid + activeThreads], extraParams); __syncthreads(); } } //////////////////////////////////////////////////////////////////////// template <typename X> template <typename OpType> __device__ void ReduceSameFunction<X>::transformCudaXD( void *vx, Nd4jLong *xShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, void *vreductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) { auto x = reinterpret_cast<X*>(vx); auto z = reinterpret_cast<X*>(vz); auto extraParams = reinterpret_cast<X*>(vextraParams); auto reductionBuffer = reinterpret_cast<X*>(vreductionBuffer); if (OpType::requiresSpecialAccumulation) { OpType::execSpecialCuda(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo, tadOffsets); return; } //shared memory space for storing intermediate results __shared__ X* sPartials; // __shared__ shape::TAD *tad; __shared__ int tadLength; __shared__ int tadRank; __shared__ int numTads; __shared__ Nd4jLong *tadShape; __shared__ Nd4jLong *tadStride; __shared__ bool isPlainOutput; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sPartials = reinterpret_cast<X*>(shmem); tadLength = shape::length(tadOnlyShapeInfo); tadRank = shape::rank(tadOnlyShapeInfo); numTads = shape::length(xShapeInfo) / tadLength; tadShape = shape::shapeOf(tadOnlyShapeInfo); tadStride = shape::stride(tadOnlyShapeInfo); isPlainOutput = shape::order(zShapeInfo) == 'c' && shape::elementWiseStride(zShapeInfo) == 1; } __syncthreads(); for (int r = blockIdx.x; r < numTads; r += gridDim.x) { Nd4jLong tadOffsetForBlock = tadOffsets[r]; sPartials[threadIdx.x] = OpType::startingValue(x + tadOffsetForBlock); for (int i = threadIdx.x; i < tadLength; i += blockDim.x) { auto xOffset = tadOffsetForBlock + shape::getIndexOffset(i, tadOnlyShapeInfo, tadLength); sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(x[xOffset], extraParams), extraParams); } __syncthreads(); // aggregate. do NOT reduce for elements > tadLength aggregatePartials<OpType>(sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraParams); __syncthreads(); if (threadIdx.x == 0) z[isPlainOutput ? 
r : shape::getIndexOffset(r, zShapeInfo, numTads)] = OpType::postProcess(sPartials[threadIdx.x], tadLength, extraParams); } } //////////////////////////////////////////////////////////////////////// template <typename X> __device__ void ReduceSameFunction<X>::execScalarCudaLegacy(int opNum, void *vx, Nd4jLong *xShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, void *vreductionBuffer, Nd4jLong *tadOnlyShapeInfo) { DISPATCH_BY_OPNUM_T(execScalarCuda, PARAMS(vx, xShapeInfo, vextraParams, vz, zShapeInfo, vreductionBuffer, tadOnlyShapeInfo), REDUCE_SAME_OPS); } //////////////////////////////////////////////////////////////////////// template <typename X> template <typename OpType> __device__ void ReduceSameFunction<X>::execScalarCuda(void *vx, Nd4jLong *xShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, void *vreductionBuffer, Nd4jLong *tadOnlyShapeInfo) { auto x = reinterpret_cast<X*>(vx); auto z = reinterpret_cast<X*>(vz); auto extraParams = reinterpret_cast<X*>(vextraParams); auto reductionBuffer = reinterpret_cast<X*>(vreductionBuffer); auto tid = blockDim.x * blockIdx.x + threadIdx.x; //shared memory space for storing intermediate results __shared__ X* sPartials; __shared__ Nd4jLong xEws; __shared__ Nd4jLong len; if(threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sPartials = reinterpret_cast<X*>(shmem); xEws = shape::elementWiseStride(xShapeInfo); len = shape::length(xShapeInfo); } __syncthreads(); sPartials[threadIdx.x] = OpType::startingValue(x); if (xEws > 0) for (int i = tid; i < len; i += (blockDim.x * gridDim.x)) sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(x[i * xEws], extraParams), extraParams); else for (int i = tid; i < len; i += blockDim.x * gridDim.x) sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(x[shape::getIndexOffset(i, xShapeInfo, len)], extraParams), extraParams); __syncthreads(); aggregatePartials<OpType>(sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, len), extraParams); __syncthreads(); if (gridDim.x > 1) { unsigned int *tc = (unsigned int *)reductionBuffer; __shared__ bool amLast; tid = threadIdx.x; if (threadIdx.x == 0) reductionBuffer[blockIdx.x] = sPartials[0];//this->postProcess(sPartials[0],len,extraParams); __threadfence(); __syncthreads(); if (threadIdx.x == 0) { unsigned int ticket = atomicInc(&tc[16384], gridDim.x); amLast = (ticket == gridDim.x - 1); } __syncthreads(); if (amLast) { tc[16384] = 0; sPartials[threadIdx.x] = OpType::startingValue(x); for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], reductionBuffer[i], extraParams); __syncthreads(); aggregatePartials<OpType>(sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(gridDim.x, blockDim.x), extraParams); __syncthreads(); if (threadIdx.x == 0) { z[0] = OpType::postProcess(sPartials[0], len, extraParams); printf("ReduceScalarSame: %f\n", (float) z[0]); } } } else { if (threadIdx.x == 0) { auto tc = reinterpret_cast<unsigned int *>(reductionBuffer); tc[16384] = 0; z[0] = OpType::postProcess(sPartials[0], len, extraParams); printf("ReduceScalarSame: %f\n", (float) z[0]); } } } //////////////////////////////////////////////////////////////////////// template <typename X> template<typename OpType> __host__ void ReduceSameFunction<X>::intermediateXD(dim3 launchDims, hipStream_t *stream, void *x, Nd4jLong *xShape, void *extraParams, void *z, Nd4jLong *zShape, int *dimension, int dimensionLength, void *reductionPointer, Nd4jLong 
*tadShapeInfo, Nd4jLong *tadOffsets) { hipLaunchKernelGGL(( simpleReduce<X, OpType>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets); } //////////////////////////////////////////////////////////////////////// template <typename X> template<typename OpType> __host__ void ReduceSameFunction<X>::intermediateScalar(dim3 launchDims, hipStream_t *stream, void *x, Nd4jLong *xShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo) { hipLaunchKernelGGL(( simpleScalar<X, OpType>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo); } //////////////////////////////////////////////////////////////////////// template <typename X> _CUDA_H void ReduceSameFunction<X>::execReduceScalar(dim3 launchDims, hipStream_t *stream, int opNum, void *x, Nd4jLong *xShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo) { DISPATCH_BY_OPNUM_T(intermediateScalar, PARAMS(launchDims, stream, x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo), REDUCE_SAME_OPS); nd4j::DebugHelper::checkErrorCode(stream, "execReduceScalarSame(...) failed"); } //////////////////////////////////////////////////////////////////////// template <typename X> _CUDA_H void ReduceSameFunction<X>::execReduceXD(dim3 launchDims, hipStream_t *stream, int opNum, int rank, void *x, Nd4jLong *xShape, void *extraParams, void *z, Nd4jLong *zShape, int *dimension, int dimensionLength, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { DISPATCH_BY_OPNUM_T(intermediateXD, PARAMS(launchDims, stream, x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), REDUCE_SAME_OPS); DEBUG_KERNEL(stream, opNum); } //////////////////////////////////////////////////////////////////////// template <typename X> __device__ void initializeShared(X *extraParams, X **sPartials, int sMemSize) { int sPartialsLength = sMemSize / sizeof(X); X *sPartialsDeref = (X *) *sPartials; for (int i = 0; i < sPartialsLength; i++) sPartialsDeref[i] = extraParams[0]; } BUILD_SINGLE_TEMPLATE(template class ND4J_EXPORT ReduceSameFunction, , LIBND4J_TYPES); } }
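The aggregatePartials routine above is a shared-memory tree reduction: it first folds the remainder down to the largest power of two below numItems, then halves the number of active threads each step. A minimal standalone sketch of the same idea, specialized to a float sum and assuming a power-of-two block size (the names here are illustrative, not from the file):

__global__ void block_sum(const float* in, float* out, int n) {
    extern __shared__ float s[];
    int tid = threadIdx.x;
    int i = blockIdx.x * blockDim.x + tid;
    s[tid] = (i < n) ? in[i] : 0.0f;
    __syncthreads();
    // halve the number of active threads each step (blockDim.x must be a power of two)
    for (int active = blockDim.x >> 1; active > 0; active >>= 1) {
        if (tid < active) s[tid] += s[tid + active];
        __syncthreads();
    }
    if (tid == 0) out[blockIdx.x] = s[0];   // one partial sum per block
}
// launch: block_sum<<<numBlocks, threads, threads * sizeof(float)>>>(d_in, d_out, n);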
631d9fc053036c5b99e195b6a1bd7375b7baab76.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // @author Yurii Shyrma ([email protected]) // #include <op_boilerplate.h> #include <loops/reduce_same.h> #include <loops/legacy_ops.h> #include <helpers/DebugHelper.h> #include <types/types.h> using namespace simdOps; //////////////////////////////////////////////////////////////////////// template <typename X, typename OpType> __global__ void simpleReduce(void *x, Nd4jLong *xShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) { functions::reduce::ReduceSameFunction<X>::template transformCudaXD<OpType>(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo, tadOffsets); } //////////////////////////////////////////////////////////////////////// template <typename X, typename OpType> __global__ void simpleScalar(void *x, Nd4jLong *xShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo) { functions::reduce::ReduceSameFunction<X>::template execScalarCuda<OpType>(x, xShapeInfo, extraParams, z, zShapeInfo, reductionBuffer, tadOnlyShapeInfo); } namespace functions { namespace reduce { //////////////////////////////////////////////////////////////////////// template <typename X> template <typename OpType> __device__ void ReduceSameFunction<X>::aggregatePartials(void *vsPartials, Nd4jLong tid, Nd4jLong numItems, void *vextraParams) { // start the shared memory loop on the next power of 2 less // than the block size. If block size is not a power of 2, // accumulate the intermediate sums in the remainder range. 
auto sPartials = static_cast<X*>(vsPartials); auto extraParams = static_cast<X*>(vextraParams); Nd4jLong floorPow2 = numItems; if (floorPow2 & (floorPow2 - 1)) { while (floorPow2 & (floorPow2 - 1)) floorPow2 &= floorPow2 - 1; if (tid >= floorPow2) sPartials[tid - floorPow2] = OpType::update(sPartials[tid - floorPow2], sPartials[tid], extraParams); __syncthreads(); } for (Nd4jLong activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) { if (tid < activeThreads && tid + activeThreads < numItems) sPartials[tid] = OpType::update(sPartials[tid], sPartials[tid + activeThreads], extraParams); __syncthreads(); } } //////////////////////////////////////////////////////////////////////// template <typename X> template <typename OpType> __device__ void ReduceSameFunction<X>::transformCudaXD( void *vx, Nd4jLong *xShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, void *vreductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) { auto x = reinterpret_cast<X*>(vx); auto z = reinterpret_cast<X*>(vz); auto extraParams = reinterpret_cast<X*>(vextraParams); auto reductionBuffer = reinterpret_cast<X*>(vreductionBuffer); if (OpType::requiresSpecialAccumulation) { OpType::execSpecialCuda(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo, tadOffsets); return; } //shared memory space for storing intermediate results __shared__ X* sPartials; // __shared__ shape::TAD *tad; __shared__ int tadLength; __shared__ int tadRank; __shared__ int numTads; __shared__ Nd4jLong *tadShape; __shared__ Nd4jLong *tadStride; __shared__ bool isPlainOutput; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sPartials = reinterpret_cast<X*>(shmem); tadLength = shape::length(tadOnlyShapeInfo); tadRank = shape::rank(tadOnlyShapeInfo); numTads = shape::length(xShapeInfo) / tadLength; tadShape = shape::shapeOf(tadOnlyShapeInfo); tadStride = shape::stride(tadOnlyShapeInfo); isPlainOutput = shape::order(zShapeInfo) == 'c' && shape::elementWiseStride(zShapeInfo) == 1; } __syncthreads(); for (int r = blockIdx.x; r < numTads; r += gridDim.x) { Nd4jLong tadOffsetForBlock = tadOffsets[r]; sPartials[threadIdx.x] = OpType::startingValue(x + tadOffsetForBlock); for (int i = threadIdx.x; i < tadLength; i += blockDim.x) { auto xOffset = tadOffsetForBlock + shape::getIndexOffset(i, tadOnlyShapeInfo, tadLength); sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(x[xOffset], extraParams), extraParams); } __syncthreads(); // aggregate. do NOT reduce for elements > tadLength aggregatePartials<OpType>(sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraParams); __syncthreads(); if (threadIdx.x == 0) z[isPlainOutput ? 
r : shape::getIndexOffset(r, zShapeInfo, numTads)] = OpType::postProcess(sPartials[threadIdx.x], tadLength, extraParams); } } //////////////////////////////////////////////////////////////////////// template <typename X> __device__ void ReduceSameFunction<X>::execScalarCudaLegacy(int opNum, void *vx, Nd4jLong *xShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, void *vreductionBuffer, Nd4jLong *tadOnlyShapeInfo) { DISPATCH_BY_OPNUM_T(execScalarCuda, PARAMS(vx, xShapeInfo, vextraParams, vz, zShapeInfo, vreductionBuffer, tadOnlyShapeInfo), REDUCE_SAME_OPS); } //////////////////////////////////////////////////////////////////////// template <typename X> template <typename OpType> __device__ void ReduceSameFunction<X>::execScalarCuda(void *vx, Nd4jLong *xShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, void *vreductionBuffer, Nd4jLong *tadOnlyShapeInfo) { auto x = reinterpret_cast<X*>(vx); auto z = reinterpret_cast<X*>(vz); auto extraParams = reinterpret_cast<X*>(vextraParams); auto reductionBuffer = reinterpret_cast<X*>(vreductionBuffer); auto tid = blockDim.x * blockIdx.x + threadIdx.x; //shared memory space for storing intermediate results __shared__ X* sPartials; __shared__ Nd4jLong xEws; __shared__ Nd4jLong len; if(threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sPartials = reinterpret_cast<X*>(shmem); xEws = shape::elementWiseStride(xShapeInfo); len = shape::length(xShapeInfo); } __syncthreads(); sPartials[threadIdx.x] = OpType::startingValue(x); if (xEws > 0) for (int i = tid; i < len; i += (blockDim.x * gridDim.x)) sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(x[i * xEws], extraParams), extraParams); else for (int i = tid; i < len; i += blockDim.x * gridDim.x) sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(x[shape::getIndexOffset(i, xShapeInfo, len)], extraParams), extraParams); __syncthreads(); aggregatePartials<OpType>(sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, len), extraParams); __syncthreads(); if (gridDim.x > 1) { unsigned int *tc = (unsigned int *)reductionBuffer; __shared__ bool amLast; tid = threadIdx.x; if (threadIdx.x == 0) reductionBuffer[blockIdx.x] = sPartials[0];//this->postProcess(sPartials[0],len,extraParams); __threadfence(); __syncthreads(); if (threadIdx.x == 0) { unsigned int ticket = atomicInc(&tc[16384], gridDim.x); amLast = (ticket == gridDim.x - 1); } __syncthreads(); if (amLast) { tc[16384] = 0; sPartials[threadIdx.x] = OpType::startingValue(x); for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], reductionBuffer[i], extraParams); __syncthreads(); aggregatePartials<OpType>(sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(gridDim.x, blockDim.x), extraParams); __syncthreads(); if (threadIdx.x == 0) { z[0] = OpType::postProcess(sPartials[0], len, extraParams); printf("ReduceScalarSame: %f\n", (float) z[0]); } } } else { if (threadIdx.x == 0) { auto tc = reinterpret_cast<unsigned int *>(reductionBuffer); tc[16384] = 0; z[0] = OpType::postProcess(sPartials[0], len, extraParams); printf("ReduceScalarSame: %f\n", (float) z[0]); } } } //////////////////////////////////////////////////////////////////////// template <typename X> template<typename OpType> __host__ void ReduceSameFunction<X>::intermediateXD(dim3 launchDims, cudaStream_t *stream, void *x, Nd4jLong *xShape, void *extraParams, void *z, Nd4jLong *zShape, int *dimension, int dimensionLength, void *reductionPointer, Nd4jLong 
*tadShapeInfo, Nd4jLong *tadOffsets) { simpleReduce<X, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets); } //////////////////////////////////////////////////////////////////////// template <typename X> template<typename OpType> __host__ void ReduceSameFunction<X>::intermediateScalar(dim3 launchDims, cudaStream_t *stream, void *x, Nd4jLong *xShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo) { simpleScalar<X, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo); } //////////////////////////////////////////////////////////////////////// template <typename X> _CUDA_H void ReduceSameFunction<X>::execReduceScalar(dim3 launchDims, cudaStream_t *stream, int opNum, void *x, Nd4jLong *xShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo) { DISPATCH_BY_OPNUM_T(intermediateScalar, PARAMS(launchDims, stream, x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo), REDUCE_SAME_OPS); nd4j::DebugHelper::checkErrorCode(stream, "execReduceScalarSame(...) failed"); } //////////////////////////////////////////////////////////////////////// template <typename X> _CUDA_H void ReduceSameFunction<X>::execReduceXD(dim3 launchDims, cudaStream_t *stream, int opNum, int rank, void *x, Nd4jLong *xShape, void *extraParams, void *z, Nd4jLong *zShape, int *dimension, int dimensionLength, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { DISPATCH_BY_OPNUM_T(intermediateXD, PARAMS(launchDims, stream, x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), REDUCE_SAME_OPS); DEBUG_KERNEL(stream, opNum); } //////////////////////////////////////////////////////////////////////// template <typename X> __device__ void initializeShared(X *extraParams, X **sPartials, int sMemSize) { int sPartialsLength = sMemSize / sizeof(X); X *sPartialsDeref = (X *) *sPartials; for (int i = 0; i < sPartialsLength; i++) sPartialsDeref[i] = extraParams[0]; } BUILD_SINGLE_TEMPLATE(template class ND4J_EXPORT ReduceSameFunction, , LIBND4J_TYPES); } }
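Comparing this record's .cu and .hip versions, the substantive differences are confined to the stream type (cudaStream_t versus hipStream_t) and the kernel-launch form (triple chevrons versus hipLaunchKernelGGL); the device code itself is unchanged. A minimal illustration of that mapping, using an illustrative kernel rather than one from the files:

#include <cuda_runtime.h>

__global__ void scale(float* y, float a, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) y[i] *= a;
}

void launch_scale(float* d_y, float a, int n, cudaStream_t stream) {
    dim3 block(256), grid((n + 255) / 256);
    // CUDA form, as in the .cu file above:
    scale<<<grid, block, 0, stream>>>(d_y, a, n);
    // The hipified file expresses the same launch as:
    //   hipLaunchKernelGGL(scale, grid, block, 0, stream, d_y, a, n);
    // with cudaStream_t renamed to hipStream_t and cuda* runtime calls mapped to hip*.
}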
c714944f1ac738bc5a70995e5ed190b6ecb752e0.hip
// !!! This is a file automatically generated by hipify!!! #include "file_system.h" #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <cstdio> __device__ void user_program(FileSystem* fs, uchar* input, uchar* output) { /* /////////////// Test Case 1 /////////////// u32 fp = fs_open(fs, "t.txt\0", G_WRITE); fs_write(fs, input, 64, fp); fp = fs_open(fs, "b.txt\0", G_WRITE); fs_write(fs, input + 32, 32, fp); fp = fs_open(fs, "t.txt\0", G_WRITE); fs_write(fs, input + 32, 32, fp); fp = fs_open(fs, "t.txt\0", G_READ); fs_read(fs, output, 32, fp); fs_gsys(fs, LS_D); fs_gsys(fs, LS_S); fp = fs_open(fs, "b.txt\0", G_WRITE); fs_write(fs, input + 64, 12, fp); fs_gsys(fs, LS_S); fs_gsys(fs, LS_D); fs_gsys(fs, RM, "t.txt\0"); fs_gsys(fs, LS_S); /////////////// Test Case 2 /////////////// u32 fp = fs_open(fs, "t.txt\0", G_WRITE); fs_write(fs,input, 64, fp); fp = fs_open(fs,"b.txt\0", G_WRITE); fs_write(fs,input + 32, 32, fp); fp = fs_open(fs,"t.txt\0", G_WRITE); fs_write(fs,input + 32, 32, fp); fp = fs_open(fs,"t.txt\0", G_READ); fs_read(fs,output, 32, fp); fs_gsys(fs,LS_D); fs_gsys(fs,LS_S); fp = fs_open(fs,"b.txt\0", G_WRITE); fs_write(fs,input + 64, 12, fp); fs_gsys(fs,LS_S); fs_gsys(fs,LS_D); fs_gsys(fs,RM, "t.txt\0"); fs_gsys(fs,LS_S); char fname[10][20]; for (int i = 0; i < 10; i++) { fname[i][0] = i + 33; for (int j = 1; j < 19; j++) fname[i][j] = 64 + j; fname[i][19] = '\0'; } for (int i = 0; i < 10; i++) { fp = fs_open(fs,fname[i], G_WRITE); fs_write(fs,input + i, 24 + i, fp); } fs_gsys(fs,LS_S); for (int i = 0; i < 5; i++) fs_gsys(fs,RM, fname[i]); fs_gsys(fs,LS_D); */ /////////////// Test Case 3 /////////////// u32 fp = fs_open(fs, "t.txt\0", G_WRITE); fs_write(fs, input, 64, fp); fp = fs_open(fs, "b.txt\0", G_WRITE); fs_write(fs, input + 32, 32, fp); fp = fs_open(fs, "t.txt\0", G_WRITE); fs_write(fs, input + 32, 32, fp); fp = fs_open(fs, "t.txt\0", G_READ); fs_read(fs, output, 32, fp); fs_gsys(fs, LS_D); fs_gsys(fs, LS_S); fp = fs_open(fs, "b.txt\0", G_WRITE); fs_write(fs, input + 64, 12, fp); fs_gsys(fs, LS_S); fs_gsys(fs, LS_D); fs_gsys(fs, RM, "t.txt\0"); fs_gsys(fs, LS_S); char fname[10][20]; for (int i = 0; i < 10; i++) { fname[i][0] = i + 33; for (int j = 1; j < 19; j++) fname[i][j] = 64 + j; fname[i][19] = '\0'; } for (int i = 0; i < 10; i++) { fp = fs_open(fs, fname[i], G_WRITE); fs_write(fs, input + i, 24 + i, fp); } fs_gsys(fs, LS_S); for (int i = 0; i < 5; i++) fs_gsys(fs, RM, fname[i]); fs_gsys(fs, LS_D); char fname2[1018][20]; int p = 0; for (int k = 2; k < 15; k++) for (int i = 50; i <= 126; i++, p++) { fname2[p][0] = i; for (int j = 1; j < k; j++) fname2[p][j] = 64 + j; fname2[p][k] = '\0'; } for (int i = 0; i < 1001; i++) { fp = fs_open(fs, fname2[i], G_WRITE); fs_write(fs, input + i, 24 + i, fp); } fs_gsys(fs, LS_S); fp = fs_open(fs, fname2[1000], G_READ); fs_read(fs, output + 1000, 1024, fp); char fname3[17][3]; for (int i = 0; i < 17; i++) { fname3[i][0] = 97 + i; fname3[i][1] = 97 + i; fname3[i][2] = '\0'; fp = fs_open(fs, fname3[i], G_WRITE); fs_write(fs, input + 1024 * i, 1024, fp); } fp = fs_open(fs, "EA\0", G_WRITE); fs_write(fs, input + 1024 * 100, 1024, fp); fs_gsys(fs, LS_S); }
c714944f1ac738bc5a70995e5ed190b6ecb752e0.cu
#include "file_system.h" #include <cuda.h> #include <cuda_runtime.h> #include <cstdio> __device__ void user_program(FileSystem* fs, uchar* input, uchar* output) { /* /////////////// Test Case 1 /////////////// u32 fp = fs_open(fs, "t.txt\0", G_WRITE); fs_write(fs, input, 64, fp); fp = fs_open(fs, "b.txt\0", G_WRITE); fs_write(fs, input + 32, 32, fp); fp = fs_open(fs, "t.txt\0", G_WRITE); fs_write(fs, input + 32, 32, fp); fp = fs_open(fs, "t.txt\0", G_READ); fs_read(fs, output, 32, fp); fs_gsys(fs, LS_D); fs_gsys(fs, LS_S); fp = fs_open(fs, "b.txt\0", G_WRITE); fs_write(fs, input + 64, 12, fp); fs_gsys(fs, LS_S); fs_gsys(fs, LS_D); fs_gsys(fs, RM, "t.txt\0"); fs_gsys(fs, LS_S); /////////////// Test Case 2 /////////////// u32 fp = fs_open(fs, "t.txt\0", G_WRITE); fs_write(fs,input, 64, fp); fp = fs_open(fs,"b.txt\0", G_WRITE); fs_write(fs,input + 32, 32, fp); fp = fs_open(fs,"t.txt\0", G_WRITE); fs_write(fs,input + 32, 32, fp); fp = fs_open(fs,"t.txt\0", G_READ); fs_read(fs,output, 32, fp); fs_gsys(fs,LS_D); fs_gsys(fs,LS_S); fp = fs_open(fs,"b.txt\0", G_WRITE); fs_write(fs,input + 64, 12, fp); fs_gsys(fs,LS_S); fs_gsys(fs,LS_D); fs_gsys(fs,RM, "t.txt\0"); fs_gsys(fs,LS_S); char fname[10][20]; for (int i = 0; i < 10; i++) { fname[i][0] = i + 33; for (int j = 1; j < 19; j++) fname[i][j] = 64 + j; fname[i][19] = '\0'; } for (int i = 0; i < 10; i++) { fp = fs_open(fs,fname[i], G_WRITE); fs_write(fs,input + i, 24 + i, fp); } fs_gsys(fs,LS_S); for (int i = 0; i < 5; i++) fs_gsys(fs,RM, fname[i]); fs_gsys(fs,LS_D); */ /////////////// Test Case 3 /////////////// u32 fp = fs_open(fs, "t.txt\0", G_WRITE); fs_write(fs, input, 64, fp); fp = fs_open(fs, "b.txt\0", G_WRITE); fs_write(fs, input + 32, 32, fp); fp = fs_open(fs, "t.txt\0", G_WRITE); fs_write(fs, input + 32, 32, fp); fp = fs_open(fs, "t.txt\0", G_READ); fs_read(fs, output, 32, fp); fs_gsys(fs, LS_D); fs_gsys(fs, LS_S); fp = fs_open(fs, "b.txt\0", G_WRITE); fs_write(fs, input + 64, 12, fp); fs_gsys(fs, LS_S); fs_gsys(fs, LS_D); fs_gsys(fs, RM, "t.txt\0"); fs_gsys(fs, LS_S); char fname[10][20]; for (int i = 0; i < 10; i++) { fname[i][0] = i + 33; for (int j = 1; j < 19; j++) fname[i][j] = 64 + j; fname[i][19] = '\0'; } for (int i = 0; i < 10; i++) { fp = fs_open(fs, fname[i], G_WRITE); fs_write(fs, input + i, 24 + i, fp); } fs_gsys(fs, LS_S); for (int i = 0; i < 5; i++) fs_gsys(fs, RM, fname[i]); fs_gsys(fs, LS_D); char fname2[1018][20]; int p = 0; for (int k = 2; k < 15; k++) for (int i = 50; i <= 126; i++, p++) { fname2[p][0] = i; for (int j = 1; j < k; j++) fname2[p][j] = 64 + j; fname2[p][k] = '\0'; } for (int i = 0; i < 1001; i++) { fp = fs_open(fs, fname2[i], G_WRITE); fs_write(fs, input + i, 24 + i, fp); } fs_gsys(fs, LS_S); fp = fs_open(fs, fname2[1000], G_READ); fs_read(fs, output + 1000, 1024, fp); char fname3[17][3]; for (int i = 0; i < 17; i++) { fname3[i][0] = 97 + i; fname3[i][1] = 97 + i; fname3[i][2] = '\0'; fp = fs_open(fs, fname3[i], G_WRITE); fs_write(fs, input + 1024 * i, 1024, fp); } fp = fs_open(fs, "EA\0", G_WRITE); fs_write(fs, input + 1024 * 100, 1024, fp); fs_gsys(fs, LS_S); }
a175deeb4dbda9d02888d10143f36208f2e4a290.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_code.hpp" #include "HZ.hpp" #include "HZ_L.hpp" #include "cuda_helper.hpp" #include "device_code_common.hpp" #include "device_code_cdsort_0.hpp" #include "device_code_cdsort_accumV.hpp" static const dim3 hzL1bD(HZ_L1_THREADS_PER_BLOCK_X, HZ_L1_THREADS_PER_BLOCK_Y, 1u); void HZ_L1_sv(double *const F, double *const G, const hipStream_t s) throw() { const dim3 hzL1gD(STRAT1_PAIRS, 1u, 1u); const size_t shmD = static_cast<size_t>(0u); hipLaunchKernelGGL(( dHZ_L1_sv), dim3(hzL1gD), dim3(hzL1bD), shmD, s , F, G); } void initS(double *const F, double *const G, double *const V, double *const S, double *const H, double *const K, const unsigned nRank, const hipStream_t s) throw() { const dim3 bD(2u * WARP_SZ, 1u, 1u); const dim3 gD(udiv_ceil(nRank * WARP_SZ, bD.x), 1u, 1u); const size_t shmD = static_cast<size_t>(0u); hipLaunchKernelGGL(( dInitS), dim3(gD), dim3(bD), shmD, s , F, G, V, S, H, K); } void initS(double *const F, double *const G, double *const V, const unsigned nRank, const hipStream_t s) throw() { const dim3 bD(2u * WARP_SZ, 1u, 1u); const dim3 gD(udiv_ceil(nRank * WARP_SZ, bD.x), 1u, 1u); const size_t shmD = static_cast<size_t>(0u); hipLaunchKernelGGL(( dInitS), dim3(gD), dim3(bD), shmD, s , F, G, V); } void initV(double *const F, double *const G, double *const V, const unsigned nRank, const hipStream_t s) throw() { const dim3 bD(2u * WARP_SZ, 1u, 1u); const dim3 gD(udiv_ceil(nRank * WARP_SZ, bD.x), 1u, 1u); const size_t shmD = static_cast<size_t>(0u); hipLaunchKernelGGL(( dInitV), dim3(gD), dim3(bD), shmD, s , F, G, V); } void initSymbols (double *const W, unsigned long long *const C, const unsigned nRowF, const unsigned nRowG, const unsigned nRowV, const unsigned nRowW, const unsigned ldF, const unsigned ldG, const unsigned ldV, const unsigned ldW, const unsigned nRank, const unsigned nSwp, const hipStream_t s ) throw() { const size_t off = static_cast<size_t>(0u); CUDA_CALL(hipMemcpyToSymbolAsync(_W, &W, sizeof(double*), off, hipMemcpyHostToDevice, s)); CUDA_CALL(hipMemcpyToSymbolAsync(_C, &C, sizeof(unsigned long long*), off, hipMemcpyHostToDevice, s)); CUDA_CALL(hipMemcpyToSymbolAsync(_nRowF, &nRowF, sizeof(unsigned), off, hipMemcpyHostToDevice, s)); CUDA_CALL(hipMemcpyToSymbolAsync(_nRowG, &nRowG, sizeof(unsigned), off, hipMemcpyHostToDevice, s)); CUDA_CALL(hipMemcpyToSymbolAsync(_nRowV, &nRowV, sizeof(unsigned), off, hipMemcpyHostToDevice, s)); CUDA_CALL(hipMemcpyToSymbolAsync(_nRowW, &nRowW, sizeof(unsigned), off, hipMemcpyHostToDevice, s)); CUDA_CALL(hipMemcpyToSymbolAsync(_ldF, &ldF, sizeof(unsigned), off, hipMemcpyHostToDevice, s)); CUDA_CALL(hipMemcpyToSymbolAsync(_ldG, &ldG, sizeof(unsigned), off, hipMemcpyHostToDevice, s)); CUDA_CALL(hipMemcpyToSymbolAsync(_ldV, &ldV, sizeof(unsigned), off, hipMemcpyHostToDevice, s)); CUDA_CALL(hipMemcpyToSymbolAsync(_ldW, &ldW, sizeof(unsigned), off, hipMemcpyHostToDevice, s)); CUDA_CALL(hipMemcpyToSymbolAsync(_nRank, &nRank, sizeof(unsigned), off, hipMemcpyHostToDevice, s)); CUDA_CALL(hipMemcpyToSymbolAsync(_nSwp, &nSwp, sizeof(unsigned), off, hipMemcpyHostToDevice, s)); CUDA_CALL(hipMemcpyToSymbolAsync(_STRAT0_STEPS, &STRAT0_STEPS, sizeof(unsigned), off, hipMemcpyHostToDevice, s)); CUDA_CALL(hipMemcpyToSymbolAsync(_STRAT0_PAIRS, &STRAT0_PAIRS, sizeof(unsigned), off, hipMemcpyHostToDevice, s)); CUDA_CALL(hipMemcpyToSymbolAsync(_strat0, strat0, sizeof(strat0), off, hipMemcpyHostToDevice, s)); }
a175deeb4dbda9d02888d10143f36208f2e4a290.cu
#include "device_code.hpp" #include "HZ.hpp" #include "HZ_L.hpp" #include "cuda_helper.hpp" #include "device_code_common.hpp" #include "device_code_cdsort_0.hpp" #include "device_code_cdsort_accumV.hpp" static const dim3 hzL1bD(HZ_L1_THREADS_PER_BLOCK_X, HZ_L1_THREADS_PER_BLOCK_Y, 1u); void HZ_L1_sv(double *const F, double *const G, const cudaStream_t s) throw() { const dim3 hzL1gD(STRAT1_PAIRS, 1u, 1u); const size_t shmD = static_cast<size_t>(0u); dHZ_L1_sv<<< hzL1gD, hzL1bD, shmD, s >>>(F, G); } void initS(double *const F, double *const G, double *const V, double *const S, double *const H, double *const K, const unsigned nRank, const cudaStream_t s) throw() { const dim3 bD(2u * WARP_SZ, 1u, 1u); const dim3 gD(udiv_ceil(nRank * WARP_SZ, bD.x), 1u, 1u); const size_t shmD = static_cast<size_t>(0u); dInitS<<< gD, bD, shmD, s >>>(F, G, V, S, H, K); } void initS(double *const F, double *const G, double *const V, const unsigned nRank, const cudaStream_t s) throw() { const dim3 bD(2u * WARP_SZ, 1u, 1u); const dim3 gD(udiv_ceil(nRank * WARP_SZ, bD.x), 1u, 1u); const size_t shmD = static_cast<size_t>(0u); dInitS<<< gD, bD, shmD, s >>>(F, G, V); } void initV(double *const F, double *const G, double *const V, const unsigned nRank, const cudaStream_t s) throw() { const dim3 bD(2u * WARP_SZ, 1u, 1u); const dim3 gD(udiv_ceil(nRank * WARP_SZ, bD.x), 1u, 1u); const size_t shmD = static_cast<size_t>(0u); dInitV<<< gD, bD, shmD, s >>>(F, G, V); } void initSymbols (double *const W, unsigned long long *const C, const unsigned nRowF, const unsigned nRowG, const unsigned nRowV, const unsigned nRowW, const unsigned ldF, const unsigned ldG, const unsigned ldV, const unsigned ldW, const unsigned nRank, const unsigned nSwp, const cudaStream_t s ) throw() { const size_t off = static_cast<size_t>(0u); CUDA_CALL(cudaMemcpyToSymbolAsync(_W, &W, sizeof(double*), off, cudaMemcpyHostToDevice, s)); CUDA_CALL(cudaMemcpyToSymbolAsync(_C, &C, sizeof(unsigned long long*), off, cudaMemcpyHostToDevice, s)); CUDA_CALL(cudaMemcpyToSymbolAsync(_nRowF, &nRowF, sizeof(unsigned), off, cudaMemcpyHostToDevice, s)); CUDA_CALL(cudaMemcpyToSymbolAsync(_nRowG, &nRowG, sizeof(unsigned), off, cudaMemcpyHostToDevice, s)); CUDA_CALL(cudaMemcpyToSymbolAsync(_nRowV, &nRowV, sizeof(unsigned), off, cudaMemcpyHostToDevice, s)); CUDA_CALL(cudaMemcpyToSymbolAsync(_nRowW, &nRowW, sizeof(unsigned), off, cudaMemcpyHostToDevice, s)); CUDA_CALL(cudaMemcpyToSymbolAsync(_ldF, &ldF, sizeof(unsigned), off, cudaMemcpyHostToDevice, s)); CUDA_CALL(cudaMemcpyToSymbolAsync(_ldG, &ldG, sizeof(unsigned), off, cudaMemcpyHostToDevice, s)); CUDA_CALL(cudaMemcpyToSymbolAsync(_ldV, &ldV, sizeof(unsigned), off, cudaMemcpyHostToDevice, s)); CUDA_CALL(cudaMemcpyToSymbolAsync(_ldW, &ldW, sizeof(unsigned), off, cudaMemcpyHostToDevice, s)); CUDA_CALL(cudaMemcpyToSymbolAsync(_nRank, &nRank, sizeof(unsigned), off, cudaMemcpyHostToDevice, s)); CUDA_CALL(cudaMemcpyToSymbolAsync(_nSwp, &nSwp, sizeof(unsigned), off, cudaMemcpyHostToDevice, s)); CUDA_CALL(cudaMemcpyToSymbolAsync(_STRAT0_STEPS, &STRAT0_STEPS, sizeof(unsigned), off, cudaMemcpyHostToDevice, s)); CUDA_CALL(cudaMemcpyToSymbolAsync(_STRAT0_PAIRS, &STRAT0_PAIRS, sizeof(unsigned), off, cudaMemcpyHostToDevice, s)); CUDA_CALL(cudaMemcpyToSymbolAsync(_strat0, strat0, sizeof(strat0), off, cudaMemcpyHostToDevice, s)); }
3671a072c2dfb1cffe33b480c39024419571ed1a.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #include <sys/time.h> #define CHECK(call) \ { \ const hipError_t error = call; \ if (error != hipSuccess) \ { \ printf("Error: %s:%d, ", __FILE__, __LINE__); \ printf("code:%d, reason: %s\n", error, hipGetErrorString(error)); \ exit(1); \ } \ }\ __global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) C[i] = A[i] + B[i]; } double cpuSecond() { struct timeval tp; gettimeofday(&tp,NULL); return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6); } void sumArraysOnHost(float *A, float *B, float *C, const int N) { for (int i = 0; i < N; i++) C[i] = A[i] + B[i]; } void initialData(float *ip,int size) { // generate different seed for random number time_t t; srand((unsigned int) time(&t)); for (int i=0; i<size; i++) { ip[i] = (float)( rand() & 0xFF )/10.0f; } } void checkResult(float *hostRef, float *gpuRef, const int N) { double epsilon = 1.0E-8; int match = 1; for (int i = 0; i < N; i++) { if (abs(hostRef[i] - gpuRef[i]) > epsilon) { match = 0; printf("Arrays do not match!\n"); printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i], gpuRef[i], i); break; } } if (match) printf("Arrays match.\n\n"); return; } int main(int argc, char **argv) { printf("%s Starting...\n", argv[0]); // set up device int dev = 0; hipDeviceProp_t deviceProp; CHECK(hipGetDeviceProperties(&deviceProp, dev)); printf("Using Device %d: %s\n", dev, deviceProp.name); CHECK(hipSetDevice(dev)); // set up date size of vectors int nElem = 1<<24; printf("Vector size %d\n", nElem); // malloc host memory size_t nBytes = nElem * sizeof(float); float *h_A, *h_B, *hostRef, *gpuRef; h_A = (float *)malloc(nBytes); h_B = (float *)malloc(nBytes); hostRef = (float *)malloc(nBytes); gpuRef = (float *)malloc(nBytes); double iStart,iElaps; // initialize data at host side iStart = cpuSecond(); initialData (h_A, nElem); initialData (h_B, nElem); iElaps = cpuSecond() - iStart; memset(hostRef, 0, nBytes); memset(gpuRef, 0, nBytes); // add vector at host side for result checks iStart = cpuSecond(); sumArraysOnHost (h_A, h_B, hostRef, nElem); iElaps = cpuSecond() - iStart; // malloc device global memory float *d_A, *d_B, *d_C; hipMalloc((float**)&d_A, nBytes); hipMalloc((float**)&d_B, nBytes); hipMalloc((float**)&d_C, nBytes); // transfer data from host to device hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice); // invoke kernel at host side int iLen = 1024; dim3 block (iLen); dim3 grid ((nElem+block.x-1)/block.x); iStart = cpuSecond(); hipLaunchKernelGGL(( sumArraysOnGPU) , dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C,nElem); hipDeviceSynchronize(); iElaps = cpuSecond() - iStart; printf("sumArraysOnGPU <<<%d,%d>>> Time elapsed %f" \ "sec\n", grid.x, block.x, iElaps); // copy kernel result back to host side hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost); // check device results checkResult(hostRef, gpuRef, nElem); // free device global memory hipFree(d_A); hipFree(d_B); hipFree(d_C); // free host memory free(h_A); free(h_B); free(hostRef); free(gpuRef); return(0); }
3671a072c2dfb1cffe33b480c39024419571ed1a.cu
#include <cuda_runtime.h> #include <stdio.h> #include <sys/time.h> #define CHECK(call) \ { \ const cudaError_t error = call; \ if (error != cudaSuccess) \ { \ printf("Error: %s:%d, ", __FILE__, __LINE__); \ printf("code:%d, reason: %s\n", error, cudaGetErrorString(error)); \ exit(1); \ } \ }\ __global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) C[i] = A[i] + B[i]; } double cpuSecond() { struct timeval tp; gettimeofday(&tp,NULL); return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6); } void sumArraysOnHost(float *A, float *B, float *C, const int N) { for (int i = 0; i < N; i++) C[i] = A[i] + B[i]; } void initialData(float *ip,int size) { // generate different seed for random number time_t t; srand((unsigned int) time(&t)); for (int i=0; i<size; i++) { ip[i] = (float)( rand() & 0xFF )/10.0f; } } void checkResult(float *hostRef, float *gpuRef, const int N) { double epsilon = 1.0E-8; int match = 1; for (int i = 0; i < N; i++) { if (abs(hostRef[i] - gpuRef[i]) > epsilon) { match = 0; printf("Arrays do not match!\n"); printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i], gpuRef[i], i); break; } } if (match) printf("Arrays match.\n\n"); return; } int main(int argc, char **argv) { printf("%s Starting...\n", argv[0]); // set up device int dev = 0; cudaDeviceProp deviceProp; CHECK(cudaGetDeviceProperties(&deviceProp, dev)); printf("Using Device %d: %s\n", dev, deviceProp.name); CHECK(cudaSetDevice(dev)); // set up date size of vectors int nElem = 1<<24; printf("Vector size %d\n", nElem); // malloc host memory size_t nBytes = nElem * sizeof(float); float *h_A, *h_B, *hostRef, *gpuRef; h_A = (float *)malloc(nBytes); h_B = (float *)malloc(nBytes); hostRef = (float *)malloc(nBytes); gpuRef = (float *)malloc(nBytes); double iStart,iElaps; // initialize data at host side iStart = cpuSecond(); initialData (h_A, nElem); initialData (h_B, nElem); iElaps = cpuSecond() - iStart; memset(hostRef, 0, nBytes); memset(gpuRef, 0, nBytes); // add vector at host side for result checks iStart = cpuSecond(); sumArraysOnHost (h_A, h_B, hostRef, nElem); iElaps = cpuSecond() - iStart; // malloc device global memory float *d_A, *d_B, *d_C; cudaMalloc((float**)&d_A, nBytes); cudaMalloc((float**)&d_B, nBytes); cudaMalloc((float**)&d_C, nBytes); // transfer data from host to device cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice); // invoke kernel at host side int iLen = 1024; dim3 block (iLen); dim3 grid ((nElem+block.x-1)/block.x); iStart = cpuSecond(); sumArraysOnGPU <<<grid, block>>>(d_A, d_B, d_C,nElem); cudaDeviceSynchronize(); iElaps = cpuSecond() - iStart; printf("sumArraysOnGPU <<<%d,%d>>> Time elapsed %f" \ "sec\n", grid.x, block.x, iElaps); // copy kernel result back to host side cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost); // check device results checkResult(hostRef, gpuRef, nElem); // free device global memory cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); // free host memory free(h_A); free(h_B); free(hostRef); free(gpuRef); return(0); }
371a5defbeaa42364cd9fa18029450afdcab1527.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>

// #define CHECK(call){
//     const hipError_t error = call;
//     if(error != hipSuccess){
//         printf("Error: %s:%d",__FILE__,__LINE__);
//         printf("code:%d, reason:%s\n",error,hipGetErrorString(error));
//         exit(1);
//     }
// }

// block = N
__global__ void sumArraysOnDevice(float *A, float *B, float *C, const int N){
    int idx = threadIdx.x;
    C[idx] = A[idx] + B[idx];
}

// grid = N
__global__ void sumArraysOnDeviceGrid(float *A, float *B, float *C, const int N){
    int idx = blockIdx.x;
    C[idx] = A[idx] + B[idx];
}

__global__ void sumArraysOnDeviceCommon(float *A, float *B, float *C, const int N){
    int idx = threadIdx.x + blockIdx.x*blockDim.x;
    if(idx < N) C[idx] = A[idx] + B[idx]; // guard against out-of-bounds access when the grid is rounded up
}

void sumArraysOnHost(float *A, float *B, float *C, const int N){
    for(int i = 0; i < N; i++){
        C[i] = A[i] + B[i];
    }
}

void checkResult(float *hostRef, float *gpuRef, const int N){
    double epsilon = 1.0e-8;
    bool match = true;
    for(int i = 0; i < N; i++){
        if(abs(hostRef[i] - gpuRef[i]) > epsilon){
            match = false;
            printf("Result do not match!\n");
            printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i], gpuRef[i], i);
            break;
        }
    }
    if(match) {
        printf("Result match!\n");
    }
}

void initialData(float *ip, int size){
    time_t t;
    srand((unsigned int) time(&t));
    for(int i = 0; i < size; i++){
        ip[i] = (float)(rand() & 0xFF)/10.0f;
    }
}

int main(){
    int dev = 0;
    hipSetDevice(dev);

    int nElem = 32;
    printf("Vector size %d\n", nElem);

    size_t nBytes = nElem * sizeof(float);
    float *h_A, *h_B, *h_C, *h_Ref;
    h_A = (float *)malloc(nBytes);
    h_B = (float *)malloc(nBytes);
    h_C = (float *)malloc(nBytes);
    h_Ref = (float *)malloc(nBytes);

    initialData(h_A, nElem);
    initialData(h_B, nElem);
    memset(h_C, 0, nBytes);
    memset(h_Ref, 0, nBytes);

    // cpu exec
    sumArraysOnHost(h_A, h_B, h_Ref, nElem);

    float *d_A, *d_B, *d_C;
    hipMalloc((float **)&d_A, nBytes);
    hipMalloc((float **)&d_B, nBytes);
    hipMalloc((float **)&d_C, nBytes);

    // cp data from cpu to gpu
    hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice);
    hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice);

    int iLen = 128;
    dim3 block(iLen);
    dim3 grid((nElem + block.x - 1)/block.x);

    // exec gpu
    // sumArrasOnDevice(h_C,h_C,h_C,nElem);
    //sumArrasOnDevice<<<1,nElem>>>(d_A,d_B,d_C,nElem);
    // wait for all device work to finish! hipMemcpy also synchronizes internally, so no extra blocking is needed here
    // hipDeviceReset();
    hipLaunchKernelGGL(( sumArraysOnDeviceCommon), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, nElem);
    hipDeviceSynchronize();

    // copy result back to host
    hipMemcpy(h_C, d_C, nBytes, hipMemcpyDeviceToHost);
    checkResult(h_Ref, h_C, nElem);

    free(h_A);
    free(h_B);
    free(h_C);
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);
    return 0;
}
371a5defbeaa42364cd9fa18029450afdcab1527.cu
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>

// #define CHECK(call){
//     const cudaError_t error = call;
//     if(error != cudaSuccess){
//         printf("Error: %s:%d",__FILE__,__LINE__);
//         printf("code:%d, reason:%s\n",error,cudaGetErrorString(error));
//         exit(1);
//     }
// }

// block = N
__global__ void sumArraysOnDevice(float *A, float *B, float *C, const int N){
    int idx = threadIdx.x;
    C[idx] = A[idx] + B[idx];
}

// grid = N
__global__ void sumArraysOnDeviceGrid(float *A, float *B, float *C, const int N){
    int idx = blockIdx.x;
    C[idx] = A[idx] + B[idx];
}

__global__ void sumArraysOnDeviceCommon(float *A, float *B, float *C, const int N){
    int idx = threadIdx.x + blockIdx.x*blockDim.x;
    if(idx < N) C[idx] = A[idx] + B[idx]; // guard against out-of-bounds access when the grid is rounded up
}

void sumArraysOnHost(float *A, float *B, float *C, const int N){
    for(int i = 0; i < N; i++){
        C[i] = A[i] + B[i];
    }
}

void checkResult(float *hostRef, float *gpuRef, const int N){
    double epsilon = 1.0e-8;
    bool match = true;
    for(int i = 0; i < N; i++){
        if(abs(hostRef[i] - gpuRef[i]) > epsilon){
            match = false;
            printf("Result do not match!\n");
            printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i], gpuRef[i], i);
            break;
        }
    }
    if(match) {
        printf("Result match!\n");
    }
}

void initialData(float *ip, int size){
    time_t t;
    srand((unsigned int) time(&t));
    for(int i = 0; i < size; i++){
        ip[i] = (float)(rand() & 0xFF)/10.0f;
    }
}

int main(){
    int dev = 0;
    cudaSetDevice(dev);

    int nElem = 32;
    printf("Vector size %d\n", nElem);

    size_t nBytes = nElem * sizeof(float);
    float *h_A, *h_B, *h_C, *h_Ref;
    h_A = (float *)malloc(nBytes);
    h_B = (float *)malloc(nBytes);
    h_C = (float *)malloc(nBytes);
    h_Ref = (float *)malloc(nBytes);

    initialData(h_A, nElem);
    initialData(h_B, nElem);
    memset(h_C, 0, nBytes);
    memset(h_Ref, 0, nBytes);

    // cpu exec
    sumArraysOnHost(h_A, h_B, h_Ref, nElem);

    float *d_A, *d_B, *d_C;
    cudaMalloc((float **)&d_A, nBytes);
    cudaMalloc((float **)&d_B, nBytes);
    cudaMalloc((float **)&d_C, nBytes);

    // cp data from cpu to gpu
    cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice);

    int iLen = 128;
    dim3 block(iLen);
    dim3 grid((nElem + block.x - 1)/block.x);

    // exec gpu
    // sumArrasOnDevice(h_C,h_C,h_C,nElem);
    //sumArrasOnDevice<<<1,nElem>>>(d_A,d_B,d_C,nElem);
    // wait for all device work to finish! cudaMemcpy also synchronizes internally, so no extra blocking is needed here
    // cudaDeviceReset();
    sumArraysOnDeviceCommon<<<grid,block>>>(d_A, d_B, d_C, nElem);
    cudaDeviceSynchronize();

    // copy result back to host
    cudaMemcpy(h_C, d_C, nBytes, cudaMemcpyDeviceToHost);
    checkResult(h_Ref, h_C, nElem);

    free(h_A);
    free(h_B);
    free(h_C);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    return 0;
}
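The pair above differs from its CUDA original only in mechanical ways: the added hipify banner and "hip/hip_runtime.h" include, the cuda*-to-hip* renaming of runtime calls and enums, and the kernel-launch syntax. As a compact reference for reading the remaining pairs, the sketch below (a hypothetical addOne kernel, not taken from any record in this set) shows a plain CUDA launch with the corresponding hipify rewrites noted in comments, assuming the hipLaunchKernelGGL output format that the hipified records above use.

#include <cuda_runtime.h>
#include <cstdio>

__global__ void addOne(float *data, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] += 1.0f;
}

int main() {
    const int n = 1024;
    float *d = nullptr;
    cudaMalloc((void **)&d, n * sizeof(float));   // hipify: hipMalloc((void **)&d, n * sizeof(float));
    cudaMemset(d, 0, n * sizeof(float));          // hipify: hipMemset(d, 0, n * sizeof(float));

    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);

    // CUDA triple-chevron launch: kernel<<<grid, block, sharedMemBytes, stream>>>(args...);
    addOne<<<grid, block, 0, 0>>>(d, n);
    // hipify rewrites the launch as:
    //   hipLaunchKernelGGL(( addOne), dim3(grid), dim3(block), 0, 0, d, n);

    cudaDeviceSynchronize();                      // hipify: hipDeviceSynchronize();
    cudaFree(d);                                  // hipify: hipFree(d);
    printf("done\n");
    return 0;
}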
d97163ea409261aaab0d6d98cba6de10eb033c11.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <assert.h> #include <vector> #include <iostream> #include "yololayer.h" #include "cuda_utils.h" namespace Tn { template<typename T> void write(char*& buffer, const T& val) { *reinterpret_cast<T*>(buffer) = val; buffer += sizeof(T); } template<typename T> void read(const char*& buffer, T& val) { val = *reinterpret_cast<const T*>(buffer); buffer += sizeof(T); } } using namespace Yolo; namespace nvinfer1 { YoloLayerPlugin::YoloLayerPlugin(int classCount, int netWidth, int netHeight, int maxOut, const std::vector<Yolo::YoloKernel>& vYoloKernel) { mClassCount = classCount; mYoloV5NetWidth = netWidth; mYoloV5NetHeight = netHeight; mMaxOutObject = maxOut; mYoloKernel = vYoloKernel; mKernelCount = vYoloKernel.size(); CUDA_CHECK(hipHostMalloc(&mAnchor, mKernelCount * sizeof(void*))); size_t AnchorLen = sizeof(float)* CHECK_COUNT * 2; for (int ii = 0; ii < mKernelCount; ii++) { CUDA_CHECK(hipMalloc(&mAnchor[ii], AnchorLen)); const auto& yolo = mYoloKernel[ii]; CUDA_CHECK(hipMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, hipMemcpyHostToDevice)); } } YoloLayerPlugin::~YoloLayerPlugin() { for (int ii = 0; ii < mKernelCount; ii++) { CUDA_CHECK(hipFree(mAnchor[ii])); } CUDA_CHECK(hipHostFree(mAnchor)); } // create the plugin at runtime from a byte stream YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length) { using namespace Tn; const char *d = reinterpret_cast<const char *>(data), *a = d; read(d, mClassCount); read(d, mThreadCount); read(d, mKernelCount); read(d, mYoloV5NetWidth); read(d, mYoloV5NetHeight); read(d, mMaxOutObject); mYoloKernel.resize(mKernelCount); auto kernelSize = mKernelCount * sizeof(YoloKernel); memcpy(mYoloKernel.data(), d, kernelSize); d += kernelSize; CUDA_CHECK(hipHostMalloc(&mAnchor, mKernelCount * sizeof(void*))); size_t AnchorLen = sizeof(float)* CHECK_COUNT * 2; for (int ii = 0; ii < mKernelCount; ii++) { CUDA_CHECK(hipMalloc(&mAnchor[ii], AnchorLen)); const auto& yolo = mYoloKernel[ii]; CUDA_CHECK(hipMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, hipMemcpyHostToDevice)); } assert(d == a + length); } void YoloLayerPlugin::serialize(void* buffer) const TRT_NOEXCEPT { using namespace Tn; char* d = static_cast<char*>(buffer), *a = d; write(d, mClassCount); write(d, mThreadCount); write(d, mKernelCount); write(d, mYoloV5NetWidth); write(d, mYoloV5NetHeight); write(d, mMaxOutObject); auto kernelSize = mKernelCount * sizeof(YoloKernel); memcpy(d, mYoloKernel.data(), kernelSize); d += kernelSize; assert(d == a + getSerializationSize()); } size_t YoloLayerPlugin::getSerializationSize() const TRT_NOEXCEPT { return sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount) + sizeof(Yolo::YoloKernel) * mYoloKernel.size() + sizeof(mYoloV5NetWidth) + sizeof(mYoloV5NetHeight) + sizeof(mMaxOutObject); } int YoloLayerPlugin::initialize() TRT_NOEXCEPT { return 0; } Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) TRT_NOEXCEPT { //output the result to channel int totalsize = mMaxOutObject * sizeof(Detection) / sizeof(float); return Dims3(totalsize + 1, 1, 1); } // Set plugin namespace void YoloLayerPlugin::setPluginNamespace(const char* pluginNamespace) TRT_NOEXCEPT { mPluginNamespace = pluginNamespace; } const char* YoloLayerPlugin::getPluginNamespace() const TRT_NOEXCEPT { return mPluginNamespace; } // Return the DataType of the plugin output at the requested index DataType YoloLayerPlugin::getOutputDataType(int index, const 
nvinfer1::DataType* inputTypes, int nbInputs) const TRT_NOEXCEPT { return DataType::kFLOAT; } // Return true if output tensor is broadcast across a batch. bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const TRT_NOEXCEPT { return false; } // Return true if plugin can use input that is broadcast across batch without replication. bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const TRT_NOEXCEPT { return false; } void YoloLayerPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) TRT_NOEXCEPT { } // Attach the plugin object to an execution context and grant the plugin the access to some context resource. void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) TRT_NOEXCEPT { } // Detach the plugin object from its execution context. void YoloLayerPlugin::detachFromContext() TRT_NOEXCEPT {} const char* YoloLayerPlugin::getPluginType() const TRT_NOEXCEPT { return "YoloLayer_TRT"; } const char* YoloLayerPlugin::getPluginVersion() const TRT_NOEXCEPT { return "1"; } void YoloLayerPlugin::destroy() TRT_NOEXCEPT { delete this; } // Clone the plugin IPluginV2IOExt* YoloLayerPlugin::clone() const TRT_NOEXCEPT { YoloLayerPlugin* p = new YoloLayerPlugin(mClassCount, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, mYoloKernel); p->setPluginNamespace(mPluginNamespace); return p; } __device__ float Logist(float data) { return 1.0f / (1.0f + expf(-data)); }; __global__ void CalDetection(const float *input, float *output, int noElements, const int netwidth, const int netheight, int maxoutobject, int yoloWidth, int yoloHeight, const float anchors[CHECK_COUNT * 2], int classes, int outputElem) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx >= noElements) return; int total_grid = yoloWidth * yoloHeight; int bnIdx = idx / total_grid; idx = idx - total_grid * bnIdx; int info_len_i = 5 + classes; const float* curInput = input + bnIdx * (info_len_i * total_grid * CHECK_COUNT); for (int k = 0; k < CHECK_COUNT; ++k) { float box_prob = Logist(curInput[idx + k * info_len_i * total_grid + 4 * total_grid]); if (box_prob < IGNORE_THRESH) continue; int class_id = 0; float max_cls_prob = 0.0; for (int i = 5; i < info_len_i; ++i) { float p = Logist(curInput[idx + k * info_len_i * total_grid + i * total_grid]); if (p > max_cls_prob) { max_cls_prob = p; class_id = i - 5; } } float *res_count = output + bnIdx * outputElem; int count = (int)atomicAdd(res_count, 1); if (count >= maxoutobject) return; char *data = (char*)res_count + sizeof(float) + count * sizeof(Detection); Detection *det = (Detection*)(data); int row = idx / yoloWidth; int col = idx % yoloWidth; //Location // pytorch: // y = x[i].sigmoid() // y[..., 0:2] = (y[..., 0:2] * 2. 
- 0.5 + self.grid[i].to(x[i].device)) * self.stride[i] # xy // y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh // X: (sigmoid(tx) + cx)/FeaturemapW * netwidth det->bbox[0] = (col - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 0 * total_grid])) * netwidth / yoloWidth; det->bbox[1] = (row - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 1 * total_grid])) * netheight / yoloHeight; // W: (Pw * e^tw) / FeaturemapW * netwidth // v5: https://github.com/ultralytics/yolov5/issues/471 det->bbox[2] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 2 * total_grid]); det->bbox[2] = det->bbox[2] * det->bbox[2] * anchors[2 * k]; det->bbox[3] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 3 * total_grid]); det->bbox[3] = det->bbox[3] * det->bbox[3] * anchors[2 * k + 1]; det->conf = box_prob * max_cls_prob; det->class_id = class_id; } } void YoloLayerPlugin::forwardGpu(const float* const* inputs, float *output, hipStream_t stream, int batchSize) { int outputElem = 1 + mMaxOutObject * sizeof(Detection) / sizeof(float); for (int idx = 0; idx < batchSize; ++idx) { CUDA_CHECK(hipMemsetAsync(output + idx * outputElem, 0, sizeof(float), stream)); } int numElem = 0; for (unsigned int i = 0; i < mYoloKernel.size(); ++i) { const auto& yolo = mYoloKernel[i]; numElem = yolo.width * yolo.height * batchSize; if (numElem < mThreadCount) mThreadCount = numElem; //printf("Net: %d %d \n", mYoloV5NetWidth, mYoloV5NetHeight); CalDetection << < (numElem + mThreadCount - 1) / mThreadCount, mThreadCount, 0, stream >> > (inputs[i], output, numElem, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, yolo.width, yolo.height, (float*)mAnchor[i], mClassCount, outputElem); } } int YoloLayerPlugin::enqueue(int batchSize, const void* const* inputs, void* TRT_CONST_ENQUEUE* outputs, void* workspace, hipStream_t stream) TRT_NOEXCEPT { forwardGpu((const float* const*)inputs, (float*)outputs[0], stream, batchSize); return 0; } PluginFieldCollection YoloPluginCreator::mFC{}; std::vector<PluginField> YoloPluginCreator::mPluginAttributes; YoloPluginCreator::YoloPluginCreator() { mPluginAttributes.clear(); mFC.nbFields = mPluginAttributes.size(); mFC.fields = mPluginAttributes.data(); } const char* YoloPluginCreator::getPluginName() const TRT_NOEXCEPT { return "YoloLayer_TRT"; } const char* YoloPluginCreator::getPluginVersion() const TRT_NOEXCEPT { return "1"; } const PluginFieldCollection* YoloPluginCreator::getFieldNames() TRT_NOEXCEPT { return &mFC; } IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc) TRT_NOEXCEPT { assert(fc->nbFields == 2); assert(strcmp(fc->fields[0].name, "netinfo") == 0); assert(strcmp(fc->fields[1].name, "kernels") == 0); int *p_netinfo = (int*)(fc->fields[0].data); int class_count = p_netinfo[0]; int input_w = p_netinfo[1]; int input_h = p_netinfo[2]; int max_output_object_count = p_netinfo[3]; std::vector<Yolo::YoloKernel> kernels(fc->fields[1].length); memcpy(&kernels[0], fc->fields[1].data, kernels.size() * sizeof(Yolo::YoloKernel)); YoloLayerPlugin* obj = new YoloLayerPlugin(class_count, input_w, input_h, max_output_object_count, kernels); obj->setPluginNamespace(mNamespace.c_str()); return obj; } IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength) TRT_NOEXCEPT { // This object will be deleted when the network is destroyed, which will // call YoloLayerPlugin::destroy() YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, 
serialLength); obj->setPluginNamespace(mNamespace.c_str()); return obj; } }
d97163ea409261aaab0d6d98cba6de10eb033c11.cu
#include <assert.h> #include <vector> #include <iostream> #include "yololayer.h" #include "cuda_utils.h" namespace Tn { template<typename T> void write(char*& buffer, const T& val) { *reinterpret_cast<T*>(buffer) = val; buffer += sizeof(T); } template<typename T> void read(const char*& buffer, T& val) { val = *reinterpret_cast<const T*>(buffer); buffer += sizeof(T); } } using namespace Yolo; namespace nvinfer1 { YoloLayerPlugin::YoloLayerPlugin(int classCount, int netWidth, int netHeight, int maxOut, const std::vector<Yolo::YoloKernel>& vYoloKernel) { mClassCount = classCount; mYoloV5NetWidth = netWidth; mYoloV5NetHeight = netHeight; mMaxOutObject = maxOut; mYoloKernel = vYoloKernel; mKernelCount = vYoloKernel.size(); CUDA_CHECK(cudaMallocHost(&mAnchor, mKernelCount * sizeof(void*))); size_t AnchorLen = sizeof(float)* CHECK_COUNT * 2; for (int ii = 0; ii < mKernelCount; ii++) { CUDA_CHECK(cudaMalloc(&mAnchor[ii], AnchorLen)); const auto& yolo = mYoloKernel[ii]; CUDA_CHECK(cudaMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, cudaMemcpyHostToDevice)); } } YoloLayerPlugin::~YoloLayerPlugin() { for (int ii = 0; ii < mKernelCount; ii++) { CUDA_CHECK(cudaFree(mAnchor[ii])); } CUDA_CHECK(cudaFreeHost(mAnchor)); } // create the plugin at runtime from a byte stream YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length) { using namespace Tn; const char *d = reinterpret_cast<const char *>(data), *a = d; read(d, mClassCount); read(d, mThreadCount); read(d, mKernelCount); read(d, mYoloV5NetWidth); read(d, mYoloV5NetHeight); read(d, mMaxOutObject); mYoloKernel.resize(mKernelCount); auto kernelSize = mKernelCount * sizeof(YoloKernel); memcpy(mYoloKernel.data(), d, kernelSize); d += kernelSize; CUDA_CHECK(cudaMallocHost(&mAnchor, mKernelCount * sizeof(void*))); size_t AnchorLen = sizeof(float)* CHECK_COUNT * 2; for (int ii = 0; ii < mKernelCount; ii++) { CUDA_CHECK(cudaMalloc(&mAnchor[ii], AnchorLen)); const auto& yolo = mYoloKernel[ii]; CUDA_CHECK(cudaMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, cudaMemcpyHostToDevice)); } assert(d == a + length); } void YoloLayerPlugin::serialize(void* buffer) const TRT_NOEXCEPT { using namespace Tn; char* d = static_cast<char*>(buffer), *a = d; write(d, mClassCount); write(d, mThreadCount); write(d, mKernelCount); write(d, mYoloV5NetWidth); write(d, mYoloV5NetHeight); write(d, mMaxOutObject); auto kernelSize = mKernelCount * sizeof(YoloKernel); memcpy(d, mYoloKernel.data(), kernelSize); d += kernelSize; assert(d == a + getSerializationSize()); } size_t YoloLayerPlugin::getSerializationSize() const TRT_NOEXCEPT { return sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount) + sizeof(Yolo::YoloKernel) * mYoloKernel.size() + sizeof(mYoloV5NetWidth) + sizeof(mYoloV5NetHeight) + sizeof(mMaxOutObject); } int YoloLayerPlugin::initialize() TRT_NOEXCEPT { return 0; } Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) TRT_NOEXCEPT { //output the result to channel int totalsize = mMaxOutObject * sizeof(Detection) / sizeof(float); return Dims3(totalsize + 1, 1, 1); } // Set plugin namespace void YoloLayerPlugin::setPluginNamespace(const char* pluginNamespace) TRT_NOEXCEPT { mPluginNamespace = pluginNamespace; } const char* YoloLayerPlugin::getPluginNamespace() const TRT_NOEXCEPT { return mPluginNamespace; } // Return the DataType of the plugin output at the requested index DataType YoloLayerPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const TRT_NOEXCEPT { return DataType::kFLOAT; } 
// Return true if output tensor is broadcast across a batch. bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const TRT_NOEXCEPT { return false; } // Return true if plugin can use input that is broadcast across batch without replication. bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const TRT_NOEXCEPT { return false; } void YoloLayerPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) TRT_NOEXCEPT { } // Attach the plugin object to an execution context and grant the plugin the access to some context resource. void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) TRT_NOEXCEPT { } // Detach the plugin object from its execution context. void YoloLayerPlugin::detachFromContext() TRT_NOEXCEPT {} const char* YoloLayerPlugin::getPluginType() const TRT_NOEXCEPT { return "YoloLayer_TRT"; } const char* YoloLayerPlugin::getPluginVersion() const TRT_NOEXCEPT { return "1"; } void YoloLayerPlugin::destroy() TRT_NOEXCEPT { delete this; } // Clone the plugin IPluginV2IOExt* YoloLayerPlugin::clone() const TRT_NOEXCEPT { YoloLayerPlugin* p = new YoloLayerPlugin(mClassCount, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, mYoloKernel); p->setPluginNamespace(mPluginNamespace); return p; } __device__ float Logist(float data) { return 1.0f / (1.0f + expf(-data)); }; __global__ void CalDetection(const float *input, float *output, int noElements, const int netwidth, const int netheight, int maxoutobject, int yoloWidth, int yoloHeight, const float anchors[CHECK_COUNT * 2], int classes, int outputElem) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx >= noElements) return; int total_grid = yoloWidth * yoloHeight; int bnIdx = idx / total_grid; idx = idx - total_grid * bnIdx; int info_len_i = 5 + classes; const float* curInput = input + bnIdx * (info_len_i * total_grid * CHECK_COUNT); for (int k = 0; k < CHECK_COUNT; ++k) { float box_prob = Logist(curInput[idx + k * info_len_i * total_grid + 4 * total_grid]); if (box_prob < IGNORE_THRESH) continue; int class_id = 0; float max_cls_prob = 0.0; for (int i = 5; i < info_len_i; ++i) { float p = Logist(curInput[idx + k * info_len_i * total_grid + i * total_grid]); if (p > max_cls_prob) { max_cls_prob = p; class_id = i - 5; } } float *res_count = output + bnIdx * outputElem; int count = (int)atomicAdd(res_count, 1); if (count >= maxoutobject) return; char *data = (char*)res_count + sizeof(float) + count * sizeof(Detection); Detection *det = (Detection*)(data); int row = idx / yoloWidth; int col = idx % yoloWidth; //Location // pytorch: // y = x[i].sigmoid() // y[..., 0:2] = (y[..., 0:2] * 2. 
- 0.5 + self.grid[i].to(x[i].device)) * self.stride[i] # xy // y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh // X: (sigmoid(tx) + cx)/FeaturemapW * netwidth det->bbox[0] = (col - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 0 * total_grid])) * netwidth / yoloWidth; det->bbox[1] = (row - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 1 * total_grid])) * netheight / yoloHeight; // W: (Pw * e^tw) / FeaturemapW * netwidth // v5: https://github.com/ultralytics/yolov5/issues/471 det->bbox[2] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 2 * total_grid]); det->bbox[2] = det->bbox[2] * det->bbox[2] * anchors[2 * k]; det->bbox[3] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 3 * total_grid]); det->bbox[3] = det->bbox[3] * det->bbox[3] * anchors[2 * k + 1]; det->conf = box_prob * max_cls_prob; det->class_id = class_id; } } void YoloLayerPlugin::forwardGpu(const float* const* inputs, float *output, cudaStream_t stream, int batchSize) { int outputElem = 1 + mMaxOutObject * sizeof(Detection) / sizeof(float); for (int idx = 0; idx < batchSize; ++idx) { CUDA_CHECK(cudaMemsetAsync(output + idx * outputElem, 0, sizeof(float), stream)); } int numElem = 0; for (unsigned int i = 0; i < mYoloKernel.size(); ++i) { const auto& yolo = mYoloKernel[i]; numElem = yolo.width * yolo.height * batchSize; if (numElem < mThreadCount) mThreadCount = numElem; //printf("Net: %d %d \n", mYoloV5NetWidth, mYoloV5NetHeight); CalDetection << < (numElem + mThreadCount - 1) / mThreadCount, mThreadCount, 0, stream >> > (inputs[i], output, numElem, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, yolo.width, yolo.height, (float*)mAnchor[i], mClassCount, outputElem); } } int YoloLayerPlugin::enqueue(int batchSize, const void* const* inputs, void* TRT_CONST_ENQUEUE* outputs, void* workspace, cudaStream_t stream) TRT_NOEXCEPT { forwardGpu((const float* const*)inputs, (float*)outputs[0], stream, batchSize); return 0; } PluginFieldCollection YoloPluginCreator::mFC{}; std::vector<PluginField> YoloPluginCreator::mPluginAttributes; YoloPluginCreator::YoloPluginCreator() { mPluginAttributes.clear(); mFC.nbFields = mPluginAttributes.size(); mFC.fields = mPluginAttributes.data(); } const char* YoloPluginCreator::getPluginName() const TRT_NOEXCEPT { return "YoloLayer_TRT"; } const char* YoloPluginCreator::getPluginVersion() const TRT_NOEXCEPT { return "1"; } const PluginFieldCollection* YoloPluginCreator::getFieldNames() TRT_NOEXCEPT { return &mFC; } IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc) TRT_NOEXCEPT { assert(fc->nbFields == 2); assert(strcmp(fc->fields[0].name, "netinfo") == 0); assert(strcmp(fc->fields[1].name, "kernels") == 0); int *p_netinfo = (int*)(fc->fields[0].data); int class_count = p_netinfo[0]; int input_w = p_netinfo[1]; int input_h = p_netinfo[2]; int max_output_object_count = p_netinfo[3]; std::vector<Yolo::YoloKernel> kernels(fc->fields[1].length); memcpy(&kernels[0], fc->fields[1].data, kernels.size() * sizeof(Yolo::YoloKernel)); YoloLayerPlugin* obj = new YoloLayerPlugin(class_count, input_w, input_h, max_output_object_count, kernels); obj->setPluginNamespace(mNamespace.c_str()); return obj; } IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength) TRT_NOEXCEPT { // This object will be deleted when the network is destroyed, which will // call YoloLayerPlugin::destroy() YoloLayerPlugin* obj = new 
YoloLayerPlugin(serialData, serialLength); obj->setPluginNamespace(mNamespace.c_str()); return obj; } }
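The CalDetection kernel in the pair above decodes boxes using the YOLOv5 formulas quoted in its own comments; restated as equations (with \(\sigma\) the logistic function implemented by Logist, \((c_x, c_y)\) the grid cell given by col and row, \(W_{net}/W_{grid}\) the netwidth/yoloWidth scale, and \((p_w, p_h)\) the per-branch anchor sizes):

\[
b_x = \bigl(2\sigma(t_x) - 0.5 + c_x\bigr)\,\frac{W_{net}}{W_{grid}}, \qquad
b_y = \bigl(2\sigma(t_y) - 0.5 + c_y\bigr)\,\frac{H_{net}}{H_{grid}}, \qquad
b_w = \bigl(2\sigma(t_w)\bigr)^{2} p_w, \qquad
b_h = \bigl(2\sigma(t_h)\bigr)^{2} p_h,
\]

and the reported confidence is \(\sigma(t_o)\cdot\max_c \sigma(t_c)\), matching det->conf = box_prob * max_cls_prob in the kernel.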
bc477d4c94e264f6adbb0c740ab4089f03414200.hip
// !!! This is a file automatically generated by hipify!!! #include "evaluator.cuh" #include <catboost/libs/cuda_wrappers/kernel.cuh> #include <catboost/libs/cuda_wrappers/kernel_helpers.cuh> #include <catboost/libs/cuda_wrappers/arch.cuh> #include <catboost/libs/cuda_wrappers/kernel_helpers.cuh> #include <hip/hip_runtime.h> #include <assert.h> template<typename TFeatureType, TGPUDataInput::EFeatureLayout Layout> struct TFeatureAccessor { TFeatureAccessor() = default; using TFeature = TFeatureType; using TFeaturePtr = const TFeature*; i32 FeatureStride = 0; i32 ObjectStride = 0; i32 FeatureCount = 0; i32 ObjectCount = 0; TFeaturePtr FeaturesPtr = nullptr; __forceinline__ __device__ TFeature operator()(i32 featureId, i32 objectId) const { if (Layout == TGPUDataInput::EFeatureLayout::ColumnFirst) { return objectId < ObjectCount && featureId < FeatureCount ? __ldg(FeaturesPtr + featureId * FeatureStride + objectId) : NegativeInfty(); } else { return objectId < ObjectCount && featureId < FeatureCount ? __ldg(FeaturesPtr + featureId + objectId * ObjectStride) : NegativeInfty(); } } __forceinline__ __device__ int FeaturesCount() const { return FeatureCount; } __forceinline__ __device__ int SamplesCount() const { return ObjectCount; } }; constexpr ui32 ObjectsPerThread = 4; constexpr ui32 TreeSubBlockWidth = 8; constexpr ui32 ExtTreeBlockWidth = 128; constexpr ui32 QuantizationDocBlockSize = 256; constexpr ui32 BlockWidth = 256; constexpr ui32 EvalDocBlockSize = BlockWidth / TreeSubBlockWidth; static_assert(EvalDocBlockSize >= WarpSize, "EvalBlockSize should be greater than WarpSize"); using TTreeIndex = uint4; template<typename TFloatFeatureAccessor> __launch_bounds__(QuantizationDocBlockSize, 1) __global__ void Binarize( TFloatFeatureAccessor floatAccessor, const float* __restrict__ borders, const ui32* __restrict__ featureBorderOffsets, const ui32* __restrict__ featureBordersCount, const ui32* __restrict__ floatFeatureForBucketIdx, const ui32 bucketsCount, TCudaQuantizationBucket* __restrict__ target ) { const int blockby32 = blockIdx.x * QuantizationDocBlockSize / WarpSize + threadIdx.x / WarpSize; const int firstDocForThread = blockby32 * WarpSize * ObjectsPerThread + threadIdx.x % WarpSize; const int targetBucketIdx = blockIdx.y; const float* featureBorders = borders + featureBorderOffsets[targetBucketIdx]; const int featureBorderCount = __ldg(featureBordersCount + targetBucketIdx); const int featureIdx = floatFeatureForBucketIdx[targetBucketIdx]; __shared__ float bordersLocal[QuantizationDocBlockSize]; if (threadIdx.x < featureBorderCount) { bordersLocal[threadIdx.x] = __ldg(featureBorders + threadIdx.x); } __syncthreads(); float4 features; features.x = floatAccessor(featureIdx, firstDocForThread + 0 * WarpSize); features.y = floatAccessor(featureIdx, firstDocForThread + 1 * WarpSize); features.z = floatAccessor(featureIdx, firstDocForThread + 2 * WarpSize); features.w = floatAccessor(featureIdx, firstDocForThread + 3 * WarpSize); TCudaQuantizationBucket bins = { 0 }; #pragma unroll 8 for (int borderId = 0; borderId < featureBorderCount; ++borderId) { const float border = bordersLocal[borderId]; bins.x += features.x > border; bins.y += features.y > border; bins.z += features.z > border; bins.w += features.w > border; } if (firstDocForThread < floatAccessor.SamplesCount()) { target[bucketsCount * WarpSize * blockby32 + targetBucketIdx * WarpSize + threadIdx.x % WarpSize] = bins; } } template<int TreeDepth> TTreeIndex __device__ __forceinline__ CalcIndexesUnwrapped(const TGPURepackedBin* 
const __restrict__ curRepackedBinPtr, const TCudaQuantizationBucket* const __restrict__ quantizedFeatures) { TTreeIndex result = { 0 }; #pragma unroll TreeDepth for (int depth = 0; depth < TreeDepth; ++depth) { const TGPURepackedBin bin = Ldg(curRepackedBinPtr + depth); TCudaQuantizationBucket buckets = __ldg(quantizedFeatures + bin.FeatureIdx); result.x |= ((buckets.x) >= bin.FeatureVal) << depth; result.y |= ((buckets.y) >= bin.FeatureVal) << depth; result.z |= ((buckets.z) >= bin.FeatureVal) << depth; result.w |= ((buckets.w) >= bin.FeatureVal) << depth; } return result; } TTreeIndex __device__ CalcIndexesBase(int TreeDepth, const TGPURepackedBin* const __restrict__ curRepackedBinPtr, const TCudaQuantizationBucket* const __restrict__ quantizedFeatures) { TTreeIndex bins = { 0 }; for (int depth = 0; depth < TreeDepth; ++depth) { const TGPURepackedBin bin = Ldg(curRepackedBinPtr + depth); TCudaQuantizationBucket vals = __ldg(quantizedFeatures + bin.FeatureIdx); bins.x |= ((vals.x) >= bin.FeatureVal) << depth; bins.y |= ((vals.y) >= bin.FeatureVal) << depth; bins.z |= ((vals.z) >= bin.FeatureVal) << depth; bins.w |= ((vals.w) >= bin.FeatureVal) << depth; } return bins; } TTreeIndex __device__ __forceinline__ CalcTreeVals(int curTreeDepth, const TGPURepackedBin* const __restrict__ curRepackedBinPtr, const TCudaQuantizationBucket* const __restrict__ quantizedFeatures) { switch (curTreeDepth) { case 6: return CalcIndexesUnwrapped<6>(curRepackedBinPtr, quantizedFeatures); case 7: return CalcIndexesUnwrapped<7>(curRepackedBinPtr, quantizedFeatures); case 8: return CalcIndexesUnwrapped<8>(curRepackedBinPtr, quantizedFeatures); default: return CalcIndexesBase(curTreeDepth, curRepackedBinPtr, quantizedFeatures); } } __launch_bounds__(BlockWidth, 1) __global__ void EvalObliviousTrees( const TCudaQuantizationBucket* __restrict__ quantizedFeatures, const ui32* __restrict__ treeSizes, const ui32 treeCount, const ui32* __restrict__ treeStartOffsets, const TGPURepackedBin* __restrict__ repackedBins, const ui32* __restrict__ firstLeafOfset, const ui32 bucketsCount, const TCudaEvaluatorLeafType* __restrict__ leafValues, const ui32 documentCount, TCudaEvaluatorLeafType* __restrict__ results) { const int innerBlockBy32 = threadIdx.x / WarpSize; const int blockby32 = blockIdx.y * EvalDocBlockSize / WarpSize + innerBlockBy32; const int inBlockId = threadIdx.x % WarpSize; const int firstDocForThread = blockby32 * WarpSize * ObjectsPerThread + inBlockId; quantizedFeatures += bucketsCount * WarpSize * blockby32 + threadIdx.x % WarpSize; const int firstTreeIdx = TreeSubBlockWidth * ExtTreeBlockWidth * (threadIdx.y + TreeSubBlockWidth * blockIdx.x); const int lastTreeIdx = min(firstTreeIdx + TreeSubBlockWidth * ExtTreeBlockWidth, treeCount); double4 localResult = { 0 }; if (firstTreeIdx < lastTreeIdx && firstDocForThread < documentCount) { const TGPURepackedBin* __restrict__ curRepackedBinPtr = repackedBins + __ldg(treeStartOffsets + firstTreeIdx); leafValues += firstLeafOfset[firstTreeIdx]; int treeIdx = firstTreeIdx; const int lastTreeBy2 = lastTreeIdx - ((lastTreeIdx - firstTreeIdx) & 0x3); for (; treeIdx < lastTreeBy2; treeIdx += 2) { const int curTreeDepth1 = __ldg(treeSizes + treeIdx); const int curTreeDepth2 = __ldg(treeSizes + treeIdx + 1); const TTreeIndex bins1 = CalcTreeVals(curTreeDepth1, curRepackedBinPtr, quantizedFeatures); const TTreeIndex bins2 = CalcTreeVals(curTreeDepth2, curRepackedBinPtr + curTreeDepth1, quantizedFeatures); const auto leafValues2 = leafValues + (1 << curTreeDepth1); 
localResult.x += __ldg(leafValues + bins1.x) + __ldg(leafValues2 + bins2.x); localResult.y += __ldg(leafValues + bins1.y) + __ldg(leafValues2 + bins2.y); localResult.z += __ldg(leafValues + bins1.z) + __ldg(leafValues2 + bins2.z); localResult.w += __ldg(leafValues + bins1.w) + __ldg(leafValues2 + bins2.w); curRepackedBinPtr += curTreeDepth1 + curTreeDepth2; leafValues = leafValues2 + (1 << curTreeDepth2); } for (; treeIdx < lastTreeIdx; ++treeIdx) { const int curTreeDepth = __ldg(treeSizes + treeIdx); const TTreeIndex bins = CalcTreeVals(curTreeDepth, curRepackedBinPtr, quantizedFeatures); localResult.x += __ldg(leafValues + bins.x); localResult.y += __ldg(leafValues + bins.y); localResult.z += __ldg(leafValues + bins.z); localResult.w += __ldg(leafValues + bins.w); curRepackedBinPtr += curTreeDepth; leafValues += (1 << curTreeDepth); } } // TODO(kirillovs): reduce code is valid if those conditions met static_assert(EvalDocBlockSize * ObjectsPerThread == 128, ""); static_assert(EvalDocBlockSize == 32, ""); __shared__ TCudaEvaluatorLeafType reduceVals[EvalDocBlockSize * ObjectsPerThread * TreeSubBlockWidth]; reduceVals[innerBlockBy32 * WarpSize * ObjectsPerThread + WarpSize * 0 + inBlockId + threadIdx.y * EvalDocBlockSize * ObjectsPerThread] = localResult.x; reduceVals[innerBlockBy32 * WarpSize * ObjectsPerThread + WarpSize * 1 + inBlockId + threadIdx.y * EvalDocBlockSize * ObjectsPerThread] = localResult.y; reduceVals[innerBlockBy32 * WarpSize * ObjectsPerThread + WarpSize * 2 + inBlockId + threadIdx.y * EvalDocBlockSize * ObjectsPerThread] = localResult.z; reduceVals[innerBlockBy32 * WarpSize * ObjectsPerThread + WarpSize * 3 + inBlockId + threadIdx.y * EvalDocBlockSize * ObjectsPerThread] = localResult.w; __syncthreads(); TCudaEvaluatorLeafType lr = reduceVals[threadIdx.x + threadIdx.y * EvalDocBlockSize]; for (int i = 256; i < 256 * 4; i += 256) { lr += reduceVals[i + threadIdx.x + threadIdx.y * EvalDocBlockSize]; } reduceVals[threadIdx.x + threadIdx.y * EvalDocBlockSize] = lr; __syncthreads(); if (threadIdx.y < ObjectsPerThread) { TAtomicAdd<TCudaEvaluatorLeafType>::Add( results + blockby32 * WarpSize * ObjectsPerThread + threadIdx.x + threadIdx.y * EvalDocBlockSize, reduceVals[threadIdx.x + threadIdx.y * EvalDocBlockSize] + reduceVals[threadIdx.x + threadIdx.y * EvalDocBlockSize + 128] ); } } void TEvaluationDataCache::PrepareCache(ui32 effectiveBucketCount, ui32 objectsCount) { const auto one32blockSize = WarpSize * effectiveBucketCount; const auto desiredQuantBuff = one32blockSize * NKernel::CeilDivide<ui32>(objectsCount, 128) * 4; if (BinarizedFeaturesBuffer.Size() < desiredQuantBuff) { BinarizedFeaturesBuffer = TCudaVec<TCudaQuantizationBucket>(desiredQuantBuff, EMemoryType::Device); } if (EvalResults.Size() < objectsCount) { EvalResults = TCudaVec<TCudaEvaluatorLeafType>(AlignBy<2048>(objectsCount), EMemoryType::Device); } } void TGPUCatboostEvaluationContext::EvalData(const TGPUDataInput& dataInput, TArrayRef<TCudaEvaluatorLeafType> result, size_t treeStart, size_t treeEnd) const { TFeatureAccessor<float, TGPUDataInput::EFeatureLayout::ColumnFirst> floatFeatureAccessor; floatFeatureAccessor.FeatureCount = dataInput.FloatFeatureCount; floatFeatureAccessor.FeatureStride = dataInput.Stride; floatFeatureAccessor.ObjectCount = dataInput.ObjectCount; floatFeatureAccessor.ObjectStride = 1; floatFeatureAccessor.FeaturesPtr = dataInput.FlatFloatsVector.Get(); EvalDataCache.PrepareCache(GPUModelData.FloatFeatureForBucketIdx.Size(), dataInput.ObjectCount); const dim3 
quantizationDimBlock(QuantizationDocBlockSize, 1); const dim3 quantizationDimGrid( NKernel::CeilDivide<unsigned int>(dataInput.ObjectCount, QuantizationDocBlockSize * ObjectsPerThread), GPUModelData.BordersCount.Size() // float features from models ); hipLaunchKernelGGL(( Binarize), dim3(quantizationDimGrid), dim3(quantizationDimBlock), 0, Stream, floatFeatureAccessor, GPUModelData.FlatBordersVector.Get(), GPUModelData.BordersOffsets.Get(), GPUModelData.BordersCount.Get(), GPUModelData.FloatFeatureForBucketIdx.Get(), GPUModelData.FloatFeatureForBucketIdx.Size(), EvalDataCache.BinarizedFeaturesBuffer.Get() ); const dim3 treeCalcDimBlock(EvalDocBlockSize, TreeSubBlockWidth); const dim3 treeCalcDimGrid( NKernel::CeilDivide<unsigned int>(GPUModelData.TreeSizes.Size(), TreeSubBlockWidth * ExtTreeBlockWidth), NKernel::CeilDivide<unsigned int>(dataInput.ObjectCount, EvalDocBlockSize * ObjectsPerThread) ); ClearMemoryAsync(EvalDataCache.EvalResults.AsArrayRef(), Stream); hipLaunchKernelGGL(( EvalObliviousTrees), dim3(treeCalcDimGrid), dim3(treeCalcDimBlock), 0, Stream, EvalDataCache.BinarizedFeaturesBuffer.Get(), GPUModelData.TreeSizes.Get(), GPUModelData.TreeSizes.Size(), GPUModelData.TreeStartOffsets.Get(), GPUModelData.TreeSplits.Get(), GPUModelData.TreeFirstLeafOffsets.Get(), GPUModelData.FloatFeatureForBucketIdx.Size(), GPUModelData.ModelLeafs.Get(), dataInput.ObjectCount, EvalDataCache.EvalResults.Get() ); MemoryCopyAsync<TCudaEvaluatorLeafType>( EvalDataCache.EvalResults.AsArrayRef().Slice(0, dataInput.ObjectCount), result, Stream ); Stream.Synchronize(); }
bc477d4c94e264f6adbb0c740ab4089f03414200.cu
#include "evaluator.cuh" #include <catboost/libs/cuda_wrappers/kernel.cuh> #include <catboost/libs/cuda_wrappers/kernel_helpers.cuh> #include <catboost/libs/cuda_wrappers/arch.cuh> #include <catboost/libs/cuda_wrappers/kernel_helpers.cuh> #include <cuda_runtime.h> #include <assert.h> template<typename TFeatureType, TGPUDataInput::EFeatureLayout Layout> struct TFeatureAccessor { TFeatureAccessor() = default; using TFeature = TFeatureType; using TFeaturePtr = const TFeature*; i32 FeatureStride = 0; i32 ObjectStride = 0; i32 FeatureCount = 0; i32 ObjectCount = 0; TFeaturePtr FeaturesPtr = nullptr; __forceinline__ __device__ TFeature operator()(i32 featureId, i32 objectId) const { if (Layout == TGPUDataInput::EFeatureLayout::ColumnFirst) { return objectId < ObjectCount && featureId < FeatureCount ? __ldg(FeaturesPtr + featureId * FeatureStride + objectId) : NegativeInfty(); } else { return objectId < ObjectCount && featureId < FeatureCount ? __ldg(FeaturesPtr + featureId + objectId * ObjectStride) : NegativeInfty(); } } __forceinline__ __device__ int FeaturesCount() const { return FeatureCount; } __forceinline__ __device__ int SamplesCount() const { return ObjectCount; } }; constexpr ui32 ObjectsPerThread = 4; constexpr ui32 TreeSubBlockWidth = 8; constexpr ui32 ExtTreeBlockWidth = 128; constexpr ui32 QuantizationDocBlockSize = 256; constexpr ui32 BlockWidth = 256; constexpr ui32 EvalDocBlockSize = BlockWidth / TreeSubBlockWidth; static_assert(EvalDocBlockSize >= WarpSize, "EvalBlockSize should be greater than WarpSize"); using TTreeIndex = uint4; template<typename TFloatFeatureAccessor> __launch_bounds__(QuantizationDocBlockSize, 1) __global__ void Binarize( TFloatFeatureAccessor floatAccessor, const float* __restrict__ borders, const ui32* __restrict__ featureBorderOffsets, const ui32* __restrict__ featureBordersCount, const ui32* __restrict__ floatFeatureForBucketIdx, const ui32 bucketsCount, TCudaQuantizationBucket* __restrict__ target ) { const int blockby32 = blockIdx.x * QuantizationDocBlockSize / WarpSize + threadIdx.x / WarpSize; const int firstDocForThread = blockby32 * WarpSize * ObjectsPerThread + threadIdx.x % WarpSize; const int targetBucketIdx = blockIdx.y; const float* featureBorders = borders + featureBorderOffsets[targetBucketIdx]; const int featureBorderCount = __ldg(featureBordersCount + targetBucketIdx); const int featureIdx = floatFeatureForBucketIdx[targetBucketIdx]; __shared__ float bordersLocal[QuantizationDocBlockSize]; if (threadIdx.x < featureBorderCount) { bordersLocal[threadIdx.x] = __ldg(featureBorders + threadIdx.x); } __syncthreads(); float4 features; features.x = floatAccessor(featureIdx, firstDocForThread + 0 * WarpSize); features.y = floatAccessor(featureIdx, firstDocForThread + 1 * WarpSize); features.z = floatAccessor(featureIdx, firstDocForThread + 2 * WarpSize); features.w = floatAccessor(featureIdx, firstDocForThread + 3 * WarpSize); TCudaQuantizationBucket bins = { 0 }; #pragma unroll 8 for (int borderId = 0; borderId < featureBorderCount; ++borderId) { const float border = bordersLocal[borderId]; bins.x += features.x > border; bins.y += features.y > border; bins.z += features.z > border; bins.w += features.w > border; } if (firstDocForThread < floatAccessor.SamplesCount()) { target[bucketsCount * WarpSize * blockby32 + targetBucketIdx * WarpSize + threadIdx.x % WarpSize] = bins; } } template<int TreeDepth> TTreeIndex __device__ __forceinline__ CalcIndexesUnwrapped(const TGPURepackedBin* const __restrict__ curRepackedBinPtr, const 
TCudaQuantizationBucket* const __restrict__ quantizedFeatures) { TTreeIndex result = { 0 }; #pragma unroll TreeDepth for (int depth = 0; depth < TreeDepth; ++depth) { const TGPURepackedBin bin = Ldg(curRepackedBinPtr + depth); TCudaQuantizationBucket buckets = __ldg(quantizedFeatures + bin.FeatureIdx); result.x |= ((buckets.x) >= bin.FeatureVal) << depth; result.y |= ((buckets.y) >= bin.FeatureVal) << depth; result.z |= ((buckets.z) >= bin.FeatureVal) << depth; result.w |= ((buckets.w) >= bin.FeatureVal) << depth; } return result; } TTreeIndex __device__ CalcIndexesBase(int TreeDepth, const TGPURepackedBin* const __restrict__ curRepackedBinPtr, const TCudaQuantizationBucket* const __restrict__ quantizedFeatures) { TTreeIndex bins = { 0 }; for (int depth = 0; depth < TreeDepth; ++depth) { const TGPURepackedBin bin = Ldg(curRepackedBinPtr + depth); TCudaQuantizationBucket vals = __ldg(quantizedFeatures + bin.FeatureIdx); bins.x |= ((vals.x) >= bin.FeatureVal) << depth; bins.y |= ((vals.y) >= bin.FeatureVal) << depth; bins.z |= ((vals.z) >= bin.FeatureVal) << depth; bins.w |= ((vals.w) >= bin.FeatureVal) << depth; } return bins; } TTreeIndex __device__ __forceinline__ CalcTreeVals(int curTreeDepth, const TGPURepackedBin* const __restrict__ curRepackedBinPtr, const TCudaQuantizationBucket* const __restrict__ quantizedFeatures) { switch (curTreeDepth) { case 6: return CalcIndexesUnwrapped<6>(curRepackedBinPtr, quantizedFeatures); case 7: return CalcIndexesUnwrapped<7>(curRepackedBinPtr, quantizedFeatures); case 8: return CalcIndexesUnwrapped<8>(curRepackedBinPtr, quantizedFeatures); default: return CalcIndexesBase(curTreeDepth, curRepackedBinPtr, quantizedFeatures); } } __launch_bounds__(BlockWidth, 1) __global__ void EvalObliviousTrees( const TCudaQuantizationBucket* __restrict__ quantizedFeatures, const ui32* __restrict__ treeSizes, const ui32 treeCount, const ui32* __restrict__ treeStartOffsets, const TGPURepackedBin* __restrict__ repackedBins, const ui32* __restrict__ firstLeafOfset, const ui32 bucketsCount, const TCudaEvaluatorLeafType* __restrict__ leafValues, const ui32 documentCount, TCudaEvaluatorLeafType* __restrict__ results) { const int innerBlockBy32 = threadIdx.x / WarpSize; const int blockby32 = blockIdx.y * EvalDocBlockSize / WarpSize + innerBlockBy32; const int inBlockId = threadIdx.x % WarpSize; const int firstDocForThread = blockby32 * WarpSize * ObjectsPerThread + inBlockId; quantizedFeatures += bucketsCount * WarpSize * blockby32 + threadIdx.x % WarpSize; const int firstTreeIdx = TreeSubBlockWidth * ExtTreeBlockWidth * (threadIdx.y + TreeSubBlockWidth * blockIdx.x); const int lastTreeIdx = min(firstTreeIdx + TreeSubBlockWidth * ExtTreeBlockWidth, treeCount); double4 localResult = { 0 }; if (firstTreeIdx < lastTreeIdx && firstDocForThread < documentCount) { const TGPURepackedBin* __restrict__ curRepackedBinPtr = repackedBins + __ldg(treeStartOffsets + firstTreeIdx); leafValues += firstLeafOfset[firstTreeIdx]; int treeIdx = firstTreeIdx; const int lastTreeBy2 = lastTreeIdx - ((lastTreeIdx - firstTreeIdx) & 0x3); for (; treeIdx < lastTreeBy2; treeIdx += 2) { const int curTreeDepth1 = __ldg(treeSizes + treeIdx); const int curTreeDepth2 = __ldg(treeSizes + treeIdx + 1); const TTreeIndex bins1 = CalcTreeVals(curTreeDepth1, curRepackedBinPtr, quantizedFeatures); const TTreeIndex bins2 = CalcTreeVals(curTreeDepth2, curRepackedBinPtr + curTreeDepth1, quantizedFeatures); const auto leafValues2 = leafValues + (1 << curTreeDepth1); localResult.x += __ldg(leafValues + bins1.x) + 
__ldg(leafValues2 + bins2.x); localResult.y += __ldg(leafValues + bins1.y) + __ldg(leafValues2 + bins2.y); localResult.z += __ldg(leafValues + bins1.z) + __ldg(leafValues2 + bins2.z); localResult.w += __ldg(leafValues + bins1.w) + __ldg(leafValues2 + bins2.w); curRepackedBinPtr += curTreeDepth1 + curTreeDepth2; leafValues = leafValues2 + (1 << curTreeDepth2); } for (; treeIdx < lastTreeIdx; ++treeIdx) { const int curTreeDepth = __ldg(treeSizes + treeIdx); const TTreeIndex bins = CalcTreeVals(curTreeDepth, curRepackedBinPtr, quantizedFeatures); localResult.x += __ldg(leafValues + bins.x); localResult.y += __ldg(leafValues + bins.y); localResult.z += __ldg(leafValues + bins.z); localResult.w += __ldg(leafValues + bins.w); curRepackedBinPtr += curTreeDepth; leafValues += (1 << curTreeDepth); } } // TODO(kirillovs): reduce code is valid if those conditions met static_assert(EvalDocBlockSize * ObjectsPerThread == 128, ""); static_assert(EvalDocBlockSize == 32, ""); __shared__ TCudaEvaluatorLeafType reduceVals[EvalDocBlockSize * ObjectsPerThread * TreeSubBlockWidth]; reduceVals[innerBlockBy32 * WarpSize * ObjectsPerThread + WarpSize * 0 + inBlockId + threadIdx.y * EvalDocBlockSize * ObjectsPerThread] = localResult.x; reduceVals[innerBlockBy32 * WarpSize * ObjectsPerThread + WarpSize * 1 + inBlockId + threadIdx.y * EvalDocBlockSize * ObjectsPerThread] = localResult.y; reduceVals[innerBlockBy32 * WarpSize * ObjectsPerThread + WarpSize * 2 + inBlockId + threadIdx.y * EvalDocBlockSize * ObjectsPerThread] = localResult.z; reduceVals[innerBlockBy32 * WarpSize * ObjectsPerThread + WarpSize * 3 + inBlockId + threadIdx.y * EvalDocBlockSize * ObjectsPerThread] = localResult.w; __syncthreads(); TCudaEvaluatorLeafType lr = reduceVals[threadIdx.x + threadIdx.y * EvalDocBlockSize]; for (int i = 256; i < 256 * 4; i += 256) { lr += reduceVals[i + threadIdx.x + threadIdx.y * EvalDocBlockSize]; } reduceVals[threadIdx.x + threadIdx.y * EvalDocBlockSize] = lr; __syncthreads(); if (threadIdx.y < ObjectsPerThread) { TAtomicAdd<TCudaEvaluatorLeafType>::Add( results + blockby32 * WarpSize * ObjectsPerThread + threadIdx.x + threadIdx.y * EvalDocBlockSize, reduceVals[threadIdx.x + threadIdx.y * EvalDocBlockSize] + reduceVals[threadIdx.x + threadIdx.y * EvalDocBlockSize + 128] ); } } void TEvaluationDataCache::PrepareCache(ui32 effectiveBucketCount, ui32 objectsCount) { const auto one32blockSize = WarpSize * effectiveBucketCount; const auto desiredQuantBuff = one32blockSize * NKernel::CeilDivide<ui32>(objectsCount, 128) * 4; if (BinarizedFeaturesBuffer.Size() < desiredQuantBuff) { BinarizedFeaturesBuffer = TCudaVec<TCudaQuantizationBucket>(desiredQuantBuff, EMemoryType::Device); } if (EvalResults.Size() < objectsCount) { EvalResults = TCudaVec<TCudaEvaluatorLeafType>(AlignBy<2048>(objectsCount), EMemoryType::Device); } } void TGPUCatboostEvaluationContext::EvalData(const TGPUDataInput& dataInput, TArrayRef<TCudaEvaluatorLeafType> result, size_t treeStart, size_t treeEnd) const { TFeatureAccessor<float, TGPUDataInput::EFeatureLayout::ColumnFirst> floatFeatureAccessor; floatFeatureAccessor.FeatureCount = dataInput.FloatFeatureCount; floatFeatureAccessor.FeatureStride = dataInput.Stride; floatFeatureAccessor.ObjectCount = dataInput.ObjectCount; floatFeatureAccessor.ObjectStride = 1; floatFeatureAccessor.FeaturesPtr = dataInput.FlatFloatsVector.Get(); EvalDataCache.PrepareCache(GPUModelData.FloatFeatureForBucketIdx.Size(), dataInput.ObjectCount); const dim3 quantizationDimBlock(QuantizationDocBlockSize, 1); const dim3 
quantizationDimGrid( NKernel::CeilDivide<unsigned int>(dataInput.ObjectCount, QuantizationDocBlockSize * ObjectsPerThread), GPUModelData.BordersCount.Size() // float features from models ); Binarize<<<quantizationDimGrid, quantizationDimBlock, 0, Stream>>> ( floatFeatureAccessor, GPUModelData.FlatBordersVector.Get(), GPUModelData.BordersOffsets.Get(), GPUModelData.BordersCount.Get(), GPUModelData.FloatFeatureForBucketIdx.Get(), GPUModelData.FloatFeatureForBucketIdx.Size(), EvalDataCache.BinarizedFeaturesBuffer.Get() ); const dim3 treeCalcDimBlock(EvalDocBlockSize, TreeSubBlockWidth); const dim3 treeCalcDimGrid( NKernel::CeilDivide<unsigned int>(GPUModelData.TreeSizes.Size(), TreeSubBlockWidth * ExtTreeBlockWidth), NKernel::CeilDivide<unsigned int>(dataInput.ObjectCount, EvalDocBlockSize * ObjectsPerThread) ); ClearMemoryAsync(EvalDataCache.EvalResults.AsArrayRef(), Stream); EvalObliviousTrees<<<treeCalcDimGrid, treeCalcDimBlock, 0, Stream>>> ( EvalDataCache.BinarizedFeaturesBuffer.Get(), GPUModelData.TreeSizes.Get(), GPUModelData.TreeSizes.Size(), GPUModelData.TreeStartOffsets.Get(), GPUModelData.TreeSplits.Get(), GPUModelData.TreeFirstLeafOffsets.Get(), GPUModelData.FloatFeatureForBucketIdx.Size(), GPUModelData.ModelLeafs.Get(), dataInput.ObjectCount, EvalDataCache.EvalResults.Get() ); MemoryCopyAsync<TCudaEvaluatorLeafType>( EvalDataCache.EvalResults.AsArrayRef().Slice(0, dataInput.ObjectCount), result, Stream ); Stream.Synchronize(); }
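/*
 * Illustrative sketch (not part of the original file pair): the EvalObliviousTrees
 * kernel above packs one split comparison per tree level into a leaf index.
 * A minimal host-side reference of that indexing logic, assuming hypothetical
 * plain arrays for the bucketized features and per-level splits; the names
 * THostRepackedBin and CalcLeafIndexHost are made up for illustration only.
 */
#include <cstdint>
#include <vector>

struct THostRepackedBin {
    uint32_t FeatureIdx;   // which quantized feature bucket the split looks at
    uint8_t  FeatureVal;   // border index the bucket is compared against
};

// Leaf index of one document in one oblivious tree of the given depth.
inline uint32_t CalcLeafIndexHost(const std::vector<uint8_t>& docBuckets,
                                  const THostRepackedBin* splits,
                                  int depth) {
    uint32_t leaf = 0;
    for (int level = 0; level < depth; ++level) {
        // Same predicate as the CUDA kernel: bucket >= border index sets this bit.
        leaf |= static_cast<uint32_t>(
                    docBuckets[splits[level].FeatureIdx] >= splits[level].FeatureVal) << level;
    }
    return leaf;  // the tree contributes leafValues[leaf] to the prediction
}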
d58cf7846d9384e7716ee2aef6242cf876726ca7.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdio> #include <cstdint> #include <cstdlib> #include <algorithm> #include "..\CUDA\SyncedMemory.h" #include "pgm.h" #include "lab3.h" using namespace std; #define CHECK {\ auto e = hipDeviceSynchronize();\ if (e != hipSuccess) {\ printf("At " __FILE__ ":%d, %s\n", __LINE__, hipGetErrorString(e));\ abort();\ }\ } int main(int argc, char **argv) { if (argc != 7) { printf("Usage: %s <background> <target> <mask> <offset x> <offset y> <output>\n", argv[0]); abort(); } bool sucb, suct, sucm; int wb, hb, cb, wt, ht, ct, wm, hm, cm; auto imgb = ReadNetpbm(wb, hb, cb, sucb, argv[1]); auto imgt = ReadNetpbm(wt, ht, ct, suct, argv[2]); auto imgm = ReadNetpbm(wm, hm, cm, sucm, argv[3]); if (!(sucb && suct && sucm)) { puts("Something wrong with reading the input image files."); abort(); } if (wt != wm || ht != hm) { puts("The mask and target image must have the same size."); abort(); } if (cm != 1) { puts("The mask image must be mono-colored."); abort(); } if (cb != 3 || ct != 3) { puts("The background and target image must be colored."); abort(); } const int ox = atoi(argv[4]), oy = atoi(argv[5]); const int SIZEB = wb*hb*3; const int SIZET = wt*ht*3; const int SIZEM = wm*hm; MemoryBuffer<float> background(SIZEB), target(SIZET), mask(SIZEM), output(SIZEB); auto background_s = background.CreateSync(SIZEB); auto target_s = target.CreateSync(SIZET); auto mask_s = mask.CreateSync(SIZEM); auto output_s = output.CreateSync(SIZEB); float *background_cpu = background_s.get_cpu_wo(); float *target_cpu = target_s.get_cpu_wo(); float *mask_cpu = mask_s.get_cpu_wo(); copy(imgb.get(), imgb.get()+SIZEB, background_cpu); copy(imgt.get(), imgt.get()+SIZET, target_cpu); copy(imgm.get(), imgm.get()+SIZEM, mask_cpu); PoissonImageCloning( background_s.get_gpu_ro(), target_s.get_gpu_ro(), mask_s.get_gpu_ro(), output_s.get_gpu_wo(), wb, hb, wt, ht, oy, ox ); unique_ptr<uint8_t[]> o(new uint8_t[SIZEB]); const float *o_cpu = output_s.get_cpu_ro(); transform(o_cpu, o_cpu+SIZEB, o.get(), [](float f) -> uint8_t { return max(min(int(f+0.5f), 255), 0); }); WritePPM(o.get(), wb, hb, argv[6]); return 0; }
d58cf7846d9384e7716ee2aef6242cf876726ca7.cu
#include <cstdio> #include <cstdint> #include <cstdlib> #include <algorithm> #include "..\CUDA\SyncedMemory.h" #include "pgm.h" #include "lab3.h" using namespace std; #define CHECK {\ auto e = cudaDeviceSynchronize();\ if (e != cudaSuccess) {\ printf("At " __FILE__ ":%d, %s\n", __LINE__, cudaGetErrorString(e));\ abort();\ }\ } int main(int argc, char **argv) { if (argc != 7) { printf("Usage: %s <background> <target> <mask> <offset x> <offset y> <output>\n", argv[0]); abort(); } bool sucb, suct, sucm; int wb, hb, cb, wt, ht, ct, wm, hm, cm; auto imgb = ReadNetpbm(wb, hb, cb, sucb, argv[1]); auto imgt = ReadNetpbm(wt, ht, ct, suct, argv[2]); auto imgm = ReadNetpbm(wm, hm, cm, sucm, argv[3]); if (!(sucb && suct && sucm)) { puts("Something wrong with reading the input image files."); abort(); } if (wt != wm || ht != hm) { puts("The mask and target image must have the same size."); abort(); } if (cm != 1) { puts("The mask image must be mono-colored."); abort(); } if (cb != 3 || ct != 3) { puts("The background and target image must be colored."); abort(); } const int ox = atoi(argv[4]), oy = atoi(argv[5]); const int SIZEB = wb*hb*3; const int SIZET = wt*ht*3; const int SIZEM = wm*hm; MemoryBuffer<float> background(SIZEB), target(SIZET), mask(SIZEM), output(SIZEB); auto background_s = background.CreateSync(SIZEB); auto target_s = target.CreateSync(SIZET); auto mask_s = mask.CreateSync(SIZEM); auto output_s = output.CreateSync(SIZEB); float *background_cpu = background_s.get_cpu_wo(); float *target_cpu = target_s.get_cpu_wo(); float *mask_cpu = mask_s.get_cpu_wo(); copy(imgb.get(), imgb.get()+SIZEB, background_cpu); copy(imgt.get(), imgt.get()+SIZET, target_cpu); copy(imgm.get(), imgm.get()+SIZEM, mask_cpu); PoissonImageCloning( background_s.get_gpu_ro(), target_s.get_gpu_ro(), mask_s.get_gpu_ro(), output_s.get_gpu_wo(), wb, hb, wt, ht, oy, ox ); unique_ptr<uint8_t[]> o(new uint8_t[SIZEB]); const float *o_cpu = output_s.get_cpu_ro(); transform(o_cpu, o_cpu+SIZEB, o.get(), [](float f) -> uint8_t { return max(min(int(f+0.5f), 255), 0); }); WritePPM(o.get(), wb, hb, argv[6]); return 0; }
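/*
 * Illustrative sketch (not from the original source): the transform at the end
 * of main() rounds each float channel to the nearest integer and clamps it to
 * [0, 255] before the PPM is written. The same conversion as a standalone
 * helper, shown here only to spell out the rounding/clamping behaviour.
 */
#include <algorithm>
#include <cstdint>

inline uint8_t FloatToPixel(float f) {
    // round-half-up, then clamp to the valid 8-bit range
    return static_cast<uint8_t>(std::max(std::min(static_cast<int>(f + 0.5f), 255), 0));
}
// e.g. FloatToPixel(254.7f) == 255, FloatToPixel(-3.0f) == 0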
f4c665b19c2080a61413fe12cbe25475f3c79e01.hip
// !!! This is a file automatically generated by hipify!!! #include <drivers/mem_driver.h> #include <functions/cnn_forward.h> #include <functions/cnn_backward.h> #include <functions/cnn_hv_forward.h> #include <functions/cnn_hv_backward.h> #include <nn/read_nn.h> #include <device/cuda_utils.h> #include <core/errors.h> void testCudaMemcpy2D( SCRATCH_AREA *scratch ) { real *devPtr = scratch->nextDevPtr; real *hostPtr = scratch->nextHostPtr; real *src = devPtr; real *tgt = src + 9; for (int i = 0; i < 9; i ++) hostPtr[ i ] = i + 1; fprintf( stderr, "Source Matrix... \n"); for( int i = 0; i < 3; i ++) { for (int j = 0; j < 3; j ++ ) fprintf( stderr, "%f ", hostPtr[ j * 3 + i ] ); fprintf( stderr, "\n"); } fprintf( stderr, "\n\n\n... CUDAMEMCPY2D Testing... \n\n\n"); copy_host_device( hostPtr, src, sizeof(real) * 9, hipMemcpyHostToDevice, ERROR_MEMCPY_HOST_DEVICE ); for (int i = 0; i < 12; i ++) hostPtr[ i ] = -1; copy_host_device( hostPtr, tgt, sizeof(real) * 12, hipMemcpyHostToDevice, ERROR_MEMCPY_HOST_DEVICE ); //cuda_memset( tgt, 0, sizeof(real) * 4 * 3, ERROR_MEMSET ); hipMemcpy2D( tgt, sizeof(real) * 4, src, sizeof(real) * 3, sizeof(real) * 3, sizeof(real) * 3, hipMemcpyDeviceToDevice ); copy_host_device( hostPtr, tgt, sizeof(real) * 4 * 3, hipMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST ); for( int i = 0; i < 4; i ++) { for (int j = 0; j < 3; j ++ ) fprintf( stderr, "%f ", hostPtr[ j * 4 + i ] ); fprintf( stderr, "\n"); } fprintf( stderr, "... Done with the testing... \n\n\n"); } void getMemRequired( CNN_MODEL *model ) { fprintf( stderr, "\n\n"); fprintf( stderr, "*** Memory Requirement Report *** \n\n"); for (int i = 1; i < pow(2, 15); i *= 2 ){ readLenetCNN( model, 3, 32, 32, i, 1, 0); long forward = cnnForwardMemRequired( model ); long backward = cnnBackwardMemRequired( model ); long hvForward = cnnROpForwardMemRequired( model ); long hvBackward = cnnROpBackwardMemRequired( model ); long gradient = forward + backward; long hv = hvForward + hvBackward + gradient; long total = hv + 3 * model->zSize + 5 * model->maxDeltaSize; fprintf( stderr, "Batch Size: %d\n", i ); fprintf( stderr, " Gradient Buffer Requirement: (%ld GB, %ld MB, %ld KB, %ld) \n", (long)(((double)gradient) / (1024 * 1024 * 1024)), (long)(((double)gradient) / (1024 * 1024)), (long)(((double)gradient) / (1024)), gradient ); fprintf( stderr, " Hv Buffer Requirement: (%ld GB, %ld MB, %ld KB, %ld) \n\n\n", (long)(((double)hv) / (1024 * 1024 * 1024)), (long)(((double)hv) / (1024 * 1024)), (long)(((double)hv) / (1024)), hv ); fprintf( stderr, " Total Buffer Requirement: (%ld GB, %ld MB, %ld KB, %ld) \n\n\n", (long)(((double)total) / (1024 * 1024 * 1024)), (long)(((double)total) / (1024 * 1024)), (long)(((double)total) / (1024)), total ); } fprintf( stderr, "*** End Report *** \n\n"); }
f4c665b19c2080a61413fe12cbe25475f3c79e01.cu
#include <drivers/mem_driver.h> #include <functions/cnn_forward.h> #include <functions/cnn_backward.h> #include <functions/cnn_hv_forward.h> #include <functions/cnn_hv_backward.h> #include <nn/read_nn.h> #include <device/cuda_utils.h> #include <core/errors.h> void testCudaMemcpy2D( SCRATCH_AREA *scratch ) { real *devPtr = scratch->nextDevPtr; real *hostPtr = scratch->nextHostPtr; real *src = devPtr; real *tgt = src + 9; for (int i = 0; i < 9; i ++) hostPtr[ i ] = i + 1; fprintf( stderr, "Source Matrix... \n"); for( int i = 0; i < 3; i ++) { for (int j = 0; j < 3; j ++ ) fprintf( stderr, "%f ", hostPtr[ j * 3 + i ] ); fprintf( stderr, "\n"); } fprintf( stderr, "\n\n\n... CUDAMEMCPY2D Testing... \n\n\n"); copy_host_device( hostPtr, src, sizeof(real) * 9, cudaMemcpyHostToDevice, ERROR_MEMCPY_HOST_DEVICE ); for (int i = 0; i < 12; i ++) hostPtr[ i ] = -1; copy_host_device( hostPtr, tgt, sizeof(real) * 12, cudaMemcpyHostToDevice, ERROR_MEMCPY_HOST_DEVICE ); //cuda_memset( tgt, 0, sizeof(real) * 4 * 3, ERROR_MEMSET ); cudaMemcpy2D( tgt, sizeof(real) * 4, src, sizeof(real) * 3, sizeof(real) * 3, sizeof(real) * 3, cudaMemcpyDeviceToDevice ); copy_host_device( hostPtr, tgt, sizeof(real) * 4 * 3, cudaMemcpyDeviceToHost, ERROR_MEMCPY_DEVICE_HOST ); for( int i = 0; i < 4; i ++) { for (int j = 0; j < 3; j ++ ) fprintf( stderr, "%f ", hostPtr[ j * 4 + i ] ); fprintf( stderr, "\n"); } fprintf( stderr, "... Done with the testing... \n\n\n"); } void getMemRequired( CNN_MODEL *model ) { fprintf( stderr, "\n\n"); fprintf( stderr, "*** Memory Requirement Report *** \n\n"); for (int i = 1; i < pow(2, 15); i *= 2 ){ readLenetCNN( model, 3, 32, 32, i, 1, 0); long forward = cnnForwardMemRequired( model ); long backward = cnnBackwardMemRequired( model ); long hvForward = cnnROpForwardMemRequired( model ); long hvBackward = cnnROpBackwardMemRequired( model ); long gradient = forward + backward; long hv = hvForward + hvBackward + gradient; long total = hv + 3 * model->zSize + 5 * model->maxDeltaSize; fprintf( stderr, "Batch Size: %d\n", i ); fprintf( stderr, " Gradient Buffer Requirement: (%ld GB, %ld MB, %ld KB, %ld) \n", (long)(((double)gradient) / (1024 * 1024 * 1024)), (long)(((double)gradient) / (1024 * 1024)), (long)(((double)gradient) / (1024)), gradient ); fprintf( stderr, " Hv Buffer Requirement: (%ld GB, %ld MB, %ld KB, %ld) \n\n\n", (long)(((double)hv) / (1024 * 1024 * 1024)), (long)(((double)hv) / (1024 * 1024)), (long)(((double)hv) / (1024)), hv ); fprintf( stderr, " Total Buffer Requirement: (%ld GB, %ld MB, %ld KB, %ld) \n\n\n", (long)(((double)total) / (1024 * 1024 * 1024)), (long)(((double)total) / (1024 * 1024)), (long)(((double)total) / (1024)), total ); } fprintf( stderr, "*** End Report *** \n\n"); }
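/*
 * Illustrative sketch (not part of the original driver): cudaMemcpy2D copies
 * `width` bytes from each of `height` rows, stepping by the given pitches.
 * Copying the 3x3 column-major matrix used in the test above into a buffer
 * with a pitch of 4 elements would normally look like this; note that the
 * last size argument is a row count, not a byte size. float stands in for
 * the project's `real` typedef here.
 */
#include <cuda_runtime.h>

static void Copy3x3IntoPitched(const float* d_src, float* d_tgt) {
    // 3 columns stored contiguously in d_src (pitch = 3 floats);
    // the destination keeps 4 floats per column (pitch = 4 floats).
    cudaMemcpy2D(d_tgt, 4 * sizeof(float),   // dst, dst pitch in bytes
                 d_src, 3 * sizeof(float),   // src, src pitch in bytes
                 3 * sizeof(float),          // width of each row in bytes
                 3,                          // number of rows to copy
                 cudaMemcpyDeviceToDevice);
}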
ccfca5788ff228466a3a64757ff8d5b566168856.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define MODULUS ((double) 4294967296.0) // 2^32 as a double #define MODULUS_INV ((float) (1.0 / MODULUS)) // a uniform random float between zero and one __device__ float urand(uint4 *seed) { // George Marsaglia's KISS generator from his post to // to sci.stat.math on 1/12/99 // a pair of MWC's seed->x = 36969*(seed->x & 0xffff) + (seed->x >> 16); seed->y = 18000*(seed->y & 0xffff) + (seed->y >> 16); unsigned int z = (seed->x << 16) + seed->y; // a shift register seed->z ^= (seed->z << 17); seed->z ^= (seed->z >> 13); seed->z ^= (seed->z << 5); // a linear congruential generator seed->w = 69069*seed->w + 1234567; z ^= seed->w; z += seed->z; return z * MODULUS_INV; } __global__ void urandTest() { #ifdef EMULATION uint4 seed = make_uint4(12345, 56789, 123, 4325); for (int i = 0; i < 10000; i++) { printf("%f\n", urand(&seed)); } #endif } // draw a random sample from an distribution that approximates a gaussian of std dev 1 __device__ float gaussianApproxRand(uint4 *seed) { return (urand(seed) + urand(seed) + urand(seed))*2 - 3; } // the PDF of the aforementioned distribution __device__ float gaussianApproxPDF(float x) { if (x < -3) { return 0; } if (x < -1) { x += 3; return x*x/16; } if (x < 1) { x = 3-x; return x*x/8; } if (x < 3) { x -= 3; return x*x/16; } return 0; } // the CDF of the aforementioned distribution __device__ float gaussianApproxCDF(float x) { if (x < -3) { return 0; } if (x < -1) { x += 3; return x*x*x/48; } if (x < 1) { return (12 + x*(9 - x*x))/24; } if (x < 3) { x -= 3; return 1 + x*x*x/48; } return 1; } // A function that prints the whole tree. Only useful in emulation mode. #ifdef EMULATION __device__ void printNode(kd_tree *t, int nodeIdx, int depth) { char space[256]; for (int i = 0; i < depth; i++) space[i] = '.'; space[depth] = 0; if (nodeIdx > 0 || depth == 0) { node n = t->nodeArray[nodeIdx]; printf("%sNode %i: %i %f (%f %f) (%i %i %i)\n", space, nodeIdx, n.cut_dim, n.cut_val, n.min_val, n.max_val, n.parent, n.left, n.right); printNode(t, n.left, depth+1); printNode(t, n.right, depth+1); } else { printf("%sLeaf %i: \n", space, -nodeIdx); printf("%s Position: ", space); for (int i = 0; i < t->positionDimensions; i++) { printf("%3.3f ", t->leafPositions[-nodeIdx*t->positionDimensions+i]); } printf("\n%s Value: ", space); for (int i = 0; i < t->valueDimensions; i++) { printf("%3.3f ", t->leafValues[-nodeIdx*t->valueDimensions+i]); } printf("\n"); } } #endif __global__ void printTree(kd_tree *t) { #ifdef EMULATION printf("tree:\n" " nodeArray = %x\n" " leafPositions = %x\n" " leafValues = %x\n" " nodeCount = %i\n" " leafCount = %i\n" " positionDimensions = %i\n" " valueDimensions = %i\n", t->nodeArray, t->leafPositions, t->leafValues, t->nodeCount, t->leafCount, t->positionDimensions, t->valueDimensions); printNode(t, 0, 0); printf("End of tree\n"); fflush(stdout); #endif }
ccfca5788ff228466a3a64757ff8d5b566168856.cu
#define MODULUS ((double) 4294967296.0) // 2^32 as a double #define MODULUS_INV ((float) (1.0 / MODULUS)) // a uniform random float between zero and one __device__ float urand(uint4 *seed) { // George Marsaglia's KISS generator from his post to // to sci.stat.math on 1/12/99 // a pair of MWC's seed->x = 36969*(seed->x & 0xffff) + (seed->x >> 16); seed->y = 18000*(seed->y & 0xffff) + (seed->y >> 16); unsigned int z = (seed->x << 16) + seed->y; // a shift register seed->z ^= (seed->z << 17); seed->z ^= (seed->z >> 13); seed->z ^= (seed->z << 5); // a linear congruential generator seed->w = 69069*seed->w + 1234567; z ^= seed->w; z += seed->z; return z * MODULUS_INV; } __global__ void urandTest() { #ifdef EMULATION uint4 seed = make_uint4(12345, 56789, 123, 4325); for (int i = 0; i < 10000; i++) { printf("%f\n", urand(&seed)); } #endif } // draw a random sample from an distribution that approximates a gaussian of std dev 1 __device__ float gaussianApproxRand(uint4 *seed) { return (urand(seed) + urand(seed) + urand(seed))*2 - 3; } // the PDF of the aforementioned distribution __device__ float gaussianApproxPDF(float x) { if (x < -3) { return 0; } if (x < -1) { x += 3; return x*x/16; } if (x < 1) { x = 3-x; return x*x/8; } if (x < 3) { x -= 3; return x*x/16; } return 0; } // the CDF of the aforementioned distribution __device__ float gaussianApproxCDF(float x) { if (x < -3) { return 0; } if (x < -1) { x += 3; return x*x*x/48; } if (x < 1) { return (12 + x*(9 - x*x))/24; } if (x < 3) { x -= 3; return 1 + x*x*x/48; } return 1; } // A function that prints the whole tree. Only useful in emulation mode. #ifdef EMULATION __device__ void printNode(kd_tree *t, int nodeIdx, int depth) { char space[256]; for (int i = 0; i < depth; i++) space[i] = '.'; space[depth] = 0; if (nodeIdx > 0 || depth == 0) { node n = t->nodeArray[nodeIdx]; printf("%sNode %i: %i %f (%f %f) (%i %i %i)\n", space, nodeIdx, n.cut_dim, n.cut_val, n.min_val, n.max_val, n.parent, n.left, n.right); printNode(t, n.left, depth+1); printNode(t, n.right, depth+1); } else { printf("%sLeaf %i: \n", space, -nodeIdx); printf("%s Position: ", space); for (int i = 0; i < t->positionDimensions; i++) { printf("%3.3f ", t->leafPositions[-nodeIdx*t->positionDimensions+i]); } printf("\n%s Value: ", space); for (int i = 0; i < t->valueDimensions; i++) { printf("%3.3f ", t->leafValues[-nodeIdx*t->valueDimensions+i]); } printf("\n"); } } #endif __global__ void printTree(kd_tree *t) { #ifdef EMULATION printf("tree:\n" " nodeArray = %x\n" " leafPositions = %x\n" " leafValues = %x\n" " nodeCount = %i\n" " leafCount = %i\n" " positionDimensions = %i\n" " valueDimensions = %i\n", t->nodeArray, t->leafPositions, t->leafValues, t->nodeCount, t->leafCount, t->positionDimensions, t->valueDimensions); printNode(t, 0, 0); printf("End of tree\n"); fflush(stdout); #endif }
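/*
 * Illustrative sketch (for reference only): gaussianApproxRand draws the sum
 * of three independent U(0,1) variables, rescaled to [-3, 3). Its variance is
 * 3 * (1/12) * 2^2 = 1, which is why the comment calls it an approximation of
 * a standard-deviation-1 Gaussian. A plain host-side equivalent using <random>:
 */
#include <random>

inline float GaussianApproxRandHost(std::mt19937& rng) {
    std::uniform_real_distribution<float> u(0.0f, 1.0f);
    // Sum of three uniforms (Irwin-Hall, n = 3), shifted and scaled to
    // zero mean and unit variance.
    return (u(rng) + u(rng) + u(rng)) * 2.0f - 3.0f;
}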
f6cd35c4b11742d17c1da8062e56ad1036a8728e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <caffepro/layers/resize_grid_layer.h> #include <caffepro/proto/caffe.pb.h> #include <caffepro/utils/utils.h> #include <caffepro/math/cublas_wrapper.h> namespace caffepro { resize_grid_layer::resize_grid_layer(caffepro_context *context, const LayerParameter &param) : caffepro_layer(context, param) { attr_.num_inputs_min = 2; attr_.num_inputs_max = 3; attr_.num_outputs_min = 2; attr_.num_outputs_max = 3; attr_.set_constraint( layer_attribute::CF_REQUIRE_SAME_DEVICE | layer_attribute::CF_REQUIRE_SAME_NUM | layer_attribute::CF_REQUIRE_FIXEDLEN_DIM // remove it in the future ); } resize_grid_layer::~resize_grid_layer() { release_all(); } void resize_grid_layer::init() { check_input(); if (inputs_.size() == 3) { CHECK_EQ(outputs_.size(), 3); } output_box_start_ = 0.f; output_box_scale_ = 1.f; if (inputs_.size() == 3) { output_box_start_ = config_.get<data_type>("output_box_start"); output_box_scale_ = config_.get<data_type>("output_box_scale"); } output_min_length_ = config_.get<int>("output_min_length"); output_max_scale_ = config_.get<data_type>("output_max_scale"); buffer_.reset(new node_blob()); sum_multiplier_.reset(new node_blob()); buffer_->set_attr(node_blob::NF_TEMP); internal_weights_.push_back(buffer_); } void resize_grid_layer::resize() { check_input(); int n_devices = (int)inputs_[0]->size(); bool init_output = (outputs_[0]->size() == 0); // calculate output spatial size and resize outputs for (int nd = 0; nd < n_devices; nd++) { CHECK_EQ(inputs_[1]->get(nd)->inner_count(), 1); int output_width, output_height; get_output_size(nd, output_width, output_height); if (init_output) { outputs_[0]->add(boost::shared_ptr<device_blob>(device_blob::create_4d( context_, inputs_[0]->get(nd)->num(), 1, output_height, output_width, inputs_[0]->get(nd)->device_id() ))); outputs_[1]->add(boost::shared_ptr<device_blob>(device_blob::create_4d( context_, inputs_[0]->get(nd)->num(), 1, output_height, output_width, inputs_[0]->get(nd)->device_id() ))); buffer_->add(boost::shared_ptr<device_blob>(device_blob::create_4d( context_, inputs_[0]->get(nd)->num(), 1, output_height, output_width, inputs_[0]->get(nd)->device_id() ))); sum_multiplier_->add(boost::shared_ptr<device_blob>(device_blob::create_4d( context_, 1, 1, output_height, output_width, inputs_[0]->get(nd)->device_id() ))); } else { outputs_[0]->get(nd)->reshape_4d(inputs_[0]->get(nd)->num(), 1, output_height, output_width); outputs_[1]->get(nd)->reshape_4d(inputs_[0]->get(nd)->num(), 1, output_height, output_width); buffer_->get(nd)->reshape_4d(inputs_[0]->get(nd)->num(), 1, output_height, output_width); sum_multiplier_->get(nd)->reshape_4d(1, 1, output_height, output_width); } sum_multiplier_->get(nd)->fill_data(1.f); } if (inputs_.size() == 3) { for (int nd = 0; nd < n_devices; nd++) { CHECK_EQ(inputs_[2]->get(nd)->inner_count(), 4); } if (init_output) { outputs_[2]->add_like(context_, *inputs_[2]); } else { for (int nd = 0; nd < n_devices; nd++) { if (inputs_[2]->get(nd)->reshaped()) { outputs_[2]->get(nd)->reshape_like(*inputs_[2]->get(nd)); } } } } } void resize_grid_layer::get_output_size(int device_index, int &output_width, int &output_height) { device_blob &input = *inputs_[0]->get(device_index); device_blob &input_scale = *inputs_[1]->get(device_index); int width = input.width(), height = input.height(); double ratio = (double)width / height; int max_height = (int)ceil(sqrt(output_max_scale_ * output_max_scale_ / ratio)); int max_width = 
(int)ceil(max_height * ratio); output_width = output_min_length_; output_height = output_min_length_; const data_type *learnt_scales = input_scale.cpu_data(); int num = input.num(); for (int i = 0; i < num; i++) { output_width = ::max(output_width, (int)ceil((double)width / learnt_scales[i])); output_height = ::max(output_height, (int)ceil((double)height / learnt_scales[i])); } output_width = ::min(output_width, max_width); output_height = ::min(output_height, max_height); } __global__ static void fwd_kernel(const int count, const int input_height, const int input_width, const int output_height, const int output_width, const data_type *params, data_type *output_x, data_type *output_y) { CUDA_KERNEL_LOOP(index, count) { int w = index % output_width; int h = index / output_width % output_height; int n = index / output_width / output_height; data_type scale = params[n]; data_type mid_input_w = (data_type)(input_width - 1) / 2, mid_input_h = (data_type)(input_height - 1) / 2; data_type mid_output_w = (data_type)(output_width - 1) / 2, mid_output_h = (data_type)(output_height - 1) / 2; output_x[index] = (w - mid_output_w) * scale + mid_input_w; output_y[index] = (h - mid_output_h) * scale + mid_input_h; } } void resize_grid_layer::on_forward(int device_index) { int count = outputs_[0]->get(device_index)->count(); int num = outputs_[0]->get(device_index)->num(); int input_width = inputs_[0]->get(device_index)->width(); int input_height = inputs_[0]->get(device_index)->height(); int output_width = outputs_[0]->get(device_index)->width(); int output_height = outputs_[0]->get(device_index)->height(); KERNEL_CALL(fwd_kernel, count)( count, input_height, input_width, output_height, output_width, inputs_[1]->get(device_index)->gpu_data(), outputs_[0]->get(device_index)->mutable_gpu_data(), // grid x outputs_[1]->get(device_index)->mutable_gpu_data() // grid y ); if (inputs_.size() == 3) { data_type *trans_boxes = outputs_[2]->get(device_index)->mutable_cpu_data(); const data_type *boxes = inputs_[2]->get(device_index)->cpu_data(); const data_type *scales = inputs_[1]->get(device_index)->cpu_data(); data_type mid_input_w = (data_type)(input_width - 1) / 2, mid_input_h = (data_type)(input_height - 1) / 2; data_type mid_output_w = (data_type)(output_width - 1) / 2, mid_output_h = (data_type)(output_height - 1) / 2; for (int i = 0; i < num; i++) { data_type *cur_trans_box = trans_boxes + 4 * i; const data_type *cur_box = boxes + 4 * i; data_type scale = scales[i]; cur_trans_box[0] = cur_box[0] / scale + mid_output_w - mid_input_w / scale; // left cur_trans_box[1] = cur_box[1] / scale + mid_output_h - mid_input_h / scale; // top cur_trans_box[2] = cur_box[2] / scale + mid_output_w - mid_input_w / scale; // right cur_trans_box[3] = cur_box[3] / scale + mid_output_h - mid_input_h / scale; // bottom for (int j = 0; j < 4; j++) { cur_trans_box[j] = cur_trans_box[j] * output_box_scale_ + output_box_start_; } } } } __global__ static void bwd_kernel(const int count, const int input_height, const int input_width, const int output_height, const int output_width, const data_type *output_x_diff, const data_type *output_y_diff, data_type *params_diff) { CUDA_KERNEL_LOOP(index, count) { int w = index % output_width; int h = index / output_width % output_height; data_type mid_output_w = (data_type)(output_width - 1) / 2, mid_output_h = (data_type)(output_height - 1) / 2; params_diff[index] = output_x_diff[index] * (w - mid_output_w) + output_y_diff[index] * (h - mid_output_h); } } void resize_grid_layer::on_backward(int 
device_index, act_selector bp_acts, weight_selector bp_weights, act_selector clear_acts_diff, weight_selector clear_weights_diff) { // we only need to bp inputs_[1] // fill 0 to inputs_[0] to meet the architecture requirement if (should_bp(bp_acts, 0) && get_beta(clear_acts_diff, 0) == 0) { inputs_[0]->get(device_index)->fill_diff(0.f); } if (should_bp(bp_acts, 1)) { data_type beta = get_beta(clear_acts_diff, 1); int count = outputs_[0]->get(device_index)->count(); KERNEL_CALL(bwd_kernel, count)( count, inputs_[0]->get(device_index)->height(), inputs_[0]->get(device_index)->width(), outputs_[0]->get(device_index)->height(), outputs_[0]->get(device_index)->width(), outputs_[0]->get(device_index)->gpu_diff(), outputs_[1]->get(device_index)->gpu_diff(), buffer_->get(device_index)->mutable_gpu_data() ); cublas_wrapper<data_type> cublas(context_, context_->get_current_device()->device_id()); cublas.gemv( CblasNoTrans, buffer_->get(device_index)->num(), // channels == 1 buffer_->get(device_index)->width() * buffer_->get(device_index)->height(), (data_type)1.f, buffer_->get(device_index)->gpu_data(), sum_multiplier_->get(device_index)->gpu_data(), beta, inputs_[1]->get(device_index)->mutable_gpu_diff() ); } } }
f6cd35c4b11742d17c1da8062e56ad1036a8728e.cu
#include <caffepro/layers/resize_grid_layer.h> #include <caffepro/proto/caffe.pb.h> #include <caffepro/utils/utils.h> #include <caffepro/math/cublas_wrapper.h> namespace caffepro { resize_grid_layer::resize_grid_layer(caffepro_context *context, const LayerParameter &param) : caffepro_layer(context, param) { attr_.num_inputs_min = 2; attr_.num_inputs_max = 3; attr_.num_outputs_min = 2; attr_.num_outputs_max = 3; attr_.set_constraint( layer_attribute::CF_REQUIRE_SAME_DEVICE | layer_attribute::CF_REQUIRE_SAME_NUM | layer_attribute::CF_REQUIRE_FIXEDLEN_DIM // remove it in the future ); } resize_grid_layer::~resize_grid_layer() { release_all(); } void resize_grid_layer::init() { check_input(); if (inputs_.size() == 3) { CHECK_EQ(outputs_.size(), 3); } output_box_start_ = 0.f; output_box_scale_ = 1.f; if (inputs_.size() == 3) { output_box_start_ = config_.get<data_type>("output_box_start"); output_box_scale_ = config_.get<data_type>("output_box_scale"); } output_min_length_ = config_.get<int>("output_min_length"); output_max_scale_ = config_.get<data_type>("output_max_scale"); buffer_.reset(new node_blob()); sum_multiplier_.reset(new node_blob()); buffer_->set_attr(node_blob::NF_TEMP); internal_weights_.push_back(buffer_); } void resize_grid_layer::resize() { check_input(); int n_devices = (int)inputs_[0]->size(); bool init_output = (outputs_[0]->size() == 0); // calculate output spatial size and resize outputs for (int nd = 0; nd < n_devices; nd++) { CHECK_EQ(inputs_[1]->get(nd)->inner_count(), 1); int output_width, output_height; get_output_size(nd, output_width, output_height); if (init_output) { outputs_[0]->add(boost::shared_ptr<device_blob>(device_blob::create_4d( context_, inputs_[0]->get(nd)->num(), 1, output_height, output_width, inputs_[0]->get(nd)->device_id() ))); outputs_[1]->add(boost::shared_ptr<device_blob>(device_blob::create_4d( context_, inputs_[0]->get(nd)->num(), 1, output_height, output_width, inputs_[0]->get(nd)->device_id() ))); buffer_->add(boost::shared_ptr<device_blob>(device_blob::create_4d( context_, inputs_[0]->get(nd)->num(), 1, output_height, output_width, inputs_[0]->get(nd)->device_id() ))); sum_multiplier_->add(boost::shared_ptr<device_blob>(device_blob::create_4d( context_, 1, 1, output_height, output_width, inputs_[0]->get(nd)->device_id() ))); } else { outputs_[0]->get(nd)->reshape_4d(inputs_[0]->get(nd)->num(), 1, output_height, output_width); outputs_[1]->get(nd)->reshape_4d(inputs_[0]->get(nd)->num(), 1, output_height, output_width); buffer_->get(nd)->reshape_4d(inputs_[0]->get(nd)->num(), 1, output_height, output_width); sum_multiplier_->get(nd)->reshape_4d(1, 1, output_height, output_width); } sum_multiplier_->get(nd)->fill_data(1.f); } if (inputs_.size() == 3) { for (int nd = 0; nd < n_devices; nd++) { CHECK_EQ(inputs_[2]->get(nd)->inner_count(), 4); } if (init_output) { outputs_[2]->add_like(context_, *inputs_[2]); } else { for (int nd = 0; nd < n_devices; nd++) { if (inputs_[2]->get(nd)->reshaped()) { outputs_[2]->get(nd)->reshape_like(*inputs_[2]->get(nd)); } } } } } void resize_grid_layer::get_output_size(int device_index, int &output_width, int &output_height) { device_blob &input = *inputs_[0]->get(device_index); device_blob &input_scale = *inputs_[1]->get(device_index); int width = input.width(), height = input.height(); double ratio = (double)width / height; int max_height = (int)ceil(sqrt(output_max_scale_ * output_max_scale_ / ratio)); int max_width = (int)ceil(max_height * ratio); output_width = output_min_length_; output_height = 
output_min_length_; const data_type *learnt_scales = input_scale.cpu_data(); int num = input.num(); for (int i = 0; i < num; i++) { output_width = std::max(output_width, (int)ceil((double)width / learnt_scales[i])); output_height = std::max(output_height, (int)ceil((double)height / learnt_scales[i])); } output_width = std::min(output_width, max_width); output_height = std::min(output_height, max_height); } __global__ static void fwd_kernel(const int count, const int input_height, const int input_width, const int output_height, const int output_width, const data_type *params, data_type *output_x, data_type *output_y) { CUDA_KERNEL_LOOP(index, count) { int w = index % output_width; int h = index / output_width % output_height; int n = index / output_width / output_height; data_type scale = params[n]; data_type mid_input_w = (data_type)(input_width - 1) / 2, mid_input_h = (data_type)(input_height - 1) / 2; data_type mid_output_w = (data_type)(output_width - 1) / 2, mid_output_h = (data_type)(output_height - 1) / 2; output_x[index] = (w - mid_output_w) * scale + mid_input_w; output_y[index] = (h - mid_output_h) * scale + mid_input_h; } } void resize_grid_layer::on_forward(int device_index) { int count = outputs_[0]->get(device_index)->count(); int num = outputs_[0]->get(device_index)->num(); int input_width = inputs_[0]->get(device_index)->width(); int input_height = inputs_[0]->get(device_index)->height(); int output_width = outputs_[0]->get(device_index)->width(); int output_height = outputs_[0]->get(device_index)->height(); KERNEL_CALL(fwd_kernel, count)( count, input_height, input_width, output_height, output_width, inputs_[1]->get(device_index)->gpu_data(), outputs_[0]->get(device_index)->mutable_gpu_data(), // grid x outputs_[1]->get(device_index)->mutable_gpu_data() // grid y ); if (inputs_.size() == 3) { data_type *trans_boxes = outputs_[2]->get(device_index)->mutable_cpu_data(); const data_type *boxes = inputs_[2]->get(device_index)->cpu_data(); const data_type *scales = inputs_[1]->get(device_index)->cpu_data(); data_type mid_input_w = (data_type)(input_width - 1) / 2, mid_input_h = (data_type)(input_height - 1) / 2; data_type mid_output_w = (data_type)(output_width - 1) / 2, mid_output_h = (data_type)(output_height - 1) / 2; for (int i = 0; i < num; i++) { data_type *cur_trans_box = trans_boxes + 4 * i; const data_type *cur_box = boxes + 4 * i; data_type scale = scales[i]; cur_trans_box[0] = cur_box[0] / scale + mid_output_w - mid_input_w / scale; // left cur_trans_box[1] = cur_box[1] / scale + mid_output_h - mid_input_h / scale; // top cur_trans_box[2] = cur_box[2] / scale + mid_output_w - mid_input_w / scale; // right cur_trans_box[3] = cur_box[3] / scale + mid_output_h - mid_input_h / scale; // bottom for (int j = 0; j < 4; j++) { cur_trans_box[j] = cur_trans_box[j] * output_box_scale_ + output_box_start_; } } } } __global__ static void bwd_kernel(const int count, const int input_height, const int input_width, const int output_height, const int output_width, const data_type *output_x_diff, const data_type *output_y_diff, data_type *params_diff) { CUDA_KERNEL_LOOP(index, count) { int w = index % output_width; int h = index / output_width % output_height; data_type mid_output_w = (data_type)(output_width - 1) / 2, mid_output_h = (data_type)(output_height - 1) / 2; params_diff[index] = output_x_diff[index] * (w - mid_output_w) + output_y_diff[index] * (h - mid_output_h); } } void resize_grid_layer::on_backward(int device_index, act_selector bp_acts, weight_selector bp_weights, 
act_selector clear_acts_diff, weight_selector clear_weights_diff) { // we only need to bp inputs_[1] // fill 0 to inputs_[0] to meet the architecture requirement if (should_bp(bp_acts, 0) && get_beta(clear_acts_diff, 0) == 0) { inputs_[0]->get(device_index)->fill_diff(0.f); } if (should_bp(bp_acts, 1)) { data_type beta = get_beta(clear_acts_diff, 1); int count = outputs_[0]->get(device_index)->count(); KERNEL_CALL(bwd_kernel, count)( count, inputs_[0]->get(device_index)->height(), inputs_[0]->get(device_index)->width(), outputs_[0]->get(device_index)->height(), outputs_[0]->get(device_index)->width(), outputs_[0]->get(device_index)->gpu_diff(), outputs_[1]->get(device_index)->gpu_diff(), buffer_->get(device_index)->mutable_gpu_data() ); cublas_wrapper<data_type> cublas(context_, context_->get_current_device()->device_id()); cublas.gemv( CblasNoTrans, buffer_->get(device_index)->num(), // channels == 1 buffer_->get(device_index)->width() * buffer_->get(device_index)->height(), (data_type)1.f, buffer_->get(device_index)->gpu_data(), sum_multiplier_->get(device_index)->gpu_data(), beta, inputs_[1]->get(device_index)->mutable_gpu_diff() ); } } }
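/*
 * Illustrative sketch (not part of the original layer): fwd_kernel above maps
 * every output pixel back to an input coordinate by scaling around the two
 * image centers. The same mapping for a single sample on the host; float
 * stands in for data_type, and MapOutputToInput is a made-up name.
 */
#include <utility>

inline std::pair<float, float> MapOutputToInput(int w, int h,
                                                int input_width, int input_height,
                                                int output_width, int output_height,
                                                float scale) {
    const float mid_in_w  = (input_width  - 1) * 0.5f;
    const float mid_in_h  = (input_height - 1) * 0.5f;
    const float mid_out_w = (output_width  - 1) * 0.5f;
    const float mid_out_h = (output_height - 1) * 0.5f;
    // Output pixel (w, h) samples input location (x, y).
    return { (w - mid_out_w) * scale + mid_in_w,
             (h - mid_out_h) * scale + mid_in_h };
}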
2edd933c407da259def3ec68ce9520ce93a37323.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe2/core/context_gpu.h" #include "caffe2/operators/leaky_relu_op.h" #include "caffe2/utils/math.h" namespace caffe2 { namespace { template <typename T> __global__ void LeakyReluKernel(const int N, const T alpha, const T* X, T* Y) { CUDA_1D_KERNEL_LOOP(i, N) { Y[i] = X[i] >= 0 ? X[i] : X[i] * alpha; } } template <typename T> __global__ void LeakyReluGradientKernel( const int N, const T alpha, const T* Y, const T* dY, T* dX) { CUDA_1D_KERNEL_LOOP(i, N) { dX[i] = Y[i] >= 0 ? dY[i] : dY[i] * alpha; } } } // namespace template <> bool LeakyReluOp<float, CUDAContext>::RunOnDevice() { const auto& X = Input(0); CAFFE_ENFORCE_GT(X.size(), 0); auto* Y = Output(0); Y->ResizeLike(X); hipLaunchKernelGGL(( LeakyReluKernel), dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.size(), alpha_, X.data<float>(), Y->mutable_data<float>()); return true; } template <> bool LeakyReluGradientOp<float, CUDAContext>::RunOnDevice() { const auto& Y = Input(0); const auto& dY = Input(1); auto* dX = Output(0); dX->ResizeLike(Y); CAFFE_ENFORCE_EQ(Y.size(), dY.size()); hipLaunchKernelGGL(( LeakyReluGradientKernel), dim3(CAFFE_GET_BLOCKS(Y.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), Y.size(), alpha_, Y.data<float>(), dY.data<float>(), dX->mutable_data<float>()); return true; } REGISTER_CUDA_OPERATOR(LeakyRelu, LeakyReluOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( LeakyReluGradient, LeakyReluGradientOp<float, CUDAContext>); } // namespace caffe2
2edd933c407da259def3ec68ce9520ce93a37323.cu
#include "caffe2/core/context_gpu.h" #include "caffe2/operators/leaky_relu_op.h" #include "caffe2/utils/math.h" namespace caffe2 { namespace { template <typename T> __global__ void LeakyReluKernel(const int N, const T alpha, const T* X, T* Y) { CUDA_1D_KERNEL_LOOP(i, N) { Y[i] = X[i] >= 0 ? X[i] : X[i] * alpha; } } template <typename T> __global__ void LeakyReluGradientKernel( const int N, const T alpha, const T* Y, const T* dY, T* dX) { CUDA_1D_KERNEL_LOOP(i, N) { dX[i] = Y[i] >= 0 ? dY[i] : dY[i] * alpha; } } } // namespace template <> bool LeakyReluOp<float, CUDAContext>::RunOnDevice() { const auto& X = Input(0); CAFFE_ENFORCE_GT(X.size(), 0); auto* Y = Output(0); Y->ResizeLike(X); LeakyReluKernel<<< CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), alpha_, X.data<float>(), Y->mutable_data<float>()); return true; } template <> bool LeakyReluGradientOp<float, CUDAContext>::RunOnDevice() { const auto& Y = Input(0); const auto& dY = Input(1); auto* dX = Output(0); dX->ResizeLike(Y); CAFFE_ENFORCE_EQ(Y.size(), dY.size()); LeakyReluGradientKernel<<< CAFFE_GET_BLOCKS(Y.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( Y.size(), alpha_, Y.data<float>(), dY.data<float>(), dX->mutable_data<float>()); return true; } REGISTER_CUDA_OPERATOR(LeakyRelu, LeakyReluOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( LeakyReluGradient, LeakyReluGradientOp<float, CUDAContext>); } // namespace caffe2
941716216dfdcda3080bc1dfbf349c86482f33ae.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright (c) 2019-2021 ETH Zurich, Automatic Control Lab, * Michel Schubiger, Goran Banjac. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "cuda_csr.h" #include "cuda_configure.h" #include "cuda_handler.h" #include "cuda_lin_alg.h" /* --> cuda_vec_gather */ #include "cuda_malloc.h" #include "helper_cuda.h" /* --> checkCudaErrors */ #include "csr_type.h" #include "glob_opts.h" #include <thrust/scan.h> #include <thrust/execution_policy.h> extern CUDA_Handle_t *CUDA_handle; /* This function is implemented in cuda_lin_alg.cu */ extern void scatter(OSQPFloat *out, const OSQPFloat *in, const OSQPInt *ind, OSQPInt n); /******************************************************************************* * GPU Kernels * *******************************************************************************/ /* * Expand an upper triangular matrix given in COO format to a symmetric * matrix. Each entry is duplicated with its column- and row index switched. * In the case of a diagonal element we set the indices to a value that is * larger than n to easily remove it later. This is done to keep the memory * patern one to one (MAP operation). * * Additionally, it adds additional n diagonal elements to have a full * diagonal. * * The output arrays row_ind_out and col_ind_out have to be of size 2*nnz+n. */ __global__ void fill_full_matrix_kernel(OSQPInt* row_ind_out, OSQPInt* col_ind_out, OSQPInt* nnz_on_diag, OSQPInt* has_non_zero_diag_element, const OSQPInt* __restrict__ row_ind_in, const OSQPInt* __restrict__ col_ind_in, OSQPInt nnz, OSQPInt n) { OSQPInt idx = threadIdx.x + blockDim.x * blockIdx.x; OSQPInt grid_size = blockDim.x * gridDim.x; for(OSQPInt i = idx; i < nnz; i += grid_size) { OSQPInt row = row_ind_in[i]; OSQPInt column = col_ind_in[i]; row_ind_out[i] = row; col_ind_out[i] = column; if (row == column) { has_non_zero_diag_element[row] = 1; row_ind_out[i + nnz] = column + n; /* dummy value for sorting and removal later on */ col_ind_out[i + nnz] = row + n; atomicAdd(nnz_on_diag, 1); } else { row_ind_out[i + nnz] = column; col_ind_out[i + nnz] = row; } } } /** * Insert elements at structural zeros on the diagonal of the sparse matrix * specified by row and column index (COO format). To keep a one-to-one memory * patern we add n new elements to the matrix. In case where there already is a * diagonal element we add a dummy entry. The dummy entries will be removed later. 
*/ __global__ void add_diagonal_kernel(OSQPInt* row_ind, OSQPInt* col_ind, const OSQPInt* has_non_zero_diag_element, OSQPInt n) { OSQPInt idx = threadIdx.x + blockDim.x * blockIdx.x; OSQPInt grid_size = blockDim.x * gridDim.x; for(OSQPInt row = idx; row < n; row += grid_size) { if (has_non_zero_diag_element[row] == 0) { row_ind[row] = row; col_ind[row] = row; } else { row_ind[row] = row + n; /* dummy value, for easy removal after sorting */ col_ind[row] = row + n; } } } /* * Permutation in: (size n, range 2*nnz+n): * * Gathers from the following array to create the full matrix : * * |P_lower->val|P_lower->val|zeros(n)| * * * Permutation out: (size n, range new_range) * * Gathers from the following array to create the full matrix : * * |P_lower->val|zeros(1)| * * | x[i] mod new_range if x[i] < 2 * new_range * x[i] -> | new_range if x[i] >= 2 * new_range * */ __global__ void reduce_permutation_kernel(OSQPInt* permutation, OSQPInt new_range, OSQPInt n) { OSQPInt idx = threadIdx.x + blockDim.x * blockIdx.x; OSQPInt grid_size = blockDim.x * gridDim.x; for(OSQPInt i = idx; i < n; i += grid_size) { if (permutation[i] < 2 * new_range) { permutation[i] = permutation[i] % new_range; } else { permutation[i] = new_range; /* gets the 0 element at nnz+1 of the value array */ } } } __global__ void get_diagonal_indices_kernel(OSQPInt* row_ind, OSQPInt* col_ind, OSQPInt nnz, OSQPInt* diag_index) { OSQPInt idx = threadIdx.x + blockDim.x * blockIdx.x; OSQPInt grid_size = blockDim.x * gridDim.x; for (OSQPInt index = idx; index < nnz; index += grid_size) { OSQPInt row = row_ind[index]; OSQPInt column = col_ind[index]; if (row == column) { diag_index[row] = index; } } } __global__ void predicate_generator_kernel(const OSQPInt* row_ind, const OSQPInt* row_predicate, OSQPInt* predicate, OSQPInt nnz) { OSQPInt idx = threadIdx.x + blockDim.x * blockIdx.x; OSQPInt grid_stride = gridDim.x * blockDim.x; for(OSQPInt i = idx; i < nnz; i += grid_stride) { OSQPInt row = row_ind[i]; predicate[i] = row_predicate[row]; } } template<typename T> __global__ void compact(const T* data_in, T* data_out, OSQPInt* predicate, OSQPInt* scatter_addres, OSQPInt n) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if(idx < n) { if(predicate[idx]) { int write_ind = scatter_addres[idx] - 1; data_out[write_ind] = data_in[idx]; } } } __global__ void compact_rows(const OSQPInt* row_ind, OSQPInt* data_out, OSQPInt* new_row_number, OSQPInt* predicate, OSQPInt* scatter_addres, OSQPInt n) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if(idx < n) { if(predicate[idx]) { OSQPInt write_ind = scatter_addres[idx] - 1; OSQPInt row = row_ind[idx]; data_out[write_ind] = new_row_number[row]-1; } } } __global__ void vector_init_abs_kernel(const OSQPInt* a, OSQPInt* b, OSQPInt n) { OSQPInt i = threadIdx.x + blockDim.x * blockIdx.x; if (i < n) { b[i] = abs(a[i]); } } __global__ void csr_eq_kernel(const OSQPInt* A_row_ptr, const OSQPInt* A_col_ind, const OSQPFloat* A_val, const OSQPInt* B_row_ptr, const OSQPInt* B_col_ind, const OSQPFloat* B_val, OSQPInt m, OSQPFloat tol, OSQPInt* res) { OSQPInt i = 0; OSQPInt j = 0; OSQPFloat diff = 0.0; *res = 1; for (j = 0; j < m; j++) { // Cycle over rows j // if row pointer of next row does not coincide, they are not equal // NB: first row always has A->p[0] = B->p[0] = 0 by construction. 
if (A_row_ptr[j+1] != B_row_ptr[j+1]) { *res = 0; return; } for (i = A_row_ptr[j]; i < A_row_ptr[j + 1]; i++) { // Cycle columns i in row j if (A_col_ind[i] != B_col_ind[i]) { // Different column indices *res = 0; return; } #ifdef OSQP_USE_FLOAT diff = fabsf(A_val[i] - B_val[i]); #else diff = fabs(A_val[i] - B_val[i]); #endif if (diff > tol) { // The actual matrix values are different *res = 0; return; } } } } /******************************************************************************* * Private Functions * *******************************************************************************/ static void init_SpMV_interface(csr *M) { OSQPFloat* d_x; OSQPFloat* d_y; hipsparseDnVecDescr_t vecx, vecy; OSQPFloat alpha = 1.0; OSQPInt m = M->m; OSQPInt n = M->n; /* Only create the matrix if it has non-zero dimensions. * Some versions of CUDA don't allow creating matrices with rows/columns of * size 0 and assert instead. So we don't create the matrix object, and instead * will never perform any operations on it. */ if ((m > 0) && (n > 0)) { /* Wrap raw data into cuSPARSE API matrix */ checkCudaErrors(hipsparseCreateCsr( &M->SpMatDescr, m, n, M->nnz, (void*)M->row_ptr, (void*)M->col_ind, (void*)M->val, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ZERO, CUDA_FLOAT)); if (!M->SpMatBufferSize) { cuda_malloc((void **) &d_x, n * sizeof(OSQPFloat)); cuda_malloc((void **) &d_y, m * sizeof(OSQPFloat)); cuda_vec_create(&vecx, d_x, n); cuda_vec_create(&vecy, d_y, m); /* Allocate workspace for hipsparseSpMV */ checkCudaErrors(hipsparseSpMV_bufferSize( CUDA_handle->cusparseHandle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &alpha, M->SpMatDescr, vecx, &alpha, vecy, CUDA_FLOAT, CUSPARSE_SPMV_ALG_DEFAULT, &M->SpMatBufferSize)); if (M->SpMatBufferSize) cuda_malloc((void **) &M->SpMatBuffer, M->SpMatBufferSize); cuda_vec_destroy(vecx); cuda_vec_destroy(vecy); cuda_free((void **) &d_x); cuda_free((void **) &d_y); } } } /* * Creates a CSR matrix with the specified dimension (m,n,nnz). * * If specified, it allocates proper amount of device memory * allocate_on_device = 1: device memory for CSR * allocate_on_device = 2: device memory for CSR (+ col_ind) */ csr* csr_alloc(OSQPInt m, OSQPInt n, OSQPInt nnz, OSQPInt allocate_on_device) { csr* dev_mat = (csr*) c_calloc(1, sizeof(csr)); if (!dev_mat) return NULL; dev_mat->m = m; dev_mat->n = n; dev_mat->nnz = nnz; if (allocate_on_device > 0) { cuda_calloc((void **) &dev_mat->val, (dev_mat->nnz + 1) * sizeof(OSQPFloat)); cuda_malloc((void **) &dev_mat->row_ptr, (dev_mat->m + 1) * sizeof(OSQPInt)); cuda_malloc((void **) &dev_mat->col_ind, dev_mat->nnz * sizeof(OSQPInt)); if (allocate_on_device > 1) cuda_malloc((void **) &dev_mat->row_ind, dev_mat->nnz * sizeof(OSQPInt)); } dev_mat->SpMatBufferSize = 0; dev_mat->SpMatBuffer = NULL; return dev_mat; } csr* csr_init(OSQPInt m, OSQPInt n, const OSQPInt* h_row_ptr, const OSQPInt* h_col_ind, const OSQPFloat* h_val) { csr* dev_mat = csr_alloc(m, n, h_row_ptr[m], 1); if (!dev_mat) return NULL; if (m == 0) return dev_mat; /* copy_matrix_to_device */ checkCudaErrors(hipMemcpy(dev_mat->row_ptr, h_row_ptr, (dev_mat->m + 1) * sizeof(OSQPInt), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(dev_mat->col_ind, h_col_ind, dev_mat->nnz * sizeof(OSQPInt), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(dev_mat->val, h_val, dev_mat->nnz * sizeof(OSQPFloat), hipMemcpyHostToDevice)); return dev_mat; } /* * Compress row indices from the COO format to the row pointer * of the CSR format. 
*/ void compress_row_ind(csr* mat) { cuda_free((void** ) &mat->row_ptr); cuda_malloc((void** ) &mat->row_ptr, (mat->m + 1) * sizeof(OSQPFloat)); checkCudaErrors(hipsparseXcoo2csr(CUDA_handle->cusparseHandle, mat->row_ind, mat->nnz, mat->m, mat->row_ptr, HIPSPARSE_INDEX_BASE_ZERO)); } void csr_expand_row_ind(csr* mat) { if (!mat->row_ind) { cuda_malloc((void** ) &mat->row_ind, mat->nnz * sizeof(OSQPFloat)); checkCudaErrors(hipsparseXcsr2coo(CUDA_handle->cusparseHandle, mat->row_ptr, mat->nnz, mat->m, mat->row_ind, HIPSPARSE_INDEX_BASE_ZERO)); } } /* * Sorts matrix in COO format by row. It returns a permutation * vector that describes reordering of the elements. */ OSQPInt* coo_sort(csr* A) { OSQPInt* A_to_At_permutation; char* pBuffer; size_t pBufferSizeInBytes; cuda_malloc((void **) &A_to_At_permutation, A->nnz * sizeof(OSQPInt)); checkCudaErrors(hipsparseCreateIdentityPermutation(CUDA_handle->cusparseHandle, A->nnz, A_to_At_permutation)); checkCudaErrors(hipsparseXcoosort_bufferSizeExt(CUDA_handle->cusparseHandle, A->m, A->n, A->nnz, A->row_ind, A->col_ind, &pBufferSizeInBytes)); cuda_malloc((void **) &pBuffer, pBufferSizeInBytes * sizeof(char)); checkCudaErrors(hipsparseXcoosortByRow(CUDA_handle->cusparseHandle, A->m, A->n, A->nnz, A->row_ind, A->col_ind, A_to_At_permutation, pBuffer)); cuda_free((void **) &pBuffer); return A_to_At_permutation; } /* * Compute transpose of a matrix in COO format. */ void coo_tranpose(csr* A) { OSQPInt m = A->m; A->m = A->n; A->n = m; OSQPInt *row_ind = A->row_ind; A->row_ind = A->col_ind; A->col_ind = row_ind; } /* * values[i] = values[permutation[i]] for i in [0,n-1] */ void permute_vector(OSQPFloat* values, const OSQPInt* permutation, OSQPInt n) { OSQPFloat* permuted_values; cuda_malloc((void **) &permuted_values, n * sizeof(OSQPFloat)); cuda_vec_gather(n, values, permuted_values, permutation); checkCudaErrors(hipMemcpy(values, permuted_values, n * sizeof(OSQPFloat), hipMemcpyDeviceToDevice)); cuda_free((void **) &permuted_values); } /* * Copy the values and pointers form target to the source matrix. * The device memory of source has to be freed first to avoid a * memory leak in case it holds allocated memory. * * The MatrixDescription has to be destroyed first since it is a * pointer hidded by a typedef. * * The pointers of source matrix are set to NULL to avoid * accidental freeing of the associated memory blocks. 
*/ void copy_csr(csr* target, csr* source) { target->m = source->m; target->n = source->n; target->nnz = source->nnz; cuda_free((void **) &target->val); cuda_free((void **) &target->row_ind); cuda_free((void **) &target->row_ptr); cuda_free((void **) &target->col_ind); target->val = source->val; target->row_ind = source->row_ind; target->row_ptr = source->row_ptr; target->col_ind = source->col_ind; source->val = NULL; source->row_ind = NULL; source->row_ptr = NULL; source->col_ind = NULL; } void csr_triu_to_full(csr* P_triu, OSQPInt** P_triu_to_full_permutation, OSQPInt** P_diag_indices) { OSQPInt number_of_blocks; OSQPInt* has_non_zero_diag_element; OSQPInt* d_nnz_diag; OSQPInt h_nnz_diag, Full_nnz, nnz_triu, n, nnz_max_Full; OSQPInt offset; nnz_triu = P_triu->nnz; n = P_triu->n; nnz_max_Full = 2*nnz_triu + n; csr* Full_P = csr_alloc(n, n, nnz_max_Full, 2); cuda_calloc((void **) &has_non_zero_diag_element, n * sizeof(OSQPInt)); cuda_calloc((void **) &d_nnz_diag, sizeof(OSQPInt)); csr_expand_row_ind(P_triu); number_of_blocks = (nnz_triu / THREADS_PER_BLOCK) + 1; hipLaunchKernelGGL(( fill_full_matrix_kernel), dim3(number_of_blocks), dim3(THREADS_PER_BLOCK), 0, 0, Full_P->row_ind, Full_P->col_ind, d_nnz_diag, has_non_zero_diag_element, P_triu->row_ind, P_triu->col_ind, nnz_triu, n); offset = 2 * nnz_triu; number_of_blocks = (n / THREADS_PER_BLOCK) + 1; hipLaunchKernelGGL(( add_diagonal_kernel), dim3(number_of_blocks), dim3(THREADS_PER_BLOCK), 0, 0, Full_P->row_ind + offset, Full_P->col_ind + offset, has_non_zero_diag_element, n); /* The Full matrix now is of size (2n)x(2n) * [P 0] * [0 D] * where P is the desired full matrix and D is * a diagonal that contains dummy values */ checkCudaErrors(hipMemcpy(&h_nnz_diag, d_nnz_diag, sizeof(OSQPInt), hipMemcpyDeviceToHost)); Full_nnz = (2 * (nnz_triu - h_nnz_diag)) + n; OSQPInt* d_P = coo_sort(Full_P); number_of_blocks = (nnz_triu / THREADS_PER_BLOCK) + 1; hipLaunchKernelGGL(( reduce_permutation_kernel), dim3(number_of_blocks),dim3(THREADS_PER_BLOCK), 0, 0, d_P, nnz_triu, Full_nnz); /* permute vector */ cuda_vec_gather(Full_nnz, P_triu->val, Full_P->val, d_P); cuda_malloc((void **) P_triu_to_full_permutation, Full_nnz * sizeof(OSQPInt)); checkCudaErrors(hipMemcpy(*P_triu_to_full_permutation, d_P, Full_nnz * sizeof(OSQPInt), hipMemcpyDeviceToDevice)); cuda_malloc((void **) P_diag_indices, n * sizeof(OSQPInt)); number_of_blocks = (Full_nnz / THREADS_PER_BLOCK) + 1; hipLaunchKernelGGL(( get_diagonal_indices_kernel), dim3(number_of_blocks), dim3(THREADS_PER_BLOCK), 0, 0, Full_P->row_ind, Full_P->col_ind, Full_nnz, *P_diag_indices); Full_P->nnz = Full_nnz; compress_row_ind(Full_P); copy_csr(P_triu, Full_P); cuda_mat_free(Full_P); cuda_free((void **) &d_P); cuda_free((void **) &d_nnz_diag); cuda_free((void **) &has_non_zero_diag_element); } /** * Matrix A is converted from CSC to CSR. The data in A is interpreted as * being in CSC format, even if it is in CSR. * This operation is equivalent to a transpose. We temporarily allocate space * for the new matrix since this operation cannot be done inplace. * Additionally, a gather indices vector is generated to perform the conversion * from A to A' faster during a matrix update. 
*/ void csr_transpose(csr* A, OSQPInt** A_to_At_permutation) { (*A_to_At_permutation) = NULL; if (A->nnz == 0) { OSQPInt tmp = A->n; A->n = A->m; A->m = tmp; return; } csr_expand_row_ind(A); coo_tranpose(A); (*A_to_At_permutation) = coo_sort(A); compress_row_ind(A); permute_vector(A->val, *A_to_At_permutation, A->nnz); } /******************************************************************************* * API Functions * *******************************************************************************/ void cuda_mat_init_P(const OSQPCscMatrix* mat, csr** P, OSQPFloat** d_P_triu_val, OSQPInt** d_P_triu_to_full_ind, OSQPInt** d_P_diag_ind) { OSQPInt n = mat->n; OSQPInt nnz = mat->p[n]; /* Initialize upper triangular part of P */ *P = csr_init(n, n, mat->p, mat->i, mat->x); /* Convert P to a full matrix. Store indices of diagonal and triu elements. */ csr_triu_to_full(*P, d_P_triu_to_full_ind, d_P_diag_ind); csr_expand_row_ind(*P); /* We need 0.0 at val[nzz] -> nnz+1 elements */ cuda_calloc((void **) d_P_triu_val, (nnz+1) * sizeof(OSQPFloat)); /* Store triu elements */ checkCudaErrors(hipMemcpy(*d_P_triu_val, mat->x, nnz * sizeof(OSQPFloat), hipMemcpyHostToDevice)); init_SpMV_interface(*P); } void cuda_mat_init_A(const OSQPCscMatrix* mat, csr** A, csr** At, OSQPInt** d_A_to_At_ind) { OSQPInt m = mat->m; OSQPInt n = mat->n; /* Initializing At is easy since it is equal to A in CSC */ *At = csr_init(n, m, mat->p, mat->i, mat->x); csr_expand_row_ind(*At); /* We need to take transpose of At to get A */ *A = csr_init(n, m, mat->p, mat->i, mat->x); csr_transpose(*A, d_A_to_At_ind); csr_expand_row_ind(*A); init_SpMV_interface(*A); init_SpMV_interface(*At); } void cuda_mat_update_P(const OSQPFloat* Px, const OSQPInt* Px_idx, OSQPInt Px_n, csr** P, OSQPFloat* d_P_triu_val, OSQPInt* d_P_triu_to_full_ind, OSQPInt* d_P_diag_ind, OSQPInt P_triu_nnz) { if (!Px_idx) { /* Update whole P */ OSQPFloat* d_P_val_new; /* Allocate memory */ cuda_malloc((void **) &d_P_val_new, (P_triu_nnz + 1) * sizeof(OSQPFloat)); /* Copy new values from host to device */ checkCudaErrors(hipMemcpy(d_P_val_new, Px, P_triu_nnz * sizeof(OSQPFloat), hipMemcpyHostToDevice)); cuda_vec_gather((*P)->nnz, d_P_val_new, (*P)->val, d_P_triu_to_full_ind); cuda_free((void **) &d_P_val_new); } else { /* Update P partially */ OSQPFloat* d_P_val_new; OSQPInt* d_P_ind_new; /* Allocate memory */ cuda_malloc((void **) &d_P_val_new, Px_n * sizeof(OSQPFloat)); cuda_malloc((void **) &d_P_ind_new, Px_n * sizeof(OSQPInt)); /* Copy new values and indices from host to device */ checkCudaErrors(hipMemcpy(d_P_val_new, Px, Px_n * sizeof(OSQPFloat), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_P_ind_new, Px_idx, Px_n * sizeof(OSQPInt), hipMemcpyHostToDevice)); /* Update d_P_triu_val */ scatter(d_P_triu_val, d_P_val_new, d_P_ind_new, Px_n); /* Gather from d_P_triu_val to update full P */ cuda_vec_gather((*P)->nnz, d_P_triu_val, (*P)->val, d_P_triu_to_full_ind); cuda_free((void **) &d_P_val_new); cuda_free((void **) &d_P_ind_new); } } void cuda_mat_update_A(const OSQPFloat* Ax, const OSQPInt* Ax_idx, OSQPInt Ax_n, csr** A, csr** At, OSQPInt* d_A_to_At_ind) { OSQPInt Annz = (*A)->nnz; OSQPFloat* Aval = (*A)->val; OSQPFloat* Atval = (*At)->val; if (!Ax_idx) { /* Update whole A */ /* Updating At is easy since it is equal to A in CSC */ checkCudaErrors(hipMemcpy(Atval, Ax, Annz * sizeof(OSQPFloat), hipMemcpyHostToDevice)); /* Updating A requires transpose of A_new */ cuda_vec_gather(Annz, Atval, Aval, d_A_to_At_ind); } else { /* Update A partially */ OSQPFloat* 
d_At_val_new; OSQPInt* d_At_ind_new; /* Allocate memory */ cuda_malloc((void **) &d_At_val_new, Ax_n * sizeof(OSQPFloat)); cuda_malloc((void **) &d_At_ind_new, Ax_n * sizeof(OSQPInt)); /* Copy new values and indices from host to device */ checkCudaErrors(hipMemcpy(d_At_val_new, Ax, Ax_n * sizeof(OSQPFloat), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_At_ind_new, Ax_idx, Ax_n * sizeof(OSQPInt), hipMemcpyHostToDevice)); /* Update At first since it is equal to A in CSC */ scatter(Atval, d_At_val_new, d_At_ind_new, Ax_n); cuda_free((void **) &d_At_val_new); cuda_free((void **) &d_At_ind_new); /* Gather from Atval to construct Aval */ cuda_vec_gather(Annz, Atval, Aval, d_A_to_At_ind); } } void cuda_mat_free(csr* mat) { if (mat) { cuda_free((void **) &mat->val); cuda_free((void **) &mat->row_ptr); cuda_free((void **) &mat->col_ind); cuda_free((void **) &mat->row_ind); cuda_free((void **) &mat->SpMatBuffer); checkCudaErrors(hipsparseDestroySpMat(mat->SpMatDescr)); c_free(mat); } } OSQPInt cuda_csr_is_eq(const csr* A, const csr* B, OSQPFloat tol) { OSQPInt h_res = 0; OSQPInt *d_res; // If number of columns, rows and non-zeros are not the same, they are not equal. if ((A->n != B->n) || (A->m != B->m) || (A->nnz != B->nnz)) { return 0; } OSQPInt nnz = A->nnz; OSQPInt number_of_blocks = (nnz / THREADS_PER_BLOCK) / ELEMENTS_PER_THREAD + 1; cuda_malloc((void **) &d_res, sizeof(OSQPInt)); /* Initialize d_res to 1 */ h_res = 1; checkCudaErrors(hipMemcpy(d_res, &h_res, sizeof(OSQPInt), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( csr_eq_kernel), dim3(number_of_blocks), dim3(THREADS_PER_BLOCK), 0, 0, A->row_ptr, A->col_ind, A->val, B->row_ptr, B->col_ind, B->val, A->m, tol, d_res); checkCudaErrors(hipMemcpy(&h_res, d_res, sizeof(OSQPInt), hipMemcpyDeviceToHost)); cuda_free((void **) &d_res); return h_res; } void cuda_submat_byrows(const csr* A, const OSQPInt* d_rows, csr** Ared, csr** Aredt) { OSQPInt new_m = 0; OSQPInt n = A->n; OSQPInt m = A->m; OSQPInt nnz = A->nnz; OSQPInt* d_predicate; OSQPInt* d_compact_address; OSQPInt* d_row_predicate; OSQPInt* d_new_row_number; cuda_malloc((void **) &d_row_predicate, m * sizeof(OSQPInt)); cuda_malloc((void **) &d_new_row_number, m * sizeof(OSQPInt)); cuda_malloc((void **) &d_predicate, nnz * sizeof(OSQPInt)); cuda_malloc((void **) &d_compact_address, nnz * sizeof(OSQPInt)); // Copy rows array to device and set -1s to ones checkCudaErrors(hipMemcpy(d_row_predicate, d_rows, m * sizeof(OSQPInt), hipMemcpyDeviceToDevice)); hipLaunchKernelGGL(( vector_init_abs_kernel), dim3((m/THREADS_PER_BLOCK) + 1),dim3(THREADS_PER_BLOCK), 0, 0, d_row_predicate, d_row_predicate, m); // Calculate new row numbering and get new number of rows thrust::inclusive_scan(thrust::device, d_row_predicate, d_row_predicate + m, d_new_row_number); if (m) { checkCudaErrors(hipMemcpy(&new_m, &d_new_row_number[m-1], sizeof(OSQPInt), hipMemcpyDeviceToHost)); } else { (*Ared) = (csr *) c_calloc(1, sizeof(csr)); (*Ared)->n = n; (*Aredt) = (csr *) c_calloc(1, sizeof(csr)); (*Aredt)->m = n; return; } // Generate predicates per element from per row predicate hipLaunchKernelGGL(( predicate_generator_kernel), dim3((nnz/THREADS_PER_BLOCK) + 1), dim3(THREADS_PER_BLOCK), 0, 0, A->row_ind, d_row_predicate, d_predicate, nnz); // Get array offset for compacting and new nnz thrust::inclusive_scan(thrust::device, d_predicate, d_predicate + nnz, d_compact_address); OSQPInt nnz_new; if (nnz) checkCudaErrors(hipMemcpy(&nnz_new, &d_compact_address[nnz-1], sizeof(OSQPInt), hipMemcpyDeviceToHost)); // allocate 
new matrix (2 -> allocate row indices as well) (*Ared) = csr_alloc(new_m, n, nnz_new, 2); // Compact arrays according to given predicates, special care has to be taken for the rows hipLaunchKernelGGL(( compact_rows), dim3((nnz/THREADS_PER_BLOCK) + 1), dim3(THREADS_PER_BLOCK), 0, 0, A->row_ind, (*Ared)->row_ind, d_new_row_number, d_predicate, d_compact_address, nnz); hipLaunchKernelGGL(( compact), dim3((nnz/THREADS_PER_BLOCK) + 1), dim3(THREADS_PER_BLOCK), 0, 0, A->col_ind, (*Ared)->col_ind, d_predicate, d_compact_address, nnz); hipLaunchKernelGGL(( compact), dim3((nnz/THREADS_PER_BLOCK) + 1), dim3(THREADS_PER_BLOCK), 0, 0, A->val, (*Ared)->val, d_predicate, d_compact_address, nnz); // Generate row pointer compress_row_ind(*Ared); // We first make a copy of Ared *Aredt = csr_alloc(new_m, n, nnz_new, 1); checkCudaErrors(hipMemcpy((*Aredt)->val, (*Ared)->val, nnz_new * sizeof(OSQPFloat), hipMemcpyDeviceToDevice)); checkCudaErrors(hipMemcpy((*Aredt)->row_ptr, (*Ared)->row_ptr, (new_m+1) * sizeof(OSQPInt), hipMemcpyDeviceToDevice)); checkCudaErrors(hipMemcpy((*Aredt)->col_ind, (*Ared)->col_ind, nnz_new * sizeof(OSQPInt), hipMemcpyDeviceToDevice)); OSQPInt* d_A_to_At_ind; csr_transpose(*Aredt, &d_A_to_At_ind); csr_expand_row_ind(*Ared); csr_expand_row_ind(*Aredt); init_SpMV_interface(*Ared); init_SpMV_interface(*Aredt); cuda_free((void**)&d_A_to_At_ind); cuda_free((void**)&d_predicate); cuda_free((void**)&d_compact_address); cuda_free((void**)&d_row_predicate); cuda_free((void**)&d_new_row_number); }
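/*
 * Illustrative sketch (guarded out of the normal build, not part of the library):
 * a host-only walk-through of the index expansion that fill_full_matrix_kernel and
 * add_diagonal_kernel perform on the GPU inside csr_triu_to_full. The guard macro
 * CUDA_CSR_TRIU_EXAMPLE and the tiny arrays below are made up for illustration;
 * only the expansion rule itself mirrors the kernels above.
 */
#ifdef CUDA_CSR_TRIU_EXAMPLE
#include <cstdio>
#include <vector>

int main() {
  /* Upper-triangular 3x3 pattern in COO form:
   *   (0,0) (0,2) (1,1)   -> nnz_triu = 3, n = 3 */
  const int n = 3;
  std::vector<int> row = {0, 0, 1};
  std::vector<int> col = {0, 2, 1};
  int nnz = (int)row.size();

  std::vector<int> full_row(2 * nnz + n), full_col(2 * nnz + n);
  std::vector<int> has_diag(n, 0);
  int nnz_on_diag = 0;

  /* Same MAP as fill_full_matrix_kernel: copy each entry and emit its mirror;
   * diagonal entries get a dummy mirror shifted by n so it sorts to the end. */
  for (int i = 0; i < nnz; i++) {
    full_row[i] = row[i];
    full_col[i] = col[i];
    if (row[i] == col[i]) {
      has_diag[row[i]] = 1;
      full_row[i + nnz] = col[i] + n;
      full_col[i + nnz] = row[i] + n;
      nnz_on_diag++;
    } else {
      full_row[i + nnz] = col[i];
      full_col[i + nnz] = row[i];
    }
  }

  /* Same as add_diagonal_kernel: fill structural zeros on the diagonal,
   * emit dummies (shifted by n) where a diagonal entry already exists. */
  for (int r = 0; r < n; r++) {
    full_row[2 * nnz + r] = has_diag[r] ? r + n : r;
    full_col[2 * nnz + r] = has_diag[r] ? r + n : r;
  }

  /* After sorting by row, only the first Full_nnz entries are kept. */
  int full_nnz = 2 * (nnz - nnz_on_diag) + n;
  printf("nnz_triu = %d, nnz_on_diag = %d, Full_nnz = %d\n",
         nnz, nnz_on_diag, full_nnz);
  for (int i = 0; i < 2 * nnz + n; i++)
    printf("(%d,%d)%s\n", full_row[i], full_col[i],
           full_row[i] >= n ? "  <- dummy, dropped after sort" : "");
  return 0;
}
#endif /* CUDA_CSR_TRIU_EXAMPLE */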
941716216dfdcda3080bc1dfbf349c86482f33ae.cu
/** * Copyright (c) 2019-2021 ETH Zurich, Automatic Control Lab, * Michel Schubiger, Goran Banjac. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "cuda_csr.h" #include "cuda_configure.h" #include "cuda_handler.h" #include "cuda_lin_alg.h" /* --> cuda_vec_gather */ #include "cuda_malloc.h" #include "helper_cuda.h" /* --> checkCudaErrors */ #include "csr_type.h" #include "glob_opts.h" #include <thrust/scan.h> #include <thrust/execution_policy.h> extern CUDA_Handle_t *CUDA_handle; /* This function is implemented in cuda_lin_alg.cu */ extern void scatter(OSQPFloat *out, const OSQPFloat *in, const OSQPInt *ind, OSQPInt n); /******************************************************************************* * GPU Kernels * *******************************************************************************/ /* * Expand an upper triangular matrix given in COO format to a symmetric * matrix. Each entry is duplicated with its column- and row index switched. * In the case of a diagonal element we set the indices to a value that is * larger than n to easily remove it later. This is done to keep the memory * patern one to one (MAP operation). * * Additionally, it adds additional n diagonal elements to have a full * diagonal. * * The output arrays row_ind_out and col_ind_out have to be of size 2*nnz+n. */ __global__ void fill_full_matrix_kernel(OSQPInt* row_ind_out, OSQPInt* col_ind_out, OSQPInt* nnz_on_diag, OSQPInt* has_non_zero_diag_element, const OSQPInt* __restrict__ row_ind_in, const OSQPInt* __restrict__ col_ind_in, OSQPInt nnz, OSQPInt n) { OSQPInt idx = threadIdx.x + blockDim.x * blockIdx.x; OSQPInt grid_size = blockDim.x * gridDim.x; for(OSQPInt i = idx; i < nnz; i += grid_size) { OSQPInt row = row_ind_in[i]; OSQPInt column = col_ind_in[i]; row_ind_out[i] = row; col_ind_out[i] = column; if (row == column) { has_non_zero_diag_element[row] = 1; row_ind_out[i + nnz] = column + n; /* dummy value for sorting and removal later on */ col_ind_out[i + nnz] = row + n; atomicAdd(nnz_on_diag, 1); } else { row_ind_out[i + nnz] = column; col_ind_out[i + nnz] = row; } } } /** * Insert elements at structural zeros on the diagonal of the sparse matrix * specified by row and column index (COO format). To keep a one-to-one memory * patern we add n new elements to the matrix. In case where there already is a * diagonal element we add a dummy entry. The dummy entries will be removed later. 
*/ __global__ void add_diagonal_kernel(OSQPInt* row_ind, OSQPInt* col_ind, const OSQPInt* has_non_zero_diag_element, OSQPInt n) { OSQPInt idx = threadIdx.x + blockDim.x * blockIdx.x; OSQPInt grid_size = blockDim.x * gridDim.x; for(OSQPInt row = idx; row < n; row += grid_size) { if (has_non_zero_diag_element[row] == 0) { row_ind[row] = row; col_ind[row] = row; } else { row_ind[row] = row + n; /* dummy value, for easy removal after sorting */ col_ind[row] = row + n; } } } /* * Permutation in: (size n, range 2*nnz+n): * * Gathers from the following array to create the full matrix : * * |P_lower->val|P_lower->val|zeros(n)| * * * Permutation out: (size n, range new_range) * * Gathers from the following array to create the full matrix : * * |P_lower->val|zeros(1)| * * | x[i] mod new_range if x[i] < 2 * new_range * x[i] -> | new_range if x[i] >= 2 * new_range * */ __global__ void reduce_permutation_kernel(OSQPInt* permutation, OSQPInt new_range, OSQPInt n) { OSQPInt idx = threadIdx.x + blockDim.x * blockIdx.x; OSQPInt grid_size = blockDim.x * gridDim.x; for(OSQPInt i = idx; i < n; i += grid_size) { if (permutation[i] < 2 * new_range) { permutation[i] = permutation[i] % new_range; } else { permutation[i] = new_range; /* gets the 0 element at nnz+1 of the value array */ } } } __global__ void get_diagonal_indices_kernel(OSQPInt* row_ind, OSQPInt* col_ind, OSQPInt nnz, OSQPInt* diag_index) { OSQPInt idx = threadIdx.x + blockDim.x * blockIdx.x; OSQPInt grid_size = blockDim.x * gridDim.x; for (OSQPInt index = idx; index < nnz; index += grid_size) { OSQPInt row = row_ind[index]; OSQPInt column = col_ind[index]; if (row == column) { diag_index[row] = index; } } } __global__ void predicate_generator_kernel(const OSQPInt* row_ind, const OSQPInt* row_predicate, OSQPInt* predicate, OSQPInt nnz) { OSQPInt idx = threadIdx.x + blockDim.x * blockIdx.x; OSQPInt grid_stride = gridDim.x * blockDim.x; for(OSQPInt i = idx; i < nnz; i += grid_stride) { OSQPInt row = row_ind[i]; predicate[i] = row_predicate[row]; } } template<typename T> __global__ void compact(const T* data_in, T* data_out, OSQPInt* predicate, OSQPInt* scatter_addres, OSQPInt n) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if(idx < n) { if(predicate[idx]) { int write_ind = scatter_addres[idx] - 1; data_out[write_ind] = data_in[idx]; } } } __global__ void compact_rows(const OSQPInt* row_ind, OSQPInt* data_out, OSQPInt* new_row_number, OSQPInt* predicate, OSQPInt* scatter_addres, OSQPInt n) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if(idx < n) { if(predicate[idx]) { OSQPInt write_ind = scatter_addres[idx] - 1; OSQPInt row = row_ind[idx]; data_out[write_ind] = new_row_number[row]-1; } } } __global__ void vector_init_abs_kernel(const OSQPInt* a, OSQPInt* b, OSQPInt n) { OSQPInt i = threadIdx.x + blockDim.x * blockIdx.x; if (i < n) { b[i] = abs(a[i]); } } __global__ void csr_eq_kernel(const OSQPInt* A_row_ptr, const OSQPInt* A_col_ind, const OSQPFloat* A_val, const OSQPInt* B_row_ptr, const OSQPInt* B_col_ind, const OSQPFloat* B_val, OSQPInt m, OSQPFloat tol, OSQPInt* res) { OSQPInt i = 0; OSQPInt j = 0; OSQPFloat diff = 0.0; *res = 1; for (j = 0; j < m; j++) { // Cycle over rows j // if row pointer of next row does not coincide, they are not equal // NB: first row always has A->p[0] = B->p[0] = 0 by construction. 
if (A_row_ptr[j+1] != B_row_ptr[j+1]) { *res = 0; return; } for (i = A_row_ptr[j]; i < A_row_ptr[j + 1]; i++) { // Cycle columns i in row j if (A_col_ind[i] != B_col_ind[i]) { // Different column indices *res = 0; return; } #ifdef OSQP_USE_FLOAT diff = fabsf(A_val[i] - B_val[i]); #else diff = fabs(A_val[i] - B_val[i]); #endif if (diff > tol) { // The actual matrix values are different *res = 0; return; } } } } /******************************************************************************* * Private Functions * *******************************************************************************/ static void init_SpMV_interface(csr *M) { OSQPFloat* d_x; OSQPFloat* d_y; cusparseDnVecDescr_t vecx, vecy; OSQPFloat alpha = 1.0; OSQPInt m = M->m; OSQPInt n = M->n; /* Only create the matrix if it has non-zero dimensions. * Some versions of CUDA don't allow creating matrices with rows/columns of * size 0 and assert instead. So we don't create the matrix object, and instead * will never perform any operations on it. */ if ((m > 0) && (n > 0)) { /* Wrap raw data into cuSPARSE API matrix */ checkCudaErrors(cusparseCreateCsr( &M->SpMatDescr, m, n, M->nnz, (void*)M->row_ptr, (void*)M->col_ind, (void*)M->val, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, CUDA_FLOAT)); if (!M->SpMatBufferSize) { cuda_malloc((void **) &d_x, n * sizeof(OSQPFloat)); cuda_malloc((void **) &d_y, m * sizeof(OSQPFloat)); cuda_vec_create(&vecx, d_x, n); cuda_vec_create(&vecy, d_y, m); /* Allocate workspace for cusparseSpMV */ checkCudaErrors(cusparseSpMV_bufferSize( CUDA_handle->cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, &alpha, M->SpMatDescr, vecx, &alpha, vecy, CUDA_FLOAT, CUSPARSE_SPMV_ALG_DEFAULT, &M->SpMatBufferSize)); if (M->SpMatBufferSize) cuda_malloc((void **) &M->SpMatBuffer, M->SpMatBufferSize); cuda_vec_destroy(vecx); cuda_vec_destroy(vecy); cuda_free((void **) &d_x); cuda_free((void **) &d_y); } } } /* * Creates a CSR matrix with the specified dimension (m,n,nnz). * * If specified, it allocates proper amount of device memory * allocate_on_device = 1: device memory for CSR * allocate_on_device = 2: device memory for CSR (+ col_ind) */ csr* csr_alloc(OSQPInt m, OSQPInt n, OSQPInt nnz, OSQPInt allocate_on_device) { csr* dev_mat = (csr*) c_calloc(1, sizeof(csr)); if (!dev_mat) return NULL; dev_mat->m = m; dev_mat->n = n; dev_mat->nnz = nnz; if (allocate_on_device > 0) { cuda_calloc((void **) &dev_mat->val, (dev_mat->nnz + 1) * sizeof(OSQPFloat)); cuda_malloc((void **) &dev_mat->row_ptr, (dev_mat->m + 1) * sizeof(OSQPInt)); cuda_malloc((void **) &dev_mat->col_ind, dev_mat->nnz * sizeof(OSQPInt)); if (allocate_on_device > 1) cuda_malloc((void **) &dev_mat->row_ind, dev_mat->nnz * sizeof(OSQPInt)); } dev_mat->SpMatBufferSize = 0; dev_mat->SpMatBuffer = NULL; return dev_mat; } csr* csr_init(OSQPInt m, OSQPInt n, const OSQPInt* h_row_ptr, const OSQPInt* h_col_ind, const OSQPFloat* h_val) { csr* dev_mat = csr_alloc(m, n, h_row_ptr[m], 1); if (!dev_mat) return NULL; if (m == 0) return dev_mat; /* copy_matrix_to_device */ checkCudaErrors(cudaMemcpy(dev_mat->row_ptr, h_row_ptr, (dev_mat->m + 1) * sizeof(OSQPInt), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(dev_mat->col_ind, h_col_ind, dev_mat->nnz * sizeof(OSQPInt), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(dev_mat->val, h_val, dev_mat->nnz * sizeof(OSQPFloat), cudaMemcpyHostToDevice)); return dev_mat; } /* * Compress row indices from the COO format to the row pointer * of the CSR format. 
*/ void compress_row_ind(csr* mat) { cuda_free((void** ) &mat->row_ptr); cuda_malloc((void** ) &mat->row_ptr, (mat->m + 1) * sizeof(OSQPFloat)); checkCudaErrors(cusparseXcoo2csr(CUDA_handle->cusparseHandle, mat->row_ind, mat->nnz, mat->m, mat->row_ptr, CUSPARSE_INDEX_BASE_ZERO)); } void csr_expand_row_ind(csr* mat) { if (!mat->row_ind) { cuda_malloc((void** ) &mat->row_ind, mat->nnz * sizeof(OSQPFloat)); checkCudaErrors(cusparseXcsr2coo(CUDA_handle->cusparseHandle, mat->row_ptr, mat->nnz, mat->m, mat->row_ind, CUSPARSE_INDEX_BASE_ZERO)); } } /* * Sorts matrix in COO format by row. It returns a permutation * vector that describes reordering of the elements. */ OSQPInt* coo_sort(csr* A) { OSQPInt* A_to_At_permutation; char* pBuffer; size_t pBufferSizeInBytes; cuda_malloc((void **) &A_to_At_permutation, A->nnz * sizeof(OSQPInt)); checkCudaErrors(cusparseCreateIdentityPermutation(CUDA_handle->cusparseHandle, A->nnz, A_to_At_permutation)); checkCudaErrors(cusparseXcoosort_bufferSizeExt(CUDA_handle->cusparseHandle, A->m, A->n, A->nnz, A->row_ind, A->col_ind, &pBufferSizeInBytes)); cuda_malloc((void **) &pBuffer, pBufferSizeInBytes * sizeof(char)); checkCudaErrors(cusparseXcoosortByRow(CUDA_handle->cusparseHandle, A->m, A->n, A->nnz, A->row_ind, A->col_ind, A_to_At_permutation, pBuffer)); cuda_free((void **) &pBuffer); return A_to_At_permutation; } /* * Compute transpose of a matrix in COO format. */ void coo_tranpose(csr* A) { OSQPInt m = A->m; A->m = A->n; A->n = m; OSQPInt *row_ind = A->row_ind; A->row_ind = A->col_ind; A->col_ind = row_ind; } /* * values[i] = values[permutation[i]] for i in [0,n-1] */ void permute_vector(OSQPFloat* values, const OSQPInt* permutation, OSQPInt n) { OSQPFloat* permuted_values; cuda_malloc((void **) &permuted_values, n * sizeof(OSQPFloat)); cuda_vec_gather(n, values, permuted_values, permutation); checkCudaErrors(cudaMemcpy(values, permuted_values, n * sizeof(OSQPFloat), cudaMemcpyDeviceToDevice)); cuda_free((void **) &permuted_values); } /* * Copy the values and pointers from the source matrix to the target matrix. * The device memory of the target has to be freed first to avoid a * memory leak in case it holds allocated memory. * * The MatrixDescription has to be destroyed first since it is a * pointer hidden behind a typedef. * * The pointers of the source matrix are set to NULL to avoid * accidental freeing of the associated memory blocks. 
*/ void copy_csr(csr* target, csr* source) { target->m = source->m; target->n = source->n; target->nnz = source->nnz; cuda_free((void **) &target->val); cuda_free((void **) &target->row_ind); cuda_free((void **) &target->row_ptr); cuda_free((void **) &target->col_ind); target->val = source->val; target->row_ind = source->row_ind; target->row_ptr = source->row_ptr; target->col_ind = source->col_ind; source->val = NULL; source->row_ind = NULL; source->row_ptr = NULL; source->col_ind = NULL; } void csr_triu_to_full(csr* P_triu, OSQPInt** P_triu_to_full_permutation, OSQPInt** P_diag_indices) { OSQPInt number_of_blocks; OSQPInt* has_non_zero_diag_element; OSQPInt* d_nnz_diag; OSQPInt h_nnz_diag, Full_nnz, nnz_triu, n, nnz_max_Full; OSQPInt offset; nnz_triu = P_triu->nnz; n = P_triu->n; nnz_max_Full = 2*nnz_triu + n; csr* Full_P = csr_alloc(n, n, nnz_max_Full, 2); cuda_calloc((void **) &has_non_zero_diag_element, n * sizeof(OSQPInt)); cuda_calloc((void **) &d_nnz_diag, sizeof(OSQPInt)); csr_expand_row_ind(P_triu); number_of_blocks = (nnz_triu / THREADS_PER_BLOCK) + 1; fill_full_matrix_kernel<<<number_of_blocks, THREADS_PER_BLOCK>>>(Full_P->row_ind, Full_P->col_ind, d_nnz_diag, has_non_zero_diag_element, P_triu->row_ind, P_triu->col_ind, nnz_triu, n); offset = 2 * nnz_triu; number_of_blocks = (n / THREADS_PER_BLOCK) + 1; add_diagonal_kernel<<<number_of_blocks, THREADS_PER_BLOCK>>>(Full_P->row_ind + offset, Full_P->col_ind + offset, has_non_zero_diag_element, n); /* The Full matrix now is of size (2n)x(2n) * [P 0] * [0 D] * where P is the desired full matrix and D is * a diagonal that contains dummy values */ checkCudaErrors(cudaMemcpy(&h_nnz_diag, d_nnz_diag, sizeof(OSQPInt), cudaMemcpyDeviceToHost)); Full_nnz = (2 * (nnz_triu - h_nnz_diag)) + n; OSQPInt* d_P = coo_sort(Full_P); number_of_blocks = (nnz_triu / THREADS_PER_BLOCK) + 1; reduce_permutation_kernel<<<number_of_blocks,THREADS_PER_BLOCK>>>(d_P, nnz_triu, Full_nnz); /* permute vector */ cuda_vec_gather(Full_nnz, P_triu->val, Full_P->val, d_P); cuda_malloc((void **) P_triu_to_full_permutation, Full_nnz * sizeof(OSQPInt)); checkCudaErrors(cudaMemcpy(*P_triu_to_full_permutation, d_P, Full_nnz * sizeof(OSQPInt), cudaMemcpyDeviceToDevice)); cuda_malloc((void **) P_diag_indices, n * sizeof(OSQPInt)); number_of_blocks = (Full_nnz / THREADS_PER_BLOCK) + 1; get_diagonal_indices_kernel<<<number_of_blocks, THREADS_PER_BLOCK>>>(Full_P->row_ind, Full_P->col_ind, Full_nnz, *P_diag_indices); Full_P->nnz = Full_nnz; compress_row_ind(Full_P); copy_csr(P_triu, Full_P); cuda_mat_free(Full_P); cuda_free((void **) &d_P); cuda_free((void **) &d_nnz_diag); cuda_free((void **) &has_non_zero_diag_element); } /** * Matrix A is converted from CSC to CSR. The data in A is interpreted as * being in CSC format, even if it is in CSR. * This operation is equivalent to a transpose. We temporarily allocate space * for the new matrix since this operation cannot be done inplace. * Additionally, a gather indices vector is generated to perform the conversion * from A to A' faster during a matrix update. 
*/ void csr_transpose(csr* A, OSQPInt** A_to_At_permutation) { (*A_to_At_permutation) = NULL; if (A->nnz == 0) { OSQPInt tmp = A->n; A->n = A->m; A->m = tmp; return; } csr_expand_row_ind(A); coo_tranpose(A); (*A_to_At_permutation) = coo_sort(A); compress_row_ind(A); permute_vector(A->val, *A_to_At_permutation, A->nnz); } /******************************************************************************* * API Functions * *******************************************************************************/ void cuda_mat_init_P(const OSQPCscMatrix* mat, csr** P, OSQPFloat** d_P_triu_val, OSQPInt** d_P_triu_to_full_ind, OSQPInt** d_P_diag_ind) { OSQPInt n = mat->n; OSQPInt nnz = mat->p[n]; /* Initialize upper triangular part of P */ *P = csr_init(n, n, mat->p, mat->i, mat->x); /* Convert P to a full matrix. Store indices of diagonal and triu elements. */ csr_triu_to_full(*P, d_P_triu_to_full_ind, d_P_diag_ind); csr_expand_row_ind(*P); /* We need 0.0 at val[nzz] -> nnz+1 elements */ cuda_calloc((void **) d_P_triu_val, (nnz+1) * sizeof(OSQPFloat)); /* Store triu elements */ checkCudaErrors(cudaMemcpy(*d_P_triu_val, mat->x, nnz * sizeof(OSQPFloat), cudaMemcpyHostToDevice)); init_SpMV_interface(*P); } void cuda_mat_init_A(const OSQPCscMatrix* mat, csr** A, csr** At, OSQPInt** d_A_to_At_ind) { OSQPInt m = mat->m; OSQPInt n = mat->n; /* Initializing At is easy since it is equal to A in CSC */ *At = csr_init(n, m, mat->p, mat->i, mat->x); csr_expand_row_ind(*At); /* We need to take transpose of At to get A */ *A = csr_init(n, m, mat->p, mat->i, mat->x); csr_transpose(*A, d_A_to_At_ind); csr_expand_row_ind(*A); init_SpMV_interface(*A); init_SpMV_interface(*At); } void cuda_mat_update_P(const OSQPFloat* Px, const OSQPInt* Px_idx, OSQPInt Px_n, csr** P, OSQPFloat* d_P_triu_val, OSQPInt* d_P_triu_to_full_ind, OSQPInt* d_P_diag_ind, OSQPInt P_triu_nnz) { if (!Px_idx) { /* Update whole P */ OSQPFloat* d_P_val_new; /* Allocate memory */ cuda_malloc((void **) &d_P_val_new, (P_triu_nnz + 1) * sizeof(OSQPFloat)); /* Copy new values from host to device */ checkCudaErrors(cudaMemcpy(d_P_val_new, Px, P_triu_nnz * sizeof(OSQPFloat), cudaMemcpyHostToDevice)); cuda_vec_gather((*P)->nnz, d_P_val_new, (*P)->val, d_P_triu_to_full_ind); cuda_free((void **) &d_P_val_new); } else { /* Update P partially */ OSQPFloat* d_P_val_new; OSQPInt* d_P_ind_new; /* Allocate memory */ cuda_malloc((void **) &d_P_val_new, Px_n * sizeof(OSQPFloat)); cuda_malloc((void **) &d_P_ind_new, Px_n * sizeof(OSQPInt)); /* Copy new values and indices from host to device */ checkCudaErrors(cudaMemcpy(d_P_val_new, Px, Px_n * sizeof(OSQPFloat), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_P_ind_new, Px_idx, Px_n * sizeof(OSQPInt), cudaMemcpyHostToDevice)); /* Update d_P_triu_val */ scatter(d_P_triu_val, d_P_val_new, d_P_ind_new, Px_n); /* Gather from d_P_triu_val to update full P */ cuda_vec_gather((*P)->nnz, d_P_triu_val, (*P)->val, d_P_triu_to_full_ind); cuda_free((void **) &d_P_val_new); cuda_free((void **) &d_P_ind_new); } } void cuda_mat_update_A(const OSQPFloat* Ax, const OSQPInt* Ax_idx, OSQPInt Ax_n, csr** A, csr** At, OSQPInt* d_A_to_At_ind) { OSQPInt Annz = (*A)->nnz; OSQPFloat* Aval = (*A)->val; OSQPFloat* Atval = (*At)->val; if (!Ax_idx) { /* Update whole A */ /* Updating At is easy since it is equal to A in CSC */ checkCudaErrors(cudaMemcpy(Atval, Ax, Annz * sizeof(OSQPFloat), cudaMemcpyHostToDevice)); /* Updating A requires transpose of A_new */ cuda_vec_gather(Annz, Atval, Aval, d_A_to_At_ind); } else { /* Update A partially */ 
OSQPFloat* d_At_val_new; OSQPInt* d_At_ind_new; /* Allocate memory */ cuda_malloc((void **) &d_At_val_new, Ax_n * sizeof(OSQPFloat)); cuda_malloc((void **) &d_At_ind_new, Ax_n * sizeof(OSQPInt)); /* Copy new values and indices from host to device */ checkCudaErrors(cudaMemcpy(d_At_val_new, Ax, Ax_n * sizeof(OSQPFloat), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_At_ind_new, Ax_idx, Ax_n * sizeof(OSQPInt), cudaMemcpyHostToDevice)); /* Update At first since it is equal to A in CSC */ scatter(Atval, d_At_val_new, d_At_ind_new, Ax_n); cuda_free((void **) &d_At_val_new); cuda_free((void **) &d_At_ind_new); /* Gather from Atval to construct Aval */ cuda_vec_gather(Annz, Atval, Aval, d_A_to_At_ind); } } void cuda_mat_free(csr* mat) { if (mat) { cuda_free((void **) &mat->val); cuda_free((void **) &mat->row_ptr); cuda_free((void **) &mat->col_ind); cuda_free((void **) &mat->row_ind); cuda_free((void **) &mat->SpMatBuffer); checkCudaErrors(cusparseDestroySpMat(mat->SpMatDescr)); c_free(mat); } } OSQPInt cuda_csr_is_eq(const csr* A, const csr* B, OSQPFloat tol) { OSQPInt h_res = 0; OSQPInt *d_res; // If number of columns, rows and non-zeros are not the same, they are not equal. if ((A->n != B->n) || (A->m != B->m) || (A->nnz != B->nnz)) { return 0; } OSQPInt nnz = A->nnz; OSQPInt number_of_blocks = (nnz / THREADS_PER_BLOCK) / ELEMENTS_PER_THREAD + 1; cuda_malloc((void **) &d_res, sizeof(OSQPInt)); /* Initialize d_res to 1 */ h_res = 1; checkCudaErrors(cudaMemcpy(d_res, &h_res, sizeof(OSQPInt), cudaMemcpyHostToDevice)); csr_eq_kernel<<<number_of_blocks, THREADS_PER_BLOCK>>>(A->row_ptr, A->col_ind, A->val, B->row_ptr, B->col_ind, B->val, A->m, tol, d_res); checkCudaErrors(cudaMemcpy(&h_res, d_res, sizeof(OSQPInt), cudaMemcpyDeviceToHost)); cuda_free((void **) &d_res); return h_res; } void cuda_submat_byrows(const csr* A, const OSQPInt* d_rows, csr** Ared, csr** Aredt) { OSQPInt new_m = 0; OSQPInt n = A->n; OSQPInt m = A->m; OSQPInt nnz = A->nnz; OSQPInt* d_predicate; OSQPInt* d_compact_address; OSQPInt* d_row_predicate; OSQPInt* d_new_row_number; cuda_malloc((void **) &d_row_predicate, m * sizeof(OSQPInt)); cuda_malloc((void **) &d_new_row_number, m * sizeof(OSQPInt)); cuda_malloc((void **) &d_predicate, nnz * sizeof(OSQPInt)); cuda_malloc((void **) &d_compact_address, nnz * sizeof(OSQPInt)); // Copy rows array to device and set -1s to ones checkCudaErrors(cudaMemcpy(d_row_predicate, d_rows, m * sizeof(OSQPInt), cudaMemcpyDeviceToDevice)); vector_init_abs_kernel<<<(m/THREADS_PER_BLOCK) + 1,THREADS_PER_BLOCK>>>(d_row_predicate, d_row_predicate, m); // Calculate new row numbering and get new number of rows thrust::inclusive_scan(thrust::device, d_row_predicate, d_row_predicate + m, d_new_row_number); if (m) { checkCudaErrors(cudaMemcpy(&new_m, &d_new_row_number[m-1], sizeof(OSQPInt), cudaMemcpyDeviceToHost)); } else { (*Ared) = (csr *) c_calloc(1, sizeof(csr)); (*Ared)->n = n; (*Aredt) = (csr *) c_calloc(1, sizeof(csr)); (*Aredt)->m = n; return; } // Generate predicates per element from per row predicate predicate_generator_kernel<<<(nnz/THREADS_PER_BLOCK) + 1, THREADS_PER_BLOCK>>>(A->row_ind, d_row_predicate, d_predicate, nnz); // Get array offset for compacting and new nnz thrust::inclusive_scan(thrust::device, d_predicate, d_predicate + nnz, d_compact_address); OSQPInt nnz_new; if (nnz) checkCudaErrors(cudaMemcpy(&nnz_new, &d_compact_address[nnz-1], sizeof(OSQPInt), cudaMemcpyDeviceToHost)); // allocate new matrix (2 -> allocate row indices as well) (*Ared) = csr_alloc(new_m, n, nnz_new, 
2); // Compact arrays according to given predicates, special care has to be taken for the rows compact_rows<<<(nnz/THREADS_PER_BLOCK) + 1, THREADS_PER_BLOCK>>>(A->row_ind, (*Ared)->row_ind, d_new_row_number, d_predicate, d_compact_address, nnz); compact<<<(nnz/THREADS_PER_BLOCK) + 1, THREADS_PER_BLOCK>>>(A->col_ind, (*Ared)->col_ind, d_predicate, d_compact_address, nnz); compact<<<(nnz/THREADS_PER_BLOCK) + 1, THREADS_PER_BLOCK>>>(A->val, (*Ared)->val, d_predicate, d_compact_address, nnz); // Generate row pointer compress_row_ind(*Ared); // We first make a copy of Ared *Aredt = csr_alloc(new_m, n, nnz_new, 1); checkCudaErrors(cudaMemcpy((*Aredt)->val, (*Ared)->val, nnz_new * sizeof(OSQPFloat), cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy((*Aredt)->row_ptr, (*Ared)->row_ptr, (new_m+1) * sizeof(OSQPInt), cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy((*Aredt)->col_ind, (*Ared)->col_ind, nnz_new * sizeof(OSQPInt), cudaMemcpyDeviceToDevice)); OSQPInt* d_A_to_At_ind; csr_transpose(*Aredt, &d_A_to_At_ind); csr_expand_row_ind(*Ared); csr_expand_row_ind(*Aredt); init_SpMV_interface(*Ared); init_SpMV_interface(*Aredt); cuda_free((void**)&d_A_to_At_ind); cuda_free((void**)&d_predicate); cuda_free((void**)&d_compact_address); cuda_free((void**)&d_row_predicate); cuda_free((void**)&d_new_row_number); }
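/*
 * Illustrative sketch (guarded out of the normal build, not part of the library):
 * a host-only version of the predicate / inclusive-scan / compact pipeline used
 * by cuda_submat_byrows above. The guard macro CUDA_CSR_SUBMAT_EXAMPLE and the
 * toy data are made up; the steps mirror vector_init_abs_kernel, the
 * thrust::inclusive_scan calls, predicate_generator_kernel and compact_rows/compact.
 */
#ifdef CUDA_CSR_SUBMAT_EXAMPLE
#include <cstdio>
#include <cstdlib>
#include <numeric>
#include <vector>

int main() {
  /* 4x3 matrix in COO form; keep the rows flagged nonzero in d_rows
   * (OSQP passes -1/0 flags, hence the abs() in vector_init_abs_kernel). */
  std::vector<int>    row_ind = {0, 0, 1, 2, 3, 3};
  std::vector<int>    col_ind = {0, 2, 1, 0, 1, 2};
  std::vector<double> val     = {1, 2, 3, 4, 5, 6};
  std::vector<int>    d_rows  = {-1, 0, 0, -1};      /* keep rows 0 and 3 */
  int m = (int)d_rows.size(), nnz = (int)row_ind.size();

  /* 1. Row predicate and new row numbering (inclusive scan). */
  std::vector<int> row_pred(m), new_row(m);
  for (int i = 0; i < m; i++) row_pred[i] = std::abs(d_rows[i]);
  std::partial_sum(row_pred.begin(), row_pred.end(), new_row.begin());
  int new_m = new_row[m - 1];

  /* 2. Per-element predicate and compaction addresses. */
  std::vector<int> pred(nnz), addr(nnz);
  for (int i = 0; i < nnz; i++) pred[i] = row_pred[row_ind[i]];
  std::partial_sum(pred.begin(), pred.end(), addr.begin());
  int nnz_new = addr[nnz - 1];

  /* 3. Compact values, columns and (renumbered) rows. */
  std::vector<int>    r_out(nnz_new), c_out(nnz_new);
  std::vector<double> v_out(nnz_new);
  for (int i = 0; i < nnz; i++) {
    if (pred[i]) {
      int w = addr[i] - 1;                 /* scan is inclusive -> subtract 1 */
      r_out[w] = new_row[row_ind[i]] - 1;  /* new 0-based row number */
      c_out[w] = col_ind[i];
      v_out[w] = val[i];
    }
  }

  printf("new_m = %d, nnz_new = %d\n", new_m, nnz_new);
  for (int i = 0; i < nnz_new; i++)
    printf("(%d,%d) = %g\n", r_out[i], c_out[i], v_out[i]);
  return 0;
}
#endif /* CUDA_CSR_SUBMAT_EXAMPLE */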
ccca9e7192cc41eb78ba81aeadc74dd06d08c5c5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* gpuSysHelper.cu Nicolas Sawaya July 2013 */ //Kernel for setting up pseudo-random number generator __global__ void setupPRNG(int inputSeed, curandStateMRG32k3a *state) { int id = threadIdx.x + blockIdx.x * blockDim.x; // int id = threadIdx.x + blockIdx.x * 64; /* Each thread gets same seed, a different sequence number, no offset */ // hiprand_init(0, id, 0, &state[id]); //"Sequences generated with the same seed and different sequence //numbers will not have statistically correlated values." hiprand_init( inputSeed, //seed id, //sequence (there are several 'sequences' running in parallel) 0, //offset &state[id] //hiprandState_t *state ); } //Kernel for updating HSR Hamiltonian __global__ void addDiagToRealGaussNoise( typeMat *HamVals, int *diagIndices, // float *gaussNoiseArrSD1, curandStateMRG32k3a *crStates, typeReal *siteStddevs, typeMat *diagInitVals, int vectorSize, //To know when you're past array's end int curandLength ) { int elem = blockIdx.x*blockDim.x + threadIdx.x; if(elem<vectorSize) { //Take modulus so you don't go past cuRand memory limit int crId = elem % curandLength; /* Copy state to local memory for efficiency */ curandStateMRG32k3a localState = crStates[crId]; float randNorm = hiprand_normal(&localState); //typeReal noise = (typeReal)gaussNoiseArrSD1[elem] * siteStddevs[elem]; typeReal noise = (typeReal)randNorm * siteStddevs[elem]; HamVals[ diagIndices[elem] ].x = noise + diagInitVals[elem].x; /* Copy state back to global memory */ crStates[crId] = localState; } } //Kernel for updating Kubo-Anderson's delta_e and Hamiltonian __global__ void addAndUpdateKANoise( typeMat *HamVals, int *diagIndices, typeReal *dEps, curandStateMRG32k3a *crStates, typeReal *invRelaxTimes, typeReal *stochCoeffs, typeMat *diagInitVals, int numSubSteps, typeReal subdt, //this is dt/numSubSteps. No division in kernel. 
typeReal sqrt_subdt, int vectorSize, //To know when you're past array's end int curandLength ) { int elem = blockIdx.x*blockDim.x + threadIdx.x; if(elem<vectorSize) { //Take modulus so you don't go past cuRand memory limit int crId = elem % curandLength; /* Copy state to local memory for efficiency */ curandStateMRG32k3a localState = crStates[crId]; float randNorm; //Get current dEps typeReal cur_dEps = dEps[elem]; //Get parameters typeReal invTau = invRelaxTimes[elem]; typeReal coeff = stochCoeffs[elem]; for(int i=0;i<numSubSteps;i++) { //Get random from Normal(1,0) randNorm = hiprand_normal(&localState); //Propagate Langevin equation using Euler-Maruyama Scheme cur_dEps = cur_dEps - cur_dEps*invTau*subdt + coeff*randNorm*sqrt_subdt; } //Write back dEps dEps[elem] = cur_dEps; //Update Hamiltonian HamVals[ diagIndices[elem] ].x = cur_dEps + diagInitVals[elem].x; /* Copy state back to global memory */ crStates[crId] = localState; } } //Kernel for updating Hamiltonian for ZZReal __global__ void updateZZRealNoise( typeMat *HamVals, int *diagIndices, typeMat *diagInitVals, typeReal *phiVals, typeReal *omegaVals, typeReal *flucCoeffs, typeReal timeVal, int numSites, //To know when you're past array's end int totNumOscPerSite //number of oscillators per site ) { int site = blockIdx.x*blockDim.x + threadIdx.x; if(site<numSites) { typeReal omega, coeff, phi; typeReal noise = 0; int index; for(int osc=0; osc<totNumOscPerSite; osc++) { index = numSites*osc + site; // omega = omegaVals[index]; // coeff = flucCoeffs[index]; // phi = phiVals[index]; omega = omegaVals[osc]; coeff = flucCoeffs[osc]; phi = phiVals[index]; noise = noise + coeff*cos(omega*timeVal + phi); } HamVals[ diagIndices[site] ].x = noise + diagInitVals[site].x; } } //Kernel for adding up the populations of each ensemble run __global__ void kernel_addPopToEnsembleAvg( bool isFirstInEnsemble, typeMat *stateMat, typeReal *ensemblePopMat, int numElems) { int elem = blockIdx.x*blockDim.x + threadIdx.x; //Ensure element is within limits if(elem < numElems) { typeReal thisPop; if(isFirstInEnsemble) { thisPop = 0.; } else { thisPop = ensemblePopMat[elem]; } ensemblePopMat[elem] = thisPop + pow(stateMat[elem].x,2) + pow(stateMat[elem].y,2); } } //Kernel for dividing ensemble sum by number of runs __global__ void divideForEnsembleAvg( typeReal *ensemblePopMat, typeReal inverseNumRuns, int numElems ) { int elem = blockIdx.x*blockDim.x + threadIdx.x; //Ensure element is within limits if(elem < numElems) { ensemblePopMat[elem] = ensemblePopMat[elem] * inverseNumRuns; } }
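/*
 * Illustrative sketch (guarded out of the normal build): the same Euler-Maruyama
 * update that addAndUpdateKANoise applies per diagonal site before writing
 * dEps + diagInitVals into the Hamiltonian,
 *     d(eps) = -(eps / tau) dt + c dW,   dW ~ Normal(0, dt),
 * stepped as eps += -eps*(1/tau)*subdt + c*xi*sqrt(subdt) with xi ~ Normal(0,1).
 * The guard macro GPUSYSHELPER_KA_EXAMPLE and the parameter values are made up.
 */
#ifdef GPUSYSHELPER_KA_EXAMPLE
#include <cmath>
#include <cstdio>
#include <random>

int main() {
  const double tau         = 50.0;   /* relaxation time (1/invRelaxTimes)  */
  const double c           = 10.0;   /* stochastic coefficient (stochCoeffs) */
  const double subdt       = 0.1;    /* sub-step, i.e. dt / numSubSteps    */
  const double sqrt_subdt  = std::sqrt(subdt);
  const int    numSubSteps = 10000;

  std::mt19937 gen(1234);
  std::normal_distribution<double> normal(0.0, 1.0);

  double eps = 0.0;
  for (int i = 0; i < numSubSteps; i++) {
    /* identical update rule to the kernel's inner loop */
    eps = eps - eps * (1.0 / tau) * subdt + c * normal(gen) * sqrt_subdt;
  }
  /* the stationary standard deviation of this process is c * sqrt(tau / 2) */
  printf("eps after %d sub-steps: %g (stationary std ~ %g)\n",
         numSubSteps, eps, c * std::sqrt(tau / 2.0));
  return 0;
}
#endif /* GPUSYSHELPER_KA_EXAMPLE */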
ccca9e7192cc41eb78ba81aeadc74dd06d08c5c5.cu
/* gpuSysHelper.cu Nicolas Sawaya July 2013 */ //Kernel for setting up pseudo-random number generator __global__ void setupPRNG(int inputSeed, curandStateMRG32k3a *state) { int id = threadIdx.x + blockIdx.x * blockDim.x; // int id = threadIdx.x + blockIdx.x * 64; /* Each thread gets same seed, a different sequence number, no offset */ // curand_init(0, id, 0, &state[id]); //"Sequences generated with the same seed and different sequence //numbers will not have statistically correlated values." curand_init( inputSeed, //seed id, //sequence (there are several 'sequences' running in parallel) 0, //offset &state[id] //curandState_t *state ); } //Kernel for updating HSR Hamiltonian __global__ void addDiagToRealGaussNoise( typeMat *HamVals, int *diagIndices, // float *gaussNoiseArrSD1, curandStateMRG32k3a *crStates, typeReal *siteStddevs, typeMat *diagInitVals, int vectorSize, //To know when you're past array's end int curandLength ) { int elem = blockIdx.x*blockDim.x + threadIdx.x; if(elem<vectorSize) { //Take modulus so you don't go past cuRand memory limit int crId = elem % curandLength; /* Copy state to local memory for efficiency */ curandStateMRG32k3a localState = crStates[crId]; float randNorm = curand_normal(&localState); //typeReal noise = (typeReal)gaussNoiseArrSD1[elem] * siteStddevs[elem]; typeReal noise = (typeReal)randNorm * siteStddevs[elem]; HamVals[ diagIndices[elem] ].x = noise + diagInitVals[elem].x; /* Copy state back to global memory */ crStates[crId] = localState; } } //Kernel for updating Kubo-Anderson's delta_e and Hamiltonian __global__ void addAndUpdateKANoise( typeMat *HamVals, int *diagIndices, typeReal *dEps, curandStateMRG32k3a *crStates, typeReal *invRelaxTimes, typeReal *stochCoeffs, typeMat *diagInitVals, int numSubSteps, typeReal subdt, //this is dt/numSubSteps. No division in kernel. 
typeReal sqrt_subdt, int vectorSize, //To know when you're past array's end int curandLength ) { int elem = blockIdx.x*blockDim.x + threadIdx.x; if(elem<vectorSize) { //Take modulus so you don't go past cuRand memory limit int crId = elem % curandLength; /* Copy state to local memory for efficiency */ curandStateMRG32k3a localState = crStates[crId]; float randNorm; //Get current dEps typeReal cur_dEps = dEps[elem]; //Get parameters typeReal invTau = invRelaxTimes[elem]; typeReal coeff = stochCoeffs[elem]; for(int i=0;i<numSubSteps;i++) { //Get random from Normal(1,0) randNorm = curand_normal(&localState); //Propagate Langevin equation using Euler-Maruyama Scheme cur_dEps = cur_dEps - cur_dEps*invTau*subdt + coeff*randNorm*sqrt_subdt; } //Write back dEps dEps[elem] = cur_dEps; //Update Hamiltonian HamVals[ diagIndices[elem] ].x = cur_dEps + diagInitVals[elem].x; /* Copy state back to global memory */ crStates[crId] = localState; } } //Kernel for updating Hamiltonian for ZZReal __global__ void updateZZRealNoise( typeMat *HamVals, int *diagIndices, typeMat *diagInitVals, typeReal *phiVals, typeReal *omegaVals, typeReal *flucCoeffs, typeReal timeVal, int numSites, //To know when you're past array's end int totNumOscPerSite //number of oscillators per site ) { int site = blockIdx.x*blockDim.x + threadIdx.x; if(site<numSites) { typeReal omega, coeff, phi; typeReal noise = 0; int index; for(int osc=0; osc<totNumOscPerSite; osc++) { index = numSites*osc + site; // omega = omegaVals[index]; // coeff = flucCoeffs[index]; // phi = phiVals[index]; omega = omegaVals[osc]; coeff = flucCoeffs[osc]; phi = phiVals[index]; noise = noise + coeff*cos(omega*timeVal + phi); } HamVals[ diagIndices[site] ].x = noise + diagInitVals[site].x; } } //Kernel for adding up the populations of each ensemble run __global__ void kernel_addPopToEnsembleAvg( bool isFirstInEnsemble, typeMat *stateMat, typeReal *ensemblePopMat, int numElems) { int elem = blockIdx.x*blockDim.x + threadIdx.x; //Ensure element is within limits if(elem < numElems) { typeReal thisPop; if(isFirstInEnsemble) { thisPop = 0.; } else { thisPop = ensemblePopMat[elem]; } ensemblePopMat[elem] = thisPop + pow(stateMat[elem].x,2) + pow(stateMat[elem].y,2); } } //Kernel for dividing ensemble sum by number of runs __global__ void divideForEnsembleAvg( typeReal *ensemblePopMat, typeReal inverseNumRuns, int numElems ) { int elem = blockIdx.x*blockDim.x + threadIdx.x; //Ensure element is within limits if(elem < numElems) { ensemblePopMat[elem] = ensemblePopMat[elem] * inverseNumRuns; } }
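/*
 * Illustrative sketch (guarded out of the normal build): the ensemble averaging
 * done by kernel_addPopToEnsembleAvg and divideForEnsembleAvg. The population of
 * element k is |state[k]|^2 = x^2 + y^2; populations are accumulated over runs
 * (reset on the first run) and finally scaled by 1/numRuns. The guard macro
 * GPUSYSHELPER_ENSEMBLE_EXAMPLE and the cplx stand-in for typeMat are made up.
 */
#ifdef GPUSYSHELPER_ENSEMBLE_EXAMPLE
#include <cstdio>
#include <vector>

struct cplx { double x, y; };   /* stand-in for the project's typeMat */

int main() {
  const int numElems = 3, numRuns = 2;
  /* two toy "runs" of a 3-element state vector */
  std::vector<std::vector<cplx>> runs = {
    { {1.0, 0.0}, {0.0, 0.0}, {0.0, 1.0} },
    { {0.6, 0.0}, {0.8, 0.0}, {0.0, 0.0} },
  };

  std::vector<double> ensemblePop(numElems, 0.0);
  for (int r = 0; r < numRuns; r++) {
    bool isFirst = (r == 0);
    for (int k = 0; k < numElems; k++) {
      double prev = isFirst ? 0.0 : ensemblePop[k];   /* kernel_addPopToEnsembleAvg */
      ensemblePop[k] = prev + runs[r][k].x * runs[r][k].x
                            + runs[r][k].y * runs[r][k].y;
    }
  }
  for (int k = 0; k < numElems; k++)                  /* divideForEnsembleAvg */
    ensemblePop[k] *= 1.0 / numRuns;

  for (int k = 0; k < numElems; k++)
    printf("site %d: <population> = %g\n", k, ensemblePop[k]);
  return 0;
}
#endif /* GPUSYSHELPER_ENSEMBLE_EXAMPLE */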