namespace minerva { namespace cuda { static void FindConfiguration(size_t size, int& num_blocks, int& num_threads) { num_threads = 0; if(size <= 32) num_threads = 32; else if(size <= 64) num_threads = 64; else if(size <= 128) num_threads = 128; else if(size <= 256) num_threads = 256; else if(size <= 512) num_threads = 512; else num_threads = 1024; num_blocks = static_cast<int>((size + num_threads - 1) / num_threads); if (num_blocks < 0 || 128 < num_blocks) { num_blocks = 128; } } void CudaPerformDotMult(float* a, float* b, float* c, size_t size, cudaStream_t stream) { int block, thread; FindConfiguration(size, block, thread); CudaPerformDotKernel<<<block, thread, 0, stream>>>(a, b, c, size, MultOp()); CheckCudaError("CudaPerformDotMult"); } void CudaPerformDotDiv(float* a, float* b, float* c, size_t size, cudaStream_t stream) { int block, thread; FindConfiguration(size, block, thread); CudaPerformDotKernel<<<block, thread, 0, stream>>>(a, b, c, size, DivOp()); CheckCudaError("CudaPerformDotDiv"); } void CudaPerformAdd(float* a, float* b, float* c, size_t size, cudaStream_t stream) { int block, thread; FindConfiguration(size, block, thread); CudaPerformDotKernel<<<block, thread, 0, stream>>>(a, b, c, size, SumOp()); CheckCudaError("CudaPerformAdd"); //float one = 1.0; //CUBLAS_CALL(cublasScopy(handle, size, a, 1, c, 1)); //CUBLAS_CALL(cublasSaxpy(handle, size, &one, b, 1, c, 1)); } void CudaPerformCopy(float* a, float* b, size_t size, cublasHandle_t handle) { CUBLAS_CALL(cublasScopy(handle, size, a, 1, b, 1)); } void CudaPerformSub(float* a, float* b, float* c, size_t size, cublasHandle_t handle) { float minus_one = -1.0; CUBLAS_CALL(cublasScopy(handle, size, a, 1, c, 1)); CUBLAS_CALL(cublasSaxpy(handle, size, &minus_one, b, 1, c, 1)); } void CudaPerformMatMult(float* a, float* b, float* c, int m, int n, int k, cublasHandle_t handle) { float one = 1.0; float zero = 0.0; CUBLAS_CALL(cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &one, a, m, b, k, &zero, c, m)); } void CudaPerformScale(float* in_data, float* res_data, size_t size, float val, cublasHandle_t handle) { CUBLAS_CALL(cublasScopy(handle, size, in_data, 1, res_data, 1)); CUBLAS_CALL(cublasSscal(handle, size, &val, res_data, 1)); } void CudaPerformTranspose(float* a, float* c, int m, int n, cublasHandle_t handle) { float zero = 0.0; float one = 1.0; CUBLAS_CALL(cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_N, n, m, &one, a, m, &zero, c, n, c, n)); } void CudaPerformConstAdd(float* in, float* out, float val, size_t size, cudaStream_t stream) { int block, thread; FindConfiguration(size, block, thread); CudaPerformDotKernel<<<block, thread, 0, stream>>>(in, out, val, size, SumOp()); CheckCudaError("CudaPerformConstAdd"); } void CudaPerformLeftConstSub(float* in, float* out, float val, size_t size, cudaStream_t stream) { int block, thread; FindConfiguration(size, block, thread); CudaPerformDotKernel<<<block, thread, 0, stream>>>(in, out, val, size, ReverseSubOp()); CheckCudaError("CudaPerformLeftConstSub"); } void CudaPerformLeftConstDiv(float* in, float* out, float val, size_t size, cudaStream_t stream) { int block, thread; FindConfiguration(size, block, thread); CudaPerformDotKernel<<<block, thread, 0, stream>>>(in, out, val, size, ReverseDivOp()); CheckCudaError("CudaPerformLeftConstDiv"); } void CudaPerformNormAddOnCol(float* matrix, float* row, float* res, int m, int n, cudaStream_t stream) { int block, thread; FindConfiguration(m, block, thread); CudaPerformNormOnColKernel<<<block, thread, 0, stream>>>(matrix, row, res, m, n, 
SumOp()); CheckCudaError("CudaPerformNormAddOnCol"); } void CudaPerformNormSubOnCol(float* matrix, float* row, float* res, int m, int n, cudaStream_t stream) { int block, thread; FindConfiguration(m, block, thread); CudaPerformNormOnColKernel<<<block, thread, 0, stream>>>(matrix, row, res, m, n, SubOp()); CheckCudaError("CudaPerformNormSubOnCol"); } void CudaPerformNormMultOnCol(float* matrix, float* row, float* res, int m, int n, cudaStream_t stream) { int block, thread; FindConfiguration(m, block, thread); CudaPerformNormOnColKernel<<<block, thread, 0, stream>>>(matrix, row, res, m, n, MultOp()); CheckCudaError("CudaPerformNormMultOnCol"); } void CudaPerformNormDivOnCol(float* matrix, float* row, float* res, int m, int n, cudaStream_t stream) { int block, thread; FindConfiguration(m, block, thread); CudaPerformNormOnColKernel<<<block, thread, 0, stream>>>(matrix, row, res, m, n, DivOp()); CheckCudaError("CudaPerformNormDivOnCol"); } void CudaPerformNormAddOnRow(float* matrix, float* row, float* res, int m, int n, cudaStream_t stream) { int block, thread; FindConfiguration(m, block, thread); CudaPerformNormOnRowKernel<<<block, thread, 0, stream>>>(matrix, row, res, m, n, SumOp()); CheckCudaError("CudaPerformNormAddOnRow"); } void CudaPerformNormSubOnRow(float* matrix, float* row, float* res, int m, int n, cudaStream_t stream) { int block, thread; FindConfiguration(m, block, thread); CudaPerformNormOnRowKernel<<<block, thread, 0, stream>>>(matrix, row, res, m, n, SubOp()); CheckCudaError("CudaPerformNormSubOnRow"); } void CudaPerformNormMultOnRow(float* matrix, float* row, float* res, int m, int n, cudaStream_t stream) { int block, thread; FindConfiguration(m, block, thread); CudaPerformNormOnRowKernel<<<block, thread, 0, stream>>>(matrix, row, res, m, n, MultOp()); CheckCudaError("CudaPerformNormMultOnRow"); } void CudaPerformNormDivOnRow(float* matrix, float* row, float* res, int m, int n, cudaStream_t stream) { int block, thread; FindConfiguration(m, block, thread); CudaPerformNormOnRowKernel<<<block, thread, 0, stream>>>(matrix, row, res, m, n, DivOp()); CheckCudaError("CudaPerformNormDivOnRow"); } void CudaPerformReductionSumOnCol(float* in, float* out, int m, int n, cudaStream_t stream) { int block, thread; FindConfiguration(n, block, thread); CudaPerformReductionOnColKernel<<<block, thread, 0, stream>>>(in, out, m, n, SumOp()); CheckCudaError("CudaPerformReductionSumOnCol"); } void CudaPerformReductionMaxOnCol(float* in, float* out, int m, int n, cudaStream_t stream) { int block, thread; FindConfiguration(n, block, thread); CudaPerformReductionOnColKernel<<<block, thread, 0, stream>>>(in, out, m, n, MaxOp()); CheckCudaError("CudaPerformReductionMaxOnCol"); } void CudaPerformReductionSumOnRow(float* in, float* out, int m, int n, cudaStream_t stream) { int block, thread; FindConfiguration(n, block, thread); CudaPerformReductionOnRowKernel<<<block, thread, 0, stream>>>(in, out, m, n, SumOp()); CheckCudaError("CudaPerformReductionSumOnRow"); } void CudaPerformReductionMaxOnRow(float* in, float* out, int m, int n, cudaStream_t stream) { int block, thread; FindConfiguration(n, block, thread); CudaPerformReductionOnRowKernel<<<block, thread, 0, stream>>>(in, out, m, n, MaxOp()); CheckCudaError("CudaPerformReductionMaxOnRow"); } void CudaPerformMaxIndexOnCol(float* in, float* out, int m, int n, cudaStream_t stream) { int block, thread; FindConfiguration(n, block, thread); CudaPerformMaxIndexOnColKernel<<<block, thread, 0, stream>>>(in, out, m, n); CheckCudaError("CudaPerformMaxIndexOnCol"); 
} void CudaPerformMaxIndexOnRow(float* in, float* out, int m, int n, cudaStream_t stream) { int block, thread; FindConfiguration(m, block, thread); CudaPerformMaxIndexOnRowKernel << <block, thread, 0, stream>>>(in, out, m, n); CheckCudaError("CudaPerformMaxIndexOnRow"); } void CudaPerformReshape(float* in, float* out, size_t size, cudaStream_t stream) { CUDA_CALL(cudaMemcpyAsync(out, in, size, cudaMemcpyDefault, stream)); } void CudaPerformElewiseExp(float* in, float* out, size_t size, cudaStream_t stream) { int block, thread; FindConfiguration(size, block, thread); CudaPerformDotKernel<<<block, thread, 0, stream>>>(in, out, size, ExpOp()); CheckCudaError("CudaPerformEleWiseExp"); } void CudaPerformElewiseLn(float* in, float* out, size_t size, cudaStream_t stream) { int block, thread; FindConfiguration(size, block, thread); CudaPerformDotKernel<<<block, thread, 0, stream>>>(in, out, size, LnOp()); CheckCudaError("CudaPerformEleWiseLn"); } void CudaPerformElewiseNegative(float* in, float* out, size_t size, cudaStream_t stream) { int block, thread; FindConfiguration(size, block, thread); CudaPerformDotKernel<<<block, thread, 0, stream>>>(in, out, size, NegativeOp()); CheckCudaError("CudaPerformEleWiseNegative"); } void CudaPerformConvForward(float* bottom, float* filter, float* bias, float* top, int num_images, int bottom_num_channels, int top_num_channels, int bottom_height, int bottom_width, int pad_height, int pad_width, int stride_vertical, int stride_horizontal, int filter_height, int filter_width, cudaStream_t stream, cudnnHandle_t handle) { cudnnTensorDescriptor_t bottom_desc; cudnnFilterDescriptor_t filter_desc; cudnnTensorDescriptor_t bias_desc; cudnnConvolutionDescriptor_t conv_desc; cudnnTensorDescriptor_t top_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bottom_desc)); CUDNN_CALL(cudnnCreateFilterDescriptor(&filter_desc)); CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnCreateTensorDescriptor(&top_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor(bottom_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, num_images, bottom_num_channels, bottom_height, bottom_width)); CUDNN_CALL(cudnnSetFilter4dDescriptor(filter_desc, CUDNN_DATA_FLOAT, top_num_channels, bottom_num_channels, filter_height, filter_width)); CUDNN_CALL(cudnnSetTensor4dDescriptor(bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, top_num_channels, 1, 1)); CUDNN_CALL(cudnnSetConvolution2dDescriptor(conv_desc, pad_height, pad_width, stride_vertical, stride_horizontal, 1, 1, CUDNN_CONVOLUTION)); CUDNN_CALL(cudnnSetTensor4dDescriptor(top_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, num_images, top_num_channels, (bottom_height + 2 * pad_height - filter_height) / stride_vertical + 1, (bottom_width + 2 * pad_width - filter_width) / stride_horizontal + 1)); float one = 1; float zero = 0; cudnnConvolutionFwdAlgo_t algorithm; size_t workspace_size; void* workspace; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(handle, bottom_desc, filter_desc, conv_desc, top_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algorithm)); CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(handle, bottom_desc, filter_desc, conv_desc, top_desc, algorithm, &workspace_size)); CUDA_CALL(cudaMalloc(&workspace, workspace_size)); CUDNN_CALL(cudnnConvolutionForward(handle, &one, bottom_desc, bottom, filter_desc, filter, conv_desc, algorithm, workspace, workspace_size, &zero, top_desc, top)); CUDNN_CALL(cudnnAddTensor(handle, CUDNN_ADD_SAME_C, &one, bias_desc, bias, &one, top_desc, top)); 
CUDA_CALL(cudaStreamSynchronize(stream)); // Synchronize before destruction CUDA_CALL(cudaFree(workspace)); CUDNN_CALL(cudnnDestroyTensorDescriptor(top_desc)); CUDNN_CALL(cudnnDestroyConvolutionDescriptor(conv_desc)); CUDNN_CALL(cudnnDestroyTensorDescriptor(bias_desc)); CUDNN_CALL(cudnnDestroyFilterDescriptor(filter_desc)); CUDNN_CALL(cudnnDestroyTensorDescriptor(bottom_desc)); } void CudaPerformConvBackwardData(float* top_diff, float* filter, float* bottom_diff, int num_images, int bottom_num_channels, int top_num_channels, int top_height, int top_width, int pad_height, int pad_width, int stride_vertical, int stride_horizontal, int filter_height, int filter_width, cudaStream_t stream, cudnnHandle_t handle) { cudnnTensorDescriptor_t bottom_diff_desc; cudnnFilterDescriptor_t filter_desc; cudnnConvolutionDescriptor_t conv_desc; cudnnTensorDescriptor_t top_diff_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bottom_diff_desc)); CUDNN_CALL(cudnnCreateFilterDescriptor(&filter_desc)); CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnCreateTensorDescriptor(&top_diff_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor(bottom_diff_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, num_images, bottom_num_channels, (top_height - 1) * stride_vertical + filter_height - 2 * pad_height, (top_width - 1) * stride_horizontal + filter_width - 2 * pad_width)); CUDNN_CALL(cudnnSetFilter4dDescriptor(filter_desc, CUDNN_DATA_FLOAT, top_num_channels, bottom_num_channels, filter_height, filter_width)); CUDNN_CALL(cudnnSetConvolution2dDescriptor(conv_desc, pad_height, pad_width, stride_vertical, stride_horizontal, 1, 1, CUDNN_CONVOLUTION)); CUDNN_CALL(cudnnSetTensor4dDescriptor(top_diff_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, num_images, top_num_channels, top_height, top_width)); float one = 1; float zero = 0; CUDNN_CALL(cudnnConvolutionBackwardData(handle, &one, filter_desc, filter, top_diff_desc, top_diff, conv_desc, &zero, bottom_diff_desc, bottom_diff)); CUDA_CALL(cudaStreamSynchronize(stream)); // Synchronize before destruction CUDNN_CALL(cudnnDestroyTensorDescriptor(top_diff_desc)); CUDNN_CALL(cudnnDestroyConvolutionDescriptor(conv_desc)); CUDNN_CALL(cudnnDestroyFilterDescriptor(filter_desc)); CUDNN_CALL(cudnnDestroyTensorDescriptor(bottom_diff_desc)); } void CudaPerformConvBackwardFilter(float* bottom, float* top_diff, float* filter_diff, int num_images, int bottom_num_channels, int top_num_channels, int bottom_height, int bottom_width, int pad_height, int pad_width, int stride_vertical, int stride_horizontal, int filter_height, int filter_width, cudaStream_t stream, cudnnHandle_t handle) { cudnnTensorDescriptor_t bottom_desc; cudnnFilterDescriptor_t filter_diff_desc; cudnnConvolutionDescriptor_t conv_desc; cudnnTensorDescriptor_t top_diff_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bottom_desc)); CUDNN_CALL(cudnnCreateFilterDescriptor(&filter_diff_desc)); CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnCreateTensorDescriptor(&top_diff_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor(bottom_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, num_images, bottom_num_channels, bottom_height, bottom_width)); CUDNN_CALL(cudnnSetFilter4dDescriptor(filter_diff_desc, CUDNN_DATA_FLOAT, top_num_channels, bottom_num_channels, filter_height, filter_width)); CUDNN_CALL(cudnnSetConvolution2dDescriptor(conv_desc, pad_height, pad_width, stride_vertical, stride_horizontal, 1, 1, CUDNN_CONVOLUTION)); CUDNN_CALL(cudnnSetTensor4dDescriptor(top_diff_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 
num_images, top_num_channels, (bottom_height + 2 * pad_height - filter_height) / stride_vertical + 1, (bottom_width + 2 * pad_width - filter_width) / stride_horizontal + 1)); float one = 1; float zero = 0; CUDNN_CALL(cudnnConvolutionBackwardFilter(handle, &one, bottom_desc, bottom, top_diff_desc, top_diff, conv_desc, &zero, filter_diff_desc, filter_diff)); CUDA_CALL(cudaStreamSynchronize(stream)); // Synchronize before destruction CUDNN_CALL(cudnnDestroyTensorDescriptor(top_diff_desc)); CUDNN_CALL(cudnnDestroyConvolutionDescriptor(conv_desc)); CUDNN_CALL(cudnnDestroyFilterDescriptor(filter_diff_desc)); CUDNN_CALL(cudnnDestroyTensorDescriptor(bottom_desc)); } void CudaPerformConvBackwardBias(float* top_diff, float* bias_diff, int num_images, int top_num_channels, int top_height, int top_width, cudaStream_t stream, cudnnHandle_t handle) { cudnnTensorDescriptor_t bias_diff_desc; cudnnTensorDescriptor_t top_diff_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_diff_desc)); CUDNN_CALL(cudnnCreateTensorDescriptor(&top_diff_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor(bias_diff_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, top_num_channels, 1, 1)); CUDNN_CALL(cudnnSetTensor4dDescriptor(top_diff_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, num_images, top_num_channels, top_height, top_width)); float one = 1; float zero = 0; CUDNN_CALL(cudnnConvolutionBackwardBias(handle, &one, top_diff_desc, top_diff, &zero, bias_diff_desc, bias_diff)); CUDA_CALL(cudaStreamSynchronize(stream)); // Synchronize before destruction CUDNN_CALL(cudnnDestroyTensorDescriptor(top_diff_desc)); CUDNN_CALL(cudnnDestroyTensorDescriptor(bias_diff_desc)); } void CudaPerformInstanceSoftmaxForward(float* bottom, float* top, int num_images, int num_channels, int height, int width, cudaStream_t stream, cudnnHandle_t handle) { cudnnTensorDescriptor_t desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor(desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, num_images, num_channels, height, width)); float one = 1; float zero = 0; CUDNN_CALL(cudnnSoftmaxForward(handle, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_INSTANCE, &one, desc, bottom, &zero, desc, top)); CUDA_CALL(cudaStreamSynchronize(stream)); // Synchronize before destruction CUDNN_CALL(cudnnDestroyTensorDescriptor(desc)); } void CudaPerformChannelSoftmaxForward(float* bottom, float* top, int num_images, int num_channels, int height, int width, cudaStream_t stream, cudnnHandle_t handle) { cudnnTensorDescriptor_t desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor(desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, num_images, num_channels, height, width)); float one = 1; float zero = 0; CUDNN_CALL(cudnnSoftmaxForward(handle, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL, &one, desc, bottom, &zero, desc, top)); CUDA_CALL(cudaStreamSynchronize(stream)); // Synchronize before destruction CUDNN_CALL(cudnnDestroyTensorDescriptor(desc)); } void CudaPerformInstanceSoftmaxBackward(float* diff, float* top, float* bottom, int num_images, int num_channels, int height, int width, cudaStream_t stream, cudnnHandle_t handle) { cudnnTensorDescriptor_t desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor(desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, num_images, num_channels, height, width)); float one = 1; float zero = 0; CUDNN_CALL(cudnnSoftmaxBackward(handle, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_INSTANCE, &one, desc, top, desc, diff, &zero, desc, bottom)); 
CUDA_CALL(cudaStreamSynchronize(stream)); // Synchronize before destruction CUDNN_CALL(cudnnDestroyTensorDescriptor(desc)); } void CudaPerformChannelSoftmaxBackward(float* diff, float* top, float* bottom, int num_images, int num_channels, int height, int width, cudaStream_t stream, cudnnHandle_t handle) { cudnnTensorDescriptor_t desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor(desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, num_images, num_channels, height, width)); float one = 1; float zero = 0; CUDNN_CALL(cudnnSoftmaxBackward(handle, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL, &one, desc, top, desc, diff, &zero, desc, bottom)); CUDA_CALL(cudaStreamSynchronize(stream)); // Synchronize before destruction CUDNN_CALL(cudnnDestroyTensorDescriptor(desc)); } void CudaPerformSigmoidForward(float* bottom, float* top, int num_images, int num_channels, int height, int width, cudaStream_t stream, cudnnHandle_t handle) { cudnnTensorDescriptor_t desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor(desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, num_images, num_channels, height, width)); float one = 1; float zero = 0; CUDNN_CALL(cudnnActivationForward(handle, CUDNN_ACTIVATION_SIGMOID, &one, desc, bottom, &zero, desc, top)); CUDA_CALL(cudaStreamSynchronize(stream)); // Synchronize before destruction CUDNN_CALL(cudnnDestroyTensorDescriptor(desc)); } void CudaPerformReluForward(float* bottom, float* top, int num_images, int num_channels, int height, int width, cudaStream_t stream, cudnnHandle_t handle) { cudnnTensorDescriptor_t desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor(desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, num_images, num_channels, height, width)); float one = 1; float zero = 0; CUDNN_CALL(cudnnActivationForward(handle, CUDNN_ACTIVATION_RELU, &one, desc, bottom, &zero, desc, top)); CUDA_CALL(cudaStreamSynchronize(stream)); // Synchronize before destruction CUDNN_CALL(cudnnDestroyTensorDescriptor(desc)); } void CudaPerformTanhForward(float* bottom, float* top, int num_images, int num_channels, int height, int width, cudaStream_t stream, cudnnHandle_t handle) { cudnnTensorDescriptor_t desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor(desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, num_images, num_channels, height, width)); float one = 1; float zero = 0; CUDNN_CALL(cudnnActivationForward(handle, CUDNN_ACTIVATION_TANH, &one, desc, bottom, &zero, desc, top)); CUDA_CALL(cudaStreamSynchronize(stream)); // Synchronize before destruction CUDNN_CALL(cudnnDestroyTensorDescriptor(desc)); } void CudaPerformSigmoidBackward(float* bottom, float* top, float* top_diff, float* bottom_diff, int num_images, int num_channels, int height, int width, cudaStream_t stream, cudnnHandle_t handle) { cudnnTensorDescriptor_t desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor(desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, num_images, num_channels, height, width)); float one = 1; float zero = 0; CUDNN_CALL(cudnnActivationBackward(handle, CUDNN_ACTIVATION_SIGMOID, &one, desc, top, desc, top_diff, desc, bottom, &zero, desc, bottom_diff)); CUDA_CALL(cudaStreamSynchronize(stream)); // Synchronize before destruction CUDNN_CALL(cudnnDestroyTensorDescriptor(desc)); } void CudaPerformReluBackward(float* bottom, float* top, float* top_diff, float* bottom_diff, int num_images, int num_channels, int height, int width, cudaStream_t stream, 
cudnnHandle_t handle) { cudnnTensorDescriptor_t desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor(desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, num_images, num_channels, height, width)); float one = 1; float zero = 0; CUDNN_CALL(cudnnActivationBackward(handle, CUDNN_ACTIVATION_RELU, &one, desc, top, desc, top_diff, desc, bottom, &zero, desc, bottom_diff)); CUDA_CALL(cudaStreamSynchronize(stream)); // Synchronize before destruction CUDNN_CALL(cudnnDestroyTensorDescriptor(desc)); } void CudaPerformTanhBackward(float* bottom, float* top, float* top_diff, float* bottom_diff, int num_images, int num_channels, int height, int width, cudaStream_t stream, cudnnHandle_t handle) { cudnnTensorDescriptor_t desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor(desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, num_images, num_channels, height, width)); float one = 1; float zero = 0; CUDNN_CALL(cudnnActivationBackward(handle, CUDNN_ACTIVATION_TANH, &one, desc, top, desc, top_diff, desc, bottom, &zero, desc, bottom_diff)); CUDA_CALL(cudaStreamSynchronize(stream)); // Synchronize before destruction CUDNN_CALL(cudnnDestroyTensorDescriptor(desc)); } void CudaPerformMaxPoolingForward(float* bottom, float* top, int num_images, int num_channels, int bottom_height, int bottom_width, int stride_vertical, int stride_horizontal, int window_height, int window_width, int pad_height, int pad_width, cudaStream_t stream, cudnnHandle_t handle) { // Calculate the dimension after pooling int pooled_height = (bottom_height + 2 * pad_height - window_height + stride_vertical - 1) / stride_vertical + 1; if (0 <= (pooled_height - 1) * stride_vertical - bottom_height - pad_height) { --pooled_height; } int pooled_width = (bottom_width + 2 * pad_width - window_width + stride_horizontal - 1) / stride_horizontal + 1; if (0 <= (pooled_width - 1) * stride_horizontal - bottom_width - pad_width) { --pooled_width; } cudnnTensorDescriptor_t bottom_desc; cudnnPoolingDescriptor_t pool_desc; cudnnTensorDescriptor_t top_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bottom_desc)); CUDNN_CALL(cudnnCreatePoolingDescriptor(&pool_desc)); CUDNN_CALL(cudnnCreateTensorDescriptor(&top_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor(bottom_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, num_images, num_channels, bottom_height, bottom_width)); CUDNN_CALL(cudnnSetPooling2dDescriptor(pool_desc, CUDNN_POOLING_MAX, window_height, window_width, pad_height, pad_width, stride_vertical, stride_horizontal)); CUDNN_CALL(cudnnSetTensor4dDescriptor(top_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, num_images, num_channels, pooled_height, pooled_width)); float one = 1; float zero = 0; CUDNN_CALL(cudnnPoolingForward(handle, pool_desc, &one, bottom_desc, bottom, &zero, top_desc, top)); CUDA_CALL(cudaStreamSynchronize(stream)); // Synchronize before destruction CUDNN_CALL(cudnnDestroyTensorDescriptor(top_desc)); CUDNN_CALL(cudnnDestroyPoolingDescriptor(pool_desc)); CUDNN_CALL(cudnnDestroyTensorDescriptor(bottom_desc)); } void CudaPerformAveragePoolingForward(float* bottom, float* top, int num_images, int num_channels, int bottom_height, int bottom_width, int stride_vertical, int stride_horizontal, int window_height, int window_width, int pad_height, int pad_width, cudaStream_t stream, cudnnHandle_t handle) { // Calculate the dimension after pooling int pooled_height = (bottom_height + 2 * pad_height - window_height + stride_vertical - 1) / stride_vertical + 1; if (0 <= (pooled_height - 1) * stride_vertical - 
bottom_height - pad_height) { --pooled_height; } int pooled_width = (bottom_width + 2 * pad_width - window_width + stride_horizontal - 1) / stride_horizontal + 1; if (0 <= (pooled_width - 1) * stride_horizontal - bottom_width - pad_width) { --pooled_width; } cudnnTensorDescriptor_t bottom_desc; cudnnPoolingDescriptor_t pool_desc; cudnnTensorDescriptor_t top_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bottom_desc)); CUDNN_CALL(cudnnCreatePoolingDescriptor(&pool_desc)); CUDNN_CALL(cudnnCreateTensorDescriptor(&top_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor(bottom_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, num_images, num_channels, bottom_height, bottom_width)); CUDNN_CALL(cudnnSetPooling2dDescriptor(pool_desc, CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING, window_height, window_width, pad_height, pad_width, stride_vertical, stride_horizontal)); CUDNN_CALL(cudnnSetTensor4dDescriptor(top_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, num_images, num_channels, pooled_height, pooled_width)); float one = 1; float zero = 0; CUDNN_CALL(cudnnPoolingForward(handle, pool_desc, &one, bottom_desc, bottom, &zero, top_desc, top)); CUDA_CALL(cudaStreamSynchronize(stream)); // Synchronize before destruction CUDNN_CALL(cudnnDestroyTensorDescriptor(top_desc)); CUDNN_CALL(cudnnDestroyPoolingDescriptor(pool_desc)); CUDNN_CALL(cudnnDestroyTensorDescriptor(bottom_desc)); } void CudaPerformMaxPoolingBackward(float* bottom, float* top, float* top_diff, float* bottom_diff, int num_images, int num_channels, int bottom_height, int bottom_width, int stride_vertical, int stride_horizontal, int window_height, int window_width, int pad_height, int pad_width, cudaStream_t stream, cudnnHandle_t handle) { // Calculate the dimension after pooling int pooled_height = (bottom_height + 2 * pad_height - window_height + stride_vertical - 1) / stride_vertical + 1; if (0 <= (pooled_height - 1) * stride_vertical - bottom_height - pad_height) { --pooled_height; } int pooled_width = (bottom_width + 2 * pad_width - window_width + stride_horizontal - 1) / stride_horizontal + 1; if (0 <= (pooled_width - 1) * stride_horizontal - bottom_width - pad_width) { --pooled_width; } cudnnTensorDescriptor_t bottom_desc; cudnnPoolingDescriptor_t pool_desc; cudnnTensorDescriptor_t top_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bottom_desc)); CUDNN_CALL(cudnnCreatePoolingDescriptor(&pool_desc)); CUDNN_CALL(cudnnCreateTensorDescriptor(&top_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor(bottom_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, num_images, num_channels, bottom_height, bottom_width)); CUDNN_CALL(cudnnSetPooling2dDescriptor(pool_desc, CUDNN_POOLING_MAX, window_height, window_width, pad_height, pad_width, stride_vertical, stride_horizontal)); CUDNN_CALL(cudnnSetTensor4dDescriptor(top_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, num_images, num_channels, pooled_height, pooled_width)); float one = 1; float zero = 0; CUDNN_CALL(cudnnPoolingBackward(handle, pool_desc, &one, top_desc, top, top_desc, top_diff, bottom_desc, bottom, &zero, bottom_desc, bottom_diff)); CUDA_CALL(cudaStreamSynchronize(stream)); // Synchronize before destruction CUDNN_CALL(cudnnDestroyTensorDescriptor(top_desc)); CUDNN_CALL(cudnnDestroyPoolingDescriptor(pool_desc)); CUDNN_CALL(cudnnDestroyTensorDescriptor(bottom_desc)); } void CudaPerformAveragePoolingBackward(float* bottom, float* top, float* top_diff, float* bottom_diff, int num_images, int num_channels, int bottom_height, int bottom_width, int stride_vertical, int stride_horizontal, int window_height, int window_width, int 
pad_height, int pad_width, cudaStream_t stream, cudnnHandle_t handle) { // Calculate the dimension after pooling int pooled_height = (bottom_height + 2 * pad_height - window_height + stride_vertical - 1) / stride_vertical + 1; if (0 <= (pooled_height - 1) * stride_vertical - bottom_height - pad_height) { --pooled_height; } int pooled_width = (bottom_width + 2 * pad_width - window_width + stride_horizontal - 1) / stride_horizontal + 1; if (0 <= (pooled_width - 1) * stride_horizontal - bottom_width - pad_width) { --pooled_width; } cudnnTensorDescriptor_t bottom_desc; cudnnPoolingDescriptor_t pool_desc; cudnnTensorDescriptor_t top_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bottom_desc)); CUDNN_CALL(cudnnCreatePoolingDescriptor(&pool_desc)); CUDNN_CALL(cudnnCreateTensorDescriptor(&top_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor(bottom_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, num_images, num_channels, bottom_height, bottom_width)); CUDNN_CALL(cudnnSetPooling2dDescriptor(pool_desc, CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING, window_height, window_width, pad_height, pad_width, stride_vertical, stride_horizontal)); CUDNN_CALL(cudnnSetTensor4dDescriptor(top_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, num_images, num_channels, pooled_height, pooled_width)); float one = 1; float zero = 0; CUDNN_CALL(cudnnPoolingBackward(handle, pool_desc, &one, top_desc, top, top_desc, top_diff, bottom_desc, bottom, &zero, bottom_desc, bottom_diff)); CUDA_CALL(cudaStreamSynchronize(stream)); // Synchronize before destruction CUDNN_CALL(cudnnDestroyTensorDescriptor(top_desc)); CUDNN_CALL(cudnnDestroyPoolingDescriptor(pool_desc)); CUDNN_CALL(cudnnDestroyTensorDescriptor(bottom_desc)); } void CudaPerformRandn(float* dst, size_t size, unsigned int seed, float mean, float var) { curandGenerator_t gen; CURAND_CALL(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT)); CURAND_CALL(curandSetPseudoRandomGeneratorSeed(gen, seed)); CURAND_CALL(curandGenerateNormal(gen, dst, size, mean, var)); CURAND_CALL(curandDestroyGenerator(gen)); } void CudaPerformRandBernoulli(float* dst, size_t size, unsigned int seed, float p, cudaStream_t stream) { int block, thread; FindConfiguration(size, block, thread); CudaPerformRandBernoulliKernel<<<block, thread, 0, stream>>>(dst, size, seed, p); # if defined(_MSC_VER) CheckCudaError(__FUNCTION__); # else CheckCudaError(__func__); # endif } void CudaPerformFill(float* dst, size_t size, float val, cudaStream_t stream) { int block, thread; FindConfiguration(size, block, thread); CudaPerformFillKernel<<<block, thread, 0, stream>>>(dst, size, val); CheckCudaError("CudaPerformFill"); } void CudaPerformLRNForward(float* bottom, float* scale, float* res, int local_size, float alpha, float beta, int num_img, int channel, int width, int height, cudaStream_t stream) { int block, thread, size; size = num_img * height * width; FindConfiguration(size, block, thread); LRNFillScale<<<block, thread, 0, stream>>>( size, bottom, num_img, channel, height, width, local_size, alpha / local_size, scale); CheckCudaError("LRNFillScale"); size = num_img * channel * width * height; FindConfiguration(size, block, thread); // NOLINT_NEXT_LINE(whitespace/operators) LRNComputeOutput<<<block, thread, 0, stream>>>(size, bottom, scale, -beta, res); CheckCudaError("LRNComputeOutput"); } void CudaPerformLRNBackward(float* bottom_data, float* top_data, float* scale, float* top_diff, float* bottom_diff, int local_size, float alpha, float beta, int num_img, int channel, int width, int height, cudaStream_t stream) { int 
block, thread; int size = num_img * width * height; FindConfiguration(size, block, thread); LRNComputeDiff<<<block, thread, 0, stream>>>( size, bottom_data, top_data, scale, top_diff, num_img, channel, height, width, local_size, -beta, float(2. * alpha * beta / local_size), bottom_diff); CheckCudaError("LRNBackward"); } void CudaPerformSelect(float* dst, float* src, std::vector<int> indices, size_t cols, size_t rows, cudaStream_t stream) { int block, thread; int size = cols * rows; int* indices_ptr; FindConfiguration(size, block, thread); CUDA_CALL(cudaMalloc(&indices_ptr, indices.size() * sizeof(int))); CUDA_CALL(cudaMemcpyAsync(indices_ptr, &indices[0], indices.size() * sizeof(int), cudaMemcpyDefault, stream)); /* pass the device copy of the indices (indices_ptr), not the host vector's data pointer */ SelectKernel<<<block, thread, 0, stream>>>(dst, src, indices_ptr, cols, rows, indices.size()); CheckCudaError("Select"); } } // namespace cuda } // namespace minerva
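All of the elementwise launchers above take their launch configuration from FindConfiguration, which picks a power-of-two block size and caps the grid at 128 blocks; that cap only covers arbitrarily large arrays if the kernels themselves (CudaPerformDotKernel and friends, whose bodies are not shown in this excerpt) stride over the data. Below is a minimal, illustrative sketch of such a grid-stride elementwise kernel and launcher; ScaleKernel and LaunchScale are hypothetical names, and the sketch assumes it sits in the same translation unit so the file-static FindConfiguration and CheckCudaError are visible.

// Hypothetical sketch only -- not part of the Minerva sources above.
__global__ void ScaleKernel(const float* in, float* out, float val, size_t size) {
  // Grid-stride loop: each thread walks the array in steps of gridDim.x * blockDim.x,
  // so a grid capped at 128 blocks still touches every element.
  for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size;
       i += static_cast<size_t>(gridDim.x) * blockDim.x) {
    out[i] = in[i] * val;
  }
}

void LaunchScale(const float* in, float* out, float val, size_t size, cudaStream_t stream) {
  int block, thread;
  FindConfiguration(size, block, thread);  // block <= 128, thread in {32, 64, ..., 1024}
  ScaleKernel<<<block, thread, 0, stream>>>(in, out, val, size);
  CheckCudaError("LaunchScale");
}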
#include <vector> #include <assert.h> using namespace std; #define SAMPLECOLLECT_DEBUG 2 HistoryCollection::HistoryCollection(const ImgRegParams & img_region_params, const int numFlowsInHistory,const int maxCompressedFrames,const int uncompressedFrames ){ writeBuffer = 0; collectedFlows = 0; numFlows = numFlowsInHistory; latestSampleFlowOnHost = 0; latestEmptyFlowOnHost = 0; latestRegParamOnHost = 0; ImgP = img_region_params; if(numFlowsInHistory>1) cout << "CUDA: HistoryCollection: creating Sample Bead History of " << numFlowsInHistory << " flows for Regional Parameter Fitting" <<endl; for(int i=0;i<MAX_FLOW_HISTORY; i++){ ppSampleCompressedTracesBuffers[i] = NULL; realFlow[i] = -1; nucIds[i] = -1; if (i<numFlows) { HostSampleCompressedTraces.push_back(new LayoutCubeWithRegions<short>(ImgP.getGridParam(NUM_SAMPLES_RF),maxCompressedFrames, HostMem)); HostSampleCompressedTraces.back()->memSet(0); HostSampleCompressedTraces.back()->setRWStrideZ(); HostEmptyTraceAvg.push_back(new LayoutCubeWithRegions<float>(ImgP.getGridParam(uncompressedFrames), 1, HostMem)); HostEmptyTraceAvg.back()->memSet(0); HostEmptyTraceAvg.back()->setRWStrideX(); } } HostPerFlowRegP = new LayoutCubeWithRegions<PerFlowParamsRegion>(ImgP.getGridParam(),1,HostMem); DeviceSampleCompressedTraces.clear(); DeviceEmptyTraceAvg.clear(); DevicePerFlowRegP = NULL; } HistoryCollection::~HistoryCollection() { for (vector<LayoutCubeWithRegions<short>*>::iterator it = HostSampleCompressedTraces.begin(); it != HostSampleCompressedTraces.end(); ++it){ if (*it != NULL) delete *it; *it = NULL; } for (vector<LayoutCubeWithRegions<short>*>::iterator it = DeviceSampleCompressedTraces.begin(); it != DeviceSampleCompressedTraces.end(); ++it){ if (*it != NULL) delete *it; *it = NULL; } for (vector<LayoutCubeWithRegions<float>*>::iterator it = HostEmptyTraceAvg.begin(); it != HostEmptyTraceAvg.end(); ++it){ if (*it != NULL) delete *it; *it = NULL; } for (vector<LayoutCubeWithRegions<float>*>::iterator it = DeviceEmptyTraceAvg.begin(); it != DeviceEmptyTraceAvg.end(); ++it){ if (*it != NULL) delete *it; *it = NULL; } if(HostPerFlowRegP != NULL) delete HostPerFlowRegP; if(DevicePerFlowRegP != NULL) delete DevicePerFlowRegP; } //////////////////////////////////////////////////////////////////////// // HOST SIDE SAMPLE COLLECTION FIRST BLOCK OF FLOWS void HistoryCollection::writeSampleBeadToFlowBuffers(const FG_BUFFER_TYPE * fgbuffer, const int numFrames) { //only sample beads get passed into this function //for each flow buffer write all frames: int flows = 0; for (std::vector<LayoutCubeWithRegions<short>*>::iterator it = HostSampleCompressedTraces.begin(); it != HostSampleCompressedTraces.end(); ++it) { for(int frame=0; frame < numFrames; frame++){ (*it)->write(*fgbuffer); fgbuffer++; //next frame } flows++; if(flows == collectedFlows) break; } } size_t HistoryCollection::extractSamplesOneRegionAllFlows(BkgModelWorkInfo* region_bkinfo, const int flowBlockSize, const size_t regId) { WorkSet myJob(region_bkinfo); int startFlow = flowBlockSize - collectedFlows; int numFrames = myJob.getNumFrames(); int numLiveBeads = myJob.getNumBeads(); FG_BUFFER_TYPE * localFgBuffer = myJob.getFgBuffer() + numFrames * startFlow; // move to first frame of first bead of first history flow //ToDo: remove after debugging for(int i = 0; i < numFlows; i++){ realFlow[i] = startFlow + i; nucIds[i] = myJob.getNucIdForFlow(realFlow[i]); } cout << "extractSamplesOneRegionAllFlows: " << regId << " numFrames: " << numFrames << " numLiveBeads: " << numLiveBeads << endl; size_t 
numSamples = 0; for(int n=0; n<numLiveBeads; n++){ // only if sampled and HQ add bead to sample set: if(region_bkinfo->bkgObj->region_data->my_beads.sampled[n]){ if(region_bkinfo->bkgObj->region_data->my_beads.high_quality[n]){ //set write pointer for all flow buffers to the current region/bead assert(numSamples < NUM_SAMPLES_RF && "GPU Flow by Flow Pipeline, Region Sample limit exceeded!"); for (vector<LayoutCubeWithRegions<short>*>::iterator it = HostSampleCompressedTraces.begin(); it != HostSampleCompressedTraces.end(); ++it) (*it)->setRWPtrRegion(regId,numSamples); writeSampleBeadToFlowBuffers(localFgBuffer,numFrames); numSamples++; } } localFgBuffer += numFrames * flowBlockSize; //next bead } return numSamples; } void HistoryCollection::extractEmptyTracesOneRegionAllFlows(BkgModelWorkInfo* region_bkinfo, const int flowBlockSize, const size_t regId) { WorkSet myJob(region_bkinfo); int startFlow = flowBlockSize - collectedFlows; int numFrames = myJob.getNumFrames(); float * perFlowEmptyTraces = myJob.getShiftedBackground() + numFrames * startFlow; int flow = 0; for (std::vector<LayoutCubeWithRegions<float>*>::iterator it = HostEmptyTraceAvg.begin(); it != HostEmptyTraceAvg.end(); ++it) { (*it)->setRWPtrRegion(regId); #if SAMPLECOLLECT_DEBUG > 1 cout << "DEBUG GPU EmptytraceAvg History," << regId <<"," << flow << ","; #endif for(int frame=0; frame < numFrames; frame++){ (*it)->write(*perFlowEmptyTraces); #if SAMPLECOLLECT_DEBUG > 1 cout << *perFlowEmptyTraces <<","; #endif perFlowEmptyTraces++; //next frame } #if SAMPLECOLLECT_DEBUG > 1 cout << endl; #endif flow++; if( flow == collectedFlows) break; } } void HistoryCollection::initRegionalParametersOneRegion(BkgModelWorkInfo* region_bkinfo, const size_t regId) { WorkSet myJob(region_bkinfo); reg_params * rp = &(region_bkinfo->bkgObj->region_data->my_regions.rp); PerFlowParamsRegion & ref = HostPerFlowRegP->refAtReg(regId); ref.setCopyDrift(rp->CopyDrift); ref.setDarkness(rp->darkness[0]); ref.setRatioDrift(rp->RatioDrift); ref.setSigma(*(rp->AccessSigma())); ref.setCoarseStart(region_bkinfo->bkgObj->region_data->my_regions.cache_step.i_start_coarse_step[0]); ref.setFineStart(region_bkinfo->bkgObj->region_data->my_regions.cache_step.i_start_fine_step[0]); ref.setTMidNuc(rp->AccessTMidNuc()[0]); ref.setTMidNucShift(rp->nuc_shape.t_mid_nuc_shift_per_flow[0]); ref.setTshift(rp->tshift); #if SAMPLECOLLECT_DEBUG > 1 cout << "DEBUG GPU Regional Param initialization regId " << regId << endl; ref.print(); #endif } ////////////////////////////////////// //all regions extraction void HistoryCollection::extractHistoryAllRegionsAllFlows(BkgModelWorkInfo* bkinfo, int flowBlockSize, int extractNumFlows){ if(extractNumFlows <= 0) extractNumFlows=numFlows; //if no extractNumFlows provided, extract the maximum assert(extractNumFlows <= flowBlockSize);//we cannot extract more than the number of flows in a block assert(extractNumFlows <= numFlows);//we cannot extract more flows than we have buffers created for writeBuffer = wrapAroundIdx(extractNumFlows); //next buffer to write to (or oldest buffer) in history collectedFlows = extractNumFlows; //number of flows in history collected (only needed if we want to collect a history of > flowBlockSize (20)) getHostPerFlowRegParams().memSet(0); #if SAMPLECOLLECT_DEBUG > 0 LayoutCubeWithRegions<size_t> NumSampleMap(ImgP.getGridParam(),1,HostMem); ImgP.print(); #endif for(size_t r=0; r < ImgP.getNumRegions(); r++){ BkgModelWorkInfo* region_bkinfo = (BkgModelWorkInfo*) &bkinfo[r]; WorkSet myJob(region_bkinfo); if (!myJob.getNumBeads()) 
continue; size_t regId = ImgP.getRegId(myJob.getRegCol(),myJob.getRegRow()); #if SAMPLECOLLECT_DEBUG > 0 NumSampleMap[regId] = extractSamplesOneRegionAllFlows(region_bkinfo,flowBlockSize,regId); #else extractSamplesOneRegionAllFlows(region_bkinfo,flowBlockSize,regId); #endif extractEmptyTracesOneRegionAllFlows(region_bkinfo,flowBlockSize,regId); initRegionalParametersOneRegion(region_bkinfo,regId); } int flow = bkinfo->flow; latestSampleFlowOnHost = flow; latestEmptyFlowOnHost = flow; latestRegParamOnHost = flow; #if SAMPLECOLLECT_DEBUG > 0 cout << "DEBUG: GPU Pipeline, collected samples for: " << collectedFlows << " flows for Regional Fitting" <<endl; cout << "DEBUG: GPU Pipeline, num Samples per region Host side:" <<endl; NumSampleMap.printRegionTable<size_t>(); #endif } // END HOST SIDE SAMPLE COLLECTION FIRST BLOCK OF FLOWS //////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////// // INITIAL DEVICE INIT: void HistoryCollection::InitDeviceBuffersAndSymbol( const ConstantFrameParams & cFrmP) { //allocate and copy host buffers to device: DevicePerFlowRegP = new LayoutCubeWithRegions<PerFlowParamsRegion>(ImgP.getGridParam(),1,DeviceGlobal); DevicePerFlowRegP->copy(getHostPerFlowRegParams()); int hId=0; for (vector<LayoutCubeWithRegions<short>*>::iterator it = HostSampleCompressedTraces.begin(); it != HostSampleCompressedTraces.end(); ++it) { DeviceSampleCompressedTraces.push_back(new LayoutCubeWithRegions<short>( *(*it) ,DeviceGlobal)); (*it)->setRWStrideZ(); for(size_t regId =0 ; regId < ImgP.getNumRegions(); regId++) { for(int sId = 0; sId < 20; sId++) cout << "Sample:"<< hId <<":"<< regId << ":" << sId <<","<< (*it)->getCSVatReg<short>(regId,sId,0,0,(*it)->getDimZ()) << endl; } hId++; } hId=0; for (vector<LayoutCubeWithRegions<float>*>::iterator it = HostEmptyTraceAvg.begin(); it != HostEmptyTraceAvg.end(); ++it) { DeviceEmptyTraceAvg.push_back(new LayoutCubeWithRegions<float>( *(*it) ,DeviceGlobal)); (*it)->setRWStrideX(); //for(size_t regId =0 ; regId < ImgP.getNumRegions(); regId++) //{ // cout << "emptyAvg:"<< hId << ":" << regId << ","<< (*it)->getCSVatReg<float>(regId,0,0,0,(*it)->getRegW(regId)) << endl; //} hId++; } assert(DeviceSampleCompressedTraces.size() == DeviceEmptyTraceAvg.size()); while( DeviceSampleCompressedTraces.size() < numFlows ){ DeviceSampleCompressedTraces.push_back(new LayoutCubeWithRegions<short>(ImgP.getGridParam(NUM_SAMPLES_RF), cFrmP.getMaxCompFrames(), DeviceGlobal)); DeviceSampleCompressedTraces.back()->memSet(0); DeviceEmptyTraceAvg.push_back(new LayoutCubeWithRegions<float>(ImgP.getGridParam(cFrmP.getUncompFrames()), 1, DeviceGlobal)); DeviceEmptyTraceAvg.back()->memSet(0); } int i=0; for (vector<LayoutCubeWithRegions<short>*>::iterator it = DeviceSampleCompressedTraces.begin(); it != DeviceSampleCompressedTraces.end(); ++it) ppSampleCompressedTracesBuffers[i++] = (*it)->getPtr(); //store device pointer in array for constant memory symbol i=0; for (vector<LayoutCubeWithRegions<float>*>::iterator it = DeviceeEmptyTraceAvg.begin(); it != DeviceEmptyTraceAvg.end(); ++it) ppEmptyTraceAvg[i++] = (*it)->getPtr(); //store device pointer in array for constant memory symbol //update symbol on device copySymbolsToDevice((*this)); cout << "CUDA: HistoryCollection: InitDeviceBuffersAndSymbol: created " << DeviceSampleCompressedTraces.size() << " Device History Buffers (" << getSize()/(1024.0*1024.0) << "MB), and initialized Device control symbol" <<endl; print(); } bool 
HistoryCollection::deviceBuffersInitialized() { return (DevicePerFlowRegP != NULL && DeviceSampleCompressedTraces.size() > 0 && DeviceEmptyTraceAvg.size()>0); } //////////////////////////////////////////////////////////////////////// //has to be called right after all history elements are updated. int HistoryCollection::UpdateHistoryCollection(const PerFlowParamsGlobal & Fp) { if(collectedFlows < numFlows) collectedFlows++; //todo: remove after debugging realFlow[writeBuffer] = Fp.getRealFnum(); nucIds[writeBuffer] = Fp.getNucId(); writeBuffer = wrapAroundIdx(writeBuffer + 1); //update symbol on device copySymbolsToDevice((*this)); //print(); return collectedFlows; } void HistoryCollection::RezeroWriteBuffer() { DeviceSampleCompressedTraces[writeBuffer]->memSet(0); DeviceEmptyTraceAvg[writeBuffer]->memSet(0); } LayoutCubeWithRegions<short> * HistoryCollection::getLatestSampleTraces(int currentFlow) { int idx = writeBuffer-1; idx = (idx < 0)?(collectedFlows+idx):(idx); if(currentFlow == 0 || currentFlow != latestSampleFlowOnHost){ if(DeviceSampleCompressedTraces[idx] != NULL){ (*HostSampleCompressedTraces[idx]).copy((*DeviceSampleCompressedTraces[idx])); latestSampleFlowOnHost = currentFlow; } } return (HostSampleCompressedTraces[idx]); } LayoutCubeWithRegions<float> * HistoryCollection::getLatestEmptyTraceAvgs(int currentFlow) { int idx = writeBuffer-1; idx = (idx < 0)?(collectedFlows+idx):(idx); if(currentFlow == 0 || currentFlow != latestEmptyFlowOnHost){ if(DeviceEmptyTraceAvg[idx] != NULL){ (*HostEmptyTraceAvg[idx]).copy((*DeviceEmptyTraceAvg[idx])); latestEmptyFlowOnHost = currentFlow; } } return (HostEmptyTraceAvg[idx]); } LayoutCubeWithRegions<PerFlowParamsRegion> * HistoryCollection::getCurrentRegParams(int currentFlow) { int idx = writeBuffer-1; idx = (idx < 0)?(collectedFlows+idx):(idx); if(currentFlow == 0 || currentFlow != latestRegParamOnHost){ if(DevicePerFlowRegP != NULL){ HostPerFlowRegP->copy(*DevicePerFlowRegP); latestRegParamOnHost=currentFlow; } } return HostPerFlowRegP; } void HistoryCollection::CopySerializationDataFromDeviceToHost() { if(deviceBuffersInitialized()){ cout << "CUDA: HistoryCollection: Copying history, regional param and polyclonal buffers from device to Host for serialization" << endl; vector<LayoutCubeWithRegions<short>*>::iterator hTraceit = HostSampleCompressedTraces.begin(); for (vector<LayoutCubeWithRegions<short>*>::iterator dTraceit = DeviceSampleCompressedTraces.begin(); dTraceit != DeviceSampleCompressedTraces.end(); ++dTraceit, ++hTraceit){ if (*dTraceit != NULL) (*hTraceit)->copy(*(*dTraceit)); } vector<LayoutCubeWithRegions<float>*>::iterator hEmptyit = HostEmptyTraceAvg.begin(); for (vector<LayoutCubeWithRegions<float>*>::iterator dEmptyit = DeviceEmptyTraceAvg.begin(); dEmptyit != DeviceEmptyTraceAvg.end(); ++dEmptyit, ++hEmptyit){ if (*dEmptyit != NULL) (*hEmptyit)->copy(*(*dEmptyit)); } if(DevicePerFlowRegP != NULL) getHostPerFlowRegParams().copy(getDevPerFlowRegParams()); } } /* void HistoryCollection::CopyHistoryToDeviceDeviceAfterRestart() { vector<LayoutCubeWithRegions<short>*>::iterator hTraceit = HostSampleCompressedTraces.begin(); for (vector<LayoutCubeWithRegions<short>*>::iterator dTraceit = DeviceSampleCompressedTraces.begin(); dTraceit != DeviceSampleCompressedTraces.end(); ++dTraceit, ++hTraceit){ if (*dTraceit != NULL) (*dTraceit)->copy(*(*hTraceit)); } vector<LayoutCubeWithRegions<float>*>::iterator hEmptyit = HostEmptyTraceAvg.begin(); for (vector<LayoutCubeWithRegions<float>*>::iterator dEmptyit = DeviceEmptyTraceAvg.begin(); 
dEmptyit != DeviceEmptyTraceAvg.end(); ++dEmptyit, ++hEmptyit){ if (*dEmptyit != NULL) (*dEmptyit)->copy(*(*hEmptyit)); } getDevPerFlowRegParams().copy(getHostPerFlowRegParams()); } */
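UpdateHistoryCollection and the getLatest* accessors above treat the per-flow buffers as a ring: writeBuffer always points at the slot that will be overwritten next, so the most recently written flow sits one slot behind it, wrapping at the number of buffered flows. wrapAroundIdx itself is not shown in this excerpt; the snippet below is a small, self-contained sketch of the indexing it implies, using a hypothetical RingIndex helper rather than the real class.

// Illustrative sketch only; RingIndex stands in for HistoryCollection's
// writeBuffer / wrapAroundIdx bookkeeping.
#include <cassert>

struct RingIndex {
  int capacity;     // number of history slots (numFlows)
  int writeBuffer;  // next slot to overwrite; the oldest slot once the ring is full

  int wrapAroundIdx(int idx) const { return idx % capacity; }

  // Slot holding the newest flow, mirroring the "writeBuffer - 1, wrap if negative"
  // logic of the getLatest* accessors.
  int latestIdx() const {
    int idx = writeBuffer - 1;
    return (idx < 0) ? (capacity + idx) : idx;
  }
};

int main() {
  RingIndex r{/*capacity=*/20, /*writeBuffer=*/0};
  for (int flow = 0; flow < 25; ++flow)
    r.writeBuffer = r.wrapAroundIdx(r.writeBuffer + 1);  // one slot consumed per flow
  assert(r.writeBuffer == 5);  // 25 % 20: slot 5 is the next one to overwrite
  assert(r.latestIdx() == 4);  // the newest flow sits one slot behind the write cursor
  return 0;
}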
#include "index_host_copy.cuh" #include <unordered_set> #include <claraparabricks/genomeworks/cudamapper/index.hpp> #include <claraparabricks/genomeworks/io/fasta_parser.hpp> namespace claraparabricks { namespace genomeworks { namespace cudamapper { DeviceIndexCache::DeviceIndexCache(const CacheType cache_type, HostIndexCache* host_cache) : cache_type_(cache_type) , host_cache_(host_cache) , is_ready_(false) { host_cache_->register_device_cache(cache_type_, this); } DeviceIndexCache::~DeviceIndexCache() { host_cache_->deregister_device_cache(cache_type_, this); } void DeviceIndexCache::add_index(const IndexDescriptor index_descriptor, std::shared_ptr<Index> device_index) { cache_[index_descriptor] = device_index; } std::shared_ptr<Index> DeviceIndexCache::get_index(const IndexDescriptor index_descriptor) const { if (!is_ready()) { throw DeviceCacheNotReadyException(cache_type_, index_descriptor); } return get_index_no_check_if_ready(index_descriptor); } std::shared_ptr<Index> DeviceIndexCache::get_index_no_check_if_ready(IndexDescriptor index_descriptor) const { const auto index_iter = cache_.find(index_descriptor); if (index_iter == cache_.end()) { throw IndexNotFoundException(cache_type_, IndexNotFoundException::IndexLocation::device_cache, index_descriptor); } return index_iter->second; } bool DeviceIndexCache::has_index(const IndexDescriptor index_descriptor) const { // TODO: use optional instead return 0 != cache_.count(index_descriptor); } void DeviceIndexCache::wait_for_data_to_be_ready() { if (!is_ready()) { for (const auto& index_it : cache_) { index_it.second->wait_to_be_ready(); } is_ready_ = true; } } bool DeviceIndexCache::is_ready() const { return is_ready_; } HostIndexCache::HostIndexCache(const bool same_query_and_target, genomeworks::DefaultDeviceAllocator allocator, std::shared_ptr<genomeworks::io::FastaParser> query_parser, std::shared_ptr<genomeworks::io::FastaParser> target_parser, const std::uint64_t kmer_size, const std::uint64_t window_size, const bool hash_representations, const double filtering_parameter, cudaStream_t cuda_stream_generation, cudaStream_t cuda_stream_copy) : same_query_and_target_(same_query_and_target) , allocator_(allocator) , query_parser_(query_parser) , target_parser_(target_parser) , kmer_size_(kmer_size) , window_size_(window_size) , hash_representations_(hash_representations) , filtering_parameter_(filtering_parameter) , cuda_stream_generation_(cuda_stream_generation) , cuda_stream_copy_(cuda_stream_copy) { } void HostIndexCache::generate_content(const CacheType cache_type, const std::vector<IndexDescriptor>& descriptors_of_indices_to_cache, const std::vector<IndexDescriptor>& descriptors_of_indices_to_keep_on_device, const bool skip_copy_to_host) { // skip_copy_to_host only makes sense if descriptors_of_indices_to_cache and descriptors_of_indices_to_keep_on_device are the same // otherwise some indices would be created and not saved on either host or device assert(!skip_copy_to_host || (descriptors_of_indices_to_cache == descriptors_of_indices_to_keep_on_device)); assert(!descriptors_of_indices_to_cache.empty()); host_cache_t& this_cache = (CacheType::query_cache == cache_type) ? query_host_cache_ : target_host_cache_; const host_cache_t& other_cache = (CacheType::query_cache == cache_type) ? target_host_cache_ : query_host_cache_; device_cache_t& indices_kept_on_device = (CacheType::query_cache == cache_type) ? 
query_indices_kept_on_device_ : target_indices_kept_on_device_; const genomeworks::io::FastaParser* parser = (CacheType::query_cache == cache_type) ? query_parser_.get() : target_parser_.get(); // convert descriptors_of_indices_to_keep_on_device into set for faster search std::unordered_set<IndexDescriptor, IndexDescriptorHash> descriptors_of_indices_to_keep_on_device_set(begin(descriptors_of_indices_to_keep_on_device), end(descriptors_of_indices_to_keep_on_device)); host_cache_t new_cache; indices_kept_on_device.clear(); // normally this should be empty anyway // In most cases index is generated on device and then moved to host. These two operations can be overlapped, i.e. while one index is being copied // to host the next index can be generated. // Index cache is expected to be larger than the available device memory, meaning it is not possible to keep all indices on device while they are // being copied to host. In this implementation only two copies of index are kept on device: the one currently being generated and the one currently // being copied to host (from the previous step). std::shared_ptr<const IndexHostCopyBase> index_on_host = nullptr; std::shared_ptr<Index> index_on_device = nullptr; // Only one pair of (host, device) indices can be copied at a time. Whenever a new copy should be started while these pointers are not null (= a copy is in progress) // wait for the current copy to finish first std::shared_ptr<const IndexHostCopyBase> index_on_host_copy_in_flight = nullptr; std::shared_ptr<Index> index_on_device_copy_in_flight = nullptr; const bool host_copy_needed = !skip_copy_to_host; for (const IndexDescriptor& descriptor_of_index_to_cache : descriptors_of_indices_to_cache) { index_on_host = nullptr; index_on_device = nullptr; const bool device_copy_needed = descriptors_of_indices_to_keep_on_device_set.count(descriptor_of_index_to_cache) != 0; // check if host copy already exists // check if index is already in this cache auto index_in_this_cache = this_cache.find(descriptor_of_index_to_cache); if (index_in_this_cache != this_cache.end()) { // index already cached index_on_host = index_in_this_cache->second; } // if index not found in this cache and query and target input files are the same check the other cache as well if (!index_on_host && same_query_and_target_) { auto index_in_other_cache = other_cache.find(descriptor_of_index_to_cache); if (index_in_other_cache != other_cache.end()) { index_on_host = index_in_other_cache->second; } } if (!index_on_host) { // create index index_on_device = Index::create_index_async(allocator_, *parser, descriptor_of_index_to_cache, kmer_size_, window_size_, hash_representations_, filtering_parameter_, cuda_stream_generation_, cuda_stream_copy_); index_on_device->wait_to_be_ready(); if (host_copy_needed) { // if a copy is already in progress wait for it to finish if (index_on_host_copy_in_flight) { assert(index_on_host_copy_in_flight && index_on_device_copy_in_flight); index_on_host_copy_in_flight->finish_copying(); } index_on_host = IndexHostCopy::create_host_copy_async(*index_on_device, descriptor_of_index_to_cache.first_read(), kmer_size_, window_size_, cuda_stream_copy_); index_on_host_copy_in_flight = index_on_host; index_on_device_copy_in_flight = index_on_device; } } if (index_on_host) { new_cache[descriptor_of_index_to_cache] = index_on_host; } // Device copy of index is only saved if it already exists, i.e. 
if the index has been generated // If the index has been found on host it won't be copied back to device at this point // TODO: check whether this index is already present in device cache, this is not expected to happen frequently so performance gains are going to be small if (device_copy_needed && index_on_device) { indices_kept_on_device[descriptor_of_index_to_cache] = index_on_device; } } // wait for the last copy to finish if (index_on_host_copy_in_flight) { assert(index_on_host_copy_in_flight && index_on_device_copy_in_flight); index_on_host_copy_in_flight->finish_copying(); } std::swap(new_cache, this_cache); } std::shared_ptr<DeviceIndexCache> HostIndexCache::start_copying_indices_to_device(const CacheType cache_type, const std::vector<IndexDescriptor>& descriptors_of_indices_to_cache) { const host_cache_t& host_cache = (CacheType::query_cache == cache_type) ? query_host_cache_ : target_host_cache_; const std::vector<DeviceIndexCache*>& this_device_caches = (CacheType::query_cache == cache_type) ? query_device_caches_ : target_device_caches_; const std::vector<DeviceIndexCache*>& other_device_caches = (CacheType::query_cache == cache_type) ? target_device_caches_ : query_device_caches_; device_cache_t& indices_kept_on_device = (CacheType::query_cache == cache_type) ? query_indices_kept_on_device_ : target_indices_kept_on_device_; std::shared_ptr<DeviceIndexCache> device_cache = std::make_shared<DeviceIndexCache>(cache_type, this); for (const IndexDescriptor& descriptor : descriptors_of_indices_to_cache) { std::shared_ptr<Index> device_index = nullptr; // check if index was kept on device after creation auto index_kept_on_device = indices_kept_on_device.find(descriptor); if (index_kept_on_device != indices_kept_on_device.end()) { device_index = index_kept_on_device->second; indices_kept_on_device.erase(descriptor); } // check if value is already in this cache if (!device_index) { for (const DeviceIndexCache* const existing_cache : this_device_caches) { if (existing_cache != device_cache.get() && existing_cache->has_index(descriptor)) { device_index = existing_cache->get_index_no_check_if_ready(descriptor); break; } } } // if query and target files are the same check the other index as well if (!device_index && same_query_and_target_) { for (const DeviceIndexCache* const existing_cache : other_device_caches) { if (existing_cache->has_index(descriptor)) { device_index = existing_cache->get_index_no_check_if_ready(descriptor); break; } } } // if index has not been found on device copy it from host if (!device_index) { const auto index_on_host_iter = host_cache.find(descriptor); if (index_on_host_iter == host_cache.end()) { throw IndexNotFoundException(cache_type, IndexNotFoundException::IndexLocation::host_cache, descriptor); } device_index = index_on_host_iter->second->copy_index_to_device(allocator_, cuda_stream_copy_); } assert(device_index); device_cache->add_index(descriptor, device_index); } return device_cache; } void HostIndexCache::register_device_cache(const CacheType cache_type, DeviceIndexCache* index_cache) { assert(cache_type == CacheType::query_cache || cache_type == CacheType::target_cache); std::vector<DeviceIndexCache*>& device_caches = cache_type == CacheType::query_cache ? 
query_device_caches_ : target_device_caches_; device_caches.push_back(index_cache); } void HostIndexCache::deregister_device_cache(const CacheType cache_type, DeviceIndexCache* index_cache) { assert(cache_type == CacheType::query_cache || cache_type == CacheType::target_cache); std::vector<DeviceIndexCache*>& device_caches = cache_type == CacheType::query_cache ? query_device_caches_ : target_device_caches_; auto new_end = std::remove(begin(device_caches), end(device_caches), index_cache); device_caches.erase(new_end, end(device_caches)); } IndexNotFoundException::IndexNotFoundException(const CacheType cache_type, const IndexLocation index_location, const IndexDescriptor index_descriptor) : message_(std::string(cache_type == CacheType::query_cache ? "Query " : "Target ") + "index not found in " + std::string(index_location == IndexLocation::host_cache ? "host " : "device ") + "cache. First read: " + std::to_string(index_descriptor.first_read()) + ", number of reads: " + std::to_string(index_descriptor.number_of_reads())) { } const char* IndexNotFoundException::what() const noexcept { return message_.c_str(); } DeviceCacheNotReadyException::DeviceCacheNotReadyException(const CacheType cache_type, const IndexDescriptor index_descriptor) : message_("Cache for " + std::string(cache_type == CacheType::query_cache ? "query " : "target ") + "index is not ready. First read: " + std::to_string(index_descriptor.first_read()) + ", number of reads: " + std::to_string(index_descriptor.number_of_reads())) { } const char* DeviceCacheNotReadyException::what() const noexcept { return message_.c_str(); } } // namespace cudamapper } // namespace genomeworks } // namespace claraparabricks
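// ---------------------------------------------------------------------------
// A minimal, self-contained sketch of the overlap pattern used by the index
// cache above: while index i is being copied device->host on a copy stream,
// index i+1 is generated on a generation stream, and only two device buffers
// are alive at any time. This is NOT the GenomeWorks API; generate_index,
// kNumIndices and the buffer handling are hypothetical placeholders chosen
// only to illustrate the stream/event choreography.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>
#include <cstdio>
#include <vector>

// Stand-in for "index generation": fills a device buffer on the generation stream.
__global__ void generate_index(int* data, int n, int seed) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] = seed + i;
}

int main() {
    const int kNumIndices = 4;
    const int kElems      = 1 << 20;

    cudaStream_t gen_stream, copy_stream;
    cudaStreamCreate(&gen_stream);
    cudaStreamCreate(&copy_stream);

    // Two device buffers: one being generated, one being copied to host.
    int* d_buf[2];
    cudaMalloc(&d_buf[0], kElems * sizeof(int));
    cudaMalloc(&d_buf[1], kElems * sizeof(int));

    // Pinned host buffers so cudaMemcpyAsync can overlap with kernel execution.
    std::vector<int*> h_copies(kNumIndices);
    for (int i = 0; i < kNumIndices; ++i)
        cudaMallocHost(&h_copies[i], kElems * sizeof(int));

    cudaEvent_t gen_done[2], copy_done[2];
    for (int s = 0; s < 2; ++s) {
        cudaEventCreate(&gen_done[s]);
        cudaEventCreate(&copy_done[s]);
    }

    for (int i = 0; i < kNumIndices; ++i) {
        const int slot = i % 2;
        // Before reusing this buffer, wait for the copy that last used it
        // (the analogue of index_on_host_copy_in_flight->finish_copying()).
        if (i >= 2) cudaEventSynchronize(copy_done[slot]);

        generate_index<<<(kElems + 255) / 256, 256, 0, gen_stream>>>(
                d_buf[slot], kElems, i);
        cudaEventRecord(gen_done[slot], gen_stream);

        // The copy may only start once generation of this buffer has finished,
        // but it runs on its own stream so the next generation can overlap it.
        cudaStreamWaitEvent(copy_stream, gen_done[slot], 0);
        cudaMemcpyAsync(h_copies[i], d_buf[slot], kElems * sizeof(int),
                        cudaMemcpyDeviceToHost, copy_stream);
        cudaEventRecord(copy_done[slot], copy_stream);
    }

    // Wait for the last copy to finish, as the cache code does after its loop.
    cudaStreamSynchronize(copy_stream);
    printf("first element of last index: %d\n", h_copies[kNumIndices - 1][0]);

    for (int* p : h_copies) cudaFreeHost(p);
    for (int s = 0; s < 2; ++s) {
        cudaEventDestroy(gen_done[s]);
        cudaEventDestroy(copy_done[s]);
        cudaFree(d_buf[s]);
    }
    cudaStreamDestroy(gen_stream);
    cudaStreamDestroy(copy_stream);
    return 0;
}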
namespace faiss { namespace gpu { // Kernel responsible for calculating distance from residual vector to // each product quantizer code centroid template < typename OutCodeT, typename CentroidT, int DimsPerSubQuantizer, bool L2Distance> __global__ void __launch_bounds__(288, 3) pqCodeDistances( Tensor<float, 2, true> queries, int queriesPerBlock, Tensor<CentroidT, 2, true> coarseCentroids, Tensor<float, 3, true> pqCentroids, Tensor<int, 2, true> coarseIndices, // (query id)(coarse)(subquantizer)(code) -> dist Tensor<OutCodeT, 4, true> outCodeDistances) { const auto numSubQuantizers = pqCentroids.getSize(0); const auto dimsPerSubQuantizer = pqCentroids.getSize(1); assert(DimsPerSubQuantizer == dimsPerSubQuantizer); const auto codesPerSubQuantizer = pqCentroids.getSize(2); bool isLoadingThread = threadIdx.x >= codesPerSubQuantizer; int loadingThreadId = threadIdx.x - codesPerSubQuantizer; extern __shared__ float smem[]; // Each thread calculates a single code float subQuantizerData[DimsPerSubQuantizer]; auto code = threadIdx.x; auto subQuantizer = blockIdx.y; // Each thread will load the pq centroid data for the code that it // is processing // The loading threads are out of bounds for the number of codes available if (!isLoadingThread) { #pragma unroll for (int i = 0; i < DimsPerSubQuantizer; ++i) { subQuantizerData[i] = pqCentroids[subQuantizer][i][code].ldg(); } } // Where we store our query vector float* smemQuery = smem; // Where we store our residual vector; this is double buffered so we // can be loading the next one while processing the current one float* smemResidual1 = &smemQuery[DimsPerSubQuantizer]; float* smemResidual2 = &smemResidual1[DimsPerSubQuantizer]; // Where we pre-load the coarse centroid IDs int* coarseIds = (int*)&smemResidual2[DimsPerSubQuantizer]; // Each thread is calculating the distance for a single code, // performing the reductions locally // Handle multiple queries per block auto startQueryId = blockIdx.x * queriesPerBlock; auto numQueries = queries.getSize(0) - startQueryId; if (numQueries > queriesPerBlock) { numQueries = queriesPerBlock; } for (int query = 0; query < numQueries; ++query) { auto queryId = startQueryId + query; auto querySubQuantizer = queries[queryId][subQuantizer * DimsPerSubQuantizer].data(); // Load current query vector for (int i = threadIdx.x; i < DimsPerSubQuantizer; i += blockDim.x) { smemQuery[i] = querySubQuantizer[i]; } // Load list of coarse centroids found for (int i = threadIdx.x; i < coarseIndices.getSize(1); i += blockDim.x) { coarseIds[i] = coarseIndices[queryId][i]; } // We need coarseIds below // FIXME: investigate loading separately, so we don't need this __syncthreads(); // Preload first buffer of residual data if (isLoadingThread) { for (int i = loadingThreadId; i < DimsPerSubQuantizer; i += blockDim.x - codesPerSubQuantizer) { auto coarseId = coarseIds[0]; // In case NaNs were in the original query data coarseId = coarseId == -1 ? 
0 : coarseId; auto coarseCentroidSubQuantizer = coarseCentroids[coarseId] [subQuantizer * dimsPerSubQuantizer] .data(); if (L2Distance) { smemResidual1[i] = smemQuery[i] - ConvertTo<float>::to(coarseCentroidSubQuantizer[i]); } else { smemResidual1[i] = ConvertTo<float>::to(coarseCentroidSubQuantizer[i]); } } } // The block walks the list for a single query for (int coarse = 0; coarse < coarseIndices.getSize(1); ++coarse) { // Wait for smemResidual1 to be loaded __syncthreads(); if (isLoadingThread) { // Preload second buffer of residual data for (int i = loadingThreadId; i < DimsPerSubQuantizer; i += blockDim.x - codesPerSubQuantizer) { // FIXME: try always making this centroid id 0 so we can // terminate if (coarse != (coarseIndices.getSize(1) - 1)) { auto coarseId = coarseIds[coarse + 1]; // In case NaNs were in the original query data coarseId = coarseId == -1 ? 0 : coarseId; auto coarseCentroidSubQuantizer = coarseCentroids[coarseId] [subQuantizer * dimsPerSubQuantizer] .data(); if (L2Distance) { smemResidual2[i] = smemQuery[i] - ConvertTo<float>::to( coarseCentroidSubQuantizer[i]); } else { smemResidual2[i] = ConvertTo<float>::to( coarseCentroidSubQuantizer[i]); } } } } else { // These are the processing threads float dist = 0.0f; constexpr int kUnroll = 4; constexpr int kRemainder = DimsPerSubQuantizer % kUnroll; constexpr int kRemainderBase = DimsPerSubQuantizer - kRemainder; float vals[kUnroll]; // Calculate residual - pqCentroid for each dim that we're // processing // Unrolled loop if (L2Distance) { #pragma unroll for (int i = 0; i < DimsPerSubQuantizer / kUnroll; ++i) { #pragma unroll for (int j = 0; j < kUnroll; ++j) { vals[j] = smemResidual1[i * kUnroll + j]; } #pragma unroll for (int j = 0; j < kUnroll; ++j) { vals[j] -= subQuantizerData[i * kUnroll + j]; } #pragma unroll for (int j = 0; j < kUnroll; ++j) { vals[j] *= vals[j]; } #pragma unroll for (int j = 0; j < kUnroll; ++j) { dist += vals[j]; } } } else { // Inner product: query slice against the reconstructed // sub-quantizer for this coarse cell (query o (centroid + // subQCentroid)) #pragma unroll for (int i = 0; i < DimsPerSubQuantizer / kUnroll; ++i) { #pragma unroll for (int j = 0; j < kUnroll; ++j) { vals[j] = smemResidual1[i * kUnroll + j]; } #pragma unroll for (int j = 0; j < kUnroll; ++j) { vals[j] += subQuantizerData[i * kUnroll + j]; } #pragma unroll for (int j = 0; j < kUnroll; ++j) { vals[j] *= smemQuery[i * kUnroll + j]; } #pragma unroll for (int j = 0; j < kUnroll; ++j) { dist += vals[j]; } } } // Remainder loop if (L2Distance) { #pragma unroll for (int j = 0; j < kRemainder; ++j) { vals[j] = smemResidual1[kRemainderBase + j]; } #pragma unroll for (int j = 0; j < kRemainder; ++j) { vals[j] -= subQuantizerData[kRemainderBase + j]; } #pragma unroll for (int j = 0; j < kRemainder; ++j) { vals[j] *= vals[j]; } } else { // Inner product // Inner product: query slice against the reconstructed // sub-quantizer for this coarse cell (query o (centroid + // subQCentroid)) #pragma unroll for (int j = 0; j < kRemainder; ++j) { vals[j] = smemResidual1[kRemainderBase + j]; } #pragma unroll for (int j = 0; j < kRemainder; ++j) { vals[j] += subQuantizerData[kRemainderBase + j]; } #pragma unroll for (int j = 0; j < kRemainder; ++j) { vals[j] *= smemQuery[kRemainderBase + j]; } } #pragma unroll for (int j = 0; j < kRemainder; ++j) { dist += vals[j]; } // We have the distance for our code; write it out outCodeDistances[queryId][coarse][subQuantizer][code] = ConvertTo<OutCodeT>::to(dist); } // !isLoadingThread // Swap residual 
buffers float* tmp = smemResidual1; smemResidual1 = smemResidual2; smemResidual2 = tmp; } } } template <typename CentroidT, bool L2Residual> __global__ void pqResidualVector( Tensor<float, 2, true> queries, Tensor<CentroidT, 2, true> coarseCentroids, Tensor<int, 2, true> coarseIndices, int numSubDim, // output is transposed: // (sub q)(query id)(centroid id)(sub dim) Tensor<float, 4, true> residual) { auto queryId = blockIdx.x; auto centroidId = blockIdx.y; int realCentroidId = coarseIndices[queryId][centroidId]; for (int dim = threadIdx.x; dim < queries.getSize(1); dim += blockDim.x) { float q = queries[queryId][dim]; float c = ConvertTo<float>::to(coarseCentroids[realCentroidId][dim]); float r; if (L2Residual) { r = q - c; } else { // IP does not use a residual. Instead, the estimated distance is // (query . (centroid + sub quantizer centroid). // // This kernel is used to calculate (query . sub quantizer // centroid), providing the query value replicated across all of the // sub quantizers. The batch matrix multiplication in // runPQCodeDistancesMM will perform this inner product. The // adjustment (query . centroid) is added later. r = q; } residual[dim / numSubDim][queryId][centroidId][dim % numSubDim] = r; } } template <typename CentroidT> void runPQResidualVector( Tensor<float, 3, true>& pqCentroids, Tensor<float, 2, true>& queries, Tensor<CentroidT, 2, true>& coarseCentroids, Tensor<int, 2, true>& coarseIndices, Tensor<float, 4, true>& residual, bool l2Residual, cudaStream_t stream) { auto grid = dim3(coarseIndices.getSize(0), coarseIndices.getSize(1)); auto block = dim3(std::min(queries.getSize(1), getMaxThreadsCurrentDevice())); if (l2Residual) { pqResidualVector<CentroidT, true><<<grid, block, 0, stream>>>( queries, coarseCentroids, coarseIndices, pqCentroids.getSize(1), residual); } else { pqResidualVector<CentroidT, false><<<grid, block, 0, stream>>>( queries, coarseCentroids, coarseIndices, pqCentroids.getSize(1), residual); } CUDA_TEST_ERROR(); } template <typename T> __global__ void pqDistanceIPCorrection( Tensor<T, 3, true> codeDistances, Tensor<T, 2, true> coarseDistances, int numSubQ) { int centroid = blockIdx.x; int query = blockIdx.y; // We need to add the (query . centroid) correction factor (coarseDistances) // to all output code distances (q)(c)(sub q)(code). // However, there are numSubQ code distance sums per each approximated // distance, so we need to divide this correction by numSubQ since we will // be adding it numSubQ times. auto d = coarseDistances[query][centroid] / (float)numSubQ; auto base = codeDistances[query][centroid].data(); for (int i = threadIdx.x; i < codeDistances.getSize(2); i += blockDim.x) { base[i] += d; } } // We have previously calculated (query . sub quantizer centroid), but we // need to calculate (query . (centroid + sub quantizer centroid). This will add // in the correction factor to each calculated code distance. 
template <typename T> void runPQDistanceIPCorrection( Tensor<T, 4, true>& codeDistances, Tensor<T, 2, true>& coarseDistances, cudaStream_t stream) { auto grid = dim3(coarseDistances.getSize(1), coarseDistances.getSize(0)); auto block = 512; auto codeView = codeDistances.template downcastInner<3>(); pqDistanceIPCorrection<<<grid, block, 0, stream>>>( codeView, coarseDistances, codeDistances.getSize(2)); } // This is a general purpose implementation that leverages GEMM to calculate // code distances for PQ codes for any number of dimensions per sub-quantizer / // number of sub-quantizers template <typename CentroidT> void runPQCodeDistancesMM( GpuResources* res, Tensor<float, 3, true>& pqCentroids, Tensor<float, 2, true>& queries, Tensor<CentroidT, 2, true>& coarseCentroids, Tensor<float, 2, true>& coarseDistances, Tensor<int, 2, true>& coarseIndices, // Output is (query)(centroid)(sub q)(code) NoTypeTensor<4, true>& outCodeDistances, bool l2Distance, bool useFloat16Lookup, cudaStream_t stream) { // We construct our float32 output in outCodeDistancesF Tensor<float, 4, true> outCodeDistancesF; DeviceTensor<float, 4, true> outCodeDistancesFloatMem; if (useFloat16Lookup) { // outCodeDistances has half memory, we need to allocate a buffer for // float outCodeDistancesFloatMem = DeviceTensor<float, 4, true>( res, makeTempAlloc(AllocType::Other, stream), {outCodeDistances.getSize(0), outCodeDistances.getSize(1), outCodeDistances.getSize(2), outCodeDistances.getSize(3)}); outCodeDistancesF = outCodeDistancesFloatMem; } else { // We can use the memory that we were given outCodeDistancesF = outCodeDistances.toTensor<float>(); } // Calculate (q - c) residual vector if L2. Otherwise, for IP, this kernel // will just replicate q // // (sub q)(query id)(centroid id)(sub dim) DeviceTensor<float, 4, true> residual( res, makeTempAlloc(AllocType::Other, stream), {pqCentroids.getSize(0), coarseIndices.getSize(0), coarseIndices.getSize(1), pqCentroids.getSize(1)}); runPQResidualVector( pqCentroids, queries, coarseCentroids, coarseIndices, residual, l2Distance, stream); // Perform a batch MM: // (sub q) x {(q * c)(sub dim) x (sub dim)(code)} => // (sub q) x {(q * c)(code)} auto residualView3 = residual.view<3>( {pqCentroids.getSize(0), coarseIndices.getSize(0) * coarseIndices.getSize(1), pqCentroids.getSize(1)}); DeviceTensor<float, 3, true> residualDistance( res, makeTempAlloc(AllocType::Other, stream), {pqCentroids.getSize(0), coarseIndices.getSize(0) * coarseIndices.getSize(1), pqCentroids.getSize(2)}); runBatchMatrixMult( residualDistance, false, residualView3, false, pqCentroids, false, l2Distance ? -2.0f : 1.0f, 0.0f, res->getBlasHandleCurrentDevice(), stream); if (l2Distance) { // Calculate ||q - c||^2 DeviceTensor<float, 1, true> residualNorms( res, makeTempAlloc(AllocType::Other, stream), {pqCentroids.getSize(0) * coarseIndices.getSize(0) * coarseIndices.getSize(1)}); auto residualView2 = residual.view<2>( {pqCentroids.getSize(0) * coarseIndices.getSize(0) * coarseIndices.getSize(1), pqCentroids.getSize(1)}); runL2Norm(residualView2, true, residualNorms, true, stream); // Sum ||q - c||^2 along rows auto residualDistanceView2 = residualDistance.view<2>( {pqCentroids.getSize(0) * coarseIndices.getSize(0) * coarseIndices.getSize(1), pqCentroids.getSize(2)}); runSumAlongRows(residualNorms, residualDistanceView2, false, stream); } // Transpose (sub q)(q * c)(code) to (q * c)(sub q)(code) (which // is where we build our output distances). 
L2 version of this has an added // -2 multiplicative factor auto outCodeDistancesView = outCodeDistancesF.view<3>( {coarseIndices.getSize(0) * coarseIndices.getSize(1), outCodeDistances.getSize(2), outCodeDistances.getSize(3)}); runTransposeAny(residualDistance, 0, 1, outCodeDistancesView, stream); if (l2Distance) { // Calculate code norms per each sub-dim // (sub q)(sub dim)(code) is pqCentroids // transpose to (sub q)(code)(sub dim) DeviceTensor<float, 3, true> pqCentroidsTranspose( res, makeTempAlloc(AllocType::Other, stream), {pqCentroids.getSize(0), pqCentroids.getSize(2), pqCentroids.getSize(1)}); runTransposeAny(pqCentroids, 1, 2, pqCentroidsTranspose, stream); auto pqCentroidsTransposeView = pqCentroidsTranspose.view<2>( {pqCentroids.getSize(0) * pqCentroids.getSize(2), pqCentroids.getSize(1)}); // The norm of each (sub q)(code) DeviceTensor<float, 1, true> pqCentroidsNorm( res, makeTempAlloc(AllocType::Other, stream), {pqCentroids.getSize(0) * pqCentroids.getSize(2)}); runL2Norm( pqCentroidsTransposeView, true, pqCentroidsNorm, true, stream); // View output as (q * c)(sub q * code), and add centroid norm to // each row auto outDistancesCodeViewCols = outCodeDistancesView.view<2>( {coarseIndices.getSize(0) * coarseIndices.getSize(1), outCodeDistances.getSize(2) * outCodeDistances.getSize(3)}); runSumAlongColumns(pqCentroidsNorm, outDistancesCodeViewCols, stream); } else { // We have previously calculated (query . sub quantizer centroid), but // we need to calculate (query . (centroid + sub quantizer centroid). // // We need to add the (query . centroid) correction factor // (coarseDistances) to all output code distances (q)(c)(sub q)(code). runPQDistanceIPCorrection(outCodeDistancesF, coarseDistances, stream); } HostTensor<float, 4, true> debugT(outCodeDistancesF, stream); if (useFloat16Lookup) { // Need to convert back to half in the output memory auto outCodeDistancesH = outCodeDistances.toTensor<half>(); convertTensor<float, half, 4>( stream, outCodeDistancesF, outCodeDistancesH); } } // Must be kept in sync with runPQDistances inline bool isSpecializedPQCodeDistanceDims(int dims) { switch (dims) { case 1: case 2: case 3: case 4: case 5: case 6: case 8: case 10: case 12: case 16: case 20: case 24: case 28: case 32: return true; default: return false; } } template <typename CentroidT> void runPQCodeDistances( GpuResources* res, Tensor<float, 3, true>& pqCentroids, Tensor<float, 2, true>& queries, Tensor<CentroidT, 2, true>& coarseCentroids, Tensor<float, 2, true>& coarseDistances, Tensor<int, 2, true>& coarseIndices, NoTypeTensor<4, true>& outCodeDistances, bool useMMImplementation, bool l2Distance, bool useFloat16Lookup, cudaStream_t stream) { const auto numSubQuantizers = pqCentroids.getSize(0); const auto dimsPerSubQuantizer = pqCentroids.getSize(1); const auto codesPerSubQuantizer = pqCentroids.getSize(2); // Only a certain number of dimensions per sub quantizer are supported by // the specialized implementation. Every other value falls back to the // generalized MM implementation. 
if (!isSpecializedPQCodeDistanceDims(dimsPerSubQuantizer) || useMMImplementation) { // Use the general purpose matrix multiplication implementation which // handles any number of sub-quantizers and dimensions per sub-quantizer runPQCodeDistancesMM<CentroidT>( res, pqCentroids, queries, coarseCentroids, coarseDistances, coarseIndices, outCodeDistances, l2Distance, useFloat16Lookup, stream); return; } // FIXME: tune // Reuse of pq centroid data is based on both # of queries * nprobe, // and we should really be tiling in both dimensions constexpr int kQueriesPerBlock = 8; auto grid = dim3(utils::divUp(queries.getSize(0), kQueriesPerBlock), numSubQuantizers); // Reserve one block of threads for double buffering // FIXME: probably impractical for large # of dims? auto loadingThreads = utils::roundUp(dimsPerSubQuantizer, kWarpSize); auto block = dim3(codesPerSubQuantizer + loadingThreads); auto smem = (3 * dimsPerSubQuantizer) * sizeof(float) + coarseIndices.getSize(1) * sizeof(int); #define RUN_CODE(DIMS, L2) \ do { \ if (useFloat16Lookup) { \ auto outCodeDistancesT = outCodeDistances.toTensor<half>(); \ \ pqCodeDistances<half, CentroidT, DIMS, L2> \ <<<grid, block, smem, stream>>>( \ queries, \ kQueriesPerBlock, \ coarseCentroids, \ pqCentroids, \ coarseIndices, \ outCodeDistancesT); \ } else { \ auto outCodeDistancesT = outCodeDistances.toTensor<float>(); \ \ pqCodeDistances<float, CentroidT, DIMS, L2> \ <<<grid, block, smem, stream>>>( \ queries, \ kQueriesPerBlock, \ coarseCentroids, \ pqCentroids, \ coarseIndices, \ outCodeDistancesT); \ } \ } while (0) #define CODE_L2(DIMS) \ do { \ if (l2Distance) { \ RUN_CODE(DIMS, true); \ } else { \ RUN_CODE(DIMS, false); \ } \ } while (0) switch (dimsPerSubQuantizer) { case 1: CODE_L2(1); break; case 2: CODE_L2(2); break; case 3: CODE_L2(3); break; case 4: CODE_L2(4); break; case 5: CODE_L2(5); break; case 6: CODE_L2(6); break; case 8: CODE_L2(8); break; case 10: CODE_L2(10); break; case 12: CODE_L2(12); break; case 16: CODE_L2(16); break; case 20: CODE_L2(20); break; case 24: CODE_L2(24); break; case 28: CODE_L2(28); break; case 32: CODE_L2(32); break; default: // This should not be reached, we should fall back to the MM // implementation FAISS_ASSERT(false); break; } #undef RUN_CODE #undef CODE_L2 CUDA_TEST_ERROR(); } } // namespace gpu } // namespace faiss
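// ---------------------------------------------------------------------------
// A small CPU reference (an illustrative sketch, not part of the faiss API)
// of the decomposition the general-purpose MM path above relies on for L2:
//     || r - c ||^2 = ||r||^2 - 2 * (r . c) + ||c||^2
// where r is the per-sub-quantizer residual (query minus coarse-centroid
// slice) and c is one PQ code centroid. The batched GEMM computes the
// -2 * (r . c) term; the residual norms are added along rows and the code
// norms along columns afterwards. dimsPerSubQ and codes below are arbitrary
// illustrative sizes.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <cmath>
#include <random>
#include <vector>

int main() {
    const int dimsPerSubQ = 8;   // dimensions handled by one sub-quantizer
    const int codes       = 16;  // centroids per sub-quantizer

    std::mt19937 rng(0);
    std::uniform_real_distribution<float> dist(-1.f, 1.f);

    std::vector<float> residual(dimsPerSubQ);
    std::vector<float> centroids(codes * dimsPerSubQ);
    for (auto& v : residual)  v = dist(rng);
    for (auto& v : centroids) v = dist(rng);

    float rNorm = 0.f;
    for (float v : residual) rNorm += v * v;

    for (int code = 0; code < codes; ++code) {
        const float* c = &centroids[code * dimsPerSubQ];

        // Direct distance, as the specialized kernel computes it.
        float direct = 0.f;
        for (int d = 0; d < dimsPerSubQ; ++d) {
            float diff = residual[d] - c[d];
            direct += diff * diff;
        }

        // Decomposed form used by the GEMM-based path.
        float dot = 0.f, cNorm = 0.f;
        for (int d = 0; d < dimsPerSubQ; ++d) {
            dot   += residual[d] * c[d];
            cNorm += c[d] * c[d];
        }
        float decomposed = rNorm - 2.f * dot + cNorm;

        printf("code %2d: direct %.6f  decomposed %.6f  diff %.2e\n",
               code, direct, decomposed, std::fabs(direct - decomposed));
    }
    return 0;
}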
using namespace megdnn; using namespace cuda; using namespace relayout_format; using namespace internal; namespace { template < bool with_pad, int pack_w, int pack_c, bool same_scale, bool all_pad, typename SrcType, typename DstType, typename DnnSrcType, typename DnnDstType> struct RelayoutKern { using InnerDtype = typename DTypeRWHelper< typename DTypeTrait<DnnSrcType>::ctype, pack_w>::InnerDtype; using DstDtype = typename DTypeRWHelper< typename DTypeTrait<DnnSrcType>::ctype, pack_w>::DstDtype; static inline __device__ void write( DstDtype* dst_ptr, DstDtype (&dst_width)[pack_w]) { DstDtype* dst_inner_ptr = (DstDtype*)dst_ptr; #pragma unroll for (int iw_idx = 0; iw_idx < pack_w; ++iw_idx) { write_helper(dst_inner_ptr + iw_idx, dst_width[iw_idx]); } } static inline __device__ void read( const SrcType* src_ptr, InnerDtype (&read_channel)[pack_c], const int ic_stride) { #pragma unroll for (int ic_idx = 0; ic_idx < pack_c; ++ic_idx) { read_channel[ic_idx] = *(InnerDtype*)(src_ptr + ic_idx * ic_stride); } } static inline __device__ void read_with_pad( const SrcType* src_ptr, InnerDtype (&read_channel)[pack_c], const int ic_stride, const int remain_ic, const InnerDtype zero_point) { #pragma unroll for (int ic_idx = 0; ic_idx < pack_c; ++ic_idx) { read_channel[ic_idx] = ic_idx < remain_ic ? *(InnerDtype*)(src_ptr + ic_idx * ic_stride) : zero_point; } } static inline __device__ void fake_read( const SrcType* src_ptr, InnerDtype (&read_channel)[pack_c], const int ic_stride, const int remain_ic, const InnerDtype zero_point) { #pragma unroll for (int ic_idx = 0; ic_idx < pack_c; ++ic_idx) { read_channel[ic_idx] = zero_point; } } static inline __device__ void core_relayout_kern( const SrcType* src, DstType* dst, const int ic_stride, const int remain_ic, CudaPostProcess<DnnSrcType, DnnDstType, same_scale>& post_process, const uint8_t zero_point) { InnerDtype read_channel[pack_c]; if (all_pad) { const InnerDtype zero_pad = make_zero_pad<InnerDtype>(zero_point); fake_read(src, read_channel, ic_stride, remain_ic, zero_pad); } else { if (with_pad) { const InnerDtype zero_pad = make_zero_pad<InnerDtype>(zero_point); read_with_pad(src, read_channel, ic_stride, remain_ic, zero_pad); } else { read(src, read_channel, ic_stride); } } DstDtype dst_width[pack_w]; Translayout<pack_w, pack_c, SrcType, DnnSrcType, DnnDstType, same_scale>::trans( dst_width, read_channel, post_process, zero_point); write(reinterpret_cast<DstDtype*>(dst), dst_width); } }; template < int pack_w, int pack_c, bool same_scale, typename SrcType, typename DstType, typename DnnSrcType, typename DnnDstType, int size_nbits = 8> __global__ void kern_nchw_nchwx( const SrcType* src, DstType* dst, int in_n, int ic, int ihw, int n_stride_src, int ic_stride, int n_stride_dst, int oc_stride, CudaPostProcess<DnnSrcType, DnnDstType, same_scale> post_process, const uint8_t zero_point, const int group, const int ocpg) { static constexpr int size_src_type = sizeof(SrcType); static constexpr int size_dst_type = sizeof(DstType); #ifndef MEGDNN_COMMA #define MEGDNN_COMMA , #endif MEGDNN_STATIC_ASSERT( std::is_same<SrcType MEGDNN_COMMA DstType>::value, "Currently this kernel only support accessing tensor " "src and dst in same data type."); n_stride_src /= size_src_type; ic_stride /= size_src_type; n_stride_dst /= size_dst_type; oc_stride /= size_dst_type; const int n_idx = blockIdx.y; const int ihw_block_idx = blockIdx.x * blockDim.x + threadIdx.x; const int ihw_offset = ihw_block_idx * pack_w; const int ihw_offset_in_type = ihw_offset * size_nbits / (8 * 
size_src_type); if (ihw_offset < ihw) { const int src_offset_base = n_idx * n_stride_src + ihw_offset_in_type; const int dst_offset_base = n_idx * n_stride_dst + ihw_offset_in_type * pack_c; if (n_idx < in_n) { const int icpg = ic / group; const int ic_block = icpg / pack_c; const int remain_ic = icpg % pack_c; const int src_group_stride = icpg * ic_stride; const int dst_group_stride = (ocpg / pack_c) * oc_stride; for (int g_idx = 0; g_idx < group; ++g_idx) { const int src_offset = src_offset_base + g_idx * src_group_stride; const int dst_offset = dst_offset_base + g_idx * dst_group_stride; for (int ic_blk_idx = 0; ic_blk_idx < ic_block; ++ic_blk_idx) { const int ic_offset = ic_blk_idx * pack_c * ic_stride; const int oc_offset = ic_blk_idx * oc_stride; RelayoutKern< false, pack_w, pack_c, same_scale, false, SrcType, DstType, DnnSrcType, DnnDstType>:: core_relayout_kern( src + src_offset + ic_offset, dst + dst_offset + oc_offset, ic_stride, remain_ic, post_process, zero_point); } if (remain_ic > 0) { const int ic_offset = ic_block * pack_c * ic_stride; const int oc_offset = ic_block * oc_stride; RelayoutKern< true, pack_w, pack_c, same_scale, false, SrcType, DstType, DnnSrcType, DnnDstType>:: core_relayout_kern( src + src_offset + ic_offset, dst + dst_offset + oc_offset, ic_stride, remain_ic, post_process, zero_point); } } } else { //! pad n const int ic_full_block = group * ocpg / pack_c; for (int ic_blk_idx = 0; ic_blk_idx < ic_full_block; ++ic_blk_idx) { RelayoutKern< false, pack_w, pack_c, same_scale, true, SrcType, DstType, DnnSrcType, DnnDstType>:: core_relayout_kern( src + src_offset_base, dst + dst_offset_base, ic_stride, 0, post_process, zero_point); } } } } __global__ void kern_nchw4_nchw( const int8_t* src, int8_t* dst, int n, int ic, int oc, int oh, int ow, int group) { constexpr int pack_w = 1; constexpr int pack_ic = 4; const int n_idx = blockIdx.y; const int hw_block_idx = blockIdx.x * blockDim.x + threadIdx.x; const int hw_offset = hw_block_idx * pack_w; const int hw = oh * ow; const int n_stride_src = ic * hw; const int n_stride_dst = oc * hw; const int c_stride = hw; if (hw_offset < hw) { const int icpg = ic / group; const int ocpg = oc / group; const int src_group_stride = icpg * c_stride; const int dst_group_stride = ocpg * c_stride; for (int g_idx = 0; g_idx < group; ++g_idx) { const int oc_block = ocpg / pack_ic; const int remain_oc = ocpg % pack_ic; const int src_offset_base = n_idx * n_stride_src + g_idx * src_group_stride + hw_offset * pack_ic; const int dst_offset_base = n_idx * n_stride_dst + g_idx * dst_group_stride + hw_offset; for (int ic_blk_idx = 0; ic_blk_idx < oc_block; ++ic_blk_idx) { const int oc_offset = ic_blk_idx * pack_ic * c_stride; char4 temp = *(char4*)(src + src_offset_base + oc_offset); dst[dst_offset_base + oc_offset + 0 * c_stride] = temp.x; dst[dst_offset_base + oc_offset + 1 * c_stride] = temp.y; dst[dst_offset_base + oc_offset + 2 * c_stride] = temp.z; dst[dst_offset_base + oc_offset + 3 * c_stride] = temp.w; } if (remain_oc > 0) { const int oc_offset = oc_block * pack_ic * c_stride; char4 temp = *(char4*)(src + src_offset_base + oc_offset); dst[dst_offset_base + oc_offset + 0 * c_stride] = temp.x; if (remain_oc > 1) { dst[dst_offset_base + oc_offset + 1 * c_stride] = temp.y; } if (remain_oc > 2) { dst[dst_offset_base + oc_offset + 2 * c_stride] = temp.z; } } } } } __global__ void kern_nchw_nchw4_weight( const char* src, char* dst, int in_oc, int ic, int ihw, int oc_stride_src, int ic_stride, int oc_stride_dst, int group_stride_src, int 
group_stride_dst, const char zero_point, CudaPostProcess<dtype::QuantizedS8, dtype::QuantizedS8, true> post_process) { typedef char SrcType; typedef char DstType; typedef dtype::QuantizedS8 DnnSrcType; typedef dtype::QuantizedS8 DnnDstType; constexpr int pack_c = 4; constexpr int pack_w = 1; constexpr bool same_scale = true; const int group_idx = blockIdx.z; const int oc_idx = blockIdx.y; const int ihw_block_idx = blockIdx.x * blockDim.x + threadIdx.x; const int ihw_offset = ihw_block_idx * pack_w; if (ihw_offset < ihw) { const int ic_block = ic / pack_c; const int remain_ic = ic % pack_c; const int src_offset_base = group_idx * group_stride_src + oc_idx * oc_stride_src + ihw_offset; const int dst_offset_base = group_idx * group_stride_dst + oc_idx * oc_stride_dst + ihw_offset * pack_c; if (oc_idx < in_oc) { for (int ic_blk_idx = 0; ic_blk_idx < ic_block; ++ic_blk_idx) { const int ic_offset = ic_blk_idx * pack_c * ic_stride; RelayoutKern< false, pack_w, pack_c, same_scale, false, SrcType, DstType, DnnSrcType, DnnDstType>:: core_relayout_kern( src + src_offset_base + ic_offset, dst + dst_offset_base + ic_offset, ic_stride, remain_ic, post_process, zero_point); } if (remain_ic > 0) { const int ic_offset = ic_block * pack_c * ic_stride; RelayoutKern< true, pack_w, pack_c, same_scale, false, SrcType, DstType, DnnSrcType, DnnDstType>:: core_relayout_kern( src + src_offset_base + ic_offset, dst + dst_offset_base + ic_offset, ic_stride, remain_ic, post_process, zero_point); } } else { //! pad oc per group const int ic_full_block = (ic + pack_c - 1) / pack_c; for (int ic_blk_idx = 0; ic_blk_idx < ic_full_block; ++ic_blk_idx) { const int ic_offset = ic_blk_idx * pack_c * ic_stride; RelayoutKern< false, pack_w, pack_c, same_scale, true, SrcType, DstType, DnnSrcType, DnnDstType>:: core_relayout_kern( src + src_offset_base + ic_offset, dst + dst_offset_base + ic_offset, ic_stride, remain_ic, post_process, zero_point); } } } } } // namespace void relayout_format::relayout_format_cuda_nchw_nchwx( const TensorND& src, const TensorND& dst, const cudaStream_t& stream, const float src_scale, const float dst_scale, const uint8_t src_zero_point, const uint8_t dst_zero_point, int group) { auto&& stype = src.layout.dtype; auto&& dtype = dst.layout.dtype; auto& src_layout = src.layout; auto& dst_layout = dst.layout; // check pack size int pack_oc = std::numeric_limits<int>::min(); #define DEF(_pack_oc, _src_type, _dst_type) \ if (stype.enumv().ev == DTypeEnum::Ev::_src_type && \ dtype.enumv().ev == DTypeEnum::Ev::_dst_type) { \ pack_oc = _pack_oc; \ } // clang-format off DEF(64, QuantizedS4, QuantizedS4) DEF(64, Quantized4Asymm, Quantized4Asymm) DEF(4, QuantizedS8, QuantizedS8) DEF(4, Uint8, QuantizedS8) DEF(4, Quantized8Asymm, QuantizedS8) DEF(4, QuantizedS32, QuantizedS32) // clang-format on megdnn_assert( pack_oc == 4 || pack_oc == 64, "Unsupport pack size(pack_oc:%d, src:%s, dst:%s)", pack_oc, stype.name(), dtype.name()); #undef DEF // no padding if (stype.enumv().ev != DTypeEnum::Ev::QuantizedS4 && stype.enumv().ev != DTypeEnum::Ev::Quantized4Asymm) { const int in_n = src.layout[0]; const int out_n = dst.layout[0]; const int ic = src.layout[1]; const int h = src.layout[2]; const int w = src.layout[3]; const int oc = dst.layout[1] * pack_oc; const int hw = h * w; const int ocpg = oc / group; // stride in byte const int n_stride_src = src_layout.dtype.size(src_layout.stride[0]); const int ic_stride = src_layout.dtype.size(src_layout.stride[1]); const int n_stride_dst = 
dst_layout.dtype.size(dst_layout.stride[0]); const int oc_stride = dst_layout.dtype.size(dst_layout.stride[1]); bool same_scale = src_scale == dst_scale; #define DISPATCH_RAW( \ _same_scale, _pack_w, _pack_oc, _src_type, _dst_type, _src_c_type, \ _dst_c_type, _size_nbits) \ if (same_scale == _same_scale && hw % _pack_w == 0 && \ stype.enumv().ev == DTypeEnum::Ev::_src_type && \ dtype.enumv().ev == DTypeEnum::Ev::_dst_type) { \ auto kernel = kern_nchw_nchwx< \ _pack_w, _pack_oc, _same_scale, _src_c_type, _dst_c_type, \ dtype::_src_type, dtype::_dst_type, _size_nbits>; \ int nr_threads = query_blocksize_for_kernel(kernel); \ const dim3 block_dim(DIVUP(hw, nr_threads* _pack_w), out_n); \ const dim3 thread_dim(nr_threads); \ return kernel<<<block_dim, thread_dim, 0, stream>>>( \ (_src_c_type*)src.raw_ptr, (_dst_c_type*)dst.raw_ptr, in_n, ic, hw, \ n_stride_src, ic_stride, n_stride_dst, oc_stride, \ CudaPostProcess<dtype::_src_type, dtype::_dst_type, _same_scale>( \ src_scale, src_zero_point, dst_scale, dst_zero_point), \ src_zero_point, group, ocpg); \ } #define DISPATCH_INT(_src_type, _dst_type) \ DISPATCH_RAW(true, 4, 4, _src_type, _dst_type, int, int, 32); \ DISPATCH_RAW(false, 4, 4, _src_type, _dst_type, int, int, 32); \ DISPATCH_RAW(true, 1, 4, _src_type, _dst_type, int, int, 32); \ DISPATCH_RAW(false, 1, 4, _src_type, _dst_type, int, int, 32); #define DISPATCH_BYTE(_src_type, _dst_type) \ DISPATCH_RAW(true, 4, 4, _src_type, _dst_type, char, char, 8); \ DISPATCH_RAW(false, 4, 4, _src_type, _dst_type, char, char, 8); \ DISPATCH_RAW(true, 1, 4, _src_type, _dst_type, char, char, 8); \ DISPATCH_RAW(false, 1, 4, _src_type, _dst_type, char, char, 8); DISPATCH_INT(QuantizedS32, QuantizedS32); DISPATCH_BYTE(Uint8, QuantizedS8); DISPATCH_BYTE(Quantized8Asymm, QuantizedS8); DISPATCH_BYTE(QuantizedS8, QuantizedS8); #undef DISPATCH_BYTE #undef DISPATCH_INT #undef DISPATCH_RAW megdnn_assert( false, "Unsupported data type(src:%s, dst:%s) or image size(%dx%d).", stype.name(), dtype.name(), h, w); } else { megdnn_assert(src_layout.dtype.is_low_bit()); int n = src.layout[0]; int ic = src.layout[1]; int oc = dst.layout[1] * pack_oc; int h = src.layout[2]; // align to byte int w = src.layout[3]; int w_pad = DIVUP(w, 2) * 2; int hw = h * w_pad; int n_stride_src = src_layout.stride[0]; int ic_stride = src_layout.stride[1]; int n_stride_dst = dst_layout.stride[0]; int oc_stride = dst_layout.stride[1]; int problem_size = n * (oc / pack_oc) * hw; bool same_scale = src_scale == dst_scale; bool padding = w % 2 != 0; #define DISPATCH_RAW( \ _padding, _same_scale, _pack_w, _pack_oc, _src_type, _dst_type, _src_c_type, \ _dst_c_type, _size_nbits) \ if (padding == _padding && same_scale == _same_scale && hw % _pack_w == 0 && \ stype.enumv().ev == DTypeEnum::Ev::_src_type && \ dtype.enumv().ev == DTypeEnum::Ev::_dst_type) { \ using InnerDtype_ = typename DTypeRWHelper< \ typename DTypeTrait<dtype::_src_type>::ctype, _pack_w>::InnerDtype; \ using SrcIterator_ = TensorIteratorOverChannel< \ InnerDtype_, 1, _pack_oc, _pack_w, _size_nbits>; \ using DstIterator_ = typename TensorIteratorPolicy< \ _padding, _dst_c_type, _pack_oc, _pack_oc, _pack_w, \ _size_nbits>::TensorIterator; \ using CudaPostProcess_ = \ CudaPostProcess<dtype::_src_type, dtype::_dst_type, _same_scale>; \ using Transpose_ = Translayout< \ _pack_w, _pack_oc, _src_c_type, dtype::_src_type, dtype::_dst_type, \ _same_scale>; \ using RelayoutProblem_ = RelayoutProblem< \ SrcIterator_, DstIterator_, Transpose_, CudaPostProcess_>; \ n_stride_src = n_stride_src * 
_size_nbits / (8 * sizeof(InnerDtype_)); \ ic_stride = ic_stride * _size_nbits / (8 * sizeof(InnerDtype_)); \ n_stride_dst = n_stride_dst * _size_nbits / (8 * sizeof(_dst_c_type)); \ oc_stride = oc_stride * _size_nbits / (8 * sizeof(_dst_c_type)); \ typename RelayoutProblem_::Param param{ \ SrcIterator_{(InnerDtype_*)src.raw_ptr, ic_stride, ic, w, w_pad}, \ DstIterator_{(_dst_c_type*)dst.raw_ptr, oc_stride, oc, w, w_pad}, \ CudaPostProcess_{ \ src_scale, src_zero_point, dst_scale, dst_zero_point}, \ n_stride_src, \ n_stride_dst, \ n, \ oc, \ hw, \ src_zero_point}; \ auto kernel = relayout_kern<RelayoutProblem_>; \ int nr_threads = query_blocksize_for_kernel(kernel); \ nr_threads = std::min(nr_threads, DIVUP(problem_size, _pack_w)); \ const dim3 block_dim(DIVUP(problem_size, nr_threads* _pack_w)); \ const dim3 thread_dim(nr_threads); \ return kernel<<<block_dim, thread_dim, 0, stream>>>(param); \ } #define DISPATCH_4BITS(_src_type, _dst_type) \ DISPATCH_RAW(true, true, 8, 64, _src_type, _dst_type, char, char, 4); \ DISPATCH_RAW(true, false, 8, 64, _src_type, _dst_type, char, char, 4); \ DISPATCH_RAW(true, true, 2, 64, _src_type, _dst_type, char, char, 4); \ DISPATCH_RAW(true, false, 2, 64, _src_type, _dst_type, char, char, 4); \ DISPATCH_RAW(false, true, 8, 64, _src_type, _dst_type, char, char, 4); \ DISPATCH_RAW(false, false, 8, 64, _src_type, _dst_type, char, char, 4); \ DISPATCH_RAW(false, true, 2, 64, _src_type, _dst_type, char, char, 4); \ DISPATCH_RAW(false, false, 2, 64, _src_type, _dst_type, char, char, 4); DISPATCH_4BITS(QuantizedS4, QuantizedS4); DISPATCH_4BITS(Quantized4Asymm, Quantized4Asymm); #undef DISPATCH_4BITS #undef DISPATCH_RAW megdnn_assert( false, "Unsupported data type(src:%s, dst:%s) or image size(%dx%d).", stype.name(), dtype.name(), h, w); } } void relayout_format::relayout_format_cuda_nchwx_nchw( const TensorND& src, const TensorND& dst, const cudaStream_t& stream, const float src_scale, const float dst_scale, const uint8_t src_zero_point, const uint8_t dst_zero_point) { auto&& stype = src.layout.dtype; auto&& dtype = dst.layout.dtype; auto& src_layout = src.layout; auto& dst_layout = dst.layout; // check pack size int pack_ic = std::numeric_limits<int>::min(); #define DEF(_pack_ic, _src_type, _dst_type) \ if (stype.enumv().ev == DTypeEnum::Ev::_src_type && \ dtype.enumv().ev == DTypeEnum::Ev::_dst_type) { \ pack_ic = _pack_ic; \ } // clang-format off DEF(64, QuantizedS4, QuantizedS4) DEF(64, Quantized4Asymm, Quantized4Asymm) // clang-format on megdnn_assert(pack_ic == 64, "Unsupport pack size(pack_ic:%d)", pack_ic); #undef DEF int n = src.layout[0]; int ic = src.layout[1] * pack_ic; int h = src.layout[2]; // align to byte int w = src.layout[3]; int w_pad = DIVUP(w, 2) * 2; int hw = h * w_pad; int n_stride_src = src_layout.stride[0]; int ic_stride = src_layout.stride[1]; int n_stride_dst = dst_layout.stride[0]; int oc_stride = dst_layout.stride[1]; int problem_size = n * (ic / pack_ic) * hw; int oc = dst.layout[1]; bool same_scale = src_scale == dst_scale; bool padding = w % 2 != 0; #define DISPATCH_RAW( \ _padding, _same_scale, _pack_w, _pack_oc, _src_type, _dst_type, _src_c_type, \ _dst_c_type, _size_nbits) \ if (padding == _padding && same_scale == _same_scale && hw % _pack_w == 0 && \ stype.enumv().ev == DTypeEnum::Ev::_src_type && \ dtype.enumv().ev == DTypeEnum::Ev::_dst_type) { \ using SrcIterator_ = typename TensorIteratorPolicy< \ _padding, _src_c_type, _pack_oc, _pack_oc, _pack_w, \ _size_nbits>::TensorIterator; \ using InnerDtype_ = typename 
DTypeRWHelper< \ typename DTypeTrait<dtype::_src_type>::ctype, _pack_w>::InnerDtype; \ using DstIterator_ = TensorIteratorOverChannel< \ InnerDtype_, 1, _pack_oc, _pack_w, _size_nbits>; \ using CudaPostProcess_ = \ CudaPostProcess<dtype::_src_type, dtype::_dst_type, _same_scale>; \ using Transpose_ = Translayout< \ _pack_oc, _pack_w, _src_c_type, dtype::_src_type, dtype::_dst_type, \ _same_scale>; \ using RelayoutProblem_ = RelayoutProblem< \ SrcIterator_, DstIterator_, Transpose_, CudaPostProcess_>; \ n_stride_src = n_stride_src * _size_nbits / (8 * sizeof(_src_c_type)); \ ic_stride = ic_stride * _size_nbits / (8 * sizeof(_src_c_type)); \ n_stride_dst = n_stride_dst * _size_nbits / (8 * sizeof(InnerDtype_)); \ oc_stride = oc_stride * _size_nbits / (8 * sizeof(InnerDtype_)); \ typename RelayoutProblem_::Param param{ \ SrcIterator_{(_src_c_type*)src.raw_ptr, ic_stride, ic, w, w_pad}, \ DstIterator_{(InnerDtype_*)dst.raw_ptr, oc_stride, oc, w, w_pad}, \ CudaPostProcess_{ \ src_scale, src_zero_point, dst_scale, dst_zero_point}, \ n_stride_src, \ n_stride_dst, \ n, \ ic, \ hw, \ src_zero_point}; \ auto kernel = relayout_kern<RelayoutProblem_>; \ int nr_threads = query_blocksize_for_kernel(kernel); \ nr_threads = std::min(nr_threads, DIVUP(problem_size, _pack_w)); \ const dim3 block_dim(DIVUP(problem_size, nr_threads* _pack_w)); \ const dim3 thread_dim(nr_threads); \ return kernel<<<block_dim, thread_dim, 0, stream>>>(param); \ } #define DISPATCH_4BITS(_src_type, _dst_type) \ DISPATCH_RAW(true, true, 8, 64, _src_type, _dst_type, char, char, 4); \ DISPATCH_RAW(true, false, 8, 64, _src_type, _dst_type, char, char, 4); \ DISPATCH_RAW(true, true, 2, 64, _src_type, _dst_type, char, char, 4); \ DISPATCH_RAW(true, false, 2, 64, _src_type, _dst_type, char, char, 4); \ DISPATCH_RAW(false, true, 8, 64, _src_type, _dst_type, char, char, 4); \ DISPATCH_RAW(false, false, 8, 64, _src_type, _dst_type, char, char, 4); \ DISPATCH_RAW(false, true, 2, 64, _src_type, _dst_type, char, char, 4); \ DISPATCH_RAW(false, false, 2, 64, _src_type, _dst_type, char, char, 4); DISPATCH_4BITS(QuantizedS4, QuantizedS4); DISPATCH_4BITS(Quantized4Asymm, Quantized4Asymm); #undef DISPATCH_4BITS #undef DISPATCH_RAW megdnn_assert( false, "Unsupported data type(src:%s, dst:%s) or image size(%dx%d).", stype.name(), dtype.name(), h, w); } void relayout_format::relayout_format_cuda_nchw4_nchw( const TensorND& src, const TensorND& dst, const cudaStream_t& stream, const int group) { constexpr int pack_w = 1; const int n = src.layout[0]; const int ic = src.layout[1] * 4; const int h = src.layout[2]; const int w = src.layout[3]; const int oc = dst.layout[1]; const int hw = h * w; int nr_threads = query_blocksize_for_kernel(kern_nchw4_nchw); const dim3 block_dim(DIVUP(hw, nr_threads * pack_w), n); const dim3 thread_dim(nr_threads); kern_nchw4_nchw<<<block_dim, thread_dim, 0, stream>>>( (int8_t*)src.raw_ptr, (int8_t*)dst.raw_ptr, n, ic, oc, h, w, group); after_kernel_launch(); } void relayout_format::relayout_format_cuda_nchw_nchw4_weight( const TensorND& src, const TensorND& dst, const cudaStream_t& stream) { constexpr int pack_c = 4; const bool is_group = src.layout.ndim == 5; const int group = is_group ? src.layout[0] : 1; const int oc = is_group ? src.layout[1] : src.layout[0]; const int ic = is_group ? src.layout[2] : src.layout[1]; const int kh = is_group ? src.layout[3] : src.layout[2]; const int kw = is_group ? 
src.layout[4] : src.layout[3]; const int hw = kh * kw; const int oc_round = ROUNDUP(oc, pack_c); const int ic_round = ROUNDUP(ic, pack_c); const int ic_stride = hw; const int oc_stride_src = ic * ic_stride; const int oc_stride_dst = ic_round * ic_stride; const int group_stride_src = oc * oc_stride_src; const int group_stride_dst = oc_round * oc_stride_dst; int nr_threads = 32; const dim3 block_dim(DIVUP(hw, nr_threads), oc_round, group); const dim3 thread_dim(nr_threads); kern_nchw_nchw4_weight<<<block_dim, thread_dim, 0, stream>>>( (char*)src.raw_ptr, (char*)dst.raw_ptr, oc, ic, hw, oc_stride_src, ic_stride, oc_stride_dst, group_stride_src, group_stride_dst, 0, {}); after_kernel_launch(); }
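// ---------------------------------------------------------------------------
// An illustrative CPU reference (a sketch under simplified assumptions, not
// the MegDNN implementation) of the NCHW -> NCHW4 index mapping the kernels
// above perform on the GPU: channels are packed in groups of 4 along the
// innermost axis, and missing channels of the last group are padded with the
// zero point. nchw_to_nchw4 is a hypothetical helper name; grouping, strides
// in bytes and post-processing are intentionally left out.
// ---------------------------------------------------------------------------
#include <cstdint>
#include <cstdio>
#include <vector>

void nchw_to_nchw4(const int8_t* src, int8_t* dst,
                   int n, int c, int h, int w, int8_t zero_point) {
    const int c4 = (c + 3) / 4;  // number of packed channel blocks
    const int hw = h * w;
    for (int ni = 0; ni < n; ++ni)
        for (int cb = 0; cb < c4; ++cb)
            for (int p = 0; p < hw; ++p)
                for (int k = 0; k < 4; ++k) {
                    const int ci = cb * 4 + k;
                    const int8_t v = (ci < c)
                            ? src[(ni * c + ci) * hw + p]
                            : zero_point;  // pad channels beyond c
                    // dst layout: (n, c/4, h, w, 4)
                    dst[((ni * c4 + cb) * hw + p) * 4 + k] = v;
                }
}

int main() {
    const int n = 1, c = 6, h = 2, w = 2;  // c not divisible by 4 -> padding
    std::vector<int8_t> src(n * c * h * w);
    for (size_t i = 0; i < src.size(); ++i) src[i] = (int8_t)i;

    std::vector<int8_t> dst(n * ((c + 3) / 4) * h * w * 4);
    nchw_to_nchw4(src.data(), dst.data(), n, c, h, w, /*zero_point=*/0);

    // Print one packed group of 4 channels per line.
    for (size_t i = 0; i < dst.size(); ++i)
        printf("%d%c", dst[i], (i % 4 == 3) ? '\n' : ' ');
    return 0;
}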
#include <thrust/device_ptr.h> #include <thrust/sort.h> #include <thrust/reverse.h> #include <thrust/reduce.h> #include <thrust/merge.h> #include <thrust/fill.h> #include <thrust/iterator/reverse_iterator.h> #include <thrust/device_vector.h> #include <thrust/sort.h> #if __CUDA_ARCH__ > 200 #define MAXXGRID 2147483647 #else #define MAXXGRID 65535 #endif __device__ float op_add(float a, float b) {return a+b;} __device__ float op_sub(float a, float b) {return a-b;} __device__ float op_mul(float a, float b) {return a*b;} __device__ float op_div(float a, float b) {return a/b;} __device__ float op_gt(float a, float b) {return (a > b) ? 1.0f : 0;} __device__ float op_lt(float a, float b) {return (a < b) ? 1.0f : 0;} __device__ float op_eq(float a, float b) {return (a == b) ? 1.0f : 0;} __device__ float op_ge(float a, float b) {return (a >= b) ? 1.0f : 0;} __device__ float op_le(float a, float b) {return (a <= b) ? 1.0f : 0;} __device__ float op_ne(float a, float b) {return (a != b) ? 1.0f : 0;} __device__ float op_max(float a, float b) {return max(a,b);} __device__ float op_min(float a, float b) {return min(a,b);} __device__ float op_atan2(float a, float b) {return atan2f(a, b);} __device__ float op_pow(float a, float b) {return powf(a, b);} __device__ float op_ifpos(float a, float b) {return (a > 0) ? b : 0;} __device__ int iop_add(int a, int b) {return a+b;} __device__ int iop_sub(int a, int b) {return a-b;} __device__ int iop_mul(int a, int b) {return a*b;} __device__ int iop_div(int a, int b) {return a/b;} __device__ int iop_gt(int a, int b) {return (a > b) ? 1 : 0;} __device__ int iop_lt(int a, int b) {return (a < b) ? 1 : 0;} __device__ int iop_eq(int a, int b) {return (a == b) ? 1 : 0;} __device__ int iop_ge(int a, int b) {return (a >= b) ? 1 : 0;} __device__ int iop_le(int a, int b) {return (a <= b) ? 1 : 0;} __device__ int iop_ne(int a, int b) {return (a != b) ? 1 : 0;} __device__ int iop_max(int a, int b) {return max(a,b);} __device__ int iop_min(int a, int b) {return min(a,b);} __device__ long long lop_add(long long a, long long b) {return a+b;} __device__ long long lop_sub(long long a, long long b) {return a-b;} __device__ long long lop_mul(long long a, long long b) {return a*b;} __device__ long long lop_div(long long a, long long b) {return a/b;} __device__ long long lop_gt(long long a, long long b) {return (a > b) ? 1 : 0;} __device__ long long lop_lt(long long a, long long b) {return (a < b) ? 1 : 0;} __device__ long long lop_eq(long long a, long long b) {return (a == b) ? 1 : 0;} __device__ long long lop_ge(long long a, long long b) {return (a >= b) ? 1 : 0;} __device__ long long lop_le(long long a, long long b) {return (a <= b) ? 1 : 0;} __device__ long long lop_ne(long long a, long long b) {return (a != b) ? 1 : 0;} __device__ long long lop_max(long long a, long long b) {return max(a,b);} __device__ long long lop_min(long long a, long long b) {return max(a,b);} __device__ double dop_add(double a, double b) {return a+b;} __device__ double dop_sub(double a, double b) {return a-b;} __device__ double dop_mul(double a, double b) {return a*b;} __device__ double dop_div(double a, double b) {return a/b;} __device__ double dop_gt(double a, double b) {return (a > b) ? 1.0 : 0;} __device__ double dop_lt(double a, double b) {return (a < b) ? 1.0 : 0;} __device__ double dop_eq(double a, double b) {return (a == b) ? 1.0 : 0;} __device__ double dop_ge(double a, double b) {return (a >= b) ? 1.0 : 0;} __device__ double dop_le(double a, double b) {return (a <= b) ? 
1.0 : 0;} __device__ double dop_ne(double a, double b) {return (a != b) ? 1.0 : 0;} __device__ double dop_max(double a, double b) {return max(a,b);} __device__ double dop_min(double a, double b) {return min(a,b);} __device__ double dop_atan2(double a, double b) {return atan2(a, b);} __device__ double dop_pow(double a, double b) {return pow(a, b);} __device__ double dop_ifpos(double a, double b) {return (a > 0) ? b : 0;} // Check reducevec if these ever get changed. __device__ const optype operators[] = { op_add, op_sub, op_mul, op_div, op_gt, op_lt, op_eq, op_ge, op_le, op_ne, op_max, op_min, op_atan2, op_pow, op_ifpos}; __device__ const ioptype ioperators[] = { iop_add, iop_sub, iop_mul, iop_div, iop_gt, iop_lt, iop_eq, iop_ge, iop_le, iop_ne, iop_max, iop_min}; __device__ const loptype loperators[] = { lop_add, lop_sub, lop_mul, lop_div, lop_gt, lop_lt, lop_eq, lop_ge, lop_le, lop_ne, lop_max, lop_min}; __device__ const doptype doperators[] = { dop_add, dop_sub, dop_mul, dop_div, dop_gt, dop_lt, dop_eq, dop_ge, dop_le, dop_ne, dop_max, dop_min, dop_atan2, dop_pow, dop_ifpos}; __device__ float fn_abs(float a) {return abs(a);} __device__ float fn_exp(float a) {return expf(a);} __device__ float fn_log(float a) {return logf(a);} __device__ float fn_expm1(float a) {return expm1f(a);} __device__ float fn_sqrt(float a) {return sqrtf(a);} __device__ float fn_ln(float a) {return logf(a);} __device__ float fn_log10(float a) {return log10f(a);} __device__ float fn_log1p(float a) {return log1pf(a);} __device__ float fn_cos(float a) {return cosf(a);} __device__ float fn_sin(float a) {return sinf(a);} __device__ float fn_tan(float a) {return tanf(a);} __device__ float fn_cosh(float a) {return coshf(a);} __device__ float fn_sinh(float a) {return sinhf(a);} __device__ float fn_tanh(float a) {return tanhf(a);} __device__ float fn_acos(float a) {return acosf(a);} __device__ float fn_asin(float a) {return asinf(a);} __device__ float fn_atan(float a) {return atanf(a);} __device__ float fn_acosh(float a) {return acoshf(a);} __device__ float fn_asinh(float a) {return asinhf(a);} __device__ float fn_atanh(float a) {return atanhf(a);} __device__ float fn_erf(float a) {return erff(a);} __device__ float fn_erfinv(float a) {return erfinvf(a);} __device__ float fn_erfc(float a) {return erfcf(a);} __device__ float fn_erfcinv(float a) {return erfcinvf(a);} __device__ float fn_gammaln(float a) {return lgammaf(a);} __device__ float fn_gamma(float a) {return tgammaf(a);} __device__ float fn_ceil(float a) {return ceilf(a);} __device__ float fn_floor(float a) {return floorf(a);} __device__ float fn_round(float a) {return roundf(a);} __device__ float fn_trunc(float a) {return truncf(a);} __device__ float fn_sign(float a) {return (a>0) ? 1.0f : ((a<0) ? -1.0f : 0);} __device__ float fn_j0(float a) {return j0f(a);} __device__ float fn_j1(float a) {return j1f(a);} //__device__ float fn_jn(float a) {return jnf(a);} __device__ float fn_y0(float a) {return y0f(a);} __device__ float fn_y1(float a) {return y1f(a);} //__device__ float fn_yn(float a) {return ynf(a);} __device__ float fn_exppsi(float a) {return (a<1.0f) ? 
0.5f*a*a : a-0.5f;} __device__ float fn_normcdf(float a) {return normcdff(a);} __device__ float fn_normcdfinv(float a) {return normcdfinvf(a);} __device__ float fn_logistic(float a) {return 0.5f * (tanhf(a * 0.5f) + 1.0f);} __device__ float fn_atan2(float a, float b) {return atan2f(a, b);} __device__ float fn_pow(float a, float b) {return powf(a, b);} __device__ const fntype fctns[] = { fn_abs, fn_exp, fn_expm1, fn_sqrt, fn_ln, fn_log10, fn_log1p, fn_cos, fn_sin, fn_tan, fn_cosh, fn_sinh, fn_tanh, fn_acos, fn_asin, fn_atan, fn_acosh, fn_asinh, fn_atanh, fn_erf, fn_erfinv, fn_erfc, fn_erfcinv, fn_gammaln, fn_gamma, fn_ceil, fn_floor, fn_round, fn_trunc, fn_sign, fn_j0, fn_j1, fn_y0, fn_y1, fn_exppsi, fn_normcdf, fn_normcdfinv, fn_logistic}; // Some SLATEC functions // fn_psi, // fn_psiinv}; __device__ const optype fctns2[] = { fn_atan2, fn_pow}; // Some SLATEC functions // fn_psifn}; __device__ double dfn_abs(double a) {return abs(a);} __device__ double dfn_exp(double a) {return exp(a);} __device__ double dfn_log(double a) {return log(a);} __device__ double dfn_expm1(double a) {return expm1(a);} __device__ double dfn_sqrt(double a) {return sqrt(a);} __device__ double dfn_ln(double a) {return log(a);} __device__ double dfn_log10(double a) {return log10(a);} __device__ double dfn_log1p(double a) {return log1p(a);} __device__ double dfn_cos(double a) {return cos(a);} __device__ double dfn_sin(double a) {return sin(a);} __device__ double dfn_tan(double a) {return tan(a);} __device__ double dfn_cosh(double a) {return cosh(a);} __device__ double dfn_sinh(double a) {return sinh(a);} __device__ double dfn_tanh(double a) {return tanh(a);} __device__ double dfn_acos(double a) {return acos(a);} __device__ double dfn_asin(double a) {return asin(a);} __device__ double dfn_atan(double a) {return atan(a);} __device__ double dfn_acosh(double a) {return acosh(a);} __device__ double dfn_asinh(double a) {return asinh(a);} __device__ double dfn_atanh(double a) {return atanh(a);} __device__ double dfn_erf(double a) {return erf(a);} __device__ double dfn_erfinv(double a) {return erfinv(a);} __device__ double dfn_erfc(double a) {return erfc(a);} __device__ double dfn_erfcinv(double a) {return erfcinv(a);} __device__ double dfn_gammaln(double a) {return lgamma(a);} __device__ double dfn_gamma(double a) {return tgamma(a);} __device__ double dfn_ceil(double a) {return ceil(a);} __device__ double dfn_floor(double a) {return floor(a);} __device__ double dfn_round(double a) {return round(a);} __device__ double dfn_trunc(double a) {return trunc(a);} __device__ double dfn_sign(double a) {return (a>0) ? 1.0 : ((a<0) ? -1.0 : 0);} __device__ double dfn_j0(double a) {return j0(a);} __device__ double dfn_j1(double a) {return j1(a);} //__device__ double dfn_jn(double a) {return jnf(a);} __device__ double dfn_y0(double a) {return y0(a);} __device__ double dfn_y1(double a) {return y1(a);} //__device__ double dfn_yn(double a) {return ynf(a);} __device__ double dfn_exppsi(double a) {return (a<1.0) ? 
0.5*a*a : a-0.5;} __device__ double dfn_atan2(double a, double b) {return atan2(a, b);} __device__ double dfn_pow(double a, double b) {return pow(a, b);} __device__ double dfn_normcdf(double a) {return normcdf(a);} __device__ double dfn_normcdfinv(double a) {return normcdfinv(a);} __device__ double dfn_logistic(double a) {return 0.5 * (tanh(a * 0.5) + 1.0);} __device__ const dfntype dfctns[] = { dfn_abs, dfn_exp, dfn_expm1, dfn_sqrt, dfn_ln, dfn_log10, dfn_log1p, dfn_cos, dfn_sin, dfn_tan, dfn_cosh, dfn_sinh, dfn_tanh, dfn_acos, dfn_asin, dfn_atan, dfn_acosh, dfn_asinh, dfn_atanh, dfn_erf, dfn_erfinv, dfn_erfc, dfn_erfcinv, dfn_gammaln, dfn_gamma, dfn_ceil, dfn_floor, dfn_round, dfn_trunc, dfn_sign, dfn_j0, dfn_j1, dfn_y0, dfn_y1, dfn_exppsi, dfn_normcdf, dfn_normcdfinv, dfn_logistic}; __device__ const doptype dfctns2[2] = { dfn_atan2, dfn_pow}; __device__ float psi_(float x); int getDeviceVersion() { int igpu; cudaGetDevice(&igpu); cudaDeviceProp prop; cudaGetDeviceProperties(&prop, igpu); return 100 * prop.major + 10 * prop.minor; } void setsizes(long long N, dim3 *gridp, int *nthreadsp) { int nblocks = 1; int nthreads = 32; int threads_per_block = 1024; // int version; // version = getDeviceVersion(); // if (version == 320) threads_per_block = 512; while (1L * nblocks * nthreads < N) { if (nblocks < 16) { nblocks = 2*nblocks; } else if (nthreads < threads_per_block) { nthreads = 2*nthreads; } else { nblocks = 2*nblocks; } } gridp->y = 1 + (nblocks-1)/65536; gridp->x = 1 + (nblocks-1)/gridp->y; gridp->z = 1; *nthreadsp = nthreads; } // nblocks is not necessarily a power of two void setsizesLean(long long N, dim3 *gridp, int *nthreadsp) { int nblocks = 1; int nthreads = 32; int threads_per_block = 1024; // int version; // version = getDeviceVersion(); // if (version == 320) threads_per_block = 512; while (1L * nblocks * nthreads < N) { if (nblocks < 16) { nblocks = 2*nblocks; } else if (nthreads < threads_per_block) { nthreads = 2*nthreads; } else { nblocks = max(nblocks, 1 + (int)((N-1)/nthreads)); } } gridp->y = 1 + (nblocks-1)/65536; gridp->x = 1 + (nblocks-1)/gridp->y; gridp->z = 1; *nthreadsp = nthreads; } // keep nblocks less than 512 void setsizesTrim(long long N, dim3 *gridp, int *nthreadsp) { int nblocks = 1; int nthreads = 32; int threads_per_block = 1024; while (1L * nblocks * nthreads < N) { if (nblocks < 16) { nblocks = 2*nblocks; } else if (nthreads < threads_per_block) { nthreads = 2*nthreads; } else { nblocks = max(nblocks, 1 + (int)((N-1)/nthreads)); } } nblocks = min(512, nblocks); gridp->x = nblocks; gridp->y = 1; gridp->z = 1; *nthreadsp = nthreads; } #define GENGFUN(ATYPE,FNTYPE,FUNCARRAY) \ __global__ void __apply_gfun_##ATYPE(ATYPE *A, ATYPE *B, int N, int opn) { \ FNTYPE fn = FUNCARRAY[opn]; \ int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \ for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) { \ B[i] = fn(A[i]); \ } \ } \ \ int apply_gfun(ATYPE *A, ATYPE *B, int N, int opn) { \ int nthreads; \ dim3 griddims; \ setsizesLean(N, &griddims, &nthreads); \ __apply_gfun_##ATYPE<<<griddims,nthreads>>>(A, B, N, opn); \ cudaStreamSynchronize(SYNC_STREAM); \ cudaError_t err = cudaGetLastError(); \ return err; \ } GENGFUN(float,fntype,fctns) GENGFUN(double,dfntype,dfctns) #define GENGFUN2(ATYPE,FNTYPE,FUNCARRAY) \ __global__ void __apply_gfun2_##ATYPE(ATYPE *A, ATYPE *B, ATYPE *C, int N, int opn) { \ FNTYPE fn = FUNCARRAY[opn]; \ int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \ for (int i = ip; i < N; i += 
blockDim.x * gridDim.x * gridDim.y) { \ C[i] = fn(A[i], B[i]); \ } \ } \ \ int apply_gfun2(ATYPE *A, ATYPE *B, ATYPE *C, int N, int opn) { \ int nthreads; \ dim3 griddims; \ setsizesLean(N, &griddims, &nthreads); \ __apply_gfun2_##ATYPE<<<griddims,nthreads>>>(A, B, C, N, opn); \ cudaStreamSynchronize(SYNC_STREAM); \ cudaError_t err = cudaGetLastError(); \ return err; \ } GENGFUN2(float,optype,fctns2) GENGFUN2(double,doptype,dfctns2) #define GENAPPLY(ATYPE,OPTYPE,OPARRAY) \ __global__ void __apply_full(ATYPE *A, ATYPE *B, ATYPE *C, int N, int opn) { \ OPTYPE op = OPARRAY[opn]; \ int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \ for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) { \ C[i] = op(A[i],B[i]); \ } \ } \ \ __global__ void __apply_right_col(ATYPE *A, ATYPE *B, ATYPE *C, int nrows, int ncols, int opn) { \ OPTYPE op = OPARRAY[opn]; \ int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \ for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { \ C[i] = op(A[i],B[i % nrows]); \ } \ } \ \ __global__ void __apply_right_row(ATYPE *A, ATYPE *B, ATYPE *C, int nrows, int ncols, int opn) { \ OPTYPE op = OPARRAY[opn]; \ int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \ for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { \ C[i] = op(A[i],B[i / nrows]); \ } \ } \ \ __global__ void __apply_left_col(ATYPE *A, ATYPE *B, ATYPE *C, int nrows, int ncols, int opn) { \ OPTYPE op = OPARRAY[opn]; \ int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \ for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { \ C[i] = op(A[i % nrows],B[i]); \ } \ } \ \ __global__ void __apply_left_row(ATYPE *A, ATYPE *B, ATYPE *C, int nrows, int ncols, int opn) { \ OPTYPE op = OPARRAY[opn]; \ int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \ for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { \ C[i] = op(A[i / nrows],B[i]); \ } \ } \ \ __global__ void __apply_right_val(ATYPE *A, ATYPE *B, ATYPE *C, int nrows, int ncols, int opn) { \ OPTYPE op = OPARRAY[opn]; \ int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \ ATYPE val = B[0]; \ for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { \ C[i] = op(A[i],val); \ } \ } \ \ __global__ void __apply_left_val(ATYPE *A, ATYPE *B, ATYPE *C, int nrows, int ncols, int opn) { \ OPTYPE op = OPARRAY[opn]; \ int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \ ATYPE val = A[0]; \ for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { \ C[i] = op(val,B[i]); \ } \ } \ \ __global__ void __apply_right_const(ATYPE *A, ATYPE B, ATYPE *C, int nrows, int ncols, int opn) { \ OPTYPE op = OPARRAY[opn]; \ int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \ for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { \ C[i] = op(A[i],B); \ } \ } \ \ __global__ void __apply_left_const(ATYPE A, ATYPE *B, ATYPE *C, int nrows, int ncols, int opn) { \ OPTYPE op = OPARRAY[opn]; \ int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \ for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { \ C[i] = op(A,B[i]); \ } \ } \ \ int apply_binop(ATYPE *A, int Anrows, int Ancols, \ ATYPE *B, int Bnrows, int Bncols, ATYPE *C, int opn) { \ int N = max(Anrows, Bnrows)*max(Ancols, Bncols); \ int nthreads; \ dim3 griddims; 
\ setsizesLean(N, &griddims, &nthreads); \ if (Anrows == Bnrows && Ancols == Bncols) { \ __apply_full<<<griddims,nthreads>>>(A, B, C, N, opn); \ } else if (Anrows == Bnrows && Bncols == 1) { \ __apply_right_col<<<griddims,nthreads>>>(A, B, C, Anrows, Ancols, opn); \ } else if (Ancols == Bncols && Bnrows == 1) { \ __apply_right_row<<<griddims,nthreads>>>(A, B, C, Anrows, Ancols, opn); \ } else if (Anrows == Bnrows && Ancols == 1) { \ __apply_left_col<<<griddims,nthreads>>>(A, B, C, Bnrows, Bncols, opn); \ } else if (Ancols == Bncols && Anrows == 1) { \ __apply_left_row<<<griddims,nthreads>>>(A, B, C, Bnrows, Bncols, opn); \ } else if (Bnrows == 1 && Bncols == 1) { \ __apply_right_val<<<griddims,nthreads>>>(A, B, C, Anrows, Ancols, opn); \ } else if (Anrows == 1 && Ancols == 1) { \ __apply_left_val<<<griddims,nthreads>>>(A, B, C, Bnrows, Bncols, opn); \ } \ cudaStreamSynchronize(SYNC_STREAM); \ cudaError_t err = cudaGetLastError(); \ return err; \ } \ \ int apply_binop_left_const(ATYPE A, \ ATYPE *B, int Bnrows, int Bncols, ATYPE *C, int opn) { \ int N = Bnrows* Bncols; \ int nthreads; \ dim3 griddims; \ setsizesLean(N, &griddims, &nthreads); \ __apply_left_const<<<griddims,nthreads>>>(A, B, C, Bnrows, Bncols, opn); \ cudaStreamSynchronize(SYNC_STREAM); \ cudaError_t err = cudaGetLastError(); \ return err; \ } \ \ int apply_binop_right_const(ATYPE *A, int Anrows, int Ancols, \ ATYPE B, ATYPE *C, int opn) { \ int N = Anrows*Ancols; \ int nthreads; \ dim3 griddims; \ setsizesLean(N, &griddims, &nthreads); \ __apply_right_const<<<griddims,nthreads>>>(A, B, C, Anrows, Ancols, opn); \ cudaStreamSynchronize(SYNC_STREAM); \ cudaError_t err = cudaGetLastError(); \ return err; \ } GENAPPLY(float,optype,operators) GENAPPLY(int,ioptype,ioperators) GENAPPLY(long long,loptype,loperators) GENAPPLY(double,doptype,doperators) #define GENSPOPERATION(ATYPE,OPTYPE,OPARRAY) \ __global__ void __sdoprow(int nrows, int ncols, int nnz, ATYPE *A, int *Aic, ATYPE *B, int opn) { \ OPTYPE op = OPARRAY[opn]; \ int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \ for (int i = ip; i < nnz; i += blockDim.x * gridDim.x * gridDim.y) { \ int col = Aic[i]; \ ATYPE oldA = A[i]; \ A[i] = op(oldA,B[col]); \ } \ } \ \ __global__ void __sdopcol(int nrows, int ncols, int nnz, ATYPE *A, int *Air, ATYPE *B, int opn) { \ OPTYPE op = OPARRAY[opn]; \ int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \ for (int i = ip; i < nnz; i += blockDim.x * gridDim.x * gridDim.y) { \ int row = Air[i]; \ ATYPE oldA = A[i]; \ A[i] = op(oldA,B[row]); \ } \ } \ \ __global__ void __sdopval(int nnz, ATYPE *A, ATYPE *B, int opn) { \ OPTYPE op = OPARRAY[opn]; \ int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \ ATYPE bval = B[0]; \ for (int i = ip; i < nnz; i += blockDim.x * gridDim.x * gridDim.y) { \ ATYPE oldA = A[i]; \ A[i] = op(oldA,bval); \ } \ } \ \ int sdoprow(int nrows, int ncols, int nnz, ATYPE *A, int *Aic, \ ATYPE *B, int len, int opn) { \ int nthreads; \ dim3 griddims; \ setsizesLean(nnz, &griddims, &nthreads); \ if (len > 1) { \ __sdoprow<<<griddims,nthreads>>>(nrows, ncols, nnz, A, Aic, B, opn); \ } else { \ __sdopval<<<griddims,nthreads>>>(nnz, A, B, opn); \ } \ cudaStreamSynchronize(SYNC_STREAM); \ cudaError_t err = cudaGetLastError(); \ return err; \ } \ \ int sdopcol(int nrows, int ncols, int nnz, ATYPE *A, int *Air, \ ATYPE *B, int len, int opn) { \ int nthreads; \ dim3 griddims; \ setsizesLean(nnz, &griddims, &nthreads); \ if (len > 1) { \ 
__sdopcol<<<griddims,nthreads>>>(nrows, ncols, nnz, A, Air, B, opn); \ } else { \ __sdopval<<<griddims,nthreads>>>(nnz, A, B, opn); \ } \ cudaStreamSynchronize(SYNC_STREAM); \ cudaError_t err = cudaGetLastError(); \ return err; \ } GENSPOPERATION(float,optype,operators) GENSPOPERATION(double,doptype,doperators) #define GENREDUCE1OP(ATYPE,OPTYPE,OPARRAY) \ __global__ void __reduce1op(int nrows, int ncols, ATYPE *A, ATYPE *B, ATYPE initval, int opn) { \ OPTYPE op = OPARRAY[opn]; \ int imax = min(nrows, blockDim.x); \ int basecol = threadIdx.y + blockDim.y * blockIdx.x; \ ATYPE v; \ for (int icol = basecol; icol < ncols; icol += blockDim.y * gridDim.x) { \ v = initval; \ if (threadIdx.x < nrows) v = A[threadIdx.x + icol * nrows]; \ for (int i = threadIdx.x + blockDim.x; i < nrows; i += blockDim.x) { \ v = op(v, A[i + icol * nrows]); \ } \ for (int i = 1; i < imax; i *= 2) { \ ATYPE vtmp = __shfl_down(v, i); \ if (threadIdx.x + i < imax) { \ v = op(v, vtmp); \ } \ } \ if (threadIdx.x == 0) { \ B[icol] = v; \ } \ } \ } #define GENREDUCE1OPX(ATYPE,OPTYPE,OPARRAY) \ __global__ void __reduce1op(int nrows, int ncols, ATYPE *A, ATYPE *B, ATYPE initval, int opn) { \ __shared__ ATYPE parts[32][33]; \ OPTYPE op = OPARRAY[opn]; \ ATYPE v; \ for (int icol = threadIdx.y + blockIdx.x * blockDim.y; icol < ncols; icol += blockDim.y * gridDim.x) { \ v = initval; \ if (threadIdx.x < nrows) v = A[threadIdx.x + icol * nrows]; \ for (int irow = threadIdx.x + blockDim.x; irow < nrows; irow += blockDim.x) { \ v = op(v, A[irow + icol * nrows]); \ } \ parts[threadIdx.x][threadIdx.y] = v; \ __syncthreads(); \ for (int i = 1; i < blockDim.x; i *= 2) { \ if (i + threadIdx.x < blockDim.x) { \ parts[threadIdx.x][threadIdx.y] = op(parts[threadIdx.x][threadIdx.y], parts[i + threadIdx.x][threadIdx.y]); \ } \ } \ if (threadIdx.x == 0) { \ B[icol] = parts[0][threadIdx.y]; \ } \ __syncthreads(); \ } \ } #if __CUDA_ARCH__ > 200 GENREDUCE1OP(float,optype,operators) GENREDUCE1OP(int,ioptype,ioperators) #else GENREDUCE1OPX(float,optype,operators) GENREDUCE1OPX(int,ioptype,ioperators) #endif GENREDUCE1OPX(long long,loptype,loperators) GENREDUCE1OPX(double,doptype,doperators) template<typename T> void reducevec(int n, T *A, T *B, int opn) { thrust::device_ptr<T> pa(A); thrust::device_ptr<T> pb(B); T v; switch (opn) { case 0 : // sum v = thrust::reduce(pa, pa + n); thrust::fill(pb, pb + 1, v); break; case 10 : // max v = thrust::reduce(pa, pa + n, std::numeric_limits<T>::lowest(), thrust::maximum<T>()); thrust::fill(pb, pb + 1, v); break; case 11: // min v = thrust::reduce(pa, pa + n, std::numeric_limits<T>::max(), thrust::minimum<T>()); thrust::fill(pb, pb + 1, v); break; } } #define GENREDUCE1OPY(ATYPE) \ int reduce1op(int nrows, int ncols, ATYPE *A, ATYPE *B, ATYPE initval, int opn) { \ if (ncols == 1) { \ reducevec<ATYPE>(nrows, A, B, opn); \ } else { \ int blkx = 32; \ int blky = min(32, ncols); \ int nblks = min(65536, max(1, ((int)(((long long)nrows) * ncols / blkx / blky / 16)))); \ const dim3 blkdims(blkx,blky,1); \ __reduce1op<<<nblks,blkdims>>>(nrows, ncols, A, B, initval, opn); \ } \ cudaStreamSynchronize(SYNC_STREAM); \ cudaError_t err = cudaGetLastError(); \ return err; \ } GENREDUCE1OPY(float) GENREDUCE1OPY(int) GENREDUCE1OPY(long long) GENREDUCE1OPY(double) #define GENREDUCEBIN1OP(ATYPE,OPTYPE,OPARRAY) \ __global__ void __reducebin1op(int nrows, int ncols, ATYPE *A, ATYPE *B, ATYPE *C, int opb, int opr) { \ OPTYPE opbf = OPARRAY[opb]; \ OPTYPE oprf = OPARRAY[opr]; \ int imax = min(nrows, blockDim.x); \ int basecol = 
threadIdx.y + blockDim.y * blockIdx.x; \ for (int icol = basecol; icol < ncols; icol += blockDim.y * gridDim.x) { \ ATYPE v = 0; \ for (int i = threadIdx.x; i < nrows; i += blockDim.x) { \ v = oprf(v, opbf(A[i + icol * nrows], B[i + icol * nrows])); \ } \ for (int i = 1; i < imax; i *= 2) { \ v = oprf(v, __shfl_down(v, i)); \ } \ if (threadIdx.x == 0) { \ C[icol] = v; \ } \ } \ } #define GENREDUCEBIN1OPX(ATYPE,OPTYPE,OPARRAY) \ __global__ void __reducebin1op(int nrows, int ncols, ATYPE *A, ATYPE *B, ATYPE *C, int opb, int opr) { \ __shared__ ATYPE parts[32][33]; \ OPTYPE opbf = OPARRAY[opb]; \ OPTYPE oprf = OPARRAY[opr]; \ int imax = min(nrows, blockDim.x); \ for (int icol = threadIdx.y + blockIdx.x * blockDim.y; icol < ncols; icol += blockDim.y * gridDim.x) { \ ATYPE v = 0; \ for (int irow = threadIdx.x; irow < nrows; irow += blockDim.x) { \ v = oprf(v, opbf(A[irow + icol * nrows], B[irow + icol * nrows])); \ } \ parts[threadIdx.x][threadIdx.y] = v; \ __syncthreads(); \ for (int i = 1; i < blockDim.x; i *= 2) { \ if (i + threadIdx.x < imax) { \ parts[threadIdx.x][threadIdx.y] = oprf(parts[threadIdx.x][threadIdx.y], parts[i + threadIdx.x][threadIdx.y]); \ } \ } \ if (threadIdx.x == 0) { \ C[icol] = parts[0][threadIdx.y]; \ } \ __syncthreads(); \ } \ } #if __CUDA_ARCH__ > 200 GENREDUCEBIN1OP(float,optype,operators) #else GENREDUCEBIN1OPX(float,optype,operators) #endif GENREDUCEBIN1OPX(double,doptype,doperators) #define GENREDUCEBIN1OPY(ATYPE) \ int reducebin1op(int nrows, int ncols, ATYPE *A, ATYPE *B, ATYPE *C, int opb, int opr) { \ int blkx = 32; \ int blky = min(32, ncols); \ int nblks = min(65536, max(1, ((int)(((long long)nrows) * ncols / blkx / blky / 16)))); \ const dim3 blkdims(blkx,blky,1); \ __reducebin1op<<<nblks,blkdims>>>(nrows, ncols, A, B, C, opb, opr); \ cudaStreamSynchronize(SYNC_STREAM); \ cudaError_t err = cudaGetLastError(); \ return err; \ } GENREDUCEBIN1OPY(float) GENREDUCEBIN1OPY(double) #define GENREDUCE2OP(ATYPE,OPTYPE,OPARRAY) \ __global__ void __reduce2op(int nrows, int ncols, ATYPE *A, ATYPE *B, ATYPE initval, int opn) { \ __shared__ ATYPE parts[32][33]; \ OPTYPE op = OPARRAY[opn]; \ int baserow = threadIdx.x + blockDim.x * blockIdx.x; \ for (int irow = baserow; irow < nrows; irow += blockDim.x * gridDim.x) { \ ATYPE v = A[irow + threadIdx.y * nrows]; \ for (int icol = threadIdx.y + blockDim.y; icol < ncols; icol += blockDim.y) { \ v = op(v, A[irow + icol * nrows]); \ } \ parts[threadIdx.x][threadIdx.y] = v; \ __syncthreads(); \ ATYPE newv = initval; \ for (int i = 1; i < blockDim.y; i *= 2) { \ if (i + threadIdx.y < blockDim.y) newv = parts[threadIdx.x][i+threadIdx.y]; \ __syncthreads(); \ if (i + threadIdx.y < blockDim.y) parts[threadIdx.x][threadIdx.y] = op(parts[threadIdx.x][threadIdx.y], newv); \ __syncthreads(); \ } \ if (threadIdx.y == 0) { \ B[irow] = parts[threadIdx.x][0]; \ } \ __syncthreads(); \ } \ } \ \ int reduce2op(int nrows, int ncols, ATYPE *A, ATYPE *B, ATYPE initval, int opn) { \ if (nrows == 1) { \ reducevec<ATYPE>(ncols, A, B, opn); \ } else { \ int blkx = 32; \ int blky = min(32, ncols); \ int nblks = min(65536, max(1, ((int)(((long long)nrows) * ncols / blkx / blky / 16)))); \ const dim3 blkdims(blkx,blky,1); \ __reduce2op<<<nblks,blkdims>>>(nrows, ncols, A, B, initval, opn); \ } \ cudaStreamSynchronize(SYNC_STREAM); \ cudaError_t err = cudaGetLastError(); \ return err; \ } GENREDUCE2OP(float,optype,operators) GENREDUCE2OP(int,ioptype,ioperators) GENREDUCE2OP(long long,loptype,loperators) GENREDUCE2OP(double,doptype,doperators) #define 
GENREDUCEBIN2OP(ATYPE,OPTYPE,OPARRAY) \ __global__ void __reducebin2op(int nrows, int ncols, ATYPE *A, ATYPE *B, ATYPE *C, int opb, int opr) { \ __shared__ ATYPE parts[32][33]; \ OPTYPE opbf = OPARRAY[opb]; \ OPTYPE oprf = OPARRAY[opr]; \ int baserow = threadIdx.x + blockDim.x * blockIdx.x; \ for (int irow = baserow; irow < nrows; irow += blockDim.x * gridDim.x) { \ float v = opbf(A[irow + threadIdx.y * nrows], B[irow + threadIdx.y * nrows]); \ for (int icol = threadIdx.y + blockDim.y; icol < ncols; icol += blockDim.y) { \ v = oprf(v, opbf(A[irow + icol * nrows], B[irow + icol * nrows])); \ } \ parts[threadIdx.x][threadIdx.y] = v; \ __syncthreads(); \ float newv = 0; \ for (int i = 1; i < blockDim.y; i *= 2) { \ if (i + threadIdx.y < blockDim.y) newv = parts[threadIdx.x][i+threadIdx.y]; \ __syncthreads(); \ if (i + threadIdx.y < blockDim.y) parts[threadIdx.x][threadIdx.y] = oprf(parts[threadIdx.x][threadIdx.y], newv); \ __syncthreads(); \ } \ if (threadIdx.y == 0) { \ C[irow] = parts[threadIdx.x][0]; \ } \ __syncthreads(); \ } \ } \ \ int reducebin2op(int nrows, int ncols, ATYPE *A, ATYPE *B, ATYPE *C, int opb, int opr) { \ int blkx = 32; \ int blky = min(32, ncols); \ int nblks = min(65536, max(1, ((int)(((long long)nrows) * ncols / blkx / blky / 16)))); \ const dim3 blkdims(blkx,blky,1); \ __reducebin2op<<<nblks,blkdims>>>(nrows, ncols, A, B, C, opb, opr); \ cudaStreamSynchronize(SYNC_STREAM); \ cudaError_t err = cudaGetLastError(); \ return err; \ } GENREDUCEBIN2OP(float,optype,operators) GENREDUCEBIN2OP(double,doptype,doperators) /* class FloatOps { public: __device__ static optype ops(int n) {return operators[n];} }; template<typename TT, typename OPTYPE, class CC> __global__ void opTensor3D_(int m, int n, int p, TT *A, int ia, int ja, int ka, TT *B, int ib, int jb, int kb, TT *C, int opn) { int ii, jj, kk; OPTYPE op = CC::ops(opn); int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < m*n*p; i += blockDim.x * gridDim.x * gridDim.y) { jj = i / m; ii = i - jj * m; kk = jj / n; jj = jj - kk * n; C[ii + m * (jj + n * kk)] = op(A[ii*ia + (1+ia*(m-1)) * (jj*ja + (1+ja*(n-1)) * kk*ka)], A[ii*ib + (1+ib*(m-1)) * (jj*jb + (1+jb*(n-1)) * kk*kb)]); } } template<typename TT, typename OPTYPE, class CC> int opTensor3D(int m, int n, int p, TT *A, int ia, int ja, int ka, TT *B, int ib, int jb, int kb, TT *C, int opn) { int nthreads; dim3 griddims; setsizesLean(m*n*p, &griddims, &nthreads); opTensor3D_<TT,OPTYPE,CC><<<griddims,nthreads>>>(A, B, nrows, nreduce, ncols); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } */
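// ---------------------------------------------------------------------------
// Hedged usage sketch (not part of the original file): shows how the
// macro-generated apply_binop dispatcher above is meant to be driven from the
// host.  Because B has a single column with the same row count as A, the call
// takes the __apply_right_col path and broadcasts B down every column of A.
// Operator index 0 is assumed here to select element-wise addition in the
// `operators` table defined earlier in this file; adjust the index if the
// table is ordered differently.
static int example_apply_binop_col_broadcast(int nrows, int ncols) {
  float *A = NULL, *B = NULL, *C = NULL;                   // device buffers
  cudaMalloc(&A, (size_t)nrows * ncols * sizeof(float));   // nrows x ncols
  cudaMalloc(&B, (size_t)nrows * sizeof(float));           // nrows x 1
  cudaMalloc(&C, (size_t)nrows * ncols * sizeof(float));   // result
  // ... fill A and B with the caller's data ...
  int err = apply_binop(A, nrows, ncols,                   // full matrix
                        B, nrows, 1,                       // column vector -> broadcast
                        C, /*opn=*/0);                     // assumed: 0 == add
  cudaFree(A); cudaFree(B); cudaFree(C);
  return err;                                              // cudaError_t from the dispatcher
}
// ---------------------------------------------------------------------------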
the_stack
using namespace facebook::cuda; namespace facebook { namespace deeplearning { namespace torch { namespace detail { // Read weight tensor. Need to regularize this type of access for the // template implementation. // __device__ void packOutputPlanes(float& result, DeviceTensor<float, 6>& weight, int outputRow, int outputCol, int kernelRow, int kernelCol, int outputPlane, int inputPlane) { result = weight[outputRow][outputCol][kernelRow] [kernelCol][ outputPlane][inputPlane].ldg(); } // Read as if weight tensor was float2 in output-plane dimension. __device__ void packOutputPlanes(float2& result, DeviceTensor<float, 6>& weight, int outputRow, int outputCol, int kernelRow, int kernelCol, int outputPlane, int inputPlane) { result.x = weight[outputRow][outputCol][kernelRow] [kernelCol][2 * outputPlane][inputPlane].ldg(); result.y = weight[outputRow][outputCol][kernelRow] [kernelCol][2 * outputPlane + 1][inputPlane].ldg(); } // Read as if weight tensor was float4 in output-plane dimension. __device__ void packOutputPlanes(float4& result, DeviceTensor<float, 6>& weight, int outputRow, int outputCol, int kernelRow, int kernelCol, int outputPlane, int inputPlane) { result.x = weight[outputRow][outputCol][kernelRow] [kernelCol][4 * outputPlane][inputPlane].ldg(); result.y = weight[outputRow][outputCol][kernelRow] [kernelCol][4 * outputPlane + 1][inputPlane].ldg(); result.z = weight[outputRow][outputCol][kernelRow] [kernelCol][4 * outputPlane + 2][inputPlane].ldg(); result.w = weight[outputRow][outputCol][kernelRow] [kernelCol][4 * outputPlane + 3][inputPlane].ldg(); } extern __shared__ float pShared[]; // Backprop template <int BatchSize, typename T> __global__ void updateGradInputBatch(DeviceTensor<T, 4> gradOutput, DeviceTensor<float, 6> weight, DeviceTensor<float, 4> gradInput, int dH, int dW) { // note: the "input" is being computed, i.e. 
"input" is the output int inputRow = blockIdx.z; int inputCol = blockIdx.y * blockDim.y + threadIdx.y; int inputPlane = threadIdx.x / gradOutput.getSize(kPlaneDim); int outputPlane = threadIdx.x % gradOutput.getSize(kPlaneDim); int smemSize[3] = {blockDim.y, BatchSize, gradOutput.getSize(kPlaneDim)}; DeviceTensor<T, 3> gradOutputSMEM(reinterpret_cast<T*>(pShared), smemSize); float vSum[BatchSize]; if (inputCol < gradInput.getSize(kWidthDim)) { // guard right-edge for (int image = 0; image < BatchSize; ++image) { vSum[image] = 0.0f; } for (int outputRow = max(0, (inputRow - weight.getSize(kKernelHeightDim) + dH) / dH); outputRow < min(inputRow / dH + 1, gradOutput.getSize(kHeightDim)); ++outputRow) { for (int outputCol = max(0, (inputCol - weight.getSize(kKernelWidthDim) + dW) / dW); outputCol < min(inputCol / dW + 1, gradOutput.getSize(kWidthDim)); ++outputCol) { int kernelRow = inputRow - dH * outputRow; int kernelCol = inputCol - dW * outputCol; T tempWeight; packOutputPlanes(tempWeight, weight, outputRow, outputCol, kernelRow, kernelCol, outputPlane, inputPlane); // use input-plane tiling to iterate images for (int image = inputPlane; image < BatchSize; image += gradInput.getSize(kPlaneDim)) { gradOutputSMEM[threadIdx.y][image][outputPlane] = gradOutput[image] [outputRow][outputCol][outputPlane]; } __syncthreads(); for (int image = 0; image < BatchSize; ++image) { T gradOutput = gradOutputSMEM[threadIdx.y][image][outputPlane]; vSum[image] += dot(gradOutput, tempWeight); } __syncthreads(); } } for (int delta = 1; delta < gradOutput.getSize(kPlaneDim); delta *= 2) { for (int image = 0; image < BatchSize; ++image) { vSum[image] += __shfl_down(vSum[image], delta); } } if (outputPlane == 0) { for (int image = 0; image < BatchSize; ++image) { gradInput[image][inputRow][inputCol][inputPlane] = vSum[image]; } } } // right-edge guard } template <int BatchSize, typename T, int Stride> __global__ void updateGradInputBatch(DeviceTensor<T, 4> gradOutput, DeviceTensor<float, 6> weight, DeviceTensor<float, 4> gradInput) { // note: the "input" is being computed, i.e. 
"input" is the output int inputRow = blockIdx.z; int inputCol = blockIdx.y * blockDim.y + threadIdx.y; int inputPlane = threadIdx.x / gradOutput.getSize(kPlaneDim); int outputPlane = threadIdx.x % gradOutput.getSize(kPlaneDim); int smemSize[3] = {blockDim.y, BatchSize, gradOutput.getSize(kPlaneDim)}; DeviceTensor<T, 3> gradOutputSMEM(reinterpret_cast<T*>(pShared), smemSize); float vSum[BatchSize]; if (inputCol < gradInput.getSize(kWidthDim)) { // guard right-edge for (int image = 0; image < BatchSize; ++image) { vSum[image] = 0.0f; } for (int outputRow = max(0, (inputRow - weight.getSize(kKernelHeightDim) + Stride) / Stride); outputRow < min(inputRow / Stride + 1, gradOutput.getSize(kHeightDim)); ++outputRow) { for (int outputCol = max(0, (inputCol - weight.getSize(kKernelWidthDim) + Stride) / Stride); outputCol < min(inputCol / Stride + 1, gradOutput.getSize(kWidthDim)); ++outputCol) { int kernelRow = inputRow - Stride * outputRow; int kernelCol = inputCol - Stride * outputCol; T tempWeight; packOutputPlanes(tempWeight, weight, outputRow, outputCol, kernelRow, kernelCol, outputPlane, inputPlane); // use input-plane tiling to iterate images for (int image = inputPlane; image < BatchSize; image += gradInput.getSize(kPlaneDim)) { gradOutputSMEM[threadIdx.y][image][outputPlane] = gradOutput[image] [outputRow][outputCol][outputPlane]; } __syncthreads(); for (int image = 0; image < BatchSize; ++image) { T gradOutput = gradOutputSMEM[threadIdx.y][image][outputPlane]; vSum[image] += dot(gradOutput, tempWeight); } __syncthreads(); } } for (int delta = 1; delta < gradOutput.getSize(kPlaneDim); delta *= 2) { for (int image = 0; image < BatchSize; ++image) { vSum[image] += __shfl_down(vSum[image], delta); } } if (outputPlane == 0) { for (int image = 0; image < BatchSize; ++image) { gradInput[image][inputRow][inputCol][inputPlane] = vSum[image]; } } } // right-edge guard } template <int BatchSize, typename T, int KernelSize, int Stride> __global__ void updateGradInputBatch(DeviceTensor<T, 4> gradOutput, DeviceTensor<float, 6> weight, DeviceTensor<float, 4> gradInput) { // note: the "input" is being computed, i.e. 
"input" is the output int inputRow = blockIdx.z; int inputCol = blockIdx.y * blockDim.y + threadIdx.y; int inputPlane = threadIdx.x / gradOutput.getSize(kPlaneDim); int outputPlane = threadIdx.x % gradOutput.getSize(kPlaneDim); int smemSize[3] = {blockDim.y, BatchSize, gradOutput.getSize(kPlaneDim)}; DeviceTensor<T, 3> gradOutputSMEM(reinterpret_cast<T*>(pShared), smemSize); float vSum[BatchSize]; if (inputCol < gradInput.getSize(kWidthDim)) { // guard right-edge for (int image = 0; image < BatchSize; ++image) { vSum[image] = 0.0f; } for (int outputRow = max(0, (inputRow - KernelSize + Stride) / Stride); outputRow < min(inputRow / Stride + 1, gradOutput.getSize(kHeightDim)); ++outputRow) { for (int outputCol = max(0, (inputCol - KernelSize + Stride) / Stride); outputCol < min(inputCol / Stride + 1, gradOutput.getSize(kWidthDim)); ++outputCol) { int kernelRow = inputRow - Stride * outputRow; int kernelCol = inputCol - Stride * outputCol; T tempWeight; packOutputPlanes(tempWeight, weight, outputRow, outputCol, kernelRow, kernelCol, outputPlane, inputPlane); // use input-plane tiling to iterate images for (int image = inputPlane; image < BatchSize; image += gradInput.getSize(kPlaneDim)) { gradOutputSMEM[threadIdx.y][image][outputPlane] = gradOutput[image] [outputRow][outputCol][outputPlane]; } __syncthreads(); for (int image = 0; image < BatchSize; ++image) { T gradOutput = gradOutputSMEM[threadIdx.y][image][outputPlane]; vSum[image] += dot(gradOutput, tempWeight); } __syncthreads(); } } for (int delta = 1; delta < gradOutput.getSize(kPlaneDim); delta *= 2) { for (int image = 0; image < BatchSize; ++image) { vSum[image] += __shfl_down(vSum[image], delta); } } if (outputPlane == 0) { for (int image = 0; image < BatchSize; ++image) { gradInput[image][inputRow][inputCol][inputPlane] = vSum[image]; } } } // right-edge guard } // Iterating kernel. // template <int BatchSize, typename T> __global__ void updateGradInputBatch(DeviceTensor<T, 4> gradOutput, DeviceTensor<float, 6> weight, DeviceTensor<float, 4> gradInput, int dH, int dW, int gradOutputPlaneThreads) { // note: the "input" is being computed, i.e. 
"input" is the output int inputRow = blockIdx.z; int inputCol = blockIdx.y * blockDim.y + threadIdx.y; int inputPlane = threadIdx.x / gradOutputPlaneThreads; int outputThread = threadIdx.x % gradOutputPlaneThreads; float vSum[BatchSize]; if (inputCol < gradInput.getSize(kWidthDim)) { // guard right-edge for (int image = 0; image < BatchSize; ++image) { vSum[image] = 0.0f; } for (int outputRow = max(0, (inputRow - weight.getSize(kKernelHeightDim) + dH) / dH); outputRow < min(inputRow / dH + 1, gradOutput.getSize(kHeightDim)); ++outputRow) { for (int outputCol = max(0, (inputCol - weight.getSize(kKernelWidthDim) + dW) / dW); outputCol < min(inputCol / dW + 1, gradOutput.getSize(kWidthDim)); ++outputCol) { int kernelRow = inputRow - dH * outputRow; int kernelCol = inputCol - dW * outputCol; for (int outputPlane = outputThread; outputPlane < gradOutput.getSize(kPlaneDim); outputPlane += gradOutputPlaneThreads) { T tempWeight; packOutputPlanes(tempWeight, weight, outputRow, outputCol, kernelRow, kernelCol, outputPlane, inputPlane); for (int image = 0; image < BatchSize; ++image) { T gradOut = gradOutput[image][outputRow][outputCol] [outputPlane]; vSum[image] += dot(gradOut, tempWeight); } } } } for (int delta = 1; delta < gradOutputPlaneThreads; delta *= 2) { for (int image = 0; image < BatchSize; ++image) { vSum[image] += __shfl_down(vSum[image], delta); } } if (outputThread == 0) { for (int image = 0; image < BatchSize; ++image) { gradInput[image][inputRow][inputCol][inputPlane] = vSum[image]; } } } // right-edge guard } #define UPDATE_GRAD_INPUT_SIZE_STRIDE(SIZE, STRIDE) case SIZE: \ updateGradInputBatch<BatchSize, T, SIZE, STRIDE><<<grid, block, smem, \ stream>>>( \ gradOutput, weight, gradInput); \ break // Dispatch based on input- and output-planes being powers of two // in which case an optimized version of the kernel can be used. // template <int BatchSize, typename T> void dispatchUpdateGradInputPlanePOT(cudaStream_t stream, DeviceTensor<T, 4> gradOutput, DeviceTensor<float, 6> weight, DeviceTensor<float, 4> gradInput, int dH, int dW) { const int kBlockSize = 256; int gradOutPlaneThreads = kBlockSize / gradInput.getSize(kPlaneDim); if (gradOutPlaneThreads < gradOutput.getSize(kPlaneDim) || !isPowerOfTwo(gradInput.getSize(kPlaneDim)) || !isPowerOfTwo(gradOutput.getSize(kPlaneDim)) || gradOutput.getSize(kPlaneDim) > 32) { // gradOutPlaneThreads must be a power of two or multiple of 32. 
gradOutPlaneThreads = std::min(32, gradOutPlaneThreads); gradOutPlaneThreads = greatestPowerOfTwoLessEq(gradOutPlaneThreads); dim3 block(gradInput.getSize(kPlaneDim) * gradOutPlaneThreads); dim3 grid(1, gradInput.getSize(kWidthDim), gradInput.getSize(kHeightDim)); updateGradInputBatch<BatchSize, T><<<grid, block, 0, stream>>>(gradOutput, weight, gradInput, dH, dW, gradOutPlaneThreads); } else { int totalPlanes = gradOutput.getSize(kPlaneDim) * gradInput.getSize(kPlaneDim); dim3 block(totalPlanes, kBlockSize / totalPlanes); dim3 grid(1, cuda::ceil(gradInput.getSize(kWidthDim), static_cast<int>(block.y)), gradInput.getSize(kHeightDim)); int smem = block.y * BatchSize * gradOutput.getSize(kPlaneDim) * sizeof(float4); // small-kernel optimization if (weight.getSize(kKernelWidthDim) == weight.getSize(kKernelHeightDim) && dH == dW) { switch (dH) { case 1: { switch (weight.getSize(kKernelWidthDim)) { UPDATE_GRAD_INPUT_SIZE_STRIDE(3, 1); UPDATE_GRAD_INPUT_SIZE_STRIDE(5, 1); default: updateGradInputBatch<BatchSize, T, 1><<<grid, block, smem, stream>>>( gradOutput, weight, gradInput); } } break; case 2: { switch (weight.getSize(kKernelWidthDim)) { UPDATE_GRAD_INPUT_SIZE_STRIDE(3, 2); UPDATE_GRAD_INPUT_SIZE_STRIDE(7, 2); default: updateGradInputBatch<BatchSize, T, 2><<<grid, block, smem, stream>>>( gradOutput, weight, gradInput); } } break; default: updateGradInputBatch<BatchSize, T><<<grid, block, smem, stream>>>(gradOutput, weight, gradInput, dH, dW); } } else { updateGradInputBatch<BatchSize, T><<<grid, block, smem, stream>>>(gradOutput, weight, gradInput, dH, dW); } } } template <int BatchSize> void dispatchUpdateGradInputBatchIPT(cudaStream_t stream, DeviceTensor<float, 4> gradOutput, DeviceTensor<float, 6> weight, DeviceTensor<float, 4> gradInput, int dH, int dW) { if (gradOutput.getSize(kPlaneDim) % 4 == 0 && isAligned(gradOutput.data(), sizeof(float4)) && kFloat4Optimization) { // create float4 based gradOutput tensor DeviceTensor<float4, 4> gradOutput4 = convertImageBatch<float4>(gradOutput); dispatchUpdateGradInputPlanePOT<BatchSize, float4>( stream, gradOutput4, weight, gradInput, dH, dW); } else if (gradOutput.getSize(kPlaneDim) % 2 == 0 && isAligned(gradOutput.data(), sizeof(float2)) && kFloat2Optimization) { // create float2 based gradOutput tensor DeviceTensor<float2, 4> gradOutput2 = convertImageBatch<float2>(gradOutput); dispatchUpdateGradInputPlanePOT<BatchSize, float2>( stream, gradOutput2, weight, gradInput, dH, dW); } else { dispatchUpdateGradInputPlanePOT<BatchSize, float>( stream, gradOutput, weight, gradInput, dH, dW); } } #define UPDATE_GRAD_INPUT_CASE(B) case B: \ dispatchUpdateGradInputBatchIPT<B>( \ stream, gradOutput, weight, gradInput, dH, dW); \ break void updateGradInputBatchPOT(cudaStream_t stream, DeviceTensor<float, 4> gradOutput, DeviceTensor<float, 6> weight, DeviceTensor<float, 4> gradInput, int batchSize, int dH, int dW) { switch (batchSize) { UPDATE_GRAD_INPUT_CASE(128); UPDATE_GRAD_INPUT_CASE(64); UPDATE_GRAD_INPUT_CASE(32); UPDATE_GRAD_INPUT_CASE(16); UPDATE_GRAD_INPUT_CASE(8); UPDATE_GRAD_INPUT_CASE(4); UPDATE_GRAD_INPUT_CASE(2); UPDATE_GRAD_INPUT_CASE(1); default: assert(false); // input validation, for debugging only } } void locallyConnectedUpdateGradInput(cudaStream_t stream, const float* gradOutput, const float* weight, float* gradInput, LocallyConnectedParam& params) { long batchIdx = 0; int weightSize[6] = {params.outputHeight, params.outputWidth, params.kernelHeight, params.kernelWidth, params.outputPlanes, params.inputPlanes}; DeviceTensor<float, 6> 
cudaWeight(const_cast<float*>(weight), weightSize); int batchSize = 16; int inputSize[4] = {batchSize, params.inputHeight, params.inputWidth, params.inputPlanes}; int outputSize[4] = {batchSize, params.outputHeight, params.outputWidth, params.outputPlanes}; while (batchSize > 0) { while (batchIdx < (params.batchSize / batchSize) * batchSize) { DeviceTensor<float, 4> cudaGradOutput(const_cast<float*>(gradOutput), outputSize); DeviceTensor<float, 4> cudaGradInput(gradInput, inputSize); updateGradInputBatchPOT(stream, cudaGradOutput, cudaWeight, cudaGradInput, batchSize, params.dH, params.dW); batchIdx += batchSize; gradOutput += cudaGradOutput.numElements(); gradInput += cudaGradInput.numElements(); } batchSize /= 2; inputSize[0] = batchSize; outputSize[0] = batchSize; } } } // detail namespace }}} // namespaces
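// ---------------------------------------------------------------------------
// Hedged sketch (not part of the original source): isolates the batch-chunking
// policy used by locallyConnectedUpdateGradInput above.  The batch is consumed
// in power-of-two chunks, largest first, so every chunk can be handed to one
// of the fixed BatchSize template instantiations selected by
// updateGradInputBatchPOT.  The function below only prints the schedule; the
// name and the printf are illustrative.
#include <cstdio>

static void illustrateGradInputBatchSchedule(int totalBatch, int maxChunk) {
  int batchIdx = 0;
  for (int chunk = maxChunk; chunk > 0; chunk /= 2) {
    // Take as many whole chunks of this size as still fit in the batch.
    while (batchIdx + chunk <= totalBatch) {
      std::printf("updateGradInputBatchPOT(batchSize=%d) for images [%d, %d)\n",
                  chunk, batchIdx, batchIdx + chunk);
      batchIdx += chunk;
    }
  }
  // Every image is covered exactly once: whatever remains after a pass is
  // strictly smaller than the chunk size just tried, so a smaller power of two
  // eventually picks it up.
}
// ---------------------------------------------------------------------------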
the_stack
__constant__ KernParameters c_params; const int OPT_BW = 32, OPT_BH = 8, OPT_NB = 5; const int SPACING = 5; template <int BW, int BH> struct SSIMData { float2 mean[BH*2+4][BW*2+4], var[BH*2+4][BW*2+4]; float cross[BH*2+4][BW*2+4], value[BH*2+4][BW*2+4]; int2 orig; }; template <class T> __device__ void swap(T &a, T &b)/*{{{*/ { T temp = a; a = b; b = temp; }/*}}}*/ // returns -1 if pixel cannot improve due to neighbors (and itself) // not improving __device__ int get_improve_mask_idx(const KernPyramidLevel &lvl, /*{{{*/ const int2 &p) { int2 block = p/5; int2 offset = p%5; int begi = (offset.y >= 2 ? 1 : 0), begj = (offset.x >= 2 ? 1 : 0), endi = begi+2, endj = begj+2; int impmask_idx = (block.y+1)*lvl.impmask_rowstride + (block.x+1); for(int i=begi; i<endi; ++i) { for(int j=begj; j<endj; ++j) { int d = impmask_idx + c_improvmask_offset[i][j]; if(lvl.improving_mask[d]&c_improvmask[offset.y][offset.x][i][j]) return impmask_idx; } } return -1; }/*}}}*/ __device__ bool pixel_on_border(const KernPyramidLevel &lvl, const int2 &p)/*{{{*/ { switch(c_params.bcond) { case BCOND_NONE: break; case BCOND_CORNER: if(p.x==0 && p.y==0 || p.x==0 && p.y==lvl.pixdim.y-1 || p.x==lvl.pixdim.x-1 && p.y==0 && p.x==lvl.pixdim.x-1 && p.y==lvl.pixdim.y-1) { return true; } break; case BCOND_BORDER: if(p.x==0 || p.y==0 || p.x==lvl.pixdim.x-1 || p.y==lvl.pixdim.y-1) return true; break; } return false; }/*}}}*/ // gradient calculation -------------------------- template <int BW, int BH> __device__ float ssim_change(const KernPyramidLevel &lvl,/*{{{*/ const int2 &p, float2 v, float2 old_luma, const SSIMData<BW,BH> &ssimdata) { float2 luma; luma.x = tex2D(tex_img0, p.x-v.x + 0.5f, p.y-v.y + 0.5f), luma.y = tex2D(tex_img1, p.x+v.x + 0.5f, p.y+v.y + 0.5f); float change = 0; float2 dmean = luma - old_luma, dvar = pow2(luma) - pow2(old_luma); float dcross = luma.x*luma.y - old_luma.x*old_luma.y; bool need_counter = p.x < 4 || p.x >= lvl.pixdim.x-4 || p.y < 4 || p.y >= lvl.pixdim.y-4; int idx = mem_index(lvl, p); int2 B = calc_border(p, lvl.pixdim); for(int i=0; i<5; ++i) { int sy = p.y+i-2 - ssimdata.orig.y; assert(sy >= 0 && sy < OPT_BH*2+4); for(int j=0; j<5; ++j) { if(c_iomask[B.y][B.x][i][j] == 0) continue; int sx = p.x+j-2 - ssimdata.orig.x; int nb = mem_index(lvl, p + make_int2(j,i)-2); float2 mean, var; float counter = need_counter ? 
lvl.ssim.counter[nb] : 25, cross; assert(sx >= 0 && sx < OPT_BW*2+4); mean = ssimdata.mean[sy][sx]; var = ssimdata.var[sy][sx]; cross = ssimdata.cross[sy][sx]; mean += dmean; var += dvar; cross += dcross; float new_ssim = ssim(mean,var,cross,counter,c_params.ssim_clamp); change += ssimdata.value[sy][sx] - new_ssim; } } return change; }/*}}}*/ template <int BW, int BH> __device__ float energy_change(const KernPyramidLevel &lvl, /*{{{*/ const int2 &p, const float2 &v, const float2 &old_luma, const float2 &d, const SSIMData<BW,BH> &ssimdata) { float v_ssim = ssim_change(lvl, p, v+d, old_luma, ssimdata); int idx = mem_index(lvl,p); float v_tps = lvl.tps.axy[idx]*(d.x*d.x + d.y*d.y); v_tps += lvl.tps.b[idx].x*d.x; v_tps += lvl.tps.b[idx].y*d.y; float v_ui = lvl.ui.axy[idx]*(d.x*d.x + d.y*d.y); v_ui += lvl.ui.b[idx].x*d.x; v_ui += lvl.ui.b[idx].y*d.y; return (c_params.w_ui*v_ui + c_params.w_ssim*v_ssim)*lvl.inv_wh + c_params.w_tps*v_tps; }/*}}}*/ template <int BW, int BH> __device__ float2 compute_gradient(const KernPyramidLevel &lvl, /*{{{*/ const int2 &p, const float2 &v, const float2 &old_luma, const SSIMData<BW,BH> &ssimdata) { float2 g; g.x = energy_change(lvl,p,v,old_luma,make_float2(c_params.eps,0),ssimdata)- energy_change(lvl,p,v,old_luma,make_float2(-c_params.eps,0),ssimdata); g.y = energy_change(lvl,p,v,old_luma,make_float2(0,c_params.eps),ssimdata)- energy_change(lvl,p,v,old_luma,make_float2(0,-c_params.eps),ssimdata); return -g; }/*}}}*/ // foldover -------------------------------- template <int X, int Y, int SIGN> __device__ float2 fover_calc_vtx(const KernPyramidLevel &lvl,/*{{{*/ const int2 &p, float2 v) { const int2 off = make_int2(X,Y); if(lvl.contains(p+off)) v = SIGN*lvl.v[mem_index(lvl,p+off)]; return v + (p-off); }/*}}}*/ __device__ void fover_update_isec_min(float2 c, float2 grad,/*{{{*/ float2 e0, float2 e1, float &t_min) { float2 de = e1-e0, dce = c-e0; // determinant float d = de.y*grad.x - de.x*grad.y; // signals that we don't have an intersection (yet) // t = td/d float td = -1; // u = ud/d // e0 + u*(e1-e0) = intersection point float ud = grad.x*dce.y - grad.y*dce.x; int sign = signbit(d); // this is faster than multiplying ud and d by sign if(sign) { ud = -ud; d = -d; } // line by c0 and c1 intersects segment [e0,e1] ? 
if(ud >= 0 && ud <= d) // u >= 0 && u <= 1 { // c0 + t*(c1-c0) = intersection point td = de.x*dce.y - de.y*dce.x; td *= (-sign*2+1); if(td >= 0 && td < t_min*d) t_min = td/d; } }/*}}}*/ template <int SIGN> __device__ void fover_calc_isec_min(const KernPyramidLevel &lvl, /*{{{*/ const int2 &p, float2 v, float2 grad, float &t_min) { // edge segment, start from upper left (-1,-1), go cw around center // pixel testing whether pixel will intersect the edge or not float2 e[2] = { fover_calc_vtx<-1,-1,SIGN>(lvl, p, v), fover_calc_vtx< 0,-1,SIGN>(lvl, p, v)}; float2 efirst = e[0]; // pixel displacement (c0 -> c1) float2 c = p + v; fover_update_isec_min(c,grad,e[0],e[1],t_min); e[0] = fover_calc_vtx<1,-1,SIGN>(lvl, p, v); fover_update_isec_min(c,grad,e[1],e[0],t_min); e[1] = fover_calc_vtx<1,0,SIGN>(lvl, p, v); fover_update_isec_min(c,grad,e[0],e[1],t_min); e[0] = fover_calc_vtx<1,1,SIGN>(lvl, p, v); fover_update_isec_min(c,grad,e[1],e[0],t_min); e[1] = fover_calc_vtx<0,1,SIGN>(lvl, p, v); fover_update_isec_min(c,grad,e[0],e[1],t_min); e[0] = fover_calc_vtx<-1,1,SIGN>(lvl, p, v); fover_update_isec_min(c,grad,e[1],e[0],t_min); e[1] = fover_calc_vtx<-1,0,SIGN>(lvl, p, v); fover_update_isec_min(c,grad,e[0],e[1],t_min); fover_update_isec_min(c,grad,e[1],efirst,t_min); }/*}}}*/ __device__ float prevent_foldover(const KernPyramidLevel &lvl,/*{{{*/ const int2 &p, float2 v, float2 grad) { float t_min = 10; fover_calc_isec_min<-1>(lvl, p, -v, -grad, t_min); fover_calc_isec_min<1>(lvl, p, v, grad, t_min); return max(t_min-c_params.eps,0.0f); }/*}}}*/ template <int BW, int BH> __device__ void golden_section_search(const KernPyramidLevel &lvl,/*{{{*/ const int2 &p, float a, float c, float2 v, float2 grad, float2 old_luma, const SSIMData<BW,BH> &ssimdata, float &fmin, float &tmin) { const float R = 0.618033989f, C = 1.0f - R; float b = a*R + c*C, // b between [a,c> x = b*R + c*C; // x between [b,c> float fb = energy_change(lvl, p, v, old_luma, grad*b, ssimdata), fx = energy_change(lvl, p, v, old_luma, grad*x, ssimdata); #pragma unroll 4 while(c - a > c_params.eps) { if(fx < fb) // bracket is [b,x,c] ? { // [a,b,c] = [b,x,c] a = b; b = x; x = b*R + c*C; // x between [b,c> } else // bracket is [a,b,x] ? 
{ // [a,b,c] = [a,b,x] c = x; x = b*R + a*C; // x between <a,b] } float f = energy_change(lvl, p, v, old_luma, grad*x, ssimdata); if(fx < fb) { fb = fx; fx = f; } else { swap(b,x); fx = fb; fb = f; } } if(fx < fb) { tmin = x; fmin = fx; } else { tmin = b; fmin = fb; } }/*}}}*/ // update -------------------------------- template <int BW, int BH> __device__ void ssim_update(KernPyramidLevel &lvl,/*{{{*/ const int2 &p, float2 v, float2 old_luma, SSIMData<BW,BH> &ssimdata) { float2 luma; luma.x = tex2D(tex_img0, p.x-v.x + 0.5f, p.y-v.y + 0.5f), luma.y = tex2D(tex_img1, p.x+v.x + 0.5f, p.y+v.y + 0.5f); int idx = mem_index(lvl,p); lvl.ssim.luma[idx] = luma; float2 dmean = luma - old_luma, dvar = pow2(luma) - pow2(old_luma); float dcross = luma.x*luma.y - old_luma.x*old_luma.y; int2 B = calc_border(p, lvl.pixdim); for(int i=0; i<5; ++i) { int sy = p.y+i-2 - ssimdata.orig.y; for(int j=0; j<5; ++j) { if(c_iomask[B.y][B.x][i][j]) { int sx = p.x+j-2 - ssimdata.orig.x; atomicAdd(&ssimdata.mean[sy][sx], dmean); atomicAdd(&ssimdata.var[sy][sx], dvar); atomicAdd(&ssimdata.cross[sy][sx], dcross); } } } }/*}}}*/ template <int BW, int BH> __device__ void commit_pixel_motion(KernPyramidLevel &lvl, /*{{{*/ const int2 &p, const float2 &newv, const float2 &old_luma, const float2 &grad, SSIMData<BW,BH> &ssimdata) { ssim_update(lvl, p, newv, old_luma, ssimdata); int2 B = calc_border(p, lvl.pixdim); // tps update for(int i=0; i<5; ++i) { for(int j=0; j<5; ++j) { assert(lvl.contains(p.x+j-2,p.y+i-2) || c_tps_data[B.y][B.x][i][j] == 0); int nb = mem_index(lvl, p + make_int2(j,i)-2); atomicAdd(&lvl.tps.b[nb], grad*c_tps_data[B.y][B.x][i][j]); } } int idx = mem_index(lvl,p); // ui update lvl.ui.b[idx] += 2*grad*lvl.ui.axy[idx]; // vector update lvl.v[idx] = newv; }/*}}}*/ // optimization kernel -------------------------- template <int BW, int BH> __device__ bool optimize_pixel(const KernPyramidLevel &lvl,/*{{{*/ const int2 &p, const SSIMData<BW,BH> &ssim, float2 &old_luma, float2 &v, float2 &grad, int &impmask_idx) { if(lvl.contains(p)) { int idx = mem_index(lvl,p); v = lvl.v[idx], old_luma = lvl.ssim.luma[idx]; impmask_idx = get_improve_mask_idx(lvl, p); assert(lvl.contains(p) || lvl.improving_mask[impmask_idx] == 0); if(impmask_idx >= 0) { if(!pixel_on_border(lvl, p)) { grad = compute_gradient(lvl, p, v, old_luma, ssim); // float ng = hypot(grad.x,grad.y); // slower float ng = sqrt(pow2(grad.x)+pow2(grad.y)); if(ng != 0) { grad /= ng; float t = prevent_foldover(lvl, p, v, grad); float tmin, fmin; golden_section_search(lvl, p, 0, t, v, grad, old_luma, ssim, fmin, tmin); if(fmin < 0) { grad *= tmin; v += grad; return true; } } } } } return false; }/*}}}*/ template <template<int,int> class F> __device__ void process_shared_state(F<8,8> fun, const KernPyramidLevel &lvl,/*{{{*/ const int2 &block_orig) { const int BW = 8, BH = 8; /* BW BW 4 ----------------------- | | | | BH | 1 | 2 | 6 | |-----------------|---| | | | | BH | 4 | 3 | 6 | |-----------------|---| | 5 | 5 | 7 | 4 ----------------------- */ // area 1 int sx = threadIdx.x, sy = threadIdx.y; int2 pix = block_orig + make_int2(sx,sy); if(lvl.contains(pix)) fun(pix, sx, sy); // area 2 pix.x += BW; sx += BW; if(lvl.contains(pix)) fun(pix, sx, sy); // area 3 pix.y += BH; sy += BH; if(lvl.contains(pix)) fun(pix, sx, sy); // area 4 pix.x -= BW; sx -= BW; if(lvl.contains(pix)) fun(pix, sx, sy); // area 5 sx = (threadIdx.y/4)*BW + threadIdx.x; sy = threadIdx.y%4 + BH*2; pix.x = block_orig.x+sx; pix.y = block_orig.y+sy; if(lvl.contains(pix)) fun(pix, sx, sy); // area 6 sx 
= threadIdx.x%4 + BW*2; sy = threadIdx.y*(BW/4) + threadIdx.x/4; pix.x = block_orig.x+sx; pix.y = block_orig.y+sy; if(lvl.contains(pix)) fun(pix, sx, sy); // area 7 sy += BH*2; pix.y += BH*2; if(lvl.contains(pix) && sy < BH*2+4) fun(pix, sx, sy); }/*}}}*/ template <template<int,int> class F> __device__ void process_shared_state(F<32,8> fun, const KernPyramidLevel &lvl,/*{{{*/ const int2 &block_orig) { const int BW = 32, BH = 8; int sx = threadIdx.x, sy = threadIdx.y; /* BW BW 4 ----------------------- | | | | BH | 1 | 2 | 6 | |-----------------|---| | | | | BH | 4 | 3 | 6 | |-----------------|---| | 5 | 5 | 6 | 4 ----------------------- */ // area 1 int2 pix = block_orig + make_int2(sx,sy); if(lvl.contains(pix)) fun(pix, sx, sy); // area 2 pix.x += BW; sx += BW; if(lvl.contains(pix)) fun(pix, sx, sy); // area 3 pix.y += BH; sy += BH; if(lvl.contains(pix)) fun(pix, sx, sy); // area 4 pix.x -= BW; sx -= BW; if(lvl.contains(pix)) fun(pix, sx, sy); // area 5 sx = (threadIdx.y/4)*BW + threadIdx.x; sy = threadIdx.y%4 + BH*2; pix.x = block_orig.x+sx; pix.y = block_orig.y+sy; if(lvl.contains(pix) && sx < BW*2+4 && sy < BH*2+4) fun(pix, sx, sy); // area 6 sx = threadIdx.x%4 + BW*2; sy = threadIdx.y*8 + threadIdx.x/4; pix.x = block_orig.x+sx; pix.y = block_orig.y+sy; if(lvl.contains(pix) && sx < BW*2+4 && sy < BH*2+4) fun(pix, sx, sy); }/*}}}*/ template <int BW, int BH> class LoadSSIM/*{{{*/ { public: __device__ LoadSSIM(const KernPyramidLevel &lvl, SSIMData<BW,BH> &ssim) : m_level(lvl), m_ssim(ssim) {} __device__ void operator()(const int2 &pix, int sx, int sy) { int idx = mem_index(m_level, pix); m_ssim.mean[sy][sx] = m_level.ssim.mean[idx]; m_ssim.var[sy][sx] = m_level.ssim.var[idx]; m_ssim.cross[sy][sx] = m_level.ssim.cross[idx]; m_ssim.value[sy][sx] = m_level.ssim.value[idx]; } private: const KernPyramidLevel &m_level; SSIMData<BW,BH> &m_ssim; };/*}}}*/ template <int BW, int BH> class SaveSSIM/*{{{*/ { public: __device__ SaveSSIM(KernPyramidLevel &lvl, const SSIMData<BW,BH> &ssim) : m_level(lvl), m_ssim(ssim) {} __device__ void operator()(const int2 &pix, int sx, int sy) { int idx = mem_index(m_level, pix); m_level.ssim.mean[idx] = m_ssim.mean[sy][sx]; m_level.ssim.var[idx] = m_ssim.var[sy][sx]; m_level.ssim.cross[idx] = m_ssim.cross[sy][sx]; m_level.ssim.value[idx] = m_ssim.value[sy][sx]; } private: KernPyramidLevel &m_level; const SSIMData<BW,BH> &m_ssim; };/*}}}*/ template <int BW, int BH> class UpdateSSIM/*{{{*/ { public: __device__ UpdateSSIM(const KernPyramidLevel &lvl, SSIMData<BW,BH> &ssim) : m_level(lvl), m_ssim(ssim) {} __device__ void operator()(const int2 &pix, int sx, int sy) { int idx = mem_index(m_level, pix); m_ssim.value[sy][sx] = ssim(m_ssim.mean[sy][sx], m_ssim.var[sy][sx], m_ssim.cross[sy][sx], m_level.ssim.counter[idx], c_params.ssim_clamp); } private: const KernPyramidLevel &m_level; SSIMData<BW,BH> &m_ssim; };/*}}}*/ __global__ //__launch_bounds__(OPT_BW*OPT_BH, OPT_NB) void kernel_optimize_level(KernPyramidLevel lvl,/*{{{*/ int offx, int offy, bool *out_improving) { __shared__ SSIMData<OPT_BW,OPT_BH> ssim; { int2 block_orig = make_int2(blockIdx.x*(OPT_BW*2+SPACING)+offx-2, blockIdx.y*(OPT_BH*2+SPACING)+offy-2); if(threadIdx.x == 0 && threadIdx.y == 0) ssim.orig = block_orig; process_shared_state(LoadSSIM<OPT_BW,OPT_BH>(lvl, ssim), lvl, block_orig); } bool improving = false; __syncthreads(); for(int i=0; i<2; ++i) { for(int j=0; j<2; ++j) { int2 p = ssim.orig + make_int2(threadIdx.x*2+j+2, threadIdx.y*2+i+2); float2 old_luma, v, grad; int impmask_idx = -1; bool ok = 
optimize_pixel(lvl, p, ssim, old_luma, v, grad, impmask_idx); int2 offset = p%5; __syncthreads(); if(ok) { commit_pixel_motion(lvl, p, v, old_luma, grad, ssim); improving = true; atomicOr(&lvl.improving_mask[impmask_idx], 1 << (offset.x + offset.y*5)); } else if(impmask_idx >= 0) { atomicAnd(&lvl.improving_mask[impmask_idx], ~(1 << (offset.x + offset.y*5))); } __syncthreads(); process_shared_state(UpdateSSIM<OPT_BW,OPT_BH>(lvl, ssim), lvl, ssim.orig); __syncthreads(); } } process_shared_state(SaveSSIM<OPT_BW,OPT_BH>(lvl, ssim), lvl, ssim.orig); if(improving) *out_improving = true; }/*}}}*/ template <class T> T *addressof(T &v) { return reinterpret_cast<T*>(&const_cast<char &>(reinterpret_cast<const volatile char &>(v))); } bool Morph::optimize_level(int &curiter, int maxiter, int totaliter, PyramidLevel &lvl, int orig_width, int orig_height, int nlevel) const { dim3 bdim(OPT_BW,OPT_BH), gdim((lvl.width+OPT_BW*2+SPACING-1)/(OPT_BW*2+SPACING), (lvl.height+OPT_BH*2+SPACING-1)/(OPT_BH*2+SPACING)); rod::base_timer *timer = NULL; if(m_params.verbose) timer = &rod::timers.gpu_add("optimize",lvl.width*lvl.height,"P"); KernPyramidLevel klvl(lvl); KernParameters kparams(m_params); rod::copy_to_symbol(c_params,kparams); bool *improving = NULL; cudaHostAlloc(&improving, sizeof(bool), cudaHostAllocMapped); rod::check_cuda_error("cudaHostAlloc"); assert(improving != NULL); bool *dimproving = NULL; cudaHostGetDevicePointer(&dimproving, improving, 0); rod::check_cuda_error("cudaHostGetDevicePointer"); int iter = 0; rod::cpu_timer cb_send_image_timer(0,"",false); try { do { if(m_cb != NULL) { std::ostringstream ss; ss << "Optimizing level " << nlevel << " iteration #" << iter+1; rod::dimage<float2> halfway; clock_t now = clock(); if(cb_send_image_timer.is_stopped() || cb_send_image_timer.elapsed() >= 0.2) { cb_send_image_timer.start(); internal_vector_to_image(halfway, lvl.v, lvl, make_float2((float)orig_width/lvl.width, (float)orig_height/lvl.height)); if(orig_width != lvl.width || orig_height != lvl.height) { rod::dimage<float2> temp(orig_width, orig_height); upsample(&temp, &halfway, rod::INTERP_LINEAR); swap(halfway,temp); // move temp to halfway } } if(!m_cb(ss.str(), ++curiter, totaliter, halfway.empty() ? NULL : addressof(halfway), NULL, m_cbdata)) { if(timer) timer->stop(); return false; } } *improving = false; kernel_optimize_level<<<gdim,bdim>>>(klvl, 0,0, dimproving); kernel_optimize_level<<<gdim,bdim>>>(klvl, OPT_BW*2, 0, dimproving); kernel_optimize_level<<<gdim,bdim>>>(klvl, 0, OPT_BH*2, dimproving); kernel_optimize_level<<<gdim,bdim>>>(klvl, OPT_BW*2, OPT_BH*2, dimproving); cudaDeviceSynchronize(); ++iter; } while(*improving && iter <= maxiter); curiter += maxiter-iter; cudaFreeHost(improving); } catch(...) { if(timer) timer->stop(); cb_send_image_timer.stop(); cudaFreeHost(improving); throw; } cb_send_image_timer.stop(); if(timer) timer->stop(); if(m_params.verbose) std::cout << iter << " iterations, grid " << gdim.x << 'x' << gdim.y << std::endl; return true; }
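// ---------------------------------------------------------------------------
// Hedged illustration (not part of the original morph code): a host-side
// golden-section line search showing the same interval-shrinking idea that
// golden_section_search implements per pixel on the device, where the probed
// function is energy_change() along the negated-gradient direction.  The
// callable `f`, the bounds and the tolerance are placeholders supplied by the
// caller.
template <class F>
static float golden_section_minimize(F f, float a, float c, float eps) {
  const float invphi = 0.618033989f;       // 1/golden ratio, as in the kernel
  float b = c - invphi * (c - a);          // left interior probe
  float x = a + invphi * (c - a);          // right interior probe
  float fb = f(b), fx = f(x);
  while (c - a > eps) {
    if (fb < fx) {                         // minimum bracketed by [a, x]
      c = x; x = b; fx = fb;               // old left probe becomes right probe
      b = c - invphi * (c - a); fb = f(b);
    } else {                               // minimum bracketed by [b, c]
      a = b; b = x; fb = fx;               // old right probe becomes left probe
      x = a + invphi * (c - a); fx = f(x);
    }
  }
  return fb < fx ? b : x;                  // abscissa of the better probe
}
// ---------------------------------------------------------------------------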
the_stack
extern __global__ void array_set_kernel(float *output, float value, size_t size); extern __global__ void float_memory_copy(float *A, const float *B, size_t len); extern int Float_Add(float *A, const float *B, int len, DLStreamHandle stream_handle); __global__ void im2col_kernel(int N, int C, int H, int W, int filter_outChannel, int filter_H, int filter_W, const float *input_data_x, float *workspace_data, const int padding, const int stride, const int blocks) { int block_id = blockIdx.x; int thread_id = threadIdx.x; int max_threads_per_block = blockDim.x; int thread_index = block_id * max_threads_per_block + thread_id; int out_H = (H + 2 * padding - filter_H) / stride + 1; int out_W = (W + 2 * padding - filter_W) / stride + 1; for (int i = thread_index; i < N * C * out_H * out_W; i += blocks * max_threads_per_block) { int N_i = i / (C * out_H * out_W); int base_N = N_i * C * out_H * out_W; int C_i = (i - base_N) / (out_H * out_W); int base_C = C_i * out_H * out_W; int out_H_i = (i - base_N - base_C) / out_W; int out_W_i = i % out_W; assert(base_N + base_C + out_H_i * out_W + out_W_i == i); int in_x = out_H_i * stride - padding; int in_y = out_W_i * stride - padding; for (int x = in_x; x < in_x + filter_H; x++) for (int y = in_y; y < in_y + filter_W; y++) { if (x < 0 || x >= H || y < 0 || y >= W) workspace_data[(base_N + base_C) * filter_H * filter_W + ((x - in_x) * filter_W + (y - in_y)) * out_H * out_W + out_H_i * out_W + out_W_i] = 0; else workspace_data[(base_N + base_C) * filter_H * filter_W + ((x - in_x) * filter_W + (y - in_y)) * out_H * out_W + out_H_i * out_W + out_W_i] = input_data_x[(N_i * C + C_i) * H * W + x * W + y]; } } } __global__ void gemm_kernel(const float *A, const float *B, float *C, int rowA, int colA, int rowB, int colB) { int r = blockIdx.y * blockDim.y + threadIdx.y; int c = blockIdx.x * blockDim.x + threadIdx.x; assert(rowB % colA == 0); int K = rowB / colA; if (r >= rowA || c >= colB) return; for (int k = 0; k < K; k++) { float Cvalue = 0.0; for (int e = 0; e < colA; e++) Cvalue += A[r * colA + e] * B[(e + k * colA) * colB + c]; C[(r + k * rowA) * colB + c] = Cvalue; } } int DLGpuConv2d(const DLArrayHandle input_x, const DLArrayHandle input_f, DLArrayHandle output, DLArrayHandle workspace_arr, const int padding, const int stride, DLStreamHandle stream_handle = NULL) { assert(input_x->ndim == 4); assert(input_f->ndim == 4); assert(input_x->shape[1] == input_f->shape[1]); int N = input_x->shape[0]; int C = input_x->shape[1]; int H = input_x->shape[2]; int W = input_x->shape[3]; int filter_outChannel = input_f->shape[0]; // int filter_inChannel = input_f->shape[1]; int filter_H = input_f->shape[2]; int filter_W = input_f->shape[3]; assert((H + 2 * padding - filter_H) % stride == 0); assert((W + 2 * padding - filter_W) % stride == 0); int out_H = (H + 2 * padding - filter_H) / stride + 1; int out_W = (W + 2 * padding - filter_W) / stride + 1; int y_col_size = out_H * out_W; int y_row_size = C * filter_H * filter_W; const float *input_data_x = (const float *)input_x->data; const float *input_data_f = (const float *)input_f->data; float *output_data = (float *)output->data; float *workspace_data = (float *)workspace_arr->data; // get max threads and blocks int dev_id = (input_x->ctx).device_id; ; cudaSetDevice(dev_id); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev_id); int threads = deviceProp.maxThreadsPerBlock; int blocks = deviceProp.maxThreadsPerMultiProcessor / threads * deviceProp.multiProcessorCount; // im2col kernel if (stream_handle) 
im2col_kernel<<<blocks, threads, 0, *(cudaStream_t *)stream_handle->handle>>>( N, C, H, W, filter_outChannel, filter_H, filter_W, input_data_x, workspace_data, padding, stride, blocks); else im2col_kernel<<<blocks, threads>>>( N, C, H, W, filter_outChannel, filter_H, filter_W, input_data_x, workspace_data, padding, stride, blocks); // sgemm const int BLOCK_SIZE = 16; dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid((std::max(y_row_size, y_col_size) + dimBlock.x - 1) / dimBlock.x, (std::max(filter_outChannel, y_row_size) + dimBlock.y - 1) / dimBlock.y); if (stream_handle) gemm_kernel<<<dimGrid, dimBlock, 0, *(cudaStream_t *)stream_handle->handle>>>( input_data_f, workspace_data, output_data, filter_outChannel, y_row_size, N * y_row_size, y_col_size); else gemm_kernel<<<dimGrid, dimBlock>>>( input_data_f, workspace_data, output_data, filter_outChannel, y_row_size, N * y_row_size, y_col_size); return 0; } __global__ void trans_im2col_kernel(int N, int C, int H, int W, int filter_outChannel, int filter_H, int filter_W, float *input_data_x, float *workspace_data, const int padding, const int stride, const int blocks) { int block_id = blockIdx.x; int thread_id = threadIdx.x; int max_threads_per_block = blockDim.x; int thread_index = block_id * max_threads_per_block + thread_id; int out_H = (H + 2 * padding - filter_H) / stride + 1; int out_W = (W + 2 * padding - filter_W) / stride + 1; for (int i = thread_index; i < N * C * out_H * out_W; i += blocks * max_threads_per_block) { int N_i = i / (C * out_H * out_W); int base_N = N_i * C * out_H * out_W; int C_i = (i - base_N) / (out_H * out_W); int base_C = C_i * out_H * out_W; int out_H_i = (i - base_N - base_C) / out_W; int out_W_i = i % out_W; assert(base_N + base_C + out_H_i * out_W + out_W_i == i); int in_x = out_H_i * stride - padding; int in_y = out_W_i * stride - padding; for (int x = in_x; x < in_x + filter_H; x++) for (int y = in_y; y < in_y + filter_W; y++) { if (x < 0 || x >= H || y < 0 || y >= W) workspace_data[(base_N + base_C) * filter_H * filter_W + ((x - in_x) * filter_W + (y - in_y)) * out_H * out_W + out_H_i * out_W + out_W_i] = 0; else atomicAdd( &input_data_x[(N_i * C + C_i) * H * W + x * W + y], workspace_data[(base_N + base_C) * filter_H * filter_W + ((x - in_x) * filter_W + (y - in_y)) * out_H * out_W + out_H_i * out_W + out_W_i]); } } } __global__ void transA_gemm_kernel(const float *A, const float *B, float *C, int rowA, int colA, int rowB, int colB) { size_t r = blockIdx.x * blockDim.x + threadIdx.x; size_t c = blockIdx.y * blockDim.y + threadIdx.y; if (r >= colA || c >= colB) return; assert(rowB % rowA == 0); size_t batch_size = rowB / rowA; // output shape(output_batch, filter_col_size, output_col_size) for (int i = 0; i < batch_size; i++) { float tmp = 0; // C[batch_size][colA][colB] -> C[i][r][c] for (int j = 0; j < rowA; j++) // A[j][r] * B[i][j][c] tmp += A[j * colA + r] * B[i * rowA * colB + j * colB + c]; C[i * colA * colB + r * colB + c] = tmp; } } __global__ void batch_transB_gemm_kernel(const float *A, const float *B, float *C, int rowA, int colA, int rowB, int colB, int batch_size) { size_t r = blockIdx.x * blockDim.x + threadIdx.x; size_t c = blockIdx.y * blockDim.y + threadIdx.y; if (r >= rowA || c >= rowB) return; assert(colA == colB); // output shape(batch_size, filter_row_size, filter_col_size) for (int i = 0; i < batch_size; i++) { float tmp = 0; // C[batch_size][rowA][rowB] -> C[i][r][c] for (int j = 0; j < colA; j++) // A[i][r][j] * B[i][c][j] tmp += A[i * rowA * colB + r * colB + j] * B[i * rowB * 
colB + c * colB + j]; C[i * rowA * rowB + r * rowB + c] = tmp; } } int DLGpuConv2d_Gradient_of_Data(const DLArrayHandle input_f, const DLArrayHandle gradient_y, DLArrayHandle gradient_x, DLArrayHandle workspace_im2col, const int padding, const int stride, DLStreamHandle stream_handle = NULL) { size_t input_N = gradient_x->shape[0]; size_t input_C = gradient_x->shape[1]; size_t input_H = gradient_x->shape[2]; size_t input_W = gradient_x->shape[3]; size_t filter_outChannel = input_f->shape[0]; size_t filter_inChannel = input_f->shape[1]; size_t filter_H = input_f->shape[2]; size_t filter_W = input_f->shape[3]; size_t output_N = gradient_y->shape[0]; size_t output_C = gradient_y->shape[1]; size_t output_H = gradient_y->shape[2]; size_t output_W = gradient_y->shape[3]; float *gradient_x_data = (float *)gradient_x->data; float *output_data = (float *)gradient_y->data; size_t output_batch = output_N; size_t output_row_size = output_C; size_t output_col_size = output_H * output_W; const float *filter_data = (const float *)input_f->data; size_t filter_row_size = filter_outChannel; size_t filter_col_size = filter_inChannel * filter_H * filter_W; float *gradient_im2col_XX; gradient_im2col_XX = (float *)workspace_im2col->data; // output size (output_N, filter_C * filter_H * filter_W, output_H * // output*W) == (output_batch, filter_col_size, output_col_size) const int BLOCK_SIZE = 16; dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid((filter_col_size + BLOCK_SIZE - 1) / dimBlock.x, (output_col_size + BLOCK_SIZE - 1) / dimBlock.y); if (stream_handle) transA_gemm_kernel<<<dimGrid, dimBlock, 0, *(cudaStream_t *)stream_handle->handle>>>( filter_data, output_data, gradient_im2col_XX, filter_row_size, filter_col_size, output_batch * output_row_size, output_col_size); else transA_gemm_kernel<<<dimGrid, dimBlock>>>( filter_data, output_data, gradient_im2col_XX, filter_row_size, filter_col_size, output_batch * output_row_size, output_col_size); // get max threads and blocks int dev_id = (input_f->ctx).device_id; cudaSetDevice(dev_id); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev_id); int threads = deviceProp.maxThreadsPerBlock; int blocks = deviceProp.maxThreadsPerMultiProcessor / threads * deviceProp.multiProcessorCount; // get the gradient of input_x size_t numthread = input_N * input_C * input_H * input_W; size_t numblocks = (numthread + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; if (stream_handle) array_set_kernel<<<numblocks, THREADS_PER_BLOCK, 0, *(cudaStream_t *)stream_handle->handle>>>( gradient_x_data, 0, numthread); else array_set_kernel<<<numblocks, THREADS_PER_BLOCK>>>(gradient_x_data, 0, numthread); if (stream_handle) trans_im2col_kernel<<<blocks, threads, 0, *(cudaStream_t *)stream_handle->handle>>>( input_N, input_C, input_H, input_W, filter_outChannel, filter_H, filter_W, gradient_x_data, gradient_im2col_XX, padding, stride, blocks); else trans_im2col_kernel<<<blocks, threads>>>( input_N, input_C, input_H, input_W, filter_outChannel, filter_H, filter_W, gradient_x_data, gradient_im2col_XX, padding, stride, blocks); return 0; } int DLGpuConv2d_Gradient_of_Filter(const DLArrayHandle input_x, const DLArrayHandle gradient_y, DLArrayHandle gradient_f, DLArrayHandle workspace_im2col, DLArrayHandle workspace_batch_filter, const int padding, const int stride, DLStreamHandle stream_handle = NULL) { size_t input_N = input_x->shape[0]; size_t input_C = input_x->shape[1]; size_t input_H = input_x->shape[2]; size_t input_W = input_x->shape[3]; size_t filter_outChannel = 
gradient_f->shape[0]; size_t filter_inChannel = gradient_f->shape[1]; size_t filter_H = gradient_f->shape[2]; size_t filter_W = gradient_f->shape[3]; size_t output_N = gradient_y->shape[0]; size_t output_C = gradient_y->shape[1]; size_t output_H = gradient_y->shape[2]; size_t output_W = gradient_y->shape[3]; const float *input_x_data = (const float *)input_x->data; float *gradient_f_data = (float *)gradient_f->data; float *output_data = (float *)gradient_y->data; size_t output_batch = output_N; size_t output_row_size = output_C; size_t output_col_size = output_H * output_W; size_t filter_row_size = filter_outChannel; size_t filter_col_size = filter_inChannel * filter_H * filter_W; // get max threads and blocks int dev_id = (input_x->ctx).device_id; cudaSetDevice(dev_id); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev_id); int threads = deviceProp.maxThreadsPerBlock; int blocks = deviceProp.maxThreadsPerMultiProcessor / threads * deviceProp.multiProcessorCount; float *im2col_XX; im2col_XX = (float *)workspace_im2col->data; if (stream_handle) im2col_kernel<<<blocks, threads, 0, *(cudaStream_t *)stream_handle->handle>>>( input_N, input_C, input_H, input_W, filter_outChannel, filter_H, filter_W, input_x_data, im2col_XX, padding, stride, blocks); else im2col_kernel<<<blocks, threads>>>( input_N, input_C, input_H, input_W, filter_outChannel, filter_H, filter_W, input_x_data, im2col_XX, padding, stride, blocks); size_t im2col_XX_row = filter_col_size; size_t im2col_XX_col = output_col_size; float *batch_filter; // batch_filter = new float[input_N * filter_row_size * filter_col_size]; batch_filter = (float *)(workspace_batch_filter->data); const int BLOCK_SIZE = 16; dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid((filter_row_size + BLOCK_SIZE - 1) / dimBlock.x, (filter_col_size + BLOCK_SIZE - 1) / dimBlock.y); if (stream_handle) batch_transB_gemm_kernel<<<dimGrid, dimBlock, 0, *(cudaStream_t *)stream_handle->handle>>>( output_data, im2col_XX, batch_filter, output_row_size, output_col_size, im2col_XX_row, im2col_XX_col, output_batch); else batch_transB_gemm_kernel<<<dimGrid, dimBlock>>>( output_data, im2col_XX, batch_filter, output_row_size, output_col_size, im2col_XX_row, im2col_XX_col, output_batch); size_t total = filter_row_size * filter_col_size; while (output_batch != 1) { Float_Add(batch_filter, batch_filter + (output_batch + 1) / 2 * total, output_batch / 2 * total, stream_handle); output_batch = (output_batch + 1) / 2; } size_t BLOCKS = (total + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; if (stream_handle) float_memory_copy<<<BLOCKS, THREADS_PER_BLOCK, 0, *(cudaStream_t *)stream_handle->handle>>>( gradient_f_data, batch_filter, total); else float_memory_copy<<<BLOCKS, THREADS_PER_BLOCK>>>(gradient_f_data, batch_filter, total); return 0; }
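// ---------------------------------------------------------------------------
// Hedged illustration (not part of the original kernels): the filter-gradient
// path above folds the per-image partial gradients stored in batch_filter with
// a pairwise halving loop around Float_Add, which is assumed here to add its
// second argument element-wise into its first.  The plain CPU sketch below
// reproduces the same index arithmetic so the reduction is easy to verify;
// `chunks` plays the role of output_batch and `total` is the number of floats
// in one partial filter gradient.
static void halving_reduce_reference(float *data, int chunks, int total) {
  while (chunks != 1) {
    int keep = (chunks + 1) / 2;           // chunks surviving this round
    int fold = chunks / 2;                 // chunks folded into the survivors
    for (int c = 0; c < fold; ++c)         // chunk c += chunk (keep + c)
      for (int i = 0; i < total; ++i)
        data[c * total + i] += data[(keep + c) * total + i];
    chunks = keep;
  }
  // On exit, chunk 0 holds the sum over all original chunks -- the value the
  // GPU path finally copies into gradient_f_data with float_memory_copy.
}
// ---------------------------------------------------------------------------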
the_stack
#include "ew_op_gpu.h" #include "gpu_hmma.h" #include <stdio.h> template <uint OP_A, bool N64> __global__ void __launch_bounds__(128,6) bst_sgemm_32x64x32_xn( const uint2* __restrict__ Lut, const bhalf* __restrict__ A, const float* __restrict__ B, float* C, uint szCtxHeadStateB, uint szCtxHeadStateC, uint szHeadState, uint szState, uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut, uint grid_M, uint grid_N, uint magic_N, uint shift_N) { __shared__ float fShare[(33 + 64)*32]; uint2* Lut2s = (uint2*)&fShare[(33 + 64)*32]; char* bShare = (char*)&fShare; uint tid = threadIdx.x; uint idx_MN = blockIdx.x; // compound outer product dims uint idx_M = div64(idx_MN, magic_N, shift_N); // idx_M = idx_MN / grid_N; uint idx_N = idx_MN - idx_M*grid_N; // idx_N = idx_MN % grid_N; uint idx_B = blockIdx.y; // batch dim uint idx_H = blockIdx.z; // head dim // assume lower diagonal and schedule large reductions first if (OP_A == OP_N) idx_M = grid_M - idx_M; // each head can optionally have its own lut Lut += idx_H*szLut; uint2 lut_head = Lut[idx_M]; uint lut_offset = lut_head.x; uint lut_size = lut_head.y; uint txb = tid % 16; uint tyb = tid / 16; if (lut_size > 0) { // prefetch the lut data into shared Lut += lut_offset; #pragma unroll 1 for (uint i = tid; i < lut_size; i += 128) { uint2 entry = Lut[i]; entry.x *= 32*32; // 1024 entries of A per block entry.y *= szHeadState*32; // 32 lines of B per block Lut2s[i] = entry; } __syncthreads(); uint txa = tid % 8; uint tya = tid / 8; uint tid16 = tid & 16; uint tid96 = tid & 96; uint loadB = ((tid / 2) % 8) * 4*4; uint loadA = (tid % 2) * 4*4; // each warp handles a quarter of the weights loadA += tid96; // second half of warp starts 16 rows down loadB += tid16 * 64*4; loadA += tid16 * 32*4; uint storB = (tyb*64 + txb*4) * 4; uint storA; if (OP_A == OP_T) storA = tid * 4*4; else { // Transpose weights on store to shared // Avoid bank conflicts by shifting writes over by 4 every 4 rows (+txa*4) storA = (txa*32*4 + tya + txa*4) * 4; loadA += tid16 * 4; // shift over 4 floats every 4 rows, second half of warp starts 16 rows down } uint b = idx_N*64 + txb*4; uint offsetA = idx_B*szHeadBlocksBlk + idx_H*szBlocksBlk + tid*4; uint offsetB = idx_B*szCtxHeadStateB + tyb*szHeadState + idx_H*szState + b; bool inB = N64 || b < szState; // zero accumulation registers float regC[4][8]; for (int i = 0; i < 4; i++) for (int j = 0; j < 8; j++) regC[i][j] = 0.0f; // Force compiler to fully compute these prior to loop asm("mov.b32 %0, %0;" : "+r"(loadA) : ); asm("mov.b32 %0, %0;" : "+r"(loadB) : ); asm("mov.b32 %0, %0;" : "+r"(storA) : ); asm("mov.b32 %0, %0;" : "+r"(storB) : ); asm("mov.b32 %0, %0;" : "+r"(offsetA) : ); asm("mov.b32 %0, %0;" : "+r"(offsetB) : ); int idx_lut = 0; #pragma unroll 1 do { //asm volatile (".pragma \"nounroll\";"::); // ptxas, don't get clever uint2 entry = Lut2s[idx_lut]; const bhalf* pA = add_ptr_u(A, entry.x + offsetA); bhalf4 a00 = __ldg((const bhalf4*)(pA + 0*32)); bhalf4 a16 = __ldg((const bhalf4*)(pA + 16*32)); float4 b00 = {0.0f}, b08 = {0.0f}, b16 = {0.0f}, b24 = {0.0f}; entry.y += offsetB; if (inB) { b00 = __ldg((const float4*)(B + (entry.y + 0*szHeadState))); b08 = __ldg((const float4*)(B + (entry.y + 8*szHeadState))); b16 = __ldg((const float4*)(B + (entry.y + 16*szHeadState))); b24 = __ldg((const float4*)(B + (entry.y + 24*szHeadState))); } __syncthreads(); float4 fa00 = to_float(a00); float4 fa16 = to_float(a16); if (OP_A == OP_T) { *(float4*)&bShare[storA + (0*16*32 + 64*32)*4] = fa00; *(float4*)&bShare[storA + (1*16*32 + 64*32)*4] = 
fa16; } else { // transpose the shared store of W *(float*)&bShare[storA + (0*32 + 0*16 + 64*32)*4] = fa00.x; *(float*)&bShare[storA + (1*32 + 0*16 + 64*32)*4] = fa00.y; *(float*)&bShare[storA + (2*32 + 0*16 + 64*32)*4] = fa00.z; *(float*)&bShare[storA + (3*32 + 0*16 + 64*32)*4] = fa00.w; *(float*)&bShare[storA + (0*32 + 1*16 + 64*32)*4] = fa16.x; *(float*)&bShare[storA + (1*32 + 1*16 + 64*32)*4] = fa16.y; *(float*)&bShare[storA + (2*32 + 1*16 + 64*32)*4] = fa16.z; *(float*)&bShare[storA + (3*32 + 1*16 + 64*32)*4] = fa16.w; } *(float4*)&bShare[storB + 0*64*4] = b00; *(float4*)&bShare[storB + 8*64*4] = b08; *(float4*)&bShare[storB + 16*64*4] = b16; *(float4*)&bShare[storB + 24*64*4] = b24; __syncthreads(); // computes a 32x64x32 gemm tile with 4x8 register blocking float regA[4]; float regB[8]; #pragma unroll for (int j = 0; j < 16; j++) { // fetch outer product data *(float4*)&regA[0] = *(float4*)&bShare[loadA + (32*j + 64*32 + (OP_A == OP_T ? 0 : (j/4)*4))*4]; // shift over 4 floats every 4 rows *(float4*)&regB[0] = *(float4*)&bShare[loadB + (64*j + 0)*4]; *(float4*)&regB[4] = *(float4*)&bShare[loadB + (64*j + 32)*4]; // accumulate outer product for (int i = 0; i < 4; i++) for (int j = 0; j < 8; j++) regC[i][j] += regA[i] * regB[j]; } } while (++idx_lut < lut_size); asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid ) :); asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(idx_MN) :); asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_B) :); asm volatile ("mov.u32 %0, %ctaid.z;" : "=r"(idx_H) :); idx_M = div64(idx_MN, magic_N, shift_N); idx_N = idx_MN - idx_M*grid_N; if (OP_A == OP_N) idx_M = grid_M - idx_M; // printf("%3d %.0f %.0f %.0f %.0f | %.0f %.0f %.0f %.0f | %.0f %.0f %.0f %.0f | %.0f %.0f %.0f %.0f\n", tid, // regC[0][0], regC[0][1], regC[0][2], regC[0][3], // regC[1][0], regC[1][1], regC[1][2], regC[1][3], // regC[2][0], regC[2][1], regC[2][2], regC[2][3], // regC[3][0], regC[3][1], regC[3][2], regC[3][3]); tid16 = tid & 16; tid96 = tid & 96; uint tn = (tid / 2) % 8; uint tm = ((tid % 2) + (tid96 / 16))*4 + (tid16 / 16); bool t16 = tid16 != 0; float outC[2][8]; for (int i = 0; i < 2; i++) for (int j = 0; j < 8; j++) { float swap = t16 ? regC[2*i + 0][j] : regC[2*i + 1][j]; outC[i][j] = t16 ? 
regC[2*i + 1][j] : regC[2*i + 0][j]; outC[i][j] += shfl_xor(swap, 16); } uint n = idx_N*64 + tn*4; bool bn00 = N64 || n + 0 < szState; bool bn32 = N64 || n + 32 < szState; uint offsetC = idx_B*szCtxHeadStateC + (idx_M*32 + tm)*szHeadState + idx_H*szState + n; store((float4*)(C + (offsetC + szHeadState*0 + 0)), *(float4*)&outC[0][0], 0, bn00); store((float4*)(C + (offsetC + szHeadState*0 + 32)), *(float4*)&outC[0][4], 0, bn32); store((float4*)(C + (offsetC + szHeadState*2 + 0)), *(float4*)&outC[1][0], 0, bn00); store((float4*)(C + (offsetC + szHeadState*2 + 32)), *(float4*)&outC[1][4], 0, bn32); } else { uint c = idx_N*64 + txb*4; uint offsetC = idx_B*szCtxHeadStateC + (idx_M*32 + tyb)*szHeadState + idx_H*szState + c; if (N64 || c < szState) { float4 zero = {0.0f}; *(float4*)&C[offsetC + szHeadState* 0] = zero; *(float4*)&C[offsetC + szHeadState* 8] = zero; *(float4*)&C[offsetC + szHeadState*16] = zero; *(float4*)&C[offsetC + szHeadState*24] = zero; } } } template <bool K64> __global__ void __launch_bounds__(256,3) bst_sgemm_32x32x64_nt( const uint2* __restrict__ Lut, const float* __restrict__ A, const float* __restrict__ B, bhalf* C, uint szCtxHeadStateA, uint szCtxHeadStateB, uint szHeadState, uint szState, uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut, uint loops) { __shared__ float fShare[65*32*2]; char* bShare = (char*)fShare; uint tid = threadIdx.x; uint bid = blockIdx.x; // blockid uint idx_B = blockIdx.y; // batch dim uint idx_H = blockIdx.z; // head dim // each head can optionally have its own lut uint2 lut_head = Lut[idx_H*szLut + bid]; uint tx = tid % 16; uint ty = tid / 16; uint k = tx * 4; uint idx_M = lut_head.x; uint idx_N = lut_head.y; uint offsetA00 = idx_B*szCtxHeadStateA + (idx_M*32 + ty)*szHeadState + idx_H*szState + k; uint offsetB00 = idx_B*szCtxHeadStateB + (idx_N*32 + ty)*szHeadState + idx_H*szState + k; uint offsetA16 = offsetA00 + szHeadState*16; uint offsetB16 = offsetB00 + szHeadState*16; uint tid224 = tid & 224; // 256 - 32 // avoid bank conflicts when writing transpose (+ tx*2) uint storAB = (tx*32*4 + ty + tx*2)*4; // 32 threads per tile, each tile reads 8 lines, shifted over by 4 uint loadA = (((tid & 16) >> 3) | (tid & 1)) << 4; uint loadB = ((tid >> 1) & 7) << 4; loadA += (tid224 * 32) + (tid224 / 2); // 32*8*4 loadB += (tid224 * 32) + (tid224 / 2); // 32*8*4 // This keeps all prior logic outside of the loops. 
asm("mov.b32 %0, %0;" : "+r"(storAB) : ); asm("mov.b32 %0, %0;" : "+r"(loadA) : ); asm("mov.b32 %0, %0;" : "+r"(loadB) : ); float regC[8][4]; for (int i = 0; i < 8; i++) for (int j = 0; j < 4; j++) regC[i][j] = 0.0f; uint loop = 0; #pragma unroll 1 do { float4 a00 = {0}, a16 = {0}; float4 b00 = {0}, b16 = {0}; if (K64 || k < szState) { a00 = __ldg((const float4*)(add_ptr_u(A, offsetA00))); a16 = __ldg((const float4*)(add_ptr_u(A, offsetA16))); b00 = __ldg((const float4*)(add_ptr_u(B, offsetB00))); b16 = __ldg((const float4*)(add_ptr_u(B, offsetB16))); } offsetA00 += 64; offsetA16 += 64; offsetB00 += 64; offsetB16 += 64; if (!K64) k += 64; __syncthreads(); *(float*)&bShare[storAB + (0*32 + 0 + 0*65*32)*4] = a00.x; *(float*)&bShare[storAB + (1*32 + 0 + 0*65*32)*4] = a00.y; *(float*)&bShare[storAB + (2*32 + 0 + 0*65*32)*4] = a00.z; *(float*)&bShare[storAB + (3*32 + 0 + 0*65*32)*4] = a00.w; *(float*)&bShare[storAB + (0*32 + 16 + 0*65*32)*4] = a16.x; *(float*)&bShare[storAB + (1*32 + 16 + 0*65*32)*4] = a16.y; *(float*)&bShare[storAB + (2*32 + 16 + 0*65*32)*4] = a16.z; *(float*)&bShare[storAB + (3*32 + 16 + 0*65*32)*4] = a16.w; *(float*)&bShare[storAB + (0*32 + 0 + 1*65*32)*4] = b00.x; *(float*)&bShare[storAB + (1*32 + 0 + 1*65*32)*4] = b00.y; *(float*)&bShare[storAB + (2*32 + 0 + 1*65*32)*4] = b00.z; *(float*)&bShare[storAB + (3*32 + 0 + 1*65*32)*4] = b00.w; *(float*)&bShare[storAB + (0*32 + 16 + 1*65*32)*4] = b16.x; *(float*)&bShare[storAB + (1*32 + 16 + 1*65*32)*4] = b16.y; *(float*)&bShare[storAB + (2*32 + 16 + 1*65*32)*4] = b16.z; *(float*)&bShare[storAB + (3*32 + 16 + 1*65*32)*4] = b16.w; __syncthreads(); float regA[8], regB[4]; #pragma unroll for (int j = 0; j < 4; j++) { // fetch outer product data *(float4*)&regA[0] = *(float4*)&bShare[loadA + (32*j + 0)*4]; *(float4*)&regA[4] = *(float4*)&bShare[loadA + (32*j + 16)*4]; *(float4*)&regB[0] = *(float4*)&bShare[loadB + (32*j + 65*32)*4]; for (int i = 0; i < 8; i++) for (int j = 0; j < 4; j++) regC[i][j] += regA[i] * regB[j]; } #pragma unroll for (int j = 4; j < 8; j++) { *(float2*)&regA[0] = *(float2*)&bShare[loadA + (32*j + 0 + (j/4)*2)*4]; *(float2*)&regA[2] = *(float2*)&bShare[loadA + (32*j + 2 + (j/4)*2)*4]; *(float2*)&regA[4] = *(float2*)&bShare[loadA + (32*j + 16 + (j/4)*2)*4]; *(float2*)&regA[6] = *(float2*)&bShare[loadA + (32*j + 18 + (j/4)*2)*4]; *(float2*)&regB[0] = *(float2*)&bShare[loadB + (32*j + 0 + (j/4)*2 + 65*32)*4]; *(float2*)&regB[2] = *(float2*)&bShare[loadB + (32*j + 2 + (j/4)*2 + 65*32)*4]; for (int i = 0; i < 8; i++) for (int j = 0; j < 4; j++) regC[i][j] += regA[i] * regB[j]; } } while (++loop < loops); asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid) :); asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(bid) :); asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_B) :); asm volatile ("mov.u32 %0, %ctaid.z;" : "=r"(idx_H) :); //printf("%3d %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f\n", tid, regC[0][0], regC[0][1], regC[0][2], regC[0][3], regC[4][0], regC[4][1], regC[4][2], regC[4][3]); // if ((tid & 31) == 0) // printf("%3d %.0f\n", tid, regC[0][0]); C += idx_B*szHeadBlocksBlk + idx_H*szBlocksBlk + bid*32*32 + tid*2; // Arrange 8 tiles horizontally in the X direction: ((tid & 224) >> 1) // Add some spacing to avoid write bank conflicts: (ty << 2) ty = ((tid & 16) >> 3) + (tid & 1); tx = ((tid >> 1) & 7) + ((tid & 224) >> 2) + (ty << 2); uint storC = ty*32*8*4 + tx*4; tx = tid % 16; ty = tid / 16; uint readC = ty*32*8 + tx*2 + ((tid & 192)>>2); __syncthreads(); *(float4*)&fShare[storC + 0*32*8] = *(float4*)regC[0]; 
*(float4*)&fShare[storC + 1*32*8] = *(float4*)regC[1]; *(float4*)&fShare[storC + 2*32*8] = *(float4*)regC[2]; *(float4*)&fShare[storC + 3*32*8] = *(float4*)regC[3]; __syncthreads(); float2 c2[8]; for (int i = 0; i < 8; i++) c2[i] = *(float2*)&fShare[readC + i*32]; // Tree reduce for (int j = 4; j > 0; j >>= 1) for (int i = 0; i < j; i++) c2[i] = ew_add(c2[i], c2[i+j]); store((bhalf2*)C, c2[0]); __syncthreads(); *(float4*)&fShare[storC + 0*32*8] = *(float4*)regC[4]; *(float4*)&fShare[storC + 1*32*8] = *(float4*)regC[5]; *(float4*)&fShare[storC + 2*32*8] = *(float4*)regC[6]; *(float4*)&fShare[storC + 3*32*8] = *(float4*)regC[7]; __syncthreads(); for (int i = 0; i < 8; i++) c2[i] = *(float2*)&fShare[readC + i*32]; // Tree reduce for (int j = 4; j > 0; j >>= 1) for (int i = 0; i < j; i++) c2[i] = ew_add(c2[i], c2[i+j]); store((bhalf2*)(C + 16*32), c2[0]); } bool bst_sgemm_xn(CUstream stream, const uint2* lut, const bhalf* a, const float* b, float* c, uint block_size, uint blocks, uint batch_dim, uint ctx_blks_b, uint ctx_blks_c, uint head_dim, uint state_dim, uint lut_heads, uint lut_dim, uint op, uint magic, uint shift, uint max_lut) { uint szState = state_dim; uint szHeadState = head_dim * szState; uint szCtxHeadStateB = ctx_blks_b * block_size * szHeadState; uint szCtxHeadStateC = ctx_blks_c * block_size * szHeadState; uint szBlocksBlk = blocks * block_size * block_size; uint szHeadBlocksBlk = head_dim * szBlocksBlk; // if just one lut head, broadcast block-sparsity to all heads uint szLut = lut_heads > 1 ? lut_dim : 0; // compound gridDim.x with m and n coords uint gridN = CEIL_DIV(state_dim, 64); uint gridM = ctx_blks_c - 1; uint gridX = ctx_blks_c * gridN; uint shared = ((max_lut+1)/2)*2*8; // round up to nearest even, 8 bytes per entry bool n64 = (state_dim & 63) == 0; dim3 grid(gridX, batch_dim, head_dim); if (block_size == 32) { if (op == NN_OP) // NN { if (n64) bst_sgemm_32x64x32_xn<OP_N, true><<<grid,128,shared,stream>>>(lut, a, b, c, szCtxHeadStateB, szCtxHeadStateC, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift); else bst_sgemm_32x64x32_xn<OP_N,false><<<grid,128,shared,stream>>>(lut, a, b, c, szCtxHeadStateB, szCtxHeadStateC, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift); } else // TN { if (n64) bst_sgemm_32x64x32_xn<OP_T, true><<<grid,128,shared,stream>>>(lut, a, b, c, szCtxHeadStateB, szCtxHeadStateC, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift); else bst_sgemm_32x64x32_xn<OP_T,false><<<grid,128,shared,stream>>>(lut, a, b, c, szCtxHeadStateB, szCtxHeadStateC, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift); } } return true; } bool bst_sgemm_nt(CUstream stream, const uint2* lut, const float* a, const float* b, bhalf* c, uint block_size, uint blocks, uint batch_dim, uint ctx_blks_a, uint ctx_blks_b, uint head_dim, uint state_dim, uint lut_heads, uint lut_dim) { uint szState = state_dim; uint szHeadState = head_dim * szState; uint szCtxHeadStateA = ctx_blks_a * block_size * szHeadState; uint szCtxHeadStateB = ctx_blks_b * block_size * szHeadState; uint szBlocksBlk = blocks * block_size * block_size; uint szHeadBlocksBlk = head_dim * szBlocksBlk; // if just one lut head, broadcast block-sparsity to all heads uint szLut = lut_heads > 1 ? 
lut_dim : 0; uint loops = CEIL_DIV(state_dim, 64); bool k64 = (state_dim & 63) == 0; dim3 grid(blocks, batch_dim, head_dim); if (block_size == 32) { if (k64) bst_sgemm_32x32x64_nt< true><<<grid,256,0,stream>>>(lut, a, b, c, szCtxHeadStateA, szCtxHeadStateB, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, loops); else bst_sgemm_32x32x64_nt<false><<<grid,256,0,stream>>>(lut, a, b, c, szCtxHeadStateA, szCtxHeadStateB, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, loops); } return true; } #endif // GOOGLE_CUDA
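bst_sgemm_32x64x32_xn splits the compound blockIdx.x into (idx_M, idx_N) via div64(idx_MN, magic_N, shift_N), i.e. an integer division by grid_N replaced by a multiply and shift with precomputed constants. The host-side sketch below illustrates the general magic/shift trick with a brute-force search plus an exhaustive check over the index range; it is an illustration under my own assumptions, not the exact encoding that the real div64 or the caller of bst_sgemm_xn uses to produce `magic` and `shift`.

#include <cstdint>
#include <cstdio>

// Search for a (magic, shift) pair such that (n * magic) >> shift == n / d
// for every n in [0, n_max], verified by brute force.
bool find_magic(uint32_t d, uint32_t n_max, uint32_t *magic, uint32_t *shift) {
    for (uint32_t s = 0; s < 32; ++s) {
        uint64_t m = ((1ull << s) + d - 1) / d;          // ceil(2^s / d)
        bool ok = true;
        for (uint64_t n = 0; n <= n_max && ok; ++n)
            ok = ((n * m) >> s) == n / d;
        if (ok) { *magic = (uint32_t)m; *shift = s; return true; }
    }
    return false;
}

int main() {
    uint32_t grid_M = 48, grid_N = 7;                    // hypothetical launch shape
    uint32_t magic, shift;
    if (find_magic(grid_N, grid_M * grid_N - 1, &magic, &shift))
        printf("grid_N=%u -> magic=%u shift=%u\n", grid_N, magic, shift);
    // Kernel side (conceptually): idx_M = (idx_MN * magic) >> shift;
    //                             idx_N = idx_MN - idx_M * grid_N;
    return 0;
}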
the_stack
#pragma once #include <cuda.h> #include <gunrock/util/cuda_properties.cuh> #include <gunrock/util/vector_types.cuh> namespace gunrock { namespace util { namespace io { /** * Enumeration of data movement cache modifiers. */ namespace ld { enum CacheModifier { NONE, // default (currently ca) cg, // cache global ca, // cache all cs, // cache streaming LIMIT }; } // namespace ld #define CacheModifierToString(modifier) \ (((int)modifier == b40c::util::io::ld::NONE) \ ? "util::io::ld::NONE" \ : ((int)modifier == b40c::util::io::ld::cg) \ ? "util::io::ld::cg" \ : ((int)modifier == b40c::util::io::ld::ca) \ ? "util::io::ld::ca" \ : ((int)modifier == b40c::util::io::ld::cs) \ ? "util::io::ld::cs" \ : ((int)modifier == b40c::util::io::st::NONE) \ ? "util::io::st::NONE" \ : ((int)modifier == b40c::util::io::st::cg) \ ? "util::io::st::cg" \ : ((int)modifier == \ b40c::util::io::st::wb) \ ? "util::io::st::wb" \ : ((int)modifier == \ b40c::util::io::st::cs) \ ? "util::io::st::cs" \ : "<ERROR>") /** * Basic utility for performing modified loads through cache. */ template <ld::CacheModifier CACHE_MODIFIER> struct ModifiedLoad { /* * Load operation we will provide specializations for */ template <typename T> __device__ __forceinline__ static void Ld(T &val, T *ptr); /* * Vec-4 loads for 64-bit types are implemented as two vec-2 loads */ __device__ __forceinline__ static void Ld(double4 &val, double4 *ptr) { ModifiedLoad<CACHE_MODIFIER>::Ld(*reinterpret_cast<double2 *>(&val.x), reinterpret_cast<double2 *>(ptr)); ModifiedLoad<CACHE_MODIFIER>::Ld(*reinterpret_cast<double2 *>(&val.z), reinterpret_cast<double2 *>(ptr) + 1); } __device__ __forceinline__ static void Ld(ulonglong4 &val, ulonglong4 *ptr) { ModifiedLoad<CACHE_MODIFIER>::Ld(*reinterpret_cast<ulonglong2 *>(&val.x), reinterpret_cast<ulonglong2 *>(ptr)); ModifiedLoad<CACHE_MODIFIER>::Ld(*reinterpret_cast<ulonglong2 *>(&val.z), reinterpret_cast<ulonglong2 *>(ptr) + 1); } __device__ __forceinline__ static void Ld(longlong4 &val, longlong4 *ptr) { ModifiedLoad<CACHE_MODIFIER>::Ld(*reinterpret_cast<longlong2 *>(&val.x), reinterpret_cast<longlong2 *>(ptr)); ModifiedLoad<CACHE_MODIFIER>::Ld(*reinterpret_cast<longlong2 *>(&val.z), reinterpret_cast<longlong2 *>(ptr) + 1); } }; #if __CUDA_ARCH__ >= 200 /** * Specialization for NONE modifier */ template <> template <typename T> __device__ __forceinline__ void ModifiedLoad<ld::NONE>::Ld(T &val, T *ptr) { val = *ptr; } /** * Singleton load op */ #define GR_LOAD(base_type, ptx_type, reg_mod, cast_type, modifier) \ template <> \ template <> \ void ModifiedLoad<ld::modifier>::Ld(base_type &val, base_type *ptr) { \ asm volatile("ld.global." #modifier "." #ptx_type " %0, [%1];" \ : "=" #reg_mod(reinterpret_cast<cast_type &>(val)) \ : _GR_ASM_PTR_(ptr)); \ } /** * Vector load ops */ #define GR_LOAD_VEC1(base_type, ptx_type, reg_mod, cast_type, modifier) \ template <> \ template <> \ void ModifiedLoad<ld::modifier>::Ld(base_type &val, base_type *ptr) { \ asm volatile("ld.global." #modifier "." #ptx_type " %0, [%1];" \ : "=" #reg_mod(reinterpret_cast<cast_type &>(val.x)) \ : _GR_ASM_PTR_(ptr)); \ } #define GR_LOAD_VEC2(base_type, ptx_type, reg_mod, cast_type, modifier) \ template <> \ template <> \ void ModifiedLoad<ld::modifier>::Ld(base_type &val, base_type *ptr) { \ asm volatile("ld.global." #modifier ".v2." 
#ptx_type " {%0, %1}, [%2];" \ : "=" #reg_mod(reinterpret_cast<cast_type &>(val.x)), \ "=" #reg_mod(reinterpret_cast<cast_type &>(val.y)) \ : _GR_ASM_PTR_(ptr)); \ } #define GR_LOAD_VEC4(base_type, ptx_type, reg_mod, cast_type, modifier) \ template <> \ template <> \ void ModifiedLoad<ld::modifier>::Ld(base_type &val, base_type *ptr) { \ asm volatile("ld.global." #modifier ".v4." #ptx_type \ " {%0, %1, %2, %3}, [%4];" \ : "=" #reg_mod(reinterpret_cast<cast_type &>(val.x)), \ "=" #reg_mod(reinterpret_cast<cast_type &>(val.y)), \ "=" #reg_mod(reinterpret_cast<cast_type &>(val.z)), \ "=" #reg_mod(reinterpret_cast<cast_type &>(val.w)) \ : _GR_ASM_PTR_(ptr)); \ } /** * Defines specialized load ops for only the base type */ #define GR_LOAD_BASE(base_type, ptx_type, reg_mod, cast_type) \ GR_LOAD(base_type, ptx_type, reg_mod, cast_type, cg) \ GR_LOAD(base_type, ptx_type, reg_mod, cast_type, ca) \ GR_LOAD(base_type, ptx_type, reg_mod, cast_type, cs) /** * Defines specialized load ops for the base type and for its derivative vec1 * and vec2 types */ #define GR_LOAD_BASE_ONE_TWO(base_type, dest_type, short_type, ptx_type, \ reg_mod, cast_type) \ GR_LOAD_BASE(base_type, ptx_type, reg_mod, cast_type) \ \ GR_LOAD_VEC1(short_type##1, ptx_type, reg_mod, cast_type, cg) \ GR_LOAD_VEC1(short_type##1, ptx_type, reg_mod, cast_type, ca) \ GR_LOAD_VEC1(short_type##1, ptx_type, reg_mod, cast_type, cs) \ \ GR_LOAD_VEC2(short_type##2, ptx_type, reg_mod, cast_type, cg) \ GR_LOAD_VEC2(short_type##2, ptx_type, reg_mod, cast_type, ca) \ GR_LOAD_VEC2(short_type##2, ptx_type, reg_mod, cast_type, cs) /** * Defines specialized load ops for the base type and for its derivative vec1, * vec2, and vec4 types */ #define GR_LOAD_BASE_ONE_TWO_FOUR(base_type, dest_type, short_type, ptx_type, \ reg_mod, cast_type) \ GR_LOAD_BASE_ONE_TWO(base_type, dest_type, short_type, ptx_type, reg_mod, \ cast_type) \ GR_LOAD_VEC4(short_type##4, ptx_type, reg_mod, cast_type, cg) \ GR_LOAD_VEC4(short_type##4, ptx_type, reg_mod, cast_type, ca) \ GR_LOAD_VEC4(short_type##4, ptx_type, reg_mod, cast_type, cs) #if CUDA_VERSION >= 4000 #define GR_REG8 h #define GR_REG16 h #define GR_CAST8 short #else #define GR_REG8 r #define GR_REG16 r #define GR_CAST8 char #endif /** * Define cache-modified loads for all 4-byte (and smaller) structures */ GR_LOAD_BASE_ONE_TWO_FOUR(char, char, char, s8, GR_REG8, GR_CAST8) GR_LOAD_BASE_ONE_TWO_FOUR(short, short, short, s16, GR_REG16, short) GR_LOAD_BASE_ONE_TWO_FOUR(int, int, int, s32, r, int) GR_LOAD_BASE_ONE_TWO_FOUR(unsigned char, unsigned char, uchar, u8, GR_REG8, unsigned GR_CAST8) GR_LOAD_BASE_ONE_TWO_FOUR(unsigned short, unsigned short, ushort, u16, GR_REG16, unsigned short) GR_LOAD_BASE_ONE_TWO_FOUR(unsigned int, unsigned int, uint, u32, r, unsigned int) GR_LOAD_BASE_ONE_TWO_FOUR(float, float, float, f32, f, float) #if !defined(__LP64__) || (__LP64__ == 0) // longs are 64-bit on non-Windows 64-bit compilers GR_LOAD_BASE_ONE_TWO_FOUR(long, long, long, s32, r, long) GR_LOAD_BASE_ONE_TWO_FOUR(unsigned long, unsigned long, ulong, u32, r, unsigned long) #endif GR_LOAD_BASE(signed char, s8, r, unsigned int) // Only need to define base: char2,char4, etc // already defined from char /** * Define cache-modified loads for all 8-byte structures */ GR_LOAD_BASE_ONE_TWO(unsigned long long, unsigned long long, ulonglong, u64, l, unsigned long long) GR_LOAD_BASE_ONE_TWO(long long, long long, longlong, s64, l, long long) GR_LOAD_BASE_ONE_TWO( double, double, double, s64, l, long long) // Cast to 64-bit long long a workaround 
for the fact that // the 3.x assembler has no register constraint for doubles #if defined(__LP64__) // longs are 64-bit on non-Windows 64-bit compilers GR_LOAD_BASE_ONE_TWO(long, long, long, s64, l, long) GR_LOAD_BASE_ONE_TWO(unsigned long, unsigned long, ulong, u64, l, unsigned long) #endif /** * Undefine macros */ #undef GR_LOAD_VEC1 #undef GR_LOAD_VEC2 #undef GR_LOAD_VEC4 #undef GR_LOAD_BASE #undef GR_LOAD_BASE_ONE_TWO #undef GR_LOAD_BASE_ONE_TWO_FOUR #undef GR_CAST8 #undef GR_REG8 #undef GR_REG16 #else //__CUDA_ARCH__ template <ld::CacheModifier READ_MODIFIER> template <typename T> __device__ __forceinline__ void ModifiedLoad<READ_MODIFIER>::Ld(T &val, T *ptr) { val = *ptr; } #endif //__CUDA_ARCH__ } // namespace io } // namespace util } // namespace gunrock
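For reference, here is a minimal usage sketch of the cache-modified loads generated by the GR_LOAD* macros above. The kernel and buffer names are mine, and the include path is assumed to be this header's location in the Gunrock tree; ModifiedLoad<ld::cg>::Ld emits an ld.global.cg (cache-at-L2) instruction on sm_20 and later, and falls back to a plain dereference otherwise.

#include <gunrock/util/io/modified_load.cuh>   // assumed path of the header above

// Copy kernel that streams its input through the .cg load path.
__global__ void copy_cg(float *in, float *out, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    float v;
    gunrock::util::io::ModifiedLoad<gunrock::util::io::ld::cg>::Ld(v, in + i);
    out[i] = v;
}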
the_stack
#include "parallax/simpleFlow.hpp" #include "./kernels/patchDifferenceFunction.cu" #include "backend/common/vectorOps.hpp" #include "backend/cuda/deviceBuffer.hpp" #include "backend/cuda/deviceStream.hpp" #include "backend/cuda/core1/kernels/samplingKernel.cu" #include "gpu/image/sampling.hpp" #include "gpu/image/imageOps.hpp" #include "gpu/image/blur.hpp" #include "gpu/stream.hpp" #include "cuda/error.hpp" #include "cuda/util.hpp" #include "util/imageProcessingGPUUtils.hpp" namespace VideoStitch { namespace Core { #define TILE_WIDTH 16 #define CUDABLOCKSIZE 512 #define SIMPLEFLOW_KERNEL_BLOCK_SIZE_X 16 #define SIMPLEFLOW_KERNEL_BLOCK_SIZE_Y 8 __global__ void forwardFlowKernel(const int flowSize, const int windowSize, const float flowMagnitudeWeight, const float gradientWeight, const int2 size0, const int2 offset0, const uint32_t* input0, const float* gradient0, const int2 size1, const int2 offset1, const uint32_t* input1, const float* gradient1, const float2* inputFlowOffset, float2* flow, float* confidence) { // Check whether we need to calculate the flow uint32_t x = blockIdx.x * blockDim.x + threadIdx.x; uint32_t y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= size0.x || y >= size0.y) return; uint32_t v0 = input0[y * size0.x + x]; if (Image::RGBA::a(v0) == 0) { // If current alpha is 0, do nothing if (inputFlowOffset) { flow[y * size0.x + x] = inputFlowOffset[y * size0.x + x]; if (confidence) { confidence[y * size0.x + x] = 1; } return; } flow[y * size0.x + x] = make_float2(INVALID_FLOW_VALUE, INVALID_FLOW_VALUE); if (confidence) { confidence[y * size0.x + x] = 0; } return; } int2 coord1 = make_int2(x + offset0.x - offset1.x, y + offset0.y - offset1.y); if (!inRange(coord1, size1)) { flow[y * size0.x + x] = make_float2(INVALID_FLOW_VALUE, INVALID_FLOW_VALUE); if (confidence) { confidence[y * size0.x + x] = 0; } return; } uint32_t v1 = input1[coord1.y * size1.x + coord1.x]; if (Image::RGBA::a(v1) == 0) { flow[y * size0.x + x] = make_float2(INVALID_FLOW_VALUE, INVALID_FLOW_VALUE); if (confidence) { confidence[y * size0.x + x] = 0; } return; } float2 flowOffset = make_float2(0, 0); if (inputFlowOffset) { flowOffset = inputFlowOffset[y * size0.x + x]; } // Try to find the best forward flow here int2 sampleCoord = make_int2(x, y); float bestCost = MAX_INVALID_COST; float2 minFlow = make_float2(flowOffset.x, flowOffset.y); float totalCost = 0; float totalCount = 0; for (int i = -flowSize; i <= flowSize; i++) for (int j = -flowSize; j <= flowSize; j++) { int2 mapCoord = make_int2(flowOffset.x + x + i + offset0.x - offset1.x, flowOffset.y + y + j + offset0.y - offset1.y); if (inRange(mapCoord, size1)) { float cost = getCost(windowSize, gradientWeight, size0, input0, gradient0, sampleCoord, size1, input1, gradient1, mapCoord) + flowMagnitudeWeight * sqrtf(i * i + j * j) / sqrtf(2 * flowSize * flowSize); if (cost < MAX_INVALID_COST) { totalCost += cost; totalCount++; } if (cost < bestCost) { bestCost = cost; minFlow = make_float2(flowOffset.x + i, flowOffset.y + j); } } } flow[y * size0.x + x] = minFlow; if (confidence) { if (bestCost != MAX_INVALID_COST) { int2 mapCoord = make_int2(sampleCoord.x + minFlow.x + offset0.x - offset1.x, sampleCoord.y + minFlow.y + offset0.y - offset1.y); confidence[y * size0.x + x] = (getCUR(windowSize, gradientWeight, size0, input0, gradient0, sampleCoord, size1, input1, gradient1, mapCoord)); // confidence[y * size0.x + x] = totalCost / totalCount - bestCost; } else { confidence[y * size0.x + x] = 0; } } } __global__ void flowAgreementConfidenceKernel(const 
int flowSize, const int2 size0, const int2 offset0, const float2* flow0, const float* confidence0, const int2 size1, const int2 offset1, const float2* flow1, const float* confidence1, float* flowAgreementConfidence0) { // Check whether we need to calculate the agreement confidence uint32_t x = blockIdx.x * blockDim.x + threadIdx.x; uint32_t y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= size0.x || y >= size0.y) return; const float2 forwardFlow = flow0[y * size0.x + x]; const float forwardConfidence = confidence0[y * size0.x + x]; const int2 lookupCoord = make_int2(x + forwardFlow.x + offset0.x - offset1.x, y + forwardFlow.y + offset0.y - offset1.y); if (!inRange(lookupCoord, size1)) { flowAgreementConfidence0[y * size0.x + x] = 0; return; } const float2 backwardFlow = flow1[lookupCoord.y * size1.x + lookupCoord.x]; const float backwardConfidence = confidence1[lookupCoord.y * size1.x + lookupCoord.x]; // Check if forward and backward flow agree float normalizedAgreementLength = length(forwardFlow + backwardFlow) / (length(make_float2(2 * flowSize + 1, 2 * flowSize + 1))); flowAgreementConfidence0[y * size0.x + x] = powf(fmaxf(1 - normalizedAgreementLength, 0.0), 3) // * forwardConfidence; * sqrtf(backwardConfidence * forwardConfidence); } __global__ void confidenceTransformKernel(const int width, const int height, const float threshold, const float gamma, const float clampedValue, const float* inputConfidence, float* outputConfidence) { uint32_t x = blockIdx.x * blockDim.x + threadIdx.x; uint32_t y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) return; float inputValue = inputConfidence[y * width + x]; if (inputValue < threshold) { outputConfidence[y * width + x] = 0; } else { outputConfidence[y * width + x] = powf(inputValue, gamma); } } __device__ float getSpacialWeight(const float sigmaSpace, const float x) { return exp(-abs(sigmaSpace) * x * x); } __global__ void confidenceAwareFlowBlurKernel(const bool extrapolation, const int2 size, const int kernelSize, const float sigmaSpace, const float sigmaImage, const float sigmaConfidence, const uint32_t* const inputImage, const float2* const inputFlow, const float* const inputConfidence, float2* const outputFlow) { uint32_t x = blockIdx.x * blockDim.x + threadIdx.x; uint32_t y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= size.x || y >= size.y) return; if (extrapolation) { float2 inFlow = inputFlow[y * size.x + x]; if (inFlow.x != INVALID_FLOW_VALUE) { outputFlow[y * size.x + x] = inFlow; return; } } // check if the current flow is not valid, then just do nothing float maxDist = kernelSize * 1.4142; uint32_t imageColor; if (sigmaImage) { imageColor = inputImage[y * size.x + x]; } if (!extrapolation) { if (sigmaImage > 0) { if (!Image::RGBA::a(imageColor)) { outputFlow[y * size.x + x] = make_float2(INVALID_FLOW_VALUE, INVALID_FLOW_VALUE); return; } } } float sumWeight = 0; float2 sumContribution = make_float2(0, 0); if (maxDist == 0) maxDist = 1; for (int dx = -kernelSize; dx <= kernelSize; dx++) { for (int dy = -kernelSize; dy <= kernelSize; dy++) { // Here i came across a neighbor, what he is look like int2 neighborCoord = make_int2(x + dx, y + dy); if (!inRange(neighborCoord, size)) { continue; } float neighborConfidence = 1; if (inputConfidence) { neighborConfidence = inputConfidence[neighborCoord.y * size.x + neighborCoord.x]; } if (neighborConfidence == 0) { continue; } float2 neighborflowOffset = inputFlow[neighborCoord.y * size.x + neighborCoord.x]; if (neighborflowOffset.x == INVALID_FLOW_VALUE) { 
continue; } float weightImage = 1.0f; if (sigmaImage) { uint32_t imageColorNeighbor = inputImage[neighborCoord.y * size.x + neighborCoord.x]; if (Image::RGBA::a(imageColorNeighbor) > 0 && Image::RGBA::a(imageColor) > 0) { const float sad = abs((float(Image::RGBA::r(imageColorNeighbor)) - Image::RGBA::r(imageColor)) / 255.0) + abs((float(Image::RGBA::g(imageColorNeighbor)) - Image::RGBA::g(imageColor)) / 255.0) + abs((float(Image::RGBA::b(imageColorNeighbor)) - Image::RGBA::b(imageColor)) / 255.0); weightImage = exp(-abs(sad * sad * sigmaImage)); } } // Now calculate the distance between source and target float distSpace = length(make_float2(dx, dy)) / maxDist; float weightSpace = exp(-abs(distSpace * distSpace * sigmaSpace)); // Now i do really look at the neighbor on the other side to see how think is going on there float weight = weightSpace * weightImage * neighborConfidence; sumWeight += weight; sumContribution += weight * neighborflowOffset; } } // If my confidence is high, i would tend to keep mine, don't care about the neighbor's confidence // Here is where to set the weight if (sumWeight == 0) { outputFlow[y * size.x + x] = make_float2(INVALID_FLOW_VALUE, INVALID_FLOW_VALUE); } else { outputFlow[y * size.x + x] = sumContribution / sumWeight; } } __global__ void flowConfidenceKernel(const int windowSize, const float gradientWeight, const int2 size0, const uint32_t* input0, const float* gradient0, const float2* inputFlow, const int2 size1, const uint32_t* input1, const float* gradient1, float* confidence) { uint32_t x = blockIdx.x * blockDim.x + threadIdx.x; uint32_t y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= size0.x || y >= size0.y) return; float2 flow = inputFlow[y * size0.x + x]; if (flow.x == INVALID_FLOW_VALUE) { confidence[y * size0.x + x] = 0; } else { int2 mapCoord = make_int2(x + flow.x, y + flow.y); int2 sampleCoord = make_int2(x, y); confidence[y * size0.x + x] = getCUR(windowSize, gradientWeight, size0, input0, gradient0, sampleCoord, size1, input1, gradient1, mapCoord); } } Status SimpleFlow::findForwardFlow(const int flowSize, const int windowSize, const float flowMagnitudeWeight, const float gradientWeight, const int2 size0, const int2 offset0, const GPU::Buffer<const uint32_t> inputBuffer0, const GPU::Buffer<const float> inputGradientBuffer0, const int2 size1, const int2 offset1, const GPU::Buffer<const uint32_t> inputBuffer1, const GPU::Buffer<const float> inputGradientBuffer1, GPU::Buffer<float2> flow, GPU::Buffer<float> confidence, GPU::Stream gpuStream) { cudaStream_t stream = gpuStream.get(); dim3 dimBlock(SIMPLEFLOW_KERNEL_BLOCK_SIZE_X, SIMPLEFLOW_KERNEL_BLOCK_SIZE_Y, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(size0.x, SIMPLEFLOW_KERNEL_BLOCK_SIZE_X), (unsigned)Cuda::ceilDiv(size0.y, SIMPLEFLOW_KERNEL_BLOCK_SIZE_Y), 1); forwardFlowKernel<<<dimGrid, dimBlock, 0, stream>>>(flowSize, windowSize, flowMagnitudeWeight, gradientWeight, size0, offset0, inputBuffer0.get(), inputGradientBuffer0.get(), size1, offset1, inputBuffer1.get(), inputGradientBuffer1.get(), 0, flow.get(), confidence.get()); return CUDA_STATUS; } __global__ void offsetCostKernel(const int2 flowOffset, const int flowSize, const float flowMagnitudeWeight, const float gradientWeight, const int2 size0, const int2 offset0, const uint32_t* input0, const float* gradient0, const int2 size1, const int2 offset1, const uint32_t* input1, const float* gradient1, float2* cost) { // Check whether we need to calculate the flow uint32_t x = blockIdx.x * blockDim.x + threadIdx.x; uint32_t y = blockIdx.y * 
blockDim.y + threadIdx.y; if (x >= size0.x || y >= size0.y) return; uint32_t v0 = input0[y * size0.x + x]; if (Image::RGBA::a(v0) == 0) { // If current alpha is 0, do nothing cost[y * size0.x + x] = make_float2(INVALID_FLOW_VALUE, INVALID_FLOW_VALUE); return; } // Try to find the best forward flow here int2 sampleCoord = make_int2(x, y); int2 mapCoord = make_int2(flowOffset.x + x + offset0.x - offset1.x, flowOffset.y + y + offset0.y - offset1.y); cost[y * size0.x + x] = make_float2(INVALID_FLOW_VALUE, INVALID_FLOW_VALUE); if (inRange(mapCoord, size1)) { float difCost = getCost(0, gradientWeight, size0, input0, gradient0, sampleCoord, size1, input1, gradient1, mapCoord) + flowMagnitudeWeight * sqrtf(flowOffset.x * flowOffset.x + flowOffset.y * flowOffset.y) / sqrtf(2 * flowSize * flowSize); cost[y * size0.x + x] = make_float2(difCost, difCost); } } Status SimpleFlow::findOffsetCost(const int2 flowOffset, const int flowSize, const float flowMagnitudeWeight, const float gradientWeight, const int2 size0, const int2 offset0, const GPU::Buffer<const uint32_t> inputBuffer0, const GPU::Buffer<const float> inputGradientBuffer0, const int2 size1, const int2 offset1, const GPU::Buffer<const uint32_t> inputBuffer1, const GPU::Buffer<const float> inputGradientBuffer1, GPU::Buffer<float2> cost, GPU::Stream gpuStream) { cudaStream_t stream = gpuStream.get(); dim3 dimBlock(SIMPLEFLOW_KERNEL_BLOCK_SIZE_X, SIMPLEFLOW_KERNEL_BLOCK_SIZE_Y, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(size0.x, SIMPLEFLOW_KERNEL_BLOCK_SIZE_X), (unsigned)Cuda::ceilDiv(size0.y, SIMPLEFLOW_KERNEL_BLOCK_SIZE_Y), 1); offsetCostKernel<<<dimGrid, dimBlock, 0, stream>>>( flowOffset, flowSize, flowMagnitudeWeight, gradientWeight, size0, offset0, inputBuffer0.get(), inputGradientBuffer0.get(), size1, offset1, inputBuffer1.get(), inputGradientBuffer1.get(), cost.get()); return CUDA_STATUS; } __global__ void updateBestCostKernel(const int2 flowOffset, const int2 size0, const float2* cost, float* bestCost, float2* bestOffset) { // Check whether we need to calculate the flow uint32_t x = blockIdx.x * blockDim.x + threadIdx.x; uint32_t y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= size0.x || y >= size0.y) return; if (flowOffset.x == 123456) { bestCost[y * size0.x + x] = MAX_INVALID_COST; bestOffset[y * size0.x + x] = make_float2(INVALID_FLOW_VALUE, INVALID_FLOW_VALUE); } if (cost[y * size0.x + x].x >= 0 && cost[y * size0.x + x].x < bestCost[y * size0.x + x]) { bestCost[y * size0.x + x] = cost[y * size0.x + x].x; bestOffset[y * size0.x + x] = make_float2(flowOffset.x, flowOffset.y); } } Status SimpleFlow::updateBestCost(const int2 flowOffset, const int2 size0, const GPU::Buffer<const float2> cost, GPU::Buffer<float> bestCost, GPU::Buffer<float2> bestOffset, GPU::Stream gpuStream) { cudaStream_t stream = gpuStream.get(); dim3 dimBlock(SIMPLEFLOW_KERNEL_BLOCK_SIZE_X, SIMPLEFLOW_KERNEL_BLOCK_SIZE_Y, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(size0.x, SIMPLEFLOW_KERNEL_BLOCK_SIZE_X), (unsigned)Cuda::ceilDiv(size0.y, SIMPLEFLOW_KERNEL_BLOCK_SIZE_Y), 1); updateBestCostKernel<<<dimGrid, dimBlock, 0, stream>>>(flowOffset, size0, cost.get(), bestCost.get(), bestOffset.get()); return CUDA_STATUS; } Status SimpleFlow::findBackwardAndForwardFlowAgreementConfidence( const int flowSize, const int2 size0, const int2 offset0, const GPU::Buffer<const float2> flow0, const GPU::Buffer<const float> confidence0, const int2 size1, const int2 offset1, const GPU::Buffer<const float2> flow1, const GPU::Buffer<const float> confidence1, GPU::Buffer<float> 
flowAgreementConfidence0, GPU::Stream gpuStream) { cudaStream_t stream = gpuStream.get(); dim3 dimBlock(SIMPLEFLOW_KERNEL_BLOCK_SIZE_X, SIMPLEFLOW_KERNEL_BLOCK_SIZE_Y, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(size0.x, SIMPLEFLOW_KERNEL_BLOCK_SIZE_X), (unsigned)Cuda::ceilDiv(size0.y, SIMPLEFLOW_KERNEL_BLOCK_SIZE_Y), 1); flowAgreementConfidenceKernel<<<dimGrid, dimBlock, 0, stream>>>(flowSize, size0, offset0, flow0.get(), confidence0.get(), size1, offset1, flow1.get(), confidence1.get(), flowAgreementConfidence0.get()); return CUDA_STATUS; } Status SimpleFlow::performConfidenceTransform(const int width, const int height, const float threshold, const float gamma, const float clampedValue, const GPU::Buffer<const float> inputConfidence, GPU::Buffer<float> outputConfidence, GPU::Stream gpuStream) { cudaStream_t stream = gpuStream.get(); dim3 dimBlock(SIMPLEFLOW_KERNEL_BLOCK_SIZE_X, SIMPLEFLOW_KERNEL_BLOCK_SIZE_Y, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(width, SIMPLEFLOW_KERNEL_BLOCK_SIZE_X), (unsigned)Cuda::ceilDiv(height, SIMPLEFLOW_KERNEL_BLOCK_SIZE_Y), 1); confidenceTransformKernel<<<dimGrid, dimBlock, 0, stream>>>(width, height, threshold, gamma, clampedValue, inputConfidence.get(), outputConfidence.get()); return CUDA_STATUS; } Status SimpleFlow::findConfidence(const int windowSize, const float gradientWeight, const int2 size0, const GPU::Buffer<const uint32_t> input0, const GPU::Buffer<const float> gradient0, GPU::Buffer<const float2> forwardFlow0, const int2 size1, const GPU::Buffer<const uint32_t> input1, const GPU::Buffer<const float> gradient1, GPU::Buffer<float> confidence, GPU::Stream gpuStream) { cudaStream_t stream = gpuStream.get(); dim3 dimBlock(SIMPLEFLOW_KERNEL_BLOCK_SIZE_X, SIMPLEFLOW_KERNEL_BLOCK_SIZE_Y, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(size0.x, SIMPLEFLOW_KERNEL_BLOCK_SIZE_X), (unsigned)Cuda::ceilDiv(size0.y, SIMPLEFLOW_KERNEL_BLOCK_SIZE_Y), 1); flowConfidenceKernel<<<dimGrid, dimBlock, 0, stream>>>(windowSize, gradientWeight, size0, input0.get(), gradient0.get(), forwardFlow0.get(), size1, input1.get(), gradient1.get(), confidence.get()); return CUDA_STATUS; } Status SimpleFlow::performConfidenceAwareFlowInterpolation(const bool extrapolation, const int2 size, const int kernelSize, const float sigmaSpace, const float sigmaImage, const float sigmaConfidence, const GPU::Buffer<const uint32_t> inputImage, const GPU::Buffer<const float2> inputFlow, const GPU::Buffer<const float> inputConfidence, GPU::Buffer<float2> outputFlow, GPU::Stream gpuStream) { cudaStream_t stream = gpuStream.get(); dim3 dimBlock(SIMPLEFLOW_KERNEL_BLOCK_SIZE_X, SIMPLEFLOW_KERNEL_BLOCK_SIZE_Y, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(size.x, SIMPLEFLOW_KERNEL_BLOCK_SIZE_X), (unsigned)Cuda::ceilDiv(size.y, SIMPLEFLOW_KERNEL_BLOCK_SIZE_Y), 1); if (inputConfidence.wasAllocated()) { confidenceAwareFlowBlurKernel<<<dimGrid, dimBlock, 0, stream>>>( extrapolation, size, kernelSize, sigmaSpace, sigmaImage, sigmaConfidence, inputImage.get(), inputFlow.get(), inputConfidence.get(), outputFlow.get()); } else { confidenceAwareFlowBlurKernel<<<dimGrid, dimBlock, 0, stream>>>(extrapolation, size, kernelSize, sigmaSpace, sigmaImage, sigmaConfidence, inputImage.get(), inputFlow.get(), nullptr, outputFlow.get()); } return CUDA_STATUS; } __global__ void temporalAwareFlowBlurKernel(const bool extrapolation, const int frameId, const int frameCount, const int2 size, const int kernelSize, const float sigmaSpace, const float sigmaImage, const float sigmaTime, const float* const frames, const uint32_t* const 
inputImages, const float2* const inputFlows, const float* const inputConfidences, float2* const outputFlow) { uint32_t x = blockIdx.x * blockDim.x + threadIdx.x; uint32_t y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= size.x || y >= size.y) return; int baseOffset = frameId * size.x * size.y; if (extrapolation) { float2 inFlow = inputFlows[baseOffset + y * size.x + x]; if (inFlow.x != INVALID_FLOW_VALUE) { outputFlow[y * size.x + x] = inFlow; return; } } // check if the current flow is not valid, then just do nothing float maxDist = kernelSize * 1.4142; uint32_t imageColor; if (sigmaImage) { imageColor = inputImages[baseOffset + y * size.x + x]; } if (!extrapolation) { if (sigmaImage > 0) { if (!Image::RGBA::a(imageColor)) { outputFlow[y * size.x + x] = make_float2(INVALID_FLOW_VALUE, INVALID_FLOW_VALUE); return; } } } float sumWeight = 0; float2 sumContribution = make_float2(0, 0); if (maxDist == 0) maxDist = 1; for (int t = 0; t < frameCount; t++) if (frames[t] >= 0) { for (int dx = -kernelSize; dx <= kernelSize; dx++) for (int dy = -kernelSize; dy <= kernelSize; dy++) { const int offset = t * size.x * size.y; // Here i came across a neighbor, what he is look like int2 neighborCoord = make_int2(x + dx, y + dy); if (!inRange(neighborCoord, size)) { continue; } float neighborConfidence = 1; if (inputConfidences) { neighborConfidence = inputConfidences[offset + neighborCoord.y * size.x + neighborCoord.x]; } if (neighborConfidence == 0) { continue; } float2 neighborflowOffset = inputFlows[offset + neighborCoord.y * size.x + neighborCoord.x]; if (neighborflowOffset.x == INVALID_FLOW_VALUE) { continue; } float weightImage = 1.0f; if (sigmaImage) { uint32_t imageColorNeighbor = inputImages[offset + neighborCoord.y * size.x + neighborCoord.x]; if (Image::RGBA::a(imageColorNeighbor) > 0 && Image::RGBA::a(imageColor) > 0) { const float sad = abs((float(Image::RGBA::r(imageColorNeighbor)) - Image::RGBA::r(imageColor)) / 255.0) + abs((float(Image::RGBA::g(imageColorNeighbor)) - Image::RGBA::g(imageColor)) / 255.0) + abs((float(Image::RGBA::b(imageColorNeighbor)) - Image::RGBA::b(imageColor)) / 255.0); weightImage = exp(-abs(sad * sad * sigmaImage)); } } // Now calculate the distance of time float distTime = float(frames[t] - frames[frameId]) / frameCount; float weightTime = exp(-abs(distTime * distTime * sigmaTime)); // Now calculate the distance between source and target float distSpace = length(make_float2(dx, dy)) / maxDist; float weightSpace = exp(-abs(distSpace * distSpace * sigmaSpace)); // Now i do really look at the neighbor on the other side to see how think is going on there float weight = weightSpace * weightImage * weightTime * neighborConfidence; sumWeight += weight; sumContribution += weight * neighborflowOffset; } } // If my confidence is high, i would tend to keep mine, don't care about the neighbor's confidence // Here is where to set the weight if (sumWeight == 0) { outputFlow[y * size.x + x] = make_float2(INVALID_FLOW_VALUE, INVALID_FLOW_VALUE); } else { outputFlow[y * size.x + x] = sumContribution / sumWeight; } } Status SimpleFlow::performTemporalAwareFlowInterpolation( const bool extrapolation, const frameid_t frameId, const int2 size, const int kernelSize, const float sigmaSpace, const float sigmaImage, const float sigmaTime, const GPU::Buffer<const float> frames, const GPU::Buffer<const uint32_t> inputImages, const GPU::Buffer<const float2> inputFlows, const GPU::Buffer<const float> inputConfidences, GPU::Buffer<float2> outputFlow, GPU::Stream gpuStream) { cudaStream_t 
stream = gpuStream.get(); dim3 dimBlock(SIMPLEFLOW_KERNEL_BLOCK_SIZE_X, SIMPLEFLOW_KERNEL_BLOCK_SIZE_Y, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(size.x, SIMPLEFLOW_KERNEL_BLOCK_SIZE_X), (unsigned)Cuda::ceilDiv(size.y, SIMPLEFLOW_KERNEL_BLOCK_SIZE_Y), 1); const int frameCount = (int)frames.numElements(); temporalAwareFlowBlurKernel<<<dimGrid, dimBlock, 0, stream>>>( extrapolation, frameId, frameCount, size, kernelSize, sigmaSpace, sigmaImage, sigmaTime, frames.get(), inputImages.get(), inputFlows.get(), inputConfidences.get(), outputFlow.get()); return CUDA_STATUS; } Status SimpleFlow::performFlowJittering(const int jitterSize, const int windowSize, const float flowMagnitudeWeight, const float gradientWeight, const int2 size0, const int2 offset0, const GPU::Buffer<const uint32_t> inputBuffer0, const GPU::Buffer<const float> inputGradientBuffer0, const int2 size1, const int2 offset1, const GPU::Buffer<const uint32_t> inputBuffer1, const GPU::Buffer<const float> inputGradientBuffer1, const GPU::Buffer<const float2> inputFlow, GPU::Buffer<float2> outputFlow, GPU::Stream gpuStream) { cudaStream_t stream = gpuStream.get(); dim3 dimBlock(SIMPLEFLOW_KERNEL_BLOCK_SIZE_X, SIMPLEFLOW_KERNEL_BLOCK_SIZE_Y, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(size0.x, SIMPLEFLOW_KERNEL_BLOCK_SIZE_X), (unsigned)Cuda::ceilDiv(size0.y, SIMPLEFLOW_KERNEL_BLOCK_SIZE_Y), 1); forwardFlowKernel<<<dimGrid, dimBlock, 0, stream>>>(jitterSize, windowSize, flowMagnitudeWeight, gradientWeight, size0, offset0, inputBuffer0.get(), inputGradientBuffer0.get(), size1, offset1, inputBuffer1.get(), inputGradientBuffer1.get(), inputFlow.get(), outputFlow.get(), 0); return CUDA_STATUS; } } // namespace Core } // namespace VideoStitch
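The forward/backward consistency score computed by flowAgreementConfidenceKernel reduces to a small closed form: take the residual of the round trip (the forward flow plus the backward flow sampled at the forward-mapped pixel), normalize it by the diagonal of the (2*flowSize+1) search window, clamp, cube, and scale by the geometric mean of the two per-direction confidences. A host-side restatement is below; the Vec2 struct and function name are mine, for illustration only.

#include <cmath>

struct Vec2 { float x, y; };

// Mirrors flowAgreementConfidenceKernel for a single pixel: fwd is the forward flow,
// bwd is the backward flow looked up at the forward-mapped position.
float flowAgreement(Vec2 fwd, Vec2 bwd, float fwdConf, float bwdConf, int flowSize) {
    float rx = fwd.x + bwd.x, ry = fwd.y + bwd.y;           // round-trip residual
    float diag = std::sqrt(2.0f) * (2 * flowSize + 1);      // length of (2r+1, 2r+1)
    float agreement = std::fmax(1.0f - std::sqrt(rx * rx + ry * ry) / diag, 0.0f);
    return std::pow(agreement, 3.0f) * std::sqrt(fwdConf * bwdConf);
}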
the_stack
using namespace std; #ifndef MAP_FILE #define MAP_FILE MAP_SHARED #endif long fsize(int fd) { struct stat stat; int res = fstat(fd, &stat); return stat.st_size; } int printll(char *s) { while (*s != '\n' && *s != ',' && *s != '\t') { putchar(*s++); } return 0; } long hash(char *str0, int len) { unsigned char *str = (unsigned char *)str0; unsigned long hash = 5381; int c; while ((c = *str++) && len--) hash = ((hash << 5) + hash) + c; /* hash * 33 + c */ return hash; } long HEAP_SIZE_CPU = 1073741826; // 1048576; // 536870912; // 268435456; // 2097152; 1610612739; // 4294967304; // void *mallocBase = calloc(HEAP_SIZE_CPU, 1); void *mallocAddr = mallocBase; void *waterMark = mallocBase; void *myMalloc(size_t bytes) { void *res = mallocAddr; mallocAddr = (void *)((char *)mallocAddr + bytes); if ((long)mallocAddr >= (long)mallocBase + HEAP_SIZE_CPU) fprintf(stderr, "CPU memory breached limit of HEAP_SIZE_CPU\n"); return res; } long HEAP_SIZE = 8589934608; // 4294967304; // this is for GPU int timeval_subtract(struct timeval *result, struct timeval *t2, struct timeval *t1) { long int diff = (t2->tv_usec + 1000000 * t2->tv_sec) - (t1->tv_usec + 1000000 * t1->tv_sec); result->tv_sec = diff / 1000000; result->tv_usec = diff % 1000000; return (diff < 0); } #define CUDA_CALL(f) { \ cudaError_t err = (f); \ if (err != cudaSuccess) { \ fprintf(stderr, "CUDA error occurred: %s (%s:%d)\n", \ cudaGetErrorString(err), __FILE__, __LINE__); \ exit(err); \ } \ } #define CUBLAS_CALL(f) { \ cublasStatus_t stat = (f); \ if (stat != CUBLAS_STATUS_SUCCESS) { \ fprintf(stderr, "cuBLAS error occurred: %d (%s:%d)\n", \ stat, __FILE__, __LINE__); \ exit(stat); \ } \ } void *gpuMallocBase; void *gpuMallocAddr; // Alignment boundary size, in bytes. constexpr int N = 4; // 16 void *myGpuMalloc(size_t bytes) { bytes = ((bytes + (1 << N) - 1) >> N) << N; void *res = gpuMallocAddr; gpuMallocAddr = (void *)((char *)gpuMallocAddr + bytes); if ((long)gpuMallocAddr >= (long)gpuMallocBase + HEAP_SIZE) fprintf(stderr, "GPU breached memory limit of HEAP_SIZE\n"); return res; } template <typename T> __global__ void arrayUpdate(T *data, int index, T value) { data[index] = value; } __global__ void arrayFill(float* data, float value, int size) { int stride = gridDim.x * blockDim.x; int tid = threadIdx.x + blockIdx.x * blockDim.x; for (int i = tid; i < size; i += stride) data[i] = value; } __global__ void hardTanh(float* in, float* out, float min_val, float max_val, int size) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = gridDim.x * blockDim.x; for (int i = tid; i < size; i += stride) { out[i] = in[i] < min_val ? min_val : (in[i] > max_val ? 
max_val : in[i]); } } __global__ void hardTanh_grad(float* in_x, float* in_d, float* out_d, float min_val, float max_val, int size, bool inplace) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = gridDim.x * blockDim.x; for (int i = tid; i < size; i += stride) { if (inplace) { if (in_x[i] < min_val || in_x[i] > max_val) in_d[i] = 0; } else { if (in_x[i] >= min_val && in_x[i] <= max_val) in_d[i] += out_d[i]; } } } __global__ void nllLoss(float *x, int x_stride, float *y, int* target) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int offset = tid * x_stride + target[tid]; y[tid] = -1 * x[offset]; } __global__ void nllLoss_grad(int x_stride, float *yGrad, int* target, float* xGrad) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int offset = tid * x_stride + target[tid]; xGrad[offset] += -1 * yGrad[tid]; } // only for 4D tensor in and 3D tensor out __global__ void sum_grad(float* in, int inSize0, int inSize1, int inSize2, int inSize3, int nElement, float* out, int outStride0, int outStride1, int outStride2, int dim) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (int i = tid; i < nElement; i += stride) { int inOff2 = i / inSize3; int inDim3 = i - inOff2 * inSize3; int inOff1 = inOff2 / inSize2; int inDim2 = inOff2 - inOff1 * inSize2; int inDim0 = inOff1 / inSize1; int inDim1 = inOff1 - inDim0 * inSize1; int outOff = 0; if (dim == 0) outOff = inDim1 * outStride0 + inDim2 * outStride1 + inDim3 * outStride2; if (dim == 1) outOff = inDim0 * outStride0 + inDim2 * outStride1 + inDim3 * outStride2; if (dim == 2) outOff = inDim0 * outStride0 + inDim1 * outStride1 + inDim3 * outStride2; if (dim == 3) outOff = inDim0 * outStride0 + inDim1 * outStride1 + inDim2 * outStride2; in[i] += out[outOff]; } } //following - https://github.com/torch/cutorch/blob/master/lib/THC/THCTensorMath.cuh#L49 static inline __device__ int compute(int outputSize0, int outputSize1, int outputSize2, int outputSize3, int outputStride0, int outputStride1, int outputStride2, int outputStride3, const int dimSize, const int concatDim, int linearIndex) { int offset = 0; int curDimSize = 3 == concatDim ? dimSize : outputSize3; int nextDimIndex = linearIndex / curDimSize; int curDimIndex = linearIndex - curDimSize * nextDimIndex; int curDimOffset = curDimIndex * outputStride3; offset += curDimOffset; linearIndex = nextDimIndex; curDimSize = 2 == concatDim ? dimSize : outputSize2; nextDimIndex = linearIndex / curDimSize; curDimIndex = linearIndex - curDimSize * nextDimIndex; curDimOffset = curDimIndex * outputStride2; offset += curDimOffset; linearIndex = nextDimIndex; curDimSize = 1 == concatDim ? dimSize : outputSize1; nextDimIndex = linearIndex / curDimSize; curDimIndex = linearIndex - curDimSize * nextDimIndex; curDimOffset = curDimIndex * outputStride1; offset += curDimOffset; linearIndex = nextDimIndex; return offset + linearIndex * outputStride0; // for (int i = 3; i >= 1; i--) { // int curDimSize = i == concatDim ? 
dimSize : outputSize[i]; // int nextDimIndex = linearIndex / curDimSize; // int curDimIndex = linearIndex - curDimSize * nextDimIndex; // int curDimOffset = curDimIndex * outputStride[i]; // offset += curDimOffset; // linearIndex = nextDimIndex; // } // return offset + linearIndex * outputStride[0]; } // TODO: Only for Dim of rank 4, and only for 2 inputs, and only for concat at dim = 1 __global__ void concat2D_1D_greg(float* in1, int dimSize1, int nElement1, float* in2, int dimSize2, int nElement2, float* out, int concatDim, int outSize0, int outSize1, int outSize2, int outSize3, int outStride0, int outStride1, int outStride2, int outStride3) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int nElement = blockIdx.y == 0 ? nElement1 : nElement2; if (tid >= nElement) return; float* data = blockIdx.y == 0 ? in1 : in2; int offset = blockIdx.y == 0 ? 0 : dimSize1; int dimSize = blockIdx.y == 0 ? dimSize1 : dimSize2; int dataOffset = offset * outStride1; int stride = gridDim.x * blockDim.x; while (tid < nElement) { int elementOffset = compute(outSize0, outSize1, outSize2, outSize3, outStride0, outStride1, outStride2, outStride3, dimSize, concatDim, tid); out[dataOffset + elementOffset] = data[tid]; tid += stride; } } // TODO: Only for Dim of rank 4, and only for 2 inputs, and only for concat at dim = 1 __global__ void concat2D_1D_greg_grad(float* in1, int dimSize1, int nElement1, float* in2, int dimSize2, int nElement2, float* out, int concatDim, int outSize0, int outSize1, int outSize2, int outSize3, int outStride0, int outStride1, int outStride2, int outStride3) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int nElement = blockIdx.y == 0 ? nElement1 : nElement2; if (tid >= nElement) return; float* data = blockIdx.y == 0 ? in1 : in2; int offset = blockIdx.y == 0 ? 0 : dimSize1; int dimSize = blockIdx.y == 0 ? 
dimSize1 : dimSize2; int dataOffset = offset * outStride1; int stride = gridDim.x * blockDim.x; while (tid < nElement) { int elementOffset = compute(outSize0, outSize1, outSize2, outSize3, outStride0, outStride1, outStride2, outStride3, dimSize, concatDim, tid); data[tid] += out[dataOffset + elementOffset]; tid += stride; } } __global__ void repeat0(float* in, float* out, int outStride0, int outStride1, int outScalarCount) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < outScalarCount; tid += stride) { int linearIndex = tid; int outIndex0 = linearIndex / outStride0; linearIndex = linearIndex - outIndex0 * outStride0; int outIndex1 = linearIndex / outStride1; int outIndex2 = linearIndex - outIndex1 * outStride1; int inIndex = outIndex2 + (outIndex0 + outIndex1) * outStride1; out[tid] = in[inIndex]; } } __global__ void shift0(float* in, float* out, int inDim0, int inStride0, int inStride1, int inScalarCount) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < inScalarCount; tid += stride) { int linearIndex = tid; int inIndex0 = linearIndex / inStride0; linearIndex = linearIndex - inIndex0 * inStride0; int inIndex1 = linearIndex / inStride1; if (inIndex0 + inIndex1 >= inDim0) return; out[tid + inIndex1 * inStride0] = in[tid]; } } __global__ void adagrad_update_1D_1D(float* x, float* d, float* m, float clip, float lr, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) { if (d[tid] > clip) d[tid] = clip; if (d[tid] < -clip) d[tid] = -clip; m[tid] += d[tid] * d[tid]; x[tid] -= lr * d[tid] / sqrt(m[tid] + 0.00000001); d[tid] = 0; } } __global__ void momentum_update_1D_1D(float* x, float* d, float* m, float learning_rate, float momentum, float gradClip, bool nesterov, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) { float temp = d[tid]; if (temp > gradClip) temp = gradClip; if (temp < -gradClip) temp = -gradClip; m[tid] *= momentum; m[tid] += temp; if (nesterov) { temp += momentum * m[tid]; } else { temp = m[tid]; } x[tid] -= learning_rate * temp; d[tid] = 0; } } __global__ void addScalar(float* in, float* out, float add, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in[tid] + add; } __global__ void minusScalar(float* in, float* out, float minus, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in[tid] - minus; } __global__ void multScalar(float* in, float* out, float mult, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in[tid] * mult; } __global__ void divScalar(float* in, float* out, float div, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in[tid] / div; } __global__ void elementwise_1D_1D_mul(float* in1, float* in2, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in1[tid] * in2[tid]; } __global__ void elementwise_1D_1D_mul_mutate(float* in1, float* in2, float* out, int 
size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] += in1[tid] * in2[tid]; } __global__ void elementwise_1D_1D_add(float* in1, float* in2, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in1[tid] + in2[tid]; } __global__ void elementwise_1D_1D_minus(float* in1, float* in2, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in1[tid] - in2[tid]; } __global__ void elementwise_1D_1D_div(float* in1, float* in2, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in1[tid] / in2[tid]; } __global__ void elementwise_1D_1D_exp(float* in, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = exp(in[tid]); } __global__ void elementwise_1D_1D_log(float* in, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = log(in[tid]); } __global__ void elementwise_1D_1D_sqrt(float* in, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = sqrt(in[tid]); } __global__ void elementwise_1D_1D_square(float* in, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in[tid] * in[tid]; } __global__ void elementwise_1D_1D_exp_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) in_d[tid] += out_d[tid] * out_x[tid]; } __global__ void elementwise_1D_1D_log_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) in_d[tid] += out_d[tid] / in_x[tid]; } __global__ void elementwise_1D_1D_sqrt_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) in_d[tid] += out_d[tid] / out_x[tid] / 2; } __global__ void elementwise_1D_1D_square_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) in_d[tid] += out_d[tid] * 2 * in_x[tid]; } __global__ void clipAt(float* in, float bound, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) { if (in[tid] > bound) in[tid] = bound; if (in[tid] < -bound) in[tid] = -bound; } } __global__ void mask4D(float* in, int* mask, int xstrides0, int xstrides1, int xstrides2, int xstrides3, int scalarCount) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; 
tid < scalarCount; tid += stride) { int linearIndex = tid; int xindex0 = linearIndex / xstrides0; linearIndex = linearIndex - xstrides0 * xindex0; int xindex1 = linearIndex / xstrides1; linearIndex = linearIndex - xstrides1 * xindex1; int xindex2 = linearIndex / xstrides2; int xindex3 = linearIndex - xstrides2 * xindex2; if (xindex3 >= mask[xindex0]) in[tid] = 0; } } __global__ void mul_sub(float* in1, float* in2, float* out, int in1ScalarCount, int in2ScalarCount) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < in1ScalarCount; tid += stride) { out[tid] = in1[tid] * in2[tid % in2ScalarCount]; } } __global__ void mul_sub_grad(float* in1_x, float* in1_d, float* in2_x, float* in2_d, float* out, int in1ScalarCount, int in2ScalarCount) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < in1ScalarCount; tid += stride) { int index = tid % in2ScalarCount; in1_d[tid] += out[tid] * in2_x[index]; in2_d[tid] = in1_x[tid] * out[tid]; // this is the temp array, need to be reduced! } } // From: https://github.com/pytorch/pytorch/blob/master/aten/src/THC/THCIntegerDivider.cuh // Result of div/mod operation stored together. template <typename Value> struct DivMod { Value div, mod; __host__ __device__ DivMod(Value div, Value mod) : div(div), mod(mod) { } }; // Base case: we only have an implementation for uint32_t for now. For // everything else, we use plain division. template <typename Value> struct IntDivider { IntDivider() { } // Dummy constructor for arrays. IntDivider(Value d) : divisor(d) { } __host__ __device__ inline Value div(Value n) const { return n / divisor; } __host__ __device__ inline Value mod(Value n) const { return n % divisor; } __host__ __device__ inline DivMod<Value> divmod(Value n) const { return DivMod<Value>(n / divisor, n % divisor); } Value divisor; }; // Implement fast integer division. template <> struct IntDivider<unsigned int> { static_assert(sizeof(unsigned int) == 4, "Assumes 32-bit unsigned int."); IntDivider() { } // Dummy constructor for arrays. IntDivider(unsigned int d) : divisor(d) { assert(divisor >= 1 && divisor <= INT32_MAX); // TODO: gcc/clang has __builtin_clz() but it's not portable. for (shift = 0; shift < 32; shift++) if ((1U << shift) >= divisor) break; uint64_t one = 1; uint64_t magic = ((one << 32) * ((one << shift) - divisor)) / divisor + 1; m1 = magic; assert(m1 > 0 && m1 == magic); // m1 must fit in 32 bits. } __host__ __device__ inline unsigned int div(unsigned int n) const { #ifdef __CUDA_ARCH__ // 't' is the higher 32-bits of unsigned 32-bit multiplication of 'n' and // 'm1'. unsigned int t = __umulhi(n, m1); return (t + n) >> shift; #else // Using uint64_t so that the addition does not overflow. uint64_t t = ((uint64_t) n * m1) >> 32; return (t + n) >> shift; #endif } __host__ __device__ inline unsigned int mod(unsigned int n) const { return n - div(n) * divisor; } __host__ __device__ inline DivMod<unsigned int> divmod(unsigned int n) const { unsigned int q = div(n); return DivMod<unsigned int>(q, n - q * divisor); } unsigned int divisor; // d above. unsigned int m1; // Magic number: m' above. unsigned int shift; // Shift amounts. }; // From: https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/cuda/detail/OffsetCalculator.cuh /// OffsetCalculator calculates the offset in bytes of a linear index for NARGS /// operands that share the same shape, but may have different strides. 
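/// (Added note: in this file the computed offsets are used directly as float-element
/// indices -- gpu_unary_kernel and gpu_binary_kernel do `&res[offsets[0]]` on float* --
/// so the strides passed in are presumably element strides, and "in bytes" above is
/// wording inherited from the PyTorch original.)
/// Illustrative example of get(), not part of the generated code: with dims = 2,
/// sizes = {3, 2} and strides[0] = {1, 3} (dimension 0 is the one divided out first),
/// get(4) computes divmod(4, 3) = (1, 1) and then divmod(1, 2) = (0, 1), giving
/// offset = 1*1 + 1*3 = 4 -- the linear index itself, as expected for a contiguous layout.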
template <int NARGS> struct OffsetCalculator { static constexpr int MAX_DIMS = 25; // The offset for each argument (in bytes). Wrapper around fixed-size array. struct offsets_t { __host__ __device__ uint32_t& operator[](int idx) { return values[idx]; } uint32_t values[NARGS]; }; // OffsetCalculator(int dims, const int64_t* sizes, const int64_t* const* strides) : dims(dims) { OffsetCalculator(int dims, const int32_t* sizes, const int32_t* const* strides) : dims(dims) { for (int i = 0; i < MAX_DIMS; ++i) { if (i < dims) { sizes_[i] = IntDivider<uint32_t>(sizes[i]); } else { sizes_[i] = IntDivider<uint32_t>(1); } for (int arg = 0; arg < NARGS; arg++) { strides_[i][arg] = i < dims ? strides[arg][i] : 0; } } } __host__ __device__ offsets_t get(uint32_t linear_idx) const { offsets_t offsets; #pragma unroll for (int arg = 0; arg < NARGS; arg++) { offsets[arg] = 0; } #pragma unroll for (int dim = 0; dim < MAX_DIMS; ++dim) { if (dim == dims) { break; } auto divmod = sizes_[dim].divmod(linear_idx); linear_idx = divmod.div; #pragma unroll for (int arg = 0; arg < NARGS; arg++) { offsets[arg] += divmod.mod * strides_[dim][arg]; } } return offsets; } void print() { for (auto i = 1; i < 128; i++) { auto offsets = get(i); printf("offsets[%d]: ", i); for (auto arg = 0; arg < NARGS; arg++) { printf("%d ", offsets[arg]); } printf("\n"); } } int dims; IntDivider<uint32_t> sizes_[MAX_DIMS]; uint32_t strides_[MAX_DIMS][NARGS]; }; // From: https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/cuda/Loops.cuh template<int nt, int vt, typename func_t> __launch_bounds__(nt, 4) __global__ void elementwise_kernel(int N, func_t f) { int tid = threadIdx.x; int nv = nt * vt; int idx = nv * blockIdx.x + tid; #pragma unroll for (int i = 0; i < vt; i++) { if (idx < N) { f(idx); idx += nt; } } } template<int nt, int vt, typename func_t> static void launch_kernel(int64_t N, const func_t& f) { if (N == 0) { return; } dim3 block(nt); dim3 grid((N + block.x * vt - 1) / (block.x * vt)); elementwise_kernel<nt, vt, func_t><<<grid, block, 0>>>(N, f); } template<typename func_t> void gpu_unary_kernel(float *res, float *x, int32_t resRank, const int32_t resScalarCount, const int32_t* resShape, const int32_t* const* strides, const func_t& f) { OffsetCalculator<2> calc(resRank, resShape, strides); launch_kernel<128, 4>(resScalarCount, [=]__device__(int idx) { auto offsets = calc.get(idx); float* out = &res[offsets[0]]; float* in = &x[offsets[1]]; *out = f(*in); }); } template<typename func_t> void gpu_binary_kernel(float *res, float *x, float *y, int32_t resRank, const int32_t resScalarCount, const int32_t* resShape, const int32_t* const* strides, const func_t& f) { OffsetCalculator<3> calc(resRank, resShape, strides); launch_kernel<128, 4>(resScalarCount, [=]__device__(int idx) { auto offsets = calc.get(idx); float* out = &res[offsets[0]]; float* in1 = &x[offsets[1]]; float* in2 = &y[offsets[2]]; *out = f(*in1, *in2); }); } #define CUDNN_CALL(f) { \ cudnnStatus_t stat = (f); \ if (stat != CUDNN_STATUS_SUCCESS) { \ fprintf(stderr, "cuDNN error occurred: %d (%s:%d)\n", \ stat, __FILE__, __LINE__); \ exit(stat); \ } \ } void Snippet(char *); std::random_device rd{}; std::mt19937 gen{rd()}; std::normal_distribution<> d{0, 0.01}; int main(int argc, char *argv[]) { if (argc != 2) { printf("usage: query <filename>\n"); return 0; } Snippet(argv[1]); return 0; } /***************************************** Emitting C Generated Code *******************************************/ #include <stdio.h> #include <stdlib.h> #include 
<string.h> #include <stdbool.h> void Snippet(char* x0) { // Backend setup. cublasHandle_t cublasHandle; CUBLAS_CALL(cublasCreate(&cublasHandle)); CUDA_CALL(cudaMalloc(&gpuMallocBase, HEAP_SIZE)); CUDA_CALL(cudaMemset(gpuMallocBase, 0, HEAP_SIZE)); gpuMallocAddr = gpuMallocBase; cudnnHandle_t cudnnHandle; CUDNN_CALL(cudnnCreate(&cudnnHandle)); srand(42); struct timeval begin_0, end_0, diff_0; gettimeofday(&begin_0, NULL); int32_t x7 = open("../../cifar10_data/cifar-10-batches-bin/data_batch_1.bin",0); int64_t x8 = fsize(x7); int64_t x10 = x8 / 3073LL; int32_t x11 = (int32_t)x10; int32_t x12 = x11 * 3072; float* x13 = (float*)myMalloc(x12 * sizeof(float));; int* x14 = (int32_t*)myMalloc(x11 * sizeof(int32_t));; char* x9 = (char*)mmap(0, x8, PROT_READ | PROT_WRITE, MAP_FILE | MAP_PRIVATE, x7, 0); for(int x16=0; x16 < x11; x16++) { int32_t x17 = x16 * 3073; char x18 = x9[x17]; int32_t x19 = (int32_t)(unsigned char)x18; x14[x16] = x19; int32_t x25 = x17 + 1; int32_t x23 = x16 * 3072; for(int x22=0; x22 < 3072; x22++) { int32_t x26 = x25 + x22; char x27 = x9[x26]; int32_t x24 = x23 + x22; float x28 = (float)(unsigned char)x27; float x29 = x28 / 255.0f; x13[x24] = x29; } } gettimeofday(&end_0, NULL); timeval_subtract(&diff_0, &end_0, &begin_0);; int64_t x37 = ((diff_0.tv_sec * 1000000L) + (diff_0.tv_usec)); float x38 = (float)x37; float x39 = x38 / 1000000.0f; printf("Data reading in %lf sec\n",x39); // Tensor 'toGPU' invocation. float* x98 = (float*)myGpuMalloc(32768 * sizeof(float)); int32_t x41 = open("/home/fei/bitbucket/Lantern/src/out/PLDI19evaluation/squeezenet/squeezenetCifar10.onnx.bin",0); int64_t x42 = fsize(x41); float* x43 = (float*)mmap(0, x42, PROT_READ | PROT_WRITE, MAP_FILE | MAP_PRIVATE, x41, 0); float* x45 = x43+526720; CUDA_CALL(cudaMemcpy(x98, x45, 32768 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x101 = (float*)myGpuMalloc(48 * sizeof(float)); float* x46 = x43+245136; CUDA_CALL(cudaMemcpy(x101, x46, 48 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x104 = (float*)myGpuMalloc(64 * sizeof(float)); float* x47 = x43+17696; CUDA_CALL(cudaMemcpy(x104, x47, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x107 = (float*)myGpuMalloc(81920 * sizeof(float)); float* x48 = x43+723904; CUDA_CALL(cudaMemcpy(x107, x48, 81920 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x110 = (float*)myGpuMalloc(64 * sizeof(float)); float* x49 = x43+14544; CUDA_CALL(cudaMemcpy(x110, x49, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x113 = (float*)myGpuMalloc(36864 * sizeof(float)); float* x50 = x43+35392; CUDA_CALL(cudaMemcpy(x113, x50, 36864 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x116 = (float*)myGpuMalloc(4096 * sizeof(float)); float* x51 = x43+80608; CUDA_CALL(cudaMemcpy(x116, x51, 4096 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x119 = (float*)myGpuMalloc(16 * sizeof(float)); float* x52 = x43+4224; CUDA_CALL(cudaMemcpy(x119, x52, 16 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x122 = (float*)myGpuMalloc(64 * sizeof(float)); float* x53 = x43+362304; CUDA_CALL(cudaMemcpy(x122, x53, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
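// (Added note: each "Tensor 'toGPU' invocation" block above and below follows the same
// generated pattern -- x43 is the mmap'ed squeezenetCifar10.onnx.bin weight blob,
// x43+<offset> points at one parameter tensor inside it, and cudaMemcpy moves that
// slice into a freshly myGpuMalloc'ed device buffer. The offsets and element counts
// are constants emitted by the code generator.)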
float* x125 = (float*)myGpuMalloc(4096 * sizeof(float)); float* x54 = x43+27040; CUDA_CALL(cudaMemcpy(x125, x54, 4096 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x128 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x55 = x43+16672; CUDA_CALL(cudaMemcpy(x128, x55, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x131 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x56 = x43+14608; CUDA_CALL(cudaMemcpy(x131, x56, 2048 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x134 = (float*)myGpuMalloc(256 * sizeof(float)); float* x57 = x43+526464; CUDA_CALL(cudaMemcpy(x134, x57, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x137 = (float*)myGpuMalloc(18432 * sizeof(float)); float* x58 = x43+226704; CUDA_CALL(cudaMemcpy(x137, x58, 18432 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x140 = (float*)myGpuMalloc(32 * sizeof(float)); float* x59 = x43+80576; CUDA_CALL(cudaMemcpy(x140, x59, 32 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x143 = (float*)myGpuMalloc(128 * sizeof(float)); float* x60 = x43+121696; CUDA_CALL(cudaMemcpy(x143, x60, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x146 = (float*)myGpuMalloc(256 * sizeof(float)); float* x61 = x43+723648; CUDA_CALL(cudaMemcpy(x146, x61, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x149 = (float*)myGpuMalloc(82944 * sizeof(float)); float* x62 = x43+254592; CUDA_CALL(cudaMemcpy(x149, x62, 82944 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x152 = (float*)myGpuMalloc(9216 * sizeof(float)); float* x63 = x43+17760; CUDA_CALL(cudaMemcpy(x152, x63, 9216 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x155 = (float*)myGpuMalloc(64 * sizeof(float)); float* x64 = x43+559488; CUDA_CALL(cudaMemcpy(x155, x64, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x158 = (float*)myGpuMalloc(128 * sizeof(float)); float* x65 = x43+84704; CUDA_CALL(cudaMemcpy(x158, x65, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x161 = (float*)myGpuMalloc(9216 * sizeof(float)); float* x66 = x43+245184; CUDA_CALL(cudaMemcpy(x161, x66, 9216 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x164 = (float*)myGpuMalloc(32 * sizeof(float)); float* x67 = x43+31136; CUDA_CALL(cudaMemcpy(x164, x67, 32 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x167 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x68 = x43+4240; CUDA_CALL(cudaMemcpy(x167, x68, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x170 = (float*)myGpuMalloc(16 * sizeof(float)); float* x69 = x43+16656; CUDA_CALL(cudaMemcpy(x170, x69, 16 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x173 = (float*)myGpuMalloc(256 * sizeof(float)); float* x70 = x43+575936; CUDA_CALL(cudaMemcpy(x173, x70, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x176 = (float*)myGpuMalloc(8192 * sizeof(float)); float* x71 = x43+72384; CUDA_CALL(cudaMemcpy(x176, x71, 8192 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x179 = (float*)myGpuMalloc(147456 * sizeof(float)); float* x72 = x43+379008; CUDA_CALL(cudaMemcpy(x179, x72, 147456 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x182 = (float*)myGpuMalloc(192 * sizeof(float)); float* x73 = x43+226512; CUDA_CALL(cudaMemcpy(x182, x73, 192 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x185 = (float*)myGpuMalloc(147456 * sizeof(float)); float* x74 = x43+576192; CUDA_CALL(cudaMemcpy(x185, x74, 147456 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x188 = (float*)myGpuMalloc(64 * sizeof(float)); float* x75 = x43+5264; CUDA_CALL(cudaMemcpy(x188, x75, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x191 = (float*)myGpuMalloc(192 * sizeof(float)); float* x76 = x43+254400; CUDA_CALL(cudaMemcpy(x191, x76, 192 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x194 = (float*)myGpuMalloc(2592 * sizeof(float)); float* x77 = x43+0; CUDA_CALL(cudaMemcpy(x194, x77, 2592 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x197 = (float*)myGpuMalloc(24576 * sizeof(float)); float* x78 = x43+337728; CUDA_CALL(cudaMemcpy(x197, x78, 24576 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x200 = (float*)myGpuMalloc(4096 * sizeof(float)); float* x79 = x43+31168; CUDA_CALL(cudaMemcpy(x200, x79, 4096 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x203 = (float*)myGpuMalloc(36864 * sizeof(float)); float* x80 = x43+84832; CUDA_CALL(cudaMemcpy(x203, x80, 36864 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x206 = (float*)myGpuMalloc(64 * sizeof(float)); float* x81 = x43+26976; CUDA_CALL(cudaMemcpy(x206, x81, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x209 = (float*)myGpuMalloc(16384 * sizeof(float)); float* x82 = x43+559552; CUDA_CALL(cudaMemcpy(x209, x82, 16384 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x212 = (float*)myGpuMalloc(82944 * sizeof(float)); float* x83 = x43+143568; CUDA_CALL(cudaMemcpy(x212, x83, 82944 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x215 = (float*)myGpuMalloc(256 * sizeof(float)); float* x84 = x43+378752; CUDA_CALL(cudaMemcpy(x215, x84, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x218 = (float*)myGpuMalloc(128 * sizeof(float)); float* x85 = x43+72256; CUDA_CALL(cudaMemcpy(x218, x85, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x221 = (float*)myGpuMalloc(12288 * sizeof(float)); float* x86 = x43+121824; CUDA_CALL(cudaMemcpy(x221, x86, 12288 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x224 = (float*)myGpuMalloc(96 * sizeof(float)); float* x87 = x43+2592; CUDA_CALL(cudaMemcpy(x224, x87, 96 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x227 = (float*)myGpuMalloc(192 * sizeof(float)); float* x88 = x43+337536; CUDA_CALL(cudaMemcpy(x227, x88, 192 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x230 = (float*)myGpuMalloc(128 * sizeof(float)); float* x89 = x43+35264; CUDA_CALL(cudaMemcpy(x230, x89, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x233 = (float*)myGpuMalloc(192 * sizeof(float)); float* x90 = x43+143376; CUDA_CALL(cudaMemcpy(x233, x90, 192 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x236 = (float*)myGpuMalloc(9216 * sizeof(float)); float* x91 = x43+5328; CUDA_CALL(cudaMemcpy(x236, x91, 9216 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x239 = (float*)myGpuMalloc(9216 * sizeof(float)); float* x92 = x43+134160; CUDA_CALL(cudaMemcpy(x239, x92, 9216 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x242 = (float*)myGpuMalloc(16384 * sizeof(float)); float* x93 = x43+362368; CUDA_CALL(cudaMemcpy(x242, x93, 16384 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x245 = (float*)myGpuMalloc(1536 * sizeof(float)); float* x94 = x43+2688; CUDA_CALL(cudaMemcpy(x245, x94, 1536 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x248 = (float*)myGpuMalloc(10 * sizeof(float)); float* x95 = x43+805824; CUDA_CALL(cudaMemcpy(x248, x95, 10 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x251 = (float*)myGpuMalloc(48 * sizeof(float)); float* x96 = x43+134112; CUDA_CALL(cudaMemcpy(x251, x96, 48 * sizeof(float), cudaMemcpyHostToDevice)); float* x253 = (float*)myGpuMalloc(32768 * sizeof(float)); float* x254 = (float*)myGpuMalloc(48 * sizeof(float)); float* x255 = (float*)myGpuMalloc(64 * sizeof(float)); float* x256 = (float*)myGpuMalloc(81920 * sizeof(float)); float* x257 = (float*)myGpuMalloc(64 * sizeof(float)); float* x258 = (float*)myGpuMalloc(36864 * sizeof(float)); float* x259 = (float*)myGpuMalloc(4096 * sizeof(float)); float* x260 = (float*)myGpuMalloc(16 * sizeof(float)); float* x261 = (float*)myGpuMalloc(64 * sizeof(float)); float* x262 = (float*)myGpuMalloc(4096 * sizeof(float)); float* x263 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x264 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x265 = (float*)myGpuMalloc(256 * sizeof(float)); float* x266 = (float*)myGpuMalloc(18432 * sizeof(float)); float* x267 = (float*)myGpuMalloc(32 * sizeof(float)); float* x268 = (float*)myGpuMalloc(128 * sizeof(float)); float* x269 = (float*)myGpuMalloc(256 * sizeof(float)); float* x270 = (float*)myGpuMalloc(82944 * sizeof(float)); float* x271 = (float*)myGpuMalloc(9216 * sizeof(float)); float* x272 = (float*)myGpuMalloc(64 * sizeof(float)); float* x273 = (float*)myGpuMalloc(128 * sizeof(float)); float* x274 = (float*)myGpuMalloc(9216 * sizeof(float)); float* x275 = (float*)myGpuMalloc(32 * sizeof(float)); float* x276 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x277 = (float*)myGpuMalloc(16 * sizeof(float)); float* x278 = (float*)myGpuMalloc(256 * sizeof(float)); float* x279 = (float*)myGpuMalloc(8192 * sizeof(float)); float* x280 = (float*)myGpuMalloc(147456 * sizeof(float)); float* x281 = (float*)myGpuMalloc(192 * sizeof(float)); float* x282 = (float*)myGpuMalloc(147456 * sizeof(float)); float* x283 = (float*)myGpuMalloc(64 * sizeof(float)); float* x284 = (float*)myGpuMalloc(192 * sizeof(float)); float* x285 = (float*)myGpuMalloc(2592 * sizeof(float)); float* x286 = (float*)myGpuMalloc(24576 * sizeof(float)); float* x287 = (float*)myGpuMalloc(4096 * sizeof(float)); float* x288 = (float*)myGpuMalloc(36864 * sizeof(float)); float* x289 = (float*)myGpuMalloc(64 * sizeof(float)); float* x290 = (float*)myGpuMalloc(16384 * sizeof(float)); float* x291 = (float*)myGpuMalloc(82944 * sizeof(float)); float* x292 = (float*)myGpuMalloc(256 * 
sizeof(float)); float* x293 = (float*)myGpuMalloc(128 * sizeof(float)); float* x294 = (float*)myGpuMalloc(12288 * sizeof(float)); float* x295 = (float*)myGpuMalloc(96 * sizeof(float)); float* x296 = (float*)myGpuMalloc(192 * sizeof(float)); float* x297 = (float*)myGpuMalloc(128 * sizeof(float)); float* x298 = (float*)myGpuMalloc(192 * sizeof(float)); float* x299 = (float*)myGpuMalloc(9216 * sizeof(float)); float* x300 = (float*)myGpuMalloc(9216 * sizeof(float)); float* x301 = (float*)myGpuMalloc(16384 * sizeof(float)); float* x302 = (float*)myGpuMalloc(1536 * sizeof(float)); float* x303 = (float*)myGpuMalloc(10 * sizeof(float)); float* x304 = (float*)myGpuMalloc(48 * sizeof(float)); double* x305 = (double*)myMalloc(4 * sizeof(double));; double* x306 = (double*)myMalloc(4 * sizeof(double));; int64_t x307 = (long)mallocAddr; int64_t x308 = (long)gpuMallocAddr; // training loop starts here int32_t x319 = x11 / 64; int32_t x336 = 31 / 1; int32_t x337 = x336 + 1; int32_t x341 = 6144 * x337; int32_t x342 = x341 * x337; int32_t x364 = x337 - 2; int32_t x365 = x364 / 2; int32_t x366 = x365 + 1; int32_t x370 = 6144 * x366; int32_t x371 = x370 * x366; bool x375 = x366 >= 1; bool x376; if (x375) { x376 = x375; } else { x376 = false; } int32_t x381 = x365 / 1; int32_t x382 = x381 + 1; int32_t x386 = 1024 * x382; int32_t x387 = x386 * x382; bool x405 = x382 >= 1; bool x406; if (x405) { x406 = x405; } else { x406 = false; } int32_t x411 = x381 / 1; int32_t x412 = x411 + 1; int32_t x416 = 4096 * x412; int32_t x417 = x416 * x412; int32_t x435 = x382 + 2; bool x436 = x435 >= 3; bool x437; if (x436) { x437 = x436; } else { x437 = false; } int32_t x442 = x435 - 3; int32_t x443 = x442 / 1; int32_t x444 = x443 + 1; int32_t x448 = 4096 * x444; int32_t x449 = x448 * x444; bool x467 = true || false; bool x469; if (x467) { bool x468 = true || true; x469 = x468; } else { x469 = false; } bool x472; if (x469) { bool x470 = x444 == x412; bool x471 = x470 || false; x472 = x471; } else { x472 = false; } bool x473; if (x472) { bool x470 = x444 == x412; bool x471 = x470 || false; x473 = x471; } else { x473 = false; } int32_t x482 = 8192 * x412; int32_t x483 = x482 * x412; int32_t x413 = x412 * x412; int32_t x414 = 64 * x413; int32_t x415 = 64 * x414; int32_t x445 = x444 * x444; int32_t x446 = 64 * x445; int32_t x447 = 64 * x446; int32_t x480 = 128 * x413; bool x487 = x412 >= 1; bool x488; if (x487) { x488 = x487; } else { x488 = false; } int32_t x493 = x411 / 1; int32_t x494 = x493 + 1; int32_t x498 = 1024 * x494; int32_t x499 = x498 * x494; bool x517 = x494 >= 1; bool x518; if (x517) { x518 = x517; } else { x518 = false; } int32_t x523 = x493 / 1; int32_t x524 = x523 + 1; int32_t x528 = 4096 * x524; int32_t x529 = x528 * x524; int32_t x547 = x494 + 2; bool x548 = x547 >= 3; bool x549; if (x548) { x549 = x548; } else { x549 = false; } int32_t x554 = x547 - 3; int32_t x555 = x554 / 1; int32_t x556 = x555 + 1; int32_t x560 = 4096 * x556; int32_t x561 = x560 * x556; bool x581; if (x469) { bool x579 = x556 == x524; bool x580 = x579 || false; x581 = x580; } else { x581 = false; } bool x582; if (x581) { bool x579 = x556 == x524; bool x580 = x579 || false; x582 = x580; } else { x582 = false; } int32_t x591 = 8192 * x524; int32_t x592 = x591 * x524; int32_t x525 = x524 * x524; int32_t x526 = 64 * x525; int32_t x527 = 64 * x526; int32_t x557 = x556 * x556; int32_t x558 = 64 * x557; int32_t x559 = 64 * x558; int32_t x589 = 128 * x525; bool x596 = x524 >= 1; bool x597; if (x596) { x597 = x596; } else { x597 = false; } int32_t x602 = 
x523 / 1; int32_t x603 = x602 + 1; int32_t x607 = 2048 * x603; int32_t x608 = x607 * x603; bool x626 = x603 >= 1; bool x627; if (x626) { x627 = x626; } else { x627 = false; } int32_t x632 = x602 / 1; int32_t x633 = x632 + 1; int32_t x637 = 8192 * x633; int32_t x638 = x637 * x633; int32_t x656 = x603 + 2; bool x657 = x656 >= 3; bool x658; if (x657) { x658 = x657; } else { x658 = false; } int32_t x663 = x656 - 3; int32_t x664 = x663 / 1; int32_t x665 = x664 + 1; int32_t x669 = 8192 * x665; int32_t x670 = x669 * x665; bool x690; if (x469) { bool x688 = x665 == x633; bool x689 = x688 || false; x690 = x689; } else { x690 = false; } bool x691; if (x690) { bool x688 = x665 == x633; bool x689 = x688 || false; x691 = x689; } else { x691 = false; } int32_t x700 = 16384 * x633; int32_t x701 = x700 * x633; int32_t x634 = x633 * x633; int32_t x635 = 128 * x634; int32_t x636 = 64 * x635; int32_t x666 = x665 * x665; int32_t x667 = 128 * x666; int32_t x668 = 64 * x667; int32_t x698 = 256 * x634; int32_t x709 = x633 - 2; int32_t x710 = x709 / 2; int32_t x711 = x710 + 1; int32_t x715 = 16384 * x711; int32_t x716 = x715 * x711; bool x720 = x711 >= 1; bool x721; if (x720) { x721 = x720; } else { x721 = false; } int32_t x726 = x710 / 1; int32_t x727 = x726 + 1; int32_t x731 = 2048 * x727; int32_t x732 = x731 * x727; bool x750 = x727 >= 1; bool x751; if (x750) { x751 = x750; } else { x751 = false; } int32_t x756 = x726 / 1; int32_t x757 = x756 + 1; int32_t x761 = 8192 * x757; int32_t x762 = x761 * x757; int32_t x780 = x727 + 2; bool x781 = x780 >= 3; bool x782; if (x781) { x782 = x781; } else { x782 = false; } int32_t x787 = x780 - 3; int32_t x788 = x787 / 1; int32_t x789 = x788 + 1; int32_t x793 = 8192 * x789; int32_t x794 = x793 * x789; bool x814; if (x469) { bool x812 = x789 == x757; bool x813 = x812 || false; x814 = x813; } else { x814 = false; } bool x815; if (x814) { bool x812 = x789 == x757; bool x813 = x812 || false; x815 = x813; } else { x815 = false; } int32_t x824 = 16384 * x757; int32_t x825 = x824 * x757; int32_t x758 = x757 * x757; int32_t x759 = 128 * x758; int32_t x760 = 64 * x759; int32_t x790 = x789 * x789; int32_t x791 = 128 * x790; int32_t x792 = 64 * x791; int32_t x822 = 256 * x758; bool x829 = x757 >= 1; bool x830; if (x829) { x830 = x829; } else { x830 = false; } int32_t x835 = x756 / 1; int32_t x836 = x835 + 1; int32_t x840 = 3072 * x836; int32_t x841 = x840 * x836; bool x859 = x836 >= 1; bool x860; if (x859) { x860 = x859; } else { x860 = false; } int32_t x865 = x835 / 1; int32_t x866 = x865 + 1; int32_t x870 = 12288 * x866; int32_t x871 = x870 * x866; int32_t x889 = x836 + 2; bool x890 = x889 >= 3; bool x891; if (x890) { x891 = x890; } else { x891 = false; } int32_t x896 = x889 - 3; int32_t x897 = x896 / 1; int32_t x898 = x897 + 1; int32_t x902 = 12288 * x898; int32_t x903 = x902 * x898; bool x923; if (x469) { bool x921 = x898 == x866; bool x922 = x921 || false; x923 = x922; } else { x923 = false; } bool x924; if (x923) { bool x921 = x898 == x866; bool x922 = x921 || false; x924 = x922; } else { x924 = false; } int32_t x933 = 24576 * x866; int32_t x934 = x933 * x866; int32_t x867 = x866 * x866; int32_t x868 = 192 * x867; int32_t x869 = 64 * x868; int32_t x899 = x898 * x898; int32_t x900 = 192 * x899; int32_t x901 = 64 * x900; int32_t x931 = 384 * x867; bool x938 = x866 >= 1; bool x939; if (x938) { x939 = x938; } else { x939 = false; } int32_t x944 = x865 / 1; int32_t x945 = x944 + 1; int32_t x949 = 3072 * x945; int32_t x950 = x949 * x945; bool x968 = x945 >= 1; bool x969; if (x968) { 
x969 = x968; } else { x969 = false; } int32_t x974 = x944 / 1; int32_t x975 = x974 + 1; int32_t x979 = 12288 * x975; int32_t x980 = x979 * x975; int32_t x998 = x945 + 2; bool x999 = x998 >= 3; bool x1000; if (x999) { x1000 = x999; } else { x1000 = false; } int32_t x1005 = x998 - 3; int32_t x1006 = x1005 / 1; int32_t x1007 = x1006 + 1; int32_t x1011 = 12288 * x1007; int32_t x1012 = x1011 * x1007; bool x1032; if (x469) { bool x1030 = x1007 == x975; bool x1031 = x1030 || false; x1032 = x1031; } else { x1032 = false; } bool x1033; if (x1032) { bool x1030 = x1007 == x975; bool x1031 = x1030 || false; x1033 = x1031; } else { x1033 = false; } int32_t x1042 = 24576 * x975; int32_t x1043 = x1042 * x975; int32_t x976 = x975 * x975; int32_t x977 = 192 * x976; int32_t x978 = 64 * x977; int32_t x1008 = x1007 * x1007; int32_t x1009 = 192 * x1008; int32_t x1010 = 64 * x1009; int32_t x1040 = 384 * x976; bool x1047 = x975 >= 1; bool x1048; if (x1047) { x1048 = x1047; } else { x1048 = false; } int32_t x1053 = x974 / 1; int32_t x1054 = x1053 + 1; int32_t x1058 = 4096 * x1054; int32_t x1059 = x1058 * x1054; bool x1077 = x1054 >= 1; bool x1078; if (x1077) { x1078 = x1077; } else { x1078 = false; } int32_t x1083 = x1053 / 1; int32_t x1084 = x1083 + 1; int32_t x1088 = 16384 * x1084; int32_t x1089 = x1088 * x1084; int32_t x1107 = x1054 + 2; bool x1108 = x1107 >= 3; bool x1109; if (x1108) { x1109 = x1108; } else { x1109 = false; } int32_t x1114 = x1107 - 3; int32_t x1115 = x1114 / 1; int32_t x1116 = x1115 + 1; int32_t x1120 = 16384 * x1116; int32_t x1121 = x1120 * x1116; bool x1141; if (x469) { bool x1139 = x1116 == x1084; bool x1140 = x1139 || false; x1141 = x1140; } else { x1141 = false; } bool x1142; if (x1141) { bool x1139 = x1116 == x1084; bool x1140 = x1139 || false; x1142 = x1140; } else { x1142 = false; } int32_t x1151 = 32768 * x1084; int32_t x1152 = x1151 * x1084; int32_t x1085 = x1084 * x1084; int32_t x1086 = 256 * x1085; int32_t x1087 = 64 * x1086; int32_t x1117 = x1116 * x1116; int32_t x1118 = 256 * x1117; int32_t x1119 = 64 * x1118; int32_t x1149 = 512 * x1085; int32_t x1160 = x1084 - 2; int32_t x1161 = x1160 / 2; int32_t x1162 = x1161 + 1; int32_t x1166 = 32768 * x1162; int32_t x1167 = x1166 * x1162; bool x1171 = x1162 >= 1; bool x1172; if (x1171) { x1172 = x1171; } else { x1172 = false; } int32_t x1177 = x1161 / 1; int32_t x1178 = x1177 + 1; int32_t x1182 = 4096 * x1178; int32_t x1183 = x1182 * x1178; bool x1201 = x1178 >= 1; bool x1202; if (x1201) { x1202 = x1201; } else { x1202 = false; } int32_t x1207 = x1177 / 1; int32_t x1208 = x1207 + 1; int32_t x1212 = 16384 * x1208; int32_t x1213 = x1212 * x1208; int32_t x1231 = x1178 + 2; bool x1232 = x1231 >= 3; bool x1233; if (x1232) { x1233 = x1232; } else { x1233 = false; } int32_t x1238 = x1231 - 3; int32_t x1239 = x1238 / 1; int32_t x1240 = x1239 + 1; int32_t x1244 = 16384 * x1240; int32_t x1245 = x1244 * x1240; bool x1265; if (x469) { bool x1263 = x1240 == x1208; bool x1264 = x1263 || false; x1265 = x1264; } else { x1265 = false; } bool x1266; if (x1265) { bool x1263 = x1240 == x1208; bool x1264 = x1263 || false; x1266 = x1264; } else { x1266 = false; } int32_t x1275 = 32768 * x1208; int32_t x1276 = x1275 * x1208; int32_t x1209 = x1208 * x1208; int32_t x1210 = 256 * x1209; int32_t x1211 = 64 * x1210; int32_t x1241 = x1240 * x1240; int32_t x1242 = 256 * x1241; int32_t x1243 = 64 * x1242; int32_t x1273 = 512 * x1209; bool x1280 = x1208 >= 4; bool x1281; if (x1280) { x1281 = x1280; } else { x1281 = false; } int32_t x1286 = x1208 - 4; int32_t x1287 = 
x1286 / 1; int32_t x1288 = x1287 + 1; int32_t x1292 = 640 * x1288; int32_t x1293 = x1292 * x1288; int32_t x1289 = x1288 * x1288; int32_t x1290 = 10 * x1289; int32_t x1291 = 64 * x1290; int32_t x2243 = x319 / 10; double x2248 = (double)x11; int64_t x2274 = (int64_t)x11; float x2278 = (float)x11; for(int x311=0; x311 < 4; x311++) { struct timeval begin_1, end_1, diff_1; float x313 = 0.0f; float x314 = x313; float x315 = x314; int32_t x316 = x311 + 1; printf("Start training epoch %d\n",x316); gettimeofday(&begin_1, NULL); for(int x321=0; x321 < x319; x321++) { int32_t x322 = x321 * 64; int32_t x323 = x322 * 3072; float* x324 = x13+x323; int* x325 = x14+x322; // Tensor 'toGPU' invocation. float* x327 = (float*)myGpuMalloc(196608 * sizeof(float)); CUDA_CALL(cudaMemcpy(x327, x324, 196608 * sizeof(float), cudaMemcpyHostToDevice)); float* x329 = (float*)myGpuMalloc(2 * sizeof(float)); int* x330 = (int32_t*)myGpuMalloc(64 * sizeof(int32_t)); CUDA_CALL(cudaMemcpy(x330, x325, 64 * sizeof(int32_t), cudaMemcpyHostToDevice)); float* x332 = (float*)myGpuMalloc(1 * sizeof(float)); float* x333 = (float*)myGpuMalloc(1 * sizeof(float)); // allocate memory to save the final loss in CPU Tensor float* x335 = (float*)myGpuMalloc(1 * sizeof(float)); float* x343 = (float*)myGpuMalloc(x342 * sizeof(float)); float* x344 = (float*)myMalloc(1 * sizeof(float));; x344[0] = 0.0f; float* x346 = (float*)myMalloc(1 * sizeof(float));; x346[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 3, 32, 32)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 96, 3, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 96, x337, x337)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
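// (Added note: this block is the template every convolution in this file follows --
// cudnnGetConvolutionForwardAlgorithm with CUDNN_CONVOLUTION_FWD_PREFER_FASTEST lets cuDNN
// choose its fastest algorithm, the required workspace is taken from the GPU heap, and
// cudnnConvolutionForward below computes y = alpha*conv(x, w) + beta*y. Here alpha = x346 =
// 1.0f and beta = x344 = 0.0f, so the output x343 is simply overwritten with the convolution
// of the input batch x327 with the first-layer filter x194 (96 x 3 x 3 x 3), before the
// bias add and ReLU that follow.)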
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x346, in_desc, x327, filt_desc, x194, conv_desc, algo, ws_data, ws_size, x344, out_desc, x343)); }; float* x349 = (float*)myMalloc(1 * sizeof(float));; x349[0] = 1.0f; float* x351 = (float*)myMalloc(1 * sizeof(float));; x351[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 96, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 96, x337, x337)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x349, bias_desc, x224, x351, out_desc, x343)); }; float* x354 = (float*)myGpuMalloc(x342 * sizeof(float)); float* x355 = (float*)myMalloc(1 * sizeof(float));; x355[0] = 0.0f; float* x357 = (float*)myMalloc(1 * sizeof(float));; x357[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 96, x337, x337)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x357, x_desc, x343, x355, x_desc, x343)); }; float* x360 = (float*)myMalloc(1 * sizeof(float));; x360[0] = 0.0f; float* x362 = (float*)myMalloc(1 * sizeof(float));; x362[0] = 1.0f; float* x372 = (float*)myGpuMalloc(x371 * sizeof(float)); { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 96, x337, x337) ); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 96, x366, x366)); cudnnPoolingDescriptor_t poolingDesc; CUDNN_CALL(cudnnCreatePoolingDescriptor(&poolingDesc)); CUDNN_CALL(cudnnSetPooling2dDescriptor( poolingDesc, CUDNN_POOLING_MAX, CUDNN_NOT_PROPAGATE_NAN, 2, 2, 0, 0, 2, 2 )); CUDNN_CALL(cudnnPoolingForward( cudnnHandle, poolingDesc, x362, in_desc, x343, x360, out_desc, x372)); }; float* x374 = (float*)myGpuMalloc(x371 * sizeof(float)); if (x376) { } else { assert(false && "ERROR not specified"); } float* x388 = (float*)myGpuMalloc(x387 * sizeof(float)); float* x389 = (float*)myMalloc(1 * sizeof(float));; x389[0] = 0.0f; float* x391 = (float*)myMalloc(1 * sizeof(float));; x391[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 96, x366, x366)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 16, 96, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 16, x382, x382)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, 
CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x391, in_desc, x372, filt_desc, x245, conv_desc, algo, ws_data, ws_size, x389, out_desc, x388)); }; float* x394 = (float*)myMalloc(1 * sizeof(float));; x394[0] = 1.0f; float* x396 = (float*)myMalloc(1 * sizeof(float));; x396[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 16, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 16, x382, x382)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x394, bias_desc, x119, x396, out_desc, x388)); }; float* x399 = (float*)myGpuMalloc(x387 * sizeof(float)); float* x400 = (float*)myMalloc(1 * sizeof(float));; x400[0] = 0.0f; float* x402 = (float*)myMalloc(1 * sizeof(float));; x402[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 16, x382, x382)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x402, x_desc, x388, x400, x_desc, x388)); }; if (x406) { } else { assert(false && "ERROR not specified"); } float* x418 = (float*)myGpuMalloc(x417 * sizeof(float)); float* x419 = (float*)myMalloc(1 * sizeof(float));; x419[0] = 0.0f; float* x421 = (float*)myMalloc(1 * sizeof(float));; x421[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 16, x382, x382)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 16, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x412, x412)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x421, in_desc, x388, filt_desc, x167, conv_desc, algo, ws_data, ws_size, x419, out_desc, x418)); }; float* x424 = (float*)myMalloc(1 * sizeof(float));; x424[0] = 1.0f; float* x426 = (float*)myMalloc(1 * sizeof(float));; x426[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x412, x412)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x424, bias_desc, x188, x426, out_desc, x418)); }; float* x429 = (float*)myGpuMalloc(x417 * sizeof(float)); float* x430 = (float*)myMalloc(1 * sizeof(float));; x430[0] = 0.0f; float* x432 = (float*)myMalloc(1 * sizeof(float));; x432[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x412, x412)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x432, x_desc, x418, x430, x_desc, x418)); }; if (x437) { } else { assert(false && "ERROR not specified"); } float* x450 = (float*)myGpuMalloc(x449 * sizeof(float)); float* x451 = (float*)myMalloc(1 * sizeof(float));; x451[0] = 0.0f; float* x453 = (float*)myMalloc(1 * sizeof(float));; x453[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 16, x382, x382)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 16, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x444, x444)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
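// (Added note: the 3x3 convolution executed just below and the 1x1 convolution earlier in
// this block both read the 16-channel squeeze output x388, so they appear to be the two
// "expand" branches of a SqueezeNet fire module. Their 64-channel outputs x418 and x450 are
// then concatenated along the channel dimension by concat2D_1D_greg further down, where
// blockIdx.y picks which of the two inputs a block copies and the blockIdx.x grid-stride
// loop walks that input's elements.)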
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x453, in_desc, x388, filt_desc, x236, conv_desc, algo, ws_data, ws_size, x451, out_desc, x450)); }; float* x456 = (float*)myMalloc(1 * sizeof(float));; x456[0] = 1.0f; float* x458 = (float*)myMalloc(1 * sizeof(float));; x458[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x444, x444)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x456, bias_desc, x110, x458, out_desc, x450)); }; float* x461 = (float*)myGpuMalloc(x449 * sizeof(float)); float* x462 = (float*)myMalloc(1 * sizeof(float));; x462[0] = 0.0f; float* x464 = (float*)myMalloc(1 * sizeof(float));; x464[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x444, x444)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x464, x_desc, x450, x462, x_desc, x450)); }; if (x473) { } else { printf("all dimensions except the concatenation dimension should be the same\n"); assert(false && ""); } // back prop for concat float* x484 = (float*)myGpuMalloc(x483 * sizeof(float)); { dim3 grid(28, 2); concat2D_1D_greg<<<grid, 512>>>(x418, 64, x415, x450, 64, x447, x484, 1, 64, 128, x412, x412, x480, x413, x412, 1); }; float* x486 = (float*)myGpuMalloc(x483 * sizeof(float)); if (x488) { } else { assert(false && "ERROR not specified"); } float* x500 = (float*)myGpuMalloc(x499 * sizeof(float)); float* x501 = (float*)myMalloc(1 * sizeof(float));; x501[0] = 0.0f; float* x503 = (float*)myMalloc(1 * sizeof(float));; x503[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x412, x412)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 16, 128, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 16, x494, x494)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x503, in_desc, x484, filt_desc, x131, conv_desc, algo, ws_data, ws_size, x501, out_desc, x500)); }; float* x506 = (float*)myMalloc(1 * sizeof(float));; x506[0] = 1.0f; float* x508 = (float*)myMalloc(1 * sizeof(float));; x508[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 16, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 16, x494, x494)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x506, bias_desc, x170, x508, out_desc, x500)); }; float* x511 = (float*)myGpuMalloc(x499 * sizeof(float)); float* x512 = (float*)myMalloc(1 * sizeof(float));; x512[0] = 0.0f; float* x514 = (float*)myMalloc(1 * sizeof(float));; x514[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 16, x494, x494)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x514, x_desc, x500, x512, x_desc, x500)); }; if (x518) { } else { assert(false && "ERROR not specified"); } float* x530 = (float*)myGpuMalloc(x529 * sizeof(float)); float* x531 = (float*)myMalloc(1 * sizeof(float));; x531[0] = 0.0f; float* x533 = (float*)myMalloc(1 * sizeof(float));; x533[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 16, x494, x494)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 16, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x524, x524)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x533, in_desc, x500, filt_desc, x128, conv_desc, algo, ws_data, ws_size, x531, out_desc, x530)); }; float* x536 = (float*)myMalloc(1 * sizeof(float));; x536[0] = 1.0f; float* x538 = (float*)myMalloc(1 * sizeof(float));; x538[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x524, x524)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x536, bias_desc, x104, x538, out_desc, x530)); }; float* x541 = (float*)myGpuMalloc(x529 * sizeof(float)); float* x542 = (float*)myMalloc(1 * sizeof(float));; x542[0] = 0.0f; float* x544 = (float*)myMalloc(1 * sizeof(float));; x544[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x524, x524)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x544, x_desc, x530, x542, x_desc, x530)); }; if (x549) { } else { assert(false && "ERROR not specified"); } float* x562 = (float*)myGpuMalloc(x561 * sizeof(float)); float* x563 = (float*)myMalloc(1 * sizeof(float));; x563[0] = 0.0f; float* x565 = (float*)myMalloc(1 * sizeof(float));; x565[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 16, x494, x494)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 16, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x556, x556)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x565, in_desc, x500, filt_desc, x152, conv_desc, algo, ws_data, ws_size, x563, out_desc, x562)); }; float* x568 = (float*)myMalloc(1 * sizeof(float));; x568[0] = 1.0f; float* x570 = (float*)myMalloc(1 * sizeof(float));; x570[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x556, x556)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x568, bias_desc, x206, x570, out_desc, x562)); }; float* x573 = (float*)myGpuMalloc(x561 * sizeof(float)); float* x574 = (float*)myMalloc(1 * sizeof(float));; x574[0] = 0.0f; float* x576 = (float*)myMalloc(1 * sizeof(float));; x576[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x556, x556)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x576, x_desc, x562, x574, x_desc, x562)); }; if (x582) { } else { printf("all dimensions except the concatenation dimension should be the same\n"); assert(false && ""); } // back prop for concat float* x593 = (float*)myGpuMalloc(x592 * sizeof(float)); { dim3 grid(28, 2); concat2D_1D_greg<<<grid, 512>>>(x530, 64, x527, x562, 64, x559, x593, 1, 64, 128, x524, x524, x589, x525, x524, 1); }; float* x595 = (float*)myGpuMalloc(x592 * sizeof(float)); if (x597) { } else { assert(false && "ERROR not specified"); } float* x609 = (float*)myGpuMalloc(x608 * sizeof(float)); float* x610 = (float*)myMalloc(1 * sizeof(float));; x610[0] = 0.0f; float* x612 = (float*)myMalloc(1 * sizeof(float));; x612[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x524, x524)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 32, 128, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 32, x603, x603)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
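// Squeeze convolution of the next fire module: 32 1x1 filters (weights x125) over the
// 128-channel concat x593, bias x164, ReLU, then setup of its expand 1x1 branch
// (128 filters over 32 channels).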
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x612, in_desc, x593, filt_desc, x125, conv_desc, algo, ws_data, ws_size, x610, out_desc, x609)); }; float* x615 = (float*)myMalloc(1 * sizeof(float));; x615[0] = 1.0f; float* x617 = (float*)myMalloc(1 * sizeof(float));; x617[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 32, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 32, x603, x603)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x615, bias_desc, x164, x617, out_desc, x609)); }; float* x620 = (float*)myGpuMalloc(x608 * sizeof(float)); float* x621 = (float*)myMalloc(1 * sizeof(float));; x621[0] = 0.0f; float* x623 = (float*)myMalloc(1 * sizeof(float));; x623[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 32, x603, x603)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x623, x_desc, x609, x621, x_desc, x609)); }; if (x627) { } else { assert(false && "ERROR not specified"); } float* x639 = (float*)myGpuMalloc(x638 * sizeof(float)); float* x640 = (float*)myMalloc(1 * sizeof(float));; x640[0] = 0.0f; float* x642 = (float*)myMalloc(1 * sizeof(float));; x642[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 32, x603, x603)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 32, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x633, x633)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
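// Expand 1x1 branch: 128 1x1 filters (weights x200) over the 32-channel squeeze output
// x609, bias x230, ReLU, then setup of the matching 3x3 expand branch (128 filters, pad 1).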
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x642, in_desc, x609, filt_desc, x200, conv_desc, algo, ws_data, ws_size, x640, out_desc, x639)); }; float* x645 = (float*)myMalloc(1 * sizeof(float));; x645[0] = 1.0f; float* x647 = (float*)myMalloc(1 * sizeof(float));; x647[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x633, x633)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x645, bias_desc, x230, x647, out_desc, x639)); }; float* x650 = (float*)myGpuMalloc(x638 * sizeof(float)); float* x651 = (float*)myMalloc(1 * sizeof(float));; x651[0] = 0.0f; float* x653 = (float*)myMalloc(1 * sizeof(float));; x653[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x633, x633)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x653, x_desc, x639, x651, x_desc, x639)); }; if (x658) { } else { assert(false && "ERROR not specified"); } float* x671 = (float*)myGpuMalloc(x670 * sizeof(float)); float* x672 = (float*)myMalloc(1 * sizeof(float));; x672[0] = 0.0f; float* x674 = (float*)myMalloc(1 * sizeof(float));; x674[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 32, x603, x603)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 32, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x665, x665)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
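// Expand 3x3 branch: 128 filters (weights x113) over x609, bias x218, ReLU, concat of the
// two 128-channel branches into the 256-channel tensor x702, a 2x2/stride-2 max pooling
// down to x717, and setup of the following squeeze convolution (32 1x1 filters over 256).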
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x674, in_desc, x609, filt_desc, x113, conv_desc, algo, ws_data, ws_size, x672, out_desc, x671)); }; float* x677 = (float*)myMalloc(1 * sizeof(float));; x677[0] = 1.0f; float* x679 = (float*)myMalloc(1 * sizeof(float));; x679[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x665, x665)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x677, bias_desc, x218, x679, out_desc, x671)); }; float* x682 = (float*)myGpuMalloc(x670 * sizeof(float)); float* x683 = (float*)myMalloc(1 * sizeof(float));; x683[0] = 0.0f; float* x685 = (float*)myMalloc(1 * sizeof(float));; x685[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x665, x665)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x685, x_desc, x671, x683, x_desc, x671)); }; if (x691) { } else { printf("all dimensions except the concatenation dimension should be the same\n"); assert(false && ""); } // back prop for concat float* x702 = (float*)myGpuMalloc(x701 * sizeof(float)); { dim3 grid(28, 2); concat2D_1D_greg<<<grid, 512>>>(x639, 128, x636, x671, 128, x668, x702, 1, 64, 256, x633, x633, x698, x634, x633, 1); }; float* x704 = (float*)myGpuMalloc(x701 * sizeof(float)); float* x705 = (float*)myMalloc(1 * sizeof(float));; x705[0] = 0.0f; float* x707 = (float*)myMalloc(1 * sizeof(float));; x707[0] = 1.0f; float* x717 = (float*)myGpuMalloc(x716 * sizeof(float)); { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x633, x633) ); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x711, x711)); cudnnPoolingDescriptor_t poolingDesc; CUDNN_CALL(cudnnCreatePoolingDescriptor(&poolingDesc)); CUDNN_CALL(cudnnSetPooling2dDescriptor( poolingDesc, CUDNN_POOLING_MAX, CUDNN_NOT_PROPAGATE_NAN, 2, 2, 0, 0, 2, 2 )); CUDNN_CALL(cudnnPoolingForward( cudnnHandle, poolingDesc, x707, in_desc, x702, x705, out_desc, x717)); }; float* x719 = (float*)myGpuMalloc(x716 * sizeof(float)); if (x721) { } else { assert(false && "ERROR not specified"); } float* x733 = (float*)myGpuMalloc(x732 * sizeof(float)); float* x734 = (float*)myMalloc(1 * sizeof(float));; x734[0] = 0.0f; float* x736 = (float*)myMalloc(1 * sizeof(float));; x736[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x711, x711)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 32, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; 
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 32, x727, x727)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x736, in_desc, x717, filt_desc, x176, conv_desc, algo, ws_data, ws_size, x734, out_desc, x733)); }; float* x739 = (float*)myMalloc(1 * sizeof(float));; x739[0] = 1.0f; float* x741 = (float*)myMalloc(1 * sizeof(float));; x741[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 32, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 32, x727, x727)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x739, bias_desc, x140, x741, out_desc, x733)); }; float* x744 = (float*)myGpuMalloc(x732 * sizeof(float)); float* x745 = (float*)myMalloc(1 * sizeof(float));; x745[0] = 0.0f; float* x747 = (float*)myMalloc(1 * sizeof(float));; x747[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 32, x727, x727)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x747, x_desc, x733, x745, x_desc, x733)); }; if (x751) { } else { assert(false && "ERROR not specified"); } float* x763 = (float*)myGpuMalloc(x762 * sizeof(float)); float* x764 = (float*)myMalloc(1 * sizeof(float));; x764[0] = 0.0f; float* x766 = (float*)myMalloc(1 * sizeof(float));; x766[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 32, x727, x727)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 32, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x757, x757)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. 
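// Expand 1x1 branch: 128 1x1 filters (weights x116) over the 32-channel squeeze output
// x733; algorithm and workspace are chosen below, then the convolution, bias x158 and
// ReLU are applied, followed by setup of the 3x3 expand branch.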
cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x766, in_desc, x733, filt_desc, x116, conv_desc, algo, ws_data, ws_size, x764, out_desc, x763)); }; float* x769 = (float*)myMalloc(1 * sizeof(float));; x769[0] = 1.0f; float* x771 = (float*)myMalloc(1 * sizeof(float));; x771[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x757, x757)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x769, bias_desc, x158, x771, out_desc, x763)); }; float* x774 = (float*)myGpuMalloc(x762 * sizeof(float)); float* x775 = (float*)myMalloc(1 * sizeof(float));; x775[0] = 0.0f; float* x777 = (float*)myMalloc(1 * sizeof(float));; x777[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x757, x757)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x777, x_desc, x763, x775, x_desc, x763)); }; if (x782) { } else { assert(false && "ERROR not specified"); } float* x795 = (float*)myGpuMalloc(x794 * sizeof(float)); float* x796 = (float*)myMalloc(1 * sizeof(float));; x796[0] = 0.0f; float* x798 = (float*)myMalloc(1 * sizeof(float));; x798[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 32, x727, x727)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 32, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x789, x789)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
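// Expand 3x3 branch: 128 filters (weights x203) over x733, bias x143, ReLU, concat into
// the 256-channel tensor x826, then setup of the next squeeze convolution
// (48 1x1 filters over 256 channels).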
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x798, in_desc, x733, filt_desc, x203, conv_desc, algo, ws_data, ws_size, x796, out_desc, x795)); }; float* x801 = (float*)myMalloc(1 * sizeof(float));; x801[0] = 1.0f; float* x803 = (float*)myMalloc(1 * sizeof(float));; x803[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x789, x789)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x801, bias_desc, x143, x803, out_desc, x795)); }; float* x806 = (float*)myGpuMalloc(x794 * sizeof(float)); float* x807 = (float*)myMalloc(1 * sizeof(float));; x807[0] = 0.0f; float* x809 = (float*)myMalloc(1 * sizeof(float));; x809[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x789, x789)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x809, x_desc, x795, x807, x_desc, x795)); }; if (x815) { } else { printf("all dimensions except the concatenation dimension should be the same\n"); assert(false && ""); } // back prop for concat float* x826 = (float*)myGpuMalloc(x825 * sizeof(float)); { dim3 grid(28, 2); concat2D_1D_greg<<<grid, 512>>>(x763, 128, x760, x795, 128, x792, x826, 1, 64, 256, x757, x757, x822, x758, x757, 1); }; float* x828 = (float*)myGpuMalloc(x825 * sizeof(float)); if (x830) { } else { assert(false && "ERROR not specified"); } float* x842 = (float*)myGpuMalloc(x841 * sizeof(float)); float* x843 = (float*)myMalloc(1 * sizeof(float));; x843[0] = 0.0f; float* x845 = (float*)myMalloc(1 * sizeof(float));; x845[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x757, x757)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 48, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 48, x836, x836)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
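// Squeeze convolution: 48 1x1 filters (weights x221) over the 256-channel x826,
// bias x251, ReLU, then setup of the expand 1x1 branch (192 filters over 48 channels).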
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x845, in_desc, x826, filt_desc, x221, conv_desc, algo, ws_data, ws_size, x843, out_desc, x842)); }; float* x848 = (float*)myMalloc(1 * sizeof(float));; x848[0] = 1.0f; float* x850 = (float*)myMalloc(1 * sizeof(float));; x850[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 48, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 48, x836, x836)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x848, bias_desc, x251, x850, out_desc, x842)); }; float* x853 = (float*)myGpuMalloc(x841 * sizeof(float)); float* x854 = (float*)myMalloc(1 * sizeof(float));; x854[0] = 0.0f; float* x856 = (float*)myMalloc(1 * sizeof(float));; x856[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 48, x836, x836)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x856, x_desc, x842, x854, x_desc, x842)); }; if (x860) { } else { assert(false && "ERROR not specified"); } float* x872 = (float*)myGpuMalloc(x871 * sizeof(float)); float* x873 = (float*)myMalloc(1 * sizeof(float));; x873[0] = 0.0f; float* x875 = (float*)myMalloc(1 * sizeof(float));; x875[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 48, x836, x836)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 192, 48, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 192, x866, x866)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
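// Expand 1x1 branch: 192 filters (weights x239) over the 48-channel x842, bias x233,
// ReLU, then setup of the 3x3 expand branch (192 filters, pad 1).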
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x875, in_desc, x842, filt_desc, x239, conv_desc, algo, ws_data, ws_size, x873, out_desc, x872)); }; float* x878 = (float*)myMalloc(1 * sizeof(float));; x878[0] = 1.0f; float* x880 = (float*)myMalloc(1 * sizeof(float));; x880[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 192, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 192, x866, x866)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x878, bias_desc, x233, x880, out_desc, x872)); }; float* x883 = (float*)myGpuMalloc(x871 * sizeof(float)); float* x884 = (float*)myMalloc(1 * sizeof(float));; x884[0] = 0.0f; float* x886 = (float*)myMalloc(1 * sizeof(float));; x886[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 192, x866, x866)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x886, x_desc, x872, x884, x_desc, x872)); }; if (x891) { } else { assert(false && "ERROR not specified"); } float* x904 = (float*)myGpuMalloc(x903 * sizeof(float)); float* x905 = (float*)myMalloc(1 * sizeof(float));; x905[0] = 0.0f; float* x907 = (float*)myMalloc(1 * sizeof(float));; x907[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 48, x836, x836)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 192, 48, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 192, x898, x898)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
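// Expand 3x3 branch: 192 filters (weights x212) over x842, bias x182, ReLU, concat into
// the 384-channel tensor x935, then setup of the next squeeze convolution
// (48 1x1 filters over 384 channels).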
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x907, in_desc, x842, filt_desc, x212, conv_desc, algo, ws_data, ws_size, x905, out_desc, x904)); }; float* x910 = (float*)myMalloc(1 * sizeof(float));; x910[0] = 1.0f; float* x912 = (float*)myMalloc(1 * sizeof(float));; x912[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 192, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 192, x898, x898)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x910, bias_desc, x182, x912, out_desc, x904)); }; float* x915 = (float*)myGpuMalloc(x903 * sizeof(float)); float* x916 = (float*)myMalloc(1 * sizeof(float));; x916[0] = 0.0f; float* x918 = (float*)myMalloc(1 * sizeof(float));; x918[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 192, x898, x898)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x918, x_desc, x904, x916, x_desc, x904)); }; if (x924) { } else { printf("all dimensions except the concatenation dimension should be the same\n"); assert(false && ""); } // back prop for concat float* x935 = (float*)myGpuMalloc(x934 * sizeof(float)); { dim3 grid(28, 2); concat2D_1D_greg<<<grid, 512>>>(x872, 192, x869, x904, 192, x901, x935, 1, 64, 384, x866, x866, x931, x867, x866, 1); }; float* x937 = (float*)myGpuMalloc(x934 * sizeof(float)); if (x939) { } else { assert(false && "ERROR not specified"); } float* x951 = (float*)myGpuMalloc(x950 * sizeof(float)); float* x952 = (float*)myMalloc(1 * sizeof(float));; x952[0] = 0.0f; float* x954 = (float*)myMalloc(1 * sizeof(float));; x954[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 384, x866, x866)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 48, 384, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 48, x945, x945)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
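// Squeeze convolution: 48 1x1 filters (weights x137) over the 384-channel x935,
// bias x101, ReLU, then setup of the expand 1x1 branch (192 filters over 48 channels).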
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x954, in_desc, x935, filt_desc, x137, conv_desc, algo, ws_data, ws_size, x952, out_desc, x951)); }; float* x957 = (float*)myMalloc(1 * sizeof(float));; x957[0] = 1.0f; float* x959 = (float*)myMalloc(1 * sizeof(float));; x959[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 48, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 48, x945, x945)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x957, bias_desc, x101, x959, out_desc, x951)); }; float* x962 = (float*)myGpuMalloc(x950 * sizeof(float)); float* x963 = (float*)myMalloc(1 * sizeof(float));; x963[0] = 0.0f; float* x965 = (float*)myMalloc(1 * sizeof(float));; x965[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 48, x945, x945)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x965, x_desc, x951, x963, x_desc, x951)); }; if (x969) { } else { assert(false && "ERROR not specified"); } float* x981 = (float*)myGpuMalloc(x980 * sizeof(float)); float* x982 = (float*)myMalloc(1 * sizeof(float));; x982[0] = 0.0f; float* x984 = (float*)myMalloc(1 * sizeof(float));; x984[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 48, x945, x945)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 192, 48, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 192, x975, x975)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
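// Expand 1x1 branch: 192 filters (weights x161) over the 48-channel x951, bias x191,
// ReLU, then setup of the 3x3 expand branch (192 filters, pad 1).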
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x984, in_desc, x951, filt_desc, x161, conv_desc, algo, ws_data, ws_size, x982, out_desc, x981)); }; float* x987 = (float*)myMalloc(1 * sizeof(float));; x987[0] = 1.0f; float* x989 = (float*)myMalloc(1 * sizeof(float));; x989[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 192, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 192, x975, x975)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x987, bias_desc, x191, x989, out_desc, x981)); }; float* x992 = (float*)myGpuMalloc(x980 * sizeof(float)); float* x993 = (float*)myMalloc(1 * sizeof(float));; x993[0] = 0.0f; float* x995 = (float*)myMalloc(1 * sizeof(float));; x995[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 192, x975, x975)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x995, x_desc, x981, x993, x_desc, x981)); }; if (x1000) { } else { assert(false && "ERROR not specified"); } float* x1013 = (float*)myGpuMalloc(x1012 * sizeof(float)); float* x1014 = (float*)myMalloc(1 * sizeof(float));; x1014[0] = 0.0f; float* x1016 = (float*)myMalloc(1 * sizeof(float));; x1016[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 48, x945, x945)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 192, 48, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 192, x1007, x1007)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
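// Expand 3x3 branch: 192 filters (weights x149) over x951, bias x227, ReLU, concat into
// the 384-channel tensor x1044, then setup of the next squeeze convolution
// (64 1x1 filters over 384 channels).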
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1016, in_desc, x951, filt_desc, x149, conv_desc, algo, ws_data, ws_size, x1014, out_desc, x1013)); }; float* x1019 = (float*)myMalloc(1 * sizeof(float));; x1019[0] = 1.0f; float* x1021 = (float*)myMalloc(1 * sizeof(float));; x1021[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 192, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 192, x1007, x1007)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x1019, bias_desc, x227, x1021, out_desc, x1013)); }; float* x1024 = (float*)myGpuMalloc(x1012 * sizeof(float)); float* x1025 = (float*)myMalloc(1 * sizeof(float));; x1025[0] = 0.0f; float* x1027 = (float*)myMalloc(1 * sizeof(float));; x1027[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 192, x1007, x1007)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1027, x_desc, x1013, x1025, x_desc, x1013)); }; if (x1033) { } else { printf("all dimensions except the concatenation dimension should be the same\n"); assert(false && ""); } // back prop for concat float* x1044 = (float*)myGpuMalloc(x1043 * sizeof(float)); { dim3 grid(28, 2); concat2D_1D_greg<<<grid, 512>>>(x981, 192, x978, x1013, 192, x1010, x1044, 1, 64, 384, x975, x975, x1040, x976, x975, 1); }; float* x1046 = (float*)myGpuMalloc(x1043 * sizeof(float)); if (x1048) { } else { assert(false && "ERROR not specified"); } float* x1060 = (float*)myGpuMalloc(x1059 * sizeof(float)); float* x1061 = (float*)myMalloc(1 * sizeof(float));; x1061[0] = 0.0f; float* x1063 = (float*)myMalloc(1 * sizeof(float));; x1063[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 384, x975, x975)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 384, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1054, x1054)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
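// Squeeze convolution: 64 1x1 filters (weights x197) over the 384-channel x1044,
// bias x122, ReLU, then setup of the expand 1x1 branch (256 filters over 64 channels).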
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1063, in_desc, x1044, filt_desc, x197, conv_desc, algo, ws_data, ws_size, x1061, out_desc, x1060)); }; float* x1066 = (float*)myMalloc(1 * sizeof(float));; x1066[0] = 1.0f; float* x1068 = (float*)myMalloc(1 * sizeof(float));; x1068[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1054, x1054)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x1066, bias_desc, x122, x1068, out_desc, x1060)); }; float* x1071 = (float*)myGpuMalloc(x1059 * sizeof(float)); float* x1072 = (float*)myMalloc(1 * sizeof(float));; x1072[0] = 0.0f; float* x1074 = (float*)myMalloc(1 * sizeof(float));; x1074[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1054, x1054)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1074, x_desc, x1060, x1072, x_desc, x1060)); }; if (x1078) { } else { assert(false && "ERROR not specified"); } float* x1090 = (float*)myGpuMalloc(x1089 * sizeof(float)); float* x1091 = (float*)myMalloc(1 * sizeof(float));; x1091[0] = 0.0f; float* x1093 = (float*)myMalloc(1 * sizeof(float));; x1093[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1054, x1054)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1084, x1084)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
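// Expand 1x1 branch: 256 filters (weights x242) over the 64-channel x1060, bias x215,
// ReLU, then setup of the 3x3 expand branch (256 filters, pad 1).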
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1093, in_desc, x1060, filt_desc, x242, conv_desc, algo, ws_data, ws_size, x1091, out_desc, x1090)); }; float* x1096 = (float*)myMalloc(1 * sizeof(float));; x1096[0] = 1.0f; float* x1098 = (float*)myMalloc(1 * sizeof(float));; x1098[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1084, x1084)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x1096, bias_desc, x215, x1098, out_desc, x1090)); }; float* x1101 = (float*)myGpuMalloc(x1089 * sizeof(float)); float* x1102 = (float*)myMalloc(1 * sizeof(float));; x1102[0] = 0.0f; float* x1104 = (float*)myMalloc(1 * sizeof(float));; x1104[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1084, x1084)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1104, x_desc, x1090, x1102, x_desc, x1090)); }; if (x1109) { } else { assert(false && "ERROR not specified"); } float* x1122 = (float*)myGpuMalloc(x1121 * sizeof(float)); float* x1123 = (float*)myMalloc(1 * sizeof(float));; x1123[0] = 0.0f; float* x1125 = (float*)myMalloc(1 * sizeof(float));; x1125[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1054, x1054)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1116, x1116)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
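// Expand 3x3 branch: 256 filters (weights x179) over x1060, bias x134, ReLU, concat into
// the 512-channel tensor x1153, a second 2x2/stride-2 max pooling down to x1168, and
// setup of the following squeeze convolution (64 1x1 filters over 512 channels).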
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1125, in_desc, x1060, filt_desc, x179, conv_desc, algo, ws_data, ws_size, x1123, out_desc, x1122)); }; float* x1128 = (float*)myMalloc(1 * sizeof(float));; x1128[0] = 1.0f; float* x1130 = (float*)myMalloc(1 * sizeof(float));; x1130[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1116, x1116)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x1128, bias_desc, x134, x1130, out_desc, x1122)); }; float* x1133 = (float*)myGpuMalloc(x1121 * sizeof(float)); float* x1134 = (float*)myMalloc(1 * sizeof(float));; x1134[0] = 0.0f; float* x1136 = (float*)myMalloc(1 * sizeof(float));; x1136[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1116, x1116)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1136, x_desc, x1122, x1134, x_desc, x1122)); }; if (x1142) { } else { printf("all dimensions except the concatenation dimension should be the same\n"); assert(false && ""); } // back prop for concat float* x1153 = (float*)myGpuMalloc(x1152 * sizeof(float)); { dim3 grid(28, 2); concat2D_1D_greg<<<grid, 512>>>(x1090, 256, x1087, x1122, 256, x1119, x1153, 1, 64, 512, x1084, x1084, x1149, x1085, x1084, 1); }; float* x1155 = (float*)myGpuMalloc(x1152 * sizeof(float)); float* x1156 = (float*)myMalloc(1 * sizeof(float));; x1156[0] = 0.0f; float* x1158 = (float*)myMalloc(1 * sizeof(float));; x1158[0] = 1.0f; float* x1168 = (float*)myGpuMalloc(x1167 * sizeof(float)); { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1084, x1084) ); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1162, x1162)); cudnnPoolingDescriptor_t poolingDesc; CUDNN_CALL(cudnnCreatePoolingDescriptor(&poolingDesc)); CUDNN_CALL(cudnnSetPooling2dDescriptor( poolingDesc, CUDNN_POOLING_MAX, CUDNN_NOT_PROPAGATE_NAN, 2, 2, 0, 0, 2, 2 )); CUDNN_CALL(cudnnPoolingForward( cudnnHandle, poolingDesc, x1158, in_desc, x1153, x1156, out_desc, x1168)); }; float* x1170 = (float*)myGpuMalloc(x1167 * sizeof(float)); if (x1172) { } else { assert(false && "ERROR not specified"); } float* x1184 = (float*)myGpuMalloc(x1183 * sizeof(float)); float* x1185 = (float*)myMalloc(1 * sizeof(float));; x1185[0] = 0.0f; float* x1187 = (float*)myMalloc(1 * sizeof(float));; x1187[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1162, x1162)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 512, 1, 1)); 
cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1178, x1178)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1187, in_desc, x1168, filt_desc, x98, conv_desc, algo, ws_data, ws_size, x1185, out_desc, x1184)); }; float* x1190 = (float*)myMalloc(1 * sizeof(float));; x1190[0] = 1.0f; float* x1192 = (float*)myMalloc(1 * sizeof(float));; x1192[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1178, x1178)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x1190, bias_desc, x155, x1192, out_desc, x1184)); }; float* x1195 = (float*)myGpuMalloc(x1183 * sizeof(float)); float* x1196 = (float*)myMalloc(1 * sizeof(float));; x1196[0] = 0.0f; float* x1198 = (float*)myMalloc(1 * sizeof(float));; x1198[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1178, x1178)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1198, x_desc, x1184, x1196, x_desc, x1184)); }; if (x1202) { } else { assert(false && "ERROR not specified"); } float* x1214 = (float*)myGpuMalloc(x1213 * sizeof(float)); float* x1215 = (float*)myMalloc(1 * sizeof(float));; x1215[0] = 0.0f; float* x1217 = (float*)myMalloc(1 * sizeof(float));; x1217[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1178, x1178)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1208, x1208)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // 
Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1217, in_desc, x1184, filt_desc, x209, conv_desc, algo, ws_data, ws_size, x1215, out_desc, x1214)); }; float* x1220 = (float*)myMalloc(1 * sizeof(float));; x1220[0] = 1.0f; float* x1222 = (float*)myMalloc(1 * sizeof(float));; x1222[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1208, x1208)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x1220, bias_desc, x173, x1222, out_desc, x1214)); }; float* x1225 = (float*)myGpuMalloc(x1213 * sizeof(float)); float* x1226 = (float*)myMalloc(1 * sizeof(float));; x1226[0] = 0.0f; float* x1228 = (float*)myMalloc(1 * sizeof(float));; x1228[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1208, x1208)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1228, x_desc, x1214, x1226, x_desc, x1214)); }; if (x1233) { } else { assert(false && "ERROR not specified"); } float* x1246 = (float*)myGpuMalloc(x1245 * sizeof(float)); float* x1247 = (float*)myMalloc(1 * sizeof(float));; x1247[0] = 0.0f; float* x1249 = (float*)myMalloc(1 * sizeof(float));; x1249[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1178, x1178)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1240, x1240)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
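// Expand 3x3 branch: 256 filters (weights x185) over the 64-channel x1184, bias x146,
// ReLU, concat into the 512-channel tensor x1277, then setup of the final 4x4
// convolution that maps 512 channels to the 10 class scores.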
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1249, in_desc, x1184, filt_desc, x185, conv_desc, algo, ws_data, ws_size, x1247, out_desc, x1246)); }; float* x1252 = (float*)myMalloc(1 * sizeof(float));; x1252[0] = 1.0f; float* x1254 = (float*)myMalloc(1 * sizeof(float));; x1254[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1240, x1240)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x1252, bias_desc, x146, x1254, out_desc, x1246)); }; float* x1257 = (float*)myGpuMalloc(x1245 * sizeof(float)); float* x1258 = (float*)myMalloc(1 * sizeof(float));; x1258[0] = 0.0f; float* x1260 = (float*)myMalloc(1 * sizeof(float));; x1260[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1240, x1240)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1260, x_desc, x1246, x1258, x_desc, x1246)); }; if (x1266) { } else { printf("all dimensions except the concatenation dimension should be the same\n"); assert(false && ""); } // back prop for concat float* x1277 = (float*)myGpuMalloc(x1276 * sizeof(float)); { dim3 grid(28, 2); concat2D_1D_greg<<<grid, 512>>>(x1214, 256, x1211, x1246, 256, x1243, x1277, 1, 64, 512, x1208, x1208, x1273, x1209, x1208, 1); }; float* x1279 = (float*)myGpuMalloc(x1276 * sizeof(float)); if (x1281) { } else { assert(false && "ERROR not specified"); } float* x1294 = (float*)myGpuMalloc(x1293 * sizeof(float)); float* x1295 = (float*)myMalloc(1 * sizeof(float));; x1295[0] = 0.0f; float* x1297 = (float*)myMalloc(1 * sizeof(float));; x1297[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1208, x1208)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 10, 512, 4, 4)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 10, x1288, x1288)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
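// The 4x4 convolution executed below acts as the classifier head: the 512-channel
// concat output x1277 is reduced to 10 channels in x1294, which (after adding the bias
// x248) feeds the log-softmax and NLL loss that follow.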
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1297, in_desc, x1277, filt_desc, x107, conv_desc, algo, ws_data, ws_size, x1295, out_desc, x1294)); }; float* x1300 = (float*)myMalloc(1 * sizeof(float));; x1300[0] = 1.0f; float* x1302 = (float*)myMalloc(1 * sizeof(float));; x1302[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 10, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 10, x1288, x1288)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x1300, bias_desc, x248, x1302, out_desc, x1294)); }; float* x1305 = (float*)myGpuMalloc(x1293 * sizeof(float)); int32_t x1306 = 0; int32_t x1307 = 1; x1307 *= 64; x1307 *= 10; int32_t x1310 = x1306; bool x1311 = x1310 >= 2; if (x1311) { printf("cannot have 2 or more -1s in resize!!\n"); assert(false && ""); } else { } bool x1317 = x1310 == 0; if (x1317) { int32_t x1318 = x1307; bool x1319 = x1318 == x1291; if (x1319) { } else { assert(false && "must same size!!"); } } else { } int32_t x1326 = 0; int32_t x1327 = 1; x1327 *= 64; x1327 *= 10; x1327 *= 1; x1327 *= 1; int32_t x1332 = x1326; bool x1333 = x1332 >= 2; if (x1333) { printf("cannot have 2 or more -1s in resize!!\n"); assert(false && ""); } else { } bool x1338 = x1332 == 0; if (x1338) { int32_t x1339 = x1327; bool x1340 = x1339 == 640; if (x1340) { } else { assert(false && "must same size!!"); } } else { } float* x1347 = (float*)myMalloc(1 * sizeof(float));; x1347[0] = 0.0f; float* x1349 = (float*)myMalloc(1 * sizeof(float));; x1349[0] = 1.0f; float* x1351 = (float*)myGpuMalloc(640 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 10, 1, 1)); CUDNN_CALL(cudnnSoftmaxForward( cudnnHandle, CUDNN_SOFTMAX_LOG, CUDNN_SOFTMAX_MODE_CHANNEL, x1349, x_desc, x1294, x1347, x_desc, x1351)); }; int32_t x1353 = 0; int32_t x1354 = 1; x1354 *= 64; x1354 *= 10; int32_t x1357 = x1353; bool x1358 = x1357 >= 2; if (x1358) { printf("cannot have 2 or more -1s in resize!!\n"); assert(false && ""); } else { } bool x1363 = x1357 == 0; if (x1363) { int32_t x1364 = x1354; bool x1365 = x1364 == 640; if (x1365) { } else { assert(false && "must same size!!"); } } else { } float* x1372 = (float*)myGpuMalloc(640 * sizeof(float)); float* x1373 = (float*)myGpuMalloc(64 * sizeof(float)); nllLoss<<<64, 1>>>(x1351, 10, x1373, x330); float* x1375 = (float*)myGpuMalloc(64 * sizeof(float)); int32_t x1376 = 0; int32_t x1377 = 1; x1377 *= 64; x1377 *= 1; x1377 *= 1; x1377 *= 1; int32_t x1382 = x1376; bool x1383 = x1382 >= 2; if (x1383) { printf("cannot have 2 or more -1s in resize!!\n"); assert(false && ""); } else { } bool x1388 = x1382 == 0; if (x1388) { int32_t x1389 = x1377; bool x1390 = x1389 == 64; if (x1390) { } else { assert(false && "must same size!!"); } } else { } float* x1397 = (float*)myGpuMalloc(1 * sizeof(float)); float* x1398 = (float*)myMalloc(1 * sizeof(float));; x1398[0] = 0.0f; float* x1400 = (float*)myMalloc(1 * sizeof(float));; x1400[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); 
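// The CUDNN_REDUCE_TENSOR_AVG reduction configured here averages the 64 per-example
// NLL losses in x1373 into the scalar batch loss x1397; its backward step further down
// simply scales the incoming gradient by 1/64 (the batch size). Note that x1421 is
// allocated with myGpuMalloc yet dereferenced on the host (x1421[0]), which presumably
// relies on myGpuMalloc handing out unified/managed memory.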
CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1, 1, 1)); cudnnReduceTensorDescriptor_t reduce_desc; CUDNN_CALL(cudnnCreateReduceTensorDescriptor(&reduce_desc)); CUDNN_CALL(cudnnSetReduceTensorDescriptor( reduce_desc, CUDNN_REDUCE_TENSOR_AVG, CUDNN_DATA_FLOAT, CUDNN_PROPAGATE_NAN, CUDNN_REDUCE_TENSOR_NO_INDICES, CUDNN_32BIT_INDICES)); void *indices = nullptr; // Don't store indices. // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetReductionWorkspaceSize( cudnnHandle, reduce_desc, x_desc, out_desc, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnReduceTensor( cudnnHandle, reduce_desc, indices, 0, ws_data, ws_size, x1400, x_desc, x1373, x1398, out_desc, x1397)); }; int32_t x1403 = 0; int32_t x1404 = 1; x1404 *= 1; int32_t x1406 = x1403; bool x1407 = x1406 >= 2; if (x1407) { printf("cannot have 2 or more -1s in resize!!\n"); assert(false && ""); } else { } bool x1412 = x1406 == 0; if (x1412) { int32_t x1413 = x1404; bool x1414 = x1413 == 1; if (x1414) { } else { assert(false && "must same size!!"); } } else { } float* x1421 = (float*)myGpuMalloc(1 * sizeof(float)); // make sure the size of loss is 1 arrayFill<<<28, 512>>>(x1421, 1.0f, 1); // backend is lantern.TensorDslCudnn$BackendCudnn@6aec41a5 CUDA_CALL(cudaMemcpy(x335, x1397, 1 * sizeof(float), cudaMemcpyDeviceToDevice)); // 'mean' gradient // backprop for mean op float x1428 = x1421[0]; float x1429 = x1428 / 64.0f; addScalar<<<28, 512>>>(x1375, x1375, x1429, 64); // 'nllLossB' gradient. nllLoss_grad<<<64, 1>>>(10, x1375, x330, x1372); int32_t x1433 = 0; int32_t x1434 = 1; x1434 *= 64; x1434 *= 10; x1434 *= 1; x1434 *= 1; int32_t x1439 = x1433; bool x1440 = x1439 >= 2; if (x1440) { printf("cannot have 2 or more -1s in resize!!\n"); assert(false && ""); } else { } bool x1445 = x1439 == 0; if (x1445) { int32_t x1446 = x1434; bool x1447 = x1446 == 640; if (x1447) { } else { assert(false && "must same size!!"); } } else { } int32_t x1454 = 0; int32_t x1455 = 1; x1455 *= 64; x1455 *= 10; x1455 *= 1; x1455 *= 1; int32_t x1460 = x1454; bool x1461 = x1460 >= 2; if (x1461) { printf("cannot have 2 or more -1s in resize!!\n"); assert(false && ""); } else { } bool x1466 = x1460 == 0; if (x1466) { int32_t x1467 = x1455; bool x1468 = x1467 == 640; if (x1468) { } else { assert(false && "must same size!!"); } } else { } int32_t x1475 = 0; int32_t x1476 = 1; x1476 *= 64; x1476 *= 10; x1476 *= 1; x1476 *= 1; int32_t x1481 = x1475; bool x1482 = x1481 >= 2; if (x1482) { printf("cannot have 2 or more -1s in resize!!\n"); assert(false && ""); } else { } bool x1487 = x1481 == 0; if (x1487) { int32_t x1488 = x1476; bool x1489 = x1488 == 640; if (x1489) { } else { assert(false && "must same size!!"); } } else { } int32_t x1496 = 0; int32_t x1497 = 1; x1497 *= 64; x1497 *= 10; x1497 *= 1; x1497 *= 1; int32_t x1502 = x1496; bool x1503 = x1502 >= 2; if (x1503) { printf("cannot have 2 or more -1s in resize!!\n"); assert(false && ""); } else { } bool x1508 = x1502 == 0; if (x1508) { int32_t x1509 = x1497; bool x1510 = x1509 == 640; if (x1510) { } else { assert(false && "must same size!!"); } } else { } float* x1517 = (float*)myMalloc(1 * sizeof(float));; x1517[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 10, 1, 1)); CUDNN_CALL(cudnnSoftmaxBackward( cudnnHandle, CUDNN_SOFTMAX_LOG, CUDNN_SOFTMAX_MODE_CHANNEL, x1517, x_desc, x1351, x_desc, x1372, x1517, x_desc, x1305)); }; // 
conv2D back-propagate float* x1521 = (float*)myMalloc(1 * sizeof(float));; x1521[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 10, 512, 4, 4)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1208, x1208)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 10, x1288, x1288)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x1521, filt_desc, x107, grad_out_desc, x1305, conv_desc, algo, ws_data, ws_size, x1521, grad_in_desc, x1279)); }; float* x1524 = (float*)myMalloc(1 * sizeof(float));; x1524[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 10, 512, 4, 4)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 10, x1288, x1288)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1208, x1208)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
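// From here on the backward pass repeats the same three cuDNN calls per convolution,
// differing only in descriptors and buffers. Schematically (alpha and beta both point
// at 1.0f, so every gradient write accumulates into its buffer):
//   cudnnConvolutionBackwardData  (handle, &one, wDesc, W,  dyDesc, dY, conv, algo, ws, wsSize, &one, dxDesc, dX);
//   cudnnConvolutionBackwardFilter(handle, &one, xDesc, X,  dyDesc, dY, conv, algo, ws, wsSize, &one, dwDesc, dW);
//   cudnnConvolutionBackwardBias  (handle, &one, dyDesc, dY, &one, dbDesc, dB);
// The block below computes the classifier's filter gradient (into x256) and bias
// gradient (into x303); concat2D_1D_greg_grad then splits the concat gradient x1279
// back into the two expand-branch gradients x1225 and x1257.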
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x1524, in_desc, x1277, grad_out_desc, x1305, conv_desc, algo, ws_data, ws_size, x1524, grad_filt_desc, x256)); }; float* x1527 = (float*)myMalloc(1 * sizeof(float));; x1527[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 10, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 10, x1288, x1288)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x1527, grad_out_desc, x1305, x1527, grad_bias_desc, x303)); }; { dim3 grid(28, 2); concat2D_1D_greg_grad<<<grid, 512>>>(x1225, 256, x1211, x1257, 256, x1243, x1279, 1, 64, 512, x1208, x1208, x1273, x1209, x1208, 1); }; float* x1531 = (float*)myMalloc(1 * sizeof(float));; x1531[0] = 1.0f; float* x1533 = (float*)myMalloc(1 * sizeof(float));; x1533[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1240, x1240)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x1531, x_desc, x1246, x_desc, x1257, x_desc, x1246, x1533, x_desc, x1257)); }; // conv2D back-propagate float* x1537 = (float*)myMalloc(1 * sizeof(float));; x1537[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 3, 3)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1178, x1178)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1240, x1240)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
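// Backward-data through the 3x3 expand filter x185 accumulates into x1195, the
// gradient of the squeeze output x1184. Because beta is also 1.0f, the 1x1 expand
// branch further below adds its contribution into the same buffer instead of
// overwriting it.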
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x1537, filt_desc, x185, grad_out_desc, x1257, conv_desc, algo, ws_data, ws_size, x1537, grad_in_desc, x1195)); }; float* x1540 = (float*)myMalloc(1 * sizeof(float));; x1540[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1240, x1240)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1178, x1178)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x1540, in_desc, x1184, grad_out_desc, x1257, conv_desc, algo, ws_data, ws_size, x1540, grad_filt_desc, x282)); }; float* x1543 = (float*)myMalloc(1 * sizeof(float));; x1543[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1240, x1240)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x1543, grad_out_desc, x1257, x1543, grad_bias_desc, x269)); }; float* x1546 = (float*)myMalloc(1 * sizeof(float));; x1546[0] = 1.0f; float* x1548 = (float*)myMalloc(1 * sizeof(float));; x1548[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1208, x1208)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x1546, x_desc, x1214, x_desc, x1225, x_desc, x1214, x1548, x_desc, x1225)); }; // conv2D back-propagate float* x1552 = (float*)myMalloc(1 * sizeof(float));; x1552[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, 
CUDNN_TENSOR_NCHW, 256, 64, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1178, x1178)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1208, x1208)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x1552, filt_desc, x209, grad_out_desc, x1225, conv_desc, algo, ws_data, ws_size, x1552, grad_in_desc, x1195)); }; float* x1555 = (float*)myMalloc(1 * sizeof(float));; x1555[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1208, x1208)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1178, x1178)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x1555, in_desc, x1184, grad_out_desc, x1225, conv_desc, algo, ws_data, ws_size, x1555, grad_filt_desc, x290)); }; float* x1558 = (float*)myMalloc(1 * sizeof(float));; x1558[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1208, x1208)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x1558, grad_out_desc, x1225, x1558, grad_bias_desc, x278)); }; float* x1561 = (float*)myMalloc(1 * sizeof(float));; x1561[0] = 1.0f; float* x1563 = (float*)myMalloc(1 * sizeof(float));; x1563[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1178, x1178)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x1561, x_desc, x1184, x_desc, x1195, x_desc, x1184, x1563, x_desc, x1195)); }; // conv2D back-propagate float* x1567 = (float*)myMalloc(1 * sizeof(float));; x1567[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 512, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1162, x1162)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1178, x1178)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
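// Backward through this fire module's 1x1 squeeze convolution (filter x98): the input
// gradient lands in x1170, the filter gradient in x253, and the bias gradient in x272.
// The 2x2 max-pool that preceded the module is then backpropagated (x1170 -> x1155)
// before the next concat gradient is split.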
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x1567, filt_desc, x98, grad_out_desc, x1195, conv_desc, algo, ws_data, ws_size, x1567, grad_in_desc, x1170)); }; float* x1570 = (float*)myMalloc(1 * sizeof(float));; x1570[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 512, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1178, x1178)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1162, x1162)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x1570, in_desc, x1168, grad_out_desc, x1195, conv_desc, algo, ws_data, ws_size, x1570, grad_filt_desc, x253)); }; float* x1573 = (float*)myMalloc(1 * sizeof(float));; x1573[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1178, x1178)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x1573, grad_out_desc, x1195, x1573, grad_bias_desc, x272)); }; float* x1576 = (float*)myMalloc(1 * sizeof(float));; x1576[0] = 0.0f; float* x1578 = (float*)myMalloc(1 * sizeof(float));; x1578[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1084, x1084)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1162, x1162)); cudnnPoolingDescriptor_t poolingDesc; CUDNN_CALL(cudnnCreatePoolingDescriptor(&poolingDesc)); CUDNN_CALL(cudnnSetPooling2dDescriptor( poolingDesc, CUDNN_POOLING_MAX, CUDNN_NOT_PROPAGATE_NAN, 2, 2, 0, 0, 2, 2 )); CUDNN_CALL(cudnnPoolingBackward( cudnnHandle, poolingDesc, x1578, out_desc, x1168, out_desc, x1170, in_desc, x1153 , x1576, in_desc, x1155)); }; { dim3 grid(28, 2); concat2D_1D_greg_grad<<<grid, 512>>>(x1101, 256, 
x1087, x1133, 256, x1119, x1155, 1, 64, 512, x1084, x1084, x1149, x1085, x1084, 1); }; float* x1582 = (float*)myMalloc(1 * sizeof(float));; x1582[0] = 1.0f; float* x1584 = (float*)myMalloc(1 * sizeof(float));; x1584[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1116, x1116)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x1582, x_desc, x1122, x_desc, x1133, x_desc, x1122, x1584, x_desc, x1133)); }; // conv2D back-propagate float* x1588 = (float*)myMalloc(1 * sizeof(float));; x1588[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 3, 3)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1054, x1054)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1116, x1116)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x1588, filt_desc, x179, grad_out_desc, x1133, conv_desc, algo, ws_data, ws_size, x1588, grad_in_desc, x1071)); }; float* x1591 = (float*)myMalloc(1 * sizeof(float));; x1591[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1116, x1116)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1054, x1054)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. 
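// The remaining fire modules are handled with the identical pattern, walking back
// through the network: ReLU backward, then backward data / filter / bias for the 3x3
// and 1x1 expand convolutions (both accumulating into the shared squeeze gradient),
// then the squeeze convolution itself, with concat2D_1D_greg_grad splitting each
// concatenated gradient back into its two branches.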
cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x1591, in_desc, x1060, grad_out_desc, x1133, conv_desc, algo, ws_data, ws_size, x1591, grad_filt_desc, x280)); }; float* x1594 = (float*)myMalloc(1 * sizeof(float));; x1594[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1116, x1116)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x1594, grad_out_desc, x1133, x1594, grad_bias_desc, x265)); }; float* x1597 = (float*)myMalloc(1 * sizeof(float));; x1597[0] = 1.0f; float* x1599 = (float*)myMalloc(1 * sizeof(float));; x1599[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1084, x1084)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x1597, x_desc, x1090, x_desc, x1101, x_desc, x1090, x1599, x_desc, x1101)); }; // conv2D back-propagate float* x1603 = (float*)myMalloc(1 * sizeof(float));; x1603[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1054, x1054)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1084, x1084)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x1603, filt_desc, x242, grad_out_desc, x1101, conv_desc, algo, ws_data, ws_size, x1603, grad_in_desc, x1071)); }; float* x1606 = (float*)myMalloc(1 * sizeof(float));; x1606[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1084, x1084)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1054, x1054)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x1606, in_desc, x1060, grad_out_desc, x1101, conv_desc, algo, ws_data, ws_size, x1606, grad_filt_desc, x301)); }; float* x1609 = (float*)myMalloc(1 * sizeof(float));; x1609[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1084, x1084)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x1609, grad_out_desc, x1101, x1609, grad_bias_desc, x292)); }; float* x1612 = (float*)myMalloc(1 * sizeof(float));; x1612[0] = 1.0f; float* x1614 = (float*)myMalloc(1 * sizeof(float));; x1614[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1054, x1054)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x1612, x_desc, x1060, x_desc, x1071, x_desc, x1060, x1614, x_desc, x1071)); }; // conv2D back-propagate float* x1618 = (float*)myMalloc(1 * sizeof(float));; x1618[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, 
CUDNN_TENSOR_NCHW, 64, 384, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 384, x975, x975)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1054, x1054)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x1618, filt_desc, x197, grad_out_desc, x1071, conv_desc, algo, ws_data, ws_size, x1618, grad_in_desc, x1046)); }; float* x1621 = (float*)myMalloc(1 * sizeof(float));; x1621[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 384, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1054, x1054)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 384, x975, x975)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
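// This 1x1 squeeze convolution (384 input channels down to 64) closes the previous
// fire module: its filter gradient goes to x286 and its bias gradient to x261, and the
// input gradient x1046 computed above is then split by concat2D_1D_greg_grad into the
// two 192-channel expand gradients x992 and x1024.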
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x1621, in_desc, x1044, grad_out_desc, x1071, conv_desc, algo, ws_data, ws_size, x1621, grad_filt_desc, x286)); }; float* x1624 = (float*)myMalloc(1 * sizeof(float));; x1624[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1054, x1054)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x1624, grad_out_desc, x1071, x1624, grad_bias_desc, x261)); }; { dim3 grid(28, 2); concat2D_1D_greg_grad<<<grid, 512>>>(x992, 192, x978, x1024, 192, x1010, x1046, 1, 64, 384, x975, x975, x1040, x976, x975, 1); }; float* x1628 = (float*)myMalloc(1 * sizeof(float));; x1628[0] = 1.0f; float* x1630 = (float*)myMalloc(1 * sizeof(float));; x1630[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 192, x1007, x1007)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x1628, x_desc, x1013, x_desc, x1024, x_desc, x1013, x1630, x_desc, x1024)); }; // conv2D back-propagate float* x1634 = (float*)myMalloc(1 * sizeof(float));; x1634[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 192, 48, 3, 3)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 48, x945, x945)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 192, x1007, x1007)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x1634, filt_desc, x149, grad_out_desc, x1024, conv_desc, algo, ws_data, ws_size, x1634, grad_in_desc, x962)); }; float* x1637 = (float*)myMalloc(1 * sizeof(float));; x1637[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 192, 48, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 192, x1007, x1007)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 48, x945, x945)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x1637, in_desc, x951, grad_out_desc, x1024, conv_desc, algo, ws_data, ws_size, x1637, grad_filt_desc, x270)); }; float* x1640 = (float*)myMalloc(1 * sizeof(float));; x1640[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 192, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 192, x1007, x1007)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x1640, grad_out_desc, x1024, x1640, grad_bias_desc, x296)); }; float* x1643 = (float*)myMalloc(1 * sizeof(float));; x1643[0] = 1.0f; float* x1645 = (float*)myMalloc(1 * sizeof(float));; x1645[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 192, x975, x975)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x1643, x_desc, x981, x_desc, x992, x_desc, x981, x1645, x_desc, x992)); }; // conv2D back-propagate float* x1649 = (float*)myMalloc(1 * sizeof(float));; x1649[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, 
CUDNN_TENSOR_NCHW, 192, 48, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 48, x945, x945)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 192, x975, x975)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x1649, filt_desc, x161, grad_out_desc, x992, conv_desc, algo, ws_data, ws_size, x1649, grad_in_desc, x962)); }; float* x1652 = (float*)myMalloc(1 * sizeof(float));; x1652[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 192, 48, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 192, x975, x975)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 48, x945, x945)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x1652, in_desc, x951, grad_out_desc, x992, conv_desc, algo, ws_data, ws_size, x1652, grad_filt_desc, x274)); }; float* x1655 = (float*)myMalloc(1 * sizeof(float));; x1655[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 192, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 192, x975, x975)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x1655, grad_out_desc, x992, x1655, grad_bias_desc, x284)); }; float* x1658 = (float*)myMalloc(1 * sizeof(float));; x1658[0] = 1.0f; float* x1660 = (float*)myMalloc(1 * sizeof(float));; x1660[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 48, x945, x945)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x1658, x_desc, x951, x_desc, x962, x_desc, x951, x1660, x_desc, x962)); }; // conv2D back-propagate float* x1664 = (float*)myMalloc(1 * sizeof(float));; x1664[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 48, 384, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 384, x866, x866)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 48, x945, x945)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x1664, filt_desc, x137, grad_out_desc, x962, conv_desc, algo, ws_data, ws_size, x1664, grad_in_desc, x937)); }; float* x1667 = (float*)myMalloc(1 * sizeof(float));; x1667[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 48, 384, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 48, x945, x945)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 384, x866, x866)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x1667, in_desc, x935, grad_out_desc, x962, conv_desc, algo, ws_data, ws_size, x1667, grad_filt_desc, x266)); }; float* x1670 = (float*)myMalloc(1 * sizeof(float));; x1670[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 48, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 48, x945, x945)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x1670, grad_out_desc, x962, x1670, grad_bias_desc, x254)); }; { dim3 grid(28, 2); concat2D_1D_greg_grad<<<grid, 512>>>(x883, 192, x869, x915, 192, x901, x937, 1, 64, 384, x866, x866, x931, x867, x866, 1); }; float* x1674 = (float*)myMalloc(1 * sizeof(float));; x1674[0] = 1.0f; float* x1676 = (float*)myMalloc(1 * sizeof(float));; x1676[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 192, x898, x898)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x1674, x_desc, x904, x_desc, x915, x_desc, x904, x1676, x_desc, x915)); }; // conv2D back-propagate float* x1680 = (float*)myMalloc(1 * sizeof(float));; x1680[0] = 1.0f; { cudnnFilterDescriptor_t 
filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 192, 48, 3, 3)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 48, x836, x836)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 192, x898, x898)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x1680, filt_desc, x212, grad_out_desc, x915, conv_desc, algo, ws_data, ws_size, x1680, grad_in_desc, x853)); }; float* x1683 = (float*)myMalloc(1 * sizeof(float));; x1683[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 192, 48, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 192, x898, x898)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 48, x836, x836)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
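// Backward-filter pass for the 3x3 convolution (48 -> 192 channels): query and allocate the workspace, then accumulate the weight gradient into x291.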
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x1683, in_desc, x842, grad_out_desc, x915, conv_desc, algo, ws_data, ws_size, x1683, grad_filt_desc, x291)); }; float* x1686 = (float*)myMalloc(1 * sizeof(float));; x1686[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 192, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 192, x898, x898)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x1686, grad_out_desc, x915, x1686, grad_bias_desc, x281)); }; float* x1689 = (float*)myMalloc(1 * sizeof(float));; x1689[0] = 1.0f; float* x1691 = (float*)myMalloc(1 * sizeof(float));; x1691[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 192, x866, x866)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x1689, x_desc, x872, x_desc, x883, x_desc, x872, x1691, x_desc, x883)); }; // conv2D back-propagate float* x1695 = (float*)myMalloc(1 * sizeof(float));; x1695[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 192, 48, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 48, x836, x836)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 192, x866, x866)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
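// Backward-data pass for the 1x1 convolution (48 -> 192 channels): the input gradient accumulates into x853, the same buffer the 3x3 branch above wrote into.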
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x1695, filt_desc, x239, grad_out_desc, x883, conv_desc, algo, ws_data, ws_size, x1695, grad_in_desc, x853)); }; float* x1698 = (float*)myMalloc(1 * sizeof(float));; x1698[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 192, 48, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 192, x866, x866)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 48, x836, x836)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x1698, in_desc, x842, grad_out_desc, x883, conv_desc, algo, ws_data, ws_size, x1698, grad_filt_desc, x300)); }; float* x1701 = (float*)myMalloc(1 * sizeof(float));; x1701[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 192, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 192, x866, x866)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x1701, grad_out_desc, x883, x1701, grad_bias_desc, x298)); }; float* x1704 = (float*)myMalloc(1 * sizeof(float));; x1704[0] = 1.0f; float* x1706 = (float*)myMalloc(1 * sizeof(float));; x1706[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 48, x836, x836)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x1704, x_desc, x842, x_desc, x853, x_desc, x842, x1706, x_desc, x853)); }; // conv2D back-propagate float* x1710 = (float*)myMalloc(1 * sizeof(float));; x1710[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 
48, 256, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x757, x757)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 48, x836, x836)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x1710, filt_desc, x221, grad_out_desc, x853, conv_desc, algo, ws_data, ws_size, x1710, grad_in_desc, x828)); }; float* x1713 = (float*)myMalloc(1 * sizeof(float));; x1713[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 48, 256, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 48, x836, x836)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x757, x757)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
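// Backward-filter pass for the 1x1 convolution (256 -> 48 channels): the weight gradient accumulates into x294.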
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x1713, in_desc, x826, grad_out_desc, x853, conv_desc, algo, ws_data, ws_size, x1713, grad_filt_desc, x294)); }; float* x1716 = (float*)myMalloc(1 * sizeof(float));; x1716[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 48, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 48, x836, x836)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x1716, grad_out_desc, x853, x1716, grad_bias_desc, x304)); }; { dim3 grid(28, 2); concat2D_1D_greg_grad<<<grid, 512>>>(x774, 128, x760, x806, 128, x792, x828, 1, 64, 256, x757, x757, x822, x758, x757, 1); }; float* x1720 = (float*)myMalloc(1 * sizeof(float));; x1720[0] = 1.0f; float* x1722 = (float*)myMalloc(1 * sizeof(float));; x1722[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x789, x789)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x1720, x_desc, x795, x_desc, x806, x_desc, x795, x1722, x_desc, x806)); }; // conv2D back-propagate float* x1726 = (float*)myMalloc(1 * sizeof(float));; x1726[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 32, 3, 3)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 32, x727, x727)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x789, x789)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
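// Backward-data pass for the 3x3 convolution (32 -> 128 channels): the input gradient accumulates into x744.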
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x1726, filt_desc, x203, grad_out_desc, x806, conv_desc, algo, ws_data, ws_size, x1726, grad_in_desc, x744)); }; float* x1729 = (float*)myMalloc(1 * sizeof(float));; x1729[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 32, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x789, x789)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 32, x727, x727)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x1729, in_desc, x733, grad_out_desc, x806, conv_desc, algo, ws_data, ws_size, x1729, grad_filt_desc, x288)); }; float* x1732 = (float*)myMalloc(1 * sizeof(float));; x1732[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x789, x789)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x1732, grad_out_desc, x806, x1732, grad_bias_desc, x268)); }; float* x1735 = (float*)myMalloc(1 * sizeof(float));; x1735[0] = 1.0f; float* x1737 = (float*)myMalloc(1 * sizeof(float));; x1737[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x757, x757)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x1735, x_desc, x763, x_desc, x774, x_desc, x763, x1737, x_desc, x774)); }; // conv2D back-propagate float* x1741 = (float*)myMalloc(1 * sizeof(float));; x1741[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 
128, 32, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 32, x727, x727)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x757, x757)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x1741, filt_desc, x116, grad_out_desc, x774, conv_desc, algo, ws_data, ws_size, x1741, grad_in_desc, x744)); }; float* x1744 = (float*)myMalloc(1 * sizeof(float));; x1744[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 32, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x757, x757)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 32, x727, x727)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
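// Backward-filter pass for the 1x1 convolution (32 -> 128 channels): the weight gradient accumulates into x259.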
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x1744, in_desc, x733, grad_out_desc, x774, conv_desc, algo, ws_data, ws_size, x1744, grad_filt_desc, x259)); }; float* x1747 = (float*)myMalloc(1 * sizeof(float));; x1747[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x757, x757)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x1747, grad_out_desc, x774, x1747, grad_bias_desc, x273)); }; float* x1750 = (float*)myMalloc(1 * sizeof(float));; x1750[0] = 1.0f; float* x1752 = (float*)myMalloc(1 * sizeof(float));; x1752[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 32, x727, x727)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x1750, x_desc, x733, x_desc, x744, x_desc, x733, x1752, x_desc, x744)); }; // conv2D back-propagate float* x1756 = (float*)myMalloc(1 * sizeof(float));; x1756[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 32, 256, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x711, x711)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 32, x727, x727)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
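// Backward-data pass for the 1x1 convolution (256 -> 32 channels): the input gradient accumulates into x719.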
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x1756, filt_desc, x176, grad_out_desc, x744, conv_desc, algo, ws_data, ws_size, x1756, grad_in_desc, x719)); }; float* x1759 = (float*)myMalloc(1 * sizeof(float));; x1759[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 32, 256, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 32, x727, x727)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x711, x711)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x1759, in_desc, x717, grad_out_desc, x744, conv_desc, algo, ws_data, ws_size, x1759, grad_filt_desc, x279)); }; float* x1762 = (float*)myMalloc(1 * sizeof(float));; x1762[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 32, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 32, x727, x727)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x1762, grad_out_desc, x744, x1762, grad_bias_desc, x267)); }; float* x1765 = (float*)myMalloc(1 * sizeof(float));; x1765[0] = 0.0f; float* x1767 = (float*)myMalloc(1 * sizeof(float));; x1767[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x633, x633)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x711, x711)); cudnnPoolingDescriptor_t poolingDesc; CUDNN_CALL(cudnnCreatePoolingDescriptor(&poolingDesc)); CUDNN_CALL(cudnnSetPooling2dDescriptor( poolingDesc, CUDNN_POOLING_MAX, CUDNN_NOT_PROPAGATE_NAN, 2, 2, 0, 0, 2, 2 )); CUDNN_CALL(cudnnPoolingBackward( cudnnHandle, poolingDesc, x1767, out_desc, x717, out_desc, x719, in_desc, x702 , x1765, in_desc, x704)); }; { dim3 grid(28, 2); concat2D_1D_greg_grad<<<grid, 512>>>(x650, 128, x636, x682, 128, x668, 
x704, 1, 64, 256, x633, x633, x698, x634, x633, 1); }; float* x1771 = (float*)myMalloc(1 * sizeof(float));; x1771[0] = 1.0f; float* x1773 = (float*)myMalloc(1 * sizeof(float));; x1773[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x665, x665)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x1771, x_desc, x671, x_desc, x682, x_desc, x671, x1773, x_desc, x682)); }; // conv2D back-propagate float* x1777 = (float*)myMalloc(1 * sizeof(float));; x1777[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 32, 3, 3)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 32, x603, x603)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x665, x665)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x1777, filt_desc, x113, grad_out_desc, x682, conv_desc, algo, ws_data, ws_size, x1777, grad_in_desc, x620)); }; float* x1780 = (float*)myMalloc(1 * sizeof(float));; x1780[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 32, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x665, x665)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 32, x603, x603)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. 
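// Select a backward-filter algorithm and compute the weight gradient of the 3x3 convolution (32 -> 128 channels), accumulating into x258.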
cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x1780, in_desc, x609, grad_out_desc, x682, conv_desc, algo, ws_data, ws_size, x1780, grad_filt_desc, x258)); }; float* x1783 = (float*)myMalloc(1 * sizeof(float));; x1783[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x665, x665)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x1783, grad_out_desc, x682, x1783, grad_bias_desc, x293)); }; float* x1786 = (float*)myMalloc(1 * sizeof(float));; x1786[0] = 1.0f; float* x1788 = (float*)myMalloc(1 * sizeof(float));; x1788[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x633, x633)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x1786, x_desc, x639, x_desc, x650, x_desc, x639, x1788, x_desc, x650)); }; // conv2D back-propagate float* x1792 = (float*)myMalloc(1 * sizeof(float));; x1792[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 32, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 32, x603, x603)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x633, x633)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
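// Backward-data pass for the 1x1 convolution (32 -> 128 channels): the input gradient accumulates into x620.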
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x1792, filt_desc, x200, grad_out_desc, x650, conv_desc, algo, ws_data, ws_size, x1792, grad_in_desc, x620)); }; float* x1795 = (float*)myMalloc(1 * sizeof(float));; x1795[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 32, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x633, x633)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 32, x603, x603)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x1795, in_desc, x609, grad_out_desc, x650, conv_desc, algo, ws_data, ws_size, x1795, grad_filt_desc, x287)); }; float* x1798 = (float*)myMalloc(1 * sizeof(float));; x1798[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x633, x633)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x1798, grad_out_desc, x650, x1798, grad_bias_desc, x297)); }; float* x1801 = (float*)myMalloc(1 * sizeof(float));; x1801[0] = 1.0f; float* x1803 = (float*)myMalloc(1 * sizeof(float));; x1803[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 32, x603, x603)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x1801, x_desc, x609, x_desc, x620, x_desc, x609, x1803, x_desc, x620)); }; // conv2D back-propagate float* x1807 = (float*)myMalloc(1 * sizeof(float));; x1807[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 
32, 128, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x524, x524)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 32, x603, x603)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x1807, filt_desc, x125, grad_out_desc, x620, conv_desc, algo, ws_data, ws_size, x1807, grad_in_desc, x595)); }; float* x1810 = (float*)myMalloc(1 * sizeof(float));; x1810[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 32, 128, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 32, x603, x603)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x524, x524)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
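// Backward-filter pass for the 1x1 convolution (128 -> 32 channels): the weight gradient accumulates into x262.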
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x1810, in_desc, x593, grad_out_desc, x620, conv_desc, algo, ws_data, ws_size, x1810, grad_filt_desc, x262)); }; float* x1813 = (float*)myMalloc(1 * sizeof(float));; x1813[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 32, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 32, x603, x603)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x1813, grad_out_desc, x620, x1813, grad_bias_desc, x275)); }; { dim3 grid(28, 2); concat2D_1D_greg_grad<<<grid, 512>>>(x541, 64, x527, x573, 64, x559, x595, 1, 64, 128, x524, x524, x589, x525, x524, 1); }; float* x1817 = (float*)myMalloc(1 * sizeof(float));; x1817[0] = 1.0f; float* x1819 = (float*)myMalloc(1 * sizeof(float));; x1819[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x556, x556)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x1817, x_desc, x562, x_desc, x573, x_desc, x562, x1819, x_desc, x573)); }; // conv2D back-propagate float* x1823 = (float*)myMalloc(1 * sizeof(float));; x1823[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 16, 3, 3)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 16, x494, x494)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x556, x556)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
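// Backward-data pass for the 3x3 convolution (16 -> 64 channels): the input gradient accumulates into x511.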
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x1823, filt_desc, x152, grad_out_desc, x573, conv_desc, algo, ws_data, ws_size, x1823, grad_in_desc, x511)); }; float* x1826 = (float*)myMalloc(1 * sizeof(float));; x1826[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 16, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x556, x556)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 16, x494, x494)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x1826, in_desc, x500, grad_out_desc, x573, conv_desc, algo, ws_data, ws_size, x1826, grad_filt_desc, x271)); }; float* x1829 = (float*)myMalloc(1 * sizeof(float));; x1829[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x556, x556)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x1829, grad_out_desc, x573, x1829, grad_bias_desc, x289)); }; float* x1832 = (float*)myMalloc(1 * sizeof(float));; x1832[0] = 1.0f; float* x1834 = (float*)myMalloc(1 * sizeof(float));; x1834[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x524, x524)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x1832, x_desc, x530, x_desc, x541, x_desc, x530, x1834, x_desc, x541)); }; // conv2D back-propagate float* x1838 = (float*)myMalloc(1 * sizeof(float));; x1838[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 
16, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 16, x494, x494)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x524, x524)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x1838, filt_desc, x128, grad_out_desc, x541, conv_desc, algo, ws_data, ws_size, x1838, grad_in_desc, x511)); }; float* x1841 = (float*)myMalloc(1 * sizeof(float));; x1841[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 16, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x524, x524)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 16, x494, x494)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
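// Backward-filter pass for the 1x1 convolution (16 -> 64 channels): the weight gradient accumulates into x263.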
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x1841, in_desc, x500, grad_out_desc, x541, conv_desc, algo, ws_data, ws_size, x1841, grad_filt_desc, x263)); }; float* x1844 = (float*)myMalloc(1 * sizeof(float));; x1844[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x524, x524)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x1844, grad_out_desc, x541, x1844, grad_bias_desc, x255)); }; float* x1847 = (float*)myMalloc(1 * sizeof(float));; x1847[0] = 1.0f; float* x1849 = (float*)myMalloc(1 * sizeof(float));; x1849[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 16, x494, x494)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x1847, x_desc, x500, x_desc, x511, x_desc, x500, x1849, x_desc, x511)); }; // conv2D back-propagate float* x1853 = (float*)myMalloc(1 * sizeof(float));; x1853[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 16, 128, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x412, x412)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 16, x494, x494)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
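// Backward-data pass for the 1x1 convolution (128 -> 16 channels): the input gradient accumulates into x486.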
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x1853, filt_desc, x131, grad_out_desc, x511, conv_desc, algo, ws_data, ws_size, x1853, grad_in_desc, x486)); }; float* x1856 = (float*)myMalloc(1 * sizeof(float));; x1856[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 16, 128, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 16, x494, x494)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x412, x412)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x1856, in_desc, x484, grad_out_desc, x511, conv_desc, algo, ws_data, ws_size, x1856, grad_filt_desc, x264)); }; float* x1859 = (float*)myMalloc(1 * sizeof(float));; x1859[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 16, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 16, x494, x494)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x1859, grad_out_desc, x511, x1859, grad_bias_desc, x277)); }; { dim3 grid(28, 2); concat2D_1D_greg_grad<<<grid, 512>>>(x429, 64, x415, x461, 64, x447, x486, 1, 64, 128, x412, x412, x480, x413, x412, 1); }; float* x1863 = (float*)myMalloc(1 * sizeof(float));; x1863[0] = 1.0f; float* x1865 = (float*)myMalloc(1 * sizeof(float));; x1865[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x444, x444)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x1863, x_desc, x450, x_desc, x461, x_desc, x450, x1865, x_desc, x461)); }; // conv2D back-propagate float* x1869 = (float*)myMalloc(1 * sizeof(float));; x1869[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; 
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 16, 3, 3)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 16, x382, x382)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x444, x444)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x1869, filt_desc, x236, grad_out_desc, x461, conv_desc, algo, ws_data, ws_size, x1869, grad_in_desc, x399)); }; float* x1872 = (float*)myMalloc(1 * sizeof(float));; x1872[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 16, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x444, x444)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 16, x382, x382)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
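// Backward-filter pass for the 3x3 convolution (16 -> 64 channels): the weight gradient accumulates into x299.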
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x1872, in_desc, x388, grad_out_desc, x461, conv_desc, algo, ws_data, ws_size, x1872, grad_filt_desc, x299)); }; float* x1875 = (float*)myMalloc(1 * sizeof(float));; x1875[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x444, x444)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x1875, grad_out_desc, x461, x1875, grad_bias_desc, x257)); }; float* x1878 = (float*)myMalloc(1 * sizeof(float));; x1878[0] = 1.0f; float* x1880 = (float*)myMalloc(1 * sizeof(float));; x1880[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x412, x412)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x1878, x_desc, x418, x_desc, x429, x_desc, x418, x1880, x_desc, x429)); }; // conv2D back-propagate float* x1884 = (float*)myMalloc(1 * sizeof(float));; x1884[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 16, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 16, x382, x382)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x412, x412)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x1884, filt_desc, x167, grad_out_desc, x429, conv_desc, algo, ws_data, ws_size, x1884, grad_in_desc, x399)); }; float* x1887 = (float*)myMalloc(1 * sizeof(float));; x1887[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 16, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x412, x412)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 16, x382, x382)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x1887, in_desc, x388, grad_out_desc, x429, conv_desc, algo, ws_data, ws_size, x1887, grad_filt_desc, x276)); }; float* x1890 = (float*)myMalloc(1 * sizeof(float));; x1890[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x412, x412)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x1890, grad_out_desc, x429, x1890, grad_bias_desc, x283)); }; float* x1893 = (float*)myMalloc(1 * sizeof(float));; x1893[0] = 1.0f; float* x1895 = (float*)myMalloc(1 * sizeof(float));; x1895[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 16, x382, x382)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x1893, x_desc, x388, x_desc, x399, x_desc, x388, x1895, x_desc, x399)); }; // conv2D back-propagate float* x1899 = (float*)myMalloc(1 * sizeof(float));; x1899[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 16, 
96, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 96, x366, x366)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 16, x382, x382)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x1899, filt_desc, x245, grad_out_desc, x399, conv_desc, algo, ws_data, ws_size, x1899, grad_in_desc, x374)); }; float* x1902 = (float*)myMalloc(1 * sizeof(float));; x1902[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 16, 96, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 16, x382, x382)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 96, x366, x366)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x1902, in_desc, x372, grad_out_desc, x399, conv_desc, algo, ws_data, ws_size, x1902, grad_filt_desc, x302)); }; float* x1905 = (float*)myMalloc(1 * sizeof(float));; x1905[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 16, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 16, x382, x382)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x1905, grad_out_desc, x399, x1905, grad_bias_desc, x260)); }; float* x1908 = (float*)myMalloc(1 * sizeof(float));; x1908[0] = 0.0f; float* x1910 = (float*)myMalloc(1 * sizeof(float));; x1910[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 96, x337, x337)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 96, x366, x366)); cudnnPoolingDescriptor_t poolingDesc; CUDNN_CALL(cudnnCreatePoolingDescriptor(&poolingDesc)); CUDNN_CALL(cudnnSetPooling2dDescriptor( poolingDesc, CUDNN_POOLING_MAX, CUDNN_NOT_PROPAGATE_NAN, 2, 2, 0, 0, 2, 2 )); CUDNN_CALL(cudnnPoolingBackward( cudnnHandle, poolingDesc, x1910, out_desc, x372, out_desc, x374, in_desc, x343 , x1908, in_desc, x354)); }; float* x1913 = (float*)myMalloc(1 * sizeof(float));; x1913[0] = 1.0f; float* x1915 = (float*)myMalloc(1 * sizeof(float));; x1915[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 96, x337, x337)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x1913, x_desc, x343, x_desc, x354, x_desc, x343, x1915, x_desc, x354)); }; // conv2D back-propagate float* x1919 = (float*)myMalloc(1 * sizeof(float));; x1919[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 96, 3, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 96, x337, x337)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 3, 32, 32)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. 
cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x1919, in_desc, x327, grad_out_desc, x354, conv_desc, algo, ws_data, ws_size, x1919, grad_filt_desc, x285)); }; float* x1922 = (float*)myMalloc(1 * sizeof(float));; x1922[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 96, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 96, x337, x337)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x1922, grad_out_desc, x354, x1922, grad_bias_desc, x295)); }; // Tensor 'toCPU' invocation. float* x1926 = (float*)myMalloc(1 * sizeof(float));; CUDA_CALL(cudaMemcpy(x1926, x335, 1 * sizeof(float), cudaMemcpyDeviceToHost)); float x1928 = x1926[0]; x315 += x1928; float* x1930 = (float*)myMalloc(1 * sizeof(float));; x1930[0] = 1.0f; float* x1932 = (float*)myMalloc(1 * sizeof(float));; x1932[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 512,64,x1930,x98,512,x1932, x253, 512, x98,512)); arrayFill<<<28, 512>>>(x253, 0.0f, 32768); float* x1936 = (float*)myMalloc(1 * sizeof(float));; x1936[0] = 1.0f; float* x1938 = (float*)myMalloc(1 * sizeof(float));; x1938[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,48,x1936,x101,1,x1938, x254, 1, x101,1)); arrayFill<<<28, 512>>>(x254, 0.0f, 48); float* x1942 = (float*)myMalloc(1 * sizeof(float));; x1942[0] = 1.0f; float* x1944 = (float*)myMalloc(1 * sizeof(float));; x1944[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x1942,x104,1,x1944, x255, 1, x104,1)); arrayFill<<<28, 512>>>(x255, 0.0f, 64); float* x1948 = (float*)myMalloc(1 * sizeof(float));; x1948[0] = 1.0f; float* x1950 = (float*)myMalloc(1 * sizeof(float));; x1950[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 8192,10,x1948,x107,8192,x1950, x256, 8192, x107,8192)); arrayFill<<<28, 512>>>(x256, 0.0f, 81920); float* x1954 = (float*)myMalloc(1 * sizeof(float));; x1954[0] = 1.0f; float* x1956 = (float*)myMalloc(1 * sizeof(float));; x1956[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x1954,x110,1,x1956, x257, 1, x110,1)); arrayFill<<<28, 512>>>(x257, 0.0f, 64); float* x1960 = (float*)myMalloc(1 * sizeof(float));; x1960[0] = 1.0f; float* x1962 = (float*)myMalloc(1 * sizeof(float));; x1962[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 288,128,x1960,x113,288,x1962, x258, 288, x113,288)); arrayFill<<<28, 512>>>(x258, 0.0f, 36864); float* x1966 = (float*)myMalloc(1 * sizeof(float));; x1966[0] = 1.0f; float* x1968 = (float*)myMalloc(1 * sizeof(float));; x1968[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 32,128,x1966,x116,32,x1968, x259, 32, x116,32)); arrayFill<<<28, 512>>>(x259, 0.0f, 4096); float* x1972 = (float*)myMalloc(1 * 
sizeof(float));; x1972[0] = 1.0f; float* x1974 = (float*)myMalloc(1 * sizeof(float));; x1974[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,16,x1972,x119,1,x1974, x260, 1, x119,1)); arrayFill<<<28, 512>>>(x260, 0.0f, 16); float* x1978 = (float*)myMalloc(1 * sizeof(float));; x1978[0] = 1.0f; float* x1980 = (float*)myMalloc(1 * sizeof(float));; x1980[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x1978,x122,1,x1980, x261, 1, x122,1)); arrayFill<<<28, 512>>>(x261, 0.0f, 64); float* x1984 = (float*)myMalloc(1 * sizeof(float));; x1984[0] = 1.0f; float* x1986 = (float*)myMalloc(1 * sizeof(float));; x1986[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 128,32,x1984,x125,128,x1986, x262, 128, x125,128)); arrayFill<<<28, 512>>>(x262, 0.0f, 4096); float* x1990 = (float*)myMalloc(1 * sizeof(float));; x1990[0] = 1.0f; float* x1992 = (float*)myMalloc(1 * sizeof(float));; x1992[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 16,64,x1990,x128,16,x1992, x263, 16, x128,16)); arrayFill<<<28, 512>>>(x263, 0.0f, 1024); float* x1996 = (float*)myMalloc(1 * sizeof(float));; x1996[0] = 1.0f; float* x1998 = (float*)myMalloc(1 * sizeof(float));; x1998[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 128,16,x1996,x131,128,x1998, x264, 128, x131,128)); arrayFill<<<28, 512>>>(x264, 0.0f, 2048); float* x2002 = (float*)myMalloc(1 * sizeof(float));; x2002[0] = 1.0f; float* x2004 = (float*)myMalloc(1 * sizeof(float));; x2004[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x2002,x134,1,x2004, x265, 1, x134,1)); arrayFill<<<28, 512>>>(x265, 0.0f, 256); float* x2008 = (float*)myMalloc(1 * sizeof(float));; x2008[0] = 1.0f; float* x2010 = (float*)myMalloc(1 * sizeof(float));; x2010[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 384,48,x2008,x137,384,x2010, x266, 384, x137,384)); arrayFill<<<28, 512>>>(x266, 0.0f, 18432); float* x2014 = (float*)myMalloc(1 * sizeof(float));; x2014[0] = 1.0f; float* x2016 = (float*)myMalloc(1 * sizeof(float));; x2016[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,32,x2014,x140,1,x2016, x267, 1, x140,1)); arrayFill<<<28, 512>>>(x267, 0.0f, 32); float* x2020 = (float*)myMalloc(1 * sizeof(float));; x2020[0] = 1.0f; float* x2022 = (float*)myMalloc(1 * sizeof(float));; x2022[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x2020,x143,1,x2022, x268, 1, x143,1)); arrayFill<<<28, 512>>>(x268, 0.0f, 128); float* x2026 = (float*)myMalloc(1 * sizeof(float));; x2026[0] = 1.0f; float* x2028 = (float*)myMalloc(1 * sizeof(float));; x2028[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x2026,x146,1,x2028, x269, 1, x146,1)); arrayFill<<<28, 512>>>(x269, 0.0f, 256); float* x2032 = (float*)myMalloc(1 * sizeof(float));; x2032[0] = 1.0f; float* x2034 = (float*)myMalloc(1 * sizeof(float));; x2034[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 432,192,x2032,x149,432,x2034, x270, 432, x149,432)); arrayFill<<<28, 512>>>(x270, 0.0f, 82944); float* x2038 = (float*)myMalloc(1 * sizeof(float));; x2038[0] = 1.0f; float* x2040 = (float*)myMalloc(1 * sizeof(float));; x2040[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 144,64,x2038,x152,144,x2040, x271, 144, x152,144)); arrayFill<<<28, 512>>>(x271, 0.0f, 9216); float* x2044 = (float*)myMalloc(1 
* sizeof(float));; x2044[0] = 1.0f; float* x2046 = (float*)myMalloc(1 * sizeof(float));; x2046[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x2044,x155,1,x2046, x272, 1, x155,1)); arrayFill<<<28, 512>>>(x272, 0.0f, 64); float* x2050 = (float*)myMalloc(1 * sizeof(float));; x2050[0] = 1.0f; float* x2052 = (float*)myMalloc(1 * sizeof(float));; x2052[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x2050,x158,1,x2052, x273, 1, x158,1)); arrayFill<<<28, 512>>>(x273, 0.0f, 128); float* x2056 = (float*)myMalloc(1 * sizeof(float));; x2056[0] = 1.0f; float* x2058 = (float*)myMalloc(1 * sizeof(float));; x2058[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 48,192,x2056,x161,48,x2058, x274, 48, x161,48)); arrayFill<<<28, 512>>>(x274, 0.0f, 9216); float* x2062 = (float*)myMalloc(1 * sizeof(float));; x2062[0] = 1.0f; float* x2064 = (float*)myMalloc(1 * sizeof(float));; x2064[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,32,x2062,x164,1,x2064, x275, 1, x164,1)); arrayFill<<<28, 512>>>(x275, 0.0f, 32); float* x2068 = (float*)myMalloc(1 * sizeof(float));; x2068[0] = 1.0f; float* x2070 = (float*)myMalloc(1 * sizeof(float));; x2070[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 16,64,x2068,x167,16,x2070, x276, 16, x167,16)); arrayFill<<<28, 512>>>(x276, 0.0f, 1024); float* x2074 = (float*)myMalloc(1 * sizeof(float));; x2074[0] = 1.0f; float* x2076 = (float*)myMalloc(1 * sizeof(float));; x2076[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,16,x2074,x170,1,x2076, x277, 1, x170,1)); arrayFill<<<28, 512>>>(x277, 0.0f, 16); float* x2080 = (float*)myMalloc(1 * sizeof(float));; x2080[0] = 1.0f; float* x2082 = (float*)myMalloc(1 * sizeof(float));; x2082[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x2080,x173,1,x2082, x278, 1, x173,1)); arrayFill<<<28, 512>>>(x278, 0.0f, 256); float* x2086 = (float*)myMalloc(1 * sizeof(float));; x2086[0] = 1.0f; float* x2088 = (float*)myMalloc(1 * sizeof(float));; x2088[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 256,32,x2086,x176,256,x2088, x279, 256, x176,256)); arrayFill<<<28, 512>>>(x279, 0.0f, 8192); float* x2092 = (float*)myMalloc(1 * sizeof(float));; x2092[0] = 1.0f; float* x2094 = (float*)myMalloc(1 * sizeof(float));; x2094[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 576,256,x2092,x179,576,x2094, x280, 576, x179,576)); arrayFill<<<28, 512>>>(x280, 0.0f, 147456); float* x2098 = (float*)myMalloc(1 * sizeof(float));; x2098[0] = 1.0f; float* x2100 = (float*)myMalloc(1 * sizeof(float));; x2100[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,192,x2098,x182,1,x2100, x281, 1, x182,1)); arrayFill<<<28, 512>>>(x281, 0.0f, 192); float* x2104 = (float*)myMalloc(1 * sizeof(float));; x2104[0] = 1.0f; float* x2106 = (float*)myMalloc(1 * sizeof(float));; x2106[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 576,256,x2104,x185,576,x2106, x282, 576, x185,576)); arrayFill<<<28, 512>>>(x282, 0.0f, 147456); float* x2110 = (float*)myMalloc(1 * sizeof(float));; x2110[0] = 1.0f; float* x2112 = (float*)myMalloc(1 * sizeof(float));; x2112[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x2110,x188,1,x2112, x283, 1, x188,1)); arrayFill<<<28, 512>>>(x283, 0.0f, 64); float* x2116 = (float*)myMalloc(1 * 
sizeof(float));; x2116[0] = 1.0f; float* x2118 = (float*)myMalloc(1 * sizeof(float));; x2118[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,192,x2116,x191,1,x2118, x284, 1, x191,1)); arrayFill<<<28, 512>>>(x284, 0.0f, 192); float* x2122 = (float*)myMalloc(1 * sizeof(float));; x2122[0] = 1.0f; float* x2124 = (float*)myMalloc(1 * sizeof(float));; x2124[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 27,96,x2122,x194,27,x2124, x285, 27, x194,27)); arrayFill<<<28, 512>>>(x285, 0.0f, 2592); float* x2128 = (float*)myMalloc(1 * sizeof(float));; x2128[0] = 1.0f; float* x2130 = (float*)myMalloc(1 * sizeof(float));; x2130[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 384,64,x2128,x197,384,x2130, x286, 384, x197,384)); arrayFill<<<28, 512>>>(x286, 0.0f, 24576); float* x2134 = (float*)myMalloc(1 * sizeof(float));; x2134[0] = 1.0f; float* x2136 = (float*)myMalloc(1 * sizeof(float));; x2136[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 32,128,x2134,x200,32,x2136, x287, 32, x200,32)); arrayFill<<<28, 512>>>(x287, 0.0f, 4096); float* x2140 = (float*)myMalloc(1 * sizeof(float));; x2140[0] = 1.0f; float* x2142 = (float*)myMalloc(1 * sizeof(float));; x2142[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 288,128,x2140,x203,288,x2142, x288, 288, x203,288)); arrayFill<<<28, 512>>>(x288, 0.0f, 36864); float* x2146 = (float*)myMalloc(1 * sizeof(float));; x2146[0] = 1.0f; float* x2148 = (float*)myMalloc(1 * sizeof(float));; x2148[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x2146,x206,1,x2148, x289, 1, x206,1)); arrayFill<<<28, 512>>>(x289, 0.0f, 64); float* x2152 = (float*)myMalloc(1 * sizeof(float));; x2152[0] = 1.0f; float* x2154 = (float*)myMalloc(1 * sizeof(float));; x2154[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 64,256,x2152,x209,64,x2154, x290, 64, x209,64)); arrayFill<<<28, 512>>>(x290, 0.0f, 16384); float* x2158 = (float*)myMalloc(1 * sizeof(float));; x2158[0] = 1.0f; float* x2160 = (float*)myMalloc(1 * sizeof(float));; x2160[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 432,192,x2158,x212,432,x2160, x291, 432, x212,432)); arrayFill<<<28, 512>>>(x291, 0.0f, 82944); float* x2164 = (float*)myMalloc(1 * sizeof(float));; x2164[0] = 1.0f; float* x2166 = (float*)myMalloc(1 * sizeof(float));; x2166[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x2164,x215,1,x2166, x292, 1, x215,1)); arrayFill<<<28, 512>>>(x292, 0.0f, 256); float* x2170 = (float*)myMalloc(1 * sizeof(float));; x2170[0] = 1.0f; float* x2172 = (float*)myMalloc(1 * sizeof(float));; x2172[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x2170,x218,1,x2172, x293, 1, x218,1)); arrayFill<<<28, 512>>>(x293, 0.0f, 128); float* x2176 = (float*)myMalloc(1 * sizeof(float));; x2176[0] = 1.0f; float* x2178 = (float*)myMalloc(1 * sizeof(float));; x2178[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 256,48,x2176,x221,256,x2178, x294, 256, x221,256)); arrayFill<<<28, 512>>>(x294, 0.0f, 12288); float* x2182 = (float*)myMalloc(1 * sizeof(float));; x2182[0] = 1.0f; float* x2184 = (float*)myMalloc(1 * sizeof(float));; x2184[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,96,x2182,x224,1,x2184, x295, 1, x224,1)); arrayFill<<<28, 512>>>(x295, 0.0f, 96); float* x2188 = 
(float*)myMalloc(1 * sizeof(float));; x2188[0] = 1.0f; float* x2190 = (float*)myMalloc(1 * sizeof(float));; x2190[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,192,x2188,x227,1,x2190, x296, 1, x227,1)); arrayFill<<<28, 512>>>(x296, 0.0f, 192); float* x2194 = (float*)myMalloc(1 * sizeof(float));; x2194[0] = 1.0f; float* x2196 = (float*)myMalloc(1 * sizeof(float));; x2196[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x2194,x230,1,x2196, x297, 1, x230,1)); arrayFill<<<28, 512>>>(x297, 0.0f, 128); float* x2200 = (float*)myMalloc(1 * sizeof(float));; x2200[0] = 1.0f; float* x2202 = (float*)myMalloc(1 * sizeof(float));; x2202[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,192,x2200,x233,1,x2202, x298, 1, x233,1)); arrayFill<<<28, 512>>>(x298, 0.0f, 192); float* x2206 = (float*)myMalloc(1 * sizeof(float));; x2206[0] = 1.0f; float* x2208 = (float*)myMalloc(1 * sizeof(float));; x2208[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 144,64,x2206,x236,144,x2208, x299, 144, x236,144)); arrayFill<<<28, 512>>>(x299, 0.0f, 9216); float* x2212 = (float*)myMalloc(1 * sizeof(float));; x2212[0] = 1.0f; float* x2214 = (float*)myMalloc(1 * sizeof(float));; x2214[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 48,192,x2212,x239,48,x2214, x300, 48, x239,48)); arrayFill<<<28, 512>>>(x300, 0.0f, 9216); float* x2218 = (float*)myMalloc(1 * sizeof(float));; x2218[0] = 1.0f; float* x2220 = (float*)myMalloc(1 * sizeof(float));; x2220[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 64,256,x2218,x242,64,x2220, x301, 64, x242,64)); arrayFill<<<28, 512>>>(x301, 0.0f, 16384); float* x2224 = (float*)myMalloc(1 * sizeof(float));; x2224[0] = 1.0f; float* x2226 = (float*)myMalloc(1 * sizeof(float));; x2226[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 96,16,x2224,x245,96,x2226, x302, 96, x245,96)); arrayFill<<<28, 512>>>(x302, 0.0f, 1536); float* x2230 = (float*)myMalloc(1 * sizeof(float));; x2230[0] = 1.0f; float* x2232 = (float*)myMalloc(1 * sizeof(float));; x2232[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,10,x2230,x248,1,x2232, x303, 1, x248,1)); arrayFill<<<28, 512>>>(x303, 0.0f, 10); float* x2236 = (float*)myMalloc(1 * sizeof(float));; x2236[0] = 1.0f; float* x2238 = (float*)myMalloc(1 * sizeof(float));; x2238[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,48,x2236,x251,1,x2238, x304, 1, x251,1)); arrayFill<<<28, 512>>>(x304, 0.0f, 48); int32_t x2242 = x321 + 1; int32_t x2244 = x2242 % x2243; bool x2245 = x2244 == 0; if (x2245) { float x2250 = x315; double x2246 = (double)x322; double x2247 = 100.0 * x2246; double x2249 = x2247 / x2248; float x2251 = (float)x321; float x2252 = x2250 / x2251; printf("Train epoch %d: [%d/%d (%.0f%%)] Average Loss: %.6f\n",x311,x322,x11,x2249,x2252); fflush(stdout); } else { } int64_t x2257 = (long)mallocAddr; int64_t x2258 = x2257 - x307; memset((void*)x307, 0, x2258); mallocAddr = (void*)x307; int64_t x2261 = (long)gpuMallocAddr; int64_t x2262 = x2261 - x308; cudaMemset((void*)x308, 0, x2262); gpuMallocAddr = (void*)x308; } gettimeofday(&end_1, NULL); timeval_subtract(&diff_1, &end_1, &begin_1);; int64_t x2269 = ((diff_1.tv_sec * 1000000L) + (diff_1.tv_usec)); double x2270 = (double)x2269; double x2271 = x2270 / 1000000.0; x306[x311] = x2271; int64_t x2273 = x2269 / 1000LL; int64_t x2275 = x2269 / 
x2274; printf("Training completed in %ldms (%ld us/images)\n",x2273,x2275); float x2277 = x315; float x2279 = x2277 / x2278; double x2280 = (double)x2279; x305[x311] = x2280; } gettimeofday(&end_0, NULL); timeval_subtract(&diff_0, &end_0, &begin_0);; int64_t x2286 = ((diff_0.tv_sec * 1000000L) + (diff_0.tv_usec)); sort(x306, x306 + 4); double x2292 = x306[2]; int64_t x2293 = (long)fopen(x0, "w"); fprintf((FILE *)x2293, "unit: %s\n", "1 epoch"); for(int x2295=0; x2295 < 4; x2295++) { double x2296 = x305[x2295]; fprintf((FILE *)x2293, "%lf\n", x2296); } fprintf((FILE *)x2293, "run time: %lf %lf\n", x39, x2292); fclose((FILE*)x2293); // Backend cleanup. CUBLAS_CALL(cublasDestroy(cublasHandle)); CUDA_CALL(cudaFree(gpuMallocBase)); CUDNN_CALL(cudnnDestroy(cudnnHandle)); } /***************************************** End of C Generated Code *******************************************/
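/*
 * Editor's sketch (appended after the generated code, not produced by the
 * code generator): the long run of cublasSgeam + arrayFill calls above is a
 * plain SGD step, unrolled once per parameter tensor.  Each cublasSgeam call
 * computes W = 1.0f * W + (-0.005f) * dW in place (cuBLAS permits C to alias
 * A when lda == ldc), after which the gradient buffer is cleared for the
 * next mini-batch.  A minimal hand-written equivalent is shown below; the
 * helper name sgd_step and its parameters are illustrative, and it reuses
 * the CUBLAS_CALL / CUDA_CALL error-checking macros defined for this file.
 */
static void sgd_step(cublasHandle_t handle, float* param, float* grad,
                     int rows, int cols, float lr) {
  const float one = 1.0f;
  const float neg_lr = -lr;  // -0.005f in the generated updates above
  // param = 1*param + (-lr)*grad, written back into param (in-place Sgeam).
  CUBLAS_CALL(cublasSgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, rows, cols,
                          &one, param, rows,
                          &neg_lr, grad, rows,
                          param, rows));
  // Zero the gradient accumulator.  The generated code launches its own
  // arrayFill kernel with 0.0f; for a zero fill, clearing the bytes with
  // cudaMemset gives the same result.
  CUDA_CALL(cudaMemset(grad, 0, (size_t)rows * (size_t)cols * sizeof(float)));
}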
#ifdef RAPIDJSON_DEFINED #include "rapidjson/document.h" #include "rapidjson/prettywriter.h" #include "rapidjson/filestream.h" #endif using namespace std; namespace amgx { #ifdef RAPIDJSON_DEFINED static int unnamed_scope_counter = 0; static rapidjson::Document json_parser; //made as global to avoid passing template parameter memory allocator via parameters #endif // trim from start static inline std::string &ltrim(std::string &s) { s.erase(s.begin(), std::find_if(s.begin(), s.end(), std::not1(std::ptr_fun<int, int>(std::isspace)))); return s; } // trim from end static inline std::string &rtrim(std::string &s) { s.erase(std::find_if(s.rbegin(), s.rend(), std::not1(std::ptr_fun<int, int>(std::isspace))).base(), s.end()); return s; } // trim from both ends static inline std::string &trim(std::string &s) { return ltrim(rtrim(s)); } AMG_Config::ParamDesc AMG_Config::param_desc; __inline__ bool allowed_symbol(const char &a) { return (a >= 'a' && a <= 'z') || (a >= 'A' && a <= 'Z') || (a >= '0' && a <= '9') || (a == '=') || (a == '_') || (a == '.') || (a == '-') || (a == '+'); } // Parses the supplied parameter string AMGX_ERROR AMG_Config::parseParameterString(const char *str) { // rapidjson doesn't handle NULL if (!str) return AMGX_ERR_CONFIGURATION; try { #ifdef RAPIDJSON_DEFINED if (parse_json_string(str) != AMGX_OK) { #endif //copy to a temporary array to avoid destroying the string string params(str); // Read the config version int config_version = getConfigVersion(params); // Parse the parameter string if (parseString(params, config_version) != AMGX_OK) { std::string err = "Error parsing parameter string: " + params; FatalError(err.c_str(), AMGX_ERR_CONFIGURATION); } #ifdef RAPIDJSON_DEFINED } #endif } catch (amgx_exception &e) { amgx_printf("Error parsing parameter string: %s\n", e.what()); return e.reason(); } catch (...) { amgx_printf("Error parsing parameter string\n"); return AMGX_ERR_CONFIGURATION; } return AMGX_OK; } // Parses the supplied parameter string AMGX_ERROR AMG_Config::parseParameterStringAndFile(const char *str, const char *filename) { AMGX_ERROR ret = AMGX_OK; try { ret = parseFile(filename); if (ret == AMGX_OK) { ret = parseParameterString(str); } } catch (amgx_exception &e) { amgx_printf("Error parsing parameter string: %s\n", e.what()); ret = e.reason(); } catch (...) { amgx_printf("Error parsing parameter string\n"); ret = AMGX_ERR_CONFIGURATION; } return ret; } // Extracts a single parameter line from the config string and increments idx void AMG_Config::getOneParameterLine(std::string &params, std::string &param, int &idx) { param.erase(); while ((idx < params.length()) && (params[idx] != ',') && (params[idx] != ';')) // config delimiters { param += params[idx]; idx ++; } idx++; } // Gets an input config_string, gets the config_version value and removes the config_version entry from the file int AMG_Config::getConfigVersion(std::string &params) { int idx = 0; std::string param; getOneParameterLine(params, param, idx); if (param.length() > 2 && param.find_first_not_of(' ') != std::string::npos) /* check that param is not empty and check length: one for parameter name, one for the equal sign, one for the parameter value. 
otherwise - this is error */ { // Extract the name, value, current_scope, new_scope string name, value, current_scope, new_scope; extractParamInfo(param, name, value, current_scope, new_scope); int config_version; if (name == "config_version") { config_version = getValue<int>(value.c_str()); if (config_version != 1 && config_version != 2) { std::string err = "Error, config_version must be 1 or 2. Config string is " + std::string(param); FatalError(err.c_str(), AMGX_ERR_CONFIGURATION); } // Erase the config_version entry params.erase(0, idx); } else { config_version = 1; } return config_version; } return 1; } void AMG_Config::convertToCurrentConfigVersion(std::string &params, int config_version) { // -------------------------- // DO THE CONVERSION HERE // -------------------------- std::string old_params = params; params.erase(); if (config_version == 1) { int idx = 0; std::string param; while (idx < old_params.length()) { getOneParameterLine(old_params, param, idx); if (param.length() > 2 && param.find_first_not_of(' ') != std::string::npos) /* check that param is not empty and check length: one for parameter name, one for the equal sign, one for the parameter value. otherwise - this is error */ { std::string name, value, current_scope, new_scope; extractParamInfo(param, name, value, current_scope, new_scope); if (current_scope != "default" || new_scope != "default") { std::string err = "Error parsing parameter string: " + param + " . Scopes only supported with config_version=2 and higher. Add \"config_version=2\" to the config string to use nested solvers"; FatalError(err.c_str(), AMGX_ERR_CONFIGURATION); } //else if (name == "smoother" ) //{ // Add coloring if necessary //if (value=="MULTICOLOR_DILU" || value=="MULTICOLOR_GS" || value=="MULTICOLOR_ILU") //{ // params += "coloring_level=1 ; "; //} //params += ( name + "=" + value + " ; "); //} else if (name == "smoother_weight") { params += "relaxation_factor=" + value + " ; "; } else if (name == "min_block_rows") { params += "min_coarse_rows=" + value + " ; "; } else if (value == "JACOBI" || value == "JACOBI_NO_CUSP") { params += name + "=" + "BLOCK_JACOBI ; "; } else { // Just add it to default scope params += ( name + "=" + value + " ; "); } } } } else if (config_version == 2) { } else { std::stringstream err; err << "Invalid config_version (config_version must be 1 or 2)" << std::endl; FatalError(err.str().c_str(), AMGX_ERR_CONFIGURATION); } } // Parses the supplied parameter string AMGX_ERROR AMG_Config::parseString(std::string &params, int &config_version) { try { m_config_version = config_version; if (m_config_version != this->m_latest_config_version) { size_t pos = params.find("verbosity_level"); pos = params.find("=", pos); pos++; while (params[pos] == ' ') { pos++; } // skip spaces bool print = (pos == string::npos) || (params[pos] > '2'); std::string ss = "Converting config string to current config version\n"; if (print) { #ifdef AMGX_WITH_MPI amgx_distributed_output(ss.c_str(), ss.length()); #else amgx_output(ss.c_str(), ss.length()); #endif } convertToCurrentConfigVersion(params, config_version); ss = "Parsing configuration string: " + params + "\n"; if (print) { #ifdef AMGX_WITH_MPI amgx_distributed_output(ss.c_str(), ss.length()); #else amgx_output(ss.c_str(), ss.length()); #endif } } // Parse the individual parameters int idx = 0; std::string param; while (idx < params.length()) { getOneParameterLine(params, param, idx); if (param.length() > 2 && param.find_first_not_of(' ') != std::string::npos) /* check that param is not 
empty and check length: one for parameter name, one for the equal sign, one for the parameter value. otherwise - this is error */ { setParameter(param); } } } catch (amgx_exception &e) { amgx_printf("Error parsing parameter string: %s\n", e.what()); return e.reason(); } catch (...) { amgx_printf("Error parsing parameter string\n"); return AMGX_ERR_CONFIGURATION; } return AMGX_OK; } AMGX_ERROR AMG_Config::getParameterStringFromFile(const char *filename, std::string &params) { ifstream fin; try { // Store the file content into a string params = ""; fin.open(filename); if (!fin) { char error[500]; sprintf(error, "Error opening file '%s'", filename); FatalError(error, AMGX_ERR_IO); } while (!fin.eof()) { string line; std::getline(fin, line); line = trim(line); if (line.empty() || line[0] == '#') { continue; } params += (line + ", "); } fin.close(); } catch (amgx_exception &e) { amgx_output(e.what(), strlen(e.what())); if (fin) { fin.close(); } return e.reason(); } catch (...) { return AMGX_ERR_UNKNOWN; } return AMGX_OK; } std::string AMG_Config::getParamTypeName(const std::type_info *param_type) { // stupid but portable version to avoid demangling if (typeid(int) == *param_type) { return "int"; } else if (typeid(size_t) == *param_type) { return "size_t"; } else if (typeid(double) == *param_type) { return "double"; } else if (typeid(std::string) == *param_type) { return "string"; } else if (typeid(AlgorithmType) == *param_type) { return "AlgorithmType"; } else if (typeid(ViewType) == *param_type) { return "ViewType"; } else if (typeid(ColoringType) == *param_type) { return "ColoringType"; } else if (typeid(BlockFormat) == *param_type) { return "BlockFormat"; } else if (typeid(NormType) == *param_type) { return "NormType"; } return "Unknown type"; } template<typename T> void AMG_Config::setNamedParameter(const string &name, const T &c_value, const std::string &current_scope, const std::string &new_scope, ParamDesc::iterator &param_desc_iter) { string err = "Parameter " + name + "(" + current_scope + ") is of unknown type, cannot import value."; FatalError(err.c_str(), AMGX_ERR_CONFIGURATION); } template<> void AMG_Config::setNamedParameter(const string &name, const std::string &c_value, const std::string &current_scope, const std::string &new_scope, ParamDesc::iterator &param_desc_iter) { if (*(param_desc_iter->second.type) == typeid(AlgorithmType)) { setParameter(name, getValue<AlgorithmType>(c_value.c_str()), current_scope, new_scope); } else if (*(param_desc_iter->second.type) == typeid(ViewType)) { setParameter(name, getValue<ViewType>(c_value.c_str()), current_scope, new_scope); } else if (*(param_desc_iter->second.type) == typeid(ColoringType)) { setParameter(name, getValue<ColoringType>(c_value.c_str()), current_scope, new_scope); } else if (*(param_desc_iter->second.type) == typeid(BlockFormat)) { setParameter(name, getValue<BlockFormat>(c_value.c_str()), current_scope, new_scope); } else if (*(param_desc_iter->second.type) == typeid(NormType)) { setParameter(name, getValue<NormType>(c_value.c_str()), current_scope, new_scope); } else if (*(param_desc_iter->second.type) == typeid(std::string)) { setParameter(name, c_value, current_scope, new_scope); } else { string err = "Incorrect config entry. 
Type of the parameter \"" + name + "\" in the config is string, but " + getParamTypeName(param_desc_iter->second.type) + " is expected"; FatalError(err.c_str(), AMGX_ERR_CONFIGURATION); } } template<> void AMG_Config::setNamedParameter(const string &name, const double &c_value, const std::string &current_scope, const std::string &new_scope, ParamDesc::iterator &param_desc_iter) { if (typeid(double) == *(param_desc_iter->second.type)) { setParameter(name, c_value, current_scope, new_scope); } else if (typeid(int) == *(param_desc_iter->second.type)) { int _i_val = (int)(c_value); setParameter(name, _i_val, current_scope, new_scope); } else { string err = "Incorrect config entry. Type of the parameter \"" + name + "\" in the config is double, but " + getParamTypeName(param_desc_iter->second.type) + " is expected"; FatalError(err.c_str(), AMGX_ERR_CONFIGURATION); } } template<> void AMG_Config::setNamedParameter(const string &name, const int &c_value, const std::string &current_scope, const std::string &new_scope, ParamDesc::iterator &param_desc_iter) { if (typeid(int) == *(param_desc_iter->second.type)) { setParameter(name, c_value, current_scope, new_scope); } else if (typeid(double) == *(param_desc_iter->second.type)) { double _d_val = (double)(c_value); setParameter(name, _d_val, current_scope, new_scope); } else { string err = "Incorrect config entry. Type of the parameter \"" + name + "\" in the config is int, but " + getParamTypeName(param_desc_iter->second.type) + " is expected"; FatalError(err.c_str(), AMGX_ERR_CONFIGURATION); } } template<typename T> void AMG_Config::importNamedParameter(const char *c_name, const T &c_value, const std::string &current_scope, const std::string &new_scope) { std::string name = c_name; // Add new_scope to scope vector if ( find(m_scope_vector.begin(), m_scope_vector.end(), new_scope) == m_scope_vector.end()) { m_scope_vector.push_back(new_scope); } else if (new_scope != "default" && !getAllowConfigurationMod()) { std::string err = "Incorrect config entry (new scope already defined): " + new_scope; FatalError(err.c_str(), AMGX_ERR_CONFIGURATION); } // extract the name, value, new_scope and old_scope //verify parameter was registered ParamDesc::iterator iter = param_desc.find(string(name)); if (iter == param_desc.end()) { string err = "Variable '" + string(name) + "' not registered"; FatalError(err.c_str(), AMGX_ERR_CONFIGURATION); } if ( (name == "determinism_flag" || name == "block_format" || name == "separation_interior" || name == "separation_exterior" || name == "min_rows_latency_hiding" || name == "fine_level_consolidation" || name == "use_cuda_ipc_consolidation") && current_scope != "default" ) { string err = "Incorrect config entry. Parameter " + name + " can only be specified with default scope."; FatalError(err.c_str(), AMGX_ERR_CONFIGURATION); } // Check that new scope is only associated with a solver if (new_scope != "default" && find(m_solver_list.begin(), m_solver_list.end(), name) == m_solver_list.end() ) { string err = "Incorrect config entry. New scope can only be associated with a solver. 
new_scope=" + new_scope + ", name=" + name + "."; FatalError(err.c_str(), AMGX_ERR_CONFIGURATION); } // Set the new parameter name, value setNamedParameter(name, c_value, current_scope, new_scope, iter); } #ifdef RAPIDJSON_DEFINED void AMG_Config::import_json_object(rapidjson::Value &obj, bool outer) { const char *json_type_names[] = { "Null", "False", "True", "Object", "Array", "String", "Number" }; std::string current_scope = "default"; std::string default_new_scope = "default"; if (obj.HasMember("scope")) { current_scope = obj["scope"].GetString(); } for (rapidjson::Value::MemberIterator iter = obj.MemberBegin(); iter != obj.MemberEnd(); ++iter) { if (strcmp(iter->name.GetString(), "config_version") == 0 || strcmp(iter->name.GetString(), "scope") == 0) { continue; } if (strcmp(iter->name.GetString(), "solver") == 0 && !outer) { continue; } if (strcmp(iter->name.GetString(), "eig_solver") == 0 && !outer) { continue; } //printf("Parsing parameter with name \"%s\" of type %s\n", iter->name.GetString(), json_type_names[iter->value.GetType()]); if (iter->value.IsObject()) { if (!iter->value.HasMember("scope")) { char tmp[32]; #ifdef _WIN32 _snprintf( #else snprintf( #endif tmp, 31, "unnamed_solver_%d", unnamed_scope_counter++); rapidjson::Value new_val; new_val.SetString(tmp, strlen(tmp)); iter->value.AddMember("scope", new_val, json_parser.GetAllocator()); } importNamedParameter(iter->name.GetString(), std::string(iter->value["solver"].GetString()), current_scope, std::string(iter->value["scope"].GetString())); import_json_object(iter->value, false); } else if (iter->value.IsInt()) { //printf("Parsing as int\n"); importNamedParameter(iter->name.GetString(), iter->value.GetInt(), current_scope, default_new_scope); } else if (iter->value.IsDouble()) { //printf("Parsing as double\n"); importNamedParameter(iter->name.GetString(), iter->value.GetDouble(), current_scope, default_new_scope); } else if (iter->value.IsString()) { //printf("Parsing as string\n"); importNamedParameter(iter->name.GetString(), std::string(iter->value.GetString()), current_scope, default_new_scope); } else { std::string err = "Cannot import parameter \"" + std::string(iter->name.GetString()) + "\" of type " + std::string(json_type_names[iter->value.GetType()]); } } } AMGX_ERROR AMG_Config::parse_json_file(const char *filename) { ifstream fin; try { // Store the file content into a string std::string params = ""; fin.open(filename); if (!fin) { char error[500]; sprintf(error, "Error opening file '%s'", filename); FatalError(error, AMGX_ERR_IO); } while (!fin.eof()) { string line; std::getline(fin, line); //line=trim(line); if (line.empty()) { continue; } params += (line + "\n"); } fin.close(); // start parsing if (json_parser.Parse<0>(params.c_str()).HasParseError()) { std::string tmp = "Cannot read file as JSON object, trying as AMGX config\n"; amgx_distributed_output(tmp.c_str(), tmp.length()); return AMGX_ERR_NOT_IMPLEMENTED; // } // write json cfg to stdout /*rapidjson::FileStream f(stdout); rapidjson::PrettyWriter<rapidjson::FileStream> writer(f); json_parser.Accept(writer); std::cout << std::endl;*/ import_json_object(json_parser, true); } catch (amgx_exception &e) { amgx_distributed_output(e.what(), strlen(e.what())); if (fin) { fin.close(); } return e.reason(); } catch (...) 
{ return AMGX_ERR_UNKNOWN; } return AMGX_OK; } AMGX_ERROR AMG_Config::parse_json_string(const char *str) { try { // start parsing if (json_parser.Parse<0>(str).HasParseError()) { std::string tmp = "Cannot read file as JSON object, trying as AMGX config\n"; amgx_distributed_output(tmp.c_str(), tmp.length()); return AMGX_ERR_NOT_IMPLEMENTED; // } /*rapidjson::FileStream f(stdout); rapidjson::PrettyWriter<rapidjson::FileStream> writer(f); json_parser.Accept(writer); std::cout << std::endl; */ import_json_object(json_parser, true); } catch (amgx_exception &e) { amgx_distributed_output(e.what(), strlen(e.what())); return e.reason(); } catch (...) { return AMGX_ERR_UNKNOWN; } return AMGX_OK; } void getParameterValueString(std::string &buffer, const std::type_info *type, const Parameter &param) { if (*type == typeid(double)) { buffer.resize(32); #ifdef _WIN32 _snprintf( #else snprintf( #endif &buffer[0], 31, "%f", param.get<double>()); } else if (*type == typeid(size_t)) { buffer.resize(32); #ifdef _WIN32 _snprintf( #else snprintf( #endif &buffer[0], 31, "%zu", param.get<size_t>()); } else if (*type == typeid(int)) { buffer.resize(32); #ifdef _WIN32 _snprintf( #else snprintf( #endif &buffer[0], 31, "%d", param.get<int>()); } else if (*type == typeid(std::string)) { buffer = param.get<std::string>(); } else if (*type == typeid(AlgorithmType)) { buffer.assign(getString(param.get<AlgorithmType>())); } else if (*type == typeid(ViewType)) { buffer.assign(getString(param.get<ViewType>())); } else if (*type == typeid(ColoringType)) { buffer.assign(getString(param.get<ColoringType>())); } else if (*type == typeid(BlockFormat)) { buffer.assign(getString(param.get<BlockFormat>())); } else if (*type == typeid(NormType)) { buffer.assign(getString(param.get<NormType>())); } else { FatalError("Unknown type met while processing parameter", AMGX_ERR_CONFIGURATION) } } // Not templating this one because we want to separate strings, doubles and ints in JSON document instead of writing everything as strings void fillJSONValueWithParameter(rapidjson::Value &val, const ParameterDescription &desc, rapidjson::Document::AllocatorType &allocator) { std::string buffer; rapidjson::Value default_value; { if (*(desc.type) == typeid(int)) { default_value.SetInt(desc.default_value.get<int>()); } else if (*(desc.type) == typeid(size_t)) { default_value.SetInt(desc.default_value.get<size_t>()); } else if (*(desc.type) == typeid(double)) { default_value.SetInt(desc.default_value.get<double>()); } else { getParameterValueString(buffer, desc.type, desc.default_value); default_value.SetString(buffer.c_str(), allocator); } } val.AddMember("default_value", default_value, allocator); if (desc.allowed_values.pm_type != PM_NOT_SET) { if (desc.allowed_values.pm_type == PM_SET) { rapidjson::Value allowed_values_obj(rapidjson::kArrayType); const std::vector<Parameter> &values_set = desc.allowed_values.value_set; std::string buffer; for (int i = 0; i < values_set.size(); i++) { if (*(desc.type) == typeid(int)) { allowed_values_obj.PushBack(values_set[i].get<int>(), allocator); } else if (*(desc.type) == typeid(size_t)) { allowed_values_obj.PushBack(values_set[i].get<size_t>(), allocator); } else if (*(desc.type) == typeid(double)) { allowed_values_obj.PushBack(values_set[i].get<double>(), allocator); } else { getParameterValueString(buffer, desc.type, values_set[i]); rapidjson::Value temp_value(buffer.c_str(), allocator); allowed_values_obj.PushBack(temp_value, allocator); } } val.AddMember("allowed_values", allowed_values_obj, allocator); } 
else if (desc.allowed_values.pm_type == PM_MINMAX) { rapidjson::Value allowed_values_obj(rapidjson::kObjectType); const std::pair<Parameter, Parameter> &values_pair = desc.allowed_values.value_min_max; std::string buffer; { if (*(desc.type) == typeid(int)) { allowed_values_obj.AddMember("min", values_pair.first.get<int>(), allocator); allowed_values_obj.AddMember("max", values_pair.second.get<int>(), allocator); } else if (*(desc.type) == typeid(size_t)) { allowed_values_obj.AddMember("min", values_pair.first.get<size_t>(), allocator); allowed_values_obj.AddMember("max", values_pair.second.get<size_t>(), allocator); } else if (*(desc.type) == typeid(double)) { allowed_values_obj.AddMember("min", (double)(values_pair.first.get<double>()), allocator); allowed_values_obj.AddMember("max", (double)(values_pair.second.get<double>()), allocator); } else { getParameterValueString(buffer, desc.type, values_pair.first); rapidjson::Value temp_value(buffer.c_str(), allocator); allowed_values_obj.AddMember("min", temp_value, allocator); getParameterValueString(buffer, desc.type, values_pair.second); temp_value.SetString(buffer.c_str(), allocator); allowed_values_obj.AddMember("max", temp_value, allocator); } } val.AddMember("allowed_values", allowed_values_obj, allocator); } } } #endif AMGX_ERROR AMG_Config::write_parameters_description_json(const char *filename) { try { #ifndef RAPIDJSON_DEFINED FatalError("This build does not support JSON.", AMGX_ERR_NOT_IMPLEMENTED); #else rapidjson::Document json_out; json_out.SetObject(); for (ParamDesc::const_iterator iter = param_desc.begin(); iter != param_desc.end(); iter++) { // new entry rapidjson::Value param_object(rapidjson::kObjectType); // adding description param_object.AddMember("description", iter->second.description.c_str(), json_out.GetAllocator()); // adding parameter type name rapidjson::Value param_type(AMG_Config::getParamTypeName(iter->second.type).c_str(), json_out.GetAllocator()); param_object.AddMember("parameter_type", param_type, json_out.GetAllocator()); //adding default value fillJSONValueWithParameter(param_object, iter->second, json_out.GetAllocator()); // adding entry to output doc json_out.AddMember(iter->first.c_str(), param_object, json_out.GetAllocator()); } // write json cfg to stdout FILE *fout = fopen(filename, "w"); if (!fout) { std::string tmp = "Cannot open output file" + std::string(filename) + " to write parameters description"; FatalError(tmp.c_str(), AMGX_ERR_IO); } rapidjson::FileStream f(fout); rapidjson::PrettyWriter<rapidjson::FileStream> writer(f); json_out.Accept(writer); fclose(fout); #endif } catch (amgx_exception &e) { amgx_output(e.what(), strlen(e.what())); return e.reason(); } catch (...) 
{ return AMGX_ERR_UNKNOWN; } return AMGX_OK; } AMGX_ERROR AMG_Config::parseFile(const char *filename) { try { #ifdef RAPIDJSON_DEFINED AMGX_ERROR json_ret = parse_json_file(filename); // if parsed ok, then we are done here, otherwise - try to parse as a string if (json_ret == AMGX_OK) { return json_ret; } #endif std::string params; // Get the parameter string corresponding to file if (getParameterStringFromFile(filename, params) != AMGX_OK) { std::string err = "Error parsing parameter file: " + std::string(filename); FatalError(err.c_str(), AMGX_ERR_CONFIGURATION); } // Read the config version int config_version = getConfigVersion(params); // Parse the string if (parseString(params, config_version) != AMGX_OK) { std::string err = "Error parsing parameter string obtained from file: " + params; FatalError(err.c_str(), AMGX_ERR_CONFIGURATION); } } catch (amgx_exception &e) { amgx_output(e.what(), strlen(e.what())); return e.reason(); } catch (...) { return AMGX_ERR_UNKNOWN; } return AMGX_OK; } template <typename Type> void AMG_Config::getParameter(const string &name, Type &value, const string &current_scope, string &new_scope) const { //verify the parameter has been registered ParamDesc::const_iterator desc_iter = param_desc.find(name); std::string err; if (desc_iter == param_desc.end()) { err = "getParameter error: '" + string(name) + "' not found\n"; FatalError(err.c_str(), AMGX_ERR_CONFIGURATION); } //verify the types match if (desc_iter->second.type != &typeid(Type)) { err = "getParameter error: '" + string(name) + "' type miss match\n"; FatalError(err.c_str(), AMGX_ERR_CONFIGURATION); } // Check if the parameter name/scope pair has been set ParamDB::const_iterator param_iter = m_params.find(make_pair(current_scope, name)); // Get the value and new_scope if (param_iter == m_params.end()) { value = desc_iter->second.default_value.get<Type>(); new_scope = "default"; } else { value = param_iter->second.second.get<Type>(); //return the parameter value new_scope = param_iter->second.first; //return the new_scope associated with parameter } } template <typename Type> Type AMG_Config::getParameter(const string &name, const string &current_scope) const { Type value; std::string new_scope; // For cases where the new scope is not required getParameter(name, value, current_scope, new_scope); return value; } template <typename Type> void AMG_Config::setParameter(std::string name, Type value, const std::string &current_scope, const std::string &new_scope) { //verify that the parameter has been registered ParamDesc::iterator iter = param_desc.find(name); std::string err; if (iter == param_desc.end()) { err = "setParameter error: '" + string(name) + "' not found\n"; FatalError(err.c_str(), AMGX_ERR_CONFIGURATION); } if (iter->second.type != &typeid(Type)) { err = "setParameter error: '" + string(name) + "' type miss match\n"; FatalError(err.c_str(), AMGX_ERR_CONFIGURATION); } m_params[make_pair(current_scope, name)] = make_pair(new_scope, value); } template <typename Type> void AMG_Config::setParameter(std::string name, Type value, const std::string &current_scope) { setParameter(name, value, current_scope, "default"); } /* template <typename Type> void AMG_Config::setParameter(std::string name, Type value) { setParameter(name,value,"default","default"); } */ template <> void AMG_Config::setParameter(std::string name, void *value, const std::string &current_scope, const std::string &new_scope) { //verify that the parameter has been registered ParamDesc::iterator iter = param_desc.find(name); std::string 
err; if (iter == param_desc.end()) { err = "setParameter error: '" + string(name) + "' not found\n"; FatalError(err.c_str(), AMGX_ERR_CONFIGURATION); } if (iter->second.type != &typeid(void *)) { err = "setParameter error: '" + string(name) + "' type miss match\n"; FatalError(err.c_str(), AMGX_ERR_CONFIGURATION); } m_params[make_pair(current_scope, name)] = make_pair(new_scope, value); } template <> void AMG_Config::setParameter(std::string name, void *value, const std::string &current_scope) { setParameter<void *>(name, value, current_scope, "default"); } std::string AMG_Config::getParameterString(Parameter &parameter, ParameterDescription &param_desc) { stringstream ss; if (*(param_desc.type) == typeid(string)) { ss << parameter.get<std::string>() ; } else if (*(param_desc.type) == typeid(int)) { ss << parameter.get<int>() ; } else if (*(param_desc.type) == typeid(size_t)) { ss << parameter.get<size_t>() ; } else if (*(param_desc.type) == typeid(float)) { ss << parameter.get<float>() ; } else if (*(param_desc.type) == typeid(double)) { ss << parameter.get<double>() ; } else if (*(param_desc.type) == typeid(AlgorithmType)) { ss << getString(parameter.get<AlgorithmType>()) ; } else if (*(param_desc.type) == typeid(ViewType)) { ss << getString(parameter.get<ViewType>()) ; } else if (*(param_desc.type) == typeid(ColoringType)) { ss << getString(parameter.get<ColoringType>()) ; } else if (*(param_desc.type) == typeid(BlockFormat)) { ss << getString(parameter.get<BlockFormat>()) ; } else if (*(param_desc.type) == typeid(NormType)) { ss << getString(parameter.get<NormType>()) ; } else if (*(param_desc.type) == typeid(void *)) { ss << parameter.get<void *>(); } else { string err = "getParameterString is not implemented for the datatype of value'" + param_desc.name + "'"; FatalError(err.c_str(), AMGX_ERR_CONFIGURATION); } return ss.str(); } void AMG_Config::printOptions() { std::stringstream ss; for (ParamDesc::iterator iter = param_desc.begin(); iter != param_desc.end(); iter++) { ss << " " << iter->second.name << ": " << iter->second.description << endl; } amgx_output(ss.str().c_str(), ss.str().length()); } void AMG_Config::printAMGConfig() { std::stringstream config_ss; std::stringstream ss; int devId; cudaGetDevice(&devId); cudaDeviceProp deviceProp = getDeviceProperties(); //ss << "HP Scalar Type: " << scalar_hp << endl; //ss << "LP Scalar Type: " << scalar_lp << endl; ss << "Device " << devId << ": " << deviceProp.name << endl; ss << "AMG Configuration: " << endl; config_ss << endl; config_ss << "Default values:" << std::endl ; config_ss << std::endl; for (ParamDesc::iterator iter = param_desc.begin(); iter != param_desc.end(); iter++) { config_ss << " " << iter->second.name << " = "; config_ss << getParameterString(iter->second.default_value, iter->second); config_ss << endl; } config_ss << std::endl; config_ss << " User-defined parameters:" << std::endl; config_ss << " Current_scope:parameter_name(new_scope) = parameter_value" << std::endl; config_ss << std::endl; for (ParamDB::iterator iter = m_params.begin(); iter != m_params.end(); iter++) { // Search for the name in ParamDesc database ParamDesc::iterator desc_iter = param_desc.find(iter->first.second); config_ss << " " ; //if (iter->first.first != "default") config_ss << iter->first.first << ":"; config_ss << iter->first.second; if ( iter->second.first != "default") { config_ss << "(" << iter->second.first << ")"; } config_ss << " = " ; config_ss << getParameterString(iter->second.second, desc_iter->second); config_ss << endl; } 
config_ss << endl; AMGXLOG("AMG Configuration", config_ss.str()) AMGXLOG("Device", deviceProp.name) amgx_output(ss.str().c_str(), ss.str().length()); amgx_output(config_ss.str().c_str(), config_ss.str().length()); //MPI_Barrier( MPI_COMM_WORLD); } AMGX_ERROR AMG_Config::checkString(std::string &str) { string::iterator it; for (it = str.begin(); it < str.end(); it++) { if (!allowed_symbol(*it)) { return AMGX_ERR_CONFIGURATION; } } // check if string is empty if (str.find_first_not_of(' ') == std::string::npos) { return AMGX_ERR_CONFIGURATION; } return AMGX_OK; } void AMG_Config::extractParamInfo(const string &str, string &name, string &value, string &current_scope, string &new_scope) { string tmp(str); //locate the split if ( std::count(tmp.begin(), tmp.end(), '=') != 1) { tmp = "Incorrect config entry (number of equal signs is not 1) : " + str; FatalError(tmp.c_str(), AMGX_ERR_CONFIGURATION); } size_t split_loc = tmp.find("="); value = tmp.substr(split_loc + 1); name = tmp.substr(0, split_loc); // Extract the new scope int num_left_brackets = std::count(name.begin(), name.end(), '('); if ( num_left_brackets == std::count(name.begin(), name.end(), ')') && (num_left_brackets == 0 || num_left_brackets == 1) ) { if (num_left_brackets == 0) { new_scope = "default"; } else if (num_left_brackets == 1) { size_t split_loc_l = name.find("("); size_t split_loc_r = name.find(")"); new_scope = name.substr(split_loc_l + 1, split_loc_r - split_loc_l - 1); name = name.substr(0, split_loc_l); if (checkString(trim(new_scope)) != AMGX_OK) { std::string tmp = "Incorrect config entry (invalid symbol or empty string after trimming new_scope): " + str; FatalError(tmp.c_str(), AMGX_ERR_CONFIGURATION); } if (new_scope == "default") { tmp = "Incorrect config entry (new scope cannot be default scope): " + str; FatalError(tmp.c_str(), AMGX_ERR_CONFIGURATION); } } } else { tmp = "Incorrect config entry (incorrect number of parentheses or unbalanced parantheses): " + str; FatalError(tmp.c_str(), AMGX_ERR_CONFIGURATION); } // Extract current scope and name split_loc = name.find(":"); int num_colon = std::count(name.begin(), name.end(), ':'); if (num_colon == 0) { // do nothing, will check later if name==solver current_scope = "default"; } else if (num_colon == 1) { size_t split_loc_l = name.find(":"); current_scope = name.substr(0, split_loc_l); name = name.substr(split_loc_l + 1); if (checkString(trim(current_scope)) != AMGX_OK) { std::string tmp = "Incorrect config entry (invalid string or empty string after trimming current_scope): " + str; FatalError(tmp.c_str(), AMGX_ERR_CONFIGURATION); } } else { tmp = "Incorrect config entry (number of colons is > 1): " + str; FatalError(tmp.c_str(), AMGX_ERR_CONFIGURATION); } // ----------------- // strip strings // ----------------- if (checkString(trim(value)) != AMGX_OK || checkString(trim(name)) != AMGX_OK) { std::string tmp = "Incorrect config entry (invalid string or empty string after stripping name or value): " + str; FatalError(tmp.c_str(), AMGX_ERR_CONFIGURATION); } } void AMG_Config::setParameter(const string &str) { string name, value, current_scope, new_scope; string tmp; extractParamInfo(str, name, value, current_scope, new_scope); // Add new_scope to scope vector if ( find(m_scope_vector.begin(), m_scope_vector.end(), new_scope) == m_scope_vector.end()) { m_scope_vector.push_back(new_scope); } else if (new_scope != "default" && !getAllowConfigurationMod()) { tmp = "Incorrect config entry (new scope already defined): " + str; FatalError(tmp.c_str(), 
AMGX_ERR_CONFIGURATION); } // extract the name, value, new_scope and old_scope //verify parameter was registered ParamDesc::iterator iter = param_desc.find(string(name)); if (iter == param_desc.end()) { string err = "Variable '" + string(name) + "' not registered"; FatalError(err.c_str(), AMGX_ERR_CONFIGURATION); } if ( (name == "determinism_flag" || name == "block_format" || name == "separation_interior" || name == "separation_exterior" || name == "min_rows_latency_hiding" || name == "fine_level_consolidation" || name == "use_cuda_ipc_consolidation") && current_scope != "default" ) { string err = "Incorrect config entry. Parameter " + name + " can only be specified with default scope."; FatalError(err.c_str(), AMGX_ERR_CONFIGURATION); } // Check that new scope is only associated with a solver if (new_scope != "default" && find(m_solver_list.begin(), m_solver_list.end(), name) == m_solver_list.end() ) { string err = "Incorrect config entry. New scope can only be associated with a solver. new_scope=" + new_scope + ", name=" + name + "."; FatalError(err.c_str(), AMGX_ERR_CONFIGURATION); } // Set the new parameter name, value if (*(iter->second.type) == typeid(string)) { setParameter(name, value, current_scope, new_scope); } else if (*(iter->second.type) == typeid(int)) { setParameter(name, getValue<int>(value.c_str()), current_scope, new_scope); } else if (*(iter->second.type) == typeid(size_t)) { setParameter(name, getValue<size_t>(value.c_str()), current_scope, new_scope); } else if (*(iter->second.type) == typeid(float)) { setParameter(name, getValue<float>(value.c_str()), current_scope, new_scope); } else if (*(iter->second.type) == typeid(double)) { setParameter(name, getValue<double>(value.c_str()), current_scope, new_scope); } else if (*(iter->second.type) == typeid(AlgorithmType)) { setParameter(name, getValue<AlgorithmType>(value.c_str()), current_scope, new_scope); } else if (*(iter->second.type) == typeid(ViewType)) { setParameter(name, getValue<ViewType>(value.c_str()), current_scope, new_scope); } else if (*(iter->second.type) == typeid(ColoringType)) { setParameter(name, getValue<ColoringType>(value.c_str()), current_scope, new_scope); } else if (*(iter->second.type) == typeid(BlockFormat)) { setParameter(name, getValue<BlockFormat>(value.c_str()), current_scope, new_scope); } else if (*(iter->second.type) == typeid(NormType)) { setParameter(name, getValue<NormType>(value.c_str()), current_scope, new_scope); } else { string err = "getValue is not implemented for the datatype of variable '" + name + "'"; FatalError(err.c_str(), AMGX_ERR_CONFIGURATION); } } AMG_Config::AMG_Config() : ref_count(1), m_latest_config_version(2), m_config_version(0), m_allow_cfg_mod(0) { m_scope_vector.push_back("default"); m_solver_list.push_back("solver"); m_solver_list.push_back("preconditioner"); m_solver_list.push_back("smoother"); m_solver_list.push_back("coarse_solver"); m_solver_list.push_back("cpr_first_stage_preconditioner"); m_solver_list.push_back("cpr_second_stage_preconditioner"); } void AMG_Config::clear() { m_params.clear(); m_scope_vector.clear(); m_scope_vector.push_back("default"); } // Template specialization template string AMG_Config::getParameter(const std::string &, const std::string &) const; template AlgorithmType AMG_Config::getParameter(const std::string &, const std::string &) const; template ViewType AMG_Config::getParameter(const std::string &, const std::string &) const; template ColoringType AMG_Config::getParameter(const std::string &, const std::string &) const; 
template BlockFormat AMG_Config::getParameter(const std::string &, const std::string &) const; template NormType AMG_Config::getParameter(const std::string &, const std::string &) const; template int AMG_Config::getParameter(const std::string &, const std::string &) const; template size_t AMG_Config::getParameter(const std::string &, const std::string &) const; template float AMG_Config::getParameter(const std::string &, const std::string &) const; template double AMG_Config::getParameter(const std::string &, const std::string &) const; template void *AMG_Config::getParameter(const std::string &, const std::string &) const; template void AMG_Config::getParameter(const std::string &, std::string &, const std::string &, std::string &) const; template void AMG_Config::getParameter(const std::string &, ViewType &, const std::string &, std::string &) const; template void AMG_Config::getParameter(const std::string &, ColoringType &, const std::string &, std::string &) const; template void AMG_Config::getParameter(const std::string &, AlgorithmType &, const std::string &, std::string &) const; template void AMG_Config::getParameter(const std::string &, BlockFormat &, const std::string &, std::string &) const; template void AMG_Config::getParameter(const std::string &, NormType &, const std::string &, std::string &) const; template void AMG_Config::getParameter(const std::string &, int &, const std::string &, std::string &) const; template void AMG_Config::getParameter(const std::string &, size_t &, const std::string &, std::string &) const; template void AMG_Config::getParameter(const std::string &, float &, const std::string &, std::string &) const; template void AMG_Config::getParameter(const std::string &, double &, const std::string &, std::string &) const; template void AMG_Config::getParameter(const std::string &, void *&, const std::string &, std::string &) const; template void AMG_Config::setParameter(std::string, std::string, const std::string &) ; template void AMG_Config::setParameter(std::string, AlgorithmType, const std::string &) ; template void AMG_Config::setParameter(std::string, ViewType, const std::string &) ; template void AMG_Config::setParameter(std::string, ColoringType, const std::string &) ; template void AMG_Config::setParameter(std::string, BlockFormat, const std::string &) ; template void AMG_Config::setParameter(std::string, NormType, const std::string &) ; template void AMG_Config::setParameter(std::string, int, const std::string &) ; template void AMG_Config::setParameter(std::string, size_t, const std::string &) ; template void AMG_Config::setParameter(std::string, float, const std::string &) ; template void AMG_Config::setParameter(std::string, double, const std::string &) ; template void AMG_Config::setParameter(std::string, std::string, const std::string &, const std::string &) ; template void AMG_Config::setParameter(std::string, AlgorithmType, const std::string &, const std::string &) ; template void AMG_Config::setParameter(std::string, ViewType, const std::string &, const std::string &) ; template void AMG_Config::setParameter(std::string, ColoringType, const std::string &, const std::string &) ; template void AMG_Config::setParameter(std::string, BlockFormat, const std::string &, const std::string &) ; template void AMG_Config::setParameter(std::string, NormType, const std::string &, const std::string & ) ; template void AMG_Config::setParameter(std::string, int, const std::string &, const std::string &) ; template void AMG_Config::setParameter(std::string, 
size_t, const std::string &, const std::string &) ; template void AMG_Config::setParameter(std::string, float, const std::string &, const std::string &) ; template void AMG_Config::setParameter(std::string, double, const std::string &, const std::string &) ; AMG_Configuration::AMG_Configuration() { amg_config = new AMG_Config; }; AMG_Configuration::AMG_Configuration(const AMG_Configuration &cfg) { amg_config = cfg.amg_config; amg_config->ref_count++; } AMG_Configuration &AMG_Configuration::operator=(const AMG_Configuration &cfg) { if (this != &cfg) { if (--amg_config->ref_count == 0) { delete amg_config; } amg_config = cfg.amg_config; amg_config->ref_count++; } return *this; } AMG_Configuration::~AMG_Configuration() { if (--amg_config->ref_count == 0) { delete amg_config; } }; /******************************************** * Gets a parameter from the database and * throws an exception if it does not exist. *********************************************/ template <typename Type> Type AMG_Configuration::getParameter(std::string name) const {return amg_config->getParameter<Type>(name, "default");} /********************************************** * Sets a parameter in the database * throws an exception if it does not exist. *********************************************/ template <typename Type> void AMG_Configuration::setParameter(std::string name, Type value, const std::string &current_scope) {amg_config->setParameter(name, value, current_scope);} template <> void AMG_Configuration::setParameter(std::string name, int value, const std::string &current_scope) {amg_config->setParameter(name, value, current_scope);} template <> void AMG_Configuration::setParameter(std::string name, double value, const std::string &current_scope) {amg_config->setParameter(name, value, current_scope);} template <> void AMG_Configuration::setParameter(std::string name, void *value, const std::string &current_scope) {amg_config->setParameter(name, value, current_scope);} /**************************************************** * Parse parameters string * scope:name(new_scope)=value, scope:name(new_scope)=value, ..., scope:name(new_scope)=value * and store the variables in the parameter database ****************************************************/ AMGX_ERROR AMG_Configuration::parseParameterString(const char *str) {return amg_config->parseParameterString(str);} /**************************************************** * Parse a config file in the format * scope:name(new_scope)=value * scope:name(new_scope)=value * ... * scope:name(new_scope)=value * and store the variables in the parameter database ****************************************************/ AMGX_ERROR AMG_Configuration::parseFile(const char *filename) {return amg_config->parseFile(filename); } /**************************************************** * Parse parameters string and config file * scope:name(new_scope)=value, scope:name(new_scope)=value, ..., scope:name(new_scope)=value * and store the variables in the parameter database ****************************************************/ AMGX_ERROR AMG_Configuration::parseParameterStringAndFile(const char *str, const char *filename) {return amg_config->parseParameterStringAndFile(str, filename);} /**************************************************** * Print the options for AMG ***************************************************/ void AMG_Configuration::printOptions() {AMG_Config::printOptions(); } } // namespace amgx
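The configuration entries handled above follow the grammar current_scope:name(new_scope)=value, and a new scope may only be introduced on a solver-type parameter (see the check against m_solver_list in AMG_Config::setParameter(const string &)). Below is a minimal host-side sketch of how this API could be driven; the header path and the concrete parameter, scope, and value strings are illustrative assumptions, not taken from this file.

// Hypothetical usage sketch for AMG_Configuration. The header name and the
// parameter names ("max_iters", "tolerance"), scope name ("pc"), and solver
// value ("AMG") are assumed for illustration only.
#include "amg_config.h"   // assumed header exposing amgx::AMG_Configuration

int main()
{
    amgx::AMG_Configuration cfg;

    // Entries follow current_scope:name(new_scope)=value, as parsed by
    // AMG_Config::extractParamInfo(). "solver(pc)=..." introduces the new
    // scope "pc", which later entries qualify with the "pc:" prefix.
    if (cfg.parseParameterString("solver(pc)=AMG, pc:max_iters=100") != AMGX_OK)
    {
        return 1;
    }

    // Parameters can also be set programmatically in a given scope; this uses
    // the double specialization of AMG_Configuration::setParameter above.
    cfg.setParameter("tolerance", 1e-8, "pc");
    return 0;
}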
static inline void THNN_(SpatialConvolutionLocal_shapeCheck)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *weight, THCTensor *bias, int kH, int kW, int dH, int dW, int padH, int padW, long inputHeight, long inputWidth, long outputHeight, long outputWidth) { THArgCheck(kW > 0 && kH > 0, 9, "kernel size should be greater than zero, but got kH: %d kW: %d", kH, kW); THArgCheck(dW > 0 && dH > 0, 11, "stride should be greater than zero, but got dH: %d dW: %d", dH, dW); int ndim = input->nDimension; int dimf = 0; int dimh = 1; int dimw = 2; if (ndim == 4) { dimf++; dimh++; dimw++; } THCUNN_argCheck(state, ndim == 3 || ndim == 4, 2, input, "3D or 4D input tensor expected but got: %s"); long nInputPlane = weight->size[2] / (kH * kW); long nOutputPlane = weight->size[1]; if (bias != NULL) { THCUNN_check_dim_size(state, bias, 3, 0, nOutputPlane); THCUNN_check_dim_size(state, bias, 3, 1, outputHeight); THCUNN_check_dim_size(state, bias, 3, 2, outputWidth); } THCUNN_check_dim_size(state, input, ndim, dimf, nInputPlane); if (gradOutput != NULL) { THCUNN_check_dim_size(state, gradOutput, ndim, dimf, nOutputPlane); THCUNN_check_dim_size(state, gradOutput, ndim, dimh, outputHeight); THCUNN_check_dim_size(state, gradOutput, ndim, dimw, outputWidth); } } static THCTensor* THNN_(view_weight_local)( THCState *state, THCTensor *_weight) { THCTensor *weight = THCTensor_(newContiguous)(state, _weight); THArgCheck(weight->nDimension == 3 || weight->nDimension == 6, 4, "weight tensor should be 3D or 6D - got %dD", weight->nDimension); if (weight->nDimension == 6) { long s1 = weight->size[0] * weight->size[1]; long s2 = weight->size[2]; long s3 = weight->size[3] * weight->size[4] * weight->size[5]; THCTensor *old_weight = weight; weight = THCTensor_(newWithStorage3d)(state, weight->storage, weight->storageOffset, s1, -1, s2, -1, s3, -1); THCTensor_(free)(state, old_weight); } return weight; } void THNN_(SpatialConvolutionLocal_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, THCTensor *weight, THCTensor *bias, THCTensor *finput, THCTensor *fgradInput, int kW, int kH, int dW, int dH, int padW, int padH, long inputWidth, long inputHeight, long outputWidth, long outputHeight) { THCUNN_assertSameGPU(state, 5, input, output, weight, bias, finput); weight = THNN_(view_weight_local)(state, weight); THNN_(SpatialConvolutionLocal_shapeCheck) (state, input, NULL, weight, bias, kH, kW, dH, dW, padH, padW, inputHeight, inputWidth, outputHeight, outputWidth); input = THCTensor_(newContiguous)(state, input); long nInputPlane = THCTensor_(size)(state,weight,2)/(kW*kH); long nOutputPlane = THCTensor_(size)(state,weight,1); int batch = 1; if (input->nDimension == 3) { // Force batch batch = 0; THCTensor_(resize4d)(state, input, 1, nInputPlane, inputHeight, inputWidth); } // Batch size + input planes long batchSize = input->size[0]; // Resize output THCTensor_(resize4d)(state, output, batchSize, nOutputPlane, outputHeight, outputWidth); // Augment the input THCTensor_(resize3d)(state, finput, batchSize, nInputPlane*kW*kH, outputHeight*outputWidth); // Helpers THCTensor *input_n = THCTensor_(new)(state); THCTensor *finput_n = THCTensor_(new)(state); THCTensor *output_n = THCTensor_(new)(state); // For each elt in batch, do: for (int elt = 0; elt < batchSize; elt ++) { THCTensor *finput3d, *output3d; THCTensor *wslice = THCTensor_(new)(state); THCTensor *islice = THCTensor_(new)(state); THCTensor *oslice = THCTensor_(new)(state); // Matrix mulitply per output: THCTensor_(select)(state, 
input_n, input, 0, elt); THCTensor_(select)(state, finput_n, finput, 0, elt); THCTensor_(select)(state, output_n, output, 0, elt); // Extract columns: im2col( THCState_getCurrentStream(state), THCTensor_(data)(state, input_n), nInputPlane, inputHeight, inputWidth, kH, kW, padH, padW, dH, dW, 1, 1, THCTensor_(data)(state, finput_n) ); output3d = THCTensor_(newWithStorage3d)(state, output_n->storage, output_n->storageOffset, outputHeight*outputWidth, 1, nOutputPlane, outputHeight*outputWidth, 1, nOutputPlane*outputHeight*outputWidth); finput3d = THCTensor_(newWithStorage3d)(state, finput_n->storage, finput_n->storageOffset, outputHeight*outputWidth, 1, kW*kH*nInputPlane, outputHeight*outputWidth, 1, kW*kH*nInputPlane*outputHeight*outputWidth); THCTensor_(copy)(state, output_n, bias); // weight: oH*oW x nOutputPlane x nInputPlane*kH*kW // finput3d: oH*oW x nInputPlane*kH*kW x 1 THCTensor_(baddbmm)(state, output3d, ScalarConvert<int, real>::to(1), output3d, ScalarConvert<int, real>::to(1), weight, finput3d); // output3d: oH*oW x nOutputPlane x 1 THCTensor_(free)(state, output3d); THCTensor_(free)(state, finput3d); THCTensor_(free)(state, wslice); THCTensor_(free)(state, islice); THCTensor_(free)(state, oslice); } // Free THCTensor_(free)(state, input_n); THCTensor_(free)(state, finput_n); THCTensor_(free)(state, output_n); // Resize output if (batch == 0) { THCTensor_(resize3d)(state, output, nOutputPlane, outputHeight, outputWidth); THCTensor_(resize3d)(state, input, nInputPlane, inputHeight, inputWidth); } THCTensor_(free)(state, input); THCTensor_(free)(state, weight); } void THNN_(SpatialConvolutionLocal_updateGradInput)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradInput, THCTensor *weight, THCTensor *finput, THCTensor *fgradInput, int kW, int kH, int dW, int dH, int padW, int padH, long inputWidth, long inputHeight, long outputWidth, long outputHeight) { THCUNN_assertSameGPU(state, 5, input, gradOutput, weight, fgradInput, gradInput); weight = THNN_(view_weight_local)(state, weight); THNN_(SpatialConvolutionLocal_shapeCheck) (state, input, gradOutput, weight, NULL, kH, kW, dH, dW, padH, padW, inputHeight, inputWidth, outputHeight, outputWidth); input = THCTensor_(newContiguous)(state, input); gradOutput = THCTensor_(newContiguous)(state, gradOutput); long nInputPlane = THCTensor_(size)(state,weight,2)/(kW*kH); long nOutputPlane = THCTensor_(size)(state,weight,1); int batch = 1; if (input->nDimension == 3) { // Force batch batch = 0; THCTensor_(resize4d)(state, input, 1, nInputPlane, inputHeight, inputWidth); THCTensor_(resize4d)(state, gradOutput, 1, nOutputPlane, outputHeight, outputWidth); } // Batch size + input planes long batchSize = input->size[0]; // Resize output THCTensor_(resize4d)(state, gradInput, batchSize, nInputPlane, inputHeight, inputWidth); // Resize temporary columns THCTensor_(resize3d)(state, fgradInput, batchSize, nInputPlane*kW*kH, outputHeight*outputWidth); // Helpers THCTensor *gradInput_n = THCTensor_(new)(state); THCTensor *fgradInput_n = THCTensor_(new)(state); THCTensor *gradOutput_n = THCTensor_(new)(state); THCTensor *tweight = THCTensor_(new)(state); THCTensor_(transpose)(state, tweight, weight, 1, 2); // For each elt in batch, do: for (int elt = 0; elt < batchSize; elt ++) { THCTensor *gradOutput3d, *fgradInput3d; THCTensor *wslice = THCTensor_(new)(state); THCTensor *gislice = THCTensor_(new)(state); THCTensor *goslice = THCTensor_(new)(state); // Matrix mulitply per sample: THCTensor_(select)(state, gradInput_n, gradInput, 0, 
elt); THCTensor_(select)(state, fgradInput_n, fgradInput, 0, elt); THCTensor_(select)(state, gradOutput_n, gradOutput, 0, elt); gradOutput3d = THCTensor_(newWithStorage3d)(state, gradOutput_n->storage, gradOutput_n->storageOffset, outputHeight*outputWidth, 1, nOutputPlane, outputHeight*outputWidth, 1, nOutputPlane*outputHeight*outputWidth); fgradInput3d = THCTensor_(newWithStorage3d)(state, fgradInput_n->storage, fgradInput_n->storageOffset, outputHeight*outputWidth, 1, kW*kH*nInputPlane, outputHeight*outputWidth, 1, kW*kH*nInputPlane*outputHeight*outputWidth); // weight: oH*oW x nInputPlane*kH*kW x nOutputPlane // gradOutput3d: oH*oW x nOutputPlane x 1 THCTensor_(baddbmm)(state, fgradInput3d, ScalarConvert<int, real>::to(0), fgradInput3d, ScalarConvert<int, real>::to(1), tweight, gradOutput3d); // fgradInput3d: oH*oW x nInputPlane*kH*kW x 1 // Unpack columns back into input: col2im<real, accreal>( THCState_getCurrentStream(state), THCTensor_(data)(state, fgradInput_n), nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, padH, padW, dH, dW, 1, 1, THCTensor_(data)(state, gradInput_n) ); THCTensor_(free)(state, gradOutput3d); THCTensor_(free)(state, fgradInput3d); THCTensor_(free)(state, wslice); THCTensor_(free)(state, gislice); THCTensor_(free)(state, goslice); } // Free THCTensor_(free)(state, gradInput_n); THCTensor_(free)(state, fgradInput_n); THCTensor_(free)(state, gradOutput_n); // Resize output if (batch == 0) { THCTensor_(resize3d)(state, gradOutput, nOutputPlane, outputHeight, outputWidth); THCTensor_(resize3d)(state, input, nInputPlane, inputHeight, inputWidth); THCTensor_(resize3d)(state, gradInput, nInputPlane, inputHeight, inputWidth); } THCTensor_(free)(state, tweight); THCTensor_(free)(state, input); THCTensor_(free)(state, gradOutput); THCTensor_(free)(state, weight); } void THNN_(SpatialConvolutionLocal_accGradParameters)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradWeight, THCTensor *gradBias, THCTensor *finput, THCTensor *fgradInput, int kW, int kH, int dW, int dH, int padW, int padH, long inputWidth, long inputHeight, long outputWidth, long outputHeight, accreal scale_) { real scale = ScalarConvert<accreal, real>::to(scale_); THCUNN_assertSameGPU(state, 5, input, gradOutput, gradWeight, gradBias, finput); THArgCheck(THCTensor_(isContiguous)(state, gradWeight), 4, "gradWeight needs to be contiguous"); THArgCheck(THCTensor_(isContiguous)(state, gradBias), 5, "gradBias needs to be contiguous"); gradWeight = THNN_(view_weight_local)(state, gradWeight); THNN_(SpatialConvolutionLocal_shapeCheck) (state, input, gradOutput, gradWeight, gradBias, kH, kW, dH, dW, padH, padW, inputHeight, inputWidth, outputHeight, outputWidth); input = THCTensor_(newContiguous)(state, input); gradOutput = THCTensor_(newContiguous)(state, gradOutput); long nInputPlane = THCTensor_(size)(state,gradWeight,2)/(kW*kH); long nOutputPlane = THCTensor_(size)(state,gradWeight,1); int batch = 1; if (input->nDimension == 3) { // Force batch batch = 0; THCTensor_(resize4d)(state, input, 1, nInputPlane, inputHeight, inputWidth); THCTensor_(resize4d)(state, gradOutput, 1, nOutputPlane, outputHeight, outputWidth); } // Batch size + input planes long batchSize = input->size[0]; // Helpers THCTensor *input_n = THCTensor_(new)(state); THCTensor *finput_n = THCTensor_(new)(state); THCTensor *gradOutput_n = THCTensor_(new)(state); // For each elt in batch, do: for (int elt = 0; elt < batchSize; elt ++) { THCTensor *gradOutput3d, *finput3d; THCTensor *gwslice = 
THCTensor_(new)(state); THCTensor *islice = THCTensor_(new)(state); THCTensor *goslice = THCTensor_(new)(state); // Matrix mulitply per output: THCTensor_(select)(state, input_n, input, 0, elt); THCTensor_(select)(state, finput_n, finput, 0, elt); THCTensor_(select)(state, gradOutput_n, gradOutput, 0, elt); gradOutput3d = THCTensor_(newWithStorage3d)(state, gradOutput_n->storage, gradOutput_n->storageOffset, outputHeight*outputWidth, 1, nOutputPlane, outputHeight*outputWidth, 1, nOutputPlane*outputHeight*outputWidth); finput3d = THCTensor_(newWithStorage3d)(state, finput_n->storage, finput_n->storageOffset, outputHeight*outputWidth, 1, 1, kW*kH*nInputPlane*outputHeight*outputWidth, kW*kH*nInputPlane, outputHeight*outputWidth); // Extract columns: im2col( THCState_getCurrentStream(state), THCTensor_(data)(state, input_n), nInputPlane, inputHeight, inputWidth, kH, kW, padH, padW, dH, dW, 1, 1, THCTensor_(data)(state, finput_n) ); // gradOutput3d: oH*oW x nOutputPlane x 1 // finput3d: oH*oW x 1 x kW*kH*nInputPlane THCTensor_(baddbmm)(state, gradWeight, ScalarConvert<int, real>::to(1), gradWeight, scale, gradOutput3d, finput3d); // gradWeight: oH*oW x nOutputPlane x kW*kH*nInputPlane THCTensor_(cadd)(state, gradBias, gradBias, scale, gradOutput_n); THCTensor_(free)(state, gradOutput3d); THCTensor_(free)(state, finput3d); THCTensor_(free)(state, gwslice); THCTensor_(free)(state, goslice); THCTensor_(free)(state, islice); } // Free THCTensor_(free)(state, input_n); THCTensor_(free)(state, finput_n); THCTensor_(free)(state, gradOutput_n); // Resize if (batch == 0) { THCTensor_(resize3d)(state, gradOutput, nOutputPlane, outputHeight, outputWidth); THCTensor_(resize3d)(state, input, nInputPlane, inputHeight, inputWidth); } THCTensor_(free)(state, input); THCTensor_(free)(state, gradOutput); THCTensor_(free)(state, gradWeight); } #endif
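In SpatialConvolutionLocal_updateOutput above, the 6D locally-connected weight is flattened by view_weight_local into a batch of small matrices and multiplied against the im2col buffer one output location at a time via baddbmm. The sketch below only replays that shape bookkeeping on the host; the tensor sizes are illustrative assumptions, and the output-size formula assumes the usual floor((in + 2*pad - k)/stride) + 1 convention.

// Host-side sketch of the 3D views used by the baddbmm calls above; all sizes
// are illustrative assumptions.
#include <cstdio>

int main()
{
    const long nInputPlane = 3, nOutputPlane = 8;
    const long inputHeight = 32, inputWidth = 32;
    const long kH = 3, kW = 3, dH = 1, dW = 1, padH = 1, padW = 1;

    const long outputHeight = (inputHeight + 2 * padH - kH) / dH + 1;
    const long outputWidth  = (inputWidth  + 2 * padW - kW) / dW + 1;

    // view_weight_local() collapses the 6D weight into this 3D view:
    //   (oH*oW) x nOutputPlane x (nInputPlane*kH*kW)
    // The per-sample im2col buffer (finput) is viewed as:
    //   (oH*oW) x (nInputPlane*kH*kW) x 1
    // so baddbmm produces output3d of shape (oH*oW) x nOutputPlane x 1,
    // i.e. one independent small GEMM per output location.
    std::printf("weight view : %ld x %ld x %ld\n",
                outputHeight * outputWidth, nOutputPlane, nInputPlane * kH * kW);
    std::printf("finput view : %ld x %ld x 1\n",
                outputHeight * outputWidth, nInputPlane * kH * kW);
    std::printf("output view : %ld x %ld x 1\n",
                outputHeight * outputWidth, nOutputPlane);
    return 0;
}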
#include <map> #include "cuda_runtime.h" #include "miner.h" #include "salsa_kernel.h" #include "kepler_kernel.h" #define TEXWIDTH 32768 #define THREADS_PER_WU 4 // four threads per hash typedef enum { ANDERSEN, SIMPLE } MemoryAccess; // scratchbuf constants (pointers to scratch buffer for each warp, i.e. 32 hashes) __constant__ uint32_t* c_V[TOTAL_WARP_LIMIT]; // iteration count N __constant__ uint32_t c_N; __constant__ uint32_t c_N_1; // N-1 // scratch buffer size SCRATCH __constant__ uint32_t c_SCRATCH; __constant__ uint32_t c_SCRATCH_WU_PER_WARP; // (SCRATCH * WU_PER_WARP) __constant__ uint32_t c_SCRATCH_WU_PER_WARP_1; // (SCRATCH * WU_PER_WARP) - 1 // using texture references for the "tex" variants of the B kernels texture<uint4, 1, cudaReadModeElementType> texRef1D_4_V; texture<uint4, 2, cudaReadModeElementType> texRef2D_4_V; template <int ALGO> __device__ __forceinline__ void block_mixer(uint4 &b, uint4 &bx, const int x1, const int x2, const int x3); static __host__ __device__ uint4& operator ^= (uint4& left, const uint4& right) { left.x ^= right.x; left.y ^= right.y; left.z ^= right.z; left.w ^= right.w; return left; } static __host__ __device__ uint4& operator += (uint4& left, const uint4& right) { left.x += right.x; left.y += right.y; left.z += right.z; left.w += right.w; return left; } static __device__ uint4 __shfl(const uint4 bx, int target_thread) { return make_uint4( __shfl((int)bx.x, target_thread), __shfl((int)bx.y, target_thread), __shfl((int)bx.z, target_thread), __shfl((int)bx.w, target_thread) ); } /* write_keys writes the 8 keys being processed by a warp to the global * scratchpad. To effectively use memory bandwidth, it performs the writes * (and reads, for read_keys) 128 bytes at a time per memory location * by __shfl'ing the 4 entries in bx to the threads in the next-up * thread group. It then has eight threads together perform uint4 * (128 bit) writes to the destination region. This seems to make * quite effective use of memory bandwidth. An approach that spread * uint32s across more threads was slower because of the increased * computation it required. * * "start" is the loop iteration producing the write - the offset within * the block's memory. * * Internally, this algorithm first __shfl's the 4 bx entries to * the next up thread group, and then uses a conditional move to * ensure that odd-numbered thread groups exchange the b/bx ordering * so that the right parts are written together. * * Thanks to Babu for helping design the 128-bit-per-write version. * * _direct lets the caller specify the absolute start location instead of * the relative start location, as an attempt to reduce some recomputation. */ template <MemoryAccess SCHEME> __device__ __forceinline__ void write_keys_direct(const uint4 &b, const uint4 &bx, uint32_t start) { uint32_t *scratch = c_V[(blockIdx.x*blockDim.x + threadIdx.x)/32]; if (SCHEME == ANDERSEN) { int target_thread = (threadIdx.x + 4)%32; uint4 t=b, t2=__shfl(bx, target_thread); int t2_start = __shfl((int)start, target_thread) + 4; bool c = (threadIdx.x & 0x4); *((uint4 *)(&scratch[c ? t2_start : start])) = (c ? t2 : t); *((uint4 *)(&scratch[c ? start : t2_start])) = (c ? 
t : t2); } else if (SCHEME == SIMPLE) { *((uint4 *)(&scratch[start ])) = b; *((uint4 *)(&scratch[start+16])) = bx; } } template <MemoryAccess SCHEME, int TEX_DIM> __device__ __forceinline__ void read_keys_direct(uint4 &b, uint4 &bx, uint32_t start) { uint32_t *scratch; if (TEX_DIM == 0) scratch = c_V[(blockIdx.x*blockDim.x + threadIdx.x)/32]; if (SCHEME == ANDERSEN) { int t2_start = __shfl((int)start, (threadIdx.x + 4)%32) + 4; if (TEX_DIM > 0) { start /= 4; t2_start /= 4; } bool c = (threadIdx.x & 0x4); if (TEX_DIM == 0) { b = *((uint4 *)(&scratch[c ? t2_start : start])); bx = *((uint4 *)(&scratch[c ? start : t2_start])); } else if (TEX_DIM == 1) { b = tex1Dfetch(texRef1D_4_V, c ? t2_start : start); bx = tex1Dfetch(texRef1D_4_V, c ? start : t2_start); } else if (TEX_DIM == 2) { b = tex2D(texRef2D_4_V, 0.5f + ((c ? t2_start : start)%TEXWIDTH), 0.5f + ((c ? t2_start : start)/TEXWIDTH)); bx = tex2D(texRef2D_4_V, 0.5f + ((c ? start : t2_start)%TEXWIDTH), 0.5f + ((c ? start : t2_start)/TEXWIDTH)); } uint4 tmp = b; b = (c ? bx : b); bx = (c ? tmp : bx); bx = __shfl(bx, (threadIdx.x + 28)%32); } else { if (TEX_DIM == 0) b = *((uint4 *)(&scratch[start])); else if (TEX_DIM == 1) b = tex1Dfetch(texRef1D_4_V, start/4); else if (TEX_DIM == 2) b = tex2D(texRef2D_4_V, 0.5f + ((start/4)%TEXWIDTH), 0.5f + ((start/4)/TEXWIDTH)); if (TEX_DIM == 0) bx = *((uint4 *)(&scratch[start+16])); else if (TEX_DIM == 1) bx = tex1Dfetch(texRef1D_4_V, (start+16)/4); else if (TEX_DIM == 2) bx = tex2D(texRef2D_4_V, 0.5f + (((start+16)/4)%TEXWIDTH), 0.5f + (((start+16)/4)/TEXWIDTH)); } } __device__ __forceinline__ void primary_order_shuffle(uint4 &b, uint4 &bx) { /* Inner loop shuffle targets */ int x1 = (threadIdx.x & 0x1c) + (((threadIdx.x & 0x03)+1)&0x3); int x2 = (threadIdx.x & 0x1c) + (((threadIdx.x & 0x03)+2)&0x3); int x3 = (threadIdx.x & 0x1c) + (((threadIdx.x & 0x03)+3)&0x3); b.w = __shfl((int)b.w, x1); b.z = __shfl((int)b.z, x2); b.y = __shfl((int)b.y, x3); uint32_t tmp = b.y; b.y = b.w; b.w = tmp; bx.w = __shfl((int)bx.w, x1); bx.z = __shfl((int)bx.z, x2); bx.y = __shfl((int)bx.y, x3); tmp = bx.y; bx.y = bx.w; bx.w = tmp; } /* * load_key loads a 32*32bit key from a contiguous region of memory in B. * The input keys are in external order (i.e., 0, 1, 2, 3, ...). * After loading, each thread has its four b and four bx keys stored * in internal processing order. */ __device__ __forceinline__ void load_key_salsa(const uint32_t *B, uint4 &b, uint4 &bx) { int scrypt_block = (blockIdx.x*blockDim.x + threadIdx.x)/THREADS_PER_WU; int key_offset = scrypt_block * 32; uint32_t thread_in_block = threadIdx.x % 4; // Read in permuted order. Key loads are not our bottleneck right now. b.x = B[key_offset + 4*thread_in_block + (thread_in_block+0)%4]; b.y = B[key_offset + 4*thread_in_block + (thread_in_block+1)%4]; b.z = B[key_offset + 4*thread_in_block + (thread_in_block+2)%4]; b.w = B[key_offset + 4*thread_in_block + (thread_in_block+3)%4]; bx.x = B[key_offset + 4*thread_in_block + (thread_in_block+0)%4 + 16]; bx.y = B[key_offset + 4*thread_in_block + (thread_in_block+1)%4 + 16]; bx.z = B[key_offset + 4*thread_in_block + (thread_in_block+2)%4 + 16]; bx.w = B[key_offset + 4*thread_in_block + (thread_in_block+3)%4 + 16]; primary_order_shuffle(b, bx); } /* * store_key performs the opposite transform as load_key, taking * internally-ordered b and bx and storing them into a contiguous * region of B in external order. 
*/ __device__ __forceinline__ void store_key_salsa(uint32_t *B, uint4 &b, uint4 &bx) { int scrypt_block = (blockIdx.x*blockDim.x + threadIdx.x)/THREADS_PER_WU; int key_offset = scrypt_block * 32; uint32_t thread_in_block = threadIdx.x % 4; primary_order_shuffle(b, bx); B[key_offset + 4*thread_in_block + (thread_in_block+0)%4] = b.x; B[key_offset + 4*thread_in_block + (thread_in_block+1)%4] = b.y; B[key_offset + 4*thread_in_block + (thread_in_block+2)%4] = b.z; B[key_offset + 4*thread_in_block + (thread_in_block+3)%4] = b.w; B[key_offset + 4*thread_in_block + (thread_in_block+0)%4 + 16] = bx.x; B[key_offset + 4*thread_in_block + (thread_in_block+1)%4 + 16] = bx.y; B[key_offset + 4*thread_in_block + (thread_in_block+2)%4 + 16] = bx.z; B[key_offset + 4*thread_in_block + (thread_in_block+3)%4 + 16] = bx.w; } /* * load_key loads a 32*32bit key from a contiguous region of memory in B. * The input keys are in external order (i.e., 0, 1, 2, 3, ...). * After loading, each thread has its four b and four bx keys stored * in internal processing order. */ __device__ __forceinline__ void load_key_chacha(const uint32_t *B, uint4 &b, uint4 &bx) { int scrypt_block = (blockIdx.x*blockDim.x + threadIdx.x)/THREADS_PER_WU; int key_offset = scrypt_block * 32; uint32_t thread_in_block = threadIdx.x % 4; // Read in permuted order. Key loads are not our bottleneck right now. b.x = B[key_offset + 4*0 + thread_in_block%4]; b.y = B[key_offset + 4*1 + thread_in_block%4]; b.z = B[key_offset + 4*2 + thread_in_block%4]; b.w = B[key_offset + 4*3 + thread_in_block%4]; bx.x = B[key_offset + 4*0 + thread_in_block%4 + 16]; bx.y = B[key_offset + 4*1 + thread_in_block%4 + 16]; bx.z = B[key_offset + 4*2 + thread_in_block%4 + 16]; bx.w = B[key_offset + 4*3 + thread_in_block%4 + 16]; } /* * store_key performs the opposite transform as load_key, taking * internally-ordered b and bx and storing them into a contiguous * region of B in external order. */ __device__ __forceinline__ void store_key_chacha(uint32_t *B, const uint4 &b, const uint4 &bx) { int scrypt_block = (blockIdx.x*blockDim.x + threadIdx.x)/THREADS_PER_WU; int key_offset = scrypt_block * 32; uint32_t thread_in_block = threadIdx.x % 4; B[key_offset + 4*0 + thread_in_block%4] = b.x; B[key_offset + 4*1 + thread_in_block%4] = b.y; B[key_offset + 4*2 + thread_in_block%4] = b.z; B[key_offset + 4*3 + thread_in_block%4] = b.w; B[key_offset + 4*0 + thread_in_block%4 + 16] = bx.x; B[key_offset + 4*1 + thread_in_block%4 + 16] = bx.y; B[key_offset + 4*2 + thread_in_block%4 + 16] = bx.z; B[key_offset + 4*3 + thread_in_block%4 + 16] = bx.w; } template <int ALGO> __device__ __forceinline__ void load_key(const uint32_t *B, uint4 &b, uint4 &bx) { switch(ALGO) { case A_SCRYPT: load_key_salsa(B, b, bx); break; case A_SCRYPT_JANE: load_key_chacha(B, b, bx); break; } } template <int ALGO> __device__ __forceinline__ void store_key(uint32_t *B, uint4 &b, uint4 &bx) { switch(ALGO) { case A_SCRYPT: store_key_salsa(B, b, bx); break; case A_SCRYPT_JANE: store_key_chacha(B, b, bx); break; } } /* * salsa_xor_core (Salsa20/8 cypher) * The original scrypt called: * xor_salsa8(&X[0], &X[16]); <-- the "b" loop * xor_salsa8(&X[16], &X[0]); <-- the "bx" loop * This version is unrolled to handle both of these loops in a single * call to avoid unnecessary data movement. 
*/ #define XOR_ROTATE_ADD(dst, s1, s2, amt) { uint32_t tmp = s1+s2; dst ^= ((tmp<<amt)|(tmp>>(32-amt))); } __device__ __forceinline__ void salsa_xor_core(uint4 &b, uint4 &bx, const int x1, const int x2, const int x3) { uint4 x; b ^= bx; x = b; // Enter in "primary order" (t0 has 0, 4, 8, 12) // (t1 has 5, 9, 13, 1) // (t2 has 10, 14, 2, 6) // (t3 has 15, 3, 7, 11) #pragma unroll for (int j = 0; j < 4; j++) { // Mixing phase of salsa XOR_ROTATE_ADD(x.y, x.x, x.w, 7); XOR_ROTATE_ADD(x.z, x.y, x.x, 9); XOR_ROTATE_ADD(x.w, x.z, x.y, 13); XOR_ROTATE_ADD(x.x, x.w, x.z, 18); /* Transpose rows and columns. */ /* Unclear if this optimization is needed: These are ordered based * upon the dependencies needed in the later xors. Compiler should be * able to figure this out, but might as well give it a hand. */ x.y = __shfl((int)x.y, x3); x.w = __shfl((int)x.w, x1); x.z = __shfl((int)x.z, x2); /* The next XOR_ROTATE_ADDS could be written to be a copy-paste of the first, * but the register targets are rewritten here to swap x[1] and x[3] so that * they can be directly shuffled to and from our peer threads without * reassignment. The reverse shuffle then puts them back in the right place. */ XOR_ROTATE_ADD(x.w, x.x, x.y, 7); XOR_ROTATE_ADD(x.z, x.w, x.x, 9); XOR_ROTATE_ADD(x.y, x.z, x.w, 13); XOR_ROTATE_ADD(x.x, x.y, x.z, 18); x.w = __shfl((int)x.w, x3); x.y = __shfl((int)x.y, x1); x.z = __shfl((int)x.z, x2); } b += x; // The next two lines are the beginning of the BX-centric loop iteration bx ^= b; x = bx; // This is a copy of the same loop above, identical but stripped of comments. // Duplicated so that we can complete a bx-based loop with fewer register moves. #pragma unroll for (int j = 0; j < 4; j++) { XOR_ROTATE_ADD(x.y, x.x, x.w, 7); XOR_ROTATE_ADD(x.z, x.y, x.x, 9); XOR_ROTATE_ADD(x.w, x.z, x.y, 13); XOR_ROTATE_ADD(x.x, x.w, x.z, 18); x.y = __shfl((int)x.y, x3); x.w = __shfl((int)x.w, x1); x.z = __shfl((int)x.z, x2); XOR_ROTATE_ADD(x.w, x.x, x.y, 7); XOR_ROTATE_ADD(x.z, x.w, x.x, 9); XOR_ROTATE_ADD(x.y, x.z, x.w, 13); XOR_ROTATE_ADD(x.x, x.y, x.z, 18); x.w = __shfl((int)x.w, x3); x.y = __shfl((int)x.y, x1); x.z = __shfl((int)x.z, x2); } // At the end of these iterations, the data is in primary order again. #undef XOR_ROTATE_ADD bx += x; } /* * chacha_xor_core (ChaCha20/8 cypher) * This version is unrolled to handle both of these loops in a single * call to avoid unnecessary data movement. 
* * load_key and store_key must not use primary order when * using ChaCha20/8, but rather the basic transposed order * (referred to as "column mode" below) */ #define CHACHA_PRIMITIVE(pt, rt, ps, amt) { uint32_t tmp = rt ^ (pt += ps); rt = ((tmp<<amt)|(tmp>>(32-amt))); } __device__ __forceinline__ void chacha_xor_core(uint4 &b, uint4 &bx, const int x1, const int x2, const int x3) { uint4 x; b ^= bx; x = b; // Enter in "column" mode (t0 has 0, 4, 8, 12) // (t1 has 1, 5, 9, 13) // (t2 has 2, 6, 10, 14) // (t3 has 3, 7, 11, 15) #pragma unroll for (int j = 0; j < 4; j++) { // Column Mixing phase of chacha CHACHA_PRIMITIVE(x.x ,x.w, x.y, 16) CHACHA_PRIMITIVE(x.z ,x.y, x.w, 12) CHACHA_PRIMITIVE(x.x ,x.w, x.y, 8) CHACHA_PRIMITIVE(x.z ,x.y, x.w, 7) x.y = __shfl((int)x.y, x1); x.z = __shfl((int)x.z, x2); x.w = __shfl((int)x.w, x3); // Diagonal Mixing phase of chacha CHACHA_PRIMITIVE(x.x ,x.w, x.y, 16) CHACHA_PRIMITIVE(x.z ,x.y, x.w, 12) CHACHA_PRIMITIVE(x.x ,x.w, x.y, 8) CHACHA_PRIMITIVE(x.z ,x.y, x.w, 7) x.y = __shfl((int)x.y, x3); x.z = __shfl((int)x.z, x2); x.w = __shfl((int)x.w, x1); } b += x; // The next two lines are the beginning of the BX-centric loop iteration bx ^= b; x = bx; #pragma unroll for (int j = 0; j < 4; j++) { // Column Mixing phase of chacha CHACHA_PRIMITIVE(x.x ,x.w, x.y, 16) CHACHA_PRIMITIVE(x.z ,x.y, x.w, 12) CHACHA_PRIMITIVE(x.x ,x.w, x.y, 8) CHACHA_PRIMITIVE(x.z ,x.y, x.w, 7) x.y = __shfl((int)x.y, x1); x.z = __shfl((int)x.z, x2); x.w = __shfl((int)x.w, x3); // Diagonal Mixing phase of chacha CHACHA_PRIMITIVE(x.x ,x.w, x.y, 16) CHACHA_PRIMITIVE(x.z ,x.y, x.w, 12) CHACHA_PRIMITIVE(x.x ,x.w, x.y, 8) CHACHA_PRIMITIVE(x.z ,x.y, x.w, 7) x.y = __shfl((int)x.y, x3); x.z = __shfl((int)x.z, x2); x.w = __shfl((int)x.w, x1); } #undef CHACHA_PRIMITIVE bx += x; } template <int ALGO> __device__ __forceinline__ void block_mixer(uint4 &b, uint4 &bx, const int x1, const int x2, const int x3) { switch(ALGO) { case A_SCRYPT: salsa_xor_core(b, bx, x1, x2, x3); break; case A_SCRYPT_JANE: chacha_xor_core(b, bx, x1, x2, x3); break; } } /* * The hasher_gen_kernel operates on a group of 1024-bit input keys * in B, stored as: * B = { k1B k1Bx k2B k2Bx ... } * and fills up the scratchpad with the iterative hashes derived from * those keys: * scratch { k1h1B k1h1Bx K1h2B K1h2Bx ... K2h1B K2h1Bx K2h2B K2h2Bx ... } * scratch is 1024 times larger than the input keys B. * It is extremely important to stream writes effectively into scratch; * less important to coalesce the reads from B. * * Key ordering note: Keys are input from B in "original" order: * K = {k1, k2, k3, k4, k5, ..., kx15, kx16, kx17, ..., kx31 } * After inputting into kernel_gen, each component k and kx of the * key is transmuted into a permuted internal order to make processing faster: * K = k, kx with: * k = 0, 4, 8, 12, 5, 9, 13, 1, 10, 14, 2, 6, 15, 3, 7, 11 * and similarly for kx. 
*/ template <int ALGO, MemoryAccess SCHEME> __global__ void kepler_scrypt_core_kernelA(const uint32_t *d_idata, int begin, int end) { uint4 b, bx; int x1 = (threadIdx.x & 0x1c) + (((threadIdx.x & 0x03)+1)&0x3); int x2 = (threadIdx.x & 0x1c) + (((threadIdx.x & 0x03)+2)&0x3); int x3 = (threadIdx.x & 0x1c) + (((threadIdx.x & 0x03)+3)&0x3); int scrypt_block = (blockIdx.x*blockDim.x + threadIdx.x)/THREADS_PER_WU; int start = (scrypt_block*c_SCRATCH + (SCHEME==ANDERSEN?8:4)*(threadIdx.x%4)) % c_SCRATCH_WU_PER_WARP; int i=begin; if (i == 0) { load_key<ALGO>(d_idata, b, bx); write_keys_direct<SCHEME>(b, bx, start); ++i; } else read_keys_direct<SCHEME,0>(b, bx, start+32*(i-1)); while (i < end) { block_mixer<ALGO>(b, bx, x1, x2, x3); write_keys_direct<SCHEME>(b, bx, start+32*i); ++i; } } template <int ALGO, MemoryAccess SCHEME> __global__ void kepler_scrypt_core_kernelA_LG(const uint32_t *d_idata, int begin, int end, unsigned int LOOKUP_GAP) { uint4 b, bx; int x1 = (threadIdx.x & 0x1c) + (((threadIdx.x & 0x03)+1)&0x3); int x2 = (threadIdx.x & 0x1c) + (((threadIdx.x & 0x03)+2)&0x3); int x3 = (threadIdx.x & 0x1c) + (((threadIdx.x & 0x03)+3)&0x3); int scrypt_block = (blockIdx.x*blockDim.x + threadIdx.x)/THREADS_PER_WU; int start = (scrypt_block*c_SCRATCH + (SCHEME==ANDERSEN?8:4)*(threadIdx.x%4)) % c_SCRATCH_WU_PER_WARP; int i=begin; if (i == 0) { load_key<ALGO>(d_idata, b, bx); write_keys_direct<SCHEME>(b, bx, start); ++i; } else { int pos = (i-1)/LOOKUP_GAP, loop = (i-1)-pos*LOOKUP_GAP; read_keys_direct<SCHEME,0>(b, bx, start+32*pos); while(loop--) block_mixer<ALGO>(b, bx, x1, x2, x3); } while (i < end) { block_mixer<ALGO>(b, bx, x1, x2, x3); if (i % LOOKUP_GAP == 0) write_keys_direct<SCHEME>(b, bx, start+32*(i/LOOKUP_GAP)); ++i; } } /* * hasher_hash_kernel runs the second phase of scrypt after the scratch * buffer is filled with the iterative hashes: It bounces through * the scratch buffer in pseudorandom order, mixing the key as it goes. 
*/ template <int ALGO, MemoryAccess SCHEME, int TEX_DIM> __global__ void kepler_scrypt_core_kernelB(uint32_t *d_odata, int begin, int end) { uint4 b, bx; int scrypt_block = (blockIdx.x*blockDim.x + threadIdx.x)/THREADS_PER_WU; int start = (scrypt_block*c_SCRATCH) + (SCHEME==ANDERSEN?8:4)*(threadIdx.x%4); if (TEX_DIM == 0) start %= c_SCRATCH_WU_PER_WARP; int x1 = (threadIdx.x & 0x1c) + (((threadIdx.x & 0x03)+1)&0x3); int x2 = (threadIdx.x & 0x1c) + (((threadIdx.x & 0x03)+2)&0x3); int x3 = (threadIdx.x & 0x1c) + (((threadIdx.x & 0x03)+3)&0x3); if (begin == 0) { read_keys_direct<SCHEME, TEX_DIM>(b, bx, start+32*c_N_1); block_mixer<ALGO>(b, bx, x1, x2, x3); } else load_key<ALGO>(d_odata, b, bx); for (int i = begin; i < end; i++) { int j = (__shfl((int)bx.x, (threadIdx.x & 0x1c)) & (c_N_1)); uint4 t, tx; read_keys_direct<SCHEME, TEX_DIM>(t, tx, start+32*j); b ^= t; bx ^= tx; block_mixer<ALGO>(b, bx, x1, x2, x3); } store_key<ALGO>(d_odata, b, bx); } template <int ALGO, MemoryAccess SCHEME, int TEX_DIM> __global__ void kepler_scrypt_core_kernelB_LG(uint32_t *d_odata, int begin, int end, unsigned int LOOKUP_GAP) { uint4 b, bx; int scrypt_block = (blockIdx.x*blockDim.x + threadIdx.x)/THREADS_PER_WU; int start = (scrypt_block*c_SCRATCH) + (SCHEME==ANDERSEN?8:4)*(threadIdx.x%4); if (TEX_DIM == 0) start %= c_SCRATCH_WU_PER_WARP; int x1 = (threadIdx.x & 0x1c) + (((threadIdx.x & 0x03)+1)&0x3); int x2 = (threadIdx.x & 0x1c) + (((threadIdx.x & 0x03)+2)&0x3); int x3 = (threadIdx.x & 0x1c) + (((threadIdx.x & 0x03)+3)&0x3); if (begin == 0) { int pos = c_N_1/LOOKUP_GAP, loop = 1 + (c_N_1-pos*LOOKUP_GAP); read_keys_direct<SCHEME,TEX_DIM>(b, bx, start+32*pos); while(loop--) block_mixer<ALGO>(b, bx, x1, x2, x3); } else load_key<ALGO>(d_odata, b, bx); if (SCHEME == SIMPLE) { // better divergent thread handling submitted by nVidia engineers, but // supposedly this does not run with the ANDERSEN memory access scheme int j = (__shfl((int)bx.x, (threadIdx.x & 0x1c)) & (c_N_1)); int pos = j/LOOKUP_GAP; int loop = -1; uint4 t, tx; int i = begin; while(i < end) { if (loop==-1) { j = (__shfl((int)bx.x, (threadIdx.x & 0x1c)) & (c_N_1)); pos = j/LOOKUP_GAP; loop = j-pos*LOOKUP_GAP; read_keys_direct<SCHEME,TEX_DIM>(t, tx, start+32*pos); } if (loop==0) { b ^= t; bx ^= tx; t=b;tx=bx; } block_mixer<ALGO>(t, tx, x1, x2, x3); if (loop==0) { b=t;bx=tx; i++; } loop--; } } else { // this is my original implementation, now used with the ANDERSEN // memory access scheme only. 
for (int i = begin; i < end; i++) { int j = (__shfl((int)bx.x, (threadIdx.x & 0x1c)) & (c_N_1)); int pos = j/LOOKUP_GAP, loop = j-pos*LOOKUP_GAP; uint4 t, tx; read_keys_direct<SCHEME,TEX_DIM>(t, tx, start+32*pos); while(loop--) block_mixer<ALGO>(t, tx, x1, x2, x3); b ^= t; bx ^= tx; block_mixer<ALGO>(b, bx, x1, x2, x3); } } //for (int i = begin; i < end; i++) { // int j = (__shfl((int)bx.x, (threadIdx.x & 0x1c)) & (c_N_1)); // int pos = j/LOOKUP_GAP, loop = j-pos*LOOKUP_GAP; // uint4 t, tx; read_keys_direct<SCHEME,TEX_DIM>(t, tx, start+32*pos); // while(loop--) block_mixer<ALGO>(t, tx, x1, x2, x3); // b ^= t; bx ^= tx; // block_mixer<ALGO>(b, bx, x1, x2, x3); //} store_key<ALGO>(d_odata, b, bx); } KeplerKernel::KeplerKernel() : KernelInterface() { } bool KeplerKernel::bindtexture_1D(uint32_t *d_V, size_t size) { cudaChannelFormatDesc channelDesc4 = cudaCreateChannelDesc<uint4>(); texRef1D_4_V.normalized = 0; texRef1D_4_V.filterMode = cudaFilterModePoint; texRef1D_4_V.addressMode[0] = cudaAddressModeClamp; checkCudaErrors(cudaBindTexture(NULL, &texRef1D_4_V, d_V, &channelDesc4, size)); return true; } bool KeplerKernel::bindtexture_2D(uint32_t *d_V, int width, int height, size_t pitch) { cudaChannelFormatDesc channelDesc4 = cudaCreateChannelDesc<uint4>(); texRef2D_4_V.normalized = 0; texRef2D_4_V.filterMode = cudaFilterModePoint; texRef2D_4_V.addressMode[0] = cudaAddressModeClamp; texRef2D_4_V.addressMode[1] = cudaAddressModeClamp; // maintain texture width of TEXWIDTH (max. limit is 65000) while (width > TEXWIDTH) { width /= 2; height *= 2; pitch /= 2; } while (width < TEXWIDTH) { width *= 2; height = (height+1)/2; pitch *= 2; } checkCudaErrors(cudaBindTexture2D(NULL, &texRef2D_4_V, d_V, &channelDesc4, width, height, pitch)); return true; } bool KeplerKernel::unbindtexture_1D() { checkCudaErrors(cudaUnbindTexture(texRef1D_4_V)); return true; } bool KeplerKernel::unbindtexture_2D() { checkCudaErrors(cudaUnbindTexture(texRef2D_4_V)); return true; } void KeplerKernel::set_scratchbuf_constants(int MAXWARPS, uint32_t** h_V) { checkCudaErrors(cudaMemcpyToSymbol(c_V, h_V, MAXWARPS*sizeof(uint32_t*), 0, cudaMemcpyHostToDevice)); } bool KeplerKernel::run_kernel(dim3 grid, dim3 threads, int WARPS_PER_BLOCK, int thr_id, cudaStream_t stream, uint32_t* d_idata, uint32_t* d_odata, unsigned int N, unsigned int LOOKUP_GAP, bool interactive, bool benchmark, int texture_cache) { bool success = true; // make some constants available to kernel, update only initially and when changing static int prev_N[MAX_DEVICES] = {0}; if (N != prev_N[thr_id]) { uint32_t h_N = N; uint32_t h_N_1 = N-1; uint32_t h_SCRATCH = SCRATCH; uint32_t h_SCRATCH_WU_PER_WARP = (SCRATCH * WU_PER_WARP); uint32_t h_SCRATCH_WU_PER_WARP_1 = (SCRATCH * WU_PER_WARP) - 1; cudaMemcpyToSymbolAsync(c_N, &h_N, sizeof(uint32_t), 0, cudaMemcpyHostToDevice, stream); cudaMemcpyToSymbolAsync(c_N_1, &h_N_1, sizeof(uint32_t), 0, cudaMemcpyHostToDevice, stream); cudaMemcpyToSymbolAsync(c_SCRATCH, &h_SCRATCH, sizeof(uint32_t), 0, cudaMemcpyHostToDevice, stream); cudaMemcpyToSymbolAsync(c_SCRATCH_WU_PER_WARP, &h_SCRATCH_WU_PER_WARP, sizeof(uint32_t), 0, cudaMemcpyHostToDevice, stream); cudaMemcpyToSymbolAsync(c_SCRATCH_WU_PER_WARP_1, &h_SCRATCH_WU_PER_WARP_1, sizeof(uint32_t), 0, cudaMemcpyHostToDevice, stream); prev_N[thr_id] = N; } // First phase: Sequential writes to scratchpad. 
int batch = device_batchsize[thr_id]; //int num_sleeps = 2* ((N + (batch-1)) / batch); //int sleeptime = 100; unsigned int pos = 0; do { if (LOOKUP_GAP == 1) { if (IS_SCRYPT()) kepler_scrypt_core_kernelA<A_SCRYPT, ANDERSEN> <<< grid, threads, 0, stream >>>(d_idata, pos, min(pos+batch, N)); if (IS_SCRYPT_JANE()) kepler_scrypt_core_kernelA<A_SCRYPT_JANE, SIMPLE> <<< grid, threads, 0, stream >>>(d_idata, pos, min(pos+batch, N)); } else { if (IS_SCRYPT()) kepler_scrypt_core_kernelA_LG<A_SCRYPT, ANDERSEN> <<< grid, threads, 0, stream >>>(d_idata, pos, min(pos+batch, N), LOOKUP_GAP); if (IS_SCRYPT_JANE()) kepler_scrypt_core_kernelA_LG<A_SCRYPT_JANE, SIMPLE> <<< grid, threads, 0, stream >>>(d_idata, pos, min(pos+batch, N), LOOKUP_GAP); } pos += batch; } while (pos < N); // Second phase: Random read access from scratchpad. pos = 0; do { if (LOOKUP_GAP == 1) { if (texture_cache == 0) { if (IS_SCRYPT()) kepler_scrypt_core_kernelB<A_SCRYPT ,ANDERSEN, 0><<< grid, threads, 0, stream >>>(d_odata, pos, min(pos+batch, N)); if (IS_SCRYPT_JANE()) kepler_scrypt_core_kernelB<A_SCRYPT_JANE,SIMPLE, 0><<< grid, threads, 0, stream >>>(d_odata, pos, min(pos+batch, N)); } else if (texture_cache == 1) { if (IS_SCRYPT()) kepler_scrypt_core_kernelB<A_SCRYPT ,ANDERSEN,1><<< grid, threads, 0, stream >>>(d_odata, pos, min(pos+batch, N)); if (IS_SCRYPT_JANE()) kepler_scrypt_core_kernelB<A_SCRYPT_JANE,SIMPLE, 1><<< grid, threads, 0, stream >>>(d_odata, pos, min(pos+batch, N)); } else if (texture_cache == 2) { if (IS_SCRYPT()) kepler_scrypt_core_kernelB<A_SCRYPT ,ANDERSEN,2><<< grid, threads, 0, stream >>>(d_odata, pos, min(pos+batch, N)); if (IS_SCRYPT_JANE()) kepler_scrypt_core_kernelB<A_SCRYPT_JANE,SIMPLE, 2><<< grid, threads, 0, stream >>>(d_odata, pos, min(pos+batch, N)); } } else { if (texture_cache == 0) { if (IS_SCRYPT()) kepler_scrypt_core_kernelB_LG<A_SCRYPT ,ANDERSEN,0><<< grid, threads, 0, stream >>>(d_odata, pos, min(pos+batch, N), LOOKUP_GAP); if (IS_SCRYPT_JANE()) kepler_scrypt_core_kernelB_LG<A_SCRYPT_JANE,SIMPLE, 0><<< grid, threads, 0, stream >>>(d_odata, pos, min(pos+batch, N), LOOKUP_GAP); } else if (texture_cache == 1) { if (IS_SCRYPT()) kepler_scrypt_core_kernelB_LG<A_SCRYPT ,ANDERSEN,1><<< grid, threads, 0, stream >>>(d_odata, pos, min(pos+batch, N), LOOKUP_GAP); if (IS_SCRYPT_JANE()) kepler_scrypt_core_kernelB_LG<A_SCRYPT_JANE,SIMPLE, 1><<< grid, threads, 0, stream >>>(d_odata, pos, min(pos+batch, N), LOOKUP_GAP); } else if (texture_cache == 2) { if (IS_SCRYPT()) kepler_scrypt_core_kernelB_LG<A_SCRYPT ,ANDERSEN,2><<< grid, threads, 0, stream >>>(d_odata, pos, min(pos+batch, N), LOOKUP_GAP); if (IS_SCRYPT_JANE()) kepler_scrypt_core_kernelB_LG<A_SCRYPT_JANE,SIMPLE, 2><<< grid, threads, 0, stream >>>(d_odata, pos, min(pos+batch, N), LOOKUP_GAP); } } pos += batch; } while (pos < N); return success; }
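The *_LG kernels above trade memory for recomputation: kernelA_LG writes only every LOOKUP_GAP-th intermediate state to the scratchpad, and kernelB_LG recovers an arbitrary state j by reloading slot j / LOOKUP_GAP and re-applying block_mixer the remaining j mod LOOKUP_GAP times. The host-side sketch below just replays that index arithmetic; the values of N and LOOKUP_GAP are illustrative assumptions.

// Host-side sketch of the LOOKUP_GAP bookkeeping used by the *_LG kernels
// above. N and LOOKUP_GAP are illustrative assumptions.
#include <cstdio>

int main()
{
    const unsigned int N = 1024;        // scrypt iteration count (c_N)
    const unsigned int LOOKUP_GAP = 4;  // store one state out of every 4

    // Scratchpad storage drops from N slots to roughly N / LOOKUP_GAP slots.
    const unsigned int stored_slots = (N + LOOKUP_GAP - 1) / LOOKUP_GAP;

    // To read logical state j, kernelB_LG reloads slot 'pos' and re-applies
    // block_mixer() 'loop' times, mirroring the index math in the code above.
    const unsigned int j = 509;
    const unsigned int pos = j / LOOKUP_GAP;
    const unsigned int loop = j - pos * LOOKUP_GAP;

    std::printf("stored slots: %u (vs %u without lookup gap)\n", stored_slots, N);
    std::printf("state %u -> reload slot %u, re-mix %u time(s)\n", j, pos, loop);
    return 0;
}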
#ifndef __STDC_CONSTANT_MACROS #define __STDC_CONSTANT_MACROS #endif #ifdef HAVE_CONFIG_H #include <config.h> #endif #include <stdbool.h> #include <cuda.h> #include "CRC64.h" // The polynomial here is the bit-reversed encoding of 0x42f0e1eba9ea3693. static const uint64_t crc64_poly = UINT64_C(0xc96c5795d7870f42); static const uint64_t crc64_table[4][256] = { { UINT64_C(0x0000000000000000), UINT64_C(0x1dee8a5e222ca1dc), UINT64_C(0x3bdd14bc445943b8), UINT64_C(0x26339ee26675e264), UINT64_C(0x77ba297888b28770), UINT64_C(0x6a54a326aa9e26ac), UINT64_C(0x4c673dc4ccebc4c8), UINT64_C(0x5189b79aeec76514), UINT64_C(0xef7452f111650ee0), UINT64_C(0xf29ad8af3349af3c), UINT64_C(0xd4a9464d553c4d58), UINT64_C(0xc947cc137710ec84), UINT64_C(0x98ce7b8999d78990), UINT64_C(0x8520f1d7bbfb284c), UINT64_C(0xa3136f35dd8eca28), UINT64_C(0xbefde56bffa26bf4), UINT64_C(0x4c300ac98dc40345), UINT64_C(0x51de8097afe8a299), UINT64_C(0x77ed1e75c99d40fd), UINT64_C(0x6a03942bebb1e121), UINT64_C(0x3b8a23b105768435), UINT64_C(0x2664a9ef275a25e9), UINT64_C(0x0057370d412fc78d), UINT64_C(0x1db9bd5363036651), UINT64_C(0xa34458389ca10da5), UINT64_C(0xbeaad266be8dac79), UINT64_C(0x98994c84d8f84e1d), UINT64_C(0x8577c6dafad4efc1), UINT64_C(0xd4fe714014138ad5), UINT64_C(0xc910fb1e363f2b09), UINT64_C(0xef2365fc504ac96d), UINT64_C(0xf2cdefa2726668b1), UINT64_C(0x986015931b88068a), UINT64_C(0x858e9fcd39a4a756), UINT64_C(0xa3bd012f5fd14532), UINT64_C(0xbe538b717dfde4ee), UINT64_C(0xefda3ceb933a81fa), UINT64_C(0xf234b6b5b1162026), UINT64_C(0xd4072857d763c242), UINT64_C(0xc9e9a209f54f639e), UINT64_C(0x771447620aed086a), UINT64_C(0x6afacd3c28c1a9b6), UINT64_C(0x4cc953de4eb44bd2), UINT64_C(0x5127d9806c98ea0e), UINT64_C(0x00ae6e1a825f8f1a), UINT64_C(0x1d40e444a0732ec6), UINT64_C(0x3b737aa6c606cca2), UINT64_C(0x269df0f8e42a6d7e), UINT64_C(0xd4501f5a964c05cf), UINT64_C(0xc9be9504b460a413), UINT64_C(0xef8d0be6d2154677), UINT64_C(0xf26381b8f039e7ab), UINT64_C(0xa3ea36221efe82bf), UINT64_C(0xbe04bc7c3cd22363), UINT64_C(0x9837229e5aa7c107), UINT64_C(0x85d9a8c0788b60db), UINT64_C(0x3b244dab87290b2f), UINT64_C(0x26cac7f5a505aaf3), UINT64_C(0x00f95917c3704897), UINT64_C(0x1d17d349e15ce94b), UINT64_C(0x4c9e64d30f9b8c5f), UINT64_C(0x5170ee8d2db72d83), UINT64_C(0x7743706f4bc2cfe7), UINT64_C(0x6aadfa3169ee6e3b), UINT64_C(0xa218840d981e1391), UINT64_C(0xbff60e53ba32b24d), UINT64_C(0x99c590b1dc475029), UINT64_C(0x842b1aeffe6bf1f5), UINT64_C(0xd5a2ad7510ac94e1), UINT64_C(0xc84c272b3280353d), UINT64_C(0xee7fb9c954f5d759), UINT64_C(0xf391339776d97685), UINT64_C(0x4d6cd6fc897b1d71), UINT64_C(0x50825ca2ab57bcad), UINT64_C(0x76b1c240cd225ec9), UINT64_C(0x6b5f481eef0eff15), UINT64_C(0x3ad6ff8401c99a01), UINT64_C(0x273875da23e53bdd), UINT64_C(0x010beb384590d9b9), UINT64_C(0x1ce5616667bc7865), UINT64_C(0xee288ec415da10d4), UINT64_C(0xf3c6049a37f6b108), UINT64_C(0xd5f59a785183536c), UINT64_C(0xc81b102673aff2b0), UINT64_C(0x9992a7bc9d6897a4), UINT64_C(0x847c2de2bf443678), UINT64_C(0xa24fb300d931d41c), UINT64_C(0xbfa1395efb1d75c0), UINT64_C(0x015cdc3504bf1e34), UINT64_C(0x1cb2566b2693bfe8), UINT64_C(0x3a81c88940e65d8c), UINT64_C(0x276f42d762cafc50), UINT64_C(0x76e6f54d8c0d9944), UINT64_C(0x6b087f13ae213898), UINT64_C(0x4d3be1f1c854dafc), UINT64_C(0x50d56bafea787b20), UINT64_C(0x3a78919e8396151b), UINT64_C(0x27961bc0a1bab4c7), UINT64_C(0x01a58522c7cf56a3), UINT64_C(0x1c4b0f7ce5e3f77f), UINT64_C(0x4dc2b8e60b24926b), UINT64_C(0x502c32b8290833b7), UINT64_C(0x761fac5a4f7dd1d3), UINT64_C(0x6bf126046d51700f), UINT64_C(0xd50cc36f92f31bfb), UINT64_C(0xc8e24931b0dfba27), 
UINT64_C(0xeed1d7d3d6aa5843), UINT64_C(0xf33f5d8df486f99f), UINT64_C(0xa2b6ea171a419c8b), UINT64_C(0xbf586049386d3d57), UINT64_C(0x996bfeab5e18df33), UINT64_C(0x848574f57c347eef), UINT64_C(0x76489b570e52165e), UINT64_C(0x6ba611092c7eb782), UINT64_C(0x4d958feb4a0b55e6), UINT64_C(0x507b05b56827f43a), UINT64_C(0x01f2b22f86e0912e), UINT64_C(0x1c1c3871a4cc30f2), UINT64_C(0x3a2fa693c2b9d296), UINT64_C(0x27c12ccde095734a), UINT64_C(0x993cc9a61f3718be), UINT64_C(0x84d243f83d1bb962), UINT64_C(0xa2e1dd1a5b6e5b06), UINT64_C(0xbf0f57447942fada), UINT64_C(0xee86e0de97859fce), UINT64_C(0xf3686a80b5a93e12), UINT64_C(0xd55bf462d3dcdc76), UINT64_C(0xc8b57e3cf1f07daa), UINT64_C(0xd6e9a7309f3239a7), UINT64_C(0xcb072d6ebd1e987b), UINT64_C(0xed34b38cdb6b7a1f), UINT64_C(0xf0da39d2f947dbc3), UINT64_C(0xa1538e481780bed7), UINT64_C(0xbcbd041635ac1f0b), UINT64_C(0x9a8e9af453d9fd6f), UINT64_C(0x876010aa71f55cb3), UINT64_C(0x399df5c18e573747), UINT64_C(0x24737f9fac7b969b), UINT64_C(0x0240e17dca0e74ff), UINT64_C(0x1fae6b23e822d523), UINT64_C(0x4e27dcb906e5b037), UINT64_C(0x53c956e724c911eb), UINT64_C(0x75fac80542bcf38f), UINT64_C(0x6814425b60905253), UINT64_C(0x9ad9adf912f63ae2), UINT64_C(0x873727a730da9b3e), UINT64_C(0xa104b94556af795a), UINT64_C(0xbcea331b7483d886), UINT64_C(0xed6384819a44bd92), UINT64_C(0xf08d0edfb8681c4e), UINT64_C(0xd6be903dde1dfe2a), UINT64_C(0xcb501a63fc315ff6), UINT64_C(0x75adff0803933402), UINT64_C(0x6843755621bf95de), UINT64_C(0x4e70ebb447ca77ba), UINT64_C(0x539e61ea65e6d666), UINT64_C(0x0217d6708b21b372), UINT64_C(0x1ff95c2ea90d12ae), UINT64_C(0x39cac2cccf78f0ca), UINT64_C(0x24244892ed545116), UINT64_C(0x4e89b2a384ba3f2d), UINT64_C(0x536738fda6969ef1), UINT64_C(0x7554a61fc0e37c95), UINT64_C(0x68ba2c41e2cfdd49), UINT64_C(0x39339bdb0c08b85d), UINT64_C(0x24dd11852e241981), UINT64_C(0x02ee8f674851fbe5), UINT64_C(0x1f0005396a7d5a39), UINT64_C(0xa1fde05295df31cd), UINT64_C(0xbc136a0cb7f39011), UINT64_C(0x9a20f4eed1867275), UINT64_C(0x87ce7eb0f3aad3a9), UINT64_C(0xd647c92a1d6db6bd), UINT64_C(0xcba943743f411761), UINT64_C(0xed9add965934f505), UINT64_C(0xf07457c87b1854d9), UINT64_C(0x02b9b86a097e3c68), UINT64_C(0x1f5732342b529db4), UINT64_C(0x3964acd64d277fd0), UINT64_C(0x248a26886f0bde0c), UINT64_C(0x7503911281ccbb18), UINT64_C(0x68ed1b4ca3e01ac4), UINT64_C(0x4ede85aec595f8a0), UINT64_C(0x53300ff0e7b9597c), UINT64_C(0xedcdea9b181b3288), UINT64_C(0xf02360c53a379354), UINT64_C(0xd610fe275c427130), UINT64_C(0xcbfe74797e6ed0ec), UINT64_C(0x9a77c3e390a9b5f8), UINT64_C(0x879949bdb2851424), UINT64_C(0xa1aad75fd4f0f640), UINT64_C(0xbc445d01f6dc579c), UINT64_C(0x74f1233d072c2a36), UINT64_C(0x691fa96325008bea), UINT64_C(0x4f2c37814375698e), UINT64_C(0x52c2bddf6159c852), UINT64_C(0x034b0a458f9ead46), UINT64_C(0x1ea5801badb20c9a), UINT64_C(0x38961ef9cbc7eefe), UINT64_C(0x257894a7e9eb4f22), UINT64_C(0x9b8571cc164924d6), UINT64_C(0x866bfb923465850a), UINT64_C(0xa05865705210676e), UINT64_C(0xbdb6ef2e703cc6b2), UINT64_C(0xec3f58b49efba3a6), UINT64_C(0xf1d1d2eabcd7027a), UINT64_C(0xd7e24c08daa2e01e), UINT64_C(0xca0cc656f88e41c2), UINT64_C(0x38c129f48ae82973), UINT64_C(0x252fa3aaa8c488af), UINT64_C(0x031c3d48ceb16acb), UINT64_C(0x1ef2b716ec9dcb17), UINT64_C(0x4f7b008c025aae03), UINT64_C(0x52958ad220760fdf), UINT64_C(0x74a614304603edbb), UINT64_C(0x69489e6e642f4c67), UINT64_C(0xd7b57b059b8d2793), UINT64_C(0xca5bf15bb9a1864f), UINT64_C(0xec686fb9dfd4642b), UINT64_C(0xf186e5e7fdf8c5f7), UINT64_C(0xa00f527d133fa0e3), UINT64_C(0xbde1d8233113013f), UINT64_C(0x9bd246c15766e35b), UINT64_C(0x863ccc9f754a4287), 
UINT64_C(0xec9136ae1ca42cbc), UINT64_C(0xf17fbcf03e888d60), UINT64_C(0xd74c221258fd6f04), UINT64_C(0xcaa2a84c7ad1ced8), UINT64_C(0x9b2b1fd69416abcc), UINT64_C(0x86c59588b63a0a10), UINT64_C(0xa0f60b6ad04fe874), UINT64_C(0xbd188134f26349a8), UINT64_C(0x03e5645f0dc1225c), UINT64_C(0x1e0bee012fed8380), UINT64_C(0x383870e3499861e4), UINT64_C(0x25d6fabd6bb4c038), UINT64_C(0x745f4d278573a52c), UINT64_C(0x69b1c779a75f04f0), UINT64_C(0x4f82599bc12ae694), UINT64_C(0x526cd3c5e3064748), UINT64_C(0xa0a13c6791602ff9), UINT64_C(0xbd4fb639b34c8e25), UINT64_C(0x9b7c28dbd5396c41), UINT64_C(0x8692a285f715cd9d), UINT64_C(0xd71b151f19d2a889), UINT64_C(0xcaf59f413bfe0955), UINT64_C(0xecc601a35d8beb31), UINT64_C(0xf1288bfd7fa74aed), UINT64_C(0x4fd56e9680052119), UINT64_C(0x523be4c8a22980c5), UINT64_C(0x74087a2ac45c62a1), UINT64_C(0x69e6f074e670c37d), UINT64_C(0x386f47ee08b7a669), UINT64_C(0x2581cdb02a9b07b5), UINT64_C(0x03b253524ceee5d1), UINT64_C(0x1e5cd90c6ec2440d) }, { UINT64_C(0x0000000000000000), UINT64_C(0x3f0be14a916a6dcb), UINT64_C(0x7e17c29522d4db96), UINT64_C(0x411c23dfb3beb65d), UINT64_C(0xfc2f852a45a9b72c), UINT64_C(0xc3246460d4c3dae7), UINT64_C(0x823847bf677d6cba), UINT64_C(0xbd33a6f5f6170171), UINT64_C(0x6a87a57f245d70dd), UINT64_C(0x558c4435b5371d16), UINT64_C(0x149067ea0689ab4b), UINT64_C(0x2b9b86a097e3c680), UINT64_C(0x96a8205561f4c7f1), UINT64_C(0xa9a3c11ff09eaa3a), UINT64_C(0xe8bfe2c043201c67), UINT64_C(0xd7b4038ad24a71ac), UINT64_C(0xd50f4afe48bae1ba), UINT64_C(0xea04abb4d9d08c71), UINT64_C(0xab18886b6a6e3a2c), UINT64_C(0x94136921fb0457e7), UINT64_C(0x2920cfd40d135696), UINT64_C(0x162b2e9e9c793b5d), UINT64_C(0x57370d412fc78d00), UINT64_C(0x683cec0bbeade0cb), UINT64_C(0xbf88ef816ce79167), UINT64_C(0x80830ecbfd8dfcac), UINT64_C(0xc19f2d144e334af1), UINT64_C(0xfe94cc5edf59273a), UINT64_C(0x43a76aab294e264b), UINT64_C(0x7cac8be1b8244b80), UINT64_C(0x3db0a83e0b9afddd), UINT64_C(0x02bb49749af09016), UINT64_C(0x38c63ad73e7bddf1), UINT64_C(0x07cddb9daf11b03a), UINT64_C(0x46d1f8421caf0667), UINT64_C(0x79da19088dc56bac), UINT64_C(0xc4e9bffd7bd26add), UINT64_C(0xfbe25eb7eab80716), UINT64_C(0xbafe7d685906b14b), UINT64_C(0x85f59c22c86cdc80), UINT64_C(0x52419fa81a26ad2c), UINT64_C(0x6d4a7ee28b4cc0e7), UINT64_C(0x2c565d3d38f276ba), UINT64_C(0x135dbc77a9981b71), UINT64_C(0xae6e1a825f8f1a00), UINT64_C(0x9165fbc8cee577cb), UINT64_C(0xd079d8177d5bc196), UINT64_C(0xef72395dec31ac5d), UINT64_C(0xedc9702976c13c4b), UINT64_C(0xd2c29163e7ab5180), UINT64_C(0x93deb2bc5415e7dd), UINT64_C(0xacd553f6c57f8a16), UINT64_C(0x11e6f50333688b67), UINT64_C(0x2eed1449a202e6ac), UINT64_C(0x6ff1379611bc50f1), UINT64_C(0x50fad6dc80d63d3a), UINT64_C(0x874ed556529c4c96), UINT64_C(0xb845341cc3f6215d), UINT64_C(0xf95917c370489700), UINT64_C(0xc652f689e122facb), UINT64_C(0x7b61507c1735fbba), UINT64_C(0x446ab136865f9671), UINT64_C(0x057692e935e1202c), UINT64_C(0x3a7d73a3a48b4de7), UINT64_C(0x718c75ae7cf7bbe2), UINT64_C(0x4e8794e4ed9dd629), UINT64_C(0x0f9bb73b5e236074), UINT64_C(0x30905671cf490dbf), UINT64_C(0x8da3f084395e0cce), UINT64_C(0xb2a811cea8346105), UINT64_C(0xf3b432111b8ad758), UINT64_C(0xccbfd35b8ae0ba93), UINT64_C(0x1b0bd0d158aacb3f), UINT64_C(0x2400319bc9c0a6f4), UINT64_C(0x651c12447a7e10a9), UINT64_C(0x5a17f30eeb147d62), UINT64_C(0xe72455fb1d037c13), UINT64_C(0xd82fb4b18c6911d8), UINT64_C(0x9933976e3fd7a785), UINT64_C(0xa6387624aebdca4e), UINT64_C(0xa4833f50344d5a58), UINT64_C(0x9b88de1aa5273793), UINT64_C(0xda94fdc5169981ce), UINT64_C(0xe59f1c8f87f3ec05), UINT64_C(0x58acba7a71e4ed74), UINT64_C(0x67a75b30e08e80bf), 
UINT64_C(0x26bb78ef533036e2), UINT64_C(0x19b099a5c25a5b29), UINT64_C(0xce049a2f10102a85), UINT64_C(0xf10f7b65817a474e), UINT64_C(0xb01358ba32c4f113), UINT64_C(0x8f18b9f0a3ae9cd8), UINT64_C(0x322b1f0555b99da9), UINT64_C(0x0d20fe4fc4d3f062), UINT64_C(0x4c3cdd90776d463f), UINT64_C(0x73373cdae6072bf4), UINT64_C(0x494a4f79428c6613), UINT64_C(0x7641ae33d3e60bd8), UINT64_C(0x375d8dec6058bd85), UINT64_C(0x08566ca6f132d04e), UINT64_C(0xb565ca530725d13f), UINT64_C(0x8a6e2b19964fbcf4), UINT64_C(0xcb7208c625f10aa9), UINT64_C(0xf479e98cb49b6762), UINT64_C(0x23cdea0666d116ce), UINT64_C(0x1cc60b4cf7bb7b05), UINT64_C(0x5dda28934405cd58), UINT64_C(0x62d1c9d9d56fa093), UINT64_C(0xdfe26f2c2378a1e2), UINT64_C(0xe0e98e66b212cc29), UINT64_C(0xa1f5adb901ac7a74), UINT64_C(0x9efe4cf390c617bf), UINT64_C(0x9c4505870a3687a9), UINT64_C(0xa34ee4cd9b5cea62), UINT64_C(0xe252c71228e25c3f), UINT64_C(0xdd592658b98831f4), UINT64_C(0x606a80ad4f9f3085), UINT64_C(0x5f6161e7def55d4e), UINT64_C(0x1e7d42386d4beb13), UINT64_C(0x2176a372fc2186d8), UINT64_C(0xf6c2a0f82e6bf774), UINT64_C(0xc9c941b2bf019abf), UINT64_C(0x88d5626d0cbf2ce2), UINT64_C(0xb7de83279dd54129), UINT64_C(0x0aed25d26bc24058), UINT64_C(0x35e6c498faa82d93), UINT64_C(0x74fae74749169bce), UINT64_C(0x4bf1060dd87cf605), UINT64_C(0xe318eb5cf9ef77c4), UINT64_C(0xdc130a1668851a0f), UINT64_C(0x9d0f29c9db3bac52), UINT64_C(0xa204c8834a51c199), UINT64_C(0x1f376e76bc46c0e8), UINT64_C(0x203c8f3c2d2cad23), UINT64_C(0x6120ace39e921b7e), UINT64_C(0x5e2b4da90ff876b5), UINT64_C(0x899f4e23ddb20719), UINT64_C(0xb694af694cd86ad2), UINT64_C(0xf7888cb6ff66dc8f), UINT64_C(0xc8836dfc6e0cb144), UINT64_C(0x75b0cb09981bb035), UINT64_C(0x4abb2a430971ddfe), UINT64_C(0x0ba7099cbacf6ba3), UINT64_C(0x34ace8d62ba50668), UINT64_C(0x3617a1a2b155967e), UINT64_C(0x091c40e8203ffbb5), UINT64_C(0x4800633793814de8), UINT64_C(0x770b827d02eb2023), UINT64_C(0xca382488f4fc2152), UINT64_C(0xf533c5c265964c99), UINT64_C(0xb42fe61dd628fac4), UINT64_C(0x8b2407574742970f), UINT64_C(0x5c9004dd9508e6a3), UINT64_C(0x639be59704628b68), UINT64_C(0x2287c648b7dc3d35), UINT64_C(0x1d8c270226b650fe), UINT64_C(0xa0bf81f7d0a1518f), UINT64_C(0x9fb460bd41cb3c44), UINT64_C(0xdea84362f2758a19), UINT64_C(0xe1a3a228631fe7d2), UINT64_C(0xdbded18bc794aa35), UINT64_C(0xe4d530c156fec7fe), UINT64_C(0xa5c9131ee54071a3), UINT64_C(0x9ac2f254742a1c68), UINT64_C(0x27f154a1823d1d19), UINT64_C(0x18fab5eb135770d2), UINT64_C(0x59e69634a0e9c68f), UINT64_C(0x66ed777e3183ab44), UINT64_C(0xb15974f4e3c9dae8), UINT64_C(0x8e5295be72a3b723), UINT64_C(0xcf4eb661c11d017e), UINT64_C(0xf045572b50776cb5), UINT64_C(0x4d76f1dea6606dc4), UINT64_C(0x727d1094370a000f), UINT64_C(0x3361334b84b4b652), UINT64_C(0x0c6ad20115dedb99), UINT64_C(0x0ed19b758f2e4b8f), UINT64_C(0x31da7a3f1e442644), UINT64_C(0x70c659e0adfa9019), UINT64_C(0x4fcdb8aa3c90fdd2), UINT64_C(0xf2fe1e5fca87fca3), UINT64_C(0xcdf5ff155bed9168), UINT64_C(0x8ce9dccae8532735), UINT64_C(0xb3e23d8079394afe), UINT64_C(0x64563e0aab733b52), UINT64_C(0x5b5ddf403a195699), UINT64_C(0x1a41fc9f89a7e0c4), UINT64_C(0x254a1dd518cd8d0f), UINT64_C(0x9879bb20eeda8c7e), UINT64_C(0xa7725a6a7fb0e1b5), UINT64_C(0xe66e79b5cc0e57e8), UINT64_C(0xd96598ff5d643a23), UINT64_C(0x92949ef28518cc26), UINT64_C(0xad9f7fb81472a1ed), UINT64_C(0xec835c67a7cc17b0), UINT64_C(0xd388bd2d36a67a7b), UINT64_C(0x6ebb1bd8c0b17b0a), UINT64_C(0x51b0fa9251db16c1), UINT64_C(0x10acd94de265a09c), UINT64_C(0x2fa73807730fcd57), UINT64_C(0xf8133b8da145bcfb), UINT64_C(0xc718dac7302fd130), UINT64_C(0x8604f9188391676d), UINT64_C(0xb90f185212fb0aa6), 
UINT64_C(0x043cbea7e4ec0bd7), UINT64_C(0x3b375fed7586661c), UINT64_C(0x7a2b7c32c638d041), UINT64_C(0x45209d785752bd8a), UINT64_C(0x479bd40ccda22d9c), UINT64_C(0x789035465cc84057), UINT64_C(0x398c1699ef76f60a), UINT64_C(0x0687f7d37e1c9bc1), UINT64_C(0xbbb45126880b9ab0), UINT64_C(0x84bfb06c1961f77b), UINT64_C(0xc5a393b3aadf4126), UINT64_C(0xfaa872f93bb52ced), UINT64_C(0x2d1c7173e9ff5d41), UINT64_C(0x121790397895308a), UINT64_C(0x530bb3e6cb2b86d7), UINT64_C(0x6c0052ac5a41eb1c), UINT64_C(0xd133f459ac56ea6d), UINT64_C(0xee3815133d3c87a6), UINT64_C(0xaf2436cc8e8231fb), UINT64_C(0x902fd7861fe85c30), UINT64_C(0xaa52a425bb6311d7), UINT64_C(0x9559456f2a097c1c), UINT64_C(0xd44566b099b7ca41), UINT64_C(0xeb4e87fa08dda78a), UINT64_C(0x567d210ffecaa6fb), UINT64_C(0x6976c0456fa0cb30), UINT64_C(0x286ae39adc1e7d6d), UINT64_C(0x176102d04d7410a6), UINT64_C(0xc0d5015a9f3e610a), UINT64_C(0xffdee0100e540cc1), UINT64_C(0xbec2c3cfbdeaba9c), UINT64_C(0x81c922852c80d757), UINT64_C(0x3cfa8470da97d626), UINT64_C(0x03f1653a4bfdbbed), UINT64_C(0x42ed46e5f8430db0), UINT64_C(0x7de6a7af6929607b), UINT64_C(0x7f5deedbf3d9f06d), UINT64_C(0x40560f9162b39da6), UINT64_C(0x014a2c4ed10d2bfb), UINT64_C(0x3e41cd0440674630), UINT64_C(0x83726bf1b6704741), UINT64_C(0xbc798abb271a2a8a), UINT64_C(0xfd65a96494a49cd7), UINT64_C(0xc26e482e05cef11c), UINT64_C(0x15da4ba4d78480b0), UINT64_C(0x2ad1aaee46eeed7b), UINT64_C(0x6bcd8931f5505b26), UINT64_C(0x54c6687b643a36ed), UINT64_C(0xe9f5ce8e922d379c), UINT64_C(0xd6fe2fc403475a57), UINT64_C(0x97e20c1bb0f9ec0a), UINT64_C(0xa8e9ed51219381c1) }, { UINT64_C(0x0000000000000000), UINT64_C(0x54e979925cd0f10d), UINT64_C(0xa9d2f324b9a1e21a), UINT64_C(0xfd3b8ab6e5711317), UINT64_C(0xc17d4962dc4ddab1), UINT64_C(0x959430f0809d2bbc), UINT64_C(0x68afba4665ec38ab), UINT64_C(0x3c46c3d4393cc9a6), UINT64_C(0x10223dee1795abe7), UINT64_C(0x44cb447c4b455aea), UINT64_C(0xb9f0cecaae3449fd), UINT64_C(0xed19b758f2e4b8f0), UINT64_C(0xd15f748ccbd87156), UINT64_C(0x85b60d1e9708805b), UINT64_C(0x788d87a87279934c), UINT64_C(0x2c64fe3a2ea96241), UINT64_C(0x20447bdc2f2b57ce), UINT64_C(0x74ad024e73fba6c3), UINT64_C(0x899688f8968ab5d4), UINT64_C(0xdd7ff16aca5a44d9), UINT64_C(0xe13932bef3668d7f), UINT64_C(0xb5d04b2cafb67c72), UINT64_C(0x48ebc19a4ac76f65), UINT64_C(0x1c02b80816179e68), UINT64_C(0x3066463238befc29), UINT64_C(0x648f3fa0646e0d24), UINT64_C(0x99b4b516811f1e33), UINT64_C(0xcd5dcc84ddcfef3e), UINT64_C(0xf11b0f50e4f32698), UINT64_C(0xa5f276c2b823d795), UINT64_C(0x58c9fc745d52c482), UINT64_C(0x0c2085e60182358f), UINT64_C(0x4088f7b85e56af9c), UINT64_C(0x14618e2a02865e91), UINT64_C(0xe95a049ce7f74d86), UINT64_C(0xbdb37d0ebb27bc8b), UINT64_C(0x81f5beda821b752d), UINT64_C(0xd51cc748decb8420), UINT64_C(0x28274dfe3bba9737), UINT64_C(0x7cce346c676a663a), UINT64_C(0x50aaca5649c3047b), UINT64_C(0x0443b3c41513f576), UINT64_C(0xf9783972f062e661), UINT64_C(0xad9140e0acb2176c), UINT64_C(0x91d78334958edeca), UINT64_C(0xc53efaa6c95e2fc7), UINT64_C(0x380570102c2f3cd0), UINT64_C(0x6cec098270ffcddd), UINT64_C(0x60cc8c64717df852), UINT64_C(0x3425f5f62dad095f), UINT64_C(0xc91e7f40c8dc1a48), UINT64_C(0x9df706d2940ceb45), UINT64_C(0xa1b1c506ad3022e3), UINT64_C(0xf558bc94f1e0d3ee), UINT64_C(0x086336221491c0f9), UINT64_C(0x5c8a4fb0484131f4), UINT64_C(0x70eeb18a66e853b5), UINT64_C(0x2407c8183a38a2b8), UINT64_C(0xd93c42aedf49b1af), UINT64_C(0x8dd53b3c839940a2), UINT64_C(0xb193f8e8baa58904), UINT64_C(0xe57a817ae6757809), UINT64_C(0x18410bcc03046b1e), UINT64_C(0x4ca8725e5fd49a13), UINT64_C(0x8111ef70bcad5f38), UINT64_C(0xd5f896e2e07dae35), 
UINT64_C(0x28c31c54050cbd22), UINT64_C(0x7c2a65c659dc4c2f), UINT64_C(0x406ca61260e08589), UINT64_C(0x1485df803c307484), UINT64_C(0xe9be5536d9416793), UINT64_C(0xbd572ca48591969e), UINT64_C(0x9133d29eab38f4df), UINT64_C(0xc5daab0cf7e805d2), UINT64_C(0x38e121ba129916c5), UINT64_C(0x6c0858284e49e7c8), UINT64_C(0x504e9bfc77752e6e), UINT64_C(0x04a7e26e2ba5df63), UINT64_C(0xf99c68d8ced4cc74), UINT64_C(0xad75114a92043d79), UINT64_C(0xa15594ac938608f6), UINT64_C(0xf5bced3ecf56f9fb), UINT64_C(0x088767882a27eaec), UINT64_C(0x5c6e1e1a76f71be1), UINT64_C(0x6028ddce4fcbd247), UINT64_C(0x34c1a45c131b234a), UINT64_C(0xc9fa2eeaf66a305d), UINT64_C(0x9d135778aabac150), UINT64_C(0xb177a9428413a311), UINT64_C(0xe59ed0d0d8c3521c), UINT64_C(0x18a55a663db2410b), UINT64_C(0x4c4c23f46162b006), UINT64_C(0x700ae020585e79a0), UINT64_C(0x24e399b2048e88ad), UINT64_C(0xd9d81304e1ff9bba), UINT64_C(0x8d316a96bd2f6ab7), UINT64_C(0xc19918c8e2fbf0a4), UINT64_C(0x9570615abe2b01a9), UINT64_C(0x684bebec5b5a12be), UINT64_C(0x3ca2927e078ae3b3), UINT64_C(0x00e451aa3eb62a15), UINT64_C(0x540d28386266db18), UINT64_C(0xa936a28e8717c80f), UINT64_C(0xfddfdb1cdbc73902), UINT64_C(0xd1bb2526f56e5b43), UINT64_C(0x85525cb4a9beaa4e), UINT64_C(0x7869d6024ccfb959), UINT64_C(0x2c80af90101f4854), UINT64_C(0x10c66c44292381f2), UINT64_C(0x442f15d675f370ff), UINT64_C(0xb9149f60908263e8), UINT64_C(0xedfde6f2cc5292e5), UINT64_C(0xe1dd6314cdd0a76a), UINT64_C(0xb5341a8691005667), UINT64_C(0x480f903074714570), UINT64_C(0x1ce6e9a228a1b47d), UINT64_C(0x20a02a76119d7ddb), UINT64_C(0x744953e44d4d8cd6), UINT64_C(0x8972d952a83c9fc1), UINT64_C(0xdd9ba0c0f4ec6ecc), UINT64_C(0xf1ff5efada450c8d), UINT64_C(0xa51627688695fd80), UINT64_C(0x582dadde63e4ee97), UINT64_C(0x0cc4d44c3f341f9a), UINT64_C(0x308217980608d63c), UINT64_C(0x646b6e0a5ad82731), UINT64_C(0x9950e4bcbfa93426), UINT64_C(0xcdb99d2ee379c52b), UINT64_C(0x90fb71cad654a0f5), UINT64_C(0xc41208588a8451f8), UINT64_C(0x392982ee6ff542ef), UINT64_C(0x6dc0fb7c3325b3e2), UINT64_C(0x518638a80a197a44), UINT64_C(0x056f413a56c98b49), UINT64_C(0xf854cb8cb3b8985e), UINT64_C(0xacbdb21eef686953), UINT64_C(0x80d94c24c1c10b12), UINT64_C(0xd43035b69d11fa1f), UINT64_C(0x290bbf007860e908), UINT64_C(0x7de2c69224b01805), UINT64_C(0x41a405461d8cd1a3), UINT64_C(0x154d7cd4415c20ae), UINT64_C(0xe876f662a42d33b9), UINT64_C(0xbc9f8ff0f8fdc2b4), UINT64_C(0xb0bf0a16f97ff73b), UINT64_C(0xe4567384a5af0636), UINT64_C(0x196df93240de1521), UINT64_C(0x4d8480a01c0ee42c), UINT64_C(0x71c2437425322d8a), UINT64_C(0x252b3ae679e2dc87), UINT64_C(0xd810b0509c93cf90), UINT64_C(0x8cf9c9c2c0433e9d), UINT64_C(0xa09d37f8eeea5cdc), UINT64_C(0xf4744e6ab23aadd1), UINT64_C(0x094fc4dc574bbec6), UINT64_C(0x5da6bd4e0b9b4fcb), UINT64_C(0x61e07e9a32a7866d), UINT64_C(0x350907086e777760), UINT64_C(0xc8328dbe8b066477), UINT64_C(0x9cdbf42cd7d6957a), UINT64_C(0xd073867288020f69), UINT64_C(0x849affe0d4d2fe64), UINT64_C(0x79a1755631a3ed73), UINT64_C(0x2d480cc46d731c7e), UINT64_C(0x110ecf10544fd5d8), UINT64_C(0x45e7b682089f24d5), UINT64_C(0xb8dc3c34edee37c2), UINT64_C(0xec3545a6b13ec6cf), UINT64_C(0xc051bb9c9f97a48e), UINT64_C(0x94b8c20ec3475583), UINT64_C(0x698348b826364694), UINT64_C(0x3d6a312a7ae6b799), UINT64_C(0x012cf2fe43da7e3f), UINT64_C(0x55c58b6c1f0a8f32), UINT64_C(0xa8fe01dafa7b9c25), UINT64_C(0xfc177848a6ab6d28), UINT64_C(0xf037fdaea72958a7), UINT64_C(0xa4de843cfbf9a9aa), UINT64_C(0x59e50e8a1e88babd), UINT64_C(0x0d0c771842584bb0), UINT64_C(0x314ab4cc7b648216), UINT64_C(0x65a3cd5e27b4731b), UINT64_C(0x989847e8c2c5600c), UINT64_C(0xcc713e7a9e159101), 
UINT64_C(0xe015c040b0bcf340), UINT64_C(0xb4fcb9d2ec6c024d), UINT64_C(0x49c73364091d115a), UINT64_C(0x1d2e4af655cde057), UINT64_C(0x216889226cf129f1), UINT64_C(0x7581f0b03021d8fc), UINT64_C(0x88ba7a06d550cbeb), UINT64_C(0xdc53039489803ae6), UINT64_C(0x11ea9eba6af9ffcd), UINT64_C(0x4503e72836290ec0), UINT64_C(0xb8386d9ed3581dd7), UINT64_C(0xecd1140c8f88ecda), UINT64_C(0xd097d7d8b6b4257c), UINT64_C(0x847eae4aea64d471), UINT64_C(0x794524fc0f15c766), UINT64_C(0x2dac5d6e53c5366b), UINT64_C(0x01c8a3547d6c542a), UINT64_C(0x5521dac621bca527), UINT64_C(0xa81a5070c4cdb630), UINT64_C(0xfcf329e2981d473d), UINT64_C(0xc0b5ea36a1218e9b), UINT64_C(0x945c93a4fdf17f96), UINT64_C(0x6967191218806c81), UINT64_C(0x3d8e608044509d8c), UINT64_C(0x31aee56645d2a803), UINT64_C(0x65479cf41902590e), UINT64_C(0x987c1642fc734a19), UINT64_C(0xcc956fd0a0a3bb14), UINT64_C(0xf0d3ac04999f72b2), UINT64_C(0xa43ad596c54f83bf), UINT64_C(0x59015f20203e90a8), UINT64_C(0x0de826b27cee61a5), UINT64_C(0x218cd888524703e4), UINT64_C(0x7565a11a0e97f2e9), UINT64_C(0x885e2bacebe6e1fe), UINT64_C(0xdcb7523eb73610f3), UINT64_C(0xe0f191ea8e0ad955), UINT64_C(0xb418e878d2da2858), UINT64_C(0x492362ce37ab3b4f), UINT64_C(0x1dca1b5c6b7bca42), UINT64_C(0x5162690234af5051), UINT64_C(0x058b1090687fa15c), UINT64_C(0xf8b09a268d0eb24b), UINT64_C(0xac59e3b4d1de4346), UINT64_C(0x901f2060e8e28ae0), UINT64_C(0xc4f659f2b4327bed), UINT64_C(0x39cdd344514368fa), UINT64_C(0x6d24aad60d9399f7), UINT64_C(0x414054ec233afbb6), UINT64_C(0x15a92d7e7fea0abb), UINT64_C(0xe892a7c89a9b19ac), UINT64_C(0xbc7bde5ac64be8a1), UINT64_C(0x803d1d8eff772107), UINT64_C(0xd4d4641ca3a7d00a), UINT64_C(0x29efeeaa46d6c31d), UINT64_C(0x7d0697381a063210), UINT64_C(0x712612de1b84079f), UINT64_C(0x25cf6b4c4754f692), UINT64_C(0xd8f4e1faa225e585), UINT64_C(0x8c1d9868fef51488), UINT64_C(0xb05b5bbcc7c9dd2e), UINT64_C(0xe4b2222e9b192c23), UINT64_C(0x1989a8987e683f34), UINT64_C(0x4d60d10a22b8ce39), UINT64_C(0x61042f300c11ac78), UINT64_C(0x35ed56a250c15d75), UINT64_C(0xc8d6dc14b5b04e62), UINT64_C(0x9c3fa586e960bf6f), UINT64_C(0xa0796652d05c76c9), UINT64_C(0xf4901fc08c8c87c4), UINT64_C(0x09ab957669fd94d3), UINT64_C(0x5d42ece4352d65de) }, { UINT64_C(0x0000000000000000), UINT64_C(0xb32e4cbe03a75f6f), UINT64_C(0xf4843657a840a05b), UINT64_C(0x47aa7ae9abe7ff34), UINT64_C(0x7bd0c384ff8f5e33), UINT64_C(0xc8fe8f3afc28015c), UINT64_C(0x8f54f5d357cffe68), UINT64_C(0x3c7ab96d5468a107), UINT64_C(0xf7a18709ff1ebc66), UINT64_C(0x448fcbb7fcb9e309), UINT64_C(0x0325b15e575e1c3d), UINT64_C(0xb00bfde054f94352), UINT64_C(0x8c71448d0091e255), UINT64_C(0x3f5f08330336bd3a), UINT64_C(0x78f572daa8d1420e), UINT64_C(0xcbdb3e64ab761d61), UINT64_C(0x7d9ba13851336649), UINT64_C(0xceb5ed8652943926), UINT64_C(0x891f976ff973c612), UINT64_C(0x3a31dbd1fad4997d), UINT64_C(0x064b62bcaebc387a), UINT64_C(0xb5652e02ad1b6715), UINT64_C(0xf2cf54eb06fc9821), UINT64_C(0x41e11855055bc74e), UINT64_C(0x8a3a2631ae2dda2f), UINT64_C(0x39146a8fad8a8540), UINT64_C(0x7ebe1066066d7a74), UINT64_C(0xcd905cd805ca251b), UINT64_C(0xf1eae5b551a2841c), UINT64_C(0x42c4a90b5205db73), UINT64_C(0x056ed3e2f9e22447), UINT64_C(0xb6409f5cfa457b28), UINT64_C(0xfb374270a266cc92), UINT64_C(0x48190ecea1c193fd), UINT64_C(0x0fb374270a266cc9), UINT64_C(0xbc9d3899098133a6), UINT64_C(0x80e781f45de992a1), UINT64_C(0x33c9cd4a5e4ecdce), UINT64_C(0x7463b7a3f5a932fa), UINT64_C(0xc74dfb1df60e6d95), UINT64_C(0x0c96c5795d7870f4), UINT64_C(0xbfb889c75edf2f9b), UINT64_C(0xf812f32ef538d0af), UINT64_C(0x4b3cbf90f69f8fc0), UINT64_C(0x774606fda2f72ec7), UINT64_C(0xc4684a43a15071a8), 
UINT64_C(0x83c230aa0ab78e9c), UINT64_C(0x30ec7c140910d1f3), UINT64_C(0x86ace348f355aadb), UINT64_C(0x3582aff6f0f2f5b4), UINT64_C(0x7228d51f5b150a80), UINT64_C(0xc10699a158b255ef), UINT64_C(0xfd7c20cc0cdaf4e8), UINT64_C(0x4e526c720f7dab87), UINT64_C(0x09f8169ba49a54b3), UINT64_C(0xbad65a25a73d0bdc), UINT64_C(0x710d64410c4b16bd), UINT64_C(0xc22328ff0fec49d2), UINT64_C(0x85895216a40bb6e6), UINT64_C(0x36a71ea8a7ace989), UINT64_C(0x0adda7c5f3c4488e), UINT64_C(0xb9f3eb7bf06317e1), UINT64_C(0xfe5991925b84e8d5), UINT64_C(0x4d77dd2c5823b7ba), UINT64_C(0x64b62bcaebc387a1), UINT64_C(0xd7986774e864d8ce), UINT64_C(0x90321d9d438327fa), UINT64_C(0x231c512340247895), UINT64_C(0x1f66e84e144cd992), UINT64_C(0xac48a4f017eb86fd), UINT64_C(0xebe2de19bc0c79c9), UINT64_C(0x58cc92a7bfab26a6), UINT64_C(0x9317acc314dd3bc7), UINT64_C(0x2039e07d177a64a8), UINT64_C(0x67939a94bc9d9b9c), UINT64_C(0xd4bdd62abf3ac4f3), UINT64_C(0xe8c76f47eb5265f4), UINT64_C(0x5be923f9e8f53a9b), UINT64_C(0x1c4359104312c5af), UINT64_C(0xaf6d15ae40b59ac0), UINT64_C(0x192d8af2baf0e1e8), UINT64_C(0xaa03c64cb957be87), UINT64_C(0xeda9bca512b041b3), UINT64_C(0x5e87f01b11171edc), UINT64_C(0x62fd4976457fbfdb), UINT64_C(0xd1d305c846d8e0b4), UINT64_C(0x96797f21ed3f1f80), UINT64_C(0x2557339fee9840ef), UINT64_C(0xee8c0dfb45ee5d8e), UINT64_C(0x5da24145464902e1), UINT64_C(0x1a083bacedaefdd5), UINT64_C(0xa9267712ee09a2ba), UINT64_C(0x955cce7fba6103bd), UINT64_C(0x267282c1b9c65cd2), UINT64_C(0x61d8f8281221a3e6), UINT64_C(0xd2f6b4961186fc89), UINT64_C(0x9f8169ba49a54b33), UINT64_C(0x2caf25044a02145c), UINT64_C(0x6b055fede1e5eb68), UINT64_C(0xd82b1353e242b407), UINT64_C(0xe451aa3eb62a1500), UINT64_C(0x577fe680b58d4a6f), UINT64_C(0x10d59c691e6ab55b), UINT64_C(0xa3fbd0d71dcdea34), UINT64_C(0x6820eeb3b6bbf755), UINT64_C(0xdb0ea20db51ca83a), UINT64_C(0x9ca4d8e41efb570e), UINT64_C(0x2f8a945a1d5c0861), UINT64_C(0x13f02d374934a966), UINT64_C(0xa0de61894a93f609), UINT64_C(0xe7741b60e174093d), UINT64_C(0x545a57dee2d35652), UINT64_C(0xe21ac88218962d7a), UINT64_C(0x5134843c1b317215), UINT64_C(0x169efed5b0d68d21), UINT64_C(0xa5b0b26bb371d24e), UINT64_C(0x99ca0b06e7197349), UINT64_C(0x2ae447b8e4be2c26), UINT64_C(0x6d4e3d514f59d312), UINT64_C(0xde6071ef4cfe8c7d), UINT64_C(0x15bb4f8be788911c), UINT64_C(0xa6950335e42fce73), UINT64_C(0xe13f79dc4fc83147), UINT64_C(0x521135624c6f6e28), UINT64_C(0x6e6b8c0f1807cf2f), UINT64_C(0xdd45c0b11ba09040), UINT64_C(0x9aefba58b0476f74), UINT64_C(0x29c1f6e6b3e0301b), UINT64_C(0xc96c5795d7870f42), UINT64_C(0x7a421b2bd420502d), UINT64_C(0x3de861c27fc7af19), UINT64_C(0x8ec62d7c7c60f076), UINT64_C(0xb2bc941128085171), UINT64_C(0x0192d8af2baf0e1e), UINT64_C(0x4638a2468048f12a), UINT64_C(0xf516eef883efae45), UINT64_C(0x3ecdd09c2899b324), UINT64_C(0x8de39c222b3eec4b), UINT64_C(0xca49e6cb80d9137f), UINT64_C(0x7967aa75837e4c10), UINT64_C(0x451d1318d716ed17), UINT64_C(0xf6335fa6d4b1b278), UINT64_C(0xb199254f7f564d4c), UINT64_C(0x02b769f17cf11223), UINT64_C(0xb4f7f6ad86b4690b), UINT64_C(0x07d9ba1385133664), UINT64_C(0x4073c0fa2ef4c950), UINT64_C(0xf35d8c442d53963f), UINT64_C(0xcf273529793b3738), UINT64_C(0x7c0979977a9c6857), UINT64_C(0x3ba3037ed17b9763), UINT64_C(0x888d4fc0d2dcc80c), UINT64_C(0x435671a479aad56d), UINT64_C(0xf0783d1a7a0d8a02), UINT64_C(0xb7d247f3d1ea7536), UINT64_C(0x04fc0b4dd24d2a59), UINT64_C(0x3886b22086258b5e), UINT64_C(0x8ba8fe9e8582d431), UINT64_C(0xcc0284772e652b05), UINT64_C(0x7f2cc8c92dc2746a), UINT64_C(0x325b15e575e1c3d0), UINT64_C(0x8175595b76469cbf), UINT64_C(0xc6df23b2dda1638b), UINT64_C(0x75f16f0cde063ce4), 
UINT64_C(0x498bd6618a6e9de3), UINT64_C(0xfaa59adf89c9c28c), UINT64_C(0xbd0fe036222e3db8), UINT64_C(0x0e21ac88218962d7), UINT64_C(0xc5fa92ec8aff7fb6), UINT64_C(0x76d4de52895820d9), UINT64_C(0x317ea4bb22bfdfed), UINT64_C(0x8250e80521188082), UINT64_C(0xbe2a516875702185), UINT64_C(0x0d041dd676d77eea), UINT64_C(0x4aae673fdd3081de), UINT64_C(0xf9802b81de97deb1), UINT64_C(0x4fc0b4dd24d2a599), UINT64_C(0xfceef8632775faf6), UINT64_C(0xbb44828a8c9205c2), UINT64_C(0x086ace348f355aad), UINT64_C(0x34107759db5dfbaa), UINT64_C(0x873e3be7d8faa4c5), UINT64_C(0xc094410e731d5bf1), UINT64_C(0x73ba0db070ba049e), UINT64_C(0xb86133d4dbcc19ff), UINT64_C(0x0b4f7f6ad86b4690), UINT64_C(0x4ce50583738cb9a4), UINT64_C(0xffcb493d702be6cb), UINT64_C(0xc3b1f050244347cc), UINT64_C(0x709fbcee27e418a3), UINT64_C(0x3735c6078c03e797), UINT64_C(0x841b8ab98fa4b8f8), UINT64_C(0xadda7c5f3c4488e3), UINT64_C(0x1ef430e13fe3d78c), UINT64_C(0x595e4a08940428b8), UINT64_C(0xea7006b697a377d7), UINT64_C(0xd60abfdbc3cbd6d0), UINT64_C(0x6524f365c06c89bf), UINT64_C(0x228e898c6b8b768b), UINT64_C(0x91a0c532682c29e4), UINT64_C(0x5a7bfb56c35a3485), UINT64_C(0xe955b7e8c0fd6bea), UINT64_C(0xaeffcd016b1a94de), UINT64_C(0x1dd181bf68bdcbb1), UINT64_C(0x21ab38d23cd56ab6), UINT64_C(0x9285746c3f7235d9), UINT64_C(0xd52f0e859495caed), UINT64_C(0x6601423b97329582), UINT64_C(0xd041dd676d77eeaa), UINT64_C(0x636f91d96ed0b1c5), UINT64_C(0x24c5eb30c5374ef1), UINT64_C(0x97eba78ec690119e), UINT64_C(0xab911ee392f8b099), UINT64_C(0x18bf525d915feff6), UINT64_C(0x5f1528b43ab810c2), UINT64_C(0xec3b640a391f4fad), UINT64_C(0x27e05a6e926952cc), UINT64_C(0x94ce16d091ce0da3), UINT64_C(0xd3646c393a29f297), UINT64_C(0x604a2087398eadf8), UINT64_C(0x5c3099ea6de60cff), UINT64_C(0xef1ed5546e415390), UINT64_C(0xa8b4afbdc5a6aca4), UINT64_C(0x1b9ae303c601f3cb), UINT64_C(0x56ed3e2f9e224471), UINT64_C(0xe5c372919d851b1e), UINT64_C(0xa26908783662e42a), UINT64_C(0x114744c635c5bb45), UINT64_C(0x2d3dfdab61ad1a42), UINT64_C(0x9e13b115620a452d), UINT64_C(0xd9b9cbfcc9edba19), UINT64_C(0x6a978742ca4ae576), UINT64_C(0xa14cb926613cf817), UINT64_C(0x1262f598629ba778), UINT64_C(0x55c88f71c97c584c), UINT64_C(0xe6e6c3cfcadb0723), UINT64_C(0xda9c7aa29eb3a624), UINT64_C(0x69b2361c9d14f94b), UINT64_C(0x2e184cf536f3067f), UINT64_C(0x9d36004b35545910), UINT64_C(0x2b769f17cf112238), UINT64_C(0x9858d3a9ccb67d57), UINT64_C(0xdff2a94067518263), UINT64_C(0x6cdce5fe64f6dd0c), UINT64_C(0x50a65c93309e7c0b), UINT64_C(0xe388102d33392364), UINT64_C(0xa4226ac498dedc50), UINT64_C(0x170c267a9b79833f), UINT64_C(0xdcd7181e300f9e5e), UINT64_C(0x6ff954a033a8c131), UINT64_C(0x28532e49984f3e05), UINT64_C(0x9b7d62f79be8616a), UINT64_C(0xa707db9acf80c06d), UINT64_C(0x14299724cc279f02), UINT64_C(0x5383edcd67c06036), UINT64_C(0xe0ada17364673f59) } }; static const uint64_t crc64_interleaved_table[4][256] = { { UINT64_C(0x0000000000000000), UINT64_C(0xe88a0d0c5521de3d), UINT64_C(0x43ccb533054da2ff), UINT64_C(0xab46b83f506c7cc2), UINT64_C(0x87996a660a9b45fe), UINT64_C(0x6f13676a5fba9bc3), UINT64_C(0xc455df550fd6e701), UINT64_C(0x2cdfd2595af7393c), UINT64_C(0x9dea7be7ba389579), UINT64_C(0x756076ebef194b44), UINT64_C(0xde26ced4bf753786), UINT64_C(0x36acc3d8ea54e9bb), UINT64_C(0x1a731181b0a3d087), UINT64_C(0xf2f91c8de5820eba), UINT64_C(0x59bfa4b2b5ee7278), UINT64_C(0xb135a9bee0cfac45), UINT64_C(0xa90c58e4db7f3477), UINT64_C(0x418655e88e5eea4a), UINT64_C(0xeac0edd7de329688), UINT64_C(0x024ae0db8b1348b5), UINT64_C(0x2e953282d1e47189), UINT64_C(0xc61f3f8e84c5afb4), UINT64_C(0x6d5987b1d4a9d376), UINT64_C(0x85d38abd81880d4b), 
UINT64_C(0x34e623036147a10e), UINT64_C(0xdc6c2e0f34667f33), UINT64_C(0x772a9630640a03f1), UINT64_C(0x9fa09b3c312bddcc), UINT64_C(0xb37f49656bdce4f0), UINT64_C(0x5bf544693efd3acd), UINT64_C(0xf0b3fc566e91460f), UINT64_C(0x1839f15a3bb09832), UINT64_C(0xc0c01ee219f0766b), UINT64_C(0x284a13ee4cd1a856), UINT64_C(0x830cabd11cbdd494), UINT64_C(0x6b86a6dd499c0aa9), UINT64_C(0x47597484136b3395), UINT64_C(0xafd37988464aeda8), UINT64_C(0x0495c1b71626916a), UINT64_C(0xec1fccbb43074f57), UINT64_C(0x5d2a6505a3c8e312), UINT64_C(0xb5a06809f6e93d2f), UINT64_C(0x1ee6d036a68541ed), UINT64_C(0xf66cdd3af3a49fd0), UINT64_C(0xdab30f63a953a6ec), UINT64_C(0x3239026ffc7278d1), UINT64_C(0x997fba50ac1e0413), UINT64_C(0x71f5b75cf93fda2e), UINT64_C(0x69cc4606c28f421c), UINT64_C(0x81464b0a97ae9c21), UINT64_C(0x2a00f335c7c2e0e3), UINT64_C(0xc28afe3992e33ede), UINT64_C(0xee552c60c81407e2), UINT64_C(0x06df216c9d35d9df), UINT64_C(0xad999953cd59a51d), UINT64_C(0x4513945f98787b20), UINT64_C(0xf4263de178b7d765), UINT64_C(0x1cac30ed2d960958), UINT64_C(0xb7ea88d27dfa759a), UINT64_C(0x5f6085de28dbaba7), UINT64_C(0x73bf5787722c929b), UINT64_C(0x9b355a8b270d4ca6), UINT64_C(0x3073e2b477613064), UINT64_C(0xd8f9efb82240ee59), UINT64_C(0x135892ef9ceef253), UINT64_C(0xfbd29fe3c9cf2c6e), UINT64_C(0x509427dc99a350ac), UINT64_C(0xb81e2ad0cc828e91), UINT64_C(0x94c1f8899675b7ad), UINT64_C(0x7c4bf585c3546990), UINT64_C(0xd70d4dba93381552), UINT64_C(0x3f8740b6c619cb6f), UINT64_C(0x8eb2e90826d6672a), UINT64_C(0x6638e40473f7b917), UINT64_C(0xcd7e5c3b239bc5d5), UINT64_C(0x25f4513776ba1be8), UINT64_C(0x092b836e2c4d22d4), UINT64_C(0xe1a18e62796cfce9), UINT64_C(0x4ae7365d2900802b), UINT64_C(0xa26d3b517c215e16), UINT64_C(0xba54ca0b4791c624), UINT64_C(0x52dec70712b01819), UINT64_C(0xf9987f3842dc64db), UINT64_C(0x1112723417fdbae6), UINT64_C(0x3dcda06d4d0a83da), UINT64_C(0xd547ad61182b5de7), UINT64_C(0x7e01155e48472125), UINT64_C(0x968b18521d66ff18), UINT64_C(0x27beb1ecfda9535d), UINT64_C(0xcf34bce0a8888d60), UINT64_C(0x647204dff8e4f1a2), UINT64_C(0x8cf809d3adc52f9f), UINT64_C(0xa027db8af73216a3), UINT64_C(0x48add686a213c89e), UINT64_C(0xe3eb6eb9f27fb45c), UINT64_C(0x0b6163b5a75e6a61), UINT64_C(0xd3988c0d851e8438), UINT64_C(0x3b128101d03f5a05), UINT64_C(0x9054393e805326c7), UINT64_C(0x78de3432d572f8fa), UINT64_C(0x5401e66b8f85c1c6), UINT64_C(0xbc8beb67daa41ffb), UINT64_C(0x17cd53588ac86339), UINT64_C(0xff475e54dfe9bd04), UINT64_C(0x4e72f7ea3f261141), UINT64_C(0xa6f8fae66a07cf7c), UINT64_C(0x0dbe42d93a6bb3be), UINT64_C(0xe5344fd56f4a6d83), UINT64_C(0xc9eb9d8c35bd54bf), UINT64_C(0x21619080609c8a82), UINT64_C(0x8a2728bf30f0f640), UINT64_C(0x62ad25b365d1287d), UINT64_C(0x7a94d4e95e61b04f), UINT64_C(0x921ed9e50b406e72), UINT64_C(0x395861da5b2c12b0), UINT64_C(0xd1d26cd60e0dcc8d), UINT64_C(0xfd0dbe8f54faf5b1), UINT64_C(0x1587b38301db2b8c), UINT64_C(0xbec10bbc51b7574e), UINT64_C(0x564b06b004968973), UINT64_C(0xe77eaf0ee4592536), UINT64_C(0x0ff4a202b178fb0b), UINT64_C(0xa4b21a3de11487c9), UINT64_C(0x4c381731b43559f4), UINT64_C(0x60e7c568eec260c8), UINT64_C(0x886dc864bbe3bef5), UINT64_C(0x232b705beb8fc237), UINT64_C(0xcba17d57beae1c0a), UINT64_C(0x26b125df39dde4a6), UINT64_C(0xce3b28d36cfc3a9b), UINT64_C(0x657d90ec3c904659), UINT64_C(0x8df79de069b19864), UINT64_C(0xa1284fb93346a158), UINT64_C(0x49a242b566677f65), UINT64_C(0xe2e4fa8a360b03a7), UINT64_C(0x0a6ef786632add9a), UINT64_C(0xbb5b5e3883e571df), UINT64_C(0x53d15334d6c4afe2), UINT64_C(0xf897eb0b86a8d320), UINT64_C(0x101de607d3890d1d), UINT64_C(0x3cc2345e897e3421), UINT64_C(0xd4483952dc5fea1c), 
UINT64_C(0x7f0e816d8c3396de), UINT64_C(0x97848c61d91248e3), UINT64_C(0x8fbd7d3be2a2d0d1), UINT64_C(0x67377037b7830eec), UINT64_C(0xcc71c808e7ef722e), UINT64_C(0x24fbc504b2ceac13), UINT64_C(0x0824175de839952f), UINT64_C(0xe0ae1a51bd184b12), UINT64_C(0x4be8a26eed7437d0), UINT64_C(0xa362af62b855e9ed), UINT64_C(0x125706dc589a45a8), UINT64_C(0xfadd0bd00dbb9b95), UINT64_C(0x519bb3ef5dd7e757), UINT64_C(0xb911bee308f6396a), UINT64_C(0x95ce6cba52010056), UINT64_C(0x7d4461b60720de6b), UINT64_C(0xd602d989574ca2a9), UINT64_C(0x3e88d485026d7c94), UINT64_C(0xe6713b3d202d92cd), UINT64_C(0x0efb3631750c4cf0), UINT64_C(0xa5bd8e0e25603032), UINT64_C(0x4d3783027041ee0f), UINT64_C(0x61e8515b2ab6d733), UINT64_C(0x89625c577f97090e), UINT64_C(0x2224e4682ffb75cc), UINT64_C(0xcaaee9647adaabf1), UINT64_C(0x7b9b40da9a1507b4), UINT64_C(0x93114dd6cf34d989), UINT64_C(0x3857f5e99f58a54b), UINT64_C(0xd0ddf8e5ca797b76), UINT64_C(0xfc022abc908e424a), UINT64_C(0x148827b0c5af9c77), UINT64_C(0xbfce9f8f95c3e0b5), UINT64_C(0x57449283c0e23e88), UINT64_C(0x4f7d63d9fb52a6ba), UINT64_C(0xa7f76ed5ae737887), UINT64_C(0x0cb1d6eafe1f0445), UINT64_C(0xe43bdbe6ab3eda78), UINT64_C(0xc8e409bff1c9e344), UINT64_C(0x206e04b3a4e83d79), UINT64_C(0x8b28bc8cf48441bb), UINT64_C(0x63a2b180a1a59f86), UINT64_C(0xd297183e416a33c3), UINT64_C(0x3a1d1532144bedfe), UINT64_C(0x915bad0d4427913c), UINT64_C(0x79d1a00111064f01), UINT64_C(0x550e72584bf1763d), UINT64_C(0xbd847f541ed0a800), UINT64_C(0x16c2c76b4ebcd4c2), UINT64_C(0xfe48ca671b9d0aff), UINT64_C(0x35e9b730a53316f5), UINT64_C(0xdd63ba3cf012c8c8), UINT64_C(0x76250203a07eb40a), UINT64_C(0x9eaf0f0ff55f6a37), UINT64_C(0xb270dd56afa8530b), UINT64_C(0x5afad05afa898d36), UINT64_C(0xf1bc6865aae5f1f4), UINT64_C(0x19366569ffc42fc9), UINT64_C(0xa803ccd71f0b838c), UINT64_C(0x4089c1db4a2a5db1), UINT64_C(0xebcf79e41a462173), UINT64_C(0x034574e84f67ff4e), UINT64_C(0x2f9aa6b11590c672), UINT64_C(0xc710abbd40b1184f), UINT64_C(0x6c56138210dd648d), UINT64_C(0x84dc1e8e45fcbab0), UINT64_C(0x9ce5efd47e4c2282), UINT64_C(0x746fe2d82b6dfcbf), UINT64_C(0xdf295ae77b01807d), UINT64_C(0x37a357eb2e205e40), UINT64_C(0x1b7c85b274d7677c), UINT64_C(0xf3f688be21f6b941), UINT64_C(0x58b03081719ac583), UINT64_C(0xb03a3d8d24bb1bbe), UINT64_C(0x010f9433c474b7fb), UINT64_C(0xe985993f915569c6), UINT64_C(0x42c32100c1391504), UINT64_C(0xaa492c0c9418cb39), UINT64_C(0x8696fe55ceeff205), UINT64_C(0x6e1cf3599bce2c38), UINT64_C(0xc55a4b66cba250fa), UINT64_C(0x2dd0466a9e838ec7), UINT64_C(0xf529a9d2bcc3609e), UINT64_C(0x1da3a4dee9e2bea3), UINT64_C(0xb6e51ce1b98ec261), UINT64_C(0x5e6f11edecaf1c5c), UINT64_C(0x72b0c3b4b6582560), UINT64_C(0x9a3aceb8e379fb5d), UINT64_C(0x317c7687b315879f), UINT64_C(0xd9f67b8be63459a2), UINT64_C(0x68c3d23506fbf5e7), UINT64_C(0x8049df3953da2bda), UINT64_C(0x2b0f670603b65718), UINT64_C(0xc3856a0a56978925), UINT64_C(0xef5ab8530c60b019), UINT64_C(0x07d0b55f59416e24), UINT64_C(0xac960d60092d12e6), UINT64_C(0x441c006c5c0cccdb), UINT64_C(0x5c25f13667bc54e9), UINT64_C(0xb4affc3a329d8ad4), UINT64_C(0x1fe9440562f1f616), UINT64_C(0xf763490937d0282b), UINT64_C(0xdbbc9b506d271117), UINT64_C(0x3336965c3806cf2a), UINT64_C(0x98702e63686ab3e8), UINT64_C(0x70fa236f3d4b6dd5), UINT64_C(0xc1cf8ad1dd84c190), UINT64_C(0x294587dd88a51fad), UINT64_C(0x82033fe2d8c9636f), UINT64_C(0x6a8932ee8de8bd52), UINT64_C(0x4656e0b7d71f846e), UINT64_C(0xaedcedbb823e5a53), UINT64_C(0x059a5584d2522691), UINT64_C(0xed1058888773f8ac) }, { UINT64_C(0x0000000000000000), UINT64_C(0x4d624bbe73bbc94c), UINT64_C(0x9ac4977ce7779298), UINT64_C(0xd7a6dcc294cc5bd4), 
UINT64_C(0xa75181d261e13bb5), UINT64_C(0xea33ca6c125af2f9), UINT64_C(0x3d9516ae8696a92d), UINT64_C(0x70f75d10f52d6061), UINT64_C(0xdc7bac8f6ccc69ef), UINT64_C(0x9119e7311f77a0a3), UINT64_C(0x46bf3bf38bbbfb77), UINT64_C(0x0bdd704df800323b), UINT64_C(0x7b2a2d5d0d2d525a), UINT64_C(0x364866e37e969b16), UINT64_C(0xe1eeba21ea5ac0c2), UINT64_C(0xac8cf19f99e1098e), UINT64_C(0x2a2ff6357696cd5b), UINT64_C(0x674dbd8b052d0417), UINT64_C(0xb0eb614991e15fc3), UINT64_C(0xfd892af7e25a968f), UINT64_C(0x8d7e77e71777f6ee), UINT64_C(0xc01c3c5964cc3fa2), UINT64_C(0x17bae09bf0006476), UINT64_C(0x5ad8ab2583bbad3a), UINT64_C(0xf6545aba1a5aa4b4), UINT64_C(0xbb36110469e16df8), UINT64_C(0x6c90cdc6fd2d362c), UINT64_C(0x21f286788e96ff60), UINT64_C(0x5105db687bbb9f01), UINT64_C(0x1c6790d60800564d), UINT64_C(0xcbc14c149ccc0d99), UINT64_C(0x86a307aaef77c4d5), UINT64_C(0x545fec6aed2d9ab6), UINT64_C(0x193da7d49e9653fa), UINT64_C(0xce9b7b160a5a082e), UINT64_C(0x83f930a879e1c162), UINT64_C(0xf30e6db88ccca103), UINT64_C(0xbe6c2606ff77684f), UINT64_C(0x69cafac46bbb339b), UINT64_C(0x24a8b17a1800fad7), UINT64_C(0x882440e581e1f359), UINT64_C(0xc5460b5bf25a3a15), UINT64_C(0x12e0d799669661c1), UINT64_C(0x5f829c27152da88d), UINT64_C(0x2f75c137e000c8ec), UINT64_C(0x62178a8993bb01a0), UINT64_C(0xb5b1564b07775a74), UINT64_C(0xf8d31df574cc9338), UINT64_C(0x7e701a5f9bbb57ed), UINT64_C(0x331251e1e8009ea1), UINT64_C(0xe4b48d237cccc575), UINT64_C(0xa9d6c69d0f770c39), UINT64_C(0xd9219b8dfa5a6c58), UINT64_C(0x9443d03389e1a514), UINT64_C(0x43e50cf11d2dfec0), UINT64_C(0x0e87474f6e96378c), UINT64_C(0xa20bb6d0f7773e02), UINT64_C(0xef69fd6e84ccf74e), UINT64_C(0x38cf21ac1000ac9a), UINT64_C(0x75ad6a1263bb65d6), UINT64_C(0x055a3702969605b7), UINT64_C(0x48387cbce52dccfb), UINT64_C(0x9f9ea07e71e1972f), UINT64_C(0xd2fcebc0025a5e63), UINT64_C(0xa8bfd8d5da5b356c), UINT64_C(0xe5dd936ba9e0fc20), UINT64_C(0x327b4fa93d2ca7f4), UINT64_C(0x7f1904174e976eb8), UINT64_C(0x0fee5907bbba0ed9), UINT64_C(0x428c12b9c801c795), UINT64_C(0x952ace7b5ccd9c41), UINT64_C(0xd84885c52f76550d), UINT64_C(0x74c4745ab6975c83), UINT64_C(0x39a63fe4c52c95cf), UINT64_C(0xee00e32651e0ce1b), UINT64_C(0xa362a898225b0757), UINT64_C(0xd395f588d7766736), UINT64_C(0x9ef7be36a4cdae7a), UINT64_C(0x495162f43001f5ae), UINT64_C(0x0433294a43ba3ce2), UINT64_C(0x82902ee0accdf837), UINT64_C(0xcff2655edf76317b), UINT64_C(0x1854b99c4bba6aaf), UINT64_C(0x5536f2223801a3e3), UINT64_C(0x25c1af32cd2cc382), UINT64_C(0x68a3e48cbe970ace), UINT64_C(0xbf05384e2a5b511a), UINT64_C(0xf26773f059e09856), UINT64_C(0x5eeb826fc00191d8), UINT64_C(0x1389c9d1b3ba5894), UINT64_C(0xc42f151327760340), UINT64_C(0x894d5ead54cdca0c), UINT64_C(0xf9ba03bda1e0aa6d), UINT64_C(0xb4d84803d25b6321), UINT64_C(0x637e94c1469738f5), UINT64_C(0x2e1cdf7f352cf1b9), UINT64_C(0xfce034bf3776afda), UINT64_C(0xb1827f0144cd6696), UINT64_C(0x6624a3c3d0013d42), UINT64_C(0x2b46e87da3baf40e), UINT64_C(0x5bb1b56d5697946f), UINT64_C(0x16d3fed3252c5d23), UINT64_C(0xc1752211b1e006f7), UINT64_C(0x8c1769afc25bcfbb), UINT64_C(0x209b98305bbac635), UINT64_C(0x6df9d38e28010f79), UINT64_C(0xba5f0f4cbccd54ad), UINT64_C(0xf73d44f2cf769de1), UINT64_C(0x87ca19e23a5bfd80), UINT64_C(0xcaa8525c49e034cc), UINT64_C(0x1d0e8e9edd2c6f18), UINT64_C(0x506cc520ae97a654), UINT64_C(0xd6cfc28a41e06281), UINT64_C(0x9bad8934325babcd), UINT64_C(0x4c0b55f6a697f019), UINT64_C(0x01691e48d52c3955), UINT64_C(0x719e435820015934), UINT64_C(0x3cfc08e653ba9078), UINT64_C(0xeb5ad424c776cbac), UINT64_C(0xa6389f9ab4cd02e0), UINT64_C(0x0ab46e052d2c0b6e), UINT64_C(0x47d625bb5e97c222), 
UINT64_C(0x9070f979ca5b99f6), UINT64_C(0xdd12b2c7b9e050ba), UINT64_C(0xade5efd74ccd30db), UINT64_C(0xe087a4693f76f997), UINT64_C(0x372178ababbaa243), UINT64_C(0x7a433315d8016b0f), UINT64_C(0xc3a71e801bb8745d), UINT64_C(0x8ec5553e6803bd11), UINT64_C(0x596389fcfccfe6c5), UINT64_C(0x1401c2428f742f89), UINT64_C(0x64f69f527a594fe8), UINT64_C(0x2994d4ec09e286a4), UINT64_C(0xfe32082e9d2edd70), UINT64_C(0xb3504390ee95143c), UINT64_C(0x1fdcb20f77741db2), UINT64_C(0x52bef9b104cfd4fe), UINT64_C(0x8518257390038f2a), UINT64_C(0xc87a6ecde3b84666), UINT64_C(0xb88d33dd16952607), UINT64_C(0xf5ef7863652eef4b), UINT64_C(0x2249a4a1f1e2b49f), UINT64_C(0x6f2bef1f82597dd3), UINT64_C(0xe988e8b56d2eb906), UINT64_C(0xa4eaa30b1e95704a), UINT64_C(0x734c7fc98a592b9e), UINT64_C(0x3e2e3477f9e2e2d2), UINT64_C(0x4ed969670ccf82b3), UINT64_C(0x03bb22d97f744bff), UINT64_C(0xd41dfe1bebb8102b), UINT64_C(0x997fb5a59803d967), UINT64_C(0x35f3443a01e2d0e9), UINT64_C(0x78910f84725919a5), UINT64_C(0xaf37d346e6954271), UINT64_C(0xe25598f8952e8b3d), UINT64_C(0x92a2c5e86003eb5c), UINT64_C(0xdfc08e5613b82210), UINT64_C(0x08665294877479c4), UINT64_C(0x4504192af4cfb088), UINT64_C(0x97f8f2eaf695eeeb), UINT64_C(0xda9ab954852e27a7), UINT64_C(0x0d3c659611e27c73), UINT64_C(0x405e2e286259b53f), UINT64_C(0x30a973389774d55e), UINT64_C(0x7dcb3886e4cf1c12), UINT64_C(0xaa6de444700347c6), UINT64_C(0xe70faffa03b88e8a), UINT64_C(0x4b835e659a598704), UINT64_C(0x06e115dbe9e24e48), UINT64_C(0xd147c9197d2e159c), UINT64_C(0x9c2582a70e95dcd0), UINT64_C(0xecd2dfb7fbb8bcb1), UINT64_C(0xa1b09409880375fd), UINT64_C(0x761648cb1ccf2e29), UINT64_C(0x3b7403756f74e765), UINT64_C(0xbdd704df800323b0), UINT64_C(0xf0b54f61f3b8eafc), UINT64_C(0x271393a36774b128), UINT64_C(0x6a71d81d14cf7864), UINT64_C(0x1a86850de1e21805), UINT64_C(0x57e4ceb39259d149), UINT64_C(0x8042127106958a9d), UINT64_C(0xcd2059cf752e43d1), UINT64_C(0x61aca850eccf4a5f), UINT64_C(0x2ccee3ee9f748313), UINT64_C(0xfb683f2c0bb8d8c7), UINT64_C(0xb60a74927803118b), UINT64_C(0xc6fd29828d2e71ea), UINT64_C(0x8b9f623cfe95b8a6), UINT64_C(0x5c39befe6a59e372), UINT64_C(0x115bf54019e22a3e), UINT64_C(0x6b18c655c1e34131), UINT64_C(0x267a8debb258887d), UINT64_C(0xf1dc51292694d3a9), UINT64_C(0xbcbe1a97552f1ae5), UINT64_C(0xcc494787a0027a84), UINT64_C(0x812b0c39d3b9b3c8), UINT64_C(0x568dd0fb4775e81c), UINT64_C(0x1bef9b4534ce2150), UINT64_C(0xb7636adaad2f28de), UINT64_C(0xfa012164de94e192), UINT64_C(0x2da7fda64a58ba46), UINT64_C(0x60c5b61839e3730a), UINT64_C(0x1032eb08ccce136b), UINT64_C(0x5d50a0b6bf75da27), UINT64_C(0x8af67c742bb981f3), UINT64_C(0xc79437ca580248bf), UINT64_C(0x41373060b7758c6a), UINT64_C(0x0c557bdec4ce4526), UINT64_C(0xdbf3a71c50021ef2), UINT64_C(0x9691eca223b9d7be), UINT64_C(0xe666b1b2d694b7df), UINT64_C(0xab04fa0ca52f7e93), UINT64_C(0x7ca226ce31e32547), UINT64_C(0x31c06d704258ec0b), UINT64_C(0x9d4c9cefdbb9e585), UINT64_C(0xd02ed751a8022cc9), UINT64_C(0x07880b933cce771d), UINT64_C(0x4aea402d4f75be51), UINT64_C(0x3a1d1d3dba58de30), UINT64_C(0x777f5683c9e3177c), UINT64_C(0xa0d98a415d2f4ca8), UINT64_C(0xedbbc1ff2e9485e4), UINT64_C(0x3f472a3f2ccedb87), UINT64_C(0x722561815f7512cb), UINT64_C(0xa583bd43cbb9491f), UINT64_C(0xe8e1f6fdb8028053), UINT64_C(0x9816abed4d2fe032), UINT64_C(0xd574e0533e94297e), UINT64_C(0x02d23c91aa5872aa), UINT64_C(0x4fb0772fd9e3bbe6), UINT64_C(0xe33c86b04002b268), UINT64_C(0xae5ecd0e33b97b24), UINT64_C(0x79f811cca77520f0), UINT64_C(0x349a5a72d4cee9bc), UINT64_C(0x446d076221e389dd), UINT64_C(0x090f4cdc52584091), UINT64_C(0xdea9901ec6941b45), UINT64_C(0x93cbdba0b52fd209), 
UINT64_C(0x1568dc0a5a5816dc), UINT64_C(0x580a97b429e3df90), UINT64_C(0x8fac4b76bd2f8444), UINT64_C(0xc2ce00c8ce944d08), UINT64_C(0xb2395dd83bb92d69), UINT64_C(0xff5b16664802e425), UINT64_C(0x28fdcaa4dccebff1), UINT64_C(0x659f811aaf7576bd), UINT64_C(0xc913708536947f33), UINT64_C(0x84713b3b452fb67f), UINT64_C(0x53d7e7f9d1e3edab), UINT64_C(0x1eb5ac47a25824e7), UINT64_C(0x6e42f15757754486), UINT64_C(0x2320bae924ce8dca), UINT64_C(0xf486662bb002d61e), UINT64_C(0xb9e42d95c3b91f52) }, { UINT64_C(0x0000000000000000), UINT64_C(0x1596922b987ef63f), UINT64_C(0x2b2d245730fdec7e), UINT64_C(0x3ebbb67ca8831a41), UINT64_C(0x565a48ae61fbd8fc), UINT64_C(0x43ccda85f9852ec3), UINT64_C(0x7d776cf951063482), UINT64_C(0x68e1fed2c978c2bd), UINT64_C(0xacb4915cc3f7b1f8), UINT64_C(0xb92203775b8947c7), UINT64_C(0x8799b50bf30a5d86), UINT64_C(0x920f27206b74abb9), UINT64_C(0xfaeed9f2a20c6904), UINT64_C(0xef784bd93a729f3b), UINT64_C(0xd1c3fda592f1857a), UINT64_C(0xc4556f8e0a8f7345), UINT64_C(0xcbb18d9228e17d75), UINT64_C(0xde271fb9b09f8b4a), UINT64_C(0xe09ca9c5181c910b), UINT64_C(0xf50a3bee80626734), UINT64_C(0x9debc53c491aa589), UINT64_C(0x887d5717d16453b6), UINT64_C(0xb6c6e16b79e749f7), UINT64_C(0xa3507340e199bfc8), UINT64_C(0x67051cceeb16cc8d), UINT64_C(0x72938ee573683ab2), UINT64_C(0x4c283899dbeb20f3), UINT64_C(0x59beaab24395d6cc), UINT64_C(0x315f54608aed1471), UINT64_C(0x24c9c64b1293e24e), UINT64_C(0x1a727037ba10f80f), UINT64_C(0x0fe4e21c226e0e30), UINT64_C(0x05bbb40ffecce46f), UINT64_C(0x102d262466b21250), UINT64_C(0x2e969058ce310811), UINT64_C(0x3b000273564ffe2e), UINT64_C(0x53e1fca19f373c93), UINT64_C(0x46776e8a0749caac), UINT64_C(0x78ccd8f6afcad0ed), UINT64_C(0x6d5a4add37b426d2), UINT64_C(0xa90f25533d3b5597), UINT64_C(0xbc99b778a545a3a8), UINT64_C(0x822201040dc6b9e9), UINT64_C(0x97b4932f95b84fd6), UINT64_C(0xff556dfd5cc08d6b), UINT64_C(0xeac3ffd6c4be7b54), UINT64_C(0xd47849aa6c3d6115), UINT64_C(0xc1eedb81f443972a), UINT64_C(0xce0a399dd62d991a), UINT64_C(0xdb9cabb64e536f25), UINT64_C(0xe5271dcae6d07564), UINT64_C(0xf0b18fe17eae835b), UINT64_C(0x98507133b7d641e6), UINT64_C(0x8dc6e3182fa8b7d9), UINT64_C(0xb37d5564872bad98), UINT64_C(0xa6ebc74f1f555ba7), UINT64_C(0x62bea8c115da28e2), UINT64_C(0x77283aea8da4dedd), UINT64_C(0x49938c962527c49c), UINT64_C(0x5c051ebdbd5932a3), UINT64_C(0x34e4e06f7421f01e), UINT64_C(0x21727244ec5f0621), UINT64_C(0x1fc9c43844dc1c60), UINT64_C(0x0a5f5613dca2ea5f), UINT64_C(0x0b77681ffd99c8de), UINT64_C(0x1ee1fa3465e73ee1), UINT64_C(0x205a4c48cd6424a0), UINT64_C(0x35ccde63551ad29f), UINT64_C(0x5d2d20b19c621022), UINT64_C(0x48bbb29a041ce61d), UINT64_C(0x760004e6ac9ffc5c), UINT64_C(0x639696cd34e10a63), UINT64_C(0xa7c3f9433e6e7926), UINT64_C(0xb2556b68a6108f19), UINT64_C(0x8ceedd140e939558), UINT64_C(0x99784f3f96ed6367), UINT64_C(0xf199b1ed5f95a1da), UINT64_C(0xe40f23c6c7eb57e5), UINT64_C(0xdab495ba6f684da4), UINT64_C(0xcf220791f716bb9b), UINT64_C(0xc0c6e58dd578b5ab), UINT64_C(0xd55077a64d064394), UINT64_C(0xebebc1dae58559d5), UINT64_C(0xfe7d53f17dfbafea), UINT64_C(0x969cad23b4836d57), UINT64_C(0x830a3f082cfd9b68), UINT64_C(0xbdb18974847e8129), UINT64_C(0xa8271b5f1c007716), UINT64_C(0x6c7274d1168f0453), UINT64_C(0x79e4e6fa8ef1f26c), UINT64_C(0x475f50862672e82d), UINT64_C(0x52c9c2adbe0c1e12), UINT64_C(0x3a283c7f7774dcaf), UINT64_C(0x2fbeae54ef0a2a90), UINT64_C(0x11051828478930d1), UINT64_C(0x04938a03dff7c6ee), UINT64_C(0x0eccdc1003552cb1), UINT64_C(0x1b5a4e3b9b2bda8e), UINT64_C(0x25e1f84733a8c0cf), UINT64_C(0x30776a6cabd636f0), UINT64_C(0x589694be62aef44d), UINT64_C(0x4d000695fad00272), 
UINT64_C(0x73bbb0e952531833), UINT64_C(0x662d22c2ca2dee0c), UINT64_C(0xa2784d4cc0a29d49), UINT64_C(0xb7eedf6758dc6b76), UINT64_C(0x8955691bf05f7137), UINT64_C(0x9cc3fb3068218708), UINT64_C(0xf42205e2a15945b5), UINT64_C(0xe1b497c93927b38a), UINT64_C(0xdf0f21b591a4a9cb), UINT64_C(0xca99b39e09da5ff4), UINT64_C(0xc57d51822bb451c4), UINT64_C(0xd0ebc3a9b3caa7fb), UINT64_C(0xee5075d51b49bdba), UINT64_C(0xfbc6e7fe83374b85), UINT64_C(0x9327192c4a4f8938), UINT64_C(0x86b18b07d2317f07), UINT64_C(0xb80a3d7b7ab26546), UINT64_C(0xad9caf50e2cc9379), UINT64_C(0x69c9c0dee843e03c), UINT64_C(0x7c5f52f5703d1603), UINT64_C(0x42e4e489d8be0c42), UINT64_C(0x577276a240c0fa7d), UINT64_C(0x3f93887089b838c0), UINT64_C(0x2a051a5b11c6ceff), UINT64_C(0x14beac27b945d4be), UINT64_C(0x01283e0c213b2281), UINT64_C(0x16eed03ffb3391bc), UINT64_C(0x03784214634d6783), UINT64_C(0x3dc3f468cbce7dc2), UINT64_C(0x2855664353b08bfd), UINT64_C(0x40b498919ac84940), UINT64_C(0x55220aba02b6bf7f), UINT64_C(0x6b99bcc6aa35a53e), UINT64_C(0x7e0f2eed324b5301), UINT64_C(0xba5a416338c42044), UINT64_C(0xafccd348a0bad67b), UINT64_C(0x917765340839cc3a), UINT64_C(0x84e1f71f90473a05), UINT64_C(0xec0009cd593ff8b8), UINT64_C(0xf9969be6c1410e87), UINT64_C(0xc72d2d9a69c214c6), UINT64_C(0xd2bbbfb1f1bce2f9), UINT64_C(0xdd5f5dadd3d2ecc9), UINT64_C(0xc8c9cf864bac1af6), UINT64_C(0xf67279fae32f00b7), UINT64_C(0xe3e4ebd17b51f688), UINT64_C(0x8b051503b2293435), UINT64_C(0x9e9387282a57c20a), UINT64_C(0xa028315482d4d84b), UINT64_C(0xb5bea37f1aaa2e74), UINT64_C(0x71ebccf110255d31), UINT64_C(0x647d5eda885bab0e), UINT64_C(0x5ac6e8a620d8b14f), UINT64_C(0x4f507a8db8a64770), UINT64_C(0x27b1845f71de85cd), UINT64_C(0x32271674e9a073f2), UINT64_C(0x0c9ca008412369b3), UINT64_C(0x190a3223d95d9f8c), UINT64_C(0x1355643005ff75d3), UINT64_C(0x06c3f61b9d8183ec), UINT64_C(0x38784067350299ad), UINT64_C(0x2deed24cad7c6f92), UINT64_C(0x450f2c9e6404ad2f), UINT64_C(0x5099beb5fc7a5b10), UINT64_C(0x6e2208c954f94151), UINT64_C(0x7bb49ae2cc87b76e), UINT64_C(0xbfe1f56cc608c42b), UINT64_C(0xaa7767475e763214), UINT64_C(0x94ccd13bf6f52855), UINT64_C(0x815a43106e8bde6a), UINT64_C(0xe9bbbdc2a7f31cd7), UINT64_C(0xfc2d2fe93f8deae8), UINT64_C(0xc2969995970ef0a9), UINT64_C(0xd7000bbe0f700696), UINT64_C(0xd8e4e9a22d1e08a6), UINT64_C(0xcd727b89b560fe99), UINT64_C(0xf3c9cdf51de3e4d8), UINT64_C(0xe65f5fde859d12e7), UINT64_C(0x8ebea10c4ce5d05a), UINT64_C(0x9b283327d49b2665), UINT64_C(0xa593855b7c183c24), UINT64_C(0xb0051770e466ca1b), UINT64_C(0x745078feeee9b95e), UINT64_C(0x61c6ead576974f61), UINT64_C(0x5f7d5ca9de145520), UINT64_C(0x4aebce82466aa31f), UINT64_C(0x220a30508f1261a2), UINT64_C(0x379ca27b176c979d), UINT64_C(0x09271407bfef8ddc), UINT64_C(0x1cb1862c27917be3), UINT64_C(0x1d99b82006aa5962), UINT64_C(0x080f2a0b9ed4af5d), UINT64_C(0x36b49c773657b51c), UINT64_C(0x23220e5cae294323), UINT64_C(0x4bc3f08e6751819e), UINT64_C(0x5e5562a5ff2f77a1), UINT64_C(0x60eed4d957ac6de0), UINT64_C(0x757846f2cfd29bdf), UINT64_C(0xb12d297cc55de89a), UINT64_C(0xa4bbbb575d231ea5), UINT64_C(0x9a000d2bf5a004e4), UINT64_C(0x8f969f006ddef2db), UINT64_C(0xe77761d2a4a63066), UINT64_C(0xf2e1f3f93cd8c659), UINT64_C(0xcc5a4585945bdc18), UINT64_C(0xd9ccd7ae0c252a27), UINT64_C(0xd62835b22e4b2417), UINT64_C(0xc3bea799b635d228), UINT64_C(0xfd0511e51eb6c869), UINT64_C(0xe89383ce86c83e56), UINT64_C(0x80727d1c4fb0fceb), UINT64_C(0x95e4ef37d7ce0ad4), UINT64_C(0xab5f594b7f4d1095), UINT64_C(0xbec9cb60e733e6aa), UINT64_C(0x7a9ca4eeedbc95ef), UINT64_C(0x6f0a36c575c263d0), UINT64_C(0x51b180b9dd417991), UINT64_C(0x44271292453f8fae), 
UINT64_C(0x2cc6ec408c474d13), UINT64_C(0x39507e6b1439bb2c), UINT64_C(0x07ebc817bcbaa16d), UINT64_C(0x127d5a3c24c45752), UINT64_C(0x18220c2ff866bd0d), UINT64_C(0x0db49e0460184b32), UINT64_C(0x330f2878c89b5173), UINT64_C(0x2699ba5350e5a74c), UINT64_C(0x4e784481999d65f1), UINT64_C(0x5beed6aa01e393ce), UINT64_C(0x655560d6a960898f), UINT64_C(0x70c3f2fd311e7fb0), UINT64_C(0xb4969d733b910cf5), UINT64_C(0xa1000f58a3effaca), UINT64_C(0x9fbbb9240b6ce08b), UINT64_C(0x8a2d2b0f931216b4), UINT64_C(0xe2ccd5dd5a6ad409), UINT64_C(0xf75a47f6c2142236), UINT64_C(0xc9e1f18a6a973877), UINT64_C(0xdc7763a1f2e9ce48), UINT64_C(0xd39381bdd087c078), UINT64_C(0xc605139648f93647), UINT64_C(0xf8bea5eae07a2c06), UINT64_C(0xed2837c17804da39), UINT64_C(0x85c9c913b17c1884), UINT64_C(0x905f5b382902eebb), UINT64_C(0xaee4ed448181f4fa), UINT64_C(0xbb727f6f19ff02c5), UINT64_C(0x7f2710e113707180), UINT64_C(0x6ab182ca8b0e87bf), UINT64_C(0x540a34b6238d9dfe), UINT64_C(0x419ca69dbbf36bc1), UINT64_C(0x297d584f728ba97c), UINT64_C(0x3cebca64eaf55f43), UINT64_C(0x02507c1842764502), UINT64_C(0x17c6ee33da08b33d) }, { UINT64_C(0x0000000000000000), UINT64_C(0x2ddda07ff6672378), UINT64_C(0x5bbb40ffecce46f0), UINT64_C(0x7666e0801aa96588), UINT64_C(0xb77681ffd99c8de0), UINT64_C(0x9aab21802ffbae98), UINT64_C(0xeccdc1003552cb10), UINT64_C(0xc110617fc335e868), UINT64_C(0xfc35acd41c370545), UINT64_C(0xd1e80cabea50263d), UINT64_C(0xa78eec2bf0f943b5), UINT64_C(0x8a534c54069e60cd), UINT64_C(0x4b432d2bc5ab88a5), UINT64_C(0x669e8d5433ccabdd), UINT64_C(0x10f86dd42965ce55), UINT64_C(0x3d25cdabdf02ed2d), UINT64_C(0x6ab3f6839760140f), UINT64_C(0x476e56fc61073777), UINT64_C(0x3108b67c7bae52ff), UINT64_C(0x1cd516038dc97187), UINT64_C(0xddc5777c4efc99ef), UINT64_C(0xf018d703b89bba97), UINT64_C(0x867e3783a232df1f), UINT64_C(0xaba397fc5455fc67), UINT64_C(0x96865a578b57114a), UINT64_C(0xbb5bfa287d303232), UINT64_C(0xcd3d1aa8679957ba), UINT64_C(0xe0e0bad791fe74c2), UINT64_C(0x21f0dba852cb9caa), UINT64_C(0x0c2d7bd7a4acbfd2), UINT64_C(0x7a4b9b57be05da5a), UINT64_C(0x57963b284862f922), UINT64_C(0xd567ed072ec0281e), UINT64_C(0xf8ba4d78d8a70b66), UINT64_C(0x8edcadf8c20e6eee), UINT64_C(0xa3010d8734694d96), UINT64_C(0x62116cf8f75ca5fe), UINT64_C(0x4fcccc87013b8686), UINT64_C(0x39aa2c071b92e30e), UINT64_C(0x14778c78edf5c076), UINT64_C(0x295241d332f72d5b), UINT64_C(0x048fe1acc4900e23), UINT64_C(0x72e9012cde396bab), UINT64_C(0x5f34a153285e48d3), UINT64_C(0x9e24c02ceb6ba0bb), UINT64_C(0xb3f960531d0c83c3), UINT64_C(0xc59f80d307a5e64b), UINT64_C(0xe84220acf1c2c533), UINT64_C(0xbfd41b84b9a03c11), UINT64_C(0x9209bbfb4fc71f69), UINT64_C(0xe46f5b7b556e7ae1), UINT64_C(0xc9b2fb04a3095999), UINT64_C(0x08a29a7b603cb1f1), UINT64_C(0x257f3a04965b9289), UINT64_C(0x5319da848cf2f701), UINT64_C(0x7ec47afb7a95d479), UINT64_C(0x43e1b750a5973954), UINT64_C(0x6e3c172f53f01a2c), UINT64_C(0x185af7af49597fa4), UINT64_C(0x358757d0bf3e5cdc), UINT64_C(0xf49736af7c0bb4b4), UINT64_C(0xd94a96d08a6c97cc), UINT64_C(0xaf2c765090c5f244), UINT64_C(0x82f1d62f66a2d13c), UINT64_C(0x38177525f28e4eb9), UINT64_C(0x15cad55a04e96dc1), UINT64_C(0x63ac35da1e400849), UINT64_C(0x4e7195a5e8272b31), UINT64_C(0x8f61f4da2b12c359), UINT64_C(0xa2bc54a5dd75e021), UINT64_C(0xd4dab425c7dc85a9), UINT64_C(0xf907145a31bba6d1), UINT64_C(0xc422d9f1eeb94bfc), UINT64_C(0xe9ff798e18de6884), UINT64_C(0x9f99990e02770d0c), UINT64_C(0xb2443971f4102e74), UINT64_C(0x7354580e3725c61c), UINT64_C(0x5e89f871c142e564), UINT64_C(0x28ef18f1dbeb80ec), UINT64_C(0x0532b88e2d8ca394), UINT64_C(0x52a483a665ee5ab6), UINT64_C(0x7f7923d9938979ce), 
UINT64_C(0x091fc35989201c46), UINT64_C(0x24c263267f473f3e), UINT64_C(0xe5d20259bc72d756), UINT64_C(0xc80fa2264a15f42e), UINT64_C(0xbe6942a650bc91a6), UINT64_C(0x93b4e2d9a6dbb2de), UINT64_C(0xae912f7279d95ff3), UINT64_C(0x834c8f0d8fbe7c8b), UINT64_C(0xf52a6f8d95171903), UINT64_C(0xd8f7cff263703a7b), UINT64_C(0x19e7ae8da045d213), UINT64_C(0x343a0ef25622f16b), UINT64_C(0x425cee724c8b94e3), UINT64_C(0x6f814e0dbaecb79b), UINT64_C(0xed709822dc4e66a7), UINT64_C(0xc0ad385d2a2945df), UINT64_C(0xb6cbd8dd30802057), UINT64_C(0x9b1678a2c6e7032f), UINT64_C(0x5a0619dd05d2eb47), UINT64_C(0x77dbb9a2f3b5c83f), UINT64_C(0x01bd5922e91cadb7), UINT64_C(0x2c60f95d1f7b8ecf), UINT64_C(0x114534f6c07963e2), UINT64_C(0x3c989489361e409a), UINT64_C(0x4afe74092cb72512), UINT64_C(0x6723d476dad0066a), UINT64_C(0xa633b50919e5ee02), UINT64_C(0x8bee1576ef82cd7a), UINT64_C(0xfd88f5f6f52ba8f2), UINT64_C(0xd0555589034c8b8a), UINT64_C(0x87c36ea14b2e72a8), UINT64_C(0xaa1ecedebd4951d0), UINT64_C(0xdc782e5ea7e03458), UINT64_C(0xf1a58e2151871720), UINT64_C(0x30b5ef5e92b2ff48), UINT64_C(0x1d684f2164d5dc30), UINT64_C(0x6b0eafa17e7cb9b8), UINT64_C(0x46d30fde881b9ac0), UINT64_C(0x7bf6c275571977ed), UINT64_C(0x562b620aa17e5495), UINT64_C(0x204d828abbd7311d), UINT64_C(0x0d9022f54db01265), UINT64_C(0xcc80438a8e85fa0d), UINT64_C(0xe15de3f578e2d975), UINT64_C(0x973b0375624bbcfd), UINT64_C(0xbae6a30a942c9f85), UINT64_C(0x702eea4be51c9d72), UINT64_C(0x5df34a34137bbe0a), UINT64_C(0x2b95aab409d2db82), UINT64_C(0x06480acbffb5f8fa), UINT64_C(0xc7586bb43c801092), UINT64_C(0xea85cbcbcae733ea), UINT64_C(0x9ce32b4bd04e5662), UINT64_C(0xb13e8b342629751a), UINT64_C(0x8c1b469ff92b9837), UINT64_C(0xa1c6e6e00f4cbb4f), UINT64_C(0xd7a0066015e5dec7), UINT64_C(0xfa7da61fe382fdbf), UINT64_C(0x3b6dc76020b715d7), UINT64_C(0x16b0671fd6d036af), UINT64_C(0x60d6879fcc795327), UINT64_C(0x4d0b27e03a1e705f), UINT64_C(0x1a9d1cc8727c897d), UINT64_C(0x3740bcb7841baa05), UINT64_C(0x41265c379eb2cf8d), UINT64_C(0x6cfbfc4868d5ecf5), UINT64_C(0xadeb9d37abe0049d), UINT64_C(0x80363d485d8727e5), UINT64_C(0xf650ddc8472e426d), UINT64_C(0xdb8d7db7b1496115), UINT64_C(0xe6a8b01c6e4b8c38), UINT64_C(0xcb751063982caf40), UINT64_C(0xbd13f0e38285cac8), UINT64_C(0x90ce509c74e2e9b0), UINT64_C(0x51de31e3b7d701d8), UINT64_C(0x7c03919c41b022a0), UINT64_C(0x0a65711c5b194728), UINT64_C(0x27b8d163ad7e6450), UINT64_C(0xa549074ccbdcb56c), UINT64_C(0x8894a7333dbb9614), UINT64_C(0xfef247b32712f39c), UINT64_C(0xd32fe7ccd175d0e4), UINT64_C(0x123f86b31240388c), UINT64_C(0x3fe226cce4271bf4), UINT64_C(0x4984c64cfe8e7e7c), UINT64_C(0x6459663308e95d04), UINT64_C(0x597cab98d7ebb029), UINT64_C(0x74a10be7218c9351), UINT64_C(0x02c7eb673b25f6d9), UINT64_C(0x2f1a4b18cd42d5a1), UINT64_C(0xee0a2a670e773dc9), UINT64_C(0xc3d78a18f8101eb1), UINT64_C(0xb5b16a98e2b97b39), UINT64_C(0x986ccae714de5841), UINT64_C(0xcffaf1cf5cbca163), UINT64_C(0xe22751b0aadb821b), UINT64_C(0x9441b130b072e793), UINT64_C(0xb99c114f4615c4eb), UINT64_C(0x788c703085202c83), UINT64_C(0x5551d04f73470ffb), UINT64_C(0x233730cf69ee6a73), UINT64_C(0x0eea90b09f89490b), UINT64_C(0x33cf5d1b408ba426), UINT64_C(0x1e12fd64b6ec875e), UINT64_C(0x68741de4ac45e2d6), UINT64_C(0x45a9bd9b5a22c1ae), UINT64_C(0x84b9dce4991729c6), UINT64_C(0xa9647c9b6f700abe), UINT64_C(0xdf029c1b75d96f36), UINT64_C(0xf2df3c6483be4c4e), UINT64_C(0x48399f6e1792d3cb), UINT64_C(0x65e43f11e1f5f0b3), UINT64_C(0x1382df91fb5c953b), UINT64_C(0x3e5f7fee0d3bb643), UINT64_C(0xff4f1e91ce0e5e2b), UINT64_C(0xd292beee38697d53), UINT64_C(0xa4f45e6e22c018db), UINT64_C(0x8929fe11d4a73ba3), 
UINT64_C(0xb40c33ba0ba5d68e), UINT64_C(0x99d193c5fdc2f5f6), UINT64_C(0xefb77345e76b907e), UINT64_C(0xc26ad33a110cb306), UINT64_C(0x037ab245d2395b6e), UINT64_C(0x2ea7123a245e7816), UINT64_C(0x58c1f2ba3ef71d9e), UINT64_C(0x751c52c5c8903ee6), UINT64_C(0x228a69ed80f2c7c4), UINT64_C(0x0f57c9927695e4bc), UINT64_C(0x793129126c3c8134), UINT64_C(0x54ec896d9a5ba24c), UINT64_C(0x95fce812596e4a24), UINT64_C(0xb821486daf09695c), UINT64_C(0xce47a8edb5a00cd4), UINT64_C(0xe39a089243c72fac), UINT64_C(0xdebfc5399cc5c281), UINT64_C(0xf36265466aa2e1f9), UINT64_C(0x850485c6700b8471), UINT64_C(0xa8d925b9866ca709), UINT64_C(0x69c944c645594f61), UINT64_C(0x4414e4b9b33e6c19), UINT64_C(0x32720439a9970991), UINT64_C(0x1fafa4465ff02ae9), UINT64_C(0x9d5e72693952fbd5), UINT64_C(0xb083d216cf35d8ad), UINT64_C(0xc6e53296d59cbd25), UINT64_C(0xeb3892e923fb9e5d), UINT64_C(0x2a28f396e0ce7635), UINT64_C(0x07f553e916a9554d), UINT64_C(0x7193b3690c0030c5), UINT64_C(0x5c4e1316fa6713bd), UINT64_C(0x616bdebd2565fe90), UINT64_C(0x4cb67ec2d302dde8), UINT64_C(0x3ad09e42c9abb860), UINT64_C(0x170d3e3d3fcc9b18), UINT64_C(0xd61d5f42fcf97370), UINT64_C(0xfbc0ff3d0a9e5008), UINT64_C(0x8da61fbd10373580), UINT64_C(0xa07bbfc2e65016f8), UINT64_C(0xf7ed84eaae32efda), UINT64_C(0xda3024955855cca2), UINT64_C(0xac56c41542fca92a), UINT64_C(0x818b646ab49b8a52), UINT64_C(0x409b051577ae623a), UINT64_C(0x6d46a56a81c94142), UINT64_C(0x1b2045ea9b6024ca), UINT64_C(0x36fde5956d0707b2), UINT64_C(0x0bd8283eb205ea9f), UINT64_C(0x260588414462c9e7), UINT64_C(0x506368c15ecbac6f), UINT64_C(0x7dbec8bea8ac8f17), UINT64_C(0xbcaea9c16b99677f), UINT64_C(0x917309be9dfe4407), UINT64_C(0xe715e93e8757218f), UINT64_C(0xcac84941713002f7) } }; uint64_t crc64_slow(const void *input, size_t nbytes) { const unsigned char *data = (const unsigned char*) input; uint64_t cs = UINT64_C(0xffffffffffffffff); while (nbytes--) { uint32_t idx = ((uint32_t) (cs ^ *data++)) & 0xff; cs = crc64_table[3][idx] ^ (cs >> 8); } return cs ^ UINT64_C(0xffffffffffffffff); } // Loads an input 32-bit word in little-endian order from a big-endian machine. __host__ __device__ static inline uint32_t crc64_load_le32_(const uint32_t *p) { uint32_t w = *p; return ((((w) & 0xff000000) >> 24) | (((w) & 0x00ff0000) >> 8) | (((w) & 0x0000ff00) << 8) | (((w) & 0x000000ff) << 24)); } // A parallel multiword interleaved algorithm with a word size of 4 bytes // and a stride factor of 5. uint64_t crc64(const void *input, size_t nbytes) { const unsigned char *data = (const unsigned char*) input; const unsigned char *end = data + nbytes; uint64_t cs[5] = { UINT64_C(0xffffffffffffffff), 0, 0, 0, 0 }; // Process byte-by-byte until proper alignment is attained. // In the inner loop, we process 5 4-byte words (20 bytes in total) // per iteration. If the amount of data remaining is small, // then we also use the slow algorithm. 
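// (Explanatory note, not in the original source.) Data flow of the stride-5
// interleaving below, as read from the code: the running CRC is split across
// five 64-bit lanes cs[0..4], and each 20-byte block feeds one 32-bit word
// into each lane. The low 32 bits of lane k are folded into that lane's input
// word, the high 32 bits are chained into lane k+1, and lane 4's high half is
// carried in `cry` back into lane 0 on the next iteration. The
// crc64_interleaved_table[] lookups advance each byte's contribution by the
// full 20-byte stride, and after the main loop the five lanes are XOR-folded
// back into a single checksum.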
while (data < end && ((((size_t) data) & 3) || (end - data < 20))) { uint32_t idx = ((uint32_t) (cs[0] ^ *data++)) & 0xff; cs[0] = crc64_table[3][idx] ^ (cs[0] >> 8); } if (data == end) return cs[0] ^ UINT64_C(0xffffffffffffffff); const uint32_t one = 1; bool big_endian = !(*((char *)(&one))); uint64_t cry = 0; uint32_t in[5]; if (!big_endian) { for (unsigned i = 0; i < 5; ++i) in[i] = ((const uint32_t*) data)[i]; data += 20; for (; end - data >= 20; data += 20) { cs[0] ^= cry; in[0] ^= (uint32_t) cs[0]; cs[1] ^= cs[0] >> 32; cs[0] = crc64_interleaved_table[0][in[0] & 0xff]; in[0] >>= 8; in[1] ^= (uint32_t) cs[1]; cs[2] ^= cs[1] >> 32; cs[1] = crc64_interleaved_table[0][in[1] & 0xff]; in[1] >>= 8; in[2] ^= (uint32_t) cs[2]; cs[3] ^= cs[2] >> 32; cs[2] = crc64_interleaved_table[0][in[2] & 0xff]; in[2] >>= 8; in[3] ^= (uint32_t) cs[3]; cs[4] ^= cs[3] >> 32; cs[3] = crc64_interleaved_table[0][in[3] & 0xff]; in[3] >>= 8; in[4] ^= (uint32_t) cs[4]; cry = cs[4] >> 32; cs[4] = crc64_interleaved_table[0][in[4] & 0xff]; in[4] >>= 8; for (unsigned b = 1; b < 3; ++b) { cs[0] ^= crc64_interleaved_table[b][in[0] & 0xff]; in[0] >>= 8; cs[1] ^= crc64_interleaved_table[b][in[1] & 0xff]; in[1] >>= 8; cs[2] ^= crc64_interleaved_table[b][in[2] & 0xff]; in[2] >>= 8; cs[3] ^= crc64_interleaved_table[b][in[3] & 0xff]; in[3] >>= 8; cs[4] ^= crc64_interleaved_table[b][in[4] & 0xff]; in[4] >>= 8; } cs[0] ^= crc64_interleaved_table[3][in[0] & 0xff]; in[0] = ((const uint32_t*) data)[0]; cs[1] ^= crc64_interleaved_table[3][in[1] & 0xff]; in[1] = ((const uint32_t*) data)[1]; cs[2] ^= crc64_interleaved_table[3][in[2] & 0xff]; in[2] = ((const uint32_t*) data)[2]; cs[3] ^= crc64_interleaved_table[3][in[3] & 0xff]; in[3] = ((const uint32_t*) data)[3]; cs[4] ^= crc64_interleaved_table[3][in[4] & 0xff]; in[4] = ((const uint32_t*) data)[4]; } } else { for (unsigned i = 0; i < 5; ++i) { in[i] = crc64_load_le32_(&((const uint32_t*) data)[i]); } data += 20; for (; end - data >= 20; data += 20) { cs[0] ^= cry; in[0] ^= (uint32_t) cs[0]; cs[1] ^= cs[0] >> 32; cs[0] = crc64_interleaved_table[0][in[0] & 0xff]; in[0] >>= 8; in[1] ^= (uint32_t) cs[1]; cs[2] ^= cs[1] >> 32; cs[1] = crc64_interleaved_table[0][in[1] & 0xff]; in[1] >>= 8; in[2] ^= (uint32_t) cs[2]; cs[3] ^= cs[2] >> 32; cs[2] = crc64_interleaved_table[0][in[2] & 0xff]; in[2] >>= 8; in[3] ^= (uint32_t) cs[3]; cs[4] ^= cs[3] >> 32; cs[3] = crc64_interleaved_table[0][in[3] & 0xff]; in[3] >>= 8; in[4] ^= (uint32_t) cs[4]; cry = cs[4] >> 32; cs[4] = crc64_interleaved_table[0][in[4] & 0xff]; in[4] >>= 8; for (unsigned b = 1; b < 3; ++b) { cs[0] ^= crc64_interleaved_table[b][in[0] & 0xff]; in[0] >>= 8; cs[1] ^= crc64_interleaved_table[b][in[1] & 0xff]; in[1] >>= 8; cs[2] ^= crc64_interleaved_table[b][in[2] & 0xff]; in[2] >>= 8; cs[3] ^= crc64_interleaved_table[b][in[3] & 0xff]; in[3] >>= 8; cs[4] ^= crc64_interleaved_table[b][in[4] & 0xff]; in[4] >>= 8; } cs[0] ^= crc64_interleaved_table[3][in[0] & 0xff]; in[0] = crc64_load_le32_(&((const uint32_t*) data)[0]); cs[1] ^= crc64_interleaved_table[3][in[1] & 0xff]; in[1] = crc64_load_le32_(&((const uint32_t*) data)[1]); cs[2] ^= crc64_interleaved_table[3][in[2] & 0xff]; in[2] = crc64_load_le32_(&((const uint32_t*) data)[2]); cs[3] ^= crc64_interleaved_table[3][in[3] & 0xff]; in[3] = crc64_load_le32_(&((const uint32_t*) data)[3]); cs[4] ^= crc64_interleaved_table[3][in[4] & 0xff]; in[4] = crc64_load_le32_(&((const uint32_t*) data)[4]); } } cs[0] ^= cry; for (unsigned i = 0; i < 5; ++i) { if (i > 0) cs[0] ^= cs[i]; in[i] ^= (uint32_t) 
cs[0]; cs[0] = cs[0] >> 32; for (unsigned b = 0; b < 3; ++b) { cs[0] ^= crc64_table[b][in[i] & 0xff]; in[i] >>= 8; } cs[0] ^= crc64_table[3][in[i] & 0xff]; } while (data < end) { uint32_t idx = ((uint32_t) (cs[0] ^ *data++)) & 0xff; cs[0] = crc64_table[3][idx] ^ (cs[0] >> 8); } return cs[0] ^ UINT64_C(0xffffffffffffffff); } __device__ uint64_t crc64_device(const unsigned char *input, size_t nbytes, const uint64_t *d_crc64_table, const uint64_t *d_crc64_interleaved_table) { const unsigned char *data = input; const unsigned char *end = data + nbytes; uint64_t cs[5] = { UINT64_C(0xffffffffffffffff), 0, 0, 0, 0 }; // Process byte-by-byte until proper alignment is attained. // In the inner loop, we process 5 4-byte words (20 bytes in total) // per iteration. If the amount of data remaining is small, // then we also use the slow algorithm. while (data < end && ((((size_t) data) & 3) || (end - data < 20))) { uint32_t idx = ((uint32_t) (cs[0] ^ *data++)) & 0xff; cs[0] = d_crc64_table[3*256+idx] ^ (cs[0] >> 8); } if (data == end) return cs[0] ^ UINT64_C(0xffffffffffffffff); const uint32_t one = 1; bool big_endian = !(*((char *)(&one))); uint64_t cry = 0; uint32_t in[5]; if (!big_endian) { for (unsigned i = 0; i < 5; ++i) in[i] = ((const uint32_t*) data)[i]; data += 20; for (; end - data >= 20; data += 20) { cs[0] ^= cry; in[0] ^= (uint32_t) cs[0]; cs[1] ^= cs[0] >> 32; cs[0] = d_crc64_interleaved_table[in[0] & 0xff]; in[0] >>= 8; in[1] ^= (uint32_t) cs[1]; cs[2] ^= cs[1] >> 32; cs[1] = d_crc64_interleaved_table[in[1] & 0xff]; in[1] >>= 8; in[2] ^= (uint32_t) cs[2]; cs[3] ^= cs[2] >> 32; cs[2] = d_crc64_interleaved_table[in[2] & 0xff]; in[2] >>= 8; in[3] ^= (uint32_t) cs[3]; cs[4] ^= cs[3] >> 32; cs[3] = d_crc64_interleaved_table[in[3] & 0xff]; in[3] >>= 8; in[4] ^= (uint32_t) cs[4]; cry = cs[4] >> 32; cs[4] = d_crc64_interleaved_table[in[4] & 0xff]; in[4] >>= 8; for (unsigned b = 1; b < 3; ++b) { cs[0] ^= d_crc64_interleaved_table[b*256+(in[0] & 0xff)]; in[0] >>= 8; cs[1] ^= d_crc64_interleaved_table[b*256+(in[1] & 0xff)]; in[1] >>= 8; cs[2] ^= d_crc64_interleaved_table[b*256+(in[2] & 0xff)]; in[2] >>= 8; cs[3] ^= d_crc64_interleaved_table[b*256+(in[3] & 0xff)]; in[3] >>= 8; cs[4] ^= d_crc64_interleaved_table[b*256+(in[4] & 0xff)]; in[4] >>= 8; } cs[0] ^= d_crc64_interleaved_table[3*256+(in[0] & 0xff)]; in[0] = ((const uint32_t*) data)[0]; cs[1] ^= d_crc64_interleaved_table[3*256+(in[1] & 0xff)]; in[1] = ((const uint32_t*) data)[1]; cs[2] ^= d_crc64_interleaved_table[3*256+(in[2] & 0xff)]; in[2] = ((const uint32_t*) data)[2]; cs[3] ^= d_crc64_interleaved_table[3*256+(in[3] & 0xff)]; in[3] = ((const uint32_t*) data)[3]; cs[4] ^= d_crc64_interleaved_table[3*256+(in[4] & 0xff)]; in[4] = ((const uint32_t*) data)[4]; } } else { for (unsigned i = 0; i < 5; ++i) { in[i] = crc64_load_le32_(&((const uint32_t*) data)[i]); } data += 20; for (; end - data >= 20; data += 20) { cs[0] ^= cry; in[0] ^= (uint32_t) cs[0]; cs[1] ^= cs[0] >> 32; cs[0] = d_crc64_interleaved_table[in[0] & 0xff]; in[0] >>= 8; in[1] ^= (uint32_t) cs[1]; cs[2] ^= cs[1] >> 32; cs[1] = d_crc64_interleaved_table[in[1] & 0xff]; in[1] >>= 8; in[2] ^= (uint32_t) cs[2]; cs[3] ^= cs[2] >> 32; cs[2] = d_crc64_interleaved_table[in[2] & 0xff]; in[2] >>= 8; in[3] ^= (uint32_t) cs[3]; cs[4] ^= cs[3] >> 32; cs[3] = d_crc64_interleaved_table[in[3] & 0xff]; in[3] >>= 8; in[4] ^= (uint32_t) cs[4]; cry = cs[4] >> 32; cs[4] = d_crc64_interleaved_table[in[4] & 0xff]; in[4] >>= 8; for (unsigned b = 1; b < 3; ++b) { cs[0] ^= 
d_crc64_interleaved_table[b*256+(in[0] & 0xff)]; in[0] >>= 8; cs[1] ^= d_crc64_interleaved_table[b*256+(in[1] & 0xff)]; in[1] >>= 8; cs[2] ^= d_crc64_interleaved_table[b*256+(in[2] & 0xff)]; in[2] >>= 8; cs[3] ^= d_crc64_interleaved_table[b*256+(in[3] & 0xff)]; in[3] >>= 8; cs[4] ^= d_crc64_interleaved_table[b*256+(in[4] & 0xff)]; in[4] >>= 8; } cs[0] ^= d_crc64_interleaved_table[3*256+(in[0] & 0xff)]; in[0] = crc64_load_le32_(&((const uint32_t*) data)[0]); cs[1] ^= d_crc64_interleaved_table[3*256+(in[1] & 0xff)]; in[1] = crc64_load_le32_(&((const uint32_t*) data)[1]); cs[2] ^= d_crc64_interleaved_table[3*256+(in[2] & 0xff)]; in[2] = crc64_load_le32_(&((const uint32_t*) data)[2]); cs[3] ^= d_crc64_interleaved_table[3*256+(in[3] & 0xff)]; in[3] = crc64_load_le32_(&((const uint32_t*) data)[3]); cs[4] ^= d_crc64_interleaved_table[3*256+(in[4] & 0xff)]; in[4] = crc64_load_le32_(&((const uint32_t*) data)[4]); } } cs[0] ^= cry; for (unsigned i = 0; i < 5; ++i) { if (i > 0) cs[0] ^= cs[i]; in[i] ^= (uint32_t) cs[0]; cs[0] = cs[0] >> 32; for (unsigned b = 0; b < 3; ++b) { cs[0] ^= d_crc64_table[b*256+(in[i] & 0xff)]; in[i] >>= 8; } cs[0] ^= d_crc64_table[3*256+(in[i] & 0xff)]; } while (data < end) { uint32_t idx = ((uint32_t) (cs[0] ^ *data++)) & 0xff; cs[0] = d_crc64_table[3*256+idx] ^ (cs[0] >> 8); } return cs[0] ^ UINT64_C(0xffffffffffffffff); } // Calculate the 'check bytes' for the provided checksum. If these bytes are // appended to the original buffer, then the new total checksum should be -1. void crc64_invert(uint64_t cs, void *check_bytes) { unsigned char *bytes = (unsigned char *) check_bytes; cs ^= UINT64_C(0xffffffffffffffff); // The CRC is self-inverting (in big-endian, so the bit-reversed CRC is // self-inverting in little-endian). bytes[7] = (cs >> 56) & 0xff; bytes[6] = (cs >> 48) & 0xff; bytes[5] = (cs >> 40) & 0xff; bytes[4] = (cs >> 32) & 0xff; bytes[3] = (cs >> 24) & 0xff; bytes[2] = (cs >> 16) & 0xff; bytes[1] = (cs >> 8) & 0xff; bytes[0] = cs & 0xff; } static const uint64_t crc64_x_pow_2n[64] = { UINT64_C(0x4000000000000000), UINT64_C(0x2000000000000000), UINT64_C(0x0800000000000000), UINT64_C(0x0080000000000000), UINT64_C(0x0000800000000000), UINT64_C(0x0000000080000000), UINT64_C(0xc96c5795d7870f42), UINT64_C(0x6d5f4ad7e3c3afa0), UINT64_C(0xd49f7e445077d8ea), UINT64_C(0x040fb02a53c216fa), UINT64_C(0x6bec35957b9ef3a0), UINT64_C(0xb0e3bb0658964afe), UINT64_C(0x218578c7a2dff638), UINT64_C(0x6dbb920f24dd5cf2), UINT64_C(0x7a140cfcdb4d5eb5), UINT64_C(0x41b3705ecbc4057b), UINT64_C(0xd46ab656accac1ea), UINT64_C(0x329beda6fc34fb73), UINT64_C(0x51a4fcd4350b9797), UINT64_C(0x314fa85637efae9d), UINT64_C(0xacf27e9a1518d512), UINT64_C(0xffe2a3388a4d8ce7), UINT64_C(0x48b9697e60cc2e4e), UINT64_C(0xada73cb78dd62460), UINT64_C(0x3ea5454d8ce5c1bb), UINT64_C(0x5e84e3a6c70feaf1), UINT64_C(0x90fd49b66cbd81d1), UINT64_C(0xe2943e0c1db254e8), UINT64_C(0xecfa6adeca8834a1), UINT64_C(0xf513e212593ee321), UINT64_C(0xf36ae57331040916), UINT64_C(0x63fbd333b87b6717), UINT64_C(0xbd60f8e152f50b8b), UINT64_C(0xa5ce4a8299c1567d), UINT64_C(0x0bd445f0cbdb55ee), UINT64_C(0xfdd6824e20134285), UINT64_C(0xcead8b6ebda2227a), UINT64_C(0xe44b17e4f5d4fb5c), UINT64_C(0x9b29c81ad01ca7c5), UINT64_C(0x1b4366e40fea4055), UINT64_C(0x27bca1551aae167b), UINT64_C(0xaa57bcd1b39a5690), UINT64_C(0xd7fce83fa1234db9), UINT64_C(0xcce4986efea3ff8e), UINT64_C(0x3602a4d9e65341f1), UINT64_C(0x722b1da2df516145), UINT64_C(0xecfc3ddd3a08da83), UINT64_C(0x0fb96dcca83507e6), UINT64_C(0x125f2fe78d70f080), UINT64_C(0x842f50b7651aa516), 
UINT64_C(0x09bc34188cd9836f), UINT64_C(0xf43666c84196d909), UINT64_C(0xb56feb30c0df6ccb), UINT64_C(0xaa66e04ce7f30958), UINT64_C(0xb7b1187e9af29547), UINT64_C(0x113255f8476495de), UINT64_C(0x8fb19f783095d77e), UINT64_C(0xaec4aacc7c82b133), UINT64_C(0xf64e6d09218428cf), UINT64_C(0x036a72ea5ac258a0), UINT64_C(0x5235ef12eb7aaa6a), UINT64_C(0x2fed7b1685657853), UINT64_C(0x8ef8951d46606fb5), UINT64_C(0x9d58c1090f034d14) }; // Compute (a*b) mod P // See: https://code.google.com/p/crcutil/source/browse/code/gf_util.h static inline uint64_t crc64_multiply_(uint64_t a, uint64_t b) { if ((a ^ (a-1)) < (b ^ (b-1))) { uint64_t t = a; a = b; b = t; } if (a == 0) return 0; uint64_t r = 0, h = UINT64_C(1) << 63; for (; a != 0; a <<= 1) { if (a & h) { r ^= b; a ^= h; } b = (b >> 1) ^ ((b & 1) ? crc64_poly : 0); } return r; } // Compute x**n mod P static inline uint64_t crc64_x_pow_n_(uint64_t n) { uint64_t r = UINT64_C(1) << 63; for (size_t i = 0; n != 0; n >>= 1, ++i) { if (n & 1) r = crc64_multiply_(r, crc64_x_pow_2n[i]); } return r; } uint64_t crc64_combine(uint64_t cs1, uint64_t cs2, size_t nbytes2) { // For M = CONCAT(M1, M2) => CRC(M, a) = CRC(M2, CRC(M1, a)) and: // CRC(M, b) = CRC(M, a) + ((b-a)x^|M|) mod P. return cs2 ^ crc64_multiply_(cs1, crc64_x_pow_n_(8*nbytes2)); } static const size_t crc64_min_thread_bytes = 1024; __global__ void crc64_kernel(size_t *d_thread_sz, uint64_t *d_thread_cs, const unsigned char* d_data, const uint64_t *d_crc64_table, const uint64_t *d_crc64_interleaved_table, size_t nbytes, int nthreads) { int tid = blockIdx.x * blockDim.x + threadIdx.x; size_t bpt = nbytes/nthreads; const unsigned char *start = d_data + bpt*tid; const unsigned char *end; if (tid != nthreads - 1) end = start + bpt; else end = d_data + nbytes; size_t sz = end - start; d_thread_sz[tid] = sz; d_thread_cs[tid] = crc64_device(start, sz, d_crc64_table, d_crc64_interleaved_table); } uint64_t crc64_parallel(const void *input, size_t nbytes) { if (nbytes > 2*crc64_min_thread_bytes) { int nthreads = 96*8*32; if (nbytes < nthreads*crc64_min_thread_bytes) nthreads = nbytes/crc64_min_thread_bytes; uint64_t thread_cs[nthreads]; size_t thread_sz[nthreads]; const unsigned char *data = (const unsigned char*) input; uint64_t *d_thread_cs; uint64_t *d_crc64_table; uint64_t *d_crc64_interleaved_table; size_t *d_thread_sz; unsigned char *d_data; cudaMalloc((void**)&d_thread_sz, nthreads * sizeof(size_t)); cudaMalloc((void**)&d_thread_cs, nthreads * sizeof(uint64_t)); cudaMalloc((void**)&d_data, nbytes); cudaMalloc((void**)&d_crc64_table, 4*256*sizeof(uint64_t)); cudaMalloc((void**)&d_crc64_interleaved_table, 4*256*sizeof(uint64_t)); cudaMemcpy(d_data, data, nbytes , cudaMemcpyHostToDevice); cudaMemcpy(d_crc64_table, crc64_table, 4*256*sizeof(uint64_t) , cudaMemcpyHostToDevice); cudaMemcpy(d_crc64_interleaved_table, crc64_interleaved_table, 4*256*sizeof(uint64_t) , cudaMemcpyHostToDevice); crc64_kernel<<<nthreads/64, 64>>>(d_thread_sz, d_thread_cs, d_data, d_crc64_table, d_crc64_interleaved_table, nbytes, nthreads); cudaMemcpy(thread_sz, d_thread_sz, nthreads * sizeof(size_t), cudaMemcpyDeviceToHost); cudaMemcpy(thread_cs, d_thread_cs, nthreads * sizeof(uint64_t), cudaMemcpyDeviceToHost); uint64_t cs = thread_cs[0]; for (int i = 1; i < nthreads; ++i) { cs = crc64_combine(cs, thread_cs[i], thread_sz[i]); } cudaFree(d_thread_sz); cudaFree(d_thread_cs); cudaFree(d_data); cudaFree(d_crc64_table); cudaFree(d_crc64_interleaved_table); return cs; } return crc64(input, nbytes); }
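// --- Hedged usage sketch (editor's addition, not part of the original CRC64 source):
// a tiny host-side self-test of the combine property that crc64_parallel relies on.
// The function name crc64_combine_selftest and the buffer contents are illustrative
// only. It checks that checksumming a buffer in two pieces and stitching the partial
// results with crc64_combine() matches a single-pass crc64(), and that the GPU path
// agrees with the CPU path.
#include <cassert>
static void crc64_combine_selftest() {
  unsigned char buf[256];
  for (int i = 0; i < 256; ++i) buf[i] = (unsigned char) i;
  const size_t split = 100;                            // arbitrary split point
  const uint64_t whole = crc64(buf, sizeof(buf));      // single-pass reference
  const uint64_t part1 = crc64(buf, split);            // CRC of the first chunk
  const uint64_t part2 = crc64(buf + split, sizeof(buf) - split);
  // nbytes2 is the length of the *second* chunk, matching how crc64_parallel
  // folds the per-thread checksums together above.
  const uint64_t stitched = crc64_combine(part1, part2, sizeof(buf) - split);
  assert(stitched == whole);
  assert(crc64_parallel(buf, sizeof(buf)) == whole);   // GPU path agrees
}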
template <typename T, typename S, typename G> __global__ void MomentumUpdateVariableKernel(const size_t size, T *variable, T *accumulation, const S *learning_rate, const G *gradient, const S *momentum, bool use_nesterov) { if (use_nesterov) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (size); i += blockDim.x * gridDim.x) { accumulation[i] = momentum[0] * accumulation[i] + gradient[i]; variable[i] -= gradient[i] * learning_rate[0] + accumulation[i] * momentum[0] * learning_rate[0]; } } else { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (size); i += blockDim.x * gridDim.x) { accumulation[i] = momentum[0] * accumulation[i] + gradient[i]; variable[i] -= learning_rate[0] * accumulation[i]; } } } template <> __global__ void MomentumUpdateVariableKernel(const size_t size, half *variable, half *accumulation, const float *learning_rate, const half *gradient, const float *momentum, bool use_nesterov) { if (use_nesterov) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (size); i += blockDim.x * gridDim.x) { accumulation[i] = __float2half(momentum[0]) * accumulation[i] + gradient[i]; variable[i] -= gradient[i] * __float2half(learning_rate[0]) + accumulation[i] * __float2half(momentum[0]) * __float2half(learning_rate[0]); } } else { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (size); i += blockDim.x * gridDim.x) { accumulation[i] = __float2half(momentum[0]) * accumulation[i] + gradient[i]; variable[i] -= __float2half(learning_rate[0]) * accumulation[i]; } } } template <> __global__ void MomentumUpdateVariableKernel(const size_t size, float *variable, float *accumulation, const float *learning_rate, const half *gradient, const float *momentum, bool use_nesterov) { if (use_nesterov) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (size); i += blockDim.x * gridDim.x) { accumulation[i] = momentum[0] * accumulation[i] + __half2float(gradient[i]); variable[i] -= __half2float(gradient[i]) * learning_rate[0] + accumulation[i] * momentum[0] * learning_rate[0]; } } else { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (size); i += blockDim.x * gridDim.x) { accumulation[i] = momentum[0] * accumulation[i] + __half2float(gradient[i]); variable[i] -= learning_rate[0] * accumulation[i]; } } } template <typename T, typename S, typename G> void MomentumUpdateVariable(const size_t size, T *variable, T *accumulation, const S *learning_rate, const G *gradient, const S *momentum, bool use_nesterov, cudaStream_t cuda_stream) { MomentumUpdateVariableKernel<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>( size, variable, accumulation, learning_rate, gradient, momentum, use_nesterov); } template <typename T, typename S> __global__ void FusedMomentumWeightDecayScaleKernel(const size_t element_num, T *weight_decay, T *scale, T *variable, T *accumulation, const T *learning_rate, const S *gradient, const T *momentum) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (element_num); i += blockDim.x * gridDim.x) { T grad = (variable[i] * weight_decay[0] + static_cast<T>(gradient[i])) * scale[0]; accumulation[i] = momentum[0] * accumulation[i] + grad; variable[i] -= learning_rate[0] * accumulation[i]; } } template <typename T, typename S> void FusedWeightDecayScaleMomentum(const size_t element_num, T *weight_decay, T *scale, T *variable, T *accumulation, const T *learning_rate, const S *gradient, const T *momentum, cudaStream_t cuda_stream) { size_t thread_per_block = 256; size_t block_per_grid = (element_num + thread_per_block - 1) / 
thread_per_block; FusedMomentumWeightDecayScaleKernel<<<block_per_grid, thread_per_block, 0, cuda_stream>>>( element_num, weight_decay, scale, variable, accumulation, learning_rate, gradient, momentum); } template <typename T, typename S> __global__ void FusedMomentumScaleKernel(const size_t element_num, T *scale, T *variable, T *accumulation, const T *learning_rate, const S *gradient, const T *momentum) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (element_num); i += blockDim.x * gridDim.x) { accumulation[i] = momentum[0] * accumulation[i] + static_cast<T>(gradient[i]) * scale[0]; variable[i] -= learning_rate[0] * accumulation[i]; } } template <typename T, typename S> void FusedScaleMomentum(const size_t element_num, T *scale, T *variable, T *accumulation, const T *learning_rate, const S *gradient, const T *momentum, cudaStream_t cuda_stream) { size_t thread_per_block = 256; size_t block_per_grid = (element_num + thread_per_block - 1) / thread_per_block; FusedMomentumScaleKernel<<<block_per_grid, thread_per_block, 0, cuda_stream>>>( element_num, scale, variable, accumulation, learning_rate, gradient, momentum); } template <typename T, typename S> __global__ void FusedWeightDecayMomentumKernel(const size_t element_num, T *weight_decay, T *variable, T *accumulation, const T *learning_rate, const S *gradient, const T *momentum) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (element_num); i += blockDim.x * gridDim.x) { T grad = variable[i] * weight_decay[0] + static_cast<T>(gradient[i]); accumulation[i] = momentum[0] * accumulation[i] + grad; variable[i] -= learning_rate[0] * accumulation[i]; } } template <typename T, typename S> void FusedWeightDecayMomentum(const size_t element_num, T *weight_decay, T *variable, T *accumulation, const T *learning_rate, const S *gradient, const T *momentum, cudaStream_t cuda_stream) { size_t thread_per_block = 256; size_t block_per_grid = (element_num + thread_per_block - 1) / thread_per_block; FusedWeightDecayMomentumKernel<<<block_per_grid, thread_per_block, 0, cuda_stream>>>( element_num, weight_decay, variable, accumulation, learning_rate, gradient, momentum); } // CombineFusedScaleMomentum template <typename T, typename S> __global__ void CombineFusedMomentumScaleKernel(const size_t num, const size_t *element_num, T **scale, T **variable, T **accumulation, T **learning_rate, S **gradient, T **momentum) { for (size_t idx = 0; idx < num; idx++) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (element_num[idx]); i += blockDim.x * gridDim.x) { accumulation[idx][i] = momentum[idx][0] * accumulation[idx][i] + static_cast<T>(gradient[idx][i]) * scale[idx][0]; variable[idx][i] -= learning_rate[idx][0] * accumulation[idx][i]; } } } template <typename T, typename S> void CombineFusedScaleMomentum(const size_t max, const size_t num, const size_t *elements, T **scale, T **variable, T **accumulation, T **learning_rate, S **gradient, T **momentum, cudaStream_t cuda_stream) { size_t thread_per_block = 256; size_t block_per_grid = (max + thread_per_block - 1) / thread_per_block; CombineFusedMomentumScaleKernel<<<block_per_grid, thread_per_block, 0, cuda_stream>>>( num, elements, scale, variable, accumulation, learning_rate, gradient, momentum); } // end CombineFusedScaleMomentum // CombineFusedWeightDecayScaleMomentum template <typename T, typename S> __global__ void CombineFusedMomentumWeightDecayScaleKernel(const size_t num, const size_t *element_num, T **weight_decay, T **scale, T **variable, T **accumulation, T 
**learning_rate, S **gradient, T **momentum) { for (size_t idx = 0; idx < num; idx++) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (element_num[idx]); i += blockDim.x * gridDim.x) { T grad = (variable[idx][i] * weight_decay[idx][0] + static_cast<T>(gradient[idx][i])) * scale[idx][0]; accumulation[idx][i] = momentum[idx][0] * accumulation[idx][i] + grad; variable[idx][i] -= learning_rate[idx][0] * accumulation[idx][i]; } } } template <typename T, typename S> void CombineFusedWeightDecayScaleMomentum(const size_t max, const size_t num, const size_t *element_num, T **weight_decay, T **scale, T **variable, T **accumulation, T **learning_rate, S **gradient, T **momentum, cudaStream_t cuda_stream) { size_t thread_per_block = 256; size_t block_per_grid = (max + thread_per_block - 1) / thread_per_block; CombineFusedMomentumWeightDecayScaleKernel<<<block_per_grid, thread_per_block, 0, cuda_stream>>>( num, element_num, weight_decay, scale, variable, accumulation, learning_rate, gradient, momentum); } // end CombineFusedWeightDecayScaleMomentum template void MomentumUpdateVariable<float, float, float>(const size_t size, float *variable, float *accumulation, const float *learning_rate, const float *gradient, const float *momentum, bool use_nesterov, cudaStream_t cuda_stream); template void MomentumUpdateVariable<half, half, half>(const size_t size, half *variable, half *accumulation, const half *learning_rate, const half *gradient, const half *momentum, bool use_nesterov, cudaStream_t cuda_stream); template void MomentumUpdateVariable<half, float, half>(const size_t size, half *variable, half *accumulation, const float *learning_rate, const half *gradient, const float *momentum, bool use_nesterov, cudaStream_t cuda_stream); template void MomentumUpdateVariable<float, float, half>(const size_t size, float *variable, float *accumulation, const float *learning_rate, const half *gradient, const float *momentum, bool use_nesterov, cudaStream_t cuda_stream); template void FusedWeightDecayScaleMomentum(const size_t element_num, float *weight_decay, float *scale, float *variable, float *accumulation, const float *learning_rate, const float *gradient, const float *momentum, cudaStream_t cuda_stream); template void FusedWeightDecayScaleMomentum(const size_t element_num, float *weight_decay, float *scale, float *variable, float *accumulation, const float *learning_rate, const half *gradient, const float *momentum, cudaStream_t cuda_stream); template void FusedWeightDecayMomentum(const size_t element_num, float *weight_decay, float *variable, float *accumulation, const float *learning_rate, const float *gradient, const float *momentum, cudaStream_t cuda_stream); template void FusedWeightDecayMomentum(const size_t element_num, float *weight_decay, float *variable, float *accumulation, const float *learning_rate, const half *gradient, const float *momentum, cudaStream_t cuda_stream); template void FusedScaleMomentum(const size_t element_num, float *scale, float *variable, float *accumulation, const float *learning_rate, const float *gradient, const float *momentum, cudaStream_t cuda_stream); template void FusedScaleMomentum(const size_t element_num, float *scale, float *variable, float *accumulation, const float *learning_rate, const half *gradient, const float *momentum, cudaStream_t cuda_stream); template void CombineFusedWeightDecayScaleMomentum(const size_t max, const size_t num, const size_t *elements, float **weight_decay, float **scale, float **variable, float **accumulation, float 
**learning_rate, float **gradient, float **momentum, cudaStream_t cuda_stream); template void CombineFusedWeightDecayScaleMomentum(const size_t max, const size_t num, const size_t *elements, float **weight_decay, float **scale, float **variable, float **accumulation, float **learning_rate, half **gradient, float **momentum, cudaStream_t cuda_stream); template void CombineFusedScaleMomentum(const size_t max, const size_t num, const size_t *elements, float **scale, float **variable, float **accumulation, float **learning_rate, float **gradient, float **momentum, cudaStream_t cuda_stream); template void CombineFusedScaleMomentum(const size_t max, const size_t num, const size_t *elements, float **scale, float **variable, float **accumulation, float **learning_rate, half **gradient, float **momentum, cudaStream_t cuda_stream);
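// --- Hedged host-side sketch (editor's addition, not part of the original file):
// drives the float instantiation of MomentumUpdateVariable declared above. Buffer
// names and sizes are illustrative. Note that learning_rate and momentum are passed
// as single-element *device* pointers, matching the kernels' use of learning_rate[0]
// and momentum[0].
void momentum_update_example(cudaStream_t stream) {
  const size_t n = 1 << 20;
  float *var_d, *acc_d, *grad_d, *lr_d, *mom_d;
  cudaMalloc(&var_d, n * sizeof(float));
  cudaMalloc(&acc_d, n * sizeof(float));
  cudaMalloc(&grad_d, n * sizeof(float));
  cudaMalloc(&lr_d, sizeof(float));
  cudaMalloc(&mom_d, sizeof(float));
  const float lr = 0.01f, mom = 0.9f;
  cudaMemsetAsync(acc_d, 0, n * sizeof(float), stream);          // zero accumulation
  cudaMemcpyAsync(lr_d, &lr, sizeof(float), cudaMemcpyHostToDevice, stream);
  cudaMemcpyAsync(mom_d, &mom, sizeof(float), cudaMemcpyHostToDevice, stream);
  // ... var_d and grad_d are assumed to be filled elsewhere ...
  MomentumUpdateVariable<float, float, float>(n, var_d, acc_d, lr_d, grad_d, mom_d,
                                              /*use_nesterov=*/false, stream);
  cudaStreamSynchronize(stream);
  cudaFree(var_d); cudaFree(acc_d); cudaFree(grad_d); cudaFree(lr_d); cudaFree(mom_d);
}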
#include "cublas_v2.h" #include "cusolverDn.h" #include <thrust/device_vector.h> #include <thrust/device_ptr.h> #include <thrust/gather.h> #include <thrust/sort.h> #include <cmath> #include "_reg_maths.h" #include "_reg_tools.h" #include "_reg_blockMatching.h" #define IDX2C(i,j,ld) (((j)*(ld))+(i)) /* *************************************************************** */ template<class DTYPE> __device__ __inline__ void reg_mat44_mul_cuda(float* mat, DTYPE const* in, DTYPE *out) { out[0] = (DTYPE)((double)mat[0 * 4 + 0] * (double)in[0] + (double)mat[0 * 4 + 1] * (double)in[1] + (double)mat[0 * 4 + 2] * (double)in[2] + (double)mat[0 * 4 + 3]); out[1] = (DTYPE)((double)mat[1 * 4 + 0] * (double)in[0] + (double)mat[1 * 4 + 1] * (double)in[1] + (double)mat[1 * 4 + 2] * (double)in[2] + (double)mat[1 * 4 + 3]); out[2] = (DTYPE)((double)mat[2 * 4 + 0] * (double)in[0] + (double)mat[2 * 4 + 1] * (double)in[1] + (double)mat[2 * 4 + 2] * (double)in[2] + (double)mat[2 * 4 + 3]); return; } /* *************************************************************** */ __device__ double getSquareDistance3Dcu(float * first_point3D, float * second_point3D) { return sqrt(((double)first_point3D[0] - (double)second_point3D[0]) * ((double)first_point3D[0] - (double)second_point3D[0]) + ((double)first_point3D[1] - (double)second_point3D[1]) * ((double)first_point3D[1] - (double)second_point3D[1]) + ((double)first_point3D[2] - (double)second_point3D[2]) * ((double)first_point3D[2] - (double)second_point3D[2])); } /* *************************************************************** */ void checkCublasStatus(cublasStatus_t status) { if (status != CUBLAS_STATUS_SUCCESS) { reg_print_fct_error("checkCublasStatus"); reg_print_msg_error("!!!! CUBLAS error"); reg_exit(0); } } /* *************************************************************** */ void checkCUSOLVERStatus(cusolverStatus_t status, char* msg) { if (status != CUSOLVER_STATUS_SUCCESS) { if (status == CUSOLVER_STATUS_NOT_INITIALIZED) { reg_print_fct_error("the library was not initialized."); } else if (status == CUSOLVER_STATUS_INTERNAL_ERROR) { reg_print_fct_error(" an internal operation failed."); } reg_exit(0); } } /* *************************************************************** */ void checkDevInfo(int *devInfo) { int * hostDevInfo = (int*)malloc(sizeof(int)); cudaMemcpy(hostDevInfo, devInfo, sizeof(int), cudaMemcpyDeviceToHost); if (hostDevInfo < 0) printf("parameter: %d is wrong\n", hostDevInfo); if (hostDevInfo > 0) printf("%d superdiagonals of an intermediate bidiagonal form B did not converge to zero.\n", hostDevInfo); else printf(" %d: operation successful\n", hostDevInfo); free(hostDevInfo); } /* *************************************************************** */ void downloadMat44(mat44 *lastTransformation, float* transform_d) { float* tempMat = (float*)malloc(16 * sizeof(float)); cudaMemcpy(tempMat, transform_d, 16 * sizeof(float), cudaMemcpyDeviceToHost); cPtrToMat44(lastTransformation, tempMat); free(tempMat); } /* *************************************************************** */ void uploadMat44(mat44 lastTransformation, float* transform_d) { float* tempMat = (float*)malloc(16 * sizeof(float)); mat44ToCptr(lastTransformation, tempMat); cudaMemcpy(transform_d, tempMat, 16 * sizeof(float), cudaMemcpyHostToDevice); free(tempMat); } /* *************************************************************** */ //threads: 512 | blocks:numEquations/512 __global__ void transformWarpedPointsKernel(float* transform, float* in, float* out, unsigned int definedBlockNum) { const 
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < definedBlockNum) { const unsigned int posIdx = 3 * tid; in += posIdx; out += posIdx; reg_mat44_mul_cuda<float>(transform, in, out); } } /* *************************************************************** */ //blocks: 1 | threads: 12 __global__ void trimAndInvertSingularValuesKernel(float* sigma) { sigma[threadIdx.x] = (sigma[threadIdx.x] < 0.0001) ? 0.0f : (float) ((double) 1.0 / (double) sigma[threadIdx.x]); } /* *************************************************************** */ //launched as ldm blocks n threads __global__ void scaleV(float* V, const unsigned int ldm, const unsigned int n, float*w) { unsigned int k = blockIdx.x; unsigned int j = threadIdx.x; V[IDX2C(j, k, ldm)] = (float)((double)V[IDX2C(j, k, ldm)] * (double)w[j]); } /* *************************************************************** */ //threads: 16 | blocks:1 __global__ void permuteAffineMatrix(float* transform) { __shared__ float buffer[16]; const unsigned int i = threadIdx.x; buffer[i] = transform[i]; __syncthreads(); const unsigned int idx33 = (i / 3) * 4 + i % 3; const unsigned int idx34 = (i % 3) * 4 + 3; if (i < 9) transform[idx33] = buffer[i]; else if (i < 12)transform[idx34] = buffer[i]; else transform[i] = buffer[i]; } /* *************************************************************** */ //threads: 512 | blocks:numEquations/512 __global__ void populateMatrixA(float* A, float *reference, unsigned int numBlocks) { const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int c = tid * 3; // const unsigned int n = 12; const unsigned int lda = numBlocks * 3; if (tid < numBlocks) { reference += c; //IDX2C(i,j,ld) A[IDX2C(c, 0, lda)] = reference[0]; A[IDX2C(c, 1, lda)] = reference[1]; A[IDX2C(c, 2, lda)] = reference[2]; A[IDX2C(c, 3, lda)] = A[IDX2C(c, 4, lda)] = A[IDX2C(c, 5, lda)] = A[IDX2C(c, 6, lda)] = A[IDX2C(c, 7, lda)] = A[IDX2C(c, 8, lda)] = A[IDX2C(c, 10, lda)] = A[IDX2C(c, 11, lda)] = 0.0f; A[IDX2C(c, 9, lda)] = 1.0f; A[IDX2C((c + 1), 3, lda)] = reference[0]; A[IDX2C((c + 1), 4, lda)] = reference[1]; A[IDX2C((c + 1), 5, lda)] = reference[2]; A[IDX2C((c + 1), 0, lda)] = A[IDX2C((c + 1), 1, lda)] = A[IDX2C((c + 1), 2, lda)] = A[IDX2C((c + 1), 6, lda)] = A[IDX2C((c + 1), 7, lda)] = A[IDX2C((c + 1), 8, lda)] = A[IDX2C((c + 1), 9, lda)] = A[IDX2C((c + 1), 11, lda)] = 0.0f; A[IDX2C((c + 1), 10, lda)] = 1.0f; A[IDX2C((c + 2), 6, lda)] = reference[0]; A[IDX2C((c + 2), 7, lda)] = reference[1]; A[IDX2C((c + 2), 8, lda)] = reference[2]; A[IDX2C((c + 2), 0, lda)] = A[IDX2C((c + 2), 1, lda)] = A[IDX2C((c + 2), 2, lda)] = A[IDX2C((c + 2), 3, lda)] = A[IDX2C((c + 2), 4, lda)] = A[IDX2C((c + 2), 5, lda)] = A[IDX2C((c + 2), 9, lda)] = A[IDX2C((c + 2), 10, lda)] = 0.0f; A[IDX2C((c + 2), 11, lda)] = 1.0f; } } /* *************************************************************** */ //threads: 512 | blocks:numEquations/512 __global__ void populateLengthsKernel(float* lengths, float* warped_d, float* newWarped_d, unsigned int numEquations) { unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; unsigned int c = tid * 3; if (tid < numEquations) { newWarped_d += c; warped_d += c; lengths[tid] = getSquareDistance3Dcu(warped_d, newWarped_d); } } /* *************************************************************** */ //launched as 1 block 1 thread __global__ void outputMatFlat(float* mat, const unsigned int ldm, const unsigned int n, char* msg) { for (int i = 0; i < ldm * n; ++i) printf("%f | ", mat[i]); printf("\n"); } /* 
*************************************************************** */ //launched as 1 block 1 thread __global__ void outputMat(float* mat, const unsigned int ldm, const unsigned int n, char* msg) { for (int i = 0; i < ldm; ++i) { printf("%d ", i); for (int j = 0; j < n; ++j) { printf("%f ", mat[IDX2C(i, j, ldm)]); } printf("\n"); } printf("\n"); } /* *************************************************************** */ /* * the function computes the SVD of a matrix A * A = V* x S x U, where V* is a (conjugate) transpose of V * */ void cusolverSVD(float* A_d, unsigned int m, unsigned int n, float* S_d, float* VT_d, float* U_d) { //CAST float* to double* /* double* Adouble_d; double* Sdouble_d; double* VTdouble_d; double* Udouble_d; cudaMalloc((void **) &Adouble_d, m*n*sizeof(double)); cudaMalloc((void **) &Sdouble_d, xx*sizeof(double)); cudaMalloc((void **) &VTdouble_d, xx*sizeof(double)); cudaMalloc((void **) &Udouble_d, xx*sizeof(double)); cudaMemcpy(b_d, a_d, nBytes, cudaMemcpyDeviceToDevice); */ const int lda = m; const int ldu = m; const int ldvt = n; /* * 'A': all m columns of U are returned in array * 'S': the first min(m,n) columns of U (the left singular vectors) are returned in the array * 'O': the first min(m,n) columns of U (the left singular vectors) are overwritten on the array * 'N': no columns of U (no left singular vectors) are computed */ const char jobu = 'A'; /* * 'A': all N rows of V**T are returned in the array * 'S': the first min(m,n) rows of V**T (the right singular vectors) are returned in the array * 'O': the first min(m,n) rows of V**T (the right singular vectors) are overwritten on the array * 'N': no rows of V**T (no right singular vectors) are computed */ const char jobvt = 'A'; cusolverDnHandle_t gH = NULL; int Lwork; //device ptrs float *Work; float *rwork; int *devInfo; //init cusolver compute SVD and shut down checkCUSOLVERStatus(cusolverDnCreate(&gH), "cusolverDnCreate"); checkCUSOLVERStatus(cusolverDnSgesvd_bufferSize(gH, m, n, &Lwork), "cusolverDnSgesvd_bufferSize"); cudaMalloc(&Work, Lwork * sizeof(float)); cudaMalloc(&rwork, Lwork * sizeof(float)); cudaMalloc(&devInfo, sizeof(int)); checkCUSOLVERStatus(cusolverDnSgesvd(gH, jobu, jobvt, m, n, A_d, lda, S_d, U_d, ldu, VT_d, ldvt, Work, Lwork, NULL, devInfo), "cusolverDnSgesvd"); checkCUSOLVERStatus(cusolverDnDestroy(gH), "cusolverDnDestroy"); //free vars cudaFree(devInfo); cudaFree(rwork); cudaFree(Work); } /* *************************************************************** */ /* * the function computes the Pseudoinverse from the products of the SVD factorisation of A * R = V x inv(S) x U* * */ void cublasPseudoInverse(float* transformation, float *R_d, float* warped_d, float *VT_d, float* Sigma_d, float *U_d, const unsigned int m, const unsigned int n) { // First we make sure that the really small singular values // are set to 0. 
and compute the inverse by taking the reciprocal of the entries trimAndInvertSingularValuesKernel <<<1, n >>>(Sigma_d); //test 3 cublasHandle_t handle; const float alpha = 1.f; const float beta = 0.f; const int ldvt = n;//VT's lead dimension const int ldu = m;//U's lead dimension const int ldr = n;//Pseudoinverse's r lead dimension const int rowsVTandR = n;//VT and r's num rows const int colsUandR = m;//U and r's num cols const int colsVtRowsU = n;//VT's cols and U's rows // V x inv(S) in place | We scale eaach row with the corresponding singular value as V is transpose scaleV <<<n, n >>>(VT_d, n, n, Sigma_d); //Initialize CUBLAS perform ops and shut down checkCublasStatus(cublasCreate(&handle)); //now R = V x inv(S) x U* checkCublasStatus(cublasSgemm(handle, CUBLAS_OP_T, CUBLAS_OP_T, rowsVTandR, colsUandR, colsVtRowsU, &alpha, VT_d, ldvt, U_d, ldu, &beta, R_d, ldr)); //finally M=Rxb, where M is our affine matrix and b a vector containg the warped points checkCublasStatus(cublasSgemv(handle, CUBLAS_OP_N, n, m, &alpha, R_d, ldr, warped_d, 1, &beta, transformation, 1)); checkCublasStatus(cublasDestroy(handle)); permuteAffineMatrix <<<1, 16 >>>(transformation); cudaThreadSynchronize(); } /* *************************************************************** */ double sortAndReduce(float* lengths_d, float* reference_d, float* warped_d, float* newWarped_d, const unsigned int numBlocks, const unsigned int numToKeep, const unsigned int m) { //populateLengthsKernel populateLengthsKernel <<< numBlocks, 512 >>>(lengths_d, warped_d, newWarped_d, m / 3); // The initial vector with all the input points thrust::device_ptr<float> reference_d_ptr(reference_d); thrust::device_vector<float> vecReference_d(reference_d_ptr, reference_d_ptr + m); thrust::device_ptr<float> warped_d_ptr(warped_d); thrust::device_vector<float> vecWarped_d(warped_d_ptr, warped_d_ptr + m); thrust::device_ptr<float> lengths_d_ptr(lengths_d); thrust::device_vector<float> vec_lengths_d(lengths_d_ptr, lengths_d_ptr + m / 3); // initialize indices vector to [0,1,2,..m] thrust::counting_iterator<int> iter(0); thrust::device_vector<int> indices(m); thrust::copy(iter, iter + indices.size(), indices.begin()); //sort an indices array by lengths as key. Then use it to sort reference and warped arrays thrust::sort_by_key(vec_lengths_d.begin(), vec_lengths_d.end(), indices.begin()); thrust::gather(indices.begin(), indices.end(), vecReference_d.begin(), vecReference_d.begin());//end()? thrust::gather(indices.begin(), indices.end(), vecWarped_d.begin(), vecWarped_d.begin());//end()? 
return thrust::reduce(lengths_d_ptr, lengths_d_ptr + numToKeep, 0, thrust::plus<double>()); } /* *************************************************************** */ //OPTIMIZER----------------------------------------------- // estimate an affine transformation using least square void getAffineMat3D(float* AR_d, float* Sigma_d, float* VT_d, float* U_d, float* reference_d, float* warped_d, float *transformation, const unsigned int numBlocks, unsigned int m, unsigned int n) { //populate A populateMatrixA <<< numBlocks, 512 >>>(AR_d, reference_d, m / 3); //test 2 //calculate SVD on the GPU cusolverSVD(AR_d, m, n, Sigma_d, VT_d, U_d); //calculate the pseudoinverse cublasPseudoInverse(transformation, AR_d, warped_d, VT_d, Sigma_d, U_d, m, n); } /* *************************************************************** */ void affineLocalSearch3DCuda(mat44 *cpuMat, float* final_d, float *AR_d, float* Sigma_d, float* U_d, float* VT_d, float * newWarpedPos_d, float* referencePos_d, float* warpedPos_d, float* lengths_d, const unsigned int numBlocks, const unsigned int num_to_keep, const unsigned int m, const unsigned int n) { double lastDistance = std::numeric_limits<double>::max(); float* lastTransformation_d; cudaMalloc(&lastTransformation_d, 16 * sizeof(float)); //get initial affine matrix getAffineMat3D(AR_d, Sigma_d, VT_d, U_d, referencePos_d, warpedPos_d, final_d, numBlocks, m, n); for (unsigned int count = 0; count < MAX_ITERATIONS; ++count) { // Transform the points in the reference transformWarpedPointsKernel <<< numBlocks, 512 >>>(final_d, referencePos_d, newWarpedPos_d, m / 3); //test 1 double distance = sortAndReduce(lengths_d, referencePos_d, warpedPos_d, newWarpedPos_d, numBlocks, num_to_keep, m); // If the change is not substantial or we are getting worst, we return if ((distance > lastDistance) || (lastDistance - distance) < TOLERANCE) break; lastDistance = distance; cudaMemcpy(lastTransformation_d, final_d, 16 * sizeof(float), cudaMemcpyDeviceToDevice); getAffineMat3D(AR_d, Sigma_d, VT_d, U_d, referencePos_d, warpedPos_d, final_d, numBlocks, m, n); } //async cudamemcpy here cudaMemcpy(final_d, lastTransformation_d, 16 * sizeof(float), cudaMemcpyDeviceToDevice); cudaFree(lastTransformation_d); } /* *************************************************************** */ void optimize_affine3D_cuda(mat44* cpuMat, float* final_d, float* A_d, float* U_d, float* Sigma_d, float* VT_d, float* lengths_d, float* reference_d, float* warped_d, float* newWarped_d, unsigned int m, unsigned int n, const unsigned int numToKeep, bool ilsIn, bool isAffine) { //m | blockMatchingParams->activeBlockNumber * 3 //n | 12 const unsigned int numEquations = m; const unsigned int numBlocks = (numEquations % 512) ? (numEquations / 512) + 1 : numEquations / 512; uploadMat44(*cpuMat, final_d); transformWarpedPointsKernel <<< numBlocks, 512 >>>(final_d, warped_d, newWarped_d, m / 3); //test 1 cudaMemcpy(warped_d, newWarped_d, m * sizeof(float), cudaMemcpyDeviceToDevice); // run the local search optimization routine affineLocalSearch3DCuda(cpuMat, final_d, A_d, Sigma_d, U_d, VT_d, newWarped_d, reference_d, warped_d, lengths_d, numBlocks, numToKeep, m, n); downloadMat44(cpuMat, final_d); }
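/* *************************************************************** */
// --- Hedged driver sketch (editor's addition, not part of NiftyReg): illustrative
// buffer sizes for calling optimize_affine3D_cuda above. With numPoints matched block
// centres there are m = 3*numPoints equations and n = 12 affine unknowns; since
// cusolverSVD is invoked with jobu = jobvt = 'A', U must be m x m, VT must be n x n,
// and Sigma holds the n singular values. The function name, the assumption that
// *affine holds the initial transformation on entry, and the allocation sizes are
// all editor assumptions, not code from the original project.
void run_affine_optimisation_example(mat44 *affine, const float *reference_h,
                                     const float *warped_h, unsigned int numPoints,
                                     unsigned int numToKeep) {
  const unsigned int m = 3 * numPoints; // one x/y/z equation per matched point
  const unsigned int n = 12;            // affine parameters
  // cusolverDnSgesvd requires m >= n, i.e. at least 4 matched points here.
  float *final_d, *A_d, *U_d, *Sigma_d, *VT_d, *lengths_d;
  float *reference_d, *warped_d, *newWarped_d;
  cudaMalloc(&final_d, 16 * sizeof(float));         // flattened 4x4 affine
  cudaMalloc(&A_d, m * n * sizeof(float));          // design matrix A
  cudaMalloc(&U_d, m * m * sizeof(float));
  cudaMalloc(&VT_d, n * n * sizeof(float));
  cudaMalloc(&Sigma_d, n * sizeof(float));
  cudaMalloc(&lengths_d, (m / 3) * sizeof(float));  // one residual per point
  cudaMalloc(&reference_d, m * sizeof(float));
  cudaMalloc(&warped_d, m * sizeof(float));
  cudaMalloc(&newWarped_d, m * sizeof(float));
  cudaMemcpy(reference_d, reference_h, m * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(warped_d, warped_h, m * sizeof(float), cudaMemcpyHostToDevice);
  optimize_affine3D_cuda(affine, final_d, A_d, U_d, Sigma_d, VT_d, lengths_d,
                         reference_d, warped_d, newWarped_d, m, n, numToKeep,
                         true, true);
  // *affine now holds the refined transformation (written back by downloadMat44).
  cudaFree(final_d); cudaFree(A_d); cudaFree(U_d); cudaFree(VT_d); cudaFree(Sigma_d);
  cudaFree(lengths_d); cudaFree(reference_d); cudaFree(warped_d); cudaFree(newWarped_d);
}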
#include <algorithm> #include <assert.h> #include <bitset> #include <inttypes.h> #include <iostream> #include <iterator> #include <map> #include <sstream> #include <stdint.h> #include <stdio.h> #include <string> #include <sys/stat.h> #include <unistd.h> #include <unordered_set> #include <vector> /* every tool needs to include this once */ #include "nvbit_tool.h" /* nvbit interface file */ #include "nvbit.h" /* for channel */ #include "utils/channel.hpp" /* contains definition of the inst_trace_t structure */ #include "common.h" #define TRACER_VERSION "3" /* Channel used to communicate from GPU to CPU receiving thread */ #define CHANNEL_SIZE (1l << 20) static __managed__ ChannelDev channel_dev; static ChannelHost channel_host; /* receiving thread and its control variables */ pthread_t recv_thread; volatile bool recv_thread_started = false; volatile bool recv_thread_receiving = false; /* skip flag used to avoid re-entry on the nvbit_callback when issuing * flush_channel kernel call */ bool skip_flag = false; /* global control variables for this tool */ uint32_t instr_begin_interval = 0; uint32_t instr_end_interval = UINT32_MAX; int verbose = 0; int enable_compress = 1; int print_core_id = 0; int exclude_pred_off = 1; /* opcode to id map and reverse map */ std::map<std::string, int> opcode_to_id_map; std::map<int, std::string> id_to_opcode_map; /* kernel instruction counter, updated by the GPU */ uint64_t dynamic_kernel_limit_start = 0; // 0 means start from the begging kernel uint64_t dynamic_kernel_limit_end = 0; // 0 means no limit enum address_format { list_all = 0, base_stride = 1, base_delta = 2 }; void nvbit_at_init() { setenv("CUDA_MANAGED_FORCE_DEVICE_ALLOC", "1", 1); GET_VAR_INT( instr_begin_interval, "INSTR_BEGIN", 0, "Beginning of the instruction interval where to apply instrumentation"); GET_VAR_INT(instr_end_interval, "INSTR_END", UINT32_MAX, "End of the instruction interval where to apply instrumentation"); GET_VAR_INT(exclude_pred_off, "EXCLUDE_PRED_OFF", 1, "Exclude predicated off instruction from count"); GET_VAR_INT(dynamic_kernel_limit_end, "DYNAMIC_KERNEL_LIMIT_END", 0, "Limit of the number kernel to be printed, 0 means no limit"); GET_VAR_INT(dynamic_kernel_limit_start, "DYNAMIC_KERNEL_LIMIT_START", 0, "start to report kernel from this kernel id, 0 means starts from " "the begging, i.e. 
first kernel"); GET_VAR_INT(verbose, "TOOL_VERBOSE", 0, "Enable verbosity inside the tool"); GET_VAR_INT(enable_compress, "TOOL_COMPRESS", 1, "Enable traces compression"); GET_VAR_INT(print_core_id, "TOOL_TRACE_CORE", 0, "write the core id in the traces"); std::string pad(100, '-'); printf("%s\n", pad.c_str()); } /* Set used to avoid re-instrumenting the same functions multiple times */ std::unordered_set<CUfunction> already_instrumented; /* instrument each memory instruction adding a call to the above instrumentation * function */ void instrument_function_if_needed(CUcontext ctx, CUfunction func) { std::vector<CUfunction> related_functions = nvbit_get_related_functions(ctx, func); /* add kernel itself to the related function vector */ related_functions.push_back(func); /* iterate on function */ for (auto f : related_functions) { /* "recording" function was instrumented, if set insertion failed * we have already encountered this function */ if (!already_instrumented.insert(f).second) { continue; } const std::vector<Instr *> &instrs = nvbit_get_instrs(ctx, f); if (verbose) { printf("Inspecting function %s at address 0x%lx\n", nvbit_get_func_name(ctx, f), nvbit_get_func_addr(f), true); } uint32_t cnt = 0; /* iterate on all the static instructions in the function */ for (auto instr : instrs) { if (cnt < instr_begin_interval || cnt >= instr_end_interval) { cnt++; continue; } if (verbose) { instr->printDecoded(); } if (opcode_to_id_map.find(instr->getOpcode()) == opcode_to_id_map.end()) { int opcode_id = opcode_to_id_map.size(); opcode_to_id_map[instr->getOpcode()] = opcode_id; id_to_opcode_map[opcode_id] = instr->getOpcode(); } int opcode_id = opcode_to_id_map[instr->getOpcode()]; /* insert call to the instrumentation function with its * arguments */ nvbit_insert_call(instr, "instrument_inst", IPOINT_BEFORE); /* pass predicate value */ nvbit_add_call_arg_guard_pred_val(instr); /* send opcode and pc */ nvbit_add_call_arg_const_val32(instr, opcode_id); nvbit_add_call_arg_const_val32(instr, (int)instr->getOffset()); /* check all operands. For now, we ignore constant, TEX, predicates and * unified registers. We only report vector regisers */ int src_oprd[MAX_SRC]; int srcNum = 0; int dst_oprd = -1; int mem_oper_idx = -1; /* find dst reg and handle the special case if the oprd[0] is mem (e.g. 
* store and RED)*/ if (instr->getNumOperands() > 0 && instr->getOperand(0)->type == InstrType::OperandType::REG) dst_oprd = instr->getOperand(0)->u.reg.num; else if (instr->getNumOperands() > 0 && instr->getOperand(0)->type == InstrType::OperandType::MREF) { src_oprd[0] = instr->getOperand(0)->u.mref.ra_num; mem_oper_idx = 0; srcNum++; } // find src regs and mem for (int i = 1; i < MAX_SRC; i++) { if (i < instr->getNumOperands()) { const InstrType::operand_t *op = instr->getOperand(i); if (op->type == InstrType::OperandType::MREF) { // mem is found assert(srcNum < MAX_SRC); src_oprd[srcNum] = instr->getOperand(i)->u.mref.ra_num; srcNum++; // TO DO: handle LDGSTS with two mem refs assert(mem_oper_idx == -1); // ensure one memory operand per inst mem_oper_idx++; } else if (op->type == InstrType::OperandType::REG) { // reg is found assert(srcNum < MAX_SRC); src_oprd[srcNum] = instr->getOperand(i)->u.reg.num; srcNum++; } // skip anything else (constant and predicates) } } /* mem addresses info */ if (mem_oper_idx >= 0) { nvbit_add_call_arg_const_val32(instr, 1); nvbit_add_call_arg_mref_addr64(instr, 0); nvbit_add_call_arg_const_val32(instr, (int)instr->getSize()); } else { nvbit_add_call_arg_const_val32(instr, 0); nvbit_add_call_arg_const_val64(instr, -1); nvbit_add_call_arg_const_val32(instr, -1); } /* reg info */ nvbit_add_call_arg_const_val32(instr, dst_oprd); for (int i = 0; i < srcNum; i++) { nvbit_add_call_arg_const_val32(instr, src_oprd[i]); } for (int i = srcNum; i < MAX_SRC; i++) { nvbit_add_call_arg_const_val32(instr, -1); } nvbit_add_call_arg_const_val32(instr, srcNum); /* add pointer to channel_dev and other counters*/ nvbit_add_call_arg_const_val64(instr, (uint64_t)&channel_dev); nvbit_add_call_arg_const_val64(instr, (uint64_t)&total_dynamic_instr_counter); nvbit_add_call_arg_const_val64(instr, (uint64_t)&reported_dynamic_instr_counter); nvbit_add_call_arg_const_val64(instr, (uint64_t)&stop_report); cnt++; } } } __global__ void flush_channel() { /* push memory access with negative cta id to communicate the kernel is * completed */ inst_trace_t ma; ma.cta_id_x = -1; channel_dev.push(&ma, sizeof(inst_trace_t)); /* flush channel */ channel_dev.flush(); } static FILE *resultsFile = NULL; static FILE *kernelsFile = NULL; static FILE *statsFile = NULL; static int kernelid = 1; static bool first_call = true; unsigned old_total_insts = 0; unsigned old_total_reported_insts = 0; void nvbit_at_cuda_event(CUcontext ctx, int is_exit, nvbit_api_cuda_t cbid, const char *name, void *params, CUresult *pStatus) { if (skip_flag) return; if (first_call == true) { first_call = false; if (mkdir("traces", S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH) == -1) { if (errno == EEXIST) { // alredy exists } else { // something else std::cout << "cannot create folder error:" << strerror(errno) << std::endl; return; } } if (!dynamic_kernel_limit_start || dynamic_kernel_limit_start == 1) stop_report = false; else stop_report = true; kernelsFile = fopen("./traces/kernelslist", "w"); statsFile = fopen("./traces/stats.csv", "w"); fprintf(statsFile, "kernel id, kernel mangled name, grid_dimX, grid_dimY, grid_dimZ, " "#blocks, block_dimX, block_dimY, block_dimZ, #threads, " "total_insts, total_reported_insts\n"); fclose(statsFile); } if (cbid == API_CUDA_cuMemcpyHtoD_v2) { if (!is_exit) { cuMemcpyHtoD_v2_params *p = (cuMemcpyHtoD_v2_params *)params; char buffer[1024]; kernelsFile = fopen("./traces/kernelslist", "a"); sprintf(buffer, "MemcpyHtoD,0x%016lx,%lld", p->dstDevice, p->ByteCount); fprintf(kernelsFile, buffer); 
fprintf(kernelsFile, "\n"); fclose(kernelsFile); } } else if (cbid == API_CUDA_cuLaunchKernel_ptsz || cbid == API_CUDA_cuLaunchKernel) { cuLaunchKernel_params *p = (cuLaunchKernel_params *)params; if (!is_exit) { if (dynamic_kernel_limit_start && kernelid == dynamic_kernel_limit_start) stop_report = false; int nregs; CUDA_SAFECALL( cuFuncGetAttribute(&nregs, CU_FUNC_ATTRIBUTE_NUM_REGS, p->f)); int shmem_static_nbytes; CUDA_SAFECALL(cuFuncGetAttribute( &shmem_static_nbytes, CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES, p->f)); int binary_version; CUDA_SAFECALL(cuFuncGetAttribute(&binary_version, CU_FUNC_ATTRIBUTE_BINARY_VERSION, p->f)); instrument_function_if_needed(ctx, p->f); nvbit_enable_instrumented(ctx, p->f, true); char buffer[1024]; sprintf(buffer, "./traces/kernel-%d.trace", kernelid); if (!stop_report) { resultsFile = fopen(buffer, "w"); printf("Writing results to %s\n", buffer); fprintf(resultsFile, "-kernel name = %s\n", nvbit_get_func_name(ctx, p->f, true)); fprintf(resultsFile, "-kernel id = %d\n", kernelid); fprintf(resultsFile, "-grid dim = (%d,%d,%d)\n", p->gridDimX, p->gridDimY, p->gridDimZ); fprintf(resultsFile, "-block dim = (%d,%d,%d)\n", p->blockDimX, p->blockDimY, p->blockDimZ); fprintf(resultsFile, "-shmem = %d\n", shmem_static_nbytes + p->sharedMemBytes); fprintf(resultsFile, "-nregs = %d\n", nregs); fprintf(resultsFile, "-binary version = %d\n", binary_version); fprintf(resultsFile, "-cuda stream id = %d\n", (uint64_t)p->hStream); fprintf(resultsFile, "-shmem base_addr = 0x%016lx\n", (uint64_t)nvbit_get_shmem_base_addr(ctx)); fprintf(resultsFile, "-local mem base_addr = 0x%016lx\n", (uint64_t)nvbit_get_local_mem_base_addr(ctx)); fprintf(resultsFile, "-nvbit version = %s\n", NVBIT_VERSION); fprintf(resultsFile, "-accelsim tracer version = %s\n", TRACER_VERSION); fprintf(resultsFile, "\n"); fprintf(resultsFile, "#traces format = threadblock_x threadblock_y threadblock_z " "warpid_tb PC mask dest_num [reg_dests] opcode src_num " "[reg_srcs] mem_width [adrrescompress?] 
[mem_addresses]\n"); fprintf(resultsFile, "\n"); } kernelsFile = fopen("./traces/kernelslist", "a"); sprintf(buffer, "kernel-%d.trace", kernelid); if (!stop_report) { fprintf(kernelsFile, buffer); fprintf(kernelsFile, "\n"); } fclose(kernelsFile); statsFile = fopen("./traces/stats.csv", "a"); unsigned blocks = p->gridDimX * p->gridDimY * p->gridDimZ; unsigned threads = p->blockDimX * p->blockDimY * p->blockDimZ; fprintf(statsFile, "%s, %s, %d, %d, %d, %d, %d, %d, %d, %d, ", buffer, nvbit_get_func_name(ctx, p->f, true), p->gridDimX, p->gridDimY, p->gridDimZ, blocks, p->blockDimX, p->blockDimY, p->blockDimZ, threads); fclose(statsFile); kernelid++; recv_thread_receiving = true; } else { /* make sure current kernel is completed */ cudaDeviceSynchronize(); assert(cudaGetLastError() == cudaSuccess); /* make sure we prevent re-entry on the nvbit_callback when issuing * the flush_channel kernel */ skip_flag = true; /* issue flush of channel so we are sure all the memory accesses * have been pushed */ flush_channel<<<1, 1>>>(); cudaDeviceSynchronize(); assert(cudaGetLastError() == cudaSuccess); /* unset the skip flag */ skip_flag = false; /* wait here until the receiving thread has not finished with the * current kernel */ while (recv_thread_receiving) { pthread_yield(); } unsigned total_insts_per_kernel = total_dynamic_instr_counter - old_total_insts; old_total_insts = total_dynamic_instr_counter; unsigned reported_insts_per_kernel = reported_dynamic_instr_counter - old_total_reported_insts; old_total_reported_insts = reported_dynamic_instr_counter; statsFile = fopen("./traces/stats.csv", "a"); fprintf(statsFile, "%d,%d", total_insts_per_kernel, reported_insts_per_kernel); fprintf(statsFile, "\n"); fclose(statsFile); if (!stop_report) fclose(resultsFile); if (dynamic_kernel_limit_end && kernelid > dynamic_kernel_limit_end) stop_report = true; } } } bool is_number(const std::string &s) { std::string::const_iterator it = s.begin(); while (it != s.end() && std::isdigit(*it)) ++it; return !s.empty() && it == s.end(); } unsigned get_datawidth_from_opcode(const std::vector<std::string> &opcode) { for (unsigned i = 0; i < opcode.size(); ++i) { if (is_number(opcode[i])) { return (std::stoi(opcode[i], NULL) / 8); } else if (opcode[i][0] == 'U' && is_number(opcode[i].substr(1))) { // handle the U* case unsigned bits; sscanf(opcode[i].c_str(), "U%u", &bits); return bits / 8; } } return 4; // default is 4 bytes } bool check_opcode_contain(const std::vector<std::string> &opcode, std::string param) { for (unsigned i = 0; i < opcode.size(); ++i) if (opcode[i] == param) return true; return false; } bool base_stride_compress(const uint64_t *addrs, const std::bitset<32> &mask, uint64_t &base_addr, int &stride) { // calulcate the difference between addresses // write cosnsctive addresses with constant stride in a more // compressed way (i.e. 
start adress and stride) bool const_stride = true; bool first_bit1_found = false; bool last_bit1_found = false; for (int s = 0; s < 32; s++) { if (mask.test(s) && !first_bit1_found) { first_bit1_found = true; base_addr = addrs[s]; if (s < 31 && mask.test(s + 1)) stride = addrs[s + 1] - addrs[s]; else { const_stride = false; break; } } else if (first_bit1_found && !last_bit1_found) { if (mask.test(s)) { if (stride != addrs[s] - addrs[s - 1]) { const_stride = false; break; } } else last_bit1_found = true; } else if (last_bit1_found) { if (mask.test(s)) { const_stride = false; break; } } } return const_stride; } void base_delta_compress(const uint64_t *addrs, const std::bitset<32> &mask, uint64_t &base_addr, std::vector<long long> &deltas) { // save the delta from the previous address bool first_bit1_found = false; uint64_t last_address = 0; for (int s = 0; s < 32; s++) { if (mask.test(s) && !first_bit1_found) { base_addr = addrs[s]; first_bit1_found = true; last_address = addrs[s]; } else if (mask.test(s) && first_bit1_found) { deltas.push_back(addrs[s] - last_address); last_address = addrs[s]; } } } void *recv_thread_fun(void *) { char *recv_buffer = (char *)malloc(CHANNEL_SIZE); while (recv_thread_started) { uint32_t num_recv_bytes = 0; if (recv_thread_receiving && (num_recv_bytes = channel_host.recv(recv_buffer, CHANNEL_SIZE)) > 0) { uint32_t num_processed_bytes = 0; while (num_processed_bytes < num_recv_bytes) { inst_trace_t *ma = (inst_trace_t *)&recv_buffer[num_processed_bytes]; /* when we get this cta_id_x it means the kernel has completed */ if (ma->cta_id_x == -1) { recv_thread_receiving = false; break; } fprintf(resultsFile, "%d ", ma->cta_id_x); fprintf(resultsFile, "%d ", ma->cta_id_y); fprintf(resultsFile, "%d ", ma->cta_id_z); fprintf(resultsFile, "%d ", ma->warpid_tb); if (print_core_id) { fprintf(resultsFile, "%d ", ma->sm_id); fprintf(resultsFile, "%d ", ma->warpid_sm); } fprintf(resultsFile, "%04x ", ma->vpc); // Print the virtual PC fprintf(resultsFile, "%08x ", ma->active_mask & ma->predicate_mask); if (ma->GPRDst >= 0) { fprintf(resultsFile, "1 "); fprintf(resultsFile, "R%d ", ma->GPRDst); } else fprintf(resultsFile, "0 "); // Print the opcode. fprintf(resultsFile, "%s ", id_to_opcode_map[ma->opcode_id].c_str()); unsigned src_count = 0; for (int s = 0; s < MAX_SRC; s++) // GPR srcs count. if (ma->GPRSrcs[s] >= 0) src_count++; fprintf(resultsFile, "%d ", src_count); for (int s = 0; s < MAX_SRC; s++) // GPR srcs. 
if (ma->GPRSrcs[s] >= 0) fprintf(resultsFile, "R%d ", ma->GPRSrcs[s]); // print addresses std::bitset<32> mask(ma->active_mask); if (ma->is_mem) { std::istringstream iss(id_to_opcode_map[ma->opcode_id]); std::vector<std::string> tokens; std::string token; while (std::getline(iss, token, '.')) { if (!token.empty()) tokens.push_back(token); } fprintf(resultsFile, "%d ", get_datawidth_from_opcode(tokens)); bool base_stride_success = false; uint64_t base_addr = 0; int stride = 0; std::vector<long long> deltas; if (enable_compress) { // try base+stride format base_stride_success = base_stride_compress(ma->addrs, mask, base_addr, stride); if (!base_stride_success) { // if base+stride fails, try base+delta format base_delta_compress(ma->addrs, mask, base_addr, deltas); } } if (base_stride_success && enable_compress) { // base + stride format fprintf(resultsFile, "%u 0x%llx %d ", address_format::base_stride, base_addr, stride); } else if (!base_stride_success && enable_compress) { // base + delta format fprintf(resultsFile, "%u 0x%llx ", address_format::base_delta, base_addr); for (int s = 0; s < deltas.size(); s++) { fprintf(resultsFile, "%lld ", deltas[s]); } } else { // list all the addresses fprintf(resultsFile, "%u ", address_format::list_all); for (int s = 0; s < 32; s++) { if (mask.test(s)) fprintf(resultsFile, "0x%016lx ", ma->addrs[s]); } } } else { fprintf(resultsFile, "0 "); } fprintf(resultsFile, "\n"); num_processed_bytes += sizeof(inst_trace_t); } } } free(recv_buffer); return NULL; } void nvbit_at_ctx_init(CUcontext ctx) { recv_thread_started = true; channel_host.init(0, CHANNEL_SIZE, &channel_dev, NULL); pthread_create(&recv_thread, NULL, recv_thread_fun, NULL); } void nvbit_at_ctx_term(CUcontext ctx) { if (recv_thread_started) { recv_thread_started = false; pthread_join(recv_thread, NULL); } }
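/* Illustrative sketch (not part of the original tracer): a minimal host-side
 * check of the base+stride compression defined above. It builds a fully active
 * warp with a constant 4-byte stride, compresses it, then decompresses and
 * compares. The TRACER_COMPRESSION_SELFTEST guard and the function name are
 * our own additions so a normal build is unaffected. */
#ifdef TRACER_COMPRESSION_SELFTEST
static bool selftest_base_stride_compress() {
  uint64_t addrs[32];
  std::bitset<32> mask;
  for (int s = 0; s < 32; s++) {
    addrs[s] = 0x7f0000000000ULL + 4ULL * s; // constant stride of 4 bytes
    mask.set(s);                             // all 32 lanes active
  }
  uint64_t base = 0;
  int stride = 0;
  if (!base_stride_compress(addrs, mask, base, stride))
    return false; // a constant-stride pattern should compress
  // decompress and compare against the original addresses
  for (int s = 0; s < 32; s++)
    if (addrs[s] != base + (uint64_t)stride * (uint64_t)s)
      return false;
  return base == addrs[0] && stride == 4;
}
#endif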
#define INTENSIVE_GMF #include "cuda_x11_aes_alexis.cuh" #define TPB 384 __device__ __forceinline__ static void round_3_7_11(const uint32_t sharedMemory[4][256], uint32_t* r, uint4 *p, uint4 &x){ KEY_EXPAND_ELT(sharedMemory, &r[ 0]); *(uint4*)&r[ 0] ^= *(uint4*)&r[28]; x = p[ 2] ^ *(uint4*)&r[ 0]; KEY_EXPAND_ELT(sharedMemory, &r[ 4]); r[4] ^= r[0]; r[5] ^= r[1]; r[6] ^= r[2]; r[7] ^= r[3]; AES_ROUND_NOKEY(sharedMemory, &x); x.x ^= r[4]; x.y ^= r[5]; x.z ^= r[6]; x.w ^= r[7]; KEY_EXPAND_ELT(sharedMemory, &r[ 8]); r[8] ^= r[4]; r[9] ^= r[5]; r[10]^= r[6]; r[11]^= r[7]; AES_ROUND_NOKEY(sharedMemory, &x); x.x ^= r[8]; x.y ^= r[9]; x.z ^= r[10]; x.w ^= r[11]; KEY_EXPAND_ELT(sharedMemory, &r[12]); r[12] ^= r[8]; r[13] ^= r[9]; r[14]^= r[10]; r[15]^= r[11]; AES_ROUND_NOKEY(sharedMemory, &x); x.x ^= r[12]; x.y ^= r[13]; x.z ^= r[14]; x.w ^= r[15]; AES_ROUND_NOKEY(sharedMemory, &x); p[ 1].x ^= x.x; p[ 1].y ^= x.y; p[ 1].z ^= x.z; p[ 1].w ^= x.w; KEY_EXPAND_ELT(sharedMemory, &r[16]); *(uint4*)&r[16] ^= *(uint4*)&r[12]; x = p[ 0] ^ *(uint4*)&r[16]; KEY_EXPAND_ELT(sharedMemory, &r[20]); *(uint4*)&r[20] ^= *(uint4*)&r[16]; AES_ROUND_NOKEY(sharedMemory, &x); x ^= *(uint4*)&r[20]; KEY_EXPAND_ELT(sharedMemory, &r[24]); *(uint4*)&r[24] ^= *(uint4*)&r[20]; AES_ROUND_NOKEY(sharedMemory, &x); x ^= *(uint4*)&r[24]; KEY_EXPAND_ELT(sharedMemory,&r[28]); AES_ROUND_NOKEY(sharedMemory, &x); *(uint4*)&r[28] ^= *(uint4*)&r[24]; x ^= *(uint4*)&r[28]; AES_ROUND_NOKEY(sharedMemory, &x); p[ 3] ^= x; } __device__ __forceinline__ static void round_4_8_12(const uint32_t sharedMemory[4][256], uint32_t* r, uint4 *p, uint4 &x){ *(uint4*)&r[ 0] ^= *(uint4*)&r[25]; x = p[ 1] ^ *(uint4*)&r[ 0]; AES_ROUND_NOKEY(sharedMemory, &x); r[ 4] ^= r[29]; r[ 5] ^= r[30]; r[ 6] ^= r[31]; r[ 7] ^= r[ 0]; x ^= *(uint4*)&r[ 4]; *(uint4*)&r[ 8] ^= *(uint4*)&r[ 1]; AES_ROUND_NOKEY(sharedMemory, &x); x ^= *(uint4*)&r[ 8]; *(uint4*)&r[12] ^= *(uint4*)&r[ 5]; AES_ROUND_NOKEY(sharedMemory, &x); x ^= *(uint4*)&r[12]; AES_ROUND_NOKEY(sharedMemory, &x); p[ 0] ^= x; *(uint4*)&r[16] ^= *(uint4*)&r[ 9]; x = p[ 3] ^ *(uint4*)&r[16]; AES_ROUND_NOKEY(sharedMemory, &x); *(uint4*)&r[20] ^= *(uint4*)&r[13]; x ^= *(uint4*)&r[20]; AES_ROUND_NOKEY(sharedMemory, &x); *(uint4*)&r[24] ^= *(uint4*)&r[17]; x ^= *(uint4*)&r[24]; *(uint4*)&r[28] ^= *(uint4*)&r[21]; AES_ROUND_NOKEY(sharedMemory, &x); x ^= *(uint4*)&r[28]; AES_ROUND_NOKEY(sharedMemory, &x); p[ 2] ^= x; } // GPU Hash __global__ __launch_bounds__(TPB,2) /* 64 registers with 128,8 - 72 regs with 128,7 */ void x11_shavite512_gpu_hash_64_alexis(const uint32_t threads, uint64_t *g_hash) { __shared__ uint32_t sharedMemory[4][256]; aes_gpu_init_mt_256(sharedMemory); const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); uint4 p[ 4]; uint4 x; uint32_t r[32]; // kopiere init-state const uint32_t state[16] = { 0x72FCCDD8, 0x79CA4727, 0x128A077B, 0x40D55AEC, 0xD1901A06, 0x430AE307, 0xB29F5CD1, 0xDF07FBFC, 0x8E45D73D, 0x681AB538, 0xBDE86578, 0xDD577E47, 0xE275EADE, 0x502D9FCD, 0xB9357178, 0x022A4B9A }; if (thread < threads) { uint64_t *Hash = &g_hash[thread<<3]; // fülle die Nachricht mit 64-byte (vorheriger Hash) *(uint2x4*)&r[ 0] = __ldg4((uint2x4*)&Hash[ 0]); *(uint2x4*)&r[ 8] = __ldg4((uint2x4*)&Hash[ 4]); __syncthreads(); *(uint2x4*)&p[ 0] = *(uint2x4*)&state[ 0]; *(uint2x4*)&p[ 2] = *(uint2x4*)&state[ 8]; r[16] = 0x80; r[17] = 0; r[18] = 0; r[19] = 0; r[20] = 0; r[21] = 0; r[22] = 0; r[23] = 0; r[24] = 0; r[25] = 0; r[26] = 0; r[27] = 0x02000000; r[28] = 0; r[29] = 0; r[30] = 0; r[31] = 0x02000000; /* 
round 0 */ x = p[ 1] ^ *(uint4*)&r[ 0]; AES_ROUND_NOKEY(sharedMemory, &x); x ^= *(uint4*)&r[ 4]; AES_ROUND_NOKEY(sharedMemory, &x); x ^= *(uint4*)&r[ 8]; AES_ROUND_NOKEY(sharedMemory, &x); x ^= *(uint4*)&r[12]; AES_ROUND_NOKEY(sharedMemory, &x); p[ 0] ^= x; x = p[ 3]; x.x ^= 0x80; AES_ROUND_NOKEY(sharedMemory, &x); AES_ROUND_NOKEY(sharedMemory, &x); x.w ^= 0x02000000; AES_ROUND_NOKEY(sharedMemory, &x); x.w ^= 0x02000000; AES_ROUND_NOKEY(sharedMemory, &x); p[ 2]^= x; // 1 KEY_EXPAND_ELT(sharedMemory, &r[ 0]); *(uint4*)&r[ 0]^=*(uint4*)&r[28]; r[ 0] ^= 0x200; r[ 3] ^= 0xFFFFFFFF; x = p[ 0] ^ *(uint4*)&r[ 0]; AES_ROUND_NOKEY(sharedMemory, &x); KEY_EXPAND_ELT(sharedMemory, &r[ 4]); *(uint4*)&r[ 4] ^= *(uint4*)&r[ 0]; x ^= *(uint4*)&r[ 4]; AES_ROUND_NOKEY(sharedMemory, &x); KEY_EXPAND_ELT(sharedMemory, &r[ 8]); *(uint4*)&r[ 8] ^= *(uint4*)&r[ 4]; x ^= *(uint4*)&r[ 8]; AES_ROUND_NOKEY(sharedMemory, &x); KEY_EXPAND_ELT(sharedMemory, &r[12]); *(uint4*)&r[12] ^= *(uint4*)&r[ 8]; x ^= *(uint4*)&r[12]; AES_ROUND_NOKEY(sharedMemory, &x); p[ 3] ^= x; KEY_EXPAND_ELT(sharedMemory, &r[16]); *(uint4*)&r[16] ^= *(uint4*)&r[12]; x = p[ 2] ^ *(uint4*)&r[16]; AES_ROUND_NOKEY(sharedMemory, &x); KEY_EXPAND_ELT(sharedMemory, &r[20]); *(uint4*)&r[20] ^= *(uint4*)&r[16]; x ^= *(uint4*)&r[20]; AES_ROUND_NOKEY(sharedMemory, &x); KEY_EXPAND_ELT(sharedMemory, &r[24]); *(uint4*)&r[24] ^= *(uint4*)&r[20]; x ^= *(uint4*)&r[24]; AES_ROUND_NOKEY(sharedMemory, &x); KEY_EXPAND_ELT(sharedMemory, &r[28]); *(uint4*)&r[28] ^= *(uint4*)&r[24]; x ^= *(uint4*)&r[28]; AES_ROUND_NOKEY(sharedMemory, &x); p[ 1] ^= x; *(uint4*)&r[ 0] ^= *(uint4*)&r[25]; x = p[ 3] ^ *(uint4*)&r[ 0]; AES_ROUND_NOKEY(sharedMemory, &x); r[ 4] ^= r[29]; r[ 5] ^= r[30]; r[ 6] ^= r[31]; r[ 7] ^= r[ 0]; x ^= *(uint4*)&r[ 4]; AES_ROUND_NOKEY(sharedMemory, &x); *(uint4*)&r[ 8] ^= *(uint4*)&r[ 1]; x ^= *(uint4*)&r[ 8]; AES_ROUND_NOKEY(sharedMemory, &x); *(uint4*)&r[12] ^= *(uint4*)&r[ 5]; x ^= *(uint4*)&r[12]; AES_ROUND_NOKEY(sharedMemory, &x); p[ 2] ^= x; *(uint4*)&r[16] ^= *(uint4*)&r[ 9]; x = p[ 1] ^ *(uint4*)&r[16]; AES_ROUND_NOKEY(sharedMemory, &x); *(uint4*)&r[20] ^= *(uint4*)&r[13]; x ^= *(uint4*)&r[20]; AES_ROUND_NOKEY(sharedMemory, &x); *(uint4*)&r[24] ^= *(uint4*)&r[17]; x ^= *(uint4*)&r[24]; AES_ROUND_NOKEY(sharedMemory, &x); *(uint4*)&r[28] ^= *(uint4*)&r[21]; x ^= *(uint4*)&r[28]; AES_ROUND_NOKEY(sharedMemory, &x); p[ 0] ^= x; /* round 3, 7, 11 */ round_3_7_11(sharedMemory,r,p,x); /* round 4, 8, 12 */ round_4_8_12(sharedMemory,r,p,x); // 2 KEY_EXPAND_ELT(sharedMemory,&r[ 0]); *(uint4*)&r[ 0] ^= *(uint4*)&r[28]; x = p[ 0] ^ *(uint4*)&r[ 0]; AES_ROUND_NOKEY(sharedMemory, &x); KEY_EXPAND_ELT(sharedMemory, &r[ 4]); *(uint4*)&r[ 4] ^= *(uint4*)&r[ 0]; r[ 7] ^= (~0x200); x ^= *(uint4*)&r[ 4]; AES_ROUND_NOKEY(sharedMemory, &x); KEY_EXPAND_ELT(sharedMemory, &r[ 8]); *(uint4*)&r[ 8] ^= *(uint4*)&r[ 4]; x ^= *(uint4*)&r[ 8]; AES_ROUND_NOKEY(sharedMemory, &x); KEY_EXPAND_ELT(sharedMemory, &r[12]); *(uint4*)&r[12] ^= *(uint4*)&r[ 8]; x ^= *(uint4*)&r[12]; AES_ROUND_NOKEY(sharedMemory, &x); p[ 3] ^= x; KEY_EXPAND_ELT(sharedMemory, &r[16]); *(uint4*)&r[16] ^= *(uint4*)&r[12]; x = p[ 2] ^ *(uint4*)&r[16]; AES_ROUND_NOKEY(sharedMemory, &x); KEY_EXPAND_ELT(sharedMemory, &r[20]); *(uint4*)&r[20] ^= *(uint4*)&r[16]; x ^= *(uint4*)&r[20]; AES_ROUND_NOKEY(sharedMemory, &x); KEY_EXPAND_ELT(sharedMemory, &r[24]); *(uint4*)&r[24] ^= *(uint4*)&r[20]; x ^= *(uint4*)&r[24]; AES_ROUND_NOKEY(sharedMemory, &x); KEY_EXPAND_ELT(sharedMemory,&r[28]); *(uint4*)&r[28] ^= 
*(uint4*)&r[24]; x ^= *(uint4*)&r[28]; AES_ROUND_NOKEY(sharedMemory, &x); p[ 1] ^= x; *(uint4*)&r[ 0] ^= *(uint4*)&r[25]; x = p[ 3] ^ *(uint4*)&r[ 0]; AES_ROUND_NOKEY(sharedMemory, &x); r[ 4] ^= r[29]; r[ 5] ^= r[30]; r[ 6] ^= r[31]; r[ 7] ^= r[ 0]; x ^= *(uint4*)&r[ 4]; AES_ROUND_NOKEY(sharedMemory, &x); *(uint4*)&r[ 8] ^= *(uint4*)&r[ 1]; x ^= *(uint4*)&r[ 8]; AES_ROUND_NOKEY(sharedMemory, &x); *(uint4*)&r[12] ^= *(uint4*)&r[ 5]; x ^= *(uint4*)&r[12]; AES_ROUND_NOKEY(sharedMemory, &x); p[ 2] ^= x; *(uint4*)&r[16] ^= *(uint4*)&r[ 9]; x = p[ 1] ^ *(uint4*)&r[16]; AES_ROUND_NOKEY(sharedMemory, &x); *(uint4*)&r[20] ^= *(uint4*)&r[13]; x ^= *(uint4*)&r[20]; AES_ROUND_NOKEY(sharedMemory, &x); *(uint4*)&r[24] ^= *(uint4*)&r[17]; x ^= *(uint4*)&r[24]; AES_ROUND_NOKEY(sharedMemory, &x); *(uint4*)&r[28] ^= *(uint4*)&r[21]; x ^= *(uint4*)&r[28]; AES_ROUND_NOKEY(sharedMemory, &x); p[ 0] ^= x; /* round 3, 7, 11 */ round_3_7_11(sharedMemory,r,p,x); /* round 4, 8, 12 */ round_4_8_12(sharedMemory,r,p,x); // 3 KEY_EXPAND_ELT(sharedMemory,&r[ 0]); *(uint4*)&r[ 0] ^= *(uint4*)&r[28]; x = p[ 0] ^ *(uint4*)&r[ 0]; AES_ROUND_NOKEY(sharedMemory, &x); KEY_EXPAND_ELT(sharedMemory, &r[ 4]); *(uint4*)&r[ 4] ^= *(uint4*)&r[ 0]; x ^= *(uint4*)&r[ 4]; AES_ROUND_NOKEY(sharedMemory, &x); KEY_EXPAND_ELT(sharedMemory, &r[ 8]); *(uint4*)&r[ 8] ^= *(uint4*)&r[ 4]; x ^= *(uint4*)&r[ 8]; AES_ROUND_NOKEY(sharedMemory, &x); KEY_EXPAND_ELT(sharedMemory, &r[12]); *(uint4*)&r[12] ^= *(uint4*)&r[ 8]; x ^= *(uint4*)&r[12]; AES_ROUND_NOKEY(sharedMemory, &x); p[ 3] ^= x; KEY_EXPAND_ELT(sharedMemory, &r[16]); *(uint4*)&r[16] ^= *(uint4*)&r[12]; x = p[ 2] ^ *(uint4*)&r[16]; AES_ROUND_NOKEY(sharedMemory, &x); KEY_EXPAND_ELT(sharedMemory, &r[20]); *(uint4*)&r[20] ^= *(uint4*)&r[16]; x^=*(uint4*)&r[20]; AES_ROUND_NOKEY(sharedMemory, &x); KEY_EXPAND_ELT(sharedMemory, &r[24]); *(uint4*)&r[24]^=*(uint4*)&r[20]; x ^= *(uint4*)&r[24]; AES_ROUND_NOKEY(sharedMemory, &x); KEY_EXPAND_ELT(sharedMemory,&r[28]); *(uint4*)&r[28] ^= *(uint4*)&r[24]; r[30] ^= 0x200; r[31] ^= 0xFFFFFFFF; x ^= *(uint4*)&r[28]; AES_ROUND_NOKEY(sharedMemory, &x); p[ 1] ^= x; *(uint4*)&r[ 0] ^= *(uint4*)&r[25]; x = p[ 3] ^ *(uint4*)&r[ 0]; AES_ROUND_NOKEY(sharedMemory, &x); r[ 4] ^= r[29]; r[ 5] ^= r[30]; r[ 6] ^= r[31]; r[ 7] ^= r[ 0]; x ^= *(uint4*)&r[ 4]; AES_ROUND_NOKEY(sharedMemory, &x); *(uint4*)&r[ 8] ^= *(uint4*)&r[ 1]; x ^= *(uint4*)&r[ 8]; AES_ROUND_NOKEY(sharedMemory, &x); *(uint4*)&r[12] ^= *(uint4*)&r[ 5]; x ^= *(uint4*)&r[12]; AES_ROUND_NOKEY(sharedMemory, &x); p[ 2] ^= x; *(uint4*)&r[16] ^= *(uint4*)&r[ 9]; x = p[ 1] ^ *(uint4*)&r[16]; AES_ROUND_NOKEY(sharedMemory, &x); *(uint4*)&r[20] ^= *(uint4*)&r[13]; x ^= *(uint4*)&r[20]; AES_ROUND_NOKEY(sharedMemory, &x); *(uint4*)&r[24] ^= *(uint4*)&r[17]; x ^= *(uint4*)&r[24]; AES_ROUND_NOKEY(sharedMemory, &x); *(uint4*)&r[28] ^= *(uint4*)&r[21]; x ^= *(uint4*)&r[28]; AES_ROUND_NOKEY(sharedMemory, &x); p[ 0] ^= x; /* round 3, 7, 11 */ round_3_7_11(sharedMemory,r,p,x); /* round 4, 8, 12 */ round_4_8_12(sharedMemory,r,p,x); /* round 13 */ KEY_EXPAND_ELT(sharedMemory,&r[ 0]); *(uint4*)&r[ 0] ^= *(uint4*)&r[28]; x = p[ 0] ^ *(uint4*)&r[ 0]; AES_ROUND_NOKEY(sharedMemory, &x); KEY_EXPAND_ELT(sharedMemory, &r[ 4]); *(uint4*)&r[ 4] ^= *(uint4*)&r[ 0]; x ^= *(uint4*)&r[ 4]; AES_ROUND_NOKEY(sharedMemory, &x); KEY_EXPAND_ELT(sharedMemory, &r[ 8]); *(uint4*)&r[ 8] ^= *(uint4*)&r[ 4]; x ^= *(uint4*)&r[ 8]; AES_ROUND_NOKEY(sharedMemory, &x); KEY_EXPAND_ELT(sharedMemory, &r[12]); *(uint4*)&r[12] ^= *(uint4*)&r[ 8]; x ^= 
*(uint4*)&r[12]; AES_ROUND_NOKEY(sharedMemory, &x); p[ 3] ^= x; KEY_EXPAND_ELT(sharedMemory, &r[16]); *(uint4*)&r[16] ^= *(uint4*)&r[12]; x = p[ 2] ^ *(uint4*)&r[16]; AES_ROUND_NOKEY(sharedMemory, &x); KEY_EXPAND_ELT(sharedMemory, &r[20]); *(uint4*)&r[20] ^= *(uint4*)&r[16]; x ^= *(uint4*)&r[20]; AES_ROUND_NOKEY(sharedMemory, &x); KEY_EXPAND_ELT(sharedMemory, &r[24]); *(uint4*)&r[24] ^= *(uint4*)&r[20]; r[25] ^= 0x200; r[27] ^= 0xFFFFFFFF; x ^= *(uint4*)&r[24]; AES_ROUND_NOKEY(sharedMemory, &x); KEY_EXPAND_ELT(sharedMemory,&r[28]); *(uint4*)&r[28] ^= *(uint4*)&r[24]; x ^= *(uint4*)&r[28]; AES_ROUND_NOKEY(sharedMemory, &x); p[ 1] ^= x; *(uint2x4*)&Hash[ 0] = *(uint2x4*)&state[ 0] ^ *(uint2x4*)&p[ 2]; *(uint2x4*)&Hash[ 4] = *(uint2x4*)&state[ 8] ^ *(uint2x4*)&p[ 0]; } } __host__ void x11_shavite512_cpu_hash_64_alexis(int thr_id, uint32_t threads, uint32_t *d_hash) { dim3 grid((threads + TPB-1)/TPB); dim3 block(TPB); // note: 128 threads minimum are required to init the shared memory array x11_shavite512_gpu_hash_64_alexis<<<grid, block>>>(threads, (uint64_t*)d_hash); }
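/* Illustrative sketch (not part of the original file): how a caller might drive
 * the host wrapper above. The 64 bytes (16 x uint32_t) of hash state per thread
 * follow the g_hash[thread << 3] indexing used by the kernel; the function name
 * and the omitted error handling are assumptions, not taken from the miner code. */
#if 0
static void example_shavite512_launch(int thr_id, uint32_t threads)
{
	uint32_t *d_hash = NULL;
	cudaMalloc((void**)&d_hash, (size_t)threads * 16U * sizeof(uint32_t)); // 64-byte hash per thread
	// ... d_hash is normally filled by the previous algorithm in the X11 chain ...
	x11_shavite512_cpu_hash_64_alexis(thr_id, threads, d_hash);
	cudaDeviceSynchronize();
	cudaFree(d_hash);
}
#endif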
char *cp_to_device(char *from, size_t size) { char *tmp; cudaMalloc((void**)&tmp, size); cudaMemcpy(tmp, from, size, cudaMemcpyHostToDevice); return tmp; } void cp_to_host(char *to, char*from, size_t size) { cudaMemcpy(to, from, size, cudaMemcpyDeviceToHost); cudaFree(from); } __global__ void ccc_loop1(const int * __restrict imaterial, const int * __restrict nextfrac, const double * __restrict rho_compact, const double * __restrict rho_compact_list, const double * __restrict Vf_compact_list, const double * __restrict V, double * __restrict rho_ave_compact, int sizex, int sizey, int * __restrict mmc_index) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if (i >= sizex || j >= sizey) return; #ifdef FUSED double ave = 0.0; int ix = imaterial[i+sizex*j]; if (ix <= 0) { // condition is 'ix >= 0', this is the equivalent of // 'until ix < 0' from the paper #ifdef LINKED for (ix = -ix; ix >= 0; ix = nextfrac[ix]) { ave += rho_compact_list[ix] * Vf_compact_list[ix]; } #else for (int idx = mmc_index[-ix]; idx < mmc_index[-ix+1]; idx++) { ave += rho_compact_list[idx] * Vf_compact_list[idx]; } #endif rho_ave_compact[i+sizex*j] = ave/V[i+sizex*j]; } else { #endif // We use a distinct output array for averages. // In case of a pure cell, the average density equals to the total. rho_ave_compact[i+sizex*j] = rho_compact[i+sizex*j] / V[i+sizex*j]; #ifdef FUSED } #endif } __global__ void ccc_loop1_2(const double * __restrict rho_compact_list, const double * __restrict Vf_compact_list, const double * __restrict V, double * __restrict rho_ave_compact, const int * __restrict mmc_index, const int mmc_cells, const int * __restrict mmc_i, const int * __restrict mmc_j, int sizex, int sizey) { int c = threadIdx.x + blockIdx.x * blockDim.x; if (c >= mmc_cells) return; double ave = 0.0; for (int m = mmc_index[c]; m < mmc_index[c+1]; m++) { ave += rho_compact_list[m] * Vf_compact_list[m]; } rho_ave_compact[mmc_i[c]+sizex*mmc_j[c]] = ave/V[mmc_i[c]+sizex*mmc_j[c]]; } __global__ void ccc_loop2(const int * __restrict imaterial, const int * __restrict matids, const int * __restrict nextfrac, const double * __restrict rho_compact, const double * __restrict rho_compact_list, const double * __restrict t_compact, const double * __restrict t_compact_list, const double * __restrict Vf_compact_list, const double * __restrict n, double * __restrict p_compact, double * __restrict p_compact_list, int sizex, int sizey, int * __restrict mmc_index) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if (i >= sizex || j >= sizey) return; int ix = imaterial[i+sizex*j]; if (ix <= 0) { #ifdef FUSED // NOTE: I think the paper describes this algorithm (Alg. 9) wrong. // The solution below is what I believe to good. // condition is 'ix >= 0', this is the equivalent of // 'until ix < 0' from the paper #ifdef LINKED for (ix = -ix; ix >= 0; ix = nextfrac[ix]) { double nm = n[matids[ix]]; p_compact_list[ix] = (nm * rho_compact_list[ix] * t_compact_list[ix]) / Vf_compact_list[ix]; } #else for (int idx = mmc_index[-ix]; idx < mmc_index[-ix+1]; idx++) { double nm = n[matids[idx]]; p_compact_list[idx] = (nm * rho_compact_list[idx] * t_compact_list[idx]) / Vf_compact_list[idx]; } #endif #endif } else { // NOTE: HACK: we index materials from zero, but zero can be a list index int mat = ix - 1; // NOTE: There is no division by Vf here, because the fractional volume is 1.0 in the pure cell case. 
p_compact[i+sizex*j] = n[mat] * rho_compact[i+sizex*j] * t_compact[i+sizex*j];; } } __global__ void ccc_loop2_2(const int * __restrict matids, const double * __restrict rho_compact_list, const double * __restrict t_compact_list, const double * __restrict Vf_compact_list, const double * __restrict n, double * __restrict p_compact_list, int * __restrict mmc_index, int mmc_cells) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx >= mmc_cells) return; double nm = n[matids[idx]]; p_compact_list[idx] = (nm * rho_compact_list[idx] * t_compact_list[idx]) / Vf_compact_list[idx]; } __global__ void ccc_loop3(const int * __restrict imaterial, const int * __restrict nextfrac, const int * __restrict matids, const double * __restrict rho_compact, const double * __restrict rho_compact_list, double * __restrict rho_mat_ave_compact, double * __restrict rho_mat_ave_compact_list, const double * __restrict x, const double * __restrict y, int sizex, int sizey, int * __restrict mmc_index) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if (i >= sizex-1 || j >= sizey-1 || i < 1 || j < 1) return; // o: outer double xo = x[i+sizex*j]; double yo = y[i+sizex*j]; // There are at most 9 neighbours in 2D case. double dsqr[9]; // for all neighbours for (int nj = -1; nj <= 1; nj++) { for (int ni = -1; ni <= 1; ni++) { dsqr[(nj+1)*3 + (ni+1)] = 0.0; // i: inner double xi = x[(i+ni)+sizex*(j+nj)]; double yi = y[(i+ni)+sizex*(j+nj)]; dsqr[(nj+1)*3 + (ni+1)] += (xo - xi) * (xo - xi); dsqr[(nj+1)*3 + (ni+1)] += (yo - yi) * (yo - yi); } } int ix = imaterial[i+sizex*j]; if (ix <= 0) { // condition is 'ix >= 0', this is the equivalent of // 'until ix < 0' from the paper #ifdef LINKED for (ix = -ix; ix >= 0; ix = nextfrac[ix]) { #else for (int ix = mmc_index[-imaterial[i+sizex*j]]; ix < mmc_index[-imaterial[i+sizex*j]+1]; ix++) { #endif int mat = matids[ix]; double rho_sum = 0.0; int Nn = 0; // for all neighbours for (int nj = -1; nj <= 1; nj++) { for (int ni = -1; ni <= 1; ni++) { int ci = i+ni, cj = j+nj; int jx = imaterial[ci+sizex*cj]; if (jx <= 0) { // condition is 'jx >= 0', this is the equivalent of // 'until jx < 0' from the paper #ifdef LINKED for (jx = -jx; jx >= 0; jx = nextfrac[jx]) { #else for (int jx = mmc_index[-imaterial[ci+sizex*cj]]; jx < mmc_index[-imaterial[ci+sizex*cj]+1]; jx++) { #endif if (matids[jx] == mat) { rho_sum += rho_compact_list[jx] / dsqr[(nj+1)*3 + (ni+1)]; Nn += 1; // The loop has an extra condition: "and not found". // This makes sense, if the material is found, there won't be any more of the same. break; } } } else { // NOTE: In this case, the neighbour is a pure cell, its material index is in jx. // In contrast, Algorithm 10 loads matids[jx] which I think is wrong. // NOTE: HACK: we index materials from zero, but zero can be a list index int mat_neighbour = jx - 1; if (mat == mat_neighbour) { rho_sum += rho_compact[ci+sizex*cj] / dsqr[(nj+1)*3 + (ni+1)]; Nn += 1; } } // end if (jx <= 0) } // end for (int ni) } // end for (int nj) rho_mat_ave_compact_list[ix] = rho_sum / Nn; } // end for (ix = -ix) } // end if (ix <= 0) else { // NOTE: In this case, the cell is a pure cell, its material index is in ix. // In contrast, Algorithm 10 loads matids[ix] which I think is wrong. // NOTE: HACK: we index materials from zero, but zero can be a list index int mat = ix - 1; double rho_sum = 0.0; int Nn = 0; // for all neighbours for (int nj = -1; nj <= 1; nj++) { if ((j + nj < 0) || (j + nj >= sizey)) // TODO: better way? 
continue; for (int ni = -1; ni <= 1; ni++) { if ((i + ni < 0) || (i + ni >= sizex)) // TODO: better way? continue; int ci = i+ni, cj = j+nj; int jx = imaterial[ci+sizex*cj]; if (jx <= 0) { // condition is 'jx >= 0', this is the equivalent of // 'until jx < 0' from the paper #ifdef LINKED for (jx = -jx; jx >= 0; jx = nextfrac[jx]) { #else for (int jx = mmc_index[-imaterial[ci+sizex*cj]]; jx < mmc_index[-imaterial[ci+sizex*cj]+1]; jx++) { #endif if (matids[jx] == mat) { rho_sum += rho_compact_list[jx] / dsqr[(nj+1)*3 + (ni+1)]; Nn += 1; // The loop has an extra condition: "and not found". // This makes sense, if the material is found, there won't be any more of the same. break; } } } else { // NOTE: In this case, the neighbour is a pure cell, its material index is in jx. // In contrast, Algorithm 10 loads matids[jx] which I think is wrong. // NOTE: HACK: we index materials from zero, but zero can be a list index int mat_neighbour = jx - 1; if (mat == mat_neighbour) { rho_sum += rho_compact[ci+sizex*cj] / dsqr[(nj+1)*3 + (ni+1)]; Nn += 1; } } // end if (jx <= 0) } // end for (int ni) } // end for (int nj) rho_mat_ave_compact[i+sizex*j] = rho_sum / Nn; } // end else } struct full_data { int sizex; int sizey; int Nmats; double * __restrict__ rho; double * __restrict__ rho_mat_ave; double * __restrict__ p; double * __restrict__ Vf; double * __restrict__ t; double * __restrict__ V; double * __restrict__ x; double * __restrict__ y; double * __restrict__ n; double * __restrict__ rho_ave; }; struct compact_data { int sizex; int sizey; int Nmats; double * __restrict__ rho_compact; double * __restrict__ rho_compact_list; double * __restrict__ rho_mat_ave_compact; double * __restrict__ rho_mat_ave_compact_list; double * __restrict__ p_compact; double * __restrict__ p_compact_list; double * __restrict__ Vf_compact_list; double * __restrict__ t_compact; double * __restrict__ t_compact_list; double * __restrict__ V; double * __restrict__ x; double * __restrict__ y; double * __restrict__ n; double * __restrict__ rho_ave_compact; int * __restrict__ imaterial; int * __restrict__ matids; int * __restrict__ nextfrac; int * __restrict__ mmc_index; int * __restrict__ mmc_i; int * __restrict__ mmc_j; int mm_len; int mmc_cells; }; void compact_cell_centric(full_data cc, compact_data ccc, double &a1, double &a2, double &a3, int argc, char** argv) { int sizex = cc.sizex; int sizey = cc.sizey; int Nmats = cc.Nmats; int mmc_cells = ccc.mmc_cells; int mm_len = ccc.mm_len; int *d_imaterial = (int *)cp_to_device((char*)ccc.imaterial, sizex*sizey*sizeof(int)); int *d_matids = (int *)cp_to_device((char*)ccc.matids, mm_len*sizeof(int)); int *d_nextfrac = (int *)cp_to_device((char*)ccc.nextfrac, mm_len*sizeof(int)); int *d_mmc_index = (int *)cp_to_device((char*)ccc.mmc_index, (mmc_cells+1)*sizeof(int)); int *d_mmc_i = (int *)cp_to_device((char*)ccc.mmc_i, (mmc_cells)*sizeof(int)); int *d_mmc_j = (int *)cp_to_device((char*)ccc.mmc_j, (mmc_cells)*sizeof(int)); double *d_x = (double *)cp_to_device((char*)ccc.x, sizex*sizey*sizeof(double)); double *d_y = (double *)cp_to_device((char*)ccc.y, sizex*sizey*sizeof(double)); double *d_rho_compact = (double *)cp_to_device((char*)ccc.rho_compact, sizex*sizey*sizeof(double)); double *d_rho_compact_list = (double *)cp_to_device((char*)ccc.rho_compact_list,mm_len*sizeof(double)); double *d_rho_mat_ave_compact = (double *)cp_to_device((char*)ccc.rho_mat_ave_compact, sizex*sizey*sizeof(double)); double *d_rho_mat_ave_compact_list = (double 
*)cp_to_device((char*)ccc.rho_mat_ave_compact_list,mm_len*sizeof(double)); double *d_p_compact = (double *)cp_to_device((char*)ccc.p_compact, sizex*sizey*sizeof(double)); double *d_p_compact_list = (double *)cp_to_device((char*)ccc.p_compact_list,mm_len*sizeof(double)); double *d_t_compact = (double *)cp_to_device((char*)ccc.t_compact, sizex*sizey*sizeof(double)); double *d_t_compact_list = (double *)cp_to_device((char*)ccc.t_compact_list,mm_len*sizeof(double)); double *d_Vf_compact_list = (double *)cp_to_device((char*)ccc.Vf_compact_list, mm_len*sizeof(double)); double *d_V = (double *)cp_to_device((char*)ccc.V, sizex*sizey*sizeof(double)); double *d_n = (double *)cp_to_device((char*)ccc.n, Nmats*sizeof(double)); double *d_rho_ave_compact = (double *)cp_to_device((char*)ccc.rho_ave_compact, sizex*sizey*sizeof(double)); int thx = 32; int thy = 4; dim3 threads(thx,thy,1); dim3 blocks((sizex-1)/thx+1, (sizey-1)/thy+1, 1); // Cell-centric algorithms // Computational loop 1 - average density in cell #ifdef DEBUG cudaDeviceSynchronize(); auto t0 = std::chrono::system_clock::now(); #endif ccc_loop1 <<< dim3(blocks), dim3(threads) >>> (d_imaterial, d_nextfrac, d_rho_compact, d_rho_compact_list, d_Vf_compact_list, d_V, d_rho_ave_compact, sizex, sizey, d_mmc_index); #ifndef FUSED ccc_loop1_2 <<< dim3((mmc_cells-1)/(thx*thy)+1), dim3((thx*thy)) >>> (d_rho_compact_list, d_Vf_compact_list, d_V, d_rho_ave_compact, d_mmc_index, mmc_cells, d_mmc_i, d_mmc_j, sizex, sizey); #endif #ifdef DEBUG cudaDeviceSynchronize(); std::chrono::duration<double> t1 = std::chrono::system_clock::now() - t0; printf("Compact matrix, cell centric, alg 1: %g sec\n", t1.count()); // Computational loop 2 - Pressure for each cell and each material t0 = std::chrono::system_clock::now(); #endif ccc_loop2 <<< dim3(blocks), dim3(threads) >>> (d_imaterial, d_matids,d_nextfrac, d_rho_compact, d_rho_compact_list, d_t_compact, d_t_compact_list, d_Vf_compact_list, d_n, d_p_compact, d_p_compact_list, sizex, sizey, d_mmc_index); #ifndef FUSED ccc_loop2_2 <<< dim3((mm_len-1)/(thx*thy)+1), dim3((thx*thy)) >>> (d_matids, d_rho_compact_list, d_t_compact_list, d_Vf_compact_list, d_n, d_p_compact_list, d_mmc_index, mm_len); #endif #ifdef DEBUG cudaDeviceSynchronize(); std::chrono::duration<double> t2 = std::chrono::system_clock::now() - t0; printf("Compact matrix, cell centric, alg 2: %g sec\n", t2.count()); // Computational loop 3 - Average density of each material over neighborhood of each cell t0 = std::chrono::system_clock::now(); #endif ccc_loop3 <<< dim3(blocks), dim3(threads) >>> (d_imaterial,d_nextfrac, d_matids, d_rho_compact, d_rho_compact_list, d_rho_mat_ave_compact, d_rho_mat_ave_compact_list, d_x, d_y, sizex, sizey, d_mmc_index); #ifdef DEBUG cudaDeviceSynchronize(); std::chrono::duration<double> t3 = std::chrono::system_clock::now() - t0; printf("Compact matrix, cell centric, alg 3: %g sec\n", t3.count()); #endif cp_to_host((char*)ccc.x, (char*)d_x, sizex*sizey*sizeof(double)); cp_to_host((char*)ccc.y, (char*)d_y, sizex*sizey*sizeof(double)); cp_to_host((char*)ccc.rho_compact, (char*)d_rho_compact, sizex*sizey*sizeof(double)); cp_to_host((char*)ccc.rho_compact_list, (char*)d_rho_compact_list, mm_len*sizeof(double)); cp_to_host((char*)ccc.rho_mat_ave_compact, (char*)d_rho_mat_ave_compact, sizex*sizey*sizeof(double)); cp_to_host((char*)ccc.rho_mat_ave_compact_list, (char*)d_rho_mat_ave_compact_list, mm_len*sizeof(double)); cp_to_host((char*)ccc.p_compact, (char*)d_p_compact, sizex*sizey*sizeof(double)); 
cp_to_host((char*)ccc.p_compact_list, (char*)d_p_compact_list, mm_len*sizeof(double)); cp_to_host((char*)ccc.t_compact, (char*)d_t_compact, sizex*sizey*sizeof(double)); cp_to_host((char*)ccc.t_compact_list, (char*)d_t_compact_list, mm_len*sizeof(double)); cp_to_host((char*)ccc.Vf_compact_list, (char*)d_Vf_compact_list, mm_len*sizeof(double)); cp_to_host((char*)ccc.V, (char*)d_V, sizex*sizey*sizeof(double)); cp_to_host((char*)ccc.n, (char*)d_n, Nmats*sizeof(double)); cp_to_host((char*)ccc.rho_ave_compact, (char*)d_rho_ave_compact, sizex*sizey*sizeof(double)); // cudaFree are included in cp_to_host } bool compact_check_results(full_data cc, compact_data ccc) { int sizex = cc.sizex; int sizey = cc.sizey; int Nmats = cc.Nmats; //int mmc_cells = ccc.mmc_cells; //int mm_len = ccc.mm_len; printf("Checking results of compact representation... "); for (int j = 0; j < sizey; j++) { for (int i = 0; i < sizex; i++) { if (fabs(cc.rho_ave[i+sizex*j] - ccc.rho_ave_compact[i+sizex*j]) > 0.0001) { printf("1. full matrix and compact cell-centric values are not equal! (%f, %f, %d, %d)\n", cc.rho_ave[i+sizex*j], ccc.rho_ave_compact[i+sizex*j], i, j); return false; } int ix = ccc.imaterial[i+sizex*j]; if (ix <= 0) { #ifdef LINKED for (ix = -ix; ix >= 0; ix = ccc.nextfrac[ix]) { #else for (int ix = ccc.mmc_index[-ccc.imaterial[i+sizex*j]]; ix < ccc.mmc_index[-ccc.imaterial[i+sizex*j]+1]; ix++) { #endif int mat = ccc.matids[ix]; if (fabs(cc.p[(i+sizex*j)*Nmats+mat] - ccc.p_compact_list[ix]) > 0.0001) { printf("2. full matrix and compact cell-centric values are not equal! (%f, %f, %d, %d, %d)\n", cc.p[(i+sizex*j)*Nmats+mat], ccc.p_compact_list[ix], i, j, mat); return false; } if (fabs(cc.rho_mat_ave[(i+sizex*j)*Nmats+mat] - ccc.rho_mat_ave_compact_list[ix]) > 0.0001) { printf("3. full matrix and compact cell-centric values are not equal! (%f, %f, %d, %d, %d)\n", cc.rho_mat_ave[(i+sizex*j)*Nmats+mat], ccc.rho_mat_ave_compact_list[ix], i, j, mat); return false; } } } else { // NOTE: HACK: we index materials from zero, but zero can be a list index int mat = ix - 1; if (fabs(cc.p[(i+sizex*j)*Nmats+mat] - ccc.p_compact[i+sizex*j]) > 0.0001) { printf("2. full matrix and compact cell-centric values are not equal! (%f, %f, %d, %d, %d)\n", cc.p[(i+sizex*j)*Nmats+mat], ccc.p_compact[i+sizex*j], i, j, mat); return false; } if (fabs(cc.rho_mat_ave[(i+sizex*j)*Nmats+mat] - ccc.rho_mat_ave_compact[i+sizex*j]) > 0.0001) { printf("3. full matrix and compact cell-centric values are not equal! (%f, %f, %d, %d, %d)\n", cc.rho_mat_ave[(i+sizex*j)*Nmats+mat], ccc.rho_mat_ave_compact[i+sizex*j], i, j, mat); return false; } } } } printf("All tests passed!\n"); return true; }
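/* Illustrative sketch (not part of the original benchmark): a minimal launch
 * error check that could be dropped after each ccc_loop* kernel above. The
 * original code relies on the later cudaMemcpy in cp_to_host to surface
 * failures; the helper name and usage below are our own. */
#if 0
static inline void check_kernel_launch(const char *where)
{
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess)
    printf("CUDA error after %s: %s\n", where, cudaGetErrorString(err));
}
// usage sketch:
//   ccc_loop1<<<blocks, threads>>>(/* ... */);
//   check_kernel_launch("ccc_loop1");
#endif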
#include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <cuda.h> #include <cuda_runtime.h> // #include <THC/THC.h> // #include <THC/THCAtomics.cuh> // #include <THC/THCDeviceUtils.cuh> // extern THCState *state; // author: Charles Shang // https://github.com/torch/cunn/blob/master/lib/THCUNN/generic/SpatialConvolutionMM.cu at::Tensor location_aware_upsampling_cuda_forward(const at::Tensor &input, const at::Tensor &offset_x, const at::Tensor &offset_y, const int k_h, const int k_w) { // THCAssertSameGPU(THCudaTensor_checkGPU(state, 5, input, weight, bias, offset, mask)); AT_ASSERTM(input.is_contiguous(), "input tensor has to be contiguous"); AT_ASSERTM(offset_x.type().is_cuda(), "offset must be a CUDA tensor"); AT_ASSERTM(offset_y.type().is_cuda(), "offset must be a CUDA tensor"); const int batch = input.size(0); // N const int channels = input.size(1); // C const int height = input.size(2); // H const int width = input.size(3); // W // printf("Kernels: %d %d %d %d\n", kernel_h_, kernel_w_, kernel_w, kernel_h); // printf("Channels: %d %d\n", channels, channels_kernel); // printf("Channels: %d %d\n", channels_out, channels_kernel); const int channels_out = channels; const int height_out = height * k_h; const int width_out = width * k_w; auto output = at::empty({batch, channels_out, height_out, width_out}, input.options()); // define alias for easy use const int per_input_size = channels * height * width; const int per_output_size = channels_out * height_out * width_out; const int per_offset_size = offset_x.size(1) * offset_x.size(2) * offset_x.size(3); AT_ASSERTM(offset_x.size(1) == channels_out, "%d channel number mismatch.", channels_out); AT_ASSERTM(offset_x.size(2) == height_out, "%d height mismatch.", height_out); AT_ASSERTM(offset_x.size(3) == width_out, "%d width mismatch.", width_out); for (int n = 0; n < batch; ++n) { AT_DISPATCH_FLOATING_TYPES(input.type(), "location_aware_upsampling_forward_cuda", ([&] { lau_bilinear_cuda(at::cuda::getCurrentCUDAStream(), input.data<scalar_t>() + n * per_input_size, offset_x.data<scalar_t>() + n * per_offset_size, offset_y.data<scalar_t>() + n * per_offset_size, 1, channels, height, width, height_out, width_out, output.data<scalar_t>() + n * per_output_size); })); } output = output.contiguous(); return output; } std::vector<at::Tensor> location_aware_upsampling_cuda_backward(const at::Tensor &input, const at::Tensor &offset_x, const at::Tensor &offset_y, const at::Tensor &grad_output) { AT_ASSERTM(input.is_contiguous(), "input tensor has to be contiguous"); AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(offset_x.type().is_cuda(), "offset must be a CUDA tensor"); AT_ASSERTM(offset_y.type().is_cuda(), "offset must be a CUDA tensor"); const int batch = input.size(0); const int channels = input.size(1); const int height = input.size(2); const int width = input.size(3); const int batch_ = grad_output.size(0); const int channels_out_ = grad_output.size(1); const int height_out_ = grad_output.size(2); const int width_out_ = grad_output.size(3); auto grad_input = at::zeros_like(input); auto grad_offset_x = at::zeros_like(offset_x); auto grad_offset_y = at::zeros_like(offset_y); const int per_input_size = channels * height * width; const int per_output_size = channels_out_ * height_out_ * width_out_; const int per_offset_size = offset_x.size(1) * offset_x.size(2) * offset_x.size(3); for (int n = 0; n < batch; ++n) { AT_DISPATCH_FLOATING_TYPES(input.type(), "location_aware_upsampling_backward_cuda", ([&] { // 
gradient w.r.t. input data and coords lau_bilinear_cuda_backward(at::cuda::getCurrentCUDAStream(), grad_output.data<scalar_t>() + n * per_output_size, input.data<scalar_t>() + n * per_input_size, offset_x.data<scalar_t>() + n * per_offset_size, offset_y.data<scalar_t>() + n * per_offset_size, 1, channels, height, width, height_out_, width_out_, grad_offset_x.data<scalar_t>() + n * per_offset_size, grad_offset_y.data<scalar_t>() + n * per_offset_size, grad_input.data<scalar_t>() + n * per_input_size); })); } return { grad_input, grad_offset_x, grad_offset_y }; } at::Tensor location_determined_upsampling_cuda_forward(const at::Tensor &input, const int k_h, const int k_w) { // THCAssertSameGPU(THCudaTensor_checkGPU(state, 5, input, weight, bias, offset, mask)); AT_ASSERTM(input.is_contiguous(), "input tensor has to be contiguous"); const int batch = input.size(0); // N const int channels = input.size(1); // C const int height = input.size(2); // H const int width = input.size(3); // W // printf("Kernels: %d %d %d %d\n", kernel_h_, kernel_w_, kernel_w, kernel_h); // printf("Channels: %d %d\n", channels, channels_kernel); // printf("Channels: %d %d\n", channels_out, channels_kernel); const int channels_out = channels; const int height_out = height * k_h; const int width_out = width * k_w; auto output = at::empty({batch, channels_out, height_out, width_out}, input.options()); // define alias for easy use const int per_input_size = channels * height * width; const int per_output_size = channels_out * height_out * width_out; for (int n = 0; n < batch; ++n) { AT_DISPATCH_FLOATING_TYPES(input.type(), "location_determined_upsampling_forward_cuda", ([&] { ldu_bilinear_cuda(at::cuda::getCurrentCUDAStream(), input.data<scalar_t>() + n * per_input_size, 1, channels, height, width, height_out, width_out, output.data<scalar_t>() + n * per_output_size); })); } output = output.contiguous(); return output; } at::Tensor location_determined_upsampling_cuda_backward(const at::Tensor &input, const at::Tensor &grad_output) { AT_ASSERTM(input.is_contiguous(), "input tensor has to be contiguous"); AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); auto grad_input = at::zeros_like(input); // NOT IMPLEMENTED return grad_input; } std::vector<at::Tensor> location_determined_upsampling_multi_output_cuda_forward(const at::Tensor &input, const int k_h, const int k_w) { // THCAssertSameGPU(THCudaTensor_checkGPU(state, 5, input, weight, bias, offset, mask)); AT_ASSERTM(input.is_contiguous(), "input tensor has to be contiguous"); const int batch = input.size(0); // N const int channels = input.size(1); // C const int height = input.size(2); // H const int width = input.size(3); // W // printf("Kernels: %d %d %d %d\n", kernel_h_, kernel_w_, kernel_w, kernel_h); // printf("Channels: %d %d\n", channels, channels_kernel); // printf("Channels: %d %d\n", channels_out, channels_kernel); const int channels_out = channels; const int height_out = height * k_h; const int width_out = width * k_w; auto output = at::empty({batch, channels_out, height_out, width_out}, input.options()); auto output_lt = at::empty({batch, channels_out, height_out, width_out}, input.options()); auto output_lb = at::empty({batch, channels_out, height_out, width_out}, input.options()); auto output_rt = at::empty({batch, channels_out, height_out, width_out}, input.options()); auto output_rb = at::empty({batch, channels_out, height_out, width_out}, input.options()); // define alias for easy use const int per_input_size = channels * height * width; 
const int per_output_size = channels_out * height_out * width_out; for (int n = 0; n < batch; ++n) { AT_DISPATCH_FLOATING_TYPES(input.type(), "location_determined_upsampling_forward_cuda", ([&] { ldu_bilinear_multi_output_cuda(at::cuda::getCurrentCUDAStream(), input.data<scalar_t>() + n * per_input_size, 1, channels, height, width, height_out, width_out, output.data<scalar_t>() + n * per_output_size, output_lt.data<scalar_t>() + n * per_output_size, output_lb.data<scalar_t>() + n * per_output_size, output_rt.data<scalar_t>() + n * per_output_size, output_rb.data<scalar_t>() + n * per_output_size); })); } output = output.contiguous(); output_lt = output_lt.contiguous(); output_lb = output_lb.contiguous(); output_rt = output_rt.contiguous(); output_rb = output_rb.contiguous(); return {output, output_lt, output_lb, output_rt, output_rb}; } at::Tensor location_determined_upsampling_multi_output_cuda_backward(const at::Tensor &input, const at::Tensor &grad_output) { AT_ASSERTM(input.is_contiguous(), "input tensor has to be contiguous"); AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); auto grad_input = at::zeros_like(input); // NOT IMPLEMENTED return grad_input; }
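/* Illustrative sketch (not part of the original source): the pybind11 binding
 * file for this extension is not shown here, but a typical torch extension
 * would expose the entry points above roughly as follows. The module layout
 * and Python-side names are assumptions. */
#if 0
#include <torch/extension.h>
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("lau_forward", &location_aware_upsampling_cuda_forward,
        "location aware upsampling forward (CUDA)");
  m.def("lau_backward", &location_aware_upsampling_cuda_backward,
        "location aware upsampling backward (CUDA)");
  m.def("ldu_forward", &location_determined_upsampling_cuda_forward,
        "location determined upsampling forward (CUDA)");
}
#endif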
#include <thread> #include <atomic> #include <algorithm> #include <vector> #include <set> #include <stdio.h> #include <torch/extension.h> #include <ATen/ATen.h> #include <c10/cuda/CUDAGuard.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/Exceptions.h> #include <cusolverDn.h> #include <cublas_v2.h> #include "utils.cuh" //#define DEBUG 1 #define TORCH_CUSOLVER_CHECK(EXPR) \ do { \ cusolverStatus_t __err = EXPR; \ TORCH_CHECK(__err == CUSOLVER_STATUS_SUCCESS, \ "CUDA error: ", \ cusolverGetErrorString(__err), \ " when calling `" #EXPR "`"); \ } while (0) const char* cusolverGetErrorString(cusolverStatus_t error) { if (error == CUSOLVER_STATUS_SUCCESS) { return "CUBLAS_STATUS_SUCCESS"; } if (error == CUSOLVER_STATUS_NOT_INITIALIZED) { return "CUSOLVER_STATUS_NOT_INITIALIZED"; } if (error == CUSOLVER_STATUS_ALLOC_FAILED) { return "CUSOLVER_STATUS_ALLOC_FAILED"; } if (error == CUSOLVER_STATUS_INVALID_VALUE) { return "CUSOLVER_STATUS_INVALID_VALUE"; } if (error == CUSOLVER_STATUS_ARCH_MISMATCH) { return "CUSOLVER_STATUS_ARCH_MISMATCH"; } if (error == CUSOLVER_STATUS_EXECUTION_FAILED) { return "CUSOLVER_STATUS_EXECUTION_FAILED"; } if (error == CUSOLVER_STATUS_INTERNAL_ERROR) { return "CUSOLVER_STATUS_INTERNAL_ERROR"; } if (error == CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED) { return "CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED"; } return "<unknown>"; } #define TORCH_CUDABLAS_CHECK(EXPR) \ do { \ cublasStatus_t __err = EXPR; \ TORCH_CHECK(__err == CUBLAS_STATUS_SUCCESS, \ "CuBLAS error: ", \ cublasGetErrorString(__err), \ " when calling `" #EXPR "`"); \ } while (0) const char* cublasGetErrorString(cublasStatus_t error) { if (error == CUBLAS_STATUS_SUCCESS) { return "CUBLAS_STATUS_SUCCESS"; } if (error == CUBLAS_STATUS_NOT_INITIALIZED) { return "CUBLAS_STATUS_NOT_INITIALIZED"; } if (error == CUBLAS_STATUS_ALLOC_FAILED) { return "CUBLAS_STATUS_ALLOC_FAILED"; } if (error == CUBLAS_STATUS_INVALID_VALUE) { return "CUBLAS_STATUS_INVALID_VALUE"; } if (error == CUBLAS_STATUS_ARCH_MISMATCH) { return "CUBLAS_STATUS_ARCH_MISMATCH"; } if (error == CUBLAS_STATUS_MAPPING_ERROR) { return "CUBLAS_STATUS_MAPPING_ERROR"; } if (error == CUBLAS_STATUS_EXECUTION_FAILED) { return "CUBLAS_STATUS_EXECUTION_FAILED"; } if (error == CUBLAS_STATUS_INTERNAL_ERROR) { return "CUBLAS_STATUS_INTERNAL_ERROR"; } if (error == CUBLAS_STATUS_NOT_SUPPORTED) { return "CUBLAS_STATUS_NOT_SUPPORTED"; } #ifdef CUBLAS_STATUS_LICENSE_ERROR if (error == CUBLAS_STATUS_LICENSE_ERROR) { return "CUBLAS_STATUS_LICENSE_ERROR"; } #endif return "<unknown>"; } /* CUDA CallBacks */ struct callBackData { std::atomic<int> *work_unit; const int x; const int y; const int callee; }; void CUDART_CB copyCallBack(cudaStream_t stream, cudaError_t error, void *data) { callBackData *tmp = (callBackData *)(data); #ifdef DEBUG fprintf(stderr, "Incrementing work unit at [%d, %d] callee: %d - from %d\n", tmp->x, tmp->y, tmp->callee, tmp->work_unit->load()); #endif std::atomic_fetch_add(tmp->work_unit, 1); } /* cu* library data and functions */ static constexpr double const oned = 1.0; static constexpr double const moned = -1.0; static constexpr float const onef = 1.0; static constexpr float const monef = -1.0; /* POTRF Buffer Size */ template<typename scalar_t> inline int potrf_buffer_size(const cusolverDnHandle_t cusolver_handle, const int mbs) { throw std::invalid_argument("scalar_t"); } template<> inline int potrf_buffer_size<double>(const cusolverDnHandle_t cusolver_handle, const int mbs) { int potrf_buf_size; 
TORCH_CUSOLVER_CHECK(cusolverDnDpotrf_bufferSize( /*handle=*/cusolver_handle, /*uplo=*/CUBLAS_FILL_MODE_LOWER, /*n=*/mbs, /*A=*/NULL, /*lda=*/mbs, /*Lwork=*/&potrf_buf_size )); return potrf_buf_size; } template<> inline int potrf_buffer_size<float>(const cusolverDnHandle_t cusolver_handle, const int mbs) { int potrf_buf_size; TORCH_CUSOLVER_CHECK(cusolverDnSpotrf_bufferSize( /*handle=*/cusolver_handle, /*uplo=*/CUBLAS_FILL_MODE_LOWER, /*n=*/mbs, /*A=*/NULL, /*lda=*/mbs, /*Lwork=*/&potrf_buf_size )); return potrf_buf_size; } /* POTRF */ template<typename scalar_t> inline void potrf(const cusolverDnHandle_t cusolver_handle, const int mbs, const blockAlloc &block_alloc, scalar_t *block_ptr, scalar_t *workspace, const int workspace_size, int *potrf_info, int &potrf_info_h, cudaStream_t stream) { throw std::invalid_argument("scalar_t"); } template<> inline void potrf<double>( const cusolverDnHandle_t cusolver_handle, const int mbs, const blockAlloc &block_alloc, double *block_ptr, double *workspace, const int workspace_size, int *potrf_info, int &potrf_info_h, cudaStream_t stream) { TORCH_CUSOLVER_CHECK(cusolverDnDpotrf( /*handle=*/cusolver_handle, /*uplo=*/CUBLAS_FILL_MODE_LOWER, /*n=*/block_alloc.size, /*A=*/block_ptr, /*lda=*/mbs, /*workspace=*/workspace, /*Lwork=*/workspace_size, /*devInfo=*/potrf_info )); //C10_CUDA_CHECK(cudaMemcpyAsync(&potrf_info_h, potrf_info, sizeof(int), cudaMemcpyDeviceToHost, stream)); potrf_info_h = 0; } template<> inline void potrf<float>( const cusolverDnHandle_t cusolver_handle, const int mbs, const blockAlloc &block_alloc, float *block_ptr, float *workspace, const int workspace_size, int *potrf_info, int &potrf_info_h, cudaStream_t stream) { TORCH_CUSOLVER_CHECK(cusolverDnSpotrf( /*handle=*/cusolver_handle, /*uplo=*/CUBLAS_FILL_MODE_LOWER, /*n=*/block_alloc.size, /*A=*/block_ptr, /*lda=*/mbs, /*workspace=*/workspace, /*Lwork=*/workspace_size, /*devInfo=*/potrf_info )); //C10_CUDA_CHECK(cudaMemcpyAsync(&potrf_info_h, potrf_info, sizeof(int), cudaMemcpyDeviceToHost, stream)); potrf_info_h = 0; } /* TRSM (cuBLAS) */ template<typename scalar_t> inline void trsm(const cublasHandle_t cublas_handle, const blockAlloc &i_alloc, const blockAlloc &b_alloc, scalar_t* i_block, scalar_t* b_block, const int mbs) { throw std::invalid_argument("scalar_t"); } template<> inline void trsm<double>( const cublasHandle_t cublas_handle, const blockAlloc &i_alloc, const blockAlloc &b_alloc, double* i_block, double* b_block, const int mbs) { TORCH_CUDABLAS_CHECK(cublasDtrsm( /*handle=*/cublas_handle, /*side=*/CUBLAS_SIDE_RIGHT, /*uplo=*/CUBLAS_FILL_MODE_LOWER, /*trans=*/CUBLAS_OP_T, /*diag=*/CUBLAS_DIAG_NON_UNIT, /*m=*/b_alloc.size, /*n=*/i_alloc.size, /*alpha=*/&oned, /*A=*/i_block, /*lda=*/mbs, /*B=*/b_block, /*ldb=*/mbs )); } template<> inline void trsm<float>( const cublasHandle_t cublas_handle, const blockAlloc &i_alloc, const blockAlloc &b_alloc, float* i_block, float* b_block, const int mbs) { TORCH_CUDABLAS_CHECK(cublasStrsm( /*handle=*/cublas_handle, /*side=*/CUBLAS_SIDE_RIGHT, /*uplo=*/CUBLAS_FILL_MODE_LOWER, /*trans=*/CUBLAS_OP_T, /*diag=*/CUBLAS_DIAG_NON_UNIT, /*m=*/b_alloc.size, /*n=*/i_alloc.size, /*alpha=*/&onef, /*A=*/i_block, /*lda=*/mbs, /*B=*/b_block, /*ldb=*/mbs )); } /* GEMM (cuBLAS) */ template<typename scalar_t> inline void gemm(const cublasHandle_t cublas_handle, const blockAlloc &b_alloc, const blockAlloc &y_alloc, const blockAlloc &i_alloc, scalar_t* b_block, scalar_t* y_block, scalar_t* out_buf, const int mbs) { throw std::invalid_argument("scalar_t"); } 
template<> inline void gemm<double>( const cublasHandle_t cublas_handle, const blockAlloc &b_alloc, const blockAlloc &y_alloc, const blockAlloc &i_alloc, double* b_block, double* y_block, double* out_buf, const int mbs) { TORCH_CUDABLAS_CHECK(cublasDgemm( /*handle=*/cublas_handle, /*transa=*/CUBLAS_OP_N, /*transb=*/CUBLAS_OP_T, /*m=*/b_alloc.size, /*n=*/y_alloc.size, /*k=*/i_alloc.size, /*alpha=*/&moned, /*A=*/b_block, /*lda=*/mbs, /*B=*/y_block, /*ldb=*/mbs, /*beta=*/&oned, /*C=*/out_buf, /*ldc=*/mbs )); } template<> inline void gemm<float>( const cublasHandle_t cublas_handle, const blockAlloc &b_alloc, const blockAlloc &y_alloc, const blockAlloc &i_alloc, float* b_block, float* y_block, float* out_buf, const int mbs) { TORCH_CUDABLAS_CHECK(cublasSgemm( /*handle=*/cublas_handle, /*transa=*/CUBLAS_OP_N, /*transb=*/CUBLAS_OP_T, /*m=*/b_alloc.size, /*n=*/y_alloc.size, /*k=*/i_alloc.size, /*alpha=*/&monef, /*A=*/b_block, /*lda=*/mbs, /*B=*/y_block, /*ldb=*/mbs, /*beta=*/&onef, /*C=*/out_buf, /*ldc=*/mbs )); } /* SYRK (cuBLAS) */ template<typename scalar_t> inline void syrk(const cublasHandle_t cublas_handle, const blockAlloc &i_alloc, const blockAlloc &b_alloc, scalar_t* b_block, scalar_t* out_buf, const int mbs) { throw std::invalid_argument("scalar_t"); } template<> inline void syrk<double>( const cublasHandle_t cublas_handle, const blockAlloc &i_alloc, const blockAlloc &b_alloc, double* b_block, double* out_buf, const int mbs) { TORCH_CUDABLAS_CHECK(cublasDsyrk( /*handle=*/cublas_handle, /*uplo=*/CUBLAS_FILL_MODE_LOWER, /*trans=*/CUBLAS_OP_N, /*n=*/b_alloc.size, /*k=*/i_alloc.size, /*alpha=*/&moned, /*A=*/b_block, /*lda=*/mbs, /*beta=*/&oned, /*C=*/out_buf, /*ldc=*/mbs )); } template<> inline void syrk<float>( const cublasHandle_t cublas_handle, const blockAlloc &i_alloc, const blockAlloc &b_alloc, float* b_block, float* out_buf, const int mbs) { TORCH_CUDABLAS_CHECK(cublasSsyrk( /*handle=*/cublas_handle, /*uplo=*/CUBLAS_FILL_MODE_LOWER, /*trans=*/CUBLAS_OP_N, /*n=*/b_alloc.size, /*k=*/i_alloc.size, /*alpha=*/&monef, /*A=*/b_block, /*lda=*/mbs, /*beta=*/&onef, /*C=*/out_buf, /*ldc=*/mbs )); } /* Data-loading helper functions */ template <typename scalar_t> static inline void load_block( torch::Tensor &data_h, scalar_t* &data_d, const blockAlloc& alloc_i, const blockAlloc& alloc_j, const int mbs, const cudaStream_t stream) { const int64_t si = data_h.stride(0); const int64_t sj = data_h.stride(1); scalar_t *data_h_ptr = data_h.data_ptr<scalar_t>(); const uint64_t offset = si * alloc_i.start + sj * alloc_j.start; TORCH_CUDABLAS_CHECK(cublasSetMatrixAsync( /*rows=*/alloc_i.size, /*cols=*/alloc_j.size, /*elem_size=*/sizeof(scalar_t), /*A=*/(void *)(data_h_ptr + offset), /*lda=*/sj, /*B=*/(void *)data_d, /*ldb=*/mbs, /*stream=*/stream )); } template <typename scalar_t> static inline void get_block( scalar_t* &data_d, torch::Tensor &data_h, const blockAlloc& alloc_i, const blockAlloc& alloc_j, const int mbs, const cudaStream_t stream) { const int64_t si = data_h.stride(0); const int64_t sj = data_h.stride(1); scalar_t *data_h_ptr = data_h.data_ptr<scalar_t>(); const uint64_t offset = si * alloc_i.start + sj * alloc_j.start; TORCH_CUDABLAS_CHECK(cublasGetMatrixAsync( /*rows=*/alloc_i.size, /*cols=*/alloc_j.size, /*elem_size=*/sizeof(scalar_t), /*A=*/(void *)data_d, /*lda=*/mbs, /*B=*/(void *)(data_h_ptr + offset), /*ldb=*/sj, /*stream=*/stream )); } template <typename scalar_t> static inline void opt_load_block( torch::Tensor &data_h, scalar_t* &data_d, const int block_id, std::set<int> 
&col0_fill, const blockAlloc& alloc_i, const blockAlloc& alloc_j, const int mbs, const cudaStream_t stream) { if (col0_fill.find(block_id) == col0_fill.end()) { load_block<scalar_t>(data_h, data_d, alloc_i, alloc_j, mbs, stream); col0_fill.insert(block_id); } } /* Main parallel POTRF function */ void parallel_potrf_runner(int device_id, std::vector<std::vector<std::atomic<int>>> &work, torch::Tensor &A, std::vector<blockAlloc> &allocs, cusolverDnHandle_t cusolver_handle) { // CUDA devices and stream c10::cuda::CUDAGuard g(device_id); at::cuda::CUDAStream s1 = at::cuda::getStreamFromPool(false, device_id); cudaStream_t s1_c = s1.stream(); at::cuda::CUDAStream s2 = at::cuda::getStreamFromPool(false, device_id); cudaStream_t s2_c = s2.stream(); at::cuda::CUDAStream s3 = at::cuda::getStreamFromPool(false, device_id); cudaStream_t s3_c = s3.stream(); at::cuda::CUDAStreamGuard g0(s1); // Fetch cuBLAS handle and set cuBLAS, cuSOLVER streams to s1 const auto cublas_handle = at::cuda::getCurrentCUDABlasHandle(); cudaStream_t orig_cublas_stream; TORCH_CUDABLAS_CHECK(cublasGetStream_v2(cublas_handle, &orig_cublas_stream)); TORCH_CUDABLAS_CHECK(cublasSetStream_v2(cublas_handle, s1_c)); cudaStream_t orig_cusolver_stream; TORCH_CUSOLVER_CHECK(cusolverDnGetStream(cusolver_handle, &orig_cusolver_stream)); TORCH_CUSOLVER_CHECK(cusolverDnSetStream(cusolver_handle, s1_c)); const auto scalar_type = A.scalar_type(); const int k = allocs.size(); const int mbs = (*std::max_element(allocs.begin(), allocs.end(), [] (blockAlloc lhs, blockAlloc rhs) { return lhs.size < rhs.size; })).size; const uint64_t mbs_sq = mbs*mbs; // Figure out `my_blocks` the blocks of the current stage std::vector<blockAlloc> my_blocks; std::set<int> my_block_ids; for (auto &block : allocs) { if (block.device == device_id) { my_blocks.push_back(block); my_block_ids.insert(block.id); } } std::map<std::pair<int, int>, callBackData> callback_data; for (int i = 0; i < k; i++) { for (int j = 0; j < k; j++) { const callBackData cback_data = {.work_unit = &(work[i][j]), .x = i, .y = j, .callee = -1}; callback_data.insert(std::pair<std::pair<int, int>, callBackData>(std::pair<int, int>(i, j), cback_data)); } } // col0_fill keeps track of the 'current' column: which blocks are loaded or not. std::set<int> col0_fill; // First GPU buffer allocation. 
const uint64_t buf_size = mbs_sq * (k + k + 1); const auto buf_opt = torch::TensorOptions() .dtype(A.dtype()) .device(torch::kCUDA, device_id) .layout(torch::kStrided) .requires_grad(false); const auto data_buf = torch::empty(buf_size, buf_opt); AT_DISPATCH_FLOATING_TYPES(scalar_type, "dispatch_parallel_potrf", [&] { scalar_t *A_data = A.data_ptr<scalar_t>(); // How much workspace does potrf need: int potrf_buf_size = potrf_buffer_size<scalar_t>(cusolver_handle, mbs); const auto potrf_buf = torch::empty(potrf_buf_size, buf_opt); const auto potrf_info_buf = torch::zeros(1, torch::dtype(torch::kInt32).device(torch::kCUDA, device_id)); // Data buffers scalar_t *data_buf_ptr = data_buf.data_ptr<scalar_t>(); scalar_t *potrf_buf_ptr = potrf_buf.data_ptr<scalar_t>(); int *potrf_info_buf_ptr = potrf_info_buf.data_ptr<int>(); scalar_t *col0_h[k]; for (int i = 0; i < k; i++) { col0_h[i] = data_buf_ptr; data_buf_ptr += mbs_sq; } scalar_t *col1_h[k]; for (int i = 0; i < k; i++) { col1_h[i] = data_buf_ptr; data_buf_ptr += mbs_sq; } scalar_t *g_buf = data_buf_ptr; // Book-keeping variables (used in the loop) uint col_updates_left; uint trail_updates_left; int potrf_info_h; scalar_t **col_buf_h; scalar_t **next_buf_h; cudaStream_t s_copyback; // Start the main loop for (int i = 0; i < k; i++) { #ifdef DEBUG fprintf(stderr, "Starting iteration %d\n", i); #endif // Setup double-buffering (via pre-inserting elements in col0_fill) // and number of updates. col_updates_left = 0; trail_updates_left = 0; for (const auto& mb : my_blocks) { if (mb.id > i) { col_updates_left += 1; trail_updates_left += mb.id - i; if (i != 0) {col0_fill.insert(mb.id);} } } // Switch the double-buffered col0, col1 if (i % 2 == 0) { col_buf_h = col0_h; next_buf_h = col1_h; s_copyback = s2_c; } else { col_buf_h = col1_h; next_buf_h = col0_h; s_copyback = s3_c; } C10_CUDA_CHECK(cudaStreamSynchronize(s_copyback)); // 1. POTRF scalar_t * i_block = col_buf_h[i]; const auto& i_alloc = allocs[i]; if (i_alloc.device == device_id) { while (work[i][i] != i) { std::this_thread::yield(); } opt_load_block<scalar_t>(A, i_block, i, col0_fill, i_alloc, i_alloc, mbs, s1_c); // [i, i] potrf<scalar_t>(cusolver_handle, mbs, i_alloc, i_block, potrf_buf_ptr, potrf_buf_size, potrf_info_buf_ptr, potrf_info_h, s1_c); C10_CUDA_CHECK(cudaStreamSynchronize(s1_c)); if (potrf_info_h != 0) { AT_ERROR("Cholesky decomposition failed: leading minor of order ", potrf_info_h, " is not positive definite."); } get_block<scalar_t>(i_block, A, i_alloc, i_alloc, mbs, s_copyback); C10_CUDA_CHECK(cudaStreamAddCallback(s_copyback, copyCallBack, &callback_data.at(std::pair<int, int>(i, i)), 0)); #ifdef DEBUG fprintf(stderr, "D:%d Iteration %d stage %d - finished [%d, %d]\n", device_id, i, 1, i, i); #endif } // 2. COLUMN UPDATE while (work[i][i] < i + 1) { std::this_thread::yield(); } // Keep track of which blocks we have already processed. // work table cannot work for this here, since it is set asynchronously. 
std::unordered_set<int> processed_idx; while (col_updates_left > 0) { for (const auto& b_alloc : my_blocks) { const int b = b_alloc.id; if (b <= i || processed_idx.find(b) != processed_idx.end() || work[b][i] != i) { continue; } scalar_t *b_block = col_buf_h[b]; opt_load_block<scalar_t>(A, i_block, i, col0_fill, i_alloc, i_alloc, mbs, s1_c); // [i, i] opt_load_block<scalar_t>(A, b_block, b, col0_fill, b_alloc, i_alloc, mbs, s1_c); // [b, i] trsm<scalar_t>(cublas_handle, i_alloc, b_alloc, i_block, b_block, mbs); C10_CUDA_CHECK(cudaStreamSynchronize(s1_c)); get_block<scalar_t>(b_block, A, b_alloc, i_alloc, mbs, s_copyback); C10_CUDA_CHECK(cudaStreamAddCallback(s_copyback, copyCallBack, &callback_data.at(std::pair<int, int>(b, i)), 0)); col_updates_left--; processed_idx.insert(b); #ifdef DEBUG fprintf(stderr, "D:%d Iteration %d stage %d - finished [%d, %d]\n", device_id, i, 2, b, i); #endif } } // 3. TRAILING UPDATE // Note that this loop does not need `processed_idx` like loop 2 // since it is processed in order. In fact the outer while loop // is unnecessary #ifdef DEBUG fprintf(stderr, "Starting stage 3\n"); #endif while (trail_updates_left > 0) { for (const auto& b_alloc : my_blocks) { int b = b_alloc.id; if (b < i + 1) { continue; } while (work[b][i] != i + 1) { std::this_thread::yield(); } scalar_t * b_block = col_buf_h[b]; for (int y = b; y > i; y--) { while (work[y][i] != i + 1 || work[b][y] != i) { std::this_thread::yield(); } const auto& y_alloc = allocs[y]; scalar_t *y_block = col_buf_h[y]; opt_load_block<scalar_t>(A, y_block, y, col0_fill, y_alloc, i_alloc, mbs, s1_c); // [y, i] load_block<scalar_t>(A, g_buf, b_alloc, y_alloc, mbs, s1_c); // [b, y] if (b_alloc.id != y_alloc.id) { gemm<scalar_t>(cublas_handle, b_alloc, y_alloc, i_alloc, b_block, y_block, g_buf, mbs); } else { syrk<scalar_t>(cublas_handle, i_alloc, b_alloc, b_block, g_buf, mbs); } if (y == i + 1) { // We are on the column which will be tackled next, can copy directly to col0 C10_CUDA_CHECK( cudaMemcpyAsync(next_buf_h[b], g_buf, mbs_sq * sizeof(scalar_t), cudaMemcpyDeviceToDevice, s1_c)); C10_CUDA_CHECK(cudaStreamSynchronize(s1_c)); get_block<scalar_t>(next_buf_h[b], A, b_alloc, y_alloc, mbs, s_copyback); C10_CUDA_CHECK(cudaStreamAddCallback(s_copyback, copyCallBack, &callback_data.at(std::pair<int, int>(b, y)), 0)); } else { // We must free the `g_buf` variable before the next round. 
get_block<scalar_t>(g_buf, A, b_alloc, y_alloc, mbs, s1_c); C10_CUDA_CHECK(cudaStreamSynchronize(s1_c)); std::atomic_fetch_add(&work[b][y], 1); } trail_updates_left--; #ifdef DEBUG fprintf(stderr, "D:%d Iteration %d stage %d - finished [%d, %d]\n", device_id, i, 3, b, y); #endif } } } col0_fill.clear(); } C10_CUDA_CHECK(cudaStreamSynchronize(s1_c)); C10_CUDA_CHECK(cudaStreamSynchronize(s2_c)); C10_CUDA_CHECK(cudaStreamSynchronize(s3_c)); }); TORCH_CUDABLAS_CHECK(cublasSetStream_v2(cublas_handle, orig_cublas_stream)); TORCH_CUSOLVER_CHECK(cusolverDnSetStream(cusolver_handle, orig_cusolver_stream)); } torch::Tensor parallel_potrf_cuda( std::vector<gpuInfo> gpu_info, std::vector<blockAlloc> allocations, torch::Tensor &A) { CHECK_CPU(A); // Initialize the atomic table int k = allocations.size(); std::vector<std::vector<std::atomic<int>>> work(k); for (int i = 0; i < k; i++) { work[i] = std::vector<std::atomic<int>>(k); for (int j = 0; j < k; j++) { work[i][j].store(0); } } std::vector<std::thread> threads; for (const auto& gi : gpu_info) { threads.push_back( std::thread(&parallel_potrf_runner, gi.id, std::ref(work), std::ref(A), std::ref(allocations), gi.cusolver_handle)); } for (auto& t : threads) { t.join(); } return A; }
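// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the source file above): a single-threaded,
// CPU-only reference of the right-looking blocked Cholesky that
// parallel_potrf_cuda distributes across GPUs. Each outer iteration performs
// the same three stages the runner schedules through the atomic work[][]
// table: (1) POTRF on the diagonal block [i, i], (2) TRSM on the column
// blocks [b, i], (3) SYRK/GEMM updates on the trailing blocks [b, y].
// All helper names below (potrf_tile, trsm_tile, gemm_nt_tile,
// blocked_cholesky_reference) are hypothetical; tiles are assumed to be
// mbs x mbs, column-major, with only the lower triangle referenced.
// ---------------------------------------------------------------------------
#include <cassert>
#include <cmath>
#include <vector>

using Block = std::vector<double>;                     // one mbs x mbs tile, column-major
using BlockMatrix = std::vector<std::vector<Block>>;   // k x k grid of tiles

// Unblocked Cholesky of a single diagonal tile: A = L * L^T (lower, in place).
static void potrf_tile(Block& A, int mbs) {
  for (int j = 0; j < mbs; ++j) {
    double d = A[j * mbs + j];
    for (int p = 0; p < j; ++p) d -= A[p * mbs + j] * A[p * mbs + j];
    assert(d > 0.0 && "leading minor is not positive definite");
    d = std::sqrt(d);
    A[j * mbs + j] = d;
    for (int r = j + 1; r < mbs; ++r) {
      double s = A[j * mbs + r];
      for (int p = 0; p < j; ++p) s -= A[p * mbs + r] * A[p * mbs + j];
      A[j * mbs + r] = s / d;
    }
  }
}

// Stage-2 TRSM: solve X * L^T = B for X (B is overwritten), L lower triangular.
static void trsm_tile(const Block& L, Block& B, int mbs) {
  for (int c = 0; c < mbs; ++c)
    for (int r = 0; r < mbs; ++r) {
      double s = B[c * mbs + r];
      for (int p = 0; p < c; ++p) s -= B[p * mbs + r] * L[p * mbs + c];
      B[c * mbs + r] = s / L[c * mbs + c];
    }
}

// Stage-3 trailing update C -= A * B^T (covers both the GEMM and SYRK cases).
static void gemm_nt_tile(Block& C, const Block& A, const Block& B, int mbs) {
  for (int c = 0; c < mbs; ++c)
    for (int r = 0; r < mbs; ++r) {
      double s = 0.0;
      for (int p = 0; p < mbs; ++p) s += A[p * mbs + r] * B[p * mbs + c];
      C[c * mbs + r] -= s;
    }
}

// Right-looking blocked Cholesky over a k x k grid of tiles (lower part only).
// The nesting below is exactly the dependency order the work[][] counters and
// the double-buffered column copies enforce across devices in the code above.
static void blocked_cholesky_reference(BlockMatrix& A, int k, int mbs) {
  for (int i = 0; i < k; ++i) {
    potrf_tile(A[i][i], mbs);                           // stage 1: [i, i]
    for (int b = i + 1; b < k; ++b)
      trsm_tile(A[i][i], A[b][i], mbs);                 // stage 2: [b, i]
    for (int b = i + 1; b < k; ++b)                     // stage 3: [b, y], y <= b
      for (int y = i + 1; y <= b; ++y)
        gemm_nt_tile(A[b][y], A[b][i], A[y][i], mbs);   // SYRK when b == y
  }
}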
the_stack
#define SOLVE_ZERO_INI_GUESS //#define DEBUG namespace amgx { struct TestCase { std::string file_name; std::string config_string; double max_mem_usage; size_t max_mem_leak; }; template <class Handle> struct CWrapper { AMGX_Mode mode; Handle hdl; }; // parameter is used as test name DECLARE_UNITTEST_BEGIN(Memory_Use_Base); std::string base_keywords() { return "stress"; } size_t get_memory_usage() { size_t free_mem, total_mem; cudaMemGetInfo( &free_mem, &total_mem ); size_t used_mem = total_mem - free_mem; //used_mem /= size_t(1024); //double used_mem_f = double(used_mem) / 1024.0; return used_mem; } void check_memory_usage(const char *msg, size_t &mem_before, TestCase &test_case) { std::stringstream mem_msg; size_t used_mem = get_memory_usage(); mem_msg << "Maximum used memory (" << (int)(used_mem / (1024.*1024.)) << ") exceeds limit set to " << test_case.max_mem_usage << " for matrix " << test_case.file_name << " : " << msg; UNITTEST_ASSERT_TRUE_DESC(mem_msg.str().c_str(), (used_mem - mem_before) / (1024.*1024.) < test_case.max_mem_usage); #ifdef DEBUG std::cout << msg << ": " << used_mem / (1024.*1024.) << " Mb" << std::endl; #endif } void launch_test_case(TestCase &test_case) { SignalHandler::hook(); AMGX_finalize_plugins(); AMGX_finalize(); UnitTest::amgx_intialized = false; // Empty kernel call to initialize cuda context size_t free_mem, total_mem; cudaMemGetInfo(&free_mem, &total_mem); size_t context_buffer = 10000000; thrust::device_vector<double> test_vector; int vec_size = (free_mem - context_buffer) / 8; test_vector.resize(vec_size); test_vector.clear(); test_vector.shrink_to_fit(); // Create and destroy cusparse and cublas handles size_t before = get_memory_usage(); { cusparseHandle_t cusparse_handle; cusparseCreate(&cusparse_handle); cusparseDestroy(cusparse_handle); cublasHandle_t cublas_handle; cublasCreate(&cublas_handle); cublasDestroy(cublas_handle); } size_t after = get_memory_usage(); size_t used_mem_before = get_memory_usage(); mem_test_main(test_case, used_mem_before); size_t used_mem_after = get_memory_usage(); std::stringstream msg; msg << "Memory leak: " << (used_mem_after - used_mem_before) << " bytes exceed threshold set to " << test_case.max_mem_leak ; #ifdef DEBUG std::cout << "Mem leak=" << (used_mem_after - used_mem_before) << std::endl; #endif UNITTEST_ASSERT_TRUE_DESC(msg.str().c_str(), (used_mem_after - used_mem_before) <= test_case.max_mem_leak); UNITTEST_ASSERT_EQUAL(AMGX_initialize(), AMGX_OK); UNITTEST_ASSERT_EQUAL(AMGX_initialize_plugins(), AMGX_OK); UnitTest::amgx_intialized = true; } void mem_test_main(TestCase &test_case, size_t &mem_before) { check_memory_usage("before initialize", mem_before, test_case); UNITTEST_ASSERT_EQUAL(AMGX_initialize(), AMGX_OK); UNITTEST_ASSERT_EQUAL(AMGX_initialize_plugins(), AMGX_OK); UnitTest::amgx_intialized = true; check_memory_usage("after initialize", mem_before, test_case); // -------------------------------------- // Create matrix arrays from file // -------------------------------------- Matrix_h Atemp; Vector_h btemp, xtemp; // ------------------------------------------- // Read the matrix // ------------------------------------------- std::string fail_msg = "Cannot open " + test_case.file_name; this->PrintOnFail(fail_msg.c_str()); UNITTEST_ASSERT_TRUE(this->read_system(test_case.file_name.c_str(), Atemp, btemp, xtemp)); bool hasDiag = Atemp.hasProps(DIAG); std::stringstream nan_msg; nan_msg << "Nan residual for matrix " << test_case.file_name; // Create row_offsets, col_indices, off_dia_values and dia_values 
arrays from the matrix just read int num_rows = Atemp.get_num_rows(); int num_nz = Atemp.get_num_nz(); int bsize_x = Atemp.get_block_dimx(); int bsize_y = Atemp.get_block_dimy(); int bsize = bsize_x * bsize_y; if (xtemp.size() == 0) { xtemp.resize(num_rows * bsize_y, 0.); } std::vector<int> row_offsets(num_rows + 1); std::vector<int> col_indices(num_nz); std::vector<double> off_dia_values(num_nz * bsize); std::vector<double> dia_values; if (hasDiag) { dia_values.resize(num_rows * bsize); } std::vector<double> x_vec(num_rows * bsize_y); std::vector<double> b_vec(num_rows * bsize_x); // Fill vectors int *raw_row_ptr = Atemp.row_offsets.raw(); int *raw_col_ptr = Atemp.col_indices.raw(); double *raw_val_ptr = Atemp.values.raw(); // Row offsets for (int i = 0; i < num_rows + 1; i++) { row_offsets[i] = raw_row_ptr[i]; } // Column indices for (int i = 0; i < num_nz; i++) { col_indices[i] = raw_col_ptr[i]; } // Off-diagonal values for (int i = 0; i < num_nz; i++) for (int j = 0; j < bsize; j++) { off_dia_values[i * bsize + j] = raw_val_ptr[i * bsize + j]; } // Diagonal values if (hasDiag) { for (int i = 0; i < num_rows; i++) { for (int j = 0; j < bsize; j++) { dia_values[i * bsize + j] = raw_val_ptr[num_nz * bsize + i * bsize + j]; } } } // RHS double *b_raw_ptr = btemp.raw(); for (int i = 0; i < num_rows; i++) for (int j = 0; j < bsize_x; j++) { b_vec[i * bsize_x + j] = b_raw_ptr[i * bsize_x + j]; } //b_vec[i*bsize_x+j] = b_raw_ptr[i*bsize_x+j]+(1.0*rand()/RAND_MAX); // x vector double *x_raw_ptr = xtemp.raw(); for (int i = 0; i < num_rows; i++) for (int j = 0; j < bsize_y; j++) { x_vec[i * bsize_y + j] = x_raw_ptr[i * bsize_y + j]; } //x_vec[i*bsize_y+j] = x_raw_ptr[i*bsize_y+j]+(1.0*rand()/RAND_MAX); // All of these should create the same result std::string option_string = test_case.config_string; check_memory_usage("before config create", mem_before, test_case); AMGX_config_handle cfg; UNITTEST_ASSERT_EQUAL(AMGX_config_create( &cfg, option_string.c_str() ), AMGX_OK); AMGX_config_handle rsrc_cfg = NULL; UNITTEST_ASSERT_EQUAL(AMGX_config_create(&rsrc_cfg, ""), AMGX_OK); check_memory_usage("after config create", mem_before, test_case); // Choosing device 0 check_memory_usage("before resources create", mem_before, test_case); int device = 0; AMGX_resources_handle rsrc = NULL; UNITTEST_ASSERT_EQUAL(AMGX_resources_create(&rsrc, rsrc_cfg, NULL, 1, &device), AMGX_OK); check_memory_usage("after resources create", mem_before, test_case); double old_max_mem_usage = test_case.max_mem_usage; #if 1 // query device pool size amgx::CWrapper<AMGX_resources_handle> *c_resources = (amgx::CWrapper<AMGX_resources_handle> *)rsrc; test_case.max_mem_usage += ((Resources *)c_resources->hdl)->getPoolSize() / 1024.0 / 1024.0; AMGX_matrix_handle matrix; check_memory_usage("before matrix create", mem_before, test_case); UNITTEST_ASSERT_EQUAL(AMGX_matrix_create( &matrix, rsrc, AMGX_mode_dDDI ), AMGX_OK); check_memory_usage("after matrix create", mem_before, test_case); AMGX_solver_handle solver; check_memory_usage("before solver create", mem_before, test_case); UNITTEST_ASSERT_EQUAL(AMGX_solver_create( &solver, rsrc, AMGX_mode_dDDI, cfg), AMGX_OK); check_memory_usage("after solver create", mem_before, test_case); int num_setup_iters = 3; for (int i_setup = 0; i_setup < num_setup_iters; i_setup++) { // ------------------------------------------ // Upload the new matrix and call setup // ------------------------------------------ if (i_setup == 0) { if (dia_values.size() != 0) { 
UNITTEST_ASSERT_EQUAL(AMGX_matrix_upload_all(matrix, num_rows, num_nz, bsize_x, bsize_y, &row_offsets[0], &col_indices[0], &off_dia_values[0], &dia_values[0]), AMGX_OK); } else { UNITTEST_ASSERT_EQUAL(AMGX_matrix_upload_all(matrix, num_rows, num_nz, bsize_x, bsize_y, &row_offsets[0], &col_indices[0], &off_dia_values[0], NULL), AMGX_OK); } check_memory_usage("after matrix upload", mem_before, test_case); check_memory_usage("before solver setup", mem_before, test_case); UNITTEST_ASSERT_EQUAL(AMGX_solver_setup( solver, matrix ), AMGX_OK); check_memory_usage("after solver setup", mem_before, test_case); } else { check_memory_usage("before matrix replace", mem_before, test_case); if (dia_values.size() != 0) { UNITTEST_ASSERT_EQUAL(AMGX_matrix_replace_coefficients(matrix, num_rows, num_nz, &off_dia_values[0], &dia_values[0]), AMGX_OK); } else { UNITTEST_ASSERT_EQUAL(AMGX_matrix_replace_coefficients(matrix, num_rows, num_nz, &off_dia_values[0], NULL), AMGX_OK); } check_memory_usage("after matrix replace", mem_before, test_case); check_memory_usage("before solver resetup", mem_before, test_case); UNITTEST_ASSERT_EQUAL(AMGX_solver_resetup( solver, matrix ), AMGX_OK); check_memory_usage("after solver resetup", mem_before, test_case); } // Run several solves. const int num_solves = 5; //std::cout << "------------------------------------------" << std::endl; //std::cout << "RUNNING " << num_solves << " SOLVE ITERATIONS" << std::endl; //std::cout << "------------------------------------------" << std::endl; for ( int i_solve = 0 ; i_solve < num_solves ; ++i_solve ) { // Create new RHS // Create vectors. AMGX_vector_handle b, x; check_memory_usage("before vector create", mem_before, test_case); UNITTEST_ASSERT_EQUAL(AMGX_vector_create( &b, rsrc, AMGX_mode_dDDI ), AMGX_OK); check_memory_usage("after vector create", mem_before, test_case); check_memory_usage("before vector create", mem_before, test_case); UNITTEST_ASSERT_EQUAL(AMGX_vector_create( &x, rsrc, AMGX_mode_dDDI ), AMGX_OK); check_memory_usage("after vector create", mem_before, test_case); // Upload to the GPU. check_memory_usage("before vector upload", mem_before, test_case); UNITTEST_ASSERT_EQUAL(AMGX_vector_upload( b, num_rows, bsize_y, &b_vec[0] ), AMGX_OK); check_memory_usage("after vector upload", mem_before, test_case); // Before solve. #ifdef SOLVE_ZERO_INI_GUESS UNITTEST_ASSERT_EQUAL(AMGX_vector_set_zero( x, num_rows, bsize_y ), AMGX_OK); check_memory_usage("before solver solve", mem_before, test_case); UNITTEST_ASSERT_EQUAL(AMGX_solver_solve_with_0_initial_guess( solver, b, x ), AMGX_OK); check_memory_usage("after solver solve", mem_before, test_case); #else check_memory_usage("before vector upload", mem_before, test_case); UNITTEST_ASSERT_EQUAL(AMGX_vector_upload( x, num_rows, bsize_x, &x_vec[0] ), AMGX_OK); check_memory_usage("after vector upload", mem_before, test_case); check_memory_usage("before solver solve", mem_before, test_case); UNITTEST_ASSERT_EQUAL(AMGX_solver_solve( solver, b, x ), AMGX_OK); check_memory_usage("after solver solve", mem_before, test_case); #endif // Read the number of iterations. 
int num_iterations = 0; UNITTEST_ASSERT_EQUAL(AMGX_solver_get_iterations_number( solver, &num_iterations ), AMGX_OK); check_memory_usage("after get iteration number", mem_before, test_case); // Read the residuals and check for NaNs std::vector<double> res(bsize_y); for ( int iter = 0 ; iter < num_iterations ; ++iter ) { for (int j = 0; j < bsize_y; j++) { AMGX_solver_get_iteration_residual(solver, iter, j, &res[j]); UNITTEST_ASSERT_TRUE_DESC(nan_msg.str().c_str(), res[j] == res[j]); } } check_memory_usage("after get iteration residual", mem_before, test_case); check_memory_usage("before vector destroy", mem_before, test_case); UNITTEST_ASSERT_EQUAL(AMGX_vector_destroy( b ), AMGX_OK); check_memory_usage("after vector destroy", mem_before, test_case); check_memory_usage("before vector destroy", mem_before, test_case); UNITTEST_ASSERT_EQUAL(AMGX_vector_destroy( x ), AMGX_OK); check_memory_usage("after vector destroy", mem_before, test_case); } // Solve iterations } // Setup iterations check_memory_usage("before solver destroy", mem_before, test_case); UNITTEST_ASSERT_EQUAL(AMGX_solver_destroy( solver ), AMGX_OK); check_memory_usage("after solver destroy", mem_before, test_case); check_memory_usage("before matrix destroy", mem_before, test_case); UNITTEST_ASSERT_EQUAL(AMGX_matrix_destroy( matrix ), AMGX_OK); check_memory_usage("after matrix destroy", mem_before, test_case); #endif check_memory_usage("before cfg destroy", mem_before, test_case); UNITTEST_ASSERT_EQUAL(AMGX_config_destroy( cfg ), AMGX_OK); check_memory_usage("after cfg destroy", mem_before, test_case); check_memory_usage("before rsrc_cfg destroy", mem_before, test_case); UNITTEST_ASSERT_EQUAL(AMGX_config_destroy( rsrc_cfg ), AMGX_OK); check_memory_usage("after rsrc_cfg destroy", mem_before, test_case); test_case.max_mem_usage = old_max_mem_usage; check_memory_usage("before resources destroy", mem_before, test_case); UNITTEST_ASSERT_EQUAL(AMGX_resources_destroy( rsrc ), AMGX_OK); check_memory_usage("after resources destroy", mem_before, test_case); check_memory_usage("before finalize", mem_before, test_case); UNITTEST_ASSERT_EQUAL(AMGX_finalize_plugins(), AMGX_OK); UNITTEST_ASSERT_EQUAL(AMGX_finalize(), AMGX_OK); check_memory_usage("after finalize", mem_before, test_case); //cudaDeviceReset(); //} } DECLARE_UNITTEST_END(Memory_Use_Base); DECLARE_UNITTEST_BEGIN_EXTD(Memory_Use_DILU, Memory_Use_Base<T_Config>); void run() { TestCase temp_case; // List test cases temp_case.config_string = "config_version=2, solver(main_solver)=MULTICOLOR_DILU, block_format=ROW_MAJOR, main_solver:coloring_level=2, main_solver:max_iters=3"; temp_case.file_name = "Public/florida/atmosdd.mtx"; temp_case.max_mem_usage = 318; // Mb temp_case.max_mem_leak = 0; // bytes temp_case.config_string += ","; temp_case.config_string += "main_solver:print_solve_stats=1,"; temp_case.config_string += "main_solver:monitor_residual=1,"; temp_case.config_string += "determinism_flag=1,"; temp_case.config_string += "max_uncolored_percentage=0.,"; temp_case.config_string += "main_solver:store_res_history=1"; Memory_Use_Base<T_Config>::launch_test_case(temp_case); } DECLARE_UNITTEST_END(Memory_Use_DILU); DECLARE_UNITTEST_BEGIN_EXTD(Memory_Use_DILU2, Memory_Use_Base<T_Config>); void run() { TestCase temp_case; temp_case.config_string = "config_version=2, solver(main_solver)=MULTICOLOR_DILU, block_format=ROW_MAJOR, main_solver:insert_diag_while_reordering=1, main_solver:reorder_cols_by_color=1, main_solver:coloring_level=2, main_solver:max_iters=3 "; temp_case.file_name = 
"Public/florida/atmosdd.mtx"; // temp_case.max_mem_usage = 338.; // Mb temp_case.max_mem_usage = 602.; // Mb temp_case.max_mem_leak = 0; temp_case.config_string += ","; temp_case.config_string += "main_solver:print_solve_stats=1,"; temp_case.config_string += "main_solver:monitor_residual=1,"; temp_case.config_string += "determinism_flag=1,"; temp_case.config_string += "max_uncolored_percentage=0.,"; temp_case.config_string += "main_solver:store_res_history=1"; Memory_Use_Base<T_Config>::launch_test_case(temp_case); } DECLARE_UNITTEST_END(Memory_Use_DILU2); DECLARE_UNITTEST_BEGIN_EXTD(Memory_Use_DILU3, Memory_Use_Base<T_Config>); void run() { TestCase temp_case; temp_case.config_string = "config_version=2, solver(main_solver)=MULTICOLOR_DILU, block_format=ROW_MAJOR, main_solver:insert_diag_while_reordering=0, main_solver:reorder_cols_by_color=1, main_solver:coloring_level=2, main_solver:max_iters=3 "; temp_case.file_name = "Public/florida/atmosdd.mtx"; temp_case.max_mem_usage = 338.; // Mb temp_case.max_mem_leak = 0; temp_case.config_string += ","; temp_case.config_string += "main_solver:print_solve_stats=1,"; temp_case.config_string += "main_solver:monitor_residual=1,"; temp_case.config_string += "determinism_flag=1,"; temp_case.config_string += "max_uncolored_percentage=0.,"; temp_case.config_string += "main_solver:store_res_history=1"; Memory_Use_Base<T_Config>::launch_test_case(temp_case); } DECLARE_UNITTEST_END(Memory_Use_DILU3); DECLARE_UNITTEST_BEGIN_EXTD(Memory_Use_ILU, Memory_Use_Base<T_Config>); void run() { TestCase temp_case; // ILU0 temp_case.config_string = "config_version=2, solver(main_solver)=MULTICOLOR_ILU, main_solver:ilu_sparsity_level=0, block_format=ROW_MAJOR, main_solver:insert_diag_while_reordering=1, main_solver:reorder_cols_by_color=1, main_solver:coloring_level=2, main_solver:max_iters=3"; temp_case.file_name = "Public/florida/atmosdd.mtx"; //temp_case.max_mem_usage = 518.; // Mb temp_case.max_mem_usage = 792.; // Mb temp_case.max_mem_leak = 0; temp_case.config_string += ","; temp_case.config_string += "main_solver:print_solve_stats=1,"; temp_case.config_string += "main_solver:monitor_residual=1,"; temp_case.config_string += "determinism_flag=1,"; temp_case.config_string += "max_uncolored_percentage=0.,"; temp_case.config_string += "main_solver:store_res_history=1"; Memory_Use_Base<T_Config>::launch_test_case(temp_case); } DECLARE_UNITTEST_END(Memory_Use_ILU); DECLARE_UNITTEST_BEGIN_EXTD(Memory_Use_ILU2, Memory_Use_Base<T_Config>); void run() { TestCase temp_case; // ILU1 temp_case.config_string = "config_version=2, solver(main_solver)=MULTICOLOR_ILU, main_solver:ilu_sparsity_level=1, block_format=ROW_MAJOR, main_solver:insert_diag_while_reordering=1, main_solver:reorder_cols_by_color=1, main_solver:coloring_level=2, main_solver:max_iters=3"; temp_case.file_name = "Public/florida/atmosdd.mtx"; temp_case.max_mem_usage = 718.; // Mb temp_case.max_mem_leak = 0; temp_case.config_string += ","; temp_case.config_string += "main_solver:print_solve_stats=1,"; temp_case.config_string += "main_solver:monitor_residual=1,"; temp_case.config_string += "determinism_flag=1,"; temp_case.config_string += "max_uncolored_percentage=0.,"; temp_case.config_string += "main_solver:store_res_history=1"; Memory_Use_Base<T_Config>::launch_test_case(temp_case); } DECLARE_UNITTEST_END(Memory_Use_ILU2); DECLARE_UNITTEST_BEGIN_EXTD(Memory_Use_ILU3, Memory_Use_Base<T_Config>); void run() { TestCase temp_case; // ILU1 temp_case.config_string = "config_version=2, 
solver(main_solver)=MULTICOLOR_ILU, main_solver:ilu_sparsity_level=1, block_format=COL_MAJOR, main_solver:insert_diag_while_reordering=0, main_solver:reorder_cols_by_color=0, main_solver:coloring_level=2, main_solver:max_iters=5"; temp_case.file_name = "Public/florida/atmosdd.mtx"; temp_case.max_mem_usage = 1368.; // Mb temp_case.max_mem_leak = 0; temp_case.config_string += ","; temp_case.config_string += "main_solver:print_solve_stats=1,"; temp_case.config_string += "main_solver:monitor_residual=1,"; temp_case.config_string += "determinism_flag=1,"; temp_case.config_string += "max_uncolored_percentage=0.,"; temp_case.config_string += "main_solver:store_res_history=1"; Memory_Use_Base<T_Config>::launch_test_case(temp_case); } DECLARE_UNITTEST_END(Memory_Use_ILU3); DECLARE_UNITTEST_BEGIN_EXTD(Memory_Use_ILU4, Memory_Use_Base<T_Config>); void run() { TestCase temp_case; // ILU1 temp_case.config_string = "config_version=2, solver(main_solver)=MULTICOLOR_ILU, main_solver:ilu_sparsity_level=1, block_format=COL_MAJOR, main_solver:insert_diag_while_reordering=0, main_solver:reorder_cols_by_color=0, main_solver:coloring_level=2, main_solver:max_iters=5"; temp_case.file_name = "Public/florida/atmosdd.mtx"; temp_case.max_mem_usage = 5068.; // Mb temp_case.max_mem_leak = 0; temp_case.config_string += ","; temp_case.config_string += "main_solver:print_solve_stats=1,"; temp_case.config_string += "main_solver:monitor_residual=1,"; temp_case.config_string += "determinism_flag=1,"; temp_case.config_string += "max_uncolored_percentage=0.,"; temp_case.config_string += "main_solver:store_res_history=1"; Memory_Use_Base<T_Config>::launch_test_case(temp_case); } DECLARE_UNITTEST_END(Memory_Use_ILU4); DECLARE_UNITTEST_BEGIN_EXTD(Memory_Use_atmosmodd_pressure, Memory_Use_Base<T_Config>); void run() { std::vector<TestCase> test_cases; TestCase temp_case; std::ostringstream cfg_options; cfg_options << "config_version=2,"; cfg_options << "solver=PBICGSTAB,"; cfg_options << "max_iters=1,"; cfg_options << "convergence=RELATIVE_INI_CORE,"; cfg_options << "tolerance=1.0e-4,"; cfg_options << "preconditioner(amg_solver)=AMG,"; cfg_options << "amg_solver:algorithm=CLASSICAL,"; cfg_options << "amg_solver:coarseAgenerator=THRUST,"; cfg_options << "amg_solver:strength_threshold=0.25,"; cfg_options << "amg_solver:max_levels=8,"; cfg_options << "amg_solver:max_iters=1,"; cfg_options << "amg_solver:smoother(amg_smoother)=BLOCK_JACOBI,"; cfg_options << "amg_smoother:relaxation_factor=0.6,"; cfg_options << "amg_solver:interpolator=D2,"; cfg_options << "amg_solver:spmm_no_sort=1,"; cfg_options << "amg_solver:monitor_residual=0,"; cfg_options << "amg_solver:print_grid_stats=1,"; cfg_options << "amg_solver:coarsest_sweeps=1,"; cfg_options << "monitor_residual=1,"; cfg_options << "print_solve_stats=1,"; cfg_options << "store_res_history=1"; temp_case.config_string = cfg_options.str(); temp_case.file_name = "Public/Florida/atmosmodd.mtx"; // temp_case.max_mem_usage = 1068.; // Mb temp_case.max_mem_usage = 1306.; // Mb temp_case.max_mem_leak = 0.05; Memory_Use_Base<T_Config>::launch_test_case(temp_case); } DECLARE_UNITTEST_END(Memory_Use_atmosmodd_pressure); Memory_Use_DILU<TemplateMode<AMGX_mode_dDDI>::Type> Memory_Use_DILU_dDDI; Memory_Use_DILU2<TemplateMode<AMGX_mode_dDDI>::Type> Memory_Use_DILU2_dDDI; Memory_Use_DILU3<TemplateMode<AMGX_mode_dDDI>::Type> Memory_Use_DILU3_dDDI; Memory_Use_ILU<TemplateMode<AMGX_mode_dDDI>::Type> Memory_Use_ILU_dDDI; Memory_Use_ILU2<TemplateMode<AMGX_mode_dDDI>::Type> Memory_Use_ILU2_dDDI; 
Memory_Use_ILU3<TemplateMode<AMGX_mode_dDDI>::Type> Memory_Use_ILU3_dDDI;
//Memory_Use_ILU4<TemplateMode<AMGX_mode_dDDI>::Type> Memory_Use_ILU4_dDDI;
Memory_Use_atmosmodd_pressure<TemplateMode<AMGX_mode_dDDI>::Type> Memory_Use_atmosmodd_pressure_dDDI;

// if you want to be able to run this test for all available configs you can write this:
//#define AMGX_CASE_LINE(CASE) TemplateTest <TemplateMode<CASE>::Type> TemplateTest_##CASE;
//  AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
//#undef AMGX_CASE_LINE

// or run for all device configs
//#define AMGX_CASE_LINE(CASE) TemplateTest <TemplateMode<CASE>::Type> TemplateTest_##CASE;
//  AMGX_FORALL_BUILDS_DEVICE(AMGX_CASE_LINE)
//#undef AMGX_CASE_LINE

//Memory_Use <TemplateMode<AMGX_mode_dDDI>::Type> Memory_Use_instance_mode_dDDI;

// or you can specify several desired configs
//TemplateTest <TemplateMode<AMGX_mode_hDFI>::Type> TemplateTest_hDFI;
//TemplateTest <TemplateMode<AMGX_mode_dDFI>::Type> TemplateTest_dDFI;

} // namespace amgx
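// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the test above): the measurement pattern
// the Memory_Use tests rely on, reduced to a standalone helper. It samples
// cudaMemGetInfo() before and after a callable and reports the delta, which
// is what check_memory_usage() compares against max_mem_usage and
// max_mem_leak. The names MemDelta and measure_gpu_mem_delta are
// hypothetical and not part of AMGX; cudaMemGetInfo is device-wide, so other
// processes on the same GPU also affect the reading.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>
#include <functional>

struct MemDelta {
  size_t before_bytes;   // used bytes before the callable ran
  size_t after_bytes;    // used bytes after it returned
  double delta_mib() const {
    return (static_cast<double>(after_bytes) - static_cast<double>(before_bytes)) / (1024.0 * 1024.0);
  }
};

static size_t used_gpu_bytes() {
  size_t free_mem = 0, total_mem = 0;
  cudaMemGetInfo(&free_mem, &total_mem);
  return total_mem - free_mem;
}

static MemDelta measure_gpu_mem_delta(const std::function<void()>& body) {
  MemDelta d;
  d.before_bytes = used_gpu_bytes();
  body();                        // e.g. setup + solve + destroy of the handles under test
  cudaDeviceSynchronize();       // make sure asynchronous work has finished before re-sampling
  d.after_bytes = used_gpu_bytes();
  return d;
}

// Usage (hypothetical): a positive delta that remains after matching
// create/destroy pairs is what the tests above treat as a leak.
//   MemDelta d = measure_gpu_mem_delta([] { /* AMGX setup + solve + teardown */ });
//   // assert(d.delta_mib() <= allowed_leak_mib);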
the_stack
#include <cuda_runtime.h> #include <cuda_fp16.h> #include "math.hpp" #include "bbox_utils.hpp" #include "grid_stride_range.hpp" #include "block_stride_range.hpp" #include "execution.hpp" #include "vector_traits.hpp" #include "memory.hpp" #include "../cuda4dnn/csl/stream.hpp" #include "../cuda4dnn/csl/span.hpp" #include "../cuda4dnn/csl/tensor.hpp" using namespace cv::dnn::cuda4dnn::csl; using namespace cv::dnn::cuda4dnn::csl::device; namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels { namespace raw { template <class T, bool NORMALIZED_BBOX, int BLOCK_SIZE> __launch_bounds__(BLOCK_SIZE) __global__ void grid_nms(Span<unsigned int> mask_, Span<int> count_, View<T> bboxes_, size_type num_classes, index_type background_class_id, size_type topK, size_type topK_gs, float nms_threshold) { // topK_gs is topK rounded upwards to some size // mask: [batch_size, num_classes, topK_gs, topK_gs / 32] // bboxes: [batch_size, num_classes, topK, 4] // count: [batch_size, num_classes] const index_type c = blockIdx.y; const index_type b = blockIdx.z; if (c == background_class_id) return; auto mask = mask_.data() + (b * num_classes + c) * topK_gs * topK_gs / 32; auto bboxes = bboxes_.data() + (b * num_classes + c) * topK * 4; auto count = count_.data() + b * num_classes + c; const auto boxes = *count; if (boxes == 0) return; /* We divide the set of boxes into groups containing BLOCK_SIZE boxes */ const auto num_groups = (boxes + BLOCK_SIZE - 1) / BLOCK_SIZE; /* We need to calculate IOUs for every pair of boxes. We can generalize and say that * we need to compute IOUs of every group with every other group including itself. */ // Each block processes a pair of groups. const index_type group_i = blockIdx.x % num_groups; const index_type group_j = blockIdx.x / num_groups; /* we use __syncthreads() later but note that the following condition will cause all threads * in the block to exit; hence, no thread will execute a divergent __syncthreads() */ if (group_i >= num_groups || group_j >= num_groups) return; /* Note that IOU(A, B) = IOU(B, A). Hence, if we compute IOU(GROUP_A, GROUP_B), we do not need * to compute IOU(GROUP_B, GROUP_A). We still have to compute IOU(GROUP_A, GROUP_A) though since * each group has many boxes and we need IOUs amongst boxes within a group. * * We arbitarily choose a scheme to exit : exit if group_i is greater than group_j. This way we only * compute IOUs between groups once. While nearly half the blocks are wasted, it's ok since they exit * early on and the working blocks are compute heavy. */ if (group_i > group_j) return; /* the following variables contain the absolute box number of the first box of their respective groups */ const auto group_i_offset = group_i * BLOCK_SIZE; const auto group_j_offset = group_j * BLOCK_SIZE; /* MAIN LOOP LOGIC: * We compare a box `i` from group_i with all boxes in group_j in each iteration. The box `j` is fixed * for each thread. The `j` exactly maps to the thread index. Hence, the `j` is a loop invariant. Each * thread of the block computes the overlap between box `i` and its box `j`. * * for (int i = 0; i < BLOCK_SIZE; i++) * { * // i = box 1 * // j = threadIdx.x = box 2 * } */ /* The `j` box is fixed for each thread. All `i` boxes will be required for every thread. * We store the `i` boxes in shared memory to allow global memory coalesing. */ using vector_type = get_vector_type_t<T, 4>; __shared__ vector_type group_i_boxes[BLOCK_SIZE]; /* We will precompute the sizes of `i` boxes in the code where we load them. 
The size computation * is distributed across the block. Otherwise, all threads will have to compute the size of the same * box simultaneously in the main loop. The size is computed while the memory subsystem is busy * servicing requests for box coordinates; the compute resources would otherwise be idle in this phase. */ /* we store the size as a float since the size can exceed fp16 limits for unnormalized boxes */ __shared__ float group_i_size[BLOCK_SIZE]; const auto bboxes_vPtr = vector_type::get_pointer(bboxes); // load `i` boxes and precompute their sizes { int i = threadIdx.x; if (group_i_offset + i < boxes) { vector_type box; v_load(box, bboxes_vPtr[group_i_offset + i]); v_store(group_i_boxes[i], box); BoundingBox bbox; bbox.xmin = box.data[0]; bbox.ymin = box.data[1]; bbox.xmax = box.data[2]; bbox.ymax = box.data[3]; group_i_size[i] = compute_bbox_size<NORMALIZED_BBOX>(bbox); } } __syncthreads(); /* We compute overlap between boxes and check if the IOU exceeds the nms threshold. * We store the result (exceeds or below nms_thresold) in a two-dimensional matrix. * (i, j) is set to one if the overlap between i and j is within the nms threshold. * We pack 32 results into one 32-bit integer. The effective memory layout of the * matrix hence is (BLOCK_SIZE, BLOCK_SIZE / 32). */ __shared__ unsigned int mask_shared[BLOCK_SIZE * BLOCK_SIZE / 32]; // load box `j` and precompute its size (fixed per thread) BoundingBox bbox_j; float bbox_j_size = 0; if (group_j_offset + threadIdx.x < boxes) { vector_type box; v_load(box, bboxes_vPtr[group_j_offset + threadIdx.x]); bbox_j.xmin = box.data[0]; bbox_j.ymin = box.data[1]; bbox_j.xmax = box.data[2]; bbox_j.ymax = box.data[3]; bbox_j_size = compute_bbox_size<NORMALIZED_BBOX>(bbox_j); } /* Each thread computes a predicate which is broadcasted across the warp to obtain a 32-bit mask. * The lane zero thread of each warp saves the mask. We store the offset to the mask array beforehand * to save cycles in the compute-intensive main loop. */ auto mask_offset = threadIdx.x / 32; /* The main loop is compute intensive and causes the kernel to be overall compute-bound. Hence, * this loop has been highly tuned. Please profile and verify carefully before making changes. */ /* UNROLL_SIZE is the number of boxes that must be processed per iteration. We manually unroll * the loop since the compiler cannot effectively unroll on its own preassumably due to presence * of instructions forcing warp synchronization. */ constexpr int UNROLL_SIZE = 4; #pragma unroll 8 for (int s = 0; s < BLOCK_SIZE; s += UNROLL_SIZE) { bool do_not_reject_j[UNROLL_SIZE]; #pragma unroll for (int k = 0; k < UNROLL_SIZE; k++) { int i = s + k; /* The number of boxes need not necessarily be a multiple of BLOCK_SIZE. * However, the shared memory allocated can hold BLOCK_SIZE boxes from * each group. Accessing the undefined regions of shared memory is * a valid memory operation as long as the memory has been allocated. * * The condition below is only required when one of the groups does not * fully filled with valid boxes. This situations are relatively rare. It's * more common to see both groups completely filled. * * We comment this condition to improve the performance of the common case. * This leads to a net improvement. 
*/ // if (group_i_offset + i < boxes && group_j_offset + threadIdx.x < boxes) { BoundingBox bbox_i; float bbox_i_size; { vector_type box; v_load(box, group_i_boxes[i]); bbox_i.xmin = box.data[0]; bbox_i.ymin = box.data[1]; bbox_i.xmax = box.data[2]; bbox_i.ymax = box.data[3]; bbox_i_size = group_i_size[i]; } using device::min; using device::max; BoundingBox intersect_bbox; intersect_bbox.xmin = max(bbox_i.xmin, bbox_j.xmin); intersect_bbox.ymin = max(bbox_i.ymin, bbox_j.ymin); intersect_bbox.xmax = min(bbox_i.xmax, bbox_j.xmax); intersect_bbox.ymax = min(bbox_i.ymax, bbox_j.ymax); float intersect_size = compute_bbox_size<NORMALIZED_BBOX>(intersect_bbox); using device::fast_divide_ftz; float iou = fast_divide_ftz(intersect_size, bbox_i_size + bbox_j_size - intersect_size); do_not_reject_j[k] = iou <= nms_threshold; } } #pragma unroll for (int k = 0; k < UNROLL_SIZE; k++) { // FORWARD_COMPATIBILITY_TAG: WARP_SIZE_DEPENDENT_CODE auto predicate = __ballot_sync(0xFFFFFFFF, do_not_reject_j[k]); if (threadIdx.x % 32 == 0) mask_shared[mask_offset] = predicate; /* The following operation should logically be inside the previous if branch. Note that `mask_offset` * is only used by lane zero threads. Hence, there is no harm in executing it other threads as it is * unused there. * * Keeping it inside prevents the compiler from treating it as a constexpr addition to the address in * successive unrolled iterations. A register is used and instructions are emitted to multiply the * addend by four to obtain the byte offset. Pulling it out of the branch makes the compiler do constexpr * addition on the address in successive unrolled iterations. */ mask_offset += BLOCK_SIZE / 32; } } __syncthreads(); /* The mask data is organized as a two-dimensional bit matrix of size topK_gs * topK_gs. * (i, j) is set to true if the overlap between `i` and `j` is beyond the nms threshold. * We pack 32 results into one 32-bit integer. So the effective memory layout is topK_gs * topK_gs / 32. */ /* Each box `i` was compared with BLOCK_SIZE `j` boxes. This amounts to BLOCK_SIZE / 32 * 32-bit integers per box `i`. */ using mask_vector_type = get_vector_type_t<unsigned int, BLOCK_SIZE / 32>; const int i = threadIdx.x; auto mask_shared_vPtr = mask_vector_type::get_pointer(DevicePtr<unsigned>(mask_shared)); mask_vector_type temp; v_load(temp, mask_shared_vPtr[i]); for (int i = 0; i < mask_vector_type::size(); i++) temp.data[i] = __brev(temp.data[i]); auto mask_vPtr = mask_vector_type::get_pointer(mask); v_store(mask_vPtr[((group_i_offset + i) * topK_gs + group_j_offset) / 32 / mask_vector_type::size()], temp); } template <int ITEMS_PER_THREAD, int BLOCK_SIZE> __launch_bounds__(BLOCK_SIZE) __global__ void grid_nms_collect(Span<int> indices_, Span<int> count_, View<unsigned int> mask_, size_type num_classes, index_type background_class_id, size_type topK, size_type topK_gs_by32) { const index_type c = blockIdx.x; if (c == background_class_id) return; const index_type b = blockIdx.y; // topK_gs is topK rounded upwards to some size // indices: [batch_size, num_classes, topK] // count: [batch_size, num_classes] // mask: [batch_size, num_classes, topK_gs, topK_gs / 32] auto indices = indices_.data() + (b * num_classes + c) * topK; auto count = count_.data() + (b * num_classes + c); auto mask = mask_.data() + (b * num_classes + c) * topK_gs_by32 * 32 * topK_gs_by32; const auto boxes = *count; if (boxes == 0) return; /* We have a fixed number of threads and an arbitary number of boxes. 
We use an array of * bits to store which boxes haven't been eliminated and which are still active. We organize * the array of bits into a matrix of bits of the shape (num_rows, BLOCK_SIZE, 32) which * is equivalent to (num_rows, BLOCK_SIZE) where the type is a 32-bit unsigned integer. * `num_rows` is the minimum number of rows required to cover all the boxes. * * Each thread handles a specific column in the matrix. To improve performance, we process * `ITEMS_PER_THREAD` number of elements per thread. This changes the shape to (num_rows, * ROW_WIDTH) where ROW_WIDTH is BLOCK_SIZE * ITEMS_PER_THREAD. */ constexpr int ROW_WIDTH = BLOCK_SIZE * ITEMS_PER_THREAD; const index_type num_32b_masks = static_cast<unsigned>(boxes + 31) / 32; const index_type num_rows = static_cast<unsigned>(num_32b_masks + ROW_WIDTH - 1) / ROW_WIDTH; extern __shared__ unsigned int active_boxes[]; // the matrix described earlier #pragma unroll 1 for (auto idx : block_stride_range<BLOCK_SIZE>(num_32b_masks)) active_boxes[idx] = (idx == num_32b_masks - 1) ? __brev((1u << (boxes % 32)) - 1) : 0xFFFFFFFF; __syncthreads(); using vector_type = get_vector_type_t<unsigned int, ITEMS_PER_THREAD>; auto mask_vPtr = vector_type::get_pointer(mask); auto shared_vPtr = vector_type::get_pointer(DevicePtr<unsigned>(active_boxes)); int index_temp; int thread0_count = 0; int thread_id = threadIdx.x; for (int step = 0; step < num_32b_masks; step++) { auto current_active = active_boxes[step]; while (current_active) { const index_type bit = __clz(current_active); const index_type i = step * 32 + bit; const int mask_offset = static_cast<unsigned>(i * topK_gs_by32) / ITEMS_PER_THREAD; /* We fetch the index from the memory and store it in a register. We will not use it until * much later. This helps avoid a long scoreboard stall. 
*/ if (thread_id == 0) index_temp = indices[i]; __syncthreads(); if (threadIdx.x == 0) active_boxes[step] = current_active ^ (0x80000000 >> bit); __syncthreads(); #pragma unroll 1 for (int r = 0; r < num_rows; r++) { const int idx = r * BLOCK_SIZE + thread_id; if ((step & ~(ITEMS_PER_THREAD - 1)) <= idx * ITEMS_PER_THREAD && idx * ITEMS_PER_THREAD < num_32b_masks) { auto active_boxes_vec = shared_vPtr[idx]; auto mask_vec = mask_vPtr[mask_offset + idx]; for (int i = 0; i < vector_type::size(); i++) active_boxes_vec.data[i] &= mask_vec.data[i]; shared_vPtr[idx] = active_boxes_vec; } } __syncthreads(); if (thread_id == 0) { indices[thread0_count] = index_temp; thread0_count++; } current_active = active_boxes[step]; } } if (threadIdx.x == 0) *count = thread0_count; } } constexpr int GROUP_SIZE = 128; static std::size_t getAlignedTopK(std::size_t topK) { auto remainder = topK % GROUP_SIZE; if (remainder == 0) return topK; return topK + (GROUP_SIZE - remainder); } std::size_t getGridNMSWorkspaceSizePerBatchItem(std::size_t num_classes, std::size_t classwise_topK) { auto topK_gs = getAlignedTopK(classwise_topK); return num_classes * topK_gs * topK_gs / 32 * sizeof(unsigned int); } template <class T> void grid_nms(const Stream& stream, Span<unsigned int> workspace, TensorSpan<int> indices, TensorSpan<int> count, TensorView<T> bboxes, int background_class_id, bool normalized_bbox, float nms_threshold) { // workspace: [batch_size, num_classes, topK_gs, topK_gs / 32] // indices: [batch_size, num_classes, topK] // count: [batch_size, num_classes] // bboxes: [batch_size, num_classes, topK, 4] (only first count[b][c] boxes are read) const auto batch_size = indices.get_axis_size(0); CV_Assert(count.get_axis_size(0) == batch_size); CV_Assert(bboxes.get_axis_size(0) == batch_size); const auto num_classes = indices.get_axis_size(1); CV_Assert(count.get_axis_size(1) == num_classes); CV_Assert(bboxes.get_axis_size(1) == num_classes); const auto topK = indices.get_axis_size(2); CV_Assert(bboxes.get_axis_size(2) == topK); CV_Assert(bboxes.get_axis_size(3) == 4); const auto topK_gs = getAlignedTopK(topK); CV_Assert(workspace.size() >= topK_gs * topK_gs / 32); const auto boxes = topK; const auto num_groups = (boxes + GROUP_SIZE - 1) / GROUP_SIZE; { // grid = (num_groups * num_groups, num_classes, batch_size) // if the background class is the last class, we can reduce grid y dim by one auto grid_num_classes = num_classes; //(background_class_id == num_classes - 1) ? num_classes - 1 : num_classes; constexpr int BLOCK_SIZE = GROUP_SIZE; dim3 grid_size(num_groups * num_groups, grid_num_classes, batch_size); dim3 block_size(BLOCK_SIZE); auto policy = execution_policy(grid_size, block_size, stream); if (normalized_bbox) { auto kernel = raw::grid_nms<T, true, BLOCK_SIZE>; launch_kernel(kernel, policy, workspace, count, bboxes, num_classes, background_class_id, topK, topK_gs, nms_threshold); } else { auto kernel = raw::grid_nms<T, false, BLOCK_SIZE>; launch_kernel(kernel, policy, workspace, count, bboxes, num_classes, background_class_id, topK, topK_gs, nms_threshold); } } { // grid = (num_classes, batch_size) // if the background class is the last class, we can reduce grid x dim by one auto grid_num_classes = num_classes; //(background_class_id == num_classes - 1) ? 
num_classes - 1 : num_classes; constexpr int BLOCK_SIZE = 64; constexpr int ITEMS_PER_THREAD = 4; auto kernel = raw::grid_nms_collect<ITEMS_PER_THREAD, BLOCK_SIZE>; dim3 grid_size(grid_num_classes, batch_size); auto sharedMem = topK_gs / 32 * 4; auto policy = execution_policy(grid_size, BLOCK_SIZE, sharedMem, stream); launch_kernel(kernel, policy, indices, count, workspace, num_classes, background_class_id, topK, topK_gs / 32); } } std::size_t getGridNMSWorkspaceSizePerBatchItem(std::size_t num_classes, std::size_t classwise_topK); template void grid_nms(const Stream& stream, Span<unsigned int> workspace, TensorSpan<int> indices, TensorSpan<int> count, TensorView<__half> bboxes, int, bool normalized_bbox, float nms_threshold); template void grid_nms(const Stream& stream, Span<unsigned int> workspace, TensorSpan<int> indices, TensorSpan<int> count, TensorView<float> bboxes, int, bool normalized_bbox, float nms_threshold); }}}} /* namespace cv::dnn::cuda4dnn::kernels */
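// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the kernels above): a host-side version of
// the two-phase scheme that grid_nms / grid_nms_collect implement. Phase 1
// packs the pairwise "IoU does not exceed the threshold" predicate into
// 32-bit words (one bit per candidate j, for every box i); phase 2 walks the
// boxes in score order and ANDs each kept box's row into the set of active
// boxes. The device kernels use a different bit/word layout (topK_gs groups,
// __ballot_sync, __brev); this sketch only mirrors the idea. All names below
// (Box, iou, pack_nms_mask, collect_from_mask) are hypothetical, and boxes
// are assumed to be pre-sorted by score.
// ---------------------------------------------------------------------------
#include <algorithm>
#include <cstdint>
#include <vector>

struct Box { float xmin, ymin, xmax, ymax; };

static float iou(const Box& a, const Box& b) {
  const float ix = std::max(0.f, std::min(a.xmax, b.xmax) - std::max(a.xmin, b.xmin));
  const float iy = std::max(0.f, std::min(a.ymax, b.ymax) - std::max(a.ymin, b.ymin));
  const float inter = ix * iy;
  const float area_a = (a.xmax - a.xmin) * (a.ymax - a.ymin);
  const float area_b = (b.xmax - b.xmin) * (b.ymax - b.ymin);
  return inter > 0.f ? inter / (area_a + area_b - inter) : 0.f;
}

// Phase 1: bit (j % 32) of mask[i * words + j / 32] is set when box j does NOT
// suppress box i (IoU <= threshold), i.e. the "do_not_reject" predicate.
static std::vector<uint32_t> pack_nms_mask(const std::vector<Box>& boxes, float thr) {
  const int n = static_cast<int>(boxes.size());
  const int words = (n + 31) / 32;
  std::vector<uint32_t> mask(static_cast<size_t>(n) * words, 0u);
  for (int i = 0; i < n; ++i)
    for (int j = 0; j < n; ++j)
      if (i == j || iou(boxes[i], boxes[j]) <= thr)
        mask[static_cast<size_t>(i) * words + j / 32] |= (1u << (j % 32));
  return mask;
}

// Phase 2: keep the first still-active box, then AND its mask row into the
// active set so that every box it suppresses drops out of later iterations.
static std::vector<int> collect_from_mask(const std::vector<uint32_t>& mask, int n) {
  const int words = (n + 31) / 32;
  std::vector<uint32_t> active(words, 0xFFFFFFFFu);
  if (n % 32) active[words - 1] = (1u << (n % 32)) - 1;   // trim bits past the last box
  std::vector<int> kept;
  for (int i = 0; i < n; ++i) {
    if (!(active[i / 32] & (1u << (i % 32)))) continue;   // already suppressed
    kept.push_back(i);
    for (int w = 0; w < words; ++w)
      active[w] &= mask[static_cast<size_t>(i) * words + w];
  }
  return kept;
}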
the_stack
#include "cml/cml_blas.cuh" #include "cml/cml_matrix.cuh" #include "cml/cml_vector.cuh" #include "equil_helper.cuh" #include "matrix/matrix.h" #include "matrix/matrix_dense.h" #include "util.h" #include "timer.h" #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/transform_reduce.h> #include <thrust/execution_policy.h> #include <thrust/functional.h> #include <thrust/extrema.h> #include <thrust/pair.h> #include <thrust/advance.h> #include <cmath> #include <limits> #include <thrust/fill.h> #include "../include/cuda_utils.h" namespace h2o4gpu { //////////////////////////////////////////////////////////////////////////////// ////////////////////////////// Helper Functions //////////////////////////////// //////////////////////////////////////////////////////////////////////////////// namespace { // File scoped constants. const NormTypes kNormEquilibrate = kNorm2; const NormTypes kNormNormalize = kNormFro; template<typename T> struct GpuData { const T *orig_data; // pointer to data on CPU cublasHandle_t handle; // handle for data on GPU GpuData(const T *orig_data) : orig_data(orig_data) { cublasCreate(&handle); // fprintf(stderr,"HEREstart: %ld\n",handle); fflush(stderr); DEBUG_CUDA_CHECK_ERR(); } ~GpuData() { // fprintf(stderr,"HEREend: %ld\n",handle); fflush(stderr); if(handle!=NULL) cublasDestroy(handle); DEBUG_CUDA_CHECK_ERR(); } }; cublasOperation_t OpToCublasOp(char trans) { ASSERT(trans == 'n' || trans == 'N' || trans == 't' || trans == 'T'); return trans == 'n' || trans == 'N' ? CUBLAS_OP_N : CUBLAS_OP_T; } template <typename T> T NormEst(cublasHandle_t hdl, NormTypes norm_type, const MatrixDense<T>& A); template <typename T> void MultDiag(const T *d, const T *e, size_t m, size_t n, typename MatrixDense<T>::Ord ord, T *data); } // namespace //////////////////////////////////////////////////////////////////////////////// /////////////////////// MatrixDense Implementation ///////////////////////////// //////////////////////////////////////////////////////////////////////////////// // original MatrixDense where only trainX and no trainY or validX or validY // Used by elastic_net.cpp to pass CPU data and put on GPU template <typename T> MatrixDense<T>::MatrixDense(int sharedA, int wDev, char ord, size_t m, size_t n, const T *data) : Matrix<T>(m, n, 0), _sharedA(sharedA), _wDev(wDev), _datatype(0), _dopredict(0), _data(0), _de(0) { checkwDev(_wDev); CUDACHECK(cudaSetDevice(_wDev)); _me=_wDev; // assume thread same as wDev if not given _datay=NULL; _vdata=NULL; _vdatay=NULL; _weight=NULL; ASSERT(ord == 'r' || ord == 'R' || ord == 'c' || ord == 'C'); _ord = (ord == 'r' || ord == 'R') ? ROW : COL; DEBUG_FPRINTF(stderr,"MatrixDense1: ord=%c m=%d n=%d\n",ord,(int)m,(int)n);fflush(stderr); #ifdef DEBUG // CUDACHECK(cudaSetDeviceFlags(cudaDeviceMapHost)); // TODO: MapHostMemory cudaDeviceProp props; CUDACHECK(cudaGetDeviceProperties(&props, _wDev)); fprintf(stderr,"Using: Compute %d.%d CUDA device: [%s] with id=%2d\n", props.major, props.minor, props.name,wDev); fflush(stderr); #endif // Set GPU specific _info. 
PUSH_RANGE("MDnew",MDnew,1); GpuData<T> *info = new GpuData<T>(data); // new structure (holds pointer to data and GPU handle) this->_info = reinterpret_cast<void*>(info); GpuData<T> *infoy = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle) this->_infoy = reinterpret_cast<void*>(infoy); GpuData<T> *vinfo = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle) this->_vinfo = reinterpret_cast<void*>(vinfo); GpuData<T> *vinfoy = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle) this->_vinfoy = reinterpret_cast<void*>(vinfoy); GpuData<T> *weightinfo = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle) this->_weightinfo = reinterpret_cast<void*>(weightinfo); POP_RANGE("MDnew",MDnew,1); if(!this->_done_alloc){ this->_done_alloc = true; // unlike CPU case, input pointer is always CPU so have to always allocate on GPU when calling this function. So no use of sharedA related to pointer copy like in CPU case. // Copy Matrix to GPU. PUSH_RANGE("MDsend",MDsend,1); // GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info); // cast void -> GpuData double t0 = timer<double>(); cudaMalloc(&_data, this->_m * this->_n * sizeof(T)); // allocate on GPU double t1 = timer<double>(); cudaMemcpy(_data, info->orig_data, this->_m * this->_n * sizeof(T),cudaMemcpyHostToDevice); // copy from orig CPU data to GPU double t2 = timer<double>(); #ifdef DEBUG printf("Time to allocate the data matrix on the GPU: %f\n", t1-t0); printf("Time to copy the data matrix to the GPU : %f\n", t2-t1); #endif cudaMalloc(&_de, (m + n) * sizeof(T)); thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(static_cast<T*>(&_de[0])); T fill_value=0.0; thrust::fill(dev_ptr, dev_ptr + (m + n), fill_value); if(sharedA>0){ Init(); // does nothing right now Equil(1); // JONTODO: Hack for now. Need to pass equil } POP_RANGE("MDsend",MDsend,1); } } template <typename T> MatrixDense<T>::MatrixDense(char ord, size_t m, size_t n, const T *data) : MatrixDense<T>(0, 0, ord, m, n, data){} // assume sharedA=0 and thread=wDev=0 if not given template <typename T> MatrixDense<T>::MatrixDense(int sharedA, int wDev, int datatype, char ord, size_t m, size_t n, T *data) : Matrix<T>(m, n, 0), _sharedA(sharedA), _wDev(wDev), _datatype(datatype), _dopredict(0), _data(0),_de(0) { checkwDev(_wDev); CUDACHECK(cudaSetDevice(_wDev)); _me=_wDev; // assume thread=wDev if not given _datay=NULL; _vdata=NULL; _vdatay=NULL; _weight=NULL; ASSERT(ord == 'r' || ord == 'R' || ord == 'c' || ord == 'C'); _ord = (ord == 'r' || ord == 'R') ? 
ROW : COL; DEBUG_FPRINTF(stderr,"MatrixDense2: ord=%c m=%d n=%d\n",ord,(int)m,(int)n);fflush(stderr); #ifdef DEBUG // CUDACHECK(cudaSetDeviceFlags(cudaDeviceMapHost)); // TODO: MapHostMemory cudaDeviceProp props; CUDACHECK(cudaGetDeviceProperties(&props, _wDev)); fprintf(stderr,"Using: Compute %d.%d CUDA device: [%s] with id=%2d\n", props.major, props.minor, props.name,wDev); fflush(stderr); #endif if(datatype==1){ // input data pointer is already on GPU on this wDev, so just copy pointer // no info->orig_data, so send 0 to GpuData PUSH_RANGE("MDnew",MDnew,1); GpuData<T> *info = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle) this->_info = reinterpret_cast<void*>(info); POP_RANGE("MDnew",MDnew,1); GpuData<T> *infoy = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle) this->_infoy = reinterpret_cast<void*>(infoy); GpuData<T> *vinfo = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle) this->_vinfo = reinterpret_cast<void*>(vinfo); GpuData<T> *vinfoy = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle) this->_vinfoy = reinterpret_cast<void*>(vinfoy); GpuData<T> *weightinfo = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle) this->_weightinfo = reinterpret_cast<void*>(weightinfo); // source pointer is on this GPU // just copy GPU pointer _data = data; if(!this->_done_alloc){ this->_done_alloc = true; cudaMalloc(&_de, (m + n) * sizeof(T)); thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(static_cast<T*>(&_de[0])); T fill_value=0.0; thrust::fill(dev_ptr, dev_ptr + (m + n), fill_value); Init(); // does nothing right now Equil(1); // JONTODO: Hack for now. Need to pass equil } } else{ PUSH_RANGE("MDnew",MDnew,1); GpuData<T> *info = new GpuData<T>(data); // new structure (holds pointer to data and GPU handle) this->_info = reinterpret_cast<void*>(info); GpuData<T> *infoy = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle) this->_infoy = reinterpret_cast<void*>(infoy); GpuData<T> *vinfo = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle) this->_vinfo = reinterpret_cast<void*>(vinfo); GpuData<T> *vinfoy = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle) this->_vinfoy = reinterpret_cast<void*>(vinfoy); GpuData<T> *weightinfo = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle) this->_weightinfo = reinterpret_cast<void*>(weightinfo); POP_RANGE("MDnew",MDnew,1); if(!this->_done_alloc){ this->_done_alloc = true; // Unlike CPU case, can't pointer copy as going from CPU to GPU // Copy CPU Matrix to GPU. PUSH_RANGE("MDsend",MDsend,1); // GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info); // cast void -> GpuData double t0 = timer<double>(); cudaMalloc(&_data, this->_m * this->_n * sizeof(T)); // allocate on GPU double t1 = timer<double>(); cudaMemcpy(_data, info->orig_data, this->_m * this->_n * sizeof(T),cudaMemcpyHostToDevice); // copy from orig CPU data to GPU cudaMalloc(&_de, (m + n) * sizeof(T)); thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(static_cast<T*>(&_de[0])); T fill_value=0.0; thrust::fill(dev_ptr, dev_ptr + (m + n), fill_value); if(sharedA>0){ Init(); // does nothing right now Equil(1); // JONTODO: Hack for now. 
Need to pass equil } double t2 = timer<double>(); #ifdef DEBUG printf("Time to allocate the data matrix on the GPU: %f\n", t1-t0); printf("Time to copy the data matrix to the GPU : %f\n", t2-t1); #endif POP_RANGE("MDsend",MDsend,1); } } } // like original MatrixDense, but also feed in CPU data for trainY, validX, and validY // Used by elastic_net_ptr.cpp to pass CPU data and put on GPU template <typename T> MatrixDense<T>::MatrixDense(int sharedA, int me, int wDev, char ord, size_t m, size_t n, size_t mValid, const T *data, const T *datay, const T *vdata, const T *vdatay, const T *weight) : Matrix<T>(m, n, mValid), _sharedA(sharedA), _me(me), _wDev(wDev), _datatype(0), _dopredict(0), _data(0), _datay(0), _vdata(0), _vdatay(0), _weight(0), _de(0) { checkwDev(_wDev); CUDACHECK(cudaSetDevice(_wDev)); ASSERT(ord == 'r' || ord == 'R' || ord == 'c' || ord == 'C'); _ord = (ord == 'r' || ord == 'R') ? ROW : COL; DEBUG_FPRINTF(stderr,"MatrixDense3: ord=%c m=%d n=%d mValid=%d\n",ord,(int)m,(int)n,int(mValid));fflush(stderr); #ifdef DEBUG // CUDACHECK(cudaSetDeviceFlags(cudaDeviceMapHost)); // TODO: MapHostMemory cudaDeviceProp props; CUDACHECK(cudaGetDeviceProperties(&props, _wDev)); fprintf(stderr,"Using: Compute %d.%d CUDA device: [%s] with id=%2d\n", props.major, props.minor, props.name,wDev); fflush(stderr); #endif // source pointer is on CPU // Set GPU specific _info. PUSH_RANGE("MDnew",MDnew,1); GpuData<T> *info = new GpuData<T>(data); // new structure (holds pointer to data and GPU handle) GpuData<T> *infoy = new GpuData<T>(datay); // new structure (holds pointer to data and GPU handle) GpuData<T> *vinfo = new GpuData<T>(vdata); // new structure (holds pointer to data and GPU handle) GpuData<T> *vinfoy = new GpuData<T>(vdatay); // new structure (holds pointer to data and GPU handle) GpuData<T> *weightinfo = new GpuData<T>(weight); // new structure (holds pointer to data and GPU handle) this->_info = reinterpret_cast<void*>(info); this->_infoy = reinterpret_cast<void*>(infoy); this->_vinfo = reinterpret_cast<void*>(vinfo); this->_vinfoy = reinterpret_cast<void*>(vinfoy); this->_weightinfo = reinterpret_cast<void*>(weightinfo); POP_RANGE("MDnew",MDnew,1); if(!this->_done_alloc){ this->_done_alloc = true; // Unlike CPU case, can't pointer copy even if sharedA!=0 // Copy Matrix to GPU. 
PUSH_RANGE("MDsend",MDsend,1); // GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info); // cast void -> GpuData double t0 = timer<double>(); cudaMalloc(&_data, this->_m * this->_n * sizeof(T)); // allocate on GPU cudaMalloc(&_datay, this->_m * sizeof(T)); // allocate on GPU cudaMalloc(&_vdata, this->_mvalid * this->_n * sizeof(T)); // allocate on GPU cudaMalloc(&_vdatay, this->_mvalid * sizeof(T)); // allocate on GPU cudaMalloc(&_weight, this->_m * sizeof(T)); // allocate on GPU double t1 = timer<double>(); cudaMemcpy(_data, info->orig_data, this->_m * this->_n * sizeof(T),cudaMemcpyHostToDevice); // copy from orig CPU data to GPU if(infoy->orig_data){ cudaMemcpy(_datay, infoy->orig_data, this->_m * sizeof(T),cudaMemcpyHostToDevice); // copy from orig CPU data to GPU _dopredict=0; } else{ _dopredict=1; } if(vinfo->orig_data){ cudaMemcpy(_vdata, vinfo->orig_data, this->_mvalid * this->_n * sizeof(T),cudaMemcpyHostToDevice); // copy from orig CPU data to GPU } else{ if(this->_mvalid>0){ fprintf(stderr,"vinfo->orig_data NULL but this->_mvalid>0\n"); fflush(stderr); exit(1); } } if(vinfoy->orig_data){ cudaMemcpy(_vdatay, vinfoy->orig_data, this->_mvalid * sizeof(T),cudaMemcpyHostToDevice); // copy from orig CPU data to GPU } else{ if(this->_mvalid>0){ fprintf(stderr,"vinfoy->orig_data NULL but this->_mvalid>0\n"); fflush(stderr); exit(1); } } if(weightinfo->orig_data){ cudaMemcpy(_weight, weightinfo->orig_data, this->_m * sizeof(T),cudaMemcpyHostToDevice); // copy from orig CPU data to GPU } else{// if no weights, set as unity weights thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(static_cast<T*>(&_weight[0])); T fill_value=1.0; thrust::fill(dev_ptr, dev_ptr + m, fill_value); } cudaMalloc(&_de, (m + n) * sizeof(T)); thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(static_cast<T*>(&_de[0])); T fill_value=0.0; thrust::fill(dev_ptr, dev_ptr + (m + n), fill_value); if(sharedA>0){ Init(); // does nothing right now Equil(1); // JONTODO: Hack for now. 
Need to pass equil } double t2 = timer<double>(); #ifdef DEBUG printf("Time to allocate the data matrix on the GPU: %f\n", t1-t0); printf("Time to copy the data matrix to the GPU : %f\n", t2-t1); #endif POP_RANGE("MDsend",MDsend,1); } } template <typename T> MatrixDense<T>::MatrixDense(int wDev, char ord, size_t m, size_t n, size_t mValid, const T *data, const T *datay, const T *vdata, const T *vdatay, const T *weight) : MatrixDense<T>(0,wDev,wDev,ord,m,n,mValid,data,datay,vdata,vdatay,weight){} // assume sharedA=0 and source thread=wDev if not given // like original MatrixDense, but also feed in CPU data for trainY, validX, and validY // Used by elastic_net_ptr.cpp to pass CPU data and put on GPU // datatype=0: CPU pointer to data // datatype=1: GPU pointer to data template <typename T> MatrixDense<T>::MatrixDense(int sharedA, int me, int wDev, int datatype, char ord, size_t m, size_t n, size_t mValid, T *data, T *datay, T *vdata, T *vdatay, T *weight) : Matrix<T>(m, n, mValid), _sharedA(sharedA), _me(me), _wDev(wDev), _datatype(datatype), _dopredict(0), _data(0), _datay(0), _vdata(0), _vdatay(0), _weight(0), _de(0) { checkwDev(_wDev); CUDACHECK(cudaSetDevice(_wDev)); DEBUG_FPRINTF(stderr,"%d\n", ord == 'r'); DEBUG_FPRINTF(stderr,"%d\n", ord == 'c'); DEBUG_FPRINTF(stderr,"ord=%c m=%d n=%d mValid=%d\n",ord,(int)m,(int)n,int(mValid)); DEBUG_FPRINTF(stderr,"MatrixDense4: ord=%c m=%d n=%d mValid=%d\n",ord,(int)m,(int)n,int(mValid)); ASSERT(ord == 'r' || ord == 'R' || ord == 'c' || ord == 'C'); _ord = (ord == 'r' || ord == 'R') ? ROW : COL; #ifdef DEBUG // CUDACHECK(cudaSetDeviceFlags(cudaDeviceMapHost)); // TODO: MapHostMemory cudaDeviceProp props; CUDACHECK(cudaGetDeviceProperties(&props, _wDev)); DEBUG_FPRINTF(stderr,"Using: Compute %d.%d CUDA device: [%s] with id=%2d\n", props.major, props.minor, props.name,wDev); #endif if(datatype==1){ // source pointer is on GPU already // Set GPU specific _info. 
PUSH_RANGE("MDnew",MDnew,1); GpuData<T> *info = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle) GpuData<T> *infoy = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle) GpuData<T> *vinfo = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle) GpuData<T> *vinfoy = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle) GpuData<T> *weightinfo = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle) this->_info = reinterpret_cast<void*>(info); this->_infoy = reinterpret_cast<void*>(infoy); this->_vinfo = reinterpret_cast<void*>(vinfo); this->_vinfoy = reinterpret_cast<void*>(vinfoy); this->_weightinfo = reinterpret_cast<void*>(weightinfo); POP_RANGE("MDnew",MDnew,1); // Just copy GPU pointer _data = data; _datay = datay; _vdata = vdata; _vdatay = vdatay; _weight = weight; if(_datay) _dopredict=0; else _dopredict=1; if(_weight==NULL){ DEBUG_FPRINTF(stderr,"datatype=1: making up unity weights: %d %p\n",m,&_weight); CUDACHECK(cudaMalloc(&_weight, m * sizeof(T))); // allocate on GPU thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(static_cast<T*>(&_weight[0])); T fill_value=1.0; thrust::fill(dev_ptr, dev_ptr + m, fill_value); } if(!this->_done_alloc){ CUDACHECK(cudaMalloc(&_de, (m + n) * sizeof(T))); CUDACHECK(cudaDeviceSynchronize()); CUDACHECK(cudaGetLastError()); const thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(_de); const T fill_value=0.0; thrust::fill_n(dev_ptr, m + n, fill_value); if(sharedA>0){ Init(); // does nothing right now Equil(1); // JONTODO: Hack for now. Need to pass equil } this->_done_alloc = true; } } else{ // source pointer is on CPU // Set GPU specific _info. PUSH_RANGE("MDnew",MDnew,1); GpuData<T> *info = new GpuData<T>(data); // new structure (holds pointer to data and GPU handle) GpuData<T> *infoy = new GpuData<T>(datay); // new structure (holds pointer to data and GPU handle) GpuData<T> *vinfo = new GpuData<T>(vdata); // new structure (holds pointer to data and GPU handle) GpuData<T> *vinfoy = new GpuData<T>(vdatay); // new structure (holds pointer to data and GPU handle) GpuData<T> *weightinfo = new GpuData<T>(weight); // new structure (holds pointer to data and GPU handle) this->_info = reinterpret_cast<void*>(info); this->_infoy = reinterpret_cast<void*>(infoy); this->_vinfo = reinterpret_cast<void*>(vinfo); this->_vinfoy = reinterpret_cast<void*>(vinfoy); this->_weightinfo = reinterpret_cast<void*>(weightinfo); POP_RANGE("MDnew",MDnew,1); if(!this->_done_alloc){ this->_done_alloc = true; // Copy CPU Matrix to GPU. 
PUSH_RANGE("MDsend",MDsend,1); // GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info); // cast void -> GpuData double t0 = timer<double>(); cudaMalloc(&_data, this->_m * this->_n * sizeof(T)); // allocate on GPU cudaMalloc(&_datay, this->_m * sizeof(T)); // allocate on GPU cudaMalloc(&_vdata, this->_mvalid * this->_n * sizeof(T)); // allocate on GPU cudaMalloc(&_vdatay, this->_mvalid * sizeof(T)); // allocate on GPU cudaMalloc(&_weight, this->_m * sizeof(T)); // allocate on GPU double t1 = timer<double>(); cudaMemcpy(_data, info->orig_data, this->_m * this->_n * sizeof(T),cudaMemcpyHostToDevice); // copy from orig CPU data to GPU if(infoy->orig_data){ cudaMemcpy(_datay, infoy->orig_data, this->_m * sizeof(T),cudaMemcpyHostToDevice); // copy from orig CPU data to GPU _dopredict=0; } else{ _dopredict=1; } cudaMemcpy(_vdata, vinfo->orig_data, this->_mvalid * this->_n * sizeof(T),cudaMemcpyHostToDevice); // copy from orig CPU data to GPU cudaMemcpy(_vdatay, vinfoy->orig_data, this->_mvalid * sizeof(T),cudaMemcpyHostToDevice); // copy from orig CPU data to GPU if(weightinfo->orig_data){ cudaMemcpy(_weight, weightinfo->orig_data, this->_m * sizeof(T),cudaMemcpyHostToDevice); // copy from orig CPU data to GPU } else{ DEBUG_FPRINTF(stderr,"datatype=0: making up unity weights: %d\n",m); CUDACHECK(cudaMalloc(&_weight, this->_m * sizeof(T))); // allocate on GPU thrust::device_ptr<T> dev_ptr=thrust::device_pointer_cast(static_cast<T*>(_weight)); T fill_value=1.0; thrust::fill(dev_ptr, dev_ptr + this->_m, fill_value); } cudaMalloc(&_de, (m + n) * sizeof(T)); thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(static_cast<T*>(&_de[0])); T fill_value=0.0; thrust::fill(dev_ptr, dev_ptr + (m + n), fill_value); if(sharedA>0){ Init(); // does nothing right now Equil(1); // JONTODO: Hack for now. 
Need to pass equil } double t2 = timer<double>(); #ifdef DEBUG printf("Time to allocate the data matrix on the GPU: %f\n", t1-t0); printf("Time to copy the data matrix to the GPU : %f\n", t2-t1); #endif POP_RANGE("MDsend",MDsend,1); } } } template <typename T> MatrixDense<T>::MatrixDense(int wDev, int datatype, char ord, size_t m, size_t n, size_t mValid, T *data, T *datay, T *vdata, T *vdatay, T *weight) : MatrixDense<T>(0,wDev,wDev,datatype,ord,m,n,mValid,data,datay,vdata,vdatay,weight){} // assume sharedA=0 and thread=wDev if not given // MatrixDense where input actual A object that contains all CPU information, but need to go from 1 GPU to multiple GPU // Used by elastic_net_ptr.cpp inside openmp loop for each core template <typename T> MatrixDense<T>::MatrixDense(int sharedA, int me, int wDev, const MatrixDense<T>& A) : Matrix<T>(A._m, A._n, A._mvalid), _sharedA(sharedA), _me(me), _wDev(wDev), _data(0),_de(0), _ord(A._ord) { checkwDev(_wDev); CUDACHECK(cudaSetDevice(_wDev)); DEBUG_FPRINTF(stderr,"MatrixDense5: ord=%c m=%d n=%d mValid=%d\n",A._ord,A._m,A._n,A._mvalid); PUSH_RANGE("MDnew",MDnew,2); GpuData<T> *info_A = reinterpret_cast<GpuData<T>*>(A._info); // cast from void to GpuData GpuData<T> *infoy_A = reinterpret_cast<GpuData<T>*>(A._infoy); // cast from void to GpuData GpuData<T> *vinfo_A = reinterpret_cast<GpuData<T>*>(A._vinfo); // cast from void to GpuData GpuData<T> *vinfoy_A = reinterpret_cast<GpuData<T>*>(A._vinfoy); // cast from void to GpuData GpuData<T> *weightinfo_A = reinterpret_cast<GpuData<T>*>(A._weightinfo); // cast from void to GpuData GpuData<T> *info; GpuData<T> *infoy; GpuData<T> *vinfo; GpuData<T> *vinfoy; GpuData<T> *weightinfo; if(info_A->orig_data) info = new GpuData<T>(info_A->orig_data); // create new GpuData structure with point to CPU data else info = new GpuData<T>(0); // create new GpuData structure with point to CPU data if(infoy_A->orig_data) infoy = new GpuData<T>(infoy_A->orig_data); // create new GpuData structure with point to CPU data else infoy = new GpuData<T>(0); // create new GpuData structure with point to CPU data if(vinfo_A->orig_data) vinfo = new GpuData<T>(vinfo_A->orig_data); // create new GpuData structure with point to CPU data else vinfo = new GpuData<T>(0); // create new GpuData structure with point to CPU data if(vinfoy_A->orig_data) vinfoy = new GpuData<T>(vinfoy_A->orig_data); // create new GpuData structure with point to CPU data else vinfoy = new GpuData<T>(0); // create new GpuData structure with point to CPU data if(weightinfo_A->orig_data) weightinfo = new GpuData<T>(weightinfo_A->orig_data); // create new GpuData structure with point to CPU data else weightinfo = new GpuData<T>(0); // create new GpuData structure with point to CPU data this->_info = reinterpret_cast<void*>(info); // back to cast as void this->_infoy = reinterpret_cast<void*>(infoy); // back to cast as void this->_vinfo = reinterpret_cast<void*>(vinfo); // back to cast as void this->_vinfoy = reinterpret_cast<void*>(vinfoy); // back to cast as void this->_weightinfo = reinterpret_cast<void*>(weightinfo); // back to cast as void POP_RANGE("MDnew",MDnew,2); if(!this->_done_alloc){ this->_done_alloc = true; if(A._wDev == _wDev && A._me == _me && (A._sharedA==0 || _sharedA==0)){ // if on same device and same thread, just copy pointer DEBUG_FPRINTF(stderr,"ATYPE%d\n",0); _data = A._data; _datay = A._datay; _vdata = A._vdata; _vdatay = A._vdatay; _weight = A._weight; _de = A._de; _dopredict = A._dopredict; // Init(); // this->_done_equil=1; } else if(A._wDev == 
_wDev && A._sharedA!=0 && _sharedA!=0){ // if on same device and sharing memory, then just copy pointer DEBUG_FPRINTF(stderr,"ATYPE%d\n",1); _data = A._data; _datay = A._datay; _vdata = A._vdata; _vdatay = A._vdatay; _weight = A._weight; _de = A._de; _dopredict = A._dopredict; Init(); this->_done_equil=1; } else{ DEBUG_FPRINTF(stderr,"ATYPE%d\n",2); // Copy Matrix to from source GPU to this GPU PUSH_RANGE("MDcopy",MDcopy,1); //GpuData<T> *info = reinterpret_cast<GpuData<T>*>(_info); // cast void -> GpuData double t0 = timer<double>(); if(A._data) cudaMalloc(&_data, A._m * A._n * sizeof(T)); // allocate on GPU if(A._datay) cudaMalloc(&_datay, A._m * sizeof(T)); // allocate on GPU if(A._vdata) cudaMalloc(&_vdata, A._mvalid * A._n * sizeof(T)); // allocate on GPU if(A._vdatay) cudaMalloc(&_vdatay, A._mvalid * sizeof(T)); // allocate on GPU if(A._weight) cudaMalloc(&_weight, A._m * sizeof(T)); // allocate on GPU double t1 = timer<double>(); if(A._data) cudaMemcpyPeer(_data, _wDev, A._data, A._wDev, A._m * A._n * sizeof(T)); // dest: _data destid: _wDev source: A._data sourceid: A._wDev if(A._datay){ cudaMemcpyPeer(_datay, _wDev, A._datay, A._wDev, A._m * sizeof(T)); // dest: _data destid: _wDev source: A._data sourceid: A._wDev _dopredict=0; } else{ _dopredict=1; } if(A._vdata) cudaMemcpyPeer(_vdata, _wDev, A._vdata, A._wDev, A._mvalid * A._n * sizeof(T)); // dest: _data destid: _wDev source: A._data sourceid: A._wDev if(A._vdatay) cudaMemcpyPeer(_vdatay, _wDev, A._vdatay, A._wDev, A._mvalid * sizeof(T)); // dest: _data destid: _wDev source: A._data sourceid: A._wDev if(A._weight) cudaMemcpyPeer(_weight, _wDev, A._weight, A._wDev, A._m * sizeof(T)); // dest: _data destid: _wDev source: A._data sourceid: A._wDev if(A._de) cudaMalloc(&_de, (A._m + A._n) * sizeof(T)); cudaMemcpyPeer(_de, _wDev, A._de, A._wDev, (A._m + A._n) * sizeof(T)); if(sharedA>0){ Init(); Equil(1); } double t2 = timer<double>(); #ifdef DEBUG printf("Time to allocate the data matrix on the GPU: %f\n", t1-t0); printf("Time to copy the data matrix to the GPU : %f\n", t2-t1); #endif POP_RANGE("MDcopy",MDcopy,1); } } } template <typename T> MatrixDense<T>::MatrixDense(int me, int wDev, const MatrixDense<T>& A) : MatrixDense<T>(0, me, wDev, A){} // then assume not sharing memory template <typename T> MatrixDense<T>::MatrixDense(int wDev, const MatrixDense<T>& A) : MatrixDense<T>(wDev, wDev, A){} // then assume thread=wDev for the new matrix (i.e. not input A) template <typename T> MatrixDense<T>::MatrixDense(const MatrixDense<T>& A) : MatrixDense<T>(A._wDev, A){} // then assume same device as input A template <typename T> MatrixDense<T>::~MatrixDense() { // return;//TODO: Some deconstructor issue FIXME. Segfaults after adding weights. Can't find issue. 
checkwDev(_wDev); CUDACHECK(cudaSetDevice(_wDev)); if(1){ GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info); GpuData<T> *infoy = reinterpret_cast<GpuData<T>*>(this->_infoy); GpuData<T> *vinfo = reinterpret_cast<GpuData<T>*>(this->_vinfo); GpuData<T> *vinfoy = reinterpret_cast<GpuData<T>*>(this->_vinfoy); GpuData<T> *weightinfo = reinterpret_cast<GpuData<T>*>(this->_weightinfo); if(info) delete info; this->_info = 0; if(infoy) delete infoy; this->_infoy = 0; if(vinfo) delete vinfo; this->_vinfo = 0; if(vinfoy) delete vinfoy; this->_vinfoy = 0; if(weightinfo) delete weightinfo; this->_weightinfo = 0; } // fprintf(stderr,"HERE1\n"); fflush(stderr); if(0){ // Note that this frees these pointers as soon as MatrixDense constructor goes out of scope, // and might want more fine-grained control over GPU memory if inside (say) high-level python API // If 0 is used, then need to ensure user calls a finish() or something to free memory. If 0, also // allows user to call (say) fit() or fitptr() multiple times if (this->_done_init && _data) { // fprintf(stderr,"Freeing _data: %p\n",(void*)_data); fflush(stderr); cudaFree(_data); this->_data = 0; DEBUG_CUDA_CHECK_ERR(); } // fprintf(stderr,"HERE2\n"); fflush(stderr); if (this->_done_init && _datay) { // fprintf(stderr,"Freeing _datay: %p\n",(void*)_datay); fflush(stderr); cudaFree(_datay); this->_datay = 0; DEBUG_CUDA_CHECK_ERR(); } // fprintf(stderr,"HERE3\n"); fflush(stderr); if (this->_done_init && _vdata) { // fprintf(stderr,"Freeing _vdata: %p\n",(void*)_vdata); fflush(stderr); cudaFree(_vdata); this->_vdata = 0; DEBUG_CUDA_CHECK_ERR(); } // fprintf(stderr,"HERE4\n"); fflush(stderr); if (this->_done_init && _vdatay) { // fprintf(stderr,"Freeing _vdatay: %p\n",(void*)_vdatay); fflush(stderr); cudaFree(_vdatay); this->_vdatay = 0; DEBUG_CUDA_CHECK_ERR(); } // fprintf(stderr,"HERE5\n"); fflush(stderr); if (this->_done_init && _weight) { // fprintf(stderr,"Freeing _weight: %p\n",(void*)_weight); fflush(stderr); cudaFree(_weight); this->_weight = 0; DEBUG_CUDA_CHECK_ERR(); } } // Risky, but looks like we don't use code that may become broken // fprintf(stderr,"HERE6\n"); fflush(stderr); if(this->_done_init && _de && !_sharedA){ // JONTODO: When sharedA=1, only free on sourceme thread and sourcewDev device (can store sourcethread for-- sourceme -- data and only free if on source thread) // fprintf(stderr,"Freeing _de: %p\n",(void*)_weight); fflush(stderr); cudaFree(_de); this->_de=0; DEBUG_CUDA_CHECK_ERR(); } } template <typename T> int MatrixDense<T>::Init() { DEBUG_EXPECT(!this->_done_init); if (this->_done_init) return 1; this->_done_init = true; CUDACHECK(cudaSetDevice(_wDev)); PUSH_RANGE("MDinit",MDinit,1); POP_RANGE("MDinit",MDinit,1); DEBUG_CUDA_CHECK_ERR(); return 0; } template <typename T> int MatrixDense<T>::GetTrainX(int datatype, size_t size, T**data) const { CUDACHECK(cudaSetDevice(_wDev)); if(_data){ if(datatype==1){ cudaMemcpy(*data, _data, size* sizeof(T),cudaMemcpyDeviceToHost); CUDA_CHECK_ERR(); } else{ std::memcpy(*data, _data, size * sizeof(T)); } return(0); } else return(1); } template <typename T> int MatrixDense<T>::GetTrainY(int datatype, size_t size, T**data) const { CUDACHECK(cudaSetDevice(_wDev)); if(_datay){ if(datatype==1){ cudaMemcpy(*data, _datay, size* sizeof(T),cudaMemcpyDeviceToHost); CUDA_CHECK_ERR(); } else{ std::memcpy(*data, _datay, size * sizeof(T)); } return(0); } else return(1); } template <typename T> int MatrixDense<T>::GetValidX(int datatype, size_t size, T**data) const { 
CUDACHECK(cudaSetDevice(_wDev)); if(_vdata){ if(datatype==1){ cudaMemcpy(*data, _vdata, size* sizeof(T),cudaMemcpyDeviceToHost); CUDA_CHECK_ERR(); } else{ std::memcpy(*data, _vdata, size * sizeof(T)); } return(0); } else return(1); } template <typename T> int MatrixDense<T>::GetValidY(int datatype, size_t size, T**data) const { CUDACHECK(cudaSetDevice(_wDev)); if(_vdatay){ if(datatype==1){ cudaMemcpy(*data, _vdatay, size* sizeof(T),cudaMemcpyDeviceToHost); CUDA_CHECK_ERR(); } else{ std::memcpy(*data, _vdatay, size * sizeof(T)); } return(0); } else return(1); } template <typename T> int MatrixDense<T>::GetWeight(int datatype, size_t size, T**data) const { CUDACHECK(cudaSetDevice(_wDev)); if(_weight){ if(datatype==1){ cudaMemcpy(*data, _weight, size* sizeof(T),cudaMemcpyDeviceToHost); CUDA_CHECK_ERR(); } else{ std::memcpy(*data, _weight, size * sizeof(T)); } return(0); } else return(1); } template <typename T> int MatrixDense<T>::Mul(char trans, T alpha, const T *x, T beta, T *y) const { DEBUG_EXPECT(this->_done_init); if (!this->_done_init) return 1; CUDACHECK(cudaSetDevice(_wDev)); GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info); cublasHandle_t hdl = info->handle; const cml::vector<T> x_vec = cml::vector_view_array<T>(x, this->_n); cml::vector<T> y_vec = cml::vector_view_array<T>(y, this->_m); // Performs the matrix-vector operations y := alpha*A*x + beta*y or y := alpha*A'*x + beta*y where alpha and beta are scalars, x and y are vectors and A is an m by n matrix // _data is A on GPU //https://docs.oracle.com/cd/B19306_01/appdev.102/b14258/u_nla.htm#CIAFEAFG if (_ord == ROW) { cml::matrix<T, CblasRowMajor> A = cml::matrix_view_array<T, CblasRowMajor>(_data, this->_m, this->_n); cml::blas_gemv(hdl, OpToCublasOp(trans), alpha, &A, &x_vec, beta, &y_vec); } else { cml::matrix<T, CblasColMajor> A = cml::matrix_view_array<T, CblasColMajor>(_data, this->_m, this->_n); cml::blas_gemv(hdl, OpToCublasOp(trans), alpha, &A, &x_vec, beta, &y_vec); } CUDA_CHECK_ERR(); return 0; } template <typename T> int MatrixDense<T>::Mulvalid(char trans, T alpha, const T *x, T beta, T *y) const { DEBUG_EXPECT(this->_done_init); if (!this->_done_init) return 1; CUDACHECK(cudaSetDevice(_wDev)); GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info); cublasHandle_t hdl = info->handle; const cml::vector<T> x_vec = cml::vector_view_array<T>(x, this->_n); cml::vector<T> y_vec = cml::vector_view_array<T>(y, this->_mvalid); // Performs the matrix-vector operations y := alpha*A*x + beta*y or y := alpha*A'*x + beta*y where alpha and beta are scalars, x and y are vectors and A is an m by n matrix // _vdata is A on GPU //https://docs.oracle.com/cd/B19306_01/appdev.102/b14258/u_nla.htm#CIAFEAFG if (_ord == ROW) { cml::matrix<T, CblasRowMajor> A = cml::matrix_view_array<T, CblasRowMajor>(_vdata, this->_mvalid, this->_n); cml::blas_gemv(hdl, OpToCublasOp(trans), alpha, &A, &x_vec, beta, &y_vec); } else { cml::matrix<T, CblasColMajor> A = cml::matrix_view_array<T, CblasColMajor>(_vdata, this->_mvalid, this->_n); cml::blas_gemv(hdl, OpToCublasOp(trans), alpha, &A, &x_vec, beta, &y_vec); } CUDA_CHECK_ERR(); return 0; } // col-major order (fortran) A, but still print as row major template <typename T> void printMatrix(int m, int n, const T*A, int lda, const char* name) { printf("rows=%d cols=%d lda=%d\n",m,n,lda); for(int row = 0 ; row < m ; row++){ for(int col = 0 ; col < n ; col++){ T Areg = A[row + col*lda]; printf("%s(%d,%d) = %f\n", name, row+1, col+1, Areg); } } } // row-major order (c) A printed as row 
major template <typename T> void printMatrix2(int m, int n, const T*A, int lda, const char* name) { printf("rows=%d cols=%d lda=%d\n",m,n,lda); for(int row = 0 ; row < m ; row++){ for(int col = 0 ; col < n ; col++){ T Areg = A[col + row*n]; printf("%s(%d,%d) = %f\n", name, row+1, col+1, Areg); } } } /* * How to compile (assume cuda is installed at /usr/local/cuda/) * nvcc -c -I/usr/local/cuda/include svd_example.cpp * g++ -fopenmp -o a.out svd_example.o -L/usr/local/cuda/lib64 -lcudart -lcublas -lcusolver * */ inline cusolverStatus_t cusolverDngesvd ( cusolverDnHandle_t handle, signed char jobu, signed char jobvt, int m, int n, float *A, int lda, float *S, float *U, int ldu, float *VT, int ldvt, float *work, int lwork, float *rwork, int *devInfo){ return(cusolverDnSgesvd(handle, jobu, jobvt, m, n, A, lda, S, U, ldu, VT, ldvt, work, lwork, rwork, devInfo)); } inline cusolverStatus_t cusolverDngesvd ( cusolverDnHandle_t handle, signed char jobu, signed char jobvt, int m, int n, double *A, int lda, double *S, double *U, int ldu, double *VT, int ldvt, double *work, int lwork, double *rwork, int *devInfo){ return(cusolverDnDgesvd(handle, jobu, jobvt, m, n, A, lda, S, U, ldu, VT, ldvt, work, lwork, rwork, devInfo)); } inline cublasStatus_t cublasgemm(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, const float *alpha, const float *A, int lda, const float *B, int ldb, const float *beta, float *C, int ldc){ return(cublasSgemm_v2(handle, transa, transb, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc)); } inline cublasStatus_t cublasgemm(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, const double *alpha, const double *A, int lda, const double *B, int ldb, const double *beta, double *C, int ldc){ return(cublasDgemm_v2(handle, transa, transb, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc)); } inline cublasStatus_t cublasdgmm(cublasHandle_t handle, cublasSideMode_t mode, int m, int n, const float *A, int lda, const float *x, int incx, float *C, int ldc){ return(cublasSdgmm(handle, mode, m, n, A, lda, x, incx, C, ldc)); } inline cublasStatus_t cublasdgmm(cublasHandle_t handle, cublasSideMode_t mode, int m, int n, const double *A, int lda, const double *x, int incx, double *C, int ldc){ return(cublasDdgmm(handle, mode, m, n, A, lda, x, incx, C, ldc)); } inline cublasStatus_t cublasnrm2(cublasHandle_t handle, int n, const double *x, int incx, double *result){ return(cublasDnrm2_v2(handle, n, x, incx, result)); } inline cublasStatus_t cublasnrm2(cublasHandle_t handle, int n, const float *x, int incx, float *result){ return(cublasSnrm2_v2(handle, n, x, incx, result)); } // // Each block transposes/copies a tile of TILE_DIM x TILE_DIM elements // // using TILE_DIM x BLOCK_ROWS threads, so that each thread transposes // // TILE_DIM/BLOCK_ROWS elements. 
TILE_DIM must be an integral multiple of BLOCK_ROWS #define TILE_DIM 16 #define BLOCK_ROWS 16 // __global__ void transposeNaive(float *odata, float* idata, // int width, int height) // { // int xIndex = blockIdx.x*TILE_DIM + threadIdx.x; // int yIndex = blockIdx.y*TILE_DIM + threadIdx.y; // int index_in = xIndex + width * yIndex; // int index_out = yIndex + height * xIndex; // for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { // odata[index_out+i] = idata[index_in+i*width]; // } // } // __global__ void transposeNaive(double *odata, double* idata, // int width, int height) // { // int xIndex = blockIdx.x*TILE_DIM + threadIdx.x; // int yIndex = blockIdx.y*TILE_DIM + threadIdx.y; // int index_in = xIndex + width * yIndex; // int index_out = yIndex + height * xIndex; // for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { // odata[index_out+i] = idata[index_in+i*width]; // } // } // __global__ void transposeCoalesced(float *odata, // float *idata, int width, int height) // { // __shared__ float tile[TILE_DIM][TILE_DIM]; // int xIndex = blockIdx.x*TILE_DIM + threadIdx.x; // int yIndex = blockIdx.y*TILE_DIM + threadIdx.y; // int index_in = xIndex + (yIndex)*width; // xIndex = blockIdx.y * TILE_DIM + threadIdx.x; // yIndex = blockIdx.x * TILE_DIM + threadIdx.y; // int index_out = xIndex + (yIndex)*height; // for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { // tile[threadIdx.y+i][threadIdx.x] = // idata[index_in+i*width]; // } // __syncthreads(); // for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { // odata[index_out+i*height] = // tile[threadIdx.x][threadIdx.y+i]; // } // } // __global__ void transposeCoalesced(double *odata, // double *idata, int width, int height) // { // __shared__ double tile[TILE_DIM][TILE_DIM]; // int xIndex = blockIdx.x*TILE_DIM + threadIdx.x; // int yIndex = blockIdx.y*TILE_DIM + threadIdx.y; // int index_in = xIndex + (yIndex)*width; // xIndex = blockIdx.y * TILE_DIM + threadIdx.x; // yIndex = blockIdx.x * TILE_DIM + threadIdx.y; // int index_out = xIndex + (yIndex)*height; // for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { // tile[threadIdx.y+i][threadIdx.x] = // idata[index_in+i*width]; // } // __syncthreads(); // for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { // odata[index_out+i*height] = // tile[threadIdx.x][threadIdx.y+i]; // } // } // in-place transpose for row-major matrix on device of A[m][n] void cudaintranspose(float *odata, float *idata, int m, int n){ cudaError_t cudaStat1 = cudaSuccess; cudaStat1 = cudaMemcpy(odata, idata, sizeof(float)*m*n, cudaMemcpyDeviceToDevice); assert(cudaSuccess == cudaStat1); float const alpha(1.0); float const beta(0.0); cublasHandle_t handle; cublasCreate(&handle); cublasSgeam( handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, &alpha, idata, n, &beta, idata, m, odata, m ); cublasDestroy(handle); } void cudaintranspose(double *odata, double *idata, int m, int n){ cudaError_t cudaStat1 = cudaSuccess; cudaStat1 = cudaMemcpy(odata, idata, sizeof(double)*m*n, cudaMemcpyDeviceToDevice); assert(cudaSuccess == cudaStat1); double const alpha(1.0); double const beta(0.0); cublasHandle_t handle; cublasCreate(&handle); cublasDgeam( handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, &alpha, idata, n, &beta, idata, m, odata, m ); cublasDestroy(handle); } #define MIN(a,b) ((a)<(b) ? 
(a) : (b)) template <typename T> int MatrixDense<T>::svd1(void) { fprintf(stderr,"begin svd inside0\n"); fflush(stderr); fflush(stdout); DEBUG_ASSERT(this->_done_init); if (!this->_done_init) Init(); fprintf(stderr,"begin svd inside\n"); fflush(stderr); fflush(stdout); cusolverDnHandle_t cusolverH = NULL; cublasHandle_t cublasH = NULL; cublasStatus_t cublas_status = CUBLAS_STATUS_SUCCESS; cusolverStatus_t cusolver_status = CUSOLVER_STATUS_SUCCESS; cudaError_t cudaStat1 = cudaSuccess; cudaError_t cudaStat2 = cudaSuccess; cudaError_t cudaStat3 = cudaSuccess; cudaError_t cudaStat4 = cudaSuccess; cudaError_t cudaStat5 = cudaSuccess; cudaError_t cudaStat6 = cudaSuccess; int m = this->_m; int n = this->_n; // const int m = this->_m; // const int n = this->_n; int lda = m; /* | 1 2 | * A = | 4 5 | * | 2 1 | */ unsigned char ord='r'; // TODO; should be inputted // original device vector T *d_A0; d_A0 = this->_data; // device vectors T *d_A = NULL; T *d_S = NULL; T *d_U = NULL; T *d_VT = NULL; int *devInfo = NULL; T *d_work = NULL; T *d_rwork = NULL; T *d_W = NULL; // W = S*VT int lwork = 0; int info_gpu = 0; const T h_one = 1; const T h_minus_one = -1; double t0 = timer<double>(); // step 1: create cusolverDn/cublas handle cusolver_status = cusolverDnCreate(&cusolverH); assert(CUSOLVER_STATUS_SUCCESS == cusolver_status); cublas_status = cublasCreate(&cublasH); assert(CUBLAS_STATUS_SUCCESS == cublas_status); fprintf(stderr,"HERE1\n"); fflush(stderr); fflush(stdout); // step 2: copy A to device // cudaStat1 = cudaMalloc ((void**)&d_A , sizeof(T)*lda*n); // svd destroys d_A, so make copy for testing error // OPTMARK cudaStat1 = cudaMalloc ((void**)&d_A , sizeof(T)*lda*n); assert(cudaSuccess == cudaStat1); cudaStat1 = cudaMemcpy(d_A, d_A0, sizeof(T)*lda*n, cudaMemcpyDeviceToDevice); assert(cudaSuccess == cudaStat1); cudaStat1 = cudaDeviceSynchronize(); assert(cudaSuccess == cudaStat1); int ldu=m; //lda; int ldureal=n; // actual storage int ldvt=n; if(ord=='r'){ // transpose // execution configuration parameters //dim3 grid(n/TILE_DIM, lda/TILE_DIM), threads(TILE_DIM,BLOCK_ROWS); // transposeCoalesced<<<grid, threads>>>(d_A, d_A0, n, lda); // transposeNaive<<<grid, threads>>>(d_A, d_A0, n, lda); cudaintranspose(d_A,d_A0,m,n); // OPTMARK cudaStat1 = cudaDeviceSynchronize(); assert(cudaSuccess == cudaStat1); // below debug only for printMatrix2 to view, shouldn't actually swap for use. 
if(0){ int temp=m; m=n; n=temp; lda=m; ldu=m; //lda; ldureal=n; // actual storage ldvt=n; } } else{ d_A = d_A0; } fprintf(stderr,"HERE PRE\n"); fflush(stderr); fflush(stdout); // old host side vectors // T A[lda*n] = { 1.0, 4.0, 2.0, 2.0, 5.0, 1.0}; // GpuData<T> *info_A = reinterpret_cast<GpuData<T>*>(this->_info); // cast from void to GpuData // T *A = const_cast<T*>(info_A->orig_data); #if(0) T A[lda*n]; // for debug T U[ldureal*m]; // m-by-m unitary matrix T VT[ldvt*n]; // n-by-n unitary matrix T S[MIN(n,m)]; // singular value #endif // T S_exact[n] = {7.065283497082729, 1.040081297712078}; fprintf(stderr,"HERE POST\n"); fflush(stderr); fflush(stdout); // now d_A has column-major order matrix fprintf(stderr,"HERE2\n"); fflush(stderr); fflush(stdout); #if(0) // debug cudaStat1 = cudaMemcpy(A, d_A, sizeof(T)*lda*n, cudaMemcpyDeviceToHost); assert(cudaSuccess == cudaStat1); cudaStat1 = cudaDeviceSynchronize(); assert(cudaSuccess == cudaStat1); printf("A = (matlab base-1)\n"); printMatrix(m, n, A, lda, "A"); printf("=====\n"); printf("A = (matlab base-1)\n"); printMatrix2(m, n, A, lda, "A"); printf("=====\n"); #endif fprintf(stderr,"HERE3\n"); fflush(stderr); fflush(stdout); cudaStat2 = cudaMalloc ((void**)&d_S , sizeof(T)*MIN(n,m)); cudaStat3 = cudaMalloc ((void**)&d_U , sizeof(T)*ldureal*m); cudaStat4 = cudaMalloc ((void**)&d_VT , sizeof(T)*ldvt*n); cudaStat5 = cudaMalloc ((void**)&devInfo, sizeof(int)); cudaStat6 = cudaMalloc ((void**)&d_W , sizeof(T)*lda*n); // assert(cudaSuccess == cudaStat1); assert(cudaSuccess == cudaStat2); assert(cudaSuccess == cudaStat3); assert(cudaSuccess == cudaStat4); assert(cudaSuccess == cudaStat5); assert(cudaSuccess == cudaStat6); // host->device // cudaStat1 = cudaMemcpy(d_A, A, sizeof(T)*lda*n, cudaMemcpyHostToDevice); // assert(cudaSuccess == cudaStat1); // step 3: query working space of SVD //The dense matrices are assumed to be stored in column-major order in memory. 
cusolver_status = cusolverDnDgesvd_bufferSize( cusolverH, m, n, &lwork ); assert (cusolver_status == CUSOLVER_STATUS_SUCCESS); cudaStat1 = cudaMalloc((void**)&d_work , sizeof(T)*lwork); assert(cudaSuccess == cudaStat1); double t1 = timer<double>(); fprintf(stderr,"SVD init: %g\n",t1-t0); fflush(stderr); fflush(stdout); // step 4: compute SVD double t0c = timer<double>(); signed char jobu = 'A'; // all m columns of U signed char jobvt = 'A'; // all n columns of VT cusolver_status = cusolverDngesvd( cusolverH, jobu, jobvt, m, n, d_A, lda, d_S, d_U, ldu, d_VT, ldvt, d_work, lwork, d_rwork, devInfo); cudaStat4 = cudaMemcpy(&info_gpu, devInfo, sizeof(int), cudaMemcpyDeviceToHost); printf("after gesvd: info_gpu = %d\n", info_gpu); fflush(stdout); assert(0 == info_gpu); printf("=====\n"); fflush(stdout); cudaStat1 = cudaDeviceSynchronize(); assert(cudaSuccess == cudaStat1); fprintf(stderr,"BAD: %d\n",cusolver_status); fflush(stderr); assert(CUSOLVER_STATUS_SUCCESS == cusolver_status); double t1c = timer<double>(); fprintf(stderr,"SVD compute: %g\n",t1-t0); fflush(stderr); fflush(stdout); #if(0) ///////////////////////// // Copy solution device->host double t0h = timer<double>(); cudaStat1 = cudaMemcpy(U , d_U , sizeof(T)*ldureal*m, cudaMemcpyDeviceToHost); cudaStat2 = cudaMemcpy(VT, d_VT, sizeof(T)*ldvt*n, cudaMemcpyDeviceToHost); cudaStat3 = cudaMemcpy(S , d_S , sizeof(T)*MIN(n,m), cudaMemcpyDeviceToHost); assert(cudaSuccess == cudaStat1); assert(cudaSuccess == cudaStat2); assert(cudaSuccess == cudaStat3); assert(cudaSuccess == cudaStat4); if(0){ // debug printf("S = (matlab base-1)\n"); printMatrix(n, 1, S, lda, "S"); printf("=====\n"); printf("U = (matlab base-1)\n"); printMatrix(m, m, U, ldureal, "U"); printf("=====\n"); printf("VT = (matlab base-1)\n"); printMatrix(n, n, VT, ldvt, "VT"); printf("=====\n"); ///////////////////////// // measure error of singular value // T ds_sup = 0; // for(int j = 0; j < n; j++){ // T err = fabs( S[j] - S_exact[j] ); // ds_sup = (ds_sup > err)? 
ds_sup : err; // } // printf("|S - S_exact| = %E \n", ds_sup); } double t1h = timer<double>(); fprintf(stderr,"SVD back to host: %g\n",t1h-t0h); fflush(stderr); fflush(stdout); #endif ///////////////////////// // now check double t0c1 = timer<double>(); // step 5: |A - U*S*VT| // W = S*VT cublas_status = cublasdgmm( cublasH, CUBLAS_SIDE_LEFT, n, n, d_VT, ldvt, d_S, 1, d_W, lda); assert(CUBLAS_STATUS_SUCCESS == cublas_status); double t1c1 = timer<double>(); fprintf(stderr,"SVD check1: %g\n",t1c1-t0c1); fflush(stderr); fflush(stdout); // A := -U*W + A double t0c2 = timer<double>(); cudaStat1 = cudaMemcpy(d_A, d_A0, sizeof(T)*lda*n, cudaMemcpyDeviceToDevice); // copy because original d_A was destroyed assert(cudaSuccess == cudaStat1); cublas_status = cublasgemm( cublasH, CUBLAS_OP_N, // U CUBLAS_OP_N, // W m, // number of rows of A n, // number of columns of A n, // number of columns of U &h_minus_one, /* host pointer */ d_U, // U ldu, d_W, // W lda, &h_one, /* hostpointer */ d_A, lda); assert(CUBLAS_STATUS_SUCCESS == cublas_status); double t1c2 = timer<double>(); fprintf(stderr,"SVD check2: %g\n",t1c2-t0c2); fflush(stderr); fflush(stdout); double t0c3 = timer<double>(); T dR_fro = 0.0; cublas_status = cublasnrm2( cublasH, lda*n, d_A, 1, &dR_fro); assert(CUBLAS_STATUS_SUCCESS == cublas_status); printf("|A - U*S*VT| = %E \n", dR_fro); fflush(stdout); double t1c3 = timer<double>(); fprintf(stderr,"SVD check3: %g\n",t1c3-t0c3); fflush(stderr); fflush(stdout); // free resources double t0f = timer<double>(); //if (d_A ) cudaFree(d_A); if (d_S ) cudaFree(d_S); if (d_U ) cudaFree(d_U); if (d_VT ) cudaFree(d_VT); if (devInfo) cudaFree(devInfo); if (d_work ) cudaFree(d_work); if (d_rwork) cudaFree(d_rwork); if (d_W ) cudaFree(d_W); if (cublasH ) cublasDestroy(cublasH); if (cusolverH) cusolverDnDestroy(cusolverH); // cudaDeviceReset(); double t1f = timer<double>(); fprintf(stderr,"SVD free: %g\n",t1f-t0f); fflush(stderr); fflush(stdout); fprintf(stderr,"end svd inside\n"); fflush(stderr); fflush(stdout); return 0; } // Equilibration (precondition) matrix using Sinkhorn Knopp method wrapped to allow any norm // See https://arxiv.org/pdf/1610.03871.pdf for more information template <typename T> int MatrixDense<T>::Equil(bool equillocal) { DEBUG_ASSERT(this->_done_init); if (!this->_done_init) return 1; if (this->_done_equil) return 0; else this->_done_equil=1; CUDACHECK(cudaSetDevice(_wDev)); // Extract cublas handle from _info. GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info); cublasHandle_t hdl = info->handle; T *d = _de; T *e = d + this->_m; // Number of elements in matrix. size_t num_el = this->_m * this->_n; // Create bit-vector with signs of entries in A and then let A = f(A), // where f = |A| or f = |A|.^2. unsigned char *sign = NULL; size_t num_sign_bytes = (num_el + 7) / 8; cudaMalloc(&sign, num_sign_bytes); CUDA_CHECK_ERR(); size_t num_chars = num_el / 8; size_t grid_size = cml::calc_grid_dim(num_chars, cml::kBlockSize); if(equillocal){ // Fill sign bits, assigning each thread a multiple of 8 elements. if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) { __SetSign<<<grid_size, cml::kBlockSize>>>(_data, sign, num_chars, SquareF<T>()); } else { __SetSign<<<grid_size, cml::kBlockSize>>>(_data, sign, num_chars, AbsF<T>()); } wrapcudaDeviceSynchronize(); CUDA_CHECK_ERR(); // If numel(A) is not a multiple of 8, then we need to set the last couple // of sign bits too. 
if (num_el > num_chars * 8) { if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) { __SetSignSingle<<<1, 1>>>(_data + num_chars * 8, sign + num_chars, num_el - num_chars * 8, SquareF<T>()); } else { __SetSignSingle<<<1, 1>>>(_data + num_chars * 8, sign + num_chars, num_el - num_chars * 8, AbsF<T>()); } wrapcudaDeviceSynchronize(); CUDA_CHECK_ERR(); } } // Perform Sinkhorn-Knopp equilibration to obtain a doubly stochastic matrix. SinkhornKnopp(this, d, e, equillocal); wrapcudaDeviceSynchronize(); if(equillocal){ // Transform A = sign(A) .* sqrt(A) if 2-norm equilibration was performed, // or A = sign(A) .* A if the 1-norm was equilibrated. if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) { __UnSetSign<<<grid_size, cml::kBlockSize>>>(_data, sign, num_chars, SqrtF<T>()); } else { __UnSetSign<<<grid_size, cml::kBlockSize>>>(_data, sign, num_chars, IdentityF<T>()); } wrapcudaDeviceSynchronize(); CUDA_CHECK_ERR(); // Deal with last few entries if num_el is not a multiple of 8. if (num_el > num_chars * 8) { if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) { __UnSetSignSingle<<<1, 1>>>(_data + num_chars * 8, sign + num_chars, num_el - num_chars * 8, SqrtF<T>()); } else { __UnSetSignSingle<<<1, 1>>>(_data + num_chars * 8, sign + num_chars, num_el - num_chars * 8, IdentityF<T>()); } wrapcudaDeviceSynchronize(); CUDA_CHECK_ERR(); } } // Compute D := sqrt(D), E := sqrt(E), if 2-norm was equilibrated. if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) { thrust::transform(thrust::device_pointer_cast(d), thrust::device_pointer_cast(d + this->_m), thrust::device_pointer_cast(d), SqrtF<T>()); thrust::transform(thrust::device_pointer_cast(e), thrust::device_pointer_cast(e + this->_n), thrust::device_pointer_cast(e), SqrtF<T>()); wrapcudaDeviceSynchronize(); CUDA_CHECK_ERR(); } // Compute A := D * A * E. MultDiag(d, e, this->_m, this->_n, _ord, _data); wrapcudaDeviceSynchronize(); CUDA_CHECK_ERR(); // Scale A to have norm of 1 (in the kNormNormalize norm). T normA = NormEst(hdl, kNormNormalize, *this); CUDA_CHECK_ERR(); wrapcudaDeviceSynchronize(); cml::vector<T> a_vec = cml::vector_view_array(_data, num_el); cml::vector_scale(&a_vec, 1 / normA); wrapcudaDeviceSynchronize(); // Scale d and e to account for normalization of A. cml::vector<T> d_vec = cml::vector_view_array<T>(d, this->_m); cml::vector<T> e_vec = cml::vector_view_array<T>(e, this->_n); cml::vector_scale(&d_vec, 1 / sqrt(normA)); cml::vector_scale(&e_vec, 1 / sqrt(normA)); wrapcudaDeviceSynchronize(); DEBUG_PRINTF("norm A = %e, normd = %e, norme = %e\n", normA, cml::blas_nrm2(hdl, &d_vec), cml::blas_nrm2(hdl, &e_vec)); cudaFree(sign); CUDA_CHECK_ERR(); return 0; } // This example computes several statistical properties of a data // series in a single reduction. The algorithm is described in detail here: // http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm // // Thanks to Joseph Rhoads for contributing this example // structure used to accumulate the moments and other // statistical properties encountered so far. 
template <typename T> struct summary_stats_data { T n; T min; T max; T mean; T M2; T M3; T M4; // initialize to the identity element void initialize() { n = mean = M2 = M3 = M4 = 0; min = std::numeric_limits<T>::max(); max = std::numeric_limits<T>::min(); } T variance() { return M2 / (n - 1); } T variance_n() { return M2 / n; } T skewness() { return std::sqrt(n) * M3 / std::pow(M2, (T) 1.5); } T kurtosis() { return n * M4 / (M2 * M2); } }; // stats_unary_op is a functor that takes in a value x and // returns a variace_data whose mean value is initialized to x. template <typename T> struct summary_stats_unary_op { __host__ __device__ summary_stats_data<T> operator()(const T& x) const { summary_stats_data<T> result; result.n = 1; result.min = x; result.max = x; result.mean = x; result.M2 = 0; result.M3 = 0; result.M4 = 0; return result; } }; // summary_stats_binary_op is a functor that accepts two summary_stats_data // structs and returns a new summary_stats_data which are an // approximation to the summary_stats for // all values that have been agregated so far template <typename T> struct summary_stats_binary_op : public thrust::binary_function<const summary_stats_data<T>&, const summary_stats_data<T>&, summary_stats_data<T> > { __host__ __device__ summary_stats_data<T> operator()(const summary_stats_data<T>& x, const summary_stats_data <T>& y) const { summary_stats_data<T> result; // precompute some common subexpressions T n = x.n + y.n; T n2 = n * n; T n3 = n2 * n; T delta = y.mean - x.mean; T delta2 = delta * delta; T delta3 = delta2 * delta; T delta4 = delta3 * delta; //Basic number of samples (n), min, and max result.n = n; result.min = thrust::min(x.min, y.min); result.max = thrust::max(x.max, y.max); result.mean = x.mean + delta * y.n / n; result.M2 = x.M2 + y.M2; result.M2 += delta2 * x.n * y.n / n; result.M3 = x.M3 + y.M3; result.M3 += delta3 * x.n * y.n * (x.n - y.n) / n2; result.M3 += (T) 3.0 * delta * (x.n * y.M2 - y.n * x.M2) / n; result.M4 = x.M4 + y.M4; result.M4 += delta4 * x.n * y.n * (x.n * x.n - x.n * y.n + y.n * y.n) / n3; result.M4 += (T) 6.0 * delta2 * (x.n * x.n * y.M2 + y.n * y.n * x.M2) / n2; result.M4 += (T) 4.0 * delta * (x.n * y.M3 - y.n * x.M3) / n; return result; } }; template <typename Iterator> void print_range(const std::string& name, Iterator first, Iterator last) { typedef typename std::iterator_traits<Iterator>::value_type T; std::cout << name << ": "; thrust::copy(first, last, std::ostream_iterator<T>(std::cout, " ")); std::cout << "\n"; } template<typename T> struct absolute_value : public thrust::unary_function<T,T> { __host__ __device__ T operator()(const T &x) const { return x < T(0) ? 
-x : x; } }; // --- Operator for testing nan values template<typename T> struct isnan_test { __host__ __device__ bool operator()(const T a) const { return isnan(a) || isinf(a); } }; // check properties of input data template <typename T> int MatrixDense<T>::Stats(int intercept, T *min, T *max, T *mean, T *var, T *sd, T *skew, T *kurt, T &lambda_max0) { CUDACHECK(cudaSetDevice(_wDev)); if(_data!=NULL) {// check for nan or inf in data thrust::device_ptr<T> begin = thrust::device_pointer_cast(_data); thrust::device_ptr<T> end = thrust::device_pointer_cast(_data+this->_m*this->_n); bool h_result = thrust::transform_reduce(begin, end, isnan_test<T>(), 0, thrust::plus<bool>()); if(h_result==true){ fprintf(stderr,"Data matrix (trainX) has nan/inf or missing was not encoded\n"); fflush(stderr); exit(1); } } if(_datay!=NULL) {// check for nan or inf in data thrust::device_ptr<T> begin = thrust::device_pointer_cast(_datay); thrust::device_ptr<T> end = thrust::device_pointer_cast(_datay+this->_m); bool h_result = thrust::transform_reduce(begin, end, isnan_test<T>(), 0, thrust::plus<bool>()); if(h_result==true){ fprintf(stderr,"Data training predictions/labels (trainY) has nan/inf or missing was not encoded\n"); fflush(stderr); exit(1); } } if(_vdata!=NULL) {// check for nan or inf in data thrust::device_ptr<T> begin = thrust::device_pointer_cast(_vdata); thrust::device_ptr<T> end = thrust::device_pointer_cast(_vdata+this->_mvalid*this->_n); bool h_result = thrust::transform_reduce(begin, end, isnan_test<T>(), 0, thrust::plus<bool>()); if(h_result==true){ fprintf(stderr,"Validation Data matrix (validX) has nan/inf or missing was not encoded\n"); fflush(stderr); exit(1); } } if(_vdatay!=NULL) {// check for nan or inf in data thrust::device_ptr<T> begin = thrust::device_pointer_cast(_vdatay); thrust::device_ptr<T> end = thrust::device_pointer_cast(_vdatay+this->_mvalid); bool h_result = thrust::transform_reduce(begin, end, isnan_test<T>(), 0, thrust::plus<bool>()); if(h_result==true){ fprintf(stderr,"Validation Data training predictions/labels (validY) has nan/inf or missing was not encoded\n"); fflush(stderr); exit(1); } } if(_weight!=NULL) {// check for nan or inf in data thrust::device_ptr<T> begin = thrust::device_pointer_cast(_weight); thrust::device_ptr<T> end = thrust::device_pointer_cast(_weight+this->_m); bool h_result = thrust::transform_reduce(begin, end, isnan_test<T>(), 0, thrust::plus<bool>()); if(h_result==true){ fprintf(stderr,"Weight Training Data has nan/inf or missing was not encoded\n"); fflush(stderr); exit(1); } } // nothing else to do if _datay==NULL if(_datay==NULL) return(0); // setup arguments summary_stats_unary_op<T> unary_op; summary_stats_binary_op<T> binary_op; summary_stats_data<T> init; init.initialize(); int len=0; // cast GPU pointer as thrust pointer thrust::device_ptr<T> dataybegin=thrust::device_pointer_cast(_datay); len=this->_m; thrust::device_ptr<T> datayend=thrust::device_pointer_cast(_datay+len); // compute summary statistics summary_stats_data<T> resulty = thrust::transform_reduce(dataybegin, datayend, unary_op, init, binary_op); min[0]=resulty.min; max[0]=resulty.max; mean[0]=resulty.mean; var[0]=resulty.variance(); sd[0]=std::sqrt(resulty.variance_n()); skew[0]=resulty.skewness(); kurt[0]=resulty.kurtosis(); #ifdef DEBUG std::cout <<"******Summary Statistics of Response Train*****"<<std::endl; // print_range("The data", dataybegin, datayend); std::cout <<"Count : "<< resulty.n << std::endl; std::cout <<"Minimum : "<< min[0]<<std::endl; std::cout <<"Maximum : 
"<< max[0]<<std::endl; std::cout <<"Mean : "<< mean[0]<< std::endl; std::cout <<"Variance : "<< var[0]<< std::endl; std::cout <<"Standard Deviation : "<< sd[0]<< std::endl; std::cout <<"Skewness : "<< skew[0]<< std::endl; std::cout <<"Kurtosis : "<< kurt[0]<< std::endl; #endif // cast GPU pointer as thrust pointer thrust::device_ptr<T> vdataybegin=thrust::device_pointer_cast(_vdatay); len=this->_mvalid; thrust::device_ptr<T> vdatayend=thrust::device_pointer_cast(_vdatay+len); // compute summary statistics summary_stats_data<T> vresulty = thrust::transform_reduce(vdataybegin, vdatayend, unary_op, init, binary_op); min[1]=vresulty.min; max[1]=vresulty.max; mean[1]=vresulty.mean; var[1]=vresulty.variance(); sd[1]=std::sqrt(vresulty.variance_n()); skew[1]=vresulty.skewness(); kurt[1]=vresulty.kurtosis(); #ifdef DEBUG std::cout <<"******Summary Statistics of Response Valid*****"<<std::endl; // print_range("The data", vdataybegin, vdatayend); std::cout <<"Count : "<< vresulty.n << std::endl; std::cout <<"Minimum : "<< min[1]<<std::endl; std::cout <<"Maximum : "<< max[1]<<std::endl; std::cout <<"Mean : "<< mean[1]<< std::endl; std::cout <<"Variance : "<< var[1]<< std::endl; std::cout <<"Standard Deviation : "<< sd[1]<< std::endl; std::cout <<"Skewness : "<< skew[1]<< std::endl; std::cout <<"Kurtosis : "<< kurt[1]<< std::endl; #endif if(1){ // normal usage // Get Cublas handle GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info); cublasHandle_t hdl = info->handle; // Set up views for raw vectors. cml::vector<T> y_vec = cml::vector_view_array(_datay, this->_m); // b cml::vector<T> weight_vec; auto free_weight_vec = false; if(_weight) weight_vec = cml::vector_view_array(_weight, this->_m); // weight else{ weight_vec = cml::vector_calloc<T>(this->_m); // weight make up free_weight_vec = true; cml::vector_add_constant(&weight_vec, static_cast<T>(1.0)); // make unity weights } cml::vector<T> ytemp = cml::vector_calloc<T>(this->_m); // b cml::vector<T> xtemp = cml::vector_calloc<T>(this->_n); // x cml::vector_memcpy(&ytemp, &y_vec); // y_vec->ytemp cml::vector_add_constant(&ytemp, -static_cast<T>(intercept)*mean[0]); // ytemp -> ytemp - intercept*mean[0] cml::vector_mul(&ytemp,&weight_vec); // ytemp*weight -> ytemp // Compute A^T . b if (_ord == MatrixDense<T>::ROW) { const cml::matrix<T, CblasRowMajor> A = cml::matrix_view_array<T, CblasRowMajor>(_data, this->_m, this->_n); // just view cml::blas_gemv(hdl, CUBLAS_OP_T, static_cast<T>(1.), &A, &ytemp, static_cast<T>(0.), &xtemp); // A.ytemp -> xtemp } else{ const cml::matrix<T, CblasColMajor> A = cml::matrix_view_array<T, CblasColMajor>(_data, this->_m, this->_n); // just view cml::blas_gemv(hdl, CUBLAS_OP_T, static_cast<T>(1.), &A, &ytemp, static_cast<T>(0.), &xtemp); // A.ytemp -> xtemp } thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(&xtemp.data[0]); lambda_max0 = thrust::transform_reduce(thrust::device, dev_ptr, dev_ptr + this->_n-intercept, absolute_value<T>(), static_cast<T>(0.0), thrust::maximum<T>()); cml::vector_free(&ytemp); cml::vector_free(&xtemp); if(free_weight_vec) cml::vector_free(&weight_vec); } else{ lambda_max0 = 7000; // test } CUDA_CHECK_ERR(); return 0; } //////////////////////////////////////////////////////////////////////////////// /////////////////////// Equilibration Helpers ////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// namespace { // Estimates norm of A. norm_type should either be kNorm2 or kNormFro. 
template <typename T> T NormEst(cublasHandle_t hdl, NormTypes norm_type, const MatrixDense<T>& A) { switch (norm_type) { case kNorm2: { return Norm2Est(hdl, &A); } case kNormFro: { const cml::vector<T> a = cml::vector_view_array(A.Data(), A.Rows() * A.Cols()); return cml::blas_nrm2(hdl, &a) / std::sqrt(std::min(A.Rows(), A.Cols())); } case kNorm1: // 1-norm normalization doens't make make sense since it treats rows and // columns differently. default: ASSERT(false); return static_cast<T>(0.); } } // Performs A := D * A * E for A in row major template <typename T> void __global__ __MultRow(size_t m, size_t n, const T *d, const T *e, T *data) { size_t tid = blockIdx.x * blockDim.x + threadIdx.x; for (size_t t = tid; t < m * n; t += gridDim.x * blockDim.x) data[t] *= d[t / n] * e[t % n]; } // Performs A := D * A * E for A in col major template <typename T> void __global__ __MultCol(size_t m, size_t n, const T *d, const T *e, T *data) { size_t tid = blockIdx.x * blockDim.x + threadIdx.x; for (size_t t = tid; t < m * n; t += gridDim.x * blockDim.x) data[t] *= d[t % m] * e[t / m]; } template <typename T> void MultDiag(const T *d, const T *e, size_t m, size_t n, typename MatrixDense<T>::Ord ord, T *data) { if (ord == MatrixDense<T>::ROW) { size_t grid_dim_row = cml::calc_grid_dim(m * n, cml::kBlockSize); __MultRow<<<grid_dim_row, cml::kBlockSize>>>(m, n, d, e, data); } else { size_t grid_dim_row = cml::calc_grid_dim(m * n, cml::kBlockSize); __MultCol<<<grid_dim_row, cml::kBlockSize>>>(m, n, d, e, data); } } } // namespace // Explicit template instantiation. #if !defined(H2O4GPU_DOUBLE) || H2O4GPU_DOUBLE==1 template class MatrixDense<double>; #endif #if !defined(H2O4GPU_SINGLE) || H2O4GPU_SINGLE==1 template class MatrixDense<float>; #endif // upload data function. Uploads to a single GPU. 
// mimics otherwise similar MatrixDense constructor, but has no destruction of uploaded data pointers template <typename T> int makePtr_dense(int sharedA, int me, int wDev, size_t m, size_t n, size_t mValid, const char ord, const T *data, const T *datay, const T *vdata, const T *vdatay, const T *weight, T **_data, T **_datay, T **_vdata, T **_vdatay, T **_weight){ checkwDev(wDev); CUDACHECK(cudaSetDevice(wDev)); DEBUG_FPRINTF(stderr,"makePtr_dense: %d\n",0); #ifdef DEBUG // CUDACHECK(cudaSetDeviceFlags(cudaDeviceMapHost)); // TODO: MapHostMemory cudaDeviceProp props; CUDACHECK(cudaGetDeviceProperties(&props, wDev)); DEBUG_FPRINTF(stderr,"Using: Compute %d.%d CUDA device: [%s] with id=%2d\n", props.major, props.minor, props.name,wDev); #endif // Copy Matrix to GPU (unlike CPU case, cannot copy just pointer because always assume input is CPU and output is GPU) double t0 = timer<double>(); PUSH_RANGE("MDsendsource",MDsendsource,1); if(data){ CUDACHECK(cudaMalloc(_data, m * n * sizeof(T))); // allocate on GPU CUDACHECK(cudaMemcpy(*_data, data, m * n * sizeof(T),cudaMemcpyHostToDevice)); // copy from orig CPU data to GPU // fprintf(stderr,"_data: %p\n",(void*)*_data); fflush(stderr); } else *_data=NULL; if(datay){ CUDACHECK(cudaMalloc(_datay, m * sizeof(T))); // allocate on GPU CUDACHECK(cudaMemcpy(*_datay, datay, m * sizeof(T),cudaMemcpyHostToDevice)); // copy from orig CPU data to GPU // fprintf(stderr,"_datay: %p\n",(void*)*_datay); fflush(stderr); } else *_datay=NULL; if(vdata){ CUDACHECK(cudaMalloc(_vdata, mValid * n * sizeof(T))); // allocate on GPU CUDACHECK(cudaMemcpy(*_vdata, vdata, mValid * n * sizeof(T),cudaMemcpyHostToDevice)); // copy from orig CPU data to GPU // fprintf(stderr,"_vdata: %p\n",(void*)*_vdata); fflush(stderr); } else *_vdata=NULL; if(vdatay){ CUDACHECK(cudaMalloc(_vdatay, mValid * sizeof(T))); // allocate on GPU CUDACHECK(cudaMemcpy(*_vdatay, vdatay, mValid * sizeof(T),cudaMemcpyHostToDevice)); // copy from orig CPU data to GPU // fprintf(stderr,"_vdatay: %p\n",(void*)*_vdatay); fflush(stderr); } else *_vdatay=NULL; // fprintf(stderr,"weight=%p\n",weight); fflush(stderr); if(weight){ CUDACHECK(cudaMalloc(_weight, m * sizeof(T))); // allocate on GPU CUDACHECK(cudaMemcpy(*_weight, weight, m * sizeof(T),cudaMemcpyHostToDevice)); // copy from orig CPU data to GPU } else{ DEBUG_FPRINTF(stderr,"making up unity weights: %d\n",m); CUDACHECK(cudaMalloc(_weight, m * sizeof(T))); // allocate on GPU thrust::device_ptr<T> dev_ptr=thrust::device_pointer_cast(static_cast<T*>(*_weight)); T fill_value=1.0; thrust::fill(dev_ptr, dev_ptr + m, fill_value); // fprintf(stderr,"_weight: %p\n",(void*)*_weight); fflush(stderr); } POP_RANGE("MDsendsource",MDsendsource,1); double t2 = timer<double>(); DEBUG_FPRINTF(stdout,"Time to allocate and copy the data matrix on the GPU: %f\n", t2-t0); cudaDeviceSynchronize(); DEBUG_FPRINTF(stderr,"pointer data %p\n",(void*)*_data); DEBUG_FPRINTF(stderr,"pointer datay %p\n",(void*)*_datay); DEBUG_FPRINTF(stderr,"pointer vdata %p\n",(void*)*_vdata); DEBUG_FPRINTF(stderr,"pointer vdaty %p\n",(void*)*_vdatay); DEBUG_FPRINTF(stderr,"pointer weight %p\n",(void*)*_weight); return(0); } template int makePtr_dense<double>(int sharedA, int me, int wDev, size_t m, size_t n, size_t mValid, const char ord, const double *data, const double *datay, const double *vdata, const double *vdatay, const double *weight, double **_data, double **_datay, double **_vdata, double **_vdatay, double **_weight); template int makePtr_dense<float>(int sharedA, int me, int wDev, size_t 
m, size_t n, size_t mValid, const char ord, const float *data, const float *datay, const float *vdata, const float *vdatay, const float *weight, float **_data, float **_datay, float **_vdata, float **_vdatay, float **_weight); template <typename T> int modelFree1(T *aptr){ if(aptr!=NULL){ // TODO: use T** instead everywhere to prevent a scenario when we keep an address of allocated memory // TODO: flush cpu cache as it can be invoked by background GC thread CUDACHECK(cudaFree(aptr)); } return(0); } template int modelFree1<float>(float *aptr); template int modelFree1<double>(double *aptr); } // namespace h2o4gpu int modelfree1_double(double *aptr){ return h2o4gpu::modelFree1<double>(aptr); } int modelfree1_float(float *aptr){ return h2o4gpu::modelFree1<float>(aptr); } int make_ptr_double(int sharedA, int sourceme, int sourceDev, size_t mTrain, size_t n, size_t mValid, const char ord, const double* trainX, const double* trainY, const double* validX, const double* validY, const double *weight, double**a, double**b, double**c, double**d, double **e) { return h2o4gpu::makePtr_dense<double>(sharedA, sourceme, sourceDev, mTrain, n, mValid, ord, trainX, trainY, validX, validY, weight, a, b, c, d, e); } int make_ptr_float(int sharedA, int sourceme, int sourceDev, size_t mTrain, size_t n, size_t mValid, const char ord, const float* trainX, const float* trainY, const float* validX, const float* validY, const float *weight, float**a, float**b, float**c, float**d, float **e) { return h2o4gpu::makePtr_dense<float>(sharedA, sourceme, sourceDev, mTrain, n, mValid, ord, trainX, trainY, validX, validY, weight, a, b, c, d, e); }
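/*
 * Illustrative usage sketch (not part of the original source): how a host
 * caller might hand row-major CPU training data to make_ptr_float() above and
 * later release the returned device pointers with modelfree1_float(). The
 * sizes, device id 0 and the NULL validation/weight arguments are assumptions
 * for this example only; passing a NULL weight makes makePtr_dense() fill
 * unity weights on the GPU, and modelFree1() ignores NULL pointers.
 */
static int example_make_ptr_float_usage() {
  const size_t mTrain = 1024, n = 16, mValid = 0;
  float *trainX = new float[mTrain * n]();   // row-major, zero-initialized
  float *trainY = new float[mTrain]();

  float *d_X = 0, *d_y = 0, *d_vX = 0, *d_vy = 0, *d_w = 0;

  // sharedA=0, sourceme=0, sourceDev=0, row-major ('r') CPU input
  int status = make_ptr_float(0, 0, 0, mTrain, n, mValid, 'r',
                              trainX, trainY,
                              NULL /*validX*/, NULL /*validY*/, NULL /*weight*/,
                              &d_X, &d_y, &d_vX, &d_vy, &d_w);

  // ... fit/predict against the returned device pointers here ...

  // Free each device pointer individually once done (NULLs are skipped)
  modelfree1_float(d_X);
  modelfree1_float(d_y);
  modelfree1_float(d_vX);
  modelfree1_float(d_vy);
  modelfree1_float(d_w);

  delete[] trainX;
  delete[] trainY;
  return status;
}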
* \file * cub::BlockHisto256 provides methods for constructing (and compositing into) 256-bin histograms from 8b data partitioned across threads within a CUDA thread block. */ #pragma once #include "../util_arch.cuh" #include "../block/block_radix_sort.cuh" #include "../block/block_discontinuity.cuh" #include "../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /****************************************************************************** * Algorithmic variants ******************************************************************************/ /** * \brief BlockHisto256Algorithm enumerates alternative algorithms for the parallel construction of 8b histograms. */ enum BlockHisto256Algorithm { /** * \par Overview * Sorting followed by differentiation. Execution is comprised of two phases: * -# Sort the 8b data using efficient radix sort * -# Look for "runs" of same-valued 8b keys by detecting discontinuities; the run-lengths are histogram bin counts. * * \par Performance Considerations * Delivers consistent throughput regardless of sample bin distribution. */ BLOCK_HISTO_256_SORT, /** * \par Overview * Use atomic addition to update byte counts directly * * \par Performance Considerations * Performance is strongly tied to the hardware implementation of atomic * addition, and may be significantly degraded for non uniformly-random * input distributions where many concurrent updates are likely to be * made to the same bin counter. */ BLOCK_HISTO_256_ATOMIC, }; /****************************************************************************** * Block histogram ******************************************************************************/ /** * \addtogroup BlockModule * @{ */ /** * \brief BlockHisto256 provides methods for constructing (and compositing into) 256-bin histograms from 8b data partitioned across threads within a CUDA thread block. ![](histogram_logo.png) * * \par Overview * A <a href="http://en.wikipedia.org/wiki/Histogram"><em>histogram</em></a> * counts the number of observations that fall into each of the disjoint categories (known as <em>bins</em>). * * \par * For convenience, BlockHisto256 provides alternative entrypoints that differ by: * - Complete/incremental composition (compute a new histogram vs. update existing histogram data) * * \tparam BLOCK_THREADS The threadblock size in threads * \tparam ITEMS_PER_THREAD The number of items per thread * \tparam ALGORITHM <b>[optional]</b> cub::BlockHisto256Algorithm enumerator specifying the underlying algorithm to use (default = cub::BLOCK_HISTO_256_SORT) * * \par Algorithm * BlockHisto256 can be (optionally) configured to use different algorithms: * -# <b>cub::BLOCK_HISTO_256_SORT</b>. Sorting followed by differentiation. [More...](\ref cub::BlockHisto256Algorithm) * -# <b>cub::BLOCK_HISTO_256_ATOMIC</b>. Use atomic addition to update byte counts directly. [More...](\ref cub::BlockHisto256Algorithm) * * \par Usage Considerations * - The histogram output can be constructed in shared or global memory * - Supports partially-full threadblocks (i.e., the most-significant thread ranks having undefined values). 
* - \smemreuse{BlockHisto256::SmemStorage} * * \par Performance Considerations * - Computation is slightly more efficient (i.e., having lower instruction overhead) for: * - \p BLOCK_THREADS is a multiple of the architecture's warp size * - Every thread has a valid input (i.e., full <em>vs.</em> partial-tiles) * - See cub::BlockHisto256Algorithm for performance details regarding algorithmic alternatives * * \par Examples * \par * <em>Example 1.</em> Compute a simple 8b histogram in shared memory from 512 byte values that * are partitioned across a 128-thread threadblock (where each thread holds 4 values). * \code * #include <cub/cub.cuh> * * __global__ void SomeKernel(...) * { * // Parameterize BlockHisto256 for 128 threads * typedef cub::BlockHisto256<128> BlockHisto256; * * // Declare shared memory for BlockHisto256 * __shared__ typename BlockHisto256::SmemStorage smem_storage; * * // Declare shared memory for histogram bins * __shared__ unsigned int smem_histogram[256]; * * // Input items per thread * unsigned char data[4]; * * // Obtain items * ... * * // Compute the threadblock-wide histogram * BlockHisto256::Histogram(smem_storage, smem_histogram, data); * * ... * \endcode * * \par * <em>Example 2:</em> Composite an incremental round of 8b histogram data onto * an existing histogram in global memory. * \code * #include <cub/cub.cuh> * * template <int BLOCK_THREADS> * __global__ void SomeKernel(..., int *d_histogram) * { * // Parameterize BlockHisto256 * typedef cub::BlockHisto256<BLOCK_THREADS> BlockHisto256; * * // Declare shared memory for BlockHisto256 * __shared__ typename BlockHisto256::SmemStorage smem_storage; * * // Guarded load of input item * int data; * if (threadIdx.x < num_items) data = ...; * * // Compute the threadblock-wide sum of valid elements in thread0 * BlockHisto256::Composite(smem_storage, d_histogram, data); * * ... * \endcode * */ template < int BLOCK_THREADS, int ITEMS_PER_THREAD, BlockHisto256Algorithm ALGORITHM = BLOCK_HISTO_256_SORT> class BlockHisto256 { private: /****************************************************************************** * Constants ******************************************************************************/ /** * Ensure the template parameterization meets the requirements of the * targeted device architecture. BLOCK_HISTO_256_ATOMIC can only be used * on version SM120 or later. Otherwise BLOCK_HISTO_256_SORT is used * regardless. */ static const BlockHisto256Algorithm SAFE_ALGORITHM = ((ALGORITHM == BLOCK_HISTO_256_ATOMIC) && (CUB_PTX_ARCH < 120)) ? 
BLOCK_HISTO_256_SORT : ALGORITHM; #ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document /****************************************************************************** * Algorithmic variants ******************************************************************************/ /** * BLOCK_HISTO_256_SORT algorithmic variant */ template <BlockHisto256Algorithm _ALGORITHM, int DUMMY = 0> struct BlockHisto256Internal { // Parameterize BlockRadixSort type for our thread block typedef BlockRadixSort<unsigned char, BLOCK_THREADS, ITEMS_PER_THREAD> BlockRadixSortT; // Parameterize BlockDiscontinuity type for our thread block typedef BlockDiscontinuity<unsigned char, BLOCK_THREADS> BlockDiscontinuityT; // Shared memory union SmemStorage { // Storage for sorting bin values typename BlockRadixSortT::SmemStorage sort_storage; struct { // Storage for detecting discontinuities in the tile of sorted bin values typename BlockDiscontinuityT::SmemStorage discont_storage; // Storage for noting begin/end offsets of bin runs in the tile of sorted bin values unsigned int run_begin[256]; unsigned int run_end[256]; }; }; // Discontinuity functor struct DiscontinuityOp { // Reference to smem_storage SmemStorage &smem_storage; // Constructor __device__ __forceinline__ DiscontinuityOp(SmemStorage &smem_storage) : smem_storage(smem_storage) {} // Discontinuity predicate __device__ __forceinline__ bool operator()(const unsigned char &a, const unsigned char &b, unsigned int b_index) { if (a != b) { // Note the begin/end offsets in shared storage smem_storage.run_begin[b] = b_index; smem_storage.run_end[a] = b_index; return true; } else { return false; } } }; // Composite data onto an existing histogram template < typename HistoCounter> static __device__ __forceinline__ void Composite( SmemStorage &smem_storage, ///< [in] Reference to shared memory allocation having layout type SmemStorage unsigned char (&items)[ITEMS_PER_THREAD], ///< [in] Calling thread's 8b input values to histogram HistoCounter histogram[256]) ///< [out] Reference to shared/global memory 256-bin histogram { enum { TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD }; // Sort bytes in blocked arrangement BlockRadixSortT::SortBlocked(smem_storage.sort_storage, items); __syncthreads(); // Initialize the shared memory's run_begin and run_end for each bin int histo_offset = 0; #pragma unroll for(; histo_offset + BLOCK_THREADS <= 256; histo_offset += BLOCK_THREADS) { smem_storage.run_begin[histo_offset + threadIdx.x] = TILE_SIZE; smem_storage.run_end[histo_offset + threadIdx.x] = TILE_SIZE; } // Finish up with guarded initialization if necessary if ((histo_offset < BLOCK_THREADS) && (histo_offset + threadIdx.x < 256)) { smem_storage.run_begin[histo_offset + threadIdx.x] = TILE_SIZE; smem_storage.run_end[histo_offset + threadIdx.x] = TILE_SIZE; } __syncthreads(); int flags[ITEMS_PER_THREAD]; // unused // Note the begin/end run offsets of bin runs in the sorted tile DiscontinuityOp flag_op(smem_storage); BlockDiscontinuityT::Flag(smem_storage.discont_storage, items, flag_op, flags); // Update begin for first item if (threadIdx.x == 0) smem_storage.run_begin[items[0]] = 0; __syncthreads(); // Composite into histogram histo_offset = 0; #pragma unroll for(; histo_offset + BLOCK_THREADS <= 256; histo_offset += BLOCK_THREADS) { int thread_offset = histo_offset + threadIdx.x; HistoCounter count = smem_storage.run_end[thread_offset] - smem_storage.run_begin[thread_offset]; histogram[thread_offset] += count; } // Finish up with guarded composition if necessary if ((histo_offset < 
BLOCK_THREADS) && (histo_offset + threadIdx.x < 256)) { int thread_offset = histo_offset + threadIdx.x; HistoCounter count = smem_storage.run_end[thread_offset] - smem_storage.run_begin[thread_offset]; histogram[thread_offset] += count; } } }; /** * BLOCK_HISTO_256_ATOMIC algorithmic variant */ template <int DUMMY> struct BlockHisto256Internal<BLOCK_HISTO_256_ATOMIC, DUMMY> { /// Shared memory storage layout type struct SmemStorage {}; /// Composite data onto an existing histogram template < typename HistoCounter> static __device__ __forceinline__ void Composite( SmemStorage &smem_storage, ///< [in] Reference to shared memory allocation having layout type SmemStorage unsigned char (&items)[ITEMS_PER_THREAD], ///< [in] Calling thread's 8b input values to histogram HistoCounter histogram[256]) ///< [out] Reference to shared/global memory 256-bin histogram { // Update histogram #pragma unroll for (int i = 0; i < ITEMS_PER_THREAD; ++i) { atomicAdd(histogram + items[i], 1); } } }; #endif // DOXYGEN_SHOULD_SKIP_THIS /// Shared memory storage layout type for BlockHisto256 typedef typename BlockHisto256Internal<SAFE_ALGORITHM>::SmemStorage _SmemStorage; public: /// \smemstorage{BlockHisto256} typedef _SmemStorage SmemStorage; /** * Initialize shared histogram */ template <typename HistoCounter> static __device__ __forceinline__ void InitHistogram(HistoCounter histogram[256]) { // Initialize histogram bin counts to zeros int histo_offset = 0; #pragma unroll for(; histo_offset + BLOCK_THREADS <= 256; histo_offset += BLOCK_THREADS) { histogram[histo_offset + threadIdx.x] = 0; } // Finish up with guarded initialization if necessary if ((histo_offset < BLOCK_THREADS) && (histo_offset + threadIdx.x < 256)) { histogram[histo_offset + threadIdx.x] = 0; } } /** * \brief Constructs a threadblock-wide histogram in shared/global memory. Each thread contributes an array of 8b input elements. * * \smemreuse * * \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of 8b values per thread * \tparam HistoCounter <b>[inferred]</b> Histogram counter type */ template < typename HistoCounter> static __device__ __forceinline__ void Histogram( SmemStorage &smem_storage, ///< [in] Reference to shared memory allocation having layout type SmemStorage unsigned char (&items)[ITEMS_PER_THREAD], ///< [in] Calling thread's 8b input values to histogram HistoCounter histogram[256]) ///< [out] Reference to shared/global memory 256-bin histogram { // Initialize histogram bin counts to zeros InitHistogram(histogram); // Composite the histogram BlockHisto256Internal<SAFE_ALGORITHM>::Composite(smem_storage, items, histogram); } /** * \brief Updates an existing threadblock-wide histogram in shared/global memory. Each thread composites an array of 8b input elements. * * \smemreuse * * \tparam HistoCounter <b>[inferred]</b> Histogram counter type */ template < typename HistoCounter> static __device__ __forceinline__ void Composite( SmemStorage &smem_storage, ///< [in] Reference to shared memory allocation having layout type SmemStorage unsigned char (&items)[ITEMS_PER_THREAD], ///< [in] Calling thread's 8b input values to histogram HistoCounter histogram[256]) ///< [out] Reference to shared/global memory 256-bin histogram { BlockHisto256Internal<SAFE_ALGORITHM>::Composite(smem_storage, items, histogram); } }; /** @} */ // end group BlockModule } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
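// ---------------------------------------------------------------------------
// Illustrative, self-contained sketch (not part of the CUB sources above): the
// BLOCK_HISTO_256_ATOMIC variant boils down to building a 256-bin histogram in
// shared memory with atomicAdd and then merging it into a global histogram.
// The standalone kernel below reproduces that strategy in plain CUDA; the
// kernel name and the BLOCK_THREADS / ITEMS_PER_THREAD values are assumptions
// chosen for the example, not CUB API.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <cuda_runtime.h>

constexpr int BLOCK_THREADS    = 128;
constexpr int ITEMS_PER_THREAD = 4;

__global__ void histo256_atomic(const unsigned char* in, unsigned int* global_histo, int n)
{
    __shared__ unsigned int smem_histo[256];

    // Zero the shared histogram (256 bins, BLOCK_THREADS threads).
    for (int bin = threadIdx.x; bin < 256; bin += blockDim.x)
        smem_histo[bin] = 0;
    __syncthreads();

    // Each thread composites ITEMS_PER_THREAD bytes via shared-memory atomics.
    int base = (blockIdx.x * blockDim.x + threadIdx.x) * ITEMS_PER_THREAD;
    for (int i = 0; i < ITEMS_PER_THREAD; ++i) {
        int idx = base + i;
        if (idx < n)
            atomicAdd(&smem_histo[in[idx]], 1u);
    }
    __syncthreads();

    // Merge this block's histogram into the global one.
    for (int bin = threadIdx.x; bin < 256; bin += blockDim.x)
        atomicAdd(&global_histo[bin], smem_histo[bin]);
}

int main()
{
    const int n = 1 << 20;
    unsigned char* d_in;
    unsigned int* d_histo;
    cudaMalloc(&d_in, n);
    cudaMalloc(&d_histo, 256 * sizeof(unsigned int));
    cudaMemset(d_in, 7, n);  // every byte is 7, so bin 7 should equal n
    cudaMemset(d_histo, 0, 256 * sizeof(unsigned int));

    int tile = BLOCK_THREADS * ITEMS_PER_THREAD;
    histo256_atomic<<<(n + tile - 1) / tile, BLOCK_THREADS>>>(d_in, d_histo, n);

    unsigned int h_histo[256];
    cudaMemcpy(h_histo, d_histo, sizeof(h_histo), cudaMemcpyDeviceToHost);
    printf("bin 7 = %u (expected %d)\n", h_histo[7], n);

    cudaFree(d_in);
    cudaFree(d_histo);
    return 0;
}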
#include <cudf/detail/concatenate.hpp> #include <cudf/detail/copy.hpp> namespace legate { namespace pandas { namespace comm { using namespace Legion; namespace detail { struct packed_columns_view { packed_columns_view(const cudf::packed_columns &columns) : metadata(columns.metadata_.get()), gpu_data(columns.gpu_data.get()) { } cudf::packed_columns::metadata *metadata; rmm::device_buffer *gpu_data; }; std::unique_ptr<cudf::table> shuffle(const std::vector<packed_columns_view> &packed_tables, coord_t task_id, coord_t num_pieces, ncclComm_t *comm, cudaStream_t stream, rmm::mr::device_memory_resource *mr) { // Use the default resource for temporary allocations auto temp_mr = rmm::mr::get_current_device_resource(); AutoStream as{}; auto meta_stream = as.stream(); // All-gather buffer sizes so that receivers can allocate buffers of the right sizes Rect<1> rect(Point<1>(0), Point<1>(num_pieces * num_pieces - 1)); DeferredBuffer<size_t, 1> all_buffer_sizes(Memory::Z_COPY_MEM, rect); DeferredBuffer<size_t, 1> all_metadata_sizes(Memory::Z_COPY_MEM, rect); auto get_aligned_size = [](size_t size) { return std::max<size_t>(16, (size + 15) / 16 * 16); }; auto index = [&num_pieces](coord_t from, coord_t to) { return from * num_pieces + to; }; for (coord_t to = 0; to < num_pieces; ++to) { auto &table = packed_tables[to]; auto idx = index(task_id, to); all_buffer_sizes[idx] = table.gpu_data->size(); all_metadata_sizes[idx] = table.metadata->size(); } NCCLCHECK(ncclAllGather(all_buffer_sizes.ptr(task_id * num_pieces), all_buffer_sizes.ptr(0), num_pieces, ncclUint64, *comm, meta_stream)); NCCLCHECK(ncclAllGather(all_metadata_sizes.ptr(task_id * num_pieces), all_metadata_sizes.ptr(0), num_pieces, ncclUint64, *comm, meta_stream)); // We must synchronize here before proceeding, as we need the sizes to arrive in order to allocate // buffers below SYNC_AND_CHECK_STREAM(meta_stream); // Allocate necessary buffers for the transfers std::vector<std::unique_ptr<rmm::device_buffer>> recv_data_buffers; for (coord_t from = 0; from < num_pieces; ++from) { auto buffer_size = get_aligned_size(all_buffer_sizes[index(from, task_id)]); recv_data_buffers.push_back(std::make_unique<rmm::device_buffer>(buffer_size, stream, temp_mr)); } auto meta_buffer_allocator = alloc::DeferredBufferAllocator(Memory::Kind::GPU_FB_MEM); auto host_meta_buffer_allocator = alloc::DeferredBufferAllocator(Memory::Kind::Z_COPY_MEM); std::vector<uint8_t *> send_meta_buffers; std::vector<uint8_t *> recv_meta_buffers; for (coord_t other_id = 0; other_id < num_pieces; ++other_id) { auto send_size = get_aligned_size(all_metadata_sizes[index(task_id, other_id)]); auto recv_size = get_aligned_size(all_metadata_sizes[index(other_id, task_id)]); auto send_buffer = static_cast<uint8_t *>(meta_buffer_allocator.allocate(send_size)); auto recv_buffer = static_cast<uint8_t *>(meta_buffer_allocator.allocate(recv_size)); auto &send_metadata = packed_tables[other_id].metadata; cudaMemcpyAsync(send_buffer, send_metadata->data(), send_metadata->size(), cudaMemcpyHostToDevice, meta_stream); send_meta_buffers.push_back(send_buffer); recv_meta_buffers.push_back(recv_buffer); } // Perform all-to-all exchange. We exchange the host-side metadata first // so that we can run the unpacking logic while the data is being transferred. 
NCCLCHECK(ncclGroupStart()); for (auto other_id = 0; other_id < num_pieces; ++other_id) { auto send_size = get_aligned_size(all_metadata_sizes[index(task_id, other_id)]); auto recv_size = get_aligned_size(all_metadata_sizes[index(other_id, task_id)]); auto send_buffer = send_meta_buffers[other_id]; auto recv_buffer = recv_meta_buffers[other_id]; NCCLCHECK(ncclSend(send_buffer, send_size, ncclInt8, other_id, *comm, meta_stream)); NCCLCHECK(ncclRecv(recv_buffer, recv_size, ncclInt8, other_id, *comm, meta_stream)); } NCCLCHECK(ncclGroupEnd()); std::vector<uint8_t *> host_recv_meta_buffers; for (coord_t other_id = 0; other_id < num_pieces; ++other_id) { auto recv_size = get_aligned_size(all_metadata_sizes[index(other_id, task_id)]); auto recv_buffer = recv_meta_buffers[other_id]; auto host_recv_buffer = static_cast<uint8_t *>(host_meta_buffer_allocator.allocate(recv_size)); cudaMemcpyAsync(host_recv_buffer, recv_buffer, recv_size, cudaMemcpyDeviceToHost, meta_stream); host_recv_meta_buffers.push_back(host_recv_buffer); } rmm::device_buffer dummy_buffer(get_aligned_size(0), stream, temp_mr); NCCLCHECK(ncclGroupStart()); for (auto other_id = 0; other_id < num_pieces; ++other_id) { auto send_size = get_aligned_size(all_buffer_sizes[index(task_id, other_id)]); auto recv_size = get_aligned_size(all_buffer_sizes[index(other_id, task_id)]); auto send_buffer = packed_tables[other_id].gpu_data->data(); send_buffer = nullptr == send_buffer ? dummy_buffer.data() : send_buffer; auto recv_buffer = recv_data_buffers[other_id]->data(); NCCLCHECK(ncclSend(send_buffer, send_size, ncclInt8, other_id, *comm, stream)); NCCLCHECK(ncclRecv(recv_buffer, recv_size, ncclInt8, other_id, *comm, stream)); } NCCLCHECK(ncclGroupEnd()); #ifdef DEBUG_PANDAS SYNC_AND_CHECK_STREAM(stream); #endif // This synchronization is mandatory, since the unpacking needs the host-side metadata SYNC_AND_CHECK_STREAM(meta_stream); // Once we've received all tables (more precisely, their metadata), unpack and concatenate them // to construct the result. std::vector<cudf::table_view> tables; for (auto other_id = 0; other_id < num_pieces; ++other_id) { auto buffer_size = all_buffer_sizes[index(other_id, task_id)]; auto data_buffer = buffer_size > 0 ? 
static_cast<const uint8_t *>(recv_data_buffers[other_id]->data()) : nullptr; tables.push_back(cudf::unpack(host_recv_meta_buffers[other_id], data_buffer)); } return cudf::detail::concatenate(tables, stream, mr); } } // namespace detail std::unique_ptr<cudf::table> shuffle(const cudf::table_view &input, const std::vector<cudf::size_type> &splits, coord_t task_id, ncclComm_t *comm, cudaStream_t stream, rmm::mr::device_memory_resource *mr) { // Use the default resource for temporary allocations auto temp_mr = rmm::mr::get_current_device_resource(); // Split the table into contiguous chunks auto packed_subtables = contiguous_split(input, splits, stream, temp_mr); std::vector<detail::packed_columns_view> packed_subtables_views; for (auto &packed_subtable : packed_subtables) packed_subtables_views.emplace_back(packed_subtable.data); return detail::shuffle(packed_subtables_views, task_id, splits.size() + 1, comm, stream, mr); } std::unique_ptr<cudf::table> all_gather(const cudf::table_view &input, coord_t task_id, coord_t num_tasks, ncclComm_t *comm, cudaStream_t stream, rmm::mr::device_memory_resource *mr) { // Use the default resource for temporary allocations auto temp_mr = rmm::mr::get_current_device_resource(); // Split the table into contiguous chunks auto packed_table = pack(input, stream, temp_mr); std::vector<detail::packed_columns_view> broadcasted_tables(num_tasks, packed_table); return detail::shuffle(broadcasted_tables, task_id, num_tasks, comm, stream, mr); } std::pair<cudf::table_view, std::unordered_map<uint32_t, cudf::column_view>> extract_dictionaries( const cudf::table_view &input) { std::vector<cudf::column_view> columns; std::unordered_map<uint32_t, cudf::column_view> dictionaries; for (auto idx = 0; idx < input.num_columns(); ++idx) { auto &column = input.column(idx); if (column.type().id() == cudf::type_id::DICTIONARY32) { const auto &codes = column.child(0); dictionaries[idx] = column.child(1); columns.push_back( cudf::column_view(codes.type(), column.size(), codes.head(), column.null_mask())); } else columns.push_back(column); } return std::make_pair(cudf::table_view(std::move(columns)), std::move(dictionaries)); } std::pair<std::unique_ptr<cudf::table>, std::unordered_map<uint32_t, std::unique_ptr<cudf::column>>> extract_dictionaries(std::unique_ptr<cudf::table> &&input) { std::vector<std::unique_ptr<cudf::column>> columns; std::unordered_map<uint32_t, std::unique_ptr<cudf::column>> dictionaries; auto input_columns = input->release(); for (auto idx = 0; idx < input_columns.size(); ++idx) { auto &column = input_columns[idx]; if (column->type().id() == cudf::type_id::DICTIONARY32) { // Hash out the column auto size = column->size(); auto contents = column->release(); // Set aside the dictionary that will later be merged back to the column dictionaries[idx] = std::move(contents.children[1]); // Combine the parent's bitmask with the codes column and put it in the table auto &codes = contents.children[0]; auto codes_contents = codes->release(); auto combined = std::make_unique<cudf::column>( codes->type(), size, std::move(*codes_contents.data), std::move(*codes_contents.null_mask)); columns.push_back(std::move(combined)); } else columns.push_back(std::move(column)); } return std::make_pair(std::make_unique<cudf::table>(std::move(columns)), std::move(dictionaries)); } cudf::table_view embed_dictionaries( const cudf::table_view &input, const std::unordered_map<uint32_t, cudf::column_view> &dictionaries) { std::vector<cudf::column_view> columns; for (uint32_t idx = 0; idx < 
input.num_columns(); ++idx) { auto &column = input.column(idx); auto finder = dictionaries.find(idx); if (finder == dictionaries.end()) columns.push_back(column); else { auto codes = cudf::column_view(column.type(), column.size(), column.head()); columns.push_back(cudf::column_view(cudf::data_type(cudf::type_id::DICTIONARY32), column.size(), nullptr, column.null_mask(), -1, 0, {codes, finder->second})); } } return cudf::table_view(std::move(columns)); } std::unique_ptr<cudf::table> embed_dictionaries( std::unique_ptr<cudf::table> &&input, const std::unordered_map<uint32_t, cudf::column_view> &dictionaries) { std::vector<std::unique_ptr<cudf::column>> columns; auto input_columns = input->release(); for (uint32_t idx = 0; idx < input_columns.size(); ++idx) { auto &column = input_columns[idx]; auto finder = dictionaries.find(idx); if (finder == dictionaries.end()) columns.push_back(std::move(column)); else { auto codes_size = column->size(); auto codes_contents = column->release(); std::vector<std::unique_ptr<cudf::column>> children; children.push_back(std::make_unique<cudf::column>( cudf::data_type(cudf::type_id::UINT32), codes_size, std::move(*codes_contents.data))); columns.push_back(std::make_unique<cudf::column>(cudf::data_type(cudf::type_id::DICTIONARY32), codes_size, rmm::device_buffer{}, std::move(*codes_contents.null_mask), -1, std::move(children))); } } return std::make_unique<cudf::table>(std::move(columns)); } } // namespace comm } // namespace pandas } // namespace legate
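// ---------------------------------------------------------------------------
// Illustrative, self-contained sketch (separate from the legate sources above):
// the core of detail::shuffle is a grouped ncclSend/ncclRecv all-to-all,
// bracketed by ncclGroupStart()/ncclGroupEnd(). The program below shows that
// pattern in isolation for a single process driving all visible GPUs with
// fixed-size chunks, i.e. without the cudf packing, metadata exchange, or size
// negotiation; the chunk size, buffer layout, and macro name are assumptions
// made for the example.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>
#include <nccl.h>

#define CHECK_NCCL(cmd) do { ncclResult_t r = (cmd); if (r != ncclSuccess) { \
    printf("NCCL error: %s\n", ncclGetErrorString(r)); return 1; } } while (0)

int main()
{
    int ndev = 0;
    cudaGetDeviceCount(&ndev);
    if (ndev < 2) { printf("need at least 2 GPUs\n"); return 0; }

    const size_t chunk = 1 << 20;  // bytes exchanged between each pair of ranks
    std::vector<ncclComm_t> comms(ndev);
    std::vector<int> devs(ndev);
    for (int i = 0; i < ndev; ++i) devs[i] = i;
    CHECK_NCCL(ncclCommInitAll(comms.data(), ndev, devs.data()));

    std::vector<char*> send(ndev), recv(ndev);
    std::vector<cudaStream_t> streams(ndev);
    for (int i = 0; i < ndev; ++i) {
        cudaSetDevice(i);
        cudaMalloc(&send[i], chunk * ndev);
        cudaMalloc(&recv[i], chunk * ndev);
        cudaMemset(send[i], i, chunk * ndev);
        cudaStreamCreate(&streams[i]);
    }

    // Single group: each rank sends chunk `peer` to `peer` and receives one back.
    CHECK_NCCL(ncclGroupStart());
    for (int rank = 0; rank < ndev; ++rank) {
        cudaSetDevice(rank);
        for (int peer = 0; peer < ndev; ++peer) {
            CHECK_NCCL(ncclSend(send[rank] + peer * chunk, chunk, ncclInt8, peer,
                                comms[rank], streams[rank]));
            CHECK_NCCL(ncclRecv(recv[rank] + peer * chunk, chunk, ncclInt8, peer,
                                comms[rank], streams[rank]));
        }
    }
    CHECK_NCCL(ncclGroupEnd());

    for (int i = 0; i < ndev; ++i) {
        cudaSetDevice(i);
        cudaStreamSynchronize(streams[i]);
        cudaStreamDestroy(streams[i]);
        cudaFree(send[i]);
        cudaFree(recv[i]);
        ncclCommDestroy(comms[i]);
    }
    printf("all-to-all of %zu-byte chunks across %d GPUs done\n", chunk, ndev);
    return 0;
}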
// Maintainer: joaander #include "ComputeThermoGPU.cuh" #include "hoomd/HOOMDMath.h" #include "hoomd/VectorMath.h" #include <hip/hip_runtime.h> #include <assert.h> /*! \file ComputeThermoGPU.cu \brief Defines GPU kernel code for computing thermodynamic properties on the GPU. Used by ComputeThermoGPU. */ //! Perform partial sums of the thermo properties on the GPU /*! \param d_scratch Scratch space to hold partial sums. One element is written per block \param d_net_force Net force / pe array from ParticleData \param d_net_virial Net virial array from ParticleData \param virial_pitch pitch of 2D virial array \param d_velocity Particle velocity and mass array from ParticleData \param d_body Particle body id \param d_tag Particle tag \param d_group_members List of group members for which to sum properties \param work_size Number of particles in the group this GPU processes \param offset Offset of this GPU in list of group members \param block_offset Offset of this GPU in the array of partial sums All partial sums are packaged up in a Scalar4 to keep pointer management down. - 2*Kinetic energy is summed in .x - Potential energy is summed in .y - W is summed in .z One thread is executed per group member. That thread reads in the values for its member into shared memory and then the block performs a reduction in parallel to produce a partial sum output for the block. These partial sums are written to d_scratch[blockIdx.x]. sizeof(Scalar3)*block_size of dynamic shared memory are needed for this kernel to run. */ __global__ void gpu_compute_thermo_partial_sums(Scalar4* d_scratch, Scalar4* d_net_force, Scalar* d_net_virial, const size_t virial_pitch, Scalar4* d_velocity, unsigned int* d_body, unsigned int* d_tag, unsigned int* d_group_members, unsigned int work_size, unsigned int offset, unsigned int block_offset) { extern __shared__ Scalar3 compute_thermo_sdata[]; // determine which particle this thread works on int group_idx = blockIdx.x * blockDim.x + threadIdx.x; Scalar3 my_element; // element of scratch space read in // non-participating thread: contribute 0 to the sum my_element = make_scalar3(0, 0, 0); if (group_idx < work_size) { unsigned int idx = d_group_members[group_idx + offset]; // ignore rigid body constituent particles in the sum unsigned int body = d_body[idx]; unsigned int tag = d_tag[idx]; if (body >= MIN_FLOPPY || body == tag) { // update positions to the next timestep and update velocities to the next half step Scalar4 net_force = d_net_force[idx]; Scalar net_isotropic_virial; // (1/3)*trace of virial tensor net_isotropic_virial = Scalar(1.0 / 3.0) * (d_net_virial[0 * virial_pitch + idx] // xx + d_net_virial[3 * virial_pitch + idx] // yy + d_net_virial[5 * virial_pitch + idx]); // zz Scalar4 vel = d_velocity[idx]; Scalar mass = vel.w; // compute our contribution to the sum my_element.x = mass * (vel.x * vel.x + vel.y * vel.y + vel.z * vel.z); my_element.y = net_force.w; my_element.z = net_isotropic_virial; } } compute_thermo_sdata[threadIdx.x] = my_element; __syncthreads(); // reduce the sum in parallel int offs = blockDim.x >> 1; while (offs > 0) { if (threadIdx.x < offs) { compute_thermo_sdata[threadIdx.x].x += compute_thermo_sdata[threadIdx.x + offs].x; compute_thermo_sdata[threadIdx.x].y += compute_thermo_sdata[threadIdx.x + offs].y; compute_thermo_sdata[threadIdx.x].z += compute_thermo_sdata[threadIdx.x + offs].z; } offs >>= 1; __syncthreads(); } // write out our partial sum if (threadIdx.x == 0) { Scalar3 res = compute_thermo_sdata[0]; d_scratch[block_offset + blockIdx.x] 
= make_scalar4(res.x, res.y, res.z, 0); } } //! Perform partial sums of the pressure tensor on the GPU /*! \param d_scratch Scratch space to hold partial sums. One element is written per block \param d_net_force Net force / pe array from ParticleData \param d_net_virial Net virial array from ParticleData \param virial_pitch pitch of 2D virial array \param d_velocity Particle velocity and mass array from ParticleData \param d_body Particle body id \param d_tag Particle tag \param d_group_members List of group members for which to sum properties \param work_size Number of particles in the group \param offset Offset of this GPU in the list of group members \param block_offset Offset of this GPU in the array of partial sums \param num_blocks Total number of partial sums by all GPUs One thread is executed per group member. That thread reads in the six values (components of the pressure tensor) for its member into shared memory and then the block performs a reduction in parallel to produce a partial sum output for the block. These partial sums are written to d_scratch[i*gridDim.x + blockIdx.x], where i=0..5 is the index of the component. For this kernel to run, 6*sizeof(Scalar)*block_size of dynamic shared memory are needed. */ __global__ void gpu_compute_pressure_tensor_partial_sums(Scalar* d_scratch, Scalar4* d_net_force, Scalar* d_net_virial, const size_t virial_pitch, Scalar4* d_velocity, unsigned int* d_body, unsigned int* d_tag, unsigned int* d_group_members, unsigned int work_size, unsigned int offset, unsigned int block_offset, unsigned int num_blocks) { extern __shared__ Scalar compute_pressure_tensor_sdata[]; // determine which particle this thread works on int group_idx = blockIdx.x * blockDim.x + threadIdx.x; Scalar my_element[6]; // element of scratch space read in // non-participating threads: contribute 0 to the sum my_element[0] = 0; my_element[1] = 0; my_element[2] = 0; my_element[3] = 0; my_element[4] = 0; my_element[5] = 0; if (group_idx < work_size) { unsigned int idx = d_group_members[group_idx + offset]; // ignore rigid body constituent particles in the sum unsigned int body = d_body[idx]; unsigned int tag = d_tag[idx]; if (body >= MIN_FLOPPY || body == tag) { // compute contribution to pressure tensor and store it in my_element Scalar4 vel = d_velocity[idx]; Scalar mass = vel.w; my_element[0] = mass * vel.x * vel.x + d_net_virial[0 * virial_pitch + idx]; // xx my_element[1] = mass * vel.x * vel.y + d_net_virial[1 * virial_pitch + idx]; // xy my_element[2] = mass * vel.x * vel.z + d_net_virial[2 * virial_pitch + idx]; // xz my_element[3] = mass * vel.y * vel.y + d_net_virial[3 * virial_pitch + idx]; // yy my_element[4] = mass * vel.y * vel.z + d_net_virial[4 * virial_pitch + idx]; // yz my_element[5] = mass * vel.z * vel.z + d_net_virial[5 * virial_pitch + idx]; // zz } } for (unsigned int i = 0; i < 6; i++) compute_pressure_tensor_sdata[i * blockDim.x + threadIdx.x] = my_element[i]; __syncthreads(); // reduce the sum in parallel int offs = blockDim.x >> 1; while (offs > 0) { if (threadIdx.x < offs) { for (unsigned int i = 0; i < 6; i++) compute_pressure_tensor_sdata[i * blockDim.x + threadIdx.x] += compute_pressure_tensor_sdata[i * blockDim.x + threadIdx.x + offs]; } offs >>= 1; __syncthreads(); } // write out our partial sum if (threadIdx.x == 0) { for (unsigned int i = 0; i < 6; i++) d_scratch[num_blocks * i + blockIdx.x + block_offset] = compute_pressure_tensor_sdata[i * blockDim.x]; } } //! Perform partial sums of the rotational KE on the GPU /*! 
\param d_scratch Scratch space to hold partial sums. One element is written per block \param d_orientation Orientation quaternions from ParticleData \param d_angmom Conjugate quaternions from ParticleData \param d_inertia Moments of inertia from ParticleData \param d_body Particle body id \param d_tag Particle tag \param d_group_members List of group members for which to sum properties \param work_size Number of particles in the group processed by this GPU \param offset Offset of this GPU in the list of group members \param block_offset Output offset of this GPU */ __global__ void gpu_compute_rotational_ke_partial_sums(Scalar* d_scratch, const Scalar4* d_orientation, const Scalar4* d_angmom, const Scalar3* d_inertia, unsigned int* d_body, unsigned int* d_tag, unsigned int* d_group_members, unsigned int work_size, unsigned int offset, unsigned int block_offset) { extern __shared__ Scalar compute_ke_rot_sdata[]; // determine which particle this thread works on int group_idx = blockIdx.x * blockDim.x + threadIdx.x; Scalar my_element; // element of scratch space read in // non-participating thread: contribute 0 to the sum my_element = Scalar(0.0); if (group_idx < work_size) { unsigned int idx = d_group_members[group_idx + offset]; // ignore rigid body constituent particles in the sum unsigned int body = d_body[idx]; unsigned int tag = d_tag[idx]; if (body >= MIN_FLOPPY || body == tag) { quat<Scalar> q(d_orientation[idx]); quat<Scalar> p(d_angmom[idx]); vec3<Scalar> I(d_inertia[idx]); quat<Scalar> s(Scalar(0.5) * conj(q) * p); Scalar ke_rot(0.0); if (I.x >= EPSILON) { ke_rot += s.v.x * s.v.x / I.x; } if (I.y >= EPSILON) { ke_rot += s.v.y * s.v.y / I.y; } if (I.z >= EPSILON) { ke_rot += s.v.z * s.v.z / I.z; } // compute our contribution to the sum my_element = ke_rot * Scalar(1.0 / 2.0); } } compute_ke_rot_sdata[threadIdx.x] = my_element; __syncthreads(); // reduce the sum in parallel int offs = blockDim.x >> 1; while (offs > 0) { if (threadIdx.x < offs) compute_ke_rot_sdata[threadIdx.x] += compute_ke_rot_sdata[threadIdx.x + offs]; offs >>= 1; __syncthreads(); } // write out our partial sum if (threadIdx.x == 0) { d_scratch[blockIdx.x + block_offset] = compute_ke_rot_sdata[0]; } } //! Complete partial sums and compute final thermodynamic quantities (for pressure, only isotropic //! contribution) /*! \param d_properties Property array to write final values \param d_scratch Partial sums \param d_scratch_rot Partial sums of rotational kinetic energy \param ndof Number of degrees of freedom this group possesses \param box Box the particles are in \param D Dimensionality of the system \param group_size Number of particles in the group \param num_partial_sums Number of partial sums in \a d_scratch \param external_virial External contribution to virial (1/3 trace) \param external_energy External contribution to potential energy Only one block is executed. In that block, the partial sums are read in and reduced to final values. From the final sums, the thermodynamic properties are computed and written to d_properties. sizeof(Scalar4)*block_size bytes of shared memory are needed for this kernel to run. 
*/ __global__ void gpu_compute_thermo_final_sums(Scalar* d_properties, Scalar4* d_scratch, Scalar* d_scratch_rot, Scalar ndof, BoxDim box, unsigned int D, unsigned int group_size, unsigned int num_partial_sums, Scalar external_virial, Scalar external_energy) { extern __shared__ Scalar4 compute_thermo_final_sdata[]; Scalar4 final_sum = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0)); // sum up the values in the partial sum via a sliding window for (int start = 0; start < num_partial_sums; start += blockDim.x) { __syncthreads(); if (start + threadIdx.x < num_partial_sums) { Scalar4 scratch = d_scratch[start + threadIdx.x]; Scalar scratch_rot = d_scratch_rot[start + threadIdx.x]; compute_thermo_final_sdata[threadIdx.x] = make_scalar4(scratch.x, scratch.y, scratch.z, scratch_rot); } else compute_thermo_final_sdata[threadIdx.x] = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0)); __syncthreads(); // reduce the sum in parallel int offs = blockDim.x >> 1; while (offs > 0) { if (threadIdx.x < offs) { compute_thermo_final_sdata[threadIdx.x].x += compute_thermo_final_sdata[threadIdx.x + offs].x; compute_thermo_final_sdata[threadIdx.x].y += compute_thermo_final_sdata[threadIdx.x + offs].y; compute_thermo_final_sdata[threadIdx.x].z += compute_thermo_final_sdata[threadIdx.x + offs].z; compute_thermo_final_sdata[threadIdx.x].w += compute_thermo_final_sdata[threadIdx.x + offs].w; } offs >>= 1; __syncthreads(); } if (threadIdx.x == 0) { final_sum.x += compute_thermo_final_sdata[0].x; final_sum.y += compute_thermo_final_sdata[0].y; final_sum.z += compute_thermo_final_sdata[0].z; final_sum.w += compute_thermo_final_sdata[0].w; } } if (threadIdx.x == 0) { // compute final quantities Scalar ke_trans_total = final_sum.x * Scalar(0.5); Scalar pe_total = final_sum.y + external_energy; Scalar W = final_sum.z + external_virial; Scalar ke_rot_total = final_sum.w; // compute the pressure // volume/area & other 2D stuff needed Scalar volume; Scalar3 L = box.getL(); if (D == 2) { // "volume" is area in 2D volume = L.x * L.y; // W needs to be corrected since the 1/3 factor is built in W *= Scalar(3.0) / Scalar(2.0); } else { volume = L.x * L.y * L.z; } // pressure: P = (N * K_B * T + W)/V Scalar pressure = (Scalar(2.0) * ke_trans_total / Scalar(D) + W) / volume; // fill out the GPUArray d_properties[thermo_index::translational_kinetic_energy] = Scalar(ke_trans_total); d_properties[thermo_index::rotational_kinetic_energy] = Scalar(ke_rot_total); d_properties[thermo_index::potential_energy] = Scalar(pe_total); d_properties[thermo_index::pressure] = pressure; } } //! Complete partial sums and compute final pressure tensor /*! \param d_properties Property array to write final values \param d_scratch Partial sums \param box Box the particles are in \param group_size Number of particles in the group \param num_partial_sums Number of partial sums in \a d_scratch \param external_virial_xx External contribution to virial (xx component) \param external_virial_xy External contribution to virial (xy component) \param external_virial_xz External contribution to virial (xz component) \param external_virial_yy External contribution to virial (yy component) \param external_virial_yz External contribution to virial (yz component) \param external_virial_zz External contribution to virial (zz component) Only one block is executed. In that block, the partial sums are read in and reduced to final values. From the final sums, the thermodynamic properties are computed and written to d_properties. 
6*sizeof(Scalar)*block_size bytes of shared memory are needed for this kernel to run. */ __global__ void gpu_compute_pressure_tensor_final_sums(Scalar* d_properties, Scalar* d_scratch, BoxDim box, unsigned int group_size, unsigned int num_partial_sums, Scalar external_virial_xx, Scalar external_virial_xy, Scalar external_virial_xz, Scalar external_virial_yy, Scalar external_virial_yz, Scalar external_virial_zz, bool twod) { extern __shared__ Scalar compute_pressure_tensor_sdata[]; Scalar final_sum[6]; final_sum[0] = external_virial_xx; final_sum[1] = external_virial_xy; final_sum[2] = external_virial_xz; final_sum[3] = external_virial_yy; final_sum[4] = external_virial_yz; final_sum[5] = external_virial_zz; // sum up the values in the partial sum via a sliding window for (int start = 0; start < num_partial_sums; start += blockDim.x) { __syncthreads(); if (start + threadIdx.x < num_partial_sums) { for (unsigned int i = 0; i < 6; i++) compute_pressure_tensor_sdata[i * blockDim.x + threadIdx.x] = d_scratch[i * num_partial_sums + start + threadIdx.x]; } else for (unsigned int i = 0; i < 6; i++) compute_pressure_tensor_sdata[i * blockDim.x + threadIdx.x] = Scalar(0.0); __syncthreads(); // reduce the sum in parallel int offs = blockDim.x >> 1; while (offs > 0) { if (threadIdx.x < offs) { for (unsigned int i = 0; i < 6; i++) compute_pressure_tensor_sdata[i * blockDim.x + threadIdx.x] += compute_pressure_tensor_sdata[i * blockDim.x + threadIdx.x + offs]; } offs >>= 1; __syncthreads(); } if (threadIdx.x == 0) { for (unsigned int i = 0; i < 6; i++) final_sum[i] += compute_pressure_tensor_sdata[i * blockDim.x]; } } if (threadIdx.x == 0) { // fill out the GPUArray // we have thus far calculated the sum of the kinetic part of the pressure tensor // and the virial part, the definition includes an inverse factor of the box volume Scalar V = box.getVolume(twod); d_properties[thermo_index::pressure_xx] = final_sum[0] / V; d_properties[thermo_index::pressure_xy] = final_sum[1] / V; d_properties[thermo_index::pressure_xz] = final_sum[2] / V; d_properties[thermo_index::pressure_yy] = final_sum[3] / V; d_properties[thermo_index::pressure_yz] = final_sum[4] / V; d_properties[thermo_index::pressure_zz] = final_sum[5] / V; } } //! Compute partial sums of thermodynamic properties of a group on the GPU, /*! \param d_properties Array to write computed properties \param d_vel particle velocities and masses on the GPU \param d_body Particle body id \param d_tag Particle tag \param d_group_members List of group members \param group_size Number of group members \param box Box the particles are in \param args Additional arguments \param compute_pressure_tensor whether to compute the full pressure tensor \param compute_rotational_energy whether to compute the rotational kinetic energy \param gpu_partition Load balancing info for multi-GPU reduction This function drives gpu_compute_thermo_partial_sums and gpu_compute_thermo_final_sums, see them for details. 
*/ hipError_t gpu_compute_thermo_partial(Scalar* d_properties, Scalar4* d_vel, unsigned int* d_body, unsigned int* d_tag, unsigned int* d_group_members, unsigned int group_size, const BoxDim& box, const compute_thermo_args& args, bool compute_pressure_tensor, bool compute_rotational_energy, const GPUPartition& gpu_partition) { assert(d_properties); assert(d_vel); assert(d_group_members); assert(args.d_net_force); assert(args.d_net_virial); assert(args.d_scratch); unsigned int block_offset = 0; // iterate over active GPUs in reverse, to end up on first GPU when returning from this function for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev) { auto range = gpu_partition.getRangeAndSetGPU(idev); unsigned int nwork = range.second - range.first; dim3 grid(nwork / args.block_size + 1, 1, 1); dim3 threads(args.block_size, 1, 1); size_t shared_bytes = sizeof(Scalar3) * args.block_size; hipLaunchKernelGGL(gpu_compute_thermo_partial_sums, dim3(grid), dim3(threads), shared_bytes, 0, args.d_scratch, args.d_net_force, args.d_net_virial, args.virial_pitch, d_vel, d_body, d_tag, d_group_members, nwork, range.first, block_offset); if (compute_pressure_tensor) { assert(args.d_scratch_pressure_tensor); shared_bytes = 6 * sizeof(Scalar) * args.block_size; // run the kernel hipLaunchKernelGGL(gpu_compute_pressure_tensor_partial_sums, dim3(grid), dim3(threads), shared_bytes, 0, args.d_scratch_pressure_tensor, args.d_net_force, args.d_net_virial, args.virial_pitch, d_vel, d_body, d_tag, d_group_members, nwork, range.first, block_offset, args.n_blocks); } if (compute_rotational_energy) { assert(args.d_scratch_pressure_tensor); shared_bytes = sizeof(Scalar) * args.block_size; // run the kernel hipLaunchKernelGGL(gpu_compute_rotational_ke_partial_sums, dim3(grid), dim3(threads), shared_bytes, 0, args.d_scratch_rot, args.d_orientation, args.d_angmom, args.d_inertia, d_body, d_tag, d_group_members, nwork, range.first, block_offset); } block_offset += grid.x; } assert(block_offset <= args.n_blocks); return hipSuccess; } //! Compute thermodynamic properties of a group on the GPU /*! \param d_properties Array to write computed properties \param d_vel particle velocities and masses on the GPU \param d_body Particle body id \param d_tag Particle tag \param d_group_members List of group members \param group_size Number of group members \param box Box the particles are in \param args Additional arguments \param compute_pressure_tensor whether to compute the full pressure tensor \param compute_rotational_energy whether to compute the rotational kinetic energy \param num_blocks Number of partial sums to reduce This function drives gpu_compute_thermo_partial_sums and gpu_compute_thermo_final_sums, see them for details. 
*/ hipError_t gpu_compute_thermo_final(Scalar* d_properties, Scalar4* d_vel, unsigned int* d_body, unsigned int* d_tag, unsigned int* d_group_members, unsigned int group_size, const BoxDim& box, const compute_thermo_args& args, bool compute_pressure_tensor, bool compute_rotational_energy) { assert(d_properties); assert(d_vel); assert(d_group_members); assert(args.d_net_force); assert(args.d_net_virial); assert(args.d_scratch); // setup the grid to run the final kernel int final_block_size = 256; dim3 grid = dim3(1, 1, 1); dim3 threads = dim3(final_block_size, 1, 1); size_t shared_bytes = sizeof(Scalar4) * final_block_size; Scalar external_virial = Scalar(1.0 / 3.0) * (args.external_virial_xx + args.external_virial_yy + args.external_virial_zz); // run the kernel hipLaunchKernelGGL(gpu_compute_thermo_final_sums, dim3(grid), dim3(threads), shared_bytes, 0, d_properties, args.d_scratch, args.d_scratch_rot, args.ndof, box, args.D, group_size, args.n_blocks, external_virial, args.external_energy); if (compute_pressure_tensor) { shared_bytes = 6 * sizeof(Scalar) * final_block_size; // run the kernel hipLaunchKernelGGL(gpu_compute_pressure_tensor_final_sums, dim3(grid), dim3(threads), shared_bytes, 0, d_properties, args.d_scratch_pressure_tensor, box, group_size, args.n_blocks, args.external_virial_xx, args.external_virial_xy, args.external_virial_xz, args.external_virial_yy, args.external_virial_yz, args.external_virial_zz, args.D == 2); } return hipSuccess; }
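// ---------------------------------------------------------------------------
// Illustrative, self-contained sketch (not HOOMD code): the thermo kernels
// above follow a two-pass scheme -- a first kernel writes one partial sum per
// block into a scratch array, and a single-block kernel then reduces that
// scratch array with a sliding window, as gpu_compute_thermo_final_sums does.
// The minimal version below applies the same structure to a plain float sum;
// kernel names and block sizes are chosen for the example only.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

__global__ void partial_sums(const float* d_in, float* d_scratch, unsigned int n)
{
    extern __shared__ float sdata[];
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    sdata[threadIdx.x] = (i < n) ? d_in[i] : 0.0f;
    __syncthreads();

    // in-block tree reduction
    for (unsigned int offs = blockDim.x >> 1; offs > 0; offs >>= 1) {
        if (threadIdx.x < offs)
            sdata[threadIdx.x] += sdata[threadIdx.x + offs];
        __syncthreads();
    }
    if (threadIdx.x == 0)
        d_scratch[blockIdx.x] = sdata[0];
}

__global__ void final_sum(const float* d_scratch, float* d_out, unsigned int num_partial)
{
    extern __shared__ float sdata[];
    float total = 0.0f;
    // sliding window over the partial sums; only one block runs this kernel
    for (unsigned int start = 0; start < num_partial; start += blockDim.x) {
        sdata[threadIdx.x] = (start + threadIdx.x < num_partial)
                                 ? d_scratch[start + threadIdx.x] : 0.0f;
        __syncthreads();
        for (unsigned int offs = blockDim.x >> 1; offs > 0; offs >>= 1) {
            if (threadIdx.x < offs)
                sdata[threadIdx.x] += sdata[threadIdx.x + offs];
            __syncthreads();
        }
        if (threadIdx.x == 0)
            total += sdata[0];
        __syncthreads();
    }
    if (threadIdx.x == 0)
        *d_out = total;
}

int main()
{
    const unsigned int n = 1 << 20, block = 256, grid = (n + block - 1) / block;
    float *d_in, *d_scratch, *d_out;
    cudaMalloc(&d_in, n * sizeof(float));
    cudaMalloc(&d_scratch, grid * sizeof(float));
    cudaMalloc(&d_out, sizeof(float));

    std::vector<float> h_in(n, 1.0f);
    cudaMemcpy(d_in, h_in.data(), n * sizeof(float), cudaMemcpyHostToDevice);

    partial_sums<<<grid, block, block * sizeof(float)>>>(d_in, d_scratch, n);
    final_sum<<<1, block, block * sizeof(float)>>>(d_scratch, d_out, grid);

    float result = 0.0f;
    cudaMemcpy(&result, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    printf("sum = %f (expected %u)\n", result, n);

    cudaFree(d_in);
    cudaFree(d_scratch);
    cudaFree(d_out);
    return 0;
}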
"This file can only be used from inside common/unified/base/kernel_launch_reduction.hpp" #endif #include "core/synthesizer/implementation_selection.hpp" #include "cuda/base/device_guard.hpp" #include "cuda/base/types.hpp" #include "cuda/components/cooperative_groups.cuh" #include "cuda/components/reduction.cuh" #include "cuda/components/thread_ids.cuh" namespace gko { namespace kernels { namespace cuda { template <typename ValueType, typename KernelFunction, typename ReductionOp, typename FinalizeOp, typename... KernelArgs> __global__ __launch_bounds__( default_block_size) void generic_kernel_reduction_1d(int64 size, KernelFunction fn, ReductionOp op, FinalizeOp finalize, ValueType identity, ValueType* storage, KernelArgs... args) { __shared__ UninitializedArray<ValueType, default_block_size / config::warp_size> warp_partial; static_assert(default_block_size / config::warp_size <= config::warp_size, "needs third reduction level"); auto tidx = thread::get_thread_id_flat<int64>(); auto grid_size = thread::get_thread_num_flat<int64>(); auto warp = group::tiled_partition<config::warp_size>(group::this_thread_block()); auto partial = identity; for (int64 i = tidx; i < size; i += grid_size) { partial = op(partial, fn(i, args...)); } partial = reduce(warp, partial, op); if (warp.thread_rank() == 0) { warp_partial[threadIdx.x / config::warp_size] = partial; } __syncthreads(); if (threadIdx.x < config::warp_size) { partial = reduce(warp, threadIdx.x < default_block_size / config::warp_size ? warp_partial[threadIdx.x] : identity, op); if (threadIdx.x == 0) { storage[blockIdx.x] = finalize(partial); } } } template <typename ValueType, typename KernelFunction, typename ReductionOp, typename FinalizeOp, typename... KernelArgs> __global__ __launch_bounds__( default_block_size) void generic_kernel_reduction_2d(int64 rows, int64 cols, KernelFunction fn, ReductionOp op, FinalizeOp finalize, ValueType identity, ValueType* storage, KernelArgs... args) { __shared__ UninitializedArray<ValueType, default_block_size / config::warp_size> warp_partial; static_assert(default_block_size / config::warp_size <= config::warp_size, "needs third reduction level"); auto tidx = thread::get_thread_id_flat<int64>(); auto grid_size = thread::get_thread_num_flat<int64>(); auto warp = group::tiled_partition<config::warp_size>(group::this_thread_block()); auto partial = identity; for (int64 i = tidx; i < rows * cols; i += grid_size) { const auto row = i / cols; const auto col = i % cols; partial = op(partial, fn(row, col, args...)); } partial = reduce(warp, partial, op); if (warp.thread_rank() == 0) { warp_partial[threadIdx.x / config::warp_size] = partial; } __syncthreads(); if (threadIdx.x < config::warp_size) { partial = reduce(warp, threadIdx.x < default_block_size / config::warp_size ? warp_partial[threadIdx.x] : identity, op); if (threadIdx.x == 0) { storage[blockIdx.x] = finalize(partial); } } } template <typename ValueType, typename KernelFunction, typename ReductionOp, typename FinalizeOp, typename... KernelArgs> void run_kernel_reduction(std::shared_ptr<const CudaExecutor> exec, KernelFunction fn, ReductionOp op, FinalizeOp finalize, ValueType identity, ValueType* result, size_type size, KernelArgs&&... 
args) { constexpr int oversubscription = 16; gko::cuda::device_guard guard{exec->get_device_id()}; constexpr auto block_size = default_block_size; const auto num_blocks = std::min<int64>( ceildiv(size, block_size), exec->get_num_warps() * oversubscription); if (num_blocks > 1) { Array<ValueType> partial{exec, static_cast<size_type>(num_blocks)}; generic_kernel_reduction_1d<<<num_blocks, block_size>>>( static_cast<int64>(size), fn, op, [] __device__(auto v) { return v; }, as_cuda_type(identity), as_cuda_type(partial.get_data()), map_to_device(args)...); generic_kernel_reduction_1d<<<1, block_size>>>( static_cast<int64>(num_blocks), [] __device__(auto i, auto v) { return v[i]; }, op, finalize, as_cuda_type(identity), as_cuda_type(result), as_cuda_type(partial.get_const_data())); } else { generic_kernel_reduction_1d<<<1, block_size>>>( static_cast<int64>(size), fn, op, finalize, as_cuda_type(identity), as_cuda_type(result), map_to_device(args)...); } } template <typename ValueType, typename KernelFunction, typename ReductionOp, typename FinalizeOp, typename... KernelArgs> void run_kernel_reduction(std::shared_ptr<const CudaExecutor> exec, KernelFunction fn, ReductionOp op, FinalizeOp finalize, ValueType identity, ValueType* result, dim<2> size, KernelArgs&&... args) { constexpr int oversubscription = 16; gko::cuda::device_guard guard{exec->get_device_id()}; constexpr auto block_size = default_block_size; const auto rows = static_cast<int64>(size[0]); const auto cols = static_cast<int64>(size[1]); const auto num_blocks = std::min<int64>(ceildiv(rows * cols, block_size), exec->get_num_warps() * oversubscription); if (num_blocks > 1) { Array<ValueType> partial{exec, static_cast<size_type>(num_blocks)}; generic_kernel_reduction_2d<<<num_blocks, block_size>>>( rows, cols, fn, op, [] __device__(auto v) { return v; }, as_cuda_type(identity), as_cuda_type(partial.get_data()), map_to_device(args)...); generic_kernel_reduction_1d<<<1, block_size>>>( static_cast<int64>(num_blocks), [] __device__(auto i, auto v) { return v[i]; }, op, finalize, as_cuda_type(identity), as_cuda_type(result), as_cuda_type(partial.get_const_data())); } else { generic_kernel_reduction_2d<<<1, block_size>>>( rows, cols, fn, op, finalize, as_cuda_type(identity), as_cuda_type(result), map_to_device(args)...); } } template <int subwarp_size, typename ValueType, typename KernelFunction, typename ReductionOp, typename FinalizeOp, typename... KernelArgs> __global__ __launch_bounds__(default_block_size) void generic_kernel_row_reduction_2d( int64 rows, int64 cols, int64 col_blocks, KernelFunction fn, ReductionOp op, FinalizeOp finalize, ValueType identity, ValueType* result, int64 result_stride, KernelArgs... 
args) { const auto idx = thread::get_subwarp_id_flat<subwarp_size, int64>(); const auto row = idx % rows; const auto col_block = idx / rows; if (col_block >= col_blocks) { return; } const auto cols_per_part = ceildiv(ceildiv(cols, subwarp_size), col_blocks) * subwarp_size; const auto begin = cols_per_part * col_block; const auto end = min(begin + cols_per_part, cols); auto subwarp = group::tiled_partition<subwarp_size>(group::this_thread_block()); auto partial = identity; for (auto col = begin + subwarp.thread_rank(); col < end; col += subwarp_size) { partial = op(partial, fn(row, col, args...)); } partial = reduce(subwarp, partial, op); if (subwarp.thread_rank() == 0) { result[(row + col_block * rows) * result_stride] = finalize(partial); } } template <int subwarp_size, typename ValueType, typename KernelFunction, typename ReductionOp, typename FinalizeOp, typename... KernelArgs> __global__ __launch_bounds__(default_block_size) void generic_kernel_col_reduction_2d_small( int64 rows, int64 cols, KernelFunction fn, ReductionOp op, FinalizeOp finalize, ValueType identity, ValueType* result, KernelArgs... args) { constexpr auto warp_size = config::warp_size; constexpr auto warps_per_block = default_block_size / warp_size; // stores the subwarp_size partial sums from each warp, grouped by warp constexpr auto shared_storage = warps_per_block * subwarp_size; __shared__ UninitializedArray<ValueType, shared_storage> block_partial; const auto subwarp_id = thread::get_subwarp_id_flat<subwarp_size, int64>(); const auto local_warp_id = threadIdx.x / warp_size; const auto local_subwarp_id = threadIdx.x % warp_size / subwarp_size; const auto subwarp_num = thread::get_subwarp_num_flat<subwarp_size, int64>(); const auto block = group::this_thread_block(); const auto warp = group::tiled_partition<warp_size>(block); const auto warp_rank = warp.thread_rank(); const auto subwarp_rank = warp_rank % subwarp_size; const auto col = static_cast<int64>(subwarp_rank); auto partial = identity; // accumulate within a thread if (col < cols) { for (auto row = subwarp_id; row < rows; row += subwarp_num) { partial = op(partial, fn(row, col, args...)); } } // accumulate between all subwarps in the warp #pragma unroll for (unsigned i = subwarp_size; i < warp_size; i *= 2) { partial = op(partial, warp.shfl_xor(partial, i)); } // store the result to shared memory if (local_subwarp_id == 0) { block_partial[local_warp_id * subwarp_size + subwarp_rank] = partial; } block.sync(); // in a single thread: accumulate the results if (local_warp_id == 0) { partial = identity; // accumulate the partial results within a thread if (shared_storage >= warp_size) { #pragma unroll for (int i = 0; i < shared_storage; i += warp_size) { partial = op(partial, block_partial[i + warp_rank]); } } else if (warp_rank < shared_storage) { partial = op(partial, block_partial[warp_rank]); } // accumulate between all subwarps in the warp #pragma unroll for (unsigned i = subwarp_size; i < warp_size; i *= 2) { partial = op(partial, warp.shfl_xor(partial, i)); } if (warp_rank < cols) { result[warp_rank + blockIdx.x * cols] = finalize(partial); } } } template <typename ValueType, typename KernelFunction, typename ReductionOp, typename FinalizeOp, typename... KernelArgs> __global__ __launch_bounds__(default_block_size) void generic_kernel_col_reduction_2d_blocked( int64 rows, int64 cols, KernelFunction fn, ReductionOp op, FinalizeOp finalize, ValueType identity, ValueType* result, KernelArgs... 
args) { constexpr auto warp_size = config::warp_size; __shared__ UninitializedArray<ValueType, default_block_size> block_partial; const auto warp_id = thread::get_subwarp_id_flat<warp_size, int64>(); const auto warp_num = thread::get_subwarp_num_flat<warp_size, int64>(); const auto block = group::this_thread_block(); const auto warp = group::tiled_partition<warp_size>(block); const auto warp_rank = warp.thread_rank(); const auto col = warp_rank + static_cast<int64>(blockIdx.y) * warp_size; auto partial = identity; // accumulate within a thread if (col < cols) { for (auto row = warp_id; row < rows; row += warp_num) { partial = op(partial, fn(row, col, args...)); } } block_partial[threadIdx.x] = partial; block.sync(); // in a single warp: accumulate the results if (threadIdx.x < warp_size) { partial = identity; // accumulate the partial results within a thread #pragma unroll for (int i = 0; i < default_block_size; i += warp_size) { partial = op(partial, block_partial[i + warp_rank]); } if (col < cols) { result[col + blockIdx.x * cols] = finalize(partial); } } } template <typename ValueType, typename ReductionOp, typename FinalizeOp> __global__ __launch_bounds__(default_block_size) void generic_kernel_reduction_finalize_2d( int64 num_results, int64 num_blocks, ReductionOp op, FinalizeOp finalize, ValueType identity, const ValueType* input, int64 result_stride, ValueType* result) { const auto idx = thread::get_thread_id_flat<int64>(); if (idx >= num_results) { return; } auto partial = identity; for (int64 block = 0; block < num_blocks; block++) { partial = op(partial, input[idx + block * num_results]); } result[idx * result_stride] = finalize(partial); } namespace { template <int subwarp_size, typename ValueType, typename KernelFunction, typename ReductionOp, typename FinalizeOp, typename... KernelArgs> void run_generic_kernel_row_reduction(syn::value_list<int, subwarp_size>, int64 rows, int64 cols, int64 col_blocks, KernelFunction fn, ReductionOp op, FinalizeOp finalize, ValueType identity, ValueType* result, int64 result_stride, KernelArgs... args) { const auto num_blocks = ceildiv(rows * col_blocks * subwarp_size, default_block_size); generic_kernel_row_reduction_2d<subwarp_size> <<<num_blocks, default_block_size>>>( rows, cols, col_blocks, fn, op, finalize, as_cuda_type(identity), as_cuda_type(result), result_stride, args...); } GKO_ENABLE_IMPLEMENTATION_SELECTION(select_run_generic_kernel_row_reduction, run_generic_kernel_row_reduction); template <int subwarp_size, typename ValueType, typename KernelFunction, typename ReductionOp, typename FinalizeOp, typename... MappedKernelArgs> void run_generic_col_reduction_small(syn::value_list<int, subwarp_size>, int64 max_blocks, std::shared_ptr<const CudaExecutor> exec, KernelFunction fn, ReductionOp op, FinalizeOp finalize, ValueType identity, ValueType* result, dim<2> size, MappedKernelArgs... 
args) { const auto rows = static_cast<int64>(size[0]); const auto cols = static_cast<int64>(size[1]); const auto num_blocks = std::min<int64>( ceildiv(rows * subwarp_size, default_block_size), max_blocks); if (num_blocks <= 1) { generic_kernel_col_reduction_2d_small<subwarp_size> <<<1, default_block_size>>>(rows, cols, fn, op, finalize, as_cuda_type(identity), as_cuda_type(result), args...); } else { Array<ValueType> tmp_storage{exec, static_cast<size_type>(num_blocks * cols)}; generic_kernel_col_reduction_2d_small<subwarp_size> <<<num_blocks, default_block_size>>>( rows, cols, fn, op, [] __device__(auto v) { return v; }, as_cuda_type(identity), as_cuda_type(tmp_storage.get_data()), args...); generic_kernel_reduction_finalize_2d<<< ceildiv(cols, default_block_size), default_block_size>>>( cols, num_blocks, op, finalize, as_cuda_type(identity), as_cuda_type(tmp_storage.get_const_data()), 1, as_cuda_type(result)); } } GKO_ENABLE_IMPLEMENTATION_SELECTION(select_generic_col_reduction_small, run_generic_col_reduction_small); } // namespace template <typename ValueType, typename KernelFunction, typename ReductionOp, typename FinalizeOp, typename... KernelArgs> void run_kernel_row_reduction(std::shared_ptr<const CudaExecutor> exec, KernelFunction fn, ReductionOp op, FinalizeOp finalize, ValueType identity, ValueType* result, size_type result_stride, dim<2> size, KernelArgs&&... args) { using subwarp_sizes = syn::value_list<int, 1, 2, 4, 8, 16, 32, config::warp_size>; constexpr int oversubscription = 16; gko::cuda::device_guard guard{exec->get_device_id()}; const auto rows = static_cast<int64>(size[0]); const auto cols = static_cast<int64>(size[1]); const auto resources = exec->get_num_warps() * config::warp_size * oversubscription; if (rows * cols > resources && rows < cols) { const auto col_blocks = ceildiv(rows * cols, resources); Array<ValueType> partial{exec, static_cast<size_type>(col_blocks * rows)}; const auto num_blocks = ceildiv(rows * col_blocks * config::warp_size, default_block_size); generic_kernel_row_reduction_2d<config::warp_size> <<<num_blocks, default_block_size>>>( rows, cols, col_blocks, fn, op, [] __device__(auto v) { return v; }, as_cuda_type(identity), as_cuda_type(partial.get_data()), 1, map_to_device(args)...); const auto num_finalize_blocks = ceildiv(rows, default_block_size); generic_kernel_reduction_finalize_2d<<<num_finalize_blocks, default_block_size>>>( rows, col_blocks, op, finalize, as_cuda_type(identity), as_cuda_type(partial.get_const_data()), static_cast<int64>(result_stride), as_cuda_type(result)); } else { select_run_generic_kernel_row_reduction( subwarp_sizes(), [cols](int compiled_subwarp_size) { return compiled_subwarp_size >= cols || compiled_subwarp_size == config::warp_size; }, syn::value_list<int>(), syn::type_list<>(), rows, cols, 1, fn, op, finalize, identity, result, static_cast<int64>(result_stride), map_to_device(args)...); } } template <typename ValueType, typename KernelFunction, typename ReductionOp, typename FinalizeOp, typename... KernelArgs> void run_kernel_col_reduction(std::shared_ptr<const CudaExecutor> exec, KernelFunction fn, ReductionOp op, FinalizeOp finalize, ValueType identity, ValueType* result, dim<2> size, KernelArgs&&... 
args) { using subwarp_sizes = syn::value_list<int, 1, 2, 4, 8, 16, 32, config::warp_size>; constexpr int oversubscription = 16; gko::cuda::device_guard guard{exec->get_device_id()}; const auto rows = static_cast<int64>(size[0]); const auto cols = static_cast<int64>(size[1]); const auto max_blocks = exec->get_num_warps() * config::warp_size * oversubscription / default_block_size; if (cols <= config::warp_size) { select_generic_col_reduction_small( subwarp_sizes(), [cols](int compiled_subwarp_size) { return compiled_subwarp_size >= cols || compiled_subwarp_size == config::warp_size; }, syn::value_list<int>(), syn::type_list<>(), max_blocks, exec, fn, op, finalize, identity, result, size, map_to_device(args)...); } else { const auto col_blocks = ceildiv(cols, config::warp_size); const auto row_blocks = ceildiv(std::min<int64>( ceildiv(rows * config::warp_size, default_block_size), max_blocks), col_blocks); if (row_blocks <= 1) { generic_kernel_col_reduction_2d_blocked<<<dim3(1, col_blocks), default_block_size>>>( rows, cols, fn, op, finalize, as_cuda_type(identity), as_cuda_type(result), map_to_device(args)...); } else { Array<ValueType> tmp_storage{ exec, static_cast<size_type>(row_blocks * cols)}; generic_kernel_col_reduction_2d_blocked<<< dim3(row_blocks, col_blocks), default_block_size>>>( rows, cols, fn, op, [] __device__(auto v) { return v; }, as_cuda_type(identity), as_cuda_type(tmp_storage.get_data()), map_to_device(args)...); generic_kernel_reduction_finalize_2d<<< ceildiv(cols, default_block_size), default_block_size>>>( cols, row_blocks, op, finalize, as_cuda_type(identity), as_cuda_type(tmp_storage.get_const_data()), 1, as_cuda_type(result)); } } } } // namespace cuda } // namespace kernels } // namespace gko
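// ---------------------------------------------------------------------------
// Illustrative, self-contained sketch (not Ginkgo code): generic_kernel_
// reduction_1d reduces within each warp first, stores one partial per warp in
// shared memory, and lets the first warp combine those partials. The
// standalone kernel below shows that warp-then-block pattern with
// __shfl_down_sync; the grid-stride accumulation and the second finalize
// kernel used by run_kernel_reduction are omitted, and all names and sizes
// are assumptions for the example.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

constexpr int kBlockSize = 256;
constexpr int kWarpSize  = 32;

__device__ float warp_reduce_sum(float v)
{
    // butterfly-style shuffle reduction within a full warp
    for (int offset = kWarpSize / 2; offset > 0; offset /= 2)
        v += __shfl_down_sync(0xffffffffu, v, offset);
    return v;
}

__global__ void block_reduce_sum(const float* in, float* block_results, int n)
{
    __shared__ float warp_partial[kBlockSize / kWarpSize];

    int i = blockIdx.x * blockDim.x + threadIdx.x;
    float partial = (i < n) ? in[i] : 0.0f;

    // First level: reduce within each warp, lane 0 stores the warp's partial.
    partial = warp_reduce_sum(partial);
    if (threadIdx.x % kWarpSize == 0)
        warp_partial[threadIdx.x / kWarpSize] = partial;
    __syncthreads();

    // Second level: the first warp reduces the per-warp partials.
    if (threadIdx.x < kWarpSize) {
        partial = (threadIdx.x < kBlockSize / kWarpSize) ? warp_partial[threadIdx.x] : 0.0f;
        partial = warp_reduce_sum(partial);
        if (threadIdx.x == 0)
            block_results[blockIdx.x] = partial;
    }
}

int main()
{
    const int n = 1 << 16;
    const int grid = (n + kBlockSize - 1) / kBlockSize;
    std::vector<float> h_in(n, 1.0f);

    float *d_in, *d_blocks;
    cudaMalloc(&d_in, n * sizeof(float));
    cudaMalloc(&d_blocks, grid * sizeof(float));
    cudaMemcpy(d_in, h_in.data(), n * sizeof(float), cudaMemcpyHostToDevice);

    block_reduce_sum<<<grid, kBlockSize>>>(d_in, d_blocks, n);

    std::vector<float> h_blocks(grid);
    cudaMemcpy(h_blocks.data(), d_blocks, grid * sizeof(float), cudaMemcpyDeviceToHost);
    float total = 0.0f;
    for (float b : h_blocks) total += b;
    printf("sum = %f (expected %d)\n", total, n);

    cudaFree(d_in);
    cudaFree(d_blocks);
    return 0;
}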
#include <limits> #include <vector> #include "k2/csrc/array_ops.h" #include "k2/csrc/fsa_algo.h" #include "k2/csrc/fsa_utils.h" #include "k2/csrc/hash.h" #include "k2/csrc/macros.h" #include "k2/csrc/ragged_ops.h" namespace k2 { namespace intersect_internal { struct StateInfo { // the state_idx01 in a_fsas_. int32_t a_fsas_state_idx01; // the state_idx01 in b_fsas_. int32_t b_fsas_state_idx01; }; struct ArcInfo { int32_t a_arc_idx012; // The idx012 of the source arc in a_fsas_. int32_t b_arc_idx012; // The idx012 of the source arc in b_fsas_. // Note: other fields, e.g. the label and score, can be worked // out from the arc-indexes. }; /* static std::ostream &operator<<(std::ostream &os, const StateInfo &s) { os << "StateInfo{" << s.a_fsas_state_idx01 << "," << s.b_fsas_state_idx01 << "}"; return os; } static std::ostream &operator<<(std::ostream &os, const ArcInfo &a) { os << "ArcInfo{" << a.a_arc_idx012 << "," << a.b_arc_idx012 << "}"; return os; } */ } // namespace intersect_internal using namespace intersect_internal; // NOLINT /* Intersection (a.k.a. composition) that corresponds to decoding for speech recognition-type tasks. Can use either different decoding graphs (one per acoustic sequence) or a shared graph. How to use this object: Construct it Call Intersect() Call FormatOutput() */ class DeviceIntersector { public: /** This object does intersection on device (the general case, but without treating epsilons specially) @param [in] a_fsas An FsaVec (3 axes), must be valid. Caution: in future, we may require that it be arc-sorted. @param [in] b_fsas An FsaVec (3 axes), must be valid. @param [in] b_to_a_map Map from fsa-index in b_fsas to the index of the FSA in a_fsas that we want to intersect it with. @param [in] sorted_match_a If true, the arcs of a_fsas arcs must be sorted by label (checked by calling code via properties), and we'll use a matching approach that requires this. Does not fully check its args (see wrapping code). After constructing this object, call Intersect() and then FormatOutput(). */ DeviceIntersector(FsaVec &a_fsas, FsaVec &b_fsas, const Array1<int32_t> &b_to_a_map, bool sorted_match_a): c_(a_fsas.Context()), a_fsas_(a_fsas), sorted_match_a_(sorted_match_a), b_fsas_(b_fsas), b_to_a_map_(b_to_a_map), a_states_multiple_(b_fsas_.TotSize(1) | 1) { NVTX_RANGE(K2_FUNC); int32_t num_key_bits = NumBitsNeededFor(a_fsas_.shape.MaxSize(1) * (int64_t)a_states_multiple_); // in future the accessor for num_key_bits==32 may be more efficient, and // there's no point leaving >32 bits for the value since our arrays don't // support that and also we can't have more values than the #keys // since the values are consecutive. if (num_key_bits < 32) num_key_bits = 32; // We may want to tune this default hash size eventually. // We will expand the hash as needed. int32_t hash_size = 4 * RoundUpToNearestPowerOfTwo(b_fsas.NumElements()), min_hash_size = 1 << 16; if (hash_size < min_hash_size) hash_size = min_hash_size; // caution: also use hash_size in FirstIter() as default size of various arrays. 
int32_t num_value_bits = std::max<int32_t>(NumBitsNeededFor(hash_size - 1), 64 - num_key_bits); state_pair_to_state_ = Hash(c_, hash_size, num_key_bits, num_value_bits); K2_CHECK(c_->IsCompatible(*b_fsas.Context())); K2_CHECK(c_->IsCompatible(*b_to_a_map.Context())); } void FirstIter() { NVTX_RANGE(K2_FUNC); int32_t initial_size = state_pair_to_state_.NumBuckets(); arcs_row_ids_ = Array1<int32_t>(c_, initial_size); arcs_row_ids_.Resize(0, true); arcs_ = Array1<ArcInfo>(c_, initial_size); arcs_.Resize(0, true); int32_t num_fsas = b_fsas_.Dim0(); states_ = Array1<StateInfo>(c_, initial_size); Renumbering renumber_initial_states(c_, num_fsas); char *keep_initial_states = renumber_initial_states.Keep().Data(); const int32_t *b_fsas_row_splits1_data = b_fsas_.RowSplits(1).Data(), *b_to_a_map_data = b_to_a_map_.Data(), *a_fsas_row_splits1_data = a_fsas_.RowSplits(1).Data(); K2_EVAL(c_, num_fsas, lambda_set_keep, (int32_t i) -> void { int nonempty_b = b_fsas_row_splits1_data[i+1] > b_fsas_row_splits1_data[i], i_a = b_to_a_map_data[i], nonempty_a = a_fsas_row_splits1_data[i_a+1] > a_fsas_row_splits1_data[i_a]; keep_initial_states[i] = (char)(nonempty_a & nonempty_b); }); int32_t num_initial_states = renumber_initial_states.New2Old().Dim(); states_.Resize(num_initial_states, true); final_states_ = Array1<StateInfo>(c_, num_initial_states); StateInfo *states_data = states_.Data(), *final_states_data = final_states_.Data(); const int32_t *new2old_data = renumber_initial_states.New2Old().Data(); K2_EVAL(c_, num_initial_states, lambda_set_state_info, (int32_t new_i) -> void { int32_t b_idx0 = new2old_data[new_i], b_idx01 = b_fsas_row_splits1_data[b_idx0], a_idx0 = b_to_a_map_data[b_idx0], a_idx01 = a_fsas_row_splits1_data[a_idx0]; StateInfo info; info.a_fsas_state_idx01 = a_idx01; info.b_fsas_state_idx01 = b_idx01; states_data[new_i] = info; // now set final-state info. info.a_fsas_state_idx01 = a_fsas_row_splits1_data[a_idx0 + 1] - 1; info.b_fsas_state_idx01 = b_fsas_row_splits1_data[b_idx0 + 1] - 1; final_states_data[new_i] = info; }); iter_to_state_row_splits_cpu_.reserve(128); iter_to_state_row_splits_cpu_.push_back(0); iter_to_state_row_splits_cpu_.push_back(num_initial_states); } /* Adds the StateInfo for the final-states to the states_ array. */ void LastIter() { NVTX_RANGE(K2_FUNC); int32_t num_final_states = final_states_.Dim(); int32_t cur_num_states = states_.Dim(), tot_num_states = cur_num_states + num_final_states; states_.Resize(tot_num_states); Array1<StateInfo> dest = states_.Arange(cur_num_states, tot_num_states); Assign(final_states_, &dest); K2_CHECK_EQ(cur_num_states, iter_to_state_row_splits_cpu_.back()); // Remove this line. iter_to_state_row_splits_cpu_.push_back(tot_num_states); } /* Does the main work of intersection/composition, but doesn't produce any output; the output is provided when you call FormatOutput(). */ void Intersect() { FirstIter(); if (sorted_match_a_) ForwardSortedA(); else Forward(); LastIter(); } /* Creates and returns a ragged array indexed [fsa][state][arc], containing the result of intersection. (Note: we don't guarantee that all states are coaccessible (i.e. can reach the end); if that might be an issue in your case, you can call Connect() afterward. @param [out] arc_map_a_out If non-NULL, the map from (arc-index of returned FsaVec) to (arc-index in a_fsas_) will be written to here. @param [out] arc_map_b_out If non-NULL, the map from (arc-index of returned FsaVec) to (arc-index in b_fsas_) will be written to here. 
@return Returns a FsaVec that is the composed result. It may contain states and/or arcs that are not co-accessible. */ FsaVec FormatOutput(Array1<int32_t> *arc_map_a_out, Array1<int32_t> *arc_map_b_out) { NVTX_RANGE(K2_FUNC); int32_t num_key_bits = state_pair_to_state_.NumKeyBits(), num_value_bits = state_pair_to_state_.NumValueBits(); if (num_key_bits + num_value_bits == 64) { return FormatOutputTpl<Hash::GenericAccessor>(arc_map_a_out, arc_map_b_out); } else { return FormatOutputTpl<Hash::PackedAccessor>(arc_map_a_out, arc_map_b_out); } } template <typename AccessorT> FsaVec FormatOutputTpl(Array1<int32_t> *arc_map_a_out, Array1<int32_t> *arc_map_b_out) { NVTX_RANGE(K2_FUNC); int32_t num_states = iter_to_state_row_splits_cpu_.back(), num_iters = iter_to_state_row_splits_cpu_.size() - 1, num_fsas = b_fsas_.Dim0(); Array1<int32_t> row_splits1(c_, iter_to_state_row_splits_cpu_), row_ids1(c_, num_states); RowSplitsToRowIds(row_splits1, &row_ids1); const int32_t *b_fsas_row_ids1_data = b_fsas_.RowIds(1).Data(); int32_t *row_ids1_data = row_ids1.Data(); StateInfo *states_data = states_.Data(); K2_CHECK_EQ(num_states, states_.Dim()); /* currently, row_ids1 maps from state-index (in states_) to iteration index 0 <= t < num_iters. We next modify it so it maps from state to a number that encodes (iter, FSA-index), i.e. we modify it from iter -> iter * num_fsas + fsa_idx0. Later we'll reorder the rows so that each FSA has all its states together. */ K2_EVAL(c_, num_states, lambda_modify_row_ids, (int32_t i) -> void { int32_t iter = row_ids1_data[i]; StateInfo info = states_data[i]; // note: the FSA-index of the output is the same as that in b_fsas_, // but not necessarily in a_fsas_, thanks to b_to_a_map_. int32_t fsa_idx0 = b_fsas_row_ids1_data[info.b_fsas_state_idx01], new_row_id = iter * num_fsas + fsa_idx0; K2_DCHECK_LT(static_cast<uint32_t>(fsa_idx0), static_cast<uint32_t>(num_fsas)); row_ids1_data[i] = new_row_id; }); Array1<int32_t> row_ids2(row_ids1), // we'll later interpret this as the 2nd // level's row-ids. row_splits2(c_, num_iters * num_fsas + 1); RowIdsToRowSplits(row_ids2, &row_splits2); // We'll use 'fsaiter_new2old' to effectively transpose two axes, the // iteration and FSA axes. We want the FSA to be the more-slowly-varying // index, so we can have all states for FSA 0 first. Array1<int32_t> fsaiter_new2old(c_, num_iters * num_fsas); int32_t *fsaiter_new2old_data = fsaiter_new2old.Data(); K2_EVAL(c_, num_iters * num_fsas, lambda_set_reordering, (int32_t i) -> void { int32_t fsa_idx = i / num_iters, iter_idx = i % num_iters; int32_t old_i = iter_idx * num_fsas + fsa_idx; fsaiter_new2old_data[i] = old_i; }); Array1<int32_t> &row_ids3(arcs_row_ids_); Array1<int32_t> row_splits3(c_, num_states + 1); RowIdsToRowSplits(row_ids3, &row_splits3); RaggedShape layer2 = RaggedShape2(&row_splits2, &row_ids2, -1), layer3 = RaggedShape2(&row_splits3, &row_ids3, -1); Array1<int32_t> states_new2old, arcs_new2old; RaggedShape layer2_new = Index(layer2, 0, fsaiter_new2old, &states_new2old), layer3_new = Index(layer3, 0, states_new2old, &arcs_new2old); RaggedShape layer1_new = RegularRaggedShape(c_, num_fsas, num_iters); // We remove axis 1, which represents 'iteration-index' (this is not // something the user needs to know or care about). 
RaggedShape temp = ComposeRaggedShapes3(layer1_new, layer2_new, layer3_new); RaggedShape ans_shape = RemoveAxis(temp, 1); int32_t num_arcs = arcs_.Dim(); K2_CHECK_EQ(ans_shape.NumElements(), num_arcs); Array1<Arc> ans_values(c_, num_arcs); int32_t *arc_map_a_data = nullptr, *arc_map_b_data = nullptr; if (arc_map_a_out) { *arc_map_a_out = Array1<int32_t>(c_, num_arcs); arc_map_a_data = arc_map_a_out->Data(); } if (arc_map_b_out) { *arc_map_b_out = Array1<int32_t>(c_, num_arcs); arc_map_b_data = arc_map_b_out->Data(); } Array1<int32_t> states_old2new = InvertPermutation(states_new2old); ArcInfo *arc_info_data = arcs_.Data(); const Arc *a_arcs_data = a_fsas_.values.Data(), *b_arcs_data = b_fsas_.values.Data(); Arc *arcs_out_data = ans_values.Data(); const int32_t *arcs_new2old_data = arcs_new2old.Data(), *states_new2old_data = states_new2old.Data(), *states_old2new_data = states_old2new.Data(); const int32_t *ans_shape_row_ids2 = ans_shape.RowIds(2).Data(), *ans_shape_row_ids1 = ans_shape.RowIds(1).Data(), *ans_shape_row_splits1 = ans_shape.RowSplits(1).Data(); const int32_t *b_fsas_row_ids2_data = b_fsas_.RowIds(2).Data(); int32_t a_states_multiple = a_states_multiple_; AccessorT state_pair_to_state_acc = state_pair_to_state_.GetAccessor<AccessorT>(); // arc_idx012 here is w.r.t. ans_shape that currently has axes indexed // [fsa][state][arc]. K2_EVAL(c_, num_arcs, lambda_set_output_data, (int32_t new_arc_idx012) -> void { int32_t new_src_state_idx01 = ans_shape_row_ids2[new_arc_idx012], old_arc_idx012 = arcs_new2old_data[new_arc_idx012]; ArcInfo info = arc_info_data[old_arc_idx012]; int32_t fsa_idx0 = ans_shape_row_ids1[new_src_state_idx01]; Arc a_arc = a_arcs_data[info.a_arc_idx012], b_arc = b_arcs_data[info.b_arc_idx012]; if (arc_map_a_data) arc_map_a_data[new_arc_idx012] = info.a_arc_idx012; if (arc_map_b_data) arc_map_b_data[new_arc_idx012] = info.b_arc_idx012; int32_t new_dest_state_idx01; // index of the dest_state w.r.t // ans_shape if (a_arc.label == -1) { new_dest_state_idx01 = ans_shape_row_splits1[fsa_idx0 + 1] - 1; } else { // first work out old_dest_state_idx01, which is the index (into // states_) of the dest-state. int32_t b_src_state_idx01 = b_fsas_row_ids2_data[info.b_arc_idx012], b_dest_state_idx01 = b_src_state_idx01 + b_arc.dest_state - b_arc.src_state, a_dest_state_idx1 = a_arc.dest_state; uint64_t hash_key = (((uint64_t)a_dest_state_idx1) * a_states_multiple) + b_dest_state_idx01; uint64_t value = 0; bool ans = state_pair_to_state_acc.Find(hash_key, &value); K2_CHECK_EQ(ans, true); int32_t old_dest_state_idx01 = static_cast<uint32_t>(value); new_dest_state_idx01 = states_old2new_data[old_dest_state_idx01]; } int32_t fsa_idx0x = ans_shape_row_splits1[fsa_idx0], dest_state_idx1 = new_dest_state_idx01 - fsa_idx0x, src_state_idx1 = new_src_state_idx01 - fsa_idx0x; Arc out_arc; out_arc.src_state = src_state_idx1; out_arc.dest_state = dest_state_idx1; K2_CHECK_EQ(a_arc.label, b_arc.label); out_arc.label = a_arc.label; out_arc.score = a_arc.score + b_arc.score; arcs_out_data[new_arc_idx012] = out_arc; }); return Ragged<Arc>(ans_shape, ans_values); } void Forward() { NVTX_RANGE(K2_FUNC); for (int32_t t = 0; ; t++) { K2_CHECK_EQ(t + 2, int32_t(iter_to_state_row_splits_cpu_.size())); int32_t state_begin = iter_to_state_row_splits_cpu_[t], state_end = iter_to_state_row_splits_cpu_[t + 1], num_states = state_end - state_begin; if (num_states == 0) { // It saves a little processing later to remove the last, empty, // iteration-index. 
iter_to_state_row_splits_cpu_.pop_back(); break; // Nothing left to process. } // We need to process output-states numbered state_begin..state_end-1. // Row 0 of num_arcs will contain the num_arcs leaving each state // in b in this batch; row 1 will contain (num_arcs in a * num_arcs in b). // If the total of row 1 is small enough and we're using the device, // we'll process all pairs of arcs; otherwise we'll do a logarithmic // search. Array2<int32_t> num_arcs(c_, 2, num_states + 1); auto num_arcs_acc = num_arcs.Accessor(); StateInfo *states_data = states_.Data(); const int32_t *a_fsas_row_splits2_data = a_fsas_.RowSplits(2).Data(), *b_fsas_row_splits2_data = b_fsas_.RowSplits(2).Data(); K2_EVAL(c_, num_states, lambda_find_num_arcs, (int32_t i) -> void { int32_t state_idx = state_begin + i; StateInfo info = states_data[state_idx]; int32_t b_fsas_state_idx01 = info.b_fsas_state_idx01, b_start_arc = b_fsas_row_splits2_data[b_fsas_state_idx01], b_end_arc = b_fsas_row_splits2_data[b_fsas_state_idx01 + 1], b_num_arcs = b_end_arc - b_start_arc; num_arcs_acc(0, i) = b_num_arcs; int32_t a_fsas_state_idx01 = info.a_fsas_state_idx01, a_start_arc = a_fsas_row_splits2_data[a_fsas_state_idx01], a_end_arc = a_fsas_row_splits2_data[a_fsas_state_idx01 + 1], a_num_arcs = a_end_arc - a_start_arc; num_arcs_acc(1, i) = b_num_arcs * a_num_arcs; }); Array1<int32_t> row_splits_ab = num_arcs.Row(1), num_arcs_b = num_arcs.Row(0); ExclusiveSum(row_splits_ab, &row_splits_ab); // tot_ab is total of (num-arcs from state a * num-arcs from state b). int32_t tot_ab = row_splits_ab[num_states], cutoff = 1 << 30; // Eventually I'll make cutoff smaller, like // 16384, and implement the other branch. if (tot_ab > cutoff) { K2_LOG(FATAL) << "Problem size is too large for un-sorted intersection, " "please make sure one input is arc-sorted and use sorted_match_a=true."; } // The following is a bound on how big we might need the hash to be, assuming // all arc-pairs match, which of course they won't, but it's safe. For large // problems you should be using sorted_match_a=true. PossiblyResizeHash(4 * (states_.Dim() + tot_ab), states_.Dim() + tot_ab); int32_t num_key_bits = state_pair_to_state_.NumKeyBits(), num_value_bits = state_pair_to_state_.NumValueBits(); if (num_key_bits == 32 && num_value_bits == 32) { ForwardOneIter<Hash::Accessor<32> >(t, tot_ab, num_arcs_b, row_splits_ab); } else if (num_key_bits + num_value_bits == 64) { ForwardOneIter<Hash::GenericAccessor>(t, tot_ab, num_arcs_b, row_splits_ab); } else { ForwardOneIter<Hash::PackedAccessor>(t, tot_ab, num_arcs_b, row_splits_ab); } } } /* This is a piece of the code in Forward() that was broken out because it needs to be templated on the hash accessor type. It does the last part of the intersection algorithm for a single iteration. @param [in] t The iteration of the algorithm, dictates the batch of states we need to process. @param [in] tot_ab The total number of arcs we need to process, which equals the sum of (total number of arcs leaving state in a) * (total number of arcs leaving state in b) for each state-pair (a,b) that we need to process. @param [in] num_arcs_b An array indexed by an index i such that (i + state_begin) is an index into states_, that gives the number of arcs leaving the "b" state in that state-pair (from b_fsas_). The last element of this array is undefined. 
@param [in] row_splits_ab The exclusive-sum of the products (total number of arcs leaving state in a) * (total number of arcs leaving state in b) for each state-pair (a,b) that we need to process; dimension is 1 + (num states that we need to process). */ template <typename HashAccessorT> void ForwardOneIter(int32_t t, int32_t tot_ab, const Array1<int32_t> &num_arcs_b, const Array1<int32_t> &row_splits_ab) { NVTX_RANGE(K2_FUNC); int32_t state_begin = iter_to_state_row_splits_cpu_[t], state_end = iter_to_state_row_splits_cpu_[t + 1]; const Arc *a_arcs_data = a_fsas_.values.Data(), *b_arcs_data = b_fsas_.values.Data(); int32_t key_bits = state_pair_to_state_.NumKeyBits(), a_states_multiple = a_states_multiple_, value_bits = state_pair_to_state_.NumValueBits(); // `value_max` is the limit for how large values in the hash can be. uint64_t value_max = ((uint64_t)1) << value_bits; HashAccessorT state_pair_to_state_acc = state_pair_to_state_.GetAccessor<HashAccessorT>(); // Note: we can actually resolve the next failure fairly easily now; // we'll do it when needed. K2_CHECK_GT(value_max, (uint64_t)tot_ab) << "Problem size too large " "for hash table... redesign or reduce problem size."; Array1<int32_t> row_ids_ab(c_, tot_ab); RowSplitsToRowIds(row_splits_ab, &row_ids_ab); const int32_t *row_ids_ab_data = row_ids_ab.Data(), *row_splits_ab_data = row_splits_ab.Data(), *num_arcs_b_data = num_arcs_b.Data(); const int32_t *b_fsas_row_ids1_data = b_fsas_.RowIds(1).Data(); // arcs_newstates_renumbering serves two purposes: // - we'll keep some subset of the `tot_ab` arcs. // - some subset of the dest-states of those arcs will be "new" dest-states // that need to be assigned a state-id. // To avoid sequential kernels for computing Old2New() and computing New2Old(), // we combine those two renumberings into one. Renumbering arcs_newstates_renumbering(c_, tot_ab * 2); char *keep_arc_data = arcs_newstates_renumbering.Keep().Data(), *new_dest_state_data = keep_arc_data + tot_ab; const int32_t *a_fsas_row_splits2 = a_fsas_.RowSplits(2).Data(), *b_fsas_row_splits2 = b_fsas_.RowSplits(2).Data(); StateInfo *states_data = states_.Data(); K2_EVAL(c_, tot_ab, lambda_set_keep_arc_newstate, (int32_t i) -> void { // state_i is the index into the block of ostates that we're // processing, the actual state index is state_i + state_begin. int32_t state_i = row_ids_ab_data[i], // arc_pair_idx encodes a_arc_idx2 and b_arc_idx2 arc_pair_idx = i - row_splits_ab_data[state_i], state_idx = state_i + state_begin; StateInfo sinfo = states_data[state_idx]; int32_t num_arcs_b = num_arcs_b_data[state_i], a_arc_idx2 = arc_pair_idx / num_arcs_b, b_arc_idx2 = arc_pair_idx % num_arcs_b; // the idx2's above are w.r.t. a_fsas_ and b_fsas_. int32_t a_arc_idx01x = a_fsas_row_splits2[sinfo.a_fsas_state_idx01], b_arc_idx01x = b_fsas_row_splits2[sinfo.b_fsas_state_idx01], a_arc_idx012 = a_arc_idx01x + a_arc_idx2, b_arc_idx012 = b_arc_idx01x + b_arc_idx2; // Not treating epsilons specially here, see documentation for // IntersectDevice() in [currently] fsa_algo.h. int keep_arc = (a_arcs_data[a_arc_idx012].label == b_arcs_data[b_arc_idx012].label); keep_arc_data[i] = (char)keep_arc; int new_dest_state = 0; if (keep_arc && a_arcs_data[a_arc_idx012].label != -1) { // investigate whether the dest-state is new (not currently allocated // a state-id). We don't allocate ids for the final-state, so skip this // if label is -1. 
int32_t b_dest_state_idx1 = b_arcs_data[b_arc_idx012].dest_state, b_dest_state_idx01 = b_dest_state_idx1 + sinfo.b_fsas_state_idx01 - b_arcs_data[b_arc_idx012].src_state, a_dest_state_idx1 = a_arcs_data[a_arc_idx012].dest_state; uint64_t hash_key = (((uint64_t)a_dest_state_idx1) * a_states_multiple) + b_dest_state_idx01, hash_value = i; // If it was successfully inserted, then this arc is assigned // responsibility for creating the state-id for its destination // state. // The value `hash_value` that we insert into the hash is temporary, // and will be replaced below with the index into states_. if (state_pair_to_state_acc.Insert(hash_key, hash_value)) { new_dest_state = 1; } } new_dest_state_data[i] = (char)new_dest_state; }); // When reading the code below, remember this code is a little unusual // because we have combined the renumberings for arcs and new-states // into one. int32_t num_kept_arcs = arcs_newstates_renumbering.Old2New(true)[tot_ab], num_kept_tot = arcs_newstates_renumbering.New2Old().Dim(), num_kept_states = num_kept_tot - num_kept_arcs; int32_t next_state_end = state_end + num_kept_states; iter_to_state_row_splits_cpu_.push_back(next_state_end); states_.Resize(next_state_end); // Note: this Resize() won't actually reallocate each time. states_data = states_.Data(); // In case it changed (unlikely) Array1<int32_t> states_new2old = arcs_newstates_renumbering.New2Old().Arange(num_kept_arcs, num_kept_tot); const int32_t *states_new2old_data = states_new2old.Data(), *b_to_a_map_data = b_to_a_map_.Data(), *a_fsas_row_splits1_data = a_fsas_.RowSplits(1).Data(); // set new elements of `states_data`, setting up the StateInfo on the next // frame and setting the state indexes in the hash (to be looked up when // creating the arcs. K2_EVAL(c_, num_kept_states, lambda_set_states_data, (int32_t i) -> void { // the reason for the "- tot_ab" is that this was in the second half of // the array of 'kept' of size tot_ab * 2. int32_t arc_i = states_new2old_data[i] - tot_ab; // The code below repeats what we did when processing arcs in the // previous lambda (now just for a small subset of arcs). // src_state_i is the index into the block of ostates that we're // processing, the actual state index is state_i + state_begin. int32_t src_state_i = row_ids_ab_data[arc_i], // arc_pair_idx encodes a_arc_idx2 and b_arc_idx2 arc_pair_idx = arc_i - row_splits_ab_data[src_state_i], src_state_idx = src_state_i + state_begin; StateInfo src_sinfo = states_data[src_state_idx]; int32_t num_arcs_b = num_arcs_b_data[src_state_i], a_arc_idx2 = arc_pair_idx / num_arcs_b, b_arc_idx2 = arc_pair_idx % num_arcs_b; // the idx2's above are w.r.t. a_fsas_ and b_fsas_. 
int32_t a_arc_idx01x = a_fsas_row_splits2[src_sinfo.a_fsas_state_idx01], b_arc_idx01x = b_fsas_row_splits2[src_sinfo.b_fsas_state_idx01], a_arc_idx012 = a_arc_idx01x + a_arc_idx2, b_arc_idx012 = b_arc_idx01x + b_arc_idx2; Arc b_arc = b_arcs_data[b_arc_idx012], a_arc = a_arcs_data[a_arc_idx012]; K2_DCHECK_EQ(a_arc.label, b_arc.label); int32_t b_dest_state_idx1 = b_arcs_data[b_arc_idx012].dest_state, b_dest_state_idx01 = b_dest_state_idx1 + src_sinfo.b_fsas_state_idx01 - b_arcs_data[b_arc_idx012].src_state, b_fsa_idx0 = b_fsas_row_ids1_data[b_dest_state_idx01], a_dest_state_idx1 = a_arcs_data[a_arc_idx012].dest_state, a_dest_state_idx01 = a_fsas_row_splits1_data[b_to_a_map_data[b_fsa_idx0]] + a_dest_state_idx1; uint64_t hash_key = (((uint64_t)a_dest_state_idx1) * a_states_multiple) + b_dest_state_idx01; uint64_t value, *key_value_location = nullptr; bool ans = state_pair_to_state_acc.Find(hash_key, &value, &key_value_location); K2_DCHECK(ans); K2_DCHECK_EQ(value, (uint64_t)arc_i); int32_t dest_state_idx = state_end + i; state_pair_to_state_acc.SetValue(key_value_location, hash_key, (uint64_t)dest_state_idx); StateInfo dest_sinfo; dest_sinfo.a_fsas_state_idx01 = a_dest_state_idx01; dest_sinfo.b_fsas_state_idx01 = b_dest_state_idx01; states_data[dest_state_idx] = dest_sinfo; }); int32_t old_num_arcs = arcs_.Dim(), new_num_arcs = old_num_arcs + num_kept_arcs; if (static_cast<uint64_t>(tot_ab) >= value_max || static_cast<uint64_t>(next_state_end) >= value_max) { K2_LOG(FATAL) << "Problem size is too large for this code: a_states_multiple=" << a_states_multiple_ << ", key_bits=" << key_bits << ", value_bits=" << value_bits << ", value_max=" << value_max << ", tot_ab=" << tot_ab << ", next_state_end=" << next_state_end; } arcs_.Resize(new_num_arcs); arcs_row_ids_.Resize(new_num_arcs); ArcInfo *arcs_data = arcs_.Data(); int32_t *arcs_row_ids_data = arcs_row_ids_.Data(); const int32_t *arcs_new2old_data = arcs_newstates_renumbering.New2Old().Data(); K2_EVAL(c_, num_kept_arcs, lambda_set_arc_info, (int32_t new_arc_i) -> void { // 0 <= old_arc_i < tot_ab. int32_t old_arc_i = arcs_new2old_data[new_arc_i]; // The code below repeats what we did when processing arcs in the // previous lambdas (we do this for all arcs that were kept). // src_state_i is the index into the block of ostates that we're // processing, the actual state index is src_state_i + state_begin. int32_t src_state_i = row_ids_ab_data[old_arc_i]; // arc_pair_idx encodes a_arc_idx2 and b_arc_idx2 int32_t arc_pair_idx = old_arc_i - row_splits_ab_data[src_state_i], src_state_idx = src_state_i + state_begin; StateInfo src_sinfo = states_data[src_state_idx]; int32_t num_arcs_b = num_arcs_b_data[src_state_i], a_arc_idx2 = arc_pair_idx / num_arcs_b, b_arc_idx2 = arc_pair_idx % num_arcs_b; // the idx2's above are w.r.t. a_fsas_ and b_fsas_. 
int32_t a_arc_idx01x = a_fsas_row_splits2[src_sinfo.a_fsas_state_idx01], b_arc_idx01x = b_fsas_row_splits2[src_sinfo.b_fsas_state_idx01], a_arc_idx012 = a_arc_idx01x + a_arc_idx2, b_arc_idx012 = b_arc_idx01x + b_arc_idx2; Arc b_arc = b_arcs_data[b_arc_idx012], a_arc = a_arcs_data[a_arc_idx012]; K2_DCHECK_EQ(a_arc.label, b_arc.label); //int32_t dest_state_idx = -1; if (a_arc.label != -1) { int32_t b_dest_state_idx1 = b_arcs_data[b_arc_idx012].dest_state, b_dest_state_idx01 = b_dest_state_idx1 + src_sinfo.b_fsas_state_idx01 - b_arcs_data[b_arc_idx012].src_state, a_dest_state_idx1 = a_arcs_data[a_arc_idx012].dest_state; uint64_t hash_key = (((uint64_t)a_dest_state_idx1) * a_states_multiple) + b_dest_state_idx01; uint64_t value = 0; bool ans = state_pair_to_state_acc.Find(hash_key, &value); // dest_state_idx = static_cast<uint32_t>(value); } // else leave dest_state_idx at -1; it's a final-state and we // allocate their state-ids at the end. // Actually we no longer need dest_state_idx, it will be obtained // directly from the hash when we format the output. ArcInfo info; info.a_arc_idx012 = a_arc_idx012; info.b_arc_idx012 = b_arc_idx012; arcs_data[old_num_arcs + new_arc_i] = info; arcs_row_ids_data[old_num_arcs + new_arc_i] = src_state_idx; }); } /* This function ensures that the hash `state_pair_to_state_` has an array with at least `min_num_buckets` buckets, and NumValueBits() large enough to contain at least `min_supported_values` values The number of bits allocated for the key will not be changed (this was set to the required value in the constructor). @param [in] min_num_buckets The minimum number of buckets required; the actual number chosen will be a power of 2 that is >= min_num_buckets. CAUTION: this number should be considerably larger than the maximum number of key/value pairs you might want to store in the hash (or it will get too full). @param [in] min_supported_values The user declares that the hash must have enough bits allocated to values that it can store values 0 <= v < min_supported_values. In general this requires that min_supported_values < (1 << state_pair_to_state_.NumValueBits()), the strictly-less-than being necessary because (1<<num_value_bits)-1 is not allowed as a value if (1<<num_key_bits)-1 is allowed as a key, which condition we are too lazy to check. */ void PossiblyResizeHash(int32_t min_num_buckets, int32_t min_supported_values) { NVTX_RANGE(K2_FUNC); K2_CHECK_GE(min_num_buckets, 0); int32_t cur_num_buckets = state_pair_to_state_.NumBuckets(), cur_num_key_bits = state_pair_to_state_.NumKeyBits(), cur_num_value_bits = state_pair_to_state_.NumValueBits(), num_buckets = std::max<int32_t>( RoundUpToNearestPowerOfTwo(min_num_buckets), cur_num_buckets), num_value_bits = std::max<int32_t>( NumBitsNeededFor(min_supported_values), cur_num_value_bits); if (num_value_bits != cur_num_value_bits || num_buckets != cur_num_buckets) { state_pair_to_state_.Resize(num_buckets, cur_num_key_bits, num_value_bits); } } void ForwardSortedA() { NVTX_RANGE(K2_FUNC); for (int32_t t = 0; ; t++) { K2_CHECK_EQ(t + 2, int32_t(iter_to_state_row_splits_cpu_.size())); int32_t state_begin = iter_to_state_row_splits_cpu_[t], state_end = iter_to_state_row_splits_cpu_[t + 1], num_states = state_end - state_begin; if (num_states == 0) { // It saves a little processing later to remove the last, empty, // iteration-index. iter_to_state_row_splits_cpu_.pop_back(); break; } // We need to process output-states numbered state_begin..state_end-1. 
// num_arcs_b will contain the number of arcs leaving the state in b_fsas_, // i.e. the state with index StateInfo::b_fsas_state_idx01. Array1<int32_t> num_arcs_b(c_, num_states + 1); int32_t *num_arcs_b_data = num_arcs_b.Data(); StateInfo *states_data = states_.Data(); const int32_t *a_fsas_row_splits2_data = a_fsas_.RowSplits(2).Data(), *b_fsas_row_splits2_data = b_fsas_.RowSplits(2).Data(); K2_EVAL(c_, num_states, lambda_find_num_arcs_b, (int32_t i) -> void { int32_t state_idx = state_begin + i; StateInfo info = states_data[state_idx]; int32_t b_fsas_state_idx01 = info.b_fsas_state_idx01, b_start_arc = b_fsas_row_splits2_data[b_fsas_state_idx01], b_end_arc = b_fsas_row_splits2_data[b_fsas_state_idx01 + 1], b_num_arcs = b_end_arc - b_start_arc; num_arcs_b_data[i] = b_num_arcs; }); ExclusiveSum(num_arcs_b, &num_arcs_b); int32_t num_b_arcs = num_arcs_b.Back(); Array1<int32_t> b_arc_to_state(c_, num_b_arcs); RowSplitsToRowIds(num_arcs_b, &b_arc_to_state); int32_t *b_arc_to_state_data = b_arc_to_state.Data(); /* We now know, for each state-pair we need to process, the total number of arcs leaving the state in b. We need to figure out the range of matching arcs leaving the state in a. */ Array1<int32_t> first_matching_a_arc_idx012(c_, num_b_arcs); int32_t *first_matching_a_arc_idx012_data = first_matching_a_arc_idx012.Data(); // The + 1 is because we'll do an exclusive sum. Array1<int32_t> num_matching_a_arcs(c_, num_b_arcs + 1); int32_t *num_matching_a_arcs_data = num_matching_a_arcs.Data(); const Arc *a_arcs_data = a_fsas_.values.Data(), *b_arcs_data = b_fsas_.values.Data(); int32_t key_bits = state_pair_to_state_.NumKeyBits(), value_bits = state_pair_to_state_.NumValueBits(); if (c_->GetDeviceType() == kCuda) { #ifdef K2_WITH_CUDA namespace cg = cooperative_groups; constexpr int log_thread_group_size = 2, thread_group_size = (1 << log_thread_group_size); // 4 static_assert(thread_group_size > 1, "Bad thread_group_size"); // the "* 2" below is because pairs of thread groups handle the // (beginning, end) of ranges of arcs in a_fsas_; and we need // these groups to be within the same warp so we can sync them. static_assert(thread_group_size * 2 <= 32, "thread_group_size too large"); auto lambda_find_ranges = [=] __device__( cg::thread_block_tile<thread_group_size> g, // or auto g.. int32_t *shared_data, // points to shared data for this block of // threads int32_t idx01_doubled) -> void { // thread_group_type is 0 if we're finding the beginning of the range // of matching arcs, and 1 if we're finding the end of the range of // matching arcs. // 0 <= idx01 < num_b_arcs is an index into the list of arcs we're // processing; the array's shape has (row_splits,row_ids) == // (num_arcs_b, b_arc_to-state). int32_t arc_idx01 = idx01_doubled / 2, thread_group_type = idx01_doubled % 2; // the idx01 is into the list of arcs in b that we're processing. // 0 <= state_idx0 < num_states. int32_t state_idx0 = b_arc_to_state_data[arc_idx01], arc_idx1x = num_arcs_b_data[state_idx0], arc_idx1 = arc_idx01 - arc_idx1x; // state_idx is an index into states_. int32_t state_idx = state_begin + state_idx0; StateInfo info = states_data[state_idx]; int32_t b_begin_arc_idx01x = b_fsas_row_splits2_data[info.b_fsas_state_idx01], b_arc_idx012 = b_begin_arc_idx01x + arc_idx1; // ignore the apparent name mismatch setting b_arc_idx012 above; // arc_idx1 is an idx1 w.r.t. a different array than b_fsas_. 
K2_DCHECK_LT(b_arc_idx012, b_fsas_row_splits2_data[info.b_fsas_state_idx01+1]); int32_t a_begin_arc_idx012 = a_fsas_row_splits2_data[info.a_fsas_state_idx01], a_end_arc_idx012 = a_fsas_row_splits2_data[info.a_fsas_state_idx01 + 1]; int32_t thread_idx = g.thread_rank(), num_threads = g.size(); // = thread_group_size. // We convert to uint64_t so we can add 1 without wrapping around; // this way, even-numbered thread groups (i.e. groups of size // thread_group_size) find the beginning of the range of arcs in a, // and odd-numbered thread groups find the end of the range of arcs. uint64_t label = static_cast<uint64_t>(static_cast<uint32_t>( b_arcs_data[b_arc_idx012].label)) + static_cast<uint64_t>(thread_group_type); // We are now searching for the lowest arc-index i in the range // a_begin_arc_idx012 <= i <= a_end_arc_idx012, where // arcs_data[i].label >= `label`, where we treat the labels of arcs // indexed i >= a_end_arc_idx012 as infinitely large. int32_t range_len = a_end_arc_idx012 - a_begin_arc_idx012, // > 0 log_range_len = 31 - __clz(range_len | 1), num_iters = 1 + log_range_len / log_thread_group_size; // suppose log_thread_group_size=2, thread_group_size=4. // Then: // 0 <= range_len < 4 -> num_iters is 1 // 4 <= range_len < 16 -> num_iters is 2 // Note: at 4 and 16, we need num_iters to be (2,3) // respectively because a_end_arc_idx012 is a value we need // to include in the search. // "per_thread_range" is the length of the interval of arcs that each thread // 0 <= thread_idx < num_threads is currently responsible for. // At this point, the group of threads is searching an interval // [interval_start ... interval_start+(per_thread_range*thread_group_size)]. // for the lowest index i such that // (i>= a_end_arc_idx012 ? UINT64_MAX : (uint32_t)a_arcs_data[i]) >= label // and such an i must exist in this range because the range includes a_end_arc_idx012 // (checked in K2_DCHECK below) int32_t per_thread_range = 1 << ((num_iters - 1) * log_thread_group_size); // > 0 int32_t interval_start = a_begin_arc_idx012; K2_DCHECK_GT(interval_start + per_thread_range * thread_group_size, a_end_arc_idx012); while (per_thread_range > 0) { // this_thread_start is the beginning of the range of arcs that this // thread is responsible for searching. int32_t this_thread_start = interval_start + (thread_idx * per_thread_range), this_thread_last = this_thread_start + per_thread_range - 1; // last_label is the label on the last arc in the range that this // thread is responsible for. We ensure that the range of arcs // we are searching (which, remember, includes a_end_arc_idx012) // always have at least one arc whose label (taken as +infty // for out-of-range arcs) is >= `label`. So `last_label` for // the last thread will always be >= `label`. uint64_t last_label = (this_thread_last >= a_end_arc_idx012 ? static_cast<uint64_t>(-1) : static_cast<uint64_t>(static_cast<uint32_t>( a_arcs_data[this_thread_last].label))), prev_last_label = g.shfl_up(last_label, 1); // Note: prev_last_label is the last_label for the previous thread, // and it's a don't-care value which will be ignored if this // thread_idx == 0. // Exactly one thread in the group will satisfy the following // conditions. Note: for the last thread in the thread group, // the condition "label < end_label" will always be true, because // label < UINT64_MAX. if ((thread_idx == 0 || prev_last_label < label) && last_label >= label) { *shared_data = this_thread_start; } g.sync(); interval_start = *shared_data; // broadcast to all threads.. 
per_thread_range = per_thread_range >> log_thread_group_size; } // OK, now all threads in the group should share the variable // `interval_start`. We construct a thread_block_tile of double // the size, so we can broadcast the lower and upper bounds of // the range of matching arcs in a (look above for "thread_group_type" // for more explanation). cg::thread_block_tile<thread_group_size*2> g_double = cg::tiled_partition<thread_group_size*2>(cg::this_thread_block()); int32_t lower_bound, upper_bound; if (thread_idx == 0) { // only the 1st thread from each of the 2 groups // participates. lower_bound = g_double.shfl(interval_start, 0); upper_bound = g_double.shfl(interval_start, thread_group_size); if (g_double.thread_rank() == 0) { // equiv. to: // (thread_group_type == 0) /* K2_DCHECK_LE(lower_bound, upper_bound); K2_DCHECK_LE(a_begin_arc_idx012, lower_bound); K2_DCHECK_LE(upper_bound, a_end_arc_idx012); K2_DCHECK(lower_bound == a_end_arc_idx012 || a_arcs_data[lower_bound].label >= label); K2_DCHECK(lower_bound == a_begin_arc_idx012 || a_arcs_data[lower_bound - 1].label < label); K2_DCHECK(upper_bound == a_end_arc_idx012 || uint32_t(a_arcs_data[upper_bound].label) > uint32_t(label)); */ if (upper_bound != a_begin_arc_idx012) { K2_DCHECK_LE(uint32_t(a_arcs_data[upper_bound - 1].label), uint32_t(label)); } first_matching_a_arc_idx012_data[arc_idx01] = lower_bound; } else { // g_double.thread_rank() == thread_group_size num_matching_a_arcs_data[arc_idx01] = upper_bound - lower_bound; } } }; EvalGroupDevice<thread_group_size, int32_t>( c_, num_b_arcs * 2, lambda_find_ranges); #else K2_LOG(FATAL) << "Unreachable code"; #endif } else { // Use regular binary search. K2_EVAL(c_, num_b_arcs, lambda_find_ranges_cpu, (int32_t arc_idx01) -> void { // the idx01 is into the list of arcs in b that we're processing.. // 0 <= state_idx0 < num_states. // state_idx is an index into states_. int32_t state_idx0 = b_arc_to_state_data[arc_idx01], arc_idx1x = num_arcs_b_data[state_idx0], arc_idx1 = arc_idx01 - arc_idx1x, state_idx = state_begin + state_idx0; StateInfo info = states_data[state_idx]; int32_t b_begin_arc_idx01x = b_fsas_row_splits2_data[info.b_fsas_state_idx01], b_arc_idx012 = b_begin_arc_idx01x + arc_idx1; // ignore the apparent name mismatch setting b_arc_idx012 above; // arc_idx1 is an idx1 w.r.t. a different array than b_fsas_. K2_DCHECK_LT(b_arc_idx012, b_fsas_row_splits2_data[info.b_fsas_state_idx01 + 1]); int32_t a_begin_arc_idx012 = a_fsas_row_splits2_data[info.a_fsas_state_idx01], a_end_arc_idx012 = a_fsas_row_splits2_data[info.a_fsas_state_idx01 + 1]; uint32_t label = static_cast<uint32_t>(b_arcs_data[b_arc_idx012].label); int32_t begin = a_begin_arc_idx012, end = a_end_arc_idx012; // We are looking for the first index begin <= i < end such that // a_arcs[i].label >= label. while (begin < end) { int32_t mid = (begin + end) / 2; assert(mid < end); // temp? uint32_t a_label = uint32_t(a_arcs_data[mid].label); if (a_label < label) { begin = mid + 1; } else { end = mid; } } if (begin < a_end_arc_idx012) { K2_CHECK_GE((uint32_t)a_arcs_data[begin].label, label); } if (begin - 1 > a_begin_arc_idx012) { K2_CHECK_LT((uint32_t)a_arcs_data[begin-1].label, label); } // "range_begin" is the "begin" of the possibly-empty range of arc-indexes // in a that matches `label` int32_t range_begin = begin, range_end = begin; // The following linear search will probably be faster than // logarithmic search in the normal case where there are not many // matching arcs. 
In the unusual case where there are many matching // arcs per state, it won't dominate the running time of the entire // algorithm. while (range_end < a_end_arc_idx012 && uint32_t(a_arcs_data[range_end].label) == label) range_end++; first_matching_a_arc_idx012_data[arc_idx01] = range_begin; num_matching_a_arcs_data[arc_idx01] = range_end - range_begin; }); } ExclusiveSum(num_matching_a_arcs, &num_matching_a_arcs); int32_t tot_matched_arcs = num_matching_a_arcs.Back(); { int32_t max_possible_states = states_.Dim() + tot_matched_arcs; PossiblyResizeHash(4 * max_possible_states, max_possible_states); } int32_t num_key_bits = state_pair_to_state_.NumKeyBits(), num_value_bits = state_pair_to_state_.NumValueBits(); if (num_key_bits == 32 && num_value_bits == 32) { ForwardSortedAOneIter<Hash::Accessor<32> >( t, num_arcs_b, b_arc_to_state, num_matching_a_arcs, first_matching_a_arc_idx012, tot_matched_arcs); } else if (num_key_bits + num_value_bits == 64) { ForwardSortedAOneIter<Hash::GenericAccessor>( t, num_arcs_b, b_arc_to_state, num_matching_a_arcs, first_matching_a_arc_idx012, tot_matched_arcs); } else { ForwardSortedAOneIter<Hash::PackedAccessor>( t, num_arcs_b, b_arc_to_state, num_matching_a_arcs, first_matching_a_arc_idx012, tot_matched_arcs); } } } /* This is some code that was broken out of ForwardSortedA() because it needed to be templated on the hash accessor type. Does the last part of a single iteration of the algorithm. @param [in] t The iteration index >= 0, representing the batch of states that we are processing arcs leaving from. @param [in] num_arcs_b_row_splits An array of shape equal to 1 + num_states, where num_states is the number of states we're processing on this iteration (see the variable in the code), with is the exclusive-sum of the number of arcs leaving the states in b (from "b" members of the state-pair we're processing) @param [in] b_arc_to_state The result of turning `num_arcs_b_row_splits` into a row-ids array. Each element corresponds to an arc in b_fsas_ that we are processing. @param [in] matching_a_arcs_row_splits An array of size b_arc_to_state.Dim() + 1, which is the exclusive sum of the number of matching arcs in a_fsas_ for a particular arc in b_fsas_ that we are processing. @param [in] first_matching_a_arc_idx012 An array of size b_arc_to-state.Dim(), giving the index of the first matching arc in a_fsas_ that matches the corresponding arc in b_fsas_. 
@param [in] tot_matched_arcs Must equal matching_a_arcs_row_splits.Back() */ template <typename HashAccessorT> void ForwardSortedAOneIter( int32_t t, const Array1<int32_t> &num_arcs_b_row_splits, const Array1<int32_t> &b_arc_to_state, const Array1<int32_t> &matching_a_arcs_row_splits, const Array1<int32_t> &first_matching_a_arc_idx012, int32_t tot_matched_arcs) { NVTX_RANGE(K2_FUNC); HashAccessorT state_pair_to_state_acc = state_pair_to_state_.GetAccessor<HashAccessorT>(); const Arc *a_arcs_data = a_fsas_.values.Data(), *b_arcs_data = b_fsas_.values.Data(); int32_t state_begin = iter_to_state_row_splits_cpu_[t], state_end = iter_to_state_row_splits_cpu_[t + 1], a_states_multiple = a_states_multiple_; Array1<int32_t> matched_arc_to_b_arc(c_, tot_matched_arcs); RowSplitsToRowIds(matching_a_arcs_row_splits, &matched_arc_to_b_arc); const int32_t *matched_arc_to_b_arc_data = matched_arc_to_b_arc.Data(), *b_arc_to_state_data = b_arc_to_state.Data(), *num_arcs_b_row_splits_data = num_arcs_b_row_splits.Data(); Renumbering new_state_renumbering(c_, tot_matched_arcs); // We'll write '1' where the arc-pair leads to a new state (exactly one // such arc will have a '1', for each newly produced state. char *new_state_renumbering_keep_data = new_state_renumbering.Keep().Data(); int32_t old_num_arcs = arcs_.Dim(), new_num_arcs = old_num_arcs + tot_matched_arcs; arcs_.Resize(new_num_arcs); arcs_row_ids_.Resize(new_num_arcs); ArcInfo *new_arcs_data = arcs_.Data() + old_num_arcs; int32_t *new_arcs_row_ids_data = arcs_row_ids_.Data() + old_num_arcs; // `hash_keys_value_locations` will be written to only for // arcs that are responsible for creating a new state; it points to // the key/value location in the hash corresponding to that new // state, to which we'll later write the state_id (idx into states_). Array1<uint64_t*> hash_key_value_locations(c_, tot_matched_arcs); uint64_t **hash_key_value_locations_data = hash_key_value_locations.Data(); // We'll write to a_state_idx01_temp only for arcs that are // responsible for creating new destination-state (i.e. we'll write at // the same locations as hash_key_value_locations, where // new_state_renumbering_keep_data == 1). It's the idx01 of the // dest-state, in a, of the arc. Array1<int32_t> a_dest_state_idx01_temp(c_, tot_matched_arcs); int32_t *a_dest_state_idx01_temp_data = a_dest_state_idx01_temp.Data(); const int32_t *matching_a_arcs_row_splits_data = matching_a_arcs_row_splits.Data(), *first_matching_a_arc_idx012_data = first_matching_a_arc_idx012.Data(), *a_fsas_row_splits2_data = a_fsas_.RowSplits(2).Data(), *b_fsas_row_splits2_data = b_fsas_.RowSplits(2).Data(); StateInfo *states_data = states_.Data(); K2_EVAL(c_, tot_matched_arcs, lambda_set_arcs_and_new_state, (int32_t idx012) -> void { // `idx012` is into an ragged tensor that we haven't physically // constructed, containing the new arcs we are adding on this frame; // its shape's 1st layer is formed by (num_arcs_b, b_arc_to_state), // and its 2nd layer is formed by (matching_a_arcs_row_splits, // matched_arc_to_b_arc). 
int32_t b_arc_idx01 = matched_arc_to_b_arc_data[idx012], matched_arc_idx01x = matching_a_arcs_row_splits_data[b_arc_idx01], matched_arc_idx2 = idx012 - matched_arc_idx01x, state_idx0 = b_arc_to_state_data[b_arc_idx01], b_arc_idx0x = num_arcs_b_row_splits_data[state_idx0], b_arc_idx1 = b_arc_idx01 - b_arc_idx0x; int32_t state_idx = state_begin + state_idx0; // into states_ StateInfo sinfo = states_data[state_idx]; int32_t b_fsas_state_idx01 = sinfo.b_fsas_state_idx01, b_begin_arc_idx01x = b_fsas_row_splits2_data[b_fsas_state_idx01], b_arc_idx012 = b_begin_arc_idx01x + b_arc_idx1; // ignore the apparent name mismatch setting b_arc_idx012; arc_idx1 // is an idx1 w.r.t. a different array than b_fsas_. int32_t first_matching_a_arc_idx012 = first_matching_a_arc_idx012_data[b_arc_idx01], a_arc_idx012 = first_matching_a_arc_idx012 + matched_arc_idx2; Arc b_arc = b_arcs_data[b_arc_idx012], a_arc = a_arcs_data[a_arc_idx012]; K2_CHECK_EQ(b_arc.label, a_arc.label); char new_dest_state = 0; // int32_t dest_state_idx = -1; if (a_arcs_data[a_arc_idx012].label != -1) { // investigate whether the dest-state is new (not currently // allocated a state-id). We don't allocate state-ids for the // final-state yet, so skip this if label is -1. int32_t b_dest_state_idx1 = b_arc.dest_state, b_dest_state_idx01 = b_dest_state_idx1 + sinfo.b_fsas_state_idx01 - b_arc.src_state, a_dest_state_idx1 = a_arc.dest_state; uint64_t hash_key = (((uint64_t)a_dest_state_idx1) * a_states_multiple) + b_dest_state_idx01, hash_value = 0, // actually it's a don't-care. *hash_key_value_location = nullptr; // If it was successfully inserted, then this arc is assigned // responsibility for creating the state-id for its destination // state. We'll assign the value below in // lambda_allocate_new_state_ids. if (state_pair_to_state_acc.Insert(hash_key, hash_value, nullptr, &hash_key_value_location)) { hash_key_value_locations_data[idx012] = hash_key_value_location; int32_t a_dest_state_idx01 = a_dest_state_idx1 + (sinfo.a_fsas_state_idx01 - a_arc.src_state); a_dest_state_idx01_temp_data[idx012] = a_dest_state_idx01; new_dest_state = (char)1; } } ArcInfo arc_info; arc_info.a_arc_idx012 = a_arc_idx012; arc_info.b_arc_idx012 = b_arc_idx012; new_arcs_data[idx012] = arc_info; new_arcs_row_ids_data[idx012] = state_idx; new_state_renumbering_keep_data[idx012] = new_dest_state; }); int32_t num_new_states = new_state_renumbering.New2Old().Dim(); const int32_t *new_state_renumbering_new2old_data = new_state_renumbering.New2Old().Data(); K2_DCHECK_EQ(states_.Dim(), state_end); int32_t next_state_end = state_end + num_new_states; iter_to_state_row_splits_cpu_.push_back(next_state_end); states_.Resize(next_state_end); // Note: this Resize() won't actually // reallocate each time. K2_CHECK_EQ(uint64_t(next_state_end) >> state_pair_to_state_.NumValueBits(), 0); int32_t num_kept_key_bits = 64 - state_pair_to_state_.NumValueBits(); states_data = states_.Data(); // In case it changed (unlikely) const int32_t *b_fsas_row_ids1_data = b_fsas_.RowIds(1).Data(), *a_fsas_row_splits1_data = a_fsas_.RowSplits(1).Data(); // The next lambda modifies state_pair_to_state_, replacing the temporary // values in the hash with the newly allocated state-ids. K2_EVAL(c_, num_new_states, lambda_allocate_new_state_ids, (int32_t i) -> void { int32_t new_state_idx = state_end + i; // `arc_idx` below is the index into the matched arcs on this frame, // with 0 <= new_arc_idx < tot_matched_arcs. 
int32_t new_arc_idx = new_state_renumbering_new2old_data[i]; uint64_t *hash_key_value_location = hash_key_value_locations_data[new_arc_idx]; // The next assertion depends on knowledge of the implementation of // the hash. If in future we change details of the hash // implementation and it fails, it can be removed. // We're checking that we inserted `hash_value = 0` above. K2_DCHECK_EQ(*hash_key_value_location >> num_kept_key_bits, 0); uint64_t key = state_pair_to_state_acc.SetValue(hash_key_value_location, new_state_idx); uint32_t b_state_idx01 = key % a_states_multiple, a_state_idx01 = a_dest_state_idx01_temp_data[new_arc_idx]; // a_state_idx01 is not stored in `key`, because we store it // as the idx1. StateInfo info { (int32_t)a_state_idx01, (int32_t)b_state_idx01 }; states_data[new_state_idx] = info; }); } ~DeviceIntersector() { // Prevent crash in destructor of hash (at exit, it still contains values, by design). state_pair_to_state_.Destroy(); } ContextPtr c_; FsaVec a_fsas_; // a_fsas_: decoding graphs // Note: a_fsas_ has 3 axes. bool sorted_match_a_; // If true, we'll require a_fsas_ to be arc-sorted; and // we'll use a matching approach that won't blow up in // memory or time when a_fsas_ has states with very // high out-degree. FsaVec b_fsas_; // map from fsa-index in b_fsas_ to the fsa-index in a_fsas_ that we want to // intersect it with. Array1<int32_t> b_to_a_map_; // iter_to_state_row_splits_cpu_, which is resized on each iteration of the // algorithm, is a row-splits array that maps from iteration index to // state_idx (index into states_). std::vector<int32_t> iter_to_state_row_splits_cpu_; // states_ is a resizable array of StateInfo that conceptually is the elements // of a ragged array indexed [iter][state], with row_splits1 == // iter_to_state_row_splits_cpu_. Array1<StateInfo> states_; // final_states_ is an array of StateInfo, of dimension <= b_fsas_.Dim0(), // that contains the final state-pairs of each composed FSA that has initial // state-pairs. These will be added to the end of states_ after composition // has finished. Array1<StateInfo> final_states_; // arcs_ is a resizable array of ArcInfo that conceptually is the elements // of a ragged array indexed [iter][state][arc], with row_splits1 == iter_to_state_row_splits_cpu_ // and row_ids2 == arcs_row_ids_. Array1<ArcInfo> arcs_; // arcs_row_ids_, which always maintained as having the same size as `arcs_`, // maps from the output arc to the corresponding ostate index that the arc // leaves from (index into states_). Actually this may be redundant. Array1<int32_t> arcs_row_ids_; // The hash maps from state-pair, as: // state_pair = (a_fsas_state_idx1 * a_states_multiple) + b_fsas_state_idx01 // to indexes into the states_ array (numbered 0,1,2,...), or to -1 // in cases where the state-pair is a pair of final-states. // // We name the values in the hash, which, as we mentioned, are indexes into // the states_ array, as`output_state_idx01`; the shape of the ragged array // which this is an index into, is given by // row_splits==iter_to_state_row_splits_cpu_. // // We ensure that a_states_multiple_ >= b_fsas_.TotSize(1) in order to ensure // uniqueness of the hashed values; and we also make sure a_states_multiple_ // is odd, which ensures the states in a_fsas_ also affect the low bits of the // hash value. 
int32_t a_states_multiple_; // This hash will also contain -1 as values in cases where the dest-state is a // final-state (these are allocated right at the beginning); and inside of // Forward() and ForwardSortedA() it will also contain temporary quantities // for newly created states, while we are working out the newly created // state-ids. Hash state_pair_to_state_; }; FsaVec IntersectDevice(FsaVec &a_fsas, int32_t properties_a, FsaVec &b_fsas, int32_t properties_b, const Array1<int32_t> &b_to_a_map, Array1<int32_t> *arc_map_a, Array1<int32_t> *arc_map_b, bool sorted_match_a) { NVTX_RANGE("IntersectDevice"); K2_CHECK_NE(properties_a & kFsaPropertiesValid, 0); K2_CHECK_NE(properties_b & kFsaPropertiesValid, 0); if (sorted_match_a && ((properties_a & kFsaPropertiesArcSorted) == 0)) { K2_LOG(FATAL) << "If you provide sorted_match_a=true, a_fsas " "must be arc-sorted, but (according to the properties) " "it is not."; } K2_CHECK_EQ(a_fsas.NumAxes(), 3); K2_CHECK_EQ(b_fsas.NumAxes(), 3); K2_CHECK_EQ(b_to_a_map.Dim(), b_fsas.Dim0()); K2_CHECK_LT(static_cast<uint32_t>(MaxValue(b_to_a_map)), static_cast<uint32_t>(a_fsas.Dim0())); DeviceIntersector intersector(a_fsas, b_fsas, b_to_a_map, sorted_match_a); intersector.Intersect(); return intersector.FormatOutput(arc_map_a, arc_map_b); } } // namespace k2
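// ---------------------------------------------------------------------------
// Illustrative usage sketch (added for exposition, not part of the original
// k2 source).  It shows one way IntersectDevice() above might be called when
// every FSA in b_fsas is to be intersected with the single decoding graph at
// index 0 of a_fsas.  The property bitmasks are assumed to have been computed
// elsewhere (k2 provides utilities for that in fsa_utils.h), and the
// Array1 fill-constructor used for b_to_a_map is an assumption about the API;
// treat this as a sketch rather than the library's canonical recipe.
namespace k2 {
inline FsaVec IntersectWithSharedGraphSketch(
    FsaVec &a_fsas, int32_t properties_a,  // arc-sorted decoding graph(s)
    FsaVec &b_fsas, int32_t properties_b,  // e.g. linear FSAs or lattices
    Array1<int32_t> *arc_map_a, Array1<int32_t> *arc_map_b) {
  ContextPtr c = b_fsas.Context();
  // Hypothetical mapping: every FSA in b_fsas is paired with FSA 0 of a_fsas.
  Array1<int32_t> b_to_a_map(c, b_fsas.Dim0(), 0);
  FsaVec composed = IntersectDevice(a_fsas, properties_a, b_fsas, properties_b,
                                    b_to_a_map, arc_map_a, arc_map_b,
                                    /*sorted_match_a=*/true);
  // The result may contain states that cannot reach the final state; callers
  // that need co-accessibility can run Connect() on `composed` afterward.
  return composed;
}
}  // namespace k2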
#include <iostream> #include <stdio.h> #include <util/helper_math.h> namespace dart { // -=-=-=-=-=-=-=-=-=- kernels -=-=-=-=-=-=-=-=-=- template <typename DepthType> __global__ void gpu_depthToVertices(const DepthType * depthIn, float4 * vertOut, const int width, const int height, const float2 pp, const float2 fl, const float2 range) { const int u = blockIdx.x*blockDim.x + threadIdx.x; const int v = blockIdx.y*blockDim.y + threadIdx.y; const int index = u + v*width; if (u >= width || v >= height) return; float depth = depthIn[index]; if (depth >= range.x && depth <= range.y) { vertOut[index] = make_float4( (u - pp.x)*(depth/fl.x), (v - pp.y)*(depth/fl.y), depth, 1.0f); } else { vertOut[index].w = 0; } } template <typename DepthType> __global__ void gpu_depthToVertices(const DepthType * depthIn, float4 * vertOut, const int width, const int height, const float2 pp, const float2 fl, const float2 range, const float scale) { const int u = blockIdx.x*blockDim.x + threadIdx.x; const int v = blockIdx.y*blockDim.y + threadIdx.y; const int index = u + v*width; if (u >= width || v >= height) return; float depth = scale*depthIn[index]; if (depth >= range.x && depth <= range.y) { vertOut[index] = make_float4( (u - pp.x)*(depth/fl.x), (v - pp.y)*(depth/fl.y), depth, 1.0f); } else { vertOut[index].w = 0; } } template <typename DepthType> __global__ void gpu_depthToVertices(const DepthType * depthIn, float4 * vertOut, const int width, const int height, const float4 * Kinv, const float2 range) { const int u = blockIdx.x*blockDim.x + threadIdx.x; const int v = blockIdx.y*blockDim.y + threadIdx.y; const int index = u + v*width; if (u >= width || v >= height) return; float depth = depthIn[index]; if (depth >= range.x && depth <= range.y) { const float4 p = make_float4( u, v, depth, 1); float4 vert = make_float4( dot(Kinv[0],p), dot(Kinv[1],p), dot(Kinv[2],p), dot(Kinv[3],p)); vert /= vert.w; vert.w = 1; vert.z = -vert.z; vertOut[index] = vert; } else { vertOut[index].w = 0; } } template <typename DepthType> __global__ void gpu_depthToVertices(const DepthType * depthIn, float4 * vertOut, const int width, const int height, const float4 * Kinv, const float2 range, const float scale) { const int u = blockIdx.x*blockDim.x + threadIdx.x; const int v = blockIdx.y*blockDim.y + threadIdx.y; const int index = u + v*width; if (u >= width || v >= height) return; float depth = scale*depthIn[index]; if (depth >= range.x && depth <= range.y) { const float4 p = make_float4( u, v, depth, 1); float4 vert = make_float4( dot(Kinv[0],p), dot(Kinv[1],p), dot(Kinv[2],p), dot(Kinv[3],p)); vert /= vert.w; vert.w = 1; vert.z = -vert.z; vertOut[index] = vert; } else { vertOut[index].w = 0; } } template <typename DepthType, int iters> __global__ void gpu_depthToVertices(const DepthType * depthIn, float4 * vertOut, const int width, const int height, const float * cameraParams, const float2 range) { const int u = blockIdx.x*blockDim.x + threadIdx.x; const int v = blockIdx.y*blockDim.y + threadIdx.y; const int index = u + v*width; if (u >= width || v >= height) return; float depth = depthIn[index]; if (depth >= range.x && depth <= range.y) { // http://docs.opencv.org/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html const float &fx = cameraParams[0]; const float &fy = cameraParams[1]; const float &cx = cameraParams[2]; const float &cy = cameraParams[3]; const float &k1 = cameraParams[4]; const float &k2 = cameraParams[5]; const float &p1 = cameraParams[6]; const float &p2 = cameraParams[7]; const float &k3 = 
cameraParams[8]; float xp, yp, xpp, ypp; xpp = xp = (u - cx) / fx; ypp = yp = (v - cy) / fy; #pragma unroll for (int i=0; i<iters; ++i) { float r2 = xp*xp + yp*yp; float r4 = r2*r2; float r6 = r4*r2; float denom = 1 + k1*r2 + k2*r4 + k3*r6; float dxp = 2*p1*xp*yp + p2*(r2 + 2*xp*xp); float dyp = p1*(r2 + 2*yp*yp) + 2*p2*xp*yp; xp = (xpp - dxp)/denom; yp = (ypp - dyp)/denom; } vertOut[index] = make_float4(xp*depth,yp*depth,depth,1.0f); } else { vertOut[index].w = 0; } } template <typename DepthType, int iters> __global__ void gpu_depthToVertices(const DepthType * depthIn, float4 * vertOut, const int width, const int height, const float * cameraParams, const float2 range, const float scale) { const int u = blockIdx.x*blockDim.x + threadIdx.x; const int v = blockIdx.y*blockDim.y + threadIdx.y; const int index = u + v*width; if (u >= width || v >= height) return; float depth = scale*depthIn[index]; if (depth >= range.x && depth <= range.y) { // http://docs.opencv.org/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html const float& fx = cameraParams[0]; const float& fy = cameraParams[1]; const float& cx = cameraParams[2]; const float& cy = cameraParams[3]; const float& k1 = cameraParams[4]; const float& k2 = cameraParams[5]; const float& p1 = cameraParams[6]; const float& p2 = cameraParams[7]; const float& k3 = cameraParams[8]; float xp, yp, xpp, ypp; xpp = xp = (u - cx) / fx; ypp = yp = (v - cy) / fy; #pragma unroll for (int i=0; i<iters; ++i) { float r2 = xp*xp + yp*yp; float r4 = r2*r2; float r6 = r4*r2; float denom = 1 + k1*r2 + k2*r4 + k3*r6; float dxp = 2*p1*xp*yp + p2*(r2 + 2*xp*xp); float dyp = p1*(r2 + 2*yp*yp) + 2*p2*xp*yp; xp = (xpp - dxp)/denom; yp = (ypp - dyp)/denom; } vertOut[index] = make_float4(xp*depth,yp*depth,depth,1.0f); } else { vertOut[index].w = 0; } } __global__ void gpu_verticesToNormals(const float4 * vertIn, float4 * normOut, const int width, const int height) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= width || y >= height) return; const int index = x + y*width; const float4 & v = vertIn[index]; // // don't process invalid vertices if ( v.w == 0) { normOut[index] = make_float4(0); return; } const float4 & vLeft = vertIn[ x == 0 ? index : index-1]; const float4 & vRight = vertIn[ x == width-1 ? index : index+1]; const float4 & vUp = vertIn[ y == 0 ? index : index-width]; const float4 & vDown = vertIn[ y == height-1 ? index : index+width]; const float3 vX = make_float3( (vRight.w == 0 ? v : vRight) - (vLeft.w == 0 ? v : vLeft) ); const float3 vY = make_float3( (vDown.w == 0 ? v : vDown) - (vUp.w == 0 ? 
v : vUp) ); const float3 n = cross(vY,vX); const float len2 = dot(n,n); if (len2 > 0) { const float invLen = 1.0f / (float)sqrtf(len2); normOut[index] = make_float4(n.x*invLen,n.y*invLen,n.z*invLen,1); } else { normOut[index] = make_float4(0); } } __global__ void gpu_eliminatePlane(float4 * verts, const float4 * norms, const int width, const int height, const float3 planeNormal, const float planeD, const float epsDist, const float epsNorm) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= width || y >= height) return; const int index = x + y*width; // check vertex validity float4& v = verts[index]; if ( v.w == 0) { return; } // check normal threshold const float4& n = norms[index]; if (dot(make_float3(n),planeNormal) < epsNorm) { return; } // check distance threshold if (abs(dot(make_float3(v),planeNormal) - planeD) < epsDist ) { v.w = -1; } } __global__ void gpu_cropBox(float4 * verts, const int width, const int height, const float3 boxMin, const float3 boxMax) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= width || y >= height) return; const int index = x + y*width; // check vertex validity float4& v = verts[index]; if ( v.w == 0) { return; } if (v.x < boxMin.x || v.x > boxMax.x || v.y < boxMin.y || v.y > boxMax.y || v.z < boxMin.z || v.z > boxMax.z) { v.w = -1; } } __global__ void gpu_maskPointCloud(float4* verts, const int width, const int height, const int* mask) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= width || y >= height) return; const int index = x + y*width; int m = mask[index]; if (m == 0) { verts[index].w = -1; } } // -=-=-=-=-=-=-=-=-=- interface -=-=-=-=-=-=-=-=-=- template <typename DepthType> void depthToVertices(const DepthType * depthIn, float4 * vertOut, const int width, const int height, const float2 pp, const float2 fl, const float2 range) { dim3 block(16,8,1); dim3 grid( ceil( width / (float)block.x), ceil( height / (float)block.y )); gpu_depthToVertices<<<grid,block>>>(depthIn, vertOut, width, height, pp, fl, range); } template <typename DepthType> void depthToVertices(const DepthType * depthIn, float4 * vertOut, const int width, const int height, const float2 pp, const float2 fl, const float2 range, const float scale) { dim3 block(16,8,1); dim3 grid( ceil( width / (float)block.x), ceil( height / (float)block.y )); gpu_depthToVertices<<<grid,block>>>(depthIn, vertOut, width, height, pp, fl, range, scale); } template <typename DepthType> void depthToVertices(const DepthType * depthIn, float4 * vertOut, const int width, const int height, const float * calibrationParams, const float2 range) { dim3 block(16,8,1); dim3 grid( ceil( width / (float)block.x), ceil( height / (float)block.y )); gpu_depthToVertices<DepthType,5><<<grid,block>>>(depthIn, vertOut, width, height, calibrationParams, range); } template <typename DepthType> void depthToVertices(const DepthType * depthIn, float4 * vertOut, const int width, const int height, const float * calibrationParams, const float2 range, const float scale) { dim3 block(16,8,1); dim3 grid( ceil( width / (float)block.x), ceil( height / (float)block.y )); gpu_depthToVertices<DepthType,5><<<grid,block>>>(depthIn, vertOut, width, height, calibrationParams, range, scale); } void verticesToNormals(const float4 * vertIn, float4 * normOut, const int width, const int height) { dim3 block(16,8,1); dim3 grid( ceil( width / (float)block.x), ceil( 
height / (float)block.y )); gpu_verticesToNormals<<<grid,block>>>(vertIn,normOut,width,height); } void eliminatePlane(float4 * verts, const float4 * norms, const int width, const int height, const float3 planeNormal, const float planeD, const float epsDist, const float epsNorm) { dim3 block(16,8,1); dim3 grid( ceil( width / (float)block.x), ceil( height / (float)block.y )); gpu_eliminatePlane<<<grid,block>>>(verts,norms,width,height,planeNormal,planeD,epsDist,epsNorm); } void cropBox(float4 * verts, const int width, const int height, const float3 & boxMin, const float3 & boxMax) { dim3 block(16,8,1); dim3 grid( ceil( width / (float)block.x), ceil( height / (float)block.y )); gpu_cropBox<<<grid,block>>>(verts,width,height,boxMin,boxMax); } void maskPointCloud(float4 * verts, const int width, const int height, const int * mask) { dim3 block(16,8,1); dim3 grid( ceil( width / (float)block.x), ceil( height / (float)block.y )); gpu_maskPointCloud<<<grid,block>>>(verts,width,height,mask); } #define COMPILE_DEPTH_TYPE(type) \ template void depthToVertices<type>(const type * depthIn, float4 * vertOut, const int width, const int height, const float2 pp, const float2 fl, const float2 range); \ template void depthToVertices<type>(const type * depthIn, float4 * vertOut, const int width, const int height, const float2 pp, const float2 fl, const float2 range, const float scale); \ template void depthToVertices<type>(const type * depthIn, float4 * vertOut, const int width, const int height, const float * calibrationparams, const float2 range); \ template void depthToVertices<type>(const type * depthIn, float4 * vertOut, const int width, const int height, const float * calibrationparams, const float2 range, const float scale); COMPILE_DEPTH_TYPE(float) COMPILE_DEPTH_TYPE(ushort) }
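// ---------------------------------------------------------------------------
// Illustrative host-side usage sketch (added for exposition, not part of the
// original dart source).  It assumes a 640x480 ushort depth image already on
// the device with depth in millimeters (hence the 0.001f scale to meters);
// the buffer names, resolution and intrinsics below are hypothetical.
namespace dart {
inline void depthToPointCloudSketch(const ushort * d_depthMillimeters, // device ptr, width*height
                                    float4 * d_vertices,               // device ptr, width*height
                                    float4 * d_normals) {              // device ptr, width*height
    const int width = 640, height = 480;
    const float2 pp = make_float2(320.0f, 240.0f); // principal point (assumed)
    const float2 fl = make_float2(525.0f, 525.0f); // focal lengths (assumed)
    const float2 range = make_float2(0.3f, 4.0f);  // accepted depth range, meters
    // Back-project raw depths into camera-frame vertices, scaling mm -> m;
    // pixels outside `range` get w == 0 and are treated as invalid downstream.
    depthToVertices<ushort>(d_depthMillimeters, d_vertices, width, height,
                            pp, fl, range, 0.001f);
    // Finite-difference normals computed from the organized vertex map.
    verticesToNormals(d_vertices, d_normals, width, height);
}
} // namespace dart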
the_stack
* \test Testing the BLAS level 2 routines in the ViennaCL BLAS-like shared library **/ // include necessary system headers #include <iostream> #include <vector> // Some helper functions for this tutorial: #include "viennacl.hpp" #include "viennacl/vector.hpp" template<typename ScalarType> ScalarType diff(ScalarType const & s1, ScalarType const & s2) { if (s1 > s2 || s1 < s2) return (s1 - s2) / std::max(std::fabs(s1), std::fabs(s2)); return ScalarType(0); } template<typename ScalarType, typename ViennaCLVectorType> ScalarType diff(std::vector<ScalarType> const & v1, ViennaCLVectorType const & vcl_vec) { std::vector<ScalarType> v2_cpu(vcl_vec.size()); viennacl::backend::finish(); viennacl::copy(vcl_vec, v2_cpu); ScalarType inf_norm = 0; for (unsigned int i=0;i<v1.size(); ++i) { if ( std::max( std::fabs(v2_cpu[i]), std::fabs(v1[i]) ) > 0 ) v2_cpu[i] = std::fabs(v2_cpu[i] - v1[i]) / std::max( std::fabs(v2_cpu[i]), std::fabs(v1[i]) ); else v2_cpu[i] = 0.0; if (v2_cpu[i] > inf_norm) inf_norm = v2_cpu[i]; } return inf_norm; } template<typename T, typename U, typename EpsilonT> void check(T const & t, U const & u, EpsilonT eps) { EpsilonT rel_error = std::fabs(static_cast<EpsilonT>(diff(t,u))); if (rel_error > eps) { std::cerr << "Relative error: " << rel_error << std::endl; std::cerr << "Aborting!" << std::endl; exit(EXIT_FAILURE); } std::cout << "SUCCESS "; } int main() { std::size_t size1 = 13; // at least 7 std::size_t size2 = 11; // at least 7 float eps_float = 1e-5f; double eps_double = 1e-12; ViennaCLBackend my_backend; ViennaCLBackendCreate(&my_backend); std::vector<float> ref_float_x(size1); for (std::size_t i=0; i<size1; ++i) ref_float_x[i] = static_cast<float>(i); std::vector<float> ref_float_y(size2); for (std::size_t i=0; i<size2; ++i) ref_float_y[i] = static_cast<float>(size2 - i); std::vector<float> ref_float_A(size1*size2); for (std::size_t i=0; i<size1*size2; ++i) ref_float_A[i] = static_cast<float>(3*i); std::vector<float> ref_float_B(size1*size2); for (std::size_t i=0; i<size1*size2; ++i) ref_float_B[i] = static_cast<float>(2*i); std::vector<double> ref_double_x(size1, 1.0); for (std::size_t i=0; i<size1; ++i) ref_double_x[i] = static_cast<double>(i); std::vector<double> ref_double_y(size2, 2.0); for (std::size_t i=0; i<size2; ++i) ref_double_y[i] = static_cast<double>(size2 - i); std::vector<double> ref_double_A(size1*size2, 3.0); for (std::size_t i=0; i<size1*size2; ++i) ref_double_A[i] = static_cast<double>(3*i); std::vector<double> ref_double_B(size1*size2, 4.0); for (std::size_t i=0; i<size1*size2; ++i) ref_double_B[i] = static_cast<double>(2*i); // Host setup viennacl::vector<float> host_float_x = viennacl::scalar_vector<float>(size1, 1.0f, viennacl::context(viennacl::MAIN_MEMORY)); for (std::size_t i=0; i<size1; ++i) host_float_x[i] = float(i); viennacl::vector<float> host_float_y = viennacl::scalar_vector<float>(size2, 2.0f, viennacl::context(viennacl::MAIN_MEMORY)); for (std::size_t i=0; i<size2; ++i) host_float_y[i] = float(size2 - i); viennacl::vector<float> host_float_A = viennacl::scalar_vector<float>(size1*size2, 3.0f, viennacl::context(viennacl::MAIN_MEMORY)); for (std::size_t i=0; i<size1*size2; ++i) host_float_A[i] = float(3*i); viennacl::vector<float> host_float_B = viennacl::scalar_vector<float>(size1*size2, 4.0f, viennacl::context(viennacl::MAIN_MEMORY)); for (std::size_t i=0; i<size1*size2; ++i) host_float_B[i] = float(2*i); viennacl::vector<double> host_double_x = viennacl::scalar_vector<double>(size1, 1.0, viennacl::context(viennacl::MAIN_MEMORY)); for 
(std::size_t i=0; i<size1; ++i) host_double_x[i] = double(i); viennacl::vector<double> host_double_y = viennacl::scalar_vector<double>(size2, 2.0, viennacl::context(viennacl::MAIN_MEMORY)); for (std::size_t i=0; i<size2; ++i) host_double_y[i] = double(size2 - i); viennacl::vector<double> host_double_A = viennacl::scalar_vector<double>(size1*size2, 3.0, viennacl::context(viennacl::MAIN_MEMORY)); for (std::size_t i=0; i<size1*size2; ++i) host_double_A[i] = double(3*i); viennacl::vector<double> host_double_B = viennacl::scalar_vector<double>(size1*size2, 4.0, viennacl::context(viennacl::MAIN_MEMORY)); for (std::size_t i=0; i<size1*size2; ++i) host_double_B[i] = double(2*i); // CUDA setup #ifdef VIENNACL_WITH_CUDA viennacl::vector<float> cuda_float_x = viennacl::scalar_vector<float>(size1, 1.0f, viennacl::context(viennacl::CUDA_MEMORY)); for (std::size_t i=0; i<size1; ++i) cuda_float_x[i] = float(i); viennacl::vector<float> cuda_float_y = viennacl::scalar_vector<float>(size2, 2.0f, viennacl::context(viennacl::CUDA_MEMORY)); for (std::size_t i=0; i<size2; ++i) cuda_float_y[i] = float(size2 - i); viennacl::vector<float> cuda_float_A = viennacl::scalar_vector<float>(size1*size2, 3.0f, viennacl::context(viennacl::CUDA_MEMORY)); for (std::size_t i=0; i<size1*size2; ++i) cuda_float_A[i] = float(3*i); viennacl::vector<float> cuda_float_B = viennacl::scalar_vector<float>(size1*size2, 4.0f, viennacl::context(viennacl::CUDA_MEMORY)); for (std::size_t i=0; i<size1*size2; ++i) cuda_float_B[i] = float(2*i); viennacl::vector<double> cuda_double_x = viennacl::scalar_vector<double>(size1, 1.0, viennacl::context(viennacl::CUDA_MEMORY)); for (std::size_t i=0; i<size1; ++i) cuda_double_x[i] = double(i); viennacl::vector<double> cuda_double_y = viennacl::scalar_vector<double>(size2, 2.0, viennacl::context(viennacl::CUDA_MEMORY)); for (std::size_t i=0; i<size2; ++i) cuda_double_y[i] = double(size2 - i); viennacl::vector<double> cuda_double_A = viennacl::scalar_vector<double>(size1*size2, 3.0, viennacl::context(viennacl::CUDA_MEMORY)); for (std::size_t i=0; i<size1*size2; ++i) cuda_double_A[i] = double(3*i); viennacl::vector<double> cuda_double_B = viennacl::scalar_vector<double>(size1*size2, 4.0, viennacl::context(viennacl::CUDA_MEMORY)); for (std::size_t i=0; i<size1*size2; ++i) cuda_double_B[i] = double(2*i); #endif // OpenCL setup #ifdef VIENNACL_WITH_OPENCL ViennaCLInt context_id = 0; viennacl::vector<float> opencl_float_x = viennacl::scalar_vector<float>(size1, 1.0f, viennacl::context(viennacl::ocl::get_context(context_id))); for (std::size_t i=0; i<size1; ++i) opencl_float_x[i] = float(i); viennacl::vector<float> opencl_float_y = viennacl::scalar_vector<float>(size2, 2.0f, viennacl::context(viennacl::ocl::get_context(context_id))); for (std::size_t i=0; i<size2; ++i) opencl_float_y[i] = float(size2 - i); viennacl::vector<float> opencl_float_A = viennacl::scalar_vector<float>(size1*size2, 3.0f, viennacl::context(viennacl::ocl::get_context(context_id))); for (std::size_t i=0; i<size1*size2; ++i) opencl_float_A[i] = float(3*i); viennacl::vector<float> opencl_float_B = viennacl::scalar_vector<float>(size1*size2, 4.0f, viennacl::context(viennacl::ocl::get_context(context_id))); for (std::size_t i=0; i<size1*size2; ++i) opencl_float_B[i] = float(2*i); viennacl::vector<double> *opencl_double_x = NULL; viennacl::vector<double> *opencl_double_y = NULL; viennacl::vector<double> *opencl_double_A = NULL; viennacl::vector<double> *opencl_double_B = NULL; if ( viennacl::ocl::current_device().double_support() ) { 
opencl_double_x = new viennacl::vector<double>(viennacl::scalar_vector<double>(size1, 1.0, viennacl::context(viennacl::ocl::get_context(context_id)))); for (std::size_t i=0; i<size1; ++i) (*opencl_double_x)[i] = double(i); opencl_double_y = new viennacl::vector<double>(viennacl::scalar_vector<double>(size2, 2.0, viennacl::context(viennacl::ocl::get_context(context_id)))); for (std::size_t i=0; i<size2; ++i) (*opencl_double_y)[i] = double(size2 - i); opencl_double_A = new viennacl::vector<double>(viennacl::scalar_vector<double>(size1*size2, 3.0, viennacl::context(viennacl::ocl::get_context(context_id)))); for (std::size_t i=0; i<size1*size2; ++i) (*opencl_double_A)[i] = double(3*i); opencl_double_B = new viennacl::vector<double>(viennacl::scalar_vector<double>(size1*size2, 4.0, viennacl::context(viennacl::ocl::get_context(context_id)))); for (std::size_t i=0; i<size1*size2; ++i) (*opencl_double_B)[i] = double(2*i); } ViennaCLBackendSetOpenCLContextID(my_backend, context_id); #endif // consistency checks: check(ref_float_x, host_float_x, eps_float); check(ref_float_y, host_float_y, eps_float); check(ref_float_A, host_float_A, eps_float); check(ref_float_B, host_float_B, eps_float); check(ref_double_x, host_double_x, eps_double); check(ref_double_y, host_double_y, eps_double); check(ref_double_A, host_double_A, eps_double); check(ref_double_B, host_double_B, eps_double); #ifdef VIENNACL_WITH_CUDA check(ref_float_x, cuda_float_x, eps_float); check(ref_float_y, cuda_float_y, eps_float); check(ref_float_A, cuda_float_A, eps_float); check(ref_float_B, cuda_float_B, eps_float); check(ref_double_x, cuda_double_x, eps_double); check(ref_double_y, cuda_double_y, eps_double); check(ref_double_A, cuda_double_A, eps_double); check(ref_double_B, cuda_double_B, eps_double); #endif #ifdef VIENNACL_WITH_OPENCL check(ref_float_x, opencl_float_x, eps_float); check(ref_float_y, opencl_float_y, eps_float); check(ref_float_A, opencl_float_A, eps_float); check(ref_float_B, opencl_float_B, eps_float); if ( viennacl::ocl::current_device().double_support() ) { check(ref_double_x, *opencl_double_x, eps_double); check(ref_double_y, *opencl_double_y, eps_double); check(ref_double_A, *opencl_double_A, eps_double); check(ref_double_B, *opencl_double_B, eps_double); } #endif // GEMV std::cout << std::endl << "-- Testing xGEMV..."; for (std::size_t i=0; i<size1/3; ++i) { ref_float_x[i * 2 + 1] *= 0.1234f; ref_double_x[i * 2 + 1] *= 0.1234; for (std::size_t j=0; j<size2/4; ++j) { ref_float_x[i * 2 + 1] += 3.1415f * ref_float_A[(2*i+2) * size2 + 3 * j + 1] * ref_float_y[j * 3 + 1]; ref_double_x[i * 2 + 1] += 3.1415 * ref_double_A[(2*i+2) * size2 + 3 * j + 1] * ref_double_y[j * 3 + 1]; } } std::cout << std::endl << "Host: "; ViennaCLHostSgemv(my_backend, ViennaCLRowMajor, ViennaCLNoTrans, ViennaCLInt(size1/3), ViennaCLInt(size2/4), 3.1415f, viennacl::linalg::host_based::detail::extract_raw_pointer<float>(host_float_A), 2, 1, 2, 3, ViennaCLInt(size2), viennacl::linalg::host_based::detail::extract_raw_pointer<float>(host_float_y), 1, 3, 0.1234f, viennacl::linalg::host_based::detail::extract_raw_pointer<float>(host_float_x), 1, 2); check(ref_float_x, host_float_x, eps_float); ViennaCLHostDgemv(my_backend, ViennaCLRowMajor, ViennaCLNoTrans, ViennaCLInt(size1/3), ViennaCLInt(size2/4), 3.1415, viennacl::linalg::host_based::detail::extract_raw_pointer<double>(host_double_A), 2, 1, 2, 3, ViennaCLInt(size2), viennacl::linalg::host_based::detail::extract_raw_pointer<double>(host_double_y), 1, 3, 0.1234, 
viennacl::linalg::host_based::detail::extract_raw_pointer<double>(host_double_x), 1, 2); check(ref_double_x, host_double_x, eps_double); #ifdef VIENNACL_WITH_CUDA std::cout << std::endl << "CUDA: "; ViennaCLCUDASgemv(my_backend, ViennaCLRowMajor, ViennaCLNoTrans, ViennaCLInt(size1/3), ViennaCLInt(size2/4), 3.1415f, viennacl::cuda_arg(cuda_float_A), 2, 1, 2, 3, size2, viennacl::cuda_arg(cuda_float_y), 1, 3, 0.1234f, viennacl::cuda_arg(cuda_float_x), 1, 2); check(ref_float_x, cuda_float_x, eps_float); ViennaCLCUDADgemv(my_backend, ViennaCLRowMajor, ViennaCLNoTrans, ViennaCLInt(size1/3), ViennaCLInt(size2/4), 3.1415, viennacl::cuda_arg(cuda_double_A), 2, 1, 2, 3, size2, viennacl::cuda_arg(cuda_double_y), 1, 3, 0.1234, viennacl::cuda_arg(cuda_double_x), 1, 2); check(ref_double_x, cuda_double_x, eps_double); #endif #ifdef VIENNACL_WITH_OPENCL std::cout << std::endl << "OpenCL: "; ViennaCLOpenCLSgemv(my_backend, ViennaCLRowMajor, ViennaCLNoTrans, ViennaCLInt(size1/3), ViennaCLInt(size2/4), 3.1415f, viennacl::traits::opencl_handle(opencl_float_A), 2, 1, 2, 3, ViennaCLInt(size2), viennacl::traits::opencl_handle(opencl_float_y), 1, 3, 0.1234f, viennacl::traits::opencl_handle(opencl_float_x), 1, 2); check(ref_float_x, opencl_float_x, eps_float); if ( viennacl::ocl::current_device().double_support() ) { ViennaCLOpenCLDgemv(my_backend, ViennaCLRowMajor, ViennaCLNoTrans, ViennaCLInt(size1/3), ViennaCLInt(size2/4), 3.1415, viennacl::traits::opencl_handle(*opencl_double_A), 2, 1, 2, 3, ViennaCLInt(size2), viennacl::traits::opencl_handle(*opencl_double_y), 1, 3, 0.1234, viennacl::traits::opencl_handle(*opencl_double_x), 1, 2); check(ref_double_x, *opencl_double_x, eps_double); } #endif #ifdef VIENNACL_WITH_OPENCL delete opencl_double_x; delete opencl_double_y; delete opencl_double_A; delete opencl_double_B; #endif ViennaCLBackendDestroy(&my_backend); // // That's it. // std::cout << std::endl << "!!!! TEST COMPLETED SUCCESSFULLY !!!!" << std::endl; return EXIT_SUCCESS; }
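/* Illustrative stand-alone restatement (an assumption for clarity, not ViennaCL API): the
 * reference loop above computes
 *   x[1 + 2*i] = 0.1234 * x[1 + 2*i] + 3.1415 * sum_j A[(2 + 2*i)*size2 + 1 + 3*j] * y[1 + 3*j],
 * i.e. a row-major GEMV on a strided submatrix and strided vectors, which appears to mirror
 * the offset/stride arguments passed to the xGEMV calls above. Parameter names are made up. */
#include <cstddef>
#include <cstdio>
#include <vector>

static void strided_gemv_rowmajor(std::size_t m, std::size_t n, float alpha,
                                  const float * A, std::size_t offA_row, std::size_t offA_col,
                                  std::size_t incA_row, std::size_t incA_col, std::size_t lda,
                                  const float * vin, std::size_t offin, std::size_t incin,
                                  float beta,
                                  float * vout, std::size_t offout, std::size_t incout)
{
  // vout_strided <- alpha * A_sub * vin_strided + beta * vout_strided
  for (std::size_t i = 0; i < m; ++i)
  {
    float acc = 0.0f;
    for (std::size_t j = 0; j < n; ++j)
      acc += A[(offA_row + i * incA_row) * lda + offA_col + j * incA_col] * vin[offin + j * incin];
    vout[offout + i * incout] = alpha * acc + beta * vout[offout + i * incout];
  }
}

int main()
{
  // Same sizes and strides as the reference update above: m = size1/3, n = size2/4, lda = size2.
  std::size_t size1 = 13, size2 = 11;
  std::vector<float> A(size1 * size2), x(size1), y(size2);
  for (std::size_t i = 0; i < A.size(); ++i) A[i] = float(3 * i);
  for (std::size_t i = 0; i < x.size(); ++i) x[i] = float(i);
  for (std::size_t i = 0; i < y.size(); ++i) y[i] = float(size2 - i);

  strided_gemv_rowmajor(size1 / 3, size2 / 4, 3.1415f,
                        A.data(), 2, 1, 2, 3, size2,
                        y.data(), 1, 3,
                        0.1234f,
                        x.data(), 1, 2);
  std::printf("x[3] = %f\n", x[3]);  // one of the updated entries (i = 1)
  return 0;
}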
the_stack
#define STR1(X) #X #define STR(X) STR1(X) #define STRINGIFY(X,Y) X ## Y #define CON(X,Y) STRINGIFY(X,Y) #define KDir kernels #include "includes/ourmacros.h" extern __shared__ type tile[]; __device__ __forceinline__ void fvinomgeneralolap_main_coars(const type * __restrict__ Atmp, type * Btmp, const int tb_size, const int* __restrict__ aexpr, const int* __restrict__ bexpr, const int* __restrict__ texpr1, const int * __restrict__ texpr2, const int ilimit, const int olimit, const int rowinc, const int shm2, const int numelems_blk, const int acoars, const int bcoars, const int size, type alpha, type beta) { //if(blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x == 1) //printf("ilimit = %d, olimit = %d, tbsize = %d, Atmp = %p, Btmp = %p, txpr2[10] = %d, numblocks = %d \n",ilimit, olimit, tb_size, Atmp, Btmp, texpr2[10], gridDim.x*gridDim.y*gridDim.z); for(int i = 0; i < size; i++) { for(int Id=threadIdx.x; Id < numelems_blk; Id+= tb_size) { tile[texpr1[Id]] = Atmp[aexpr[Id] + i*acoars]; } __syncthreads(); for(int Id=threadIdx.x; Id < numelems_blk; Id+= tb_size) { Btmp[bexpr[Id] + i *bcoars] = alpha* tile[texpr2[Id]] + beta* Btmp[bexpr[Id] + i *bcoars]; } __syncthreads(); } } __device__ __forceinline__ void fvinomgeneralolap_rem_coars(const type * __restrict__ Atmp, type * __restrict__ Btmp, const int tb_size, const int* __restrict__ aexpr, const int* __restrict__ bexpr, const int* __restrict__ texpr1, const int * __restrict__ texpr2, const int ilimit, const int olimit, const int rowinc, const int shm2, const int ilimitr, const int olimitr, const int numelems_blk, const int acoars, const int bcoars, const int size, type alpha, type beta) { for(int i = 0; i < size; i++) { for(int Id=threadIdx.x; Id < numelems_blk; Id+= tb_size) { tile[texpr1[Id]] = Atmp[aexpr[Id]+i * acoars]; } __syncthreads(); for(int Id=threadIdx.x; Id < numelems_blk; Id+= tb_size) { int toffset2 = texpr2[Id]; if(toffset2 % (shm2) < ilimitr && toffset2/shm2 < olimitr) Btmp[bexpr[Id]+i*bcoars] = alpha* tile[toffset2] + beta * Btmp[bexpr[Id]+i*bcoars]; } __syncthreads(); } } __device__ __forceinline__ void fvinomgeneralolap_main(const type * __restrict__ Atmp, type * Btmp, const int tb_size, const int* __restrict__ aexpr, const int* __restrict__ bexpr, const int* __restrict__ texpr1, const int * __restrict__ texpr2, const int ilimit, const int olimit, const int rowinc, const int shm2, const int numelems_blk, type alpha, type beta) { //if(blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x == 1) // printf("ilimit = %d, olimit = %d, tbsize = %d, Atmp = %p, Btmp = %p, txpr2[10] = %d, numblocks = %d \n",ilimit, olimit, tb_size, Atmp, Btmp, texpr2[10], gridDim.x*gridDim.y*gridDim.z); for(int Id=threadIdx.x; Id < numelems_blk; Id+= tb_size) { tile[texpr1[Id]] = Atmp[aexpr[Id]]; } __syncthreads(); for(int Id=threadIdx.x; Id < numelems_blk; Id+= tb_size) { int toffset2 = texpr2[Id]; Btmp[bexpr[Id]] =alpha* tile[toffset2] + beta*Btmp[bexpr[Id]]; } } __device__ __forceinline__ void fvinomgeneralolap_rem(const type * __restrict__ Atmp, type * __restrict__ Btmp, const int tb_size, const int* __restrict__ aexpr, const int* __restrict__ bexpr, const int* __restrict__ texpr1, const int * __restrict__ texpr2, const int ilimit, const int olimit, const int rowinc, const int shm2, const int ilimitr, const int olimitr, const int numelems_blk, type alpha, type beta) { for(int Id=threadIdx.x; Id < numelems_blk; Id+= tb_size) { tile[texpr1[Id]] = Atmp[aexpr[Id]]; } __syncthreads(); for(int Id=threadIdx.x; Id < 
numelems_blk; Id+= tb_size) { int toffset2 = texpr2[Id]; if(toffset2 % (shm2) < ilimitr && toffset2/shm2 < olimitr) Btmp[bexpr[Id]] = alpha* tile[toffset2] + beta*Btmp[bexpr[Id]]; } } #define FNAME fvigeneralolap.h #include "includes/macro.h" #undef FNAME #define FNAME fvigeneralolap_coars.h #include "includes/macro.h" #undef FNAME /* //general __global__ void fvinomgeneralolap_kernel (const int ndim, const type * A, type * B, const int ilimit, const int olimit,const int param2, const int param3, const int param4 , const int * __restrict__ lda_s, const int* __restrict__ ldb_s, const int* __restrict__ idx_s , const int remainder1, const int remainder2, const int* __restrict__ offseti, const int* __restrict__ offseto, const int* __restrict__ tile1, const int* __restrict__ tile2, const int ilimitr, const int olimitr, const int inputrem, const int outputrem, const int rowinc, const int shm2, const int numelems_blk ) { int tmp; int val0 = blockIdx.x; int val1 = blockIdx.y; int aexpr =0, bexpr = 0; if(ndim > 1) aexpr = val0 * lda_s[0] + val1*lda_s[1], bexpr = val0 * ldb_s[0] + val1 * ldb_s[1]; else { aexpr = val0 * lda_s[0], bexpr = val0 * ldb_s[0]; } int idx; idx = blockIdx.z; int ii1 = -1 , iip2 = -1; if(param2 == 0) iip2 = blockIdx.x; else if(param2 == 1) iip2 = blockIdx.y; if(param3 == 0) ii1 = blockIdx.x; else if(param3 == 1) ii1 = blockIdx.y; #pragma unroll for(int i = 2; i < ndim; i++) { tmp = idx/idx_s[i]; int index = idx - tmp * idx_s[i]; aexpr += index * lda_s[i]; bexpr += index * ldb_s[i]; idx = tmp; if(i == param2) ii1 = index; else if(i == param3) iip2 = index; } const double *Atmp = A + aexpr; double *Btmp = B + bexpr; if(ii1 < inputrem && iip2 < outputrem) { fvinomgeneralolap_cuSharedMemTranspose_vec256(Atmp,Btmp, param4, offseti, offseto,tile1, tile2, ilimit, olimit, rowinc, shm2, numelems_blk ); } else if(ii1 >= inputrem && iip2 < outputrem) { //remainder in size1 fvinomgeneralolap_cuSharedMemRemTranspose_vec256(Atmp,Btmp, param4, offseti, offseto,tile1, tile2, ilimit, olimit, rowinc, shm2, ilimitr, olimit, numelems_blk ); } else if(iip2 >= outputrem && ii1 < inputrem) { //remainder in size2 fvinomgeneralolap_cuSharedMemRemTranspose_vec256(Atmp,Btmp, param4, offseti, offseto,tile1, tile2, ilimit, olimit, rowinc, shm2, ilimit, olimitr, numelems_blk ); } else { fvinomgeneralolap_cuSharedMemRemTranspose_vec256(Atmp,Btmp, param4, offseti, offseto,tile1, tile2, ilimit, olimit, rowinc, shm2, ilimitr, olimitr, numelems_blk ); } return; //#undef ndim } */ void fvinomgeneralolap_CallerWrapper(int ndim, type * A, type * B,const int ilimit, const int olimit, const int blockAI, const int blockBI, const int numblocks, const int numthreads, const int shm , const int * __restrict__ lda_s, const int* __restrict__ ldb_s, const int* __restrict__ idx_s, const int coarsa, const int coarsb, const int * idx_ss, const int shm2, const int* __restrict__ aexpr, const int* __restrict__ bexpr, const int* __restrict__ texpr1, const int* __restrict__ texpr2, const int ilimitr, const int olimitr, const int inputrem, const int outputrem, const int numelems_blk, const int size, type alpha, type beta ) { /* int second, third; if(ndim > 2) { second = idx_ss[1]; third = numblocks/(idx_ss[0]*idx_ss[1]); } else if(ndim > 1) { second = idx_ss[1]; third = 1;// numblocks/(idx_ss[0]*idx_ss[1]); } else { second = third = 1; } dim3 thread_blocks(idx_ss[0], second, third);*/ const int rowinc = (numthreads+ilimit-1)/ilimit; #ifdef printd printf("thread_blocks = %d, numthreads = %d, shm = %d\n", numblocks, numthreads, shm); 
printf("size = %d, ndim = %d, shm = %d\n", size, ndim, shm); #endif if(size > 0) { dim3 thread_blocks(numblocks/size, 1, 1); switch(ndim) { EXPANDDIMS(fvinomgeneralolap_coars_kernel_, thread_blocks, numthreads, shm, (A, B, ilimit, olimit, blockAI, blockBI, numthreads, lda_s,ldb_s, idx_s, coarsa,coarsb, aexpr, bexpr, texpr1, texpr2, ilimitr, olimitr, inputrem, outputrem, rowinc, shm2, numelems_blk, size, alpha, beta)) default: { // fvinomgeneralolap_coars_kernel<<<thread_blocks, numthreads, shm>>>(ndim, A, B, ilimit, olimit, blockAI, blockBI, numthreads, lda_s,ldb_s, idx_s, coarsa, coarsb, aexpr, bexpr, texpr1, texpr2, ilimitr, olimitr, inputrem, outputrem, rowinc, shm2, numelems_blk); } } } else { dim3 thread_blocks(numblocks, 1, 1); switch(ndim) { EXPANDDIMS(fvinomgeneralolap_kernel_, thread_blocks, numthreads, shm, (A, B, ilimit, olimit, blockAI, blockBI, numthreads, lda_s,ldb_s, idx_s, aexpr, bexpr, texpr1, texpr2, ilimitr, olimitr, inputrem, outputrem, rowinc, shm2, numelems_blk, alpha, beta)) default: { // fvinomgeneralolap_kernel<<<thread_blocks, numthreads, shm>>>(ndim, A, B, ilimit, olimit, blockAI, blockBI, numthreads, lda_s,ldb_s, idx_s, aexpr, bexpr, texpr1, texpr2, ilimitr, olimitr, inputrem, outputrem, rowinc, shm2, numelems_blk); } } } } int ispresent(int a, int *array, int n) { for(int i = 0; i < n; i++) { if(array[i] == a) return 1; } return 0; } int getoff(int index,const int * dims,const int * stride, int n) { int ret = 0; for(int i = 0; i < n; i++) { int ii = index % dims[i]; ret += ii * stride[i]; index/= dims[i]; } return ret; } void makeconsecutive(int *tmp, int k) { int tmp2[20], permi[20], j,i; //printf("\n perm before: "); //for(i = 0; i < k; i++) printf("%d ", tmp[i]);//permi[i]; for(i = 0; i < k; i ++) tmp2[i] = tmp[i]; for(i = 0; i < k; i ++) { for(j = 0; j < k-i-1; j++) { if(tmp2[j] > tmp2[j+1]) { int tmp = tmp2[j]; tmp2[j] = tmp2[j+1]; tmp2[j+1] = tmp; } } } for(i = 0; i < k; i++) { for(j=0; j <k; j++) { if(tmp[i] == tmp2[j]) { permi[i] = j; break; } } } for(i = 0; i < k; i++) tmp[i] = permi[i]; } void swap(int array[], int ind1, int ind2); int cancoarsen(int *lda, int newndim) { if(newndim < 1) return -1; unsigned long vol = 1; for(int i = 0; i < newndim; i++) { vol *= lda[i]; } if(vol < 32*100) return -1; for(int i = 0; i < newndim; i++) { if(lda[i] >= 4 && lda[i] <= 31) return i; } /* for(int i = 0; i < newndim; i++) { if(lda[i] >= 2 && lda[i] <= 300) return i; }*/ return -1; } extern "C" void fvigeneralolap_transpose_kernel(int ndim, type *A, type *B, int *lda, const int *ldb, const int* params, const int * perm, const int* rperm, type alpha, type beta) { // int numBlocks = computeNumBlocksCode ; #ifdef printd printf("\nA Dims: %d \t %d \t %d\t %d\t %d\n", lda[0], lda[1], lda[2], lda[3], lda[4]); printf("\nAll diff Params: %d \t %d \t %d\t %d\t %d\t %d\t %d\t %d\t %d \t%d\t %d\t %d\n", params[0], params[1], params[2], params[3], params[4], params[5], params[6], params[7], params[8], params[9], params[10], params[11]); printf("\nB Dims: %d \t %d \t %d\t %d\t %d\n", ldb[0], ldb[1], ldb[2], ldb[3], ldb[4]); printf("\nR perm: %d \t %d \t %d\t %d\t %d\n", rperm[0], rperm[1], rperm[2], rperm[3], rperm[4]); #endif int alimit = params[3]; int blimit = params[4]; int blockA=params[0]; int blockB = params[11]; int ilimit = params[7]; //int olimit = params[8]; int i = 0, j = 0; int size = 1; for(i = 0; i < blimit; i++) { if(perm[i] > alimit) { size *= ldb[i]; } } if(perm[i] > alimit) { if(blockB == 1) size *= ldb[i]; else size*= blockB; } #ifdef printd printf("In 
.cu, alimit = %d, blimit = %d, bsize = %d, blockB = %d, blockA = %d\n", alimit, blimit, size, blockB, blockA); #endif //for(int y = 0; y < j; y++) //printf("bo[%d] = %d ",y, bo[y]); int olimit = size; #ifdef SLICE printf("\t%d\t%d\t", ilimit, olimit); #endif //exit(0); int numBlocks = params[6];//((size[1] + 8 -1)/8) * size[2] * ((size[3] + 8 -1)/8) * size[4] ; const int pad = ((ilimit %2)+1)%2; int *d_lda_s, *d_ldb_s, *d_idx_s; const int remainder1 = lda[params[3]] % blockA; int remainder2; remainder2 = lda[perm[params[4]]] % blockB; const int ilimitr = (ilimit * remainder1) / blockA; int olimitr = (olimit * remainder2) / blockB; int irem, orem; if(remainder1 == 0) irem = lda[alimit]; else irem = (lda[alimit] - remainder1)/blockA; if(remainder2 == 0) orem = lda[perm[blimit]]; else orem = (ldb[blimit] - remainder2)/blockB; if(perm[params[4]] == params[3]) { olimitr = olimit; remainder2 = 0; orem = lda[perm[blimit]]; } else { //remainder2 = lda[perm[params[4]]] % blockB; } #ifdef printd printf("\nrem1 = %d, rem2 = %d\n", remainder1, remainder2); printf("\nilimit = %d, olimit = %d, ilimitr = %d, olimitr = %d\n", ilimit, olimit, ilimitr, olimitr); #endif int *input_base, *output_base, *tile_base1, *tile_base2; int *aexpr, *bexpr, *texpr1, *texpr2; // int *ablock, *bblock, *d_ablock, *d_bblock;; int lda_s[20], ldb_s[20], idx_s[20], temp[20]; lda_s[0] = 1; ldb_s[0] = 1; idx_s[0] = 1; for(i = 1; i < alimit; i++) { idx_s[i] = 1; lda_s[i] = lda_s[i-1] * lda[i-1]; ldb_s[i] = ldb_s[i-1] * ldb[i-1]; } if(rperm[alimit] < blimit || rperm[alimit] == blimit && blockB == 1) idx_s[alimit] = 1; else{ idx_s[alimit] = (lda[alimit] + blockA - 1) / blockA; } lda_s[i] = lda_s[i-1] * lda[i-1]; ldb_s[i] = ldb_s[i-1] * ldb[i-1]; for(i = alimit+1; i < ndim; i++) { lda_s[i] = lda_s[i-1] * lda[i-1]; ldb_s[i] = ldb_s[i-1] * ldb[i-1]; if(rperm[i] < blimit) { idx_s[i] = 1;// (lda[i] + blockA - 1) / blockA; } else if(rperm[i] == blimit) { idx_s[i] = (lda[i] + blockB - 1) / blockB; } else { idx_s[i] = lda[i]; } } for(i = 0; i < ndim; i++) { temp[i] = ldb_s[rperm[i]]; #ifdef printd printf("Idx[%d] = %d\n", i, idx_s[i]); #endif } aexpr = (int*)malloc(ilimit* olimit * sizeof(int)); bexpr = (int*)malloc(ilimit * olimit * sizeof(int)); texpr1 = (int*)malloc(ilimit* olimit * sizeof(int)); texpr2 = (int*)malloc(ilimit * olimit* sizeof(int)); SAFECUDAMALLOC(&input_base,ilimit*olimit*sizeof(int)); SAFECUDAMALLOC(&output_base,ilimit*olimit*sizeof(int)); SAFECUDAMALLOC(&tile_base1, ilimit*olimit *sizeof(int)); SAFECUDAMALLOC(&tile_base2, ilimit*olimit*sizeof(int)); int outD[20], outD_s[20], B_s[20]; outD_s[0] = 1; outD[0] = ldb[0]; int inD[20], inD_s[20]; inD_s[0] = 1; inD[0] = lda[0]; //B_s[0] = ldb_s[rperm[0]]; B_s[0] = 1;//ldb_s[rperm[0]]; int permD_s[20], permD[20]; // permD_s[alimit+1] = outD_s[0]; //permD[alimit+1] = outD[0]; int OO_C = 0, C_C = 0; int onlyOut[20];int onlyOutI[20]; for(i = 0; i <= alimit; i++) { if(rperm[i] <= blimit) C_C++; } const int OI_C = alimit + 1; if(perm[0] > alimit) { OO_C++; onlyOut[0] = ldb[0]; onlyOutI[0] = 0; permD[0] = OI_C; } else { // C_C++; permD[0] = perm[0]; } for(i = 1; i < blimit; i++) { outD[i] = ldb[i]; outD_s[i] = outD_s[i-1] * outD[i-1]; // B_s[i] = ldb_s[rperm[i]]; B_s[i] = ldb_s[i]; if(perm[i] > alimit) { onlyOut[OO_C] = ldb[i]; onlyOutI[OO_C++] = i; permD[i] = OI_C + i; } else { // C_C++; permD[i] = perm[i]; } } if(blimit == 0) {i = 0; OO_C = 0;} if(blockB == 1) { outD[i] = ldb[i]; } else { outD[i] = blockB; } if(i > 0) { outD_s[i] = outD_s[i-1] * outD[i-1]; } else { outD_s[i] = 1; 
} B_s[i] = ldb_s[i]; if(perm[i] <= alimit) { // C_C++; permD[i] = perm[i]; } else { //if(blockB == 1) onlyOut[OO_C] = ldb[i]; //else //onlyOut[OO_C] = blockB; onlyOutI[OO_C++] = i; permD[i] = OI_C + i; } i++; for(j = 0; j < alimit; j++) { if(rperm[j] > blimit) { outD[i] = lda[j]; permD[i] = j; outD_s[i] = outD_s[i-1] * outD[i-1]; B_s[i] = ldb_s[rperm[j]]; i++; } else { //tmp [k++] = j; //C_C++; } } if(rperm[j] > blimit) { //printf("BI = %d, rperm[%d] = %d, blimit = %d\n", i, j, rperm[j], blimit); if(blockA == 1) { outD[i] = lda[j]; } else { outD[i] = blockA; } permD[i] = j; B_s[i] = ldb_s[rperm[j]]; outD_s[i] = outD_s[i-1] * outD[i-1]; i++; } else { //tmp [k++] = rperm[j]; //C_C++; } int BI = i; for(i = 1; i < alimit; i++) { inD[i] = lda[i]; inD_s[i] = inD_s[i-1] * inD[i-1] ; } if(alimit == 0) i = 0; if(blockA == 1) { inD[i] = lda[i]; } else { inD[i] = blockA; } if(i > 0) inD_s[i] = inD_s[i-1] * inD[i-1]; i++; for(j = 0; j < blimit; j++) { if(perm[j] > alimit) { inD[i] = ldb[j]; inD_s[i] = inD_s[i-1] * inD[i-1]; i++; } else{ // C_C++; } } if(perm[j] > alimit) { if(blockB == 1) { inD[i] = ldb[j]; } else { inD[i] = blockB; } inD_s[i] = inD_s[i-1] * inD[i-1]; i++; } else{ // C_C++; } int AI = i; makeconsecutive(permD, AI); //permD[0] = 1, permD[1] = 2, permD[2] = 0; //inD[0] = 32, inD[1] = 2, inD[2] = 30; //inD_s[0] = 1, inD_s[1] = 32, inD_s[2] = 64; for(i = 0; i < AI; i++) { permD_s[i] = inD_s[permD[i]]; } if(BI != AI) { printf("No. of dimensions in I and O non-matching...\n"); //return; } #ifdef printd printf("\nOO_C = %d, C_C = %d\n ", OO_C, C_C); printf("\nOUT_D: "); for(int i = 0; i < BI; i++) { printf("%d ",outD[i]); } printf("\nOUT_D_S: "); for(int i = 0; i < BI; i++) { printf("%d ",outD_s[i]); } printf("\nIn_D: "); for(int i = 0; i < AI; i++) { printf("%d ",inD[i]); } printf("\nIN_D_S: "); for(int i = 0; i < AI; i++) { printf("%d ", inD_s[i]); } printf("\n"); printf("\nB_S: "); for(int i = 0; i < BI; i++) { printf("%d ", B_s[i]); } printf("\nPerm_D: "); for(int i = 0; i < BI; i++) { printf("%d ",permD[i]); } printf("\n"); printf("\nPerm_S: "); for(int i = 0; i < BI; i++) { printf("%d ", permD_s[i]); } printf("\n"); printf("\n"); #endif for(int rowId=0; rowId < olimit; rowId++) { int tmp = rowId; int aoff=0,j; for(j = 0; j < OO_C; j++) { int dval = onlyOut[j]; int val = tmp%dval; tmp /= dval; aoff += val * lda_s[perm[onlyOutI[j]]]; } for(int colId=0; colId < ilimit; colId++) { aexpr[rowId*ilimit + colId] = aoff + colId; texpr1[rowId * (ilimit) + colId] = rowId * (ilimit+pad) + colId; int off = getoff(rowId * ilimit + colId, outD, permD_s, BI); texpr2[rowId * (ilimit) + colId] = off + pad * (off/ilimit); off = getoff(rowId * ilimit + colId, outD, B_s, AI); bexpr[rowId* ilimit + colId] = off; } } #ifdef printd printf("\n...A...\n"); for(int rowId=0; rowId < olimit; rowId++) { printf("%d ", aexpr[rowId]); //printf("\n"); //for(int colId=0; colId < ilimit; colId++) { // printf("%d ", bexpr[rowId * ilimit + colId]); // printf("%d ", texpr2[rowId * ilimit + colId]); } //printf("\n"); } printf("\n...B...\n"); for(int rowId=0; rowId < olimit; rowId++) { // printf("%d ", bexpr[rowId]); //printf("\n"); for(int colId=0; colId < ilimit; colId++) { printf("%d ", bexpr[rowId * ilimit + colId]); // printf("%d ", texpr2[rowId * ilimit + colId]); } printf("\n"); } printf("\n...T...\n"); for(int rowId=0; rowId < olimit; rowId++) { // printf("%d ", bexpr[rowId]); // printf("\n"); for(int colId=0; colId < ilimit; colId++) { // printf("%d ", bexpr[rowId * ilimit + colId]); printf("%d ", texpr2[rowId * 
ilimit + colId]); } printf("\n"); } #endif lda_s[params[3]] *= params[0];///lda_s[i-1] * lda[i-1]; temp[params[3]] *= params[0];// ldb_s[i-1] * ldb[i-1]; if(params[3] != perm[params[4]])//no double blocking { lda_s[perm[params[4]]] *= params[11];///lda_s[i-1] * lda[i-1]; temp[perm[params[4]]] *= params[11];// ldb_s[i-1] * ldb[i-1]; } int c = 0, d = 0; c = alimit + 1;//c = No. of dimensions to be removed from input for thread blocking, b = same for output but only for those which are not in input if(blockA > 1) c--; int ablockI, bblockI; //int dims[20]; ablockI = alimit-c; bblockI = perm[blimit]-c; int tempbblockI = bblockI; #ifdef printd printf("\nablockI = %d, bblockI = %d\n", ablockI, bblockI); #endif for(int i = c; i < ndim; i++) { if(((rperm[i] < blimit) || ((rperm[i] == blimit) && (blockB ==1)))) { idx_s[i] = 1;// idx_s[j]; /*for(int j = i+1; j < ndim-d; j ++) { idx_s[j-1] = idx_s[j]; lda_s[j-1] = lda_s[j]; temp[j-1] = temp[j]; }*/ d++; if((i < bblockI + c) || (i == bblockI + c) && (blockB == 1)) tempbblockI--; } } bblockI = tempbblockI; int cnt = 0; for(int i = c; i < ndim; i++) { if(idx_s[i] == 1) { for(int j = i+1; j < ndim; j ++) { idx_s[j-1] = idx_s[j]; lda_s[j-1] = lda_s[j]; temp[j-1] = temp[j]; } cnt++; i--; } if(cnt > ndim) break; } /* for(int i = c; i < ndim; i++) { // dims[i] = lda[i]; if((rperm[i] < blimit || (rperm[i] == blimit && blockB ==1))) { for(j = i+1; j < ndim-d; j ++) { idx_s[j-1] = idx_s[j]; lda_s[j-1] = lda_s[j]; temp[j-1] = temp[j]; // dims[j-1] = lda[j]; } d++; if((i < bblockI + c) || (i == bblockI + c) && (blockB == 1)) bblockI--; if(i < alimit) ablockI--; } }*/ int newndim = ndim - (c + d); #ifdef printd printf("\nChanged ablockI = %d, bblockI = %d\n", ablockI, bblockI); #endif //Find the largest dimension and make it the first as only Dimx can have > 65k size /*int max = 0; for(int i = 1; i < newndim; i++) { if(idx_s[c+i] > idx_s[max+c]) max = i; } //printf("\nmax: %d ", max); if(max > c) { swap(idx_s, c, max+c); swap(lda_s, c, max+c); swap(temp, c, max+c); if(max == ablockI) ablockI = 0; else if(ablockI == 0) ablockI = max; if(max == bblockI) bblockI = 0; else if(bblockI ==0) bblockI = max; }*/ if(ablockI > 0)//move it to start, junk part { swap(idx_s, ablockI+c, c); swap(lda_s, ablockI+c, c); swap(temp, ablockI+c, c); } int bi = 0; if(bblockI == 0 && ablockI > 0) bi = ablockI+c; else bi = bblockI+c; if(bblockI >= 0 && bblockI != ablockI)//move it to start { swap(idx_s, bi, c+ (ablockI >=0)); swap(lda_s, bi, c+ (ablockI >=0)); swap(temp, bi, c+ (ablockI >=0)); } if(bblockI >= 0) { if(ablockI == bblockI || ablockI < 0) bblockI = 0; else bblockI = 1; } if(ablockI >=0) ablockI = 0; int nblkdims = 0; if(ablockI >= 0) nblkdims++; if((bblockI >= 0) && (bblockI != ablockI)) nblkdims++; #ifdef printd printf("\nIDx: "); for(int i = 0; i < newndim; i++) { printf("%d ",idx_s[i+c]); } printf("ndim = %d, c = %d, d = %d, newndim = %d, nblkdims = %d\n", ndim, c, d, newndim, nblkdims); #endif int acoars = 0, bcoars = 0; size = -1; #ifdef printd printf("\nirem = %d, orem = %d, alimit = %d, blimit = %d, ablockI = %d, bblockI = %d, newdim = %d, c = %d, d = %d, olddim = %d\n\n", irem, orem, alimit, blimit, ablockI, bblockI, newndim, c, d, ndim); #endif #ifndef NOCOARSEN int cd = cancoarsen(idx_s+c+nblkdims, newndim-nblkdims); if(cd >= 0) { int offset = c + cd + nblkdims; acoars = lda_s[offset]; bcoars = temp[offset]; size = idx_s[offset]; for(int j = cd+1+nblkdims; j < newndim; j++) { idx_s[c+j-1] = idx_s[c+j]; lda_s[c+j-1] = lda_s[c+j]; temp[c+j-1] = temp[c+j]; } // 
ablockI--; // bblockI--; newndim--; } #ifdef printd printf("\nirem = %d, orem = %d, alimit = %d, blimit = %d, ablockI = %d, bblockI = %d, newdim = %d, c = %d, d = %d, olddim = %d, acoars = %d, bcoars = %d, cd = %d\n\n", irem, orem, alimit, blimit, ablockI, bblockI, newndim, c, d, ndim, acoars, bcoars, cd); #endif #endif SAFECUDAMALLOC(&d_lda_s,newndim*sizeof(int)); SAFECUDAMALLOC(&d_ldb_s,newndim*sizeof(int)); SAFECUDAMALLOC(&d_idx_s,newndim*sizeof(int)); SAFECUDAMEMCPY(d_idx_s, idx_s+c,newndim*sizeof(int), cudaMemcpyHostToDevice); SAFECUDAMEMCPY(d_lda_s, lda_s+c,newndim*sizeof(int), cudaMemcpyHostToDevice); SAFECUDAMEMCPY(d_ldb_s, temp+c,newndim*sizeof(int), cudaMemcpyHostToDevice); SAFECUDAMEMCPY(input_base, aexpr, ilimit*olimit*sizeof(int), cudaMemcpyHostToDevice); SAFECUDAMEMCPY(output_base, bexpr, ilimit*olimit*sizeof(int), cudaMemcpyHostToDevice); SAFECUDAMEMCPY(tile_base1, texpr1, ilimit*olimit*sizeof(int), cudaMemcpyHostToDevice); SAFECUDAMEMCPY(tile_base2, texpr2,ilimit* olimit*sizeof(int), cudaMemcpyHostToDevice); #ifdef MODEL { const int olimit = params[8]; int olimitr = (olimit * remainder2) / blockB; printf("\t%d\t%d\t", ilimit, olimit); printf("\t%d\t%d\t%d\t%d\t", ilimit/32, ilimit%32, olimit/32,olimit%32 ); double f1, f2, f3, f4, f; printf("\tf1=%lf\t", f1 = ((ilimit/32) * (olimit/32) + (double)(ilimit/32) * (olimit%32) /32+ (double)(ilimit%32) * (olimit/32) /32 + (double)(ilimit%32) * (olimit%32) /(32*32) )/ (int)(((ilimit+31)/32) * ((olimit+31)/32))); printf("\tf2=%lf\t", f2 = ((ilimitr/32) * (olimit/32) + (double)(ilimitr/32) * (olimit%32) /32+ (double)(ilimitr%32) * (olimit/32) /32 + (double)(ilimitr%32) * (olimit%32) /(32*32) )/ max(1,(int)(((ilimitr+31)/32) * ((olimit+31)/32)))); printf("\tf3=%lf\t", f3 = ((ilimit/32) * (olimitr/32) + (double)(ilimit/32) * (olimitr%32) /32+ (double)(ilimit%32) * (olimitr/32) /32 + (double)(ilimit%32) * (olimitr%32) /(32*32) )/ max(1,(int)(((ilimit+31)/32) * ((olimitr+31)/32)))); printf("\tf4=%lf\t", f4 = ((ilimitr/32) * (olimitr/32) + (double)(ilimitr/32) * (olimitr%32) /32+ (double)(ilimitr%32) * (olimitr/32) /32 + (double)(ilimitr%32) * (olimitr%32) /(32*32) )/ max(1,(int)(((olimitr+31)/32) * ((olimitr+31)/32)))); printf("\t%d\t%d\t", lda[alimit], ldb[blimit]); int asize = lda[alimit]; int bsize = ldb[blimit]; printf("MKL \t%d\t%d\t%d\t%d\t", asize/blockA, asize%blockA, bsize/blockB,bsize%blockB ); //int amax = min(blockA, 32); //int bmax = min(blockB, 32); int amax = blockA; int bmax = blockB; printf("\tf=%lf\t", f = ((asize/amax) * (bsize/bmax) *f1 + (double)((asize/amax) * (bsize%bmax > 0) *f3)+ (double)((asize%amax > 0) * (bsize/bmax)*f2) + (double)((asize%amax>0) * (bsize%bmax > 0) *f4) )/ (int)(((asize+amax-1)/amax) * ((bsize+bmax-1)/bmax))); //printf("\tf=%lf\t", f = ((asize/amax) * (bsize/bmax) *f1 + (double)(asize/amax) * (bsize%bmax) *f3/bmax+ (double)(asize%amax) * (bsize/bmax)*f2 /amax + (double)(asize%amax) * (bsize%bmax) *f4/(amax*bmax) )/ (int)(((asize+amax-1)/amax) * ((bsize+bmax-1)/bmax))); printf("\t%lf\t", f); } #endif #ifdef NOHTIME #include "includes/nohtimestart.h" #endif fvinomgeneralolap_CallerWrapper(newndim, A, B,ilimit,olimit, ablockI,bblockI ,numBlocks, params[2], (ilimit+pad) * olimit *sizeof(type) , d_lda_s,d_ldb_s,d_idx_s ,acoars,bcoars,idx_s+c, (ilimit+pad), input_base, output_base, tile_base1, tile_base2, ilimitr, olimitr, irem, orem, ilimit*olimit, size, alpha, beta); #ifdef NOHTIME #include "includes/nohtimestop.h" #endif {cudaError_t err = cudaGetLastError(); if(err != cudaSuccess){ 
printf("\nKernel ERROR in fvi_nomatch_generalolap: %s (line: %d)\n", cudaGetErrorString(err), __LINE__); //exit(-1); }} free(aexpr); free(bexpr); free(texpr1); free(texpr2); cudaFree(d_lda_s); cudaFree(d_ldb_s); cudaFree(d_idx_s); cudaFree(input_base); cudaFree(output_base); cudaFree(tile_base1); cudaFree(tile_base2); }
the_stack
#pragma once #include "MarchingCubesConstCuda.h" #include "ScalableTSDFVolumeCuda.h" #include <Cuda/Common/Palatte.h> #include <Cuda/Container/HashTableCudaDevice.cuh> #include <Cuda/Container/HashTableCudaKernel.cuh> #include <Cuda/Open3DCuda.h> #include <Cuda/Container/MemoryHeapCudaDevice.cuh> #include <Cuda/Container/MemoryHeapCudaKernel.cuh> namespace open3d { namespace cuda { /** Coordinate system conversions **/ __device__ inline Vector3f ScalableTSDFVolumeCudaDevice::world_to_voxelf( const Vector3f &Xw) { return volume_to_voxelf(transform_world_to_volume_ * Xw); } __device__ inline Vector3f ScalableTSDFVolumeCudaDevice::voxelf_to_world( const Vector3f &X) { return transform_volume_to_world_ * voxelf_to_volume(X); } __device__ inline Vector3f ScalableTSDFVolumeCudaDevice::voxelf_to_volume( const Vector3f &X) { return Vector3f((X(0) + 0.5f) * voxel_length_, (X(1) + 0.5f) * voxel_length_, (X(2) + 0.5f) * voxel_length_); } __device__ inline Vector3f ScalableTSDFVolumeCudaDevice::volume_to_voxelf( const Vector3f &Xv) { return Vector3f(Xv(0) * inv_voxel_length_ - 0.5f, Xv(1) * inv_voxel_length_ - 0.5f, Xv(2) * inv_voxel_length_ - 0.5f); } /** Voxel coordinate in global volume -> in subvolume **/ __device__ inline Vector3i ScalableTSDFVolumeCudaDevice::voxel_locate_subvolume( const Vector3i &X) { return Vector3i((X(0) < 0 ? X(0) - (N_ - 1) : X(0)) / N_, (X(1) < 0 ? X(1) - (N_ - 1) : X(1)) / N_, (X(2) < 0 ? X(2) - (N_ - 1) : X(2)) / N_); } __device__ inline Vector3i ScalableTSDFVolumeCudaDevice::voxelf_locate_subvolume(const Vector3f &X) { return Vector3i(int(floor(X(0) / N_)), int(floor(X(1) / N_)), int(floor(X(2) / N_))); } __device__ inline Vector3i ScalableTSDFVolumeCudaDevice::voxel_global_to_local( const Vector3i &X, const Vector3i &Xsv) { return Vector3i(X(0) - Xsv(0) * N_, X(1) - Xsv(1) * N_, X(2) - Xsv(2) * N_); } __device__ inline Vector3f ScalableTSDFVolumeCudaDevice::voxelf_global_to_local( const Vector3f &X, const Vector3i &Xsv) { return Vector3f(X(0) - Xsv(0) * N_, X(1) - Xsv(1) * N_, X(2) - Xsv(2) * N_); } __device__ inline Vector3i ScalableTSDFVolumeCudaDevice::voxel_local_to_global( const Vector3i &Xlocal, const Vector3i &Xsv) { return Vector3i(Xlocal(0) + Xsv(0) * N_, Xlocal(1) + Xsv(1) * N_, Xlocal(2) + Xsv(2) * N_); } __device__ inline Vector3f ScalableTSDFVolumeCudaDevice::voxelf_local_to_global( const Vector3f &Xlocal, const Vector3i &Xsv) { return Vector3f(Xlocal(0) + Xsv(0) * N_, Xlocal(1) + Xsv(1) * N_, Xlocal(2) + Xsv(2) * N_); } /** Query **/ __device__ UniformTSDFVolumeCudaDevice * ScalableTSDFVolumeCudaDevice::QuerySubvolume(const Vector3i &Xsv) { return hash_table_[Xsv]; } /** Unoptimized access and interpolation **/ __device__ float &ScalableTSDFVolumeCudaDevice::tsdf(const Vector3i &X) { Vector3i Xsv = voxel_locate_subvolume(X); UniformTSDFVolumeCudaDevice *subvolume = QuerySubvolume(Xsv); return subvolume == nullptr ? tsdf_dummy_ : subvolume->tsdf(voxel_global_to_local(X, Xsv)); } __device__ float &ScalableTSDFVolumeCudaDevice::logit(const Vector3i &X) { Vector3i Xsv = voxel_locate_subvolume(X); UniformTSDFVolumeCudaDevice *subvolume = QuerySubvolume(Xsv); return subvolume == nullptr ? logit_dummy_ : subvolume->logit(voxel_global_to_local(X, Xsv)); } __device__ uchar &ScalableTSDFVolumeCudaDevice::weight(const Vector3i &X) { Vector3i Xsv = voxel_locate_subvolume(X); UniformTSDFVolumeCudaDevice *subvolume = QuerySubvolume(Xsv); return subvolume == nullptr ? 
weight_dummy_ : subvolume->weight(voxel_global_to_local(X, Xsv)); } __device__ Vector3b &ScalableTSDFVolumeCudaDevice::color(const Vector3i &X) { Vector3i Xsv = voxel_locate_subvolume(X); UniformTSDFVolumeCudaDevice *subvolume = QuerySubvolume(Xsv); return subvolume == nullptr ? color_dummy_ : subvolume->color(voxel_global_to_local(X, Xsv)); } __device__ float ScalableTSDFVolumeCudaDevice::TSDFAt(const Vector3f &X) { Vector3i Xi = X.template cast<int>(); Vector3f r = Vector3f(X(0) - Xi(0), X(1) - Xi(1), X(2) - Xi(2)); return (1 - r(0)) * ((1 - r(1)) * ((1 - r(2)) * tsdf(Xi + Vector3i(0, 0, 0)) + r(2) * tsdf(Xi + Vector3i(0, 0, 1))) + r(1) * ((1 - r(2)) * tsdf(Xi + Vector3i(0, 1, 0)) + r(2) * tsdf(Xi + Vector3i(0, 1, 1)))) + r(0) * ((1 - r(1)) * ((1 - r(2)) * tsdf(Xi + Vector3i(1, 0, 0)) + r(2) * tsdf(Xi + Vector3i(1, 0, 1))) + r(1) * ((1 - r(2)) * tsdf(Xi + Vector3i(1, 1, 0)) + r(2) * tsdf(Xi + Vector3i(1, 1, 1)))); } __device__ float ScalableTSDFVolumeCudaDevice::LogitAt(const Vector3f &X) { Vector3i Xround = Vector3i(round(X(0)), round(X(1)), round(X(2))); return logit(Xround); } __device__ uchar ScalableTSDFVolumeCudaDevice::WeightAt(const Vector3f &X) { Vector3i Xi = X.template cast<int>(); Vector3f r = Vector3f(X(0) - Xi(0), X(1) - Xi(1), X(2) - Xi(2)); return uchar( (1 - r(0)) * ((1 - r(1)) * ((1 - r(2)) * weight(Xi + Vector3i(0, 0, 0)) + r(2) * weight(Xi + Vector3i(0, 0, 1))) + r(1) * ((1 - r(2)) * weight(Xi + Vector3i(0, 1, 0)) + r(2) * weight(Xi + Vector3i(0, 1, 1)))) + r(0) * ((1 - r(1)) * ((1 - r(2)) * weight(Xi + Vector3i(1, 0, 0)) + r(2) * weight(Xi + Vector3i(1, 0, 1))) + r(1) * ((1 - r(2)) * weight(Xi + Vector3i(1, 1, 0)) + r(2) * weight(Xi + Vector3i(1, 1, 1))))); } __device__ Vector3b ScalableTSDFVolumeCudaDevice::ColorAt(const Vector3f &X) { Vector3i Xi = X.template cast<int>(); Vector3f r = Vector3f(X(0) - Xi(0), X(1) - Xi(1), X(2) - Xi(2)); Vector3f colorf = (1 - r(0)) * ((1 - r(1)) * ((1 - r(2)) * color(Xi + Vector3i(0, 0, 0)) .template cast<float>() + r(2) * color(Xi + Vector3i(0, 0, 1)) .template cast<float>()) + r(1) * ((1 - r(2)) * color(Xi + Vector3i(0, 1, 0)) .template cast<float>() + r(2) * color(Xi + Vector3i(0, 1, 1)) .template cast<float>())) + r(0) * ((1 - r(1)) * ((1 - r(2)) * color(Xi + Vector3i(1, 0, 0)) .template cast<float>() + r(2) * color(Xi + Vector3i(1, 0, 1)) .template cast<float>()) + r(1) * ((1 - r(2)) * color(Xi + Vector3i(1, 1, 0)) .template cast<float>() + r(2) * color(Xi + Vector3i(1, 1, 1)) .template cast<float>())); return colorf.template saturate_cast<uchar>(); } __device__ Vector3f ScalableTSDFVolumeCudaDevice::GradientAt(const Vector3f &X) { Vector3f n = Vector3f::Zeros(); Vector3f X0 = X, X1 = X; const float half_gap = voxel_length_; #pragma unroll 1 for (size_t k = 0; k < 3; ++k) { X0(k) -= half_gap; X1(k) += half_gap; n(k) = (TSDFAt(X1) - TSDFAt(X0)) / voxel_length_; X0(k) = X(k); X1(k) = X(k); } return n; } /** Optimized access and interpolation **/ __device__ inline bool ScalableTSDFVolumeCudaDevice::OnBoundary( const Vector3i &Xlocal, bool for_gradient) { return for_gradient ? (Xlocal(0) == 0 || Xlocal(1) == 0 || Xlocal(2) == 0 || Xlocal(0) >= N_ - 2 || Xlocal(1) >= N_ - 2 || Xlocal(2) >= N_ - 2) : (Xlocal(0) == N_ - 1 || Xlocal(1) == N_ - 1 || Xlocal(2) == N_ - 1); } __device__ inline bool ScalableTSDFVolumeCudaDevice::OnBoundaryf( const Vector3f &Xlocal, bool for_gradient) { return for_gradient ? 
(Xlocal(0) < 1 || Xlocal(1) < 1 || Xlocal(2) < 1 || Xlocal(0) >= N_ - 2 || Xlocal(1) >= N_ - 2 || Xlocal(2) >= N_ - 2) : (Xlocal(0) >= N_ - 1 || Xlocal(1) >= N_ - 1 || Xlocal(2) >= N_ - 1); } __device__ inline Vector3i ScalableTSDFVolumeCudaDevice::NeighborOffsetOfBoundaryVoxel( const Vector3i &Xlocal) { return Vector3i(Xlocal(0) < 0 ? -1 : (Xlocal(0) >= N_ ? 1 : 0), Xlocal(1) < 0 ? -1 : (Xlocal(1) >= N_ ? 1 : 0), Xlocal(2) < 0 ? -1 : (Xlocal(2) >= N_ ? 1 : 0)); } __device__ inline int ScalableTSDFVolumeCudaDevice::LinearizeNeighborOffset( const Vector3i &dXsv) { // return (dz + 1) * 9 + (dy + 1) * 3 + (dx + 1); return 9 * dXsv(2) + 3 * dXsv(1) + dXsv(0) + 13; } __device__ inline Vector3i ScalableTSDFVolumeCudaDevice::BoundaryVoxelInNeighbor(const Vector3i &Xlocal, const Vector3i &dXsv) { return Vector3i(Xlocal(0) - dXsv(0) * N_, Xlocal(1) - dXsv(1) * N_, Xlocal(2) - dXsv(2) * N_); } __device__ inline Vector3f ScalableTSDFVolumeCudaDevice::gradient( const Vector3i &Xlocal, UniformTSDFVolumeCudaDevice **cached_subvolumes) { #ifdef CUDA_DEBUG_ENABLE_ASSERTION assert(-1 <= Xlocal(0) && Xlocal(0) <= N_); assert(-1 <= Xlocal(1) && Xlocal(1) <= N_); assert(-1 <= Xlocal(2) && Xlocal(2) <= N_); #endif Vector3f n = Vector3f::Zeros(); Vector3i X0 = Xlocal, X1 = Xlocal; #pragma unroll 1 for (size_t k = 0; k < 3; ++k) { X0(k) -= 1; X1(k) += 1; Vector3i dXsv0 = NeighborOffsetOfBoundaryVoxel(X0); Vector3i dXsv1 = NeighborOffsetOfBoundaryVoxel(X1); UniformTSDFVolumeCudaDevice *subvolume0 = cached_subvolumes[LinearizeNeighborOffset(dXsv0)]; UniformTSDFVolumeCudaDevice *subvolume1 = cached_subvolumes[LinearizeNeighborOffset(dXsv1)]; uchar weight0 = (subvolume0 == nullptr) ? 0 : subvolume0->weight( BoundaryVoxelInNeighbor(X0, dXsv0)); uchar weight1 = (subvolume1 == nullptr) ? 0 : subvolume1->weight( BoundaryVoxelInNeighbor(X1, dXsv1)); float tsdf0 = (subvolume0 == nullptr) ? 0 : subvolume0->tsdf(BoundaryVoxelInNeighbor(X0, dXsv0)); float tsdf1 = (subvolume1 == nullptr) ? 0 : subvolume1->tsdf(BoundaryVoxelInNeighbor(X1, dXsv1)); if (weight0 != 0 && weight1 != 0) { n(k) = tsdf1 - tsdf0; n(k) *= 0.5; } else if (weight1 != 0) { n(k) = tsdf1 - cached_subvolumes[13]->tsdf(Xlocal); } else if (weight0 != 0) { n(k) = cached_subvolumes[13]->tsdf(Xlocal) - tsdf0; } else { n(k) = 0; } X0(k) = X1(k) = Xlocal(k); } return n; } __device__ float ScalableTSDFVolumeCudaDevice::TSDFOnBoundaryAt( const Vector3f &Xlocal, UniformTSDFVolumeCudaDevice **cached_subvolumes) { /** X in range: [-1, N_ + 1) **/ #ifdef CUDA_DEBUG_ENABLE_ASSERTION assert(-1 <= Xlocal(0) && Xlocal(0) < N_ + 1); assert(-1 <= Xlocal(1) && Xlocal(1) < N_ + 1); assert(-1 <= Xlocal(2) && Xlocal(2) < N_ + 1); #endif const Vector3i Xlocali = Xlocal.template cast<int>(); Vector3f r = Vector3f(Xlocal(0) - Xlocali(0), Xlocal(1) - Xlocali(1), Xlocal(2) - Xlocali(2)); Vector3f rneg = Vector3f(1.0f - r(0), 1.0f - r(1), 1.0f - r(2)); float sum_weight_interp = 0; float sum_tsdf = 0; for (size_t k = 0; k < 8; ++k) { Vector3i offset_k = Vector3i(shift[k][0], shift[k][1], shift[k][2]); Vector3i Xlocali_k = Xlocali + offset_k; Vector3i dXsv_k = NeighborOffsetOfBoundaryVoxel(Xlocali_k); UniformTSDFVolumeCudaDevice *subvolume = cached_subvolumes[LinearizeNeighborOffset(dXsv_k)]; float tsdf_k = (subvolume == nullptr) ? 0.0f : subvolume->tsdf(BoundaryVoxelInNeighbor( Xlocali_k, dXsv_k)); float weight_interp_k = (subvolume == nullptr) ? 
0.0f : (rneg(0) * (1 - offset_k(0)) + r(0) * offset_k(0)) * (rneg(1) * (1 - offset_k(1)) + r(1) * offset_k(1)) * (rneg(2) * (1 - offset_k(2)) + r(2) * offset_k(2)); sum_tsdf += weight_interp_k * tsdf_k; sum_weight_interp += weight_interp_k; } return sum_weight_interp > 0 ? sum_tsdf / sum_weight_interp : 0; } __device__ float ScalableTSDFVolumeCudaDevice::LogitOnBoundaryAt( const Vector3f &Xlocal, UniformTSDFVolumeCudaDevice **cached_subvolumes) { /** X in range: [-1, N_ + 1) **/ #ifdef CUDA_DEBUG_ENABLE_ASSERTION assert(-1 <= Xlocal(0) && Xlocal(0) < N_ + 1); assert(-1 <= Xlocal(1) && Xlocal(1) < N_ + 1); assert(-1 <= Xlocal(2) && Xlocal(2) < N_ + 1); #endif const Vector3i Xlocali = Xlocal.template cast<int>(); Vector3f r = Vector3f(Xlocal(0) - Xlocali(0), Xlocal(1) - Xlocali(1), Xlocal(2) - Xlocali(2)); Vector3f rneg = Vector3f(1.0f - r(0), 1.0f - r(1), 1.0f - r(2)); float sum_weight_interp = 0; float sum_logit = 0; for (size_t k = 0; k < 8; ++k) { Vector3i offset_k = Vector3i(shift[k][0], shift[k][1], shift[k][2]); Vector3i Xlocali_k = Xlocali + offset_k; Vector3i dXsv_k = NeighborOffsetOfBoundaryVoxel(Xlocali_k); UniformTSDFVolumeCudaDevice *subvolume = cached_subvolumes[LinearizeNeighborOffset(dXsv_k)]; float logit_k = (subvolume == nullptr) ? 0.0f : subvolume->logit(BoundaryVoxelInNeighbor( Xlocali_k, dXsv_k)); float weight_interp_k = (subvolume == nullptr) ? 0.0f : (rneg(0) * (1 - offset_k(0)) + r(0) * offset_k(0)) * (rneg(1) * (1 - offset_k(1)) + r(1) * offset_k(1)) * (rneg(2) * (1 - offset_k(2)) + r(2) * offset_k(2)); sum_logit += weight_interp_k * logit_k; sum_weight_interp += weight_interp_k; } return sum_weight_interp > 0 ? sum_logit / sum_weight_interp : 0; } __device__ uchar ScalableTSDFVolumeCudaDevice::WeightOnBoundaryAt( const Vector3f &Xlocal, UniformTSDFVolumeCudaDevice **cached_subvolumes) { /** X in range: [-1, N_ + 1) **/ #ifdef CUDA_DEBUG_ENABLE_ASSERTION assert(-1 <= Xlocal(0) && Xlocal(0) < N_ + 1); assert(-1 <= Xlocal(1) && Xlocal(1) < N_ + 1); assert(-1 <= Xlocal(2) && Xlocal(2) < N_ + 1); #endif const Vector3i Xlocali = Xlocal.template cast<int>(); Vector3f r = Vector3f(Xlocal(0) - Xlocali(0), Xlocal(1) - Xlocali(1), Xlocal(2) - Xlocali(2)); Vector3f rneg = Vector3f(1.0f - r(0), 1.0f - r(1), 1.0f - r(2)); float sum_weight_interp = 0; float sum_weight = 0; for (size_t k = 0; k < 8; ++k) { Vector3i offset_k = Vector3i(shift[k][0], shift[k][1], shift[k][2]); Vector3i Xlocali_k = Xlocali + offset_k; Vector3i dXsv_k = NeighborOffsetOfBoundaryVoxel(Xlocali_k); UniformTSDFVolumeCudaDevice *subvolume = cached_subvolumes[LinearizeNeighborOffset(dXsv_k)]; float weight_k = (subvolume == nullptr) ? 0.0f : subvolume->weight(BoundaryVoxelInNeighbor( Xlocali_k, dXsv_k)); float weight_interp_k = (subvolume == nullptr) ? 0.0f : (rneg(0) * (1 - offset_k(0)) + r(0) * offset_k(0)) * (rneg(1) * (1 - offset_k(1)) + r(1) * offset_k(1)) * (rneg(2) * (1 - offset_k(2)) + r(2) * offset_k(2)); sum_weight += weight_interp_k * weight_k; sum_weight_interp += weight_interp_k; } return sum_weight_interp > 0 ? 
uchar(fminf(sum_weight / sum_weight_interp, 255)) : uchar(0); } __device__ Vector3b ScalableTSDFVolumeCudaDevice::ColorOnBoundaryAt( const Vector3f &Xlocal, UniformTSDFVolumeCudaDevice **cached_subvolumes) { /** X in range: [-1, N_ + 1) **/ #ifdef CUDA_DEBUG_ENABLE_ASSERTION assert(-1 <= Xlocal(0) && Xlocal(0) < N_ + 1); assert(-1 <= Xlocal(1) && Xlocal(1) < N_ + 1); assert(-1 <= Xlocal(2) && Xlocal(2) < N_ + 1); #endif const Vector3i Xlocali = Xlocal.template cast<int>(); Vector3f r = Vector3f(Xlocal(0) - Xlocali(0), Xlocal(1) - Xlocali(1), Xlocal(2) - Xlocali(2)); Vector3f rneg = Vector3f(1.0f - r(0), 1.0f - r(1), 1.0f - r(2)); float sum_weight_interp = 0; Vector3f sum_color = Vector3f::Zeros(); for (size_t k = 0; k < 8; ++k) { Vector3i offset_k = Vector3i(shift[k][0], shift[k][1], shift[k][2]); Vector3i Xlocali_k = Xlocali + offset_k; Vector3i dXsv_k = NeighborOffsetOfBoundaryVoxel(Xlocali_k); UniformTSDFVolumeCudaDevice *subvolume = cached_subvolumes[LinearizeNeighborOffset(dXsv_k)]; Vector3f color_k = (subvolume == nullptr) ? Vector3f(0) : subvolume ->color(BoundaryVoxelInNeighbor( Xlocali_k, dXsv_k)) .template cast<float>(); float weight_interp_k = (subvolume == nullptr) ? 0.0f : (rneg(0) * (1 - offset_k(0)) + r(0) * offset_k(0)) * (rneg(1) * (1 - offset_k(1)) + r(1) * offset_k(1)) * (rneg(2) * (1 - offset_k(2)) + r(2) * offset_k(2)); sum_color += weight_interp_k * color_k; sum_weight_interp += weight_interp_k; } return sum_weight_interp > 0 ? (sum_color / sum_weight_interp).template cast<uchar>() : Vector3b(0); } __device__ Vector3f ScalableTSDFVolumeCudaDevice::GradientOnBoundaryAt( const Vector3f &Xlocal, UniformTSDFVolumeCudaDevice **cached_subvolumes) { Vector3f n = Vector3f::Zeros(); Vector3f X0 = Xlocal, X1 = Xlocal; const float half_gap = voxel_length_; #pragma unroll 1 for (size_t k = 0; k < 3; ++k) { X0(k) -= half_gap; X1(k) += half_gap; n(k) = (TSDFOnBoundaryAt(X1, cached_subvolumes) - TSDFOnBoundaryAt(X0, cached_subvolumes)); n(k) *= 0.5f; X0(k) = X1(k) = Xlocal(k); } return n; } __device__ void ScalableTSDFVolumeCudaDevice::ActivateSubvolume( const HashEntry<Vector3i> &entry) { int index = active_subvolume_entry_array_.push_back(entry); active_subvolume_indices_[entry.internal_addr] = index; } __device__ int ScalableTSDFVolumeCudaDevice::QueryActiveSubvolumeIndex( const Vector3i &key) { int internal_addr = hash_table_.GetInternalAddrByKey(key); return internal_addr == NULLPTR_CUDA ? NULLPTR_CUDA : active_subvolume_indices_[internal_addr]; } __device__ void ScalableTSDFVolumeCudaDevice::CacheNeighborSubvolumes( const Vector3i &Xsv, const Vector3i &dXsv, int *cached_subvolume_indices, UniformTSDFVolumeCudaDevice **cached_subvolumes) { Vector3i Xsv_neighbor = Xsv + dXsv; int k = LinearizeNeighborOffset(dXsv); #ifdef CUDA_DEBUG_ENABLE_ASSERTION assert(0 <= k && k < 27); #endif int neighbor_subvolume_idx = QueryActiveSubvolumeIndex(Xsv_neighbor); cached_subvolume_indices[k] = neighbor_subvolume_idx; /** Some of the subvolumes ARE maintained in hash_table, * but ARE NOT active (NOT in view frustum). * For speed, re-write this part with internal addr accessing. * (can be 0.1 ms faster) * For readablity, keep this. **/ cached_subvolumes[k] = neighbor_subvolume_idx == NULLPTR_CUDA ? 
nullptr : QuerySubvolume(Xsv_neighbor); #ifdef CUDA_DEBUG_ENABLE_ASSERTION if (neighbor_subvolume_idx == NULLPTR_CUDA) { assert(cached_subvolumes[k] == nullptr); } else { HashEntry<Vector3i> &entry = active_subvolume_entry_array_[neighbor_subvolume_idx]; assert(entry.key == Xsv_neighbor); assert(hash_table_.GetValuePtrByInternalAddr(entry.internal_addr) == cached_subvolumes[k]); } #endif } /** High level functions **/ __device__ void ScalableTSDFVolumeCudaDevice::TouchSubvolume( const Vector2i &p, ImageCudaDevice<float, 1> &depth, PinholeCameraIntrinsicCuda &camera, TransformCuda &transform_camera_to_world) { float d = depth.at(p(0), p(1))(0); if (d < 0.1f || d > 3.5f) return; Vector3f Xw_near = transform_camera_to_world * camera.InverseProjectPixel(p, fmaxf(d - sdf_trunc_, 0.1f)); Vector3i Xsv_near = voxelf_locate_subvolume(world_to_voxelf(Xw_near)); Vector3f Xw_far = transform_camera_to_world * camera.InverseProjectPixel(p, fminf(d + sdf_trunc_, 3.5f)); Vector3i Xsv_far = voxelf_locate_subvolume(world_to_voxelf(Xw_far)); // Vector3i Xsv_min = Vector3i(min(Xsv_near(0), Xsv_far(0)), // min(Xsv_near(1), Xsv_far(1)), // min(Xsv_near(2), Xsv_far(2))); // Vector3i Xsv_max = Vector3i(max(Xsv_near(0), Xsv_far(0)), // max(Xsv_near(1), Xsv_far(1)), // max(Xsv_near(2), Xsv_far(2))); // // for (int x = Xsv_min(0); x <= Xsv_max(0); ++x) { // for (int y = Xsv_min(1); y <= Xsv_max(1); ++y) { // for (int z = Xsv_min(2); z <= Xsv_max(2); ++z) { // hash_table_.New(Vector3i(x, y, z)); // } // } // } /** 3D line from Xsv_near to Xsv_far * https://en.wikipedia.org/wiki/Digital_differential_analyzer_(graphics_algorithm) * **/ Vector3i DXsv = Xsv_far - Xsv_near; Vector3i DXsv_abs = Vector3i(abs(DXsv(0)), abs(DXsv(1)), abs(DXsv(2))); int step = DXsv_abs(0) >= DXsv_abs(1) ? DXsv_abs(0) : DXsv_abs(1); step = DXsv_abs(2) >= step ? 
DXsv_abs(2) : step; Vector3f DXsv_normalized = DXsv.template cast<float>() * (1.0f / step); Vector3f Xsv_curr = Xsv_near.template cast<float>(); HashEntry<Vector3i> entry; for (int k = 0; k <= step; ++k) { hash_table_.New(Xsv_curr.template cast<int>()); Xsv_curr += DXsv_normalized; } } __device__ void ScalableTSDFVolumeCudaDevice::Integrate( const Vector3i &Xlocal, HashEntry<Vector3i> &entry, RGBDImageCudaDevice &rgbd, PinholeCameraIntrinsicCuda &camera, TransformCuda &transform_camera_to_world) { /** Projective data association - additional local to global transform **/ Vector3f X = voxelf_local_to_global(Xlocal.template cast<float>(), entry.key); Vector3f Xw = voxelf_to_world(X); Vector3f Xc = transform_camera_to_world.Inverse() * Xw; Vector2f p = camera.ProjectPoint(Xc); /** TSDF **/ if (!camera.IsPixelValid(p)) return; float d = rgbd.depth_.interp_at(p(0), p(1))(0); float tsdf = d - Xc(2); if (tsdf <= -sdf_trunc_) return; tsdf = fminf(tsdf / sdf_trunc_, 1.0f); Vector3b color = rgbd.color_raw_.at(int(p(0)), int(p(1))); UniformTSDFVolumeCudaDevice *subvolume = hash_table_.GetValuePtrByInternalAddr(entry.internal_addr); #ifdef CUDA_DEBUG_ENABLE_ASSERTION assert(subvolume != nullptr); #endif float &tsdf_sum = subvolume->tsdf(Xlocal); uchar &weight_sum = subvolume->weight(Xlocal); Vector3b &color_sum = subvolume->color(Xlocal); /// TODO: add logit update here /// Maybe we can replace weight with logit-related computation too float w0 = 1 / (weight_sum + 1.0f); float w1 = 1 - w0; tsdf_sum = tsdf * w0 + tsdf_sum * w1; color_sum = Vector3b(color(0) * w0 + color_sum(0) * w1, color(1) * w0 + color_sum(1) * w1, color(2) * w0 + color_sum(2) * w1); weight_sum = uchar(fminf(weight_sum + 1.0f, 255.0f)); } __device__ bool ScalableTSDFVolumeCudaDevice::RayCasting( const Vector2i &p, Vector3f &vertex, Vector3f &normal, Vector3b &color, PinholeCameraIntrinsicCuda &camera, TransformCuda &transform_camera_to_world) { Vector3f ret = Vector3f(0); Vector3f ray_c = camera.InverseProjectPixel(p, 1.0f).normalized(); /** TODO: throw it into parameters **/ const float t_min = 0.1f / ray_c(2); const float t_max = 3.0f / ray_c(2); const Vector3f camera_origin_v = transform_world_to_volume_ * (transform_camera_to_world * Vector3f(0)); const Vector3f ray_v = transform_world_to_volume_.Rotate( transform_camera_to_world.Rotate(ray_c)); float t_prev = 0, tsdf_prev = sdf_trunc_; const float block_step_size = N_ * voxel_length_; Vector3i Xsv_prev = Vector3i(INT_MIN, INT_MIN, INT_MIN); UniformTSDFVolumeCudaDevice *subvolume = nullptr; /** Do NOT use #pragma unroll: it will slow it down **/ float t_curr = t_min; while (t_curr < t_max) { Vector3f Xv_t = camera_origin_v + t_curr * ray_v; Vector3i X_t = volume_to_voxelf(Xv_t).template cast<int>(); Vector3i Xsv_t = voxel_locate_subvolume(X_t); Vector3i Xlocal_t = voxel_global_to_local(X_t, Xsv_t); subvolume = (Xsv_t == Xsv_prev) ? subvolume : QuerySubvolume(Xsv_t); bool is_subvolume_valid = subvolume != nullptr; float tsdf_curr = is_subvolume_valid ? subvolume->tsdf(Xlocal_t) : tsdf_prev; uchar weight_curr = is_subvolume_valid ? subvolume->weight(Xlocal_t) : 0; float step_size = is_subvolume_valid ? 
fmaxf(tsdf_curr * sdf_trunc_, voxel_length_) : block_step_size; /** Zero crossing **/ if (tsdf_prev > 0 && weight_curr > 0 && tsdf_curr <= 0) { float t_intersect = (t_curr * tsdf_prev - t_prev * tsdf_curr) / (tsdf_prev - tsdf_curr); Vector3f Xv_surface_t = camera_origin_v + t_intersect * ray_v; vertex = (transform_camera_to_world.Inverse() * (transform_volume_to_world_ * Xv_surface_t)); /**TODO(Akash): Unoptimized access for normals, how to improve * this/cache for individual accesses */ Vector3f X_surface_t = volume_to_voxelf(Xv_surface_t); normal = GradientAt(X_surface_t).normalized(); color = ColorAt(X_surface_t); return true; } tsdf_prev = tsdf_curr; t_prev = t_curr; t_curr += step_size; Xsv_prev = Xsv_t; } return false; } __device__ Vector3f ScalableTSDFVolumeCudaDevice::VolumeRendering( const Vector2i &p, PinholeCameraIntrinsicCuda &camera, TransformCuda &transform_camera_to_world) { Vector3f ret = Vector3f(0); float sample_weight = 0; Vector3f ray_c = camera.InverseProjectPixel(p, 1.0f).normalized(); /** TODO: throw it into parameters **/ const float t_min = 0.1f; const float t_max = 3.0f; const Vector3f camera_origin_v = transform_world_to_volume_ * (transform_camera_to_world * Vector3f(0)); const Vector3f ray_v = transform_world_to_volume_.Rotate( transform_camera_to_world.Rotate(ray_c)); float tsdf_prev = sdf_trunc_; const float block_step_size = N_ * voxel_length_; Vector3i Xsv_prev = Vector3i(INT_MIN, INT_MIN, INT_MIN); UniformTSDFVolumeCudaDevice *subvolume = nullptr; /** Do NOT use #pragma unroll: it will slow it down **/ float t_curr = t_min; while (t_curr < t_max) { Vector3f Xv_t = camera_origin_v + t_curr * ray_v; Vector3i X_t = volume_to_voxelf(Xv_t).template cast<int>(); Vector3i Xsv_t = voxel_locate_subvolume(X_t); Vector3i Xlocal_t = voxel_global_to_local(X_t, Xsv_t); subvolume = (Xsv_t == Xsv_prev) ? subvolume : QuerySubvolume(Xsv_t); bool is_subvolume_valid = subvolume != nullptr; uchar weight_curr = is_subvolume_valid ? subvolume->weight(Xlocal_t) : 0; float tsdf_curr = is_subvolume_valid ? subvolume->tsdf(Xlocal_t) : tsdf_prev; float step_size = is_subvolume_valid ? fmaxf(tsdf_curr * sdf_trunc_, voxel_length_) : block_step_size; if (weight_curr > 0) { float weight = expf(-fabsf(tsdf_curr)); ret += Jet(tsdf_curr, -0.5f, 0.5f) * weight; sample_weight += weight; } tsdf_prev = tsdf_curr; t_curr += step_size; Xsv_prev = Xsv_t; } return ret / float(sample_weight); } } // namespace cuda } // namespace open3d
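/* --------------------------------------------------------------------------
 * Host-side sketch (not part of the original Open3D-CUDA source): the two
 * pieces of arithmetic that Integrate() and RayCasting() above depend on,
 * written as standalone C++ so they can be checked on the CPU. The function
 * names FuseTsdfSample / ZeroCrossing are illustrative assumptions, not an
 * existing API of ScalableTSDFVolumeCudaDevice.
 * -------------------------------------------------------------------------- */
#include <algorithm>
#include <cstdint>

// Weighted running average used when fusing a new truncated-SDF observation
// into a voxel; same update as Integrate(): w0 = 1/(W+1), w1 = 1 - w0.
inline void FuseTsdfSample(float tsdf_obs, float &tsdf_sum, uint8_t &weight_sum) {
    const float w0 = 1.0f / (static_cast<float>(weight_sum) + 1.0f);
    const float w1 = 1.0f - w0;
    tsdf_sum = tsdf_obs * w0 + tsdf_sum * w1;                                // incremental mean
    weight_sum = static_cast<uint8_t>(std::min<int>(weight_sum + 1, 255));   // saturate at 255
}

// Linear interpolation of the ray parameter at the surface, as in RayCasting():
// assuming the TSDF varies linearly in t between (t_prev, tsdf_prev) and
// (t_curr, tsdf_curr), solve tsdf(t) = 0 for t.
inline float ZeroCrossing(float t_prev, float tsdf_prev, float t_curr, float tsdf_curr) {
    return (t_curr * tsdf_prev - t_prev * tsdf_curr) / (tsdf_prev - tsdf_curr);
}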
#define CUBEHASH_ROUNDS 16 /* this is r for CubeHashr/b */ #define CUBEHASH_BLOCKBYTES 32 /* this is b for CubeHashr/b */ #define TPB35 576 #define TPB50 1024 #define ROTATEUPWARDS7(a) ROTL32(a,7) #define ROTATEUPWARDS11(a) ROTL32(a,11) static __device__ __forceinline__ void rrounds(uint32_t x[2][2][2][2][2]) { int r; uint32_t x1[2][2][2][2]; for(r = 0; r < CUBEHASH_ROUNDS; r += 2) { /* "add x_0jklm into x_1jklm modulo 2^32" */ x1[0][0][0][0] = x[1][0][0][0][0] + x[0][0][0][0][0]; x1[0][0][0][1] = x[1][0][0][0][1] + x[0][0][0][0][1]; x1[0][0][1][0] = x[1][0][0][1][0] + x[0][0][0][1][0]; x1[0][0][1][1] = x[1][0][0][1][1] + x[0][0][0][1][1]; x1[0][1][0][0] = x[1][0][1][0][0] + x[0][0][1][0][0]; x1[0][1][0][1] = x[1][0][1][0][1] + x[0][0][1][0][1]; x1[0][1][1][0] = x[1][0][1][1][0] + x[0][0][1][1][0]; x1[0][1][1][1] = x[1][0][1][1][1] + x[0][0][1][1][1]; x1[1][0][0][0] = x[1][1][0][0][0] + x[0][1][0][0][0]; x1[1][0][0][1] = x[1][1][0][0][1] + x[0][1][0][0][1]; x1[1][0][1][0] = x[1][1][0][1][0] + x[0][1][0][1][0]; x1[1][0][1][1] = x[1][1][0][1][1] + x[0][1][0][1][1]; x1[1][1][0][0] = x[1][1][1][0][0] + x[0][1][1][0][0]; x1[1][1][0][1] = x[1][1][1][0][1] + x[0][1][1][0][1]; x1[1][1][1][0] = x[1][1][1][1][0] + x[0][1][1][1][0]; x1[1][1][1][1] = x[1][1][1][1][1] + x[0][1][1][1][1]; /* "rotate x_0jklm upwards by 7 bits" */ /* "xor x_1~jklm into x_0jklm" */ x[0][0][0][0][0] = ROTATEUPWARDS7(x[0][0][0][0][0]) ^ x1[1][0][0][0]; x[0][0][0][0][1] = ROTATEUPWARDS7(x[0][0][0][0][1]) ^ x1[1][0][0][1]; x[0][0][0][1][0] = ROTATEUPWARDS7(x[0][0][0][1][0]) ^ x1[1][0][1][0]; x[0][0][0][1][1] = ROTATEUPWARDS7(x[0][0][0][1][1]) ^ x1[1][0][1][1]; x[0][0][1][0][0] = ROTATEUPWARDS7(x[0][0][1][0][0]) ^ x1[1][1][0][0]; x[0][0][1][0][1] = ROTATEUPWARDS7(x[0][0][1][0][1]) ^ x1[1][1][0][1]; x[0][0][1][1][0] = ROTATEUPWARDS7(x[0][0][1][1][0]) ^ x1[1][1][1][0]; x[0][0][1][1][1] = ROTATEUPWARDS7(x[0][0][1][1][1]) ^ x1[1][1][1][1]; x[0][1][0][0][0] = ROTATEUPWARDS7(x[0][1][0][0][0]) ^ x1[0][0][0][0]; x[0][1][0][0][1] = ROTATEUPWARDS7(x[0][1][0][0][1]) ^ x1[0][0][0][1]; x[0][1][0][1][0] = ROTATEUPWARDS7(x[0][1][0][1][0]) ^ x1[0][0][1][0]; x[0][1][0][1][1] = ROTATEUPWARDS7(x[0][1][0][1][1]) ^ x1[0][0][1][1]; x[0][1][1][0][0] = ROTATEUPWARDS7(x[0][1][1][0][0]) ^ x1[0][1][0][0]; x[0][1][1][0][1] = ROTATEUPWARDS7(x[0][1][1][0][1]) ^ x1[0][1][0][1]; x[0][1][1][1][0] = ROTATEUPWARDS7(x[0][1][1][1][0]) ^ x1[0][1][1][0]; x[0][1][1][1][1] = ROTATEUPWARDS7(x[0][1][1][1][1]) ^ x1[0][1][1][1]; /* "add x_0jklm into x_1~jk~lm modulo 2^32" */ x[1][1][0][1][0] = x1[1][0][1][0] + x[0][0][0][0][0]; x[1][1][0][1][1] = x1[1][0][1][1] + x[0][0][0][0][1]; x[1][1][0][0][0] = x1[1][0][0][0] + x[0][0][0][1][0]; x[1][1][0][0][1] = x1[1][0][0][1] + x[0][0][0][1][1]; x[1][1][1][1][0] = x1[1][1][1][0] + x[0][0][1][0][0]; x[1][1][1][1][1] = x1[1][1][1][1] + x[0][0][1][0][1]; x[1][1][1][0][0] = x1[1][1][0][0] + x[0][0][1][1][0]; x[1][1][1][0][1] = x1[1][1][0][1] + x[0][0][1][1][1]; x[1][0][0][1][0] = x1[0][0][1][0] + x[0][1][0][0][0]; x[1][0][0][1][1] = x1[0][0][1][1] + x[0][1][0][0][1]; x[1][0][0][0][0] = x1[0][0][0][0] + x[0][1][0][1][0]; x[1][0][0][0][1] = x1[0][0][0][1] + x[0][1][0][1][1]; x[1][0][1][1][0] = x1[0][1][1][0] + x[0][1][1][0][0]; x[1][0][1][1][1] = x1[0][1][1][1] + x[0][1][1][0][1]; x[1][0][1][0][0] = x1[0][1][0][0] + x[0][1][1][1][0]; x[1][0][1][0][1] = x1[0][1][0][1] + x[0][1][1][1][1]; /* "rotate x_0jklm upwards by 11 bits" */ /* "xor x_1~j~k~lm into x_0jklm" */ x[0][0][0][0][0] = ROTATEUPWARDS11(x[0][0][0][0][0]) ^ x[1][1][1][1][0]; 
x[0][0][0][0][1] = ROTATEUPWARDS11(x[0][0][0][0][1]) ^ x[1][1][1][1][1]; x[0][0][0][1][0] = ROTATEUPWARDS11(x[0][0][0][1][0]) ^ x[1][1][1][0][0]; x[0][0][0][1][1] = ROTATEUPWARDS11(x[0][0][0][1][1]) ^ x[1][1][1][0][1]; x[0][0][1][0][0] = ROTATEUPWARDS11(x[0][0][1][0][0]) ^ x[1][1][0][1][0]; x[0][0][1][0][1] = ROTATEUPWARDS11(x[0][0][1][0][1]) ^ x[1][1][0][1][1]; x[0][0][1][1][0] = ROTATEUPWARDS11(x[0][0][1][1][0]) ^ x[1][1][0][0][0]; x[0][0][1][1][1] = ROTATEUPWARDS11(x[0][0][1][1][1]) ^ x[1][1][0][0][1]; x[0][1][0][0][0] = ROTATEUPWARDS11(x[0][1][0][0][0]) ^ x[1][0][1][1][0]; x[0][1][0][0][1] = ROTATEUPWARDS11(x[0][1][0][0][1]) ^ x[1][0][1][1][1]; x[0][1][0][1][0] = ROTATEUPWARDS11(x[0][1][0][1][0]) ^ x[1][0][1][0][0]; x[0][1][0][1][1] = ROTATEUPWARDS11(x[0][1][0][1][1]) ^ x[1][0][1][0][1]; x[0][1][1][0][0] = ROTATEUPWARDS11(x[0][1][1][0][0]) ^ x[1][0][0][1][0]; x[0][1][1][0][1] = ROTATEUPWARDS11(x[0][1][1][0][1]) ^ x[1][0][0][1][1]; x[0][1][1][1][0] = ROTATEUPWARDS11(x[0][1][1][1][0]) ^ x[1][0][0][0][0]; x[0][1][1][1][1] = ROTATEUPWARDS11(x[0][1][1][1][1]) ^ x[1][0][0][0][1]; /* "add x_0jklm into x_1~j~k~l~m modulo 2^32" */ x1[1][1][1][1] = x[1][1][1][1][1] + x[0][0][0][0][0]; x1[1][1][1][0] = x[1][1][1][1][0] + x[0][0][0][0][1]; x1[1][1][0][1] = x[1][1][1][0][1] + x[0][0][0][1][0]; x1[1][1][0][0] = x[1][1][1][0][0] + x[0][0][0][1][1]; x1[1][0][1][1] = x[1][1][0][1][1] + x[0][0][1][0][0]; x1[1][0][1][0] = x[1][1][0][1][0] + x[0][0][1][0][1]; x1[1][0][0][1] = x[1][1][0][0][1] + x[0][0][1][1][0]; x1[1][0][0][0] = x[1][1][0][0][0] + x[0][0][1][1][1]; x1[0][1][1][1] = x[1][0][1][1][1] + x[0][1][0][0][0]; x1[0][1][1][0] = x[1][0][1][1][0] + x[0][1][0][0][1]; x1[0][1][0][1] = x[1][0][1][0][1] + x[0][1][0][1][0]; x1[0][1][0][0] = x[1][0][1][0][0] + x[0][1][0][1][1]; x1[0][0][1][1] = x[1][0][0][1][1] + x[0][1][1][0][0]; x1[0][0][1][0] = x[1][0][0][1][0] + x[0][1][1][0][1]; x1[0][0][0][1] = x[1][0][0][0][1] + x[0][1][1][1][0]; x1[0][0][0][0] = x[1][0][0][0][0] + x[0][1][1][1][1]; /* "rotate x_0jklm upwards by 7 bits" */ /* "xor x_1j~k~l~m into x_0jklm" */ x[0][0][0][0][0] = ROTATEUPWARDS7(x[0][0][0][0][0]) ^ x1[0][1][1][1]; x[0][0][0][0][1] = ROTATEUPWARDS7(x[0][0][0][0][1]) ^ x1[0][1][1][0]; x[0][0][0][1][0] = ROTATEUPWARDS7(x[0][0][0][1][0]) ^ x1[0][1][0][1]; x[0][0][0][1][1] = ROTATEUPWARDS7(x[0][0][0][1][1]) ^ x1[0][1][0][0]; x[0][0][1][0][0] = ROTATEUPWARDS7(x[0][0][1][0][0]) ^ x1[0][0][1][1]; x[0][0][1][0][1] = ROTATEUPWARDS7(x[0][0][1][0][1]) ^ x1[0][0][1][0]; x[0][0][1][1][0] = ROTATEUPWARDS7(x[0][0][1][1][0]) ^ x1[0][0][0][1]; x[0][0][1][1][1] = ROTATEUPWARDS7(x[0][0][1][1][1]) ^ x1[0][0][0][0]; x[0][1][0][0][0] = ROTATEUPWARDS7(x[0][1][0][0][0]) ^ x1[1][1][1][1]; x[0][1][0][0][1] = ROTATEUPWARDS7(x[0][1][0][0][1]) ^ x1[1][1][1][0]; x[0][1][0][1][0] = ROTATEUPWARDS7(x[0][1][0][1][0]) ^ x1[1][1][0][1]; x[0][1][0][1][1] = ROTATEUPWARDS7(x[0][1][0][1][1]) ^ x1[1][1][0][0]; x[0][1][1][0][0] = ROTATEUPWARDS7(x[0][1][1][0][0]) ^ x1[1][0][1][1]; x[0][1][1][0][1] = ROTATEUPWARDS7(x[0][1][1][0][1]) ^ x1[1][0][1][0]; x[0][1][1][1][0] = ROTATEUPWARDS7(x[0][1][1][1][0]) ^ x1[1][0][0][1]; x[0][1][1][1][1] = ROTATEUPWARDS7(x[0][1][1][1][1]) ^ x1[1][0][0][0]; /* "add x_0jklm into x_1j~kl~m modulo 2^32" */ x[1][0][1][0][1] = x1[0][1][0][1] + x[0][0][0][0][0]; x[1][0][1][0][0] = x1[0][1][0][0] + x[0][0][0][0][1]; x[1][0][1][1][1] = x1[0][1][1][1] + x[0][0][0][1][0]; x[1][0][1][1][0] = x1[0][1][1][0] + x[0][0][0][1][1]; x[1][0][0][0][1] = x1[0][0][0][1] + x[0][0][1][0][0]; x[1][0][0][0][0] = 
x1[0][0][0][0] + x[0][0][1][0][1]; x[1][0][0][1][1] = x1[0][0][1][1] + x[0][0][1][1][0]; x[1][0][0][1][0] = x1[0][0][1][0] + x[0][0][1][1][1]; x[1][1][1][0][1] = x1[1][1][0][1] + x[0][1][0][0][0]; x[1][1][1][0][0] = x1[1][1][0][0] + x[0][1][0][0][1]; x[1][1][1][1][1] = x1[1][1][1][1] + x[0][1][0][1][0]; x[1][1][1][1][0] = x1[1][1][1][0] + x[0][1][0][1][1]; x[1][1][0][0][1] = x1[1][0][0][1] + x[0][1][1][0][0]; x[1][1][0][0][0] = x1[1][0][0][0] + x[0][1][1][0][1]; x[1][1][0][1][1] = x1[1][0][1][1] + x[0][1][1][1][0]; x[1][1][0][1][0] = x1[1][0][1][0] + x[0][1][1][1][1]; /* "rotate x_0jklm upwards by 11 bits" */ /* "xor x_1jkl~m into x_0jklm" */ x[0][0][0][0][0] = ROTATEUPWARDS11(x[0][0][0][0][0]) ^ x[1][0][0][0][1]; x[0][0][0][0][1] = ROTATEUPWARDS11(x[0][0][0][0][1]) ^ x[1][0][0][0][0]; x[0][0][0][1][0] = ROTATEUPWARDS11(x[0][0][0][1][0]) ^ x[1][0][0][1][1]; x[0][0][0][1][1] = ROTATEUPWARDS11(x[0][0][0][1][1]) ^ x[1][0][0][1][0]; x[0][0][1][0][0] = ROTATEUPWARDS11(x[0][0][1][0][0]) ^ x[1][0][1][0][1]; x[0][0][1][0][1] = ROTATEUPWARDS11(x[0][0][1][0][1]) ^ x[1][0][1][0][0]; x[0][0][1][1][0] = ROTATEUPWARDS11(x[0][0][1][1][0]) ^ x[1][0][1][1][1]; x[0][0][1][1][1] = ROTATEUPWARDS11(x[0][0][1][1][1]) ^ x[1][0][1][1][0]; x[0][1][0][0][0] = ROTATEUPWARDS11(x[0][1][0][0][0]) ^ x[1][1][0][0][1]; x[0][1][0][0][1] = ROTATEUPWARDS11(x[0][1][0][0][1]) ^ x[1][1][0][0][0]; x[0][1][0][1][0] = ROTATEUPWARDS11(x[0][1][0][1][0]) ^ x[1][1][0][1][1]; x[0][1][0][1][1] = ROTATEUPWARDS11(x[0][1][0][1][1]) ^ x[1][1][0][1][0]; x[0][1][1][0][0] = ROTATEUPWARDS11(x[0][1][1][0][0]) ^ x[1][1][1][0][1]; x[0][1][1][0][1] = ROTATEUPWARDS11(x[0][1][1][0][1]) ^ x[1][1][1][0][0]; x[0][1][1][1][0] = ROTATEUPWARDS11(x[0][1][1][1][0]) ^ x[1][1][1][1][1]; x[0][1][1][1][1] = ROTATEUPWARDS11(x[0][1][1][1][1]) ^ x[1][1][1][1][0]; } } static __device__ __forceinline__ void Final(uint32_t x[2][2][2][2][2], uint32_t *hashval) { /* "the integer 1 is xored into the last state word x_11111" */ x[1][1][1][1][1] ^= 1U; /* "the state is then transformed invertibly through 10r identical rounds" */ for(int i = 0; i < 10; ++i) rrounds(x); /* "output the first h/8 bytes of the state" */ hashval[0] = x[0][0][0][0][0]; hashval[1] = x[0][0][0][0][1]; hashval[2] = x[0][0][0][1][0]; hashval[3] = x[0][0][0][1][1]; hashval[4] = x[0][0][1][0][0]; hashval[5] = x[0][0][1][0][1]; hashval[6] = x[0][0][1][1][0]; hashval[7] = x[0][0][1][1][1]; } #if __CUDA_ARCH__ >= 500 __global__ __launch_bounds__(TPB50, 1) #else __global__ __launch_bounds__(TPB35, 1) #endif void cubehash256_gpu_hash_32(uint32_t threads, uint32_t startNounce, uint2 *g_hash) { const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); if(thread < threads) { #if __CUDA_ARCH__ >= 500 uint2 Hash[4]; Hash[0] = __ldg(&g_hash[thread]); Hash[1] = __ldg(&g_hash[thread + 1 * threads]); Hash[2] = __ldg(&g_hash[thread + 2 * threads]); Hash[3] = __ldg(&g_hash[thread + 3 * threads]); #else uint32_t Hash[8]; LOHI(Hash[0], Hash[1], __ldg(&((uint64_t*)g_hash)[thread])); LOHI(Hash[2], Hash[3], __ldg(&((uint64_t*)g_hash)[thread + 1 * threads])); LOHI(Hash[4], Hash[5], __ldg(&((uint64_t*)g_hash)[thread + 2 * threads])); LOHI(Hash[6], Hash[7], __ldg(&((uint64_t*)g_hash)[thread + 3 * threads])); #endif uint32_t x[2][2][2][2][2] = { 0xEA2BD4B4, 0xCCD6F29F, 0x63117E71, 0x35481EAE, 0x22512D5B, 0xE5D94E63, 0x7E624131, 0xF4CC12BE, 0xC2D0B696, 0x42AF2070, 0xD0720C35, 0x3361DA8C, 0x28CCECA4, 0x8EF8AD83, 0x4680AC00, 0x40E5FBAB, 0xD89041C3, 0x6107FBD5, 0x6C859D41, 0xF0B26679, 0x09392549, 0x5FA25603, 0x65C892FD, 
0x93CB6285, 0x2AF2B5AE, 0x9E4B4E60, 0x774ABFDD, 0x85254725, 0x15815AEB, 0x4AB6AAD6, 0x9CDAF8AF, 0xD6032C0A }; #if __CUDA_ARCH__ >= 500 x[0][0][0][0][0] ^= Hash[0].x; x[0][0][0][0][1] ^= Hash[0].y; x[0][0][0][1][0] ^= Hash[1].x; x[0][0][0][1][1] ^= Hash[1].y; x[0][0][1][0][0] ^= Hash[2].x; x[0][0][1][0][1] ^= Hash[2].y; x[0][0][1][1][0] ^= Hash[3].x; x[0][0][1][1][1] ^= Hash[3].y; #else x[0][0][0][0][0] ^= Hash[0]; x[0][0][0][0][1] ^= Hash[1]; x[0][0][0][1][0] ^= Hash[2]; x[0][0][0][1][1] ^= Hash[3]; x[0][0][1][0][0] ^= Hash[4]; x[0][0][1][0][1] ^= Hash[5]; x[0][0][1][1][0] ^= Hash[6]; x[0][0][1][1][1] ^= Hash[7]; #endif rrounds(x); x[0][0][0][0][0] ^= 0x80U; rrounds(x); #if __CUDA_ARCH__ >= 500 Final(x, (uint32_t*)Hash); g_hash[thread] = Hash[0]; g_hash[1 * threads + thread] = Hash[1]; g_hash[2 * threads + thread] = Hash[2]; g_hash[3 * threads + thread] = Hash[3]; #else Final(x, Hash); ((uint64_t*)g_hash)[thread] = ((uint64_t*)Hash)[0]; ((uint64_t*)g_hash)[1 * threads + thread] = ((uint64_t*)Hash)[1]; ((uint64_t*)g_hash)[2 * threads + thread] = ((uint64_t*)Hash)[2]; ((uint64_t*)g_hash)[3 * threads + thread] = ((uint64_t*)Hash)[3]; #endif } } __host__ void cubehash256_cpu_hash_32(int thr_id, uint32_t threads, uint32_t startNounce, uint64_t *d_hash) { uint32_t tpb = TPB35; if(cuda_arch[thr_id] >= 500) tpb = TPB50; dim3 grid((threads + tpb - 1) / tpb); dim3 block(tpb); cubehash256_gpu_hash_32 << <grid, block, 0, gpustream[thr_id] >> > (threads, startNounce, (uint2*)d_hash); CUDA_SAFE_CALL(cudaGetLastError()); if(opt_debug) CUDA_SAFE_CALL(cudaDeviceSynchronize()); }
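/* --------------------------------------------------------------------------
 * CPU reference sketch (not part of the miner source above): one CubeHash
 * round on a flat 32-word state, following the specification steps that the
 * fully unrolled rrounds() implements (its loop body performs two such
 * rounds per iteration, which is why it advances r by 2). Word index is
 * i*16 + j*8 + k*4 + l*2 + m, matching x[i][j][k][l][m] above; rrounds()
 * folds the swaps into its index choices, so intermediate layouts differ,
 * but the composed round function is the same. rotl32_ref is a plain 32-bit
 * left rotation, the operation the ROTL32 macro is assumed to provide.
 * -------------------------------------------------------------------------- */
static inline uint32_t rotl32_ref(uint32_t a, int n) {
    return (a << n) | (a >> (32 - n));
}

static void cubehash_round_ref(uint32_t x[32]) {
    uint32_t y[16];
    int i;
    for (i = 0; i < 16; ++i) x[i + 16] += x[i];              // add x_0jklm into x_1jklm
    for (i = 0; i < 16; ++i) y[i ^ 8] = x[i];                // swap j halves of x_0 ...
    for (i = 0; i < 16; ++i) x[i] = rotl32_ref(y[i], 7);     // ... and rotate upwards by 7
    for (i = 0; i < 16; ++i) x[i] ^= x[i + 16];              // xor x_1 into x_0
    for (i = 0; i < 16; ++i) y[i ^ 2] = x[i + 16];           // swap l pairs of x_1
    for (i = 0; i < 16; ++i) x[i + 16] = y[i];
    for (i = 0; i < 16; ++i) x[i + 16] += x[i];              // add x_0 into x_1 again
    for (i = 0; i < 16; ++i) y[i ^ 4] = x[i];                // swap k pairs of x_0 ...
    for (i = 0; i < 16; ++i) x[i] = rotl32_ref(y[i], 11);    // ... and rotate upwards by 11
    for (i = 0; i < 16; ++i) x[i] ^= x[i + 16];              // xor x_1 into x_0
    for (i = 0; i < 16; ++i) y[i ^ 1] = x[i + 16];           // swap m pairs of x_1
    for (i = 0; i < 16; ++i) x[i + 16] = y[i];
}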
* This sample illustrates the usage of CUDA streams for overlapping * kernel execution with device/host memcopies. The kernel is used to * initialize an array to a specific value, after which the array is * copied to the host (CPU) memory. To increase performance, multiple * kernel/memcopy pairs are launched asynchronously, each pair in its * own stream. Devices with Compute Capability 1.1 can overlap a kernel * and a memcopy as long as they are issued in different streams. Kernels * are serialized. Thus, if n pairs are launched, streamed approach * can reduce the memcopy cost to the (1/n)th of a single copy of the entire * data set. * * Additionally, this sample uses CUDA events to measure elapsed time for * CUDA calls. Events are a part of CUDA API and provide a system independent * way to measure execution times on CUDA devices with approximately 0.5 * microsecond precision. * * Elapsed times are averaged over nreps repetitions (10 by default). * */ const char *sSDKsample = "simpleStreams"; const char *sEventSyncMethod[] = {"cudaEventDefault", "cudaEventBlockingSync", "cudaEventDisableTiming", NULL}; const char *sDeviceSyncMethod[] = { "cudaDeviceScheduleAuto", "cudaDeviceScheduleSpin", "cudaDeviceScheduleYield", "INVALID", "cudaDeviceScheduleBlockingSync", NULL}; // System includes #include <stdio.h> #include <assert.h> // CUDA runtime #include <cuda_runtime.h> // helper functions and utilities to work with CUDA #include <helper_functions.h> #include <helper_cuda.h> #ifndef WIN32 #include <sys/mman.h> // for mmap() / munmap() #endif // Macro to aligned up to the memory size in question #define MEMORY_ALIGNMENT 4096 #define ALIGN_UP(x, size) (((size_t)x + (size - 1)) & (~(size - 1))) __global__ void init_array(int *g_data, int *factor, int num_iterations) { int idx = blockIdx.x * blockDim.x + threadIdx.x; for (int i = 0; i < num_iterations; i++) { g_data[idx] += *factor; // non-coalesced on purpose, to burn time } } bool correct_data(int *a, const int n, const int c) { for (int i = 0; i < n; i++) { if (a[i] != c) { printf("%d: %d %d\n", i, a[i], c); return false; } } return true; } inline void AllocateHostMemory(bool bPinGenericMemory, int **pp_a, int **ppAligned_a, int nbytes) { #if CUDART_VERSION >= 4000 #if !defined(__arm__) && !defined(__aarch64__) if (bPinGenericMemory) { // allocate a generic page-aligned chunk of system memory #ifdef WIN32 printf( "> VirtualAlloc() allocating %4.2f Mbytes of (generic page-aligned " "system memory)\n", (float)nbytes / 1048576.0f); *pp_a = (int *)VirtualAlloc(NULL, (nbytes + MEMORY_ALIGNMENT), MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); #else printf( "> mmap() allocating %4.2f Mbytes (generic page-aligned system " "memory)\n", (float)nbytes / 1048576.0f); *pp_a = (int *)mmap(NULL, (nbytes + MEMORY_ALIGNMENT), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); #endif *ppAligned_a = (int *)ALIGN_UP(*pp_a, MEMORY_ALIGNMENT); printf( "> cudaHostRegister() registering %4.2f Mbytes of generic allocated " "system memory\n", (float)nbytes / 1048576.0f); // pin allocate memory checkCudaErrors( cudaHostRegister(*ppAligned_a, nbytes, cudaHostRegisterMapped)); } else #endif #endif { printf("> cudaMallocHost() allocating %4.2f Mbytes of system memory\n", (float)nbytes / 1048576.0f); // allocate host memory (pinned is required for achieve asynchronicity) checkCudaErrors(cudaMallocHost((void **)pp_a, nbytes)); *ppAligned_a = *pp_a; } } inline void FreeHostMemory(bool bPinGenericMemory, int **pp_a, int **ppAligned_a, int nbytes) { #if CUDART_VERSION >= 4000 
#if !defined(__arm__) && !defined(__aarch64__) // CUDA 4.0 support pinning of generic host memory if (bPinGenericMemory) { // unpin and delete host memory checkCudaErrors(cudaHostUnregister(*ppAligned_a)); #ifdef WIN32 VirtualFree(*pp_a, 0, MEM_RELEASE); #else munmap(*pp_a, nbytes); #endif } else #endif #endif { cudaFreeHost(*pp_a); } } static const char *sSyncMethod[] = { "0 (Automatic Blocking)", "1 (Spin Blocking)", "2 (Yield Blocking)", "3 (Undefined Blocking Method)", "4 (Blocking Sync Event) = low CPU utilization", NULL}; void printHelp() { printf("Usage: %s [options below]\n", sSDKsample); printf("\t--sync_method=n for CPU/GPU synchronization\n"); printf("\t n=%s\n", sSyncMethod[0]); printf("\t n=%s\n", sSyncMethod[1]); printf("\t n=%s\n", sSyncMethod[2]); printf("\t <Default> n=%s\n", sSyncMethod[4]); printf( "\t--use_generic_memory (default) use generic page-aligned for system " "memory\n"); printf( "\t--use_cuda_malloc_host (optional) use cudaMallocHost to allocate " "system memory\n"); } #if defined(__APPLE__) || defined(MACOSX) #define DEFAULT_PINNED_GENERIC_MEMORY false #else #define DEFAULT_PINNED_GENERIC_MEMORY true #endif int main(int argc, char **argv) { int cuda_device = 0; int nstreams = 4; // number of streams for CUDA calls int nreps = 10; // number of times each experiment is repeated int n = 16 * 1024 * 1024; // number of ints in the data set int nbytes = n * sizeof(int); // number of data bytes dim3 threads, blocks; // kernel launch configuration float elapsed_time, time_memcpy, time_kernel; // timing variables float scale_factor = 1.0f; // allocate generic memory and pin it laster instead of using cudaHostAlloc() bool bPinGenericMemory = DEFAULT_PINNED_GENERIC_MEMORY; // we want this to be the default behavior int device_sync_method = cudaDeviceBlockingSync; // by default we use BlockingSync int niterations; // number of iterations for the loop inside the kernel printf("[ %s ]\n\n", sSDKsample); if (checkCmdLineFlag(argc, (const char **)argv, "help")) { printHelp(); return EXIT_SUCCESS; } if ((device_sync_method = getCmdLineArgumentInt(argc, (const char **)argv, "sync_method")) >= 0) { if (device_sync_method == 0 || device_sync_method == 1 || device_sync_method == 2 || device_sync_method == 4) { printf("Device synchronization method set to = %s\n", sSyncMethod[device_sync_method]); printf("Setting reps to 100 to demonstrate steady state\n"); nreps = 100; } else { printf("Invalid command line option sync_method=\"%d\"\n", device_sync_method); return EXIT_FAILURE; } } else { printHelp(); return EXIT_SUCCESS; } if (checkCmdLineFlag(argc, (const char **)argv, "use_generic_memory")) { #if defined(__APPLE__) || defined(MACOSX) bPinGenericMemory = false; // Generic Pinning of System Paged memory not // currently supported on Mac OSX #else bPinGenericMemory = true; #endif } if (checkCmdLineFlag(argc, (const char **)argv, "use_cuda_malloc_host")) { bPinGenericMemory = false; } printf("\n> "); cuda_device = findCudaDevice(argc, (const char **)argv); // check the compute capability of the device int num_devices = 0; checkCudaErrors(cudaGetDeviceCount(&num_devices)); if (0 == num_devices) { printf( "your system does not have a CUDA capable device, waiving test...\n"); return EXIT_WAIVED; } // check if the command-line chosen device ID is within range, exit if not if (cuda_device >= num_devices) { printf( "cuda_device=%d is invalid, must choose device ID between 0 and %d\n", cuda_device, num_devices - 1); return EXIT_FAILURE; } checkCudaErrors(cudaSetDevice(cuda_device)); // 
Checking for compute capabilities cudaDeviceProp deviceProp; checkCudaErrors(cudaGetDeviceProperties(&deviceProp, cuda_device)); niterations = 5; // Check if GPU can map host memory (Generic Method), if not then we override // bPinGenericMemory to be false if (bPinGenericMemory) { printf("Device: <%s> canMapHostMemory: %s\n", deviceProp.name, deviceProp.canMapHostMemory ? "Yes" : "No"); if (deviceProp.canMapHostMemory == 0) { printf( "Using cudaMallocHost, CUDA device does not support mapping of " "generic host memory\n"); bPinGenericMemory = false; } } // Anything that is less than 32 Cores will have scaled down workload scale_factor = max((32.0f / (_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * (float)deviceProp.multiProcessorCount)), 1.0f); n = (int)rint((float)n / scale_factor); printf("> CUDA Capable: SM %d.%d hardware\n", deviceProp.major, deviceProp.minor); printf("> %d Multiprocessor(s) x %d (Cores/Multiprocessor) = %d (Cores)\n", deviceProp.multiProcessorCount, _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor), _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount); printf("> scale_factor = %1.4f\n", 1.0f / scale_factor); printf("> array_size = %d\n\n", n); // enable use of blocking sync, to reduce CPU usage printf("> Using CPU/GPU Device Synchronization method (%s)\n", sDeviceSyncMethod[device_sync_method]); checkCudaErrors(cudaSetDeviceFlags( device_sync_method | (bPinGenericMemory ? cudaDeviceMapHost : 0))); // allocate host memory int c = 5; // value to which the array will be initialized int *h_a = 0; // pointer to the array data in host memory int *hAligned_a = 0; // pointer to the array data in host memory (aligned to // MEMORY_ALIGNMENT) // Allocate Host memory (could be using cudaMallocHost or VirtualAlloc/mmap if // using the new CUDA 4.0 features AllocateHostMemory(bPinGenericMemory, &h_a, &hAligned_a, nbytes); // allocate device memory int *d_a = 0, *d_c = 0; // pointers to data and init value in the device memory checkCudaErrors(cudaMalloc((void **)&d_a, nbytes)); checkCudaErrors(cudaMemset(d_a, 0x0, nbytes)); checkCudaErrors(cudaMalloc((void **)&d_c, sizeof(int))); checkCudaErrors(cudaMemcpy(d_c, &c, sizeof(int), cudaMemcpyHostToDevice)); printf("\nStarting Test\n"); // allocate and initialize an array of stream handles cudaStream_t *streams = (cudaStream_t *)malloc(nstreams * sizeof(cudaStream_t)); for (int i = 0; i < nstreams; i++) { checkCudaErrors(cudaStreamCreate(&(streams[i]))); } // create CUDA event handles // use blocking sync cudaEvent_t start_event, stop_event; int eventflags = ((device_sync_method == cudaDeviceBlockingSync) ? 
cudaEventBlockingSync : cudaEventDefault); checkCudaErrors(cudaEventCreateWithFlags(&start_event, eventflags)); checkCudaErrors(cudaEventCreateWithFlags(&stop_event, eventflags)); // time memcopy from device checkCudaErrors(cudaEventRecord(start_event, 0)); // record in stream-0, to // ensure that all previous // CUDA calls have // completed checkCudaErrors(cudaMemcpyAsync(hAligned_a, d_a, nbytes, cudaMemcpyDeviceToHost, streams[0])); checkCudaErrors(cudaEventRecord(stop_event, 0)); checkCudaErrors(cudaEventSynchronize( stop_event)); // block until the event is actually recorded checkCudaErrors(cudaEventElapsedTime(&time_memcpy, start_event, stop_event)); printf("memcopy:\t%.2f\n", time_memcpy); // time kernel threads = dim3(512, 1); blocks = dim3(n / threads.x, 1); checkCudaErrors(cudaEventRecord(start_event, 0)); init_array<<<blocks, threads, 0, streams[0]>>>(d_a, d_c, niterations); checkCudaErrors(cudaEventRecord(stop_event, 0)); checkCudaErrors(cudaEventSynchronize(stop_event)); checkCudaErrors(cudaEventElapsedTime(&time_kernel, start_event, stop_event)); printf("kernel:\t\t%.2f\n", time_kernel); ////////////////////////////////////////////////////////////////////// // time non-streamed execution for reference threads = dim3(512, 1); blocks = dim3(n / threads.x, 1); checkCudaErrors(cudaEventRecord(start_event, 0)); for (int k = 0; k < nreps; k++) { init_array<<<blocks, threads>>>(d_a, d_c, niterations); checkCudaErrors( cudaMemcpy(hAligned_a, d_a, nbytes, cudaMemcpyDeviceToHost)); } checkCudaErrors(cudaEventRecord(stop_event, 0)); checkCudaErrors(cudaEventSynchronize(stop_event)); checkCudaErrors(cudaEventElapsedTime(&elapsed_time, start_event, stop_event)); printf("non-streamed:\t%.2f\n", elapsed_time / nreps); ////////////////////////////////////////////////////////////////////// // time execution with nstreams streams threads = dim3(512, 1); blocks = dim3(n / (nstreams * threads.x), 1); memset(hAligned_a, 255, nbytes); // set host memory bits to all 1s, for testing correctness checkCudaErrors(cudaMemset( d_a, 0, nbytes)); // set device memory to all 0s, for testing correctness checkCudaErrors(cudaEventRecord(start_event, 0)); for (int k = 0; k < nreps; k++) { // asynchronously launch nstreams kernels, each operating on its own portion // of data for (int i = 0; i < nstreams; i++) { init_array<<<blocks, threads, 0, streams[i]>>>(d_a + i * n / nstreams, d_c, niterations); } // asynchronously launch nstreams memcopies. 
Note that memcopy in stream x // will only // commence executing when all previous CUDA calls in stream x have // completed for (int i = 0; i < nstreams; i++) { checkCudaErrors(cudaMemcpyAsync(hAligned_a + i * n / nstreams, d_a + i * n / nstreams, nbytes / nstreams, cudaMemcpyDeviceToHost, streams[i])); } } checkCudaErrors(cudaEventRecord(stop_event, 0)); checkCudaErrors(cudaEventSynchronize(stop_event)); checkCudaErrors(cudaEventElapsedTime(&elapsed_time, start_event, stop_event)); printf("%d streams:\t%.2f\n", nstreams, elapsed_time / nreps); // check whether the output is correct printf("-------------------------------\n"); bool bResults = correct_data(hAligned_a, n, c * nreps * niterations); // release resources for (int i = 0; i < nstreams; i++) { checkCudaErrors(cudaStreamDestroy(streams[i])); } checkCudaErrors(cudaEventDestroy(start_event)); checkCudaErrors(cudaEventDestroy(stop_event)); // Free cudaMallocHost or Generic Host allocated memory (from CUDA 4.0) FreeHostMemory(bPinGenericMemory, &h_a, &hAligned_a, nbytes); checkCudaErrors(cudaFree(d_a)); checkCudaErrors(cudaFree(d_c)); return bResults ? EXIT_SUCCESS : EXIT_FAILURE; }
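/* --------------------------------------------------------------------------
 * Minimal sketch (not part of the SDK sample above) of the breadth-first
 * issue pattern the streamed timing loop uses: each of nstreams streams gets
 * a kernel launch followed by an async device-to-host copy of its own chunk,
 * so copies in one stream can overlap kernels in another. It reuses the
 * init_array kernel defined above and assumes pinned host memory (h_dst) and
 * device buffers (d_src, d_factor) were allocated as in main(); it also
 * assumes n is a multiple of 512 * nstreams. The function name and signature
 * are illustrative, not part of the original sample.
 * -------------------------------------------------------------------------- */
static float run_streamed_pass(int *h_dst, int *d_src, int *d_factor,
                               int n, int nstreams, int niterations) {
    cudaStream_t *streams = new cudaStream_t[nstreams];
    for (int i = 0; i < nstreams; ++i) checkCudaErrors(cudaStreamCreate(&streams[i]));

    cudaEvent_t start, stop;
    checkCudaErrors(cudaEventCreate(&start));
    checkCudaErrors(cudaEventCreate(&stop));

    const int chunk = n / nstreams;                 // each stream owns one chunk
    dim3 threads(512, 1), blocks(chunk / 512, 1);

    checkCudaErrors(cudaEventRecord(start, 0));
    for (int i = 0; i < nstreams; ++i)              // issue all kernels first ...
        init_array<<<blocks, threads, 0, streams[i]>>>(d_src + i * chunk,
                                                       d_factor, niterations);
    for (int i = 0; i < nstreams; ++i)              // ... then all copies, stream by stream
        checkCudaErrors(cudaMemcpyAsync(h_dst + i * chunk, d_src + i * chunk,
                                        chunk * sizeof(int),
                                        cudaMemcpyDeviceToHost, streams[i]));
    checkCudaErrors(cudaEventRecord(stop, 0));      // stream 0 waits on the other streams
    checkCudaErrors(cudaEventSynchronize(stop));    // host blocks until everything finished

    float ms = 0.0f;
    checkCudaErrors(cudaEventElapsedTime(&ms, start, stop));

    for (int i = 0; i < nstreams; ++i) checkCudaErrors(cudaStreamDestroy(streams[i]));
    checkCudaErrors(cudaEventDestroy(start));
    checkCudaErrors(cudaEventDestroy(stop));
    delete[] streams;
    return ms;
}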
using namespace std; __device__ __host__ float de(Vec3 pos) ; __device__ __host__ Vec3 getNormal(Vec3 pos) { Vec3 xDir = Vec3(1,0,0); Vec3 yDir = Vec3(0,1,0); Vec3 zDir = Vec3(0,0,1); return Vec3::normalize(Vec3(de(pos+xDir)-de(pos-xDir), de(pos+yDir)-de(pos-yDir), de(pos+zDir)-de(pos-zDir))); } // distance "estimation" function __device__ __host__ float de(Vec3 pos) { float cutoff = 2; float power = 8; Vec3 z = pos; float dr = 1.0; float r = 0.0; for (int i = 0; i < 10 ; i++) { r = Vec3::length(z); if (r>cutoff) break; float theta = acosf(z.z/r); float phi = atanf(z.y); dr = powf( r, power-1.0f)*power*dr + 1.0f; float zr = powf( r,power); theta = theta*power; phi = phi*power; z = Vec3(zr) * Vec3(sinf(theta)*cosf(phi), sinf(phi)*sinf(theta), cosf(theta)); z = z + pos; } return 0.5f*logf(r)*r/dr; } __global__ void zero(float *arr, int size) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= size) return; arr[i] = 0; } __global__ void computeUV(int height, int width, float *uvxArr, float *uvyArr) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if(x >= width) return; if(y >= height) return; float widthFloat = width; float heightFloat = height; float uvx = (tan(3.14159265 / 4.0)) * (2.0*x - widthFloat) / widthFloat; float uvy = (tan(3.14159265 / 4.0)) * ( heightFloat / widthFloat) * (2.0*y- heightFloat) / heightFloat; uvxArr[(y*width)+x] = uvx; uvyArr[(y*width)+x] = uvy; } // take a look at what operations are done in "de" __global__ void trace(int height, int width, float *uvxArr, float *uvyArr, float *distance, Vec3 lookDirection, Vec3 camUp, Vec3 camRight, Vec3 cameraLocation) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if(x >= width) return; if(y >= height) return; float uvx = uvxArr[(y*width)+x]; float uvy = uvyArr[(y*width)+x]; Vec3 rayDirection = Vec3::normalize(lookDirection + Vec3(uvx) * camUp + Vec3(uvy) * camRight); float totalDistance = 0; bool hit = false;; for(int iter= 0; iter < 128; iter++) { Vec3 p = cameraLocation + Vec3(totalDistance) * rayDirection; float currentDist = de(p); totalDistance += currentDist; if (totalDistance > 10) { totalDistance = INFINITY; break; } if (currentDist < 0.00001f) { hit = true; break; } } distance[(y*width)+x] = totalDistance; } __global__ void shade(int height, int width, float *uvxArr, float *uvyArr, float *distance, Vec3 lookDirection, Vec3 camUp, Vec3 camRight, Vec3 backgroundColor, Vec3 cameraLocation, float *rawR, float *rawG, float *rawB) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if(x >= height) return; if(y >= width) return; float uvx = uvxArr[(x*width)+y]; float uvy = uvyArr[(x*width)+y]; float dist = distance[(x*width)+y]; Vec3 ret = backgroundColor; if(dist != INFINITY) { dist-=0.0001f; Vec3 rayDirection = Vec3::normalize(lookDirection + Vec3(uvx) * camUp + Vec3(uvy) * camRight); Vec3 hitPoint = cameraLocation + Vec3(dist) * rayDirection; Vec3 normal = getNormal(hitPoint); float lamb = 0.6; float spec = 0.2; Vec3 objectColor = Vec3(0.8,0.2,0.8); Vec3 toLight = ( Vec3(2,2,1) - hitPoint); toLight = Vec3::normalize(toLight); Vec3 lambIn = Vec3(lamb) * fabsf(Vec3::dot(normal, toLight)); Vec3 specIn = Vec3(spec) * powf(fabsf(Vec3::dot(normal, Vec3::normalize(toLight - hitPoint))), 1); ret = clamp(((lambIn * clamp(objectColor)) + specIn)); } rawR[(x*width)+y] = ret.x; rawG[(x*width)+y] = ret.y; rawB[(x*width)+y] = ret.z; } __global__ void globalIllumination(int 
height, int width, float *uvxArr, float *uvyArr, float *distance, Vec3 lookDirection, Vec3 camUp, Vec3 camRight, Vec3 backgroundColor, Vec3 cameraLocation, float *rawR, float *rawG, float *rawB) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if(x >= width) return; if(y >= height) return; float uvx = uvxArr[(y*width)+x]; float uvy = uvyArr[(y*width)+x]; float dist = distance[(y*width)+x]; if(dist != INFINITY) { float gi; Vec3 rayDirection = Vec3::normalize(lookDirection + Vec3(uvx) * camUp + Vec3(uvy) * camRight); Vec3 hitPoint = cameraLocation + Vec3(dist) * rayDirection; Vec3 normal = getNormal(hitPoint); float totalDistance = 0; for(int i = 0; i < 10; i++) { Vec3 p = hitPoint + Vec3(totalDistance) * normal; float currentDist = de(p); totalDistance += currentDist; } gi = normalize(0.001,0,totalDistance); if (gi > 1) gi = 1; gi = 1-gi; rawR[(y*width)+x] *= gi; rawG[(y*width)+x] *= gi; rawB[(y*width)+x] *= gi; } } __global__ void downsample(int height, int width, int scale, float *rawR, float *rawG, float *rawB, float *imageR, float *imageG, float *imageB) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if(x >= width) return; if(y >= height) return; int inX = x*scale; int inY = y*scale; float outR = 0; float outG = 0; float outB = 0; for(int i = 0; i < scale; i++) { for(int j = 0; j < scale; j++) { outR += (rawR[((inY+i)*width*scale)+(inX+j)]); outG += (rawG[((inY+i)*width*scale)+(inX+j)]); outB += (rawB[((inY+i)*width*scale)+(inX+j)]); } } imageR[(y*width)+x] = outR/(float)(scale*scale); imageG[(y*width)+x] = outG/(float)(scale*scale); imageB[(y*width)+x] = outB/(float)(scale*scale); } int main() { int width = 1024; int height = 768; int scale = 2; int heightScale = scale*height; int widthScale = scale*width; float *rawR; float *rawG; float *rawB; float *uvxArr; float *uvyArr; float *imageR; float *imageG; float *imageB; float *d_imageR; float *d_imageG; float *d_imageB; float *distance; cudaMalloc(&rawR, (width*scale)*(height*scale) * sizeof(float)); cudaMalloc(&rawG, (width*scale)*(height*scale) * sizeof(float)); cudaMalloc(&rawB, (width*scale)*(height*scale) * sizeof(float)); cudaMalloc(&uvxArr, (width*scale)*(height*scale) * sizeof(float)); cudaMalloc(&uvyArr, (width*scale)*(height*scale) * sizeof(float)); cudaMalloc(&distance, (width*scale)*(height*scale) * sizeof(float)); cudaMalloc(&d_imageR, width*height * sizeof(float)); cudaMalloc(&d_imageG, width*height * sizeof(float)); cudaMalloc(&d_imageB, width*height * sizeof(float)); cudaMemset(rawB, 0, (width*scale)*(height*scale)); cudaMemset(rawG, 0, (width*scale)*(height*scale)); cudaMemset(rawB, 0, (width*scale)*(height*scale)); cudaMemset(uvxArr, 0, (width*scale)*(height*scale)); cudaMemset(uvyArr, 0, (width*scale)*(height*scale)); cudaMemset(distance, 0, (width*scale)*(height*scale)); cudaMemset(d_imageR, 0, (width)*(height)); cudaMemset(d_imageG, 0, (width)*(height)); cudaMemset(d_imageB, 0, (width)*(height)); cudaMallocHost(&imageR, width*height * sizeof(float)); cudaMallocHost(&imageG, width*height * sizeof(float)); cudaMallocHost(&imageB, width*height * sizeof(float)); float max = 0; float min = 0; for (int y = 0; y < height; ++y) { for (int x = 0; x < width; ++x) { imageR[((y)*width)+(x)] = 0; imageG[((y)*width)+(x)] = 0; imageB[((y)*width)+(x)] = 0; } } Vec3 cameraLocation = {1.2f,1.2f,1.2f}; Vec3 focus = {0,0,0}; Vec3 worldUp = Vec3(0,1,0); Vec3 lookDirection = Vec3::normalize(focus - cameraLocation); Vec3 camUp = 
Vec3::normalize(Vec3::cross(worldUp, lookDirection)); Vec3 camRight = Vec3::normalize(Vec3::cross(lookDirection, camUp)); // compute UV dim3 threadsUv; dim3 blocksUv; threadsUv.x = 32; threadsUv.y = 32; blocksUv.x = 1+ (widthScale/threadsUv.x); blocksUv.y = 1+ (heightScale/threadsUv.y); getErrorCuda((computeUV<<<blocksUv, threadsUv>>>(heightScale, widthScale, uvxArr, uvyArr))); // trace dim3 threadsTrace; dim3 blocksTrace; threadsTrace.x = 1024; threadsTrace.y = 1; blocksTrace.x = 1 + (widthScale/threadsTrace.x); blocksTrace.y = 1 + (heightScale/threadsTrace.y); getErrorCuda((trace<<<blocksTrace, threadsTrace>>>(heightScale, widthScale, uvxArr, uvyArr, distance, lookDirection, camUp, camRight, cameraLocation))); Vec3 backgroundColor = {0.3f}; // shade dim3 threadsShade; dim3 blocksShade; threadsShade.x = 32; threadsShade.y = 32; blocksShade.x = 1 + (heightScale/threadsShade.x); blocksShade.y = 1 + (widthScale/threadsShade.y); getErrorCuda((shade<<<blocksShade, threadsShade>>>(heightScale, widthScale, uvxArr, uvyArr, distance, lookDirection, camUp, camRight, backgroundColor, cameraLocation, rawR, rawG, rawB))); // global illumination dim3 threadsGi; dim3 blocksGi; threadsGi.x = 32; threadsGi.y = 32; blocksGi.x = 1+ (widthScale/threadsGi.x); blocksGi.y = 1+ (heightScale/threadsGi.y); getErrorCuda((globalIllumination<<<blocksGi, threadsGi>>>(heightScale, widthScale, uvxArr, uvyArr, distance, lookDirection, camUp, camRight, backgroundColor, cameraLocation, rawR, rawG, rawB))); // downsample dim3 threadsDs; dim3 blocksDs; threadsDs.x = 16; threadsDs.y = 16; blocksDs.x = 1+ (width/threadsDs.x); blocksDs.y = 1+ (height/threadsDs.y); getErrorCuda((downsample<<<blocksDs, threadsDs>>>(height, width, scale, rawR, rawG, rawB, d_imageR, d_imageG, d_imageB))); cudaDeviceSynchronize(); cudaMemcpy(imageR, d_imageR, sizeof(float)*height*width, cudaMemcpyDefault); cudaMemcpy(imageG, d_imageG, sizeof(float)*height*width, cudaMemcpyDefault); cudaMemcpy(imageB, d_imageB, sizeof(float)*height*width, cudaMemcpyDefault); std::cout << "writing image" << std::endl; writeImageRGB("test.png", width,height, imageR,imageG,imageB, "output"); return 0; }
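/* --------------------------------------------------------------------------
 * Host-side sketch (not in the original file): the same sphere-tracing loop
 * the trace kernel runs per pixel, callable on the CPU because de() is marked
 * __device__ __host__. Useful for spot-checking a single ray without a GPU.
 * The constants (128 steps, far plane at 10, hit epsilon 1e-5) mirror the
 * kernel; the function name is an illustrative assumption.
 * -------------------------------------------------------------------------- */
__host__ float sphere_trace_host(Vec3 origin, Vec3 dir) {
    float t = 0.0f;
    for (int iter = 0; iter < 128; ++iter) {
        Vec3 p = origin + Vec3(t) * dir;   // current sample point along the ray
        float d = de(p);                   // distance estimate = safe step size
        t += d;                            // march forward by that distance
        if (t > 10.0f) return INFINITY;    // left the scene bounds: miss
        if (d < 0.00001f) return t;        // close enough to the surface: hit
    }
    return t;                              // ran out of steps; treat as the kernel does
}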
using namespace std; /* ------------------------ 2d Spreading Kernels ----------------------------*/ /* Kernels for NUptsdriven Method */ __global__ void Spread_2d_NUptsdriven(FLT *x, FLT *y, CUCPX *c, CUCPX *fw, int M, const int ns, int nf1, int nf2, FLT es_c, FLT es_beta, int *idxnupts, int pirange) { int xstart,ystart,xend,yend; int xx, yy, ix, iy; int outidx; FLT ker1[MAX_NSPREAD]; FLT ker2[MAX_NSPREAD]; FLT x_rescaled, y_rescaled; FLT kervalue1, kervalue2; CUCPX cnow; for(int i=blockDim.x*blockIdx.x+threadIdx.x; i<M; i+=blockDim.x*gridDim.x){ x_rescaled=RESCALE(x[idxnupts[i]], nf1, pirange); y_rescaled=RESCALE(y[idxnupts[i]], nf2, pirange); cnow = c[idxnupts[i]]; xstart = ceil(x_rescaled - ns/2.0); ystart = ceil(y_rescaled - ns/2.0); xend = floor(x_rescaled + ns/2.0); yend = floor(y_rescaled + ns/2.0); FLT x1=(FLT)xstart-x_rescaled; FLT y1=(FLT)ystart-y_rescaled; eval_kernel_vec(ker1,x1,ns,es_c,es_beta); eval_kernel_vec(ker2,y1,ns,es_c,es_beta); for(yy=ystart; yy<=yend; yy++){ for(xx=xstart; xx<=xend; xx++){ ix = xx < 0 ? xx+nf1 : (xx>nf1-1 ? xx-nf1 : xx); iy = yy < 0 ? yy+nf2 : (yy>nf2-1 ? yy-nf2 : yy); outidx = ix+iy*nf1; kervalue1=ker1[xx-xstart]; kervalue2=ker2[yy-ystart]; atomicAdd(&fw[outidx].x, cnow.x*kervalue1*kervalue2); atomicAdd(&fw[outidx].y, cnow.y*kervalue1*kervalue2); } } } } __global__ void Spread_2d_NUptsdriven_Horner(FLT *x, FLT *y, CUCPX *c, CUCPX *fw, int M, const int ns, int nf1, int nf2, FLT sigma, int* idxnupts, int pirange) { int xx, yy, ix, iy; int outidx; FLT ker1[MAX_NSPREAD]; FLT ker2[MAX_NSPREAD]; FLT ker1val, ker2val; FLT x_rescaled, y_rescaled; CUCPX cnow; for(int i=blockDim.x*blockIdx.x+threadIdx.x; i<M; i+=blockDim.x*gridDim.x){ x_rescaled=RESCALE(x[idxnupts[i]], nf1, pirange); y_rescaled=RESCALE(y[idxnupts[i]], nf2, pirange); cnow = c[idxnupts[i]]; int xstart = ceil(x_rescaled - ns/2.0); int ystart = ceil(y_rescaled - ns/2.0); int xend = floor(x_rescaled + ns/2.0); int yend = floor(y_rescaled + ns/2.0); FLT x1=(FLT)xstart-x_rescaled; FLT y1=(FLT)ystart-y_rescaled; eval_kernel_vec_Horner(ker1,x1,ns,sigma); eval_kernel_vec_Horner(ker2,y1,ns,sigma); for(yy=ystart; yy<=yend; yy++){ for(xx=xstart; xx<=xend; xx++){ ix = xx < 0 ? xx+nf1 : (xx>nf1-1 ? xx-nf1 : xx); iy = yy < 0 ? yy+nf2 : (yy>nf2-1 ? yy-nf2 : yy); outidx = ix+iy*nf1; ker1val=ker1[xx-xstart]; ker2val=ker2[yy-ystart]; FLT kervalue=ker1val*ker2val; atomicAdd(&fw[outidx].x, cnow.x*kervalue); atomicAdd(&fw[outidx].y, cnow.y*kervalue); } } } } /* Kernels for SubProb Method */ // SubProb properties __global__ void CalcBinSize_noghost_2d(int M, int nf1, int nf2, int bin_size_x, int bin_size_y, int nbinx, int nbiny, int* bin_size, FLT *x, FLT *y, int* sortidx, int pirange) { int binidx, binx, biny; int oldidx; FLT x_rescaled,y_rescaled; for(int i=threadIdx.x+blockIdx.x*blockDim.x; i<M; i+=gridDim.x*blockDim.x){ x_rescaled=RESCALE(x[i], nf1, pirange); y_rescaled=RESCALE(y[i], nf2, pirange); binx = floor(x_rescaled/bin_size_x); binx = binx >= nbinx ? binx-1 : binx; binx = binx < 0 ? 0 : binx; biny = floor(y_rescaled/bin_size_y); biny = biny >= nbiny ? biny-1 : biny; biny = biny < 0 ? 
0 : biny; binidx = binx+biny*nbinx; oldidx = atomicAdd(&bin_size[binidx], 1); sortidx[i] = oldidx; if(binx >= nbinx || biny >= nbiny){ sortidx[i] = -biny; } } } __global__ void CalcInvertofGlobalSortIdx_2d(int M, int bin_size_x, int bin_size_y, int nbinx,int nbiny, int* bin_startpts, int* sortidx, FLT *x, FLT *y, int* index, int pirange, int nf1, int nf2) { int binx, biny; int binidx; FLT x_rescaled, y_rescaled; for(int i=threadIdx.x+blockIdx.x*blockDim.x; i<M; i+=gridDim.x*blockDim.x){ x_rescaled=RESCALE(x[i], nf1, pirange); y_rescaled=RESCALE(y[i], nf2, pirange); binx = floor(x_rescaled/bin_size_x); binx = binx >= nbinx ? binx-1 : binx; binx = binx < 0 ? 0 : binx; biny = floor(y_rescaled/bin_size_y); biny = biny >= nbiny ? biny-1 : biny; biny = biny < 0 ? 0 : biny; binidx = binx+biny*nbinx; index[bin_startpts[binidx]+sortidx[i]] = i; } } __global__ void Spread_2d_Subprob(FLT *x, FLT *y, CUCPX *c, CUCPX *fw, int M, const int ns, int nf1, int nf2, FLT es_c, FLT es_beta, FLT sigma, int* binstartpts, int* bin_size, int bin_size_x, int bin_size_y, int* subprob_to_bin, int* subprobstartpts, int* numsubprob, int maxsubprobsize, int nbinx, int nbiny, int* idxnupts, int pirange) { extern __shared__ CUCPX fwshared[]; int xstart,ystart,xend,yend; int subpidx=blockIdx.x; int bidx=subprob_to_bin[subpidx]; int binsubp_idx=subpidx-subprobstartpts[bidx]; int ix, iy; int outidx; int ptstart=binstartpts[bidx]+binsubp_idx*maxsubprobsize; int nupts=min(maxsubprobsize, bin_size[bidx]-binsubp_idx*maxsubprobsize); int xoffset=(bidx % nbinx)*bin_size_x; int yoffset=(bidx / nbinx)*bin_size_y; int N = (bin_size_x+2*ceil(ns/2.0))*(bin_size_y+2*ceil(ns/2.0)); FLT ker1[MAX_NSPREAD]; FLT ker2[MAX_NSPREAD]; for(int i=threadIdx.x; i<N; i+=blockDim.x){ fwshared[i].x = 0.0; fwshared[i].y = 0.0; } __syncthreads(); FLT x_rescaled, y_rescaled; CUCPX cnow; for(int i=threadIdx.x; i<nupts; i+=blockDim.x){ int idx = ptstart+i; x_rescaled=RESCALE(x[idxnupts[idx]], nf1, pirange); y_rescaled=RESCALE(y[idxnupts[idx]], nf2, pirange); cnow = c[idxnupts[idx]]; xstart = ceil(x_rescaled - ns/2.0)-xoffset; ystart = ceil(y_rescaled - ns/2.0)-yoffset; xend = floor(x_rescaled + ns/2.0)-xoffset; yend = floor(y_rescaled + ns/2.0)-yoffset; FLT x1=(FLT)xstart+xoffset - x_rescaled; FLT y1=(FLT)ystart+yoffset - y_rescaled; eval_kernel_vec(ker1,x1,ns,es_c,es_beta); eval_kernel_vec(ker2,y1,ns,es_c,es_beta); for(int yy=ystart; yy<=yend; yy++){ iy = yy+ceil(ns/2.0); if(iy >= (bin_size_y + (int) ceil(ns/2.0)*2) || iy<0) break; for(int xx=xstart; xx<=xend; xx++){ ix = xx+ceil(ns/2.0); if(ix >= (bin_size_x + (int) ceil(ns/2.0)*2) || ix<0) break; outidx = ix+iy*(bin_size_x+ceil(ns/2.0)*2); FLT kervalue1 = ker1[xx-xstart]; FLT kervalue2 = ker2[yy-ystart]; atomicAdd(&fwshared[outidx].x, cnow.x*kervalue1*kervalue2); atomicAdd(&fwshared[outidx].y, cnow.y*kervalue1*kervalue2); } } } __syncthreads(); /* write to global memory */ for(int k=threadIdx.x; k<N; k+=blockDim.x){ int i = k % (int) (bin_size_x+2*ceil(ns/2.0) ); int j = k /( bin_size_x+2*ceil(ns/2.0) ); ix = xoffset-ceil(ns/2.0)+i; iy = yoffset-ceil(ns/2.0)+j; if(ix < (nf1+ceil(ns/2.0)) && iy < (nf2+ceil(ns/2.0))){ ix = ix < 0 ? ix+nf1 : (ix>nf1-1 ? ix-nf1 : ix); iy = iy < 0 ? iy+nf2 : (iy>nf2-1 ? 
iy-nf2 : iy); outidx = ix+iy*nf1; int sharedidx=i+j*(bin_size_x+ceil(ns/2.0)*2); atomicAdd(&fw[outidx].x, fwshared[sharedidx].x); atomicAdd(&fw[outidx].y, fwshared[sharedidx].y); } } } __global__ void Spread_2d_Subprob_Horner(FLT *x, FLT *y, CUCPX *c, CUCPX *fw, int M, const int ns, int nf1, int nf2, FLT sigma, int* binstartpts, int* bin_size, int bin_size_x, int bin_size_y, int* subprob_to_bin, int* subprobstartpts, int* numsubprob, int maxsubprobsize, int nbinx, int nbiny, int* idxnupts, int pirange) { extern __shared__ CUCPX fwshared[]; int xstart,ystart,xend,yend; int subpidx=blockIdx.x; int bidx=subprob_to_bin[subpidx]; int binsubp_idx=subpidx-subprobstartpts[bidx]; int ix, iy, outidx; int ptstart=binstartpts[bidx]+binsubp_idx*maxsubprobsize; int nupts=min(maxsubprobsize, bin_size[bidx]-binsubp_idx*maxsubprobsize); int xoffset=(bidx % nbinx)*bin_size_x; int yoffset=(bidx / nbinx)*bin_size_y; int N = (bin_size_x+2*ceil(ns/2.0))*(bin_size_y+2*ceil(ns/2.0)); FLT ker1[MAX_NSPREAD]; FLT ker2[MAX_NSPREAD]; for(int i=threadIdx.x; i<N; i+=blockDim.x){ fwshared[i].x = 0.0; fwshared[i].y = 0.0; } __syncthreads(); FLT x_rescaled, y_rescaled; CUCPX cnow; for(int i=threadIdx.x; i<nupts; i+=blockDim.x){ int idx = ptstart+i; x_rescaled=RESCALE(x[idxnupts[idx]], nf1, pirange); y_rescaled=RESCALE(y[idxnupts[idx]], nf2, pirange); cnow = c[idxnupts[idx]]; xstart = ceil(x_rescaled - ns/2.0)-xoffset; ystart = ceil(y_rescaled - ns/2.0)-yoffset; xend = floor(x_rescaled + ns/2.0)-xoffset; yend = floor(y_rescaled + ns/2.0)-yoffset; eval_kernel_vec_Horner(ker1,xstart+xoffset-x_rescaled,ns,sigma); eval_kernel_vec_Horner(ker2,ystart+yoffset-y_rescaled,ns,sigma); for(int yy=ystart; yy<=yend; yy++){ iy = yy+ceil(ns/2.0); if(iy >= (bin_size_y + (int) ceil(ns/2.0)*2) || iy<0) break; FLT kervalue2 = ker2[yy-ystart]; for(int xx=xstart; xx<=xend; xx++){ ix = xx+ceil(ns/2.0); if(ix >= (bin_size_x + (int) ceil(ns/2.0)*2) || ix<0) break; outidx = ix+iy*(bin_size_x+ (int) ceil(ns/2.0)*2); FLT kervalue1 = ker1[xx-xstart]; atomicAdd(&fwshared[outidx].x, cnow.x*kervalue1*kervalue2); atomicAdd(&fwshared[outidx].y, cnow.y*kervalue1*kervalue2); } } } __syncthreads(); /* write to global memory */ for(int k=threadIdx.x; k<N; k+=blockDim.x){ int i = k % (int) (bin_size_x+2*ceil(ns/2.0) ); int j = k /( bin_size_x+2*ceil(ns/2.0) ); ix = xoffset-ceil(ns/2.0)+i; iy = yoffset-ceil(ns/2.0)+j; if(ix < (nf1+ceil(ns/2.0)) && iy < (nf2+ceil(ns/2.0))){ ix = ix < 0 ? ix+nf1 : (ix>nf1-1 ? ix-nf1 : ix); iy = iy < 0 ? iy+nf2 : (iy>nf2-1 ? 
iy-nf2 : iy); outidx = ix+iy*nf1; int sharedidx=i+j*(bin_size_x+ceil(ns/2.0)*2); atomicAdd(&fw[outidx].x, fwshared[sharedidx].x); atomicAdd(&fw[outidx].y, fwshared[sharedidx].y); } } } /* Kernels for Paul's Method */ __global__ void LocateFineGridPos_Paul(int M, int nf1, int nf2, int bin_size_x, int bin_size_y, int nbinx, int nbiny, int* bin_size, int ns, FLT *x, FLT *y, int* sortidx, int* finegridsize, int pirange) { int binidx, binx, biny; int oldidx; int xidx, yidx, finegrididx; FLT x_rescaled,y_rescaled; for(int i=threadIdx.x+blockIdx.x*blockDim.x; i<M; i+=gridDim.x*blockDim.x){ if(ns%2 == 0){ x_rescaled=RESCALE(x[i], nf1, pirange); y_rescaled=RESCALE(y[i], nf2, pirange); binx = floor(floor(x_rescaled)/bin_size_x); biny = floor(floor(y_rescaled)/bin_size_y); binidx = binx+biny*nbinx; xidx = floor(x_rescaled) - binx*bin_size_x; yidx = floor(y_rescaled) - biny*bin_size_y; finegrididx = binidx*bin_size_x*bin_size_y + xidx + yidx*bin_size_x; }else{ x_rescaled=RESCALE(x[i], nf1, pirange); y_rescaled=RESCALE(y[i], nf2, pirange); xidx = ceil(x_rescaled - 0.5); yidx = ceil(y_rescaled - 0.5); //xidx = (xidx == nf1) ? (xidx-nf1) : xidx; //yidx = (yidx == nf2) ? (yidx-nf2) : yidx; binx = floor(xidx/(float) bin_size_x); biny = floor(yidx/(float) bin_size_y); binidx = binx+biny*nbinx; xidx = xidx - binx*bin_size_x; yidx = yidx - biny*bin_size_y; finegrididx = binidx*bin_size_x*bin_size_y + xidx + yidx*bin_size_x; } oldidx = atomicAdd(&finegridsize[finegrididx], 1); sortidx[i] = oldidx; } } __global__ void CalcInvertofGlobalSortIdx_Paul(int nf1, int nf2, int M, int bin_size_x, int bin_size_y, int nbinx,int nbiny,int ns, FLT *x, FLT *y, int* finegridstartpts, int* sortidx, int* index, int pirange) { FLT x_rescaled, y_rescaled; int binx, biny, binidx, xidx, yidx, finegrididx; for(int i=threadIdx.x+blockIdx.x*blockDim.x; i<M; i+=gridDim.x*blockDim.x){ if(ns%2 == 0){ x_rescaled=RESCALE(x[i], nf1, pirange); y_rescaled=RESCALE(y[i], nf2, pirange); binx = floor(floor(x_rescaled)/bin_size_x); biny = floor(floor(y_rescaled)/bin_size_y); binidx = binx+biny*nbinx; xidx = floor(x_rescaled) - binx*bin_size_x; yidx = floor(y_rescaled) - biny*bin_size_y; finegrididx = binidx*bin_size_x*bin_size_y + xidx + yidx*bin_size_x; }else{ x_rescaled=RESCALE(x[i], nf1, pirange); y_rescaled=RESCALE(y[i], nf2, pirange); xidx = ceil(x_rescaled - 0.5); yidx = ceil(y_rescaled - 0.5); xidx = (xidx == nf1) ? xidx - nf1 : xidx; yidx = (yidx == nf2) ? 
yidx - nf2 : yidx; binx = floor(xidx/(float) bin_size_x); biny = floor(yidx/(float) bin_size_y); binidx = binx+biny*nbinx; xidx = xidx - binx*bin_size_x; yidx = yidx - biny*bin_size_y; finegrididx = binidx*bin_size_x*bin_size_y + xidx + yidx*bin_size_x; } index[finegridstartpts[finegrididx]+sortidx[i]] = i; } } __global__ void Spread_2d_Subprob_Paul(FLT *x, FLT *y, CUCPX *c, CUCPX *fw, int M, const int ns, int nf1, int nf2, FLT es_c, FLT es_beta, FLT sigma, int* binstartpts, int* bin_size, int bin_size_x, int bin_size_y, int* subprob_to_bin, int* subprobstartpts, int* numsubprob, int maxsubprobsize, int nbinx, int nbiny, int* idxnupts, int* fgstartpts, int* finegridsize, int pirange) { extern __shared__ CUCPX fwshared[]; int xstart,ystart,xend,yend; int subpidx=blockIdx.x; int bidx=subprob_to_bin[subpidx]; int binsubp_idx=subpidx-subprobstartpts[bidx]; int ix,iy,outidx; int xoffset=(bidx % nbinx)*bin_size_x; int yoffset=(bidx / nbinx)*bin_size_y; int N = (bin_size_x+2*ceil(ns/2.0))*(bin_size_y+2*ceil(ns/2.0)); #if 0 FLT ker1[MAX_NSPREAD*10]; FLT ker2[MAX_NSPREAD*10]; #endif for(int i=threadIdx.x; i<N; i+=blockDim.x){ fwshared[i].x = 0.0; fwshared[i].y = 0.0; } __syncthreads(); FLT x_rescaled, y_rescaled; for(int i=threadIdx.x; i<bin_size_x*bin_size_y; i+=blockDim.x){ int fineidx = bidx*bin_size_x*bin_size_y+i; int idxstart = fgstartpts[fineidx]+binsubp_idx*maxsubprobsize; int nupts = min(maxsubprobsize,finegridsize[fineidx]-binsubp_idx* maxsubprobsize); if(nupts > 0){ x_rescaled = x[idxnupts[idxstart]]; y_rescaled = y[idxnupts[idxstart]]; xstart = ceil(x_rescaled - ns/2.0)-xoffset; ystart = ceil(y_rescaled - ns/2.0)-yoffset; xend = floor(x_rescaled + ns/2.0)-xoffset; yend = floor(y_rescaled + ns/2.0)-yoffset; #if 0 for(int m=0; m<nupts; m++){ int idx = idxstart+m; x_rescaled=RESCALE(x[idxnupts[idx]], nf1, pirange); y_rescaled=RESCALE(y[idxnupts[idx]], nf2, pirange); eval_kernel_vec_Horner(ker1+m*MAX_NSPREAD,xstart+xoffset- x_rescaled,ns,sigma); eval_kernel_vec_Horner(ker2+m*MAX_NSPREAD,ystart+yoffset- y_rescaled,ns,sigma); } #endif for(int yy=ystart; yy<=yend; yy++){ FLT kervalue2[10]; for(int m=0; m<nupts; m++){ int idx = idxstart+m; #if 1 y_rescaled=RESCALE(y[idxnupts[idx]], nf2, pirange); FLT disy = abs(y_rescaled-(yy+yoffset)); kervalue2[m] = evaluate_kernel(disy, es_c, es_beta, ns); #else kervalue2[m] = ker2[m*MAX_NSPREAD+yy-ystart]; #endif } for(int xx=xstart; xx<=xend; xx++){ ix = xx+ceil(ns/2.0); iy = yy+ceil(ns/2.0); outidx = ix+iy*(bin_size_x+ceil(ns/2.0)*2); CUCPX updatevalue; updatevalue.x = 0.0; updatevalue.y = 0.0; for(int m=0; m<nupts; m++){ int idx = idxstart+m; #if 1 x_rescaled=RESCALE(x[idxnupts[idx]], nf1, pirange); FLT disx = abs(x_rescaled-(xx+xoffset)); FLT kervalue1 = evaluate_kernel(disx, es_c, es_beta, ns); updatevalue.x += kervalue2[m]*kervalue1* c[idxnupts[idx]].x; updatevalue.y += kervalue2[m]*kervalue1* c[idxnupts[idx]].y; #else FLT kervalue1 = ker1[m*MAX_NSPREAD+xx-xstart]; updatevalue.x += kervalue1*kervalue2[m]* c[idxnupts[idx]].x; updatevalue.y += kervalue1*kervalue2[m]* c[idxnupts[idx]].y; #endif } atomicAdd(&fwshared[outidx].x, updatevalue.x); atomicAdd(&fwshared[outidx].y, updatevalue.y); } } } } __syncthreads(); /* write to global memory */ for(int k=threadIdx.x; k<N; k+=blockDim.x){ int i = k % (int) (bin_size_x+2*ceil(ns/2.0) ); int j = k /( bin_size_x+2*ceil(ns/2.0) ); ix = xoffset-ceil(ns/2.0)+i; iy = yoffset-ceil(ns/2.0)+j; if(ix < (nf1+ceil(ns/2.0)) && iy < (nf2+ceil(ns/2.0))){ ix = ix < 0 ? ix+nf1 : (ix>nf1-1 ? ix-nf1 : ix); iy = iy < 0 ? 
iy+nf2 : (iy>nf2-1 ? iy-nf2 : iy); outidx = ix+iy*nf1; int sharedidx=i+j*(bin_size_x+ceil(ns/2.0)*2); atomicAdd(&fw[outidx].x, fwshared[sharedidx].x); atomicAdd(&fw[outidx].y, fwshared[sharedidx].y); } } } /* --------------------- 2d Interpolation Kernels ----------------------------*/ /* Kernels for NUptsdriven Method */ __global__ void Interp_2d_NUptsdriven(FLT *x, FLT *y, CUCPX *c, CUCPX *fw, int M, const int ns, int nf1, int nf2, FLT es_c, FLT es_beta, int* idxnupts, int pirange) { for(int i=blockDim.x*blockIdx.x+threadIdx.x; i<M; i+=blockDim.x*gridDim.x){ FLT x_rescaled=RESCALE(x[idxnupts[i]], nf1, pirange); FLT y_rescaled=RESCALE(y[idxnupts[i]], nf2, pirange); int xstart = ceil(x_rescaled - ns/2.0); int ystart = ceil(y_rescaled - ns/2.0); int xend = floor(x_rescaled + ns/2.0); int yend = floor(y_rescaled + ns/2.0); CUCPX cnow; cnow.x = 0.0; cnow.y = 0.0; for(int yy=ystart; yy<=yend; yy++){ FLT disy=abs(y_rescaled-yy); FLT kervalue2 = evaluate_kernel(disy, es_c, es_beta, ns); for(int xx=xstart; xx<=xend; xx++){ int ix = xx < 0 ? xx+nf1 : (xx>nf1-1 ? xx-nf1 : xx); int iy = yy < 0 ? yy+nf2 : (yy>nf2-1 ? yy-nf2 : yy); int inidx = ix+iy*nf1; FLT disx=abs(x_rescaled-xx); FLT kervalue1 = evaluate_kernel(disx, es_c, es_beta, ns); cnow.x += fw[inidx].x*kervalue1*kervalue2; cnow.y += fw[inidx].y*kervalue1*kervalue2; } } c[idxnupts[i]].x = cnow.x; c[idxnupts[i]].y = cnow.y; } } __global__ void Interp_2d_NUptsdriven_Horner(FLT *x, FLT *y, CUCPX *c, CUCPX *fw, int M, const int ns, int nf1, int nf2, FLT sigma, int* idxnupts, int pirange) { for(int i=blockDim.x*blockIdx.x+threadIdx.x; i<M; i+=blockDim.x*gridDim.x){ FLT x_rescaled=RESCALE(x[idxnupts[i]], nf1, pirange); FLT y_rescaled=RESCALE(y[idxnupts[i]], nf2, pirange); int xstart = ceil(x_rescaled - ns/2.0); int ystart = ceil(y_rescaled - ns/2.0); int xend = floor(x_rescaled + ns/2.0); int yend = floor(y_rescaled + ns/2.0); CUCPX cnow; cnow.x = 0.0; cnow.y = 0.0; FLT ker1[MAX_NSPREAD]; FLT ker2[MAX_NSPREAD]; eval_kernel_vec_Horner(ker1,xstart-x_rescaled,ns,sigma); eval_kernel_vec_Horner(ker2,ystart-y_rescaled,ns,sigma); for(int yy=ystart; yy<=yend; yy++){ FLT disy=abs(y_rescaled-yy); FLT kervalue2 = ker2[yy-ystart]; for(int xx=xstart; xx<=xend; xx++){ int ix = xx < 0 ? xx+nf1 : (xx>nf1-1 ? xx-nf1 : xx); int iy = yy < 0 ? yy+nf2 : (yy>nf2-1 ? 
yy-nf2 : yy); int inidx = ix+iy*nf1; FLT disx=abs(x_rescaled-xx); FLT kervalue1 = ker1[xx-xstart]; cnow.x += fw[inidx].x*kervalue1*kervalue2; cnow.y += fw[inidx].y*kervalue1*kervalue2; } } c[idxnupts[i]].x = cnow.x; c[idxnupts[i]].y = cnow.y; } } /* Kernels for Subprob Method */ __global__ void Interp_2d_Subprob(FLT *x, FLT *y, CUCPX *c, CUCPX *fw, int M, const int ns, int nf1, int nf2, FLT es_c, FLT es_beta, FLT sigma, int* binstartpts, int* bin_size, int bin_size_x, int bin_size_y, int* subprob_to_bin, int* subprobstartpts, int* numsubprob, int maxsubprobsize, int nbinx, int nbiny, int* idxnupts, int pirange) { extern __shared__ CUCPX fwshared[]; int xstart,ystart,xend,yend; int subpidx=blockIdx.x; int bidx=subprob_to_bin[subpidx]; int binsubp_idx=subpidx-subprobstartpts[bidx]; int ix, iy; int outidx; int ptstart=binstartpts[bidx]+binsubp_idx*maxsubprobsize; int nupts=min(maxsubprobsize, bin_size[bidx]-binsubp_idx*maxsubprobsize); int xoffset=(bidx % nbinx)*bin_size_x; int yoffset=(bidx / nbinx)*bin_size_y; int N = (bin_size_x+2*ceil(ns/2.0))*(bin_size_y+2*ceil(ns/2.0)); for(int k=threadIdx.x;k<N; k+=blockDim.x){ int i = k % (int) (bin_size_x+2*ceil(ns/2.0) ); int j = k /( bin_size_x+2*ceil(ns/2.0) ); ix = xoffset-ceil(ns/2.0)+i; iy = yoffset-ceil(ns/2.0)+j; if(ix < (nf1+ceil(ns/2.0)) && iy < (nf2+ceil(ns/2.0))){ ix = ix < 0 ? ix+nf1 : (ix>nf1-1 ? ix-nf1 : ix); iy = iy < 0 ? iy+nf2 : (iy>nf2-1 ? iy-nf2 : iy); outidx = ix+iy*nf1; int sharedidx=i+j*(bin_size_x+ceil(ns/2.0)*2); fwshared[sharedidx].x = fw[outidx].x; fwshared[sharedidx].y = fw[outidx].y; } } __syncthreads(); FLT x_rescaled, y_rescaled; CUCPX cnow; for(int i=threadIdx.x; i<nupts; i+=blockDim.x){ int idx = ptstart+i; x_rescaled=RESCALE(x[idxnupts[idx]], nf1, pirange); y_rescaled=RESCALE(y[idxnupts[idx]], nf2, pirange); cnow.x = 0.0; cnow.y = 0.0; xstart = ceil(x_rescaled - ns/2.0)-xoffset; ystart = ceil(y_rescaled - ns/2.0)-yoffset; xend = floor(x_rescaled + ns/2.0)-xoffset; yend = floor(y_rescaled + ns/2.0)-yoffset; for(int yy=ystart; yy<=yend; yy++){ FLT disy=abs(y_rescaled-(yy+yoffset)); FLT kervalue2 = evaluate_kernel(disy, es_c, es_beta, ns); for(int xx=xstart; xx<=xend; xx++){ ix = xx+ceil(ns/2.0); iy = yy+ceil(ns/2.0); outidx = ix+iy*(bin_size_x+ceil(ns/2.0)*2); FLT disx=abs(x_rescaled-(xx+xoffset)); FLT kervalue1 = evaluate_kernel(disx, es_c, es_beta, ns); cnow.x += fwshared[outidx].x*kervalue1*kervalue2; cnow.y += fwshared[outidx].y*kervalue1*kervalue2; } } c[idxnupts[idx]] = cnow; } } __global__ void Interp_2d_Subprob_Horner(FLT *x, FLT *y, CUCPX *c, CUCPX *fw, int M, const int ns, int nf1, int nf2, FLT sigma, int* binstartpts, int* bin_size, int bin_size_x, int bin_size_y, int* subprob_to_bin, int* subprobstartpts, int* numsubprob, int maxsubprobsize, int nbinx, int nbiny, int* idxnupts, int pirange) { extern __shared__ CUCPX fwshared[]; int xstart,ystart,xend,yend; int subpidx=blockIdx.x; int bidx=subprob_to_bin[subpidx]; int binsubp_idx=subpidx-subprobstartpts[bidx]; int ix, iy; int outidx; int ptstart=binstartpts[bidx]+binsubp_idx*maxsubprobsize; int nupts=min(maxsubprobsize, bin_size[bidx]-binsubp_idx*maxsubprobsize); int xoffset=(bidx % nbinx)*bin_size_x; int yoffset=(bidx / nbinx)*bin_size_y; int N = (bin_size_x+2*ceil(ns/2.0))*(bin_size_y+2*ceil(ns/2.0)); for(int k=threadIdx.x;k<N; k+=blockDim.x){ int i = k % (int) (bin_size_x+2*ceil(ns/2.0) ); int j = k /( bin_size_x+2*ceil(ns/2.0) ); ix = xoffset-ceil(ns/2.0)+i; iy = yoffset-ceil(ns/2.0)+j; if(ix < (nf1+ceil(ns/2.0)) && iy < (nf2+ceil(ns/2.0))){ ix = ix < 0 
? ix+nf1 : (ix>nf1-1 ? ix-nf1 : ix); iy = iy < 0 ? iy+nf2 : (iy>nf2-1 ? iy-nf2 : iy); outidx = ix+iy*nf1; int sharedidx=i+j*(bin_size_x+ceil(ns/2.0)*2); fwshared[sharedidx].x = fw[outidx].x; fwshared[sharedidx].y = fw[outidx].y; } } __syncthreads(); FLT ker1[MAX_NSPREAD]; FLT ker2[MAX_NSPREAD]; FLT x_rescaled, y_rescaled; CUCPX cnow; for(int i=threadIdx.x; i<nupts; i+=blockDim.x){ int idx = ptstart+i; x_rescaled=RESCALE(x[idxnupts[idx]], nf1, pirange); y_rescaled=RESCALE(y[idxnupts[idx]], nf2, pirange); cnow.x = 0.0; cnow.y = 0.0; xstart = ceil(x_rescaled - ns/2.0)-xoffset; ystart = ceil(y_rescaled - ns/2.0)-yoffset; xend = floor(x_rescaled + ns/2.0)-xoffset; yend = floor(y_rescaled + ns/2.0)-yoffset; eval_kernel_vec_Horner(ker1,xstart+xoffset-x_rescaled,ns,sigma); eval_kernel_vec_Horner(ker2,ystart+yoffset-y_rescaled,ns,sigma); for(int yy=ystart; yy<=yend; yy++){ FLT disy=abs(y_rescaled-(yy+yoffset)); FLT kervalue2 = ker2[yy-ystart]; for(int xx=xstart; xx<=xend; xx++){ ix = xx+ceil(ns/2.0); iy = yy+ceil(ns/2.0); outidx = ix+iy*(bin_size_x+ceil(ns/2.0)*2); FLT kervalue1 = ker1[xx-xstart]; cnow.x += fwshared[outidx].x*kervalue1*kervalue2; cnow.y += fwshared[outidx].y*kervalue1*kervalue2; } } c[idxnupts[idx]] = cnow; } }
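/*
 * Illustrative host-side launch sketch for Interp_2d_Subprob (not part of the
 * original file; a minimal sketch under stated assumptions). The kernel stages
 * a padded bin of fw into shared memory, so the dynamic shared-memory request
 * must cover (bin_size_x + 2*ceil(ns/2)) * (bin_size_y + 2*ceil(ns/2)) CUCPX
 * values, matching the index range N computed inside the kernel. One block is
 * launched per subproblem (the kernel reads blockIdx.x as the subproblem id).
 * The parameter totalnumsubprob, the 256-thread block size, and the use of the
 * default stream are assumptions for illustration only.
 */
static void launch_interp_2d_subprob_sketch(
    FLT *d_x, FLT *d_y, CUCPX *d_c, CUCPX *d_fw, int M, int ns,
    int nf1, int nf2, FLT es_c, FLT es_beta, FLT sigma,
    int *d_binstartpts, int *d_binsize, int bin_size_x, int bin_size_y,
    int *d_subprob_to_bin, int *d_subprobstartpts, int *d_numsubprob,
    int maxsubprobsize, int nbinx, int nbiny, int *d_idxnupts, int pirange,
    int totalnumsubprob /* assumed: total number of subproblems */)
{
    const int pad = (ns + 1) / 2;  // = ceil(ns/2) for positive integer ns
    const size_t sharedmem = sizeof(CUCPX) *
        (size_t)(bin_size_x + 2 * pad) * (size_t)(bin_size_y + 2 * pad);
    Interp_2d_Subprob<<<totalnumsubprob, 256, sharedmem>>>(
        d_x, d_y, d_c, d_fw, M, ns, nf1, nf2, es_c, es_beta, sigma,
        d_binstartpts, d_binsize, bin_size_x, bin_size_y,
        d_subprob_to_bin, d_subprobstartpts, d_numsubprob,
        maxsubprobsize, nbinx, nbiny, d_idxnupts, pirange);
}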
the_stack
#include "cupoch/geometry/kdtree_flann.h" #include "cupoch/geometry/kdtree_search_param.h" #include "cupoch/geometry/pointcloud.h" #include "cupoch/registration/colored_icp.h" #include "cupoch/utility/console.h" #include "cupoch/utility/eigen.h" using namespace cupoch; using namespace cupoch::registration; namespace { class PointCloudForColoredICP : public geometry::PointCloud { public: utility::device_vector<Eigen::Vector3f> color_gradient_; }; class TransformationEstimationForColoredICP : public TransformationEstimation { public: TransformationEstimationType GetTransformationEstimationType() const override { return type_; }; TransformationEstimationForColoredICP(float lambda_geometric = 0.968, float det_thresh = 1.0e-6) : lambda_geometric_(lambda_geometric), det_thresh_(det_thresh) { if (lambda_geometric_ < 0 || lambda_geometric_ > 1.0) lambda_geometric_ = 0.968; } ~TransformationEstimationForColoredICP() override {} public: float ComputeRMSE(const geometry::PointCloud &source, const geometry::PointCloud &target, const CorrespondenceSet &corres) const override; Eigen::Matrix4f ComputeTransformation( const geometry::PointCloud &source, const geometry::PointCloud &target, const CorrespondenceSet &corres) const override; public: float lambda_geometric_; float det_thresh_; private: const TransformationEstimationType type_ = TransformationEstimationType::ColoredICP; }; struct compute_color_gradient_functor { compute_color_gradient_functor(const Eigen::Vector3f *points, const Eigen::Vector3f *normals, const Eigen::Vector3f *colors, const int *indices, int knn) : points_(points), normals_(normals), colors_(colors), indices_(indices), knn_(knn){}; const Eigen::Vector3f *points_; const Eigen::Vector3f *normals_; const Eigen::Vector3f *colors_; const int *indices_; const int knn_; __device__ Eigen::Vector3f operator()(size_t idx) const { const Eigen::Vector3f vt = points_[idx]; const Eigen::Vector3f nt = normals_[idx]; float it = (colors_[idx](0) + colors_[idx](1) + colors_[idx](2)) / 3.0; Eigen::Matrix3f AtA; Eigen::Vector3f Atb; AtA.setZero(); Atb.setZero(); int nn = 0; for (size_t i = 1; i < knn_; ++i) { const int P_adj_idx = __ldg(&indices_[idx * knn_ + i]); if (P_adj_idx < 0) continue; const Eigen::Vector3f &vt_adj = points_[P_adj_idx]; const Eigen::Vector3f vt_proj = vt_adj - (vt_adj - vt).dot(nt) * nt; float it_adj = (colors_[P_adj_idx](0) + colors_[P_adj_idx](1) + colors_[P_adj_idx](2)) / 3.0; const Eigen::Vector3f vtmp = vt_proj - vt; AtA.noalias() += vtmp * vtmp.transpose(); Atb.noalias() += (it_adj - it) * vtmp; ++nn; } if (nn < 4) return Eigen::Vector3f::Zero(); // adds orthogonal constraint AtA.noalias() += (nn - 1) * (nn - 1) * nt * nt.transpose(); AtA.diagonal() += Eigen::Vector3f::Constant(1.0e-6); const Eigen::Vector3f x = AtA.inverse() * Atb; return x; } }; std::shared_ptr<PointCloudForColoredICP> InitializePointCloudForColoredICP( const geometry::PointCloud &target, const geometry::KDTreeSearchParamRadius &search_param) { utility::LogDebug("InitializePointCloudForColoredICP"); geometry::KDTreeFlann tree; tree.SetGeometry(target); auto output = std::make_shared<PointCloudForColoredICP>(); output->colors_ = target.colors_; output->normals_ = target.normals_; output->points_ = target.points_; size_t n_points = output->points_.size(); output->color_gradient_.resize(n_points, Eigen::Vector3f::Zero()); utility::device_vector<int> point_idx; utility::device_vector<float> point_squared_distance; tree.SearchRadius(output->points_, search_param.radius_, search_param.max_nn_, point_idx, 
point_squared_distance); compute_color_gradient_functor func( thrust::raw_pointer_cast(output->points_.data()), thrust::raw_pointer_cast(output->normals_.data()), thrust::raw_pointer_cast(output->colors_.data()), thrust::raw_pointer_cast(point_idx.data()), search_param.max_nn_); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(n_points), output->color_gradient_.begin(), func); return output; } struct compute_jacobian_and_residual_functor : public utility::multiple_jacobians_residuals_functor<Eigen::Vector6f, 2> { compute_jacobian_and_residual_functor( const Eigen::Vector3f *source_points, const Eigen::Vector3f *source_colors, const Eigen::Vector3f *target_points, const Eigen::Vector3f *target_normals, const Eigen::Vector3f *target_colors, const Eigen::Vector3f *target_color_gradient, const Eigen::Vector2i *corres, float sqrt_lambda_geometric, float sqrt_lambda_photometric) : source_points_(source_points), source_colors_(source_colors), target_points_(target_points), target_normals_(target_normals), target_colors_(target_colors), target_color_gradient_(target_color_gradient), corres_(corres), sqrt_lambda_geometric_(sqrt_lambda_geometric), sqrt_lambda_photometric_(sqrt_lambda_photometric){}; const Eigen::Vector3f *source_points_; const Eigen::Vector3f *source_colors_; const Eigen::Vector3f *target_points_; const Eigen::Vector3f *target_normals_; const Eigen::Vector3f *target_colors_; const Eigen::Vector3f *target_color_gradient_; const Eigen::Vector2i *corres_; const float sqrt_lambda_geometric_; const float sqrt_lambda_photometric_; __device__ void operator()(int i, Eigen::Vector6f J_r[2], float r[2]) const { size_t cs = corres_[i][0]; size_t ct = corres_[i][1]; const Eigen::Vector3f vs = source_points_[cs]; const Eigen::Vector3f vt = target_points_[ct]; const Eigen::Vector3f nt = target_normals_[ct]; J_r[0].block<3, 1>(0, 0) = sqrt_lambda_geometric_ * vs.cross(nt); J_r[0].block<3, 1>(3, 0) = sqrt_lambda_geometric_ * nt; r[0] = sqrt_lambda_geometric_ * (vs - vt).dot(nt); // project vs into vt's tangential plane Eigen::Vector3f vs_proj = vs - (vs - vt).dot(nt) * nt; float is = (source_colors_[cs](0) + source_colors_[cs](1) + source_colors_[cs](2)) / 3.0; float it = (target_colors_[ct](0) + target_colors_[ct](1) + target_colors_[ct](2)) / 3.0; const Eigen::Vector3f &dit = target_color_gradient_[ct]; float is0_proj = (dit.dot(vs_proj - vt)) + it; const Eigen::Matrix3f M = (Eigen::Matrix3f() << 1.0 - nt(0) * nt(0), -nt(0) * nt(1), -nt(0) * nt(2), -nt(0) * nt(1), 1.0 - nt(1) * nt(1), -nt(1) * nt(2), -nt(0) * nt(2), -nt(1) * nt(2), 1.0 - nt(2) * nt(2)) .finished(); const Eigen::Vector3f &ditM = -dit.transpose() * M; J_r[1].block<3, 1>(0, 0) = sqrt_lambda_photometric_ * vs.cross(ditM); J_r[1].block<3, 1>(3, 0) = sqrt_lambda_photometric_ * ditM; r[1] = sqrt_lambda_photometric_ * (is - is0_proj); } }; Eigen::Matrix4f TransformationEstimationForColoredICP::ComputeTransformation( const geometry::PointCloud &source, const geometry::PointCloud &target, const CorrespondenceSet &corres) const { if (corres.empty() || target.HasNormals() == false || target.HasColors() == false || source.HasColors() == false) return Eigen::Matrix4f::Identity(); float sqrt_lambda_geometric = sqrt(lambda_geometric_); float lambda_photometric = 1.0 - lambda_geometric_; float sqrt_lambda_photometric = sqrt(lambda_photometric); const auto &target_c = (const PointCloudForColoredICP &)target; compute_jacobian_and_residual_functor func( thrust::raw_pointer_cast(source.points_.data()), 
thrust::raw_pointer_cast(source.colors_.data()), thrust::raw_pointer_cast(target.points_.data()), thrust::raw_pointer_cast(target.normals_.data()), thrust::raw_pointer_cast(target.colors_.data()), thrust::raw_pointer_cast(target_c.color_gradient_.data()), thrust::raw_pointer_cast(corres.data()), sqrt_lambda_geometric, sqrt_lambda_photometric); Eigen::Matrix6f JTJ; Eigen::Vector6f JTr; float r2; thrust::tie(JTJ, JTr, r2) = utility::ComputeJTJandJTr<Eigen::Matrix6f, Eigen::Vector6f, 2, compute_jacobian_and_residual_functor>( func, (int)corres.size()); bool is_success; Eigen::Matrix4f extrinsic; thrust::tie(is_success, extrinsic) = utility::SolveJacobianSystemAndObtainExtrinsicMatrix(JTJ, JTr, det_thresh_); return is_success ? extrinsic : Eigen::Matrix4f::Identity(); } struct diff_square_colored_functor { diff_square_colored_functor(const Eigen::Vector3f *source_points, const Eigen::Vector3f *source_colors, const Eigen::Vector3f *target_points, const Eigen::Vector3f *target_normals, const Eigen::Vector3f *target_colors, const Eigen::Vector3f *target_color_gradient, float sqrt_lambda_geometric, float sqrt_lambda_photometric) : source_points_(source_points), source_colors_(source_colors), target_points_(target_points), target_normals_(target_normals), target_colors_(target_colors), target_color_gradient_(target_color_gradient), sqrt_lambda_geometric_(sqrt_lambda_geometric), sqrt_lambda_photometric_(sqrt_lambda_photometric){}; const Eigen::Vector3f *source_points_; const Eigen::Vector3f *source_colors_; const Eigen::Vector3f *target_points_; const Eigen::Vector3f *target_normals_; const Eigen::Vector3f *target_colors_; const Eigen::Vector3f *target_color_gradient_; const float sqrt_lambda_geometric_; const float sqrt_lambda_photometric_; __device__ float operator()(const Eigen::Vector2i &corr) const { size_t cs = corr[0]; size_t ct = corr[1]; const Eigen::Vector3f vs = source_points_[cs]; const Eigen::Vector3f vt = target_points_[ct]; const Eigen::Vector3f nt = target_normals_[ct]; Eigen::Vector3f vs_proj = vs - (vs - vt).dot(nt) * nt; float is = (source_colors_[cs](0) + source_colors_[cs](1) + source_colors_[cs](2)) / 3.0; float it = (target_colors_[ct](0) + target_colors_[ct](1) + target_colors_[ct](2)) / 3.0; const Eigen::Vector3f &dit = target_color_gradient_[ct]; float is0_proj = (dit.dot(vs_proj - vt)) + it; float residual_geometric = sqrt_lambda_geometric_ * (vs - vt).dot(nt); float residual_photometric = sqrt_lambda_photometric_ * (is - is0_proj); return residual_geometric * residual_geometric + residual_photometric * residual_photometric; } }; float TransformationEstimationForColoredICP::ComputeRMSE( const geometry::PointCloud &source, const geometry::PointCloud &target, const CorrespondenceSet &corres) const { float sqrt_lambda_geometric = sqrt(lambda_geometric_); float lambda_photometric = 1.0 - lambda_geometric_; float sqrt_lambda_photometric = sqrt(lambda_photometric); const auto &target_c = (const PointCloudForColoredICP &)target; diff_square_colored_functor func( thrust::raw_pointer_cast(source.points_.data()), thrust::raw_pointer_cast(source.colors_.data()), thrust::raw_pointer_cast(target.points_.data()), thrust::raw_pointer_cast(target.normals_.data()), thrust::raw_pointer_cast(target.colors_.data()), thrust::raw_pointer_cast(target_c.color_gradient_.data()), sqrt_lambda_geometric, sqrt_lambda_photometric); const auto err = thrust::transform_reduce( utility::exec_policy(0)->on(0), corres.begin(), corres.end(), func, 0.0f, thrust::plus<float>()); return err; }; } // namespace 
RegistrationResult cupoch::registration::RegistrationColoredICP( const geometry::PointCloud &source, const geometry::PointCloud &target, float max_distance, const Eigen::Matrix4f &init /* = Eigen::Matrix4f::Identity()*/, const ICPConvergenceCriteria &criteria /* = ICPConvergenceCriteria()*/, float lambda_geometric /* = 0.968*/, float det_thresh /* = 1.0e-6 */) { auto target_c = InitializePointCloudForColoredICP( target, geometry::KDTreeSearchParamRadius(max_distance * 2.0, 30)); return RegistrationICP( source, *target_c, max_distance, init, TransformationEstimationForColoredICP(lambda_geometric, det_thresh), criteria); }
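/*
 * Illustrative caller sketch (not part of the original file). Colored ICP
 * returns the identity transform unless the target carries normals and both
 * clouds carry colors, so those fields must be populated beforehand. The
 * 0.07 correspondence distance is an arbitrary example value, and the other
 * arguments simply restate the documented defaults.
 */
static cupoch::registration::RegistrationResult RunColoredICPSketch(
        const cupoch::geometry::PointCloud &source,
        const cupoch::geometry::PointCloud &target) {
    const float max_correspondence_distance = 0.07f;  // example value
    return cupoch::registration::RegistrationColoredICP(
            source, target, max_correspondence_distance,
            Eigen::Matrix4f::Identity(),
            cupoch::registration::ICPConvergenceCriteria(),
            /*lambda_geometric=*/0.968f,
            /*det_thresh=*/1.0e-6f);
}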
the_stack
namespace pcl { namespace device { __device__ __forceinline__ float getMinTime (const float3& volume_max, const float3& origin, const float3& dir) { float txmin = ( (dir.x > 0 ? 0.f : volume_max.x) - origin.x) / dir.x; float tymin = ( (dir.y > 0 ? 0.f : volume_max.y) - origin.y) / dir.y; float tzmin = ( (dir.z > 0 ? 0.f : volume_max.z) - origin.z) / dir.z; return fmax ( fmax (txmin, tymin), tzmin); } __device__ __forceinline__ float getMaxTime (const float3& volume_max, const float3& origin, const float3& dir) { float txmax = ( (dir.x > 0 ? volume_max.x : 0.f) - origin.x) / dir.x; float tymax = ( (dir.y > 0 ? volume_max.y : 0.f) - origin.y) / dir.y; float tzmax = ( (dir.z > 0 ? volume_max.z : 0.f) - origin.z) / dir.z; return fmin (fmin (txmax, tymax), tzmax); } struct RayCaster { enum { CTA_SIZE_X = 32, CTA_SIZE_Y = 8 }; Mat33 Rcurr; float3 tcurr; float time_step; float3 volume_size; float3 cell_size; int cols, rows; PtrStep<short2> volume; Intr intr; mutable PtrStep<float> nmap; mutable PtrStep<float> vmap; __device__ __forceinline__ float3 get_ray_next (int x, int y) const { float3 ray_next; ray_next.x = (x - intr.cx) / intr.fx; ray_next.y = (y - intr.cy) / intr.fy; ray_next.z = 1; return ray_next; } __device__ __forceinline__ bool checkInds (const int3& g) const { return (g.x >= 0 && g.y >= 0 && g.z >= 0 && g.x < VOLUME_X && g.y < VOLUME_Y && g.z < VOLUME_Z); } __device__ __forceinline__ float readTsdf (int x, int y, int z, pcl::gpu::tsdf_buffer buffer) const { const short2* tmp_pos = &(volume.ptr (buffer.voxels_size.y * z + y)[x]); short2* pos = const_cast<short2*> (tmp_pos); shift_tsdf_pointer(&pos, buffer); return unpack_tsdf (*pos); } __device__ __forceinline__ int3 getVoxel (float3 point) const { int vx = __float2int_rd (point.x / cell_size.x); // round to negative infinity int vy = __float2int_rd (point.y / cell_size.y); int vz = __float2int_rd (point.z / cell_size.z); return make_int3 (vx, vy, vz); } __device__ __forceinline__ float interpolateTrilineary (const float3& origin, const float3& dir, float time, pcl::gpu::tsdf_buffer buffer) const { return interpolateTrilineary (origin + dir * time, buffer); } __device__ __forceinline__ float interpolateTrilineary (const float3& point, pcl::gpu::tsdf_buffer buffer) const { int3 g = getVoxel (point); if (g.x <= 0 || g.x >= buffer.voxels_size.x - 1) return numeric_limits<float>::quiet_NaN (); if (g.y <= 0 || g.y >= buffer.voxels_size.y - 1) return numeric_limits<float>::quiet_NaN (); if (g.z <= 0 || g.z >= buffer.voxels_size.z - 1) return numeric_limits<float>::quiet_NaN (); /* //OLD CODE float vx = (g.x + 0.5f) * cell_size.x; float vy = (g.y + 0.5f) * cell_size.y; float vz = (g.z + 0.5f) * cell_size.z; g.x = (point.x < vx) ? (g.x - 1) : g.x; g.y = (point.y < vy) ? (g.y - 1) : g.y; g.z = (point.z < vz) ? 
(g.z - 1) : g.z; float a = (point.x - (g.x + 0.5f) * cell_size.x) / cell_size.x; float b = (point.y - (g.y + 0.5f) * cell_size.y) / cell_size.y; float c = (point.z - (g.z + 0.5f) * cell_size.z) / cell_size.z; float res = readTsdf (g.x + 0, g.y + 0, g.z + 0, buffer) * (1 - a) * (1 - b) * (1 - c) + readTsdf (g.x + 0, g.y + 0, g.z + 1, buffer) * (1 - a) * (1 - b) * c + readTsdf (g.x + 0, g.y + 1, g.z + 0, buffer) * (1 - a) * b * (1 - c) + readTsdf (g.x + 0, g.y + 1, g.z + 1, buffer) * (1 - a) * b * c + readTsdf (g.x + 1, g.y + 0, g.z + 0, buffer) * a * (1 - b) * (1 - c) + readTsdf (g.x + 1, g.y + 0, g.z + 1, buffer) * a * (1 - b) * c + readTsdf (g.x + 1, g.y + 1, g.z + 0, buffer) * a * b * (1 - c) + readTsdf (g.x + 1, g.y + 1, g.z + 1, buffer) * a * b * c; */ //NEW CODE float a = point.x/ cell_size.x - (g.x + 0.5f); if (a<0) { g.x--; a+=1.0f; }; float b = point.y/ cell_size.y - (g.y + 0.5f); if (b<0) { g.y--; b+=1.0f; }; float c = point.z/ cell_size.z - (g.z + 0.5f); if (c<0) { g.z--; c+=1.0f; }; float res = (1 - a) * ( (1 - b) * ( readTsdf (g.x + 0, g.y + 0, g.z + 0, buffer) * (1 - c) + readTsdf (g.x + 0, g.y + 0, g.z + 1, buffer) * c ) + b * ( readTsdf (g.x + 0, g.y + 1, g.z + 0, buffer) * (1 - c) + readTsdf (g.x + 0, g.y + 1, g.z + 1, buffer) * c ) ) + a * ( (1 - b) * ( readTsdf (g.x + 1, g.y + 0, g.z + 0, buffer) * (1 - c) + readTsdf (g.x + 1, g.y + 0, g.z + 1, buffer) * c ) + b * ( readTsdf (g.x + 1, g.y + 1, g.z + 0, buffer) * (1 - c) + readTsdf (g.x + 1, g.y + 1, g.z + 1, buffer) * c ) ) ; return res; } __device__ __forceinline__ void operator () (pcl::gpu::tsdf_buffer buffer) const { int x = threadIdx.x + blockIdx.x * CTA_SIZE_X; int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y; if (x >= cols || y >= rows) return; vmap.ptr (y)[x] = numeric_limits<float>::quiet_NaN (); nmap.ptr (y)[x] = numeric_limits<float>::quiet_NaN (); float3 ray_start = tcurr; float3 ray_next = Rcurr * get_ray_next (x, y) + tcurr; float3 ray_dir = normalized (ray_next - ray_start); //ensure that it isn't a degenerate case ray_dir.x = (ray_dir.x == 0.f) ? 1e-15 : ray_dir.x; ray_dir.y = (ray_dir.y == 0.f) ? 1e-15 : ray_dir.y; ray_dir.z = (ray_dir.z == 0.f) ? 
1e-15 : ray_dir.z; // computer time when entry and exit volume float time_start_volume = getMinTime (volume_size, ray_start, ray_dir); float time_exit_volume = getMaxTime (volume_size, ray_start, ray_dir); const float min_dist = 0.f; //in meters time_start_volume = fmax (time_start_volume, min_dist); if (time_start_volume >= time_exit_volume) return; float time_curr = time_start_volume; int3 g = getVoxel (ray_start + ray_dir * time_curr); g.x = max (0, min (g.x, buffer.voxels_size.x - 1)); g.y = max (0, min (g.y, buffer.voxels_size.y - 1)); g.z = max (0, min (g.z, buffer.voxels_size.z - 1)); float tsdf = readTsdf (g.x, g.y, g.z, buffer); //infinite loop guard const float max_time = 3 * (volume_size.x + volume_size.y + volume_size.z); for (; time_curr < max_time; time_curr += time_step) { float tsdf_prev = tsdf; int3 g = getVoxel ( ray_start + ray_dir * (time_curr + time_step) ); if (!checkInds (g)) break; tsdf = readTsdf (g.x, g.y, g.z, buffer); if (tsdf_prev < 0.f && tsdf >= 0.f) break; if (tsdf_prev >= 0.f && tsdf < 0.f) //zero crossing { float Ftdt = interpolateTrilineary (ray_start, ray_dir, time_curr + time_step, buffer); if (isnan (Ftdt)) break; float Ft = interpolateTrilineary (ray_start, ray_dir, time_curr, buffer); if (isnan (Ft)) break; //float Ts = time_curr - time_step * Ft/(Ftdt - Ft); float Ts = time_curr - time_step * Ft / (Ftdt - Ft); float3 vetex_found = ray_start + ray_dir * Ts; vmap.ptr (y )[x] = vetex_found.x; vmap.ptr (y + rows)[x] = vetex_found.y; vmap.ptr (y + 2 * rows)[x] = vetex_found.z; int3 g = getVoxel ( ray_start + ray_dir * time_curr ); if (g.x > 1 && g.y > 1 && g.z > 1 && g.x < buffer.voxels_size.x - 2 && g.y < buffer.voxels_size.y - 2 && g.z < buffer.voxels_size.z - 2) { float3 t; float3 n; t = vetex_found; t.x += cell_size.x; float Fx1 = interpolateTrilineary (t, buffer); t = vetex_found; t.x -= cell_size.x; float Fx2 = interpolateTrilineary (t, buffer); n.x = (Fx1 - Fx2); t = vetex_found; t.y += cell_size.y; float Fy1 = interpolateTrilineary (t, buffer); t = vetex_found; t.y -= cell_size.y; float Fy2 = interpolateTrilineary (t, buffer); n.y = (Fy1 - Fy2); t = vetex_found; t.z += cell_size.z; float Fz1 = interpolateTrilineary (t, buffer); t = vetex_found; t.z -= cell_size.z; float Fz2 = interpolateTrilineary (t, buffer); n.z = (Fz1 - Fz2); n = normalized (n); nmap.ptr (y )[x] = n.x; nmap.ptr (y + rows)[x] = n.y; nmap.ptr (y + 2 * rows)[x] = n.z; } break; } } /* for(;;) */ } }; __global__ void rayCastKernel (const RayCaster rc, pcl::gpu::tsdf_buffer buffer) { rc (buffer); } } } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void pcl::device::raycast (const Intr& intr, const Mat33& Rcurr, const float3& tcurr, float tranc_dist, const float3& volume_size, const PtrStep<short2>& volume, const pcl::gpu::tsdf_buffer* buffer, MapArr& vmap, MapArr& nmap) { RayCaster rc; rc.Rcurr = Rcurr; rc.tcurr = tcurr; rc.time_step = tranc_dist * 0.8f; rc.volume_size = volume_size; rc.cell_size.x = volume_size.x / buffer->voxels_size.x; rc.cell_size.y = volume_size.y / buffer->voxels_size.y; rc.cell_size.z = volume_size.z / buffer->voxels_size.z; rc.cols = vmap.cols (); rc.rows = vmap.rows () / 3; rc.intr = intr; rc.volume = volume; rc.vmap = vmap; rc.nmap = nmap; dim3 block (RayCaster::CTA_SIZE_X, RayCaster::CTA_SIZE_Y); dim3 grid (divUp (rc.cols, block.x), divUp (rc.rows, block.y)); rayCastKernel<<<grid, block>>>(rc, *buffer); cudaSafeCall (cudaGetLastError ()); //cudaSafeCall(cudaDeviceSynchronize()); }
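/*
 * Illustrative note (not part of the original file): the surface crossing in
 * the ray caster above is located by linearly interpolating the TSDF between
 * the two samples that bracket the sign change. Modelling F(t) as linear
 * between F(time_curr) = Ft and F(time_curr + time_step) = Ftdt and solving
 * F(Ts) = 0 gives Ts = time_curr - time_step * Ft / (Ftdt - Ft), which is
 * exactly the expression used before vmap/nmap are written.
 */
__device__ __forceinline__ float
tsdf_zero_crossing_sketch (float time_curr, float time_step, float Ft, float Ftdt)
{
  // Assumes the samples bracket the surface (Ft >= 0, Ftdt < 0),
  // so the denominator cannot be zero.
  return time_curr - time_step * Ft / (Ftdt - Ft);
}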
the_stack
#include <stdio.h> #include "dev_evaluate_gravity.cu" #include "n_per_cell.h" #ifdef __DEVICE_EMULATION__ #endif inline int n_norm(int n, int j) { n = ((n-1)/j) * j + j; if (n == 0) n = j; return n; } double get_time(); extern "C" { int n_alloc; int cuda_interaction_list_len; int cuda_interaction_node_len; int cuda_interaction_leaf_len; int cuda_interaction_node_list; int cuda_interaction_leaf_list; int cuda_n_node; int cuda_n_leaf; int3 *dev_interaction_list_len; int2 *dev_interaction_node_len; int2 *dev_interaction_leaf_len; int *dev_interaction_node_list; int *dev_interaction_leaf_list; int *dev_n_node; int *dev_n_leaf; void initCUDA() { // CUT_DEVICE_INIT(); } void allocateCUDAarray(void** pos, int n) { CUDA_SAFE_CALL(cudaMalloc((void**)pos, n)); CUT_CHECK_ERROR("cudaMalloc failed!\n"); } void deleteCUDAarray(void* pos) { CUDA_SAFE_CALL(cudaFree((void*)pos)); CUT_CHECK_ERROR("cudaFree failed!\n"); } void copyArrayToDevice(void* device, const void* host, int n) { CUDA_SAFE_CALL(cudaMemcpy(device, host, n, cudaMemcpyHostToDevice)); CUT_CHECK_ERROR("cudaMemcpy (host->device) failed!\n"); } void copyArrayFromDevice(void* host, const void* device, int n) { CUDA_SAFE_CALL(cudaMemcpy(host, device, n, cudaMemcpyDeviceToHost)); CUT_CHECK_ERROR("cudaMemcpy (device->host) failed!\n"); } void threadSync() { cudaThreadSynchronize(); } #define SAFE_ALLOC(what, oldv, newv) {\ if ((oldv) < (newv)) { \ if ((oldv) > 0) { \ deleteCUDAarray((void*)(what)); \ } \ allocateCUDAarray((void**)&(what), max(int(1.3*(oldv)), (int)(newv)) ); \ n_alloc++; \ oldv = max(int(1.3*(oldv)), (int)(newv)); \ } } double host_evaluate_gravity(float inv_opening_angle_in, float softening_squared_in, int n_bodies_in, float4 *bodies_pos, float4 *bodies_grav, int n_children, int4 *children, int n_nodes_in, float4 root_pos_in, float4 root_com_in, float4 *node_pos, float4 *node_com, float4 *node_Qu, float4 *node_Qd, float4 *Oct1, float4 *Oct2, float2 *Oct3, int *n_in_node, int *node_bodies_offset, int n_cells_in, float4 *cell_pos, float4 *cell_com, int *n_in_cell, int *cell_bodies_offset) { double t_begin = get_time(); bodies_pos_tex.addressMode[0] = cudaAddressModeWrap; bodies_pos_tex.addressMode[1] = cudaAddressModeWrap; bodies_pos_tex.filterMode = cudaFilterModePoint; bodies_pos_tex.normalized = false; CUDA_SAFE_CALL(cudaBindTexture(0, bodies_pos_tex, bodies_pos, n_bodies_in * sizeof(float4))); /***************************************************/ children_tex.addressMode[0] = cudaAddressModeWrap; children_tex.addressMode[1] = cudaAddressModeWrap; children_tex.filterMode = cudaFilterModePoint; children_tex.normalized = false; CUDA_SAFE_CALL(cudaBindTexture(0, children_tex, children, n_children * sizeof(int4))); /***************************************************/ node_bodies_offset_tex.addressMode[0] = cudaAddressModeWrap; node_bodies_offset_tex.addressMode[1] = cudaAddressModeWrap; node_bodies_offset_tex.filterMode = cudaFilterModePoint; node_bodies_offset_tex.normalized = false; CUDA_SAFE_CALL(cudaBindTexture(0, node_bodies_offset_tex, node_bodies_offset, n_nodes_in * sizeof(int))); cell_bodies_offset_tex.addressMode[0] = cudaAddressModeWrap; cell_bodies_offset_tex.addressMode[1] = cudaAddressModeWrap; cell_bodies_offset_tex.filterMode = cudaFilterModePoint; cell_bodies_offset_tex.normalized = false; CUDA_SAFE_CALL(cudaBindTexture(0, cell_bodies_offset_tex, cell_bodies_offset, n_cells_in * sizeof(int))); /***************************************************/ node_pos_tex.addressMode[0] = cudaAddressModeWrap; 
node_pos_tex.addressMode[1] = cudaAddressModeWrap; node_pos_tex.filterMode = cudaFilterModePoint; node_pos_tex.normalized = false; CUDA_SAFE_CALL(cudaBindTexture(0, node_pos_tex, node_pos, n_nodes_in * sizeof(float4))); node_com_tex.addressMode[0] = cudaAddressModeWrap; node_com_tex.addressMode[1] = cudaAddressModeWrap; node_com_tex.filterMode = cudaFilterModePoint; node_com_tex.normalized = false; CUDA_SAFE_CALL(cudaBindTexture(0, node_com_tex, node_com, n_nodes_in * sizeof(float4))); node_Qu_tex.addressMode[0] = cudaAddressModeWrap; node_Qu_tex.addressMode[1] = cudaAddressModeWrap; node_Qu_tex.filterMode = cudaFilterModePoint; node_Qu_tex.normalized = false; CUDA_SAFE_CALL(cudaBindTexture(0, node_Qu_tex, node_Qu, n_nodes_in * sizeof(float4))); node_Qd_tex.addressMode[0] = cudaAddressModeWrap; node_Qd_tex.addressMode[1] = cudaAddressModeWrap; node_Qd_tex.filterMode = cudaFilterModePoint; node_Qd_tex.normalized = false; CUDA_SAFE_CALL(cudaBindTexture(0, node_Qd_tex, node_Qd, n_nodes_in * sizeof(float4))); Oct1_tex.addressMode[0] = cudaAddressModeWrap; Oct1_tex.addressMode[1] = cudaAddressModeWrap; Oct1_tex.filterMode = cudaFilterModePoint; Oct1_tex.normalized = false; CUDA_SAFE_CALL(cudaBindTexture(0, Oct1_tex, Oct1, n_nodes_in * sizeof(float4))); Oct2_tex.addressMode[0] = cudaAddressModeWrap; Oct2_tex.addressMode[1] = cudaAddressModeWrap; Oct2_tex.filterMode = cudaFilterModePoint; Oct2_tex.normalized = false; CUDA_SAFE_CALL(cudaBindTexture(0, Oct2_tex, Oct2, n_nodes_in * sizeof(float4))); Oct3_tex.addressMode[0] = cudaAddressModeWrap; Oct3_tex.addressMode[1] = cudaAddressModeWrap; Oct3_tex.filterMode = cudaFilterModePoint; Oct3_tex.normalized = false; CUDA_SAFE_CALL(cudaBindTexture(0, Oct3_tex, Oct3, n_nodes_in * sizeof(float2))); n_in_node_tex.addressMode[0] = cudaAddressModeWrap; n_in_node_tex.addressMode[1] = cudaAddressModeWrap; n_in_node_tex.filterMode = cudaFilterModePoint; n_in_node_tex.normalized = false; CUDA_SAFE_CALL(cudaBindTexture(0, n_in_node_tex, n_in_node, n_nodes_in * sizeof(int))); /***************************************************/ cell_pos_tex.addressMode[0] = cudaAddressModeWrap; cell_pos_tex.addressMode[1] = cudaAddressModeWrap; cell_pos_tex.filterMode = cudaFilterModePoint; cell_pos_tex.normalized = false; CUDA_SAFE_CALL(cudaBindTexture(0, cell_pos_tex, cell_pos, n_cells_in * sizeof(float4))); cell_com_tex.addressMode[0] = cudaAddressModeWrap; cell_com_tex.addressMode[1] = cudaAddressModeWrap; cell_com_tex.filterMode = cudaFilterModePoint; cell_com_tex.normalized = false; CUDA_SAFE_CALL(cudaBindTexture(0, cell_com_tex, cell_com, n_cells_in * sizeof(float4))); n_in_cell_tex.addressMode[0] = cudaAddressModeWrap; n_in_cell_tex.addressMode[1] = cudaAddressModeWrap; n_in_cell_tex.filterMode = cudaFilterModePoint; n_in_cell_tex.normalized = false; CUDA_SAFE_CALL(cudaBindTexture(0, n_in_cell_tex, n_in_cell, n_cells_in * sizeof(int))); /***************************************************/ #if CUDART_VERSION < 5000 CUDA_SAFE_CALL(cudaMemcpyToSymbol("inv_opening_angle", &inv_opening_angle_in, sizeof(float), 0, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpyToSymbol("softening_squared", &softening_squared_in, sizeof(float), 0, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpyToSymbol("root_pos", &root_pos_in, sizeof(float4), 0, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpyToSymbol("root_com", &root_com_in, sizeof(float4), 0, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpyToSymbol("n_nodes", &n_nodes_in, sizeof(int), 0, cudaMemcpyHostToDevice)); 
CUDA_SAFE_CALL(cudaMemcpyToSymbol("n_cells", &n_cells_in, sizeof(int), 0, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpyToSymbol("n_bodies", &n_bodies_in, sizeof(int), 0, cudaMemcpyHostToDevice)); #else CUDA_SAFE_CALL(cudaMemcpyToSymbol(inv_opening_angle, &inv_opening_angle_in, sizeof(float), 0, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpyToSymbol(softening_squared, &softening_squared_in, sizeof(float), 0, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpyToSymbol(root_pos, &root_pos_in, sizeof(float4), 0, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpyToSymbol(root_com, &root_com_in, sizeof(float4), 0, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpyToSymbol(n_nodes, &n_nodes_in, sizeof(int), 0, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpyToSymbol(n_cells, &n_cells_in, sizeof(int), 0, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpyToSymbol(n_bodies, &n_bodies_in, sizeof(int), 0, cudaMemcpyHostToDevice)); #endif /***************************************************/ /***** building interaction list *****/ int p = 128; int n_cells_dev = n_cells_in; if (n_cells_dev < p) p = n_cells_dev; else n_cells_dev = n_norm(n_cells_dev, p); dim3 threads(p,1,1); dim3 grid(n_cells_dev/p, 1, 1); /*** computing interaction list length ***/ int3 *hst_interaction_list_len; // int3 *dev_interaction_list_len = NULL SAFE_ALLOC(dev_interaction_list_len, cuda_interaction_list_len, n_cells_in * sizeof(int3)); // allocateCUDAarray((void**)&dev_interaction_list_len, n_cells_in * sizeof(int3)); hst_interaction_list_len = (int3*)malloc(n_cells_in * sizeof(int3)); double t1 = get_time(); double dt_ilen = 0; fprintf(stderr, " compute_interaction_list_len ... "); dev_compute_interaction_list_len<<<grid, threads>>>(dev_interaction_list_len); threadSync(); fprintf(stderr, " done in %lf seconds \n", (dt_ilen = get_time() - t1)); /************************/ /*** computing offset ***/ /************************/ copyArrayFromDevice(hst_interaction_list_len, dev_interaction_list_len, n_cells_in * sizeof(int3)); int* hst_n_in_cell = (int*)malloc(n_cells_in * sizeof(int)); copyArrayFromDevice(hst_n_in_cell, n_in_cell, n_cells_in * sizeof(int)); int2 *hst_interaction_node_len; //, *dev_interaction_node_len; int2 *hst_interaction_leaf_len; //, *dev_interaction_leaf_len; hst_interaction_node_len = (int2*)malloc((n_cells_in + 1) * sizeof(int2)); hst_interaction_leaf_len = (int2*)malloc((n_cells_in + 1) * sizeof(int2)); SAFE_ALLOC(dev_interaction_node_len, cuda_interaction_node_len, n_cells_in * sizeof(int2)); SAFE_ALLOC(dev_interaction_leaf_len, cuda_interaction_leaf_len, n_cells_in * sizeof(int2)); // allocateCUDAarray((void**)&dev_interaction_node_len, n_cells_in * sizeof(int2)); // allocateCUDAarray((void**)&dev_interaction_leaf_len, n_cells_in * sizeof(int2)); long long int n_io = 0; int n_interacting_nodes_total = 0, n_interacting_leaves_total = 0; int n_interacting_nodes_max = 0; int n_interacting_leaves_max = 0; int n_blocks = 0; for (int i = 0; i < n_cells_in; i += p*NBLOCKS) { n_blocks += 1; int n_in_block = min(n_cells_in - i, p*NBLOCKS); // fprintf(stderr, " block %d n_in_block= %d cell_offset= %d\n", // n_blocks, n_in_block, i); hst_interaction_node_len[i].x = 0; hst_interaction_leaf_len[i].x = 0; int n_interacting_nodes = 0, n_interacting_leaves = 0; for (int j = 0; j < n_in_block; j++) { int3 val = hst_interaction_list_len[i+j]; n_interacting_nodes += val.x; n_interacting_leaves += val.y; n_io += val.z; hst_interaction_node_len[i+j+1].x = hst_interaction_node_len[i+j].x + val.x; 
hst_interaction_leaf_len[i+j+1].x = hst_interaction_leaf_len[i+j].x + val.y; hst_interaction_node_len[i+j].y = val.x; hst_interaction_leaf_len[i+j].y = val.y; } n_interacting_nodes_max = max(n_interacting_nodes_max, n_interacting_nodes); n_interacting_leaves_max = max(n_interacting_leaves_max, n_interacting_leaves); n_interacting_nodes_total += n_interacting_nodes; n_interacting_leaves_total += n_interacting_leaves; } copyArrayToDevice(dev_interaction_node_len, hst_interaction_node_len, n_cells_in * sizeof(int2)); copyArrayToDevice(dev_interaction_leaf_len, hst_interaction_leaf_len, n_cells_in * sizeof(int2)); free(hst_n_in_cell); fprintf(stderr, " *****************************************************\n"); fprintf(stderr, " n_blocks= %d n_in_block= %d\n", n_blocks, p*NBLOCKS); fprintf(stderr, " #interacting nodes= %d [max in block= %d]\n", n_interacting_nodes_total, n_interacting_nodes_max); fprintf(stderr, " #interacting leaves= %d [max in block= %d]\n", n_interacting_leaves_total, n_interacting_leaves_max); fprintf(stderr, " read+write_len= %lg GB (%lg GB/s)\n", n_io*4.0/pow(1024.0,3.0), n_io*4.0/pow(1024.0,3.0)/dt_ilen); fprintf(stderr, " *****************************************************\n"); /*********************************/ /*** building interaction list ***/ /*********************************/ // int *dev_interaction_node_list, *dev_interaction_leaf_list; // allocateCUDAarray((void**)&dev_interaction_node_list, n_interacting_nodes_max * sizeof(int)); // allocateCUDAarray((void**)&dev_interaction_leaf_list, n_interacting_leaves_max * sizeof(int)); if(n_interacting_nodes_max <= 0) n_interacting_nodes_max = 10; if(n_interacting_leaves_max <= 0) n_interacting_leaves_max = 10; SAFE_ALLOC(dev_interaction_node_list, cuda_interaction_node_list, n_interacting_nodes_max * sizeof(int)); SAFE_ALLOC(dev_interaction_leaf_list, cuda_interaction_leaf_list, n_interacting_leaves_max * sizeof(int)); int *hst_n_node; int *hst_n_leaf; SAFE_ALLOC(dev_n_node, cuda_n_node, n_bodies_in * sizeof(int)); SAFE_ALLOC(dev_n_leaf, cuda_n_leaf, n_bodies_in * sizeof(int)); // allocateCUDAarray((void**)&dev_n_node, n_bodies_in * sizeof(int)); // allocateCUDAarray((void**)&dev_n_leaf, n_bodies_in * sizeof(int)); hst_n_node = (int*)malloc(n_bodies_in * sizeof(int)); hst_n_leaf = (int*)malloc(n_bodies_in * sizeof(int)); double dt_ibuild = 0, dt_node = 0, dt_leaf = 0; int cur_block = 0; for (int i = 0; i < n_cells_in; i += p*NBLOCKS) { cur_block++; int n_in_block = min(n_cells_in - i, p*NBLOCKS); dim3 threads(p, 1, 1); dim3 grid(n_norm(n_in_block, p)/p, 1, 1); if (n_in_block < p) { threads.x = n_in_block; grid.x = 1; } fprintf(stderr, " block %d out of %d\n", cur_block, n_blocks); // fprintf(stderr, " n_in_block= %d cell_offset= %d\n", // n_in_block, i); // fprintf(stderr, "threads= [%d %d %d]\n", threads.x, threads.y, threads.z); // fprintf(stderr, " grid= [%d %d %d]\n", grid.x, grid.y, grid.z); double dt_ibuild0 = 0; t1 = get_time(); fprintf(stderr, " build_interaction_list ... 
"); dev_build_interaction_list<<<grid, threads>>>(i, dev_interaction_node_list, dev_interaction_node_len, dev_interaction_leaf_list, dev_interaction_leaf_len); threadSync(); dt_ibuild0 = get_time() - t1; dt_ibuild += dt_ibuild0; fprintf(stderr, " done in %lf seconds [%lf] \n", dt_ibuild0, dt_ibuild); /***************************************************/ interaction_node_tex.addressMode[0] = cudaAddressModeWrap; interaction_node_tex.addressMode[1] = cudaAddressModeWrap; interaction_node_tex.filterMode = cudaFilterModePoint; interaction_node_tex.normalized = false; CUDA_SAFE_CALL(cudaBindTexture(0, interaction_node_tex, dev_interaction_node_list, n_interacting_nodes_max * sizeof(int))); interaction_leaf_tex.addressMode[0] = cudaAddressModeWrap; interaction_leaf_tex.addressMode[1] = cudaAddressModeWrap; interaction_leaf_tex.filterMode = cudaFilterModePoint; interaction_leaf_tex.normalized = false; CUDA_SAFE_CALL(cudaBindTexture(0, interaction_leaf_tex, dev_interaction_leaf_list, n_interacting_leaves_max * sizeof(int))); /***************************************************/ int p_node = NTHREADS; int p_leaf = NCRIT; dim3 thread_node(p_node, 1, 1); dim3 thread_leaf(p_leaf, 1, 1); dim3 grid_node(n_in_block, 1, 1); dim3 grid_leaf(n_in_block, 1, 1); int shared_mem_size_leaf = 3 * p_leaf * sizeof(float4); int shared_mem_size_node = p_node * (3*sizeof(float4) + sizeof(float2)); double dt_node0 = 0; fprintf(stderr, " evaluate_gravity_node ... "); t1 = get_time(); dev_evaluate_gravity_node<<<grid_node, thread_node, shared_mem_size_node>>>(i, bodies_grav, dev_n_node, dev_interaction_node_len); threadSync(); dt_node0 = get_time() - t1; dt_node += dt_node0; fprintf(stderr, " done in %lf seconds [%lf] \n", dt_node0, dt_node); double dt_leaf0 = 0; fprintf(stderr, " evaluate_gravity_leaf ... 
"); t1 = get_time(); dev_evaluate_gravity_leaf<<<grid_leaf, thread_leaf, shared_mem_size_leaf>>>(i, bodies_grav, dev_n_leaf, dev_interaction_leaf_len); threadSync(); dt_leaf0 = get_time() - t1; dt_leaf += dt_leaf0; fprintf(stderr, " done in %lf seconds [%lf] \n", dt_leaf0, dt_leaf); CUDA_SAFE_CALL(cudaUnbindTexture(interaction_leaf_tex)); CUDA_SAFE_CALL(cudaUnbindTexture(interaction_node_tex)); } copyArrayFromDevice(hst_n_node, dev_n_node, n_bodies_in * sizeof(int)); copyArrayFromDevice(hst_n_leaf, dev_n_leaf, n_bodies_in * sizeof(int)); long long n_leaf = 0, n_node = 0; for (int i = 0; i < n_bodies_in; i++) { n_node += hst_n_node[i]; n_leaf += hst_n_leaf[i]; } double flops_per_node = 57; double flops_per_leaf = 21; double flops_per_node1 = 57 + 16; double flops_per_leaf1 = 21 + 16; #ifdef OCTUPOLE flops_per_node += 108; flops_per_node += 108; flops_per_node1 += 108; flops_per_node1 += 108; #endif fprintf(stderr, " *****************************************************\n"); fprintf(stderr, " #interacting nodes= %d [max in block= %d]\n", n_interacting_nodes_total, n_interacting_nodes_max); fprintf(stderr, " #interacting leaves= %d [max in block= %d]\n", n_interacting_leaves_total, n_interacting_leaves_max); fprintf(stderr, " read+write_len= %lg GB (%lg GB/s)\n", n_io*4.0/pow(1024.0,3.0), n_io*4.0/pow(1024.0,3.0)/dt_ilen); n_io += n_interacting_nodes_total + n_interacting_leaves_total; fprintf(stderr, " read+write_bld= %lg GB (%lg GB/s)\n", n_io*4.0/pow(1024.0,3.0), n_io*4.0/pow(1024.0,3.0)/dt_ibuild); fprintf(stderr, " interaction statistics: \n"); fprintf(stderr, " n_nodes= %ld (%lg [%lg] GLFOPS)\n", n_node, n_node*flops_per_node/dt_node/1e9, n_node*flops_per_node1/dt_node/1e9); fprintf(stderr, " n_leaves= %ld (%lg [%lg] GFLOPS)\n", n_leaf, n_leaf*flops_per_leaf/dt_leaf/1e9, n_leaf*flops_per_leaf1/dt_leaf/1e9); fprintf(stderr, " *****************************************************\n"); /***************************************************/ CUDA_SAFE_CALL(cudaUnbindTexture(n_in_cell_tex)); CUDA_SAFE_CALL(cudaUnbindTexture(cell_pos_tex)); CUDA_SAFE_CALL(cudaUnbindTexture(cell_com_tex)); CUDA_SAFE_CALL(cudaUnbindTexture(n_in_node_tex)); CUDA_SAFE_CALL(cudaUnbindTexture(node_Qu_tex)); CUDA_SAFE_CALL(cudaUnbindTexture(node_Qd_tex)); CUDA_SAFE_CALL(cudaUnbindTexture(Oct1_tex)); CUDA_SAFE_CALL(cudaUnbindTexture(Oct2_tex)); CUDA_SAFE_CALL(cudaUnbindTexture(Oct3_tex)); CUDA_SAFE_CALL(cudaUnbindTexture(node_com_tex)); CUDA_SAFE_CALL(cudaUnbindTexture(node_pos_tex)); CUDA_SAFE_CALL(cudaUnbindTexture(cell_bodies_offset_tex)); CUDA_SAFE_CALL(cudaUnbindTexture(node_bodies_offset_tex)); CUDA_SAFE_CALL(cudaUnbindTexture(children_tex)); CUDA_SAFE_CALL(cudaUnbindTexture(bodies_pos_tex)); /***************************************************/ free(hst_n_node); free(hst_n_leaf); // deleteCUDAarray((void*)dev_n_node); // deleteCUDAarray((void*)dev_n_leaf); free(hst_interaction_list_len); // deleteCUDAarray((void*)dev_interaction_list_len); free(hst_interaction_node_len); free(hst_interaction_leaf_len); // deleteCUDAarray((void*)dev_interaction_node_len); // deleteCUDAarray((void*)dev_interaction_leaf_len); // deleteCUDAarray((void*)dev_interaction_node_list); // deleteCUDAarray((void*)dev_interaction_leaf_list); return get_time() - t_begin; } };
the_stack
template <typename FloatT> AdamTransformGradientUpdater<FloatT>::AdamTransformGradientUpdater( const size_t source_vector_dim, const size_t target_vector_dim, Streams* const streams, const FloatT beta1, const FloatT beta2, const FloatT epsilon) : TransformGradientUpdater<FloatT>( epsilon, {new TransformStorage<FloatT>(source_vector_dim, target_vector_dim, streams), /* m_prev */ new TransformStorage<FloatT>(source_vector_dim, target_vector_dim, streams) /* v_prev */}), beta1_(beta1), beta2_(beta2), t_(1) {} template <typename FloatT> void adam_update(const device_matrix<FloatT>& m, const device_matrix<FloatT>& v, device_matrix<FloatT>* grad, const FloatT bias_correction, const FloatT epsilon) { CHECK_DIMENSIONS_EQUAL(m, *grad); CHECK_DIMENSIONS_EQUAL(v, *grad); thrust::transform( thrust::make_transform_iterator( m.begin(), func::scale_by_constant<FloatT>(bias_correction)), thrust::make_transform_iterator( m.end(), func::scale_by_constant<FloatT>(bias_correction)), thrust::make_transform_iterator( thrust::make_transform_iterator( v.begin(), func::sqrt<FloatT>()), func::add_constant<FloatT>(epsilon)), grad->begin(), thrust::divides<FloatT>()); } template <typename FloatT> void AdamTransformGradientUpdater<FloatT>::update( TransformStorage<FloatT>* const storage, typename TransformStorage<FloatT>::GradientType* const gradient_desc, const FloatT learning_rate, const FloatT scaled_regularization_lambda, Streams* const streams) { CHECK(storage != nullptr); apply_regularization( streams->next(), scaled_regularization_lambda, storage, gradient_desc); // Update m_t. dynamic_cast<TransformStorage<FloatT>*>(this->storages_[0].get())->update( *gradient_desc, 1.0 - beta1_, /* learning_rate */ 1.0, /* regularization_lambda */ streams, func::identity<FloatT>() /* update_transform_op */); // Update v_t. 
dynamic_cast<TransformStorage<FloatT>*>(this->storages_[1].get())->update( *gradient_desc, 1.0 - beta2_, /* learning_rate */ 1.0, /* regularization_lambda */ streams, func::square<FloatT>() /* update_transform_op */); device_matrix<FloatT>& grad_transform = std::get<0>(*gradient_desc); device_matrix<FloatT>& grad_bias = std::get<1>(*gradient_desc); device_matrix<FloatT>* m_transform; device_matrix<FloatT>* m_bias; device_matrix<FloatT>* v_transform; device_matrix<FloatT>* v_bias; std::tie(m_transform, m_bias) = dynamic_cast<TransformStorage<FloatT>*>(this->storages_[0].get())->get(); std::tie(v_transform, v_bias) = dynamic_cast<TransformStorage<FloatT>*>(this->storages_[1].get())->get(); const FloatT bias_correction = sqrt(1.0 - pow(beta2_, t_)) / (1.0 - pow(beta1_, t_)); adam_update(*m_transform, *v_transform, &grad_transform, bias_correction, this->epsilon_); adam_update(*m_bias, *v_bias, &grad_bias, bias_correction, this->epsilon_); t_ += 1; CHECK_MATRIX(grad_transform); CHECK_MATRIX(grad_bias); return storage->update( *gradient_desc, learning_rate, static_cast<FloatT>(0.0) /* regularization_lambda */, streams); } #define DENSE_UPDATE_DENSE_VARIANCE AdamConf::DENSE_UPDATE_DENSE_VARIANCE #define DENSE_UPDATE AdamConf::DENSE_UPDATE #define SPARSE AdamConf::SPARSE template <typename FloatT, typename IdxType> AdamRepresentationsGradientUpdater<FloatT, IdxType>::AdamRepresentationsGradientUpdater( const size_t num_objects, const size_t repr_size, const AdamConf& conf, Streams* const streams, const FloatT beta1, const FloatT beta2, const FloatT epsilon) : RepresentationsGradientUpdater<FloatT, IdxType>( epsilon, {new RepresentationsStorage<FloatT, IdxType>(num_objects, repr_size, streams), /* m_prev */ new RepresentationsStorage<FloatT, IdxType>(num_objects, conf.mode() < DENSE_UPDATE_DENSE_VARIANCE ? 
1 : repr_size /* repr_size */, streams) /* v_prev */}), conf_(conf), beta1_(beta1), beta2_(beta2), t_(1) {} template <typename FloatT, typename IdxType> __global__ void adam_sparse_update_kernel(const size_t window_size, const IdxType* const indices, const FloatT* const m, const FloatT* const v, FloatT* const grad, const FloatT bias_correction, const FloatT epsilon) { FloatT agg_m = 0.0; FloatT agg_v = 0.0; for (IdxType w = 0; w < window_size; ++w) { agg_m += m[indices[blockIdx.x * window_size + w] * blockDim.x + threadIdx.x]; agg_v += v[indices[blockIdx.x * window_size + w]]; } agg_m /= window_size; agg_v /= window_size; grad[blockIdx.x * blockDim.x + threadIdx.x] = bias_correction * agg_m / (::sqrt(agg_v) + epsilon); } template <typename FloatT, typename IdxType> void AdamRepresentationsGradientUpdater<FloatT, IdxType>::update( RepresentationsStorage<FloatT, IdxType>* const storage, typename RepresentationsStorage<FloatT, IdxType>::GradientType* const gradient_descs, const FloatT learning_rate, const FloatT scaled_regularization_lambda, Streams* const streams) { CHECK(storage != nullptr); const bool use_sgd_regularization = (conf_.mode() < DENSE_UPDATE_DENSE_VARIANCE); if (use_sgd_regularization) { LOG_IF_EVERY_N(WARNING, scaled_regularization_lambda > 0.0, 10000) << "Sparse variants of Adam currently do not correctly implement l2 regularization."; } RepresentationsStorage<FloatT, IdxType>* const m_storage = dynamic_cast<RepresentationsStorage<FloatT, IdxType>*>( this->storages_[0].get()); RepresentationsStorage<FloatT, IdxType>* const v_storage = dynamic_cast<RepresentationsStorage<FloatT, IdxType>*>( this->storages_[1].get()); device_matrix<FloatT>* m = m_storage->get(); device_matrix<FloatT>* v = v_storage->get(); // Invariants. for (const typename RepresentationsStorage<FloatT, IdxType>::SingleGradientType& gradient_desc : *gradient_descs) { device_matrix<FloatT>& gradient = std::get<0>(gradient_desc); const size_t repr_size = gradient.getRows(); const size_t num_grads = gradient.getCols(); CHECK_EQ((dynamic_cast<RepresentationsStorage<FloatT, IdxType>*>( this->storages_[0].get())->get()->getRows()), repr_size); const device_matrix<IdxType>& indices = std::get<1>(gradient_desc); const size_t window_size = std::get<2>(gradient_desc); CHECK_EQ(indices.getCols() % window_size, 0); } // Update m_t. dynamic_cast<RepresentationsStorage<FloatT, IdxType>*>(this->storages_[0].get())->update( *gradient_descs, 1.0 - beta1_, /* learning_rate */ 1.0, /* regularization_lambda */ streams); // Add regularization within m_t. if (!use_sgd_regularization) { // m_t = beta1 * m_{t-1} + (1.0 - beta1) grad // with grad = grad - lambda * params // m_t = beta1 * m_{t-1} + (1.0 - beta1) grad - (1.0 - beta1) * lambda * params apply_regularization( streams->next(), static_cast<FloatT>((1.0 - beta1_) * scaled_regularization_lambda), storage->get(), m); } // Update v_t. if (conf_.mode() < DENSE_UPDATE_DENSE_VARIANCE) { std::vector<std::unique_ptr<device_matrix<FloatT>>> matrix_ptrs; // For memory management. 
std::vector<typename RepresentationsStorage<FloatT, IdxType>::SingleGradientType> average_squared_gradients; for (const typename RepresentationsStorage<FloatT, IdxType>::SingleGradientType& gradient_desc : *gradient_descs) { device_matrix<FloatT>& gradient = std::get<0>(gradient_desc); matrix_ptrs.push_back( std::unique_ptr<device_matrix<FloatT>>( new device_matrix<FloatT>( 1, /* num_rows */ gradient.getCols(), gradient.getStream()))); device_matrix<FloatT>* const average_squared_gradient = matrix_ptrs.back().get(); reduce_axis<FloatT, func::square<FloatT>>( average_squared_gradient->getStream(), FIRST_AXIS, gradient, average_squared_gradient); average_squared_gradient->scale( average_squared_gradient->getStream(), exp(-log(gradient.getRows()))); average_squared_gradients.push_back( std::forward_as_tuple(*average_squared_gradient, std::get<1>(gradient_desc), std::get<2>(gradient_desc), std::get<3>(gradient_desc))); } v_storage->update(average_squared_gradients, 1.0 - beta2_, /* learning_rate */ 1.0, /* regularization_lambda */ streams); } else { CHECK(!use_sgd_regularization); std::unique_ptr<RepresentationsStorage<FloatT, IdxType>> agg_repr_grad( new RepresentationsStorage<FloatT, IdxType>( v_storage->num_objects(), v_storage->repr_size(), DefaultStream::get())); agg_repr_grad->initialize_with_null(); agg_repr_grad->update(*gradient_descs, 1.0, /* learning_rate */ 0.0, /* scaled_regularization_lambda */ streams); apply_regularization( streams->next(), scaled_regularization_lambda, storage->get(), agg_repr_grad->get()); agg_repr_grad->get()->square(agg_repr_grad->get()->getStream()); // g_t^2 v_storage->update_dense(merge_streams(v->getStream(), agg_repr_grad->get()->getStream()), begin(*agg_repr_grad->get()), 1.0 - beta2_, /* learning_rate */ 1.0 /* regularization_lambda */); } // Compute update. const FloatT bias_correction = sqrt(1.0 - pow(beta2_, t_)) / (1.0 - pow(beta1_, t_)); t_ += 1; if (conf_.mode() >= DENSE_UPDATE) { const cudaStream_t m_v_stream = merge_streams( m->getStream(), v->getStream()); if (conf_.mode() == DENSE_UPDATE) { return storage->update_dense( m_v_stream, thrust::make_transform_iterator( thrust::make_transform_iterator( thrust::make_zip_iterator( thrust::make_tuple( begin(*m), thrust::make_transform_iterator( thrust::make_transform_iterator( thrust::make_permutation_iterator( begin(*v), /* elements */ make_matrix_column_iterator(*m) /* map */), func::sqrt<FloatT>()), func::add_constant<FloatT>(this->epsilon_)))), func::divides_tuple<FloatT>()), func::scale_by_constant<FloatT>(bias_correction)), learning_rate, scaled_regularization_lambda); } else if (conf_.mode() == DENSE_UPDATE_DENSE_VARIANCE) { return storage->update_dense( m_v_stream, thrust::make_transform_iterator( thrust::make_transform_iterator( thrust::make_zip_iterator( thrust::make_tuple( begin(*m), thrust::make_transform_iterator( thrust::make_transform_iterator( begin(*v), /* elements */ func::sqrt<FloatT>()), func::add_constant<FloatT>(this->epsilon_)))), func::divides_tuple<FloatT>()), func::scale_by_constant<FloatT>(bias_correction)), learning_rate, 0.0 /* scaled_regularzation_lambda */); } else { LOG(FATAL) << "Invalid mode configuration."; } } else { // This is a variant of the sparse implementation of Adam. // // Statistics are kept on a per-representation level, but updates are averaged over // all objects in one window. // // The true sparse algorithm would track statistics per representation, but not // spread the updates over all objects in one window. 
It would simply load the // right update for every object in the batch. However, this would require // deduplicating (i.e., sorting) the object indices, resizing the indices and update // tensors and then copying. This is probably more expensive than simply computing the full // update, as this avoids the deduplication step (albeit more expensive memory-wise, // depending on the batch size, corpus size and the distribution of instances). // // TODO(cvangysel): implement an option to compute the full gradient. CHECK_EQ(gradient_descs->size(), 1) << "Sparse Adam currently does not implement multiple gradients."; const typename RepresentationsStorage<FloatT, IdxType>::SingleGradientType& gradient_desc = gradient_descs->front(); device_matrix<FloatT>& gradient = std::get<0>(gradient_desc); const size_t repr_size = gradient.getRows(); const size_t num_grads = gradient.getCols(); const device_matrix<IdxType>& indices = std::get<1>(gradient_desc); const size_t window_size = std::get<2>(gradient_desc); LAUNCH_KERNEL( adam_sparse_update_kernel<<<num_grads, /* num_blocks */ repr_size, /* threads_per_block */ 0, merge_streams( m->getStream(), v->getStream())>>>( window_size, indices.getData(), m->getData(), v->getData(), gradient.getData(), bias_correction, this->epsilon_)); CHECK_MATRIX(gradient); return storage->update( *gradient_descs, learning_rate, use_sgd_regularization ? scaled_regularization_lambda : static_cast<FloatT>(0.0), /* scaled_regularization_lambda */ streams); } } // Explicit instantiations. template class AdamTransformGradientUpdater<FLOATING_POINT_TYPE>; template class AdamRepresentationsGradientUpdater<FLOATING_POINT_TYPE, int32>;
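/*
 * Illustrative scalar form (not part of the original file) of the update that
 * adam_update() and adam_sparse_update_kernel() apply element-wise: given the
 * biased moment estimates m_t and v_t, the step direction is
 *   sqrt(1 - beta2^t) / (1 - beta1^t) * m_t / (sqrt(v_t) + epsilon),
 * i.e. the classic Adam rule with both bias corrections folded into the single
 * scalar bias_correction computed above.
 */
static inline double adam_step_sketch(double m_t, double v_t,
                                      double beta1, double beta2,
                                      double epsilon, int t) {
  const double bias_correction =
      sqrt(1.0 - pow(beta2, t)) / (1.0 - pow(beta1, t));
  return bias_correction * m_t / (sqrt(v_t) + epsilon);
}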
the_stack
#include "_reg_localTransformation_gpu.h" #include "_reg_localTransformation_kernels.cu" /* *************************************************************** */ /* *************************************************************** */ void reg_spline_getDeformationField_gpu(nifti_image *controlPointImage, nifti_image *reference, float4 **controlPointImageArray_d, float4 **positionFieldImageArray_d, int **mask_d, int activeVoxelNumber, bool bspline) { // Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0); const int voxelNumber = reference->nx * reference->ny * reference->nz; const int controlPointNumber = controlPointImage->nx*controlPointImage->ny*controlPointImage->nz; const int3 referenceImageDim = make_int3(reference->nx, reference->ny, reference->nz); const int3 controlPointImageDim = make_int3(controlPointImage->nx, controlPointImage->ny, controlPointImage->nz); const int useBSpline = static_cast<int>(bspline); const float3 controlPointVoxelSpacing = make_float3( controlPointImage->dx / reference->dx, controlPointImage->dy / reference->dy, controlPointImage->dz / reference->dz); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_UseBSpline,&useBSpline,sizeof(int))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ReferenceImageDim,&referenceImageDim,sizeof(int3))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointImageDim,&controlPointImageDim,sizeof(int3))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointVoxelSpacing,&controlPointVoxelSpacing,sizeof(float3))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ActiveVoxelNumber,&activeVoxelNumber,sizeof(int))) NR_CUDA_SAFE_CALL(cudaBindTexture(0, controlPointTexture, *controlPointImageArray_d, controlPointNumber*sizeof(float4))) NR_CUDA_SAFE_CALL(cudaBindTexture(0, maskTexture, *mask_d, activeVoxelNumber*sizeof(int))) if(reference->nz>1){ const unsigned int Grid_reg_spline_getDeformationField3D = (unsigned int)ceilf(sqrtf((float)activeVoxelNumber/(float)(NR_BLOCK->Block_reg_spline_getDeformationField3D))); dim3 G1(Grid_reg_spline_getDeformationField3D,Grid_reg_spline_getDeformationField3D,1); dim3 B1(NR_BLOCK->Block_reg_spline_getDeformationField3D,1,1); // 8 floats of shared memory are allocated per thread reg_spline_getDeformationField3D <<< G1, B1, NR_BLOCK->Block_reg_spline_getDeformationField3D*8*sizeof(float) >>> (*positionFieldImageArray_d); NR_CUDA_CHECK_KERNEL(G1,B1) } else{ const unsigned int Grid_reg_spline_getDeformationField2D = (unsigned int)ceilf(sqrtf((float)activeVoxelNumber/(float)(NR_BLOCK->Block_reg_spline_getDeformationField2D))); dim3 G1(Grid_reg_spline_getDeformationField2D,Grid_reg_spline_getDeformationField2D,1); dim3 B1(NR_BLOCK->Block_reg_spline_getDeformationField2D,1,1); // 4 floats of shared memory are allocated per thread reg_spline_getDeformationField2D <<< G1, B1, NR_BLOCK->Block_reg_spline_getDeformationField2D*4*sizeof(float) >>> (*positionFieldImageArray_d); NR_CUDA_CHECK_KERNEL(G1,B1) } NR_CUDA_SAFE_CALL(cudaUnbindTexture(controlPointTexture)) NR_CUDA_SAFE_CALL(cudaUnbindTexture(maskTexture)) return; } /* *************************************************************** */ /* *************************************************************** */ float reg_spline_approxBendingEnergy_gpu(nifti_image *controlPointImage, float4 **controlPointImageArray_d) { // Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard 
NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0); const int controlPointNumber = controlPointImage->nx*controlPointImage->ny*controlPointImage->nz; const int3 controlPointImageDim = make_int3(controlPointImage->nx, controlPointImage->ny, controlPointImage->nz); const int controlPointGridMem = controlPointNumber*sizeof(float4); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointNumber,&controlPointNumber,sizeof(int))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointImageDim,&controlPointImageDim,sizeof(int3))) NR_CUDA_SAFE_CALL(cudaBindTexture(0,controlPointTexture, *controlPointImageArray_d, controlPointGridMem)) // First compute all the second derivatives float4 *secondDerivativeValues_d; if(controlPointImage->nz>1){ NR_CUDA_SAFE_CALL(cudaMalloc(&secondDerivativeValues_d, 6*controlPointGridMem)) const unsigned int Grid_bspline_getApproxSecondDerivatives = (unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_getApproxSecondDerivatives3D))); dim3 G1(Grid_bspline_getApproxSecondDerivatives,Grid_bspline_getApproxSecondDerivatives,1); dim3 B1(NR_BLOCK->Block_reg_spline_getApproxSecondDerivatives3D,1,1); reg_spline_getApproxSecondDerivatives3D <<< G1, B1 >>>(secondDerivativeValues_d); NR_CUDA_CHECK_KERNEL(G1,B1) } else{ NR_CUDA_SAFE_CALL(cudaMalloc(&secondDerivativeValues_d, 3*controlPointGridMem)) const unsigned int Grid_bspline_getApproxSecondDerivatives = (unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_getApproxSecondDerivatives2D))); dim3 G1(Grid_bspline_getApproxSecondDerivatives,Grid_bspline_getApproxSecondDerivatives,1); dim3 B1(NR_BLOCK->Block_reg_spline_getApproxSecondDerivatives2D,1,1); reg_spline_getApproxSecondDerivatives2D <<< G1, B1 >>>(secondDerivativeValues_d); NR_CUDA_CHECK_KERNEL(G1,B1) } NR_CUDA_SAFE_CALL(cudaUnbindTexture(controlPointTexture)) // Compute the bending energy from the second derivatives float *penaltyTerm_d; NR_CUDA_SAFE_CALL(cudaMalloc(&penaltyTerm_d, controlPointNumber*sizeof(float))) if(controlPointImage->nz>1){ NR_CUDA_SAFE_CALL(cudaBindTexture(0,secondDerivativesTexture, secondDerivativeValues_d, 6*controlPointGridMem)) const unsigned int Grid_reg_spline_ApproxBendingEnergy = (unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_getApproxBendingEnergy3D))); dim3 G2(Grid_reg_spline_ApproxBendingEnergy,Grid_reg_spline_ApproxBendingEnergy,1); dim3 B2(NR_BLOCK->Block_reg_spline_getApproxBendingEnergy3D,1,1); reg_spline_getApproxBendingEnergy3D_kernel <<< G2, B2 >>>(penaltyTerm_d); NR_CUDA_CHECK_KERNEL(G2,B2) } else{ NR_CUDA_SAFE_CALL(cudaBindTexture(0,secondDerivativesTexture, secondDerivativeValues_d, 3*controlPointGridMem)) const unsigned int Grid_reg_spline_ApproxBendingEnergy = (unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_getApproxBendingEnergy2D))); dim3 G2(Grid_reg_spline_ApproxBendingEnergy,Grid_reg_spline_ApproxBendingEnergy,1); dim3 B2(NR_BLOCK->Block_reg_spline_getApproxBendingEnergy2D,1,1); reg_spline_getApproxBendingEnergy2D_kernel <<< G2, B2 >>>(penaltyTerm_d); NR_CUDA_CHECK_KERNEL(G2,B2) } NR_CUDA_SAFE_CALL(cudaUnbindTexture(secondDerivativesTexture)) NR_CUDA_SAFE_CALL(cudaFree(secondDerivativeValues_d)) // Compute the mean bending energy value double penaltyValue=reg_sumReduction_gpu(penaltyTerm_d,controlPointNumber); NR_CUDA_SAFE_CALL(cudaFree(penaltyTerm_d)) return (float)(penaltyValue/(double)controlPointImage->nvox); } /* 
*************************************************************** */ /* *************************************************************** */ void reg_spline_approxBendingEnergyGradient_gpu(nifti_image *controlPointImage, float4 **controlPointImageArray_d, float4 **nodeGradientArray_d, float bendingEnergyWeight) { // Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0); const int controlPointNumber = controlPointImage->nx*controlPointImage->ny*controlPointImage->nz; const int3 controlPointImageDim = make_int3(controlPointImage->nx, controlPointImage->ny, controlPointImage->nz); const int controlPointGridMem = controlPointNumber*sizeof(float4); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointNumber,&controlPointNumber,sizeof(int))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointImageDim,&controlPointImageDim,sizeof(int3))) NR_CUDA_SAFE_CALL(cudaBindTexture(0,controlPointTexture, *controlPointImageArray_d, controlPointGridMem)) // First compute all the second derivatives float4 *secondDerivativeValues_d; if(controlPointImage->nz>1){ NR_CUDA_SAFE_CALL(cudaMalloc(&secondDerivativeValues_d, 6*controlPointNumber*sizeof(float4))) const unsigned int Grid_bspline_getApproxSecondDerivatives = (unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_getApproxSecondDerivatives3D))); dim3 G1(Grid_bspline_getApproxSecondDerivatives,Grid_bspline_getApproxSecondDerivatives,1); dim3 B1(NR_BLOCK->Block_reg_spline_getApproxSecondDerivatives3D,1,1); reg_spline_getApproxSecondDerivatives3D <<< G1, B1 >>>(secondDerivativeValues_d); NR_CUDA_CHECK_KERNEL(G1,B1) } else{ NR_CUDA_SAFE_CALL(cudaMalloc(&secondDerivativeValues_d, 3*controlPointNumber*sizeof(float4))) const unsigned int Grid_bspline_getApproxSecondDerivatives = (unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_getApproxSecondDerivatives2D))); dim3 G1(Grid_bspline_getApproxSecondDerivatives,Grid_bspline_getApproxSecondDerivatives,1); dim3 B1(NR_BLOCK->Block_reg_spline_getApproxSecondDerivatives2D,1,1); reg_spline_getApproxSecondDerivatives2D <<< G1, B1 >>>(secondDerivativeValues_d); NR_CUDA_CHECK_KERNEL(G1,B1) } NR_CUDA_SAFE_CALL(cudaUnbindTexture(controlPointTexture)) // Compute the gradient bendingEnergyWeight *= 1.f / (float)controlPointNumber; NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_Weight,&bendingEnergyWeight,sizeof(float))) if(controlPointImage->nz>1){ NR_CUDA_SAFE_CALL(cudaBindTexture(0,secondDerivativesTexture, secondDerivativeValues_d, 6*controlPointNumber*sizeof(float4))) const unsigned int Grid_reg_spline_getApproxBendingEnergyGradient = (unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_getApproxBendingEnergyGradient3D))); dim3 G2(Grid_reg_spline_getApproxBendingEnergyGradient,Grid_reg_spline_getApproxBendingEnergyGradient,1); dim3 B2(NR_BLOCK->Block_reg_spline_getApproxBendingEnergyGradient3D,1,1); reg_spline_getApproxBendingEnergyGradient3D_kernel <<< G2, B2 >>>(*nodeGradientArray_d); NR_CUDA_CHECK_KERNEL(G2,B2) } else{ NR_CUDA_SAFE_CALL(cudaBindTexture(0,secondDerivativesTexture, secondDerivativeValues_d, 3*controlPointNumber*sizeof(float4))) const unsigned int Grid_reg_spline_getApproxBendingEnergyGradient = (unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_getApproxBendingEnergyGradient2D))); dim3 
G2(Grid_reg_spline_getApproxBendingEnergyGradient,Grid_reg_spline_getApproxBendingEnergyGradient,1); dim3 B2(NR_BLOCK->Block_reg_spline_getApproxBendingEnergyGradient2D,1,1); reg_spline_getApproxBendingEnergyGradient2D_kernel <<< G2, B2 >>>(*nodeGradientArray_d); NR_CUDA_CHECK_KERNEL(G2,B2) } NR_CUDA_SAFE_CALL(cudaUnbindTexture(secondDerivativesTexture)) NR_CUDA_SAFE_CALL(cudaFree(secondDerivativeValues_d)) return; } /* *************************************************************** */ /* *************************************************************** */ void reg_spline_ComputeApproxJacobianValues(nifti_image *controlPointImage, float4 **controlPointImageArray_d, float **jacobianMatrices_d, float **jacobianDet_d) { // Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0); // Need to reorient the Jacobian matrix using the header information - real to voxel conversion mat33 reorientation; if(controlPointImage->sform_code>0) reorientation=reg_mat44_to_mat33(&controlPointImage->sto_xyz); else reorientation=reg_mat44_to_mat33(&controlPointImage->qto_xyz); float3 temp=make_float3(reorientation.m[0][0],reorientation.m[0][1],reorientation.m[0][2]); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix0,&temp,sizeof(float3))) temp=make_float3(reorientation.m[1][0],reorientation.m[1][1],reorientation.m[1][2]); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix1,&temp,sizeof(float3))) temp=make_float3(reorientation.m[2][0],reorientation.m[2][1],reorientation.m[2][2]); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix2,&temp,sizeof(float3))) // Bind some variables const int controlPointNumber = controlPointImage->nx*controlPointImage->ny*controlPointImage->nz; const int3 controlPointImageDim = make_int3(controlPointImage->nx, controlPointImage->ny, controlPointImage->nz); const float3 controlPointSpacing = make_float3(controlPointImage->dx,controlPointImage->dy,controlPointImage->dz); const int controlPointGridMem = controlPointNumber*sizeof(float4); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointNumber,&controlPointNumber,sizeof(int))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointImageDim,&controlPointImageDim,sizeof(int3))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointSpacing,&controlPointSpacing,sizeof(float3))) NR_CUDA_SAFE_CALL(cudaBindTexture(0,controlPointTexture, *controlPointImageArray_d, controlPointGridMem)) // The Jacobian matrix is computed for every control point if(controlPointImage->nz>1){ const unsigned int Grid_reg_spline_getApproxJacobianValues3D = (unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_getApproxJacobianValues3D))); dim3 G1(Grid_reg_spline_getApproxJacobianValues3D,Grid_reg_spline_getApproxJacobianValues3D,1); dim3 B1(NR_BLOCK->Block_reg_spline_getApproxJacobianValues3D,1,1); reg_spline_getApproxJacobianValues3D_kernel<<< G1, B1>>>(*jacobianMatrices_d, *jacobianDet_d); NR_CUDA_CHECK_KERNEL(G1,B1) } else{ const unsigned int Grid_reg_spline_getApproxJacobianValues2D = (unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_getApproxJacobianValues2D))); dim3 G1(Grid_reg_spline_getApproxJacobianValues2D,Grid_reg_spline_getApproxJacobianValues2D,1); dim3 B1(NR_BLOCK->Block_reg_spline_getApproxJacobianValues2D,1,1); reg_spline_getApproxJacobianValues2D_kernel<<< G1, B1>>>(*jacobianMatrices_d, *jacobianDet_d); NR_CUDA_CHECK_KERNEL(G1,B1) } 
NR_CUDA_SAFE_CALL(cudaUnbindTexture(controlPointTexture)) } /* *************************************************************** */ void reg_spline_ComputeJacobianValues(nifti_image *controlPointImage, nifti_image *referenceImage, float4 **controlPointImageArray_d, float **jacobianMatrices_d, float **jacobianDet_d) { // Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0); // Need to reorient the Jacobian matrix using the header information - real to voxel conversion mat33 reorientation; if(controlPointImage->sform_code>0) reorientation=reg_mat44_to_mat33(&controlPointImage->sto_xyz); else reorientation=reg_mat44_to_mat33(&controlPointImage->qto_xyz); float3 temp=make_float3(reorientation.m[0][0],reorientation.m[0][1],reorientation.m[0][2]); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix0,&temp,sizeof(float3))) temp=make_float3(reorientation.m[1][0],reorientation.m[1][1],reorientation.m[1][2]); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix1,&temp,sizeof(float3))) temp=make_float3(reorientation.m[2][0],reorientation.m[2][1],reorientation.m[2][2]); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix2,&temp,sizeof(float3))) // Bind some variables const int voxelNumber = referenceImage->nx*referenceImage->ny*referenceImage->nz; const int controlPointNumber = controlPointImage->nx*controlPointImage->ny*controlPointImage->nz; const int3 referenceImageDim = make_int3(referenceImage->nx, referenceImage->ny, referenceImage->nz); const int3 controlPointImageDim = make_int3(controlPointImage->nx, controlPointImage->ny, controlPointImage->nz); const float3 controlPointSpacing = make_float3(controlPointImage->dx,controlPointImage->dy,controlPointImage->dz); const float3 controlPointVoxelSpacing = make_float3( controlPointImage->dx / referenceImage->dx, controlPointImage->dy / referenceImage->dy, controlPointImage->dz / referenceImage->dz); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointNumber,&controlPointNumber,sizeof(int))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ReferenceImageDim,&referenceImageDim,sizeof(int3))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointImageDim,&controlPointImageDim,sizeof(int3))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointSpacing,&controlPointSpacing,sizeof(float3))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointVoxelSpacing,&controlPointVoxelSpacing,sizeof(float3))) NR_CUDA_SAFE_CALL(cudaBindTexture(0,controlPointTexture, *controlPointImageArray_d, controlPointNumber*sizeof(float4))) // The Jacobian matrix is computed for every voxel if(controlPointImage->nz>1){ const unsigned int Grid_reg_spline_getJacobianValues3D = (unsigned int)ceilf(sqrtf((float)voxelNumber/(float)(NR_BLOCK->Block_reg_spline_getJacobianValues3D))); dim3 G1(Grid_reg_spline_getJacobianValues3D,Grid_reg_spline_getJacobianValues3D,1); dim3 B1(NR_BLOCK->Block_reg_spline_getJacobianValues3D,1,1); // 8 floats of shared memory are allocated per thread reg_spline_getJacobianValues3D_kernel <<< G1, B1, NR_BLOCK->Block_reg_spline_getJacobianValues3D*8*sizeof(float)>>> (*jacobianMatrices_d, *jacobianDet_d); NR_CUDA_CHECK_KERNEL(G1,B1) } else{ const unsigned int Grid_reg_spline_getJacobianValues2D = (unsigned int)ceilf(sqrtf((float)voxelNumber/(float)(NR_BLOCK->Block_reg_spline_getJacobianValues2D))); dim3 G1(Grid_reg_spline_getJacobianValues2D,Grid_reg_spline_getJacobianValues2D,1); dim3 
B1(NR_BLOCK->Block_reg_spline_getJacobianValues2D,1,1); reg_spline_getJacobianValues2D_kernel <<< G1, B1>>> (*jacobianMatrices_d, *jacobianDet_d); NR_CUDA_CHECK_KERNEL(G1,B1) } NR_CUDA_SAFE_CALL(cudaUnbindTexture(controlPointTexture)) } /* *************************************************************** */ /* *************************************************************** */ double reg_spline_getJacobianPenaltyTerm_gpu(nifti_image *referenceImage, nifti_image *controlPointImage, float4 **controlPointImageArray_d, bool approx ) { // Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0); // The Jacobian matrices and determinants are computed float *jacobianMatrices_d; float *jacobianDet_d; int jacNumber; double jacSum; if(approx){ jacNumber = controlPointImage->nx*controlPointImage->ny*controlPointImage->nz; jacSum = (controlPointImage->nx-2)*(controlPointImage->ny-2); if(controlPointImage->nz>1){ jacSum *= controlPointImage->nz-2; // Allocate array for 3x3 matrices NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianMatrices_d,9*jacNumber*sizeof(float))) } else{ // Allocate array for 2x2 matrices NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianMatrices_d,4*jacNumber*sizeof(float))) } NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianDet_d,jacNumber*sizeof(float))) reg_spline_ComputeApproxJacobianValues(controlPointImage, controlPointImageArray_d, &jacobianMatrices_d, &jacobianDet_d); } else{ jacNumber=referenceImage->nx*referenceImage->ny*referenceImage->nz; jacSum=jacNumber; if(controlPointImage->nz>1){ // Allocate array for 3x3 matrices NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianMatrices_d,9*jacNumber*sizeof(float))) } else{ // Allocate array for 2x2 matrices NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianMatrices_d,4*jacNumber*sizeof(float))) } NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianDet_d,jacNumber*sizeof(float))) reg_spline_ComputeJacobianValues(controlPointImage, referenceImage, controlPointImageArray_d, &jacobianMatrices_d, &jacobianDet_d); } NR_CUDA_SAFE_CALL(cudaFree(jacobianMatrices_d)) // The Jacobian determinant are squared and logged (might not be english but will do) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNumber,&jacNumber,sizeof(int))) const unsigned int Grid_reg_spline_logSquaredValues = (unsigned int)ceilf(sqrtf((float)jacNumber/(float)(NR_BLOCK->Block_reg_spline_logSquaredValues))); dim3 G1(Grid_reg_spline_logSquaredValues,Grid_reg_spline_logSquaredValues,1); dim3 B1(NR_BLOCK->Block_reg_spline_logSquaredValues,1,1); reg_spline_logSquaredValues_kernel<<< G1, B1>>>(jacobianDet_d); NR_CUDA_CHECK_KERNEL(G1,B1) // Perform the reduction double penaltyTermValue = reg_sumReduction_gpu(jacobianDet_d,jacNumber); NR_CUDA_SAFE_CALL(cudaFree(jacobianDet_d)) return penaltyTermValue/jacSum; } /* *************************************************************** */ void reg_spline_getJacobianPenaltyTermGradient_gpu(nifti_image *referenceImage, nifti_image *controlPointImage, float4 **controlPointImageArray_d, float4 **nodeGradientArray_d, float jacobianWeight, bool approx) { // Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0); // The Jacobian matrices and determinants are computed float *jacobianMatrices_d; float *jacobianDet_d; int jacNumber; if(approx){ jacNumber=controlPointImage->nx*controlPointImage->ny*controlPointImage->nz; if(controlPointImage->nz>1) 
NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianMatrices_d,9*jacNumber*sizeof(float))) else NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianMatrices_d,4*jacNumber*sizeof(float))) NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianDet_d,jacNumber*sizeof(float))) reg_spline_ComputeApproxJacobianValues(controlPointImage, controlPointImageArray_d, &jacobianMatrices_d, &jacobianDet_d); } else{ jacNumber=referenceImage->nx*referenceImage->ny*referenceImage->nz; if(controlPointImage->nz>1) NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianMatrices_d,9*jacNumber*sizeof(float))) else NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianMatrices_d,4*jacNumber*sizeof(float))) NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianDet_d,jacNumber*sizeof(float))) reg_spline_ComputeJacobianValues(controlPointImage, referenceImage, controlPointImageArray_d, &jacobianMatrices_d, &jacobianDet_d); } // Need to desorient the Jacobian matrix using the header information - voxel to real conversion mat33 reorientation; if(controlPointImage->sform_code>0) reorientation=reg_mat44_to_mat33(&controlPointImage->sto_ijk); else reorientation=reg_mat44_to_mat33(&controlPointImage->qto_ijk); float3 temp=make_float3(reorientation.m[0][0],reorientation.m[0][1],reorientation.m[0][2]); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix0,&temp,sizeof(float3))) temp=make_float3(reorientation.m[1][0],reorientation.m[1][1],reorientation.m[1][2]); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix1,&temp,sizeof(float3))) temp=make_float3(reorientation.m[2][0],reorientation.m[2][1],reorientation.m[2][2]); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix2,&temp,sizeof(float3))) NR_CUDA_SAFE_CALL(cudaBindTexture(0,jacobianDeterminantTexture, jacobianDet_d, jacNumber*sizeof(float))) if(controlPointImage->nz>1) NR_CUDA_SAFE_CALL(cudaBindTexture(0,jacobianMatricesTexture, jacobianMatrices_d, 9*jacNumber*sizeof(float))) else NR_CUDA_SAFE_CALL(cudaBindTexture(0,jacobianMatricesTexture, jacobianMatrices_d, 4*jacNumber*sizeof(float))) // Bind some variables const int controlPointNumber = controlPointImage->nx*controlPointImage->ny*controlPointImage->nz; const int3 controlPointImageDim = make_int3(controlPointImage->nx, controlPointImage->ny, controlPointImage->nz); const float3 controlPointSpacing = make_float3(controlPointImage->dx,controlPointImage->dy,controlPointImage->dz); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointNumber,&controlPointNumber,sizeof(int))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointImageDim,&controlPointImageDim,sizeof(int3))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointSpacing,&controlPointSpacing,sizeof(float3))) float3 weight=make_float3( referenceImage->dx*jacobianWeight / ((float)jacNumber*controlPointImage->dx), referenceImage->dy*jacobianWeight / ((float)jacNumber*controlPointImage->dy), referenceImage->dz*jacobianWeight / ((float)jacNumber*controlPointImage->dz)); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_Weight3,&weight,sizeof(float3))) if(approx){ if(controlPointImage->nz>1){ const unsigned int Grid_reg_spline_computeApproxJacGradient3D = (unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_computeApproxJacGradient3D))); dim3 G1(Grid_reg_spline_computeApproxJacGradient3D,Grid_reg_spline_computeApproxJacGradient3D,1); dim3 B1(NR_BLOCK->Block_reg_spline_computeApproxJacGradient3D,1,1); reg_spline_computeApproxJacGradient3D_kernel<<< G1, B1>>>(*nodeGradientArray_d); NR_CUDA_CHECK_KERNEL(G1,B1) } else{ const unsigned int Grid_reg_spline_computeApproxJacGradient2D = (unsigned 
int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_computeApproxJacGradient2D))); dim3 G1(Grid_reg_spline_computeApproxJacGradient2D,Grid_reg_spline_computeApproxJacGradient2D,1); dim3 B1(NR_BLOCK->Block_reg_spline_computeApproxJacGradient2D,1,1); reg_spline_computeApproxJacGradient2D_kernel<<< G1, B1>>>(*nodeGradientArray_d); NR_CUDA_CHECK_KERNEL(G1,B1) } } else{ const int voxelNumber = referenceImage->nx*referenceImage->ny*referenceImage->nz; const int3 referenceImageDim = make_int3(referenceImage->nx, referenceImage->ny, referenceImage->nz); const float3 controlPointVoxelSpacing = make_float3( controlPointImage->dx / referenceImage->dx, controlPointImage->dy / referenceImage->dy, controlPointImage->dz / referenceImage->dz); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ReferenceImageDim,&referenceImageDim,sizeof(int3))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointVoxelSpacing,&controlPointVoxelSpacing,sizeof(float3))) if(controlPointImage->nz>1){ const unsigned int Grid_reg_spline_computeJacGradient3D = (unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_computeJacGradient3D))); dim3 G1(Grid_reg_spline_computeJacGradient3D,Grid_reg_spline_computeJacGradient3D,1); dim3 B1(NR_BLOCK->Block_reg_spline_computeJacGradient3D,1,1); reg_spline_computeJacGradient3D_kernel<<< G1, B1>>>(*nodeGradientArray_d); NR_CUDA_CHECK_KERNEL(G1,B1) } else{ const unsigned int Grid_reg_spline_computeJacGradient2D = (unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_computeJacGradient2D))); dim3 G1(Grid_reg_spline_computeJacGradient2D,Grid_reg_spline_computeJacGradient2D,1); dim3 B1(NR_BLOCK->Block_reg_spline_computeJacGradient2D,1,1); reg_spline_computeJacGradient2D_kernel<<< G1, B1>>>(*nodeGradientArray_d); NR_CUDA_CHECK_KERNEL(G1,B1) } } NR_CUDA_SAFE_CALL(cudaUnbindTexture(jacobianDeterminantTexture)) NR_CUDA_SAFE_CALL(cudaUnbindTexture(jacobianMatricesTexture)) NR_CUDA_SAFE_CALL(cudaFree(jacobianDet_d)) NR_CUDA_SAFE_CALL(cudaFree(jacobianMatrices_d)) } /* *************************************************************** */ double reg_spline_correctFolding_gpu(nifti_image *referenceImage, nifti_image *controlPointImage, float4 **controlPointImageArray_d, bool approx) { // Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0); // The Jacobian matrices and determinants are computed float *jacobianMatrices_d; float *jacobianDet_d; int jacNumber; double jacSum; if(approx){ jacNumber=controlPointImage->nx*controlPointImage->ny*controlPointImage->nz; jacSum = (controlPointImage->nx-2)*(controlPointImage->ny-2)*(controlPointImage->nz-2); NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianMatrices_d,9*jacNumber*sizeof(float))) NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianDet_d,jacNumber*sizeof(float))) reg_spline_ComputeApproxJacobianValues(controlPointImage, controlPointImageArray_d, &jacobianMatrices_d, &jacobianDet_d); } else{ jacSum=jacNumber=referenceImage->nx*referenceImage->ny*referenceImage->nz; NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianMatrices_d,9*jacNumber*sizeof(float))) NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianDet_d,jacNumber*sizeof(float))) reg_spline_ComputeJacobianValues(controlPointImage, referenceImage, controlPointImageArray_d, &jacobianMatrices_d, &jacobianDet_d); } // Check if the Jacobian determinant average 
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNumber,&jacNumber,sizeof(int))) float *jacobianDet2_d; NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianDet2_d,jacNumber*sizeof(float))) NR_CUDA_SAFE_CALL(cudaMemcpy(jacobianDet2_d,jacobianDet_d,jacNumber*sizeof(float),cudaMemcpyDeviceToDevice)) const unsigned int Grid_reg_spline_logSquaredValues = (unsigned int)ceilf(sqrtf((float)jacNumber/(float)(NR_BLOCK->Block_reg_spline_logSquaredValues))); dim3 G1(Grid_reg_spline_logSquaredValues,Grid_reg_spline_logSquaredValues,1); dim3 B1(NR_BLOCK->Block_reg_spline_logSquaredValues,1,1); reg_spline_logSquaredValues_kernel<<< G1, B1>>>(jacobianDet2_d); NR_CUDA_CHECK_KERNEL(G1,B1) float *jacobianDet_h; NR_CUDA_SAFE_CALL(cudaMallocHost(&jacobianDet_h,jacNumber*sizeof(float))) NR_CUDA_SAFE_CALL(cudaMemcpy(jacobianDet_h,jacobianDet2_d, jacNumber*sizeof(float), cudaMemcpyDeviceToHost)) NR_CUDA_SAFE_CALL(cudaFree(jacobianDet2_d)) double penaltyTermValue=0.; for(int i=0;i<jacNumber;++i) penaltyTermValue += jacobianDet_h[i]; NR_CUDA_SAFE_CALL(cudaFreeHost(jacobianDet_h)) penaltyTermValue /= jacSum; if(penaltyTermValue==penaltyTermValue){ NR_CUDA_SAFE_CALL(cudaFree(jacobianDet_d)) NR_CUDA_SAFE_CALL(cudaFree(jacobianMatrices_d)) return penaltyTermValue; } // Need to desorient the Jacobian matrix using the header information - voxel to real conversion mat33 reorientation; if(controlPointImage->sform_code>0) reorientation=reg_mat44_to_mat33(&controlPointImage->sto_ijk); else reorientation=reg_mat44_to_mat33(&controlPointImage->qto_ijk); float3 temp=make_float3(reorientation.m[0][0],reorientation.m[0][1],reorientation.m[0][2]); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix0,&temp,sizeof(float3))) temp=make_float3(reorientation.m[1][0],reorientation.m[1][1],reorientation.m[1][2]); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix1,&temp,sizeof(float3))) temp=make_float3(reorientation.m[2][0],reorientation.m[2][1],reorientation.m[2][2]); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix2,&temp,sizeof(float3))) NR_CUDA_SAFE_CALL(cudaBindTexture(0,jacobianDeterminantTexture, jacobianDet_d, jacNumber*sizeof(float))) NR_CUDA_SAFE_CALL(cudaBindTexture(0,jacobianMatricesTexture, jacobianMatrices_d, 9*jacNumber*sizeof(float))) // Bind some variables const int controlPointNumber = controlPointImage->nx*controlPointImage->ny*controlPointImage->nz; const int3 controlPointImageDim = make_int3(controlPointImage->nx, controlPointImage->ny, controlPointImage->nz); const float3 controlPointSpacing = make_float3(controlPointImage->dx,controlPointImage->dy,controlPointImage->dz); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointNumber,&controlPointNumber,sizeof(int))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointImageDim,&controlPointImageDim,sizeof(int3))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointSpacing,&controlPointSpacing,sizeof(float3))) if(approx){ const unsigned int Grid_reg_spline_approxCorrectFolding = (unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_approxCorrectFolding3D))); dim3 G1(Grid_reg_spline_approxCorrectFolding,Grid_reg_spline_approxCorrectFolding,1); dim3 B1(NR_BLOCK->Block_reg_spline_approxCorrectFolding3D,1,1); reg_spline_approxCorrectFolding3D_kernel<<< G1, B1>>>(*controlPointImageArray_d); NR_CUDA_CHECK_KERNEL(G1,B1) } else{ const int voxelNumber = referenceImage->nx*referenceImage->ny*referenceImage->nz; const int3 referenceImageDim = make_int3(referenceImage->nx, referenceImage->ny, referenceImage->nz); const float3 controlPointVoxelSpacing = make_float3( 
controlPointImage->dx / referenceImage->dx, controlPointImage->dy / referenceImage->dy, controlPointImage->dz / referenceImage->dz); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ReferenceImageDim,&referenceImageDim,sizeof(int3))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointVoxelSpacing,&controlPointVoxelSpacing,sizeof(float3))) const unsigned int Grid_reg_spline_correctFolding = (unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_correctFolding3D))); dim3 G1(Grid_reg_spline_correctFolding,Grid_reg_spline_correctFolding,1); dim3 B1(NR_BLOCK->Block_reg_spline_correctFolding3D,1,1); reg_spline_correctFolding3D_kernel<<< G1, B1>>>(*controlPointImageArray_d); NR_CUDA_CHECK_KERNEL(G1,B1) } NR_CUDA_SAFE_CALL(cudaUnbindTexture(jacobianDeterminantTexture)) NR_CUDA_SAFE_CALL(cudaUnbindTexture(jacobianMatricesTexture)) NR_CUDA_SAFE_CALL(cudaFree(jacobianDet_d)) NR_CUDA_SAFE_CALL(cudaFree(jacobianMatrices_d)) return std::numeric_limits<double>::quiet_NaN(); } /* *************************************************************** */ /* *************************************************************** */ void reg_getDeformationFromDisplacement_gpu( nifti_image *image, float4 **imageArray_d) { // Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0); // Bind the qform or sform mat44 temp_mat=image->qto_xyz; if(image->sform_code>0) temp_mat=image->sto_xyz; float4 temp=make_float4(temp_mat.m[0][0],temp_mat.m[0][1],temp_mat.m[0][2],temp_mat.m[0][3]); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix0b,&temp,sizeof(float4))) temp=make_float4(temp_mat.m[1][0],temp_mat.m[1][1],temp_mat.m[1][2],temp_mat.m[1][3]); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix1b,&temp,sizeof(float4))) temp=make_float4(temp_mat.m[2][0],temp_mat.m[2][1],temp_mat.m[2][2],temp_mat.m[2][3]); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix2b,&temp,sizeof(float4))) const int voxelNumber=image->nx*image->ny*image->nz; NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int))) const int3 imageDim=make_int3(image->nx,image->ny,image->nz); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ReferenceImageDim,&imageDim,sizeof(int3))) const unsigned int Grid_reg_getDeformationFromDisplacement = (unsigned int)ceilf(sqrtf((float)voxelNumber/(float)(NR_BLOCK->Block_reg_getDeformationFromDisplacement))); dim3 G1(Grid_reg_getDeformationFromDisplacement,Grid_reg_getDeformationFromDisplacement,1); dim3 B1(NR_BLOCK->Block_reg_getDeformationFromDisplacement,1,1); reg_getDeformationFromDisplacement3D_kernel<<< G1, B1>>>(*imageArray_d); NR_CUDA_CHECK_KERNEL(G1,B1) } /* *************************************************************** */ /* *************************************************************** */ void reg_getDisplacementFromDeformation_gpu( nifti_image *image, float4 **imageArray_d) { // Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0); // Bind the qform or sform mat44 temp_mat=image->qto_xyz; if(image->sform_code>0) temp_mat=image->sto_xyz; float4 temp=make_float4(temp_mat.m[0][0],temp_mat.m[0][1],temp_mat.m[0][2],temp_mat.m[0][3]); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix0b,&temp,sizeof(float4))) temp=make_float4(temp_mat.m[1][0],temp_mat.m[1][1],temp_mat.m[1][2],temp_mat.m[1][3]); 
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix1b,&temp,sizeof(float4))) temp=make_float4(temp_mat.m[2][0],temp_mat.m[2][1],temp_mat.m[2][2],temp_mat.m[2][3]); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix2b,&temp,sizeof(float4))) const int voxelNumber=image->nx*image->ny*image->nz; NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int))) const int3 imageDim=make_int3(image->nx,image->ny,image->nz); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ReferenceImageDim,&imageDim,sizeof(int3))) const unsigned int Grid_reg_getDisplacementFromDeformation = (unsigned int)ceilf(sqrtf((float)voxelNumber/(float)(NR_BLOCK->Block_reg_getDisplacementFromDeformation))); dim3 G1(Grid_reg_getDisplacementFromDeformation,Grid_reg_getDisplacementFromDeformation,1); dim3 B1(NR_BLOCK->Block_reg_getDisplacementFromDeformation,1,1); reg_getDisplacementFromDeformation3D_kernel<<< G1, B1>>>(*imageArray_d); NR_CUDA_CHECK_KERNEL(G1,B1) } /* *************************************************************** */ /* *************************************************************** */ void reg_getDeformationFieldFromVelocityGrid_gpu(nifti_image *cpp_h, nifti_image *def_h, float4 **cpp_gpu, float4 **def_gpu) { const int voxelNumber = def_h->nx * def_h->ny * def_h->nz; // Create a mask array where no voxel are excluded int *mask_gpu=NULL; NR_CUDA_SAFE_CALL(cudaMalloc(&mask_gpu, voxelNumber*sizeof(int))) reg_fillMaskArray_gpu(voxelNumber,&mask_gpu); // Define some variables for the deformation fields float4 *tempDef_gpu=NULL; NR_CUDA_SAFE_CALL(cudaMalloc(&tempDef_gpu,voxelNumber*sizeof(float4))) // The deformation field is computed reg_spline_getDeformationField_gpu(cpp_h, def_h, cpp_gpu, def_gpu, &mask_gpu, voxelNumber, true); // non-interpolant spline are used // The deformation field is converted into a displacement field reg_getDisplacementFromDeformation_gpu(def_h,def_gpu); // Scaling of the deformation field float scalingValue = pow(2.0f,fabs(cpp_h->intent_p1)); if(cpp_h->intent_p1<0) // backward deformation field is scaled down reg_multiplyValue_gpu(voxelNumber, def_gpu, -1.f/scalingValue); else // forward deformation field is scaled down reg_multiplyValue_gpu(voxelNumber, def_gpu, 1.f/scalingValue); // The displacement field is converted back into a deformation field reg_getDeformationFromDisplacement_gpu(def_h,def_gpu); // The deformation field is squared unsigned int squaringNumber = (unsigned int)fabs(cpp_h->intent_p1); for(unsigned int i=0;i<squaringNumber;++i){ // The deformation field arrays are updated NR_CUDA_SAFE_CALL(cudaMemcpy(tempDef_gpu,*def_gpu,voxelNumber*sizeof(float4),cudaMemcpyDeviceToDevice)) // The deformation fields are composed reg_defField_compose_gpu(def_h, &tempDef_gpu, def_gpu, &mask_gpu, voxelNumber); } NR_CUDA_SAFE_CALL(cudaFree(tempDef_gpu)) NR_CUDA_SAFE_CALL(cudaFree(mask_gpu)) } /* *************************************************************** */ /* *************************************************************** */ void reg_defField_compose_gpu(nifti_image *def, float4 **def_gpu, float4 **defOut_gpu, int **mask_gpu, int activeVoxel) { // Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0); const int voxelNumber=def->nx*def->ny*def->nz; // Bind the qform or sform mat44 temp_mat=def->qto_ijk; if(def->sform_code>0) temp_mat=def->sto_ijk; float4 temp; temp=make_float4(temp_mat.m[0][0],temp_mat.m[0][1],temp_mat.m[0][2],temp_mat.m[0][3]); 
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix0b,&temp,sizeof(float4))) temp=make_float4(temp_mat.m[1][0],temp_mat.m[1][1],temp_mat.m[1][2],temp_mat.m[1][3]); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix1b,&temp,sizeof(float4))) temp=make_float4(temp_mat.m[2][0],temp_mat.m[2][1],temp_mat.m[2][2],temp_mat.m[2][3]); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix2b,&temp,sizeof(float4))) temp_mat=def->qto_xyz; if(def->sform_code>0) temp_mat=def->sto_xyz; temp=make_float4(temp_mat.m[0][0],temp_mat.m[0][1],temp_mat.m[0][2],temp_mat.m[0][3]); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix0c,&temp,sizeof(float4))) temp=make_float4(temp_mat.m[1][0],temp_mat.m[1][1],temp_mat.m[1][2],temp_mat.m[1][3]); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix1c,&temp,sizeof(float4))) temp=make_float4(temp_mat.m[2][0],temp_mat.m[2][1],temp_mat.m[2][2],temp_mat.m[2][3]); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix2c,&temp,sizeof(float4))) const int3 referenceImageDim=make_int3(def->nx,def->ny,def->nz); NR_CUDA_SAFE_CALL(cudaBindTexture(0,voxelDeformationTexture,*def_gpu,activeVoxel*sizeof(float4))) NR_CUDA_SAFE_CALL(cudaBindTexture(0,maskTexture,*mask_gpu,activeVoxel*sizeof(int))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ReferenceImageDim,&referenceImageDim,sizeof(int3))) if(def->nz>1){ const unsigned int Grid_reg_defField_compose3D = (unsigned int)ceilf(sqrtf((float)voxelNumber/(float)(NR_BLOCK->Block_reg_defField_compose3D))); dim3 G1(Grid_reg_defField_compose3D,Grid_reg_defField_compose3D,1); dim3 B1(NR_BLOCK->Block_reg_defField_compose3D,1,1); reg_defField_compose3D_kernel<<< G1, B1>>>(*defOut_gpu); NR_CUDA_CHECK_KERNEL(G1,B1) } else{ const unsigned int Grid_reg_defField_compose2D = (unsigned int)ceilf(sqrtf((float)voxelNumber/(float)(NR_BLOCK->Block_reg_defField_compose2D))); dim3 G1(Grid_reg_defField_compose2D,Grid_reg_defField_compose2D,1); dim3 B1(NR_BLOCK->Block_reg_defField_compose2D,1,1); reg_defField_compose2D_kernel<<< G1, B1>>>(*defOut_gpu); NR_CUDA_CHECK_KERNEL(G1,B1) } NR_CUDA_SAFE_CALL(cudaUnbindTexture(voxelDeformationTexture)) NR_CUDA_SAFE_CALL(cudaUnbindTexture(maskTexture)) } /* *************************************************************** */ /* *************************************************************** */ void reg_defField_getJacobianMatrix_gpu(nifti_image *deformationField, float4 **deformationField_gpu, float **jacobianMatrices_gpu) { // Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0); const int3 referenceDim=make_int3(deformationField->nx,deformationField->ny,deformationField->nz); const float3 referenceSpacing=make_float3(deformationField->dx,deformationField->dy,deformationField->dz); const int voxelNumber = referenceDim.x*referenceDim.y*referenceDim.z; NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ReferenceImageDim,&referenceDim,sizeof(int3))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ReferenceSpacing,&referenceSpacing,sizeof(float3))) mat33 reorientation; if(deformationField->sform_code>0) reorientation=reg_mat44_to_mat33(&deformationField->sto_xyz); else reorientation=reg_mat44_to_mat33(&deformationField->qto_xyz); float3 temp=make_float3(reorientation.m[0][0],reorientation.m[0][1],reorientation.m[0][2]); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix0,&temp,sizeof(float3))) 
temp=make_float3(reorientation.m[1][0],reorientation.m[1][1],reorientation.m[1][2]); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix1,&temp,sizeof(float3))) temp=make_float3(reorientation.m[2][0],reorientation.m[2][1],reorientation.m[2][2]); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix2,&temp,sizeof(float3))) NR_CUDA_SAFE_CALL(cudaBindTexture(0,voxelDeformationTexture,*deformationField_gpu,voxelNumber*sizeof(float4))) const unsigned int Grid_reg_defField_getJacobianMatrix = (unsigned int)ceilf(sqrtf((float)voxelNumber/(float)(NR_BLOCK->Block_reg_defField_getJacobianMatrix))); dim3 G1(Grid_reg_defField_getJacobianMatrix,Grid_reg_defField_getJacobianMatrix,1); dim3 B1(NR_BLOCK->Block_reg_defField_getJacobianMatrix); reg_defField_getJacobianMatrix3D_kernel<<<G1,B1>>>(*jacobianMatrices_gpu); NR_CUDA_CHECK_KERNEL(G1,B1) NR_CUDA_SAFE_CALL(cudaUnbindTexture(voxelDeformationTexture)) } /* *************************************************************** */ /* *************************************************************** */ #endif
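// A minimal, self-contained CUDA sketch (not taken from NiftyReg) of the launch pattern used
// throughout the file above: a 1D workload of n elements is mapped onto a square 2D grid,
// G = ceil(sqrt(n / blockSize)), so that no single grid dimension grows too large.
// fillKernel and squareGridFill are hypothetical names, and the in-kernel index recovery is an
// assumption based on the grid shape used here, not code taken from the kernels themselves.
#include <cmath>
#include <cuda_runtime.h>

__global__ void fillKernel(float *data, int n, float value)
{
    // Recover a linear index from the square 2D grid.
    const int tid = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
    if (tid < n) data[tid] = value;
}

void squareGridFill(float *data, int n, float value, int blockSize)
{
    // Same Grid = ceilf(sqrtf(n / blockSize)) computation as used in the launches above.
    const unsigned int g = (unsigned int)ceilf(sqrtf((float)n / (float)blockSize));
    dim3 grid(g, g, 1);
    dim3 block(blockSize, 1, 1);
    fillKernel<<<grid, block>>>(data, n, value);
}

int main()
{
    const int n = 1000000;
    float *d = nullptr;
    cudaMalloc(&d, n * sizeof(float));
    squareGridFill(d, n, 1.f, 256);
    cudaDeviceSynchronize();
    cudaFree(d);
    return 0;
}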
#include <glm/glm.hpp> #include <GL/glut.h> // Thrust Dependencies #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/copy.h> // Voxelpipe Dependency #include <voxelpipe/voxelpipe.h> // Octree-SLAM Dependencies #include <octree_slam/timing_utils.h> #include <octree_slam/world/voxelization/voxelization.h> #include <octree_slam/cuda_common_kernels.h> namespace octree_slam { namespace voxelization { //Declare voxelization resolution #define GRID_RES 8 #define TILE_SIZE 3 __host__ __device__ int log_N() { return GRID_RES; } __host__ __device__ int log_T() { return TILE_SIZE; } //N is the total number of voxels (per dimension) __host__ __device__ int N() { return 1 << log_N(); } //M is the total number of tiles (per dimension) __host__ __device__ int M() { return 1 << (log_N() - log_T()); } //T is the tile size - voxels per tile (per dimension) __host__ __device__ int T() { return 1 << log_T(); } voxelpipe::FRContext<GRID_RES, TILE_SIZE>* context; bool first_time = true; //Utility function for computing voxel sizes __device__ void getSizes(const glm::vec3& bbox0, const glm::vec3& bbox1, glm::vec3& t_d, glm::vec3& p_d) { //Compute tile/grid sizes t_d = glm::vec3((bbox1.x - bbox0.x) / float(M()), (bbox1.y - bbox0.y) / float(M()), (bbox1.z - bbox0.z) / float(M())); p_d = glm::vec3(t_d.x / float(T()), t_d.y / float(T()), t_d.z / float(T())); } __device__ glm::vec3 getCenterFromIndex(int idx, const glm::vec3& bbox0, const glm::vec3& bbox1) { glm::vec3 p_d, t_d; getSizes(bbox0, bbox1, t_d, p_d); int T3 = T()*T()*T(); int tile_num = idx / T3; int pix_num = idx % T3; glm::vec3 cent; int tz = tile_num / (M()*M()) % M(); int pz = pix_num / (T()*T()) % T(); int ty = tile_num / M() % M(); int py = pix_num / T() % T(); int tx = tile_num % M(); int px = pix_num % T(); cent.x = bbox0.x + tx*t_d.x + px*p_d.x + p_d.x / 2.0f; cent.y = bbox0.y + ty*t_d.y + py*p_d.y + p_d.y / 2.0f; cent.z = bbox0.z + tz*t_d.z + pz*p_d.z + p_d.z / 2.0f; return cent; } __host__ float computeScale(const glm::vec3& bbox0, const glm::vec3& bbox1) { return (bbox1.x - bbox0.x)/float(N())/2.0f; } struct ColorShader { glm::vec3* texture; int tex_width; int tex_height; float* texcoord; int texcoord_size; __device__ float shade( const int tri_id, const float4 v0, const float4 v1, const float4 v2, const float3 n, const float bary0, const float bary1, const int3 xyz) const { //If there is no texture, just return green if (tex_width == 0) { return __int_as_float((255 << 8) + (127 << 24)); } //If there are no texcoordinates, just return the first value in the texture if (texcoord_size == 0) { int r = (int)(texture[0].r * 255.0); int g = (int)(texture[0].g * 255.0); int b = (int)(texture[0].b * 255.0); return __int_as_float(r+(g << 8) + (b << 16) + (127 << 24)); } //Get the texture coordinates from the triangle id int t1_x = texcoord[6 * tri_id] * tex_width; int t1_y = texcoord[6 * tri_id + 1] * tex_height; int t2_x = texcoord[6 * tri_id + 2] * tex_width; int t2_y = texcoord[6 * tri_id + 3] * tex_height; int t3_x = texcoord[6 * tri_id + 4] * tex_width; int t3_y = texcoord[6 * tri_id + 5] * tex_height; //Get the colors from the texture at these vertices glm::vec3 c1 = texture[t1_y * tex_width + t1_x]; glm::vec3 c2 = texture[t2_y * tex_width + t2_x]; glm::vec3 c3 = texture[t3_y * tex_width + t3_x]; //TODO: Interpolate using barycentric coordinates glm::vec3 color = c1; //Compute rgb components int r = (int) (clamp(color.r, 0.0f, 1.0f) * 255.0f); int g = (int) (clamp(color.g, 0.0f, 1.0f) * 255.0f); int b = (int) 
(clamp(color.b, 0.0f, 1.0f) * 255.0f); //Compact int val = r + (g << 8) + (b << 16) + (127 << 24); return __int_as_float(val); } }; __global__ void getOccupiedVoxels(void* fb, int M, int T, int* voxels) { int T3 = T*T*T; int M3 = M*M*M; int pix_num = (blockIdx.x * 256 % T3) + threadIdx.x; int tile_num = blockIdx.x * 256 / T3; if (pix_num < T3 && tile_num < M3) { //TODO: Is there any benefit in making this shared? float* tile; bool is_occupied; tile = (float*)fb + tile_num*T3; int alpha = __float_as_int(tile[pix_num]) >> 24; is_occupied = alpha > 0; if (is_occupied) { voxels[tile_num*T3 + pix_num] = tile_num*T3 + pix_num; } else { voxels[tile_num*T3 + pix_num] = -1; } } } //Thrust predicate for removal of empty voxels struct check_voxel { __host__ __device__ bool operator() (const int& c) { return (c != -1); } }; __global__ void extractValues(void* fb, int* voxels, int num_voxels, int* values) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < num_voxels) { //TODO: Make this support other storage_type's besides int32 float* tile = (float*)fb; values[index] = __float_as_int(tile[voxels[index]]); } } __global__ void createCubeMesh(const glm::vec4* voxels, const glm::vec4* values, const float scale_factor, const int num_voxels, float* cube_vbo, int cube_vbosize, int* cube_ibo, int cube_ibosize, float* cube_nbo, float* out_vbo, int* out_ibo, float* out_nbo, float* out_cbo) { //Get the index for the thread int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx < num_voxels) { int vbo_offset = idx * cube_vbosize; int ibo_offset = idx * cube_ibosize; glm::vec4 center = voxels[idx]; glm::vec4 color = values[idx]; for (int i = 0; i < cube_vbosize; i++) { if (i % 3 == 0) { out_vbo[vbo_offset + i] = cube_vbo[i] * scale_factor + center.x; out_cbo[vbo_offset + i] = color.r; } else if (i % 3 == 1) { out_vbo[vbo_offset + i] = cube_vbo[i] * scale_factor + center.y; out_cbo[vbo_offset + i] = color.g; } else { out_vbo[vbo_offset + i] = cube_vbo[i] * scale_factor + center.z; out_cbo[vbo_offset + i] = color.b; } out_nbo[vbo_offset + i] = cube_nbo[i]; } for (int i = 0; i < cube_ibosize; i++) { out_ibo[ibo_offset + i] = cube_ibo[i] + ibo_offset; } } } __global__ void createVoxelGrid(const int* voxels, const int* values, const glm::vec3 bbox0, const glm::vec3 bbox1, const int num_voxels, glm::vec4* centers, glm::vec4* colors) { //Get the index for the thread int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx < num_voxels) { glm::vec3 center = getCenterFromIndex(voxels[idx], bbox0, bbox1); centers[idx] = glm::vec4(center.x, center.y, center.z, 1.0f); int color = values[idx]; colors[idx].r = (float)((color & 0xFF) / 255.0); colors[idx].g = (float)(((color >> 8) & 0xFF) / 255.0); colors[idx].b = (float)(((color >> 16) & 0xFF) / 255.0); } } __host__ int voxelizeMesh(const Mesh &m_in, const bmp_texture* h_tex, const BoundingBox& box, int* d_voxels, int* d_values) { //Initialize sizes const int n_triangles = m_in.ibosize / 3; const int n_vertices = m_in.vbosize / 3; //Create host vectors thrust::host_vector<int4> h_triangles(n_triangles); thrust::host_vector<float4> h_vertices(n_vertices); //Fill in the data for (int i = 0; i < n_vertices; i++) { h_vertices[i].x = m_in.vbo[i * 3 + 0]; h_vertices[i].y = m_in.vbo[i * 3 + 1]; h_vertices[i].z = m_in.vbo[i * 3 + 2]; } for (int i = 0; i < n_triangles; i++) { h_triangles[i].x = m_in.ibo[i * 3 + 0]; h_triangles[i].y = m_in.ibo[i * 3 + 1]; h_triangles[i].z = m_in.ibo[i * 3 + 2]; } //Copy to device vectors thrust::device_vector<int4> d_triangles(h_triangles); 
thrust::device_vector<float4> d_vertices(h_vertices); if (first_time) { //Create the voxelpipe context context = new voxelpipe::FRContext<GRID_RES, TILE_SIZE>(); //Reserve data for voxelpipe context->reserve(n_triangles, 1024u * 1024u * 16u); } first_time = false; //Initialize the result data on the device thrust::device_vector<float> d_fb(M()*M()*M() * T()*T()*T()); //Copy the texture to the device glm::vec3 *device_tex = NULL; cudaMalloc((void**)&device_tex, h_tex->width * h_tex->height *sizeof(glm::vec3)); cudaMemcpy(device_tex, h_tex->data, h_tex->width * h_tex->height *sizeof(glm::vec3), cudaMemcpyHostToDevice); //Copy the texture coordinates to the device float* device_texcoord = NULL; cudaMalloc((void**)&device_texcoord, m_in.tbosize * sizeof(float)); cudaMemcpy(device_texcoord, m_in.tbo, m_in.tbosize *sizeof(float), cudaMemcpyHostToDevice); //Create the shader to be used that will write texture colors to voxels ColorShader my_shader; my_shader.texture = device_tex; my_shader.tex_height = h_tex->height; my_shader.tex_width = h_tex->width; my_shader.texcoord = device_texcoord; my_shader.texcoord_size = m_in.tbosize; //Perform coarse and fine voxelization context->coarse_raster(n_triangles, n_vertices, thrust::raw_pointer_cast(&d_triangles.front()), thrust::raw_pointer_cast(&d_vertices.front()), make_float3(box.bbox0.x, box.bbox0.y, box.bbox0.z), make_float3(box.bbox1.x, box.bbox1.y, box.bbox1.z)); context->fine_raster< voxelpipe::Float, voxelpipe::FP32S_FORMAT, voxelpipe::THIN_RASTER, voxelpipe::NO_BLENDING, ColorShader >( n_triangles, n_vertices, thrust::raw_pointer_cast(&d_triangles.front()), thrust::raw_pointer_cast(&d_vertices.front()), make_float3(box.bbox0.x, box.bbox0.y, box.bbox0.z), make_float3(box.bbox1.x, box.bbox1.y, box.bbox1.z), thrust::raw_pointer_cast(&d_fb.front()), my_shader); cudaFree(device_tex); cudaFree(device_texcoord); //Get voxel centers int numVoxels = N()*N()*N(); int* d_vox; cudaMalloc((void**)&d_vox, numVoxels*sizeof(int)); getOccupiedVoxels<<<N()*N()*N(), 256>>>(thrust::raw_pointer_cast(&d_fb.front()), M(), T(), d_vox); cudaDeviceSynchronize(); //Stream Compact voxels to remove the empties numVoxels = thrust::copy_if(thrust::device_pointer_cast(d_vox), thrust::device_pointer_cast(d_vox) + numVoxels, thrust::device_pointer_cast(d_voxels), check_voxel()) - thrust::device_pointer_cast(d_voxels); std::cout << "Num Voxels: " << numVoxels << std::endl; //Extract the values at these indices extractValues<<<ceil((float)numVoxels / 256.0f), 256 >>>(thrust::raw_pointer_cast(&d_fb.front()), d_voxels, numVoxels, d_values); cudaDeviceSynchronize(); cudaFree(d_vox); return numVoxels; } __host__ void voxelGridToMesh(const VoxelGrid& grid, const Mesh &m_cube, Mesh &m_out) { //Move cube data to GPU thrust::device_vector<float> d_vbo_cube(m_cube.vbo, m_cube.vbo + m_cube.vbosize); thrust::device_vector<int> d_ibo_cube(m_cube.ibo, m_cube.ibo + m_cube.ibosize); thrust::device_vector<float> d_nbo_cube(m_cube.nbo, m_cube.nbo + m_cube.nbosize); //Create output structs float* d_vbo_out; int* d_ibo_out; float* d_nbo_out; float* d_cbo_out; cudaMalloc((void**)&d_vbo_out, grid.size * m_cube.vbosize * sizeof(float)); cudaMalloc((void**)&d_ibo_out, grid.size * m_cube.ibosize * sizeof(int)); cudaMalloc((void**)&d_nbo_out, grid.size * m_cube.nbosize * sizeof(float)); cudaMalloc((void**)&d_cbo_out, grid.size * m_cube.nbosize * sizeof(float)); //Warn if vbo and nbo are not same size on cube if (m_cube.vbosize != m_cube.nbosize) { std::cout << "ERROR: cube vbo and nbo have different 
sizes." << std::endl; return; } //Create resulting cube-ized mesh createCubeMesh<<<ceil((float)grid.size / 256.0f), 256>>>(grid.centers, grid.colors, computeScale(grid.bbox.bbox0, grid.bbox.bbox1) / CUBE_MESH_SCALE, grid.size, thrust::raw_pointer_cast(&d_vbo_cube.front()), m_cube.vbosize, thrust::raw_pointer_cast(&d_ibo_cube.front()), m_cube.ibosize, thrust::raw_pointer_cast(&d_nbo_cube.front()), d_vbo_out, d_ibo_out, d_nbo_out, d_cbo_out); //Store output sizes m_out.vbosize = grid.size * m_cube.vbosize; m_out.ibosize = grid.size * m_cube.ibosize; m_out.nbosize = grid.size * m_cube.nbosize; m_out.cbosize = m_out.nbosize; //Memory allocation for the outputs m_out.vbo = (float*)malloc(m_out.vbosize * sizeof(float)); m_out.ibo = (int*)malloc(m_out.ibosize * sizeof(int)); m_out.nbo = (float*)malloc(m_out.nbosize * sizeof(float)); m_out.cbo = (float*)malloc(m_out.cbosize * sizeof(float)); //Sync here after doing some CPU work cudaDeviceSynchronize(); //Copy data back from GPU //TODO: Can we avoid this step by making everything run from device-side VBO/IBO/NBO/CBO? cudaMemcpy(m_out.vbo, d_vbo_out, m_out.vbosize*sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(m_out.ibo, d_ibo_out, m_out.ibosize*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(m_out.nbo, d_nbo_out, m_out.nbosize*sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(m_out.cbo, d_cbo_out, m_out.cbosize*sizeof(float), cudaMemcpyDeviceToHost); ///Free GPU memory cudaFree(d_vbo_out); cudaFree(d_ibo_out); cudaFree(d_nbo_out); cudaFree(d_cbo_out); } __host__ void meshToVoxelGrid(const Mesh &m_in, const bmp_texture* tex, VoxelGrid &grid_out) { //Voxelize the mesh input int numVoxels = N()*N()*N(); int* d_voxels; int* d_values; cudaMalloc((void**)&d_voxels, numVoxels*sizeof(int)); cudaMalloc((void**)&d_values, numVoxels*sizeof(int)); numVoxels = voxelizeMesh(m_in, tex, m_in.bbox, d_voxels, d_values); //Extract centers and colors cudaMalloc((void**)&(grid_out.centers), numVoxels*sizeof(glm::vec4)); cudaMalloc((void**)&(grid_out.colors), numVoxels*sizeof(glm::vec4)); createVoxelGrid<<<ceil((float)numVoxels / 256.0f), 256>>>(d_voxels, d_values, m_in.bbox.bbox0, m_in.bbox.bbox1, numVoxels, grid_out.centers, grid_out.colors); //Free old memory from the grid cudaFree(d_voxels); cudaFree(d_values); //Set the scale and size grid_out.scale = computeScale(m_in.bbox.bbox0, m_in.bbox.bbox1); grid_out.size = numVoxels; //Copy the bounding box grid_out.bbox = m_in.bbox; } } // namespace voxelization } // namespace octree_slam
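// The voxelization code above addresses voxels through a linear index that combines a tile index
// with an intra-tile index (see getCenterFromIndex). The short host-side sketch below is an
// illustration rather than part of octree_slam: it unpacks such an index back into integer voxel
// coordinates, assuming the same GRID_RES = 8 and TILE_SIZE = 3 used in this file; indexToVoxel
// is a hypothetical helper name.
#include <cstdio>

constexpr int LOG_N = 8, LOG_T = 3;
constexpr int M = 1 << (LOG_N - LOG_T);  // tiles per dimension
constexpr int T = 1 << LOG_T;            // voxels per tile per dimension

void indexToVoxel(int idx, int &vx, int &vy, int &vz)
{
    const int T3 = T * T * T;
    const int tile = idx / T3;  // which tile the voxel lives in
    const int pix  = idx % T3;  // position inside that tile
    const int tx = tile % M, ty = (tile / M) % M, tz = (tile / (M * M)) % M;
    const int px = pix % T,  py = (pix / T) % T,  pz = (pix / (T * T)) % T;
    vx = tx * T + px;
    vy = ty * T + py;
    vz = tz * T + pz;
}

int main()
{
    int x, y, z;
    indexToVoxel(12345, x, y, z);
    // The original code then scales these integer coordinates into the bounding box to get centers.
    printf("voxel 12345 -> (%d, %d, %d)\n", x, y, z);
    return 0;
}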
// Multidecrease.cu // 实现图像的多阈值N值化图像生成操作 #include "Multidecrease.h" #include <iostream> #include <fstream> #include <cmath> using namespace std; #include "ErrorCode.h" // 宏:DEF_BLOCK_X ºÍ DEF_BLOCK_Y // 定义了默认的线程块尺寸 #define DEF_BLOCK_X 32 #define DEF_BLOCK_Y 8 // Kernel 函数: _multidecrease_frontKer(前向 N 值化) // 根据给定的阈值集合对图像进行 N 值化处理。判断当前像素点灰度值所在的阈值区间, // 并将处于某个阈值区间的像素点设定为该阈值区间的前向阈值(即阈值区间的左端点)。 static __global__ void // Kernel 函数无返回值 _multidecrease_frontKer( ImageCuda inimg, // 输入图像 ImageCuda outimg, // 输出图像 unsigned char *thresholds, // 阈值集合 int thresnum // 阈值个数 ); // Kernel 函数: _multidecrease_backKer(后向 N 值化) // 根据给定的阈值集合对图像进行 N 值化处理。判断当前像素点灰度值所在的阈值区间, // 并将处于某个阈值区间的像素点设定为该阈值区间的后向阈值(即阈值区间的右端点)。 static __global__ void // Kernel 函数无返回值 _multidecrease_backKer( ImageCuda inimg, // 输入图像 ImageCuda outimg, // 输出图像 unsigned char *thresholds, // 阈值集合 int thresnum // 阈值个数 ); // Kernel 函数: _multidecrease_frontKer(前向 N 值化) static __global__ void _multidecrease_frontKer(ImageCuda inimg, ImageCuda outimg, unsigned char *thresholds, int thresnum) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的坐标 // 的 x 和 y 分量(其中,c 表示 column;r 表示 row)。由于我们采用了并行度 // 缩减的策略,令一个线程处理 4 个输出像素,这四个像素位于统一列的相邻 4 行 // 上,因此,对于 r 需要进行乘 4 计算。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= inimg.imgMeta.width || r >= inimg.imgMeta.height) return; // 计算第一个输入坐标点对应的图像数据数组下标。 int inidx = r * inimg.pitchBytes + c; // 计算第一个输出坐标点对应的图像数据数组下标。 int outidx = r * outimg.pitchBytes + c; // 读取第一个输入坐标点对应的像素值。 unsigned char intemp; intemp = inimg.imgMeta.imgData[inidx]; // 一个线程处理四个像素。 // 判断当前像素点的灰度值处于哪个阈值区间,并将该点的像素值设为阈值区间的 // 前向阈值。线程中处理的第一个点。 for (int i = 1; i < thresnum; i++) { if (intemp == 255) { outimg.imgMeta.imgData[outidx] = thresholds[thresnum - 2]; break; } if (intemp >= thresholds[i-1] && intemp < thresholds[i]) { outimg.imgMeta.imgData[outidx] = thresholds[i-1]; break; } } // 处理剩下的三个像素点。 for (int i = 0; i < 3; i++) { // 这三个像素点,每个像素点都在前一个的下一行,而 x 分量保持不变。因 // 此,需要检查这个像素点是否越界。检查只针对 y 分量即可,x 分量在各 // 点之间没有变化,故不用检查。 if (++r >= outimg.imgMeta.height) return; // 根据上一个像素点,计算当前像素点的对应的输出图像的下标。由于只有 y // 分量增加 1,所以下标只需要加上一个 pitch 即可,不需要在进行乘法计 // 算。 inidx += inimg.pitchBytes; outidx += outimg.pitchBytes; intemp = inimg.imgMeta.imgData[inidx]; // 判断当前像素点的灰度值处于哪个阈值区间,并将该点的像素值设为阈值区间的 // 前向阈值。 for (int j = 1; j < thresnum; j++) { if (intemp == 255) { outimg.imgMeta.imgData[outidx] = thresholds[thresnum - 2]; break; } if (intemp >= thresholds[j-1] && intemp < thresholds[j]) { outimg.imgMeta.imgData[outidx] = thresholds[j-1]; break; } } } } // Kernel 函数: _multidecrease_backKer(后向 N 值化) static __global__ void _multidecrease_backKer(ImageCuda inimg, ImageCuda outimg, unsigned char *thresholds, int thresnum) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的坐标 // 的 x 和 y 分量(其中,c 表示 column;r 表示 row)。由于我们采用了并行度 // 缩减的策略,令一个线程处理 4 个输出像素,这四个像素位于统一列的相邻 4 行 // 上,因此,对于 r 需要进行乘 4 计算。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= inimg.imgMeta.width || r >= inimg.imgMeta.height) return; // 计算第一个输入坐标点对应的图像数据数组下标。 int inidx = r * inimg.pitchBytes + c; // 计算第一个输出坐标点对应的图像数据数组下标。 int outidx = r * outimg.pitchBytes + c; // 读取第一个输入坐标点对应的像素值。 unsigned char intemp; intemp = inimg.imgMeta.imgData[inidx]; // 一个线程处理四个像素。 // 判断当前像素点的灰度值处于哪个阈值区间,并将该点的像素值设为阈值区间的 // 后向阈值。线程中处理的第一个点。线程中处理的第一个点。 for (int i = 1; i < thresnum; i++) { if (intemp == 0) { 
outimg.imgMeta.imgData[outidx] = thresholds[1]; break; } if (intemp > thresholds[i-1] && intemp <= thresholds[i]) { outimg.imgMeta.imgData[outidx] = thresholds[i]; break; } } // 处理剩下的三个像素点。 for (int i = 0; i < 3; i++) { // 这三个像素点,每个像素点都在前一个的下一行,而 x 分量保持不变。因 // 此,需要检查这个像素点是否越界。检查只针对 y 分量即可,x 分量在各 // 点之间没有变化,故不用检查。 if (++r >= outimg.imgMeta.height) return; // 根据上一个像素点,计算当前像素点的对应的输出图像的下标。由于只有 y // 分量增加 1,所以下标只需要加上一个 pitch 即可,不需要在进行乘法计 // 算。 inidx += inimg.pitchBytes; outidx += outimg.pitchBytes; intemp = inimg.imgMeta.imgData[inidx]; // 判断当前像素点的灰度值处于哪个阈值区间,并将该点的像素值设为阈值区间的 // 后向阈值。 for (int j = 1; j < thresnum; j++) { if (intemp == 0) { outimg.imgMeta.imgData[outidx] = thresholds[1]; break; } if (intemp > thresholds[j-1] && intemp <= thresholds[j]) { outimg.imgMeta.imgData[outidx] = thresholds[j]; break; } } } } // 存储阈值的结构 ThresNode class ThresNode { public: int thres; // 当前阈值节点的阈值 int rangeA; // 当前阈值节点的前向范围 int rangeB; // 当前阈值节点的后向范围 ThresNode *leftchild; // 当前阈值节点的左子节点 ThresNode *rightchild; // 当前阈值节点的右子节点 ThresNode(){}; }; // 给定直方图信息及搜索范围,在此范围内搜索双峰之间的谷值 __host__ int gettrough(int rangea, int rangeb, unsigned int *his, unsigned int widthrange, unsigned int pixelrange) { // 判断区间的值是否正确,当范围出错时返回 -1 if(rangea < 0 || rangeb < 0 || rangea > 255 || rangeb > 255 || rangea >= rangeb ||abs(rangea - rangeb) < widthrange) { return -1; } int minnum = 0; // 范围内像素点数目最少的灰度值 for(int i = rangea; i <= rangeb; i++){ if (his[i] < his[minnum]) minnum = i; } int firstpeak = minnum; // 范围内的第一峰值 int secondpeak = minnum; // 范围内的第二峰值 int secondfront = 1; // 判断坐标轴上的位置,第二峰值是否在第一峰值的前方 // 搜索第一峰值 for (int i = rangea; i <= rangeb; i++) { if (his[i] > his[firstpeak]) { firstpeak = i; } } int trough = firstpeak; // 双峰之间的谷值 // 分别将区间的左右顶点值与第一峰值做差,与 widthrange 做比较,判断能否求得 // 第二峰值。若大于 widthrange,则可以求得;若小于,则不可求得。 if ((firstpeak - rangea) >= widthrange) { for (int i = rangea; i < (firstpeak - widthrange); i++) { if (his[i] > his[secondpeak] && his[i] < his[firstpeak]) secondpeak = i; } } else { // 不在范围内,则第二峰值不可能在第一峰值的前方。 secondfront = 0; } if (rangeb - firstpeak >= widthrange) { for (int i = (firstpeak + widthrange); i < rangeb; i++) { if (his[i] > his[secondpeak] && his[i] < his[firstpeak]) { secondpeak = i; // 此时代表第二峰值在第一峰值的后方 secondfront = 0; } } } else if (secondfront == 0) { // 第一峰值的前后均不可求得第二峰值,故无法求得谷值,故退出计算。 return -1; } // 第一峰值在前,第二峰值在后 if (secondfront == 0) { for (int i = firstpeak + 1; i < secondpeak; i++){ if (his[i] < his[trough]) trough = i; } } // 第一峰值在后,第二峰值在前 else { for (int i = secondpeak + 1; i < firstpeak; i++){ if (his[i] < his[trough]) trough = i; } } // 若第二峰值与谷值的像素值的差比设定的范围小,则说明该谷值无效,返回 -1 if ((his[secondpeak] - his[trough]) < pixelrange ) { return -1; } else return trough; }; // 根据直方图信息,创建一棵存储阈值信息的二叉树 void createtree(ThresNode *tree, unsigned int * his, unsigned int widthrange, unsigned int pixelrange) { // 获取当前节点的阈值 int thres = gettrough(tree->rangeA, tree->rangeB, his, widthrange, pixelrange); // 判断阈值节点是否合法 if (thres == -1) { tree->thres = -1; } else{ tree->thres = thres; // 为阈值节点创建其左子节点,并设定左子节点的搜索范围 ThresNode * leftc = new ThresNode(); tree->leftchild = leftc; leftc->rangeA = tree->rangeA; leftc->rangeB = tree->thres; createtree(tree->leftchild, his, widthrange, pixelrange); // 为阈值节点创建其右子节点,并设定其右子节点的搜索范围 ThresNode *rightc = new ThresNode(); tree->rightchild = rightc; rightc->rangeA = tree->thres + 1; rightc->rangeB = tree->rangeB; createtree(tree->rightchild, his, widthrange, pixelrange); } }; // 获取所有阈值 void searchtree(ThresNode *tree, unsigned char *thresholds, int &thresnum) { if (tree -> thres != -1) { // 
    // Store all thresholds in ascending order by performing an in-order
    // traversal of the binary tree.
    searchtree(tree->leftchild, thresholds, thresnum);
    thresholds[thresnum++] = (unsigned char)tree->thres;
    searchtree(tree->rightchild, thresholds, thresnum);
    }
};

// Host member method: multidecrease (multi-threshold N-level reduction)
__host__ int Multidecrease::multidecrease(Image *inimg, Image *outimg)
{
    // Check whether the input image is NULL; if so, report the error and
    // return directly.
    if (inimg == NULL)
        return NULL_POINTER;

    // This block performs the image pre-processing work, which mainly
    // prepares Device memory for the input and output images so that they
    // can hold the data.
    int errcode;  // Local variable, error code

    // Copy the input image to Device memory.
    errcode = ImageBasicOp::copyToCurrentDevice(inimg);
    if (errcode != NO_ERROR)
        return errcode;

    // Copy the output image to Device memory.
    errcode = ImageBasicOp::copyToCurrentDevice(outimg);
    if (errcode != NO_ERROR) {
        // If the output image has no data (so the copy above fails), create
        // an image with the same size as the ROI sub-image of the input
        // image.
        errcode = ImageBasicOp::makeAtCurrentDevice(
                outimg, inimg->roiX2 - inimg->roiX1,
                inimg->roiY2 - inimg->roiY1);
        // If creating the image also fails, the operation has failed
        // completely; report the error and return.
        if (errcode != NO_ERROR)
            return errcode;
    }

    // Extract the ROI sub-image of the input image.
    ImageCuda insubimgCud;
    errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud);
    if (errcode != NO_ERROR)
        return errcode;

    // Extract the ROI sub-image of the output image.
    ImageCuda outsubimgCud;
    errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud);
    if (errcode != NO_ERROR)
        return errcode;

    // Adjust the width and height of the two sub-images, keeping the smaller
    // value in each dimension so that both sub-images share the same size.
    if (insubimgCud.imgMeta.width > outsubimgCud.imgMeta.width)
        insubimgCud.imgMeta.width = outsubimgCud.imgMeta.width;
    else
        outsubimgCud.imgMeta.width = insubimgCud.imgMeta.width;
    if (insubimgCud.imgMeta.height > outsubimgCud.imgMeta.height)
        insubimgCud.imgMeta.height = outsubimgCud.imgMeta.height;
    else
        outsubimgCud.imgMeta.height = insubimgCud.imgMeta.height;

    // Get the histogram of the current image.
    Histogram h;
    unsigned int his[256];
    h.histogram(inimg, his, true);

    // Storage for all coarse segmentation thresholds of the current image.
    unsigned char *devthreshlods;
    unsigned char *hostthresholds = new unsigned char [20];

    // Number of coarse segmentation thresholds of the current image.
    // For convenience the first threshold is set to 0.
    int thresnum = 1;
    hostthresholds[0] = 0;

    // Create the root node of the threshold tree and set its search range.
    ThresNode * tn = new ThresNode();
    tn->rangeA = 0;
    tn->rangeB = 255;

    // Build the threshold binary tree from the histogram of the current image.
    createtree(tn, his, this->getwidthrange(), this->getpixelrange());

    // Traverse the binary tree and store the thresholds.
    searchtree(tn, hostthresholds, thresnum);

    // Total number of pixels in the image.
    int sumpixel = 0;
    for (int i = 0; i < 256; i++) {
        sumpixel += his[i];
    }

    // pix_w[i] records the fraction of pixels whose value equals i.
    float pix_w[256];
    // Compute pix_w[i].
    for (int i = 0; i < 256; i++) {
        pix_w[i] = ((float)his[i])/sumpixel;
    }

    // Use the OTSU method to search around each coarse segmentation threshold
    // and find the best threshold, i.e. the one that minimizes the
    // within-class variance.
    for (int countthres = 0; countthres < thresnum - 1; countthres++){
        float min = 100000.0;  // Minimum within-class variance
        int threshold = 0;     // Best threshold
        float Wk = 0.0;        // Accumulated probability of class k
        float Ukp = 0.0;       // Accumulated value-times-probability of class k
        float Uk = 0.0;        // Mean of class k
        float Qk = 0.0;        // Variance of class k
        // Within the relaxation margin of each coarse threshold, run the OTSU
        // search until the smallest within-class variance is found; the
        // corresponding threshold is the best one.
        for (int j = -5; j < 6; j++) {
            for (int i = hostthresholds[countthres] + 1;
                 i <= (hostthresholds[countthres + 1] + j); i++){
                // Accumulate the class probability.
                Wk+=pix_w[i];
                // Accumulate the product of pixel value and probability.
                Ukp+=i * pix_w[i];
            }
            // Compute the class mean.
            Uk = Ukp/Wk;
            for (int i = hostthresholds[countthres] + 1;
                 i <= (hostthresholds[countthres + 1] + j); i++){
                // Search again and compute the class variance.
                Qk = (i - Uk)*(i - Uk) * pix_w[i];
            }
            // If the current variance is smaller than min, overwrite the
            // minimum and store the current threshold.
            if (min > Qk) {
                min = Qk;
                threshold = hostthresholds[countthres + 1] + j;
            }
        }
        // Update the threshold set with the best threshold that was found.
        hostthresholds[countthres + 1] = threshold;
    }

    // Return the set of best thresholds through threshold, skipping the first
    // threshold (0).
    threshold = new unsigned char[thresnum -1];
    for (int i = 1; i < thresnum; i++) {
        this->threshold[i -1] = hostthresholds[i];
    }

    // Set the last threshold of the set to 255.
    hostthresholds[thresnum] = 255;

    // Allocate device memory for the threshold array.
    errcode = cudaMalloc((void **)&devthreshlods,
                         (thresnum + 1) * sizeof (unsigned char));
    if (errcode != cudaSuccess) {
        cudaFree(devthreshlods);
        return errcode;
    }

    // Initialize the device-side threshold array.
    errcode = cudaMemset(devthreshlods, 0,
                         (thresnum + 1) * sizeof (unsigned char));
    if (errcode != cudaSuccess) {
        cudaFree(devthreshlods);
        return errcode;
    }

    // Copy the threshold array to the device side.
    errcode = cudaMemcpy(devthreshlods, hostthresholds,
                         (thresnum + 1) * sizeof (unsigned char),
                         cudaMemcpyHostToDevice);
    if (errcode != cudaSuccess) {
        cudaFree(devthreshlods);
        return errcode;
    }

    // Compute the block size and grid size used to launch the kernel.
    dim3 blocksize, gridsize;
    blocksize.x = DEF_BLOCK_X;
    blocksize.y = DEF_BLOCK_Y;
    gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x;
    gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y * 4 - 1) /
                 (blocksize.y * 4);

    // Check whether stateflag selects forward or backward labeling and call
    // the corresponding kernel.
    if (this->stateflag == MD_FRONT) {
        _multidecrease_frontKer<<<gridsize, blocksize>>>(
                insubimgCud, outsubimgCud, devthreshlods, (thresnum + 1));
    } else {
        _multidecrease_backKer<<<gridsize, blocksize>>>(
                insubimgCud, outsubimgCud, devthreshlods, (thresnum + 1));
    }

    // If the CUDA call failed, release the buffers and return an error code.
    if (cudaGetLastError() != cudaSuccess) {
        cudaFree(devthreshlods);
        delete [] hostthresholds;
        return CUDA_ERROR;
    }

    // Release the temporary host and device threshold buffers.
    cudaFree(devthreshlods);
    delete [] hostthresholds;

    // Done, return.
    return NO_ERROR;
}
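For reference, here is a freestanding host-side sketch of the within-class-variance refinement that the loop above performs around each coarse threshold. It assumes nothing beyond a normalized 256-bin histogram, it resets the accumulators for every candidate offset (the usual Otsu-style bookkeeping), and all names are illustrative rather than part of the Multidecrease class.

// Minimal sketch: scan a +/-5 relaxation margin around one coarse threshold
// and keep the candidate whose class [lower + 1, t] has the smallest
// within-class variance. pix_w is a normalized 256-bin histogram.
#include <cstdio>

static int refine_threshold(const float pix_w[256], int lower, int coarse)
{
    float best_var = 1.0e9f;
    int best_t = coarse;
    for (int j = -5; j <= 5; j++) {
        int t = coarse + j;
        if (t <= lower || t > 255)
            continue;
        // Class probability and mean over [lower + 1, t].
        float wk = 0.0f, ukp = 0.0f;
        for (int i = lower + 1; i <= t; i++) {
            wk  += pix_w[i];
            ukp += i * pix_w[i];
        }
        if (wk <= 0.0f)
            continue;
        float uk = ukp / wk;
        // Within-class variance over the same range.
        float qk = 0.0f;
        for (int i = lower + 1; i <= t; i++)
            qk += (i - uk) * (i - uk) * pix_w[i];
        if (qk < best_var) {
            best_var = qk;
            best_t = t;
        }
    }
    return best_t;
}

int main()
{
    // Toy histogram: uniform, so the smallest candidate class wins.
    float pix_w[256];
    for (int i = 0; i < 256; i++)
        pix_w[i] = 1.0f / 256.0f;
    printf("refined threshold: %d\n", refine_threshold(pix_w, 0, 125));
    return 0;
}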
#include <cstdio> #include <random> #include <hip/hip_runtime.h> #include "tables.h" // problem size constexpr unsigned int N(1024); constexpr unsigned int Nd2(N / 2); constexpr unsigned int voxelXLv1(16); constexpr unsigned int voxelYLv1(16); constexpr unsigned int voxelZLv1(64); constexpr unsigned int gridXLv1((N - 1) / (voxelXLv1 - 1)); constexpr unsigned int gridYLv1((N - 1) / (voxelYLv1 - 1)); constexpr unsigned int gridZLv1((N - 1) / (voxelZLv1 - 1)); constexpr unsigned int countingThreadNumLv1(128); constexpr unsigned int blockNum(gridXLv1* gridYLv1* gridZLv1); constexpr unsigned int countingBlockNumLv1(blockNum / countingThreadNumLv1); constexpr unsigned int voxelXLv2(4); constexpr unsigned int voxelYLv2(4); constexpr unsigned int voxelZLv2(8); constexpr unsigned int blockXLv2(5); constexpr unsigned int blockYLv2(5); constexpr unsigned int blockZLv2(9); constexpr unsigned int voxelNumLv2(blockXLv2* blockYLv2* blockZLv2); constexpr unsigned int countingThreadNumLv2(1024); constexpr unsigned int gridXLv2(gridXLv1* blockXLv2); constexpr unsigned int gridYLv2(gridYLv1* blockYLv2); //constexpr unsigned int gridZLv2(gridZLv1* blockZLv2); __inline__ __device__ float f(unsigned int x, unsigned int y, unsigned int z) { constexpr float d(2.0f / N); float xf((int(x - Nd2)) * d);//[-1, 1) float yf((int(z - Nd2)) * d); float zf((int(z - Nd2)) * d); return 1.f - 16.f * xf * yf * zf - 4.f * (xf * xf + yf * yf + zf * zf); } __inline__ __device__ float zeroPoint(unsigned int x, float v0, float v1, float isoValue) { return ((x * (v1 - isoValue) + (x + 1) * (isoValue - v0)) / (v1 - v0) - Nd2) * (2.0f / N); } __inline__ __device__ float transformToCoord(unsigned int x) { return (int(x) - int(Nd2)) * (2.0f / N); } __global__ void computeMinMaxLv1(float*__restrict minMax) { __shared__ float sminMax[64]; constexpr unsigned int threadNum(voxelXLv1 * voxelYLv1); constexpr unsigned int warpNum(threadNum / 32); unsigned int x(blockIdx.x * (voxelXLv1 - 1) + threadIdx.x); unsigned int y(blockIdx.y * (voxelYLv1 - 1) + threadIdx.y); unsigned int z(blockIdx.z * (voxelZLv1 - 1)); unsigned int tid(threadIdx.x + voxelXLv1 * threadIdx.y); unsigned int laneid = tid % 32; unsigned int blockid(blockIdx.x + gridXLv1 * (blockIdx.y + gridYLv1 * blockIdx.z)); unsigned int warpid(tid >> 5); float v(f(x, y, z)); float minV(v), maxV(v); for (int c0(1); c0 < voxelZLv1; ++c0) { v = f(x, y, z + c0); if (v < minV)minV = v; if (v > maxV)maxV = v; } #pragma unroll for (int c0(16); c0 > 0; c0 /= 2) { float t0, t1; t0 = __shfl_down(minV, c0); t1 = __shfl_down(maxV, c0); if (t0 < minV)minV = t0; if (t1 > maxV)maxV = t1; } if (laneid == 0) { sminMax[warpid] = minV; sminMax[warpid + warpNum] = maxV; } __syncthreads(); if (warpid == 0) { minV = sminMax[laneid]; maxV = sminMax[laneid + warpNum]; #pragma unroll for (int c0(warpNum / 2); c0 > 0; c0 /= 2) { float t0, t1; t0 = __shfl_down(minV, c0); t1 = __shfl_down(maxV, c0); if (t0 < minV)minV = t0; if (t1 > maxV)maxV = t1; } if (laneid == 0) { minMax[blockid * 2] = minV; minMax[blockid * 2 + 1] = maxV; } } } __global__ void compactLv1( float isoValue, const float*__restrict minMax, unsigned int*__restrict blockIndices, unsigned int*__restrict countedBlockNum) { __shared__ unsigned int sums[32]; constexpr unsigned int warpNum(countingThreadNumLv1 / 32); unsigned int tid(threadIdx.x); unsigned int laneid = tid % 32; unsigned int bIdx(blockIdx.x * countingThreadNumLv1 + tid); unsigned int warpid(tid >> 5); unsigned int test; if (minMax[2 * bIdx] <= isoValue && minMax[2 * bIdx + 1] >= 
isoValue)test = 1; else test = 0; unsigned int testSum(test); #pragma unroll for (int c0(1); c0 < 32; c0 *= 2) { unsigned int tp(__shfl_up(testSum, c0)); if (laneid >= c0)testSum += tp; } if (laneid == 31)sums[warpid] = testSum; __syncthreads(); if (warpid == 0) { unsigned int warpSum = sums[laneid]; #pragma unroll for (int c0(1); c0 < warpNum; c0 *= 2) { unsigned int tp(__shfl_up(warpSum, c0)); if (laneid >= c0) warpSum += tp; } sums[laneid] = warpSum; } __syncthreads(); if (warpid != 0)testSum += sums[warpid - 1]; if (tid == countingThreadNumLv1 - 1 && testSum != 0) sums[31] = atomicAdd(countedBlockNum, testSum); __syncthreads(); if (test)blockIndices[testSum + sums[31] - 1] = bIdx; } __global__ void computeMinMaxLv2( const unsigned int*__restrict blockIndicesLv1, float*__restrict minMax) { unsigned int tid(threadIdx.x); unsigned int voxelOffset(threadIdx.y); unsigned int blockIndex(blockIndicesLv1[blockIdx.x]); unsigned int tp(blockIndex); unsigned int x((blockIndex % gridXLv1) * (voxelXLv1 - 1) + (voxelOffset % 5) * (voxelXLv2 - 1) + (tid & 3)); tp /= gridXLv1; unsigned int y((tp % gridYLv1) * (voxelYLv1 - 1) + (voxelOffset / 5) * (voxelYLv2 - 1) + (tid >> 2)); tp /= gridYLv1; unsigned int z(tp * (voxelZLv1 - 1)); float v(f(x, y, z)); float minV(v), maxV(v); unsigned int idx(2 * (voxelOffset + voxelNumLv2 * blockIdx.x)); for (int c0(0); c0 < blockZLv2; ++c0) { for (int c1(1); c1 < voxelZLv2; ++c1) { v = f(x, y, z + c1); if (v < minV)minV = v; if (v > maxV)maxV = v; } z += voxelZLv2 - 1; #pragma unroll for (int c1(8); c1 > 0; c1 /= 2) { float t0, t1; t0 = __shfl_down(minV, c1); t1 = __shfl_down(maxV, c1); if (t0 < minV)minV = t0; if (t1 > maxV)maxV = t1; } if (tid == 0) { minMax[idx] = minV; minMax[idx + 1] = maxV; constexpr unsigned int offsetSize(2 * blockXLv2 * blockYLv2); idx += offsetSize; } minV = v; maxV = v; } } __global__ void compactLv2( float isoValue, const float*__restrict minMax, const unsigned int*__restrict blockIndicesLv1, unsigned int*__restrict blockIndicesLv2, unsigned int counterBlockNumLv1, unsigned int*__restrict countedBlockNumLv2) { __shared__ unsigned int sums[32]; constexpr unsigned int warpNum(countingThreadNumLv2 / 32); unsigned int tid(threadIdx.x); unsigned int laneid = tid % 32; unsigned int warpid(tid >> 5); unsigned int id0(tid + blockIdx.x * countingThreadNumLv2); unsigned int id1(id0 / voxelNumLv2); unsigned int test; if (id1 < counterBlockNumLv1) { if (minMax[2 * id0] <= isoValue && minMax[2 * id0 + 1] >= isoValue) test = 1; else test = 0; } else test = 0; unsigned int testSum(test); #pragma unroll for (int c0(1); c0 < 32; c0 *= 2) { unsigned int tp(__shfl_up(testSum, c0)); if (laneid >= c0)testSum += tp; } if (laneid == 31)sums[warpid] = testSum; __syncthreads(); if (warpid == 0) { unsigned warpSum = sums[laneid]; #pragma unroll for (int c0(1); c0 < warpNum; c0 *= 2) { unsigned int tp(__shfl_up(warpSum, c0)); if (laneid >= c0)warpSum += tp; } sums[laneid] = warpSum; } __syncthreads(); if (warpid != 0)testSum += sums[warpid - 1]; if (tid == countingThreadNumLv2 - 1) sums[31] = atomicAdd(countedBlockNumLv2, testSum); __syncthreads(); if (test) { unsigned int bIdx1(blockIndicesLv1[id1]); unsigned int bIdx2; unsigned int x1, y1, z1; unsigned int x2, y2, z2; unsigned int tp1(bIdx1); unsigned int tp2((tid + blockIdx.x * countingThreadNumLv2) % voxelNumLv2); x1 = tp1 % gridXLv1; x2 = tp2 % blockXLv2; tp1 /= gridXLv1; tp2 /= blockXLv2; y1 = tp1 % gridYLv1; y2 = tp2 % blockYLv2; z1 = tp1 / gridYLv1; z2 = tp2 / blockYLv2; bIdx2 = x2 + blockXLv2 * (x1 + 
gridXLv1 * (y2 + blockYLv2 * (y1 + gridYLv1 * (z1 * blockZLv2 + z2)))); blockIndicesLv2[testSum + sums[31] - 1] = bIdx2; } } __global__ void generatingTriangles( float isoValue, const unsigned int*__restrict blockIndicesLv2, const unsigned short *__restrict distinctEdgesTable, const int *__restrict triTable, const uchar4 *__restrict edgeIDTable, unsigned int*__restrict countedVerticesNum, unsigned int*__restrict countedTrianglesNum, unsigned long long*__restrict triangles, float*__restrict coordX, float*__restrict coordY, float*__restrict coordZ, float*__restrict coordZP) { __shared__ unsigned short vertexIndices[voxelZLv2][voxelYLv2][voxelXLv2]; __shared__ float value[voxelZLv2 + 1][voxelYLv2 + 1][voxelXLv2 + 1]; __shared__ unsigned int sumsVertices[32]; __shared__ unsigned int sumsTriangles[32]; unsigned int blockId(blockIndicesLv2[blockIdx.x]); unsigned int tp(blockId); unsigned int x((tp % gridXLv2) * (voxelXLv2 - 1) + threadIdx.x); tp /= gridXLv2; unsigned int y((tp % gridYLv2) * (voxelYLv2 - 1) + threadIdx.y); unsigned int z((tp / gridYLv2) * (voxelZLv2 - 1) + threadIdx.z); unsigned int eds(7); float v(value[threadIdx.z][threadIdx.y][threadIdx.x] = f(x, y, z)); if (threadIdx.x == voxelXLv2 - 1) { eds &= 6; value[threadIdx.z][threadIdx.y][voxelXLv2] = f(x + 1, y, z); if (threadIdx.y == voxelYLv2 - 1) value[threadIdx.z][voxelYLv2][voxelXLv2] = f(x + 1, y + 1, z); } if (threadIdx.y == voxelYLv2 - 1) { eds &= 5; value[threadIdx.z][voxelYLv2][threadIdx.x] = f(x, y + 1, z); if (threadIdx.z == voxelZLv2 - 1) value[voxelZLv2][voxelYLv2][threadIdx.x] = f(x, y + 1, z + 1); } if (threadIdx.z == voxelZLv2 - 1) { eds &= 3; value[voxelZLv2][threadIdx.y][threadIdx.x] = f(x, y, z + 1); if (threadIdx.x == voxelXLv2 - 1) value[voxelZLv2][threadIdx.y][voxelXLv2] = f(x + 1, y, z + 1); } eds <<= 13; __syncthreads(); unsigned int cubeCase(0); if (value[threadIdx.z][threadIdx.y][threadIdx.x] < isoValue) cubeCase |= 1; if (value[threadIdx.z][threadIdx.y][threadIdx.x + 1] < isoValue) cubeCase |= 2; if (value[threadIdx.z][threadIdx.y + 1][threadIdx.x + 1] < isoValue) cubeCase |= 4; if (value[threadIdx.z][threadIdx.y + 1][threadIdx.x] < isoValue) cubeCase |= 8; if (value[threadIdx.z + 1][threadIdx.y][threadIdx.x] < isoValue) cubeCase |= 16; if (value[threadIdx.z + 1][threadIdx.y][threadIdx.x + 1] < isoValue) cubeCase |= 32; if (value[threadIdx.z + 1][threadIdx.y + 1][threadIdx.x + 1] < isoValue) cubeCase |= 64; if (value[threadIdx.z + 1][threadIdx.y + 1][threadIdx.x] < isoValue) cubeCase |= 128; unsigned int distinctEdges(eds ? distinctEdgesTable[cubeCase] : 0); unsigned int numTriangles(eds != 0xe000 ? 
0 : distinctEdges & 7); unsigned int numVertices(__popc(distinctEdges &= eds)); unsigned int laneid = (threadIdx.x + voxelXLv2 * (threadIdx.y + voxelYLv2 * threadIdx.z)) % 32; unsigned warpid((threadIdx.x + voxelXLv2 * (threadIdx.y + voxelYLv2 * threadIdx.z)) >> 5); constexpr unsigned int threadNum(voxelXLv2 * voxelYLv2 * voxelZLv2); constexpr unsigned int warpNum(threadNum / 32); unsigned int sumVertices(numVertices); unsigned int sumTriangles(numTriangles); #pragma unroll for (int c0(1); c0 < 32; c0 *= 2) { unsigned int tp0(__shfl_up(sumVertices, c0)); unsigned int tp1(__shfl_up(sumTriangles, c0)); if (laneid >= c0) { sumVertices += tp0; sumTriangles += tp1; } } if (laneid == 31) { sumsVertices[warpid] = sumVertices; sumsTriangles[warpid] = sumTriangles; } __syncthreads(); if (warpid == 0) { unsigned warpSumVertices = sumsVertices[laneid]; unsigned warpSumTriangles = sumsTriangles[laneid]; #pragma unroll for (int c0(1); c0 < warpNum; c0 *= 2) { unsigned int tp0(__shfl_up(warpSumVertices, c0)); unsigned int tp1(__shfl_up(warpSumTriangles, c0)); if (laneid >= c0) { warpSumVertices += tp0; warpSumTriangles += tp1; } } sumsVertices[laneid] = warpSumVertices; sumsTriangles[laneid] = warpSumTriangles; } __syncthreads(); if (warpid != 0) { sumVertices += sumsVertices[warpid - 1]; sumTriangles += sumsTriangles[warpid - 1]; } if (eds == 0) { sumsVertices[31] = atomicAdd(countedVerticesNum, sumVertices); sumsTriangles[31] = atomicAdd(countedTrianglesNum, sumTriangles); } unsigned int interOffsetVertices(sumVertices - numVertices); sumVertices = interOffsetVertices + sumsVertices[31];//exclusive offset sumTriangles = sumTriangles + sumsTriangles[31] - numTriangles;//exclusive offset vertexIndices[threadIdx.z][threadIdx.y][threadIdx.x] = interOffsetVertices | distinctEdges; __syncthreads(); for (unsigned int c0(0); c0 < numTriangles; ++c0) { #pragma unroll for (unsigned int c1(0); c1 < 3; ++c1) { int edgeID(triTable[16 * cubeCase + 3 * c0 + c1]); uchar4 edgePos(edgeIDTable[edgeID]); unsigned short vertexIndex(vertexIndices[threadIdx.z + edgePos.z][threadIdx.y + edgePos.y][threadIdx.x + edgePos.x]); unsigned int tp(__popc(vertexIndex >> (16 - edgePos.w)) + (vertexIndex & 0x1fff)); atomicAdd(triangles, (unsigned long long)(sumsVertices[31] + tp)); } } // sumVertices may be too large for a GPU memory float zp = 0.f, cx = 0.f, cy = 0.f, cz = 0.f; if (distinctEdges & (1 << 15)) { zp = zeroPoint(x, v, value[threadIdx.z][threadIdx.y][threadIdx.x + 1], isoValue); cy = transformToCoord(y); cz = transformToCoord(z); } if (distinctEdges & (1 << 14)) { cx = transformToCoord(x); zp += zeroPoint(y, v, value[threadIdx.z][threadIdx.y + 1][threadIdx.x], isoValue); cz += transformToCoord(z); } if (distinctEdges & (1 << 13)) { cx += transformToCoord(x); cy += transformToCoord(y); zp += zeroPoint(z, v, value[threadIdx.z + 1][threadIdx.y][threadIdx.x], isoValue); } atomicAdd(coordX, cx); atomicAdd(coordY, cy); atomicAdd(coordZ, cz); atomicAdd(coordZP, zp); } int main(int argc, char* argv[]) { unsigned int iterations = atoi(argv[1]); std::uniform_real_distribution<float>rd(0, 1); std::mt19937 mt(123); float* minMaxLv1Device; float* minMaxLv2Device; unsigned int* blockIndicesLv1Device; unsigned int* blockIndicesLv2Device; unsigned int* countedBlockNumLv1Device; unsigned int* countedBlockNumLv2Device; unsigned short* distinctEdgesTableDevice; int* triTableDevice; uchar4* edgeIDTableDevice; unsigned int* countedVerticesNumDevice; unsigned int* countedTrianglesNumDevice; unsigned long long* trianglesDevice; float 
*coordXDevice; float *coordYDevice; float *coordZDevice; float *coordZPDevice; hipMalloc(&minMaxLv1Device, blockNum * 2 * sizeof(float)); hipMalloc(&blockIndicesLv1Device, blockNum * sizeof(unsigned int)); hipMalloc(&countedBlockNumLv1Device, sizeof(unsigned int)); hipMalloc(&countedBlockNumLv2Device, sizeof(unsigned int)); hipMalloc(&distinctEdgesTableDevice, sizeof(distinctEdgesTable)); hipMalloc(&triTableDevice, sizeof(triTable)); hipMalloc(&edgeIDTableDevice, sizeof(edgeIDTable)); hipMalloc(&countedVerticesNumDevice, sizeof(unsigned int)); hipMalloc(&countedTrianglesNumDevice, sizeof(unsigned int)); hipMemcpy(distinctEdgesTableDevice, distinctEdgesTable, sizeof(distinctEdgesTable), hipMemcpyHostToDevice); hipMemcpy(triTableDevice, triTable, sizeof(triTable), hipMemcpyHostToDevice); hipMemcpy(edgeIDTableDevice, edgeIDTable, sizeof(edgeIDTable), hipMemcpyHostToDevice); // simulate rendering without memory allocation for vertices and triangles hipMalloc(&trianglesDevice, sizeof(unsigned long long)); hipMalloc(&coordXDevice, sizeof(float)); hipMalloc(&coordYDevice, sizeof(float)); hipMalloc(&coordZDevice, sizeof(float)); hipMalloc(&coordZPDevice, sizeof(float)); const dim3 BlockSizeLv1{ voxelXLv1, voxelYLv1, 1 }; const dim3 GridSizeLv1{ gridXLv1, gridYLv1, gridZLv1 }; const dim3 BlockSizeLv2{ voxelXLv2 * voxelYLv2, blockXLv2 * blockYLv2, 1 }; const dim3 BlockSizeGenerating{ voxelXLv2, voxelYLv2, voxelZLv2 }; float isoValue(-0.9f); unsigned int countedBlockNumLv1; unsigned int countedBlockNumLv2; unsigned int countedVerticesNum; unsigned int countedTrianglesNum; for (unsigned int c0(0); c0 < iterations; ++c0) { hipDeviceSynchronize(); hipMemset(countedBlockNumLv1Device, 0, sizeof(unsigned int)); hipMemset(countedBlockNumLv2Device, 0, sizeof(unsigned int)); hipMemset(countedVerticesNumDevice, 0, sizeof(unsigned int)); hipMemset(countedTrianglesNumDevice,0, sizeof(unsigned int)); hipMemset(trianglesDevice, 0, sizeof(unsigned long long)); hipMemset(coordXDevice, 0, sizeof(float)); hipMemset(coordYDevice, 0, sizeof(float)); hipMemset(coordZDevice, 0, sizeof(float)); hipMemset(coordZPDevice, 0, sizeof(float)); hipLaunchKernelGGL(computeMinMaxLv1, GridSizeLv1, BlockSizeLv1, 0, 0, minMaxLv1Device); hipLaunchKernelGGL(compactLv1, dim3(countingBlockNumLv1), dim3(countingThreadNumLv1), 0, 0, isoValue, minMaxLv1Device, blockIndicesLv1Device, countedBlockNumLv1Device); hipMemcpy(&countedBlockNumLv1, countedBlockNumLv1Device, sizeof(unsigned int), hipMemcpyDeviceToHost); hipMalloc(&minMaxLv2Device, countedBlockNumLv1 * voxelNumLv2 * 2 * sizeof(float)); hipLaunchKernelGGL(computeMinMaxLv2, dim3(countedBlockNumLv1), BlockSizeLv2, 0, 0, blockIndicesLv1Device, minMaxLv2Device); hipMalloc(&blockIndicesLv2Device, countedBlockNumLv1 * voxelNumLv2 * sizeof(unsigned int)); unsigned int countingBlockNumLv2((countedBlockNumLv1 * voxelNumLv2 + countingThreadNumLv2 - 1) / countingThreadNumLv2); hipLaunchKernelGGL(compactLv2, dim3(countingBlockNumLv2), dim3(countingThreadNumLv2 ), 0, 0, isoValue, minMaxLv2Device, blockIndicesLv1Device, blockIndicesLv2Device, countedBlockNumLv1, countedBlockNumLv2Device); hipMemcpy(&countedBlockNumLv2, countedBlockNumLv2Device, sizeof(unsigned int), hipMemcpyDeviceToHost); hipLaunchKernelGGL(generatingTriangles, dim3(countedBlockNumLv2), BlockSizeGenerating, 0, 0, isoValue, blockIndicesLv2Device, distinctEdgesTableDevice, triTableDevice, edgeIDTableDevice, countedVerticesNumDevice, countedTrianglesNumDevice, trianglesDevice, coordXDevice, coordYDevice, coordZDevice, coordZPDevice); 
hipMemcpy(&countedVerticesNum, countedVerticesNumDevice, sizeof(unsigned int), hipMemcpyDeviceToHost); hipMemcpy(&countedTrianglesNum, countedTrianglesNumDevice, sizeof(unsigned int), hipMemcpyDeviceToHost); hipFree(minMaxLv2Device); hipFree(blockIndicesLv2Device); } printf("Block Lv1: %u\nBlock Lv2: %u\n", countedBlockNumLv1, countedBlockNumLv2); printf("Vertices Size: %u\n", countedBlockNumLv2 * 304); printf("Triangles Size: %u\n", countedBlockNumLv2 * 315 * 3); printf("Vertices: %u\nTriangles: %u\n", countedVerticesNum, countedTrianglesNum); // specific to the problem size bool ok = (countedBlockNumLv1 == 8296 && countedBlockNumLv2 == 240380 && countedVerticesNum == 4856560 && countedTrianglesNum == 6101640); printf("%s\n", ok ? "PASS" : "FAIL"); hipFree(minMaxLv1Device); hipFree(blockIndicesLv1Device); hipFree(countedBlockNumLv1Device); hipFree(countedBlockNumLv2Device); hipFree(distinctEdgesTableDevice); hipFree(triTableDevice); hipFree(edgeIDTableDevice); hipFree(countedVerticesNumDevice); hipFree(countedTrianglesNumDevice); hipFree(trianglesDevice); hipFree(coordXDevice); hipFree(coordYDevice); hipFree(coordZDevice); hipFree(coordZPDevice); return 0; }
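Both compaction kernels above (compactLv1 and compactLv2) are built around the same idea: each thread flags its voxel block, the warp computes an inclusive prefix sum of the flags with __shfl_up, and the block total is published with atomicAdd so surviving indices can be scattered densely. The following single-warp HIP sketch isolates just that scan-and-scatter step; the buffer names and the trivial predicate are made up for illustration.

// Single-warp scan-and-compact sketch: flag, inclusive scan via __shfl_up,
// then scatter survivors to a dense index array.
#include <cstdio>
#include <hip/hip_runtime.h>

__global__ void warpCompact(const float* in, unsigned int* outIdx,
                            unsigned int* outCount, float isoValue)
{
    unsigned int lane = threadIdx.x;               // one warp: lanes 0..31
    unsigned int flag = (in[lane] >= isoValue) ? 1u : 0u;
    unsigned int scan = flag;
    // Inclusive prefix sum of the flags across the warp.
    for (unsigned int offset = 1; offset < 32; offset *= 2) {
        unsigned int t = __shfl_up(scan, offset);
        if (lane >= offset) scan += t;
    }
    if (flag) outIdx[scan - 1] = lane;             // exclusive offset = scan - flag
    if (lane == 31) *outCount = scan;              // total number of survivors
}

int main()
{
    float h_in[32];
    for (int i = 0; i < 32; ++i) h_in[i] = (i % 3 == 0) ? 1.0f : -1.0f;
    float* d_in; unsigned int *d_idx, *d_cnt;
    hipMalloc(&d_in, sizeof(h_in));
    hipMalloc(&d_idx, 32 * sizeof(unsigned int));
    hipMalloc(&d_cnt, sizeof(unsigned int));
    hipMemcpy(d_in, h_in, sizeof(h_in), hipMemcpyHostToDevice);
    hipLaunchKernelGGL(warpCompact, dim3(1), dim3(32), 0, 0,
                       d_in, d_idx, d_cnt, 0.0f);
    unsigned int h_cnt;
    hipMemcpy(&h_cnt, d_cnt, sizeof(h_cnt), hipMemcpyDeviceToHost);
    printf("survivors: %u\n", h_cnt);              // 11 for this toy input
    hipFree(d_in); hipFree(d_idx); hipFree(d_cnt);
    return 0;
}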
#pragma once #include <gunrock/app/enactor_base.cuh> #include <gunrock/app/enactor_iteration.cuh> #include <gunrock/app/enactor_loop.cuh> #include <gunrock/app/vn/vn_problem.cuh> #include <gunrock/oprtr/oprtr.cuh> namespace gunrock { namespace app { namespace vn { /** * @brief Speciflying parameters for vn Enactor * @param parameters The util::Parameter<...> structure holding all parameter * info \return cudaError_t error message(s), if any */ cudaError_t UseParameters_enactor(util::Parameters &parameters) { cudaError_t retval = cudaSuccess; GUARD_CU(app::UseParameters_enactor(parameters)); return retval; } /** * @brief defination of VN iteration loop * @tparam EnactorT Type of enactor */ template <typename EnactorT> struct VNIterationLoop : public IterationLoopBase<EnactorT, Use_FullQ | Push | (((EnactorT::Problem::FLAG & Mark_Predecessors) != 0) ? Update_Predecessors : 0x0)> { typedef typename EnactorT::VertexT VertexT; typedef typename EnactorT::SizeT SizeT; typedef typename EnactorT::ValueT ValueT; typedef typename EnactorT::Problem::GraphT::CsrT CsrT; typedef typename EnactorT::Problem::GraphT::GpT GpT; typedef IterationLoopBase<EnactorT, Use_FullQ | Push | (((EnactorT::Problem::FLAG & Mark_Predecessors) != 0) ? Update_Predecessors : 0x0)> BaseIterationLoop; VNIterationLoop() : BaseIterationLoop() {} /** * @brief Core computation of VN, one iteration * @param[in] peer_ Which GPU peers to work on, 0 means local * \return cudaError_t error message(s), if any */ cudaError_t Core(int peer_ = 0) { // Data vn that works on auto &data_slice = this->enactor->problem->data_slices[this->gpu_num][0]; auto &enactor_slice = this->enactor ->enactor_slices[this->gpu_num * this->enactor->num_gpus + peer_]; auto &enactor_stats = enactor_slice.enactor_stats; auto &graph = data_slice.sub_graph[0]; auto &distances = data_slice.distances; auto &labels = data_slice.labels; auto &preds = data_slice.preds; // auto &row_offsets = graph.CsrT::row_offsets; auto &weights = graph.CsrT::edge_values; auto &original_vertex = graph.GpT::original_vertex; auto &frontier = enactor_slice.frontier; auto &oprtr_parameters = enactor_slice.oprtr_parameters; auto &retval = enactor_stats.retval; // auto &stream = enactor_slice.stream; auto &iteration = enactor_stats.iteration; // The advance operation auto advance_op = [distances, weights, original_vertex, preds] __host__ __device__( const VertexT &src, VertexT &dest, const SizeT &edge_id, const VertexT &input_item, const SizeT &input_pos, SizeT &output_pos) -> bool { ValueT src_distance = Load<cub::LOAD_CG>(distances + src); ValueT edge_weight = Load<cub::LOAD_CS>(weights + edge_id); ValueT new_distance = src_distance + edge_weight; // Check if the destination node has been claimed as someone's child ValueT old_distance = atomicMin(distances + dest, new_distance); if (new_distance < old_distance) { if (!preds.isEmpty()) { VertexT pred = src; if (!original_vertex.isEmpty()) pred = original_vertex[src]; Store(preds + dest, pred); } return true; } return false; }; // The filter operation auto filter_op = [labels, iteration] __host__ __device__( const VertexT &src, VertexT &dest, const SizeT &edge_id, const VertexT &input_item, const SizeT &input_pos, SizeT &output_pos) -> bool { if (!util::isValid(dest)) return false; if (labels[dest] == iteration) return false; labels[dest] = iteration; return true; }; oprtr_parameters.label = iteration + 1; // Call the advance operator, using the advance operation GUARD_CU(oprtr::Advance<oprtr::OprtrType_V2V>( graph.csr(), frontier.V_Q(), 
frontier.Next_V_Q(), oprtr_parameters, advance_op, filter_op)); if (oprtr_parameters.advance_mode != "LB_CULL" && oprtr_parameters.advance_mode != "LB_LIGHT_CULL") { frontier.queue_reset = false; // Call the filter operator, using the filter operation GUARD_CU(oprtr::Filter<oprtr::OprtrType_V2V>( graph.csr(), frontier.V_Q(), frontier.Next_V_Q(), oprtr_parameters, filter_op)); } // Get back the resulted frontier length GUARD_CU(frontier.work_progress.GetQueueLength( frontier.queue_index, frontier.queue_length, false, oprtr_parameters.stream, true)); return retval; } /** * @brief Routine to combine received data and local data * @tparam NUM_VERTEX_ASSOCIATES Number of data associated with each * transmition item, typed VertexT * @tparam NUM_VALUE__ASSOCIATES Number of data associated with each * transmition item, typed ValueT * @param received_length The numver of transmition items received * @param[in] peer_ which peer GPU the data came from * \return cudaError_t error message(s), if any */ template <int NUM_VERTEX_ASSOCIATES, int NUM_VALUE__ASSOCIATES> cudaError_t ExpandIncoming(SizeT &received_length, int peer_) { auto &data_slice = this->enactor->problem->data_slices[this->gpu_num][0]; auto &enactor_slice = this->enactor ->enactor_slices[this->gpu_num * this->enactor->num_gpus + peer_]; auto iteration = enactor_slice.enactor_stats.iteration; auto &distances = data_slice.distances; auto &labels = data_slice.labels; auto &preds = data_slice.preds; auto label = this->enactor->mgpu_slices[this->gpu_num] .in_iteration[iteration % 2][peer_]; auto expand_op = [distances, labels, label, preds] __host__ __device__( VertexT & key, const SizeT &in_pos, VertexT *vertex_associate_ins, ValueT *value__associate_ins) -> bool { ValueT in_val = value__associate_ins[in_pos]; ValueT old_val = atomicMin(distances + key, in_val); if (old_val <= in_val) return false; if (labels[key] == label) return false; labels[key] = label; if (!preds.isEmpty()) preds[key] = vertex_associate_ins[in_pos]; return true; }; cudaError_t retval = BaseIterationLoop::template ExpandIncomingBase<NUM_VERTEX_ASSOCIATES, NUM_VALUE__ASSOCIATES>( received_length, peer_, expand_op); return retval; } }; // end of vnIteration /** * @brief vn enactor class. * @tparam _Problem Problem type we process on * @tparam ARRAY_FLAG Flags for util::Array1D used in the enactor * @tparam cudaHostRegisterFlag Flags for util::Array1D used in the enactor */ template <typename _Problem, util::ArrayFlag ARRAY_FLAG = util::ARRAY_NONE, unsigned int cudaHostRegisterFlag = cudaHostRegisterDefault> class Enactor : public EnactorBase<typename _Problem::GraphT, typename _Problem::LabelT, typename _Problem::ValueT, ARRAY_FLAG, cudaHostRegisterFlag> { public: // Definations typedef _Problem Problem; typedef typename Problem::SizeT SizeT; typedef typename Problem::VertexT VertexT; typedef typename Problem::ValueT ValueT; typedef typename Problem::GraphT GraphT; typedef typename Problem::LabelT LabelT; typedef EnactorBase<GraphT, LabelT, ValueT, ARRAY_FLAG, cudaHostRegisterFlag> BaseEnactor; typedef Enactor<Problem, ARRAY_FLAG, cudaHostRegisterFlag> EnactorT; typedef VNIterationLoop<EnactorT> IterationT; // Members Problem *problem; IterationT *iterations; /** * \addtogroup PublicInterface * @{ */ /** * @brief VNEnactor constructor */ Enactor() : BaseEnactor("vn"), problem(NULL) { this->max_num_vertex_associates = (Problem::FLAG & Mark_Predecessors) != 0 ? 
1 : 0; this->max_num_value__associates = 1; } /** * @brief VNEnactor destructor */ virtual ~Enactor() { // Release(); } /* * @brief Releasing allocated memory space * @param target The location to release memory from * \return cudaError_t error message(s), if any */ cudaError_t Release(util::Location target = util::LOCATION_ALL) { cudaError_t retval = cudaSuccess; GUARD_CU(BaseEnactor::Release(target)); delete[] iterations; iterations = NULL; problem = NULL; return retval; } /** * @brief Initialize the enactor. * @param[in] problem The problem object. * @param[in] target Target location of data * \return cudaError_t error message(s), if any */ cudaError_t Init(Problem &problem, util::Location target = util::DEVICE) { cudaError_t retval = cudaSuccess; this->problem = &problem; GUARD_CU(BaseEnactor::Init(problem, Enactor_None, 2, NULL, target, false)); for (int gpu = 0; gpu < this->num_gpus; gpu++) { GUARD_CU(util::SetDevice(this->gpu_idx[gpu])); auto &enactor_slice = this->enactor_slices[gpu * this->num_gpus + 0]; auto &graph = problem.sub_graphs[gpu]; GUARD_CU(enactor_slice.frontier.Allocate(graph.nodes, graph.edges, this->queue_factors)); for (int peer = 0; peer < this->num_gpus; peer++) { this->enactor_slices[gpu * this->num_gpus + peer] .oprtr_parameters.labels = &(problem.data_slices[gpu]->labels); } } iterations = new IterationT[this->num_gpus]; for (int gpu = 0; gpu < this->num_gpus; gpu++) { GUARD_CU(iterations[gpu].Init(this, gpu)); } GUARD_CU(this->Init_Threads( this, (CUT_THREADROUTINE) & (GunrockThread<EnactorT>))); return retval; } /** * @brief Reset enactor * @param[in] srcs Source nodes to start primitive. * @param[in] target Target location of data * \return cudaError_t error message(s), if any */ cudaError_t Reset(VertexT *srcs, SizeT num_srcs, util::Location target = util::DEVICE) { typedef typename GraphT::GpT GpT; cudaError_t retval = cudaSuccess; GUARD_CU(BaseEnactor::Reset(target)); for (int gpu = 0; gpu < this->num_gpus; gpu++) { if ((this->num_gpus == 1)) // if ((this->num_gpus == 1) || // (gpu == this->problem->org_graph->GpT::partition_table[src])) // // TODO -- finish multiGPU implementation { this->thread_slices[gpu].init_size = num_srcs; for (int peer_ = 0; peer_ < this->num_gpus; peer_++) { auto &frontier = this->enactor_slices[gpu * this->num_gpus + peer_].frontier; frontier.queue_length = (peer_ == 0) ? num_srcs : 0; if (peer_ == 0) { // Copy `srcs` to GPU util::Array1D<SizeT, VertexT> tmp_srcs; tmp_srcs.Allocate(num_srcs, target | util::HOST); for (SizeT i = 0; i < num_srcs; ++i) { tmp_srcs[i] = srcs[i]; } GUARD_CU(tmp_srcs.Move(util::HOST, target)); GUARD_CU(frontier.V_Q()->ForEach( tmp_srcs, [] __host__ __device__(VertexT & v, VertexT & src) { v = src; }, num_srcs, target, 0)); tmp_srcs.Release(); } } } else { this->thread_slices[gpu].init_size = 0; for (int peer_ = 0; peer_ < this->num_gpus; peer_++) { this->enactor_slices[gpu * this->num_gpus + peer_] .frontier.queue_length = 0; } } } GUARD_CU(BaseEnactor::Sync()); return retval; } /** * @brief one run of vn, to be called within GunrockThread * @param thread_data Data for the CPU thread * \return cudaError_t error message(s), if any */ cudaError_t Run(ThreadSlice &thread_data) { gunrock::app::Iteration_Loop< ((Enactor::Problem::FLAG & Mark_Predecessors) != 0) ? 1 : 0, 1, IterationT>(thread_data, iterations[thread_data.thread_num]); return cudaSuccess; } /** * @brief Enacts a vn computing on the specified graph. * @param[in] src Source node to start primitive. 
* \return cudaError_t error message(s), if any */ cudaError_t Enact(VertexT *srcs) { cudaError_t retval = cudaSuccess; GUARD_CU(this->Run_Threads(this)); util::PrintMsg("GPU VN Done.", this->flag & Debug); return retval; } /** @} */ }; } // namespace vn } // namespace app } // namespace gunrock // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
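The advance_op lambda above is a standard SSSP-style edge relaxation: compute a tentative distance, publish it with atomicMin, and record the predecessor only if this thread actually improved the label. Below is a freestanding CUDA sketch of that pattern with made-up names and unsigned integer distances (CUDA's built-in atomicMin covers integers; the float overload used in the lambda comes from Gunrock's own utilities).

// One thread per edge: relax with atomicMin, set predecessor on improvement.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void relax_edges(const int* src, const int* dst, const unsigned* w,
                            unsigned* dist, int* pred, int num_edges)
{
    int e = blockIdx.x * blockDim.x + threadIdx.x;
    if (e >= num_edges) return;
    unsigned src_dist = dist[src[e]];
    if (src_dist == ~0u) return;          // source of this edge not reached yet
    unsigned new_dist = src_dist + w[e];
    unsigned old_dist = atomicMin(&dist[dst[e]], new_dist);
    if (new_dist < old_dist)              // this thread improved dst's label
        pred[dst[e]] = src[e];            // analogous to Store(preds + dest, pred)
}

int main()
{
    // Tiny graph: edges 0->1 (w=5), 0->2 (w=2), 2->1 (w=1); source vertex 0.
    int h_src[] = {0, 0, 2}, h_dst[] = {1, 2, 1}, h_pred[] = {-1, -1, -1};
    unsigned h_w[] = {5, 2, 1}, h_dist[] = {0, ~0u, ~0u};
    int *d_src, *d_dst, *d_pred;
    unsigned *d_w, *d_dist;
    cudaMalloc(&d_src, sizeof(h_src));   cudaMalloc(&d_dst, sizeof(h_dst));
    cudaMalloc(&d_pred, sizeof(h_pred)); cudaMalloc(&d_w, sizeof(h_w));
    cudaMalloc(&d_dist, sizeof(h_dist));
    cudaMemcpy(d_src, h_src, sizeof(h_src), cudaMemcpyHostToDevice);
    cudaMemcpy(d_dst, h_dst, sizeof(h_dst), cudaMemcpyHostToDevice);
    cudaMemcpy(d_pred, h_pred, sizeof(h_pred), cudaMemcpyHostToDevice);
    cudaMemcpy(d_w, h_w, sizeof(h_w), cudaMemcpyHostToDevice);
    cudaMemcpy(d_dist, h_dist, sizeof(h_dist), cudaMemcpyHostToDevice);
    // Two passes so the two-hop path 0 -> 2 -> 1 settles deterministically.
    relax_edges<<<1, 32>>>(d_src, d_dst, d_w, d_dist, d_pred, 3);
    relax_edges<<<1, 32>>>(d_src, d_dst, d_w, d_dist, d_pred, 3);
    cudaMemcpy(h_dist, d_dist, sizeof(h_dist), cudaMemcpyDeviceToHost);
    printf("dist(1) = %u, dist(2) = %u\n", h_dist[1], h_dist[2]);  // 3 and 2
    cudaFree(d_src); cudaFree(d_dst); cudaFree(d_pred);
    cudaFree(d_w); cudaFree(d_dist);
    return 0;
}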
#include "core/providers/cuda/cu_inc/common.cuh" #include "transpose_impl.h" namespace onnxruntime { namespace cuda { constexpr unsigned int TILE_DIM = 16; template <typename T> __global__ void Transpose3DKernel(const TArray<int64_t> input_shape, const TArray<int64_t> input_strides, const T* input_data, T* output_data) { __shared__ T tile[TILE_DIM * (TILE_DIM + 1)]; int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; tile[threadIdx.y * TILE_DIM + threadIdx.x] = input_data[blockIdx.z * input_strides[0] + y * input_shape[2] + x]; __syncthreads(); x = blockIdx.y * TILE_DIM + threadIdx.x; y = blockIdx.x * TILE_DIM + threadIdx.y; output_data[blockIdx.z * input_strides[0] + y * input_shape[1] + x] = tile[threadIdx.x * TILE_DIM + threadIdx.y]; } bool CanDoTranspose3D(const cudaDeviceProp& prop, int32_t rank, const std::vector<int64_t>& input_dims, const std::vector<size_t>& permutations, dim3& grid_size, dim3& block_size) { if (rank == 3 && // permutation is done in the last two dimensions. permutations[rank - 2] == (rank - 1) && permutations[rank - 1] == (rank - 2) && // the last two dimensions are aligned with TILE_DIM. input_dims[rank - 2] % TILE_DIM == 0 && input_dims[rank - 1] % TILE_DIM == 0) { int grid_size_x = static_cast<int>(input_dims[2] / TILE_DIM); int grid_size_y = static_cast<int>(input_dims[1] / TILE_DIM); int grid_size_z = static_cast<int>(input_dims[0]); if (grid_size_x <= prop.maxGridSize[0] && grid_size_y <= prop.maxGridSize[1] && grid_size_z <= prop.maxGridSize[2]) { block_size = dim3(TILE_DIM, TILE_DIM); grid_size = dim3(static_cast<unsigned int>(grid_size_x), static_cast<unsigned int>(grid_size_y), static_cast<unsigned int>(grid_size_z)); return true; } else { return false; } } return false; } Status Transpose3DImpl(cudaStream_t stream, size_t element_size, const TArray<int64_t>& input_shape, const TArray<int64_t>& input_strides, const void* input_data, void* output_data, int64_t N, const dim3& grid_size, const dim3& block_size) { switch (element_size) { case sizeof(int8_t): Transpose3DKernel<int8_t><<<grid_size, block_size, 0, stream>>>( input_shape, input_strides, reinterpret_cast<const ToCudaType<int8_t>::MappedType*>(input_data), reinterpret_cast<ToCudaType<int8_t>::MappedType*>(output_data)); break; case sizeof(int16_t): Transpose3DKernel<int16_t><<<grid_size, block_size, 0, stream>>>( input_shape, input_strides, reinterpret_cast<const ToCudaType<int16_t>::MappedType*>(input_data), reinterpret_cast<ToCudaType<int16_t>::MappedType*>(output_data)); break; case sizeof(int32_t): Transpose3DKernel<int32_t><<<grid_size, block_size, 0, stream>>>( input_shape, input_strides, reinterpret_cast<const ToCudaType<int32_t>::MappedType*>(input_data), reinterpret_cast<ToCudaType<int32_t>::MappedType*>(output_data)); break; case sizeof(int64_t): Transpose3DKernel<int64_t><<<grid_size, block_size, 0, stream>>>( input_shape, input_strides, reinterpret_cast<const ToCudaType<int64_t>::MappedType*>(input_data), reinterpret_cast<ToCudaType<int64_t>::MappedType*>(output_data)); break; default: return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Type not supported for transpose on CUDA. 
Element size was ", element_size); } return Status::OK(); } template <int element_size> __global__ void Transpose4DKernelParallelizeMultipleElementsPerThreadInInnermostDim( const TArray<int64_t> input_strides, const void* input_data, const TArray<int64_t> output_strides, void* output_data, int64_t input_shape_2, CUDA_LONG N) { // coordinates will be: [d0, d1, d2, d3] CUDA_LONG d0 = blockIdx.z; CUDA_LONG d1 = blockIdx.y; CUDA_LONG d2 = threadIdx.y + blockIdx.x * blockDim.y; CUDA_LONG d3 = threadIdx.x; CUDA_LONG input_index = (d0 * input_strides[0] + d1 * input_strides[1] + d2 * input_strides[2]) / (4 * sizeof(int) / element_size) + d3 * input_strides[3]; CUDA_LONG output_index = (d0 * output_strides[0] + d1 * output_strides[1] + d2 * output_strides[2]) / (4 * sizeof(int) / element_size) + d3 * output_strides[3]; const int4* v_input = reinterpret_cast<const int4*>(input_data); int4* v_output = reinterpret_cast<int4*>(output_data); if (input_index < N && output_index < N && d2 < input_shape_2) { v_output[output_index] = v_input[input_index]; } } bool CanDoTranspose4DParallelizeMultipleElementsPerThreadInInnermostDim(const cudaDeviceProp& prop, size_t element_size, int32_t rank, const std::vector<int64_t>& input_dims, const std::vector<size_t>& permutations, dim3& grid_size, dim3& block_size) { if (rank == 4 && // the permutations is not on the last dimension. permutations[3] == 3) { unsigned int num_elements_per_thread = 4 * sizeof(int) / static_cast<unsigned int>(element_size); // int4 is used in the kernel to access data. // dims[3]: block.x // dims[2]: block.y + grid.x // dims[1]: grid.y // dims[0]: grid.z if (input_dims[3] / num_elements_per_thread <= prop.maxThreadsPerBlock && (input_dims[3] % num_elements_per_thread) == 0 && input_dims[1] <= prop.maxGridSize[1] && input_dims[0] <= prop.maxGridSize[2]) { // There are 2 constrains when luanching the kernels // 1. block_size_x * block_size_y <= prop.maxThreadsPerBlock // 2. block_size_y * num_block_ext >= input_dims[2] int64_t block_size_x = input_dims[3] / num_elements_per_thread; int64_t max_block_size_y = prop.maxThreadsPerBlock / block_size_x; int64_t block_size_y = min(input_dims[2], max_block_size_y); int64_t num_block_ext = CeilDiv(input_dims[2], block_size_y); if (num_block_ext <= prop.maxGridSize[0]) { block_size = dim3(static_cast<unsigned int>(block_size_x), static_cast<unsigned int>(block_size_y)); grid_size = dim3(static_cast<unsigned int>(num_block_ext), static_cast<unsigned int>(input_dims[1]), static_cast<unsigned int>(input_dims[0])); return true; } else { return false; } } } return false; } Status Transpose4DParallelizeMultipleElementsPerThreadInInnermostDim( cudaStream_t stream, size_t element_size, const TArray<int64_t>& input_shape, const TArray<int64_t>& input_strides, const void* input_data, const TArray<int64_t>& output_strides, void* output_data, int N, const dim3& grid_size, const dim3& block_size) { unsigned int num_elements_per_thread = 4 * sizeof(int) / static_cast<unsigned int>(element_size); // int4 is used in the kernel to access data. 
switch (element_size) { case sizeof(int8_t): Transpose4DKernelParallelizeMultipleElementsPerThreadInInnermostDim<sizeof(int8_t)> <<<grid_size, block_size, 0, stream>>>( input_strides, input_data, output_strides, output_data, input_shape[2], N / num_elements_per_thread); break; case sizeof(int16_t): Transpose4DKernelParallelizeMultipleElementsPerThreadInInnermostDim<sizeof(int16_t)> <<<grid_size, block_size, 0, stream>>>( input_strides, input_data, output_strides, output_data, input_shape[2], N / num_elements_per_thread); break; case sizeof(int32_t): Transpose4DKernelParallelizeMultipleElementsPerThreadInInnermostDim<sizeof(int32_t)> <<<grid_size, block_size, 0, stream>>>( input_strides, input_data, output_strides, output_data, input_shape[2], N / num_elements_per_thread); break; case sizeof(int64_t): Transpose4DKernelParallelizeMultipleElementsPerThreadInInnermostDim<sizeof(int64_t)> <<<grid_size, block_size, 0, stream>>>( input_strides, input_data, output_strides, output_data, input_shape[2], N / num_elements_per_thread); break; default: // User will not hit this as this kernel is for fixed element size tensors only return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Type not supported for transpose on CUDA. Element size was ", element_size); } return Status::OK(); } __global__ void Transpose4DKernelParallelizeOneElementPerThread( const TArray<int64_t> input_strides, const int8_t* input_data, const TArray<int64_t> output_strides, int8_t* output_data, size_t element_size, int64_t input_shape_2, CUDA_LONG N) { // coordinates will be: [d0, d1, d2, d3] CUDA_LONG d0 = blockIdx.z; CUDA_LONG d1 = blockIdx.y; CUDA_LONG d2 = threadIdx.y + blockIdx.x * blockDim.y; CUDA_LONG d3 = threadIdx.x; CUDA_LONG input_index = d0 * input_strides[0] + d1 * input_strides[1] + d2 * input_strides[2] + d3 * input_strides[3]; CUDA_LONG output_index = d0 * output_strides[0] + d1 * output_strides[1] + d2 * output_strides[2] + d3 * output_strides[3]; if (input_index < N && output_index < N && d2 < input_shape_2) { const int8_t* input_data_to_be_copied = input_data + (input_index * element_size); int8_t* output_data_to_be_copied = output_data + (output_index * element_size); // copy over the bytes for (size_t iter = 0; iter < element_size; ++iter) { *output_data_to_be_copied++ = *input_data_to_be_copied++; } } } bool CanDoTranspose4DParallelizeOneElementPerThread(const cudaDeviceProp& prop, size_t element_size, int32_t rank, const std::vector<int64_t>& input_dims, const std::vector<size_t>& permutations, dim3& grid_size, dim3& block_size) { if (rank == 4) { // dims[3]: block.x // dims[2]: block.y + grid.x // dims[1]: grid.y // dims[0]: grid.z if (input_dims[3] <= prop.maxThreadsPerBlock && input_dims[1] <= prop.maxGridSize[1] && input_dims[0] <= prop.maxGridSize[2]) { // There are 2 constrains when luanching the kernels // 1. block_size_x * block_size_y <= prop.maxThreadsPerBlock // 2. 
block_size_y * num_block_ext >= input_dims[2] int64_t block_size_x = input_dims[3]; int64_t max_block_size_y = prop.maxThreadsPerBlock / block_size_x; int64_t block_size_y = min(input_dims[2], max_block_size_y); int64_t num_block_ext = CeilDiv(input_dims[2], block_size_y); if (num_block_ext <= prop.maxGridSize[0]) { block_size = dim3(static_cast<unsigned int>(block_size_x), static_cast<unsigned int>(block_size_y)); grid_size = dim3(static_cast<unsigned int>(num_block_ext), static_cast<unsigned int>(input_dims[1]), static_cast<unsigned int>(input_dims[0])); return true; } else { return false; } } } return false; } Status Transpose4DParallelizeOneElementPerThread( cudaStream_t stream, size_t element_size, const TArray<int64_t>& input_shape, const TArray<int64_t>& input_strides, const void* input_data, const TArray<int64_t>& output_strides, void* output_data, int N, const dim3& grid_size, const dim3& block_size) { if (element_size != sizeof(int8_t) && element_size != sizeof(int16_t) && element_size != sizeof(int32_t) && element_size != sizeof(int64_t)) { // User will not hit this as this kernel is for fixed element size tensors only return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Type not supported for transpose on CUDA. Element size was ", element_size); } Transpose4DKernelParallelizeOneElementPerThread<<<grid_size, block_size, 0, stream>>>( input_strides, reinterpret_cast<const int8_t*>(input_data), output_strides, reinterpret_cast<int8_t*>(output_data), element_size, input_shape[2], N); return Status::OK(); } template <typename T> __global__ void TransposeKernel(int32_t shape_rank, const TArray<int64_t> input_strides, const T* input_data, const TArray<fast_divmod> output_strides, T* output_data, CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); CUDA_LONG input_index = 0; CUDA_LONG output_index = id; #pragma unroll for (auto dim = 0; dim < input_strides.Capacity(); ++dim) { if (dim >= shape_rank) { break; } int out_coord, r; output_strides[dim].divmod(output_index, out_coord, r); output_index = r; input_index += input_strides[dim] * out_coord; } output_data[id] = input_data[input_index]; } Status TransposeImpl(cudaStream_t stream, size_t element_size, int32_t shape_rank, const TArray<int64_t>& input_strides, const void* input_data, const TArray<fast_divmod>& fdm_output_strides, void* output_data, int N) { int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock)); switch (element_size) { case sizeof(int8_t): TransposeKernel<int8_t><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>( shape_rank, input_strides, reinterpret_cast<const ToCudaType<int8_t>::MappedType*>(input_data), fdm_output_strides, reinterpret_cast<ToCudaType<int8_t>::MappedType*>(output_data), N); break; case sizeof(int16_t): TransposeKernel<int16_t><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>( shape_rank, input_strides, reinterpret_cast<const ToCudaType<int16_t>::MappedType*>(input_data), fdm_output_strides, reinterpret_cast<ToCudaType<int16_t>::MappedType*>(output_data), N); break; case sizeof(int32_t): TransposeKernel<int32_t><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>( shape_rank, input_strides, reinterpret_cast<const ToCudaType<int32_t>::MappedType*>(input_data), fdm_output_strides, reinterpret_cast<ToCudaType<int32_t>::MappedType*>(output_data), N); break; case sizeof(int64_t): TransposeKernel<int64_t><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>( shape_rank, input_strides, reinterpret_cast<const 
ToCudaType<int64_t>::MappedType*>(input_data), fdm_output_strides, reinterpret_cast<ToCudaType<int64_t>::MappedType*>(output_data), N); break; default: return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Type not supported for transpose on CUDA. Element size was ", element_size); } return Status::OK(); } } // namespace cuda } // namespace onnxruntime
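Transpose3DKernel above stages a 16x16 tile in shared memory and pads the row stride by one (`TILE_DIM + 1`) so that the column-wise reads in the write phase do not all land in the same shared-memory bank. The sketch below shows that classic padded-tile pattern in isolation; it assumes a plain 2D row-major matrix whose dimensions are multiples of the tile size, and all names are illustrative.

// Classic tiled transpose with a +1 padded shared-memory tile.
#include <cstdio>
#include <cuda_runtime.h>

constexpr int TILE = 16;

__global__ void transpose_tiled(const float* in, float* out, int rows, int cols)
{
    __shared__ float tile[TILE][TILE + 1];    // +1 column avoids bank conflicts
    int x = blockIdx.x * TILE + threadIdx.x;  // column in the input
    int y = blockIdx.y * TILE + threadIdx.y;  // row in the input
    tile[threadIdx.y][threadIdx.x] = in[y * cols + x];
    __syncthreads();
    x = blockIdx.y * TILE + threadIdx.x;      // column in the output
    y = blockIdx.x * TILE + threadIdx.y;      // row in the output
    out[y * rows + x] = tile[threadIdx.x][threadIdx.y];
}

int main()
{
    const int rows = 64, cols = 32;           // both multiples of TILE
    float h_in[rows * cols], h_out[rows * cols];
    for (int i = 0; i < rows * cols; ++i) h_in[i] = (float)i;
    float *d_in, *d_out;
    cudaMalloc(&d_in, sizeof(h_in));
    cudaMalloc(&d_out, sizeof(h_out));
    cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);
    dim3 block(TILE, TILE);
    dim3 grid(cols / TILE, rows / TILE);
    transpose_tiled<<<grid, block>>>(d_in, d_out, rows, cols);
    cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
    // Element (r, c) of the input must land at (c, r) of the output.
    printf("%s\n", h_out[5 * rows + 3] == h_in[3 * cols + 5] ? "ok" : "mismatch");
    cudaFree(d_in); cudaFree(d_out);
    return 0;
}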
#include "_reg_nmi.h" #include "_reg_nmi_gpu.h" #include "_reg_nmi_kernels.cu" /* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */ /* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */ reg_nmi_gpu::reg_nmi_gpu(): reg_nmi::reg_nmi() { this->forwardJointHistogramLog_device=NULL; // this->backwardJointHistogramLog_device=NULL; #ifndef NDEBUG printf("[NiftyReg DEBUG] reg_nmi_gpu constructor called\n"); #endif } /* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */ /* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */ reg_nmi_gpu::~reg_nmi_gpu() { this->ClearHistogram(); #ifndef NDEBUG printf("[NiftyReg DEBUG] reg_nmi_gpu destructor called\n"); #endif } /* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */ void reg_nmi_gpu::ClearHistogram() { if(this->forwardJointHistogramLog_device!=NULL){ cudaFree(this->forwardJointHistogramLog_device); } this->forwardJointHistogramLog_device=NULL; #ifndef NDEBUG printf("[NiftyReg DEBUG] reg_nmi_gpu::ClearHistogram() called\n"); #endif } /* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */ /* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */ void reg_nmi_gpu::InitialiseMeasure(nifti_image *refImgPtr, nifti_image *floImgPtr, int *maskRefPtr, int activeVoxNum, nifti_image *warFloImgPtr, nifti_image *warFloGraPtr, nifti_image *forVoxBasedGraPtr, cudaArray **refDevicePtr, cudaArray **floDevicePtr, int **refMskDevicePtr, float **warFloDevicePtr, float4 **warFloGradDevicePtr, float4 **forVoxBasedGraDevicePtr) { this->ClearHistogram(); reg_nmi::InitialiseMeasure(refImgPtr, floImgPtr, maskRefPtr, warFloImgPtr, warFloGraPtr, forVoxBasedGraPtr); // Check if a symmetric measure is required if(this->isSymmetric){ fprintf(stderr,"[NiftyReg ERROR] reg_nmi_gpu::InitialiseMeasure\n"); fprintf(stderr,"[NiftyReg ERROR] Symmetric scheme is not yet supported on the GPU\n"); reg_exit(1); } // Check if the input images have multiple timepoints if(this->referenceTimePoint>1 || this->floatingImagePointer->nt>1){ fprintf(stderr,"[NiftyReg ERROR] reg_nmi_gpu::InitialiseMeasure\n"); fprintf(stderr,"[NiftyReg ERROR] This class can only be \n"); reg_exit(1); } // Check that the input image are of type float if(this->referenceImagePointer->datatype!=NIFTI_TYPE_FLOAT32 || this->warpedFloatingImagePointer->datatype!=NIFTI_TYPE_FLOAT32){ fprintf(stderr,"[NiftyReg ERROR] reg_nmi_gpu::InitialiseMeasure\n"); fprintf(stderr,"[NiftyReg ERROR] This class can only be \n"); reg_exit(1); } // Bind the required pointers this->referenceDevicePointer = *refDevicePtr; this->floatingDevicePointer = *floDevicePtr; this->referenceMaskDevicePointer = *refMskDevicePtr; this->activeVoxeNumber = activeVoxNum; this->warpedFloatingDevicePointer = *warFloDevicePtr; this->warpedFloatingGradientDevicePointer = *warFloGradDevicePtr; this->forwardVoxelBasedGradientDevicePointer = *forVoxBasedGraDevicePtr; // The reference and floating images have to be updated on the device if(cudaCommon_transferNiftiToArrayOnDevice<float> (&this->referenceDevicePointer, this->referenceImagePointer)){ fprintf(stderr,"[NiftyReg ERROR] reg_nmi_gpu::InitialiseMeasure\n"); printf("[NiftyReg ERROR] Error when transfering the reference image.\n"); reg_exit(1); } if(cudaCommon_transferNiftiToArrayOnDevice<float> (&this->floatingDevicePointer, this->floatingImagePointer)){ fprintf(stderr,"[NiftyReg ERROR] reg_nmi_gpu::InitialiseMeasure\n"); printf("[NiftyReg ERROR] Error when transfering the floating 
image.\n"); reg_exit(1); } // Allocate the required joint histogram on the GPU cudaMalloc(&this->forwardJointHistogramLog_device, this->totalBinNumber[0]*sizeof(float)); #ifndef NDEBUG printf("[NiftyReg DEBUG] reg_nmi_gpu::InitialiseMeasure called\n"); #endif } /* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */ /* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */ double reg_nmi_gpu::GetSimilarityMeasureValue() { // The NMI computation is performed into the host for now // The relevant images have to be transfered from the device to the host cudaMemcpy(this->warpedFloatingImagePointer->data, this->warpedFloatingDevicePointer, this->warpedFloatingImagePointer->nvox * this->warpedFloatingImagePointer->nbyper, cudaMemcpyDeviceToHost ); reg_getNMIValue<float> (this->referenceImagePointer, this->warpedFloatingImagePointer, this->activeTimePoint, this->referenceBinNumber, this->floatingBinNumber, this->totalBinNumber, this->forwardJointHistogramLog, this->forwardJointHistogramPro, this->forwardEntropyValues, this->referenceMaskPointer ); double nmi_value=0.; nmi_value += (this->forwardEntropyValues[0][0] + this->forwardEntropyValues[0][1] ) / this->forwardEntropyValues[0][2]; #ifndef NDEBUG printf("[NiftyReg DEBUG] reg_nmi_gpu::GetSimilarityMeasureValue called\n"); #endif return nmi_value; } /* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */ /* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */ /// Called when we only have one target and one source image void reg_getVoxelBasedNMIGradient_gpu(nifti_image *referenceImage, cudaArray **referenceImageArray_d, float **warpedImageArray_d, float4 **warpedGradientArray_d, float **logJointHistogram_d, float4 **voxelNMIGradientArray_d, int **mask_d, int activeVoxelNumber, double *entropies, int refBinning, int floBinning) { // Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0); const int voxelNumber = referenceImage->nx*referenceImage->ny*referenceImage->nz; const int3 imageSize=make_int3(referenceImage->nx,referenceImage->ny,referenceImage->nz); const int binNumber = refBinning*floBinning+refBinning+floBinning; const float normalisedJE=(float)(entropies[2]*entropies[3]); const float NMI = (float)((entropies[0]+entropies[1])/entropies[2]); // Bind Symbols NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int))); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ImageSize,&imageSize,sizeof(int3))); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_firstTargetBin,&refBinning,sizeof(int))); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_firstResultBin,&floBinning,sizeof(int))); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_NormalisedJE,&normalisedJE,sizeof(float))); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_NMI,&NMI,sizeof(float))); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ActiveVoxelNumber,&activeVoxelNumber,sizeof(int))); // Texture bindingcurrentFloating //Bind target image array to a 3D texture firstreferenceImageTexture.normalized = true; firstreferenceImageTexture.filterMode = cudaFilterModeLinear; firstreferenceImageTexture.addressMode[0] = cudaAddressModeWrap; firstreferenceImageTexture.addressMode[1] = cudaAddressModeWrap; firstreferenceImageTexture.addressMode[2] = cudaAddressModeWrap; cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); NR_CUDA_SAFE_CALL(cudaBindTextureToArray(firstreferenceImageTexture, *referenceImageArray_d, channelDesc)) 
NR_CUDA_SAFE_CALL(cudaBindTexture(0, firstwarpedImageTexture, *warpedImageArray_d, voxelNumber*sizeof(float))); NR_CUDA_SAFE_CALL(cudaBindTexture(0, firstwarpedImageGradientTexture, *warpedGradientArray_d, voxelNumber*sizeof(float4))); NR_CUDA_SAFE_CALL(cudaBindTexture(0, histogramTexture, *logJointHistogram_d, binNumber*sizeof(float))); NR_CUDA_SAFE_CALL(cudaBindTexture(0, maskTexture, *mask_d, activeVoxelNumber*sizeof(int))); NR_CUDA_SAFE_CALL(cudaMemset(*voxelNMIGradientArray_d, 0, voxelNumber*sizeof(float4))); if(referenceImage->nz>1){ const unsigned int Grid_reg_getVoxelBasedNMIGradientUsingPW3D = (unsigned int)ceil(sqrtf((float)activeVoxelNumber/(float)NR_BLOCK->Block_reg_getVoxelBasedNMIGradientUsingPW3D)); dim3 B1(NR_BLOCK->Block_reg_getVoxelBasedNMIGradientUsingPW3D,1,1); dim3 G1(Grid_reg_getVoxelBasedNMIGradientUsingPW3D,Grid_reg_getVoxelBasedNMIGradientUsingPW3D,1); reg_getVoxelBasedNMIGradientUsingPW3D_kernel <<< G1, B1 >>> (*voxelNMIGradientArray_d); NR_CUDA_CHECK_KERNEL(G1,B1) } else{ const unsigned int Grid_reg_getVoxelBasedNMIGradientUsingPW2D = (unsigned int)ceil(sqrtf((float)activeVoxelNumber/(float)NR_BLOCK->Block_reg_getVoxelBasedNMIGradientUsingPW2D)); dim3 B1(NR_BLOCK->Block_reg_getVoxelBasedNMIGradientUsingPW2D,1,1); dim3 G1(Grid_reg_getVoxelBasedNMIGradientUsingPW2D,Grid_reg_getVoxelBasedNMIGradientUsingPW2D,1); reg_getVoxelBasedNMIGradientUsingPW2D_kernel <<< G1, B1 >>> (*voxelNMIGradientArray_d); NR_CUDA_CHECK_KERNEL(G1,B1) } NR_CUDA_SAFE_CALL(cudaUnbindTexture(firstreferenceImageTexture)); NR_CUDA_SAFE_CALL(cudaUnbindTexture(firstwarpedImageTexture)); NR_CUDA_SAFE_CALL(cudaUnbindTexture(firstwarpedImageGradientTexture)); NR_CUDA_SAFE_CALL(cudaUnbindTexture(histogramTexture)); NR_CUDA_SAFE_CALL(cudaUnbindTexture(maskTexture)); } /* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */ /* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */ void reg_nmi_gpu::GetVoxelBasedSimilarityMeasureGradient() { // The latest joint histogram is transfered onto the GPU float *temp=(float *)malloc(this->totalBinNumber[0]*sizeof(float)); for(unsigned short i=0;i<this->totalBinNumber[0]; ++i) temp[i]=static_cast<float>(this->forwardJointHistogramLog[0][i]); cudaMemcpy(this->forwardJointHistogramLog_device, temp, this->totalBinNumber[0]*sizeof(float), cudaMemcpyHostToDevice); free(temp); // THe gradient of the NMI is computed on the GPU reg_getVoxelBasedNMIGradient_gpu(this->referenceImagePointer, &this->referenceDevicePointer, &this->warpedFloatingDevicePointer, &this->warpedFloatingGradientDevicePointer, &this->forwardJointHistogramLog_device, &this->forwardVoxelBasedGradientDevicePointer, &this->referenceMaskDevicePointer, this->activeVoxeNumber, this->forwardEntropyValues[0], this->referenceBinNumber[0], this->floatingBinNumber[0]); #ifndef NDEBUG printf("[NiftyReg DEBUG] reg_nmi_gpu::GetVoxelBasedSimilarityMeasureGradient called\n"); #endif } /* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */ /* \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/ */ #endif
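GetSimilarityMeasureValue above returns (H(ref) + H(flo)) / H(ref, flo), with the three entropies taken from forwardEntropyValues after the joint histogram has been filled. The small host-only sketch below computes the same quantity directly from a joint histogram; the names are illustrative and no NiftyReg types are involved.

// NMI = (H(ref) + H(flo)) / H(ref, flo) from a refBins x floBins joint histogram.
#include <cstdio>
#include <cmath>
#include <vector>

static double nmi_from_joint_histogram(const std::vector<double>& joint,
                                       int refBins, int floBins)
{
    std::vector<double> pr(refBins, 0.0), pf(floBins, 0.0);
    double total = 0.0;
    for (double v : joint) total += v;
    double h_joint = 0.0;
    for (int r = 0; r < refBins; ++r)
        for (int f = 0; f < floBins; ++f) {
            double p = joint[r * floBins + f] / total;
            pr[r] += p;      // marginal over the reference intensities
            pf[f] += p;      // marginal over the floating intensities
            if (p > 0.0) h_joint -= p * std::log(p);
        }
    double h_ref = 0.0, h_flo = 0.0;
    for (double p : pr) if (p > 0.0) h_ref -= p * std::log(p);
    for (double p : pf) if (p > 0.0) h_flo -= p * std::log(p);
    return (h_ref + h_flo) / h_joint;
}

int main()
{
    // 2x2 joint histogram of two perfectly aligned binary images.
    std::vector<double> joint = {50.0, 0.0, 0.0, 50.0};
    printf("NMI = %f\n", nmi_from_joint_histogram(joint, 2, 2));  // prints 2.0
    return 0;
}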
#ifndef CONV_KERNEL_CU #define CONV_KERNEL_CU #include <stdint.h> /* #define CHECK_BANK_CONFLICTS 0 #if CHECK_BANK_CONFLICTS #define AS(i, j) cutilBankChecker(((float*)&As[0][0]), (BLOCK_SIZE * i + j)) #define BS(i, j) cutilBankChecker(((float*)&Bs[0][0]), (BLOCK_SIZE * i + j)) #else #define AS(i, j) As[i][j] #define BS(i, j) Bs[i][j] #endif */ #define MIN(a, b) ((a) < (b) ? (a) : (b) ) #define MAX(a, b) ((a) < (b) ? (b) : (a) ) //Must be the same size as a ptr. We can't use unsigned long as on Windows 64 //bit, it is 32 bit. const uintptr_t COALESCED_ALIGN = 0xFFFFFFFFFFFFFF00; // zero-out the trailing bits of pointers __device__ void load_to_shared(float * dst, const float * src, const int thread_id, int nb_thread, const int N, const bool flipped=false){ if (nb_thread < 64) { if(flipped) //TODO very slow on device before 1.3. // make access to kern sequential and access to d_kern flipped. for(int i=thread_id;i<N;i+=nb_thread) dst[i]=src[N - 1 - i]; //dst[N-1-i]=src[i]; else { for(int i = thread_id; i < N; i += nb_thread) { dst[i] = src[i]; } } } else { nb_thread = nb_thread & 0xFFFFFFE0; //make nb_thread a multiple of 32 // Global memory: // <--------------------------------------> // A A A A A // points of 256-byte alignment // dddddddddddddddddddddd // layout of src in global memory if (thread_id < nb_thread) { const float * my_src_ptr = (const float *)( ((uintptr_t)src) & COALESCED_ALIGN); my_src_ptr += thread_id; while (my_src_ptr < src + N) { if (my_src_ptr >= src) { int i = my_src_ptr - src; if (flipped) { dst[N - 1 - i] = *my_src_ptr; } else { dst[i] = *my_src_ptr; } } my_src_ptr += nb_thread; } } } } /* * We load from global memory to shared memory. The outer if is optimized away at compilation. */ __device__ void load_to_shared(float * dst, const float * src, const int thread_id, int nb_thread, const int nb_col, const int nb_row, const int stride_col, const int stride_row, const bool flipped=false, const bool c_contiguous=true){ if (c_contiguous) { load_to_shared(dst, src, thread_id, nb_thread, nb_col*nb_row, flipped); } else { if (flipped) { int LAST = nb_row * nb_col - 1; for(int i=thread_id;i<nb_row*nb_col;i+=nb_thread) { // XXX // THIS IS SLOW - use whatever blocks are in the the // threads to avoid division and modulo dst[LAST - i] \ = src[(i/nb_col)*stride_row+(i%nb_col)*stride_col]; } } else { for(int i=thread_id;i<nb_row*nb_col;i+=nb_thread) { // XXX // THIS IS SLOW - use whatever blocks are in the the // threads to avoid division and modulo dst[i]=src[i/nb_col*stride_row+i%nb_col*stride_col]; } } } } __device__ void fill(float * dst, int N, float value, int thread_id, int nb_thread){ for(int i=thread_id;i<N;i+=nb_thread) dst[i]=value; } /* * We load from global memory to shared memory. The outer if is optimized away at compilation. * We put the image at the center of another one. Usefull to padd an image with 0. 
*/ __device__ void load_padded_col_to_shared(float * dst, const float * src, const int thread_id, const int nb_thread, const int nb_col, const int nb_row, const int stride_col, const int stride_row, const int wid_pad, const bool c_contiguous=true){ if(c_contiguous){//flipped==false for(int i=thread_id;i<nb_col*nb_row;i+=nb_thread){ int col=i%nb_col; int row=i/nb_col; dst[row*(nb_col+2*wid_pad)+col+wid_pad]=src[i]; } }else{ for(int i=thread_id;i<nb_row*nb_col;i+=nb_thread){ int col=i%nb_col; int row=i/nb_col; dst[row*(nb_col+2*wid_pad)+col+wid_pad]=src[row*stride_row+col*stride_col]; } } } template<int i> __device__ float convolutionRowNoFlip(const float *data, const float *kern){ return convolutionRowNoFlip<i/2>(data, kern)+ convolutionRowNoFlip<(i+1)/2>(data+i/2, kern+i/2) ; //return data[i-1] * kern[i-1] + convolutionRowNoFlip<i - 1>(data,kern); } template<> __device__ float convolutionRowNoFlip<1>(const float *data, const float *kern){ return data[0]*kern[0]; } template<> __device__ float convolutionRowNoFlip<0>(const float *data, const float *kern){ return 0; } template<int KERN_WIDTH> __device__ void convolutionRowNoFlip(float& sum, const float *data, const float *kern, const int kern_wid){ if(KERN_WIDTH>0) sum+=convolutionRowNoFlip<KERN_WIDTH>(data,kern); else #pragma unroll 8 for (int col=0; col < kern_wid; col++) {//loop over col sum+=data[col]*kern[col]; } } template<bool accumulate> __device__ void store_or_accumulate(float& dst,const float value ){ if(accumulate){ dst += value; }else dst = value; } /** * Implementation of the valid convolution that keep the full image and the full kernel in shared memory * Don't implement the stack. * each thread compute only one value for the output if split is false * thread block size=out_wid, out_len(or less then out_len if split is true) * grid block size=batch_id, nkern * dynamic shared memory: img_len*img_wid+kern_len*kern_wid * * nkern: the number of kernel, used to compute the output image to store the result * nstack: the size of the stack, used to compute the image to load. * template flipped_kern: if true, we "flip" the kernel as in a real convolution, else we don't * template split: if true, each thread computes more than 1 output pixel * When true, allow for output image bigger then 512 pixel. * Use more registers. 
*/ template<bool flipped_kern, int KERN_WIDTH, bool split> __global__ void conv_patch( float* img, float* kern, float* out, int img_len, int img_wid, int kern_len, int kern_wid, int nkern, int nstack) { int __shared__ out_len, out_wid, nb_thread_id; out_len = img_len - kern_len + 1; out_wid = img_wid - kern_wid + 1; nb_thread_id = blockDim.z*blockDim.y*blockDim.x; extern __shared__ float s_data[]; __shared__ int batch_id, kern_id; batch_id = blockIdx.x; kern_id = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; int out_col = tx;//output col const int thread_id = ty*blockDim.x + tx; float * d_img=&s_data[0];//size of [IMAGE_LEN * IMAGE_WID]; float * d_kern=&s_data[img_len * img_wid];//size of [KERNEL_LEN * KERNEL_WID]; kern+=kern_len*kern_wid*nstack*kern_id; img+=img_len*img_wid*(nstack*batch_id); load_to_shared(d_img, img, thread_id,nb_thread_id,img_len*img_wid); load_to_shared(d_kern, kern, thread_id,nb_thread_id,kern_len*kern_wid,flipped_kern); __syncthreads(); if(!split){ int out_row = ty;//output row float sum = 0.0f; for (int row=0; row < kern_len; row++) {//loop over row const float* idx_kern=&d_kern[row*kern_wid]; const float* idx_in=&d_img[(row+out_row)*img_wid+out_col]; convolutionRowNoFlip<KERN_WIDTH>(sum,idx_in,idx_kern,kern_wid); } out[batch_id*out_wid*out_len*nkern+//the good batch blockIdx.y*out_wid*out_len+//the output image out_row*out_wid+out_col] = sum; }else{ for(int out_row=ty;out_row<out_len;out_row+=blockDim.y){ float sum = 0.0f; for (int row=0; row < kern_len; row++) {//loop over row const float* idx_kern=&d_kern[row*kern_wid]; const float* idx_in=&d_img[(row+out_row)*img_wid+out_col]; convolutionRowNoFlip<KERN_WIDTH>(sum,idx_in,idx_kern,kern_wid); } out[batch_id*out_wid*out_len*nkern+//the good batch kern_id*out_wid*out_len+//the output image out_row*out_wid+out_col] = sum; } } } /** * As conv_patch, but implement the stack in the kernel. * I keep it separated from conv_patch as we take more registers and this could lower the occupency. * Implementation of the valid convolution that keep the full image and the full kernel in shared memory * each thread compute only one value for the output if split==false else it compute more than 1 values * thread block size=out_wid, out_len/X (X is any number, optimized value is ceil(out_len/N) * grid block size=batch_id, nkern * dynamic shared memory: img_len*img_wid+(preload_full_kern?KERNEL_LEN:1)*kern_wid * * nkern: the number of kernel, used to compute the output image to store the result * nstack: the size of the stack, used to compute the image to load. * dx: patch stride rows(1 for normal convolution) * dy: patch stride cols(1 for normal convolution) * template flipped_kern: if true, we "flip" the kernel as in a real convolution, else we don't * template accumulate: if true, we add the result, else we override the result * template KERN_WIDTH: if 0, will work for any kern_wid, else it specialyse to this kern_wid as an optimization * template img_c_contiguous_2d: if true, the img have are collon and row contiguous * template kern_c_contiguous_2d: if true, the kernel have are collon and row contiguous * template split: if true, each thread generate more than 1 output pixel, but use more registers. * template preload_full_kern: if true, we load the full kernel in shared memory, else, we load 1 row at a time. * template subsample: if false, remove some computation needed when dx or dy!=1. 
*/ template<bool flipped_kern, bool accumulate, int KERN_WIDTH, bool img_c_contiguous_2d, bool kern_c_contiguous_2d, bool split, bool preload_full_kern, bool subsample> __global__ void conv_patch_stack( float* img, float* kern, float* out, int img_len, int img_wid, int kern_len, int kern_wid, int out_len, int out_wid, int nkern, int nstack, int img_stride_col,int img_stride_row, int img_stride_stack, int img_stride_batch, int kern_stride_col, int kern_stride_row, int kern_stride_stack, int kern_stride_nkern, int dx, int dy) { int __shared__ nb_thread_id; nb_thread_id = blockDim.z*blockDim.y*blockDim.x; extern __shared__ float s_data[]; int batch_id = blockIdx.x; int kern_id = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; int out_col = tx;//output col int out_row = ty;//output row const int thread_id = out_row*out_wid + out_col; float * d_img=&s_data[0];//size of [IMAGE_LEN * IMAGE_WID]; float * d_kern=&s_data[img_len * img_wid];//size of [(preload_full_kern?KERNEL_LEN:1) * KERNEL_WID]; if(!split){ kern+=kern_stride_nkern*kern_id;//the good nkern img+=img_stride_batch*batch_id;//the good batch float sum = 0.0f; for (int stack = 0;stack<nstack;stack++,kern+=kern_stride_stack, img+=img_stride_stack){ load_to_shared(d_img,img,thread_id,nb_thread_id,img_wid,img_len, img_stride_col, img_stride_row, false, img_c_contiguous_2d); if(preload_full_kern) load_to_shared(d_kern, kern, thread_id, nb_thread_id, kern_wid,kern_len, kern_stride_col, kern_stride_row, flipped_kern, kern_c_contiguous_2d); __syncthreads(); for (int row=0; row < kern_len; row++) {//loop over row if(!preload_full_kern){ __syncthreads(); int idx2; if(flipped_kern) idx2=(kern_len-row-1)*kern_stride_row; else idx2=(row)*kern_stride_row; load_to_shared(d_kern, kern+idx2, thread_id, nb_thread_id, kern_wid,1, kern_stride_col, kern_stride_row, flipped_kern, kern_c_contiguous_2d); __syncthreads(); } const float* idx_kern; if(preload_full_kern) idx_kern=&d_kern[row*kern_wid]; else idx_kern=d_kern; const float* idx_in; if(subsample) idx_in=&d_img[(row+out_row*dx)*img_wid+out_col*dy]; else idx_in=&d_img[(row+out_row)*img_wid+out_col]; convolutionRowNoFlip<KERN_WIDTH>(sum,idx_in,idx_kern,kern_wid); } __syncthreads(); // ensure calculations have completed before any thread starts changing the shared memory } store_or_accumulate<accumulate>( out[batch_id*out_wid*out_len*nkern+//the good batch out_wid*out_len*kern_id+//the output image out_row*out_wid+out_col],sum); }else{ float __shared__ *kern_, *img_; int __shared__ out_len_max; kern_=kern+kern_stride_nkern*kern_id;//the good nkern img_=img+img_stride_batch*batch_id;//the good batch //out_len_max must by higher then out_len as we need all thread when we load the image as the blockDim.y is not always a multiple of out_len. out_len_max = (out_len/blockDim.y+(out_len%blockDim.y==0?0:1))*blockDim.y; //TODO: inverse the out_row and stack loop to don't load the date as frequently! //TODO: do this happen elsewhere? 
for(;out_row<out_len_max;out_row+=blockDim.y){ float sum = 0.0f; for (int stack = 0;stack<nstack;stack++){ //TODO: load only the part of the image needed or put the partial result in shared memory int idx1=img_stride_stack*stack; load_to_shared(d_img,img_+idx1,thread_id,nb_thread_id,img_wid,img_len, img_stride_col, img_stride_row, false, img_c_contiguous_2d); if(preload_full_kern){ int idx2=kern_stride_stack*stack; load_to_shared(d_kern, kern_+idx2, thread_id, nb_thread_id, kern_wid,kern_len, kern_stride_col, kern_stride_row, flipped_kern, kern_c_contiguous_2d); } __syncthreads(); for (int row=0; row < kern_len; row++) {//loop over row if(!preload_full_kern){ __syncthreads(); int idx2=kern_stride_stack*stack; if(flipped_kern) idx2+=(kern_len-row-1)*kern_stride_row; else idx2+=(row)*kern_stride_row; load_to_shared(d_kern, kern_+idx2, thread_id, nb_thread_id, kern_wid,1, kern_stride_col, kern_stride_row, flipped_kern, kern_c_contiguous_2d); __syncthreads(); } const float* idx_kern; if(preload_full_kern) idx_kern=&d_kern[row*kern_wid]; else idx_kern=d_kern; const float* idx_in; if(subsample) idx_in=&d_img[(row+out_row*dx)*img_wid+out_col*dy]; else idx_in=&d_img[(row+out_row)*img_wid+out_col]; //if needed as on Fermi as reading out of bound index from shared memory generate an error. //Not needed on generation before as they worked anyway. Removing the if generate the good code //as we store the result of only the good thread. //This was with nvcc 3.0 on an GTX470 card. if(out_row<out_len) convolutionRowNoFlip<KERN_WIDTH>(sum,idx_in,idx_kern,kern_wid); } __syncthreads(); // ensure calculations have completed before any thread starts changing the shared memory } if(out_row<out_len) store_or_accumulate<accumulate>( out[batch_id*out_wid*out_len*nkern+//the good batch out_wid*out_len*kern_id+//the output image out_row*out_wid+out_col],sum); } } } /** * As conv_patch_stack, but kern_len thread for each output pixel * I keep it separated as use more register. * Implementation of the valid convolution that keep the full image and the full kernel in shared memory * thread block size=out_wid, out_len, ceil_intdiv(kern_len/nb_split) * grid block size=batch_id, nkern * dynamic shared memory: img_len*img_wid+kern_wid*(preload_full_kern?kern_len:thread_z)+out_size*thread_z * * nkern: the number of kernel, used to compute the output image to store the result * nstack: the size of the stack, used to compute the image to load. * template flipped_kern: if true, we "flip" the kernel as in a real convolution, else we don't * template img_contiguous: if true, the img have are collon and row contiguous * template preload_full_kern: work only when split is true. 
We don't load the full kernel at once, but we load ceil_intdiv(kern_len/nb_split) kernel row at a time */ template<bool flipped_kern, int KERN_WIDTH, bool c_contiguous, bool split, bool preload_full_kern> __global__ void conv_patch_stack_reduce( float* img, float* kern, float* out, int img_len, int img_wid, int kern_len, int kern_wid, int nkern, int nstack, int img_stride_col,int img_stride_row, int img_stride_stack, int img_stride_batch, int kern_stride_col, int kern_stride_row, int kern_stride_stack, int kern_stride_nkern) { //int __shared__ out_len, out_wid, nb_thread_id; //out_len = img_len - kern_len + 1; //out_wid = img_wid - kern_wid + 1; const int out_wid = blockDim.x; const int out_len = blockDim.y; const int nb_thread_id = blockDim.z*blockDim.y*blockDim.x; extern __shared__ float s_data[]; int batch_id = blockIdx.x; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; int tz = threadIdx.z; int out_col = tx;//output col int out_row = ty;//output row const int thread_id = tz*blockDim.y*blockDim.x+ty*blockDim.x+tx; //d_img size [IMAGE_LEN * IMAGE_WID]; float * d_img=&s_data[0]; //d_kern size[(preload_full_kern?KERNEL_LEN:blockDim.z) * KERNEL_WID] float * d_kern=&s_data[img_len * img_wid]; //d_reduce size [n_threads] //N.B. this overlaps with d_img and d_kern! float * d_reduce=&s_data[0]; float sum = 0.0f; kern+=kern_stride_nkern*blockIdx.y;//the good nkern img+=img_stride_batch*batch_id;//the good batch for (int stack = 0;stack<nstack;stack++,kern+=kern_stride_stack, img+=img_stride_stack){ __syncthreads(); load_to_shared(d_img, img, thread_id, nb_thread_id, img_wid, img_len, img_stride_col, img_stride_row, false, c_contiguous); if(split && ! preload_full_kern){ for(int first_row=0;first_row<kern_len;first_row+=blockDim.z){ //N.B. - Jan 30, 2011 with CUDA 3.2 I found that without the explicit cast to // (int)blockDim.z, idx3 would sometimes be negative. I'm rusty on my signed vs. unsigned // details, but that seemed really weird. tricky bug to find too. int idx3 = flipped_kern ? max((kern_len - (int)blockDim.z - first_row),0) : first_row; int len3 = min(blockDim.z, kern_len - first_row); __syncthreads(); load_to_shared(d_kern, kern+idx3*kern_stride_row, thread_id, nb_thread_id, kern_wid, len3, kern_stride_col, kern_stride_row, flipped_kern, c_contiguous); __syncthreads(); const float* idx_kern=&d_kern[tz*kern_wid]; const float* idx_in=&d_img[(first_row+tz+out_row)*img_wid+out_col]; float sum2 = 0; if(tz<len3) convolutionRowNoFlip<KERN_WIDTH>(sum2,idx_in,idx_kern,kern_wid); sum+=sum2; } }else if(split){ load_to_shared(d_kern, kern, thread_id, nb_thread_id, kern_wid, kern_len, kern_stride_col, kern_stride_row, flipped_kern, c_contiguous); __syncthreads(); for(int row=tz;row<kern_len;row+=blockDim.z){ const float* idx_kern=&d_kern[row*kern_wid]; const float* idx_in=&d_img[(row+out_row)*img_wid+out_col]; convolutionRowNoFlip<KERN_WIDTH>(sum,idx_in,idx_kern,kern_wid); } }else{ int row = tz;//The row of the kernel. 
const float* idx_kern=&d_kern[row*kern_wid]; const float* idx_in=&d_img[(row+out_row)*img_wid+out_col]; load_to_shared(d_kern, kern, thread_id, nb_thread_id, kern_wid, kern_len, kern_stride_col, kern_stride_row, flipped_kern, c_contiguous); __syncthreads(); convolutionRowNoFlip<KERN_WIDTH>(sum,idx_in,idx_kern,kern_wid); } __syncthreads(); // ensure calculations have completed before any thread starts changing the shared memory } //reduce no sync because previous loop ends with sync d_reduce[thread_id]=sum; __syncthreads(); if(thread_id<out_len*out_wid){ // blockDim.x==out_wid, blockDim.y==out_len //sum=0; for(int i=1;i<blockDim.z;i++){ sum+=d_reduce[thread_id+i*out_wid*out_len]; } out[batch_id*out_wid*out_len*nkern+//the good batch out_wid*out_len*blockIdx.y+//the output image out_row*out_wid+out_col] = sum; } } /** * WORK FOR IMAGE THAT DON'T FIT IN SHARED MEMORY * we store kern_len row of the image and the full kernel in the shared memory * each thread compute only one value for the output * Don't implement the stack and nkern in the kernel. * thread block size=out_wid * grid block size=out_len,batch_id * dynamic shared memory: kern_len*img_wid+kern_len*kern_wid * Diff with conv_patch: don't store the full image in the shared memory. * I.E. work for bigger image then conv_patch<split=true,...>. */ template<int KERN_WIDTH, bool c_contiguous> __global__ void conv_rows( float* img, float* kern, float* out, int img_len, int img_wid, int kern_len, int kern_wid, int nkern, int nstack, int img_stride_col, int img_stride_row, int img_stride_stack, int img_stride_batch, int kern_stride_col, int kern_stride_row, int kern_stride_stack, int kern_stride_nkern) { int __shared__ out_len, out_wid, nb_thread_id, batch_id, kern_id; float __shared__ *d_img, *d_kern; out_len = img_len - kern_len + 1; out_wid = img_wid - kern_wid + 1; nb_thread_id = blockDim.z*blockDim.y*blockDim.x; batch_id= blockIdx.y/nkern; kern_id = blockIdx.y%nkern; extern __shared__ float s_data[]; const int out_col = threadIdx.x;//output col const int out_row = blockIdx.x;;//output row const int thread_id = threadIdx.x; d_img=&s_data[0];//size of [KERN_LEN * IMAGE_WID]; d_kern=&s_data[kern_len * img_wid];//size of [KERNEL_LEN * KERNEL_WID]; img+=img_stride_batch*batch_id;//selection the good image from the batch img+=out_row*img_stride_row;//select the good top row. kern+=kern_stride_nkern*kern_id;//the good nkern load_to_shared(d_img,img,thread_id,nb_thread_id,img_wid,kern_len, img_stride_col, img_stride_row, false, c_contiguous); load_to_shared(d_kern, kern, thread_id, nb_thread_id, kern_wid,kern_len, kern_stride_col, kern_stride_row, true, c_contiguous); __syncthreads(); float sum = 0.0f; for (int row=0; row < kern_len; row++) {//loop over row const float* idx_kern=&d_kern[row*kern_wid]; const float* idx_in=&d_img[(row)*img_wid+out_col]; convolutionRowNoFlip<KERN_WIDTH>(sum,idx_in,idx_kern,kern_wid); } out[batch_id*out_wid*out_len*nkern+//the good batch kern_id*out_wid*out_len+//the output image out_row*out_wid+out_col] = sum; } /** * WORK FOR IMAGE THAT DON'T FIT IN SHARED MEMORY * as conv_rows, but implement the stack. Separate as this use more register. * we store kern_len row of the image and the full kernel in the shared memory * each thread compute only one value for the output * thread block size=out_wid, block_len * grid block size=intceil(out_len/block_len),nb_batch*nb_kern * dynamic shared memory: (kern_len+block_len-1)*img_wid+kern_len*kern_wid * Diff with conv_patch: don't store the full image in the shared memory. 
* I.E. work for bigger image then conv_patch<split=true,...>. */ template<int KERN_WIDTH, bool c_contiguous> __global__ void conv_rows_stack( float* img, float* kern, float* out, const int img_len, const int img_wid, const int kern_len, const int kern_wid, const int nkern, const int nstack, const int img_stride_col, const int img_stride_row, const int img_stride_stack, const int img_stride_batch, const int kern_stride_col, const int kern_stride_row, const int kern_stride_stack, const int kern_stride_nkern) { int __shared__ out_len, out_wid, nb_thread_id, batch_id, kern_id, nb_rows; float __shared__ *d_img, *d_kern; out_len = img_len - kern_len + 1; out_wid = img_wid - kern_wid + 1; nb_thread_id = blockDim.z*blockDim.y*blockDim.x; batch_id= blockIdx.y/nkern; kern_id = blockIdx.y%nkern; nb_rows = blockDim.y; int rows_to_read = MIN( kern_len + nb_rows - 1, img_len - blockIdx.x * nb_rows); /** * Every thread ultimately computes one value in the output, at coordinates * out[ batch_id, kern_id, out_row, out_col] * * The batch_id and kern_id are packed into blockIdx.y. out_row and out_col * are the threadIdx.x and threadIdx.y. * * Every thread block deals only with one image, and one filter kernel. */ extern __shared__ float s_data[]; const int out_col = threadIdx.x;//output col const int out_row = blockIdx.x*blockDim.y+threadIdx.y;//output row const int shared_row = threadIdx.y; const int thread_id = threadIdx.y*blockDim.x+threadIdx.x; /* * The kernel works by looping over channels (aka colours, aka the stack). * On each iteration, a thread block loads one channel of all the image rows that * it needs to use, and one channel slice of one kernel. */ d_img=&s_data[0];//size of [(KERN_LEN+block_len-1) * IMAGE_WID]; d_kern=&s_data[(kern_len+nb_rows-1) * img_wid];//size of [KERNEL_LEN * KERNEL_WID]; float sum = 0.0f; for (int stack = 0; stack < nstack; stack++){ int offset = img_stride_batch * batch_id + img_stride_stack * stack //blockIdx.x is which chunk of nb_rows this thread block deals with + img_stride_row * (blockIdx.x * nb_rows); load_to_shared( d_img, // dst img+offset, // src thread_id, // linear position in block nb_thread_id, // number of threads img_wid, // cols in image to read rows_to_read, // number of rows to read img_stride_col, // img[i, j, k, l] to img[i, j, k, l + 1] img_stride_row, // img[i, j, k, l] to img[i, j, k + 1, l] false, // flip while reading c_contiguous); offset = kern_stride_nkern * kern_id + kern_stride_stack * stack; load_to_shared(d_kern, kern+offset, thread_id, nb_thread_id, kern_wid,kern_len, kern_stride_col, kern_stride_row, true, c_contiguous); __syncthreads(); for (int row=0; row < kern_len; row++) {//loop over row const float* idx_kern=&d_kern[row*kern_wid]; const float* idx_in=&d_img[(row+shared_row)*img_wid+out_col]; convolutionRowNoFlip<KERN_WIDTH>(sum,idx_in,idx_kern,kern_wid); } __syncthreads();//to be sure all thread have finished before we modif the shared memory. } if (out_row < out_len) out[batch_id*out_wid*out_len*nkern+//the good batch kern_id*out_wid*out_len+//the output image out_row*out_wid+out_col] = sum; } /** * WORK FOR IMAGE THAT DON'T FIT IN SHARED MEMORY * as conv_rows_stack, but load only block_len of the image at a time and 1 or all kern row. 
* we store block_len row of the image(at a time) and one or all kernel row in the shared memory * each thread compute only one value for the output * thread block size=out_wid, block_len * grid block size=intceil(out_len/block_len),nb_batch*nb_kern * dynamic shared memory: block_len * img_wid+(preload_full_kern?kern_len:1)*kern_wid * Diff with conv_patch: don't store the full image and kernel in the shared memory. * I.E. work for bigger image then conv_patch<split=true,...>. */ template<int KERN_WIDTH, bool c_contiguous, bool preload_full_kern> __global__ void conv_rows_stack2( float* img, float* kern, float* out, const int img_len, const int img_wid, const int kern_len, const int kern_wid, const int nkern, const int nstack, const int img_stride_col, const int img_stride_row, const int img_stride_stack, const int img_stride_batch, const int kern_stride_col, const int kern_stride_row, const int kern_stride_stack, const int kern_stride_nkern) { int __shared__ out_len, out_wid, nb_thread_id, batch_id, kern_id, nb_rows; float __shared__ *d_img, *d_kern; out_len = img_len - kern_len + 1; out_wid = img_wid - kern_wid + 1; nb_thread_id = blockDim.z*blockDim.y*blockDim.x; batch_id= blockIdx.y/nkern; kern_id = blockIdx.y%nkern; nb_rows = blockDim.y; extern __shared__ float s_data[]; const int out_col = threadIdx.x;//output col const int out_row = blockIdx.x*blockDim.y+threadIdx.y;//output row const int shared_row = threadIdx.y; const int thread_id = threadIdx.y*blockDim.x+threadIdx.x; d_img=&s_data[0];//size of [nb_rows * IMAGE_WID]; d_kern=&s_data[nb_rows*img_wid];//size of [(preload_full_kern?KERNEL_LEN:1) * KERNEL_WID]; float sum = 0.0f; for (int stack = 0;stack<nstack;stack++){ int _idx2=img_stride_batch*batch_id+img_stride_stack*stack;//selection the good image from the batch and stack _idx2+=(blockIdx.x*nb_rows)*img_stride_row;//select the good top row for the block of threads __syncthreads(); load_to_shared(d_img,img+_idx2,thread_id,nb_thread_id,img_wid,nb_rows-1, img_stride_col, img_stride_row, false, c_contiguous); if(preload_full_kern) load_to_shared(d_kern, kern+kern_stride_nkern*kern_id+kern_stride_stack*stack, thread_id, nb_thread_id, kern_wid,kern_len, kern_stride_col, kern_stride_row, true, c_contiguous); __syncthreads(); for (int row=0; row < kern_len; row++) {//loop over row __syncthreads(); if((blockIdx.x*nb_rows+row+nb_rows-1)<img_len){ int _idx1=img_stride_batch*batch_id+img_stride_stack*stack;//selection the good image from the batch and stack _idx1+=(blockIdx.x*nb_rows)*img_stride_row;//select the good top row for the block of threads _idx1+=(row+nb_rows-1)*img_stride_row;//the current last row load_to_shared(d_img+((row+nb_rows-1)%nb_rows)*img_wid, img+_idx1, thread_id, nb_thread_id, img_wid, 1, img_stride_col, img_stride_row, false, c_contiguous);//we use d_img as a circular buffer. } if(!preload_full_kern){ int _idx3=kern_stride_nkern*kern_id+kern_stride_stack*stack;//selection the good kern from the batch and stack _idx3+=(kern_len-row-1)*kern_stride_row;//the current last row flipped load_to_shared(d_kern, kern+_idx3, thread_id, nb_thread_id, kern_wid,1, kern_stride_col, kern_stride_row, true, c_contiguous); } __syncthreads(); //if needed as on Fermi as reading out of bound index from shared memory generate an error. //Not needed on generation before as they worked anyway. Removing the if generate the good code //as we store the result of only the good thread. //This was with nvcc 3.0 on an GTX470 card. 
if(out_row<out_len){ const float* idx_kern; if(preload_full_kern) idx_kern=&d_kern[row*kern_wid]; else idx_kern=d_kern; const float* idx_in=&d_img[((shared_row+row)%nb_rows)*img_wid+out_col]; float sum_ =0.0f; convolutionRowNoFlip<KERN_WIDTH>(sum_,idx_in,idx_kern,kern_wid); sum+=sum_;//We pass by an intermediate variable to have more precission. } } } __syncthreads(); if(out_row<out_len) out[batch_id*out_wid*out_len*nkern+//the good batch kern_id*out_wid*out_len+//the output image out_row*out_wid+out_col] = sum; } /** * Implementation of 'valid' mode convolution that uses one block per output pixel, and uses a sum-reduce within each block to compute the * kernel-image inner-product in parallel. * * This implementation uses shared memory for the reduce, so it is limited by the product of stacklen x kern_len * * template stack_loop: if true, we accept that blockDim.x < nstack and we add a loop for this(use 3 more registers, so lower occupency when true, but accept nstack*kern_len>512) * TODO: explain parameters, preconditions */ template<bool stack_loop> __global__ void conv_valid_row_reduce(int nB, int nK, int stacklen, int img_len, int img_wid, int kern_len, int kern_wid, int out_len, int out_wid, //physical float *img, int img_str_B, int img_str_S, int img_str_R, int img_str_C, float *kern, int kern_str_K, int kern_str_S, int kern_str_R, int kern_str_C, float *out, int out_str_B, int out_str_K, int out_str_R, int out_str_C , int subsample_rows, int subsample_cols, const int initial_reduce_boundary) { const int outsize = nB * nK * out_len * out_wid; extern __shared__ float reducebuf[]; for (int i = blockIdx.x; i < /*physical*/outsize; i += gridDim.x) { //figure out what output element we're in charge of computing int ii = i; int iB = ii % nB; // output batch index ii = ii / nB; int iK = ii % nK; // output kernel index ii = ii / nK; int iR_physical = ii % out_len; //output kernel row int iC_physical = ii / out_len; // output kernel column int iR_logical = iR_physical * subsample_rows; int iC_logical = iC_physical * subsample_cols; int ss = threadIdx.x; int rr = threadIdx.y; int img_rr = iR_logical + kern_len - 1 - rr; int reduceIdx = threadIdx.x * blockDim.y + threadIdx.y; float sum = 0.0f; if(stack_loop){ for (; ss < stacklen; ss+=blockDim.x){ float * kk_0 = kern + iK*kern_str_K + ss*kern_str_S + rr*kern_str_R; float * ii_0 = img + iB*img_str_B + ss*img_str_S + img_rr*img_str_R + (iC_logical + kern_wid - 1)*img_str_C; for (int cc = 0; cc < kern_wid; ++cc) { sum += kk_0[0] * ii_0[0]; kk_0 += kern_str_C; ii_0 -= img_str_C; } } }else{ float * kk_0 = kern + iK*kern_str_K + ss*kern_str_S + rr*kern_str_R; float * ii_0 = img + iB*img_str_B + ss*img_str_S + img_rr*img_str_R + (iC_logical + kern_wid - 1)*img_str_C; for (int cc = 0; cc < kern_wid; ++cc) { sum += kk_0[0] * ii_0[0]; kk_0 += kern_str_C; ii_0 -= img_str_C; } } if (blockDim.x * blockDim.y == 1) { out[iB * out_str_B + iK * out_str_K + iR_physical * out_str_R + iC_physical * out_str_C] = sum; } else { reducebuf[reduceIdx] = sum; __syncthreads(); int reduce_boundary = initial_reduce_boundary; // add in the terms above the reduce boundary if (reduceIdx + reduce_boundary < (blockDim.x * blockDim.y)) reducebuf[reduceIdx] += reducebuf[reduce_boundary +reduceIdx]; reduce_boundary >>= 1; // there are an equal number of terms above and below the reduce_boundary while (reduce_boundary) { __syncthreads(); if (reduceIdx < reduce_boundary) { reducebuf[reduceIdx] += reducebuf[reduce_boundary + reduceIdx]; } reduce_boundary >>= 1; } if (reduceIdx == 0) 
{ out[iB * out_str_B + iK * out_str_K + iR_physical * out_str_R + iC_physical * out_str_C] = reducebuf[0]; } } } } /** * Reference implementation of 'valid' mode convolution (with stack) * * This implementation works for any size of image and kernel. It does not use shared memory. * * TODO: explain parameters, preconditions */ __global__ void conv_reference_valid(int nB, int nK, int stacklen, int img_len, int img_wid, int kern_len, int kern_wid, int out_len, int out_wid, //physical float *img, int img_str_B, int img_str_S, int img_str_R, int img_str_C, float *kern, int kern_str_K, int kern_str_S, int kern_str_R, int kern_str_C, float *out, int out_str_B, int out_str_K, int out_str_R, int out_str_C , int subsample_rows, int subsample_cols) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; __shared__ int numThreads, outsize; numThreads = blockDim.x * gridDim.x; outsize = nB * nK * out_len * out_wid; for (int i = idx; i < outsize; i += numThreads) //physical { //figure out what output element we're in charge of computing int ii = i; int iB = ii % nB; // output batch index ii = ii / nB; int iK = ii % nK; // output kernel index ii = ii / nK; int iR_physical = ii % out_len; //output kernel row int iC_physical = ii / out_len; // output kernel column int iR_logical = iR_physical * subsample_rows; int iC_logical = iC_physical * subsample_cols; float sum = 0.0f; for (int ss = 0; ss < stacklen; ++ss) { for (int rr = 0; rr < kern_len; ++rr) { int img_rr = iR_logical + kern_len - 1 - rr; for (int cc = 0; cc < kern_wid; ++cc) { int img_cc = iC_logical + kern_wid-1-cc; float k_0 = kern[iK*kern_str_K + ss*kern_str_S + rr*kern_str_R + cc*kern_str_C]; float i_0 = img[iB*img_str_B + ss*img_str_S + img_rr*img_str_R + img_cc*img_str_C]; sum += k_0 * i_0; } } } //coords[i*5+0] = iB; //coords[i*5+1] = iK; //coords[i*5+2] = iR; //coords[i*5+3] = iC; //coords[i*5+4] = iB * out_str_B + iK * out_str_K + iR * out_str_R + iC * out_str_C; out[iB * out_str_B + iK * out_str_K + iR_physical * out_str_R + iC_physical * out_str_C] = sum; } } /** * Reference implementation of 'full' mode convolution (with stack) * * This implementation works for any size of image and kernel. It does not use shared memory. 
* * TODO: explain parameters, preconditions */ __global__ void conv_reference_full(int nB, int nK, int stacklen, int img_len, int img_wid, int kern_len, int kern_wid, int out_len, int out_wid, //physical dimensions float *img, int img_str_B, int img_str_S, int img_str_R, int img_str_C, float *kern, int kern_str_K, int kern_str_S, int kern_str_R, int kern_str_C, float *out, int out_str_B, int out_str_K, int out_str_R, int out_str_C, int subsample_rows, int subsample_cols) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; __shared__ int numThreads, physical_outsize; numThreads = blockDim.x * gridDim.x; physical_outsize = nB * nK * out_len * out_wid; for (int i = idx; i < physical_outsize; i += numThreads) { //figure out what output element we're in charge of computing int ii = i; int iB = ii % nB; // output batch index ii = ii / nB; int iK = ii % nK; // output kernel index ii = ii / nK; int iR_physical = ii % out_len; //output kernel row int iC_physical = ii / out_len; // output kernel column int iR_logical = iR_physical * subsample_rows; int iC_logical = iC_physical * subsample_cols; float sum = 0.0f; for (int ss = 0; ss < stacklen; ++ss) { for (int rr = 0; rr < kern_len; ++rr) { int img_rr = iR_logical - rr; if ((img_rr >= 0) && (img_rr < img_len)) { for (int cc = 0; cc < kern_wid; ++cc) { int img_cc = iC_logical - cc; if ((img_cc >= 0) && (img_cc < img_wid)) { float k_0 = kern[iK*kern_str_K + ss*kern_str_S + rr*kern_str_R + cc*kern_str_C]; float i_0 = img[iB*img_str_B + ss*img_str_S + img_rr*img_str_R + img_cc*img_str_C]; sum += k_0 * i_0; } } } } } out[iB * out_str_B + iK * out_str_K + iR_physical * out_str_R + iC_physical * out_str_C] = sum; } } #endif // #ifndef CONV_KERNEL_CU /* Local Variables: mode:c++ c-basic-offset:4 c-file-style:"stroustrup" indent-tabs-mode:nil fill-column:79 End: */ // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:textwidth=79 :
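/**
 * Host-side usage sketch for conv_reference_valid above. This is a minimal
 * illustration, not part of the original file: it assumes img, kern and out
 * are device buffers in C-contiguous (batch, stack, row, col),
 * (kern, stack, row, col) and (batch, kern, row, col) layouts with strides
 * counted in elements, and that no subsampling is wanted (subsample = 1), so
 * out_len = img_len - kern_len + 1 and out_wid = img_wid - kern_wid + 1.
 * The <<<128, 256>>> launch shape is arbitrary: the kernel walks the output
 * with a grid-stride loop, so any positive configuration covers all of it.
 */
static void launch_conv_reference_valid(float *img, float *kern, float *out,
                                        int nB, int nK, int stacklen,
                                        int img_len, int img_wid,
                                        int kern_len, int kern_wid,
                                        cudaStream_t stream)
{
    const int out_len = img_len - kern_len + 1;
    const int out_wid = img_wid - kern_wid + 1;
    // Element strides of the C-contiguous 4D tensors.
    const int img_str_C = 1;
    const int img_str_R = img_wid;
    const int img_str_S = img_len * img_wid;
    const int img_str_B = stacklen * img_str_S;
    const int kern_str_C = 1;
    const int kern_str_R = kern_wid;
    const int kern_str_S = kern_len * kern_wid;
    const int kern_str_K = stacklen * kern_str_S;
    const int out_str_C = 1;
    const int out_str_R = out_wid;
    const int out_str_K = out_len * out_wid;
    const int out_str_B = nK * out_str_K;
    conv_reference_valid<<<128, 256, 0, stream>>>(
        nB, nK, stacklen,
        img_len, img_wid, kern_len, kern_wid, out_len, out_wid,
        img, img_str_B, img_str_S, img_str_R, img_str_C,
        kern, kern_str_K, kern_str_S, kern_str_R, kern_str_C,
        out, out_str_B, out_str_K, out_str_R, out_str_C,
        /*subsample_rows=*/1, /*subsample_cols=*/1);
}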
// #define DEBUG_RESULT namespace lightseq { namespace cuda { template <OperationType OpType_> QuantGptEncoder<OpType_>::QuantGptEncoder( int max_batch_size, const int *p_d_token_id, float *p_d_ppl, int *p_d_sample_id, const QuantGptWeight<OpType_> &tw, cudaStream_t stream, cudaStream_t cache_stream, cublasHandle_t hd) : _max_batch_size(max_batch_size), _p_d_token_id(p_d_token_id), _p_d_ppl(p_d_ppl), _p_d_sample_id(p_d_sample_id), _tw(tw), _stream(stream), _cache_stream(cache_stream), _hd(hd), _p_d_src_emb_wei(tw.get_src_emb_wei()), _p_d_enc_wei(tw.get_enc_wei()), _fone((_DataType)1.f), _fzero((_DataType)0.f), _src_emb_clip_max(tw.get_src_emb_clip_max()), _output_ln_clip_max(tw.get_output_ln_clip_max()), _logits_clip_max(tw.get_logits_clip_max()), _enc_clip_max(tw.get_enc_clip_max()), _ione((int32_t)1), _izero((int32_t)0), _atten_scaler((_DataType)sqrt(1.f / tw._dim_per_head)), _max_batch_dim(max_batch_size * tw._max_step * tw._hidden_size), _max_thread_per_block(1024), _h_real_seq_len(max_batch_size, 0), _h_ppl(max_batch_size, 0.f), _h_sample_id(max_batch_size * tw._max_step, 0), _h_unfinished(1) { CHECK_GPU_ERROR(cublasLtCreate(&_cublas_lt_handle)); } /** Init the GPU memory pointer which point to the memory buffer needed by encoder. These buffer are used during custom cuda kernel function, find the corresponding function to see how these buffer are used */ template <OperationType OpType_> void QuantGptEncoder<OpType_>::init_buffer() { CHECK_GPU_ERROR( cudaMalloc(&_p_d_real_seq_len, _max_batch_size * sizeof(int))); CHECK_GPU_ERROR(cudaMalloc(&_p_d_query, _max_batch_dim * sizeof(_DataType))); CHECK_GPU_ERROR(cudaMalloc((void **)&_p_d_curandstate, _max_batch_size * sizeof(curandState))); CHECK_GPU_ERROR(cudaMalloc((void **)&_p_d_sample_id_buf, _max_batch_size * _tw._max_step * sizeof(int))); CHECK_GPU_ERROR(cudaMalloc((void **)&_p_d_unfinished, sizeof(int))); ker_curand_setup<<<_max_batch_size, 1, 0, _stream>>>(_p_d_curandstate); _DataType *qkv_buf; CHECK_GPU_ERROR(cudaMalloc(&qkv_buf, 3 * _max_batch_dim * sizeof(_DataType))); _p_d_q = qkv_buf; _p_d_k = qkv_buf + _max_batch_dim; _p_d_v = qkv_buf + 2 * _max_batch_dim; int max_attn_score_dim = round_up( _max_batch_size * _tw._head_num * _tw._max_step * _tw._max_step, 32); CHECK_GPU_ERROR(cudaMalloc(&_p_d_c, max_attn_score_dim * sizeof(_DataType))); int max_batch_dim = _max_batch_size * _tw._max_step * round_up(std::max(_tw._inner_size, _tw._hidden_size * 3), 32); CHECK_GPU_ERROR( cudaMalloc(&_int8_ffn_in_buf, max_batch_dim * sizeof(int8_t))); CHECK_GPU_ERROR(cudaMalloc( &_int32_ffn_out_buf, std::max(max_batch_dim, max_attn_score_dim) * sizeof(int32_t))); CHECK_GPU_ERROR( cudaMalloc(&_int8_ffn_out_buf, std::max(max_batch_dim, round_up(_tw._src_vocab_size, 32) * _tw._max_step * _max_batch_size) * sizeof(int8_t))); // malloc embeddings CHECK_GPU_ERROR( cudaMalloc(&_int8_p_d_src_emb_wei, _tw._src_vocab_size * _tw._hidden_size * sizeof(int8_t))); quantize_weight(_p_d_src_emb_wei[0], _int8_p_d_src_emb_wei, _tw._hidden_size, _tw._src_vocab_size, _quant_range / _src_emb_clip_max, _stream, _cublas_lt_handle); CHECK_GPU_ERROR( cudaMalloc(&_int8_p_d_src_emb_bottom_wei, _tw._src_vocab_size * _tw._hidden_size * sizeof(int8_t))); quantize_weight(_p_d_src_emb_wei[0], _int8_p_d_src_emb_bottom_wei, _tw._hidden_size, _tw._src_vocab_size, _quant_range / _src_emb_clip_max, _stream, _cublas_lt_handle, kColMajor); _p_device_emb.push_back(nullptr); _p_device_emb.push_back( to_gpu(_p_d_src_emb_wei[1], _tw._max_step * _tw._hidden_size, _stream)); 
_p_device_emb.push_back( to_gpu(_p_d_src_emb_wei[2], _tw._hidden_size, _stream)); _p_device_emb.push_back( to_gpu(_p_d_src_emb_wei[3], _tw._hidden_size, _stream)); // malloc reused kv cache max size: _tw._hidden_size * 2 * _tw._n_enc_layer * // _max_batch_size * _max_step * sizeof(T) int8_t *self_kv_cache_buffer; int8_t *sliding_p; CHECK_GPU_ERROR( cudaMalloc(&self_kv_cache_buffer, _max_batch_dim * _tw._n_enc_layer * 4 * sizeof(int8_t))); sliding_p = self_kv_cache_buffer; for (int i = 0; i < _tw._n_enc_layer * 2; i++) { _p_d_self_k_cache.push_back(sliding_p); sliding_p += _max_batch_dim; } for (int i = 0; i < _tw._n_enc_layer * 2; i++) { _p_d_self_v_cache.push_back(sliding_p); sliding_p += _max_batch_dim; } _p_d_self_k_cache1 = _p_d_self_k_cache.data(); _p_d_self_k_cache2 = _p_d_self_k_cache.data() + _tw._n_enc_layer; _p_d_self_v_cache1 = _p_d_self_v_cache.data(); _p_d_self_v_cache2 = _p_d_self_v_cache.data() + _tw._n_enc_layer; // malloc weights _int8_p_d_enc_wei = std::vector<int8_t *>(_tw._n_enc_layer * 4); _scaled_ffn2_colsum = std::vector<_DataType *>(_tw._n_enc_layer); for (_layer_id = 0; _layer_id < _tw._n_enc_layer; _layer_id++) { _weight_offset = _layer_id * _tw._weight_per_enc_layer; // malloc quantized weights CHECK_GPU_ERROR( cudaMalloc(&_int8_p_d_enc_wei[_layer_id * 4], _tw._hidden_size * 3 * _tw._hidden_size * sizeof(int8_t))); CHECK_GPU_ERROR( cudaMalloc(&_int8_p_d_enc_wei[_layer_id * 4 + 1], _tw._hidden_size * _tw._hidden_size * sizeof(int8_t))); CHECK_GPU_ERROR( cudaMalloc(&_int8_p_d_enc_wei[_layer_id * 4 + 2], _tw._hidden_size * _tw._inner_size * sizeof(int8_t))); CHECK_GPU_ERROR( cudaMalloc(&_int8_p_d_enc_wei[_layer_id * 4 + 3], _tw._inner_size * _tw._hidden_size * sizeof(int8_t))); // malloc unquantized weights _p_device_wei.push_back( to_gpu(_p_d_enc_wei[_weight_offset], _tw._hidden_size, _stream)); _p_device_wei.push_back( to_gpu(_p_d_enc_wei[_weight_offset + 1], _tw._hidden_size, _stream)); _p_device_wei.push_back(nullptr); _p_device_wei.push_back(to_gpu(_p_d_enc_wei[_weight_offset + 3], _tw._hidden_size * 3, _stream)); _p_device_wei.push_back(nullptr); _p_device_wei.push_back( to_gpu(_p_d_enc_wei[_weight_offset + 5], _tw._hidden_size, _stream)); _p_device_wei.push_back( to_gpu(_p_d_enc_wei[_weight_offset + 6], _tw._hidden_size, _stream)); _p_device_wei.push_back( to_gpu(_p_d_enc_wei[_weight_offset + 7], _tw._hidden_size, _stream)); _p_device_wei.push_back(nullptr); _p_device_wei.push_back( to_gpu(_p_d_enc_wei[_weight_offset + 9], _tw._inner_size, _stream)); _p_device_wei.push_back(nullptr); _p_device_wei.push_back( to_gpu(_p_d_enc_wei[_weight_offset + 11], _tw._hidden_size, _stream)); quantize_weight(_p_d_enc_wei[_weight_offset + 2], _int8_p_d_enc_wei[_layer_id * 4], _tw._hidden_size, _tw._hidden_size * 3, _quant_range / _enc_clip_max[_layer_id * 12], _stream, _cublas_lt_handle); quantize_weight(_p_d_enc_wei[_weight_offset + 4], _int8_p_d_enc_wei[_layer_id * 4 + 1], _tw._hidden_size, _tw._hidden_size, _quant_range / _enc_clip_max[_layer_id * 12 + 1], _stream, _cublas_lt_handle, kColMajor); quantize_weight(_p_d_enc_wei[_weight_offset + 8], _int8_p_d_enc_wei[_layer_id * 4 + 2], _tw._hidden_size, _tw._inner_size, _quant_range / _enc_clip_max[_layer_id * 12 + 2], _stream, _cublas_lt_handle); quantize_weight(_p_d_enc_wei[_weight_offset + 10], _int8_p_d_enc_wei[_layer_id * 4 + 3], _tw._inner_size, _tw._hidden_size, _quant_range / _enc_clip_max[_layer_id * 12 + 3], _stream, _cublas_lt_handle, kColMajor); _scaled_ffn2_colsum[_layer_id] = nullptr; } 
CHECK_GPU_ERROR(cudaStreamSynchronize(_stream)); CHECK_GPU_ERROR(cudaGetLastError()); std::cout << "quantized encoder buffer init succeed" << std::endl; return; } /** Some requirements needed by custom cuda kernel function */ template <OperationType OpType_> std::string QuantGptEncoder<OpType_>::check() { // if (_max_thread_per_block < _tw._hidden_size) { // return "violate hidden_size <= max_thread_per_block"; // } if (_tw._inner_size & 1) { return "violate inner_size % 2 = 0"; } if (_tw._dim_per_head & 1) { return "violate dim_per_head % 2 = 0"; } if (_p_d_src_emb_wei.size() != 4) { return "violate p_d_src_emb_wei.size() = 4"; } if (_p_d_enc_wei.size() != _tw._weight_per_enc_layer * _tw._n_enc_layer) { return "violate p_d_enc_wei.size() = weight_per_enc_layer * n_enc_layer"; } std::string sampling_method = _tw._sampling_method; if (kSamplingMethods.find(sampling_method) == kSamplingMethods.end()) { return std::string("unsupported sampling_method: ") + sampling_method; } if (_tw._topk <= 0) { return "topk must be positive"; } if (_tw._topp <= 0 && _tw._topp >= 1.0) { return "topp must be in (0, 1)"; } return ""; } template <OperationType OpType_> void QuantGptEncoder<OpType_>::run_one_infer(int batch_size, int batch_seq_len) { if (batch_size > _max_batch_size) { throw std::runtime_error("batch size of input greater than max_batch_size"); } if (batch_seq_len > _tw._max_step) { throw std::runtime_error("seq len of input greater than max_step"); } _batch_size = batch_size; _batch_seq_len = batch_seq_len; _batch_token_num = batch_size * batch_seq_len; CHECK_GPU_ERROR(cudaMemcpyAsync(_p_d_real_seq_len, _h_real_seq_len.data(), sizeof(int) * _batch_size, cudaMemcpyHostToDevice, _stream)); CHECK_GPU_ERROR(cudaMemcpyAsync(_p_d_ppl, _h_ppl.data(), sizeof(float) * _batch_size, cudaMemcpyHostToDevice, _stream)); #ifdef DEBUG_RESULT std::cout << "batch_size-" << batch_size << " batch_seq_len-" << batch_seq_len << std::endl; print_vec(_p_d_token_id, "batch_token_ids", batch_size * batch_seq_len); #endif // token embedding, add position embedding and layer_norm ker_gpt_embedding_i8I_launcher<_DataType>( batch_size, batch_seq_len, _tw._hidden_size, _stream, _int8_p_d_src_emb_bottom_wei, _p_device_emb[1], _p_d_token_id, _p_d_query, _p_d_real_seq_len, _tw._padding_id, 0, _src_emb_clip_max / _quant_range); for (_layer_id = 0; _layer_id < _tw._n_enc_layer; _layer_id++) { _weight_offset = _layer_id * _tw._weight_per_enc_layer; self_attention(); ffn_add_norm(); } compute_ppl(); return; } template <OperationType OpType_> int QuantGptEncoder<OpType_>::run_one_sample(int batch_size, int batch_seq_len) { if (batch_size > _max_batch_size) { throw std::runtime_error("batch size of input greater than max_batch_size"); } if (batch_seq_len > _tw._max_step) { throw std::runtime_error("seq len of input greater than max_step"); } _batch_size = batch_size; _batch_seq_len = batch_seq_len; _batch_token_num = batch_size * batch_seq_len; CHECK_GPU_ERROR(cudaMemcpyAsync(_p_d_real_seq_len, _h_real_seq_len.data(), sizeof(int) * _batch_size, cudaMemcpyHostToDevice, _stream)); CHECK_GPU_ERROR(cudaMemcpyAsync(_p_d_ppl, _h_ppl.data(), sizeof(float) * _batch_size, cudaMemcpyHostToDevice, _stream)); CHECK_GPU_ERROR(cudaMemcpyAsync(_p_d_sample_id, _p_d_token_id, sizeof(int) * _batch_size * _batch_seq_len, cudaMemcpyDeviceToDevice, _stream)); #ifdef DEBUG_RESULT std::cout << "batch_size-" << batch_size << " batch_seq_len-" << batch_seq_len << std::endl; std::cout << "Sample with " << _tw._sampling_method << std::endl; std::cout << 
"padding_id: " << _tw._padding_id << std::endl; std::cout << "vocab_size: " << _tw._src_vocab_size << std::endl; print_vec(_p_d_sample_id, "batch_token_ids", batch_size * batch_seq_len); #endif // token embedding, add position embedding and layer_norm ker_gpt_embedding_i8I_launcher<_DataType>( _batch_size, _batch_seq_len, _tw._hidden_size, _stream, _int8_p_d_src_emb_bottom_wei, _p_device_emb[1], _p_d_sample_id, _p_d_query, _p_d_real_seq_len, _tw._padding_id, 0, _src_emb_clip_max / _quant_range); for (_layer_id = 0; _layer_id < _tw._n_enc_layer; _layer_id++) { _weight_offset = _layer_id * _tw._weight_per_enc_layer; self_attention(); ffn_add_norm(); } int8_t **ftmp = _p_d_self_k_cache2; _p_d_self_k_cache2 = _p_d_self_k_cache1; _p_d_self_k_cache1 = ftmp; ftmp = _p_d_self_v_cache2; _p_d_self_v_cache2 = _p_d_self_v_cache1; _p_d_self_v_cache1 = ftmp; if (sample_one_token() == 0 || _batch_seq_len >= _tw._max_step) { CHECK_GPU_ERROR(cudaMemcpyAsync(_p_d_sample_id_buf, _p_d_sample_id, _batch_token_num * sizeof(int), cudaMemcpyDeviceToDevice, _stream)); CHECK_GPU_ERROR(cudaStreamSynchronize(_stream)); return _batch_seq_len; } while (1) { #ifdef DEBUG_RESULT std::cout << "before sample:batch_size-" << _batch_size << " batch_seq_len-" << _batch_seq_len << std::endl; print_vec(_p_d_sample_id, "batch_token_ids", _batch_token_num); #endif // token embedding, add position embedding and layer_norm ker_gpt_embedding_i8I_launcher<_DataType>( batch_size, 1, _tw._hidden_size, _stream, _int8_p_d_src_emb_bottom_wei, _p_device_emb[1], _p_d_last_sample_id, _p_d_query, _p_d_real_seq_len, _tw._padding_id, _batch_seq_len - 1, _src_emb_clip_max / _quant_range); for (_layer_id = 0; _layer_id < _tw._n_enc_layer; _layer_id++) { _weight_offset = _layer_id * _tw._weight_per_enc_layer; self_attention_with_cache(); ffn_add_norm_with_cache(); } int8_t **ftmp = _p_d_self_k_cache2; _p_d_self_k_cache2 = _p_d_self_k_cache1; _p_d_self_k_cache1 = ftmp; ftmp = _p_d_self_v_cache2; _p_d_self_v_cache2 = _p_d_self_v_cache1; _p_d_self_v_cache1 = ftmp; if (sample_one_token_with_cache() == 0 || _batch_seq_len >= _tw._max_step) break; } CHECK_GPU_ERROR(cudaMemcpyAsync(_p_d_sample_id_buf, _p_d_sample_id, _batch_token_num * sizeof(int), cudaMemcpyDeviceToDevice, _stream)); CHECK_GPU_ERROR(cudaStreamSynchronize(_stream)); return _batch_seq_len; } template <OperationType OpType_> int QuantGptEncoder<OpType_>::sample_one_token() { /* ---step 1. project hidden states to vocab logits--- */ cublasLtMM_withAlgo_i8IO(_int8_ffn_out_buf, 1, _batch_token_num, _tw._src_vocab_size, _tw._hidden_size, 0, 0, 0, _output_ln_clip_max * _src_emb_clip_max / (_logits_clip_max * _quant_range), _int8_ffn_in_buf, _int8_p_d_src_emb_wei, _cublas_lt_handle, _stream, false); CHECK_GPU_ERROR(cudaMemsetAsync(_p_d_unfinished, 0, sizeof(int), _stream)); /* ---step 2. 
sample new tokens from logits */ if (_tw._sampling_method == "topk") { #ifdef DEBUG_RESULT std::cout << "sampling using topk\n"; #endif ker_topk_sample_i8I_launcher( _batch_size, _batch_seq_len, _batch_seq_len, _max_thread_per_block, _stream, _int8_ffn_out_buf, _p_d_sample_id, _p_d_sample_id_buf, _p_d_real_seq_len, _tw._src_vocab_size, _tw._topk, _p_d_unfinished, _p_d_curandstate, _tw._eos_id, _logits_clip_max / _quant_range, true); } else { #ifdef DEBUG_RESULT std::cout << "sampling using topp\n"; #endif ker_topp_sample_i8I_launcher( _batch_size, _batch_seq_len, _batch_seq_len, _max_thread_per_block, _stream, _int8_ffn_out_buf, _p_d_sample_id, _p_d_sample_id_buf, _p_d_real_seq_len, _tw._src_vocab_size, _tw._topp, _p_d_unfinished, _p_d_curandstate, _tw._eos_id, _logits_clip_max / _quant_range, true); } int *temp = _p_d_sample_id; _p_d_sample_id = _p_d_sample_id_buf; _p_d_sample_id_buf = temp; CHECK_GPU_ERROR(cudaMemcpyAsync(&_h_unfinished, _p_d_unfinished, sizeof(int), cudaMemcpyDeviceToHost, _stream)); CHECK_GPU_ERROR(cudaStreamSynchronize(_stream)); _p_d_last_sample_id = _p_d_sample_id_buf + _batch_token_num; _batch_seq_len++; _batch_token_num += _batch_size; return _h_unfinished; } template <OperationType OpType_> int QuantGptEncoder<OpType_>::sample_one_token_with_cache() { /* ---step 1. project hidden states to vocab logits--- */ cublasLtMM_withAlgo_i8IO(_int8_ffn_out_buf, 1, _batch_size, _tw._src_vocab_size, _tw._hidden_size, 0, 0, 0, _output_ln_clip_max * _src_emb_clip_max / (_logits_clip_max * _quant_range), _int8_ffn_in_buf, _int8_p_d_src_emb_wei, _cublas_lt_handle, _stream, false); CHECK_GPU_ERROR(cudaMemsetAsync(_p_d_unfinished, 0, sizeof(int), _stream)); // /* ---step 2. sample new tokens from logits */ if (_tw._sampling_method == "topk") { #ifdef DEBUG_RESULT std::cout << "sampling using topk\n"; #endif ker_topk_sample_i8I_launcher( _batch_size, _batch_seq_len, 1, _max_thread_per_block, _stream, _int8_ffn_out_buf, _p_d_sample_id, _p_d_sample_id_buf, _p_d_real_seq_len, _tw._src_vocab_size, _tw._topk, _p_d_unfinished, _p_d_curandstate, _tw._eos_id, _logits_clip_max / _quant_range, true); } else { #ifdef DEBUG_RESULT std::cout << "sampling using topp\n"; #endif ker_topp_sample_i8I_launcher( _batch_size, _batch_seq_len, 1, _max_thread_per_block, _stream, _int8_ffn_out_buf, _p_d_sample_id, _p_d_sample_id_buf, _p_d_real_seq_len, _tw._src_vocab_size, _tw._topp, _p_d_unfinished, _p_d_curandstate, _tw._eos_id, _logits_clip_max / _quant_range, true); } int *temp = _p_d_sample_id; _p_d_sample_id = _p_d_sample_id_buf; _p_d_sample_id_buf = temp; CHECK_GPU_ERROR(cudaMemcpyAsync(&_h_unfinished, _p_d_unfinished, sizeof(int), cudaMemcpyDeviceToHost, _stream)); CHECK_GPU_ERROR(cudaStreamSynchronize(_stream)); _p_d_last_sample_id = _p_d_sample_id_buf + _batch_token_num; _batch_seq_len++; _batch_token_num += _batch_size; return _h_unfinished; } template <OperationType OpType_> void QuantGptEncoder<OpType_>::self_attention() { /* ---step 0. 
layer_norm, add output_bias to "query"--- */ if (_layer_id == 0) { ker_norm_layer_resual_i8O_launcher<_DataType>( _batch_token_num, _tw._hidden_size, _stream, _p_d_query, _int8_ffn_in_buf, _p_device_wei[_weight_offset], _p_device_wei[_weight_offset + 1], _p_device_wei[_weight_offset + 5], _max_thread_per_block, _quant_range / _enc_clip_max[_layer_id * 12 + 4], false, true); } cublasLtMM_withAlgo_i8IO( _int8_ffn_out_buf, 1, _batch_token_num, _tw._hidden_size * 3, _tw._hidden_size, 0, 0, 0, _enc_clip_max[_layer_id * 12] * _enc_clip_max[_layer_id * 12 + 4] / (_enc_clip_max[_layer_id * 12 + 8] * _quant_range), _int8_ffn_in_buf, _int8_p_d_enc_wei[_layer_id * 4], _cublas_lt_handle, _stream, false); #ifdef DEBUG_RESULT print_vec(_int8_ffn_in_buf, "attn qkv in", 20); print_vec(_int8_p_d_enc_wei[_layer_id * 4], "attn qkv w", 20); print_vec(_int8_ffn_out_buf, "attn qkv out", 20); #endif // get q, k, v by split and reshape qkv ker_arrange_encself_qkv_i8I_i8O_launcher<_DataType>( _batch_token_num, _tw._hidden_size, _stream, _int8_ffn_out_buf, _p_device_wei[_weight_offset + 3], _int8_ffn_in_buf, _p_d_self_k_cache1[_layer_id], _p_d_self_v_cache1[_layer_id], _p_d_v, _batch_seq_len, _tw._dim_per_head, _tw._head_num, _max_thread_per_block, _enc_clip_max[_layer_id * 12 + 8] / _quant_range, _quant_range / _enc_clip_max[_layer_id * 12 + 11], true); /* ---step 2. correlation = q * k, perform softmax on correlation--- */ CHECK_GPU_ERROR(cublasGemmStridedBatchedEx( _hd, CUBLAS_OP_T, CUBLAS_OP_N, _batch_seq_len, _batch_seq_len, _tw._dim_per_head, &_ione, _p_d_self_k_cache1[_layer_id], CUDA_R_8I, _tw._dim_per_head, _batch_seq_len * _tw._dim_per_head, _int8_ffn_in_buf, CUDA_R_8I, _tw._dim_per_head, _batch_seq_len * _tw._dim_per_head, &_izero, _int32_ffn_out_buf, CUDA_R_32I, _batch_seq_len, _batch_seq_len * _batch_seq_len, _batch_size * _tw._head_num, CUDA_R_32I, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); ker_correlation_softmax_gpt_i32I_launcher<_DataType>( _batch_size, _batch_seq_len, _tw._head_num, _stream, _int32_ffn_out_buf, _p_d_c, _p_d_real_seq_len, _atten_scaler, _enc_clip_max[_layer_id * 12 + 11] / _quant_range); /* ---step 3. new_q = correlation * v--- */ CHECK_GPU_ERROR(cublasGemmStridedBatchedEx( _hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._dim_per_head, _batch_seq_len, _batch_seq_len, &_fone, _p_d_v, _AType, _tw._dim_per_head, _batch_seq_len * _tw._dim_per_head, _p_d_c, _BType, _batch_seq_len, _batch_seq_len * _batch_seq_len, &_fzero, _p_d_q, _CType, _tw._dim_per_head, _batch_seq_len * _tw._dim_per_head, _batch_size * _tw._head_num, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); // use v to save reshaped q, since they are in same size and v // will not be use again before the next multi-head-attention ker_arrange_atten_output_i8O_launcher<_DataType>( _batch_token_num, _tw._hidden_size, _stream, _p_d_q, _int8_ffn_in_buf, _batch_seq_len, _tw._dim_per_head, _tw._head_num, _max_thread_per_block, _quant_range / _enc_clip_max[_layer_id * 12 + 5], false); /* ---step 4. 
new_q = ori_q + new_q * output_wei--- */ cublaslt_gemm( _int8_p_d_enc_wei[_layer_id * 4 + 1], _int8_ffn_in_buf, _int8_ffn_out_buf, 1, _tw._hidden_size, _batch_token_num, _tw._hidden_size, 0, 0, 0, _enc_clip_max[_layer_id * 12 + 1] * _enc_clip_max[_layer_id * 12 + 5] / (_enc_clip_max[_layer_id * 12 + 9] * _quant_range), _cublas_lt_handle, _stream); #ifdef DEBUG_RESULT print_vec(_int8_ffn_in_buf, "attn out in", 20); print_vec(_int8_p_d_enc_wei[_layer_id * 4 + 1], "attn out w", 20); print_vec(_int8_ffn_out_buf, "attn out out", 20); #endif ker_residual_bias_ln_i8I_i8O_launcher<_DataType>( _int8_ffn_out_buf, _p_device_wei[_weight_offset + 6], _p_device_wei[_weight_offset + 7], _p_device_wei[_weight_offset + 11], _int8_ffn_in_buf, _p_d_query, _batch_token_num, _tw._hidden_size, _enc_clip_max[_layer_id * 12 + 9] / _quant_range, _quant_range / _enc_clip_max[_layer_id * 12 + 6], _max_thread_per_block, _stream, false, false, true); return; } template <OperationType OpType_> void QuantGptEncoder<OpType_>::self_attention_with_cache() { /* ---step 0. layer_norm, add output_bias to "query"--- */ if (_layer_id == 0) { ker_norm_layer_resual_i8O_launcher<_DataType>( _batch_size, _tw._hidden_size, _stream, _p_d_query, _int8_ffn_in_buf, _p_device_wei[_weight_offset], _p_device_wei[_weight_offset + 1], _p_device_wei[_weight_offset + 5], _max_thread_per_block, _quant_range / _enc_clip_max[_layer_id * 12 + 4], false, true); } /* ---step 1. qkv = ori_q * qkv_wei + bias, and reshape qkv for multi-head * gemm--- */ cublasLtMM_withAlgo_i8IO( _int8_ffn_out_buf, 1, _batch_size, _tw._hidden_size * 3, _tw._hidden_size, 0, 0, 0, _enc_clip_max[_layer_id * 12] * _enc_clip_max[_layer_id * 12 + 4] / (_enc_clip_max[_layer_id * 12 + 8] * _quant_range), _int8_ffn_in_buf, _int8_p_d_enc_wei[_layer_id * 4], _cublas_lt_handle, _stream, false); // get q, k, v by split and reshape qkv ker_arrange_qkv_with_cache_i8I_i8O_launcher<_DataType>( _batch_token_num, _tw._hidden_size, _stream, _int8_ffn_out_buf, _p_device_wei[_weight_offset + 3], _int8_ffn_in_buf, _p_d_self_k_cache1[_layer_id], _p_d_self_k_cache2[_layer_id], _p_d_self_v_cache1[_layer_id], _p_d_self_v_cache2[_layer_id], _batch_seq_len, _tw._dim_per_head, _tw._head_num, _enc_clip_max[_layer_id * 12 + 8] / _quant_range, _quant_range / _enc_clip_max[_layer_id * 12 + 11], true); /* ---step 2. correlation = q * k, perform softmax on correlation correlation: [batch_size, heads_num, 1, batch_seq_len]--- */ CHECK_GPU_ERROR(cublasGemmStridedBatchedEx( _hd, CUBLAS_OP_T, CUBLAS_OP_N, _batch_seq_len, 1, _tw._dim_per_head, &_ione, _p_d_self_k_cache1[_layer_id], CUDA_R_8I, _tw._dim_per_head, _batch_seq_len * _tw._dim_per_head, _int8_ffn_in_buf, CUDA_R_8I, _tw._dim_per_head, _tw._dim_per_head, &_izero, _int32_ffn_out_buf, CUDA_R_32I, _batch_seq_len, _batch_seq_len, _batch_size * _tw._head_num, CUDA_R_32I, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); ker_fuse_softmax_new_value_i32I_i8O_launcher( _int32_ffn_out_buf, _p_d_self_v_cache1[_layer_id], _int8_ffn_in_buf, _batch_size * _tw._head_num, _batch_seq_len, _batch_seq_len, _tw._head_num, _tw._dim_per_head, float(_atten_scaler), _enc_clip_max[_layer_id * 12 + 11] / _quant_range, _quant_range / _enc_clip_max[_layer_id * 12 + 5], false, _stream); /* ---step 4. 
new_q = ori_q + new_q * output_wei--- */ cublaslt_gemm( _int8_p_d_enc_wei[_layer_id * 4 + 1], _int8_ffn_in_buf, _int8_ffn_out_buf, 1, _tw._hidden_size, _batch_size, _tw._hidden_size, 0, 0, 0, _enc_clip_max[_layer_id * 12 + 1] * _enc_clip_max[_layer_id * 12 + 5] / (_enc_clip_max[_layer_id * 12 + 9] * _quant_range), _cublas_lt_handle, _stream); ker_residual_bias_ln_i8I_i8O_launcher<_DataType>( _int8_ffn_out_buf, _p_device_wei[_weight_offset + 6], _p_device_wei[_weight_offset + 7], _p_device_wei[_weight_offset + 11], _int8_ffn_in_buf, _p_d_query, _batch_size, _tw._hidden_size, _enc_clip_max[_layer_id * 12 + 9] / _quant_range, _quant_range / _enc_clip_max[_layer_id * 12 + 6], _max_thread_per_block, _stream, false, false, true); return; } template <OperationType OpType_> void QuantGptEncoder<OpType_>::ffn_add_norm() { /* ---step 1. first ffn layer--- */ cublasLtMM_withAlgo_i8IO( _int8_ffn_out_buf, 1, _batch_token_num, _tw._inner_size, _tw._hidden_size, 0, 0, 0, _enc_clip_max[_layer_id * 12 + 2] * _enc_clip_max[_layer_id * 12 + 6] / (_enc_clip_max[_layer_id * 12 + 10] * _quant_range), _int8_ffn_in_buf, _int8_p_d_enc_wei[_layer_id * 4 + 2], _cublas_lt_handle, _stream, false); #ifdef DEBUG_RESULT print_vec(_int8_ffn_in_buf, "ffn1 in", 20); print_vec(_int8_p_d_enc_wei[_layer_id * 4 + 2], "ffn1 w", 20); print_vec(_int8_ffn_out_buf, "ffn1 out", 20); #endif ker_bias_gelu_i8I_i8O_launcher<_DataType>( _batch_token_num, _stream, _int8_ffn_out_buf, _int8_ffn_in_buf, _p_device_wei[_weight_offset + 9], _tw._inner_size, _enc_clip_max[_layer_id * 12 + 10] / _quant_range, _quant_range / _enc_clip_max[_layer_id * 12 + 7], true, false); /* ---step 2. second ffn layer--- */ cublaslt_gemm(_int8_p_d_enc_wei[_layer_id * 4 + 3], _int8_ffn_in_buf, _int32_ffn_out_buf, 1, _tw._hidden_size, _batch_token_num, _tw._inner_size, 0, 0, 0, 1, _cublas_lt_handle, _stream); #ifdef DEBUG_RESULT print_vec(_int8_ffn_in_buf, "ffn2 in", 20); print_vec(_int8_p_d_enc_wei[_layer_id * 4 + 3], "ffn2 w", 20); print_vec(_int32_ffn_out_buf, "ffn2 out", 20); #endif const _DataType *scale_ptr, *bias_ptr, *res_bias_ptr; float clip_max, dequant_scale; dequant_scale = _enc_clip_max[_layer_id * 12 + 3] * _enc_clip_max[_layer_id * 12 + 7] / (_quant_range * _quant_range); if (_layer_id == _tw._n_enc_layer - 1) { scale_ptr = _p_device_emb[2]; bias_ptr = _p_device_emb[3]; res_bias_ptr = nullptr; clip_max = _output_ln_clip_max; } else { scale_ptr = _p_device_wei[(_layer_id + 1) * _tw._weight_per_enc_layer]; bias_ptr = _p_device_wei[(_layer_id + 1) * _tw._weight_per_enc_layer + 1]; res_bias_ptr = _p_device_wei[(_layer_id + 1) * _tw._weight_per_enc_layer + 5]; clip_max = _enc_clip_max[(_layer_id + 1) * 12 + 4]; } ker_residual_bias_ln_i32I_i8O_launcher<_DataType>( _int32_ffn_out_buf, scale_ptr, bias_ptr, res_bias_ptr, _int8_ffn_in_buf, _p_d_query, _batch_token_num, _tw._hidden_size, dequant_scale, _quant_range / clip_max, _max_thread_per_block, _stream, false, false, true, _scaled_ffn2_colsum[_layer_id]); return; } template <OperationType OpType_> void QuantGptEncoder<OpType_>::ffn_add_norm_with_cache() { /* ---step 1. 
first ffn layer--- */ cublasLtMM_withAlgo_i8IO( _int8_ffn_out_buf, 1, _batch_size, _tw._inner_size, _tw._hidden_size, 0, 0, 0, _enc_clip_max[_layer_id * 12 + 2] * _enc_clip_max[_layer_id * 12 + 6] / (_enc_clip_max[_layer_id * 12 + 10] * _quant_range), _int8_ffn_in_buf, _int8_p_d_enc_wei[_layer_id * 4 + 2], _cublas_lt_handle, _stream, false); ker_bias_gelu_i8I_i8O_launcher<_DataType>( _batch_size, _stream, _int8_ffn_out_buf, _int8_ffn_in_buf, _p_device_wei[_weight_offset + 9], _tw._inner_size, _enc_clip_max[_layer_id * 12 + 10] / _quant_range, _quant_range / _enc_clip_max[_layer_id * 12 + 7], true, false); /* ---step 2. second ffn layer--- */ cublaslt_gemm(_int8_p_d_enc_wei[_layer_id * 4 + 3], _int8_ffn_in_buf, _int32_ffn_out_buf, 1, _tw._hidden_size, _batch_size, _tw._inner_size, 0, 0, 0, 1, _cublas_lt_handle, _stream); const _DataType *scale_ptr, *bias_ptr, *res_bias_ptr; float clip_max, dequant_scale; dequant_scale = _enc_clip_max[_layer_id * 12 + 3] * _enc_clip_max[_layer_id * 12 + 7] / (_quant_range * _quant_range); if (_layer_id == _tw._n_enc_layer - 1) { scale_ptr = _p_device_emb[2]; bias_ptr = _p_device_emb[3]; res_bias_ptr = nullptr; clip_max = _output_ln_clip_max; } else { scale_ptr = _p_device_wei[(_layer_id + 1) * _tw._weight_per_enc_layer]; bias_ptr = _p_device_wei[(_layer_id + 1) * _tw._weight_per_enc_layer + 1]; res_bias_ptr = _p_device_wei[(_layer_id + 1) * _tw._weight_per_enc_layer + 5]; clip_max = _enc_clip_max[(_layer_id + 1) * 12 + 4]; } ker_residual_bias_ln_i32I_i8O_launcher<_DataType>( _int32_ffn_out_buf, scale_ptr, bias_ptr, res_bias_ptr, _int8_ffn_in_buf, _p_d_query, _batch_size, _tw._hidden_size, dequant_scale, _quant_range / clip_max, _max_thread_per_block, _stream, false, false, true, _scaled_ffn2_colsum[_layer_id]); return; } /** Compute ppl from encoder output */ template <OperationType OpType_> void QuantGptEncoder<OpType_>::compute_ppl() { /* ---step 1. project hidden states to vocab logits--- */ cublasLtMM_withAlgo_i8IO(_int8_ffn_out_buf, 1, _batch_token_num, _tw._src_vocab_size, _tw._hidden_size, 0, 0, 0, _output_ln_clip_max * _src_emb_clip_max / (_logits_clip_max * _quant_range), _int8_ffn_in_buf, _int8_p_d_src_emb_wei, _cublas_lt_handle, _stream, false); #ifdef DEBUG_RESULT print_vec(_int8_ffn_in_buf, "logits in", 20); print_vec(_int8_p_d_src_emb_wei, "logits w", 20); print_vec(_int8_ffn_out_buf, "logits out", 20); #endif /* ---step 2. compute language model ppl--- */ ker_ppl_i8I_launcher(_batch_size, _batch_seq_len, _max_thread_per_block, _stream, _int8_ffn_out_buf, _p_d_token_id, _p_d_real_seq_len, _p_d_ppl, _tw._src_vocab_size, _logits_clip_max / _quant_range, true); } template class QuantGptEncoder<OperationType::FP16>; template class QuantGptEncoder<OperationType::FP32>; } // namespace cuda } // namespace lightseq
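/**
 * Usage sketch for QuantGptEncoder: a minimal outline, not part of the
 * original implementation, and the helper name below is made up for this
 * sketch. Assumptions: QuantGptWeight and OperationType live in
 * lightseq::cuda as they are used above, the encoder's member functions are
 * publicly callable, `tw` is fully loaded, and d_token_id / d_ppl /
 * d_sample_id are caller-owned device buffers sized for
 * max_batch_size x tw._max_step. init_buffer() must run once before any
 * inference call; check() returns an empty string when the configuration
 * satisfies the constraints it tests.
 */
template <lightseq::cuda::OperationType OpType_>
std::string run_quant_gpt_once(
    const lightseq::cuda::QuantGptWeight<OpType_> &tw, int max_batch_size,
    int batch_size, int batch_seq_len, const int *d_token_id, float *d_ppl,
    int *d_sample_id, cudaStream_t stream, cudaStream_t cache_stream,
    cublasHandle_t hd) {
  lightseq::cuda::QuantGptEncoder<OpType_> encoder(
      max_batch_size, d_token_id, d_ppl, d_sample_id, tw, stream, cache_stream,
      hd);
  encoder.init_buffer();              // allocate buffers and quantize weights
  std::string err = encoder.check();  // empty string means the config is valid
  if (err.empty()) {
    encoder.run_one_infer(batch_size, batch_seq_len);  // per-sentence ppl -> d_ppl
  }
  return err;
}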
#include <optix.h> #if (OPTIX_VERSION < 70000) #include <optixu/optixu_math_namespace.h> #else #include <optix.h> #include <cuda_runtime.h> #endif #include <OSL/device_string.h> #include <OSL/oslclosure.h> #include "rend_lib.h" #include "util.h" #if (OPTIX_VERSION < 70000) // Ray payload rtDeclareVariable (PRD_radiance, prd_radiance, rtPayload, ); // ray/hit variables rtDeclareVariable (float3, shading_normal, attribute shading_normal, ); rtDeclareVariable (float3, geometric_normal, attribute geometric_normal,); rtDeclareVariable (float3, texcoord, attribute texcoord, ); rtDeclareVariable (float, surface_area, attribute surface_area, ); rtDeclareVariable (float3, dPdu, attribute dPdu, ); rtDeclareVariable (float3, dPdv, attribute dPdv, ); rtDeclareVariable (int, obj_id, attribute obj_id, ); rtDeclareVariable (int, lgt_idx, attribute lgt_idx, ); // ray/hit variables rtDeclareVariable (uint2, launch_index, rtLaunchIndex, ); rtDeclareVariable (uint2, launch_dim, rtLaunchDim, ); rtDeclareVariable (optix::Ray, ray, rtCurrentRay, ); rtDeclareVariable (float, t_hit, rtIntersectionDistance, ); // Buffers rtBuffer<float3,2> output_buffer; // Function pointers for the OSL shader rtDeclareVariable (rtCallableProgramId<void (void*, void*, void*, void*, int)>, osl_init_func, , ); rtDeclareVariable (rtCallableProgramId<void (void*, void*, void*, void*, int)>, osl_group_func, ,); RT_PROGRAM void any_hit_shadow() { rtTerminateRay(); } static __device__ void globals_from_hit(ShaderGlobals& sg) { // Setup the ShaderGlobals sg.I = ray.direction; sg.N = normalize(rtTransformNormal (RT_OBJECT_TO_WORLD, shading_normal)); sg.Ng = normalize(rtTransformNormal (RT_OBJECT_TO_WORLD, geometric_normal)); sg.P = ray.origin + t_hit * ray.direction; sg.dPdu = dPdu; sg.u = texcoord.x; sg.v = texcoord.y; sg.Ci = NULL; sg.surfacearea = surface_area; sg.backfacing = (dot(sg.N, sg.I) > 0.0f); if (sg.backfacing) { sg.N = -sg.N; sg.Ng = -sg.Ng; } // NB: These variables are not used in the current iteration of the sample sg.raytype = CAMERA; sg.flipHandedness = 0; } static __device__ float3 process_closure(const OSL::ClosureColor* closure_tree) { OSL::Color3 result = OSL::Color3 (0.0f); if (!closure_tree) { return make_float3(result.x, result.y, result.z); } // The depth of the closure tree must not exceed the stack size. // A stack size of 8 is probably quite generous for relatively // balanced trees. 
const int STACK_SIZE = 8; // Non-recursive traversal stack int stack_idx = 0; const OSL::ClosureColor* ptr_stack[STACK_SIZE]; OSL::Color3 weight_stack[STACK_SIZE]; // Shading accumulator OSL::Color3 weight = OSL::Color3(1.0f); const void* cur = closure_tree; while (cur) { switch (((OSL::ClosureColor*)cur)->id) { case OSL::ClosureColor::ADD: { ptr_stack [stack_idx ] = ((OSL::ClosureAdd*) cur)->closureB; weight_stack[stack_idx++] = weight; cur = ((OSL::ClosureAdd*) cur)->closureA; break; } case OSL::ClosureColor::MUL: { weight *= ((OSL::ClosureMul*) cur)->weight; cur = ((OSL::ClosureMul*) cur)->closure; break; } case EMISSION_ID: { cur = NULL; break; } case DIFFUSE_ID: case OREN_NAYAR_ID: case PHONG_ID: case WARD_ID: case REFLECTION_ID: case REFRACTION_ID: case FRESNEL_REFLECTION_ID: { result += ((OSL::ClosureComponent*) cur)->w * weight; cur = NULL; break; } case MICROFACET_ID: { const char* mem = (const char*)((OSL::ClosureComponent*) cur)->data(); const char* dist_str = *(const char**) &mem[0]; #if 0 if (launch_index.x == launch_dim.x / 2 && launch_index.y == launch_dim.y / 2) printf ("microfacet, dist: %s\n", HDSTR(dist_str).c_str()); #endif if (HDSTR(dist_str) == OSL::DeviceStrings::default_) return make_float3(0.0f, 1.0f, 1.0f); return make_float3(1.0f, 0.0f, 1.0f); } default: cur = NULL; break; } if (cur == NULL && stack_idx > 0) { cur = ptr_stack [--stack_idx]; weight = weight_stack[ stack_idx]; } } return make_float3(result.x, result.y, result.z); } RT_PROGRAM void closest_hit_osl() { // TODO: Fixed-sized allocations can easily be exceeded by arbitrary shader // networks, so there should be (at least) some mechanism to issue a // warning or error if the closure or param storage can possibly be // exceeded. alignas(8) char closure_pool[256]; alignas(8) char params [256]; ShaderGlobals sg; globals_from_hit (sg); // Pack the "closure pool" into one of the ShaderGlobals pointers *(int*) &closure_pool[0] = 0; sg.renderstate = &closure_pool[0]; // Create some run-time options structs. The OSL shader fills in the structs // as it executes, based on the options specified in the shader source. NoiseOptCUDA noiseopt; TextureOptCUDA textureopt; TraceOptCUDA traceopt; // Pack the pointers to the options structs in a faux "context", // which is a rough stand-in for the host ShadingContext. 
ShadingContextCUDA shading_context = { &noiseopt, &textureopt, &traceopt }; sg.context = &shading_context; // Run the OSL group and init functions osl_init_func (&sg, params); osl_group_func(&sg, params); prd_radiance.result = process_closure ((OSL::ClosureColor*) sg.Ci); } #else //#if (OPTIX_VERSION < 70000) #include "../render_params.h" extern "C" { __device__ __constant__ RenderParams render_params; } extern"C" __global__ void __anyhit__any_hit_shadow () { optixTerminateRay(); } static __device__ void globals_from_hit (ShaderGlobals& sg) { const GenericRecord *record = reinterpret_cast<GenericRecord *> (optixGetSbtDataPointer()); ShaderGlobals local_sg; // hit-kind 0: quad hit // 1: sphere hit optixDirectCall<void, unsigned int, float, float3, float3, ShaderGlobals *>( optixGetHitKind(), optixGetPrimitiveIndex(), optixGetRayTmax(), optixGetWorldRayOrigin(), optixGetWorldRayDirection(), &local_sg); // Setup the ShaderGlobals const float3 ray_direction = optixGetWorldRayDirection(); const float3 ray_origin = optixGetWorldRayOrigin(); const float t_hit = optixGetRayTmin(); sg.I = ray_direction; sg.N = normalize (optixTransformNormalFromObjectToWorldSpace (local_sg.N)); sg.Ng = normalize (optixTransformNormalFromObjectToWorldSpace (local_sg.Ng)); sg.P = ray_origin + t_hit * ray_direction; sg.dPdu = local_sg.dPdu; sg.dPdv = local_sg.dPdv; sg.u = local_sg.u; sg.v = local_sg.v; sg.Ci = NULL; sg.surfacearea = local_sg.surfacearea; sg.backfacing = dot (sg.N, sg.I) > 0.0f; sg.shaderID = local_sg.shaderID; if (sg.backfacing) { sg.N = -sg.N; sg.Ng = -sg.Ng; } // NB: These variables are not used in the current iteration of the sample sg.raytype = CAMERA; sg.flipHandedness = 0; } static __device__ float3 process_closure (const OSL::ClosureColor* closure_tree) { OSL::Color3 result = OSL::Color3 (0.0f); if (!closure_tree) { return make_float3 (result.x, result.y, result.z); } // The depth of the closure tree must not exceed the stack size. // A stack size of 8 is probably quite generous for relatively // balanced trees. 
const int STACK_SIZE = 8; // Non-recursive traversal stack int stack_idx = 0; const OSL::ClosureColor* ptr_stack[STACK_SIZE]; OSL::Color3 weight_stack[STACK_SIZE]; // Shading accumulator OSL::Color3 weight = OSL::Color3 (1.0f); const void* cur = closure_tree; while (cur) { switch (((OSL::ClosureColor*)cur)->id) { case OSL::ClosureColor::ADD: { ptr_stack [stack_idx ] = ((OSL::ClosureAdd*) cur)->closureB; weight_stack[stack_idx++] = weight; cur = ((OSL::ClosureAdd*) cur)->closureA; break; } case OSL::ClosureColor::MUL: { weight *= ((OSL::ClosureMul*) cur)->weight; cur = ((OSL::ClosureMul*) cur)->closure; break; } case EMISSION_ID: { cur = NULL; break; } case DIFFUSE_ID: case OREN_NAYAR_ID: case PHONG_ID: case WARD_ID: case REFLECTION_ID: case REFRACTION_ID: case FRESNEL_REFLECTION_ID: { result += ((OSL::ClosureComponent*) cur)->w * weight; cur = NULL; break; } case MICROFACET_ID: { const char* mem = (const char*)((OSL::ClosureComponent*) cur)->data(); const char* dist_str = *(const char**) &mem[0]; if (HDSTR(dist_str) == STRING_PARAMS(default)) return make_float3(0.0f, 1.0f, 1.0f); else return make_float3(1.0f, 0.0f, 1.0f); break; } default: cur = NULL; break; } if (cur == NULL && stack_idx > 0) { cur = ptr_stack [--stack_idx]; weight = weight_stack[ stack_idx]; } } return make_float3(result.x, result.y, result.z); } extern "C" __global__ void __closesthit__closest_hit_osl() { // TODO: Fixed-sized allocations can easily be exceeded by arbitrary shader // networks, so there should be (at least) some mechanism to issue a // warning or error if the closure or param storage can possibly be // exceeded. alignas(8) char closure_pool[256]; alignas(8) char params [256]; ShaderGlobals sg; globals_from_hit (sg); // Pack the "closure pool" into one of the ShaderGlobals pointers *(int*) &closure_pool[0] = 0; sg.renderstate = &closure_pool[0]; // Create some run-time options structs. The OSL shader fills in the structs // as it executes, based on the options specified in the shader source. NoiseOptCUDA noiseopt; TextureOptCUDA textureopt; TraceOptCUDA traceopt; // Pack the pointers to the options structs in a faux "context", // which is a rough stand-in for the host ShadingContext. ShadingContextCUDA shading_context = { &noiseopt, &textureopt, &traceopt }; sg.context = &shading_context; // Run the OSL group and init functions const unsigned int shaderInitOpIdx = 2u + 2u * sg.shaderID + 0u; const unsigned int shaderGroupIdx = 2u + 2u * sg.shaderID + 1u; optixDirectCall<void, ShaderGlobals*, void *, void*, void*, int>(shaderInitOpIdx, &sg, params, nullptr, nullptr, 0); // call osl_init_func optixDirectCall<void, ShaderGlobals*, void *, void*, void*, int>(shaderGroupIdx , &sg, params, nullptr, nullptr, 0); // call osl_group_func float3 result = process_closure ((OSL::ClosureColor*) sg.Ci); uint3 launch_dims = optixGetLaunchDimensions(); uint3 launch_index = optixGetLaunchIndex(); float3* output_buffer = reinterpret_cast<float3 *>(render_params.output_buffer); int pixel = launch_index.y * launch_dims.x + launch_index.x; output_buffer[pixel] = make_float3(result.x, result.y, result.z); } #endif //#if (OPTIX_VERSION < 70000)
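Both branches above walk the closure tree with the same explicit-stack pattern: ADD defers one child and keeps descending into the other, MUL folds its weight into the running multiplier, and terminal closures accumulate their weighted contribution into the result. The device function below is a simplified standalone sketch of that traversal using hypothetical Node/AddNode/MulNode/LeafNode types; OSL's real ClosureColor layout is richer, and the fixed STACK_SIZE of 8 mirrors the same assumption of reasonably balanced trees.

// Hypothetical simplified node layout; OSL's ClosureColor/ClosureAdd/ClosureMul
// structures carry more information than this.
struct Node { int id; };                      // 0 = ADD, 1 = MUL, 2 = LEAF
struct AddNode  { Node base; const Node* a; const Node* b; };
struct MulNode  { Node base; float w; const Node* child; };
struct LeafNode { Node base; float value; };

__device__ float eval_tree(const Node* root) {
  const int STACK_SIZE = 8;                   // assumes at most 8 deferred branches
  const Node* ptr_stack[STACK_SIZE];
  float weight_stack[STACK_SIZE];
  int stack_idx = 0;

  float result = 0.0f;
  float weight = 1.0f;
  const Node* cur = root;
  while (cur) {
    switch (cur->id) {
      case 0: {                               // ADD: defer branch B, descend into A
        const AddNode* add = (const AddNode*)cur;
        ptr_stack[stack_idx]      = add->b;
        weight_stack[stack_idx++] = weight;
        cur = add->a;
        break;
      }
      case 1: {                               // MUL: fold the weight, descend
        const MulNode* mul = (const MulNode*)cur;
        weight *= mul->w;
        cur = mul->child;
        break;
      }
      default: {                              // LEAF: accumulate weighted value
        result += ((const LeafNode*)cur)->value * weight;
        cur = nullptr;
        break;
      }
    }
    if (cur == nullptr && stack_idx > 0) {    // pop the next deferred branch
      cur = ptr_stack[--stack_idx];
      weight = weight_stack[stack_idx];
    }
  }
  return result;
}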
// TODO: Add d_visit_lookup and d_valid_in d_valid_out into ProblemBase #pragma once #include <gunrock/util/cta_work_distribution.cuh> #include <gunrock/util/cta_work_progress.cuh> #include <gunrock/util/kernel_runtime_stats.cuh> #include <gunrock/util/device_intrinsics.cuh> #include <gunrock/oprtr/CULL_filter/cta.cuh> #include <gunrock/oprtr/CULL_filter/kernel_policy.cuh> //#include <gunrock/oprtr/bypass_filter/kernel.cuh> namespace gunrock { namespace oprtr { namespace CULL { /** * @brief Structure for invoking CTA processing tile over all elements. * * @tparam KernelPolicy Kernel policy type for partitioned edge mapping. * @tparam Problem Problem data type for partitioned edge mapping. * @tparam Functor Functor type for the specific problem type. */ template <typename KernelPolicyT, typename FilterOpT> struct SweepPass { typedef Cta<KernelPolicyT, FilterOpT> Cta; typedef typename KernelPolicyT::SizeT SizeT; typedef typename KernelPolicyT::InKeyT InKeyT; typedef typename KernelPolicyT::OutKeyT OutKeyT; typedef typename KernelPolicyT::ValueT ValueT; typedef typename KernelPolicyT::LabelT LabelT; // typedef typename KernelPolicy::FilterOpT FilterOpT; static __device__ __forceinline__ void Invoke( const SizeT &queue_index, const InKeyT *&keys_in, const ValueT *&values_in, const LabelT &label, LabelT *&labels, unsigned char *&visited_masks, OutKeyT *&keys_out, typename KernelPolicyT::SmemStorage &smem_storage, util::CtaWorkProgress<SizeT> &work_progress, // util::CtaWorkDistribution<SizeT> &work_decomposition, FilterOpT &filter_op) // SizeT &max_out_frontier) { // Determine our threadblock's work range util::CtaWorkLimits<SizeT> work_limits; // work_decomposition.template GetCtaWorkLimits< smem_storage.state.work_decomposition .template GetCtaWorkLimits<KernelPolicyT::LOG_TILE_ELEMENTS, KernelPolicyT::LOG_SCHEDULE_GRANULARITY>( work_limits); // Return if we have no work to do if (!work_limits.elements) { return; } // CTA processing abstraction Cta cta(queue_index, keys_in, values_in, label, labels, visited_masks, keys_out, smem_storage, work_progress, // max_out_frontier, filter_op); // Process full tiles while (work_limits.offset < work_limits.guarded_offset) { cta.ProcessTile(work_limits.offset); work_limits.offset += KernelPolicyT::TILE_ELEMENTS; } // Clean up last partial tile with guarded-i/o if (work_limits.guarded_elements) { cta.ProcessTile(work_limits.offset, work_limits.guarded_elements); } } }; /****************************************************************************** * Arch dispatch ******************************************************************************/ /** * Not valid for this arch (default) * * @tparam KernelPolicy Kernel policy type for partitioned edge mapping. * @tparam Problem Problem data type for partitioned edge mapping. * @tparam Functor Functor type for the specific problem type. * @tparam VALID. */ template <OprtrFlag FLAG, typename InKeyT, typename OutKeyT, typename SizeT, typename ValueT, typename LabelT, typename FilterOpT, bool VALID = #ifdef __CUDA_ARCH__ true #else false #endif > struct Dispatch { }; template <OprtrFlag FLAG, typename InKeyT, typename OutKeyT, typename SizeT, typename ValueT, typename LabelT, typename FilterOpT> struct Dispatch<FLAG, InKeyT, OutKeyT, SizeT, ValueT, LabelT, FilterOpT, true> { typedef KernelPolicy<FLAG, InKeyT, OutKeyT, SizeT, ValueT, LabelT, FilterOpT, sizeof(InKeyT) == 4 ? 
8 : 4, // MAX_CTA_OCCUPANCY 8, // LOG_THREADS 1, // LOG_LOAD_VEC_SIZE 0, // LOG_LOADS_PER_TILE 5, // LOG_RAKING_THREADS 5, // END_BITMASK_CULL 8> // LOG_SCHEDULE_GRANULARITY KernelPolicyT; static __device__ __forceinline__ void Kernel( const bool &queue_reset, const SizeT &queue_index, const InKeyT *&keys_in, const ValueT *&values_in, SizeT &num_inputs, const LabelT &label, LabelT *&labels, unsigned char *&visited_masks, OutKeyT *&keys_out, util::CtaWorkProgress<SizeT> &work_progress, FilterOpT &filter_op) // SizeT &max_in_frontier, // SizeT &max_out_frontier) // util::KernelRuntimeStats &kernel_stats) { // Shared storage for the kernel __shared__ typename KernelPolicyT::SmemStorage smem_storage; // Determine work decomposition if (threadIdx.x == 0) { // Obtain problem size if (queue_reset) { work_progress.StoreQueueLength(num_inputs, queue_index); } else { num_inputs = work_progress.LoadQueueLength(queue_index); // Check if we previously overflowed // if (num_elements >= max_in_frontier) { // printf(" num_elements >= max_in_frontier, num_elements = %d, // max_in_frontier = %d\n", num_elements, max_in_frontier); // num_elements = 0; //} // Signal to host that we're done // if ((num_elements == 0) || // (KernelPolicy::SATURATION_QUIT && (num_elements <= gridDim.x * // KernelPolicy::SATURATION_QUIT))) //{ // if (d_done) d_done[0] = num_elements; //} } // Initialize work decomposition in smem smem_storage.state.work_decomposition .template Init<KernelPolicyT::LOG_SCHEDULE_GRANULARITY>(num_inputs, gridDim.x); // Reset our next outgoing queue counter to zero if (blockIdx.x == 0) work_progress.StoreQueueLength(0, queue_index + 2); } // Barrier to protect work decomposition __syncthreads(); SweepPass<KernelPolicyT, FilterOpT>::Invoke( queue_index, keys_in, values_in, label, labels, visited_masks, keys_out, smem_storage, work_progress, filter_op); // smem_storage.state.work_decomposition, // max_out_frontier); } }; template <OprtrFlag FLAG, typename InKeyT, typename OutKeyT, typename SizeT, typename ValueT, typename LabelT, typename FilterOpT> __launch_bounds__(Dispatch<FLAG, InKeyT, OutKeyT, SizeT, ValueT, LabelT, FilterOpT, true>::KernelPolicyT::THREADS, Dispatch<FLAG, InKeyT, OutKeyT, SizeT, ValueT, LabelT, FilterOpT, true>::KernelPolicyT::CTA_OCCUPANCY) __global__ void Kernel(const bool queue_reset, const SizeT queue_index, const InKeyT *keys_in, const ValueT *values_in, SizeT num_inputs, const LabelT label, LabelT *labels, unsigned char *visited_masks, OutKeyT *keys_out, util::CtaWorkProgress<SizeT> work_progress, FilterOpT filter_op) // typename KernelPolicy::SizeT max_in_queue, // typename KernelPolicy::SizeT max_out_queue, // util::KernelRuntimeStats kernel_stats) // bool filtering_flag = true) { Dispatch<FLAG, InKeyT, OutKeyT, SizeT, ValueT, LabelT, FilterOpT>::Kernel( queue_reset, queue_index, keys_in, values_in, num_inputs, label, labels, visited_masks, keys_out, work_progress, filter_op); } template <OprtrFlag FLAG, typename GraphT, typename FrontierInT, typename FrontierOutT, typename ParametersT, typename AdvanceOpT, typename FilterOpT> cudaError_t Launch(const GraphT graph, const FrontierInT *frontier_in, FrontierOutT *frontier_out, ParametersT &parameters, AdvanceOpT advance_op, FilterOpT filter_op) { // if (queue_reset) // work_progress.Reset_(0, stream); typedef typename FrontierInT ::ValueT InKeyT; typedef typename FrontierOutT::ValueT OutKeyT; typedef typename ParametersT ::SizeT SizeT; typedef typename ParametersT ::ValueT ValueT; typedef typename ParametersT ::LabelT LabelT; 
typedef typename Dispatch<FLAG, InKeyT, OutKeyT, SizeT, ValueT, LabelT, FilterOpT, true>::KernelPolicyT KernelPolicyT; SizeT grid_size = (parameters.frontier->queue_reset) ? (parameters.frontier->queue_length / KernelPolicyT::THREADS + 1) : (parameters.cuda_props->device_props.multiProcessorCount * KernelPolicyT::CTA_OCCUPANCY); Kernel<FLAG, InKeyT, OutKeyT, SizeT, ValueT, LabelT, FilterOpT> <<<grid_size, KernelPolicyT::THREADS, 0, parameters.stream>>>( parameters.frontier->queue_reset, (SizeT)(parameters.frontier->queue_index), (frontier_in == NULL) ? ((InKeyT *)NULL) : (frontier_in->GetPointer(util::DEVICE)), (parameters.values_in == NULL) ? ((ValueT *)NULL) : (parameters.values_in->GetPointer(util::DEVICE)), parameters.frontier->queue_length, parameters.label, (parameters.labels == NULL) ? ((LabelT *)NULL) : (parameters.labels->GetPointer(util::DEVICE)), (parameters.visited_masks == NULL) ? ((unsigned char *)NULL) : (parameters.visited_masks->GetPointer(util::DEVICE)), (frontier_out == NULL) ? ((OutKeyT *)NULL) : (frontier_out->GetPointer(util::DEVICE)), parameters.frontier->work_progress, filter_op); if (frontier_out != NULL) { parameters.frontier->queue_index++; } return cudaSuccess; } } // namespace CULL } // namespace oprtr } // namespace gunrock // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
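Launch derives its grid size from the frontier length when the queue is being reset and otherwise from multiProcessorCount times the policy's CTA_OCCUPANCY, then hands the whole frontier to the tile-based filter kernel. As a rough sketch of the filter's input/output contract only (not Gunrock's CTA/tile machinery), the hypothetical kernel below compacts surviving keys with a global atomic counter; the real filter uses per-CTA scans and the visited bitmask so it does not pay one atomic per surviving element.

#include <cuda_runtime.h>

// Hypothetical, simplified stand-in for the CULL filter: keep a key when the
// user predicate accepts it and append it to the output frontier.
template <typename FilterOp>
__global__ void simple_filter_kernel(const int* keys_in, int num_inputs,
                                     int* keys_out, int* num_outputs,
                                     FilterOp filter_op) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = gridDim.x * blockDim.x;
  for (int i = idx; i < num_inputs; i += stride) {
    int key = keys_in[i];
    if (filter_op(key)) {
      // Reserve one output slot per surviving key; a scan-based compaction
      // keeps the output ordered and needs far fewer atomics.
      int pos = atomicAdd(num_outputs, 1);
      keys_out[pos] = key;
    }
  }
}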
#include<iostream> #include<vector> #include<string> #include<fstream> #include<cassert> #include<algorithm> #include<utility> #include<queue> #include<limits> using namespace s2t::decodernet; using namespace s2t::sys; using namespace s2t::common; using namespace std; // kernel for decoder compuatations __global__ void decoder_concat(size_t in1_sz, size_t in2_sz, float* in1, const float* in2) { // concat in2 into in1 size_t index = blockIdx.x*blockDim.x + threadIdx.x; if(index < in2_sz) { in1[in1_sz+index] = in2[index]; } } // min_first methods bool min_first::operator()(pair<float, long long> const& pair1, pair<float, long long> const& pair2) { // pair with minimum value of first will be at the top of priority queue return pair1.first > pair2.first; } // TrieNode methods TrieNode::TrieNode() { isCompleteWord = false; for (int i = 0; i < letters; ++i) { children[i] = NULL; } } TrieNode::~TrieNode() { } // Trie methods Trie::Trie() { root = new TrieNode(); all_trie_nodes.push_back(root); } bool Trie::insert_and_check(vector<size_t>& word) { /* returns true if word already exists; else returns false and inserts word */ auto current = root; for(int i = 0; i < word.size(); ++i) { int index = word[i]; if(!current->children[index]) { current->children[index] = new TrieNode(); all_trie_nodes.push_back(current->children[index]); } current = current->children[index]; } if(current->isCompleteWord) return true; current->isCompleteWord = true; return false; } Trie::~Trie() { for(int i=0; i<all_trie_nodes.size(); ++i) { delete all_trie_nodes[i]; } } // is_prefix : checks if sids_1 is a proper prefix of sids_2 bool is_prefix(vector<size_t>& sids_1, vector<size_t>& sids_2) { if(sids_1.size() >= sids_2.size()) { return false; } for(int i=0; i<sids_1.size(); ++i) { if(sids_1[i]!=sids_2[i]) return false; } return true; } // sorting comparator function for boosting phase bool compareSIDsLengths(pair<int, int>& pair1, pair<int, int>& pair2) { return (pair1.second < pair2.second); } // log sum exp function for a pair of float values float logsumexp(float x, float y) { float maxval = max(x, y); return log(exp(x - maxval) + exp(y - maxval)) + maxval; } // decoder methods decoder::decoder(size_t p_vocab_size, size_t p_blank_index) { vocab_size = p_vocab_size; blank_index = p_blank_index; // Read the subword file { string subword; ifstream subwords(hparams::subword_file); if(subwords.is_open()) { while(getline(subwords, subword)) { subword_map.push_back(subword); } subwords.close(); } else { cout << "Couldn't open vocabulary file!" 
<< endl; } subword_map.push_back(""); // appending blank symbol at last assert(vocab_size==subword_map.size() && "Number of subwords in file and vocab_size do not match!"); assert(vocab_size==hparams::joint_net_logit_size && "hparams::joint_net_logit_size and vocab_size do not match!"); } // intialise prednet and jointnet { checkCUDNN(cudnnCreate(&cudnn)); prednet1.init(cudnn, ""); jointnet1.init(cudnn, ""); } // initialise the gpu variables { prednet_out.init(hparams::max_input_size, hparams::pred_net_logit_size); enc_pred_concated.init(hparams::max_input_size, hparams::enc_net_logit_size+hparams::pred_net_logit_size); // first 700 enocder, next 700 decoder jointnet_out.init(hparams::max_input_size, hparams::joint_net_logit_size); } // initialise the cpu variables { log_probs = (float*) malloc(hparams::joint_net_logit_size * sizeof(float)); boost_phase = hparams::boost_phase; } } void decoder::boost_prob(data_tuple& final, data_tuple& prefix) { float boost_log_prob = prefix.log_prob; size_t input_symbol = prefix.last_decoded_sid; int output_state_idx, input_state_idx; for(int i=prefix.beam_sids.size(); i<final.beam_sids.size(); ++i) { size_t output_symbol = final.beam_sids[i]; // compute log_prob float log_prob; { if(i==prefix.beam_sids.size()) { input_state_idx = prefix.hidden_idx; output_state_idx = -1; } else if(i==prefix.beam_sids.size()+1) { input_state_idx = output_state_idx; output_state_idx = -1; } else { swap(input_state_idx, output_state_idx); } // calls to jointnet and prednet int return_state_idx = prednet1(cudnn, input_symbol, prednet_out, input_state_idx, output_state_idx); if(i<=prefix.beam_sids.size()+1) { output_state_idx = return_state_idx; prednet1.reuse_state(output_state_idx); } else { assert(output_state_idx==return_state_idx && "output state index doesn't match return state index!"); } decoder_concat<<<1, 1024>>>(700, 700, enc_pred_concated.ptr, prednet_out.ptr); jointnet1(cudnn, enc_pred_concated, jointnet_out); // loading log_probs in float array size_t log_probs_N = jointnet_out.data_at_host(&log_probs); log_prob = log_probs[output_symbol]; } boost_log_prob += log_prob; input_symbol = output_symbol; } if(prefix.beam_sids.size()+1==final.beam_sids.size()) { prednet1.free_state(output_state_idx); } else { prednet1.free_state(input_state_idx); prednet1.free_state(output_state_idx); } final.log_prob = logsumexp(final.log_prob, boost_log_prob); } void decoder::operator() (const string& encoder_features_file, size_t beamsize, vector<pair<string, float>>& beams_and_logprobs_out) { // resetting state buffer for LSTM prednet1.reset_state_buffer(); auto encoder_features = cnpy::npy_load(encoder_features_file); size_t acoustic_time_steps = encoder_features.shape[0]; // T * 700 file // b_heap related data structures vector<data_tuple> data_b; priority_queue<pair<float, int>, vector<pair<float, int>>, min_first> b_heap; // a_heap realted data structures vector<data_tuple> data_a; priority_queue<pair<float, int>, vector<pair<float, int>>, min_first> a_heap; // initialse b_heap related data structures before t=0 int zeroed_dlsm_state_idx = prednet1.get_zerod_state(); data_tuple init_data_tuple = {"", 0.f, blank_index, zeroed_dlsm_state_idx /* hidden index */, {blank_index}}; prednet1.reuse_state(zeroed_dlsm_state_idx); data_b.push_back(init_data_tuple); b_heap.push(make_pair(0.f, 0)); for(int t=0; t<acoustic_time_steps; ++t) { enc_pred_concated.copy(encoder_features.data<float_t>() + hparams::enc_net_logit_size*t, hparams::enc_net_logit_size); // delete all for a_heap; { 
for(int i=0; i<data_a.size(); ++i) { prednet1.free_state(data_a[i].hidden_idx); } data_a.clear(); while(a_heap.size()) // reset it { a_heap.pop(); } } // put all data from b_heap in to a_heap and initialise empty b_heap; { // boost the probabilities in b_heap and push to a_heap; { vector<pair<int, int>> data_b_idx_sids_len_vector; while(b_heap.size()) { pair<float, int> log_prob_data_idx_pair = b_heap.top(); data_b_idx_sids_len_vector.push_back(make_pair(log_prob_data_idx_pair.second, data_b[log_prob_data_idx_pair.second].beam_sids.size())); b_heap.pop(); } sort(data_b_idx_sids_len_vector.begin(), data_b_idx_sids_len_vector.end(), compareSIDsLengths); for(int i=0; i<data_b_idx_sids_len_vector.size(); ++i) { for(int j=i-1; boost_phase && j>=0; --j) { // if data_b object at index j is a prefix of data_b object at index i; if(is_prefix(data_b[data_b_idx_sids_len_vector[j].first].beam_sids, data_b[data_b_idx_sids_len_vector[i].first].beam_sids)) { boost_prob(data_b[data_b_idx_sids_len_vector[i].first], data_b[data_b_idx_sids_len_vector[j].first]); break; } } // data_b object at index i is boosted so push to a_heap; a_heap.push(make_pair(-data_b[data_b_idx_sids_len_vector[i].first].log_prob, data_b_idx_sids_len_vector[i].first)); } } data_a = data_b; data_b.clear(); } // choose the most probable for a_heap and iterate pair<float, int> top_log_prob_data_idx_pair = a_heap.top(); a_heap.pop(); size_t top_id_data_a = top_log_prob_data_idx_pair.second; float top_log_prob_a = data_a[top_id_data_a].log_prob; float bmszth_top_log_prob_b = -numeric_limits<float>::infinity(); Trie trie; while(top_log_prob_a!=-numeric_limits<float>::infinity() && bmszth_top_log_prob_b<top_log_prob_a) { // compute next set of log probablities by calling lm and joint net size_t input_symbol = data_a[top_id_data_a].last_decoded_sid; // calls to jointnet and prednet int output_state_idx = prednet1(cudnn, input_symbol, prednet_out, data_a[top_id_data_a].hidden_idx); decoder_concat<<<1, 1024>>>(700, 700, enc_pred_concated.ptr, prednet_out.ptr); jointnet1(cudnn, enc_pred_concated, jointnet_out); // loading log_probs in float array size_t log_probs_N = jointnet_out.data_at_host(&log_probs); // add blank transition to B if(top_log_prob_a+log_probs[blank_index] > bmszth_top_log_prob_b && !trie.insert_and_check(data_a[top_id_data_a].beam_sids)) // and not already in trie: { data_tuple next_data_tuple = {data_a[top_id_data_a].beam_string, top_log_prob_a + log_probs[blank_index], data_a[top_id_data_a].last_decoded_sid, data_a[top_id_data_a].hidden_idx, data_a[top_id_data_a].beam_sids}; prednet1.reuse_state(data_a[top_id_data_a].hidden_idx); b_heap.push(make_pair(next_data_tuple.log_prob, data_b.size())); data_b.push_back(next_data_tuple); if(b_heap.size()==beamsize+1) { b_heap.pop(); } if(b_heap.size()==beamsize) { pair<float, int> log_prob_data_idx_pair = b_heap.top(); bmszth_top_log_prob_b = data_b[log_prob_data_idx_pair.second].log_prob; } } // add non-blank transition to A for(int i=0; i<vocab_size; i++) { if(i == blank_index || top_log_prob_a+log_probs[i] <= bmszth_top_log_prob_b || log_probs[i] < hparams::prune_log_prob) continue; data_tuple next_data_tuple = {data_a[top_id_data_a].beam_string + subword_map[i], top_log_prob_a + log_probs[i], size_t(i), output_state_idx, data_a[top_id_data_a].beam_sids}; prednet1.reuse_state(output_state_idx); next_data_tuple.beam_sids.push_back(i); a_heap.push(make_pair(-next_data_tuple.log_prob, data_a.size())); data_a.push_back(next_data_tuple); } // update top_id_data_a and top_log_prob_a 
top_log_prob_a = -numeric_limits<float>::infinity(); if(a_heap.size()) { top_log_prob_data_idx_pair = a_heap.top(); a_heap.pop(); top_id_data_a = top_log_prob_data_idx_pair.second; top_log_prob_a = data_a[top_id_data_a].log_prob; } } } // dealloc all hiddens floats for(int i=0; i<data_a.size(); ++i) prednet1.free_state(data_a[i].hidden_idx); for(int i=0; i<data_b.size(); ++i) prednet1.free_state(data_b[i].hidden_idx); // write to beams_and_logprobs_out while(b_heap.size()) { pair<float, int> log_prob_data_idx_pair = b_heap.top(); b_heap.pop(); int data_b_idx = log_prob_data_idx_pair.second; beams_and_logprobs_out.push_back(make_pair(data_b[data_b_idx].beam_string, data_b[data_b_idx].log_prob)); } } decoder::~decoder() { // de-initialise the cpu variables { free(log_probs); } }
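decoder_concat is always launched as <<<1, 1024>>> because the prediction-network output concatenated here is only 700 floats wide, so a single 1024-thread block covers it. A small, hypothetical host wrapper that generalizes the launch to arbitrary sizes with the usual ceil-division grid computation might look like this:

// Hypothetical helper, not part of the decoder class: launches the
// decoder_concat kernel defined above for any in2_sz instead of relying on
// in2_sz <= 1024.
void launch_decoder_concat(size_t in1_sz, size_t in2_sz,
                           float* in1, const float* in2,
                           cudaStream_t stream = 0) {
  const int threads = 256;
  const int blocks = static_cast<int>((in2_sz + threads - 1) / threads);
  decoder_concat<<<blocks, threads, 0, stream>>>(in1_sz, in2_sz, in1, in2);
}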
#include <nvbench/benchmark.cuh> #include <nvbench/callable.cuh> #include <nvbench/state.cuh> #include <nvbench/type_list.cuh> #include <nvbench/type_strings.cuh> #include <nvbench/types.cuh> #include "test_asserts.cuh" #include <fmt/format.h> #include <algorithm> #include <variant> #include <vector> template <typename T> std::vector<T> sort(std::vector<T> &&vec) { std::sort(vec.begin(), vec.end()); return std::move(vec); } void no_op_generator(nvbench::state &state) { fmt::memory_buffer params; fmt::format_to(params, "Params:"); const auto &axis_values = state.get_axis_values(); for (const auto &name : sort(axis_values.get_names())) { std::visit( [&params, &name](const auto &value) { fmt::format_to(params, " {}: {}", name, value); }, axis_values.get_value(name)); } // Marking as skipped to signal that this state is run: state.skip(fmt::to_string(std::move(params))); } NVBENCH_DEFINE_CALLABLE(no_op_generator, no_op_callable); using float_types = nvbench::type_list<nvbench::float32_t, nvbench::float64_t>; using int_types = nvbench::type_list<nvbench::int32_t, nvbench::int64_t>; using misc_types = nvbench::type_list<bool, void>; using type_axes = nvbench::type_list<float_types, int_types, misc_types>; template <typename FloatT, typename IntT, typename MiscT> void template_no_op_generator(nvbench::state &state, nvbench::type_list<FloatT, IntT, MiscT>) { ASSERT(nvbench::type_strings<FloatT>::input_string() == state.get_string("FloatT")); ASSERT(nvbench::type_strings<IntT>::input_string() == state.get_string("IntT")); ASSERT(nvbench::type_strings<IntT>::input_string() == state.get_string("IntT")); // Enum params using non-templated version: no_op_generator(state); } NVBENCH_DEFINE_CALLABLE_TEMPLATE(template_no_op_generator, template_no_op_callable); void test_empty() { using benchmark_type = nvbench::benchmark<no_op_callable>; using runner_type = nvbench::runner<benchmark_type>; benchmark_type bench; bench.set_devices(std::vector<int>{}); runner_type runner{bench}; runner.generate_states(); ASSERT(bench.get_states().size() == 1); ASSERT(bench.get_states().front().is_skipped() == false); runner.run(); ASSERT(bench.get_states().size() == 1); ASSERT(bench.get_states().front().is_skipped() == true); } void test_non_types() { using benchmark_type = nvbench::benchmark<no_op_callable>; using runner_type = nvbench::runner<benchmark_type>; benchmark_type bench; bench.set_devices(std::vector<int>{}); bench.add_int64_axis("Int", {1, 2, 3}); bench.add_float64_axis("Float", {11.0, 12.0, 13.0}); bench.add_string_axis("String", {"One", "Two", "Three"}); runner_type runner{bench}; runner.generate_states(); ASSERT(bench.get_states().size() == 27); for (const auto &state : bench.get_states()) { ASSERT(state.is_skipped() == false); } fmt::memory_buffer buffer; runner.run(); ASSERT(bench.get_states().size() == 27); for (const auto &state : bench.get_states()) { ASSERT(state.is_skipped() == true); fmt::format_to(buffer, "{}\n", state.get_skip_reason()); } const std::string ref = R"expected(Params: Float: 11 Int: 1 String: One Params: Float: 11 Int: 2 String: One Params: Float: 11 Int: 3 String: One Params: Float: 12 Int: 1 String: One Params: Float: 12 Int: 2 String: One Params: Float: 12 Int: 3 String: One Params: Float: 13 Int: 1 String: One Params: Float: 13 Int: 2 String: One Params: Float: 13 Int: 3 String: One Params: Float: 11 Int: 1 String: Two Params: Float: 11 Int: 2 String: Two Params: Float: 11 Int: 3 String: Two Params: Float: 12 Int: 1 String: Two Params: Float: 12 Int: 2 String: Two Params: Float: 12 
Int: 3 String: Two Params: Float: 13 Int: 1 String: Two Params: Float: 13 Int: 2 String: Two Params: Float: 13 Int: 3 String: Two Params: Float: 11 Int: 1 String: Three Params: Float: 11 Int: 2 String: Three Params: Float: 11 Int: 3 String: Three Params: Float: 12 Int: 1 String: Three Params: Float: 12 Int: 2 String: Three Params: Float: 12 Int: 3 String: Three Params: Float: 13 Int: 1 String: Three Params: Float: 13 Int: 2 String: Three Params: Float: 13 Int: 3 String: Three )expected"; const std::string test = fmt::to_string(buffer); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } void test_types() { using benchmark_type = nvbench::benchmark<template_no_op_callable, type_axes>; using runner_type = nvbench::runner<benchmark_type>; benchmark_type bench; bench.set_devices(std::vector<int>{}); bench.set_type_axes_names({"FloatT", "IntT", "MiscT"}); runner_type runner{bench}; runner.generate_states(); ASSERT(bench.get_states().size() == 8); for (const auto &state : bench.get_states()) { ASSERT(state.is_skipped() == false); } fmt::memory_buffer buffer; runner.run(); ASSERT(bench.get_states().size() == 8); for (const auto &state : bench.get_states()) { ASSERT(state.is_skipped() == true); fmt::format_to(buffer, "{}\n", state.get_skip_reason()); } const std::string ref = R"expected(Params: FloatT: F32 IntT: I32 MiscT: bool Params: FloatT: F32 IntT: I32 MiscT: void Params: FloatT: F32 IntT: I64 MiscT: bool Params: FloatT: F32 IntT: I64 MiscT: void Params: FloatT: F64 IntT: I32 MiscT: bool Params: FloatT: F64 IntT: I32 MiscT: void Params: FloatT: F64 IntT: I64 MiscT: bool Params: FloatT: F64 IntT: I64 MiscT: void )expected"; const std::string test = fmt::to_string(buffer); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } void test_both() { using benchmark_type = nvbench::benchmark<template_no_op_callable, type_axes>; using runner_type = nvbench::runner<benchmark_type>; benchmark_type bench; bench.set_devices(std::vector<int>{}); bench.set_type_axes_names({"FloatT", "IntT", "MiscT"}); bench.add_int64_axis("Int", {1, 2, 3}); bench.add_float64_axis("Float", {11.0, 12.0, 13.0}); bench.add_string_axis("String", {"One", "Two", "Three"}); runner_type runner{bench}; runner.generate_states(); ASSERT(bench.get_states().size() == 8 * 27); for (const auto &state : bench.get_states()) { ASSERT(state.is_skipped() == false); } fmt::memory_buffer buffer; runner.run(); ASSERT(bench.get_states().size() == 8 * 27); for (const auto &state : bench.get_states()) { ASSERT(state.is_skipped() == true); fmt::format_to(buffer, "{}\n", state.get_skip_reason()); } const std::string ref = R"expected(Params: Float: 11 FloatT: F32 Int: 1 IntT: I32 MiscT: bool String: One Params: Float: 11 FloatT: F32 Int: 2 IntT: I32 MiscT: bool String: One Params: Float: 11 FloatT: F32 Int: 3 IntT: I32 MiscT: bool String: One Params: Float: 12 FloatT: F32 Int: 1 IntT: I32 MiscT: bool String: One Params: Float: 12 FloatT: F32 Int: 2 IntT: I32 MiscT: bool String: One Params: Float: 12 FloatT: F32 Int: 3 IntT: I32 MiscT: bool String: One Params: Float: 13 FloatT: F32 Int: 1 IntT: I32 MiscT: bool String: One Params: Float: 13 FloatT: F32 Int: 2 IntT: I32 MiscT: bool String: One Params: Float: 13 FloatT: F32 Int: 3 IntT: I32 MiscT: bool String: One Params: Float: 11 FloatT: F32 Int: 1 IntT: I32 MiscT: bool String: Two Params: Float: 11 FloatT: F32 Int: 2 IntT: I32 MiscT: bool String: Two Params: Float: 11 FloatT: F32 Int: 3 IntT: I32 MiscT: bool String: Two Params: Float: 12 FloatT: F32 Int: 
1 IntT: I32 MiscT: bool String: Two Params: Float: 12 FloatT: F32 Int: 2 IntT: I32 MiscT: bool String: Two Params: Float: 12 FloatT: F32 Int: 3 IntT: I32 MiscT: bool String: Two Params: Float: 13 FloatT: F32 Int: 1 IntT: I32 MiscT: bool String: Two Params: Float: 13 FloatT: F32 Int: 2 IntT: I32 MiscT: bool String: Two Params: Float: 13 FloatT: F32 Int: 3 IntT: I32 MiscT: bool String: Two Params: Float: 11 FloatT: F32 Int: 1 IntT: I32 MiscT: bool String: Three Params: Float: 11 FloatT: F32 Int: 2 IntT: I32 MiscT: bool String: Three Params: Float: 11 FloatT: F32 Int: 3 IntT: I32 MiscT: bool String: Three Params: Float: 12 FloatT: F32 Int: 1 IntT: I32 MiscT: bool String: Three Params: Float: 12 FloatT: F32 Int: 2 IntT: I32 MiscT: bool String: Three Params: Float: 12 FloatT: F32 Int: 3 IntT: I32 MiscT: bool String: Three Params: Float: 13 FloatT: F32 Int: 1 IntT: I32 MiscT: bool String: Three Params: Float: 13 FloatT: F32 Int: 2 IntT: I32 MiscT: bool String: Three Params: Float: 13 FloatT: F32 Int: 3 IntT: I32 MiscT: bool String: Three Params: Float: 11 FloatT: F32 Int: 1 IntT: I32 MiscT: void String: One Params: Float: 11 FloatT: F32 Int: 2 IntT: I32 MiscT: void String: One Params: Float: 11 FloatT: F32 Int: 3 IntT: I32 MiscT: void String: One Params: Float: 12 FloatT: F32 Int: 1 IntT: I32 MiscT: void String: One Params: Float: 12 FloatT: F32 Int: 2 IntT: I32 MiscT: void String: One Params: Float: 12 FloatT: F32 Int: 3 IntT: I32 MiscT: void String: One Params: Float: 13 FloatT: F32 Int: 1 IntT: I32 MiscT: void String: One Params: Float: 13 FloatT: F32 Int: 2 IntT: I32 MiscT: void String: One Params: Float: 13 FloatT: F32 Int: 3 IntT: I32 MiscT: void String: One Params: Float: 11 FloatT: F32 Int: 1 IntT: I32 MiscT: void String: Two Params: Float: 11 FloatT: F32 Int: 2 IntT: I32 MiscT: void String: Two Params: Float: 11 FloatT: F32 Int: 3 IntT: I32 MiscT: void String: Two Params: Float: 12 FloatT: F32 Int: 1 IntT: I32 MiscT: void String: Two Params: Float: 12 FloatT: F32 Int: 2 IntT: I32 MiscT: void String: Two Params: Float: 12 FloatT: F32 Int: 3 IntT: I32 MiscT: void String: Two Params: Float: 13 FloatT: F32 Int: 1 IntT: I32 MiscT: void String: Two Params: Float: 13 FloatT: F32 Int: 2 IntT: I32 MiscT: void String: Two Params: Float: 13 FloatT: F32 Int: 3 IntT: I32 MiscT: void String: Two Params: Float: 11 FloatT: F32 Int: 1 IntT: I32 MiscT: void String: Three Params: Float: 11 FloatT: F32 Int: 2 IntT: I32 MiscT: void String: Three Params: Float: 11 FloatT: F32 Int: 3 IntT: I32 MiscT: void String: Three Params: Float: 12 FloatT: F32 Int: 1 IntT: I32 MiscT: void String: Three Params: Float: 12 FloatT: F32 Int: 2 IntT: I32 MiscT: void String: Three Params: Float: 12 FloatT: F32 Int: 3 IntT: I32 MiscT: void String: Three Params: Float: 13 FloatT: F32 Int: 1 IntT: I32 MiscT: void String: Three Params: Float: 13 FloatT: F32 Int: 2 IntT: I32 MiscT: void String: Three Params: Float: 13 FloatT: F32 Int: 3 IntT: I32 MiscT: void String: Three Params: Float: 11 FloatT: F32 Int: 1 IntT: I64 MiscT: bool String: One Params: Float: 11 FloatT: F32 Int: 2 IntT: I64 MiscT: bool String: One Params: Float: 11 FloatT: F32 Int: 3 IntT: I64 MiscT: bool String: One Params: Float: 12 FloatT: F32 Int: 1 IntT: I64 MiscT: bool String: One Params: Float: 12 FloatT: F32 Int: 2 IntT: I64 MiscT: bool String: One Params: Float: 12 FloatT: F32 Int: 3 IntT: I64 MiscT: bool String: One Params: Float: 13 FloatT: F32 Int: 1 IntT: I64 MiscT: bool String: One Params: Float: 13 FloatT: F32 Int: 2 IntT: I64 MiscT: bool String: One 
Params: Float: 13 FloatT: F32 Int: 3 IntT: I64 MiscT: bool String: One Params: Float: 11 FloatT: F32 Int: 1 IntT: I64 MiscT: bool String: Two Params: Float: 11 FloatT: F32 Int: 2 IntT: I64 MiscT: bool String: Two Params: Float: 11 FloatT: F32 Int: 3 IntT: I64 MiscT: bool String: Two Params: Float: 12 FloatT: F32 Int: 1 IntT: I64 MiscT: bool String: Two Params: Float: 12 FloatT: F32 Int: 2 IntT: I64 MiscT: bool String: Two Params: Float: 12 FloatT: F32 Int: 3 IntT: I64 MiscT: bool String: Two Params: Float: 13 FloatT: F32 Int: 1 IntT: I64 MiscT: bool String: Two Params: Float: 13 FloatT: F32 Int: 2 IntT: I64 MiscT: bool String: Two Params: Float: 13 FloatT: F32 Int: 3 IntT: I64 MiscT: bool String: Two Params: Float: 11 FloatT: F32 Int: 1 IntT: I64 MiscT: bool String: Three Params: Float: 11 FloatT: F32 Int: 2 IntT: I64 MiscT: bool String: Three Params: Float: 11 FloatT: F32 Int: 3 IntT: I64 MiscT: bool String: Three Params: Float: 12 FloatT: F32 Int: 1 IntT: I64 MiscT: bool String: Three Params: Float: 12 FloatT: F32 Int: 2 IntT: I64 MiscT: bool String: Three Params: Float: 12 FloatT: F32 Int: 3 IntT: I64 MiscT: bool String: Three Params: Float: 13 FloatT: F32 Int: 1 IntT: I64 MiscT: bool String: Three Params: Float: 13 FloatT: F32 Int: 2 IntT: I64 MiscT: bool String: Three Params: Float: 13 FloatT: F32 Int: 3 IntT: I64 MiscT: bool String: Three Params: Float: 11 FloatT: F32 Int: 1 IntT: I64 MiscT: void String: One Params: Float: 11 FloatT: F32 Int: 2 IntT: I64 MiscT: void String: One Params: Float: 11 FloatT: F32 Int: 3 IntT: I64 MiscT: void String: One Params: Float: 12 FloatT: F32 Int: 1 IntT: I64 MiscT: void String: One Params: Float: 12 FloatT: F32 Int: 2 IntT: I64 MiscT: void String: One Params: Float: 12 FloatT: F32 Int: 3 IntT: I64 MiscT: void String: One Params: Float: 13 FloatT: F32 Int: 1 IntT: I64 MiscT: void String: One Params: Float: 13 FloatT: F32 Int: 2 IntT: I64 MiscT: void String: One Params: Float: 13 FloatT: F32 Int: 3 IntT: I64 MiscT: void String: One Params: Float: 11 FloatT: F32 Int: 1 IntT: I64 MiscT: void String: Two Params: Float: 11 FloatT: F32 Int: 2 IntT: I64 MiscT: void String: Two Params: Float: 11 FloatT: F32 Int: 3 IntT: I64 MiscT: void String: Two Params: Float: 12 FloatT: F32 Int: 1 IntT: I64 MiscT: void String: Two Params: Float: 12 FloatT: F32 Int: 2 IntT: I64 MiscT: void String: Two Params: Float: 12 FloatT: F32 Int: 3 IntT: I64 MiscT: void String: Two Params: Float: 13 FloatT: F32 Int: 1 IntT: I64 MiscT: void String: Two Params: Float: 13 FloatT: F32 Int: 2 IntT: I64 MiscT: void String: Two Params: Float: 13 FloatT: F32 Int: 3 IntT: I64 MiscT: void String: Two Params: Float: 11 FloatT: F32 Int: 1 IntT: I64 MiscT: void String: Three Params: Float: 11 FloatT: F32 Int: 2 IntT: I64 MiscT: void String: Three Params: Float: 11 FloatT: F32 Int: 3 IntT: I64 MiscT: void String: Three Params: Float: 12 FloatT: F32 Int: 1 IntT: I64 MiscT: void String: Three Params: Float: 12 FloatT: F32 Int: 2 IntT: I64 MiscT: void String: Three Params: Float: 12 FloatT: F32 Int: 3 IntT: I64 MiscT: void String: Three Params: Float: 13 FloatT: F32 Int: 1 IntT: I64 MiscT: void String: Three Params: Float: 13 FloatT: F32 Int: 2 IntT: I64 MiscT: void String: Three Params: Float: 13 FloatT: F32 Int: 3 IntT: I64 MiscT: void String: Three Params: Float: 11 FloatT: F64 Int: 1 IntT: I32 MiscT: bool String: One Params: Float: 11 FloatT: F64 Int: 2 IntT: I32 MiscT: bool String: One Params: Float: 11 FloatT: F64 Int: 3 IntT: I32 MiscT: bool String: One Params: Float: 12 FloatT: F64 Int: 1 
IntT: I32 MiscT: bool String: One Params: Float: 12 FloatT: F64 Int: 2 IntT: I32 MiscT: bool String: One Params: Float: 12 FloatT: F64 Int: 3 IntT: I32 MiscT: bool String: One Params: Float: 13 FloatT: F64 Int: 1 IntT: I32 MiscT: bool String: One Params: Float: 13 FloatT: F64 Int: 2 IntT: I32 MiscT: bool String: One Params: Float: 13 FloatT: F64 Int: 3 IntT: I32 MiscT: bool String: One Params: Float: 11 FloatT: F64 Int: 1 IntT: I32 MiscT: bool String: Two Params: Float: 11 FloatT: F64 Int: 2 IntT: I32 MiscT: bool String: Two Params: Float: 11 FloatT: F64 Int: 3 IntT: I32 MiscT: bool String: Two Params: Float: 12 FloatT: F64 Int: 1 IntT: I32 MiscT: bool String: Two Params: Float: 12 FloatT: F64 Int: 2 IntT: I32 MiscT: bool String: Two Params: Float: 12 FloatT: F64 Int: 3 IntT: I32 MiscT: bool String: Two Params: Float: 13 FloatT: F64 Int: 1 IntT: I32 MiscT: bool String: Two Params: Float: 13 FloatT: F64 Int: 2 IntT: I32 MiscT: bool String: Two Params: Float: 13 FloatT: F64 Int: 3 IntT: I32 MiscT: bool String: Two Params: Float: 11 FloatT: F64 Int: 1 IntT: I32 MiscT: bool String: Three Params: Float: 11 FloatT: F64 Int: 2 IntT: I32 MiscT: bool String: Three Params: Float: 11 FloatT: F64 Int: 3 IntT: I32 MiscT: bool String: Three Params: Float: 12 FloatT: F64 Int: 1 IntT: I32 MiscT: bool String: Three Params: Float: 12 FloatT: F64 Int: 2 IntT: I32 MiscT: bool String: Three Params: Float: 12 FloatT: F64 Int: 3 IntT: I32 MiscT: bool String: Three Params: Float: 13 FloatT: F64 Int: 1 IntT: I32 MiscT: bool String: Three Params: Float: 13 FloatT: F64 Int: 2 IntT: I32 MiscT: bool String: Three Params: Float: 13 FloatT: F64 Int: 3 IntT: I32 MiscT: bool String: Three Params: Float: 11 FloatT: F64 Int: 1 IntT: I32 MiscT: void String: One Params: Float: 11 FloatT: F64 Int: 2 IntT: I32 MiscT: void String: One Params: Float: 11 FloatT: F64 Int: 3 IntT: I32 MiscT: void String: One Params: Float: 12 FloatT: F64 Int: 1 IntT: I32 MiscT: void String: One Params: Float: 12 FloatT: F64 Int: 2 IntT: I32 MiscT: void String: One Params: Float: 12 FloatT: F64 Int: 3 IntT: I32 MiscT: void String: One Params: Float: 13 FloatT: F64 Int: 1 IntT: I32 MiscT: void String: One Params: Float: 13 FloatT: F64 Int: 2 IntT: I32 MiscT: void String: One Params: Float: 13 FloatT: F64 Int: 3 IntT: I32 MiscT: void String: One Params: Float: 11 FloatT: F64 Int: 1 IntT: I32 MiscT: void String: Two Params: Float: 11 FloatT: F64 Int: 2 IntT: I32 MiscT: void String: Two Params: Float: 11 FloatT: F64 Int: 3 IntT: I32 MiscT: void String: Two Params: Float: 12 FloatT: F64 Int: 1 IntT: I32 MiscT: void String: Two Params: Float: 12 FloatT: F64 Int: 2 IntT: I32 MiscT: void String: Two Params: Float: 12 FloatT: F64 Int: 3 IntT: I32 MiscT: void String: Two Params: Float: 13 FloatT: F64 Int: 1 IntT: I32 MiscT: void String: Two Params: Float: 13 FloatT: F64 Int: 2 IntT: I32 MiscT: void String: Two Params: Float: 13 FloatT: F64 Int: 3 IntT: I32 MiscT: void String: Two Params: Float: 11 FloatT: F64 Int: 1 IntT: I32 MiscT: void String: Three Params: Float: 11 FloatT: F64 Int: 2 IntT: I32 MiscT: void String: Three Params: Float: 11 FloatT: F64 Int: 3 IntT: I32 MiscT: void String: Three Params: Float: 12 FloatT: F64 Int: 1 IntT: I32 MiscT: void String: Three Params: Float: 12 FloatT: F64 Int: 2 IntT: I32 MiscT: void String: Three Params: Float: 12 FloatT: F64 Int: 3 IntT: I32 MiscT: void String: Three Params: Float: 13 FloatT: F64 Int: 1 IntT: I32 MiscT: void String: Three Params: Float: 13 FloatT: F64 Int: 2 IntT: I32 MiscT: void String: Three Params: 
Float: 13 FloatT: F64 Int: 3 IntT: I32 MiscT: void String: Three Params: Float: 11 FloatT: F64 Int: 1 IntT: I64 MiscT: bool String: One Params: Float: 11 FloatT: F64 Int: 2 IntT: I64 MiscT: bool String: One Params: Float: 11 FloatT: F64 Int: 3 IntT: I64 MiscT: bool String: One Params: Float: 12 FloatT: F64 Int: 1 IntT: I64 MiscT: bool String: One Params: Float: 12 FloatT: F64 Int: 2 IntT: I64 MiscT: bool String: One Params: Float: 12 FloatT: F64 Int: 3 IntT: I64 MiscT: bool String: One Params: Float: 13 FloatT: F64 Int: 1 IntT: I64 MiscT: bool String: One Params: Float: 13 FloatT: F64 Int: 2 IntT: I64 MiscT: bool String: One Params: Float: 13 FloatT: F64 Int: 3 IntT: I64 MiscT: bool String: One Params: Float: 11 FloatT: F64 Int: 1 IntT: I64 MiscT: bool String: Two Params: Float: 11 FloatT: F64 Int: 2 IntT: I64 MiscT: bool String: Two Params: Float: 11 FloatT: F64 Int: 3 IntT: I64 MiscT: bool String: Two Params: Float: 12 FloatT: F64 Int: 1 IntT: I64 MiscT: bool String: Two Params: Float: 12 FloatT: F64 Int: 2 IntT: I64 MiscT: bool String: Two Params: Float: 12 FloatT: F64 Int: 3 IntT: I64 MiscT: bool String: Two Params: Float: 13 FloatT: F64 Int: 1 IntT: I64 MiscT: bool String: Two Params: Float: 13 FloatT: F64 Int: 2 IntT: I64 MiscT: bool String: Two Params: Float: 13 FloatT: F64 Int: 3 IntT: I64 MiscT: bool String: Two Params: Float: 11 FloatT: F64 Int: 1 IntT: I64 MiscT: bool String: Three Params: Float: 11 FloatT: F64 Int: 2 IntT: I64 MiscT: bool String: Three Params: Float: 11 FloatT: F64 Int: 3 IntT: I64 MiscT: bool String: Three Params: Float: 12 FloatT: F64 Int: 1 IntT: I64 MiscT: bool String: Three Params: Float: 12 FloatT: F64 Int: 2 IntT: I64 MiscT: bool String: Three Params: Float: 12 FloatT: F64 Int: 3 IntT: I64 MiscT: bool String: Three Params: Float: 13 FloatT: F64 Int: 1 IntT: I64 MiscT: bool String: Three Params: Float: 13 FloatT: F64 Int: 2 IntT: I64 MiscT: bool String: Three Params: Float: 13 FloatT: F64 Int: 3 IntT: I64 MiscT: bool String: Three Params: Float: 11 FloatT: F64 Int: 1 IntT: I64 MiscT: void String: One Params: Float: 11 FloatT: F64 Int: 2 IntT: I64 MiscT: void String: One Params: Float: 11 FloatT: F64 Int: 3 IntT: I64 MiscT: void String: One Params: Float: 12 FloatT: F64 Int: 1 IntT: I64 MiscT: void String: One Params: Float: 12 FloatT: F64 Int: 2 IntT: I64 MiscT: void String: One Params: Float: 12 FloatT: F64 Int: 3 IntT: I64 MiscT: void String: One Params: Float: 13 FloatT: F64 Int: 1 IntT: I64 MiscT: void String: One Params: Float: 13 FloatT: F64 Int: 2 IntT: I64 MiscT: void String: One Params: Float: 13 FloatT: F64 Int: 3 IntT: I64 MiscT: void String: One Params: Float: 11 FloatT: F64 Int: 1 IntT: I64 MiscT: void String: Two Params: Float: 11 FloatT: F64 Int: 2 IntT: I64 MiscT: void String: Two Params: Float: 11 FloatT: F64 Int: 3 IntT: I64 MiscT: void String: Two Params: Float: 12 FloatT: F64 Int: 1 IntT: I64 MiscT: void String: Two Params: Float: 12 FloatT: F64 Int: 2 IntT: I64 MiscT: void String: Two Params: Float: 12 FloatT: F64 Int: 3 IntT: I64 MiscT: void String: Two Params: Float: 13 FloatT: F64 Int: 1 IntT: I64 MiscT: void String: Two Params: Float: 13 FloatT: F64 Int: 2 IntT: I64 MiscT: void String: Two Params: Float: 13 FloatT: F64 Int: 3 IntT: I64 MiscT: void String: Two Params: Float: 11 FloatT: F64 Int: 1 IntT: I64 MiscT: void String: Three Params: Float: 11 FloatT: F64 Int: 2 IntT: I64 MiscT: void String: Three Params: Float: 11 FloatT: F64 Int: 3 IntT: I64 MiscT: void String: Three Params: Float: 12 FloatT: F64 Int: 1 IntT: I64 MiscT: 
void String: Three Params: Float: 12 FloatT: F64 Int: 2 IntT: I64 MiscT: void String: Three Params: Float: 12 FloatT: F64 Int: 3 IntT: I64 MiscT: void String: Three Params: Float: 13 FloatT: F64 Int: 1 IntT: I64 MiscT: void String: Three Params: Float: 13 FloatT: F64 Int: 2 IntT: I64 MiscT: void String: Three Params: Float: 13 FloatT: F64 Int: 3 IntT: I64 MiscT: void String: Three )expected"; const std::string test = fmt::to_string(buffer); ASSERT_MSG(test == ref, "Expected:\n\"{}\"\n\nActual:\n\"{}\"", ref, test); } int main() { test_empty(); test_non_types(); test_types(); test_both(); }
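test_non_types expects 3 x 3 x 3 = 27 states and test_both expects 8 x 27 = 216 because nvbench generates one state per combination of axis values, i.e. the Cartesian product of the type axes and the value axes. A tiny host-side sketch of that counting, using a hypothetical expected_state_count helper:

#include <cstddef>
#include <vector>

// Each state corresponds to one combination of axis values, so the number of
// generated states is the product of all axis sizes. For test_both the type
// axes contribute 2 * 2 * 2 = 8 and the value axes 3 * 3 * 3 = 27.
std::size_t expected_state_count(const std::vector<std::size_t>& axis_sizes) {
  std::size_t count = 1;
  for (std::size_t n : axis_sizes) count *= n;
  return count;
}

// expected_state_count({2, 2, 2, 3, 3, 3}) == 216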
#include "cooperative_groups.h" #include "cuda_profiler_api.h" namespace motis::raptor { using namespace cooperative_groups; // leader type must be unsigned 32bit // no leader is a zero ballot vote (all 0) minus 1 => with underflow all 1's constexpr unsigned int FULL_MASK = 0xFFFFffff; constexpr unsigned int NO_LEADER = FULL_MASK; __device__ __forceinline__ unsigned int get_block_thread_id() { return threadIdx.x + (blockDim.x * threadIdx.y); } __device__ __forceinline__ unsigned int get_global_thread_id() { return get_block_thread_id() + (blockDim.x * blockDim.y * blockIdx.x); } __device__ __forceinline__ unsigned int get_block_stride() { return blockDim.x * blockDim.y; } __device__ __forceinline__ unsigned int get_global_stride() { return get_block_stride() * gridDim.x * gridDim.y; } __device__ void mark(unsigned int* store, unsigned int const idx) { unsigned int const store_idx = (idx >> 5); // divide by 32 unsigned int const mask = 1 << (idx % 32); atomicOr(&store[store_idx], mask); } __device__ bool marked(unsigned int const* const store, unsigned int idx) { unsigned int const store_idx = (idx >> 5); // divide by 32 unsigned int const val = store[store_idx]; unsigned int const mask = 1 << (idx % 32); return (bool)(val & mask); } __device__ void reset_store(unsigned int* store, int const store_size) { auto const t_id = get_global_thread_id(); auto const stride = get_global_stride(); for (auto idx = t_id; idx < store_size; idx += stride) { store[idx] = 0; } } __device__ void convert_station_to_route_marks(unsigned int* station_marks, unsigned int* route_marks, bool* any_station_marked, device_gpu_timetable const& tt) { auto const global_t_id = get_global_thread_id(); auto const global_stride = get_global_stride(); for (auto idx = global_t_id; idx < tt.stop_count_; idx += global_stride) { if (marked(station_marks, idx)) { if (!*any_station_marked) { *any_station_marked = true; } auto const stop = tt.stops_[idx]; for (auto sri = stop.index_to_stop_routes_; sri < stop.index_to_stop_routes_ + stop.route_count_; ++sri) { mark(route_marks, tt.stop_routes_[sri]); } } } } __device__ bool update_arrival(time* const base, stop_id const s_id, time const val) { #if __CUDA_ARCH__ >= 700 auto old_value = base[s_id]; time assumed; do { if (old_value <= val) { return false; } assumed = old_value; old_value = atomicCAS(&base[s_id], assumed, val); } while (assumed != old_value); return true; #else // we have a 16-bit time value array, but only 32-bit atomic operations // therefore every two 16-bit time values are read as one 32-bit time value // then they are the corresponding part is updated and stored if a better // time value was found while the remaining 16 bit value part remains // unchanged time* const arr_address = &base[s_id]; unsigned int* base_address = (unsigned int*)((size_t)arr_address & ~2); unsigned int old_value, assumed, new_value, compare_val; old_value = *base_address; do { assumed = old_value; if ((size_t)arr_address & 2) { compare_val = (0x0000FFFF & assumed) ^ (((unsigned int)val) << 16); } else { compare_val = (0xFFFF0000 & assumed) ^ (unsigned int)val; } new_value = __vminu2(old_value, compare_val); if (new_value == old_value) { return false; } old_value = atomicCAS(base_address, assumed, new_value); } while (assumed != old_value); return true; #endif } __device__ void copy_marked_arrivals(time* const to, time const* const from, unsigned int* station_marks, device_gpu_timetable const& tt) { auto const global_stride = get_global_stride(); auto arr_idx = get_global_thread_id(); for 
(; arr_idx < tt.stop_count_; arr_idx += global_stride) { if (marked(station_marks, arr_idx)) { to[arr_idx] = from[arr_idx]; } else { to[arr_idx] = invalid<time>; } } } __device__ void copy_and_min_arrivals(time* const to, time* const from, device_gpu_timetable const& tt) { auto const global_stride = get_global_stride(); auto arr_idx = get_global_thread_id(); for (; arr_idx < tt.stop_count_; arr_idx += global_stride) { to[arr_idx] = min(from[arr_idx], to[arr_idx]); } } __device__ void update_route_larger32(gpu_route const& route, time const* const prev_arrivals, time* const arrivals, unsigned int* station_marks, device_gpu_timetable const& tt) { auto const t_id = threadIdx.x; stop_id stop_id_t = invalid<stop_id>; time prev_arrival = invalid<time>; time stop_arrival = invalid<time>; time stop_departure = invalid<time>; int active_stop_count = route.stop_count_; // this is ceil(stop_count / 32) int const stage_count = (route.stop_count_ + (32 - 1)) >> 5; int active_stage_count = stage_count; unsigned int leader = NO_LEADER; unsigned int any_arrival = 0; for (int trip_offset = 0; trip_offset < route.trip_count_; ++trip_offset) { for (int current_stage = 0; current_stage < active_stage_count; ++current_stage) { int stage_id = (current_stage << 5) + t_id; // load the prev arrivals for the current stage if (stage_id < active_stop_count) { stop_id_t = tt.route_stops_[route.index_to_route_stops_ + stage_id]; // prev_arrival = get_arrival(prev_arrivals, stop_id_t); prev_arrival = prev_arrivals[stop_id_t]; } any_arrival |= __any_sync(FULL_MASK, valid(prev_arrival)); if (current_stage == active_stage_count - 1 && !any_arrival) { return; } if (!any_arrival) { continue; } // load the stop times for the current stage if (stage_id < active_stop_count) { auto const st_idx = route.index_to_stop_times_ + (trip_offset * route.stop_count_) + stage_id; stop_departure = tt.stop_departures_[st_idx]; } // get the current stage leader unsigned int ballot = __ballot_sync( FULL_MASK, (stage_id < active_stop_count) && valid(prev_arrival) && valid(stop_departure) && (prev_arrival <= stop_departure)); leader = __ffs(ballot) - 1; if (leader != NO_LEADER) { leader += current_stage << 5; } // first update the current stage if (leader != NO_LEADER && stage_id < active_stop_count) { if (stage_id > leader) { auto const st_idx = route.index_to_stop_times_ + (trip_offset * route.stop_count_) + stage_id; stop_arrival = tt.stop_arrivals_[st_idx]; bool updated = update_arrival(arrivals, stop_id_t, stop_arrival); if (updated) { mark(station_marks, stop_id_t); } } } // then update all upward stages if (leader != NO_LEADER) { for (int upward_stage = current_stage + 1; upward_stage < active_stage_count; ++upward_stage) { int upwards_id = (upward_stage << 5) + t_id; if (upwards_id < active_stop_count) { auto const st_idx = route.index_to_stop_times_ + (trip_offset * route.stop_count_) + upwards_id; stop_arrival = tt.stop_arrivals_[st_idx]; stop_id_t = tt.route_stops_[route.index_to_route_stops_ + upwards_id]; bool updated = update_arrival(arrivals, stop_id_t, stop_arrival); if (updated) { mark(station_marks, stop_id_t); } } } // for this route we do not need to update any station higher than the // leader anymore active_stop_count = leader; active_stage_count = (active_stop_count + (32 - 1)) >> 5; leader = NO_LEADER; } } } } __device__ void update_route_smaller32(gpu_route const route, time const* const prev_arrivals, time* const arrivals, unsigned int* station_marks, device_gpu_timetable const& tt) { auto const t_id = threadIdx.x; 
stop_id stop_id_t = invalid<stop_id>; time prev_arrival = invalid<time>; time stop_arrival = invalid<time>; time stop_departure = invalid<time>; unsigned leader = route.stop_count_; unsigned int active_stop_count = route.stop_count_; if (t_id < active_stop_count) { stop_id_t = tt.route_stops_[route.index_to_route_stops_ + t_id]; // prev_arrival = get_arrival(prev_arrivals, stop_id_t); prev_arrival = prev_arrivals[stop_id_t]; } if (!__any_sync(FULL_MASK, valid(prev_arrival))) { return; } for (int trip_offset = 0; trip_offset < route.trip_count_; ++trip_offset) { if (t_id < active_stop_count) { auto const st_idx = route.index_to_stop_times_ + (trip_offset * route.stop_count_) + t_id; stop_departure = tt.stop_departures_[st_idx]; } // elect leader unsigned ballot = __ballot_sync( FULL_MASK, (t_id < active_stop_count) && valid(prev_arrival) && valid(stop_departure) && (prev_arrival <= stop_departure)); leader = __ffs(ballot) - 1; if (t_id > leader && t_id < active_stop_count) { auto const st_idx = route.index_to_stop_times_ + (trip_offset * route.stop_count_) + t_id; stop_arrival = tt.stop_arrivals_[st_idx]; bool updated = update_arrival(arrivals, stop_id_t, stop_arrival); if (updated) { mark(station_marks, stop_id_t); } } if (leader != NO_LEADER) { active_stop_count = leader; } leader = NO_LEADER; } } __device__ void update_footpaths_dev_scratch(time const* const read_arrivals, time* const write_arrivals, unsigned int* station_marks, device_gpu_timetable const& tt) { auto const global_stride = get_global_stride(); auto foot_idx = get_global_thread_id(); for (; foot_idx < tt.footpath_count_; foot_idx += global_stride) { auto const footpath = tt.footpaths_[foot_idx]; time const from_arrival = read_arrivals[footpath.from_]; time const new_arrival = from_arrival + footpath.duration_; if (valid(from_arrival) && marked(station_marks, footpath.from_)) { bool updated = update_arrival(write_arrivals, footpath.to_, new_arrival); if (updated) { mark(station_marks, footpath.to_); } } } } __device__ void update_routes_dev(time const* const prev_arrivals, time* const arrivals, unsigned int* station_marks, unsigned int* route_marks, bool* any_station_marked, device_gpu_timetable const& tt) { if (get_global_thread_id() == 0) { *any_station_marked = false; } convert_station_to_route_marks(station_marks, route_marks, any_station_marked, tt); this_grid().sync(); auto const station_store_size = (tt.stop_count_ / 32) + 1; reset_store(station_marks, station_store_size); this_grid().sync(); if (!*any_station_marked) { return; } auto const stride = blockDim.y * gridDim.x; auto const start_r_id = threadIdx.y + (blockDim.y * blockIdx.x); for (auto r_id = start_r_id; r_id < tt.route_count_; r_id += stride) { if (!marked(route_marks, r_id)) { continue; } auto const route = tt.routes_[r_id]; if (route.stop_count_ <= 32) { update_route_smaller32(route, prev_arrivals, arrivals, station_marks, tt); } else { update_route_larger32(route, prev_arrivals, arrivals, station_marks, tt); } } this_grid().sync(); auto const store_size = (tt.route_count_ / 32) + 1; reset_store(route_marks, store_size); } __device__ void init_arrivals_dev(base_query const& query, device_memory const& device_mem, device_gpu_timetable const& tt) { auto const t_id = get_global_thread_id(); auto const station_store_size = (tt.stop_count_ / 32) + 1; reset_store(device_mem.station_marks_, station_store_size); auto const route_store_size = (tt.route_count_ / 32) + 1; reset_store(device_mem.route_marks_, route_store_size); if (t_id == 0) { 
*device_mem.any_station_marked_ = false; } if (t_id == 0) { device_mem.result_[0][query.source_] = query.source_time_begin_; mark(device_mem.station_marks_, query.source_); } if (t_id < device_mem.additional_start_count_) { auto const& add_start = device_mem.additional_starts_[t_id]; auto const add_start_time = query.source_time_begin_ + add_start.offset_; bool updated = update_arrival(device_mem.result_[0], add_start.s_id_, add_start_time); if (updated) { mark(device_mem.station_marks_, add_start.s_id_); } } } __device__ void update_footpaths_dev(device_memory const& device_mem, raptor_round round_k, device_gpu_timetable const& tt) { time* const arrivals = device_mem.result_[round_k]; // we must only copy the marked arrivals, // since an earlier raptor query might have used a footpath // to generate the current arrival, a new optimum from this value // would be generated using a double walk -> not correct! copy_marked_arrivals(device_mem.footpaths_scratchpad_, arrivals, device_mem.station_marks_, tt); this_grid().sync(); update_footpaths_dev_scratch(device_mem.footpaths_scratchpad_, arrivals, device_mem.station_marks_, tt); this_grid().sync(); if (round_k == max_raptor_round - 1) { return; } time* const next_arrivals = device_mem.result_[round_k + 1]; copy_and_min_arrivals(next_arrivals, arrivals, tt); this_grid().sync(); } __global__ void gpu_raptor_kernel(base_query const query, device_memory const device_mem, device_gpu_timetable const tt) { init_arrivals_dev(query, device_mem, tt); this_grid().sync(); for (raptor_round round_k = 1; round_k < max_raptor_round; ++round_k) { time const* const prev_arrivals = device_mem.result_[round_k - 1]; time* const arrivals = device_mem.result_[round_k]; update_routes_dev(prev_arrivals, arrivals, device_mem.station_marks_, device_mem.route_marks_, device_mem.any_station_marked_, tt); this_grid().sync(); update_footpaths_dev(device_mem, round_k, tt); this_grid().sync(); } } void invoke_gpu_raptor(d_query const& dq) { void* kernel_args[] = {(void*)&dq, (void*)&(dq.mem_->device_), (void*)&(dq.tt_)}; launch_kernel(gpu_raptor_kernel, kernel_args, dq.mem_->context_, dq.mem_->context_.proc_stream_); cuda_check(); cuda_sync_stream(dq.mem_->context_.proc_stream_); cuda_check(); fetch_arrivals_async(dq, dq.mem_->context_.transfer_stream_); cuda_check(); cuda_sync_stream(dq.mem_->context_.transfer_stream_); cuda_check(); } } // namespace motis::raptor
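// ---------------------------------------------------------------------------
// Illustrative sketch (not taken from the raptor sources above): the pre-Volta
// branch of update_arrival() packs two 16-bit arrival times into one 32-bit
// word and performs an atomic per-halfword minimum with atomicCAS and
// __vminu2. The self-contained program below demonstrates the same technique
// on a plain unsigned short array; the names atomic_min_u16 and demo_kernel
// are hypothetical and exist only in this sketch.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <cuda_runtime.h>

__device__ bool atomic_min_u16(unsigned short* addr, unsigned short val) {
  // Word-align the address: a 16-bit element sits in either the low or the
  // high half of its enclosing 32-bit word (little-endian device memory).
  unsigned int* base = reinterpret_cast<unsigned int*>(
      reinterpret_cast<size_t>(addr) & ~size_t(2));
  bool const high_half = (reinterpret_cast<size_t>(addr) & 2) != 0;
  unsigned int old_word = *base;
  unsigned int assumed, candidate;
  do {
    assumed = old_word;
    // Build a word whose targeted half is `val` and whose other half is kept.
    candidate = high_half
        ? (assumed & 0x0000FFFFu) | (static_cast<unsigned int>(val) << 16)
        : (assumed & 0xFFFF0000u) | static_cast<unsigned int>(val);
    unsigned int const min_word = __vminu2(assumed, candidate);  // halfword min
    if (min_word == assumed) {
      return false;  // stored value is already <= val, nothing to update
    }
    old_word = atomicCAS(base, assumed, min_word);
  } while (assumed != old_word);
  return true;
}

__global__ void demo_kernel(unsigned short* times, int n, unsigned short val) {
  int const i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    atomic_min_u16(&times[i], val);
  }
}

int main() {
  constexpr int n = 8;
  unsigned short h[n] = {100, 5, 100, 5, 100, 5, 100, 5};
  unsigned short* d = nullptr;
  cudaMalloc(&d, n * sizeof(unsigned short));
  cudaMemcpy(d, h, n * sizeof(unsigned short), cudaMemcpyHostToDevice);
  demo_kernel<<<1, 32>>>(d, n, 50);
  cudaMemcpy(h, d, n * sizeof(unsigned short), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n; ++i) printf("%u ", (unsigned)h[i]);  // 50 5 50 5 ...
  printf("\n");
  cudaFree(d);
  return 0;
}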
#include "unit_test.h" #include "matrix.h" #include "csr_multiply.h" namespace amgx { /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename T > struct Epsilon {}; template<> struct Epsilon<float> { static __device__ __host__ __forceinline__ float value( ) { return 1.0e-4f; } }; template<> struct Epsilon<double> { static __device__ __host__ __forceinline__ double value( ) { return 1.0e-8; } }; DECLARE_UNITTEST_BEGIN(CsrMultiplyTests_Base); std::string base_keywords() { return "csr"; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Index_vector > void count_non_zeroes( const Index_vector &A_rows, const Index_vector &A_cols, const Index_vector &B_rows, const Index_vector &B_cols, Index_vector &C_rows ) { typedef typename Index_vector::value_type Index_type; int nRows = static_cast<int>( A_rows.size( ) - 1 ); #pragma omp parallel for shared(nRows) for ( int aRowId = 0 ; aRowId < nRows ; ++aRowId ) { #ifdef USE_CPP_TR1 std::tr1::unordered_set<Index_type> cCols; #else std::set<Index_type> cCols; #endif for ( Index_type aColIt = A_rows[aRowId], aColEnd = A_rows[aRowId + 1] ; aColIt < aColEnd ; ++aColIt ) { Index_type bRowId = A_cols[aColIt]; for ( Index_type bColIt = B_rows[bRowId], bColEnd = B_rows[bRowId + 1] ; bColIt < bColEnd ; ++bColIt ) { cCols.insert( B_cols[bColIt] ); } } C_rows[aRowId] = static_cast<Index_type>( cCols.size( ) ); } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Index_vector, typename Value_vector > void compute_values( const Index_vector &A_rows, const Index_vector &A_cols, const Value_vector &A_vals, const Index_vector &B_rows, const Index_vector &B_cols, const Value_vector &B_vals, const Index_vector &C_rows, Index_vector &C_cols, Value_vector &C_vals ) { typedef typename Index_vector::value_type Index_type; typedef typename Value_vector::value_type Value_type; #ifdef USE_CPP_TR1 typedef std::tr1::unordered_map<Index_type, Value_type> Map; typedef typename Map::iterator MapIterator; typedef std::pair<MapIterator, bool> MapQuery; typedef typename Map::const_iterator MapConstIterator; #else typedef std::map<Index_type, Value_type> Map; typedef typename Map::iterator MapIterator; typedef std::pair<MapIterator, bool> MapQuery; typedef typename Map::const_iterator MapConstIterator; #endif int nRows = static_cast<int>( A_rows.size( ) - 1 ); #pragma omp parallel for shared(nRows) for ( int aRowId = 0 ; aRowId < nRows ; ++aRowId ) { Map cVals; for ( unsigned aColIt = A_rows[aRowId], aColEnd = A_rows[aRowId + 1] ; aColIt < aColEnd ; ++aColIt ) { Index_type bRowId = A_cols[aColIt]; Value_type aValue = A_vals[aColIt]; for ( Index_type bColIt = B_rows[bRowId], bColEnd = B_rows[bRowId + 1] ; bColIt < bColEnd ; ++bColIt ) { Index_type bColId = B_cols[bColIt]; Value_type bValue = B_vals[bColIt]; Value_type value = aValue * bValue; MapQuery q = cVals.insert( typename Map::value_type( bColId, value ) ); if ( !q.second ) { q.first->second += value; } } } Index_type cRowIt = C_rows[aRowId]; for ( MapConstIterator it = cVals.begin( ) ; it != cVals.end( ) ; ++it, ++cRowIt ) { assert( cRowIt < C_rows[aRowId + 1] ); C_cols[cRowIt] = it->first; C_vals[cRowIt] = it->second; } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Config > 
void compare_matrices( Matrix<Config> &A, Matrix<Config> &B ) { A.sortByRowAndColumn(); B.sortByRowAndColumn(); UNITTEST_ASSERT_EQUAL_DESC( "Rows", A.row_offsets, B.row_offsets ); UNITTEST_ASSERT_EQUAL_DESC( "Cols", A.col_indices, B.col_indices ); typedef typename MatPrecisionMap<Config::matPrec>::Type Value_type; const Value_type epsilon = Epsilon<Value_type>::value(); UNITTEST_ASSERT_EQUAL_TOL_DESC( "Vals", A.values, B.values, epsilon ); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< AMGX_VecPrecision VecPrecision, AMGX_MatPrecision MatPrecision > void check_csr_square( const Matrix<TemplateConfig<AMGX_host, VecPrecision, MatPrecision, AMGX_indInt> > &A_h, void *wk ) { typedef TemplateConfig<AMGX_host, VecPrecision, MatPrecision, AMGX_indInt> Config_h; typedef TemplateConfig<AMGX_device, VecPrecision, MatPrecision, AMGX_indInt> Config_d; typedef Matrix<Config_h> Matrix_h; typedef Matrix<Config_d> Matrix_d; Matrix_h B_h( A_h ), C_h; C_h.set_num_rows( A_h.get_num_rows() ); C_h.set_num_cols( B_h.get_num_rows() ); C_h.row_offsets.resize( A_h.get_num_rows() + 1 ); std::ostringstream buffer; { count_non_zeroes( A_h.row_offsets, A_h.col_indices, B_h.row_offsets, B_h.col_indices, C_h.row_offsets ); thrust::exclusive_scan( C_h.row_offsets.begin( ), C_h.row_offsets.end( ), C_h.row_offsets.begin( ) ); cudaCheckError(); int nVals = C_h.row_offsets[A_h.get_num_rows()]; C_h.col_indices.resize( nVals ); C_h.values.resize( nVals ); C_h.set_num_nz( nVals ); compute_values( A_h.row_offsets, A_h.col_indices, A_h.values, B_h.row_offsets, B_h.col_indices, B_h.values, C_h.row_offsets, C_h.col_indices, C_h.values ); } Matrix_d C_d; { Matrix_d A_d( A_h ), B_d( B_h ); CSR_Multiply<Config_d>::csr_multiply( A_d, B_d, C_d, wk ); } Matrix_d C_d_ref( C_h ); compare_matrices( C_d, C_d_ref ); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< AMGX_VecPrecision VecPrecision, AMGX_MatPrecision MatPrecision > void check_csr_multiply_poisson( int points, int nx, int ny, int nz, AMG_Config &cfg ) { typedef TemplateConfig<AMGX_host, VecPrecision, MatPrecision, AMGX_indInt> Config_h; typedef TemplateConfig<AMGX_device, VecPrecision, MatPrecision, AMGX_indInt> Config_d; typedef Matrix<Config_h> Matrix_h; Matrix_h A_h; A_h.set_initialized(0); switch (points) { case 5: case 7: case 9: case 27: generatePoissonForTest(A_h, 1, 0, points, nx, ny, nz); break; default: printf("Error invalid number of poisson points specified, valid numbers are 5, 7, 9, 27\n"); } A_h.set_initialized(1); void *wk = CSR_Multiply<Config_d>::csr_workspace_create( cfg, "default" ); check_csr_square( A_h, wk ); CSR_Multiply<Config_d>::csr_workspace_delete( wk ); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< AMGX_VecPrecision VecPrecision, AMGX_MatPrecision MatPrecision > void check_csr_multiply_file( const std::string &filename, AMG_Config &cfg, bool one_value = false ) { typedef TemplateConfig<AMGX_host, VecPrecision, MatPrecision, AMGX_indInt> Config_h; typedef TemplateConfig<AMGX_device, VecPrecision, MatPrecision, AMGX_indInt> Config_d; typedef Matrix<Config_h> Matrix_h; typedef Vector<Config_h> Vector_h; Matrix_h A_h; Vector_h x_h, b_h; A_h.set_initialized(0); A_h.addProps(CSR); UNITTEST_ASSERT_TRUE(MatrixIO<Config_h>::readSystem( filename.c_str(), A_h, b_h, x_h ) == AMGX_OK); // set all values to 1 to 
avoid numerical problems if (one_value) for (int i = 0; i < A_h.values.size(); i++) { A_h.values[i] = 1.0; } A_h.set_initialized(1); void *wk = CSR_Multiply<Config_d>::csr_workspace_create( cfg, "default" ); check_csr_square( A_h, wk ); CSR_Multiply<Config_d>::csr_workspace_delete( wk ); } DECLARE_UNITTEST_END(CsrMultiplyTests_Base);
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
DECLARE_UNITTEST_BEGIN_EXTD(CsrMultiplyTests_Poisson5_10_10, CsrMultiplyTests_Base<T_Config>); void run() { AMG_Config cfg; CsrMultiplyTests_Base<T_Config>::template check_csr_multiply_poisson<T_Config::vecPrec, T_Config::matPrec>( 5, 10, 10, 10, cfg ); } DECLARE_UNITTEST_END(CsrMultiplyTests_Poisson5_10_10) CsrMultiplyTests_Poisson5_10_10<TemplateMode<AMGX_mode_dDDI>::Type> CsrMultiplyTests_Poisson5_10_10_dDDI; CsrMultiplyTests_Poisson5_10_10<TemplateMode<AMGX_mode_dFFI>::Type> CsrMultiplyTests_Poisson5_10_10_dFFI;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
DECLARE_UNITTEST_BEGIN_EXTD(CsrMultiplyTests_Poisson5_100_100, CsrMultiplyTests_Base<T_Config>); void run() { AMG_Config cfg; CsrMultiplyTests_Base<T_Config>::template check_csr_multiply_poisson<T_Config::vecPrec, T_Config::matPrec>( 5, 100, 100, 100, cfg ); } DECLARE_UNITTEST_END(CsrMultiplyTests_Poisson5_100_100) CsrMultiplyTests_Poisson5_100_100<TemplateMode<AMGX_mode_dDDI>::Type> CsrMultiplyTests_Poisson5_100_100_dDDI; CsrMultiplyTests_Poisson5_100_100<TemplateMode<AMGX_mode_dFFI>::Type> CsrMultiplyTests_Poisson5_100_100_dFFI;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
DECLARE_UNITTEST_BEGIN_EXTD(CsrMultiplyTests_Poisson7_10_10, CsrMultiplyTests_Base<T_Config>); void run() { AMG_Config cfg; CsrMultiplyTests_Base<T_Config>::template check_csr_multiply_poisson<T_Config::vecPrec, T_Config::matPrec>( 7, 10, 10, 10, cfg ); } DECLARE_UNITTEST_END(CsrMultiplyTests_Poisson7_10_10) CsrMultiplyTests_Poisson7_10_10<TemplateMode<AMGX_mode_dDDI>::Type> CsrMultiplyTests_Poisson7_10_10_dDDI; CsrMultiplyTests_Poisson7_10_10<TemplateMode<AMGX_mode_dFFI>::Type> CsrMultiplyTests_Poisson7_10_10_dFFI;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
DECLARE_UNITTEST_BEGIN_EXTD(CsrMultiplyTests_Poisson7_100_100, CsrMultiplyTests_Base<T_Config>); void run() { AMG_Config cfg; CsrMultiplyTests_Base<T_Config>::template check_csr_multiply_poisson<T_Config::vecPrec, T_Config::matPrec>( 7, 100, 100, 100, cfg ); } DECLARE_UNITTEST_END(CsrMultiplyTests_Poisson7_100_100) CsrMultiplyTests_Poisson7_100_100<TemplateMode<AMGX_mode_dDDI>::Type> CsrMultiplyTests_Poisson7_100_100_dDDI; CsrMultiplyTests_Poisson7_100_100<TemplateMode<AMGX_mode_dFFI>::Type> CsrMultiplyTests_Poisson7_100_100_dFFI;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
DECLARE_UNITTEST_BEGIN_EXTD(CsrMultiplyTests_Poisson9_10_10, CsrMultiplyTests_Base<T_Config>); void run() { AMG_Config cfg; CsrMultiplyTests_Base<T_Config>::template check_csr_multiply_poisson<T_Config::vecPrec, T_Config::matPrec>( 9, 10, 10, 10, cfg ); } DECLARE_UNITTEST_END(CsrMultiplyTests_Poisson9_10_10) CsrMultiplyTests_Poisson9_10_10<TemplateMode<AMGX_mode_dDDI>::Type> CsrMultiplyTests_Poisson9_10_10_dDDI; CsrMultiplyTests_Poisson9_10_10<TemplateMode<AMGX_mode_dFFI>::Type> CsrMultiplyTests_Poisson9_10_10_dFFI;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
DECLARE_UNITTEST_BEGIN_EXTD(CsrMultiplyTests_Poisson9_100_100, CsrMultiplyTests_Base<T_Config>); void run() { AMG_Config cfg; CsrMultiplyTests_Base<T_Config>::template check_csr_multiply_poisson<T_Config::vecPrec, T_Config::matPrec>( 9, 100, 100, 100, cfg ); } DECLARE_UNITTEST_END(CsrMultiplyTests_Poisson9_100_100) CsrMultiplyTests_Poisson9_100_100<TemplateMode<AMGX_mode_dDDI>::Type> CsrMultiplyTests_Poisson9_100_100_dDDI; CsrMultiplyTests_Poisson9_100_100<TemplateMode<AMGX_mode_dFFI>::Type> CsrMultiplyTests_Poisson9_100_100_dFFI;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
DECLARE_UNITTEST_BEGIN_EXTD(CsrMultiplyTests_Poisson27_10_10, CsrMultiplyTests_Base<T_Config>); void run() { AMG_Config cfg; CsrMultiplyTests_Base<T_Config>::template check_csr_multiply_poisson<T_Config::vecPrec, T_Config::matPrec>( 27, 10, 10, 10, cfg ); } DECLARE_UNITTEST_END(CsrMultiplyTests_Poisson27_10_10) CsrMultiplyTests_Poisson27_10_10<TemplateMode<AMGX_mode_dDDI>::Type> CsrMultiplyTests_Poisson27_10_10_dDDI; CsrMultiplyTests_Poisson27_10_10<TemplateMode<AMGX_mode_dFFI>::Type> CsrMultiplyTests_Poisson27_10_10_dFFI;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
DECLARE_UNITTEST_BEGIN_EXTD(CsrMultiplyTests_Poisson27_100_100, CsrMultiplyTests_Base<T_Config>); void run() { AMG_Config cfg; CsrMultiplyTests_Base<T_Config>::template check_csr_multiply_poisson<T_Config::vecPrec, T_Config::matPrec>( 27, 100, 100, 100, cfg ); } DECLARE_UNITTEST_END(CsrMultiplyTests_Poisson27_100_100) CsrMultiplyTests_Poisson27_100_100<TemplateMode<AMGX_mode_dDDI>::Type> CsrMultiplyTests_Poisson27_100_100_dDDI; CsrMultiplyTests_Poisson27_100_100<TemplateMode<AMGX_mode_dFFI>::Type> CsrMultiplyTests_Poisson27_100_100_dFFI;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace amgx
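// ---------------------------------------------------------------------------
// Illustrative sketch (not taken from the test file above): the host reference
// used by check_csr_square() builds C = A * B in two passes, first counting
// the distinct column indices per row (count_non_zeroes), exclusive-scanning
// the counts into row offsets, then accumulating the products per row into a
// map (compute_values). The small standalone program below reproduces that
// two-pass CSR SpGEMM on a 2x2 example; all names are local to this sketch.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <map>
#include <numeric>
#include <set>
#include <vector>

int main() {
  // A = [[1 2],[0 3]], B = [[4 0],[5 6]] stored as CSR (offsets, cols, vals).
  std::vector<int> A_rows = {0, 2, 3}, A_cols = {0, 1, 1};
  std::vector<double> A_vals = {1, 2, 3};
  std::vector<int> B_rows = {0, 1, 3}, B_cols = {0, 0, 1};
  std::vector<double> B_vals = {4, 5, 6};
  int const n_rows = static_cast<int>(A_rows.size()) - 1;

  // Pass 1: per-row nnz counts of C, then an exclusive scan into row offsets
  // (the test performs the same step with thrust::exclusive_scan).
  std::vector<int> C_rows(n_rows + 1, 0);
  for (int i = 0; i < n_rows; ++i) {
    std::set<int> cols;
    for (int ai = A_rows[i]; ai < A_rows[i + 1]; ++ai)
      for (int bi = B_rows[A_cols[ai]]; bi < B_rows[A_cols[ai] + 1]; ++bi)
        cols.insert(B_cols[bi]);
    C_rows[i] = static_cast<int>(cols.size());
  }
  std::exclusive_scan(C_rows.begin(), C_rows.end(), C_rows.begin(), 0);

  // Pass 2: accumulate a*b products per row, then write cols/vals in order.
  std::vector<int> C_cols(C_rows[n_rows]);
  std::vector<double> C_vals(C_rows[n_rows]);
  for (int i = 0; i < n_rows; ++i) {
    std::map<int, double> acc;
    for (int ai = A_rows[i]; ai < A_rows[i + 1]; ++ai)
      for (int bi = B_rows[A_cols[ai]]; bi < B_rows[A_cols[ai] + 1]; ++bi)
        acc[B_cols[bi]] += A_vals[ai] * B_vals[bi];
    int out = C_rows[i];
    for (auto const& kv : acc) {
      C_cols[out] = kv.first;
      C_vals[out] = kv.second;
      ++out;
    }
  }
  for (int i = 0; i < n_rows; ++i)
    for (int j = C_rows[i]; j < C_rows[i + 1]; ++j)
      printf("C(%d,%d) = %g\n", i, C_cols[j], C_vals[j]);  // 14, 12, 15, 18
  return 0;
}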
#include "common.hpp" namespace HugeCTR { namespace { __global__ void reverse_relu_kernel(__half* dRelu, __half* mask, const __half* dY, int n) { const size_t tid = blockIdx.x * blockDim.x + threadIdx.x; const size_t num_threads = blockDim.x * gridDim.x; const __half2 zero = TypeFunc<__half2>::zero(); __half2* dRelu2 = reinterpret_cast<__half2*>(dRelu); __half2* mask2 = reinterpret_cast<__half2*>(mask); const __half2* dY2 = reinterpret_cast<const __half2*>(dY); __half2 m = __hgt2(mask2[tid], zero); dRelu2[tid] = __hmul2(__ldg(dY2 + tid), m); m = __hgt2(mask2[tid + num_threads], zero); dRelu2[tid + num_threads] = __hmul2(__ldg(dY2 + tid + num_threads), m); } __global__ void reverse_relu_kernel_not_aligned(__half* dRelu, __half* mask, const __half* dY, int n) { const size_t tid = blockIdx.x * blockDim.x + threadIdx.x; const size_t num_threads = blockDim.x * gridDim.x; const __half zero = TypeFunc<__half>::zero(); __half m = __hgt(mask[tid], zero); dRelu[tid] = __hmul(__ldg(dY + tid), m); } } // namespace FusedReluBiasFullyConnectedLayer::FusedReluBiasFullyConnectedLayer( const std::shared_ptr<BufferBlock2<float>>& master_weights_buff, const std::shared_ptr<BufferBlock2<__half>>& weights_buff, const std::shared_ptr<BufferBlock2<__half>>& weights_grad_buff, const std::shared_ptr<GeneralBuffer2<CudaAllocator>>& blobs_buff, const Tensor2<__half>& train_in_tensor, const Tensor2<__half>& mask_in_tensor, const Tensor2<__half>& dRelu_in_tensor, const Tensor2<__half>& db_in_tensor, const Tensor2<__half>& train_out_tensor, const Tensor2<__half>& mask_out_tensor, const Tensor2<__half>& dRelu_out_tensor, Tensor2<__half>& db_out_tensor, const std::shared_ptr<GPUResource>& gpu_resource, const FcPosition_t& pos, const Activation_t& act, const bool& skip_dgrad, std::vector<Initializer_t> initializer_types, const bool async_mlp_wgrad, const bool head_mask_in) : Layer(gpu_resource, initializer_types), balgo_k_(CUBLAS_GEMM_DEFAULT_TENSOR_OP), balgo_x_(CUBLAS_GEMM_DEFAULT_TENSOR_OP), balgo_b_(CUBLAS_GEMM_DEFAULT_TENSOR_OP), pos_(pos), act_(act), skip_dgrad_(skip_dgrad), async_mlp_wgrad_(async_mlp_wgrad), head_mask_in_(head_mask_in), event_overlap_created_(false) { const auto& bottom_tensor_dim = train_in_tensor.get_dimensions(); const auto& top_tensor_dim = train_out_tensor.get_dimensions(); if (bottom_tensor_dim.size() != 2 || top_tensor_dim.size() != 2) { HCTR_OWN_THROW(Error_t::WrongInput, "input or output tensor doesn't has two dimensions"); } size_t m = bottom_tensor_dim[0]; size_t n = top_tensor_dim[1]; size_t k = bottom_tensor_dim[1]; std::vector<size_t> kernel_dim = {k, n}; std::vector<size_t> bias_dim = {1, n}; std::vector<size_t> identity_dim = {1, m}; { Tensor2<float> tensor; master_weights_buff->reserve(kernel_dim, &tensor); weights_.push_back(tensor); } { Tensor2<float> tensor; master_weights_buff->reserve(bias_dim, &tensor); weights_.push_back(tensor); } { Tensor2<__half> tensor; weights_buff->reserve(kernel_dim, &tensor); weights_half_.push_back(tensor); } { Tensor2<__half> tensor; weights_buff->reserve(bias_dim, &tensor); weights_half_.push_back(tensor); } { Tensor2<__half> tensor; weights_grad_buff->reserve(kernel_dim, &tensor); weights_grad_.push_back(tensor); } { Tensor2<__half> tensor; weights_grad_buff->reserve(bias_dim, &db_out_tensor); weights_grad_.push_back(db_out_tensor); } blobs_buff->reserve(identity_dim, &identity_tensor_); train_in_tensor_ = train_in_tensor; // if (pos_ == FcPosition_t::Head || pos_ == FcPosition_t::Isolated) { // // mask_in_tensor_ = train_in_tensor; // } else { 
mask_in_tensor_ = mask_in_tensor; dRelu_in_tensor_ = dRelu_in_tensor; db_in_tensor_ = db_in_tensor; // } train_out_tensor_ = train_out_tensor; mask_out_tensor_ = mask_out_tensor; dRelu_out_tensor_ = dRelu_out_tensor; db_out_tensor_ = db_out_tensor; blobs_buff->reserve(kernel_dim, &bias_grad_tensor_); std::vector<size_t> mask_dim = {m, n}; blobs_buff->reserve(mask_dim, &mask_in_tensor_temp_); if (async_mlp_wgrad_) cublas_handle_wgrad_ = gpu_resource->get_cublas_handle_wgrad(); else cublas_handle_wgrad_ = gpu_resource->get_cublas_handle(); } void FusedReluBiasFullyConnectedLayer::initialize() { CudaDeviceContext context(get_device_id()); HCTR_LIB_THROW(cudaEventCreate(&event_overlap_)); event_overlap_created_ = true; // TODO: We need different bottom desc based on is_train or not const auto& bottom_tensor_dim = get_bottom_tensor_fprop(true).get_dimensions(); const auto& top_tensor_dim = train_out_tensor_.get_dimensions(); __half* identity = identity_tensor_.get_ptr(); int m = bottom_tensor_dim[0]; int n = top_tensor_dim[1]; int k = bottom_tensor_dim[1]; initialize_array<<<(m - 1) / 1024 + 1, 1024, 0, get_gpu().get_stream()>>>(identity, m, __float2half(1.0f)); HCTR_LIB_THROW(cublasLtMatmulDescCreate(&cublas_op_desc_, CUBLAS_COMPUTE_32F, CUDA_R_32F)); cublasOperation_t trans = CUBLAS_OP_N; HCTR_LIB_THROW(cublasLtMatmulDescSetAttribute(cublas_op_desc_, CUBLASLT_MATMUL_DESC_TRANSA, &trans, sizeof(trans))); HCTR_LIB_THROW(cublasLtMatmulDescSetAttribute(cublas_op_desc_, CUBLASLT_MATMUL_DESC_TRANSB, &trans, sizeof(trans))); cublasLtEpilogue_t epi = CUBLASLT_EPILOGUE_RELU_AUX_BIAS; if (act_ == Activation_t::None) epi = CUBLASLT_EPILOGUE_BIAS; HCTR_LIB_THROW(cublasLtMatmulDescSetAttribute(cublas_op_desc_, CUBLASLT_MATMUL_DESC_EPILOGUE, &epi, sizeof(epi))); const __half* bias = weights_half_[1].get_ptr(); HCTR_LIB_THROW(cublasLtMatmulDescSetAttribute(cublas_op_desc_, CUBLASLT_MATMUL_DESC_BIAS_POINTER, &bias, sizeof(bias))); if (act_ != Activation_t::None) { __half* reluMask = mask_out_tensor_.get_ptr(); cublasLtMatmulDescSetAttribute(cublas_op_desc_, CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER, &reluMask, sizeof(reluMask)); long reluMaskLd = n; cublasLtMatmulDescSetAttribute(cublas_op_desc_, CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_LD, &reluMaskLd, sizeof(reluMaskLd)); } HCTR_LIB_THROW(cublasLtMatrixLayoutCreate(&cublas_kernel_desc_, CUDA_R_16F, n, k, n)); HCTR_LIB_THROW(cublasLtMatrixLayoutCreate(&cublas_bottom_desc_, CUDA_R_16F, k, m, k)); HCTR_LIB_THROW(cublasLtMatrixLayoutCreate(&cublas_top_desc_, CUDA_R_16F, n, m, n)); HCTR_LIB_THROW(cublasLtMatmulPreferenceCreate(&cublas_preference_)); cublaslt_workspace_size_ = 1024 * 1024 * 16; // Set it to 16MB for now HCTR_LIB_THROW(cudaMalloc(&cublaslt_workspace_, cublaslt_workspace_size_)); HCTR_LIB_THROW(cublasLtMatmulPreferenceSetAttribute( cublas_preference_, CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES, &cublaslt_workspace_size_, sizeof(cublaslt_workspace_size_))); uint32_t pointer_mode = CUBLASLT_POINTER_MODE_MASK_HOST; HCTR_LIB_THROW(cublasLtMatmulPreferenceSetAttribute(cublas_preference_, CUBLASLT_MATMUL_PREF_POINTER_MODE_MASK, &pointer_mode, sizeof(pointer_mode))); // By default set algo to best estimated heurstic cublasLtMatmulHeuristicResult_t heuristic_result; int returned_res = 0; HCTR_LIB_THROW(cublasLtMatmulAlgoGetHeuristic( get_gpu().get_cublaslt_handle(), cublas_op_desc_, cublas_kernel_desc_, cublas_bottom_desc_, cublas_top_desc_, cublas_top_desc_, cublas_preference_, 1, &heuristic_result, &returned_res)); memcpy(&falgo_k_, &heuristic_result.algo, 
sizeof(falgo_k_)); if (returned_res == 0) { HCTR_LIB_THROW(CUBLAS_STATUS_NOT_SUPPORTED); } initialize_dgrad(); initialize_wgrad(); } void FusedReluBiasFullyConnectedLayer::initialize_dgrad() { // TODO: We need different bottom desc based on is_train or not const auto& bottom_tensor_dim = get_bottom_tensor_fprop(true).get_dimensions(); const auto& top_tensor_dim = train_out_tensor_.get_dimensions(); size_t m = bottom_tensor_dim[0]; size_t n = top_tensor_dim[1]; size_t k = bottom_tensor_dim[1]; HCTR_LIB_THROW(cublasLtMatmulDescCreate(&cublas_op_desc_bprop_, CUBLAS_COMPUTE_32F, CUDA_R_32F)); cublasOperation_t transA = CUBLAS_OP_T; cublasOperation_t transB = CUBLAS_OP_N; HCTR_LIB_THROW(cublasLtMatmulDescSetAttribute(cublas_op_desc_bprop_, CUBLASLT_MATMUL_DESC_TRANSA, &transA, sizeof(transA))); HCTR_LIB_THROW(cublasLtMatmulDescSetAttribute(cublas_op_desc_bprop_, CUBLASLT_MATMUL_DESC_TRANSB, &transB, sizeof(transB))); if (pos_ == FcPosition_t::Head || pos_ == FcPosition_t::Isolated) { cublasLtEpilogue_t epi = CUBLASLT_EPILOGUE_DEFAULT; HCTR_LIB_THROW(cublasLtMatmulDescSetAttribute( cublas_op_desc_bprop_, CUBLASLT_MATMUL_DESC_EPILOGUE, &epi, sizeof(epi))); } else if (pos_ == FcPosition_t::Body || pos_ == FcPosition_t::Tail) { cublasLtEpilogue_t epi = CUBLASLT_EPILOGUE_DRELU_BGRAD; cublasLtMatmulDescSetAttribute(cublas_op_desc_bprop_, CUBLASLT_MATMUL_DESC_EPILOGUE, &epi, sizeof(epi)); __half* bgrad = db_in_tensor_.get_ptr(); cublasLtMatmulDescSetAttribute(cublas_op_desc_bprop_, CUBLASLT_MATMUL_DESC_BIAS_POINTER, &bgrad, sizeof(bgrad)); __half* reluMask = mask_in_tensor_.get_ptr(); cublasLtMatmulDescSetAttribute(cublas_op_desc_bprop_, CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER, &reluMask, sizeof(reluMask)); long reluMaskLd = k; cublasLtMatmulDescSetAttribute(cublas_op_desc_bprop_, CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_LD, &reluMaskLd, sizeof(reluMaskLd)); } HCTR_LIB_THROW(cublasLtMatrixLayoutCreate(&cublas_dRelu_top_desc_, CUDA_R_16F, n, m, n)); HCTR_LIB_THROW(cublasLtMatrixLayoutCreate(&cublas_dRelu_bottom_desc_, CUDA_R_16F, k, m, k)); HCTR_LIB_THROW(cublasLtMatmulPreferenceCreate(&cublas_preference_dRelu_)); cublaslt_workspace_size_ = 1024 * 1024 * 8; // Set it to 8MB for now HCTR_LIB_THROW(cudaMalloc(&cublaslt_workspace_dRelu_, cublaslt_workspace_size_)); HCTR_LIB_THROW(cublasLtMatmulPreferenceSetAttribute( cublas_preference_dRelu_, CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES, &cublaslt_workspace_size_, sizeof(cublaslt_workspace_size_))); uint32_t pointer_mode = CUBLASLT_POINTER_MODE_MASK_HOST; HCTR_LIB_THROW(cublasLtMatmulPreferenceSetAttribute(cublas_preference_dRelu_, CUBLASLT_MATMUL_PREF_POINTER_MODE_MASK, &pointer_mode, sizeof(pointer_mode))); // By default set algo to best estimated heurstic cublasLtMatmulHeuristicResult_t heuristic_result; int returned_res = 0; HCTR_LIB_THROW(cublasLtMatmulAlgoGetHeuristic( get_gpu().get_cublaslt_handle(), cublas_op_desc_bprop_, cublas_kernel_desc_, cublas_dRelu_top_desc_, cublas_dRelu_bottom_desc_, cublas_dRelu_bottom_desc_, cublas_preference_dRelu_, 1, &heuristic_result, &returned_res)); memcpy(&balgo_dRelu_, &heuristic_result.algo, sizeof(balgo_dRelu_)); if (returned_res == 0) { HCTR_LIB_THROW(CUBLAS_STATUS_NOT_SUPPORTED); } } void FusedReluBiasFullyConnectedLayer::initialize_wgrad() { // TODO: We need different bottom desc based on is_train or not const auto& bottom_tensor_dim = get_bottom_tensor_fprop(true).get_dimensions(); const auto& top_tensor_dim = train_out_tensor_.get_dimensions(); size_t m = bottom_tensor_dim[0]; size_t n = top_tensor_dim[1]; size_t 
k = bottom_tensor_dim[1]; HCTR_LIB_THROW(cublasLtMatmulDescCreate(&cublas_op_desc_wgrad_, CUBLAS_COMPUTE_32F, CUDA_R_32F)); cublasOperation_t transA = CUBLAS_OP_N; cublasOperation_t transB = CUBLAS_OP_T; HCTR_LIB_THROW(cublasLtMatmulDescSetAttribute(cublas_op_desc_wgrad_, CUBLASLT_MATMUL_DESC_TRANSA, &transA, sizeof(transA))); HCTR_LIB_THROW(cublasLtMatmulDescSetAttribute(cublas_op_desc_wgrad_, CUBLASLT_MATMUL_DESC_TRANSB, &transB, sizeof(transB))); cublasLtEpilogue_t epi; if (pos_ == FcPosition_t::Tail || pos_ == FcPosition_t::Isolated) { epi = CUBLASLT_EPILOGUE_BGRADA; __half* bgrad = db_out_tensor_.get_ptr(); cublasLtMatmulDescSetAttribute(cublas_op_desc_wgrad_, CUBLASLT_MATMUL_DESC_BIAS_POINTER, &bgrad, sizeof(bgrad)); } else { epi = CUBLASLT_EPILOGUE_DEFAULT; } HCTR_LIB_THROW(cublasLtMatmulDescSetAttribute(cublas_op_desc_wgrad_, CUBLASLT_MATMUL_DESC_EPILOGUE, &epi, sizeof(epi))); HCTR_LIB_THROW(cublasLtMatmulPreferenceCreate(&cublas_preference_wgrad_)); cublaslt_workspace_size_ = 1024 * 1024 * 8; // Set it to 8MB for now HCTR_LIB_THROW(cudaMalloc(&cublaslt_workspace_wgrad_, cublaslt_workspace_size_)); HCTR_LIB_THROW(cublasLtMatmulPreferenceSetAttribute( cublas_preference_wgrad_, CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES, &cublaslt_workspace_size_, sizeof(cublaslt_workspace_size_))); uint32_t pointer_mode = CUBLASLT_POINTER_MODE_MASK_HOST; HCTR_LIB_THROW(cublasLtMatmulPreferenceSetAttribute(cublas_preference_wgrad_, CUBLASLT_MATMUL_PREF_POINTER_MODE_MASK, &pointer_mode, sizeof(pointer_mode))); // By default set algo to best estimated heurstic cublasLtMatmulHeuristicResult_t heuristic_result; int returned_res = 0; HCTR_LIB_THROW(cublasLtMatmulAlgoGetHeuristic( get_gpu().get_cublaslt_handle(), cublas_op_desc_wgrad_, cublas_dRelu_top_desc_, cublas_dRelu_bottom_desc_, cublas_kernel_desc_, cublas_kernel_desc_, cublas_preference_wgrad_, 1, &heuristic_result, &returned_res)); memcpy(&balgo_wgrad_, &heuristic_result.algo, sizeof(balgo_wgrad_)); if (returned_res == 0) { HCTR_LIB_THROW(CUBLAS_STATUS_NOT_SUPPORTED); } } void FusedReluBiasFullyConnectedLayer::fprop(bool is_train) { CudaDeviceContext context(get_device_id()); PROFILE_RECORD("fused_relu_bias_fully_connected.fprop.start", get_gpu().get_stream()); const __half* kernel = weights_half_[0].get_ptr(); const __half* bias = weights_half_[1].get_ptr(); const __half* bottom = get_bottom_tensor_fprop(is_train).get_ptr(); __half* top_fprop = train_out_tensor_.get_ptr(); __half* mask_out = mask_out_tensor_.get_ptr(); const auto& bottom_tensor_dim = get_bottom_tensor_fprop(is_train).get_dimensions(); const auto& top_tensor_dim = train_out_tensor_.get_dimensions(); size_t m = bottom_tensor_dim[0]; size_t n = top_tensor_dim[1]; size_t k = bottom_tensor_dim[1]; const float alpha = 1.0f; const float beta = 0.0f; PROFILE_RECORD("fused_relu_bias_fully_connected.fprop.cublasLtMatmul.start", get_gpu().get_stream()); HCTR_LIB_THROW(cublasLtMatmul( get_gpu().get_cublaslt_handle(), cublas_op_desc_, &alpha, kernel, cublas_kernel_desc_, bottom, cublas_bottom_desc_, &beta, top_fprop, cublas_top_desc_, top_fprop, cublas_top_desc_, &falgo_k_, cublaslt_workspace_, cublaslt_workspace_size_, get_gpu().get_stream())); PROFILE_RECORD("fused_relu_bias_fully_connected.fprop.cublasLtMatmul.stop", get_gpu().get_stream()); if ((pos_ == FcPosition_t::Tail || pos_ == FcPosition_t::Isolated) && act_ != Activation_t::None) { size_t len = train_out_tensor_.get_num_elements(); HCTR_LIB_THROW(cudaMemcpyAsync(mask_out, top_fprop, len * sizeof(__half), cudaMemcpyDeviceToDevice, 
get_gpu().get_stream())); } PROFILE_RECORD("fused_relu_bias_fully_connected.fprop.stop", get_gpu().get_stream()); #ifndef NDEBUG cudaDeviceSynchronize(); HCTR_LIB_THROW(cudaGetLastError()); #endif } void FusedReluBiasFullyConnectedLayer::bprop() { CudaDeviceContext context(get_device_id()); PROFILE_RECORD("fused_relu_bias_fully_connected.bprop.start", get_gpu().get_stream()); const __half* kernel = weights_half_[0].get_ptr(); const __half* train_out = train_out_tensor_.get_ptr(); __half* mask_out = mask_out_tensor_.get_ptr(); __half* kernel_grad = weights_grad_[0].get_ptr(); __half* bias_grad = weights_grad_[1].get_ptr(); __half* bottom = get_bottom_tensor_fprop(true).get_ptr(); //__half* bottom_bprop = get_bottom_tensor_bprop(true).get_ptr(); float* bias_grad_float = bias_grad_tensor_.get_ptr(); __half* dRelu_top = dRelu_out_tensor_.get_ptr(); const __half* identity = identity_tensor_.get_ptr(); const auto& bottom_tensor_dim = get_bottom_tensor_fprop(true).get_dimensions(); const auto& top_tensor_dim = train_out_tensor_.get_dimensions(); int m = bottom_tensor_dim[0]; int n = top_tensor_dim[1]; int k = bottom_tensor_dim[1]; const float alpha = 1.0f; const float beta_k = 1.0f; const float beta_x = 0.0f; const float beta_b = 0.0f; PROFILE_RECORD("fused_relu_bias_fully_connected.bprop.dRelu.start", get_gpu().get_stream()); // dRelu if (pos_ == FcPosition_t::Tail || pos_ == FcPosition_t::Isolated) { if (act_ != Activation_t::None) { if ((m * n) % 4 == 0) { reverse_relu_kernel<<<(m * n / 4 - 1) / 1024 + 1, 1024, 0, get_gpu().get_stream()>>>( dRelu_top, mask_out, train_out, m * n / 4); } else reverse_relu_kernel_not_aligned<<<(m * n - 1) / 1024 + 1, 1024, 0, get_gpu().get_stream()>>>(dRelu_top, mask_out, train_out, m * n); } else dRelu_top = train_out_tensor_.get_ptr(); } PROFILE_RECORD("fused_relu_bias_fully_connected.bprop.dRelu.stop", get_gpu().get_stream()); // wait for dRelu if (async_mlp_wgrad_) { HCTR_LIB_THROW(cudaEventRecord(event_overlap_, get_gpu().get_stream())); HCTR_LIB_THROW(cudaStreamWaitEvent(get_gpu().get_comp_overlap_stream(), event_overlap_)); } // dgrad if (!skip_dgrad_) { __half* bottom_bprop; if (head_mask_in_) { bottom_bprop = mask_in_tensor_.get_ptr(); } else { bottom_bprop = train_in_tensor_.get_ptr(); } if (pos_ == FcPosition_t::Body || pos_ == FcPosition_t::Tail) { bottom_bprop = dRelu_in_tensor_.get_ptr(); } HCTR_LIB_THROW(cublasLtMatmul( get_gpu().get_cublaslt_handle(), cublas_op_desc_bprop_, &alpha, kernel, cublas_kernel_desc_, dRelu_top, cublas_dRelu_top_desc_, &beta_x, bottom_bprop, cublas_dRelu_bottom_desc_, bottom_bprop, cublas_dRelu_bottom_desc_, &balgo_dRelu_, cublaslt_workspace_dRelu_, cublaslt_workspace_size_, get_gpu().get_stream())); } // bgrad+wgrad if (n == 1) { HCTR_LIB_THROW(cublasGemmEx(cublas_handle_wgrad_, CUBLAS_OP_N, CUBLAS_OP_N, n, 1, m, &alpha, dRelu_top, CUDA_R_16F, n, identity, CUDA_R_16F, m, &beta_b, bias_grad, CUDA_R_16F, n, CUDA_R_32F, balgo_b_)); HCTR_LIB_THROW(cublasGemmEx(cublas_handle_wgrad_, CUBLAS_OP_N, CUBLAS_OP_T, n, k, m, &alpha, dRelu_top, CUDA_R_16F, n, bottom, CUDA_R_16F, k, &beta_k, kernel_grad, CUDA_R_16F, n, CUDA_R_32F, balgo_k_)); } else { HCTR_LIB_THROW(cublasLtMatmul( get_gpu().get_cublaslt_handle(), cublas_op_desc_wgrad_, &alpha, dRelu_top, cublas_dRelu_top_desc_, bottom, cublas_dRelu_bottom_desc_, &beta_k, kernel_grad, cublas_kernel_desc_, kernel_grad, cublas_kernel_desc_, &balgo_wgrad_, cublaslt_workspace_wgrad_, cublaslt_workspace_size_, get_gpu().get_comp_overlap_stream())); } if (async_mlp_wgrad_ && pos_ == 
FcPosition_t::Head) { get_gpu().set_wgrad_event_sync(get_gpu().get_comp_overlap_stream()); } PROFILE_RECORD("fused_relu_bias_fully_connected.bprop.cublasGemmEx_2.stop", get_gpu().get_stream()); PROFILE_RECORD("fused_relu_bias_fully_connected.bprop.stop", get_gpu().get_stream()); #ifndef NDEBUG cudaDeviceSynchronize(); HCTR_LIB_THROW(cudaGetLastError()); #endif } void FusedReluBiasFullyConnectedLayer::search_algorithm() { // Set to the CUDA device where this layer assigned to CudaDeviceContext context(get_device_id()); const size_t repeat_num = 100; const int max_algo_count = 16; // Device Tensors to be used __half* bottom = get_bottom_tensor_fprop(true).get_ptr(); __half* top = train_out_tensor_.get_ptr(); __half* kernel = weights_half_[0].get_ptr(); __half* bias = weights_half_[1].get_ptr(); __half* kernel_grad = weights_grad_[0].get_ptr(); __half* bias_grad = weights_grad_[1].get_ptr(); __half* identity = identity_tensor_.get_ptr(); // Tensor dim const auto& bottom_tensor_dim = get_bottom_tensor_fprop(true).get_dimensions(); const auto& top_tensor_dim = train_out_tensor_.get_dimensions(); int m = bottom_tensor_dim[0]; int n = top_tensor_dim[1]; int k = bottom_tensor_dim[1]; // Record time for each algorithm float shortestTime = std::numeric_limits<float>::max(); float time; cudaEvent_t start, stop; HCTR_LIB_THROW(cudaEventCreate(&start)); HCTR_LIB_THROW(cudaEventCreate(&stop)); cublasLtMatmulHeuristicResult_t heuristic_result[max_algo_count] = {0}; int algo_count = 0; HCTR_LIB_THROW(cublasLtMatmulAlgoGetHeuristic( get_gpu().get_cublaslt_handle(), cublas_op_desc_, cublas_kernel_desc_, cublas_bottom_desc_, cublas_top_desc_, cublas_top_desc_, cublas_preference_, max_algo_count, heuristic_result, &algo_count)); if (algo_count == 0) { HCTR_LIB_THROW(CUBLAS_STATUS_NOT_SUPPORTED); } // if(get_device_id()==0) HCTR_LOG(INFO, WORLD, "M: %d, N: %d, K: %d\n", m, n, k); for (int algoIdx = 0; algoIdx < algo_count; algoIdx++) { cublasStatus_t status = CUBLAS_STATUS_SUCCESS; const float alpha = 1.0f; const float beta = 0.0f; HCTR_LIB_THROW(cudaEventRecord(start, get_gpu().get_stream())); for (size_t i = 0; i < repeat_num && status == CUBLAS_STATUS_SUCCESS; ++i) { status = cublasLtMatmul(get_gpu().get_cublaslt_handle(), cublas_op_desc_, &alpha, kernel, cublas_kernel_desc_, bottom, cublas_bottom_desc_, &beta, top, cublas_top_desc_, top, cublas_top_desc_, &heuristic_result[algoIdx].algo, cublaslt_workspace_, cublaslt_workspace_size_, get_gpu().get_stream()); } HCTR_LIB_THROW(cudaEventRecord(stop, get_gpu().get_stream())); HCTR_LIB_THROW(cudaEventSynchronize(stop)); HCTR_LIB_THROW(cudaEventElapsedTime(&time, start, stop)); // Avg Time(ms) for this alorithm for fprop GEMM time = time / repeat_num; // Skip if the algorithm is supported for fprop configuration if (status != CUBLAS_STATUS_SUCCESS) { // HCTR_LOG(INFO, WORLD, "The algorithms %d is not supported for fprop, skipped.\n", // testAlgo); continue; } // if(get_device_id()==0) HCTR_LOG(INFO, WORLD, "Algo: %d, wavesCount: %f, time: %f\n", // (int)heuristic_result[algoIdx].algo, // heuristic_result[algoIdx].wavesCount, // time); // Record the optimal time and algorithm if (time < shortestTime) { shortestTime = time; memcpy(&falgo_k_, &heuristic_result[algoIdx].algo, sizeof(falgo_k_)); // if(get_device_id()==0) HCTR_LOG(INFO, WORLD, "Picked algorithm: %d", // heuristic_result[algoIdx].algo); } } // dRelu in backward pass // Reset shortestTime shortestTime = std::numeric_limits<float>::max(); cublasLtMatmulHeuristicResult_t 
heuristic_result_dRelu[max_algo_count] = {0}; int algo_count_dRelu = 0; HCTR_LIB_THROW(cublasLtMatmulAlgoGetHeuristic( get_gpu().get_cublaslt_handle(), cublas_op_desc_bprop_, cublas_kernel_desc_, cublas_dRelu_top_desc_, cublas_dRelu_bottom_desc_, cublas_dRelu_bottom_desc_, cublas_preference_dRelu_, max_algo_count, heuristic_result_dRelu, &algo_count_dRelu)); if (algo_count_dRelu == 0) { HCTR_LIB_THROW(CUBLAS_STATUS_NOT_SUPPORTED); } for (int algoIdx = 0; algoIdx < algo_count_dRelu; algoIdx++) { cublasStatus_t status = CUBLAS_STATUS_SUCCESS; const float alpha = 1.0f; const float beta = 0.0f; HCTR_LIB_THROW(cudaEventRecord(start, get_gpu().get_stream())); for (size_t i = 0; i < repeat_num && status == CUBLAS_STATUS_SUCCESS; ++i) { status = cublasLtMatmul(get_gpu().get_cublaslt_handle(), cublas_op_desc_bprop_, &alpha, kernel, cublas_kernel_desc_, top, cublas_dRelu_top_desc_, &beta, bottom, cublas_dRelu_bottom_desc_, bottom, cublas_dRelu_bottom_desc_, &heuristic_result_dRelu[algoIdx].algo, cublaslt_workspace_dRelu_, cublaslt_workspace_size_, get_gpu().get_stream()); } HCTR_LIB_THROW(cudaEventRecord(stop, get_gpu().get_stream())); HCTR_LIB_THROW(cudaEventSynchronize(stop)); HCTR_LIB_THROW(cudaEventElapsedTime(&time, start, stop)); // Avg Time(ms) for this alorithm for fprop GEMM time = time / repeat_num; // Skip if the algorithm is supported for fprop configuration if (status != CUBLAS_STATUS_SUCCESS) { // HCTR_LOG(INFO, WORLD, "The algorithms %d is not supported for fprop, skipped.\n", // testAlgo); continue; } // Record the optimal time and algorithm if (time < shortestTime) { shortestTime = time; memcpy(&balgo_dRelu_, &heuristic_result_dRelu[algoIdx].algo, sizeof(balgo_dRelu_)); } } // wgrad in backward pass // Reset shortestTime shortestTime = std::numeric_limits<float>::max(); cublasLtMatmulHeuristicResult_t heuristic_result_wgrad[max_algo_count] = {0}; int algo_count_wgrad = 0; HCTR_LIB_THROW(cublasLtMatmulAlgoGetHeuristic( get_gpu().get_cublaslt_handle(), cublas_op_desc_wgrad_, cublas_dRelu_top_desc_, cublas_dRelu_bottom_desc_, cublas_kernel_desc_, cublas_kernel_desc_, cublas_preference_wgrad_, max_algo_count, heuristic_result_wgrad, &algo_count_wgrad)); if (algo_count_wgrad == 0) { HCTR_LIB_THROW(CUBLAS_STATUS_NOT_SUPPORTED); } for (int algoIdx = 0; algoIdx < algo_count_wgrad; algoIdx++) { cublasStatus_t status = CUBLAS_STATUS_SUCCESS; const float alpha = 1.0f; const float beta = 1.0f; HCTR_LIB_THROW(cudaEventRecord(start, get_gpu().get_stream())); for (size_t i = 0; i < repeat_num && status == CUBLAS_STATUS_SUCCESS; ++i) { status = cublasLtMatmul(get_gpu().get_cublaslt_handle(), cublas_op_desc_wgrad_, &alpha, top, cublas_dRelu_top_desc_, bottom, cublas_dRelu_bottom_desc_, &beta, kernel, cublas_kernel_desc_, kernel, cublas_kernel_desc_, &heuristic_result_wgrad[algoIdx].algo, cublaslt_workspace_wgrad_, cublaslt_workspace_size_, get_gpu().get_stream()); } HCTR_LIB_THROW(cudaEventRecord(stop, get_gpu().get_stream())); HCTR_LIB_THROW(cudaEventSynchronize(stop)); HCTR_LIB_THROW(cudaEventElapsedTime(&time, start, stop)); // Avg Time(ms) for this alorithm for fprop GEMM time = time / repeat_num; // HCTR_LOG(INFO, WORLD, "algoIdx: %d, time: %f, shortest time: %f\n", algoIdx, time, // shortestTime); Skip if the algorithm is supported for fprop configuration if (status != CUBLAS_STATUS_SUCCESS) { // HCTR_LOG(INFO, WORLD, "The algorithms %d is not supported for fprop, skipped.\n", // testAlgo); continue; } // Record the optimal time and algorithm if (time < shortestTime) { shortestTime = time; // 
HCTR_LOG(INFO, WORLD, "wgrad cublasMatmul algoIdx: %d, time: %f\n", algoIdx, shortestTime); memcpy(&balgo_wgrad_, &heuristic_result_wgrad[algoIdx].algo, sizeof(balgo_wgrad_)); } } // Reset shortestTime shortestTime = std::numeric_limits<float>::max(); // Start, end for search const cublasGemmAlgo_t startAlgo = CUBLAS_GEMM_DEFAULT_TENSOR_OP; const cublasGemmAlgo_t endAlgo = CUBLAS_GEMM_ALGO15_TENSOR_OP; // Search all the algorithm for balgo_k_ for (int testAlgo = startAlgo; testAlgo <= endAlgo; testAlgo++) { cublasStatus_t status = CUBLAS_STATUS_SUCCESS; const float alpha = 1.0f; const float beta = 1.0f; // Record start event HCTR_LIB_THROW(cudaEventRecord(start, get_gpu().get_stream())); for (size_t i = 0; i < repeat_num && status == CUBLAS_STATUS_SUCCESS; ++i) { status = cublasGemmEx(get_gpu().get_cublas_handle(), CUBLAS_OP_N, CUBLAS_OP_T, n, k, m, &alpha, top, CUDA_R_16F, n, bottom, CUDA_R_16F, k, &beta, kernel_grad, CUDA_R_16F, n, CUDA_R_32F, static_cast<cublasGemmAlgo_t>(testAlgo)); } HCTR_LIB_THROW(cudaEventRecord(stop, get_gpu().get_stream())); HCTR_LIB_THROW(cudaEventSynchronize(stop)); HCTR_LIB_THROW(cudaEventElapsedTime(&time, start, stop)); // Avg Time(ms) for this alorithm for fprop GEMM time = time / repeat_num; // Skip if the algorithm is supported for fprop configuration if (status != CUBLAS_STATUS_SUCCESS) { // HCTR_LOG(INFO, WORLD, "The algorithms %d is not supported for bprop_W, skipped.\n", // testAlgo); continue; } // Record the optimal time and algorithm if (time < shortestTime) { shortestTime = time; // HCTR_LOG(INFO, WORLD, "wgrad cublasGemmEx algoIdx: %d, time: %f\n", testAlgo, // shortestTime); balgo_k_ = static_cast<cublasGemmAlgo_t>(testAlgo); } } // Reset shortestTime shortestTime = std::numeric_limits<float>::max(); // Search all the algorithm for balgo_b_ for (int testAlgo = startAlgo; testAlgo <= endAlgo; testAlgo++) { cublasStatus_t status = CUBLAS_STATUS_SUCCESS; const float alpha = 1.0f; const float beta = 0.0f; // Record start event HCTR_LIB_THROW(cudaEventRecord(start, get_gpu().get_stream())); for (size_t i = 0; i < repeat_num && status == CUBLAS_STATUS_SUCCESS; ++i) { status = cublasGemmEx(get_gpu().get_cublas_handle(), CUBLAS_OP_N, CUBLAS_OP_N, n, 1, m, &alpha, top, CUDA_R_16F, n, identity, CUDA_R_16F, m, &beta, bias_grad, CUDA_R_16F, n, CUDA_R_32F, static_cast<cublasGemmAlgo_t>(testAlgo)); } HCTR_LIB_THROW(cudaEventRecord(stop, get_gpu().get_stream())); HCTR_LIB_THROW(cudaEventSynchronize(stop)); HCTR_LIB_THROW(cudaEventElapsedTime(&time, start, stop)); // Avg Time(ms) for this alorithm for fprop GEMM time = time / repeat_num; // Skip if the algorithm is supported for fprop configuration if (status != CUBLAS_STATUS_SUCCESS) { // HCTR_LOG(INFO, WORLD, "The algorithms %d is not supported for bprop_W, skipped.\n", // testAlgo); continue; } // Record the optimal time and algorithm if (time < shortestTime) { shortestTime = time; balgo_b_ = static_cast<cublasGemmAlgo_t>(testAlgo); } } // Reset shortestTime shortestTime = std::numeric_limits<float>::max(); // Search all the algorithm for balgo_x_ for (int testAlgo = startAlgo; testAlgo <= endAlgo; testAlgo++) { cublasStatus_t status = CUBLAS_STATUS_SUCCESS; const __half alpha = 1.0f; const __half beta = 0.0f; // Record start event HCTR_LIB_THROW(cudaEventRecord(start, get_gpu().get_stream())); for (size_t i = 0; i < repeat_num && status == CUBLAS_STATUS_SUCCESS; ++i) { status = cublasGemmEx(get_gpu().get_cublas_handle(), CUBLAS_OP_T, CUBLAS_OP_N, k, m, n, &alpha, kernel, CUDA_R_16F, n, top, CUDA_R_16F, n, 
&beta, bottom, CUDA_R_16F, k, CUDA_R_32F, static_cast<cublasGemmAlgo_t>(testAlgo)); } HCTR_LIB_THROW(cudaEventRecord(stop, get_gpu().get_stream())); HCTR_LIB_THROW(cudaEventSynchronize(stop)); HCTR_LIB_THROW(cudaEventElapsedTime(&time, start, stop)); // Avg Time(ms) for this alorithm for fprop GEMM time = time / repeat_num; // Skip if the algorithm is supported for fprop configuration if (status != CUBLAS_STATUS_SUCCESS) { // HCTR_LOG(INFO, WORLD, "The algorithms %d is not supported for bprop_Xn, skipped.\n", // testAlgo); continue; } // Record the optimal time and algorithm if (time < shortestTime) { shortestTime = time; balgo_x_ = static_cast<cublasGemmAlgo_t>(testAlgo); } } // Print selection information // HCTR_LOG(INFO, WORLD, "The algorithm selection for falgo_k_, balgo_k_, balgo_x_ are: %d, %d and // %d.\n", // (int)falgo_k_ - CUBLAS_GEMM_DEFAULT_TENSOR_OP, // (int)balgo_k_ - CUBLAS_GEMM_DEFAULT_TENSOR_OP, // (int)balgo_x_ - CUBLAS_GEMM_DEFAULT_TENSOR_OP); // Output msg // HCTR_LOG(INFO, ROOT, "The fully-connected layer has finished choosing the algorithm for cublas // Gemm.\n"); Clean-up HCTR_LIB_THROW(cudaEventDestroy(start)); HCTR_LIB_THROW(cudaEventDestroy(stop)); } // namespace HugeCTR std::unique_ptr<DataSimulator> FusedReluBiasFullyConnectedLayer::get_uniform_initializer( const int index) { size_t bottom_dim = get_bottom_tensor_fprop(true).get_dimensions()[1]; size_t top_dim = train_out_tensor_.get_dimensions()[1]; float limit = 1.0f / ((0 == index ? bottom_dim : 0) + top_dim); return std::make_unique<UniformDataSimulator>(-1 * limit, limit); } std::unique_ptr<DataSimulator> FusedReluBiasFullyConnectedLayer::get_xavier_uniform_initializer( const int index) { size_t bottom_dim = get_bottom_tensor_fprop(true).get_dimensions()[1]; size_t top_dim = train_out_tensor_.get_dimensions()[1]; return std::make_unique<VarianceScalingSimulator>(1.f, data_simu::Mode_t::Fan_avg, data_simu::Distribution_t::Uniform, 0 == index ? bottom_dim : 0, top_dim); } std::unique_ptr<DataSimulator> FusedReluBiasFullyConnectedLayer::get_xavier_norm_initializer( const int index) { size_t bottom_dim = get_bottom_tensor_fprop(true).get_dimensions()[1]; size_t top_dim = train_out_tensor_.get_dimensions()[1]; return std::make_unique<VarianceScalingSimulator>(1.f, data_simu::Mode_t::Fan_avg, data_simu::Distribution_t::Norm, 0 == index ? bottom_dim : 0, top_dim); } std::unique_ptr<DataSimulator> FusedReluBiasFullyConnectedLayer::get_default_initializer( const int index) { size_t bottom_dim = get_bottom_tensor_fprop(true).get_dimensions()[1]; size_t top_dim = train_out_tensor_.get_dimensions()[1]; std::unique_ptr<DataSimulator> simu(nullptr); if (0 == index) { simu.reset(new VarianceScalingSimulator(1.f, data_simu::Mode_t::Fan_avg, data_simu::Distribution_t::Norm, bottom_dim, top_dim)); } else if (1 == index) { float stddev = sqrt(1.f / top_dim); simu.reset(new GaussianDataSimulator(0, stddev, -2 * stddev, 2 * stddev)); } else { HCTR_OWN_THROW(Error_t::OutOfBound, "index != {0, 1}."); } return simu; } } // namespace HugeCTR
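// ---------------------------------------------------------------------------
// Illustrative sketch (not taken from the layer code above): in bprop(),
// reverse_relu_kernel() computes dX = dY * (mask > 0) two __half2 at a time,
// where `mask` holds the forward activations copied out in fprop(). The
// standalone program below shows the same masked-gradient idea on a plain
// __half array; relu_backward_kernel is a hypothetical name for this sketch.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <cuda_fp16.h>
#include <cuda_runtime.h>

__global__ void relu_backward_kernel(__half* dX, const __half* mask,
                                     const __half* dY, int n) {
  int const i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    // The gradient passes through only where the forward activation was > 0.
    const __half zero = __float2half(0.f);
    dX[i] = __hgt(mask[i], zero) ? dY[i] : zero;
  }
}

int main() {
  constexpr int n = 4;
  float const mask_f[n] = {1.5f, 0.f, 2.f, 0.f};  // forward ReLU outputs
  __half h_mask[n], h_dY[n], h_dX[n];
  for (int i = 0; i < n; ++i) {
    h_mask[i] = __float2half(mask_f[i]);
    h_dY[i] = __float2half(10.f);
  }
  __half *d_mask, *d_dY, *d_dX;
  cudaMalloc(&d_mask, n * sizeof(__half));
  cudaMalloc(&d_dY, n * sizeof(__half));
  cudaMalloc(&d_dX, n * sizeof(__half));
  cudaMemcpy(d_mask, h_mask, n * sizeof(__half), cudaMemcpyHostToDevice);
  cudaMemcpy(d_dY, h_dY, n * sizeof(__half), cudaMemcpyHostToDevice);
  relu_backward_kernel<<<1, 32>>>(d_dX, d_mask, d_dY, n);
  cudaMemcpy(h_dX, d_dX, n * sizeof(__half), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n; ++i) printf("%g ", __half2float(h_dX[i]));  // 10 0 10 0
  printf("\n");
  cudaFree(d_mask);
  cudaFree(d_dY);
  cudaFree(d_dX);
  return 0;
}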
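// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the cufinufft sources that follow): every
// allocation/free wrapper below saves the caller's CUDA device with
// cudaGetDevice(), switches to d_plan->opts.gpu_device_id, and restores the
// original device before returning. A small RAII guard expressing the same
// pattern, under the hypothetical name ScopedCudaDevice, could look like this:
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>

class ScopedCudaDevice {
 public:
  explicit ScopedCudaDevice(int target_device) {
    cudaGetDevice(&original_device_);  // remember the caller's device
    cudaSetDevice(target_device);      // switch to the plan's device
  }
  ~ScopedCudaDevice() { cudaSetDevice(original_device_); }  // restore always
  ScopedCudaDevice(const ScopedCudaDevice&) = delete;
  ScopedCudaDevice& operator=(const ScopedCudaDevice&) = delete;

 private:
  int original_device_ = 0;
};

int main() {
  // Usage sketch: one guard at function entry replaces the explicit
  // cudaGetDevice()/cudaSetDevice() pairs used in the wrappers below.
  {
    ScopedCudaDevice guard(0);
    // ... cudaMalloc / cudaFree on device 0 ...
  }
  return 0;
}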
#include <cuComplex.h> #include "memtransfer.h" using namespace std; int ALLOCGPUMEM1D_PLAN(CUFINUFFT_PLAN d_plan) /* wrapper for gpu memory allocation in "plan" stage. Melody Shih 11/21/21 */ { // Mult-GPU support: set the CUDA Device ID: int orig_gpu_device_id; cudaGetDevice(& orig_gpu_device_id); cudaSetDevice(d_plan->opts.gpu_device_id); int nf1 = d_plan->nf1; int maxbatchsize = d_plan->maxbatchsize; d_plan->byte_now=0; switch(d_plan->opts.gpu_method) { case 1: { if(d_plan->opts.gpu_sort){ int numbins = ceil((FLT) nf1/d_plan->opts.gpu_binsizex); checkCudaErrors(cudaMalloc(&d_plan->binsize,numbins*sizeof(int))); checkCudaErrors(cudaMalloc(&d_plan->binstartpts,numbins*sizeof(int))); } } break; case 2: { int numbins = ceil((FLT) nf1/d_plan->opts.gpu_binsizex); checkCudaErrors(cudaMalloc(&d_plan->numsubprob,numbins*sizeof(int))); checkCudaErrors(cudaMalloc(&d_plan->binsize,numbins*sizeof(int))); checkCudaErrors(cudaMalloc(&d_plan->binstartpts,numbins*sizeof(int))); checkCudaErrors(cudaMalloc(&d_plan->subprobstartpts,(numbins+1)*sizeof(int))); } break; default: cerr << "err: invalid method " << endl; } if(!d_plan->opts.gpu_spreadinterponly){ checkCudaErrors(cudaMalloc(&d_plan->fw, maxbatchsize*nf1*sizeof(CUCPX))); checkCudaErrors(cudaMalloc(&d_plan->fwkerhalf1,(nf1/2+1)*sizeof(FLT))); } // Multi-GPU support: reset the device ID cudaSetDevice(orig_gpu_device_id); return 0; } int ALLOCGPUMEM1D_NUPTS(CUFINUFFT_PLAN d_plan) /* wrapper for gpu memory allocation in "setNUpts" stage. Melody Shih 11/21/21 */ { // Mult-GPU support: set the CUDA Device ID: int orig_gpu_device_id; cudaGetDevice(& orig_gpu_device_id); cudaSetDevice(d_plan->opts.gpu_device_id); int M = d_plan->M; if(d_plan->sortidx ) checkCudaErrors(cudaFree(d_plan->sortidx)); if(d_plan->idxnupts) checkCudaErrors(cudaFree(d_plan->idxnupts)); switch(d_plan->opts.gpu_method) { case 1: { if(d_plan->opts.gpu_sort) checkCudaErrors(cudaMalloc(&d_plan->sortidx, M*sizeof(int))); checkCudaErrors(cudaMalloc(&d_plan->idxnupts,M*sizeof(int))); } break; case 2: case 3: { checkCudaErrors(cudaMalloc(&d_plan->idxnupts,M*sizeof(int))); checkCudaErrors(cudaMalloc(&d_plan->sortidx, M*sizeof(int))); } break; default: cerr<<"err: invalid method" << endl; } // Multi-GPU support: reset the device ID cudaSetDevice(orig_gpu_device_id); return 0; } void FREEGPUMEMORY1D(CUFINUFFT_PLAN d_plan) /* wrapper for freeing gpu memory. Melody Shih 11/21/21 */ { // Mult-GPU support: set the CUDA Device ID: int orig_gpu_device_id; cudaGetDevice(& orig_gpu_device_id); cudaSetDevice(d_plan->opts.gpu_device_id); if(!d_plan->opts.gpu_spreadinterponly){ checkCudaErrors(cudaFree(d_plan->fw)); checkCudaErrors(cudaFree(d_plan->fwkerhalf1)); } switch(d_plan->opts.gpu_method) { case 1: { if(d_plan->opts.gpu_sort){ checkCudaErrors(cudaFree(d_plan->idxnupts)); checkCudaErrors(cudaFree(d_plan->sortidx)); checkCudaErrors(cudaFree(d_plan->binsize)); checkCudaErrors(cudaFree(d_plan->binstartpts)); }else{ checkCudaErrors(cudaFree(d_plan->idxnupts)); } } break; case 2: { checkCudaErrors(cudaFree(d_plan->idxnupts)); checkCudaErrors(cudaFree(d_plan->sortidx)); checkCudaErrors(cudaFree(d_plan->numsubprob)); checkCudaErrors(cudaFree(d_plan->binsize)); checkCudaErrors(cudaFree(d_plan->binstartpts)); checkCudaErrors(cudaFree(d_plan->subprobstartpts)); checkCudaErrors(cudaFree(d_plan->subprob_to_bin)); } break; } // Multi-GPU support: reset the device ID cudaSetDevice(orig_gpu_device_id); } int ALLOCGPUMEM2D_PLAN(CUFINUFFT_PLAN d_plan) /* wrapper for gpu memory allocation in "plan" stage. 
Melody Shih 07/25/19 */ { // Mult-GPU support: set the CUDA Device ID: int orig_gpu_device_id; cudaGetDevice(& orig_gpu_device_id); cudaSetDevice(d_plan->opts.gpu_device_id); int nf1 = d_plan->nf1; int nf2 = d_plan->nf2; int maxbatchsize = d_plan->maxbatchsize; d_plan->byte_now=0; switch(d_plan->opts.gpu_method) { case 1: { if(d_plan->opts.gpu_sort){ int numbins[2]; numbins[0] = ceil((FLT) nf1/d_plan->opts.gpu_binsizex); numbins[1] = ceil((FLT) nf2/d_plan->opts.gpu_binsizey); checkCudaErrors(cudaMalloc(&d_plan->binsize,numbins[0]* numbins[1]*sizeof(int))); checkCudaErrors(cudaMalloc(&d_plan->binstartpts,numbins[0]* numbins[1]*sizeof(int))); } } break; case 2: { int numbins[2]; numbins[0] = ceil((FLT) nf1/d_plan->opts.gpu_binsizex); numbins[1] = ceil((FLT) nf2/d_plan->opts.gpu_binsizey); checkCudaErrors(cudaMalloc(&d_plan->numsubprob,numbins[0]* numbins[1]*sizeof(int))); checkCudaErrors(cudaMalloc(&d_plan->binsize,numbins[0]* numbins[1]*sizeof(int))); checkCudaErrors(cudaMalloc(&d_plan->binstartpts,numbins[0]* numbins[1]*sizeof(int))); checkCudaErrors(cudaMalloc(&d_plan->subprobstartpts, (numbins[0]*numbins[1]+1)*sizeof(int))); } break; case 3: { int numbins[2]; numbins[0] = ceil((FLT) nf1/d_plan->opts.gpu_binsizex); numbins[1] = ceil((FLT) nf2/d_plan->opts.gpu_binsizey); checkCudaErrors(cudaMalloc(&d_plan->finegridsize,nf1*nf2* sizeof(int))); checkCudaErrors(cudaMalloc(&d_plan->fgstartpts,nf1*nf2* sizeof(int))); checkCudaErrors(cudaMalloc(&d_plan->numsubprob,numbins[0]* numbins[1]*sizeof(int))); checkCudaErrors(cudaMalloc(&d_plan->binsize,numbins[0]* numbins[1]*sizeof(int))); checkCudaErrors(cudaMalloc(&d_plan->binstartpts,numbins[0]* numbins[1]*sizeof(int))); checkCudaErrors(cudaMalloc(&d_plan->subprobstartpts, (numbins[0]*numbins[1]+1)*sizeof(int))); } break; default: cerr << "err: invalid method " << endl; } if(!d_plan->opts.gpu_spreadinterponly){ checkCudaErrors(cudaMalloc(&d_plan->fw, maxbatchsize*nf1*nf2* sizeof(CUCPX))); checkCudaErrors(cudaMalloc(&d_plan->fwkerhalf1,(nf1/2+1)*sizeof(FLT))); checkCudaErrors(cudaMalloc(&d_plan->fwkerhalf2,(nf2/2+1)*sizeof(FLT))); } cudaStream_t* streams =(cudaStream_t*) malloc(d_plan->opts.gpu_nstreams* sizeof(cudaStream_t)); for(int i=0; i<d_plan->opts.gpu_nstreams; i++) checkCudaErrors(cudaStreamCreate(&streams[i])); d_plan->streams = streams; // Multi-GPU support: reset the device ID cudaSetDevice(orig_gpu_device_id); return 0; } int ALLOCGPUMEM2D_NUPTS(CUFINUFFT_PLAN d_plan) /* wrapper for gpu memory allocation in "setNUpts" stage. Melody Shih 07/25/19 */ { // Mult-GPU support: set the CUDA Device ID: int orig_gpu_device_id; cudaGetDevice(& orig_gpu_device_id); cudaSetDevice(d_plan->opts.gpu_device_id); int M = d_plan->M; if(d_plan->sortidx ) checkCudaErrors(cudaFree(d_plan->sortidx)); if(d_plan->idxnupts) checkCudaErrors(cudaFree(d_plan->idxnupts)); switch(d_plan->opts.gpu_method) { case 1: { if(d_plan->opts.gpu_sort) checkCudaErrors(cudaMalloc(&d_plan->sortidx, M*sizeof(int))); checkCudaErrors(cudaMalloc(&d_plan->idxnupts,M*sizeof(int))); } break; case 2: case 3: { checkCudaErrors(cudaMalloc(&d_plan->idxnupts,M*sizeof(int))); checkCudaErrors(cudaMalloc(&d_plan->sortidx, M*sizeof(int))); } break; default: cerr<<"err: invalid method" << endl; } // Multi-GPU support: reset the device ID cudaSetDevice(orig_gpu_device_id); return 0; } void FREEGPUMEMORY2D(CUFINUFFT_PLAN d_plan) /* wrapper for freeing gpu memory. 
Melody Shih 07/25/19 */ { // Mult-GPU support: set the CUDA Device ID: int orig_gpu_device_id; cudaGetDevice(& orig_gpu_device_id); cudaSetDevice(d_plan->opts.gpu_device_id); if(!d_plan->opts.gpu_spreadinterponly){ checkCudaErrors(cudaFree(d_plan->fw)); checkCudaErrors(cudaFree(d_plan->fwkerhalf1)); checkCudaErrors(cudaFree(d_plan->fwkerhalf2)); } switch(d_plan->opts.gpu_method) { case 1: { if(d_plan->opts.gpu_sort){ checkCudaErrors(cudaFree(d_plan->idxnupts)); checkCudaErrors(cudaFree(d_plan->sortidx)); checkCudaErrors(cudaFree(d_plan->binsize)); checkCudaErrors(cudaFree(d_plan->binstartpts)); }else{ checkCudaErrors(cudaFree(d_plan->idxnupts)); } } break; case 2: { checkCudaErrors(cudaFree(d_plan->idxnupts)); checkCudaErrors(cudaFree(d_plan->sortidx)); checkCudaErrors(cudaFree(d_plan->numsubprob)); checkCudaErrors(cudaFree(d_plan->binsize)); checkCudaErrors(cudaFree(d_plan->binstartpts)); checkCudaErrors(cudaFree(d_plan->subprobstartpts)); checkCudaErrors(cudaFree(d_plan->subprob_to_bin)); } break; case 3: { checkCudaErrors(cudaFree(d_plan->idxnupts)); checkCudaErrors(cudaFree(d_plan->sortidx)); checkCudaErrors(cudaFree(d_plan->numsubprob)); checkCudaErrors(cudaFree(d_plan->binsize)); checkCudaErrors(cudaFree(d_plan->finegridsize)); checkCudaErrors(cudaFree(d_plan->binstartpts)); checkCudaErrors(cudaFree(d_plan->subprobstartpts)); checkCudaErrors(cudaFree(d_plan->subprob_to_bin)); } break; } for(int i=0; i<d_plan->opts.gpu_nstreams; i++) checkCudaErrors(cudaStreamDestroy(d_plan->streams[i])); // Multi-GPU support: reset the device ID cudaSetDevice(orig_gpu_device_id); } int ALLOCGPUMEM3D_PLAN(CUFINUFFT_PLAN d_plan) /* wrapper for gpu memory allocation in "plan" stage. Melody Shih 07/25/19 */ { // Mult-GPU support: set the CUDA Device ID: int orig_gpu_device_id; cudaGetDevice(& orig_gpu_device_id); cudaSetDevice(d_plan->opts.gpu_device_id); int nf1 = d_plan->nf1; int nf2 = d_plan->nf2; int nf3 = d_plan->nf3; int maxbatchsize = d_plan->maxbatchsize; d_plan->byte_now=0; switch(d_plan->opts.gpu_method) { case 1: { if(d_plan->opts.gpu_sort){ int numbins[3]; numbins[0] = ceil((FLT) nf1/d_plan->opts.gpu_binsizex); numbins[1] = ceil((FLT) nf2/d_plan->opts.gpu_binsizey); numbins[2] = ceil((FLT) nf3/d_plan->opts.gpu_binsizez); checkCudaErrors(cudaMalloc(&d_plan->binsize,numbins[0]* numbins[1]*numbins[2]*sizeof(int))); checkCudaErrors(cudaMalloc(&d_plan->binstartpts,numbins[0]* numbins[1]*numbins[2]*sizeof(int))); } } break; case 2: { int numbins[3]; numbins[0] = ceil((FLT) nf1/d_plan->opts.gpu_binsizex); numbins[1] = ceil((FLT) nf2/d_plan->opts.gpu_binsizey); numbins[2] = ceil((FLT) nf3/d_plan->opts.gpu_binsizez); checkCudaErrors(cudaMalloc(&d_plan->numsubprob,numbins[0]* numbins[1]*numbins[2]*sizeof(int))); checkCudaErrors(cudaMalloc(&d_plan->binsize,numbins[0]* numbins[1]*numbins[2]*sizeof(int))); checkCudaErrors(cudaMalloc(&d_plan->binstartpts,numbins[0]* numbins[1]*numbins[2]*sizeof(int))); checkCudaErrors(cudaMalloc(&d_plan->subprobstartpts, (numbins[0]*numbins[1]*numbins[2]+1)*sizeof(int))); } break; case 4: { int numobins[3], numbins[3]; int binsperobins[3]; numobins[0] = ceil((FLT) nf1/d_plan->opts.gpu_obinsizex); numobins[1] = ceil((FLT) nf2/d_plan->opts.gpu_obinsizey); numobins[2] = ceil((FLT) nf3/d_plan->opts.gpu_obinsizez); binsperobins[0] = d_plan->opts.gpu_obinsizex/ d_plan->opts.gpu_binsizex; binsperobins[1] = d_plan->opts.gpu_obinsizey/ d_plan->opts.gpu_binsizey; binsperobins[2] = d_plan->opts.gpu_obinsizez/ d_plan->opts.gpu_binsizez; numbins[0] = numobins[0]*(binsperobins[0]+2); 
numbins[1] = numobins[1]*(binsperobins[1]+2); numbins[2] = numobins[2]*(binsperobins[2]+2); checkCudaErrors(cudaMalloc(&d_plan->numsubprob, numobins[0]*numobins[1]*numobins[2]*sizeof(int))); checkCudaErrors(cudaMalloc(&d_plan->binsize, numbins[0]*numbins[1]*numbins[2]*sizeof(int))); checkCudaErrors(cudaMalloc(&d_plan->binstartpts, (numbins[0]*numbins[1]*numbins[2]+1)*sizeof(int))); checkCudaErrors(cudaMalloc(&d_plan->subprobstartpts,(numobins[0] *numobins[1]*numobins[2]+1)*sizeof(int))); } break; default: cerr << "err: invalid method" << endl; } if(!d_plan->opts.gpu_spreadinterponly){ checkCudaErrors(cudaMalloc(&d_plan->fw, maxbatchsize*nf1*nf2*nf3* sizeof(CUCPX))); checkCudaErrors(cudaMalloc(&d_plan->fwkerhalf1,(nf1/2+1)*sizeof(FLT))); checkCudaErrors(cudaMalloc(&d_plan->fwkerhalf2,(nf2/2+1)*sizeof(FLT))); checkCudaErrors(cudaMalloc(&d_plan->fwkerhalf3,(nf3/2+1)*sizeof(FLT))); } // Multi-GPU support: reset the device ID cudaSetDevice(orig_gpu_device_id); return 0; } int ALLOCGPUMEM3D_NUPTS(CUFINUFFT_PLAN d_plan) /* wrapper for gpu memory allocation in "setNUpts" stage. Melody Shih 07/25/19 */ { // Mult-GPU support: set the CUDA Device ID: int orig_gpu_device_id; cudaGetDevice(& orig_gpu_device_id); cudaSetDevice(d_plan->opts.gpu_device_id); int M = d_plan->M; d_plan->byte_now=0; if(d_plan->sortidx ) checkCudaErrors(cudaFree(d_plan->sortidx)); if(d_plan->idxnupts) checkCudaErrors(cudaFree(d_plan->idxnupts)); switch(d_plan->opts.gpu_method) { case 1: { if(d_plan->opts.gpu_sort) checkCudaErrors(cudaMalloc(&d_plan->sortidx, M*sizeof(int))); checkCudaErrors(cudaMalloc(&d_plan->idxnupts,M*sizeof(int))); } break; case 2: { checkCudaErrors(cudaMalloc(&d_plan->idxnupts,M*sizeof(int))); checkCudaErrors(cudaMalloc(&d_plan->sortidx, M*sizeof(int))); } break; case 4: { checkCudaErrors(cudaMalloc(&d_plan->sortidx,M*sizeof(int))); } break; default: cerr << "err: invalid method" << endl; } // Multi-GPU support: reset the device ID cudaSetDevice(orig_gpu_device_id); return 0; } void FREEGPUMEMORY3D(CUFINUFFT_PLAN d_plan) /* wrapper for freeing gpu memory. 
Melody Shih 07/25/19 */ { // Mult-GPU support: set the CUDA Device ID: int orig_gpu_device_id; cudaGetDevice(& orig_gpu_device_id); cudaSetDevice(d_plan->opts.gpu_device_id); if(!d_plan->opts.gpu_spreadinterponly){ cudaFree(d_plan->fw); cudaFree(d_plan->fwkerhalf1); cudaFree(d_plan->fwkerhalf2); cudaFree(d_plan->fwkerhalf3); } switch(d_plan->opts.gpu_method) { case 1: { if(d_plan->opts.gpu_sort){ checkCudaErrors(cudaFree(d_plan->idxnupts)); checkCudaErrors(cudaFree(d_plan->sortidx)); checkCudaErrors(cudaFree(d_plan->binsize)); checkCudaErrors(cudaFree(d_plan->binstartpts)); }else{ checkCudaErrors(cudaFree(d_plan->idxnupts)); } } break; case 2: { checkCudaErrors(cudaFree(d_plan->idxnupts)); checkCudaErrors(cudaFree(d_plan->sortidx)); checkCudaErrors(cudaFree(d_plan->numsubprob)); checkCudaErrors(cudaFree(d_plan->binsize)); checkCudaErrors(cudaFree(d_plan->binstartpts)); checkCudaErrors(cudaFree(d_plan->subprobstartpts)); checkCudaErrors(cudaFree(d_plan->subprob_to_bin)); } break; case 4: { checkCudaErrors(cudaFree(d_plan->idxnupts)); checkCudaErrors(cudaFree(d_plan->sortidx)); checkCudaErrors(cudaFree(d_plan->numsubprob)); checkCudaErrors(cudaFree(d_plan->binsize)); checkCudaErrors(cudaFree(d_plan->binstartpts)); checkCudaErrors(cudaFree(d_plan->subprobstartpts)); checkCudaErrors(cudaFree(d_plan->subprob_to_bin)); } break; } for(int i=0; i<d_plan->opts.gpu_nstreams; i++) checkCudaErrors(cudaStreamDestroy(d_plan->streams[i])); // Multi-GPU support: reset the device ID cudaSetDevice(orig_gpu_device_id); }
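// Illustrative note (not part of cuFINUFFT): every wrapper above hand-rolls the same
// "save current device, switch to d_plan->opts.gpu_device_id, do the work, restore"
// sequence. A minimal RAII sketch of that pattern follows; the class name
// ScopedCudaDevice and the commented usage are assumptions for illustration only.
#include <cuda_runtime.h>

class ScopedCudaDevice {
public:
    explicit ScopedCudaDevice(int device_id) {
        cudaGetDevice(&orig_device_);   // remember the caller's device
        cudaSetDevice(device_id);       // switch to the requested device
    }
    ~ScopedCudaDevice() {
        cudaSetDevice(orig_device_);    // restore on scope exit, even on early return
    }
private:
    int orig_device_;
};

// Hypothetical usage inside one of the wrappers above:
//   int ALLOCGPUMEM1D_PLAN(CUFINUFFT_PLAN d_plan) {
//       ScopedCudaDevice guard(d_plan->opts.gpu_device_id);
//       ... cudaMalloc calls ...
//       return 0;   // destructor resets the device automatically
//   }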
#include "include/common.h" // 一个二维的Eigen向量, 不强制使用字节对齐 using Vec2ida = Eigen::Matrix<int, 2, 1, Eigen::DontAlign>; namespace kinectfusion { namespace internal { namespace cuda { // 更新 TSDF 模型的核函数 __global__ void update_tsdf_kernel( const PtrStepSz<float> depth_image, // 原始大小深度图 const PtrStepSz<uchar3> color_image, // 原始大小彩色图 PtrStepSz<short2> tsdf_volume, PtrStepSz<uchar3> color_volume, int3 volume_size, float voxel_scale, CameraParameters cam_params, // 原始图层上的相机内参 const float truncation_distance, Eigen::Matrix<float, 3, 3, Eigen::DontAlign> rotation, // 旋转矩阵 -- 这里要求Eigen编译的时候使能cuda Vec3fda translation) // 平移向量 { // step 1 获取当前线程的id, 并检查是否落在 volume 中. // 这里实际上是每个线程对应(x,y,*),每一个线程负责处理z轴上的所有数据 const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; // 合法性检查 if (x >= volume_size.x || y >= volume_size.y) return; // step 2 处理z轴上的每一个体素的数据 for (int z = 0; z < volume_size.z; ++z) { // step 2.1 计算该体素中心点在当前帧相机坐标系下的坐标, 然后投影到图像中得到投影点坐标, 其中进行合法性检查 // 获取当前要处理的体素中心点在空间中的实际位置. 其中的0.5表示的是计算得到体素的中心, * voxel_scale 对应为实际空间尺度下体素的中心 const Vec3fda position((static_cast<float>(x) + 0.5f) * voxel_scale, (static_cast<float>(y) + 0.5f) * voxel_scale, (static_cast<float>(z) + 0.5f) * voxel_scale); // 将上面的在世界坐标系下的表示变换到在当前相机坐标系下的坐标 const Vec3fda camera_pos = rotation * position + translation; // 合法性检查1: 如果这个体素相机看不到那么我们就不管了 if (camera_pos.z() <= 0) continue; // int __float2int_rn(float) : 求最近的偶数 // ? 为什么要求偶数? -- 我怀疑作者写错了, 这里应该是求整数吧 // 计算空间点在图像上的投影点, 并且认为这个投影点就是对这个空间体素的观测 const Vec2ida uv( __float2int_rn(camera_pos.x() / camera_pos.z() * cam_params.focal_x + cam_params.principal_x), __float2int_rn(camera_pos.y() / camera_pos.z() * cam_params.focal_y + cam_params.principal_y)); // 合法性检查2: 查看投影点是否正确地投影在了图像范围内 if (uv.x() < 0 || uv.x() >= depth_image.cols || uv.y() < 0 || uv.y() >= depth_image.rows) continue; // 获取该体素中心点的深度的观测值(相对于当前图像来说) const float depth = depth_image.ptr(uv.y())[uv.x()]; // 合法性检查3: 深度的观测值应该非负 if (depth <= 0) continue; // step 2.2 计算论文中公式7的 lambda // Tips: | 1/fx 0 -cx/fx | // K^(-1) = | 0 1/fy -cy/fy | // | 0 0 1 | const Vec3fda xylambda( (uv.x() - cam_params.principal_x) / cam_params.focal_x, // (x/z) (uv.y() - cam_params.principal_y) / cam_params.focal_y, // (y/z) 1.f); // 计算得到公式7中的 lambda const float lambda = xylambda.norm(); // step 2.3 计算 SDF, 参考论文公式6中括号的部分 // 这里的 camera_pos 已经是该体素中心点在当前世界坐标系下的坐标了, 论文中的公式的意思是计算相机光心到该点的距离, 就相当于这个坐标直接取欧式二范数 // 前面乘的负号是因为, 咱们定义 SDF 表示中平面前的部分为正, 平面后的部分为负 // SDF 其实也就是表示了空间体素点的(预测值 - 观测值) const float sdf = (-1.f) * ((1.f / lambda) * camera_pos.norm() - depth); // step 2.4 计算 TSDF, 参考论文公式9 // 如果根据我们得到的 SDF 告诉我们, 这个距离我们能够观测到 (即相当于观测的距离是在 -u 之前的) //0---TSDF------------------------ | // | |\ | // | | \ | // | | \ | // | | \ | // | | \ | // | | \ | //0---camera|-------------------(u)----(0)----(-u)------------distance // | | \ | // | | \ | // | | \ | // | | \ | // | | \ | // | | \| // | | |-x-x-x-x-x-x-x-x-x-x- // | 截断区 | TSDF表示区 | 不可观测区 // if (sdf >= -truncation_distance) { // 说明当前的 SDF 表示获得的观测的深度值, 在我们构建的TSDF模型中, 是可观测的 // step 2.4.1 计算当前次观测得到的 TSDF 值 // 注意这里的 TSDF 值一直都是小于1的. 后面会利用这个特点来将浮点型的 TSDF 值保存为 uint16_t 类型 const float new_tsdf = fmin(1.f, sdf / truncation_distance); // step 2.4.2 获取当前的global模型中已经存储的 TSDF 值和权重 // 有点z对应行, y对应列的感觉 // volume 的下标组织: 二维 GpuMat //0 ---x0 x1 x2 x3 x4 ... 
x511 ----------->(x) // ( z0, y0)| // ( z0, y1)| // ( z0, y2)| // ( z0, y3)| // ( z0, ...)| // ( z0,y511)| // ( z1, y0)| // ( z1, y1)| // ( z1, y2)| // ( z1, y3)| // ( z1, ...)| // ( z1,y511)| // ( ..., ...)| // (z511, y0)| // (z511, y1)| // (z511, y2)| // (z511, y3)| // (z511, ...)| // (z511,y511)| // ^ // 获取对应的 TSDF 体素中已经存储的 TSDF 值和权重 (注意获取的数据是个向量) short2 voxel_tuple = tsdf_volume.ptr(z * volume_size.y + y)[x]; // 这里的 current 表示已经存储在全局模型中的数据, 对应在论文公式11中为下标(k-1)的符号 // 由于TSDF值现在是按照 uint16_t 的格式来存储的,所以为了变换成为float型需要进行变换, 乘DIVSHORTMAX // 使用乘法代替除法, 运算更快. 这个部分可以参考下面 浮点型 TSDF值是怎么存储为 uint16_t 格式的 const float current_tsdf = static_cast<float>(voxel_tuple.x) * DIVSHORTMAX; const int current_weight = voxel_tuple.y; // step 2.4.3 更新 TSDF 值和权重值 // 见下 const int add_weight = 1; // 参考论文公式11, 计算得到该体素中更新后的 TSDF 值, 符号对应关系如下: // current_weight => W_{k-1}(p) // current_tsdf => F_{k-1}(p) // add_weight => W_{R_k}(p) // new_tsdf => F_{R_k}(p) const float updated_tsdf = (current_weight * current_tsdf + add_weight * new_tsdf) / (current_weight + add_weight); // 论文公式 13 对权重 进行更新 const int new_weight = min(current_weight + add_weight, MAX_WEIGHT); // 将 浮点的 TSDF 值经过 int32_t 类型 保存为 uint16_t 类型. 限幅是因为理想情况下 无论是当前帧计算的还是融合之后的TSDF值都应该是小于1的 // (所以对应的值属于 -SHORTMAX ~ SHORTMAX) // 类型中转是因为不这样做, updated_tsdf 一旦越界会出现截断, 导致 min max 函数都无法有效工作 const int new_value = max(-SHORTMAX, min(SHORTMAX, static_cast<int>(updated_tsdf * SHORTMAX))); // step 2.4.4 保存计算结果 tsdf_volume.ptr(z * volume_size.y + y)[x] = make_short2(static_cast<short>(new_value), static_cast<short>(new_weight)); // step 2.4.5 对 彩色图进行更新 // 前提是当前的这个体素的中心观测值在 TSDF 的1/2未截断区域内. 注意这里的约束其实更加严格, 这里是截断距离除了2 if (sdf <= truncation_distance / 2 && sdf >= -truncation_distance / 2) { // step 2.4.5.1 获取当前体素对应的投影点的颜色的观测值和之前的储存值 // 储存值 uchar3& model_color = color_volume.ptr(z * volume_size.y + y)[x]; // 观测值 const uchar3 image_color = color_image.ptr(uv.y())[uv.x()]; // step 2.4.5.2 颜色均匀化之后再写入, 仿照 TSDF 值的加权更新方式 model_color.x = static_cast<uchar>( (current_weight * model_color.x + add_weight * image_color.x) / (current_weight + add_weight)); model_color.y = static_cast<uchar>( (current_weight * model_color.y + add_weight * image_color.y) / (current_weight + add_weight)); model_color.z = static_cast<uchar>( (current_weight * model_color.z + add_weight * image_color.z) / (current_weight + add_weight)); }// 对彩色图进行更新 }// 如果根据我们得到的 SDF 告诉我们, 这个距离我们能够观测到, 那么更新 TSDF }// 处理z轴上的每一个体素的数据 }// 核函数 // 实现表面的重建, 即将当前帧的相机位姿已知的时候, 根据当前帧的surface mearsurment,融合到Global TSDF Model 中 // 主机端函数 void surface_reconstruction(const cv::cuda::GpuMat& depth_image, const cv::cuda::GpuMat& color_image, VolumeData& volume, const CameraParameters& cam_params, const float truncation_distance, const Eigen::Matrix4f& model_view) { // step 1 根据TSDF Volume的大小, 计算核函数的大小 const dim3 threads(32, 32); const dim3 blocks((volume.volume_size.x + threads.x - 1) / threads.x, (volume.volume_size.y + threads.y - 1) / threads.y); // step 2 启动核函数 update_tsdf_kernel<<<blocks, threads>>>( depth_image, // 原始大小的深度图像 color_image, // 原始大小的彩色图像 volume.tsdf_volume, // TSDF Volume, GpuMat volume.color_volume, // color Volume, GpuMat volume.volume_size, // Volume 的大小, int3 volume.voxel_scale, // 尺度缩放, float cam_params, // 在当前图层上的相机内参 truncation_distance, // 截断距离u model_view.block(0, 0, 3, 3), // 提取旋转矩阵 // (Index startRow, Index startCol, Index blockRows, Index blockCols) model_view.block(0, 3, 3, 1)); // 提取平移向量 // step 3 等待所有的并行线程结束 cudaThreadSynchronize(); } } } }
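// Illustrative sketch (not part of the original file): the weighted running average that
// update_tsdf_kernel performs for Eqs. (11) and (13) can be isolated into a small device
// helper. SHORTMAX, DIVSHORTMAX and MAX_WEIGHT are assumed to be the constants the kernel
// uses (DIVSHORTMAX == 1/SHORTMAX); here they are passed in as parameters.
__device__ __forceinline__ short2 fuse_tsdf(short2 stored, float new_tsdf,
                                            int add_weight, int max_weight, int short_max)
{
    const float old_tsdf = static_cast<float>(stored.x) / short_max;   // decode int16 -> float
    const int   old_w    = stored.y;
    // F_k = (W_{k-1} F_{k-1} + W_R F_R) / (W_{k-1} + W_R)             -- Eq. (11)
    const float fused    = (old_w * old_tsdf + add_weight * new_tsdf) / (old_w + add_weight);
    const int   new_w    = min(old_w + add_weight, max_weight);        // Eq. (13)
    // clamp and re-encode as int16, going through int to avoid wrap-around
    const int encoded = max(-short_max, min(short_max, static_cast<int>(fused * short_max)));
    return make_short2(static_cast<short>(encoded), static_cast<short>(new_w));
}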
#include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/border_interpolate.hpp" #define tx threadIdx.x #define ty threadIdx.y #define bx blockIdx.x #define by blockIdx.y #define bdx blockDim.x #define bdy blockDim.y #define BORDER_SIZE 5 #define MAX_KSIZE_HALF 100 namespace cv { namespace cuda { namespace device { namespace optflow_farneback { __constant__ float c_g[8]; __constant__ float c_xg[8]; __constant__ float c_xxg[8]; __constant__ float c_ig11, c_ig03, c_ig33, c_ig55; template <int polyN> __global__ void polynomialExpansion( const int height, const int width, const PtrStepf src, PtrStepf dst) { const int y = by * bdy + ty; const int x = bx * (bdx - 2*polyN) + tx - polyN; if (y < height) { extern __shared__ float smem[]; volatile float *row = smem + tx; int xWarped = ::min(::max(x, 0), width - 1); row[0] = src(y, xWarped) * c_g[0]; row[bdx] = 0.f; row[2*bdx] = 0.f; for (int k = 1; k <= polyN; ++k) { float t0 = src(::max(y - k, 0), xWarped); float t1 = src(::min(y + k, height - 1), xWarped); row[0] += c_g[k] * (t0 + t1); row[bdx] += c_xg[k] * (t1 - t0); row[2*bdx] += c_xxg[k] * (t0 + t1); } __syncthreads(); if (tx >= polyN && tx + polyN < bdx && x < width) { float b1 = c_g[0] * row[0]; float b3 = c_g[0] * row[bdx]; float b5 = c_g[0] * row[2*bdx]; float b2 = 0, b4 = 0, b6 = 0; for (int k = 1; k <= polyN; ++k) { b1 += (row[k] + row[-k]) * c_g[k]; b4 += (row[k] + row[-k]) * c_xxg[k]; b2 += (row[k] - row[-k]) * c_xg[k]; b3 += (row[k + bdx] + row[-k + bdx]) * c_g[k]; b6 += (row[k + bdx] - row[-k + bdx]) * c_xg[k]; b5 += (row[k + 2*bdx] + row[-k + 2*bdx]) * c_g[k]; } dst(y, xWarped) = b3*c_ig11; dst(height + y, xWarped) = b2*c_ig11; dst(2*height + y, xWarped) = b1*c_ig03 + b5*c_ig33; dst(3*height + y, xWarped) = b1*c_ig03 + b4*c_ig33; dst(4*height + y, xWarped) = b6*c_ig55; } } } void setPolynomialExpansionConsts( int polyN, const float *g, const float *xg, const float *xxg, float ig11, float ig03, float ig33, float ig55) { cudaSafeCall(cudaMemcpyToSymbol(c_g, g, (polyN + 1) * sizeof(*g))); cudaSafeCall(cudaMemcpyToSymbol(c_xg, xg, (polyN + 1) * sizeof(*xg))); cudaSafeCall(cudaMemcpyToSymbol(c_xxg, xxg, (polyN + 1) * sizeof(*xxg))); cudaSafeCall(cudaMemcpyToSymbol(c_ig11, &ig11, sizeof(ig11))); cudaSafeCall(cudaMemcpyToSymbol(c_ig03, &ig03, sizeof(ig03))); cudaSafeCall(cudaMemcpyToSymbol(c_ig33, &ig33, sizeof(ig33))); cudaSafeCall(cudaMemcpyToSymbol(c_ig55, &ig55, sizeof(ig55))); } void polynomialExpansionGpu(const PtrStepSzf &src, int polyN, PtrStepSzf dst, cudaStream_t stream) { dim3 block(256); dim3 grid(divUp(src.cols, block.x - 2*polyN), src.rows); int smem = 3 * block.x * sizeof(float); if (polyN == 5) polynomialExpansion<5><<<grid, block, smem, stream>>>(src.rows, src.cols, src, dst); else if (polyN == 7) polynomialExpansion<7><<<grid, block, smem, stream>>>(src.rows, src.cols, src, dst); cudaSafeCall(cudaGetLastError()); if (stream == 0) cudaSafeCall(cudaDeviceSynchronize()); } __constant__ float c_border[BORDER_SIZE + 1]; __global__ void updateMatrices( const int height, const int width, const PtrStepf flowx, const PtrStepf flowy, const PtrStepf R0, const PtrStepf R1, PtrStepf M) { const int y = by * bdy + ty; const int x = bx * bdx + tx; if (y < height && x < width) { float dx = flowx(y, x); float dy = flowy(y, x); float fx = x + dx; float fy = y + dy; int x1 = floorf(fx); int y1 = floorf(fy); fx -= x1; fy -= y1; float r2, r3, r4, r5, r6; if (x1 >= 0 && y1 >= 0 && x1 < width - 1 && y1 < height - 1) { float a00 = (1.f - fx) * (1.f - fy); float a01 = fx * (1.f - fy); 
float a10 = (1.f - fx) * fy; float a11 = fx * fy; r2 = a00 * R1(y1, x1) + a01 * R1(y1, x1 + 1) + a10 * R1(y1 + 1, x1) + a11 * R1(y1 + 1, x1 + 1); r3 = a00 * R1(height + y1, x1) + a01 * R1(height + y1, x1 + 1) + a10 * R1(height + y1 + 1, x1) + a11 * R1(height + y1 + 1, x1 + 1); r4 = a00 * R1(2*height + y1, x1) + a01 * R1(2*height + y1, x1 + 1) + a10 * R1(2*height + y1 + 1, x1) + a11 * R1(2*height + y1 + 1, x1 + 1); r5 = a00 * R1(3*height + y1, x1) + a01 * R1(3*height + y1, x1 + 1) + a10 * R1(3*height + y1 + 1, x1) + a11 * R1(3*height + y1 + 1, x1 + 1); r6 = a00 * R1(4*height + y1, x1) + a01 * R1(4*height + y1, x1 + 1) + a10 * R1(4*height + y1 + 1, x1) + a11 * R1(4*height + y1 + 1, x1 + 1); r4 = (R0(2*height + y, x) + r4) * 0.5f; r5 = (R0(3*height + y, x) + r5) * 0.5f; r6 = (R0(4*height + y, x) + r6) * 0.25f; } else { r2 = r3 = 0.f; r4 = R0(2*height + y, x); r5 = R0(3*height + y, x); r6 = R0(4*height + y, x) * 0.5f; } r2 = (R0(y, x) - r2) * 0.5f; r3 = (R0(height + y, x) - r3) * 0.5f; r2 += r4*dy + r6*dx; r3 += r6*dy + r5*dx; float scale = c_border[::min(x, BORDER_SIZE)] * c_border[::min(y, BORDER_SIZE)] * c_border[::min(width - x - 1, BORDER_SIZE)] * c_border[::min(height - y - 1, BORDER_SIZE)]; r2 *= scale; r3 *= scale; r4 *= scale; r5 *= scale; r6 *= scale; M(y, x) = r4*r4 + r6*r6; M(height + y, x) = (r4 + r5)*r6; M(2*height + y, x) = r5*r5 + r6*r6; M(3*height + y, x) = r4*r2 + r6*r3; M(4*height + y, x) = r6*r2 + r5*r3; } } void setUpdateMatricesConsts() { static const float border[BORDER_SIZE + 1] = {0.14f, 0.14f, 0.4472f, 0.4472f, 0.4472f, 1.f}; cudaSafeCall(cudaMemcpyToSymbol(c_border, border, (BORDER_SIZE + 1) * sizeof(*border))); } void updateMatricesGpu( const PtrStepSzf flowx, const PtrStepSzf flowy, const PtrStepSzf R0, const PtrStepSzf R1, PtrStepSzf M, cudaStream_t stream) { dim3 block(32, 8); dim3 grid(divUp(flowx.cols, block.x), divUp(flowx.rows, block.y)); updateMatrices<<<grid, block, 0, stream>>>(flowx.rows, flowx.cols, flowx, flowy, R0, R1, M); cudaSafeCall(cudaGetLastError()); if (stream == 0) cudaSafeCall(cudaDeviceSynchronize()); } __global__ void updateFlow( const int height, const int width, const PtrStepf M, PtrStepf flowx, PtrStepf flowy) { const int y = by * bdy + ty; const int x = bx * bdx + tx; if (y < height && x < width) { float g11 = M(y, x); float g12 = M(height + y, x); float g22 = M(2*height + y, x); float h1 = M(3*height + y, x); float h2 = M(4*height + y, x); float detInv = 1.f / (g11*g22 - g12*g12 + 1e-3f); flowx(y, x) = (g11*h2 - g12*h1) * detInv; flowy(y, x) = (g22*h1 - g12*h2) * detInv; } } void updateFlowGpu(const PtrStepSzf M, PtrStepSzf flowx, PtrStepSzf flowy, cudaStream_t stream) { dim3 block(32, 8); dim3 grid(divUp(flowx.cols, block.x), divUp(flowx.rows, block.y)); updateFlow<<<grid, block, 0, stream>>>(flowx.rows, flowx.cols, M, flowx, flowy); cudaSafeCall(cudaGetLastError()); if (stream == 0) cudaSafeCall(cudaDeviceSynchronize()); } /*__global__ void boxFilter( const int height, const int width, const PtrStepf src, const int ksizeHalf, const float boxAreaInv, PtrStepf dst) { const int y = by * bdy + ty; const int x = bx * bdx + tx; extern __shared__ float smem[]; volatile float *row = smem + ty * (bdx + 2*ksizeHalf); if (y < height) { // Vertical pass for (int i = tx; i < bdx + 2*ksizeHalf; i += bdx) { int xExt = int(bx * bdx) + i - ksizeHalf; xExt = ::min(::max(xExt, 0), width - 1); row[i] = src(y, xExt); for (int j = 1; j <= ksizeHalf; ++j) row[i] += src(::max(y - j, 0), xExt) + src(::min(y + j, height - 1), xExt); } if (x < width) { 
__syncthreads(); // Horizontal passs row += tx + ksizeHalf; float res = row[0]; for (int i = 1; i <= ksizeHalf; ++i) res += row[-i] + row[i]; dst(y, x) = res * boxAreaInv; } } } void boxFilterGpu(const PtrStepSzf src, int ksizeHalf, PtrStepSzf dst, cudaStream_t stream) { dim3 block(256); dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y)); int smem = (block.x + 2*ksizeHalf) * block.y * sizeof(float); float boxAreaInv = 1.f / ((1 + 2*ksizeHalf) * (1 + 2*ksizeHalf)); boxFilter<<<grid, block, smem, stream>>>(src.rows, src.cols, src, ksizeHalf, boxAreaInv, dst); cudaSafeCall(cudaGetLastError()); if (stream == 0) cudaSafeCall(cudaDeviceSynchronize()); }*/ __global__ void boxFilter5( const int height, const int width, const PtrStepf src, const int ksizeHalf, const float boxAreaInv, PtrStepf dst) { const int y = by * bdy + ty; const int x = bx * bdx + tx; extern __shared__ float smem[]; const int smw = bdx + 2*ksizeHalf; // shared memory "width" volatile float *row = smem + 5 * ty * smw; if (y < height) { // Vertical pass for (int i = tx; i < bdx + 2*ksizeHalf; i += bdx) { int xExt = int(bx * bdx) + i - ksizeHalf; xExt = ::min(::max(xExt, 0), width - 1); #pragma unroll for (int k = 0; k < 5; ++k) row[k*smw + i] = src(k*height + y, xExt); for (int j = 1; j <= ksizeHalf; ++j) #pragma unroll for (int k = 0; k < 5; ++k) row[k*smw + i] += src(k*height + ::max(y - j, 0), xExt) + src(k*height + ::min(y + j, height - 1), xExt); } if (x < width) { __syncthreads(); // Horizontal passs row += tx + ksizeHalf; float res[5]; #pragma unroll for (int k = 0; k < 5; ++k) res[k] = row[k*smw]; for (int i = 1; i <= ksizeHalf; ++i) #pragma unroll for (int k = 0; k < 5; ++k) res[k] += row[k*smw - i] + row[k*smw + i]; #pragma unroll for (int k = 0; k < 5; ++k) dst(k*height + y, x) = res[k] * boxAreaInv; } } } void boxFilter5Gpu(const PtrStepSzf src, int ksizeHalf, PtrStepSzf dst, cudaStream_t stream) { int height = src.rows / 5; int width = src.cols; dim3 block(256); dim3 grid(divUp(width, block.x), divUp(height, block.y)); int smem = (block.x + 2*ksizeHalf) * 5 * block.y * sizeof(float); float boxAreaInv = 1.f / ((1 + 2*ksizeHalf) * (1 + 2*ksizeHalf)); boxFilter5<<<grid, block, smem, stream>>>(height, width, src, ksizeHalf, boxAreaInv, dst); cudaSafeCall(cudaGetLastError()); if (stream == 0) cudaSafeCall(cudaDeviceSynchronize()); } void boxFilter5Gpu_CC11(const PtrStepSzf src, int ksizeHalf, PtrStepSzf dst, cudaStream_t stream) { int height = src.rows / 5; int width = src.cols; dim3 block(128); dim3 grid(divUp(width, block.x), divUp(height, block.y)); int smem = (block.x + 2*ksizeHalf) * 5 * block.y * sizeof(float); float boxAreaInv = 1.f / ((1 + 2*ksizeHalf) * (1 + 2*ksizeHalf)); boxFilter5<<<grid, block, smem, stream>>>(height, width, src, ksizeHalf, boxAreaInv, dst); cudaSafeCall(cudaGetLastError()); if (stream == 0) cudaSafeCall(cudaDeviceSynchronize()); } __constant__ float c_gKer[MAX_KSIZE_HALF + 1]; template <typename Border> __global__ void gaussianBlur( const int height, const int width, const PtrStepf src, const int ksizeHalf, const Border b, PtrStepf dst) { const int y = by * bdy + ty; const int x = bx * bdx + tx; extern __shared__ float smem[]; volatile float *row = smem + ty * (bdx + 2*ksizeHalf); if (y < height) { // Vertical pass for (int i = tx; i < bdx + 2*ksizeHalf; i += bdx) { int xExt = int(bx * bdx) + i - ksizeHalf; xExt = b.idx_col(xExt); row[i] = src(y, xExt) * c_gKer[0]; for (int j = 1; j <= ksizeHalf; ++j) row[i] += (src(b.idx_row_low(y - j), xExt) + src(b.idx_row_high(y + j), xExt)) 
* c_gKer[j]; } if (x < width) { __syncthreads(); // Horizontal pass row += tx + ksizeHalf; float res = row[0] * c_gKer[0]; for (int i = 1; i <= ksizeHalf; ++i) res += (row[-i] + row[i]) * c_gKer[i]; dst(y, x) = res; } } } void setGaussianBlurKernel(const float *gKer, int ksizeHalf) { cudaSafeCall(cudaMemcpyToSymbol(c_gKer, gKer, (ksizeHalf + 1) * sizeof(*gKer))); } template <typename Border> void gaussianBlurCaller(const PtrStepSzf src, int ksizeHalf, PtrStepSzf dst, cudaStream_t stream) { int height = src.rows; int width = src.cols; dim3 block(256); dim3 grid(divUp(width, block.x), divUp(height, block.y)); int smem = (block.x + 2*ksizeHalf) * block.y * sizeof(float); Border b(height, width); gaussianBlur<<<grid, block, smem, stream>>>(height, width, src, ksizeHalf, b, dst); cudaSafeCall(cudaGetLastError()); if (stream == 0) cudaSafeCall(cudaDeviceSynchronize()); } void gaussianBlurGpu( const PtrStepSzf src, int ksizeHalf, PtrStepSzf dst, int borderMode, cudaStream_t stream) { typedef void (*caller_t)(const PtrStepSzf, int, PtrStepSzf, cudaStream_t); static const caller_t callers[] = { 0 /*gaussianBlurCaller<BrdConstant<float> >*/, gaussianBlurCaller<BrdReplicate<float> >, 0 /*gaussianBlurCaller<BrdReflect<float> >*/, 0 /*gaussianBlurCaller<BrdWrap<float> >*/, gaussianBlurCaller<BrdReflect101<float> > }; callers[borderMode](src, ksizeHalf, dst, stream); } template <typename Border> __global__ void gaussianBlur5( const int height, const int width, const PtrStepf src, const int ksizeHalf, const Border b, PtrStepf dst) { const int y = by * bdy + ty; const int x = bx * bdx + tx; extern __shared__ float smem[]; const int smw = bdx + 2*ksizeHalf; // shared memory "width" volatile float *row = smem + 5 * ty * smw; if (y < height) { // Vertical pass for (int i = tx; i < bdx + 2*ksizeHalf; i += bdx) { int xExt = int(bx * bdx) + i - ksizeHalf; xExt = b.idx_col(xExt); #pragma unroll for (int k = 0; k < 5; ++k) row[k*smw + i] = src(k*height + y, xExt) * c_gKer[0]; for (int j = 1; j <= ksizeHalf; ++j) #pragma unroll for (int k = 0; k < 5; ++k) row[k*smw + i] += (src(k*height + b.idx_row_low(y - j), xExt) + src(k*height + b.idx_row_high(y + j), xExt)) * c_gKer[j]; } if (x < width) { __syncthreads(); // Horizontal pass row += tx + ksizeHalf; float res[5]; #pragma unroll for (int k = 0; k < 5; ++k) res[k] = row[k*smw] * c_gKer[0]; for (int i = 1; i <= ksizeHalf; ++i) #pragma unroll for (int k = 0; k < 5; ++k) res[k] += (row[k*smw - i] + row[k*smw + i]) * c_gKer[i]; #pragma unroll for (int k = 0; k < 5; ++k) dst(k*height + y, x) = res[k]; } } } template <typename Border, int blockDimX> void gaussianBlur5Caller( const PtrStepSzf src, int ksizeHalf, PtrStepSzf dst, cudaStream_t stream) { int height = src.rows / 5; int width = src.cols; dim3 block(blockDimX); dim3 grid(divUp(width, block.x), divUp(height, block.y)); int smem = (block.x + 2*ksizeHalf) * 5 * block.y * sizeof(float); Border b(height, width); gaussianBlur5<<<grid, block, smem, stream>>>(height, width, src, ksizeHalf, b, dst); cudaSafeCall(cudaGetLastError()); if (stream == 0) cudaSafeCall(cudaDeviceSynchronize()); } void gaussianBlur5Gpu( const PtrStepSzf src, int ksizeHalf, PtrStepSzf dst, int borderMode, cudaStream_t stream) { typedef void (*caller_t)(const PtrStepSzf, int, PtrStepSzf, cudaStream_t); static const caller_t callers[] = { 0 /*gaussianBlur5Caller<BrdConstant<float>,256>*/, gaussianBlur5Caller<BrdReplicate<float>,256>, 0 /*gaussianBlur5Caller<BrdReflect<float>,256>*/, 0 /*gaussianBlur5Caller<BrdWrap<float>,256>*/, 
gaussianBlur5Caller<BrdReflect101<float>,256> }; callers[borderMode](src, ksizeHalf, dst, stream); } void gaussianBlur5Gpu_CC11( const PtrStepSzf src, int ksizeHalf, PtrStepSzf dst, int borderMode, cudaStream_t stream) { typedef void (*caller_t)(const PtrStepSzf, int, PtrStepSzf, cudaStream_t); static const caller_t callers[] = { 0 /*gaussianBlur5Caller<BrdConstant<float>,128>*/, gaussianBlur5Caller<BrdReplicate<float>,128>, 0 /*gaussianBlur5Caller<BrdReflect<float>,128>*/, 0 /*gaussianBlur5Caller<BrdWrap<float>,128>*/, gaussianBlur5Caller<BrdReflect101<float>,128> }; callers[borderMode](src, ksizeHalf, dst, stream); } }}}} // namespace cv { namespace cuda { namespace device { namespace optflow_farneback
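// Illustrative note (not OpenCV API): each *5 launcher above requests
//   (block.x + 2*ksizeHalf) * 5 * block.y * sizeof(float)
// bytes of dynamic shared memory -- one padded row per thread row for each of the five
// stacked channels -- and the *_CC11 variants only shrink block.x so the request fits older
// devices. Below is a small host-side sketch that checks such a request against the device
// limit; the helper name and the check itself are assumptions for illustration.
#include <cstdio>
#include <cuda_runtime.h>

inline bool smemFits5ChannelFilter(dim3 block, int ksizeHalf, int device = 0)
{
    const size_t smem = (block.x + 2 * ksizeHalf) * 5 * block.y * sizeof(float);
    int limit = 0;
    cudaDeviceGetAttribute(&limit, cudaDevAttrMaxSharedMemoryPerBlock, device);
    if (smem > static_cast<size_t>(limit)) {
        std::fprintf(stderr, "requested %zu bytes of shared memory, device allows %d\n",
                     smem, limit);
        return false;
    }
    return true;
}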
#include <algorithm> #include <limits> #include "cml/cml_blas.cuh" #include "cml/cml_linalg.cuh" #include "cml/cml_matrix.cuh" #include "matrix/matrix_dense.h" #include "projector/projector_direct.h" #include "projector_helper.cuh" #include "util.h" #include "timer.h" #include "solver/glm.h" #include "../include/cuda_utils.h" namespace h2o4gpu { namespace { template<typename T> struct GpuData { T *AA, *L, s; cublasHandle_t handle; GpuData() : AA(0), L(0), s(static_cast<T>(-1.)) { cublasCreate(&handle); CUDA_CHECK_ERR(); } ~GpuData() { cublasDestroy(handle); CUDA_CHECK_ERR(); } }; } // namespace template <typename T, typename M> ProjectorDirect<T, M>::ProjectorDirect(int wDev, const M& A) : _wDev(wDev), _A(A) { checkwDev(_wDev); CUDACHECK(cudaSetDevice(_wDev)); DEBUG_FPRINTF(stderr,"Rows=%d Cols=%d done_init=%d\n",(int)_A.Rows(),(int)_A.Cols(),_A.IsInit()); // Set GPU specific this->_info. PUSH_RANGE("PDnew",PDnew,1); GpuData<T> *info = new GpuData<T>(); this->_info = reinterpret_cast<void*>(info); POP_RANGE("PDnew",PDnew,1); } template <typename T, typename M> ProjectorDirect<T, M>::ProjectorDirect(const M& A) : _wDev(A._wDev), _A(A) { checkwDev(_wDev); CUDACHECK(cudaSetDevice(_wDev)); DEBUG_FPRINTF(stderr,"Rows=%d Cols=%d done_init=%d\n",(int)_A.Rows(),(int)_A.Cols(),_A.IsInit()); // Set GPU specific this->_info. PUSH_RANGE("PDnew",PDnew,1); GpuData<T> *info = new GpuData<T>(); this->_info = reinterpret_cast<void*>(info); POP_RANGE("PDnew",PDnew,1); } template <typename T, typename M> ProjectorDirect<T, M>::~ProjectorDirect() { if(1){ // FIXME: segfaults sometimes checkwDev(_wDev); CUDACHECK(cudaSetDevice(_wDev)); GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info); if (info->AA) { cudaFree(info->AA); info->AA = 0; CUDA_CHECK_ERR(); } if (info->L) { cudaFree(info->L); info->L = 0; CUDA_CHECK_ERR(); } delete info; this->_info = 0; } } template <typename T, typename M> int ProjectorDirect<T, M>::Init() { if (this->_done_init) return 1; this->_done_init = true; CUDACHECK(cudaSetDevice(_wDev)); ASSERT(_A.IsInit()); GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info); size_t min_dim = std::min(_A.Rows(), _A.Cols()); PUSH_RANGE("AAalloc",AAalloc,1); cudaMalloc(&(info->AA), min_dim * min_dim * sizeof(T)); cudaMalloc(&(info->L), min_dim * min_dim * sizeof(T)); cudaMemset(info->AA, 0, min_dim * min_dim * sizeof(T)); cudaMemset(info->L, 0, min_dim * min_dim * sizeof(T)); DEBUG_FPRINTF(stderr,"TEST: r=%d c=%d : %d %d\n",(int)_A.Rows(), (int)_A.Cols(), (int)min_dim,(int)sizeof(T)); CUDA_CHECK_ERR(); POP_RANGE("AAalloc",AAalloc,1); cublasOperation_t op_type = _A.Rows() > _A.Cols() ? CUBLAS_OP_T : CUBLAS_OP_N; // Compute AA (i.e. 
Gramian matrix) PUSH_RANGE("AAcompute(gram)",AAcompute,1); double t0 = timer<double>(); if (_A.Order() == MatrixDense<T>::ROW) { const cml::matrix<T, CblasRowMajor> A = cml::matrix_view_array<T, CblasRowMajor> (_A.Data(), _A.Rows(), _A.Cols()); cml::matrix<T, CblasRowMajor> AA = cml::matrix_view_array<T, CblasRowMajor> (info->AA, min_dim, min_dim); //C := alpha*A*A' + beta*C cml::blas_syrk(info->handle, CUBLAS_FILL_MODE_LOWER, op_type, static_cast<T>(1.), &A, static_cast<T>(0.), &AA); } else { const cml::matrix<T, CblasColMajor> A = cml::matrix_view_array<T, CblasColMajor> (_A.Data(), _A.Rows(), _A.Cols()); cml::matrix<T, CblasColMajor> AA = cml::matrix_view_array<T, CblasColMajor> (info->AA, min_dim, min_dim); cml::blas_syrk(info->handle, CUBLAS_FILL_MODE_LOWER, op_type, static_cast<T>(1.), &A, static_cast<T>(0.), &AA); } double t1 = timer<double>() - t0; DEBUG_FPRINTF(stderr,"Time to compute the Gram: %f\n", t1); CUDA_CHECK_ERR(); POP_RANGE("AAcompute(gram)",AAcompute,1); return 0; } template <typename T, typename M> int ProjectorDirect<T, M>::Project(const T *x0, const T *y0, T s, T *x, T *y, T tol) { DEBUG_EXPECT(this->_done_init); if (!this->_done_init || s < static_cast<T>(0.)) return 1; CUDACHECK(cudaSetDevice(_wDev)); // Get Cublas handle GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info); cublasHandle_t hdl = info->handle; PUSH_RANGE("P1alloc",P1alloc,2); size_t min_dim = std::min(_A.Rows(), _A.Cols()); // Set up views for raw vectors. cml::vector<T> y_vec = cml::vector_view_array(y, _A.Rows()); // y^{k+1/2} to be updated to y^{k+1} const cml::vector<T> y0_vec = cml::vector_view_array(y0, _A.Rows()); // \tilde{y}^{k} input only cml::vector<T> x_vec = cml::vector_view_array(x, _A.Cols()); // x^{k+1/2} to be updated to x^{k+1} const cml::vector<T> x0_vec = cml::vector_view_array(x0, _A.Cols()); // \tilde{x}^{k} input only // Set (x, y) = (x0, y0). 
cml::vector_memcpy(&x_vec, &x0_vec); cml::vector_memcpy(&y_vec, &y0_vec); CUDA_CHECK_ERR(); POP_RANGE("P1alloc",P1alloc,2); double t0 = timer<double>(); if (_A.Order() == MatrixDense<T>::ROW) { PUSH_RANGE("P1(row)",P1row,2); const cml::matrix<T, CblasRowMajor> A = cml::matrix_view_array<T, CblasRowMajor> (_A.Data(), _A.Rows(), _A.Cols()); cml::matrix<T, CblasRowMajor> AA = cml::matrix_view_array<T, CblasRowMajor> (info->AA, min_dim, min_dim); cml::matrix<T, CblasRowMajor> L = cml::matrix_view_array<T, CblasRowMajor> (info->L, min_dim, min_dim); CUDA_CHECK_ERR(); POP_RANGE("P1(row)",P1row,2); if (s != info->s) { PUSH_RANGE("P1r_diagonal",P1r_diagonal,2); cml::matrix_memcpy(&L, &AA); cml::vector<T> diagL = cml::matrix_diagonal(&L); // vector view of diagonal of L cml::vector_add_constant(&diagL, s); // add s=kOne=1 to diagonal of L wrapcudaDeviceSynchronize(); // not needed as next call is cuda call that will occur sequentially on device CUDA_CHECK_ERR(); POP_RANGE("P1r_diagonal",P1r_diagonal,2); PUSH_RANGE("P1r_cholesky_decomp",P1r_cholesky_decomp,2); // L input contains AA + I, L on output has cholesky of input cml::linalg_cholesky_decomp(hdl, &L); wrapcudaDeviceSynchronize(); // not needed as next call is cuda call that will occur sequentially on device CUDA_CHECK_ERR(); POP_RANGE("P1r_cholesky_decomp",P1r_cholesky_decomp,2); } if (_A.Rows() > _A.Cols()) { PUSH_RANGE("P1r_gemv(r>c)",P1r_gemvrgc,2); // 1*A*y + 1*x -> x cml::blas_gemv(hdl, CUBLAS_OP_T, static_cast<T>(1.), &A, &y_vec, static_cast<T>(1.), &x_vec); POP_RANGE("P1r_gemv(r>c)",P1r_gemvrgc,2); PUSH_RANGE("P1r_cholesky_svx",P1r_cholesky_svx,2); // Solve LL^T x=b for x (where output for x_vec:= x^{k+1} := (A^T A + I)^{-1} (c + A^t d) in h2o4gpu paper) cml::linalg_cholesky_svx(hdl, &L, &x_vec); POP_RANGE("P1r_cholesky_svx",P1r_cholesky_svx,2); PUSH_RANGE("P1r_gemv2",P1r_gemv2,2); // 1*A*x + 0*y -> y (y^{k+1} := A x^{k+1} in h2o4gpu paper) cml::blas_gemv(hdl, CUBLAS_OP_N, static_cast<T>(1.), &A, &x_vec, static_cast<T>(0.), &y_vec); POP_RANGE("P1r_gemv2",P1r_gemv2,2); } else { PUSH_RANGE("P1r_gemv",P1r_gemv,2); cml::blas_gemv(hdl, CUBLAS_OP_N, static_cast<T>(1.), &A, &x_vec, static_cast<T>(-1.), &y_vec); POP_RANGE("P1r_gemv",P1r_gemv,2); PUSH_RANGE("P1r_cholesky_svx",P1r_cholesky_svx,2); cml::linalg_cholesky_svx(hdl, &L, &y_vec); POP_RANGE("P1r_cholesky_svx",P1r_cholesky_svx,2); PUSH_RANGE("P1r_gemv2",P1r_gemv2,2); cml::blas_gemv(hdl, CUBLAS_OP_T, static_cast<T>(-1.), &A, &y_vec, static_cast<T>(1.), &x_vec); POP_RANGE("P1r_gemv2",P1r_gemv2,2); PUSH_RANGE("P1r_axpy",P1r_axpy,2); cml::blas_axpy(hdl, static_cast<T>(1.), &y0_vec, &y_vec); POP_RANGE("P1r_axpy",P1r_axpy,2); } wrapcudaDeviceSynchronize(); CUDA_CHECK_ERR(); } else { PUSH_RANGE("P1(col)",P1col,2); const cml::matrix<T, CblasColMajor> A = cml::matrix_view_array<T, CblasColMajor> (_A.Data(), _A.Rows(), _A.Cols()); cml::matrix<T, CblasColMajor> AA = cml::matrix_view_array<T, CblasColMajor> (info->AA, min_dim, min_dim); cml::matrix<T, CblasColMajor> L = cml::matrix_view_array<T, CblasColMajor> (info->L, min_dim, min_dim); CUDA_CHECK_ERR(); POP_RANGE("P1(col)",P1col,2); if (s != info->s) { PUSH_RANGE("P1c_diagonal",P1c_diagonal,2); cml::matrix_memcpy(&L, &AA); cml::vector<T> diagL = cml::matrix_diagonal(&L); cml::vector_add_constant(&diagL, s); wrapcudaDeviceSynchronize(); CUDA_CHECK_ERR(); POP_RANGE("P1c_diagonal",P1c_diagonal,2); PUSH_RANGE("P1c_cholesky_decomp",P1c_cholesky_decomp,2); cml::linalg_cholesky_decomp(hdl, &L); wrapcudaDeviceSynchronize(); CUDA_CHECK_ERR(); 
POP_RANGE("P1c_cholesky_decomp",P1c_cholesky_decomp,2); } if (_A.Rows() > _A.Cols()) { PUSH_RANGE("P1c_gemv(r>c)",P1c_gemvrgc,2); cml::blas_gemv(hdl, CUBLAS_OP_T, static_cast<T>(1.), &A, &y_vec, static_cast<T>(1.), &x_vec); POP_RANGE("P1c_gemv(r>c)",P1c_gemvrgc,2); PUSH_RANGE("P1c_cholesky_svx",P1c_cholesky_svx,2); cml::linalg_cholesky_svx(hdl, &L, &x_vec); POP_RANGE("P1c_cholesky_svx",P1c_cholesky_svx,2); PUSH_RANGE("P1c_gemv2",P1c_gemv2,2); cml::blas_gemv(hdl, CUBLAS_OP_N, static_cast<T>(1.), &A, &x_vec, static_cast<T>(0.), &y_vec); POP_RANGE("P1c_gemv2",P1c_gemv2,2); } else { PUSH_RANGE("P1c_gemv",P1c_gemv,2); cml::blas_gemv(hdl, CUBLAS_OP_N, static_cast<T>(1.), &A, &x_vec, static_cast<T>(-1.), &y_vec); POP_RANGE("P1c_gemv",P1c_gemv,2); PUSH_RANGE("P1c_cholesky_svx",P1c_cholesky_svx,2); cml::linalg_cholesky_svx(hdl, &L, &y_vec); POP_RANGE("P1c_cholesky_svx",P1c_cholesky_svx,2); PUSH_RANGE("P1c_gemv2",P1c_gemv2,2); cml::blas_gemv(hdl, CUBLAS_OP_T, static_cast<T>(-1.), &A, &y_vec, static_cast<T>(1.), &x_vec); POP_RANGE("P1c_gemv2",P1c_gemv2,2); PUSH_RANGE("P1c_axpy",P1c_axpy,2); cml::blas_axpy(hdl, static_cast<T>(1.), &y0_vec, &y_vec); POP_RANGE("P1c_axpy",P1c_axpy,2); } wrapcudaDeviceSynchronize(); CUDA_CHECK_ERR(); } PUSH_RANGE("P2",P2,1); #ifdef DEBUG double t1 = timer<double>() - t0; printf("Time to compute Cholesky decomp and backward solve: %f\n", t1); // Verify that projection was successful. CheckProjection(&_A, x0, y0, x, y, s, static_cast<T>(1e3) * std::numeric_limits<T>::epsilon()); #endif cudaDeviceSynchronize(); // added synch POP_RANGE("P2",P2,1); info->s = s; return 0; } #if !defined(H2O4GPU_DOUBLE) || H2O4GPU_DOUBLE==1 template class ProjectorDirect<double, MatrixDense<double> >; #endif #if !defined(H2O4GPU_SINGLE) || H2O4GPU_SINGLE==1 template class ProjectorDirect<float, MatrixDense<float> >; #endif } // namespace h2o4gpu
#include "pcl/gpu/utils/safe_call.hpp" #include "thrust/transform.h" #include "thrust/device_ptr.h" namespace pcl { namespace device { //[spinimage][angles] = [0..FSize][..FSize] extern __shared__ float simage_angles[]; template<class It> __device__ __forceinline__ float3 fetch(It ptr, int index) { return *(float3*)&ptr[index]; } //template<class It> __device__ __forceinline__ float3 fetch(It ptr, int index) { return tr(ptr[index]); } struct UseCustomAxis { float3 rotation_axis; __device__ __forceinline__ float3 getRotationAxes(int /*index*/, const float3& /*normal*/) const { return rotation_axis; } }; struct UseCustomAxesCloud { const NormalType* rotation_axes_cloud; __device__ __forceinline__ float3 getRotationAxes(int index, const float3& /*normal*/) const { return fetch(rotation_axes_cloud, index); } }; struct UseOriginNormal { __device__ __forceinline__ float3 getRotationAxes(int /*index*/, const float3& normal) const { return normal; } }; struct Div12eps { __device__ __forceinline__ float operator()(float v1, float v2) const { return (float)(v1 / ( v2 + numeric_limits<double>::epsilon() )); } }; struct DivValIfNonZero { float val; __device__ __forceinline__ DivValIfNonZero(float value) : val(value) {} __device__ __forceinline__ float operator()(float v) const { return val == 0 ? v : v/val; } }; template<bool radial, bool angular, typename AxesStrategy> struct SpinImpl : public AxesStrategy { enum { CTA_SIZE = 192 }; int work_size; const int* indices; const PointType* input_cloud; const NormalType* input_normals; const PointType* surface; const NormalType* normals; PtrStep<int> neighbor_indices; const int* neighbor_indices_sizes; float support_angle_cos; int min_neighb; int image_width; float bin_size; int FSize; mutable PtrStep<float> output; static __device__ __host__ __forceinline__ int computeFSize(int image_width) { int cols = 1 + image_width * 2; int rows = 1 + image_width; return cols * rows; } __device__ __forceinline__ void operator()() const { int i_input = blockIdx.x + gridDim.x * blockIdx.y; int index = indices[i_input]; int neighb_count = neighbor_indices_sizes[i_input]; const int *ginds = neighbor_indices.ptr (i_input); if (neighb_count < min_neighb) return; //set zeros to spin image Block::fill(simage_angles, simage_angles + FSize, 0.f); if (angular) //set zeros to angles Block::fill(simage_angles + FSize, simage_angles + FSize + FSize, 0.f); __syncthreads(); float3 origin_point = fetch(input_cloud, index); float3 origin_normal = input_normals ? 
fetch(input_normals, index) : make_float3(0.f, 0.f, 0.f); origin_normal = normalized_safe(origin_normal); //normalize if non-zero float3 rotation_axis = AxesStrategy::getRotationAxes(index, origin_normal); rotation_axis = normalized_safe(rotation_axis); //normalize if non-zero const float eps = numeric_limits<float>::epsilon (); for(int i_neighb = threadIdx.x; i_neighb < neighb_count; i_neighb += CTA_SIZE) { int neighb_index = ginds[i_neighb]; // first, skip the points with distant normals float cos_between_normals = -2.f; if (angular || support_angle_cos > 0.f) // not bogus { float3 normal = normalized(fetch(normals, neighb_index)); cos_between_normals = dot(origin_normal, normal); cos_between_normals = fmax (-1.f, fmin (1.f, cos_between_normals)); if (fabs(cos_between_normals) < support_angle_cos) // allow counter-directed normals continue; cos_between_normals = fabs(cos_between_normals); // the normal is not used explicitly from now } // now compute the coordinate in cylindric coordinate system associated with the origin point float3 direction = fetch(surface, neighb_index) - origin_point; float direction_norm = norm (direction); // ignore the point itself; it does not contribute really if (direction_norm < 10 * eps) continue; // the angle between the normal vector and the direction to the point float cos_dir_axis = dot(direction, rotation_axis) / direction_norm; cos_dir_axis = fmax(-1.f, fmin(1.f, cos_dir_axis)); // compute coordinates w.r.t. the reference frame float beta = numeric_limits<float>::quiet_NaN(); float alpha = numeric_limits<float>::quiet_NaN(); if (radial) // radial spin image structure { beta = asinf(cos_dir_axis); // yes, arc sine! to get the angle against tangent, not normal! alpha = direction_norm; } else // rectangular spin-image structure { beta = direction_norm * cos_dir_axis; alpha = direction_norm * sqrt (1.0 - cos_dir_axis*cos_dir_axis); if (fabs (beta) >= bin_size * image_width || alpha >= bin_size * image_width) continue; // outside the cylinder } // bilinear interpolation float beta_bin_size = radial ? (PI*0.5f/image_width) : bin_size; int beta_bin = floorf(beta / beta_bin_size) + image_width; int alpha_bin = floorf(alpha / bin_size); //alpha_bin = min(simage_cols, max(0, alpha_bin)); //beta_bin = min(simage_rows, max(0, beta_bin)); if (alpha_bin == image_width) // border points { alpha_bin--; // HACK: to prevent a > 1 alpha = bin_size * (alpha_bin + 1) - eps; } if (beta_bin == 2*image_width ) // border points { beta_bin--; // HACK: to prevent b > 1 beta = beta_bin_size * (beta_bin - image_width + 1) - eps; } float a = alpha/bin_size - alpha_bin; float b = beta/beta_bin_size - float(beta_bin-image_width); incSpinI(alpha_bin, beta_bin, (1-a) * (1-b)); incSpinI(alpha_bin+1, beta_bin, a * (1-b)); incSpinI(alpha_bin, beta_bin+1, (1-a) * b ); incSpinI(alpha_bin+1, beta_bin+1, a * b ); if (angular) { float anlge_betwwn_normals = acos(cos_between_normals); incAngle(alpha_bin, beta_bin, anlge_betwwn_normals * (1-a) * (1-b)); incAngle(alpha_bin+1, beta_bin, anlge_betwwn_normals * a * (1-b)); incAngle(alpha_bin, beta_bin+1, anlge_betwwn_normals * (1-a) * b ); incAngle(alpha_bin+1, beta_bin+1, anlge_betwwn_normals * a * b ); } } /* for(int i_neighb = threadIdx.x; i_neighb < neighb_count; i_neighb += CTA_SIZE) */ __syncthreads(); if (angular) { //transform sum to average dividing angle/spinimage element-wize. 
const float *amgles_beg = simage_angles + FSize; const float *amgles_end = amgles_beg + FSize; const float *images_beg = simage_angles; Block::transfrom(amgles_beg, amgles_end, images_beg, output.ptr(i_input), Div12eps()); ////Block::copy(amgles_beg, amgles_end, output.ptr(i_input)); //Block::copy(images_beg, images_beg + FSize, output.ptr(i_input)); } else { // copy to compute sum Block::copy(simage_angles, simage_angles + FSize, simage_angles + FSize); __syncthreads(); //compute sum Block::reduce_n(simage_angles + FSize, FSize, pcl::device::plus<float>()); __syncthreads(); float sum = simage_angles[FSize]; Block::transfrom(simage_angles, simage_angles + FSize, output.ptr(i_input), DivValIfNonZero(sum)); } } __device__ __forceinline__ void incSpinI(int y, int x, float value) const { atomicAdd(simage_angles + y * (2*image_width + 1) + x, value); } __device__ __forceinline__ void incAngle(int y, int x, float value) const { atomicAdd(simage_angles+FSize + y * (2*image_width + 1) + x, value); } }; template<typename Impl> __global__ void computeSpinKernel(const Impl impl) { impl(); } template<typename Impl> inline void computeSpinImages_caller(Impl& impl, float support_angle_cos, const Indices& indices, const PointCloud& input_cloud, const Normals& input_normals, const PointCloud& surface, const Normals& normals, const NeighborIndices& neighbours, int min_neighb, int image_width, float bin_size, PtrStep<float> output) { impl.work_size = (int)indices.size(); impl.indices = indices; impl.input_cloud = input_cloud; impl.input_normals = input_normals; impl.surface = surface; impl.normals = normals; impl.neighbor_indices = neighbours; impl.neighbor_indices_sizes = neighbours.sizes; impl.min_neighb = min_neighb; impl.image_width = image_width; impl.bin_size = bin_size; impl.support_angle_cos = support_angle_cos; impl.FSize = Impl::computeFSize(image_width); impl.output = output; const int total = (int)indices.size(); const int max_grid_dim = 65535; const int smem_size = 2 * Impl::computeFSize(image_width) * sizeof(float); dim3 block(Impl::CTA_SIZE); dim3 grid(min(total, max_grid_dim), divUp(total, max_grid_dim)); computeSpinKernel<Impl><<<grid, block, smem_size>>>(impl); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } template<bool radial, bool angular> void computeSpinImagesOriginNormalEx(float support_angle_cos, const Indices& indices, const PointCloud& input_cloud, const Normals& input_normals, const PointCloud& surface, const Normals& normals, const NeighborIndices& neighbours, int min_neighb, int image_width, float bin_size, PtrStep<float> output) { SpinImpl<radial, angular, UseOriginNormal> si; computeSpinImages_caller(si, support_angle_cos, indices, input_cloud, input_normals, surface, normals, neighbours, min_neighb, image_width, bin_size, output); } template<bool radial, bool angular> void computeSpinImagesCustomAxesEx(float support_angle_cos, const Indices& indices, const PointCloud& input_cloud, const Normals& input_normals, const PointCloud& surface, const Normals& normals, const NeighborIndices& neighbours, int min_neighb, int image_width, float bin_size, const float3& rotation_axis, PtrStep<float> output) { SpinImpl<radial, angular, UseCustomAxis> si; si.rotation_axis = rotation_axis; computeSpinImages_caller(si, support_angle_cos, indices, input_cloud, input_normals, surface, normals, neighbours, min_neighb, image_width, bin_size, output); } template<bool radial, bool angular> void computeSpinImagesCustomAxesCloudEx(float support_angle_cos, const 
Indices& indices, const PointCloud& input_cloud, const Normals& input_normals, const PointCloud& surface, const Normals& normals, const NeighborIndices& neighbours, int min_neighb, int image_width, float bin_size, const Normals& rotation_axes_cloud, PtrStep<float> output) { SpinImpl<radial, angular, UseCustomAxesCloud> si; si.rotation_axes_cloud = rotation_axes_cloud; computeSpinImages_caller(si, support_angle_cos, indices, input_cloud, input_normals, surface, normals, neighbours, min_neighb, image_width, bin_size, output); } } } void pcl::device::computeSpinImagesOrigigNormal(bool radial, bool angular, float support_angle_cos, const Indices& indices, const PointCloud& input_cloud, const Normals& input_normals, const PointCloud& surface, const Normals& normals, const NeighborIndices& neighbours, int min_neighb, int image_width, float bin_size, PtrStep<float> output) { typedef void (*originNormal)(float, const Indices&, const PointCloud&, const Normals&, const PointCloud&, const Normals&, const NeighborIndices&, int , int , float, PtrStep<float>); const originNormal table[2][2] = { { computeSpinImagesOriginNormalEx<false, false>, computeSpinImagesOriginNormalEx<false, true> }, { computeSpinImagesOriginNormalEx<true, false>, computeSpinImagesOriginNormalEx<true, true> } }; table[(int)radial][(int)angular](support_angle_cos, indices, input_cloud, input_normals, surface, normals, neighbours, min_neighb, image_width, bin_size, output); } void pcl::device::computeSpinImagesCustomAxes(bool radial, bool angular, float support_angle_cos, const Indices& indices, const PointCloud& input_cloud, const Normals& input_normals, const PointCloud& surface, const Normals& normals, const NeighborIndices& neighbours, int min_neighb, int image_width, float bin_size, const float3& rotation_axis, PtrStep<float> output) { typedef void (*customAxes)(float, const Indices&, const PointCloud&, const Normals&, const PointCloud&, const Normals&, const NeighborIndices&, int, int, float, const float3&, PtrStep<float>); const customAxes table[2][2] = { { computeSpinImagesCustomAxesEx<false, false>, computeSpinImagesCustomAxesEx<false, true> }, { computeSpinImagesCustomAxesEx<true, false>, computeSpinImagesCustomAxesEx<true, true> } }; table[(int)radial][(int)angular](support_angle_cos, indices, input_cloud, input_normals, surface, normals, neighbours, min_neighb, image_width, bin_size, rotation_axis, output); } void pcl::device::computeSpinImagesCustomAxesCloud(bool radial, bool angular, float support_angle_cos, const Indices& indices, const PointCloud& input_cloud, const Normals& input_normals, const PointCloud& surface, const Normals& normals, const NeighborIndices& neighbours, int min_neighb, int image_width, float bin_size, const Normals& rotation_axes_cloud, PtrStep<float> output) { typedef void (*customAxesCloud)(float, const Indices&, const PointCloud&, const Normals&, const PointCloud&, const Normals&, const NeighborIndices&, int, int, float, const Normals&, PtrStep<float>); const customAxesCloud table[2][2] = { { computeSpinImagesCustomAxesCloudEx<false, false>, computeSpinImagesCustomAxesCloudEx<false, true> }, { computeSpinImagesCustomAxesCloudEx<true, false>, computeSpinImagesCustomAxesCloudEx<true, true> } }; table[(int)radial][(int)angular](support_angle_cos, indices, input_cloud, input_normals, surface, normals, neighbours, min_neighb, image_width, bin_size, rotation_axes_cloud, output); }; namespace pcl { namespace device { struct GtThan { int val; GtThan(int value) : val(value) {} __device__ 
__forceinline__ unsigned char operator()(int size) const { return size > val ? 1 : 0; } }; } } void pcl::device::computeMask(const NeighborIndices& neighbours, int min_neighb, DeviceArray<unsigned char>& mask) { thrust::device_ptr<int> beg((int*)neighbours.sizes.ptr()); thrust::device_ptr<int> end = beg + neighbours.sizes.size(); thrust::device_ptr<unsigned char> out(mask.ptr()); thrust::transform(beg, end, out, GtThan(min_neighb)); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); }
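// Illustrative sketch (not part of PCL): the four incSpinI calls in the spin-image kernel
// perform a bilinear splat -- a sample at fractional bin coordinates (x_bin + a, y_bin + b)
// is spread over its four neighbouring histogram bins with weights (1-a)(1-b), a(1-b),
// (1-a)b and ab. A host-side version of that accumulation; the row-major 'hist' layout with
// 'cols' bins per row is an assumption mirroring the 2*image_width+1 stride used on the GPU.
inline void bilinear_splat(float* hist, int cols, int x_bin, int y_bin,
                           float a, float b, float value)
{
    hist[ y_bin      * cols + x_bin    ] += value * (1.f - a) * (1.f - b);
    hist[ y_bin      * cols + x_bin + 1] += value *        a  * (1.f - b);
    hist[(y_bin + 1) * cols + x_bin    ] += value * (1.f - a) *        b;
    hist[(y_bin + 1) * cols + x_bin + 1] += value *        a  *        b;
}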
#include <algorithm> #include <iostream> #include <device_launch_parameters.h> #include <common/Constants.h> #include "hashing/TicketBoardSet.cuh" namespace surfelwarp { namespace device { __global__ void buildLatticeIndexKernel( cudaTextureObject_t rgb_image, const unsigned subsampled_rows, const unsigned subsampled_cols, //Normalizing constants const float sigma_alpha, const float sigma_beta, //The information for hash table unsigned* ticket_board, LatticeCoordKey<5>* table, const unsigned table_size, const uint2 primary_hash, const uint2 step_hash ) { //Compute the position const auto x = threadIdx.x + blockDim.x * blockIdx.x; const auto y = threadIdx.y + blockDim.y * blockIdx.y; if(x >= subsampled_cols || y >= subsampled_rows) return; //Construct the feature vector const auto rgb_x = crf_subsample_rate * x; const auto rgb_y = crf_subsample_rate * y; const float4 normalized_rgba = tex2D<float4>(rgb_image, rgb_x, rgb_y); //Construct the feature for this pixel float feature[5]; feature[0] = float(x) / sigma_alpha; feature[1] = float(y) / sigma_alpha; feature[2] = normalized_rgba.x * 255.f / sigma_beta; feature[3] = normalized_rgba.y * 255.f / sigma_beta; feature[4] = normalized_rgba.z * 255.f / sigma_beta; //Compute the lattice key LatticeCoordKey<5> lattice_coord_keys[6]; float lattice_weights[7]; permutohedral_lattice(feature, lattice_coord_keys, lattice_weights); //Insert into the hash table for(auto i = 0; i < 6; i++) { const auto hashed_lattice = lattice_coord_keys[i].hash(); hashing::device::insertTicketSetEntry( lattice_coord_keys[i], hashed_lattice, ticket_board, table, table_size, primary_hash, step_hash ); } } enum { kSplatBlockDim = 8, kSplatBlockSize = kSplatBlockDim * kSplatBlockDim, }; __global__ void foregroundPermutohedralSplatKernel( //The input maps cudaTextureObject_t meanfield_foreground_in, cudaTextureObject_t rgb_image, const unsigned subsampled_rows, const unsigned subsampled_cols, //Normalizing constants const float sigma_alpha, const float sigma_beta, //The hash table attributes const typename hashing::TicketBoardSet<LatticeCoordKey<5>>::Device lattice_set, //The splat value PtrSz<float2> lattice_value_array ) { //Compute the position const auto x = threadIdx.x + blockDim.x * blockIdx.x; const auto y = threadIdx.y + blockDim.y * blockIdx.y; if(x >= subsampled_cols || y >= subsampled_rows) return; //Construct the feature vector const auto rgb_x = crf_subsample_rate * x; const auto rgb_y = crf_subsample_rate * y; const float4 normalized_rgba = tex2D<float4>(rgb_image, rgb_x, rgb_y); //Construct the feature for this pixel float feature[5]; feature[0] = float(x) / sigma_alpha; feature[1] = float(y) / sigma_alpha; feature[2] = normalized_rgba.x * 255.f / sigma_beta; feature[3] = normalized_rgba.y * 255.f / sigma_beta; feature[4] = normalized_rgba.z * 255.f / sigma_beta; //Compute the lattice key LatticeCoordKey<5> lattice_coord_keys[6]; float lattice_weights[7]; permutohedral_lattice(feature, lattice_coord_keys, lattice_weights); //The shared index array __shared__ unsigned lattice_index[6 * kSplatBlockSize]; //Query the index in the compacted array const unsigned inblock_offset = threadIdx.x + kSplatBlockDim * threadIdx.y; unsigned* compacted_index = lattice_index + 6 * inblock_offset; for(auto i = 0; i < 6; i++) { const auto hashed_lattice = lattice_coord_keys[i].hash(); compacted_index[i] = hashing::device::retrieveTicketSetKeyIndex<LatticeCoordKey<5>>( lattice_coord_keys[i], hashed_lattice, lattice_set.ticket_board, lattice_set.table, lattice_set.table_size, 
lattice_set.primary_hash, lattice_set.step_hash ); //Debug //if(compacted_index[i] == 0xffffffffu) { // printf("Incorrect retrieve\n"); //} } //Compute the energy for this pixel const float prev_foreground_prob = tex2D<float>(meanfield_foreground_in, x, y); const float prev_backround_prob = 1.0f - prev_foreground_prob; __shared__ float2 lattice_energy[6 * kSplatBlockSize]; float2* energy_thread = lattice_energy + 6 * inblock_offset; for(auto i = 0; i < 6; i++) { energy_thread[i].x = prev_backround_prob * lattice_weights[i]; energy_thread[i].y = prev_foreground_prob * lattice_weights[i]; } //Sync threads here __syncthreads(); for(auto i = 0; i < 6; i++) { const auto curr_lattice = compacted_index[i]; float2 energy = make_float2(0.0f, 0.0f); bool write_to = true; //The loop to iterate through the shared memory //The energy of this thread is also counted here for(auto j = 0; j < 6 * kSplatBlockSize; j++) { if(curr_lattice == lattice_index[j]) { energy.x += lattice_energy[j].x; energy.y += lattice_energy[j].y; const auto j_thread = j / 6; if(j_thread < inblock_offset) write_to = false; } } //write to the global memory if required if(write_to && curr_lattice < lattice_value_array.size) { float* foreground_energy_pos = &(lattice_value_array[curr_lattice].x); float* background_energy_pos = foreground_energy_pos + 1; atomicAdd(foreground_energy_pos, energy.x); atomicAdd(background_energy_pos, energy.y); } } // energy adding loop } // energy splat kernel __global__ void foregroundPermutohedralSliceKernel( cudaTextureObject_t meanfield_foreground_in, cudaTextureObject_t rgb_image, PtrStepSz<const float2> unary_energy_map, //Normalizing constants const float sigma_alpha, const float sigma_beta, const float sigma_gamma, //The weight constants const float appearance_weight, const float smooth_weight, //The search structure and compacted value const typename hashing::TicketBoardSet<LatticeCoordKey<5>>::Device lattice_set, const PtrSz<const float2> lattice_energy, //The output cudaSurfaceObject_t meanfield_foreground_out ) { //Compute the position const int x = threadIdx.x + blockDim.x * blockIdx.x; const int y = threadIdx.y + blockDim.y * blockIdx.y; if(x >= unary_energy_map.cols || y >= unary_energy_map.rows) return; //Construct the feature vector const int rgb_x = crf_subsample_rate * x; const int rgb_y = crf_subsample_rate * y; const float4 normalized_rgba = tex2D<float4>(rgb_image, rgb_x, rgb_y); //Construct the feature for this pixel float feature[5]; feature[0] = float(x) / sigma_alpha; feature[1] = float(y) / sigma_alpha; feature[2] = normalized_rgba.x * 255.f / sigma_beta; feature[3] = normalized_rgba.y * 255.f / sigma_beta; feature[4] = normalized_rgba.z * 255.f / sigma_beta; //Compute the lattice key LatticeCoordKey<5> lattice_coord_keys[6]; float lattice_weights[7]; permutohedral_lattice(feature, lattice_coord_keys, lattice_weights); //Collect the energy from lattice float e_foreground = 0.0f, e_background = 0.0f; for(auto i = 0; i < 6; i++) { const auto hashed_lattice = lattice_coord_keys[i].hash(); //the index in the compacted array const auto compacted_index = hashing::device::retrieveTicketSetKeyIndex<LatticeCoordKey<5>>( lattice_coord_keys[i], hashed_lattice, lattice_set.ticket_board, lattice_set.table, lattice_set.table_size, lattice_set.primary_hash, lattice_set.step_hash ); //Collect the energy if(compacted_index < lattice_energy.size) { const float2 energy = lattice_energy[compacted_index]; const float weight = appearance_weight; e_foreground += weight * lattice_weights[i] * 
energy.x; e_background += weight * lattice_weights[i] * energy.y; } } //Collect the smooth energy const int halfsize = 7; for(int neighbor_y = y - halfsize; neighbor_y <= y + halfsize; neighbor_y++) { for(int neighbor_x = x - halfsize; neighbor_x <= x + halfsize; neighbor_x++) { //Compute the kernel value const float kernel_value = smooth_weight * smooth_kernel(x, y, neighbor_x, neighbor_y, sigma_gamma); //Message passing const float neighbor_foreground_prob = tex2D<float>(meanfield_foreground_in, neighbor_x, neighbor_y); const float neighbor_backround_prob = 1.0f - neighbor_foreground_prob; //Note that the window might be outside, in that case, tex2D should return zero if(neighbor_x >= 0 && neighbor_y >= 0 && neighbor_x < unary_energy_map.cols && neighbor_y < unary_energy_map.rows) { e_foreground += (neighbor_backround_prob * kernel_value); e_background += (neighbor_foreground_prob * kernel_value); } } }// Collect of the smooth kernel // subtract self-energy const float prev_foreground_prob = tex2D<float>(meanfield_foreground_in, x, y); const float prev_backround_prob = 1.0f - prev_foreground_prob; e_foreground -= prev_backround_prob * (appearance_weight + smooth_weight); e_background -= prev_foreground_prob * (appearance_weight + smooth_weight); //Update the mean field locally const float2 unary_energy = unary_energy_map.ptr(y)[x]; const float foreground_energy = unary_energy.x + e_foreground; const float background_energy = unary_energy.y + e_background; const float energy_diff = foreground_energy - background_energy; //Note the numerical problem involved with expf float foreground_prob; const float energy_cutoff = 20.0f; if(energy_diff < - energy_cutoff) { foreground_prob = 1.0f; } else if(energy_diff > energy_cutoff) { foreground_prob = 0.0f; } else { const float exp_energy_diff = __expf(energy_diff); foreground_prob = 1.0f / (1.0f + exp_energy_diff); } //Well there might be numerical errors if (foreground_prob > 1.0f) { foreground_prob = 1.0f; } else if(foreground_prob < 0.0f) { foreground_prob = 1e-3f; } //Write to the surface surf2Dwrite(foreground_prob, meanfield_foreground_out, x * sizeof(float), y); } //The slice kernel }; // namespace device }; // namespace surfelwarp void surfelwarp::ForegroundSegmenterPermutohedral::AllocateBuffer( unsigned clip_rows, unsigned clip_cols ) { //Do subsampling here const auto subsampled_rows = clip_rows / crf_subsample_rate; const auto subsampled_cols = clip_cols / crf_subsample_rate; //Allocate the buffer for meanfield q createFloat1TextureSurface(subsampled_rows, subsampled_cols, m_meanfield_foreground_collect_subsampled[0]); createFloat1TextureSurface(subsampled_rows, subsampled_cols, m_meanfield_foreground_collect_subsampled[1]); //Allocate the buffer for unary energy m_unary_energy_map_subsampled.create(subsampled_rows, subsampled_cols); //Allocate the buffer for segmentation mask createUChar1TextureSurface(subsampled_rows, subsampled_cols, m_segmented_mask_collect_subsampled); //Allocate sub-buffers allocateLatticeIndexBuffer(); allocateLatticeValueBuffer(); //Allocate the upsampled buffer createUChar1TextureSurface(clip_rows, clip_cols, m_foreground_mask_collect_upsampled); createFloat1TextureSurface(clip_rows, clip_cols, m_filter_foreground_mask_collect_upsampled); } void surfelwarp::ForegroundSegmenterPermutohedral::ReleaseBuffer() { releaseTextureCollect(m_meanfield_foreground_collect_subsampled[0]); releaseTextureCollect(m_meanfield_foreground_collect_subsampled[1]); releaseTextureCollect(m_segmented_mask_collect_subsampled); 
m_unary_energy_map_subsampled.release(); //Release other buffers releaseLatticeIndexBuffer(); releaseLatticeValueBuffer(); } void surfelwarp::ForegroundSegmenterPermutohedral::SetInputImages( cudaTextureObject_t clip_normalized_rgb_img, cudaTextureObject_t raw_depth_img, cudaTextureObject_t clip_depth_img, int frame_idx, cudaTextureObject_t clip_background_rgb ) { m_input_texture.clip_normalize_rgb_img = clip_normalized_rgb_img; m_input_texture.raw_depth_img = raw_depth_img; m_input_texture.clip_depth_img = clip_depth_img; } void surfelwarp::ForegroundSegmenterPermutohedral::Segment(cudaStream_t stream) { //Init the mean field initMeanfieldUnaryEnergy(stream); //Build the index buildLatticeIndex(stream); //The inference loop const auto max_iters = Constants::kMeanfieldSegmentIteration; for(auto i = 0; i < max_iters; i++) { //Debug //saveMeanfieldApproximationMap(i); //The inference iters splatEnergy(stream); slice(stream); } //Write to the segmentation mask writeSegmentationMask(stream); upsampleFilterForegroundMask(stream); } cudaTextureObject_t surfelwarp::ForegroundSegmenterPermutohedral::ForegroundMask() const { return m_foreground_mask_collect_upsampled.texture; } cudaTextureObject_t surfelwarp::ForegroundSegmenterPermutohedral::FilterForegroundMask() const { return m_filter_foreground_mask_collect_upsampled.texture; } cudaTextureObject_t surfelwarp::ForegroundSegmenterPermutohedral::SubsampledForegroundMask() const { return m_segmented_mask_collect_subsampled.texture; } void surfelwarp::ForegroundSegmenterPermutohedral::initMeanfieldUnaryEnergy(cudaStream_t stream) { initMeanfieldUnaryForegroundSegmentation( m_input_texture.raw_depth_img, m_input_texture.clip_depth_img, m_unary_energy_map_subsampled, m_meanfield_foreground_collect_subsampled[0].surface, stream ); m_updated_meanfield_idx = 0; } /* Method to build the hash index of lattice coordinate */ void surfelwarp::ForegroundSegmenterPermutohedral::allocateLatticeIndexBuffer() { //The size of this set is almost emperical m_lattice_set.AllocateBuffer(kMaxUniqueLattices); } void surfelwarp::ForegroundSegmenterPermutohedral::releaseLatticeIndexBuffer() { m_lattice_set.ReleaseBuffer(); } void surfelwarp::ForegroundSegmenterPermutohedral::buildLatticeIndex( cudaStream_t stream ) { //Reset the table LatticeCoordKey<5> empty; empty.set_null(); m_lattice_set.ResetTable(empty, stream); //Construct the size const unsigned subsampled_rows = m_unary_energy_map_subsampled.rows(); const unsigned subsampled_cols = m_unary_energy_map_subsampled.cols(); dim3 blk(8, 8); dim3 grid(divUp(subsampled_cols, blk.x), divUp(subsampled_rows, blk.y)); //Invoke the insert kernel device::buildLatticeIndexKernel<<<grid, blk, 0, stream>>>( //The image information m_input_texture.clip_normalize_rgb_img, subsampled_rows, subsampled_cols, //The gaussian sigma sigma_alpha_, sigma_beta_, //The hash table information m_lattice_set.TicketBoard(), m_lattice_set.Table(), m_lattice_set.TableSize(), m_lattice_set.PrimaryHash(), m_lattice_set.StepHash() ); //Build index on the lattice set m_lattice_set.BuildIndex(stream); //Debug //m_lattice_set.IndexInformation(); } /* The method to perform splat */ void surfelwarp::ForegroundSegmenterPermutohedral::allocateLatticeValueBuffer() { m_lattice_energy_array.create(kMaxUniqueLattices); } void surfelwarp::ForegroundSegmenterPermutohedral::releaseLatticeValueBuffer() { m_lattice_energy_array.release(); } void surfelwarp::ForegroundSegmenterPermutohedral::splatEnergy(cudaStream_t stream) { //First clear the value 
cudaSafeCall(cudaMemsetAsync( m_lattice_energy_array.ptr(), 0, m_lattice_energy_array.size() * sizeof(float2), stream )); //Construct the size const unsigned subsampled_rows = m_unary_energy_map_subsampled.rows(); const unsigned subsampled_cols = m_unary_energy_map_subsampled.cols(); dim3 blk(device::kSplatBlockDim, device::kSplatBlockDim); dim3 grid(divUp(subsampled_cols, blk.x), divUp(subsampled_rows, blk.y)); //Constrcuct the device hash const auto device_set = m_lattice_set.OnDevice(); //Invoke the kernel device::foregroundPermutohedralSplatKernel<<<grid, blk, 0, stream>>>( m_meanfield_foreground_collect_subsampled[m_updated_meanfield_idx].texture, m_input_texture.clip_normalize_rgb_img, subsampled_rows, subsampled_cols, sigma_alpha_, sigma_beta_, device_set, m_lattice_energy_array ); //Sync and check error #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(cudaStreamSynchronize(stream)); cudaSafeCall(cudaGetLastError()); #endif } void surfelwarp::ForegroundSegmenterPermutohedral::slice(cudaStream_t stream) { //Constrcuct the device hash const auto device_set = m_lattice_set.OnDevice(); //The output index const auto meanfield_input_idx = m_updated_meanfield_idx; const auto meanfield_output_idx = (m_updated_meanfield_idx + 1) % 2; //Local constants const float sigma_gamma = 3; const float apperance_weight = 1.0f; const float smooth_weight = 0.5f; //Invoke the kernel const unsigned subsampled_rows = m_unary_energy_map_subsampled.rows(); const unsigned subsampled_cols = m_unary_energy_map_subsampled.cols(); dim3 blk(8, 8); dim3 grid(divUp(subsampled_cols, blk.x), divUp(subsampled_rows, blk.y)); device::foregroundPermutohedralSliceKernel<<<grid, blk, 0, stream>>>( m_meanfield_foreground_collect_subsampled[meanfield_input_idx].texture, m_input_texture.clip_normalize_rgb_img, m_unary_energy_map_subsampled, sigma_alpha_, sigma_beta_, sigma_gamma, apperance_weight, smooth_weight, device_set, m_lattice_energy_array, m_meanfield_foreground_collect_subsampled[meanfield_output_idx].surface ); //Update the index here? m_updated_meanfield_idx = meanfield_output_idx; //Sync and check error #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(cudaStreamSynchronize(stream)); cudaSafeCall(cudaGetLastError()); #endif } void surfelwarp::ForegroundSegmenterPermutohedral::writeSegmentationMask( cudaStream_t stream ) { const auto write_idx = m_updated_meanfield_idx % 2; writeForegroundSegmentationMask( m_meanfield_foreground_collect_subsampled[write_idx].texture, m_unary_energy_map_subsampled.rows(), m_unary_energy_map_subsampled.cols(), m_segmented_mask_collect_subsampled.surface, stream ); } void surfelwarp::ForegroundSegmenterPermutohedral::upsampleFilterForegroundMask(cudaStream_t stream) { ForegroundSegmenter::UpsampleFilterForegroundMask( m_segmented_mask_collect_subsampled.texture, m_unary_energy_map_subsampled.rows(), m_unary_energy_map_subsampled.cols(), crf_subsample_rate, Constants::kForegroundSigma, m_foreground_mask_collect_upsampled.surface, m_filter_foreground_mask_collect_upsampled.surface, stream ); } void surfelwarp::ForegroundSegmenterPermutohedral::saveMeanfieldApproximationMap(const unsigned int iter) { std::stringstream ss; ss << iter; std::string file_name = "meanfield-"; file_name += ss.str(); file_name += ".png"; Visualizer::SaveBinaryMeanfield(m_meanfield_foreground_collect_subsampled[m_updated_meanfield_idx].texture, file_name); }
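// -----------------------------------------------------------------------------
// Usage sketch: the segmenter above exposes a small pipeline-style API --
// AllocateBuffer, SetInputImages, Segment and the mask accessors. The driver
// below is a hypothetical illustration of that calling sequence; the texture
// objects, image sizes and frame index are placeholders supplied by the caller.
// -----------------------------------------------------------------------------
void runForegroundSegmentation(
    surfelwarp::ForegroundSegmenterPermutohedral& segmenter,
    cudaTextureObject_t clip_normalized_rgb,   // placeholder input textures
    cudaTextureObject_t raw_depth,
    cudaTextureObject_t clip_depth,
    unsigned clip_rows, unsigned clip_cols,
    int frame_idx,
    cudaStream_t stream
) {
    // One-time allocation at the clipped resolution; the class subsamples
    // internally by crf_subsample_rate.
    segmenter.AllocateBuffer(clip_rows, clip_cols);

    // Per-frame: bind the inputs, run the mean-field iterations, read the masks.
    segmenter.SetInputImages(clip_normalized_rgb, raw_depth, clip_depth, frame_idx, /*clip_background_rgb*/ 0);
    segmenter.Segment(stream);
    cudaSafeCall(cudaStreamSynchronize(stream));

    cudaTextureObject_t mask = segmenter.ForegroundMask();            // upsampled mask
    cudaTextureObject_t filtered = segmenter.FilterForegroundMask();  // filtered upsampled mask
    (void)mask; (void)filtered;
}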
#include <stdio.h> #include <array> #include <string> #include <type_traits> #include "gromacs/gpu_utils/device_stream.h" #include "gromacs/gpu_utils/gputraits.cuh" #include "gromacs/math/vec.h" #include "gromacs/math/vectypes.h" #include "gromacs/utility/exceptions.h" #include "gromacs/utility/fatalerror.h" #include "gromacs/utility/gmxassert.h" #include "gromacs/utility/stringutil.h" namespace gmx { namespace { /*! \brief Add the API information on the specific error to the error message. * * \param[in] deviceError The error to assert cudaSuccess on. * * \returns A description of the API error. Returns '(CUDA error #0 (cudaSuccess): no error)' in case deviceError is cudaSuccess. */ inline std::string getDeviceErrorString(const cudaError_t deviceError) { return formatString("CUDA error #%d (%s): %s.", deviceError, cudaGetErrorName(deviceError), cudaGetErrorString(deviceError)); } /*! \brief Check if API returned an error and throw an exception with information on it. * * \param[in] deviceError The error to assert cudaSuccess on. * \param[in] errorMessage Undecorated error message. * * \throws InternalError if deviceError is not a success. */ inline void checkDeviceError(const cudaError_t deviceError, const std::string& errorMessage) { if (deviceError != cudaSuccess) { GMX_THROW(gmx::InternalError(errorMessage + " " + getDeviceErrorString(deviceError))); } } /*! \brief Helper function to ensure no pending error silently * disrupts error handling. * * Asserts in a debug build if an unhandled error is present. Issues a * warning at run time otherwise. * * \param[in] errorMessage Undecorated error message. */ inline void ensureNoPendingDeviceError(const std::string& errorMessage) { // Ensure there is no pending error that would otherwise affect // the behaviour of future error handling. cudaError_t deviceError = cudaGetLastError(); if (deviceError == cudaSuccess) { return; } // If we would find an error in a release build, we do not know // what is appropriate to do about it, so assert only for debug // builds. const std::string fullErrorMessage = errorMessage + " An unhandled error from a previous CUDA operation was detected. " + gmx::getDeviceErrorString(deviceError); GMX_ASSERT(deviceError == cudaSuccess, fullErrorMessage.c_str()); // TODO When we evolve a better logging framework, use that // for release-build error reporting. gmx_warning("%s", fullErrorMessage.c_str()); } } // namespace } // namespace gmx enum class GpuApiCallBehavior; /* TODO error checking needs to be rewritten. We have 2 types of error checks needed based on where they occur in the code: - non performance-critical: these errors are unsafe to be ignored and must be _always_ checked for, e.g. initializations - performance critical: handling errors might hurt performance so care need to be taken when/if we should check for them at all, e.g. in cu_upload_X. However, we should be able to turn the check for these errors on! Probably we'll need two sets of the macros below... */ #define CHECK_CUDA_ERRORS #ifdef CHECK_CUDA_ERRORS /*! Check for CUDA error on the return status of a CUDA RT API call. */ # define CU_RET_ERR(deviceError, msg) \ do \ { \ if ((deviceError) != cudaSuccess) \ { \ gmx_fatal(FARGS, "%s\n", ((msg) + gmx::getDeviceErrorString(deviceError)).c_str()); \ } \ } while (0) #else /* CHECK_CUDA_ERRORS */ # define CU_RET_ERR(status, msg) \ do \ { \ } while (0) #endif /* CHECK_CUDA_ERRORS */ // TODO: the 2 functions below are pretty much a constructor/destructor of a simple // GPU table object. 
There is also almost self-contained fetchFromParamLookupTable() // in cuda_kernel_utils.cuh. They could all live in a separate class/struct file. /*! \brief Add a triplets stored in a float3 to an rvec variable. * * \param[out] a Rvec to increment * \param[in] b Float triplet to increment with. */ static inline void rvec_inc(rvec a, const float3 b) { rvec tmp = { b.x, b.y, b.z }; rvec_inc(a, tmp); } /*! \brief Returns true if all tasks in \p s have completed. * * \param[in] deviceStream CUDA stream to check. * * \returns True if all tasks enqueued in the stream \p deviceStream (at the time of this call) have completed. */ static inline bool haveStreamTasksCompleted(const DeviceStream& deviceStream) { cudaError_t stat = cudaStreamQuery(deviceStream.stream()); if (stat == cudaErrorNotReady) { // work is still in progress in the stream return false; } GMX_ASSERT(stat != cudaErrorInvalidResourceHandle, ("Stream identifier not valid. " + gmx::getDeviceErrorString(stat)).c_str()); // cudaSuccess and cudaErrorNotReady are the expected return values CU_RET_ERR(stat, "Unexpected cudaStreamQuery failure. "); GMX_ASSERT(stat == cudaSuccess, ("Values other than cudaSuccess should have been explicitly handled. " + gmx::getDeviceErrorString(stat)) .c_str()); return true; } /* Kernel launch helpers */ /*! \brief * A function for setting up a single CUDA kernel argument. * This is the tail of the compile-time recursive function below. * It has to be seen by the compiler first. * * \tparam totalArgsCount Number of the kernel arguments * \tparam KernelPtr Kernel function handle type * \param[in] argIndex Index of the current argument */ template<size_t totalArgsCount, typename KernelPtr> void prepareGpuKernelArgument(KernelPtr /*kernel*/, std::array<void*, totalArgsCount>* /* kernelArgsPtr */, size_t gmx_used_in_debug argIndex) { GMX_ASSERT(argIndex == totalArgsCount, "Tail expansion"); } /*! \brief * Compile-time recursive function for setting up a single CUDA kernel argument. * This function copies a kernel argument pointer \p argPtr into \p kernelArgsPtr, * and calls itself on the next argument, eventually calling the tail function above. * * \tparam CurrentArg Type of the current argument * \tparam RemainingArgs Types of remaining arguments after the current one * \tparam totalArgsCount Number of the kernel arguments * \tparam KernelPtr Kernel function handle type * \param[in] kernel Kernel function handle * \param[in,out] kernelArgsPtr Pointer to the argument array to be filled in * \param[in] argIndex Index of the current argument * \param[in] argPtr Pointer to the current argument * \param[in] otherArgsPtrs Pack of pointers to arguments remaining to process after the current one */ template<typename CurrentArg, typename... RemainingArgs, size_t totalArgsCount, typename KernelPtr> void prepareGpuKernelArgument(KernelPtr kernel, std::array<void*, totalArgsCount>* kernelArgsPtr, size_t argIndex, const CurrentArg* argPtr, const RemainingArgs*... otherArgsPtrs) { (*kernelArgsPtr)[argIndex] = const_cast<void*>(static_cast<const void*>(argPtr)); prepareGpuKernelArgument(kernel, kernelArgsPtr, argIndex + 1, otherArgsPtrs...); } /*! \brief * A wrapper function for setting up all the CUDA kernel arguments. * Calls the recursive functions above. 
* * \tparam KernelPtr Kernel function handle type * \tparam Args Types of all the kernel arguments * \param[in] kernel Kernel function handle * \param[in] argsPtrs Pointers to all the kernel arguments * \returns A prepared parameter pack to be used with launchGpuKernel() as the last argument. */ template<typename KernelPtr, typename... Args> std::array<void*, sizeof...(Args)> prepareGpuKernelArguments(KernelPtr kernel, const KernelLaunchConfig& /*config */, const Args*... argsPtrs) { std::array<void*, sizeof...(Args)> kernelArgs; prepareGpuKernelArgument(kernel, &kernelArgs, 0, argsPtrs...); return kernelArgs; } /*! \brief Launches the CUDA kernel and handles the errors. * * \tparam Args Types of all the kernel arguments * \param[in] kernel Kernel function handle * \param[in] config Kernel configuration for launching * \param[in] deviceStream GPU stream to launch kernel in * \param[in] kernelName Human readable kernel description, for error handling only * \param[in] kernelArgs Array of the pointers to the kernel arguments, prepared by * prepareGpuKernelArguments() \throws gmx::InternalError on kernel launch failure */ template<typename... Args> void launchGpuKernel(void (*kernel)(Args...), const KernelLaunchConfig& config, const DeviceStream& deviceStream, CommandEvent* /*timingEvent */, const char* kernelName, const std::array<void*, sizeof...(Args)>& kernelArgs) { dim3 blockSize(config.blockSize[0], config.blockSize[1], config.blockSize[2]); dim3 gridSize(config.gridSize[0], config.gridSize[1], config.gridSize[2]); cudaLaunchKernel(reinterpret_cast<void*>(kernel), gridSize, blockSize, const_cast<void**>(kernelArgs.data()), config.sharedMemorySize, deviceStream.stream()); gmx::ensureNoPendingDeviceError("GPU kernel (" + std::string(kernelName) + ") failed to launch."); } #endif
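// -----------------------------------------------------------------------------
// Usage sketch: a minimal, hypothetical example of driving the two helpers
// above. The kernel, buffer and launch dimensions are made up for illustration
// and assume KernelLaunchConfig is default-constructible; only
// prepareGpuKernelArguments()/launchGpuKernel() and the config fields they read
// (gridSize, blockSize, sharedMemorySize) come from this header.
// -----------------------------------------------------------------------------
__global__ void exampleScaleKernel(float* x, float a, int n)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
    {
        x[i] *= a;
    }
}

static void launchExampleScale(float* d_x, float a, int n, const DeviceStream& deviceStream)
{
    KernelLaunchConfig config;
    config.blockSize[0]     = 128;
    config.blockSize[1]     = 1;
    config.blockSize[2]     = 1;
    config.gridSize[0]      = (n + 127) / 128;
    config.gridSize[1]      = 1;
    config.gridSize[2]      = 1;
    config.sharedMemorySize = 0;

    const auto kernelArgs = prepareGpuKernelArguments(exampleScaleKernel, config, &d_x, &a, &n);
    launchGpuKernel(exampleScaleKernel, config, deviceStream, /*timingEvent=*/nullptr,
                    "exampleScaleKernel", kernelArgs);
}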
#define FULL_MASK 0xffffffff #define TILESIZE 32 #define WARPSIZE 32 template <typename RealType> void __global__ k_find_block_bounds( const int N, // Number of atoms const int D, // Box dimensions, typically 3 const int T, // Number of tiles const double *__restrict__ coords, // [N*3] const double *__restrict__ box, // [D*3] double *block_bounds_ctr, // [T*3] double *block_bounds_ext // [T*3] ) { // Algorithm taken from https://github.com/openmm/openmm/blob/master/platforms/cuda/src/kernels/findInteractingBlocks.cu#L7 // Computes smaller bounding boxes than simpler form by accounting for periodic box conditions // each thread processes one tile const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= T) { return; } const RealType bx = box[0 * 3 + 0]; const RealType by = box[1 * 3 + 1]; const RealType bz = box[2 * 3 + 2]; const RealType inv_bx = 1 / bx; const RealType inv_by = 1 / by; const RealType inv_bz = 1 / bz; const int base = index * TILESIZE; RealType pos_x = coords[base * 3 + 0]; RealType pos_y = coords[base * 3 + 1]; RealType pos_z = coords[base * 3 + 2]; RealType minPos_x = pos_x; RealType minPos_y = pos_y; RealType minPos_z = pos_z; RealType maxPos_x = pos_x; RealType maxPos_y = pos_y; RealType maxPos_z = pos_z; const int last = min(base + TILESIZE, N); for (int i = base + 1; i < last; i++) { pos_x = coords[i * 3 + 0]; pos_y = coords[i * 3 + 1]; pos_z = coords[i * 3 + 2]; // Build up center over time, and recenter before computing // min and max, to reduce overall size of box thanks to accounting // for periodic boundary conditions RealType center_x = static_cast<RealType>(0.5) * (maxPos_x + minPos_x); RealType center_y = static_cast<RealType>(0.5) * (maxPos_y + minPos_y); RealType center_z = static_cast<RealType>(0.5) * (maxPos_z + minPos_z); pos_x -= bx * nearbyint((pos_x - center_x) * inv_bx); pos_y -= by * nearbyint((pos_y - center_y) * inv_by); pos_z -= bz * nearbyint((pos_z - center_z) * inv_bz); minPos_x = min(minPos_x, pos_x); minPos_y = min(minPos_y, pos_y); minPos_z = min(minPos_z, pos_z); maxPos_x = max(maxPos_x, pos_x); maxPos_y = max(maxPos_y, pos_y); maxPos_z = max(maxPos_z, pos_z); } block_bounds_ctr[index * 3 + 0] = static_cast<RealType>(0.5) * (maxPos_x + minPos_x); block_bounds_ctr[index * 3 + 1] = static_cast<RealType>(0.5) * (maxPos_y + minPos_y); block_bounds_ctr[index * 3 + 2] = static_cast<RealType>(0.5) * (maxPos_z + minPos_z); block_bounds_ext[index * 3 + 0] = static_cast<RealType>(0.5) * (maxPos_x - minPos_x); block_bounds_ext[index * 3 + 1] = static_cast<RealType>(0.5) * (maxPos_y - minPos_y); block_bounds_ext[index * 3 + 2] = static_cast<RealType>(0.5) * (maxPos_z - minPos_z); } void __global__ k_compact_trim_atoms( const int NC, const int Y, unsigned int *__restrict__ trim_atoms, unsigned int *__restrict__ interactionCount, int *__restrict__ interactingTiles, unsigned int *__restrict__ interactingAtoms) { __shared__ int ixn_j_buffer [64]; // we can probably get away with using only 32 if we do some fancier remainder tricks, but this isn't a huge save ixn_j_buffer[threadIdx.x] = NC; ixn_j_buffer[WARPSIZE + threadIdx.x] = NC; const int indexInWarp = threadIdx.x % WARPSIZE; const int warpMask = (1 << indexInWarp) - 1; const int row_block_idx = blockIdx.x; __shared__ volatile int sync_start[1]; int neighborsInBuffer = 0; for (int trim_block_idx = 0; trim_block_idx < Y; trim_block_idx++) { int atom_j_idx = trim_atoms[row_block_idx * Y * WARPSIZE + trim_block_idx * WARPSIZE + threadIdx.x]; bool interacts = atom_j_idx < NC; int 
includeAtomFlags = __ballot_sync(FULL_MASK, interacts); if (interacts) { // only interacting atoms partake in this int index = neighborsInBuffer + __popc(includeAtomFlags & warpMask); // where to store this in shared memory ixn_j_buffer[index] = atom_j_idx; } neighborsInBuffer += __popc(includeAtomFlags); if (neighborsInBuffer > WARPSIZE) { int tilesToStore = 1; if (indexInWarp == 0) { sync_start[0] = atomicAdd(interactionCount, tilesToStore); } __syncwarp(); interactingTiles[sync_start[0]] = row_block_idx; // IS THIS CORRECT? CONTESTED interactingAtoms[sync_start[0] * WARPSIZE + threadIdx.x] = ixn_j_buffer[threadIdx.x]; ixn_j_buffer[threadIdx.x] = ixn_j_buffer[WARPSIZE + threadIdx.x]; ixn_j_buffer[WARPSIZE + threadIdx.x] = NC; // reset old values neighborsInBuffer -= WARPSIZE; } } if (neighborsInBuffer > 0) { int tilesToStore = 1; if (indexInWarp == 0) { sync_start[0] = atomicAdd(interactionCount, tilesToStore); } __syncwarp(); interactingTiles[sync_start[0]] = row_block_idx; interactingAtoms[sync_start[0] * WARPSIZE + threadIdx.x] = ixn_j_buffer[threadIdx.x]; } } /* This is a simplified algorithm of find_interacting_blocks.cu from OpenMM. This is launched with a threadblock size of 32, (i.e. one warp). Each block proceeds as follows: 1. Loads its own row block (32 atoms). 2. Compare the row block against 32 other column blocks via bounding box tests. 3. Determine which blocks potentially interact using warp-level programming. 4. Loop over each interacting block j, and see which row block atoms may interact with j's bbox. 5. For atoms that interact, do a fine-grained comparison of each row block again against each col block atom. 6. Store the column atoms that interact into shared memory 7. Store the leftover trim into its own buffer. */ template <typename RealType, bool UPPER_TRIAG> void __global__ k_find_blocks_with_ixns( const int NC, // Number of atoms in columns const int NR, // Number of atoms in rows const double *__restrict__ col_bb_ctr, // [NC * 3] column block centers const double *__restrict__ col_bb_ext, // [NC * 3] column block extants const double *__restrict__ row_bb_ctr, // [NR * 3] row block centers const double *__restrict__ row_bb_ext, // [NR * 3] row block extants const double *__restrict__ col_coords, //TBD make float32 version const double *__restrict__ row_coords, //TBD make float32 version const double *__restrict__ box, unsigned int *__restrict__ interactionCount, // number of tiles that have interactions int *__restrict__ interactingTiles, // the row block idx of the tile that is interacting unsigned int *__restrict__ interactingAtoms, // the col block of the atoms that are interacting unsigned int *__restrict__ trim_atoms, // the left-over trims that will later be compacted const double cutoff) { const int indexInWarp = threadIdx.x % WARPSIZE; const int warpMask = (1 << indexInWarp) - 1; __shared__ int ixn_j_buffer [64]; // we can probably get away with using only 32 if we do some fancier remainder tricks, but this isn't a huge save // initialize ixn_j_buffer[threadIdx.x] = NC; ixn_j_buffer[WARPSIZE + threadIdx.x] = NC; __shared__ volatile int sync_start[1]; const int row_block_idx = blockIdx.x; // Retrieve the center coords of row's box and outer limits of row box. 
RealType row_bb_ctr_x = row_bb_ctr[row_block_idx * 3 + 0]; RealType row_bb_ctr_y = row_bb_ctr[row_block_idx * 3 + 1]; RealType row_bb_ctr_z = row_bb_ctr[row_block_idx * 3 + 2]; RealType row_bb_ext_x = row_bb_ext[row_block_idx * 3 + 0]; RealType row_bb_ext_y = row_bb_ext[row_block_idx * 3 + 1]; RealType row_bb_ext_z = row_bb_ext[row_block_idx * 3 + 2]; int neighborsInBuffer = 0; const unsigned int atom_i_idx = blockIdx.x * blockDim.x + threadIdx.x; RealType pos_i_x = atom_i_idx < NR ? row_coords[atom_i_idx * 3 + 0] : 0; RealType pos_i_y = atom_i_idx < NR ? row_coords[atom_i_idx * 3 + 1] : 0; RealType pos_i_z = atom_i_idx < NR ? row_coords[atom_i_idx * 3 + 2] : 0; const int NUM_COL_BLOCKS = (NC + TILESIZE - 1) / TILESIZE; RealType bx = box[0 * 3 + 0]; RealType by = box[1 * 3 + 1]; RealType bz = box[2 * 3 + 2]; RealType inv_bx = 1 / bx; RealType inv_by = 1 / by; RealType inv_bz = 1 / bz; RealType non_periodic_dist_i = 0; RealType non_periodic_dist_j = 0; // Determine if the row block can be translated into a periodic box // to optimize distance calculations // https://github.com/proteneer/timemachine/issues/320 const bool single_periodic_box = (0.5f * bx - row_bb_ext_x >= cutoff && 0.5f * by - row_bb_ext_y >= cutoff && 0.5f * bz - row_bb_ext_z >= cutoff); if (single_periodic_box) { pos_i_x -= bx * nearbyint((pos_i_x - row_bb_ctr_x) * inv_bx); pos_i_y -= by * nearbyint((pos_i_y - row_bb_ctr_y) * inv_by); pos_i_z -= bz * nearbyint((pos_i_z - row_bb_ctr_z) * inv_bz); non_periodic_dist_i = static_cast<RealType>(0.5) * (pos_i_x * pos_i_x + pos_i_y * pos_i_y + pos_i_z * pos_i_z); } const RealType cutoff_squared = static_cast<RealType>(cutoff) * static_cast<RealType>(cutoff); int col_block_base = blockIdx.y * TILESIZE; int col_block_idx = col_block_base + indexInWarp; bool include_col_block = (col_block_idx < NUM_COL_BLOCKS) && (!UPPER_TRIAG || col_block_idx >= row_block_idx); if (include_col_block) { // Compute center of column box and extent coords. RealType col_bb_ctr_x = col_bb_ctr[col_block_idx * 3 + 0]; RealType col_bb_ctr_y = col_bb_ctr[col_block_idx * 3 + 1]; RealType col_bb_ctr_z = col_bb_ctr[col_block_idx * 3 + 2]; RealType col_bb_ext_x = col_bb_ext[col_block_idx * 3 + 0]; RealType col_bb_ext_y = col_bb_ext[col_block_idx * 3 + 1]; RealType col_bb_ext_z = col_bb_ext[col_block_idx * 3 + 2]; // Find delta between boxes RealType box_box_dx = row_bb_ctr_x - col_bb_ctr_x; RealType box_box_dy = row_bb_ctr_y - col_bb_ctr_y; RealType box_box_dz = row_bb_ctr_z - col_bb_ctr_z; // Recenter delta box box_box_dx -= bx * nearbyint(box_box_dx * inv_bx); box_box_dy -= by * nearbyint(box_box_dy * inv_by); box_box_dz -= bz * nearbyint(box_box_dz * inv_bz); // If boxes overlap, treat distance as 0 box_box_dx = max(static_cast<RealType>(0.0), fabs(box_box_dx) - row_bb_ext_x - col_bb_ext_x); box_box_dy = max(static_cast<RealType>(0.0), fabs(box_box_dy) - row_bb_ext_y - col_bb_ext_y); box_box_dz = max(static_cast<RealType>(0.0), fabs(box_box_dz) - row_bb_ext_z - col_bb_ext_z); // Check if the deltas between boxes are within cutoff include_col_block &= (box_box_dx * box_box_dx + box_box_dy * box_box_dy + box_box_dz * box_box_dz) < (cutoff_squared); } // __ballot returns bit flags to indicate which thread in the warp identified a column block within the cutoff. unsigned includeBlockFlags = __ballot_sync(FULL_MASK, include_col_block); // Loop over the col blocks we identified as potentially containing neighbors. 
while (includeBlockFlags != 0) { // (ytz): CUDA ffs returns an inclusive [0,32] such that: // ffs(0) == 0 // ffs(2^0=1) == 1 // ffs(2^1=2) == 2 // ffs(2^2=4) == 3 // ffs(2^3=8) == 4 // ffs(2^31) == 32 int offset = __ffs(includeBlockFlags) - 1; includeBlockFlags &= includeBlockFlags - 1; int col_block = col_block_base + offset; int atom_j_idx = col_block * WARPSIZE + threadIdx.x; // each thread loads a different atom // Compute overlap between column bounding box and row atom RealType col_bb_ctr_x = col_bb_ctr[col_block * 3 + 0]; RealType col_bb_ctr_y = col_bb_ctr[col_block * 3 + 1]; RealType col_bb_ctr_z = col_bb_ctr[col_block * 3 + 2]; RealType col_bb_ext_x = col_bb_ext[col_block * 3 + 0]; RealType col_bb_ext_y = col_bb_ext[col_block * 3 + 1]; RealType col_bb_ext_z = col_bb_ext[col_block * 3 + 2]; // Don't use pos_i_* here, as might have been shifted to center of row box RealType atom_box_dx = (atom_i_idx < NR ? row_coords[atom_i_idx * 3 + 0] : 0) - col_bb_ctr_x; RealType atom_box_dy = (atom_i_idx < NR ? row_coords[atom_i_idx * 3 + 1] : 0) - col_bb_ctr_y; RealType atom_box_dz = (atom_i_idx < NR ? row_coords[atom_i_idx * 3 + 2] : 0) - col_bb_ctr_z; atom_box_dx -= bx * nearbyint(atom_box_dx * inv_bx); atom_box_dy -= by * nearbyint(atom_box_dy * inv_by); atom_box_dz -= bz * nearbyint(atom_box_dz * inv_bz); atom_box_dx = max(static_cast<RealType>(0.0), fabs(atom_box_dx) - col_bb_ext_x); atom_box_dy = max(static_cast<RealType>(0.0), fabs(atom_box_dy) - col_bb_ext_y); atom_box_dz = max(static_cast<RealType>(0.0), fabs(atom_box_dz) - col_bb_ext_z); bool check_column_atoms = atom_i_idx < NR && atom_box_dx * atom_box_dx + atom_box_dy * atom_box_dy + atom_box_dz * atom_box_dz < cutoff_squared; // Find rows where the row atom and column boxes are within cutoff unsigned atomFlags = __ballot_sync(FULL_MASK, check_column_atoms); bool interacts = false; // threadIdx // 0 1 2 3 4 5 // 0 0 0 0 0 0 0 // a 1 0 1 0 1 1 0 row_atom // t 0 0 0 0 0 0 0 // o 0 0 0 0 0 0 0 // m 0 0 0 0 0 0 0 // f 1 1 0 0 0 1 1 row_atom // l 0 0 0 0 0 0 0 // a 1 0 1 0 0 1 0 row_atom // g 1 1 1 0 0 0 1 row_atom // s 0 0 0 0 0 0 0 // 0 0 0 0 0 0 0 RealType pos_j_x = atom_j_idx < NC ? col_coords[atom_j_idx * 3 + 0] : 0; RealType pos_j_y = atom_j_idx < NC ? col_coords[atom_j_idx * 3 + 1] : 0; RealType pos_j_z = atom_j_idx < NC ? 
col_coords[atom_j_idx * 3 + 2] : 0; if (single_periodic_box) { // Recenter using **row** box center pos_j_x -= bx * nearbyint((pos_j_x - row_bb_ctr_x) * inv_bx); pos_j_y -= by * nearbyint((pos_j_y - row_bb_ctr_y) * inv_by); pos_j_z -= bz * nearbyint((pos_j_z - row_bb_ctr_z) * inv_bz); non_periodic_dist_j = static_cast<RealType>(0.5) * (pos_j_x * pos_j_x + pos_j_y * pos_j_y + pos_j_z * pos_j_z); } unsigned includeAtomFlags = 0; while (atomFlags) { const int row_atom = __ffs(atomFlags) - 1; atomFlags &= atomFlags - 1; RealType row_i_x = __shfl_sync(FULL_MASK, pos_i_x, row_atom); RealType row_i_y = __shfl_sync(FULL_MASK, pos_i_y, row_atom); RealType row_i_z = __shfl_sync(FULL_MASK, pos_i_z, row_atom); if (!single_periodic_box) { RealType atom_atom_dx = row_i_x - pos_j_x; RealType atom_atom_dy = row_i_y - pos_j_y; RealType atom_atom_dz = row_i_z - pos_j_z; atom_atom_dx -= bx * nearbyint(atom_atom_dx * inv_bx); atom_atom_dy -= by * nearbyint(atom_atom_dy * inv_by); atom_atom_dz -= bz * nearbyint(atom_atom_dz * inv_bz); interacts |= (atom_atom_dx * atom_atom_dx + atom_atom_dy * atom_atom_dy + atom_atom_dz * atom_atom_dz) < cutoff_squared; } else { // All threads in warp need single_periodic_box to be true for this not to hang RealType corrected_i = __shfl_sync(FULL_MASK, non_periodic_dist_i, row_atom); // Below is half the magnitude of the distance equation, expanded. RealType half_dist = corrected_i + non_periodic_dist_j - row_i_x * pos_j_x - row_i_y * pos_j_y - row_i_z * pos_j_z; interacts |= half_dist < (static_cast<RealType>(0.5) * cutoff_squared); } includeAtomFlags = __ballot_sync(FULL_MASK, interacts); // If all threads in the warp have found interactions, can terminate early if (includeAtomFlags == FULL_MASK) { break; } } // Add any interacting atoms to the buffer. if (interacts) { int index = neighborsInBuffer + __popc(includeAtomFlags & warpMask); // where to store this in shared memory // Indices can be at most 64 ixn_j_buffer[index] = atom_j_idx; } neighborsInBuffer += __popc(includeAtomFlags); if (neighborsInBuffer > WARPSIZE) { int tilesToStore = 1; if (indexInWarp == 0) { sync_start[0] = atomicAdd(interactionCount, tilesToStore); } __syncwarp(); interactingTiles[sync_start[0]] = row_block_idx; interactingAtoms[sync_start[0] * WARPSIZE + threadIdx.x] = ixn_j_buffer[threadIdx.x]; ixn_j_buffer[threadIdx.x] = ixn_j_buffer[WARPSIZE + threadIdx.x]; ixn_j_buffer[WARPSIZE + threadIdx.x] = NC; // reset old values neighborsInBuffer -= WARPSIZE; } } // store trim const int Y = gridDim.y; trim_atoms[blockIdx.x * Y * WARPSIZE + blockIdx.y * WARPSIZE + threadIdx.x] = ixn_j_buffer[threadIdx.x]; }
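// -----------------------------------------------------------------------------
// Host-side launch sketch: a hypothetical driver tying the three kernels above
// together, inferred from the indexing inside them (one warp-sized block per
// row tile for the interaction search, blockIdx.y walking groups of WARPSIZE
// column tiles, and one warp per row tile for the trim compaction). Buffer
// sizing comments and the UPPER_TRIAG choice are illustrative assumptions, not
// the project's actual driver code.
// -----------------------------------------------------------------------------
void build_neighborlist_sketch(
    const int NC, const int NR,                    // column / row atom counts
    const double *d_col_coords, const double *d_row_coords,
    const double *d_box,
    double *d_col_bb_ctr, double *d_col_bb_ext,    // col_tiles * 3 each
    double *d_row_bb_ctr, double *d_row_bb_ext,    // row_tiles * 3 each
    unsigned int *d_interaction_count,             // single counter, zeroed beforehand
    int *d_interacting_tiles,
    unsigned int *d_interacting_atoms,
    unsigned int *d_trim_atoms,                    // row_tiles * Y * WARPSIZE entries
    const double cutoff,
    cudaStream_t stream) {

    const int col_tiles = (NC + TILESIZE - 1) / TILESIZE;
    const int row_tiles = (NR + TILESIZE - 1) / TILESIZE;

    // 1. Per-tile bounding boxes (one thread per tile).
    const int bb_threads = 128;
    k_find_block_bounds<double><<<(col_tiles + bb_threads - 1) / bb_threads, bb_threads, 0, stream>>>(
        NC, 3, col_tiles, d_col_coords, d_box, d_col_bb_ctr, d_col_bb_ext);
    k_find_block_bounds<double><<<(row_tiles + bb_threads - 1) / bb_threads, bb_threads, 0, stream>>>(
        NR, 3, row_tiles, d_row_coords, d_box, d_row_bb_ctr, d_row_bb_ext);

    // 2. Interaction search: one warp per row tile; blockIdx.y covers groups of
    //    WARPSIZE column tiles. UPPER_TRIAG=false performs the full NR x NC search.
    const int Y = (col_tiles + WARPSIZE - 1) / WARPSIZE;
    const dim3 ixn_grid(row_tiles, Y, 1);
    k_find_blocks_with_ixns<double, false><<<ixn_grid, WARPSIZE, 0, stream>>>(
        NC, NR,
        d_col_bb_ctr, d_col_bb_ext, d_row_bb_ctr, d_row_bb_ext,
        d_col_coords, d_row_coords, d_box,
        d_interaction_count, d_interacting_tiles, d_interacting_atoms,
        d_trim_atoms, cutoff);

    // 3. Compact the leftover trim atoms of each row tile into the tile list.
    k_compact_trim_atoms<<<row_tiles, WARPSIZE, 0, stream>>>(
        NC, Y, d_trim_atoms, d_interaction_count,
        d_interacting_tiles, d_interacting_atoms);
}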
namespace reduction { // Utility functions template <typename _dim3> __device__ __forceinline__ nvfuser_index_t size(const _dim3& d) { return (nvfuser_index_t)d.x * (nvfuser_index_t)d.y * (nvfuser_index_t)d.z; } #define isize(d) ((d).x * (d).y * (d).z) template <typename _dim3pos, typename _dim3dim> __device__ __forceinline__ nvfuser_index_t offset(const _dim3pos& pos, const _dim3dim& dim) { return (nvfuser_index_t)pos.x + (nvfuser_index_t)pos.y * (nvfuser_index_t)dim.x + (nvfuser_index_t)pos.z * (nvfuser_index_t)dim.x * (nvfuser_index_t)dim.y; } #define ioffset(pos, dim) \ ((pos).x + (pos).y * (dim).x + (pos).z * (dim).x * (dim).y) // Returns dim3 of each reduction segment. template <bool X_BLOCK, bool Y_BLOCK, bool Z_BLOCK, typename _dim3> __device__ dim3 dimension_of_reduction_segment(const _dim3& grid_dim) { return dim3{ X_BLOCK ? (unsigned)grid_dim.x : 1U, Y_BLOCK ? (unsigned)grid_dim.y : 1U, Z_BLOCK ? (unsigned)grid_dim.z : 1U}; } // Returns the number of blocks in each reduction segment. template <bool X_BLOCK, bool Y_BLOCK, bool Z_BLOCK, typename _dim3> __device__ nvfuser_index_t size_of_reduction_segment(const _dim3& grid_dim) { return size( dimension_of_reduction_segment<X_BLOCK, Y_BLOCK, Z_BLOCK>(grid_dim)); } // Returns the total number of reduction segments. template <bool X_BLOCK, bool Y_BLOCK, bool Z_BLOCK, typename _dim3> __device__ nvfuser_index_t number_of_reduction_segments(const _dim3& grid_dim) { return (X_BLOCK ? 1 : grid_dim.x) * (Y_BLOCK ? 1 : grid_dim.y) * (Z_BLOCK ? 1 : grid_dim.z); } // Returns the 1-D index of the segment of thread block of block_idx. template < bool X_BLOCK, bool Y_BLOCK, bool Z_BLOCK, typename _dim3bi, typename _dim3gd> __device__ nvfuser_index_t index_of_reduction_segment(const _dim3bi& block_idx, const _dim3gd& grid_dim) { nvfuser_index_t seg_idx = 0; if (!Z_BLOCK) seg_idx += block_idx.z; if (!Y_BLOCK) seg_idx = seg_idx * grid_dim.y + block_idx.y; if (!X_BLOCK) seg_idx = seg_idx * grid_dim.x + block_idx.x; return seg_idx; } // Returns the offset of thread block in its reduction segment. template < bool X_BLOCK, bool Y_BLOCK, bool Z_BLOCK, typename _dim3bi, typename _dim3gd> __device__ nvfuser_index_t offset_in_reduction_segment(const _dim3bi& block_idx, const _dim3gd& grid_dim) { nvfuser_index_t offset = 0; if (Z_BLOCK) offset = offset * grid_dim.z + block_idx.z; if (Y_BLOCK) offset = offset * grid_dim.y + block_idx.y; if (X_BLOCK) offset = offset * grid_dim.x + block_idx.x; return offset; } // Returns dim3 of each reduction block. template <bool X_THREAD, bool Y_THREAD, bool Z_THREAD, typename _dim3> __device__ dim3 dimension_of_reduction_block(const _dim3& block_dim) { return dim3{ X_THREAD ? (unsigned)block_dim.x : 1U, Y_THREAD ? (unsigned)block_dim.y : 1U, Z_THREAD ? (unsigned)block_dim.z : 1U}; } // Returns the number of threads of each reduction block. template <bool X_THREAD, bool Y_THREAD, bool Z_THREAD, typename _dim3> __device__ int size_of_reduction_block(const _dim3& block_dim) { auto tmp_dim = dimension_of_reduction_block<X_THREAD, Y_THREAD, Z_THREAD>(block_dim); return isize(tmp_dim); } // Returns the linear offset of a thread in a reduction block. 
template < bool X_THREAD, bool Y_THREAD, bool Z_THREAD, typename _dim3ti, typename _dim3bd> __device__ int offset_in_reduction_block( const _dim3ti& thread_idx, const _dim3bd& block_dim) { int offset = 0; if (Z_THREAD) offset += thread_idx.z; if (Y_THREAD) offset = offset * block_dim.y + thread_idx.y; if (X_THREAD) offset = offset * block_dim.x + thread_idx.x; return offset; } // Reduces all the reduction blocks in each reduction segment. // // This is only used by one thread block per reduction segment. The input // reduction blocks of the segment are stored in an intermediate buffer pointed // by parameter in. Template parameters X/Y/Z_THREAD denote how the reduction // block is formed. // // The size of a reduction block is by definition smaller or equal to the size // of a thread block. We use the remaining threads to parallelize reductions // across reduction blocks. For example, when X/Y/Z_THREAD = {true, false, // false}, we use blockDim.y*blockDim.z threads for each output value. This is // done first by loading the input values in parallel and then by reducing // across threads of dimensions whose XYZ_THREAD are false. // // Note that what is done here after the loading from global memory is similar // to what the existing blockReduce function does. The main difference is that // the logical block to reduce is a 2D domain where the leading dimension is the // size of a reduction block and the second dimension is the remaining factor in // each thread block. For example, when X/Y/Z_THREAD = {false, true, false}, the // threads are arranged as (blockDim.y, blockDim.x*blockDim.z). We do not reduce // along the first dimension but only the second dimension. So, it is possible // to reuse the existing blockReduce with dim3{blockDim.y, // blockDim.x*blockDim.z} instead of blockDim and with X_THREAD and Y_THREAD // being false and true, respectively. Also, it still need to shuffle the final // output values to their actual corresponding threads. In the case of when // X/Y/Z_THREAD = {false, true, false}, after the intra-block reduction, the // final results will still be held by the first blockDim.y threads, which need // to be transferred to threads at threadIdx.x == 0 and threadIdx.z == 0. 
template < bool X_THREAD, bool Y_THREAD, bool Z_THREAD, typename T, typename Func> __device__ void gridReduceLastBlock( T& out, const T* in, const nvfuser_index_t in_size, Func reduction_op, T* shared_buf, bool write_pred, T init_val) { const int tid = ioffset(threadIdx, blockDim); const int block_size = isize(blockDim); const int rblock_size = size_of_reduction_block<X_THREAD, Y_THREAD, Z_THREAD>(blockDim); T inp = init_val; if (tid < in_size) { inp = in[tid]; } for (nvfuser_index_t i = tid + block_size; i < in_size; i += block_size) { reduction_op(inp, in[i]); } const auto should_write = (X_THREAD || threadIdx.x == 0) && (Y_THREAD || threadIdx.y == 0) && (Z_THREAD || threadIdx.z == 0); auto rem_size = block_size / rblock_size; if (rem_size > 1) { const int rblock_offset = tid % rblock_size; const int rblock_idx = tid / rblock_size; T inp_tmp = init_val; blockReduce<false, true, false>( inp_tmp, inp, reduction_op, dim3{(unsigned)rblock_offset, (unsigned)rblock_idx, 0}, dim3{(unsigned)rblock_size, (unsigned)rem_size}, shared_buf, true, init_val); block_sync::sync(); inp = inp_tmp; if (tid < rblock_size) { shared_buf[tid] = inp; } block_sync::sync(); if (should_write) { inp = shared_buf[offset_in_reduction_block<X_THREAD, Y_THREAD, Z_THREAD>( threadIdx, blockDim)]; } } if (should_write && write_pred) { reduction_op(out, inp); } } // Reduces per-thread values across thread blocks. // // Function parameters: // - out: Per-thread output location // - inp_val: Per-thread input value // - reduction_op: Scalar reduction function // - work_buf: Temporary buffer for cross-block reductions // - sync_flags: A vector of integers for synchronizations // - shared_buf: Shared memory buffer for intra-block reduction // // Return true when the thread block has the valid result. // // Template parameters: // - X/Y/Z_BLOCK: When true, reduces across thread blocks along the X/Y/Z // dimensions // - X/Y/Z_THREAD: When true, all threads along the X/Y/Z dimensions participate // in the cross-block reduction. Otherwise, only threads at offset 0 do. // - T: Scalar data type of input/output data // - Func: Type of scalara reduction function // // Template parameters X/Y/Z_BLOCK define a group of thread blocks that are // reduced together. We call it a reduction segment. Some examples are: // // Case 1: X/Y/Z_BLOCK == true/true/true -> There is only one segment, which // includes all thread blocks. It is effecively the same as the grid. // // Case 2: X/Y/Z_BLOCK == false/false/false -> Each thread block comprises an // individual segment by itself. // // Case 3: X/Y/Z_BLOCK == true/false/false -> Each segment contains thread // blocks that have the same blockDim.x. There will be blockDim.y*blockDim.z // such segments. // // X/Y/Z_THREAD defines a sub region of a thread block that should be reduced // with the sub regions of other thread blocks. We call it a reduction block. // E.g., // // Case 1: X/Y/Z_THREAD == false/false/false -> Only thread 0 participates in // the cross-block reductions. The reduction block is 1x1x1 with thread 0. // // Case 2: X/Y/Z_THREAD == true/true/true-> All threads in a thread block // participate in the cross-block reductions. The reduction block in this case // is equivalent to the thread block. // // After the function completes, only one thread block per reduction segment // gets valid reduction results. There is no guarantee which particular block // gets the final results. 
// template < bool X_BLOCK, bool Y_BLOCK, bool Z_BLOCK, bool X_THREAD, bool Y_THREAD, bool Z_THREAD, typename T, typename Func> __device__ bool gridReduce( T& out, const T& inp_val, Func reduction_op, volatile T* work_buf, Tensor<int64_t, 1> sync_flags, T* shared_buf, bool read_pred, bool write_pred, T init_val) { // Number of values to reduce in the grid dimensions const auto seg_size = size_of_reduction_segment<X_BLOCK, Y_BLOCK, Z_BLOCK>(gridDim); // Index of the reduction we're performing out of the seg_size const auto seg_idx = index_of_reduction_segment<X_BLOCK, Y_BLOCK, Z_BLOCK>(blockIdx, gridDim); // Number of threads we can use in final reduction, Seems to assume all // threads in the block participate const auto rblock_size = size_of_reduction_block<X_THREAD, Y_THREAD, Z_THREAD>(blockDim); // advance to the offset for this segment // index of reduction * size of the reduction * size of threads work_buf += seg_idx * seg_size * rblock_size; if ((X_THREAD || threadIdx.x == 0) && (Y_THREAD || threadIdx.y == 0) && (Z_THREAD || threadIdx.z == 0)) { auto rblock_offset = offset_in_reduction_segment<X_BLOCK, Y_BLOCK, Z_BLOCK>( blockIdx, gridDim); auto thread_offset = offset_in_reduction_block<X_THREAD, Y_THREAD, Z_THREAD>( threadIdx, blockDim); auto work_buf_offset = rblock_size * rblock_offset + thread_offset; if (read_pred) { work_buf[work_buf_offset] = inp_val; } else { work_buf[work_buf_offset] = init_val; } } block_sync::sync(); __shared__ bool last_block; if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) { __threadfence(); // printf("%ld\n", sync_flags[seg_idx]); auto old = (int64_t)atomicAdd((unsigned long long*)&sync_flags[seg_idx], 1); last_block = old + 1 == seg_size; // printf("Last_block = %d + 1 == %d\n", (int)old, (int)seg_size); } block_sync::sync(); if (last_block) { // printf("Last block %d %d %d %d\n", blockIdx.x, blockIdx.y, blockIdx.z); // final reduction gridReduceLastBlock<X_THREAD, Y_THREAD, Z_THREAD>( out, (T*)work_buf, seg_size * rblock_size, reduction_op, shared_buf, write_pred, init_val); return true; } else { // printf("Not last block %d %d %d\n", blockIdx.x, blockIdx.y, blockIdx.z); return false; } } } // namespace reduction #undef isize #undef ioffset
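// -----------------------------------------------------------------------------
// Worked example: a hypothetical host-side helper that mirrors the sizing
// arithmetic gridReduce() relies on -- one work_buf slot per (block in segment)
// x (thread in reduction block) for every segment, and one sync flag per
// segment. The function name and the main() driver are made up for illustration.
// -----------------------------------------------------------------------------
#include <cstdio>
#include <cuda_runtime.h>

struct GridReduceSizes {
  long long work_buf_elems;
  long long sync_flag_elems;
};

GridReduceSizes grid_reduce_sizes(dim3 grid, dim3 block,
                                  bool xb, bool yb, bool zb,   // X/Y/Z_BLOCK
                                  bool xt, bool yt, bool zt) { // X/Y/Z_THREAD
  // size_of_reduction_segment: how many blocks are reduced together.
  const long long seg_size = (long long)(xb ? grid.x : 1) * (yb ? grid.y : 1) * (zb ? grid.z : 1);
  // number_of_reduction_segments: how many independent reductions run.
  const long long num_segs = (long long)(xb ? 1 : grid.x) * (yb ? 1 : grid.y) * (zb ? 1 : grid.z);
  // size_of_reduction_block: threads per block holding distinct partial results.
  const long long rblock = (long long)(xt ? block.x : 1) * (yt ? block.y : 1) * (zt ? block.z : 1);
  return {num_segs * seg_size * rblock, num_segs};
}

int main() {
  // Reduce across gridDim.x only, with the blockDim.x lanes participating:
  // seg_size = 8, segments = 4*2 = 8, rblock = 32 -> 8*8*32 = 2048 work slots.
  const GridReduceSizes s =
      grid_reduce_sizes(dim3(8, 4, 2), dim3(32, 8, 1),
                        /*X/Y/Z_BLOCK=*/true, false, false,
                        /*X/Y/Z_THREAD=*/true, false, false);
  std::printf("work_buf elems = %lld, sync flags = %lld\n",
              s.work_buf_elems, s.sync_flag_elems);
  return 0;
}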
#include <torch/script.h> #include <c10/cuda/CUDACachingAllocator.h> #include <cuda.h> #include <cuda_runtime.h> #include <solr/solr.cuh> #include <cnpy/cnpy.h> extern inline void cudaPrintError(const char* file, const int line); #define CUDA_PRINT_ERROR() cudaPrintError(__FILE__, __LINE__) //#define DEBUG #ifdef DEBUG # define STRINGIFY2(X) #X # define STRINGIFY(X) STRINGIFY2(X) # define TIMER solr::PerfTimer timer = solr::PerfTimer() # define TIMER_CHECK(x) timer.check(x) # define DEBUG_PRINT(x) std::cout << STRINGIFY(x) ":" << x << std::endl #else # define TIMER # define TIMER_CHECK(x) # define DEBUG_PRINT(x) #endif #define PROBE_CHECK(x) timer.check(x) namespace F = torch::nn::functional; namespace I = torch::indexing; SDF::SDF(void) { } SDF::~SDF(void) { cudaFree(pyramid_cf); cudaFree(m_pyramid); cudaFree(m_res); } void SDF::loadWeights(std::string filename) { cnpy::npz_t F = cnpy::npz_load(filename); auto h_opt = torch::TensorOptions().dtype(torch::kHalf); cnpy::NpyArray w0_npz = F["w0"]; long w0_shape[3] = { static_cast<long>(w0_npz.shape[0]), static_cast<long>(w0_npz.shape[1]), static_cast<long>(w0_npz.shape[2]) }; torch::Tensor _w0 = torch::from_blob( w0_npz.data<short>(), w0_shape, h_opt); for (long i=0; i<w0_shape[0]; ++i) { w0.push_back(_w0.index({i})); w0[i] = w0[i].to(torch::kFloat); w0[i] = w0[i].to(torch::kCUDA).transpose(0,1); } cnpy::NpyArray w1_npz = F["w1"]; long w1_shape[3] = { static_cast<long>(w1_npz.shape[0]), static_cast<long>(w1_npz.shape[1]), static_cast<long>(w1_npz.shape[2]) }; torch::Tensor _w1 = torch::from_blob( w1_npz.data<short>(), w1_shape, h_opt); for (long i=0; i<w1_shape[0]; ++i) { w1.push_back(_w1.index({i})); w1[i] = w1[i].to(torch::kFloat); w1[i] = w1[i].to(torch::kCUDA).transpose(0,1); } cnpy::NpyArray b0_npz = F["b0"]; long b0_shape[2] = { static_cast<long>(b0_npz.shape[0]), static_cast<long>(b0_npz.shape[1]) }; torch::Tensor _b0 = torch::from_blob( b0_npz.data<short>(), b0_shape, h_opt); for (long i=0; i<b0_shape[0]; ++i) { b0.push_back(_b0.index({i})); b0[i] = b0[i].to(torch::kFloat); b0[i] = b0[i].to(torch::kCUDA); } cnpy::NpyArray b1_npz = F["b1"]; long b1_shape[2] = { static_cast<long>(b1_npz.shape[0]), static_cast<long>(b1_npz.shape[1]) }; torch::Tensor _b1 = torch::from_blob( b1_npz.data<short>(), b1_shape, h_opt); for (long i=0; i<b1_shape[0]; ++i) { b1.push_back(_b1.index({i})); b1[i] = b1[i].to(torch::kFloat); b1[i] = b1[i].to(torch::kCUDA); } cnpy::NpyArray cc_npz = F["cc"]; auto b_opt = torch::TensorOptions().dtype(torch::kByte); long cc_shape[2] = { static_cast<long>(cc_npz.shape[0]), static_cast<long>(cc_npz.shape[1]) }; cc = torch::from_blob(cc_npz.data<char>(), cc_shape, b_opt); cc = cc.to(torch::kInt); cc = cc.to(torch::kCUDA); cnpy::NpyArray cf_npz = F["cf"]; long cf_shape[2] = { static_cast<long>(cf_npz.shape[0]), static_cast<long>(cf_npz.shape[1]) }; cf = torch::from_blob(cf_npz.data<char>(), cf_shape, h_opt); cf = cf.to(torch::kFloat); cf = cf.to(torch::kCUDA); cnpy::NpyArray pyramid_npz = F["pyramid"]; for (long i=0; i<w0_shape[0]; ++i) { pyramid_cf_cpu.push_back(pyramid_npz.data<long>()[i]); } } void SDF::initTrinkets(uint num_pts, uint num_levels, uint* pyramid, ushort4* points) { m_points = points; cudaMalloc<solr::Trinket>(&trinkets, num_pts * sizeof(solr::Trinket)); size_t sz = (num_levels+1) * sizeof(uint); cudaMallocManaged((void **)&m_pyramid, sz); for (int i=0; i<num_levels+1; ++i) { if (i == 0) { m_pyramid[i] = pyramid[i]; } else { m_pyramid[i] = pyramid[i] + m_pyramid[i-1]; } } size_t sz_res = (num_levels-1) * 
sizeof(uint); cudaMallocManaged((void **)&m_res, sz_res); for (int i=0; i<num_levels-1; ++i) { m_res[i] = 4 * std::pow(2, i); } uint offset_cf = 0; for (int i=2; i<num_levels+1; ++i) { uint offset = 0; uint parent_offset = 0; int parent_i = std::max(i-1, 2); for (int j=0; j<parent_i; j++) { parent_offset = m_pyramid[j]; } for (int j=0; j<i; j++) { offset = m_pyramid[j]; } # ifdef VERBOSE printf("offset on cf level %d : %d\n", i-2, offset_cf); printf("# elem in cf level %d : %d\n", i-2, pyramid_cf_cpu[i-2]); printf("offset on parent nuggets array %d: %d\n", parent_i, parent_offset); printf("# elem in parent nuggets array %d: %d\n", parent_i, pyramid[parent_i]); printf("offset on nuggets array %d : %d\n", i, offset); printf("# elem in nuggets level %d : %d\n", i, pyramid[i]); printf("\n"); # endif const int threads = 1024; const int blocks = (pyramid[i] + threads - 1) / threads; // generalize this to multiple levels solr::index_trinket_kernel<<<blocks, threads>>>( points, cc.data_ptr<int>(), cf.data_ptr<float>(), trinkets, pyramid_cf_cpu[i-2], offset_cf, pyramid[i], offset, pyramid[parent_i], parent_offset, i); offset_cf += pyramid_cf_cpu[i-2]; # ifdef DEBUG const int _tnum = 50; solr::Trinket* hosttrinkets = new solr::Trinket[_tnum]; cudaMemcpy(hosttrinkets, trinkets+offset, _tnum*sizeof(solr::Trinket), cudaMemcpyDeviceToHost); for (int i=0; i<_tnum; ++i) { solr::Trinket _t = hosttrinkets[i]; printf("%d: %d %d %d %d %d %d %d %d %d\n", i+offset, _t.v[0], _t.v[1], _t.v[2], _t.v[3], _t.v[4], _t.v[5], _t.v[6], _t.v[7], _t.parent); } # endif } } torch::Tensor SDF::getNormal( const torch::Tensor & x, const torch::Tensor & pidx, const torch::Tensor & hit, const int lod) { int nr = x.size(0); int nf = cf.size(0); int nl = w0.size(); int fdim = cf.size(1); torch::Tensor active_x = x.index({ hit }); torch::Tensor active_pidx = pidx.index({ hit }); torch::Tensor active_idxes = torch::nonzero(hit).to(torch::kInt); int active_n = active_idxes.size(0); const int threads = 128; const int blocks = (active_n + threads - 1) / threads; auto f_opt = torch::TensorOptions().dtype(torch::kF32).device(x.device()); torch::Tensor normal = torch::ones({ nr, 3 }, f_opt); std::vector<torch::Tensor> eps = { torch::tensor({0.001f, 0.0f, 0.0f}, f_opt), torch::tensor({0.0f, 0.001f, 0.0f}, f_opt), torch::tensor({0.0f, 0.0f, 0.001f}, f_opt) }; #pragma unroll for (int i=0; i<3; ++i) { torch::Tensor x_fwd = x + eps[i]; torch::Tensor x_bck = x - eps[i]; torch::Tensor xs_fwd = torch::zeros({ active_n, fdim+3 }, f_opt); torch::Tensor xs_bck = torch::zeros({ active_n, fdim+3 }, f_opt); sparse_grid_sample_kernel<<<blocks, threads>>>( x_fwd.data_ptr<float>(), pidx.data_ptr<int>(), active_idxes.data_ptr<int>(), trinkets, cf.data_ptr<float>(), m_pyramid, m_res, xs_fwd.data_ptr<float>(), active_n, nf, fdim, nl, lod, // debug stuff cc.data_ptr<int>() ); sparse_grid_sample_kernel<<<blocks, threads>>>( x_bck.data_ptr<float>(), pidx.data_ptr<int>(), active_idxes.data_ptr<int>(), trinkets, cf.data_ptr<float>(), m_pyramid, m_res, xs_bck.data_ptr<float>(), active_n, nf, fdim, nl, lod, // debug stuff cc.data_ptr<int>() ); auto xsw0_fwd = torch::relu(torch::addmm(b0[lod], xs_fwd, w0[lod])); auto xsw0_bck = torch::relu(torch::addmm(b0[lod], xs_bck, w0[lod])); auto d_fwd = torch::addmm(b1[lod], xsw0_fwd, w1[lod]); auto d_bck = torch::addmm(b1[lod], xsw0_bck, w1[lod]); auto d = d_fwd - d_bck; normal.index_put_({ hit, i }, d.index({ I::Slice(), 0 })); } solr::normalize_kernel<<<blocks, threads>>>( active_idxes.data_ptr<int>(), 
normal.data_ptr<float>(), active_n); return normal; } std::vector<torch::Tensor> SDF::sphereTrace( const torch::Tensor & ray_o, const torch::Tensor & ray_d, const torch::Tensor & nuggets, const torch::Tensor & points, const torch::Tensor & info, const int lod) { TIMER; // Convert to solr::Nuggets solr::Nugget* nuggets_ptr = reinterpret_cast<solr::Nugget*>(nuggets.contiguous().data_ptr<int>()); int nn = nuggets.size(0); // Rendering Parameters const int MARCH_ITER = 50; const float MIN_DIS = 0.0003; const float far = 5.0; // Tensor sizes int nr = ray_o.size(0); // # rays int nf = cf.size(0); // # feats int nl = w0.size(); // # lods int fdim = cf.size(1); // feat dim auto f_opt = torch::TensorOptions().dtype(torch::kF32).device(ray_o.device()); torch::Tensor x = ray_o.clone(); torch::Tensor t = torch::zeros({ nr, 1 }, f_opt); torch::Tensor d = torch::zeros({ nr, 1 }, f_opt); torch::Tensor dprev = torch::zeros({ nr, 1 }, f_opt); torch::Tensor normal = torch::ones({ nr, 3 }, f_opt); torch::Tensor ray_inv = 1.0 / ray_d; auto i_opt = torch::TensorOptions().dtype(torch::kInt32).device(ray_o.device()); // Tensor to store the hit-point index of each ray torch::Tensor pidx = torch::zeros({ nr, 1 }, i_opt) - 1; // Indices of beginnings of ray-nugget lists torch::Tensor info_idxes = torch::nonzero(info).index({I::Slice(), 0}).to(torch::kInt); // # ray-nugget hits int n_iidx = info_idxes.size(0); // Voxel size int voxel_res = pow(2, lod+2); float voxel_radius = (1.0 / voxel_res); // cond is the active rays // hit is the rays that have hit a surface auto b_opt = torch::TensorOptions().dtype(torch::kBool).device(ray_o.device()); torch::Tensor cond = torch::zeros({nr}, b_opt); // by default, no rays are active torch::Tensor hit = torch::zeros({nr}, b_opt); TIMER_CHECK(" init "); const int _aabb_threads = 256; const int _aabb_blocks = (n_iidx + _aabb_threads - 1) / _aabb_threads; solr::ray_aabb_kernel<<<_aabb_blocks, _aabb_threads>>>( ray_o.data_ptr<float>(), ray_d.data_ptr<float>(), ray_inv.data_ptr<float>(), ray_o.data_ptr<float>(), nuggets_ptr, points.data_ptr<short>(), info.data_ptr<int>(), info_idxes.data_ptr<int>(), voxel_radius, true, x.data_ptr<float>(), t.data_ptr<float>(), cond.data_ptr<bool>(), pidx.data_ptr<int>(), nn, n_iidx); TIMER_CHECK(" trace "); // # of voxel centers //int nvc = vc[lod].size(0); //return {x, t, cond, normal}; // uncomment to return voxels TIMER_CHECK(" post "); CUDA_PRINT_ERROR(); for (int i=0; i<MARCH_ITER; ++i) { // probably write a cuda kernel here... first a sum kernel, allocate, then populate? torch::Tensor active_idxes = torch::nonzero(cond).index({I::Slice(), 0}).to(torch::kInt); int n_active = active_idxes.size(0); // # active TIMER_CHECK(" get sizes"); if (n_active == 0) { DEBUG_PRINT(i); break; } TIMER_CHECK(" done? 
"); // Concat [x, f] torch::Tensor xs = torch::zeros({ n_active, fdim+3 }, f_opt); TIMER_CHECK(" allocate "); const int _sparse_threads = 128; const int _sparse_blocks = (n_active + _sparse_threads - 1) / _sparse_threads; sparse_grid_sample_kernel<<<_sparse_blocks, _sparse_threads>>>( x.data_ptr<float>(), pidx.data_ptr<int>(), active_idxes.data_ptr<int>(), trinkets, cf.data_ptr<float>(), m_pyramid, m_res, xs.data_ptr<float>(), n_active, nf, fdim, nl, lod, // debug stuff cc.data_ptr<int>() ); TIMER_CHECK(" sparse "); CUDA_PRINT_ERROR(); auto xsw0 = torch::relu(torch::addmm(b0[lod], xs, w0[lod])); auto _d = torch::addmm(b1[lod], xsw0, w1[lod]); TIMER_CHECK(" d "); int _step_threads = 128; int _step_blocks = (n_active + _step_threads - 1) / _step_threads; solr::step_kernel<<<_step_blocks, _step_threads>>>( ray_o.data_ptr<float>(), ray_d.data_ptr<float>(), active_idxes.data_ptr<int>(), _d.data_ptr<float>(), x.data_ptr<float>(), t.data_ptr<float>(), d.data_ptr<float>(), dprev.data_ptr<float>(), cond.data_ptr<bool>(), hit.data_ptr<bool>(), n_active); TIMER_CHECK(" step "); CUDA_PRINT_ERROR(); const int _sample_threads = 128; const int _sample_blocks = (n_iidx + _sample_threads - 1) / _sample_threads; solr::ray_aabb_kernel<<<_sample_blocks, _sample_threads>>>( ray_o.data_ptr<float>(), ray_d.data_ptr<float>(), ray_inv.data_ptr<float>(), x.data_ptr<float>(), nuggets_ptr, points.data_ptr<short>(), info.data_ptr<int>(), info_idxes.data_ptr<int>(), voxel_radius, false, x.data_ptr<float>(), t.data_ptr<float>(), cond.data_ptr<bool>(), pidx.data_ptr<int>(), nn, n_iidx); TIMER_CHECK(" sample "); CUDA_PRINT_ERROR(); } TIMER_CHECK(" st "); normal = getNormal(x, pidx, hit, lod); CUDA_PRINT_ERROR(); return {x, t, hit, normal}; }
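// ---------------------------------------------------------------------------
// Illustrative reference (not part of the original solr sources): the loop in
// SDF::sphereTrace above performs classic sphere tracing -- each active ray is
// advanced by the predicted signed distance until the distance falls below a
// threshold or the iteration budget runs out. The single-ray CPU sketch below
// shows that update rule under the assumption that solr::step_kernel applies
// it per active ray; `SdfFn` is a hypothetical stand-in for the batched MLP
// evaluation (addmm + relu + addmm) done on the GPU.
// ---------------------------------------------------------------------------
#include <array>
#include <cmath>
#include <functional>

using Vec3 = std::array<float, 3>;
using SdfFn = std::function<float(const Vec3&)>;

static Vec3 PointAt(const Vec3& o, const Vec3& d, float t) {
  return {o[0] + t * d[0], o[1] + t * d[1], o[2] + t * d[2]};
}

// Marches one ray; returns true on a hit and writes the hit distance to t_out.
static bool SphereTraceOneRay(const Vec3& ray_o, const Vec3& ray_d,
                              const SdfFn& sdf, int march_iter, float min_dis,
                              float far, float* t_out) {
  float t = 0.f;
  for (int i = 0; i < march_iter; ++i) {
    const float d = sdf(PointAt(ray_o, ray_d, t));  // predicted distance at x
    if (std::fabs(d) < min_dis) { *t_out = t; return true; }  // surface hit
    t += d;                // safe step: move forward by the SDF value
    if (t > far) break;    // ray left the scene bounds; report a miss
  }
  return false;
}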
// Copyright (c) 2018 Changan Wang // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. #if GOOGLE_CUDA == 1 #define EIGEN_USE_GPU #include "l_softmax_op.h" #include "tensorflow/core/util/cuda_kernel_helper.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor_shape.h" using namespace tensorflow; #include <cstdint> #include <cmath> #include <cfloat> static __device__ int32_t cuda_factorial(int32_t n){ int32_t frac = 1; while(n-- > 0) frac *= (n+1); return frac; } // Define the CUDA kernel. template <typename T> __global__ void LargeMarginSoftmaxGradCudaKernel(CudaLaunchConfig config, const T * back_grads, const T * features, const T * weights, const float * cur_lambda, const int32_t * labels, const int32_t batch_size, const int32_t num_dimensions, const int32_t output_dimensions, const int32_t margin_order, const bool b_angular, float * feat_norm, float * weights_norm, float * cos_theta, float * theta_seg, T * grad_features, T * grad_weights) { for(int32_t index = 0;index < batch_size;++index){ T temp_sum{0}; const T *feat_along = features + index * num_dimensions; for(int32_t dim_ind = 0;dim_ind < num_dimensions;++dim_ind){ temp_sum += ldg(feat_along + dim_ind) * ldg(feat_along + dim_ind); } feat_norm[index] = std::pow(static_cast<float>(temp_sum), .5); } for(int32_t index = 0;index < output_dimensions;++index){ T temp_sum{0}; const T *weights_along = weights + index * num_dimensions; for(int32_t dim_ind = 0;dim_ind < num_dimensions;++dim_ind){ temp_sum += ldg(weights_along + dim_ind) * ldg(weights_along + dim_ind); } weights_norm[index] = b_angular ? 1. 
: std::pow(static_cast<float>(temp_sum), .5); } for(int32_t index = 0;index < margin_order;++index){ theta_seg[index] = std::cos(_PI * index / margin_order); } theta_seg[margin_order] = -1.; CUDA_1D_KERNEL_LOOP(worker_index, config.virtual_thread_count) { const int32_t output_row = worker_index / output_dimensions; const int32_t output_col = worker_index % output_dimensions; float feat_norm_value = feat_norm[output_row]; float *p_cos_theta = cos_theta + output_row * output_dimensions; const T *feat_start = features + output_row * num_dimensions; // get cos_theta for features and all weights rows for(int32_t col_ind = 0;col_ind < output_dimensions;++col_ind){ const T *weights_start = weights + col_ind * num_dimensions; T inner_dot{0}; for(int32_t index = 0;index < num_dimensions;++index){ inner_dot += ldg(feat_start + index) * ldg(weights_start + index); } p_cos_theta[col_ind] = static_cast<float>(inner_dot) / (feat_norm[output_row] * weights_norm[col_ind]); } int32_t k_block = 0; for(int32_t index = 1;index < margin_order+1;++index){ if(p_cos_theta[output_col] > theta_seg[index]){ k_block = index - 1; break; } } float single_cos = p_cos_theta[output_col]; float sin2_theta = 1. - single_cos * single_cos; float cos_n_theta = 0.; // calculate cons_n_theta if(ldg(labels+output_row) == output_col){ cos_n_theta = std::pow(single_cos, margin_order*1.); for(int32_t m = 1; m <= margin_order / 2; ++m){ float binomial = cuda_factorial(margin_order) / (cuda_factorial(2 * m) * cuda_factorial(margin_order - 2 * m) * 1.); cos_n_theta += std::pow(-1, m) * std::pow(sin2_theta, m * 1.) * std::pow(single_cos, margin_order - 2. * m) * binomial; } cos_n_theta = std::pow(-1., k_block) * cos_n_theta - 2 * k_block; } // grad of cos_n_theta by cos_theta float grad_of_cos_theta = margin_order * std::pow(single_cos, margin_order - 1.); for(int32_t m = 1; m <= margin_order / 2; ++m){ float binomial = cuda_factorial(margin_order) / (cuda_factorial(2 * m) * cuda_factorial(margin_order - 2 * m) * 1.); grad_of_cos_theta += std::pow(-1, m) * std::pow(sin2_theta, m - 1.) * std::pow(single_cos, margin_order - 2 * m - 1.) * (-2 * m + margin_order - margin_order * std::pow(single_cos, 2.)) * binomial; } grad_of_cos_theta = grad_of_cos_theta * std::pow(-1., k_block); // backprop const float input_grad = ldg(back_grads + worker_index); const T *weights_start = weights + output_col * num_dimensions; T *grad_feat_start = grad_features + output_row * num_dimensions; T *grad_weights_start = grad_weights + output_col * num_dimensions; // softmax for(int32_t dim_ind = 0; dim_ind < num_dimensions; ++dim_ind){ atomicAdd(grad_weights_start + dim_ind, input_grad * ldg(cur_lambda)/(ldg(cur_lambda) + 1.) * ldg(feat_start+dim_ind)); atomicAdd(grad_feat_start + dim_ind, input_grad * ldg(cur_lambda)/(ldg(cur_lambda) + 1.) * ldg(weights_start+dim_ind)); } // large margin softmax if(ldg(labels + output_row) == output_col){ for(int32_t dim_ind = 0; dim_ind < num_dimensions; ++dim_ind){ float wx_norm = feat_norm_value * weights_norm[output_col]; float grad_cos_n_theta_by_w = b_angular ? grad_of_cos_theta * feat_start[dim_ind] / feat_norm_value : grad_of_cos_theta / (feat_norm_value * weights_norm[output_col] * weights_norm[output_col]) * ( (ldg(feat_start+dim_ind) * weights_norm[output_col]) - (wx_norm * single_cos * ldg(weights_start+dim_ind) / weights_norm[output_col]) ); if(b_angular){ atomicAdd(grad_weights_start + dim_ind, input_grad * feat_norm_value/(ldg(cur_lambda) + 1.) 
*grad_cos_n_theta_by_w ); }else{ atomicAdd(grad_weights_start + dim_ind, input_grad * feat_norm_value/(ldg(cur_lambda) + 1.) * ( cos_n_theta * ldg(weights_start+dim_ind) / weights_norm[output_col] + grad_cos_n_theta_by_w * weights_norm[output_col] ) ); } float grad_cos_n_theta_by_x = grad_of_cos_theta / (weights_norm[output_col] * feat_norm_value * feat_norm_value) * ( (ldg(weights_start+dim_ind) * feat_norm_value) - (wx_norm * single_cos * ldg(feat_start+dim_ind) / feat_norm_value) ); atomicAdd(grad_feat_start + dim_ind, input_grad * weights_norm[output_col]/(ldg(cur_lambda) + 1.) * ( cos_n_theta * ldg(feat_start+dim_ind) / feat_norm_value + grad_cos_n_theta_by_x * feat_norm_value ) ); } } } } template <typename T> void LargeMarginSoftmaxGradFunctor<GPUDevice, T>::operator()(OpKernelContext* context, const GPUDevice& d, typename TTypes<T>::ConstFlat back_grads, typename TTypes<T>::ConstFlat features, typename TTypes<T>::ConstFlat weights, typename TTypes<float>::ConstFlat cur_lambda, typename TTypes<int32_t>::ConstFlat labels, const int32_t batch_size, const int32_t num_dimensions, const int32_t output_dimensions, const int32_t margin_order, const bool b_angular, typename TTypes<float>::Flat feat_norm, typename TTypes<float>::Flat weights_norm, typename TTypes<float>::Flat cos_theta, typename TTypes<float>::Flat theta_seg, typename TTypes<T>::Flat grad_features, typename TTypes<T>::Flat grad_weights) { CudaLaunchConfig config = GetCudaLaunchConfig(batch_size * num_dimensions, d); SetZero <<<config.block_count, config.thread_per_block, 0, d.stream()>>> (batch_size * num_dimensions, grad_features.data()); config = GetCudaLaunchConfig(output_dimensions * num_dimensions, d); SetZero <<<config.block_count, config.thread_per_block, 0, d.stream()>>> (output_dimensions * num_dimensions, grad_weights.data()); config = GetCudaLaunchConfig(batch_size * output_dimensions, d); LargeMarginSoftmaxGradCudaKernel <<<config.block_count, config.thread_per_block, 0, d.stream()>>> (config, back_grads.data(), features.data(), weights.data(), cur_lambda.data(), labels.data(), batch_size, num_dimensions, output_dimensions, margin_order, b_angular, feat_norm.data(), weights_norm.data(), cos_theta.data(), theta_seg.data(), grad_features.data(), grad_weights.data()); cudaError_t err = cudaGetLastError(); if(cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } } template struct LargeMarginSoftmaxGradFunctor<GPUDevice, float>; // #define DEFINE_GPU_SPECS(T) \ // template struct LargeMarginSoftmaxGradFunctor<T>; // TF_CALL_GPU_NUMBER_TYPES(DEFINE_GPU_SPECS); #endif // GOOGLE_CUDA
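// ---------------------------------------------------------------------------
// Illustrative host-side reference (not part of the original op): the gradient
// kernel above expands cos(m*theta) with the binomial series and then applies
// psi(theta) = (-1)^k * cos(m*theta) - 2k for the target class. The functions
// below mirror that arithmetic on the CPU and can be used to sanity-check the
// device results for a given cos_theta, margin order m and segment index k.
// ---------------------------------------------------------------------------
#include <cmath>
#include <cstdint>

static int32_t host_factorial(int32_t n) {
  int32_t f = 1;
  while (n > 1) f *= n--;
  return f;
}

static float host_cos_m_theta(float cos_theta, int32_t m) {
  const float sin2 = 1.f - cos_theta * cos_theta;
  float result = std::pow(cos_theta, static_cast<float>(m));
  for (int32_t j = 1; j <= m / 2; ++j) {
    const float binom = host_factorial(m) /
        static_cast<float>(host_factorial(2 * j) * host_factorial(m - 2 * j));
    result += std::pow(-1.f, static_cast<float>(j)) *
              std::pow(sin2, static_cast<float>(j)) *
              std::pow(cos_theta, static_cast<float>(m - 2 * j)) * binom;
  }
  return result;
}

// k is the index of the [0, pi] segment containing theta (derived from
// theta_seg in the kernel above).
static float host_psi(float cos_theta, int32_t m, int32_t k) {
  return std::pow(-1.f, static_cast<float>(k)) * host_cos_m_theta(cos_theta, m)
         - 2.f * k;
}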
#include <cub/cub.cuh> #include <libvis/cuda/cuda_auto_tuner.h> #include "badslam/cost_function.cuh" #include "badslam/cuda_util.cuh" #include "badslam/cuda_matrix.cuh" #include "badslam/surfel_projection_nvcc_only.cuh" #include "badslam/util.cuh" #include "badslam/util_nvcc_only.cuh" namespace vis { __global__ void ResetSurfelAccumCUDAKernel( u32 surfels_size, CUDABuffer_<float> surfels, CUDABuffer_<u8> active_surfels) { const unsigned int surfel_index = blockIdx.x * blockDim.x + threadIdx.x; if (surfel_index < surfels_size) { if (!(active_surfels(0, surfel_index) & kSurfelActiveFlag)) { return; } surfels(kSurfelAccum0, surfel_index) = 0; surfels(kSurfelAccum1, surfel_index) = 0; surfels(kSurfelAccum2, surfel_index) = 0; surfels(kSurfelAccum3, surfel_index) = 0; surfels(kSurfelAccum4, surfel_index) = 0; surfels(kSurfelAccum5, surfel_index) = 0; surfels(kSurfelAccum6, surfel_index) = 0; surfels(kSurfelAccum7, surfel_index) = 0; surfels(kSurfelAccum8, surfel_index) = 0; } } void CallResetSurfelAccumCUDAKernel( cudaStream_t stream, u32 surfels_size, const CUDABuffer_<float>& surfels, const CUDABuffer_<u8>& active_surfels) { CUDA_AUTO_TUNE_1D( ResetSurfelAccumCUDAKernel, 512, surfels_size, 0, stream, /* kernel parameters */ surfels_size, surfels, active_surfels); CUDA_CHECK(); } __global__ void ResetSurfelAccum0to3CUDAKernel( u32 surfels_size, CUDABuffer_<float> surfels, CUDABuffer_<u8> active_surfels) { const unsigned int surfel_index = blockIdx.x * blockDim.x + threadIdx.x; if (surfel_index < surfels_size) { if (!(active_surfels(0, surfel_index) & kSurfelActiveFlag)) { return; } surfels(kSurfelAccum0, surfel_index) = 0; surfels(kSurfelAccum1, surfel_index) = 0; surfels(kSurfelAccum2, surfel_index) = 0; surfels(kSurfelAccum3, surfel_index) = 0; } } void CallResetSurfelAccum0to3CUDAKernel( cudaStream_t stream, u32 surfels_size, const CUDABuffer_<float>& surfels, const CUDABuffer_<u8>& active_surfels) { CUDA_AUTO_TUNE_1D( ResetSurfelAccum0to3CUDAKernel, 512, surfels_size, 0, stream, /* kernel parameters */ surfels_size, surfels, active_surfels); CUDA_CHECK(); } template<bool use_depth_residuals> __global__ void AccumulateSurfelPositionAndDescriptorOptimizationCoeffsCUDAKernel( SurfelProjectionParameters s, PixelCenterUnprojector depth_unprojector, DepthToColorPixelCorner depth_to_color, PixelCornerProjector color_corner_projector, cudaTextureObject_t color_texture, CUDABuffer_<u8> active_surfels) { const unsigned int surfel_index = blockIdx.x * blockDim.x + threadIdx.x; if (surfel_index < s.surfels_size) { if (!(active_surfels(0, surfel_index) & kSurfelActiveFlag)) { return; } SurfelProjectionResult6 r; if (SurfelProjectsToAssociatedPixel(surfel_index, s, &r)) { float3 rn = s.frame_T_global.Rotate(r.surfel_normal); // --- Depth residual change wrt. 
position change --- if (use_depth_residuals) { float depth_residual_inv_stddev = ComputeDepthResidualInvStddevEstimate(depth_unprojector.nx(r.px), depth_unprojector.ny(r.py), r.pixel_calibrated_depth, rn, s.depth_params.baseline_fx); const float depth_jacobian = -depth_residual_inv_stddev; float3 local_unproj; float raw_depth_residual; ComputeRawDepthResidual( depth_unprojector, r.px, r.py, r.pixel_calibrated_depth, depth_residual_inv_stddev, r.surfel_local_position, rn, &local_unproj, &raw_depth_residual); const float depth_weight = ComputeDepthResidualWeight(raw_depth_residual); // Accumulate: s.surfels(kSurfelAccum0, surfel_index) += depth_weight * depth_jacobian * depth_jacobian; s.surfels(kSurfelAccum6, surfel_index) += depth_weight * raw_depth_residual * depth_jacobian; } // -------------------------------------------------- float2 color_pxy; if (TransformDepthToColorPixelCorner(r.pxy, depth_to_color, &color_pxy)) { // --- Descriptor residual --- float2 t1_pxy, t2_pxy; ComputeTangentProjections( r.surfel_global_position, r.surfel_normal, SurfelGetRadiusSquared(s.surfels, surfel_index), s.frame_T_global, color_corner_projector, &t1_pxy, &t2_pxy); const float surfel_descriptor_1 = s.surfels(kSurfelDescriptor1, surfel_index); const float surfel_descriptor_2 = s.surfels(kSurfelDescriptor2, surfel_index); float raw_descriptor_residual_1; float raw_descriptor_residual_2; ComputeRawDescriptorResidual( color_texture, color_pxy, t1_pxy, t2_pxy, surfel_descriptor_1, surfel_descriptor_2, &raw_descriptor_residual_1, &raw_descriptor_residual_2); // --------------------------- // --- Descriptor residual change wrt. position change --- float grad_x_1; float grad_y_1; float grad_x_2; float grad_y_2; DescriptorJacobianWrtProjectedPosition( color_texture, color_pxy, t1_pxy, t2_pxy, &grad_x_1, &grad_y_1, &grad_x_2, &grad_y_2); const float term1 = -color_corner_projector.fx * (rn.x*r.surfel_local_position.z - rn.z*r.surfel_local_position.x); const float term2 = -color_corner_projector.fy * (rn.y*r.surfel_local_position.z - rn.z*r.surfel_local_position.y); const float term3 = 1.f / (r.surfel_local_position.z * r.surfel_local_position.z); float jacobian_wrt_position_1 = -(grad_x_1 * term1 + grad_y_1 * term2) * term3; float jacobian_wrt_position_2 = -(grad_x_2 * term1 + grad_y_2 * term2) * term3; // ------------------------------------------------------- // --- Descriptor residual change wrt. 
descriptor change --- constexpr float jacobian_wrt_descriptor = -1.f; // --------------------------------------------------------- // Accumulate: // kSurfelAccum0: H(0, 0) // kSurfelAccum1: H(0, 1) = H(1, 0) // kSurfelAccum2: H(0, 2) = H(2, 0) // kSurfelAccum3: H(1, 1) // kSurfelAccum4: H(1, 2) = H(2, 1) // kSurfelAccum5: H(2, 2) // kSurfelAccum6: b(0) // kSurfelAccum7: b(1) // kSurfelAccum8: b(2) const float weight_1 = ComputeDescriptorResidualWeight(raw_descriptor_residual_1); const float weighted_raw_residual_1 = weight_1 * raw_descriptor_residual_1; const float weight_2 = ComputeDescriptorResidualWeight(raw_descriptor_residual_2); const float weighted_raw_residual_2 = weight_2 * raw_descriptor_residual_2; // Residual 1 (and some parts of 2, where accumulating onto the same variable) s.surfels(kSurfelAccum0, surfel_index) += weight_1 * jacobian_wrt_position_1 * jacobian_wrt_position_1 + weight_2 * jacobian_wrt_position_2 * jacobian_wrt_position_2; // from residual 2 s.surfels(kSurfelAccum1, surfel_index) += weight_1 * jacobian_wrt_position_1 * jacobian_wrt_descriptor; s.surfels(kSurfelAccum3, surfel_index) += weight_1 * jacobian_wrt_descriptor * jacobian_wrt_descriptor; s.surfels(kSurfelAccum6, surfel_index) += weighted_raw_residual_1 * jacobian_wrt_position_1 + weighted_raw_residual_2 * jacobian_wrt_position_2; // from residual 2 s.surfels(kSurfelAccum7, surfel_index) += weighted_raw_residual_1 * jacobian_wrt_descriptor; // Residual 2 s.surfels(kSurfelAccum2, surfel_index) += weight_2 * jacobian_wrt_position_2 * jacobian_wrt_descriptor; s.surfels(kSurfelAccum5, surfel_index) += weight_2 * jacobian_wrt_descriptor * jacobian_wrt_descriptor; s.surfels(kSurfelAccum8, surfel_index) += weighted_raw_residual_2 * jacobian_wrt_descriptor; } } } } void AccumulateSurfelPositionAndDescriptorOptimizationCoeffsCUDAKernel( cudaStream_t stream, const SurfelProjectionParameters& s, const PixelCenterUnprojector& depth_unprojector, const DepthToColorPixelCorner& depth_to_color, const PixelCornerProjector& color_corner_projector, cudaTextureObject_t color_texture, const CUDABuffer_<u8>& active_surfels, bool use_depth_residuals) { if (use_depth_residuals) { CUDA_AUTO_TUNE_1D( AccumulateSurfelPositionAndDescriptorOptimizationCoeffsCUDAKernel<true>, 512, s.surfels_size, 0, stream, /* kernel parameters */ s, depth_unprojector, depth_to_color, color_corner_projector, color_texture, active_surfels); } else { CUDA_AUTO_TUNE_1D( AccumulateSurfelPositionAndDescriptorOptimizationCoeffsCUDAKernel<false>, 512, s.surfels_size, 0, stream, /* kernel parameters */ s, depth_unprojector, depth_to_color, color_corner_projector, color_texture, active_surfels); } CUDA_CHECK(); } __global__ void UpdateSurfelPositionAndDescriptorCUDAKernel( u32 surfels_size, CUDABuffer_<float> surfels, CUDABuffer_<u8> active_surfels) { const unsigned int surfel_index = blockIdx.x * blockDim.x + threadIdx.x; if (surfel_index < surfels_size) { if (!(active_surfels(0, surfel_index) & kSurfelActiveFlag)) { return; } float H_0_0 = surfels(kSurfelAccum0, surfel_index); float H_0_1 = surfels(kSurfelAccum1, surfel_index); float H_0_2 = surfels(kSurfelAccum2, surfel_index); float H_1_1 = surfels(kSurfelAccum3, surfel_index); float H_1_2 = surfels(kSurfelAccum4, surfel_index); float H_2_2 = surfels(kSurfelAccum5, surfel_index); // Make sure that the matrix is positive definite // (instead of only semi-positive definite). 
constexpr float kEpsilon = 1e-6f; H_0_0 += kEpsilon; H_1_1 += kEpsilon; H_2_2 += kEpsilon; // Perform in-place Cholesky decomposition of H H_0_0 = sqrtf(H_0_0); H_0_1 = H_0_1 / H_0_0; H_1_1 = sqrtf(H_1_1 - H_0_1 * H_0_1); H_0_2 = H_0_2 / H_0_0; H_1_2 = (H_1_2 - H_0_2 * H_0_1) / H_1_1; H_2_2 = sqrtf(H_2_2 - H_0_2 * H_0_2 - H_1_2 * H_1_2); // Solve H * x = b for x. // // (H_0_0 0 0) (H_0_0 H_0_1 H_0_2) (x0) (b0) // (H_0_1 H_1_1 0) * ( 0 H_1_1 H_1_2) * (x1) = (b1) // (H_0_2 H_1_2 H_2_2) ( 0 0 H_2_2) (x2) (b2) // // Naming the result of the second multiplication y, we get: // // (H_0_0 0 0) (y0) (b0) // (H_0_1 H_1_1 0) * (y1) = (b1) // (H_0_2 H_1_2 H_2_2) (y2) (b2) // // and: // // (H_0_0 H_0_1 H_0_2) (x0) (y0) // ( 0 H_1_1 H_1_2) * (x1) = (y1) // ( 0 0 H_2_2) (x2) = (y2) const float b0 = surfels(kSurfelAccum6, surfel_index); const float b1 = surfels(kSurfelAccum7, surfel_index); const float b2 = surfels(kSurfelAccum8, surfel_index); float y0 = b0 / H_0_0; float y1 = (b1 - H_0_1 * y0) / H_1_1; float y2 = (b2 - H_0_2 * y0 - H_1_2 * y1) / H_2_2; float x2 = y2 / H_2_2; float x1 = (y1 - H_1_2 * x2) / H_1_1; float x0 = (y0 - H_0_2 * x2 - H_0_1 * x1) / H_0_0; if (x0 != 0) { // Update surfel position float3 global_position = SurfelGetPosition(surfels, surfel_index); float3 surfel_normal = SurfelGetNormal(surfels, surfel_index); SurfelSetPosition(&surfels, surfel_index, global_position - x0 * surfel_normal); } if (x1 != 0) { float surfel_descriptor_1 = surfels(kSurfelDescriptor1, surfel_index); surfel_descriptor_1 -= x1; surfels(kSurfelDescriptor1, surfel_index) = ::max(-180.f, ::min(180.f, surfel_descriptor_1)); } if (x2 != 0) { float surfel_descriptor_2 = surfels(kSurfelDescriptor2, surfel_index); surfel_descriptor_2 -= x2; surfels(kSurfelDescriptor2, surfel_index) = ::max(-180.f, ::min(180.f, surfel_descriptor_2)); } // Reset accum fields for normal optimization. // surfels(kSurfelAccum0, surfel_index) = 0; // surfels(kSurfelAccum1, surfel_index) = 0; // surfels(kSurfelAccum2, surfel_index) = 0; // surfels(kSurfelAccum3, surfel_index) = 0; } } void CallUpdateSurfelPositionAndDescriptorCUDAKernel( cudaStream_t stream, u32 surfels_size, CUDABuffer_<float> surfels, CUDABuffer_<u8> active_surfels) { CUDA_AUTO_TUNE_1D( UpdateSurfelPositionAndDescriptorCUDAKernel, 512, surfels_size, 0, stream, /* kernel parameters */ surfels_size, surfels, active_surfels); CUDA_CHECK(); } __global__ void ResetSurfelAccum0to1CUDAKernel( u32 surfels_size, CUDABuffer_<float> surfels, CUDABuffer_<u8> active_surfels) { const unsigned int surfel_index = blockIdx.x * blockDim.x + threadIdx.x; if (surfel_index < surfels_size) { if (!(active_surfels(0, surfel_index) & kSurfelActiveFlag)) { return; } surfels(kSurfelAccum0, surfel_index) = 0; surfels(kSurfelAccum1, surfel_index) = 0; } } void CallResetSurfelAccum0to1CUDAKernel( cudaStream_t stream, u32 surfels_size, const CUDABuffer_<float>& surfels, const CUDABuffer_<u8>& active_surfels) { CUDA_AUTO_TUNE_1D( ResetSurfelAccum0to1CUDAKernel, 512, surfels_size, 0, stream, /* kernel parameters */ surfels_size, surfels, active_surfels); CUDA_CHECK(); } // This function only considers the depth residual. If the descriptor residual // is also used, it should be considered jointly. 
__global__ void AccumulateSurfelPositionOptimizationCoeffsFromDepthResidualCUDAKernel( SurfelProjectionParameters s, PixelCenterUnprojector depth_unprojector, DepthToColorPixelCorner depth_to_color, float color_fx, float color_fy, cudaTextureObject_t color_texture, CUDABuffer_<u8> active_surfels) { const unsigned int surfel_index = blockIdx.x * blockDim.x + threadIdx.x; if (surfel_index < s.surfels_size) { if (!(active_surfels(0, surfel_index) & kSurfelActiveFlag)) { return; } SurfelProjectionResult6 r; if (SurfelProjectsToAssociatedPixel(surfel_index, s, &r)) { // --- Depth residual --- float3 rn = s.frame_T_global.Rotate(r.surfel_normal); float depth_residual_inv_stddev = ComputeDepthResidualInvStddevEstimate(depth_unprojector.nx(r.px), depth_unprojector.ny(r.py), r.pixel_calibrated_depth, rn, s.depth_params.baseline_fx); const float depth_jacobian = -depth_residual_inv_stddev; float3 local_unproj; float raw_depth_residual; ComputeRawDepthResidual( depth_unprojector, r.px, r.py, r.pixel_calibrated_depth, depth_residual_inv_stddev, r.surfel_local_position, rn, &local_unproj, &raw_depth_residual); // Accumulate: // kSurfelAccum0: H // kSurfelAccum1: b const float depth_weight = ComputeDepthResidualWeight(raw_depth_residual); float weighted_jacobian = depth_weight * depth_jacobian; s.surfels(kSurfelAccum0, surfel_index) += weighted_jacobian * depth_jacobian; s.surfels(kSurfelAccum1, surfel_index) += weighted_jacobian * raw_depth_residual; } } } void CallAccumulateSurfelPositionOptimizationCoeffsFromDepthResidualCUDAKernel( cudaStream_t stream, SurfelProjectionParameters s, PixelCenterUnprojector depth_unprojector, DepthToColorPixelCorner depth_to_color, float color_fx, float color_fy, cudaTextureObject_t color_texture, CUDABuffer_<u8> active_surfels) { CUDA_AUTO_TUNE_1D( AccumulateSurfelPositionOptimizationCoeffsFromDepthResidualCUDAKernel, 512, s.surfels_size, 0, stream, /* kernel parameters */ s, depth_unprojector, depth_to_color, color_fx, color_fy, color_texture, active_surfels); CUDA_CHECK(); } __global__ void UpdateSurfelPositionCUDAKernel( u32 surfels_size, CUDABuffer_<float> surfels, CUDABuffer_<u8> active_surfels) { const unsigned int surfel_index = blockIdx.x * blockDim.x + threadIdx.x; if (surfel_index < surfels_size) { if (!(active_surfels(0, surfel_index) & kSurfelActiveFlag)) { return; } float H = surfels(kSurfelAccum0, surfel_index); constexpr float kEpsilon = 1e-6f; if (H > kEpsilon) { float3 global_position = SurfelGetPosition(surfels, surfel_index); float t = -1.f * surfels(kSurfelAccum1, surfel_index) / H; float3 surfel_normal = SurfelGetNormal(surfels, surfel_index); SurfelSetPosition(&surfels, surfel_index, global_position + t * surfel_normal); } } } void CallUpdateSurfelPositionCUDAKernel( cudaStream_t stream, u32 surfels_size, CUDABuffer_<float> surfels, CUDABuffer_<u8> active_surfels) { CUDA_AUTO_TUNE_1D( UpdateSurfelPositionCUDAKernel, 512, surfels_size, 0, stream, /* kernel parameters */ surfels_size, surfels, active_surfels); CUDA_CHECK(); } __global__ void AccumulateSurfelNormalOptimizationCoeffsCUDAKernel( SurfelProjectionParameters s, CUDAMatrix3x3 global_R_frame, CUDABuffer_<u8> active_surfels) { const unsigned int surfel_index = blockIdx.x * blockDim.x + threadIdx.x; if (surfel_index < s.surfels_size) { if (!(active_surfels(0, surfel_index) & kSurfelActiveFlag)) { return; } SurfelProjectionResultXY r; if (SurfelProjectsToAssociatedPixel(surfel_index, s, &r)) { // Transform the frame's normal to global space. 
float3 local_normal = U16ToImageSpaceNormal(s.normals_buffer(r.py, r.px)); float3 global_normal = global_R_frame * local_normal; // Accumulate. // kSurfelAccum0: normal.x // kSurfelAccum1: normal.y // kSurfelAccum2: normal.z // kSurfelAccum3: count // NOTE: This does a simple averaging of the normals, it does not // optimize according to the cost function. s.surfels(kSurfelAccum0, surfel_index) += global_normal.x; s.surfels(kSurfelAccum1, surfel_index) += global_normal.y; s.surfels(kSurfelAccum2, surfel_index) += global_normal.z; s.surfels(kSurfelAccum3, surfel_index) += 1.f; } } } void CallAccumulateSurfelNormalOptimizationCoeffsCUDAKernel( cudaStream_t stream, SurfelProjectionParameters s, CUDAMatrix3x3 global_R_frame, CUDABuffer_<u8> active_surfels) { CUDA_AUTO_TUNE_1D( AccumulateSurfelNormalOptimizationCoeffsCUDAKernel, 512, s.surfels_size, 0, stream, /* kernel parameters */ s, global_R_frame, active_surfels); CUDA_CHECK(); } __global__ void UpdateSurfelNormalCUDAKernel( u32 surfels_size, CUDABuffer_<float> surfels, CUDABuffer_<u8> active_surfels) { const unsigned int surfel_index = blockIdx.x * blockDim.x + threadIdx.x; if (surfel_index < surfels_size) { if (!(active_surfels(0, surfel_index) & kSurfelActiveFlag)) { return; } float count = surfels(kSurfelAccum3, surfel_index); if (count >= 1) { float3 normal_sum = make_float3(surfels(kSurfelAccum0, surfel_index), surfels(kSurfelAccum1, surfel_index), surfels(kSurfelAccum2, surfel_index)); SurfelSetNormal(&surfels, surfel_index, (1.f / count) * normal_sum); } } } void CallUpdateSurfelNormalCUDAKernel( cudaStream_t stream, u32 surfels_size, CUDABuffer_<float> surfels, CUDABuffer_<u8> active_surfels) { CUDA_AUTO_TUNE_1D( UpdateSurfelNormalCUDAKernel, 512, surfels_size, 0, stream, /* kernel parameters */ surfels_size, surfels, active_surfels); CUDA_CHECK(); } }
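// ---------------------------------------------------------------------------
// Host-side reference (not part of the original badslam sources) of the 3x3
// Cholesky solve carried out in UpdateSurfelPositionAndDescriptorCUDAKernel
// above, using the same H/b layout (symmetric H00..H22, right-hand side
// b0..b2) and the same epsilon regularization. Handy for unit-testing the
// device-side surfel update.
// ---------------------------------------------------------------------------
#include <cmath>

struct SurfelUpdate3 { float x0, x1, x2; };

static SurfelUpdate3 SolveCholesky3x3(float H00, float H01, float H02,
                                      float H11, float H12, float H22,
                                      float b0, float b1, float b2,
                                      float epsilon = 1e-6f) {
  // Regularize so a semi-definite H still factorizes.
  H00 += epsilon; H11 += epsilon; H22 += epsilon;

  // Lower-triangular factor L with H = L * L^T.
  const float L00 = std::sqrt(H00);
  const float L10 = H01 / L00;
  const float L11 = std::sqrt(H11 - L10 * L10);
  const float L20 = H02 / L00;
  const float L21 = (H12 - L20 * L10) / L11;
  const float L22 = std::sqrt(H22 - L20 * L20 - L21 * L21);

  // Forward substitution L * y = b, then back substitution L^T * x = y.
  const float y0 = b0 / L00;
  const float y1 = (b1 - L10 * y0) / L11;
  const float y2 = (b2 - L20 * y0 - L21 * y1) / L22;
  const float x2 = y2 / L22;
  const float x1 = (y1 - L21 * x2) / L11;
  const float x0 = (y0 - L20 * x2 - L10 * x1) / L00;
  return {x0, x1, x2};
}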
* \file * cub::WarpScanSmem provides smem-based variants of parallel prefix scan of items partitioned across a CUDA thread warp. */ #pragma once #include "../../thread/thread_operators.cuh" #include "../../thread/thread_load.cuh" #include "../../thread/thread_store.cuh" #include "../../util_type.cuh" #include "../../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \brief WarpScanSmem provides smem-based variants of parallel prefix scan of items partitioned across a CUDA thread warp. */ template < typename T, ///< Data type being scanned int LOGICAL_WARP_THREADS, ///< Number of threads per logical warp int PTX_ARCH> ///< The PTX compute capability for which to to specialize this collective struct WarpScanSmem { /****************************************************************************** * Constants and type definitions ******************************************************************************/ enum { /// Whether the logical warp size and the PTX warp size coincide IS_ARCH_WARP = (LOGICAL_WARP_THREADS == CUB_WARP_THREADS(PTX_ARCH)), /// The number of warp scan steps STEPS = Log2<LOGICAL_WARP_THREADS>::VALUE, /// The number of threads in half a warp HALF_WARP_THREADS = 1 << (STEPS - 1), /// The number of shared memory elements per warp WARP_SMEM_ELEMENTS = LOGICAL_WARP_THREADS + HALF_WARP_THREADS, /// Whether the data type is a primitive integer IS_INTEGER = (Traits<T>::CATEGORY == UNSIGNED_INTEGER) || (Traits<T>::CATEGORY == SIGNED_INTEGER), }; /// Storage cell type (workaround for SM1x compiler bugs with custom-ops like Max() on signed chars) typedef typename If<((Equals<T, char>::VALUE || Equals<T, signed char>::VALUE) && (PTX_ARCH < 200)), int, T>::Type CellT; /// Shared memory storage layout type (1.5 warps-worth of elements for each warp) typedef CellT _TempStorage[WARP_SMEM_ELEMENTS]; // Alias wrapper allowing storage to be unioned struct TempStorage : Uninitialized<_TempStorage> {}; /****************************************************************************** * Thread fields ******************************************************************************/ _TempStorage &temp_storage; unsigned int lane_id; /****************************************************************************** * Construction ******************************************************************************/ /// Constructor __device__ __forceinline__ WarpScanSmem( TempStorage &temp_storage) : temp_storage(temp_storage.Alias()), lane_id(IS_ARCH_WARP ? 
LaneId() : LaneId() % LOGICAL_WARP_THREADS) {} /****************************************************************************** * Utility methods ******************************************************************************/ /// Basic inclusive scan iteration (template unrolled, inductive-case specialization) template < bool HAS_IDENTITY, int STEP, typename ScanOp> __device__ __forceinline__ void ScanStep( T &partial, ScanOp scan_op, Int2Type<STEP> step) { const int OFFSET = 1 << STEP; // Share partial into buffer ThreadStore<STORE_VOLATILE>(&temp_storage[HALF_WARP_THREADS + lane_id], (CellT) partial); // Update partial if addend is in range if (HAS_IDENTITY || (lane_id >= OFFSET)) { T addend = (T) ThreadLoad<LOAD_VOLATILE>(&temp_storage[HALF_WARP_THREADS + lane_id - OFFSET]); partial = scan_op(addend, partial); } ScanStep<HAS_IDENTITY>(partial, scan_op, Int2Type<STEP + 1>()); } /// Basic inclusive scan iteration(template unrolled, base-case specialization) template < bool HAS_IDENTITY, typename ScanOp> __device__ __forceinline__ void ScanStep( T &partial, ScanOp scan_op, Int2Type<STEPS> step) {} /// Inclusive prefix scan with identity template <typename ScanOp> __device__ __forceinline__ void InclusiveScan( T input, ///< [in] Calling thread's input item. T &output, ///< [out] Calling thread's output item. May be aliased with \p input. T identity, ///< [in] Identity value ScanOp scan_op) ///< [in] Binary scan operator { ThreadStore<STORE_VOLATILE>(&temp_storage[lane_id], (CellT) identity); // Iterate scan steps output = input; ScanStep<true>(output, scan_op, Int2Type<0>()); } /// Inclusive prefix scan (specialized for summation across primitive types) __device__ __forceinline__ void InclusiveScan( T input, ///< [in] Calling thread's input item. T &output, ///< [out] Calling thread's output item. May be aliased with \p input. Sum scan_op, ///< [in] Binary scan operator Int2Type<true> is_primitive) ///< [in] Marker type indicating whether T is primitive type { T identity = ZeroInitialize<T>(); InclusiveScan(input, output, identity, scan_op); } /// Inclusive prefix scan template <typename ScanOp, int IS_PRIMITIVE> __device__ __forceinline__ void InclusiveScan( T input, ///< [in] Calling thread's input item. T &output, ///< [out] Calling thread's output item. May be aliased with \p input. 
ScanOp scan_op, ///< [in] Binary scan operator Int2Type<IS_PRIMITIVE> is_primitive) ///< [in] Marker type indicating whether T is primitive type { // Iterate scan steps output = input; ScanStep<false>(output, scan_op, Int2Type<0>()); } /// Get exclusive from inclusive (specialized for summation of integer types) __device__ __forceinline__ T GetExclusive( T input, T inclusive, Sum scan_op, Int2Type<true> is_integer) { return inclusive - input; } /// Get exclusive from inclusive (specialized for scans other than summation of integer types) template <typename ScanOp, int _IS_INTEGER> __device__ __forceinline__ T GetExclusive( T input, T inclusive, ScanOp scan_op, Int2Type<_IS_INTEGER> is_integer) { ThreadStore<STORE_VOLATILE>(&temp_storage[HALF_WARP_THREADS + lane_id], (CellT) inclusive); return (T) ThreadLoad<LOAD_VOLATILE>(&temp_storage[HALF_WARP_THREADS + lane_id - 1]); } /// Get exclusive from inclusive (specialized for summation of integer types) __device__ __forceinline__ T GetExclusive( T input, T inclusive, Sum scan_op, T &warp_aggregate, Int2Type<true> is_integer) { ThreadStore<STORE_VOLATILE>(&temp_storage[HALF_WARP_THREADS + lane_id], (CellT) inclusive); warp_aggregate = (T) ThreadLoad<LOAD_VOLATILE>(&temp_storage[WARP_SMEM_ELEMENTS - 1]); return inclusive - input; } /// Get exclusive from inclusive (specialized for scans other than summation of integer types) template <typename ScanOp, int _IS_INTEGER> __device__ __forceinline__ T GetExclusive( T input, T inclusive, ScanOp scan_op, T &warp_aggregate, Int2Type<_IS_INTEGER> is_integer) { ThreadStore<STORE_VOLATILE>(&temp_storage[HALF_WARP_THREADS + lane_id], (CellT) inclusive); warp_aggregate = (T) ThreadLoad<LOAD_VOLATILE>(&temp_storage[WARP_SMEM_ELEMENTS - 1]); return (T) ThreadLoad<LOAD_VOLATILE>(&temp_storage[HALF_WARP_THREADS + lane_id - 1]); } /****************************************************************************** * Interface ******************************************************************************/ /// Broadcast __device__ __forceinline__ T Broadcast( T input, ///< [in] The value to broadcast unsigned int src_lane) ///< [in] Which warp lane is to do the broadcasting { if (lane_id == src_lane) { ThreadStore<STORE_VOLATILE>(temp_storage, (CellT) input); } return (T) ThreadLoad<LOAD_VOLATILE>(temp_storage); } //--------------------------------------------------------------------- // Inclusive operations //--------------------------------------------------------------------- /// Inclusive scan template <typename ScanOp> __device__ __forceinline__ void InclusiveScan( T input, ///< [in] Calling thread's input item. T &output, ///< [out] Calling thread's output item. May be aliased with \p input. ScanOp scan_op) ///< [in] Binary scan operator { InclusiveScan(input, output, scan_op, Int2Type<Traits<T>::PRIMITIVE>()); } /// Inclusive scan with aggregate template <typename ScanOp> __device__ __forceinline__ void InclusiveScan( T input, ///< [in] Calling thread's input item. T &output, ///< [out] Calling thread's output item. May be aliased with \p input. ScanOp scan_op, ///< [in] Binary scan operator T &warp_aggregate) ///< [out] Warp-wide aggregate reduction of input items. 
{ InclusiveScan(input, output, scan_op); // Retrieve aggregate ThreadStore<STORE_VOLATILE>(&temp_storage[HALF_WARP_THREADS + lane_id], (CellT) output); warp_aggregate = (T) ThreadLoad<LOAD_VOLATILE>(&temp_storage[WARP_SMEM_ELEMENTS - 1]); } //--------------------------------------------------------------------- // Combo (inclusive & exclusive) operations //--------------------------------------------------------------------- /// Combination scan without identity template <typename ScanOp> __device__ __forceinline__ void Scan( T input, ///< [in] Calling thread's input item. T &inclusive_output, ///< [out] Calling thread's inclusive-scan output item. T &exclusive_output, ///< [out] Calling thread's exclusive-scan output item. ScanOp scan_op) ///< [in] Binary scan operator { // Compute inclusive scan InclusiveScan(input, inclusive_output, scan_op); // Grab result from predecessor exclusive_output = GetExclusive(input, inclusive_output, scan_op, Int2Type<IS_INTEGER>()); } /// Combination scan with identity template <typename ScanOp> __device__ __forceinline__ void Scan( T input, ///< [in] Calling thread's input item. T &inclusive_output, ///< [out] Calling thread's inclusive-scan output item. T &exclusive_output, ///< [out] Calling thread's exclusive-scan output item. T identity, ///< [in] Identity value ScanOp scan_op) ///< [in] Binary scan operator { // Compute inclusive scan InclusiveScan(input, inclusive_output, identity, scan_op); // Grab result from predecessor exclusive_output = GetExclusive(input, inclusive_output, scan_op, Int2Type<IS_INTEGER>()); } //--------------------------------------------------------------------- // Exclusive operations //--------------------------------------------------------------------- /// Exclusive scan template <typename ScanOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input item. T &output, ///< [out] Calling thread's output item. May be aliased with \p input. T identity, ///< [in] Identity value ScanOp scan_op) ///< [in] Binary scan operator { T inclusive_output; Scan(input, inclusive_output, output, identity, scan_op); } /// Exclusive scan without identity template <typename ScanOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input item. T &output, ///< [out] Calling thread's output item. May be aliased with \p input. ScanOp scan_op) ///< [in] Binary scan operator { T inclusive_output; Scan(input, inclusive_output, output, scan_op); } /// Exclusive scan with aggregate template <typename ScanOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input item. T &output, ///< [out] Calling thread's output item. May be aliased with \p input. T identity, ///< [in] Identity value ScanOp scan_op, ///< [in] Binary scan operator T &warp_aggregate) ///< [out] Warp-wide aggregate reduction of input items. { // Compute inclusive scan T inclusive_output; InclusiveScan(input, inclusive_output, identity, scan_op); // Grab result from predecessor output = GetExclusive(input, inclusive_output, scan_op, warp_aggregate, Int2Type<IS_INTEGER>()); } /// Exclusive scan with aggregate, without identity template <typename ScanOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input item. T &output, ///< [out] Calling thread's output item. May be aliased with \p input. ScanOp scan_op, ///< [in] Binary scan operator T &warp_aggregate) ///< [out] Warp-wide aggregate reduction of input items. 
{ // Compute inclusive scan T inclusive_output; InclusiveScan(input, inclusive_output, scan_op); // Grab result from predecessor output = GetExclusive(input, inclusive_output, scan_op, warp_aggregate, Int2Type<IS_INTEGER>()); } }; } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
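// ---------------------------------------------------------------------------
// Usage sketch (not part of the CUB sources): WarpScanSmem is the
// shared-memory specialization behind the public cub::WarpScan front end on
// architectures without adequate warp-shuffle support. The kernel below shows
// the interface it ultimately implements; it assumes a launch with 128 threads
// per block (4 logical warps of 32).
// ---------------------------------------------------------------------------
#include <cub/cub.cuh>

__global__ void WarpInclusiveSumExample(const int* in, int* out) {
  using WarpScan = cub::WarpScan<int>;                        // 32-thread logical warp
  __shared__ typename WarpScan::TempStorage temp_storage[4];  // one per warp

  const int warp_id = threadIdx.x / 32;
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;

  int thread_data = in[tid];
  int inclusive_sum;
  WarpScan(temp_storage[warp_id]).InclusiveSum(thread_data, inclusive_sum);
  out[tid] = inclusive_sum;
}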
\brief Execution environment */ #include <cstring> #include "cutlass/numeric_types.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/tensor.h" #include "cutlass/util/reference/device/tensor_compare.h" #include "cutlass/util/reference/device/tensor_fill.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/library/util.h" #include "device_allocation.h" namespace cutlass { namespace profiler { ///////////////////////////////////////////////////////////////////////////////////////////////// size_t DeviceAllocation::bytes(library::NumericTypeID type, size_t capacity) { return size_t(cutlass::library::sizeof_bits(type)) * capacity / 8; } ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename Layout> static std::vector<int64_t> get_packed_layout_stride(std::vector<int> const &extent) { typename Layout::TensorCoord extent_coord; typename Layout::Stride stride_coord; if (extent.size() != size_t(Layout::kRank)) { throw std::runtime_error("Layout does not have same rank as extent vector."); } for (int i = 0; i < Layout::kRank; ++i) { extent_coord[i] = extent.at(i); } std::vector<int64_t> stride; stride.resize(Layout::kStrideRank, 0); Layout layout = Layout::packed(extent_coord); stride_coord = layout.stride(); for (int i = 0; i < Layout::kStrideRank; ++i) { stride.at(i) = (int64_t)stride_coord[i]; } return stride; } /// Returns the stride of a packed layout std::vector<int64_t> DeviceAllocation::get_packed_layout( library::LayoutTypeID layout_id, std::vector<int> const &extent) { std::vector<int64_t> stride; switch (layout_id) { case library::LayoutTypeID::kColumnMajor: stride = get_packed_layout_stride<cutlass::layout::ColumnMajor>(extent); break; case library::LayoutTypeID::kRowMajor: stride = get_packed_layout_stride<cutlass::layout::RowMajor>(extent); break; case library::LayoutTypeID::kColumnMajorInterleavedK2: stride = get_packed_layout_stride<cutlass::layout::ColumnMajorInterleaved<2>>(extent); break; case library::LayoutTypeID::kRowMajorInterleavedK2: stride = get_packed_layout_stride<cutlass::layout::RowMajorInterleaved<2>>(extent); break; case library::LayoutTypeID::kColumnMajorInterleavedK4: stride = get_packed_layout_stride<cutlass::layout::ColumnMajorInterleaved<4>>(extent); break; case library::LayoutTypeID::kRowMajorInterleavedK4: stride = get_packed_layout_stride<cutlass::layout::RowMajorInterleaved<4>>(extent); break; case library::LayoutTypeID::kColumnMajorInterleavedK16: stride = get_packed_layout_stride<cutlass::layout::ColumnMajorInterleaved<16>>(extent); break; case library::LayoutTypeID::kRowMajorInterleavedK16: stride = get_packed_layout_stride<cutlass::layout::RowMajorInterleaved<16>>(extent); break; case library::LayoutTypeID::kColumnMajorInterleavedK32: stride = get_packed_layout_stride<cutlass::layout::ColumnMajorInterleaved<32>>(extent); break; case library::LayoutTypeID::kRowMajorInterleavedK32: stride = get_packed_layout_stride<cutlass::layout::RowMajorInterleaved<32>>(extent); break; case library::LayoutTypeID::kColumnMajorInterleavedK64: stride = get_packed_layout_stride<cutlass::layout::ColumnMajorInterleaved<64>>(extent); break; case library::LayoutTypeID::kRowMajorInterleavedK64: stride = get_packed_layout_stride<cutlass::layout::RowMajorInterleaved<64>>(extent); break; case library::LayoutTypeID::kTensorNCHW: stride = get_packed_layout_stride<cutlass::layout::TensorNCHW>(extent); break; case 
library::LayoutTypeID::kTensorNHWC: stride = get_packed_layout_stride<cutlass::layout::TensorNHWC>(extent); break; case library::LayoutTypeID::kTensorNDHWC: stride = get_packed_layout_stride<cutlass::layout::TensorNDHWC>(extent); break; case library::LayoutTypeID::kTensorNC32HW32: stride = get_packed_layout_stride<cutlass::layout::TensorNCxHWx<32>>(extent); break; case library::LayoutTypeID::kTensorNC64HW64: stride = get_packed_layout_stride<cutlass::layout::TensorNCxHWx<64>>(extent); break; case library::LayoutTypeID::kTensorC32RSK32: stride = get_packed_layout_stride<cutlass::layout::TensorCxRSKx<32>>(extent); break; case library::LayoutTypeID::kTensorC64RSK64: stride = get_packed_layout_stride<cutlass::layout::TensorCxRSKx<64>>(extent); break; default: break; } return stride; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Template to use CUTLASS Layout functions to template <typename Layout> static size_t construct_layout_( void *bytes, library::LayoutTypeID layout_id, std::vector<int> const &extent, std::vector<int64_t> &stride) { if (extent.size() != Layout::kRank) { throw std::runtime_error( "Layout must have same rank as extent vector."); } if (Layout::kStrideRank && stride.empty()) { stride = get_packed_layout_stride<Layout>(extent); return construct_layout_<Layout>( bytes, layout_id, extent, stride); } else if (Layout::kStrideRank && stride.size() != Layout::kStrideRank) { throw std::runtime_error( "Layout requires either empty stride or stride vector matching Layout::kStrideRank"); } typename Layout::Stride stride_coord; for (int i = 0; i < Layout::kStrideRank; ++i) { stride_coord[i] = (int)stride.at(i); } typename Layout::TensorCoord extent_coord; for (int i = 0; i < Layout::kRank; ++i) { extent_coord[i] = extent.at(i); } // Construct the CUTLASS layout object from the stride object Layout layout(stride_coord); // Pack it into bytes if (bytes) { *reinterpret_cast<Layout *>(bytes) = layout; } // Return capacity size_t capacity_ = layout.capacity(extent_coord); return capacity_; } /// returns the capacity needed size_t DeviceAllocation::construct_layout( void *bytes, library::LayoutTypeID layout_id, std::vector<int> const &extent, std::vector<int64_t> &stride) { switch (layout_id) { case library::LayoutTypeID::kColumnMajor: return construct_layout_<cutlass::layout::ColumnMajor>(bytes, layout_id, extent, stride); case library::LayoutTypeID::kRowMajor: return construct_layout_<cutlass::layout::RowMajor>(bytes, layout_id, extent, stride); case library::LayoutTypeID::kColumnMajorInterleavedK2: return construct_layout_<cutlass::layout::ColumnMajorInterleaved<2>>(bytes, layout_id, extent, stride); case library::LayoutTypeID::kRowMajorInterleavedK2: return construct_layout_<cutlass::layout::RowMajorInterleaved<2>>(bytes, layout_id, extent, stride); case library::LayoutTypeID::kColumnMajorInterleavedK4: return construct_layout_<cutlass::layout::ColumnMajorInterleaved<4>>(bytes, layout_id, extent, stride); case library::LayoutTypeID::kRowMajorInterleavedK4: return construct_layout_<cutlass::layout::RowMajorInterleaved<4>>(bytes, layout_id, extent, stride); case library::LayoutTypeID::kColumnMajorInterleavedK16: return construct_layout_<cutlass::layout::ColumnMajorInterleaved<16>>(bytes, layout_id, extent, stride); case library::LayoutTypeID::kRowMajorInterleavedK16: return construct_layout_<cutlass::layout::RowMajorInterleaved<16>>(bytes, layout_id, extent, stride); case library::LayoutTypeID::kColumnMajorInterleavedK32: return 
construct_layout_<cutlass::layout::ColumnMajorInterleaved<32>>(bytes, layout_id, extent, stride); case library::LayoutTypeID::kRowMajorInterleavedK32: return construct_layout_<cutlass::layout::RowMajorInterleaved<32>>(bytes, layout_id, extent, stride); case library::LayoutTypeID::kColumnMajorInterleavedK64: return construct_layout_<cutlass::layout::ColumnMajorInterleaved<64>>(bytes, layout_id, extent, stride); case library::LayoutTypeID::kRowMajorInterleavedK64: return construct_layout_<cutlass::layout::RowMajorInterleaved<64>>(bytes, layout_id, extent, stride); case library::LayoutTypeID::kTensorNCHW: return construct_layout_<cutlass::layout::TensorNHWC>(bytes, layout_id, extent, stride); case library::LayoutTypeID::kTensorNHWC: return construct_layout_<cutlass::layout::TensorNHWC>(bytes, layout_id, extent, stride); case library::LayoutTypeID::kTensorNDHWC: return construct_layout_<cutlass::layout::TensorNDHWC>(bytes, layout_id, extent, stride); case library::LayoutTypeID::kTensorNC32HW32: return construct_layout_<cutlass::layout::TensorNCxHWx<32>>(bytes, layout_id, extent, stride); case library::LayoutTypeID::kTensorNC64HW64: return construct_layout_<cutlass::layout::TensorNCxHWx<64>>(bytes, layout_id, extent, stride); case library::LayoutTypeID::kTensorC32RSK32: return construct_layout_<cutlass::layout::TensorCxRSKx<32>>(bytes, layout_id, extent, stride); case library::LayoutTypeID::kTensorC64RSK64: return construct_layout_<cutlass::layout::TensorCxRSKx<64>>(bytes, layout_id, extent, stride); default: break; } return 0; } ///////////////////////////////////////////////////////////////////////////////////////////////// DeviceAllocation::DeviceAllocation(): type_(library::NumericTypeID::kInvalid), batch_stride_(0), capacity_(0), pointer_(nullptr), layout_(library::LayoutTypeID::kUnknown), batch_count_(1) { } DeviceAllocation::DeviceAllocation( library::NumericTypeID type, size_t capacity ): type_(type), batch_stride_(capacity), capacity_(capacity), pointer_(nullptr), layout_(library::LayoutTypeID::kUnknown), batch_count_(1) { cudaError_t result = cudaMalloc((void **)&pointer_, bytes(type, capacity)); if (result != cudaSuccess) { type_ = library::NumericTypeID::kInvalid; capacity_ = 0; pointer_ = nullptr; throw std::bad_alloc(); } } DeviceAllocation::DeviceAllocation( library::NumericTypeID type, library::LayoutTypeID layout_id, std::vector<int> const &extent, std::vector<int64_t> const &stride, int batch_count ): type_(type), batch_stride_(size_t(0)), capacity_(size_t(0)), pointer_(nullptr), batch_count_(1) { reset(type, layout_id, extent, stride, batch_count); } DeviceAllocation::~DeviceAllocation() { if (pointer_) { cudaFree(pointer_); } } DeviceAllocation &DeviceAllocation::reset() { if (pointer_) { cudaFree(pointer_); } type_ = library::NumericTypeID::kInvalid; batch_stride_ = 0; capacity_ = 0; pointer_ = nullptr; layout_ = library::LayoutTypeID::kUnknown; stride_.clear(); extent_.clear(); tensor_ref_buffer_.clear(); batch_count_ = 1; return *this; } DeviceAllocation &DeviceAllocation::reset(library::NumericTypeID type, size_t capacity) { reset(); type_ = type; batch_stride_ = capacity; capacity_ = capacity; cudaError_t result = cudaMalloc((void **)&pointer_, bytes(type_, capacity_)); if (result != cudaSuccess) { throw std::bad_alloc(); } layout_ = library::LayoutTypeID::kUnknown; stride_.clear(); extent_.clear(); batch_count_ = 1; tensor_ref_buffer_.resize(sizeof(pointer_), 0); std::memcpy(tensor_ref_buffer_.data(), &pointer_, sizeof(pointer_)); return *this; } /// Allocates memory 
for a given layout and tensor DeviceAllocation &DeviceAllocation::reset( library::NumericTypeID type, library::LayoutTypeID layout_id, std::vector<int> const &extent, std::vector<int64_t> const &stride, int batch_count) { reset(); tensor_ref_buffer_.resize(sizeof(pointer_) + (sizeof(int64_t) * library::get_layout_stride_rank(layout_id)), 0); type_ = type; layout_ = layout_id; stride_ = stride; extent_ = extent; batch_count_ = batch_count; batch_stride_ = construct_layout( tensor_ref_buffer_.data() + sizeof(pointer_), layout_id, extent, stride_); capacity_ = batch_stride_ * batch_count_; cudaError_t result = cudaMalloc((void **)&pointer_, bytes(type, capacity_)); if (result != cudaSuccess) { throw std::bad_alloc(); } std::memcpy(tensor_ref_buffer_.data(), &pointer_, sizeof(pointer_)); return *this; } bool DeviceAllocation::good() const { return (capacity_ && pointer_); } library::NumericTypeID DeviceAllocation::type() const { return type_; } void *DeviceAllocation::data() const { return pointer_; } void *DeviceAllocation::batch_data(int batch_idx) const { return static_cast<char *>(data()) + batch_stride_bytes() * batch_idx; } library::LayoutTypeID DeviceAllocation::layout() const { return layout_; } std::vector<int64_t> const & DeviceAllocation::stride() const { return stride_; } /// Gets the extent vector std::vector<int> const & DeviceAllocation::extent() const { return extent_; } /// Gets the number of adjacent tensors in memory int DeviceAllocation::batch_count() const { return batch_count_; } /// Gets the stride (in units of elements) beteween items int64_t DeviceAllocation::batch_stride() const { return batch_stride_; } /// Gets the stride (in units of bytes) beteween items int64_t DeviceAllocation::batch_stride_bytes() const { return bytes(type_, batch_stride_); } size_t DeviceAllocation::capacity() const { return capacity_; } size_t DeviceAllocation::bytes() const { return bytes(type_, capacity_); } /// Copies from an equivalent-sized tensor in device memory void DeviceAllocation::copy_from_device(void const *ptr) { cudaError_t result = cudaMemcpy(data(), ptr, bytes(), cudaMemcpyDeviceToDevice); if (result != cudaSuccess) { throw std::runtime_error("Failed device-to-device copy"); } } /// Copies from an equivalent-sized tensor in device memory void DeviceAllocation::copy_from_host(void const *ptr) { cudaError_t result = cudaMemcpy(data(), ptr, bytes(), cudaMemcpyHostToDevice); if (result != cudaSuccess) { throw std::runtime_error("Failed device-to-device copy"); } } /// Copies from an equivalent-sized tensor in device memory void DeviceAllocation::copy_to_host(void *ptr) { cudaError_t result = cudaMemcpy(ptr, data(), bytes(), cudaMemcpyDeviceToHost); if (result != cudaSuccess) { throw std::runtime_error("Failed device-to-device copy"); } } void DeviceAllocation::initialize_random_device(int seed, Distribution dist) { if (!good()) { throw std::runtime_error("Attempting to initialize invalid allocation."); } // Instantiate calls to CURAND here. This file takes a long time to compile for // this reason. 
switch (type_) { case library::NumericTypeID::kF16: cutlass::reference::device::BlockFillRandom<cutlass::half_t>( reinterpret_cast<cutlass::half_t *>(pointer_), capacity_, seed, dist ); break; case library::NumericTypeID::kBF16: cutlass::reference::device::BlockFillRandom<cutlass::bfloat16_t>( reinterpret_cast<cutlass::bfloat16_t *>(pointer_), capacity_, seed, dist ); break; case library::NumericTypeID::kTF32: cutlass::reference::device::BlockFillRandom<cutlass::tfloat32_t>( reinterpret_cast<cutlass::tfloat32_t *>(pointer_), capacity_, seed, dist ); break; case library::NumericTypeID::kF32: cutlass::reference::device::BlockFillRandom<float>( reinterpret_cast<float *>(pointer_), capacity_, seed, dist ); break; case library::NumericTypeID::kCBF16: cutlass::reference::device::BlockFillRandom<complex<bfloat16_t>>( reinterpret_cast<complex<bfloat16_t> *>(pointer_), capacity_, seed, dist ); break; case library::NumericTypeID::kCTF32: cutlass::reference::device::BlockFillRandom<cutlass::complex<cutlass::tfloat32_t>>( reinterpret_cast<cutlass::complex<cutlass::tfloat32_t> *>(pointer_), capacity_, seed, dist ); break; case library::NumericTypeID::kCF32: cutlass::reference::device::BlockFillRandom<cutlass::complex<float>>( reinterpret_cast<cutlass::complex<float> *>(pointer_), capacity_, seed, dist ); break; case library::NumericTypeID::kF64: cutlass::reference::device::BlockFillRandom<double>( reinterpret_cast<double *>(pointer_), capacity_, seed, dist ); break; case library::NumericTypeID::kCF64: cutlass::reference::device::BlockFillRandom<complex<double>>( reinterpret_cast<complex<double> *>(pointer_), capacity_, seed, dist ); break; case library::NumericTypeID::kS2: cutlass::reference::device::BlockFillRandom<int2b_t>( reinterpret_cast<int2b_t *>(pointer_), capacity_, seed, dist ); break; case library::NumericTypeID::kS4: cutlass::reference::device::BlockFillRandom<int4b_t>( reinterpret_cast<int4b_t *>(pointer_), capacity_, seed, dist ); break; case library::NumericTypeID::kS8: cutlass::reference::device::BlockFillRandom<int8_t>( reinterpret_cast<int8_t *>(pointer_), capacity_, seed, dist ); break; case library::NumericTypeID::kS16: cutlass::reference::device::BlockFillRandom<int16_t>( reinterpret_cast<int16_t *>(pointer_), capacity_, seed, dist ); break; case library::NumericTypeID::kS32: cutlass::reference::device::BlockFillRandom<int32_t>( reinterpret_cast<int32_t *>(pointer_), capacity_, seed, dist ); break; case library::NumericTypeID::kS64: cutlass::reference::device::BlockFillRandom<int64_t>( reinterpret_cast<int64_t *>(pointer_), capacity_, seed, dist ); break; case library::NumericTypeID::kB1: cutlass::reference::device::BlockFillRandom<uint1b_t>( reinterpret_cast<uint1b_t *>(pointer_), capacity_, seed, dist ); break; case library::NumericTypeID::kU2: cutlass::reference::device::BlockFillRandom<uint2b_t>( reinterpret_cast<uint2b_t *>(pointer_), capacity_, seed, dist ); break; case library::NumericTypeID::kU4: cutlass::reference::device::BlockFillRandom<uint4b_t>( reinterpret_cast<uint4b_t *>(pointer_), capacity_, seed, dist ); break; case library::NumericTypeID::kU8: cutlass::reference::device::BlockFillRandom<uint8_t>( reinterpret_cast<uint8_t *>(pointer_), capacity_, seed, dist ); break; case library::NumericTypeID::kU16: cutlass::reference::device::BlockFillRandom<uint16_t>( reinterpret_cast<uint16_t *>(pointer_), capacity_, seed, dist ); break; case library::NumericTypeID::kU32: cutlass::reference::device::BlockFillRandom<uint32_t>( reinterpret_cast<uint32_t *>(pointer_), capacity_, 
seed, dist ); break; case library::NumericTypeID::kU64: cutlass::reference::device::BlockFillRandom<uint64_t>( reinterpret_cast<uint64_t *>(pointer_), capacity_, seed, dist ); break; default: break; } } void DeviceAllocation::initialize_random_host(int seed, Distribution dist) { if (!good()) { throw std::runtime_error("Attempting to initialize invalid allocation."); } std::vector<uint8_t> host_data(bytes()); switch (type_) { case library::NumericTypeID::kF16: cutlass::reference::host::BlockFillRandom<cutlass::half_t>( reinterpret_cast<cutlass::half_t *>(host_data.data()), capacity_, seed, dist ); break; case library::NumericTypeID::kBF16: cutlass::reference::host::BlockFillRandom<cutlass::bfloat16_t>( reinterpret_cast<cutlass::bfloat16_t *>(host_data.data()), capacity_, seed, dist ); break; case library::NumericTypeID::kTF32: cutlass::reference::host::BlockFillRandom<cutlass::tfloat32_t>( reinterpret_cast<cutlass::tfloat32_t *>(host_data.data()), capacity_, seed, dist ); break; case library::NumericTypeID::kF32: cutlass::reference::host::BlockFillRandom<float>( reinterpret_cast<float *>(host_data.data()), capacity_, seed, dist ); break; case library::NumericTypeID::kCF16: cutlass::reference::host::BlockFillRandom<cutlass::complex<cutlass::half_t>>( reinterpret_cast<cutlass::complex<cutlass::half_t> *>(host_data.data()), capacity_, seed, dist ); break; case library::NumericTypeID::kCBF16: cutlass::reference::host::BlockFillRandom<cutlass::complex<cutlass::bfloat16_t>>( reinterpret_cast<cutlass::complex<cutlass::bfloat16_t> *>(host_data.data()), capacity_, seed, dist ); break; case library::NumericTypeID::kCTF32: cutlass::reference::host::BlockFillRandom<cutlass::complex<cutlass::tfloat32_t>>( reinterpret_cast<cutlass::complex<cutlass::tfloat32_t> *>(host_data.data()), capacity_, seed, dist ); break; case library::NumericTypeID::kCF32: cutlass::reference::host::BlockFillRandom<cutlass::complex<float>>( reinterpret_cast<cutlass::complex<float> *>(host_data.data()), capacity_, seed, dist ); break; case library::NumericTypeID::kF64: cutlass::reference::host::BlockFillRandom<double>( reinterpret_cast<double *>(host_data.data()), capacity_, seed, dist ); break; case library::NumericTypeID::kCF64: cutlass::reference::host::BlockFillRandom<cutlass::complex<double>>( reinterpret_cast<cutlass::complex<double> *>(host_data.data()), capacity_, seed, dist ); break; case library::NumericTypeID::kS2: cutlass::reference::host::BlockFillRandom<int2b_t>( reinterpret_cast<int2b_t *>(host_data.data()), capacity_, seed, dist ); break; case library::NumericTypeID::kS4: cutlass::reference::host::BlockFillRandom<int4b_t>( reinterpret_cast<int4b_t *>(host_data.data()), capacity_, seed, dist ); break; case library::NumericTypeID::kS8: cutlass::reference::host::BlockFillRandom<int8_t>( reinterpret_cast<int8_t *>(host_data.data()), capacity_, seed, dist ); break; case library::NumericTypeID::kS16: cutlass::reference::host::BlockFillRandom<int16_t>( reinterpret_cast<int16_t *>(host_data.data()), capacity_, seed, dist ); break; case library::NumericTypeID::kS32: cutlass::reference::host::BlockFillRandom<int32_t>( reinterpret_cast<int32_t *>(host_data.data()), capacity_, seed, dist ); break; case library::NumericTypeID::kS64: cutlass::reference::host::BlockFillRandom<int64_t>( reinterpret_cast<int64_t *>(host_data.data()), capacity_, seed, dist ); break; case library::NumericTypeID::kB1: cutlass::reference::host::BlockFillRandom<uint1b_t>( reinterpret_cast<uint1b_t *>(host_data.data()), capacity_, seed, dist ); break; 
case library::NumericTypeID::kU2: cutlass::reference::host::BlockFillRandom<uint2b_t>( reinterpret_cast<uint2b_t *>(host_data.data()), capacity_, seed, dist ); break; case library::NumericTypeID::kU4: cutlass::reference::host::BlockFillRandom<uint4b_t>( reinterpret_cast<uint4b_t *>(host_data.data()), capacity_, seed, dist ); break; case library::NumericTypeID::kU8: cutlass::reference::host::BlockFillRandom<uint8_t>( reinterpret_cast<uint8_t *>(host_data.data()), capacity_, seed, dist ); break; case library::NumericTypeID::kU16: cutlass::reference::host::BlockFillRandom<uint16_t>( reinterpret_cast<uint16_t *>(host_data.data()), capacity_, seed, dist ); break; case library::NumericTypeID::kU32: cutlass::reference::host::BlockFillRandom<uint32_t>( reinterpret_cast<uint32_t *>(host_data.data()), capacity_, seed, dist ); break; case library::NumericTypeID::kU64: cutlass::reference::host::BlockFillRandom<uint64_t>( reinterpret_cast<uint64_t *>(host_data.data()), capacity_, seed, dist ); break; default: break; } copy_from_host(host_data.data()); } void DeviceAllocation::initialize_random_sparsemeta_device(int seed, int MetaSizeInBits) { if (!good()) { throw std::runtime_error("Attempting to initialize invalid allocation."); } // Instantiate calls to CURAND here. This file takes a long time to compile for // this reason. switch (type_) { case library::NumericTypeID::kU16: cutlass::reference::device::BlockFillRandomSparseMeta<uint16_t>( reinterpret_cast<uint16_t *>(pointer_), capacity_, seed, MetaSizeInBits ); break; case library::NumericTypeID::kU32: cutlass::reference::device::BlockFillRandomSparseMeta<uint32_t>( reinterpret_cast<uint32_t *>(pointer_), capacity_, seed, MetaSizeInBits ); break; default: break; } } void DeviceAllocation::initialize_random_sparsemeta_host(int seed, int MetaSizeInBits) { if (!good()) { throw std::runtime_error("Attempting to initialize invalid allocation."); } std::vector<uint8_t> host_data(bytes()); switch (type_) { case library::NumericTypeID::kS16: cutlass::reference::host::BlockFillRandomSparseMeta<uint16_t>( reinterpret_cast<uint16_t *>(host_data.data()), capacity_, seed, MetaSizeInBits ); break; case library::NumericTypeID::kS32: cutlass::reference::host::BlockFillRandomSparseMeta<uint32_t>( reinterpret_cast<uint32_t *>(host_data.data()), capacity_, seed, MetaSizeInBits ); break; default: break; } copy_from_host(host_data.data()); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Returns true if two blocks have exactly the same value bool DeviceAllocation::block_compare_equal( library::NumericTypeID numeric_type, void const *ptr_A, void const *ptr_B, size_t capacity) { switch (numeric_type) { case library::NumericTypeID::kF16: return reference::device::BlockCompareEqual<half_t>( reinterpret_cast<half_t const *>(ptr_A), reinterpret_cast<half_t const *>(ptr_B), capacity); case library::NumericTypeID::kBF16: return reference::device::BlockCompareEqual<bfloat16_t>( reinterpret_cast<bfloat16_t const *>(ptr_A), reinterpret_cast<bfloat16_t const *>(ptr_B), capacity); case library::NumericTypeID::kTF32: return reference::device::BlockCompareEqual<tfloat32_t>( reinterpret_cast<tfloat32_t const *>(ptr_A), reinterpret_cast<tfloat32_t const *>(ptr_B), capacity); case library::NumericTypeID::kF32: return reference::device::BlockCompareEqual<float>( reinterpret_cast<float const *>(ptr_A), reinterpret_cast<float const *>(ptr_B), capacity); case library::NumericTypeID::kCF32: return 
reference::device::BlockCompareEqual<cutlass::complex<float> >( reinterpret_cast<complex<float> const *>(ptr_A), reinterpret_cast<complex<float> const *>(ptr_B), capacity); case library::NumericTypeID::kCF16: return reference::device::BlockCompareEqual<complex<half_t>>( reinterpret_cast<complex<half_t> const *>(ptr_A), reinterpret_cast<complex<half_t> const *>(ptr_B), capacity); case library::NumericTypeID::kCBF16: return reference::device::BlockCompareEqual<complex<bfloat16_t>>( reinterpret_cast<complex<bfloat16_t> const *>(ptr_A), reinterpret_cast<complex<bfloat16_t> const *>(ptr_B), capacity); case library::NumericTypeID::kCTF32: return reference::device::BlockCompareEqual<complex<tfloat32_t>>( reinterpret_cast<complex<tfloat32_t> const *>(ptr_A), reinterpret_cast<complex<tfloat32_t> const *>(ptr_B), capacity); case library::NumericTypeID::kF64: return reference::device::BlockCompareEqual<double>( reinterpret_cast<double const *>(ptr_A), reinterpret_cast<double const *>(ptr_B), capacity); case library::NumericTypeID::kCF64: return reference::device::BlockCompareEqual<complex<double>>( reinterpret_cast<complex<double> const *>(ptr_A), reinterpret_cast<complex<double> const *>(ptr_B), capacity); case library::NumericTypeID::kS2: return reference::device::BlockCompareEqual<int2b_t>( reinterpret_cast<int2b_t const *>(ptr_A), reinterpret_cast<int2b_t const *>(ptr_B), capacity); case library::NumericTypeID::kS4: return reference::device::BlockCompareEqual<int4b_t>( reinterpret_cast<int4b_t const *>(ptr_A), reinterpret_cast<int4b_t const *>(ptr_B), capacity); case library::NumericTypeID::kS8: return reference::device::BlockCompareEqual<int8_t>( reinterpret_cast<int8_t const *>(ptr_A), reinterpret_cast<int8_t const *>(ptr_B), capacity); case library::NumericTypeID::kS16: return reference::device::BlockCompareEqual<int16_t>( reinterpret_cast<int16_t const *>(ptr_A), reinterpret_cast<int16_t const *>(ptr_B), capacity); case library::NumericTypeID::kS32: return reference::device::BlockCompareEqual<int32_t>( reinterpret_cast<int32_t const *>(ptr_A), reinterpret_cast<int32_t const *>(ptr_B), capacity); case library::NumericTypeID::kS64: return reference::device::BlockCompareEqual<int64_t>( reinterpret_cast<int64_t const *>(ptr_A), reinterpret_cast<int64_t const *>(ptr_B), capacity); case library::NumericTypeID::kB1: return reference::device::BlockCompareEqual<uint1b_t>( reinterpret_cast<uint1b_t const *>(ptr_A), reinterpret_cast<uint1b_t const *>(ptr_B), capacity); case library::NumericTypeID::kU2: return reference::device::BlockCompareEqual<uint2b_t>( reinterpret_cast<uint2b_t const *>(ptr_A), reinterpret_cast<uint2b_t const *>(ptr_B), capacity); case library::NumericTypeID::kU4: return reference::device::BlockCompareEqual<uint4b_t>( reinterpret_cast<uint4b_t const *>(ptr_A), reinterpret_cast<uint4b_t const *>(ptr_B), capacity); case library::NumericTypeID::kU8: return reference::device::BlockCompareEqual<uint8_t>( reinterpret_cast<uint8_t const *>(ptr_A), reinterpret_cast<uint8_t const *>(ptr_B), capacity); case library::NumericTypeID::kU16: return reference::device::BlockCompareEqual<uint16_t>( reinterpret_cast<uint16_t const *>(ptr_A), reinterpret_cast<uint16_t const *>(ptr_B), capacity); case library::NumericTypeID::kU32: return reference::device::BlockCompareEqual<uint32_t>( reinterpret_cast<uint32_t const *>(ptr_A), reinterpret_cast<uint32_t const *>(ptr_B), capacity); case library::NumericTypeID::kU64: return reference::device::BlockCompareEqual<uint64_t>( reinterpret_cast<uint64_t const 
*>(ptr_A), reinterpret_cast<uint64_t const *>(ptr_B), capacity); default: throw std::runtime_error("Unsupported numeric type"); } } /// Returns true if two blocks have approximately the same value bool DeviceAllocation::block_compare_relatively_equal( library::NumericTypeID numeric_type, void const *ptr_A, void const *ptr_B, size_t capacity, double epsilon, double nonzero_floor) { switch (numeric_type) { case library::NumericTypeID::kF16: return reference::device::BlockCompareRelativelyEqual<half_t>( reinterpret_cast<half_t const *>(ptr_A), reinterpret_cast<half_t const *>(ptr_B), capacity, static_cast<half_t>(epsilon), static_cast<half_t>(nonzero_floor)); case library::NumericTypeID::kBF16: return reference::device::BlockCompareRelativelyEqual<bfloat16_t>( reinterpret_cast<bfloat16_t const *>(ptr_A), reinterpret_cast<bfloat16_t const *>(ptr_B), capacity, static_cast<bfloat16_t>(epsilon), static_cast<bfloat16_t>(nonzero_floor)); case library::NumericTypeID::kTF32: return reference::device::BlockCompareRelativelyEqual<tfloat32_t>( reinterpret_cast<tfloat32_t const *>(ptr_A), reinterpret_cast<tfloat32_t const *>(ptr_B), capacity, static_cast<tfloat32_t>(epsilon), static_cast<tfloat32_t>(nonzero_floor)); case library::NumericTypeID::kF32: return reference::device::BlockCompareRelativelyEqual<float>( reinterpret_cast<float const *>(ptr_A), reinterpret_cast<float const *>(ptr_B), capacity, static_cast<float>(epsilon), static_cast<float>(nonzero_floor)); case library::NumericTypeID::kF64: return reference::device::BlockCompareRelativelyEqual<double>( reinterpret_cast<double const *>(ptr_A), reinterpret_cast<double const *>(ptr_B), capacity, static_cast<double>(epsilon), static_cast<double>(nonzero_floor)); case library::NumericTypeID::kS2: return reference::device::BlockCompareRelativelyEqual<int2b_t>( reinterpret_cast<int2b_t const *>(ptr_A), reinterpret_cast<int2b_t const *>(ptr_B), capacity, static_cast<int2b_t>(epsilon), static_cast<int2b_t>(nonzero_floor)); case library::NumericTypeID::kS4: return reference::device::BlockCompareRelativelyEqual<int4b_t>( reinterpret_cast<int4b_t const *>(ptr_A), reinterpret_cast<int4b_t const *>(ptr_B), capacity, static_cast<int4b_t>(epsilon), static_cast<int4b_t>(nonzero_floor)); case library::NumericTypeID::kS8: return reference::device::BlockCompareRelativelyEqual<int8_t>( reinterpret_cast<int8_t const *>(ptr_A), reinterpret_cast<int8_t const *>(ptr_B), capacity, static_cast<int8_t>(epsilon), static_cast<int8_t>(nonzero_floor)); case library::NumericTypeID::kS16: return reference::device::BlockCompareRelativelyEqual<int16_t>( reinterpret_cast<int16_t const *>(ptr_A), reinterpret_cast<int16_t const *>(ptr_B), capacity, static_cast<int16_t>(epsilon), static_cast<int16_t>(nonzero_floor)); case library::NumericTypeID::kS32: return reference::device::BlockCompareRelativelyEqual<int32_t>( reinterpret_cast<int32_t const *>(ptr_A), reinterpret_cast<int32_t const *>(ptr_B), capacity, static_cast<int32_t>(epsilon), static_cast<int32_t>(nonzero_floor)); case library::NumericTypeID::kS64: return reference::device::BlockCompareRelativelyEqual<int64_t>( reinterpret_cast<int64_t const *>(ptr_A), reinterpret_cast<int64_t const *>(ptr_B), capacity, static_cast<int64_t>(epsilon), static_cast<int64_t>(nonzero_floor)); case library::NumericTypeID::kB1: return reference::device::BlockCompareRelativelyEqual<uint1b_t>( reinterpret_cast<uint1b_t const *>(ptr_A), reinterpret_cast<uint1b_t const *>(ptr_B), capacity, static_cast<uint1b_t>(epsilon), 
static_cast<uint1b_t>(nonzero_floor)); case library::NumericTypeID::kU2: return reference::device::BlockCompareRelativelyEqual<uint2b_t>( reinterpret_cast<uint2b_t const *>(ptr_A), reinterpret_cast<uint2b_t const *>(ptr_B), capacity, static_cast<uint2b_t>(epsilon), static_cast<uint2b_t>(nonzero_floor)); case library::NumericTypeID::kU4: return reference::device::BlockCompareRelativelyEqual<uint4b_t>( reinterpret_cast<uint4b_t const *>(ptr_A), reinterpret_cast<uint4b_t const *>(ptr_B), capacity, static_cast<uint4b_t>(epsilon), static_cast<uint4b_t>(nonzero_floor)); case library::NumericTypeID::kU8: return reference::device::BlockCompareRelativelyEqual<uint8_t>( reinterpret_cast<uint8_t const *>(ptr_A), reinterpret_cast<uint8_t const *>(ptr_B), capacity, static_cast<uint8_t>(epsilon), static_cast<uint8_t>(nonzero_floor)); case library::NumericTypeID::kU16: return reference::device::BlockCompareRelativelyEqual<uint16_t>( reinterpret_cast<uint16_t const *>(ptr_A), reinterpret_cast<uint16_t const *>(ptr_B), capacity, static_cast<uint16_t>(epsilon), static_cast<uint16_t>(nonzero_floor)); case library::NumericTypeID::kU32: return reference::device::BlockCompareRelativelyEqual<uint32_t>( reinterpret_cast<uint32_t const *>(ptr_A), reinterpret_cast<uint32_t const *>(ptr_B), capacity, static_cast<uint32_t>(epsilon), static_cast<uint32_t>(nonzero_floor)); case library::NumericTypeID::kU64: return reference::device::BlockCompareRelativelyEqual<uint64_t>( reinterpret_cast<uint64_t const *>(ptr_A), reinterpret_cast<uint64_t const *>(ptr_B), capacity, static_cast<uint64_t>(epsilon), static_cast<uint64_t>(nonzero_floor)); // No relatively equal comparison for complex numbers. // // As a simplification, we can require bitwise equality. This avoids false positives. // (i.e. "pass" really means passing. "Fail" may not actually mean failure given appropriate epsilon.) 
// case library::NumericTypeID::kCF16: return reference::device::BlockCompareEqual<cutlass::complex<half_t> >( reinterpret_cast<complex<half_t> const *>(ptr_A), reinterpret_cast<complex<half_t> const *>(ptr_B), capacity); case library::NumericTypeID::kCF32: return reference::device::BlockCompareEqual<cutlass::complex<float> >( reinterpret_cast<complex<float> const *>(ptr_A), reinterpret_cast<complex<float> const *>(ptr_B), capacity); case library::NumericTypeID::kCF64: return reference::device::BlockCompareEqual<cutlass::complex<double> >( reinterpret_cast<complex<double> const *>(ptr_A), reinterpret_cast<complex<double> const *>(ptr_B), capacity); default: { throw std::runtime_error(std::string("Unsupported numeric type: ") + to_string(numeric_type)); } } } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Permits copying dynamic vectors into static-length vectors template <typename TensorCoord, int Rank> struct vector_to_coord { vector_to_coord(TensorCoord &coord, std::vector<int> const &vec) { coord[Rank - 1] = vec.at(Rank - 1); if (Rank > 1) { vector_to_coord<TensorCoord, Rank - 1>(coord, vec); } } vector_to_coord(TensorCoord &coord, std::vector<int64_t> const &vec) { coord[Rank - 1] = (int)vec.at(Rank - 1); if (Rank > 1) { vector_to_coord<TensorCoord, Rank - 1>(coord, vec); } } }; /// Permits copying dynamic vectors into static-length vectors template <typename TensorCoord> struct vector_to_coord<TensorCoord, 1> { vector_to_coord(TensorCoord &coord, std::vector<int> const &vec) { coord[0] = vec.at(0); } vector_to_coord(TensorCoord &coord, std::vector<int64_t> const &vec) { coord[0] = (int)vec.at(0); } }; /// Permits copying dynamic vectors into static-length vectors template <typename TensorCoord> struct vector_to_coord<TensorCoord, 0> { vector_to_coord(TensorCoord &coord, std::vector<int> const &vec) { } }; ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename Element, typename Layout> static void write_tensor_csv_static_tensor_view( std::ostream &out, DeviceAllocation &allocation) { Coord<Layout::kRank> extent; Coord<Layout::kStrideRank, typename Layout::Stride::Index> stride; if (allocation.extent().size() != Layout::kRank) { throw std::runtime_error("Allocation extent has invalid rank"); } if (allocation.stride().size() != Layout::kStrideRank) { throw std::runtime_error("Allocation stride has invalid rank"); } vector_to_coord<Coord<Layout::kRank>, Layout::kRank>(extent, allocation.extent()); vector_to_coord<Coord<Layout::kStrideRank, typename Layout::Stride::Index>, Layout::kStrideRank>(stride, allocation.stride()); Layout layout(stride); HostTensor<Element, Layout> host_tensor(extent, layout, false); if (host_tensor.capacity() != allocation.batch_stride()) { throw std::runtime_error("Unexpected capacity to equal."); } host_tensor.copy_in_device_to_host( static_cast<Element const *>(allocation.data()), allocation.batch_stride()); TensorViewWrite(out, host_tensor.host_view()); out << "\n\n"; } ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> static void write_tensor_csv_static_type( std::ostream &out, DeviceAllocation &allocation) { switch (allocation.layout()) { case library::LayoutTypeID::kRowMajor: write_tensor_csv_static_tensor_view<T, layout::RowMajor>(out, allocation); break; case library::LayoutTypeID::kColumnMajor: write_tensor_csv_static_tensor_view<T, layout::ColumnMajor>(out, allocation); 
break; case library::LayoutTypeID::kRowMajorInterleavedK2: write_tensor_csv_static_tensor_view<T, layout::RowMajorInterleaved<2>>(out, allocation); break; case library::LayoutTypeID::kColumnMajorInterleavedK2: write_tensor_csv_static_tensor_view<T, layout::ColumnMajorInterleaved<2>>(out, allocation); break; case library::LayoutTypeID::kRowMajorInterleavedK4: write_tensor_csv_static_tensor_view<T, layout::RowMajorInterleaved<4>>(out, allocation); break; case library::LayoutTypeID::kColumnMajorInterleavedK4: write_tensor_csv_static_tensor_view<T, layout::ColumnMajorInterleaved<4>>(out, allocation); break; case library::LayoutTypeID::kRowMajorInterleavedK16: write_tensor_csv_static_tensor_view<T, layout::RowMajorInterleaved<16>>(out, allocation); break; case library::LayoutTypeID::kColumnMajorInterleavedK16: write_tensor_csv_static_tensor_view<T, layout::ColumnMajorInterleaved<16>>(out, allocation); break; case library::LayoutTypeID::kRowMajorInterleavedK32: write_tensor_csv_static_tensor_view<T, layout::RowMajorInterleaved<32>>(out, allocation); break; case library::LayoutTypeID::kColumnMajorInterleavedK32: write_tensor_csv_static_tensor_view<T, layout::ColumnMajorInterleaved<32>>(out, allocation); break; case library::LayoutTypeID::kRowMajorInterleavedK64: write_tensor_csv_static_tensor_view<T, layout::RowMajorInterleaved<64>>(out, allocation); break; case library::LayoutTypeID::kColumnMajorInterleavedK64: write_tensor_csv_static_tensor_view<T, layout::ColumnMajorInterleaved<64>>(out, allocation); break; case library::LayoutTypeID::kTensorNHWC: write_tensor_csv_static_tensor_view<T, layout::TensorNHWC>(out, allocation); break; case library::LayoutTypeID::kTensorNDHWC: write_tensor_csv_static_tensor_view<T, layout::TensorNDHWC>(out, allocation); break; case library::LayoutTypeID::kTensorNC32HW32: write_tensor_csv_static_tensor_view<T, layout::TensorNCxHWx<32>>(out, allocation); break; case library::LayoutTypeID::kTensorNC64HW64: write_tensor_csv_static_tensor_view<T, layout::TensorNCxHWx<64>>(out, allocation); break; case library::LayoutTypeID::kTensorC32RSK32: write_tensor_csv_static_tensor_view<T, layout::TensorCxRSKx<32>>(out, allocation); break; case library::LayoutTypeID::kTensorC64RSK64: write_tensor_csv_static_tensor_view<T, layout::TensorCxRSKx<64>>(out, allocation); break; default: throw std::runtime_error("Unhandled layout"); } } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Writes a tensor to csv void DeviceAllocation::write_tensor_csv( std::ostream &out) { switch (this->type()) { case library::NumericTypeID::kF16: write_tensor_csv_static_type<half_t>(out, *this); break; case library::NumericTypeID::kBF16: write_tensor_csv_static_type<bfloat16_t>(out, *this); break; case library::NumericTypeID::kTF32: write_tensor_csv_static_type<tfloat32_t>(out, *this); break; case library::NumericTypeID::kF32: write_tensor_csv_static_type<float>(out, *this); break; case library::NumericTypeID::kF64: write_tensor_csv_static_type<double>(out, *this); break; case library::NumericTypeID::kS2: write_tensor_csv_static_type<int2b_t>(out, *this); break; case library::NumericTypeID::kS4: write_tensor_csv_static_type<int4b_t>(out, *this); break; case library::NumericTypeID::kS8: write_tensor_csv_static_type<int8_t>(out, *this); break; case library::NumericTypeID::kS16: write_tensor_csv_static_type<int16_t>(out, *this); break; case library::NumericTypeID::kS32: write_tensor_csv_static_type<int32_t>(out, *this); break; case 
library::NumericTypeID::kS64: write_tensor_csv_static_type<int64_t>(out, *this); break; case library::NumericTypeID::kB1: write_tensor_csv_static_type<uint1b_t>(out, *this); break; case library::NumericTypeID::kU2: write_tensor_csv_static_type<uint2b_t>(out, *this); break; case library::NumericTypeID::kU4: write_tensor_csv_static_type<uint4b_t>(out, *this); break; case library::NumericTypeID::kU8: write_tensor_csv_static_type<uint8_t>(out, *this); break; case library::NumericTypeID::kU16: write_tensor_csv_static_type<uint16_t>(out, *this); break; case library::NumericTypeID::kU32: write_tensor_csv_static_type<uint32_t>(out, *this); break; case library::NumericTypeID::kU64: write_tensor_csv_static_type<uint64_t>(out, *this); break; case library::NumericTypeID::kCF16: write_tensor_csv_static_type<cutlass::complex<half_t> >(out, *this); break; case library::NumericTypeID::kCF32: write_tensor_csv_static_type<cutlass::complex<float> >(out, *this); break; case library::NumericTypeID::kCF64: write_tensor_csv_static_type<cutlass::complex<double> >(out, *this); break; default: throw std::runtime_error("Unsupported numeric type"); } } template <typename Element, typename Layout> static void tensor_fill_tensor_view(DeviceAllocation &allocation, Element val = Element()) { Coord<Layout::kRank> extent; Coord<Layout::kStrideRank, typename Layout::LongIndex> stride; if (allocation.extent().size() != Layout::kRank) { throw std::runtime_error("Allocation extent has invalid rank"); } if (allocation.stride().size() != Layout::kStrideRank) { throw std::runtime_error("Allocation stride has invalid rank"); } vector_to_coord<Coord<Layout::kRank>, Layout::kRank>(extent, allocation.extent()); vector_to_coord<Coord<Layout::kStrideRank, typename Layout::LongIndex>, Layout::kStrideRank>(stride, allocation.stride()); TensorView<Element, Layout> view( static_cast<Element *>(allocation.data()), Layout(stride), extent ); cutlass::reference::device::TensorFill<Element, Layout>( view, val ); } template <typename Element> static void tensor_fill(DeviceAllocation &allocation, Element val = Element()) { switch (allocation.layout()) { case library::LayoutTypeID::kRowMajor: tensor_fill_tensor_view<Element, layout::RowMajor>(allocation, val); break; case library::LayoutTypeID::kColumnMajor: tensor_fill_tensor_view<Element, layout::ColumnMajor>(allocation, val); break; case library::LayoutTypeID::kTensorNHWC: tensor_fill_tensor_view<Element, layout::TensorNHWC>(allocation, val); break; case library::LayoutTypeID::kTensorNDHWC: tensor_fill_tensor_view<Element, layout::TensorNDHWC>(allocation, val); break; case library::LayoutTypeID::kTensorNC32HW32: tensor_fill_tensor_view<Element, layout::TensorNCxHWx<32>>(allocation, val); break; case library::LayoutTypeID::kTensorNC64HW64: tensor_fill_tensor_view<Element, layout::TensorNCxHWx<64>>(allocation, val); break; case library::LayoutTypeID::kTensorC32RSK32: tensor_fill_tensor_view<Element, layout::TensorCxRSKx<32>>(allocation, val); break; case library::LayoutTypeID::kTensorC64RSK64: tensor_fill_tensor_view<Element, layout::TensorCxRSKx<64>>(allocation, val); break; default: throw std::runtime_error("Unsupported layout"); break; } } /// Fills a tensor uniformly with a value (most frequently used to clear the tensor) void DeviceAllocation::fill(double val = 0.0) { switch (this->type()) { case library::NumericTypeID::kF16: tensor_fill<half_t>(*this, static_cast<half_t>(val)); break; case library::NumericTypeID::kBF16: tensor_fill<bfloat16_t>(*this, static_cast<bfloat16_t>(val)); break; 
case library::NumericTypeID::kTF32: tensor_fill<tfloat32_t>(*this, static_cast<tfloat32_t>(val)); break; case library::NumericTypeID::kF32: tensor_fill<float>(*this, static_cast<float>(val)); break; case library::NumericTypeID::kF64: tensor_fill<double>(*this, static_cast<double>(val)); break; case library::NumericTypeID::kS2: tensor_fill<int2b_t>(*this, static_cast<int2b_t>(val)); break; case library::NumericTypeID::kS4: tensor_fill<int4b_t>(*this, static_cast<int4b_t>(val)); break; case library::NumericTypeID::kS8: tensor_fill<int8_t>(*this, static_cast<int8_t>(val)); break; case library::NumericTypeID::kS16: tensor_fill<int16_t>(*this, static_cast<int16_t>(val)); break; case library::NumericTypeID::kS32: tensor_fill<int32_t>(*this, static_cast<int32_t>(val)); break; case library::NumericTypeID::kS64: tensor_fill<int64_t>(*this, static_cast<int64_t>(val)); break; case library::NumericTypeID::kB1: tensor_fill<uint1b_t>(*this, static_cast<uint1b_t>(val)); break; case library::NumericTypeID::kU2: tensor_fill<uint2b_t>(*this, static_cast<uint2b_t>(val)); break; case library::NumericTypeID::kU4: tensor_fill<uint4b_t>(*this, static_cast<uint4b_t>(val)); break; case library::NumericTypeID::kU8: tensor_fill<uint8_t>(*this, static_cast<uint8_t>(val)); break; case library::NumericTypeID::kU16: tensor_fill<uint16_t>(*this, static_cast<uint16_t>(val)); break; case library::NumericTypeID::kU32: tensor_fill<uint32_t>(*this, static_cast<uint32_t>(val)); break; case library::NumericTypeID::kU64: tensor_fill<uint64_t>(*this, static_cast<uint64_t>(val)); break; case library::NumericTypeID::kCF16: tensor_fill<cutlass::complex<half_t> >(*this, from_real<half_t>(val)); break; case library::NumericTypeID::kCF32: tensor_fill<cutlass::complex<float> >(*this, from_real<float>(val)); break; case library::NumericTypeID::kCF64: tensor_fill<cutlass::complex<double> >(*this, from_real<double>(val)); break; default: throw std::runtime_error("Unsupported numeric type"); } } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace profiler } // namespace cutlass
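// The epsilon/nonzero_floor pair threaded through block_compare_relatively_equal above
// parameterizes a tolerance check on verification results. The following standalone
// host-side sketch shows one common formulation of such a check -- relative error with a
// floor on the denominator so that near-zero values do not blow up the ratio. It is only
// an illustration; the exact criterion implemented by
// cutlass::reference::device::BlockCompareRelativelyEqual may differ.
#include <algorithm>
#include <cmath>
#include <cstddef>

static bool relatively_equal_sketch(
  float const *a,
  float const *b,
  size_t count,
  float epsilon,
  float nonzero_floor) {

  for (size_t i = 0; i < count; ++i) {
    // Use the larger magnitude (clamped below by nonzero_floor) as the reference scale.
    float scale = std::max(std::max(std::fabs(a[i]), std::fabs(b[i])), nonzero_floor);
    if (std::fabs(a[i] - b[i]) > epsilon * scale) {
      return false;   // relative error exceeds the tolerance for this element
    }
  }
  return true;        // every element is within tolerance
}
// A caller comparing two device buffers would either copy them to host first or rely on
// the device-side routines dispatched above.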
//#define NVBIO_CUDA_DEBUG #include <cub/cub.cuh> #include <nvbio/basic/omp.h> #include "input_thread.h" #include <nvbio/basic/pipeline.h> #include <nvbio/sufsort/sufsort.h> #include <nvbio/sufsort/sufsort_utils.h> #include <nvbio/sufsort/file_bwt.h> #include <nvbio/sufsort/bwte.h> #include <nvbio/basic/timer.h> #include <nvbio/basic/shared_pointer.h> #include <nvbio/basic/exceptions.h> #include <nvbio/basic/dna.h> #include <nvbio/basic/vector.h> #include <nvbio/basic/system.h> #include <nvbio/basic/cuda/arch.h> #include <nvbio/strings/string_set.h> #include <nvbio/io/sequence/sequence.h> #include <stdio.h> #include <stdlib.h> #include <vector> #include <algorithm> using namespace nvbio; static const uint32 SYMBOL_SIZE = io::SequenceDataAccess<DNA>::SEQUENCE_BITS; static const bool BIG_ENDIAN = io::SequenceDataAccess<DNA>::SEQUENCE_BIG_ENDIAN; typedef io::SequenceDataAccess<DNA>::sequence_storage_iterator storage_iterator; typedef io::SequenceDataAccess<DNA>::index_iterator offsets_iterator; typedef BWTEContext<SYMBOL_SIZE,BIG_ENDIAN,storage_iterator,offsets_iterator> BWTE_context_type; /// /// A small class implementing a Pipeline stage reading sequence batches from a file /// struct SortStage { typedef io::SequenceDataHost argument_type; typedef BWTEBlock return_type; /// constructor /// ///\param file input sequence file ///\param max_strings maximum number of strings per batch ///\param max_bps maximum number of base pairs per batch /// SortStage(BWTE_context_type& context) : m_context( context ) {} /// fill the next batch /// bool process(PipelineContext& context) { // fetch the input io::SequenceDataHost* h_read_data = context.input<io::SequenceDataHost>(0); // fetch the output BWTEBlock* block = context.output<BWTEBlock>(); // build a view const io::SequenceDataAccess<DNA> h_read_view( *h_read_data ); m_context.sort_block( 0u, h_read_data->size(), h_read_view.sequence_string_set(), *block ); return true; } BWTE_context_type& m_context; }; /// /// A small class implementing a Pipeline stage reading sequence batches from a file /// struct SinkStage { typedef io::SequenceDataHost argument_type; /// constructor /// ///\param file input sequence file ///\param max_strings maximum number of strings per batch ///\param max_bps maximum number of base pairs per batch /// SinkStage( BWTE_context_type& context, PagedText<SYMBOL_SIZE,BIG_ENDIAN>& bwt, SparseSymbolSet& dollars) : m_context( context ), m_bwt( bwt ), m_dollars( dollars ), n_reads( 0 ), m_time( 0.0f ) {} /// fill the next batch /// bool process(PipelineContext& context) { const ScopedTimer<float> timer( &m_time ); // fetch the input io::SequenceDataHost* h_read_data = context.input<io::SequenceDataHost>( 0 ); // build a view const io::SequenceDataAccess<DNA> h_read_view( *h_read_data ); log_info(stderr, " block [%u, %u] (%u / %.2fG bps, %.1f M suffixes/s)\n", n_reads, n_reads + h_read_data->size(), h_read_data->bps(), 1.0e-9f * m_bwt.size(), m_time ? 
(1.0e-6f * m_bwt.size()) / m_time : 0.0f ); log_debug(stderr," peak memory : %.1f GB\n", float( peak_resident_memory() ) / float(1024*1024*1024)); /* m_context.append_block( 0u, h_read_data->size(), h_read_view.sequence_string_set(), m_bwt, m_dollars, true ); */ // fetch the second input BWTEBlock* block = context.input<BWTEBlock>( 1 ); m_context.merge_block( 0u, h_read_data->size(), h_read_view.sequence_string_set(), *block, m_bwt, m_dollars, true ); n_reads += h_read_data->size(); return true; } BWTE_context_type& m_context; PagedText<SYMBOL_SIZE,BIG_ENDIAN>& m_bwt; SparseSymbolSet& m_dollars; uint32 n_reads; float m_time; }; int main(int argc, char* argv[]) { if (argc < 2) { log_visible(stderr, "nvSetBWT - Copyright 2013-2014, NVIDIA Corporation\n"); log_info(stderr, "usage:\n"); log_info(stderr, " nvSetBWT [options] input_file output_file\n"); log_info(stderr, " options:\n"); log_info(stderr, " -v | --verbosity int (0-6) [5]\n"); log_info(stderr, " -c | --compression string [1R] (e.g. \"1\", ..., \"9\", \"1R\")\n"); log_info(stderr, " -t | --threads int [auto]\n"); log_info(stderr, " -b | --bucketing int [16] (# of bits used for bucketing)\n"); log_info(stderr, " -F | --skip-forward\n"); log_info(stderr, " -R | --skip-reverse\n"); log_info(stderr, " output formats:\n"); log_info(stderr, " .txt ASCII\n"); log_info(stderr, " .txt.gz ASCII, gzip compressed\n"); log_info(stderr, " .txt.bgz ASCII, block-gzip compressed\n"); log_info(stderr, " .bwt 2-bit packed binary\n"); log_info(stderr, " .bwt.gz 2-bit packed binary, gzip compressed\n"); log_info(stderr, " .bwt.bgz 2-bit packed binary, block-gzip compressed\n"); log_info(stderr, " .bwt4 4-bit packed binary\n"); log_info(stderr, " .bwt4.gz 4-bit packed binary, gzip compressed\n"); log_info(stderr, " .bwt4.bgz 4-bit packed binary, block-gzip compressed\n"); return 0; } const char* reads_name = argv[argc-2]; const char* output_name = argv[argc-1]; bool forward = true; bool reverse = true; const char* comp_level = "1R"; io::QualityEncoding qencoding = io::Phred33; int threads = 0; for (int i = 0; i < argc - 2; ++i) { if ((strcmp( argv[i], "-v" ) == 0) || (strcmp( argv[i], "-verbosity" ) == 0) || (strcmp( argv[i], "--verbosity" ) == 0)) { set_verbosity( Verbosity( atoi( argv[++i] ) ) ); } else if ((strcmp( argv[i], "-F" ) == 0) || (strcmp( argv[i], "--skip-forward" ) == 0)) // skip forward strand { forward = false; } else if ((strcmp( argv[i], "-R" ) == 0) || (strcmp( argv[i], "--skip-reverse" ) == 0)) // skip reverse strand { reverse = false; } else if ((strcmp( argv[i], "-c" ) == 0) || (strcmp( argv[i], "--compression" ) == 0)) // setup compression level { comp_level = argv[++i]; } else if ((strcmp( argv[i], "-t" ) == 0) || (strcmp( argv[i], "--threads" ) == 0)) // setup number of threads { threads = atoi( argv[++i] ); } } try { log_visible(stderr,"nvSetBWT... started\n"); // build an output file SharedPointer<SetBWTHandler> output_handler = SharedPointer<SetBWTHandler>( open_bwt_file( output_name, comp_level ) ); if (output_handler == NULL) { log_error(stderr, " failed to create an output handler\n"); return 1; } // gather device memory stats size_t free_device, total_device; cudaMemGetInfo(&free_device, &total_device); cuda::check_error("cuda-check"); log_stats(stderr, " device has %ld of %ld MB free\n", free_device/1024/1024, total_device/1024/1024); #ifdef _OPENMP // now set the number of CPU threads omp_set_num_threads( threads > 0 ? 
threads : omp_get_num_procs() ); omp_set_nested(1); #pragma omp parallel { log_verbose(stderr, " running on multiple threads (%d)\n", omp_get_thread_num()); } #endif uint32 encoding_flags = 0u; if (forward) encoding_flags |= io::FORWARD; if (reverse) encoding_flags |= io::REVERSE_COMPLEMENT; log_visible(stderr, "opening read file \"%s\"\n", reads_name); SharedPointer<nvbio::io::SequenceDataStream> read_data_file( nvbio::io::open_sequence_file( reads_name, qencoding, uint32(-1), uint32(-1), io::SequenceEncoding( encoding_flags ) ) ); if (read_data_file == NULL || read_data_file->is_ok() == false) { log_error(stderr, " failed opening file \"%s\"\n", reads_name); return 1; } // output vectors PagedText<SYMBOL_SIZE,BIG_ENDIAN> bwt; SparseSymbolSet dollars; // get the current device int current_device; cudaGetDevice( &current_device ); // build a BWTEContext BWTE_context_type bwte_context( current_device ); // find out how big a block can we alloc uint32 max_block_suffixes = 256*1024*1024; uint32 max_block_strings = 16*1024*1024; while (bwte_context.needed_device_memory( max_block_strings, max_block_suffixes ) + 256u*1024u*1024u >= free_device) max_block_suffixes /= 2; log_verbose(stderr, " block size: %u\n", max_block_suffixes); // reserve enough space for the block processing bwte_context.reserve( max_block_strings, max_block_suffixes ); cudaMemGetInfo(&free_device, &total_device); log_stats(stderr, " device has %ld of %ld MB free\n", free_device/1024/1024, total_device/1024/1024); // build the input stage InputStage input_stage( read_data_file.get(), max_block_strings, max_block_suffixes - max_block_strings ); // build the sort stage SortStage sort_stage( bwte_context ); // build the sink SinkStage sink_stage( bwte_context, bwt, dollars ); // build the pipeline Pipeline pipeline; const uint32 in0 = pipeline.append_stage( &input_stage, 4u ); const uint32 in1 = pipeline.append_stage( &sort_stage, 4u ); const uint32 out = pipeline.append_sink( &sink_stage ); pipeline.add_dependency( in0, out ); pipeline.add_dependency( in0, in1 ); pipeline.add_dependency( in1, out ); Timer timer; timer.start(); // and run it! pipeline.run(); log_info(stderr," writing output... started\n"); // write out the results for (uint32 i = 0; i < bwt.page_count(); ++i) { // find the dollars corresponding to this page const uint64 page_begin = bwt.get_page_offset(i); const uint64 page_end = bwt.get_page_offset(i+1); const uint64 dollars_begin = nvbio::lower_bound_index( page_begin, dollars.pos(), dollars.size() ); const uint64 dollars_end = nvbio::lower_bound_index( page_end, dollars.pos(), dollars.size() ); //log_debug(stderr," page[%u] : %llu symbols (%llu,%llu), %llu dollars (%llu,%llu)\n", i, page_end - page_begin, page_begin, page_end, dollars_end - dollars_begin, dollars_begin, dollars_end); // and output the page output_handler->process( bwt.get_page_size(i), SYMBOL_SIZE, (const uint32*)bwt.get_page(i), dollars_end - dollars_begin, dollars.pos() + dollars_begin, dollars.ids() + dollars_begin ); } log_info(stderr," writing output... done\n"); timer.stop(); const float time = timer.seconds(); log_verbose(stderr," total time : %.1fs\n", time); log_verbose(stderr," peak memory : %.1f GB\n", float( peak_resident_memory() ) / float(1024*1024*1024)); log_visible(stderr,"nvSetBWT... 
done\n"); } catch (nvbio::cuda_error e) { log_error(stderr, "caught a nvbio::cuda_error exception:\n"); log_error(stderr, " %s\n", e.what()); return 1; } catch (nvbio::bad_alloc e) { log_error(stderr, "caught a nvbio::bad_alloc exception:\n"); log_error(stderr, " %s\n", e.what()); return 1; } catch (nvbio::logic_error e) { log_error(stderr, "caught a nvbio::logic_error exception:\n"); log_error(stderr, " %s\n", e.what()); return 1; } catch (nvbio::runtime_error e) { log_error(stderr, "caught a nvbio::runtime_error exception:\n"); log_error(stderr, " %s\n", e.what()); return 1; } catch (std::bad_alloc e) { log_error(stderr, "caught a std::bad_alloc exception:\n"); log_error(stderr, " %s\n", e.what()); return 1; } catch (std::logic_error e) { log_error(stderr, "caught a std::logic_error exception:\n"); log_error(stderr, " %s\n", e.what()); return 1; } catch (std::runtime_error e) { log_error(stderr, "caught a std::runtime_error exception:\n"); log_error(stderr, " %s\n", e.what()); return 1; } catch (...) { log_error(stderr, "caught an unknown exception!\n"); return 1; } return 0; }
#pragma once #include <gunrock/util/error_utils.cuh> #include <gunrock/util/cuda_properties.cuh> #include <gunrock/util/device_intrinsics.cuh> #include <gunrock/util/io/modified_load.cuh> #include <gunrock/util/io/modified_store.cuh> #include <gunrock/util/array_utils.cuh> namespace gunrock { namespace util { /** * Manages device storage needed for implementing work-stealing * and queuing progress between CTAs in a single grid. * * Can be used for: * * (1) Work-stealing. Consists of a pair of counters in * global device memory, optionally, a host-managed selector for * indexing into the pair. * * (2) Device-managed queue. Consists of a quadruplet of counters * in global device memory and selection into the counters is made * based upon the supplied iteration count. * Current iteration: incoming queue length * Next iteration: outgoing queue length * Next next iteration: needs to be reset before next iteration * Can be used with work-stealing counters to for work-stealing * queue operation * * For work-stealing passes, the current counter provides an atomic * reference of progress, and the current pass will typically reset * the counter for the next. * */ template <typename SizeT> class CtaWorkProgress { protected: enum { QUEUE_COUNTERS = 4, STEAL_COUNTERS = 2, OVERFLOW_COUNTERS = 1, }; // Seven pointer-sized counters in global device memory (we may not use // all of them, or may only use 32-bit versions of them) SizeT* d_counters; util::Array1D<int, SizeT, PINNED, cudaHostAllocMapped | cudaHostAllocPortable>* p_counters; // Host-controlled selector for indexing into d_counters. int progress_selector; public: enum { COUNTERS = QUEUE_COUNTERS + STEAL_COUNTERS + OVERFLOW_COUNTERS }; /** * Constructor */ CtaWorkProgress() : d_counters(NULL), progress_selector(0), p_counters(NULL) {} virtual ~CtaWorkProgress() {} //--------------------------------------------------------------------- // Work-stealing //--------------------------------------------------------------------- // Steals work from the host-indexed progress counter, returning // the offset of that work (from zero) and incrementing it by count. // Typically called by thread-0 // template <typename SizeT> __device__ __forceinline__ SizeT Steal(SizeT count) { SizeT* d_steal_counters = d_counters + QUEUE_COUNTERS; return util::AtomicInt<SizeT>::Add(d_steal_counters + progress_selector, count); } // Steals work from the specified iteration's progress counter, returning the // offset of that work (from zero) and incrementing it by count. // Typically called by thread-0 template </*typename SizeT,*/ typename IterationT> __device__ __forceinline__ SizeT Steal(SizeT count, IterationT iteration) { SizeT* d_steal_counters = d_counters + QUEUE_COUNTERS; return util::AtomicInt<SizeT>::Add(d_steal_counters + (iteration & 1), count); } // Resets the work progress for the next host-indexed work-stealing // pass. Typically called by thread-0 in block-0. // template <typename SizeT> __device__ __forceinline__ void PrepResetSteal() { SizeT reset_val = 0; SizeT* d_steal_counters = d_counters + QUEUE_COUNTERS; util::io::ModifiedStore<util::io::st::cg>::St( reset_val, d_steal_counters + (progress_selector ^ 1)); } // Resets the work progress for the specified work-stealing iteration. // Typically called by thread-0 in block-0. 
template </*typename SizeT,*/ typename IterationT> __device__ __forceinline__ void PrepResetSteal(IterationT iteration) { SizeT reset_val = 0; SizeT* d_steal_counters = d_counters + QUEUE_COUNTERS; util::io::ModifiedStore<util::io::st::cg>::St( reset_val, d_steal_counters + (iteration & 1)); } //--------------------------------------------------------------------- // Queuing //--------------------------------------------------------------------- // Get counter for specified iteration template </*typename SizeT,*/ typename IterationT> __device__ __forceinline__ SizeT* GetQueueCounter( IterationT iteration) const { return d_counters + (iteration & 0x3); } // Load work queue length for specified iteration template </*typename SizeT,*/ typename IterationT> __device__ __forceinline__ SizeT LoadQueueLength(IterationT iteration) { SizeT queue_length; util::io::ModifiedLoad<util::io::ld::cg>::Ld(queue_length, GetQueueCounter(iteration)); return queue_length; } // Store work queue length for specified iteration template </*typename SizeT,*/ typename IterationT> __device__ __forceinline__ void StoreQueueLength(SizeT queue_length, IterationT iteration) { util::io::ModifiedStore<util::io::st::cg>::St(queue_length, GetQueueCounter(iteration)); } // Enqueues work from the specified iteration's queue counter, returning the // offset of that work (from zero) and incrementing it by count. // Typically called by thread-0 template </*typename SizeT,*/ typename IterationT> __device__ __forceinline__ SizeT Enqueue(SizeT count, IterationT iteration) { SizeT old_value = util::AtomicInt<SizeT>::Add(GetQueueCounter(iteration), count); // printf("d_counters = %p, iteration = %lld, old_value = %lld, count = // %lld, blockIdx.x = %d\n", // d_counters, (long long) iteration, (long long) old_value, (long // long)count, blockIdx.x); return old_value; } // Sets the overflow counter to non-zero // template <typename SizeT> __device__ __forceinline__ void SetOverflow() { d_counters[QUEUE_COUNTERS + STEAL_COUNTERS] = 1; } cudaError_t Reset_(SizeT reset_val = 0, cudaStream_t stream = 0) { cudaError_t retval = cudaSuccess; for (SizeT i = 0; i < COUNTERS; i++) (*p_counters)[i] = reset_val; if (retval = p_counters->Move(util::HOST, util::DEVICE, COUNTERS, 0, stream)) return retval; progress_selector = 0; return retval; } cudaError_t Init() { cudaError_t retval = cudaSuccess; if (retval = p_counters->Allocate(COUNTERS, util::HOST | util::DEVICE)) return retval; d_counters = p_counters->GetPointer(util::DEVICE); progress_selector = 0; return retval; } cudaError_t Release() { progress_selector = 0; d_counters = NULL; return p_counters->Release(); } }; /** * Version of work progress with storage lifetime management. * * We can use this in host enactors, and pass the base CtaWorkProgress * as parameters to kernels. 
*/ template <typename SizeT> class CtaWorkProgressLifetime : public CtaWorkProgress<SizeT> { protected: // GPU d_counters was allocated on int gpu; util::Array1D<int, SizeT, PINNED, cudaHostAllocMapped | cudaHostAllocPortable> counters; public: /** * Constructor */ CtaWorkProgressLifetime() : CtaWorkProgress<SizeT>(), gpu(GR_INVALID_DEVICE) { counters.SetName("counters"); this->p_counters = &counters; } /** * Destructor */ virtual ~CtaWorkProgressLifetime() { // Release(); } // Deallocates and resets the progress counters cudaError_t Release() { cudaError_t retval = cudaSuccess; if (gpu != GR_INVALID_DEVICE) { // Save current gpu int current_gpu; if (retval = util::GRError( cudaGetDevice(&current_gpu), "CtaWorkProgress cudaGetDevice failed: ", __FILE__, __LINE__)) return retval; // Deallocate if (retval = util::GRError( cudaSetDevice(gpu), "CtaWorkProgress cudaSetDevice failed: ", __FILE__, __LINE__)) return retval; // if (retval = util::GRError(cudaFree(d_counters), // "CtaWorkProgress cudaFree d_counters failed: ", __FILE__, __LINE__)) // return retval; if (retval = CtaWorkProgress<SizeT>::Release()) return retval; // d_counters = NULL; gpu = GR_INVALID_DEVICE; // Restore current gpu if (retval = util::GRError( cudaSetDevice(current_gpu), "CtaWorkProgress cudaSetDevice failed: ", __FILE__, __LINE__)) return retval; } return retval; } // Sets up the progress counters for the next kernel launch (lazily // allocating and initializing them if necessary) // template <typename SizeT> cudaError_t Init() { cudaError_t retval = cudaSuccess; // Make sure that our progress counters are allocated if (this->counters.GetPointer(util::DEVICE) == NULL) { // Allocate and initialize if (retval = util::GRError( cudaGetDevice(&gpu), "CtaWorkProgress cudaGetDevice failed: ", __FILE__, __LINE__)) return retval; if (retval = CtaWorkProgress<SizeT>::Init()) return retval; if (retval = CtaWorkProgress<SizeT>::Reset_()) return retval; } // Update our progress counter selector to index the next progress counter // progress_selector ^= 1; return retval; } // Checks if overflow counter is set // template <typename SizeT> cudaError_t CheckOverflow(bool& overflow, cudaStream_t stream = 0) // out param { cudaError_t retval = cudaSuccess; // SizeT counter; if (retval = counters.Move(util::DEVICE, util::HOST, 1, this->QUEUE_COUNTERS + this->STEAL_COUNTERS, stream)) return retval; // if (retval = util::GRError(cudaMemcpy( // &counter, // ((SizeT*) d_counters) + QUEUE_COUNTERS + STEAL_COUNTERS, // 1 * sizeof(SizeT), // cudaMemcpyDeviceToHost), // "CtaWorkProgress cudaMemcpy d_counters failed", __FILE__, __LINE__)) // break; overflow = counters[this->QUEUE_COUNTERS + this->STEAL_COUNTERS]; return retval; } // Acquire work queue length template <typename IterationT /*, typename SizeT*/> cudaError_t GetQueueLength(IterationT iteration, SizeT& queue_length, bool DEBUG = false, cudaStream_t stream = 0, bool skip_sync = false) // out param { cudaError_t retval = cudaSuccess; IterationT queue_length_idx = iteration & 0x3; if (stream == 0) { if (!DEBUG) cudaMemcpy(&queue_length, this->d_counters + queue_length_idx, sizeof(SizeT), cudaMemcpyDeviceToHost); else if (retval = util::GRError( cudaMemcpy(&queue_length, this->d_counters + queue_length_idx, sizeof(SizeT), cudaMemcpyDeviceToHost), "CtaWorkProgress cudaMemcpy d_counters failed", __FILE__, __LINE__)) return retval; } else { // printf("GetQueueLength using MemcpyAsync\n"); if (!DEBUG) cudaMemcpyAsync(&queue_length, this->d_counters + queue_length_idx, sizeof(SizeT), 
cudaMemcpyDeviceToHost, stream); else if (retval = util::GRError( cudaMemcpyAsync( &queue_length, this->d_counters + queue_length_idx, sizeof(SizeT), cudaMemcpyDeviceToHost, stream), "CtaWorkProgress cudaMemcpyAsync d_counter failed.", __FILE__, __LINE__)) return retval; if (!skip_sync) { if (retval = util::GRError(cudaStreamSynchronize(stream), "CtaWorkProgress GetQueueLength failed", __FILE__, __LINE__)) return retval; } } /*if (retval = counters.Move(util::DEVICE, util::HOST, 1, queue_length_idx, stream)) return retval; if (!skip_sync) { if (retval = util::GRError(cudaStreamSynchronize(stream), "CtaWorkProgress GetQueueLength failed", __FILE__, __LINE__)) return retval; } queue_length = counters[queue_length_idx];*/ return retval; } template <typename IndexT /*, typename SizeT*/> SizeT* GetQueueLengthPointer(IndexT index) { IndexT queue_length_idx = index & 0x3; return counters.GetPointer(util::DEVICE) + queue_length_idx; } // Set work queue length template <typename IterationT /*, typename SizeT*/> cudaError_t SetQueueLength(IterationT iteration, SizeT queue_length, bool DEBUG = false, cudaStream_t stream = 0) { cudaError_t retval = cudaSuccess; IterationT queue_length_idx = iteration & 0x3; /*if (stream == 0) { if (!DEBUG) cudaMemcpy(((SizeT*) d_counters) + queue_length_idx, &queue_length, sizeof(SizeT), cudaMemcpyHostToDevice); else if (retval = util::GRError(cudaMemcpy( ((SizeT*) d_counters) + queue_length_idx, &queue_length, 1 * sizeof(SizeT), cudaMemcpyHostToDevice), "CtaWorkProgress cudaMemcpy d_counters failed", __FILE__, __LINE__)) break; } else { // printf("gpu = %d, queue_idx = %d, d_counters = %p, stream = %d, queue_length = %d\n",gpu, queue_length_idx, d_counters, stream, queue_length);fflush(stdout); //util::MemsetKernel<<<1,1,0,stream>>>(((SizeT*) d_counters) + queue_length_idx, queue_length, 1); cudaMemcpyAsync(((SizeT*) d_counters) + queue_length_idx, &queue_length, sizeof(SizeT), cudaMemcpyHostToDevice,stream); if (DEBUG) { cudaStreamSynchronize(stream); retval = util::GRError("CtaWorkProgress MemsetKernel d_counters failed", __FILE__, __LINE__); } }*/ this->counters[queue_length_idx] = queue_length; if (retval = this->counters.Move(util::HOST, util::DEVICE, 1, queue_length_idx, stream)) return retval; if (DEBUG) { if (retval = util::GRError(cudaStreamSynchronize(stream), "CtaWorkProgress SetQueuelength failed", __FILE__, __LINE__)) return retval; } return retval; } }; } // namespace util } // namespace gunrock
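// A minimal standalone CUDA sketch of the counter-selection scheme CtaWorkProgress uses
// for device-managed queues: each iteration owns the counter at index (iteration & 0x3),
// and -- per the class comment above -- a kernel reads its incoming length from the
// current iteration's counter while reserving output space in the next iteration's
// counter with a single atomicAdd per block. This sketch uses raw CUDA atomics instead
// of the gunrock wrappers (util::AtomicInt, ModifiedLoad/ModifiedStore) and made-up
// parameter names; it is illustrative only.
__global__ void enqueue_sketch(
    int* counters,          // the four queue counters
    int  iteration,         // current iteration
    int  items_this_block,  // number of outputs this block wants to append
    int* block_offsets)     // per-block base offsets into the outgoing queue
{
    if (threadIdx.x == 0) {
        int* outgoing = counters + ((iteration + 1) & 0x3);   // next iteration's queue length
        block_offsets[blockIdx.x] = atomicAdd(outgoing, items_this_block);
    }
}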
/** \addtogroup cudpp_cta * @{ */ /** @name Compress Functions * @{ */ #include <stdio.h> #include <cudpp_globals.h> template<class T, int depth> __device__ void binSearch_frag_mult(T* keyArraySmem, T* valueArraySmem, int offset, int &mid, T cmpValue, T testValue, int myAddress, int testGlobalIndex, T* globalPointerArray, T* globalStringArray, int bIndex, size_t numElements) { cmpValue = keyArraySmem[mid]; if(cmpValue != testValue) mid = (cmpValue > testValue ? mid-offset : mid+offset); int count = 1; T cmpKey = cmpValue; T tstKey = testValue; while(cmpKey == tstKey) { tstKey = (myAddress+4*count > numElements-1) ? globalStringArray[myAddress + 4*count - numElements] : globalStringArray[myAddress + 4*count]; cmpKey = (valueArraySmem[mid] + 4*count > numElements-1) ? globalStringArray[valueArraySmem[mid] + 4*count - numElements] : globalStringArray[valueArraySmem[mid] + 4*count]; if(cmpKey > tstKey) mid -= offset; else if(cmpKey < tstKey) mid += offset; count++; } } template<class T, int depth> __device__ void linearStringMerge(T *searchArray, T *pointerArray, T *A_values, T myKey, T myAddress, int &index, T &cmpValue, T *saveGlobalArray, T *savePointerArray, T *stringValues, int myStartIdxC, int myStartIdxA, int myStartIdxB, int localAPartSize, int localBPartSize, int localCPartSize, T localMaxB, T finalMaxB, T localMinB, int tid, int aIndex, int bIndex, int offset, int subPartitions, size_t numElements) { while(cmpValue < myKey && index < BWT_INTERSECT_B_BLOCK_SIZE_multi ) cmpValue = searchArray[++index]; bool breakNext = false; while(cmpValue == myKey && index < BWT_INTERSECT_B_BLOCK_SIZE_multi && !breakNext /*&& cmpValue != UINT_MAX*/) { int count = 1; T tmpKey = myKey; T cmpKey = cmpValue; while(tmpKey == cmpKey) { tmpKey = (myAddress+4*count > numElements-1) ? stringValues[myAddress + 4*count - numElements] : stringValues[myAddress + 4*count]; cmpKey = (pointerArray[index] + 4*count > numElements-1) ? stringValues[pointerArray[index] + 4*count - numElements] : stringValues[pointerArray[index] + 4*count]; if(cmpKey < tmpKey) cmpValue = searchArray[++index]; else if(cmpKey > tmpKey || myAddress == pointerArray[index]) breakNext = true; count++; } } int globalCAddress = myStartIdxC + index + bIndex + aIndex + offset + tid*depth; if(((myKey < localMaxB && myKey > localMinB) || bIndex+index >= (localBPartSize) || (index > 0 && index <BWT_INTERSECT_B_BLOCK_SIZE_multi)) && globalCAddress < (myStartIdxC+localCPartSize) && myKey < finalMaxB) { saveGlobalArray [globalCAddress] = myKey; savePointerArray[globalCAddress] = myAddress; } else if((myKey == localMaxB && myKey <= finalMaxB) && index == BWT_INTERSECT_B_BLOCK_SIZE_multi && globalCAddress <= (myStartIdxC+localCPartSize)) { unsigned int tmpAdd = myAddress; unsigned int cmpAdd = A_values[myStartIdxB+bIndex+index]; int count = 1; unsigned int tmpKey = (tmpAdd+4*count > numElements-1) ? stringValues[tmpAdd + 4*count - numElements] : stringValues[tmpAdd + 4*count]; unsigned int cmpKey = (cmpAdd+4*count > numElements-1) ? stringValues[cmpAdd + 4*count - numElements] : stringValues[cmpAdd + 4*count]; while(tmpKey == cmpKey) { count++; tmpKey = (tmpAdd+4*count > numElements-1) ? stringValues[tmpAdd + 4*count - numElements] : stringValues[tmpAdd + 4*count]; cmpKey = (cmpAdd+4*count > numElements-1) ? 
stringValues[cmpAdd + 4*count - numElements] : stringValues[cmpAdd + 4*count]; } if(tmpKey < cmpKey) { saveGlobalArray [globalCAddress] = myKey; savePointerArray[globalCAddress] = myAddress; } } else if(myKey == localMinB && globalCAddress < (myStartIdxC+localCPartSize)) { unsigned int tmpAdd = myAddress; unsigned int cmpAdd = A_values[myStartIdxB+bIndex+index]; int count = 1; unsigned int tmpKey = (tmpAdd+4*count > numElements-1) ? stringValues[tmpAdd + 4*count - numElements] : stringValues[tmpAdd + 4*count]; unsigned int cmpKey = (cmpAdd+4*count > numElements-1) ? stringValues[cmpAdd + 4*count - numElements] : stringValues[cmpAdd + 4*count]; while(tmpKey == cmpKey) { count++; tmpKey = (tmpAdd+4*count > numElements-1) ? stringValues[tmpAdd + 4*count - numElements] : stringValues[tmpAdd + 4*count]; cmpKey = (cmpAdd+4*count > numElements-1) ? stringValues[cmpAdd + 4*count - numElements] : stringValues[cmpAdd + 4*count]; } if(tmpKey > cmpKey) { saveGlobalArray [globalCAddress] = myKey; savePointerArray[globalCAddress] = myAddress; } } } template<class T, int depth> __device__ void binSearch_fragment(T* binArray, T* pointerBinArray, int offset, int &mid, T cmpValue, T testValue, T myAddress, T* globalStringArray, T* globalStringArray2, size_t numElements) { cmpValue = binArray[mid]; if(cmpValue != testValue) mid = (cmpValue > testValue ? mid-offset : mid+offset); int count = 1; T cmpKey = cmpValue; while(cmpKey == testValue) { testValue = (myAddress+4*count > numElements-1) ? globalStringArray2[myAddress + 4*count - numElements] : globalStringArray2[myAddress + 4*count]; cmpKey = (pointerBinArray[mid]+4*count > numElements-1) ? globalStringArray2[pointerBinArray[mid] + 4*count - numElements] : globalStringArray2[pointerBinArray[mid] + 4*count]; if(cmpKey > testValue) mid -= offset; else if(cmpKey < testValue) mid += offset; count++; } } template<class T, int depth> __device__ void lin_merge_simple(T &cmpValue, T myKey, T myAddress, int &index, T* BKeys, T* BValues, T* stringValues, T* A_values, T* A_keys_out, T* A_values_out, int myStartIdxA, int myStartIdxB, int myStartIdxC, T localMinB, T localMaxB, int aCont, int bCont, int totalSize, int sizePerPartition, int i, T* stringValues2, size_t numElements) { while(cmpValue < myKey && index < BWT_INTERSECT_B_BLOCK_SIZE_simple) cmpValue = BKeys[++index]; bool breakNext = false; while(cmpValue == myKey && index < BWT_INTERSECT_B_BLOCK_SIZE_simple && !breakNext) { int count = 1; T tmpKey = myKey; T cmpKey = cmpValue; while(tmpKey == cmpKey) { tmpKey = (myAddress+4*count > numElements-1) ? stringValues2[myAddress + 4*count - numElements] : stringValues2[myAddress + 4*count]; cmpKey = (BValues[index]+4*count > numElements-1) ? 
stringValues2[BValues[index] + 4*count - numElements] : stringValues2[BValues[index] + 4*count]; if(cmpKey < tmpKey) cmpValue = BKeys[++index]; else if(cmpKey > tmpKey) breakNext = true; count++; } } int globalCAddress = myStartIdxC + bCont + index + aCont + i; //Save Value if it is valid (correct window) //If we are on the edge of a window, and we are tied with the localMax or localMin value //we must go to global memory to find out if we are valid if((myKey < localMaxB && myKey > localMinB) || (index==BWT_INTERSECT_B_BLOCK_SIZE_simple && (bCont+index)>=sizePerPartition) || (index > 0 && index <BWT_INTERSECT_B_BLOCK_SIZE_simple)) { A_keys_out[globalCAddress] = myKey; A_values_out[globalCAddress] = myAddress; } else if(myKey == localMaxB && index == BWT_INTERSECT_B_BLOCK_SIZE_simple) { unsigned int tmpAdd = myAddress; unsigned int cmpAdd = A_values[myStartIdxB+bCont+index]; int count = 1; unsigned int tmpKey = (tmpAdd+4*count > numElements-1) ? stringValues2[tmpAdd + 4*count - numElements] : stringValues2[tmpAdd + 4*count]; unsigned int cmpKey = (cmpAdd+4*count > numElements-1) ? stringValues2[cmpAdd + 4*count - numElements] : stringValues2[cmpAdd + 4*count]; while(tmpKey == cmpKey) { count++; tmpKey = (tmpAdd+4*count > numElements-1) ? stringValues2[tmpAdd + 4*count - numElements] : stringValues2[tmpAdd + 4*count]; cmpKey = (cmpAdd+4*count > numElements-1) ? stringValues2[cmpAdd + 4*count - numElements] : stringValues2[cmpAdd + 4*count]; } if(tmpKey < cmpKey) { A_keys_out[globalCAddress] = myKey; A_values_out[globalCAddress] = myAddress; } } else if(myKey == localMinB) { unsigned int tmpAdd = myAddress; unsigned int cmpAdd = A_values[myStartIdxB+bCont+index]; int count = 1; unsigned int tmpKey = (tmpAdd+4*count > numElements-1) ? stringValues2[tmpAdd + 4*count - numElements] : stringValues2[tmpAdd + 4*count]; unsigned int cmpKey = (cmpAdd+4*count > numElements-1) ? stringValues2[cmpAdd + 4*count - numElements] : stringValues2[cmpAdd + 4*count]; while(tmpKey == cmpKey) { count++; tmpKey = (tmpAdd+4*count > numElements-1) ? stringValues2[tmpAdd + 4*count - numElements] : stringValues2[tmpAdd + 4*count]; cmpKey = (cmpAdd+4*count > numElements-1) ? stringValues2[cmpAdd + 4*count - numElements] : stringValues2[cmpAdd + 4*count]; } if(tmpKey > cmpKey) { A_keys_out[globalCAddress] = myKey; A_values_out[globalCAddress] = myAddress; } } } template<class T, int depth> __device__ void bin_search_block(T &cmpValue, T tmpVal, T* in, T* addressPad, const T* stringVals, int & j, int bump, T* stringVals2, size_t numElements) { cmpValue = in[j]; if(cmpValue == tmpVal) { T tmp = (addressPad[depth*threadIdx.x]+4*1 > numElements-1) ? stringVals2[addressPad[depth*threadIdx.x]+4*1-numElements] : stringVals2[addressPad[depth*threadIdx.x]+4*1]; T tmp2 = (addressPad[j]+4*1 > numElements-1) ? stringVals2[addressPad[j]+4*1-numElements] : stringVals2[addressPad[j]+4*1]; int i = 2; while(tmp == tmp2) { tmp = (addressPad[depth*threadIdx.x]+4*i > numElements-1) ? stringVals2[addressPad[depth*threadIdx.x]+4*i-numElements] : stringVals2[addressPad[depth*threadIdx.x]+4*i]; tmp2 = (addressPad[j]+4*i > numElements-1) ? stringVals2[addressPad[j]+4*i-numElements] : stringVals2[addressPad[j]+4*i]; i++; } j = (tmp2 < tmp ? j + bump : j - bump); } else j = (cmpValue < tmpVal ? 
j + bump : j - bump); __syncthreads(); } template<class T, int depth> __device__ void lin_search_block(T &cmpValue, T &tmpVal, T* in, T* addressPad, const T* stringVals, int &j, int offset, int last, int startAddress, int addPart, T* stringVals2, size_t numElements) { while (cmpValue < tmpVal && j < last) cmpValue = in[++j]; //If we need to tie break while linearly searching while(cmpValue == tmpVal && j < last) { T tmp = (addressPad[depth*threadIdx.x+offset]+4*1 > numElements-1) ? stringVals2[addressPad[depth*threadIdx.x+offset]+4*1-numElements] : stringVals2[addressPad[depth*threadIdx.x+offset]+4*1]; T tmp2 = (addressPad[j]+4*1 > numElements-1) ? stringVals2[addressPad[j]+4*1-numElements] : stringVals2[addressPad[j]+4*1]; int i = 2; while(tmp == tmp2) { tmp = (addressPad[depth*threadIdx.x+offset]+4*i > numElements-1) ? stringVals2[addressPad[depth*threadIdx.x+offset]+4*i-numElements] : stringVals2[addressPad[depth*threadIdx.x+offset]+4*i]; tmp2 = (addressPad[j]+4*i > numElements-1) ? stringVals2[addressPad[j]+4*i-numElements] : stringVals2[addressPad[j]+4*i]; i++; } if(tmp2 < tmp) cmpValue = in[++j]; else if(tmp2 > tmp) break; } //Corner case to handle being at the edge of our shared memory search j = ((j==last && cmpValue < tmpVal) ? j+1 : j); if (j == last && cmpValue == tmpVal) { T tmp = (addressPad[depth*threadIdx.x+offset]+4*1 > numElements-1) ? stringVals2[addressPad[depth*threadIdx.x+offset]+4*1-numElements] : stringVals2[addressPad[depth*threadIdx.x+offset]+4*1]; T tmp2 = (addressPad[j]+4*1 > numElements-1) ? stringVals2[addressPad[j]+4*1-numElements] : stringVals2[addressPad[j]+4*1]; int i = 2; while(tmp == tmp2) { tmp = (addressPad[depth*threadIdx.x+offset]+4*i > numElements-1) ? stringVals2[addressPad[depth*threadIdx.x+offset]+4*i-numElements] : stringVals2[addressPad[depth*threadIdx.x+offset]+4*i]; tmp2 = (addressPad[j]+4*i > numElements-1) ? stringVals2[addressPad[j]+4*i-numElements] : stringVals2[addressPad[j]+4*i]; i++; } if(tmp2 < tmp) j++; } tmpVal = j+startAddress+offset + addPart; } template<class T> __device__ void compareSwapVal(T &A1, T &A2, const int index1, const int index2, T* scratch, const T* stringVals, T* stringVals2, size_t numElements) { if(A1 > A2) { T tmp = A1; A1 = A2; A2 = tmp; tmp = scratch[index1]; scratch[index1] = scratch[index2]; scratch[index2] = tmp; } else if(A1 == A2) { T tmp = (scratch[index1]+4*1 > numElements-1) ? stringVals2[scratch[index1]+4*1-numElements] : stringVals2[scratch[index1]+1*4]; T tmp2 = (scratch[index2]+4*1 > numElements-1) ? stringVals2[scratch[index2]+4*1-numElements] : stringVals2[scratch[index2]+1*4]; int i = 2; while(tmp == tmp2) { tmp = (scratch[index1]+4*i > numElements-1) ? stringVals2[scratch[index1]+4*i-numElements] : stringVals2[scratch[index1]+4*i]; tmp2 = (scratch[index2]+4*i > numElements-1) ? 
stringVals2[scratch[index2]+4*i-numElements] : stringVals2[scratch[index2]+4*i]; i++; } if(tmp > tmp2) { tmp = A1; A1 = A2; A2 = tmp; tmp = scratch[index1]; scratch[index1] = scratch[index2]; scratch[index2] = tmp; } } } __device__ void BitArraySetBit(huffman_code *ba, unsigned int bit) { if (ba->numBits <= bit) { return; // bit out of range } ba->code[bit/8] |= BIT_IN_CHAR(bit); } __device__ void BitArrayShiftLeft(huffman_code *ba, unsigned int shifts) { int i, j; int chars = shifts / 8; // number of whole byte shifts shifts = shifts % 8; // number of bit shifts remaining if (shifts >= ba->numBits) { // all bits have been shifted off // set bits in all bytes to 0 memset((void *)(ba->code), 0, BITS_TO_CHARS(ba->numBits)); return; } // first handle big jumps of bytes if (chars > 0) { for (i = 0; (i + chars) < (int)BITS_TO_CHARS(ba->numBits); i++) { ba->code[i] = ba->code[i + chars]; } // now zero out new bytes on the right for (i = BITS_TO_CHARS(ba->numBits); chars > 0; chars--) { ba->code[i - chars] = 0; } } // now we have at most CUDPP_CHAR_BIT - 1 bit shifts across the whole array for (i = 0; i < (int)shifts; i++) { for (j = 0; j < (int)BIT_CHAR(ba->numBits - 1); j++) { ba->code[j] <<= 1; // handle shifts across byte bounds if (ba->code[j + 1] & MS_BIT) { ba->code[j] |= 0x01; } } ba->code[BIT_CHAR(ba->numBits - 1)] <<= 1; } } __device__ void BitArrayShiftRight(huffman_code *ba, unsigned int shifts) { int i, j; unsigned char mask; int chars = shifts / CUDPP_CHAR_BIT; // number of whole byte shifts shifts = shifts % CUDPP_CHAR_BIT; // number of bit shifts remaining if (shifts >= ba->numBits) { // all bits have been shifted off memset((void *)(ba->code), 0, BITS_TO_CHARS(ba->numBits)); return; } // first handle big jumps of bytes if (chars > 0) { for (i = BIT_CHAR(ba->numBits - 1); (i - chars) >= 0; i--) { ba->code[i] = ba->code[i - chars]; } // now zero out new bytes on the right for (; chars > 0; chars--) { ba->code[chars - 1] = 0; } } // now we have at most CUDPP_CHAR_BIT - 1 bit shifts across the whole array for (i = 0; i < (int)shifts; i++) { for (j = BIT_CHAR(ba->numBits - 1); j > 0; j--) { ba->code[j] >>= 1; // handle shifts across byte bounds if (ba->code[j - 1] & 0x01) { ba->code[j] |= MS_BIT; } } ba->code[0] >>= 1; } /*********************************************************************** * zero any spare bits that are beyond the end of the bit array so * increment and decrement are consistent. ***********************************************************************/ i = ba->numBits % CUDPP_CHAR_BIT; if (i != 0) { mask = UCHAR_MAX << (CUDPP_CHAR_BIT - i); ba->code[BIT_CHAR(ba->numBits - 1)] &= mask; } } __device__ int FindMinimumCount(my_huffman_node_t* ht, int elements) { int currentIndex = HUFF_NONE; // index with lowest count seen so far int currentCount = INT_MAX; // lowest count seen so far int currentLevel = INT_MAX; // level of lowest count seen so far // sequentially search array for (int i = 0; i < elements; i++) { // check for lowest count (or equally as low, but not as deep) if( (!ht[i].ignore) && (ht[i].count < currentCount || (ht[i].count == currentCount && ht[i].level < currentLevel)) ) { currentIndex = i; currentCount = ht[i].count; currentLevel = ht[i].level; } } return currentIndex; } /** @} */ // end compress functions /** @} */ // end cudpp_cta
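/*
 * A minimal host-side sketch (not part of CUDPP) of the tie-break that the
 * merge and search routines above all repeat inline: when two packed
 * 4-character keys compare equal, both rotations are walked forward four
 * characters at a time, wrapping around the end of the string, until they
 * differ.  compareRotations is a hypothetical helper added only for
 * illustration; stringValues and numElements mirror the kernel parameters.
 */
static int compareRotations(const unsigned int* stringValues,
                            size_t numElements,
                            unsigned int addrA,
                            unsigned int addrB)
{
    for (size_t count = 1; 4*count <= numElements; ++count)
    {
        // Same wrap-around indexing as the kernels: subtract numElements
        // instead of taking a modulus.
        const size_t ia = (addrA + 4*count > numElements - 1) ?
                           addrA + 4*count - numElements : addrA + 4*count;
        const size_t ib = (addrB + 4*count > numElements - 1) ?
                           addrB + 4*count - numElements : addrB + 4*count;

        const unsigned int keyA = stringValues[ia];
        const unsigned int keyB = stringValues[ib];

        if (keyA < keyB) return -1;   // rotation starting at addrA sorts first
        if (keyA > keyB) return  1;   // rotation starting at addrB sorts first
        // keys equal: keep walking forward
    }
    return 0;   // the two rotations are identical
}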
// ---------------------------------------------------------------------------
// next source file
// ---------------------------------------------------------------------------
* \file * cub::DeviceHistogram provides device-wide parallel operations for constructing histogram(s) from a sequence of samples data residing within global memory. */ #pragma once #include <stdio.h> #include <iterator> #include "dispatch/device_histogram_dispatch.cuh" #include "../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \brief DeviceHistogram provides device-wide parallel operations for constructing histogram(s) from a sequence of samples data residing within global memory. ![](histogram_logo.png) * \ingroup DeviceModule * * \par Overview * A <a href="http://en.wikipedia.org/wiki/Histogram"><em>histogram</em></a> * counts the number of observations that fall into each of the disjoint categories (known as <em>bins</em>). * * \par Usage Considerations * \cdp_class{DeviceHistogram} * * \par Performance * * \image html histo_perf.png * */ struct DeviceHistogram { /******************************************************************//** * \name Single-channel samples *********************************************************************/ //@{ /** * \brief Computes a device-wide histogram using fast block-wide sorting. * * \par * - The total number of samples across all channels (\p num_samples) must be a whole multiple of \p CHANNELS. * - Delivers consistent throughput regardless of sample diversity * - Histograms having a large number of bins (e.g., thousands) may adversely affect shared memory occupancy and performance (or even the ability to launch). * - Performance is often improved when referencing input samples through a texture-caching iterator (e.g., cub::TexObjInputIterator). * - \devicestorage * - \cdp * * \par Snippet * The code snippet below illustrates the computation of a 8-bin histogram of * single-channel <tt>unsigned char</tt> samples. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_histogram.cuh> * * // Declare, allocate, and initialize device pointers for input and histogram * int num_samples; // e.g., 12 * unsigned char *d_samples; // e.g., [2, 6, 7, 5, 3, 0, 2, 1, 7, 0, 6, 2] * unsigned int *d_histogram; // e.g., [ , , , , , , , ] * ... * * // Wrap d_samples device pointer in a random-access texture iterator * cub::TexObjInputIterator<unsigned char> d_samples_tex_itr; * d_samples_tex_itr.BindTexture(d_samples, num_samples * sizeof(unsigned char)); * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceHistogram::SingleChannelSorting<8>(d_temp_storage, temp_storage_bytes, d_samples_tex_itr, d_histogram, num_samples); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Compute histogram * cub::DeviceHistogram::SingleChannelSorting<8>(d_temp_storage, temp_storage_bytes, d_samples_tex_itr, d_histogram, num_samples); * * // Unbind texture iterator * d_samples_tex_itr.UnbindTexture(); * * // d_histogram <-- [2, 1, 3, 1, 0, 1, 2, 2] * * \endcode * * \tparam BINS Number of histogram bins per channel * \tparam InputIterator <b>[inferred]</b> Random-access input iterator type for reading input samples. 
(Must have an InputIterator::value_type that, when cast as an integer, falls in the range [0..BINS-1]) \iterator * \tparam HistoCounter <b>[inferred]</b> Integer type for counting sample occurrences per histogram bin */ template < int BINS, typename InputIterator, typename HistoCounter> CUB_RUNTIME_FUNCTION static cudaError_t SingleChannelSorting( void *d_temp_storage, ///< [in] %Device allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIterator d_samples, ///< [in] Input samples HistoCounter* d_histogram, ///< [out] Array of BINS counters of integral type \p HistoCounter. int num_samples, ///< [in] Number of samples to process cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. { // Signed integer type for global offsets typedef int Offset; // Dispatch type typedef DeviceHistogramDispatch< DEVICE_HISTO_SORT, BINS, 1, 1, InputIterator, HistoCounter, Offset> DeviceHistogramDispatch; return DeviceHistogramDispatch::Dispatch( d_temp_storage, temp_storage_bytes, d_samples, &d_histogram, num_samples, stream, debug_synchronous); } /** * \brief Computes a device-wide histogram using shared-memory atomic read-modify-write operations. * * \par * - Input samples having lower diversity can cause performance to be degraded due to serializations from bin-collisions. * - Histograms having a large number of bins (e.g., thousands) may adversely affect shared memory occupancy and performance (or even the ability to launch). * - Performance is often improved when referencing input samples through a texture-caching iterator (e.g., cub::TexObjInputIterator). * - \devicestorage * - \cdp * * \par Snippet * The code snippet below illustrates the computation of a 8-bin histogram of * single-channel <tt>unsigned char</tt> samples. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_histogram.cuh> * * // Declare, allocate, and initialize device pointers for input and histogram * int num_samples; // e.g., 12 * unsigned char *d_samples; // e.g., [2, 6, 7, 5, 3, 0, 2, 1, 7, 0, 6, 2] * unsigned int *d_histogram; // e.g., [ , , , , , , , ] * ... * * // Wrap d_samples device pointer in a random-access texture iterator * cub::TexObjInputIterator<unsigned char> d_samples_tex_itr; * d_samples_tex_itr.BindTexture(d_samples, num_samples * sizeof(unsigned char)); * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceHistogram::SingleChannelSorting<8>(d_temp_storage, temp_storage_bytes, d_samples_tex_itr, d_histogram, num_samples); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Compute histogram * cub::DeviceHistogram::SingleChannelSharedAtomic<8>(d_temp_storage, temp_storage_bytes, d_samples_tex_itr, d_histogram, num_samples); * * // Unbind texture iterator * d_samples_tex_itr.UnbindTexture(); * * // d_histogram <-- [2, 1, 3, 1, 0, 1, 2, 2] * * \endcode * * \tparam BINS Number of histogram bins per channel * \tparam InputIterator <b>[inferred]</b> Random-access input iterator type for reading input samples. 
(Must have an InputIterator::value_type that, when cast as an integer, falls in the range [0..BINS-1]) \iterator * \tparam HistoCounter <b>[inferred]</b> Integer type for counting sample occurrences per histogram bin */ template < int BINS, typename InputIterator, typename HistoCounter> CUB_RUNTIME_FUNCTION static cudaError_t SingleChannelSharedAtomic( void *d_temp_storage, ///< [in] %Device allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIterator d_samples, ///< [in] Input samples HistoCounter* d_histogram, ///< [out] Array of BINS counters of integral type \p HistoCounter. int num_samples, ///< [in] Number of samples to process cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. { // Signed integer type for global offsets typedef int Offset; // Dispatch type typedef DeviceHistogramDispatch< DEVICE_HISTO_SHARED_ATOMIC, BINS, 1, 1, InputIterator, HistoCounter, Offset> DeviceHistogramDispatch; return DeviceHistogramDispatch::Dispatch( d_temp_storage, temp_storage_bytes, d_samples, &d_histogram, num_samples, stream, debug_synchronous); } /** * \brief Computes a device-wide histogram using global-memory atomic read-modify-write operations. * * \par * - Input samples having lower diversity can cause performance to be degraded due to serializations from bin-collisions. * - Performance is not significantly impacted when computing histograms having large numbers of bins (e.g., thousands). * - Performance is often improved when referencing input samples through a texture-caching iterator (e.g., cub::TexObjInputIterator). * - \devicestorage * - \cdp * * \par Snippet * The code snippet below illustrates the computation of a 8-bin histogram of * single-channel <tt>unsigned char</tt> samples. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_histogram.cuh> * * // Declare, allocate, and initialize device pointers for input and histogram * int num_samples; // e.g., 12 * unsigned char *d_samples; // e.g., [2, 6, 7, 5, 3, 0, 2, 1, 7, 0, 6, 2] * unsigned int *d_histogram; // e.g., [ , , , , , , , ] * ... * * // Wrap d_samples device pointer in a random-access texture iterator * cub::TexObjInputIterator<unsigned char> d_samples_tex_itr; * d_samples_tex_itr.BindTexture(d_samples, num_samples * sizeof(unsigned char)); * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceHistogram::SingleChannelSorting<8>(d_temp_storage, temp_storage_bytes, d_samples_tex_itr, d_histogram, num_samples); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Compute histogram * cub::DeviceHistogram::SingleChannelGlobalAtomic<8>(d_temp_storage, temp_storage_bytes, d_samples_tex_itr, d_histogram, num_samples); * * // Unbind texture iterator * d_samples_tex_itr.UnbindTexture(); * * // d_histogram <-- [2, 1, 3, 1, 0, 1, 2, 2] * * \endcode * * \tparam BINS Number of histogram bins per channel * \tparam InputIterator <b>[inferred]</b> Random-access input iterator type for reading input samples. 
(Must have an InputIterator::value_type that, when cast as an integer, falls in the range [0..BINS-1]) \iterator * \tparam HistoCounter <b>[inferred]</b> Integer type for counting sample occurrences per histogram bin */ template < int BINS, typename InputIterator, typename HistoCounter> CUB_RUNTIME_FUNCTION static cudaError_t SingleChannelGlobalAtomic( void *d_temp_storage, ///< [in] %Device allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIterator d_samples, ///< [in] Input samples HistoCounter* d_histogram, ///< [out] Array of BINS counters of integral type \p HistoCounter. int num_samples, ///< [in] Number of samples to process cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. { // Signed integer type for global offsets typedef int Offset; // Dispatch type typedef DeviceHistogramDispatch< DEVICE_HISTO_GLOBAL_ATOMIC, BINS, 1, 1, InputIterator, HistoCounter, Offset> DeviceHistogramDispatch; return DeviceHistogramDispatch::Dispatch( d_temp_storage, temp_storage_bytes, d_samples, &d_histogram, num_samples, stream, debug_synchronous); } //@} end member group /******************************************************************//** * \name Interleaved multi-channel samples *********************************************************************/ //@{ /** * \brief Computes a device-wide histogram from multi-channel data using fast block-sorting. * * \par * - The total number of samples across all channels (\p num_samples) must be a whole multiple of \p CHANNELS. * - Delivers consistent throughput regardless of sample diversity * - Histograms having a large number of bins (e.g., thousands) may adversely affect shared memory occupancy and performance (or even the ability to launch). * - Performance is often improved when referencing input samples through a texture-caching iterator (e.g., cub::TexObjInputIterator). * - \devicestorage * - \cdp * * \par Snippet * The code snippet below illustrates the computation of three 256-bin histograms from * an input sequence of quad-channel (interleaved) <tt>unsigned char</tt> samples. * (E.g., RGB histograms from RGBA pixel samples.) * * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_histogram.cuh> * * // Declare, allocate, and initialize device pointers for input and histograms * int num_samples; // e.g., 20 (five pixels with four channels each) * unsigned char *d_samples; // e.g., [(2, 6, 7, 5), (3, 0, 2, 1), (7, 0, 6, 2), * // (0, 6, 7, 5), (3, 0, 2, 6)] * unsigned int *d_histogram[3]; // e.g., [ [ , , , , , , , ]; * // [ , , , , , , , ]; * // [ , , , , , , , ] ] * ... 
* * // Wrap d_samples device pointer in a random-access texture iterator * cub::TexObjInputIterator<unsigned char> d_samples_tex_itr; * d_samples_tex_itr.BindTexture(d_samples, num_samples * sizeof(unsigned char)); * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceHistogram::MultiChannelSorting<8, 4, 3>(d_temp_storage, temp_storage_bytes, d_samples_tex_itr, d_histograms, num_samples); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Compute histograms * cub::DeviceHistogram::MultiChannelSorting<8, 4, 3>(d_temp_storage, temp_storage_bytes, d_samples_tex_itr, d_histograms, num_samples); * * // Unbind texture iterator * d_samples_tex_itr.UnbindTexture(); * * // d_histogram <-- [ [1, 0, 1, 2, 0, 0, 0, 1]; * // [0, 3, 0, 0, 0, 0, 2, 0]; * // [0, 0, 2, 0, 0, 0, 1, 2] ] * * \endcode * * \tparam BINS Number of histogram bins per channel * \tparam CHANNELS Number of channels interleaved in the input data (may be greater than the number of channels being actively histogrammed) * \tparam ACTIVE_CHANNELS <b>[inferred]</b> Number of channels actively being histogrammed * \tparam InputIterator <b>[inferred]</b> Random-access input iterator type for reading input samples. (Must have an InputIterator::value_type that, when cast as an integer, falls in the range [0..BINS-1]) \iterator * \tparam HistoCounter <b>[inferred]</b> Integer type for counting sample occurrences per histogram bin */ template < int BINS, int CHANNELS, int ACTIVE_CHANNELS, typename InputIterator, typename HistoCounter> CUB_RUNTIME_FUNCTION static cudaError_t MultiChannelSorting( void *d_temp_storage, ///< [in] %Device allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIterator d_samples, ///< [in] Pointer to the input sequence of sample items. The samples from different channels are assumed to be interleaved (e.g., an array of 32b pixels where each pixel consists of four RGBA 8b samples). HistoCounter *d_histograms[ACTIVE_CHANNELS], ///< [out] Array of active channel histogram pointers, each pointing to an output array having BINS counters of integral type \p HistoCounter. int num_samples, ///< [in] Total number of samples to process in all channels, including non-active channels cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. { // Signed integer type for global offsets typedef int Offset; // Dispatch type typedef DeviceHistogramDispatch< DEVICE_HISTO_SORT, BINS, CHANNELS, ACTIVE_CHANNELS, InputIterator, HistoCounter, Offset> DeviceHistogramDispatch; return DeviceHistogramDispatch::Dispatch( d_temp_storage, temp_storage_bytes, d_samples, d_histograms, num_samples, stream, debug_synchronous); } /** * \brief Computes a device-wide histogram from multi-channel data using shared-memory atomic read-modify-write operations. * * \par * - The total number of samples across all channels (\p num_samples) must be a whole multiple of \p CHANNELS. * - Input samples having lower diversity can cause performance to be degraded due to serializations from bin-collisions. 
* - Histograms having a large number of bins (e.g., thousands) may adversely affect shared memory occupancy and performance (or even the ability to launch). * - Performance is often improved when referencing input samples through a texture-caching iterator (e.g., cub::TexObjInputIterator). * - \devicestorage * - \cdp * * \par Snippet * The code snippet below illustrates the computation of three 256-bin histograms from * an input sequence of quad-channel (interleaved) <tt>unsigned char</tt> samples. * (E.g., RGB histograms from RGBA pixel samples.) * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_histogram.cuh> * * // Declare, allocate, and initialize device pointers for input and histograms * int num_samples; // e.g., 20 (five pixels with four channels each) * unsigned char *d_samples; // e.g., [(2, 6, 7, 5), (3, 0, 2, 1), (7, 0, 6, 2), * // (0, 6, 7, 5), (3, 0, 2, 6)] * unsigned int *d_histogram[3]; // e.g., [ [ , , , , , , , ]; * // [ , , , , , , , ]; * // [ , , , , , , , ] ] * ... * * // Wrap d_samples device pointer in a random-access texture iterator * cub::TexObjInputIterator<unsigned char> d_samples_tex_itr; * d_samples_tex_itr.BindTexture(d_samples, num_samples * sizeof(unsigned char)); * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceHistogram::MultiChannelSharedAtomic<8, 4, 3>(d_temp_storage, temp_storage_bytes, d_samples_tex_itr, d_histograms, num_samples); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Compute histograms * cub::DeviceHistogram::MultiChannelSharedAtomic<8, 4, 3>(d_temp_storage, temp_storage_bytes, d_samples_tex_itr, d_histograms, num_samples); * * // Unbind texture iterator * d_samples_tex_itr.UnbindTexture(); * * // d_histogram <-- [ [1, 0, 1, 2, 0, 0, 0, 1]; * // [0, 3, 0, 0, 0, 0, 2, 0]; * // [0, 0, 2, 0, 0, 0, 1, 2] ] * * \endcode * * \tparam BINS Number of histogram bins per channel * \tparam CHANNELS Number of channels interleaved in the input data (may be greater than the number of channels being actively histogrammed) * \tparam ACTIVE_CHANNELS <b>[inferred]</b> Number of channels actively being histogrammed * \tparam InputIterator <b>[inferred]</b> Random-access input iterator type for reading input samples. (Must have an InputIterator::value_type that, when cast as an integer, falls in the range [0..BINS-1]) \iterator * \tparam HistoCounter <b>[inferred]</b> Integer type for counting sample occurrences per histogram bin */ template < int BINS, int CHANNELS, int ACTIVE_CHANNELS, typename InputIterator, typename HistoCounter> CUB_RUNTIME_FUNCTION static cudaError_t MultiChannelSharedAtomic( void *d_temp_storage, ///< [in] %Device allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIterator d_samples, ///< [in] Pointer to the input sequence of sample items. The samples from different channels are assumed to be interleaved (e.g., an array of 32b pixels where each pixel consists of four RGBA 8b samples). HistoCounter *d_histograms[ACTIVE_CHANNELS], ///< [out] Array of active channel histogram pointers, each pointing to an output array having BINS counters of integral type \p HistoCounter. 
int num_samples, ///< [in] Total number of samples to process in all channels, including non-active channels cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. { // Signed integer type for global offsets typedef int Offset; // Dispatch type typedef DeviceHistogramDispatch< DEVICE_HISTO_SHARED_ATOMIC, BINS, CHANNELS, ACTIVE_CHANNELS, InputIterator, HistoCounter, Offset> DeviceHistogramDispatch; return DeviceHistogramDispatch::Dispatch( d_temp_storage, temp_storage_bytes, d_samples, d_histograms, num_samples, stream, debug_synchronous); } /** * \brief Computes a device-wide histogram from multi-channel data using global-memory atomic read-modify-write operations. * * \par * - The total number of samples across all channels (\p num_samples) must be a whole multiple of \p CHANNELS. * - Input samples having lower diversity can cause performance to be degraded due to serializations from bin-collisions. * - Performance is not significantly impacted when computing histograms having large numbers of bins (e.g., thousands). * - Performance is often improved when referencing input samples through a texture-caching iterator (e.g., cub::TexObjInputIterator). * - \devicestorage * - \cdp * * \par Snippet * The code snippet below illustrates the computation of three 256-bin histograms from * an input sequence of quad-channel (interleaved) <tt>unsigned char</tt> samples. * (E.g., RGB histograms from RGBA pixel samples.) * * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_histogram.cuh> * * // Declare, allocate, and initialize device pointers for input and histograms * int num_samples; // e.g., 20 (five pixels with four channels each) * unsigned char *d_samples; // e.g., [(2, 6, 7, 5), (3, 0, 2, 1), (7, 0, 6, 2), * // (0, 6, 7, 5), (3, 0, 2, 6)] * unsigned int *d_histogram[3]; // e.g., [ [ , , , , , , , ]; * // [ , , , , , , , ]; * // [ , , , , , , , ] ] * ... * * // Wrap d_samples device pointer in a random-access texture iterator * cub::TexObjInputIterator<unsigned char> d_samples_tex_itr; * d_samples_tex_itr.BindTexture(d_samples, num_samples * sizeof(unsigned char)); * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceHistogram::MultiChannelGlobalAtomic<8, 4, 3>(d_temp_storage, temp_storage_bytes, d_samples_tex_itr, d_histograms, num_samples); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Compute histograms * cub::DeviceHistogram::MultiChannelGlobalAtomic<8, 4, 3>(d_temp_storage, temp_storage_bytes, d_samples_tex_itr, d_histograms, num_samples); * * // Unbind texture iterator * d_samples_tex_itr.UnbindTexture(); * * // d_histogram <-- [ [1, 0, 1, 2, 0, 0, 0, 1]; * // [0, 3, 0, 0, 0, 0, 2, 0]; * // [0, 0, 2, 0, 0, 0, 1, 2] ] * * \endcode * * \tparam BINS Number of histogram bins per channel * \tparam CHANNELS Number of channels interleaved in the input data (may be greater than the number of channels being actively histogrammed) * \tparam ACTIVE_CHANNELS <b>[inferred]</b> Number of channels actively being histogrammed * \tparam InputIterator <b>[inferred]</b> Random-access input iterator type for reading input samples. 
(Must have an InputIterator::value_type that, when cast as an integer, falls in the range [0..BINS-1]) \iterator * \tparam HistoCounter <b>[inferred]</b> Integer type for counting sample occurrences per histogram bin */ template < int BINS, int CHANNELS, int ACTIVE_CHANNELS, typename InputIterator, typename HistoCounter> CUB_RUNTIME_FUNCTION static cudaError_t MultiChannelGlobalAtomic( void *d_temp_storage, ///< [in] %Device allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIterator d_samples, ///< [in] Pointer to the input sequence of sample items. The samples from different channels are assumed to be interleaved (e.g., an array of 32b pixels where each pixel consists of four RGBA 8b samples). HistoCounter *d_histograms[ACTIVE_CHANNELS], ///< [out] Array of active channel histogram pointers, each pointing to an output array having BINS counters of integral type \p HistoCounter. int num_samples, ///< [in] Total number of samples to process in all channels, including non-active channels cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. { // Signed integer type for global offsets typedef int Offset; // Dispatch type typedef DeviceHistogramDispatch< DEVICE_HISTO_GLOBAL_ATOMIC, BINS, CHANNELS, ACTIVE_CHANNELS, InputIterator, HistoCounter, Offset> DeviceHistogramDispatch; return DeviceHistogramDispatch::Dispatch( d_temp_storage, temp_storage_bytes, d_samples, d_histograms, num_samples, stream, debug_synchronous); } //@} end member group }; /** * \example example_device_histogram.cu */ } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
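/*
 * Standalone usage sketch (a separate .cu file, shown here for reference) of
 * the single-channel interface declared above.  All three entry points share
 * the same two-phase convention: a first call with d_temp_storage == NULL
 * only writes the required size to temp_storage_bytes, and a second call with
 * the allocated buffer does the work -- both calls should name the same entry
 * point so the size matches the dispatch that is finally launched.  A plain
 * device pointer is passed instead of a TexObjInputIterator (the texture
 * iterator is only a performance optimization), error checking is omitted,
 * and this targets the legacy DeviceHistogram interface in this header, not
 * the reworked API of later CUB releases.
 */
#include <cub/cub.cuh>
#include <cstdio>

int main()
{
    const int num_samples = 12;
    unsigned char h_samples[num_samples] = {2, 6, 7, 5, 3, 0, 2, 1, 7, 0, 6, 2};

    unsigned char *d_samples   = NULL;
    unsigned int  *d_histogram = NULL;
    cudaMalloc((void**)&d_samples,   num_samples * sizeof(unsigned char));
    cudaMalloc((void**)&d_histogram, 8 * sizeof(unsigned int));
    cudaMemcpy(d_samples, h_samples, num_samples * sizeof(unsigned char),
               cudaMemcpyHostToDevice);

    // Phase 1: query the temporary storage size.
    void   *d_temp_storage     = NULL;
    size_t  temp_storage_bytes = 0;
    cub::DeviceHistogram::SingleChannelGlobalAtomic<8>(
        d_temp_storage, temp_storage_bytes, d_samples, d_histogram, num_samples);

    // Phase 2: allocate and run.
    cudaMalloc(&d_temp_storage, temp_storage_bytes);
    cub::DeviceHistogram::SingleChannelGlobalAtomic<8>(
        d_temp_storage, temp_storage_bytes, d_samples, d_histogram, num_samples);

    unsigned int h_histogram[8];
    cudaMemcpy(h_histogram, d_histogram, 8 * sizeof(unsigned int),
               cudaMemcpyDeviceToHost);
    for (int i = 0; i < 8; ++i)
        printf("bin %d: %u\n", i, h_histogram[i]);   // expect 2 1 3 1 0 1 2 2

    cudaFree(d_samples);
    cudaFree(d_histogram);
    cudaFree(d_temp_storage);
    return 0;
}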
// ---------------------------------------------------------------------------
// next source file
// ---------------------------------------------------------------------------
cuda_determinant::cuda_determinant() :
  N(0), A(NULL), Ainv(NULL), Ainv_delta(NULL), Ainv_colk(NULL),
  new_row(NULL), delta(NULL)
{
}

cuda_determinant::cuda_determinant(int n)
{
  resize(n);
}

void cuda_determinant::resize(int n)
{
  N = n;
  cudaMalloc((void**)&A         , N*N*sizeof(float));
  cudaMalloc((void**)&Ainv      , N*N*sizeof(float));
  cudaMalloc((void**)&Ainv_delta, 1*N*sizeof(float));
  cudaMalloc((void**)&Ainv_colk , 1*N*sizeof(float));
  cudaMalloc((void**)&new_row   , 1*N*sizeof(float));
  cudaMalloc((void**)&delta     , 1*N*sizeof(float));
}

void cuda_walker::resize(int nup, int ndown)
{
  N[0] = nup;
  N[1] = ndown;
  dets[0].resize(N[0]);
  dets[1].resize(N[1]);
}

cuda_population::cuda_population() : MaxPop(1000)
{
  A_vec.resize(MaxPop);
  Ainv_vec.resize(MaxPop);
  delta_vec.resize(MaxPop);
  Ainv_delta_vec.resize(MaxPop);
  Ainv_colk_vec.resize(MaxPop);
  ratio_vec.resize(MaxPop);
  // Positions are stored with a 4-float stride per walker, matching the
  // 4*MaxPop floats allocated for pos_d below and the copy in calc_new_row.
  pos_vec.resize(4*MaxPop);
  cudaMalloc((void**) &A_list_d,          MaxPop*sizeof(float*));
  cudaMalloc((void**) &Ainv_list_d,       MaxPop*sizeof(float*));
  cudaMalloc((void**) &Ainv_delta_list_d, MaxPop*sizeof(float*));
  cudaMalloc((void**) &Ainv_colk_list_d,  MaxPop*sizeof(float*));
  cudaMalloc((void**) &delta_list_d,      MaxPop*sizeof(float*));
  cudaMalloc((void**) &ratios_d,          MaxPop*sizeof(float));
  cudaMalloc((void**) &pos_d,             4*MaxPop*sizeof(float));
}

__global__ static void
update_inverse_cuda1 (float *A_g[], float *Ainv_g[], float *u_g[],
                      float *Ainv_delta_g[], float *Ainv_colk_g[],
                      int N, int rowstride, int k);

__global__ static void
update_inverse_cuda2 (float *Ainv_g[], float *u_g[],
                      float *Ainv_delta_g[], float *Ainv_colk_g[],
                      int N, int rowstride, int k);

void cuda_population::calc_new_row(int elec)
{
  int detnum = (elec < num_elecs[0]) ? 0 : 1;
  int N = num_elecs[detnum];
  for (int wi=0; wi<walkers.size(); wi++) {
    cuda_walker &w = walkers[wi];
    cuda_determinant &det = w.dets[detnum];
    pos_vec[4*wi+0] = w.R[3*elec+0];
    pos_vec[4*wi+1] = w.R[3*elec+1];
    pos_vec[4*wi+2] = w.R[3*elec+2];
    delta_vec[wi]   = det.delta;
  }
  cudaMemcpy(pos_d, &(pos_vec[0]), 4*walkers.size()*sizeof(float),
             cudaMemcpyHostToDevice);
  cudaMemcpy(delta_list_d, &(delta_vec[0]), walkers.size()*sizeof(float*),
             cudaMemcpyHostToDevice);
  dim3 dimBlock(SPLINE_BLOCK_SIZE);
  dim3 dimGrid (N/SPLINE_BLOCK_SIZE, walkers.size());
  eval_multi_multi_UBspline_3d_s_cuda<<<dimGrid,dimBlock>>>
    (pos_d, multi_spline->gridInv, multi_spline->coefs,
     delta_list_d, multi_spline->stride);
}

void cuda_population::update_determinants(int elec)
{
  int index=0;
  int detnum = (elec < num_elecs[0]) ? 0 : 1;
  int N = num_elecs[detnum];
  int row = (elec < num_elecs[0]) ?
elec : elec - num_elecs[0]; for (int wi=0; wi<walkers.size(); wi++) { cuda_walker &w = walkers[wi]; cuda_determinant &det = w.dets[detnum]; if (w.accept) { A_vec[index] = det.A; Ainv_vec[index] = det.Ainv; Ainv_delta_vec[index] = det.Ainv_delta; Ainv_colk_vec[index] = det.Ainv_colk; delta_vec[index] = det.delta; index++; } } int num_accept = index; cudaMemcpy (A_list_d, &(A_vec[0]), num_accept*sizeof(float*), cudaMemcpyHostToDevice); cudaMemcpy (Ainv_list_d, &(Ainv_vec[0]), num_accept*sizeof(float*), cudaMemcpyHostToDevice); cudaMemcpy (Ainv_delta_list_d, &(Ainv_delta_vec[0]), num_accept*sizeof(float*), cudaMemcpyHostToDevice); cudaMemcpy (Ainv_colk_list_d, &(Ainv_colk_vec[0]), num_accept*sizeof(float*), cudaMemcpyHostToDevice); cudaMemcpy (delta_list_d, &(delta_vec[0]), num_accept*sizeof(float*), cudaMemcpyHostToDevice); dim3 dimBlock(DET_BLOCK_SIZE); dim3 dimGrid (N/DET_BLOCK_SIZE, num_accept); update_inverse_cuda1<<<dimGrid,dimBlock>>> (A_list_d, Ainv_list_d, delta_list_d, Ainv_delta_list_d, Ainv_colk_list_d, N, N, row); update_inverse_cuda2<<<dimGrid,dimBlock>>> (Ainv_list_d, delta_list_d, Ainv_delta_list_d, Ainv_colk_list_d, N, N, row); }; #define RATIO_BLOCK_SIZE 128 __global__ void calc_ratios1 (float *Ainv_list[], float *new_row_list[], float *Ainv_tran, float *new_row_tran, int N, int k, int row_stride, int num_mats) { int col = threadIdx.x + blockIdx.x*RATIO_BLOCK_SIZE; if (col < num_mats) { float* Ainv = Ainv_list[col]; float *new_row = new_row_list[col]; for (int row=0; row<N; row++) { // __shared__ new_row_tran_shared[RATIO_BLOCK_SIZE]; // __shared__ Ainv_tran_shared[RATIO_BLOCK_SIZE]; new_row_tran[row_stride*row + col] = new_row[row]; Ainv_tran[row_stride*row+col] = Ainv[row*N + k]; } } } __global__ void calc_ratios (float *Ainv_list[], float *new_row_list[], float *ratio, int N, int row_stride, int elec) { int tid = threadIdx.x; int col = /*blockIdx.x*RATIO_BLOCK_SIZE * */tid; __shared__ float *Ainv, *new_row; if (col < N) { if (tid == 0) { Ainv = Ainv_list[blockIdx.x]; new_row = new_row_list[blockIdx.x]; } __syncthreads(); __shared__ float new_row_shared[RATIO_BLOCK_SIZE]; new_row_shared[tid] = new_row[tid]; __shared__ float Ainv_colk_shared[RATIO_BLOCK_SIZE]; // This is *highly* uncoallesced, but we just have to eat it to allow // other kernels to operate quickly. 
Ainv_colk_shared[tid] = Ainv[col*row_stride + elec]; __syncthreads(); __shared__ float Ainv_new_row[RATIO_BLOCK_SIZE]; Ainv_new_row[tid] = Ainv_colk_shared[tid] * new_row_shared[tid]; __syncthreads(); // Now, we have to dot for (unsigned int s=RATIO_BLOCK_SIZE/2; s>0; s>>=1) { if (tid < s) Ainv_new_row[tid] += Ainv_new_row[tid + s]; __syncthreads(); } if (tid == 0) ratio[blockIdx.x] = Ainv_new_row[0]; } } __global__ void calc_ratios2 (float *Ainv_list[], float *new_row_list[], float *ratio, int N, int row_stride, int elec) { int tid = threadIdx.x; __shared__ float *Ainv, *new_row; if (tid == 0) { Ainv = Ainv_list[blockIdx.x]; new_row = new_row_list[blockIdx.x]; } __syncthreads(); int numBlocks = N/RATIO_BLOCK_SIZE; float sum=0.0; for (int block=0; block<numBlocks; block++) { int row = block*RATIO_BLOCK_SIZE + tid; __shared__ float new_row_shared[RATIO_BLOCK_SIZE]; new_row_shared[tid] = new_row[block*RATIO_BLOCK_SIZE+tid]; __syncthreads(); for (int i=0; i<RATIO_BLOCK_SIZE; i++) if (tid==0) sum += Ainv[row*row_stride + elec] * new_row_shared[i]; } if (tid==0) ratio[blockIdx.x] = sum; } extern "C" void dgetrf_(int *m, int *n, double A[], int *lda, int ipiv[], int *info); double Determinant (double *A, int N) { double LU[N*N]; int ipiv[N]; int info; for (int i=0; i<N*N; i++) LU[i] = A[i]; // Do LU factorization dgetrf_ (&N, &N, LU, &N, ipiv, &info); double det = 1.0; int numPerm = 0; for (int i=0; i<N; i++) { det *= LU[i*N+i]; numPerm += (ipiv[i] != (i+1)); } if (numPerm & 1) det *= -1.0; return det; } template<typename T> void GJInverse (T *A, int n) { const int maxSize = 2000; if (n == 2) { // Special case for 2x2 T a=A[0]; T b=A[1]; T c=A[2]; T d=A[3]; T detInv = 1.0/(a*d-b*c); A[0] = d*detInv; A[1] = -b*detInv; A[2] = -c*detInv; A[3] = a*detInv; return; } int colIndex[maxSize], rowIndex[maxSize], ipiv[maxSize]; T big, pivInv; int icol, irow; for (int j=0; j<n; j++) ipiv[j] = -1; for (int i=0; i<n; i++) { big = 0.0; for (int j=0; j<n; j++) if (ipiv[j] != 0) for (int k=0; k<n; k++) { if (ipiv[k] == -1) { if (fabs(A[n*j+k]) >= big) { big = fabs(A[n*j+k]); irow = j; icol = k; } } else if (ipiv[k] > 0) { fprintf (stderr, "GJInverse: Singular matrix!\n"); exit(1); } } ++(ipiv[icol]); if (irow != icol) for (int l=0; l<n; l++) { T tmp = A[n*irow+l]; A[n*irow+l] = A[n*icol+l]; A[n*icol+l] = tmp; // swap (A[n*irow+l], A[n*icol+l]); } rowIndex[i] = irow; colIndex[i] = icol; if (A[n*icol+icol] == 0.0) { fprintf (stderr, "GJInverse: Singular matrix!\n"); exit(1); } pivInv = 1.0/A[n*icol+icol]; A[n*icol+icol] = 1.0; for (int l=0; l<n; l++) A[n*icol+l] *= pivInv; for (int ll=0; ll<n; ll++) if (ll != icol) { T dum = A[n*ll+icol]; A[n*ll+icol] = 0.0; for (int l=0; l<n; l++) A[n*ll+l] -= A[n*icol+l]*dum; } } // Now unscramble the permutations for (int l=n-1; l>=0; l--) { if (rowIndex[l] != colIndex[l]) for (int k=0; k<n ; k++) { T tmp = A[n*k+rowIndex[l]]; A[n*k+rowIndex[l]] = A[n*k+colIndex[l]]; A[n*k+colIndex[l]] = tmp; // swap (A(k,rowIndex[l]),A(k, colIndex[l])); } } } void test_ratio() { //const int N = RATIO_BLOCK_SIZE; const int N = 128; const int numWalkers = 1024; const int elec = 15; float **AinvList, **uList; float **AinvList_d, **uList_d, *ratio_d; AinvList = (float**) malloc(numWalkers*sizeof(float*)); uList = (float**) malloc(numWalkers*sizeof(float*)); for (int i=0; i<numWalkers; i++) { cudaMalloc((void**)&(AinvList[i]), N*N*sizeof(float)); cudaMalloc((void**)&(uList[i]), N*sizeof(float)); } fprintf (stderr, "N = %d\n", N); cudaMalloc((void**)&(AinvList_d), numWalkers*sizeof(float*)); 
cudaMalloc((void**)&(uList_d), numWalkers*sizeof(float*));
cudaMalloc((void**)&ratio_d, numWalkers*sizeof(float));
cudaMemcpy (AinvList_d, AinvList, numWalkers*sizeof(float*), cudaMemcpyHostToDevice);
cudaMemcpy ( uList_d,   uList,    numWalkers*sizeof(float*), cudaMemcpyHostToDevice);

dim3 dimBlock(RATIO_BLOCK_SIZE);
dim3 dimGrid(numWalkers);

// Build a random matrix A, its inverse, and a replacement row u on the host.
double *A    = (double*)malloc(N*N*sizeof(double));
float  *Ainv = (float*)  malloc(N*N*sizeof(float));
float  *u    = (float*)  malloc(N*sizeof(float));
for (int i=0; i<N; i++) {
  u[i] = drand48();
  for (int j=0; j<N; j++)
    A[i*N+j] = Ainv[i*N+j] = (float)drand48();
}
GJInverse(Ainv, N);

// Host reference: ratio of determinants before/after replacing row 'elec'.
double det1 = Determinant (A, N);
for (int i=0; i<N; i++)
  A[elec*N+i] = u[i];
double det2 = Determinant (A, N);
fprintf (stderr, "Host ratio = %1.8f\n", det2/det1);

for (int wi=0; wi<numWalkers; wi++) {
  cudaMemcpy (AinvList[wi], Ainv, N*N*sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy (uList[wi],    u,    1*N*sizeof(float), cudaMemcpyHostToDevice);
}

clock_t start = clock();
for (int i=0; i<10*N; i++)
  calc_ratios<<<dimGrid,dimBlock>>> (AinvList_d, uList_d, ratio_d, N, N, elec);
clock_t end = clock();

float ratio;
cudaMemcpy (&ratio, &(ratio_d[331]), sizeof(float), cudaMemcpyDeviceToHost);
fprintf (stderr, "Device ratio = %1.8f\n", ratio);

double time = (double)(end-start)/(double)CLOCKS_PER_SEC;
double rate = 10.0/time;
fprintf (stderr, "Rate = %1.3f generations per second.\n", rate);
}

void test_ratio1()
{
  const int N = 128;
  const int numWalkers = 1024;
  float **AinvList, **uList;
  float **AinvList_d, **uList_d, *ratio_d;
  float *Ainv_tran, *ratio_tran;

  AinvList = (float**) malloc(numWalkers*sizeof(float*));
  uList    = (float**) malloc(numWalkers*sizeof(float*));

  // N x numWalkers float scratch arrays for the transposed layout.
  cudaMalloc ((void**) &Ainv_tran,  N*numWalkers*sizeof(float));
  cudaMalloc ((void**) &ratio_tran, N*numWalkers*sizeof(float));

  for (int i=0; i<numWalkers; i++) {
    cudaMalloc((void**)&(AinvList[i]), N*N*sizeof(float));
    cudaMalloc((void**)&(uList[i]),    N*sizeof(float));
  }
  fprintf (stderr, "N = %d\n", N);

  cudaMalloc((void**)&(AinvList_d), numWalkers*sizeof(float*));
  cudaMalloc((void**)&(uList_d),    numWalkers*sizeof(float*));
  cudaMalloc((void**)&ratio_d,      numWalkers*sizeof(float));
  cudaMemcpy (AinvList_d, AinvList, numWalkers*sizeof(float*), cudaMemcpyHostToDevice);
  cudaMemcpy ( uList_d,   uList,    numWalkers*sizeof(float*), cudaMemcpyHostToDevice);

  dim3 dimBlock(RATIO_BLOCK_SIZE);
  dim3 dimGrid(numWalkers/RATIO_BLOCK_SIZE);

  clock_t start = clock();
  for (int i=0; i<10*N; i++)
    calc_ratios1<<<dimGrid,dimBlock>>> (AinvList_d, uList_d, Ainv_tran, ratio_tran,
                                        N, 1, numWalkers, numWalkers);
  clock_t end = clock();

  double time = (double)(end-start)/(double)CLOCKS_PER_SEC;
  double rate = 10.0/time;
  fprintf (stderr, "Rate = %1.3f generations per second.\n", rate);
}

int main()
{
  test_ratio();
  return 0;
}
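/*
 * Hypothetical host helper (not part of the file above) restating the
 * quantity the calc_ratios / calc_ratios2 kernels compute per walker: if row
 * k of A is replaced by the vector u, the matrix-determinant lemma gives
 *
 *     det(A') / det(A) = sum_j u[j] * Ainv[j*N + k],
 *
 * i.e. the dot product of the new row with column k of A^{-1}.  With the data
 * prepared in test_ratio(), ratio_reference(Ainv, u, N, elec) should agree
 * with both det2/det1 from the LU path and the value read back from ratio_d.
 */
static double ratio_reference(const float* Ainv, const float* u, int N, int k)
{
    double ratio = 0.0;
    for (int j = 0; j < N; j++)
        ratio += (double)u[j] * (double)Ainv[j*N + k];   // column k of A^{-1}
    return ratio;
}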
// ---------------------------------------------------------------------------
// next source file
// ---------------------------------------------------------------------------
#include "sample_kmers.h" #include "utils.h" #include <nvbio/basic/pipeline_context.h> #include <nvbio/basic/numbers.h> #include <nvbio/basic/bloom_filter.h> #include <nvbio/basic/primitives.h> #include <nvbio/basic/console.h> #include <nvbio/basic/timer.h> #include <nvbio/basic/threads.h> #include <nvbio/basic/system.h> #include <nvbio/basic/exceptions.h> #include <nvbio/basic/cuda/ldg.h> #include <nvbio/basic/cuda/arch.h> #include <nvbio/io/sequence/sequence.h> #include <nvbio/strings/prefetcher.h> #include <stdio.h> #include <stdlib.h> using namespace nvbio; /// /// A functor to sample kmers and insert them in a Bloom filter /// template <typename string_set_type, typename filter_type> struct SampleKmersFunctor { /// constructor /// ///\param _k kmer length ///\param _alpha the sampling frequency ///\param _string_set the input string set to sample ///\param _filter the kmer Bloom filter /// NVBIO_HOST_DEVICE SampleKmersFunctor( const uint32 _k, const float _alpha, const string_set_type _string_set, filter_type _filter) : k(_k), kmask( (uint64(1u) << (k*2))-1u ), alpha( _alpha ), string_set( _string_set ), filter(_filter) {} /// functor operator /// ///\param i input string index /// NVBIO_HOST_DEVICE void operator() (const uint32 i) const { typedef typename string_set_type::string_type string_type; typedef typename string_traits<string_type>::forward_iterator forward_iterator; // fetch the i-th string const string_type string = string_set[i]; const uint32 len = length( string ); if (len < k) return; // build a forward string iterator forward_iterator it( string.begin() ); // start with an empty kmer uint64 kmer = 0u; uint32 kmer_len = 0u; // initialie a random number generator LCG_random random( hash(i) ); for (uint32 j = 0; j < len; ++j) { // fetch the next character const uint8 c = *it; ++it; if (c < 4) // make sure this is not an N { kmer |= c; // insert the new character at the end of the kmer (in a big-endian encoding) if (kmer_len < k) kmer_len++; if (kmer_len >= k) // check whether we have an actual 'k'-mer { if (float( random.next() ) / float(LCG_random::MAX) < alpha) { // insert the kmer filter.insert( kmer ); } } // shift the kmer to the right, dropping the last symbol kmer <<= 2; kmer &= kmask; } else { // an N, skip all k-mers containing it it += k-1; j += k-1; // and reset the kmer kmer = 0u; kmer_len = 0u; } } } const uint32 k; const uint64 kmask; const float alpha; string_set_type string_set; mutable filter_type filter; }; // process the next batch // bool SampleKmersStage::process(PipelineContext& context) { typedef nvbio::io::SequenceDataAccess<DNA_N>::sequence_string_set_type string_set_type; // declare the Bloom filter type typedef nvbio::blocked_bloom_filter<hash_functor1, hash_functor2, uint64_2*> filter_type; typedef SampleKmersFunctor<string_set_type,filter_type> functor_type; // fetch the input nvbio::io::SequenceDataHost* h_read_data = context.input<nvbio::io::SequenceDataHost>( 0 ); float time = 0.0f; // introduce a timing scope try { const nvbio::ScopedTimer<float> timer( &time ); if (device >= 0) { // // Device (GPU) path // // set the device cudaSetDevice( device ); // copy it to the device nvbio::io::SequenceDataDevice d_read_data( *h_read_data ); // build a view const nvbio::io::SequenceDataAccess<DNA_N> d_read_view( d_read_data ); // build the Bloom filter filter_type filter( SAMPLED_KMERS_FILTER_K, filter_size, (uint64_2*)filter_storage ); //filter_type filter( filter_size, filter_storage ); // build the kmer sampling functor const functor_type 
kmer_filter( k, alpha, d_read_view.sequence_string_set(), filter ); device_for_each( d_read_view.size(), kmer_filter ); cudaDeviceSynchronize(); cuda::check_error("sample-kmers"); } else { // // Host (CPU) path // omp_set_num_threads( -device ); // build a view const io::SequenceDataAccess<DNA_N> h_read_view( *h_read_data ); // build the Bloom filter filter_type filter( SAMPLED_KMERS_FILTER_K, filter_size, (uint64_2*)filter_storage ); // build the kmer sampling functor const functor_type kmer_filter( k, alpha, h_read_view.sequence_string_set(), filter ); host_for_each( h_read_view.size(), kmer_filter ); } } catch (nvbio::cuda_error &e) { log_error(stderr, "[SampleKmersStage] caught a nvbio::cuda_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (nvbio::bad_alloc &e) { log_error(stderr, "[SampleKmersStage] caught a nvbio::bad_alloc exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (nvbio::logic_error &e) { log_error(stderr, "[SampleKmersStage] caught a nvbio::logic_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (nvbio::runtime_error &e) { log_error(stderr, "[SampleKmersStage] caught a nvbio::runtime_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (thrust::system::system_error &e) { log_error(stderr, "[SampleKmersStage] caught a thrust::system_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (std::bad_alloc &e) { log_error(stderr, "[SampleKmersStage] caught a std::bad_alloc exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (std::logic_error &e) { log_error(stderr, "[SampleKmersStage] caught a std::logic_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (std::runtime_error &e) { log_error(stderr, "[SampleKmersStage] caught a std::runtime_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (...) { log_error(stderr, "[SampleKmersStage] caught an unknown exception!\n"); exit(1); } // update the time stats stats->m_mutex.lock(); stats->m_time += time; log_info(stderr, "\r processed reads [%llu, %llu] (%.1fM / %.2fG bps, %.1fK reads/s, %.1fM bps/s - %s<%d>) ", stats->m_reads, stats->m_reads + h_read_data->size(), 1.0e-6f * (h_read_data->bps()), 1.0e-9f * (stats->m_bps + h_read_data->bps()), stats->m_time ? (1.0e-3f * (stats->m_reads + h_read_data->size())) / stats->m_time : 0.0f, stats->m_time ? (1.0e-6f * (stats->m_bps + h_read_data->bps() )) / stats->m_time : 0.0f, device >= 0 ? "gpu" : "cpu", device >= 0 ? 
device : -device ); log_debug_cont(stderr, "\n"); log_debug(stderr," peak memory : %.1f GB\n", float( peak_resident_memory() ) / float(1024*1024*1024)); stats->m_reads += h_read_data->size(); stats->m_bps += h_read_data->bps(); stats->m_mutex.unlock(); return true; } /// /// A functor to sample kmers and insert them in a Bloom filter /// template <typename string_set_type, typename sampled_filter_type, typename trusted_filter_type, typename threshold_type> struct TrustedKmersFunctor { /// constructor /// ///\param _k kmer length ///\param _alpha the sampling frequency ///\param _string_set the input string set to sample ///\param _filter the kmer Bloom filter /// NVBIO_HOST_DEVICE TrustedKmersFunctor( const uint32 _k, const string_set_type _string_set, const sampled_filter_type _sampled_filter, trusted_filter_type _trusted_filter, const threshold_type _threshold) : k(_k), kmask( (uint64(1u) << (k*2))-1u ), string_set( _string_set ), sampled_filter(_sampled_filter), trusted_filter(_trusted_filter), threshold(_threshold) {} /// functor operator /// ///\param i input string index /// NVBIO_HOST_DEVICE void operator() (const uint32 i) const { typedef typename string_set_type::string_type string_type; typedef nvbio::StringPrefetcher< string_type, nvbio::lmem_cache_tag<MAX_READ_LENGTH> > string_prefetcher_type; typedef typename string_prefetcher_type::string_type local_string_type; typedef typename nvbio::string_traits<local_string_type>::forward_iterator forward_iterator; //bool occur[MAX_READ_LENGTH]; uint32 occur_storage[MAX_READ_LENGTH/32]; nvbio::PackedStream<uint32*,uint8,1u,false> occur( occur_storage ); // instantiate a prefetcher string_prefetcher_type string_prefetcher; // fetch the i-th string //const string_type string = string_set[i]; const local_string_type string = string_prefetcher.load( string_set[i] ); const uint32 len = length( string ); if (len < k) return; // build a forward string iterator forward_iterator it( string.begin() ); // start with an empty kmer uint64 kmer = 0u; uint32 kmer_len = 0u; const uint32 occur_cnt = len - k + 1; // initialize all to false for (uint32 j = 0; j < (occur_cnt+31)/32; ++j) occur_storage[j] = 0u; // mark occurring kmers for (uint32 j = 0; j < len; ++j) { // fetch the next character const uint8 c = *it; ++it; if (c < 4) // make sure this is not an N { kmer |= c; // insert the new character at the end of the kmer (in a big-endian encoding) if (kmer_len < k) kmer_len++; if (kmer_len >= k) // check whether we have an actual 'k'-mer { if (sampled_filter[ kmer ]) occur[j - k + 1] = true; } // shift the kmer to the right, dropping the last symbol kmer <<= 2; kmer &= kmask; } else { // an N, skip all kmers containing it it += k-1; j += k-1; // and reset the kmer kmer = 0u; kmer_len = 0u; } } // mark trusted kmers int32 zero_cnt = 0; int32 one_cnt = 0; // reset the forward iterator it = forward_iterator( string.begin() ); // start with an empty kmer kmer = 0u; kmer_len = 0u; // keep a k-bits mask of trusted positions const uint64 trusted_mask = (uint64(1u) << k) - 1u; uint64 trusted = 0u; for (uint32 j = 0; j < len; ++j) { if (j >= k) { if (occur[j - k]) --one_cnt; else --zero_cnt; } if (j < occur_cnt) { if (occur[j]) ++one_cnt; else ++zero_cnt; } const int32 sum = one_cnt + zero_cnt; //if (qual[j] <= bad_quality) //{ // trusted[j] = false; // continue ; //} trusted |= (one_cnt > threshold[sum]) ? 
1u : 0u; // fetch the next character const uint8 c = *it; ++it; if (c < 4) // if an N, skip it (the kmers containing it will be marked as untrusted and skipped as well) { kmer |= c; // insert the new character at the end of the kmer (in a big-endian encoding) if (popc( trusted ) == k) // check whether we have an actual 'k'-mer - i.e. k trusted positions in a row trusted_filter.insert( kmer ); } // shift the kmer to the right, dropping the last symbol kmer <<= 2; kmer &= kmask; // shift the trusted bits by one to the right, dropping the last symbol trusted <<= 1; trusted &= trusted_mask; } } const uint32 k; const uint64 kmask; string_set_type string_set; const sampled_filter_type sampled_filter; mutable trusted_filter_type trusted_filter; const threshold_type threshold; }; // process the next batch // bool TrustedKmersStage::process(PipelineContext& context) { typedef nvbio::io::SequenceDataAccess<DNA_N>::sequence_string_set_type string_set_type; // fetch the input nvbio::io::SequenceDataHost* h_read_data = context.input<nvbio::io::SequenceDataHost>( 0 ); float time = 0.0f; // introduce a timing scope try { const nvbio::ScopedTimer<float> timer( &time ); if (device >= 0) { // // Device (GPU) path // // declare the Bloom filter types typedef nvbio::blocked_bloom_filter<hash_functor1, hash_functor2, nvbio::cuda::ldg_pointer<uint4> > sampled_filter_type; typedef nvbio::blocked_bloom_filter<hash_functor1, hash_functor2, uint64_2*> trusted_filter_type; typedef TrustedKmersFunctor<string_set_type,sampled_filter_type,trusted_filter_type, cuda::ldg_pointer<uint32> > functor_type; // set the device cudaSetDevice( device ); // copy it to the device io::SequenceDataDevice d_read_data( *h_read_data ); // build a view const io::SequenceDataAccess<DNA_N> d_read_view( d_read_data ); // build the Bloom filter sampled_filter_type sampled_filter( SAMPLED_KMERS_FILTER_K, sampled_filter_size, (const uint4*)sampled_filter_storage ); trusted_filter_type trusted_filter( TRUSTED_KMERS_FILTER_K, trusted_filter_size, (uint64_2*)trusted_filter_storage ); // build the kmer sampling functor const functor_type kmer_filter( k, d_read_view.sequence_string_set(), sampled_filter, trusted_filter, cuda::make_ldg_pointer(threshold) ); // and apply the functor to all reads in the batch device_for_each( d_read_view.size(), kmer_filter ); cudaDeviceSynchronize(); cuda::check_error("mark-trusted-kmers"); } else { // // Host (CPU) path // omp_set_num_threads( -device ); // declare the Bloom filter types typedef nvbio::blocked_bloom_filter<hash_functor1, hash_functor2, const uint64_2*> sampled_filter_type; typedef nvbio::blocked_bloom_filter<hash_functor1, hash_functor2, uint64_2*> trusted_filter_type; typedef TrustedKmersFunctor<string_set_type,sampled_filter_type,trusted_filter_type,const uint32*> functor_type; // build a view const nvbio::io::SequenceDataAccess<DNA_N> h_read_view( *h_read_data ); // build the Bloom filter sampled_filter_type sampled_filter( SAMPLED_KMERS_FILTER_K, sampled_filter_size, (const uint64_2*)sampled_filter_storage ); trusted_filter_type trusted_filter( TRUSTED_KMERS_FILTER_K, trusted_filter_size, (uint64_2*)trusted_filter_storage ); // build the kmer sampling functor const TrustedKmersFunctor<string_set_type,sampled_filter_type,trusted_filter_type,const uint32*> kmer_filter( k, h_read_view.sequence_string_set(), sampled_filter, trusted_filter, threshold ); // and apply the functor to all reads in the batch host_for_each( h_read_view.size(), kmer_filter ); } } catch (nvbio::cuda_error &e) { 
log_error(stderr, "[TrustedKmersStage] caught a nvbio::cuda_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (nvbio::bad_alloc &e) { log_error(stderr, "[TrustedKmersStage] caught a nvbio::bad_alloc exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (nvbio::logic_error &e) { log_error(stderr, "[TrustedKmersStage] caught a nvbio::logic_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (nvbio::runtime_error &e) { log_error(stderr, "[TrustedKmersStage] caught a nvbio::runtime_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (thrust::system::system_error &e) { log_error(stderr, "[TrustedKmersStage] caught a thrust::system_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (std::bad_alloc &e) { log_error(stderr, "[TrustedKmersStage] caught a std::bad_alloc exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (std::logic_error &e) { log_error(stderr, "[TrustedKmersStage] caught a std::logic_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (std::runtime_error &e) { log_error(stderr, "[TrustedKmersStage] caught a std::runtime_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (...) { log_error(stderr, "[TrustedKmersStage] caught an unknown exception!\n"); exit(1); } // update the time stats stats->m_mutex.lock(); stats->m_time += time; log_info(stderr, "\r processed reads [%llu, %llu] (%.1fM / %.2fG bps, %.1fK reads/s, %.1fM bps/s - %s<%d>) ", stats->m_reads, stats->m_reads + h_read_data->size(), 1.0e-6f * (h_read_data->bps()), 1.0e-9f * (stats->m_bps + h_read_data->bps()), stats->m_time ? (1.0e-3f * (stats->m_reads + h_read_data->size())) / stats->m_time : 0.0f, stats->m_time ? (1.0e-6f * (stats->m_bps + h_read_data->bps() )) / stats->m_time : 0.0f, device >= 0 ? "gpu" : "cpu", device >= 0 ? device : -device ); log_debug_cont(stderr, "\n"); log_debug(stderr," peak memory : %.1f GB\n", float( peak_resident_memory() ) / float(1024*1024*1024)); stats->m_reads += h_read_data->size(); stats->m_bps += h_read_data->bps(); stats->m_mutex.unlock(); return true; }
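/*
 * Host-side restatement (illustration only, with plain standard types) of the
 * rolling 2-bit k-mer window shared by SampleKmersFunctor and
 * TrustedKmersFunctor above: each base in {0,1,2,3} is OR'd into the low bits,
 * the word is shifted left by two, and kmask keeps only the most recent k
 * symbols; a base >= 4 (an N) skips its k-mers and resets the window.
 * for_each_kmer and its handle callback are hypothetical names -- the real
 * functors insert into a nvbio::blocked_bloom_filter, and SampleKmersFunctor
 * additionally keeps only a fraction alpha of the k-mers it sees.
 */
#include <stdint.h>

template <typename Handler>
void for_each_kmer(const uint8_t* bases, uint32_t len, uint32_t k, Handler handle)
{
    const uint64_t kmask = (uint64_t(1u) << (k*2)) - 1u;

    uint64_t kmer     = 0u;   // packed window, 2 bits per symbol
    uint32_t kmer_len = 0u;   // number of valid symbols currently in the window

    for (uint32_t j = 0; j < len; ++j)
    {
        const uint8_t c = bases[j];
        if (c < 4)                        // A/C/G/T
        {
            kmer |= c;                    // append the new symbol
            if (kmer_len < k) kmer_len++;
            if (kmer_len >= k)            // a full k-mer is in the window
                handle(kmer);             // e.g. filter.insert(kmer)
            kmer <<= 2;                   // make room for the next symbol
            kmer &= kmask;
        }
        else                              // an N: skip its k-mers, reset window
        {
            j += k-1;
            kmer     = 0u;
            kmer_len = 0u;
        }
    }
}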
namespace AggMIS { namespace GraphHelpers { namespace Kernels { __global__ void mapAdjacencyToBlockKernel(int size, int *adjIndexes, int *adjacency, int *adjacencyBlockLabel, int *blockMappedAdjacency, int *fineAggregate) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < size) { int begin = adjIndexes[idx]; int end = adjIndexes[idx + 1]; int thisBlock = fineAggregate[idx]; // Fill block labeled adjacency and block mapped adjacency vectors for (int i = begin; i < end; i++) { int neighbor = fineAggregate[adjacency[i]]; if (thisBlock == neighbor) { adjacencyBlockLabel[i] = -1; blockMappedAdjacency[i] = -1; } else { adjacencyBlockLabel[i] = thisBlock; blockMappedAdjacency[i] = neighbor; } } } } __global__ void findPartIndicesNegStartKernel(int size, int *array, int *partIndices) { int idx = blockIdx.x * blockDim.x + threadIdx.x + 1; if (idx < size) { int value = array[idx]; int nextValue = array[idx + 1]; if (value != nextValue) partIndices[value + 1] = idx; } } __global__ void findPartIndicesKernel(int size, int *array, int *partIndices) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < size) { int value = array[idx]; int nextValue = (idx != size - 1) ? array[idx + 1] : -1; if (value != nextValue) { partIndices[value + 1] = idx + 1; } } } __global__ void findAdjacencySizesKernel(int size, int *adjIndexes, int *output) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < size) { output[idx] = adjIndexes[idx + 1] - adjIndexes[idx]; } } __global__ void accumulatedPartSizesKernel(int size, int *part, int *weights, int *accumulatedSize) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx == size - 1) accumulatedSize[part[idx]] = weights[idx]; if (idx < size - 1) { int thisPart = part[idx]; if (thisPart != part[idx + 1]) accumulatedSize[thisPart] = weights[idx]; } } __global__ void unaccumulatedPartSizesKernel(int size, int *accumulatedSize, int *sizes) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx == 0) sizes[idx] = accumulatedSize[0]; else if (idx < size) { sizes[idx] = accumulatedSize[idx] - accumulatedSize[idx - 1]; } } } AggMIS::Types::Graph_d* GetInducedGraph(AggMIS::Types::Graph_d &graph, AggMIS::Types::IntVector_d &aggregation) { // Get references to parts of input graph and output graph AggMIS::Types::IntVector_d &adjIndexesIn = *(graph.indices); AggMIS::Types::IntVector_d &adjacencyIn = *(graph.adjacency); AggMIS::Types::Graph_d *result = new AggMIS::Types::Graph_d(); AggMIS::Types::IntVector_d &adjIndexesOut = *(result->indices); AggMIS::Types::IntVector_d &adjacencyOut = *(result->adjacency); // Declaring temporary vectors: AggMIS::Types::IntVector_d adjacencyBlockLabel, blockMappedAdjacency; adjacencyBlockLabel.resize(adjacencyIn.size(), 0); blockMappedAdjacency.resize(adjacencyIn.size(), 0); // Get the blocklabeled adjacency: mapAdjacencyToBlock(adjIndexesIn, adjacencyIn, adjacencyBlockLabel, blockMappedAdjacency, aggregation); // Zip up the block label and block mapped vectors and sort: thrust::sort(thrust::make_zip_iterator( thrust::make_tuple(adjacencyBlockLabel.begin(), blockMappedAdjacency.begin())), thrust::make_zip_iterator( thrust::make_tuple(adjacencyBlockLabel.end(), blockMappedAdjacency.end()))); // Remove Duplicates and resize: int newSize = thrust::unique( thrust::make_zip_iterator( thrust::make_tuple(adjacencyBlockLabel.begin(), blockMappedAdjacency.begin())), thrust::make_zip_iterator( thrust::make_tuple(adjacencyBlockLabel.end(), blockMappedAdjacency.end()))) - 
thrust::make_zip_iterator(thrust::make_tuple(adjacencyBlockLabel.begin(), blockMappedAdjacency.begin())); adjacencyBlockLabel.resize(newSize); blockMappedAdjacency.resize(newSize); getPartIndicesNegStart(adjacencyBlockLabel, adjIndexesOut); adjacencyOut.resize(blockMappedAdjacency.size() - 1); thrust::copy(blockMappedAdjacency.begin() + 1, blockMappedAdjacency.end(), adjacencyOut.begin()); return result; } AggMIS::Types::Graph_h* GetInducedGraph(AggMIS::Types::Graph_h& graph, AggMIS::Types::IntVector_h& aggregation) { // Get references to graph indices and adjacency AggMIS::Types::IntVector_h &ind = *(graph.indices); AggMIS::Types::IntVector_h &adj = *(graph.adjacency); // A temporary container for the induced graph std::vector<std::vector<int> > tempGraph; // Filling in the temporary container for (int node = 0; node < graph.Size(); node++) { int startAgg = aggregation[node]; for (int nIt = ind[node]; nIt < ind[node + 1]; nIt++) { int endAgg = aggregation[adj[nIt]]; // If this is an edge between two aggregates add to // the induced graph. if (startAgg != endAgg && startAgg < endAgg) { // Making sure that there are entries in temp if (endAgg >= tempGraph.size()) tempGraph.resize(endAgg + 1); // Adding edge entries if (tempGraph[startAgg].size() == 0 || !(std::binary_search(tempGraph[startAgg].begin(), tempGraph[startAgg].end(), endAgg))) { tempGraph[startAgg].push_back(endAgg); std::sort(tempGraph[startAgg].begin(), tempGraph[startAgg].end()); } if (tempGraph[endAgg].size() == 0 || !(std::binary_search(tempGraph[endAgg].begin(), tempGraph[endAgg].end(), startAgg))) { tempGraph[endAgg].push_back(startAgg); std::sort(tempGraph[endAgg].begin(), tempGraph[endAgg].end()); } } } } // Copying out to graph format AggMIS::Types::Graph_h *result = new AggMIS::Types::Graph_h(); AggMIS::Types::IntVector_h &indOut = *(result->indices); AggMIS::Types::IntVector_h &adjOut = *(result->adjacency); // Getting new indices indOut.resize(tempGraph.size() + 1); indOut[0] = 0; for (int i = 0; i < tempGraph.size(); i++) indOut[i + 1] = indOut[i] + tempGraph[i].size(); // Writing out adjacency adjOut.resize(indOut.back()); int insertAt = 0; for (int i = 0; i < tempGraph.size(); i++) { for (int j = 0; j < tempGraph[i].size(); j++) { adjOut[insertAt++] = tempGraph[i][j]; } } return result; } void mapAdjacencyToBlock(AggMIS::Types::IntVector_d &adjIndexes, AggMIS::Types::IntVector_d &adjacency, AggMIS::Types::IntVector_d &adjacencyBlockLabel, AggMIS::Types::IntVector_d &blockMappedAdjacency, AggMIS::Types::IntVector_d &fineAggregate) { int size = adjIndexes.size() - 1; // Get pointers to device memory of arrays int *adjIndexes_d = thrust::raw_pointer_cast(&adjIndexes[0]); int *adjacency_d = thrust::raw_pointer_cast(&adjacency[0]); int *adjacencyBlockLabel_d = thrust::raw_pointer_cast(&adjacencyBlockLabel[0]); int *blockMappedAdjacency_d = thrust::raw_pointer_cast(&blockMappedAdjacency[0]); int *fineAggregate_d = thrust::raw_pointer_cast(&fineAggregate[0]); // Figuring out block sizes for kernel call: int blockSize = 256; int nBlocks = size / blockSize + (size % blockSize == 0 ? 
0 : 1); // Calling kernel: Kernels::mapAdjacencyToBlockKernel << < nBlocks, blockSize >> > (size, adjIndexes_d, adjacency_d, adjacencyBlockLabel_d, blockMappedAdjacency_d, fineAggregate_d); } void getPartIndicesNegStart(AggMIS::Types::IntVector_d& sortedPartition, AggMIS::Types::IntVector_d& partIndices) { // Sizing the array: int maxPart = max(0, sortedPartition[sortedPartition.size() - 1]); partIndices.resize(maxPart + 2, 0); // Figuring out block sizes for kernel call: int size = sortedPartition.size(); int blockSize = 256; int nBlocks = size / blockSize + (size%blockSize == 0 ? 0 : 1); // Getting pointers int *sortedPartition_d = thrust::raw_pointer_cast(&sortedPartition[0]); int *partIndices_d = thrust::raw_pointer_cast(&partIndices[0]); // Calling kernel to find indices for each part: Kernels::findPartIndicesNegStartKernel << < nBlocks, blockSize >> > (size, sortedPartition_d, partIndices_d); partIndices[partIndices.size() - 1] = size - 1; } AggMIS::Types::IntVector_d* GetIndicesVector(int size) { thrust::counting_iterator<int> start(0); thrust::counting_iterator<int> end = start + size; return new AggMIS::Types::IntVector_d(start, end); } void SetToIndicesVector(int size, AggMIS::Types::IntVector_d& toSet) { thrust::counting_iterator<int> start(0); thrust::counting_iterator<int> end = start + size; toSet.assign(start, end); } void getPartSizes(AggMIS::Types::IntVector_d &aggregation, AggMIS::Types::IntVector_d &sizes) { // Make a copy of the partition vector to mess with: AggMIS::Types::IntVector_d temp(aggregation); // Sort the copy and find largest element thrust::sort(temp.begin(), temp.end()); int maxPart = temp[temp.size() - 1]; // Creating a new array size AggMIS::Types::IntVector_d partIndices(maxPart + 2, 0); // Figuring out block sizes for kernel call: int size = aggregation.size(); int blockSize = 256; int nBlocks = size / blockSize + (size%blockSize == 0 ? 0 : 1); // Getting pointers int *temp_d = thrust::raw_pointer_cast(&temp[0]); int *partIndices_d = thrust::raw_pointer_cast(&partIndices[0]); // Calling kernel to find indices for each part: Kernels::findPartIndicesKernel << < nBlocks, blockSize >> > (size, temp_d, partIndices_d); // Preparing sizes vector size = partIndices.size() - 1; sizes.resize(size); int *sizes_d = thrust::raw_pointer_cast(&sizes[0]); // Calling kernel to find sizes: Kernels::findAdjacencySizesKernel << < nBlocks, blockSize >> > (size, partIndices_d, sizes_d); // Cleaning up temp.resize(0); partIndices.resize(0); } void getPartSizes(AggMIS::Types::IntVector_d& aggregation, AggMIS::Types::IntVector_d& sizes, AggMIS::Types::IntVector_d& weights) { // Make copies to mess with AggMIS::Types::IntVector_d tempAgg(aggregation.begin(), aggregation.end()); AggMIS::Types::IntVector_d tempWeight(weights.begin(), weights.end()); // Sorting temp vectors together thrust::sort_by_key(tempAgg.begin(), tempAgg.end(), tempWeight.begin()); // Getting prefix sum of values thrust::inclusive_scan(tempWeight.begin(), tempWeight.end(), tempWeight.begin()); // Another temp vector for accumulated size at last nodes AggMIS::Types::IntVector_d accumulatedSize(tempAgg[tempAgg.size() - 1] + 1); // Preparing to call kernel to fill accumulated size vector int size = tempAgg.size(); // Figuring out block sizes for kernel call: int blockSize = 256; int nBlocks = size / blockSize + (size % blockSize == 0 ? 
0 : 1); Kernels::accumulatedPartSizesKernel << < nBlocks, blockSize >> > (size, AggMIS::Types::StartOf(tempAgg), AggMIS::Types::StartOf(tempWeight), AggMIS::Types::StartOf(accumulatedSize)); // Calling kernel to get the un-accumulated part sizes: size = accumulatedSize.size(); nBlocks = size / blockSize + (size % blockSize == 0 ? 0 : 1); sizes.resize(size); Kernels::unaccumulatedPartSizesKernel << < nBlocks, blockSize >> > (size, AggMIS::Types::StartOf(accumulatedSize), AggMIS::Types::StartOf(sizes)); } AggMIS::Types::IntVector_d* GetValences(AggMIS::Types::Graph_d &graph) { AggMIS::Types::IntVector_d *result = new AggMIS::Types::IntVector_d(graph.indices->size() - 1); thrust::adjacent_difference(graph.indices->begin() + 1, graph.indices->end(), result->begin()); return result; } AggMIS::Types::IntVector_h* GetValences(AggMIS::Types::Graph_h &graph) { AggMIS::Types::IntVector_h *result = new AggMIS::Types::IntVector_h(graph.indices->size() - 1); thrust::adjacent_difference(graph.indices->begin() + 1, graph.indices->end(), result->begin()); return result; } bool IsGraphValid(AggMIS::Types::Graph_d& graph) { // Call the override with the indices and adjacency of the graph return IsGraphValid(*graph.indices, *graph.adjacency); } bool IsGraphValid(AggMIS::Types::Graph_h& graph) { // Call the override with the indices and adjacency of the graph return IsGraphValid(*graph.indices, *graph.adjacency); } bool IsGraphValid(AggMIS::Types::IntVector_d& indices, AggMIS::Types::IntVector_d& adjacency) { // Get temporary host vectors to call with AggMIS::Types::IntVector_h tempInd(indices); AggMIS::Types::IntVector_h tempAdj(adjacency); // Call host vector override bool result = IsGraphValid(tempInd, tempAdj); // Clean up temp arrays tempInd.clear(); tempAdj.clear(); return result; } bool IsGraphValid(AggMIS::Types::IntVector_h& indices, AggMIS::Types::IntVector_h& adjacency) { // Get size of graph int graphSize = indices.size() - 1; // If there are no nodes return with error if (graphSize <= 0) { printf("Graph is empty, no nodes specified!\n"); return false; } // Check that the indices are all in order if (indices[0] != 0) { int first = indices[0]; printf("Indices are not proper, start with %d not 0\n", first); return false; } for (int i = 1; i < indices.size(); i++) { if (indices[i] <= indices[i - 1]) { int a = indices[i - 1]; int b = indices[i]; printf("Non-sequential indices: indices[%d]=%d but indices[%d]=%d\n", i - 1, a, i, b); return false; } } if (indices[indices.size() - 1] > adjacency.size()) { printf("Largest index points outside of adjacency array!\n"); return false; } // Check that adjacency contains only valid node Id's for (int i = 0; i < adjacency.size(); i++) { int nodeId = adjacency[i]; if (nodeId < 0 || nodeId >= graphSize) { printf("adjacency[%d]=%d but graphSize=%d\n", i, nodeId, graphSize); return false; } } // Check that all neighbor lists are mutually consistent int errorCount = 0; for (int i = 0; i < graphSize; i++) { int rootIdx = i; for (int j = indices[i]; j < indices[i + 1]; j++) { int neighborIdx = adjacency[j]; bool found = false; for (int jj = indices[neighborIdx]; jj < indices[neighborIdx + 1]; jj++) { if (adjacency[jj] == rootIdx) found = true; } if (!found) { printf("Node %d has neighbor %d but not reverse!\n", rootIdx, neighborIdx); errorCount++; } } } if (errorCount > 0) { printf("Found %d inconsistencies in adjacency.\n", errorCount); return false; } // If we haven't returned yet things are good. return true; } } }
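// --- A host-side analogue (not part of AggMIS) of the sort/unique strategy used by the
// --- device GetInducedGraph above: label every inter-aggregate edge with the pair
// --- (source aggregate, neighbor aggregate), sort, deduplicate, and rebuild CSR. The
// --- function and parameter names here are illustrative only.
#include <algorithm>
#include <utility>
#include <vector>

void InducedGraphHost(const std::vector<int>& indices,
                      const std::vector<int>& adjacency,
                      const std::vector<int>& aggregation,
                      std::vector<int>& indOut,
                      std::vector<int>& adjOut)
{
    // Label each edge with its aggregate pair; intra-aggregate edges are dropped.
    std::vector<std::pair<int, int> > edges;
    int numAgg = 0;
    for (size_t node = 0; node + 1 < indices.size(); ++node) {
        const int a = aggregation[node];
        numAgg = std::max(numAgg, a + 1);
        for (int e = indices[node]; e < indices[node + 1]; ++e) {
            const int b = aggregation[adjacency[e]];
            if (a != b) edges.push_back(std::make_pair(a, b));
        }
    }
    // Sort and deduplicate, mirroring the zipped thrust::sort / thrust::unique on the device.
    std::sort(edges.begin(), edges.end());
    edges.erase(std::unique(edges.begin(), edges.end()), edges.end());

    // Rebuild CSR: count edges per aggregate, prefix-sum, then copy the neighbor column
    // (edges are already grouped by source aggregate because of the sort).
    indOut.assign(numAgg + 1, 0);
    for (size_t i = 0; i < edges.size(); ++i) ++indOut[edges[i].first + 1];
    for (int i = 0; i < numAgg; ++i) indOut[i + 1] += indOut[i];
    adjOut.resize(edges.size());
    for (size_t i = 0; i < edges.size(); ++i) adjOut[i] = edges[i].second;
}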
typedef unsigned char uint8_t; typedef long long ll_t; typedef struct __device_builtin__ __builtin_align__(_NCS_) { uint8_t _VARNAMES_; } _uint8n_t; typedef union { _uint8n_t u8n; uint8_t val[_NCS_]; } uint8n_t; __device__ __forceinline__ float atomicMax(float *address, float val) { int ret = __float_as_int(*address); while(val > __int_as_float(ret)) { int old = ret; if((ret = atomicCAS((int *)address, old, __float_as_int(val))) == old) break; } return __int_as_float(ret); } __device__ void load_precomputed( const float *precomputed, _VOLATILE_ float *sMem, int nQuery ){ const int tid = threadIdx.x; const int qid = blockIdx.x; if (tid < 256){ #pragma unroll for (int i = 0; i < _M_; i++){ #if _TPB_ >= 256 int adr = (i * nQuery * _K_) + (qid * _K_) + (tid); sMem[i * _K_ + tid] = precomputed[adr]; #else #pragma unroll for (int j = 0; j < _K_ / _TPB_; j++){ int adr = (i * nQuery * _K_) + (qid * _K_) + (j * _TPB_ + tid); sMem[i * _K_ + j * _TPB_ + tid] = precomputed[adr]; } #endif } } __syncthreads(); } __device__ __forceinline__ unsigned int bfe( unsigned int source, unsigned int bitIndex ) { unsigned int bit; asm volatile("bfe.u32 %0, %1, %2, %3;" : "=r"(bit) : "r"((unsigned int) source), "r"(bitIndex), "r"(1)); return bit; } __device__ __forceinline__ void warp_comparator( float &value, float &index, const int stride, const int direction ){ const float otherValue = __shfl_xor_sync(0xFFFFFFFF, value, stride); const float otherIndex = __shfl_xor_sync(0xFFFFFFFF, index, stride); bool condition = value < otherValue == direction; index = condition ? otherIndex : index; value = condition ? otherValue : value; } __device__ __forceinline__ void block_comparator( float &value, float &index, const int stride, const int direction, const int laneID, _VOLATILE_ float sMem[] ){ float tempPrecomputed1 = sMem[laneID]; float tempPrecomputed2 = sMem[_TPB_ + laneID]; __syncthreads(); sMem[laneID] = value; sMem[_TPB_ + laneID] = index; __syncthreads(); float otherValue = sMem[laneID ^ stride]; float otherIndex = sMem[_TPB_ + laneID ^ stride]; __syncthreads(); sMem[laneID] = tempPrecomputed1; sMem[_TPB_ + laneID] = tempPrecomputed2; __syncthreads(); bool condition = value < otherValue == direction; value = condition ? otherValue : value; index = condition ? 
otherIndex : index; /* */ } __device__ __forceinline__ void block_comparator_noop( ){ __syncthreads(); __syncthreads(); __syncthreads(); __syncthreads(); } __device__ __forceinline__ void thread_comparator( float &value, float &index, float otherValue, float otherIndex, const int direction ){ bool condition = value > otherValue == direction; if (condition){ value = otherValue; index = otherIndex; /* value = value + otherValue; otherValue = value - otherValue; value = value - otherValue; index = index + otherIndex; otherIndex = index - otherIndex; index = index - otherIndex; */ } } __device__ void bitonic_sort_2( float &value, float &index, int laneID ){ warp_comparator(value, index, 1, bfe(laneID, 1) ^ bfe(laneID, 0)); } __device__ void bitonic_sort_4( float &value, float &index, int laneID ){ bitonic_sort_2(value, index, laneID); warp_comparator(value, index, 2, bfe(laneID, 2) ^ bfe(laneID, 1)); warp_comparator(value, index, 1, bfe(laneID, 2) ^ bfe(laneID, 0)); } __device__ void bitonic_sort_8( float &value, float &index, int laneID ){ bitonic_sort_4(value, index, laneID); warp_comparator(value, index, 4, bfe(laneID, 3) ^ bfe(laneID, 2)); warp_comparator(value, index, 2, bfe(laneID, 3) ^ bfe(laneID, 1)); warp_comparator(value, index, 1, bfe(laneID, 3) ^ bfe(laneID, 0)); } __device__ void bitonic_sort_16( float &value, float &index, int laneID ){ bitonic_sort_8(value, index, laneID); warp_comparator(value, index, 8, bfe(laneID, 4) ^ bfe(laneID, 3)); warp_comparator(value, index, 4, bfe(laneID, 4) ^ bfe(laneID, 2)); warp_comparator(value, index, 2, bfe(laneID, 4) ^ bfe(laneID, 1)); warp_comparator(value, index, 1, bfe(laneID, 4) ^ bfe(laneID, 0)); } __device__ void bitonic_sort_32( float &value, float &index, int laneID ){ bitonic_sort_16(value, index, laneID); warp_comparator(value, index, 16, bfe(laneID, 5) ^ bfe(laneID, 4)); warp_comparator(value, index, 8, bfe(laneID, 5) ^ bfe(laneID, 3)); warp_comparator(value, index, 4, bfe(laneID, 5) ^ bfe(laneID, 2)); warp_comparator(value, index, 2, bfe(laneID, 5) ^ bfe(laneID, 1)); warp_comparator(value, index, 1, bfe(laneID, 5) ^ bfe(laneID, 0)); } __device__ void bitonic_sort_global_2( float &value, float &index, float otherValue, float otherIndex, int laneID ) { if (_TPB_ - 32 <= threadIdx.x){ thread_comparator(value, index, otherValue, otherIndex, 0); warp_comparator(value, index, 1, !bfe(laneID, 0)); } } __device__ void bitonic_sort_global_4( float &value, float &index, float otherValue, float otherIndex, int laneID ) { if (_TPB_ - 32 <= threadIdx.x){ thread_comparator(value, index, otherValue, otherIndex, 0); warp_comparator(value, index, 2, !bfe(laneID, 1)); warp_comparator(value, index, 1, !bfe(laneID, 0)); } } __device__ void bitonic_sort_global_8( float &value, float &index, float otherValue, float otherIndex, int laneID ) { if (_TPB_ - 32 <= threadIdx.x){ thread_comparator(value, index, otherValue, otherIndex, 0); warp_comparator(value, index, 4, !bfe(laneID, 2)); warp_comparator(value, index, 2, !bfe(laneID, 1)); warp_comparator(value, index, 1, !bfe(laneID, 0)); } } __device__ void bitonic_sort_global_16( float &value, float &index, float otherValue, float otherIndex, int laneID ) { if (_TPB_ - 32 <= threadIdx.x){ thread_comparator(value, index, otherValue, otherIndex, 0); warp_comparator(value, index, 8, !bfe(laneID, 3)); warp_comparator(value, index, 4, !bfe(laneID, 2)); warp_comparator(value, index, 2, !bfe(laneID, 1)); warp_comparator(value, index, 1, !bfe(laneID, 0)); } } __device__ void bitonic_sort_global_32( float &value, 
float &index, float otherValue, float otherIndex, int laneID ) { if (_TPB_ - 32 <= threadIdx.x){ thread_comparator(value, index, otherValue, otherIndex, 0); warp_comparator(value, index, 16, !bfe(laneID, 4)); warp_comparator(value, index, 8, !bfe(laneID, 3)); warp_comparator(value, index, 4, !bfe(laneID, 2)); warp_comparator(value, index, 2, !bfe(laneID, 1)); warp_comparator(value, index, 1, !bfe(laneID, 0)); } } #if _TPB_ >= 64 __device__ void bitonic_sort_64( float &value, float &index, _VOLATILE_ float sMem[], int laneID ){ bitonic_sort_32(value, index, laneID); block_comparator(value, index, 32, bfe(laneID, 6) ^ bfe(laneID, 5), laneID, sMem); warp_comparator(value, index, 16, bfe(laneID, 6) ^ bfe(laneID, 4)); warp_comparator(value, index, 8, bfe(laneID, 6) ^ bfe(laneID, 3)); warp_comparator(value, index, 4, bfe(laneID, 6) ^ bfe(laneID, 2)); warp_comparator(value, index, 2, bfe(laneID, 6) ^ bfe(laneID, 1)); warp_comparator(value, index, 1, bfe(laneID, 6) ^ bfe(laneID, 0)); } #endif __device__ void bitonic_sort_global_64( float &value, float &index, float otherValue, float otherIndex, _VOLATILE_ float sMem[], int laneID ) { if (_TPB_ - 64 <= threadIdx.x){ thread_comparator(value, index, otherValue, otherIndex, 0); block_comparator(value, index, 32, !bfe(laneID, 5), laneID, sMem); warp_comparator(value, index, 16, !bfe(laneID, 4)); warp_comparator(value, index, 8, !bfe(laneID, 3)); warp_comparator(value, index, 4, !bfe(laneID, 2)); warp_comparator(value, index, 2, !bfe(laneID, 1)); warp_comparator(value, index, 1, !bfe(laneID, 0)); } else { block_comparator_noop(); } } #if _TPB_ >= 128 __device__ void bitonic_sort_128( float &value, float &index, _VOLATILE_ float sMem[], int laneID ){ bitonic_sort_64(value, index, sMem, laneID); block_comparator(value, index, 64, bfe(laneID, 7) ^ bfe(laneID, 6), laneID, sMem); block_comparator(value, index, 32, bfe(laneID, 7) ^ bfe(laneID, 5), laneID, sMem); warp_comparator(value, index, 16, bfe(laneID, 7) ^ bfe(laneID, 4)); warp_comparator(value, index, 8, bfe(laneID, 7) ^ bfe(laneID, 3)); warp_comparator(value, index, 4, bfe(laneID, 7) ^ bfe(laneID, 2)); warp_comparator(value, index, 2, bfe(laneID, 7) ^ bfe(laneID, 1)); warp_comparator(value, index, 1, bfe(laneID, 7) ^ bfe(laneID, 0)); } #endif __device__ void bitonic_sort_global_128( float &value, float &index, float otherValue, float otherIndex, _VOLATILE_ float sMem[], int laneID ) { if (_TPB_ - 128 <= threadIdx.x){ thread_comparator(value, index, otherValue, otherIndex, 0); block_comparator(value, index, 64, !bfe(laneID, 6), laneID, sMem); block_comparator(value, index, 32, !bfe(laneID, 5), laneID, sMem); warp_comparator(value, index, 16, !bfe(laneID, 4)); warp_comparator(value, index, 8, !bfe(laneID, 3)); warp_comparator(value, index, 4, !bfe(laneID, 2)); warp_comparator(value, index, 2, !bfe(laneID, 1)); warp_comparator(value, index, 1, !bfe(laneID, 0)); } else { block_comparator_noop(); block_comparator_noop(); } } #if _TPB_ >= 256 __device__ void bitonic_sort_256( float &value, float &index, _VOLATILE_ float sMem[], int laneID ){ bitonic_sort_128(value, index, sMem, laneID); block_comparator(value, index, 128, bfe(laneID, 8) ^ bfe(laneID, 7), laneID, sMem); block_comparator(value, index, 64, bfe(laneID, 8) ^ bfe(laneID, 6), laneID, sMem); block_comparator(value, index, 32, bfe(laneID, 8) ^ bfe(laneID, 5), laneID, sMem); warp_comparator(value, index, 16, bfe(laneID, 8) ^ bfe(laneID, 4)); warp_comparator(value, index, 8, bfe(laneID, 8) ^ bfe(laneID, 3)); warp_comparator(value, index, 4, 
bfe(laneID, 8) ^ bfe(laneID, 2)); warp_comparator(value, index, 2, bfe(laneID, 8) ^ bfe(laneID, 1)); warp_comparator(value, index, 1, bfe(laneID, 8) ^ bfe(laneID, 0)); } #endif __device__ void bitonic_sort_global_256( float &value, float &index, float otherValue, float otherIndex, _VOLATILE_ float sMem[], int laneID ) { if (_TPB_ - 256 <= threadIdx.x){ thread_comparator(value, index, otherValue, otherIndex, 0); block_comparator(value, index, 128, !bfe(laneID, 7), laneID, sMem); block_comparator(value, index, 64, !bfe(laneID, 6), laneID, sMem); block_comparator(value, index, 32, !bfe(laneID, 5), laneID, sMem); warp_comparator(value, index, 16, !bfe(laneID, 4)); warp_comparator(value, index, 8, !bfe(laneID, 3)); warp_comparator(value, index, 4, !bfe(laneID, 2)); warp_comparator(value, index, 2, !bfe(laneID, 1)); warp_comparator(value, index, 1, !bfe(laneID, 0)); } else { block_comparator_noop(); block_comparator_noop(); block_comparator_noop(); } } #if _TPB_ >= 512 __device__ void bitonic_sort_512( float &value, float &index, _VOLATILE_ float sMem[], int laneID ){ bitonic_sort_256(value, index, sMem, laneID); block_comparator(value, index, 256, bfe(laneID, 9) ^ bfe(laneID, 8), laneID, sMem); block_comparator(value, index, 128, bfe(laneID, 9) ^ bfe(laneID, 7), laneID, sMem); block_comparator(value, index, 64, bfe(laneID, 9) ^ bfe(laneID, 6), laneID, sMem); block_comparator(value, index, 32, bfe(laneID, 9) ^ bfe(laneID, 5), laneID, sMem); warp_comparator(value, index, 16, bfe(laneID, 9) ^ bfe(laneID, 4)); warp_comparator(value, index, 8, bfe(laneID, 9) ^ bfe(laneID, 3)); warp_comparator(value, index, 4, bfe(laneID, 9) ^ bfe(laneID, 2)); warp_comparator(value, index, 2, bfe(laneID, 9) ^ bfe(laneID, 1)); warp_comparator(value, index, 1, bfe(laneID, 9) ^ bfe(laneID, 0)); } #endif __device__ void bitonic_sort_global_512( float &value, float &index, float otherValue, float otherIndex, _VOLATILE_ float sMem[], int laneID ) { if (_TPB_ - 512 <= threadIdx.x){ thread_comparator(value, index, otherValue, otherIndex, 0); block_comparator(value, index, 256, !bfe(laneID, 8), laneID, sMem); block_comparator(value, index, 128, !bfe(laneID, 7), laneID, sMem); block_comparator(value, index, 64, !bfe(laneID, 6), laneID, sMem); block_comparator(value, index, 32, !bfe(laneID, 5), laneID, sMem); warp_comparator(value, index, 16, !bfe(laneID, 4)); warp_comparator(value, index, 8, !bfe(laneID, 3)); warp_comparator(value, index, 4, !bfe(laneID, 2)); warp_comparator(value, index, 2, !bfe(laneID, 1)); warp_comparator(value, index, 1, !bfe(laneID, 0)); } else { block_comparator_noop(); block_comparator_noop(); block_comparator_noop(); block_comparator_noop(); } } #if _TPB_ >= 1024 __device__ void bitonic_sort_1024( float &value, float &index, _VOLATILE_ float sMem[], int laneID ){ bitonic_sort_512(value, index, sMem, laneID); block_comparator(value, index, 512, bfe(laneID, 10) ^ bfe(laneID, 9), laneID, sMem); block_comparator(value, index, 256, bfe(laneID, 10) ^ bfe(laneID, 8), laneID, sMem); block_comparator(value, index, 128, bfe(laneID, 10) ^ bfe(laneID, 7), laneID, sMem); block_comparator(value, index, 64, bfe(laneID, 10) ^ bfe(laneID, 6), laneID, sMem); block_comparator(value, index, 32, bfe(laneID, 10) ^ bfe(laneID, 5), laneID, sMem); warp_comparator(value, index, 16, bfe(laneID, 10) ^ bfe(laneID, 4)); warp_comparator(value, index, 8, bfe(laneID, 10) ^ bfe(laneID, 3)); warp_comparator(value, index, 4, bfe(laneID, 10) ^ bfe(laneID, 2)); warp_comparator(value, index, 2, bfe(laneID, 10) ^ bfe(laneID, 1)); 
warp_comparator(value, index, 1, bfe(laneID, 10) ^ bfe(laneID, 0)); } #endif __device__ void bitonic_sort_global_1024( float &value, float &index, float otherValue, float otherIndex, _VOLATILE_ float sMem[], int laneID ) { if (_TPB_ - 256 <= threadIdx.x){ thread_comparator(value, index, otherValue, otherIndex, 0); block_comparator(value, index, 512, !bfe(laneID, 9), laneID, sMem); block_comparator(value, index, 256, !bfe(laneID, 8), laneID, sMem); block_comparator(value, index, 128, !bfe(laneID, 7), laneID, sMem); block_comparator(value, index, 64, !bfe(laneID, 6), laneID, sMem); block_comparator(value, index, 32, !bfe(laneID, 5), laneID, sMem); warp_comparator(value, index, 16, !bfe(laneID, 4)); warp_comparator(value, index, 8, !bfe(laneID, 3)); warp_comparator(value, index, 4, !bfe(laneID, 2)); warp_comparator(value, index, 2, !bfe(laneID, 1)); warp_comparator(value, index, 1, !bfe(laneID, 0)); } else { block_comparator_noop(); block_comparator_noop(); block_comparator_noop(); block_comparator_noop(); block_comparator_noop(); } } __device__ void load_consume_data( const uint8n_t* data, _VOLATILE_ float sMem[], float &value, int iN, int nData ){ #pragma unroll for (int i = 0; i < _M_ / _NCS_; i++){ uint8n_t threadData = data[(i * nData) + iN]; float pre0 = sMem[(i * _NCS_ + 0) * _K_ + int(threadData.val[0]) ]; float pre1 = sMem[(i * _NCS_ + 1) * _K_ + int(threadData.val[1]) ]; float pre2 = sMem[(i * _NCS_ + 2) * _K_ + int(threadData.val[2]) ]; float pre3 = sMem[(i * _NCS_ + 3) * _K_ + int(threadData.val[3]) ]; value += pre0; value += pre1; value += pre2; value += pre3; } } __device__ void load_data( const uint8n_t* data, uint8n_t dataCache[_M_ / _NCS_], int iN, int nData ){ #pragma unroll for (int i = 0; i < _M_ / _NCS_; i++){ uint8n_t threadData = data[(i * nData) + iN]; dataCache[i] = threadData; } } __device__ void consume_data( _VOLATILE_ float sMem[], uint8n_t dataCache[_M_ / _NCS_], float &value ){ #pragma unroll for (int i = 0; i < _M_ / _NCS_; i++){ uint8n_t threadData = dataCache[i]; float pre0 = sMem[(i * _NCS_ + 0) * _K_ + int(threadData.val[0]) ]; float pre1 = sMem[(i * _NCS_ + 1) * _K_ + int(threadData.val[1]) ]; float pre2 = sMem[(i * _NCS_ + 2) * _K_ + int(threadData.val[2]) ]; float pre3 = sMem[(i * _NCS_ + 3) * _K_ + int(threadData.val[3]) ]; value += pre0; value += pre1; value += pre2; value += pre3; } } extern "C" __global__ void ivfpq_topk( const uint8n_t* __restrict__ data, const float* __restrict__ precomputed, const uint8_t* __restrict__ isEmpty, const ll_t* __restrict__ cellStart, const ll_t* __restrict__ cellSize, const ll_t* __restrict__ totalSize, float* __restrict__ gValue, ll_t* __restrict__ gIndex, int nData, int nQuery, int nProbe, int nCandidates ) { const int tid = threadIdx.x; // thread ID const int qid = blockIdx.x; // query ID extern __shared__ _VOLATILE_ float sMem[]; // M * K load_precomputed(precomputed, sMem, nQuery); float finalValue = -654321; float finalIndex = -1; const ll_t threadTotalSize = totalSize[qid]; const int nIter = (threadTotalSize + _TPB_ - 1) / _TPB_; int cCell = 0; int cCellStart = cellStart[qid * nProbe + cCell]; int cCellSize = cellSize[qid * nProbe + cCell]; int cCellEnd = cCellStart + cCellSize; int iN = cCellStart + tid; for (int i = 0; i < nIter; i++){ while (iN >= cCellEnd){ cCell ++; // increment cell index by 1 if (cCell >= nProbe) break; int pCellEnd = cCellEnd; cCellStart = cellStart[qid * nProbe + cCell]; cCellSize = cellSize[qid * nProbe + cCell]; cCellEnd = cCellStart + cCellSize; iN = iN - pCellEnd + cCellStart; } 
float value; float index = iN; int cIsEmpty = 0; if (cCellStart <= iN && iN < cCellEnd){ value = 0.f; cIsEmpty = isEmpty[iN]; //load_consume_data(data, sMem, value, iN, nData); uint8n_t dataCache[_M_ / _NCS_]; load_data(data, dataCache, iN, nData); consume_data(sMem, dataCache, value); /* */ } else { value = -123456.f; } value = cIsEmpty == 0 ? value : -987654.f; index = cIsEmpty == 0 ? index : -1; #if _TPB_ == 32 bitonic_sort_32(value, index, tid); #elif _TPB_ == 64 bitonic_sort_64(value, index, sMem, tid); #elif _TPB_ == 128 bitonic_sort_128(value, index, sMem, tid); #elif _TPB_ == 256 bitonic_sort_256(value, index, sMem, tid); #elif _TPB_ == 512 bitonic_sort_512(value, index, sMem, tid); #elif _TPB_ == 1024 bitonic_sort_1024(value, index, sMem, tid); #endif switch (nCandidates){ case 2: bitonic_sort_global_2( finalValue, finalIndex, value, index, tid); break; case 4: bitonic_sort_global_4( finalValue, finalIndex, value, index, tid); break; case 8: bitonic_sort_global_8( finalValue, finalIndex, value, index, tid); break; case 16: bitonic_sort_global_16( finalValue, finalIndex, value, index, tid); break; case 32: bitonic_sort_global_32( finalValue, finalIndex, value, index, tid); break; case 64: bitonic_sort_global_64( finalValue, finalIndex, value, index, sMem, tid); break; case 128: bitonic_sort_global_128( finalValue, finalIndex, value, index, sMem, tid); break; case 256: bitonic_sort_global_256( finalValue, finalIndex, value, index, sMem, tid); break; case 512: bitonic_sort_global_512( finalValue, finalIndex, value, index, sMem, tid); break; case 1024: bitonic_sort_global_1024( finalValue, finalIndex, value, index, sMem, tid); break; } iN += _TPB_; } if (_TPB_ - nCandidates <= tid){ const int writeAddress = (qid * nCandidates) + tid - ( _TPB_ - nCandidates); gValue[writeAddress] = finalValue; gIndex[writeAddress] = finalIndex; } }
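// --- An assumed host-side launch of ivfpq_topk (not part of the original source), inferred
// --- from how the kernel uses blockIdx.x as the query id and from load_precomputed, which
// --- caches an _M_ x _K_ table of floats in dynamic shared memory; block_comparator also
// --- reuses the first 2 * _TPB_ floats of that buffer, so the allocation below covers both.
// --- _TPB_ / _M_ / _K_ are compile-time substitutions made by the host framework, and the
// --- wrapper name and stream plumbing are illustrative only.
#include <algorithm>

void launch_ivfpq_topk(const uint8n_t* data, const float* precomputed,
                       const uint8_t* isEmpty, const ll_t* cellStart,
                       const ll_t* cellSize, const ll_t* totalSize,
                       float* gValue, ll_t* gIndex,
                       int nData, int nQuery, int nProbe, int nCandidates,
                       cudaStream_t stream)
{
    const dim3   grid(nQuery);   // qid = blockIdx.x: one block handles one query
    const dim3   block(_TPB_);   // tid = threadIdx.x
    const size_t smem = sizeof(float) *
                        std::max<size_t>(size_t(_M_) * _K_, size_t(2) * _TPB_);
    ivfpq_topk<<<grid, block, smem, stream>>>(
        data, precomputed, isEmpty, cellStart, cellSize, totalSize,
        gValue, gIndex, nData, nQuery, nProbe, nCandidates);
}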
#include "open3d/core/Dispatch.h" #include "open3d/core/Dtype.h" #include "open3d/core/MemoryManager.h" #include "open3d/core/ParallelFor.h" #include "open3d/core/SizeVector.h" #include "open3d/core/Tensor.h" #include "open3d/core/hashmap/CUDA/StdGPUHashBackend.h" #include "open3d/core/hashmap/DeviceHashBackend.h" #include "open3d/core/hashmap/Dispatch.h" #include "open3d/core/hashmap/HashMap.h" #include "open3d/t/geometry/kernel/GeometryIndexer.h" #include "open3d/t/geometry/kernel/GeometryMacros.h" #include "open3d/t/geometry/kernel/VoxelBlockGrid.h" #include "open3d/t/geometry/kernel/VoxelBlockGridImpl.h" #include "open3d/utility/Logging.h" namespace open3d { namespace t { namespace geometry { namespace kernel { namespace voxel_grid { struct Coord3i { OPEN3D_HOST_DEVICE Coord3i(index_t x, index_t y, index_t z) : x_(x), y_(y), z_(z) {} OPEN3D_HOST_DEVICE bool operator==(const Coord3i &other) const { return x_ == other.x_ && y_ == other.y_ && z_ == other.z_; } index_t x_; index_t y_; index_t z_; }; void PointCloudTouchCUDA(std::shared_ptr<core::HashMap> &hashmap, const core::Tensor &points, core::Tensor &voxel_block_coords, index_t voxel_grid_resolution, float voxel_size, float sdf_trunc) { index_t resolution = voxel_grid_resolution; float block_size = voxel_size * resolution; index_t n = points.GetLength(); const float *pcd_ptr = static_cast<const float *>(points.GetDataPtr()); core::Device device = points.GetDevice(); core::Tensor block_coordi({8 * n, 3}, core::Int32, device); index_t *block_coordi_ptr = static_cast<index_t *>(block_coordi.GetDataPtr()); core::Tensor count(std::vector<index_t>{0}, {}, core::Int32, device); index_t *count_ptr = static_cast<index_t *>(count.GetDataPtr()); core::ParallelFor(hashmap->GetDevice(), n, [=] OPEN3D_DEVICE(index_t workload_idx) { float x = pcd_ptr[3 * workload_idx + 0]; float y = pcd_ptr[3 * workload_idx + 1]; float z = pcd_ptr[3 * workload_idx + 2]; index_t xb_lo = static_cast<index_t>( floorf((x - sdf_trunc) / block_size)); index_t xb_hi = static_cast<index_t>( floorf((x + sdf_trunc) / block_size)); index_t yb_lo = static_cast<index_t>( floorf((y - sdf_trunc) / block_size)); index_t yb_hi = static_cast<index_t>( floorf((y + sdf_trunc) / block_size)); index_t zb_lo = static_cast<index_t>( floorf((z - sdf_trunc) / block_size)); index_t zb_hi = static_cast<index_t>( floorf((z + sdf_trunc) / block_size)); for (index_t xb = xb_lo; xb <= xb_hi; ++xb) { for (index_t yb = yb_lo; yb <= yb_hi; ++yb) { for (index_t zb = zb_lo; zb <= zb_hi; ++zb) { index_t idx = atomicAdd(count_ptr, 1); block_coordi_ptr[3 * idx + 0] = xb; block_coordi_ptr[3 * idx + 1] = yb; block_coordi_ptr[3 * idx + 2] = zb; } } } }); index_t total_block_count = count.Item<index_t>(); if (total_block_count == 0) { utility::LogError( "[CUDATSDFTouchKernel] No block is touched in TSDF volume, " "abort integration. 
Please check specified parameters, " "especially depth_scale and voxel_size"); } block_coordi = block_coordi.Slice(0, 0, total_block_count); core::Tensor block_buf_indices, block_masks; hashmap->Activate(block_coordi.Slice(0, 0, count.Item<index_t>()), block_buf_indices, block_masks); voxel_block_coords = block_coordi.IndexGet({block_masks}); } void DepthTouchCUDA(std::shared_ptr<core::HashMap> &hashmap, const core::Tensor &depth, const core::Tensor &intrinsics, const core::Tensor &extrinsics, core::Tensor &voxel_block_coords, index_t voxel_grid_resolution, float voxel_size, float sdf_trunc, float depth_scale, float depth_max, index_t stride) { core::Device device = depth.GetDevice(); NDArrayIndexer depth_indexer(depth, 2); core::Tensor pose = t::geometry::InverseTransformation(extrinsics); TransformIndexer ti(intrinsics, pose, 1.0f); // Output index_t rows_strided = depth_indexer.GetShape(0) / stride; index_t cols_strided = depth_indexer.GetShape(1) / stride; index_t n = rows_strided * cols_strided; const index_t step_size = 3; const index_t est_multipler_factor = (step_size + 1); static core::Tensor block_coordi; if (block_coordi.GetLength() != est_multipler_factor * n) { block_coordi = core::Tensor({est_multipler_factor * n, 3}, core::Dtype::Int32, device); } // Counter core::Tensor count(std::vector<index_t>{0}, {1}, core::Dtype::Int32, device); index_t *count_ptr = count.GetDataPtr<index_t>(); index_t *block_coordi_ptr = block_coordi.GetDataPtr<index_t>(); index_t resolution = voxel_grid_resolution; float block_size = voxel_size * resolution; DISPATCH_DTYPE_TO_TEMPLATE(depth.GetDtype(), [&]() { core::ParallelFor(device, n, [=] OPEN3D_DEVICE(index_t workload_idx) { index_t y = (workload_idx / cols_strided) * stride; index_t x = (workload_idx % cols_strided) * stride; float d = *depth_indexer.GetDataPtr<scalar_t>(x, y) / depth_scale; if (d > 0 && d < depth_max) { float x_c = 0, y_c = 0, z_c = 0; ti.Unproject(static_cast<float>(x), static_cast<float>(y), 1.0, &x_c, &y_c, &z_c); float x_g = 0, y_g = 0, z_g = 0; ti.RigidTransform(x_c, y_c, z_c, &x_g, &y_g, &z_g); // Origin float x_o = 0, y_o = 0, z_o = 0; ti.GetCameraPosition(&x_o, &y_o, &z_o); // Direction float x_d = x_g - x_o; float y_d = y_g - y_o; float z_d = z_g - z_o; const float t_min = max(d - sdf_trunc, 0.0); const float t_max = min(d + sdf_trunc, depth_max); const float t_step = (t_max - t_min) / step_size; float t = t_min; index_t idx = OPEN3D_ATOMIC_ADD(count_ptr, (step_size + 1)); for (index_t step = 0; step <= step_size; ++step) { index_t offset = (step + idx) * 3; index_t xb = static_cast<index_t>( floorf((x_o + t * x_d) / block_size)); index_t yb = static_cast<index_t>( floorf((y_o + t * y_d) / block_size)); index_t zb = static_cast<index_t>( floorf((z_o + t * z_d) / block_size)); block_coordi_ptr[offset + 0] = xb; block_coordi_ptr[offset + 1] = yb; block_coordi_ptr[offset + 2] = zb; t += t_step; } } }); }); index_t total_block_count = static_cast<index_t>(count[0].Item<index_t>()); if (total_block_count == 0) { utility::LogError( "No block is touched in TSDF volume, " "abort integration. 
Please check specified parameters, " "especially depth_scale and voxel_size"); } total_block_count = std::min(total_block_count, static_cast<index_t>(hashmap->GetCapacity())); block_coordi = block_coordi.Slice(0, 0, total_block_count); core::Tensor block_addrs, block_masks; hashmap->Activate(block_coordi, block_addrs, block_masks); // Customized IndexGet (generic version too slow) voxel_block_coords = core::Tensor({hashmap->Size(), 3}, core::Int32, device); index_t *voxel_block_coord_ptr = voxel_block_coords.GetDataPtr<index_t>(); bool *block_masks_ptr = block_masks.GetDataPtr<bool>(); count[0] = 0; core::ParallelFor(device, total_block_count, [=] OPEN3D_DEVICE(index_t workload_idx) { if (block_masks_ptr[workload_idx]) { index_t idx = OPEN3D_ATOMIC_ADD(count_ptr, 1); index_t offset_lhs = 3 * idx; index_t offset_rhs = 3 * workload_idx; voxel_block_coord_ptr[offset_lhs + 0] = block_coordi_ptr[offset_rhs + 0]; voxel_block_coord_ptr[offset_lhs + 1] = block_coordi_ptr[offset_rhs + 1]; voxel_block_coord_ptr[offset_lhs + 2] = block_coordi_ptr[offset_rhs + 2]; } }); OPEN3D_CUDA_CHECK(cudaDeviceSynchronize()); } #define FN_ARGUMENTS \ const core::Tensor &depth, const core::Tensor &color, \ const core::Tensor &indices, const core::Tensor &block_keys, \ TensorMap &block_values, const core::Tensor &intrinsics, \ const core::Tensor &extrinsics, index_t resolution, \ float voxel_size, float sdf_trunc, float depth_scale, \ float depth_max template void IntegrateCUDA<uint16_t, uint8_t, float, uint16_t, uint16_t>( FN_ARGUMENTS); template void IntegrateCUDA<uint16_t, uint8_t, float, float, float>( FN_ARGUMENTS); template void IntegrateCUDA<float, float, float, uint16_t, uint16_t>( FN_ARGUMENTS); template void IntegrateCUDA<float, float, float, float, float>(FN_ARGUMENTS); #undef FN_ARGUMENTS #define FN_ARGUMENTS \ std::shared_ptr<core::HashMap> &hashmap, const TensorMap &block_value_map, \ const core::Tensor &range_map, TensorMap &renderings_map, \ const core::Tensor &intrinsics, const core::Tensor &extrinsics, \ index_t h, index_t w, index_t block_resolution, float voxel_size, \ float sdf_trunc, float depth_scale, float depth_min, \ float depth_max, float weight_threshold template void RayCastCUDA<float, uint16_t, uint16_t>(FN_ARGUMENTS); template void RayCastCUDA<float, float, float>(FN_ARGUMENTS); #undef FN_ARGUMENTS #define FN_ARGUMENTS \ const core::Tensor &block_indices, const core::Tensor &nb_block_indices, \ const core::Tensor &nb_block_masks, \ const core::Tensor &block_keys, const TensorMap &block_value_map, \ core::Tensor &points, core::Tensor &normals, core::Tensor &colors, \ index_t block_resolution, float voxel_size, \ float weight_threshold, index_t &valid_size template void ExtractPointCloudCUDA<float, uint16_t, uint16_t>(FN_ARGUMENTS); template void ExtractPointCloudCUDA<float, float, float>(FN_ARGUMENTS); #undef FN_ARGUMENTS void ExtractTriangleMeshCUDA(const core::Tensor &block_indices, const core::Tensor &inv_block_indices, const core::Tensor &nb_block_indices, const core::Tensor &nb_block_masks, const core::Tensor &block_keys, const std::vector<core::Tensor> &block_values, core::Tensor &vertices, core::Tensor &triangles, core::Tensor &vertex_normals, core::Tensor &vertex_colors, index_t block_resolution, float voxel_size, float weight_threshold, index_t &vertex_count); #define FN_ARGUMENTS \ const core::Tensor &block_indices, const core::Tensor &inv_block_indices, \ const core::Tensor &nb_block_indices, \ const core::Tensor &nb_block_masks, \ const core::Tensor &block_keys, const 
TensorMap &block_value_map, \ core::Tensor &vertices, core::Tensor &triangles, \ core::Tensor &vertex_normals, core::Tensor &vertex_colors, \ index_t block_resolution, float voxel_size, \ float weight_threshold, index_t &vertex_count template void ExtractTriangleMeshCUDA<float, uint16_t, uint16_t>(FN_ARGUMENTS); template void ExtractTriangleMeshCUDA<float, float, float>(FN_ARGUMENTS); #undef FN_ARGUMENTS } // namespace voxel_grid } // namespace kernel } // namespace geometry } // namespace t } // namespace open3d
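// --- A plain host restatement (not part of Open3D) of the point -> voxel-block mapping used
// --- by PointCloudTouchCUDA above: a block spans block_size = voxel_size * resolution along
// --- each axis, and every block intersecting the +/- sdf_trunc band around a point is
// --- "touched". The struct and function names are illustrative only.
#include <cmath>
#include <vector>

struct BlockCoord { int x, y, z; };

std::vector<BlockCoord> TouchedBlocks(float x, float y, float z,
                                      float voxel_size, int resolution, float sdf_trunc)
{
    const float block_size = voxel_size * resolution;
    const int xlo = static_cast<int>(std::floor((x - sdf_trunc) / block_size));
    const int xhi = static_cast<int>(std::floor((x + sdf_trunc) / block_size));
    const int ylo = static_cast<int>(std::floor((y - sdf_trunc) / block_size));
    const int yhi = static_cast<int>(std::floor((y + sdf_trunc) / block_size));
    const int zlo = static_cast<int>(std::floor((z - sdf_trunc) / block_size));
    const int zhi = static_cast<int>(std::floor((z + sdf_trunc) / block_size));

    std::vector<BlockCoord> blocks;
    for (int xb = xlo; xb <= xhi; ++xb)
        for (int yb = ylo; yb <= yhi; ++yb)
            for (int zb = zlo; zb <= zhi; ++zb)
                blocks.push_back({xb, yb, zb});
    return blocks;  // duplicates across points are later collapsed by hashmap->Activate()
}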
#include <stdgpu/config.h> #if STDGPU_BACKEND == STDGPU_BACKEND_CUDA #define STDGPU_BACKEND_ATOMIC_HEADER <stdgpu/STDGPU_BACKEND_DIRECTORY/atomic.cuh> // NOLINT(bugprone-macro-parentheses,misc-macro-parentheses) // cppcheck-suppress preprocessorErrorDirective #include STDGPU_BACKEND_ATOMIC_HEADER #undef STDGPU_BACKEND_ATOMIC_HEADER #else #define STDGPU_BACKEND_ATOMIC_HEADER <stdgpu/STDGPU_BACKEND_DIRECTORY/atomic.h> // NOLINT(bugprone-macro-parentheses,misc-macro-parentheses) // cppcheck-suppress preprocessorErrorDirective #include STDGPU_BACKEND_ATOMIC_HEADER #undef STDGPU_BACKEND_ATOMIC_HEADER #endif #include <stdgpu/attribute.h> #include <stdgpu/memory.h> #include <stdgpu/platform.h> namespace stdgpu { namespace detail { inline STDGPU_DEVICE_ONLY void atomic_load_thread_fence(const memory_order order) { switch (order) { case memory_order_consume : case memory_order_acquire : case memory_order_acq_rel : case memory_order_seq_cst : { stdgpu::STDGPU_BACKEND_NAMESPACE::atomic_thread_fence(); } break; case memory_order_relaxed : case memory_order_release : default : { // Nothing to do ... } } } inline STDGPU_DEVICE_ONLY void atomic_store_thread_fence(const memory_order order) { switch (order) { case memory_order_release : case memory_order_acq_rel : case memory_order_seq_cst : { stdgpu::STDGPU_BACKEND_NAMESPACE::atomic_thread_fence(); } break; case memory_order_relaxed : case memory_order_consume : case memory_order_acquire : default : { // Nothing to do ... } } } inline STDGPU_DEVICE_ONLY void atomic_consistency_thread_fence(const memory_order order) { switch (order) { case memory_order_seq_cst : { stdgpu::STDGPU_BACKEND_NAMESPACE::atomic_thread_fence(); } break; case memory_order_relaxed : case memory_order_consume : case memory_order_acquire : case memory_order_release : case memory_order_acq_rel : default : { // Nothing to do ... } } } } // namespace detail inline STDGPU_DEVICE_ONLY void atomic_thread_fence(const memory_order order) { switch (order) { case memory_order_consume : case memory_order_acquire : case memory_order_release : case memory_order_acq_rel : case memory_order_seq_cst : { stdgpu::STDGPU_BACKEND_NAMESPACE::atomic_thread_fence(); } break; case memory_order_relaxed : default : { // Nothing to do ... 
} } } inline STDGPU_DEVICE_ONLY void atomic_signal_fence(const memory_order order) { atomic_thread_fence(order); } template <typename T, typename Allocator> inline atomic<T, Allocator> atomic<T, Allocator>::createDeviceObject(const Allocator& allocator) { atomic<T, Allocator> result(allocator); result._value_ref._value = createDeviceArray<T, allocator_type>(result._allocator, 1, 0); return result; } template <typename T, typename Allocator> inline void atomic<T, Allocator>::destroyDeviceObject(atomic<T, Allocator>& device_object) { destroyDeviceArray<T, allocator_type>(device_object._allocator, device_object._value_ref._value); } template <typename T, typename Allocator> inline atomic<T, Allocator>::atomic() : _value_ref(nullptr) { } template <typename T, typename Allocator> inline atomic<T, Allocator>::atomic(const Allocator& allocator) : _value_ref(nullptr), _allocator(allocator) { } template <typename T, typename Allocator> inline STDGPU_HOST_DEVICE typename atomic<T, Allocator>::allocator_type atomic<T, Allocator>::get_allocator() const { return _allocator; } template <typename T, typename Allocator> inline STDGPU_HOST_DEVICE bool atomic<T, Allocator>::is_lock_free() const { return _value_ref.is_lock_free(); } template <typename T, typename Allocator> inline STDGPU_HOST_DEVICE T atomic<T, Allocator>::load(const memory_order order) const { return _value_ref.load(order); } template <typename T, typename Allocator> inline STDGPU_HOST_DEVICE atomic<T, Allocator>::operator T() const { return _value_ref.operator T(); } template <typename T, typename Allocator> inline STDGPU_HOST_DEVICE void atomic<T, Allocator>::store(const T desired, const memory_order order) { _value_ref.store(desired, order); } template <typename T, typename Allocator> //NOLINT(misc-unconventional-assign-operator) inline STDGPU_HOST_DEVICE T //NOLINT(misc-unconventional-assign-operator) atomic<T, Allocator>::operator=(const T desired) { return _value_ref.operator=(desired); } template <typename T, typename Allocator> inline STDGPU_DEVICE_ONLY T atomic<T, Allocator>::exchange(const T desired, const memory_order order) { return _value_ref.exchange(desired, order); } template <typename T, typename Allocator> inline STDGPU_DEVICE_ONLY bool atomic<T, Allocator>::compare_exchange_weak(T& expected, const T desired, const memory_order order) { return _value_ref.compare_exchange_weak(expected, desired, order); } template <typename T, typename Allocator> inline STDGPU_DEVICE_ONLY bool atomic<T, Allocator>::compare_exchange_strong(T& expected, const T desired, const memory_order order) { return _value_ref.compare_exchange_strong(expected, desired, order); } template <typename T, typename Allocator> template <STDGPU_DETAIL_OVERLOAD_DEFINITION_IF(std::is_integral<T>::value || std::is_floating_point<T>::value)> inline STDGPU_DEVICE_ONLY T atomic<T, Allocator>::fetch_add(const T arg, const memory_order order) { return _value_ref.fetch_add(arg, order); } template <typename T, typename Allocator> template <STDGPU_DETAIL_OVERLOAD_DEFINITION_IF(std::is_integral<T>::value || std::is_floating_point<T>::value)> inline STDGPU_DEVICE_ONLY T atomic<T, Allocator>::fetch_sub(const T arg, const memory_order order) { return _value_ref.fetch_sub(arg, order); } template <typename T, typename Allocator> template <STDGPU_DETAIL_OVERLOAD_DEFINITION_IF(std::is_integral<T>::value)> inline STDGPU_DEVICE_ONLY T atomic<T, Allocator>::fetch_and(const T arg, const memory_order order) { return _value_ref.fetch_and(arg, order); } template <typename T, typename 
Allocator> template <STDGPU_DETAIL_OVERLOAD_DEFINITION_IF(std::is_integral<T>::value)> inline STDGPU_DEVICE_ONLY T atomic<T, Allocator>::fetch_or(const T arg, const memory_order order) { return _value_ref.fetch_or(arg, order); } template <typename T, typename Allocator> template <STDGPU_DETAIL_OVERLOAD_DEFINITION_IF(std::is_integral<T>::value)> inline STDGPU_DEVICE_ONLY T atomic<T, Allocator>::fetch_xor(const T arg, const memory_order order) { return _value_ref.fetch_xor(arg, order); } template <typename T, typename Allocator> template <STDGPU_DETAIL_OVERLOAD_DEFINITION_IF(std::is_integral<T>::value || std::is_floating_point<T>::value)> inline STDGPU_DEVICE_ONLY T atomic<T, Allocator>::fetch_min(const T arg, const memory_order order) { return _value_ref.fetch_min(arg, order); } template <typename T, typename Allocator> template <STDGPU_DETAIL_OVERLOAD_DEFINITION_IF(std::is_integral<T>::value || std::is_floating_point<T>::value)> inline STDGPU_DEVICE_ONLY T atomic<T, Allocator>::fetch_max(const T arg, const memory_order order) { return _value_ref.fetch_max(arg, order); } template <typename T, typename Allocator> template <STDGPU_DETAIL_OVERLOAD_DEFINITION_IF(std::is_same<T, unsigned int>::value)> inline STDGPU_DEVICE_ONLY T atomic<T, Allocator>::fetch_inc_mod(const T arg, const memory_order order) { return _value_ref.fetch_inc_mod(arg, order); } template <typename T, typename Allocator> template <STDGPU_DETAIL_OVERLOAD_DEFINITION_IF(std::is_same<T, unsigned int>::value)> inline STDGPU_DEVICE_ONLY T atomic<T, Allocator>::fetch_dec_mod(const T arg, const memory_order order) { return _value_ref.fetch_dec_mod(arg, order); } template <typename T, typename Allocator> template <STDGPU_DETAIL_OVERLOAD_DEFINITION_IF(std::is_integral<T>::value)> inline STDGPU_DEVICE_ONLY T atomic<T, Allocator>::operator++() { return ++_value_ref; } template <typename T, typename Allocator> template <STDGPU_DETAIL_OVERLOAD_DEFINITION_IF(std::is_integral<T>::value)> inline STDGPU_DEVICE_ONLY T atomic<T, Allocator>::operator++(int) { return _value_ref++; } template <typename T, typename Allocator> template <STDGPU_DETAIL_OVERLOAD_DEFINITION_IF(std::is_integral<T>::value)> inline STDGPU_DEVICE_ONLY T atomic<T, Allocator>::operator--() { return --_value_ref; } template <typename T, typename Allocator> template <STDGPU_DETAIL_OVERLOAD_DEFINITION_IF(std::is_integral<T>::value)> inline STDGPU_DEVICE_ONLY T atomic<T, Allocator>::operator--(int) { return _value_ref--; } template <typename T, typename Allocator> template <STDGPU_DETAIL_OVERLOAD_DEFINITION_IF(std::is_integral<T>::value || std::is_floating_point<T>::value)> inline STDGPU_DEVICE_ONLY T atomic<T, Allocator>::operator+=(const T arg) { return _value_ref += arg; } template <typename T, typename Allocator> template <STDGPU_DETAIL_OVERLOAD_DEFINITION_IF(std::is_integral<T>::value || std::is_floating_point<T>::value)> inline STDGPU_DEVICE_ONLY T atomic<T, Allocator>::operator-=(const T arg) { return _value_ref -= arg; } template <typename T, typename Allocator> template <STDGPU_DETAIL_OVERLOAD_DEFINITION_IF(std::is_integral<T>::value)> inline STDGPU_DEVICE_ONLY T atomic<T, Allocator>::operator&=(const T arg) { return _value_ref &= arg; } template <typename T, typename Allocator> template <STDGPU_DETAIL_OVERLOAD_DEFINITION_IF(std::is_integral<T>::value)> inline STDGPU_DEVICE_ONLY T atomic<T, Allocator>::operator|=(const T arg) { return _value_ref |= arg; } template <typename T, typename Allocator> template <STDGPU_DETAIL_OVERLOAD_DEFINITION_IF(std::is_integral<T>::value)> 
inline STDGPU_DEVICE_ONLY T atomic<T, Allocator>::operator^=(const T arg) { return _value_ref ^= arg; } template <typename T> inline STDGPU_HOST_DEVICE atomic_ref<T>::atomic_ref(T& obj) { _value = &obj; } template <typename T> inline STDGPU_HOST_DEVICE atomic_ref<T>::atomic_ref(T* value) { _value = value; } template <typename T> inline STDGPU_HOST_DEVICE bool atomic_ref<T>::is_lock_free() const { return stdgpu::STDGPU_BACKEND_NAMESPACE::atomic_is_lock_free(); } template <typename T> inline STDGPU_HOST_DEVICE T atomic_ref<T>::load(STDGPU_MAYBE_UNUSED const memory_order order) const { if (_value == nullptr) { return 0; } T local_value; #if STDGPU_CODE == STDGPU_CODE_DEVICE detail::atomic_load_thread_fence(order); local_value = stdgpu::STDGPU_BACKEND_NAMESPACE::atomic_load(_value); detail::atomic_consistency_thread_fence(order); #else copyDevice2HostArray<T>(_value, 1, &local_value, MemoryCopy::NO_CHECK); #endif return local_value; } template <typename T> inline STDGPU_HOST_DEVICE atomic_ref<T>::operator T() const { return load(); } template <typename T> inline STDGPU_HOST_DEVICE void atomic_ref<T>::store(const T desired, STDGPU_MAYBE_UNUSED const memory_order order) { if (_value == nullptr) { return; } #if STDGPU_CODE == STDGPU_CODE_DEVICE detail::atomic_consistency_thread_fence(order); stdgpu::STDGPU_BACKEND_NAMESPACE::atomic_store(_value, desired); detail::atomic_store_thread_fence(order); #else copyHost2DeviceArray<T>(&desired, 1, _value, MemoryCopy::NO_CHECK); #endif } template <typename T> //NOLINT(misc-unconventional-assign-operator) inline STDGPU_HOST_DEVICE T //NOLINT(misc-unconventional-assign-operator) atomic_ref<T>::operator=(const T desired) { store(desired); return desired; } template <typename T> inline STDGPU_DEVICE_ONLY T atomic_ref<T>::exchange(const T desired, const memory_order order) { detail::atomic_load_thread_fence(order); T result = stdgpu::STDGPU_BACKEND_NAMESPACE::atomic_exchange(_value, desired); detail::atomic_store_thread_fence(order); return result; } template <typename T> inline STDGPU_DEVICE_ONLY bool atomic_ref<T>::compare_exchange_weak(T& expected, const T desired, const memory_order order) { return compare_exchange_strong(expected, desired, order); } template <typename T> inline STDGPU_DEVICE_ONLY bool atomic_ref<T>::compare_exchange_strong(T& expected, const T desired, const memory_order order) { detail::atomic_load_thread_fence(order); T old = stdgpu::STDGPU_BACKEND_NAMESPACE::atomic_compare_exchange(_value, expected, desired); bool changed = (old == expected); if (!changed) { expected = old; } detail::atomic_store_thread_fence(order); return changed; } template <typename T> template <STDGPU_DETAIL_OVERLOAD_DEFINITION_IF(std::is_integral<T>::value || std::is_floating_point<T>::value)> inline STDGPU_DEVICE_ONLY T atomic_ref<T>::fetch_add(const T arg, const memory_order order) { detail::atomic_load_thread_fence(order); T result = stdgpu::STDGPU_BACKEND_NAMESPACE::atomic_fetch_add(_value, arg); detail::atomic_store_thread_fence(order); return result; } template <typename T> template <STDGPU_DETAIL_OVERLOAD_DEFINITION_IF(std::is_integral<T>::value || std::is_floating_point<T>::value)> inline STDGPU_DEVICE_ONLY T atomic_ref<T>::fetch_sub(const T arg, const memory_order order) { detail::atomic_load_thread_fence(order); T result = stdgpu::STDGPU_BACKEND_NAMESPACE::atomic_fetch_sub(_value, arg); detail::atomic_store_thread_fence(order); return result; } template <typename T> template <STDGPU_DETAIL_OVERLOAD_DEFINITION_IF(std::is_integral<T>::value)> inline 
STDGPU_DEVICE_ONLY T atomic_ref<T>::fetch_and(const T arg, const memory_order order) { detail::atomic_load_thread_fence(order); T result = stdgpu::STDGPU_BACKEND_NAMESPACE::atomic_fetch_and(_value, arg); detail::atomic_store_thread_fence(order); return result; } template <typename T> template <STDGPU_DETAIL_OVERLOAD_DEFINITION_IF(std::is_integral<T>::value)> inline STDGPU_DEVICE_ONLY T atomic_ref<T>::fetch_or(const T arg, const memory_order order) { detail::atomic_load_thread_fence(order); T result = stdgpu::STDGPU_BACKEND_NAMESPACE::atomic_fetch_or(_value, arg); detail::atomic_store_thread_fence(order); return result; } template <typename T> template <STDGPU_DETAIL_OVERLOAD_DEFINITION_IF(std::is_integral<T>::value)> inline STDGPU_DEVICE_ONLY T atomic_ref<T>::fetch_xor(const T arg, const memory_order order) { detail::atomic_load_thread_fence(order); T result = stdgpu::STDGPU_BACKEND_NAMESPACE::atomic_fetch_xor(_value, arg); detail::atomic_store_thread_fence(order); return result; } template <typename T> template <STDGPU_DETAIL_OVERLOAD_DEFINITION_IF(std::is_integral<T>::value || std::is_floating_point<T>::value)> inline STDGPU_DEVICE_ONLY T atomic_ref<T>::fetch_min(const T arg, const memory_order order) { detail::atomic_load_thread_fence(order); T result = stdgpu::STDGPU_BACKEND_NAMESPACE::atomic_fetch_min(_value, arg); detail::atomic_store_thread_fence(order); return result; } template <typename T> template <STDGPU_DETAIL_OVERLOAD_DEFINITION_IF(std::is_integral<T>::value || std::is_floating_point<T>::value)> inline STDGPU_DEVICE_ONLY T atomic_ref<T>::fetch_max(const T arg, const memory_order order) { detail::atomic_load_thread_fence(order); T result = stdgpu::STDGPU_BACKEND_NAMESPACE::atomic_fetch_max(_value, arg); detail::atomic_store_thread_fence(order); return result; } template <typename T> template <STDGPU_DETAIL_OVERLOAD_DEFINITION_IF(std::is_same<T, unsigned int>::value)> inline STDGPU_DEVICE_ONLY T atomic_ref<T>::fetch_inc_mod(const T arg, const memory_order order) { detail::atomic_load_thread_fence(order); T result = stdgpu::STDGPU_BACKEND_NAMESPACE::atomic_fetch_inc_mod(_value, arg - 1); detail::atomic_store_thread_fence(order); return result; } template <typename T> template <STDGPU_DETAIL_OVERLOAD_DEFINITION_IF(std::is_same<T, unsigned int>::value)> inline STDGPU_DEVICE_ONLY T atomic_ref<T>::fetch_dec_mod(const T arg, const memory_order order) { detail::atomic_load_thread_fence(order); T result = stdgpu::STDGPU_BACKEND_NAMESPACE::atomic_fetch_dec_mod(_value, arg - 1); detail::atomic_store_thread_fence(order); return result; } template <typename T> template <STDGPU_DETAIL_OVERLOAD_DEFINITION_IF(std::is_integral<T>::value)> inline STDGPU_DEVICE_ONLY T atomic_ref<T>::operator++() { return fetch_add(1) + 1; } template <typename T> template <STDGPU_DETAIL_OVERLOAD_DEFINITION_IF(std::is_integral<T>::value)> inline STDGPU_DEVICE_ONLY T atomic_ref<T>::operator++(int) { return fetch_add(1); } template <typename T> template <STDGPU_DETAIL_OVERLOAD_DEFINITION_IF(std::is_integral<T>::value)> inline STDGPU_DEVICE_ONLY T atomic_ref<T>::operator--() { return fetch_sub(1) - 1; } template <typename T> template <STDGPU_DETAIL_OVERLOAD_DEFINITION_IF(std::is_integral<T>::value)> inline STDGPU_DEVICE_ONLY T atomic_ref<T>::operator--(int) { return fetch_sub(1); } template <typename T> template <STDGPU_DETAIL_OVERLOAD_DEFINITION_IF(std::is_integral<T>::value || std::is_floating_point<T>::value)> inline STDGPU_DEVICE_ONLY T atomic_ref<T>::operator+=(const T arg) { return fetch_add(arg) + arg; } 
template <typename T> template <STDGPU_DETAIL_OVERLOAD_DEFINITION_IF(std::is_integral<T>::value || std::is_floating_point<T>::value)> inline STDGPU_DEVICE_ONLY T atomic_ref<T>::operator-=(const T arg) { return fetch_sub(arg) - arg; } template <typename T> template <STDGPU_DETAIL_OVERLOAD_DEFINITION_IF(std::is_integral<T>::value)> inline STDGPU_DEVICE_ONLY T atomic_ref<T>::operator&=(const T arg) { return fetch_and(arg) & arg; // NOLINT(hicpp-signed-bitwise) } template <typename T> template <STDGPU_DETAIL_OVERLOAD_DEFINITION_IF(std::is_integral<T>::value)> inline STDGPU_DEVICE_ONLY T atomic_ref<T>::operator|=(const T arg) { return fetch_or(arg) | arg; // NOLINT(hicpp-signed-bitwise) } template <typename T> template <STDGPU_DETAIL_OVERLOAD_DEFINITION_IF(std::is_integral<T>::value)> inline STDGPU_DEVICE_ONLY T atomic_ref<T>::operator^=(const T arg) { return fetch_xor(arg) ^ arg; // NOLINT(hicpp-signed-bitwise) } template <typename T, typename Allocator> inline STDGPU_HOST_DEVICE bool atomic_is_lock_free(const atomic<T, Allocator>* obj) { return obj->is_lock_free(); } template <typename T, typename Allocator> inline STDGPU_HOST_DEVICE T atomic_load(const atomic<T, Allocator>* obj) { return obj->load(); } template <typename T, typename Allocator> inline STDGPU_HOST_DEVICE T atomic_load_explicit(const atomic<T, Allocator>* obj, const memory_order order) { return obj->load(order); } template <typename T, typename Allocator> inline STDGPU_HOST_DEVICE void atomic_store(atomic<T, Allocator>* obj, const typename atomic<T, Allocator>::value_type desired) { obj->store(desired); } template <typename T, typename Allocator> inline STDGPU_HOST_DEVICE void atomic_store_explicit(atomic<T, Allocator>* obj, const typename atomic<T, Allocator>::value_type desired, const memory_order order) { obj->store(desired, order); } template <typename T, typename Allocator> inline STDGPU_DEVICE_ONLY T atomic_exchange(atomic<T, Allocator>* obj, const typename atomic<T, Allocator>::value_type desired) { return obj->exchange(desired); } template <typename T, typename Allocator> inline STDGPU_DEVICE_ONLY T atomic_exchange_explicit(atomic<T, Allocator>* obj, const typename atomic<T, Allocator>::value_type desired, const memory_order order) { return obj->exchange(desired, order); } template <typename T, typename Allocator> inline STDGPU_DEVICE_ONLY bool atomic_compare_exchange_weak(atomic<T, Allocator>* obj, typename atomic<T, Allocator>::value_type* expected, const typename atomic<T, Allocator>::value_type desired) { return obj->compare_exchange_weak(*expected, desired); } template <typename T, typename Allocator> inline STDGPU_DEVICE_ONLY bool atomic_compare_exchange_strong(atomic<T, Allocator>* obj, typename atomic<T, Allocator>::value_type* expected, const typename atomic<T, Allocator>::value_type desired) { return obj->compare_exchange_strong(*expected, desired); } template <typename T, typename Allocator> inline STDGPU_DEVICE_ONLY T atomic_fetch_add(atomic<T, Allocator>* obj, const typename atomic<T, Allocator>::difference_type arg) { return obj->fetch_add(arg); } template <typename T, typename Allocator> inline STDGPU_DEVICE_ONLY T atomic_fetch_add_explicit(atomic<T, Allocator>* obj, const typename atomic<T, Allocator>::difference_type arg, const memory_order order) { return obj->fetch_add(arg, order); } template <typename T, typename Allocator> inline STDGPU_DEVICE_ONLY T atomic_fetch_sub(atomic<T, Allocator>* obj, const typename atomic<T, Allocator>::difference_type arg) { return obj->fetch_sub(arg); } template 
<typename T, typename Allocator> inline STDGPU_DEVICE_ONLY T atomic_fetch_sub_explicit(atomic<T, Allocator>* obj, const typename atomic<T, Allocator>::difference_type arg, const memory_order order) { return obj->fetch_sub(arg, order); } template <typename T, typename Allocator> inline STDGPU_DEVICE_ONLY T atomic_fetch_and(atomic<T, Allocator>* obj, const typename atomic<T, Allocator>::difference_type arg) { return obj->fetch_and(arg); } template <typename T, typename Allocator> inline STDGPU_DEVICE_ONLY T atomic_fetch_and_explicit(atomic<T, Allocator>* obj, const typename atomic<T, Allocator>::difference_type arg, const memory_order order) { return obj->fetch_and(arg, order); } template <typename T, typename Allocator> inline STDGPU_DEVICE_ONLY T atomic_fetch_or(atomic<T, Allocator>* obj, const typename atomic<T, Allocator>::difference_type arg) { return obj->fetch_or(arg); } template <typename T, typename Allocator> inline STDGPU_DEVICE_ONLY T atomic_fetch_or_explicit(atomic<T, Allocator>* obj, const typename atomic<T, Allocator>::difference_type arg, const memory_order order) { return obj->fetch_or(arg, order); } template <typename T, typename Allocator> inline STDGPU_DEVICE_ONLY T atomic_fetch_xor(atomic<T, Allocator>* obj, const typename atomic<T, Allocator>::difference_type arg) { return obj->fetch_xor(arg); } template <typename T, typename Allocator> inline STDGPU_DEVICE_ONLY T atomic_fetch_xor_explicit(atomic<T, Allocator>* obj, const typename atomic<T, Allocator>::difference_type arg, const memory_order order) { return obj->fetch_xor(arg, order); } } // namespace stdgpu #endif // STDGPU_ATOMIC_DETAIL_H
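// The fetch_* members above all follow the same shape: a backend hardware atomic
// bracketed by detail::atomic_load_thread_fence / detail::atomic_store_thread_fence
// calls that map the requested memory_order onto device-wide fences. Below is a
// minimal, standalone CUDA sketch of that pattern; it does not use the stdgpu API,
// and the kernel, counter and helper names are illustrative assumptions only.
#include <cstdio>
#include <cuda_runtime.h>

__device__ int g_counter = 0;

// Hardware read-modify-write bracketed by device-wide fences, approximating the
// fence-before / fence-after bracketing done by the stdgpu detail functions.
__device__ int fetch_add_fenced(int* addr, int arg)
{
    __threadfence();                 // fence before the RMW (release side)
    int old = atomicAdd(addr, arg);  // returns the previous value, like fetch_add
    __threadfence();                 // fence after the RMW (acquire side)
    return old;
}

__global__ void count_kernel()
{
    fetch_add_fenced(&g_counter, 1);
}

int main()
{
    count_kernel<<<4, 64>>>();       // 256 threads, each adds 1
    cudaDeviceSynchronize();
    int host_value = 0;
    cudaMemcpyFromSymbol(&host_value, g_counter, sizeof(int));
    printf("counter = %d (expected 256)\n", host_value);
    return 0;
}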
#include <cuda.h> #include <device_launch_parameters.h> #include <texture_fetch_functions.h> #include <builtin_types.h> #include <vector_functions.h> #include <math_functions.h> #include <float.h> #include <cuComplex.h> #include "Reduction\f_dot_f.cuh" #include "Reduction\Reduction.cu" #define ADD 0 #define SUB 1 #define MUL 2 #define AND 3 #define OR 4 #define OR_THRESHOLD 5 #define XOR 6 #define XNOR 7 #define IMP 8 #define PERM 9 #define INV_PERM 10 #define MODULO 11 #define DIVISION_INT 12 #define EQUAL 13 #define MAX_OPERANDS 20 #define MAX_SYMBOL_SIZE 4096 extern "C" { //kernel code performs no binarity checks __global__ void CombineVectorsKernel(float** inputs, int inputsCount, float* output, int method, int count) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid + blockDim.x*blockIdx.x //blocks preceeding current block + threadIdx.x; if(threadId >= count) return; float out = inputs[0][threadId]; switch (method) { case SUB: for (int i = 1; i < inputsCount; i++) out -= inputs[i][threadId]; break; case ADD: for (int i = 1; i < inputsCount; i++) out += inputs[i][threadId]; break; case AND: case MUL: for (int i = 1; i < inputsCount; i++) out *= inputs[i][threadId]; break; case OR: for (int i = 1; i < inputsCount; i++) out += inputs[i][threadId]; out = out >= 1; break; case OR_THRESHOLD: for (int i = 1; i < inputsCount; i++) out += inputs[i][threadId]; out = out >= (inputsCount * 0.5f); break; case XOR: for (int i = 1; i < inputsCount; i++) out += inputs[i][threadId]; out = ((int)out) % 2; break; case XNOR: for (int i = 1; i < inputsCount; i++) out += inputs[i][threadId]; out = ((int)out + 1) % 2; break; case PERM: __shared__ float tmp[MAX_SYMBOL_SIZE]; tmp[threadId] = out; __threadfence(); for (int i = 1; i < inputsCount; i++) { float val = tmp[__float2int_rn(inputs[i][threadId])]; __syncthreads(); tmp[threadId] = val; __threadfence(); } out = tmp[threadId]; break; case INV_PERM: __shared__ float i_tmp[MAX_SYMBOL_SIZE]; i_tmp[threadId] = out; __threadfence(); for (int i = 1; i < inputsCount; i++) { int idx = __float2int_rn(inputs[i][threadId]); float val = i_tmp[threadId]; __syncthreads(); i_tmp[idx] = val; __threadfence(); } out = i_tmp[threadId]; break; case EQUAL: // Warning: uses a strict equality comparison on floats { bool eq = true; for (int i = 1; eq && (i < inputsCount); i++) { eq = (eq && (out == inputs[i][threadId])); } out = eq ? 1.0f : 0.0f; break; } default: break; } output[threadId] = out; } __device__ __forceinline__ void CombineTwoVectorsInternal(const float& input1, const float& input2, float& output, int method) { switch (method) { case SUB: { output = input1 - input2; break; } case ADD: { output = input1 + input2; break; } case AND: case MUL: { output = input1 * input2; break; } case OR: case OR_THRESHOLD: { output = (input1 + input2) >= 1; break; } case XOR: { output = (input1 + input2) == 1; break; } case XNOR: { output = (input1 + input2) != 1; break; } case IMP: { output = input1 <= input2; break; } case MODULO: { int mod = __float2int_rn(input2); int n = __float2int_rd(input1 / mod); output = input1 - mod * n; break; } case DIVISION_INT: { output = __float2int_rz(input1 / input2); break; } case EQUAL: { output = (input1 == input2) ? 
1.0f : 0.0f; break; } default: break; } } __global__ void CombineTwoVectorsKernel(const float* input1, const float* input2, float* output, int method, int count) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid + blockDim.x*blockIdx.x //blocks preceeding current block + threadIdx.x; if (threadId >= count) return; switch (method) { case PERM: { float tmp = input1[(int)input2[threadId]]; if (input1 == output) __threadfence(); output[threadId] = tmp; break; } case INV_PERM: { int idx = (int)input2[threadId]; if (input1 == output) __threadfence(); output[idx] = input1[threadId]; break; } default: CombineTwoVectorsInternal(input1[threadId], input2[threadId], output[threadId], method); break; } } __device__ __forceinline__ void CombineTwoVectorsKernelVarSizeInternal(const float* input1, const float* input2, float* output, int method, int count1, int count2) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid + blockDim.x*blockIdx.x //blocks preceeding current block + threadIdx.x; switch (method) { case PERM: { if (count2 > count1) return; float tmp = input1[(int)input2[threadId]]; if (input1 == output) __threadfence(); output[threadId] = tmp; break; } case INV_PERM: { if (count2 > count1) return; int idx = (int)input2[threadId]; if (input1 == output) __threadfence(); output[idx] = input1[threadId]; break; } default: { int minCount = count1 <= count2 ? count1 : count2; if (threadId < minCount) { CombineTwoVectorsInternal(input1[threadId], input2[threadId], output[threadId], method); return; } if (count1 > count2) { if (threadId < count1) output[threadId] = input1[threadId]; } else if (count2 > count1) { if (threadId < count2) output[threadId] = method == SUB ? -input2[threadId] : input2[threadId]; } break; } } } __global__ void CombineTwoVectorsKernelVarSize(float* input1, float* input2, float* output, int method, int count1, int count2) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid + blockDim.x*blockIdx.x //blocks preceeding current block + threadIdx.x; if (count1 > 1) { if (count2 > 1) { CombineTwoVectorsKernelVarSizeInternal(input1, input2, output, method, count1, count2); } else if (threadId < count1) { CombineTwoVectorsInternal(input1[threadId], input2[0], output[threadId], method); } } else { if (count2 > 1 && threadId < count2) { CombineTwoVectorsInternal(input1[0], input2[threadId], output[threadId], method); } else { CombineTwoVectorsInternal(input1[0], input2[0], output[threadId], method); } } } __global__ void AddToIdcs(float* source, const float* idcs, float* target, int method, int idcsCount) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid + blockDim.x*blockIdx.x //blocks preceeding current block + threadIdx.x; if (threadId >= idcsCount) // Should be true: idcsCount == sourceCount return; float& tar = target[__float2int_rn(idcs[threadId])]; float& src = source[threadId]; switch (method) { case ADD: atomicAdd(&tar, src); break; case SUB: atomicAdd(&tar, -src); break; case OR: tar = src; break; default: break; } } __global__ void MapToIdcs(float* source, float* sourceLengthSq, const float* idcs, float* target, int idcsCount) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid + blockDim.x*blockIdx.x //blocks preceeding current block + threadIdx.x; if (threadId >= idcsCount) // Should be true: idcsCount == sourceCount return; float& tar = target[__float2int_rn(idcs[threadId])]; float& src = 
source[threadId]; float len = *sourceLengthSq; if (len < 0.0000001f) return; len = 1 / sqrtf(len); // Write the normalized vector back to output CombineTwoVectorsInternal(src, len, tar, MUL); } __global__ void LengthFromElements(float* element1, float* element2, float* output, int count) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid + blockDim.x*blockIdx.x //blocks preceeding current block + threadIdx.x; if(threadId < count) { output[threadId] = sqrtf(element1[threadId] * element1[threadId] + element2[threadId] * element2[threadId]); } } __global__ void MulComplexElementWise(cuFloatComplex* input1, cuFloatComplex* input2, cuFloatComplex* output, int count) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid + blockDim.x*blockIdx.x //blocks preceeding current block + threadIdx.x; if(threadId < count) { cuFloatComplex i1 = input1[threadId]; cuFloatComplex i2 = input2[threadId]; output[threadId] = cuCmulf(i1, i2); } } __global__ void InvolveVector(float* input, float* output, int inputSize) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid + blockDim.x*blockIdx.x //blocks preceeding current block + threadIdx.x; if(threadId < inputSize - 1) { output[0] = input[0]; output[threadId + 1] = input[inputSize - threadId - 1]; } } __global__ void Interpolate(float* input1, float* input2, float* output, float weight, int inputSize) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid + blockDim.x*blockIdx.x //blocks preceeding current block + threadIdx.x; if(threadId < inputSize) { if (weight <= 0) { output[threadId] = input1[threadId]; } else if (weight >= 1) { output[threadId] = input2[threadId]; } else { output[threadId] = (1 - weight) * input1[threadId] + weight * input2[threadId]; } } } __global__ void InterpolateFromMemBlock(float* input1, float* input2, float* output, float* weightMemBlock, int inputSize) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid + blockDim.x*blockIdx.x //blocks preceeding current block + threadIdx.x; if(threadId < inputSize) { if (weightMemBlock[0] <= 0) { output[threadId] = input1[threadId]; } else if (weightMemBlock[0] >= 1) { output[threadId] = input2[threadId]; } else { output[threadId] = (1 - weightMemBlock[0]) * input1[threadId] + weightMemBlock[0] * input2[threadId]; } } } // naive mat. multiplication // TODO: rewrite it with sync_threads... :) Check out nvida dev-blog or TestFeat/HMath.cu how it will be... __global__ void MatMultipl_naive (float * A, float * B, float * C , int nColsA , int nColsB , int sizeC ) { int i_col = blockIdx.x * blockDim.x + threadIdx.x; /// index in row int i_row = blockIdx.y * blockDim.y + threadIdx.y; /// index in column int idx = i_row * nColsB + i_col; // # of cols in B = # of cols in C float Cvalue = 0; if (idx < sizeC){ for (int e=0; e < nColsA; e++) Cvalue += A[i_row * nColsA + e] * B[e * nColsB + i_col]; C[idx] = Cvalue; } } }
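// The kernels above are bare extern "C" __global__ entry points with no host code
// in this file. The following is a minimal host-side sketch of how
// CombineTwoVectorsKernel could be driven; it assumes it is compiled in the same
// translation unit as the kernels above, and the flat 1-D launch (gridDim.y == 1)
// is an assumption under which the threadId formula collapses to the usual
// blockIdx.x * blockDim.x + threadIdx.x. The helper name is illustrative.
#include <algorithm>
#include <cstdio>
#include <vector>

static void RunCombineTwoVectorsAddExample()
{
    const int count = 1 << 10;
    float *d_a = nullptr, *d_b = nullptr, *d_out = nullptr;
    cudaMalloc(&d_a, count * sizeof(float));
    cudaMalloc(&d_b, count * sizeof(float));
    cudaMalloc(&d_out, count * sizeof(float));

    // Fill the inputs with 1.0f and 2.0f respectively.
    std::vector<float> host(count, 1.0f);
    cudaMemcpy(d_a, host.data(), count * sizeof(float), cudaMemcpyHostToDevice);
    std::fill(host.begin(), host.end(), 2.0f);
    cudaMemcpy(d_b, host.data(), count * sizeof(float), cudaMemcpyHostToDevice);

    const int threads = 256;
    const int blocks = (count + threads - 1) / threads;
    CombineTwoVectorsKernel<<<blocks, threads>>>(d_a, d_b, d_out, ADD, count);
    cudaDeviceSynchronize();

    cudaMemcpy(host.data(), d_out, count * sizeof(float), cudaMemcpyDeviceToHost);
    printf("out[0] = %f (expected 3.0)\n", host[0]);

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_out);
}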
#include <assert.h> #include <math.h> #include <stdio.h> #include <torch/serialize/tensor.h> #include <torch/types.h> #define THREADS_PER_BLOCK 256 #define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) // #define DEBUG __device__ inline void lidar_to_local_coords(float shift_x, float shift_y, float rz, float &local_x, float &local_y) { // should rotate pi/2 + alpha to translate LiDAR to local float rot_angle = rz + M_PI / 2; float cosa = cos(rot_angle), sina = sin(rot_angle); local_x = shift_x * cosa + shift_y * (-sina); local_y = shift_x * sina + shift_y * cosa; } __device__ inline int check_pt_in_box3d(const float *pt, const float *box3d, float &local_x, float &local_y) { // param pt: (x, y, z) // param box3d: (cx, cy, cz, w, l, h, rz) in LiDAR coordinate, cz in the // bottom center float x = pt[0], y = pt[1], z = pt[2]; float cx = box3d[0], cy = box3d[1], cz = box3d[2]; float w = box3d[3], l = box3d[4], h = box3d[5], rz = box3d[6]; cz += h / 2.0; // shift to the center since cz in box3d is the bottom center if (fabsf(z - cz) > h / 2.0) return 0; lidar_to_local_coords(x - cx, y - cy, rz, local_x, local_y); float in_flag = (local_x > -l / 2.0) & (local_x < l / 2.0) & (local_y > -w / 2.0) & (local_y < w / 2.0); return in_flag; } __global__ void generate_pts_mask_for_box3d(int boxes_num, int pts_num, int out_x, int out_y, int out_z, const float *rois, const float *pts, int *pts_mask) { // params rois: (N, 7) [x, y, z, w, l, h, rz] in LiDAR coordinate // params pts: (npoints, 3) [x, y, z] // params pts_mask: (N, npoints): -1 means point doesnot in this box, // otherwise: encode (x_idxs, y_idxs, z_idxs) by binary bit int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; int box_idx = blockIdx.y; if (pt_idx >= pts_num || box_idx >= boxes_num) return; pts += pt_idx * 3; rois += box_idx * 7; pts_mask += box_idx * pts_num + pt_idx; float local_x = 0, local_y = 0; int cur_in_flag = check_pt_in_box3d(pts, rois, local_x, local_y); pts_mask[0] = -1; if (cur_in_flag > 0) { float local_z = pts[2] - rois[2]; float w = rois[3], l = rois[4], h = rois[5]; float x_res = l / out_x; float y_res = w / out_y; float z_res = h / out_z; unsigned int x_idx = int((local_x + l / 2) / x_res); unsigned int y_idx = int((local_y + w / 2) / y_res); unsigned int z_idx = int(local_z / z_res); x_idx = min(max(x_idx, 0), out_x - 1); y_idx = min(max(y_idx, 0), out_y - 1); z_idx = min(max(z_idx, 0), out_z - 1); unsigned int idx_encoding = (x_idx << 16) + (y_idx << 8) + z_idx; #ifdef DEBUG printf( "mask: pts_%d(%.3f, %.3f, %.3f), local(%.3f, %.3f, %.3f), idx(%d, %d, " "%d), res(%.3f, %.3f, %.3f), idx_encoding=%x\n", pt_idx, pts[0], pts[1], pts[2], local_x, local_y, local_z, x_idx, y_idx, z_idx, x_res, y_res, z_res, idx_encoding); #endif pts_mask[0] = idx_encoding; } } __global__ void collect_inside_pts_for_box3d(int boxes_num, int pts_num, int max_pts_each_voxel, int out_x, int out_y, int out_z, const int *pts_mask, int *pts_idx_of_voxels) { // params pts_mask: (N, npoints) 0 or 1 // params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel) int box_idx = blockIdx.x * blockDim.x + threadIdx.x; if (box_idx >= boxes_num) return; int max_num_pts = max_pts_each_voxel - 1; // index 0 is the counter pts_idx_of_voxels += box_idx * out_x * out_y * out_z * max_pts_each_voxel; for (int k = 0; k < pts_num; k++) { if (pts_mask[box_idx * pts_num + k] != -1) { unsigned int idx_encoding = pts_mask[box_idx * pts_num + k]; unsigned int x_idx = (idx_encoding >> 16) & 0xFF; unsigned int y_idx = (idx_encoding >> 8) & 0xFF; unsigned int z_idx 
= idx_encoding & 0xFF; unsigned int base_offset = x_idx * out_y * out_z * max_pts_each_voxel + y_idx * out_z * max_pts_each_voxel + z_idx * max_pts_each_voxel; unsigned int cnt = pts_idx_of_voxels[base_offset]; if (cnt < max_num_pts) { pts_idx_of_voxels[base_offset + cnt + 1] = k; pts_idx_of_voxels[base_offset]++; } #ifdef DEBUG printf("collect: pts_%d, idx(%d, %d, %d), idx_encoding=%x\n", k, x_idx, y_idx, z_idx, idx_encoding); #endif } } } __global__ void roiaware_maxpool3d(int boxes_num, int pts_num, int channels, int max_pts_each_voxel, int out_x, int out_y, int out_z, const float *pts_feature, const int *pts_idx_of_voxels, float *pooled_features, int *argmax) { // params pts_feature: (npoints, C) // params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel), // index 0 is the counter params pooled_features: (N, out_x, out_y, out_z, C) // params argmax: (N, out_x, out_y, out_z, C) int box_idx = blockIdx.z; int channel_idx = blockIdx.y; int voxel_idx_flat = blockIdx.x * blockDim.x + threadIdx.x; int x_idx = voxel_idx_flat / (out_y * out_z); int y_idx = (voxel_idx_flat - x_idx * (out_y * out_z)) / out_z; int z_idx = voxel_idx_flat % out_z; if (box_idx >= boxes_num || channel_idx >= channels || x_idx >= out_x || y_idx >= out_y || z_idx >= out_z) return; #ifdef DEBUG printf("src pts_idx_of_voxels: (%p, ), argmax: %p\n", pts_idx_of_voxels, argmax); #endif int offset_base = x_idx * out_y * out_z + y_idx * out_z + z_idx; pts_idx_of_voxels += box_idx * out_x * out_y * out_z * max_pts_each_voxel + offset_base * max_pts_each_voxel; pooled_features += box_idx * out_x * out_y * out_z * channels + offset_base * channels + channel_idx; argmax += box_idx * out_x * out_y * out_z * channels + offset_base * channels + channel_idx; int argmax_idx = -1; float max_val = -1e50; int total_pts = pts_idx_of_voxels[0]; for (int k = 1; k <= total_pts; k++) { if (pts_feature[pts_idx_of_voxels[k] * channels + channel_idx] > max_val) { max_val = pts_feature[pts_idx_of_voxels[k] * channels + channel_idx]; argmax_idx = pts_idx_of_voxels[k]; } } if (argmax_idx != -1) { pooled_features[0] = max_val; } argmax[0] = argmax_idx; #ifdef DEBUG printf( "channel_%d idx(%d, %d, %d), argmax_idx=(%d, %.3f), total=%d, after " "pts_idx: %p, argmax: (%p, %d)\n", channel_idx, x_idx, y_idx, z_idx, argmax_idx, max_val, total_pts, pts_idx_of_voxels, argmax, argmax_idx); #endif } __global__ void roiaware_avgpool3d(int boxes_num, int pts_num, int channels, int max_pts_each_voxel, int out_x, int out_y, int out_z, const float *pts_feature, const int *pts_idx_of_voxels, float *pooled_features) { // params pts_feature: (npoints, C) // params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel), // index 0 is the counter params pooled_features: (N, out_x, out_y, out_z, C) // params argmax: (N, out_x, out_y, out_z, C) int box_idx = blockIdx.z; int channel_idx = blockIdx.y; int voxel_idx_flat = blockIdx.x * blockDim.x + threadIdx.x; int x_idx = voxel_idx_flat / (out_y * out_z); int y_idx = (voxel_idx_flat - x_idx * (out_y * out_z)) / out_z; int z_idx = voxel_idx_flat % out_z; if (box_idx >= boxes_num || channel_idx >= channels || x_idx >= out_x || y_idx >= out_y || z_idx >= out_z) return; int offset_base = x_idx * out_y * out_z + y_idx * out_z + z_idx; pts_idx_of_voxels += box_idx * out_x * out_y * out_z * max_pts_each_voxel + offset_base * max_pts_each_voxel; pooled_features += box_idx * out_x * out_y * out_z * channels + offset_base * channels + channel_idx; float sum_val = 0; int total_pts = pts_idx_of_voxels[0]; for 
(int k = 1; k <= total_pts; k++) { sum_val += pts_feature[pts_idx_of_voxels[k] * channels + channel_idx]; } if (total_pts > 0) { pooled_features[0] = sum_val / total_pts; } } void roiaware_pool3d_launcher(int boxes_num, int pts_num, int channels, int max_pts_each_voxel, int out_x, int out_y, int out_z, const float *rois, const float *pts, const float *pts_feature, int *argmax, int *pts_idx_of_voxels, float *pooled_features, int pool_method) { // params rois: (N, 7) [x, y, z, w, l, h, rz] in LiDAR coordinate // params pts: (npoints, 3) [x, y, z] in LiDAR coordinate // params pts_feature: (npoints, C) // params argmax: (N, out_x, out_y, out_z, C) // params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel) // params pooled_features: (N, out_x, out_y, out_z, C) // params pool_method: 0: max_pool 1: avg_pool int *pts_mask = NULL; cudaMalloc(&pts_mask, boxes_num * pts_num * sizeof(int)); // (N, M) cudaMemset(pts_mask, -1, boxes_num * pts_num * sizeof(int)); dim3 blocks_mask(DIVUP(pts_num, THREADS_PER_BLOCK), boxes_num); dim3 threads(THREADS_PER_BLOCK); generate_pts_mask_for_box3d<<<blocks_mask, threads>>>( boxes_num, pts_num, out_x, out_y, out_z, rois, pts, pts_mask); // TODO: Merge the collect and pool functions, SS dim3 blocks_collect(DIVUP(boxes_num, THREADS_PER_BLOCK)); collect_inside_pts_for_box3d<<<blocks_collect, threads>>>( boxes_num, pts_num, max_pts_each_voxel, out_x, out_y, out_z, pts_mask, pts_idx_of_voxels); dim3 blocks_pool(DIVUP(out_x * out_y * out_z, THREADS_PER_BLOCK), channels, boxes_num); if (pool_method == 0) { roiaware_maxpool3d<<<blocks_pool, threads>>>( boxes_num, pts_num, channels, max_pts_each_voxel, out_x, out_y, out_z, pts_feature, pts_idx_of_voxels, pooled_features, argmax); } else if (pool_method == 1) { roiaware_avgpool3d<<<blocks_pool, threads>>>( boxes_num, pts_num, channels, max_pts_each_voxel, out_x, out_y, out_z, pts_feature, pts_idx_of_voxels, pooled_features); } cudaFree(pts_mask); #ifdef DEBUG cudaDeviceSynchronize(); // for using printf in kernel function #endif } __global__ void roiaware_maxpool3d_backward(int boxes_num, int channels, int out_x, int out_y, int out_z, const int *argmax, const float *grad_out, float *grad_in) { // params argmax: (N, out_x, out_y, out_z, C) // params grad_out: (N, out_x, out_y, out_z, C) // params grad_in: (npoints, C), return value int box_idx = blockIdx.z; int channel_idx = blockIdx.y; int voxel_idx_flat = blockIdx.x * blockDim.x + threadIdx.x; int x_idx = voxel_idx_flat / (out_y * out_z); int y_idx = (voxel_idx_flat - x_idx * (out_y * out_z)) / out_z; int z_idx = voxel_idx_flat % out_z; if (box_idx >= boxes_num || channel_idx >= channels || x_idx >= out_x || y_idx >= out_y || z_idx >= out_z) return; int offset_base = x_idx * out_y * out_z + y_idx * out_z + z_idx; argmax += box_idx * out_x * out_y * out_z * channels + offset_base * channels + channel_idx; grad_out += box_idx * out_x * out_y * out_z * channels + offset_base * channels + channel_idx; if (argmax[0] == -1) return; atomicAdd(grad_in + argmax[0] * channels + channel_idx, grad_out[0] * 1); } __global__ void roiaware_avgpool3d_backward(int boxes_num, int channels, int out_x, int out_y, int out_z, int max_pts_each_voxel, const int *pts_idx_of_voxels, const float *grad_out, float *grad_in) { // params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel) // params grad_out: (N, out_x, out_y, out_z, C) // params grad_in: (npoints, C), return value int box_idx = blockIdx.z; int channel_idx = blockIdx.y; int voxel_idx_flat = blockIdx.x * 
blockDim.x + threadIdx.x; int x_idx = voxel_idx_flat / (out_y * out_z); int y_idx = (voxel_idx_flat - x_idx * (out_y * out_z)) / out_z; int z_idx = voxel_idx_flat % out_z; if (box_idx >= boxes_num || channel_idx >= channels || x_idx >= out_x || y_idx >= out_y || z_idx >= out_z) return; int offset_base = x_idx * out_y * out_z + y_idx * out_z + z_idx; pts_idx_of_voxels += box_idx * out_x * out_y * out_z * max_pts_each_voxel + offset_base * max_pts_each_voxel; grad_out += box_idx * out_x * out_y * out_z * channels + offset_base * channels + channel_idx; int total_pts = pts_idx_of_voxels[0]; float cur_grad = 1 / fmaxf(float(total_pts), 1.0); for (int k = 1; k <= total_pts; k++) { atomicAdd(grad_in + pts_idx_of_voxels[k] * channels + channel_idx, grad_out[0] * cur_grad); } } void roiaware_pool3d_backward_launcher(int boxes_num, int out_x, int out_y, int out_z, int channels, int max_pts_each_voxel, const int *pts_idx_of_voxels, const int *argmax, const float *grad_out, float *grad_in, int pool_method) { // params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel) // params argmax: (N, out_x, out_y, out_z, C) // params grad_out: (N, out_x, out_y, out_z, C) // params grad_in: (npoints, C), return value // params pool_method: 0: max_pool, 1: avg_pool dim3 blocks(DIVUP(out_x * out_y * out_z, THREADS_PER_BLOCK), channels, boxes_num); dim3 threads(THREADS_PER_BLOCK); if (pool_method == 0) { roiaware_maxpool3d_backward<<<blocks, threads>>>( boxes_num, channels, out_x, out_y, out_z, argmax, grad_out, grad_in); } else if (pool_method == 1) { roiaware_avgpool3d_backward<<<blocks, threads>>>( boxes_num, channels, out_x, out_y, out_z, max_pts_each_voxel, pts_idx_of_voxels, grad_out, grad_in); } }
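// generate_pts_mask_for_box3d packs a voxel coordinate into one int as
// (x_idx << 16) + (y_idx << 8) + z_idx, and collect_inside_pts_for_box3d unpacks
// it with 8-bit masks, so each of out_x / out_y / out_z must not exceed 256.
// Below is a small host-only sketch of that round trip; the helper names are
// illustrative and not part of the launcher API above.
#include <cassert>
#include <cstdio>

static inline unsigned int EncodeVoxel(unsigned int x, unsigned int y, unsigned int z)
{
    return (x << 16) + (y << 8) + z;  // same packing as generate_pts_mask_for_box3d
}

static inline void DecodeVoxel(unsigned int code, unsigned int &x, unsigned int &y,
                               unsigned int &z)
{
    x = (code >> 16) & 0xFF;          // same unpacking as collect_inside_pts_for_box3d
    y = (code >> 8) & 0xFF;
    z = code & 0xFF;
}

static void VoxelEncodingExample()
{
    unsigned int x = 0, y = 0, z = 0;
    unsigned int code = EncodeVoxel(11, 7, 3);
    DecodeVoxel(code, x, y, z);
    assert(x == 11 && y == 7 && z == 3);
    printf("encoded=%#x decoded=(%u, %u, %u)\n", code, x, y, z);
}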
#include "k2/csrc/math.h" #include "k2/csrc/utils.h" namespace k2 { // See FillValues() where this is invoked. It fills a region with // a constant value. __global__ void FillValuesKernel(int32_t *data, int32_t num_values, int32_t value) { int32_t job_idx = (blockIdx.x * blockDim.x + threadIdx.x), stride = (gridDim.x * blockDim.x); for (; job_idx < num_values; job_idx += stride) data[job_idx] = value; } // This launches a kernel. It's the same as doing: // for (int32_t i = 0; i < num_values; i++) data[i] = value; __device__ void FillValues(int32_t *data, int32_t num_values, int32_t value) { int32_t block_size = 256; int32_t grid_size = NumBlocks(num_values, block_size); FillValuesKernel<<<grid_size, block_size>>>(data, num_values, value); } // When we invoke this we make a big enough grid that there doesn't have to // be a loop over rows, i.e. (gridDim.x * blockDim.x) / threads_per_row >= // num_rows __global__ void RowSplitsToRowIdsKernel(int32_t num_rows, int32_t threads_per_row, const int32_t *row_splits, int32_t num_elems, int32_t *row_ids) { int32_t thread = blockIdx.x * blockDim.x + threadIdx.x, num_threads = gridDim.x * blockDim.x, row = thread / threads_per_row, thread_this_row = thread % threads_per_row; if (row >= num_rows) return; K2_CHECK_GE(num_threads / threads_per_row, num_rows); int32_t this_row_split = row_splits[row], next_row_split = row_splits[row + 1], row_length = next_row_split - this_row_split; const int32_t max_loop = 8; // `max_loop` is heuristically chosen. if (row_length / threads_per_row > max_loop) { // We decide that looping too many times will be too slow, so we launch // another kernel to fill in the value for this row. (This is CUDA dynamic // parallelism). if (thread_this_row == 0) { FillValues(row_ids + this_row_split, row_length, row); } } else { // TODO(dan): figure out how to unroll this? for (; thread_this_row < row_length; thread_this_row += threads_per_row) row_ids[this_row_split + thread_this_row] = row; } } /* See declaration of RowSplitsToRowIds() in utils.h. These are implementation notes. Suppose the range we need to fill with a particular number (say, x) is from 1010 to 10000 inclusive (binary) The first kernel writes x to positions 1010, 1100, 10000; the significance of that sequence is we keep adding the smallest number we can add to get another zero at the end of the binary representation, until we exceed the range we're supposed to fill. The second kernel: for a given index into x that is must fill (say, 1111), it asks "is the index currently here already the right one?", which it can test using the function is_valid_index() below; if it's not already correct, it searches in a sequence of positions: 1110, 1100, 1000, 0000, like our sequence above but going downwards, again getting more zeros at the end of the binary representation, until it finds the correct value in the array at the searched position; then it copies the discovered value the original position requested (here, 1111). First kernel pseudocode: for each index 'i' into 't', it does: for (int32_t n=0, j = t[i]; j < t[i+1]; n++) { x[j] = i; if (j & (1<<n)) j += (1 << n); } Second kernel pseudocode: for each element of x, it searches for the right index. Suppose we're given num_indexes == length(n) == length(t) - 1. Define is_valid_index as follows: // returns true if j is the value that we should be putting at position 'i' in x: // that is, if t[j] <= i < t[j+1]. 
bool is_valid_index(i, j) { return (j >= 0 && j < num_indexes && t[j] <= i && i < t[j+1]); } // We suppose we are given i (the position into x that we're responsible for // setting: orig_i = i; for (int32_t n=0; !is_valid_index(i, x[i]); n++) { if (i & (1<<n)) i -= (1 << n); } x[orig_i] = x[i]; */ void RowSplitsToRowIds(ContextPtr &c, int32_t num_rows, const int32_t *row_splits, int32_t num_elems, int32_t *row_ids) { if (num_rows <= 0 || num_elems <= 0) return; DeviceType d = c->GetDeviceType(); if (d == kCpu) { int32_t cur_row_start = row_splits[0]; K2_CHECK_EQ(cur_row_start, 0); K2_CHECK_EQ(row_splits[num_rows], num_elems); for (int32_t row = 0; row < num_rows; ++row) { int32_t next_row_start = row_splits[row + 1]; for (; cur_row_start < next_row_start; ++cur_row_start) row_ids[cur_row_start] = row; } } else { K2_CHECK_EQ(d, kCuda); if (1) { // TODO: compare this for speed with the other branch. This is branch is // much simpler, and will be considerably faster for "normal" cases -> // probably preferred. int32_t avg_elems_per_row = (num_elems + num_rows - 1) / num_rows, threads_per_row = RoundUpToNearestPowerOfTwo(avg_elems_per_row), tot_threads = num_rows * threads_per_row; int32_t block_size = 256; int32_t grid_size = NumBlocks(tot_threads, block_size); K2_CUDA_SAFE_CALL(RowSplitsToRowIdsKernel<<<grid_size, block_size, 0, c->GetCudaStream()>>>( num_rows, threads_per_row, row_splits, num_elems, row_ids)); } else { // TODO: Will probably just delete this branch at some point. // The following algorithm isn't particularly adapted to GPU hardware in // terms of coalesced reads and writes and so on, but it has reasonable // asymptotic time complexity (assuming all kernels run in parallel), // specifically: O(log(largest(row_splits[i+1]-row_splits[i]))) auto lambda_init_minus_one = [=] __host__ __device__(int32_t i) { row_ids[i] = -1; }; Eval(c, num_elems + 1, lambda_init_minus_one); auto lambda_phase_one = [=] __host__ __device__(int32_t i) { int32_t this_row_split = row_splits[i], next_row_split = (i < num_rows ? row_splits[i + 1] : this_row_split + 1); if (this_row_split < next_row_split) row_ids[this_row_split] = i; // we have to fill in row_ids[this_row_split], // row_ids[this_row_split+1]... row_ids[next_row_split-1] with the same // value but that could be a long loop. Instead we write at // this_row_split and all indexes this_row_split < i < next_row_split // such that i is the result of rounding up this_row_split to // (something)*2^n, for n = 1, 2, 3, ... this will take time logarithmic // in (next_row_split - this_row_split). we can then fill in the gaps // with a logarithmic-time loop, by looking for a value that's not (-1) // by rounding the current index down to successively higher powers // of 2. for (int32_t power = 0, j = this_row_split; j + (1 << power) < next_row_split; power++) { if (j & (1 << power)) { j += (1 << power); // we know that j is now < next_row_split, because we checked "j + // (1<<power) < next_row_split" in the loop condition. // Note, we don't want a loop-within-a-loop because of how SIMT // works... 
row_ids[j] = i; } } }; Eval(c, num_elems + 1, lambda_phase_one); auto lambda_phase_two = [=] __host__ __device__(int32_t j) { int32_t row_index = row_ids[j]; if (row_index != -1) return; int32_t power = 0, j2 = j; for (; row_index != -1; power++) { if (j2 & (1 << power)) { j2 -= (1 << power); row_index = row_ids[j2]; } assert(power < 31); } row_ids[j] = row_ids[j2]; }; // could do the next line for num_elems+1, but the element at `num_elems` // will already be set. Eval(c, num_elems, lambda_phase_two); } } } /* When we invoke this we make a big enough grid that there doesn't have to be a loop over elements, i.e. (gridDim.x * blockDim.x) / threads_per_elem > num_elems. (must be >=, because we imagine a phantom element at [num_elems] with the value `num_rows`.) @param [in] num_elems Number of elements in ragged matrix @param [in] threads_per_elem Number of threads we allocate per element. Must be >= 1. @param [in] row_ids The row_ids vector, of length `num_elems`; must be nonnegative and non-decreasing and all elements < num_rows. @param [in] num_rows Number of rows, must be greater than the largest (== last) element of `row_ids`. @param [out] row_splits This kernel will output a non-decreasing vector of length num_rows + 1, such that row_splits[0] == 0, row_splits[num_rows] == num_elems, and row_splits[row_ids[i]] <= i < row_splits[row_ids[i]+1] */ __global__ void RowIdsToRowSplitsKernel(int32_t num_elems, int32_t threads_per_elem, const int32_t *row_ids, int32_t num_rows, int32_t *row_splits) { int32_t thread = (blockIdx.x * blockDim.x + threadIdx.x), num_threads = gridDim.x * blockDim.x, elem = thread / threads_per_elem, thread_this_elem = thread % threads_per_elem; K2_CHECK_GE(num_threads / threads_per_elem, num_elems); if (elem > num_elems) return; int32_t this_row, prev_row; if (elem == 0) { prev_row = -1; this_row = row_ids[elem]; } else if (elem == num_elems) { prev_row = row_ids[elem - 1]; this_row = num_rows; } else { prev_row = row_ids[elem - 1]; this_row = row_ids[elem]; } // `num_splits` is the number of splits we have to write, usually 0 or 1 // but in principle unlimited as there could be empty rows. The // relationship between row_ids and row_splits is more symmetric than // you might expect. int32_t num_splits = this_row - prev_row; const int32_t max_loop = 8; // `max_loop` is heuristically chosen. if (num_splits / threads_per_elem > max_loop) { if (thread_this_elem == 0) { FillValues(row_splits + prev_row + 1, num_splits, elem); } } else { // TODO(dan): figure out how to unroll this? for (; thread_this_elem < num_splits; thread_this_elem += threads_per_elem) row_splits[prev_row + 1 + thread_this_elem] = elem; } } // see declaration in utils.h for documentation. 
void RowIdsToRowSplits(ContextPtr &c, int32_t num_elems, const int32_t *row_ids, bool no_empty_rows, int32_t num_rows, int32_t *row_splits) { // process corner case first if (num_elems == 0) { auto lambda_set_values = [=] __host__ __device__(int32_t i) { row_splits[i] = 0; }; Eval(c, num_rows + 1, lambda_set_values); return; } DeviceType d = c->GetDeviceType(); if (d == kCpu) { int32_t cur_row = -1; for (int32_t i = 0; i < num_elems; i++) { int32_t row = row_ids[i]; K2_CHECK_GE(row, cur_row); while (cur_row < row) { cur_row++; row_splits[cur_row] = i; } } // cur_row must be >= 0 here as num_elems > 0 K2_CHECK_GE(cur_row, 0); while (cur_row < num_rows) { row_splits[++cur_row] = num_elems; } } else { K2_CHECK_EQ(d, kCuda); if (no_empty_rows) { auto lambda_simple = [=] __host__ __device__(int32_t i) { int32_t this_row = row_ids[i], prev_row; if (i > 0) { // (normal case) prev_row = row_ids[i - 1]; } else { // i == 0 row_splits[num_rows] = num_elems; prev_row = -1; } K2_CHECK_LE(this_row, prev_row + 1); // no_empty_rows was asserted by // the user if (this_row > prev_row) { row_splits[this_row] = i; } }; Eval(c, num_elems, lambda_simple); return; } else { // By doing "+ 2" instead of "+ 1" we increase the minimum number of // threads-per-row, which may reduce latency when there are successive // empty rows. Any value >= 1 is correct though. int32_t avg_rows_per_elem = num_rows / num_elems + 2, threads_per_elem = RoundUpToNearestPowerOfTwo(avg_rows_per_elem), tot_threads = num_elems * threads_per_elem; int32_t block_size = 256; int32_t grid_size = NumBlocks(tot_threads, block_size); K2_CUDA_SAFE_CALL(RowIdsToRowSplitsKernel<<<grid_size, block_size, 0, c->GetCudaStream()>>>( num_elems, threads_per_elem, row_ids, num_rows, row_splits)); } } } /* Called inside GetTaskRedirect(); see documentation of that in header. Each task with 0 <= task < num_tasks gets allocated `threads_per_job` threads, e.g. threads_per_job = 4 or 16. It's a kind of n-ary search (generalization of binary search) where each branch is handled by a different thread so they can happen in parallel. TODO(dan): there are a lot of opportunities to further optimize this using GPU hardware tricks. The thread-block size this is called with must be jobs_per_block * threads_per_job. */ /* template <int32_t jobs_per_block, int32_t threads_per_job> __global__ void GetTaskRedirect(int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { __shared__ int32_t temp[tasks_per_block]; // we do __syncwarp() for synchronization below; we require threads_per_job <= // 32 for this reason. static_assert(threads_per_job >= 2 && threads_per_job <= 32); // We have work to do for 0 <= job_idx < num_tasks, but be careful: job_idx // may be >= num_tasks if num_tasks is small or not a power of two (we don't // return because we need to do __syncwarp()). So we have to avoid out of // bounds memory access. int32_t job_idx = (blockIdx.x * blockDim.x + threadIdx.x) / threads_per_job; // `branch_idx` is which member we are of the group of the `threads_per_job` threads for this job. int32_t branch_idx = threadIdx.x % threads_per_job; // we assume blockDim.x % threads_per_job == 0 // `temp_idx` is which index in the temporary storage `temp` we are assigned // (one per job). int32_t temp_idx = threadIdx.x / threads_per_job; // TODO: we may at some point decide that row_splits[0] has to be zero. 
int32_t row_splits0 = row_splits[0], row_splits_nt = row_splits[num_tasks], num_items = row_splits_nt - row_splits0; if (num_items <= 0) { assert(num_items == 0); // This is a special case where there is no work to do; we give a trivial // assignment of tasks to jobs and return static_assert(threads_per_job >= 2); if (branch_idx < 2 && job_idx < num_tasks) { TaskRedirect tr { job_idx, 2, branch_idx }; redirect_out[job_idx + branch_idx * num_tasks] = tr; } return; } else if (branch_idx == 0 && job_idx < num_tasks) { // This code writes to the jobs in the first half of the output array, // that are allocated to the same-numbered task. int32_t task_idx = job_idx, this_row_split = row_splits[task_idx], next_row_split = row_splits[task_idx + 1]; // `num_jobs` below is the number of jobs that will be active for // this task. (The "1 +".. is the job that we assign for each // task, one job per task, in the "first half" of the jobs). // the job_idx we're working out below is the job_idx for the // "second half" of int32_t num_jobs_this_task = 1 + (next_row_split/dart_separation - this_row_split/dart_separation); TaskRedirect tr { task_idx, num_jobs_this_task, 0 }; redirect_out[task_idx] = tr; } // Now we have the less-trivial task of assigning the jobs in the 2nd half of the // output array to tasks (these are allocated roughly proportional to the amount // of work to do for that task). // We do the selection by throwing darts at a dart-board, evenly spaced, and seeing which task they correspond // to. There are `num_tasks` darts). // Note: we know dart_location < row_splits_nt because job_idx < num_tasks and // because integer division rounds down. int32_t dart_separation = num_items / num_tasks, dart_location = row_splits0 + job_idx * dart_separation; // OK, from this point the goal is to find a task_idx such that // row_splits[task_idx] <= dart_location < row_splits[task_idx + 1]. // This is guaranteed to exist, as long as job_id < num_tasks. // As long as job_id < num_tasks, we maintain the property that // row_splits[lower_bound] <= dart_location && // (upper_bound > num_tasks || row_splits[upper_bound] > dart_location). // (where upper_bound == lower_bound + range), i.e. they are truly // lower and upper bounds int32_t lower_bound = 0, range = num_tasks; // we are responsible for items lower_bound through // (upper_bound = lower_bound + range) - 1. while (range > threads_per_job) { int32_t upper_bound = lower_bound + range; // We need to narrow the range of `task_idx` that might be the correct one. // We round *up* because we require that task_idx_step * threads_per_job >= // range, so that we cover the entire range. int32_t task_idx_step = (range + threads_per_job - 1) / threads_per_job, // >= 2 my_lower_task_idx = lower_bound + branch_idx * task_idx_step, my_upper_task_idx = my_lower_task_idx + task_idx_step; // The following avoids out-of-bounds memory accesses. if (my_upper_task_idx > upper_bound) my_upper_task_idx = upper_bound; // TODO (dan): it may be possible to use one of those special within-warp // commands involving bitmaps to make the second comparison (dart_location < // row_splits[my_upper_task_idx]) unnecessary. if (my_lower_task_idx < num_tasks && row_splits[my_lower_task_idx] <= dart_location && dart_location < row_splits[my_upper_task_idx]) { // I am the "chosen branch" (exactly one will be chosen, as long as // job_idx < num_tasks). 
temp[temp_idx] = branch_idx; } __syncwarp(); int32_t chosen_branch_idx = temp[temp_idx]; lower_bound = lower_bound + chosen_branch_idx * task_idx_step; upper_bound = lower_bound + task_idx_step; range = task_idx_step; // note, we don't limit upper_bound to be <= num_tasks because we need all // threads in the block to go around the while loop the same number of // times. Therefore it's possible that upper_bound > num_tasks. K2_DASSERT(job_idx >= num_tasks || (row_splits[lower_bound] <= dart_location && (upper_bound > num_tasks || row_splits[upper_bound] > dart_location))); // TODO: remove once debugged. } int32_t task_idx = lower_bound + branch_idx; // TODO (dan): it may be possible to use one of those special within-warp // commands involving bitmaps to make the second comparison (dart_location < // row_splits[my_upper_task_idx]) unnecessary. // // The check `task_idx < num_tasks` is to avoid out-of-bounds access of row_splits. // The check `job_idx < num_tasks` is to avoid out-of-bounds access of `redirect_out`; // for these out-of-range job_idx values, it's possible for task_idx to have // any value since it may be uninitialized memory. if (task_idx < num_tasks && job_idx < num_tasks) { int32_t this_row_split = row_splits[task_idx], next_row_split = row_splits[task_idx + 1]; if (this_row_split <= dart_location && dart_location < next_row_split) { // OK, exactly one branch per job will reach this point. `num_jobs` below // is the number of jobs that will be active for this task. (The "1 // +".. is the job that we assign for each task, one job per task, in the // "first half" of the jobs). The job_id_this_task we're working out // below is the job_id within the second half of the TaskRedirects, // the half that are allocated by throwing darts. int32_t num_jobs_this_task = 1 + (next_row_split/dart_separation - this_row_split/dart_separation), job_idx_this_task = 1 + (dart_location - this_row_split)/dart_separation; K2_CHECK(job_id_this_task < num_jobs_this_task); TaskRedirect tr { task_idx, num_jobs_this_task, job_idx_this_task }; redirect_out[num_tasks + job_idx] = tr; } } } */ /* This is a quite simple implementation of GetTaskRedirect... I had a more complicated one above that had better O(N) performance for hard cases, but this one will handle more normal/smaller cases better, plus is easier to debug. The basic idea is to throw lots of threads at it, i.e. threads_per_task should be, say, twice larger than the average / expected number of jobs per task, so that if a task has lots of jobs it doesn't have to loop too many times. */ template <int32_t threads_per_task> __global__ void GetTaskRedirect(int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { int32_t thread = blockIdx.x * blockDim.x + threadIdx.x; int32_t task_idx = thread / threads_per_task; if (task_idx >= num_tasks) return; // `thread_idx` is which member we are of the group of the `threads_per_job` // threads for this job. int32_t thread_idx = thread % threads_per_task; int32_t row_splits0 = row_splits[0], row_splits_nt = row_splits[num_tasks], num_items = row_splits_nt - row_splits0; // the 'num_items' is the // total amount of work to // do, that we want to // distribute fairly evenly. // The idea with `dart_separation` is this: Half of the jobs we allocate to // the corresponding tasks. The other half we allocate by throwing darts onto // the interval [0, num_items - 1], evenly spaced starting from 0, and seeing // which tasks they land in. 
This is somewhat random but it ensures that if // any task has a very large amount of work to do, it will get a roughly // proportionate number of jobs. int32_t dart_separation = num_items / num_tasks; if (dart_separation <= 0) { // This is a special case where there is no work to do; we give a trivial // assignment of tasks to jobs and return static_assert(threads_per_task >= 2, "threads per task must >= 2"); if (thread_idx < 2) { TaskRedirect tr{task_idx, 2, static_cast<uint16_t>(thread_idx)}; redirect_out[task_idx + thread_idx * num_tasks] = tr; } return; } // TODO(dan): IDK how well the hardware combines these memory requests; could // consider loading to shared memory first. int32_t this_row_split = row_splits[task_idx], next_row_split = row_splits[task_idx + 1]; // `num_jobs` below is the number of jobs that will be active for // this task. (The "1 +".. is the job that we assign for each // task, one job per task, in the "first half" of the jobs). // the job_idx we're working out below is the job_idx for the // "second half" of int32_t num_jobs_this_task = 1 + (min(next_row_split / dart_separation, num_tasks) - min(this_row_split / dart_separation, num_tasks)); // function `min` is from cuda K2_CHECK_EQ(static_cast<int32_t>(static_cast<uint16_t>(num_jobs_this_task)), num_jobs_this_task); for (int32_t job_id_this_task = thread_idx; job_id_this_task < num_jobs_this_task; job_id_this_task += threads_per_task) { int32_t job_idx = (job_id_this_task == 0 ? task_idx : // 1st half num_tasks + (this_row_split / dart_separation) + job_id_this_task - 1); // 2nd half. redirect_out[job_idx] = TaskRedirect{task_idx, static_cast<uint16_t>(num_jobs_this_task), static_cast<uint16_t>(job_id_this_task)}; } } void GetTaskRedirect(cudaStream_t stream, int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { if (num_tasks <= 0) return; if (stream == kCudaStreamInvalid) { // there's not much point in using this on CPU as there are better ways // to do things (sequentially), but this can be useful for debugging. // The idea with `dart_separation` is this: Half of the jobs we allocate // to the corresponding tasks. The other half we allocate by throwing // darts onto the interval [0, num_items - 1], evenly spaced starting from // 0, and seeing which tasks they land in. This is somewhat random but it // ensures that if any task has a very large amount of work to do, it will // get a roughly proportionate number of jobs. int32_t row_splits0 = row_splits[0], row_splits_nt = row_splits[num_tasks], num_items = row_splits_nt - row_splits0, dart_separation = num_items / num_tasks; if (dart_separation != 0) { for (int32_t task = 0; task < num_tasks; ++task) { int32_t this_row_split = row_splits[task], next_row_split = row_splits[task + 1]; int32_t num_jobs_this_task = 1 + (std::min(next_row_split / dart_separation, num_tasks) - std::min(this_row_split / dart_separation, num_tasks)); K2_CHECK_EQ( static_cast<int32_t>(static_cast<uint16_t>(num_jobs_this_task)), num_jobs_this_task); for (int32_t job_id_this_task = 0; job_id_this_task < num_jobs_this_task; ++job_id_this_task) { int32_t job_idx = (job_id_this_task == 0 ? task : // 1st half num_tasks + (this_row_split / dart_separation) + job_id_this_task - 1); // 2nd half. 
redirect_out[job_idx] = TaskRedirect{task, static_cast<uint16_t>(num_jobs_this_task), static_cast<uint16_t>(job_id_this_task)}; } } } else { // This is a special case where there is no work to do; we give a trivial // assignment of tasks to jobs and return for (int32_t task = 0; task < num_tasks; ++task) { int32_t num_jobs_this_task = 2; for (int32_t job_id_this_task = 0; job_id_this_task < num_jobs_this_task; ++job_id_this_task) { int32_t job_idx = task + job_id_this_task * num_tasks; redirect_out[job_idx] = TaskRedirect{task, static_cast<uint16_t>(num_jobs_this_task), static_cast<uint16_t>(job_id_this_task)}; } } } } else { // compare 8 to 2, which is the expected number of jobs per task. having // 8 substantially greater than 2 gives a fairly big safety factor. // However this is still far from ideal in scenarios where the number of // tasks might be highly unbalanced. const int32_t threads_per_task = 8, tot_threads = threads_per_task * num_tasks; int32_t block_size = 256; int32_t grid_size = NumBlocks(tot_threads, block_size); K2_CUDA_SAFE_CALL(GetTaskRedirect<threads_per_task> <<<grid_size, block_size, 0, stream>>>( num_tasks, row_splits, redirect_out)); } } void GetTaskRedirect(ContextPtr &c, int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { GetTaskRedirect(c->GetCudaStream(), num_tasks, row_splits, redirect_out); } } // namespace k2
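// RowSplitsToRowIds and RowIdsToRowSplits are two views of the same ragged shape:
// row_splits[r] .. row_splits[r+1]-1 are the element indexes of row r, and
// row_ids[i] is the row that element i belongs to. The host-only sketch below
// shows both directions on a tiny example; SplitsToIds mirrors the kCpu branch
// above, while IdsToSplits uses an equivalent count-and-prefix-sum formulation.
// Function names are illustrative, not part of the k2 API.
#include <cassert>
#include <cstdint>
#include <vector>

static std::vector<int32_t> SplitsToIds(const std::vector<int32_t> &row_splits)
{
    int32_t num_rows = static_cast<int32_t>(row_splits.size()) - 1;
    std::vector<int32_t> row_ids(row_splits[num_rows]);
    for (int32_t row = 0; row < num_rows; ++row)
        for (int32_t i = row_splits[row]; i < row_splits[row + 1]; ++i)
            row_ids[i] = row;  // every element of row `row` gets that row index
    return row_ids;
}

static std::vector<int32_t> IdsToSplits(const std::vector<int32_t> &row_ids,
                                        int32_t num_rows)
{
    std::vector<int32_t> row_splits(num_rows + 1, 0);
    for (int32_t id : row_ids) ++row_splits[id + 1];  // count elements per row
    for (int32_t r = 0; r < num_rows; ++r)
        row_splits[r + 1] += row_splits[r];           // exclusive prefix sum
    return row_splits;
}

static void RaggedShapeExample()
{
    // Three rows with 2, 0 and 3 elements respectively (note the empty row 1).
    std::vector<int32_t> row_splits = {0, 2, 2, 5};
    std::vector<int32_t> row_ids = SplitsToIds(row_splits);  // {0, 0, 2, 2, 2}
    assert(row_ids == (std::vector<int32_t>{0, 0, 2, 2, 2}));
    assert(IdsToSplits(row_ids, 3) == row_splits);
}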
#include <stdio.h> #include <stdlib.h> #include <assert.h> // CHECK: #include <hip/hip_runtime.h> #include <cuda_runtime.h> // CHECK: #include <hipsparse.h> #include <cusparse.h> // NOTE: CUDA 10.0 /* compute | b - A*x|_inf */ void residaul_eval( int n, // CHECK: const hipsparseMatDescr_t descrA, const cusparseMatDescr_t descrA, const float *csrVal, const int *csrRowPtr, const int *csrColInd, const float *b, const float *x, float *r_nrminf_ptr) { // CHECK: const int base = (hipsparseGetMatIndexBase(descrA) != HIPSPARSE_INDEX_BASE_ONE) ? 0 : 1; const int base = (cusparseGetMatIndexBase(descrA) != CUSPARSE_INDEX_BASE_ONE) ? 0 : 1; // CHECK: const int lower = (HIPSPARSE_FILL_MODE_LOWER == hipsparseGetMatFillMode(descrA)) ? 1 : 0; const int lower = (CUSPARSE_FILL_MODE_LOWER == cusparseGetMatFillMode(descrA)) ? 1 : 0; // CHECK: const int unit = (HIPSPARSE_DIAG_TYPE_UNIT == hipsparseGetMatDiagType(descrA)) ? 1 : 0; const int unit = (CUSPARSE_DIAG_TYPE_UNIT == cusparseGetMatDiagType(descrA)) ? 1 : 0; float r_nrminf = 0; for (int row = 0; row < n; row++) { const int start = csrRowPtr[row] - base; const int end = csrRowPtr[row + 1] - base; float dot = 0; for (int colidx = start; colidx < end; colidx++) { const int col = csrColInd[colidx] - base; float Aij = csrVal[colidx]; float xj = x[col]; if ((row == col) && unit) { Aij = 1.0; } int valid = (row >= col) && lower || (row <= col) && !lower; if (valid) { dot += Aij * xj; } } float ri = b[row] - dot; r_nrminf = (r_nrminf > fabs(ri)) ? r_nrminf : fabs(ri); } *r_nrminf_ptr = r_nrminf; } int main(int argc, char*argv[]) { // CHECK: hipsparseHandle_t handle = NULL; cusparseHandle_t handle = NULL; // CHECK: hipStream_t stream = NULL; cudaStream_t stream = NULL; // CHECK: hipsparseMatDescr_t descrA = NULL; cusparseMatDescr_t descrA = NULL; // NOTE: CUDA 10.0 // TODO: csrsm2Info_t info = NULL; csrsm2Info_t info = NULL; // CHECK: hipsparseStatus_t status = HIPSPARSE_STATUS_SUCCESS; cusparseStatus_t status = CUSPARSE_STATUS_SUCCESS; // CHECK: hipError_t cudaStat1 = hipSuccess; cudaError_t cudaStat1 = cudaSuccess; const int nrhs = 2; const int n = 4; const int nnzA = 9; // CHECK: const hipsparseSolvePolicy_t policy = HIPSPARSE_SOLVE_POLICY_NO_LEVEL; const cusparseSolvePolicy_t policy = CUSPARSE_SOLVE_POLICY_NO_LEVEL; const float h_one = 1.0; /* * | 1 0 2 -3 | * | 0 4 0 0 | * A = | 5 0 6 7 | * | 0 8 0 9 | * * Regard A as a lower triangle matrix L with non-unit diagonal. 
* | 1 5 | | 1 5 | * Given B = | 2 6 |, X = L \ B = | 0.5 1.5 | * | 3 7 | | -0.3333 -3 | * | 4 8 | | 0 -0.4444 | */ const int csrRowPtrA[n + 1] = { 1, 4, 5, 8, 10 }; const int csrColIndA[nnzA] = { 1, 3, 4, 2, 1, 3, 4, 2, 4 }; const float csrValA[nnzA] = { 1, 2, -3, 4, 5, 6, 7, 8, 9 }; const float B[n*nrhs] = { 1,2,3,4,5,6,7,8 }; float X[n*nrhs]; int *d_csrRowPtrA = NULL; int *d_csrColIndA = NULL; float *d_csrValA = NULL; float *d_B = NULL; size_t lworkInBytes = 0; char *d_work = NULL; const int algo = 0; /* non-block version */ printf("example of csrsm2 \n"); /* step 1: create cusparse handle, bind a stream */ // CHECK: cudaStat1 = hipStreamCreateWithFlags(&stream, hipStreamNonBlocking); cudaStat1 = cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking); // CHECK: assert(hipSuccess == cudaStat1); assert(cudaSuccess == cudaStat1); // CHECK: status = hipsparseCreate(&handle); status = cusparseCreate(&handle); // CHECK: assert(HIPSPARSE_STATUS_SUCCESS == status); assert(CUSPARSE_STATUS_SUCCESS == status); status = cusparseSetStream(handle, stream); // CHECK: assert(HIPSPARSE_STATUS_SUCCESS == status); assert(CUSPARSE_STATUS_SUCCESS == status); // NOTE: CUDA 10.0 // TODO: status = hipsparseCreateCsrsm2Info(&info); status = cusparseCreateCsrsm2Info(&info); // CHECK: assert(HIPSPARSE_STATUS_SUCCESS == status); assert(CUSPARSE_STATUS_SUCCESS == status); /* step 2: configuration of matrix A */ status = cusparseCreateMatDescr(&descrA); // CHECK: assert(HIPSPARSE_STATUS_SUCCESS == status); assert(CUSPARSE_STATUS_SUCCESS == status); /* A is base-1*/ // CHECK: hipsparseSetMatIndexBase(descrA, HIPSPARSE_INDEX_BASE_ONE); cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ONE); // CHECK: hipsparseSetMatType(descrA, HIPSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL); /* A is lower triangle */ // CHECK: hipsparseSetMatFillMode(descrA, HIPSPARSE_FILL_MODE_LOWER); cusparseSetMatFillMode(descrA, CUSPARSE_FILL_MODE_LOWER); /* A has non unit diagonal */ // CHECK: hipsparseSetMatDiagType(descrA, HIPSPARSE_DIAG_TYPE_NON_UNIT); cusparseSetMatDiagType(descrA, CUSPARSE_DIAG_TYPE_NON_UNIT); // CHECK: cudaStat1 = hipMalloc((void**)&d_csrRowPtrA, sizeof(int)*(n + 1)); cudaStat1 = cudaMalloc((void**)&d_csrRowPtrA, sizeof(int)*(n + 1)); // CHECK: assert(hipSuccess == cudaStat1); assert(cudaSuccess == cudaStat1); // CHECK: cudaStat1 = hipMalloc((void**)&d_csrColIndA, sizeof(int)*nnzA); cudaStat1 = cudaMalloc((void**)&d_csrColIndA, sizeof(int)*nnzA); // CHECK: assert(hipSuccess == cudaStat1); assert(cudaSuccess == cudaStat1); // CHECK: cudaStat1 = hipMalloc((void**)&d_csrValA, sizeof(float)*nnzA); cudaStat1 = cudaMalloc((void**)&d_csrValA, sizeof(float)*nnzA); // CHECK: assert(hipSuccess == cudaStat1); assert(cudaSuccess == cudaStat1); // CHECK: cudaStat1 = hipMalloc((void**)&d_B, sizeof(float)*n*nrhs); cudaStat1 = cudaMalloc((void**)&d_B, sizeof(float)*n*nrhs); // CHECK: assert(hipSuccess == cudaStat1); assert(cudaSuccess == cudaStat1); // CHECK: cudaStat1 = hipMemcpy(d_csrRowPtrA, csrRowPtrA, sizeof(int)*(n + 1), hipMemcpyHostToDevice); cudaStat1 = cudaMemcpy(d_csrRowPtrA, csrRowPtrA, sizeof(int)*(n + 1), cudaMemcpyHostToDevice); // CHECK: assert(hipSuccess == cudaStat1); assert(cudaSuccess == cudaStat1); // CHECK: cudaStat1 = hipMemcpy(d_csrColIndA, csrColIndA, sizeof(int)*nnzA, hipMemcpyHostToDevice); cudaStat1 = cudaMemcpy(d_csrColIndA, csrColIndA, sizeof(int)*nnzA, cudaMemcpyHostToDevice); // CHECK: assert(hipSuccess == cudaStat1); assert(cudaSuccess == cudaStat1); // CHECK: 
cudaStat1 = hipMemcpy(d_csrValA, csrValA, sizeof(float)*nnzA, hipMemcpyHostToDevice); cudaStat1 = cudaMemcpy(d_csrValA, csrValA, sizeof(float)*nnzA, cudaMemcpyHostToDevice); // CHECK: assert(hipSuccess == cudaStat1); assert(cudaSuccess == cudaStat1); // CHECK: cudaStat1 = hipMemcpy(d_B, B, sizeof(float)*n*nrhs, hipMemcpyHostToDevice); cudaStat1 = cudaMemcpy(d_B, B, sizeof(float)*n*nrhs, cudaMemcpyHostToDevice); // CHECK: assert(hipSuccess == cudaStat1); assert(cudaSuccess == cudaStat1); /* step 3: query workspace */ // NOTE: CUDA 10.0 // TODO: status = hipsparseScsrsm2_bufferSizeExt( // CHECK: HIPSPARSE_OPERATION_NON_TRANSPOSE, // CHECK: HIPSPARSE_OPERATION_NON_TRANSPOSE, status = cusparseScsrsm2_bufferSizeExt( handle, algo, CUSPARSE_OPERATION_NON_TRANSPOSE, /* transA */ CUSPARSE_OPERATION_NON_TRANSPOSE, /* transB */ n, nrhs, nnzA, &h_one, descrA, d_csrValA, d_csrRowPtrA, d_csrColIndA, d_B, n, /* ldb */ info, policy, &lworkInBytes); // CHECK: assert(HIPSPARSE_STATUS_SUCCESS == status); assert(CUSPARSE_STATUS_SUCCESS == status); printf("lworkInBytes = %lld \n", (long long)lworkInBytes); // CHECK: if (NULL != d_work) { hipFree(d_work); } if (NULL != d_work) { cudaFree(d_work); } // CHECK: cudaStat1 = hipMalloc((void**)&d_work, lworkInBytes); cudaStat1 = cudaMalloc((void**)&d_work, lworkInBytes); // CHECK: assert(hipSuccess == cudaStat1); assert(cudaSuccess == cudaStat1); /* step 4: analysis */ // NOTE: CUDA 10.0 // TODO: status = hipsparseScsrsm2_analysis( // CHECK: HIPSPARSE_OPERATION_NON_TRANSPOSE, // CHECK: HIPSPARSE_OPERATION_NON_TRANSPOSE, status = cusparseScsrsm2_analysis( handle, algo, CUSPARSE_OPERATION_NON_TRANSPOSE, /* transA */ CUSPARSE_OPERATION_NON_TRANSPOSE, /* transB */ n, nrhs, nnzA, &h_one, descrA, d_csrValA, d_csrRowPtrA, d_csrColIndA, d_B, n, /* ldb */ info, policy, d_work); // CHECK: assert(HIPSPARSE_STATUS_SUCCESS == status); assert(CUSPARSE_STATUS_SUCCESS == status); /* step 5: solve L * X = B */ // NOTE: CUDA 10.0 // TODO: status = hipsparseScsrsm2_solve( // CHECK: HIPSPARSE_OPERATION_NON_TRANSPOSE, // CHECK: HIPSPARSE_OPERATION_NON_TRANSPOSE, status = cusparseScsrsm2_solve( handle, algo, CUSPARSE_OPERATION_NON_TRANSPOSE, /* transA */ CUSPARSE_OPERATION_NON_TRANSPOSE, /* transB */ n, nrhs, nnzA, &h_one, descrA, d_csrValA, d_csrRowPtrA, d_csrColIndA, d_B, n, /* ldb */ info, policy, d_work); // CHECK: assert(HIPSPARSE_STATUS_SUCCESS == status); assert(CUSPARSE_STATUS_SUCCESS == status); // CHECK: cudaStat1 = hipDeviceSynchronize(); cudaStat1 = cudaDeviceSynchronize(); // CHECK: assert(hipSuccess == cudaStat1); assert(cudaSuccess == cudaStat1); /* step 6:measure residual B - A*X */ // CHECK: cudaStat1 = hipMemcpy(X, d_B, sizeof(float)*n*nrhs, hipMemcpyDeviceToHost); cudaStat1 = cudaMemcpy(X, d_B, sizeof(float)*n*nrhs, cudaMemcpyDeviceToHost); // CHECK: assert(hipSuccess == cudaStat1); assert(cudaSuccess == cudaStat1); // CHECK: hipDeviceSynchronize(); cudaDeviceSynchronize(); printf("==== x1 = inv(A)*b1 \n"); for (int j = 0; j < n; j++) { printf("x1[%d] = %f\n", j, X[j]); } float r1_nrminf; residaul_eval( n, descrA, csrValA, csrRowPtrA, csrColIndA, B, X, &r1_nrminf ); printf("|b1 - A*x1| = %E\n", r1_nrminf); printf("==== x2 = inv(A)*b2 \n"); for (int j = 0; j < n; j++) { printf("x2[%d] = %f\n", j, X[n + j]); } float r2_nrminf; residaul_eval( n, descrA, csrValA, csrRowPtrA, csrColIndA, B + n, X + n, &r2_nrminf ); printf("|b2 - A*x2| = %E\n", r2_nrminf); /* free resources */ // CHECK: if (d_csrRowPtrA) hipFree(d_csrRowPtrA); if (d_csrRowPtrA) cudaFree(d_csrRowPtrA); // 
CHECK: if (d_csrColIndA) hipFree(d_csrColIndA); if (d_csrColIndA) cudaFree(d_csrColIndA); // CHECK: if (d_csrValA) hipFree(d_csrValA); if (d_csrValA) cudaFree(d_csrValA); // CHECK: if (d_B) hipFree(d_B); if (d_B) cudaFree(d_B); // CHECK: if (handle) hipsparseDestroy(handle); if (handle) cusparseDestroy(handle); // CHECK: if (stream) hipStreamDestroy(stream); if (stream) cudaStreamDestroy(stream); // CHECK: if (descrA) hipsparseDestroyMatDescr(descrA); if (descrA) cusparseDestroyMatDescr(descrA); // NOTE: CUDA 10.0 // TODO: if (info) hipsparseDestroyCsrsm2Info(info); if (info) cusparseDestroyCsrsm2Info(info); // CHECK: hipDeviceReset(); cudaDeviceReset(); return 0; }
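// A minimal host-only sketch (not part of the original sample): it checks the
// expected X = L \ B quoted in the comment block above by plain forward
// substitution over the same one-based CSR arrays, using only the lower
// triangle (diagonal included) of A. The helper name is hypothetical.
static void forward_substitution_csr_lower(
    int n, const float *csrVal, const int *csrRowPtr, const int *csrColInd,
    const float *b, float *x)
{
    const int base = 1; /* the sample builds A with CUSPARSE_INDEX_BASE_ONE */
    for (int row = 0; row < n; row++) {
        float diag = 0.0f;
        float sum  = b[row];
        for (int idx = csrRowPtr[row] - base; idx < csrRowPtr[row + 1] - base; idx++) {
            const int col = csrColInd[idx] - base;
            if (col < row)       sum -= csrVal[idx] * x[col]; /* strictly lower part */
            else if (col == row) diag = csrVal[idx];          /* diagonal entry      */
            /* col > row belongs to the upper triangle and is ignored, matching
               CUSPARSE_FILL_MODE_LOWER */
        }
        x[row] = sum / diag;
    }
}
/* Example use with the data from main():
     float x1[4];
     forward_substitution_csr_lower(4, csrValA, csrRowPtrA, csrColIndA, B, x1);
   yields x1 = { 1, 0.5, -0.3333, 0 }, matching the first column of X above. */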
the_stack
// Visualizing Errors in Rendered High Dynamic Range Images // Eurographics 2021, // by Pontus Andersson, Jim Nilsson, Peter Shirley, and Tomas Akenine-Moller. // Pointer to the paper: https://research.nvidia.com/publication/2021-05_HDR-FLIP. // FLIP: A Difference Evaluator for Alternating Images // High Performance Graphics 2020, // by Pontus Andersson, Jim Nilsson, Tomas Akenine-Moller, // Magnus Oskarsson, Kalle Astrom, and Mark D. Fairchild. // Pointer to the paper: https://research.nvidia.com/publication/2020-07_FLIP. // Code by Pontus Andersson, Jim Nilsson, and Tomas Akenine-Moller. #pragma once #include <iostream> #include <string> #include "cuda_runtime.h" #include "device_launch_parameters.h" namespace FLIP { #define Max(x, y) ((x) > (y) ? (x) : (y)) #define Min(x, y) ((x) > (y) ? (y) : (x)) #define DEFAULT_ILLUMINANT { 0.950428545f, 1.000000000f, 1.088900371f } #define INV_DEFAULT_ILLUMINANT { 1.052156925f, 1.000000000f, 0.918357670f } class color3 { public: union { struct { float r, g, b; }; struct { float x, y, z; }; struct { float h, s, v; }; float3 float3; }; public: __host__ __device__ color3(void) { this->x = 0.0f; this->y = 0.0f; this->z = 0.0f; } __host__ __device__ color3(float v) { this->x = v; this->y = v; this->z = v; } __host__ __device__ color3(const float* pColor) { this->x = pColor[0]; this->y = pColor[1]; this->z = pColor[2]; } __host__ __device__ color3(const unsigned char* pColor) { this->x = float(pColor[0]); this->y = float(pColor[1]); this->z = float(pColor[2]); *this /= 255.0f; } __host__ __device__ color3(float _x, float _y, float _z) { this->x = _x; this->y = _y; this->z = _z; } __host__ __device__ color3(const color3& c) { this->x = c.x; this->y = c.y; this->z = c.z; } __host__ __device__ bool operator==(const color3 v) const { return this->x == v.x && this->y == v.y && this->z == v.z; } __host__ __device__ bool operator!=(const color3 v) const { return !(*this == v); } __host__ __device__ color3 operator+(const color3 v) const { return color3(this->x + v.x, this->y + v.y, this->z + v.z); } __host__ __device__ color3 operator-(const color3 v) const { return color3(this->x - v.x, this->y - v.y, this->z - v.z); } __host__ __device__ color3 operator*(const float v) const { return color3(this->x * v, this->y * v, this->z * v); } __host__ __device__ color3 operator*(const color3 v) const { return color3(this->x * v.x, this->y * v.y, this->z * v.z); } __host__ __device__ color3 operator/(const float v) const { return color3(this->x / v, this->y / v, this->z / v); } __host__ __device__ color3 operator/(const color3 v) const { return color3(this->x / v.x, this->y / v.y, this->z / v.z); } __host__ __device__ color3 operator+=(const color3 v) { this->x += v.x; this->y += v.y; this->z += v.z; return *this; } __host__ __device__ color3 operator*=(const color3 v) { this->x *= v.x; this->y *= v.y; this->z *= v.z; return *this; } __host__ __device__ color3 operator/=(const color3 v) { this->x /= v.x; this->y /= v.y; this->z /= v.z; return *this; } __host__ __device__ void clear(const color3 v = { 0.0f, 0.0f, 0.0f }) { this->x = v.x; this->y = v.y; this->z = v.z; } __host__ __device__ static inline color3 min(color3 v0, color3 v1) { return color3(Min(v0.x, v1.x), Min(v0.y, v1.y), Min(v0.z, v1.z)); } __host__ __device__ static inline color3 max(color3 v0, color3 v1) { return color3(Max(v0.x, v1.x), Max(v0.y, v1.y), Max(v0.z, v1.z)); } __host__ __device__ static inline color3 abs(color3 v) { return color3(std::abs(v.x), std::abs(v.y), std::abs(v.z)); } __host__ 
__device__ static inline color3 sqrt(color3 v) { return color3(std::sqrt(v.x), std::sqrt(v.y), std::sqrt(v.z)); } __host__ __device__ static inline color3 clamp(color3 v, float _min = 0.0f, float _max = 1.0f) { return color3(Min(Max(v.x, _min), _max), Min(Max(v.y, _min), _max), Min(Max(v.z, _min), _max)); } __host__ __device__ static inline float linearRGB2Luminance(color3 linearRGB) { return 0.2126f * linearRGB.r + 0.7152f * linearRGB.g + 0.0722f * linearRGB.b; } __host__ __device__ static inline float sRGB2LinearRGB(float sC) { if (sC <= 0.04045f) { return sC / 12.92f; } return powf((sC + 0.055f) / 1.055f, 2.4f); } __host__ __device__ static inline float LinearRGB2sRGB(float lC) { if (lC <= 0.0031308f) { return lC * 12.92f; } return 1.055f * powf(lC, 1.0f / 2.4f) - 0.055f; } __host__ __device__ static inline color3 sRGB2LinearRGB(color3 sRGB) { float R = sRGB2LinearRGB(sRGB.x); float G = sRGB2LinearRGB(sRGB.y); float B = sRGB2LinearRGB(sRGB.z); return color3(R, G, B); } __host__ __device__ static inline color3 LinearRGB2sRGB(color3 RGB) { float sR = LinearRGB2sRGB(RGB.x); float sG = LinearRGB2sRGB(RGB.y); float sB = LinearRGB2sRGB(RGB.z); return color3(sR, sG, sB); } __host__ __device__ static inline color3 LinearRGB2XYZ(color3 RGB) { // Source: https://www.image-engineering.de/library/technotes/958-how-to-convert-between-srgb-and-ciexyz // Assumes D65 standard illuminant const float a11 = 10135552.0f / 24577794.0f; const float a12 = 8788810.0f / 24577794.0f; const float a13 = 4435075.0f / 24577794.0f; const float a21 = 2613072.0f / 12288897.0f; const float a22 = 8788810.0f / 12288897.0f; const float a23 = 887015.0f / 12288897.0f; const float a31 = 1425312.0f / 73733382.0f; const float a32 = 8788810.0f / 73733382.0f; const float a33 = 70074185.0f / 73733382.0f; color3 XYZ; XYZ.x = a11 * RGB.x + a12 * RGB.y + a13 * RGB.z; XYZ.y = a21 * RGB.x + a22 * RGB.y + a23 * RGB.z; XYZ.z = a31 * RGB.x + a32 * RGB.y + a33 * RGB.z; return XYZ; } __host__ __device__ static inline color3 XYZ2LinearRGB(color3 XYZ) { // Return values in linear RGB, assuming D65 standard illuminant const float a11 = 3.241003275f; const float a12 = -1.537398934f; const float a13 = -0.498615861f; const float a21 = -0.969224334f; const float a22 = 1.875930071f; const float a23 = 0.041554224f; const float a31 = 0.055639423f; const float a32 = -0.204011202f; const float a33 = 1.057148933f; color3 RGB; RGB.x = a11 * XYZ.x + a12 * XYZ.y + a13 * XYZ.z; RGB.y = a21 * XYZ.x + a22 * XYZ.y + a23 * XYZ.z; RGB.z = a31 * XYZ.x + a32 * XYZ.y + a33 * XYZ.z; return RGB; } __host__ __device__ static inline color3 XYZ2CIELab(color3 XYZ, const color3 invReferenceIlluminant = INV_DEFAULT_ILLUMINANT) { const float delta = 6.0f / 29.0f; const float deltaSquare = delta * delta; const float deltaCube = delta * deltaSquare; const float factor = 1.0f / (3.0f * deltaSquare); const float term = 4.0f / 29.0f; // the default illuminant is D65 XYZ = XYZ * invReferenceIlluminant; XYZ.x = (XYZ.x > deltaCube ? powf(XYZ.x, 1.0f / 3.0f) : factor * XYZ.x + term); XYZ.y = (XYZ.y > deltaCube ? powf(XYZ.y, 1.0f / 3.0f) : factor * XYZ.y + term); XYZ.z = (XYZ.z > deltaCube ? 
powf(XYZ.z, 1.0f / 3.0f) : factor * XYZ.z + term); float L = 116.0f * XYZ.y - 16.0f; float a = 500.0f * (XYZ.x - XYZ.y); float b = 200.0f * (XYZ.y - XYZ.z); return color3(L, a, b); } __host__ __device__ static inline color3 CIELab2XYZ(color3 Lab, const color3 referenceIlluminant = DEFAULT_ILLUMINANT) { const float delta = 6.0f / 29.0f; const float factor = 3.0f * delta * delta; const float term = 4.0f / 29.0f; // the default illuminant is D65 float Y = (Lab.x + 16.0f) / 116.0f; float X = Lab.y / 500.0f + Y; float Z = Y - Lab.z / 200.0f; X = (X > delta ? X * X * X : (X - term) * factor); Y = (Y > delta ? Y * Y * Y : (Y - term) * factor); Z = (Z > delta ? Z * Z * Z : (Z - term) * factor); return color3(X, Y, Z) * referenceIlluminant; } __host__ __device__ static inline color3 XYZ2YCxCz(color3 XYZ, const color3 invReferenceIlluminant = INV_DEFAULT_ILLUMINANT) { // the default illuminant is D65 XYZ = XYZ * invReferenceIlluminant; float Y = 116.0f * XYZ.y - 16.0f; float Cx = 500.0f * (XYZ.x - XYZ.y); float Cz = 200.0f * (XYZ.y - XYZ.z); return color3(Y, Cx, Cz); } __host__ __device__ static inline color3 YCxCz2XYZ(color3 YCxCz, const color3 referenceIlluminant = DEFAULT_ILLUMINANT) { // the default illuminant is D65 const float Y = (YCxCz.x + 16.0f) / 116.0f; const float Cx = YCxCz.y / 500.0f; const float Cz = YCxCz.z / 200.0f; float X = Y + Cx; float Z = Y - Cz; return color3(X, Y, Z) * referenceIlluminant; } __host__ __device__ static inline float YCxCz2Gray(color3 YCxCz) { return (YCxCz.x + 16.0f) / 116.0f; // make it [0,1] } // FLIP-specific functions below __host__ __device__ static inline float Hunt(const float luminance, const float chrominance) { return 0.01f * luminance * chrominance; } __host__ __device__ static inline float HyAB(color3& refPixel, color3& testPixel) { float cityBlockDistanceL = std::fabs(refPixel.x - testPixel.x); float euclideanDistanceAB = std::sqrt((refPixel.y - testPixel.y) * (refPixel.y - testPixel.y) + (refPixel.z - testPixel.z) * (refPixel.z - testPixel.z)); return cityBlockDistanceL + euclideanDistanceAB; } __host__ __device__ static inline float computeMaxDistance(float gqc) { color3 greenLab = color3::XYZ2CIELab(color3::LinearRGB2XYZ(color3(0.0f, 1.0f, 0.0f))); color3 blueLab = color3::XYZ2CIELab(color3::LinearRGB2XYZ(color3(0.0f, 0.0f, 1.0f))); color3 greenLabHunt = color3(greenLab.x, Hunt(greenLab.x, greenLab.y), Hunt(greenLab.x, greenLab.z)); color3 blueLabHunt = color3(blueLab.x, Hunt(blueLab.x, blueLab.y), Hunt(blueLab.x, blueLab.z)); return powf(HyAB(greenLabHunt, blueLabHunt), gqc); } std::string toString(void) const { return "(" + std::to_string(this->x) + ", " + std::to_string(this->y) + ", " + std::to_string(this->z) + ")"; } friend std::ostream& operator<<(std::ostream& os, const color3& c) { os << c.toString(); return os; } }; }
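// A minimal host-side usage sketch (not part of the original header), assuming it
// is compiled in a translation unit that includes this file: it chains the
// conversion helpers above (sRGB -> linear RGB -> XYZ -> CIELab) and applies the
// HyAB color difference to two made-up pixels. It deliberately omits the Hunt
// adjustment and the spatial filtering used by the full FLIP metric.
inline void hyabSketch(void)
{
    using FLIP::color3;

    color3 refSRGB(0.25f, 0.50f, 0.75f);   // reference pixel, sRGB in [0,1]
    color3 testSRGB(0.30f, 0.45f, 0.70f);  // test pixel, sRGB in [0,1]

    color3 refLab  = color3::XYZ2CIELab(color3::LinearRGB2XYZ(color3::sRGB2LinearRGB(refSRGB)));
    color3 testLab = color3::XYZ2CIELab(color3::LinearRGB2XYZ(color3::sRGB2LinearRGB(testSRGB)));

    // City-block distance on L, Euclidean distance on (a, b).
    float diff = color3::HyAB(refLab, testLab);
    std::cout << "HyAB(" << refLab << ", " << testLab << ") = " << diff << std::endl;
}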
the_stack
#include <vector> #include <random> #include <iostream> #include <chrono> #include <numeric> #include <math.h> #include <cuda_runtime.h> #include <stdexcept> #define CUDA_CHECK_STATUS( cuda_function_call ) \ if (cudaError_t const status = cuda_function_call) \ { \ throw std::runtime_error( cudaGetErrorString( status ) ) ; \ } template<class T> struct GPU_array { GPU_array(std::size_t const size) { CUDA_CHECK_STATUS(cudaMalloc(&data_, size * sizeof(T))); } GPU_array(std::vector<T> const & cpu_data) : GPU_array(cpu_data.size()) { write(cpu_data); } GPU_array(std::size_t const & count, T const value) : GPU_array(count) { set(count, value); } ~GPU_array() { cudaFree(data_); } operator T * () { return static_cast<T *>(data_); } void read(std::vector<T> & to) const { CUDA_CHECK_STATUS(cudaMemcpy( to.data(), data_, to.size() * sizeof(T), cudaMemcpyDeviceToHost)); } void write(std::vector<T> const & from) { CUDA_CHECK_STATUS(cudaMemcpy( data_, from.data(), from.size() * sizeof(T), cudaMemcpyHostToDevice)); } void set(std::size_t const count, T const value) { CUDA_CHECK_STATUS(cudaMemset(data_, 1, count * sizeof(T))); } private: void * data_; }; void generate_gauss_2d( std::vector<REAL> const & x_coordinates, std::vector<REAL> const & y_coordinates, std::vector<REAL> const & gauss_params, std::vector<REAL> & output_values) { // Generates a Gaussian 2D function at a set of X and Y coordinates. The Gaussian is defined by // an array of five parameters. // x_coordinates: Vector of X coordinates. // y_coordinates: Vector of Y coordinates. // gauss_params: Vector of function parameters. // output_values: Output vector containing the values of the Gaussian function at the // corresponding X, Y coordinates. // gauss_params[0]: Amplitude // gauss_params[1]: Center X position // guass_params[2]: Center Y position // gauss_params[3]: Gaussian width (standard deviation) // gauss_params[4]: Baseline offset // This code assumes that x_coordinates.size == y_coordinates.size == output_values.size for (size_t i = 0; i < x_coordinates.size(); i++) { REAL arg = -( (x_coordinates[i] - gauss_params[1]) * (x_coordinates[i] - gauss_params[1]) + (y_coordinates[i] - gauss_params[2]) * (y_coordinates[i] - gauss_params[2]) ) / (2 * gauss_params[3] * gauss_params[3]); output_values[i] = gauss_params[0] * exp(arg) + gauss_params[4]; } } void cuda_interface_example() { /* This example generates test data on the CPU in form of 10000 two dimensional Gaussian peaks with the size of 50x50 data points per peak. It is noised by Poisson distributed noise. The initial guesses were randomized, within a specified range of the true value. Before call to Gpufit the input data is transfered to GPU memory. The GAUSS_2D model is fitted to the test data sets using the MLE estimator. After calling Gpufit the output data is transfered to CPU memory. The console output shows - the execution time, - the ratio of converged fits including ratios of not converged fits for different reasons, - the values of the true parameters and the mean values of the fitted parameters including their standard deviation, - the mean chi square value - and the mean number of iterations needed. True parameters and noise and number of fits is the same as for the Matlab/Python 2D Gaussian examples. 
*/ // number of fits, fit points and parameters size_t const n_fits = 10000; size_t const size_x = 20; size_t const n_points_per_fit = size_x * size_x; size_t const n_parameters = 5; // true parameters (amplitude, center x position, center y position, width, offset) std::vector< REAL > true_parameters{ 5, 14.5f, 14.5f, 3, 10}; std::cout << "generate example data" << std::endl; // initialize random number generator std::mt19937 rng; rng.seed(0); std::uniform_real_distribution< REAL> uniform_dist(0, 1); // initial parameters (randomized) std::vector< REAL > initial_parameters(n_fits * n_parameters); for (size_t i = 0; i < n_fits; i++) { for (size_t j = 0; j < n_parameters; j++) { if (j == 1 || j == 2) { initial_parameters[i * n_parameters + j] = true_parameters[j] + true_parameters[3] * (-.2f + .4f * uniform_dist(rng)); } else { initial_parameters[i * n_parameters + j] = true_parameters[j] * (.8f + .4f * uniform_dist(rng)); } } } // generate x and y values std::vector< REAL > x(n_points_per_fit); std::vector< REAL > y(n_points_per_fit); for (size_t i = 0; i < size_x; i++) { for (size_t j = 0; j < size_x; j++) { x[i * size_x + j] = static_cast<REAL>(j); y[i * size_x + j] = static_cast<REAL>(i); } } // generate test data with Poisson noise std::vector< REAL > temp(n_points_per_fit); generate_gauss_2d(x, y, true_parameters, temp); std::vector< REAL > data(n_fits * n_points_per_fit); for (size_t i = 0; i < n_fits; i++) { for (size_t j = 0; j < n_points_per_fit; j++) { std::poisson_distribution< int > poisson_dist(temp[j]); data[i * n_points_per_fit + j] = static_cast<REAL>(poisson_dist(rng)); } } // tolerance REAL const tolerance = .001f; // maximum number of iterations int const max_n_iterations = 20; // estimator ID int const estimator_id = MLE; // model ID int const model_id = GAUSS_2D; // parameters to fit (all of them) std::vector< int > parameters_to_fit(n_parameters, 1); // output parameters CPU std::vector< REAL > output_parameters(n_fits * n_parameters); std::vector< int > output_states(n_fits); std::vector< REAL > output_chi_squares(n_fits); std::vector< int > output_n_iterations(n_fits); // input parameters GPU GPU_array<REAL> gpu_data(data); GPU_array<REAL> gpu_weights(data.size(), 1); // input/output parameters GPU GPU_array<REAL> gpu_initial_parameters(initial_parameters); // output_parameters GPU GPU_array<int> gpu_states(n_fits); GPU_array<REAL> gpu_chi_squares(n_fits); GPU_array<int> gpu_n_iterations(n_fits); // call to gpufit_cuda_interface std::chrono::high_resolution_clock::time_point time_0 = std::chrono::high_resolution_clock::now(); int status = gpufit_cuda_interface ( n_fits, n_points_per_fit, gpu_data, gpu_weights, model_id, tolerance, max_n_iterations, parameters_to_fit.data(), estimator_id, 0, 0, gpu_initial_parameters, gpu_states, gpu_chi_squares, gpu_n_iterations ); std::chrono::high_resolution_clock::time_point time_1 = std::chrono::high_resolution_clock::now(); // check status if (status != ReturnState::OK) { throw std::runtime_error(gpufit_get_last_error()); } // copy output data to CPU memory gpu_initial_parameters.read(output_parameters); gpu_states.read(output_states); gpu_chi_squares.read(output_chi_squares); gpu_n_iterations.read(output_n_iterations); std::cout << std::endl << "unconstrained fit" << std::endl; // print execution time std::cout << "execution time " << std::chrono::duration_cast<std::chrono::milliseconds>(time_1 - time_0).count() << " ms" << std::endl; // get fit states std::vector< int > output_states_histogram(5, 0); for (std::vector< int 
>::iterator it = output_states.begin(); it != output_states.end(); ++it) { output_states_histogram[*it]++; } std::cout << "ratio converged " << (REAL)output_states_histogram[0] / n_fits << "\n"; std::cout << "ratio max iteration exceeded " << (REAL)output_states_histogram[1] / n_fits << "\n"; std::cout << "ratio singular hessian " << (REAL)output_states_histogram[2] / n_fits << "\n"; std::cout << "ratio neg curvature MLE " << (REAL)output_states_histogram[3] / n_fits << "\n"; std::cout << "ratio gpu not read " << (REAL)output_states_histogram[4] / n_fits << "\n"; // compute mean of fitted parameters for converged fits std::vector< REAL > output_parameters_mean(n_parameters, 0); for (size_t i = 0; i != n_fits; i++) { if (output_states[i] == FitState::CONVERGED) { for (size_t j = 0; j < n_parameters; j++) { output_parameters_mean[j] += output_parameters[i * n_parameters + j]; } } } // normalize for (size_t j = 0; j < n_parameters; j++) { output_parameters_mean[j] /= output_states_histogram[0]; } // compute std of fitted parameters for converged fits std::vector< REAL > output_parameters_std(n_parameters, 0); for (size_t i = 0; i != n_fits; i++) { if (output_states[i] == FitState::CONVERGED) { for (size_t j = 0; j < n_parameters; j++) { output_parameters_std[j] += (output_parameters[i * n_parameters + j] - output_parameters_mean[j]) * (output_parameters[i * n_parameters + j] - output_parameters_mean[j]); } } } // normalize and take square root for (size_t j = 0; j < n_parameters; j++) { output_parameters_std[j] = sqrt(output_parameters_std[j] / output_states_histogram[0]); } // print true value, fitted mean and std for every parameter for (size_t j = 0; j < n_parameters; j++) { std::cout << "parameter " << j << " true " << true_parameters[j] << " fitted mean " << output_parameters_mean[j] << " std " << output_parameters_std[j] << std::endl; } // compute mean chi-square for those converged REAL output_chi_square_mean = 0; for (size_t i = 0; i != n_fits; i++) { if (output_states[i] == FitState::CONVERGED) { output_chi_square_mean += output_chi_squares[i]; } } output_chi_square_mean /= static_cast<REAL>(output_states_histogram[0]); std::cout << "mean chi square " << output_chi_square_mean << std::endl; // compute mean number of iterations for those converged REAL output_number_iterations_mean = 0; for (size_t i = 0; i != n_fits; i++) { if (output_states[i] == FitState::CONVERGED) { output_number_iterations_mean += static_cast<REAL>(output_n_iterations[i]); } } // normalize output_number_iterations_mean /= static_cast<REAL>(output_states_histogram[0]); std::cout << "mean number of iterations " << output_number_iterations_mean << std::endl; // define constraints std::vector< REAL > constraints(n_fits * 2 * n_parameters, 0); for (size_t i = 0; i != n_fits; i++) { constraints[i * n_parameters * 2 + 6] = 2.9f; constraints[i * n_parameters * 2 + 7] = 3.1f; } GPU_array<REAL> gpu_constraints(constraints); std::vector< int > constraint_types(n_parameters, 0); constraint_types[0] = 1; // lower constraint_types[3] = 3; // lower and upper constraint_types[4] = 1; // lower // call to gpufit_constrained_cuda_interface time_0 = std::chrono::high_resolution_clock::now(); status = gpufit_constrained_cuda_interface ( n_fits, n_points_per_fit, gpu_data, gpu_weights, model_id, tolerance, max_n_iterations, parameters_to_fit.data(), gpu_constraints, constraint_types.data(), estimator_id, 0, 0, gpu_initial_parameters, gpu_states, gpu_chi_squares, gpu_n_iterations ); time_1 = std::chrono::high_resolution_clock::now(); 
// check status if (status != ReturnState::OK) { throw std::runtime_error(gpufit_get_last_error()); } // copy output data to CPU memory gpu_initial_parameters.read(output_parameters); gpu_states.read(output_states); gpu_chi_squares.read(output_chi_squares); gpu_n_iterations.read(output_n_iterations); std::cout << std::endl << "constrained fit" << std::endl; // print execution time std::cout << "execution time " << std::chrono::duration_cast<std::chrono::milliseconds>(time_1 - time_0).count() << " ms" << std::endl; // get fit states std::fill(output_states_histogram.begin(), output_states_histogram.end(), 0); for (std::vector< int >::iterator it = output_states.begin(); it != output_states.end(); ++it) { output_states_histogram[*it]++; } std::cout << "ratio converged " << (REAL)output_states_histogram[0] / n_fits << "\n"; std::cout << "ratio max iteration exceeded " << (REAL)output_states_histogram[1] / n_fits << "\n"; std::cout << "ratio singular hessian " << (REAL)output_states_histogram[2] / n_fits << "\n"; std::cout << "ratio neg curvature MLE " << (REAL)output_states_histogram[3] / n_fits << "\n"; std::cout << "ratio gpu not read " << (REAL)output_states_histogram[4] / n_fits << "\n"; // compute mean of fitted parameters for converged fits std::fill(output_parameters_mean.begin(), output_parameters_mean.end(), 0.f); for (size_t i = 0; i != n_fits; i++) { if (output_states[i] == FitState::CONVERGED) { for (size_t j = 0; j < n_parameters; j++) { output_parameters_mean[j] += output_parameters[i * n_parameters + j]; } } } // normalize for (size_t j = 0; j < n_parameters; j++) { output_parameters_mean[j] /= output_states_histogram[0]; } // compute std of fitted parameters for converged fits std::fill(output_parameters_std.begin(), output_parameters_std.end(), 0.f); for (size_t i = 0; i != n_fits; i++) { if (output_states[i] == FitState::CONVERGED) { for (size_t j = 0; j < n_parameters; j++) { output_parameters_std[j] += (output_parameters[i * n_parameters + j] - output_parameters_mean[j]) * (output_parameters[i * n_parameters + j] - output_parameters_mean[j]); } } } // normalize and take square root for (size_t j = 0; j < n_parameters; j++) { output_parameters_std[j] = sqrt(output_parameters_std[j] / output_states_histogram[0]); } // print true value, fitted mean and std for every parameter for (size_t j = 0; j < n_parameters; j++) { std::cout << "parameter " << j << " true " << true_parameters[j] << " fitted mean " << output_parameters_mean[j] << " std " << output_parameters_std[j] << std::endl; } // compute mean chi-square for those converged output_chi_square_mean = 0; for (size_t i = 0; i != n_fits; i++) { if (output_states[i] == FitState::CONVERGED) { output_chi_square_mean += output_chi_squares[i]; } } output_chi_square_mean /= static_cast<REAL>(output_states_histogram[0]); std::cout << "mean chi square " << output_chi_square_mean << std::endl; // compute mean number of iterations for those converged output_number_iterations_mean = 0; for (size_t i = 0; i != n_fits; i++) { if (output_states[i] == FitState::CONVERGED) { output_number_iterations_mean += static_cast<REAL>(output_n_iterations[i]); } } // normalize output_number_iterations_mean /= static_cast<REAL>(output_states_histogram[0]); std::cout << "mean number of iterations " << output_number_iterations_mean << std::endl; } int main(int argc, char *argv[]) { cuda_interface_example(); std::cout << std::endl << "Example completed!" << std::endl; std::cout << "Press ENTER to exit" << std::endl; std::getchar(); return 0; }
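// A small sketch (not part of the original example) documenting the constraint
// layout used above: per fit, the constraint buffer stores a (lower, upper) pair
// for every parameter, so parameter j of fit i lives at offsets
// i*2*n_parameters + 2*j and i*2*n_parameters + 2*j + 1. The helper name
// set_box_constraint is hypothetical; REAL is the precision type used throughout
// the example.
static void set_box_constraint(
    std::vector<REAL> & constraints,   // size n_fits * 2 * n_parameters
    size_t const n_parameters,
    size_t const fit_index,
    size_t const parameter_index,
    REAL const lower,
    REAL const upper)
{
    size_t const base = fit_index * 2 * n_parameters + 2 * parameter_index;
    constraints[base + 0] = lower;
    constraints[base + 1] = upper;
}
// Equivalent to the loop in cuda_interface_example():
//     for (size_t i = 0; i != n_fits; i++)
//         set_box_constraint(constraints, n_parameters, i, 3, 2.9f, 3.1f);
// i.e. parameter 3 (the Gaussian width) is boxed to [2.9, 3.1]; constraint_types
// then selects which of the two bounds is actually enforced per parameter.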
the_stack
* \file * cub::BlockReduceByKeyiles implements a stateful abstraction of CUDA thread blocks for participating in device-wide reduce-value-by-key. */ #pragma once #include <iterator> #include "scan_tiles_types.cuh" #include "../../block/block_load.cuh" #include "../../block/block_discontinuity.cuh" #include "../../block/block_scan.cuh" #include "../../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /****************************************************************************** * Utility data types ******************************************************************************/ /// Scan tuple data type for reduce-value-by-key template <typename Value, typename SizeT> struct ReduceByKeyuple { Value value; // Initially set as value, contains segment aggregate after prefix scan SizeT flag; // Initially set as a tail flag, contains scatter offset after prefix scan }; /// Binary reduce-by-key scan operator template <typename ReductionOp> struct ReduceByKeyScanOp { /// Reduction functor ReductionOp reduction_op; /// Constructor ReduceByKeyScanOp(ReductionOp reduction_op) : reduction_op(reduction_op) {} /// Binary scan operator template <typename ReduceByKeyuple> __device__ __forceinline__ ReduceByKeyuple operator()( const ReduceByKeyuple &first, const ReduceByKeyuple &second) { ReduceByKeyuple retval; retval.val = (second.flag) ? second.val : reduction_op(first.val, second.val); retval.flag = first.flag + second.flag; return retval; } }; /****************************************************************************** * Tuning policy types ******************************************************************************/ /** * Tuning policy for BlockReduceByKeyiles */ template < int _BLOCK_THREADS, int _ITEMS_PER_THREAD, BlockLoadAlgorithm _LOAD_ALGORITHM, bool _LOAD_WARP_TIME_SLICING, PtxLoadModifier _LOAD_MODIFIER, BlockScanAlgorithm _SCAN_ALGORITHM> struct BlockReduceByKeyilesPolicy { enum { BLOCK_THREADS = _BLOCK_THREADS, ITEMS_PER_THREAD = _ITEMS_PER_THREAD, LOAD_WARP_TIME_SLICING = _LOAD_WARP_TIME_SLICING, }; static const BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM; static const PtxLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; static const BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM; }; /****************************************************************************** * Thread block abstractions ******************************************************************************/ /** * \brief BlockReduceByKeyiles implements a stateful abstraction of CUDA thread blocks for participating in device-wide prefix scan. 
*/ template < typename BlockReduceByKeyilesPolicy, ///< Tuning policy typename KeyInputIteratorRA, ///< Random-access input iterator type for keys typename KeyOutputIteratorRA, ///< Random-access output iterator type for keys typename ValueInputIteratorRA, ///< Random-access input iterator type for values typename ValueOutputIteratorRA, ///< Random-access output iterator type for values typename ReductionOp, ///< Reduction functor type typename SizeT> ///< Offset integer type struct BlockReduceByKeyiles { //--------------------------------------------------------------------- // Types and constants //--------------------------------------------------------------------- // Data types of input iterators typedef typename std::iterator_traits<KeyInputIteratorRA>::value_type Key; // Key data type typedef typename std::iterator_traits<ValueInputIteratorRA>::value_type Value; // Value data type // Constants enum { BLOCK_THREADS = BlockReduceByKeyilesPolicy::BLOCK_THREADS, ITEMS_PER_THREAD = BlockReduceByKeyilesPolicy::ITEMS_PER_THREAD, TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, STATUS_PADDING = PtxArchProps::WARP_THREADS, }; // Block load type for keys typedef BlockLoad< KeyInputIteratorRA, BlockReduceByKeyilesPolicy::BLOCK_THREADS, BlockReduceByKeyilesPolicy::ITEMS_PER_THREAD, BlockReduceByKeyilesPolicy::LOAD_ALGORITHM, BlockReduceByKeyilesPolicy::LOAD_MODIFIER, BlockReduceByKeyilesPolicy::LOAD_WARP_TIME_SLICING> BlockLoadKeys; // Block load type for values typedef BlockLoad< ValueInputIteratorRA, BlockReduceByKeyilesPolicy::BLOCK_THREADS, BlockReduceByKeyilesPolicy::ITEMS_PER_THREAD, BlockReduceByKeyilesPolicy::LOAD_ALGORITHM, BlockReduceByKeyilesPolicy::LOAD_MODIFIER, BlockReduceByKeyilesPolicy::LOAD_WARP_TIME_SLICING> BlockLoadValues; // Block discontinuity type for setting tail flags typedef BlockDiscontinuity<Key, BLOCK_THREADS> BlockDiscontinuityKeys; // Scan tuple type typedef ReduceByKeyuple<Value, SizeT> ScanTuple; // Tile status descriptor type typedef ScanTileDescriptor<ScanTuple> ScanTileDescriptorT; // Block scan functor type typedef ReduceByKeyScanOp<ReductionOp> ScanOp; // Block scan prefix callback type typedef DeviceScanBlockPrefixOp<ScanTuple, ScanOp> PrefixCallback; // Block scan type typedef BlockScan< ScanTuple, BlockReduceByKeyilesPolicy::BLOCK_THREADS, BlockReduceByKeyilesPolicy::SCAN_ALGORITHM> BlockScanT; /// Shared memory type for this threadblock struct _TempStorage { union { typename BlockLoadKeys::TempStorage load_keys; // Smem needed for loading tiles of keys typename BlockLoadValues::TempStorage load_values; // Smem needed for loading tiles of values struct { typename BlockScanT::TempStorage scan; // Smem needed for tile scanning typename PrefixCallback::TempStorage prefix; // Smem needed for cooperative prefix callback }; }; typename BlockDiscontinuityKeys::TempStorage flagging; // Smem needed for tile scanning SizeT tile_idx; // Shared tile index }; /// Alias wrapper allowing storage to be unioned struct TempStorage : Uninitialized<_TempStorage> {}; //--------------------------------------------------------------------- // Per-thread fields //--------------------------------------------------------------------- _TempStorage &temp_storage; ///< Reference to temp_storage KeyInputIteratorRA d_keys_in; ///< Key input data KeyOutputIteratorRA d_keys_out; ///< Key output data ValueInputIteratorRA d_values_in; ///< Value input data ValueOutputIteratorRA d_values_out; ///< Value output data ScanTileDescriptorT *d_tile_status; ///< Global list of tile status ScanOp 
scan_op; ///< Binary scan operator int num_tiles; ///< Total number of input tiles for the entire problem SizeT num_items; ///< Total number of scan items for the entire problem //--------------------------------------------------------------------- // Interface //--------------------------------------------------------------------- // Constructor __device__ __forceinline__ BlockReduceByKeyiles( TempStorage &temp_storage, ///< Reference to temp_storage KeyInputIteratorRA d_keys_in, ///< Key input data KeyOutputIteratorRA d_keys_out, ///< Key output data ValueInputIteratorRA d_values_in, ///< Value input data ValueOutputIteratorRA d_values_out, ///< Value output data ScanTileDescriptorT *d_tile_status, ///< Global list of tile status ReductionOp reduction_op, ///< Binary scan operator int num_tiles, ///< Total number of input tiles for the entire problem SizeT num_items) ///< Total number of scan items for the entire problem : temp_storage(temp_storage.Alias()), d_keys_in(d_keys_in), d_keys_out(d_keys_out), d_values_in(d_values_in), d_values_out(d_values_out), d_tile_status(d_tile_status), scan_op(reduction_op), num_tiles(num_tiles), num_items(num_items) {} /** * Process a tile of input */ template <bool FULL_TILE> __device__ __forceinline__ void ConsumeTile( int tile_idx, ///< Tile index SizeT block_offset, ///< Tile offset int valid_items = TILE_ITEMS) ///< Number of valid items in the tile { Key keys[ITEMS_PER_THREAD]; Value values[ITEMS_PER_THREAD]; int tail_flags[ITEMS_PER_THREAD]; ScanTuple scan_tuples[ITEMS_PER_THREAD]; // Load keys if (FULL_TILE) BlockLoadKeys(temp_storage.load_keys).Load(d_keys_in + block_offset, keys); else BlockLoadKeys(temp_storage.load_keys).Load(d_keys_in + block_offset, keys, valid_items); // Set tail flags if (tile_idx == num_tiles - 1) { // Last tile BlockDiscontinuityKeys(temp_storage.flagging).FlagTails(tail_flags, keys, Equality()); } else { // Preceding tiles require the first element of the next tile Key tile_suffix_item; if (threadIdx.x == 0) tile_suffix_item = d_keys_in[block_offset + TILE_ITEMS]; BlockDiscontinuityKeys(temp_storage.flagging).FlagTails(tail_flags, keys, Equality(), tile_suffix_item); } __syncthreads(); // Load values if (FULL_TILE) BlockLoadValues(temp_storage.load_values).Load(d_values_in + block_offset, values); else BlockLoadValues(temp_storage.load_values).Load(d_values_in + block_offset, values, valid_items); // Assemble scan tuples #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) { scan_tuples[ITEM].value = values[ITEM]; scan_tuples[ITEM].flag = tail_flags[ITEM]; } __syncthreads(); // Perform inclusive prefix scan ScanTuple block_aggregate; if (tile_idx == 0) { // Without prefix callback BlockScanT(temp_storage.scan).InclusiveScan(scan_tuples, scan_tuples, scan_op, block_aggregate); // Update tile status if (threadIdx.x == 0) ScanTileDescriptorT::SetPrefix(d_tile_status, block_aggregate); } else { // With prefix callback PrefixCallback prefix_op(d_tile_status, temp_storage.prefix, scan_op, tile_idx); BlockScanT(temp_storage.scan).InclusiveScan(scan_tuples, scan_tuples, scan_op, block_aggregate, prefix_op); } // Scatter flagged keys and values to output #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) { int tile_item = (threadIdx.x * ITEMS_PER_THREAD) + ITEM; // Set the head flag on the last item in a partially-full tile if (!FULL_TILE && (tile_item == valid_items - 1)) tail_flags[ITEM] = 1; // Decrement scatter offset scan_tuples[ITEM].flag--; // Scatter key and aggregate value if flagged and 
in range if ((FULL_TILE || (tile_item < valid_items)) && (tail_flags[ITEM])) { d_keys_out[scan_tuples[ITEM].flag] = keys[ITEM]; d_values_out[scan_tuples[ITEM].flag] = scan_tuples[ITEM].value; } } } /** * Dequeue and scan tiles of elements */ __device__ __forceinline__ void ProcessTiles(GridQueue<int> queue) ///< Queue descriptor for assigning tiles of work to thread blocks { // We give each thread block at least one tile of input int tile_idx = blockIdx.x; // Consume full tiles of input SizeT block_offset = SizeT(TILE_ITEMS) * tile_idx; while (block_offset + TILE_ITEMS <= num_items) { ConsumeTile<true>(tile_idx, block_offset); // Get next tile #if CUB_PTX_ARCH < 200 // No concurrent kernels allowed, so just stripe tiles tile_idx += gridDim.x; #else // Concurrent kernels are allowed, so we must only use active blocks to dequeue tile indices if (threadIdx.x == 0) temp_storage.tile_idx = queue.Drain(1) + gridDim.x; __syncthreads(); tile_idx = temp_storage.tile_idx; #endif block_offset = SizeT(TILE_ITEMS) * tile_idx; } // Consume a partially-full tile if (block_offset < num_items) { // Consume a partially-full tile int valid_items = num_items - block_offset; ConsumeTile<false>(tile_idx, block_offset, valid_items); } } }; } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
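// A host-side toy (not part of the CUB header) that runs the reduce-by-key scan
// operator above sequentially, to show what the (value, flag) tuples compute: a
// set flag in the right-hand operand restarts the running value, while flags
// always accumulate. For clarity this toy places its flags at the start of each
// key run; the tile code above derives its flags with BlockDiscontinuity instead.
// The names here (DemoTuple, demo_scan_op) are illustrative.
#include <cstdio>
#include <vector>

struct DemoTuple { float value; int flag; };

// Same shape as ReduceByKeyScanOp instantiated with a sum reduction.
static DemoTuple demo_scan_op(const DemoTuple &first, const DemoTuple &second)
{
    DemoTuple retval;
    retval.value = second.flag ? second.value : first.value + second.value;
    retval.flag  = first.flag + second.flag;
    return retval;
}

static void demo_reduce_by_key_scan()
{
    // keys:  0 0 0 1 1 2   values: 1..6   flags mark the first item of each run
    std::vector<DemoTuple> items = {
        {1.f, 1}, {2.f, 0}, {3.f, 0}, {4.f, 1}, {5.f, 0}, {6.f, 1}
    };
    for (std::size_t i = 1; i < items.size(); ++i)   // sequential inclusive scan
        items[i] = demo_scan_op(items[i - 1], items[i]);
    // The last item of each run now carries that run's sum (6, 9, 6), and its
    // flag field counts the segments seen so far; ConsumeTile above likewise
    // decrements the scanned flag to obtain a scatter offset before writing out.
    for (const DemoTuple &t : items)
        std::printf("value = %4.1f  flag = %d\n", t.value, t.flag);
}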
the_stack
#ifndef NO_CUDA_MAIN __constant__ float Acuda[48]; #endif // typedef struct // { // float *coefs; // uint3 stride; // float3 gridInv; // int num_splines; // } multi_UBspline_3d_s_cuda; #ifndef NO_CUDA_MAIN multi_UBspline_3d_s_cuda* create_multi_UBspline_3d_s_cuda (multi_UBspline_3d_s* spline) { float A_h[48] = { -1.0/6.0, 3.0/6.0, -3.0/6.0, 1.0/6.0, 3.0/6.0, -6.0/6.0, 0.0/6.0, 4.0/6.0, -3.0/6.0, 3.0/6.0, 3.0/6.0, 1.0/6.0, 1.0/6.0, 0.0/6.0, 0.0/6.0, 0.0/6.0, 0.0, -0.5, 1.0, -0.5, 0.0, 1.5, -2.0, 0.0, 0.0, -1.5, 1.0, 0.5, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0, -1.0, 1.0, 0.0, 0.0, 3.0, -2.0, 0.0, 0.0, -3.0, 1.0, 0.0, 0.0, 1.0, 0.0 }; cudaMemcpyToSymbol(Acuda, A_h, 48*sizeof(float), 0, cudaMemcpyHostToDevice); multi_UBspline_3d_s_cuda *cuda_spline = (multi_UBspline_3d_s_cuda*) malloc (sizeof (multi_UBspline_3d_s_cuda*)); cuda_spline->num_splines = spline->num_splines; int Nx = spline->x_grid.num+3; int Ny = spline->y_grid.num+3; int Nz = spline->z_grid.num+3; int N = spline->num_splines; if ((N%SPLINE_BLOCK_SIZE) != 0) N += 64 - (N%SPLINE_BLOCK_SIZE); cuda_spline->stride.x = Ny*Nz*N; cuda_spline->stride.y = Nz*N; cuda_spline->stride.z = N; cuda_spline->gridInv.x = spline->x_grid.delta_inv; cuda_spline->gridInv.y = spline->y_grid.delta_inv; cuda_spline->gridInv.z = spline->z_grid.delta_inv; size_t size = Nx*Ny*Nz*N*sizeof(float); cudaMalloc((void**)&(cuda_spline->coefs), size); float *spline_buff = (float*)malloc(size); for (int ix=0; ix<Nx; ix++) for (int iy=0; iy<Ny; iy++) for (int iz=0; iz<Nz; iz++) for (int isp=0; isp<spline->num_splines; isp++) { spline_buff[ix*cuda_spline->stride.x + iy*cuda_spline->stride.y + iz*cuda_spline->stride.z + isp] = spline->coefs[ix*spline->x_stride + iy*spline->y_stride + iz*spline->z_stride + isp]; } cudaMemcpy(cuda_spline->coefs, spline_buff, size, cudaMemcpyHostToDevice); //free(spline_buff); return cuda_spline; } #endif __global__ static void eval_multi_multi_UBspline_3d_s_kernel (float *pos, float3 drInv, const float *coefs, float *vals[], uint3 strides) { int block = blockIdx.x; int thr = threadIdx.x; int ir = blockIdx.y; int off = block*SPLINE_BLOCK_SIZE+thr; __shared__ float *myval; __shared__ float abc[64]; __shared__ float3 r; if (thr == 0) { r.x = pos[3*ir+0]; r.y = pos[3*ir+1]; r.z = pos[3*ir+2]; myval = vals[ir]; } __syncthreads(); int3 index; float3 t; float s, sf; float4 tp[3]; s = r.x * drInv.x; sf = floor(s); index.x = (int)sf; t.x = s - sf; s = r.y * drInv.y; sf = floor(s); index.y = (int)sf; t.y = s - sf; s = r.z * drInv.z; sf = floor(s); index.z = (int)sf; t.z = s - sf; tp[0] = make_float4(t.x*t.x*t.x, t.x*t.x, t.x, 1.0); tp[1] = make_float4(t.y*t.y*t.y, t.y*t.y, t.y, 1.0); tp[2] = make_float4(t.z*t.z*t.z, t.z*t.z, t.z, 1.0); __shared__ float a[4], b[4], c[4]; if (thr < 4) { a[thr] = Acuda[4*thr+0]*tp[0].x + Acuda[4*thr+1]*tp[0].y + Acuda[4*thr+2]*tp[0].z + Acuda[4*thr+3]*tp[0].w; b[thr] = Acuda[4*thr+0]*tp[1].x + Acuda[4*thr+1]*tp[1].y + Acuda[4*thr+2]*tp[1].z + Acuda[4*thr+3]*tp[1].w; c[thr] = Acuda[4*thr+0]*tp[2].x + Acuda[4*thr+1]*tp[2].y + Acuda[4*thr+2]*tp[2].z + Acuda[4*thr+3]*tp[2].w; } __syncthreads(); int i = (thr>>4)&3; int j = (thr>>2)&3; int k = (thr & 3); if (thr < 64) abc[thr] = a[i]*b[j]*c[k]; __syncthreads(); float val = 0.0; for (int i=0; i<4; i++) { for (int j=0; j<4; j++) { float *base = coefs + (index.x+i)*strides.x + (index.y+j)*strides.y + index.z*strides.z; for (int k=0; k<4; k++) val += abc[16*i+4*j+k] * base[off+k*strides.z]; } } myval[off] = val; } __global__ static void eval_multi_multi_UBspline_3d_s_vgh_kernel (float 
*pos, float3 drInv, const float *coefs, float *vals[], float *grads[], float *hess[], uint3 strides) { int block = blockIdx.x; int thr = threadIdx.x; int ir = blockIdx.y; int off = block*SPLINE_BLOCK_SIZE+threadIdx.x; __shared__ float *myval, *mygrad, *myhess; __shared__ float3 r; if (thr == 0) { r.x = pos[3*ir+0]; r.y = pos[3*ir+1]; r.z = pos[3*ir+2]; myval = vals[ir]; mygrad = grads[ir]; myhess = hess[ir]; } __syncthreads(); int3 index; float3 t; float s, sf; float4 tp[3]; s = r.x * drInv.x; sf = floor(s); index.x = (int)sf; t.x = s - sf; s = r.y * drInv.y; sf = floor(s); index.y = (int)sf; t.y = s - sf; s = r.z * drInv.z; sf = floor(s); index.z = (int)sf; t.z = s - sf; tp[0] = make_float4(t.x*t.x*t.x, t.x*t.x, t.x, 1.0); tp[1] = make_float4(t.y*t.y*t.y, t.y*t.y, t.y, 1.0); tp[2] = make_float4(t.z*t.z*t.z, t.z*t.z, t.z, 1.0); // First 4 of a are value, second 4 are derivative, last four are // second derivative. __shared__ float a[12], b[12], c[12]; if (thr < 12) { a[thr] = Acuda[4*thr+0]*tp[0].x + Acuda[4*thr+1]*tp[0].y + Acuda[4*thr+2]*tp[0].z + Acuda[4*thr+3]*tp[0].z; b[thr] = Acuda[4*thr+0]*tp[1].x + Acuda[4*thr+1]*tp[1].y + Acuda[4*thr+2]*tp[1].z + Acuda[4*thr+3]*tp[1].z; c[thr] = Acuda[4*thr+0]*tp[2].x + Acuda[4*thr+1]*tp[2].y + Acuda[4*thr+2]*tp[2].z + Acuda[4*thr+3]*tp[2].z; } __syncthreads(); __shared__ float abc[640]; int i = (thr>>4)&3; int j = (thr>>2)&3; int k = (thr & 3); abc[(16*i+4*j+k)+0] = a[i+0]*b[j+0]*c[k+0]; // val abc[(16*i+4*j+k)+64] = a[i+4]*b[j+0]*c[k+0]; // d/dx abc[(16*i+4*j+k)+128] = a[i+0]*b[j+4]*c[k+0]; // d/dy abc[(16*i+4*j+k)+192] = a[i+0]*b[j+0]*c[k+4]; // d/dz abc[(16*i+4*j+k)+256] = a[i+8]*b[j+0]*c[k+0]; // d2/dx2 abc[(16*i+4*j+k)+320] = a[i+4]*b[j+4]*c[k+0]; // d2/dxdy abc[(16*i+4*j+k)+384] = a[i+4]*b[j+0]*c[k+4]; // d2/dxdz abc[(16*i+4*j+k)+448] = a[i+0]*b[j+8]*c[k+0]; // d2/dy2 abc[(16*i+4*j+k)+512] = a[i+0]*b[j+4]*c[k+4]; // d2/dydz abc[(16*i+4*j+k)+576] = a[i+0]*b[j+0]*c[k+8]; // d2/dz2 __syncthreads(); float v = 0.0, g0=0.0, g1=0.0, g2=0.0, h00=0.0, h01=0.0, h02=0.0, h11=0.0, h12=0.0, h22=0.0; int n = 0; float *b0 = coefs + index.x*strides.x + index.y*strides.y + index.z*strides.z + off; for (int i=0; i<4; i++) { for (int j=0; j<4; j++) { float *base = b0 + i*strides.x + j*strides.y; for (int k=0; k<4; k++) { float c = base[k*strides.z]; v += abc[n+0] * c; g0 += abc[n+1] * c; g1 += abc[n+2] * c; g2 += abc[n+3] * c; h00 += abc[n+4] * c; h01 += abc[n+5] * c; h02 += abc[n+6] * c; h11 += abc[n+7] * c; h12 += abc[n+8] * c; h22 += abc[n+9] * c; n += 10; } } } g0 *= drInv.x; g1 *= drInv.y; g2 *= drInv.z; h00 *= drInv.x * drInv.x; h01 *= drInv.x * drInv.y; h02 *= drInv.x * drInv.z; h11 *= drInv.y * drInv.y; h12 *= drInv.y * drInv.z; h22 *= drInv.z * drInv.z; // __shared__ float buff[6*SPLINE_BLOCK_SIZE]; // Note, we can reuse abc, by replacing buff with abc. 
myval[off] = v; abc[3*thr+0] = g0; abc[3*thr+1] = g1; abc[3*thr+2] = g2; __syncthreads(); for (int i=0; i<3; i++) mygrad[(3*block+i)*SPLINE_BLOCK_SIZE+thr] = abc[i*SPLINE_BLOCK_SIZE+thr]; __syncthreads(); // Write first half of Hessians abc[6*thr+0] = h00; abc[6*thr+1] = h01; abc[6*thr+2] = h02; abc[6*thr+3] = h11; abc[6*thr+4] = h12; abc[6*thr+5] = h22; __syncthreads(); for (int i=0; i<6; i++) myhess[(6*block+i)*SPLINE_BLOCK_SIZE+thr] = abc[i*SPLINE_BLOCK_SIZE+thr]; } extern "C" void eval_multi_multi_UBspline_3d_s_cuda (const multi_UBspline_3d_s_cuda *spline, float *pos_d, float *vals_d[], int num) { dim3 dimBlock(SPLINE_BLOCK_SIZE); dim3 dimGrid(spline->num_splines/SPLINE_BLOCK_SIZE, num); eval_multi_multi_UBspline_3d_s_kernel<<<dimGrid,dimBlock>>> (pos_d, spline->gridInv, spline->coefs, vals_d, spline->stride); } void test_multi_cuda2() { int numWalkers = 1000; float *vals[numWalkers], *grads[numWalkers], *hess[numWalkers]; float *coefs, __device__ **vals_d, **grads_d, **hess_d; float *r_d, *r_h; int xs, ys, zs, N; int Nx, Ny, Nz; N = 128; Nx = Ny = Nz = 32; xs = Ny*Nz*N; ys = Nz*N; zs = N; // Setup Bspline coefficients int size = Nx*Ny*Nz*N*sizeof(float); posix_memalign((void**)&coefs, 16, size); for (int ix=0; ix<Nx; ix++) for (int iy=0; iy<Ny; iy++) for (int iz=0; iz<Nz; iz++) for (int n=0; n<N; n++) coefs[ix*xs + iy*ys + iz*zs + n] = drand48(); Ugrid x_grid, y_grid, z_grid; x_grid.start = 0.0; x_grid.end = 1.0; x_grid.num = Nx; y_grid.start = 0.0; y_grid.end = 1.0; y_grid.num = Ny; z_grid.start = 0.0; z_grid.end = 1.0; z_grid.num = Nz; BCtype_s xBC, yBC, zBC; xBC.lCode = xBC.rCode = PERIODIC; yBC.lCode = yBC.rCode = PERIODIC; zBC.lCode = zBC.rCode = PERIODIC; multi_UBspline_3d_s *spline = create_multi_UBspline_3d_s (x_grid, y_grid, z_grid, xBC, yBC, zBC, N); for (int i=0; i<N; i++) set_multi_UBspline_3d_s (spline, i, coefs); multi_UBspline_3d_s_cuda *cudaspline = create_multi_UBspline_3d_s_cuda (spline); // Setup device value storage int numVals = N*numWalkers*10; float *valBlock_d, *valBlock_h; cudaMalloc((void**)&(valBlock_d), numVals*sizeof(float)); cudaMallocHost((void**)&(valBlock_h), numVals*sizeof(float)); cudaMalloc((void**)&(vals_d), numWalkers*sizeof(float*)); cudaMalloc((void**)&(grads_d), numWalkers*sizeof(float*)); cudaMalloc((void**)&(hess_d), numWalkers*sizeof(float*)); fprintf (stderr, "valBlock_d = %p\n", valBlock_d); for (int i=0; i<numWalkers; i++) { vals[i] = valBlock_d + i*N; grads[i] = valBlock_d + N*numWalkers + 3*i*N; hess[i] = valBlock_d + 4*N*numWalkers + 6*i*N; } cudaMemcpy(vals_d, vals, numWalkers*sizeof(float*), cudaMemcpyHostToDevice); cudaMemcpy(grads_d, grads, numWalkers*sizeof(float*), cudaMemcpyHostToDevice); cudaMemcpy(hess_d, hess, numWalkers*sizeof(float*), cudaMemcpyHostToDevice); fprintf (stderr, "Finished cuda allocations.\n"); // Setup walker positions cudaMalloc((void**)&(r_d), 3*numWalkers*sizeof(float)); cudaMallocHost((void**)&(r_h), 3*numWalkers*sizeof(float)); for (int ir=0; ir<numWalkers; ir++) { r_h[3*ir+0] = 0.5*drand48(); r_h[3*ir+1] = 0.5*drand48(); r_h[3*ir+2] = 0.5*drand48(); } dim3 dimBlock(SPLINE_BLOCK_SIZE); dim3 dimGrid(N/SPLINE_BLOCK_SIZE,numWalkers); float vals_host[N], vals_cuda[N]; // Check value for (int w=0; w<numWalkers; w++) { eval_multi_UBspline_3d_s (spline, r_h[3*w+0], r_h[3*w+1], r_h[3*w+2], vals_host); cudaMemcpy(r_d, r_h, 3*numWalkers*sizeof(float), cudaMemcpyHostToDevice); eval_multi_multi_UBspline_3d_s_kernel<<<dimGrid,dimBlock>>> (r_d, cudaspline->gridInv, cudaspline->coefs, vals_d, cudaspline->stride); 
cudaMemcpy(vals_cuda, valBlock_d+(N*w), N*sizeof(float), cudaMemcpyDeviceToHost); //for (int i=0; i<N; i++) fprintf (stderr, "%3i %15.8e %15.8e\n", w, vals_host[0], vals_cuda[0]); } clock_t start, end; start = clock(); for (int i=0; i<10000; i++) { if ((i%1000) == 0) fprintf (stderr, "i = %d\n", i); cudaMemcpy(r_d, r_h, 3*numWalkers*sizeof(float), cudaMemcpyHostToDevice); eval_multi_multi_UBspline_3d_s_kernel<<<dimGrid,dimBlock>>> (r_d, cudaspline->gridInv, cudaspline->coefs, vals_d, cudaspline->stride); } end = clock(); double time = (double)(end-start)/(double)((double)CLOCKS_PER_SEC*(double)10000*N*numWalkers); fprintf (stderr, "Evals per second = %1.8e\n", 1.0/time); start = clock(); for (int i=0; i<10000; i++) { if ((i%1000) == 0) fprintf (stderr, "i = %d\n", i); cudaMemcpy(r_d, r_h, 3*numWalkers*sizeof(float), cudaMemcpyHostToDevice); eval_multi_multi_UBspline_3d_s_vgh_kernel<<<dimGrid,dimBlock>>> (r_d, cudaspline->gridInv, cudaspline->coefs, vals_d, grads_d, hess_d, cudaspline->stride); } end = clock(); time = (double)(end-start)/(double)((double)CLOCKS_PER_SEC*(double)10000*N*numWalkers); fprintf (stderr, "VGH Evals per second = %1.8e\n", 1.0/time); cudaFree (spline->coefs); cudaFree (valBlock_d); cudaFree (vals_d); cudaFree (grads_d); cudaFree (hess_d); cudaFree (r_d); } static void * test_multi_cuda(void *thread) { cudaSetDevice((int)(size_t)thread); fprintf (stderr, "In thread %p\n", thread); int numWalkers = 1000; float *coefs , __device__ *vals[numWalkers], *grads[numWalkers], *hess[numWalkers]; float *coefs_d, __device__ **vals_d, **grads_d, **hess_d; float A_h[48] = { -1.0/6.0, 3.0/6.0, -3.0/6.0, 1.0/6.0, 3.0/6.0, -6.0/6.0, 0.0/6.0, 4.0/6.0, -3.0/6.0, 3.0/6.0, 3.0/6.0, 1.0/6.0, 1.0/6.0, 0.0/6.0, 0.0/6.0, 0.0/6.0, 0.0, -0.5, 1.0, -0.5, 0.0, 1.5, -2.0, 0.0, 0.0, -1.5, 1.0, 0.5, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0, -1.0, 1.0, 0.0, 0.0, 3.0, -2.0, 0.0, 0.0, -3.0, 1.0, 0.0, 0.0, 1.0, 0.0 }; // Copy A to host cudaMemcpy(Acuda, A_h, 48*sizeof(float), cudaMemcpyHostToDevice); float *r_d, *r_h; int xs, ys, zs, N; int Nx, Ny, Nz; N = 128; Nx = Ny = Nz = 32; xs = Ny*Nz*N; ys = Nz*N; zs = N; float3 drInv; drInv.x = 1.0/float(Nx); drInv.y = 1.0/float(Ny); drInv.z = 1.0/float(Nz); // Setup Bspline coefficients int size = Nx*Ny*Nz*N*sizeof(float); posix_memalign((void**)&coefs, 16, size); for (int ix=0; ix<Nx; ix++) for (int iy=0; iy<Ny; iy++) for (int iz=0; iz<Nz; iz++) for (int n=0; n<N; n++) coefs[ix*xs + iy*ys + iz*zs + n] = drand48(); fprintf (stderr, "Filled in coefs.\n"); fprintf (stderr, "size = %d\n", size); // Setup CUDA coefficients cudaMalloc((void**)&coefs_d, 2*size); cudaMemcpy(coefs_d, coefs, size, cudaMemcpyHostToDevice); // Setup device value storage int numVals = N*numWalkers*10; float *valBlock_d, *valBlock_h; cudaMalloc((void**)&(valBlock_d), numVals*sizeof(float)); cudaMallocHost((void**)&(valBlock_h), numVals*sizeof(float)); cudaMalloc((void**)&(vals_d), numWalkers*sizeof(float*)); cudaMalloc((void**)&(grads_d), numWalkers*sizeof(float*)); cudaMalloc((void**)&(hess_d), numWalkers*sizeof(float*)); fprintf (stderr, "valBlock_d = %p\n", valBlock_d); for (int i=0; i<numWalkers; i++) { vals[i] = valBlock_d + i*N; grads[i] = valBlock_d + N*numWalkers + 3*i*N; hess[i] = valBlock_d + 4*N*numWalkers + 6*i*N; } cudaMemcpy(vals_d, vals, numWalkers*sizeof(float*), cudaMemcpyHostToDevice); cudaMemcpy(grads_d, grads, numWalkers*sizeof(float*), cudaMemcpyHostToDevice); cudaMemcpy(hess_d, hess, numWalkers*sizeof(float*), cudaMemcpyHostToDevice); fprintf (stderr, "Finished cuda 
allocations.\n"); // Setup walker positions cudaMalloc((void**)&(r_d), 4*numWalkers*sizeof(float)); cudaMallocHost((void**)&(r_h), 4*numWalkers*sizeof(float)); for (int ir=0; ir<numWalkers; ir++) { r_h[4*ir+0] = 0.5*drand48(); r_h[4*ir+1] = 0.5*drand48(); r_h[4*ir+2] = 0.5*drand48(); } uint3 strides; strides.x = xs; strides.y = ys; strides.z = zs; dim3 dimBlock(SPLINE_BLOCK_SIZE); dim3 dimGrid(N/SPLINE_BLOCK_SIZE,numWalkers); clock_t start, end; start = clock(); for (int i=0; i<10000; i++) { if ((i%1000) == 0) fprintf (stderr, "i = %d\n", i); cudaMemcpy(r_d, r_h, 4*numWalkers*sizeof(float), cudaMemcpyHostToDevice); eval_multi_multi_UBspline_3d_s_kernel<<<dimGrid,dimBlock>>> (r_d, drInv, coefs_d, vals_d, strides); } end = clock(); double time = (double)(end-start)/(double)((double)CLOCKS_PER_SEC*(double)10000*N*numWalkers); fprintf (stderr, "VGH evals per second = %1.8e\n", 1.0/time); start = clock(); for (int i=0; i<10000; i++) { if ((i%1000) == 0) fprintf (stderr, "i = %d\n", i); cudaMemcpy(r_d, r_h, 4*numWalkers*sizeof(float), cudaMemcpyHostToDevice); eval_multi_multi_UBspline_3d_s_vgh_kernel<<<dimGrid,dimBlock>>> (r_d, drInv, coefs_d, vals_d, grads_d, hess_d, strides); } end = clock(); time = (double)(end-start)/(double)((double)CLOCKS_PER_SEC*(double)10000*N*numWalkers); fprintf (stderr, "Evals per second = %1.8e\n", 1.0/time); // cudaFree (valBlock_d); // cudaFree (vals_d); // cudaFree (coefs_d); // cudaFree (r_d); return NULL; } #ifndef NO_CUDA_MAIN main() { int deviceCount; cudaGetDeviceCount(&deviceCount); fprintf (stderr, "Detected %d CUDA devices.\n", deviceCount); // test_cuda(); for (int device = 0; device < deviceCount; ++device) { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, device); fprintf (stderr, "Device %d:\n", device); fprintf (stderr, " Global memory: %10d\n", deviceProp.totalGlobalMem); fprintf (stderr, " MultiProcessors: %10d\n", deviceProp.multiProcessorCount); fprintf (stderr, " Registers: %10d\n", deviceProp.regsPerBlock); fprintf (stderr, " Constant memory: %10d\n", deviceProp.totalConstMem); fprintf (stderr, " Shared memory: %10d\n", deviceProp.sharedMemPerBlock); fprintf (stderr, " Clock rate: %10d\n", deviceProp.clockRate); } // test_multi_cuda((void*)0); test_multi_cuda2(); fprintf (stderr, "After frees.\n"); } #endif
the_stack
#include <cuda.h> // for CUDA_VERSION #include "../common/interp.h" #define ipow(a,b) ( (magma_int_t)(std::pow( (float)(a), (float)(b) ) ) ) ////////////////////////////////////////////////////////////////////////////////////////// template<typename T, int P, int Q> static magma_int_t interp_generic_kernel_driver( magma_int_t dim, magma_int_t ncomp, const T *dT, magma_trans_t transT, const T *dU, magma_int_t estrdU, magma_int_t cstrdU, T *dV, magma_int_t estrdV, magma_int_t cstrdV, magma_int_t nelem, magma_queue_t queue) { magma_device_t device; magma_getdevice( &device ); magma_int_t shmem_max, nthreads_max; magma_int_t pre = ipow(P, dim-1); //ncomp*CeedIntPow(P, dim-1); magma_int_t post = 1; // ncomp*Q*CeedIntPow(P>Q?P:Q,dim-1); // originally the exponent is (dim-1), but we use dim because // we have to read the original u in shared memory // the original implementation access u directly magma_int_t tmp_size = ipow(max(P,Q), dim); //ncomp * Q * ipow(max(P,Q), dim); magma_int_t shmem = P * Q * sizeof(T); shmem += 2 * tmp_size * sizeof(T); magma_int_t nthreads = max(P, ipow(Q, dim-1) ); nthreads = magma_roundup( nthreads, Q ); // nthreads must be multiple of Q cudaDeviceGetAttribute (&nthreads_max, cudaDevAttrMaxThreadsPerBlock, device); #if CUDA_VERSION >= 9000 cudaDeviceGetAttribute (&shmem_max, cudaDevAttrMaxSharedMemoryPerBlockOptin, device); if (shmem <= shmem_max) { cudaFuncSetAttribute(interp_generic_kernel<T, P, Q>, cudaFuncAttributeMaxDynamicSharedMemorySize, shmem); } #else cudaDeviceGetAttribute (&shmem_max, cudaDevAttrMaxSharedMemoryPerBlock, device); #endif // CUDA_VERSION >= 9000 if ( nthreads > nthreads_max || shmem > shmem_max ) { return 1; } else { dim3 threads(nthreads, 1, 1); dim3 grid(nelem, ncomp, 1); interp_generic_kernel<T, P, Q><<<grid, threads, shmem, magma_queue_get_cuda_stream(queue)>>> ( dim, ncomp, pre, post, tmp_size, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV ); return (cudaPeekAtLastError() == cudaSuccess) ? 
0 : 1; } } ////////////////////////////////////////////////////////////////////////////////////////// template<int P> static magma_int_t magma_interp_generic_q( magma_int_t Q, magma_int_t dim, magma_int_t ncomp, const CeedScalar *dT, magma_trans_t transT, const CeedScalar *dU, magma_int_t estrdU, magma_int_t cstrdU, CeedScalar *dV, magma_int_t estrdV, magma_int_t cstrdV, magma_int_t nelem, magma_queue_t queue) { magma_int_t launch_failed = 0; switch (Q) { case 1: launch_failed = interp_generic_kernel_driver<CeedScalar, P, 1> (dim, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, queue); break; case 2: launch_failed = interp_generic_kernel_driver<CeedScalar, P, 2> (dim, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, queue); break; case 3: launch_failed = interp_generic_kernel_driver<CeedScalar, P, 3> (dim, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, queue); break; case 4: launch_failed = interp_generic_kernel_driver<CeedScalar, P, 4> (dim, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, queue); break; case 5: launch_failed = interp_generic_kernel_driver<CeedScalar, P, 5> (dim, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, queue); break; case 6: launch_failed = interp_generic_kernel_driver<CeedScalar, P, 6> (dim, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, queue); break; case 7: launch_failed = interp_generic_kernel_driver<CeedScalar, P, 7> (dim, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, queue); break; case 8: launch_failed = interp_generic_kernel_driver<CeedScalar, P, 8> (dim, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, queue); break; case 9: launch_failed = interp_generic_kernel_driver<CeedScalar, P, 9> (dim, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, queue); break; case 10: launch_failed = interp_generic_kernel_driver<CeedScalar, P,10> (dim, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, queue); break; case 11: launch_failed = interp_generic_kernel_driver<CeedScalar, P,11> (dim, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, queue); break; case 12: launch_failed = interp_generic_kernel_driver<CeedScalar, P,12> (dim, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, queue); break; case 13: launch_failed = interp_generic_kernel_driver<CeedScalar, P,13> (dim, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, queue); break; case 14: launch_failed = interp_generic_kernel_driver<CeedScalar, P,14> (dim, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, queue); break; case 15: launch_failed = interp_generic_kernel_driver<CeedScalar, P,15> (dim, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, queue); break; case 16: launch_failed = interp_generic_kernel_driver<CeedScalar, P,16> (dim, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, queue); break; default: launch_failed = 1; } return launch_failed; } ////////////////////////////////////////////////////////////////////////////////////////// magma_int_t static magma_interp_generic_q_p( magma_int_t P, magma_int_t Q, magma_int_t dim, magma_int_t ncomp, const CeedScalar *dT, magma_trans_t transT, const CeedScalar *dU, magma_int_t estrdU, magma_int_t cstrdU, CeedScalar *dV, magma_int_t estrdV, magma_int_t cstrdV, magma_int_t nelem, magma_queue_t queue) { magma_int_t launch_failed = 0; switch (P) { case 1: launch_failed = magma_interp_generic_q< 1> (Q, 
dim, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, queue); break; case 2: launch_failed = magma_interp_generic_q< 2> (Q, dim, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, queue); break; case 3: launch_failed = magma_interp_generic_q< 3> (Q, dim, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, queue); break; case 4: launch_failed = magma_interp_generic_q< 4> (Q, dim, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, queue); break; case 5: launch_failed = magma_interp_generic_q< 5> (Q, dim, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, queue); break; case 6: launch_failed = magma_interp_generic_q< 6> (Q, dim, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, queue); break; case 7: launch_failed = magma_interp_generic_q< 7> (Q, dim, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, queue); break; case 8: launch_failed = magma_interp_generic_q< 8> (Q, dim, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, queue); break; case 9: launch_failed = magma_interp_generic_q< 9> (Q, dim, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, queue); break; case 10: launch_failed = magma_interp_generic_q<10> (Q, dim, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, queue); break; case 11: launch_failed = magma_interp_generic_q<11> (Q, dim, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, queue); break; case 12: launch_failed = magma_interp_generic_q<12> (Q, dim, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, queue); break; case 13: launch_failed = magma_interp_generic_q<13> (Q, dim, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, queue); break; case 14: launch_failed = magma_interp_generic_q<14> (Q, dim, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, queue); break; case 15: launch_failed = magma_interp_generic_q<15> (Q, dim, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, queue); break; case 16: launch_failed = magma_interp_generic_q<16> (Q, dim, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, queue); break; default: launch_failed = 1; } return launch_failed; } ////////////////////////////////////////////////////////////////////////////////////////// extern "C" magma_int_t magma_interp_generic( magma_int_t P, magma_int_t Q, magma_int_t dim, magma_int_t ncomp, const CeedScalar *dT, CeedTransposeMode tmode, const CeedScalar *dU, magma_int_t estrdU, magma_int_t cstrdU, CeedScalar *dV, magma_int_t estrdV, magma_int_t cstrdV, magma_int_t nelem, magma_queue_t queue) { magma_int_t launch_failed = 0; magma_trans_t transT = (tmode == CEED_NOTRANSPOSE) ? MagmaNoTrans : MagmaTrans; launch_failed = magma_interp_generic_q_p( P, Q, dim, ncomp, dT, transT, dU, estrdU, cstrdU, dV, estrdV, cstrdV, nelem, queue); return launch_failed; }
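The nested switches above exist only to turn the runtime basis sizes P and Q into compile-time template parameters, so the kernel driver can size its shared memory and unroll loops statically. A minimal, self-contained sketch of that dispatch pattern follows; the names kernel_driver, dispatch_q, and dispatch_p_q are hypothetical stand-ins rather than part of the libCEED/MAGMA API, and the printf simply marks where a kernel launch would go.

#include <cstdio>

// In the real code this would configure and launch a CUDA kernel whose
// shared-memory footprint and loop bounds depend on the compile-time P and Q.
template <int P, int Q>
int kernel_driver(int nelem) {
    std::printf("driver<P=%d,Q=%d> over %d elements\n", P, Q, nelem);
    return 0; // 0 == success, mirroring launch_failed above
}

// Second-level dispatch: runtime Q -> compile-time Q.
template <int P>
int dispatch_q(int Q, int nelem) {
    switch (Q) {
        case 1: return kernel_driver<P, 1>(nelem);
        case 2: return kernel_driver<P, 2>(nelem);
        case 3: return kernel_driver<P, 3>(nelem);
        default: return 1; // unsupported Q
    }
}

// First-level dispatch: runtime P -> compile-time P.
int dispatch_p_q(int P, int Q, int nelem) {
    switch (P) {
        case 1: return dispatch_q<1>(Q, nelem);
        case 2: return dispatch_q<2>(Q, nelem);
        case 3: return dispatch_q<3>(Q, nelem);
        default: return 1; // unsupported P
    }
}

The price of this pattern is the combinatorial number of instantiations (here 16 x 16 in the original), which is why each case body is kept to a single forwarding call.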
the_stack
* \file * AgentRadixSortUpsweep implements a stateful abstraction of CUDA thread blocks for participating in device-wide radix sort upsweep . */ #pragma once #include "../thread/thread_reduce.cuh" #include "../thread/thread_load.cuh" #include "../warp/warp_reduce.cuh" #include "../block/block_load.cuh" #include "../util_type.cuh" #include "../iterator/cache_modified_input_iterator.cuh" #include "../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /****************************************************************************** * Tuning policy types ******************************************************************************/ /** * Parameterizable tuning policy type for AgentRadixSortUpsweep */ template < int _BLOCK_THREADS, ///< Threads per thread block int _ITEMS_PER_THREAD, ///< Items per thread (per tile of input) CacheLoadModifier _LOAD_MODIFIER, ///< Cache load modifier for reading keys int _RADIX_BITS> ///< The number of radix bits, i.e., log2(bins) struct AgentRadixSortUpsweepPolicy { enum { BLOCK_THREADS = _BLOCK_THREADS, ///< Threads per thread block ITEMS_PER_THREAD = _ITEMS_PER_THREAD, ///< Items per thread (per tile of input) RADIX_BITS = _RADIX_BITS, ///< The number of radix bits, i.e., log2(bins) }; static const CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; ///< Cache load modifier for reading keys }; /****************************************************************************** * Thread block abstractions ******************************************************************************/ /** * \brief AgentRadixSortUpsweep implements a stateful abstraction of CUDA thread blocks for participating in device-wide radix sort upsweep . */ template < typename AgentRadixSortUpsweepPolicy, ///< Parameterized AgentRadixSortUpsweepPolicy tuning policy type typename KeyT, ///< KeyT type typename OffsetT> ///< Signed integer type for global offsets struct AgentRadixSortUpsweep { //--------------------------------------------------------------------- // Type definitions and constants //--------------------------------------------------------------------- typedef typename Traits<KeyT>::UnsignedBits UnsignedBits; // Integer type for digit counters (to be packed into words of PackedCounters) typedef unsigned char DigitCounter; // Integer type for packing DigitCounters into columns of shared memory banks typedef unsigned int PackedCounter; static const CacheLoadModifier LOAD_MODIFIER = AgentRadixSortUpsweepPolicy::LOAD_MODIFIER; enum { RADIX_BITS = AgentRadixSortUpsweepPolicy::RADIX_BITS, BLOCK_THREADS = AgentRadixSortUpsweepPolicy::BLOCK_THREADS, KEYS_PER_THREAD = AgentRadixSortUpsweepPolicy::ITEMS_PER_THREAD, RADIX_DIGITS = 1 << RADIX_BITS, LOG_WARP_THREADS = CUB_PTX_LOG_WARP_THREADS, WARP_THREADS = 1 << LOG_WARP_THREADS, WARPS = (BLOCK_THREADS + WARP_THREADS - 1) / WARP_THREADS, TILE_ITEMS = BLOCK_THREADS * KEYS_PER_THREAD, BYTES_PER_COUNTER = sizeof(DigitCounter), LOG_BYTES_PER_COUNTER = Log2<BYTES_PER_COUNTER>::VALUE, PACKING_RATIO = sizeof(PackedCounter) / sizeof(DigitCounter), LOG_PACKING_RATIO = Log2<PACKING_RATIO>::VALUE, LOG_COUNTER_LANES = CUB_MAX(0, RADIX_BITS - LOG_PACKING_RATIO), COUNTER_LANES = 1 << LOG_COUNTER_LANES, // To prevent counter overflow, we must periodically unpack and aggregate the // digit counters back into registers. Each counter lane is assigned to a // warp for aggregation. 
LANES_PER_WARP = CUB_MAX(1, (COUNTER_LANES + WARPS - 1) / WARPS), // Unroll tiles in batches without risk of counter overflow UNROLL_COUNT = CUB_MIN(64, 255 / KEYS_PER_THREAD), UNROLLED_ELEMENTS = UNROLL_COUNT * TILE_ITEMS, }; // Input iterator wrapper type (for applying cache modifier)s typedef CacheModifiedInputIterator<LOAD_MODIFIER, UnsignedBits, OffsetT> KeysItr; /** * Shared memory storage layout */ union __align__(16) _TempStorage { DigitCounter thread_counters[COUNTER_LANES][BLOCK_THREADS][PACKING_RATIO]; PackedCounter packed_thread_counters[COUNTER_LANES][BLOCK_THREADS]; OffsetT block_counters[WARP_THREADS][RADIX_DIGITS]; }; /// Alias wrapper allowing storage to be unioned struct TempStorage : Uninitialized<_TempStorage> {}; //--------------------------------------------------------------------- // Thread fields (aggregate state bundle) //--------------------------------------------------------------------- // Shared storage for this CTA _TempStorage &temp_storage; // Thread-local counters for periodically aggregating composite-counter lanes OffsetT local_counts[LANES_PER_WARP][PACKING_RATIO]; // Input and output device pointers KeysItr d_keys_in; // The least-significant bit position of the current digit to extract int current_bit; // Number of bits in current digit int num_bits; //--------------------------------------------------------------------- // Helper structure for templated iteration //--------------------------------------------------------------------- // Iterate template <int COUNT, int MAX> struct Iterate { // BucketKeys static __device__ __forceinline__ void BucketKeys( AgentRadixSortUpsweep &cta, UnsignedBits keys[KEYS_PER_THREAD]) { cta.Bucket(keys[COUNT]); // Next Iterate<COUNT + 1, MAX>::BucketKeys(cta, keys); } }; // Terminate template <int MAX> struct Iterate<MAX, MAX> { // BucketKeys static __device__ __forceinline__ void BucketKeys(AgentRadixSortUpsweep &/*cta*/, UnsignedBits /*keys*/[KEYS_PER_THREAD]) {} }; //--------------------------------------------------------------------- // Utility methods //--------------------------------------------------------------------- /** * Decode a key and increment corresponding smem digit counter */ __device__ __forceinline__ void Bucket(UnsignedBits key) { // Perform transform op UnsignedBits converted_key = Traits<KeyT>::TwiddleIn(key); // Extract current digit bits UnsignedBits digit = BFE(converted_key, current_bit, num_bits); // Get sub-counter offset UnsignedBits sub_counter = digit & (PACKING_RATIO - 1); // Get row offset UnsignedBits row_offset = digit >> LOG_PACKING_RATIO; // Increment counter temp_storage.thread_counters[row_offset][threadIdx.x][sub_counter]++; } /** * Reset composite counters */ __device__ __forceinline__ void ResetDigitCounters() { #pragma unroll for (int LANE = 0; LANE < COUNTER_LANES; LANE++) { temp_storage.packed_thread_counters[LANE][threadIdx.x] = 0; } } /** * Reset the unpacked counters in each thread */ __device__ __forceinline__ void ResetUnpackedCounters() { #pragma unroll for (int LANE = 0; LANE < LANES_PER_WARP; LANE++) { #pragma unroll for (int UNPACKED_COUNTER = 0; UNPACKED_COUNTER < PACKING_RATIO; UNPACKED_COUNTER++) { local_counts[LANE][UNPACKED_COUNTER] = 0; } } } /** * Extracts and aggregates the digit counters for each counter lane * owned by this warp */ __device__ __forceinline__ void UnpackDigitCounts() { unsigned int warp_id = threadIdx.x >> LOG_WARP_THREADS; unsigned int warp_tid = LaneId(); #pragma unroll for (int LANE = 0; LANE < LANES_PER_WARP; LANE++) { const int 
counter_lane = (LANE * WARPS) + warp_id; if (counter_lane < COUNTER_LANES) { #pragma unroll for (int PACKED_COUNTER = 0; PACKED_COUNTER < BLOCK_THREADS; PACKED_COUNTER += WARP_THREADS) { #pragma unroll for (int UNPACKED_COUNTER = 0; UNPACKED_COUNTER < PACKING_RATIO; UNPACKED_COUNTER++) { OffsetT counter = temp_storage.thread_counters[counter_lane][warp_tid + PACKED_COUNTER][UNPACKED_COUNTER]; local_counts[LANE][UNPACKED_COUNTER] += counter; } } } } } /** * Processes a single, full tile */ __device__ __forceinline__ void ProcessFullTile(OffsetT block_offset) { // Tile of keys UnsignedBits keys[KEYS_PER_THREAD]; LoadDirectStriped<BLOCK_THREADS>(threadIdx.x, d_keys_in + block_offset, keys); // Prevent hoisting CTA_SYNC(); // Bucket tile of keys Iterate<0, KEYS_PER_THREAD>::BucketKeys(*this, keys); } /** * Processes a single load (may have some threads masked off) */ __device__ __forceinline__ void ProcessPartialTile( OffsetT block_offset, const OffsetT &block_end) { // Process partial tile if necessary using single loads block_offset += threadIdx.x; while (block_offset < block_end) { // Load and bucket key UnsignedBits key = d_keys_in[block_offset]; Bucket(key); block_offset += BLOCK_THREADS; } } //--------------------------------------------------------------------- // Interface //--------------------------------------------------------------------- /** * Constructor */ __device__ __forceinline__ AgentRadixSortUpsweep( TempStorage &temp_storage, const KeyT *d_keys_in, int current_bit, int num_bits) : temp_storage(temp_storage.Alias()), d_keys_in(reinterpret_cast<const UnsignedBits*>(d_keys_in)), current_bit(current_bit), num_bits(num_bits) {} /** * Compute radix digit histograms from a segment of input tiles. */ __device__ __forceinline__ void ProcessRegion( OffsetT block_offset, const OffsetT &block_end) { // Reset digit counters in smem and unpacked counters in registers ResetDigitCounters(); ResetUnpackedCounters(); // Unroll batches of full tiles while (block_offset + UNROLLED_ELEMENTS <= block_end) { for (int i = 0; i < UNROLL_COUNT; ++i) { ProcessFullTile(block_offset); block_offset += TILE_ITEMS; } CTA_SYNC(); // Aggregate back into local_count registers to prevent overflow UnpackDigitCounts(); CTA_SYNC(); // Reset composite counters in lanes ResetDigitCounters(); } // Unroll single full tiles while (block_offset + TILE_ITEMS <= block_end) { ProcessFullTile(block_offset); block_offset += TILE_ITEMS; } // Process partial tile if necessary ProcessPartialTile( block_offset, block_end); CTA_SYNC(); // Aggregate back into local_count registers UnpackDigitCounts(); } /** * Extract counts (saving them to the external array) */ template <bool IS_DESCENDING> __device__ __forceinline__ void ExtractCounts( OffsetT *counters, int bin_stride = 1, int bin_offset = 0) { unsigned int warp_id = threadIdx.x >> LOG_WARP_THREADS; unsigned int warp_tid = LaneId(); // Place unpacked digit counters in shared memory #pragma unroll for (int LANE = 0; LANE < LANES_PER_WARP; LANE++) { int counter_lane = (LANE * WARPS) + warp_id; if (counter_lane < COUNTER_LANES) { int digit_row = counter_lane << LOG_PACKING_RATIO; #pragma unroll for (int UNPACKED_COUNTER = 0; UNPACKED_COUNTER < PACKING_RATIO; UNPACKED_COUNTER++) { int bin_idx = digit_row + UNPACKED_COUNTER; temp_storage.block_counters[warp_tid][bin_idx] = local_counts[LANE][UNPACKED_COUNTER]; } } } CTA_SYNC(); // Rake-reduce bin_count reductions // Whole blocks #pragma unroll for (int BIN_BASE = RADIX_DIGITS % BLOCK_THREADS; (BIN_BASE + BLOCK_THREADS) <= 
RADIX_DIGITS; BIN_BASE += BLOCK_THREADS) { int bin_idx = BIN_BASE + threadIdx.x; OffsetT bin_count = 0; #pragma unroll for (int i = 0; i < WARP_THREADS; ++i) bin_count += temp_storage.block_counters[i][bin_idx]; if (IS_DESCENDING) bin_idx = RADIX_DIGITS - bin_idx - 1; counters[(bin_stride * bin_idx) + bin_offset] = bin_count; } // Remainder if ((RADIX_DIGITS % BLOCK_THREADS != 0) && (threadIdx.x < RADIX_DIGITS)) { int bin_idx = threadIdx.x; OffsetT bin_count = 0; #pragma unroll for (int i = 0; i < WARP_THREADS; ++i) bin_count += temp_storage.block_counters[i][bin_idx]; if (IS_DESCENDING) bin_idx = RADIX_DIGITS - bin_idx - 1; counters[(bin_stride * bin_idx) + bin_offset] = bin_count; } } /** * Extract counts */ template <int BINS_TRACKED_PER_THREAD> __device__ __forceinline__ void ExtractCounts( OffsetT (&bin_count)[BINS_TRACKED_PER_THREAD]) ///< [out] The exclusive prefix sum for the digits [(threadIdx.x * BINS_TRACKED_PER_THREAD) ... (threadIdx.x * BINS_TRACKED_PER_THREAD) + BINS_TRACKED_PER_THREAD - 1] { unsigned int warp_id = threadIdx.x >> LOG_WARP_THREADS; unsigned int warp_tid = LaneId(); // Place unpacked digit counters in shared memory #pragma unroll for (int LANE = 0; LANE < LANES_PER_WARP; LANE++) { int counter_lane = (LANE * WARPS) + warp_id; if (counter_lane < COUNTER_LANES) { int digit_row = counter_lane << LOG_PACKING_RATIO; #pragma unroll for (int UNPACKED_COUNTER = 0; UNPACKED_COUNTER < PACKING_RATIO; UNPACKED_COUNTER++) { int bin_idx = digit_row + UNPACKED_COUNTER; temp_storage.block_counters[warp_tid][bin_idx] = local_counts[LANE][UNPACKED_COUNTER]; } } } CTA_SYNC(); // Rake-reduce bin_count reductions #pragma unroll for (int track = 0; track < BINS_TRACKED_PER_THREAD; ++track) { int bin_idx = (threadIdx.x * BINS_TRACKED_PER_THREAD) + track; if ((BLOCK_THREADS == RADIX_DIGITS) || (bin_idx < RADIX_DIGITS)) { bin_count[track] = 0; #pragma unroll for (int i = 0; i < WARP_THREADS; ++i) bin_count[track] += temp_storage.block_counters[i][bin_idx]; } } } }; } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
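The upsweep agent above packs several 8-bit digit counters into one 32-bit word so a single store can reset a whole lane, and periodically unpacks them into wider registers before the 8-bit fields can overflow. A host-side sketch of that packing idea follows; it is illustrative plain C++ only, and the digit-to-byte mapping is whatever the host's byte order gives, not CUB's exact sub-counter layout.

#include <cstdint>
#include <cstdio>

int main() {
    typedef unsigned char DigitCounter;   // 8-bit per-digit counter
    typedef unsigned int  PackedCounter;  // holds 4 DigitCounters
    const int PACKING_RATIO = sizeof(PackedCounter) / sizeof(DigitCounter); // 4

    PackedCounter packed = 0;             // one store clears all 4 sub-counters
    DigitCounter* counters = reinterpret_cast<DigitCounter*>(&packed);

    // Bucket some digits (values 0..3 select the sub-counter in this sketch)
    const int digits[] = {0, 3, 3, 1, 2, 3};
    for (int d : digits) counters[d]++;

    // Periodic unpack into wide accumulators so the 8-bit fields never wrap
    uint64_t wide[4] = {0, 0, 0, 0};
    for (int i = 0; i < PACKING_RATIO; ++i) wide[i] += counters[i];
    packed = 0;                           // reset for the next batch of tiles

    for (int i = 0; i < PACKING_RATIO; ++i)
        std::printf("digit %d: %llu\n", i, (unsigned long long)wide[i]);
    return 0;
}

In the kernel the same trick plays out per thread: UNROLL_COUNT bounds how many keys can be bucketed before UnpackDigitCounts() must drain the 8-bit counters into local_counts.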
the_stack
#include "pcl/gpu/utils/safe_call.hpp" #include "pcl/gpu/utils/device/warp.hpp" #include "pcl/gpu/utils/device/functional.hpp" #include "pcl/gpu/utils/device/funcattrib.hpp" #include "pcl/gpu/utils/timers_cuda.hpp" #include "pcl/gpu/features/device/pair_features.hpp" #include <iostream> using namespace pcl::gpu; using namespace pcl::device; using namespace std; #ifndef M_PI #define M_PI 3.14159265358979323846f #endif namespace pcl { namespace device { struct KernelBase { enum { CTA_SIZE = 256, WAPRS = CTA_SIZE/Warp::WARP_SIZE, bins1 = 11, bins2 = 11, bins3 = 11, FSize = bins1 + bins2 + bins3 }; const PointType* point_cloud; size_t work_size; PtrStep<int> gindices; const int* sizes; template<class It> __device__ __forceinline__ float3 fetch(It ptr, int index) const { //return tr(ptr[index]); return *(float3*)&ptr[index]; } }; struct Spfh : public KernelBase { const NormalType* normals; mutable PtrStep<float> output; const int *indices; __device__ __forceinline__ void operator()() const { __shared__ float shists[WAPRS * FSize]; __shared__ float3 current_point[WAPRS]; __shared__ float3 current_nomal[WAPRS]; int lane = Warp::laneId(); int warp_idx = Warp::id(); int idx = WAPRS * blockIdx.x + warp_idx; if (idx >= work_size) return; int point_idx = indices ? indices[idx] : idx; float* shist = shists + warp_idx * FSize; float* shist_b1 = shist; float* shist_b2 = shist_b1 + bins1; float* shist_b3 = shist_b2 + bins2; Warp::fill(shist, shist + FSize, 0.f); if (lane == 0) { current_point[warp_idx] = fetch(point_cloud, point_idx); current_nomal[warp_idx] = fetch(normals, point_idx); } const int *ginds = gindices.ptr(idx); int size = sizes[idx]; float hist_incr = 100.f / (float)(size - 1); // or 100/(size - 1) ??? //now [inds, inds + size) contains indices of neighb points for idx-th point in cloud //this list also contains idx itseelf. for(int i = lane; i < size; i += Warp::STRIDE) { int point_index = ginds[i]; if (point_index != point_idx) // skip itself { float3 p = fetch(point_cloud, point_index); float3 n = fetch(normals, point_index); int h_index; float f1, f2, f3, f4; if (computePairFeatures (current_point[warp_idx], current_nomal[warp_idx], p, n, f1, f2, f3, f4)) { // Normalize the f1, f2, f3 features and push them in the histogram h_index = floorf (bins1 * ((f1 + M_PI) * (1.0f / (2.0f * M_PI)))); h_index = min(bins1 - 1, max(0, h_index)); atomicAdd(shist_b1 + h_index, hist_incr); h_index = floorf (bins2 * ((f2 + 1.0f) * 0.5f)); h_index = min(bins2 - 1, max (0, h_index)); atomicAdd(shist_b2 + h_index, hist_incr); h_index = floorf (bins3 * ((f3 + 1.0f) * 0.5f)); h_index = min(bins3 - 1, max (0, h_index)); atomicAdd(shist_b3 + h_index, hist_incr); } } } float *out = output.ptr(idx); Warp::copy(shist, shist + FSize, out); } }; __global__ void SpfhKernel(const Spfh spfh33) { spfh33(); } } } void pcl::device::computeSPFH(const PointCloud& surface, const Normals& normals, const Indices& indices, const NeighborIndices& neighbours, DeviceArray2D<FPFHSignature33>& spfh33) { spfh33.create(indices.empty() ? 
(int)surface.size() : (int)indices.size(), 1); std::vector<int> inds; indices.download(inds); Spfh spfh; spfh.point_cloud = surface; spfh.normals = normals; spfh.indices = indices; spfh.work_size = spfh33.rows(); spfh.sizes = neighbours.sizes; spfh.gindices = neighbours; spfh.output = spfh33; int block = KernelBase::CTA_SIZE; int grid = divUp((int)spfh.work_size, KernelBase::WAPRS); SpfhKernel<<<grid, block>>>(spfh); cudaSafeCall( cudaGetLastError() ); cudaSafeCall(cudaDeviceSynchronize()); } namespace pcl { namespace device { struct Fpfh : public KernelBase { const PtrStep<float> spfh; const PointType* surface; const int* indices; const int* lookup; mutable PtrStep<float> fpfh; Fpfh(PtrStep<float> spfh_arg) : spfh(spfh_arg) {} __device__ __forceinline__ void operator()() const { int lane = Warp::laneId(); int warp_idx = Warp::id(); int idx = WAPRS * blockIdx.x + warp_idx; // "index in neighbours" == "index in output" == "index in indices". if (idx >= work_size) return; __shared__ float3 current_point[WAPRS]; __shared__ float features[WAPRS * FSize]; __shared__ int sindices[CTA_SIZE]; int point_idx = indices ? indices[idx] : idx; //index in cloud if (lane == 0) current_point[warp_idx] = fetch(point_cloud, point_idx); volatile float *feature_beg = features + FSize * warp_idx ; volatile float *feature_end = feature_beg + FSize; Warp::fill(feature_beg, feature_end, 0.f); const int *ginds = gindices.ptr(idx); int *sinds = sindices + Warp::WARP_SIZE * warp_idx; int size = sizes[idx]; for(int i = lane; __any(i < size); i += Warp::STRIDE) { if (i < size) sinds[lane] = ginds[i]; int inds_num = __popc(__ballot(i < size)); for(int j = 0; j < inds_num; ++j) { int point_index = sinds[j]; // index in surface if (surface == point_cloud) { if(point_index != point_idx) //surface == cloud -> point_index and idx are indeces both for the same array. { float3 p = fetch(point_cloud, point_index); //float dist = norm(p, current_point[warp_idx]); float dx = p.x - current_point[warp_idx].x; float dy = p.y - current_point[warp_idx].y; float dz = p.z - current_point[warp_idx].z; float dist = dx * dx + dy * dy + dz * dz; float weight = 1.f / dist; const float *spfh_ptr = spfh.ptr( lookup ? lookup[point_index] : point_index ); Warp::transform(feature_beg, feature_end, spfh_ptr, feature_beg, plusWeighted<volatile float, float>(weight)); } } else { float3 p = fetch(surface, point_index); float dx = p.x - current_point[warp_idx].x; float dy = p.y - current_point[warp_idx].y; float dz = p.z - current_point[warp_idx].z; float dist = dx * dx + dy * dy + dz * dz; if (dist == 0) continue; float weight = 1.f / dist; const float *spfh_ptr = spfh.ptr( lookup[point_index] ); Warp::transform(feature_beg, feature_end, spfh_ptr, feature_beg, plusWeighted<volatile float, float>(weight)); } } } float *buffer = (float*)&sindices[threadIdx.x - lane]; normalizeFeature<bins1>(feature_beg, buffer, lane); normalizeFeature<bins2>(feature_beg + bins1, buffer, lane); normalizeFeature<bins3>(feature_beg + bins1 + bins2, buffer, lane); Warp::copy(feature_beg, feature_end, fpfh.ptr(idx)); } template<int bins> __device__ __forceinline__ void normalizeFeature(volatile float *feature, volatile float *buffer, int lane) const { //nomalize buns float value = (lane < bins) ? 
feature[lane] : 0.f; float sum = Warp::reduce(buffer, value, plus<volatile float>()); if (sum != 0) sum = 100.0 / sum; if (lane < bins) feature[lane] *= sum; } }; __global__ void FpfhKernel(const Fpfh fpfh33) { fpfh33(); } } } void pcl::device::computeFPFH(const PointCloud& cloud, const NeighborIndices& neighbours, const DeviceArray2D<FPFHSignature33>& spfh, DeviceArray2D<FPFHSignature33>& features) { Fpfh fpfh(spfh); fpfh.point_cloud = cloud; fpfh.surface = cloud; fpfh.work_size = neighbours.sizes.size(); fpfh.lookup = 0; fpfh.indices = 0; fpfh.sizes = neighbours.sizes; fpfh.gindices = neighbours; fpfh.fpfh = features; int block = KernelBase::CTA_SIZE; int grid = divUp((int)fpfh.work_size, KernelBase::WAPRS); FpfhKernel<<<grid, block>>>(fpfh); cudaSafeCall( cudaGetLastError() ); cudaSafeCall(cudaDeviceSynchronize()); } void pcl::device::computeFPFH(const PointCloud& cloud, const Indices& indices, const PointCloud& surface, const NeighborIndices& neighbours, DeviceArray<int>& lookup, const DeviceArray2D<FPFHSignature33>& spfh, DeviceArray2D<FPFHSignature33>& features) { Fpfh fpfh(spfh); fpfh.point_cloud = cloud; fpfh.surface = surface; fpfh.work_size = neighbours.sizes.size(); fpfh.indices = indices; fpfh.lookup = lookup; fpfh.sizes = neighbours.sizes; fpfh.gindices = neighbours; fpfh.fpfh = features; int block = KernelBase::CTA_SIZE; int grid = divUp((int)fpfh.work_size, KernelBase::WAPRS); FpfhKernel<<<grid, block>>>(fpfh); cudaSafeCall( cudaGetLastError() ); cudaSafeCall(cudaDeviceSynchronize()); }
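The Spfh kernel above accumulates each point's three sub-histograms in a per-warp slice of shared memory with atomicAdd, so lanes of the same warp never contend on global memory until the final copy-out. The toy kernel below isolates that pattern under simplifying assumptions (one value per thread, values already normalized to [0,1)); warp_histogram and its launch are hypothetical, not part of the PCL GPU API, and error checking is omitted.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void warp_histogram(const float* values, int n, float* out, int bins)
{
    extern __shared__ float shist[];              // bins floats per warp
    const int warp_size = 32;
    int lane    = threadIdx.x % warp_size;
    int warp_id = threadIdx.x / warp_size;
    float* my_hist = shist + warp_id * bins;      // this warp's private slice

    for (int b = lane; b < bins; b += warp_size)  // zero this warp's bins
        my_hist[b] = 0.f;
    __syncwarp();

    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n) {
        int b = min(bins - 1, max(0, (int)floorf(values[idx] * bins)));
        atomicAdd(my_hist + b, 1.f);              // shared-memory atomic, warp-local
    }
    __syncwarp();

    for (int b = lane; b < bins; b += warp_size)  // publish to the global histogram
        atomicAdd(out + b, my_hist[b]);
}

int main()
{
    const int n = 1 << 16, bins = 11, threads = 256;
    float *d_vals, *d_hist;
    cudaMalloc(&d_vals, n * sizeof(float));
    cudaMalloc(&d_hist, bins * sizeof(float));
    cudaMemset(d_vals, 0, n * sizeof(float));     // all values land in bin 0 here
    cudaMemset(d_hist, 0, bins * sizeof(float));
    size_t smem = (threads / 32) * bins * sizeof(float);
    warp_histogram<<<(n + threads - 1) / threads, threads, smem>>>(d_vals, n, d_hist, bins);
    cudaDeviceSynchronize();
    float h_hist[bins];
    cudaMemcpy(h_hist, d_hist, bins * sizeof(float), cudaMemcpyDeviceToHost);
    std::printf("bin 0 count: %.0f (expect %d)\n", h_hist[0], n);
    cudaFree(d_vals); cudaFree(d_hist);
    return 0;
}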
the_stack
#include "cupoch/geometry/trianglemesh.h" #include "cupoch/utility/console.h" #include "cupoch/utility/platform.h" #include "cupoch/visualization/shader/shader.h" #include "cupoch/visualization/shader/texture_simple_shader.h" #include "cupoch/visualization/utility/color_map.h" using namespace cupoch; using namespace cupoch::visualization; using namespace cupoch::visualization::glsl; namespace { GLenum GetFormat(const geometry::Geometry &geometry) { auto it = gl_helper::texture_format_map_.find( ((const geometry::TriangleMesh &)geometry) .texture_.num_of_channels_); if (it == gl_helper::texture_format_map_.end()) { utility::LogWarning("Unknown texture format, abort!"); return false; } return it->second; } GLenum GetType(const geometry::Geometry &geometry) { auto it = gl_helper::texture_type_map_.find( ((const geometry::TriangleMesh &)geometry) .texture_.bytes_per_channel_); if (it == gl_helper::texture_type_map_.end()) { utility::LogWarning("Unknown texture type, abort!"); return false; } return it->second; } struct copy_trianglemesh_functor { copy_trianglemesh_functor(const Eigen::Vector3f *vertices, const int *triangles, const Eigen::Vector2f *triangle_uvs) : vertices_(vertices), triangles_(triangles), triangle_uvs_(triangle_uvs){}; const Eigen::Vector3f *vertices_; const int *triangles_; const Eigen::Vector2f *triangle_uvs_; __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector2f> operator()( size_t k) const { int vi = triangles_[k]; return thrust::make_tuple(vertices_[vi], triangle_uvs_[k]); } }; } // namespace bool TextureSimpleShader::Compile() { if (CompileShaders(texture_simple_vertex_shader, NULL, texture_simple_fragment_shader) == false) { PrintShaderWarning("Compiling shaders failed."); return false; } vertex_position_ = glGetAttribLocation(program_, "vertex_position"); vertex_uv_ = glGetAttribLocation(program_, "vertex_uv"); texture_ = glGetUniformLocation(program_, "diffuse_texture"); MVP_ = glGetUniformLocation(program_, "MVP"); return true; } void TextureSimpleShader::Release() { UnbindGeometry(true); ReleaseProgram(); } bool TextureSimpleShader::BindGeometry(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { // If there is already geometry, we first unbind it. // We use GL_STATIC_DRAW. When geometry changes, we clear buffers and // rebind the geometry. Note that this approach is slow. If the geometry is // changing per frame, consider implementing a new ShaderWrapper using // GL_STREAM_DRAW, and replace InvalidateGeometry() with Buffer Object // Streaming mechanisms. 
UnbindGeometry(); // Prepare data to be passed to GPU const size_t num_data_size = GetDataSize(geometry); const size_t num_texture_height = GetTextureHeight(geometry); const size_t num_texture_width = GetTextureWidth(geometry); glGenTextures(1, &texture_buffer_); glBindTexture(GL_TEXTURE_2D, texture_buffer_); GLenum format = GetFormat(geometry); GLenum type = GetType(geometry); glTexImage2D(GL_TEXTURE_2D, 0, format, num_texture_width, num_texture_height, 0, format, type, 0); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); // Create buffers and bind the geometry glGenBuffers(1, &vertex_position_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0, GL_STATIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); cudaSafeCall(cudaGraphicsGLRegisterBuffer(&cuda_graphics_resources_[0], vertex_position_buffer_, cudaGraphicsMapFlagsNone)); glGenBuffers(1, &vertex_uv_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_uv_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector2f), 0, GL_STATIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); cudaSafeCall(cudaGraphicsGLRegisterBuffer(&cuda_graphics_resources_[1], vertex_uv_buffer_, cudaGraphicsMapFlagsNone)); glGenBuffers(1, &texture_pixel_buffer_); glBindBuffer(GL_PIXEL_UNPACK_BUFFER, texture_pixel_buffer_); size_t texture_size = GetTextureSize(geometry); glBufferData(GL_PIXEL_UNPACK_BUFFER, texture_size, 0, GL_STATIC_DRAW); glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0); cudaSafeCall(cudaGraphicsGLRegisterBuffer(&cuda_graphics_resources_[2], texture_pixel_buffer_, cudaGraphicsMapFlagsNone)); Eigen::Vector3f *raw_points_ptr; Eigen::Vector2f *raw_uvs_ptr; uint8_t *raw_render_texture_ptr; size_t n_bytes; cudaSafeCall(cudaGraphicsMapResources(3, cuda_graphics_resources_)); cudaSafeCall(cudaGraphicsResourceGetMappedPointer( (void **)&raw_points_ptr, &n_bytes, cuda_graphics_resources_[0])); cudaSafeCall(cudaGraphicsResourceGetMappedPointer( (void **)&raw_uvs_ptr, &n_bytes, cuda_graphics_resources_[1])); cudaSafeCall(cudaGraphicsResourceGetMappedPointer( (void **)&raw_render_texture_ptr, &n_bytes, cuda_graphics_resources_[2])); thrust::device_ptr<Eigen::Vector3f> dev_points_ptr = thrust::device_pointer_cast(raw_points_ptr); thrust::device_ptr<Eigen::Vector2f> dev_uvs_ptr = thrust::device_pointer_cast(raw_uvs_ptr); thrust::device_ptr<uint8_t> dev_texture_ptr = thrust::device_pointer_cast(raw_render_texture_ptr); if (PrepareBinding(geometry, option, view, dev_points_ptr, dev_uvs_ptr, dev_texture_ptr) == false) { PrintShaderWarning("Binding failed when preparing data."); return false; } Unmap(3); bound_ = true; return true; } bool TextureSimpleShader::RenderGeometry(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (PrepareRendering(geometry, option, view) == false) { PrintShaderWarning("Rendering failed during preparation."); return false; } const size_t num_data_height = GetTextureHeight(geometry); const size_t num_data_width = GetTextureWidth(geometry); GLenum format = GetFormat(geometry); GLenum type = GetType(geometry); glUseProgram(program_); glUniformMatrix4fv(MVP_, 1, GL_FALSE, view.GetMVPMatrix().data()); glActiveTexture(GL_TEXTURE0); glBindTexture(GL_TEXTURE_2D, texture_buffer_); 
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, texture_pixel_buffer_); glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, num_data_width, num_data_height, format, type, 0); glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0); glUniform1i(texture_, 0); glEnableVertexAttribArray(vertex_position_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glVertexAttribPointer(vertex_position_, 3, GL_FLOAT, GL_FALSE, 0, NULL); glEnableVertexAttribArray(vertex_uv_); glBindBuffer(GL_ARRAY_BUFFER, vertex_uv_buffer_); glVertexAttribPointer(vertex_uv_, 2, GL_FLOAT, GL_FALSE, 0, NULL); glDrawArrays(draw_arrays_mode_, 0, draw_arrays_size_); glDisableVertexAttribArray(vertex_position_); glDisableVertexAttribArray(vertex_uv_); return true; } void TextureSimpleShader::UnbindGeometry(bool finalize) { if (bound_) { if (!finalize) { cudaSafeCall(cudaGraphicsUnregisterResource( cuda_graphics_resources_[0])); cudaSafeCall(cudaGraphicsUnregisterResource( cuda_graphics_resources_[1])); cudaSafeCall(cudaGraphicsUnregisterResource( cuda_graphics_resources_[2])); } glDeleteTextures(1, &texture_pixel_buffer_); glDeleteBuffers(1, &vertex_position_buffer_); glDeleteBuffers(1, &vertex_uv_buffer_); glDeleteTextures(1, &texture_buffer_); bound_ = false; } } bool TextureSimpleShaderForTriangleMesh::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::TriangleMesh) { PrintShaderWarning("Rendering type is not geometry::TriangleMesh."); return false; } if (option.mesh_show_back_face_) { glDisable(GL_CULL_FACE); } else { glEnable(GL_CULL_FACE); } glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); if (option.mesh_show_wireframe_) { glEnable(GL_POLYGON_OFFSET_FILL); glPolygonOffset(1.0, 1.0); } else { glDisable(GL_POLYGON_OFFSET_FILL); } return true; } bool TextureSimpleShaderForTriangleMesh::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector2f> &uvs, thrust::device_ptr<uint8_t> &texture_image) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::TriangleMesh) { PrintShaderWarning("Rendering type is not geometry::TriangleMesh."); return false; } const geometry::TriangleMesh &mesh = (const geometry::TriangleMesh &)geometry; if (mesh.HasTriangles() == false) { PrintShaderWarning("Binding failed with empty triangle mesh."); return false; } copy_trianglemesh_functor func( thrust::raw_pointer_cast(mesh.vertices_.data()), (int *)(thrust::raw_pointer_cast(mesh.triangles_.data())), thrust::raw_pointer_cast(mesh.triangle_uvs_.data())); thrust::transform( thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(mesh.triangles_.size() * 3), make_tuple_iterator(points, uvs), func); thrust::copy(mesh.texture_.data_.begin(), mesh.texture_.data_.end(), texture_image); draw_arrays_mode_ = GL_TRIANGLES; draw_arrays_size_ = GLsizei(mesh.triangles_.size() * 3); return true; } size_t TextureSimpleShaderForTriangleMesh::GetDataSize( const geometry::Geometry &geometry) const { return ((const geometry::TriangleMesh &)geometry).triangles_.size() * 3; } size_t TextureSimpleShaderForTriangleMesh::GetTextureSize( const geometry::Geometry &geometry) const { return ((const geometry::TriangleMesh &)geometry).texture_.data_.size(); } size_t TextureSimpleShaderForTriangleMesh::GetTextureHeight( const geometry::Geometry &geometry) const { return 
((const geometry::TriangleMesh &)geometry).texture_.height_; } size_t TextureSimpleShaderForTriangleMesh::GetTextureWidth( const geometry::Geometry &geometry) const { return ((const geometry::TriangleMesh &)geometry).texture_.width_; }
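BindGeometry above follows the usual CUDA/OpenGL interop lifecycle: register the GL buffer with CUDA, map it, fetch a device pointer, fill it through Thrust, then unmap before GL consumes it. The sketch below shows only that lifecycle in isolation; it assumes a live OpenGL context and an already-created buffer object vbo holding n float3 vertices, the fill step is a placeholder thrust::fill rather than copy_trianglemesh_functor, and all error checking is omitted. Compile as a .cu file.

#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
#include <thrust/device_ptr.h>
#include <thrust/fill.h>

void fill_vbo_from_cuda(unsigned int vbo, size_t n)
{
    cudaGraphicsResource* resource = nullptr;

    // 1. Register the GL buffer with CUDA (once per buffer, not per frame).
    cudaGraphicsGLRegisterBuffer(&resource, vbo, cudaGraphicsMapFlagsNone);

    // 2. Map it and obtain a device pointer CUDA/Thrust can write through.
    cudaGraphicsMapResources(1, &resource);
    float3* raw = nullptr;
    size_t n_bytes = 0;
    cudaGraphicsResourceGetMappedPointer((void**)&raw, &n_bytes, resource);

    // 3. Write the vertex data on the device (here: just fill with zeros).
    thrust::device_ptr<float3> dev = thrust::device_pointer_cast(raw);
    thrust::fill(dev, dev + n, make_float3(0.f, 0.f, 0.f));

    // 4. Unmap before OpenGL draws from the buffer; unregister when done with it.
    cudaGraphicsUnmapResources(1, &resource);
    cudaGraphicsUnregisterResource(resource);
}

The shader above keeps the registration alive across frames and only unregisters in UnbindGeometry(); this sketch folds both into one call purely to keep the sequence visible.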
the_stack
///////////////////////////////////////////////////////////////////// Domain::Domain(Int_t numRanks, Index_t colLoc, Index_t rowLoc, Index_t planeLoc, Index_t nx, int tp, int nr, int balance, Int_t cost) : m_e_cut(Real_t(1.0e-7)), m_p_cut(Real_t(1.0e-7)), m_q_cut(Real_t(1.0e-7)), m_v_cut(Real_t(1.0e-10)), m_u_cut(Real_t(1.0e-7)), m_hgcoef(Real_t(3.0)), m_ss4o3(Real_t(4.0)/Real_t(3.0)), m_qstop(Real_t(1.0e+12)), m_monoq_max_slope(Real_t(1.0)), m_monoq_limiter_mult(Real_t(2.0)), m_qlc_monoq(Real_t(0.5)), m_qqc_monoq(Real_t(2.0)/Real_t(3.0)), m_qqc(Real_t(2.0)), m_eosvmax(Real_t(1.0e+9)), m_eosvmin(Real_t(1.0e-9)), m_pmin(Real_t(0.)), m_emin(Real_t(-1.0e+15)), m_dvovmax(Real_t(0.1)), m_refdens(Real_t(1.0)) { Index_t edgeElems = nx ; Index_t edgeNodes = edgeElems+1 ; this->cost() = cost; m_tp = tp ; m_numRanks = numRanks ; /////////////////////////////// // Initialize Sedov Mesh /////////////////////////////// // construct a uniform box for this processor m_colLoc = colLoc ; m_rowLoc = rowLoc ; m_planeLoc = planeLoc ; m_sizeX = edgeElems ; m_sizeY = edgeElems ; m_sizeZ = edgeElems ; m_numElem = edgeElems*edgeElems*edgeElems ; m_numNode = edgeNodes*edgeNodes*edgeNodes ; m_regNumList = new Index_t[numElem()] ; // material indexset // Elem-centered AllocateElemPersistent(numElem()) ; // Node-centered AllocateNodePersistent(numNode()) ; SetupCommBuffers(edgeNodes); // Basic Field Initialization for (Index_t i=0; i<numElem(); ++i) { e(i) = Real_t(0.0) ; p(i) = Real_t(0.0) ; q(i) = Real_t(0.0) ; ss(i) = Real_t(0.0) ; } // Note - v initializes to 1.0, not 0.0! for (Index_t i=0; i<numElem(); ++i) { v(i) = Real_t(1.0) ; } for (Index_t i=0; i<numNode(); ++i) { xd(i) = Real_t(0.0) ; yd(i) = Real_t(0.0) ; zd(i) = Real_t(0.0) ; } for (Index_t i=0; i<numNode(); ++i) { xdd(i) = Real_t(0.0) ; ydd(i) = Real_t(0.0) ; zdd(i) = Real_t(0.0) ; } for (Index_t i=0; i<numNode(); ++i) { nodalMass(i) = Real_t(0.0) ; } BuildMesh(nx, edgeNodes, edgeElems); SetupThreadSupportStructures(); // Setup region index sets. For now, these are constant sized // throughout the run, but could be changed every cycle to // simulate effects of ALE on the lagrange solver CreateRegionIndexSets(nr, balance); // Setup symmetry nodesets SetupSymmetryPlanes(edgeNodes); // Setup element connectivities SetupElementConnectivities(edgeElems); // Setup symmetry planes and free surface boundary arrays SetupBoundaryConditions(edgeElems); // Setup defaults // These can be changed (requires recompile) if you want to run // with a fixed timestep, or to a different end time, but it's // probably easier/better to just run a fixed number of timesteps // using the -i flag in 2.x dtfixed() = Real_t(-1.0e-6) ; // Negative means use courant condition stoptime() = Real_t(1.0e-2); // *Real_t(edgeElems*tp/45.0) ; // Initial conditions deltatimemultlb() = Real_t(1.1) ; deltatimemultub() = Real_t(1.2) ; dtcourant() = Real_t(1.0e+20) ; dthydro() = Real_t(1.0e+20) ; dtmax() = Real_t(1.0e-2) ; time() = Real_t(0.) 
; cycle() = Int_t(0) ; // initialize field data for (Index_t i=0; i<numElem(); ++i) { Real_t x_local[8], y_local[8], z_local[8] ; Index_t *elemToNode = nodelist(i) ; for( Index_t lnode=0 ; lnode<8 ; ++lnode ) { Index_t gnode = elemToNode[lnode]; x_local[lnode] = x(gnode); y_local[lnode] = y(gnode); z_local[lnode] = z(gnode); } // volume calculations Real_t volume = CalcElemVolume(x_local, y_local, z_local ); volo(i) = volume ; elemMass(i) = volume ; for (Index_t j=0; j<8; ++j) { Index_t idx = elemToNode[j] ; nodalMass(idx) += volume / Real_t(8.0) ; } } // deposit initial energy // An energy of 3.948746e+7 is correct for a problem with // 45 zones along a side - we need to scale it const Real_t ebase = Real_t(3.948746e+7); Real_t scale = (nx*m_tp)/Real_t(45.0); Real_t einit = ebase*scale*scale*scale; if (m_rowLoc + m_colLoc + m_planeLoc == 0) { // Dump into the first zone (which we know is in the corner) // of the domain that sits at the origin e(0) = einit; } //set initial deltatime base on analytic CFL calculation deltatime() = (Real_t(.5)*cbrt(volo(0)))/sqrt(Real_t(2.0)*einit); } // End constructor //////////////////////////////////////////////////////////////////////////////// void Domain::BuildMesh(Int_t nx, Int_t edgeNodes, Int_t edgeElems) { Index_t meshEdgeElems = m_tp*nx ; // initialize nodal coordinates Index_t nidx = 0 ; Real_t tz = Real_t(1.125)*Real_t(m_planeLoc*nx)/Real_t(meshEdgeElems) ; for (Index_t plane=0; plane<edgeNodes; ++plane) { Real_t ty = Real_t(1.125)*Real_t(m_rowLoc*nx)/Real_t(meshEdgeElems) ; for (Index_t row=0; row<edgeNodes; ++row) { Real_t tx = Real_t(1.125)*Real_t(m_colLoc*nx)/Real_t(meshEdgeElems) ; for (Index_t col=0; col<edgeNodes; ++col) { x(nidx) = tx ; y(nidx) = ty ; z(nidx) = tz ; ++nidx ; // tx += ds ; // may accumulate roundoff... tx = Real_t(1.125)*Real_t(m_colLoc*nx+col+1)/Real_t(meshEdgeElems) ; } // ty += ds ; // may accumulate roundoff... ty = Real_t(1.125)*Real_t(m_rowLoc*nx+row+1)/Real_t(meshEdgeElems) ; } // tz += ds ; // may accumulate roundoff... 
tz = Real_t(1.125)*Real_t(m_planeLoc*nx+plane+1)/Real_t(meshEdgeElems) ; } // embed hexehedral elements in nodal point lattice Index_t zidx = 0 ; nidx = 0 ; for (Index_t plane=0; plane<edgeElems; ++plane) { for (Index_t row=0; row<edgeElems; ++row) { for (Index_t col=0; col<edgeElems; ++col) { Index_t *localNode = nodelist(zidx) ; localNode[0] = nidx ; localNode[1] = nidx + 1 ; localNode[2] = nidx + edgeNodes + 1 ; localNode[3] = nidx + edgeNodes ; localNode[4] = nidx + edgeNodes*edgeNodes ; localNode[5] = nidx + edgeNodes*edgeNodes + 1 ; localNode[6] = nidx + edgeNodes*edgeNodes + edgeNodes + 1 ; localNode[7] = nidx + edgeNodes*edgeNodes + edgeNodes ; ++zidx ; ++nidx ; } ++nidx ; } nidx += edgeNodes ; } } //////////////////////////////////////////////////////////////////////////////// void Domain::SetupThreadSupportStructures() { Index_t numthreads = NT; if (numthreads > 1) { // set up node-centered indexing of elements Index_t *nodeElemCount = new Index_t[numNode()] ; for (Index_t i=0; i<numNode(); ++i) { nodeElemCount[i] = 0 ; } for (Index_t i=0; i<numElem(); ++i) { Index_t *nl = nodelist(i) ; for (Index_t j=0; j < 8; ++j) { ++(nodeElemCount[nl[j]] ); } } m_nodeElemStart = new Index_t[numNode()+1] ; m_nodeElemStart[0] = 0; for (Index_t i=1; i <= numNode(); ++i) { m_nodeElemStart[i] = m_nodeElemStart[i-1] + nodeElemCount[i-1] ; } m_nodeElemCornerList = new Index_t[m_nodeElemStart[numNode()]]; for (Index_t i=0; i < numNode(); ++i) { nodeElemCount[i] = 0; } for (Index_t i=0; i < numElem(); ++i) { Index_t *nl = nodelist(i) ; for (Index_t j=0; j < 8; ++j) { Index_t m = nl[j]; Index_t k = i*8 + j ; Index_t offset = m_nodeElemStart[m] + nodeElemCount[m] ; m_nodeElemCornerList[offset] = k; ++(nodeElemCount[m]) ; } } Index_t clSize = m_nodeElemStart[numNode()] ; for (Index_t i=0; i < clSize; ++i) { Index_t clv = m_nodeElemCornerList[i] ; if ((clv < 0) || (clv > numElem()*8)) { fprintf(stderr, "AllocateNodeElemIndexes(): nodeElemCornerList entry out of range!\n"); exit(-1); } } delete [] nodeElemCount ; } else { // These arrays are not used if we're not threaded m_nodeElemStart = NULL; m_nodeElemCornerList = NULL; } } //////////////////////////////////////////////////////////////////////////////// void Domain::SetupCommBuffers(Int_t edgeNodes) { // allocate a buffer large enough for nodal ghost data Index_t maxEdgeSize = MAX(this->sizeX(), MAX(this->sizeY(), this->sizeZ()))+1 ; m_maxPlaneSize = CACHE_ALIGN_REAL(maxEdgeSize*maxEdgeSize) ; m_maxEdgeSize = CACHE_ALIGN_REAL(maxEdgeSize) ; // assume communication to 6 neighbors by default m_rowMin = (m_rowLoc == 0) ? 0 : 1; m_rowMax = (m_rowLoc == m_tp-1) ? 0 : 1; m_colMin = (m_colLoc == 0) ? 0 : 1; m_colMax = (m_colLoc == m_tp-1) ? 0 : 1; m_planeMin = (m_planeLoc == 0) ? 0 : 1; m_planeMax = (m_planeLoc == m_tp-1) ? 
0 : 1; // Boundary nodesets if (m_colLoc == 0) m_symmX.resize(edgeNodes*edgeNodes); if (m_rowLoc == 0) m_symmY.resize(edgeNodes*edgeNodes); if (m_planeLoc == 0) m_symmZ.resize(edgeNodes*edgeNodes); } //////////////////////////////////////////////////////////////////////////////// void Domain::CreateRegionIndexSets(Int_t nr, Int_t balance) { srand(0); Index_t myRank = 0; this->numReg() = nr; m_regElemSize = new Index_t[numReg()]; m_regElemlist = new Index_t*[numReg()]; Index_t nextIndex = 0; //if we only have one region just fill it // Fill out the regNumList with material numbers, which are always // the region index plus one if(numReg() == 1) { while (nextIndex < numElem()) { this->regNumList(nextIndex) = 1; nextIndex++; } regElemSize(0) = 0; } //If we have more than one region distribute the elements. else { Int_t regionNum; Int_t regionVar; Int_t lastReg = -1; Int_t binSize; Index_t elements; Index_t runto = 0; Int_t costDenominator = 0; Int_t* regBinEnd = new Int_t[numReg()]; //Determine the relative weights of all the regions. This is based off the -b flag. Balance is the value passed into b. for (Index_t i=0 ; i<numReg() ; ++i) { regElemSize(i) = 0; costDenominator += pow((i+1), balance); //Total sum of all regions weights regBinEnd[i] = costDenominator; //Chance of hitting a given region is (regBinEnd[i] - regBinEdn[i-1])/costDenominator } //Until all elements are assigned while (nextIndex < numElem()) { //pick the region regionVar = rand() % costDenominator; Index_t i = 0; while(regionVar >= regBinEnd[i]) i++; //rotate the regions based on MPI rank. Rotation is Rank % NumRegions this makes each domain have a different region with //the highest representation regionNum = ((i + myRank) % numReg()) + 1; // make sure we don't pick the same region twice in a row while(regionNum == lastReg) { regionVar = rand() % costDenominator; i = 0; while(regionVar >= regBinEnd[i]) i++; regionNum = ((i + myRank) % numReg()) + 1; } //Pick the bin size of the region and determine the number of elements. binSize = rand() % 1000; if(binSize < 773) { elements = rand() % 15 + 1; } else if(binSize < 937) { elements = rand() % 16 + 16; } else if(binSize < 970) { elements = rand() % 32 + 32; } else if(binSize < 974) { elements = rand() % 64 + 64; } else if(binSize < 978) { elements = rand() % 128 + 128; } else if(binSize < 981) { elements = rand() % 256 + 256; } else elements = rand() % 1537 + 512; runto = elements + nextIndex; //Store the elements. If we hit the end before we run out of elements then just stop. 
while (nextIndex < runto && nextIndex < numElem()) { this->regNumList(nextIndex) = regionNum; nextIndex++; } lastReg = regionNum; } } // Convert regNumList to region index sets // First, count size of each region for (Index_t i=0 ; i<numElem() ; ++i) { int r = this->regNumList(i)-1; // region index == regnum-1 regElemSize(r)++; } // Second, allocate each region index set for (Index_t i=0 ; i<numReg() ; ++i) { m_regElemlist[i] = new Index_t[regElemSize(i)]; regElemSize(i) = 0; } // Third, fill index sets for (Index_t i=0 ; i<numElem() ; ++i) { Index_t r = regNumList(i)-1; // region index == regnum-1 Index_t regndx = regElemSize(r)++; // Note increment regElemlist(r,regndx) = i; } } ///////////////////////////////////////////////////////////// void Domain::SetupSymmetryPlanes(Int_t edgeNodes) { Index_t nidx = 0 ; for (Index_t i=0; i<edgeNodes; ++i) { Index_t planeInc = i*edgeNodes*edgeNodes ; Index_t rowInc = i*edgeNodes ; for (Index_t j=0; j<edgeNodes; ++j) { if (m_planeLoc == 0) { m_symmZ[nidx] = rowInc + j ; } if (m_rowLoc == 0) { m_symmY[nidx] = planeInc + j ; } if (m_colLoc == 0) { m_symmX[nidx] = planeInc + j*edgeNodes ; } ++nidx ; } } } ///////////////////////////////////////////////////////////// void Domain::SetupElementConnectivities(Int_t edgeElems) { lxim(0) = 0 ; for (Index_t i=1; i<numElem(); ++i) { lxim(i) = i-1 ; lxip(i-1) = i ; } lxip(numElem()-1) = numElem()-1 ; for (Index_t i=0; i<edgeElems; ++i) { letam(i) = i ; letap(numElem()-edgeElems+i) = numElem()-edgeElems+i ; } for (Index_t i=edgeElems; i<numElem(); ++i) { letam(i) = i-edgeElems ; letap(i-edgeElems) = i ; } for (Index_t i=0; i<edgeElems*edgeElems; ++i) { lzetam(i) = i ; lzetap(numElem()-edgeElems*edgeElems+i) = numElem()-edgeElems*edgeElems+i ; } for (Index_t i=edgeElems*edgeElems; i<numElem(); ++i) { lzetam(i) = i - edgeElems*edgeElems ; lzetap(i-edgeElems*edgeElems) = i ; } } ///////////////////////////////////////////////////////////// void Domain::SetupBoundaryConditions(Int_t edgeElems) { Index_t ghostIdx[6] ; // offsets to ghost locations // set up boundary condition information for (Index_t i=0; i<numElem(); ++i) { elemBC(i) = Int_t(0) ; } for (Index_t i=0; i<6; ++i) { ghostIdx[i] = INT_MIN ; } Int_t pidx = numElem() ; if (m_planeMin != 0) { ghostIdx[0] = pidx ; pidx += sizeX()*sizeY() ; } if (m_planeMax != 0) { ghostIdx[1] = pidx ; pidx += sizeX()*sizeY() ; } if (m_rowMin != 0) { ghostIdx[2] = pidx ; pidx += sizeX()*sizeZ() ; } if (m_rowMax != 0) { ghostIdx[3] = pidx ; pidx += sizeX()*sizeZ() ; } if (m_colMin != 0) { ghostIdx[4] = pidx ; pidx += sizeY()*sizeZ() ; } if (m_colMax != 0) { ghostIdx[5] = pidx ; } // symmetry plane or free surface BCs for (Index_t i=0; i<edgeElems; ++i) { Index_t planeInc = i*edgeElems*edgeElems ; Index_t rowInc = i*edgeElems ; for (Index_t j=0; j<edgeElems; ++j) { if (m_planeLoc == 0) { elemBC(rowInc+j) |= ZETA_M_SYMM ; } else { elemBC(rowInc+j) |= ZETA_M_COMM ; lzetam(rowInc+j) = ghostIdx[0] + rowInc + j ; } if (m_planeLoc == m_tp-1) { elemBC(rowInc+j+numElem()-edgeElems*edgeElems) |= ZETA_P_FREE; } else { elemBC(rowInc+j+numElem()-edgeElems*edgeElems) |= ZETA_P_COMM ; lzetap(rowInc+j+numElem()-edgeElems*edgeElems) = ghostIdx[1] + rowInc + j ; } if (m_rowLoc == 0) { elemBC(planeInc+j) |= ETA_M_SYMM ; } else { elemBC(planeInc+j) |= ETA_M_COMM ; letam(planeInc+j) = ghostIdx[2] + rowInc + j ; } if (m_rowLoc == m_tp-1) { elemBC(planeInc+j+edgeElems*edgeElems-edgeElems) |= ETA_P_FREE ; } else { elemBC(planeInc+j+edgeElems*edgeElems-edgeElems) |= ETA_P_COMM ; 
letap(planeInc+j+edgeElems*edgeElems-edgeElems) = ghostIdx[3] + rowInc + j ; } if (m_colLoc == 0) { elemBC(planeInc+j*edgeElems) |= XI_M_SYMM ; } else { elemBC(planeInc+j*edgeElems) |= XI_M_COMM ; lxim(planeInc+j*edgeElems) = ghostIdx[4] + rowInc + j ; } if (m_colLoc == m_tp-1) { elemBC(planeInc+j*edgeElems+edgeElems-1) |= XI_P_FREE ; } else { elemBC(planeInc+j*edgeElems+edgeElems-1) |= XI_P_COMM ; lxip(planeInc+j*edgeElems+edgeElems-1) = ghostIdx[5] + rowInc + j ; } } } } /////////////////////////////////////////////////////////////////////////// void InitMeshDecomp(Int_t numRanks, Int_t myRank, Int_t *col, Int_t *row, Int_t *plane, Int_t *side) { Int_t testProcs; Int_t dx, dy, dz; Int_t myDom; // Assume cube processor layout for now testProcs = Int_t(cbrt(Real_t(numRanks))+0.5) ; if (testProcs*testProcs*testProcs != numRanks) { printf("Num processors must be a cube of an integer (1, 8, 27, ...)\n") ; exit(-1); } if (sizeof(Real_t) != 4 && sizeof(Real_t) != 8) { printf("Operations only support float and double right now...\n"); exit(-1); } dx = testProcs ; dy = testProcs ; dz = testProcs ; // temporary test if (dx*dy*dz != numRanks) { printf("error -- must have as many domains as procs\n") ; exit(-1); } Int_t remainder = dx*dy*dz % numRanks ; if (myRank < remainder) { myDom = myRank*( 1+ (dx*dy*dz / numRanks)) ; } else { myDom = remainder*( 1+ (dx*dy*dz / numRanks)) + (myRank - remainder)*(dx*dy*dz/numRanks) ; } *col = myDom % dx ; *row = (myDom / dx) % dy ; *plane = myDom / (dx*dy) ; *side = testProcs; return; }
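CreateRegionIndexSets above weights region i by (i+1)^balance, accumulates the weights into cumulative bin ends, and maps a uniform draw in [0, total) to the first bin end that exceeds it. The stand-alone sketch below reproduces just that selection step with made-up sizes (numReg = 4, balance = 2) so the bias toward higher-numbered regions is easy to see; it is plain host C++ and not part of LULESH.

#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <vector>

int main()
{
    const int numReg = 4, balance = 2;
    std::vector<int> weight(numReg), regBinEnd(numReg);
    int costDenominator = 0;
    for (int i = 0; i < numReg; ++i) {
        weight[i] = (int)std::lround(std::pow(i + 1, balance)); // (i+1)^balance, as with -b
        costDenominator += weight[i];
        regBinEnd[i] = costDenominator;                 // cumulative bin end for region i
    }

    std::srand(0);
    std::vector<int> hits(numReg, 0);
    for (int draw = 0; draw < 100000; ++draw) {
        int regionVar = std::rand() % costDenominator;  // uniform in [0, total weight)
        int i = 0;
        while (regionVar >= regBinEnd[i]) ++i;          // first cumulative bin covering the draw
        ++hits[i];                                      // region i+1 would be chosen
    }
    for (int i = 0; i < numReg; ++i)
        std::printf("region %d: weight %d, hits %d\n", i + 1, weight[i], hits[i]);
    return 0;
}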
the_stack
* \brief Implements PME GPU spline calculation and charge spreading in CUDA. * TODO: consider always pre-sorting particles (as in DD case). * * \author Aleksei Iupinov <[email protected]> */ #include "gmxpre.h" #include <cassert> #include "gromacs/gpu_utils/cuda_kernel_utils.cuh" #include "gromacs/gpu_utils/typecasts.cuh" #include "pme.cuh" #include "pme_gpu_calculate_splines.cuh" #include "pme_grid.h" /* * This define affects the spline calculation behaviour in the kernel. * 0: a single GPU thread handles a single dimension of a single particle (calculating and storing * (order) spline values and derivatives). 1: (order) threads do redundant work on this same task, * each one stores only a single theta and single dtheta into global arrays. The only efficiency * difference is less global store operations, countered by more redundant spline computation. * * TODO: estimate if this should be a boolean parameter (and add it to the unit test if so). */ #define PME_GPU_PARALLEL_SPLINE 0 /*! \brief * Charge spreading onto the grid. * This corresponds to the CPU function spread_coefficients_bsplines_thread(). * Optional second stage of the spline_and_spread_kernel. * * \tparam order PME interpolation order. * \tparam wrapX Whether the grid overlap in dimension X should be wrapped. * \tparam wrapY Whether the grid overlap in dimension Y should be wrapped. * \tparam gridIndex The index of the grid to use in the kernel. * \tparam threadsPerAtom How many threads work on each atom * * \param[in] kernelParams Input PME CUDA data in constant memory. * \param[in] atomCharge Atom charge/coefficient of atom processed by thread. * \param[in] sm_gridlineIndices Atom gridline indices in the shared memory. * \param[in] sm_theta Atom spline values in the shared memory. */ template<int order, bool wrapX, bool wrapY, int gridIndex, ThreadsPerAtom threadsPerAtom> __device__ __forceinline__ void spread_charges(const PmeGpuCudaKernelParams kernelParams, const float* atomCharge, const int* __restrict__ sm_gridlineIndices, const float* __restrict__ sm_theta) { /* Global memory pointer to the output grid */ float* __restrict__ gm_grid = kernelParams.grid.d_realGrid[gridIndex]; // Number of atoms processed by a single warp in spread and gather const int threadsPerAtomValue = (threadsPerAtom == ThreadsPerAtom::Order) ? order : order * order; const int atomsPerWarp = warp_size / threadsPerAtomValue; const int nx = kernelParams.grid.realGridSize[XX]; const int ny = kernelParams.grid.realGridSize[YY]; const int nz = kernelParams.grid.realGridSize[ZZ]; const int pny = kernelParams.grid.realGridSizePadded[YY]; const int pnz = kernelParams.grid.realGridSizePadded[ZZ]; const int offx = 0, offy = 0, offz = 0; // unused for now const int atomIndexLocal = threadIdx.z; const int chargeCheck = pme_gpu_check_atom_charge(*atomCharge); if (chargeCheck) { // Spline Z coordinates const int ithz = threadIdx.x; const int ixBase = sm_gridlineIndices[atomIndexLocal * DIM + XX] - offx; const int iyBase = sm_gridlineIndices[atomIndexLocal * DIM + YY] - offy; int iz = sm_gridlineIndices[atomIndexLocal * DIM + ZZ] - offz + ithz; if (iz >= nz) { iz -= nz; } /* Atom index w.r.t. warp - alternating 0 1 0 1 .. */ const int atomWarpIndex = atomIndexLocal % atomsPerWarp; /* Warp index w.r.t. block - could probably be obtained easier? 
*/ const int warpIndex = atomIndexLocal / atomsPerWarp; const int splineIndexBase = getSplineParamIndexBase<order, atomsPerWarp>(warpIndex, atomWarpIndex); const int splineIndexZ = getSplineParamIndex<order, atomsPerWarp>(splineIndexBase, ZZ, ithz); const float thetaZ = sm_theta[splineIndexZ]; /* loop not used if order*order threads per atom */ const int ithyMin = (threadsPerAtom == ThreadsPerAtom::Order) ? 0 : threadIdx.y; const int ithyMax = (threadsPerAtom == ThreadsPerAtom::Order) ? order : threadIdx.y + 1; for (int ithy = ithyMin; ithy < ithyMax; ithy++) { int iy = iyBase + ithy; if (wrapY & (iy >= ny)) { iy -= ny; } const int splineIndexY = getSplineParamIndex<order, atomsPerWarp>(splineIndexBase, YY, ithy); float thetaY = sm_theta[splineIndexY]; const float Val = thetaZ * thetaY * (*atomCharge); assert(isfinite(Val)); const int offset = iy * pnz + iz; #pragma unroll for (int ithx = 0; (ithx < order); ithx++) { int ix = ixBase + ithx; if (wrapX & (ix >= nx)) { ix -= nx; } const int gridIndexGlobal = ix * pny * pnz + offset; const int splineIndexX = getSplineParamIndex<order, atomsPerWarp>(splineIndexBase, XX, ithx); const float thetaX = sm_theta[splineIndexX]; assert(isfinite(thetaX)); assert(isfinite(gm_grid[gridIndexGlobal])); atomicAdd(gm_grid + gridIndexGlobal, thetaX * Val); } } } } /*! \brief * A spline computation and charge spreading kernel function. * * Two tuning parameters can be used for additional performance. For small systems and for debugging * writeGlobal should be used removing the need to recalculate the theta values in the gather kernel. * Similarly for useOrderThreads large systems order threads per atom gives higher performance than order*order threads * * \tparam order PME interpolation order. * \tparam computeSplines A boolean which tells if the spline parameter and * gridline indices' computation should be performed. * \tparam spreadCharges A boolean which tells if the charge spreading should be performed. * \tparam wrapX A boolean which tells if the grid overlap in dimension X should be wrapped. * \tparam wrapY A boolean which tells if the grid overlap in dimension Y should be wrapped. * \tparam numGrids The number of grids to use in the kernel. Can be 1 or 2. * \tparam writeGlobal A boolean which tells if the theta values and gridlines should be written to global memory. * \tparam threadsPerAtom How many threads work on each atom * \param[in] kernelParams Input PME CUDA data in constant memory. */ template<int order, bool computeSplines, bool spreadCharges, bool wrapX, bool wrapY, int numGrids, bool writeGlobal, ThreadsPerAtom threadsPerAtom> __launch_bounds__(c_spreadMaxThreadsPerBlock) CLANG_DISABLE_OPTIMIZATION_ATTRIBUTE __global__ void pme_spline_and_spread_kernel(const PmeGpuCudaKernelParams kernelParams) { const int threadsPerAtomValue = (threadsPerAtom == ThreadsPerAtom::Order) ? order : order * order; const int atomsPerBlock = c_spreadMaxThreadsPerBlock / threadsPerAtomValue; // Number of atoms processed by a single warp in spread and gather const int atomsPerWarp = warp_size / threadsPerAtomValue; // Gridline indices, ivec __shared__ int sm_gridlineIndices[atomsPerBlock * DIM]; // Charges __shared__ float sm_coefficients[atomsPerBlock]; // Spline values __shared__ float sm_theta[atomsPerBlock * DIM * order]; float dtheta; float3 atomX; float atomCharge; const int blockIndex = blockIdx.y * gridDim.x + blockIdx.x; const int atomIndexOffset = blockIndex * atomsPerBlock + kernelParams.pipelineAtomStart; /* Thread index w.r.t. 
block */ const int threadLocalId = (threadIdx.z * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x; /* Warp index w.r.t. block - could probably be obtained easier? */ const int warpIndex = threadLocalId / warp_size; /* Atom index w.r.t. warp */ const int atomWarpIndex = threadIdx.z % atomsPerWarp; /* Atom index w.r.t. block/shared memory */ const int atomIndexLocal = warpIndex * atomsPerWarp + atomWarpIndex; /* Atom index w.r.t. global memory */ const int atomIndexGlobal = atomIndexOffset + atomIndexLocal; /* Early return for fully empty blocks at the end * (should only happen for billions of input atoms) */ if (atomIndexOffset >= kernelParams.atoms.nAtoms) { return; } /* Charges, required for both spline and spread */ if (c_useAtomDataPrefetch) { pme_gpu_stage_atom_data<float, atomsPerBlock, 1>( sm_coefficients, &kernelParams.atoms.d_coefficients[0][kernelParams.pipelineAtomStart]); __syncthreads(); atomCharge = sm_coefficients[atomIndexLocal]; } else { atomCharge = kernelParams.atoms.d_coefficients[0][atomIndexGlobal]; } if (computeSplines) { const float3* __restrict__ gm_coordinates = asFloat3(&kernelParams.atoms.d_coordinates[kernelParams.pipelineAtomStart]); if (c_useAtomDataPrefetch) { // Coordinates __shared__ float3 sm_coordinates[atomsPerBlock]; /* Staging coordinates */ pme_gpu_stage_atom_data<float3, atomsPerBlock, 1>(sm_coordinates, gm_coordinates); __syncthreads(); atomX = sm_coordinates[atomIndexLocal]; } else { atomX = gm_coordinates[atomIndexGlobal]; } calculate_splines<order, atomsPerBlock, atomsPerWarp, false, writeGlobal, numGrids>( kernelParams, atomIndexOffset, atomX, atomCharge, sm_theta, &dtheta, sm_gridlineIndices); __syncwarp(); } else { /* Staging the data for spread * (the data is assumed to be in GPU global memory with proper layout already, * as in after running the spline kernel) */ /* Spline data - only thetas (dthetas will only be needed in gather) */ pme_gpu_stage_atom_data<float, atomsPerBlock, DIM * order>(sm_theta, kernelParams.atoms.d_theta); /* Gridline indices */ pme_gpu_stage_atom_data<int, atomsPerBlock, DIM>(sm_gridlineIndices, kernelParams.atoms.d_gridlineIndices); __syncthreads(); } /* Spreading */ if (spreadCharges) { if (!kernelParams.usePipeline || (atomIndexGlobal < kernelParams.pipelineAtomEnd)) { spread_charges<order, wrapX, wrapY, 0, threadsPerAtom>( kernelParams, &atomCharge, sm_gridlineIndices, sm_theta); } } if (numGrids == 2) { __syncthreads(); if (c_useAtomDataPrefetch) { pme_gpu_stage_atom_data<float, atomsPerBlock, 1>(sm_coefficients, kernelParams.atoms.d_coefficients[1]); __syncthreads(); atomCharge = sm_coefficients[atomIndexLocal]; } else { atomCharge = kernelParams.atoms.d_coefficients[1][atomIndexGlobal]; } if (spreadCharges) { if (!kernelParams.usePipeline || (atomIndexGlobal < kernelParams.pipelineAtomEnd)) { spread_charges<order, wrapX, wrapY, 1, threadsPerAtom>( kernelParams, &atomCharge, sm_gridlineIndices, sm_theta); } } } } //! 
//! Kernel instantiations
// clang-format off
template __global__ void pme_spline_and_spread_kernel<4, true, true, true, true, 1, true, ThreadsPerAtom::Order>(const PmeGpuCudaKernelParams);
template __global__ void pme_spline_and_spread_kernel<4, true, false, true, true, 1, true, ThreadsPerAtom::Order>(const PmeGpuCudaKernelParams);
template __global__ void pme_spline_and_spread_kernel<4, false, true, true, true, 1, true, ThreadsPerAtom::Order>(const PmeGpuCudaKernelParams);
template __global__ void pme_spline_and_spread_kernel<4, true, true, true, true, 1, false, ThreadsPerAtom::Order>(const PmeGpuCudaKernelParams);
template __global__ void pme_spline_and_spread_kernel<4, true, true, true, true, 1, true, ThreadsPerAtom::OrderSquared>(const PmeGpuCudaKernelParams);
template __global__ void pme_spline_and_spread_kernel<4, true, false, true, true, 1, true, ThreadsPerAtom::OrderSquared>(const PmeGpuCudaKernelParams);
template __global__ void pme_spline_and_spread_kernel<4, false, true, true, true, 1, true, ThreadsPerAtom::OrderSquared>(const PmeGpuCudaKernelParams);
template __global__ void pme_spline_and_spread_kernel<4, true, true, true, true, 1, false, ThreadsPerAtom::OrderSquared>(const PmeGpuCudaKernelParams);
template __global__ void pme_spline_and_spread_kernel<4, true, true, true, true, 2, true, ThreadsPerAtom::Order>(const PmeGpuCudaKernelParams);
template __global__ void pme_spline_and_spread_kernel<4, true, false, true, true, 2, true, ThreadsPerAtom::Order>(const PmeGpuCudaKernelParams);
template __global__ void pme_spline_and_spread_kernel<4, false, true, true, true, 2, true, ThreadsPerAtom::Order>(const PmeGpuCudaKernelParams);
template __global__ void pme_spline_and_spread_kernel<4, true, true, true, true, 2, false, ThreadsPerAtom::Order>(const PmeGpuCudaKernelParams);
template __global__ void pme_spline_and_spread_kernel<4, true, true, true, true, 2, true, ThreadsPerAtom::OrderSquared>(const PmeGpuCudaKernelParams);
template __global__ void pme_spline_and_spread_kernel<4, true, false, true, true, 2, true, ThreadsPerAtom::OrderSquared>(const PmeGpuCudaKernelParams);
template __global__ void pme_spline_and_spread_kernel<4, false, true, true, true, 2, true, ThreadsPerAtom::OrderSquared>(const PmeGpuCudaKernelParams);
template __global__ void pme_spline_and_spread_kernel<4, true, true, true, true, 2, false, ThreadsPerAtom::OrderSquared>(const PmeGpuCudaKernelParams);
// clang-format on
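// [Editorial sketch] The host-side helper below is hypothetical (it is not part of GROMACS); it
// only illustrates how the template parameters documented above map onto a concrete launch of one
// of the instantiations listed here. It assumes the constants referenced by the kernel
// (c_spreadMaxThreadsPerBlock, warp_size) and the ThreadsPerAtom enum are visible in this
// translation unit, and it picks the single-grid, spline+spread, no-writeGlobal variant with
// order threads per atom. The block shape is an assumption that mirrors the kernel's thread
// indexing: x*y threads per atom, z atoms per block.
static void launchSplineAndSpreadSketch(const PmeGpuCudaKernelParams& kernelParams, int nAtoms, cudaStream_t stream)
{
    constexpr int order               = 4;
    constexpr int threadsPerAtomValue = order; // ThreadsPerAtom::Order; OrderSquared would use order * order
    constexpr int atomsPerBlock       = c_spreadMaxThreadsPerBlock / threadsPerAtomValue;

    // One block processes atomsPerBlock atoms; round the atom count up.
    const dim3 blocks((nAtoms + atomsPerBlock - 1) / atomsPerBlock, 1, 1);
    const dim3 threads(order, threadsPerAtomValue / order, atomsPerBlock);

    pme_spline_and_spread_kernel<order, true, true, true, true, 1, false, ThreadsPerAtom::Order>
            <<<blocks, threads, 0, stream>>>(kernelParams);
}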
#include <assert.h> #include <cuda.h> #include <cuda_runtime.h> #include <iostream> #include "cudacommon.h" #include "CUDAStencil.cpp" using std::cout; using std::cin; // // We are using the "trick" illustrated by the NVIDIA simpleTemplates example // for accessing dynamically-allocated shared memory from a templatized // function. The strategy uses a templatized struct with specialized // accessor functions that declare the actual symbol with the type in // their type name (to avoid naming conflicts). // template<typename T> struct SharedMemory { // Should never be instantiated. // We enforce this at compile time. __device__ T* GetPointer( void ) { extern __device__ void error( void ); error(); return NULL; } }; // specializations for types we use template<> struct SharedMemory<float> { __device__ float* GetPointer( void ) { extern __shared__ float sh_float[]; // printf( "sh_float=%p\n", sh_float ); return sh_float; } }; template<> struct SharedMemory<double> { __device__ double* GetPointer( void ) { extern __shared__ double sh_double[]; // printf( "sh_double=%p\n", sh_double ); return sh_double; } }; __device__ int ToGlobalRow( int gidRow, int lszRow, int lidRow ) { return gidRow * lszRow + lidRow; } __device__ int ToGlobalCol( int gidCol, int lszCol, int lidCol ) { return gidCol * lszCol + lidCol; } __device__ int ToFlatIdx( int row, int col, int rowWidth ) { // assumes input coordinates and dimensions are logical (without halo) // and a halo width of 1 return (row+1)*(rowWidth + 2) + (col+1); } template<class T> __global__ void StencilKernel( T* data, T* newData, int alignment, int nStripItems, T wCenter, T wCardinal, T wDiagonal ) { // determine our location in the coordinate system // see the comment in operator() at the definition of the dimGrid // and dimBlock dim3s to understand why .x == row and .y == column. int gidRow = blockIdx.x; int gidCol = blockIdx.y; // int gszRow = gridDim.x; int gszCol = gridDim.y; int lidRow = threadIdx.x; int lidCol = threadIdx.y; int lszRow = nStripItems; int lszCol = blockDim.y; // determine our logical global data coordinates (without halo) int gRow = ToGlobalRow( gidRow, lszRow, lidRow ); int gCol = ToGlobalCol( gidCol, lszCol, lidCol ); // determine pitch of rows (without halo) int nCols = gszCol * lszCol + 2; // assume halo is there for computing padding int nPaddedCols = nCols + (((nCols % alignment) == 0) ? 0 : (alignment - (nCols % alignment))); int gRowWidth = nPaddedCols - 2; // remove the halo // Copy my global data item to a shared local buffer. // That local buffer is passed to us. // We assume it is large enough to hold all the data computed by // our thread block, plus a halo of width 1. SharedMemory<T> shobj; T* sh = shobj.GetPointer(); int lRowWidth = lszCol; for( int i = 0; i < (lszRow + 2); i++ ) { int lidx = ToFlatIdx( lidRow - 1 + i, lidCol, lRowWidth ); int gidx = ToFlatIdx( gRow - 1 + i, gCol, gRowWidth ); sh[lidx] = data[gidx]; } // Copy the "left" and "right" halo rows into our local memory buffer. // Only two threads are involved (first column and last column). 
if( lidCol == 0 ) { for( int i = 0; i < (lszRow + 2); i++ ) { int lidx = ToFlatIdx(lidRow - 1 + i, lidCol - 1, lRowWidth ); int gidx = ToFlatIdx(gRow - 1 + i, gCol - 1, gRowWidth ); sh[lidx] = data[gidx]; } } else if( lidCol == (lszCol - 1) ) { for( int i = 0; i < (lszRow + 2); i++ ) { int lidx = ToFlatIdx(lidRow - 1 + i, lidCol + 1, lRowWidth ); int gidx = ToFlatIdx(gRow - 1 + i, gCol + 1, gRowWidth ); sh[lidx] = data[gidx]; } } // let all those loads finish __syncthreads(); // do my part of the smoothing operation for( int i = 0; i < lszRow; i++ ) { int cidx = ToFlatIdx( lidRow + i, lidCol , lRowWidth ); int nidx = ToFlatIdx( lidRow - 1 + i, lidCol , lRowWidth ); int sidx = ToFlatIdx( lidRow + 1 + i, lidCol , lRowWidth ); int eidx = ToFlatIdx( lidRow + i, lidCol + 1, lRowWidth ); int widx = ToFlatIdx( lidRow + i, lidCol - 1, lRowWidth ); int neidx = ToFlatIdx( lidRow - 1 + i, lidCol + 1, lRowWidth ); int seidx = ToFlatIdx( lidRow + 1 + i, lidCol + 1, lRowWidth ); int nwidx = ToFlatIdx( lidRow - 1 + i, lidCol - 1, lRowWidth ); int swidx = ToFlatIdx( lidRow + 1 + i, lidCol - 1, lRowWidth ); T centerValue = sh[cidx]; T cardinalValueSum = sh[nidx] + sh[sidx] + sh[eidx] + sh[widx]; T diagonalValueSum = sh[neidx] + sh[seidx] + sh[nwidx] + sh[swidx]; newData[ToFlatIdx(gRow + i, gCol, gRowWidth)] = wCenter * centerValue + wCardinal * cardinalValueSum + wDiagonal * diagonalValueSum; } } template <class T> void CUDAStencil<T>::operator()( Matrix2D<T>& mtx, unsigned int nIters ) { // assume a 1-wide halo size_t gRows = mtx.GetNumRows() - 2; size_t gCols = mtx.GetNumColumns() - 2; assert( gRows % lRows == 0 ); assert( gCols % lCols == 0 ); // Note: this is confusing. C/C++ code on the host and CUDA C on // the device use row-major ordering where the first dimension is // the row and the second is the column. In a dim3, the constituent // items are named .x, .y, and .z. Normally, x is considered // horizontal (which would correspond to column position), y is // vertical (which would correspond to row position). We use // .x == row (first dimension) // .y == column (second dimension) // // Since each GPU thread is responsible for a strip of data // from the original, our index space is scaled smaller in // one dimension relative to the actual data dim3 dimGrid( gRows / lRows, gCols / lCols ); dim3 dimBlock( 1, lCols ); // size of data to transfer to/from device - assume 1-wide halo size_t matDataSize = mtx.GetDataSize(); size_t localDataSize = sizeof(T) * (lRows + 2) * (lCols + 2); T* da = NULL; T* db = NULL; // allocate space on device in global memory cudaMalloc( (void**)&da, matDataSize ); cudaMalloc( (void**)&db, matDataSize ); // copy initial data to global memory T* currData = da; T* newData = db; cudaMemcpy( currData, mtx.GetFlatData(), matDataSize, cudaMemcpyHostToDevice ); // copy the halo from the initial buffer into the second buffer // Note: when doing local iterations, these values do not change // but they can change in the MPI version after an inter-process // halo exchange. 
// // copy the sides with contiguous data size_t rowExtent = mtx.GetNumPaddedColumns() * sizeof(T); cudaMemcpy2D( newData, // destination rowExtent, // destination pitch currData, // source rowExtent, // source pitch rowExtent, // width of data to transfer (bytes) 1, // height of data to transfer (rows) cudaMemcpyDeviceToDevice ); cudaMemcpy2D( newData + (mtx.GetNumRows() - 1) * mtx.GetNumPaddedColumns(), // destination rowExtent, // destination pitch currData + (mtx.GetNumRows() - 1) * mtx.GetNumPaddedColumns(), // source rowExtent, // source pitch rowExtent, // width of data to transfer (bytes) 1, // height of data to transfer (rows) cudaMemcpyDeviceToDevice ); // copy the non-contiguous data cudaMemcpy2D( newData, // destination rowExtent, // destination pitch currData, // source rowExtent, // source pitch sizeof(T), // width of data to transfer (bytes) mtx.GetNumRows(), // height of data to transfer (rows) cudaMemcpyDeviceToDevice ); cudaMemcpy2D( newData + (mtx.GetNumColumns() - 1), // destination rowExtent, // destination pitch currData + (mtx.GetNumColumns() - 1), // source rowExtent, // source pitch sizeof(T), // width of data to transfer (bytes) mtx.GetNumRows(), // height of data to transfer (rows) cudaMemcpyDeviceToDevice ); // run the CUDA kernel for( unsigned int iter = 0; iter < nIters; iter++ ) { this->DoPreIterationWork( currData, newData, mtx, iter ); // do the stencil operation StencilKernel<<<dimGrid, dimBlock, localDataSize>>>( currData, newData, mtx.GetPad(), lRows, this->wCenter, this->wCardinal, this->wDiagonal ); CHECK_CUDA_ERROR(); // swap our notion of which buffer holds the "real" data if( currData == da ) { currData = db; newData = da; } else { currData = da; newData = db; } } // get the final result cudaMemcpy( mtx.GetFlatData(), currData, matDataSize, cudaMemcpyDeviceToHost ); // clean up CUDA cudaFree( da ); cudaFree( db ); } // Ensure our template classes get instantiated for the types needed by // the rest of the code. // Note that we are instantiating objects here. Some of the other // SHOC benchmarks also need to force template instantiations for specific // types, but they are using function templates. This mechanism (putting // them in a function) will not work if nvcc is using more recent version of // g++ underneath. Instead, just declare the function specialization outside // of any function with a 'template' keyword like so: // // template void Func( int, int, int, float*, int ); // template void Func( int, int, int, double*, int ); // void EnsureStencilInstantiation( void ) { CUDAStencil<float> csf( 0, 0, 0, 0, 0, 0 ); Matrix2D<float> mf( 2, 2 ); csf( mf, 0 ); CUDAStencil<double> csd( 0, 0, 0, 0, 0, 0 ); Matrix2D<double> md( 2, 2 ); csd( md, 0 ); }
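// [Editorial sketch] A minimal, self-contained illustration of the SharedMemory<T> idiom used by
// StencilKernel above: a templated kernel obtains its dynamically sized shared-memory buffer
// through the specialized accessor, so the float and double instantiations do not collide on the
// extern __shared__ symbol. The kernel and names below are illustrative only, not part of SHOC.
template<class T>
__global__ void ReverseInSharedSketch( T* data, int n )
{
    SharedMemory<T> shobj;
    T* sh = shobj.GetPointer();     // points at the dynamic shared-memory buffer

    int i = threadIdx.x;
    if( i < n )
    {
        sh[i] = data[i];            // stage the values in shared memory
    }
    __syncthreads();
    if( i < n )
    {
        data[i] = sh[n - 1 - i];    // trivial use of the staged values
    }
}
// Launch with the buffer size passed as the third launch-configuration argument, e.g.
//   ReverseInSharedSketch<float><<<1, n, n * sizeof(float)>>>( d_data, n );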
#include "common.h" #include "device_tensor.h" namespace { template<typename DType, typename Acctype> struct KD2Op { __device__ KD2Op(DeviceTensor<DType, 3> x, DeviceTensor<DType, 2> c, DeviceTensor<DType, 2> std) : X(x), C(c), STD(std) {} __device__ __forceinline__ Acctype operator()(int b, int i, int k, int d) { DType r = (X[b][i][d] - C[k][d]) / STD[k][d]; return ScalarConvert<DType, Acctype>::to(r * r); } DeviceTensor<DType, 3> X; DeviceTensor<DType, 2> C; DeviceTensor<DType, 2> STD; }; template<typename DType, typename Acctype> __global__ void Encoding_Dist_Forward_kernel ( DeviceTensor<DType, 3> KD, DeviceTensor<DType, 3> X, DeviceTensor<DType, 2> C, DeviceTensor<DType, 2> STD) { /* declarations of the variables */ int b, k, i, D; /* Get the index and channels */ b = blockIdx.z; k = blockIdx.x; i = blockIdx.y; D = X.getSize(2); /* main operation */ KD2Op<DType, Acctype> g(X, C, STD); KD[b][i][k] = reduceD<Acctype>(g, b, i, k, D);; } template<typename DType, typename Acctype> struct EncGradXOp { __device__ EncGradXOp( DeviceTensor<DType, 3> gkd, DeviceTensor<DType, 3> x, DeviceTensor<DType, 2> c, DeviceTensor<DType, 2> std) : GKD(gkd), X(x), C(c), STD(std) {} // DeviceTensor<DType, 1> s, S(s) __device__ __forceinline__ Acctype operator()(int b, int i, int k, int d) { return ScalarConvert<DType, Acctype>::to( 2 * GKD[b][i][k] * (X[b][i][d] - C[k][d]) / (STD[k][d] * STD[k][d])); } DeviceTensor<DType, 3> GKD; DeviceTensor<DType, 3> X; DeviceTensor<DType, 2> C; DeviceTensor<DType, 2> STD; // DeviceTensor<DType, 1> S; }; template<typename DType, typename Acctype> __global__ void Encoding_GradX_kernel ( DeviceTensor<DType, 3> GKD, DeviceTensor<DType, 3> GX, DeviceTensor<DType, 3> X, DeviceTensor<DType, 2> C, DeviceTensor<DType, 2> STD) { // DeviceTensor<DType, 1> S /* declarations of the variables */ int b, d, i, K; /* Get the index and channels */ b = blockIdx.z; i = blockIdx.y; d = blockIdx.x; K = C.getSize(0); /* main operation */ EncGradXOp<DType, Acctype> g(GKD, X, C, STD); GX[b][i][d] = reduceK<Acctype>(g, b, i, d, K); } template<typename DType, typename Acctype> struct EncGradSTDOp { __device__ EncGradSTDOp( DeviceTensor<DType, 3> gkd, DeviceTensor<DType, 3> x, DeviceTensor<DType, 2> c, DeviceTensor<DType, 2> std) : GKD(gkd), X(x), C(c), STD(std) {} // DeviceTensor<DType, 1> s, S(s) __device__ __forceinline__ Acctype operator()(int b, int i, int k, int d) { return ScalarConvert<DType, Acctype>::to( -2 * GKD[b][i][k] * (X[b][i][d] - C[k][d]) * (X[b][i][d] - C[k][d]) / (STD[k][d] * STD[k][d] * STD[k][d])); } DeviceTensor<DType, 3> GKD; DeviceTensor<DType, 3> X; DeviceTensor<DType, 2> C; DeviceTensor<DType, 2> STD; // DeviceTensor<DType, 1> S; }; template<typename DType, typename Acctype> __global__ void Encoding_GradCSTD_kernel ( DeviceTensor<DType, 3> GKD, DeviceTensor<DType, 2> GC, DeviceTensor<DType, 2> GSTD, DeviceTensor<DType, 3> X, DeviceTensor<DType, 2> C, DeviceTensor<DType, 2> STD) { /* declarations of the variables */ int k, d, B, N; /* Get the index and channels */ d = blockIdx.x; k = blockIdx.y; B = X.getSize(0); N = X.getSize(1); /* main operation */ EncGradXOp<DType, Acctype> g1(GKD, X, C, STD); EncGradSTDOp<DType, Acctype> g2(GKD, X, C, STD); GC[k][d] = -reduceBN<Acctype>(g1, k, d, B, N); GSTD[k][d] += reduceBN<Acctype>(g2, k, d, B, N); } template<typename DType, typename Acctype> struct EncGradSTDXOp { __device__ EncGradSTDXOp( DeviceTensor<DType, 2> gstd, DeviceTensor<DType, 3> x, DeviceTensor<DType, 2> c, DeviceTensor<DType, 2> std) : GSTD(gstd), X(x), C(c), STD(std) 
{} __device__ __forceinline__ Acctype operator()(int b, int i, int k, int d) { return ScalarConvert<DType, Acctype>::to( GSTD[k][d] * (X[b][i][d] - C[k][d]) / STD[k][d]); } DeviceTensor<DType, 2> GSTD; DeviceTensor<DType, 3> X; DeviceTensor<DType, 2> C; DeviceTensor<DType, 2> STD; }; template<typename DType, typename Acctype> __global__ void Encoding_GradSTDX_kernel ( DeviceTensor<DType, 2> GSTD, DeviceTensor<DType, 3> GX, DeviceTensor<DType, 3> X, DeviceTensor<DType, 2> C, DeviceTensor<DType, 2> STD, int N) { /* declarations of the variables */ int b, d, i, K; /* Get the index and channels */ b = blockIdx.z; i = blockIdx.y; d = blockIdx.x; K = C.getSize(0); /* main operation */ EncGradSTDXOp<DType, Acctype> g(GSTD, X, C, STD); GX[b][i][d] += reduceK<Acctype>(g, b, i, d, K) / N; } template<typename DType, typename Acctype> struct AggOpV2 { __device__ AggOpV2(DeviceTensor<DType, 3> a, DeviceTensor<DType, 3> x, DeviceTensor<DType, 2> c, DeviceTensor<DType, 2> std) : A(a), X(x), C(c), STD(std) {} __device__ __forceinline__ Acctype operator()(int b, int i, int k, int d) { return ScalarConvert<DType, Acctype>::to(A[b][i][k] * (X[b][i][d] - C[k][d]) / STD[k][d]); } DeviceTensor<DType, 3> A; DeviceTensor<DType, 3> X; DeviceTensor<DType, 2> C; DeviceTensor<DType, 2> STD; }; template<typename DType, typename Acctype> __global__ void AggregateV2_Forward_kernel ( DeviceTensor<DType, 3> E, DeviceTensor<DType, 3> A, DeviceTensor<DType, 3> X, DeviceTensor<DType, 2> C, DeviceTensor<DType, 2> STD) { /* declarations of the variables */ int b, k, d, N; /* Get the index and channels */ b = blockIdx.z; d = blockIdx.x; k = blockIdx.y; N = X.getSize(1); /* main operation */ AggOpV2<DType, Acctype> g(A, X, C, STD); E[b][k][d] = reduceN<Acctype>(g, b, k, d, N); } template<typename DType, typename Acctype> struct AggV2BackOp { __device__ AggV2BackOp(DeviceTensor<DType, 3> g, DeviceTensor<DType, 3> x, DeviceTensor<DType, 2> c, DeviceTensor<DType, 2> std) : G(g), X(x), C(c), STD(std) {} __device__ __forceinline__ Acctype operator()(int b, int i, int k, int d) { return ScalarConvert<DType, Acctype>::to(G[b][k][d] * (X[b][i][d] - C[k][d]) / STD[k][d]); } DeviceTensor<DType, 3> G; DeviceTensor<DType, 3> X; DeviceTensor<DType, 2> C; DeviceTensor<DType, 2> STD; }; template<typename DType, typename Acctype> __global__ void AggregateV2_Backward_kernel ( DeviceTensor<DType, 3> GA, DeviceTensor<DType, 3> GE, DeviceTensor<DType, 3> A, DeviceTensor<DType, 3> X, DeviceTensor<DType, 2> C, DeviceTensor<DType, 2> STD) { /* declarations of the variables */ int b, k, i, D; /* Get the index and channels */ b = blockIdx.z; i = blockIdx.y; k = blockIdx.x; D = GE.getSize(2); /* main operation */ AggV2BackOp<DType, Acctype> g(GE, X, C, STD); GA[b][i][k] = reduceD<Acctype>(g, b, i, k, D); } } // namespace at::Tensor Encoding_Dist_Inference_Forward_CUDA( const at::Tensor X_, const at::Tensor C_, const at::Tensor STD_) { // const at::Tensor S_, // X \in R^{B, N, D}, C \in R^{K, D}, S \in R^K auto KD_ = torch::zeros({X_.size(0), X_.size(1), C_.size(0)}, X_.options()); // E(x), E(x^2) int N = X_.size(0) * X_.size(1); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 blocks(C_.size(0), X_.size(1), X_.size(0)); dim3 threads(getNumThreads(C_.size(1))); // calculate the kernel distance AT_DISPATCH_FLOATING_TYPES(X_.type(), "Encoding_Dist_Inference_Forward_CUDA", ([&] { /* Device tensors */ DeviceTensor<scalar_t, 3> KD = devicetensor<scalar_t, 3>(KD_); DeviceTensor<scalar_t, 3> X = devicetensor<scalar_t, 3>(X_); DeviceTensor<scalar_t, 
2> C = devicetensor<scalar_t, 2>(C_); DeviceTensor<scalar_t, 2> STD = devicetensor<scalar_t, 2>(STD_); /* kernel function */ Encoding_Dist_Forward_kernel<scalar_t, scalar_t> <<<blocks, threads, 0, stream>>> (KD, X, C, STD); })); AT_ASSERT(cudaGetLastError() == cudaSuccess); return KD_; } std::vector<at::Tensor> Encoding_Dist_Inference_Backward_CUDA( const at::Tensor GKD_, const at::Tensor KD_, const at::Tensor X_, const at::Tensor C_, const at::Tensor STD_) { auto GX_ = at::zeros_like(X_); auto GC_ = at::zeros_like(C_); auto GSTD_ = at::zeros_like(STD_); /* kernel function */ cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 blocks1(X_.size(2), X_.size(1), X_.size(0)); dim3 threads1(getNumThreads(C_.size(0))); dim3 blocks2(C_.size(1), C_.size(0)); dim3 threads2(getNumThreads(X_.size(1))); int N = X_.size(0) * X_.size(1); AT_DISPATCH_FLOATING_TYPES(X_.type(), "Encoding_Dist_Backward_CUDA", ([&] { /* Device tensors */ DeviceTensor<scalar_t, 3> GKD = devicetensor<scalar_t, 3>(GKD_); DeviceTensor<scalar_t, 2> GSTD = devicetensor<scalar_t, 2>(GSTD_); DeviceTensor<scalar_t, 3> GX = devicetensor<scalar_t, 3>(GX_); DeviceTensor<scalar_t, 2> GC = devicetensor<scalar_t, 2>(GC_); DeviceTensor<scalar_t, 3> X = devicetensor<scalar_t, 3>(X_); DeviceTensor<scalar_t, 2> C = devicetensor<scalar_t, 2>(C_); DeviceTensor<scalar_t, 2> STD = devicetensor<scalar_t, 2>(STD_); Encoding_GradX_kernel<scalar_t, scalar_t> <<<blocks1, threads1, 0, stream>>> (GKD, GX, X, C, STD); AT_ASSERT(cudaGetLastError() == cudaSuccess); Encoding_GradCSTD_kernel<scalar_t, scalar_t> <<<blocks2, threads2, 0, stream>>> (GKD, GC, GSTD, X, C, STD); AT_ASSERT(cudaGetLastError() == cudaSuccess); })); return {GX_, GC_, GSTD_}; } std::vector<at::Tensor> Encoding_Dist_Forward_CUDA( const at::Tensor X_, const at::Tensor C_, double eps) { // const at::Tensor S_, // X \in R^{B, N, D}, C \in R^{K, D}, S \in R^K auto KD_ = torch::zeros({X_.size(0), X_.size(1), C_.size(0)}, X_.options()); // E(x), E(x^2) int N = X_.size(0) * X_.size(1); auto SVar_ = (X_.pow(2).sum(0).sum(0).view({1, X_.size(2)}) - 2 * C_ * X_.sum(0).sum(0).view({1, X_.size(2)})).expand_as(C_) + C_.pow(2) * N; auto STD_ = at::sqrt(SVar_ / N + eps); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 blocks(C_.size(0), X_.size(1), X_.size(0)); dim3 threads(getNumThreads(C_.size(1))); // calculate the kernel distance AT_DISPATCH_FLOATING_TYPES(X_.type(), "Encoding_Dist_Forward_CUDA", ([&] { /* Device tensors */ DeviceTensor<scalar_t, 3> KD = devicetensor<scalar_t, 3>(KD_); DeviceTensor<scalar_t, 3> X = devicetensor<scalar_t, 3>(X_); DeviceTensor<scalar_t, 2> C = devicetensor<scalar_t, 2>(C_); DeviceTensor<scalar_t, 2> STD = devicetensor<scalar_t, 2>(STD_); /* kernel function */ Encoding_Dist_Forward_kernel<scalar_t, scalar_t> <<<blocks, threads, 0, stream>>> (KD, X, C, STD); })); AT_ASSERT(cudaGetLastError() == cudaSuccess); return {KD_, STD_, SVar_ / (N - 1)}; } std::vector<at::Tensor> Encoding_Dist_Backward_CUDA( const at::Tensor GKD_, const at::Tensor GSTD_, const at::Tensor KD_, const at::Tensor X_, const at::Tensor C_, const at::Tensor STD_) { auto GX_ = at::zeros_like(X_); auto GC_ = at::zeros_like(C_); auto GSTD2_ = GSTD_.clone(); /* kernel function */ cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 blocks1(X_.size(2), X_.size(1), X_.size(0)); dim3 threads1(getNumThreads(C_.size(0))); dim3 blocks2(C_.size(1), C_.size(0)); dim3 threads2(getNumThreads(X_.size(1))); int N = X_.size(0) * X_.size(1); AT_DISPATCH_FLOATING_TYPES(X_.type(), 
"Encoding_Dist_Backward_CUDA", ([&] { /* Device tensors */ DeviceTensor<scalar_t, 3> GKD = devicetensor<scalar_t, 3>(GKD_); DeviceTensor<scalar_t, 2> GSTD = devicetensor<scalar_t, 2>(GSTD2_); DeviceTensor<scalar_t, 3> GX = devicetensor<scalar_t, 3>(GX_); DeviceTensor<scalar_t, 2> GC = devicetensor<scalar_t, 2>(GC_); DeviceTensor<scalar_t, 3> X = devicetensor<scalar_t, 3>(X_); DeviceTensor<scalar_t, 2> C = devicetensor<scalar_t, 2>(C_); DeviceTensor<scalar_t, 2> STD = devicetensor<scalar_t, 2>(STD_); Encoding_GradX_kernel<scalar_t, scalar_t> <<<blocks1, threads1, 0, stream>>> (GKD, GX, X, C, STD); AT_ASSERT(cudaGetLastError() == cudaSuccess); Encoding_GradCSTD_kernel<scalar_t, scalar_t> <<<blocks2, threads2, 0, stream>>> (GKD, GC, GSTD, X, C, STD); AT_ASSERT(cudaGetLastError() == cudaSuccess); Encoding_GradSTDX_kernel<scalar_t, scalar_t> <<<blocks1, threads1, 0, stream>>> (GSTD, GX, X, C, STD, N); AT_ASSERT(cudaGetLastError() == cudaSuccess); })); // d_sigma/d_c GC_ = GC_ - GSTD2_ * (X_.mean(0).mean(0) - C_) / STD_; return {GX_, GC_}; } at::Tensor AggregateV2_Forward_CUDA( const at::Tensor A_, const at::Tensor X_, const at::Tensor C_, const at::Tensor STD_) { /* Device tensors */ auto E_ = torch::zeros({A_.size(0), C_.size(0), C_.size(1)}, A_.options()); // auto IS_ = 1.0f / (S_ + eps).sqrt(); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); // B, K, D dim3 blocks(C_.size(1), C_.size(0), X_.size(0)); dim3 threads(getNumThreads(X_.size(1))); AT_DISPATCH_FLOATING_TYPES(A_.type(), "Aggregate_Forward_CUDA", ([&] { DeviceTensor<scalar_t, 3> E = devicetensor<scalar_t, 3>(E_); DeviceTensor<scalar_t, 3> A = devicetensor<scalar_t, 3>(A_); DeviceTensor<scalar_t, 3> X = devicetensor<scalar_t, 3>(X_); DeviceTensor<scalar_t, 2> C = devicetensor<scalar_t, 2>(C_); DeviceTensor<scalar_t, 2> STD = devicetensor<scalar_t, 2>(STD_); /* kernel function */ AggregateV2_Forward_kernel<scalar_t, scalar_t> <<<blocks, threads, 0, stream>>>(E, A, X, C, STD); })); AT_ASSERT(cudaGetLastError() == cudaSuccess); return E_; } std::vector<at::Tensor> AggregateV2_Backward_CUDA( const at::Tensor GE_, const at::Tensor E_, const at::Tensor A_, const at::Tensor X_, const at::Tensor C_, const at::Tensor STD_) { auto gradA_ = at::zeros_like(A_); auto gradX_ = at::bmm(A_ , (GE_ / STD_.unsqueeze(0))); auto gradC_ = -(A_.sum(1).unsqueeze(2) * GE_ / STD_.unsqueeze(0)).sum(0); auto gradSTD_ = -(GE_ * E_).sum(0) / STD_; // auto gradS_ = -0.5 * (GE_ * E_).sum(2).sum(0) / S_; cudaStream_t stream = at::cuda::getCurrentCUDAStream(); // B, K, D dim3 blocks(C_.size(0), X_.size(1), X_.size(0)); dim3 threads(getNumThreads(C_.size(1))); AT_DISPATCH_FLOATING_TYPES(A_.type(), "Aggregate_Backward_CUDA", ([&] { /* Device tensors */ DeviceTensor<scalar_t, 3> GA = devicetensor<scalar_t, 3>(gradA_); DeviceTensor<scalar_t, 3> GE = devicetensor<scalar_t, 3>(GE_); DeviceTensor<scalar_t, 3> A = devicetensor<scalar_t, 3>(A_); DeviceTensor<scalar_t, 3> X = devicetensor<scalar_t, 3>(X_); DeviceTensor<scalar_t, 2> C = devicetensor<scalar_t, 2>(C_); DeviceTensor<scalar_t, 2> STD = devicetensor<scalar_t, 2>(STD_); AggregateV2_Backward_kernel<scalar_t, scalar_t> <<<blocks, threads, 0, stream>>> (GA, GE, A, X, C, STD); })); AT_ASSERT(cudaGetLastError() == cudaSuccess); return {gradA_, gradX_, gradC_, gradSTD_}; }