hip_filename (string, length 5-84) | hip_content (string, length 79-9.69M) | cuda_filename (string, length 4-83) | cuda_content (string, length 19-9.69M)
---|---|---|---
82a839c0d888dc182b347f900dd65318717d0860.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/core/core.hpp>
#include <stdio.h>
#include <iostream>
#include <chrono>
#include "common.h"
using namespace std;
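// Remap each pixel through the normalized cumulative histogram h_s (the equalization lookup table).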
__global__ void normalize_image(unsigned char* input, unsigned char* output, int width, int height, int grayWidthStep, float* h, float* h_s) {
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int gray_tid = yIndex * grayWidthStep + xIndex;
if (xIndex < width && yIndex < height) {
output[gray_tid] = h_s[input[gray_tid]];
}
}
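// Build the normalized cumulative histogram (CDF scaled by 255/(width*height)); only block (0,0) runs this, one thread per bin.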
__global__ void normalize_histogram(unsigned char* input, unsigned char* output, int width, int height, int grayWidthStep, float* h, float* h_s) {
unsigned int nxy = threadIdx.x + threadIdx.y * blockDim.x;
float size = width*height;
float normalize = 255/size;
if (nxy < 256 && blockIdx.x == 0 && blockIdx.y == 0){
for (int i=0; i<=nxy; i++){
h_s[nxy] += h[i];
}
h_s[nxy] = h_s[nxy]*normalize;
}
}
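// Accumulate a per-block 256-bin histogram in shared memory, then merge it into the global histogram h with atomics.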
__global__ void histogram(unsigned char* input, unsigned char* output, int width, int height, int grayWidthStep, float* h, float* h_s) {
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int nxy = threadIdx.x + threadIdx.y * blockDim.x;
const int gray_tid = yIndex * grayWidthStep + xIndex;
__shared__ int h_temp[256];
if (nxy < 256) {
h_temp[nxy] = 0;
}
__syncthreads();
if (xIndex < width && yIndex < height) {
atomicAdd(&h_temp[input[gray_tid]], 1);
}
__syncthreads();
if (nxy < 256) {
atomicAdd(&h[nxy], h_temp[nxy]);
}
__syncthreads();
}
void normalize(const cv::Mat& input, cv::Mat& output) {
cout << "Input image step: " << input.step << " rows: " << input.rows << " cols: " << input.cols << endl;
// Calculate total number of bytes of input and output image
// Step = cols * number of colors
size_t grayBytes = output.step * output.rows;
unsigned char *d_input, *d_output;
float * h = {};
float * h_s = {};
// Allocate device memory
SAFE_CALL(hipMalloc<unsigned char>(&d_input, grayBytes), "CUDA Malloc Failed");
SAFE_CALL(hipMalloc<unsigned char>(&d_output, grayBytes), "CUDA Malloc Failed");
SAFE_CALL(hipMalloc(&h, 256*sizeof(float)), "CUDA Malloc Failed");
SAFE_CALL(hipMalloc(&h_s, 256*sizeof(float)), "CUDA Malloc Failed");
// Zero the histogram and CDF buffers so the accumulating kernels start from zero
SAFE_CALL(hipMemset(h, 0, 256*sizeof(float)), "CUDA Memset Failed");
SAFE_CALL(hipMemset(h_s, 0, 256*sizeof(float)), "CUDA Memset Failed");
// Copy data from OpenCV input image to device memory
SAFE_CALL(hipMemcpy(d_input, input.ptr(), grayBytes, hipMemcpyHostToDevice), "CUDA Memcpy Host To Device Failed");
SAFE_CALL(hipMemcpy(d_output, output.ptr(), grayBytes, hipMemcpyHostToDevice), "CUDA Memcpy Host To Device Failed");
// Specify a reasonable block size
const dim3 block(16,16);
// Calculate grid size to cover the whole image
// const dim3 grid((input.cols + block.x - 1) / block.x, (input.rows + block.y - 1) / block.y);
const dim3 grid((input.cols)/block.x, (input.rows)/block.y);
// printf("bgr_to_gray_kernel<<<(%d, %d) , (%d, %d)>>>\n", grid.x, grid.y, block.x, block.y);
// Launch the histogram equalization kernels
auto start_cpu = chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( histogram), dim3(grid),dim3(block), 0, 0, d_input, d_output, input.cols, input.rows, static_cast<int>(input.step), h, h_s);
hipLaunchKernelGGL(( normalize_histogram), dim3(grid),dim3(block), 0, 0, d_input, d_output, input.cols, input.rows, static_cast<int>(input.step), h, h_s);
hipLaunchKernelGGL(( normalize_image), dim3(grid),dim3(block), 0, 0, d_input, d_output, input.cols, input.rows, static_cast<int>(input.step), h, h_s);
auto end_cpu = chrono::high_resolution_clock::now();
chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
printf("elapsed %f ms\n", duration_ms.count());
// Synchronize to check for any kernel launch errors
SAFE_CALL(hipDeviceSynchronize(), "Kernel Launch Failed");
// Copy back data from destination device memory to OpenCV output image
SAFE_CALL(hipMemcpy(output.ptr(), d_output, grayBytes, hipMemcpyDeviceToHost), "CUDA Memcpy Device To Host Failed");
// Free the device memory
SAFE_CALL(hipFree(d_input), "CUDA Free Failed");
SAFE_CALL(hipFree(d_output), "CUDA Free Failed");
SAFE_CALL(hipFree(h), "CUDA Free Failed");
SAFE_CALL(hipFree(h_s), "CUDA Free Failed");
}
int main(int argc, char *argv[]) {
string imagePath;
if(argc < 2)
imagePath = "Images/woman3.jpg";
else
imagePath = argv[1];
cout << imagePath << endl;
// Read input image from the disk
cv::Mat input = cv::imread(imagePath, CV_LOAD_IMAGE_COLOR);
if (input.empty())
{
cout << "Image Not Found!" << std::endl;
cin.get();
return -1;
}
//Create output image
// cv::Mat input_bw(input.rows, input.cols, CV_8UC1);
cv::Mat output(input.rows, input.cols, CV_8UC1);
cv::cvtColor(input, output, cv::COLOR_BGR2GRAY);
cv::Mat output_n(input.rows, input.cols, CV_8UC1);
normalize(output, output_n);
//Allow the windows to resize
namedWindow("Input", cv::WINDOW_NORMAL);
namedWindow("Output", cv::WINDOW_NORMAL);
cv::resizeWindow("Input", 800, 600);
cv::resizeWindow("Output", 800, 600);
// output = input_bw.clone();
imshow("Input", output);
imshow("Output", output_n);
//Wait for key press
cv::waitKey();
return 0;
}
| 82a839c0d888dc182b347f900dd65318717d0860.cu | #include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/core/core.hpp>
#include <stdio.h>
#include <iostream>
#include <chrono>
#include "common.h"
using namespace std;
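// Remap each pixel through the normalized cumulative histogram h_s (the equalization lookup table).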
__global__ void normalize_image(unsigned char* input, unsigned char* output, int width, int height, int grayWidthStep, float* h, float* h_s) {
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int gray_tid = yIndex * grayWidthStep + xIndex;
if (xIndex < width && yIndex < height) {
output[gray_tid] = h_s[input[gray_tid]];
}
}
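// Build the normalized cumulative histogram (CDF scaled by 255/(width*height)); only block (0,0) runs this, one thread per bin.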
__global__ void normalize_histogram(unsigned char* input, unsigned char* output, int width, int height, int grayWidthStep, float* h, float* h_s) {
unsigned int nxy = threadIdx.x + threadIdx.y * blockDim.x;
float size = width*height;
float normalize = 255/size;
if (nxy < 256 && blockIdx.x == 0 && blockIdx.y == 0){
for (int i=0; i<=nxy; i++){
h_s[nxy] += h[i];
}
h_s[nxy] = h_s[nxy]*normalize;
}
}
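// Accumulate a per-block 256-bin histogram in shared memory, then merge it into the global histogram h with atomics.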
__global__ void histogram(unsigned char* input, unsigned char* output, int width, int height, int grayWidthStep, float* h, float* h_s) {
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int nxy = threadIdx.x + threadIdx.y * blockDim.x;
const int gray_tid = yIndex * grayWidthStep + xIndex;
__shared__ int h_temp[256];
if (nxy < 256) {
h_temp[nxy] = 0;
}
__syncthreads();
if (xIndex < width && yIndex < height) {
atomicAdd(&h_temp[input[gray_tid]], 1);
}
__syncthreads();
if (nxy < 256) {
atomicAdd(&h[nxy], h_temp[nxy]);
}
__syncthreads();
}
void normalize(const cv::Mat& input, cv::Mat& output) {
cout << "Input image step: " << input.step << " rows: " << input.rows << " cols: " << input.cols << endl;
// Calculate total number of bytes of input and output image
// Step = cols * number of colors
size_t grayBytes = output.step * output.rows;
unsigned char *d_input, *d_output;
float * h = {};
float * h_s = {};
// Allocate device memory
SAFE_CALL(cudaMalloc<unsigned char>(&d_input, grayBytes), "CUDA Malloc Failed");
SAFE_CALL(cudaMalloc<unsigned char>(&d_output, grayBytes), "CUDA Malloc Failed");
SAFE_CALL(cudaMalloc(&h, 256*sizeof(float)), "CUDA Malloc Failed");
SAFE_CALL(cudaMalloc(&h_s, 256*sizeof(float)), "CUDA Malloc Failed");
// Zero the histogram and CDF buffers so the accumulating kernels start from zero
SAFE_CALL(cudaMemset(h, 0, 256*sizeof(float)), "CUDA Memset Failed");
SAFE_CALL(cudaMemset(h_s, 0, 256*sizeof(float)), "CUDA Memset Failed");
// Copy data from OpenCV input image to device memory
SAFE_CALL(cudaMemcpy(d_input, input.ptr(), grayBytes, cudaMemcpyHostToDevice), "CUDA Memcpy Host To Device Failed");
SAFE_CALL(cudaMemcpy(d_output, output.ptr(), grayBytes, cudaMemcpyHostToDevice), "CUDA Memcpy Host To Device Failed");
// Specify a reasonable block size
const dim3 block(16,16);
// Calculate grid size to cover the whole image
// const dim3 grid((input.cols + block.x - 1) / block.x, (input.rows + block.y - 1) / block.y);
const dim3 grid((input.cols)/block.x, (input.rows)/block.y);
// printf("bgr_to_gray_kernel<<<(%d, %d) , (%d, %d)>>>\n", grid.x, grid.y, block.x, block.y);
// Launch the histogram equalization kernels
auto start_cpu = chrono::high_resolution_clock::now();
histogram<<<grid,block>>>(d_input, d_output, input.cols, input.rows, static_cast<int>(input.step), h, h_s);
normalize_histogram<<<grid,block>>>(d_input, d_output, input.cols, input.rows, static_cast<int>(input.step), h, h_s);
normalize_image<<<grid,block>>>(d_input, d_output, input.cols, input.rows, static_cast<int>(input.step), h, h_s);
auto end_cpu = chrono::high_resolution_clock::now();
chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
printf("elapsed %f ms\n", duration_ms.count());
// Synchronize to check for any kernel launch errors
SAFE_CALL(cudaDeviceSynchronize(), "Kernel Launch Failed");
// Copy back data from destination device memory to OpenCV output image
SAFE_CALL(cudaMemcpy(output.ptr(), d_output, grayBytes, cudaMemcpyDeviceToHost), "CUDA Memcpy Device To Host Failed");
// Free the device memory
SAFE_CALL(cudaFree(d_input), "CUDA Free Failed");
SAFE_CALL(cudaFree(d_output), "CUDA Free Failed");
SAFE_CALL(cudaFree(h), "CUDA Free Failed");
SAFE_CALL(cudaFree(h_s), "CUDA Free Failed");
}
int main(int argc, char *argv[]) {
string imagePath;
if(argc < 2)
imagePath = "Images/woman3.jpg";
else
imagePath = argv[1];
cout << imagePath << endl;
// Read input image from the disk
cv::Mat input = cv::imread(imagePath, CV_LOAD_IMAGE_COLOR);
if (input.empty())
{
cout << "Image Not Found!" << std::endl;
cin.get();
return -1;
}
//Create output image
// cv::Mat input_bw(input.rows, input.cols, CV_8UC1);
cv::Mat output(input.rows, input.cols, CV_8UC1);
cv::cvtColor(input, output, cv::COLOR_BGR2GRAY);
cv::Mat output_n(input.rows, input.cols, CV_8UC1);
normalize(output, output_n);
//Allow the windows to resize
namedWindow("Input", cv::WINDOW_NORMAL);
namedWindow("Output", cv::WINDOW_NORMAL);
cv::resizeWindow("Input", 800, 600);
cv::resizeWindow("Output", 800, 600);
// output = input_bw.clone();
imshow("Input", output);
imshow("Output", output_n);
//Wait for key press
cv::waitKey();
return 0;
}
|
6b7034047fd451c3fd514f36ff79b83a509fb671.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include "test_utils.h"
#include "linalg/multiply.h"
#include "random/rng.h"
#include "unary_op.h"
namespace MLCommon {
namespace LinAlg {
template<typename T>
class MultiplyTest : public ::testing::TestWithParam<UnaryOpInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<UnaryOpInputs<T>>::GetParam();
Random::Rng r(params.seed);
int len = params.len;
allocate(in, len);
allocate(out_ref, len);
allocate(out, len);
r.uniform(in, len, T(-1.0), T(1.0));
naiveScale(out_ref, in, params.scalar, len);
multiplyScalar(out, in, params.scalar, len);
}
void TearDown() override {
CUDA_CHECK(hipFree(in));
CUDA_CHECK(hipFree(out_ref));
CUDA_CHECK(hipFree(out));
}
protected:
UnaryOpInputs<T> params;
T *in, *out_ref, *out;
};
const std::vector<UnaryOpInputs<float>> inputsf = {
{0.000001f, 1024 * 1024, 2.f, 1234ULL}};
typedef MultiplyTest<float> MultiplyTestF;
TEST_P(MultiplyTestF, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MultiplyTests, MultiplyTestF,
::testing::ValuesIn(inputsf));
typedef MultiplyTest<double> MultiplyTestD;
const std::vector<UnaryOpInputs<double>> inputsd = {
{0.000001f, 1024 * 1024, 2.f, 1234ULL}};
TEST_P(MultiplyTestD, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MultiplyTests, MultiplyTestD,
::testing::ValuesIn(inputsd));
} // end namespace LinAlg
} // end namespace MLCommon
| 6b7034047fd451c3fd514f36ff79b83a509fb671.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include "test_utils.h"
#include "linalg/multiply.h"
#include "random/rng.h"
#include "unary_op.h"
namespace MLCommon {
namespace LinAlg {
template<typename T>
class MultiplyTest : public ::testing::TestWithParam<UnaryOpInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<UnaryOpInputs<T>>::GetParam();
Random::Rng r(params.seed);
int len = params.len;
allocate(in, len);
allocate(out_ref, len);
allocate(out, len);
r.uniform(in, len, T(-1.0), T(1.0));
naiveScale(out_ref, in, params.scalar, len);
multiplyScalar(out, in, params.scalar, len);
}
void TearDown() override {
CUDA_CHECK(cudaFree(in));
CUDA_CHECK(cudaFree(out_ref));
CUDA_CHECK(cudaFree(out));
}
protected:
UnaryOpInputs<T> params;
T *in, *out_ref, *out;
};
const std::vector<UnaryOpInputs<float>> inputsf = {
{0.000001f, 1024 * 1024, 2.f, 1234ULL}};
typedef MultiplyTest<float> MultiplyTestF;
TEST_P(MultiplyTestF, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MultiplyTests, MultiplyTestF,
::testing::ValuesIn(inputsf));
typedef MultiplyTest<double> MultiplyTestD;
const std::vector<UnaryOpInputs<double>> inputsd = {
{0.000001f, 1024 * 1024, 2.f, 1234ULL}};
TEST_P(MultiplyTestD, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MultiplyTests, MultiplyTestD,
::testing::ValuesIn(inputsd));
} // end namespace LinAlg
} // end namespace MLCommon
|
034efa21087cbf3246f6092db7d19b7f3bd83a61.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// --------------------------------------------------------
// Fast R-CNN
// Copyright (c) Microsoft. All rights reserved.
// Written by Ross Girshick, 2015.
// Licensed under the BSD 2-clause "Simplified" license.
// See LICENSE in the Fast R-CNN project root for license
// information.
// --------------------------------------------------------
#include "caffe/layers/roi_pooling_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ROIPoolForward(const int nthreads, const Dtype* bottom_data,
const Dtype spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
int roi_start_w = round(bottom_rois[1] * spatial_scale);
int roi_start_h = round(bottom_rois[2] * spatial_scale);
int roi_end_w = round(bottom_rois[3] * spatial_scale);
int roi_end_h = round(bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int hstart = static_cast<int>(::floor(static_cast<Dtype>(ph)
* bin_size_h));
int wstart = static_cast<int>(::floor(static_cast<Dtype>(pw)
* bin_size_w));
int hend = static_cast<int>(::ceil(static_cast<Dtype>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(::ceil(static_cast<Dtype>(pw + 1)
* bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
Dtype maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
bottom_data += (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (bottom_data[bottom_index] > maxval) {
maxval = bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
argmax_data[index] = maxidx;
}
}
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int* argmax_data = max_idx_.mutable_gpu_data();
int count = top[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ROIPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void ROIPoolBackward(const int nthreads, const Dtype* top_diff,
const int* argmax_data, const int num_rois, const Dtype spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, Dtype* bottom_diff,
const Dtype* bottom_rois) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, h, w) coords in bottom data
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
Dtype gradient = 0;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Skip if ROI's batch index doesn't match n
if (n != roi_batch_ind) {
continue;
}
int roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
int roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
int roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
int roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Skip if ROI doesn't include (h, w)
const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
h >= roi_start_h && h <= roi_end_h);
if (!in_roi) {
continue;
}
int offset = (roi_n * channels + c) * pooled_height * pooled_width;
const Dtype* offset_top_diff = top_diff + offset;
const int* offset_argmax_data = argmax_data + offset;
// Compute feasible set of pooled units that could have pooled
// this bottom unit
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h);
int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h);
int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w);
int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w);
phstart = min(max(phstart, 0), pooled_height);
phend = min(max(phend, 0), pooled_height);
pwstart = min(max(pwstart, 0), pooled_width);
pwend = min(max(pwend, 0), pooled_width);
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (offset_argmax_data[ph * pooled_width + pw] == (h * width + w)) {
gradient += offset_top_diff[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
const int* argmax_data = max_idx_.gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ROIPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, argmax_data, top[0]->num(), spatial_scale_, channels_,
height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(ROIPoolingLayer);
} // namespace caffe
| 034efa21087cbf3246f6092db7d19b7f3bd83a61.cu | // --------------------------------------------------------
// Fast R-CNN
// Copyright (c) Microsoft. All rights reserved.
// Written by Ross Girshick, 2015.
// Licensed under the BSD 2-clause "Simplified" license.
// See LICENSE in the Fast R-CNN project root for license
// information.
// --------------------------------------------------------
#include "caffe/layers/roi_pooling_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ROIPoolForward(const int nthreads, const Dtype* bottom_data,
const Dtype spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
int roi_start_w = round(bottom_rois[1] * spatial_scale);
int roi_start_h = round(bottom_rois[2] * spatial_scale);
int roi_end_w = round(bottom_rois[3] * spatial_scale);
int roi_end_h = round(bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int hstart = static_cast<int>(std::floor(static_cast<Dtype>(ph)
* bin_size_h));
int wstart = static_cast<int>(std::floor(static_cast<Dtype>(pw)
* bin_size_w));
int hend = static_cast<int>(std::ceil(static_cast<Dtype>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(std::ceil(static_cast<Dtype>(pw + 1)
* bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
Dtype maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
bottom_data += (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (bottom_data[bottom_index] > maxval) {
maxval = bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
argmax_data[index] = maxidx;
}
}
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int* argmax_data = max_idx_.mutable_gpu_data();
int count = top[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
ROIPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void ROIPoolBackward(const int nthreads, const Dtype* top_diff,
const int* argmax_data, const int num_rois, const Dtype spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, Dtype* bottom_diff,
const Dtype* bottom_rois) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, h, w) coords in bottom data
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
Dtype gradient = 0;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Skip if ROI's batch index doesn't match n
if (n != roi_batch_ind) {
continue;
}
int roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
int roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
int roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
int roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Skip if ROI doesn't include (h, w)
const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
h >= roi_start_h && h <= roi_end_h);
if (!in_roi) {
continue;
}
int offset = (roi_n * channels + c) * pooled_height * pooled_width;
const Dtype* offset_top_diff = top_diff + offset;
const int* offset_argmax_data = argmax_data + offset;
// Compute feasible set of pooled units that could have pooled
// this bottom unit
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h);
int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h);
int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w);
int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w);
phstart = min(max(phstart, 0), pooled_height);
phend = min(max(phend, 0), pooled_height);
pwstart = min(max(pwstart, 0), pooled_width);
pwend = min(max(pwend, 0), pooled_width);
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (offset_argmax_data[ph * pooled_width + pw] == (h * width + w)) {
gradient += offset_top_diff[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
const int* argmax_data = max_idx_.gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
ROIPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, argmax_data, top[0]->num(), spatial_scale_, channels_,
height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(ROIPoolingLayer);
} // namespace caffe
|
4ffd71b9b05a9df933f5bfe56e960bfa49fd8d64.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "UpsampleLayer.h"
namespace nvinfer1
{
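// Map a flat output index to the flat index of its source element in the scale_factor-times-smaller input tensor (nearest-neighbor upsampling).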
__device__ int translate_idx(int ii, int d1, int d2, int d3, int scale_factor) {
int x, y, z, w;
w = ii % d3;
ii = ii/d3;
z = ii % d2;
ii = ii/d2;
y = ii % d1;
ii = ii/d1;
x = ii;
w = w/scale_factor;
z = z/scale_factor;
d2 /= scale_factor;
d3 /= scale_factor;
return (((x*d1+y)*d2)+z)*d3+w;
}
template <typename Dtype>
__global__ void upscale(const Dtype *input, Dtype *output,
int no_elements, int scale_factor, int d1, int d2, int d3) {
int ii = threadIdx.x + blockDim.x * blockIdx.x;
if (ii >= no_elements) return;
int ipidx = translate_idx(ii, d1, d2, d3, scale_factor);
output[ii]=input[ipidx];
}
template <typename Dtype>
void UpsampleLayerPlugin::forwardGpu(const Dtype* input,Dtype * output,
int N,int C,int H ,int W) {
int numElem = N*C*H*W;
hipLaunchKernelGGL(( upscale), dim3((numElem + mThreadCount - 1) / mThreadCount), dim3(mThreadCount), 0, 0, input,output, numElem, mScale, C, H, W);
}
int UpsampleLayerPlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, hipStream_t stream)
{
assert(batchSize == 1);
const int channels = mCHW.d[0];
const int64_t in_height = mCHW.d[1];
const int64_t in_width = mCHW.d[2];
const int64_t out_height = mOutputHeight;
const int64_t out_width = mOutputWidth;
int totalElems = batchSize * in_height * in_width * channels;
// Handle no-op resizes efficiently.
if (out_height == in_height && out_width == in_width) {
CUDA_CHECK(hipMemcpyAsync(outputs[0], inputs[0], totalElems * type2size(mDataType), hipMemcpyDeviceToDevice, stream));
return 0;
}
CUDA_CHECK(hipStreamSynchronize(stream));
switch (mDataType)
{
case DataType::kFLOAT :
forwardGpu<float>((const float *)inputs[0],(float *)outputs[0],batchSize,mCHW.d[0],mOutputHeight,mOutputWidth);
break;
case DataType::kHALF:
forwardGpu<__half>((const __half *)inputs[0],(__half *)outputs[0],batchSize,mCHW.d[0],mOutputHeight,mOutputWidth);
break;
case DataType::kINT8:
forwardGpu<u_int8_t>((const u_int8_t *)inputs[0],(u_int8_t *)outputs[0],batchSize,mCHW.d[0],mOutputHeight,mOutputWidth);
break;
default:
std::cerr << "error data type" << std::endl;
}
return 0;
};
}
| 4ffd71b9b05a9df933f5bfe56e960bfa49fd8d64.cu | #include "UpsampleLayer.h"
namespace nvinfer1
{
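// Map a flat output index to the flat index of its source element in the scale_factor-times-smaller input tensor (nearest-neighbor upsampling).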
__device__ int translate_idx(int ii, int d1, int d2, int d3, int scale_factor) {
int x, y, z, w;
w = ii % d3;
ii = ii/d3;
z = ii % d2;
ii = ii/d2;
y = ii % d1;
ii = ii/d1;
x = ii;
w = w/scale_factor;
z = z/scale_factor;
d2 /= scale_factor;
d3 /= scale_factor;
return (((x*d1+y)*d2)+z)*d3+w;
}
template <typename Dtype>
__global__ void upscale(const Dtype *input, Dtype *output,
int no_elements, int scale_factor, int d1, int d2, int d3) {
int ii = threadIdx.x + blockDim.x * blockIdx.x;
if (ii >= no_elements) return;
int ipidx = translate_idx(ii, d1, d2, d3, scale_factor);
output[ii]=input[ipidx];
}
template <typename Dtype>
void UpsampleLayerPlugin::forwardGpu(const Dtype* input,Dtype * output,
int N,int C,int H ,int W) {
int numElem = N*C*H*W;
upscale<<<(numElem + mThreadCount - 1) / mThreadCount, mThreadCount>>>(input,output, numElem, mScale, C, H, W);
}
int UpsampleLayerPlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, cudaStream_t stream)
{
assert(batchSize == 1);
const int channels = mCHW.d[0];
const int64_t in_height = mCHW.d[1];
const int64_t in_width = mCHW.d[2];
const int64_t out_height = mOutputHeight;
const int64_t out_width = mOutputWidth;
int totalElems = batchSize * in_height * in_width * channels;
// Handle no-op resizes efficiently.
if (out_height == in_height && out_width == in_width) {
CUDA_CHECK(cudaMemcpyAsync(outputs[0], inputs[0], totalElems * type2size(mDataType), cudaMemcpyDeviceToDevice, stream));
return 0;
}
CUDA_CHECK(cudaStreamSynchronize(stream));
switch (mDataType)
{
case DataType::kFLOAT :
forwardGpu<float>((const float *)inputs[0],(float *)outputs[0],batchSize,mCHW.d[0],mOutputHeight,mOutputWidth);
break;
case DataType::kHALF:
forwardGpu<__half>((const __half *)inputs[0],(__half *)outputs[0],batchSize,mCHW.d[0],mOutputHeight,mOutputWidth);
break;
case DataType::kINT8:
forwardGpu<u_int8_t>((const u_int8_t *)inputs[0],(u_int8_t *)outputs[0],batchSize,mCHW.d[0],mOutputHeight,mOutputWidth);
break;
default:
std::cerr << "error data type" << std::endl;
}
return 0;
};
}
|
bef86f8336c47114a62eddc57e5fd77dc229174a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// The file has been adapted from the two files:
// https://github.com/laekov/fastmoe/blob/master/cuda/local_exchange.cu
// https://github.com/laekov/fastmoe/blob/master/cuda/local_exchange.cuh
// Git commit hash: 295a615aacce7e54a37e7935274ba15e901c78e4
// We retain the following license from the original files:
// Copyright 2021, Jiaao He. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License").
#include "paddle/fluid/operators/number_count_op.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
#define CEIL(_x_, _y_) (((_x_)-1) / (_y_) + 1)
#define PERTHREAD_EXPERTS 256
#define WARP_SIZE 32
const int CUDA_NUM_THREADS = 512;
static inline int GET_BLOCKS(const int N) {
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
using LoDTensor = framework::LoDTensor;
using Tensor = phi::DenseTensor;
template <typename T>
__global__ void initialize_zero_kernel(T* data, const int length) {
CUDA_KERNEL_LOOP(idx, length) { data[idx] = static_cast<T>(0); }
}
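// Each block counts how many entries of `numbers` fall into its range of PERTHREAD_EXPERTS ids (skipping -1);
// per-thread partial counts are reduced within each warp via shuffles and added atomically to number_count.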
template <typename T>
__global__ void NumberCount(const T* numbers,
T* number_count,
int64_t batch_size,
int upper_range) {
int res_tmp[PERTHREAD_EXPERTS] = {0};
int expert_min = blockIdx.x * PERTHREAD_EXPERTS;
int expert_max = expert_min + PERTHREAD_EXPERTS;
if (expert_max > upper_range) {
expert_max = upper_range;
}
for (int i = threadIdx.x; i < batch_size; i += blockDim.x) {
T idx = numbers[i];
if (idx == -1) {
continue;
}
if (idx < expert_min || idx >= expert_max) {
continue;
}
res_tmp[idx - expert_min] += 1;
}
for (int i = expert_min; i < expert_max; ++i) {
int x = res_tmp[i - expert_min];
#pragma unroll
for (int j = 1; j < WARP_SIZE; j <<= 1) {
#ifdef __HIPCC__
x = x + __shfl_down(x, j);
#else
x = x + __shfl_down_sync(-1u, x, j);
#endif
}
if (threadIdx.x % WARP_SIZE == 0) {
platform::CudaAtomicAdd(number_count + i, x);
}
}
}
template <typename T>
class NumberCountOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto numbers = context.Input<LoDTensor>("numbers");
auto upper_range = context.Attr<int>("upper_range");
auto number_count = context.Output<LoDTensor>("Out");
int64_t batch_size = numbers->numel();
auto place = context.GetPlace();
const auto& dev_ctx = context.template device_context<phi::GPUContext>();
framework::DDim out_dims = phi::make_ddim({upper_range});
auto out_data = number_count->mutable_data<T>(out_dims, place);
const T* gate_data = numbers->data<T>();
hipLaunchKernelGGL(( initialize_zero_kernel<T>)
, dim3(GET_BLOCKS(upper_range)), dim3(CUDA_NUM_THREADS), 0, dev_ctx.stream(),
out_data, upper_range);
hipLaunchKernelGGL(( NumberCount<T>)
, dim3(CEIL(upper_range, PERTHREAD_EXPERTS)), dim3(256), 0, dev_ctx.stream(),
gate_data, out_data, batch_size, upper_range);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(number_count, ops::NumberCountOpCUDAKernel<int64_t>);
| bef86f8336c47114a62eddc57e5fd77dc229174a.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// The file has been adapted from the two files:
// https://github.com/laekov/fastmoe/blob/master/cuda/local_exchange.cu
// https://github.com/laekov/fastmoe/blob/master/cuda/local_exchange.cuh
// Git commit hash: 295a615aacce7e54a37e7935274ba15e901c78e4
// We retain the following license from the original files:
// Copyright 2021, Jiaao He. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License").
#include "paddle/fluid/operators/number_count_op.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
#define CEIL(_x_, _y_) (((_x_)-1) / (_y_) + 1)
#define PERTHREAD_EXPERTS 256
#define WARP_SIZE 32
const int CUDA_NUM_THREADS = 512;
static inline int GET_BLOCKS(const int N) {
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
using LoDTensor = framework::LoDTensor;
using Tensor = phi::DenseTensor;
template <typename T>
__global__ void initialize_zero_kernel(T* data, const int length) {
CUDA_KERNEL_LOOP(idx, length) { data[idx] = static_cast<T>(0); }
}
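// Each block counts how many entries of `numbers` fall into its range of PERTHREAD_EXPERTS ids (skipping -1);
// per-thread partial counts are reduced within each warp via shuffles and added atomically to number_count.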
template <typename T>
__global__ void NumberCount(const T* numbers,
T* number_count,
int64_t batch_size,
int upper_range) {
int res_tmp[PERTHREAD_EXPERTS] = {0};
int expert_min = blockIdx.x * PERTHREAD_EXPERTS;
int expert_max = expert_min + PERTHREAD_EXPERTS;
if (expert_max > upper_range) {
expert_max = upper_range;
}
for (int i = threadIdx.x; i < batch_size; i += blockDim.x) {
T idx = numbers[i];
if (idx == -1) {
continue;
}
if (idx < expert_min || idx >= expert_max) {
continue;
}
res_tmp[idx - expert_min] += 1;
}
for (int i = expert_min; i < expert_max; ++i) {
int x = res_tmp[i - expert_min];
#pragma unroll
for (int j = 1; j < WARP_SIZE; j <<= 1) {
#ifdef __HIPCC__
x = x + __shfl_down(x, j);
#else
x = x + __shfl_down_sync(-1u, x, j);
#endif
}
if (threadIdx.x % WARP_SIZE == 0) {
platform::CudaAtomicAdd(number_count + i, x);
}
}
}
template <typename T>
class NumberCountOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto numbers = context.Input<LoDTensor>("numbers");
auto upper_range = context.Attr<int>("upper_range");
auto number_count = context.Output<LoDTensor>("Out");
int64_t batch_size = numbers->numel();
auto place = context.GetPlace();
const auto& dev_ctx = context.template device_context<phi::GPUContext>();
framework::DDim out_dims = phi::make_ddim({upper_range});
auto out_data = number_count->mutable_data<T>(out_dims, place);
const T* gate_data = numbers->data<T>();
initialize_zero_kernel<T>
<<<GET_BLOCKS(upper_range), CUDA_NUM_THREADS, 0, dev_ctx.stream()>>>(
out_data, upper_range);
NumberCount<T>
<<<CEIL(upper_range, PERTHREAD_EXPERTS), 256, 0, dev_ctx.stream()>>>(
gate_data, out_data, batch_size, upper_range);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(number_count, ops::NumberCountOpCUDAKernel<int64_t>);
|
11a8375a62082e76b74211eb4b4f5f6df4991a36.hip | // !!! This is a file automatically generated by hipify!!!
// RUN: %run_test hipify "%s" "%t" %cuda_args
// CHECK: #include "hip/hip_runtime.h"
// CHECK-NOT: #include "hip/hip_runtime.h"
// CHECK: #include <stdio.h>
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include <stdio.h>
| 11a8375a62082e76b74211eb4b4f5f6df4991a36.cu | // RUN: %run_test hipify "%s" "%t" %cuda_args
// CHECK: #include "hip/hip_runtime.h"
// CHECK-NOT: #include "cuda_runtime.h"
// CHECK: #include <stdio.h>
#include "cuda.h"
#include "cuda_runtime.h"
#include <stdio.h>
|
abc2262f99a21bed0d9f146ae197127a2c830dab.hip | // !!! This is a file automatically generated by hipify!!!
#include <THHUNN/THHUNN.h>
#include <THHUNN/common.h>
#include <TH/THHalf.h>
#include <THH/THHNumerics.cuh>
#include <THH/THHApply.cuh>
#include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/inner_product.h>
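// Elementwise soft margin loss log(1 + exp(-x*y)) and its gradient, consumed by the generic SoftMarginCriterion implementation included below.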
template <typename Dtype, typename Acctype>
struct softmargin_functor
{
__host__ __device__ Acctype operator()(const Dtype& x, const Dtype& y) const
{
return log(1 + exp(ScalarConvert<Dtype, Acctype>::to(-x)*y));
}
};
template <typename Dtype, typename Acctype>
struct softmargin_no_reduce_functor
{
__host__ __device__ void operator()(
const Dtype *x,
const Dtype *y,
Dtype *out) const
{
*out = ScalarConvert<Acctype, Dtype>::to(log(ScalarConvert<int, Acctype>::to(1)
+ exp(ScalarConvert<Dtype, Acctype>::to(-*x) * *y)));
}
};
template <typename Dtype, typename Acctype>
struct softmargin_updateGradInput_functor
{
const Acctype norm;
const Dtype gradOutput;
softmargin_updateGradInput_functor(Acctype norm_, Dtype gradOutput_) :
norm(norm_), gradOutput(gradOutput_) {}
__host__ __device__ Dtype operator()(const Dtype& x, const Dtype& y) const
{
Acctype temp = exp(ScalarConvert<Dtype, Acctype>::to(-x)*y);
return ScalarConvert<Acctype, Dtype>::to(-y*temp*norm/(ScalarConvert<int, Acctype>::to(1) + temp) * gradOutput);
}
};
template <typename Dtype, typename Acctype>
struct softmargin_updateGradInput_no_reduce_functor
{
__forceinline__ __host__ __device__ void operator()(
const Dtype *x,
const Dtype *y,
Dtype *gradInput) const
{
Acctype temp = exp(ScalarConvert<Dtype, Acctype>::to(-*x) * *y);
*gradInput = ScalarConvert<Acctype, Dtype>::to(-*y * temp / (ScalarConvert<int, Acctype>::to(1) + temp));
}
};
#include <THHUNN/generic/SoftMarginCriterion.hip>
#include <THH/THHGenerateFloatTypes.h>
| abc2262f99a21bed0d9f146ae197127a2c830dab.cu | #include <THCUNN/THCUNN.h>
#include <THCUNN/common.h>
#include <TH/THHalf.h>
#include <THC/THCNumerics.cuh>
#include <THC/THCApply.cuh>
#include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/inner_product.h>
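// Elementwise soft margin loss log(1 + exp(-x*y)) and its gradient, consumed by the generic SoftMarginCriterion implementation included below.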
template <typename Dtype, typename Acctype>
struct softmargin_functor
{
__host__ __device__ Acctype operator()(const Dtype& x, const Dtype& y) const
{
return log(1 + exp(ScalarConvert<Dtype, Acctype>::to(-x)*y));
}
};
template <typename Dtype, typename Acctype>
struct softmargin_no_reduce_functor
{
__host__ __device__ void operator()(
const Dtype *x,
const Dtype *y,
Dtype *out) const
{
*out = ScalarConvert<Acctype, Dtype>::to(log(ScalarConvert<int, Acctype>::to(1)
+ exp(ScalarConvert<Dtype, Acctype>::to(-*x) * *y)));
}
};
template <typename Dtype, typename Acctype>
struct softmargin_updateGradInput_functor
{
const Acctype norm;
const Dtype gradOutput;
softmargin_updateGradInput_functor(Acctype norm_, Dtype gradOutput_) :
norm(norm_), gradOutput(gradOutput_) {}
__host__ __device__ Dtype operator()(const Dtype& x, const Dtype& y) const
{
Acctype temp = exp(ScalarConvert<Dtype, Acctype>::to(-x)*y);
return ScalarConvert<Acctype, Dtype>::to(-y*temp*norm/(ScalarConvert<int, Acctype>::to(1) + temp) * gradOutput);
}
};
template <typename Dtype, typename Acctype>
struct softmargin_updateGradInput_no_reduce_functor
{
__forceinline__ __host__ __device__ void operator()(
const Dtype *x,
const Dtype *y,
Dtype *gradInput) const
{
Acctype temp = exp(ScalarConvert<Dtype, Acctype>::to(-*x) * *y);
*gradInput = ScalarConvert<Acctype, Dtype>::to(-*y * temp / (ScalarConvert<int, Acctype>::to(1) + temp));
}
};
#include <THCUNN/generic/SoftMarginCriterion.cu>
#include <THC/THCGenerateFloatTypes.h>
|
b2a5603bd393752540e33730779b2801a09e37d1.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "executeFourthLayer.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *Layer4_Neurons_GPU = NULL;
hipMalloc(&Layer4_Neurons_GPU, XSIZE*YSIZE);
float *Layer4_Weights_GPU = NULL;
hipMalloc(&Layer4_Weights_GPU, XSIZE*YSIZE);
float *Layer5_Neurons_GPU = NULL;
hipMalloc(&Layer5_Neurons_GPU, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
executeFourthLayer), dim3(gridBlock),dim3(threadBlock), 0, 0, Layer4_Neurons_GPU,Layer4_Weights_GPU,Layer5_Neurons_GPU);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
executeFourthLayer), dim3(gridBlock),dim3(threadBlock), 0, 0, Layer4_Neurons_GPU,Layer4_Weights_GPU,Layer5_Neurons_GPU);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
executeFourthLayer), dim3(gridBlock),dim3(threadBlock), 0, 0, Layer4_Neurons_GPU,Layer4_Weights_GPU,Layer5_Neurons_GPU);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | b2a5603bd393752540e33730779b2801a09e37d1.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "executeFourthLayer.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *Layer4_Neurons_GPU = NULL;
cudaMalloc(&Layer4_Neurons_GPU, XSIZE*YSIZE);
float *Layer4_Weights_GPU = NULL;
cudaMalloc(&Layer4_Weights_GPU, XSIZE*YSIZE);
float *Layer5_Neurons_GPU = NULL;
cudaMalloc(&Layer5_Neurons_GPU, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
executeFourthLayer<<<gridBlock,threadBlock>>>(Layer4_Neurons_GPU,Layer4_Weights_GPU,Layer5_Neurons_GPU);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
executeFourthLayer<<<gridBlock,threadBlock>>>(Layer4_Neurons_GPU,Layer4_Weights_GPU,Layer5_Neurons_GPU);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
executeFourthLayer<<<gridBlock,threadBlock>>>(Layer4_Neurons_GPU,Layer4_Weights_GPU,Layer5_Neurons_GPU);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
39453d01ce644384ae5936243fd7c85c4e653e88.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//https://github.com/flyingwolfz/point-oriented-algorithms-CGH-cuda
#include <stdio.h>
#include <stdlib.h>
#include<cuda_runtime.h>
#include "device_launch_parameters.h"
__global__ void test(float *dimage, float *dholo )
{
int h = blockIdx.x*blockDim.x + threadIdx.x;
int l = blockIdx.y*blockDim.y + threadIdx.y;
float x = (float)h;
float y = (float)l;
float hx = (x - 512.0)*0.008;
float hy = (y - 512.0)*0.008;
float lambda = 639.0 * 0.000001;
float d = 0.008*10.0, holo = 0.0, c = 0.0, s = 0.0;
float ii = 0.0, jj = 0.0;
long int prt = h + l * 1024;
register float xx;
register float yy,image,r,phi;
register int k=0;
float dep = 0.0f;
for (register int i = 0; i < 100; i++)
{
for (register int j = 0; j < 100; j++)
{
xx = ((float)i - 50.0)*d;
yy = ((float)j - 50.0)*d;
image = dimage[k];
dep = 300;
k++;
r = (hx - xx)*(hx - xx) + (hy - yy)*(hy - yy) + dep*dep;
phi = 2 * 3.14159 / lambda * sqrt(r);
c = c+ image * cos(phi);
s = s+ image * sin(phi);
}
}
float jiao = atan2(s, c);
if (jiao < 0)
{
jiao = jiao + 2.0 * 3.14159;
}
dholo[prt] = jiao / 2.0 / 3.14159;
}
extern "C" void kernel(float *dimage, float *dholo)
{
dim3 block(32, 32);
dim3 grid(32, 32);
hipLaunchKernelGGL(( test) , dim3(grid), dim3(block) , 0, 0, dimage, dholo);
}
| 39453d01ce644384ae5936243fd7c85c4e653e88.cu | //https://github.com/flyingwolfz/point-oriented-algorithms-CGH-cuda
#include <stdio.h>
#include <stdlib.h>
#include<cuda_runtime.h>
#include "device_launch_parameters.h"
__global__ void test(float *dimage, float *dholo )
{
int h = blockIdx.x*blockDim.x + threadIdx.x;
int l = blockIdx.y*blockDim.y + threadIdx.y;
float x = (float)h;
float y = (float)l;
float hx = (x - 512.0)*0.008;
float hy = (y - 512.0)*0.008;
float lambda = 639.0 * 0.000001;
float d = 0.008*10.0, holo = 0.0, c = 0.0, s = 0.0;
float ii = 0.0, jj = 0.0;
long int prt = h + l * 1024;
register float xx;
register float yy,image,r,phi;
register int k=0;
float dep = 0.0f;
for (register int i = 0; i < 100; i++)
{
for (register int j = 0; j < 100; j++)
{
xx = ((float)i - 50.0)*d;
yy = ((float)j - 50.0)*d;
image = dimage[k];
dep = 300;
k++;
r = (hx - xx)*(hx - xx) + (hy - yy)*(hy - yy) + dep*dep;
phi = 2 * 3.14159 / lambda * sqrt(r);
c = c+ image * cos(phi);
s = s+ image * sin(phi);
}
}
float jiao = atan2(s, c);
if (jiao < 0)
{
jiao = jiao + 2.0 * 3.14159;
}
dholo[prt] = jiao / 2.0 / 3.14159;
}
extern "C" void kernel(float *dimage, float *dholo)
{
dim3 block(32, 32);
dim3 grid(32, 32);
test <<<grid, block >>> (dimage, dholo);
}
|
26ffca8229279cd28e2cd231e7eee3e920a9e326.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include <sys/time.h>
//-----------------------------------------------------------------------------
// GpuConstantsPackage: a struct to hold many constants (including pointers
// to allocated memory on the device) that can be
// uploaded all at once. Placing this in the "constants
// cache" is a convenient and performant way of handling
// constant information on the GPU.
//-----------------------------------------------------------------------------
struct GpuConstantsPackage {
int nparticle;
int* partType;
int* gblbpos;
float* partX;
float* partY;
float* partZ;
float* partFrcX;
float* partFrcY;
float* partFrcZ;
float* partQ;
float* Etot;
};
typedef struct GpuConstantsPackage cribSheet;
// This device constant is available to all functions in this CUDA unit
__device__ __constant__ cribSheet cSh;
//-----------------------------------------------------------------------------
// GpuMirroredInt: a struct holding mirrored int data on both the CPU and the
// GPU. Functions below will operate on this struct
// (because this isn't a workshop on C++)
//-----------------------------------------------------------------------------
struct GpuMirroredInt {
int len; // Length of the array (again, this is not a C++ course)
int IsPinned; // "Pinned" memory is best for Host <= => GPU transfers.
// In fact, if non-pinned memory is transferred to the
// GPU from the host, a temporary allocation of pinned
// memory will be created and then destroyed. Pinned
// memory is not host-pageable, but the only performance
// implication is that creating lots of pinned memory
// may make it harder for the host OS to manage large
// memory jobs.
int* HostData; // Pointer to allocated memory on the host
int* DevcData; // Pointer to allocated memory on the GPU. Note that the
// host can know what the address of memory on the GPU
// is, but it cannot simply de-reference that pointer
// in host code.
};
typedef struct GpuMirroredInt gpuInt;
//-----------------------------------------------------------------------------
// GpuMirroredInt: a struct holding mirrored fp32 data on both the CPU and the
// GPU. Functions below will operate on this struct
// (because this isn't a workshop on C++)
//-----------------------------------------------------------------------------
struct GpuMirroredFloat {
int len; // Length of the array (again, this is not a C++ course)
int IsPinned; // "Pinned" memory is best for Host <==> GPU transfers.
// In fact, if non-pinned memory is transferred to the
// GPU from the host, a temporary allocation of pinned
// memory will be created and then destroyed. Pinned
// memory is not host-pageable, but the only performance
// implication is that creating lots of pinned memory
// may make it harder for the host OS to manage large
// memory jobs.
float* HostData; // Pointer to allocated memory on the host
float* DevcData; // Pointer to allocated memory on the GPU. Note that the
// host can know what the address of memory on the GPU
// is, but it cannot simply de-reference that pointer
// in host code.
};
typedef struct GpuMirroredFloat gpuFloat;
//-----------------------------------------------------------------------------
// InitializeForces: kernel to set all forces in device memory to zero
//-----------------------------------------------------------------------------
__global__ void InitializeForces()
{
int i;
i = (blockIdx.x * blockDim.x) + threadIdx.x;
while (i < cSh.nparticle) {
cSh.partFrcX[i] = (float)0.0;
cSh.partFrcY[i] = (float)0.0;
cSh.partFrcZ[i] = (float)0.0;
i += gridDim.x * blockDim.x;
}
if (threadIdx.x == 0 && blockIdx.x == 0) {
int nstripes = (cSh.nparticle + 31) / 32;
cSh.gblbpos[0] = nstripes - (gridDim.x * (blockDim.x / 32)) - 1;
}
}
//-----------------------------------------------------------------------------
// ParticleSimulator: run a rudimentary simulation of particles
//-----------------------------------------------------------------------------
__global__ void ParticleSimulator()
{
int i;
int warpIdx = threadIdx.x / 32;
int tgx = (threadIdx.x & 31);
// Expanding the earlier method, using a warp counter in global
float qq = (float)0.0;
int nstripes = (cSh.nparticle + 31) / 32;
int bpos = nstripes - (blockIdx.x * (blockDim.x / 32)) - warpIdx - 1;
while (bpos >= 0) {
// Read 32 particles into registers rather than __shared__ memory.
int prtclIdx = 32*bpos + tgx;
float pX, pY, pZ, pQ;
if (prtclIdx < cSh.nparticle) {
pX = cSh.partX[prtclIdx];
pY = cSh.partY[prtclIdx];
pZ = cSh.partZ[prtclIdx];
pQ = cSh.partQ[prtclIdx];
}
else {
pX = (float)10000.0 + (float)(prtclIdx);
pY = (float)10000.0 + (float)(prtclIdx);
pZ = (float)10000.0 + (float)(prtclIdx);
pQ = (float)0.0;
}
// Loop over all particle pairs in the lower half triangle as before
int tpos = 0;
while (tpos <= bpos) {
// Initialize particles as in the outer loop
int prtclIdx = 32*tpos + tgx;
float tX, tY, tZ, tQ;
if (prtclIdx < cSh.nparticle) {
tX = cSh.partX[prtclIdx];
tY = cSh.partY[prtclIdx];
tZ = cSh.partZ[prtclIdx];
tQ = cSh.partQ[prtclIdx];
}
else {
// The offsets for particle positions must run along a different
// (parallel, but distinct) line so that not even dummy particles
// can ever occupy the same positions and cause a divide-by-zero.
// As before, the charge of the dummy particles is zero.
tX = (float)10100.0 + (float)(prtclIdx);
tY = (float)10200.0 + (float)(prtclIdx);
tZ = (float)10300.0 + (float)(prtclIdx);
tQ = (float)0.0;
}
// Initialize tile force accumulators
float sfpX = (float)0.0;
float sfpY = (float)0.0;
float sfpZ = (float)0.0;
float sftX = (float)0.0;
float sftY = (float)0.0;
float sftZ = (float)0.0;
// Indexing gets a bit more complex. Again, if we are on a
// diagonal tile skip the first iteration of the loop, as
      // both sets of 32 particles are the same.
int imin = (bpos == tpos);
float anti2xCountingFactor = (bpos == tpos) ? (float)0.5 : (float)1.0;
for (i = imin; i < 32; i++) {
// Find the thread to query
int j = tgx + i;
j -= (j >= 32) * 32;
// Compute the interaction
float dx = __shfl_sync(0xffffffff, tX, j) - pX;
float dy = __shfl_sync(0xffffffff, tY, j) - pY;
float dz = __shfl_sync(0xffffffff, tZ, j) - pZ;
float r2 = dx*dx + dy*dy + dz*dz;
float r = sqrt(r2);
float qfac = anti2xCountingFactor *
__shfl_sync(0xffffffff, tQ, j) * pQ;
        qq += qfac / r;
// Log the interaction on this thread
float fmag = qfac / (r2 * r);
float fx = dx * fmag;
float fy = dy * fmag;
float fz = dz * fmag;
sfpX -= fx;
sfpY -= fy;
sfpZ -= fz;
// Find the other thread that queried this one.
// __shfl_sync contains a warp synchronization
// instruction, so no __syncwarp() is needed.
int k = tgx - i;
k += (k < 0) * 32;
sftX += __shfl_sync(0xffffffff, fx, k);
sftY += __shfl_sync(0xffffffff, fy, k);
sftZ += __shfl_sync(0xffffffff, fz, k);
}
// Contribute the tile force accumulations atomically to global memory
// (DRAM). This is only about 2x slower than atomic accumulation to
// __shared__. Accumulating things like this atomically to __shared__
// would make the kernel run only about 30% slower than accumulating
// them in an unsafe manner, willy-nilly. Fast atomics to global are
// a tremendous accomplishment by NVIDIA engineers!
//
// Note, the correspondence between 32*bpos + tgx or 32*tpos + tgx
// and 32*warpIdx + tgx. 32*warpIdx + tgx is, again, threadIdx.x.
atomicAdd(&cSh.partFrcX[32*bpos + tgx], sfpX);
atomicAdd(&cSh.partFrcY[32*bpos + tgx], sfpY);
atomicAdd(&cSh.partFrcZ[32*bpos + tgx], sfpZ);
atomicAdd(&cSh.partFrcX[32*tpos + tgx], sftX);
atomicAdd(&cSh.partFrcY[32*tpos + tgx], sftY);
atomicAdd(&cSh.partFrcZ[32*tpos + tgx], sftZ);
// Increment the tile counter
tpos++;
}
// Increment stripe counter
if (tgx == 0) {
bpos = atomicAdd(&cSh.gblbpos[0], -1);
}
bpos = __shfl_sync(0xffffffff, bpos, 0);
}
// Reduce the energy contributions in each warp.
// Add the warp contribution to the global sum.
qq += __shfl_down_sync(0xffffffff, qq, 16);
qq += __shfl_down_sync(0xffffffff, qq, 8);
qq += __shfl_down_sync(0xffffffff, qq, 4);
qq += __shfl_down_sync(0xffffffff, qq, 2);
qq += __shfl_down_sync(0xffffffff, qq, 1);
if (tgx == 0) {
atomicAdd(&cSh.Etot[0], qq);
}
}
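//-----------------------------------------------------------------------------
// Worked example of the tiling in ParticleSimulator (illustrative): with
// nparticle = 128 there are nstripes = 4 stripes of 32 particles, and the
// warp owning stripe bpos loops over tiles tpos = 0 .. bpos, i.e. the lower
// triangle (1,0), (2,0), (2,1), (3,0), ... plus the diagonal tiles (0,0),
// (1,1), ... . Within a tile, lane tgx reads partner j = (tgx + i) mod 32
// through __shfl_sync, so over the i loop every lane pairs with every lane
// of the other stripe exactly once. On diagonal tiles the self term (i = 0)
// is skipped and each remaining pair is visited twice, which is why its
// contribution is scaled by anti2xCountingFactor = 0.5.
//-----------------------------------------------------------------------------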
//-----------------------------------------------------------------------------
// CreateGpuInt: constructor function for allocating memory in a gpuInt
// instance.
//
// Arguments:
// len: the length of array to allocate
// pin: flag to have the memory pinned (non-pageable on the host side
// for optimal transfer speed to the device)
//-----------------------------------------------------------------------------
gpuInt CreateGpuInt(int len, int pin)
{
gpuInt G;
G.len = len;
G.IsPinned = pin;
// Now that the official length is recorded, upgrade the real length
// to the next convenient multiple of 128, so as to always allocate
// GPU memory in 512-byte blocks. This is for alignment purposes,
// and keeping host to device transfers in line.
len = ((len + 127) / 128) * 128;
if (pin == 1) {
hipHostMalloc((void **)&G.HostData, len * sizeof(int),
hipHostMallocMapped);
}
else {
G.HostData = (int*)malloc(len * sizeof(int));
}
hipMalloc((void **)&G.DevcData, len * sizeof(int));
memset(G.HostData, 0, len * sizeof(int));
hipMemset((void *)G.DevcData, 0, len * sizeof(int));
return G;
}
//-----------------------------------------------------------------------------
// DestroyGpuInt: destructor function for freeing memory in a gpuInt
// instance.
//-----------------------------------------------------------------------------
void DestroyGpuInt(gpuInt *G)
{
if (G->IsPinned == 1) {
hipHostFree(G->HostData);
}
else {
free(G->HostData);
}
hipFree(G->DevcData);
}
//-----------------------------------------------------------------------------
// UploadGpuInt: upload an integer array from the host to the device.
//-----------------------------------------------------------------------------
void UploadGpuInt(gpuInt *G)
{
hipMemcpy(G->DevcData, G->HostData, G->len * sizeof(int),
hipMemcpyHostToDevice);
}
//-----------------------------------------------------------------------------
// DownloadGpuInt: download an integer array from the device to the host.
//-----------------------------------------------------------------------------
void DownloadGpuInt(gpuInt *G)
{
  hipMemcpy(G->HostData, G->DevcData, G->len * sizeof(int),
            hipMemcpyDeviceToHost);
}
//-----------------------------------------------------------------------------
// CreateGpuFloat: constructor function for allocating memory in a gpuFloat
// instance.
//
// Arguments:
// len: the length of array to allocate
// pin: flag to have the memory pinned (non-pageable on the host side
//          for optimal transfer speed to the device)
//-----------------------------------------------------------------------------
gpuFloat CreateGpuFloat(int len, int pin)
{
gpuFloat G;
G.len = len;
G.IsPinned = pin;
// Now that the official length is recorded, upgrade the real length
// to the next convenient multiple of 128, so as to always allocate
// GPU memory in 512-byte blocks. This is for alignment purposes,
// and keeping host to device transfers in line.
len = ((len + 127) / 128) * 128;
if (pin == 1) {
hipHostMalloc((void **)&G.HostData, len * sizeof(float),
hipHostMallocMapped);
}
else {
G.HostData = (float*)malloc(len * sizeof(float));
}
hipMalloc((void **)&G.DevcData, len * sizeof(float));
memset(G.HostData, 0, len * sizeof(float));
hipMemset((void *)G.DevcData, 0, len * sizeof(float));
return G;
}
//-----------------------------------------------------------------------------
// DestroyGpuFloat: destructor function for freeing memory in a gpuFloat
// instance.
//-----------------------------------------------------------------------------
void DestroyGpuFloat(gpuFloat *G)
{
if (G->IsPinned == 1) {
hipHostFree(G->HostData);
}
else {
free(G->HostData);
}
hipFree(G->DevcData);
}
//-----------------------------------------------------------------------------
// UploadGpuFloat: upload a float array from the host to the device.
//-----------------------------------------------------------------------------
void UploadGpuFloat(gpuFloat *G)
{
hipMemcpy(G->DevcData, G->HostData, G->len * sizeof(float),
hipMemcpyHostToDevice);
}
//-----------------------------------------------------------------------------
// DownloadGpuFloat: download a float array from the device to the host.
//-----------------------------------------------------------------------------
void DownloadGpuFloat(gpuFloat *G)
{
  hipMemcpy(G->HostData, G->DevcData, G->len * sizeof(float),
            hipMemcpyDeviceToHost);
}
//-----------------------------------------------------------------------------
// main
//-----------------------------------------------------------------------------
int main()
{
int i, j, k, np;
struct timeval timings[4];
gpuInt gpos;
gpuFloat particleXcoord, particleYcoord, particleZcoord, particleCharge;
gpuFloat particleXfrc, particleYfrc, particleZfrc;
gpuFloat etot;
// Start timing
gettimeofday(&timings[0], NULL);
// Create a small array of particles and populate it
const int pdim = 64;
particleXcoord = CreateGpuFloat(pdim * pdim * pdim, 1);
particleYcoord = CreateGpuFloat(pdim * pdim * pdim, 1);
particleZcoord = CreateGpuFloat(pdim * pdim * pdim, 1);
particleXfrc = CreateGpuFloat(pdim * pdim * pdim, 1);
particleYfrc = CreateGpuFloat(pdim * pdim * pdim, 1);
particleZfrc = CreateGpuFloat(pdim * pdim * pdim, 1);
particleCharge = CreateGpuFloat(pdim * pdim * pdim, 1);
// Allocate and initialize the total energy
// accumulator on the host and on the device.
etot = CreateGpuFloat(1, 1);
gpos = CreateGpuInt(1, 1);
// Initialize random number generator. srand() SEEDS the generator,
// thereafter each call to rand() will return a different number.
  // This is a really bad generator (much better methods with longer
// periods before they start looping back over the same sequence are
// available).
srand(62052);
// Allocate for many particles in a perturbed lattice (to ensure
// that none are going to get too close to one another)
float* xcrd = particleXcoord.HostData;
float* ycrd = particleYcoord.HostData;
float* zcrd = particleZcoord.HostData;
float* qval = particleCharge.HostData;
np = pdim * pdim * pdim;
int prcon = 0;
for (i = 0; i < pdim; i++) {
double di = (double)i + 0.2;
for (j = 0; j < pdim; j++) {
double dj = (double)j + 0.2;
for (k = 0; k < pdim; k++) {
double dk = (double)k + 0.2;
xcrd[prcon] = di + (0.6 * (double)rand() / (double)RAND_MAX);
ycrd[prcon] = dj + (0.6 * (double)rand() / (double)RAND_MAX);
zcrd[prcon] = dk + (0.6 * (double)rand() / (double)RAND_MAX);
qval[prcon] = 0.5 - rand() / (double)RAND_MAX;
prcon++;
}
}
}
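  // Note (descriptive): with the 0.2 offset and a perturbation in [0, 0.6],
  // each coordinate stays within [n + 0.2, n + 0.8] of its lattice index n,
  // so particles in neighboring cells are separated by at least 0.4 along
  // that axis -- this is what keeps r2 safely away from zero below.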
// Start timing
gettimeofday(&timings[1], NULL);
// Compute the result on the CPU
printf("Compute the CPU result:\n");
double qqnrg = 0.0;
float* xfrc = particleXfrc.HostData;
float* yfrc = particleYfrc.HostData;
float* zfrc = particleZfrc.HostData;
for (i = 0; i < np; i++) {
for (j = 0; j < i; j++) {
float dx = xcrd[j] - xcrd[i];
float dy = ycrd[j] - ycrd[i];
float dz = zcrd[j] - zcrd[i];
float r2 = dx*dx + dy*dy + dz*dz;
float r = sqrt(r2);
float qfac = qval[i] * qval[j];
float fmag = qfac / (r2 * r);
xfrc[i] -= dx * fmag;
yfrc[i] -= dy * fmag;
zfrc[i] -= dz * fmag;
xfrc[j] += dx * fmag;
yfrc[j] += dy * fmag;
zfrc[j] += dz * fmag;
qqnrg += qfac / r;
}
if ((i & 31) == 0) {
fprintf(stderr, "\rComputing for particle %7d / %7d", i, np);
fflush(stderr);
}
}
printf("\n");
printf("CPU calculated energy = %9.4lf\n", qqnrg);
for (i = 0; i < np; i += np/32) {
printf("CPU force [ %7d ] = %9.4f %9.4f %9.4f\n", i, xfrc[i], yfrc[i],
zfrc[i]);
}
// Wipe the host-side forces clean, just
// to be certain the GPU is solving them
for (i = 0; i < np; i++) {
xfrc[i] = (float)0.0;
yfrc[i] = (float)0.0;
zfrc[i] = (float)0.0;
}
// Start timing
gettimeofday(&timings[2], NULL);
// Stage critical constants--see cribSheet struct instance cSh above.
cribSheet cnstage;
cnstage.nparticle = np;
cnstage.gblbpos = gpos.DevcData;
cnstage.partX = particleXcoord.DevcData;
cnstage.partY = particleYcoord.DevcData;
cnstage.partZ = particleZcoord.DevcData;
cnstage.partFrcX = particleXfrc.DevcData;
cnstage.partFrcY = particleYfrc.DevcData;
cnstage.partFrcZ = particleZfrc.DevcData;
cnstage.partQ = particleCharge.DevcData;
cnstage.Etot = etot.DevcData;
// Upload all data to the device--note that forces are not getting
// uploaded, as the memory is already allocated. The forces will
// be initialized and computed on the device.
UploadGpuFloat(&particleXcoord);
UploadGpuFloat(&particleYcoord);
UploadGpuFloat(&particleZcoord);
UploadGpuFloat(&particleCharge);
// Upload the constants to the constants cache
hipMemcpyToSymbol(cSh, &cnstage, sizeof(cribSheet));
// Initialize energy and forces, then run the calculation on the
  // GPU. The number of blocks and the thread count in each kernel
// must be consistent, as there is a global counter being set in
// the initialization kernel based on the launch bounds.
etot.HostData[0] = 0.0;
UploadGpuFloat(&etot);
int nblocks = 80;
hipLaunchKernelGGL(( InitializeForces), dim3(nblocks), dim3(1024), 0, 0, );
hipLaunchKernelGGL(( ParticleSimulator), dim3(nblocks), dim3(1024), 0, 0, );
// Download the total energy
DownloadGpuFloat(&etot);
DownloadGpuFloat(&particleXfrc);
DownloadGpuFloat(&particleYfrc);
DownloadGpuFloat(&particleZfrc);
// Device synchronization was handled by the download. Print the output.
printf("GPU calculated energy = %10.4f\n", etot.HostData[0]);
for (i = 0; i < np; i += np/32) {
printf("GPU force [ %7d ] = %9.4f %9.4f %9.4f\n", i, xfrc[i], yfrc[i],
zfrc[i]);
}
// Time for GPU execution (including data transfer)
gettimeofday(&timings[3], NULL);
// Report timings
printf("\n");
double tts = timings[1].tv_sec - timings[0].tv_sec +
(1.0e-6)*(timings[1].tv_usec - timings[0].tv_usec);
printf("Setup time :: %10.4f s\n", tts);
tts = timings[2].tv_sec - timings[1].tv_sec +
(1.0e-6)*(timings[2].tv_usec - timings[1].tv_usec);
printf("CPU solver :: %10.4f s\n", tts);
tts = timings[3].tv_sec - timings[2].tv_sec +
(1.0e-6)*(timings[3].tv_usec - timings[2].tv_usec);
printf("GPU kernel :: %10.4f s\n", tts);
return 0;
}
| 26ffca8229279cd28e2cd231e7eee3e920a9e326.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <math.h>
#include <sys/time.h>
//-----------------------------------------------------------------------------
// GpuConstantsPackage: a struct to hold many constants (including pointers
// to allocated memory on the device) that can be
// uploaded all at once. Placing this in the "constants
// cache" is a convenient and performant way of handling
// constant information on the GPU.
//-----------------------------------------------------------------------------
struct GpuConstantsPackage {
int nparticle;
int* partType;
int* gblbpos;
float* partX;
float* partY;
float* partZ;
float* partFrcX;
float* partFrcY;
float* partFrcZ;
float* partQ;
float* Etot;
};
typedef struct GpuConstantsPackage cribSheet;
// This device constant is available to all functions in this CUDA unit
__device__ __constant__ cribSheet cSh;
//-----------------------------------------------------------------------------
// GpuMirroredInt: a struct holding mirrored int data on both the CPU and the
// GPU. Functions below will operate on this struct
// (because this isn't a workshop on C++)
//-----------------------------------------------------------------------------
struct GpuMirroredInt {
int len; // Length of the array (again, this is not a C++ course)
int IsPinned; // "Pinned" memory is best for Host <= => GPU transfers.
// In fact, if non-pinned memory is transferred to the
// GPU from the host, a temporary allocation of pinned
// memory will be created and then destroyed. Pinned
// memory is not host-pageable, but the only performance
// implication is that creating lots of pinned memory
// may make it harder for the host OS to manage large
// memory jobs.
int* HostData; // Pointer to allocated memory on the host
int* DevcData; // Pointer to allocated memory on the GPU. Note that the
// host can know what the address of memory on the GPU
// is, but it cannot simply de-reference that pointer
// in host code.
};
typedef struct GpuMirroredInt gpuInt;
//-----------------------------------------------------------------------------
// GpuMirroredFloat: a struct holding mirrored fp32 data on both the CPU and the
// GPU. Functions below will operate on this struct
// (because this isn't a workshop on C++)
//-----------------------------------------------------------------------------
struct GpuMirroredFloat {
int len; // Length of the array (again, this is not a C++ course)
int IsPinned; // "Pinned" memory is best for Host <= => GPU transfers.
// In fact, if non-pinned memory is transferred to the
// GPU from the host, a temporary allocation of pinned
// memory will be created and then destroyed. Pinned
// memory is not host-pageable, but the only performance
// implication is that creating lots of pinned memory
// may make it harder for the host OS to manage large
// memory jobs.
float* HostData; // Pointer to allocated memory on the host
float* DevcData; // Pointer to allocated memory on the GPU. Note that the
// host can know what the address of memory on the GPU
// is, but it cannot simply de-reference that pointer
// in host code.
};
typedef struct GpuMirroredFloat gpuFloat;
//-----------------------------------------------------------------------------
// InitializeForces: kernel to set all forces in device memory to zero
//-----------------------------------------------------------------------------
__global__ void InitializeForces()
{
int i;
i = (blockIdx.x * blockDim.x) + threadIdx.x;
while (i < cSh.nparticle) {
cSh.partFrcX[i] = (float)0.0;
cSh.partFrcY[i] = (float)0.0;
cSh.partFrcZ[i] = (float)0.0;
i += gridDim.x * blockDim.x;
}
if (threadIdx.x == 0 && blockIdx.x == 0) {
int nstripes = (cSh.nparticle + 31) / 32;
cSh.gblbpos[0] = nstripes - (gridDim.x * (blockDim.x / 32)) - 1;
}
}
//-----------------------------------------------------------------------------
// ParticleSimulator: run a rudimentary simulation of particles
//-----------------------------------------------------------------------------
__global__ void ParticleSimulator()
{
int i;
int warpIdx = threadIdx.x / 32;
int tgx = (threadIdx.x & 31);
  // Expanding the earlier method, using a warp counter in global memory
float qq = (float)0.0;
int nstripes = (cSh.nparticle + 31) / 32;
int bpos = nstripes - (blockIdx.x * (blockDim.x / 32)) - warpIdx - 1;
while (bpos >= 0) {
// Read 32 particles into registers rather than __shared__ memory.
int prtclIdx = 32*bpos + tgx;
float pX, pY, pZ, pQ;
if (prtclIdx < cSh.nparticle) {
pX = cSh.partX[prtclIdx];
pY = cSh.partY[prtclIdx];
pZ = cSh.partZ[prtclIdx];
pQ = cSh.partQ[prtclIdx];
}
else {
pX = (float)10000.0 + (float)(prtclIdx);
pY = (float)10000.0 + (float)(prtclIdx);
pZ = (float)10000.0 + (float)(prtclIdx);
pQ = (float)0.0;
}
// Loop over all particle pairs in the lower half triangle as before
int tpos = 0;
while (tpos <= bpos) {
// Initialize particles as in the outer loop
int prtclIdx = 32*tpos + tgx;
float tX, tY, tZ, tQ;
if (prtclIdx < cSh.nparticle) {
tX = cSh.partX[prtclIdx];
tY = cSh.partY[prtclIdx];
tZ = cSh.partZ[prtclIdx];
tQ = cSh.partQ[prtclIdx];
}
else {
// The offsets for particle positions must run along a different
// (parallel, but distinct) line so that not even dummy particles
// can ever occupy the same positions and cause a divide-by-zero.
// As before, the charge of the dummy particles is zero.
tX = (float)10100.0 + (float)(prtclIdx);
tY = (float)10200.0 + (float)(prtclIdx);
tZ = (float)10300.0 + (float)(prtclIdx);
tQ = (float)0.0;
}
// Initialize tile force accumulators
float sfpX = (float)0.0;
float sfpY = (float)0.0;
float sfpZ = (float)0.0;
float sftX = (float)0.0;
float sftY = (float)0.0;
float sftZ = (float)0.0;
// Indexing gets a bit more complex. Again, if we are on a
// diagonal tile skip the first iteration of the loop, as
      // both sets of 32 particles are the same.
int imin = (bpos == tpos);
float anti2xCountingFactor = (bpos == tpos) ? (float)0.5 : (float)1.0;
for (i = imin; i < 32; i++) {
// Find the thread to query
int j = tgx + i;
j -= (j >= 32) * 32;
// Compute the interaction
float dx = __shfl_sync(0xffffffff, tX, j) - pX;
float dy = __shfl_sync(0xffffffff, tY, j) - pY;
float dz = __shfl_sync(0xffffffff, tZ, j) - pZ;
float r2 = dx*dx + dy*dy + dz*dz;
float r = sqrt(r2);
float qfac = anti2xCountingFactor *
__shfl_sync(0xffffffff, tQ, j) * pQ;
        qq += qfac / r;
// Log the interaction on this thread
float fmag = qfac / (r2 * r);
float fx = dx * fmag;
float fy = dy * fmag;
float fz = dz * fmag;
sfpX -= fx;
sfpY -= fy;
sfpZ -= fz;
// Find the other thread that queried this one.
// __shfl_sync contains a warp synchronization
// instruction, so no __syncwarp() is needed.
int k = tgx - i;
k += (k < 0) * 32;
sftX += __shfl_sync(0xffffffff, fx, k);
sftY += __shfl_sync(0xffffffff, fy, k);
sftZ += __shfl_sync(0xffffffff, fz, k);
}
// Contribute the tile force accumulations atomically to global memory
// (DRAM). This is only about 2x slower than atomic accumulation to
// __shared__. Accumulating things like this atomically to __shared__
// would make the kernel run only about 30% slower than accumulating
// them in an unsafe manner, willy-nilly. Fast atomics to global are
// a tremendous accomplishment by NVIDIA engineers!
//
// Note, the correspondence between 32*bpos + tgx or 32*tpos + tgx
// and 32*warpIdx + tgx. 32*warpIdx + tgx is, again, threadIdx.x.
atomicAdd(&cSh.partFrcX[32*bpos + tgx], sfpX);
atomicAdd(&cSh.partFrcY[32*bpos + tgx], sfpY);
atomicAdd(&cSh.partFrcZ[32*bpos + tgx], sfpZ);
atomicAdd(&cSh.partFrcX[32*tpos + tgx], sftX);
atomicAdd(&cSh.partFrcY[32*tpos + tgx], sftY);
atomicAdd(&cSh.partFrcZ[32*tpos + tgx], sftZ);
// Increment the tile counter
tpos++;
}
// Increment stripe counter
if (tgx == 0) {
bpos = atomicAdd(&cSh.gblbpos[0], -1);
}
bpos = __shfl_sync(0xffffffff, bpos, 0);
}
// Reduce the energy contributions in each warp.
// Add the warp contribution to the global sum.
qq += __shfl_down_sync(0xffffffff, qq, 16);
qq += __shfl_down_sync(0xffffffff, qq, 8);
qq += __shfl_down_sync(0xffffffff, qq, 4);
qq += __shfl_down_sync(0xffffffff, qq, 2);
qq += __shfl_down_sync(0xffffffff, qq, 1);
if (tgx == 0) {
atomicAdd(&cSh.Etot[0], qq);
}
}
//-----------------------------------------------------------------------------
// CreateGpuInt: constructor function for allocating memory in a gpuInt
// instance.
//
// Arguments:
// len: the length of array to allocate
// pin: flag to have the memory pinned (non-pageable on the host side
// for optimal transfer speed to the device)
//-----------------------------------------------------------------------------
gpuInt CreateGpuInt(int len, int pin)
{
gpuInt G;
G.len = len;
G.IsPinned = pin;
// Now that the official length is recorded, upgrade the real length
// to the next convenient multiple of 128, so as to always allocate
// GPU memory in 512-byte blocks. This is for alignment purposes,
// and keeping host to device transfers in line.
len = ((len + 127) / 128) * 128;
if (pin == 1) {
cudaHostAlloc((void **)&G.HostData, len * sizeof(int),
cudaHostAllocMapped);
}
else {
G.HostData = (int*)malloc(len * sizeof(int));
}
cudaMalloc((void **)&G.DevcData, len * sizeof(int));
memset(G.HostData, 0, len * sizeof(int));
cudaMemset((void *)G.DevcData, 0, len * sizeof(int));
return G;
}
//-----------------------------------------------------------------------------
// DestroyGpuInt: destructor function for freeing memory in a gpuInt
// instance.
//-----------------------------------------------------------------------------
void DestroyGpuInt(gpuInt *G)
{
if (G->IsPinned == 1) {
cudaFreeHost(G->HostData);
}
else {
free(G->HostData);
}
cudaFree(G->DevcData);
}
//-----------------------------------------------------------------------------
// UploadGpuInt: upload an integer array from the host to the device.
//-----------------------------------------------------------------------------
void UploadGpuInt(gpuInt *G)
{
cudaMemcpy(G->DevcData, G->HostData, G->len * sizeof(int),
cudaMemcpyHostToDevice);
}
//-----------------------------------------------------------------------------
// DownloadGpuInt: download an integer array from the device to the host.
//-----------------------------------------------------------------------------
void DownloadGpuInt(gpuInt *G)
{
  cudaMemcpy(G->HostData, G->DevcData, G->len * sizeof(int),
             cudaMemcpyDeviceToHost);
}
//-----------------------------------------------------------------------------
// CreateGpuFloat: constructor function for allocating memory in a gpuFloat
// instance.
//
// Arguments:
// len: the length of array to allocate
// pin: flag to have the memory pinned (non-pageable on the host side
//          for optimal transfer speed to the device)
//-----------------------------------------------------------------------------
gpuFloat CreateGpuFloat(int len, int pin)
{
gpuFloat G;
G.len = len;
G.IsPinned = pin;
// Now that the official length is recorded, upgrade the real length
// to the next convenient multiple of 128, so as to always allocate
// GPU memory in 512-byte blocks. This is for alignment purposes,
// and keeping host to device transfers in line.
len = ((len + 127) / 128) * 128;
if (pin == 1) {
cudaHostAlloc((void **)&G.HostData, len * sizeof(float),
cudaHostAllocMapped);
}
else {
G.HostData = (float*)malloc(len * sizeof(float));
}
cudaMalloc((void **)&G.DevcData, len * sizeof(float));
memset(G.HostData, 0, len * sizeof(float));
cudaMemset((void *)G.DevcData, 0, len * sizeof(float));
return G;
}
//-----------------------------------------------------------------------------
// DestroyGpuFloat: destructor function for freeing memory in a gpuFloat
// instance.
//-----------------------------------------------------------------------------
void DestroyGpuFloat(gpuFloat *G)
{
if (G->IsPinned == 1) {
cudaFreeHost(G->HostData);
}
else {
free(G->HostData);
}
cudaFree(G->DevcData);
}
//-----------------------------------------------------------------------------
// UploadGpuFloat: upload a float array from the host to the device.
//-----------------------------------------------------------------------------
void UploadGpuFloat(gpuFloat *G)
{
cudaMemcpy(G->DevcData, G->HostData, G->len * sizeof(float),
cudaMemcpyHostToDevice);
}
//-----------------------------------------------------------------------------
// DownloadGpuFloat: download a float array from the device to the host.
//-----------------------------------------------------------------------------
void DownloadGpuFloat(gpuFloat *G)
{
  cudaMemcpy(G->HostData, G->DevcData, G->len * sizeof(float),
             cudaMemcpyDeviceToHost);
}
//-----------------------------------------------------------------------------
// main
//-----------------------------------------------------------------------------
int main()
{
int i, j, k, np;
struct timeval timings[4];
gpuInt gpos;
gpuFloat particleXcoord, particleYcoord, particleZcoord, particleCharge;
gpuFloat particleXfrc, particleYfrc, particleZfrc;
gpuFloat etot;
// Start timing
gettimeofday(&timings[0], NULL);
// Create a small array of particles and populate it
const int pdim = 64;
particleXcoord = CreateGpuFloat(pdim * pdim * pdim, 1);
particleYcoord = CreateGpuFloat(pdim * pdim * pdim, 1);
particleZcoord = CreateGpuFloat(pdim * pdim * pdim, 1);
particleXfrc = CreateGpuFloat(pdim * pdim * pdim, 1);
particleYfrc = CreateGpuFloat(pdim * pdim * pdim, 1);
particleZfrc = CreateGpuFloat(pdim * pdim * pdim, 1);
particleCharge = CreateGpuFloat(pdim * pdim * pdim, 1);
// Allocate and initialize the total energy
// accumulator on the host and on the device.
etot = CreateGpuFloat(1, 1);
gpos = CreateGpuInt(1, 1);
// Initialize random number generator. srand() SEEDS the generator,
// thereafter each call to rand() will return a different number.
  // This is a really bad generator (much better methods with longer
// periods before they start looping back over the same sequence are
// available).
srand(62052);
// Allocate for many particles in a perturbed lattice (to ensure
// that none are going to get too close to one another)
float* xcrd = particleXcoord.HostData;
float* ycrd = particleYcoord.HostData;
float* zcrd = particleZcoord.HostData;
float* qval = particleCharge.HostData;
np = pdim * pdim * pdim;
int prcon = 0;
for (i = 0; i < pdim; i++) {
double di = (double)i + 0.2;
for (j = 0; j < pdim; j++) {
double dj = (double)j + 0.2;
for (k = 0; k < pdim; k++) {
double dk = (double)k + 0.2;
xcrd[prcon] = di + (0.6 * (double)rand() / (double)RAND_MAX);
ycrd[prcon] = dj + (0.6 * (double)rand() / (double)RAND_MAX);
zcrd[prcon] = dk + (0.6 * (double)rand() / (double)RAND_MAX);
qval[prcon] = 0.5 - rand() / (double)RAND_MAX;
prcon++;
}
}
}
// Start timing
gettimeofday(&timings[1], NULL);
// Compute the result on the CPU
printf("Compute the CPU result:\n");
double qqnrg = 0.0;
float* xfrc = particleXfrc.HostData;
float* yfrc = particleYfrc.HostData;
float* zfrc = particleZfrc.HostData;
for (i = 0; i < np; i++) {
for (j = 0; j < i; j++) {
float dx = xcrd[j] - xcrd[i];
float dy = ycrd[j] - ycrd[i];
float dz = zcrd[j] - zcrd[i];
float r2 = dx*dx + dy*dy + dz*dz;
float r = sqrt(r2);
float qfac = qval[i] * qval[j];
float fmag = qfac / (r2 * r);
xfrc[i] -= dx * fmag;
yfrc[i] -= dy * fmag;
zfrc[i] -= dz * fmag;
xfrc[j] += dx * fmag;
yfrc[j] += dy * fmag;
zfrc[j] += dz * fmag;
qqnrg += qfac / r;
}
if ((i & 31) == 0) {
fprintf(stderr, "\rComputing for particle %7d / %7d", i, np);
fflush(stderr);
}
}
printf("\n");
printf("CPU calculated energy = %9.4lf\n", qqnrg);
for (i = 0; i < np; i += np/32) {
printf("CPU force [ %7d ] = %9.4f %9.4f %9.4f\n", i, xfrc[i], yfrc[i],
zfrc[i]);
}
// Wipe the host-side forces clean, just
// to be certain the GPU is solving them
for (i = 0; i < np; i++) {
xfrc[i] = (float)0.0;
yfrc[i] = (float)0.0;
zfrc[i] = (float)0.0;
}
// Start timing
gettimeofday(&timings[2], NULL);
// Stage critical constants--see cribSheet struct instance cSh above.
cribSheet cnstage;
cnstage.nparticle = np;
cnstage.gblbpos = gpos.DevcData;
cnstage.partX = particleXcoord.DevcData;
cnstage.partY = particleYcoord.DevcData;
cnstage.partZ = particleZcoord.DevcData;
cnstage.partFrcX = particleXfrc.DevcData;
cnstage.partFrcY = particleYfrc.DevcData;
cnstage.partFrcZ = particleZfrc.DevcData;
cnstage.partQ = particleCharge.DevcData;
cnstage.Etot = etot.DevcData;
// Upload all data to the device--note that forces are not getting
// uploaded, as the memory is already allocated. The forces will
// be initialized and computed on the device.
UploadGpuFloat(&particleXcoord);
UploadGpuFloat(&particleYcoord);
UploadGpuFloat(&particleZcoord);
UploadGpuFloat(&particleCharge);
// Upload the constants to the constants cache
cudaMemcpyToSymbol(cSh, &cnstage, sizeof(cribSheet));
// Initialize energy and forces, then run the calculation on the
  // GPU. The number of blocks and the thread count in each kernel
// must be consistent, as there is a global counter being set in
// the initialization kernel based on the launch bounds.
etot.HostData[0] = 0.0;
UploadGpuFloat(&etot);
int nblocks = 80;
InitializeForces<<<nblocks, 1024>>>();
ParticleSimulator<<<nblocks, 1024>>>();
// Download the total energy
DownloadGpuFloat(&etot);
DownloadGpuFloat(&particleXfrc);
DownloadGpuFloat(&particleYfrc);
DownloadGpuFloat(&particleZfrc);
// Device synchronization was handled by the download. Print the output.
printf("GPU calculated energy = %10.4f\n", etot.HostData[0]);
for (i = 0; i < np; i += np/32) {
printf("GPU force [ %7d ] = %9.4f %9.4f %9.4f\n", i, xfrc[i], yfrc[i],
zfrc[i]);
}
// Time for GPU execution (including data transfer)
gettimeofday(&timings[3], NULL);
// Report timings
printf("\n");
double tts = timings[1].tv_sec - timings[0].tv_sec +
(1.0e-6)*(timings[1].tv_usec - timings[0].tv_usec);
printf("Setup time :: %10.4f s\n", tts);
tts = timings[2].tv_sec - timings[1].tv_sec +
(1.0e-6)*(timings[2].tv_usec - timings[1].tv_usec);
printf("CPU solver :: %10.4f s\n", tts);
tts = timings[3].tv_sec - timings[2].tv_sec +
(1.0e-6)*(timings[3].tv_usec - timings[2].tv_usec);
printf("GPU kernel :: %10.4f s\n", tts);
return 0;
}
|
dce38eb4ac7f6f258e6031c6a45f59153ee32c9b.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hip/hip_runtime.h>
#include <algorithm>
#include <hipcub/hipcub.hpp>
#include <iostream>
#include <utility>
#include <vector>
#include "HugeCTR/include/common.hpp"
#include "HugeCTR/include/data_simulator.hpp"
#include "HugeCTR/include/embeddings/hybrid_embedding/data.hpp"
#include "HugeCTR/include/embeddings/hybrid_embedding/infrequent_embedding.hpp"
#include "HugeCTR/include/embeddings/hybrid_embedding/model.hpp"
#include "HugeCTR/include/embeddings/hybrid_embedding/update.cuh"
#include "HugeCTR/include/embeddings/hybrid_embedding/utils.cuh"
#include "HugeCTR/include/embeddings/hybrid_embedding/utils.hpp"
#include "HugeCTR/include/shuffle/shuffle.cuh"
#include "HugeCTR/include/tensor2.hpp"
#include "HugeCTR/include/utils.hpp"
namespace HugeCTR {
namespace hybrid_embedding {
namespace infrequent_embedding_kernels {
template <typename dtype, typename emtype>
__global__ void hier_update_model(
const uint32_t* __restrict__ model_indices, const uint32_t* __restrict__ model_indices_offsets,
const dtype* __restrict__ samples, const dtype* __restrict__ category_location,
const emtype* __restrict__ gradients, float* __restrict__ embedding_vectors,
uint32_t embedding_vec_size, uint32_t num_instances, uint32_t local_samples_size,
uint32_t local_comm_buff_size, const float* __restrict__ lr_ptr, const float scale) {
float lr = __ldg(lr_ptr) / scale;
const uint32_t num_indices = model_indices_offsets[num_instances];
// Load offset only when the network_id changes
uint32_t previous_network_id = 0;
uint32_t offset = 0;
for (uint32_t i = blockIdx.x; i < num_indices; i += gridDim.x) {
uint32_t index = model_indices[i];
dtype category = samples[index];
dtype location = category_location[2 * category + 1];
uint32_t network_id = index / local_samples_size;
if (network_id != previous_network_id) {
offset = model_indices_offsets[network_id];
previous_network_id = network_id;
}
atomicAdd(
embedding_vectors + location * embedding_vec_size + threadIdx.x,
-lr * TypeConvertFunc<float, emtype>::convert(
gradients[embedding_vec_size * (network_id * local_comm_buff_size + i - offset) +
threadIdx.x]));
}
}
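// Note (descriptive): in hier_update_model above, the gradients received from
// network instance network_id are taken to occupy a contiguous slot of
// local_comm_buff_size embedding vectors in the flat `gradients` buffer, so
// the source row is network_id * local_comm_buff_size + (i - offset), where
// offset is that instance's start inside model_indices. The offset is only
// re-read when network_id changes, which is valid because model_indices is
// ordered by sample index and therefore by network id.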
template <typename dtype, typename emtype>
__global__ void infrequent_update_model_direct(
const emtype* const* __restrict__ gradients_pointers, float* embedding_vectors,
const uint32_t* __restrict__ model_indices, const uint32_t* __restrict__ model_indices_offsets,
const dtype* __restrict__ samples, const dtype* __restrict__ category_location,
uint32_t num_instances, uint32_t model_id, uint32_t embedding_vec_size,
uint32_t local_samples_size, const float* __restrict__ lr_ptr, const float scale) {
float lr = __ldg(lr_ptr) / scale;
// Shift pattern
const uint32_t offset = __ldg(model_indices_offsets + model_id + 1);
const uint32_t num_model_indices = __ldg(model_indices_offsets + num_instances);
for (uint32_t i = blockIdx.x; i < num_model_indices; i += gridDim.x) {
uint32_t vid = (i + offset) % num_model_indices;
uint32_t index = model_indices[vid];
uint32_t network_id = index / local_samples_size;
uint32_t local_index = index % local_samples_size;
dtype category = samples[index];
uint32_t location = category_location[2 * category + 1];
const emtype* gradients = gradients_pointers[network_id];
atomicAdd(embedding_vectors + location * embedding_vec_size + threadIdx.x,
-lr * TypeConvertFunc<float, emtype>::convert(
gradients[local_index * embedding_vec_size + threadIdx.x]));
}
}
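// Note (descriptive): the "shift pattern" above rotates each model instance's
// starting point in the index list, vid = (i + offset) % num_model_indices
// with offset = model_indices_offsets[model_id + 1], so different instances
// begin by touching the gradient buffers of different networks instead of all
// starting on the same one, while still covering every model index once.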
template <typename dtype>
__global__ void calculate_network_indices_mask(const dtype* __restrict__ local_samples,
const dtype* __restrict__ category_location,
bool* mask, uint32_t local_samples_size,
uint32_t num_instances) {
for (uint32_t i = blockIdx.x * blockDim.x + threadIdx.x; i < local_samples_size;
i += gridDim.x * blockDim.x) {
dtype category = local_samples[i];
uint32_t model_id = static_cast<uint32_t>(category_location[2 * category]);
for (uint32_t section_id = 0; section_id < num_instances; section_id++) {
mask[local_samples_size * section_id + i] = (model_id == section_id);
}
}
}
} // namespace infrequent_embedding_kernels
template <typename dtype, typename emtype>
InfrequentEmbedding<dtype, emtype>::InfrequentEmbedding(const Data<dtype>& data_train,
const Data<dtype>& data_evaluate,
const Model<dtype>& model,
const GPUResource& gpu_resource,
uint32_t embedding_vec_size)
: model_(model),
data_train_(data_train),
data_evaluate_(data_evaluate),
data_(data_train), // Temporary
gpu_resource(gpu_resource),
embedding_vec_size_(embedding_vec_size) {
auto buf = GeneralBuffer2<CudaAllocator>::create();
auto managed_buf = GeneralBuffer2<CudaManagedAllocator>::create();
size_t universe_batch_size = ::max(data_train.batch_size, data_evaluate.batch_size);
buf->reserve({ceildiv<size_t>(model.num_categories, model.num_instances), embedding_vec_size_},
&infrequent_embedding_vectors_);
buf->reserve({universe_batch_size, data_train.table_sizes.size()}, &model_indices_);
managed_buf->reserve({model.num_instances + 1, 1}, &model_indices_offsets_);
buf->reserve({model_.num_instances}, &model_indices_sizes_);
buf->reserve({model_.num_instances},
&model_indices_sizes_ptrs_); // TODO: should be local instances
buf->reserve(
{ceildiv<size_t>(universe_batch_size, model.num_instances), data_train.table_sizes.size()},
&network_indices_);
managed_buf->reserve({model.num_instances + 1, 1}, &network_indices_offsets_);
buf->reserve({model_.num_instances}, &network_indices_sizes_);
buf->reserve({model_.num_instances}, &network_indices_sizes_ptrs_);
// Temporary storage
calculate_model_indices_temp_storage_bytes();
calculate_network_indices_temp_storage_bytes();
buf->reserve({model_indices_temp_storage_bytes, 1}, &model_indices_temp_storage_);
buf->reserve({network_indices_temp_storage_bytes, 1}, &network_indices_temp_storage_);
buf->reserve({model.num_instances, 1}, &interaction_layer_input_pointers_train_);
buf->reserve({model.num_instances, 1}, &interaction_layer_input_pointers_eval_);
buf->reserve({model.num_instances, 1}, &gradients_pointers_);
buf->allocate();
managed_buf->allocate();
int current_device;
CK_CUDA_THROW_(hipGetDevice(¤t_device));
CK_CUDA_THROW_(hipMemAdvise(managed_buf->get_ptr(), managed_buf->get_size_in_bytes(),
hipMemAdviseSetReadMostly, current_device));
}
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::initialize_embedding_vectors() {
CudaDeviceContext context(gpu_resource.get_device_id());
const size_t num_tables = data_.table_sizes.size();
for (size_t i = 0; i < num_tables; i++) {
float up_bound = sqrt(1.f / data_.table_sizes[i]);
const size_t offset = embedding_vec_size_ * model_.h_infrequent_model_table_offsets[i];
const size_t number_of_vectors =
model_.h_infrequent_model_table_offsets[i + 1] - model_.h_infrequent_model_table_offsets[i];
UniformGenerator::fill(
infrequent_embedding_vectors_.get_ptr() + offset, embedding_vec_size_ * number_of_vectors,
-up_bound, up_bound, gpu_resource.get_sm_count(),
gpu_resource.get_replica_variant_curand_generator(), gpu_resource.get_stream());
}
}
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::forward_model(emtype* message_buffer,
hipStream_t stream) {
auto model_indices = model_indices_.get_ptr();
auto samples = data_.samples.get_ptr();
auto category_location = model_.category_location.get_ptr();
auto infrequent_embedding_vectors = infrequent_embedding_vectors_.get_ptr();
auto embedding_vec_size = embedding_vec_size_;
auto copy_desc = CopyDescriptors::make_OneToOne<float, emtype, 1>(
model_indices_offsets_.get_ptr() + model_.num_instances, embedding_vec_size,
[=] __device__(size_t i) -> CopyDescriptors::CopyDetails<float, emtype, 1> {
uint32_t index = model_indices[i];
dtype category = samples[index];
dtype location = category_location[2 * category + 1];
return {infrequent_embedding_vectors + location * embedding_vec_size,
{message_buffer + i * embedding_vec_size},
{true}};
});
shuffle(copy_desc, stream, data_.samples.get_num_elements() / model_.num_instances / 8);
CK_CUDA_THROW_(hipPeekAtLastError());
}
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::fused_intra_forward_model(emtype** message_buffer,
hipStream_t stream) {
auto model_indices = model_indices_.get_ptr();
auto model_indices_offsets = model_indices_offsets_.get_ptr();
auto samples = data_.samples.get_ptr();
auto category_location = model_.category_location.get_ptr();
auto infrequent_embedding_vectors = infrequent_embedding_vectors_.get_ptr();
size_t embedding_vec_size = embedding_vec_size_;
auto local_instance_id = model_.instance_id;
auto num_instances = model_.num_instances;
auto per_node_instances = num_instances / model_.h_num_instances_per_node.size();
uint32_t local_samples_size =
ceildiv<uint32_t>(data_.batch_size, num_instances) * data_.table_sizes.size();
uint32_t local_comm_buff_size =
ceildiv<uint32_t>(max_num_infrequent_per_batch_, model_.num_instances);
auto copy_desc = CopyDescriptors::make_OneToOne<float, emtype, 1>(
model_indices_offsets_.get_ptr() + num_instances, embedding_vec_size,
[=] __device__(size_t i) -> CopyDescriptors::CopyDetails<float, emtype, 1> {
uint32_t num_selected = model_indices_offsets[num_instances];
uint32_t vid = (i + model_indices_offsets[(local_instance_id + 1) % per_node_instances]) %
num_selected;
uint32_t index = model_indices[vid];
uint32_t network_id = (index / local_samples_size);
dtype category = samples[index];
dtype location = category_location[2 * category + 1];
uint32_t local_network_id = (network_id % per_node_instances);
emtype* output_ptr =
&message_buffer[local_network_id][(network_id - local_network_id + local_instance_id) *
local_comm_buff_size * embedding_vec_size];
return {infrequent_embedding_vectors + location * embedding_vec_size,
{output_ptr + (vid - model_indices_offsets[network_id]) * embedding_vec_size},
{true}};
});
shuffle(copy_desc, stream, data_.samples.get_num_elements() / model_.num_instances / 8);
CK_CUDA_THROW_(hipPeekAtLastError());
}
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::forward_network(const emtype* message_buffer,
emtype* interaction_layer_input,
hipStream_t stream) {
auto network_indices = network_indices_.get_ptr();
auto embedding_vec_size = embedding_vec_size_;
auto copy_desc = CopyDescriptors::make_OneToOne<emtype, emtype, 1>(
network_indices_offsets_.get_ptr() + model_.num_instances, embedding_vec_size,
[=] __device__(size_t i) -> CopyDescriptors::CopyDetails<emtype, emtype, 1> {
uint32_t index = network_indices[i];
return {message_buffer + i * embedding_vec_size,
{interaction_layer_input + index * embedding_vec_size},
{true}};
});
shuffle(copy_desc, stream, data_.samples.get_num_elements() / model_.num_instances / 8);
CK_CUDA_THROW_(hipPeekAtLastError());
}
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::hier_forward_network(const emtype* message_buffer,
emtype* interaction_layer_input,
hipStream_t stream) {
auto network_indices = network_indices_.get_ptr();
auto network_indices_offsets = network_indices_offsets_.get_ptr();
auto embedding_vec_size = embedding_vec_size_;
uint32_t local_samples_size =
ceildiv<uint32_t>(data_.batch_size, model_.num_instances) * data_.table_sizes.size();
uint32_t local_comm_buff_size =
ceildiv<uint32_t>(max_num_infrequent_per_batch_, model_.num_instances);
auto copy_desc = CopyDescriptors::make_OneToOne<emtype, emtype, 1>(
network_indices_offsets_.get_ptr() + model_.num_instances, embedding_vec_size,
[=] __device__(size_t i) -> CopyDescriptors::CopyDetails<emtype, emtype, 1> {
uint32_t index = network_indices[i];
// Find model id and offset
uint32_t model_id = 0;
uint32_t offset = 0;
uint32_t next_offset = network_indices_offsets[1];
while (next_offset <= i) {
offset = next_offset;
model_id++;
next_offset = network_indices_offsets[model_id + 1];
}
return {
message_buffer + (model_id * local_comm_buff_size + i - offset) * embedding_vec_size,
{interaction_layer_input + index * embedding_vec_size},
{true}};
});
shuffle(copy_desc, stream, data_.samples.get_num_elements() / model_.num_instances / 8);
CK_CUDA_THROW_(hipPeekAtLastError());
}
/** Forward network for single GPU (no communications) */
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::forward_network_direct(bool is_train,
hipStream_t stream) {
const uint32_t num_instances = model_.num_instances;
const uint32_t model_id = model_.global_instance_id;
uint32_t local_samples_size =
ceildiv<uint32_t>(data_.batch_size, num_instances) * data_.table_sizes.size();
auto interaction_layer_input_pointers = is_train
? interaction_layer_input_pointers_train_.get_ptr()
: interaction_layer_input_pointers_eval_.get_ptr();
auto model_indices = model_indices_.get_ptr();
auto model_indices_offsets = model_indices_offsets_.get_ptr();
auto category_location = model_.category_location.get_ptr();
auto samples = data_.samples.get_ptr();
auto model_table = infrequent_embedding_vectors_.get_ptr();
auto embedding_vec_size = embedding_vec_size_;
auto copy_desc = CopyDescriptors::make_OneToOne<float, emtype, 1>(
model_indices_offsets + num_instances, embedding_vec_size,
[=] __device__(size_t i) -> CopyDescriptors::CopyDetails<float, emtype, 1> {
const uint32_t offset = model_indices_offsets[model_id + 1];
const uint32_t num_model_indices = model_indices_offsets[num_instances];
const uint32_t vid = (i + offset) % num_model_indices;
const uint32_t index = model_indices[vid];
const dtype category = samples[index];
const dtype location = category_location[2 * category + 1];
const uint32_t network_id = index / local_samples_size;
const uint32_t local_index = index % local_samples_size;
emtype* interaction_layer_input = interaction_layer_input_pointers[network_id];
return {model_table + location * embedding_vec_size,
{interaction_layer_input + local_index * embedding_vec_size},
{true}};
});
PROFILE_RECORD("inf_forward_network_direct.forward_network_direct.start", stream, false);
shuffle(copy_desc, stream, local_samples_size / 10);
CK_CUDA_THROW_(hipPeekAtLastError());
PROFILE_RECORD("inf_forward_network_direct.forward_network_direct.stop", stream, false);
}
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::update_network(const emtype* gradients,
emtype* message_buffer,
hipStream_t stream) {
auto network_indices = network_indices_.get_ptr();
auto embedding_vec_size = embedding_vec_size_;
auto copy_desc = CopyDescriptors::make_OneToOne<emtype, emtype, 1>(
network_indices_offsets_.get_ptr() + model_.num_instances, embedding_vec_size,
[=] __device__(size_t i) -> CopyDescriptors::CopyDetails<emtype, emtype, 1> {
uint32_t index = network_indices[i];
return {gradients + index * embedding_vec_size,
{message_buffer + i * embedding_vec_size},
{true}};
});
shuffle(copy_desc, stream, data_.samples.get_num_elements() / model_.num_instances / 8);
CK_CUDA_THROW_(hipPeekAtLastError());
}
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::fused_intra_update_network(const emtype* gradients,
emtype** message_buffer,
hipStream_t stream) {
auto network_indices = network_indices_.get_ptr();
auto network_indices_offsets = network_indices_offsets_.get_ptr();
size_t embedding_vec_size = embedding_vec_size_;
auto local_instance_id = model_.instance_id;
auto num_instances = model_.num_instances;
auto per_node_instances = num_instances / model_.h_num_instances_per_node.size();
uint32_t local_comm_buff_size =
ceildiv<uint32_t>(max_num_infrequent_per_train_batch_, model_.num_instances);
auto copy_desc = CopyDescriptors::make_OneToOne<emtype, emtype, 1>(
network_indices_offsets_.get_ptr() + model_.num_instances, embedding_vec_size,
[=] __device__(size_t i) -> CopyDescriptors::CopyDetails<emtype, emtype, 1> {
uint32_t num_selected = network_indices_offsets[num_instances];
uint32_t vid = (i + network_indices_offsets[(local_instance_id + 1) % per_node_instances]) %
num_selected;
uint32_t index = network_indices[vid];
uint32_t model_id;
for (model_id = 0; model_id < num_instances && network_indices_offsets[model_id + 1] <= vid;
model_id++)
;
uint32_t local_model_id = (model_id % per_node_instances);
emtype* output_ptr =
&message_buffer[local_model_id][(model_id - local_model_id + local_instance_id) *
local_comm_buff_size * embedding_vec_size];
return {gradients + index * embedding_vec_size,
{output_ptr + (vid - network_indices_offsets[model_id]) * embedding_vec_size},
{true}};
});
shuffle(copy_desc, stream, data_.samples.get_num_elements() / model_.num_instances / 8);
CK_CUDA_THROW_(hipPeekAtLastError());
}
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::update_model(const emtype* message_buffer,
float* dev_lr, float scale,
hipStream_t stream) {
const uint32_t* __restrict__ model_indices = model_indices_.get_ptr();
const dtype* __restrict__ samples = data_.samples.get_ptr();
const dtype* __restrict__ category_location = model_.category_location.get_ptr();
uint32_t n_blocks = gpu_resource.get_sm_count();
sgd_atomic_update(message_buffer, infrequent_embedding_vectors_.get_ptr(),
model_indices_offsets_.get_ptr() + model_.num_instances,
[model_indices, samples, category_location] __device__(uint32_t i) {
uint32_t index = model_indices[i];
dtype category = samples[index];
return category_location[2 * category + 1];
},
n_blocks, embedding_vec_size_, dev_lr, scale, stream);
}
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::hier_update_model(const emtype* message_buffer,
float* dev_lr, float scale,
hipStream_t stream) {
const uint32_t& num_instances = model_.num_instances;
uint32_t local_samples_size =
ceildiv<uint32_t>(data_.batch_size, num_instances) * data_.table_sizes.size();
uint32_t local_comm_buff_size =
ceildiv<uint32_t>(max_num_infrequent_per_train_batch_, model_.num_instances);
const uint32_t* __restrict__ model_indices = model_indices_.get_ptr();
const dtype* __restrict__ samples = data_.samples.get_ptr();
const dtype* __restrict__ category_location = model_.category_location.get_ptr();
int num_sm = gpu_resource.get_sm_count();
int n_blocks = 16 * num_sm; // TODO: better heuristics
hipLaunchKernelGGL(( infrequent_embedding_kernels::hier_update_model), dim3(n_blocks), dim3(embedding_vec_size_), 0, stream,
model_indices_.get_ptr(), model_indices_offsets_.get_ptr(), data_.samples.get_ptr(),
model_.category_location.get_ptr(), message_buffer, infrequent_embedding_vectors_.get_ptr(),
embedding_vec_size_, num_instances, local_samples_size, local_comm_buff_size, dev_lr, scale);
CK_CUDA_THROW_(hipPeekAtLastError());
}
/** Update model for single GPU (no communications), lr is a device variable */
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::update_model_direct(float* dev_lr, float scale,
hipStream_t stream) {
const uint32_t& num_instances = model_.num_instances;
uint32_t local_samples_size =
ceildiv<uint32_t>(data_.batch_size, num_instances) * data_.table_sizes.size();
int num_sm = gpu_resource.get_sm_count();
int n_blocks = 16 * num_sm; // TODO: better heuristics
/* Each model reads from the gradients of each network */
PROFILE_RECORD("inf_update_model_direct.infrequent_update_model_direct.start", stream, false);
  hipLaunchKernelGGL((infrequent_embedding_kernels::infrequent_update_model_direct),
      dim3(n_blocks), dim3(embedding_vec_size_), 0, stream,
gradients_pointers_.get_ptr(), infrequent_embedding_vectors_.get_ptr(),
model_indices_.get_ptr(), model_indices_offsets_.get_ptr(), data_.samples.get_ptr(),
model_.category_location.get_ptr(), model_.num_instances, model_.global_instance_id,
embedding_vec_size_, local_samples_size, dev_lr, scale);
CK_CUDA_THROW_(hipPeekAtLastError());
PROFILE_RECORD("inf_update_model_direct.infrequent_update_model_direct.stop", stream, false);
}
template <typename dtype>
struct ModelIndicesSelectOp {
const dtype* samples;
const dtype* category_location;
uint32_t my_model_id;
__host__ __device__ __forceinline__ ModelIndicesSelectOp(const dtype* samples,
const dtype* category_location,
uint32_t my_model_id)
: samples(samples), category_location(category_location), my_model_id(my_model_id) {}
__device__ __forceinline__ bool operator()(const uint32_t& idx) const {
dtype category = __ldg(samples + idx);
dtype model_id = __ldg(category_location + 2 * category);
return model_id == my_model_id;
}
};
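// Note (descriptive): ModelIndicesSelectOp is paired with a
// hipcub::CountingInputIterator in calculate_model_indices below, so
// hipcub::DeviceSelect::If effectively scans sample positions 0..N-1 and keeps
// only those whose category is mapped to this model instance
// (category_location[2 * category] == my_model_id); the compacted positions
// land in model_indices_ and the selected count is written into the last
// element of model_indices_offsets_.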
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::calculate_model_indices_temp_storage_bytes() {
size_t max_batch_size = ::max(data_train_.batch_size, data_evaluate_.batch_size);
hipcub::CountingInputIterator<uint32_t> counting(0);
ModelIndicesSelectOp<dtype> select_op(nullptr, nullptr, 0);
hipcub::DeviceSelect::If(nullptr, model_indices_temp_storage_bytes, counting, (uint32_t*)nullptr,
(uint32_t*)nullptr, max_batch_size * data_.table_sizes.size(), select_op,
0);
}
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::calculate_model_indices(hipStream_t stream) {
const uint32_t& num_instances = model_.num_instances;
size_t local_batch_size = ceildiv<size_t>(data_.batch_size, num_instances);
// Select indices of infrequent categories belonging to this model
hipcub::CountingInputIterator<uint32_t> counting(0);
ModelIndicesSelectOp<dtype> select_op(data_.samples.get_ptr(), model_.category_location.get_ptr(),
model_.global_instance_id);
PROFILE_RECORD("inf_calculate_model_indices.device_select_if.start", stream, false);
hipcub::DeviceSelect::If(reinterpret_cast<void*>(model_indices_temp_storage_.get_ptr()),
model_indices_temp_storage_bytes, counting, model_indices_.get_ptr(),
model_indices_offsets_.get_ptr() + num_instances,
data_.batch_size * data_.table_sizes.size(), select_op, stream);
PROFILE_RECORD("inf_calculate_model_indices.device_select_if.stop", stream, false);
// Compute offsets
constexpr size_t TPB = 256;
const size_t n_blocks = ceildiv<size_t>(num_instances, TPB);
PROFILE_RECORD("inf_calculate_model_indices.offsets_kernel.start", stream, false);
hipLaunchKernelGGL(( offsets_kernel), dim3(n_blocks), dim3(TPB), 0, stream, model_indices_.get_ptr(),
model_indices_offsets_.get_ptr(), num_instances,
local_batch_size * data_.table_sizes.size());
PROFILE_RECORD("inf_calculate_model_indices.offsets_kernel.stop", stream, false);
CK_CUDA_THROW_(hipPeekAtLastError());
}
static __global__ void offsets_to_sizes(size_t* sizes, uint32_t* offsets, size_t element_size,
uint32_t num_instances) {
for (int t = blockIdx.x * blockDim.x + threadIdx.x; t < num_instances;
t += gridDim.x * blockDim.x) {
sizes[t] = (offsets[t + 1] - offsets[t]) * element_size;
}
}
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::calculate_model_indices_sizes_from_offsets(
hipStream_t stream) {
constexpr size_t TPB = 256;
const size_t n_blocks = ceildiv<size_t>(model_.num_instances, TPB);
hipLaunchKernelGGL(( offsets_to_sizes), dim3(n_blocks), dim3(TPB), 0, stream,
model_indices_sizes_.get_ptr(), model_indices_offsets_.get_ptr(),
embedding_vec_size_ * sizeof(emtype), model_.num_instances);
}
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::calculate_network_indices_temp_storage_bytes() {
size_t max_batch_size = ::max(data_train_.batch_size, data_evaluate_.batch_size);
const uint32_t num_instances = model_.num_instances;
uint32_t samples_size = max_batch_size * data_.table_sizes.size();
uint32_t local_samples_size = ceildiv<uint32_t>(samples_size, num_instances);
// Calculate select bytes
size_t select_bytes = 0;
hipcub::CountingInputIterator<uint32_t> counting(0);
hipcub::DeviceSelect::Flagged(nullptr, select_bytes, counting, (bool*)nullptr, (uint32_t*)nullptr,
(uint32_t*)nullptr, samples_size, 0);
// Total size
constexpr uint32_t align = 256;
network_indices_temp_storage_bytes =
alignTo<size_t>(sizeof(bool) * samples_size, align) + select_bytes;
}
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::calculate_network_indices(hipStream_t stream) {
const uint32_t num_instances = model_.num_instances;
uint32_t samples_size = data_.batch_size * data_.table_sizes.size();
uint32_t local_samples_size = ceildiv<uint32_t>(samples_size, num_instances);
// Temporary storage
constexpr uint32_t align = 256;
char* scratch_ptr = network_indices_temp_storage_.get_ptr();
size_t scratch_offset = 0;
bool* d_mask = reinterpret_cast<bool*>(scratch_ptr + scratch_offset);
scratch_offset += alignTo<size_t>(sizeof(bool) * samples_size, align);
void* d_temp_storage = reinterpret_cast<void*>(scratch_ptr + scratch_offset);
size_t temp_storage_bytes = network_indices_temp_storage_bytes - scratch_offset;
// Compute mask (for each source GPU, whether each element in the batch is located there)
constexpr uint32_t TPB_mask = 256;
uint32_t n_blocks_mask = ceildiv<uint32_t>(local_samples_size, TPB_mask);
PROFILE_RECORD("inf_calculate_network_indices.calculate_network_indices_mask.start", stream,
false);
  hipLaunchKernelGGL((infrequent_embedding_kernels::calculate_network_indices_mask),
                     dim3(n_blocks_mask), dim3(TPB_mask), 0, stream,
data_.samples.get_ptr() + model_.global_instance_id * local_samples_size,
model_.category_location.get_ptr(), d_mask, local_samples_size, num_instances);
CK_CUDA_THROW_(hipPeekAtLastError());
PROFILE_RECORD("inf_calculate_network_indices.calculate_network_indices_mask.stop", stream,
false);
// Select indices according to the mask
hipcub::CountingInputIterator<uint32_t> counting(0);
PROFILE_RECORD("inf_calculate_network_indices.device_select_flagged.start", stream, false);
hipcub::DeviceSelect::Flagged(
d_temp_storage, temp_storage_bytes, counting, d_mask, network_indices_.get_ptr(),
network_indices_offsets_.get_ptr() + num_instances, samples_size, stream);
PROFILE_RECORD("inf_calculate_network_indices.device_select_flagged.stop", stream, false);
// Compute offsets
constexpr uint32_t TPB_offsets = 256;
uint32_t n_blocks_offsets = ceildiv<uint32_t>(num_instances, TPB_offsets);
PROFILE_RECORD("inf_calculate_network_indices.offsets_kernel.start", stream, false);
hipLaunchKernelGGL(( offsets_kernel), dim3(n_blocks_offsets), dim3(TPB_offsets), 0, stream, network_indices_.get_ptr(),
network_indices_offsets_.get_ptr(),
num_instances, local_samples_size);
CK_CUDA_THROW_(hipPeekAtLastError());
PROFILE_RECORD("inf_calculate_network_indices.offsets_kernel.stop", stream, false);
// Re-map indices between 0 and local_samples_size - 1
uint32_t TPB_remap = 256;
uint32_t n_blocks_remap = gpu_resource.get_sm_count();
PROFILE_RECORD("inf_calculate_network_indices.modulo_kernel.start", stream, false);
hipLaunchKernelGGL(( modulo_kernel), dim3(n_blocks_remap), dim3(TPB_remap), 0, stream,
network_indices_.get_ptr(), network_indices_offsets_.get_ptr() + num_instances,
local_samples_size);
CK_CUDA_THROW_(hipPeekAtLastError());
PROFILE_RECORD("inf_calculate_network_indices.modulo_kernel.stop", stream, false);
}
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::calculate_network_indices_sizes_from_offsets(
hipStream_t stream) {
constexpr size_t TPB = 256;
const size_t n_blocks = ceildiv<size_t>(model_.num_instances, TPB);
hipLaunchKernelGGL(( offsets_to_sizes), dim3(n_blocks), dim3(TPB), 0, stream,
network_indices_sizes_.get_ptr(), network_indices_offsets_.get_ptr(),
embedding_vec_size_ * sizeof(emtype), model_.num_instances);
}
template class InfrequentEmbedding<uint32_t, __half>;
template class InfrequentEmbedding<uint32_t, float>;
template class InfrequentEmbedding<long long, __half>;
template class InfrequentEmbedding<long long, float>;
} // namespace hybrid_embedding
} // namespace HugeCTR
| dce38eb4ac7f6f258e6031c6a45f59153ee32c9b.cu | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda_runtime.h>
#include <algorithm>
#include <cub/cub.cuh>
#include <iostream>
#include <utility>
#include <vector>
#include "HugeCTR/include/common.hpp"
#include "HugeCTR/include/data_simulator.hpp"
#include "HugeCTR/include/embeddings/hybrid_embedding/data.hpp"
#include "HugeCTR/include/embeddings/hybrid_embedding/infrequent_embedding.hpp"
#include "HugeCTR/include/embeddings/hybrid_embedding/model.hpp"
#include "HugeCTR/include/embeddings/hybrid_embedding/update.cuh"
#include "HugeCTR/include/embeddings/hybrid_embedding/utils.cuh"
#include "HugeCTR/include/embeddings/hybrid_embedding/utils.hpp"
#include "HugeCTR/include/shuffle/shuffle.cuh"
#include "HugeCTR/include/tensor2.hpp"
#include "HugeCTR/include/utils.hpp"
namespace HugeCTR {
namespace hybrid_embedding {
namespace infrequent_embedding_kernels {
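/**
 * Applies the SGD updates for the infrequent embedding vectors owned by this model
 * instance, reading the gradients that each network instance deposited in its own
 * section (local_comm_buff_size entries) of the hierarchical communication buffer.
 */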
template <typename dtype, typename emtype>
__global__ void hier_update_model(
const uint32_t* __restrict__ model_indices, const uint32_t* __restrict__ model_indices_offsets,
const dtype* __restrict__ samples, const dtype* __restrict__ category_location,
const emtype* __restrict__ gradients, float* __restrict__ embedding_vectors,
uint32_t embedding_vec_size, uint32_t num_instances, uint32_t local_samples_size,
uint32_t local_comm_buff_size, const float* __restrict__ lr_ptr, const float scale) {
float lr = __ldg(lr_ptr) / scale;
const uint32_t num_indices = model_indices_offsets[num_instances];
// Load offset only when the network_id changes
uint32_t previous_network_id = 0;
uint32_t offset = 0;
for (uint32_t i = blockIdx.x; i < num_indices; i += gridDim.x) {
uint32_t index = model_indices[i];
dtype category = samples[index];
dtype location = category_location[2 * category + 1];
uint32_t network_id = index / local_samples_size;
if (network_id != previous_network_id) {
offset = model_indices_offsets[network_id];
previous_network_id = network_id;
}
atomicAdd(
embedding_vectors + location * embedding_vec_size + threadIdx.x,
-lr * TypeConvertFunc<float, emtype>::convert(
gradients[embedding_vec_size * (network_id * local_comm_buff_size + i - offset) +
threadIdx.x]));
}
}
template <typename dtype, typename emtype>
__global__ void infrequent_update_model_direct(
const emtype* const* __restrict__ gradients_pointers, float* embedding_vectors,
const uint32_t* __restrict__ model_indices, const uint32_t* __restrict__ model_indices_offsets,
const dtype* __restrict__ samples, const dtype* __restrict__ category_location,
uint32_t num_instances, uint32_t model_id, uint32_t embedding_vec_size,
uint32_t local_samples_size, const float* __restrict__ lr_ptr, const float scale) {
float lr = __ldg(lr_ptr) / scale;
// Shift pattern
const uint32_t offset = __ldg(model_indices_offsets + model_id + 1);
const uint32_t num_model_indices = __ldg(model_indices_offsets + num_instances);
for (uint32_t i = blockIdx.x; i < num_model_indices; i += gridDim.x) {
uint32_t vid = (i + offset) % num_model_indices;
uint32_t index = model_indices[vid];
uint32_t network_id = index / local_samples_size;
uint32_t local_index = index % local_samples_size;
dtype category = samples[index];
uint32_t location = category_location[2 * category + 1];
const emtype* gradients = gradients_pointers[network_id];
atomicAdd(embedding_vectors + location * embedding_vec_size + threadIdx.x,
-lr * TypeConvertFunc<float, emtype>::convert(
gradients[local_index * embedding_vec_size + threadIdx.x]));
}
}
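/**
 * For every local sample, writes one boolean per model instance indicating whether
 * that instance owns the sample's category (mask layout: one section of
 * local_samples_size flags per instance).
 */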
template <typename dtype>
__global__ void calculate_network_indices_mask(const dtype* __restrict__ local_samples,
const dtype* __restrict__ category_location,
bool* mask, uint32_t local_samples_size,
uint32_t num_instances) {
for (uint32_t i = blockIdx.x * blockDim.x + threadIdx.x; i < local_samples_size;
i += gridDim.x * blockDim.x) {
dtype category = local_samples[i];
uint32_t model_id = static_cast<uint32_t>(category_location[2 * category]);
for (uint32_t section_id = 0; section_id < num_instances; section_id++) {
mask[local_samples_size * section_id + i] = (model_id == section_id);
}
}
}
} // namespace infrequent_embedding_kernels
template <typename dtype, typename emtype>
InfrequentEmbedding<dtype, emtype>::InfrequentEmbedding(const Data<dtype>& data_train,
const Data<dtype>& data_evaluate,
const Model<dtype>& model,
const GPUResource& gpu_resource,
uint32_t embedding_vec_size)
: model_(model),
data_train_(data_train),
data_evaluate_(data_evaluate),
data_(data_train), // Temporary
gpu_resource(gpu_resource),
embedding_vec_size_(embedding_vec_size) {
auto buf = GeneralBuffer2<CudaAllocator>::create();
auto managed_buf = GeneralBuffer2<CudaManagedAllocator>::create();
size_t universe_batch_size = std::max(data_train.batch_size, data_evaluate.batch_size);
buf->reserve({ceildiv<size_t>(model.num_categories, model.num_instances), embedding_vec_size_},
&infrequent_embedding_vectors_);
buf->reserve({universe_batch_size, data_train.table_sizes.size()}, &model_indices_);
managed_buf->reserve({model.num_instances + 1, 1}, &model_indices_offsets_);
buf->reserve({model_.num_instances}, &model_indices_sizes_);
buf->reserve({model_.num_instances},
&model_indices_sizes_ptrs_); // TODO: should be local instances
buf->reserve(
{ceildiv<size_t>(universe_batch_size, model.num_instances), data_train.table_sizes.size()},
&network_indices_);
managed_buf->reserve({model.num_instances + 1, 1}, &network_indices_offsets_);
buf->reserve({model_.num_instances}, &network_indices_sizes_);
buf->reserve({model_.num_instances}, &network_indices_sizes_ptrs_);
// Temporary storage
calculate_model_indices_temp_storage_bytes();
calculate_network_indices_temp_storage_bytes();
buf->reserve({model_indices_temp_storage_bytes, 1}, &model_indices_temp_storage_);
buf->reserve({network_indices_temp_storage_bytes, 1}, &network_indices_temp_storage_);
buf->reserve({model.num_instances, 1}, &interaction_layer_input_pointers_train_);
buf->reserve({model.num_instances, 1}, &interaction_layer_input_pointers_eval_);
buf->reserve({model.num_instances, 1}, &gradients_pointers_);
buf->allocate();
managed_buf->allocate();
int current_device;
CK_CUDA_THROW_(cudaGetDevice(¤t_device));
CK_CUDA_THROW_(cudaMemAdvise(managed_buf->get_ptr(), managed_buf->get_size_in_bytes(),
cudaMemAdviseSetReadMostly, current_device));
}
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::initialize_embedding_vectors() {
CudaDeviceContext context(gpu_resource.get_device_id());
const size_t num_tables = data_.table_sizes.size();
for (size_t i = 0; i < num_tables; i++) {
float up_bound = sqrt(1.f / data_.table_sizes[i]);
const size_t offset = embedding_vec_size_ * model_.h_infrequent_model_table_offsets[i];
const size_t number_of_vectors =
model_.h_infrequent_model_table_offsets[i + 1] - model_.h_infrequent_model_table_offsets[i];
UniformGenerator::fill(
infrequent_embedding_vectors_.get_ptr() + offset, embedding_vec_size_ * number_of_vectors,
-up_bound, up_bound, gpu_resource.get_sm_count(),
gpu_resource.get_replica_variant_curand_generator(), gpu_resource.get_stream());
}
}
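/**
 * Gathers the infrequent embedding vectors requested by the network instances
 * (the samples selected in model_indices_) into the forward message buffer,
 * one embedding_vec_size_-wide vector per selected sample.
 */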
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::forward_model(emtype* message_buffer,
cudaStream_t stream) {
auto model_indices = model_indices_.get_ptr();
auto samples = data_.samples.get_ptr();
auto category_location = model_.category_location.get_ptr();
auto infrequent_embedding_vectors = infrequent_embedding_vectors_.get_ptr();
auto embedding_vec_size = embedding_vec_size_;
auto copy_desc = CopyDescriptors::make_OneToOne<float, emtype, 1>(
model_indices_offsets_.get_ptr() + model_.num_instances, embedding_vec_size,
[=] __device__(size_t i) -> CopyDescriptors::CopyDetails<float, emtype, 1> {
uint32_t index = model_indices[i];
dtype category = samples[index];
dtype location = category_location[2 * category + 1];
return {infrequent_embedding_vectors + location * embedding_vec_size,
{message_buffer + i * embedding_vec_size},
{true}};
});
shuffle(copy_desc, stream, data_.samples.get_num_elements() / model_.num_instances / 8);
CK_CUDA_THROW_(cudaPeekAtLastError());
}
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::fused_intra_forward_model(emtype** message_buffer,
cudaStream_t stream) {
auto model_indices = model_indices_.get_ptr();
auto model_indices_offsets = model_indices_offsets_.get_ptr();
auto samples = data_.samples.get_ptr();
auto category_location = model_.category_location.get_ptr();
auto infrequent_embedding_vectors = infrequent_embedding_vectors_.get_ptr();
size_t embedding_vec_size = embedding_vec_size_;
auto local_instance_id = model_.instance_id;
auto num_instances = model_.num_instances;
auto per_node_instances = num_instances / model_.h_num_instances_per_node.size();
uint32_t local_samples_size =
ceildiv<uint32_t>(data_.batch_size, num_instances) * data_.table_sizes.size();
uint32_t local_comm_buff_size =
ceildiv<uint32_t>(max_num_infrequent_per_batch_, model_.num_instances);
auto copy_desc = CopyDescriptors::make_OneToOne<float, emtype, 1>(
model_indices_offsets_.get_ptr() + num_instances, embedding_vec_size,
[=] __device__(size_t i) -> CopyDescriptors::CopyDetails<float, emtype, 1> {
uint32_t num_selected = model_indices_offsets[num_instances];
uint32_t vid = (i + model_indices_offsets[(local_instance_id + 1) % per_node_instances]) %
num_selected;
uint32_t index = model_indices[vid];
uint32_t network_id = (index / local_samples_size);
dtype category = samples[index];
dtype location = category_location[2 * category + 1];
uint32_t local_network_id = (network_id % per_node_instances);
emtype* output_ptr =
&message_buffer[local_network_id][(network_id - local_network_id + local_instance_id) *
local_comm_buff_size * embedding_vec_size];
return {infrequent_embedding_vectors + location * embedding_vec_size,
{output_ptr + (vid - model_indices_offsets[network_id]) * embedding_vec_size},
{true}};
});
shuffle(copy_desc, stream, data_.samples.get_num_elements() / model_.num_instances / 8);
CK_CUDA_THROW_(cudaPeekAtLastError());
}
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::forward_network(const emtype* message_buffer,
emtype* interaction_layer_input,
cudaStream_t stream) {
auto network_indices = network_indices_.get_ptr();
auto embedding_vec_size = embedding_vec_size_;
auto copy_desc = CopyDescriptors::make_OneToOne<emtype, emtype, 1>(
network_indices_offsets_.get_ptr() + model_.num_instances, embedding_vec_size,
[=] __device__(size_t i) -> CopyDescriptors::CopyDetails<emtype, emtype, 1> {
uint32_t index = network_indices[i];
return {message_buffer + i * embedding_vec_size,
{interaction_layer_input + index * embedding_vec_size},
{true}};
});
shuffle(copy_desc, stream, data_.samples.get_num_elements() / model_.num_instances / 8);
CK_CUDA_THROW_(cudaPeekAtLastError());
}
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::hier_forward_network(const emtype* message_buffer,
emtype* interaction_layer_input,
cudaStream_t stream) {
auto network_indices = network_indices_.get_ptr();
auto network_indices_offsets = network_indices_offsets_.get_ptr();
auto embedding_vec_size = embedding_vec_size_;
uint32_t local_samples_size =
ceildiv<uint32_t>(data_.batch_size, model_.num_instances) * data_.table_sizes.size();
uint32_t local_comm_buff_size =
ceildiv<uint32_t>(max_num_infrequent_per_batch_, model_.num_instances);
auto copy_desc = CopyDescriptors::make_OneToOne<emtype, emtype, 1>(
network_indices_offsets_.get_ptr() + model_.num_instances, embedding_vec_size,
[=] __device__(size_t i) -> CopyDescriptors::CopyDetails<emtype, emtype, 1> {
uint32_t index = network_indices[i];
// Find model id and offset
uint32_t model_id = 0;
uint32_t offset = 0;
uint32_t next_offset = network_indices_offsets[1];
while (next_offset <= i) {
offset = next_offset;
model_id++;
next_offset = network_indices_offsets[model_id + 1];
}
return {
message_buffer + (model_id * local_comm_buff_size + i - offset) * embedding_vec_size,
{interaction_layer_input + index * embedding_vec_size},
{true}};
});
shuffle(copy_desc, stream, data_.samples.get_num_elements() / model_.num_instances / 8);
CK_CUDA_THROW_(cudaPeekAtLastError());
}
/** Forward network for single GPU (no communications) */
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::forward_network_direct(bool is_train,
cudaStream_t stream) {
const uint32_t num_instances = model_.num_instances;
const uint32_t model_id = model_.global_instance_id;
uint32_t local_samples_size =
ceildiv<uint32_t>(data_.batch_size, num_instances) * data_.table_sizes.size();
auto interaction_layer_input_pointers = is_train
? interaction_layer_input_pointers_train_.get_ptr()
: interaction_layer_input_pointers_eval_.get_ptr();
auto model_indices = model_indices_.get_ptr();
auto model_indices_offsets = model_indices_offsets_.get_ptr();
auto category_location = model_.category_location.get_ptr();
auto samples = data_.samples.get_ptr();
auto model_table = infrequent_embedding_vectors_.get_ptr();
auto embedding_vec_size = embedding_vec_size_;
auto copy_desc = CopyDescriptors::make_OneToOne<float, emtype, 1>(
model_indices_offsets + num_instances, embedding_vec_size,
[=] __device__(size_t i) -> CopyDescriptors::CopyDetails<float, emtype, 1> {
const uint32_t offset = model_indices_offsets[model_id + 1];
const uint32_t num_model_indices = model_indices_offsets[num_instances];
const uint32_t vid = (i + offset) % num_model_indices;
const uint32_t index = model_indices[vid];
const dtype category = samples[index];
const dtype location = category_location[2 * category + 1];
const uint32_t network_id = index / local_samples_size;
const uint32_t local_index = index % local_samples_size;
emtype* interaction_layer_input = interaction_layer_input_pointers[network_id];
return {model_table + location * embedding_vec_size,
{interaction_layer_input + local_index * embedding_vec_size},
{true}};
});
PROFILE_RECORD("inf_forward_network_direct.forward_network_direct.start", stream, false);
shuffle(copy_desc, stream, local_samples_size / 10);
CK_CUDA_THROW_(cudaPeekAtLastError());
PROFILE_RECORD("inf_forward_network_direct.forward_network_direct.stop", stream, false);
}
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::update_network(const emtype* gradients,
emtype* message_buffer,
cudaStream_t stream) {
auto network_indices = network_indices_.get_ptr();
auto embedding_vec_size = embedding_vec_size_;
auto copy_desc = CopyDescriptors::make_OneToOne<emtype, emtype, 1>(
network_indices_offsets_.get_ptr() + model_.num_instances, embedding_vec_size,
[=] __device__(size_t i) -> CopyDescriptors::CopyDetails<emtype, emtype, 1> {
uint32_t index = network_indices[i];
return {gradients + index * embedding_vec_size,
{message_buffer + i * embedding_vec_size},
{true}};
});
shuffle(copy_desc, stream, data_.samples.get_num_elements() / model_.num_instances / 8);
CK_CUDA_THROW_(cudaPeekAtLastError());
}
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::fused_intra_update_network(const emtype* gradients,
emtype** message_buffer,
cudaStream_t stream) {
auto network_indices = network_indices_.get_ptr();
auto network_indices_offsets = network_indices_offsets_.get_ptr();
size_t embedding_vec_size = embedding_vec_size_;
auto local_instance_id = model_.instance_id;
auto num_instances = model_.num_instances;
auto per_node_instances = num_instances / model_.h_num_instances_per_node.size();
uint32_t local_comm_buff_size =
ceildiv<uint32_t>(max_num_infrequent_per_train_batch_, model_.num_instances);
auto copy_desc = CopyDescriptors::make_OneToOne<emtype, emtype, 1>(
network_indices_offsets_.get_ptr() + model_.num_instances, embedding_vec_size,
[=] __device__(size_t i) -> CopyDescriptors::CopyDetails<emtype, emtype, 1> {
uint32_t num_selected = network_indices_offsets[num_instances];
uint32_t vid = (i + network_indices_offsets[(local_instance_id + 1) % per_node_instances]) %
num_selected;
uint32_t index = network_indices[vid];
uint32_t model_id;
for (model_id = 0; model_id < num_instances && network_indices_offsets[model_id + 1] <= vid;
model_id++)
;
uint32_t local_model_id = (model_id % per_node_instances);
emtype* output_ptr =
&message_buffer[local_model_id][(model_id - local_model_id + local_instance_id) *
local_comm_buff_size * embedding_vec_size];
return {gradients + index * embedding_vec_size,
{output_ptr + (vid - network_indices_offsets[model_id]) * embedding_vec_size},
{true}};
});
shuffle(copy_desc, stream, data_.samples.get_num_elements() / model_.num_instances / 8);
CK_CUDA_THROW_(cudaPeekAtLastError());
}
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::update_model(const emtype* message_buffer,
float* dev_lr, float scale,
cudaStream_t stream) {
const uint32_t* __restrict__ model_indices = model_indices_.get_ptr();
const dtype* __restrict__ samples = data_.samples.get_ptr();
const dtype* __restrict__ category_location = model_.category_location.get_ptr();
uint32_t n_blocks = gpu_resource.get_sm_count();
sgd_atomic_update(message_buffer, infrequent_embedding_vectors_.get_ptr(),
model_indices_offsets_.get_ptr() + model_.num_instances,
[model_indices, samples, category_location] __device__(uint32_t i) {
uint32_t index = model_indices[i];
dtype category = samples[index];
return category_location[2 * category + 1];
},
n_blocks, embedding_vec_size_, dev_lr, scale, stream);
}
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::hier_update_model(const emtype* message_buffer,
float* dev_lr, float scale,
cudaStream_t stream) {
const uint32_t& num_instances = model_.num_instances;
uint32_t local_samples_size =
ceildiv<uint32_t>(data_.batch_size, num_instances) * data_.table_sizes.size();
uint32_t local_comm_buff_size =
ceildiv<uint32_t>(max_num_infrequent_per_train_batch_, model_.num_instances);
const uint32_t* __restrict__ model_indices = model_indices_.get_ptr();
const dtype* __restrict__ samples = data_.samples.get_ptr();
const dtype* __restrict__ category_location = model_.category_location.get_ptr();
int num_sm = gpu_resource.get_sm_count();
int n_blocks = 16 * num_sm; // TODO: better heuristics
infrequent_embedding_kernels::hier_update_model<<<n_blocks, embedding_vec_size_, 0, stream>>>(
model_indices_.get_ptr(), model_indices_offsets_.get_ptr(), data_.samples.get_ptr(),
model_.category_location.get_ptr(), message_buffer, infrequent_embedding_vectors_.get_ptr(),
embedding_vec_size_, num_instances, local_samples_size, local_comm_buff_size, dev_lr, scale);
CK_CUDA_THROW_(cudaPeekAtLastError());
}
/** Update model for single GPU (no communications), lr is a device variable */
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::update_model_direct(float* dev_lr, float scale,
cudaStream_t stream) {
const uint32_t& num_instances = model_.num_instances;
uint32_t local_samples_size =
ceildiv<uint32_t>(data_.batch_size, num_instances) * data_.table_sizes.size();
int num_sm = gpu_resource.get_sm_count();
int n_blocks = 16 * num_sm; // TODO: better heuristics
/* Each model reads from the gradients of each network */
PROFILE_RECORD("inf_update_model_direct.infrequent_update_model_direct.start", stream, false);
infrequent_embedding_kernels::
infrequent_update_model_direct<<<n_blocks, embedding_vec_size_, 0, stream>>>(
gradients_pointers_.get_ptr(), infrequent_embedding_vectors_.get_ptr(),
model_indices_.get_ptr(), model_indices_offsets_.get_ptr(), data_.samples.get_ptr(),
model_.category_location.get_ptr(), model_.num_instances, model_.global_instance_id,
embedding_vec_size_, local_samples_size, dev_lr, scale);
CK_CUDA_THROW_(cudaPeekAtLastError());
PROFILE_RECORD("inf_update_model_direct.infrequent_update_model_direct.stop", stream, false);
}
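/**
 * Selection functor for cub::DeviceSelect::If: keeps a sample index when the
 * category it references is stored on this model instance (my_model_id).
 */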
template <typename dtype>
struct ModelIndicesSelectOp {
const dtype* samples;
const dtype* category_location;
uint32_t my_model_id;
__host__ __device__ __forceinline__ ModelIndicesSelectOp(const dtype* samples,
const dtype* category_location,
uint32_t my_model_id)
: samples(samples), category_location(category_location), my_model_id(my_model_id) {}
__device__ __forceinline__ bool operator()(const uint32_t& idx) const {
dtype category = __ldg(samples + idx);
dtype model_id = __ldg(category_location + 2 * category);
return model_id == my_model_id;
}
};
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::calculate_model_indices_temp_storage_bytes() {
size_t max_batch_size = std::max(data_train_.batch_size, data_evaluate_.batch_size);
cub::CountingInputIterator<uint32_t> counting(0);
ModelIndicesSelectOp<dtype> select_op(nullptr, nullptr, 0);
cub::DeviceSelect::If(nullptr, model_indices_temp_storage_bytes, counting, (uint32_t*)nullptr,
(uint32_t*)nullptr, max_batch_size * data_.table_sizes.size(), select_op,
0);
}
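/**
 * Builds the list of sample indices whose categories are stored on this model
 * instance, then derives the per-network offsets into that list.
 */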
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::calculate_model_indices(cudaStream_t stream) {
const uint32_t& num_instances = model_.num_instances;
size_t local_batch_size = ceildiv<size_t>(data_.batch_size, num_instances);
// Select indices of infrequent categories belonging to this model
cub::CountingInputIterator<uint32_t> counting(0);
ModelIndicesSelectOp<dtype> select_op(data_.samples.get_ptr(), model_.category_location.get_ptr(),
model_.global_instance_id);
PROFILE_RECORD("inf_calculate_model_indices.device_select_if.start", stream, false);
cub::DeviceSelect::If(reinterpret_cast<void*>(model_indices_temp_storage_.get_ptr()),
model_indices_temp_storage_bytes, counting, model_indices_.get_ptr(),
model_indices_offsets_.get_ptr() + num_instances,
data_.batch_size * data_.table_sizes.size(), select_op, stream);
PROFILE_RECORD("inf_calculate_model_indices.device_select_if.stop", stream, false);
// Compute offsets
constexpr size_t TPB = 256;
const size_t n_blocks = ceildiv<size_t>(num_instances, TPB);
PROFILE_RECORD("inf_calculate_model_indices.offsets_kernel.start", stream, false);
offsets_kernel<<<n_blocks, TPB, 0, stream>>>(model_indices_.get_ptr(),
model_indices_offsets_.get_ptr(), num_instances,
local_batch_size * data_.table_sizes.size());
PROFILE_RECORD("inf_calculate_model_indices.offsets_kernel.stop", stream, false);
CK_CUDA_THROW_(cudaPeekAtLastError());
}
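/**
 * Converts an offsets array into per-instance sizes in bytes:
 * sizes[t] = (offsets[t + 1] - offsets[t]) * element_size.
 * Example: offsets = {0, 3, 3, 7} with element_size = 8 gives sizes = {24, 0, 32}.
 */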
static __global__ void offsets_to_sizes(size_t* sizes, uint32_t* offsets, size_t element_size,
uint32_t num_instances) {
for (int t = blockIdx.x * blockDim.x + threadIdx.x; t < num_instances;
t += gridDim.x * blockDim.x) {
sizes[t] = (offsets[t + 1] - offsets[t]) * element_size;
}
}
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::calculate_model_indices_sizes_from_offsets(
cudaStream_t stream) {
constexpr size_t TPB = 256;
const size_t n_blocks = ceildiv<size_t>(model_.num_instances, TPB);
offsets_to_sizes<<<n_blocks, TPB, 0, stream>>>(
model_indices_sizes_.get_ptr(), model_indices_offsets_.get_ptr(),
embedding_vec_size_ * sizeof(emtype), model_.num_instances);
}
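/**
 * Queries the scratch space needed by calculate_network_indices: the boolean mask
 * (one flag per local sample per source instance) plus the temporary storage that
 * cub::DeviceSelect::Flagged reports when called with a null storage pointer.
 */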
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::calculate_network_indices_temp_storage_bytes() {
size_t max_batch_size = std::max(data_train_.batch_size, data_evaluate_.batch_size);
const uint32_t num_instances = model_.num_instances;
uint32_t samples_size = max_batch_size * data_.table_sizes.size();
uint32_t local_samples_size = ceildiv<uint32_t>(samples_size, num_instances);
// Calculate select bytes
size_t select_bytes = 0;
cub::CountingInputIterator<uint32_t> counting(0);
cub::DeviceSelect::Flagged(nullptr, select_bytes, counting, (bool*)nullptr, (uint32_t*)nullptr,
(uint32_t*)nullptr, samples_size, 0);
// Total size
constexpr uint32_t align = 256;
network_indices_temp_storage_bytes =
alignTo<size_t>(sizeof(bool) * samples_size, align) + select_bytes;
}
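/**
 * Builds the list of local sample indices whose embedding vectors this network
 * instance receives from each model instance: computes the ownership mask, selects
 * the flagged indices, derives the per-instance offsets, and finally remaps the
 * selected indices into the [0, local_samples_size) range.
 */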
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::calculate_network_indices(cudaStream_t stream) {
const uint32_t num_instances = model_.num_instances;
uint32_t samples_size = data_.batch_size * data_.table_sizes.size();
uint32_t local_samples_size = ceildiv<uint32_t>(samples_size, num_instances);
// Temporary storage
constexpr uint32_t align = 256;
char* scratch_ptr = network_indices_temp_storage_.get_ptr();
size_t scratch_offset = 0;
bool* d_mask = reinterpret_cast<bool*>(scratch_ptr + scratch_offset);
scratch_offset += alignTo<size_t>(sizeof(bool) * samples_size, align);
void* d_temp_storage = reinterpret_cast<void*>(scratch_ptr + scratch_offset);
size_t temp_storage_bytes = network_indices_temp_storage_bytes - scratch_offset;
// Compute mask (for each source GPU, whether each element in the batch is located there)
constexpr uint32_t TPB_mask = 256;
uint32_t n_blocks_mask = ceildiv<uint32_t>(local_samples_size, TPB_mask);
PROFILE_RECORD("inf_calculate_network_indices.calculate_network_indices_mask.start", stream,
false);
infrequent_embedding_kernels::
calculate_network_indices_mask<<<n_blocks_mask, TPB_mask, 0, stream>>>(
data_.samples.get_ptr() + model_.global_instance_id * local_samples_size,
model_.category_location.get_ptr(), d_mask, local_samples_size, num_instances);
CK_CUDA_THROW_(cudaPeekAtLastError());
PROFILE_RECORD("inf_calculate_network_indices.calculate_network_indices_mask.stop", stream,
false);
// Select indices according to the mask
cub::CountingInputIterator<uint32_t> counting(0);
PROFILE_RECORD("inf_calculate_network_indices.device_select_flagged.start", stream, false);
cub::DeviceSelect::Flagged(
d_temp_storage, temp_storage_bytes, counting, d_mask, network_indices_.get_ptr(),
network_indices_offsets_.get_ptr() + num_instances, samples_size, stream);
PROFILE_RECORD("inf_calculate_network_indices.device_select_flagged.stop", stream, false);
// Compute offsets
constexpr uint32_t TPB_offsets = 256;
uint32_t n_blocks_offsets = ceildiv<uint32_t>(num_instances, TPB_offsets);
PROFILE_RECORD("inf_calculate_network_indices.offsets_kernel.start", stream, false);
offsets_kernel<<<n_blocks_offsets, TPB_offsets, 0, stream>>>(network_indices_.get_ptr(),
network_indices_offsets_.get_ptr(),
num_instances, local_samples_size);
CK_CUDA_THROW_(cudaPeekAtLastError());
PROFILE_RECORD("inf_calculate_network_indices.offsets_kernel.stop", stream, false);
// Re-map indices between 0 and local_samples_size - 1
uint32_t TPB_remap = 256;
uint32_t n_blocks_remap = gpu_resource.get_sm_count();
PROFILE_RECORD("inf_calculate_network_indices.modulo_kernel.start", stream, false);
modulo_kernel<<<n_blocks_remap, TPB_remap, 0, stream>>>(
network_indices_.get_ptr(), network_indices_offsets_.get_ptr() + num_instances,
local_samples_size);
CK_CUDA_THROW_(cudaPeekAtLastError());
PROFILE_RECORD("inf_calculate_network_indices.modulo_kernel.stop", stream, false);
}
template <typename dtype, typename emtype>
void InfrequentEmbedding<dtype, emtype>::calculate_network_indices_sizes_from_offsets(
cudaStream_t stream) {
constexpr size_t TPB = 256;
const size_t n_blocks = ceildiv<size_t>(model_.num_instances, TPB);
offsets_to_sizes<<<n_blocks, TPB, 0, stream>>>(
network_indices_sizes_.get_ptr(), network_indices_offsets_.get_ptr(),
embedding_vec_size_ * sizeof(emtype), model_.num_instances);
}
template class InfrequentEmbedding<uint32_t, __half>;
template class InfrequentEmbedding<uint32_t, float>;
template class InfrequentEmbedding<long long, __half>;
template class InfrequentEmbedding<long long, float>;
} // namespace hybrid_embedding
} // namespace HugeCTR
|
fe5bf4e35233c2cd65d3b8b1b2f77e1a42407284.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @copyright (c) 2012- King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
**/
/**
* @file src/batch_triangular/Xtrtri_batch.cu
* KBLAS is a high performance CUDA library for subset of BLAS
* and LAPACK routines optimized for NVIDIA GPUs.
* KBLAS is provided by KAUST.
*
* @version 3.0.0
* @author Ali Charara
* @date 2018-11-14
**/
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include "rocblas.h"
#include <typeinfo>
#include "kblas.h"
#include "kblas_struct.h"
#include "kblas_operators.h"
#include "kblas_defs.h"
#include "kblas_common.h"
#include "workspace_queries.ch"
//==============================================================================================
#include "Xblas_core.ch"
#include "Xhelper_funcs.ch"
#include "Xtrtri_batch_drivers.cuh"
//==============================================================================================
//Non-Strided form
// workspace needed: device pointers
// A: host pointer to device buffer
int Xtrtri_batch_offset(kblasHandle_t handle,
char uplo, char diag,
const int n,
TYPE** A, int A_row_off, int A_col_off, int lda,
int batchCount,
int *info_array)
{
KBlasWorkspaceState ws_needed;
trtri_batch_wsquery_core<false>( n, batchCount, (kblasWorkspaceState_t)&ws_needed);
if( !ws_needed.isSufficient( &(handle->work_space.allocated_ws_state) ) ){
return KBLAS_InsufficientWorkspace;
}
return Xtrtri_batch_core<TYPE, TYPE**, false>(
handle,
uplo, diag, n,
(TYPE**)A, A_row_off, A_col_off, lda, (long)0,
batchCount,
info_array);
}
// workspace needed: device pointers
// A: host pointer to device buffer
int kblas_trtri_batch(kblasHandle_t handle,
char uplo, char diag,
const int n,
TYPE** A, int lda,
int batchCount,
int *info_array)
{
return Xtrtri_batch_offset( handle,
uplo, diag, n,
A, 0, 0, lda,
batchCount,
info_array);
}
// workspace needed: device pointers
// A: host pointer to device buffer
extern "C"
int kblasXtrtri_batch(kblasHandle_t handle,
char uplo, char diag,
const int n,
TYPE** A, int lda,
int batchCount,
int *info_array)
{
return Xtrtri_batch_offset( handle,
uplo, diag, n,
A, 0, 0, lda,
batchCount,
info_array);
}
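// Illustrative call for the routine above (hypothetical values; assuming BLAS-style
// flags, i.e. uplo in {'L','U'} and diag in {'N','U'}): invert a batch of n x n
// triangular matrices given as an array of device pointers with leading dimension lda:
//   kblasXtrtri_batch(handle, 'L', 'N', n, A_array, lda, batchCount, info_array);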
//==============================================================================================
//Strided form
// template<>
// workspace needed: device pointers
// A: host pointer to device buffer
int Xtrtri_batch_offset(kblasHandle_t handle,
char uplo, char diag,
const int n,
TYPE* A, int A_row_off, int A_col_off, int lda, long strideA,
int batchCount,
int *info_array)
{
KBlasWorkspaceState ws_needed;
trtri_batch_wsquery_core<true>( batchCount, n, (kblasWorkspaceState_t)&ws_needed);
if( !ws_needed.isSufficient( &(handle->work_space.allocated_ws_state) ) ){
return KBLAS_InsufficientWorkspace;
}
return Xtrtri_batch_core<TYPE, TYPE*, true>(
handle,
uplo, diag, n,
(TYPE*)A, A_row_off, A_col_off, lda, strideA,
batchCount,
info_array);
}
// workspace needed: device pointers
// A: host pointer to device buffer
int kblas_trtri_batch(kblasHandle_t handle,
char uplo, char diag,
const int n,
TYPE* A, int lda, long strideA,
int batchCount,
int *info_array)
{
return Xtrtri_batch_offset( handle,
uplo, diag, n,
A, 0, 0, lda, strideA,
batchCount,
info_array);
}
// workspace needed: device pointers
// A: host pointer to device buffer
extern "C"
int kblasXtrtri_batch_strided(kblasHandle_t handle,
char uplo, char diag,
const int n,
TYPE* A, int lda, long strideA,
int batchCount,
int *info_array)
{
return Xtrtri_batch_offset( handle,
uplo, diag, n,
A, 0, 0, lda, strideA,
batchCount,
info_array);
}
| fe5bf4e35233c2cd65d3b8b1b2f77e1a42407284.cu | /**
* @copyright (c) 2012- King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
**/
/**
* @file src/batch_triangular/Xtrtri_batch.cu
* KBLAS is a high performance CUDA library for subset of BLAS
* and LAPACK routines optimized for NVIDIA GPUs.
* KBLAS is provided by KAUST.
*
* @version 3.0.0
* @author Ali Charara
* @date 2018-11-14
**/
#include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include "cublas_v2.h"
#include <typeinfo>
#include "kblas.h"
#include "kblas_struct.h"
#include "kblas_operators.h"
#include "kblas_defs.h"
#include "kblas_common.h"
#include "workspace_queries.ch"
//==============================================================================================
#include "Xblas_core.ch"
#include "Xhelper_funcs.ch"
#include "Xtrtri_batch_drivers.cuh"
//==============================================================================================
//Non-Strided form
// workspace needed: device pointers
// A: host pointer to device buffer
int Xtrtri_batch_offset(kblasHandle_t handle,
char uplo, char diag,
const int n,
TYPE** A, int A_row_off, int A_col_off, int lda,
int batchCount,
int *info_array)
{
KBlasWorkspaceState ws_needed;
trtri_batch_wsquery_core<false>( n, batchCount, (kblasWorkspaceState_t)&ws_needed);
if( !ws_needed.isSufficient( &(handle->work_space.allocated_ws_state) ) ){
return KBLAS_InsufficientWorkspace;
}
return Xtrtri_batch_core<TYPE, TYPE**, false>(
handle,
uplo, diag, n,
(TYPE**)A, A_row_off, A_col_off, lda, (long)0,
batchCount,
info_array);
}
// workspace needed: device pointers
// A: host pointer to device buffer
int kblas_trtri_batch(kblasHandle_t handle,
char uplo, char diag,
const int n,
TYPE** A, int lda,
int batchCount,
int *info_array)
{
return Xtrtri_batch_offset( handle,
uplo, diag, n,
A, 0, 0, lda,
batchCount,
info_array);
}
// workspace needed: device pointers
// A: host pointer to device buffer
extern "C"
int kblasXtrtri_batch(kblasHandle_t handle,
char uplo, char diag,
const int n,
TYPE** A, int lda,
int batchCount,
int *info_array)
{
return Xtrtri_batch_offset( handle,
uplo, diag, n,
A, 0, 0, lda,
batchCount,
info_array);
}
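// Illustrative call for the routine above (hypothetical values; assuming BLAS-style
// flags, i.e. uplo in {'L','U'} and diag in {'N','U'}): invert a batch of n x n
// triangular matrices given as an array of device pointers with leading dimension lda:
//   kblasXtrtri_batch(handle, 'L', 'N', n, A_array, lda, batchCount, info_array);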
//==============================================================================================
//Strided form
// template<>
// workspace needed: device pointers
// A: host pointer to device buffer
int Xtrtri_batch_offset(kblasHandle_t handle,
char uplo, char diag,
const int n,
TYPE* A, int A_row_off, int A_col_off, int lda, long strideA,
int batchCount,
int *info_array)
{
KBlasWorkspaceState ws_needed;
trtri_batch_wsquery_core<true>( batchCount, n, (kblasWorkspaceState_t)&ws_needed);
if( !ws_needed.isSufficient( &(handle->work_space.allocated_ws_state) ) ){
return KBLAS_InsufficientWorkspace;
}
return Xtrtri_batch_core<TYPE, TYPE*, true>(
handle,
uplo, diag, n,
(TYPE*)A, A_row_off, A_col_off, lda, strideA,
batchCount,
info_array);
}
// workspace needed: device pointers
// A: host pointer to device buffer
int kblas_trtri_batch(kblasHandle_t handle,
char uplo, char diag,
const int n,
TYPE* A, int lda, long strideA,
int batchCount,
int *info_array)
{
return Xtrtri_batch_offset( handle,
uplo, diag, n,
A, 0, 0, lda, strideA,
batchCount,
info_array);
}
// workspace needed: device pointers
// A: host pointer to device buffer
extern "C"
int kblasXtrtri_batch_strided(kblasHandle_t handle,
char uplo, char diag,
const int n,
TYPE* A, int lda, long strideA,
int batchCount,
int *info_array)
{
return Xtrtri_batch_offset( handle,
uplo, diag, n,
A, 0, 0, lda, strideA,
batchCount,
info_array);
}
|
8a68a16ad5523a1dc6a018767d9af908523cd7d6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/****************************************************************************
*
****************************************************************************/
#include "hpc.h"
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <math.h>
#define BLKDIM 32
typedef struct{
int h,w;
int connectivity;
int *image;
int *result;
}bitmap;
/**
* Reads the input bitmap.
*/
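/*
 * Expected layout on stdin, e.g. a 2x3 image with 4-connectivity:
 *   4
 *   2 3
 *   1 0 1 1 1 0
 */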
void read_input( FILE *f, bitmap *bm )
{
int i, con, h, w, pixel;
int size;
int *pixels;
int *positions;
if ( 1 != fscanf(f, "%d", &con) ) {
fprintf(stderr, "Error: cannot read connectivity\n");
exit(EXIT_FAILURE);
}
if ( con != 4 && con != 8 ) {
fprintf(stderr, "Error: connectivity cannot be %d. The only acceptable values are 4 and 8\n", con);
exit(EXIT_FAILURE);
}
bm->connectivity = con;
if ( 2 != fscanf(f, "%d %d", &h, &w) ){
fprintf(stderr, "Error: cannot to read image sizes\n");
exit(EXIT_FAILURE);
}
assert(h > 0);
assert(w > 0);
bm->h = h;
bm->w = w;
size = h * w;
pixels = (int*)malloc( size * sizeof(int) );
positions = (int*)malloc( size * sizeof(int) );
assert(pixels);
assert(positions);
for (i=0; i<size; i++) {
if (1 != fscanf(f, "%d ", &pixel)) {
fprintf(stderr, "Error: cannot read the value of pixel at %d, %d\n", i, size % i);
exit(EXIT_FAILURE);
}
assert(pixel == 0 || pixel == 1);
pixels[i] = pixel;
positions[i] = i;
}
bm->image = pixels;
bm->result = positions;
}
/**
* Reads the bitmap and creates the equivalences found in rows
*/
__global__ void row_equivalences( int *input, int *res, int w, int h ){
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
//The position is increased by one, so the position 0 is reserved to the background value and it's easier to access the other values later.
int pos = (j + i * w) + 1;
int left = (j - 1) + i * w;
if ( i == 0 && j == 0 ){
res[0] = 0;
}
if ( i < h && j < w ){
if ( j == 0 ){
res[pos] = pos * input[pos - 1];
} else {
if ( input[pos - 1] ){
if ( input[left] ){
res[pos] = pos-1;
} else {
res[pos] = pos;
}
} else {
res[pos] = 0;
}
}
}
}
/**
 * Reads the bitmap and creates the equivalences found in columns
*/
__global__ void col_equivalences( int *input, int *res, int w, int h ){
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
//The position is increased by one, so the position 0 is reserved to the background value and it's easier to access the other values later.
int pos = (j + i * w) + 1;
int upper = j + (i - 1) * w;
if ( i == 0 && j == 0 ){
res[0] = 0;
}
if ( i < h && j < w ){
if ( i == 0 ){
res[pos] = pos * input[pos - 1];
} else {
if ( input[pos - 1] ){
if ( input[upper] ){
res[pos] = pos-(w);
} else {
res[pos] = pos;
}
} else {
res[pos] = 0;
}
}
}
}
/**
* Sets the labels of the result from the vector labels
*/
__global__ void set_labels( int *input, int *result, int *labels, int w, int h ){
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
//The position is increased by one, so the position 0 is reserved to the background value and it's easier to access the other values later.
int pos = (j + i * w) + 1;
int root = labels[pos];
while ( root != labels[root] ){
root = labels[root];
}
labels[pos] = root;
result[pos - 1] = root;
}
/**
 * Merging column values and row values, getting the provisional label values
*/
__global__ void merge( int *input, int *result, int *row, int *col, int *label, int w, int h){
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
//The position is increased by one, so the position 0 is reserved to the background value and it's easier to access the other values later.
int pos = (j + i * w) + 1;
if ( i < h && j < w ){
label[pos] = 0;
if ( input[pos - 1] ){
if ( col[pos] == pos && row[pos] == pos ){
label[pos] = pos;
} else if ( col[pos] && col[pos] != pos){
label[pos] = col[pos];
} else if ( row[pos] && row[pos] != pos ){
label[pos] = row[pos];
}
} else {
result[pos - 1] = 0;
}
}
}
/**
* Checks if root leads to test_root
*/
int check_cycles( int *labels, int test_root, int root ){
while ( labels[root] != root && labels[root] != test_root ){
root = labels[root];
}
return labels[root] != test_root;
}
/**
* Checks the roots and updates if no cycles are found
*/
void update_label( int root_to_update, int root_to_set, int *labels ){
int is_acyclic;
//Searching the root of the label to update
while ( root_to_update != labels[root_to_update] && root_to_set != root_to_update ){
root_to_update = labels[root_to_update];
}
is_acyclic = check_cycles( labels, root_to_update, root_to_set );
if ( labels[root_to_set] != root_to_update && root_to_update != root_to_set && is_acyclic){
labels[root_to_update] = root_to_set;
}
}
/**
* Creates the result matrix with labels from an initial binary matrix in connectivity 4
*/
void label( bitmap *bm ){
int i, j;
int pos;
int upper;
int left;
int upper_left;
int root, my_root;
int size;
int *label;
int *d_input, *d_result;
int *d_row, *d_col, *d_label;
dim3 block( BLKDIM, BLKDIM );
dim3 grid( (bm->w + BLKDIM - 1)/BLKDIM, (bm->h + BLKDIM - 1)/BLKDIM );
size = bm->h * bm->w;
size_t byte_size = size * sizeof(int);
size_t label_size = (size + 1) * sizeof(int);
label = (int*)malloc( (size + 1) * sizeof(int) );
//Input and result contain the matrix as it is while row, col and label have an offset to contain the background value (0) at position 0
cudaSafeCall( hipMalloc((int**) &d_input, byte_size) );
cudaSafeCall( hipMalloc((int**) &d_result, byte_size) );
//The size is increased by 1, so position 0 is reserved to the 0 value (background) and all the values are easily reached later
cudaSafeCall( hipMalloc((int**) &d_row, label_size) );
cudaSafeCall( hipMalloc((int**) &d_col, label_size) );
cudaSafeCall( hipMalloc((int**) &d_label, label_size) );
cudaSafeCall( hipMemcpy(d_input, bm->image, byte_size, hipMemcpyHostToDevice) );
hipLaunchKernelGGL(( row_equivalences), dim3(grid), dim3(block), 0, 0, d_input, d_row, bm->w, bm->h);
cudaCheckError();
hipLaunchKernelGGL(( col_equivalences), dim3(grid), dim3(block), 0, 0, d_input, d_col, bm->w, bm->h);
cudaCheckError();
  //Merging column values and row values, getting the provisional label values
hipLaunchKernelGGL(( merge), dim3(grid), dim3(block), 0, 0, d_input, d_result, d_row, d_col, d_label, bm->w, bm->h);
cudaCheckError();
cudaSafeCall( hipFree( d_row ) );
cudaSafeCall( hipFree( d_col ) );
//Setting provisional labels
hipLaunchKernelGGL(( set_labels), dim3(grid), dim3(block), 0, 0, d_input, d_result, d_label, bm->w, bm->h);
cudaCheckError();
cudaSafeCall( hipMemcpy( label, d_label, label_size, hipMemcpyDeviceToHost ) );
/**
* Refining of the local equivalences
* In particular removing the equivalences of the following type
*
* * * * * *
* * * a * *
* * b a * *
* * * * * *
*
* This is the only type of the equivalences that could be found after the previous passes of the algorithm.
*/
for (i = 1; i < bm->h; i++){
for (j = 1; j < bm->w; j++){
pos = (j + i * bm->w) + 1;
upper = j + (i-1) * bm->w;
left = (j - 1) + i * bm->w;
upper_left = (j-1) + (i-1) * bm->w;
if ( bm->image[pos - 1] ){
my_root = label[pos];
if ( bm->image[upper] && bm->image[left] && !bm->image[upper_left] ){
root = label[pos-1];
update_label(my_root, root, label);
}
}
}
}
cudaSafeCall( hipMemcpy( d_label, label, label_size, hipMemcpyHostToDevice ) );
//Setting final labels
hipLaunchKernelGGL(( set_labels), dim3(grid), dim3(block), 0, 0, d_input, d_result, d_label, bm->w, bm->h);
cudaCheckError();
cudaSafeCall( hipMemcpy(bm->result, d_result, byte_size, hipMemcpyDeviceToHost) );
free(label);
cudaSafeCall( hipFree( d_label ) );
}
/**
* Free the bitmap data structure
*/
void free_bitmap( bitmap *bm )
{
free( bm->image );
free( bm->result );
}
int main( void )
{
bitmap bm;
double tstart, elapsed;
int x = 0, y = 0;
int pos = 0, left = 0, upper = 0;
int val = 0;
int isCorrect = 1;
read_input(stdin, &bm);
tstart = hpc_gettime();
label(&bm);
elapsed = hpc_gettime() - tstart;
fprintf(stderr, "Elapsed time = %f sec\n", elapsed);
//Printing the result
printf("Result:\n");
for ( int i = 0; i < bm.h; i++ ){
for ( int j = 0; j < bm.w; j++ ){
pos = j + i * bm.w;
left = (j - 1) + i * bm.w;
upper = j + (i - 1) * bm.w;
printf("%9d ", bm.result[pos]);
if ( i && j ){
//Checking if the result provided by the algorithm is correct
if ( bm.image[pos] ){
if ( (bm.image[pos] == 1 && bm.result[pos] == 0) || (bm.image[pos] == 0 && bm.result[pos] != 0) ){
x = j;
y = i;
val = bm.result[pos];
isCorrect = 0;
}
if ( bm.result[left] && bm.result[left] != bm.result[pos]){
x = j;
y = i;
val = bm.result[pos];
isCorrect = 0;
}
if ( bm.result[upper] && bm.result[upper] != bm.result[pos]){
x = j;
y = i;
val = bm.result[pos];
isCorrect = 0;
}
}
}
}
printf("\n");
}
if ( isCorrect ){
fprintf(stderr, "Correct\n");
} else {
fprintf(stderr, "Result Wrong. Last wrong value found: X = %d, Y = %d, val = %d\n", x, y, val);
}
free_bitmap(&bm);
return EXIT_SUCCESS;
}
| 8a68a16ad5523a1dc6a018767d9af908523cd7d6.cu | /****************************************************************************
*
****************************************************************************/
#include "hpc.h"
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <math.h>
#define BLKDIM 32
typedef struct{
int h,w;
int connectivity;
int *image;
int *result;
}bitmap;
/**
* Reads the input bitmap.
*/
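/*
 * Expected layout on stdin, e.g. a 2x3 image with 4-connectivity:
 *   4
 *   2 3
 *   1 0 1 1 1 0
 */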
void read_input( FILE *f, bitmap *bm )
{
int i, con, h, w, pixel;
int size;
int *pixels;
int *positions;
if ( 1 != fscanf(f, "%d", &con) ) {
fprintf(stderr, "Error: cannot read connectivity\n");
exit(EXIT_FAILURE);
}
if ( con != 4 && con != 8 ) {
fprintf(stderr, "Error: connectivity cannot be %d. The only acceptable values are 4 and 8\n", con);
exit(EXIT_FAILURE);
}
bm->connectivity = con;
if ( 2 != fscanf(f, "%d %d", &h, &w) ){
fprintf(stderr, "Error: cannot to read image sizes\n");
exit(EXIT_FAILURE);
}
assert(h > 0);
assert(w > 0);
bm->h = h;
bm->w = w;
size = h * w;
pixels = (int*)malloc( size * sizeof(int) );
positions = (int*)malloc( size * sizeof(int) );
assert(pixels);
assert(positions);
for (i=0; i<size; i++) {
if (1 != fscanf(f, "%d ", &pixel)) {
fprintf(stderr, "Error: cannot read the value of pixel at %d, %d\n", i, size % i);
exit(EXIT_FAILURE);
}
assert(pixel == 0 || pixel == 1);
pixels[i] = pixel;
positions[i] = i;
}
bm->image = pixels;
bm->result = positions;
}
/**
* Reads the bitmap and creates the equivalences found in rows
*/
__global__ void row_equivalences( int *input, int *res, int w, int h ){
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
//The position is increased by one, so the position 0 is reserved to the background value and it's easier to access the other values later.
int pos = (j + i * w) + 1;
int left = (j - 1) + i * w;
if ( i == 0 && j == 0 ){
res[0] = 0;
}
if ( i < h && j < w ){
if ( j == 0 ){
res[pos] = pos * input[pos - 1];
} else {
if ( input[pos - 1] ){
if ( input[left] ){
res[pos] = pos-1;
} else {
res[pos] = pos;
}
} else {
res[pos] = 0;
}
}
}
}
/**
 * Reads the bitmap and creates the equivalences found in columns
*/
__global__ void col_equivalences( int *input, int *res, int w, int h ){
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
//The position is increased by one, so the position 0 is reserved to the background value and it's easier to access the other values later.
int pos = (j + i * w) + 1;
int upper = j + (i - 1) * w;
if ( i == 0 && j == 0 ){
res[0] = 0;
}
if ( i < h && j < w ){
if ( i == 0 ){
res[pos] = pos * input[pos - 1];
} else {
if ( input[pos - 1] ){
if ( input[upper] ){
res[pos] = pos-(w);
} else {
res[pos] = pos;
}
} else {
res[pos] = 0;
}
}
}
}
/**
* Sets the labels of the result from the vector labels
*/
__global__ void set_labels( int *input, int *result, int *labels, int w, int h ){
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
//The position is increased by one, so the position 0 is reserved to the background value and it's easier to access the other values later.
int pos = (j + i * w) + 1;
int root = labels[pos];
while ( root != labels[root] ){
root = labels[root];
}
labels[pos] = root;
result[pos - 1] = root;
}
/**
 * Merging column values and row values, getting the provisional label values
*/
__global__ void merge( int *input, int *result, int *row, int *col, int *label, int w, int h){
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
//The position is increased by one, so the position 0 is reserved to the background value and it's easier to access the other values later.
int pos = (j + i * w) + 1;
if ( i < h && j < w ){
label[pos] = 0;
if ( input[pos - 1] ){
if ( col[pos] == pos && row[pos] == pos ){
label[pos] = pos;
} else if ( col[pos] && col[pos] != pos){
label[pos] = col[pos];
} else if ( row[pos] && row[pos] != pos ){
label[pos] = row[pos];
}
} else {
result[pos - 1] = 0;
}
}
}
/**
* Checks if root leads to test_root
*/
int check_cycles( int *labels, int test_root, int root ){
while ( labels[root] != root && labels[root] != test_root ){
root = labels[root];
}
return labels[root] != test_root;
}
/**
* Checks the roots and updates if no cycles are found
*/
void update_label( int root_to_update, int root_to_set, int *labels ){
int is_acyclic;
//Searching the root of the label to update
while ( root_to_update != labels[root_to_update] && root_to_set != root_to_update ){
root_to_update = labels[root_to_update];
}
is_acyclic = check_cycles( labels, root_to_update, root_to_set );
if ( labels[root_to_set] != root_to_update && root_to_update != root_to_set && is_acyclic){
labels[root_to_update] = root_to_set;
}
}
/**
* Creates the result matrix with labels from an initial binary matrix in connectivity 4
*/
void label( bitmap *bm ){
int i, j;
int pos;
int upper;
int left;
int upper_left;
int root, my_root;
int size;
int *label;
int *d_input, *d_result;
int *d_row, *d_col, *d_label;
dim3 block( BLKDIM, BLKDIM );
dim3 grid( (bm->w + BLKDIM - 1)/BLKDIM, (bm->h + BLKDIM - 1)/BLKDIM );
size = bm->h * bm->w;
size_t byte_size = size * sizeof(int);
size_t label_size = (size + 1) * sizeof(int);
label = (int*)malloc( (size + 1) * sizeof(int) );
//Input and result contain the matrix as it is while row, col and label have an offset to contain the background value (0) at position 0
cudaSafeCall( cudaMalloc((int**) &d_input, byte_size) );
cudaSafeCall( cudaMalloc((int**) &d_result, byte_size) );
//The size is increased by 1, so position 0 is reserved to the 0 value (background) and all the values are easily reached later
cudaSafeCall( cudaMalloc((int**) &d_row, label_size) );
cudaSafeCall( cudaMalloc((int**) &d_col, label_size) );
cudaSafeCall( cudaMalloc((int**) &d_label, label_size) );
cudaSafeCall( cudaMemcpy(d_input, bm->image, byte_size, cudaMemcpyHostToDevice) );
row_equivalences<<<grid, block>>>(d_input, d_row, bm->w, bm->h);
cudaCheckError();
col_equivalences<<<grid, block>>>(d_input, d_col, bm->w, bm->h);
cudaCheckError();
  //Merging column values and row values, getting the provisional label values
merge<<<grid, block>>>(d_input, d_result, d_row, d_col, d_label, bm->w, bm->h);
cudaCheckError();
cudaSafeCall( cudaFree( d_row ) );
cudaSafeCall( cudaFree( d_col ) );
//Setting provisional labels
set_labels<<<grid, block>>>(d_input, d_result, d_label, bm->w, bm->h);
cudaCheckError();
cudaSafeCall( cudaMemcpy( label, d_label, label_size, cudaMemcpyDeviceToHost ) );
/**
* Refining of the local equivalences
* In particular removing the equivalences of the following type
*
* * * * * *
* * * a * *
* * b a * *
* * * * * *
*
* This is the only type of the equivalences that could be found after the previous passes of the algorithm.
*/
for (i = 1; i < bm->h; i++){
for (j = 1; j < bm->w; j++){
pos = (j + i * bm->w) + 1;
upper = j + (i-1) * bm->w;
left = (j - 1) + i * bm->w;
upper_left = (j-1) + (i-1) * bm->w;
if ( bm->image[pos - 1] ){
my_root = label[pos];
if ( bm->image[upper] && bm->image[left] && !bm->image[upper_left] ){
root = label[pos-1];
update_label(my_root, root, label);
}
}
}
}
cudaSafeCall( cudaMemcpy( d_label, label, label_size, cudaMemcpyHostToDevice ) );
//Setting final labels
set_labels<<<grid, block>>>(d_input, d_result, d_label, bm->w, bm->h);
cudaCheckError();
cudaSafeCall( cudaMemcpy(bm->result, d_result, byte_size, cudaMemcpyDeviceToHost) );
free(label);
cudaSafeCall( cudaFree( d_label ) );
}
/**
* Free the bitmap data structure
*/
void free_bitmap( bitmap *bm )
{
free( bm->image );
free( bm->result );
}
int main( void )
{
bitmap bm;
double tstart, elapsed;
int x = 0, y = 0;
int pos = 0, left = 0, upper = 0;
int val = 0;
int isCorrect = 1;
read_input(stdin, &bm);
tstart = hpc_gettime();
label(&bm);
elapsed = hpc_gettime() - tstart;
fprintf(stderr, "Elapsed time = %f sec\n", elapsed);
//Printing the result
printf("Result:\n");
for ( int i = 0; i < bm.h; i++ ){
for ( int j = 0; j < bm.w; j++ ){
pos = j + i * bm.w;
left = (j - 1) + i * bm.w;
upper = j + (i - 1) * bm.w;
printf("%9d ", bm.result[pos]);
if ( i && j ){
//Checking if the result provided by the algorithm is correct
if ( bm.image[pos] ){
if ( (bm.image[pos] == 1 && bm.result[pos] == 0) || (bm.image[pos] == 0 && bm.result[pos] != 0) ){
x = j;
y = i;
val = bm.result[pos];
isCorrect = 0;
}
if ( bm.result[left] && bm.result[left] != bm.result[pos]){
x = j;
y = i;
val = bm.result[pos];
isCorrect = 0;
}
if ( bm.result[upper] && bm.result[upper] != bm.result[pos]){
x = j;
y = i;
val = bm.result[pos];
isCorrect = 0;
}
}
}
}
printf("\n");
}
if ( isCorrect ){
fprintf(stderr, "Correct\n");
} else {
fprintf(stderr, "Result Wrong. Last wrong value found: X = %d, Y = %d, val = %d\n", x, y, val);
}
free_bitmap(&bm);
return EXIT_SUCCESS;
}
|
a95077b42a32b008114881f3306f84a077753ad2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
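// x-dimensions of the six dats, cached in constant memory; the _h host shadow below
// tracks the last uploaded values so the symbol is only re-copied when a size changes.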
__constant__ int dims_update_halo_kernel1_r1 [7][1];
static int dims_update_halo_kernel1_r1_h [7][1] = {0};
//user function
__device__
inline void update_halo_kernel1_r1_gpu(ACC<double> &density0,
ACC<double> &energy0,
ACC<double> &energy1,
ACC<double> &u,
ACC<double> &p,
ACC<double> &sd,
const int* fields) {
if(fields[FIELD_DENSITY] == 1) density0(0,0) = density0(-1,0);
if(fields[FIELD_ENERGY0] == 1) energy0(0,0) = energy0(-1,0);
if(fields[FIELD_ENERGY1] == 1) energy1(0,0) = energy1(-1,0);
if(fields[FIELD_U] == 1) u(0,0) = u(-1,0);
if(fields[FIELD_P] == 1) p(0,0) = p(-1,0);
if(fields[FIELD_SD] == 1) sd(0,0) = sd(-1,0);
}
__global__ void ops_update_halo_kernel1_r1(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
const int* __restrict arg6,
int size0,
int size1 ){
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_r1[0][0];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_r1[1][0];
arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_r1[2][0];
arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_r1[3][0];
arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_r1[4][0];
arg5 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_r1[5][0];
if (idx_x < size0 && idx_y < size1) {
ACC<double> argp0(dims_update_halo_kernel1_r1[0][0], arg0);
ACC<double> argp1(dims_update_halo_kernel1_r1[1][0], arg1);
ACC<double> argp2(dims_update_halo_kernel1_r1[2][0], arg2);
ACC<double> argp3(dims_update_halo_kernel1_r1[3][0], arg3);
ACC<double> argp4(dims_update_halo_kernel1_r1[4][0], arg4);
ACC<double> argp5(dims_update_halo_kernel1_r1[5][0], arg5);
update_halo_kernel1_r1_gpu(argp0, argp1, argp2, argp3,
argp4, argp5, arg6);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel1_r1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6) {
#else
void ops_par_loop_update_halo_kernel1_r1_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[7] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,7,range,56)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(56,"update_halo_kernel1_r1");
OPS_kernels[56].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[2];
int end[2];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[2];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 7,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<2; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
int xdim3 = args[3].dat->size[0];
int xdim4 = args[4].dat->size[0];
int xdim5 = args[5].dat->size[0];
if (xdim0 != dims_update_halo_kernel1_r1_h[0][0] || xdim1 != dims_update_halo_kernel1_r1_h[1][0] || xdim2 != dims_update_halo_kernel1_r1_h[2][0] || xdim3 != dims_update_halo_kernel1_r1_h[3][0] || xdim4 != dims_update_halo_kernel1_r1_h[4][0] || xdim5 != dims_update_halo_kernel1_r1_h[5][0]) {
dims_update_halo_kernel1_r1_h[0][0] = xdim0;
dims_update_halo_kernel1_r1_h[1][0] = xdim1;
dims_update_halo_kernel1_r1_h[2][0] = xdim2;
dims_update_halo_kernel1_r1_h[3][0] = xdim3;
dims_update_halo_kernel1_r1_h[4][0] = xdim4;
dims_update_halo_kernel1_r1_h[5][0] = xdim5;
cutilSafeCall(hipMemcpyToSymbol( dims_update_halo_kernel1_r1, dims_update_halo_kernel1_r1_h, sizeof(dims_update_halo_kernel1_r1)));
}
int *arg6h = (int *)arg6.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg6.data = OPS_consts_h + consts_bytes;
arg6.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg6.data)[d] = arg6h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
char *p_a[7];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
p_a[5] = (char *)args[5].data_d + base5;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 7);
ops_halo_exchanges(args,7,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[56].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel1_r1), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(int *)arg6.data_d,x_size, y_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[56].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 7);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
ops_set_halo_dirtybit3(&args[2],range);
ops_set_halo_dirtybit3(&args[3],range);
ops_set_halo_dirtybit3(&args[4],range);
ops_set_halo_dirtybit3(&args[5],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[56].mpi_time += t2-t1;
OPS_kernels[56].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[56].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[56].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[56].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[56].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[56].transfer += ops_compute_transfer(dim, start, end, &arg5);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel1_r1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 56;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 56;
for ( int i=0; i<4; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 7;
desc->args = (ops_arg*)malloc(7*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg6.data,NUM_FIELDS*sizeof(int));
desc->args[6].data = tmp;
desc->function = ops_par_loop_update_halo_kernel1_r1_execute;
if (OPS_diags > 1) {
ops_timing_realloc(56,"update_halo_kernel1_r1");
}
ops_enqueue_kernel(desc);
}
#endif
| a95077b42a32b008114881f3306f84a077753ad2.cu | //
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel1_r1 [7][1];
static int dims_update_halo_kernel1_r1_h [7][1] = {0};
//user function
__device__
inline void update_halo_kernel1_r1_gpu(ACC<double> &density0,
ACC<double> &energy0,
ACC<double> &energy1,
ACC<double> &u,
ACC<double> &p,
ACC<double> &sd,
const int* fields) {
if(fields[FIELD_DENSITY] == 1) density0(0,0) = density0(-1,0);
if(fields[FIELD_ENERGY0] == 1) energy0(0,0) = energy0(-1,0);
if(fields[FIELD_ENERGY1] == 1) energy1(0,0) = energy1(-1,0);
if(fields[FIELD_U] == 1) u(0,0) = u(-1,0);
if(fields[FIELD_P] == 1) p(0,0) = p(-1,0);
if(fields[FIELD_SD] == 1) sd(0,0) = sd(-1,0);
}
__global__ void ops_update_halo_kernel1_r1(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
const int* __restrict arg6,
int size0,
int size1 ){
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_r1[0][0];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_r1[1][0];
arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_r1[2][0];
arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_r1[3][0];
arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_r1[4][0];
arg5 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_r1[5][0];
if (idx_x < size0 && idx_y < size1) {
ACC<double> argp0(dims_update_halo_kernel1_r1[0][0], arg0);
ACC<double> argp1(dims_update_halo_kernel1_r1[1][0], arg1);
ACC<double> argp2(dims_update_halo_kernel1_r1[2][0], arg2);
ACC<double> argp3(dims_update_halo_kernel1_r1[3][0], arg3);
ACC<double> argp4(dims_update_halo_kernel1_r1[4][0], arg4);
ACC<double> argp5(dims_update_halo_kernel1_r1[5][0], arg5);
update_halo_kernel1_r1_gpu(argp0, argp1, argp2, argp3,
argp4, argp5, arg6);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel1_r1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6) {
#else
void ops_par_loop_update_halo_kernel1_r1_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[7] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,7,range,56)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(56,"update_halo_kernel1_r1");
OPS_kernels[56].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[2];
int end[2];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[2];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 7,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<2; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
int xdim3 = args[3].dat->size[0];
int xdim4 = args[4].dat->size[0];
int xdim5 = args[5].dat->size[0];
if (xdim0 != dims_update_halo_kernel1_r1_h[0][0] || xdim1 != dims_update_halo_kernel1_r1_h[1][0] || xdim2 != dims_update_halo_kernel1_r1_h[2][0] || xdim3 != dims_update_halo_kernel1_r1_h[3][0] || xdim4 != dims_update_halo_kernel1_r1_h[4][0] || xdim5 != dims_update_halo_kernel1_r1_h[5][0]) {
dims_update_halo_kernel1_r1_h[0][0] = xdim0;
dims_update_halo_kernel1_r1_h[1][0] = xdim1;
dims_update_halo_kernel1_r1_h[2][0] = xdim2;
dims_update_halo_kernel1_r1_h[3][0] = xdim3;
dims_update_halo_kernel1_r1_h[4][0] = xdim4;
dims_update_halo_kernel1_r1_h[5][0] = xdim5;
cutilSafeCall(cudaMemcpyToSymbol( dims_update_halo_kernel1_r1, dims_update_halo_kernel1_r1_h, sizeof(dims_update_halo_kernel1_r1)));
}
int *arg6h = (int *)arg6.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg6.data = OPS_consts_h + consts_bytes;
arg6.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg6.data)[d] = arg6h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
char *p_a[7];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
p_a[5] = (char *)args[5].data_d + base5;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 7);
ops_halo_exchanges(args,7,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[56].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0)
ops_update_halo_kernel1_r1<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(int *)arg6.data_d,x_size, y_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[56].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 7);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
ops_set_halo_dirtybit3(&args[2],range);
ops_set_halo_dirtybit3(&args[3],range);
ops_set_halo_dirtybit3(&args[4],range);
ops_set_halo_dirtybit3(&args[5],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[56].mpi_time += t2-t1;
OPS_kernels[56].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[56].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[56].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[56].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[56].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[56].transfer += ops_compute_transfer(dim, start, end, &arg5);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel1_r1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 56;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 56;
for ( int i=0; i<4; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 7;
desc->args = (ops_arg*)malloc(7*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg6.data,NUM_FIELDS*sizeof(int));
desc->args[6].data = tmp;
desc->function = ops_par_loop_update_halo_kernel1_r1_execute;
if (OPS_diags > 1) {
ops_timing_realloc(56,"update_halo_kernel1_r1");
}
ops_enqueue_kernel(desc);
}
#endif
|
979910ebba6188943588c23f048dfd96df06b857.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Compile command: nvcc -o <output name> -arch=sm_75 -lcublas -lcurand <source file>.cu
// Source: http://devblogs.nvidia.com/parallelforall
// This file runs both WMMA and cuBLAS,
// and compares the result computed with WMMA against the result computed with cuBLAS.
//
// It measures TFLOPS for cuBLAS both when it runs on tensor cores and when it runs on CUDA cores,
// and exercises the basic WMMA operations.
//
// In addition, the results computed on the CUDA cores and on the tensor cores are kept so they can be compared.
#include <stdio.h>
#include <hiprand/hiprand.h>
#include <rocblas.h>
// Define some error checking macros.
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); }
void cudaErrCheck_(hipError_t stat, const char *file, int line) {
if (stat != hipSuccess) {
fprintf(stderr, "CUDA Error: %s %s %d\n", hipGetErrorString(stat), file, line);
}
}
#define cublasErrCheck(stat) { cublasErrCheck_((stat), __FILE__, __LINE__); }
void cublasErrCheck_(hipblasStatus_t stat, const char *file, int line) {
if (stat != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "cuBLAS Error: %d %s %d\n", stat, file, line);
}
}
#define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); }
void curandErrCheck_(hiprandStatus_t stat, const char *file, int line) {
if (stat != HIPRAND_STATUS_SUCCESS) {
fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line);
}
}
#include <mma.h>
using namespace nvcuda;
// Must be multiples of 16 for wmma code to work
#define MATRIX_M 16384
#define MATRIX_N 16384
#define MATRIX_K 16384
// The only dimensions currently supported by WMMA
const int WMMA_M = 16;
const int WMMA_N = 16;
const int WMMA_K = 16;
// Performs an MxNxK GEMM (C=alpha*A*B + beta*C) assuming:
// 1) Matrices are packed in memory.
// 2) M, N and K are multiples of 16.
// 3) Neither A nor B are transposed.
// Note: This is NOT a high performance example but is for demonstration purposes only
// For a high performance code please use the GEMM provided in cuBLAS.
__global__ void wmma_example(half *a, half *b, float *c, int M, int N, int K, float alpha, float beta) {
// Leading dimensions. Packed with no transpositions.
int lda = M;
int ldb = K;
int ldc = M;
// Tile using a 2D grid
int warpM = (blockIdx.x * blockDim.x + threadIdx.x) / warpSize;
int warpN = (blockIdx.y * blockDim.y + threadIdx.y);
// Declare the fragments
wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, half, wmma::col_major> a_frag;
wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, half, wmma::col_major> b_frag;
wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, float> acc_frag;
wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, float> c_frag;
wmma::fill_fragment(acc_frag, 0.0f);
// Loop over k
for (int i = 0; i < K; i += WMMA_K) {
int aRow = warpM * WMMA_M;
int aCol = i;
int bRow = i;
int bCol = warpN * WMMA_N;
// Bounds checking
if (aRow < M && aCol < K && bRow < K && bCol < N) {
// Load the inputs
wmma::load_matrix_sync(a_frag, a + aRow + aCol * lda, lda);
wmma::load_matrix_sync(b_frag, b + bRow + bCol * ldb, ldb);
// Perform the matrix multiplication
wmma::mma_sync(acc_frag, a_frag, b_frag, acc_frag);
}
}
// Load in the current value of c, scale it by beta, and add this our result scaled by alpha
int cRow = warpM * WMMA_M;
int cCol = warpN * WMMA_N;
if (cRow < M && cCol < N) {
wmma::load_matrix_sync(c_frag, c + cRow + cCol * ldc, ldc, wmma::mem_col_major);
for(int i=0; i < c_frag.num_elements; i++) {
c_frag.x[i] = alpha * acc_frag.x[i] + beta * c_frag.x[i];
}
// Store the output
wmma::store_matrix_sync(c + cRow + cCol * ldc, c_frag, ldc, wmma::mem_col_major);
}
}
__global__ void convertFp32ToFp16 (half *out, float *in, int n) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < n) {
out[idx] = in[idx];
}
}
int main(int argc, char* argv[]) {
float *a_fp32;
float *b_fp32;
half *a_fp16;
half *b_fp16;
float *c;
float *c_cublas;
float *c_wmma;
float *c_host_cublas;
float *c_host_wmma;
hiprandGenerator_t gen;
hipblasHandle_t cublasHandle;
hipEvent_t startWMMA;
hipEvent_t stopWMMA;
hipEvent_t startcublas;
hipEvent_t stopcublas;
cudaErrCheck(hipEventCreate(&startWMMA));
cudaErrCheck(hipEventCreate(&stopWMMA));
cudaErrCheck(hipEventCreate(&startcublas));
cudaErrCheck(hipEventCreate(&stopcublas));
cublasErrCheck(hipblasCreate(&cublasHandle));
// Use tensor cores
// To run on CUDA cores instead (change #1), replace CUBLAS_TENSOR_OP_MATH with CUBLAS_DEFAULT_MATH.
cublasErrCheck(cublasSetMathMode(cublasHandle, CUBLAS_TENSOR_OP_MATH));//
cudaErrCheck(hipMalloc((void**)&a_fp32, MATRIX_M * MATRIX_K * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&b_fp32, MATRIX_K * MATRIX_N * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&a_fp16, MATRIX_M * MATRIX_K * sizeof(half)));
cudaErrCheck(hipMalloc((void**)&b_fp16, MATRIX_K * MATRIX_N * sizeof(half)));
cudaErrCheck(hipMalloc((void**)&c, MATRIX_M * MATRIX_N * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&c_cublas, MATRIX_M * MATRIX_N * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&c_wmma, MATRIX_M * MATRIX_N * sizeof(float)));
c_host_cublas = (float*)malloc(MATRIX_M * MATRIX_N * sizeof(float));
c_host_wmma = (float*)malloc(MATRIX_M * MATRIX_N * sizeof(float));
curandErrCheck(hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT));
curandErrCheck(hiprandSetPseudoRandomGeneratorSeed(gen, 1337ULL));
curandErrCheck(hiprandGenerateUniform(gen, a_fp32, MATRIX_M * MATRIX_K));
curandErrCheck(hiprandGenerateUniform(gen, b_fp32, MATRIX_K * MATRIX_N));
// hiprand doesn't currently support fp16 so we generate in fp32 and convert to fp16.
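// (one thread per element: the (count + 255) / 256 grid size is a ceiling division so every
// element is covered by a block of 256 threads)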
hipLaunchKernelGGL(( convertFp32ToFp16) , dim3((MATRIX_M * MATRIX_K + 255) / 256), dim3(256) , 0, 0, a_fp16, a_fp32, MATRIX_M * MATRIX_K);
hipLaunchKernelGGL(( convertFp32ToFp16) , dim3((MATRIX_K * MATRIX_N + 255) / 256), dim3(256) , 0, 0, b_fp16, b_fp32, MATRIX_K * MATRIX_N);
curandErrCheck(hiprandGenerateUniform(gen, c, MATRIX_M * MATRIX_N));
curandErrCheck(hiprandDestroyGenerator(gen));
cudaErrCheck(hipMemcpy(c_cublas, c, MATRIX_M * MATRIX_N * sizeof(float), hipMemcpyDeviceToDevice));
cudaErrCheck(hipMemcpy(c_wmma, c, MATRIX_M * MATRIX_N * sizeof(float), hipMemcpyDeviceToDevice));
float alpha = 2.0f;
float beta = 2.0f;
printf("\nM = %d, N = %d, K = %d. alpha = %f, beta = %f\n\n", MATRIX_M, MATRIX_N, MATRIX_K, alpha, beta);
// First: using WMMA
dim3 gridDim;
dim3 blockDim;
// blockDim.x must be a multiple of warpSize
// 128x4 means we have 16 warps and a block computes a 64x64 output tile
blockDim.x = 128;
blockDim.y = 4;
gridDim.x = (MATRIX_M + (WMMA_M * blockDim.x / 32 - 1)) / (WMMA_M * blockDim.x / 32);
gridDim.y = (MATRIX_N + WMMA_N * blockDim.y - 1) / (WMMA_N * blockDim.y);
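// Note: each warp produces one WMMA_M x WMMA_N = 16x16 tile of C, so with 128/32 = 4 warps
// along x and 4 warps along y a block covers a 64x64 output tile; the grid sizes above are
// just ceiling divisions of MATRIX_M and MATRIX_N by 64.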
printf("Running with wmma...\n");
cudaErrCheck(hipEventRecord(startWMMA));
hipLaunchKernelGGL(( wmma_example) , dim3(gridDim), dim3(blockDim) , 0, 0, a_fp16, b_fp16, c_wmma, MATRIX_M, MATRIX_N, MATRIX_K, alpha, beta);
cudaErrCheck(hipEventRecord(stopWMMA));
// Now using cuBLAS
printf("Running with cuBLAS...\n");
cudaErrCheck(hipEventRecord(startcublas));
// To run on CUDA cores instead (change #2):
// when using CUDA cores, replace CUBLAS_GEMM_DFALT_TENSOR_OP below with CUBLAS_GEMM_DEFAULT.
cublasErrCheck(hipblasGemmEx(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N,
MATRIX_M, MATRIX_N, MATRIX_K,
&alpha,
a_fp16, HIP_R_16F, MATRIX_M,
b_fp16, HIP_R_16F, MATRIX_K,
&beta,
c_cublas, HIP_R_32F, MATRIX_M,
HIP_R_32F, CUBLAS_GEMM_DFALT_TENSOR_OP));
cudaErrCheck(hipEventRecord(stopcublas));
// Error checking
printf("\nChecking results...\n");
cudaErrCheck(hipMemcpy(c_host_wmma, c_wmma, MATRIX_M * MATRIX_N * sizeof(float), hipMemcpyDeviceToHost));
cudaErrCheck(hipMemcpy(c_host_cublas, c_cublas, MATRIX_M * MATRIX_N * sizeof(float), hipMemcpyDeviceToHost));
// This block compares the cuBLAS result with the WMMA result:
// c_host_wmma[i] holds the WMMA result and
// c_host_cublas[i] holds the cuBLAS result.
// Comparing a result computed on mixed-precision tensor cores against one computed with
// single-precision CUDA cores will always show some error, so a tolerance is used:
// 0.01% relative tolerance. 1e-5 absolute tolerance.
int errors = 0;
for (int i = 0; i < MATRIX_M * MATRIX_N; i++) {
float v1 = c_host_wmma[i];
float v2 = c_host_cublas[i];
if (v1 / v2 > 1.0001 || v2 / v1 > 1.0001 || abs(v1 - v2) > 1e-5) {
errors++;
if (errors < 10) printf("%f %f\n", v1, v2);
}
}
if (errors > 0) {
printf("WMMA does not agree with cuBLAS! %d errors!\n", errors);
}
else {
printf("Results verified: cublas and WMMA agree.\n\n");
float wmmaTime;
float cublasTime;
cudaErrCheck(hipEventSynchronize(stopWMMA));
cudaErrCheck(hipEventSynchronize(stopcublas));
cudaErrCheck(hipEventElapsedTime(&wmmaTime, startWMMA, stopWMMA));
cudaErrCheck(hipEventElapsedTime(&cublasTime, startcublas, stopcublas));
// Print the elapsed times and the computed TFLOPS
printf("wmma took %fms\n", wmmaTime);
printf("[+] TFLOPS: %.2f\n", ((double)MATRIX_M * MATRIX_N * MATRIX_K * 2) / wmmaTime / 1e9);
printf("cublas took %fms\n", cublasTime);
printf("[+] TFLOPS: %.2f\n", ((double)MATRIX_M * MATRIX_N * MATRIX_K * 2) / cublasTime / 1e9);
printf("\nFor a faster code using wmma you should check out the cudaTensorCoreGemm sample in the CUDA Toolkit.\nThis code was written as a demo only!\n\n");
}
cudaErrCheck(hipEventDestroy(startWMMA));
cudaErrCheck(hipEventDestroy(stopWMMA));
cudaErrCheck(hipEventDestroy(startcublas));
cudaErrCheck(hipEventDestroy(stopcublas));
cudaErrCheck(hipFree(a_fp32));
cudaErrCheck(hipFree(b_fp32));
cudaErrCheck(hipFree(a_fp16));
cudaErrCheck(hipFree(b_fp16));
cudaErrCheck(hipFree(c));
cudaErrCheck(hipFree(c_cublas));
cudaErrCheck(hipFree(c_wmma));
free(c_host_cublas);
free(c_host_wmma);
cudaErrCheck(hipDeviceReset());
return 0;
}
| 979910ebba6188943588c23f048dfd96df06b857.cu | // 컴파일 명령어 nvcc -o 결과파일명 -arch=sm_75 -lcublas -lcurand 코드파일.cu
// Compile command: nvcc -o <output name> -arch=sm_75 -lcublas -lcurand <source file>.cu
// Source: http://devblogs.nvidia.com/parallelforall
// This file runs both WMMA and cuBLAS,
// and compares the result computed with WMMA against the result computed with cuBLAS.
//
// It measures TFLOPS for cuBLAS both when it runs on tensor cores and when it runs on CUDA cores,
// and exercises the basic WMMA operations.
//
// In addition, the results computed on the CUDA cores and on the tensor cores are kept so they can be compared.
#include <curand.h>
#include <cublas_v2.h>
// Define some error checking macros.
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); }
void cudaErrCheck_(cudaError_t stat, const char *file, int line) {
if (stat != cudaSuccess) {
fprintf(stderr, "CUDA Error: %s %s %d\n", cudaGetErrorString(stat), file, line);
}
}
#define cublasErrCheck(stat) { cublasErrCheck_((stat), __FILE__, __LINE__); }
void cublasErrCheck_(cublasStatus_t stat, const char *file, int line) {
if (stat != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "cuBLAS Error: %d %s %d\n", stat, file, line);
}
}
#define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); }
void curandErrCheck_(curandStatus_t stat, const char *file, int line) {
if (stat != CURAND_STATUS_SUCCESS) {
fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line);
}
}
#include <mma.h>
using namespace nvcuda;
// Must be multiples of 16 for wmma code to work
#define MATRIX_M 16384
#define MATRIX_N 16384
#define MATRIX_K 16384
// The only dimensions currently supported by WMMA
const int WMMA_M = 16;
const int WMMA_N = 16;
const int WMMA_K = 16;
// Performs an MxNxK GEMM (C=alpha*A*B + beta*C) assuming:
// 1) Matrices are packed in memory.
// 2) M, N and K are multiples of 16.
// 3) Neither A nor B are transposed.
// Note: This is NOT a high performance example but is for demonstration purposes only
// For a high performance code please use the GEMM provided in cuBLAS.
__global__ void wmma_example(half *a, half *b, float *c, int M, int N, int K, float alpha, float beta) {
// Leading dimensions. Packed with no transpositions.
int lda = M;
int ldb = K;
int ldc = M;
// Tile using a 2D grid
int warpM = (blockIdx.x * blockDim.x + threadIdx.x) / warpSize;
int warpN = (blockIdx.y * blockDim.y + threadIdx.y);
// Declare the fragments
wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, half, wmma::col_major> a_frag;
wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, half, wmma::col_major> b_frag;
wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, float> acc_frag;
wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, float> c_frag;
wmma::fill_fragment(acc_frag, 0.0f);
// Loop over k
for (int i = 0; i < K; i += WMMA_K) {
int aRow = warpM * WMMA_M;
int aCol = i;
int bRow = i;
int bCol = warpN * WMMA_N;
// Bounds checking
if (aRow < M && aCol < K && bRow < K && bCol < N) {
// Load the inputs
wmma::load_matrix_sync(a_frag, a + aRow + aCol * lda, lda);
wmma::load_matrix_sync(b_frag, b + bRow + bCol * ldb, ldb);
// Perform the matrix multiplication
wmma::mma_sync(acc_frag, a_frag, b_frag, acc_frag);
}
}
// Load in the current value of c, scale it by beta, and add this our result scaled by alpha
int cRow = warpM * WMMA_M;
int cCol = warpN * WMMA_N;
if (cRow < M && cCol < N) {
wmma::load_matrix_sync(c_frag, c + cRow + cCol * ldc, ldc, wmma::mem_col_major);
for(int i=0; i < c_frag.num_elements; i++) {
c_frag.x[i] = alpha * acc_frag.x[i] + beta * c_frag.x[i];
}
// Store the output
wmma::store_matrix_sync(c + cRow + cCol * ldc, c_frag, ldc, wmma::mem_col_major);
}
}
__global__ void convertFp32ToFp16 (half *out, float *in, int n) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < n) {
out[idx] = in[idx];
}
}
int main(int argc, char* argv[]) {
float *a_fp32;
float *b_fp32;
half *a_fp16;
half *b_fp16;
float *c;
float *c_cublas;
float *c_wmma;
float *c_host_cublas;
float *c_host_wmma;
curandGenerator_t gen;
cublasHandle_t cublasHandle;
cudaEvent_t startWMMA;
cudaEvent_t stopWMMA;
cudaEvent_t startcublas;
cudaEvent_t stopcublas;
cudaErrCheck(cudaEventCreate(&startWMMA));
cudaErrCheck(cudaEventCreate(&stopWMMA));
cudaErrCheck(cudaEventCreate(&startcublas));
cudaErrCheck(cudaEventCreate(&stopcublas));
cublasErrCheck(cublasCreate(&cublasHandle));
// Use tensor cores
// To run on CUDA cores instead (change #1), replace CUBLAS_TENSOR_OP_MATH with CUBLAS_DEFAULT_MATH.
cublasErrCheck(cublasSetMathMode(cublasHandle, CUBLAS_TENSOR_OP_MATH));//
cudaErrCheck(cudaMalloc((void**)&a_fp32, MATRIX_M * MATRIX_K * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&b_fp32, MATRIX_K * MATRIX_N * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&a_fp16, MATRIX_M * MATRIX_K * sizeof(half)));
cudaErrCheck(cudaMalloc((void**)&b_fp16, MATRIX_K * MATRIX_N * sizeof(half)));
cudaErrCheck(cudaMalloc((void**)&c, MATRIX_M * MATRIX_N * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&c_cublas, MATRIX_M * MATRIX_N * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&c_wmma, MATRIX_M * MATRIX_N * sizeof(float)));
c_host_cublas = (float*)malloc(MATRIX_M * MATRIX_N * sizeof(float));
c_host_wmma = (float*)malloc(MATRIX_M * MATRIX_N * sizeof(float));
curandErrCheck(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT));
curandErrCheck(curandSetPseudoRandomGeneratorSeed(gen, 1337ULL));
curandErrCheck(curandGenerateUniform(gen, a_fp32, MATRIX_M * MATRIX_K));
curandErrCheck(curandGenerateUniform(gen, b_fp32, MATRIX_K * MATRIX_N));
// curand doesn't currently support fp16 so we generate in fp32 and convert to fp16.
convertFp32ToFp16 <<< (MATRIX_M * MATRIX_K + 255) / 256, 256 >>> (a_fp16, a_fp32, MATRIX_M * MATRIX_K);
convertFp32ToFp16 <<< (MATRIX_K * MATRIX_N + 255) / 256, 256 >>> (b_fp16, b_fp32, MATRIX_K * MATRIX_N);
curandErrCheck(curandGenerateUniform(gen, c, MATRIX_M * MATRIX_N));
curandErrCheck(curandDestroyGenerator(gen));
cudaErrCheck(cudaMemcpy(c_cublas, c, MATRIX_M * MATRIX_N * sizeof(float), cudaMemcpyDeviceToDevice));
cudaErrCheck(cudaMemcpy(c_wmma, c, MATRIX_M * MATRIX_N * sizeof(float), cudaMemcpyDeviceToDevice));
float alpha = 2.0f;
float beta = 2.0f;
printf("\nM = %d, N = %d, K = %d. alpha = %f, beta = %f\n\n", MATRIX_M, MATRIX_N, MATRIX_K, alpha, beta);
// First: using WMMA
dim3 gridDim;
dim3 blockDim;
// blockDim.x must be a multiple of warpSize
// 128x4 means we have 16 warps and a block computes a 64x64 output tile
blockDim.x = 128;
blockDim.y = 4;
gridDim.x = (MATRIX_M + (WMMA_M * blockDim.x / 32 - 1)) / (WMMA_M * blockDim.x / 32);
gridDim.y = (MATRIX_N + WMMA_N * blockDim.y - 1) / (WMMA_N * blockDim.y);
printf("Running with wmma...\n");
cudaErrCheck(cudaEventRecord(startWMMA));
wmma_example <<< gridDim, blockDim >>> (a_fp16, b_fp16, c_wmma, MATRIX_M, MATRIX_N, MATRIX_K, alpha, beta);
cudaErrCheck(cudaEventRecord(stopWMMA));
// Now using cuBLAS
printf("Running with cuBLAS...\n");
cudaErrCheck(cudaEventRecord(startcublas));
// To run on CUDA cores instead (change #2):
// when using CUDA cores, replace CUBLAS_GEMM_DFALT_TENSOR_OP below with CUBLAS_GEMM_DEFAULT.
cublasErrCheck(cublasGemmEx(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N,
MATRIX_M, MATRIX_N, MATRIX_K,
&alpha,
a_fp16, CUDA_R_16F, MATRIX_M,
b_fp16, CUDA_R_16F, MATRIX_K,
&beta,
c_cublas, CUDA_R_32F, MATRIX_M,
CUDA_R_32F, CUBLAS_GEMM_DFALT_TENSOR_OP));
cudaErrCheck(cudaEventRecord(stopcublas));
// Error checking
printf("\nChecking results...\n");
cudaErrCheck(cudaMemcpy(c_host_wmma, c_wmma, MATRIX_M * MATRIX_N * sizeof(float), cudaMemcpyDeviceToHost));
cudaErrCheck(cudaMemcpy(c_host_cublas, c_cublas, MATRIX_M * MATRIX_N * sizeof(float), cudaMemcpyDeviceToHost));
// This block compares the cuBLAS result with the WMMA result:
// c_host_wmma[i] holds the WMMA result and
// c_host_cublas[i] holds the cuBLAS result.
// Comparing a result computed on mixed-precision tensor cores against one computed with
// single-precision CUDA cores will always show some error, so a tolerance is used:
// 0.01% relative tolerance. 1e-5 absolute tolerance.
int errors = 0;
for (int i = 0; i < MATRIX_M * MATRIX_N; i++) {
float v1 = c_host_wmma[i];
float v2 = c_host_cublas[i];
if (v1 / v2 > 1.0001 || v2 / v1 > 1.0001 || abs(v1 - v2) > 1e-5) {
errors++;
if (errors < 10) printf("%f %f\n", v1, v2);
}
}
if (errors > 0) {
printf("WMMA does not agree with cuBLAS! %d errors!\n", errors);
}
else {
printf("Results verified: cublas and WMMA agree.\n\n");
float wmmaTime;
float cublasTime;
cudaErrCheck(cudaEventSynchronize(stopWMMA));
cudaErrCheck(cudaEventSynchronize(stopcublas));
cudaErrCheck(cudaEventElapsedTime(&wmmaTime, startWMMA, stopWMMA));
cudaErrCheck(cudaEventElapsedTime(&cublasTime, startcublas, stopcublas));
// Print the elapsed times and the computed TFLOPS
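// (dividing 2*M*N*K flops by a time in ms and by 1e9 is the same as dividing by a time in
// seconds and by 1e12, so the printed value is in TFLOPS)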
printf("wmma took %fms\n", wmmaTime);
printf("[+] TFLOPS: %.2f\n", ((double)MATRIX_M * MATRIX_N * MATRIX_K * 2) / wmmaTime / 1e9);
printf("cublas took %fms\n", cublasTime);
printf("[+] TFLOPS: %.2f\n", ((double)MATRIX_M * MATRIX_N * MATRIX_K * 2) / cublasTime / 1e9);
printf("\nFor a faster code using wmma you should check out the cudaTensorCoreGemm sample in the CUDA Toolkit.\nThis code was written as a demo only!\n\n");
}
cudaErrCheck(cudaEventDestroy(startWMMA));
cudaErrCheck(cudaEventDestroy(stopWMMA));
cudaErrCheck(cudaEventDestroy(startcublas));
cudaErrCheck(cudaEventDestroy(stopcublas));
cudaErrCheck(cudaFree(a_fp32));
cudaErrCheck(cudaFree(b_fp32));
cudaErrCheck(cudaFree(a_fp16));
cudaErrCheck(cudaFree(b_fp16));
cudaErrCheck(cudaFree(c));
cudaErrCheck(cudaFree(c_cublas));
cudaErrCheck(cudaFree(c_wmma));
free(c_host_cublas);
free(c_host_wmma);
cudaErrCheck(cudaDeviceReset());
return 0;
}
|
4d3b03e8797fdbc084b6921527be36c01e4dfb29.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_xvel_minus_4_left;
int xdim0_update_halo_kernel2_xvel_minus_4_left_h = -1;
__constant__ int ydim0_update_halo_kernel2_xvel_minus_4_left;
int ydim0_update_halo_kernel2_xvel_minus_4_left_h = -1;
__constant__ int xdim1_update_halo_kernel2_xvel_minus_4_left;
int xdim1_update_halo_kernel2_xvel_minus_4_left_h = -1;
__constant__ int ydim1_update_halo_kernel2_xvel_minus_4_left;
int ydim1_update_halo_kernel2_xvel_minus_4_left_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_xvel_minus_4_left * (y) + \
xdim0_update_halo_kernel2_xvel_minus_4_left * \
ydim0_update_halo_kernel2_xvel_minus_4_left * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_xvel_minus_4_left * (y) + \
xdim1_update_halo_kernel2_xvel_minus_4_left * \
ydim1_update_halo_kernel2_xvel_minus_4_left * (z))
// user function
__device__
inline void
update_halo_kernel2_xvel_minus_4_left_gpu(double *xvel0, double *xvel1,
const int *fields) {
if (fields[FIELD_XVEL0] == 1)
xvel0[OPS_ACC0(0, 0, 0)] = -xvel0[OPS_ACC0(4, 0, 0)];
if (fields[FIELD_XVEL1] == 1)
xvel1[OPS_ACC1(0, 0, 0)] = -xvel1[OPS_ACC1(4, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_xvel_minus_4_left(
double *__restrict arg0, double *__restrict arg1,
const int *__restrict arg2, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel2_xvel_minus_4_left +
idx_z * 1 * 1 * xdim0_update_halo_kernel2_xvel_minus_4_left *
ydim0_update_halo_kernel2_xvel_minus_4_left;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel2_xvel_minus_4_left +
idx_z * 1 * 1 * xdim1_update_halo_kernel2_xvel_minus_4_left *
ydim1_update_halo_kernel2_xvel_minus_4_left;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_xvel_minus_4_left_gpu(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel2_xvel_minus_4_left(
char const *name, ops_block block, int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 72))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(72, "update_halo_kernel2_xvel_minus_4_left");
OPS_kernels[72].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_xvel_minus_4_left_h ||
ydim0 != ydim0_update_halo_kernel2_xvel_minus_4_left_h ||
xdim1 != xdim1_update_halo_kernel2_xvel_minus_4_left_h ||
ydim1 != ydim1_update_halo_kernel2_xvel_minus_4_left_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel2_xvel_minus_4_left, &xdim0,
sizeof(int));
xdim0_update_halo_kernel2_xvel_minus_4_left_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel2_xvel_minus_4_left, &ydim0,
sizeof(int));
ydim0_update_halo_kernel2_xvel_minus_4_left_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel2_xvel_minus_4_left, &xdim1,
sizeof(int));
xdim1_update_halo_kernel2_xvel_minus_4_left_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel2_xvel_minus_4_left, &ydim1,
sizeof(int));
ydim1_update_halo_kernel2_xvel_minus_4_left_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[72].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel2_xvel_minus_4_left), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[72].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[72].mpi_time += t2 - t1;
OPS_kernels[72].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[72].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
| 4d3b03e8797fdbc084b6921527be36c01e4dfb29.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_xvel_minus_4_left;
int xdim0_update_halo_kernel2_xvel_minus_4_left_h = -1;
__constant__ int ydim0_update_halo_kernel2_xvel_minus_4_left;
int ydim0_update_halo_kernel2_xvel_minus_4_left_h = -1;
__constant__ int xdim1_update_halo_kernel2_xvel_minus_4_left;
int xdim1_update_halo_kernel2_xvel_minus_4_left_h = -1;
__constant__ int ydim1_update_halo_kernel2_xvel_minus_4_left;
int ydim1_update_halo_kernel2_xvel_minus_4_left_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_xvel_minus_4_left * (y) + \
xdim0_update_halo_kernel2_xvel_minus_4_left * \
ydim0_update_halo_kernel2_xvel_minus_4_left * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_xvel_minus_4_left * (y) + \
xdim1_update_halo_kernel2_xvel_minus_4_left * \
ydim1_update_halo_kernel2_xvel_minus_4_left * (z))
// user function
__device__
inline void
update_halo_kernel2_xvel_minus_4_left_gpu(double *xvel0, double *xvel1,
const int *fields) {
if (fields[FIELD_XVEL0] == 1)
xvel0[OPS_ACC0(0, 0, 0)] = -xvel0[OPS_ACC0(4, 0, 0)];
if (fields[FIELD_XVEL1] == 1)
xvel1[OPS_ACC1(0, 0, 0)] = -xvel1[OPS_ACC1(4, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_xvel_minus_4_left(
double *__restrict arg0, double *__restrict arg1,
const int *__restrict arg2, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel2_xvel_minus_4_left +
idx_z * 1 * 1 * xdim0_update_halo_kernel2_xvel_minus_4_left *
ydim0_update_halo_kernel2_xvel_minus_4_left;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel2_xvel_minus_4_left +
idx_z * 1 * 1 * xdim1_update_halo_kernel2_xvel_minus_4_left *
ydim1_update_halo_kernel2_xvel_minus_4_left;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_xvel_minus_4_left_gpu(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel2_xvel_minus_4_left(
char const *name, ops_block block, int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 72))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(72, "update_halo_kernel2_xvel_minus_4_left");
OPS_kernels[72].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_xvel_minus_4_left_h ||
ydim0 != ydim0_update_halo_kernel2_xvel_minus_4_left_h ||
xdim1 != xdim1_update_halo_kernel2_xvel_minus_4_left_h ||
ydim1 != ydim1_update_halo_kernel2_xvel_minus_4_left_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel2_xvel_minus_4_left, &xdim0,
sizeof(int));
xdim0_update_halo_kernel2_xvel_minus_4_left_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel2_xvel_minus_4_left, &ydim0,
sizeof(int));
ydim0_update_halo_kernel2_xvel_minus_4_left_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel2_xvel_minus_4_left, &xdim1,
sizeof(int));
xdim1_update_halo_kernel2_xvel_minus_4_left_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel2_xvel_minus_4_left, &ydim1,
sizeof(int));
ydim1_update_halo_kernel2_xvel_minus_4_left_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[72].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel2_xvel_minus_4_left<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[72].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[72].mpi_time += t2 - t1;
OPS_kernels[72].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[72].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
|
e6a836800c8fb4650b842e6788569f8bbb36dd3d.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
Please check example 07 and 08 for the basics of tensor op gemm kernels. On NVIDIA Ampere
architecture, most concept still holds. The two main differences are
1. NVIDIA Ampere architecture introduces a new series of tensor core instructions (see
include/cutlass/arch/mma_sm80.h) which are more efficient on Ampere.
2. NVIDIA Ampere architecture uses cp_async() to build multistage software pipeline to better hide
latency (see include/cutlass/gemm/threadblock/mma_multistage.h)
Moreover, NVIDIA Ampere architecture starts supporting tfloat32 (see include/cutlass/tfloat32.h)
data types in tensor cores. One big advantage is that we can load in fp32 data and convert them
implicitly to tf32 inside the GEMM kernel which means no change is needed to accelerate traditional
fp32 data by using NVIDIA Ampere architecture.
*/
#include <iostream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Result structure
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
hipError_t error;
bool passed;
//
// Methods
//
Result(
double runtime_ms = 0,
double gflops = 0,
cutlass::Status status = cutlass::Status::kSuccess,
hipError_t error = hipSuccess
):
runtime_ms(runtime_ms), gflops(gflops), status(status), error(error), passed(true) { }
};
///////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
cutlass::gemm::GemmCoord problem_size;
int batch_count;
float alpha;
float beta;
bool reference_check;
int iterations;
Options():
help(false),
problem_size({5120, 4096, 4096}),
batch_count(1),
reference_check(true),
iterations(20),
alpha(1),
beta() { }
bool valid() {
return true;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
cmd.get_cmd_line_argument("m", problem_size.m());
cmd.get_cmd_line_argument("n", problem_size.n());
cmd.get_cmd_line_argument("k", problem_size.k());
cmd.get_cmd_line_argument("alpha", alpha);
cmd.get_cmd_line_argument("beta", beta);
cmd.get_cmd_line_argument("iterations", iterations);
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "14_ampere_tf32_tensorop_gemm example\n\n"
<< " This example uses the CUTLASS Library to execute TF32 tensorop GEMM computations.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --m=<int> GEMM M dimension\n"
<< " --n=<int> GEMM N dimension\n"
<< " --k=<int> GEMM K dimension\n"
<< " --alpha=<f32> Epilogue scalar alpha\n"
<< " --beta=<f32> Epilogue scalar beta\n\n"
<< " --iterations=<int> Number of profiling iterations to perform.\n\n";
out << "\n\nExamples:\n\n"
<< "$ ./examples/14_ampere_tf32_tensorop_gemm/14_ampere_tf32_tensorop_gemm --m=1024 --n=512 --k=1024 \\\n"
<< " --alpha=2 --beta=0.707 \n\n";
return out;
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of real-valued multiply-adds
int64_t fmas = problem_size.product() * batch_count;
// Two flops per multiply-add
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
};
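// Worked example for gflops() above (illustrative, using the default 5120 x 4096 x 4096 problem):
// fmas = 5120 * 4096 * 4096 ~= 8.59e10, so a 10 ms GEMM reports
// 2 * 8.59e10 / 1e9 / 0.010 ~= 17,180 GFLOP/s.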
///////////////////////////////////////////////////////////////////////////////////////////////////
// The code section below describes datatype for input, output matrices and computation between
// elements in input matrices.
using ElementAccumulator = float; // <- data type of accumulator
using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations
using ElementInputA = float; // <- data type of elements in input matrix A
using ElementInputB = float; // <- data type of elements in input matrix B
using ElementOutput = float; // <- data type of elements in output matrix D
// The code section below describes matrix layout of input and output matrices. Column Major for
// Matrix A, Row Major for Matrix B and Row Major for Matrix C
using LayoutInputA = cutlass::layout::RowMajor;
using LayoutInputB = cutlass::layout::ColumnMajor;
using LayoutOutput = cutlass::layout::RowMajor;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ShapeMMAThreadBlock =
cutlass::gemm::GemmShape<128, 128, 16>; // <- threadblock tile M = 128, N = 128, K = 16
// This code section describes tile size a warp will compute
using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 16>; // <- warp tile M = 64, N = 64, K = 16
// This code section describes the size of MMA op
using ShapeMMAOp = cutlass::gemm::GemmShape<16, 8, 8>; // <- MMA Op tile M = 16, N = 8, K = 8
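// How the three shapes compose (illustrative note): each 128x128x16 threadblock tile is covered by
// (128/64) * (128/64) = 4 warps, and each 64x64x16 warp tile is built from (64/16) * (64/8) = 32
// distinct 16x8x8 tensor core MMAs per K-step of 8, i.e. 64 MMAs per 16-deep warp tile.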
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;  // <- maps threadblock indices to GEMM output tiles (identity swizzle)
// This code section describes the epilogue part of the kernel
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // <- data type of output matrix
128 / cutlass::sizeof_bits<ElementOutput>::value, // <- the number of elements per vectorized
// memory access. For a byte, it's 16
// elements. This becomes the vector width of
// math instructions in the epilogue too
ElementAccumulator, // <- data type of accumulator
ElementComputeEpilogue>; // <- data type for alpha/beta in linear combination function
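// Sanity check of the vectorization comment above (illustrative): with a float output the epilogue
// accesses 128 / 32 = 4 elements per vectorized memory operation.
static_assert(128 / cutlass::sizeof_bits<ElementOutput>::value == 4,
              "float epilogue output is expected to vectorize to 4 elements per access");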
// Number of pipelines you want to use
constexpr int NumStages = 4;
using Gemm = cutlass::gemm::device::Gemm<ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ShapeMMAThreadBlock,
ShapeMMAWarp,
ShapeMMAOp,
EpilogueOp,
SwizzleThreadBlock,
NumStages>;
int run(Options &options) {
// Create a tuple of problem size for matrix multiplication
cutlass::gemm::GemmCoord problem_size = options.problem_size;
// Initialize tensors using CUTLASS helper functions
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(
problem_size.mk()); // <- Create matrix A with dimensions M x K
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(
problem_size.kn()); // <- Create matrix B with dimensions K x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(
problem_size.mn()); // <- Create matrix C with dimensions M x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(
problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from
// CUTLASS kernel
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(
problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from
// reference kernel
// Fill input and output matrices on host using CUTLASS helper functions
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(4),
ElementInputA(-4),
0); // <- Fill matrix A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(4),
ElementInputB(-4),
0); // <- Fill matrix B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c.host_view(),
1,
ElementOutput(4),
ElementOutput(-4),
0); // <- Fill matrix C on host with uniform-distribution random data
cutlass::reference::host::TensorFill(
tensor_d.host_view()); // <- fill matrix D on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_c.sync_device();
tensor_d.sync_device();
tensor_ref_d.sync_device();
// Initialize alpha and beta for dot product computation
ElementComputeEpilogue alpha = ElementComputeEpilogue(options.alpha);
ElementComputeEpilogue beta = ElementComputeEpilogue(options.beta);
  // Split K dimension into 1 partition
int split_k_slices = 1;
// Create a tuple of gemm kernel arguments. This is later passed as arguments to launch
// instantiated CUTLASS kernel
typename Gemm::Arguments arguments{problem_size, // <- problem size of matrix multiplication
tensor_a.device_ref(), // <- reference to matrix A on device
tensor_b.device_ref(), // <- reference to matrix B on device
tensor_c.device_ref(), // <- reference to matrix C on device
tensor_d.device_ref(), // <- reference to matrix D on device
{alpha, beta}, // <- tuple of alpha and beta
split_k_slices}; // <- k-dimension split factor
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size = Gemm::get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
// Instantiate CUTLASS kernel depending on templates
Gemm gemm_op;
// Check the problem size is supported or not
cutlass::Status status = gemm_op.can_implement(arguments);
CUTLASS_CHECK(status);
// Initialize CUTLASS kernel with arguments and workspace pointer
status = gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(status);
// Result structure
Result result;
//
// Construct events
//
hipEvent_t events[2];
for (auto & event : events) {
result.error = hipEventCreate(&event);
if (result.error != hipSuccess) {
std::cerr << "hipEventCreate() failed: " << hipGetErrorString(result.error) << std::endl;
return -1;
}
}
// Record an event at the start of a series of GEMMs
result.error = hipEventRecord(events[0]);
if (result.error != hipSuccess) {
std::cerr << "hipEventRecord() failed: " << hipGetErrorString(result.error) << std::endl;
return -1;
}
//
// Run profiling loop
//
for (int iter = 0; iter < options.iterations; ++iter) {
// Launch initialized CUTLASS kernel
status = gemm_op();
CUTLASS_CHECK(status);
}
//
// Stop profiling loop
//
// Record an event when the GEMMs are complete
result.error = hipEventRecord(events[1]);
if (result.error != hipSuccess) {
std::cerr << "hipEventRecord() failed: " << hipGetErrorString(result.error) << std::endl;
return -1;
}
// Wait for work on the device to complete.
result.error = hipEventSynchronize(events[1]);
if (result.error != hipSuccess) {
std::cerr << "hipEventSynchronize() failed: " << hipGetErrorString(result.error) << std::endl;
return -1;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = hipEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != hipSuccess) {
std::cerr << "cudaEventElapsed() failed: " << hipGetErrorString(result.error) << std::endl;
return -1;
}
// Compute average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(options.iterations);
result.gflops = options.gflops(result.runtime_ms / 1000.0);
// Cleanup
for (auto event : events) {
(void)hipEventDestroy(event);
}
// Create instantiation for device reference gemm kernel
cutlass::reference::device::Gemm<ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementComputeEpilogue>
gemm_device;
// Launch device reference gemm kernel
gemm_device(problem_size,
alpha,
tensor_a.device_ref(),
tensor_b.device_ref(),
beta,
tensor_c.device_ref(),
tensor_ref_d.device_ref());
// Wait for kernels to finish
hipDeviceSynchronize();
// Copy output data from CUTLASS and reference kernel to host for comparison
tensor_d.sync_host();
tensor_ref_d.sync_host();
// Check if output from CUTLASS kernel and reference kernel are equal or not
bool passed = cutlass::reference::host::TensorEquals(
tensor_d.host_view(),
tensor_ref_d.host_view());
if (passed) {
std::cout << "Runtime: " << result.runtime_ms << " ms" << std::endl;
std::cout << " GFLOPs: " << result.gflops << std::endl;
}
std::cout << (passed ? "Passed" : "Failed") << std::endl;
return (passed ? 0 : -1);
}
int main(int argc, const char **argv) {
bool notSupported = false;
// Ampere Tensor Core operations exposed with mma.sync and ldmatrix are first available
// in CUDA 11.0.
//
// CUTLASS must be compiled with CUDA 11.0 Toolkit to run these examples.
if (!(__CUDACC_VER_MAJOR__ >= 11)) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
hipDeviceProp_t props;
hipError_t error = hipGetDeviceProperties(&props, 0);
if (error != hipSuccess) {
std::cerr << "hipGetDeviceProperties() returned an error: " << hipGetErrorString(error) << std::endl;
return -1;
}
if (!((props.major * 10 + props.minor) >= 80)) {
std::cerr << "Ampere Tensor Core operations must be run on a machine with compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
    // Returning zero so this test passes on older Toolkits. Its actions are a no-op.
return 0;
}
Options options;
options.parse(argc, argv);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
printf("%d x %d x %d TF32 tensor op Matrix Multiply\n", \
options.problem_size.m(), options.problem_size.n(), options.problem_size.k());
if (!options.valid()) {
std::cerr << "Invalid problem." << std::endl;
return -1;
}
return run(options);
}
| e6a836800c8fb4650b842e6788569f8bbb36dd3d.cu | /***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
Please check examples 07 and 08 for the basics of tensor op GEMM kernels. On the NVIDIA Ampere
architecture, most of those concepts still hold. The two main differences are:
1. The NVIDIA Ampere architecture introduces a new series of tensor core instructions (see
include/cutlass/arch/mma_sm80.h) which are more efficient on Ampere.
2. The NVIDIA Ampere architecture uses cp_async() to build a multistage software pipeline that better
hides latency (see include/cutlass/gemm/threadblock/mma_multistage.h).
Moreover, the NVIDIA Ampere architecture adds support for the tfloat32 data type (see
include/cutlass/tfloat32.h) in tensor cores. One big advantage is that we can load fp32 data and
convert it implicitly to tf32 inside the GEMM kernel, which means no change is needed to accelerate
traditional fp32 workloads on the NVIDIA Ampere architecture.
*/
#include <iostream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Result structure
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
cudaError_t error;
bool passed;
//
// Methods
//
Result(
double runtime_ms = 0,
double gflops = 0,
cutlass::Status status = cutlass::Status::kSuccess,
cudaError_t error = cudaSuccess
):
runtime_ms(runtime_ms), gflops(gflops), status(status), error(error), passed(true) { }
};
///////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
cutlass::gemm::GemmCoord problem_size;
int batch_count;
float alpha;
float beta;
bool reference_check;
int iterations;
Options():
help(false),
problem_size({5120, 4096, 4096}),
batch_count(1),
reference_check(true),
iterations(20),
alpha(1),
beta() { }
bool valid() {
return true;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
cmd.get_cmd_line_argument("m", problem_size.m());
cmd.get_cmd_line_argument("n", problem_size.n());
cmd.get_cmd_line_argument("k", problem_size.k());
cmd.get_cmd_line_argument("alpha", alpha);
cmd.get_cmd_line_argument("beta", beta);
cmd.get_cmd_line_argument("iterations", iterations);
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "14_ampere_tf32_tensorop_gemm example\n\n"
<< " This example uses the CUTLASS Library to execute TF32 tensorop GEMM computations.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --m=<int> GEMM M dimension\n"
<< " --n=<int> GEMM N dimension\n"
<< " --k=<int> GEMM K dimension\n"
<< " --alpha=<f32> Epilogue scalar alpha\n"
<< " --beta=<f32> Epilogue scalar beta\n\n"
<< " --iterations=<int> Number of profiling iterations to perform.\n\n";
out << "\n\nExamples:\n\n"
<< "$ ./examples/14_ampere_tf32_tensorop_gemm/14_ampere_tf32_tensorop_gemm --m=1024 --n=512 --k=1024 \\\n"
<< " --alpha=2 --beta=0.707 \n\n";
return out;
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of real-valued multiply-adds
int64_t fmas = problem_size.product() * batch_count;
// Two flops per multiply-add
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
// The code section below describes datatype for input, output matrices and computation between
// elements in input matrices.
using ElementAccumulator = float; // <- data type of accumulator
using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations
using ElementInputA = float; // <- data type of elements in input matrix A
using ElementInputB = float; // <- data type of elements in input matrix B
using ElementOutput = float; // <- data type of elements in output matrix D
// The code section below describes matrix layout of input and output matrices. Column Major for
// Matrix A, Row Major for Matrix B and Row Major for Matrix C
using LayoutInputA = cutlass::layout::RowMajor;
using LayoutInputB = cutlass::layout::ColumnMajor;
using LayoutOutput = cutlass::layout::RowMajor;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ShapeMMAThreadBlock =
cutlass::gemm::GemmShape<128, 128, 16>; // <- threadblock tile M = 128, N = 128, K = 16
// This code section describes tile size a warp will compute
using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 16>; // <- warp tile M = 64, N = 64, K = 16
// This code section describes the size of MMA op
using ShapeMMAOp = cutlass::gemm::GemmShape<16, 8, 8>; // <- MMA Op tile M = 16, N = 8, K = 8
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;  // <- maps threadblock indices to GEMM output tiles (identity swizzle)
// This code section describes the epilogue part of the kernel
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // <- data type of output matrix
128 / cutlass::sizeof_bits<ElementOutput>::value, // <- the number of elements per vectorized
// memory access. For a byte, it's 16
// elements. This becomes the vector width of
// math instructions in the epilogue too
ElementAccumulator, // <- data type of accumulator
ElementComputeEpilogue>; // <- data type for alpha/beta in linear combination function
// Number of pipelines you want to use
constexpr int NumStages = 4;
using Gemm = cutlass::gemm::device::Gemm<ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ShapeMMAThreadBlock,
ShapeMMAWarp,
ShapeMMAOp,
EpilogueOp,
SwizzleThreadBlock,
NumStages>;
int run(Options &options) {
// Create a tuple of problem size for matrix multiplication
cutlass::gemm::GemmCoord problem_size = options.problem_size;
// Initialize tensors using CUTLASS helper functions
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(
problem_size.mk()); // <- Create matrix A with dimensions M x K
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(
problem_size.kn()); // <- Create matrix B with dimensions K x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(
problem_size.mn()); // <- Create matrix C with dimensions M x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(
problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from
// CUTLASS kernel
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(
problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from
// reference kernel
// Fill input and output matrices on host using CUTLASS helper functions
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(4),
ElementInputA(-4),
0); // <- Fill matrix A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(4),
ElementInputB(-4),
0); // <- Fill matrix B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c.host_view(),
1,
ElementOutput(4),
ElementOutput(-4),
0); // <- Fill matrix C on host with uniform-distribution random data
cutlass::reference::host::TensorFill(
tensor_d.host_view()); // <- fill matrix D on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_c.sync_device();
tensor_d.sync_device();
tensor_ref_d.sync_device();
// Initialize alpha and beta for dot product computation
ElementComputeEpilogue alpha = ElementComputeEpilogue(options.alpha);
ElementComputeEpilogue beta = ElementComputeEpilogue(options.beta);
  // Split K dimension into 1 partition
int split_k_slices = 1;
// Create a tuple of gemm kernel arguments. This is later passed as arguments to launch
// instantiated CUTLASS kernel
typename Gemm::Arguments arguments{problem_size, // <- problem size of matrix multiplication
tensor_a.device_ref(), // <- reference to matrix A on device
tensor_b.device_ref(), // <- reference to matrix B on device
tensor_c.device_ref(), // <- reference to matrix C on device
tensor_d.device_ref(), // <- reference to matrix D on device
{alpha, beta}, // <- tuple of alpha and beta
split_k_slices}; // <- k-dimension split factor
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size = Gemm::get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
// Instantiate CUTLASS kernel depending on templates
Gemm gemm_op;
// Check the problem size is supported or not
cutlass::Status status = gemm_op.can_implement(arguments);
CUTLASS_CHECK(status);
// Initialize CUTLASS kernel with arguments and workspace pointer
status = gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(status);
// Result structure
Result result;
//
// Construct events
//
cudaEvent_t events[2];
for (auto & event : events) {
result.error = cudaEventCreate(&event);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
return -1;
}
}
// Record an event at the start of a series of GEMMs
result.error = cudaEventRecord(events[0]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return -1;
}
//
// Run profiling loop
//
for (int iter = 0; iter < options.iterations; ++iter) {
// Launch initialized CUTLASS kernel
status = gemm_op();
CUTLASS_CHECK(status);
}
//
// Stop profiling loop
//
// Record an event when the GEMMs are complete
result.error = cudaEventRecord(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return -1;
}
// Wait for work on the device to complete.
result.error = cudaEventSynchronize(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl;
return -1;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
return -1;
}
// Compute average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(options.iterations);
result.gflops = options.gflops(result.runtime_ms / 1000.0);
// Cleanup
for (auto event : events) {
(void)cudaEventDestroy(event);
}
// Create instantiation for device reference gemm kernel
cutlass::reference::device::Gemm<ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementComputeEpilogue>
gemm_device;
// Launch device reference gemm kernel
gemm_device(problem_size,
alpha,
tensor_a.device_ref(),
tensor_b.device_ref(),
beta,
tensor_c.device_ref(),
tensor_ref_d.device_ref());
// Wait for kernels to finish
cudaDeviceSynchronize();
// Copy output data from CUTLASS and reference kernel to host for comparison
tensor_d.sync_host();
tensor_ref_d.sync_host();
// Check if output from CUTLASS kernel and reference kernel are equal or not
bool passed = cutlass::reference::host::TensorEquals(
tensor_d.host_view(),
tensor_ref_d.host_view());
if (passed) {
std::cout << "Runtime: " << result.runtime_ms << " ms" << std::endl;
std::cout << " GFLOPs: " << result.gflops << std::endl;
}
std::cout << (passed ? "Passed" : "Failed") << std::endl;
return (passed ? 0 : -1);
}
int main(int argc, const char **argv) {
bool notSupported = false;
// Ampere Tensor Core operations exposed with mma.sync and ldmatrix are first available
// in CUDA 11.0.
//
// CUTLASS must be compiled with CUDA 11.0 Toolkit to run these examples.
if (!(__CUDACC_VER_MAJOR__ >= 11)) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
if (!((props.major * 10 + props.minor) >= 80)) {
std::cerr << "Ampere Tensor Core operations must be run on a machine with compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
    // Returning zero so this test passes on older Toolkits. Its actions are a no-op.
return 0;
}
Options options;
options.parse(argc, argv);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
printf("%d x %d x %d TF32 tensor op Matrix Multiply\n", \
options.problem_size.m(), options.problem_size.n(), options.problem_size.k());
if (!options.valid()) {
std::cerr << "Invalid problem." << std::endl;
return -1;
}
return run(options);
}
|
f3b113192219559aec2104ac124db104a4bca00d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "_ele_scale.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *m = NULL;
hipMalloc(&m, XSIZE*YSIZE);
float *target = NULL;
hipMalloc(&target, XSIZE*YSIZE);
float scaler = 2;
int len = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
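// The two loops above round iXSIZE and iYSIZE up to the next multiples of BLOCKX and BLOCKY,
// i.e. iXSIZE = ((XSIZE + BLOCKX - 1) / BLOCKX) * BLOCKX, so the grid below divides evenly.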
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((_ele_scale), dim3(gridBlock), dim3(threadBlock), 0, 0, m, target, scaler, len);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((_ele_scale), dim3(gridBlock), dim3(threadBlock), 0, 0, m, target, scaler, len);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((_ele_scale), dim3(gridBlock), dim3(threadBlock), 0, 0, m, target, scaler, len);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | f3b113192219559aec2104ac124db104a4bca00d.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "_ele_scale.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *m = NULL;
cudaMalloc(&m, XSIZE*YSIZE);
float *target = NULL;
cudaMalloc(&target, XSIZE*YSIZE);
float scaler = 2;
int len = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
_ele_scale<<<gridBlock,threadBlock>>>(m,target,scaler,len);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
_ele_scale<<<gridBlock,threadBlock>>>(m,target,scaler,len);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
_ele_scale<<<gridBlock,threadBlock>>>(m,target,scaler,len);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
5546525d3f13ab8382ca813ce439db696aa248b6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2013 Yangqing Jia
#include <algorithm>
#include <limits>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/layer.hpp"
#include "caffe/syncedmem.hpp"
#include "caffe/vision_layers.hpp"
using std::max;
namespace caffe {
template <typename Dtype>
void DropoutLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
NeuronLayer<Dtype>::SetUp(bottom, top);
// Set up the cache for random number generation
rand_vec_.reset(new SyncedMemory(bottom[0]->count() * sizeof(int)));
threshold_ = this->layer_param_.dropout_ratio();
DCHECK(threshold_ > 0.);
DCHECK(threshold_ < 1.);
scale_ = 1. / (1. - threshold_);
uint_thres_ = (unsigned int)(UINT_MAX * threshold_);
};
template <typename Dtype>
void DropoutLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
const Dtype* bottom_data = bottom[0]->cpu_data();
Dtype* top_data = (*top)[0]->mutable_cpu_data();
int* mask = (int*)rand_vec_->mutable_cpu_data();
const int count = bottom[0]->count();
if (Caffe::phase() == Caffe::TRAIN) {
// Create random numbers
//viRngBernoulli(VSL_RNG_METHOD_BERNOULLI_ICDF, Caffe::vsl_stream(),
// count, mask, 1. - threshold_);
caffe_vRngBernoulli<int>(count, mask, 1. - threshold_);
for (int i = 0; i < count; ++i) {
top_data[i] = bottom_data[i] * mask[i] * scale_;
}
} else {
memcpy(top_data, bottom_data, bottom[0]->count() * sizeof(Dtype));
}
}
template <typename Dtype>
Dtype DropoutLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
const bool propagate_down,
vector<Blob<Dtype>*>* bottom) {
CHECK(Caffe::phase() == Caffe::TRAIN);
if (propagate_down) {
const Dtype* top_diff = top[0]->cpu_diff();
Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
const int* mask = (int*)(rand_vec_->cpu_data());
const int count = (*bottom)[0]->count();
for (int i = 0; i < count; ++i) {
bottom_diff[i] = top_diff[i] * mask[i] * scale_;
}
}
return Dtype(0);
}
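// Inverted-dropout note (illustrative): on the GPU path below, rand_vec_ holds uniform 32-bit
// random integers, so the test (mask[index] > uint_thres_) keeps an element with probability
// 1 - threshold_, and scale_ = 1 / (1 - threshold_) rescales the survivors so the expected
// activation matches the test-time pass. Example: threshold_ = 0.5 gives
// uint_thres_ = UINT_MAX / 2 and scale_ = 2.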
template <typename Dtype>
__global__ void DropoutForward(const int n, const Dtype* in,
const unsigned int* mask, const unsigned int threshold, const float scale,
Dtype* out) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n) {
out[index] = in[index] * (mask[index] > threshold) * scale;
}
}
template <typename Dtype>
void DropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = (*top)[0]->mutable_gpu_data();
const int count = bottom[0]->count();
if (Caffe::phase() == Caffe::TRAIN) {
CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(),
(unsigned int*)(rand_vec_->mutable_gpu_data()), count));
// set thresholds
hipLaunchKernelGGL(( DropoutForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, (unsigned int*)rand_vec_->gpu_data(), uint_thres_, scale_,
top_data);
CUDA_POST_KERNEL_CHECK;
} else {
CUDA_CHECK(hipMemcpy(top_data, bottom_data,
count * sizeof(Dtype), hipMemcpyDeviceToDevice));
}
}
template <typename Dtype>
__global__ void DropoutBackward(const int n, const Dtype* in_diff,
const unsigned int* mask, const unsigned int threshold, const float scale,
Dtype* out_diff) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n) {
out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);
}
}
template <typename Dtype>
Dtype DropoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const bool propagate_down,
vector<Blob<Dtype>*>* bottom) {
CHECK(Caffe::phase() == Caffe::TRAIN);
if (propagate_down) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
const unsigned int* mask = (unsigned int*)rand_vec_->gpu_data();
const int count = (*bottom)[0]->count();
hipLaunchKernelGGL(( DropoutBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, mask, uint_thres_, scale_, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
return Dtype(0);
}
INSTANTIATE_CLASS(DropoutLayer);
} // namespace caffe
| 5546525d3f13ab8382ca813ce439db696aa248b6.cu | // Copyright 2013 Yangqing Jia
#include <algorithm>
#include <limits>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/layer.hpp"
#include "caffe/syncedmem.hpp"
#include "caffe/vision_layers.hpp"
using std::max;
namespace caffe {
template <typename Dtype>
void DropoutLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
NeuronLayer<Dtype>::SetUp(bottom, top);
// Set up the cache for random number generation
rand_vec_.reset(new SyncedMemory(bottom[0]->count() * sizeof(int)));
threshold_ = this->layer_param_.dropout_ratio();
DCHECK(threshold_ > 0.);
DCHECK(threshold_ < 1.);
scale_ = 1. / (1. - threshold_);
uint_thres_ = (unsigned int)(UINT_MAX * threshold_);
};
template <typename Dtype>
void DropoutLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
const Dtype* bottom_data = bottom[0]->cpu_data();
Dtype* top_data = (*top)[0]->mutable_cpu_data();
int* mask = (int*)rand_vec_->mutable_cpu_data();
const int count = bottom[0]->count();
if (Caffe::phase() == Caffe::TRAIN) {
// Create random numbers
//viRngBernoulli(VSL_RNG_METHOD_BERNOULLI_ICDF, Caffe::vsl_stream(),
// count, mask, 1. - threshold_);
caffe_vRngBernoulli<int>(count, mask, 1. - threshold_);
for (int i = 0; i < count; ++i) {
top_data[i] = bottom_data[i] * mask[i] * scale_;
}
} else {
memcpy(top_data, bottom_data, bottom[0]->count() * sizeof(Dtype));
}
}
template <typename Dtype>
Dtype DropoutLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
const bool propagate_down,
vector<Blob<Dtype>*>* bottom) {
CHECK(Caffe::phase() == Caffe::TRAIN);
if (propagate_down) {
const Dtype* top_diff = top[0]->cpu_diff();
Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
const int* mask = (int*)(rand_vec_->cpu_data());
const int count = (*bottom)[0]->count();
for (int i = 0; i < count; ++i) {
bottom_diff[i] = top_diff[i] * mask[i] * scale_;
}
}
return Dtype(0);
}
template <typename Dtype>
__global__ void DropoutForward(const int n, const Dtype* in,
const unsigned int* mask, const unsigned int threshold, const float scale,
Dtype* out) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n) {
out[index] = in[index] * (mask[index] > threshold) * scale;
}
}
template <typename Dtype>
void DropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = (*top)[0]->mutable_gpu_data();
const int count = bottom[0]->count();
if (Caffe::phase() == Caffe::TRAIN) {
CURAND_CHECK(curandGenerate(Caffe::curand_generator(),
(unsigned int*)(rand_vec_->mutable_gpu_data()), count));
// set thresholds
DropoutForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, (unsigned int*)rand_vec_->gpu_data(), uint_thres_, scale_,
top_data);
CUDA_POST_KERNEL_CHECK;
} else {
CUDA_CHECK(cudaMemcpy(top_data, bottom_data,
count * sizeof(Dtype), cudaMemcpyDeviceToDevice));
}
}
template <typename Dtype>
__global__ void DropoutBackward(const int n, const Dtype* in_diff,
const unsigned int* mask, const unsigned int threshold, const float scale,
Dtype* out_diff) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n) {
out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);
}
}
template <typename Dtype>
Dtype DropoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const bool propagate_down,
vector<Blob<Dtype>*>* bottom) {
CHECK(Caffe::phase() == Caffe::TRAIN);
if (propagate_down) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
const unsigned int* mask = (unsigned int*)rand_vec_->gpu_data();
const int count = (*bottom)[0]->count();
DropoutBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, mask, uint_thres_, scale_, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
return Dtype(0);
}
INSTANTIATE_CLASS(DropoutLayer);
} // namespace caffe
|
af7e9c851e9e2f68eeefdf79c4700fb21b7920a6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//=======================================================================
// Copyright (c) 2017 Baptiste Wicht
// Distributed under the terms of the MIT License.
// (See accompanying file LICENSE or copy at
// http://opensource.org/licenses/MIT)
//=======================================================================
#include "egblas/greater_equal.hpp"
#include "complex.hpp"
__device__ bool operator>=(hipComplex lhs, hipComplex rhs){
return lhs.x > rhs.x || (lhs.x == rhs.x && lhs.y >= rhs.y);
}
__device__ bool operator>=(hipDoubleComplex lhs, hipDoubleComplex rhs){
return lhs.x > rhs.x || (lhs.x == rhs.x && lhs.y >= rhs.y);
}
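// Note (illustrative): both overloads above order complex numbers lexicographically; the real
// parts are compared first and the imaginary parts only break ties. Complex numbers have no
// canonical total order, this is simply the convention used here for the elementwise comparison.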
template <typename T>
__global__ void greater_equal_kernel(size_t n, const T* a, size_t inca, const T* b, size_t incb, bool* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = a[inca * index] >= b[incb * index];
}
}
template <typename T>
void greater_equal_kernel_run(size_t n, const T* a, size_t inca, const T* b, size_t incb, bool* y, size_t incy) {
int blockSize;
int minGridSize;
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, greater_equal_kernel<T>, 0, 0);
int gridSize = ((n / incy) + blockSize - 1) / blockSize;
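    // ((n / incy) + blockSize - 1) / blockSize is the usual ceiling division; together with the
    // grid-stride loop inside the kernel, every element is still visited even if the grid is small.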
hipLaunchKernelGGL(( greater_equal_kernel<T>), dim3(gridSize), dim3(blockSize), 0, 0, n, a, inca, b, incb, y, incy);
#ifdef EGBLAS_SYNCHRONIZE
hipDeviceSynchronize();
#endif
}
void egblas_sgreater_equal(size_t n, const float* a, size_t inca, const float* b, size_t incb, bool* y, size_t incy) {
greater_equal_kernel_run(n, a, inca, b, incb, y, incy);
}
void egblas_dgreater_equal(size_t n, const double* a, size_t inca, const double* b, size_t incb, bool* y, size_t incy) {
greater_equal_kernel_run(n, a, inca, b, incb, y, incy);
}
void egblas_cgreater_equal(size_t n, const hipComplex* a, size_t inca, const hipComplex* b, size_t incb, bool* y, size_t incy) {
greater_equal_kernel_run(n, a, inca, b, incb, y, incy);
}
void egblas_zgreater_equal(size_t n, const hipDoubleComplex* a, size_t inca, const hipDoubleComplex* b, size_t incb, bool* y, size_t incy) {
greater_equal_kernel_run(n, a, inca, b, incb, y, incy);
}
| af7e9c851e9e2f68eeefdf79c4700fb21b7920a6.cu | //=======================================================================
// Copyright (c) 2017 Baptiste Wicht
// Distributed under the terms of the MIT License.
// (See accompanying file LICENSE or copy at
// http://opensource.org/licenses/MIT)
//=======================================================================
#include "egblas/greater_equal.hpp"
#include "complex.hpp"
__device__ bool operator>=(cuComplex lhs, cuComplex rhs){
return lhs.x > rhs.x || (lhs.x == rhs.x && lhs.y >= rhs.y);
}
__device__ bool operator>=(cuDoubleComplex lhs, cuDoubleComplex rhs){
return lhs.x > rhs.x || (lhs.x == rhs.x && lhs.y >= rhs.y);
}
template <typename T>
__global__ void greater_equal_kernel(size_t n, const T* a, size_t inca, const T* b, size_t incb, bool* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = a[inca * index] >= b[incb * index];
}
}
template <typename T>
void greater_equal_kernel_run(size_t n, const T* a, size_t inca, const T* b, size_t incb, bool* y, size_t incy) {
int blockSize;
int minGridSize;
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, greater_equal_kernel<T>, 0, 0);
int gridSize = ((n / incy) + blockSize - 1) / blockSize;
greater_equal_kernel<T><<<gridSize, blockSize>>>(n, a, inca, b, incb, y, incy);
#ifdef EGBLAS_SYNCHRONIZE
cudaDeviceSynchronize();
#endif
}
void egblas_sgreater_equal(size_t n, const float* a, size_t inca, const float* b, size_t incb, bool* y, size_t incy) {
greater_equal_kernel_run(n, a, inca, b, incb, y, incy);
}
void egblas_dgreater_equal(size_t n, const double* a, size_t inca, const double* b, size_t incb, bool* y, size_t incy) {
greater_equal_kernel_run(n, a, inca, b, incb, y, incy);
}
void egblas_cgreater_equal(size_t n, const cuComplex* a, size_t inca, const cuComplex* b, size_t incb, bool* y, size_t incy) {
greater_equal_kernel_run(n, a, inca, b, incb, y, incy);
}
void egblas_zgreater_equal(size_t n, const cuDoubleComplex* a, size_t inca, const cuDoubleComplex* b, size_t incb, bool* y, size_t incy) {
greater_equal_kernel_run(n, a, inca, b, incb, y, incy);
}
|
f0228d6e9e3d227f2eb5cdca0aaf2b4702f2b43e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright [2016] <Contributors>
 * \file correlation.cu
* \brief Correlation operator
* \author Xu Dong
*/
#include "./correlation-inl.h"
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <algorithm>
#include <vector>
#define ROUND_OFF 50000
#define WARPS_PER_BLOCK 1
#define THREADS_PER_WARP 32
#define CORRELATION_CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
CHECK_EQ(error, hipSuccess) << " " << hipGetErrorString(error); \
} while (0)
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
namespace mshadow {
namespace cuda {
// == Correlation Kernel
template <typename Dtype>
__global__ void CorrelateData(const int nthreads, int num, int topwidth,
int topheight, int topchannels, int topcount,
int max_displacement, int neighborhood_grid_radius,
int neighborhood_grid_width, int kernel_radius, int kernel_size, int stride1, int stride2,
int bottomwidth, int bottomheight, int bottomchannels,
const Dtype *bottom0, const Dtype *bottom1, Dtype *top) {
extern __shared__ char patch_data_char[];
Dtype *patch_data = reinterpret_cast<Dtype *>(patch_data_char);
// First (upper left) position of kernel upper-left corner
// in current center position of neighborhood in image 1
int x1 = blockIdx.x * stride1 + max_displacement;
int y1 = blockIdx.y * stride1 + max_displacement;
int item = blockIdx.z;
int ch_off = threadIdx.x;
  // Load 3D patch into shared memory
for (int j = 0; j < kernel_size; j++) { // HEIGHT
for (int i = 0; i < kernel_size; i++) { // WIDTH
int ji_off = ((j * kernel_size) + i) * bottomchannels;
for (int ch = ch_off; ch < bottomchannels; ch += (THREADS_PER_WARP * WARPS_PER_BLOCK)) {
// CHANNELS
int idx1 = ((item * bottomheight + y1+j) * bottomwidth + x1+i) * bottomchannels + ch;
int idxPatchData = ji_off + ch;
patch_data[idxPatchData] = bottom0[idx1];
}
}
}
__syncthreads();
__shared__ Dtype sum[THREADS_PER_WARP * WARPS_PER_BLOCK];
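  // Reduction layout (illustrative note): each of the THREADS_PER_WARP * WARPS_PER_BLOCK (= 32)
  // threads accumulates a partial dot product over a strided subset of the channels; thread 0 then
  // sums the 32 partials and divides by kernel_size * kernel_size * bottomchannels before writing
  // one element of the top blob.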
// Compute correlation
for (int top_channel = 0; top_channel < topchannels; top_channel++) {
sum[ch_off] = 0;
int s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2;
int s2p = (top_channel / neighborhood_grid_width - neighborhood_grid_radius) * stride2;
for (int j = 0; j < kernel_size; j++) { // HEIGHT
for (int i = 0; i < kernel_size; i++) { // WIDTH
int ji_off = ((j * kernel_size) + i) * bottomchannels;
for (int ch = ch_off; ch < bottomchannels; ch += (THREADS_PER_WARP * WARPS_PER_BLOCK)) {
// CHANNELS
int x2 = x1 + s2o;
int y2 = y1 + s2p;
int idxPatchData = ji_off + ch;
int idx2 = ((item * bottomheight + y2 + j) * bottomwidth + x2 + i) * bottomchannels + ch;
sum[ch_off] += patch_data[idxPatchData] * bottom1[idx2];
}
}
}
__syncthreads();
if (ch_off == 0) {
Dtype total_sum = 0;
for (int idx = 0; idx < THREADS_PER_WARP * WARPS_PER_BLOCK; idx++) {
total_sum += sum[idx];
}
const int sumelems = kernel_size * kernel_size * bottomchannels;
const int index = ((top_channel * topheight + blockIdx.y) * topwidth) + blockIdx.x;
top[index + item*topcount] = total_sum / static_cast<float>(sumelems);
} // Aggregate result of different threads
}
}
// == Correlation Backward Pass Kernel (For data1)
template <typename Dtype>
__global__ void CorrelateDataBackward0(const int nthreads, int num, int item,
int topwidth, int topheight, int topchannels,
int max_displacement, int neighborhood_grid_radius,
int neighborhood_grid_width, int kernel_radius, int stride1, int stride2,
int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight,
int bottomchannels, int bottomcount, int pad_size,
Dtype *bottom0diff, const Dtype *bottom1, const Dtype *topdiff) {
CUDA_KERNEL_LOOP(index, nthreads) {
int n = index % bottomchannels; // channels
int l = (index / bottomchannels) % bottomwidth + pad_size; // w-pos
int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; // h-pos
// Get X,Y ranges and clamp
// round_off is a trick to enable integer division with ceil, even for negative numbers
// We use a large offset, for the inner part not to become negative.
const int round_off = ROUND_OFF;
const int round_off_s1 = stride1 * round_off;
// We add round_off before_s1 the int division and subtract round_off after it,
// to ensure the formula matches ceil behavior:
int xmin = (l - 2*kernel_radius - max_displacement + round_off_s1 - 1)\
/ stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1
int ymin = (m - 2*kernel_radius - max_displacement + round_off_s1 - 1)\
     / stride1 + 1 - round_off; // ceil (m - 2*kernel_radius - max_displacement) / stride1
// Same here:
int xmax = (l - max_displacement + round_off_s1) / stride1 - round_off;
// floor (l - max_displacement) / stride1
int ymax = (m - max_displacement + round_off_s1) / stride1 - round_off;
// floor (m - max_displacement) / stride1
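    // Worked example of the ROUND_OFF trick used above (illustrative): C integer division truncates
    // toward zero, so the usual (x + stride1 - 1) / stride1 ceiling formula breaks for negative x,
    // e.g. x = -4, stride1 = 2 gives (-4 + 1) / 2 = -1 instead of ceil(-4 / 2) = -2. Shifting by
    // round_off_s1 = stride1 * ROUND_OFF first keeps the numerator positive:
    // (-4 + 100000 - 1) / 2 + 1 - 50000 = 49997 + 1 - 50000 = -2, as required.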
Dtype sum = 0;
if (xmax >= 0 && ymax >= 0 && (xmin <= topwidth-1) && (ymin <= topheight-1)) {
xmin = max(0, xmin);
xmax = min(topwidth-1, xmax);
ymin = max(0, ymin);
ymax = min(topheight-1, ymax);
for (int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) {
for (int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) {
// Get bottom1 data:
int s2o = stride2 * o;
int s2p = stride2 * p;
int idxbot1 = ((item * pbottomheight + (m + s2p)) * pbottomwidth + (l + s2o))\
* bottomchannels + n;
Dtype bot1tmp = bottom1[idxbot1]; // bottom1[l+s2o,m+s2p,n]
// Index offset for topdiff in following loops:
int op = (p+neighborhood_grid_radius) * neighborhood_grid_width\
+ (o + neighborhood_grid_radius); // index [o,p]
int idxopoffset = (item * topchannels + op);
for (int y = ymin; y <= ymax; y++) {
for (int x = xmin; x <= xmax; x++) {
int idxtopdiff = (idxopoffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p]
sum += topdiff[idxtopdiff] * bot1tmp;
}
}
}
}
}
const int sumelems = (kernel_radius * 2 + 1) * (kernel_radius * 2+1) * bottomchannels;
const int bot0index = ((n * bottomheight) + (m-pad_size)) * bottomwidth + (l-pad_size);
bottom0diff[bot0index + item * bottomcount] = sum / static_cast<float>(sumelems);
}
}
// == Correlation Backward Pass Kernel (For Blob 1)
template <typename Dtype>
__global__ void CorrelateDataBackward1(const int nthreads,
int num, int item, int topwidth, int topheight, int topchannels,
int max_displacement, int neighborhood_grid_radius,
int neighborhood_grid_width, int kernel_radius, int stride1, int stride2,
int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight,
int bottomchannels, int bottomcount, int pad_size,
const Dtype *bottom0, Dtype *bottom1diff, const Dtype *topdiff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// int l = index % bottomwidth + pad_size; //w-pos
// int m = (index / bottomwidth) % bottomheight + pad_size; // h-pos
// int n = (index / bottomwidth / bottomheight) % bottomchannels; // channels
int n = index % bottomchannels; // channels
int l = (index / bottomchannels) % bottomwidth + pad_size; // w-pos
int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; // h-pos
// round_off is a trick to enable integer division with ceil, even for negative numbers
// We use a large offset, for the inner part not to become negative.
const int round_off = ROUND_OFF;
const int round_off_s1 = stride1 * round_off;
Dtype sum = 0;
for (int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) {
for (int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) {
int s2o = stride2 * o;
int s2p = stride2 * p;
// Get X,Y ranges and clamp
// We add round_off before_s1 the int division and subtract round_off after it,
// to ensure the formula matches ceil behavior:
int xmin = (l - 2*kernel_radius - max_displacement - s2o + round_off_s1 - 1)\
/ stride1 + 1 - round_off;
// ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1
int ymin = (m - 2*kernel_radius - max_displacement - s2p + round_off_s1 - 1)\
/ stride1 + 1 - round_off;
        // ceil (m - 2*kernel_radius - max_displacement - s2p) / stride1
// Same here:
int xmax = (l - max_displacement - s2o + round_off_s1) / stride1 - round_off;
// floor (l - max_displacement - s2o) / stride1
int ymax = (m - max_displacement - s2p + round_off_s1) / stride1 - round_off;
// floor (m - max_displacement - s2p) / stride1
if (xmax >= 0 && ymax >= 0 && (xmin <= topwidth - 1) && (ymin <= topheight - 1)) {
xmin = max(0, xmin);
xmax = min(topwidth-1, xmax);
ymin = max(0, ymin);
ymax = min(topheight-1, ymax);
// Get bottom0 data:
int idxbot0 = ((item * pbottomheight + (m - s2p)) \
* pbottomwidth + (l - s2o)) * bottomchannels + n;
        Dtype bot0tmp = bottom0[idxbot0]; // bottom0[l-s2o,m-s2p,n]
// Index offset for topdiff in following loops:
int op = (p+neighborhood_grid_radius) * \
neighborhood_grid_width + (o+neighborhood_grid_radius); // index [o,p]
int idxOpOffset = (item * topchannels + op);
for (int y = ymin; y <= ymax; y++) {
for (int x = xmin; x <= xmax; x++) {
int idxtopdiff = (idxOpOffset * topheight + y)\
* topwidth + x; // topdiff[x,y,o,p]
sum += topdiff[idxtopdiff] * bot0tmp;
}
}
}
}
}
const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels;
const int bot1index = ((n * bottomheight) + (m - pad_size)) * bottomwidth + (l - pad_size);
bottom1diff[bot1index + item * bottomcount] = sum / static_cast<float>(sumelems);
}
}
// == Correlation Kernel Subtraction
template <typename Dtype>
__global__ void CorrelateDataSubtract(const int nthreads, int num, int item,
int topwidth, int topheight, int topchannels, int topcount,
int max_displacement, int neighborhood_grid_radius,
int neighborhood_grid_width, int kernel_radius, int stride1, int stride2,
int bottomwidth, int bottomheight, int bottomchannels,
const Dtype *bottom0, const Dtype *bottom1, Dtype *top) {
CUDA_KERNEL_LOOP(index, nthreads) {
int x = index % topwidth; // w-pos
int y = (index / topwidth) % topheight; // h-pos
int c = (index / topwidth / topheight) % topchannels; // channels
// Offset of patch in image 2
int s2o = (c % neighborhood_grid_width - neighborhood_grid_radius) * stride2;
int s2p = (c / neighborhood_grid_width - neighborhood_grid_radius) * stride2;
// First (upper left) position of kernel center in current neighborhood in image 1
int x1 = x*stride1 + kernel_radius + max_displacement;
int y1 = y*stride1 + kernel_radius + max_displacement;
// Iterate through 3D patch
Dtype sum = 0;
for (int j = -kernel_radius; j <= kernel_radius; j++) { // HEIGHT
for (int i = -kernel_radius; i <= kernel_radius; i++) { // WIDTH
for (int l = 0; l < bottomchannels; l++) { // CHANNELS
// Calculate position in image 2
int x2 = x1 + s2o;
int y2 = y1 + s2p;
// Indices in bottom data: (CH=l,W=x2,H=y2,N)
int idx1 = ((item * bottomheight + y1 + j) * bottomwidth + x1 + i) \
* bottomchannels + l;
int idx2 = ((item * bottomheight + y2 + j) * bottomwidth + x2 + i) \
* bottomchannels + l;
// Do the correlation:
sum += fabsf(bottom0[idx1] - bottom1[idx2]);
}
}
}
const int sumelems = (kernel_radius * 2 + 1) * (kernel_radius * 2 + 1) * bottomchannels;
top[index + item * topcount] = sum / static_cast<float>(sumelems);
}
}
// == Correlation Backward Pass Kernel (For Blob 0)
template <typename Dtype>
__global__ void CorrelateDataBackward0Subtract(const int nthreads, int num,
int item, int topwidth, int topheight, int topchannels,
int max_displacement, int neighborhood_grid_radius,
int neighborhood_grid_width, int kernel_radius,
int stride1, int stride2, int bottomwidth, int bottomheight,
int pbottomwidth, int pbottomheight,
int bottomchannels, int bottomcount, int pad_size,
Dtype *bottom0diff, const Dtype *bottom0, const Dtype *bottom1, const Dtype *topdiff) {
CUDA_KERNEL_LOOP(index, nthreads) {
int n = index % bottomchannels; // channels
int l = (index / bottomchannels) % bottomwidth + pad_size; // w-pos
int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; // h-pos
// Get X,Y ranges and clamp
// round_off is a trick to enable integer division with ceil, even for negative numbers
// We use a large offset, for the inner part not to become negative.
const int round_off = ROUND_OFF;
const int round_off_s1 = stride1 * round_off;
int idxbot0 = ((item * pbottomheight + m) * pbottomwidth + l)\
* bottomchannels + n;
// We add round_off before_s1 the int division and subtract round_off after it,
// to ensure the formula matches ceil behavior:
int xmin = (l - 2*kernel_radius - max_displacement + round_off_s1 - 1)\
/ stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1
int ymin = (m - 2*kernel_radius - max_displacement + round_off_s1 - 1)\
     / stride1 + 1 - round_off; // ceil (m - 2*kernel_radius - max_displacement) / stride1
// Same here:
int xmax = (l - max_displacement + round_off_s1) / stride1 - round_off;
// floor (l - max_displacement) / stride1
int ymax = (m - max_displacement + round_off_s1) / stride1 - round_off;
// floor (m - max_displacement) / stride1
Dtype sum = 0;
if (xmax >= 0 && ymax >= 0 && (xmin <= topwidth-1) && (ymin <= topheight-1)) {
xmin = max(0, xmin);
xmax = min(topwidth-1, xmax);
ymin = max(0, ymin);
ymax = min(topheight-1, ymax);
for (int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) {
for (int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) {
// Get bottom1 data:
int s2o = stride2 * o;
int s2p = stride2 * p;
int idxbot1 = ((item * pbottomheight + (m+s2p)) * pbottomwidth\
+ (l+s2o)) * bottomchannels + n;
Dtype bot0tmp = bottom0[idxbot0];
Dtype bot1tmp = bottom1[idxbot1];
Dtype sign = (bot0tmp >= bot1tmp) ? Dtype(1.0) : Dtype(-1.0);
// Index offset for topdiff in following loops:
int op = (p+neighborhood_grid_radius) * neighborhood_grid_width\
+ (o + neighborhood_grid_radius); // index [o,p]
int idxopoffset = (item * topchannels + op);
for (int y = ymin; y <= ymax; y++) {
for (int x = xmin; x <= xmax; x++) {
int idxtopdiff = (idxopoffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p]
sum += topdiff[idxtopdiff] * sign;
}
}
}
}
}
const int sumelems = (kernel_radius * 2 + 1) * (kernel_radius * 2+1) * bottomchannels;
const int bot0index = ((n * bottomheight) + (m-pad_size)) * bottomwidth + (l-pad_size);
bottom0diff[bot0index + item * bottomcount] = sum / static_cast<float>(sumelems);
}
}
// == Correlation Backward Pass Kernel (For Blob 1)
template <typename Dtype>
__global__ void CorrelateDataBackward1Subtract(const int nthreads, int num,
int item, int topwidth, int topheight, int topchannels,
int max_displacement, int neighborhood_grid_radius,
int neighborhood_grid_width, int kernel_radius,
int stride1, int stride2, int bottomwidth, int bottomheight,
int pbottomwidth, int pbottomheight, int bottomchannels,
int bottomcount, int pad_size, const Dtype *bottom0,
const Dtype *bottom1, Dtype *bottom1diff, const Dtype *topdiff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// int l = index % bottomwidth + pad_size; //w-pos
// int m = (index / bottomwidth) % bottomheight + pad_size; // h-pos
// int n = (index / bottomwidth / bottomheight) % bottomchannels; // channels
int n = index % bottomchannels; // channels
int l = (index / bottomchannels) % bottomwidth + pad_size; // w-pos
int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; // h-pos
// round_off is a trick to enable integer division with ceil, even for negative numbers
// We use a large offset, for the inner part not to become negative.
const int round_off = ROUND_OFF;
const int round_off_s1 = stride1 * round_off;
Dtype sum = 0;
int idxbot1 = ((item * pbottomheight + m) * pbottomwidth + l)\
* bottomchannels + n;
for (int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) {
for (int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) {
int s2o = stride2 * o;
int s2p = stride2 * p;
// Get X,Y ranges and clamp
        // We add round_off before the int division and subtract round_off after it,
// to ensure the formula matches ceil behavior:
int xmin = (l - 2*kernel_radius - max_displacement - s2o + round_off_s1 - 1)\
/ stride1 + 1 - round_off;
// ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1
int ymin = (m - 2*kernel_radius - max_displacement - s2p + round_off_s1 - 1)\
/ stride1 + 1 - round_off;
        // ceil (m - 2*kernel_radius - max_displacement - s2p) / stride1
// Same here:
int xmax = (l - max_displacement - s2o + round_off_s1) / stride1 - round_off;
// floor (l - max_displacement - s2o) / stride1
int ymax = (m - max_displacement - s2p + round_off_s1) / stride1 - round_off;
// floor (m - max_displacement - s2p) / stride1
if (xmax >= 0 && ymax >= 0 && (xmin <= topwidth - 1) && (ymin <= topheight - 1)) {
xmin = max(0, xmin);
xmax = min(topwidth-1, xmax);
ymin = max(0, ymin);
ymax = min(topheight-1, ymax);
// Get bottom0 data:
int idxbot0 = ((item * pbottomheight + (m - s2p)) * pbottomwidth + (l - s2o))\
* bottomchannels + n;
          // bottom0[l-s2o,m-s2p,n]
Dtype bot0tmp = bottom0[idxbot0];
Dtype bot1tmp = bottom1[idxbot1];
Dtype sign = (bot0tmp >= bot1tmp) ? Dtype(-1.0) : Dtype(1.0);
// Index offset for topdiff in following loops:
int op = (p+neighborhood_grid_radius) * \
neighborhood_grid_width + (o+neighborhood_grid_radius); // index [o,p]
int idxOpOffset = (item * topchannels + op);
for (int y = ymin; y <= ymax; y++) {
for (int x = xmin; x <= xmax; x++) {
int idxtopdiff = (idxOpOffset * topheight + y)\
* topwidth + x; // topdiff[x,y,o,p]
sum += topdiff[idxtopdiff] * sign;
}
}
}
}
}
const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels;
const int bot1index = ((n * bottomheight) + (m - pad_size)) * bottomwidth + (l - pad_size);
bottom1diff[bot1index + item * bottomcount] = sum / static_cast<float>(sumelems);
}
}
// == Forward
// == Dimension rearrangement Kernel
template <typename Dtype>
__global__ void blob_rearrange_kernel2(const Dtype* in, Dtype* out, int num,
int channels, int width, int height, int widthheight, int padding, int pwidthheight) {
// change shape from [batchsize,channel,y,x] to [batchsize,y,x,channel]
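  // For example (illustrative): with padding = 1, the input element at (n, ch, y, x) ends up at
  // out[(n * pwidthheight + (y + 1) * (width + 2) + (x + 1)) * channels + ch]; border cells of
  // the padded output are not written by this kernel.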
int xy = blockIdx.x * blockDim.x + threadIdx.x;
if (xy >= widthheight )
return;
int ch = blockIdx.y;
int n = blockIdx.z;
Dtype value = in[(n * channels + ch) * widthheight + xy];
__syncthreads();
int xpad = (xy % width + padding);
int ypad = (xy / width + padding);
int xypad = ypad * (width + 2 * padding) + xpad;
out[(n * pwidthheight + xypad) * channels + ch] = value;
}
template <typename Dtype>
void Forward_gpu(
const Tensor<gpu, 4, Dtype> &out,
const Tensor<gpu, 4, Dtype> &data1,
const Tensor<gpu, 4, Dtype> &data2,
const Tensor<gpu, 4, Dtype> &tmp1,
const Tensor<gpu, 4, Dtype> &tmp2,
int top_channels_, int top_height_, int top_width_, int pad_size_,
bool is_multiply, int max_displacement_, int kernel_size_,
int neighborhood_grid_radius_, int neighborhood_grid_width_,
int kernel_radius_, int stride1_, int stride2_, hipStream_t stream,
hipStream_t stream_tmp1, hipStream_t stream_tmp2) {
const Dtype *bottom_data1 = data1.dptr_;
const Dtype *bottom_data2 = data2.dptr_;
Dtype *rbot1 = tmp1.dptr_;
Dtype *rbot2 = tmp2.dptr_;
Dtype *top = out.dptr_;
const int bnum = data1.size(0);
const int bchannels = data1.size(1);
const int bheight = data1.size(2);
const int bwidth = data1.size(3);
const int bwidthheight = bwidth * bheight;
const int topcount = top_width_ * top_height_ * top_channels_;
dim3 threadsPerBlock(THREADS_PER_WARP * WARPS_PER_BLOCK);
int threads_per_block = 16;
dim3 totalBlocksRearr((bwidthheight - 1) / threads_per_block + 1, bchannels, bnum);
const int pwidthheight = (bwidth + 2 * pad_size_) * (bheight + 2 * pad_size_);
hipLaunchKernelGGL(( blob_rearrange_kernel2<Dtype>), dim3(totalBlocksRearr), dim3(threads_per_block), 0, stream_tmp1,
bottom_data1, rbot1, bnum, bchannels, bwidth, bheight, bwidthheight, pad_size_, pwidthheight);
hipLaunchKernelGGL(( blob_rearrange_kernel2<Dtype>), dim3(totalBlocksRearr), dim3(threads_per_block), 0, stream_tmp2,
bottom_data2, rbot2, bnum, bchannels, bwidth, bheight, bwidthheight, pad_size_, pwidthheight);
const int num = bnum;
const int channels = bchannels;
const int height = bheight + 2 * pad_size_;
const int width = bwidth + 2 * pad_size_;
const int shared_memory_per_block = (kernel_size_ * kernel_size_) * bchannels;
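  // is_multiply selects the correlation type: true computes a patch-wise dot product
  // (CorrelateData), false computes a sum of absolute differences (CorrelateDataSubtract).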
if (is_multiply == true) {
// CorrelationLayer
int topThreadCount = topcount;
dim3 totalBlocksCorr(top_width_, top_height_, num);
hipLaunchKernelGGL(( CorrelateData<Dtype>), dim3(totalBlocksCorr), dim3(threadsPerBlock),
shared_memory_per_block * sizeof(Dtype), stream,
topThreadCount,
num, top_width_, top_height_, top_channels_, topcount,
max_displacement_, neighborhood_grid_radius_,
neighborhood_grid_width_, kernel_radius_, kernel_size_,
stride1_, stride2_,
width, height, channels,
rbot1, rbot2, top);
CORRELATION_CUDA_CHECK(hipPeekAtLastError());
} else {
// CorrelationLayer
for (int n = 0; n < num; n++) {
int topThreadCount = topcount;
const int gridSize = (topThreadCount + kMaxThreadsPerBlock - 1)\
/ kMaxThreadsPerBlock;
hipLaunchKernelGGL(( CorrelateDataSubtract<Dtype>), dim3(gridSize), dim3(kMaxThreadsPerBlock), 0, stream,
topThreadCount,
num, n, top_width_, top_height_, top_channels_, topcount,
max_displacement_, neighborhood_grid_radius_,
neighborhood_grid_width_, kernel_radius_,
stride1_, stride2_, width, height, channels, rbot1, rbot2, top);
CORRELATION_CUDA_CHECK(hipPeekAtLastError());
}
}
}
template <typename Dtype>
void Backward_gpu(
const Tensor<gpu, 4, Dtype> &out_grad,
const Tensor<gpu, 4, Dtype> &in_grad1,
const Tensor<gpu, 4, Dtype> &in_grad2,
const Tensor<gpu, 4, Dtype> &tmp1,
const Tensor<gpu, 4, Dtype> &tmp2,
int top_channels_, int top_height_,
int top_width_, int pad_size_, bool is_multiply,
int max_displacement_, int kernel_size_,
int neighborhood_grid_radius_, int neighborhood_grid_width_,
int kernel_radius_, int stride1_, int stride2_,
hipStream_t stream0, hipStream_t stream1,
int num, int channels, int height, int width) {
// Get top diff, compute bottom diff
const Dtype* top_diff = out_grad.dptr_;
Dtype* bottom0_diff = in_grad1.dptr_;
Dtype* bottom1_diff = in_grad2.dptr_;
const Dtype* rbot1 = tmp1.dptr_;
const Dtype* rbot2 = tmp2.dptr_;
const int paddedheight = height + 2 * pad_size_;
const int paddedwidth = width + 2 * pad_size_;
const int bottomcount = channels * height * width;
int botThreadCount = bottomcount;
const int gridSize = (botThreadCount + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
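  // The backward pass mirrors the forward choice of correlation type; the gradients w.r.t. the
  // first and second input are computed by separate kernels launched on stream0 and stream1.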
// CorrelationLayerBackward
if (is_multiply == true) {
// == Run kernel Backward 0
dim3 totalBlocksBackward0(width, height, channels * num); // First dim is fastest
const int buffer_size_backw0 = \
(static_cast<int>(ceil(static_cast<float>(2 * kernel_radius_)\
/ static_cast<float>(stride1_))) + 1) * top_channels_;
// == Run kernel Backward 0
for (int n = 0; n < num; n++) {
hipLaunchKernelGGL(( CorrelateDataBackward0<Dtype>), dim3(gridSize), dim3(kMaxThreadsPerBlock), 0, stream0,
botThreadCount,
num, n, top_width_, top_height_, top_channels_,
max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_,
stride1_, stride2_,
width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_,
bottom0_diff, rbot2, top_diff);
CORRELATION_CUDA_CHECK(hipPeekAtLastError());
}
// == Run kernel Backward 1
for (int n = 0; n < num; n++) {
hipLaunchKernelGGL(( CorrelateDataBackward1<Dtype>), dim3(gridSize), dim3(kMaxThreadsPerBlock), 0, stream1,
botThreadCount,
num, n, top_width_, top_height_, top_channels_,
max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_,
stride1_, stride2_,
width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_,
rbot1, bottom1_diff, top_diff);
CORRELATION_CUDA_CHECK(hipPeekAtLastError());
}
} else {
for (int n = 0; n < num; n++) {
// Bottom0:
hipLaunchKernelGGL(( CorrelateDataBackward0Subtract<Dtype>), dim3(gridSize), dim3(kMaxThreadsPerBlock), 0, stream0,
botThreadCount,
num, n, top_width_, top_height_, top_channels_,
max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_,
stride1_, stride2_,
width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_,
bottom0_diff, rbot1, rbot2, top_diff);
CORRELATION_CUDA_CHECK(hipPeekAtLastError());
}
for (int n = 0; n < num; n++) {
// Bottom1:
hipLaunchKernelGGL(( CorrelateDataBackward1Subtract<Dtype>), dim3(gridSize), dim3(kMaxThreadsPerBlock), 0, stream1,
botThreadCount,
num, n, top_width_, top_height_, top_channels_,
max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_,
stride1_, stride2_,
width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_,
rbot1, rbot2, bottom1_diff, top_diff);
CORRELATION_CUDA_CHECK(hipPeekAtLastError());
}
}
}
} // namespace cuda
template<typename Dtype>
inline void CorrelationForward(const Tensor<gpu, 4, Dtype> &out,
const Tensor<gpu, 4, Dtype> &data1,
const Tensor<gpu, 4, Dtype> &data2,
const Tensor<gpu, 4, Dtype> &tmp1,
const Tensor<gpu, 4, Dtype> &tmp2,
int top_channels_, int top_height_,
int top_width_, int pad_size_, bool is_multiply,
int max_displacement_, int kernel_size_,
int neighborhood_grid_radius_, int neighborhood_grid_width_,
int kernel_radius_, int stride1_, int stride2_
) {
hipStream_t stream = Stream<gpu>::GetStream(out.stream_);
hipStream_t stream_tmp1 = Stream<gpu>::GetStream(tmp1.stream_);
hipStream_t stream_tmp2 = Stream<gpu>::GetStream(tmp2.stream_);
cuda::Forward_gpu(out, data1, data2, tmp1, tmp2, top_channels_, top_height_,
top_width_, pad_size_, is_multiply, max_displacement_, kernel_size_,
neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_,
stride1_, stride2_, stream, stream_tmp1, stream_tmp2);
}
template<typename Dtype>
inline void CorrelationBackward(const Tensor<gpu, 4, Dtype> &out_grad,
const Tensor<gpu, 4, Dtype> &in_grad1,
const Tensor<gpu, 4, Dtype> &in_grad2,
const Tensor<gpu, 4, Dtype> &tmp1,
const Tensor<gpu, 4, Dtype> &tmp2,
int top_channels_, int top_height_,
int top_width_, int pad_size_, bool is_multiply,
int max_displacement_, int kernel_size_,
int neighborhood_grid_radius_, int neighborhood_grid_width_,
int kernel_radius_, int stride1_,
int stride2_, int num, int channels, int height, int width
) {
hipStream_t stream0 = Stream<gpu>::GetStream(in_grad1.stream_);
hipStream_t stream1 = Stream<gpu>::GetStream(in_grad2.stream_);
cuda::Backward_gpu(out_grad, in_grad1, in_grad2, tmp1, tmp2, top_channels_,
top_height_, top_width_, pad_size_, is_multiply,
max_displacement_, kernel_size_, neighborhood_grid_radius_,
neighborhood_grid_width_, kernel_radius_, stride1_, stride2_,
stream0, stream1, num, channels, height, width);
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(CorrelationParam param) {
return new CorrelationOp<gpu>(param);
}
} // namespace op
} // namespace mxnet
| f0228d6e9e3d227f2eb5cdca0aaf2b4702f2b43e.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright [2016] <Contributors>
 * \file Correlation.cu
* \brief Correlation operator
* \author Xu Dong
*/
#include "./correlation-inl.h"
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <algorithm>
#include <vector>
#define ROUND_OFF 50000
#define WARPS_PER_BLOCK 1
#define THREADS_PER_WARP 32
#define CORRELATION_CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
CHECK_EQ(error, cudaSuccess) << " " << cudaGetErrorString(error); \
} while (0)
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
namespace mshadow {
namespace cuda {
// == Correlation Kernel
template <typename Dtype>
__global__ void CorrelateData(const int nthreads, int num, int topwidth,
int topheight, int topchannels, int topcount,
int max_displacement, int neighborhood_grid_radius,
int neighborhood_grid_width, int kernel_radius, int kernel_size, int stride1, int stride2,
int bottomwidth, int bottomheight, int bottomchannels,
const Dtype *bottom0, const Dtype *bottom1, Dtype *top) {
extern __shared__ char patch_data_char[];
Dtype *patch_data = reinterpret_cast<Dtype *>(patch_data_char);
// First (upper left) position of kernel upper-left corner
// in current center position of neighborhood in image 1
int x1 = blockIdx.x * stride1 + max_displacement;
int y1 = blockIdx.y * stride1 + max_displacement;
int item = blockIdx.z;
int ch_off = threadIdx.x;
  // Load 3D patch into shared memory
for (int j = 0; j < kernel_size; j++) { // HEIGHT
for (int i = 0; i < kernel_size; i++) { // WIDTH
int ji_off = ((j * kernel_size) + i) * bottomchannels;
for (int ch = ch_off; ch < bottomchannels; ch += (THREADS_PER_WARP * WARPS_PER_BLOCK)) {
// CHANNELS
int idx1 = ((item * bottomheight + y1+j) * bottomwidth + x1+i) * bottomchannels + ch;
int idxPatchData = ji_off + ch;
patch_data[idxPatchData] = bottom0[idx1];
}
}
}
__syncthreads();
__shared__ Dtype sum[THREADS_PER_WARP * WARPS_PER_BLOCK];
// Compute correlation
for (int top_channel = 0; top_channel < topchannels; top_channel++) {
sum[ch_off] = 0;
int s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2;
int s2p = (top_channel / neighborhood_grid_width - neighborhood_grid_radius) * stride2;
for (int j = 0; j < kernel_size; j++) { // HEIGHT
for (int i = 0; i < kernel_size; i++) { // WIDTH
int ji_off = ((j * kernel_size) + i) * bottomchannels;
for (int ch = ch_off; ch < bottomchannels; ch += (THREADS_PER_WARP * WARPS_PER_BLOCK)) {
// CHANNELS
int x2 = x1 + s2o;
int y2 = y1 + s2p;
int idxPatchData = ji_off + ch;
int idx2 = ((item * bottomheight + y2 + j) * bottomwidth + x2 + i) * bottomchannels + ch;
sum[ch_off] += patch_data[idxPatchData] * bottom1[idx2];
}
}
}
__syncthreads();
if (ch_off == 0) {
Dtype total_sum = 0;
for (int idx = 0; idx < THREADS_PER_WARP * WARPS_PER_BLOCK; idx++) {
total_sum += sum[idx];
}
const int sumelems = kernel_size * kernel_size * bottomchannels;
const int index = ((top_channel * topheight + blockIdx.y) * topwidth) + blockIdx.x;
top[index + item*topcount] = total_sum / static_cast<float>(sumelems);
} // Aggregate result of different threads
}
}
// == Correlation Backward Pass Kernel (For data1)
template <typename Dtype>
__global__ void CorrelateDataBackward0(const int nthreads, int num, int item,
int topwidth, int topheight, int topchannels,
int max_displacement, int neighborhood_grid_radius,
int neighborhood_grid_width, int kernel_radius, int stride1, int stride2,
int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight,
int bottomchannels, int bottomcount, int pad_size,
Dtype *bottom0diff, const Dtype *bottom1, const Dtype *topdiff) {
CUDA_KERNEL_LOOP(index, nthreads) {
int n = index % bottomchannels; // channels
int l = (index / bottomchannels) % bottomwidth + pad_size; // w-pos
int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; // h-pos
// Get X,Y ranges and clamp
// round_off is a trick to enable integer division with ceil, even for negative numbers
// We use a large offset, for the inner part not to become negative.
const int round_off = ROUND_OFF;
const int round_off_s1 = stride1 * round_off;
    // We add round_off before the int division and subtract round_off after it,
// to ensure the formula matches ceil behavior:
int xmin = (l - 2*kernel_radius - max_displacement + round_off_s1 - 1)\
/ stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1
int ymin = (m - 2*kernel_radius - max_displacement + round_off_s1 - 1)\
        / stride1 + 1 - round_off; // ceil (m - 2*kernel_radius - max_displacement) / stride1
// Same here:
int xmax = (l - max_displacement + round_off_s1) / stride1 - round_off;
// floor (l - max_displacement) / stride1
int ymax = (m - max_displacement + round_off_s1) / stride1 - round_off;
// floor (m - max_displacement) / stride1
Dtype sum = 0;
if (xmax >= 0 && ymax >= 0 && (xmin <= topwidth-1) && (ymin <= topheight-1)) {
xmin = max(0, xmin);
xmax = min(topwidth-1, xmax);
ymin = max(0, ymin);
ymax = min(topheight-1, ymax);
for (int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) {
for (int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) {
// Get bottom1 data:
int s2o = stride2 * o;
int s2p = stride2 * p;
int idxbot1 = ((item * pbottomheight + (m + s2p)) * pbottomwidth + (l + s2o))\
* bottomchannels + n;
Dtype bot1tmp = bottom1[idxbot1]; // bottom1[l+s2o,m+s2p,n]
// Index offset for topdiff in following loops:
int op = (p+neighborhood_grid_radius) * neighborhood_grid_width\
+ (o + neighborhood_grid_radius); // index [o,p]
int idxopoffset = (item * topchannels + op);
for (int y = ymin; y <= ymax; y++) {
for (int x = xmin; x <= xmax; x++) {
int idxtopdiff = (idxopoffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p]
sum += topdiff[idxtopdiff] * bot1tmp;
}
}
}
}
}
const int sumelems = (kernel_radius * 2 + 1) * (kernel_radius * 2+1) * bottomchannels;
const int bot0index = ((n * bottomheight) + (m-pad_size)) * bottomwidth + (l-pad_size);
bottom0diff[bot0index + item * bottomcount] = sum / static_cast<float>(sumelems);
}
}
// == Correlation Backward Pass Kernel (For Blob 1)
template <typename Dtype>
__global__ void CorrelateDataBackward1(const int nthreads,
int num, int item, int topwidth, int topheight, int topchannels,
int max_displacement, int neighborhood_grid_radius,
int neighborhood_grid_width, int kernel_radius, int stride1, int stride2,
int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight,
int bottomchannels, int bottomcount, int pad_size,
const Dtype *bottom0, Dtype *bottom1diff, const Dtype *topdiff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// int l = index % bottomwidth + pad_size; //w-pos
// int m = (index / bottomwidth) % bottomheight + pad_size; // h-pos
// int n = (index / bottomwidth / bottomheight) % bottomchannels; // channels
int n = index % bottomchannels; // channels
int l = (index / bottomchannels) % bottomwidth + pad_size; // w-pos
int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; // h-pos
// round_off is a trick to enable integer division with ceil, even for negative numbers
// We use a large offset, for the inner part not to become negative.
const int round_off = ROUND_OFF;
const int round_off_s1 = stride1 * round_off;
Dtype sum = 0;
for (int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) {
for (int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) {
int s2o = stride2 * o;
int s2p = stride2 * p;
// Get X,Y ranges and clamp
        // We add round_off before the int division and subtract round_off after it,
// to ensure the formula matches ceil behavior:
int xmin = (l - 2*kernel_radius - max_displacement - s2o + round_off_s1 - 1)\
/ stride1 + 1 - round_off;
// ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1
int ymin = (m - 2*kernel_radius - max_displacement - s2p + round_off_s1 - 1)\
/ stride1 + 1 - round_off;
        // ceil (m - 2*kernel_radius - max_displacement - s2p) / stride1
// Same here:
int xmax = (l - max_displacement - s2o + round_off_s1) / stride1 - round_off;
// floor (l - max_displacement - s2o) / stride1
int ymax = (m - max_displacement - s2p + round_off_s1) / stride1 - round_off;
// floor (m - max_displacement - s2p) / stride1
if (xmax >= 0 && ymax >= 0 && (xmin <= topwidth - 1) && (ymin <= topheight - 1)) {
xmin = max(0, xmin);
xmax = min(topwidth-1, xmax);
ymin = max(0, ymin);
ymax = min(topheight-1, ymax);
// Get bottom0 data:
int idxbot0 = ((item * pbottomheight + (m - s2p)) \
* pbottomwidth + (l - s2o)) * bottomchannels + n;
          Dtype bot0tmp = bottom0[idxbot0]; // bottom0[l-s2o,m-s2p,n]
// Index offset for topdiff in following loops:
int op = (p+neighborhood_grid_radius) * \
neighborhood_grid_width + (o+neighborhood_grid_radius); // index [o,p]
int idxOpOffset = (item * topchannels + op);
for (int y = ymin; y <= ymax; y++) {
for (int x = xmin; x <= xmax; x++) {
int idxtopdiff = (idxOpOffset * topheight + y)\
* topwidth + x; // topdiff[x,y,o,p]
sum += topdiff[idxtopdiff] * bot0tmp;
}
}
}
}
}
const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels;
const int bot1index = ((n * bottomheight) + (m - pad_size)) * bottomwidth + (l - pad_size);
bottom1diff[bot1index + item * bottomcount] = sum / static_cast<float>(sumelems);
}
}
// == Correlation Kernel Subtraction
template <typename Dtype>
__global__ void CorrelateDataSubtract(const int nthreads, int num, int item,
int topwidth, int topheight, int topchannels, int topcount,
int max_displacement, int neighborhood_grid_radius,
int neighborhood_grid_width, int kernel_radius, int stride1, int stride2,
int bottomwidth, int bottomheight, int bottomchannels,
const Dtype *bottom0, const Dtype *bottom1, Dtype *top) {
CUDA_KERNEL_LOOP(index, nthreads) {
int x = index % topwidth; // w-pos
int y = (index / topwidth) % topheight; // h-pos
int c = (index / topwidth / topheight) % topchannels; // channels
// Offset of patch in image 2
int s2o = (c % neighborhood_grid_width - neighborhood_grid_radius) * stride2;
int s2p = (c / neighborhood_grid_width - neighborhood_grid_radius) * stride2;
// First (upper left) position of kernel center in current neighborhood in image 1
int x1 = x*stride1 + kernel_radius + max_displacement;
int y1 = y*stride1 + kernel_radius + max_displacement;
// Iterate through 3D patch
Dtype sum = 0;
for (int j = -kernel_radius; j <= kernel_radius; j++) { // HEIGHT
for (int i = -kernel_radius; i <= kernel_radius; i++) { // WIDTH
for (int l = 0; l < bottomchannels; l++) { // CHANNELS
// Calculate position in image 2
int x2 = x1 + s2o;
int y2 = y1 + s2p;
// Indices in bottom data: (CH=l,W=x2,H=y2,N)
int idx1 = ((item * bottomheight + y1 + j) * bottomwidth + x1 + i) \
* bottomchannels + l;
int idx2 = ((item * bottomheight + y2 + j) * bottomwidth + x2 + i) \
* bottomchannels + l;
// Do the correlation:
sum += fabsf(bottom0[idx1] - bottom1[idx2]);
}
}
}
const int sumelems = (kernel_radius * 2 + 1) * (kernel_radius * 2 + 1) * bottomchannels;
top[index + item * topcount] = sum / static_cast<float>(sumelems);
}
}
// == Correlation Backward Pass Kernel (For Blob 0)
template <typename Dtype>
__global__ void CorrelateDataBackward0Subtract(const int nthreads, int num,
int item, int topwidth, int topheight, int topchannels,
int max_displacement, int neighborhood_grid_radius,
int neighborhood_grid_width, int kernel_radius,
int stride1, int stride2, int bottomwidth, int bottomheight,
int pbottomwidth, int pbottomheight,
int bottomchannels, int bottomcount, int pad_size,
Dtype *bottom0diff, const Dtype *bottom0, const Dtype *bottom1, const Dtype *topdiff) {
CUDA_KERNEL_LOOP(index, nthreads) {
int n = index % bottomchannels; // channels
int l = (index / bottomchannels) % bottomwidth + pad_size; // w-pos
int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; // h-pos
// Get X,Y ranges and clamp
// round_off is a trick to enable integer division with ceil, even for negative numbers
// We use a large offset, for the inner part not to become negative.
const int round_off = ROUND_OFF;
const int round_off_s1 = stride1 * round_off;
int idxbot0 = ((item * pbottomheight + m) * pbottomwidth + l)\
* bottomchannels + n;
    // We add round_off before the int division and subtract round_off after it,
// to ensure the formula matches ceil behavior:
int xmin = (l - 2*kernel_radius - max_displacement + round_off_s1 - 1)\
/ stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1
int ymin = (m - 2*kernel_radius - max_displacement + round_off_s1 - 1)\
        / stride1 + 1 - round_off; // ceil (m - 2*kernel_radius - max_displacement) / stride1
// Same here:
int xmax = (l - max_displacement + round_off_s1) / stride1 - round_off;
// floor (l - max_displacement) / stride1
int ymax = (m - max_displacement + round_off_s1) / stride1 - round_off;
// floor (m - max_displacement) / stride1
Dtype sum = 0;
if (xmax >= 0 && ymax >= 0 && (xmin <= topwidth-1) && (ymin <= topheight-1)) {
xmin = max(0, xmin);
xmax = min(topwidth-1, xmax);
ymin = max(0, ymin);
ymax = min(topheight-1, ymax);
for (int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) {
for (int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) {
// Get bottom1 data:
int s2o = stride2 * o;
int s2p = stride2 * p;
int idxbot1 = ((item * pbottomheight + (m+s2p)) * pbottomwidth\
+ (l+s2o)) * bottomchannels + n;
Dtype bot0tmp = bottom0[idxbot0];
Dtype bot1tmp = bottom1[idxbot1];
Dtype sign = (bot0tmp >= bot1tmp) ? Dtype(1.0) : Dtype(-1.0);
// Index offset for topdiff in following loops:
int op = (p+neighborhood_grid_radius) * neighborhood_grid_width\
+ (o + neighborhood_grid_radius); // index [o,p]
int idxopoffset = (item * topchannels + op);
for (int y = ymin; y <= ymax; y++) {
for (int x = xmin; x <= xmax; x++) {
int idxtopdiff = (idxopoffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p]
sum += topdiff[idxtopdiff] * sign;
}
}
}
}
}
const int sumelems = (kernel_radius * 2 + 1) * (kernel_radius * 2+1) * bottomchannels;
const int bot0index = ((n * bottomheight) + (m-pad_size)) * bottomwidth + (l-pad_size);
bottom0diff[bot0index + item * bottomcount] = sum / static_cast<float>(sumelems);
}
}
// == Correlation Backward Pass Kernel (For Blob 1)
template <typename Dtype>
__global__ void CorrelateDataBackward1Subtract(const int nthreads, int num,
int item, int topwidth, int topheight, int topchannels,
int max_displacement, int neighborhood_grid_radius,
int neighborhood_grid_width, int kernel_radius,
int stride1, int stride2, int bottomwidth, int bottomheight,
int pbottomwidth, int pbottomheight, int bottomchannels,
int bottomcount, int pad_size, const Dtype *bottom0,
const Dtype *bottom1, Dtype *bottom1diff, const Dtype *topdiff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// int l = index % bottomwidth + pad_size; //w-pos
// int m = (index / bottomwidth) % bottomheight + pad_size; // h-pos
// int n = (index / bottomwidth / bottomheight) % bottomchannels; // channels
int n = index % bottomchannels; // channels
int l = (index / bottomchannels) % bottomwidth + pad_size; // w-pos
int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; // h-pos
// round_off is a trick to enable integer division with ceil, even for negative numbers
// We use a large offset, for the inner part not to become negative.
const int round_off = ROUND_OFF;
const int round_off_s1 = stride1 * round_off;
Dtype sum = 0;
int idxbot1 = ((item * pbottomheight + m) * pbottomwidth + l)\
* bottomchannels + n;
for (int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) {
for (int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) {
int s2o = stride2 * o;
int s2p = stride2 * p;
// Get X,Y ranges and clamp
        // We add round_off before the int division and subtract round_off after it,
// to ensure the formula matches ceil behavior:
int xmin = (l - 2*kernel_radius - max_displacement - s2o + round_off_s1 - 1)\
/ stride1 + 1 - round_off;
// ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1
int ymin = (m - 2*kernel_radius - max_displacement - s2p + round_off_s1 - 1)\
/ stride1 + 1 - round_off;
        // ceil (m - 2*kernel_radius - max_displacement - s2p) / stride1
// Same here:
int xmax = (l - max_displacement - s2o + round_off_s1) / stride1 - round_off;
// floor (l - max_displacement - s2o) / stride1
int ymax = (m - max_displacement - s2p + round_off_s1) / stride1 - round_off;
// floor (m - max_displacement - s2p) / stride1
if (xmax >= 0 && ymax >= 0 && (xmin <= topwidth - 1) && (ymin <= topheight - 1)) {
xmin = max(0, xmin);
xmax = min(topwidth-1, xmax);
ymin = max(0, ymin);
ymax = min(topheight-1, ymax);
// Get bottom0 data:
int idxbot0 = ((item * pbottomheight + (m - s2p)) * pbottomwidth + (l - s2o))\
* bottomchannels + n;
          // bottom0[l-s2o,m-s2p,n]
Dtype bot0tmp = bottom0[idxbot0];
Dtype bot1tmp = bottom1[idxbot1];
Dtype sign = (bot0tmp >= bot1tmp) ? Dtype(-1.0) : Dtype(1.0);
// Index offset for topdiff in following loops:
int op = (p+neighborhood_grid_radius) * \
neighborhood_grid_width + (o+neighborhood_grid_radius); // index [o,p]
int idxOpOffset = (item * topchannels + op);
for (int y = ymin; y <= ymax; y++) {
for (int x = xmin; x <= xmax; x++) {
int idxtopdiff = (idxOpOffset * topheight + y)\
* topwidth + x; // topdiff[x,y,o,p]
sum += topdiff[idxtopdiff] * sign;
}
}
}
}
}
const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels;
const int bot1index = ((n * bottomheight) + (m - pad_size)) * bottomwidth + (l - pad_size);
bottom1diff[bot1index + item * bottomcount] = sum / static_cast<float>(sumelems);
}
}
// == Forward
// == Dimension rearrangement Kernel
template <typename Dtype>
__global__ void blob_rearrange_kernel2(const Dtype* in, Dtype* out, int num,
int channels, int width, int height, int widthheight, int padding, int pwidthheight) {
// change shape from [batchsize,channel,y,x] to [batchsize,y,x,channel]
int xy = blockIdx.x * blockDim.x + threadIdx.x;
if (xy >= widthheight )
return;
int ch = blockIdx.y;
int n = blockIdx.z;
Dtype value = in[(n * channels + ch) * widthheight + xy];
__syncthreads();
int xpad = (xy % width + padding);
int ypad = (xy / width + padding);
int xypad = ypad * (width + 2 * padding) + xpad;
out[(n * pwidthheight + xypad) * channels + ch] = value;
}
template <typename Dtype>
void Forward_gpu(
const Tensor<gpu, 4, Dtype> &out,
const Tensor<gpu, 4, Dtype> &data1,
const Tensor<gpu, 4, Dtype> &data2,
const Tensor<gpu, 4, Dtype> &tmp1,
const Tensor<gpu, 4, Dtype> &tmp2,
int top_channels_, int top_height_, int top_width_, int pad_size_,
bool is_multiply, int max_displacement_, int kernel_size_,
int neighborhood_grid_radius_, int neighborhood_grid_width_,
int kernel_radius_, int stride1_, int stride2_, cudaStream_t stream,
cudaStream_t stream_tmp1, cudaStream_t stream_tmp2) {
const Dtype *bottom_data1 = data1.dptr_;
const Dtype *bottom_data2 = data2.dptr_;
Dtype *rbot1 = tmp1.dptr_;
Dtype *rbot2 = tmp2.dptr_;
Dtype *top = out.dptr_;
const int bnum = data1.size(0);
const int bchannels = data1.size(1);
const int bheight = data1.size(2);
const int bwidth = data1.size(3);
const int bwidthheight = bwidth * bheight;
const int topcount = top_width_ * top_height_ * top_channels_;
dim3 threadsPerBlock(THREADS_PER_WARP * WARPS_PER_BLOCK);
int threads_per_block = 16;
dim3 totalBlocksRearr((bwidthheight - 1) / threads_per_block + 1, bchannels, bnum);
const int pwidthheight = (bwidth + 2 * pad_size_) * (bheight + 2 * pad_size_);
blob_rearrange_kernel2<Dtype><<<totalBlocksRearr, threads_per_block, 0, stream_tmp1>>>
(bottom_data1, rbot1, bnum, bchannels, bwidth, bheight, bwidthheight, pad_size_, pwidthheight);
blob_rearrange_kernel2<Dtype><<<totalBlocksRearr, threads_per_block, 0, stream_tmp2>>>
(bottom_data2, rbot2, bnum, bchannels, bwidth, bheight, bwidthheight, pad_size_, pwidthheight);
const int num = bnum;
const int channels = bchannels;
const int height = bheight + 2 * pad_size_;
const int width = bwidth + 2 * pad_size_;
const int shared_memory_per_block = (kernel_size_ * kernel_size_) * bchannels;
if (is_multiply == true) {
// CorrelationLayer
int topThreadCount = topcount;
dim3 totalBlocksCorr(top_width_, top_height_, num);
CorrelateData<Dtype><<<totalBlocksCorr, threadsPerBlock,
shared_memory_per_block * sizeof(Dtype), stream>>>(
topThreadCount,
num, top_width_, top_height_, top_channels_, topcount,
max_displacement_, neighborhood_grid_radius_,
neighborhood_grid_width_, kernel_radius_, kernel_size_,
stride1_, stride2_,
width, height, channels,
rbot1, rbot2, top);
CORRELATION_CUDA_CHECK(cudaPeekAtLastError());
} else {
// CorrelationLayer
for (int n = 0; n < num; n++) {
int topThreadCount = topcount;
const int gridSize = (topThreadCount + kMaxThreadsPerBlock - 1)\
/ kMaxThreadsPerBlock;
CorrelateDataSubtract<Dtype><<<gridSize, kMaxThreadsPerBlock, 0, stream>>>(
topThreadCount,
num, n, top_width_, top_height_, top_channels_, topcount,
max_displacement_, neighborhood_grid_radius_,
neighborhood_grid_width_, kernel_radius_,
stride1_, stride2_, width, height, channels, rbot1, rbot2, top);
CORRELATION_CUDA_CHECK(cudaPeekAtLastError());
}
}
}
template <typename Dtype>
void Backward_gpu(
const Tensor<gpu, 4, Dtype> &out_grad,
const Tensor<gpu, 4, Dtype> &in_grad1,
const Tensor<gpu, 4, Dtype> &in_grad2,
const Tensor<gpu, 4, Dtype> &tmp1,
const Tensor<gpu, 4, Dtype> &tmp2,
int top_channels_, int top_height_,
int top_width_, int pad_size_, bool is_multiply,
int max_displacement_, int kernel_size_,
int neighborhood_grid_radius_, int neighborhood_grid_width_,
int kernel_radius_, int stride1_, int stride2_,
cudaStream_t stream0, cudaStream_t stream1,
int num, int channels, int height, int width) {
// Get top diff, compute bottom diff
const Dtype* top_diff = out_grad.dptr_;
Dtype* bottom0_diff = in_grad1.dptr_;
Dtype* bottom1_diff = in_grad2.dptr_;
const Dtype* rbot1 = tmp1.dptr_;
const Dtype* rbot2 = tmp2.dptr_;
const int paddedheight = height + 2 * pad_size_;
const int paddedwidth = width + 2 * pad_size_;
const int bottomcount = channels * height * width;
int botThreadCount = bottomcount;
const int gridSize = (botThreadCount + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
// CorrelationLayerBackward
if (is_multiply == true) {
// == Run kernel Backward 0
dim3 totalBlocksBackward0(width, height, channels * num); // First dim is fastest
const int buffer_size_backw0 = \
(static_cast<int>(ceil(static_cast<float>(2 * kernel_radius_)\
/ static_cast<float>(stride1_))) + 1) * top_channels_;
// == Run kernel Backward 0
for (int n = 0; n < num; n++) {
CorrelateDataBackward0<Dtype><<<gridSize, kMaxThreadsPerBlock, 0, stream0>>>(
botThreadCount,
num, n, top_width_, top_height_, top_channels_,
max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_,
stride1_, stride2_,
width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_,
bottom0_diff, rbot2, top_diff);
CORRELATION_CUDA_CHECK(cudaPeekAtLastError());
}
// == Run kernel Backward 1
for (int n = 0; n < num; n++) {
CorrelateDataBackward1<Dtype><<<gridSize, kMaxThreadsPerBlock, 0, stream1>>>(
botThreadCount,
num, n, top_width_, top_height_, top_channels_,
max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_,
stride1_, stride2_,
width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_,
rbot1, bottom1_diff, top_diff);
CORRELATION_CUDA_CHECK(cudaPeekAtLastError());
}
} else {
for (int n = 0; n < num; n++) {
// Bottom0:
CorrelateDataBackward0Subtract<Dtype><<<gridSize, kMaxThreadsPerBlock, 0, stream0>>>(
botThreadCount,
num, n, top_width_, top_height_, top_channels_,
max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_,
stride1_, stride2_,
width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_,
bottom0_diff, rbot1, rbot2, top_diff);
CORRELATION_CUDA_CHECK(cudaPeekAtLastError());
}
for (int n = 0; n < num; n++) {
// Bottom1:
CorrelateDataBackward1Subtract<Dtype><<<gridSize, kMaxThreadsPerBlock, 0, stream1>>>(
botThreadCount,
num, n, top_width_, top_height_, top_channels_,
max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_,
stride1_, stride2_,
width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_,
rbot1, rbot2, bottom1_diff, top_diff);
CORRELATION_CUDA_CHECK(cudaPeekAtLastError());
}
}
}
} // namespace cuda
template<typename Dtype>
inline void CorrelationForward(const Tensor<gpu, 4, Dtype> &out,
const Tensor<gpu, 4, Dtype> &data1,
const Tensor<gpu, 4, Dtype> &data2,
const Tensor<gpu, 4, Dtype> &tmp1,
const Tensor<gpu, 4, Dtype> &tmp2,
int top_channels_, int top_height_,
int top_width_, int pad_size_, bool is_multiply,
int max_displacement_, int kernel_size_,
int neighborhood_grid_radius_, int neighborhood_grid_width_,
int kernel_radius_, int stride1_, int stride2_
) {
cudaStream_t stream = Stream<gpu>::GetStream(out.stream_);
cudaStream_t stream_tmp1 = Stream<gpu>::GetStream(tmp1.stream_);
cudaStream_t stream_tmp2 = Stream<gpu>::GetStream(tmp2.stream_);
cuda::Forward_gpu(out, data1, data2, tmp1, tmp2, top_channels_, top_height_,
top_width_, pad_size_, is_multiply, max_displacement_, kernel_size_,
neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_,
stride1_, stride2_, stream, stream_tmp1, stream_tmp2);
}
template<typename Dtype>
inline void CorrelationBackward(const Tensor<gpu, 4, Dtype> &out_grad,
const Tensor<gpu, 4, Dtype> &in_grad1,
const Tensor<gpu, 4, Dtype> &in_grad2,
const Tensor<gpu, 4, Dtype> &tmp1,
const Tensor<gpu, 4, Dtype> &tmp2,
int top_channels_, int top_height_,
int top_width_, int pad_size_, bool is_multiply,
int max_displacement_, int kernel_size_,
int neighborhood_grid_radius_, int neighborhood_grid_width_,
int kernel_radius_, int stride1_,
int stride2_, int num, int channels, int height, int width
) {
cudaStream_t stream0 = Stream<gpu>::GetStream(in_grad1.stream_);
cudaStream_t stream1 = Stream<gpu>::GetStream(in_grad2.stream_);
cuda::Backward_gpu(out_grad, in_grad1, in_grad2, tmp1, tmp2, top_channels_,
top_height_, top_width_, pad_size_, is_multiply,
max_displacement_, kernel_size_, neighborhood_grid_radius_,
neighborhood_grid_width_, kernel_radius_, stride1_, stride2_,
stream0, stream1, num, channels, height, width);
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(CorrelationParam param) {
return new CorrelationOp<gpu>(param);
}
} // namespace op
} // namespace mxnet
|
1df3b969c0eaaf32a5b926112e218b7c121ea221.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define GROUP_SIZE 256
#define BUFFER_GROUPS 2
#define BUFFER_SIZE BUFFER_GROUPS*GROUP_SIZE
#define WARP_SIZE 32
#define INVALID 0xFFFF
/**
* Find a bounding box for the atoms in each block.
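 * Each block covers TILE_SIZE consecutive atoms; the result is stored as a box center
 * (blockCenter) plus half-extent (blockBoundingBox), and sortedBlocks records a
 * (box size, block index) pair that is later sorted by block size.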
*/
extern "C" __global__ void findBlockBounds(int numAtoms, real4 periodicBoxSize, real4 invPeriodicBoxSize, const real4* __restrict__ posq,
real4* __restrict__ blockCenter, real4* __restrict__ blockBoundingBox, int* __restrict__ rebuildNeighborList, real2* __restrict__ sortedBlocks) {
int index = blockIdx.x*blockDim.x+threadIdx.x;
int base = index*TILE_SIZE;
while (base < numAtoms) {
real4 pos = posq[base];
#ifdef USE_PERIODIC
pos.x -= floor(pos.x*invPeriodicBoxSize.x)*periodicBoxSize.x;
pos.y -= floor(pos.y*invPeriodicBoxSize.y)*periodicBoxSize.y;
pos.z -= floor(pos.z*invPeriodicBoxSize.z)*periodicBoxSize.z;
#endif
real4 minPos = pos;
real4 maxPos = pos;
int last = min(base+TILE_SIZE, numAtoms);
for (int i = base+1; i < last; i++) {
pos = posq[i];
#ifdef USE_PERIODIC
real4 center = 0.5f*(maxPos+minPos);
pos.x -= floor((pos.x-center.x)*invPeriodicBoxSize.x+0.5f)*periodicBoxSize.x;
pos.y -= floor((pos.y-center.y)*invPeriodicBoxSize.y+0.5f)*periodicBoxSize.y;
pos.z -= floor((pos.z-center.z)*invPeriodicBoxSize.z+0.5f)*periodicBoxSize.z;
#endif
minPos = make_real4(min(minPos.x,pos.x), min(minPos.y,pos.y), min(minPos.z,pos.z), 0);
maxPos = make_real4(max(maxPos.x,pos.x), max(maxPos.y,pos.y), max(maxPos.z,pos.z), 0);
}
real4 blockSize = 0.5f*(maxPos-minPos);
blockBoundingBox[index] = blockSize;
blockCenter[index] = 0.5f*(maxPos+minPos);
sortedBlocks[index] = make_real2(blockSize.x+blockSize.y+blockSize.z, index);
index += blockDim.x*gridDim.x;
base = index*TILE_SIZE;
}
if (blockIdx.x == 0 && threadIdx.x == 0)
rebuildNeighborList[0] = 0;
}
/**
* Sort the data about bounding boxes so it can be accessed more efficiently in the next kernel.
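 * It also flags a neighbor list rebuild (and resets the interaction count) when any atom has
 * moved by more than half of the padding distance since the list was last built.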
*/
extern "C" __global__ void sortBoxData(const real2* __restrict__ sortedBlock, const real4* __restrict__ blockCenter,
const real4* __restrict__ blockBoundingBox, real4* __restrict__ sortedBlockCenter,
real4* __restrict__ sortedBlockBoundingBox, const real4* __restrict__ posq, const real4* __restrict__ oldPositions,
unsigned int* __restrict__ interactionCount, int* __restrict__ rebuildNeighborList) {
for (int i = threadIdx.x+blockIdx.x*blockDim.x; i < NUM_BLOCKS; i += blockDim.x*gridDim.x) {
int index = (int) sortedBlock[i].y;
sortedBlockCenter[i] = blockCenter[index];
sortedBlockBoundingBox[i] = blockBoundingBox[index];
}
// Also check whether any atom has moved enough so that we really need to rebuild the neighbor list.
bool rebuild = false;
for (int i = threadIdx.x+blockIdx.x*blockDim.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x) {
real4 delta = oldPositions[i]-posq[i];
if (delta.x*delta.x + delta.y*delta.y + delta.z*delta.z > 0.25f*PADDING*PADDING)
rebuild = true;
}
if (rebuild) {
rebuildNeighborList[0] = 1;
interactionCount[0] = 0;
}
}
/**
* Perform a parallel prefix sum over an array. The input values are all assumed to be 0 or 1.
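 * Example (illustrative): the 0/1 flags [1, 0, 1, 1, 0, ...] produce the inclusive prefix sum
 * [1, 1, 2, 3, 3, ...], so sum[BUFFER_SIZE-1] is the total number of set flags and sum[i]-1 is
 * the compacted position of element i whenever its flag is set.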
*/
__device__ void prefixSum(short* sum, ushort2* temp) {
#if __CUDA_ARCH__ >= 300
const int indexInWarp = threadIdx.x%WARP_SIZE;
const int warpMask = (2<<indexInWarp)-1;
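    // warpMask has bits 0..indexInWarp set, so __popc(__ballot(flag) & warpMask) yields an
    // inclusive scan of the 0/1 flags within each warp.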
for (int base = 0; base < BUFFER_SIZE; base += blockDim.x)
temp[base+threadIdx.x].x = __popc(__ballot(sum[base+threadIdx.x])&warpMask);
__syncthreads();
if (threadIdx.x < BUFFER_SIZE/WARP_SIZE) {
int multiWarpSum = temp[(threadIdx.x+1)*WARP_SIZE-1].x;
for (int offset = 1; offset < BUFFER_SIZE/WARP_SIZE; offset *= 2) {
short n = __shfl_up(multiWarpSum, offset, WARP_SIZE);
if (indexInWarp >= offset)
multiWarpSum += n;
}
temp[threadIdx.x].y = multiWarpSum;
}
__syncthreads();
for (int i = threadIdx.x; i < BUFFER_SIZE; i += blockDim.x)
sum[i] = temp[i].x+(i < WARP_SIZE ? 0 : temp[i/WARP_SIZE-1].y);
__syncthreads();
#else
for (int i = threadIdx.x; i < BUFFER_SIZE; i += blockDim.x)
temp[i].x = sum[i];
__syncthreads();
int whichBuffer = 0;
for (int offset = 1; offset < BUFFER_SIZE; offset *= 2) {
if (whichBuffer == 0)
for (int i = threadIdx.x; i < BUFFER_SIZE; i += blockDim.x)
temp[i].y = (i < offset ? temp[i].x : temp[i].x+temp[i-offset].x);
else
for (int i = threadIdx.x; i < BUFFER_SIZE; i += blockDim.x)
temp[i].x = (i < offset ? temp[i].y : temp[i].y+temp[i-offset].y);
whichBuffer = 1-whichBuffer;
__syncthreads();
}
if (whichBuffer == 0)
for (int i = threadIdx.x; i < BUFFER_SIZE; i += blockDim.x)
sum[i] = temp[i].x;
else
for (int i = threadIdx.x; i < BUFFER_SIZE; i += blockDim.x)
sum[i] = temp[i].y;
__syncthreads();
#endif
}
/**
* This is called by findBlocksWithInteractions(). It compacts the list of blocks, identifies interactions
* in them, and writes the result to global memory.
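 * Atoms left over after the last complete tile are kept in shared memory and carried into the
 * next call; they are flushed as a final partial tile when 'finish' is true.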
*/
__device__ void storeInteractionData(int x, unsigned short* buffer, short* sum, ushort2* temp, int* atoms, int& numAtoms,
int& baseIndex, unsigned int* interactionCount, int* interactingTiles, unsigned int* interactingAtoms, real4 periodicBoxSize,
real4 invPeriodicBoxSize, const real4* posq, real3* posBuffer, real4 blockCenterX, real4 blockSizeX, unsigned int maxTiles, bool finish) {
const bool singlePeriodicCopy = (0.5f*periodicBoxSize.x-blockSizeX.x >= PADDED_CUTOFF &&
0.5f*periodicBoxSize.y-blockSizeX.y >= PADDED_CUTOFF &&
0.5f*periodicBoxSize.z-blockSizeX.z >= PADDED_CUTOFF);
if (threadIdx.x < TILE_SIZE) {
real3 pos = trimTo3(posq[x*TILE_SIZE+threadIdx.x]);
posBuffer[threadIdx.x] = pos;
#ifdef USE_PERIODIC
if (singlePeriodicCopy) {
// The box is small enough that we can just translate all the atoms into a single periodic
// box, then skip having to apply periodic boundary conditions later.
pos.x -= floor((pos.x-blockCenterX.x)*invPeriodicBoxSize.x+0.5f)*periodicBoxSize.x;
pos.y -= floor((pos.y-blockCenterX.y)*invPeriodicBoxSize.y+0.5f)*periodicBoxSize.y;
pos.z -= floor((pos.z-blockCenterX.z)*invPeriodicBoxSize.z+0.5f)*periodicBoxSize.z;
posBuffer[threadIdx.x] = pos;
}
#endif
}
// The buffer is full, so we need to compact it and write out results. Start by doing a parallel prefix sum.
for (int i = threadIdx.x; i < BUFFER_SIZE; i += blockDim.x)
sum[i] = (buffer[i] == INVALID ? 0 : 1);
__syncthreads();
prefixSum(sum, temp);
int numValid = sum[BUFFER_SIZE-1];
// Compact the buffer.
for (int i = threadIdx.x; i < BUFFER_SIZE; i += blockDim.x)
if (buffer[i] != INVALID)
temp[sum[i]-1].x = buffer[i];
__syncthreads();
for (int i = threadIdx.x; i < BUFFER_SIZE; i += blockDim.x)
buffer[i] = temp[i].x;
__syncthreads();
// Loop over the tiles and find specific interactions in them.
const int indexInWarp = threadIdx.x%WARP_SIZE;
for (int base = 0; base < numValid; base += BUFFER_SIZE/WARP_SIZE) {
for (int i = threadIdx.x/WARP_SIZE; i < BUFFER_SIZE/WARP_SIZE && base+i < numValid; i += GROUP_SIZE/WARP_SIZE) {
// Check each atom in block Y for interactions.
real3 pos = trimTo3(posq[buffer[base+i]*TILE_SIZE+indexInWarp]);
#ifdef USE_PERIODIC
if (singlePeriodicCopy) {
pos.x -= floor((pos.x-blockCenterX.x)*invPeriodicBoxSize.x+0.5f)*periodicBoxSize.x;
pos.y -= floor((pos.y-blockCenterX.y)*invPeriodicBoxSize.y+0.5f)*periodicBoxSize.y;
pos.z -= floor((pos.z-blockCenterX.z)*invPeriodicBoxSize.z+0.5f)*periodicBoxSize.z;
}
#endif
bool interacts = false;
#ifdef USE_PERIODIC
if (!singlePeriodicCopy) {
for (int j = 0; j < TILE_SIZE; j++) {
real3 delta = pos-posBuffer[j];
delta.x -= floor(delta.x*invPeriodicBoxSize.x+0.5f)*periodicBoxSize.x;
delta.y -= floor(delta.y*invPeriodicBoxSize.y+0.5f)*periodicBoxSize.y;
delta.z -= floor(delta.z*invPeriodicBoxSize.z+0.5f)*periodicBoxSize.z;
interacts |= (delta.x*delta.x+delta.y*delta.y+delta.z*delta.z < PADDED_CUTOFF_SQUARED);
}
}
else {
#endif
for (int j = 0; j < TILE_SIZE; j++) {
real3 delta = pos-posBuffer[j];
interacts |= (delta.x*delta.x+delta.y*delta.y+delta.z*delta.z < PADDED_CUTOFF_SQUARED);
}
#ifdef USE_PERIODIC
}
#endif
sum[i*WARP_SIZE+indexInWarp] = (interacts ? 1 : 0);
}
for (int i = numValid-base+threadIdx.x/WARP_SIZE; i < BUFFER_SIZE/WARP_SIZE; i += GROUP_SIZE/WARP_SIZE)
sum[i*WARP_SIZE+indexInWarp] = 0;
// Compact the list of atoms.
__syncthreads();
prefixSum(sum, temp);
for (int i = threadIdx.x; i < BUFFER_SIZE; i += blockDim.x)
if (sum[i] != (i == 0 ? 0 : sum[i-1]))
atoms[numAtoms+sum[i]-1] = buffer[base+i/WARP_SIZE]*TILE_SIZE+indexInWarp;
// Store them to global memory.
int atomsToStore = numAtoms+sum[BUFFER_SIZE-1];
bool storePartialTile = (finish && base >= numValid-BUFFER_SIZE/WARP_SIZE);
int tilesToStore = (storePartialTile ? (atomsToStore+TILE_SIZE-1)/TILE_SIZE : atomsToStore/TILE_SIZE);
if (tilesToStore > 0) {
if (threadIdx.x == 0)
baseIndex = atomicAdd(interactionCount, tilesToStore);
__syncthreads();
if (threadIdx.x == 0)
numAtoms = atomsToStore-tilesToStore*TILE_SIZE;
if (baseIndex+tilesToStore <= maxTiles) {
if (threadIdx.x < tilesToStore)
interactingTiles[baseIndex+threadIdx.x] = x;
for (int i = threadIdx.x; i < tilesToStore*TILE_SIZE; i += blockDim.x)
interactingAtoms[baseIndex*TILE_SIZE+i] = (i < atomsToStore ? atoms[i] : NUM_ATOMS);
}
}
else {
__syncthreads();
if (threadIdx.x == 0)
numAtoms += sum[BUFFER_SIZE-1];
}
__syncthreads();
if (threadIdx.x < numAtoms && !storePartialTile)
atoms[threadIdx.x] = atoms[tilesToStore*TILE_SIZE+threadIdx.x];
}
if (numValid == 0 && numAtoms > 0 && finish) {
// We didn't have any more tiles to process, but there were some atoms left over from a
// previous call to this function. Save them now.
if (threadIdx.x == 0)
baseIndex = atomicAdd(interactionCount, 1);
__syncthreads();
if (baseIndex < maxTiles) {
if (threadIdx.x == 0)
interactingTiles[baseIndex] = x;
if (threadIdx.x < TILE_SIZE)
interactingAtoms[baseIndex*TILE_SIZE+threadIdx.x] = (threadIdx.x < numAtoms ? atoms[threadIdx.x] : NUM_ATOMS);
}
}
// Reset the buffer for processing more tiles.
for (int i = threadIdx.x; i < BUFFER_SIZE; i += blockDim.x)
buffer[i] = INVALID;
__syncthreads();
}
/**
* Compare the bounding boxes for each pair of atom blocks (comprised of 32 atoms each), forming a tile. If the two
* atom blocks are sufficiently far apart, mark them as non-interacting. There are two stages in the algorithm.
*
* STAGE 1:
*
 * A coarse-grained neighbour list of interacting atom blocks is constructed for each atom block.
*
* Each threadblock first loads in some block X of interest. Each thread within the threadblock then loads
* in a different atomblock Y. If Y has exclusions with X, then Y is not processed. If the bounding boxes
* of the two atomblocks are within the cutoff distance, then the two atomblocks are considered to be
* interacting and Y is added to the buffer for X. If during any given iteration an atomblock (or thread)
 * finds BUFFER_GROUPS interacting blocks, the entire buffer is sent for compaction by storeInteractionData().
*
* STAGE 2:
*
 * A fine-grained neighbour list of the atoms interacting with each atom block is constructed.
*
* The input is an atomblock list detailing the interactions with other atomblocks. The list of interacting
 * atom blocks is initially stored in the buffer array in shared memory. buffer is then compacted using
* prefixSum. Afterwards, each threadblock processes one contiguous atomblock X. Each warp in a threadblock
* processes a block Y to find the atoms that interact with any given atom in X. Once BUFFER_SIZE/WARP_SIZE
* (eg. 16) atomblocks have been processed for a given X, the list of interacting atoms in these 16 blocks
 * is subsequently compacted. The process repeats until all atomblocks that interact with X are computed.
*
* [in] periodicBoxSize - size of the rectangular periodic box
* [in] invPeriodicBoxSize - inverse of the periodic box
* [in] blockCenter - the center of each bounding box
* [in] blockBoundingBox - bounding box of each atom block
* [out] interactionCount - total number of tiles that have interactions
* [out] interactingTiles - set of blocks that have interactions
* [out] interactingAtoms - a list of atoms that interact with each atom block
* [in] posq - x,y,z coordinates of each atom and charge q
* [in] maxTiles - maximum number of tiles to process, used for multi-GPUs
* [in] startBlockIndex - first block to process, used for multi-GPUs,
* [in] numBlocks - total number of atom blocks
* [in] sortedBlocks - a sorted list of atom blocks based on volume
* [in] sortedBlockCenter - sorted centers, duplicated for fast access to avoid indexing
* [in] sortedBlockBoundingBox - sorted bounding boxes, duplicated for fast access
 * [in] exclusionRowIndices - maps each block to the starting position of its exclusions in exclusionIndices
 * [in] exclusionIndices - stores a continuous list of the excluded blocks
* eg: block 0 is excluded from atom 3,5,6
* block 1 is excluded from atom 3,4
* block 2 is excluded from atom 1,3,5,6
 * exclusionRowIndices[0][3][5][9]
 * exclusionIndices [3][5][6][3][4][1][3][5][6]
* index 0 1 2 3 4 5 6 7 8
 * [out] oldPositions - stores the positions of the atoms on which this neighbourlist was built
* - this is used to decide when to rebuild a neighbourlist
* [in] rebuildNeighbourList - whether or not to execute this kernel
*
*/
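// Note: the buffer indexing below (i*GROUP_SIZE + threadIdx.x) assumes this kernel is launched
// with blockDim.x == GROUP_SIZE.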
extern "C" __global__ void findBlocksWithInteractions(real4 periodicBoxSize, real4 invPeriodicBoxSize, unsigned int* __restrict__ interactionCount,
int* __restrict__ interactingTiles, unsigned int* __restrict__ interactingAtoms, const real4* __restrict__ posq, unsigned int maxTiles, unsigned int startBlockIndex,
unsigned int numBlocks, real2* __restrict__ sortedBlocks, const real4* __restrict__ sortedBlockCenter, const real4* __restrict__ sortedBlockBoundingBox,
const unsigned int* __restrict__ exclusionIndices, const unsigned int* __restrict__ exclusionRowIndices, real4* __restrict__ oldPositions,
const int* __restrict__ rebuildNeighborList) {
__shared__ unsigned short buffer[BUFFER_SIZE];
__shared__ short sum[BUFFER_SIZE];
__shared__ ushort2 temp[BUFFER_SIZE];
__shared__ int atoms[BUFFER_SIZE+TILE_SIZE];
__shared__ real3 posBuffer[TILE_SIZE];
__shared__ int exclusionsForX[MAX_EXCLUSIONS];
__shared__ int bufferFull;
__shared__ int globalIndex;
__shared__ int numAtoms;
if (rebuildNeighborList[0] == 0)
return; // The neighbor list doesn't need to be rebuilt.
int valuesInBuffer = 0;
if (threadIdx.x == 0)
bufferFull = false;
for (int i = 0; i < BUFFER_GROUPS; ++i)
buffer[i*GROUP_SIZE+threadIdx.x] = INVALID;
__syncthreads();
// Loop over blocks sorted by size.
for (int i = startBlockIndex+blockIdx.x; i < startBlockIndex+numBlocks; i += gridDim.x) {
if (threadIdx.x == blockDim.x-1)
numAtoms = 0;
real2 sortedKey = sortedBlocks[i];
int x = (int) sortedKey.y;
real4 blockCenterX = sortedBlockCenter[i];
real4 blockSizeX = sortedBlockBoundingBox[i];
// Load exclusion data for block x.
const int exclusionStart = exclusionRowIndices[x];
const int exclusionEnd = exclusionRowIndices[x+1];
const int numExclusions = exclusionEnd-exclusionStart;
for (int j = threadIdx.x; j < numExclusions; j += blockDim.x)
exclusionsForX[j] = exclusionIndices[exclusionStart+j];
__syncthreads();
// Compare it to other blocks after this one in sorted order.
for (int base = i+1; base < NUM_BLOCKS; base += blockDim.x) {
int j = base+threadIdx.x;
real2 sortedKey2 = (j < NUM_BLOCKS ? sortedBlocks[j] : make_real2(0));
real4 blockCenterY = (j < NUM_BLOCKS ? sortedBlockCenter[j] : make_real4(0));
real4 blockSizeY = (j < NUM_BLOCKS ? sortedBlockBoundingBox[j] : make_real4(0));
unsigned short y = (unsigned short) sortedKey2.y;
real4 delta = blockCenterX-blockCenterY;
#ifdef USE_PERIODIC
delta.x -= floor(delta.x*invPeriodicBoxSize.x+0.5f)*periodicBoxSize.x;
delta.y -= floor(delta.y*invPeriodicBoxSize.y+0.5f)*periodicBoxSize.y;
delta.z -= floor(delta.z*invPeriodicBoxSize.z+0.5f)*periodicBoxSize.z;
#endif
delta.x = max(0.0f, fabs(delta.x)-blockSizeX.x-blockSizeY.x);
delta.y = max(0.0f, fabs(delta.y)-blockSizeX.y-blockSizeY.y);
delta.z = max(0.0f, fabs(delta.z)-blockSizeX.z-blockSizeY.z);
bool hasExclusions = false;
for (int k = 0; k < numExclusions; k++)
hasExclusions |= (exclusionsForX[k] == y);
if (j < NUM_BLOCKS && delta.x*delta.x+delta.y*delta.y+delta.z*delta.z < PADDED_CUTOFF_SQUARED && !hasExclusions) {
// Add this tile to the buffer.
int bufferIndex = valuesInBuffer*GROUP_SIZE+threadIdx.x;
buffer[bufferIndex] = y;
valuesInBuffer++;
        // cuda-memcheck --tool racecheck reports RAW/WAW/WAR race conditions on this flag,
        // but the access pattern is safe in all instances.
if (!bufferFull && valuesInBuffer == BUFFER_GROUPS)
bufferFull = true;
}
__syncthreads();
if (bufferFull) {
storeInteractionData(x, buffer, sum, temp, atoms, numAtoms, globalIndex, interactionCount, interactingTiles, interactingAtoms, periodicBoxSize, invPeriodicBoxSize, posq, posBuffer, blockCenterX, blockSizeX, maxTiles, false);
valuesInBuffer = 0;
if (threadIdx.x == 0)
bufferFull = false;
}
__syncthreads();
}
storeInteractionData(x, buffer, sum, temp, atoms, numAtoms, globalIndex, interactionCount, interactingTiles, interactingAtoms, periodicBoxSize, invPeriodicBoxSize, posq, posBuffer, blockCenterX, blockSizeX, maxTiles, true);
}
// Record the positions the neighbor list is based on.
for (int i = threadIdx.x+blockIdx.x*blockDim.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x)
oldPositions[i] = posq[i];
}
| 1df3b969c0eaaf32a5b926112e218b7c121ea221.cu | #define GROUP_SIZE 256
#define BUFFER_GROUPS 2
#define BUFFER_SIZE BUFFER_GROUPS*GROUP_SIZE
#define WARP_SIZE 32
#define INVALID 0xFFFF
/**
* Find a bounding box for the atoms in each block.
*/
extern "C" __global__ void findBlockBounds(int numAtoms, real4 periodicBoxSize, real4 invPeriodicBoxSize, const real4* __restrict__ posq,
real4* __restrict__ blockCenter, real4* __restrict__ blockBoundingBox, int* __restrict__ rebuildNeighborList, real2* __restrict__ sortedBlocks) {
int index = blockIdx.x*blockDim.x+threadIdx.x;
int base = index*TILE_SIZE;
while (base < numAtoms) {
real4 pos = posq[base];
#ifdef USE_PERIODIC
pos.x -= floor(pos.x*invPeriodicBoxSize.x)*periodicBoxSize.x;
pos.y -= floor(pos.y*invPeriodicBoxSize.y)*periodicBoxSize.y;
pos.z -= floor(pos.z*invPeriodicBoxSize.z)*periodicBoxSize.z;
#endif
real4 minPos = pos;
real4 maxPos = pos;
int last = min(base+TILE_SIZE, numAtoms);
for (int i = base+1; i < last; i++) {
pos = posq[i];
#ifdef USE_PERIODIC
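// Wrap each subsequent atom relative to the running box centre so that a block
// which straddles a periodic boundary still gets a tight bounding box rather
// than one spanning the whole periodic cell.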
real4 center = 0.5f*(maxPos+minPos);
pos.x -= floor((pos.x-center.x)*invPeriodicBoxSize.x+0.5f)*periodicBoxSize.x;
pos.y -= floor((pos.y-center.y)*invPeriodicBoxSize.y+0.5f)*periodicBoxSize.y;
pos.z -= floor((pos.z-center.z)*invPeriodicBoxSize.z+0.5f)*periodicBoxSize.z;
#endif
minPos = make_real4(min(minPos.x,pos.x), min(minPos.y,pos.y), min(minPos.z,pos.z), 0);
maxPos = make_real4(max(maxPos.x,pos.x), max(maxPos.y,pos.y), max(maxPos.z,pos.z), 0);
}
real4 blockSize = 0.5f*(maxPos-minPos);
blockBoundingBox[index] = blockSize;
blockCenter[index] = 0.5f*(maxPos+minPos);
sortedBlocks[index] = make_real2(blockSize.x+blockSize.y+blockSize.z, index);
index += blockDim.x*gridDim.x;
base = index*TILE_SIZE;
}
if (blockIdx.x == 0 && threadIdx.x == 0)
rebuildNeighborList[0] = 0;
}
/**
* Sort the data about bounding boxes so it can be accessed more efficiently in the next kernel.
*/
extern "C" __global__ void sortBoxData(const real2* __restrict__ sortedBlock, const real4* __restrict__ blockCenter,
const real4* __restrict__ blockBoundingBox, real4* __restrict__ sortedBlockCenter,
real4* __restrict__ sortedBlockBoundingBox, const real4* __restrict__ posq, const real4* __restrict__ oldPositions,
unsigned int* __restrict__ interactionCount, int* __restrict__ rebuildNeighborList) {
for (int i = threadIdx.x+blockIdx.x*blockDim.x; i < NUM_BLOCKS; i += blockDim.x*gridDim.x) {
int index = (int) sortedBlock[i].y;
sortedBlockCenter[i] = blockCenter[index];
sortedBlockBoundingBox[i] = blockBoundingBox[index];
}
// Also check whether any atom has moved enough so that we really need to rebuild the neighbor list.
bool rebuild = false;
for (int i = threadIdx.x+blockIdx.x*blockDim.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x) {
real4 delta = oldPositions[i]-posq[i];
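// Rebuild once any atom has moved more than half the padding distance:
// 0.25f*PADDING*PADDING is (PADDING/2)^2, compared against the squared
// displacement so no square root is needed.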
if (delta.x*delta.x + delta.y*delta.y + delta.z*delta.z > 0.25f*PADDING*PADDING)
rebuild = true;
}
if (rebuild) {
rebuildNeighborList[0] = 1;
interactionCount[0] = 0;
}
}
/**
* Perform a parallel prefix sum over an array. The input values are all assumed to be 0 or 1.
*/
__device__ void prefixSum(short* sum, ushort2* temp) {
#if __CUDA_ARCH__ >= 300
const int indexInWarp = threadIdx.x%WARP_SIZE;
const int warpMask = (2<<indexInWarp)-1;
for (int base = 0; base < BUFFER_SIZE; base += blockDim.x)
temp[base+threadIdx.x].x = __popc(__ballot(sum[base+threadIdx.x])&warpMask);
__syncthreads();
if (threadIdx.x < BUFFER_SIZE/WARP_SIZE) {
int multiWarpSum = temp[(threadIdx.x+1)*WARP_SIZE-1].x;
for (int offset = 1; offset < BUFFER_SIZE/WARP_SIZE; offset *= 2) {
short n = __shfl_up(multiWarpSum, offset, WARP_SIZE);
if (indexInWarp >= offset)
multiWarpSum += n;
}
temp[threadIdx.x].y = multiWarpSum;
}
__syncthreads();
for (int i = threadIdx.x; i < BUFFER_SIZE; i += blockDim.x)
sum[i] = temp[i].x+(i < WARP_SIZE ? 0 : temp[i/WARP_SIZE-1].y);
__syncthreads();
#else
for (int i = threadIdx.x; i < BUFFER_SIZE; i += blockDim.x)
temp[i].x = sum[i];
__syncthreads();
int whichBuffer = 0;
for (int offset = 1; offset < BUFFER_SIZE; offset *= 2) {
if (whichBuffer == 0)
for (int i = threadIdx.x; i < BUFFER_SIZE; i += blockDim.x)
temp[i].y = (i < offset ? temp[i].x : temp[i].x+temp[i-offset].x);
else
for (int i = threadIdx.x; i < BUFFER_SIZE; i += blockDim.x)
temp[i].x = (i < offset ? temp[i].y : temp[i].y+temp[i-offset].y);
whichBuffer = 1-whichBuffer;
__syncthreads();
}
if (whichBuffer == 0)
for (int i = threadIdx.x; i < BUFFER_SIZE; i += blockDim.x)
sum[i] = temp[i].x;
else
for (int i = threadIdx.x; i < BUFFER_SIZE; i += blockDim.x)
sum[i] = temp[i].y;
__syncthreads();
#endif
}
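// Illustrative helper (not used by any kernel in this file): a sequential
// sketch of what prefixSum() above computes, i.e. an inclusive prefix sum over
// 0/1 flags whose results serve as 1-based compaction destinations. The name
// prefixSumHostSketch is introduced only for this sketch.
static inline void prefixSumHostSketch(const short* in, short* out, int n) {
    short running = 0;
    for (int i = 0; i < n; i++) {
        running += in[i];   // count of set flags in in[0..i]
        out[i] = running;   // e.g. flags 1 0 1 1 -> sums 1 1 2 3
    }
}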
/**
* This is called by findBlocksWithInteractions(). It compacts the list of blocks, identifies interactions
* in them, and writes the result to global memory.
*/
__device__ void storeInteractionData(int x, unsigned short* buffer, short* sum, ushort2* temp, int* atoms, int& numAtoms,
int& baseIndex, unsigned int* interactionCount, int* interactingTiles, unsigned int* interactingAtoms, real4 periodicBoxSize,
real4 invPeriodicBoxSize, const real4* posq, real3* posBuffer, real4 blockCenterX, real4 blockSizeX, unsigned int maxTiles, bool finish) {
const bool singlePeriodicCopy = (0.5f*periodicBoxSize.x-blockSizeX.x >= PADDED_CUTOFF &&
0.5f*periodicBoxSize.y-blockSizeX.y >= PADDED_CUTOFF &&
0.5f*periodicBoxSize.z-blockSizeX.z >= PADDED_CUTOFF);
if (threadIdx.x < TILE_SIZE) {
real3 pos = trimTo3(posq[x*TILE_SIZE+threadIdx.x]);
posBuffer[threadIdx.x] = pos;
#ifdef USE_PERIODIC
if (singlePeriodicCopy) {
// The box is small enough that we can just translate all the atoms into a single periodic
// box, then skip having to apply periodic boundary conditions later.
pos.x -= floor((pos.x-blockCenterX.x)*invPeriodicBoxSize.x+0.5f)*periodicBoxSize.x;
pos.y -= floor((pos.y-blockCenterX.y)*invPeriodicBoxSize.y+0.5f)*periodicBoxSize.y;
pos.z -= floor((pos.z-blockCenterX.z)*invPeriodicBoxSize.z+0.5f)*periodicBoxSize.z;
posBuffer[threadIdx.x] = pos;
}
#endif
}
// The buffer is full, so we need to compact it and write out results. Start by doing a parallel prefix sum.
for (int i = threadIdx.x; i < BUFFER_SIZE; i += blockDim.x)
sum[i] = (buffer[i] == INVALID ? 0 : 1);
__syncthreads();
prefixSum(sum, temp);
int numValid = sum[BUFFER_SIZE-1];
// Compact the buffer.
for (int i = threadIdx.x; i < BUFFER_SIZE; i += blockDim.x)
if (buffer[i] != INVALID)
temp[sum[i]-1].x = buffer[i];
__syncthreads();
for (int i = threadIdx.x; i < BUFFER_SIZE; i += blockDim.x)
buffer[i] = temp[i].x;
__syncthreads();
// Loop over the tiles and find specific interactions in them.
const int indexInWarp = threadIdx.x%WARP_SIZE;
for (int base = 0; base < numValid; base += BUFFER_SIZE/WARP_SIZE) {
for (int i = threadIdx.x/WARP_SIZE; i < BUFFER_SIZE/WARP_SIZE && base+i < numValid; i += GROUP_SIZE/WARP_SIZE) {
// Check each atom in block Y for interactions.
real3 pos = trimTo3(posq[buffer[base+i]*TILE_SIZE+indexInWarp]);
#ifdef USE_PERIODIC
if (singlePeriodicCopy) {
pos.x -= floor((pos.x-blockCenterX.x)*invPeriodicBoxSize.x+0.5f)*periodicBoxSize.x;
pos.y -= floor((pos.y-blockCenterX.y)*invPeriodicBoxSize.y+0.5f)*periodicBoxSize.y;
pos.z -= floor((pos.z-blockCenterX.z)*invPeriodicBoxSize.z+0.5f)*periodicBoxSize.z;
}
#endif
bool interacts = false;
#ifdef USE_PERIODIC
if (!singlePeriodicCopy) {
for (int j = 0; j < TILE_SIZE; j++) {
real3 delta = pos-posBuffer[j];
delta.x -= floor(delta.x*invPeriodicBoxSize.x+0.5f)*periodicBoxSize.x;
delta.y -= floor(delta.y*invPeriodicBoxSize.y+0.5f)*periodicBoxSize.y;
delta.z -= floor(delta.z*invPeriodicBoxSize.z+0.5f)*periodicBoxSize.z;
interacts |= (delta.x*delta.x+delta.y*delta.y+delta.z*delta.z < PADDED_CUTOFF_SQUARED);
}
}
else {
#endif
for (int j = 0; j < TILE_SIZE; j++) {
real3 delta = pos-posBuffer[j];
interacts |= (delta.x*delta.x+delta.y*delta.y+delta.z*delta.z < PADDED_CUTOFF_SQUARED);
}
#ifdef USE_PERIODIC
}
#endif
sum[i*WARP_SIZE+indexInWarp] = (interacts ? 1 : 0);
}
for (int i = numValid-base+threadIdx.x/WARP_SIZE; i < BUFFER_SIZE/WARP_SIZE; i += GROUP_SIZE/WARP_SIZE)
sum[i*WARP_SIZE+indexInWarp] = 0;
// Compact the list of atoms.
__syncthreads();
prefixSum(sum, temp);
for (int i = threadIdx.x; i < BUFFER_SIZE; i += blockDim.x)
if (sum[i] != (i == 0 ? 0 : sum[i-1]))
atoms[numAtoms+sum[i]-1] = buffer[base+i/WARP_SIZE]*TILE_SIZE+indexInWarp;
// Store them to global memory.
int atomsToStore = numAtoms+sum[BUFFER_SIZE-1];
bool storePartialTile = (finish && base >= numValid-BUFFER_SIZE/WARP_SIZE);
int tilesToStore = (storePartialTile ? (atomsToStore+TILE_SIZE-1)/TILE_SIZE : atomsToStore/TILE_SIZE);
if (tilesToStore > 0) {
if (threadIdx.x == 0)
baseIndex = atomicAdd(interactionCount, tilesToStore);
__syncthreads();
if (threadIdx.x == 0)
numAtoms = atomsToStore-tilesToStore*TILE_SIZE;
if (baseIndex+tilesToStore <= maxTiles) {
if (threadIdx.x < tilesToStore)
interactingTiles[baseIndex+threadIdx.x] = x;
for (int i = threadIdx.x; i < tilesToStore*TILE_SIZE; i += blockDim.x)
interactingAtoms[baseIndex*TILE_SIZE+i] = (i < atomsToStore ? atoms[i] : NUM_ATOMS);
}
}
else {
__syncthreads();
if (threadIdx.x == 0)
numAtoms += sum[BUFFER_SIZE-1];
}
__syncthreads();
if (threadIdx.x < numAtoms && !storePartialTile)
atoms[threadIdx.x] = atoms[tilesToStore*TILE_SIZE+threadIdx.x];
}
if (numValid == 0 && numAtoms > 0 && finish) {
// We didn't have any more tiles to process, but there were some atoms left over from a
// previous call to this function. Save them now.
if (threadIdx.x == 0)
baseIndex = atomicAdd(interactionCount, 1);
__syncthreads();
if (baseIndex < maxTiles) {
if (threadIdx.x == 0)
interactingTiles[baseIndex] = x;
if (threadIdx.x < TILE_SIZE)
interactingAtoms[baseIndex*TILE_SIZE+threadIdx.x] = (threadIdx.x < numAtoms ? atoms[threadIdx.x] : NUM_ATOMS);
}
}
// Reset the buffer for processing more tiles.
for (int i = threadIdx.x; i < BUFFER_SIZE; i += blockDim.x)
buffer[i] = INVALID;
__syncthreads();
}
/**
* Compare the bounding boxes for each pair of atom blocks (comprised of 32 atoms each), forming a tile. If the two
* atom blocks are sufficiently far apart, mark them as non-interacting. There are two stages in the algorithm.
*
* STAGE 1:
*
* A coarse-grained neighbour list of interacting atom blocks is constructed for each atom block.
*
* Each threadblock first loads in some block X of interest. Each thread within the threadblock then loads
* in a different atomblock Y. If Y has exclusions with X, then Y is not processed. If the bounding boxes
* of the two atomblocks are within the cutoff distance, then the two atomblocks are considered to be
* interacting and Y is added to the buffer for X. If during any given iteration a thread
* finds BUFFER_GROUPS interacting blocks, the entire buffer is sent for compaction by storeInteractionData().
*
* STAGE 2:
*
* A fine-grained neighbour list of interacting atoms is constructed for each atom block.
*
* The input is an atomblock list detailing the interactions with other atomblocks. The list of interacting
* atom blocks is initially stored in the buffer array in shared memory. The buffer is then compacted using
* prefixSum. Afterwards, each threadblock processes one contiguous atomblock X. Each warp in a threadblock
* processes a block Y to find the atoms that interact with any given atom in X. Once BUFFER_SIZE/WARP_SIZE
* (e.g. 16) atom blocks have been processed for a given X, the list of interacting atoms in these blocks
* is compacted. The process repeats until all atom blocks that interact with X have been processed.
*
* [in] periodicBoxSize - size of the rectangular periodic box
* [in] invPeriodicBoxSize - inverse of the periodic box
* [in] blockCenter - the center of each bounding box
* [in] blockBoundingBox - bounding box of each atom block
* [out] interactionCount - total number of tiles that have interactions
* [out] interactingTiles - set of blocks that have interactions
* [out] interactingAtoms - a list of atoms that interact with each atom block
* [in] posq - x,y,z coordinates of each atom and charge q
* [in] maxTiles - maximum number of tiles to process, used for multi-GPUs
* [in] startBlockIndex - first block to process, used for multi-GPUs
* [in] numBlocks - total number of atom blocks
* [in] sortedBlocks - a sorted list of atom blocks based on volume
* [in] sortedBlockCenter - sorted centers, duplicated for fast access to avoid indexing
* [in] sortedBlockBoundingBox - sorted bounding boxes, duplicated for fast access
* [in] exclusionIndices - a continuous list of the block indices that are excluded, for all blocks
* [in] exclusionRowIndices - gives, for each block x, the range [exclusionRowIndices[x], exclusionRowIndices[x+1]) into exclusionIndices
* eg: block 0 is excluded from blocks 3,5,6
* block 1 is excluded from blocks 3,4
* block 2 is excluded from blocks 1,3,5,6
* exclusionRowIndices[0][3][5][9]
* exclusionIndices [3][5][6][3][4][1][3][5][6]
* index 0 1 2 3 4 5 6 7 8
* (a small lookup sketch follows this comment)
* [out] oldPositions - stores the positions the atoms had when this neighbour list was built
* - these positions are used to decide when the neighbour list needs to be rebuilt
* [in] rebuildNeighborList - whether or not to execute this kernel
*
*/
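// Illustrative helper (not called by the kernel below): how the CSR-style
// exclusion arrays described above are walked for one block x; it mirrors the
// loads the kernel performs. The name isExcludedSketch exists only for this sketch.
static __device__ __forceinline__ bool isExcludedSketch(int x, unsigned short y,
        const unsigned int* __restrict__ exclusionRowIndices,
        const unsigned int* __restrict__ exclusionIndices) {
    const unsigned int start = exclusionRowIndices[x];   // first exclusion entry of block x
    const unsigned int end = exclusionRowIndices[x+1];   // one past the last entry of block x
    for (unsigned int k = start; k < end; k++)
        if (exclusionIndices[k] == y)                    // block y is excluded from block x
            return true;
    return false;
}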
extern "C" __global__ void findBlocksWithInteractions(real4 periodicBoxSize, real4 invPeriodicBoxSize, unsigned int* __restrict__ interactionCount,
int* __restrict__ interactingTiles, unsigned int* __restrict__ interactingAtoms, const real4* __restrict__ posq, unsigned int maxTiles, unsigned int startBlockIndex,
unsigned int numBlocks, real2* __restrict__ sortedBlocks, const real4* __restrict__ sortedBlockCenter, const real4* __restrict__ sortedBlockBoundingBox,
const unsigned int* __restrict__ exclusionIndices, const unsigned int* __restrict__ exclusionRowIndices, real4* __restrict__ oldPositions,
const int* __restrict__ rebuildNeighborList) {
__shared__ unsigned short buffer[BUFFER_SIZE];
__shared__ short sum[BUFFER_SIZE];
__shared__ ushort2 temp[BUFFER_SIZE];
__shared__ int atoms[BUFFER_SIZE+TILE_SIZE];
__shared__ real3 posBuffer[TILE_SIZE];
__shared__ int exclusionsForX[MAX_EXCLUSIONS];
__shared__ int bufferFull;
__shared__ int globalIndex;
__shared__ int numAtoms;
if (rebuildNeighborList[0] == 0)
return; // The neighbor list doesn't need to be rebuilt.
int valuesInBuffer = 0;
if (threadIdx.x == 0)
bufferFull = false;
for (int i = 0; i < BUFFER_GROUPS; ++i)
buffer[i*GROUP_SIZE+threadIdx.x] = INVALID;
__syncthreads();
// Loop over blocks sorted by size.
for (int i = startBlockIndex+blockIdx.x; i < startBlockIndex+numBlocks; i += gridDim.x) {
if (threadIdx.x == blockDim.x-1)
numAtoms = 0;
real2 sortedKey = sortedBlocks[i];
int x = (int) sortedKey.y;
real4 blockCenterX = sortedBlockCenter[i];
real4 blockSizeX = sortedBlockBoundingBox[i];
// Load exclusion data for block x.
const int exclusionStart = exclusionRowIndices[x];
const int exclusionEnd = exclusionRowIndices[x+1];
const int numExclusions = exclusionEnd-exclusionStart;
for (int j = threadIdx.x; j < numExclusions; j += blockDim.x)
exclusionsForX[j] = exclusionIndices[exclusionStart+j];
__syncthreads();
// Compare it to other blocks after this one in sorted order.
for (int base = i+1; base < NUM_BLOCKS; base += blockDim.x) {
int j = base+threadIdx.x;
real2 sortedKey2 = (j < NUM_BLOCKS ? sortedBlocks[j] : make_real2(0));
real4 blockCenterY = (j < NUM_BLOCKS ? sortedBlockCenter[j] : make_real4(0));
real4 blockSizeY = (j < NUM_BLOCKS ? sortedBlockBoundingBox[j] : make_real4(0));
unsigned short y = (unsigned short) sortedKey2.y;
real4 delta = blockCenterX-blockCenterY;
#ifdef USE_PERIODIC
delta.x -= floor(delta.x*invPeriodicBoxSize.x+0.5f)*periodicBoxSize.x;
delta.y -= floor(delta.y*invPeriodicBoxSize.y+0.5f)*periodicBoxSize.y;
delta.z -= floor(delta.z*invPeriodicBoxSize.z+0.5f)*periodicBoxSize.z;
#endif
delta.x = max(0.0f, fabs(delta.x)-blockSizeX.x-blockSizeY.x);
delta.y = max(0.0f, fabs(delta.y)-blockSizeX.y-blockSizeY.y);
delta.z = max(0.0f, fabs(delta.z)-blockSizeX.z-blockSizeY.z);
bool hasExclusions = false;
for (int k = 0; k < numExclusions; k++)
hasExclusions |= (exclusionsForX[k] == y);
if (j < NUM_BLOCKS && delta.x*delta.x+delta.y*delta.y+delta.z*delta.z < PADDED_CUTOFF_SQUARED && !hasExclusions) {
// Add this tile to the buffer.
int bufferIndex = valuesInBuffer*GROUP_SIZE+threadIdx.x;
buffer[bufferIndex] = y;
valuesInBuffer++;
// cuda-memcheck --tool racecheck reports RAW/WAW/WAR race-condition errors on
// this flag, but the access pattern is safe in all instances
if (!bufferFull && valuesInBuffer == BUFFER_GROUPS)
bufferFull = true;
}
__syncthreads();
if (bufferFull) {
storeInteractionData(x, buffer, sum, temp, atoms, numAtoms, globalIndex, interactionCount, interactingTiles, interactingAtoms, periodicBoxSize, invPeriodicBoxSize, posq, posBuffer, blockCenterX, blockSizeX, maxTiles, false);
valuesInBuffer = 0;
if (threadIdx.x == 0)
bufferFull = false;
}
__syncthreads();
}
storeInteractionData(x, buffer, sum, temp, atoms, numAtoms, globalIndex, interactionCount, interactingTiles, interactingAtoms, periodicBoxSize, invPeriodicBoxSize, posq, posBuffer, blockCenterX, blockSizeX, maxTiles, true);
}
// Record the positions the neighbor list is based on.
for (int i = threadIdx.x+blockIdx.x*blockDim.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x)
oldPositions[i] = posq[i];
}
|
9a5d669c0ec2635ce8d26151cee05a27a8f44b19.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <sgm/sgm.cuh>
void sgm(DeviceImage<PIXEL_COST> &cost, DeviceImage<float> &depth)
{
int width = cost.width;
int height = cost.height;
DeviceImage<PIXEL_COST> sgm_cost(width, height);
sgm_cost.zero();
dim3 sgm_row_block;
dim3 sgm_row_grid;
sgm_row_block.x = DEPTH_NUM;
sgm_row_grid.x = height;
dim3 sgm_col_block;
dim3 sgm_col_grid;
sgm_col_block.x = DEPTH_NUM;
sgm_col_grid.x = width;
struct timeval start, end;
gettimeofday(&start,NULL);
hipLaunchKernelGGL(( sgm_cost_row_kernel), dim3(sgm_row_grid), dim3(sgm_row_block), 0, 0, true, cost.dev_ptr, sgm_cost.dev_ptr);
hipLaunchKernelGGL(( sgm_cost_row_kernel), dim3(sgm_row_grid), dim3(sgm_row_block), 0, 0, false, cost.dev_ptr, sgm_cost.dev_ptr);
hipLaunchKernelGGL(( sgm_cost_col_kernel), dim3(sgm_col_grid), dim3(sgm_col_block), 0, 0, true, cost.dev_ptr, sgm_cost.dev_ptr);
hipLaunchKernelGGL(( sgm_cost_col_kernel), dim3(sgm_col_grid), dim3(sgm_col_block), 0, 0, false, cost.dev_ptr, sgm_cost.dev_ptr);
hipDeviceSynchronize();
dim3 depth_filter_block;
dim3 depth_filter_grid;
depth_filter_block.x = DEPTH_NUM;
depth_filter_grid.x = width;
depth_filter_grid.y = height;
hipLaunchKernelGGL(( sgm_filter), dim3(depth_filter_grid), dim3(depth_filter_block), 0, 0, sgm_cost.dev_ptr, depth.dev_ptr);
hipDeviceSynchronize();
gettimeofday(&end,NULL);
float time_use = (end.tv_sec-start.tv_sec) * 1000.0 + (end.tv_usec-start.tv_usec) / 1000.0f;
printf("sgm cost: %lf ms.\n",time_use);
}
__global__ void sgm_cost_row_kernel(bool to_left, DeviceImage<PIXEL_COST> *cost_devptr, DeviceImage<PIXEL_COST> *sgm_cost_devptr)
{
const int width = cost_devptr->width;
const int height = cost_devptr->height;
const int depth_id = threadIdx.x;
int y = blockIdx.x;
int x, delta_x;
if(to_left)
{
x = 0;
delta_x = 1;
}
else
{
x = width - 1;
delta_x = -1;
}
__shared__ float last_cost[DEPTH_NUM], last_cost_min[DEPTH_NUM];
__shared__ float this_cost[DEPTH_NUM];
last_cost_min[depth_id] = last_cost[depth_id] = 0.0;
__syncthreads();
for( ; x < width && x >= 0; x += delta_x)
{
float* my_add_ptr = (sgm_cost_devptr->atXY(x,y)).cost_ptr(depth_id);
this_cost[depth_id] = (cost_devptr->atXY(x,y)).get_cost(depth_id);
__syncthreads();
for(int i = DEPTH_NUM/2; i > 0; i /= 2)
{
if(depth_id < i && last_cost_min[depth_id + i] < last_cost_min[depth_id])
{
last_cost_min[depth_id] = last_cost_min[depth_id + i];
}
__syncthreads();
}
float value = min(last_cost_min[0] + sgm_P2, last_cost[depth_id]);
if(depth_id > 0)
value = min(value, last_cost[depth_id - 1] + sgm_P1);
if(depth_id < DEPTH_NUM - 1)
value = min(value, last_cost[depth_id + 1] + sgm_P1);
value = this_cost[depth_id] + value - last_cost_min[0];
atomicAdd(my_add_ptr, value);
__syncthreads();
last_cost_min[depth_id] = last_cost[depth_id] = value;
}
}
__global__ void sgm_cost_col_kernel(bool to_down, DeviceImage<PIXEL_COST> *cost_devptr, DeviceImage<PIXEL_COST> *sgm_cost_devptr)
{
const int width = cost_devptr->width;
const int height = cost_devptr->height;
const int depth_id = threadIdx.x;
int x = blockIdx.x;
int y, delta_y;
if(to_down)
{
y = 0;
delta_y = 1;
}
else
{
y = height - 1;
delta_y = -1;
}
__shared__ float last_cost[DEPTH_NUM], last_cost_min[DEPTH_NUM];
__shared__ float this_cost[DEPTH_NUM];
last_cost_min[depth_id] = last_cost[depth_id] = 0.0;
__syncthreads();
for( ; y < height && y >= 0; y += delta_y)
{
float* my_add_ptr = (sgm_cost_devptr->atXY(x,y)).cost_ptr(depth_id);
this_cost[depth_id] = (cost_devptr->atXY(x,y)).get_cost(depth_id);
__syncthreads();
for(int i = DEPTH_NUM/2; i > 0; i /= 2)
{
if(depth_id < i && last_cost_min[depth_id + i] < last_cost_min[depth_id])
{
last_cost_min[depth_id] = last_cost_min[depth_id + i];
}
__syncthreads();
}
float value = min(last_cost_min[0] + sgm_P2, last_cost[depth_id]);
if(depth_id > 0)
value = min(value, last_cost[depth_id - 1] + sgm_P1);
if(depth_id < DEPTH_NUM - 1)
value = min(value, last_cost[depth_id + 1] + sgm_P1);
value = this_cost[depth_id] + value - last_cost_min[0];
atomicAdd(my_add_ptr, value);
__syncthreads();
last_cost_min[depth_id] = last_cost[depth_id] = value;
}
}
__global__ void sgm_filter(DeviceImage<PIXEL_COST> *sgm_cost_devptr, DeviceImage<float> *depth_devptr)
{
const int x = blockIdx.x;
const int y = blockIdx.y;
const int depth = threadIdx.x;
__shared__ float cost[DEPTH_NUM];
__shared__ float min_cost[DEPTH_NUM];
__shared__ float min_index[DEPTH_NUM];
min_cost[depth] = cost[depth] = (sgm_cost_devptr->atXY(x,y)).get_cost(depth);
min_index[depth] = depth;
__syncthreads();
for(int i = DEPTH_NUM/2; i > 0; i /= 2)
{
if( depth < i && min_cost[depth + i] < min_cost[depth] )
{
min_cost[depth] = min_cost[depth + i];
min_index[depth] = min_index[depth + i];
}
__syncthreads();
}
//sub pixel depth
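// Parabolic refinement: with c(-1)=pre_cost, c(0)=min_cost[0], c(+1)=pro_cost,
// the quadratic f(t) = (a/2)*t*t + (b/2)*t + c(0), where a = pre - 2*c(0) + pro
// and b = pro - pre, passes through the three samples and is minimised at
// t = -b/(2a), so the stored sub-pixel depth is min_index + t = min - b/(2a).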
if(depth == 0)
{
int min = min_index[0];
if(min == 0 || min == DEPTH_NUM - 1)
depth_devptr->atXY(x,y) = min;
else
{
float pre_cost = cost[min - 1];
float pro_cost = cost[min + 1];
float a = pre_cost - 2.0f * min_cost[0] + pro_cost;
float b = - pre_cost + pro_cost;
depth_devptr->atXY(x,y) = (float) min - b / (2.0f * a);
}
}
} | 9a5d669c0ec2635ce8d26151cee05a27a8f44b19.cu | #include <sgm/sgm.cuh>
void sgm(DeviceImage<PIXEL_COST> &cost, DeviceImage<float> &depth)
{
int width = cost.width;
int height = cost.height;
DeviceImage<PIXEL_COST> sgm_cost(width, height);
sgm_cost.zero();
dim3 sgm_row_block;
dim3 sgm_row_grid;
sgm_row_block.x = DEPTH_NUM;
sgm_row_grid.x = height;
dim3 sgm_col_block;
dim3 sgm_col_grid;
sgm_col_block.x = DEPTH_NUM;
sgm_col_grid.x = width;
struct timeval start, end;
gettimeofday(&start,NULL);
sgm_cost_row_kernel<<<sgm_row_grid, sgm_row_block>>>(true, cost.dev_ptr, sgm_cost.dev_ptr);
sgm_cost_row_kernel<<<sgm_row_grid, sgm_row_block>>>(false, cost.dev_ptr, sgm_cost.dev_ptr);
sgm_cost_col_kernel<<<sgm_col_grid, sgm_col_block>>>(true, cost.dev_ptr, sgm_cost.dev_ptr);
sgm_cost_col_kernel<<<sgm_col_grid, sgm_col_block>>>(false, cost.dev_ptr, sgm_cost.dev_ptr);
cudaDeviceSynchronize();
dim3 depth_filter_block;
dim3 depth_filter_grid;
depth_filter_block.x = DEPTH_NUM;
depth_filter_grid.x = width;
depth_filter_grid.y = height;
sgm_filter<<<depth_filter_grid, depth_filter_block>>>(sgm_cost.dev_ptr, depth.dev_ptr);
cudaDeviceSynchronize();
gettimeofday(&end,NULL);
float time_use = (end.tv_sec-start.tv_sec) * 1000.0 + (end.tv_usec-start.tv_usec) / 1000.0f;
printf("sgm cost: %lf ms.\n",time_use);
}
__global__ void sgm_cost_row_kernel(bool to_left, DeviceImage<PIXEL_COST> *cost_devptr, DeviceImage<PIXEL_COST> *sgm_cost_devptr)
{
const int width = cost_devptr->width;
const int height = cost_devptr->height;
const int depth_id = threadIdx.x;
int y = blockIdx.x;
int x, delta_x;
if(to_left)
{
x = 0;
delta_x = 1;
}
else
{
x = width - 1;
delta_x = -1;
}
__shared__ float last_cost[DEPTH_NUM], last_cost_min[DEPTH_NUM];
__shared__ float this_cost[DEPTH_NUM];
last_cost_min[depth_id] = last_cost[depth_id] = 0.0;
__syncthreads();
for( ; x < width && x >= 0; x += delta_x)
{
float* my_add_ptr = (sgm_cost_devptr->atXY(x,y)).cost_ptr(depth_id);
this_cost[depth_id] = (cost_devptr->atXY(x,y)).get_cost(depth_id);
__syncthreads();
for(int i = DEPTH_NUM/2; i > 0; i /= 2)
{
if(depth_id < i && last_cost_min[depth_id + i] < last_cost_min[depth_id])
{
last_cost_min[depth_id] = last_cost_min[depth_id + i];
}
__syncthreads();
}
float value = min(last_cost_min[0] + sgm_P2, last_cost[depth_id]);
if(depth_id > 0)
value = min(value, last_cost[depth_id - 1] + sgm_P1);
if(depth_id < DEPTH_NUM - 1)
value = min(value, last_cost[depth_id + 1] + sgm_P1);
value = this_cost[depth_id] + value - last_cost_min[0];
atomicAdd(my_add_ptr, value);
__syncthreads();
last_cost_min[depth_id] = last_cost[depth_id] = value;
}
}
__global__ void sgm_cost_col_kernel(bool to_down, DeviceImage<PIXEL_COST> *cost_devptr, DeviceImage<PIXEL_COST> *sgm_cost_devptr)
{
const int width = cost_devptr->width;
const int height = cost_devptr->height;
const int depth_id = threadIdx.x;
int x = blockIdx.x;
int y, delta_y;
if(to_down)
{
y = 0;
delta_y = 1;
}
else
{
y = height - 1;
delta_y = -1;
}
__shared__ float last_cost[DEPTH_NUM], last_cost_min[DEPTH_NUM];
__shared__ float this_cost[DEPTH_NUM];
last_cost_min[depth_id] = last_cost[depth_id] = 0.0;
__syncthreads();
for( ; y < height && y >= 0; y += delta_y)
{
float* my_add_ptr = (sgm_cost_devptr->atXY(x,y)).cost_ptr(depth_id);
this_cost[depth_id] = (cost_devptr->atXY(x,y)).get_cost(depth_id);
__syncthreads();
for(int i = DEPTH_NUM/2; i > 0; i /= 2)
{
if(depth_id < i && last_cost_min[depth_id + i] < last_cost_min[depth_id])
{
last_cost_min[depth_id] = last_cost_min[depth_id + i];
}
__syncthreads();
}
float value = min(last_cost_min[0] + sgm_P2, last_cost[depth_id]);
if(depth_id > 0)
value = min(value, last_cost[depth_id - 1] + sgm_P1);
if(depth_id < DEPTH_NUM - 1)
value = min(value, last_cost[depth_id + 1] + sgm_P1);
value = this_cost[depth_id] + value - last_cost_min[0];
atomicAdd(my_add_ptr, value);
__syncthreads();
last_cost_min[depth_id] = last_cost[depth_id] = value;
}
}
__global__ void sgm_filter(DeviceImage<PIXEL_COST> *sgm_cost_devptr, DeviceImage<float> *depth_devptr)
{
const int x = blockIdx.x;
const int y = blockIdx.y;
const int depth = threadIdx.x;
__shared__ float cost[DEPTH_NUM];
__shared__ float min_cost[DEPTH_NUM];
__shared__ float min_index[DEPTH_NUM];
min_cost[depth] = cost[depth] = (sgm_cost_devptr->atXY(x,y)).get_cost(depth);
min_index[depth] = depth;
__syncthreads();
for(int i = DEPTH_NUM/2; i > 0; i /= 2)
{
if( depth < i && min_cost[depth + i] < min_cost[depth] )
{
min_cost[depth] = min_cost[depth + i];
min_index[depth] = min_index[depth + i];
}
__syncthreads();
}
//sub pixel depth
if(depth == 0)
{
int min = min_index[0];
if(min == 0 || min == DEPTH_NUM - 1)
depth_devptr->atXY(x,y) = min;
else
{
float pre_cost = cost[min - 1];
float pro_cost = cost[min + 1];
float a = pre_cost - 2.0f * min_cost[0] + pro_cost;
float b = - pre_cost + pro_cost;
depth_devptr->atXY(x,y) = (float) min - b / (2.0f * a);
}
}
} |
002a36a13369d8f9d54b43b5a0334a1fd4c117ac.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// Created by raver on 4/9/2018.
//
#include <system/Environment.h>
#include "../indexreduce.h"
#include <system/op_boilerplate.h>
#include <helpers/DebugHelper.h>
#include <types/types.h>
#include "../legacy_ops.h"
using namespace simdOps;
template <typename X, typename Z>
static __global__ void simpleIndexReduceGeneric(const int op,
void const* dx,
Nd4jLong const* xShapeInfo, int xRank,
void *extraParams,
void *result,
Nd4jLong const* zShapeInfo, int zRank,
int *dimension,
int dimensionLength,
int postProcessOrNot, int *allocationBuffer, void *reductionBuffer, Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets) {
functions::indexreduce::IndexReduce<X, Z>::transform(op,dx,xShapeInfo,extraParams,result,zShapeInfo,dimension,dimensionLength,postProcessOrNot,allocationBuffer,reductionBuffer,tadOnlyShapeInfo,tadOffsets);
}
namespace functions {
namespace indexreduce {
template <typename X, typename Z>
_CUDA_H void IndexReduce<X,Z>::executeIndexReduceScalar(dim3 launchDims, hipStream_t *stream,
const int opNum,
void const* dx, Nd4jLong const* xShapeInfo,
int xRank,
void *extraParams,
void *result, Nd4jLong const* zShapeInfo,
int zRank,
int *dimension, int dimensionLength,
int postProcessOrNot,
int *allocationBuffer, void *reductionBuffer,
Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets) {
hipLaunchKernelGGL(( simpleIndexReduceGeneric<X, Z>), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream, opNum,
dx, xShapeInfo, xRank,
extraParams,
result, zShapeInfo, 0,
nullptr, 0,
1,
allocationBuffer, reductionBuffer,
tadOnlyShapeInfo, tadOffsets);
}
template <typename X, typename Z>
_CUDA_H void IndexReduce<X, Z>::executeIndexReduce(dim3 launchDims, hipStream_t *stream, const int opNum, void const* dx, Nd4jLong const* xShapeInfo, int xRank, void *extraParams, void *result, Nd4jLong const* zShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, void *reductionBuffer, Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets) {
hipLaunchKernelGGL(( simpleIndexReduceGeneric<X, Z>), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
dx,
xShapeInfo, xRank,
extraParams,
result,
zShapeInfo, zRank,
dimension,
dimensionLength,
1, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
}
// This is the un-specialized struct. Note that we prevent instantiation of this
// struct by putting an undefined symbol in the function body so it won't compile.
template<typename T>
struct SharedIndexValue {
// Ensure that we won't compile any un-specialized types
__device__ T * getPointer() {
extern __device__ void error(void);
error();
return 0;
}
};
// Following are the specializations for the following types.
// int, uint, char, uchar, short, ushort, long long, ulong long, bool, float, and double
// One could also specialize it for user-defined types.
template<>
struct SharedIndexValue<float> {
__device__ IndexValue<float> * getPointer() {
extern __shared__ IndexValue<float> s_int2[];
return s_int2;
}
};
// Following are the specializations for the following types.
// int, uint, char, uchar, short, ushort, long long, ulong long, bool, float, and double
// One could also specialize it for user-defined types.
template<>
struct SharedIndexValue<double> {
__device__ IndexValue<double> * getPointer() {
extern __shared__ IndexValue<double> s_int6[];
return s_int6;
}
};
template <typename X, typename Z>
template <typename OpType>
__device__ void IndexReduce<X, Z>::aggregatePartials(IndexValue<X> *sPartials, Nd4jLong tid, Nd4jLong numElements, void *vextraParams) {
// start the shared memory loop on the next power of 2 less
// than the block size. If block size is not a power of 2,
// accumulate the intermediate sums in the remainder range.
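// Worked example: for blockDim.x == 96 the loop below strips bits until
// floorPow2 == 64; threads 64..95 first fold their entries into slots 0..31,
// and the standard halving reduction then runs over the remaining 64 partials.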
auto extraParams = static_cast<X*>(vextraParams);
Nd4jLong floorPow2 = blockDim.x;
if (floorPow2 & (floorPow2 - 1)) {
while ( floorPow2 & (floorPow2 - 1) ) {
floorPow2 &= floorPow2 - 1;
}
if (tid >= floorPow2) {
IndexValue<X> prev = sPartials[tid - floorPow2];
IndexValue<X> curr = sPartials[tid];
sPartials[tid - floorPow2] = OpType::update(prev,curr,extraParams);
}
__syncthreads();
}
for (int activeThreads = floorPow2 >> 1;activeThreads; activeThreads >>= 1) {
if (tid < activeThreads && tid + activeThreads < numElements) {
IndexValue<X> curr = sPartials[tid];
IndexValue<X> next = sPartials[tid + activeThreads];
sPartials[tid] = OpType::update(curr,next,extraParams);
}
__syncthreads();
}
}
template <typename X, typename Y>
__device__ void IndexReduce<X, Y>::transform(
const int opNum,
void const* x,
Nd4jLong const* xShapeInfo,
void *extraParams,
void *result,
Nd4jLong const* zShapeInfo,
int *dimension,
int dimensionLength,
int postProcessOrNot,
int *allocationBuffer,
void *reductionBuffer,
Nd4jLong const* tadShapeInfo,
Nd4jLong const* tadOffset) {
DISPATCH_BY_OPNUM_TT(transform, PARAMS(x, xShapeInfo, extraParams, result, zShapeInfo, dimension, dimensionLength, postProcessOrNot, allocationBuffer, reductionBuffer, tadShapeInfo, tadOffset), INDEX_REDUCE_OPS);
}
template <typename X, typename Z>
template <typename OpType>
__device__ void IndexReduce<X, Z>::transform(void const* vdx, Nd4jLong const* xShapeInfo,
void *vextraParams,
void* vz, Nd4jLong const* zShapeInfo,
int *dimension, int dimensionLength,
int postProcessOrNot,
int *allocationBuffer, void *vreductionBuffer,
Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets){
/**
* Gpu information for the problem
*/
auto dx = reinterpret_cast<X const*>(vdx);
auto z = reinterpret_cast<Z*>(vz);
auto extraParams = static_cast<X*>(vextraParams);
auto reductionBuffer = static_cast<X*>(vreductionBuffer);
auto order = shape::order(xShapeInfo);
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ volatile bool resultScalar;
//shared memory space for storing intermediate results
__shared__ IndexValue<X> sPartials[CUDA_BLOCK_SIZE];
sPartials[threadIdx.x] = OpType::startingIndexValue(dx);
//length for the tad
__shared__ volatile Nd4jLong xLength;
__shared__ volatile Nd4jLong zLen;
//only compute the tad indexes once
IndexValue<X> reduction = OpType::startingIndexValue(dx);
if (threadIdx.x == 0) {
if (zShapeInfo != nullptr)
zLen = shape::length(zShapeInfo);
else zLen = 1;
if (zLen == 1)
resultScalar = true;
else
resultScalar = false;
xLength = shape::length(xShapeInfo);
}
__syncthreads();
if(sd::ArrayOptions::arrayType(xShapeInfo) == sd::ArrayType::EMPTY) {
if(sd::ArrayOptions::arrayType(zShapeInfo) == sd::ArrayType::EMPTY)
return;
for (uint i = blockIdx.x * blockDim.x + threadIdx.x; i < zLen; i += gridDim.x * blockDim.x)
z[i] = (Z) reduction.index;
return;
}
if (!resultScalar) {
__shared__ Nd4jLong tadLength;
__shared__ int tadEWS;
__shared__ int numTads;
if (threadIdx.x == 0) {
tadLength = shape::length(tadOnlyShapeInfo);
tadEWS = shape::elementWiseStride(tadOnlyShapeInfo);
numTads = shape::length(xShapeInfo) / tadLength;
}
__syncthreads();
if (dimensionLength > 1 || tadEWS < 1) {
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
auto tadOffsetForBlock = tadOffsets[r];
sPartials[threadIdx.x] = OpType::startingIndexValue(dx);
for(int i = threadIdx.x;i < tadLength; i += blockDim.x) {
auto xOffset = tadOffsetForBlock + shape::getIndexOffset(i, tadOnlyShapeInfo);
IndexValue<X> comp {dx[xOffset], i};
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], comp, extraParams);
}
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::nd4j_min<int>(blockDim.x, tadLength),extraParams);
__syncthreads();
if (threadIdx.x == 0) {
z[r] = (Z) sPartials[threadIdx.x].index;
}
__syncthreads();
}
} else {
for(int i = blockIdx.x; i < numTads; i+= gridDim.x) {
Nd4jLong tadOffsetForBlock = tadOffsets[i];
sPartials[threadIdx.x] = OpType::startingIndexValue(dx);
for (int x = threadIdx.x; x < tadLength; x+= blockDim.x) {
IndexValue<X> comp {dx[tadOffsetForBlock + x * tadEWS], x};
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], comp, extraParams);
}
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::nd4j_min<int>(blockDim.x, tadLength),extraParams);
__syncthreads();
if (threadIdx.x == 0) {
z[i] = (Z) sPartials[threadIdx.x].index; //postProcess(sPartials[0],tadLength ,extraParams);
}
__syncthreads();
}
}
} else {
auto n = shape::length(xShapeInfo);
auto xElementWiseStride = shape::elementWiseStride(xShapeInfo);
if(xElementWiseStride >= 1 && order == 'c') {
for(Nd4jLong i = tid;i < n; i += (blockDim.x * gridDim.x)) {
IndexValue<X> indexVal = {dx[i * xElementWiseStride], i};
reduction = OpType::update(reduction, indexVal, extraParams);
}
} else {
for(Nd4jLong i = tid;i < n; i += blockDim.x * gridDim.x) {
auto offset = shape::getIndexOffset(i, xShapeInfo);
IndexValue<X> indexVal = {dx[offset], i};
reduction = OpType::update(reduction, indexVal, extraParams);
}
}
sPartials[threadIdx.x] = reduction;
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::nd4j_min<int>(blockDim.x, (int) n),extraParams);
__syncthreads();
if (gridDim.x > 1) {
__shared__ bool amLast;
unsigned int *tc = (unsigned int *) reductionBuffer;
tid = threadIdx.x;
if (threadIdx.x == 0) {
auto pBuffer = reinterpret_cast<IndexValue<X> *>(reductionBuffer);
pBuffer[blockIdx.x] = {sPartials[0].value, sPartials[0].index};
}
__threadfence();
__syncthreads();
if (tid==0) {
unsigned int ticket = atomicInc(&tc[16384], gridDim.x);
amLast = (ticket == gridDim.x-1);
}
__syncthreads();
if (amLast) {
tc[16384] = 0;
IndexValue<X> *pBuffer = (IndexValue<X> *) reductionBuffer;
sPartials[threadIdx.x] = OpType::startingIndexValue(dx);
for (Nd4jLong i = threadIdx.x; i < gridDim.x; i += blockDim.x) {
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], pBuffer[i], extraParams);
}
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::nd4j_min<int>(gridDim.x, blockDim.x),extraParams);
__syncthreads();
if (tid == 0) {
z[0] = (Z) sPartials[0].index;
}
}
} else {
if (tid == 0) {
auto tc = reinterpret_cast<unsigned int *>(reductionBuffer);
tc[16384] = 0;
z[0] = (Z) sPartials[0].index;
}
}
}
}
BUILD_DOUBLE_TEMPLATE(template class ND4J_EXPORT IndexReduce, , LIBND4J_TYPES, INDEXING_TYPES);
}
}
| 002a36a13369d8f9d54b43b5a0334a1fd4c117ac.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// Created by raver on 4/9/2018.
//
#include <system/Environment.h>
#include "../indexreduce.h"
#include <system/op_boilerplate.h>
#include <helpers/DebugHelper.h>
#include <types/types.h>
#include "../legacy_ops.h"
using namespace simdOps;
template <typename X, typename Z>
static __global__ void simpleIndexReduceGeneric(const int op,
void const* dx,
Nd4jLong const* xShapeInfo, int xRank,
void *extraParams,
void *result,
Nd4jLong const* zShapeInfo, int zRank,
int *dimension,
int dimensionLength,
int postProcessOrNot, int *allocationBuffer, void *reductionBuffer, Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets) {
functions::indexreduce::IndexReduce<X, Z>::transform(op,dx,xShapeInfo,extraParams,result,zShapeInfo,dimension,dimensionLength,postProcessOrNot,allocationBuffer,reductionBuffer,tadOnlyShapeInfo,tadOffsets);
}
namespace functions {
namespace indexreduce {
template <typename X, typename Z>
_CUDA_H void IndexReduce<X,Z>::executeIndexReduceScalar(dim3 launchDims, cudaStream_t *stream,
const int opNum,
void const* dx, Nd4jLong const* xShapeInfo,
int xRank,
void *extraParams,
void *result, Nd4jLong const* zShapeInfo,
int zRank,
int *dimension, int dimensionLength,
int postProcessOrNot,
int *allocationBuffer, void *reductionBuffer,
Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets) {
simpleIndexReduceGeneric<X, Z><<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(opNum,
dx, xShapeInfo, xRank,
extraParams,
result, zShapeInfo, 0,
nullptr, 0,
1,
allocationBuffer, reductionBuffer,
tadOnlyShapeInfo, tadOffsets);
}
template <typename X, typename Z>
_CUDA_H void IndexReduce<X, Z>::executeIndexReduce(dim3 launchDims, cudaStream_t *stream, const int opNum, void const* dx, Nd4jLong const* xShapeInfo, int xRank, void *extraParams, void *result, Nd4jLong const* zShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, void *reductionBuffer, Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets) {
simpleIndexReduceGeneric<X, Z><<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
dx,
xShapeInfo, xRank,
extraParams,
result,
zShapeInfo, zRank,
dimension,
dimensionLength,
1, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
}
// This is the un-specialized struct. Note that we prevent instantiation of this
// struct by putting an undefined symbol in the function body so it won't compile.
template<typename T>
struct SharedIndexValue {
// Ensure that we won't compile any un-specialized types
__device__ T * getPointer() {
extern __device__ void error(void);
error();
return 0;
}
};
// Following are the specializations for the following types.
// int, uint, char, uchar, short, ushort, long long, ulong long, bool, float, and double
// One could also specialize it for user-defined types.
template<>
struct SharedIndexValue<float> {
__device__ IndexValue<float> * getPointer() {
extern __shared__ IndexValue<float> s_int2[];
return s_int2;
}
};
// Following are the specializations for the following types.
// int, uint, char, uchar, short, ushort, long long, ulong long, bool, float, and double
// One could also specialize it for user-defined types.
template<>
struct SharedIndexValue<double> {
__device__ IndexValue<double> * getPointer() {
extern __shared__ IndexValue<double> s_int6[];
return s_int6;
}
};
template <typename X, typename Z>
template <typename OpType>
__device__ void IndexReduce<X, Z>::aggregatePartials(IndexValue<X> *sPartials, Nd4jLong tid, Nd4jLong numElements, void *vextraParams) {
// start the shared memory loop on the next power of 2 less
// than the block size. If block size is not a power of 2,
// accumulate the intermediate sums in the remainder range.
auto extraParams = static_cast<X*>(vextraParams);
Nd4jLong floorPow2 = blockDim.x;
if (floorPow2 & (floorPow2 - 1)) {
while ( floorPow2 & (floorPow2 - 1) ) {
floorPow2 &= floorPow2 - 1;
}
if (tid >= floorPow2) {
IndexValue<X> prev = sPartials[tid - floorPow2];
IndexValue<X> curr = sPartials[tid];
sPartials[tid - floorPow2] = OpType::update(prev,curr,extraParams);
}
__syncthreads();
}
for (int activeThreads = floorPow2 >> 1;activeThreads; activeThreads >>= 1) {
if (tid < activeThreads && tid + activeThreads < numElements) {
IndexValue<X> curr = sPartials[tid];
IndexValue<X> next = sPartials[tid + activeThreads];
sPartials[tid] = OpType::update(curr,next,extraParams);
}
__syncthreads();
}
}
template <typename X, typename Y>
__device__ void IndexReduce<X, Y>::transform(
const int opNum,
void const* x,
Nd4jLong const* xShapeInfo,
void *extraParams,
void *result,
Nd4jLong const* zShapeInfo,
int *dimension,
int dimensionLength,
int postProcessOrNot,
int *allocationBuffer,
void *reductionBuffer,
Nd4jLong const* tadShapeInfo,
Nd4jLong const* tadOffset) {
DISPATCH_BY_OPNUM_TT(transform, PARAMS(x, xShapeInfo, extraParams, result, zShapeInfo, dimension, dimensionLength, postProcessOrNot, allocationBuffer, reductionBuffer, tadShapeInfo, tadOffset), INDEX_REDUCE_OPS);
}
template <typename X, typename Z>
template <typename OpType>
__device__ void IndexReduce<X, Z>::transform(void const* vdx, Nd4jLong const* xShapeInfo,
void *vextraParams,
void* vz, Nd4jLong const* zShapeInfo,
int *dimension, int dimensionLength,
int postProcessOrNot,
int *allocationBuffer, void *vreductionBuffer,
Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets){
/**
* Gpu information for the problem
*/
auto dx = reinterpret_cast<X const*>(vdx);
auto z = reinterpret_cast<Z*>(vz);
auto extraParams = static_cast<X*>(vextraParams);
auto reductionBuffer = static_cast<X*>(vreductionBuffer);
auto order = shape::order(xShapeInfo);
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ volatile bool resultScalar;
//shared memory space for storing intermediate results
__shared__ IndexValue<X> sPartials[CUDA_BLOCK_SIZE];
sPartials[threadIdx.x] = OpType::startingIndexValue(dx);
//length for the tad
__shared__ volatile Nd4jLong xLength;
__shared__ volatile Nd4jLong zLen;
//only compute the tad indexes once
IndexValue<X> reduction = OpType::startingIndexValue(dx);
if (threadIdx.x == 0) {
if (zShapeInfo != nullptr)
zLen = shape::length(zShapeInfo);
else zLen = 1;
if (zLen == 1)
resultScalar = true;
else
resultScalar = false;
xLength = shape::length(xShapeInfo);
}
__syncthreads();
if(sd::ArrayOptions::arrayType(xShapeInfo) == sd::ArrayType::EMPTY) {
if(sd::ArrayOptions::arrayType(zShapeInfo) == sd::ArrayType::EMPTY)
return;
for (uint i = blockIdx.x * blockDim.x + threadIdx.x; i < zLen; i += gridDim.x * blockDim.x)
z[i] = (Z) reduction.index;
return;
}
if (!resultScalar) {
__shared__ Nd4jLong tadLength;
__shared__ int tadEWS;
__shared__ int numTads;
if (threadIdx.x == 0) {
tadLength = shape::length(tadOnlyShapeInfo);
tadEWS = shape::elementWiseStride(tadOnlyShapeInfo);
numTads = shape::length(xShapeInfo) / tadLength;
}
__syncthreads();
if (dimensionLength > 1 || tadEWS < 1) {
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
auto tadOffsetForBlock = tadOffsets[r];
sPartials[threadIdx.x] = OpType::startingIndexValue(dx);
for(int i = threadIdx.x;i < tadLength; i += blockDim.x) {
auto xOffset = tadOffsetForBlock + shape::getIndexOffset(i, tadOnlyShapeInfo);
IndexValue<X> comp {dx[xOffset], i};
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], comp, extraParams);
}
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::nd4j_min<int>(blockDim.x, tadLength),extraParams);
__syncthreads();
if (threadIdx.x == 0) {
z[r] = (Z) sPartials[threadIdx.x].index;
}
__syncthreads();
}
} else {
for(int i = blockIdx.x; i < numTads; i+= gridDim.x) {
Nd4jLong tadOffsetForBlock = tadOffsets[i];
sPartials[threadIdx.x] = OpType::startingIndexValue(dx);
for (int x = threadIdx.x; x < tadLength; x+= blockDim.x) {
IndexValue<X> comp {dx[tadOffsetForBlock + x * tadEWS], x};
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], comp, extraParams);
}
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::nd4j_min<int>(blockDim.x, tadLength),extraParams);
__syncthreads();
if (threadIdx.x == 0) {
z[i] = (Z) sPartials[threadIdx.x].index; //postProcess(sPartials[0],tadLength ,extraParams);
}
__syncthreads();
}
}
} else {
auto n = shape::length(xShapeInfo);
auto xElementWiseStride = shape::elementWiseStride(xShapeInfo);
if(xElementWiseStride >= 1 && order == 'c') {
for(Nd4jLong i = tid;i < n; i += (blockDim.x * gridDim.x)) {
IndexValue<X> indexVal = {dx[i * xElementWiseStride], i};
reduction = OpType::update(reduction, indexVal, extraParams);
}
} else {
for(Nd4jLong i = tid;i < n; i += blockDim.x * gridDim.x) {
auto offset = shape::getIndexOffset(i, xShapeInfo);
IndexValue<X> indexVal = {dx[offset], i};
reduction = OpType::update(reduction, indexVal, extraParams);
}
}
sPartials[threadIdx.x] = reduction;
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::nd4j_min<int>(blockDim.x, (int) n),extraParams);
__syncthreads();
if (gridDim.x > 1) {
__shared__ bool amLast;
unsigned int *tc = (unsigned int *) reductionBuffer;
tid = threadIdx.x;
if (threadIdx.x == 0) {
auto pBuffer = reinterpret_cast<IndexValue<X> *>(reductionBuffer);
pBuffer[blockIdx.x] = {sPartials[0].value, sPartials[0].index};
}
__threadfence();
__syncthreads();
if (tid==0) {
unsigned int ticket = atomicInc(&tc[16384], gridDim.x);
amLast = (ticket == gridDim.x-1);
}
__syncthreads();
if (amLast) {
tc[16384] = 0;
IndexValue<X> *pBuffer = (IndexValue<X> *) reductionBuffer;
sPartials[threadIdx.x] = OpType::startingIndexValue(dx);
for (Nd4jLong i = threadIdx.x; i < gridDim.x; i += blockDim.x) {
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], pBuffer[i], extraParams);
}
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::nd4j_min<int>(gridDim.x, blockDim.x),extraParams);
__syncthreads();
if (tid == 0) {
z[0] = (Z) sPartials[0].index;
}
}
} else {
if (tid == 0) {
auto tc = reinterpret_cast<unsigned int *>(reductionBuffer);
tc[16384] = 0;
z[0] = (Z) sPartials[0].index;
}
}
}
}
BUILD_DOUBLE_TEMPLATE(template class ND4J_EXPORT IndexReduce, , LIBND4J_TYPES, INDEXING_TYPES);
}
}
|
184095c08c6fde88d22adf6dd094456181b2a0dd.hip | // !!! This is a file automatically generated by hipify!!!
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include "../../common/para.h"
// Num. of Channel
#define N_ch (TK_NUM * BT_NUM)
// Num. of sample
#define N_samp 8
#define N_col 64
double my_timer()
{
struct timeval time;
double _ret_val_0;
gettimeofday(( & time), 0);
_ret_val_0=(time.tv_sec+(time.tv_usec/1000000.0));
return _ret_val_0;
}
__global__ void FBCore(float *r, float *H, float *Vect_H, float *Vect_Dn,
float *Vect_Up, float *Vect_F, float *F, int threads, int size);
void FBComb(float *y, float *Vect_F, int *num_size, int index);
void h_FBCore(float *r, float *H, float *Vect_H, float *Vect_Dn, float *Vect_Up, float *Vect_F, float *F, int N_sim);
int main(){
float **r;
float **r_dev;
float **H;
float **H_dev;
float **F;
float **F_dev;
float **Vect_H; // output of the H filter (analysis convolution)
float **Vect_H_dev;
float **Vect_Dn; // output of the down sampler
float **Vect_Dn_dev;
float **Vect_Up; // output of the up sampler
float **Vect_Up_dev;
float **Vect_F; // this is the output of the overall filter bank
float **Vect_F_dev;
int num_thread[N_ch];
int num_size[N_ch];
float **h_Vect_F;
FILE *f;
hipSetDevice(0);
setenv("CUDA_DEVICE_MAX_CONNECTIONS", "32", 1);
hipStream_t *filter_stream;
int i, j;
double start_timer, end_timer;
filter_stream = (hipStream_t*)malloc(N_ch*sizeof(hipStream_t));
for(i = 0; i < N_ch; i++){
checkCudaErrors(hipStreamCreate(&filter_stream[i]));
}
f = fopen("rand.txt", "r");
for(i = 0; i < N_ch; i++)
fscanf(f, "%1d", &num_thread[i]);
fclose(f);
for(i = 0; i < N_ch; i++)
num_size[i] = (num_thread[i]*16)*(num_thread[i]*16);
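// Example: a digit of 4 read from rand.txt gives num_size = (4*16)^2 = 4096
// samples for that channel, later processed by one block of 4*32 = 128 threads,
// i.e. 4096/128 = 32 samples per thread in FBCore.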
r = (float**)malloc(N_ch*sizeof(float*));
H = (float**)malloc(N_ch*sizeof(float*));
F = (float**)malloc(N_ch*sizeof(float*));
Vect_H = (float**)malloc(N_ch*sizeof(float*));
Vect_Dn = (float**)malloc(N_ch*sizeof(float*));
Vect_Up = (float**)malloc(N_ch*sizeof(float*));
Vect_F = (float**)malloc(N_ch*sizeof(float*));
r_dev = (float**)malloc(N_ch*sizeof(float*));
H_dev = (float**)malloc(N_ch*sizeof(float*));
F_dev = (float**)malloc(N_ch*sizeof(float*));
Vect_H_dev = (float**)malloc(N_ch*sizeof(float*));
Vect_Dn_dev = (float**)malloc(N_ch*sizeof(float*));
Vect_Up_dev = (float**)malloc(N_ch*sizeof(float*));
Vect_F_dev = (float**)malloc(N_ch*sizeof(float*));
h_Vect_F = (float**)malloc(N_ch*sizeof(float*));
/*Memory allocation*/
for(i = 0; i < N_ch; i++){
checkCudaErrors(hipHostMalloc(&r[i], num_size[i]*sizeof(float), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&r_dev[i], num_size[i]*sizeof(float)));
checkCudaErrors(hipHostMalloc(&H[i], N_col*sizeof(float), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&H_dev[i], N_col*sizeof(float)));
checkCudaErrors(hipHostMalloc(&F[i], N_col*sizeof(float), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&F_dev[i], N_col*sizeof(float)));
checkCudaErrors(hipHostMalloc(&Vect_H[i], num_size[i]*sizeof(float), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&Vect_H_dev[i], num_size[i]*sizeof(float)));
checkCudaErrors(hipHostMalloc(&Vect_Dn[i], (num_size[i]/N_samp)*sizeof(float), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&Vect_Dn_dev[i], (num_size[i]/N_samp)*sizeof(float)));
checkCudaErrors(hipHostMalloc(&Vect_Up[i], num_size[i]*sizeof(float), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&Vect_Up_dev[i], num_size[i]*sizeof(float)));
checkCudaErrors(hipHostMalloc(&Vect_F[i], num_size[i]*sizeof(float), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&Vect_F_dev[i], num_size[i]*sizeof(float)));
h_Vect_F[i] = (float*)malloc(num_size[i] * sizeof(float));
}
printf("Filterbank inputs are generating\n");
/*init data*/
for(i = 0; i < N_ch; i++)
for(j = 0; j < num_size[i]; j++){
r[i][j] = j + 0.0001;
Vect_Up[i][j] = 0;
Vect_F[i][j] = 0;
Vect_H[i][j]=0;
h_Vect_F[i][j] = 0;
}
for(i = 0; i < N_ch; i++)
for(j = 0; j < N_col; j++){
H[i][j] = 0.0001;
F[i][j] = 0.0001;
}
// Data transfer to device
for(i = 0; i < N_ch; i++){
checkCudaErrors(hipMemcpyAsync(r_dev[i], r[i], num_size[i]*sizeof(float), hipMemcpyHostToDevice, filter_stream[i]));
checkCudaErrors(hipMemcpyAsync(Vect_Up_dev[i], Vect_Up[i], num_size[i]*sizeof(float), hipMemcpyHostToDevice, filter_stream[i]));
checkCudaErrors(hipMemcpyAsync(Vect_F_dev[i], Vect_F[i], num_size[i]*sizeof(float), hipMemcpyHostToDevice, filter_stream[i]));
checkCudaErrors(hipMemcpyAsync(Vect_H_dev[i], Vect_H[i], num_size[i]*sizeof(float), hipMemcpyHostToDevice, filter_stream[i]));
checkCudaErrors(hipMemcpyAsync(H_dev[i], H[i], N_col*sizeof(float), hipMemcpyHostToDevice, filter_stream[i]));
checkCudaErrors(hipMemcpyAsync(F_dev[i], F[i], N_col*sizeof(float), hipMemcpyHostToDevice, filter_stream[i]));
}
checkCudaErrors(hipDeviceSynchronize());
printf("Filterbank CUDA baseline is running\n");
// task launch
start_timer = my_timer();
for(i = 0; i < N_ch; i++){
hipLaunchKernelGGL(( FBCore), dim3(1), dim3(num_thread[i]*32), 0, filter_stream[i], r_dev[i], H_dev[i], Vect_H_dev[i],
Vect_Dn_dev[i], Vect_Up_dev[i], Vect_F_dev[i], F_dev[i], num_thread[i]*32, num_size[i]);
}
checkCudaErrors(hipDeviceSynchronize());
end_timer = my_timer();
printf("Filterbank CUDA baseline Elapsed Time: %f Sec.\n", end_timer - start_timer);
// Data transfer back to host
for(i = 0; i < N_ch; i++){
checkCudaErrors(hipMemcpyAsync(Vect_F[i], Vect_F_dev[i], num_size[i]*sizeof(float), hipMemcpyDeviceToHost, filter_stream[i]));
}
checkCudaErrors(hipDeviceSynchronize());
/*CPU tasks*/
printf("CPU program running\n");
start_timer = my_timer();
for(i = 0; i < N_ch; i++){
h_FBCore(r[i], H[i], Vect_H[i], Vect_Dn[i], Vect_Up[i], h_Vect_F[i], F[i], num_size[i]);
}
end_timer = my_timer();
//printf("CPU Elapsed time:%f Sec.\n", end_timer - start_timer);
/*Verify*/
printf("Verify\n");
int flag = 0;
for(i = 0; i < N_ch; i++){
for(j = 0; j < num_size[i]; j++){
if(fabs(h_Vect_F[i][j] - Vect_F[i][j]) > 1e-3){
printf("Error:%f, %f, %d\n", h_Vect_F[i][j], Vect_F[i][j], i);
flag = 1;
break;
}
}
}
if(!flag) printf("Verified successfully\n");
/*Free Memory*/
for(i = 0; i < N_ch; i++){
checkCudaErrors(hipStreamDestroy(filter_stream[i]));
}
for(i = 0; i < N_ch; i++){
checkCudaErrors(hipHostFree(r[i]));
checkCudaErrors(hipFree(r_dev[i]));
checkCudaErrors(hipHostFree(H[i]));
checkCudaErrors(hipFree(H_dev[i]));
checkCudaErrors(hipHostFree(F[i]));
checkCudaErrors(hipFree(F_dev[i]));
checkCudaErrors(hipHostFree(Vect_H[i]));
checkCudaErrors(hipFree(Vect_H_dev[i]));
checkCudaErrors(hipHostFree(Vect_Dn[i]));
checkCudaErrors(hipFree(Vect_Dn_dev[i]));
checkCudaErrors(hipHostFree(Vect_Up[i]));
checkCudaErrors(hipFree(Vect_Up_dev[i]));
checkCudaErrors(hipHostFree(Vect_F[i]));
checkCudaErrors(hipFree(Vect_F_dev[i]));
free(h_Vect_F[i]);
}
free(r);
free(H);
free(F);
free(Vect_H);
free(Vect_Dn);
free(Vect_Up);
free(Vect_F);
free(r_dev);
free(H_dev);
free(F_dev);
free(Vect_H_dev);
free(Vect_Dn_dev);
free(Vect_Up_dev);
free(Vect_F_dev);
free(h_Vect_F);
return 0;
}
void h_FBCore(float *r, float *H, float *Vect_H, float *Vect_Dn, float *Vect_Up, float *Vect_F, float *F, int N_sim){
int j, k;
//convolving H
for (j=0; j< N_sim; j++)
{
for(k = 0; k < N_col; k++){
if((j-k)>=0){
Vect_H[j] += (r[j-k]*H[k]);
}
}
}
//Down Sampling
for (j=0; j < N_sim/N_samp; j++)
Vect_Dn[j]=Vect_H[j*N_samp];
//Up Sampling
for (j=0; j < N_sim/N_samp;j++)
Vect_Up[j*N_samp]=Vect_Dn[j];
//convolving F
for (j=0; j< N_sim; j++)
{
for(k = 0; k < N_col; k++){
if((j-k)>=0){
Vect_F[j]+=(F[k]*Vect_Up[j-k]);
}
}
}
}
__global__ void FBCore(float *r, float *H, float *Vect_H, float *Vect_Dn,
float *Vect_Up, float *Vect_F, float *F, int threads, int size){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int j, k;
//convolving H
if(tid < threads){
for (j=0; j< (size/threads); j++){
for(k = 0; k < N_col; k++){
if(((j*threads+tid)-k)>=0){
Vect_H[j*threads+tid] += (r[(j*threads+tid)-k]*H[k]);
}
}
}
}
__syncthreads();
//Down Sampling
if(tid < threads)
for (j=0; j < size/N_samp/threads; j++)
Vect_Dn[(j*threads+tid)]=Vect_H[(j*threads+tid)*N_samp];
//Up Sampling
if(tid < threads)
for (j=0; j < size/N_samp/threads;j++)
Vect_Up[(j*threads+tid)*N_samp]=Vect_Dn[(j*threads+tid)];
__syncthreads();
//convolving F
if(tid < threads){
for (j=0; j< (size/threads); j++){
for(k = 0; k < N_col; k++){
if(((j*threads+tid)-k)>=0){
Vect_F[j*threads+tid]+=(F[k]*Vect_Up[(j*threads+tid)-k]);
}
}
}
}
}
| 184095c08c6fde88d22adf6dd094456181b2a0dd.cu | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include "../../common/para.h"
// Num. of Channel
#define N_ch (TK_NUM * BT_NUM)
// Num. of sample
#define N_samp 8
#define N_col 64
double my_timer()
{
struct timeval time;
double _ret_val_0;
gettimeofday(( & time), 0);
_ret_val_0=(time.tv_sec+(time.tv_usec/1000000.0));
return _ret_val_0;
}
__global__ void FBCore(float *r, float *H, float *Vect_H, float *Vect_Dn,
float *Vect_Up, float *Vect_F, float *F, int threads, int size);
void FBComb(float *y, float *Vect_F, int *num_size, int index);
void h_FBCore(float *r, float *H, float *Vect_H, float *Vect_Dn, float *Vect_Up, float *Vect_F, float *F, int N_sim);
int main(){
float **r;
float **r_dev;
float **H;
float **H_dev;
float **F;
float **F_dev;
float **Vect_H; // output of the H filter (first convolution)
float **Vect_H_dev;
float **Vect_Dn; // output of the down sampler
float **Vect_Dn_dev;
float **Vect_Up; // output of the up sampler
float **Vect_Up_dev;
float **Vect_F; // this is the output of the filterbank (convolution with F)
float **Vect_F_dev;
int num_thread[N_ch];
int num_size[N_ch];
float **h_Vect_F;
FILE *f;
cudaSetDevice(0);
setenv("CUDA_DEVICE_MAX_CONNECTIONS", "32", 1);
cudaStream_t *filter_stream;
int i, j;
double start_timer, end_timer;
filter_stream = (cudaStream_t*)malloc(N_ch*sizeof(cudaStream_t));
for(i = 0; i < N_ch; i++){
checkCudaErrors(cudaStreamCreate(&filter_stream[i]));
}
f = fopen("rand.txt", "r");
for(i = 0; i < N_ch; i++)
fscanf(f, "%1d", &num_thread[i]);
fclose(f);
for(i = 0; i < N_ch; i++)
num_size[i] = (num_thread[i]*16)*(num_thread[i]*16);
r = (float**)malloc(N_ch*sizeof(float*));
H = (float**)malloc(N_ch*sizeof(float*));
F = (float**)malloc(N_ch*sizeof(float*));
Vect_H = (float**)malloc(N_ch*sizeof(float*));
Vect_Dn = (float**)malloc(N_ch*sizeof(float*));
Vect_Up = (float**)malloc(N_ch*sizeof(float*));
Vect_F = (float**)malloc(N_ch*sizeof(float*));
r_dev = (float**)malloc(N_ch*sizeof(float*));
H_dev = (float**)malloc(N_ch*sizeof(float*));
F_dev = (float**)malloc(N_ch*sizeof(float*));
Vect_H_dev = (float**)malloc(N_ch*sizeof(float*));
Vect_Dn_dev = (float**)malloc(N_ch*sizeof(float*));
Vect_Up_dev = (float**)malloc(N_ch*sizeof(float*));
Vect_F_dev = (float**)malloc(N_ch*sizeof(float*));
h_Vect_F = (float**)malloc(N_ch*sizeof(float*));
/*Memory allocation*/
for(i = 0; i < N_ch; i++){
checkCudaErrors(cudaHostAlloc(&r[i], num_size[i]*sizeof(float), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&r_dev[i], num_size[i]*sizeof(float)));
checkCudaErrors(cudaHostAlloc(&H[i], N_col*sizeof(float), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&H_dev[i], N_col*sizeof(float)));
checkCudaErrors(cudaHostAlloc(&F[i], N_col*sizeof(float), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&F_dev[i], N_col*sizeof(float)));
checkCudaErrors(cudaHostAlloc(&Vect_H[i], num_size[i]*sizeof(float), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&Vect_H_dev[i], num_size[i]*sizeof(float)));
checkCudaErrors(cudaHostAlloc(&Vect_Dn[i], (num_size[i]/N_samp)*sizeof(float), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&Vect_Dn_dev[i], (num_size[i]/N_samp)*sizeof(float)));
checkCudaErrors(cudaHostAlloc(&Vect_Up[i], num_size[i]*sizeof(float), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&Vect_Up_dev[i], num_size[i]*sizeof(float)));
checkCudaErrors(cudaHostAlloc(&Vect_F[i], num_size[i]*sizeof(float), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&Vect_F_dev[i], num_size[i]*sizeof(float)));
h_Vect_F[i] = (float*)malloc(num_size[i] * sizeof(float));
}
printf("Filterbank inputs are generating\n");
/*init data*/
for(i = 0; i < N_ch; i++)
for(j = 0; j < num_size[i]; j++){
r[i][j] = j + 0.0001;
Vect_Up[i][j] = 0;
Vect_F[i][j] = 0;
Vect_H[i][j]=0;
h_Vect_F[i][j] = 0;
}
for(i = 0; i < N_ch; i++)
for(j = 0; j < N_col; j++){
H[i][j] = 0.0001;
F[i][j] = 0.0001;
}
// Data transfer to device
for(i = 0; i < N_ch; i++){
checkCudaErrors(cudaMemcpyAsync(r_dev[i], r[i], num_size[i]*sizeof(float), cudaMemcpyHostToDevice, filter_stream[i]));
checkCudaErrors(cudaMemcpyAsync(Vect_Up_dev[i], Vect_Up[i], num_size[i]*sizeof(float), cudaMemcpyHostToDevice, filter_stream[i]));
checkCudaErrors(cudaMemcpyAsync(Vect_F_dev[i], Vect_F[i], num_size[i]*sizeof(float), cudaMemcpyHostToDevice, filter_stream[i]));
checkCudaErrors(cudaMemcpyAsync(Vect_H_dev[i], Vect_H[i], num_size[i]*sizeof(float), cudaMemcpyHostToDevice, filter_stream[i]));
checkCudaErrors(cudaMemcpyAsync(H_dev[i], H[i], N_col*sizeof(float), cudaMemcpyHostToDevice, filter_stream[i]));
checkCudaErrors(cudaMemcpyAsync(F_dev[i], F[i], N_col*sizeof(float), cudaMemcpyHostToDevice, filter_stream[i]));
}
checkCudaErrors(cudaDeviceSynchronize());
printf("Filterbank CUDA baseline is running\n");
// task launch
start_timer = my_timer();
for(i = 0; i < N_ch; i++){
FBCore<<<1, num_thread[i]*32, 0, filter_stream[i]>>>(r_dev[i], H_dev[i], Vect_H_dev[i],
Vect_Dn_dev[i], Vect_Up_dev[i], Vect_F_dev[i], F_dev[i], num_thread[i]*32, num_size[i]);
}
checkCudaErrors(cudaDeviceSynchronize());
end_timer = my_timer();
printf("Filterbank CUDA baseline Elapsed Time: %f Sec.\n", end_timer - start_timer);
// Data transfer back to host
for(i = 0; i < N_ch; i++){
checkCudaErrors(cudaMemcpyAsync(Vect_F[i], Vect_F_dev[i], num_size[i]*sizeof(float), cudaMemcpyDeviceToHost, filter_stream[i]));
}
checkCudaErrors(cudaDeviceSynchronize());
/*CPU tasks*/
printf("CPU program running\n");
start_timer = my_timer();
for(i = 0; i < N_ch; i++){
h_FBCore(r[i], H[i], Vect_H[i], Vect_Dn[i], Vect_Up[i], h_Vect_F[i], F[i], num_size[i]);
}
end_timer = my_timer();
//printf("CPU Elapsed time:%f Sec.\n", end_timer - start_timer);
/*Verify*/
printf("Verify\n");
int flag = 0;
for(i = 0; i < N_ch; i++){
for(j = 0; j < num_size[i]; j++){
if(fabs(h_Vect_F[i][j] - Vect_F[i][j]) > 1e-3){
printf("Error:%f, %f, %d\n", h_Vect_F[i][j], Vect_F[i][j], i);
flag = 1;
break;
}
}
}
if(!flag) printf("Verify successfully\n");
/*Free Memory*/
for(i = 0; i < N_ch; i++){
checkCudaErrors(cudaStreamDestroy(filter_stream[i]));
}
for(i = 0; i < N_ch; i++){
checkCudaErrors(cudaFreeHost(r[i]));
checkCudaErrors(cudaFree(r_dev[i]));
checkCudaErrors(cudaFreeHost(H[i]));
checkCudaErrors(cudaFree(H_dev[i]));
checkCudaErrors(cudaFreeHost(F[i]));
checkCudaErrors(cudaFree(F_dev[i]));
checkCudaErrors(cudaFreeHost(Vect_H[i]));
checkCudaErrors(cudaFree(Vect_H_dev[i]));
checkCudaErrors(cudaFreeHost(Vect_Dn[i]));
checkCudaErrors(cudaFree(Vect_Dn_dev[i]));
checkCudaErrors(cudaFreeHost(Vect_Up[i]));
checkCudaErrors(cudaFree(Vect_Up_dev[i]));
checkCudaErrors(cudaFreeHost(Vect_F[i]));
checkCudaErrors(cudaFree(Vect_F_dev[i]));
free(h_Vect_F[i]);
}
free(r);
free(H);
free(F);
free(Vect_H);
free(Vect_Dn);
free(Vect_Up);
free(Vect_F);
free(r_dev);
free(H_dev);
free(F_dev);
free(Vect_H_dev);
free(Vect_Dn_dev);
free(Vect_Up_dev);
free(Vect_F_dev);
free(h_Vect_F);
return 0;
}
void h_FBCore(float *r, float *H, float *Vect_H, float *Vect_Dn, float *Vect_Up, float *Vect_F, float *F, int N_sim){
int j, k;
//convolving H
for (j=0; j< N_sim; j++)
{
for(k = 0; k < N_col; k++){
if((j-k)>=0){
Vect_H[j] += (r[j-k]*H[k]);
}
}
}
//Down Sampling
for (j=0; j < N_sim/N_samp; j++)
Vect_Dn[j]=Vect_H[j*N_samp];
//Up Sampling
for (j=0; j < N_sim/N_samp;j++)
Vect_Up[j*N_samp]=Vect_Dn[j];
//convolving F
for (j=0; j< N_sim; j++)
{
for(k = 0; k < N_col; k++){
if((j-k)>=0){
Vect_F[j]+=(F[k]*Vect_Up[j-k]);
}
}
}
}
__global__ void FBCore(float *r, float *H, float *Vect_H, float *Vect_Dn,
float *Vect_Up, float *Vect_F, float *F, int threads, int size){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int j, k;
//convolving H
if(tid < threads){
for (j=0; j< (size/threads); j++){
for(k = 0; k < N_col; k++){
if(((j*threads+tid)-k)>=0){
Vect_H[j*threads+tid] += (r[(j*threads+tid)-k]*H[k]);
}
}
}
}
__syncthreads();
//Down Sampling
if(tid < threads)
for (j=0; j < size/N_samp/threads; j++)
Vect_Dn[(j*threads+tid)]=Vect_H[(j*threads+tid)*N_samp];
//Up Sampling
if(tid < threads)
for (j=0; j < size/N_samp/threads;j++)
Vect_Up[(j*threads+tid)*N_samp]=Vect_Dn[(j*threads+tid)];
__syncthreads();
//convolving F
if(tid < threads){
for (j=0; j< (size/threads); j++){
for(k = 0; k < N_col; k++){
if(((j*threads+tid)-k)>=0){
Vect_F[j*threads+tid]+=(F[k]*Vect_Up[(j*threads+tid)-k]);
}
}
}
}
}
|
5ae897c4dbfce85fcd67d08012d58db596d43e55.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "topk_impl.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "hipcub/hipcub.hpp"
#include <limits>
namespace onnxruntime {
namespace cuda {
template <typename T>
__global__ void FillInput(const T* input_x, T* output_v, int64_t* output_i, const int64_t* elem_nums, size_t size, int64_t axis, int64_t K, int64_t offset, int64_t dimension) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, dimension);
auto left = offset / (axis == size - 1 ? 1 : elem_nums[axis + 1]) * elem_nums[axis];
auto right = axis == size - 1 ? 0 : offset % elem_nums[axis + 1];
auto input_offset = left + id * (axis == size - 1 ? 1 : elem_nums[axis + 1]) + right;
output_v[id] = input_x[input_offset];
output_i[id] = id;
}
template <typename T>
__global__ void FillOutput(const T* input_v, const int64_t* input_i, T* output_v, int64_t* output_i, const int64_t* elem_nums, size_t size, int64_t axis, int64_t K, int64_t offset, int64_t dimension) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, K);
auto left = offset / (axis == size - 1 ? 1 : elem_nums[axis + 1]) * elem_nums[axis] * K / dimension;
auto right = axis == size - 1 ? 0 : offset % elem_nums[axis + 1];
auto output_offset = left + id * (axis == size - 1 ? 1 : elem_nums[axis + 1]) + right;
output_v[output_offset] = input_v[id];
output_i[output_offset] = input_i[id];
}
__global__ void ExcludeOutput(int64_t* output_i, int64_t K, int64_t dimension) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, dimension);
if (id >= K) {
output_i[id] = dimension;
}
}
template <typename T>
Status TopKImpl(const CudaKernel* kernel, const T* input_x, T* output_v, int64_t* output_i, const int64_t* elem_nums, size_t size, int64_t axis, int64_t K, int64_t largest, int64_t sorted, int64_t N, int64_t dimension) {
auto input_key_buffer = kernel->GetScratchBuffer<T>(dimension);
auto output_key_buffer = kernel->GetScratchBuffer<T>(dimension);
auto input_value_buffer = kernel->GetScratchBuffer<int64_t>(dimension);
auto output_value_buffer = kernel->GetScratchBuffer<int64_t>(dimension);
auto input_key = input_key_buffer.get();
auto output_key = output_key_buffer.get();
auto input_value = input_value_buffer.get();
auto output_value = output_value_buffer.get();
size_t temp_bytes = 0;
CUDA_RETURN_IF_ERROR(hipcub::DeviceRadixSort::SortPairs(nullptr, temp_bytes, input_key, output_key, input_value, output_value, dimension));
auto temp_storage_buffer = kernel->GetScratchBuffer<char>(temp_bytes);
auto temp_storage = temp_storage_buffer.get();
auto blocksPerGridD = (int)(ceil(static_cast<float>(dimension) / GridDim::maxThreadsPerBlock));
auto blocksPerGridK = (int)(ceil(static_cast<float>(K) / GridDim::maxThreadsPerBlock));
for (int64_t i = 0; i < N; i++) {
hipLaunchKernelGGL(( FillInput<T>), dim3(blocksPerGridD), dim3(GridDim::maxThreadsPerBlock), 0, 0, input_x, input_key, input_value, elem_nums, size, axis, K, i, dimension);
CUDA_RETURN_IF_ERROR(1 == largest ? hipcub::DeviceRadixSort::SortPairsDescending(temp_storage, temp_bytes, input_key, output_key, input_value, output_value, dimension) : hipcub::DeviceRadixSort::SortPairs(temp_storage, temp_bytes, input_key, output_key, input_value, output_value, dimension));
if (1 == sorted) {
hipLaunchKernelGGL(( FillOutput<T>), dim3(blocksPerGridK), dim3(GridDim::maxThreadsPerBlock), 0, 0, output_key, output_value, output_v, output_i, elem_nums, size, axis, K, i, dimension);
} else { //reorder by ascending index
hipLaunchKernelGGL(( ExcludeOutput), dim3(blocksPerGridD), dim3(GridDim::maxThreadsPerBlock), 0, 0, output_value, K, dimension);
CUDA_RETURN_IF_ERROR(hipcub::DeviceRadixSort::SortPairs(temp_storage, temp_bytes, output_value, input_value, output_key, input_key, dimension));
hipLaunchKernelGGL(( FillOutput<T>), dim3(blocksPerGridK), dim3(GridDim::maxThreadsPerBlock), 0, 0, input_key, input_value, output_v, output_i, elem_nums, size, axis, K, i, dimension);
}
}
return Status::OK();
}
#define TOPKIMPLE(T) template Status TopKImpl<T>(const CudaKernel* kernel, \
const T* input_x, \
T* output_v, \
int64_t* output_i, \
const int64_t* elem_nums, \
size_t size, \
int64_t axis, \
int64_t K, \
int64_t largest, \
int64_t sorted, \
int64_t N, \
int64_t dimension)
TOPKIMPLE(uint8_t);
TOPKIMPLE(uint16_t);
TOPKIMPLE(uint32_t);
TOPKIMPLE(uint64_t);
TOPKIMPLE(int8_t);
TOPKIMPLE(int16_t);
TOPKIMPLE(int32_t);
TOPKIMPLE(int64_t);
TOPKIMPLE(float);
TOPKIMPLE(double);
} // namespace cuda
} // namespace onnxruntime | 5ae897c4dbfce85fcd67d08012d58db596d43e55.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "topk_impl.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "cub/cub.cuh"
#include <limits>
namespace onnxruntime {
namespace cuda {
template <typename T>
__global__ void FillInput(const T* input_x, T* output_v, int64_t* output_i, const int64_t* elem_nums, size_t size, int64_t axis, int64_t K, int64_t offset, int64_t dimension) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, dimension);
auto left = offset / (axis == size - 1 ? 1 : elem_nums[axis + 1]) * elem_nums[axis];
auto right = axis == size - 1 ? 0 : offset % elem_nums[axis + 1];
auto input_offset = left + id * (axis == size - 1 ? 1 : elem_nums[axis + 1]) + right;
output_v[id] = input_x[input_offset];
output_i[id] = id;
}
template <typename T>
__global__ void FillOutput(const T* input_v, const int64_t* input_i, T* output_v, int64_t* output_i, const int64_t* elem_nums, size_t size, int64_t axis, int64_t K, int64_t offset, int64_t dimension) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, K);
auto left = offset / (axis == size - 1 ? 1 : elem_nums[axis + 1]) * elem_nums[axis] * K / dimension;
auto right = axis == size - 1 ? 0 : offset % elem_nums[axis + 1];
auto output_offset = left + id * (axis == size - 1 ? 1 : elem_nums[axis + 1]) + right;
output_v[output_offset] = input_v[id];
output_i[output_offset] = input_i[id];
}
__global__ void ExcludeOutput(int64_t* output_i, int64_t K, int64_t dimension) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, dimension);
if (id >= K) {
output_i[id] = dimension;
}
}
template <typename T>
Status TopKImpl(const CudaKernel* kernel, const T* input_x, T* output_v, int64_t* output_i, const int64_t* elem_nums, size_t size, int64_t axis, int64_t K, int64_t largest, int64_t sorted, int64_t N, int64_t dimension) {
auto input_key_buffer = kernel->GetScratchBuffer<T>(dimension);
auto output_key_buffer = kernel->GetScratchBuffer<T>(dimension);
auto input_value_buffer = kernel->GetScratchBuffer<int64_t>(dimension);
auto output_value_buffer = kernel->GetScratchBuffer<int64_t>(dimension);
auto input_key = input_key_buffer.get();
auto output_key = output_key_buffer.get();
auto input_value = input_value_buffer.get();
auto output_value = output_value_buffer.get();
size_t temp_bytes = 0;
CUDA_RETURN_IF_ERROR(cub::DeviceRadixSort::SortPairs(nullptr, temp_bytes, input_key, output_key, input_value, output_value, dimension));
auto temp_storage_buffer = kernel->GetScratchBuffer<char>(temp_bytes);
auto temp_storage = temp_storage_buffer.get();
auto blocksPerGridD = (int)(ceil(static_cast<float>(dimension) / GridDim::maxThreadsPerBlock));
auto blocksPerGridK = (int)(ceil(static_cast<float>(K) / GridDim::maxThreadsPerBlock));
for (int64_t i = 0; i < N; i++) {
FillInput<T><<<blocksPerGridD, GridDim::maxThreadsPerBlock, 0>>>(input_x, input_key, input_value, elem_nums, size, axis, K, i, dimension);
CUDA_RETURN_IF_ERROR(1 == largest ? cub::DeviceRadixSort::SortPairsDescending(temp_storage, temp_bytes, input_key, output_key, input_value, output_value, dimension) : cub::DeviceRadixSort::SortPairs(temp_storage, temp_bytes, input_key, output_key, input_value, output_value, dimension));
if (1 == sorted) {
FillOutput<T><<<blocksPerGridK, GridDim::maxThreadsPerBlock, 0>>>(output_key, output_value, output_v, output_i, elem_nums, size, axis, K, i, dimension);
} else { //reorder by ascending index
ExcludeOutput<<<blocksPerGridD, GridDim::maxThreadsPerBlock, 0>>>(output_value, K, dimension);
CUDA_RETURN_IF_ERROR(cub::DeviceRadixSort::SortPairs(temp_storage, temp_bytes, output_value, input_value, output_key, input_key, dimension));
FillOutput<T><<<blocksPerGridK, GridDim::maxThreadsPerBlock, 0>>>(input_key, input_value, output_v, output_i, elem_nums, size, axis, K, i, dimension);
}
}
return Status::OK();
}
#define TOPKIMPLE(T) template Status TopKImpl<T>(const CudaKernel* kernel, \
const T* input_x, \
T* output_v, \
int64_t* output_i, \
const int64_t* elem_nums, \
size_t size, \
int64_t axis, \
int64_t K, \
int64_t largest, \
int64_t sorted, \
int64_t N, \
int64_t dimension)
TOPKIMPLE(uint8_t);
TOPKIMPLE(uint16_t);
TOPKIMPLE(uint32_t);
TOPKIMPLE(uint64_t);
TOPKIMPLE(int8_t);
TOPKIMPLE(int16_t);
TOPKIMPLE(int32_t);
TOPKIMPLE(int64_t);
TOPKIMPLE(float);
TOPKIMPLE(double);
} // namespace cuda
} // namespace onnxruntime |
ac35f6da23858f450e9d2001090312ee16ed9ee9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
extern "C" {
#include "activations.h"
#include "hip/hip_runtime.h"
}
__device__ float lhtan_activate_kernel(float x)
{
if(x < 0) return .001*x;
if(x > 1) return .001*(x-1) + 1;
return x;
}
__device__ float lhtan_gradient_kernel(float x)
{
if(x > 0 && x < 1) return 1;
return .001;
}
__device__ float hardtan_activate_kernel(float x)
{
if (x < -1) return -1;
if (x > 1) return 1;
return x;
}
__device__ float linear_activate_kernel(float x){return x;}
__device__ float logistic_activate_kernel(float x){return 1./(1. + exp(-x));}
__device__ float loggy_activate_kernel(float x){return 2./(1. + exp(-x)) - 1;}
__device__ float relu_activate_kernel(float x){return x*(x>0);}
__device__ float elu_activate_kernel(float x){return (x >= 0)*x + (x < 0)*(exp(x)-1);}
__device__ float relie_activate_kernel(float x){return (x>0) ? x : .01*x;}
__device__ float ramp_activate_kernel(float x){return x*(x>0)+.1*x;}
__device__ float leaky_activate_kernel(float x){return (x>0) ? x : .1*x;}
__device__ float tanh_activate_kernel(float x){return (2/(1 + exp(-2*x)) - 1);}
__device__ float plse_activate_kernel(float x)
{
if(x < -4) return .01 * (x + 4);
if(x > 4) return .01 * (x - 4) + 1;
return .125*x + .5;
}
__device__ float stair_activate_kernel(float x)
{
int n = floor(x);
if (n%2 == 0) return floor(x/2.);
else return (x - n) + floor(x/2.);
}
__device__ float hardtan_gradient_kernel(float x)
{
if (x > -1 && x < 1) return 1;
return 0;
}
__device__ float linear_gradient_kernel(float x){return 1;}
__device__ float logistic_gradient_kernel(float x){return (1-x)*x;}
__device__ float loggy_gradient_kernel(float x)
{
float y = (x+1.)/2.;
return 2*(1-y)*y;
}
__device__ float relu_gradient_kernel(float x){return (x>0);}
__device__ float elu_gradient_kernel(float x){return (x >= 0) + (x < 0)*(x + 1);}
__device__ float relie_gradient_kernel(float x){return (x>0) ? 1 : .01;}
__device__ float ramp_gradient_kernel(float x){return (x>0)+.1;}
__device__ float leaky_gradient_kernel(float x){return (x>0) ? 1 : .1;}
__device__ float tanh_gradient_kernel(float x){return 1-x*x;}
__device__ float plse_gradient_kernel(float x){return (x < 0 || x > 1) ? .01 : .125;}
__device__ float stair_gradient_kernel(float x)
{
if (floor(x) == x) return 0;
return 1;
}
__device__ float activate_kernel(float x, ACTIVATION a)
{
switch(a){
case LINEAR:
return linear_activate_kernel(x);
case LOGISTIC:
return logistic_activate_kernel(x);
case LOGGY:
return loggy_activate_kernel(x);
case RELU:
return relu_activate_kernel(x);
case ELU:
return elu_activate_kernel(x);
case RELIE:
return relie_activate_kernel(x);
case RAMP:
return ramp_activate_kernel(x);
case LEAKY:
return leaky_activate_kernel(x);
case TANH:
return tanh_activate_kernel(x);
case PLSE:
return plse_activate_kernel(x);
case STAIR:
return stair_activate_kernel(x);
case HARDTAN:
return hardtan_activate_kernel(x);
case LHTAN:
return lhtan_activate_kernel(x);
}
return 0;
}
__device__ float gradient_kernel(float x, ACTIVATION a)
{
switch(a){
case LINEAR:
return linear_gradient_kernel(x);
case LOGISTIC:
return logistic_gradient_kernel(x);
case LOGGY:
return loggy_gradient_kernel(x);
case RELU:
return relu_gradient_kernel(x);
case ELU:
return elu_gradient_kernel(x);
case RELIE:
return relie_gradient_kernel(x);
case RAMP:
return ramp_gradient_kernel(x);
case LEAKY:
return leaky_gradient_kernel(x);
case TANH:
return tanh_gradient_kernel(x);
case PLSE:
return plse_gradient_kernel(x);
case STAIR:
return stair_gradient_kernel(x);
case HARDTAN:
return hardtan_gradient_kernel(x);
case LHTAN:
return lhtan_gradient_kernel(x);
}
return 0;
}
__global__ void activate_array_kernel(float *x, int n, ACTIVATION a)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n) x[i] = activate_kernel(x[i], a);
}
__global__ void gradient_array_kernel(float *x, int n, ACTIVATION a, float *delta)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n) delta[i] *= gradient_kernel(x[i], a);
}
extern "C" void activate_array_ongpu(float *x, int n, ACTIVATION a)
{
hipLaunchKernelGGL(( activate_array_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, x, n, a);
check_error(hipPeekAtLastError());
}
extern "C" void gradient_array_ongpu(float *x, int n, ACTIVATION a, float *delta)
{
hipLaunchKernelGGL(( gradient_array_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, x, n, a, delta);
check_error(hipPeekAtLastError());
}
| ac35f6da23858f450e9d2001090312ee16ed9ee9.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
extern "C" {
#include "activations.h"
#include "cuda.h"
}
__device__ float lhtan_activate_kernel(float x)
{
if(x < 0) return .001*x;
if(x > 1) return .001*(x-1) + 1;
return x;
}
__device__ float lhtan_gradient_kernel(float x)
{
if(x > 0 && x < 1) return 1;
return .001;
}
__device__ float hardtan_activate_kernel(float x)
{
if (x < -1) return -1;
if (x > 1) return 1;
return x;
}
__device__ float linear_activate_kernel(float x){return x;}
__device__ float logistic_activate_kernel(float x){return 1./(1. + exp(-x));}
__device__ float loggy_activate_kernel(float x){return 2./(1. + exp(-x)) - 1;}
__device__ float relu_activate_kernel(float x){return x*(x>0);}
__device__ float elu_activate_kernel(float x){return (x >= 0)*x + (x < 0)*(exp(x)-1);}
__device__ float relie_activate_kernel(float x){return (x>0) ? x : .01*x;}
__device__ float ramp_activate_kernel(float x){return x*(x>0)+.1*x;}
__device__ float leaky_activate_kernel(float x){return (x>0) ? x : .1*x;}
__device__ float tanh_activate_kernel(float x){return (2/(1 + exp(-2*x)) - 1);}
__device__ float plse_activate_kernel(float x)
{
if(x < -4) return .01 * (x + 4);
if(x > 4) return .01 * (x - 4) + 1;
return .125*x + .5;
}
__device__ float stair_activate_kernel(float x)
{
int n = floor(x);
if (n%2 == 0) return floor(x/2.);
else return (x - n) + floor(x/2.);
}
__device__ float hardtan_gradient_kernel(float x)
{
if (x > -1 && x < 1) return 1;
return 0;
}
__device__ float linear_gradient_kernel(float x){return 1;}
__device__ float logistic_gradient_kernel(float x){return (1-x)*x;}
__device__ float loggy_gradient_kernel(float x)
{
float y = (x+1.)/2.;
return 2*(1-y)*y;
}
__device__ float relu_gradient_kernel(float x){return (x>0);}
__device__ float elu_gradient_kernel(float x){return (x >= 0) + (x < 0)*(x + 1);}
__device__ float relie_gradient_kernel(float x){return (x>0) ? 1 : .01;}
__device__ float ramp_gradient_kernel(float x){return (x>0)+.1;}
__device__ float leaky_gradient_kernel(float x){return (x>0) ? 1 : .1;}
__device__ float tanh_gradient_kernel(float x){return 1-x*x;}
__device__ float plse_gradient_kernel(float x){return (x < 0 || x > 1) ? .01 : .125;}
__device__ float stair_gradient_kernel(float x)
{
if (floor(x) == x) return 0;
return 1;
}
__device__ float activate_kernel(float x, ACTIVATION a)
{
switch(a){
case LINEAR:
return linear_activate_kernel(x);
case LOGISTIC:
return logistic_activate_kernel(x);
case LOGGY:
return loggy_activate_kernel(x);
case RELU:
return relu_activate_kernel(x);
case ELU:
return elu_activate_kernel(x);
case RELIE:
return relie_activate_kernel(x);
case RAMP:
return ramp_activate_kernel(x);
case LEAKY:
return leaky_activate_kernel(x);
case TANH:
return tanh_activate_kernel(x);
case PLSE:
return plse_activate_kernel(x);
case STAIR:
return stair_activate_kernel(x);
case HARDTAN:
return hardtan_activate_kernel(x);
case LHTAN:
return lhtan_activate_kernel(x);
}
return 0;
}
__device__ float gradient_kernel(float x, ACTIVATION a)
{
switch(a){
case LINEAR:
return linear_gradient_kernel(x);
case LOGISTIC:
return logistic_gradient_kernel(x);
case LOGGY:
return loggy_gradient_kernel(x);
case RELU:
return relu_gradient_kernel(x);
case ELU:
return elu_gradient_kernel(x);
case RELIE:
return relie_gradient_kernel(x);
case RAMP:
return ramp_gradient_kernel(x);
case LEAKY:
return leaky_gradient_kernel(x);
case TANH:
return tanh_gradient_kernel(x);
case PLSE:
return plse_gradient_kernel(x);
case STAIR:
return stair_gradient_kernel(x);
case HARDTAN:
return hardtan_gradient_kernel(x);
case LHTAN:
return lhtan_gradient_kernel(x);
}
return 0;
}
__global__ void activate_array_kernel(float *x, int n, ACTIVATION a)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n) x[i] = activate_kernel(x[i], a);
}
__global__ void gradient_array_kernel(float *x, int n, ACTIVATION a, float *delta)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n) delta[i] *= gradient_kernel(x[i], a);
}
extern "C" void activate_array_ongpu(float *x, int n, ACTIVATION a)
{
activate_array_kernel<<<cuda_gridsize(n), BLOCK>>>(x, n, a);
check_error(cudaPeekAtLastError());
}
extern "C" void gradient_array_ongpu(float *x, int n, ACTIVATION a, float *delta)
{
gradient_array_kernel<<<cuda_gridsize(n), BLOCK>>>(x, n, a, delta);
check_error(cudaPeekAtLastError());
}
|
1ac17669188b1b58f5d615f3b256075d0631549d.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "efficient.h"
#define blockSize 256
namespace StreamCompaction {
namespace Efficient {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
__global__ void kernUpSweep(int n, int d, int d1, int *idata) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index > (n/d1)) {
return;
}
int k = d1 * index;
idata[k + d1 - 1] += idata[k + d - 1];
}
__global__ void kernDownSweep(int n, int d, int d1, int*idata) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index > (n / d1)) {
return;
}
int k = d1*index;
int t = idata[k + d - 1];
idata[k + d - 1] = idata[k + d1 - 1];
idata[k + d1 - 1] += t;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
// TODO
int *dev_iData;
// smallest power of 2 >= n
int pow2 = pow(2, ilog2ceil(n));
int levels = ilog2ceil(n);
hipMalloc((void**)&dev_iData, (pow2 + 1) * sizeof(int));
checkCUDAError("hipMalloc dev_iData failed");
hipMemcpy(dev_iData, idata, sizeof(int)*n, hipMemcpyHostToDevice);
checkCUDAError("hipMemcpy dev_iData failed");
timer().startGpuTimer();
for (int i = 0; i < levels; i++) {
int d = pow(2, i);
int d1 = pow(2, i + 1);
int blocknum = ceil(pow2/ d1);
dim3 fullBlocks((blocknum + blockSize - 1) / blockSize);
kernUpSweep << <fullBlocks, blockSize>> > (n, d, d1, dev_iData);
hipDeviceSynchronize();
}
int a = 0;
hipMemcpy(&dev_iData[pow2 - 1], &a, sizeof(int), hipMemcpyHostToDevice);
for (int i = levels - 1; i >= 0; i--) {
int d = pow(2, i);
int d1 = pow(2, i + 1);
int blocknum = ceil(pow2 / d1);
dim3 fullBlocks((blocknum + blockSize - 1) / blockSize);
kernDownSweep << <fullBlocks, blockSize>> > (n, d, d1, dev_iData);
hipDeviceSynchronize();
}
timer().endGpuTimer();
hipMemcpy(odata, dev_iData, sizeof(int)*(n), hipMemcpyDeviceToHost);
hipFree(dev_iData);
}
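// Minimal illustrative sketch of how scan() is meant to be called; the helper
// name and the sample values are made up for illustration. scan() produces an
// exclusive prefix sum, so out[i] holds the sum of in[0] .. in[i-1].
inline void scanUsageExample() {
    int in[6] = { 3, 1, 7, 0, 4, 1 };
    int out[6] = { 0 };
    scan(6, out, in);
    // Expected: out == { 0, 3, 4, 11, 11, 15 }
}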
/**
* Performs stream compaction on idata, storing the result into odata.
* All zeroes are discarded.
*
* @param n The number of elements in idata.
* @param odata The array into which to store elements.
* @param idata The array of elements to compact.
* @returns The number of elements remaining after compaction.
*/
int compact(int n, int *odata, const int *idata) {
// TODO
int *dev_bools;
int *dev_indices;
int *dev_odata;
int *dev_idata;
hipMalloc((void**)&dev_bools, n * sizeof(int));
checkCUDAError("hipMalloc dev_bools failed");
hipMalloc((void**)&dev_indices, n * sizeof(int));
checkCUDAError("hipMalloc dev_indices failed");
hipMalloc((void**)&dev_odata, n * sizeof(int));
checkCUDAError("hipMalloc dev_odata failed");
hipMalloc((void**)&dev_idata, n * sizeof(int));
checkCUDAError("hipMalloc dev_idata failed");
hipMemcpy(dev_idata, idata, sizeof(int)*n, hipMemcpyHostToDevice);
checkCUDAError("hipMemcpy dev_idata failed");
timer().startGpuTimer();
dim3 fullBlocks((n + blockSize - 1) / blockSize);
StreamCompaction::Common::kernMapToBoolean << <fullBlocks, blockSize >> > (n, dev_bools, dev_idata);
int *indices = new int[n];
int *bools = new int[n];
hipMemcpy(bools, dev_bools, sizeof(int)*n, hipMemcpyDeviceToHost);
checkCUDAError("hipMemcpy dev_bools failed");
timer().endGpuTimer();
scan(n, indices, bools);
timer().startGpuTimer();
hipMemcpy(dev_indices, indices, sizeof(int)*n, hipMemcpyHostToDevice);
checkCUDAError("hipMemcpy dev_indices failed");
StreamCompaction::Common::kernScatter << <fullBlocks, blockSize >> > (n, dev_odata, dev_idata, dev_bools, dev_indices);
timer().endGpuTimer();
int count;
hipMemcpy(&count, &dev_indices[n-1], sizeof(int), hipMemcpyDeviceToHost);
checkCUDAError("hipMemcpy dev_indices failed");
int lastBool;
hipMemcpy(&lastBool, &dev_bools[n - 1], sizeof(int), hipMemcpyDeviceToHost);
checkCUDAError("hipMemcpy dev_bools failed");
hipMemcpy(odata, dev_odata, n * sizeof(int), hipMemcpyDeviceToHost);
checkCUDAError("hipMemcpy dev_bools failed");
hipFree(dev_bools);
hipFree(dev_indices);
hipFree(dev_idata);
hipFree(dev_odata);
delete[] bools;
delete[] indices;
return count + lastBool;
}
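// Minimal illustrative sketch of compact(); the helper name and the sample
// values are made up for illustration. compact() drops the zero elements,
// writes the survivors to odata in order, and returns how many remain.
inline void compactUsageExample() {
    int in[6] = { 0, 1, 0, 2, 3, 0 };
    int out[6] = { 0 };
    int count = compact(6, out, in);
    // Expected: count == 3 and out starts with { 1, 2, 3 }
    (void)count;
}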
}
}
| 1ac17669188b1b58f5d615f3b256075d0631549d.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "efficient.h"
#define blockSize 256
namespace StreamCompaction {
namespace Efficient {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
__global__ void kernUpSweep(int n, int d, int d1, int *idata) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index > (n/d1)) {
return;
}
int k = d1 * index;
idata[k + d1 - 1] += idata[k + d - 1];
}
__global__ void kernDownSweep(int n, int d, int d1, int*idata) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index > (n / d1)) {
return;
}
int k = d1*index;
int t = idata[k + d - 1];
idata[k + d - 1] = idata[k + d1 - 1];
idata[k + d1 - 1] += t;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
// TODO
int *dev_iData;
// smallest power of 2 >= n
int pow2 = pow(2, ilog2ceil(n));
int levels = ilog2ceil(n);
cudaMalloc((void**)&dev_iData, (pow2 + 1) * sizeof(int));
checkCUDAError("cudaMalloc dev_iData failed");
cudaMemcpy(dev_iData, idata, sizeof(int)*n, cudaMemcpyHostToDevice);
checkCUDAError("cudaMemcpy dev_iData failed");
timer().startGpuTimer();
for (int i = 0; i < levels; i++) {
int d = pow(2, i);
int d1 = pow(2, i + 1);
int blocknum = ceil(pow2/ d1);
dim3 fullBlocks((blocknum + blockSize - 1) / blockSize);
kernUpSweep << <fullBlocks, blockSize>> > (n, d, d1, dev_iData);
cudaThreadSynchronize();
}
int a = 0;
cudaMemcpy(&dev_iData[pow2 - 1], &a, sizeof(int), cudaMemcpyHostToDevice);
for (int i = levels - 1; i >= 0; i--) {
int d = pow(2, i);
int d1 = pow(2, i + 1);
int blocknum = ceil(pow2 / d1);
dim3 fullBlocks((blocknum + blockSize - 1) / blockSize);
kernDownSweep << <fullBlocks, blockSize>> > (n, d, d1, dev_iData);
cudaThreadSynchronize();
}
timer().endGpuTimer();
cudaMemcpy(odata, dev_iData, sizeof(int)*(n), cudaMemcpyDeviceToHost);
cudaFree(dev_iData);
}
/**
* Performs stream compaction on idata, storing the result into odata.
* All zeroes are discarded.
*
* @param n The number of elements in idata.
* @param odata The array into which to store elements.
* @param idata The array of elements to compact.
* @returns The number of elements remaining after compaction.
*/
int compact(int n, int *odata, const int *idata) {
// TODO
int *dev_bools;
int *dev_indices;
int *dev_odata;
int *dev_idata;
cudaMalloc((void**)&dev_bools, n * sizeof(int));
checkCUDAError("cudaMalloc dev_bools failed");
cudaMalloc((void**)&dev_indices, n * sizeof(int));
checkCUDAError("cudaMalloc dev_indices failed");
cudaMalloc((void**)&dev_odata, n * sizeof(int));
checkCUDAError("cudaMalloc dev_odata failed");
cudaMalloc((void**)&dev_idata, n * sizeof(int));
checkCUDAError("cudaMalloc dev_idata failed");
cudaMemcpy(dev_idata, idata, sizeof(int)*n, cudaMemcpyHostToDevice);
checkCUDAError("cudaMemcpy dev_idata failed");
timer().startGpuTimer();
dim3 fullBlocks((n + blockSize - 1) / blockSize);
StreamCompaction::Common::kernMapToBoolean << <fullBlocks, blockSize >> > (n, dev_bools, dev_idata);
int *indices = new int[n];
int *bools = new int[n];
cudaMemcpy(bools, dev_bools, sizeof(int)*n, cudaMemcpyDeviceToHost);
checkCUDAError("cudaMemcpy dev_bools failed");
timer().endGpuTimer();
scan(n, indices, bools);
timer().startGpuTimer();
cudaMemcpy(dev_indices, indices, sizeof(int)*n, cudaMemcpyHostToDevice);
checkCUDAError("cudaMemcpy dev_indices failed");
StreamCompaction::Common::kernScatter << <fullBlocks, blockSize >> > (n, dev_odata, dev_idata, dev_bools, dev_indices);
timer().endGpuTimer();
int count;
cudaMemcpy(&count, &dev_indices[n-1], sizeof(int), cudaMemcpyDeviceToHost);
checkCUDAError("cudaMemcpy dev_indices failed");
int lastBool;
cudaMemcpy(&lastBool, &dev_bools[n - 1], sizeof(int), cudaMemcpyDeviceToHost);
checkCUDAError("cudaMemcpy dev_bools failed");
cudaMemcpy(odata, dev_odata, n * sizeof(int), cudaMemcpyDeviceToHost);
checkCUDAError("cudaMemcpy dev_bools failed");
cudaFree(dev_bools);
cudaFree(dev_indices);
cudaFree(dev_idata);
cudaFree(dev_odata);
delete[] bools;
delete[] indices;
return count + lastBool;
}
}
}
|
467f888ee7d40a25f92c3fde60648e6fcf6b7c01.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@precisions normal z -> s d c
*/
#include "magma_internal.h"
#include "commonblas_z.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
/***************************************************************************//**
Purpose
-------
ZLAQPS computes a step of QR factorization with column pivoting
of a complex M-by-N matrix A by using Blas-3. It tries to factorize
NB columns from A starting from the row OFFSET+1, and updates all
of the matrix with Blas-3 xGEMM.
In some cases, due to catastrophic cancellations, it cannot
factorize NB columns. Hence, the actual number of factorized
columns is returned in KB.
Block A(1:OFFSET,1:N) is accordingly pivoted, but not factorized.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0
@param[in]
offset INTEGER
The number of rows of A that have been factorized in
previous steps.
@param[in]
nb INTEGER
The number of columns to factorize.
@param[out]
kb INTEGER
The number of columns actually factorized.
@param[in,out]
dA COMPLEX*16 array, dimension (LDDA,N)
On entry, the M-by-N matrix A.
On exit, block A(OFFSET+1:M,1:KB) is the triangular
factor obtained and block A(1:OFFSET,1:N) has been
accordingly pivoted, but not factorized.
The rest of the matrix, block A(OFFSET+1:M,KB+1:N) has
been updated.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[in,out]
jpvt INTEGER array, dimension (N)
JPVT(I) = K <==> Column K of the full matrix A has been
permuted into position I in AP.
@param[out]
dtau COMPLEX*16 array, dimension (KB)
The scalar factors of the elementary reflectors.
@param[in,out]
dvn1 DOUBLE PRECISION array, dimension (N)
The vector with the partial column norms.
@param[in,out]
dvn2 DOUBLE PRECISION array, dimension (N)
The vector with the exact column norms.
@param[in,out]
dauxv COMPLEX*16 array, dimension (NB)
Auxiliary vector.
@param[in,out]
dF COMPLEX*16 array, dimension (LDDF,NB)
Matrix F**H = L * Y**H * A.
@param[in]
lddf INTEGER
The leading dimension of the array F. LDDF >= max(1,N).
@param
dlsticcs TODO: undocumented
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_laqps
*******************************************************************************/
extern "C" magma_int_t
magma_zlaqps2_gpu(
magma_int_t m, magma_int_t n, magma_int_t offset,
magma_int_t nb, magma_int_t *kb,
magmaDoubleComplex_ptr dA, magma_int_t ldda,
magma_int_t *jpvt,
magmaDoubleComplex_ptr dtau,
magmaDouble_ptr dvn1, magmaDouble_ptr dvn2,
magmaDoubleComplex_ptr dauxv,
magmaDoubleComplex_ptr dF, magma_int_t lddf,
magmaDouble_ptr dlsticcs,
magma_queue_t queue )
{
#define dA(i_, j_) (dA + (i_) + (j_)*(ldda))
#define dF(i_, j_) (dF + (i_) + (j_)*(lddf))
/* Constants */
const magmaDoubleComplex c_zero = MAGMA_Z_MAKE( 0.,0.);
const magmaDoubleComplex c_one = MAGMA_Z_MAKE( 1.,0.);
const magmaDoubleComplex c_neg_one = MAGMA_Z_MAKE(-1.,0.);
const magma_int_t ione = 1;
/* Local variables */
magma_int_t i__1, i__2;
magma_int_t k, rk;
magmaDoubleComplex tauk;
magma_int_t pvt, itemp;
double tol3z;
magmaDoubleComplex_ptr dAkk = dauxv;
dauxv += nb;
double lsticc;
tol3z = magma_dsqrt( lapackf77_dlamch("Epsilon"));
lsticc = 0;
k = 0;
while( k < nb && lsticc == 0 ) {
rk = offset + k;
/* Determine ith pivot column and swap if necessary */
pvt = k - 1 + magma_idamax( n-k, &dvn1[k], ione, queue );
if (pvt != k) {
magmablas_zswap( k+1, dF(pvt,0), lddf, dF(k,0), lddf, queue );
itemp = jpvt[pvt];
jpvt[pvt] = jpvt[k];
jpvt[k] = itemp;
magma_dswap( 2, &dvn1[pvt], n+offset, &dvn1[k], n+offset, queue );
magmablas_zswap( m, dA(0,pvt), ione, dA(0, k), ione, queue );
}
/* Apply previous Householder reflectors to column K:
A(RK:M,K) := A(RK:M,K) - A(RK:M,1:K-1)*F(K,1:K-1)'.
Optimization: multiply with beta=0; wait for vector and subtract */
if (k > 0) {
magmablas_zgemv_conj( m-rk, k,
c_neg_one, dA(rk, 0), ldda,
dF(k, 0), lddf,
c_one, dA(rk, k), ione, queue );
}
/* Generate elementary reflector H(k). */
magma_zlarfg_gpu( m-rk, dA(rk, k), dA(rk + 1, k), &dtau[k], &dvn1[k], &dAkk[k], queue );
magma_zsetvector( 1, &c_one, 1, dA(rk, k), 1, queue );
/* Compute Kth column of F:
Compute F(K+1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) on the GPU */
if (k < n-1 || k > 0 ) {
magma_zgetvector( 1, &dtau[k], 1, &tauk, 1, queue );
}
if (k < n-1) {
magma_zgemv( MagmaConjTrans, m-rk, n-k-1,
tauk, dA( rk, k+1 ), ldda,
dA( rk, k ), 1,
c_zero, dF( k+1, k ), 1, queue );
}
/* Incremental updating of F:
F(1:N,K) := F(1:N,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K).
F(1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K)
:= tau(K)(A(RK:M,K+1:N)' - F(1:N,1:K-1)*A(RK:M,1:K-1)') A(RK:M,K)
so, F is (updated A)*V */
if (k > 0) {
/*z__1 = MAGMA_Z_NEGATE( tauk );
magma_zgemv( MagmaConjTrans, m-rk, k,
z__1, dA(rk, 0), ldda,
dA(rk, k), ione,
c_zero, dauxv, ione, queue ); */
hipLaunchKernelGGL(( magma_zgemv_kernel3)
, dim3(k), dim3(BLOCK_SIZE), 0, queue->cuda_stream() ,
m-rk, dA(rk, 0), ldda, dA(rk, k), dauxv, dtau+k);
/* I think we only need strictly lower-triangular part */
magma_zgemv( MagmaNoTrans, n-k-1, k,
c_one, dF(k+1,0), lddf,
dauxv, ione,
c_one, dF(k+1,k), ione, queue );
}
/* Update the current row of A:
A(RK,K+1:N) := A(RK,K+1:N) - A(RK,1:K)*F(K+1:N,1:K)'. */
if (k < n-1) {
i__1 = n - k - 1;
i__2 = k + 1;
/* left-looking update of rows, *
* since F=A**H v with original A, so no right-looking */
magma_zgemm( MagmaNoTrans, MagmaConjTrans, ione, i__1, i__2,
c_neg_one, dA(rk, 0 ), ldda,
dF(k+1,0 ), lddf,
c_one, dA(rk, k+1), ldda, queue );
}
/* Update partial column norms. */
if (rk < min(m, n+offset)-1) {
magmablas_dznrm2_row_check_adjust( n-k-1, tol3z, &dvn1[k+1],
&dvn2[k+1], dA(rk,k+1), ldda, dlsticcs, queue );
magma_dgetvector( 1, &dlsticcs[0], 1, &lsticc, 1, queue );
}
//*dA(rk, k) = Akk;
//magma_zsetvector( 1, &Akk, 1, dA(rk, k), 1, queue );
//magmablas_zlacpy( MagmaFull, 1, 1, dAkk, 1, dA(rk, k), 1, queue );
++k;
}
// restore the diagonals
magma_zcopymatrix( 1, k, dAkk, 1, dA(offset, 0), ldda+1, queue );
// leave k as the last column done
--k;
*kb = k + 1;
rk = offset + *kb - 1;
/* Apply the block reflector to the rest of the matrix:
A(OFFSET+KB+1:M,KB+1:N) := A(OFFSET+KB+1:M,KB+1:N) -
A(OFFSET+KB+1:M,1:KB)*F(KB+1:N,1:KB)' */
if (*kb < min(n, m - offset)) {
i__1 = m - rk - 1;
i__2 = n - *kb;
magma_zgemm( MagmaNoTrans, MagmaConjTrans, i__1, i__2, *kb,
c_neg_one, dA(rk+1, 0 ), ldda,
dF(*kb, 0 ), lddf,
c_one, dA(rk+1, *kb), ldda, queue );
}
/* Recomputation of difficult columns. */
if ( lsticc > 0 ) {
// printf( " -- recompute dnorms --\n" );
magmablas_dznrm2_check( m-rk-1, n-*kb, dA(rk+1,*kb), ldda,
&dvn1[*kb], dlsticcs, queue );
magma_dcopymatrix( n-*kb, 1, &dvn1[*kb], n, &dvn2[*kb], n, queue );
}
return MAGMA_SUCCESS;
} /* magma_zlaqps2_q */
| 467f888ee7d40a25f92c3fde60648e6fcf6b7c01.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@precisions normal z -> s d c
*/
#include "magma_internal.h"
#include "commonblas_z.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
/***************************************************************************//**
Purpose
-------
ZLAQPS computes a step of QR factorization with column pivoting
of a complex M-by-N matrix A by using Blas-3. It tries to factorize
NB columns from A starting from the row OFFSET+1, and updates all
of the matrix with Blas-3 xGEMM.
In some cases, due to catastrophic cancellations, it cannot
factorize NB columns. Hence, the actual number of factorized
columns is returned in KB.
Block A(1:OFFSET,1:N) is accordingly pivoted, but not factorized.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0
@param[in]
offset INTEGER
The number of rows of A that have been factorized in
previous steps.
@param[in]
nb INTEGER
The number of columns to factorize.
@param[out]
kb INTEGER
The number of columns actually factorized.
@param[in,out]
dA COMPLEX*16 array, dimension (LDDA,N)
On entry, the M-by-N matrix A.
On exit, block A(OFFSET+1:M,1:KB) is the triangular
factor obtained and block A(1:OFFSET,1:N) has been
accordingly pivoted, but not factorized.
The rest of the matrix, block A(OFFSET+1:M,KB+1:N) has
been updated.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[in,out]
jpvt INTEGER array, dimension (N)
JPVT(I) = K <==> Column K of the full matrix A has been
permuted into position I in AP.
@param[out]
dtau COMPLEX*16 array, dimension (KB)
The scalar factors of the elementary reflectors.
@param[in,out]
dvn1 DOUBLE PRECISION array, dimension (N)
The vector with the partial column norms.
@param[in,out]
dvn2 DOUBLE PRECISION array, dimension (N)
The vector with the exact column norms.
@param[in,out]
dauxv COMPLEX*16 array, dimension (NB)
Auxiliary vector.
@param[in,out]
dF COMPLEX*16 array, dimension (LDDF,NB)
Matrix F**H = L * Y**H * A.
@param[in]
lddf INTEGER
The leading dimension of the array F. LDDF >= max(1,N).
@param
dlsticcs TODO: undocumented
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_laqps
*******************************************************************************/
extern "C" magma_int_t
magma_zlaqps2_gpu(
magma_int_t m, magma_int_t n, magma_int_t offset,
magma_int_t nb, magma_int_t *kb,
magmaDoubleComplex_ptr dA, magma_int_t ldda,
magma_int_t *jpvt,
magmaDoubleComplex_ptr dtau,
magmaDouble_ptr dvn1, magmaDouble_ptr dvn2,
magmaDoubleComplex_ptr dauxv,
magmaDoubleComplex_ptr dF, magma_int_t lddf,
magmaDouble_ptr dlsticcs,
magma_queue_t queue )
{
#define dA(i_, j_) (dA + (i_) + (j_)*(ldda))
#define dF(i_, j_) (dF + (i_) + (j_)*(lddf))
/* Constants */
const magmaDoubleComplex c_zero = MAGMA_Z_MAKE( 0.,0.);
const magmaDoubleComplex c_one = MAGMA_Z_MAKE( 1.,0.);
const magmaDoubleComplex c_neg_one = MAGMA_Z_MAKE(-1.,0.);
const magma_int_t ione = 1;
/* Local variables */
magma_int_t i__1, i__2;
magma_int_t k, rk;
magmaDoubleComplex tauk;
magma_int_t pvt, itemp;
double tol3z;
magmaDoubleComplex_ptr dAkk = dauxv;
dauxv += nb;
double lsticc;
tol3z = magma_dsqrt( lapackf77_dlamch("Epsilon"));
lsticc = 0;
k = 0;
while( k < nb && lsticc == 0 ) {
rk = offset + k;
/* Determine ith pivot column and swap if necessary */
pvt = k - 1 + magma_idamax( n-k, &dvn1[k], ione, queue );
if (pvt != k) {
magmablas_zswap( k+1, dF(pvt,0), lddf, dF(k,0), lddf, queue );
itemp = jpvt[pvt];
jpvt[pvt] = jpvt[k];
jpvt[k] = itemp;
magma_dswap( 2, &dvn1[pvt], n+offset, &dvn1[k], n+offset, queue );
magmablas_zswap( m, dA(0,pvt), ione, dA(0, k), ione, queue );
}
/* Apply previous Householder reflectors to column K:
A(RK:M,K) := A(RK:M,K) - A(RK:M,1:K-1)*F(K,1:K-1)'.
Optimization: multiply with beta=0; wait for vector and subtract */
if (k > 0) {
magmablas_zgemv_conj( m-rk, k,
c_neg_one, dA(rk, 0), ldda,
dF(k, 0), lddf,
c_one, dA(rk, k), ione, queue );
}
/* Generate elementary reflector H(k). */
magma_zlarfg_gpu( m-rk, dA(rk, k), dA(rk + 1, k), &dtau[k], &dvn1[k], &dAkk[k], queue );
magma_zsetvector( 1, &c_one, 1, dA(rk, k), 1, queue );
/* Compute Kth column of F:
Compute F(K+1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) on the GPU */
if (k < n-1 || k > 0 ) {
magma_zgetvector( 1, &dtau[k], 1, &tauk, 1, queue );
}
if (k < n-1) {
magma_zgemv( MagmaConjTrans, m-rk, n-k-1,
tauk, dA( rk, k+1 ), ldda,
dA( rk, k ), 1,
c_zero, dF( k+1, k ), 1, queue );
}
/* Incremental updating of F:
F(1:N,K) := F(1:N,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K).
F(1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K)
:= tau(K)(A(RK:M,K+1:N)' - F(1:N,1:K-1)*A(RK:M,1:K-1)') A(RK:M,K)
so, F is (updated A)*V */
if (k > 0) {
/*z__1 = MAGMA_Z_NEGATE( tauk );
magma_zgemv( MagmaConjTrans, m-rk, k,
z__1, dA(rk, 0), ldda,
dA(rk, k), ione,
c_zero, dauxv, ione, queue ); */
magma_zgemv_kernel3
<<< k, BLOCK_SIZE, 0, queue->cuda_stream() >>>
(m-rk, dA(rk, 0), ldda, dA(rk, k), dauxv, dtau+k);
/* I think we only need strictly lower-triangular part */
magma_zgemv( MagmaNoTrans, n-k-1, k,
c_one, dF(k+1,0), lddf,
dauxv, ione,
c_one, dF(k+1,k), ione, queue );
}
/* Update the current row of A:
A(RK,K+1:N) := A(RK,K+1:N) - A(RK,1:K)*F(K+1:N,1:K)'. */
if (k < n-1) {
i__1 = n - k - 1;
i__2 = k + 1;
/* left-looking update of rows, *
* since F=A**H v with original A, so no right-looking */
magma_zgemm( MagmaNoTrans, MagmaConjTrans, ione, i__1, i__2,
c_neg_one, dA(rk, 0 ), ldda,
dF(k+1,0 ), lddf,
c_one, dA(rk, k+1), ldda, queue );
}
/* Update partial column norms. */
if (rk < min(m, n+offset)-1) {
magmablas_dznrm2_row_check_adjust( n-k-1, tol3z, &dvn1[k+1],
&dvn2[k+1], dA(rk,k+1), ldda, dlsticcs, queue );
magma_dgetvector( 1, &dlsticcs[0], 1, &lsticc, 1, queue );
}
//*dA(rk, k) = Akk;
//magma_zsetvector( 1, &Akk, 1, dA(rk, k), 1, queue );
//magmablas_zlacpy( MagmaFull, 1, 1, dAkk, 1, dA(rk, k), 1, queue );
++k;
}
// restore the diagonals
magma_zcopymatrix( 1, k, dAkk, 1, dA(offset, 0), ldda+1, queue );
// leave k as the last column done
--k;
*kb = k + 1;
rk = offset + *kb - 1;
/* Apply the block reflector to the rest of the matrix:
A(OFFSET+KB+1:M,KB+1:N) := A(OFFSET+KB+1:M,KB+1:N) -
A(OFFSET+KB+1:M,1:KB)*F(KB+1:N,1:KB)' */
if (*kb < min(n, m - offset)) {
i__1 = m - rk - 1;
i__2 = n - *kb;
magma_zgemm( MagmaNoTrans, MagmaConjTrans, i__1, i__2, *kb,
c_neg_one, dA(rk+1, 0 ), ldda,
dF(*kb, 0 ), lddf,
c_one, dA(rk+1, *kb), ldda, queue );
}
/* Recomputation of difficult columns. */
if ( lsticc > 0 ) {
// printf( " -- recompute dnorms --\n" );
magmablas_dznrm2_check( m-rk-1, n-*kb, dA(rk+1,*kb), ldda,
&dvn1[*kb], dlsticcs, queue );
magma_dcopymatrix( n-*kb, 1, &dvn1[*kb], n, &dvn2[*kb], n, queue );
}
return MAGMA_SUCCESS;
} /* magma_zlaqps2_q */
|
0970d474d098f8ea8cfcd7952cf07da1749d89e3.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/native/sparse/hip/SparseHIPTensorMath.cuh>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/NativeFunctions.h>
#include <ATen/SparseTensorUtils.h>
#include <ATen/native/sparse/SparseTensorMath.h>
#include <ATen/native/sparse/hip/SparseBlasLegacy.h>
#include <ATen/native/sparse/hip/SparseHIPApplyUtils.cuh>
#include <ATen/native/sparse/hip/SparseHIPBlas.h>
#include <ATen/hip/HIPUtils.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/WrapDimUtilsMulti.h>
#include <ATen/ExpandUtils.h>
#include <ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h>
#include <THH/THHThrustAllocator.cuh>
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/binary_search.h>
#include <thrust/sort.h>
#include <thrust/system/hip/execution_policy.h>
#include <bitset>
#include <hipsparse.h>
#include <hip/hip_runtime_api.h>
#include <memory>
#define I_INFO(tensor) cuda::detail::getTensorInfo<int64_t, uint64_t>(tensor)
#define V_INFO(tensor) cuda::detail::getTensorInfo<scalar_t, uint64_t>(tensor)
namespace at { namespace native {
using namespace at::sparse;
using at::cuda::detail::TensorInfo;
using at::cuda::detail::getTensorInfo;
// --------------------------------------------------------------------
// Utility functions
// --------------------------------------------------------------------
namespace {
Tensor _to_csr_int(const Tensor& rowIndices, int64_t dim, int64_t nnz) {
Tensor csr = at::empty({dim+1}, CUDA(kInt));
Tensor rowIndicesInt = at::empty({rowIndices.size(0)}, CUDA(kInt));
rowIndicesInt.copy_(rowIndices);
sparse::cuda::Xcoo2csr(rowIndicesInt.data_ptr<int32_t>(), nnz, dim, csr.data_ptr<int32_t>());
return csr;
}
}
// NB: Deleted spaddcmul (aka addcmul_, but not actually wired up), spaddcdiv (not
// wired at all)
void s_addmm_out_sparse_dense_cuda_worker(int64_t nnz, int64_t m, int64_t n, int64_t k, Tensor& r_, const Scalar& beta, const Tensor& t, const Scalar& alpha, Tensor& indices, Tensor& values, const Tensor& dense) {
Tensor rowIndices = indices.select(0, 0);
Tensor colIndices = indices.select(0, 1);
Tensor crow_indices = _to_csr_int(rowIndices, m, nnz);
Tensor col_indices = at::empty({colIndices.size(0)}, indices.options().dtype(kInt));
col_indices.copy_(colIndices);
s_addmm_out_csr_sparse_dense_cuda_worker(nnz, m, n, k, r_, beta, t, alpha, crow_indices, col_indices, values, dense);
}
// --------------------------------------------------------------------
// addmm(Tensor, SparseTensor, Tensor, Scalar, Scalar) [broadcasts]
// --------------------------------------------------------------------
Tensor& s_addmm_out_sparse_dense_cuda(Tensor& r_, const Tensor& t, const SparseTensor& sparse_, const Tensor& dense, const Scalar& beta, const Scalar& alpha) {
TORCH_CHECK(t.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(sparse_.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'mat1' to be CUDA, but got CPU");
TORCH_CHECK(dense.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'mat2' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({sparse_, r_, t, dense}));
TORCH_CHECK(dense.dim() == 2, "addmm: 2D tensor expected, got ", dense.dim(), "D tensor");
TORCH_CHECK(sparse_.sparse_dim() == 2, "addmm: expected first two dims to be sparse (indices has size 2 at first dim), but got ", sparse_.sparse_dim(), " sparse dims");
// no need to check dense_dim because dense_dim + sparse_dim = dim
// mxk * kxn = mxn
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
TORCH_CHECK(t.size(0) == m,
"addmm: Argument #1 (t): Expected dim 0 size ", m, ", got ", t.size(0));
TORCH_CHECK(t.size(1) == n,
"addmm: Argument #1 (t): Expected dim 1 size ", n, ", got ", t.size(1));
TORCH_CHECK(dense.size(0) == k,
"addmm: Argument #3 (dense): Expected dim 0 size ", k, ", got ", dense.size(0));
r_.resize_({m, n});
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
Tensor indices = sparse._indices();
Tensor values = sparse._values();
if (nnz == 0) {
at::mul_out(r_, t, at::scalar_tensor(beta, r_.options()));
return r_;
}
s_addmm_out_sparse_dense_cuda_worker(nnz, m, n, k, r_, beta, t, alpha, indices, values, dense);
return r_;
}
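// Minimal illustrative sketch (helper name, shapes and values below are made up;
// assumes a CUDA device is available): at::addmm with a sparse COO mat1 on CUDA
// reaches the functions above, computing r = beta * t + alpha * (mat1 @ mat2).
inline void addmm_sparse_dense_cuda_example() {
  auto opts = at::TensorOptions().dtype(at::kFloat).device(at::kCUDA);
  auto indices = at::zeros({2, 3}, opts.dtype(at::kLong)); // 3 nnz, all at (0, 0)
  auto values = at::ones({3}, opts);
  auto mat1 = at::sparse_coo_tensor(indices, values, {4, 5}, opts.layout(at::kSparse));
  auto mat2 = at::randn({5, 6}, opts);
  auto t = at::zeros({4, 6}, opts);
  auto r = at::addmm(t, mat1, mat2, /*beta=*/1, /*alpha=*/1);
  (void)r;
}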
Tensor& addmm_out_sparse_dense_cuda(
const Tensor& self,
const SparseTensor& mat1,
const Tensor& mat2,
const Scalar& beta,
const Scalar& alpha,
Tensor& result
) {
c10::MaybeOwned<Tensor> b_self = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out");
return s_addmm_out_sparse_dense_cuda(result, *b_self, mat1, mat2, beta, alpha);
}
Tensor s_addmm_sparse_dense_cuda(
const Tensor& t,
const SparseTensor& sparse,
const Tensor& dense,
const Scalar& beta,
const Scalar& alpha
) {
Tensor r = at::empty({0}, t.options());
s_addmm_out_sparse_dense_cuda(r, t, sparse, dense, beta, alpha);
return r;
}
Tensor addmm_sparse_dense_cuda(
const Tensor& self,
const SparseTensor& mat1,
const Tensor& mat2,
const Scalar& beta,
const Scalar& alpha
) {
c10::MaybeOwned<Tensor> b_self = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out");
return s_addmm_sparse_dense_cuda(*b_self, mat1, mat2, beta, alpha);
}
Tensor& s_addmm_sparse_dense_cuda_(
Tensor& t,
const SparseTensor& sparse,
const Tensor& dense,
const Scalar& beta,
const Scalar& alpha
) {
return s_addmm_out_sparse_dense_cuda(t, t, sparse, dense, beta, alpha);
}
// NB: Purposely no broadcasting version of addmm inplace
// Deleted sspaddmm (sparse, dense) -> sparse
// --------------------------------------------------------------------
// hspmm(SparseTensor mat1, Tensor mat2)
// --------------------------------------------------------------------
SparseTensor& hspmm_out_sparse_cuda(
const SparseTensor& sparse_,
const Tensor& dense,
SparseTensor& r_
/* , const Scalar& alpha */) {
TORCH_CHECK(sparse_.is_cuda(), "hspmm: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "hspmm: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(dense.is_cuda(), "hspmm: expected 'mat2' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, sparse_, dense}));
TORCH_CHECK(sparse_.sparse_dim() == 2,
"hspmm: Argument #2: 2D tensor expected, got ", sparse_.sparse_dim(), "D tensor");
TORCH_CHECK(sparse_.dense_dim() == 0,
"hspmm: Argument #2: scalar values expected, got ", sparse_.dense_dim(), "D values");
TORCH_CHECK(dense.dim() == 2,
"hspmm: Argument #3: 2D tensor expected, got ", dense.dim(), "D tensor");
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
TORCH_CHECK(dense.size(0) == k,
"hspmm: Argument #3: Expected dim 0 size ", k, ", got ", dense.size(0));
get_sparse_impl(r_)->resize_and_clear_(1, 1, {m, n});
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
Tensor indices = at::empty({1, nnz}, CUDA(kLong));
// create values in column-major format to avoid copying in spaddmm
Tensor values = at::empty({n, nnz}, dense.options());
values.transpose_(0, 1);
// why does sparse need to be cloned? If this is really necessary maybe we
// need to fuse this with newCoalesce
SparseTensor newSparse = sparse.clone();
Tensor spIndices = newSparse._indices();
Tensor dstIndices = spIndices.select(0, 0);
// Save destination indices to output hybrid tensor
indices.copy_(dstIndices);
// Replace destination indices with 0, 1, 2, 3, ... and compute output values
// tensor with sparse * dense multiplication
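// Added illustration (hypothetical values, not part of the original source):
// if the coalesced input has row indices [0, 0, 2] with columns c0, c1, c2 and
// values v0, v1, v2, then `indices` keeps [0, 0, 2], the sequence call below
// rewrites the destination indices to [0, 1, 2], and the addmm below produces
// value row i = v_i * dense[c_i, :]. The saved `indices` then map each of
// those partial rows back to its row of the m x n product in the hybrid result.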
thrust::device_ptr<int64_t> indicesIter(dstIndices.data_ptr<int64_t>());
thrust::sequence(policy, indicesIter, indicesIter + nnz);
std::vector<int64_t> new_size = get_sparse_impl(newSparse)->sizes().vec();
new_size[0] = nnz;
get_sparse_impl(newSparse)->raw_resize_(get_sparse_impl(newSparse)->sparse_dim(), get_sparse_impl(newSparse)->dense_dim(), new_size);
s_addmm_out_sparse_dense_cuda(values, values, newSparse, dense, 0, /*alpha*/ 1);
get_sparse_impl(r_)->set_indices_and_values_unsafe(indices, values);
return r_;
}
SparseTensor hspmm_sparse_cuda(const SparseTensor& sparse, const Tensor& dense) {
SparseTensor r = at::empty({0}, sparse.options());
hspmm_out_sparse_cuda(sparse, dense, r);
return r;
}
// --------------------------------------------------------------------
// add(Tensor, SparseTensor, Scalar)
// formerly known as spcadd
// --------------------------------------------------------------------
template <typename T>
struct TensorCAddOp {
TensorCAddOp(T v) : val(v) {}
__device__ __forceinline__ void operator()(T* out, T* in) {
*out += val * *in;
}
__device__ __forceinline__ void operator()(T* out, T* in1, T* in2) {
*out = *in1 + val * *in2;
}
T val;
};
Tensor& add_out_dense_sparse_cuda(Tensor& r_, const Tensor& dense, const SparseTensor& sparse, const at::Scalar& value) {
TORCH_CHECK(dense.is_cuda(), "add: expected 'self' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(sparse.is_cuda(), "add: expected 'other' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(r_.is_cuda(), "add: expected 'out' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(cuda::check_device({sparse, r_, dense}));
TORCH_CHECK(dense.sizes().equals(sparse.sizes()), "add: expected 'self' and 'other' to have same size, but self has size ",
dense.sizes(), " while other has size ", sparse.sizes(), " (FYI: dense-sparse addition does not currently support broadcasting)");
const int64_t nnz = sparse._nnz();
if (nnz == 0) {
r_.resize_as_(dense);
r_.copy_(dense);
return r_;
}
auto commonDtype = at::result_type(dense, sparse);
TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type());
Tensor r = r_;
if (r_.scalar_type() != commonDtype) {
r = at::empty_like(dense, r_.options().dtype(commonDtype));
}
Tensor dense_buffer = dense.to(commonDtype);
Tensor values = sparse._values().to(commonDtype);
if (is_same_tensor(r, dense_buffer)) {
TORCH_CHECK(r_.is_contiguous(), "add: CUDA dense-sparse addition with a non-contiguous output tensor does not work; shout if you need it (see https://github.com/pytorch/pytorch/issues/1521 )");
} else {
r.resize_as_(dense);
r.copy_(dense_buffer);
}
Tensor indices = sparse._indices();
int64_t nDim = dense.dim();
int64_t nDimI = sparse.sparse_dim();
if (values.numel() == 0) {
return r_;
}
if (sparse.is_coalesced()) {
// TODO benchmark to decide whether to remove this special case
const dim3 block = cuda::getApplyBlock();
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
if (sparse.dense_dim() == 0) {
TORCH_CHECK(cuda::getApplyGrid(nnz, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_dense_sparse_cuda", [&] {
hipLaunchKernelGGL(( apply::sparseElementwiseKernelScalar), dim3(grid), dim3(block), 0, stream,
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
C10_HIP_KERNEL_LAUNCH_CHECK();
});
} else {
TORCH_CHECK(cuda::getApplyGrid(nnz * block.x, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
// sparseElementwiseKernel needs values to be contiguous too
values = values.contiguous();
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_dense_sparse_cuda", [&] {
hipLaunchKernelGGL(( apply::sparseElementwiseKernel<TensorCAddOp<scalar_t>, uint64_t, scalar_t>)
, dim3(grid), dim3(block), 0, stream,
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
} else {
Tensor indices1D = flatten_indices(indices, sparse.sizes(), 0);
int64_t view_rows = 1;
int64_t view_columns = 1;
for (int i = 0; i < nDimI; i++) {
view_rows *= r.size(i);
}
for (int i = nDimI; i < nDim; i++) {
view_columns *= r.size(i);
}
Tensor r_view = r.view({view_rows, view_columns});
values = values.reshape({nnz, view_columns});
r_view.index_add_(0, indices1D, values, value);
}
THCudaCheck(hipGetLastError());
r_.copy_(r);
return r_;
}
// --------------------------------------------------------------------
// add(SparseTensor, SparseTensor, Scalar) [broadcasts]
// --------------------------------------------------------------------
Tensor& add_out_dense_sparse_cuda(Tensor& r, const Tensor& dense, const SparseTensor& sparse_, const Scalar& value);
SparseTensor& add_out_sparse_cuda(const SparseTensor& t, const SparseTensor& src, const Scalar& value, SparseTensor& r_) {
if (!t.is_sparse()) {
return add_out_dense_sparse_cuda(r_, t, src, value);
}
// TODO: This test seems a bit goofy
TORCH_CHECK(src.is_sparse(), "add(sparse, dense) is not supported. Use add(dense, sparse) instead.");
TORCH_CHECK(t.is_cuda(), "add: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(src.is_cuda(), "add: expected 'other' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "add: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, t, src}));
auto commonDtype = at::result_type(t, src);
TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type());
TORCH_CHECK(t.sizes().equals(src.sizes()), "add: expected 'self' and 'other' to have same size, but ", t.sizes(), " != ", src.sizes());
if (src._nnz() == 0) {
return copy_sparse_to_sparse_(r_, t);
}
if (t._nnz() == 0) {
return mul_out_sparse_scalar(r_, src, value);
}
TORCH_CHECK(is_same_density(t, src), "add: expected 'self' and 'other' to have same density, but 'self' has ", t.sparse_dim(), " sparse dimensions while 'other' has ", src.sparse_dim(), " sparse dimensions");
// We deliberately choose to simply concat the indices and values tensors
// rather than merging them. This removes the need to synchronously fetch nnz
// at the end of the operation, at the cost of having a non-coalesced result.
// This trade-off is preferable for the common use-case of gradient accumulation.
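// Added illustration (hypothetical, not from the original source): adding two
// coalesced tensors with nnz 5 and nnz 3 simply yields a result with nnz 8
// that may contain duplicate indices; callers that need a coalesced result
// (or the true nnz) coalesce afterwards, as the growth guard near the end of
// this function does.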
Tensor t_indices_ = t._indices();
Tensor s_indices_ = src._indices();
Tensor t_values_ = t._values().to(commonDtype);
Tensor s_values_ = src._values().to(commonDtype);
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_sparse_cuda", [&] {
if (value.to<scalar_t>() != scalar_t(1)) {
s_values_ = s_values_.mul(value);
}
});
Tensor r_indices_ = at::cat({t_indices_, s_indices_}, 1);
Tensor r_values_ = at::cat({t_values_, s_values_}, 0);
if (r_.scalar_type() != commonDtype) {
SparseTensor promoted = at::empty({0}, r_.options().dtype(commonDtype));
promoted.resize_as_(src);
alias_into_sparse(promoted, r_indices_, r_values_);
// performs the addition under the common dtype.
promoted = promoted.coalesce();
r_values_ = promoted._values().to(r_.scalar_type());
r_indices_ = promoted._indices();
} else {
r_.resize_as_(src);
}
alias_into_sparse(r_, r_indices_, r_values_);
// Prevent unbounded growth of nnz
// TODO: Improved heuristic on when to coalesce or remove need to coalesce
if (r_._nnz() > r_.numel()) {
auto c = r_.coalesce();
alias_into_sparse(r_, c._indices(), c._values());
}
return r_;
}
// --------------------------------------------------------------------
// mul(SparseTensor, SparseTensor) [broadcasts]
// --------------------------------------------------------------------
template <typename T>
struct TensorMulOp {
__device__ __forceinline__ void operator()(T* out, T* in) {
*out *= *in;
}
__device__ __forceinline__ void operator()(T* out, T* in1, T* in2) {
*out = *in1 * *in2;
}
};
SparseTensor& mul_out_sparse_cuda(const SparseTensor& t_, const SparseTensor& src_, SparseTensor& r_) {
if (src_.dim() == 0) {
return mul_out_sparse_zerodim(r_, t_, src_);
} else if (t_.dim() == 0) {
return mul_out_sparse_zerodim(r_, src_, t_);
}
TORCH_CHECK(t_.is_cuda(), "mul: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(src_.is_cuda(), "mul: expected 'other' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "mul: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, t_, src_}));
TORCH_CHECK(t_.sizes().equals(src_.sizes()), "mul: expected 'self' and 'other' to have same size, but ", t_.sizes(), " != ", src_.sizes());
SparseTensor t = t_.coalesce();
SparseTensor src = src_.coalesce();
if (src_._nnz() == 0 || t_._nnz() == 0) {
r_.resize_as_(src_);
return r_.zero_();
}
// save these nnz counts now because they can be overwritten when doing in-place operations
int64_t t_nnz = t._nnz(), s_nnz = src._nnz();
int64_t max_nnz = ::min(t_nnz, s_nnz); // multiply by zero is zero, and can be dropped
int64_t sparse_dim = src.sparse_dim();
auto commonDtype = at::result_type(t, src);
TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type());
Tensor t_indices_ = t._indices().contiguous();
Tensor t_values_ = t._values().to(commonDtype);
Tensor s_indices_ = src._indices().contiguous();
Tensor s_values_ = src._values().to(commonDtype);
Tensor r_indices_ = at::empty({sparse_dim, max_nnz}, t_indices_.options());
r_.resize_as_(src);
Tensor r_values_ = new_values_with_size_of(t_values_, max_nnz).zero_();
int64_t valueSize = t_values_.stride(0);
const dim3 block = dim3(::min(static_cast<int64_t>(cuda::getApplyBlock().x), valueSize));
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
TORCH_CHECK(cuda::getApplyGrid(valueSize, grid, curDevice), "mul: Argument #0: tensor too large or too many dimensions");
Tensor resultNnz = at::empty({1}, CUDA(kLong));
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, commonDtype, "mul_out_sparse_cuda", [&] {
hipLaunchKernelGGL(( apply::valueSparseIntersectionKernel), dim3(grid), dim3(block), 0, stream,
TensorMulOp<scalar_t>(),
I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
V_INFO(r_values_), V_INFO(t_values_), V_INFO(s_values_),
static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz));
C10_HIP_KERNEL_LAUNCH_CHECK();
hipLaunchKernelGGL(( apply::indexSparseIntersectionKernel<uint64_t, scalar_t>)
, dim3(1), dim3(1), 0, stream,
I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
// reinterpret_cast shenanigans, because we don't actually have
// unsigned tensors...
static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz), reinterpret_cast<uint64_t*>(resultNnz.data_ptr()));
C10_HIP_KERNEL_LAUNCH_CHECK();
});
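// Added commentary (my reading of the apply:: kernels above, not text from
// the original file): the value kernel walks the two sorted index lists and,
// at every matching index, applies TensorMulOp to fill r_values_, parallelized
// over the value slice; the single-thread index kernel repeats the merge to
// write the intersected indices into r_indices_ and the resulting nnz into
// resultNnz.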
r_values_ = r_values_.to(r_.scalar_type());
get_sparse_impl(r_)->set_indices_and_values_unsafe(r_indices_, r_values_);
// sync! (surely there is a more idiomatic way to do this...)
Tensor cpu_resultNnz = at::empty({1}, CPU(kLong));
cpu_resultNnz.copy_(resultNnz);
get_sparse_impl(r_)->set_nnz_and_narrow(cpu_resultNnz.accessor<int64_t, 1>()[0]);
return r_._coalesced_(true);
}
// --------------------------------------------------------------------
// sparse.sum() backward
//
// see NOTE [ sparse.sum() backward ]
// --------------------------------------------------------------------
template <typename scalar_t>
#if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_2(cuda::getApplyBlockSize(), cuda::getApplyBlocksPerSM())
#endif
__global__ void _sparse_sum_backward_cuda_kernel(
int64_t total_threads,
const TensorInfo<int64_t, int64_t> grad_indices_ti,
const TensorInfo<int64_t, int64_t> input_indices_ti,
const TensorInfo<int64_t, int64_t> input_indices_pos_ti,
const TensorInfo<scalar_t, int64_t> grad_values_expand_ti,
TensorInfo<scalar_t, int64_t> grad_input_values_ti) {
const int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= total_threads) return;
const int64_t j = input_indices_pos_ti.data[i];
bool has_match = false;
if (grad_indices_ti.data[j] == input_indices_ti.data[i]) {
has_match = true;
}
int64_t grad_input_values_stride0 = grad_input_values_ti.strides[0];
int64_t out_start = i * grad_input_values_stride0;
int64_t out_end = (i + 1) * grad_input_values_stride0;
int64_t in_start = j * grad_values_expand_ti.strides[0];
if (has_match) {
for (int64_t out_i = out_start, in_i = in_start; out_i < out_end; out_i++, in_i++) {
grad_input_values_ti.data[out_i] = grad_values_expand_ti.data[in_i];
}
}
else {
for (int64_t out_i = out_start; out_i < out_end; out_i++) {
grad_input_values_ti.data[out_i] = scalar_t(0);
}
}
}
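// Added commentary (not from the original source): in the kernel above,
// thread i handles the i-th input nonzero and j is the lower_bound position
// of its flattened index within the flattened, sorted grad indices (computed
// on the host below). When the indices match, the corresponding slice of
// grad_values_expand is copied into grad_input_values; otherwise the slice is
// zero-filled, because that input element received no gradient.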
Tensor _sparse_sum_backward_cuda(const Tensor& grad_, const SparseTensor& input_, IntArrayRef dims_to_sum) {
TORCH_CHECK(grad_.is_cuda(), "_sparse_sum_backward_cuda: expected 'grad_' to be CUDA tensor, but got CPU tensor");
TORCH_CHECK(input_.is_cuda(), "_sparse_sum_backward_cuda: expected 'input_' to be CUDA tensor, but got CPU tensor");
auto input = input_.coalesce();
const int64_t input_dim = input.dim();
auto dims_to_sum_b = dim_list_to_bitset(dims_to_sum, input_dim);
auto dims_to_sum_v = dims_to_sum.vec();
maybe_wrap_dims(dims_to_sum_v, input_dim);
Tensor input_indices = input._indices();
Tensor input_values = input._values();
IntArrayRef input_sizes = input.sizes();
const int64_t input_sparse_dim = input.sparse_dim();
const int64_t input_dense_dim = input.dense_dim();
const int64_t input_nnz = input._nnz();
int64_t sparse_dims_to_sum_size = 0;
auto sparse_dims_to_keep_v = std::vector<int64_t>();
auto dense_dims_to_sum_v = std::vector<int64_t>();
for (int64_t d = 0; d < input_dim; d++) {
if (dims_to_sum_b[d]) {
if (d < input_sparse_dim) sparse_dims_to_sum_size ++;
else dense_dims_to_sum_v.emplace_back(d + 1 - input_sparse_dim);
}
else {
if (d < input_sparse_dim) sparse_dims_to_keep_v.emplace_back(d);
}
}
const bool sum_all_sparse_dim = (input_sparse_dim == sparse_dims_to_sum_size);
const bool sum_dense_dim = (dense_dims_to_sum_v.size() > 0);
const bool sum_sparse_dim = (sparse_dims_to_sum_size > 0);
if (sum_all_sparse_dim) {
TORCH_CHECK(!grad_.is_sparse(), "_sparse_sum_backward_cuda: expected grad Tensor to be dense since all sparse dims are summed");
auto grad_input_values = grad_;
auto expand_size = input_values.sizes().vec();
if (sum_dense_dim) {
auto dense_expand_size = std::vector<int64_t>(expand_size);
dense_expand_size.erase(dense_expand_size.begin()); // remove nnz dim
for (auto d : dense_dims_to_sum_v) grad_input_values = grad_input_values.unsqueeze(d - 1); // -1 since grad has no nnz dim
grad_input_values = grad_input_values.expand(dense_expand_size);
}
grad_input_values = grad_input_values.expand(expand_size).clone(at::MemoryFormat::Contiguous);
return at::_sparse_coo_tensor_with_dims_and_tensors(input_sparse_dim, input_dense_dim, input_sizes, input_indices.clone(at::MemoryFormat::Contiguous), grad_input_values, input.options().dtype(grad_.dtype())); // convert to grad dtype
}
else {
TORCH_CHECK(grad_.is_sparse(), "_sparse_sum_backward_cuda: expected grad_ Tensor to be sparse, but got dense");
auto grad = grad_.coalesce();
Tensor grad_indices = grad._indices();
Tensor grad_values = grad._values();
const int64_t grad_sparse_dim = grad.sparse_dim();
const int64_t grad_nnz = grad._nnz();
Tensor grad_values_expand = grad_values;
if (sum_dense_dim) {
auto expand_size = input_values.sizes().vec();
if (sum_sparse_dim) expand_size[0] = grad_values.size(0); // update nnz
for (auto d : dense_dims_to_sum_v) grad_values_expand = grad_values_expand.unsqueeze(d);
grad_values_expand = grad_values_expand.expand(expand_size).clone(at::MemoryFormat::Contiguous);
}
Tensor grad_input_values;
if (!sum_sparse_dim) {
grad_input_values = grad_values_expand;
}
else {
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
typedef thrust::device_ptr<int64_t> thrust_ptr;
grad_input_values = at::empty_like(input_values, grad_values.options(), LEGACY_CONTIGUOUS_MEMORY_FORMAT);
AT_ASSERT(grad_input_values.is_cuda());
// get 1D indices
auto grad_sparse_dim_to_keep_v = std::vector<int64_t>(grad_sparse_dim);
std::iota(grad_sparse_dim_to_keep_v.begin(), grad_sparse_dim_to_keep_v.end(), 0);
auto grad_indices_1D = flatten_indices_by_dims(grad_indices, grad.sizes(), grad_sparse_dim_to_keep_v); // flatten indices over all sparse dims of grad; the resulting indices are coalesced and sorted
auto input_indices_1D = flatten_indices_by_dims(input_indices, input_sizes, sparse_dims_to_keep_v);
thrust_ptr grad_indices_iter(grad_indices_1D.data_ptr<int64_t>());
thrust_ptr input_indices_iter(input_indices_1D.data_ptr<int64_t>());
// store lower_bound of input indices at grad indices
Tensor input_indices_pos = at::empty_like(input_indices_1D, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
thrust_ptr input_indices_pos_iter(input_indices_pos.data_ptr<int64_t>());
thrust::lower_bound(policy,
grad_indices_iter, grad_indices_iter + grad_nnz,
input_indices_iter, input_indices_iter + input_nnz,
input_indices_pos_iter);
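// Illustrative (hypothetical) values: with input_indices_1D = [0, 1, 3, 3]
// and grad_indices_1D = [0, 3], lower_bound yields input_indices_pos =
// [0, 1, 1, 1]; entries 0, 2 and 3 then match inside the kernel and copy
// their gradient slices, while entry 1 is zero-filled.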
// config to run cuda kernel
int64_t total_threads = input_nnz;
const dim3 block = dim3(::min(static_cast<int64_t>(cuda::getApplyBlock().x), total_threads));
dim3 grid;
TORCH_CHECK(cuda::getApplyGrid(total_threads, grid, curDevice), "_sparse_sum_backward_cuda: input too large or too many dimensions");
auto grad_indices_ti = getTensorInfo<int64_t, int64_t>(grad_indices_1D);
auto input_indices_ti = getTensorInfo<int64_t, int64_t>(input_indices_1D);
auto input_indices_pos_ti = getTensorInfo<int64_t, int64_t>(input_indices_pos);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, grad_values.scalar_type(), "_sparse_sum_backward_cuda", [&] {
auto grad_values_expand_ti = getTensorInfo<scalar_t, int64_t>(grad_values_expand);
auto grad_input_values_ti = getTensorInfo<scalar_t, int64_t>(grad_input_values);
hipLaunchKernelGGL(( _sparse_sum_backward_cuda_kernel<scalar_t>), dim3(grid), dim3(block), 0, stream,
total_threads,
grad_indices_ti,
input_indices_ti,
input_indices_pos_ti,
grad_values_expand_ti,
grad_input_values_ti
);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
return at::_sparse_coo_tensor_with_dims_and_tensors(input_sparse_dim, input_dense_dim, input_sizes, input_indices.clone(at::MemoryFormat::Contiguous), grad_input_values, grad.options());
}
}
Tensor bmm_sparse_cuda(const SparseTensor& self, const Tensor& mat2) {
Tensor result = at::empty({self.size(0), mat2.size(2), self.size(1)}, mat2.options(), at::MemoryFormat::Contiguous);
return bmm_out_sparse_cuda(self, mat2, result);
}
#if !(defined(__HIP_PLATFORM_HCC__) || (defined(_MSC_VER) && CUSPARSE_VERSION < 11000))
__global__ void search_end_matrix_indices_cuda_kernel(
int64_t* mat_el_end_indices,
int64_t num_matrices,
const TensorInfo<int64_t, int64_t> indices_1D_ti,
const int64_t num_elements
){
const int64_t target_mat_num = blockIdx.x * blockDim.x + threadIdx.x;
if (target_mat_num >= num_matrices) return;
const int64_t* indices_1D = indices_1D_ti.data;
const int64_t indices_1D_stride = indices_1D_ti.strides[0];
int64_t start_idx = 0;
int64_t end_idx = num_elements - 1;
int64_t mid_idx = (start_idx + end_idx) >> 1;
int64_t mid_val = indices_1D[mid_idx*indices_1D_stride];
bool found;
while (start_idx <= end_idx) {
bool trim_right = mid_val > target_mat_num;
int64_t mid_idx_minus_1 = mid_idx - 1;
int64_t mid_idx_plus_1 = mid_idx + 1;
end_idx = trim_right ? mid_idx_minus_1 : end_idx;
start_idx = trim_right ? start_idx : mid_idx_plus_1;
mid_idx = (start_idx + end_idx) >> 1;
mid_val = indices_1D[mid_idx*indices_1D_stride];
}
found = (mid_val == target_mat_num)
&& (
(mid_idx == (num_elements-1))
|| (indices_1D[(mid_idx+1)*indices_1D_stride] != target_mat_num)
);
mat_el_end_indices[target_mat_num] = found ? mid_idx : -1;
}
// Search through a 1D tensor of sorted sparse matrix
// indices to find the end index for each matrix
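// Added example (hypothetical values): for indices_1D = [0, 0, 1, 3] and
// num_matrices = 4, the kernel yields mat_el_end_indices = [1, 2, -1, 3]:
// the last element index for matrices 0, 1 and 3, and -1 for matrix 2,
// which has no nonzero elements.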
void search_end_matrix_indices(int64_t* mat_el_end_indices, int64_t num_matrices, const Tensor& indices_1D) {
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
auto indices_1D_ti = getTensorInfo<int64_t, int64_t>(indices_1D);
int64_t grid_size = (num_matrices / 64)+1;
int64_t block_size = 64;
int64_t num_elements = indices_1D.size(0);
hipLaunchKernelGGL(( search_end_matrix_indices_cuda_kernel), dim3(grid_size), dim3(block_size), 0, stream,
mat_el_end_indices,
num_matrices,
indices_1D_ti,
num_elements
);
C10_HIP_KERNEL_LAUNCH_CHECK();
hipDeviceSynchronize();
}
hipDataType getTensorCudaDataType(Tensor self) {
hipDataType cuda_data_type;
switch (self.scalar_type()) {
case ScalarType::Float:
cuda_data_type = HIP_R_32F;
break;
case ScalarType::Double:
cuda_data_type = HIP_R_64F;
break;
default:
TORCH_CHECK(false, "Tensor types must be either float32 or float64");
break;
}
return cuda_data_type;
}
#endif
Tensor& bmm_out_sparse_cuda(const SparseTensor& self, const Tensor& mat2, Tensor& result) {
#if defined __HIP_PLATFORM_HCC__
TORCH_CHECK(false, "bmm sparse-dense is not supported on HIP");
#elif defined(_MSC_VER) && (CUSPARSE_VERSION < 11000)
TORCH_CHECK(false, "bmm sparse-dense CUDA is not supported on Windows with cuda before 11.0");
#elif defined(CUDART_VERSION) && (CUDART_VERSION >= 10010) // linux cuda >= 10.1 or windows cuda >= 11.0
TORCH_CHECK(!mat2.is_sparse(), "bmm_sparse: Tensor 'mat2' must be dense");
TORCH_CHECK(self.dense_dim() == 0, "bmm_sparse: Tensor 'self' must have 0 dense dims, but has ", self.dense_dim());
TORCH_CHECK(self.sparse_dim() == 3, "bmm_sparse: Tensor 'self' must have 3 sparse dims, but has ", self.sparse_dim());
TORCH_CHECK(mat2.dim() == 3, "bmm_sparse: Tensor 'mat2' must have 3 dims, but has ", mat2.dim());
TORCH_CHECK(self.size(0) == mat2.size(0), "bmm_sparse: 'self.size(0)' and 'mat2.size(0)' must match");
TORCH_CHECK(self.size(2) == mat2.size(1), "bmm_sparse: 'self.size(2)' and 'mat2.size(1)' must match");
int64_t num_matrices = self.size(0);
int64_t dim_i = self.size(1);
int64_t dim_j = self.size(2);
int64_t dim_k = mat2.size(2);
result.resize_({num_matrices, dim_k, dim_i});
if ((self._nnz() == 0) || (dim_j == 0) || (dim_k == 0)) {
result.zero_().transpose_(1, 2);
return result;
}
Tensor tmp_result;
bool need_copy_result;
// If the result tensor is contiguous, we can just write results directly to it.
// Otherwise, we'll need to write results to a temp buffer and then copy.
if (result.is_contiguous()) {
tmp_result = result;
need_copy_result = false;
} else {
tmp_result = at::empty({num_matrices, dim_k, dim_i}, result.options(), at::MemoryFormat::Contiguous);
need_copy_result = true;
}
// Dense matrices have to be contiguous for hipsparseSpMM to work
const Tensor mat2_contig = mat2.contiguous();
auto cusparse_handle = at::cuda::getCurrentCUDASparseHandle();
// First need to coalesce to get all of the first dimension indices
// in order since we'll be sending each matrix into the MM operation
SparseTensor self_coalesced = self.coalesce();
int64_t nnz = self_coalesced._nnz();
Tensor indices = self_coalesced._indices();
Tensor values = self_coalesced._values();
Tensor indices_dim0 = indices[0];
// Need to convert dim1 and dim2 indices to 32-bit since hipsparseSpMM
// only supports 32-bit indices
Tensor indices_dim1 = indices[1].to(ScalarType::Int);
Tensor indices_dim2 = indices[2].to(ScalarType::Int);
std::unique_ptr<int64_t[]> mat_el_end_indices_host(new int64_t[num_matrices]);
{
auto& allocator = *::c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get();
auto dataPtr = allocator.allocate(num_matrices*sizeof(int64_t));
int64_t* mat_el_end_indices_device = static_cast<int64_t*>(dataPtr.get());
search_end_matrix_indices(mat_el_end_indices_device, num_matrices, indices_dim0);
AT_CUDA_CHECK(hipMemcpy(
mat_el_end_indices_host.get(),
mat_el_end_indices_device,
num_matrices*sizeof(int64_t),
hipMemcpyDeviceToHost
));
}
// Need a pointer to an array to access within a lambda
int64_t* mat_el_end_indices = &mat_el_end_indices_host[0];
Scalar beta = 0;
Scalar alpha = 1;
int64_t mat_el_begin_idx = 0;
size_t workspace_buffer_size = 0;
void* workspace_buffer = nullptr;
auto& allocator = *::c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get();
::c10::DataPtr dataPtr;
// See Note [Enabling Deterministic Operations]
bool deterministic = globalContext().deterministicAlgorithms();
hipsparseSpMMAlg_t mm_alg = deterministic ? HIPSPARSE_COOMM_ALG2 : HIPSPARSE_COOMM_ALG1;
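// Added note (assumption based on cuSPARSE/hipSPARSE documentation, not text
// from the original file): the COOMM ALG2 path is the deterministic,
// run-to-run reproducible algorithm, typically at some performance cost,
// which is why it is chosen when deterministic algorithms are requested.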
// Iterate through each set of 2D matrices within the 3D
// tensor inputs, performing a matrix multiply with each
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
values.scalar_type(), "bmm_sparse_cuda", [&] {
scalar_t alpha_val = alpha.to<scalar_t>();
scalar_t beta_val = beta.to<scalar_t>();
uint32_t* row_indices_start_ptr = reinterpret_cast<uint32_t*>(indices_dim1.data_ptr());
uint32_t* col_indices_start_ptr = reinterpret_cast<uint32_t*>(indices_dim2.data_ptr());
scalar_t* values_start_ptr = reinterpret_cast<scalar_t*>(values.data_ptr());
scalar_t* mat2_start_ptr = reinterpret_cast<scalar_t*>(mat2_contig.data_ptr());
scalar_t* result_start_ptr = reinterpret_cast<scalar_t*>(tmp_result.data_ptr());
for (int64_t cur_mat_num = 0; cur_mat_num < num_matrices; cur_mat_num++) {
int64_t mat_el_end_idx = mat_el_end_indices[cur_mat_num];
if (mat_el_end_idx != -1) {
mat_el_end_idx++;
// Create tensors to view just the current set of matrices
int64_t sparse_nnz = mat_el_end_idx - mat_el_begin_idx;
hipDataType cuda_data_type = getTensorCudaDataType(mat2_contig);
uint32_t* row_indices_ptr = &row_indices_start_ptr[mat_el_begin_idx];
uint32_t* col_indices_ptr = &col_indices_start_ptr[mat_el_begin_idx];
scalar_t* values_ptr = &values_start_ptr[mat_el_begin_idx];
hipsparseSpMatDescr_t sparse_descr;
TORCH_CUDASPARSE_CHECK(hipsparseCreateCoo(
&sparse_descr,
dim_i,
dim_j,
sparse_nnz,
reinterpret_cast<void*>(row_indices_ptr),
reinterpret_cast<void*>(col_indices_ptr),
reinterpret_cast<void*>(values_ptr),
HIPSPARSE_INDEX_32I,
HIPSPARSE_INDEX_BASE_ZERO,
cuda_data_type
));
scalar_t* mat2_ptr = &mat2_start_ptr[dim_k*dim_j*cur_mat_num];
hipsparseDnMatDescr_t dense_descr;
TORCH_CUDASPARSE_CHECK(hipsparseCreateDnMat(
&dense_descr,
dim_k,
dim_j,
dim_k,
reinterpret_cast<void*>(mat2_ptr),
cuda_data_type,
HIPSPARSE_ORDER_COL
));
scalar_t* result_ptr = &result_start_ptr[dim_i*dim_k*cur_mat_num];
hipsparseDnMatDescr_t result_descr;
TORCH_CUDASPARSE_CHECK(hipsparseCreateDnMat(
&result_descr,
dim_i,
dim_k,
dim_i,
reinterpret_cast<void*>(result_ptr),
cuda_data_type,
HIPSPARSE_ORDER_COL
));
size_t required_workspace_buffer_size = 0;
TORCH_CUDASPARSE_CHECK(hipsparseSpMM_bufferSize(
cusparse_handle,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
HIPSPARSE_OPERATION_TRANSPOSE,
(void*)&alpha_val,
sparse_descr,
dense_descr,
(void*)&beta_val,
result_descr,
cuda_data_type,
mm_alg,
&required_workspace_buffer_size
));
if (required_workspace_buffer_size > workspace_buffer_size) {
workspace_buffer_size = required_workspace_buffer_size;
dataPtr = allocator.allocate(workspace_buffer_size);
workspace_buffer = dataPtr.get();
}
TORCH_CUDASPARSE_CHECK(hipsparseSpMM(
cusparse_handle,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
HIPSPARSE_OPERATION_TRANSPOSE,
(void*)&alpha_val,
sparse_descr,
dense_descr,
(void*)&beta_val,
result_descr,
cuda_data_type,
mm_alg,
workspace_buffer
));
TORCH_CUDASPARSE_CHECK(hipsparseDestroySpMat(sparse_descr));
TORCH_CUDASPARSE_CHECK(hipsparseDestroyDnMat(dense_descr));
TORCH_CUDASPARSE_CHECK(hipsparseDestroyDnMat(result_descr));
mat_el_begin_idx = mat_el_end_idx;
} else {
tmp_result[cur_mat_num].zero_();
}
}
}
);
if (need_copy_result) {
result.copy_(tmp_result);
}
// Need to transpose the result matrices since cusparse stores
// them in column-major order in memory
result.transpose_(1,2);
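// Added commentary (not from the original source): each per-matrix product
// was written as a column-major dim_i x dim_k block with leading dimension
// dim_i, which occupies memory exactly like a row-major dim_k x dim_i slice
// of the {num_matrices, dim_k, dim_i} buffer; the transpose above therefore
// exposes it as the expected {num_matrices, dim_i, dim_k} (non-contiguous)
// view without copying.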
#else
TORCH_CHECK(false, "bmm sparse-dense requires CUDA 10.1 or greater");
#endif
return result;
}
}} // namespace at::native
| 0970d474d098f8ea8cfcd7952cf07da1749d89e3.cu | #include <ATen/native/sparse/cuda/SparseCUDATensorMath.cuh>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/NativeFunctions.h>
#include <ATen/SparseTensorUtils.h>
#include <ATen/native/sparse/SparseTensorMath.h>
#include <ATen/native/sparse/cuda/SparseBlasLegacy.h>
#include <ATen/native/sparse/cuda/SparseCUDAApplyUtils.cuh>
#include <ATen/native/sparse/cuda/SparseCUDABlas.h>
#include <ATen/cuda/CUDAUtils.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/WrapDimUtilsMulti.h>
#include <ATen/ExpandUtils.h>
#include <c10/cuda/CUDACachingAllocator.h>
#include <THC/THCThrustAllocator.cuh>
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/binary_search.h>
#include <thrust/sort.h>
#include <thrust/system/cuda/execution_policy.h>
#include <bitset>
#include <cusparse.h>
#include <cuda_runtime_api.h>
#include <memory>
#define I_INFO(tensor) cuda::detail::getTensorInfo<int64_t, uint64_t>(tensor)
#define V_INFO(tensor) cuda::detail::getTensorInfo<scalar_t, uint64_t>(tensor)
namespace at { namespace native {
using namespace at::sparse;
using at::cuda::detail::TensorInfo;
using at::cuda::detail::getTensorInfo;
// --------------------------------------------------------------------
// Utility functions
// --------------------------------------------------------------------
namespace {
Tensor _to_csr_int(const Tensor& rowIndices, int64_t dim, int64_t nnz) {
Tensor csr = at::empty({dim+1}, CUDA(kInt));
Tensor rowIndicesInt = at::empty({rowIndices.size(0)}, CUDA(kInt));
rowIndicesInt.copy_(rowIndices);
sparse::cuda::Xcoo2csr(rowIndicesInt.data_ptr<int32_t>(), nnz, dim, csr.data_ptr<int32_t>());
return csr;
}
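// Added example (hypothetical values, not from the original source): for
// sorted COO row indices [0, 0, 1, 3] with dim = 4 and nnz = 4, Xcoo2csr
// produces the CSR row-pointer array [0, 2, 3, 3, 4], so row r owns the
// value range csr[r]..csr[r+1].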
}
// NB: Deleted spaddcmul (aka addcmul_, but not actually wired up), spaddcdiv (not
// wired at all)
void s_addmm_out_sparse_dense_cuda_worker(int64_t nnz, int64_t m, int64_t n, int64_t k, Tensor& r_, const Scalar& beta, const Tensor& t, const Scalar& alpha, Tensor& indices, Tensor& values, const Tensor& dense) {
Tensor rowIndices = indices.select(0, 0);
Tensor colIndices = indices.select(0, 1);
Tensor crow_indices = _to_csr_int(rowIndices, m, nnz);
Tensor col_indices = at::empty({colIndices.size(0)}, indices.options().dtype(kInt));
col_indices.copy_(colIndices);
s_addmm_out_csr_sparse_dense_cuda_worker(nnz, m, n, k, r_, beta, t, alpha, crow_indices, col_indices, values, dense);
}
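// Added commentary (not from the original source): the worker above converts
// the sorted COO row indices into a CSR row-pointer array via _to_csr_int,
// copies the column indices to int32, and then defers to the shared CSR
// addmm implementation.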
// --------------------------------------------------------------------
// addmm(Tensor, SparseTensor, Tensor, Scalar, Scalar) [broadcasts]
// --------------------------------------------------------------------
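// Added note (standard addmm semantics, not text from the original file):
// this computes r_ = beta * t + alpha * (sparse_ @ dense), where sparse_ is a
// 2D sparse COO matrix of shape m x k and dense is a dense k x n matrix.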
Tensor& s_addmm_out_sparse_dense_cuda(Tensor& r_, const Tensor& t, const SparseTensor& sparse_, const Tensor& dense, const Scalar& beta, const Scalar& alpha) {
TORCH_CHECK(t.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(sparse_.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'mat1' to be CUDA, but got CPU");
TORCH_CHECK(dense.is_cuda(), "Expected all tensors to be on the same device. addmm: expected 'mat2' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({sparse_, r_, t, dense}));
TORCH_CHECK(dense.dim() == 2, "addmm: 2D tensor expected, got ", dense.dim(), "D tensor");
TORCH_CHECK(sparse_.sparse_dim() == 2, "addmm: expected first two dims to be sparse (indices has size 2 at first dim), but got ", sparse_.sparse_dim(), " sparse dims");
// no need to check dense_dim because dense_dim + sparse_dim = dim
// mxk * kxn = mxn
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
TORCH_CHECK(t.size(0) == m,
"addmm: Argument #1 (t): Expected dim 0 size ", m, ", got ", t.size(0));
TORCH_CHECK(t.size(1) == n,
"addmm: Argument #1 (t): Expected dim 1 size ", n, ", got ", t.size(1));
TORCH_CHECK(dense.size(0) == k,
"addmm: Argument #3 (dense): Expected dim 0 size ", k, ", got ", dense.size(0));
r_.resize_({m, n});
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
Tensor indices = sparse._indices();
Tensor values = sparse._values();
if (nnz == 0) {
at::mul_out(r_, t, at::scalar_tensor(beta, r_.options()));
return r_;
}
s_addmm_out_sparse_dense_cuda_worker(nnz, m, n, k, r_, beta, t, alpha, indices, values, dense);
return r_;
}
Tensor& addmm_out_sparse_dense_cuda(
const Tensor& self,
const SparseTensor& mat1,
const Tensor& mat2,
const Scalar& beta,
const Scalar& alpha,
Tensor& result
) {
c10::MaybeOwned<Tensor> b_self = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out");
return s_addmm_out_sparse_dense_cuda(result, *b_self, mat1, mat2, beta, alpha);
}
Tensor s_addmm_sparse_dense_cuda(
const Tensor& t,
const SparseTensor& sparse,
const Tensor& dense,
const Scalar& beta,
const Scalar& alpha
) {
Tensor r = at::empty({0}, t.options());
s_addmm_out_sparse_dense_cuda(r, t, sparse, dense, beta, alpha);
return r;
}
Tensor addmm_sparse_dense_cuda(
const Tensor& self,
const SparseTensor& mat1,
const Tensor& mat2,
const Scalar& beta,
const Scalar& alpha
) {
c10::MaybeOwned<Tensor> b_self = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out");
return s_addmm_sparse_dense_cuda(*b_self, mat1, mat2, beta, alpha);
}
Tensor& s_addmm_sparse_dense_cuda_(
Tensor& t,
const SparseTensor& sparse,
const Tensor& dense,
const Scalar& beta,
const Scalar& alpha
) {
return s_addmm_out_sparse_dense_cuda(t, t, sparse, dense, beta, alpha);
}
// NB: Purposely no broadcasting version of addmm inplace
// Deleted sspaddmm (sparse, dense) -> sparse
// --------------------------------------------------------------------
// hspmm(SparseTensor mat1, Tensor mat2)
// --------------------------------------------------------------------
SparseTensor& hspmm_out_sparse_cuda(
const SparseTensor& sparse_,
const Tensor& dense,
SparseTensor& r_
/* , const Scalar& alpha */) {
TORCH_CHECK(sparse_.is_cuda(), "hspmm: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "hspmm: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(dense.is_cuda(), "hspmm: expected 'mat2' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, sparse_, dense}));
TORCH_CHECK(sparse_.sparse_dim() == 2,
"hspmm: Argument #2: 2D tensor expected, got ", sparse_.sparse_dim(), "D tensor");
TORCH_CHECK(sparse_.dense_dim() == 0,
"hspmm: Argument #2: scalar values expected, got ", sparse_.dense_dim(), "D values");
TORCH_CHECK(dense.dim() == 2,
"hspmm: Argument #3: 2D tensor expected, got ", dense.dim(), "D tensor");
int64_t m = sparse_.size(0);
int64_t k = sparse_.size(1);
int64_t n = dense.size(1);
TORCH_CHECK(dense.size(0) == k,
"hspmm: Argument #3: Expected dim 0 size ", k, ", got ", dense.size(0));
get_sparse_impl(r_)->resize_and_clear_(1, 1, {m, n});
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
SparseTensor sparse = sparse_.coalesce();
int64_t nnz = sparse._nnz();
Tensor indices = at::empty({1, nnz}, CUDA(kLong));
// create values in column-major format to avoid copying in spaddmm
Tensor values = at::empty({n, nnz}, dense.options());
values.transpose_(0, 1);
// why does sparse need to be cloned? If this is really necessary maybe we
// need to fuse this with newCoalesce
SparseTensor newSparse = sparse.clone();
Tensor spIndices = newSparse._indices();
Tensor dstIndices = spIndices.select(0, 0);
// Save destination indices to output hybrid tensor
indices.copy_(dstIndices);
// Replace destination indices with 0, 1, 2, 3, ... and compute output values
// tensor with sparse * dense multiplication
thrust::device_ptr<int64_t> indicesIter(dstIndices.data_ptr<int64_t>());
thrust::sequence(policy, indicesIter, indicesIter + nnz);
std::vector<int64_t> new_size = get_sparse_impl(newSparse)->sizes().vec();
new_size[0] = nnz;
get_sparse_impl(newSparse)->raw_resize_(get_sparse_impl(newSparse)->sparse_dim(), get_sparse_impl(newSparse)->dense_dim(), new_size);
s_addmm_out_sparse_dense_cuda(values, values, newSparse, dense, 0, /*alpha*/ 1);
get_sparse_impl(r_)->set_indices_and_values_unsafe(indices, values);
return r_;
}
SparseTensor hspmm_sparse_cuda(const SparseTensor& sparse, const Tensor& dense) {
SparseTensor r = at::empty({0}, sparse.options());
hspmm_out_sparse_cuda(sparse, dense, r);
return r;
}
// --------------------------------------------------------------------
// add(Tensor, SparseTensor, Scalar)
// formerly known as spcadd
// --------------------------------------------------------------------
template <typename T>
struct TensorCAddOp {
TensorCAddOp(T v) : val(v) {}
__device__ __forceinline__ void operator()(T* out, T* in) {
*out += val * *in;
}
__device__ __forceinline__ void operator()(T* out, T* in1, T* in2) {
*out = *in1 + val * *in2;
}
T val;
};
Tensor& add_out_dense_sparse_cuda(Tensor& r_, const Tensor& dense, const SparseTensor& sparse, const at::Scalar& value) {
TORCH_CHECK(dense.is_cuda(), "add: expected 'self' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(sparse.is_cuda(), "add: expected 'other' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(r_.is_cuda(), "add: expected 'out' to be a CUDA tensor, but got a CPU tensor");
TORCH_CHECK(cuda::check_device({sparse, r_, dense}));
TORCH_CHECK(dense.sizes().equals(sparse.sizes()), "add: expected 'self' and 'other' to have same size, but self has size ",
dense.sizes(), " while other has size ", sparse.sizes(), " (FYI: dense-sparse addition does not currently support broadcasting)");
const int64_t nnz = sparse._nnz();
if (nnz == 0) {
r_.resize_as_(dense);
r_.copy_(dense);
return r_;
}
auto commonDtype = at::result_type(dense, sparse);
TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type());
Tensor r = r_;
if (r_.scalar_type() != commonDtype) {
r = at::empty_like(dense, r_.options().dtype(commonDtype));
}
Tensor dense_buffer = dense.to(commonDtype);
Tensor values = sparse._values().to(commonDtype);
if (is_same_tensor(r, dense_buffer)) {
TORCH_CHECK(r_.is_contiguous(), "add: CUDA dense-sparse addition with a non-contiguous output tensor does not work; shout if you need it (see https://github.com/pytorch/pytorch/issues/1521 )");
} else {
r.resize_as_(dense);
r.copy_(dense_buffer);
}
Tensor indices = sparse._indices();
int64_t nDim = dense.dim();
int64_t nDimI = sparse.sparse_dim();
if (values.numel() == 0) {
return r_;
}
if (sparse.is_coalesced()) {
// TODO benchmark to decide whether to remove this special case
const dim3 block = cuda::getApplyBlock();
dim3 grid;
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
if (sparse.dense_dim() == 0) {
TORCH_CHECK(cuda::getApplyGrid(nnz, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_dense_sparse_cuda", [&] {
apply::sparseElementwiseKernelScalar<<<grid, block, 0, stream>>>(
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
} else {
TORCH_CHECK(cuda::getApplyGrid(nnz * block.x, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions");
// sparseElementwiseKernel needs values to be contiguous too
values = values.contiguous();
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_dense_sparse_cuda", [&] {
apply::sparseElementwiseKernel<TensorCAddOp<scalar_t>, uint64_t, scalar_t>
<<<grid, block, 0, stream>>>(
TensorCAddOp<scalar_t>(value.to<scalar_t>()),
V_INFO(r), I_INFO(indices), V_INFO(values),
static_cast<uint64_t>(nnz));
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
} else {
Tensor indices1D = flatten_indices(indices, sparse.sizes(), 0);
int64_t view_rows = 1;
int64_t view_columns = 1;
for (int i = 0; i < nDimI; i++) {
view_rows *= r.size(i);
}
for (int i = nDimI; i < nDim; i++) {
view_columns *= r.size(i);
}
Tensor r_view = r.view({view_rows, view_columns});
values = values.reshape({nnz, view_columns});
r_view.index_add_(0, indices1D, values, value);
}
THCudaCheck(cudaGetLastError());
r_.copy_(r);
return r_;
}
// --------------------------------------------------------------------
// add(SparseTensor, SparseTensor, Scalar) [broadcasts]
// --------------------------------------------------------------------
Tensor& add_out_dense_sparse_cuda(Tensor& r, const Tensor& dense, const SparseTensor& sparse_, const Scalar& value);
SparseTensor& add_out_sparse_cuda(const SparseTensor& t, const SparseTensor& src, const Scalar& value, SparseTensor& r_) {
if (!t.is_sparse()) {
return add_out_dense_sparse_cuda(r_, t, src, value);
}
// TODO: This test seems a bit goofy
TORCH_CHECK(src.is_sparse(), "add(sparse, dense) is not supported. Use add(dense, sparse) instead.");
TORCH_CHECK(t.is_cuda(), "add: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(src.is_cuda(), "add: expected 'other' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "add: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, t, src}));
auto commonDtype = at::result_type(t, src);
TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type());
TORCH_CHECK(t.sizes().equals(src.sizes()), "add: expected 'self' and 'other' to have same size, but ", t.sizes(), " != ", src.sizes());
if (src._nnz() == 0) {
return copy_sparse_to_sparse_(r_, t);
}
if (t._nnz() == 0) {
return mul_out_sparse_scalar(r_, src, value);
}
TORCH_CHECK(is_same_density(t, src), "add: expected 'self' and 'other' to have same density, but 'self' has ", t.sparse_dim(), " sparse dimensions while 'other' has ", src.sparse_dim(), " sparse dimensions");
// We deliberately choose to simply concat the indices and values tensors
// rather than merging them. This removes the need to synchronously fetch nnz
// at the end of the operation, at the cost of having a non-coalesced result.
// This trade-off is preferable for the common use-case of gradient accumulation.
Tensor t_indices_ = t._indices();
Tensor s_indices_ = src._indices();
Tensor t_values_ = t._values().to(commonDtype);
Tensor s_values_ = src._values().to(commonDtype);
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_sparse_cuda", [&] {
if (value.to<scalar_t>() != scalar_t(1)) {
s_values_ = s_values_.mul(value);
}
});
Tensor r_indices_ = at::cat({t_indices_, s_indices_}, 1);
Tensor r_values_ = at::cat({t_values_, s_values_}, 0);
if (r_.scalar_type() != commonDtype) {
SparseTensor promoted = at::empty({0}, r_.options().dtype(commonDtype));
promoted.resize_as_(src);
alias_into_sparse(promoted, r_indices_, r_values_);
// performs the addition under the common dtype.
promoted = promoted.coalesce();
r_values_ = promoted._values().to(r_.scalar_type());
r_indices_ = promoted._indices();
} else {
r_.resize_as_(src);
}
alias_into_sparse(r_, r_indices_, r_values_);
// Prevent unbounded growth of nnz
// TODO: Improved heuristic on when to coalesce or remove need to coalesce
if (r_._nnz() > r_.numel()) {
auto c = r_.coalesce();
alias_into_sparse(r_, c._indices(), c._values());
}
return r_;
}
// --------------------------------------------------------------------
// mul(SparseTensor, SparseTensor) [broadcasts]
// --------------------------------------------------------------------
template <typename T>
struct TensorMulOp {
__device__ __forceinline__ void operator()(T* out, T* in) {
*out *= *in;
}
__device__ __forceinline__ void operator()(T* out, T* in1, T* in2) {
*out = *in1 * *in2;
}
};
SparseTensor& mul_out_sparse_cuda(const SparseTensor& t_, const SparseTensor& src_, SparseTensor& r_) {
if (src_.dim() == 0) {
return mul_out_sparse_zerodim(r_, t_, src_);
} else if (t_.dim() == 0) {
return mul_out_sparse_zerodim(r_, src_, t_);
}
TORCH_CHECK(t_.is_cuda(), "mul: expected 'self' to be CUDA, but got CPU");
TORCH_CHECK(src_.is_cuda(), "mul: expected 'other' to be CUDA, but got CPU");
TORCH_CHECK(r_.is_cuda(), "mul: expected 'out' to be CUDA, but got CPU");
TORCH_CHECK(cuda::check_device({r_, t_, src_}));
TORCH_CHECK(t_.sizes().equals(src_.sizes()), "mul: expected 'self' and 'other' to have same size, but ", t_.sizes(), " != ", src_.sizes());
SparseTensor t = t_.coalesce();
SparseTensor src = src_.coalesce();
if (src_._nnz() == 0 || t_._nnz() == 0) {
r_.resize_as_(src_);
return r_.zero_();
}
// save these nnz counts now because they can be overwritten when doing in-place operations
int64_t t_nnz = t._nnz(), s_nnz = src._nnz();
int64_t max_nnz = std::min(t_nnz, s_nnz); // multiply by zero is zero, and can be dropped
int64_t sparse_dim = src.sparse_dim();
auto commonDtype = at::result_type(t, src);
TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type());
Tensor t_indices_ = t._indices().contiguous();
Tensor t_values_ = t._values().to(commonDtype);
Tensor s_indices_ = src._indices().contiguous();
Tensor s_values_ = src._values().to(commonDtype);
Tensor r_indices_ = at::empty({sparse_dim, max_nnz}, t_indices_.options());
r_.resize_as_(src);
Tensor r_values_ = new_values_with_size_of(t_values_, max_nnz).zero_();
int64_t valueSize = t_values_.stride(0);
const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), valueSize));
dim3 grid;
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
TORCH_CHECK(cuda::getApplyGrid(valueSize, grid, curDevice), "mul: Argument #0: tensor too large or too many dimensions");
Tensor resultNnz = at::empty({1}, CUDA(kLong));
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, commonDtype, "mul_out_sparse_cuda", [&] {
apply::valueSparseIntersectionKernel<<<grid, block, 0, stream>>>(
TensorMulOp<scalar_t>(),
I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
V_INFO(r_values_), V_INFO(t_values_), V_INFO(s_values_),
static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz));
C10_CUDA_KERNEL_LAUNCH_CHECK();
apply::indexSparseIntersectionKernel<uint64_t, scalar_t>
<<<1, 1, 0, stream>>>(
I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_),
// reinterpret_cast shenanigans, because we don't actually have
// unsigned tensors...
static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz), reinterpret_cast<uint64_t*>(resultNnz.data_ptr()));
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
r_values_ = r_values_.to(r_.scalar_type());
get_sparse_impl(r_)->set_indices_and_values_unsafe(r_indices_, r_values_);
// sync! (surely there is a more idiomatic way to do this...)
Tensor cpu_resultNnz = at::empty({1}, CPU(kLong));
cpu_resultNnz.copy_(resultNnz);
get_sparse_impl(r_)->set_nnz_and_narrow(cpu_resultNnz.accessor<int64_t, 1>()[0]);
return r_._coalesced_(true);
}
// --------------------------------------------------------------------
// sparse.sum() backward
//
// see NOTE [ sparse.sum() backward ]
// --------------------------------------------------------------------
template <typename scalar_t>
#if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_2(cuda::getApplyBlockSize(), cuda::getApplyBlocksPerSM())
#endif
__global__ void _sparse_sum_backward_cuda_kernel(
int64_t total_threads,
const TensorInfo<int64_t, int64_t> grad_indices_ti,
const TensorInfo<int64_t, int64_t> input_indices_ti,
const TensorInfo<int64_t, int64_t> input_indices_pos_ti,
const TensorInfo<scalar_t, int64_t> grad_values_expand_ti,
TensorInfo<scalar_t, int64_t> grad_input_values_ti) {
const int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= total_threads) return;
const int64_t j = input_indices_pos_ti.data[i];
bool has_match = false;
if (grad_indices_ti.data[j] == input_indices_ti.data[i]) {
has_match = true;
}
int64_t grad_input_values_stride0 = grad_input_values_ti.strides[0];
int64_t out_start = i * grad_input_values_stride0;
int64_t out_end = (i + 1) * grad_input_values_stride0;
int64_t in_start = j * grad_values_expand_ti.strides[0];
if (has_match) {
for (int64_t out_i = out_start, in_i = in_start; out_i < out_end; out_i++, in_i++) {
grad_input_values_ti.data[out_i] = grad_values_expand_ti.data[in_i];
}
}
else {
for (int64_t out_i = out_start; out_i < out_end; out_i++) {
grad_input_values_ti.data[out_i] = scalar_t(0);
}
}
}
Tensor _sparse_sum_backward_cuda(const Tensor& grad_, const SparseTensor& input_, IntArrayRef dims_to_sum) {
TORCH_CHECK(grad_.is_cuda(), "_sparse_sum_backward_cuda: expected 'grad_' to be CUDA tensor, but got CPU tensor");
TORCH_CHECK(input_.is_cuda(), "_sparse_sum_backward_cuda: expected 'input_' to be CUDA tensor, but got CPU tensor");
auto input = input_.coalesce();
const int64_t input_dim = input.dim();
auto dims_to_sum_b = dim_list_to_bitset(dims_to_sum, input_dim);
auto dims_to_sum_v = dims_to_sum.vec();
maybe_wrap_dims(dims_to_sum_v, input_dim);
Tensor input_indices = input._indices();
Tensor input_values = input._values();
IntArrayRef input_sizes = input.sizes();
const int64_t input_sparse_dim = input.sparse_dim();
const int64_t input_dense_dim = input.dense_dim();
const int64_t input_nnz = input._nnz();
int64_t sparse_dims_to_sum_size = 0;
auto sparse_dims_to_keep_v = std::vector<int64_t>();
auto dense_dims_to_sum_v = std::vector<int64_t>();
for (int64_t d = 0; d < input_dim; d++) {
if (dims_to_sum_b[d]) {
if (d < input_sparse_dim) sparse_dims_to_sum_size ++;
else dense_dims_to_sum_v.emplace_back(d + 1 - input_sparse_dim);
}
else {
if (d < input_sparse_dim) sparse_dims_to_keep_v.emplace_back(d);
}
}
const bool sum_all_sparse_dim = (input_sparse_dim == sparse_dims_to_sum_size);
const bool sum_dense_dim = (dense_dims_to_sum_v.size() > 0);
const bool sum_sparse_dim = (sparse_dims_to_sum_size > 0);
if (sum_all_sparse_dim) {
TORCH_CHECK(!grad_.is_sparse(), "_sparse_sum_backward_cuda: expected grad Tensor to be dense since all sparse dims are summed");
auto grad_input_values = grad_;
auto expand_size = input_values.sizes().vec();
if (sum_dense_dim) {
auto dense_expand_size = std::vector<int64_t>(expand_size);
dense_expand_size.erase(dense_expand_size.begin()); // remove nnz dim
for (auto d : dense_dims_to_sum_v) grad_input_values = grad_input_values.unsqueeze(d - 1); // -1 since grad has no nnz dim
grad_input_values = grad_input_values.expand(dense_expand_size);
}
grad_input_values = grad_input_values.expand(expand_size).clone(at::MemoryFormat::Contiguous);
return at::_sparse_coo_tensor_with_dims_and_tensors(input_sparse_dim, input_dense_dim, input_sizes, input_indices.clone(at::MemoryFormat::Contiguous), grad_input_values, input.options().dtype(grad_.dtype())); // convert to grad dtype
}
else {
TORCH_CHECK(grad_.is_sparse(), "_sparse_sum_backward_cuda: expected grad_ Tensor to be sparse, but got dense");
auto grad = grad_.coalesce();
Tensor grad_indices = grad._indices();
Tensor grad_values = grad._values();
const int64_t grad_sparse_dim = grad.sparse_dim();
const int64_t grad_nnz = grad._nnz();
Tensor grad_values_expand = grad_values;
if (sum_dense_dim) {
auto expand_size = input_values.sizes().vec();
if (sum_sparse_dim) expand_size[0] = grad_values.size(0); // update nnz
for (auto d : dense_dims_to_sum_v) grad_values_expand = grad_values_expand.unsqueeze(d);
grad_values_expand = grad_values_expand.expand(expand_size).clone(at::MemoryFormat::Contiguous);
}
Tensor grad_input_values;
if (!sum_sparse_dim) {
grad_input_values = grad_values_expand;
}
else {
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
typedef thrust::device_ptr<int64_t> thrust_ptr;
grad_input_values = at::empty_like(input_values, grad_values.options(), LEGACY_CONTIGUOUS_MEMORY_FORMAT);
AT_ASSERT(grad_input_values.is_cuda());
// get 1D indices
auto grad_sparse_dim_to_keep_v = std::vector<int64_t>(grad_sparse_dim);
std::iota(grad_sparse_dim_to_keep_v.begin(), grad_sparse_dim_to_keep_v.end(), 0);
auto grad_indices_1D = flatten_indices_by_dims(grad_indices, grad.sizes(), grad_sparse_dim_to_keep_v); // flatten indices over all sparse dims of grad; the resulting indices are coalesced and sorted
auto input_indices_1D = flatten_indices_by_dims(input_indices, input_sizes, sparse_dims_to_keep_v);
thrust_ptr grad_indices_iter(grad_indices_1D.data_ptr<int64_t>());
thrust_ptr input_indices_iter(input_indices_1D.data_ptr<int64_t>());
// store lower_bound of input indices at grad indices
Tensor input_indices_pos = at::empty_like(input_indices_1D, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
thrust_ptr input_indices_pos_iter(input_indices_pos.data_ptr<int64_t>());
thrust::lower_bound(policy,
grad_indices_iter, grad_indices_iter + grad_nnz,
input_indices_iter, input_indices_iter + input_nnz,
input_indices_pos_iter);
// config to run cuda kernel
int64_t total_threads = input_nnz;
const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), total_threads));
dim3 grid;
TORCH_CHECK(cuda::getApplyGrid(total_threads, grid, curDevice), "_sparse_sum_backward_cuda: input too large or too many dimensions");
auto grad_indices_ti = getTensorInfo<int64_t, int64_t>(grad_indices_1D);
auto input_indices_ti = getTensorInfo<int64_t, int64_t>(input_indices_1D);
auto input_indices_pos_ti = getTensorInfo<int64_t, int64_t>(input_indices_pos);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, grad_values.scalar_type(), "_sparse_sum_backward_cuda", [&] {
auto grad_values_expand_ti = getTensorInfo<scalar_t, int64_t>(grad_values_expand);
auto grad_input_values_ti = getTensorInfo<scalar_t, int64_t>(grad_input_values);
_sparse_sum_backward_cuda_kernel<scalar_t><<<grid, block, 0, stream>>>(
total_threads,
grad_indices_ti,
input_indices_ti,
input_indices_pos_ti,
grad_values_expand_ti,
grad_input_values_ti
);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
return at::_sparse_coo_tensor_with_dims_and_tensors(input_sparse_dim, input_dense_dim, input_sizes, input_indices.clone(at::MemoryFormat::Contiguous), grad_input_values, grad.options());
}
}
Tensor bmm_sparse_cuda(const SparseTensor& self, const Tensor& mat2) {
Tensor result = at::empty({self.size(0), mat2.size(2), self.size(1)}, mat2.options(), at::MemoryFormat::Contiguous);
return bmm_out_sparse_cuda(self, mat2, result);
}
#if !(defined(__HIP_PLATFORM_HCC__) || (defined(_MSC_VER) && CUSPARSE_VERSION < 11000))
__global__ void search_end_matrix_indices_cuda_kernel(
int64_t* mat_el_end_indices,
int64_t num_matrices,
const TensorInfo<int64_t, int64_t> indices_1D_ti,
const int64_t num_elements
){
const int64_t target_mat_num = blockIdx.x * blockDim.x + threadIdx.x;
if (target_mat_num >= num_matrices) return;
const int64_t* indices_1D = indices_1D_ti.data;
const int64_t indices_1D_stride = indices_1D_ti.strides[0];
int64_t start_idx = 0;
int64_t end_idx = num_elements - 1;
int64_t mid_idx = (start_idx + end_idx) >> 1;
int64_t mid_val = indices_1D[mid_idx*indices_1D_stride];
bool found;
while (start_idx <= end_idx) {
bool trim_right = mid_val > target_mat_num;
int64_t mid_idx_minus_1 = mid_idx - 1;
int64_t mid_idx_plus_1 = mid_idx + 1;
end_idx = trim_right ? mid_idx_minus_1 : end_idx;
start_idx = trim_right ? start_idx : mid_idx_plus_1;
mid_idx = (start_idx + end_idx) >> 1;
mid_val = indices_1D[mid_idx*indices_1D_stride];
}
found = (mid_val == target_mat_num)
&& (
(mid_idx == (num_elements-1))
|| (indices_1D[(mid_idx+1)*indices_1D_stride] != target_mat_num)
);
mat_el_end_indices[target_mat_num] = found ? mid_idx : -1;
}
// Search through a 1D tensor of sorted sparse matrix
// indices to find the end index for each matrix
void search_end_matrix_indices(int64_t* mat_el_end_indices, int64_t num_matrices, const Tensor& indices_1D) {
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
auto indices_1D_ti = getTensorInfo<int64_t, int64_t>(indices_1D);
int64_t grid_size = (num_matrices / 64)+1;
int64_t block_size = 64;
int64_t num_elements = indices_1D.size(0);
search_end_matrix_indices_cuda_kernel<<<grid_size, block_size, 0, stream>>>(
mat_el_end_indices,
num_matrices,
indices_1D_ti,
num_elements
);
C10_CUDA_KERNEL_LAUNCH_CHECK();
cudaDeviceSynchronize();
}
cudaDataType getTensorCudaDataType(Tensor self) {
cudaDataType cuda_data_type;
switch (self.scalar_type()) {
case ScalarType::Float:
cuda_data_type = CUDA_R_32F;
break;
case ScalarType::Double:
cuda_data_type = CUDA_R_64F;
break;
default:
TORCH_CHECK(false, "Tensor types must be either float32 or float64");
break;
}
return cuda_data_type;
}
#endif
Tensor& bmm_out_sparse_cuda(const SparseTensor& self, const Tensor& mat2, Tensor& result) {
#if defined __HIP_PLATFORM_HCC__
TORCH_CHECK(false, "bmm sparse-dense is not supported on HIP");
#elif defined(_MSC_VER) && (CUSPARSE_VERSION < 11000)
TORCH_CHECK(false, "bmm sparse-dense CUDA is not supported on Windows with cuda before 11.0");
#elif defined(CUDART_VERSION) && (CUDART_VERSION >= 10010) // linux cuda >= 10.1 or windows cuda >= 11.0
TORCH_CHECK(!mat2.is_sparse(), "bmm_sparse: Tensor 'mat2' must be dense");
TORCH_CHECK(self.dense_dim() == 0, "bmm_sparse: Tensor 'self' must have 0 dense dims, but has ", self.dense_dim());
TORCH_CHECK(self.sparse_dim() == 3, "bmm_sparse: Tensor 'self' must have 3 sparse dims, but has ", self.sparse_dim());
TORCH_CHECK(mat2.dim() == 3, "bmm_sparse: Tensor 'mat2' must have 3 dims, but has ", mat2.dim());
TORCH_CHECK(self.size(0) == mat2.size(0), "bmm_sparse: 'self.size(0)' and 'mat2.size(0)' must match");
TORCH_CHECK(self.size(2) == mat2.size(1), "bmm_sparse: 'self.size(2)' and 'mat2.size(1)' must match");
int64_t num_matrices = self.size(0);
int64_t dim_i = self.size(1);
int64_t dim_j = self.size(2);
int64_t dim_k = mat2.size(2);
result.resize_({num_matrices, dim_k, dim_i});
if ((self._nnz() == 0) || (dim_j == 0) || (dim_k == 0)) {
result.zero_().transpose_(1, 2);
return result;
}
Tensor tmp_result;
bool need_copy_result;
// If the result tensor is contiguous, we can just write results directly to it.
// Otherwise, we'll need to write results to a temp buffer and then copy.
if (result.is_contiguous()) {
tmp_result = result;
need_copy_result = false;
} else {
tmp_result = at::empty({num_matrices, dim_k, dim_i}, result.options(), at::MemoryFormat::Contiguous);
need_copy_result = true;
}
// Dense matrices have to be contiguous for cusparseSpMM to work
const Tensor mat2_contig = mat2.contiguous();
auto cusparse_handle = at::cuda::getCurrentCUDASparseHandle();
// First need to coalesce to get all of the first dimension indices
// in order since we'll be sending each matrix into the MM operation
SparseTensor self_coalesced = self.coalesce();
int64_t nnz = self_coalesced._nnz();
Tensor indices = self_coalesced._indices();
Tensor values = self_coalesced._values();
Tensor indices_dim0 = indices[0];
// Need to convert dim1 and dim2 indices to 32-bit since cusparseSpMM
// only supports 32-bit indices
Tensor indices_dim1 = indices[1].to(ScalarType::Int);
Tensor indices_dim2 = indices[2].to(ScalarType::Int);
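// For each batch matrix, the array below will hold the (inclusive) position of its last
// nonzero within the coalesced indices, or -1 if it has none; the loop further down turns
// that into an exclusive end index.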
std::unique_ptr<int64_t[]> mat_el_end_indices_host(new int64_t[num_matrices]);
{
auto& allocator = *::c10::cuda::CUDACachingAllocator::get();
auto dataPtr = allocator.allocate(num_matrices*sizeof(int64_t));
int64_t* mat_el_end_indices_device = static_cast<int64_t*>(dataPtr.get());
search_end_matrix_indices(mat_el_end_indices_device, num_matrices, indices_dim0);
AT_CUDA_CHECK(cudaMemcpy(
mat_el_end_indices_host.get(),
mat_el_end_indices_device,
num_matrices*sizeof(int64_t),
cudaMemcpyDeviceToHost
));
}
// Need a pointer to an array to access within a lambda
int64_t* mat_el_end_indices = &mat_el_end_indices_host[0];
Scalar beta = 0;
Scalar alpha = 1;
int64_t mat_el_begin_idx = 0;
size_t workspace_buffer_size = 0;
void* workspace_buffer = nullptr;
auto& allocator = *::c10::cuda::CUDACachingAllocator::get();
::c10::DataPtr dataPtr;
// See Note [Enabling Deterministic Operations]
bool deterministic = globalContext().deterministicAlgorithms();
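// CUSPARSE_COOMM_ALG2 produces run-to-run reproducible results at some performance cost,
// which is why it is selected when deterministic algorithms are requested.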
cusparseSpMMAlg_t mm_alg = deterministic ? CUSPARSE_COOMM_ALG2 : CUSPARSE_COOMM_ALG1;
// Iterate through each set of 2D matrices within the 3D
// tensor inputs, performing a matrix multiply with each
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
values.scalar_type(), "bmm_sparse_cuda", [&] {
scalar_t alpha_val = alpha.to<scalar_t>();
scalar_t beta_val = beta.to<scalar_t>();
uint32_t* row_indices_start_ptr = reinterpret_cast<uint32_t*>(indices_dim1.data_ptr());
uint32_t* col_indices_start_ptr = reinterpret_cast<uint32_t*>(indices_dim2.data_ptr());
scalar_t* values_start_ptr = reinterpret_cast<scalar_t*>(values.data_ptr());
scalar_t* mat2_start_ptr = reinterpret_cast<scalar_t*>(mat2_contig.data_ptr());
scalar_t* result_start_ptr = reinterpret_cast<scalar_t*>(tmp_result.data_ptr());
for (
int64_t cur_mat_num = 0;
(cur_mat_num < num_matrices);
cur_mat_num++
) {
int64_t mat_el_end_idx = mat_el_end_indices[cur_mat_num];
if (mat_el_end_idx != -1) {
mat_el_end_idx++;
// Set up cuSPARSE descriptors viewing just the current matrix's slice of the inputs
int64_t sparse_nnz = mat_el_end_idx - mat_el_begin_idx;
cudaDataType cuda_data_type = getTensorCudaDataType(mat2_contig);
uint32_t* row_indices_ptr = &row_indices_start_ptr[mat_el_begin_idx];
uint32_t* col_indices_ptr = &col_indices_start_ptr[mat_el_begin_idx];
scalar_t* values_ptr = &values_start_ptr[mat_el_begin_idx];
cusparseSpMatDescr_t sparse_descr;
TORCH_CUDASPARSE_CHECK(cusparseCreateCoo(
&sparse_descr,
dim_i,
dim_j,
sparse_nnz,
reinterpret_cast<void*>(row_indices_ptr),
reinterpret_cast<void*>(col_indices_ptr),
reinterpret_cast<void*>(values_ptr),
CUSPARSE_INDEX_32I,
CUSPARSE_INDEX_BASE_ZERO,
cuda_data_type
));
scalar_t* mat2_ptr = &mat2_start_ptr[dim_k*dim_j*cur_mat_num];
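// The contiguous row-major (dim_j, dim_k) slice of mat2 is described to cuSPARSE as a
// column-major (dim_k, dim_j) matrix over the same bytes, which is why the SpMM below
// applies CUSPARSE_OPERATION_TRANSPOSE to the dense operand.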
cusparseDnMatDescr_t dense_descr;
TORCH_CUDASPARSE_CHECK(cusparseCreateDnMat(
&dense_descr,
dim_k,
dim_j,
dim_k,
reinterpret_cast<void*>(mat2_ptr),
cuda_data_type,
CUSPARSE_ORDER_COL
));
scalar_t* result_ptr = &result_start_ptr[dim_i*dim_k*cur_mat_num];
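// The output is described as a column-major (dim_i, dim_k) matrix with leading dimension dim_i,
// i.e. the same memory as a row-major (dim_k, dim_i) slice; this matches the
// {num_matrices, dim_k, dim_i} allocation and the transpose applied after the loop.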
cusparseDnMatDescr_t result_descr;
TORCH_CUDASPARSE_CHECK(cusparseCreateDnMat(
&result_descr,
dim_i,
dim_k,
dim_i,
reinterpret_cast<void*>(result_ptr),
cuda_data_type,
CUSPARSE_ORDER_COL
));
size_t required_workspace_buffer_size = 0;
TORCH_CUDASPARSE_CHECK(cusparseSpMM_bufferSize(
cusparse_handle,
CUSPARSE_OPERATION_NON_TRANSPOSE,
CUSPARSE_OPERATION_TRANSPOSE,
(void*)&alpha_val,
sparse_descr,
dense_descr,
(void*)&beta_val,
result_descr,
cuda_data_type,
mm_alg,
&required_workspace_buffer_size
));
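// Grow the workspace only when a larger buffer is required; the allocation is reused
// across the matrices in the batch.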
if (required_workspace_buffer_size > workspace_buffer_size) {
workspace_buffer_size = required_workspace_buffer_size;
dataPtr = allocator.allocate(workspace_buffer_size);
workspace_buffer = dataPtr.get();
}
TORCH_CUDASPARSE_CHECK(cusparseSpMM(
cusparse_handle,
CUSPARSE_OPERATION_NON_TRANSPOSE,
CUSPARSE_OPERATION_TRANSPOSE,
(void*)&alpha_val,
sparse_descr,
dense_descr,
(void*)&beta_val,
result_descr,
cuda_data_type,
mm_alg,
workspace_buffer
));
TORCH_CUDASPARSE_CHECK(cusparseDestroySpMat(sparse_descr));
TORCH_CUDASPARSE_CHECK(cusparseDestroyDnMat(dense_descr));
TORCH_CUDASPARSE_CHECK(cusparseDestroyDnMat(result_descr));
mat_el_begin_idx = mat_el_end_idx;
} else {
tmp_result[cur_mat_num].zero_();
}
}
}
);
if (need_copy_result) {
result.copy_(tmp_result);
}
// Need to transpose the result matrices since cusparse stores
// them in column-major order in memory
result.transpose_(1,2);
#else
TORCH_CHECK(false, "bmm sparse-dense requires CUDA 10.1 or greater");
#endif
return result;
}
}} // namespace at::native
|
2c38c0ee60b957e9adbb2148c4203042ee71a9d2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <vector>
#include <cstdlib>
#include <cstdio>
#include <cassert>
#include <unistd.h>
#include <utils.h>
using std::vector;
__global__
void Initialize(int n, int* a, int* b) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i < n && j < n) {
a[n*i + j] = j;
b[n*i + j] = i-2*j;
}
}
__global__
void Add(int n, int* a, int* b, int* c) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i < n && j < n) {
c[n*i + j] = a[n*i + j] + b[n*i + j];
}
}
int main(int argc, const char** argv) {
int n = 1024;
int n_thread = 512;
if (checkCmdLineFlag(argc, argv, "t")) {
n_thread = getCmdLineArgumentInt(argc, argv, "t");
printf("Using %d threads = %d warps\n",n_thread, (n_thread+31)/32);
}
printf("Dimensions of matrix: %5d x %5d\n",n,n);
int* d_a, *d_b, *d_c;
/* Allocate memory */
checkCudaErrors(hipMalloc(&d_a, sizeof(int) * n*n));
checkCudaErrors(hipMalloc(&d_b, sizeof(int) * n*n));
checkCudaErrors(hipMalloc(&d_c, sizeof(int) * n*n));
dim3 threads_per_block(2,n_thread);
int blocks_per_grid_x = (n + 2 - 1) / 2;
int blocks_per_grid_y = (n + n_thread - 1) / n_thread;
/* This formula is needed to make sure we process all entries in matrix */
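/* e.g. with the defaults n = 1024 and n_thread = 512, blocks_per_grid_y = (1024 + 512 - 1) / 512 = 2 */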
dim3 num_blocks(blocks_per_grid_x, blocks_per_grid_y);
printf("Dimension of thread block along y: %2d\n", n_thread);
printf("Dimension of grid: %3d x %3d\n", num_blocks.x, num_blocks.y);
/* Run calculation on GPU */
hipLaunchKernelGGL(( Initialize), dim3(num_blocks), dim3(threads_per_block), 0, 0, n, d_a, d_b);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( Add), dim3(num_blocks), dim3(threads_per_block), 0, 0, n, d_a, d_b, d_c);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
/* Note that kernels execute asynchronously.
They will fail without any error message!
This can be confusing when debugging.
The output arrays will be left uninitialized with no warning.
*/
vector<int> h_c(n*n);
/* Copy the result back */
checkCudaErrors(hipMemcpy(&h_c[0], d_c, sizeof(int) * n*n,
hipMemcpyDeviceToHost));
/* Test result */
for(int i = 0; i < n; ++i) {
for(int j = 0; j < n; ++j) {
if(!(h_c[n*i + j] == i-j)) {
printf("%d %d %d %d %d\n",n,i,j,h_c[n*i + j],i-j);
}
assert(h_c[n*i + j] == i-j);
}
}
printf("All tests have passed; calculation is correct.\n");
return 0;
}
| 2c38c0ee60b957e9adbb2148c4203042ee71a9d2.cu | #include <iostream>
#include <vector>
#include <cstdlib>
#include <cstdio>
#include <cassert>
#include <unistd.h>
#include <utils.h>
using std::vector;
__global__
void Initialize(int n, int* a, int* b) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i < n && j < n) {
a[n*i + j] = j;
b[n*i + j] = i-2*j;
}
}
__global__
void Add(int n, int* a, int* b, int* c) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i < n && j < n) {
c[n*i + j] = a[n*i + j] + b[n*i + j];
}
}
int main(int argc, const char** argv) {
int n = 1024;
int n_thread = 512;
if (checkCmdLineFlag(argc, argv, "t")) {
n_thread = getCmdLineArgumentInt(argc, argv, "t");
printf("Using %d threads = %d warps\n",n_thread, (n_thread+31)/32);
}
printf("Dimensions of matrix: %5d x %5d\n",n,n);
int* d_a, *d_b, *d_c;
/* Allocate memory */
checkCudaErrors(cudaMalloc(&d_a, sizeof(int) * n*n));
checkCudaErrors(cudaMalloc(&d_b, sizeof(int) * n*n));
checkCudaErrors(cudaMalloc(&d_c, sizeof(int) * n*n));
dim3 threads_per_block(2,n_thread);
int blocks_per_grid_x = (n + 2 - 1) / 2;
int blocks_per_grid_y = (n + n_thread - 1) / n_thread;
/* This formula is needed to make sure we process all entries in matrix */
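/* e.g. with the defaults n = 1024 and n_thread = 512, blocks_per_grid_y = (1024 + 512 - 1) / 512 = 2 */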
dim3 num_blocks(blocks_per_grid_x, blocks_per_grid_y);
printf("Dimension of thread block along y: %2d\n", n_thread);
printf("Dimension of grid: %3d x %3d\n", num_blocks.x, num_blocks.y);
/* Run calculation on GPU */
Initialize<<<num_blocks, threads_per_block>>>(n, d_a, d_b);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
Add<<<num_blocks, threads_per_block>>>(n, d_a, d_b, d_c);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
/* Note that kernels execute asynchronously.
They will fail without any error message!
This can be confusing when debugging.
The output arrays will be left uninitialized with no warning.
*/
vector<int> h_c(n*n);
/* Copy the result back */
checkCudaErrors(cudaMemcpy(&h_c[0], d_c, sizeof(int) * n*n,
cudaMemcpyDeviceToHost));
/* Test result */
for(int i = 0; i < n; ++i) {
for(int j = 0; j < n; ++j) {
if(!(h_c[n*i + j] == i-j)) {
printf("%d %d %d %d %d\n",n,i,j,h_c[n*i + j],i-j);
}
assert(h_c[n*i + j] == i-j);
}
}
printf("All tests have passed; calculation is correct.\n");
return 0;
}
|
9ddcac8d7c5a18fb7b8088391ce977de37bf71ca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "gputimer.h"
#define NUM_THREADS 1000000
#define ARRAY_SIZE 100
#define BLOCK_WIDTH 1000
void print_array(int *array, int size)
{
printf("{ ");
for (int i = 0; i < size; i++) { printf("%d ", array[i]); }
printf("}\n");
}
__global__ void increment_naive(int *g)
{
// which thread is this?
int i = blockIdx.x * blockDim.x + threadIdx.x;
// each thread to increment consecutive elements, wrapping at ARRAY_SIZE
i = i % ARRAY_SIZE;
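// with the defaults, NUM_THREADS / ARRAY_SIZE = 1000000 / 100 = 10000 threads map to each element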
// This should leave each array element equal to NUM_THREADS/ARRAY_SIZE.
// But here we get a race condition: multiple threads read, modify, and write
// the same memory location without synchronization, so some increments are lost.
g[i] = g[i] + 1;
}
__global__ void increment_atomic(int *g)
{
// which thread is this?
int i = blockIdx.x * blockDim.x + threadIdx.x;
// each thread to increment consecutive elements, wrapping at ARRAY_SIZE
i = i % ARRAY_SIZE;
// This should leave each array element equal to NUM_THREADS/ARRAY_SIZE
atomicAdd(& g[i], 1);
}
int main(int argc,char **argv)
{
GpuTimer timer;
printf("%d total threads in %d blocks writing into %d array elements\n",
NUM_THREADS, NUM_THREADS / BLOCK_WIDTH, ARRAY_SIZE);
// declare and allocate host memory
int h_array[ARRAY_SIZE];
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
// declare, allocate, and zero out GPU memory
int * d_array;
hipMalloc((void **) &d_array, ARRAY_BYTES);
hipMemset((void *) d_array, 0, ARRAY_BYTES);
// launch the kernel - comment out one of these
timer.Start();
// Here there will be a race condition since many threads are trying to update
// the same memory location. This will leave the array elements with values lower than
// NUM_THREADS/ARRAY_SIZE
// increment_naive<<<NUM_THREADS/BLOCK_WIDTH, BLOCK_WIDTH>>>(d_array);
// Here, by using atomic additions on the global array, we have prevented the possibility of
// a race condition, so every array element should end up with the value NUM_THREADS/ARRAY_SIZE
// when using this kernel
hipLaunchKernelGGL(( increment_atomic), dim3(NUM_THREADS/BLOCK_WIDTH), dim3(BLOCK_WIDTH), 0, 0, d_array);
timer.Stop();
// copy back the array of sums from GPU and print
hipMemcpy(h_array, d_array, ARRAY_BYTES, hipMemcpyDeviceToHost);
print_array(h_array, ARRAY_SIZE);
printf("Time elapsed = %g ms\n", timer.Elapsed());
// free GPU memory allocation and exit
hipFree(d_array);
return 0;
}
| 9ddcac8d7c5a18fb7b8088391ce977de37bf71ca.cu | #include <stdio.h>
#include "gputimer.h"
#define NUM_THREADS 1000000
#define ARRAY_SIZE 100
#define BLOCK_WIDTH 1000
void print_array(int *array, int size)
{
printf("{ ");
for (int i = 0; i < size; i++) { printf("%d ", array[i]); }
printf("}\n");
}
__global__ void increment_naive(int *g)
{
// which thread is this?
int i = blockIdx.x * blockDim.x + threadIdx.x;
// each thread to increment consecutive elements, wrapping at ARRAY_SIZE
i = i % ARRAY_SIZE;
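// with the defaults, NUM_THREADS / ARRAY_SIZE = 1000000 / 100 = 10000 threads map to each element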
// This should leave each array element equal to NUM_THREADS/ARRAY_SIZE.
// But here we get a race condition: multiple threads read, modify, and write
// the same memory location without synchronization, so some increments are lost.
g[i] = g[i] + 1;
}
__global__ void increment_atomic(int *g)
{
// which thread is this?
int i = blockIdx.x * blockDim.x + threadIdx.x;
// each thread to increment consecutive elements, wrapping at ARRAY_SIZE
i = i % ARRAY_SIZE;
// This should leave each array element equal to NUM_THREADS/ARRAY_SIZE
atomicAdd(& g[i], 1);
}
int main(int argc,char **argv)
{
GpuTimer timer;
printf("%d total threads in %d blocks writing into %d array elements\n",
NUM_THREADS, NUM_THREADS / BLOCK_WIDTH, ARRAY_SIZE);
// declare and allocate host memory
int h_array[ARRAY_SIZE];
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
// declare, allocate, and zero out GPU memory
int * d_array;
cudaMalloc((void **) &d_array, ARRAY_BYTES);
cudaMemset((void *) d_array, 0, ARRAY_BYTES);
// launch the kernel - comment out one of these
timer.Start();
// Here there will be a race condition since many threads are trying to update
// the same memory location. This will leave the array elements with values lower than
// NUM_THREADS/ARRAY_SIZE
// increment_naive<<<NUM_THREADS/BLOCK_WIDTH, BLOCK_WIDTH>>>(d_array);
// Here, by using atomic additions on the global array, we have prevented the possibility of
// a race condition, so every array element should end up with the value NUM_THREADS/ARRAY_SIZE
// when using this kernel
increment_atomic<<<NUM_THREADS/BLOCK_WIDTH, BLOCK_WIDTH>>>(d_array);
timer.Stop();
// copy back the array of sums from GPU and print
cudaMemcpy(h_array, d_array, ARRAY_BYTES, cudaMemcpyDeviceToHost);
print_array(h_array, ARRAY_SIZE);
printf("Time elapsed = %g ms\n", timer.Elapsed());
// free GPU memory allocation and exit
cudaFree(d_array);
return 0;
}
|
9d163b4fda758543cb5241a19f5182201f9a78d9.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "ge_sigmoid.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int sd = 1;
const int fd = 1;
const REAL *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
const int offset_a = 1;
const int ld_a = 1;
REAL *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
const int offset_b = 1;
const int ld_b = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
ge_sigmoid), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,fd,a,offset_a,ld_a,b,offset_b,ld_b);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
ge_sigmoid), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,fd,a,offset_a,ld_a,b,offset_b,ld_b);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
ge_sigmoid), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,fd,a,offset_a,ld_a,b,offset_b,ld_b);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 9d163b4fda758543cb5241a19f5182201f9a78d9.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "ge_sigmoid.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int sd = 1;
const int fd = 1;
const REAL *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
const int offset_a = 1;
const int ld_a = 1;
REAL *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
const int offset_b = 1;
const int ld_b = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
ge_sigmoid<<<gridBlock,threadBlock>>>(sd,fd,a,offset_a,ld_a,b,offset_b,ld_b);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
ge_sigmoid<<<gridBlock,threadBlock>>>(sd,fd,a,offset_a,ld_a,b,offset_b,ld_b);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
ge_sigmoid<<<gridBlock,threadBlock>>>(sd,fd,a,offset_a,ld_a,b,offset_b,ld_b);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
a29f4a939665193844ef699ed55a976d24d338a7.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "precalculateABC.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float4 *ABCm = NULL;
hipMalloc(&ABCm, XSIZE*YSIZE);
float *M = NULL;
hipMalloc(&M, XSIZE*YSIZE);
float timestep = 1;
float alpha = 2;
unsigned int numPoints = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
precalculateABC), dim3(gridBlock),dim3(threadBlock), 0, 0, ABCm,M,timestep,alpha,numPoints);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
precalculateABC), dim3(gridBlock),dim3(threadBlock), 0, 0, ABCm,M,timestep,alpha,numPoints);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
precalculateABC), dim3(gridBlock),dim3(threadBlock), 0, 0, ABCm,M,timestep,alpha,numPoints);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | a29f4a939665193844ef699ed55a976d24d338a7.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "precalculateABC.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float4 *ABCm = NULL;
cudaMalloc(&ABCm, XSIZE*YSIZE);
float *M = NULL;
cudaMalloc(&M, XSIZE*YSIZE);
float timestep = 1;
float alpha = 2;
unsigned int numPoints = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
precalculateABC<<<gridBlock,threadBlock>>>(ABCm,M,timestep,alpha,numPoints);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
precalculateABC<<<gridBlock,threadBlock>>>(ABCm,M,timestep,alpha,numPoints);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
precalculateABC<<<gridBlock,threadBlock>>>(ABCm,M,timestep,alpha,numPoints);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
a08c0f4708e1c9a3e45d497c8180cbad501e89c0.hip | // !!! This is a file automatically generated by hipify!!!
// Utilities and system includes
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#define DATA_TYPE 1 // 0-SP, 1-INT, 2-DP
#define SIZE 60000000
#define TILE_DIM 1024
#define INNER_REPS 4
template <class T> __global__ void simpleKernel(T *A, T *C1, T *C2, T *C3, T *C4)
{
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
T ra, rb, rc, rd;
if (xIndex < SIZE) {
ra=A[xIndex];
rb=A[SIZE-1-xIndex]; // A has SIZE elements, so A[SIZE-xIndex] would read one past the end when xIndex == 0
rc=A[xIndex];
rd=A[SIZE-1-xIndex];
// rb=A[xIndex];
#pragma unroll 4
for (int i=0;i<INNER_REPS;i++) {
ra=ra*rc+rb;
rb=rb*rd+rc;
rc=rc*ra+rd;
rd=rd*rb+ra;
}
C1[xIndex]=ra;
C2[xIndex]=rb;
C3[xIndex]=rc;
C4[xIndex]=rd;
}
}
int main(int argc, char **argv) {
int outer_reps, vector_size, tile_dim;
vector_size = SIZE;
tile_dim = TILE_DIM;
if (argc>1){
outer_reps = atoi(argv[1]);
}else{
outer_reps = 1;
}
// execution configuration parameters
dim3 grid(vector_size/tile_dim, 1), threads(tile_dim, 1);
// CUDA events
hipEvent_t start, stop;
size_t mem_size = static_cast<size_t>(sizeof(double) * vector_size);
// allocate host memory
double *h_iA = (double *) malloc(mem_size);
double *h_oC1 = (double *) malloc(mem_size);
double *h_oC2 = (double *) malloc(mem_size);
double *h_oC3 = (double *) malloc(mem_size);
double *h_oC4 = (double *) malloc(mem_size);
// initialize host data
for (int i = 0; i < vector_size; ++i)
{
h_iA[i] = (double) i+3;
// h_iB[i] = (float) i+3;
}
// allocate device memory
double *d_iA, *d_iB, *d_oC1, *d_oC2, *d_oC3, *d_oC4;
hipMalloc((void **) &d_iA, mem_size);
// hipMalloc((void **) &d_iB, mem_size);
hipMalloc((void **) &d_oC1, mem_size);
hipMalloc((void **) &d_oC2, mem_size);
hipMalloc((void **) &d_oC3, mem_size);
hipMalloc((void **) &d_oC4, mem_size);
// copy host data to device
hipMemcpy(d_iA, h_iA, mem_size, hipMemcpyHostToDevice);
// hipMemcpy(d_iB, h_iB, mem_size, hipMemcpyHostToDevice);
// print out common data for all kernels
printf("\nVector size: %d TotalBlocks: %d blockSize: %d\n\n", vector_size, grid.x, threads.x);
// initialize events
hipEventCreate(&start);
hipEventCreate(&stop);
// take measurements for loop over kernel launches
hipEventRecord(start, 0);
for (int i=0; i < outer_reps; i++)
{
hipLaunchKernelGGL(( simpleKernel<double>), dim3(grid), dim3(threads), 0, 0, d_iA, d_oC1, d_oC2, d_oC3, d_oC4);
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float kernelTime;
hipEventElapsedTime(&kernelTime, start, stop);
// take measurements for loop inside kernel
hipMemcpy(h_oC1, d_oC1, mem_size, hipMemcpyDeviceToHost);
hipMemcpy(h_oC2, d_oC2, mem_size, hipMemcpyDeviceToHost);
hipMemcpy(h_oC3, d_oC3, mem_size, hipMemcpyDeviceToHost);
hipMemcpy(h_oC4, d_oC4, mem_size, hipMemcpyDeviceToHost);
printf("teste: %f\n", h_oC1[0]);
// report effective bandwidths
float kernelBandwidth = 2.0f * 1000.0f * mem_size/(1024*1024*1024)/(kernelTime/outer_reps);
printf("simpleKernel, Throughput = %.4f GB/s, Time = %.5f ms, Size = %u fp32 elements, NumDevsUsed = %u, Workgroup = %u\n",
kernelBandwidth,
kernelTime/outer_reps,
vector_size, 1, tile_dim * 1);
free(h_iA);
// free(h_iB);
free(h_oC1);
free(h_oC2);
free(h_oC3);
free(h_oC4);
hipFree(d_iA);
// hipFree(d_iB);
hipFree(d_oC1);
hipFree(d_oC2);
hipFree(d_oC3);
hipFree(d_oC4);
hipEventDestroy(start);
hipEventDestroy(stop);
hipDeviceReset();
printf("Test passed\n");
exit(EXIT_SUCCESS);
}
| a08c0f4708e1c9a3e45d497c8180cbad501e89c0.cu | // Utilities and system includes
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_profiler_api.h>
#define DATA_TYPE 1 // 0-SP, 1-INT, 2-DP
#define SIZE 60000000
#define TILE_DIM 1024
#define INNER_REPS 4
template <class T> __global__ void simpleKernel(T *A, T *C1, T *C2, T *C3, T *C4)
{
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
T ra, rb, rc, rd;
if (xIndex < SIZE) {
ra=A[xIndex];
rb=A[SIZE-1-xIndex]; // A has SIZE elements, so A[SIZE-xIndex] would read one past the end when xIndex == 0
rc=A[xIndex];
rd=A[SIZE-1-xIndex];
// rb=A[xIndex];
#pragma unroll 4
for (int i=0;i<INNER_REPS;i++) {
ra=ra*rc+rb;
rb=rb*rd+rc;
rc=rc*ra+rd;
rd=rd*rb+ra;
}
C1[xIndex]=ra;
C2[xIndex]=rb;
C3[xIndex]=rc;
C4[xIndex]=rd;
}
}
int main(int argc, char **argv) {
int outer_reps, vector_size, tile_dim;
vector_size = SIZE;
tile_dim = TILE_DIM;
if (argc>1){
outer_reps = atoi(argv[1]);
}else{
outer_reps = 1;
}
// execution configuration parameters
dim3 grid(vector_size/tile_dim, 1), threads(tile_dim, 1);
// CUDA events
cudaEvent_t start, stop;
size_t mem_size = static_cast<size_t>(sizeof(double) * vector_size);
// allocate host memory
double *h_iA = (double *) malloc(mem_size);
double *h_oC1 = (double *) malloc(mem_size);
double *h_oC2 = (double *) malloc(mem_size);
double *h_oC3 = (double *) malloc(mem_size);
double *h_oC4 = (double *) malloc(mem_size);
// initialize host data
for (int i = 0; i < vector_size; ++i)
{
h_iA[i] = (double) i+3;
// h_iB[i] = (float) i+3;
}
// allocate device memory
double *d_iA, *d_iB, *d_oC1, *d_oC2, *d_oC3, *d_oC4;
cudaMalloc((void **) &d_iA, mem_size);
// cudaMalloc((void **) &d_iB, mem_size);
cudaMalloc((void **) &d_oC1, mem_size);
cudaMalloc((void **) &d_oC2, mem_size);
cudaMalloc((void **) &d_oC3, mem_size);
cudaMalloc((void **) &d_oC4, mem_size);
// copy host data to device
cudaMemcpy(d_iA, h_iA, mem_size, cudaMemcpyHostToDevice);
// cudaMemcpy(d_iB, h_iB, mem_size, cudaMemcpyHostToDevice);
// print out common data for all kernels
printf("\nVector size: %d TotalBlocks: %d blockSize: %d\n\n", vector_size, grid.x, threads.x);
// initialize events
cudaEventCreate(&start);
cudaEventCreate(&stop);
// take measurements for loop over kernel launches
cudaEventRecord(start, 0);
for (int i=0; i < outer_reps; i++)
{
simpleKernel<double><<<grid, threads>>>(d_iA, d_oC1, d_oC2, d_oC3, d_oC4);
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float kernelTime;
cudaEventElapsedTime(&kernelTime, start, stop);
// take measurements for loop inside kernel
cudaMemcpy(h_oC1, d_oC1, mem_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_oC2, d_oC2, mem_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_oC3, d_oC3, mem_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_oC4, d_oC4, mem_size, cudaMemcpyDeviceToHost);
printf("teste: %f\n", h_oC1[0]);
// report effective bandwidths
float kernelBandwidth = 2.0f * 1000.0f * mem_size/(1024*1024*1024)/(kernelTime/outer_reps);
printf("simpleKernel, Throughput = %.4f GB/s, Time = %.5f ms, Size = %u fp32 elements, NumDevsUsed = %u, Workgroup = %u\n",
kernelBandwidth,
kernelTime/outer_reps,
vector_size, 1, tile_dim * 1);
free(h_iA);
// free(h_iB);
free(h_oC1);
free(h_oC2);
free(h_oC3);
free(h_oC4);
cudaFree(d_iA);
// cudaFree(d_iB);
cudaFree(d_oC1);
cudaFree(d_oC2);
cudaFree(d_oC3);
cudaFree(d_oC4);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaDeviceReset();
printf("Test passed\n");
exit(EXIT_SUCCESS);
}
|
21adc2d7ee91ad3313f716f02a4c47ced30e22b9.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kRgb2XYZ.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
uchar4 *inputImg = NULL;
hipMalloc(&inputImg, XSIZE*YSIZE);
float4 *outputImg = NULL;
hipMalloc(&outputImg, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
kRgb2XYZ), dim3(gridBlock),dim3(threadBlock), 0, 0, inputImg,outputImg,width,height);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
kRgb2XYZ), dim3(gridBlock),dim3(threadBlock), 0, 0, inputImg,outputImg,width,height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
kRgb2XYZ), dim3(gridBlock),dim3(threadBlock), 0, 0, inputImg,outputImg,width,height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 21adc2d7ee91ad3313f716f02a4c47ced30e22b9.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kRgb2XYZ.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
uchar4 *inputImg = NULL;
cudaMalloc(&inputImg, XSIZE*YSIZE);
float4 *outputImg = NULL;
cudaMalloc(&outputImg, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kRgb2XYZ<<<gridBlock,threadBlock>>>(inputImg,outputImg,width,height);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kRgb2XYZ<<<gridBlock,threadBlock>>>(inputImg,outputImg,width,height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kRgb2XYZ<<<gridBlock,threadBlock>>>(inputImg,outputImg,width,height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
8a1c065a3d1aaba43b8e687c54ec44db5fc534b5.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------------------
/**
* @file pr_app.cu
*
* @brief Gunrock PageRank application
*/
// <primitive>_app.cuh includes
#include <gunrock/app/app.cuh>
// page-rank includes
#include <gunrock/app/pr/pr_enactor.cuh>
#include <gunrock/app/pr/pr_test.cuh>
namespace gunrock {
namespace app {
namespace pr {
hipError_t UseParameters(util::Parameters ¶meters) {
hipError_t retval = hipSuccess;
GUARD_CU(UseParameters_app(parameters));
GUARD_CU(UseParameters_problem(parameters));
GUARD_CU(UseParameters_enactor(parameters));
GUARD_CU(parameters.Use<std::string>(
"src",
util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
"invalid",
"<Vertex-ID|random|largestdegree|invalid> The source vertices\n"
"\tIf random, randomly select non-zero degree vertices;\n"
"\tIf largestdegree, select vertices with largest degrees;\n"
"\tIf invalid, do not use personalized PageRank.",
__FILE__, __LINE__));
GUARD_CU(parameters.Use<int>(
"src-seed",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
util::PreDefinedValues<int>::InvalidValue,
"seed to generate random sources", __FILE__, __LINE__));
GUARD_CU(parameters.Use<std::string>(
"output-filename",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
"", "file to output ranking values", __FILE__, __LINE__));
return retval;
}
/**
* @brief Run PageRank tests
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
* @param[in] parameters Execution parameters
* @param[in] graph Input graph
* @param[in] ref_node_ids Reference top-ranked vertex IDs
* @param[in] ref_ranks Reference ranking values per vertex
* @param[in] target Where to perform the PageRank
* \return hipError_t error message(s), if any
*/
template <typename GraphT, typename ValueT = typename GraphT::ValueT>
hipError_t RunTests(util::Parameters ¶meters, GraphT &graph,
typename GraphT::VertexT **ref_node_ids = NULL,
ValueT **ref_ranks = NULL,
util::Location target = util::DEVICE) {
hipError_t retval = hipSuccess;
typedef typename GraphT::VertexT VertexT;
typedef typename GraphT::SizeT SizeT;
typedef Problem<GraphT> ProblemT;
typedef Enactor<ProblemT> EnactorT;
util::CpuTimer cpu_timer, total_timer;
cpu_timer.Start();
total_timer.Start();
// parse configurations from parameters
bool quiet = parameters.Get<bool>("quiet");
int num_runs = parameters.Get<int>("num-runs");
std::string validation = parameters.Get<std::string>("validation");
std::vector<VertexT> srcs = parameters.Get<std::vector<VertexT>>("srcs");
int num_srcs = srcs.size();
util::Info info("PR", parameters, graph); // initialize Info structure
// Allocate host-side array (for both reference and GPU-computed results)
ValueT *h_ranks = new ValueT[graph.nodes];
VertexT *h_node_ids = new VertexT[graph.nodes];
// Allocate problem and enactor on GPU, and initialize them
ProblemT problem(parameters);
EnactorT enactor;
// util::PrintMsg("Before init");
GUARD_CU(problem.Init(graph, target));
// util::PrintMsg("Problem init");
GUARD_CU(enactor.Init(problem, target));
// util::PrintMsg("After init");
cpu_timer.Stop();
parameters.Set("preprocess-time", cpu_timer.ElapsedMillis());
// perform PageRank
VertexT src;
for (int run_num = 0; run_num < num_runs; ++run_num) {
src = srcs[run_num % num_srcs];
GUARD_CU(problem.Reset(src, target));
GUARD_CU(enactor.Reset(src, target));
util::PrintMsg("__________________________", !quiet);
cpu_timer.Start();
GUARD_CU(enactor.Enact(src));
cpu_timer.Stop();
info.CollectSingleRun(cpu_timer.ElapsedMillis());
util::PrintMsg(
"--------------------------\nRun " + std::to_string(run_num) +
" elapsed: " + std::to_string(cpu_timer.ElapsedMillis()) +
" ms, src = " + std::to_string(src) + ", #iterations = " +
std::to_string(enactor.enactor_slices[0].enactor_stats.iteration),
!quiet);
if (validation == "each") {
GUARD_CU(enactor.Extract());
GUARD_CU(problem.Extract(h_node_ids, h_ranks));
ValueT total_rank = 0;
#pragma omp parallel for reduction(+ : total_rank)
for (VertexT v = 0; v < graph.nodes; v++) {
total_rank += h_ranks[v];
}
util::PrintMsg("Total_rank = " + std::to_string(total_rank));
SizeT num_errors = app::pr::Validate_Results(
parameters, graph, src, h_node_ids, h_ranks,
ref_node_ids == NULL ? NULL : ref_node_ids[run_num % num_srcs],
ref_ranks == NULL ? NULL : ref_ranks[run_num % num_srcs], false);
}
}
cpu_timer.Start();
if (validation == "last") {
// Copy out results
GUARD_CU(enactor.Extract());
GUARD_CU(problem.Extract(h_node_ids, h_ranks));
if (!quiet) {
ValueT total_rank = 0;
#pragma omp parallel for reduction(+ : total_rank)
for (VertexT v = 0; v < graph.nodes; v++) {
total_rank += h_ranks[v];
}
util::PrintMsg("Total_rank = " + std::to_string(total_rank));
// Display Solution
DisplaySolution(h_node_ids, h_ranks, graph.nodes);
}
SizeT num_errors = app::pr::Validate_Results(
parameters, graph, src, h_node_ids, h_ranks,
ref_node_ids == NULL ? NULL : ref_node_ids[(num_runs - 1) % num_srcs],
ref_ranks == NULL ? NULL : ref_ranks[(num_runs - 1) % num_srcs], false);
}
if (parameters.Get<std::string>("output-filename") != "") {
cpu_timer.Start();
std::ofstream fout;
size_t buf_size = 1024 * 1024 * 16;
char *fout_buf = new char[buf_size];
fout.rdbuf()->pubsetbuf(fout_buf, buf_size);
fout.open(parameters.Get<std::string>("output-filename").c_str());
for (VertexT v = 0; v < graph.nodes; v++) {
fout << h_node_ids[v] + 1 << "," << h_ranks[v] << std::endl;
}
fout.close();
delete[] fout_buf;
fout_buf = NULL;
cpu_timer.Stop();
parameters.Set("write-time", cpu_timer.ElapsedMillis());
}
// compute running statistics
info.ComputeTraversalStats(enactor, (VertexT *)NULL);
// Display_Memory_Usage(problem);
#ifdef ENABLE_PERFORMANCE_PROFILING
// Display_Performance_Profiling(&enactor);
#endif
// Clean up
GUARD_CU(enactor.Release(target));
GUARD_CU(problem.Release(target));
delete[] h_node_ids;
h_node_ids = NULL;
delete[] h_ranks;
h_ranks = NULL;
cpu_timer.Stop();
total_timer.Stop();
info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis());
return retval;
}
} // namespace pr
} // namespace app
} // namespace gunrock
/*
* @brief Entry of gunrock_pagerank function
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the ranking values
* @param[in] parameters Execution parameters
* @param[in] graph Input graph
* @param[out] node_ids Return top-ranked vertex IDs
* @param[out] ranks Return PageRank scores per node
* \return double Return accumulated elapsed times for all runs
*/
template <typename GraphT, typename ValueT = typename GraphT::ValueT>
double gunrock_pagerank(gunrock::util::Parameters ¶meters, GraphT &graph,
typename GraphT::VertexT **node_ids, ValueT **ranks) {
typedef typename GraphT::VertexT VertexT;
typedef gunrock::app::pr::Problem<GraphT> ProblemT;
typedef gunrock::app::pr::Enactor<ProblemT> EnactorT;
gunrock::util::CpuTimer cpu_timer;
gunrock::util::Location target = gunrock::util::DEVICE;
double total_time = 0;
if (parameters.UseDefault("quiet")) parameters.Set("quiet", true);
// Allocate problem and enactor on GPU, and initialize them
ProblemT problem(parameters);
EnactorT enactor;
printf("Init Problem and Enactor for PR.\n");
problem.Init(graph, target);
enactor.Init(problem, target);
std::vector<VertexT> srcs = parameters.Get<std::vector<VertexT>>("srcs");
int num_runs = parameters.Get<int>("num-runs");
int num_srcs = srcs.size();
for (int run_num = 0; run_num < num_runs; ++run_num) {
printf("For run_num: %d, Reset problem and enactor and Enact.\n", run_num);
int src_num = run_num % num_srcs;
VertexT src = srcs[src_num];
problem.Reset(src, target);
enactor.Reset(src, target);
cpu_timer.Start();
enactor.Enact(src);
cpu_timer.Stop();
total_time += cpu_timer.ElapsedMillis();
enactor.Extract();
problem.Extract(node_ids[src_num], ranks[src_num]);
}
enactor.Release(target);
problem.Release(target);
srcs.clear();
return total_time;
}
/*
* @brief Simple interface that takes in a graph in CSR format
* @param[in] num_nodes Number of vertices in the input graph
* @param[in] num_edges Number of edges in the input graph
* @param[in] row_offsets CSR-formatted graph input row offsets
* @param[in] col_indices CSR-formatted graph input column indices
* @param[in] num_runs Number of runs to perform PR
* @param[in] sources Sources for personalized PR
* @param[in] normalize Whether to normalize ranking values
* @param[out] node_ids Return top-ranked vertex IDs
* @param[out] ranks Return PageRank scores per node
* \return double Return accumulated elapsed times for all runs
*/
template <typename VertexT = int, typename SizeT = int, typename ValueT = float>
double pagerank(const SizeT num_nodes, const SizeT num_edges,
const SizeT *row_offsets, const VertexT *col_indices,
const int num_runs, bool normalize, VertexT *sources,
VertexT **node_ids, ValueT **ranks) {
typedef typename gunrock::app::TestGraph<
VertexT, SizeT, ValueT, gunrock::graph::HAS_COO | gunrock::graph::HAS_CSC>
GraphT;
typedef typename gunrock::app::TestGraph<VertexT, SizeT, ValueT,
gunrock::graph::HAS_CSR>
Graph_CsrT;
typedef typename Graph_CsrT::CsrT CsrT;
// Setup parameters
gunrock::util::Parameters parameters("pr");
gunrock::graphio::UseParameters(parameters);
gunrock::app::pr::UseParameters(parameters);
gunrock::app::UseParameters_test(parameters);
parameters.Parse_CommandLine(0, NULL);
parameters.Set("graph-type", "by-pass");
parameters.Set("normalize", normalize);
parameters.Set("num-runs", num_runs);
std::vector<VertexT> srcs;
VertexT InvalidValue = gunrock::util::PreDefinedValues<VertexT>::InvalidValue;
for (int i = 0; i < num_runs; i++) {
if (sources != NULL)
srcs.push_back(sources[i]);
else
srcs.push_back(InvalidValue);
}
parameters.Set("srcs", srcs);
bool quiet = parameters.Get<bool>("quiet");
CsrT csr;
// Assign pointers into gunrock graph format
csr.Allocate(num_nodes, num_edges, gunrock::util::HOST);
csr.row_offsets.SetPointer((int *)row_offsets, num_nodes + 1,
gunrock::util::HOST);
csr.column_indices.SetPointer((int *)col_indices, num_edges,
gunrock::util::HOST);
// csr.Move(gunrock::util::HOST, gunrock::util::DEVICE);
gunrock::util::Location target = gunrock::util::HOST;
GraphT graph;
graph.FromCsr(csr, target, 0, quiet, true);
csr.Release();
gunrock::graphio::LoadGraph(parameters, graph);
// Run the PR
double elapsed_time = gunrock_pagerank(parameters, graph, node_ids, ranks);
// Cleanup
// graph.Release();
// srcs.clear();
return elapsed_time;
}
/*
* @brief Simple interface that takes in a graph in CSR format
* @param[in] num_nodes Number of vertices in the input graph
* @param[in] num_edges Number of edges in the input graph
* @param[in] row_offsets CSR-formatted graph input row offsets
* @param[in] col_indices CSR-formatted graph input column indices
* @param[in] source Source for personalized PR
* @param[in] normalize Whether to normalize ranking values
* @param[out] node_ids Return top-ranked vertex IDs
* @param[out] ranks Return PageRank scores per node
* \return double Return accumulated elapsed times for all runs
*/
template <typename VertexT = int, typename SizeT = int, typename ValueT = float>
double pagerank(const SizeT num_nodes, const SizeT num_edges,
const SizeT *row_offsets, const VertexT *col_indices,
bool normalize, VertexT source, VertexT *node_ids,
ValueT *ranks) {
if (source == -1) {
return pagerank(num_nodes, num_edges, row_offsets, col_indices,
1 /* num_runs */, normalize, (int *)NULL, &node_ids,
&ranks);
}
return pagerank(num_nodes, num_edges, row_offsets, col_indices,
1 /* num_runs */, normalize, &source, &node_ids, &ranks);
}
/*
* @brief Simple interface that takes in a graph in CSR format
* @param[in] num_nodes Number of vertices in the input graph
* @param[in] num_edges Number of edges in the input graph
* @param[in] row_offsets CSR-formatted graph input row offsets
* @param[in] col_indices CSR-formatted graph input column indices
* @param[in] normalize Whether to normalize ranking values
* @param[out] node_ids Return top-ranked vertex IDs
* @param[out] ranks Return PageRank scores per node
* \return double Return accumulated elapsed times for all runs
*/
double pagerank(const int num_nodes, const int num_edges,
const int *row_offsets, const int *col_indices, bool normalize,
int *node_ids, float *ranks) {
return pagerank(num_nodes, num_edges, row_offsets, col_indices, normalize,
(int)-1 /* source */, node_ids, ranks);
}
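/*
 * A minimal usage sketch for the simple CSR interface above (hypothetical 3-vertex chain
 * 0 -> 1 -> 2; all arrays are host-side, normalize is set to true):
 *
 *   int row_offsets[] = {0, 1, 2, 2};
 *   int col_indices[] = {1, 2};
 *   int node_ids[3];
 *   float ranks[3];
 *   double elapsed_ms = pagerank(3, 2, row_offsets, col_indices, true, node_ids, ranks);
 *
 * node_ids receives the vertex IDs in rank order and ranks the matching PageRank scores,
 * as documented above; the return value is the accumulated elapsed time in milliseconds.
 */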
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
| 8a1c065a3d1aaba43b8e687c54ec44db5fc534b5.cu | // ----------------------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------------------
/**
* @file pr_app.cu
*
* @brief Gunrock PageRank application
*/
// <primitive>_app.cuh includes
#include <gunrock/app/app.cuh>
// page-rank includes
#include <gunrock/app/pr/pr_enactor.cuh>
#include <gunrock/app/pr/pr_test.cuh>
namespace gunrock {
namespace app {
namespace pr {
cudaError_t UseParameters(util::Parameters ¶meters) {
cudaError_t retval = cudaSuccess;
GUARD_CU(UseParameters_app(parameters));
GUARD_CU(UseParameters_problem(parameters));
GUARD_CU(UseParameters_enactor(parameters));
GUARD_CU(parameters.Use<std::string>(
"src",
util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
"invalid",
"<Vertex-ID|random|largestdegree|invalid> The source vertices\n"
"\tIf random, randomly select non-zero degree vertices;\n"
"\tIf largestdegree, select vertices with largest degrees;\n"
"\tIf invalid, do not use personalized PageRank.",
__FILE__, __LINE__));
GUARD_CU(parameters.Use<int>(
"src-seed",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
util::PreDefinedValues<int>::InvalidValue,
"seed to generate random sources", __FILE__, __LINE__));
GUARD_CU(parameters.Use<std::string>(
"output-filename",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
"", "file to output ranking values", __FILE__, __LINE__));
return retval;
}
/**
* @brief Run PageRank tests
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
* @param[in] parameters Execution parameters
* @param[in] graph Input graph
* @param[in] ref_node_ids Reference top-ranked vertex IDs
* @param[in] ref_ranks Reference ranking values per vertex
* @param[in] target Where to perform the PageRank
* \return cudaError_t error message(s), if any
*/
template <typename GraphT, typename ValueT = typename GraphT::ValueT>
cudaError_t RunTests(util::Parameters ¶meters, GraphT &graph,
typename GraphT::VertexT **ref_node_ids = NULL,
ValueT **ref_ranks = NULL,
util::Location target = util::DEVICE) {
cudaError_t retval = cudaSuccess;
typedef typename GraphT::VertexT VertexT;
typedef typename GraphT::SizeT SizeT;
typedef Problem<GraphT> ProblemT;
typedef Enactor<ProblemT> EnactorT;
util::CpuTimer cpu_timer, total_timer;
cpu_timer.Start();
total_timer.Start();
// parse configurations from parameters
bool quiet = parameters.Get<bool>("quiet");
int num_runs = parameters.Get<int>("num-runs");
std::string validation = parameters.Get<std::string>("validation");
std::vector<VertexT> srcs = parameters.Get<std::vector<VertexT>>("srcs");
int num_srcs = srcs.size();
util::Info info("PR", parameters, graph); // initialize Info structure
// Allocate host-side array (for both reference and GPU-computed results)
ValueT *h_ranks = new ValueT[graph.nodes];
VertexT *h_node_ids = new VertexT[graph.nodes];
// Allocate problem and enactor on GPU, and initialize them
ProblemT problem(parameters);
EnactorT enactor;
// util::PrintMsg("Before init");
GUARD_CU(problem.Init(graph, target));
// util::PrintMsg("Problem init");
GUARD_CU(enactor.Init(problem, target));
// util::PrintMsg("After init");
cpu_timer.Stop();
parameters.Set("preprocess-time", cpu_timer.ElapsedMillis());
// perform PageRank
VertexT src;
for (int run_num = 0; run_num < num_runs; ++run_num) {
src = srcs[run_num % num_srcs];
GUARD_CU(problem.Reset(src, target));
GUARD_CU(enactor.Reset(src, target));
util::PrintMsg("__________________________", !quiet);
cpu_timer.Start();
GUARD_CU(enactor.Enact(src));
cpu_timer.Stop();
info.CollectSingleRun(cpu_timer.ElapsedMillis());
util::PrintMsg(
"--------------------------\nRun " + std::to_string(run_num) +
" elapsed: " + std::to_string(cpu_timer.ElapsedMillis()) +
" ms, src = " + std::to_string(src) + ", #iterations = " +
std::to_string(enactor.enactor_slices[0].enactor_stats.iteration),
!quiet);
if (validation == "each") {
GUARD_CU(enactor.Extract());
GUARD_CU(problem.Extract(h_node_ids, h_ranks));
ValueT total_rank = 0;
#pragma omp parallel for reduction(+ : total_rank)
for (VertexT v = 0; v < graph.nodes; v++) {
total_rank += h_ranks[v];
}
util::PrintMsg("Total_rank = " + std::to_string(total_rank));
SizeT num_errors = app::pr::Validate_Results(
parameters, graph, src, h_node_ids, h_ranks,
ref_node_ids == NULL ? NULL : ref_node_ids[run_num % num_srcs],
ref_ranks == NULL ? NULL : ref_ranks[run_num % num_srcs], false);
}
}
cpu_timer.Start();
if (validation == "last") {
// Copy out results
GUARD_CU(enactor.Extract());
GUARD_CU(problem.Extract(h_node_ids, h_ranks));
if (!quiet) {
ValueT total_rank = 0;
#pragma omp parallel for reduction(+ : total_rank)
for (VertexT v = 0; v < graph.nodes; v++) {
total_rank += h_ranks[v];
}
util::PrintMsg("Total_rank = " + std::to_string(total_rank));
// Display Solution
DisplaySolution(h_node_ids, h_ranks, graph.nodes);
}
SizeT num_errors = app::pr::Validate_Results(
parameters, graph, src, h_node_ids, h_ranks,
ref_node_ids == NULL ? NULL : ref_node_ids[(num_runs - 1) % num_srcs],
ref_ranks == NULL ? NULL : ref_ranks[(num_runs - 1) % num_srcs], false);
}
if (parameters.Get<std::string>("output-filename") != "") {
cpu_timer.Start();
std::ofstream fout;
size_t buf_size = 1024 * 1024 * 16;
char *fout_buf = new char[buf_size];
fout.rdbuf()->pubsetbuf(fout_buf, buf_size);
fout.open(parameters.Get<std::string>("output-filename").c_str());
for (VertexT v = 0; v < graph.nodes; v++) {
fout << h_node_ids[v] + 1 << "," << h_ranks[v] << std::endl;
}
fout.close();
delete[] fout_buf;
fout_buf = NULL;
cpu_timer.Stop();
parameters.Set("write-time", cpu_timer.ElapsedMillis());
}
// compute running statistics
info.ComputeTraversalStats(enactor, (VertexT *)NULL);
// Display_Memory_Usage(problem);
#ifdef ENABLE_PERFORMANCE_PROFILING
// Display_Performance_Profiling(&enactor);
#endif
// Clean up
GUARD_CU(enactor.Release(target));
GUARD_CU(problem.Release(target));
delete[] h_node_ids;
h_node_ids = NULL;
delete[] h_ranks;
h_ranks = NULL;
cpu_timer.Stop();
total_timer.Stop();
info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis());
return retval;
}
} // namespace pr
} // namespace app
} // namespace gunrock
/*
* @brief Entry of gunrock_pagerank function
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the ranking values
* @param[in] parameters Execution parameters
* @param[in] graph Input graph
* @param[out] node_ids Return top-ranked vertex IDs
* @param[out] ranks Return PageRank scores per node
* \return double Return accumulated elapsed times for all runs
*/
template <typename GraphT, typename ValueT = typename GraphT::ValueT>
double gunrock_pagerank(gunrock::util::Parameters ¶meters, GraphT &graph,
typename GraphT::VertexT **node_ids, ValueT **ranks) {
typedef typename GraphT::VertexT VertexT;
typedef gunrock::app::pr::Problem<GraphT> ProblemT;
typedef gunrock::app::pr::Enactor<ProblemT> EnactorT;
gunrock::util::CpuTimer cpu_timer;
gunrock::util::Location target = gunrock::util::DEVICE;
double total_time = 0;
if (parameters.UseDefault("quiet")) parameters.Set("quiet", true);
// Allocate problem and enactor on GPU, and initialize them
ProblemT problem(parameters);
EnactorT enactor;
printf("Init Problem and Enactor for PR.\n");
problem.Init(graph, target);
enactor.Init(problem, target);
std::vector<VertexT> srcs = parameters.Get<std::vector<VertexT>>("srcs");
int num_runs = parameters.Get<int>("num-runs");
int num_srcs = srcs.size();
for (int run_num = 0; run_num < num_runs; ++run_num) {
printf("For run_num: %d, Reset problem and enactor and Enact.\n", run_num);
int src_num = run_num % num_srcs;
VertexT src = srcs[src_num];
problem.Reset(src, target);
enactor.Reset(src, target);
cpu_timer.Start();
enactor.Enact(src);
cpu_timer.Stop();
total_time += cpu_timer.ElapsedMillis();
enactor.Extract();
problem.Extract(node_ids[src_num], ranks[src_num]);
}
enactor.Release(target);
problem.Release(target);
srcs.clear();
return total_time;
}
/*
* @brief Simple interface that takes in a graph in CSR format
* @param[in] num_nodes Number of vertices in the input graph
* @param[in] num_edges Number of edges in the input graph
* @param[in] row_offsets CSR-formatted graph input row offsets
* @param[in] col_indices CSR-formatted graph input column indices
* @param[in] num_runs Number of runs to perform PR
* @param[in] sources Sources for personalized PR
* @param[in] normalize Whether to normalize ranking values
* @param[out] node_ids Return top-ranked vertex IDs
* @param[out] ranks Return PageRank scores per node
* \return double Return accumulated elapsed times for all runs
*/
template <typename VertexT = int, typename SizeT = int, typename ValueT = float>
double pagerank(const SizeT num_nodes, const SizeT num_edges,
const SizeT *row_offsets, const VertexT *col_indices,
const int num_runs, bool normalize, VertexT *sources,
VertexT **node_ids, ValueT **ranks) {
typedef typename gunrock::app::TestGraph<
VertexT, SizeT, ValueT, gunrock::graph::HAS_COO | gunrock::graph::HAS_CSC>
GraphT;
typedef typename gunrock::app::TestGraph<VertexT, SizeT, ValueT,
gunrock::graph::HAS_CSR>
Graph_CsrT;
typedef typename Graph_CsrT::CsrT CsrT;
// Setup parameters
gunrock::util::Parameters parameters("pr");
gunrock::graphio::UseParameters(parameters);
gunrock::app::pr::UseParameters(parameters);
gunrock::app::UseParameters_test(parameters);
parameters.Parse_CommandLine(0, NULL);
parameters.Set("graph-type", "by-pass");
parameters.Set("normalize", normalize);
parameters.Set("num-runs", num_runs);
std::vector<VertexT> srcs;
VertexT InvalidValue = gunrock::util::PreDefinedValues<VertexT>::InvalidValue;
for (int i = 0; i < num_runs; i++) {
if (sources != NULL)
srcs.push_back(sources[i]);
else
srcs.push_back(InvalidValue);
}
parameters.Set("srcs", srcs);
bool quiet = parameters.Get<bool>("quiet");
CsrT csr;
// Assign pointers into gunrock graph format
csr.Allocate(num_nodes, num_edges, gunrock::util::HOST);
csr.row_offsets.SetPointer((int *)row_offsets, num_nodes + 1,
gunrock::util::HOST);
csr.column_indices.SetPointer((int *)col_indices, num_edges,
gunrock::util::HOST);
// csr.Move(gunrock::util::HOST, gunrock::util::DEVICE);
gunrock::util::Location target = gunrock::util::HOST;
GraphT graph;
graph.FromCsr(csr, target, 0, quiet, true);
csr.Release();
gunrock::graphio::LoadGraph(parameters, graph);
// Run the PR
double elapsed_time = gunrock_pagerank(parameters, graph, node_ids, ranks);
// Cleanup
// graph.Release();
// srcs.clear();
return elapsed_time;
}
/*
* @brief Simple interface that takes in a graph in CSR format
* @param[in] num_nodes Number of vertices in the input graph
* @param[in] num_edges Number of edges in the input graph
* @param[in] row_offsets CSR-formatted graph input row offsets
* @param[in] col_indices CSR-formatted graph input column indices
* @param[in] source Source for personalized PR
* @param[in] normalize Whether to normalize ranking values
* @param[out] node_ids Return top-ranked vertex IDs
* @param[out] ranks Return PageRank scores per node
* \return double Return accumulated elapsed times for all runs
*/
template <typename VertexT = int, typename SizeT = int, typename ValueT = float>
double pagerank(const SizeT num_nodes, const SizeT num_edges,
const SizeT *row_offsets, const VertexT *col_indices,
bool normalize, VertexT source, VertexT *node_ids,
ValueT *ranks) {
if (source == -1) {
return pagerank(num_nodes, num_edges, row_offsets, col_indices,
1 /* num_runs */, normalize, (int *)NULL, &node_ids,
&ranks);
}
return pagerank(num_nodes, num_edges, row_offsets, col_indices,
1 /* num_runs */, normalize, &source, &node_ids, &ranks);
}
/*
* @brief Simple interface that takes in a graph in CSR format
* @param[in] num_nodes Number of vertices in the input graph
* @param[in] num_edges Number of edges in the input graph
* @param[in] row_offsets CSR-formatted graph input row offsets
* @param[in] col_indices CSR-formatted graph input column indices
* @param[in] normalize Whether to normalize ranking values
* @param[out] node_ids Return top-ranked vertex IDs
* @param[out] ranks Return PageRank scores per node
* \return double Return accumulated elapsed times for all runs
*/
double pagerank(const int num_nodes, const int num_edges,
const int *row_offsets, const int *col_indices, bool normalize,
int *node_ids, float *ranks) {
return pagerank(num_nodes, num_edges, row_offsets, col_indices, normalize,
(int)-1 /* source */, node_ids, ranks);
}
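// Minimal usage sketch for the simple C-style interface above (illustrative
// only: the tiny 3-node CSR graph and the variable names below are made up
// for this example and are not part of Gunrock):
//
//   int row_offsets[] = {0, 2, 3, 4};   // 3 nodes, 4 directed edges
//   int col_indices[] = {1, 2, 2, 0};
//   int node_ids[3];
//   float ranks[3];
//   double elapsed = pagerank(3, 4, row_offsets, col_indices,
//                             true /*normalize*/, node_ids, ranks);
//   // node_ids holds the top-ranked vertex IDs and ranks their scores,
//   // as described in the doc comments above.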
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
|
91ef1ae57b75640eafa74da0ad19a2ccedaf95f0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <caffepro/layers/dropoutsame_layer.h>
#include <caffepro/proto/caffe.pb.h>
#include <caffepro/utils/utils.h>
#include <caffepro/math/cublas_wrapper.h>
#include <caffepro/math/cublas_debug.h>
namespace caffepro {
dropoutsame_layer::dropoutsame_layer(caffepro_context *context, const LayerParameter ¶m)
: caffepro_layer(context, param) {
attr_.num_inputs_min = attr_.num_inputs_max = 1;
attr_.num_outputs_min = attr_.num_outputs_max = 1;
attr_.set_constraint(
layer_attribute::CF_REQUIRE_SAME_DIMTYPE_ACROSS_DEVICES
| layer_attribute::CF_ALLOW_INPLACE
);
}
dropoutsame_layer::~dropoutsame_layer() {
release_all();
}
void dropoutsame_layer::init() {
check_input();
threshold_ = layer_param_.dropoutsame_param().dropout_ratio();
force_random_ = layer_param_.dropoutsame_param().force_random();
DCHECK(threshold_ >= 0.);
DCHECK(threshold_ <= 1.);
scale_ = 1.f / (1.f - threshold_);
uint_thres_ = (unsigned int)(UINT_MAX * threshold_);
}
void dropoutsame_layer::resize() {
caffepro_layer::resize();
}
void dropoutsame_layer::on_before_forward() {
// threshold_ = 0.2 means removing 20%
unsigned int uint_r = (unsigned int)((double)rand() / RAND_MAX * UINT_MAX);
open_or_not_ = (uint_r > uint_thres_) ? true : false;
}
void dropoutsame_layer::on_forward(int device_index) {
const data_type* bottom_data = inputs_[0]->get(device_index)->gpu_data();
data_type* top_data = outputs_[0]->get(device_index)->mutable_gpu_data();
const int count = inputs_[0]->get(device_index)->count();
if (context_->get_phase() == caffepro_context::TRAIN || force_random_) {
#if _DEBUG
const int n = 100000;
int n_open = 0;
for (int i = 0; i < n; i++)
{
float r = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
if (r > threshold_)
n_open ++;
}
printf("ratio: %.5f\n", n_open / (float)n);
#endif
cublas_wrapper<data_type> cublas(context_, context_->get_current_device()->device_id());
if (open_or_not_)
cublas.copy(count, bottom_data, top_data);
else
outputs_[0]->get(device_index)->fill_data(0.f);
//cublas.scale(count, (data_type)0.0f, bottom_data, top_data);
}
else {
cublas_wrapper<data_type> cublas(context_, context_->get_current_device()->device_id());
cublas.scale(count, (data_type)1.f / scale_, bottom_data, top_data);
}
}
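// Backward pass of the whole-layer dropout: the incoming gradient is passed
// through only when the layer was open (open_or_not == true) and zeroed
// otherwise; scale_target selects between overwriting the output gradient (0)
// and accumulating into a scaled copy of it (non-zero).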
__global__ void dropoutsame_backward(const int n, const data_type* in_diff,
const bool open_or_not,
data_type* out_diff, const data_type scale_target) {
CUDA_KERNEL_LOOP(index, n) {
data_type v = in_diff[index] * (open_or_not);
if (scale_target == 0) {
out_diff[index] = v;
}
else {
out_diff[index] = out_diff[index] * scale_target + v;
}
}
}
void dropoutsame_layer::on_backward(int device_index, act_selector bp_acts, weight_selector bp_weights, act_selector clear_acts_diff, weight_selector clear_weights_diff) {
const data_type beta_acts = get_beta(clear_acts_diff, 0);
if (should_bp(bp_acts, 0)) {
const data_type* top_diff = outputs_[0]->get(device_index)->gpu_diff();
data_type* bottom_diff = inputs_[0]->get(device_index)->mutable_gpu_diff();
const int count = inputs_[0]->get(device_index)->count();
KERNEL_CALL(dropoutsame_backward, count)(count, top_diff, open_or_not_, bottom_diff, beta_acts);
CUDA_POST_KERNEL_CHECK;
}
}
} | 91ef1ae57b75640eafa74da0ad19a2ccedaf95f0.cu |
#include <caffepro/layers/dropoutsame_layer.h>
#include <caffepro/proto/caffe.pb.h>
#include <caffepro/utils/utils.h>
#include <caffepro/math/cublas_wrapper.h>
#include <caffepro/math/cublas_debug.h>
namespace caffepro {
dropoutsame_layer::dropoutsame_layer(caffepro_context *context, const LayerParameter ¶m)
: caffepro_layer(context, param) {
attr_.num_inputs_min = attr_.num_inputs_max = 1;
attr_.num_outputs_min = attr_.num_outputs_max = 1;
attr_.set_constraint(
layer_attribute::CF_REQUIRE_SAME_DIMTYPE_ACROSS_DEVICES
| layer_attribute::CF_ALLOW_INPLACE
);
}
dropoutsame_layer::~dropoutsame_layer() {
release_all();
}
void dropoutsame_layer::init() {
check_input();
threshold_ = layer_param_.dropoutsame_param().dropout_ratio();
force_random_ = layer_param_.dropoutsame_param().force_random();
DCHECK(threshold_ >= 0.);
DCHECK(threshold_ <= 1.);
scale_ = 1.f / (1.f - threshold_);
uint_thres_ = (unsigned int)(UINT_MAX * threshold_);
}
void dropoutsame_layer::resize() {
caffepro_layer::resize();
}
void dropoutsame_layer::on_before_forward() {
// threshold_ = 0.2 means removing 20%
unsigned int uint_r = (unsigned int)((double)rand() / RAND_MAX * UINT_MAX);
open_or_not_ = (uint_r > uint_thres_) ? true : false;
}
void dropoutsame_layer::on_forward(int device_index) {
const data_type* bottom_data = inputs_[0]->get(device_index)->gpu_data();
data_type* top_data = outputs_[0]->get(device_index)->mutable_gpu_data();
const int count = inputs_[0]->get(device_index)->count();
if (context_->get_phase() == caffepro_context::TRAIN || force_random_) {
#if _DEBUG
const int n = 100000;
int n_open = 0;
for (int i = 0; i < n; i++)
{
float r = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
if (r > threshold_)
n_open ++;
}
printf("ratio: %.5f\n", n_open / (float)n);
#endif
cublas_wrapper<data_type> cublas(context_, context_->get_current_device()->device_id());
if (open_or_not_)
cublas.copy(count, bottom_data, top_data);
else
outputs_[0]->get(device_index)->fill_data(0.f);
//cublas.scale(count, (data_type)0.0f, bottom_data, top_data);
}
else {
cublas_wrapper<data_type> cublas(context_, context_->get_current_device()->device_id());
cublas.scale(count, (data_type)1.f / scale_, bottom_data, top_data);
}
}
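// Backward pass of the whole-layer dropout: the incoming gradient is passed
// through only when the layer was open (open_or_not == true) and zeroed
// otherwise; scale_target selects between overwriting the output gradient (0)
// and accumulating into a scaled copy of it (non-zero).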
__global__ void dropoutsame_backward(const int n, const data_type* in_diff,
const bool open_or_not,
data_type* out_diff, const data_type scale_target) {
CUDA_KERNEL_LOOP(index, n) {
data_type v = in_diff[index] * (open_or_not);
if (scale_target == 0) {
out_diff[index] = v;
}
else {
out_diff[index] = out_diff[index] * scale_target + v;
}
}
}
void dropoutsame_layer::on_backward(int device_index, act_selector bp_acts, weight_selector bp_weights, act_selector clear_acts_diff, weight_selector clear_weights_diff) {
const data_type beta_acts = get_beta(clear_acts_diff, 0);
if (should_bp(bp_acts, 0)) {
const data_type* top_diff = outputs_[0]->get(device_index)->gpu_diff();
data_type* bottom_diff = inputs_[0]->get(device_index)->mutable_gpu_diff();
const int count = inputs_[0]->get(device_index)->count();
KERNEL_CALL(dropoutsame_backward, count)(count, top_diff, open_or_not_, bottom_diff, beta_acts);
CUDA_POST_KERNEL_CHECK;
}
}
} |
b735596fdb4921138ccacaf064f04cc495b75b88.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "matrixMultKernel_global.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
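// Benchmark driver: for each matrix size (up to the count given in argv[1])
// and each block shape above, the kernel is launched once, warmed up for 10
// iterations, then timed over 1000 launches; the accumulated time in
// microseconds is printed together with the block and matrix dimensions.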
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *Ad = NULL;
hipMalloc(&Ad, XSIZE*YSIZE);
float *Bd = NULL;
hipMalloc(&Bd, XSIZE*YSIZE);
float *Cd = NULL;
hipMalloc(&Cd, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(( matrixMultKernel_global), dim3(gridBlock),dim3(threadBlock), 0, 0, Ad,Bd,Cd,n);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(( matrixMultKernel_global), dim3(gridBlock),dim3(threadBlock), 0, 0, Ad,Bd,Cd,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(( matrixMultKernel_global), dim3(gridBlock),dim3(threadBlock), 0, 0, Ad,Bd,Cd,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | b735596fdb4921138ccacaf064f04cc495b75b88.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "matrixMultKernel_global.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
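// Benchmark driver: for each matrix size (up to the count given in argv[1])
// and each block shape above, the kernel is launched once, warmed up for 10
// iterations, then timed over 1000 launches; the accumulated time in
// microseconds is printed together with the block and matrix dimensions.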
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *Ad = NULL;
cudaMalloc(&Ad, XSIZE*YSIZE);
float *Bd = NULL;
cudaMalloc(&Bd, XSIZE*YSIZE);
float *Cd = NULL;
cudaMalloc(&Cd, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
matrixMultKernel_global<<<gridBlock,threadBlock>>>(Ad,Bd,Cd,n);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
matrixMultKernel_global<<<gridBlock,threadBlock>>>(Ad,Bd,Cd,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
matrixMultKernel_global<<<gridBlock,threadBlock>>>(Ad,Bd,Cd,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
0a0bed2dafa1f9d9460fc18fe03183182d28d677.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <gtest/gtest.h>
#include <thrust/device_vector.h>
#include <vector>
#include "nvstrings/NVStrings.h"
#include "./utils.h"
struct TestTimestamp : public GdfTest {
};
TEST_F(TestTimestamp, ToTimestamp)
{
{
std::vector<const char*> hstrs{
"1974-02-28T01:23:45Z", "2019-07-17T21:34:37Z", nullptr, "", "1974"};
NVStrings* strs = NVStrings::create_from_array(hstrs.data(), hstrs.size());
thrust::device_vector<unsigned long> results(hstrs.size(), 0);
strs->timestamp2long("%Y-%m-%dT%H:%M:%SZ", NVStrings::seconds, results.data().get());
int expected[] = {131246625, 1563399277, 0, 0, 0};
for (int idx = 0; idx < (int)hstrs.size(); ++idx) EXPECT_EQ((int)results[idx], expected[idx]);
NVStrings::destroy(strs);
}
{
std::vector<const char*> hstrs{"12.28.1982", "07.17.2019", "06"};
NVStrings* strs = NVStrings::create_from_array(hstrs.data(), hstrs.size());
thrust::device_vector<unsigned long> results(hstrs.size(), 0);
strs->timestamp2long("%m-%d-%Y", NVStrings::days, results.data().get());
int expected[] = {4744, 18094, 0};
for (int idx = 0; idx < (int)hstrs.size(); ++idx) EXPECT_EQ((int)results[idx], expected[idx]);
NVStrings::destroy(strs);
}
}
TEST_F(TestTimestamp, FromTimestamp)
{
{
unsigned long values[] = {1563399273};
thrust::device_vector<unsigned long> results(1);
hipMemcpy(results.data().get(), values, 1 * sizeof(unsigned long), hipMemcpyHostToDevice);
NVStrings* got =
NVStrings::long2timestamp(results.data().get(), 1, NVStrings::seconds, "%m/%d/%Y %H:%M");
const char* expected[] = {"07/17/2019 21:34"};
EXPECT_TRUE(verify_strings(got, expected));
NVStrings::destroy(got);
}
{
unsigned long values[] = {1563399273123};
thrust::device_vector<unsigned long> results(1);
hipMemcpy(results.data().get(), values, 1 * sizeof(unsigned long), hipMemcpyHostToDevice);
NVStrings* got =
NVStrings::long2timestamp(results.data().get(), 1, NVStrings::ms, "%H:%M:%S.%f");
const char* expected[] = {"21:34:33.123"};
EXPECT_TRUE(verify_strings(got, expected));
NVStrings::destroy(got);
}
}
| 0a0bed2dafa1f9d9460fc18fe03183182d28d677.cu | #include <cuda_runtime.h>
#include <gtest/gtest.h>
#include <thrust/device_vector.h>
#include <vector>
#include "nvstrings/NVStrings.h"
#include "./utils.h"
struct TestTimestamp : public GdfTest {
};
TEST_F(TestTimestamp, ToTimestamp)
{
{
std::vector<const char*> hstrs{
"1974-02-28T01:23:45Z", "2019-07-17T21:34:37Z", nullptr, "", "1974"};
NVStrings* strs = NVStrings::create_from_array(hstrs.data(), hstrs.size());
thrust::device_vector<unsigned long> results(hstrs.size(), 0);
strs->timestamp2long("%Y-%m-%dT%H:%M:%SZ", NVStrings::seconds, results.data().get());
int expected[] = {131246625, 1563399277, 0, 0, 0};
for (int idx = 0; idx < (int)hstrs.size(); ++idx) EXPECT_EQ((int)results[idx], expected[idx]);
NVStrings::destroy(strs);
}
{
std::vector<const char*> hstrs{"12.28.1982", "07.17.2019", "06"};
NVStrings* strs = NVStrings::create_from_array(hstrs.data(), hstrs.size());
thrust::device_vector<unsigned long> results(hstrs.size(), 0);
strs->timestamp2long("%m-%d-%Y", NVStrings::days, results.data().get());
int expected[] = {4744, 18094, 0};
for (int idx = 0; idx < (int)hstrs.size(); ++idx) EXPECT_EQ((int)results[idx], expected[idx]);
NVStrings::destroy(strs);
}
}
TEST_F(TestTimestamp, FromTimestamp)
{
{
unsigned long values[] = {1563399273};
thrust::device_vector<unsigned long> results(1);
cudaMemcpy(results.data().get(), values, 1 * sizeof(unsigned long), cudaMemcpyHostToDevice);
NVStrings* got =
NVStrings::long2timestamp(results.data().get(), 1, NVStrings::seconds, "%m/%d/%Y %H:%M");
const char* expected[] = {"07/17/2019 21:34"};
EXPECT_TRUE(verify_strings(got, expected));
NVStrings::destroy(got);
}
{
unsigned long values[] = {1563399273123};
thrust::device_vector<unsigned long> results(1);
cudaMemcpy(results.data().get(), values, 1 * sizeof(unsigned long), cudaMemcpyHostToDevice);
NVStrings* got =
NVStrings::long2timestamp(results.data().get(), 1, NVStrings::ms, "%H:%M:%S.%f");
const char* expected[] = {"21:34:33.123"};
EXPECT_TRUE(verify_strings(got, expected));
NVStrings::destroy(got);
}
}
|
33c3f3d95eba3647c5eef38a581bcebdac941def.hip | // !!! This is a file automatically generated by hipify!!!
#include <THHUNN/THHUNN.h>
#include <THHUNN/common.h>
#include <THHUNN/row2col.h>
#include <TH/THHalf.h>
#include <THHUNN/THHHalfAutoNumerics.cuh>
#include <THH/THHTensor.hpp>
#include <THH/THHStorage.hpp>
#include <THHUNN/generic/TemporalRowConvolution.hip>
#include <THH/THHGenerateFloatTypes.h>
| 33c3f3d95eba3647c5eef38a581bcebdac941def.cu | #include <THCUNN/THCUNN.h>
#include <THCUNN/common.h>
#include <THCUNN/row2col.h>
#include <TH/THHalf.h>
#include <THCUNN/THCHalfAutoNumerics.cuh>
#include <THC/THCTensor.hpp>
#include <THC/THCStorage.hpp>
#include <THCUNN/generic/TemporalRowConvolution.cu>
#include <THC/THCGenerateFloatTypes.h>
|
f3c8692cd1d5feca03d6afe0a6da68eebd7cb6ea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <time.h>
#include "book.h"
void fillMatrix(int *, int);
void printMatrix(int *, int);
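// Each thread computes one full row of C: thread tid_i accumulates the dot
// product of row tid_i of A with every column of B (all matrices are n x n,
// stored row-major in flat arrays).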
__global__ void matrixMult(int *a, int *b, int *c, int *n) {
int tid_i = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = 0; i < *n; ++i){
c[(tid_i * (*n)) + i] = 0;
for(int j = 0; j < *n; ++j){
c[(tid_i * (*n)) + i] += a[(tid_i * (*n)) + j] * b[(j * (*n)) + i];
}
}
}
int main(int argc, const char *argv[]) {
if (argc != 3) {
printf("Se necesita dos argumentos para correr este programa, el primero para el tamano de las matrices, y "
"otra que es 1 o 0 para indicar para imprimir o no las matrices\n");
exit(-1);
}
int n = atoi(argv[1]);
int print = atoi(argv[2]);
float tiempo1, tiempo2;
hipEvent_t inicio1, fin1, inicio2, fin2;
hipEventCreate(&inicio1);
hipEventCreate(&fin1);
hipEventCreate(&inicio2);
hipEventCreate(&fin2);
hipEventRecord( inicio1, 0 );
int *matA = (int *) malloc(n * n * sizeof(int));
int *matB = (int *) malloc(n * n * sizeof(int));
int *matC = (int *) malloc(n * n * sizeof(int));
/*
for (int i = 0; i < n; ++i) {
matA[i] = (int *) malloc(n * sizeof(int *));
matB[i] = (int *) malloc(n * sizeof(int *));
}
*/
int *dev_ma, *dev_mb, *dev_mc;// **tempa, **tempb;
int *dev_n;
// tempa = (int **) malloc(n * sizeof(int *));
// tempb = (int **) malloc(n * sizeof(int *));
HANDLE_ERROR(hipMalloc((void **) &dev_ma, n * n * sizeof(int)));
HANDLE_ERROR(hipMalloc((void **) &dev_mb, n * n * sizeof(int)));
HANDLE_ERROR(hipMalloc((void **) &dev_mc, n * n * sizeof(int)));
HANDLE_ERROR(hipMalloc((void **) &dev_n, sizeof(int)));
srand((int) time(NULL));
fillMatrix(matA, n);
fillMatrix(matB, n);
HANDLE_ERROR(hipMemcpy(dev_n, &n, sizeof(int), hipMemcpyHostToDevice));
/*
for (int i = 0; i < n; ++i) {
HANDLE_ERROR(hipMalloc((void **) &tempa[i], n * sizeof(int)));
HANDLE_ERROR(hipMemcpy (tempa[i], matA[i], n * sizeof(int), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMalloc((void **) &tempb[i], n * sizeof(int)));
HANDLE_ERROR(hipMemcpy (tempb[i], matB[i], n * sizeof(int), hipMemcpyHostToDevice));
}
*/
HANDLE_ERROR(hipMemcpy (dev_ma, matA, n * n * sizeof(int), hipMemcpyHostToDevice) );
HANDLE_ERROR(hipMemcpy (dev_mb, matB, n * n * sizeof(int), hipMemcpyHostToDevice) );
hipEventRecord( inicio2, 0 );
hipLaunchKernelGGL(( matrixMult), dim3(n/10),dim3(n/5), 0, 0, dev_ma, dev_mb, dev_mc, dev_n);
hipEventRecord( fin2, 0); // record the end time
hipEventSynchronize( fin2 ); // synchronize
hipEventElapsedTime( &tiempo2, inicio2, fin2 );
HANDLE_ERROR(hipMemcpy(matC, dev_mc, n * n * sizeof(int), hipMemcpyDeviceToHost));
if (print) {
printMatrix(matA, n);
printMatrix(matB, n);
printMatrix(matC, n);
}
HANDLE_ERROR( hipFree( dev_ma ) );
HANDLE_ERROR( hipFree( dev_mb ) );
HANDLE_ERROR( hipFree( dev_mc ) );
HANDLE_ERROR( hipFree( dev_n ) );
// free(tempa);
// free(tempb);
free(matA);
free(matB);
free(matC);
hipEventRecord( fin1, 0); // record the end time
hipEventSynchronize( fin1 ); // synchronize
hipEventElapsedTime( &tiempo1, inicio1, fin1 );
printf("Tiempo de clculo: %f , tiempo total: %f\n", tiempo2, tiempo1);
return 0;
}
void fillMatrix(int *m, int n) {
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
m[(i * n) + j] = (rand() % 991) + 10;
}
}
}
void printMatrix(int *m, int n) {
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
printf("m[%d][%d]= %d ", i, j, m[(i * n) + j]);
}
printf("\n");
}
}
| f3c8692cd1d5feca03d6afe0a6da68eebd7cb6ea.cu | #include <time.h>
#include "book.h"
void fillMatrix(int *, int);
void printMatrix(int *, int);
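// Each thread computes one full row of C: thread tid_i accumulates the dot
// product of row tid_i of A with every column of B (all matrices are n x n,
// stored row-major in flat arrays).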
__global__ void matrixMult(int *a, int *b, int *c, int *n) {
int tid_i = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = 0; i < *n; ++i){
c[(tid_i * (*n)) + i] = 0;
for(int j = 0; j < *n; ++j){
c[(tid_i * (*n)) + i] += a[(tid_i * (*n)) + j] * b[(j * (*n)) + i];
}
}
}
int main(int argc, const char *argv[]) {
if (argc != 3) {
printf("Se necesita dos argumentos para correr este programa, el primero para el tamano de las matrices, y "
"otra que es 1 o 0 para indicar para imprimir o no las matrices\n");
exit(-1);
}
int n = atoi(argv[1]);
int print = atoi(argv[2]);
float tiempo1, tiempo2;
cudaEvent_t inicio1, fin1, inicio2, fin2;
cudaEventCreate(&inicio1);
cudaEventCreate(&fin1);
cudaEventCreate(&inicio2);
cudaEventCreate(&fin2);
cudaEventRecord( inicio1, 0 );
int *matA = (int *) malloc(n * n * sizeof(int));
int *matB = (int *) malloc(n * n * sizeof(int));
int *matC = (int *) malloc(n * n * sizeof(int));
/*
for (int i = 0; i < n; ++i) {
matA[i] = (int *) malloc(n * sizeof(int *));
matB[i] = (int *) malloc(n * sizeof(int *));
}
*/
int *dev_ma, *dev_mb, *dev_mc;// **tempa, **tempb;
int *dev_n;
// tempa = (int **) malloc(n * sizeof(int *));
// tempb = (int **) malloc(n * sizeof(int *));
HANDLE_ERROR(cudaMalloc((void **) &dev_ma, n * n * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void **) &dev_mb, n * n * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void **) &dev_mc, n * n * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void **) &dev_n, sizeof(int)));
srand((int) time(NULL));
fillMatrix(matA, n);
fillMatrix(matB, n);
HANDLE_ERROR(cudaMemcpy(dev_n, &n, sizeof(int), cudaMemcpyHostToDevice));
/*
for (int i = 0; i < n; ++i) {
HANDLE_ERROR(cudaMalloc((void **) &tempa[i], n * sizeof(int)));
HANDLE_ERROR(cudaMemcpy (tempa[i], matA[i], n * sizeof(int), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMalloc((void **) &tempb[i], n * sizeof(int)));
HANDLE_ERROR(cudaMemcpy (tempb[i], matB[i], n * sizeof(int), cudaMemcpyHostToDevice));
}
*/
HANDLE_ERROR(cudaMemcpy (dev_ma, matA, n * n * sizeof(int), cudaMemcpyHostToDevice) );
HANDLE_ERROR(cudaMemcpy (dev_mb, matB, n * n * sizeof(int), cudaMemcpyHostToDevice) );
cudaEventRecord( inicio2, 0 );
matrixMult<<<n/10,n/5>>>(dev_ma, dev_mb, dev_mc, dev_n);
cudaEventRecord( fin2, 0); // record the end time
cudaEventSynchronize( fin2 ); // synchronize
cudaEventElapsedTime( &tiempo2, inicio2, fin2 );
HANDLE_ERROR(cudaMemcpy(matC, dev_mc, n * n * sizeof(int), cudaMemcpyDeviceToHost));
if (print) {
printMatrix(matA, n);
printMatrix(matB, n);
printMatrix(matC, n);
}
HANDLE_ERROR( cudaFree( dev_ma ) );
HANDLE_ERROR( cudaFree( dev_mb ) );
HANDLE_ERROR( cudaFree( dev_mc ) );
HANDLE_ERROR( cudaFree( dev_n ) );
// free(tempa);
// free(tempb);
free(matA);
free(matB);
free(matC);
cudaEventRecord( fin1, 0); // record the end time
cudaEventSynchronize( fin1 ); // synchronize
cudaEventElapsedTime( &tiempo1, inicio1, fin1 );
printf("Tiempo de cálculo: %f , tiempo total: %f\n", tiempo2, tiempo1);
return 0;
}
void fillMatrix(int *m, int n) {
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
m[(i * n) + j] = (rand() % 991) + 10;
}
}
}
void printMatrix(int *m, int n) {
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
printf("m[%d][%d]= %d ", i, j, m[(i * n) + j]);
}
printf("\n");
}
}
|
1369244499ef0870c46b970ea10953e687ca44df.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void add(int *a, int *r, int *g, int *b, float *gc)
{
int i = (blockIdx.x*blockDim.x) + threadIdx.x;
gc[5120 * 6 + i * 6 ] = b[i] * 0.00390625;
//gc[5120 * 6 + i * 6 ] = float(b[i]) / 256;
gc[5120 * 6 + i * 6 + 1] = g[i] * 0.00390625;
//gc[5120 * 6 + i * 6 + 1] = float(g[i]) / 256;
gc[5120 * 6 + i * 6 + 2] = r[i] * 0.00390625;
//gc[5120 * 6 + i * 6 + 2] = float(r[i]) / 256;
gc[5120 * 6 + i * 6 + 3] = float(i - ((i>>9)<<9) ); // i%512
//gc[5120 * 6 + i * 6 + 3] = float(i % 512);
gc[5120 * 6 + i * 6 + 4] = float( i >> 9);
//gc[5120 * 6 + i * 6 + 4] = float((i - (i % 512)) / 512);
gc[5120 * 6 + i * 6 + 5] = float(a[i]);
} | 1369244499ef0870c46b970ea10953e687ca44df.cu | #include "includes.h"
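// Packs one pixel per thread into a 6-float record: B, G, R scaled to [0,1)
// (0.00390625 == 1/256), x = i % 512, y = i / 512, and the value a[i].
// Records are written starting at an offset of 5120 * 6 floats into gc; the
// 512-pixel row width and the 5120-record offset are hard-coded assumptions
// of the surrounding application.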
__global__ void add(int *a, int *r, int *g, int *b, float *gc)
{
int i = (blockIdx.x*blockDim.x) + threadIdx.x;
gc[5120 * 6 + i * 6 ] = b[i] * 0.00390625;
//gc[5120 * 6 + i * 6 ] = float(b[i]) / 256;
gc[5120 * 6 + i * 6 + 1] = g[i] * 0.00390625;
//gc[5120 * 6 + i * 6 + 1] = float(g[i]) / 256;
gc[5120 * 6 + i * 6 + 2] = r[i] * 0.00390625;
//gc[5120 * 6 + i * 6 + 2] = float(r[i]) / 256;
gc[5120 * 6 + i * 6 + 3] = float(i - ((i>>9)<<9) ); // i%512
//gc[5120 * 6 + i * 6 + 3] = float(i % 512);
gc[5120 * 6 + i * 6 + 4] = float( i >> 9);
//gc[5120 * 6 + i * 6 + 4] = float((i - (i % 512)) / 512);
gc[5120 * 6 + i * 6 + 5] = float(a[i]);
} |
6e0011619fb0d5f38b080f2c5fded68f5f45ac7f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include "cutil.h"
// #define DEBUG
// Only one of these should be enabled at a time. MKaech
//#define TM_SYNC
#define ATOMIC_SYNC
//#define NO_SYNC
#define NUM_ACCOUNTS 1000000
#define NUM_TRANSACTIONS 122880
#define THREADS_PER_BLOCK_X 192
#define THREADS_PER_BLOCK_Y 1
#define THREADS_PER_BLOCK_Z 1
#define BLOCKS_PER_GRID_X 120
#define BLOCKS_PER_GRID_Y 1
#define BLOCKS_PER_GRID_Z 1 // As of CUDA 2.0 this dimension MUST be 1. MKaech
#define THREADS_PER_BLOCK (THREADS_PER_BLOCK_X * THREADS_PER_BLOCK_Y * THREADS_PER_BLOCK_Z)
#define TOTAL_THREADS (THREADS_PER_BLOCK * BLOCKS_PER_GRID_X * BLOCKS_PER_GRID_Y * BLOCKS_PER_GRID_Z)
// these macros are for use in the shader!
#define BLOCK_ID ( blockIdx.x + (BLOCKS_PER_GRID_X * blockIdx.y) + (BLOCKS_PER_GRID_X * BLOCKS_PER_GRID_Y * blockIdx.z) )
#define THREAD_ID ( (THREADS_PER_BLOCK * BLOCK_ID) + threadIdx.x + (THREADS_PER_BLOCK_X * threadIdx.y) + (THREADS_PER_BLOCK_X * THREADS_PER_BLOCK_Y * threadIdx.z) )
struct account
{
int lock;
int balance;
};
struct transaction
{
int amount;
int src_account;
int dest_account;
};
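// Each thread walks the transaction list with a stride of TOTAL_THREADS and
// applies its transfers to the shared account array. A spinlock is taken with
// atomicCAS on the higher-addressed of the two accounts (lock1); note that in
// this version the acquisition of the second lock (lock2) is commented out,
// so only one of the two accounts involved is actually locked while the
// balances are updated.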
__global__ void interac_atomic( account* __restrict__ accounts, transaction *__restrict__ transactions, int numTransactions)
{
int id = THREAD_ID;
for(int index = id; index < numTransactions; index += TOTAL_THREADS)
{
transaction* action = &transactions[index];
account* src = &accounts[action->src_account];
account* dest = &accounts[action->dest_account];
// sanity check
if(action->src_account == action->dest_account)
{
continue;
}
// acquire locks
account* lock1;
account* lock2;
if (src > dest) {
lock1 = src;
lock2 = dest;
} else {
lock2 = src;
lock1 = dest;
}
// do transaction
//int done = 0;
// do{
while(true){
if(atomicCAS(&lock1->lock, 0, 1) == 0){
//if(atomicCAS(&lock2->lock,0,1) ==0){
// do transaction
src->balance -= action->amount;
dest->balance += action->amount;
// release locks
//atomicExch(&lock2->lock, 0);
atomicExch(&lock1->lock, 0);
//lock1->lock = 0;
//done=1;
break;
// }
//atomicExch(&lock1->lock,0);
}
// atomicExch(&lock1->lock, 0);
}//while(!done);
}
}
void interac_gold(account* __restrict__ accounts, transaction* __restrict__ transactions, int num_transactions)
{
for(int i = 0; i < num_transactions; ++i)
{
transaction* action = &transactions[i];
account* src = &accounts[action->src_account];
account* dest = &accounts[action->dest_account];
src->balance -= action->amount;
dest->balance += action->amount;
}
}
int main(int argc, const char** argv)
{
printf("Initializing...\n");
//CUT_DEVICE_INIT(argc, argv);
bool useTM = false;
srand(2009); // set seed for rand()
// allocate host memory for accounts
unsigned int accounts_size = sizeof(account) * NUM_ACCOUNTS;
unsigned int transactions_size = sizeof(transaction) * NUM_TRANSACTIONS;
account* host_accounts = (account*)malloc(accounts_size);
account* gold_accounts = (account*)malloc(accounts_size);
transaction* host_transactions = (transaction*)malloc(transactions_size);
// create random account balances
for (int i = 0; i < NUM_ACCOUNTS; ++i)
{
host_accounts[i].lock = 0;
host_accounts[i].balance = (int) fmod((float)rand(),100.0f);
gold_accounts[i].lock = 0;
gold_accounts[i].balance = host_accounts[i].balance;
#ifdef DEBUG
printf( "acct%u : $%d\n", i, host_accounts[i].balance );
#endif
}
// create random transaction pairs
for (int i = 0; i < NUM_TRANSACTIONS; ++i)
{
host_transactions[i].amount = (int) fmod((float)rand(),50.0f);
host_transactions[i].src_account = rand() % NUM_ACCOUNTS;
host_transactions[i].dest_account = rand() % NUM_ACCOUNTS;
#ifdef DEBUG
printf( "%u : $%d from acct%u => to acct%u\n",
i, host_transactions[i].amount,
host_transactions[i].src_account,
host_transactions[i].dest_account );
#endif
// make sure src != dest
while(host_transactions[i].src_account == host_transactions[i].dest_account)
{
host_transactions[i].dest_account = rand() % NUM_ACCOUNTS;
}
}
// allocate device memory
account* device_accounts;
transaction* device_transactions;
CUDA_SAFE_CALL(hipMalloc((void**) &device_accounts, accounts_size));
CUDA_SAFE_CALL(hipMalloc((void**) &device_transactions, transactions_size));
// copy host memory to device
CUDA_SAFE_CALL(hipMemcpy(device_accounts, host_accounts, accounts_size, hipMemcpyHostToDevice) );
CUDA_SAFE_CALL(hipMemcpy(device_transactions, host_transactions, transactions_size, hipMemcpyHostToDevice) );
// setup execution parameters
dim3 block_size(THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y, THREADS_PER_BLOCK_Z);
dim3 grid_size(BLOCKS_PER_GRID_X, BLOCKS_PER_GRID_Y, BLOCKS_PER_GRID_Z);
printf("Beginning kernel execution...\n");
// create and start timer
unsigned int timer = 0;
//CUT_SAFE_CALL(cutCreateTimer(&timer));
//CUT_SAFE_CALL(cutStartTimer(timer));
// execute the kernel
hipLaunchKernelGGL(( interac_atomic), dim3(grid_size), dim3(block_size) , 0, 0, device_accounts, device_transactions, NUM_TRANSACTIONS);
hipDeviceSynchronize();
// check if kernel execution generated and error
CUT_CHECK_ERROR("Kernel execution failed");
// copy result from device to host
CUDA_SAFE_CALL(hipMemcpy(host_accounts, device_accounts, accounts_size, hipMemcpyDeviceToHost) );
// stop and destroy timer
//CUT_SAFE_CALL(cutStopTimer(timer));
//printf("Kernel processing time: %f (ms) \n", cutGetTimerValue(timer));
//CUT_SAFE_CALL(cutDeleteTimer(timer));
printf("Computing gold results...\n");
unsigned int timer_cpu = 0;
//CUT_SAFE_CALL(cutCreateTimer(&timer_cpu));
//CUT_SAFE_CALL(cutStartTimer(timer_cpu));
interac_gold(gold_accounts, host_transactions, NUM_TRANSACTIONS);
//CUT_SAFE_CALL(cutStopTimer(timer_cpu));
//printf("Gold result processing time: %f (ms) \n", cutGetTimerValue(timer_cpu));
//CUT_SAFE_CALL(cutDeleteTimer(timer_cpu));
printf("Comparing results...\n");
// check result
bool success = true;
for (int i = 0; i < NUM_ACCOUNTS; ++i)
{
if(gold_accounts[i].balance != host_accounts[i].balance)
{
success = false;
printf("Difference found in account %d: Gold = %d, Kernel = %d\n", i, gold_accounts[i].balance, host_accounts[i].balance);
}
}
printf("Test %s\n", (success ? "PASSED! All account balances were correct." : "FAILED!"));
// clean up memory
free(host_accounts);
free(gold_accounts);
free(host_transactions);
CUDA_SAFE_CALL(hipFree(device_accounts));
CUDA_SAFE_CALL(hipFree(device_transactions));
//CUT_EXIT(argc, argv);
}
| 6e0011619fb0d5f38b080f2c5fded68f5f45ac7f.cu | // includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include "cutil.h"
// #define DEBUG
// Only one of these should be enabled at a time. MKaech
//#define TM_SYNC
#define ATOMIC_SYNC
//#define NO_SYNC
#define NUM_ACCOUNTS 1000000
#define NUM_TRANSACTIONS 122880
#define THREADS_PER_BLOCK_X 192
#define THREADS_PER_BLOCK_Y 1
#define THREADS_PER_BLOCK_Z 1
#define BLOCKS_PER_GRID_X 120
#define BLOCKS_PER_GRID_Y 1
#define BLOCKS_PER_GRID_Z 1 // As of CUDA 2.0 this dimension MUST be 1. MKaech
#define THREADS_PER_BLOCK (THREADS_PER_BLOCK_X * THREADS_PER_BLOCK_Y * THREADS_PER_BLOCK_Z)
#define TOTAL_THREADS (THREADS_PER_BLOCK * BLOCKS_PER_GRID_X * BLOCKS_PER_GRID_Y * BLOCKS_PER_GRID_Z)
// these macros are for use in the shader!
#define BLOCK_ID ( blockIdx.x + (BLOCKS_PER_GRID_X * blockIdx.y) + (BLOCKS_PER_GRID_X * BLOCKS_PER_GRID_Y * blockIdx.z) )
#define THREAD_ID ( (THREADS_PER_BLOCK * BLOCK_ID) + threadIdx.x + (THREADS_PER_BLOCK_X * threadIdx.y) + (THREADS_PER_BLOCK_X * THREADS_PER_BLOCK_Y * threadIdx.z) )
struct account
{
int lock;
int balance;
};
struct transaction
{
int amount;
int src_account;
int dest_account;
};
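// Each thread walks the transaction list with a stride of TOTAL_THREADS and
// applies its transfers to the shared account array. A spinlock is taken with
// atomicCAS on the higher-addressed of the two accounts (lock1); note that in
// this version the acquisition of the second lock (lock2) is commented out,
// so only one of the two accounts involved is actually locked while the
// balances are updated.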
__global__ void interac_atomic( account* __restrict__ accounts, transaction *__restrict__ transactions, int numTransactions)
{
int id = THREAD_ID;
for(int index = id; index < numTransactions; index += TOTAL_THREADS)
{
transaction* action = &transactions[index];
account* src = &accounts[action->src_account];
account* dest = &accounts[action->dest_account];
// sanity check
if(action->src_account == action->dest_account)
{
continue;
}
// acquire locks
account* lock1;
account* lock2;
if (src > dest) {
lock1 = src;
lock2 = dest;
} else {
lock2 = src;
lock1 = dest;
}
// do transaction
//int done = 0;
// do{
while(true){
if(atomicCAS(&lock1->lock, 0, 1) == 0){
//if(atomicCAS(&lock2->lock,0,1) ==0){
// do transaction
src->balance -= action->amount;
dest->balance += action->amount;
// release locks
//atomicExch(&lock2->lock, 0);
atomicExch(&lock1->lock, 0);
//lock1->lock = 0;
//done=1;
break;
// }
//atomicExch(&lock1->lock,0);
}
// atomicExch(&lock1->lock, 0);
}//while(!done);
}
}
void interac_gold(account* __restrict__ accounts, transaction* __restrict__ transactions, int num_transactions)
{
for(int i = 0; i < num_transactions; ++i)
{
transaction* action = &transactions[i];
account* src = &accounts[action->src_account];
account* dest = &accounts[action->dest_account];
src->balance -= action->amount;
dest->balance += action->amount;
}
}
int main(int argc, const char** argv)
{
printf("Initializing...\n");
//CUT_DEVICE_INIT(argc, argv);
bool useTM = false;
srand(2009); // set seed for rand()
// allocate host memory for accounts
unsigned int accounts_size = sizeof(account) * NUM_ACCOUNTS;
unsigned int transactions_size = sizeof(transaction) * NUM_TRANSACTIONS;
account* host_accounts = (account*)malloc(accounts_size);
account* gold_accounts = (account*)malloc(accounts_size);
transaction* host_transactions = (transaction*)malloc(transactions_size);
// create random account balances
for (int i = 0; i < NUM_ACCOUNTS; ++i)
{
host_accounts[i].lock = 0;
host_accounts[i].balance = (int) fmod((float)rand(),100.0f);
gold_accounts[i].lock = 0;
gold_accounts[i].balance = host_accounts[i].balance;
#ifdef DEBUG
printf( "acct%u : $%d\n", i, host_accounts[i].balance );
#endif
}
// create random transaction pairs
for (int i = 0; i < NUM_TRANSACTIONS; ++i)
{
host_transactions[i].amount = (int) fmod((float)rand(),50.0f);
host_transactions[i].src_account = rand() % NUM_ACCOUNTS;
host_transactions[i].dest_account = rand() % NUM_ACCOUNTS;
#ifdef DEBUG
printf( "%u : $%d from acct%u => to acct%u\n",
i, host_transactions[i].amount,
host_transactions[i].src_account,
host_transactions[i].dest_account );
#endif
// make sure src != dest
while(host_transactions[i].src_account == host_transactions[i].dest_account)
{
host_transactions[i].dest_account = rand() % NUM_ACCOUNTS;
}
}
// allocate device memory
account* device_accounts;
transaction* device_transactions;
CUDA_SAFE_CALL(cudaMalloc((void**) &device_accounts, accounts_size));
CUDA_SAFE_CALL(cudaMalloc((void**) &device_transactions, transactions_size));
// copy host memory to device
CUDA_SAFE_CALL(cudaMemcpy(device_accounts, host_accounts, accounts_size, cudaMemcpyHostToDevice) );
CUDA_SAFE_CALL(cudaMemcpy(device_transactions, host_transactions, transactions_size, cudaMemcpyHostToDevice) );
// setup execution parameters
dim3 block_size(THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y, THREADS_PER_BLOCK_Z);
dim3 grid_size(BLOCKS_PER_GRID_X, BLOCKS_PER_GRID_Y, BLOCKS_PER_GRID_Z);
printf("Beginning kernel execution...\n");
// create and start timer
unsigned int timer = 0;
//CUT_SAFE_CALL(cutCreateTimer(&timer));
//CUT_SAFE_CALL(cutStartTimer(timer));
// execute the kernel
interac_atomic<<< grid_size, block_size >>>(device_accounts, device_transactions, NUM_TRANSACTIONS);
cudaThreadSynchronize();
// check if kernel execution generated and error
CUT_CHECK_ERROR("Kernel execution failed");
// copy result from device to host
CUDA_SAFE_CALL(cudaMemcpy(host_accounts, device_accounts, accounts_size, cudaMemcpyDeviceToHost) );
// stop and destroy timer
//CUT_SAFE_CALL(cutStopTimer(timer));
//printf("Kernel processing time: %f (ms) \n", cutGetTimerValue(timer));
//CUT_SAFE_CALL(cutDeleteTimer(timer));
printf("Computing gold results...\n");
unsigned int timer_cpu = 0;
//CUT_SAFE_CALL(cutCreateTimer(&timer_cpu));
//CUT_SAFE_CALL(cutStartTimer(timer_cpu));
interac_gold(gold_accounts, host_transactions, NUM_TRANSACTIONS);
//CUT_SAFE_CALL(cutStopTimer(timer_cpu));
//printf("Gold result processing time: %f (ms) \n", cutGetTimerValue(timer_cpu));
//CUT_SAFE_CALL(cutDeleteTimer(timer_cpu));
printf("Comparing results...\n");
// check result
bool success = true;
for (int i = 0; i < NUM_ACCOUNTS; ++i)
{
if(gold_accounts[i].balance != host_accounts[i].balance)
{
success = false;
printf("Difference found in account %d: Gold = %d, Kernel = %d\n", i, gold_accounts[i].balance, host_accounts[i].balance);
}
}
printf("Test %s\n", (success ? "PASSED! All account balances were correct." : "FAILED!"));
// clean up memory
free(host_accounts);
free(gold_accounts);
free(host_transactions);
CUDA_SAFE_CALL(cudaFree(device_accounts));
CUDA_SAFE_CALL(cudaFree(device_transactions));
//CUT_EXIT(argc, argv);
}
|
4b748be99ef7f3748307c7c1423152b3c552899d.hip | // !!! This is a file automatically generated by hipify!!!
#include "common.h"
#include <stdio.h>
#include <hip/hip_runtime.h>
/*
* A simple introduction to programming in CUDA. This program prints "Hello
* World from GPU!" from 10 CUDA threads running on the GPU.
*/
__global__ void helloFromGPU()
{
printf("Hello World from GPU!\n");
}
int main(int argc, char **argv)
{
printf("Hello World from CPU!\n");
hipLaunchKernelGGL(( helloFromGPU), dim3(1), dim3(10), 0, 0, );
CHECK(hipDeviceReset());
return 0;
}
| 4b748be99ef7f3748307c7c1423152b3c552899d.cu | #include "common.h"
#include <stdio.h>
#include <cuda.h>
/*
* A simple introduction to programming in CUDA. This program prints "Hello
* World from GPU!" from 10 CUDA threads running on the GPU.
*/
__global__ void helloFromGPU()
{
printf("Hello World from GPU!\n");
}
int main(int argc, char **argv)
{
printf("Hello World from CPU!\n");
helloFromGPU<<<1, 10>>>();
CHECK(cudaDeviceReset());
return 0;
}
|
1da863b3dc0e79465135b72fa08f326bb53eb935.hip | // !!! This is a file automatically generated by hipify!!!
#include <string>
#include <algorithm>
#include <math.h>
#include <stdio.h>
#include <vector>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
#include "cudaRenderer.h"
#include "image.h"
#include "sceneLoader.h"
// Constants
#define ThrInBlock_X 32
#define ThrInBlock_Y 32
#define ThrInBlock (ThrInBlock_X*ThrInBlock_Y)
#define CirclesPerThread 16
#define MaxCircles 2048
#define SCAN_BLOCK_DIM (ThrInBlock)
#include "exclusiveScan.cu_inl"
#include "circleBoxTest.cu_inl"
////////////////////////////////////////////////////////////////////////////////////////
// Putting all the cuda kernels here
///////////////////////////////////////////////////////////////////////////////////////
struct GlobalConstants {
SceneName sceneName;
int numCircles;
float* position;
float* color;
float* radius;
int imageWidth;
int imageHeight;
float* imageData;
};
// Global variable that is in scope, but read-only, for all cuda
// kernels. The __constant__ modifier designates this variable will
// be stored in special "constant" memory on the GPU.
__constant__ GlobalConstants cuConstRendererParams;
// kernelClearImage -- (CUDA device code)
//
// Clear the image, setting all pixels to the specified color rgba
__global__ void kernelClearImage(float r, float g, float b, float a) {
int imageX = blockIdx.x * blockDim.x + threadIdx.x;
int imageY = blockIdx.y * blockDim.y + threadIdx.y;
int width = cuConstRendererParams.imageWidth;
int height = cuConstRendererParams.imageHeight;
if (imageX >= width || imageY >= height)
return;
int offset = 4 * (imageY * width + imageX);
float4 value = make_float4(r, g, b, a);
// write to global memory: As an optimization, I use a float4
// store, that results in more efficient code than if I coded this
// up as four seperate fp32 stores.
*(float4*)(&cuConstRendererParams.imageData[offset]) = value;
}
// kernelRenderCircles -- (CUDA device code)
//
// each thread in a block scans a portion of the circle array, so that each
// block ends up with the list of indices of the circles that overlap it.
// Finally, each thread renders one pixel of the final image.
//
__global__ void kernelRenderCircles() {
__shared__ uint circleCount[ThrInBlock];
__shared__ uint circleList[MaxCircles];
__shared__ uint indexList[ThrInBlock];
short imageWidth = cuConstRendererParams.imageWidth;
short imageHeight = cuConstRendererParams.imageHeight;
// precompute the reciprocals for later use
float invWidth = 1.f / imageWidth;
float invHeight = 1.f / imageHeight;
// each block is assigned a square ThrInBlock_X x ThrInBlock_Y
// portion of the final image
// linear index of the thread within the block
int threadIndex = threadIdx.y * blockDim.x + threadIdx.x;
// block coordinates
short blockLeftIdx = blockIdx.x * ThrInBlock_X;
short blockRightIdx = blockLeftIdx + ThrInBlock_X - 1;
short blockTopIdx = blockIdx.y * ThrInBlock_Y;
short blockBottomIdx = blockTopIdx + ThrInBlock_Y - 1;
// normalized block coordinates
float blockLeftNormIdx = blockLeftIdx * invWidth;
float blockRightNormIdx = blockRightIdx * invWidth;
float blockTopNormIdx = blockTopIdx * invHeight;
float blockBottomNormIdx = blockBottomIdx * invHeight;
// each thread in each block scans a small slice of the circle array,
// so every block ends up scanning the whole array
// compute the index range assigned to each thread
// the " + ThrInBlock - 1 " compensates for the integer truncation
int circlesPerThread = (cuConstRendererParams.numCircles + ThrInBlock - 1) / ThrInBlock;
int circleStartIdx = threadIndex * circlesPerThread;
int circleEndIdx = circleStartIdx + circlesPerThread;
// the last thread takes all of the remaining circles
if(threadIndex == ThrInBlock - 1)
circleEndIdx = cuConstRendererParams.numCircles;
int threadCircleCount = 0;
uint threadCircleList[CirclesPerThread];
for(int c = circleStartIdx; c<circleEndIdx; c++){
// always make sure we do not run past the end of the circle array
if(c >= cuConstRendererParams.numCircles)
break;
float3 position = *(float3*)(&cuConstRendererParams.position[c * 3]);
float radius = cuConstRendererParams.radius[c];
// if the bounding box of the circle has at least one point inside
// the current block, save its index
if(circleInBoxConservative(position.x, position.y, radius, blockLeftNormIdx, blockRightNormIdx, blockBottomNormIdx, blockTopNormIdx) == 1){
threadCircleList[threadCircleCount++] = c;
}
}
// at this point threadCircleCount holds how many of the circles examined by this thread fall inside the current block,
// and threadCircleList holds the indices of those circles.
circleCount[threadIndex] = threadCircleCount;
__syncthreads();
// circleCount is a sequence holding, for each thread, the number of circles
// (belonging to the current block) found in that thread's slice of the full circle array.
// Now store in indexList the exclusive prefix sums of circleCount,
// so that each thread later has the correct starting index (privateIndex)
// into circleList from which to copy the circle indices found in its slice
//
sharedMemExclusiveScan(threadIndex, circleCount, indexList, circleList, ThrInBlock);
__syncthreads();
uint privateIndex = indexList[threadIndex];
for(int i=0; i<threadCircleCount; i++){
circleList[privateIndex++] = threadCircleList[i];
}
__syncthreads();
// the scan is exclusive, so it does not include the last partial sum; add it now
// to obtain the total number of circles in the block
uint totalCircles = indexList[ThrInBlock-1] + circleCount[ThrInBlock-1];
// now that we have the list of circles for the current block
// we can start the actual rendering
uint pixelXCoord = blockLeftIdx + threadIdx.x;
uint pixelYCoord = blockTopIdx + threadIdx.y;
// compute the continuous coordinate of the pixel center,
// then sample the pixel color based on whether the circle
// actually covers the pixel center or not, i.e.
// what in CGI jargon is called point sampling
float2 pixelCenterNorm = make_float2(invWidth * (static_cast<float>(pixelXCoord) + 0.5f),
invHeight * (static_cast<float>(pixelYCoord) + 0.5f));
float4* imgPtr = (float4*)(&cuConstRendererParams.imageData[4 * (pixelYCoord * imageWidth + pixelXCoord)]);
float4 pixelData = *imgPtr;
// now each thread renders one pixel, starting from the farthest circle.
// This invariant comes from the circles already being given in order of
// decreasing distance, and it is preserved when copying the indices into circleList.
for (uint i=0; i<totalCircles; i++){
uint circleIndex = circleList[i];
float3 position = *(float3*)(&cuConstRendererParams.position[circleIndex * 3]);
float radius = cuConstRendererParams.radius[circleIndex];
float diffX = position.x - pixelCenterNorm.x;
float diffY = position.y - pixelCenterNorm.y;
float pixelDist = diffX * diffX + diffY * diffY;
float maxDist = radius * radius;
// check whether the circle actually covers the pixel,
// or whether only its bounding box did
if (pixelDist <= maxDist){
int index3 = 3 * circleIndex;
float3 rgb = *(float3*)&(cuConstRendererParams.color[index3]);
float alpha = .5f;
float oneMinusAlpha = 1.f - alpha;
pixelData.x = alpha * rgb.x + oneMinusAlpha * pixelData.x;
pixelData.y = alpha * rgb.y + oneMinusAlpha * pixelData.y;
pixelData.z = alpha * rgb.z + oneMinusAlpha * pixelData.z;
pixelData.w = alpha + pixelData.w;
}
}
*imgPtr = pixelData;
}
////////////////////////////////////////////////////////////////////////////////////////
CudaRenderer::CudaRenderer() {
image = NULL;
numCircles = 0;
position = NULL;
color = NULL;
radius = NULL;
cudaDevicePosition = NULL;
cudaDeviceColor = NULL;
cudaDeviceRadius = NULL;
cudaDeviceImageData = NULL;
}
CudaRenderer::~CudaRenderer() {
if (image) {
delete image;
}
if (position) {
delete [] position;
delete [] color;
delete [] radius;
}
if (cudaDevicePosition) {
hipFree(cudaDevicePosition);
hipFree(cudaDeviceColor);
hipFree(cudaDeviceRadius);
hipFree(cudaDeviceImageData);
}
}
const Image*
CudaRenderer::getImage() {
// need to copy contents of the rendered image from device memory
// before we expose the Image object to the caller
printf("Copying image data from device\n");
hipMemcpy(image->data,
cudaDeviceImageData,
sizeof(float) * 4 * image->width * image->height,
hipMemcpyDeviceToHost);
return image;
}
void
CudaRenderer::loadScene(SceneName scene, bool benchMode) {
sceneName = scene;
loadCircleScene(sceneName, numCircles, position, color, radius, benchMode);
}
void
CudaRenderer::setup() {
int deviceCount = 0;
hipError_t err = hipGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Inizializzo CUDA per il Renderer\n");
printf("Trovati %d dispositivi CUDA\n", deviceCount);
for (int i=0; i<deviceCount; i++) {
hipDeviceProp_t deviceProps;
hipGetDeviceProperties(&deviceProps, i);
printf("Dispositivo %d: %s\n", i, deviceProps.name);
printf(" Streaming Multiprocessors: %d\n", deviceProps.multiProcessorCount);
printf(" Memoria Globale: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" Compute Capability: %d.%d\n", deviceProps.major, deviceProps.minor);
printf(" Dimensione Warp: %d\n", deviceProps.warpSize);
printf(" Shared memory per blocco: %.0f KB\n", static_cast<float>(deviceProps.sharedMemPerBlock) / 1024);
printf(" Shared memory per Str.Multipr.: %.0f KB\n", static_cast<float>(deviceProps.sharedMemPerMultiprocessor) / 1024);
printf(" Registri per blocco: %d\n", deviceProps.regsPerBlock);
printf(" Registri per Str. Mulitpr.: %d\n", deviceProps.regsPerMultiprocessor);
printf(" Max threads per blocco: %d\n", deviceProps.maxThreadsPerBlock);
printf(" Max threads per Str. Multipr.: %d\n", deviceProps.maxThreadsPerBlock);
}
printf("---------------------------------------------------------\n");
// By this time the scene should be loaded. Now copy all the key
// data structures into device memory so they are accessible to
// CUDA kernels
//
// See the CUDA Programmer's Guide for descriptions of
// hipMalloc and hipMemcpy
hipMalloc(&cudaDevicePosition, sizeof(float) * 3 * numCircles);
hipMalloc(&cudaDeviceColor, sizeof(float) * 3 * numCircles);
hipMalloc(&cudaDeviceRadius, sizeof(float) * numCircles);
hipMalloc(&cudaDeviceImageData, sizeof(float) * 4 * image->width * image->height);
hipMemcpy(cudaDevicePosition, position, sizeof(float) * 3 * numCircles, hipMemcpyHostToDevice);
hipMemcpy(cudaDeviceColor, color, sizeof(float) * 3 * numCircles, hipMemcpyHostToDevice);
hipMemcpy(cudaDeviceRadius, radius, sizeof(float) * numCircles, hipMemcpyHostToDevice);
// Initialize parameters in constant memory. We didn't talk about
// constant memory in class, but the use of read-only constant
// memory here is an optimization over just sticking these values
// in device global memory. NVIDIA GPUs have a few special tricks
// for optimizing access to constant memory. Using global memory
// here would have worked just as well. See the Programmer's
// Guide for more information about constant memory.
GlobalConstants params;
params.sceneName = sceneName;
params.numCircles = numCircles;
params.imageWidth = image->width;
params.imageHeight = image->height;
params.position = cudaDevicePosition;
params.color = cudaDeviceColor;
params.radius = cudaDeviceRadius;
params.imageData = cudaDeviceImageData;
hipMemcpyToSymbol(cuConstRendererParams, ¶ms, sizeof(GlobalConstants));
}
// allocOutputImage --
//
// Allocate buffer the renderer will render into. Check status of
// image first to avoid memory leak.
void
CudaRenderer::allocOutputImage(int width, int height) {
if (image)
delete image;
image = new Image(width, height);
}
// clearImage --
//
// Clear's the renderer's target image. The state of the image after
// the clear depends on the scene being rendered.
void
CudaRenderer::clearImage() {
// 256 threads per block is a healthy number
dim3 blockDim(16, 16, 1);
dim3 gridDim(
(image->width + blockDim.x - 1) / blockDim.x,
(image->height + blockDim.y - 1) / blockDim.y);
hipLaunchKernelGGL(( kernelClearImage), dim3(gridDim), dim3(blockDim), 0, 0, 1.f, 1.f, 1.f, 1.f);
hipDeviceSynchronize();
}
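// render --
//
// Launches kernelRenderCircles with one thread per output pixel; each thread
// block covers a ThrInBlock_X x ThrInBlock_Y tile of the image, matching the
// per-block circle lists built inside the kernel.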
void
CudaRenderer::render() {
dim3 blockDim(ThrInBlock_X, ThrInBlock_Y);
dim3 gridDim((image->width + ThrInBlock_X - 1) / ThrInBlock_X,
(image->height + ThrInBlock_Y - 1) / ThrInBlock_Y);
hipLaunchKernelGGL(( kernelRenderCircles), dim3(gridDim), dim3(blockDim), 0, 0, );
hipDeviceSynchronize();
}
| 1da863b3dc0e79465135b72fa08f326bb53eb935.cu | #include <string>
#include <algorithm>
#include <math.h>
#include <stdio.h>
#include <vector>
#include <cuda.h>
#include <cuda_runtime.h>
#include <driver_functions.h>
#include "cudaRenderer.h"
#include "image.h"
#include "sceneLoader.h"
// Constants
#define ThrInBlock_X 32
#define ThrInBlock_Y 32
#define ThrInBlock (ThrInBlock_X*ThrInBlock_Y)
#define CirclesPerThread 16
#define MaxCircles 2048
#define SCAN_BLOCK_DIM (ThrInBlock)
#include "exclusiveScan.cu_inl"
#include "circleBoxTest.cu_inl"
////////////////////////////////////////////////////////////////////////////////////////
// Putting all the cuda kernels here
///////////////////////////////////////////////////////////////////////////////////////
struct GlobalConstants {
SceneName sceneName;
int numCircles;
float* position;
float* color;
float* radius;
int imageWidth;
int imageHeight;
float* imageData;
};
// Global variable that is in scope, but read-only, for all cuda
// kernels. The __constant__ modifier designates this variable will
// be stored in special "constant" memory on the GPU.
__constant__ GlobalConstants cuConstRendererParams;
// kernelClearImage -- (CUDA device code)
//
// Clear the image, setting all pixels to the specified color rgba
__global__ void kernelClearImage(float r, float g, float b, float a) {
int imageX = blockIdx.x * blockDim.x + threadIdx.x;
int imageY = blockIdx.y * blockDim.y + threadIdx.y;
int width = cuConstRendererParams.imageWidth;
int height = cuConstRendererParams.imageHeight;
if (imageX >= width || imageY >= height)
return;
int offset = 4 * (imageY * width + imageX);
float4 value = make_float4(r, g, b, a);
// write to global memory: As an optimization, I use a float4
// store, which results in more efficient code than if I coded this
// up as four separate fp32 stores.
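// (the four channels of a pixel are contiguous in imageData, so the float4
// store is a single 16-byte store per pixel)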
*(float4*)(&cuConstRendererParams.imageData[offset]) = value;
}
// kernelRenderCircles -- (CUDA device code)
//
// each thread in a block scans a portion of the circle array, so that
// every block ends up with the list of the indices of the circles
// that overlap that block.
// finally, each thread renders one pixel of the output image.
//
__global__ void kernelRenderCircles() {
__shared__ uint circleCount[ThrInBlock];
__shared__ uint circleList[MaxCircles];
__shared__ uint indexList[ThrInBlock];
short imageWidth = cuConstRendererParams.imageWidth;
short imageHeight = cuConstRendererParams.imageHeight;
//precompute the inverses for later calculations
float invWidth = 1.f / imageWidth;
float invHeight = 1.f / imageHeight;
// each block is assigned a square tile of size
// ThrInBlock_X x ThrInBlock_Y of the output image
// linear index of the thread within the block
int threadIndex = threadIdx.y * blockDim.x + threadIdx.x;
// block coordinates
short blockLeftIdx = blockIdx.x * ThrInBlock_X;
short blockRightIdx = blockLeftIdx + ThrInBlock_X - 1;
short blockTopIdx = blockIdx.y * ThrInBlock_Y;
short blockBottomIdx = blockTopIdx + ThrInBlock_Y - 1;
// normalized block coordinates
float blockLeftNormIdx = blockLeftIdx * invWidth;
float blockRightNormIdx = blockRightIdx * invWidth;
float blockTopNormIdx = blockTopIdx * invHeight;
float blockBottomNormIdx = blockBottomIdx * invHeight;
// each thread in every block scans a small segment of the circle array,
// so each block ends up having scanned the whole array.
// compute the index range assigned to each thread;
// the " + ThrInBlock - 1 " compensates for the truncation to int (round up)
int circlesPerThread = (cuConstRendererParams.numCircles + ThrInBlock - 1) / ThrInBlock;
int circleStartIdx = threadIndex * circlesPerThread;
int circleEndIdx = circleStartIdx + circlesPerThread;
//the last thread takes all of the remaining circles
if(threadIndex == ThrInBlock - 1)
circleEndIdx = cuConstRendererParams.numCircles;
int threadCircleCount = 0;
uint threadCircleList[CirclesPerThread];
for(int c = circleStartIdx; c<circleEndIdx; c++){
// always check that we do not run past the end of the circle array
if(c >= cuConstRendererParams.numCircles)
break;
float3 position = *(float3*)(&cuConstRendererParams.position[c * 3]);
float radius = cuConstRendererParams.radius[c];
// if the bounding box of the circle has at least one point
// inside the current block, save its index
if(circleInBoxConservative(position.x, position.y, radius, blockLeftNormIdx, blockRightNormIdx, blockBottomNormIdx, blockTopNormIdx) == 1){
threadCircleList[threadCircleCount++] = c;
}
}
// now threadCircleCount holds how many of the circles scanned by this thread lie in the current block,
// and threadCircleList holds the indices of those circles (within the scanned portion of the array).
circleCount[threadIndex] = threadCircleCount;
__syncthreads();
// circleCount is a sequence holding, for each thread, the number of circles (belonging
// to the current block) found in its segment of the full circle array.
// Now store the running (prefix) sums of circleCount into indexList,
// so that each thread gets the correct starting offset (privateIndex)
// into circleList from which to copy the circle indices of its own segment
//
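// illustrative example: with 4 threads and circleCount = [2, 0, 3, 1] the
// exclusive scan gives indexList = [0, 2, 2, 5]; thread i then copies its
// circle indices into circleList starting at indexList[i]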
sharedMemExclusiveScan(threadIndex, circleCount, indexList, circleList, ThrInBlock);
__syncthreads();
uint privateIndex = indexList[threadIndex];
for(int i=0; i<threadCircleCount; i++){
circleList[privateIndex++] = threadCircleList[i];
}
__syncthreads();
// the prefix sum is exclusive, so it does not include the last term; add it now
// to obtain the total number of circles in the block
uint totalCircles = indexList[ThrInBlock-1] + circleCount[ThrInBlock-1];
// now that we have the list of circles touching the current block
// we can start the actual rendering
uint pixelXCoord = blockLeftIdx + threadIdx.x;
uint pixelYCoord = blockTopIdx + threadIdx.y;
// compute the continuous coordinate of the pixel center,
// then sample the pixel color according to whether the circle
// actually contains the pixel center or not, i.e.
// what is known in CGI jargon as point sampling
float2 pixelCenterNorm = make_float2(invWidth * (static_cast<float>(pixelXCoord) + 0.5f),
invHeight * (static_cast<float>(pixelYCoord) + 0.5f));
float4* imgPtr = (float4*)(&cuConstRendererParams.imageData[4 * (pixelYCoord * imageWidth + pixelXCoord)]);
float4 pixelData = *imgPtr;
// now each thread renders one pixel, starting from the farthest circle.
// This invariant holds because the circles are already supplied in order of
// decreasing distance, and copying the indices into circleList preserves that order.
for (uint i=0; i<totalCircles; i++){
uint circleIndex = circleList[i];
float3 position = *(float3*)(&cuConstRendererParams.position[circleIndex * 3]);
float radius = cuConstRendererParams.radius[circleIndex];
float diffX = position.x - pixelCenterNorm.x;
float diffY = position.y - pixelCenterNorm.y;
float pixelDist = diffX * diffX + diffY * diffY;
float maxDist = radius * radius;
// check whether the circle actually covers the pixel,
// or whether only its bounding box did
if (pixelDist <= maxDist){
int index3 = 3 * circleIndex;
float3 rgb = *(float3*)&(cuConstRendererParams.color[index3]);
float alpha = .5f;
float oneMinusAlpha = 1.f - alpha;
pixelData.x = alpha * rgb.x + oneMinusAlpha * pixelData.x;
pixelData.y = alpha * rgb.y + oneMinusAlpha * pixelData.y;
pixelData.z = alpha * rgb.z + oneMinusAlpha * pixelData.z;
pixelData.w = alpha + pixelData.w;
}
}
*imgPtr = pixelData;
}
////////////////////////////////////////////////////////////////////////////////////////
CudaRenderer::CudaRenderer() {
image = NULL;
numCircles = 0;
position = NULL;
color = NULL;
radius = NULL;
cudaDevicePosition = NULL;
cudaDeviceColor = NULL;
cudaDeviceRadius = NULL;
cudaDeviceImageData = NULL;
}
CudaRenderer::~CudaRenderer() {
if (image) {
delete image;
}
if (position) {
delete [] position;
delete [] color;
delete [] radius;
}
if (cudaDevicePosition) {
cudaFree(cudaDevicePosition);
cudaFree(cudaDeviceColor);
cudaFree(cudaDeviceRadius);
cudaFree(cudaDeviceImageData);
}
}
const Image*
CudaRenderer::getImage() {
// need to copy contents of the rendered image from device memory
// before we expose the Image object to the caller
printf("Copying image data from device\n");
cudaMemcpy(image->data,
cudaDeviceImageData,
sizeof(float) * 4 * image->width * image->height,
cudaMemcpyDeviceToHost);
return image;
}
void
CudaRenderer::loadScene(SceneName scene, bool benchMode) {
sceneName = scene;
loadCircleScene(sceneName, numCircles, position, color, radius, benchMode);
}
void
CudaRenderer::setup() {
int deviceCount = 0;
cudaError_t err = cudaGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Inizializzo CUDA per il Renderer\n");
printf("Trovati %d dispositivi CUDA\n", deviceCount);
for (int i=0; i<deviceCount; i++) {
cudaDeviceProp deviceProps;
cudaGetDeviceProperties(&deviceProps, i);
printf("Dispositivo %d: %s\n", i, deviceProps.name);
printf(" Streaming Multiprocessors: %d\n", deviceProps.multiProcessorCount);
printf(" Memoria Globale: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" Compute Capability: %d.%d\n", deviceProps.major, deviceProps.minor);
printf(" Dimensione Warp: %d\n", deviceProps.warpSize);
printf(" Shared memory per blocco: %.0f KB\n", static_cast<float>(deviceProps.sharedMemPerBlock) / 1024);
printf(" Shared memory per Str.Multipr.: %.0f KB\n", static_cast<float>(deviceProps.sharedMemPerMultiprocessor) / 1024);
printf(" Registri per blocco: %d\n", deviceProps.regsPerBlock);
printf(" Registri per Str. Mulitpr.: %d\n", deviceProps.regsPerMultiprocessor);
printf(" Max threads per blocco: %d\n", deviceProps.maxThreadsPerBlock);
printf(" Max threads per Str. Multipr.: %d\n", deviceProps.maxThreadsPerBlock);
}
printf("---------------------------------------------------------\n");
// By this time the scene should be loaded. Now copy all the key
// data structures into device memory so they are accessible to
// CUDA kernels
//
// See the CUDA Programmer's Guide for descriptions of
// cudaMalloc and cudaMemcpy
cudaMalloc(&cudaDevicePosition, sizeof(float) * 3 * numCircles);
cudaMalloc(&cudaDeviceColor, sizeof(float) * 3 * numCircles);
cudaMalloc(&cudaDeviceRadius, sizeof(float) * numCircles);
cudaMalloc(&cudaDeviceImageData, sizeof(float) * 4 * image->width * image->height);
cudaMemcpy(cudaDevicePosition, position, sizeof(float) * 3 * numCircles, cudaMemcpyHostToDevice);
cudaMemcpy(cudaDeviceColor, color, sizeof(float) * 3 * numCircles, cudaMemcpyHostToDevice);
cudaMemcpy(cudaDeviceRadius, radius, sizeof(float) * numCircles, cudaMemcpyHostToDevice);
// Initialize parameters in constant memory. We didn't talk about
// constant memory in class, but the use of read-only constant
// memory here is an optimization over just sticking these values
// in device global memory. NVIDIA GPUs have a few special tricks
// for optimizing access to constant memory. Using global memory
// here would have worked just as well. See the Programmer's
// Guide for more information about constant memory.
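// Because the parameters live in the cuConstRendererParams constant symbol,
// the kernels below read them from there directly instead of receiving them
// as launch arguments.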
GlobalConstants params;
params.sceneName = sceneName;
params.numCircles = numCircles;
params.imageWidth = image->width;
params.imageHeight = image->height;
params.position = cudaDevicePosition;
params.color = cudaDeviceColor;
params.radius = cudaDeviceRadius;
params.imageData = cudaDeviceImageData;
cudaMemcpyToSymbol(cuConstRendererParams, &params, sizeof(GlobalConstants));
}
// allocOutputImage --
//
// Allocate buffer the renderer will render into. Check status of
// image first to avoid memory leak.
void
CudaRenderer::allocOutputImage(int width, int height) {
if (image)
delete image;
image = new Image(width, height);
}
// clearImage --
//
// Clears the renderer's target image. The state of the image after
// the clear depends on the scene being rendered.
void
CudaRenderer::clearImage() {
// 256 threads per block is a healthy number
dim3 blockDim(16, 16, 1);
dim3 gridDim(
(image->width + blockDim.x - 1) / blockDim.x,
(image->height + blockDim.y - 1) / blockDim.y);
kernelClearImage<<<gridDim, blockDim>>>(1.f, 1.f, 1.f, 1.f);
cudaDeviceSynchronize();
}
void
CudaRenderer::render() {
dim3 blockDim(ThrInBlock_X, ThrInBlock_Y);
dim3 gridDim((image->width + ThrInBlock_X - 1) / ThrInBlock_X,
(image->height + ThrInBlock_Y - 1) / ThrInBlock_Y);
kernelRenderCircles<<<gridDim, blockDim>>>();
cudaDeviceSynchronize();
}
|
7fb0551d28a18d2a00282cf0512bbe2569f6755f.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
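// Each kernel below dispatches on the common dtype: complex types go through
// the jiterator path (the stringified kernel is compiled at runtime) when
// AT_USE_JITERATOR() is enabled, otherwise they fall back to the precompiled
// GPU lambdas; all other dtypes use the precompiled path directly.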
namespace at { namespace native {
const char logical_and_name[] = "logical_and_kernel";
void logical_and_kernel_cuda(TensorIterator& iter) {
auto dtype = iter.common_dtype();
if (at::isComplexType(dtype)) {
#if AT_USE_JITERATOR()
static const auto logical_and_string = jiterator_stringify(
template <typename T>
bool logical_and_kernel(T a, T b) {
return a && b;
}
); // logical_and_string
AT_DISPATCH_COMPLEX_TYPES(dtype, "logical_and_cuda", [&]() {
jitted_gpu_kernel<
/*name=*/ logical_and_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 2>(iter, logical_and_string);
}); // logical_and_string
#else
AT_DISPATCH_COMPLEX_TYPES(dtype, "logical_and_cuda", [&]() {
opmath_symmetric_gpu_kernel_with_scalars<scalar_t, bool>(
iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
return a && b;
});
});
#endif
} else {
AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBool, ScalarType::BFloat16,
dtype, "logical_and_cuda", [&]() {
opmath_symmetric_gpu_kernel_with_scalars<scalar_t, bool>(
iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
return a && b;
});
});
}
}
const char logical_or_name[] = "logical_or_kernel";
void logical_or_kernel_cuda(TensorIterator& iter) {
auto dtype = iter.common_dtype();
if (at::isComplexType(dtype)) {
#if AT_USE_JITERATOR()
static const auto logical_or_string = jiterator_stringify(
template <typename T>
bool logical_or_kernel(T a, T b) {
return a || b;
}
); // logical_or_string
AT_DISPATCH_COMPLEX_TYPES(dtype, "logical_or_cuda", [&]() {
jitted_gpu_kernel<
/*name=*/ logical_or_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 2>(iter, logical_or_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES(dtype, "logical_or_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
return a || b;
});
});
#endif
} else {
AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBool, ScalarType::BFloat16,
dtype, "logical_or_cuda", [&]() {
opmath_symmetric_gpu_kernel_with_scalars<scalar_t, bool>(
iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
return a || b;
});
});
}
}
const char logical_xor_name[] = "logical_xor_kernel";
void logical_xor_kernel_cuda(TensorIterator& iter) {
auto dtype = iter.common_dtype();
if (at::isComplexType(dtype)) {
#if AT_USE_JITERATOR()
static const auto logical_xor_string = jiterator_stringify(
template <typename T>
bool logical_xor_kernel(T a, T b) {
return bool(a) != bool(b);
}
);
AT_DISPATCH_COMPLEX_TYPES(dtype, "logical_xor_cuda", [&]() {
jitted_gpu_kernel<
/*name=*/ logical_xor_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 2>(iter, logical_xor_string);
}); // logical_xor_string
#else
AT_DISPATCH_COMPLEX_TYPES(dtype, "logical_xor_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
return bool(a) != bool(b);
});
});
#endif
} else {
AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBool, ScalarType::BFloat16,
dtype, "logical_xor_cuda", [&]() {
opmath_symmetric_gpu_kernel_with_scalars<scalar_t, bool>(
iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
return bool(a) != bool(b);
});
});
}
}
REGISTER_DISPATCH(logical_and_stub, &logical_and_kernel_cuda);
REGISTER_DISPATCH(logical_or_stub, &logical_or_kernel_cuda);
REGISTER_DISPATCH(logical_xor_stub, &logical_xor_kernel_cuda);
}} // namespace at::native
| 7fb0551d28a18d2a00282cf0512bbe2569f6755f.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
const char logical_and_name[] = "logical_and_kernel";
void logical_and_kernel_cuda(TensorIterator& iter) {
auto dtype = iter.common_dtype();
if (at::isComplexType(dtype)) {
#if AT_USE_JITERATOR()
static const auto logical_and_string = jiterator_stringify(
template <typename T>
bool logical_and_kernel(T a, T b) {
return a && b;
}
); // logical_and_string
AT_DISPATCH_COMPLEX_TYPES(dtype, "logical_and_cuda", [&]() {
jitted_gpu_kernel<
/*name=*/ logical_and_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 2>(iter, logical_and_string);
}); // logical_and_string
#else
AT_DISPATCH_COMPLEX_TYPES(dtype, "logical_and_cuda", [&]() {
opmath_symmetric_gpu_kernel_with_scalars<scalar_t, bool>(
iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
return a && b;
});
});
#endif
} else {
AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBool, ScalarType::BFloat16,
dtype, "logical_and_cuda", [&]() {
opmath_symmetric_gpu_kernel_with_scalars<scalar_t, bool>(
iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
return a && b;
});
});
}
}
const char logical_or_name[] = "logical_or_kernel";
void logical_or_kernel_cuda(TensorIterator& iter) {
auto dtype = iter.common_dtype();
if (at::isComplexType(dtype)) {
#if AT_USE_JITERATOR()
static const auto logical_or_string = jiterator_stringify(
template <typename T>
bool logical_or_kernel(T a, T b) {
return a || b;
}
); // logical_or_string
AT_DISPATCH_COMPLEX_TYPES(dtype, "logical_or_cuda", [&]() {
jitted_gpu_kernel<
/*name=*/ logical_or_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 2>(iter, logical_or_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES(dtype, "logical_or_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
return a || b;
});
});
#endif
} else {
AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBool, ScalarType::BFloat16,
dtype, "logical_or_cuda", [&]() {
opmath_symmetric_gpu_kernel_with_scalars<scalar_t, bool>(
iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
return a || b;
});
});
}
}
const char logical_xor_name[] = "logical_xor_kernel";
void logical_xor_kernel_cuda(TensorIterator& iter) {
auto dtype = iter.common_dtype();
if (at::isComplexType(dtype)) {
#if AT_USE_JITERATOR()
static const auto logical_xor_string = jiterator_stringify(
template <typename T>
bool logical_xor_kernel(T a, T b) {
return bool(a) != bool(b);
}
);
AT_DISPATCH_COMPLEX_TYPES(dtype, "logical_xor_cuda", [&]() {
jitted_gpu_kernel<
/*name=*/ logical_xor_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 2>(iter, logical_xor_string);
}); // logical_xor_string
#else
AT_DISPATCH_COMPLEX_TYPES(dtype, "logical_xor_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
return bool(a) != bool(b);
});
});
#endif
} else {
AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBool, ScalarType::BFloat16,
dtype, "logical_xor_cuda", [&]() {
opmath_symmetric_gpu_kernel_with_scalars<scalar_t, bool>(
iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
return bool(a) != bool(b);
});
});
}
}
REGISTER_DISPATCH(logical_and_stub, &logical_and_kernel_cuda);
REGISTER_DISPATCH(logical_or_stub, &logical_or_kernel_cuda);
REGISTER_DISPATCH(logical_xor_stub, &logical_xor_kernel_cuda);
}} // namespace at::native
|
521bb76da49c608b39eacc7cc4d0dd7b7d9e3556.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2020 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO LICENSEE:
*
* This source code and/or documentation ("Licensed Deliverables") are
* subject to NVIDIA intellectual property rights under U.S. and
* international Copyright laws.
*
* These Licensed Deliverables contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a form of NVIDIA software license agreement by and
* between NVIDIA and Licensee ("License Agreement") or electronically
* accepted by Licensee. Notwithstanding any terms or conditions to
* the contrary in the License Agreement, reproduction or disclosure
* of the Licensed Deliverables to any third party without the express
* written consent of NVIDIA is prohibited.
*
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THESE LICENSED DELIVERABLES.
*
* U.S. Government End Users. These Licensed Deliverables are a
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
* 1995), consisting of "commercial computer software" and "commercial
* computer software documentation" as such terms are used in 48
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
* U.S. Government End Users acquire the Licensed Deliverables with
* only those rights set forth herein.
*
* Any use of the Licensed Deliverables in individual and commercial
* software must include, in the user documentation and internal
* comments to the code, the above Disclaimer and U.S. Government End
* Users Notice.
*/
#include <cstdio>
#include <cstdlib>
#include <vector>
#include <hip/hip_runtime.h>
#include <cusolverDn.h>
#include "cusolver_utils.h"
int main(int argc, char *argv[]) {
hipsolverDnHandle_t cusolverH = NULL;
hipStream_t stream = NULL;
const int batchSize = 2;
const int m = 3;
const int n = 2;
const int lda = m;
const int ldu = m;
const int ldv = n;
const int rank = n;
const long long int strideA = static_cast<long long int>(lda * n);
const long long int strideS = n;
const long long int strideU = static_cast<long long int>(ldu * n);
const long long int strideV = static_cast<long long int>(ldv * n);
/*
* | 1 2 | | 10 9 |
* A0 = | 4 5 |, A1 = | 8 7 |
* | 2 1 | | 6 5 |
*/
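/* A is packed column-major and batch-strided: the two columns of A0 first
(1,4,2 and 2,5,1), then the two columns of A1 (10,8,6 and 9,7,5);
consecutive matrices are strideA = lda*n elements apart. */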
const std::vector<float> A = {1.0, 4.0, 2.0, 2.0, 5.0, 1.0, 10.0, 8.0, 6.0, 9.0, 7.0, 5.0};
std::vector<float> U(strideU * batchSize, 0); /* left singular vectors */
std::vector<float> V(strideV * batchSize, 0); /* right singular vectors */
std::vector<float> S(strideS * batchSize, 0); /* numerical singular value */
/* exact singular values */
const std::vector<float> S_exact = {7.065283497082729, 1.040081297712078, 18.839649186929730,
0.260035600289472};
float *d_A = nullptr; /* device copy of A */
float *d_S = nullptr; /* singular values */
float *d_U = nullptr; /* left singular vectors */
float *d_V = nullptr; /* right singular vectors */
int *d_info = nullptr; /* error info */
int lwork = 0; /* size of workspace */
float *d_work = nullptr; /* device workspace for gesvda */
std::vector<int> info(batchSize, 0); /* host copy of error info */
std::vector<double> RnrmF(batchSize, 0); /* residual norm */
const hipsolverEigMode_t jobz = HIPSOLVER_EIG_MODE_VECTOR; /* compute singular vectors */
std::printf("A0 = (matlab base-1)\n");
print_matrix(m, n, A.data(), lda);
std::printf("=====\n");
std::printf("A1 = (matlab base-1)\n");
print_matrix(m, n, A.data() + strideA, lda);
std::printf("=====\n");
/* step 1: create cusolver handle, bind a stream */
CUSOLVER_CHECK(hipsolverDnCreate(&cusolverH));
CUDA_CHECK(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking));
CUSOLVER_CHECK(hipsolverDnSetStream(cusolverH, stream));
/* step 2: copy A to device */
CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&d_A), sizeof(float) * A.size()));
CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&d_S), sizeof(float) * S.size()));
CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&d_U), sizeof(float) * U.size()));
CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&d_V), sizeof(float) * V.size()));
CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&d_info), sizeof(int) * info.size()));
CUDA_CHECK(
hipMemcpyAsync(d_A, A.data(), sizeof(float) * A.size(), hipMemcpyHostToDevice, stream));
/* step 3: query working space of SVD */
CUSOLVER_CHECK(hipsolverDnSgesvdaStridedBatched_bufferSize(
cusolverH, jobz, /* HIPSOLVER_EIG_MODE_NOVECTOR: compute singular values only */
/* HIPSOLVER_EIG_MODE_VECTOR: compute singular value and singular vectors */
rank, /* number of singular values */
m, /* number of rows of Aj, 0 <= m */
n, /* number of columns of Aj, 0 <= n */
d_A, /* Aj is m-by-n */
lda, /* leading dimension of Aj */
strideA, /* >= lda*n */
d_S, /* Sj is rank-by-1, singular values in descending order */
strideS, /* >= rank */
d_U, /* Uj is m-by-rank */
ldu, /* leading dimension of Uj, ldu >= max(1,m) */
strideU, /* >= ldu*rank */
d_V, /* Vj is n-by-rank */
ldv, /* leading dimension of Vj, ldv >= max(1,n) */
strideV, /* >= ldv*rank */
&lwork, batchSize /* number of matrices */
));
CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&d_work), sizeof(float) * lwork));
/* step 4: compute SVD */
CUSOLVER_CHECK(hipsolverDnSgesvdaStridedBatched(
cusolverH, jobz, /* HIPSOLVER_EIG_MODE_NOVECTOR: compute singular values only */
/* HIPSOLVER_EIG_MODE_VECTOR: compute singular value and singular vectors */
rank, /* number of singular values */
m, /* number of rows of Aj, 0 <= m */
n, /* number of columns of Aj, 0 <= n */
d_A, /* Aj is m-by-n */
lda, /* leading dimension of Aj */
strideA, /* >= lda*n */
d_S, /* Sj is rank-by-1 */
/* the singular values in descending order */
strideS, /* >= rank */
d_U, /* Uj is m-by-rank */
ldu, /* leading dimension of Uj, ldu >= max(1,m) */
strideU, /* >= ldu*rank */
d_V, /* Vj is n-by-rank */
ldv, /* leading dimension of Vj, ldv >= max(1,n) */
strideV, /* >= ldv*rank */
d_work, lwork, d_info, RnrmF.data(), batchSize /* number of matrices */
));
CUDA_CHECK(
hipMemcpyAsync(U.data(), d_U, sizeof(float) * U.size(), hipMemcpyDeviceToHost, stream));
CUDA_CHECK(
hipMemcpyAsync(V.data(), d_V, sizeof(float) * V.size(), hipMemcpyDeviceToHost, stream));
CUDA_CHECK(
hipMemcpyAsync(S.data(), d_S, sizeof(float) * S.size(), hipMemcpyDeviceToHost, stream));
CUDA_CHECK(hipMemcpyAsync(info.data(), d_info, sizeof(int) * info.size(),
hipMemcpyDeviceToHost, stream));
CUDA_CHECK(hipStreamSynchronize(stream));
if (0 > info[0]) {
std::printf("%d-th parameter is wrong \n", -info[0]);
exit(1);
}
for (int idx = 0; idx < batchSize; idx++) {
if (0 == info[idx]) {
std::printf("%d-th matrix, gesvda converges \n", idx);
} else {
std::printf("WARNING: info[%d] = %d : gesvda does not converge \n", idx, info[idx]);
}
}
std::printf("S0 = (matlab base-1)\n");
print_matrix(rank, 1, S.data(), n);
std::printf("=====\n");
std::printf("U0 = (matlab base-1)\n");
print_matrix(m, rank, U.data(), ldu);
std::printf("=====\n");
std::printf("V) = (matlab base-1)\n");
print_matrix(n, rank, V.data(), ldv);
std::printf("=====\n");
float ds_sup = 0;
for (int j = 0; j < n; j++) {
float err = fabs(S[j] - S_exact[j]);
ds_sup = (ds_sup > err) ? ds_sup : err;
}
std::printf("|S0 - S0_exact|_sup = %E \n", ds_sup);
std::printf("residual |A0 - U0*S0*V0**H|_F = %E \n", RnrmF[0]);
std::printf("S1 = (matlab base-1)\n");
print_matrix(rank, 1, S.data() + strideS, n);
std::printf("=====\n");
std::printf("U1 = (matlab base-1)\n");
print_matrix(m, rank, U.data() + strideU, ldu);
std::printf("=====\n");
std::printf("V1 = (matlab base-1)\n");
print_matrix(n, rank, V.data() + strideV, ldv);
std::printf("=====\n");
ds_sup = 0;
for (int j = 0; j < n; j++) {
float err = fabs(S[strideS + j] - S_exact[strideS + j]);
ds_sup = (ds_sup > err) ? ds_sup : err;
}
std::printf("|S1 - S1_exact|_sup = %E \n", ds_sup);
std::printf("residual |A1 - U1*S1*V1**H|_F = %E \n", RnrmF[1]);
/* free resources */
CUDA_CHECK(hipFree(d_A));
CUDA_CHECK(hipFree(d_S));
CUDA_CHECK(hipFree(d_U));
CUDA_CHECK(hipFree(d_V));
CUDA_CHECK(hipFree(d_info));
CUDA_CHECK(hipFree(d_work));
CUSOLVER_CHECK(hipsolverDnDestroy(cusolverH));
CUDA_CHECK(hipStreamDestroy(stream));
CUDA_CHECK(hipDeviceReset());
return EXIT_SUCCESS;
}
| 521bb76da49c608b39eacc7cc4d0dd7b7d9e3556.cu | /*
* Copyright 2020 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO LICENSEE:
*
* This source code and/or documentation ("Licensed Deliverables") are
* subject to NVIDIA intellectual property rights under U.S. and
* international Copyright laws.
*
* These Licensed Deliverables contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a form of NVIDIA software license agreement by and
* between NVIDIA and Licensee ("License Agreement") or electronically
* accepted by Licensee. Notwithstanding any terms or conditions to
* the contrary in the License Agreement, reproduction or disclosure
* of the Licensed Deliverables to any third party without the express
* written consent of NVIDIA is prohibited.
*
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THESE LICENSED DELIVERABLES.
*
* U.S. Government End Users. These Licensed Deliverables are a
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
* 1995), consisting of "commercial computer software" and "commercial
* computer software documentation" as such terms are used in 48
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
* U.S. Government End Users acquire the Licensed Deliverables with
* only those rights set forth herein.
*
* Any use of the Licensed Deliverables in individual and commercial
* software must include, in the user documentation and internal
* comments to the code, the above Disclaimer and U.S. Government End
* Users Notice.
*/
#include <cstdio>
#include <cstdlib>
#include <vector>
#include <cuda_runtime.h>
#include <cusolverDn.h>
#include "cusolver_utils.h"
int main(int argc, char *argv[]) {
cusolverDnHandle_t cusolverH = NULL;
cudaStream_t stream = NULL;
const int batchSize = 2;
const int m = 3;
const int n = 2;
const int lda = m;
const int ldu = m;
const int ldv = n;
const int rank = n;
const long long int strideA = static_cast<long long int>(lda * n);
const long long int strideS = n;
const long long int strideU = static_cast<long long int>(ldu * n);
const long long int strideV = static_cast<long long int>(ldv * n);
/*
* | 1 2 | | 10 9 |
* A0 = | 4 5 |, A1 = | 8 7 |
* | 2 1 | | 6 5 |
*/
const std::vector<float> A = {1.0, 4.0, 2.0, 2.0, 5.0, 1.0, 10.0, 8.0, 6.0, 9.0, 7.0, 5.0};
std::vector<float> U(strideU * batchSize, 0); /* left singular vectors */
std::vector<float> V(strideV * batchSize, 0); /* right singular vectors */
std::vector<float> S(strideS * batchSize, 0); /* numerical singular value */
/* exact singular values */
const std::vector<float> S_exact = {7.065283497082729, 1.040081297712078, 18.839649186929730,
0.260035600289472};
float *d_A = nullptr; /* device copy of A */
float *d_S = nullptr; /* singular values */
float *d_U = nullptr; /* left singular vectors */
float *d_V = nullptr; /* right singular vectors */
int *d_info = nullptr; /* error info */
int lwork = 0; /* size of workspace */
float *d_work = nullptr; /* device workspace for gesvda */
std::vector<int> info(batchSize, 0); /* host copy of error info */
std::vector<double> RnrmF(batchSize, 0); /* residual norm */
const cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_VECTOR; /* compute singular vectors */
std::printf("A0 = (matlab base-1)\n");
print_matrix(m, n, A.data(), lda);
std::printf("=====\n");
std::printf("A1 = (matlab base-1)\n");
print_matrix(m, n, A.data() + strideA, lda);
std::printf("=====\n");
/* step 1: create cusolver handle, bind a stream */
CUSOLVER_CHECK(cusolverDnCreate(&cusolverH));
CUDA_CHECK(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking));
CUSOLVER_CHECK(cusolverDnSetStream(cusolverH, stream));
/* step 2: copy A to device */
CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&d_A), sizeof(float) * A.size()));
CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&d_S), sizeof(float) * S.size()));
CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&d_U), sizeof(float) * U.size()));
CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&d_V), sizeof(float) * V.size()));
CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&d_info), sizeof(int) * info.size()));
CUDA_CHECK(
cudaMemcpyAsync(d_A, A.data(), sizeof(float) * A.size(), cudaMemcpyHostToDevice, stream));
/* step 3: query working space of SVD */
CUSOLVER_CHECK(cusolverDnSgesvdaStridedBatched_bufferSize(
cusolverH, jobz, /* CUSOLVER_EIG_MODE_NOVECTOR: compute singular values only */
/* CUSOLVER_EIG_MODE_VECTOR: compute singular value and singular vectors */
rank, /* number of singular values */
m, /* nubmer of rows of Aj, 0 <= m */
n, /* number of columns of Aj, 0 <= n */
d_A, /* Aj is m-by-n */
lda, /* leading dimension of Aj */
strideA, /* >= lda*n */
d_S, /* Sj is rank-by-1, singular values in descending order */
strideS, /* >= rank */
d_U, /* Uj is m-by-rank */
ldu, /* leading dimension of Uj, ldu >= max(1,m) */
strideU, /* >= ldu*rank */
d_V, /* Vj is n-by-rank */
ldv, /* leading dimension of Vj, ldv >= max(1,n) */
strideV, /* >= ldv*rank */
&lwork, batchSize /* number of matrices */
));
CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&d_work), sizeof(float) * lwork));
/* step 4: compute SVD */
CUSOLVER_CHECK(cusolverDnSgesvdaStridedBatched(
cusolverH, jobz, /* CUSOLVER_EIG_MODE_NOVECTOR: compute singular values only */
/* CUSOLVER_EIG_MODE_VECTOR: compute singular value and singular vectors */
rank, /* number of singular values */
m, /* number of rows of Aj, 0 <= m */
n, /* number of columns of Aj, 0 <= n */
d_A, /* Aj is m-by-n */
lda, /* leading dimension of Aj */
strideA, /* >= lda*n */
d_S, /* Sj is rank-by-1 */
/* the singular values in descending order */
strideS, /* >= rank */
d_U, /* Uj is m-by-rank */
ldu, /* leading dimension of Uj, ldu >= max(1,m) */
strideU, /* >= ldu*rank */
d_V, /* Vj is n-by-rank */
ldv, /* leading dimension of Vj, ldv >= max(1,n) */
strideV, /* >= ldv*rank */
d_work, lwork, d_info, RnrmF.data(), batchSize /* number of matrices */
));
CUDA_CHECK(
cudaMemcpyAsync(U.data(), d_U, sizeof(float) * U.size(), cudaMemcpyDeviceToHost, stream));
CUDA_CHECK(
cudaMemcpyAsync(V.data(), d_V, sizeof(float) * V.size(), cudaMemcpyDeviceToHost, stream));
CUDA_CHECK(
cudaMemcpyAsync(S.data(), d_S, sizeof(float) * S.size(), cudaMemcpyDeviceToHost, stream));
CUDA_CHECK(cudaMemcpyAsync(info.data(), d_info, sizeof(int) * info.size(),
cudaMemcpyDeviceToHost, stream));
CUDA_CHECK(cudaStreamSynchronize(stream));
if (0 > info[0]) {
std::printf("%d-th parameter is wrong \n", -info[0]);
exit(1);
}
for (int idx = 0; idx < batchSize; idx++) {
if (0 == info[idx]) {
std::printf("%d-th matrix, gesvda converges \n", idx);
} else {
std::printf("WARNING: info[%d] = %d : gesvda does not converge \n", idx, info[idx]);
}
}
std::printf("S0 = (matlab base-1)\n");
print_matrix(rank, 1, S.data(), n);
std::printf("=====\n");
std::printf("U0 = (matlab base-1)\n");
print_matrix(m, rank, U.data(), ldu);
std::printf("=====\n");
std::printf("V) = (matlab base-1)\n");
print_matrix(n, rank, V.data(), ldv);
std::printf("=====\n");
float ds_sup = 0;
for (int j = 0; j < n; j++) {
float err = fabs(S[j] - S_exact[j]);
ds_sup = (ds_sup > err) ? ds_sup : err;
}
std::printf("|S0 - S0_exact|_sup = %E \n", ds_sup);
std::printf("residual |A0 - U0*S0*V0**H|_F = %E \n", RnrmF[0]);
std::printf("S1 = (matlab base-1)\n");
print_matrix(rank, 1, S.data() + strideS, n);
std::printf("=====\n");
std::printf("U1 = (matlab base-1)\n");
print_matrix(m, rank, U.data() + strideU, ldu);
std::printf("=====\n");
std::printf("V1 = (matlab base-1)\n");
print_matrix(n, rank, V.data() + strideV, ldv);
std::printf("=====\n");
ds_sup = 0;
for (int j = 0; j < n; j++) {
float err = fabs(S[strideS + j] - S_exact[strideS + j]);
ds_sup = (ds_sup > err) ? ds_sup : err;
}
std::printf("|S1 - S1_exact|_sup = %E \n", ds_sup);
std::printf("residual |A1 - U1*S1*V1**H|_F = %E \n", RnrmF[1]);
/* free resources */
CUDA_CHECK(cudaFree(d_A));
CUDA_CHECK(cudaFree(d_S));
CUDA_CHECK(cudaFree(d_U));
CUDA_CHECK(cudaFree(d_V));
CUDA_CHECK(cudaFree(d_info));
CUDA_CHECK(cudaFree(d_work));
CUSOLVER_CHECK(cusolverDnDestroy(cusolverH));
CUDA_CHECK(cudaStreamDestroy(stream));
CUDA_CHECK(cudaDeviceReset());
return EXIT_SUCCESS;
}
|
71f08fbeca777350e497c0fbc27a8a2340d28951.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2019 BlazingDB, Inc.
* Copyright 2019 Felipe Aramburu <[email protected]>
* Copyright 2018 Rommel Quintanilla <[email protected]>
* Copyright 2019 William Scott Malpica <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/cudf.h>
#include <cudf/functions.h>
#include <cudf/types.h>
#include <bitmask/legacy/bit_mask.cuh>
#include <utilities/cudf_utils.h>
#include <utilities/column_utils.hpp>
#include <cudf/utilities/legacy/nvcategory_util.hpp>
#include <tests/utilities/cudf_test_utils.cuh>
#include <tests/utilities/cudf_test_fixtures.h>
#include <tests/utilities/nvcategory_utils.cuh>
#include <nvstrings/NVCategory.h>
#include <nvstrings/NVStrings.h>
#include <rmm/rmm.h>
#include <iostream>
#include <random>
#include <cstring>
namespace cudf {
namespace test {
std::string random_string(size_t len, std::string const &allowed_chars) {
std::mt19937_64 gen { std::random_device()() };
std::uniform_int_distribution<size_t> dist { 0, allowed_chars.length()-1 };
std::string ret;
std::generate_n(std::back_inserter(ret), len, [&] { return allowed_chars[dist(gen)]; });
return ret;
}
gdf_column * create_nv_category_column(gdf_size_type num_rows, bool repeat_strings){
const char ** string_host_data = new const char *[num_rows];
for(gdf_size_type row_index = 0; row_index < num_rows; row_index++){
string_host_data[row_index] = new char[(num_rows + 25) / 26]; //allows string to grow depending on number of rows
std::string temp_string = "";
int num_chars = repeat_strings ? 1 : (row_index / 26) + 1;
char repeat_char = (26 - (row_index % 26)) + 65; //chars are Z,Y ...C,B,A,ZZ,YY,.....BBB,AAA.....
for(int char_index = 0; char_index < num_chars; char_index++){
temp_string.push_back(repeat_char);
}
temp_string.push_back(0);
std::memcpy((void *) string_host_data[row_index],temp_string.c_str(),temp_string.size());
}
NVCategory* category = NVCategory::create_from_array(string_host_data, num_rows);
gdf_column * column = new gdf_column{};
int * data;
RMM_ALLOC(&data, num_rows * sizeof(gdf_nvstring_category) , 0);
category->get_values( (int *)data, true );
bit_mask::bit_mask_t * valid;
bit_mask::create_bit_mask(&valid, num_rows,1);
gdf_error err = gdf_column_view(column,
(void *) data,
(gdf_valid_type *)valid,
num_rows,
GDF_STRING_CATEGORY);
column->dtype_info.category = category;
return column;
}
gdf_column * create_nv_category_column_strings(const char ** string_host_data, gdf_size_type num_rows){
NVCategory* category = NVCategory::create_from_array(string_host_data, num_rows);
gdf_column * column = new gdf_column{};
int * data;
RMM_ALLOC(&data, num_rows * sizeof(gdf_nvstring_category) , 0);
category->get_values( (int *)data, true );
bit_mask::bit_mask_t * valid;
bit_mask::create_bit_mask(&valid, num_rows,1);
gdf_error err = gdf_column_view(column,
(void *) data,
(gdf_valid_type *)valid,
num_rows,
GDF_STRING_CATEGORY);
column->dtype_info.category = category;
return column;
}
const char ** generate_string_data(gdf_size_type num_rows, size_t length, bool print){
const char ** string_host_data = new const char *[num_rows];
for(gdf_size_type row_index = 0; row_index < num_rows; row_index++){
string_host_data[row_index] = new char[length+1];
std::string rand_string = cudf::test::random_string(length);
rand_string.push_back(0);
if(print)
std::cout<<rand_string<<"\t";
std::memcpy((void *) string_host_data[row_index],rand_string.c_str(),rand_string.size());
}
if(print)
std::cout<<std::endl;
return string_host_data;
}
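// nvcategory_column_to_host: gathers the string for every row of a
// GDF_STRING_CATEGORY column from its NVCategory dictionary and copies the
// strings (plus the validity bitmask, when the column is nullable) to host.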
std::tuple<std::vector<std::string>, std::vector<gdf_valid_type>> nvcategory_column_to_host(gdf_column * column){
if (column->dtype == GDF_STRING_CATEGORY && column->dtype_info.category != nullptr && column->size > 0) {
NVStrings* tptr = static_cast<NVCategory*>(column->dtype_info.category)->gather_strings(static_cast<nv_category_index_type*>(column->data),
column->size,
DEVICE_ALLOCATED);
unsigned int count = tptr->size();
if( count==0 )
return std::make_tuple(std::vector<std::string>(), std::vector<gdf_valid_type>());
std::vector<char*> list(count);
char** plist = list.data();
std::vector<int> lens(count);
size_t totalmem = tptr->byte_count(lens.data(),false);
std::vector<char> buffer(totalmem+count,0); // null terminates each string
char* pbuffer = buffer.data();
size_t offset = 0;
for( unsigned int idx=0; idx < count; ++idx )
{
plist[idx] = pbuffer + offset;
offset += lens[idx]+1; // account for null-terminator; also nulls are -1
}
tptr->to_host(plist,0,count);
// TODO: workaround for custrings issue #330. Remove once fix is merged
// workaround just resets the nullptr entries back to their proper offsets
// so that the std::vector constructor below can succeed.
offset = 0;
for( unsigned int idx=0; idx < count; ++idx )
{
plist[idx] = pbuffer + offset;
offset += lens[idx]+1; // account for null-terminator; also nulls are -1
}
NVStrings::destroy(tptr);
std::vector<std::string> host_strings_vector(plist, plist + column->size);
std::vector<gdf_valid_type> host_bitmask(gdf_valid_allocation_size(column->size));
if (cudf::is_nullable(*column)) {
CUDA_TRY(hipMemcpy(host_bitmask.data(),
column->valid,
host_bitmask.size()*sizeof(gdf_valid_type),
hipMemcpyDeviceToHost));
}
return std::make_tuple(host_strings_vector, host_bitmask);
} else {
return std::make_tuple(std::vector<std::string>(), std::vector<gdf_valid_type>());
}
}
} // namespace test
} // namespace cudf
| 71f08fbeca777350e497c0fbc27a8a2340d28951.cu | /*
* Copyright 2019 BlazingDB, Inc.
* Copyright 2019 Felipe Aramburu <[email protected]>
* Copyright 2018 Rommel Quintanilla <[email protected]>
* Copyright 2019 William Scott Malpica <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/cudf.h>
#include <cudf/functions.h>
#include <cudf/types.h>
#include <bitmask/legacy/bit_mask.cuh>
#include <utilities/cudf_utils.h>
#include <utilities/column_utils.hpp>
#include <cudf/utilities/legacy/nvcategory_util.hpp>
#include <tests/utilities/cudf_test_utils.cuh>
#include <tests/utilities/cudf_test_fixtures.h>
#include <tests/utilities/nvcategory_utils.cuh>
#include <nvstrings/NVCategory.h>
#include <nvstrings/NVStrings.h>
#include <rmm/rmm.h>
#include <iostream>
#include <random>
#include <cstring>
namespace cudf {
namespace test {
std::string random_string(size_t len, std::string const &allowed_chars) {
std::mt19937_64 gen { std::random_device()() };
std::uniform_int_distribution<size_t> dist { 0, allowed_chars.length()-1 };
std::string ret;
std::generate_n(std::back_inserter(ret), len, [&] { return allowed_chars[dist(gen)]; });
return ret;
}
gdf_column * create_nv_category_column(gdf_size_type num_rows, bool repeat_strings){
const char ** string_host_data = new const char *[num_rows];
for(gdf_size_type row_index = 0; row_index < num_rows; row_index++){
string_host_data[row_index] = new char[(num_rows + 25) / 26]; //allows string to grow depending on number of rows
std::string temp_string = "";
int num_chars = repeat_strings ? 1 : (row_index / 26) + 1;
char repeat_char = (26 - (row_index % 26)) + 65; //chars are Z,Y ...C,B,A,ZZ,YY,.....BBB,AAA.....
for(int char_index = 0; char_index < num_chars; char_index++){
temp_string.push_back(repeat_char);
}
temp_string.push_back(0);
std::memcpy((void *) string_host_data[row_index],temp_string.c_str(),temp_string.size());
}
NVCategory* category = NVCategory::create_from_array(string_host_data, num_rows);
gdf_column * column = new gdf_column{};
int * data;
RMM_ALLOC(&data, num_rows * sizeof(gdf_nvstring_category) , 0);
category->get_values( (int *)data, true );
bit_mask::bit_mask_t * valid;
bit_mask::create_bit_mask(&valid, num_rows,1);
gdf_error err = gdf_column_view(column,
(void *) data,
(gdf_valid_type *)valid,
num_rows,
GDF_STRING_CATEGORY);
column->dtype_info.category = category;
return column;
}
gdf_column * create_nv_category_column_strings(const char ** string_host_data, gdf_size_type num_rows){
NVCategory* category = NVCategory::create_from_array(string_host_data, num_rows);
gdf_column * column = new gdf_column{};
int * data;
RMM_ALLOC(&data, num_rows * sizeof(gdf_nvstring_category) , 0);
category->get_values( (int *)data, true );
bit_mask::bit_mask_t * valid;
bit_mask::create_bit_mask(&valid, num_rows,1);
gdf_error err = gdf_column_view(column,
(void *) data,
(gdf_valid_type *)valid,
num_rows,
GDF_STRING_CATEGORY);
column->dtype_info.category = category;
return column;
}
const char ** generate_string_data(gdf_size_type num_rows, size_t length, bool print){
const char ** string_host_data = new const char *[num_rows];
for(gdf_size_type row_index = 0; row_index < num_rows; row_index++){
string_host_data[row_index] = new char[length+1];
std::string rand_string = cudf::test::random_string(length);
rand_string.push_back(0);
if(print)
std::cout<<rand_string<<"\t";
std::memcpy((void *) string_host_data[row_index],rand_string.c_str(),rand_string.size());
}
if(print)
std::cout<<std::endl;
return string_host_data;
}
std::tuple<std::vector<std::string>, std::vector<gdf_valid_type>> nvcategory_column_to_host(gdf_column * column){
if (column->dtype == GDF_STRING_CATEGORY && column->dtype_info.category != nullptr && column->size > 0) {
NVStrings* tptr = static_cast<NVCategory*>(column->dtype_info.category)->gather_strings(static_cast<nv_category_index_type*>(column->data),
column->size,
DEVICE_ALLOCATED);
unsigned int count = tptr->size();
if( count==0 )
return std::make_tuple(std::vector<std::string>(), std::vector<gdf_valid_type>());
std::vector<char*> list(count);
char** plist = list.data();
std::vector<int> lens(count);
size_t totalmem = tptr->byte_count(lens.data(),false);
std::vector<char> buffer(totalmem+count,0); // null terminates each string
char* pbuffer = buffer.data();
size_t offset = 0;
for( unsigned int idx=0; idx < count; ++idx )
{
plist[idx] = pbuffer + offset;
offset += lens[idx]+1; // account for null-terminator; also nulls are -1
}
tptr->to_host(plist,0,count);
// TODO: workaround for custrings issue #330. Remove once fix is merged
// workaround just resets the nullptr entries back to their proper offsets
// so that the std::vector constructor below can succeed.
offset = 0;
for( unsigned int idx=0; idx < count; ++idx )
{
plist[idx] = pbuffer + offset;
offset += lens[idx]+1; // account for null-terminator; also nulls are -1
}
NVStrings::destroy(tptr);
std::vector<std::string> host_strings_vector(plist, plist + column->size);
std::vector<gdf_valid_type> host_bitmask(gdf_valid_allocation_size(column->size));
if (cudf::is_nullable(*column)) {
CUDA_TRY(cudaMemcpy(host_bitmask.data(),
column->valid,
host_bitmask.size()*sizeof(gdf_valid_type),
cudaMemcpyDeviceToHost));
}
return std::make_tuple(host_strings_vector, host_bitmask);
} else {
return std::make_tuple(std::vector<std::string>(), std::vector<gdf_valid_type>());
}
}
} // namespace test
} // namespace cudf
|
1fb335ef526af0ce2cb52dbda91364e87cf80fc5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cmath>
#include <cassert>
#include <cstdlib> // for atoi() and rand()
void checkError (hipError_t err, int line) {
if (err == hipSuccess) return;
std::cout << "Error code " << err << " : " << hipGetErrorString(err) << " " << " on line " << line << ", aborting.\n";
assert(false);
}
#define CUDACALL(x) checkError(x, __LINE__)
__global__ void dev_calculate_Gaussians (double* data, double mean, double sigma) {
data[threadIdx.x] = exp(-0.5 * pow((data[threadIdx.x] - mean) / sigma, 2));
data[threadIdx.x] /= (sigma * sqrt(2*M_PI));
}
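// dev_reduce_vector: in-place pairwise tree reduction within one block.
// Each step folds the upper half of the active range onto the lower half and
// halves the range (rounding up for odd sizes), e.g. for 5 elements:
// [a,b,c,d,e] -> [a+d, b+e, c] -> [a+d+c, b+e] -> [a+b+c+d+e] in data[0].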
__global__ void dev_reduce_vector (double* data, double* result) {
int currentArraySize = blockDim.x;
while (currentArraySize > 1) {
int secondHalfBegin = (1 + currentArraySize) / 2;
if (threadIdx.x + secondHalfBegin < currentArraySize) {
data[threadIdx.x] += data[secondHalfBegin + threadIdx.x];
}
__syncthreads();
currentArraySize = secondHalfBegin;
}
if (0 == threadIdx.x) (*result) = data[0];
}
int main (int argc, char** argv) {
int sizeOfVector = atoi(argv[1]);
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, 0);
assert(sizeOfVector < devProp.maxThreadsPerBlock);
double mean = 5;
double sigma = 3;
// Generate a host-side vector and fill it with random numbers.
double* host_data = new double[sizeOfVector];
for (int i = 0; i < sizeOfVector; ++i) {
host_data[i] = (rand() % 11) - 5;
}
// Host-side numbers to check against device-side ones.
double* host_probs = new double[sizeOfVector];
double host_sum = 0;
for (int i = 0; i < sizeOfVector; ++i) {
host_probs[i] = exp(-0.5 * pow((host_data[i] - mean) / sigma, 2));
host_probs[i] /= (sigma * sqrt(2*M_PI));
host_sum += host_probs[i];
}
// Create a device-side array and copy the data into it.
double* dev_data = 0;
CUDACALL(hipMalloc((void**) &dev_data, sizeOfVector*sizeof(double)));
CUDACALL(hipMemcpy(dev_data, host_data, sizeOfVector*sizeof(double), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( dev_calculate_Gaussians), dim3(1), dim3(sizeOfVector), 0, 0, dev_data, mean, sigma);
// Copy back results
CUDACALL(hipMemcpy(host_data, dev_data, sizeOfVector*sizeof(double), hipMemcpyDeviceToHost));
// Check for reasonableness
double tolerance = 1e-6;
for (int i = 0; i < sizeOfVector; ++i) {
if (fabs(host_data[i] - host_probs[i]) <= tolerance) continue;
std::cout << "Problem with entry " << i << ": "
<< host_probs[i] << " " << host_data[i] << " "
<< (host_probs[i] - host_data[i])
<< std::endl;
}
double* device_sum_address;
std::cout << "Sum from CPU: " << host_sum << std::endl;
CUDACALL(hipMalloc((void**) &device_sum_address, sizeof(double)));
hipLaunchKernelGGL(( dev_reduce_vector), dim3(1), dim3(sizeOfVector), 0, 0, dev_data, device_sum_address);
CUDACALL(hipMemcpy(&host_sum, device_sum_address, sizeof(double), hipMemcpyDeviceToHost));
std::cout << "Sum from GPU: " << host_sum << std::endl;
return 0;
}
| 1fb335ef526af0ce2cb52dbda91364e87cf80fc5.cu | #include <iostream>
#include <cmath>
#include <cassert>
#include <cstdlib> // for atoi() and rand()
void checkError (cudaError_t err, int line) {
if (err == cudaSuccess) return;
std::cout << "Error code " << err << " : " << cudaGetErrorString(err) << " " << " on line " << line << ", aborting.\n";
assert(false);
}
#define CUDACALL(x) checkError(x, __LINE__)
__global__ void dev_calculate_Gaussians (double* data, double mean, double sigma) {
data[threadIdx.x] = exp(-0.5 * pow((data[threadIdx.x] - mean) / sigma, 2));
data[threadIdx.x] /= (sigma * sqrt(2*M_PI));
}
__global__ void dev_reduce_vector (double* data, double* result) {
int currentArraySize = blockDim.x;
while (currentArraySize > 1) {
int secondHalfBegin = (1 + currentArraySize) / 2;
if (threadIdx.x + secondHalfBegin < currentArraySize) {
data[threadIdx.x] += data[secondHalfBegin + threadIdx.x];
}
__syncthreads();
currentArraySize = secondHalfBegin;
}
if (0 == threadIdx.x) (*result) = data[0];
}
int main (int argc, char** argv) {
int sizeOfVector = atoi(argv[1]);
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, 0);
assert(sizeOfVector < devProp.maxThreadsPerBlock);
double mean = 5;
double sigma = 3;
// Generate a host-side vector and fill it with random numbers.
double* host_data = new double[sizeOfVector];
for (int i = 0; i < sizeOfVector; ++i) {
host_data[i] = (rand() % 11) - 5;
}
// Host-side numbers to check against device-side ones.
double* host_probs = new double[sizeOfVector];
double host_sum = 0;
for (int i = 0; i < sizeOfVector; ++i) {
host_probs[i] = exp(-0.5 * pow((host_data[i] - mean) / sigma, 2));
host_probs[i] /= (sigma * sqrt(2*M_PI));
host_sum += host_probs[i];
}
// Create a device-side array and copy the data into it.
double* dev_data = 0;
CUDACALL(cudaMalloc((void**) &dev_data, sizeOfVector*sizeof(double)));
CUDACALL(cudaMemcpy(dev_data, host_data, sizeOfVector*sizeof(double), cudaMemcpyHostToDevice));
dev_calculate_Gaussians<<<1, sizeOfVector>>>(dev_data, mean, sigma);
// Copy back results
CUDACALL(cudaMemcpy(host_data, dev_data, sizeOfVector*sizeof(double), cudaMemcpyDeviceToHost));
// Check for reasonableness
double tolerance = 1e-6;
for (int i = 0; i < sizeOfVector; ++i) {
if (fabs(host_data[i] - host_probs[i]) <= tolerance) continue;
std::cout << "Problem with entry " << i << ": "
<< host_probs[i] << " " << host_data[i] << " "
<< (host_probs[i] - host_data[i])
<< std::endl;
}
double* device_sum_address;
std::cout << "Sum from CPU: " << host_sum << std::endl;
CUDACALL(cudaMalloc((void**) &device_sum_address, sizeof(double)));
dev_reduce_vector<<<1, sizeOfVector>>>(dev_data, device_sum_address);
CUDACALL(cudaMemcpy(&host_sum, device_sum_address, sizeof(double), cudaMemcpyDeviceToHost));
std::cout << "Sum from GPU: " << host_sum << std::endl;
return 0;
}
|
f9df00453aa1748f712d1f78753e8fdeb6b0e9cb.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* \file solver.cu
* \date 2018/10/12 15:51
*
* \author sireer
* Contact: [email protected]
*
* \brief
*
* TODO: long description
*
* \note
*/
#pragma once
#include "Common.h"
#ifdef USE_ROCM
#include "MSFRUtil.cu"
#include "PclUtil.h"
#include <vector>
#include <iostream>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <hip/device_functions.h>
#include <pcl\gpu\utils\safe_call.hpp>
#include <pcl\gpu\utils\cutil_math.h>
#include <pcl\gpu\containers\device_array.h>
#include <pcl\gpu\containers\kernel_containers.h>
#include <hipcub/hipcub.hpp>
#include <hipcub/hipcub.hpp>
#define M 4096
#define BIN_WIDTH 36
#define BIN_WIDTH2 6
#define BIN_LENGTH 32
__device__ float dev_reg_lambda;
__device__ int dev_Ii_size;
__device__ int dev_Iij_size;
__device__ int dev_row_num;
__device__ int dev_col_num;
__device__ int dev_nonzero_Iij;
__device__ int dev_ATA_rowptr_size;
__global__ void
kernelExtractNewWeightFromWeightMap(pcl::gpu::PtrSz<float> new_weight,
const pcl::gpu::PtrStepSz<float3> weight_map,
const pcl::gpu::PtrStepSz<float> tri_map,
const pcl::gpu::PtrSz<int3> tri_list)
{
int col = blockDim.x * blockIdx.x + threadIdx.x;
int row = blockDim.y * blockIdx.y + threadIdx.y;
if (col < tri_map.cols && row < tri_map.rows)
{
int3 triIdx = tri_list[__float2int_rd(tri_map(row, col) + 0.5)];
float3 weight = weight_map(row, col);
		// Several pixels of the same triangle touch the same vertices, so accumulate atomically.
		atomicAdd(&new_weight[triIdx.x], weight.x);
		atomicAdd(&new_weight[triIdx.y], weight.y);
		atomicAdd(&new_weight[triIdx.z], weight.z);
}
}
void cudaExtractNewWeightFromWeightMap(pcl::gpu::DeviceArray<float> new_weight,
const pcl::gpu::DeviceArray2D<float3> weight_map,
const pcl::gpu::DeviceArray2D<float> tri_map,
const pcl::gpu::DeviceArray<int3> tri_list)
{
clearCudaMem(new_weight);
dim3 block(16, 16);
dim3 grid(pcl::gpu::divUp(tri_map.cols(), block.x),
pcl::gpu::divUp(tri_map.rows(), block.y));
hipLaunchKernelGGL(( kernelExtractNewWeightFromWeightMap), dim3(grid), dim3(block), 0, 0, new_weight,
weight_map, tri_map, tri_list);
#if CUDA_GET_LAST_ERROR_AND_SYNC==1
// device synchronize
cudaSafeCall(hipGetLastError());
cudaSafeCall(hipStreamSynchronize(0));
#endif
}
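// Projective data association: each front-facing transformed vertex is projected into the depth
// image and the back-projected sample is kept only if its depth is within 1e-2 of the vertex;
// rejected vertices are marked (-1, -1, -1).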
__global__ void
kernelUpdateProjectionICPFromDepthImage(pcl::gpu::PtrSz<float3> projected_position,
const pcl::gpu::PtrSz<float3> position_sRT,
const pcl::gpu::PtrSz<unsigned short> is_front,
const pcl::gpu::PtrStepSz<float> depth_image,
const msfr::intrinsics camera_intr)
{
__shared__ msfr::intrinsics camera;
__shared__ int width, height;
int id = threadIdx.x;
if (id == 0)
{
camera = camera_intr;
width = depth_image.cols;
height = depth_image.rows;
}
__syncthreads();
int vId = blockDim.x * blockIdx.x + threadIdx.x;
if (vId < position_sRT.size)
{
projected_position[vId] = make_float3(-1.0f, -1.0f, -1.0f);
if (is_front[vId] == 1)
{
float3 pos = position_sRT[vId];
int2 uv = getProjectIndex(camera, pos);
if (uv.x > 0 && uv.x < width && uv.y>0 && uv.y < height)
{
float depth = depth_image(uv.y, uv.x);
if (depth > 0.0f && fabs(depth - pos.z) < 1e-2f) /// set sqrt(5)cm as the threshold
{
projected_position[vId] = unProjectedFromIndex(camera,
make_float3(__int2float_rn(uv.x) + 0.5f, __int2float_rn(uv.y) + 0.5f, depth));
}
}
}
}
}
void cudaUpdateProjectionICPFromDepthImage(
pcl::gpu::DeviceArray<float3> projected_position,
const pcl::gpu::DeviceArray<float3> position_sRT,
const pcl::gpu::DeviceArray<unsigned short> is_front,
const pcl::gpu::DeviceArray2D<float> depth_image,
const msfr::intrinsics camera_intr)
{
dim3 block(1024);
dim3 grid(pcl::gpu::divUp(position_sRT.size(), block.x));
hipLaunchKernelGGL(( kernelUpdateProjectionICPFromDepthImage), dim3(grid), dim3(block), 0, 0, projected_position,
position_sRT, is_front, depth_image, camera_intr);
#if CUDA_GET_LAST_ERROR_AND_SYNC==1
// device synchronize
cudaSafeCall(hipGetLastError());
cudaSafeCall(hipStreamSynchronize(0));
#endif
//std::vector<float3> host_position, host_target_position;
//temp.download(host_position);
//projected_position.download(host_target_position);
}
__global__ void kernelDownloadDepthMap(pcl::gpu::PtrSz<float3> dst,
hipTextureObject_t src, const int width, const int height)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if (id < dst.size)
{
int row = id / width;
int col = id - width * row;
dst[id].x = tex2D<float>(src, col, row);
dst[id].y = row;
dst[id].z = col;
}
}
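// Correspondence search along the vertex normal: step by `threshold` along the unit normal,
// then bisect five times against the depth map to locate the intersection with the observed
// surface; vertices without a valid intersection are marked (-1, -1, -1).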
__global__ void
kernelUpdateNormalICPFromDepthImage(pcl::gpu::PtrSz<float3> projected_position,
const pcl::gpu::PtrSz<float3> position_sRT,
const pcl::gpu::PtrSz<float3> normal_R,
const pcl::gpu::PtrSz<unsigned short> is_front,
const pcl::gpu::PtrStepSz<float> depth_image,
const msfr::intrinsics camera_intr,
const float threshold)
{
__shared__ msfr::intrinsics camera;
int id = threadIdx.x;
if (id == 0)
{
camera = camera_intr;
}
__syncthreads();
id += blockDim.x * blockIdx.x;
if (id < position_sRT.size)
{
projected_position[id] = make_float3(-1.0f, -1.0f, -1.0f);
float step = threshold; /// set sqrt(5)cm as the threshold
float3 pos = position_sRT[id];
float3 n = normal_R[id];
n = n / length(n);
int2 uv = getProjectIndex(camera, pos);
float depth;
if (is_front[id] == 1 && n.z < 0.0f)
{
float3 pos_f, pos_b;
int2 uv_f, uv_b;
unsigned short is_legal = 1; /// 1 represents legal
if (depth_image(uv.y, uv.x) < pos.z)
{
pos_f = pos + step * n;
pos_b = pos;
uv_f = getProjectIndex(camera, pos_f);
uv_b = uv;
depth = depth_image(uv_f.y, uv_f.x);
if (depth == 0.0f || depth < pos_f.z)
{
is_legal = 0;
}
}
else
{
pos_b = pos - step * n;
pos_f = pos;
uv_f = uv;
uv_b = getProjectIndex(camera, pos_b);
depth = depth_image(uv_b.y, uv_b.x);
if (depth == 0.0f || depth >= pos_b.z)
{
is_legal = 0;
}
}
for (int i = 0; i < 5; ++i)
{
float3 mid = (pos_b + pos_f) / 2;
				uv = getProjectIndex(camera, mid); // project the current bisection midpoint
depth = depth_image(uv.y, uv.x);
if (depth == 0.0f)
{
is_legal = 0;
}
if (depth < mid.z)
{
pos_b = mid;
uv_b = uv;
}
else
{
pos_f = mid;
uv_f = uv;
}
}
if (is_legal == 1)
{
projected_position[id] = unProjectedFromIndex(camera, make_float3(uv.x + 0.5f, uv.y + 0.5f, depth));
}
}
}
}
__global__ void
kernelUpdateNormalICPFromDepthImage(pcl::gpu::PtrSz<float3> projected_position,
const pcl::gpu::PtrSz<float3> position_sRT,
const pcl::gpu::PtrSz<float3> normal_R,
const pcl::gpu::PtrSz<unsigned short> is_front,
const hipTextureObject_t depth_image,
const msfr::intrinsics camera_intr,
const float threshold)
{
__shared__ msfr::intrinsics camera;
int id = threadIdx.x;
if (id == 0)
{
camera = camera_intr;
}
__syncthreads();
id += blockDim.x * blockIdx.x;
if (id < position_sRT.size)
{
projected_position[id] = make_float3(-1.0f, -1.0f, -1.0f);
float step = threshold; /// set sqrt(5)cm as the threshold
float3 pos = position_sRT[id];
float3 n = normal_R[id];
n = n / length(n);
int2 uv = getProjectIndex(camera, pos);
float depth;
if (is_front[id] == 1 && n.z < 0.0f)
{
float3 pos_f, pos_b;
int2 uv_f, uv_b;
unsigned short is_legal = 1; /// 1 represents legal
if (tex2D<float>(depth_image, uv.x, uv.y) < pos.z)
{
pos_f = pos + step * n;
pos_b = pos;
uv_f = getProjectIndex(camera, pos_f);
uv_b = uv;
depth = tex2D<float>(depth_image, uv_f.x, uv_f.y);
if (depth == 0.0f || depth < pos_f.z)
{
is_legal = 0;
}
}
else
{
pos_b = pos - step * n;
pos_f = pos;
uv_f = uv;
uv_b = getProjectIndex(camera, pos_b);
depth = tex2D<float>(depth_image, uv_b.x, uv_b.y);
if (depth == 0.0f || depth >= pos_b.z)
{
is_legal = 0;
}
}
for (int i = 0; i < 5; ++i)
{
float3 mid = (pos_b + pos_f) / 2;
				uv = getProjectIndex(camera, mid); // project the current bisection midpoint
depth = tex2D<float>(depth_image, uv.x, uv.y);
if (depth == 0.0f)
{
is_legal = 0;
}
if (depth < mid.z)
{
pos_b = mid;
uv_b = uv;
}
else
{
pos_f = mid;
uv_f = uv;
}
}
if (is_legal == 1)
{
projected_position[id] = unProjectedFromIndex(camera, make_float3(uv.x + 0.5f, uv.y + 0.5f, depth));
}
}
}
}
void cudaUpdateNormalICPFromDepthImage(pcl::gpu::DeviceArray<float3> projected_position,
const pcl::gpu::DeviceArray<float3> position_sRT,
const pcl::gpu::DeviceArray<float3> normal_R,
const pcl::gpu::DeviceArray<unsigned short> is_front,
const hipTextureObject_t depth_image,
const msfr::intrinsics camera_intr,
const float threshold)
{
dim3 block(1024);
dim3 grid(pcl::gpu::divUp(position_sRT.size(), block.x));
hipLaunchKernelGGL(( kernelUpdateNormalICPFromDepthImage), dim3(grid), dim3(block), 0, 0, projected_position,
position_sRT, normal_R, is_front, depth_image, camera_intr, threshold);
#if CUDA_GET_LAST_ERROR_AND_SYNC==1
// device synchronize
cudaSafeCall(hipGetLastError());
cudaSafeCall(hipStreamSynchronize(0));
#endif
}
void cudaUpdateNormalICPFromDepthImage(pcl::gpu::DeviceArray<float3> projected_position,
const pcl::gpu::DeviceArray<float3> position_sRT,
const pcl::gpu::DeviceArray<float3> normal_R,
const pcl::gpu::DeviceArray<unsigned short> is_front,
const pcl::gpu::DeviceArray2D<float> depth_image,
const msfr::intrinsics camera_intr,
const float threshold)
{
dim3 block(1024);
dim3 grid(pcl::gpu::divUp(position_sRT.size(), block.x));
hipLaunchKernelGGL(( kernelUpdateNormalICPFromDepthImage), dim3(grid), dim3(block), 0, 0, projected_position,
position_sRT, normal_R, is_front, depth_image, camera_intr, threshold);
#if CUDA_GET_LAST_ERROR_AND_SYNC==1
// device synchronize
cudaSafeCall(hipGetLastError());
cudaSafeCall(hipStreamSynchronize(0));
#endif
}
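// Brute-force closest-point search: each front-facing vertex is projected into the depth image
// and the back-projected points in a 21x21 pixel window around it are scanned for the nearest
// one within threshold_sq.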
__global__ void kernelUpdateClosestPointfromDepthImage(pcl::gpu::PtrSz<float3> projected_position,
const pcl::gpu::PtrSz<float3> position_RT,
const pcl::gpu::PtrSz<unsigned short> is_front,
const hipTextureObject_t depth_image,
const msfr::intrinsics camera_intr,
const int width_, const int height_,
const float threshold_sq)
{
__shared__ msfr::intrinsics camera;
__shared__ int width, height;
if (threadIdx.x == 0)
{
camera = camera_intr;
width = width_;
height = height_;
}
__syncthreads();
int vid = blockDim.x * blockIdx.x + threadIdx.x;
if (vid < position_RT.size)
{
float min_dis2 = threshold_sq; ///
float3 nearest_pos = make_float3(-1.0f, -1.0f, -1.0f);
if (is_front[vid] == 1)
{
float3 pos_i = position_RT[vid];
int2 uv = getProjectIndex(camera, pos_i);
int min_x = max(uv.x - 10, 1);
int max_x = min(uv.x + 11, width - 1);
int min_y = max(uv.y - 10, 1);
int max_y = min(uv.y + 11, height - 1);
for (int i = min_x; i < max_x; ++i)
{
for (int j = min_y; j < max_y; ++j)
{
float depth = tex2D<float>(depth_image, i, j);
if (depth > 0.0f)
{
float3 pos_ij = unProjectedFromIndex(camera,
make_float3(__float2int_rn(i) + 0.5f, __float2int_rn(j) + 0.5f,
depth));
float dis2_ij = norm2(pos_i - pos_ij);
if (dis2_ij < min_dis2)
{
min_dis2 = dis2_ij;
nearest_pos = pos_ij;
}
}
}
}
}
projected_position[vid] = nearest_pos;
}
}
void cudaUpdateClosestPointfromDepthImage(pcl::gpu::DeviceArray<float3> projected_position,
const pcl::gpu::DeviceArray<float3> position_RT,
const pcl::gpu::DeviceArray<unsigned short> is_front,
const hipTextureObject_t depth_image,
const msfr::intrinsics camera_intr,
const int width, const int height,
const float threshold)
{
const float threshold_sq = threshold * threshold;
dim3 block(1024);
dim3 grid(pcl::gpu::divUp(position_RT.size(), block.x));
hipLaunchKernelGGL(( kernelUpdateClosestPointfromDepthImage), dim3(grid), dim3(block), 0, 0, projected_position,
position_RT, is_front, depth_image, camera_intr, width, height, threshold_sq);
#if CUDA_GET_LAST_ERROR_AND_SYNC==1
// device synchronize
cudaSafeCall(hipGetLastError());
cudaSafeCall(hipStreamSynchronize(0));
#endif
}
__global__ void kernelUpdateClosestPointfromDepthImageNew(
pcl::gpu::PtrSz<float3> projected_position,
const pcl::gpu::PtrSz<float3> position_RT,
const hipTextureObject_t depth_image, const msfr::intrinsics camera_intr,
const int width_, const int height_, const float threshold_sq) {
__shared__ msfr::intrinsics camera;
__shared__ int width, height;
if (threadIdx.x == 0) {
camera = camera_intr;
width = width_;
height = height_;
}
__syncthreads();
int vid = blockDim.x * blockIdx.x + threadIdx.x;
if (vid < position_RT.size) {
float min_dis2 = threshold_sq; ///
float3 nearest_pos = make_float3(-1.0f, -1.0f, -1.0f);
{
float3 pos_i = position_RT[vid];
int2 uv = getProjectIndex(camera, pos_i);
int min_x = max(uv.x - 10, 1);
int max_x = min(uv.x + 11, width - 1);
int min_y = max(uv.y - 10, 1);
int max_y = min(uv.y + 11, height - 1);
for (int i = min_x; i < max_x; ++i) {
for (int j = min_y; j < max_y; ++j) {
float depth = tex2D<float>(depth_image, i, j);
if (depth > 0.0f) {
float3 pos_ij = unProjectedFromIndex(
camera, make_float3(__float2int_rn(i) + 0.5f,
__float2int_rn(j) + 0.5f, depth));
float dis2_ij = norm2(pos_i - pos_ij);
if (dis2_ij < min_dis2) {
min_dis2 = dis2_ij;
nearest_pos = pos_ij;
}
}
}
}
}
projected_position[vid] = nearest_pos;
}
}
void cudaUpdateClosestPointfromDepthImageNew(
pcl::gpu::DeviceArray<float3> projected_position,
const pcl::gpu::DeviceArray<float3> position_RT,
const hipTextureObject_t depth_image, const msfr::intrinsics camera_intr,
const int width, const int height, const float threshold) {
const float threshold_sq = threshold * threshold;
dim3 block(1024);
dim3 grid(pcl::gpu::divUp(position_RT.size(), block.x));
hipLaunchKernelGGL(( kernelUpdateClosestPointfromDepthImageNew), dim3(grid), dim3(block), 0, 0,
projected_position, position_RT, depth_image, camera_intr,
width, height, threshold_sq);
#if CUDA_GET_LAST_ERROR_AND_SYNC == 1
// device synchronize
cudaSafeCall(hipGetLastError());
cudaSafeCall(hipStreamSynchronize(0));
#endif
}
__global__ void kernelUpdateClosestPointfromDepthImageWithNormalConstraint(
pcl::gpu::PtrSz<float3> projected_position,
const pcl::gpu::PtrSz<float3> position_RT,
const pcl::gpu::PtrSz<float3> normal_R,
const pcl::gpu::PtrSz<unsigned short> is_front,
const hipTextureObject_t depth_image,
const msfr::intrinsics camera_intr,
const int width_, const int height_,
const float threshold_sq)
{
__shared__ msfr::intrinsics camera;
__shared__ int width, height;
if (threadIdx.x == 0)
{
camera = camera_intr;
width = width_;
height = height_;
}
__syncthreads();
int vid = blockDim.x * blockIdx.x + threadIdx.x;
if (vid < position_RT.size)
{
float min_dis2 = threshold_sq; ///
float3 nearest_pos = make_float3(-1.0f, -1.0f, -1.0f);
float3 pos_i = position_RT[vid];
float3 normal_i = normal_R[vid];
if (is_front[vid] == 1 && normal_i.z < -0.2f)
{
int2 uv = getProjectIndex(camera, pos_i);
int min_x = max(uv.x - 10, 1);
int max_x = min(uv.x + 11, width - 1);
int min_y = max(uv.y - 10, 1);
int max_y = min(uv.y + 11, height - 1);
for (int i = min_x; i < max_x; ++i)
{
for (int j = min_y; j < max_y; ++j)
{
float depth = tex2D<float>(depth_image, i, j);
if (depth > 0.0f)
{
float3 pos_ij = unProjectedFromIndex(camera,
make_float3(__float2int_rn(i) + 0.5f, __float2int_rn(j) + 0.5f,
depth));
float dis2_ij = norm2(pos_i - pos_ij);
float cos_i = abs(dot(pos_i - pos_ij, normal_i) / sqrtf(dis2_ij));
if (dis2_ij < min_dis2 && cos_i < 0.5f)
{
min_dis2 = dis2_ij;
nearest_pos = pos_ij;
}
}
}
}
}
projected_position[vid] = nearest_pos;
}
}
void cudaUpdateClosestPointfromDepthImageWithNormalConstraint(
pcl::gpu::DeviceArray<float3> projected_position,
const pcl::gpu::DeviceArray<float3> position_RT,
const pcl::gpu::DeviceArray<float3> normal_R,
const pcl::gpu::DeviceArray<unsigned short> is_front,
const hipTextureObject_t depth_image,
const msfr::intrinsics camera_intr,
const int width, const int height,
const float threshold)
{
const float threshold_sq = threshold * threshold;
dim3 block(1024);
dim3 grid(pcl::gpu::divUp(position_RT.size(), block.x));
hipLaunchKernelGGL(( kernelUpdateClosestPointfromDepthImageWithNormalConstraint), dim3(grid), dim3(block), 0, 0, projected_position,
position_RT, normal_R, is_front, depth_image,
camera_intr, width, height, threshold_sq);
#if CUDA_GET_LAST_ERROR_AND_SYNC==1
// device synchronize
cudaSafeCall(hipGetLastError());
cudaSafeCall(hipStreamSynchronize(0));
#endif
}
__device__ __forceinline__ bool is_in_0_1(float a, float delta = 0.0f) {
return a >= -delta && a <= 1.0f + delta;
}
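// Point-splat rendering: every vertex is transformed by the scaled rotation and translation,
// projected into the canvas, and its depth is accumulated (non-atomically) at the covered pixel.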
__global__ void kernelRenderMesh(pcl::gpu::PtrStepSz<float> canvas,
const pcl::gpu::PtrSz<float3> position,
const pcl::gpu::PtrSz<float> rotation_,
const pcl::gpu::PtrSz<float> translation_,
const msfr::intrinsics camera_)
{
__shared__ msfr::intrinsics camera;
__shared__ float rotation[9];
__shared__ float3 translation;
__shared__ int width, height;
int id = threadIdx.x;
if (id == 1)
{
camera = camera_;
translation.x = translation_[0];
translation.y = translation_[1];
translation.z = translation_[2];
width = canvas.cols;
height = canvas.rows;
}
if (id < 9)
{
rotation[id] = rotation_[id] * translation_[3];
}
__syncthreads();
id += blockDim.x*blockIdx.x;
if (id < position.size)
{
float3 pos = M33xV3(rotation, position[id]) + translation;
int2 uv = getProjectIndex(camera, pos);
if (uv.x>=0 && uv.x< width && uv.y>=0 && uv.y< height)
canvas(uv.y, uv.x) += pos.z;
}
}
void cudaRenderMesh(pcl::gpu::DeviceArray2D<float> canvas,
const pcl::gpu::DeviceArray<float3> position,
const pcl::gpu::DeviceArray<float> rotation,
const pcl::gpu::DeviceArray<float> translation,
const msfr::intrinsics & camera)
{
dim3 block(1024);
dim3 grid(pcl::gpu::divUp(position.size(), block.x));
clearCudaMem(canvas);
hipLaunchKernelGGL(( kernelRenderMesh), dim3(grid), dim3(block), 0, 0, canvas, position,
rotation, translation, camera);
}
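// Applies the inverse similarity transform to each weighted target: with the rotation pre-divided
// by the scale, the result is R^T * (p - lambda * t) / s; targets with non-positive weight are zeroed.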
__global__ void kernelUpdateInvSRTTargetPosition(pcl::gpu::PtrSz<float3> target_position_inv_sRT,
const pcl::gpu::PtrSz<float3> target_position,
const pcl::gpu::PtrSz<float> lambda_position,
const pcl::gpu::PtrSz<float> rotation_,
const pcl::gpu::PtrSz<float> translation_)
{
__shared__ float rotation[9];
__shared__ float3 translation;
int id = threadIdx.x;
if (id == 0)
{
translation.x = translation_[0];
translation.y = translation_[1];
translation.z = translation_[2];
}
if (id < 9)
{
rotation[id] = rotation_[id] / translation_[3];
}
__syncthreads();
id += blockDim.x * blockIdx.x;
if (id < target_position.size)
{
float lambda_i = __ldg(&lambda_position[id]);
if (lambda_i > 0.0f)
{
target_position_inv_sRT[id] = M33TxV3(rotation, target_position[id]
- lambda_i * translation);
}
else
{
target_position_inv_sRT[id] = make_float3(0.0f, 0.0f, 0.0f);
}
}
}
void cudaUpdateInvSRTTargetPosition(pcl::gpu::DeviceArray<float3> target_position_inv_sRT,
const pcl::gpu::DeviceArray<float3> target_position,
const pcl::gpu::DeviceArray<float> lambda_position,
const pcl::gpu::DeviceArray<float> rotation,
const pcl::gpu::DeviceArray<float> translation)
{
dim3 block(1024);
dim3 grid(pcl::gpu::divUp(target_position.size(), block.x));
hipLaunchKernelGGL(( kernelUpdateInvSRTTargetPosition), dim3(grid), dim3(block), 0, 0, target_position_inv_sRT, target_position,
lambda_position, rotation, translation);
#if CUDA_GET_LAST_ERROR_AND_SYNC==1
// device synchronize
cudaSafeCall(hipGetLastError());
cudaSafeCall(hipStreamSynchronize(0));
#endif
}
__global__ void kernelUpdateInvSRTProjectionPosition(pcl::gpu::PtrSz<float3> projection_position_inv_sRT,
const pcl::gpu::PtrSz<float3> projection_position,
const pcl::gpu::PtrSz<float> rotation_,
const pcl::gpu::PtrSz<float> translation_)
{
__shared__ float rotation[9];
__shared__ float3 translation;
int id = threadIdx.x;
if (id == 0)
{
translation.x = translation_[0];
translation.y = translation_[1];
translation.z = translation_[2];
}
if (id < 9)
{
rotation[id] = rotation_[id] / translation_[3];
}
__syncthreads();
id += blockDim.x * blockIdx.x;
if (id < projection_position_inv_sRT.size)
{
if (projection_position[id].z > 0.0f)
{
projection_position_inv_sRT[id] = M33TxV3(rotation, projection_position[id]
- translation);
}
else
{
projection_position_inv_sRT[id] = make_float3(0.0f, 0.0f, 0.0f);
}
}
}
void cudaUpdateInvSRTProjectionPosition(pcl::gpu::DeviceArray<float3> projection_position_inv_sRT,
const pcl::gpu::DeviceArray<float3> projection_position,
const pcl::gpu::DeviceArray<float> rotation,
const pcl::gpu::DeviceArray<float> translation)
{
dim3 block(1024);
dim3 grid(pcl::gpu::divUp(projection_position.size(), block.x));
hipLaunchKernelGGL(( kernelUpdateInvSRTProjectionPosition), dim3(grid), dim3(block), 0, 0, projection_position_inv_sRT, projection_position,
rotation, translation);
#if CUDA_GET_LAST_ERROR_AND_SYNC==1
// device synchronize
cudaSafeCall(hipGetLastError());
cudaSafeCall(hipStreamSynchronize(0));
#endif
}
__global__ void kernelSRTPositionNormal(pcl::gpu::PtrSz<float3> position_RT,
pcl::gpu::PtrSz<float3> normal_R,
const pcl::gpu::PtrSz<float3> position,
const pcl::gpu::PtrSz<float3> normal,
const pcl::gpu::PtrSz<float> rotation_,
const pcl::gpu::PtrSz<float> translation_)
{
__shared__ float rotation[9], rotation_scale[9];
__shared__ float3 translation;
__shared__ float scale;
int id = threadIdx.x;
if (id == 0)
{
translation.x = translation_[0];
translation.y = translation_[1];
translation.z = translation_[2];
scale = translation_[3];
}
if (id < 9)
{
rotation[id] = rotation_[id];
rotation_scale[id] = rotation[id] * scale;
}
__syncthreads();
id += blockDim.x * blockIdx.x;
if (id < position.size)
{
position_RT[id] = M33xV3(rotation_scale, position[id]) + translation;
normal_R[id] = M33xV3(rotation, normal[id]);
}
}
void cudaUpdateSRTPositionNormal(pcl::gpu::DeviceArray<float3> position_RT,
pcl::gpu::DeviceArray<float3> normal_R,
const pcl::gpu::DeviceArray<float3> position,
const pcl::gpu::DeviceArray<float3> normal,
const pcl::gpu::DeviceArray<float> rotation,
const pcl::gpu::DeviceArray<float> translation)
{
dim3 block(1024);
dim3 grid(pcl::gpu::divUp(position.size(), block.x));
hipLaunchKernelGGL(( kernelSRTPositionNormal), dim3(grid), dim3(block), 0, 0, position_RT, normal_R,
position, normal, rotation, translation);
#if CUDA_GET_LAST_ERROR_AND_SYNC==1
// device synchronize
cudaSafeCall(hipGetLastError());
cudaSafeCall(hipStreamSynchronize(0));
#endif
//std::vector<float3> host_position_RT, host_position;
//std::vector<float> host_rotation, host_translation;
//position_RT.download(host_position_RT);
//position.download(host_position);
//rotation.download(host_rotation);
//translation.download(host_translation);
}
// Dense matrix-vector product: the kernel is launched with gridDim.x equal to the length of x, one block per output row.
__device__ __forceinline__ void computeAx_i(float & xi,
const pcl::gpu::PtrSz<float> &x,
const pcl::gpu::PtrSz<float> &A)
{
int threadDim = (gridDim.x + 31) >> 5; /// x.size equals to gridDim.x
int beginId = threadDim * threadIdx.x;
int endId = min(gridDim.x, beginId + threadDim);
xi = 0;
for (int i = beginId, offset = blockIdx.x*x.size; i < endId; ++i)
{
xi += A[offset + i] * x[i];
}
__syncthreads();
xi = warp_scan(xi);
}
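// CG setup: one block per row computes (A x)_i via computeAx_i and lane 31 writes the initial
// residual r = b - A x and search direction p = r.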
__global__ void prepare_r_p(pcl::gpu::PtrSz<float> r,
pcl::gpu::PtrSz<float> p,
const pcl::gpu::PtrSz<float> x,
const pcl::gpu::PtrSz<float> A,
const pcl::gpu::PtrSz<float> b)
{
float xi;
computeAx_i(xi, x, A);
if (threadIdx.x == 31)
{
r[blockIdx.x] = b[blockIdx.x] - xi;
p[blockIdx.x] = r[blockIdx.x];
}
}
__global__ void kernelComputeAx(pcl::gpu::PtrSz<float> Ax,
const pcl::gpu::PtrSz<float> A,
const pcl::gpu::PtrSz<float> x)
{
float xi;
computeAx_i(xi, x, A);
if (threadIdx.x == 31)
{
Ax[blockIdx.x] = xi;
}
}
__device__ __forceinline__
float warp_up_scan8(float data)
{
data += __shfl_up(data, 1);
data += __shfl_up(data, 2);
data += __shfl_up(data, 4);
return data;
}
__device__ __forceinline__
void compute_uTvi(float & uTv, const pcl::gpu::PtrSz<float> & u,
const pcl::gpu::PtrSz<float> & v)
{
__shared__ float partial_sum[8];
uTv = 0;
if (threadIdx.x < u.size)
{
uTv = u[threadIdx.x] * v[threadIdx.x];
}
__syncthreads();
uTv = warp_scan(uTv);
if ((threadIdx.x & 31) == 31)
{
partial_sum[threadIdx.x >> 5] = uTv;
}
__syncthreads();
if (threadIdx.x < 8)
{
uTv = partial_sum[threadIdx.x];
uTv = warp_up_scan8(uTv);
}
}
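// One conjugate-gradient iteration in a single block: alpha = r^T r / p^T A p, update x and r,
// beta = r_new^T r_new / r^T r, then p = r + beta * p.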
__global__ void kernelCGIter(pcl::gpu::PtrSz<float> x,
pcl::gpu::PtrSz<float> r,
pcl::gpu::PtrSz<float> p,
const pcl::gpu::PtrSz<float> Ap)
{
__shared__ float alpha, beta;
float rTri, pTApi;
compute_uTvi(rTri, r, r);
compute_uTvi(pTApi, p, Ap);
if (threadIdx.x == 7)
{
alpha = rTri / pTApi;
}
__syncthreads();
if (threadIdx.x < x.size)
{
x[threadIdx.x] += alpha*p[threadIdx.x];
r[threadIdx.x] -= alpha*Ap[threadIdx.x];
}
float new_rTri;
compute_uTvi(new_rTri, r, r);
if (threadIdx.x == 7)
{
beta = new_rTri / rTri;
}
__syncthreads();
if (threadIdx.x < x.size)
{
p[threadIdx.x] = beta * p[threadIdx.x] + r[threadIdx.x];
}
}
void cudaCGSolver(pcl::gpu::DeviceArray<float> x,
const pcl::gpu::DeviceArray<float> A,
const pcl::gpu::DeviceArray<float> b,
int nIters)
{
pcl::gpu::DeviceArray<float> p(b.size()), r(b.size()), Ap(b.size());
dim3 block(32);
dim3 grid(x.size());
hipLaunchKernelGGL(( prepare_r_p), dim3(grid), dim3(block), 0, 0, r, p, x, A, b);
#if CUDA_GET_LAST_ERROR_AND_SYNC==1
// device synchronize
cudaSafeCall(hipGetLastError());
cudaSafeCall(hipStreamSynchronize(0));
#endif
for (int i = 0; i < nIters; i++)
{
hipLaunchKernelGGL(( kernelComputeAx), dim3(grid), dim3(block), 0, 0, Ap, A, p);
#if CUDA_GET_LAST_ERROR_AND_SYNC==1
// device synchronize
cudaSafeCall(hipGetLastError());
cudaSafeCall(hipStreamSynchronize(0));
#endif
hipLaunchKernelGGL(( kernelCGIter), dim3(1), dim3(256), 0, 0, x, r ,p, Ap);
#if CUDA_GET_LAST_ERROR_AND_SYNC==1
// device synchronize
cudaSafeCall(hipGetLastError());
cudaSafeCall(hipStreamSynchronize(0));
#endif
}
}
#define USE_PRECONDITION 0 // fixme why? why?!!
#define PCG_EVAL_RESIDUAL 0
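// Preconditioned CG setup: like prepare_r_p but also initializes z; with USE_PRECONDITION
// enabled, j holds the diagonal of A and z = r / j (Jacobi preconditioner), otherwise z = r.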
__global__ void prepare_r_p_z(pcl::gpu::PtrSz<float> r,
pcl::gpu::PtrSz<float> p,
pcl::gpu::PtrSz<float> z,
const pcl::gpu::PtrSz<float> x,
const pcl::gpu::PtrSz<float> A,
const pcl::gpu::PtrSz<float> b,
pcl::gpu::PtrSz<float> j) {
float xi;
computeAx_i(xi, x, A);
if(threadIdx.x==31) {
auto& rTmp = r[blockIdx.x];
rTmp = b[blockIdx.x] - xi;
p[blockIdx.x] = rTmp;
#if USE_PRECONDITION
j[blockIdx.x] = A[blockIdx.x*(x.size + 1)];
z[blockIdx.x] = rTmp / j[blockIdx.x];
#else
z[blockIdx.x] = rTmp;
#endif
}
}
__global__ void kernelPCGIter(pcl::gpu::PtrSz<float> x,
pcl::gpu::PtrSz<float> r,
pcl::gpu::PtrSz<float> p,
pcl::gpu::PtrSz<float> z,
const pcl::gpu::PtrSz<float> j,
const pcl::gpu::PtrSz<float> Ap) {
__shared__ float alpha, beta;
float zTri, pTApi;
compute_uTvi(zTri, z, r);
compute_uTvi(pTApi, p, Ap);
if (threadIdx.x == 7)
{
alpha = zTri / pTApi;
}
__syncthreads();
if (threadIdx.x < x.size) {
auto& ri = r[threadIdx.x];
x[threadIdx.x] += alpha * p[threadIdx.x];
ri -= alpha * Ap[threadIdx.x];
#if USE_PRECONDITION
z[threadIdx.x] = ri / j[threadIdx.x];
#else
z[threadIdx.x] = ri;
#endif
}
float new_zTri;
compute_uTvi(new_zTri, z, r);
if (threadIdx.x == 7)
{
beta = new_zTri / zTri;
}
__syncthreads();
if (threadIdx.x < x.size)
{
p[threadIdx.x] = beta * p[threadIdx.x] + z[threadIdx.x];
}
}
void cudaPCGSolver(pcl::gpu::DeviceArray<float> x,
const pcl::gpu::DeviceArray<float> A,
const pcl::gpu::DeviceArray<float> b,
int nIters,
hipStream_t stream) {
pcl::gpu::DeviceArray<float> p(b.size()), r(b.size()), Ap(b.size()),z(b.size()), j(x.size());
dim3 block(32);
dim3 grid(x.size());
prepare_r_p_z << <grid, block, 0, stream >> > (r, p, z, x, A, b, j);
for (int k = 0; k < nIters; k++) {
kernelComputeAx << <grid, block, 0, stream >> > (Ap, A, p);
hipLaunchKernelGGL(( kernelPCGIter), dim3(1),dim3(256),0,stream, x, r, p, z, j, Ap);
}
}
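// Single-block coordinate-descent ("shooting") solver for an L1-regularized least-squares
// problem: each sweep soft-thresholds x_j with lambda, clamps it to [0, 1], and every thread
// updates its entry of the gradient vector S using column j of ATA.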
__global__ void kernelShootingSolve(pcl::gpu::PtrSz<float> x,
pcl::gpu::PtrSz<float> S,
const pcl::gpu::PtrSz<float> ATA,
const float lambda)
{
const int x_size = x.size;
const int tId = threadIdx.x;
__shared__ float delta_x_i;
if (tId < x_size)
{
for (int i = 0; i < 100; ++i)
{
for (int j = 0; j < x_size; ++j)
{
if (tId == j)
{
float ATA_jj = __ldg(&ATA[j * x_size + j]);
float x_i = __ldg(&x[j]);
float prev_x_i = x_i;
float S_0 = -2 * (__ldg(&S[j]) + x_i * ATA_jj);
if (S_0 > lambda)
{
x_i = (lambda - S_0) / (2 * ATA_jj);
}
else if (S_0 < -lambda)
{
x_i = -(lambda + S_0) / (2 * ATA_jj);
}
else
{
x_i = 0.0f;
}
if (x_i < 0.0f)
{
x_i = 0.0f;
}
if (x_i > 1.f)
{
x_i = 1.0f;
}
delta_x_i = x_i - prev_x_i;
x[j] = x_i;
}
__syncthreads();
				S[tId] -= delta_x_i * __ldg(&ATA[tId * x_size + j]);
				__syncthreads(); // every thread must consume delta_x_i before the next coordinate overwrites it
			}
}
}
}
void cudaShootingSolve(pcl::gpu::DeviceArray<float> x,
pcl::gpu::DeviceArray<float> S,
const pcl::gpu::DeviceArray<float> ATA,
const float lambda)
{
/* cudaSafeCall(hipStreamSynchronize(0));
std::vector<float> host_x, host_S, host_ATA;
x.download(host_x); S.download(host_S); ATA.download(host_ATA);
for (int i = 0; i < 10; ++i)
{
for (int j = 0; j < host_x.size(); ++j)
{
float x_i = host_x[j];
float ATA_jj = host_ATA[j*x.size() + j];
float prev_x_i = x_i;
float S_0 = - 2 * (host_S[j] + x_i * ATA_jj);
if (S_0 > lambda)
{
x_i = (lambda - S_0) / (2 * ATA_jj);
}
else if (S_0 < -lambda)
{
x_i = -(lambda + S_0) / (2 * ATA_jj);
}
else
{
x_i = 0.0f;
}
if (x_i < 0.0f)
{
x_i = 0.0f;
}
if (x_i > 1.f)
{
x_i = 1.0f;
}
host_x[j] = x_i;
float delta_x_i = x_i - prev_x_i;
for (int k = 0; k < x.size(); ++k)
{
host_S[k] -= delta_x_i * host_ATA[k*x.size() + j];
}
}
}*/
hipLaunchKernelGGL(( kernelShootingSolve), dim3(1), dim3(x.size()), 0, 0, x, S, ATA, lambda);
std::cout << lambda << std::endl;
}
__global__ void kernalSolveRMxb(pcl::gpu::PtrSz<float> B,
const int mB, const int ldB,
const pcl::gpu::PtrSz<float> R,
const int nR, const int ldR,
const pcl::gpu::PtrSz<float> Mask,
const int R_colIdx)
{
extern __shared__ float R_col[];
const int B_colIdx = threadIdx.x + blockDim.x * blockIdx.x;
if (threadIdx.x < nR)
{
R_col[threadIdx.x] = __ldg(&R[ldR*R_colIdx + threadIdx.x]);
}
__syncthreads();
if (B_colIdx < mB)
{
const float Mask_ii = __ldg(&Mask[nR * B_colIdx + R_colIdx]);
float * B_col = &B[ldB * B_colIdx];
if (Mask_ii > 0.0f)
{
float b_ii = B_col[R_colIdx] /= R_col[R_colIdx];
for (int i = 0; i < R_colIdx; ++i)
{
B_col[i] -= b_ii * R_col[i];
}
}
}
}
void cudaSolveRMxb(pcl::gpu::DeviceArray<float> B,
const int mB, const int ldB,
const pcl::gpu::DeviceArray<float> R,
const int nR, const int ldR,
const pcl::gpu::DeviceArray<float> Mask,
const hipStream_t stream)
{
dim3 block(1024);
dim3 grid(pcl::gpu::divUp(mB, block.x));
for (int i = 0; i < nR; ++i)
{
//std::cout << i << ": " << nR << std::endl;
hipLaunchKernelGGL(( kernalSolveRMxb), dim3(grid), dim3(block), nR*sizeof(float), stream, B, mB, ldB, R, nR, ldR, Mask, i);
cudaSafeCall(hipStreamSynchronize(stream));
}
}
__global__ void kernelSORUpdateA(pcl::gpu::PtrSz<float> A,
const pcl::gpu::PtrSz<float> A_,
const int n,
const float omega)
{
const int rowIdx = threadIdx.x + blockDim.x * blockIdx.x;
const int colIdx = threadIdx.y + blockDim.y * blockIdx.y;
if (rowIdx < n && colIdx < A_.size)
{
if (rowIdx == colIdx)
{
A[colIdx * n + rowIdx] = __ldg(&A_[colIdx * n + rowIdx]);
}
else
{
A[colIdx * n + rowIdx] = __ldg(&A_[colIdx * n + rowIdx]) * omega;
}
}
}
__global__ void kernelSORUpdateb(pcl::gpu::PtrSz<float> b,
const pcl::gpu::PtrSz<float> b_,
const pcl::gpu::PtrSz<float> D,
const pcl::gpu::PtrSz<float> B,
const int n,
const float omega)
{
const int rowIdx = threadIdx.x + blockDim.x * blockIdx.x;
const int colIdx = threadIdx.y + blockDim.y * blockIdx.y;
const int D_colIdx = colIdx / 3;
if (rowIdx < n && colIdx*n<b_.size)
{
const int index = colIdx * n + rowIdx;
b[index] = (__ldg(&b_[index]) + __ldg(&D[D_colIdx * n + rowIdx]) * __ldg(&B[index])) * omega;
}
}
void cudaSORUpdateAb(pcl::gpu::DeviceArray<float> A,
pcl::gpu::DeviceArray<float> b,
const pcl::gpu::DeviceArray<float> A_,
const pcl::gpu::DeviceArray<float> b_,
const pcl::gpu::DeviceArray<float> D,
const pcl::gpu::DeviceArray<float> B,
const int n,
const float omega,
const hipStream_t stream)
{
{
dim3 block(8, 8);
dim3 grid(pcl::gpu::divUp(n, block.x), pcl::gpu::divUp(n, block.y));
hipLaunchKernelGGL(( kernelSORUpdateA), dim3(grid), dim3(block), 0, stream, A, A_, n, omega);
//cudaSafeCall(hipStreamSynchronize(0));
}
{
dim3 block(8, 8);
dim3 grid(pcl::gpu::divUp(n, block.x), pcl::gpu::divUp(b.size()/n, block.y));
hipLaunchKernelGGL(( kernelSORUpdateb), dim3(grid), dim3(block), 0, stream, b, b_, D, B, n, omega);
//cudaSafeCall(hipStreamSynchronize(0));
}
}
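// One SOR sweep: each thread owns one right-hand-side column and updates x_i in sequence,
// accumulating the row products with Kahan compensated summation (omega is already folded into
// A and b by cudaSORUpdateAb); columns whose RMS residual is below 1e-7 are copied through.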
__global__ void kernelSORIter(pcl::gpu::PtrSz<float> X_,
pcl::gpu::PtrSz<float> X_assist,
const pcl::gpu::PtrSz<float> X,
const pcl::gpu::PtrSz<float> b,
const pcl::gpu::PtrSz<float> A,
const pcl::gpu::PtrSz<float> D,
const pcl::gpu::PtrSz<float> error,
const int n,
const float omega)
//pcl::gpu::PtrSz<float> temp_diag)
{
extern __shared__ float A_row[];
float diag;
const int colIdx = threadIdx.x + blockIdx.x * blockDim.x;
const int X_offset = colIdx * n;
const int D_colIdx_offset = colIdx / 3 * n;
const int col_max = X.size / n;
float result, comp;
float error_i = 1.0f;
if (colIdx < col_max)
{
error_i = sqrtf(__ldg(&error[colIdx]) / n);
}
for (int i = 0; i < n; ++i)
{
if (threadIdx.x < n)
{
A_row[threadIdx.x] = __ldg(&A[i*n + threadIdx.x]);
}
__syncthreads();
if (colIdx < col_max)
{
if (error_i > 1e-7f)
{
diag = __ldg(&D[D_colIdx_offset + i]) + A_row[i];
result = 0.0f;
comp = 0.0f;
for (int j = i + 1; j < n; ++j)
{
float t;
//result -= A_row[j] * __ldg(&X[X_offset + j]);
comp -= -A_row[j] * __ldg(&X[X_offset + j]);
t = result - comp;
comp = (t - result) + comp;
result = t;
}
for (int j = 0; j < i; ++j)
{
float t;
comp -= -A_row[j] * X_[X_offset + j];
t = result - comp;
comp = (t - result) + comp;
result = t;
}
{
float t;
//result += __ldg(&b[X_offset + i]);// +X_assist[X_offset + i];
comp -= __ldg(&b[X_offset + i]);
t = result - comp;
comp = (t - result) + comp;
result = t;
result /= diag;
result = result + (1.0f - omega) * __ldg(&X[X_offset + i]);
//result += (1.0f - omega) * __ldg(&X[X_offset + i]);
//comp -= (1.0f - omega) * diag * __ldg(&X[X_offset + i]);
//t = result - comp;
//comp = (t - result) + comp;
//result = t;
}
//for (int j = i + 1; j < n; ++j)
//{
// X_assist[X_offset + j] -= A_row[j] * result;
//}
X_[X_offset + i] = result;
//temp_diag[X_offset + i] = diag;
}
else
{
X_[X_offset + i] = __ldg(&X[X_offset + i]);
}
}
__syncthreads();
}
}
void cudaSORIter(pcl::gpu::DeviceArray<float> & X,
pcl::gpu::DeviceArray<float> & X_,
pcl::gpu::DeviceArray<float> X_assist,
const pcl::gpu::DeviceArray<float> A,
const pcl::gpu::DeviceArray<float> b,
const pcl::gpu::DeviceArray<float> D,
const pcl::gpu::DeviceArray<float> error,
const int n, const float omega,
const hipStream_t stream)
{
{
//std::vector<float> host_X, host_X_, host_diag, host_D;
//X.download(host_X);
//clearCudaMemAsync(X_assist, stream);
//pcl::gpu::DeviceArray<float> diag(X.size());
dim3 block(1024);
dim3 grid(pcl::gpu::divUp(X_.size() / n, block.x));
hipLaunchKernelGGL(( kernelSORIter), dim3(grid), dim3(block), n * sizeof(float), stream, X_, X_assist, X, b, A, D, error, n, omega);
cudaSafeCall(hipStreamSynchronize(stream));
X_.swap(X);
//
//X.download(host_X_);
////diag.download(host_diag);
//D.download(host_D);
//for (int i = 0; i < n; ++i)
//{
// std::cout << i << " " << host_D[32352 * n + i] << std::endl;
//}
//std::cout << 1;
}
}
__device__ __forceinline__ float square(float x)
{
return x*x;
}
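// Per-column residual: accumulates ||(A + diag(D)) x - b||^2 with compensated summation; the
// caller converts it to an RMS value as sqrt(error / n).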
__global__ void kernelSORComputeError(pcl::gpu::PtrSz<float> error,
const pcl::gpu::PtrSz<float> A,
const pcl::gpu::PtrSz<float> X,
const pcl::gpu::PtrSz<float> b,
const pcl::gpu::PtrSz<float> D,
const int n)
{
extern __shared__ float A_row[];
const int colIdx = threadIdx.x + blockIdx.x * blockDim.x;
const int b_offset = colIdx * n;
const int D_colIdx_offset = colIdx / 3 * n;
const int col_max = X.size / n;
float result, comp, error_i = 0.0f;
for (int i = 0; i < n; ++i)
{
if (threadIdx.x < n)
{
A_row[threadIdx.x] = __ldg(&A[i*n + threadIdx.x]);
}
__syncthreads();
if (colIdx < col_max)
{
float diag = __ldg(&D[D_colIdx_offset + i]);
result = 0.0f;
comp = 0.0f;
for (int j = 0; j < n; ++j)
{
float t;
//result += A_row[j] * __ldg(&X[b_offset + j]);
comp -= A_row[j] * __ldg(&X[b_offset + j]);
t = result - comp;
comp = (t - result) + comp;
result = t;
}
float t;
//result += diag * __ldg(&X[b_offset + i]);
comp -= diag * __ldg(&X[b_offset + i]);
t = result - comp;
comp = (t - result) + comp;
result = t;
error_i += square(result - __ldg(&b[b_offset + i]));
}
__syncthreads();
}
if (colIdx < col_max)
{
error[colIdx] = error_i;
}
}
void cudaSORComputeError(pcl::gpu::DeviceArray<float> error,
const pcl::gpu::DeviceArray<float> A,
const pcl::gpu::DeviceArray<float> X,
const pcl::gpu::DeviceArray<float> b,
const pcl::gpu::DeviceArray<float> D,
const int n,
const hipStream_t stream)
{
dim3 block(1024);
dim3 grid(pcl::gpu::divUp(error.size(), block.x));
hipLaunchKernelGGL(( kernelSORComputeError), dim3(grid), dim3(block), n * sizeof(float), stream, error, A, X, b, D, n);
cudaSafeCall(hipStreamSynchronize(stream));
//std::vector<float> host_error;
//error.download(host_error);
//float sum = 0.0f;
//for (auto &iter : host_error)
//{
// sum += iter;
//}
//std::cout << sqrtf(host_error[32352 * 3]/n) << std::endl;
}
#endif // USE_ROCM
| f9df00453aa1748f712d1f78753e8fdeb6b0e9cb.cu | /*!
* \file solver.cu
* \date 2018/10/12 15:51
*
* \author sireer
* Contact: [email protected]
*
 * \brief GPU kernels and host wrappers for depth-image ICP correspondence search, mesh transforms, and dense linear solvers (CG, PCG, SOR, coordinate-descent shooting).
*
* TODO: long description
*
* \note
*/
#pragma once
#include "Common.h"
#ifdef USE_CUDA
#include "MSFRUtil.cu"
#include "PclUtil.h"
#include <vector>
#include <iostream>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <device_functions.h>
#include <pcl\gpu\utils\safe_call.hpp>
#include <pcl\gpu\utils\cutil_math.h>
#include <pcl\gpu\containers\device_array.h>
#include <pcl\gpu\containers\kernel_containers.h>
#include <cub/device/device_radix_sort.cuh>
#include <cub/device/device_scan.cuh>
#define M 4096
#define BIN_WIDTH 36
#define BIN_WIDTH2 6
#define BIN_LENGTH 32
__device__ float dev_reg_lambda;
__device__ int dev_Ii_size;
__device__ int dev_Iij_size;
__device__ int dev_row_num;
__device__ int dev_col_num;
__device__ int dev_nonzero_Iij;
__device__ int dev_ATA_rowptr_size;
__global__ void
kernelExtractNewWeightFromWeightMap(pcl::gpu::PtrSz<float> new_weight,
const pcl::gpu::PtrStepSz<float3> weight_map,
const pcl::gpu::PtrStepSz<float> tri_map,
const pcl::gpu::PtrSz<int3> tri_list)
{
int col = blockDim.x * blockIdx.x + threadIdx.x;
int row = blockDim.y * blockIdx.y + threadIdx.y;
if (col < tri_map.cols && row < tri_map.rows)
{
int3 triIdx = tri_list[__float2int_rd(tri_map(row, col) + 0.5)];
float3 weight = weight_map(row, col);
		// Several pixels of the same triangle touch the same vertices, so accumulate atomically.
		atomicAdd(&new_weight[triIdx.x], weight.x);
		atomicAdd(&new_weight[triIdx.y], weight.y);
		atomicAdd(&new_weight[triIdx.z], weight.z);
}
}
void cudaExtractNewWeightFromWeightMap(pcl::gpu::DeviceArray<float> new_weight,
const pcl::gpu::DeviceArray2D<float3> weight_map,
const pcl::gpu::DeviceArray2D<float> tri_map,
const pcl::gpu::DeviceArray<int3> tri_list)
{
clearCudaMem(new_weight);
dim3 block(16, 16);
dim3 grid(pcl::gpu::divUp(tri_map.cols(), block.x),
pcl::gpu::divUp(tri_map.rows(), block.y));
kernelExtractNewWeightFromWeightMap<<<grid, block>>>(new_weight,
weight_map, tri_map, tri_list);
#if CUDA_GET_LAST_ERROR_AND_SYNC==1
// device synchronize
cudaSafeCall(cudaGetLastError());
cudaSafeCall(cudaStreamSynchronize(0));
#endif
}
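// Projective data association: each front-facing transformed vertex is projected into the depth
// image and the back-projected sample is kept only if its depth is within 1e-2 of the vertex;
// rejected vertices are marked (-1, -1, -1).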
__global__ void
kernelUpdateProjectionICPFromDepthImage(pcl::gpu::PtrSz<float3> projected_position,
const pcl::gpu::PtrSz<float3> position_sRT,
const pcl::gpu::PtrSz<unsigned short> is_front,
const pcl::gpu::PtrStepSz<float> depth_image,
const msfr::intrinsics camera_intr)
{
__shared__ msfr::intrinsics camera;
__shared__ int width, height;
int id = threadIdx.x;
if (id == 0)
{
camera = camera_intr;
width = depth_image.cols;
height = depth_image.rows;
}
__syncthreads();
int vId = blockDim.x * blockIdx.x + threadIdx.x;
if (vId < position_sRT.size)
{
projected_position[vId] = make_float3(-1.0f, -1.0f, -1.0f);
if (is_front[vId] == 1)
{
float3 pos = position_sRT[vId];
int2 uv = getProjectIndex(camera, pos);
if (uv.x > 0 && uv.x < width && uv.y>0 && uv.y < height)
{
float depth = depth_image(uv.y, uv.x);
if (depth > 0.0f && fabs(depth - pos.z) < 1e-2f) /// set sqrt(5)cm as the threshold
{
projected_position[vId] = unProjectedFromIndex(camera,
make_float3(__int2float_rn(uv.x) + 0.5f, __int2float_rn(uv.y) + 0.5f, depth));
}
}
}
}
}
void cudaUpdateProjectionICPFromDepthImage(
pcl::gpu::DeviceArray<float3> projected_position,
const pcl::gpu::DeviceArray<float3> position_sRT,
const pcl::gpu::DeviceArray<unsigned short> is_front,
const pcl::gpu::DeviceArray2D<float> depth_image,
const msfr::intrinsics camera_intr)
{
dim3 block(1024);
dim3 grid(pcl::gpu::divUp(position_sRT.size(), block.x));
kernelUpdateProjectionICPFromDepthImage<<<grid, block>>>(projected_position,
position_sRT, is_front, depth_image, camera_intr);
#if CUDA_GET_LAST_ERROR_AND_SYNC==1
// device synchronize
cudaSafeCall(cudaGetLastError());
cudaSafeCall(cudaStreamSynchronize(0));
#endif
//std::vector<float3> host_position, host_target_position;
//temp.download(host_position);
//projected_position.download(host_target_position);
}
__global__ void kernelDownloadDepthMap(pcl::gpu::PtrSz<float3> dst,
cudaTextureObject_t src, const int width, const int height)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if (id < dst.size)
{
int row = id / width;
int col = id - width * row;
dst[id].x = tex2D<float>(src, col, row);
dst[id].y = row;
dst[id].z = col;
}
}
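// Correspondence search along the vertex normal: step by `threshold` along the unit normal,
// then bisect five times against the depth map to locate the intersection with the observed
// surface; vertices without a valid intersection are marked (-1, -1, -1).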
__global__ void
kernelUpdateNormalICPFromDepthImage(pcl::gpu::PtrSz<float3> projected_position,
const pcl::gpu::PtrSz<float3> position_sRT,
const pcl::gpu::PtrSz<float3> normal_R,
const pcl::gpu::PtrSz<unsigned short> is_front,
const pcl::gpu::PtrStepSz<float> depth_image,
const msfr::intrinsics camera_intr,
const float threshold)
{
__shared__ msfr::intrinsics camera;
int id = threadIdx.x;
if (id == 0)
{
camera = camera_intr;
}
__syncthreads();
id += blockDim.x * blockIdx.x;
if (id < position_sRT.size)
{
projected_position[id] = make_float3(-1.0f, -1.0f, -1.0f);
float step = threshold; /// set sqrt(5)cm as the threshold
float3 pos = position_sRT[id];
float3 n = normal_R[id];
n = n / length(n);
int2 uv = getProjectIndex(camera, pos);
float depth;
if (is_front[id] == 1 && n.z < 0.0f)
{
float3 pos_f, pos_b;
int2 uv_f, uv_b;
unsigned short is_legal = 1; /// 1 represents legal
if (depth_image(uv.y, uv.x) < pos.z)
{
pos_f = pos + step * n;
pos_b = pos;
uv_f = getProjectIndex(camera, pos_f);
uv_b = uv;
depth = depth_image(uv_f.y, uv_f.x);
if (depth == 0.0f || depth < pos_f.z)
{
is_legal = 0;
}
}
else
{
pos_b = pos - step * n;
pos_f = pos;
uv_f = uv;
uv_b = getProjectIndex(camera, pos_b);
depth = depth_image(uv_b.y, uv_b.x);
if (depth == 0.0f || depth >= pos_b.z)
{
is_legal = 0;
}
}
for (int i = 0; i < 5; ++i)
{
float3 mid = (pos_b + pos_f) / 2;
				uv = getProjectIndex(camera, mid); // project the current bisection midpoint
depth = depth_image(uv.y, uv.x);
if (depth == 0.0f)
{
is_legal = 0;
}
if (depth < mid.z)
{
pos_b = mid;
uv_b = uv;
}
else
{
pos_f = mid;
uv_f = uv;
}
}
if (is_legal == 1)
{
projected_position[id] = unProjectedFromIndex(camera, make_float3(uv.x + 0.5f, uv.y + 0.5f, depth));
}
}
}
}
__global__ void
kernelUpdateNormalICPFromDepthImage(pcl::gpu::PtrSz<float3> projected_position,
const pcl::gpu::PtrSz<float3> position_sRT,
const pcl::gpu::PtrSz<float3> normal_R,
const pcl::gpu::PtrSz<unsigned short> is_front,
const cudaTextureObject_t depth_image,
const msfr::intrinsics camera_intr,
const float threshold)
{
__shared__ msfr::intrinsics camera;
int id = threadIdx.x;
if (id == 0)
{
camera = camera_intr;
}
__syncthreads();
id += blockDim.x * blockIdx.x;
if (id < position_sRT.size)
{
projected_position[id] = make_float3(-1.0f, -1.0f, -1.0f);
float step = threshold; /// set sqrt(5)cm as the threshold
float3 pos = position_sRT[id];
float3 n = normal_R[id];
n = n / length(n);
int2 uv = getProjectIndex(camera, pos);
float depth;
if (is_front[id] == 1 && n.z < 0.0f)
{
float3 pos_f, pos_b;
int2 uv_f, uv_b;
unsigned short is_legal = 1; /// 1 represents legal
if (tex2D<float>(depth_image, uv.x, uv.y) < pos.z)
{
pos_f = pos + step * n;
pos_b = pos;
uv_f = getProjectIndex(camera, pos_f);
uv_b = uv;
depth = tex2D<float>(depth_image, uv_f.x, uv_f.y);
if (depth == 0.0f || depth < pos_f.z)
{
is_legal = 0;
}
}
else
{
pos_b = pos - step * n;
pos_f = pos;
uv_f = uv;
uv_b = getProjectIndex(camera, pos_b);
depth = tex2D<float>(depth_image, uv_b.x, uv_b.y);
if (depth == 0.0f || depth >= pos_b.z)
{
is_legal = 0;
}
}
for (int i = 0; i < 5; ++i)
{
float3 mid = (pos_b + pos_f) / 2;
				uv = getProjectIndex(camera, mid); // project the current bisection midpoint
depth = tex2D<float>(depth_image, uv.x, uv.y);
if (depth == 0.0f)
{
is_legal = 0;
}
if (depth < mid.z)
{
pos_b = mid;
uv_b = uv;
}
else
{
pos_f = mid;
uv_f = uv;
}
}
if (is_legal == 1)
{
projected_position[id] = unProjectedFromIndex(camera, make_float3(uv.x + 0.5f, uv.y + 0.5f, depth));
}
}
}
}
void cudaUpdateNormalICPFromDepthImage(pcl::gpu::DeviceArray<float3> projected_position,
const pcl::gpu::DeviceArray<float3> position_sRT,
const pcl::gpu::DeviceArray<float3> normal_R,
const pcl::gpu::DeviceArray<unsigned short> is_front,
const cudaTextureObject_t depth_image,
const msfr::intrinsics camera_intr,
const float threshold)
{
dim3 block(1024);
dim3 grid(pcl::gpu::divUp(position_sRT.size(), block.x));
kernelUpdateNormalICPFromDepthImage<<<grid, block>>>(projected_position,
position_sRT, normal_R, is_front, depth_image, camera_intr, threshold);
#if CUDA_GET_LAST_ERROR_AND_SYNC==1
// device synchronize
cudaSafeCall(cudaGetLastError());
cudaSafeCall(cudaStreamSynchronize(0));
#endif
}
void cudaUpdateNormalICPFromDepthImage(pcl::gpu::DeviceArray<float3> projected_position,
const pcl::gpu::DeviceArray<float3> position_sRT,
const pcl::gpu::DeviceArray<float3> normal_R,
const pcl::gpu::DeviceArray<unsigned short> is_front,
const pcl::gpu::DeviceArray2D<float> depth_image,
const msfr::intrinsics camera_intr,
const float threshold)
{
dim3 block(1024);
dim3 grid(pcl::gpu::divUp(position_sRT.size(), block.x));
kernelUpdateNormalICPFromDepthImage<<<grid, block>>>(projected_position,
position_sRT, normal_R, is_front, depth_image, camera_intr, threshold);
#if CUDA_GET_LAST_ERROR_AND_SYNC==1
// device synchronize
cudaSafeCall(cudaGetLastError());
cudaSafeCall(cudaStreamSynchronize(0));
#endif
}
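// Brute-force closest-point search: each front-facing vertex is projected into the depth image
// and the back-projected points in a 21x21 pixel window around it are scanned for the nearest
// one within threshold_sq.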
__global__ void kernelUpdateClosestPointfromDepthImage(pcl::gpu::PtrSz<float3> projected_position,
const pcl::gpu::PtrSz<float3> position_RT,
const pcl::gpu::PtrSz<unsigned short> is_front,
const cudaTextureObject_t depth_image,
const msfr::intrinsics camera_intr,
const int width_, const int height_,
const float threshold_sq)
{
__shared__ msfr::intrinsics camera;
__shared__ int width, height;
if (threadIdx.x == 0)
{
camera = camera_intr;
width = width_;
height = height_;
}
__syncthreads();
int vid = blockDim.x * blockIdx.x + threadIdx.x;
if (vid < position_RT.size)
{
float min_dis2 = threshold_sq; ///
float3 nearest_pos = make_float3(-1.0f, -1.0f, -1.0f);
if (is_front[vid] == 1)
{
float3 pos_i = position_RT[vid];
int2 uv = getProjectIndex(camera, pos_i);
int min_x = max(uv.x - 10, 1);
int max_x = min(uv.x + 11, width - 1);
int min_y = max(uv.y - 10, 1);
int max_y = min(uv.y + 11, height - 1);
for (int i = min_x; i < max_x; ++i)
{
for (int j = min_y; j < max_y; ++j)
{
float depth = tex2D<float>(depth_image, i, j);
if (depth > 0.0f)
{
float3 pos_ij = unProjectedFromIndex(camera,
make_float3(__float2int_rn(i) + 0.5f, __float2int_rn(j) + 0.5f,
depth));
float dis2_ij = norm2(pos_i - pos_ij);
if (dis2_ij < min_dis2)
{
min_dis2 = dis2_ij;
nearest_pos = pos_ij;
}
}
}
}
}
projected_position[vid] = nearest_pos;
}
}
void cudaUpdateClosestPointfromDepthImage(pcl::gpu::DeviceArray<float3> projected_position,
const pcl::gpu::DeviceArray<float3> position_RT,
const pcl::gpu::DeviceArray<unsigned short> is_front,
const cudaTextureObject_t depth_image,
const msfr::intrinsics camera_intr,
const int width, const int height,
const float threshold)
{
const float threshold_sq = threshold * threshold;
dim3 block(1024);
dim3 grid(pcl::gpu::divUp(position_RT.size(), block.x));
kernelUpdateClosestPointfromDepthImage<<<grid, block>>>(projected_position,
position_RT, is_front, depth_image, camera_intr, width, height, threshold_sq);
#if CUDA_GET_LAST_ERROR_AND_SYNC==1
// device synchronize
cudaSafeCall(cudaGetLastError());
cudaSafeCall(cudaStreamSynchronize(0));
#endif
}
__global__ void kernelUpdateClosestPointfromDepthImageNew(
pcl::gpu::PtrSz<float3> projected_position,
const pcl::gpu::PtrSz<float3> position_RT,
const cudaTextureObject_t depth_image, const msfr::intrinsics camera_intr,
const int width_, const int height_, const float threshold_sq) {
__shared__ msfr::intrinsics camera;
__shared__ int width, height;
if (threadIdx.x == 0) {
camera = camera_intr;
width = width_;
height = height_;
}
__syncthreads();
int vid = blockDim.x * blockIdx.x + threadIdx.x;
if (vid < position_RT.size) {
float min_dis2 = threshold_sq; ///
float3 nearest_pos = make_float3(-1.0f, -1.0f, -1.0f);
{
float3 pos_i = position_RT[vid];
int2 uv = getProjectIndex(camera, pos_i);
int min_x = max(uv.x - 10, 1);
int max_x = min(uv.x + 11, width - 1);
int min_y = max(uv.y - 10, 1);
int max_y = min(uv.y + 11, height - 1);
for (int i = min_x; i < max_x; ++i) {
for (int j = min_y; j < max_y; ++j) {
float depth = tex2D<float>(depth_image, i, j);
if (depth > 0.0f) {
float3 pos_ij = unProjectedFromIndex(
camera, make_float3(__float2int_rn(i) + 0.5f,
__float2int_rn(j) + 0.5f, depth));
float dis2_ij = norm2(pos_i - pos_ij);
if (dis2_ij < min_dis2) {
min_dis2 = dis2_ij;
nearest_pos = pos_ij;
}
}
}
}
}
projected_position[vid] = nearest_pos;
}
}
void cudaUpdateClosestPointfromDepthImageNew(
pcl::gpu::DeviceArray<float3> projected_position,
const pcl::gpu::DeviceArray<float3> position_RT,
const cudaTextureObject_t depth_image, const msfr::intrinsics camera_intr,
const int width, const int height, const float threshold) {
const float threshold_sq = threshold * threshold;
dim3 block(1024);
dim3 grid(pcl::gpu::divUp(position_RT.size(), block.x));
kernelUpdateClosestPointfromDepthImageNew<<<grid, block>>>(
projected_position, position_RT, depth_image, camera_intr,
width, height, threshold_sq);
#if CUDA_GET_LAST_ERROR_AND_SYNC == 1
// device synchronize
cudaSafeCall(cudaGetLastError());
cudaSafeCall(cudaStreamSynchronize(0));
#endif
}
__global__ void kernelUpdateClosestPointfromDepthImageWithNormalConstraint(
pcl::gpu::PtrSz<float3> projected_position,
const pcl::gpu::PtrSz<float3> position_RT,
const pcl::gpu::PtrSz<float3> normal_R,
const pcl::gpu::PtrSz<unsigned short> is_front,
const cudaTextureObject_t depth_image,
const msfr::intrinsics camera_intr,
const int width_, const int height_,
const float threshold_sq)
{
__shared__ msfr::intrinsics camera;
__shared__ int width, height;
if (threadIdx.x == 0)
{
camera = camera_intr;
width = width_;
height = height_;
}
__syncthreads();
int vid = blockDim.x * blockIdx.x + threadIdx.x;
if (vid < position_RT.size)
{
float min_dis2 = threshold_sq; ///
float3 nearest_pos = make_float3(-1.0f, -1.0f, -1.0f);
float3 pos_i = position_RT[vid];
float3 normal_i = normal_R[vid];
if (is_front[vid] == 1 && normal_i.z < -0.2f)
{
int2 uv = getProjectIndex(camera, pos_i);
int min_x = max(uv.x - 10, 1);
int max_x = min(uv.x + 11, width - 1);
int min_y = max(uv.y - 10, 1);
int max_y = min(uv.y + 11, height - 1);
for (int i = min_x; i < max_x; ++i)
{
for (int j = min_y; j < max_y; ++j)
{
float depth = tex2D<float>(depth_image, i, j);
if (depth > 0.0f)
{
float3 pos_ij = unProjectedFromIndex(camera,
make_float3(__float2int_rn(i) + 0.5f, __float2int_rn(j) + 0.5f,
depth));
float dis2_ij = norm2(pos_i - pos_ij);
float cos_i = abs(dot(pos_i - pos_ij, normal_i) / sqrtf(dis2_ij));
if (dis2_ij < min_dis2 && cos_i < 0.5f)
{
min_dis2 = dis2_ij;
nearest_pos = pos_ij;
}
}
}
}
}
projected_position[vid] = nearest_pos;
}
}
void cudaUpdateClosestPointfromDepthImageWithNormalConstraint(
pcl::gpu::DeviceArray<float3> projected_position,
const pcl::gpu::DeviceArray<float3> position_RT,
const pcl::gpu::DeviceArray<float3> normal_R,
const pcl::gpu::DeviceArray<unsigned short> is_front,
const cudaTextureObject_t depth_image,
const msfr::intrinsics camera_intr,
const int width, const int height,
const float threshold)
{
const float threshold_sq = threshold * threshold;
dim3 block(1024);
dim3 grid(pcl::gpu::divUp(position_RT.size(), block.x));
kernelUpdateClosestPointfromDepthImageWithNormalConstraint<<<grid, block>>>(projected_position,
position_RT, normal_R, is_front, depth_image,
camera_intr, width, height, threshold_sq);
#if CUDA_GET_LAST_ERROR_AND_SYNC==1
// device synchronize
cudaSafeCall(cudaGetLastError());
cudaSafeCall(cudaStreamSynchronize(0));
#endif
}
__device__ __forceinline__ bool is_in_0_1(float a, float delta = 0.0f) {
return a >= -delta && a <= 1.0f + delta;
}
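// Point-splat rendering: every vertex is transformed by the scaled rotation and translation,
// projected into the canvas, and its depth is accumulated (non-atomically) at the covered pixel.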
__global__ void kernelRenderMesh(pcl::gpu::PtrStepSz<float> canvas,
const pcl::gpu::PtrSz<float3> position,
const pcl::gpu::PtrSz<float> rotation_,
const pcl::gpu::PtrSz<float> translation_,
const msfr::intrinsics camera_)
{
__shared__ msfr::intrinsics camera;
__shared__ float rotation[9];
__shared__ float3 translation;
__shared__ int width, height;
int id = threadIdx.x;
if (id == 1)
{
camera = camera_;
translation.x = translation_[0];
translation.y = translation_[1];
translation.z = translation_[2];
width = canvas.cols;
height = canvas.rows;
}
if (id < 9)
{
rotation[id] = rotation_[id] * translation_[3];
}
__syncthreads();
id += blockDim.x*blockIdx.x;
if (id < position.size)
{
float3 pos = M33xV3(rotation, position[id]) + translation;
int2 uv = getProjectIndex(camera, pos);
if (uv.x>=0 && uv.x< width && uv.y>=0 && uv.y< height)
canvas(uv.y, uv.x) += pos.z;
}
}
void cudaRenderMesh(pcl::gpu::DeviceArray2D<float> canvas,
const pcl::gpu::DeviceArray<float3> position,
const pcl::gpu::DeviceArray<float> rotation,
const pcl::gpu::DeviceArray<float> translation,
const msfr::intrinsics & camera)
{
dim3 block(1024);
dim3 grid(pcl::gpu::divUp(position.size(), block.x));
clearCudaMem(canvas);
kernelRenderMesh<<<grid, block>>>(canvas, position,
rotation, translation, camera);
}
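// Applies the inverse similarity transform to each weighted target: with the rotation pre-divided
// by the scale, the result is R^T * (p - lambda * t) / s; targets with non-positive weight are zeroed.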
__global__ void kernelUpdateInvSRTTargetPosition(pcl::gpu::PtrSz<float3> target_position_inv_sRT,
const pcl::gpu::PtrSz<float3> target_position,
const pcl::gpu::PtrSz<float> lambda_position,
const pcl::gpu::PtrSz<float> rotation_,
const pcl::gpu::PtrSz<float> translation_)
{
__shared__ float rotation[9];
__shared__ float3 translation;
int id = threadIdx.x;
if (id == 0)
{
translation.x = translation_[0];
translation.y = translation_[1];
translation.z = translation_[2];
}
if (id < 9)
{
rotation[id] = rotation_[id] / translation_[3];
}
__syncthreads();
id += blockDim.x * blockIdx.x;
if (id < target_position.size)
{
float lambda_i = __ldg(&lambda_position[id]);
if (lambda_i > 0.0f)
{
target_position_inv_sRT[id] = M33TxV3(rotation, target_position[id]
- lambda_i * translation);
}
else
{
target_position_inv_sRT[id] = make_float3(0.0f, 0.0f, 0.0f);
}
}
}
void cudaUpdateInvSRTTargetPosition(pcl::gpu::DeviceArray<float3> target_position_inv_sRT,
const pcl::gpu::DeviceArray<float3> target_position,
const pcl::gpu::DeviceArray<float> lambda_position,
const pcl::gpu::DeviceArray<float> rotation,
const pcl::gpu::DeviceArray<float> translation)
{
dim3 block(1024);
dim3 grid(pcl::gpu::divUp(target_position.size(), block.x));
kernelUpdateInvSRTTargetPosition<<<grid, block>>>(target_position_inv_sRT, target_position,
lambda_position, rotation, translation);
#if CUDA_GET_LAST_ERROR_AND_SYNC==1
// device synchronize
cudaSafeCall(cudaGetLastError());
cudaSafeCall(cudaStreamSynchronize(0));
#endif
}
__global__ void kernelUpdateInvSRTProjectionPosition(pcl::gpu::PtrSz<float3> projection_position_inv_sRT,
const pcl::gpu::PtrSz<float3> projection_position,
const pcl::gpu::PtrSz<float> rotation_,
const pcl::gpu::PtrSz<float> translation_)
{
__shared__ float rotation[9];
__shared__ float3 translation;
int id = threadIdx.x;
if (id == 0)
{
translation.x = translation_[0];
translation.y = translation_[1];
translation.z = translation_[2];
}
if (id < 9)
{
rotation[id] = rotation_[id] / translation_[3];
}
__syncthreads();
id += blockDim.x * blockIdx.x;
if (id < projection_position_inv_sRT.size)
{
if (projection_position[id].z > 0.0f)
{
projection_position_inv_sRT[id] = M33TxV3(rotation, projection_position[id]
- translation);
}
else
{
projection_position_inv_sRT[id] = make_float3(0.0f, 0.0f, 0.0f);
}
}
}
void cudaUpdateInvSRTProjectionPosition(pcl::gpu::DeviceArray<float3> projection_position_inv_sRT,
const pcl::gpu::DeviceArray<float3> projection_position,
const pcl::gpu::DeviceArray<float> rotation,
const pcl::gpu::DeviceArray<float> translation)
{
dim3 block(1024);
dim3 grid(pcl::gpu::divUp(projection_position.size(), block.x));
kernelUpdateInvSRTProjectionPosition<<<grid, block>>>(projection_position_inv_sRT, projection_position,
rotation, translation);
#if CUDA_GET_LAST_ERROR_AND_SYNC==1
// device synchronize
cudaSafeCall(cudaGetLastError());
cudaSafeCall(cudaStreamSynchronize(0));
#endif
}
__global__ void kernelSRTPositionNormal(pcl::gpu::PtrSz<float3> position_RT,
pcl::gpu::PtrSz<float3> normal_R,
const pcl::gpu::PtrSz<float3> position,
const pcl::gpu::PtrSz<float3> normal,
const pcl::gpu::PtrSz<float> rotation_,
const pcl::gpu::PtrSz<float> translation_)
{
__shared__ float rotation[9], rotation_scale[9];
__shared__ float3 translation;
__shared__ float scale;
int id = threadIdx.x;
if (id == 0)
{
translation.x = translation_[0];
translation.y = translation_[1];
translation.z = translation_[2];
scale = translation_[3];
}
if (id < 9)
{
rotation[id] = rotation_[id];
rotation_scale[id] = rotation[id] * scale;
}
__syncthreads();
id += blockDim.x * blockIdx.x;
if (id < position.size)
{
position_RT[id] = M33xV3(rotation_scale, position[id]) + translation;
normal_R[id] = M33xV3(rotation, normal[id]);
}
}
void cudaUpdateSRTPositionNormal(pcl::gpu::DeviceArray<float3> position_RT,
pcl::gpu::DeviceArray<float3> normal_R,
const pcl::gpu::DeviceArray<float3> position,
const pcl::gpu::DeviceArray<float3> normal,
const pcl::gpu::DeviceArray<float> rotation,
const pcl::gpu::DeviceArray<float> translation)
{
dim3 block(1024);
dim3 grid(pcl::gpu::divUp(position.size(), block.x));
kernelSRTPositionNormal<<<grid, block>>>(position_RT, normal_R,
position, normal, rotation, translation);
#if CUDA_GET_LAST_ERROR_AND_SYNC==1
// device synchronize
cudaSafeCall(cudaGetLastError());
cudaSafeCall(cudaStreamSynchronize(0));
#endif
//std::vector<float3> host_position_RT, host_position;
//std::vector<float> host_rotation, host_translation;
//position_RT.download(host_position_RT);
//position.download(host_position);
//rotation.download(host_rotation);
//translation.download(host_translation);
}
// Dense matrix-vector product: the kernel is launched with gridDim.x equal to the length of x, one block per output row.
__device__ __forceinline__ void computeAx_i(float & xi,
const pcl::gpu::PtrSz<float> &x,
const pcl::gpu::PtrSz<float> &A)
{
int threadDim = (gridDim.x + 31) >> 5; /// x.size equals gridDim.x
int beginId = threadDim * threadIdx.x;
int endId = min(gridDim.x, beginId + threadDim);
xi = 0;
for (int i = beginId, offset = blockIdx.x*x.size; i < endId; ++i)
{
xi += A[offset + i] * x[i];
}
__syncthreads();
xi = warp_scan(xi);
}
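// Note on computeAx_i: each block produces one entry of A*x. The 32 threads of
// the block each accumulate a contiguous chunk of row blockIdx.x (A is indexed
// as A[row * x.size + col]), and warp_scan leaves inclusive prefix sums in the
// lanes, so lane 31 holds the complete dot product; callers read the result
// from threadIdx.x == 31.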
__global__ void prepare_r_p(pcl::gpu::PtrSz<float> r,
pcl::gpu::PtrSz<float> p,
const pcl::gpu::PtrSz<float> x,
const pcl::gpu::PtrSz<float> A,
const pcl::gpu::PtrSz<float> b)
{
float xi;
computeAx_i(xi, x, A);
if (threadIdx.x == 31)
{
r[blockIdx.x] = b[blockIdx.x] - xi;
p[blockIdx.x] = r[blockIdx.x];
}
}
__global__ void kernelComputeAx(pcl::gpu::PtrSz<float> Ax,
const pcl::gpu::PtrSz<float> A,
const pcl::gpu::PtrSz<float> x)
{
float xi;
computeAx_i(xi, x, A);
if (threadIdx.x == 31)
{
Ax[blockIdx.x] = xi;
}
}
__device__ __forceinline__
float warp_up_scan8(float data)
{
data += __shfl_up(data, 1);
data += __shfl_up(data, 2);
data += __shfl_up(data, 4);
return data;
}
__device__ __forceinline__
void compute_uTvi(float & uTv, const pcl::gpu::PtrSz<float> & u,
const pcl::gpu::PtrSz<float> & v)
{
__shared__ float partial_sum[8];
uTv = 0;
if (threadIdx.x < u.size)
{
uTv = u[threadIdx.x] * v[threadIdx.x];
}
__syncthreads();
uTv = warp_scan(uTv);
if ((threadIdx.x & 31) == 31)
{
partial_sum[threadIdx.x >> 5] = uTv;
}
__syncthreads();
if (threadIdx.x < 8)
{
uTv = partial_sum[threadIdx.x];
uTv = warp_up_scan8(uTv);
}
}
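// Note on compute_uTvi: a block-wide dot product for vectors of length <= 256.
// Each thread multiplies one element pair, warp_scan reduces within each of the
// 8 warps, the per-warp totals are staged in partial_sum, and warp_up_scan8
// combines them so that threadIdx.x == 7 ends up holding the full sum.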
__global__ void kernelCGIter(pcl::gpu::PtrSz<float> x,
pcl::gpu::PtrSz<float> r,
pcl::gpu::PtrSz<float> p,
const pcl::gpu::PtrSz<float> Ap)
{
__shared__ float alpha, beta;
float rTri, pTApi;
compute_uTvi(rTri, r, r);
compute_uTvi(pTApi, p, Ap);
if (threadIdx.x == 7)
{
alpha = rTri / pTApi;
}
__syncthreads();
if (threadIdx.x < x.size)
{
x[threadIdx.x] += alpha*p[threadIdx.x];
r[threadIdx.x] -= alpha*Ap[threadIdx.x];
}
float new_rTri;
compute_uTvi(new_rTri, r, r);
if (threadIdx.x == 7)
{
beta = new_rTri / rTri;
}
__syncthreads();
if (threadIdx.x < x.size)
{
p[threadIdx.x] = beta * p[threadIdx.x] + r[threadIdx.x];
}
}
void cudaCGSolver(pcl::gpu::DeviceArray<float> x,
const pcl::gpu::DeviceArray<float> A,
const pcl::gpu::DeviceArray<float> b,
int nIters)
{
pcl::gpu::DeviceArray<float> p(b.size()), r(b.size()), Ap(b.size());
dim3 block(32);
dim3 grid(x.size());
prepare_r_p<<<grid, block>>>(r, p, x, A, b);
#if CUDA_GET_LAST_ERROR_AND_SYNC==1
// device synchronize
cudaSafeCall(cudaGetLastError());
cudaSafeCall(cudaStreamSynchronize(0));
#endif
for (int i = 0; i < nIters; i++)
{
kernelComputeAx<<<grid, block>>>(Ap, A, p);
#if CUDA_GET_LAST_ERROR_AND_SYNC==1
// device synchronize
cudaSafeCall(cudaGetLastError());
cudaSafeCall(cudaStreamSynchronize(0));
#endif
kernelCGIter<<<1, 256>>>(x, r ,p, Ap);
#if CUDA_GET_LAST_ERROR_AND_SYNC==1
// device synchronize
cudaSafeCall(cudaGetLastError());
cudaSafeCall(cudaStreamSynchronize(0));
#endif
}
}
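// Minimal usage sketch for cudaCGSolver (an assumption for illustration, not
// code from this project): A is a dense, symmetric positive-definite n x n
// matrix stored row-major in a flat array, and n must not exceed 256 because
// kernelCGIter runs in a single 256-thread block.
//   pcl::gpu::DeviceArray<float> A(n * n), b(n), x(n);
//   // upload A, b and an initial guess for x, then:
//   cudaCGSolver(x, A, b, /*nIters=*/32);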
#define USE_PRECONDITION 0 // FIXME: why does this have to stay disabled?
#define PCG_EVAL_RESIDUAL 0
__global__ void prepare_r_p_z(pcl::gpu::PtrSz<float> r,
pcl::gpu::PtrSz<float> p,
pcl::gpu::PtrSz<float> z,
const pcl::gpu::PtrSz<float> x,
const pcl::gpu::PtrSz<float> A,
const pcl::gpu::PtrSz<float> b,
pcl::gpu::PtrSz<float> j) {
float xi;
computeAx_i(xi, x, A);
if(threadIdx.x==31) {
auto& rTmp = r[blockIdx.x];
rTmp = b[blockIdx.x] - xi;
p[blockIdx.x] = rTmp;
#if USE_PRECONDITION
j[blockIdx.x] = A[blockIdx.x*(x.size + 1)];
z[blockIdx.x] = rTmp / j[blockIdx.x];
#else
z[blockIdx.x] = rTmp;
#endif
}
}
__global__ void kernelPCGIter(pcl::gpu::PtrSz<float> x,
pcl::gpu::PtrSz<float> r,
pcl::gpu::PtrSz<float> p,
pcl::gpu::PtrSz<float> z,
const pcl::gpu::PtrSz<float> j,
const pcl::gpu::PtrSz<float> Ap) {
__shared__ float alpha, beta;
float zTri, pTApi;
compute_uTvi(zTri, z, r);
compute_uTvi(pTApi, p, Ap);
if (threadIdx.x == 7)
{
alpha = zTri / pTApi;
}
__syncthreads();
if (threadIdx.x < x.size) {
auto& ri = r[threadIdx.x];
x[threadIdx.x] += alpha * p[threadIdx.x];
ri -= alpha * Ap[threadIdx.x];
#if USE_PRECONDITION
z[threadIdx.x] = ri / j[threadIdx.x];
#else
z[threadIdx.x] = ri;
#endif
}
float new_zTri;
compute_uTvi(new_zTri, z, r);
if (threadIdx.x == 7)
{
beta = new_zTri / zTri;
}
__syncthreads();
if (threadIdx.x < x.size)
{
p[threadIdx.x] = beta * p[threadIdx.x] + z[threadIdx.x];
}
}
void cudaPCGSolver(pcl::gpu::DeviceArray<float> x,
const pcl::gpu::DeviceArray<float> A,
const pcl::gpu::DeviceArray<float> b,
int nIters,
cudaStream_t stream) {
pcl::gpu::DeviceArray<float> p(b.size()), r(b.size()), Ap(b.size()),z(b.size()), j(x.size());
dim3 block(32);
dim3 grid(x.size());
prepare_r_p_z << <grid, block, 0, stream >> > (r, p, z, x, A, b, j);
for (int k = 0; k < nIters; k++) {
kernelComputeAx << <grid, block, 0, stream >> > (Ap, A, p);
kernelPCGIter<<<1,256,0,stream>>>(x, r, p, z, j, Ap);
}
}
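// Note: with USE_PRECONDITION == 1 this becomes a Jacobi-preconditioned CG
// solver (j caches diag(A) and z = r ./ diag(A)); with the flag left at 0,
// z is just a copy of r and the iteration reduces to plain conjugate gradients.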
__global__ void kernelShootingSolve(pcl::gpu::PtrSz<float> x,
pcl::gpu::PtrSz<float> S,
const pcl::gpu::PtrSz<float> ATA,
const float lambda)
{
const int x_size = x.size;
const int tId = threadIdx.x;
__shared__ float delta_x_i;
if (tId < x_size)
{
for (int i = 0; i < 100; ++i)
{
for (int j = 0; j < x_size; ++j)
{
if (tId == j)
{
float ATA_jj = __ldg(&ATA[j * x_size + j]);
float x_i = __ldg(&x[j]);
float prev_x_i = x_i;
float S_0 = -2 * (__ldg(&S[j]) + x_i * ATA_jj);
if (S_0 > lambda)
{
x_i = (lambda - S_0) / (2 * ATA_jj);
}
else if (S_0 < -lambda)
{
x_i = -(lambda + S_0) / (2 * ATA_jj);
}
else
{
x_i = 0.0f;
}
if (x_i < 0.0f)
{
x_i = 0.0f;
}
if (x_i > 1.f)
{
x_i = 1.0f;
}
delta_x_i = x_i - prev_x_i;
x[j] = x_i;
}
__syncthreads();
S[tId] -= delta_x_i * __ldg(&ATA[tId * x_size + j]);
}
}
}
}
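// Note: this appears to be a "shooting" (cyclic coordinate-descent) solver for
// L1-regularised least squares: each coordinate is soft-thresholded against
// lambda, additionally clamped to [0, 1], and S is kept consistent by
// subtracting delta_x_i times the matching column of ATA. Only the owning
// thread (tId == j) updates x[j] and broadcasts delta_x_i through shared
// memory before every thread adjusts its own entry of S.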
void cudaShootingSolve(pcl::gpu::DeviceArray<float> x,
pcl::gpu::DeviceArray<float> S,
const pcl::gpu::DeviceArray<float> ATA,
const float lambda)
{
/* cudaSafeCall(cudaStreamSynchronize(0));
std::vector<float> host_x, host_S, host_ATA;
x.download(host_x); S.download(host_S); ATA.download(host_ATA);
for (int i = 0; i < 10; ++i)
{
for (int j = 0; j < host_x.size(); ++j)
{
float x_i = host_x[j];
float ATA_jj = host_ATA[j*x.size() + j];
float prev_x_i = x_i;
float S_0 = - 2 * (host_S[j] + x_i * ATA_jj);
if (S_0 > lambda)
{
x_i = (lambda - S_0) / (2 * ATA_jj);
}
else if (S_0 < -lambda)
{
x_i = -(lambda + S_0) / (2 * ATA_jj);
}
else
{
x_i = 0.0f;
}
if (x_i < 0.0f)
{
x_i = 0.0f;
}
if (x_i > 1.f)
{
x_i = 1.0f;
}
host_x[j] = x_i;
float delta_x_i = x_i - prev_x_i;
for (int k = 0; k < x.size(); ++k)
{
host_S[k] -= delta_x_i * host_ATA[k*x.size() + j];
}
}
}*/
kernelShootingSolve<<<1, x.size()>>> (x, S, ATA, lambda);
std::cout << lambda << std::endl;
}
__global__ void kernalSolveRMxb(pcl::gpu::PtrSz<float> B,
const int mB, const int ldB,
const pcl::gpu::PtrSz<float> R,
const int nR, const int ldR,
const pcl::gpu::PtrSz<float> Mask,
const int R_colIdx)
{
extern __shared__ float R_col[];
const int B_colIdx = threadIdx.x + blockDim.x * blockIdx.x;
if (threadIdx.x < nR)
{
R_col[threadIdx.x] = __ldg(&R[ldR*R_colIdx + threadIdx.x]);
}
__syncthreads();
if (B_colIdx < mB)
{
const float Mask_ii = __ldg(&Mask[nR * B_colIdx + R_colIdx]);
float * B_col = &B[ldB * B_colIdx];
if (Mask_ii > 0.0f)
{
float b_ii = B_col[R_colIdx] /= R_col[R_colIdx];
for (int i = 0; i < R_colIdx; ++i)
{
B_col[i] -= b_ii * R_col[i];
}
}
}
}
void cudaSolveRMxb(pcl::gpu::DeviceArray<float> B,
const int mB, const int ldB,
const pcl::gpu::DeviceArray<float> R,
const int nR, const int ldR,
const pcl::gpu::DeviceArray<float> Mask,
const cudaStream_t stream)
{
dim3 block(1024);
dim3 grid(pcl::gpu::divUp(mB, block.x));
for (int i = 0; i < nR; ++i)
{
//std::cout << i << ": " << nR << std::endl;
kernalSolveRMxb<<<grid, block, nR*sizeof(float), stream>>>(B, mB, ldB, R, nR, ldR, Mask, i);
cudaSafeCall(cudaStreamSynchronize(stream));
}
}
__global__ void kernelSORUpdateA(pcl::gpu::PtrSz<float> A,
const pcl::gpu::PtrSz<float> A_,
const int n,
const float omega)
{
const int rowIdx = threadIdx.x + blockDim.x * blockIdx.x;
const int colIdx = threadIdx.y + blockDim.y * blockIdx.y;
if (rowIdx < n && colIdx < A_.size)
{
if (rowIdx == colIdx)
{
A[colIdx * n + rowIdx] = __ldg(&A_[colIdx * n + rowIdx]);
}
else
{
A[colIdx * n + rowIdx] = __ldg(&A_[colIdx * n + rowIdx]) * omega;
}
}
}
__global__ void kernelSORUpdateb(pcl::gpu::PtrSz<float> b,
const pcl::gpu::PtrSz<float> b_,
const pcl::gpu::PtrSz<float> D,
const pcl::gpu::PtrSz<float> B,
const int n,
const float omega)
{
const int rowIdx = threadIdx.x + blockDim.x * blockIdx.x;
const int colIdx = threadIdx.y + blockDim.y * blockIdx.y;
const int D_colIdx = colIdx / 3;
if (rowIdx < n && colIdx*n<b_.size)
{
const int index = colIdx * n + rowIdx;
b[index] = (__ldg(&b_[index]) + __ldg(&D[D_colIdx * n + rowIdx]) * __ldg(&B[index])) * omega;
}
}
void cudaSORUpdateAb(pcl::gpu::DeviceArray<float> A,
pcl::gpu::DeviceArray<float> b,
const pcl::gpu::DeviceArray<float> A_,
const pcl::gpu::DeviceArray<float> b_,
const pcl::gpu::DeviceArray<float> D,
const pcl::gpu::DeviceArray<float> B,
const int n,
const float omega,
const cudaStream_t stream)
{
{
dim3 block(8, 8);
dim3 grid(pcl::gpu::divUp(n, block.x), pcl::gpu::divUp(n, block.y));
kernelSORUpdateA<<<grid, block, 0, stream>>>(A, A_, n, omega);
//cudaSafeCall(cudaStreamSynchronize(0));
}
{
dim3 block(8, 8);
dim3 grid(pcl::gpu::divUp(n, block.x), pcl::gpu::divUp(b.size()/n, block.y));
kernelSORUpdateb<<<grid, block, 0, stream>>>(b, b_, D, B, n, omega);
//cudaSafeCall(cudaStreamSynchronize(0));
}
}
__global__ void kernelSORIter(pcl::gpu::PtrSz<float> X_,
pcl::gpu::PtrSz<float> X_assist,
const pcl::gpu::PtrSz<float> X,
const pcl::gpu::PtrSz<float> b,
const pcl::gpu::PtrSz<float> A,
const pcl::gpu::PtrSz<float> D,
const pcl::gpu::PtrSz<float> error,
const int n,
const float omega)
//pcl::gpu::PtrSz<float> temp_diag)
{
extern __shared__ float A_row[];
float diag;
const int colIdx = threadIdx.x + blockIdx.x * blockDim.x;
const int X_offset = colIdx * n;
const int D_colIdx_offset = colIdx / 3 * n;
const int col_max = X.size / n;
float result, comp;
float error_i = 1.0f;
if (colIdx < col_max)
{
error_i = sqrtf(__ldg(&error[colIdx]) / n);
}
for (int i = 0; i < n; ++i)
{
if (threadIdx.x < n)
{
A_row[threadIdx.x] = __ldg(&A[i*n + threadIdx.x]);
}
__syncthreads();
if (colIdx < col_max)
{
if (error_i > 1e-7f)
{
diag = __ldg(&D[D_colIdx_offset + i]) + A_row[i];
result = 0.0f;
comp = 0.0f;
for (int j = i + 1; j < n; ++j)
{
float t;
//result -= A_row[j] * __ldg(&X[X_offset + j]);
comp -= -A_row[j] * __ldg(&X[X_offset + j]);
t = result - comp;
comp = (t - result) + comp;
result = t;
}
for (int j = 0; j < i; ++j)
{
float t;
comp -= -A_row[j] * X_[X_offset + j];
t = result - comp;
comp = (t - result) + comp;
result = t;
}
{
float t;
//result += __ldg(&b[X_offset + i]);// +X_assist[X_offset + i];
comp -= __ldg(&b[X_offset + i]);
t = result - comp;
comp = (t - result) + comp;
result = t;
result /= diag;
result = result + (1.0f - omega) * __ldg(&X[X_offset + i]);
//result += (1.0f - omega) * __ldg(&X[X_offset + i]);
//comp -= (1.0f - omega) * diag * __ldg(&X[X_offset + i]);
//t = result - comp;
//comp = (t - result) + comp;
//result = t;
}
//for (int j = i + 1; j < n; ++j)
//{
// X_assist[X_offset + j] -= A_row[j] * result;
//}
X_[X_offset + i] = result;
//temp_diag[X_offset + i] = diag;
}
else
{
X_[X_offset + i] = __ldg(&X[X_offset + i]);
}
}
__syncthreads();
}
}
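// Note: one thread owns one right-hand-side column and performs a full SOR
// (Gauss-Seidel with relaxation) sweep over its n unknowns, with one row of A
// staged in shared memory per inner step. The result/comp pairs implement
// Kahan (compensated) summation to limit round-off in the long dot products;
// the same trick is reused in kernelSORComputeError below. Columns whose RMS
// residual is already below 1e-7 are copied through unchanged.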
void cudaSORIter(pcl::gpu::DeviceArray<float> & X,
pcl::gpu::DeviceArray<float> & X_,
pcl::gpu::DeviceArray<float> X_assist,
const pcl::gpu::DeviceArray<float> A,
const pcl::gpu::DeviceArray<float> b,
const pcl::gpu::DeviceArray<float> D,
const pcl::gpu::DeviceArray<float> error,
const int n, const float omega,
const cudaStream_t stream)
{
{
//std::vector<float> host_X, host_X_, host_diag, host_D;
//X.download(host_X);
//clearCudaMemAsync(X_assist, stream);
//pcl::gpu::DeviceArray<float> diag(X.size());
dim3 block(1024);
dim3 grid(pcl::gpu::divUp(X_.size() / n, block.x));
kernelSORIter<<<grid, block, n * sizeof(float), stream>>>(X_, X_assist, X, b, A, D, error, n, omega);
cudaSafeCall(cudaStreamSynchronize(stream));
X_.swap(X);
//
//X.download(host_X_);
////diag.download(host_diag);
//D.download(host_D);
//for (int i = 0; i < n; ++i)
//{
// std::cout << i << " " << host_D[32352 * n + i] << std::endl;
//}
//std::cout << 1;
}
}
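// A plausible outer loop (an assumption; the actual driver lives outside this
// file) would build the relaxed system once with cudaSORUpdateAb and then
// alternate cudaSORComputeError with cudaSORIter until the per-column residual
// stops improving. cudaSORIter swaps X and X_ internally, so X always refers
// to the latest iterate after each call.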
__device__ __forceinline__ float square(float x)
{
return x*x;
}
__global__ void kernelSORComputeError(pcl::gpu::PtrSz<float> error,
const pcl::gpu::PtrSz<float> A,
const pcl::gpu::PtrSz<float> X,
const pcl::gpu::PtrSz<float> b,
const pcl::gpu::PtrSz<float> D,
const int n)
{
extern __shared__ float A_row[];
const int colIdx = threadIdx.x + blockIdx.x * blockDim.x;
const int b_offset = colIdx * n;
const int D_colIdx_offset = colIdx / 3 * n;
const int col_max = X.size / n;
float result, comp, error_i = 0.0f;
for (int i = 0; i < n; ++i)
{
if (threadIdx.x < n)
{
A_row[threadIdx.x] = __ldg(&A[i*n + threadIdx.x]);
}
__syncthreads();
if (colIdx < col_max)
{
float diag = __ldg(&D[D_colIdx_offset + i]);
result = 0.0f;
comp = 0.0f;
for (int j = 0; j < n; ++j)
{
float t;
//result += A_row[j] * __ldg(&X[b_offset + j]);
comp -= A_row[j] * __ldg(&X[b_offset + j]);
t = result - comp;
comp = (t - result) + comp;
result = t;
}
float t;
//result += diag * __ldg(&X[b_offset + i]);
comp -= diag * __ldg(&X[b_offset + i]);
t = result - comp;
comp = (t - result) + comp;
result = t;
error_i += square(result - __ldg(&b[b_offset + i]));
}
__syncthreads();
}
if (colIdx < col_max)
{
error[colIdx] = error_i;
}
}
void cudaSORComputeError(pcl::gpu::DeviceArray<float> error,
const pcl::gpu::DeviceArray<float> A,
const pcl::gpu::DeviceArray<float> X,
const pcl::gpu::DeviceArray<float> b,
const pcl::gpu::DeviceArray<float> D,
const int n,
const cudaStream_t stream)
{
dim3 block(1024);
dim3 grid(pcl::gpu::divUp(error.size(), block.x));
kernelSORComputeError<<<grid, block, n * sizeof(float), stream>>>(error, A, X, b, D, n);
cudaSafeCall(cudaStreamSynchronize(stream));
//std::vector<float> host_error;
//error.download(host_error);
//float sum = 0.0f;
//for (auto &iter : host_error)
//{
// sum += iter;
//}
//std::cout << sqrtf(host_error[32352 * 3]/n) << std::endl;
}
#endif // USE_CUDA
|
d5a6e603d1b74a072d6b6e0d3f9fb89126427253.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <hip/hip_runtime.h>
#include "binary_elementwise_ops_impl.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cu_inc/binary_elementwise_impl.cuh"
namespace onnxruntime {
namespace cuda {
#define OP(name, expr) \
template <class T> \
struct OP_##name { \
__device__ __inline__ T operator()(T a, T b) const { \
return (expr); \
} \
};
#define BINARY_ELEMENTWISE_IMPL(name) \
BINARY_ELEMENTWISE_IMPL_DECLARATION(name) { \
BinaryElementWiseImpl(output_rank_or_simple_broadcast, \
lhs_padded_strides, \
lhs_data, \
rhs_padded_strides, \
rhs_data, \
fdm_output_strides, \
fdm_H, \
fdm_C, \
output_data, \
OP_##name<T>(), \
count); \
}
#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, T) \
template void Impl_##x<T>(size_t output_rank, const int64_t* lhs_padded_strides, const T* lhs_data, const int64_t* rhs_padded_strides, const T* rhs_data, const fast_divmod* fdm_output_strides, const fast_divmod& fdm_H, const fast_divmod& fdm_C, T* output_data, size_t count);
#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(x) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, uint32_t) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, uint64_t) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, int32_t) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, int64_t) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, half) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, float) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, double)
#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_HFD(x) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, half) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, float) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, double)
// create declarations for op and impl
#define BINARY_OP_NAME_EXPR(name, expr) \
OP(name, expr) \
BINARY_ELEMENTWISE_IMPL(name)
BINARY_OPS()
#undef BINARY_OP_NAME_EXPR
// create specialized impl
// the postfix of the macro name indicates the types supported by the op:
// B: uint8_t
// W: uint16_t
// U: uint32_t
// Z: uint64_t
// C: int8_t
// S: int16_t
// I: int32_t
// L: int64_t
// H: float16
// F: float
// D: double
// O: bool
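// e.g. SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Add) below emits Impl_Add
// instantiations for uint32_t, uint64_t, int32_t, int64_t, half, float and
// double, matching the U/Z/I/L/H/F/D letters above.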
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Add)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Sub)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Mul)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Div)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_HFD(Pow)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(And, bool)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(Or, bool)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(Xor, bool)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_HFD(PRelu)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Greater)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_HFD(Max)
} // namespace cuda
} // namespace onnxruntime
| d5a6e603d1b74a072d6b6e0d3f9fb89126427253.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <cuda_runtime.h>
#include "binary_elementwise_ops_impl.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cu_inc/binary_elementwise_impl.cuh"
namespace onnxruntime {
namespace cuda {
#define OP(name, expr) \
template <class T> \
struct OP_##name { \
__device__ __inline__ T operator()(T a, T b) const { \
return (expr); \
} \
};
#define BINARY_ELEMENTWISE_IMPL(name) \
BINARY_ELEMENTWISE_IMPL_DECLARATION(name) { \
BinaryElementWiseImpl(output_rank_or_simple_broadcast, \
lhs_padded_strides, \
lhs_data, \
rhs_padded_strides, \
rhs_data, \
fdm_output_strides, \
fdm_H, \
fdm_C, \
output_data, \
OP_##name<T>(), \
count); \
}
#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, T) \
template void Impl_##x<T>(size_t output_rank, const int64_t* lhs_padded_strides, const T* lhs_data, const int64_t* rhs_padded_strides, const T* rhs_data, const fast_divmod* fdm_output_strides, const fast_divmod& fdm_H, const fast_divmod& fdm_C, T* output_data, size_t count);
#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(x) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, uint32_t) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, uint64_t) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, int32_t) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, int64_t) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, half) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, float) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, double)
#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_HFD(x) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, half) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, float) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, double)
// create declarations for op and impl
#define BINARY_OP_NAME_EXPR(name, expr) \
OP(name, expr) \
BINARY_ELEMENTWISE_IMPL(name)
BINARY_OPS()
#undef BINARY_OP_NAME_EXPR
// create specialized impl
// the postfix of the macro name indicates the types supported by the op:
// B: uint8_t
// W: uint16_t
// U: uint32_t
// Z: uint64_t
// C: int8_t
// S: int16_t
// I: int32_t
// L: int64_t
// H: float16
// F: float
// D: double
// O: bool
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Add)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Sub)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Mul)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Div)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_HFD(Pow)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(And, bool)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(Or, bool)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(Xor, bool)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_HFD(PRelu)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Greater)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_HFD(Max)
} // namespace cuda
} // namespace onnxruntime
|
300f177da17935f062eeda2335077a389415f0c5.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "copyp2p.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int4 __restrict__ *dest = NULL;
hipMalloc(&dest, XSIZE*YSIZE);
int4 const __restrict__ *src = NULL;
hipMalloc(&src, XSIZE*YSIZE);
size_t num_elems = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((copyp2p), dim3(gridBlock), dim3(threadBlock), 0, 0, dest, src, num_elems);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((copyp2p), dim3(gridBlock), dim3(threadBlock), 0, 0, dest, src, num_elems);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((copyp2p), dim3(gridBlock), dim3(threadBlock), 0, 0, dest, src, num_elems);
}
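// Note: there is no device synchronization between the timed loop and the
// timestamp below, so the measured interval mostly reflects kernel
// launch/queueing cost plus whatever execution the driver forces to complete,
// rather than the full runtime of the 1000 launches.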
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 300f177da17935f062eeda2335077a389415f0c5.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "copyp2p.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int4 __restrict__ *dest = NULL;
cudaMalloc(&dest, XSIZE*YSIZE);
int4 const __restrict__ *src = NULL;
cudaMalloc(&src, XSIZE*YSIZE);
size_t num_elems = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
copyp2p<<<gridBlock,threadBlock>>>(dest,src,num_elems);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
copyp2p<<<gridBlock,threadBlock>>>(dest,src,num_elems);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
copyp2p<<<gridBlock,threadBlock>>>(dest,src,num_elems);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
e155b02e23edea0859617d745692f661752f1b9d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/hip/TensorModeKernel.cuh>
#include <ATen/native/hip/TensorModeKernel.h>
#include <ATen/Dispatch.h>
#include <ATen/native/NonEmptyUtils.h>
#include <ATen/native/TensorCompare.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/ThrustAllocator.h>
#include <c10/core/DeviceArray.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/extrema.h>
#include <thrust/inner_product.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
namespace at {
namespace native {
template <typename scalar_t>
void calculate_mode(
const TensorBase& values,
const TensorBase& indices,
const TensorBase& self,
std::vector<int64_t>& position,
int dim) {
at::cuda::ThrustAllocator thrust_allocator;
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto policy = thrust::hip::par(thrust_allocator).on(stream);
TORCH_INTERNAL_ASSERT(self.is_contiguous());
// Because the input is contiguous, we want to get a reference to the
// location of the buffer at the innermost dimension that we are going
// to calculate the mode for --> we do this by manually doing the stride
// calculations to get an offset
scalar_t* data = self.data_ptr<scalar_t>();
for (int64_t i = 0; i < position.size(); i++) {
data += position[i] * ensure_nonempty_stride(self, i);
}
int64_t ndim = ensure_nonempty_dim(self.dim());
int64_t n_element = ensure_nonempty_size(self, ndim - 1);
scalar_t* iter_begin = data;
scalar_t* iter_end = data + n_element;
auto cuda_allocator = at::cuda::getCUDADeviceAllocator();
auto sort_buffer = c10::DeviceArray<int64_t>(*cuda_allocator, n_element);
auto sort_buffer_ptr = thrust::device_pointer_cast(sort_buffer.get());
auto count_from_zero_iter = thrust::make_counting_iterator(int64_t{0});
thrust::copy_n(policy, count_from_zero_iter, n_element, sort_buffer_ptr);
// Sort the input data. The original indices of the data are stored in
// sort_buffer_ptr
thrust::sort_by_key(policy, iter_begin, iter_end, sort_buffer_ptr);
// Count # of unique elements via an inner product between adjacent elements.
// Add 1 if two neighboring element are not equal.
int unique = 1 +
thrust::inner_product(
policy,
iter_begin,
iter_end - 1,
iter_begin + 1,
0,
thrust::plus<int>(),
thrust::not_equal_to<scalar_t>());
// Count frequency of each element
auto keys = c10::DeviceArray<scalar_t>(*cuda_allocator, unique);
auto counts = c10::DeviceArray<int64_t>(*cuda_allocator, unique);
auto keys_ptr = thrust::device_pointer_cast(keys.get());
auto counts_ptr = thrust::device_pointer_cast(counts.get());
thrust::reduce_by_key(
policy,
iter_begin,
iter_end,
thrust::constant_iterator<int>(1),
keys_ptr,
counts_ptr);
// Find index of maximum count
auto it = thrust::max_element(policy, counts_ptr, counts_ptr + unique);
scalar_t mode = keys_ptr[it - counts_ptr];
// Find first index within which it occurs
auto position_iter = thrust::find(policy, iter_begin, iter_end, mode);
TORCH_INTERNAL_ASSERT(position_iter != iter_end);
int64_t index = sort_buffer_ptr[position_iter - iter_begin];
// Place mode, index in output
scalar_t* values_data = values.data_ptr<scalar_t>();
int64_t* indices_data = indices.data_ptr<int64_t>();
for (int64_t i = 0; i < position.size(); i++) {
int64_t pos = position[i];
values_data += ensure_nonempty_stride(values, i) * pos;
indices_data += ensure_nonempty_stride(indices, i) * pos;
}
AT_CUDA_CHECK(hipMemcpyAsync(
values_data, &mode, sizeof(scalar_t), hipMemcpyHostToDevice, stream));
//memcpy_and_sync will synchronize results
at::cuda::memcpy_and_sync(indices_data, &index, sizeof(scalar_t), hipMemcpyHostToDevice, stream);
}
template <typename scalar_t>
void apply_mode(
const TensorBase& values,
const TensorBase& indices,
const TensorBase& self,
std::vector<int64_t>& position,
int dim,
int curDim) {
// Because we have transposed the Tensor, the data for the dimension we are
// mode'ing along is always in the innermost dimension
int64_t ndim = ensure_nonempty_dim(self.dim());
if (curDim == ndim - 1) {
calculate_mode<scalar_t>(values, indices, self, position, dim);
} else {
for (int i = 0; i < ensure_nonempty_size(self, curDim); ++i) {
position[curDim] = i;
apply_mode<scalar_t>(values, indices, self, position, dim, curDim + 1);
}
}
}
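// Note: apply_mode recursively enumerates every slice position over all
// dimensions except the last and runs calculate_mode (a full thrust
// sort/reduce pipeline) once per slice, so this is the general but slower
// fallback path compared with the fused kernel below.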
template <int64_t size, typename scalar_t>
void handle_fused_mode(
dim3 grid,
const TensorBase& self,
cuda::detail::TensorInfo<scalar_t, unsigned int>& ti_values,
cuda::detail::TensorInfo<int64_t, unsigned int>& ti_indices,
int64_t slice_size,
int64_t slices) {
constexpr int num_threads = size / 2;
int warp_size = at::cuda::warp_size();
TORCH_INTERNAL_ASSERT(num_threads % warp_size == 0 &&
num_threads <= cuda_utils::kCUDABlockReduceMaxThreads, "");
const auto memsize =
(sizeof(scalar_t) * size) + (2 * size * sizeof(unsigned int));
hipLaunchKernelGGL(( compute_mode<scalar_t, size>)
, dim3(grid), dim3(num_threads), memsize, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
self.data_ptr<scalar_t>(), ti_values, ti_indices, slice_size, slices);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
template <typename scalar_t>
void fused_mode(
const TensorBase& values,
const TensorBase& indices,
const TensorBase& self,
int64_t slice_size,
int64_t slices) {
// Set-up TensorInfo structs for passing to kernel
auto ti_values = cuda::detail::getTensorInfo<scalar_t, unsigned int>(values);
auto ti_indices = cuda::detail::getTensorInfo<int64_t, unsigned int>(indices);
// The number of blocks is the number of slices that we need to calculate
// the mode for. Each block is responsible for computing a single mode
dim3 grid;
getGridFromTiles(slices, grid);
// The blocksize is two elements per thread, rounded up to the nearest power
// of 2
auto ceilPowerOf2 = nextHighestPowerOf2(slice_size);
// Tradeoff between compilation time and the number of specializations.
// Ideally we would have one handle_fused_mode for each power of 2
switch (ceilPowerOf2) {
case 2048:
handle_fused_mode<2048, scalar_t>(
grid, self, ti_values, ti_indices, slice_size, slices);
break;
case 1024:
case 512:
case 256:
handle_fused_mode<1024, scalar_t>(
grid, self, ti_values, ti_indices, slice_size, slices);
break;
case 128:
case 64:
case 32:
case 16:
case 8:
case 4:
case 2:
handle_fused_mode<128, scalar_t>(
grid, self, ti_values, ti_indices, slice_size, slices);
break;
case 1:
default:
TORCH_INTERNAL_ASSERT(false);
}
AT_CUDA_CHECK(hipGetLastError());
}
void launch_fused_mode_kernel(
const TensorBase &values, const TensorBase &indices, const TensorBase &self,
int64_t slice_size, int64_t slices) {
AT_DISPATCH_ALL_TYPES_AND3(kBool, kBFloat16, kHalf, self.scalar_type(), "cuda_mode", [&] {
fused_mode<scalar_t>(values, indices, self, slice_size, slices);
});
}
void launch_apply_mode_kernel(const TensorBase &values, const TensorBase &indices,
const TensorBase &self, int64_t dim, int64_t ndim) {
AT_DISPATCH_ALL_TYPES_AND3(kBool, kBFloat16, kHalf, self.scalar_type(), "cuda_mode", [&] {
// Position will store the dimension values we are processing
std::vector<int64_t> position(ndim - 1, 0);
apply_mode<scalar_t>(values, indices, self, position, dim, 0);
});
}
} // namespace native
} // namespace at
| e155b02e23edea0859617d745692f661752f1b9d.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/cuda/TensorModeKernel.cuh>
#include <ATen/native/cuda/TensorModeKernel.h>
#include <ATen/Dispatch.h>
#include <ATen/native/NonEmptyUtils.h>
#include <ATen/native/TensorCompare.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/ThrustAllocator.h>
#include <c10/core/DeviceArray.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/extrema.h>
#include <thrust/inner_product.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
namespace at {
namespace native {
template <typename scalar_t>
void calculate_mode(
const TensorBase& values,
const TensorBase& indices,
const TensorBase& self,
std::vector<int64_t>& position,
int dim) {
at::cuda::ThrustAllocator thrust_allocator;
auto stream = at::cuda::getCurrentCUDAStream();
auto policy = thrust::cuda::par(thrust_allocator).on(stream);
TORCH_INTERNAL_ASSERT(self.is_contiguous());
// Because the input is contiguous, we want to get a reference to the
// location of the buffer at the innermost dimension that we are going
// to calculate the mode for --> we do this by manually doing the stride
// calculations to get an offset
scalar_t* data = self.data_ptr<scalar_t>();
for (int64_t i = 0; i < position.size(); i++) {
data += position[i] * ensure_nonempty_stride(self, i);
}
int64_t ndim = ensure_nonempty_dim(self.dim());
int64_t n_element = ensure_nonempty_size(self, ndim - 1);
scalar_t* iter_begin = data;
scalar_t* iter_end = data + n_element;
auto cuda_allocator = at::cuda::getCUDADeviceAllocator();
auto sort_buffer = c10::DeviceArray<int64_t>(*cuda_allocator, n_element);
auto sort_buffer_ptr = thrust::device_pointer_cast(sort_buffer.get());
auto count_from_zero_iter = thrust::make_counting_iterator(int64_t{0});
thrust::copy_n(policy, count_from_zero_iter, n_element, sort_buffer_ptr);
// Sort the input data. The original indices of the data are stored in
// sort_buffer_ptr
thrust::sort_by_key(policy, iter_begin, iter_end, sort_buffer_ptr);
// Count # of unique elements via an inner product between adjacent elements.
// Add 1 if two neighboring element are not equal.
int unique = 1 +
thrust::inner_product(
policy,
iter_begin,
iter_end - 1,
iter_begin + 1,
0,
thrust::plus<int>(),
thrust::not_equal_to<scalar_t>());
// Count frequency of each element
auto keys = c10::DeviceArray<scalar_t>(*cuda_allocator, unique);
auto counts = c10::DeviceArray<int64_t>(*cuda_allocator, unique);
auto keys_ptr = thrust::device_pointer_cast(keys.get());
auto counts_ptr = thrust::device_pointer_cast(counts.get());
thrust::reduce_by_key(
policy,
iter_begin,
iter_end,
thrust::constant_iterator<int>(1),
keys_ptr,
counts_ptr);
// Find index of maximum count
auto it = thrust::max_element(policy, counts_ptr, counts_ptr + unique);
scalar_t mode = keys_ptr[it - counts_ptr];
// Find first index within which it occurs
auto position_iter = thrust::find(policy, iter_begin, iter_end, mode);
TORCH_INTERNAL_ASSERT(position_iter != iter_end);
int64_t index = sort_buffer_ptr[position_iter - iter_begin];
// Place mode, index in output
scalar_t* values_data = values.data_ptr<scalar_t>();
int64_t* indices_data = indices.data_ptr<int64_t>();
for (int64_t i = 0; i < position.size(); i++) {
int64_t pos = position[i];
values_data += ensure_nonempty_stride(values, i) * pos;
indices_data += ensure_nonempty_stride(indices, i) * pos;
}
AT_CUDA_CHECK(cudaMemcpyAsync(
values_data, &mode, sizeof(scalar_t), cudaMemcpyHostToDevice, stream));
//memcpy_and_sync will synchronize results
at::cuda::memcpy_and_sync(indices_data, &index, sizeof(scalar_t), cudaMemcpyHostToDevice, stream);
}
template <typename scalar_t>
void apply_mode(
const TensorBase& values,
const TensorBase& indices,
const TensorBase& self,
std::vector<int64_t>& position,
int dim,
int curDim) {
// Because we have transposed the Tensor, the data for the dimension we are
// mode'ing along is always in the innermost dimension
int64_t ndim = ensure_nonempty_dim(self.dim());
if (curDim == ndim - 1) {
calculate_mode<scalar_t>(values, indices, self, position, dim);
} else {
for (int i = 0; i < ensure_nonempty_size(self, curDim); ++i) {
position[curDim] = i;
apply_mode<scalar_t>(values, indices, self, position, dim, curDim + 1);
}
}
}
template <int64_t size, typename scalar_t>
void handle_fused_mode(
dim3 grid,
const TensorBase& self,
cuda::detail::TensorInfo<scalar_t, unsigned int>& ti_values,
cuda::detail::TensorInfo<int64_t, unsigned int>& ti_indices,
int64_t slice_size,
int64_t slices) {
constexpr int num_threads = size / 2;
int warp_size = at::cuda::warp_size();
TORCH_INTERNAL_ASSERT(num_threads % warp_size == 0 &&
num_threads <= cuda_utils::kCUDABlockReduceMaxThreads, "");
const auto memsize =
(sizeof(scalar_t) * size) + (2 * size * sizeof(unsigned int));
compute_mode<scalar_t, size>
<<<grid, num_threads, memsize, at::cuda::getCurrentCUDAStream()>>>(
self.data_ptr<scalar_t>(), ti_values, ti_indices, slice_size, slices);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
template <typename scalar_t>
void fused_mode(
const TensorBase& values,
const TensorBase& indices,
const TensorBase& self,
int64_t slice_size,
int64_t slices) {
// Set-up TensorInfo structs for passing to kernel
auto ti_values = cuda::detail::getTensorInfo<scalar_t, unsigned int>(values);
auto ti_indices = cuda::detail::getTensorInfo<int64_t, unsigned int>(indices);
// The number of blocks is the number of slices that we need to calculate
// the mode for. Each block is responsible for computing a single mode
dim3 grid;
getGridFromTiles(slices, grid);
// The blocksize is two elements per thread, rounded up to the nearest power
// of 2
auto ceilPowerOf2 = nextHighestPowerOf2(slice_size);
// Tradeoff between compilation time and the number of specializations.
// Ideally we would have one handle_fused_mode for each power of 2
switch (ceilPowerOf2) {
case 2048:
handle_fused_mode<2048, scalar_t>(
grid, self, ti_values, ti_indices, slice_size, slices);
break;
case 1024:
case 512:
case 256:
handle_fused_mode<1024, scalar_t>(
grid, self, ti_values, ti_indices, slice_size, slices);
break;
case 128:
case 64:
case 32:
case 16:
case 8:
case 4:
case 2:
handle_fused_mode<128, scalar_t>(
grid, self, ti_values, ti_indices, slice_size, slices);
break;
case 1:
default:
TORCH_INTERNAL_ASSERT(false);
}
AT_CUDA_CHECK(cudaGetLastError());
}
void launch_fused_mode_kernel(
const TensorBase &values, const TensorBase &indices, const TensorBase &self,
int64_t slice_size, int64_t slices) {
AT_DISPATCH_ALL_TYPES_AND3(kBool, kBFloat16, kHalf, self.scalar_type(), "cuda_mode", [&] {
fused_mode<scalar_t>(values, indices, self, slice_size, slices);
});
}
void launch_apply_mode_kernel(const TensorBase &values, const TensorBase &indices,
const TensorBase &self, int64_t dim, int64_t ndim) {
AT_DISPATCH_ALL_TYPES_AND3(kBool, kBFloat16, kHalf, self.scalar_type(), "cuda_mode", [&] {
// Position will store the dimension values we are processing
std::vector<int64_t> position(ndim - 1, 0);
apply_mode<scalar_t>(values, indices, self, position, dim, 0);
});
}
} // namespace native
} // namespace at
|
5158889e820f4c0bfabe98b00c7dab06317e133f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/native/TensorTransformations.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/HIPContext.h>
#include <c10/macros/Macros.h>
#include <cstddef>
#include <vector>
namespace at {
namespace native {
constexpr uint32_t AT_APPLY_THREADS_PER_BLOCK = 512;
constexpr uint32_t AT_APPLY_BLOCKS_PER_SM = 4;
template <typename scalar_t, typename IndexType>
#if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_2(AT_APPLY_THREADS_PER_BLOCK, AT_APPLY_BLOCKS_PER_SM)
#endif
__global__ void
kernel_pointwise_flip_apply2(const cuda::detail::TensorInfo<scalar_t, IndexType> in_tensor_info,
cuda::detail::TensorInfo<scalar_t, IndexType> out_tensor_info,
IndexType N,
int flip_dim,
IndexType total_dims) {
for (IndexType linear_index = blockIdx.x * blockDim.x + threadIdx.x; linear_index < N; linear_index += gridDim.x * blockDim.x) {
IndexType dst_offset = 0;
if (flip_dim == 0) {
// flip 1st dim
dst_offset = (in_tensor_info.sizes[0] - 1 - linear_index / in_tensor_info.strides[0]) * in_tensor_info.strides[0] + linear_index % in_tensor_info.strides[0];
}
else {
// flip last dim
IndexType i = total_dims - 1;
dst_offset = linear_index / in_tensor_info.strides[0] * in_tensor_info.strides[0] + (in_tensor_info.sizes[i] - 1 - linear_index % in_tensor_info.strides[0]);
}
out_tensor_info.data[dst_offset] = in_tensor_info.data[linear_index];
}
}
template <typename scalar_t>
__global__
void flip_cuda_kernel(scalar_t* in_tensor, scalar_t* out_tensor, int64_t N, int64_t* flip_dims, int64_t flip_dims_size,
int64_t* strides, int64_t* strides_contiguous, int64_t* shape, int64_t total_dims) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index >= N) {
return;
}
int64_t cur_indices = linear_index, rem = 0, dst_offset = 0;
for (int64_t i = 0; i < total_dims; i++) {
int64_t temp = cur_indices;
cur_indices = cur_indices / strides_contiguous[i];
rem = temp - cur_indices * strides_contiguous[i];
// flip the indices if it is in flip_dims
for (int64_t j = 0; j < flip_dims_size; j++) {
if (i == flip_dims[j]) {
cur_indices = shape[i] - 1 - cur_indices;
}
}
dst_offset += cur_indices * strides[i];
cur_indices = rem;
}
out_tensor[linear_index] = in_tensor[dst_offset];
}
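// Note: the kernel walks the output in contiguous (linear) order, decodes each
// linear index into per-dimension coordinates via strides_contiguous, mirrors
// the coordinates listed in flip_dims (i -> shape[d] - 1 - i), and re-encodes
// them with the input strides to locate the element to gather.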
// Flip tensor given a list of dims
Tensor flip_cuda(const Tensor& self, IntArrayRef dims) {
auto in_tensor = self;
const int64_t flip_dims_size = dims.size(), total_dims = in_tensor.dim(), N = in_tensor.numel();
flip_check_errors(total_dims, flip_dims_size, dims);
int64_t block_size = 512;
dim3 dim_block(block_size);
dim3 dim_grid((N + block_size - 1) / block_size);
auto out_tensor = at::empty_like(in_tensor, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
if (out_tensor.numel() == 0) {
return out_tensor;
}
auto flip_dims = dims.vec();
wrap_all_dims(flip_dims, total_dims);
// use kernel_pointwise_flip_apply2 only when to-flip dim is the 1st or last dim, where collapseDims can reduce the amount of work
if (flip_dims_size == 1 && in_tensor.is_contiguous() && (flip_dims[0] == 0 || flip_dims[0] == total_dims - 1)) {
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::Bool, in_tensor.scalar_type(), "flip_cuda", [&] {
auto in_tensor_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(in_tensor);
auto out_tensor_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(out_tensor);
int flip_dim = in_tensor_info.collapseDims(flip_dims[0]);
out_tensor_info.collapseDims(flip_dims[0]);
hipLaunchKernelGGL(( kernel_pointwise_flip_apply2<scalar_t, int64_t>)
, dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
in_tensor_info, out_tensor_info, N, flip_dim, total_dims);
});
return out_tensor;
}
auto flip_dims_t = at::from_blob(
flip_dims.data(), {static_cast<int64_t>(flip_dims.size())}, at::device(kCPU).dtype(kLong));
auto shape = in_tensor.sizes().vec();
auto shape_t = at::from_blob(
shape.data(), {static_cast<int64_t>(shape.size())}, at::device(kCPU).dtype(kLong));
auto strides = in_tensor.strides().vec();
auto strides_t = at::from_blob(
strides.data(), {static_cast<int64_t>(strides.size())}, at::device(kCPU).dtype(kLong));
// stride_contiguous is the stride of non-contiguous tensor after calling contiguous(),
// it is used to compute indices for each element in non-contiguous tensor
Tensor stride_contiguous = at::zeros({total_dims}, kLong);
int64_t* stride_contiguous_d = stride_contiguous.data_ptr<int64_t>();
for (int64_t i = total_dims - 1; i >= 0; i--) {
if (i == total_dims - 1) {
stride_contiguous_d[i] = 1;
} else {
stride_contiguous_d[i] = std::max<int64_t>(shape[i+1], 1) * stride_contiguous_d[i + 1];
}
}
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, in_tensor.scalar_type(), "flip_cuda", [&] {
hipLaunchKernelGGL(( flip_cuda_kernel), dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
in_tensor.data_ptr<scalar_t>(), out_tensor.data_ptr<scalar_t>(), N,
flip_dims_t.cuda().data_ptr<int64_t>(),
flip_dims_size,
strides_t.cuda().data_ptr<int64_t>(),
stride_contiguous.cuda().data_ptr<int64_t>(),
shape_t.cuda().data_ptr<int64_t>(),
total_dims);
});
return out_tensor;
}
template <typename scalar_t>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
__global__
void roll_cuda_kernel(scalar_t* in_tensor, scalar_t* out_tensor, int64_t N,
int64_t roll_dim, int64_t start,
int64_t size, int64_t stride, int64_t total_dims) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index >= N) {
return;
}
// roll dim idx is the index of linear_index along the rolling dimension.
int64_t roll_dim_idx = linear_index % (stride * size) / stride;
// index into the source data to find appropriate value.
int64_t source_idx = 0;
if( roll_dim_idx >= (size - start) ) {
source_idx = linear_index - ((size - start) * stride);
} else {
source_idx = linear_index + (start * stride);
}
out_tensor[linear_index] = in_tensor[source_idx];
}
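// e.g. size = 5 and shifts[0] = 2 give start = 3: along the rolled dimension
// the output positions 0..4 read from input positions 3, 4, 0, 1, 2, i.e. the
// tensor is rolled forward by two.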
// Roll a tensor along a dimension
Tensor roll_cuda(const Tensor& self, IntArrayRef shifts, IntArrayRef dims) {
if (dims.size() != 1 || shifts.size() != 1) {
return roll_common(self, shifts, dims);
}
auto in_tensor = self;
if(!self.is_contiguous()) {
in_tensor = self.contiguous();
}
auto out_tensor = at::empty_like(in_tensor, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
if (out_tensor.numel() == 0) {
return out_tensor;
}
const int64_t N = in_tensor.numel();
const int64_t dim = dims[0];
const int64_t size = in_tensor.size(dim);
int64_t start = (size - shifts[0]) % size;
// Behavior of % is different in C++ vs Python for negative numbers. This
// corrects the difference.
if( start < 0 ) start = start + size;
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid;
TORCH_CHECK(cuda::getApplyGrid(N, dim_grid, in_tensor.get_device()), "unable to get dim grid");
auto total_dims = in_tensor.dim();
AT_DISPATCH_ALL_TYPES_AND_C10_COMPLEX_AND2(at::ScalarType::Half, at::ScalarType::Bool, in_tensor.scalar_type(), "roll_cuda", [&] {
hipLaunchKernelGGL(( roll_cuda_kernel), dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
in_tensor.data_ptr<scalar_t>(), out_tensor.data_ptr<scalar_t>(), N,
dim, start,
size,
in_tensor.stride(dim),
total_dims);
});
return out_tensor;
}
}} // namespace at::native
| 5158889e820f4c0bfabe98b00c7dab06317e133f.cu | #include <ATen/native/TensorTransformations.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <c10/macros/Macros.h>
#include <cstddef>
#include <vector>
namespace at {
namespace native {
constexpr uint32_t AT_APPLY_THREADS_PER_BLOCK = 512;
constexpr uint32_t AT_APPLY_BLOCKS_PER_SM = 4;
template <typename scalar_t, typename IndexType>
#if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_2(AT_APPLY_THREADS_PER_BLOCK, AT_APPLY_BLOCKS_PER_SM)
#endif
__global__ void
kernel_pointwise_flip_apply2(const cuda::detail::TensorInfo<scalar_t, IndexType> in_tensor_info,
cuda::detail::TensorInfo<scalar_t, IndexType> out_tensor_info,
IndexType N,
int flip_dim,
IndexType total_dims) {
for (IndexType linear_index = blockIdx.x * blockDim.x + threadIdx.x; linear_index < N; linear_index += gridDim.x * blockDim.x) {
IndexType dst_offset = 0;
if (flip_dim == 0) {
// flip 1st dim
dst_offset = (in_tensor_info.sizes[0] - 1 - linear_index / in_tensor_info.strides[0]) * in_tensor_info.strides[0] + linear_index % in_tensor_info.strides[0];
}
else {
// flip last dim
IndexType i = total_dims - 1;
dst_offset = linear_index / in_tensor_info.strides[0] * in_tensor_info.strides[0] + (in_tensor_info.sizes[i] - 1 - linear_index % in_tensor_info.strides[0]);
}
out_tensor_info.data[dst_offset] = in_tensor_info.data[linear_index];
}
}
template <typename scalar_t>
__global__
void flip_cuda_kernel(scalar_t* in_tensor, scalar_t* out_tensor, int64_t N, int64_t* flip_dims, int64_t flip_dims_size,
int64_t* strides, int64_t* strides_contiguous, int64_t* shape, int64_t total_dims) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index >= N) {
return;
}
int64_t cur_indices = linear_index, rem = 0, dst_offset = 0;
for (int64_t i = 0; i < total_dims; i++) {
int64_t temp = cur_indices;
cur_indices = cur_indices / strides_contiguous[i];
rem = temp - cur_indices * strides_contiguous[i];
// flip the indices if it is in flip_dims
for (int64_t j = 0; j < flip_dims_size; j++) {
if (i == flip_dims[j]) {
cur_indices = shape[i] - 1 - cur_indices;
}
}
dst_offset += cur_indices * strides[i];
cur_indices = rem;
}
out_tensor[linear_index] = in_tensor[dst_offset];
}
// Flip tensor given a list of dims
Tensor flip_cuda(const Tensor& self, IntArrayRef dims) {
auto in_tensor = self;
const int64_t flip_dims_size = dims.size(), total_dims = in_tensor.dim(), N = in_tensor.numel();
flip_check_errors(total_dims, flip_dims_size, dims);
int64_t block_size = 512;
dim3 dim_block(block_size);
dim3 dim_grid((N + block_size - 1) / block_size);
auto out_tensor = at::empty_like(in_tensor, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
if (out_tensor.numel() == 0) {
return out_tensor;
}
auto flip_dims = dims.vec();
wrap_all_dims(flip_dims, total_dims);
// use kernel_pointwise_flip_apply2 only when to-flip dim is the 1st or last dim, where collapseDims can reduce the amount of work
if (flip_dims_size == 1 && in_tensor.is_contiguous() && (flip_dims[0] == 0 || flip_dims[0] == total_dims - 1)) {
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::Bool, in_tensor.scalar_type(), "flip_cuda", [&] {
auto in_tensor_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(in_tensor);
auto out_tensor_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(out_tensor);
int flip_dim = in_tensor_info.collapseDims(flip_dims[0]);
out_tensor_info.collapseDims(flip_dims[0]);
kernel_pointwise_flip_apply2<scalar_t, int64_t>
<<<dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
in_tensor_info, out_tensor_info, N, flip_dim, total_dims);
});
return out_tensor;
}
auto flip_dims_t = at::from_blob(
flip_dims.data(), {static_cast<int64_t>(flip_dims.size())}, at::device(kCPU).dtype(kLong));
auto shape = in_tensor.sizes().vec();
auto shape_t = at::from_blob(
shape.data(), {static_cast<int64_t>(shape.size())}, at::device(kCPU).dtype(kLong));
auto strides = in_tensor.strides().vec();
auto strides_t = at::from_blob(
strides.data(), {static_cast<int64_t>(strides.size())}, at::device(kCPU).dtype(kLong));
// stride_contiguous is the stride of non-contiguous tensor after calling contiguous(),
// it is used to compute indices for each element in non-contiguous tensor
Tensor stride_contiguous = at::zeros({total_dims}, kLong);
int64_t* stride_contiguous_d = stride_contiguous.data_ptr<int64_t>();
for (int64_t i = total_dims - 1; i >= 0; i--) {
if (i == total_dims - 1) {
stride_contiguous_d[i] = 1;
} else {
stride_contiguous_d[i] = std::max<int64_t>(shape[i+1], 1) * stride_contiguous_d[i + 1];
}
}
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, in_tensor.scalar_type(), "flip_cuda", [&] {
flip_cuda_kernel<<<dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
in_tensor.data_ptr<scalar_t>(), out_tensor.data_ptr<scalar_t>(), N,
flip_dims_t.cuda().data_ptr<int64_t>(),
flip_dims_size,
strides_t.cuda().data_ptr<int64_t>(),
stride_contiguous.cuda().data_ptr<int64_t>(),
shape_t.cuda().data_ptr<int64_t>(),
total_dims);
});
return out_tensor;
}
template <typename scalar_t>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
__global__
void roll_cuda_kernel(scalar_t* in_tensor, scalar_t* out_tensor, int64_t N,
int64_t roll_dim, int64_t start,
int64_t size, int64_t stride, int64_t total_dims) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index >= N) {
return;
}
// roll dim idx is the index of linear_index along the rolling dimension.
int64_t roll_dim_idx = linear_index % (stride * size) / stride;
// index into the source data to find appropriate value.
int64_t source_idx = 0;
if( roll_dim_idx >= (size - start) ) {
source_idx = linear_index - ((size - start) * stride);
} else {
source_idx = linear_index + (start * stride);
}
out_tensor[linear_index] = in_tensor[source_idx];
}
// Roll a tensor along a dimension
Tensor roll_cuda(const Tensor& self, IntArrayRef shifts, IntArrayRef dims) {
if (dims.size() != 1 || shifts.size() != 1) {
return roll_common(self, shifts, dims);
}
auto in_tensor = self;
if(!self.is_contiguous()) {
in_tensor = self.contiguous();
}
auto out_tensor = at::empty_like(in_tensor, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
if (out_tensor.numel() == 0) {
return out_tensor;
}
const int64_t N = in_tensor.numel();
const int64_t dim = dims[0];
const int64_t size = in_tensor.size(dim);
int64_t start = (size - shifts[0]) % size;
// Behavior of % is different in C++ vs Python for negative numbers. This
// corrects the difference.
if( start < 0 ) start = start + size;
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid;
TORCH_CHECK(cuda::getApplyGrid(N, dim_grid, in_tensor.get_device()), "unable to get dim grid");
auto total_dims = in_tensor.dim();
AT_DISPATCH_ALL_TYPES_AND_C10_COMPLEX_AND2(at::ScalarType::Half, at::ScalarType::Bool, in_tensor.scalar_type(), "roll_cuda", [&] {
roll_cuda_kernel<<<dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
in_tensor.data_ptr<scalar_t>(), out_tensor.data_ptr<scalar_t>(), N,
dim, start,
size,
in_tensor.stride(dim),
total_dims);
});
return out_tensor;
}
}} // namespace at::native
|
3b0c9d87916b2aafd0ca399789837efc10218a98.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device.hpp"
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Depth bilateral filter
namespace kfusion
{
namespace device
{
__global__ void bilateral_kernel(const PtrStepSz<ushort> src, PtrStep<ushort> dst, const int ksz, const float sigma_spatial2_inv_half, const float sigma_depth2_inv_half)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= src.cols || y >= src.rows)
return;
int value = src(y, x);
int tx = min (x - ksz / 2 + ksz, src.cols - 1);
int ty = min (y - ksz / 2 + ksz, src.rows - 1);
float sum1 = 0;
float sum2 = 0;
for (int cy = max (y - ksz / 2, 0); cy < ty; ++cy)
{
for (int cx = max (x - ksz / 2, 0); cx < tx; ++cx)
{
int depth = src(cy, cx);
float space2 = (x - cx) * (x - cx) + (y - cy) * (y - cy);
float color2 = (value - depth) * (value - depth);
float weight = __expf (-(space2 * sigma_spatial2_inv_half + color2 * sigma_depth2_inv_half));
sum1 += depth * weight;
sum2 += weight;
}
}
dst(y, x) = __float2int_rn (sum1 / sum2);
}
}
}
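// Each neighbour is weighted by exp(-(dx*dx + dy*dy) / (2*sigma_spatial^2)
// - (d - d_center)^2 / (2*sigma_depth^2)); the host wrapper below passes the
// precomputed 0.5 / sigma^2 factors and converts sigma_depth to millimetres.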
void kfusion::device::bilateralFilter (const Depth& src, Depth& dst, int kernel_size, float sigma_spatial, float sigma_depth)
{
sigma_depth *= 1000; // meters -> mm
dim3 block (32, 8);
dim3 grid (divUp (src.cols (), block.x), divUp (src.rows (), block.y));
cudaSafeCall( hipFuncSetCacheConfig (bilateral_kernel, hipFuncCachePreferL1) );
hipLaunchKernelGGL(( bilateral_kernel), dim3(grid), dim3(block), 0, 0, src, dst, kernel_size, 0.5f / (sigma_spatial * sigma_spatial), 0.5f / (sigma_depth * sigma_depth));
cudaSafeCall ( hipGetLastError () );
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Depth truncation
namespace kfusion
{
namespace device
{
__global__ void truncate_depth_kernel(PtrStepSz<ushort> depth, ushort max_dist /*mm*/)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < depth.cols && y < depth.rows)
if(depth(y, x) > max_dist)
depth(y, x) = 0;
}
}
}
void kfusion::device::truncateDepth(Depth& depth, float max_dist /*meters*/)
{
dim3 block (32, 8);
dim3 grid (divUp (depth.cols (), block.x), divUp (depth.rows (), block.y));
hipLaunchKernelGGL(( truncate_depth_kernel), dim3(grid), dim3(block), 0, 0, depth, static_cast<ushort>(max_dist * 1000.f));
cudaSafeCall ( hipGetLastError() );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Build depth pyramid
namespace kfusion
{
namespace device
{
__global__ void pyramid_kernel(const PtrStepSz<ushort> src, PtrStepSz<ushort> dst, float sigma_depth_mult3)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= dst.cols || y >= dst.rows)
return;
const int D = 5;
int center = src(2 * y, 2 * x);
int tx = min (2 * x - D / 2 + D, src.cols - 1);
int ty = min (2 * y - D / 2 + D, src.rows - 1);
int cy = max (0, 2 * y - D / 2);
int sum = 0;
int count = 0;
for (; cy < ty; ++cy)
for (int cx = max (0, 2 * x - D / 2); cx < tx; ++cx)
{
int val = src(cy, cx);
if (abs (val - center) < sigma_depth_mult3)
{
sum += val;
++count;
}
}
dst(y, x) = (count == 0) ? 0 : sum / count;
}
}
}
void kfusion::device::depthPyr(const Depth& source, Depth& pyramid, float sigma_depth)
{
sigma_depth *= 1000; // meters -> mm
dim3 block (32, 8);
dim3 grid (divUp(pyramid.cols(), block.x), divUp(pyramid.rows(), block.y));
hipLaunchKernelGGL(( pyramid_kernel), dim3(grid), dim3(block), 0, 0, source, pyramid, sigma_depth * 3);
cudaSafeCall ( hipGetLastError () );
}
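// Note: depthPyr halves the resolution; every output pixel averages the 5x5
// neighbourhood around its source pixel, but only over depths within 3*sigma
// of the centre value, so depth discontinuities survive the downsampling.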
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Compute normals
namespace kfusion
{
namespace device
{
__global__ void compute_normals_kernel(const PtrStepSz<ushort> depth, const Reprojector reproj, PtrStep<Normal> normals)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= depth.cols || y >= depth.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN ();
Normal n_out = make_float4(qnan, qnan, qnan, 0.f);
if (x < depth.cols - 1 && y < depth.rows - 1)
{
//mm -> meters
float z00 = depth(y, x) * 0.001f;
float z01 = depth(y, x+1) * 0.001f;
float z10 = depth(y+1, x) * 0.001f;
if (z00 * z01 * z10 != 0)
{
float3 v00 = reproj(x, y, z00);
float3 v01 = reproj(x+1, y, z01);
float3 v10 = reproj(x, y+1, z10);
float3 n = normalized( cross (v01 - v00, v10 - v00) );
n_out = make_float4(-n.x, -n.y, -n.z, 0.f);
}
}
normals(y, x) = n_out;
}
__global__ void mask_depth_kernel(const PtrStep<Normal> normals, PtrStepSz<ushort> depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < depth.cols && y < depth.rows)
{
float4 n = normals(y, x);
if (isnan(n.x))
depth(y, x) = 0;
}
}
}
}
void kfusion::device::computeNormalsAndMaskDepth(const Reprojector& reproj, Depth& depth, Normals& normals)
{
dim3 block (32, 8);
dim3 grid (divUp (depth.cols (), block.x), divUp (depth.rows (), block.y));
hipLaunchKernelGGL(( compute_normals_kernel), dim3(grid), dim3(block), 0, 0, depth, reproj, normals);
cudaSafeCall ( hipGetLastError () );
hipLaunchKernelGGL(( mask_depth_kernel), dim3(grid), dim3(block), 0, 0, normals, depth);
cudaSafeCall ( hipGetLastError () );
}
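// compute_normals_kernel estimates the normal from forward differences of the reprojected points,
//   n = normalize((v01 - v00) x (v10 - v00)),
// and stores -n so the normal faces the camera. If any of the three depths involved is zero the
// normal is written as quiet NaN; the second pass (mask_depth_kernel) then zeroes the depth of
// every pixel with a NaN normal, so "NaN normal" and "depth == 0" mark the same invalid pixels
// for the later stages.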
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Compute point positions and normals (computePointNormals)
namespace kfusion
{
namespace device
{
__global__ void points_normals_kernel(const Reprojector reproj, const PtrStepSz<ushort> depth, PtrStep<Point> points, PtrStep<Normal> normals)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= depth.cols || y >= depth.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN ();
points(y, x) = normals(y, x) = make_float4(qnan, qnan, qnan, qnan);
if (x >= depth.cols - 1 || y >= depth.rows - 1)
return;
//mm -> meters
float z00 = depth(y, x) * 0.001f;
float z01 = depth(y, x+1) * 0.001f;
float z10 = depth(y+1, x) * 0.001f;
if (z00 * z01 * z10 != 0)
{
float3 v00 = reproj(x, y, z00);
float3 v01 = reproj(x+1, y, z01);
float3 v10 = reproj(x, y+1, z10);
float3 n = normalized( cross (v01 - v00, v10 - v00) );
normals(y, x) = make_float4(-n.x, -n.y, -n.z, 0.f);
points(y, x) = make_float4(v00.x, v00.y, v00.z, 0.f);
}
}
}
}
void kfusion::device::computePointNormals(const Reprojector& reproj, const Depth& depth, Points& points, Normals& normals)
{
dim3 block (32, 8);
dim3 grid (divUp (depth.cols (), block.x), divUp (depth.rows (), block.y));
hipLaunchKernelGGL(( points_normals_kernel), dim3(grid), dim3(block), 0, 0, reproj, depth, points, normals);
cudaSafeCall ( hipGetLastError () );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Compute dists
namespace kfusion
{
namespace device
{
__global__ void compute_dists_kernel(const PtrStepSz<ushort> depth, Dists dists, float2 finv, float2 c)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < depth.cols && y < depth.rows)
{
float xl = (x - c.x) * finv.x;
float yl = (y - c.y) * finv.y;
float lambda = sqrtf (xl * xl + yl * yl + 1);
dists(y, x) = __float2half_rn(depth(y, x) * lambda * 0.001f); //meters
}
}
}
}
void kfusion::device::compute_dists(const Depth& depth, Dists dists, float2 f, float2 c)
{
dim3 block (32, 8);
dim3 grid (divUp (depth.cols (), block.x), divUp (depth.rows (), block.y));
hipLaunchKernelGGL(( compute_dists_kernel), dim3(grid), dim3(block), 0, 0, depth, dists, make_float2(1.f/f.x, 1.f/f.y), c);
cudaSafeCall ( hipGetLastError () );
}
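// compute_dists_kernel converts the per-pixel z-depth (mm) into the distance along the viewing
// ray (metres): with xl = (x - c.x)/f.x and yl = (y - c.y)/f.y,
//   lambda = sqrtf(xl*xl + yl*yl + 1),  dist = depth * lambda * 0.001f,
// stored in half precision via __float2half_rn.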
namespace kfusion
{
namespace device
{
__global__ void resize_depth_normals_kernel(const PtrStep<ushort> dsrc, const PtrStep<float4> nsrc, PtrStepSz<ushort> ddst, PtrStep<float4> ndst)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= ddst.cols || y >= ddst.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN ();
ushort d = 0;
float4 n = make_float4(qnan, qnan, qnan, qnan);
int xs = x * 2;
int ys = y * 2;
int d00 = dsrc(ys+0, xs+0);
int d01 = dsrc(ys+0, xs+1);
int d10 = dsrc(ys+1, xs+0);
int d11 = dsrc(ys+1, xs+1);
if (d00 * d01 != 0 && d10 * d11 != 0)
{
d = (d00 + d01 + d10 + d11)/4;
float4 n00 = nsrc(ys+0, xs+0);
float4 n01 = nsrc(ys+0, xs+1);
float4 n10 = nsrc(ys+1, xs+0);
float4 n11 = nsrc(ys+1, xs+1);
n.x = (n00.x + n01.x + n10.x + n11.x)*0.25;
n.y = (n00.y + n01.y + n10.y + n11.y)*0.25;
n.z = (n00.z + n01.z + n10.z + n11.z)*0.25;
}
ddst(y, x) = d;
ndst(y, x) = n;
}
}
}
void kfusion::device::resizeDepthNormals(const Depth& depth, const Normals& normals, Depth& depth_out, Normals& normals_out)
{
int in_cols = depth.cols ();
int in_rows = depth.rows ();
int out_cols = in_cols / 2;
int out_rows = in_rows / 2;
dim3 block (32, 8);
dim3 grid (divUp (out_cols, block.x), divUp (out_rows, block.y));
hipLaunchKernelGGL(( resize_depth_normals_kernel), dim3(grid), dim3(block), 0, 0, depth, normals, depth_out, normals_out);
cudaSafeCall ( hipGetLastError () );
}
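// resize_depth_normals_kernel builds the next (half-resolution) level: each output pixel averages
// the 2x2 source block at (2x, 2y). The depth average is taken only if all four source depths are
// non-zero; otherwise depth stays 0 and the normal stays NaN. Note that the averaged normal is not
// re-normalised here, so consumers needing unit normals must normalise it themselves.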
namespace kfusion
{
namespace device
{
__global__ void resize_points_normals_kernel(const PtrStep<Point> vsrc, const PtrStep<Normal> nsrc, PtrStepSz<Point> vdst, PtrStep<Normal> ndst)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= vdst.cols || y >= vdst.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN ();
vdst(y, x) = ndst(y, x) = make_float4(qnan, qnan, qnan, 0.f);
int xs = x * 2;
int ys = y * 2;
float3 d00 = tr(vsrc(ys+0, xs+0));
float3 d01 = tr(vsrc(ys+0, xs+1));
float3 d10 = tr(vsrc(ys+1, xs+0));
float3 d11 = tr(vsrc(ys+1, xs+1));
if (!isnan(d00.x * d01.x * d10.x * d11.x))
{
float3 d = (d00 + d01 + d10 + d11) * 0.25f;
vdst(y, x) = make_float4(d.x, d.y, d.z, 0.f);
float3 n00 = tr(nsrc(ys+0, xs+0));
float3 n01 = tr(nsrc(ys+0, xs+1));
float3 n10 = tr(nsrc(ys+1, xs+0));
float3 n11 = tr(nsrc(ys+1, xs+1));
float3 n = (n00 + n01 + n10 + n11)*0.25f;
ndst(y, x) = make_float4(n.x, n.y, n.z, 0.f);
}
}
}
}
void kfusion::device::resizePointsNormals(const Points& points, const Normals& normals, Points& points_out, Normals& normals_out)
{
int out_cols = points.cols () / 2;
int out_rows = points.rows () / 2;
dim3 block (32, 8);
dim3 grid (divUp (out_cols, block.x), divUp (out_rows, block.y));
hipLaunchKernelGGL(( resize_points_normals_kernel), dim3(grid), dim3(block), 0, 0, points, normals, points_out, normals_out);
cudaSafeCall ( hipGetLastError () );
}
namespace kfusion
{
namespace device
{
__global__ void render_image_kernel(const PtrStep<ushort> depth, const PtrStep<Normal> normals,
const Reprojector reproj, const float3 light_pose, PtrStepSz<uchar4> dst)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= dst.cols || y >= dst.rows)
return;
float3 color;
int d = depth(y,x);
if (d == 0)
{
const float3 bgr1 = make_float3(4.f/255.f, 2.f/255.f, 2.f/255.f);
const float3 bgr2 = make_float3(236.f/255.f, 120.f/255.f, 120.f/255.f);
float w = static_cast<float>(y) / dst.rows;
color = bgr1 * (1 - w) + bgr2 * w;
}
else
{
float3 P = reproj(x, y, d * 0.001f);
float3 N = tr(normals(y,x));
const float Ka = 0.3f; //ambient coeff
const float Kd = 0.5f; //diffuse coeff
const float Ks = 0.2f; //specular coeff
const float n = 20.f; //specular power
const float Ax = 1.f; //ambient color, can be RGB
const float Dx = 1.f; //diffuse color, can be RGB
const float Sx = 1.f; //specular color, can be RGB
const float Lx = 1.f; //light color
//Ix = Ax*Ka*Dx + Att*Lx [Kd*Dx*(N dot L) + Ks*Sx*(R dot V)^n]
float3 L = normalized(light_pose - P);
float3 V = normalized(make_float3(0.f, 0.f, 0.f) - P);
float3 R = normalized(2 * N * dot(N, L) - L);
float Ix = Ax*Ka*Dx + Lx * Kd * Dx * fmax(0.f, dot(N, L)) + Lx * Ks * Sx * __powf(fmax(0.f, dot(R, V)), n);
color = make_float3(Ix, Ix, Ix);
}
uchar4 out;
out.x = static_cast<unsigned char>(__saturatef(color.x) * 255.f);
out.y = static_cast<unsigned char>(__saturatef(color.y) * 255.f);
out.z = static_cast<unsigned char>(__saturatef(color.z) * 255.f);
out.w = 0;
dst(y, x) = out;
}
__global__ void render_image_kernel(const PtrStep<Point> points, const PtrStep<Normal> normals,
const Reprojector reproj, const float3 light_pose, PtrStepSz<uchar4> dst)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= dst.cols || y >= dst.rows)
return;
float3 color;
float3 p = tr(points(y,x));
if (isnan(p.x))
{
const float3 bgr1 = make_float3(4.f/255.f, 2.f/255.f, 2.f/255.f);
const float3 bgr2 = make_float3(236.f/255.f, 120.f/255.f, 120.f/255.f);
float w = static_cast<float>(y) / dst.rows;
color = bgr1 * (1 - w) + bgr2 * w;
}
else
{
float3 P = p;
float3 N = tr(normals(y,x));
const float Ka = 0.3f; //ambient coeff
const float Kd = 0.5f; //diffuse coeff
const float Ks = 0.2f; //specular coeff
const float n = 20.f; //specular power
const float Ax = 1.f; //ambient color, can be RGB
const float Dx = 1.f; //diffuse color, can be RGB
const float Sx = 1.f; //specular color, can be RGB
const float Lx = 1.f; //light color
//Ix = Ax*Ka*Dx + Att*Lx [Kd*Dx*(N dot L) + Ks*Sx*(R dot V)^n]
float3 L = normalized(light_pose - P);
float3 V = normalized(make_float3(0.f, 0.f, 0.f) - P);
float3 R = normalized(2 * N * dot(N, L) - L);
float Ix = Ax*Ka*Dx + Lx * Kd * Dx * fmax(0.f, dot(N, L)) + Lx * Ks * Sx * __powf(fmax(0.f, dot(R, V)), n);
color = make_float3(Ix, Ix, Ix);
}
uchar4 out;
out.x = static_cast<unsigned char>(__saturatef(color.x) * 255.f);
out.y = static_cast<unsigned char>(__saturatef(color.y) * 255.f);
out.z = static_cast<unsigned char>(__saturatef(color.z) * 255.f);
out.w = 0;
dst(y, x) = out;
}
}
}
void kfusion::device::renderImage(const Depth& depth, const Normals& normals, const Reprojector& reproj, const float3& light_pose, Image& image)
{
dim3 block (32, 8);
dim3 grid (divUp (depth.cols(), block.x), divUp (depth.rows(), block.y));
hipLaunchKernelGGL(( render_image_kernel), dim3(grid), dim3(block), 0, 0, (PtrStep<ushort>)depth, normals, reproj, light_pose, image);
cudaSafeCall ( hipGetLastError () );
}
void kfusion::device::renderImage(const Points& points, const Normals& normals, const Reprojector& reproj, const Vec3f& light_pose, Image& image)
{
dim3 block (32, 8);
dim3 grid (divUp (points.cols(), block.x), divUp (points.rows(), block.y));
hipLaunchKernelGGL(( render_image_kernel), dim3(grid), dim3(block), 0, 0, (PtrStep<Point>)points, normals, reproj, light_pose, image);
cudaSafeCall ( hipGetLastError () );
}
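// Both render_image_kernel overloads shade valid pixels with the Phong model spelled out in the
// in-kernel comment:
//   I = Ka*Ax*Dx + Lx*( Kd*Dx*max(0, dot(N, L)) + Ks*Sx*powf(max(0, dot(R, V)), n) ),
//   R = normalize(2*N*dot(N, L) - L),
// where L points towards light_pose, V points towards the camera at the origin, and Ka = 0.3,
// Kd = 0.5, Ks = 0.2, n = 20 are hard-coded. Invalid pixels (zero depth / NaN point) get a
// vertical gradient between the two bgr constants instead.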
namespace kfusion
{
namespace device
{
__global__ void tangent_colors_kernel(PtrStepSz<Normal> normals, PtrStep<uchar4> colors)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= normals.cols || y >= normals.rows)
return;
float4 n = normals(y, x);
#if 0
unsigned char r = static_cast<unsigned char>(__saturatef((-n.x + 1.f)/2.f) * 255.f);
unsigned char g = static_cast<unsigned char>(__saturatef((-n.y + 1.f)/2.f) * 255.f);
unsigned char b = static_cast<unsigned char>(__saturatef((-n.z + 1.f)/2.f) * 255.f);
#else
unsigned char r = static_cast<unsigned char>((5.f - n.x * 3.5f) * 25.5f);
unsigned char g = static_cast<unsigned char>((5.f - n.y * 2.5f) * 25.5f);
unsigned char b = static_cast<unsigned char>((5.f - n.z * 3.5f) * 25.5f);
#endif
colors(y, x) = make_uchar4(b, g, r, 0);
}
}
}
void kfusion::device::renderTangentColors(const Normals& normals, Image& image)
{
dim3 block (32, 8);
dim3 grid (divUp (normals.cols(), block.x), divUp (normals.rows(), block.y));
hipLaunchKernelGGL(( tangent_colors_kernel), dim3(grid), dim3(block), 0, 0, normals, image);
cudaSafeCall ( hipGetLastError () );
}
namespace kfusion
{
namespace device
{
__global__ void mergePointNormalKernel (const Point* cloud, const float8* normals, PtrSz<float12> output)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < output.size)
{
float4 p = cloud[idx];
float8 n = normals[idx];
float12 o;
o.x = p.x;
o.y = p.y;
o.z = p.z;
o.normal_x = n.x;
o.normal_y = n.y;
o.normal_z = n.z;
output.data[idx] = o;
}
}
}
}
void kfusion::device::mergePointNormal (const DeviceArray<Point>& cloud, const DeviceArray<float8>& normals, const DeviceArray<float12>& output)
{
const int block = 256;
int total = (int)output.size ();
hipLaunchKernelGGL(( mergePointNormalKernel), dim3(divUp (total, block)), dim3(block), 0, 0, cloud, normals, output);
cudaSafeCall ( hipGetLastError () );
cudaSafeCall (hipDeviceSynchronize ());
}
| 3b0c9d87916b2aafd0ca399789837efc10218a98.cu | #include "device.hpp"
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Depth bilateral filter
namespace kfusion
{
namespace device
{
__global__ void bilateral_kernel(const PtrStepSz<ushort> src, PtrStep<ushort> dst, const int ksz, const float sigma_spatial2_inv_half, const float sigma_depth2_inv_half)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= src.cols || y >= src.rows)
return;
int value = src(y, x);
int tx = min (x - ksz / 2 + ksz, src.cols - 1);
int ty = min (y - ksz / 2 + ksz, src.rows - 1);
float sum1 = 0;
float sum2 = 0;
for (int cy = max (y - ksz / 2, 0); cy < ty; ++cy)
{
for (int cx = max (x - ksz / 2, 0); cx < tx; ++cx)
{
int depth = src(cy, cx);
float space2 = (x - cx) * (x - cx) + (y - cy) * (y - cy);
float color2 = (value - depth) * (value - depth);
float weight = __expf (-(space2 * sigma_spatial2_inv_half + color2 * sigma_depth2_inv_half));
sum1 += depth * weight;
sum2 += weight;
}
}
dst(y, x) = __float2int_rn (sum1 / sum2);
}
}
}
void kfusion::device::bilateralFilter (const Depth& src, Depth& dst, int kernel_size, float sigma_spatial, float sigma_depth)
{
sigma_depth *= 1000; // meters -> mm
dim3 block (32, 8);
dim3 grid (divUp (src.cols (), block.x), divUp (src.rows (), block.y));
cudaSafeCall( cudaFuncSetCacheConfig (bilateral_kernel, cudaFuncCachePreferL1) );
bilateral_kernel<<<grid, block>>>(src, dst, kernel_size, 0.5f / (sigma_spatial * sigma_spatial), 0.5f / (sigma_depth * sigma_depth));
cudaSafeCall ( cudaGetLastError () );
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Depth truncation
namespace kfusion
{
namespace device
{
__global__ void truncate_depth_kernel(PtrStepSz<ushort> depth, ushort max_dist /*mm*/)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < depth.cols && y < depth.rows)
if(depth(y, x) > max_dist)
depth(y, x) = 0;
}
}
}
void kfusion::device::truncateDepth(Depth& depth, float max_dist /*meters*/)
{
dim3 block (32, 8);
dim3 grid (divUp (depth.cols (), block.x), divUp (depth.rows (), block.y));
truncate_depth_kernel<<<grid, block>>>(depth, static_cast<ushort>(max_dist * 1000.f));
cudaSafeCall ( cudaGetLastError() );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Build depth pyramid
namespace kfusion
{
namespace device
{
__global__ void pyramid_kernel(const PtrStepSz<ushort> src, PtrStepSz<ushort> dst, float sigma_depth_mult3)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= dst.cols || y >= dst.rows)
return;
const int D = 5;
int center = src(2 * y, 2 * x);
int tx = min (2 * x - D / 2 + D, src.cols - 1);
int ty = min (2 * y - D / 2 + D, src.rows - 1);
int cy = max (0, 2 * y - D / 2);
int sum = 0;
int count = 0;
for (; cy < ty; ++cy)
for (int cx = max (0, 2 * x - D / 2); cx < tx; ++cx)
{
int val = src(cy, cx);
if (abs (val - center) < sigma_depth_mult3)
{
sum += val;
++count;
}
}
dst(y, x) = (count == 0) ? 0 : sum / count;
}
}
}
void kfusion::device::depthPyr(const Depth& source, Depth& pyramid, float sigma_depth)
{
sigma_depth *= 1000; // meters -> mm
dim3 block (32, 8);
dim3 grid (divUp(pyramid.cols(), block.x), divUp(pyramid.rows(), block.y));
pyramid_kernel<<<grid, block>>>(source, pyramid, sigma_depth * 3);
cudaSafeCall ( cudaGetLastError () );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Compute normals
namespace kfusion
{
namespace device
{
__global__ void compute_normals_kernel(const PtrStepSz<ushort> depth, const Reprojector reproj, PtrStep<Normal> normals)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= depth.cols || y >= depth.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN ();
Normal n_out = make_float4(qnan, qnan, qnan, 0.f);
if (x < depth.cols - 1 && y < depth.rows - 1)
{
//mm -> meters
float z00 = depth(y, x) * 0.001f;
float z01 = depth(y, x+1) * 0.001f;
float z10 = depth(y+1, x) * 0.001f;
if (z00 * z01 * z10 != 0)
{
float3 v00 = reproj(x, y, z00);
float3 v01 = reproj(x+1, y, z01);
float3 v10 = reproj(x, y+1, z10);
float3 n = normalized( cross (v01 - v00, v10 - v00) );
n_out = make_float4(-n.x, -n.y, -n.z, 0.f);
}
}
normals(y, x) = n_out;
}
__global__ void mask_depth_kernel(const PtrStep<Normal> normals, PtrStepSz<ushort> depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < depth.cols && y < depth.rows)
{
float4 n = normals(y, x);
if (isnan(n.x))
depth(y, x) = 0;
}
}
}
}
void kfusion::device::computeNormalsAndMaskDepth(const Reprojector& reproj, Depth& depth, Normals& normals)
{
dim3 block (32, 8);
dim3 grid (divUp (depth.cols (), block.x), divUp (depth.rows (), block.y));
compute_normals_kernel<<<grid, block>>>(depth, reproj, normals);
cudaSafeCall ( cudaGetLastError () );
mask_depth_kernel<<<grid, block>>>(normals, depth);
cudaSafeCall ( cudaGetLastError () );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Compute computePointNormals
namespace kfusion
{
namespace device
{
__global__ void points_normals_kernel(const Reprojector reproj, const PtrStepSz<ushort> depth, PtrStep<Point> points, PtrStep<Normal> normals)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= depth.cols || y >= depth.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN ();
points(y, x) = normals(y, x) = make_float4(qnan, qnan, qnan, qnan);
if (x >= depth.cols - 1 || y >= depth.rows - 1)
return;
//mm -> meters
float z00 = depth(y, x) * 0.001f;
float z01 = depth(y, x+1) * 0.001f;
float z10 = depth(y+1, x) * 0.001f;
if (z00 * z01 * z10 != 0)
{
float3 v00 = reproj(x, y, z00);
float3 v01 = reproj(x+1, y, z01);
float3 v10 = reproj(x, y+1, z10);
float3 n = normalized( cross (v01 - v00, v10 - v00) );
normals(y, x) = make_float4(-n.x, -n.y, -n.z, 0.f);
points(y, x) = make_float4(v00.x, v00.y, v00.z, 0.f);
}
}
}
}
void kfusion::device::computePointNormals(const Reprojector& reproj, const Depth& depth, Points& points, Normals& normals)
{
dim3 block (32, 8);
dim3 grid (divUp (depth.cols (), block.x), divUp (depth.rows (), block.y));
points_normals_kernel<<<grid, block>>>(reproj, depth, points, normals);
cudaSafeCall ( cudaGetLastError () );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Compute dists
namespace kfusion
{
namespace device
{
__global__ void compute_dists_kernel(const PtrStepSz<ushort> depth, Dists dists, float2 finv, float2 c)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < depth.cols && y < depth.rows)
{
float xl = (x - c.x) * finv.x;
float yl = (y - c.y) * finv.y;
float lambda = sqrtf (xl * xl + yl * yl + 1);
dists(y, x) = __float2half_rn(depth(y, x) * lambda * 0.001f); //meters
}
}
}
}
void kfusion::device::compute_dists(const Depth& depth, Dists dists, float2 f, float2 c)
{
dim3 block (32, 8);
dim3 grid (divUp (depth.cols (), block.x), divUp (depth.rows (), block.y));
compute_dists_kernel<<<grid, block>>>(depth, dists, make_float2(1.f/f.x, 1.f/f.y), c);
cudaSafeCall ( cudaGetLastError () );
}
namespace kfusion
{
namespace device
{
__global__ void resize_depth_normals_kernel(const PtrStep<ushort> dsrc, const PtrStep<float4> nsrc, PtrStepSz<ushort> ddst, PtrStep<float4> ndst)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= ddst.cols || y >= ddst.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN ();
ushort d = 0;
float4 n = make_float4(qnan, qnan, qnan, qnan);
int xs = x * 2;
int ys = y * 2;
int d00 = dsrc(ys+0, xs+0);
int d01 = dsrc(ys+0, xs+1);
int d10 = dsrc(ys+1, xs+0);
int d11 = dsrc(ys+1, xs+1);
if (d00 * d01 != 0 && d10 * d11 != 0)
{
d = (d00 + d01 + d10 + d11)/4;
float4 n00 = nsrc(ys+0, xs+0);
float4 n01 = nsrc(ys+0, xs+1);
float4 n10 = nsrc(ys+1, xs+0);
float4 n11 = nsrc(ys+1, xs+1);
n.x = (n00.x + n01.x + n10.x + n11.x)*0.25;
n.y = (n00.y + n01.y + n10.y + n11.y)*0.25;
n.z = (n00.z + n01.z + n10.z + n11.z)*0.25;
}
ddst(y, x) = d;
ndst(y, x) = n;
}
}
}
void kfusion::device::resizeDepthNormals(const Depth& depth, const Normals& normals, Depth& depth_out, Normals& normals_out)
{
int in_cols = depth.cols ();
int in_rows = depth.rows ();
int out_cols = in_cols / 2;
int out_rows = in_rows / 2;
dim3 block (32, 8);
dim3 grid (divUp (out_cols, block.x), divUp (out_rows, block.y));
resize_depth_normals_kernel<<<grid, block>>>(depth, normals, depth_out, normals_out);
cudaSafeCall ( cudaGetLastError () );
}
namespace kfusion
{
namespace device
{
__global__ void resize_points_normals_kernel(const PtrStep<Point> vsrc, const PtrStep<Normal> nsrc, PtrStepSz<Point> vdst, PtrStep<Normal> ndst)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= vdst.cols || y >= vdst.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN ();
vdst(y, x) = ndst(y, x) = make_float4(qnan, qnan, qnan, 0.f);
int xs = x * 2;
int ys = y * 2;
float3 d00 = tr(vsrc(ys+0, xs+0));
float3 d01 = tr(vsrc(ys+0, xs+1));
float3 d10 = tr(vsrc(ys+1, xs+0));
float3 d11 = tr(vsrc(ys+1, xs+1));
if (!isnan(d00.x * d01.x * d10.x * d11.x))
{
float3 d = (d00 + d01 + d10 + d11) * 0.25f;
vdst(y, x) = make_float4(d.x, d.y, d.z, 0.f);
float3 n00 = tr(nsrc(ys+0, xs+0));
float3 n01 = tr(nsrc(ys+0, xs+1));
float3 n10 = tr(nsrc(ys+1, xs+0));
float3 n11 = tr(nsrc(ys+1, xs+1));
float3 n = (n00 + n01 + n10 + n11)*0.25f;
ndst(y, x) = make_float4(n.x, n.y, n.z, 0.f);
}
}
}
}
void kfusion::device::resizePointsNormals(const Points& points, const Normals& normals, Points& points_out, Normals& normals_out)
{
int out_cols = points.cols () / 2;
int out_rows = points.rows () / 2;
dim3 block (32, 8);
dim3 grid (divUp (out_cols, block.x), divUp (out_rows, block.y));
resize_points_normals_kernel<<<grid, block>>>(points, normals, points_out, normals_out);
cudaSafeCall ( cudaGetLastError () );
}
namespace kfusion
{
namespace device
{
__global__ void render_image_kernel(const PtrStep<ushort> depth, const PtrStep<Normal> normals,
const Reprojector reproj, const float3 light_pose, PtrStepSz<uchar4> dst)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= dst.cols || y >= dst.rows)
return;
float3 color;
int d = depth(y,x);
if (d == 0)
{
const float3 bgr1 = make_float3(4.f/255.f, 2.f/255.f, 2.f/255.f);
const float3 bgr2 = make_float3(236.f/255.f, 120.f/255.f, 120.f/255.f);
float w = static_cast<float>(y) / dst.rows;
color = bgr1 * (1 - w) + bgr2 * w;
}
else
{
float3 P = reproj(x, y, d * 0.001f);
float3 N = tr(normals(y,x));
const float Ka = 0.3f; //ambient coeff
const float Kd = 0.5f; //diffuse coeff
const float Ks = 0.2f; //specular coeff
const float n = 20.f; //specular power
const float Ax = 1.f; //ambient color, can be RGB
const float Dx = 1.f; //diffuse color, can be RGB
const float Sx = 1.f; //specular color, can be RGB
const float Lx = 1.f; //light color
//Ix = Ax*Ka*Dx + Att*Lx [Kd*Dx*(N dot L) + Ks*Sx*(R dot V)^n]
float3 L = normalized(light_pose - P);
float3 V = normalized(make_float3(0.f, 0.f, 0.f) - P);
float3 R = normalized(2 * N * dot(N, L) - L);
float Ix = Ax*Ka*Dx + Lx * Kd * Dx * fmax(0.f, dot(N, L)) + Lx * Ks * Sx * __powf(fmax(0.f, dot(R, V)), n);
color = make_float3(Ix, Ix, Ix);
}
uchar4 out;
out.x = static_cast<unsigned char>(__saturatef(color.x) * 255.f);
out.y = static_cast<unsigned char>(__saturatef(color.y) * 255.f);
out.z = static_cast<unsigned char>(__saturatef(color.z) * 255.f);
out.w = 0;
dst(y, x) = out;
}
__global__ void render_image_kernel(const PtrStep<Point> points, const PtrStep<Normal> normals,
const Reprojector reproj, const float3 light_pose, PtrStepSz<uchar4> dst)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= dst.cols || y >= dst.rows)
return;
float3 color;
float3 p = tr(points(y,x));
if (isnan(p.x))
{
const float3 bgr1 = make_float3(4.f/255.f, 2.f/255.f, 2.f/255.f);
const float3 bgr2 = make_float3(236.f/255.f, 120.f/255.f, 120.f/255.f);
float w = static_cast<float>(y) / dst.rows;
color = bgr1 * (1 - w) + bgr2 * w;
}
else
{
float3 P = p;
float3 N = tr(normals(y,x));
const float Ka = 0.3f; //ambient coeff
const float Kd = 0.5f; //diffuse coeff
const float Ks = 0.2f; //specular coeff
const float n = 20.f; //specular power
const float Ax = 1.f; //ambient color, can be RGB
const float Dx = 1.f; //diffuse color, can be RGB
const float Sx = 1.f; //specular color, can be RGB
const float Lx = 1.f; //light color
//Ix = Ax*Ka*Dx + Att*Lx [Kd*Dx*(N dot L) + Ks*Sx*(R dot V)^n]
float3 L = normalized(light_pose - P);
float3 V = normalized(make_float3(0.f, 0.f, 0.f) - P);
float3 R = normalized(2 * N * dot(N, L) - L);
float Ix = Ax*Ka*Dx + Lx * Kd * Dx * fmax(0.f, dot(N, L)) + Lx * Ks * Sx * __powf(fmax(0.f, dot(R, V)), n);
color = make_float3(Ix, Ix, Ix);
}
uchar4 out;
out.x = static_cast<unsigned char>(__saturatef(color.x) * 255.f);
out.y = static_cast<unsigned char>(__saturatef(color.y) * 255.f);
out.z = static_cast<unsigned char>(__saturatef(color.z) * 255.f);
out.w = 0;
dst(y, x) = out;
}
}
}
void kfusion::device::renderImage(const Depth& depth, const Normals& normals, const Reprojector& reproj, const float3& light_pose, Image& image)
{
dim3 block (32, 8);
dim3 grid (divUp (depth.cols(), block.x), divUp (depth.rows(), block.y));
render_image_kernel<<<grid, block>>>((PtrStep<ushort>)depth, normals, reproj, light_pose, image);
cudaSafeCall ( cudaGetLastError () );
}
void kfusion::device::renderImage(const Points& points, const Normals& normals, const Reprojector& reproj, const Vec3f& light_pose, Image& image)
{
dim3 block (32, 8);
dim3 grid (divUp (points.cols(), block.x), divUp (points.rows(), block.y));
render_image_kernel<<<grid, block>>>((PtrStep<Point>)points, normals, reproj, light_pose, image);
cudaSafeCall ( cudaGetLastError () );
}
namespace kfusion
{
namespace device
{
__global__ void tangent_colors_kernel(PtrStepSz<Normal> normals, PtrStep<uchar4> colors)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= normals.cols || y >= normals.rows)
return;
float4 n = normals(y, x);
#if 0
unsigned char r = static_cast<unsigned char>(__saturatef((-n.x + 1.f)/2.f) * 255.f);
unsigned char g = static_cast<unsigned char>(__saturatef((-n.y + 1.f)/2.f) * 255.f);
unsigned char b = static_cast<unsigned char>(__saturatef((-n.z + 1.f)/2.f) * 255.f);
#else
unsigned char r = static_cast<unsigned char>((5.f - n.x * 3.5f) * 25.5f);
unsigned char g = static_cast<unsigned char>((5.f - n.y * 2.5f) * 25.5f);
unsigned char b = static_cast<unsigned char>((5.f - n.z * 3.5f) * 25.5f);
#endif
colors(y, x) = make_uchar4(b, g, r, 0);
}
}
}
void kfusion::device::renderTangentColors(const Normals& normals, Image& image)
{
dim3 block (32, 8);
dim3 grid (divUp (normals.cols(), block.x), divUp (normals.rows(), block.y));
tangent_colors_kernel<<<grid, block>>>(normals, image);
cudaSafeCall ( cudaGetLastError () );
}
namespace kfusion
{
namespace device
{
__global__ void mergePointNormalKernel (const Point* cloud, const float8* normals, PtrSz<float12> output)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < output.size)
{
float4 p = cloud[idx];
float8 n = normals[idx];
float12 o;
o.x = p.x;
o.y = p.y;
o.z = p.z;
o.normal_x = n.x;
o.normal_y = n.y;
o.normal_z = n.z;
output.data[idx] = o;
}
}
}
}
void kfusion::device::mergePointNormal (const DeviceArray<Point>& cloud, const DeviceArray<float8>& normals, const DeviceArray<float12>& output)
{
const int block = 256;
int total = (int)output.size ();
mergePointNormalKernel<<<divUp (total, block), block>>>(cloud, normals, output);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
}
|
84e520faf82d58d92ba48be1fe31984bf58e4b0d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void helloFromGPU()
{
printf("Hello, World from GPU!\n");
}
void helloFromCPU()
{
printf("Hello, World from CPU!\n");
}
int main()
{
helloFromCPU();
hipLaunchKernelGGL(( helloFromGPU), dim3(1),dim3(1), 0, 0, );
hipDeviceSynchronize();
helloFromCPU();
} | 84e520faf82d58d92ba48be1fe31984bf58e4b0d.cu | #include <stdio.h>
__global__ void helloFromGPU()
{
printf("Hello, World from GPU!\n");
}
void helloFromCPU()
{
printf("Hello, World from CPU!\n");
}
int main()
{
helloFromCPU();
helloFromGPU<<<1,1>>>();
cudaDeviceSynchronize();
helloFromCPU();
} |
e977d61c47c84aa09bf48fd1c6a0d5c4b38cb709.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "prepare_boundary_potential_on_device.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *d_potential_dot_dot_acoustic = NULL;
hipMalloc(&d_potential_dot_dot_acoustic, XSIZE*YSIZE);
float *d_send_potential_dot_dot_buffer = NULL;
hipMalloc(&d_send_potential_dot_dot_buffer, XSIZE*YSIZE);
const int num_interfaces = 1;
const int max_nibool_interfaces = 1;
const int *d_nibool_interfaces = NULL;
hipMalloc(&d_nibool_interfaces, XSIZE*YSIZE);
const int *d_ibool_interfaces = NULL;
hipMalloc(&d_ibool_interfaces, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(prepare_boundary_potential_on_device, dim3(gridBlock), dim3(threadBlock), 0, 0, d_potential_dot_dot_acoustic, d_send_potential_dot_dot_buffer, num_interfaces, max_nibool_interfaces, d_nibool_interfaces, d_ibool_interfaces);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(prepare_boundary_potential_on_device, dim3(gridBlock), dim3(threadBlock), 0, 0, d_potential_dot_dot_acoustic, d_send_potential_dot_dot_buffer, num_interfaces, max_nibool_interfaces, d_nibool_interfaces, d_ibool_interfaces);
}
hipDeviceSynchronize(); // make sure the warm-up launches have finished before timing starts
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(prepare_boundary_potential_on_device, dim3(gridBlock), dim3(threadBlock), 0, 0, d_potential_dot_dot_acoustic, d_send_potential_dot_dot_buffer, num_interfaces, max_nibool_interfaces, d_nibool_interfaces, d_ibool_interfaces);
}
hipDeviceSynchronize(); // launches are asynchronous; wait for the timed kernels to finish before reading the clock
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | e977d61c47c84aa09bf48fd1c6a0d5c4b38cb709.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "prepare_boundary_potential_on_device.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *d_potential_dot_dot_acoustic = NULL;
cudaMalloc(&d_potential_dot_dot_acoustic, XSIZE*YSIZE);
float *d_send_potential_dot_dot_buffer = NULL;
cudaMalloc(&d_send_potential_dot_dot_buffer, XSIZE*YSIZE);
const int num_interfaces = 1;
const int max_nibool_interfaces = 1;
const int *d_nibool_interfaces = NULL;
cudaMalloc(&d_nibool_interfaces, XSIZE*YSIZE);
const int *d_ibool_interfaces = NULL;
cudaMalloc(&d_ibool_interfaces, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
prepare_boundary_potential_on_device<<<gridBlock,threadBlock>>>(d_potential_dot_dot_acoustic,d_send_potential_dot_dot_buffer,num_interfaces,max_nibool_interfaces,d_nibool_interfaces,d_ibool_interfaces);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
prepare_boundary_potential_on_device<<<gridBlock,threadBlock>>>(d_potential_dot_dot_acoustic,d_send_potential_dot_dot_buffer,num_interfaces,max_nibool_interfaces,d_nibool_interfaces,d_ibool_interfaces);
}
cudaDeviceSynchronize(); // make sure the warm-up launches have finished before timing starts
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
prepare_boundary_potential_on_device<<<gridBlock,threadBlock>>>(d_potential_dot_dot_acoustic,d_send_potential_dot_dot_buffer,num_interfaces,max_nibool_interfaces,d_nibool_interfaces,d_ibool_interfaces);
}
cudaDeviceSynchronize(); // launches are asynchronous; wait for the timed kernels to finish before reading the clock
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
d9c4c1bd7d8027fc9514ae9914738515e8735e20.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime_api.h>
#include <math.h>
#include "bnei_set.h"
using namespace std;
/**
* \file bnei_set.cu
* \brief Finds neighboring bins for each bin in the simulation
*
* Each bin i has bdimx*bdimy*bdimz neighboring bins. This function
* finds the index of all neighbors and store in bnei.
*
* \param bnei list of neighboring bins for each bin
* \param nxbinpt pointer to nxbin, the number of bins in the x-direction
* \param nybinpt pointer to nybin, the number of bins in the y-direction
* \param nzbinpt pointer to nzbin, the number of bins in the z-direction
* \param bdimxpt pointer to bdimx, the number of sub-bins in the x-direction
* \param bdimypt pointer to bdimy, the number of sub-bins in the y-direction
* \param bdimzpt pointer to bdimz, the number of sub-bins in the z-direction
*
*/
__global__ void bnei_set(int *bnei, int *nxbinpt, int *nybinpt, int *nzbinpt,
int *bdimxpt, int *bdimypt, int *bdimzpt){
int nxbin = *nxbinpt;
int nybin = *nybinpt;
int nzbin = *nzbinpt;
int bdimx = *bdimxpt;
int bdimy = *bdimypt;
int bdimz = *bdimzpt;
int tid = threadIdx.x + blockIdx.x*blockDim.x;
int xbin, ybin, zbin, xcen, ycen, zcen;
int xpos, ypos, zpos;
int xind, yind, zind;
int xdiff, ydiff, zdiff;
int ind;
int tid2;
xcen = (bdimx - 1) / 2;
ycen = (bdimy - 1) / 2;
zcen = (bdimz - 1) / 2;
zbin = tid / (nxbin*nybin);
ybin = (tid - zbin*nxbin*nybin) / nxbin;
xbin = tid - ybin*nxbin - zbin*nxbin*nybin;
for (tid2 = 0; tid2 < bdimx*bdimy*bdimz; tid2++){
zpos = tid2 / (bdimx*bdimy);
ypos = (tid2 - zpos*bdimx*bdimy) / bdimx;
xpos = tid2 - ypos*bdimx - zpos*bdimx*bdimy;
xdiff = xpos - xcen;
xind = xbin + xdiff;
if (xind < 0){
xind += nxbin;
}
if (xind >= nxbin){
xind -= nxbin;
}
ydiff = ypos - ycen;
yind = ybin + ydiff;
if (yind < 0){
yind += nybin;
}
if (yind >= nybin){
yind -= nybin;
}
zdiff = zpos - zcen;
zind = zbin + zdiff;
if (zind < 0){
zind += nzbin;
}
if (zind >= nzbin){
zind -= nzbin;
}
ind = xind + yind*nxbin + zind*nxbin*nybin;
if ((xbin + ybin*nxbin + zbin*nxbin*nybin + tid2 * nxbin*nybin*nzbin) >= 0 && (xbin + ybin*nxbin + zbin*nxbin*nybin + tid2 * nxbin*nybin*nzbin) < nxbin*nybin*nzbin*bdimx*bdimy*bdimz){
bnei[xbin + ybin*nxbin + zbin*nxbin*nybin + tid2 * nxbin*nybin*nzbin] = ind;
}
else{
printf("tid2 %4d\n", tid2);
}
}
}
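// One thread handles one bin. Assuming, for illustration, bdimx = bdimy = bdimz = 3, then
// xcen = ycen = zcen = 1, each offset (xdiff, ydiff, zdiff) ranges over {-1, 0, 1}, and every bin
// records its 27 neighbours with periodic wrap-around at the grid edges. Neighbour j of bin i is
// stored at bnei[i + j*nxbin*nybin*nzbin], i.e. the neighbour lists are laid out neighbour-major
// across all bins.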
| d9c4c1bd7d8027fc9514ae9914738515e8735e20.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime_api.h>
#include <math.h>
#include "bnei_set.h"
using namespace std;
/**
* \file bnei_set.cu
* \brief Finds neighboring bins for each bin in the simulation
*
* Each bin i has bdimx*bdimy*bdimz neighboring bins. This function
* finds the index of all neighbors and store in bnei.
*
* \param bnei list of neighboring bins for each bin
* \param nxbinpt pointer to nxbin, the number of bins in the x-direction
* \param nybinpt pointer to nybin, the number of bins in the y-direction
* \param nzbinpt pointer to nzbin, the number of bins in the z-direction
* \param bdimxpt pointer to bdimx, the number of sub-bins in the x-direction
* \param bdimypt pointer to bdimy, the number of sub-bins in the y-direction
* \param bdimzpt pointer to bdimz, the number of sub-bins in the z-direction
*
*/
__global__ void bnei_set(int *bnei, int *nxbinpt, int *nybinpt, int *nzbinpt,
int *bdimxpt, int *bdimypt, int *bdimzpt){
int nxbin = *nxbinpt;
int nybin = *nybinpt;
int nzbin = *nzbinpt;
int bdimx = *bdimxpt;
int bdimy = *bdimypt;
int bdimz = *bdimzpt;
int tid = threadIdx.x + blockIdx.x*blockDim.x;
int xbin, ybin, zbin, xcen, ycen, zcen;
int xpos, ypos, zpos;
int xind, yind, zind;
int xdiff, ydiff, zdiff;
int ind;
int tid2;
xcen = (bdimx - 1) / 2;
ycen = (bdimy - 1) / 2;
zcen = (bdimz - 1) / 2;
zbin = tid / (nxbin*nybin);
ybin = (tid - zbin*nxbin*nybin) / nxbin;
xbin = tid - ybin*nxbin - zbin*nxbin*nybin;
for (tid2 = 0; tid2 < bdimx*bdimy*bdimz; tid2++){
zpos = tid2 / (bdimx*bdimy);
ypos = (tid2 - zpos*bdimx*bdimy) / bdimx;
xpos = tid2 - ypos*bdimx - zpos*bdimx*bdimy;
xdiff = xpos - xcen;
xind = xbin + xdiff;
if (xind < 0){
xind += nxbin;
}
if (xind >= nxbin){
xind -= nxbin;
}
ydiff = ypos - ycen;
yind = ybin + ydiff;
if (yind < 0){
yind += nybin;
}
if (yind >= nybin){
yind -= nybin;
}
zdiff = zpos - zcen;
zind = zbin + zdiff;
if (zind < 0){
zind += nzbin;
}
if (zind >= nzbin){
zind -= nzbin;
}
ind = xind + yind*nxbin + zind*nxbin*nybin;
if ((xbin + ybin*nxbin + zbin*nxbin*nybin + tid2 * nxbin*nybin*nzbin) >= 0 && (xbin + ybin*nxbin + zbin*nxbin*nybin + tid2 * nxbin*nybin*nzbin) < nxbin*nybin*nzbin*bdimx*bdimy*bdimz){
bnei[xbin + ybin*nxbin + zbin*nxbin*nybin + tid2 * nxbin*nybin*nzbin] = ind;
}
else{
printf("tid2 %4d\n", tid2);
}
}
}
|
7838d38d1a6a21235b9cc5a37e50bdf085de3fec.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
% Function: generate_dmrs_pusch
% Description: Generates LTE demodulation reference signal for PUSCH
% Inputs: N_subfr - Subframe number within a radio frame
% N_id_cell - Physical layer cell identity
% delta_ss - Configurable portion of the sequence-shift pattern for PUSCH (sib2 groupAssignmentPUSCH)
% group_hopping_enabled - Boolean value determining if group hopping is enabled (sib2 groupHoppingEnabled)
% sequence_hopping_enabled - Boolean value determining if sequence hopping is enabled (sib2 sequenceHoppingEnabled)
% cyclic_shift - Broadcast cyclic shift to apply to base reference signal (sib2 cyclicShift)
% cyclic_shift_dci - Scheduled cyclic shift to apply to base reference signal
% w_config - fixed or table
% N_prbs - Number of PRBs used for the uplink grant
% layer - Which diversity layer to generate reference signals for
% Outputs: *dmrs1_h - Demodulation reference signal for PUSCH
*dmrs2_h - Demodulation reference signal for PUSCH
By: Mohammed Mostafa
*/
#include "generate_dmrs_pusch_hip.cuh"
#include "generate_ul_rs.cuh"
#include "generate_psuedo_random_seq.cuh"
__global__ void generate_reference_signal(hipfftComplex* dmrs2_d, int w_vector, int M_sc_rb) {
int x_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (x_idx >= M_sc_rb)
return;
dmrs2_d[x_idx] = w_vector * dmrs2_d[x_idx];
}
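// This kernel scales every subcarrier of the second DMRS symbol by the cover value w_vector
// chosen in generate_dmrs_pusch below (1 when w_config == "fixed", otherwise looked up in the
// W_VECTOR table from cyclic_shift_dci and the layer). The <<<2, 1024>>> launch gives 2048
// threads, enough to cover M_sc_rb = N_prbs * N_sc_rb subcarriers for the allocations handled
// here; threads with x_idx >= M_sc_rb return immediately.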
void generate_dmrs_pusch(int N_subfr, int N_id_cell, int delta_ss, bool group_hopping_enabled, bool sequence_hopping_enabled, int cyclic_shift, int cyclic_shift_dci, char* w_config, int N_prbs, int layer, hipfftComplex** dmrs1_d, hipfftComplex** dmrs2_d, hipfftComplex* x_q_d)
{
//Calculate M_sc_rb (called in generate_ul_rs M_sc_rs)
int M_sc_rb = N_prbs*N_sc_rb;
//Calculate N_s
int N_s = N_subfr * 2;
//Set lambda
int lambda = layer;
//Calculate f_ss_pusch
int f_ss_pusch = ((N_id_cell % 30) + delta_ss) % 30;
//Generate c
Byte* c = (Byte*)malloc(sizeof(Byte)* 8 * N_ul_symb * 20);
int c_init = floor(N_id_cell / 30) * 32 + f_ss_pusch;
generate_psuedo_random_seq(&c, 8 * N_ul_symb * 20, 0, 0, c_init); //added c_init in N_id_cell according to ahmed nour
//Calculate n_pn_ns
int n_pn_ns_1 = c[8 * N_ul_symb*N_s + 0] + c[8 * N_ul_symb*N_s + 1] * 2 + c[8 * N_ul_symb*N_s + 2] * 4 + c[8 * N_ul_symb*N_s + 3] * 8 + c[8 * N_ul_symb*N_s + 4] * 16 + c[8 * N_ul_symb*N_s + 5] * 32 + c[8 * N_ul_symb*N_s + 6] * 64 + c[8 * N_ul_symb*N_s + 7] * 128;
int n_pn_ns_2 = c[8 * N_ul_symb*(N_s + 1) + 0] + c[8 * N_ul_symb*(N_s + 1) + 1]*2 + c[8 * N_ul_symb*(N_s + 1) + 2]*4 + c[8 * N_ul_symb*(N_s + 1) + 3]*8 + c[8 * N_ul_symb*(N_s + 1) + 4]*16 + c[8 * N_ul_symb*(N_s + 1) + 5]*32 + c[8 * N_ul_symb*(N_s + 1) + 6]*64 + c[8 * N_ul_symb*(N_s + 1) + 7]*128;
//Determine n_1_dmrs
int n_1_dmrs = N_1_DMRS[cyclic_shift];
//Determine n_2_dmrs_lambda
int n_2_dmrs_lambda = N_2_DMRS_LAMBDA[cyclic_shift_dci][lambda];
//Calculate n_cs_lambda
int n_cs_lambda_1 = (n_1_dmrs + n_2_dmrs_lambda + n_pn_ns_1) % 12;
int n_cs_lambda_2 = (n_1_dmrs + n_2_dmrs_lambda + n_pn_ns_2) % 12;
//Calculate alpha_lambda
float alpha_lambda_1 = 2 * PI *n_cs_lambda_1 / (float)12;
float alpha_lambda_2 = 2 * PI *n_cs_lambda_2 / (float)12;
//Generate the base reference signal
generate_ul_rs(N_s, N_id_cell, "pusch", delta_ss, group_hopping_enabled, sequence_hopping_enabled, alpha_lambda_1, N_prbs, &*dmrs1_d, x_q_d);
generate_ul_rs(N_s+1, N_id_cell, "pusch", delta_ss, group_hopping_enabled, sequence_hopping_enabled, alpha_lambda_2, N_prbs, &*dmrs2_d, x_q_d);
//Determine w vector
int w_vector;
if(!strcmp(w_config, "fixed"))
{
w_vector = 1;
}
else
{
w_vector = W_VECTOR[cyclic_shift_dci*4 + lambda];
}
//Generate the PUSCH demodulation reference signal sequence
generate_reference_signal << < 2, 1024 >> >(*dmrs2_d, w_vector, M_sc_rb);
} | 7838d38d1a6a21235b9cc5a37e50bdf085de3fec.cu | /*
% Function: generate_dmrs_pusch
% Description: Generates LTE demodulation reference signal for PUSCH
% Inputs: N_subfr - Subframe number within a radio frame
% N_id_cell - Physical layer cell identity
% delta_ss - Configurable portion of the sequence-shift pattern for PUSCH (sib2 groupAssignmentPUSCH)
% group_hopping_enabled - Boolean value determining if group hopping is enabled (sib2 groupHoppingEnabled)
% sequence_hopping_enabled - Boolean value determining if sequence hopping is enabled (sib2 sequenceHoppingEnabled)
% cyclic_shift - Broadcast cyclic shift to apply to base reference signal (sib2 cyclicShift)
% cyclic_shift_dci - Scheduled cyclic shift to apply to base reference signal
% w_config - fixed or table
% N_prbs - Number of PRBs used for the uplink grant
% layer - Which diversity layer to generate reference signals for
% Outputs: *dmrs1_h - Demodulation reference signal for PUSCH
*dmrs2_h - Demodulation reference signal for PUSCH
By: Mohammed Mostafa
*/
#include "generate_dmrs_pusch.cuh"
#include "generate_ul_rs.cuh"
#include "generate_psuedo_random_seq.cuh"
__global__ void generate_reference_signal(cufftComplex* dmrs2_d, int w_vector, int M_sc_rb) {
int x_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (x_idx >= M_sc_rb)
return;
dmrs2_d[x_idx] = w_vector * dmrs2_d[x_idx];
}
void generate_dmrs_pusch(int N_subfr, int N_id_cell, int delta_ss, bool group_hopping_enabled, bool sequence_hopping_enabled, int cyclic_shift, int cyclic_shift_dci, char* w_config, int N_prbs, int layer, cufftComplex** dmrs1_d, cufftComplex** dmrs2_d, cufftComplex* x_q_d)
{
//Calculate M_sc_rb (called in generate_ul_rs M_sc_rs)
int M_sc_rb = N_prbs*N_sc_rb;
//Calculate N_s
int N_s = N_subfr * 2;
//Set lambda
int lambda = layer;
//Calculate f_ss_pusch
int f_ss_pusch = ((N_id_cell % 30) + delta_ss) % 30;
//Generate c
Byte* c = (Byte*)malloc(sizeof(Byte)* 8 * N_ul_symb * 20);
int c_init = floor(N_id_cell / 30) * 32 + f_ss_pusch;
generate_psuedo_random_seq(&c, 8 * N_ul_symb * 20, 0, 0, c_init); //added c_init in N_id_cell according to ahmed nour
//Calculate n_pn_ns
int n_pn_ns_1 = c[8 * N_ul_symb*N_s + 0] + c[8 * N_ul_symb*N_s + 1] * 2 + c[8 * N_ul_symb*N_s + 2] * 4 + c[8 * N_ul_symb*N_s + 3] * 8 + c[8 * N_ul_symb*N_s + 4] * 16 + c[8 * N_ul_symb*N_s + 5] * 32 + c[8 * N_ul_symb*N_s + 6] * 64 + c[8 * N_ul_symb*N_s + 7] * 128;
int n_pn_ns_2 = c[8 * N_ul_symb*(N_s + 1) + 0] + c[8 * N_ul_symb*(N_s + 1) + 1]*2 + c[8 * N_ul_symb*(N_s + 1) + 2]*4 + c[8 * N_ul_symb*(N_s + 1) + 3]*8 + c[8 * N_ul_symb*(N_s + 1) + 4]*16 + c[8 * N_ul_symb*(N_s + 1) + 5]*32 + c[8 * N_ul_symb*(N_s + 1) + 6]*64 + c[8 * N_ul_symb*(N_s + 1) + 7]*128;
//Determine n_1_dmrs
int n_1_dmrs = N_1_DMRS[cyclic_shift];
//Determine n_2_dmrs_lambda
int n_2_dmrs_lambda = N_2_DMRS_LAMBDA[cyclic_shift_dci][lambda];
//Calculate n_cs_lambda
int n_cs_lambda_1 = (n_1_dmrs + n_2_dmrs_lambda + n_pn_ns_1) % 12;
int n_cs_lambda_2 = (n_1_dmrs + n_2_dmrs_lambda + n_pn_ns_2) % 12;
//Calculate alpha_lambda
float alpha_lambda_1 = 2 * PI *n_cs_lambda_1 / (float)12;
float alpha_lambda_2 = 2 * PI *n_cs_lambda_2 / (float)12;
//Generate the base reference signal
generate_ul_rs(N_s, N_id_cell, "pusch", delta_ss, group_hopping_enabled, sequence_hopping_enabled, alpha_lambda_1, N_prbs, &*dmrs1_d, x_q_d);
generate_ul_rs(N_s+1, N_id_cell, "pusch", delta_ss, group_hopping_enabled, sequence_hopping_enabled, alpha_lambda_2, N_prbs, &*dmrs2_d, x_q_d);
//Determine w vector
int w_vector;
if(!strcmp(w_config, "fixed"))
{
w_vector = 1;
}
else
{
w_vector = W_VECTOR[cyclic_shift_dci*4 + lambda];
}
//Generate the PUSCH demodulation reference signal sequence
generate_reference_signal << < 2, 1024 >> >(*dmrs2_d, w_vector, M_sc_rb);
} |
95aed9dae242c0e36bd027cddedf02630dbceed1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Furthest point sampling GPU implementation
* Original author: Haoqiang Fan
* Modified by Charles R. Qi
* All Rights Reserved. 2017.
*/
__global__ void cumsumKernel(int b,int n,const float * __restrict__ inp,float * __restrict__ out){
const int BlockSize=2048;
const int paddingLevel=5;
__shared__ float buffer4[BlockSize*4];
__shared__ float buffer[BlockSize+(BlockSize>>paddingLevel)];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
float runningsum=0,runningsum2=0;
for (int j=0;j<n;j+=BlockSize*4){
int n24_i=min(n-j,BlockSize*4);
int n24=(n24_i+3)&~3;
int n2=n24>>2;
for (int k=threadIdx.x*4;k<n24_i;k+=blockDim.x*4){
if (k+3<n24_i){
float v1=inp[i*n+j+k];
float v2=inp[i*n+j+k+1];
v2+=v1;
float v3=inp[i*n+j+k+2];
float v4=inp[i*n+j+k+3];
v4+=v3;
v3+=v2;
v4+=v2;
buffer4[k]=v1;
buffer4[k+1]=v2;
buffer4[k+2]=v3;
buffer4[k+3]=v4;
buffer[(k>>2)+(k>>(2+paddingLevel))]=v4;
}else{
float v=0;
for (int k2=k;k2<n24_i;k2++){
v+=inp[i*n+j+k2];
buffer4[k2]=v;
}
for (int k2=n24_i;k2<n24;k2++){
buffer4[k2]=v;
}
buffer[(k>>2)+(k>>(2+paddingLevel))]=v;
}
}
int u=0;
for (;(2<<u)<=n2;u++){
__syncthreads();
for (int k=threadIdx.x;k<int(n2>>(u+1));k+=blockDim.x){
int i1=(((k<<1)+2)<<u)-1;
int i2=(((k<<1)+1)<<u)-1;
i1+=i1>>paddingLevel;
i2+=i2>>paddingLevel;
buffer[i1]+=buffer[i2];
}
}
u--;
for (;u>=0;u--){
__syncthreads();
for (int k=threadIdx.x;k<int((n2-(1<<u))>>(u+1));k+=blockDim.x){
int i1=(((k<<1)+3)<<u)-1;
int i2=(((k<<1)+2)<<u)-1;
i1+=i1>>paddingLevel;
i2+=i2>>paddingLevel;
buffer[i1]+=buffer[i2];
}
}
__syncthreads();
for (int k=threadIdx.x*4;k<n24;k+=blockDim.x*4){
if (k!=0){
int k2=((k>>2)-1)+(((k>>2)-1)>>paddingLevel);
buffer4[k]+=buffer[k2];
buffer4[k+1]+=buffer[k2];
buffer4[k+2]+=buffer[k2];
buffer4[k+3]+=buffer[k2];
}
}
__syncthreads();
for (int k=threadIdx.x;k<n24_i;k+=blockDim.x){
out[i*n+j+k]=buffer4[k]+runningsum;
}
float t=buffer[(n2-1)+((n2-1)>>paddingLevel)]+runningsum2;
float r2=runningsum+t;
runningsum2=t-(r2-runningsum);
runningsum=r2;
__syncthreads();
}
}
}
__global__ void binarysearchKernel(int b,int n,int m,const float * __restrict__ dataset,const float * __restrict__ query, int * __restrict__ result){
int base=1;
while (base<n)
base<<=1;
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=blockIdx.y*blockDim.x+threadIdx.x;j<m;j+=blockDim.x*gridDim.y){
float q=query[i*m+j]*dataset[i*n+n-1];
int r=n-1;
for (int k=base;k>=1;k>>=1)
if (r>=k && dataset[i*n+r-k]>=q)
r-=k;
result[i*m+j]=r;
}
}
}
__global__ void farthestpointsamplingKernel(int b,int n,int m,const float * __restrict__ dataset,float * __restrict__ temp,int * __restrict__ idxs){
if (m<=0)
return;
const int BlockSize=512;
__shared__ float dists[BlockSize];
__shared__ int dists_i[BlockSize];
const int BufferSize=3072;
__shared__ float buf[BufferSize*3];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
int old=0;
if (threadIdx.x==0)
idxs[i*m+0]=old;
for (int j=threadIdx.x;j<n;j+=blockDim.x){
temp[blockIdx.x*n+j]=1e38;
}
for (int j=threadIdx.x;j<min(BufferSize,n)*3;j+=blockDim.x){
buf[j]=dataset[i*n*3+j];
}
__syncthreads();
for (int j=1;j<m;j++){
int besti=0;
float best=-1;
float x1=dataset[i*n*3+old*3+0];
float y1=dataset[i*n*3+old*3+1];
float z1=dataset[i*n*3+old*3+2];
for (int k=threadIdx.x;k<n;k+=blockDim.x){
float td=temp[blockIdx.x*n+k];
float x2,y2,z2;
if (k<BufferSize){
x2=buf[k*3+0];
y2=buf[k*3+1];
z2=buf[k*3+2];
}else{
x2=dataset[i*n*3+k*3+0];
y2=dataset[i*n*3+k*3+1];
z2=dataset[i*n*3+k*3+2];
}
float d=(x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1);
float d2=min(d,td);
if (d2!=td)
temp[blockIdx.x*n+k]=d2;
if (d2>best){
best=d2;
besti=k;
}
}
dists[threadIdx.x]=best;
dists_i[threadIdx.x]=besti;
for (int u=0;(1<<u)<blockDim.x;u++){
__syncthreads();
if (threadIdx.x<(blockDim.x>>(u+1))){
int i1=(threadIdx.x*2)<<u;
int i2=(threadIdx.x*2+1)<<u;
if (dists[i1]<dists[i2]){
dists[i1]=dists[i2];
dists_i[i1]=dists_i[i2];
}
}
}
__syncthreads();
old=dists_i[0];
if (threadIdx.x==0)
idxs[i*m+j]=old;
}
}
}
__global__ void gatherpointKernel(int b,int n,int m,const float * __restrict__ inp,const int * __restrict__ idx,float * __restrict__ out){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=blockIdx.y*blockDim.x+threadIdx.x;j<m;j+=blockDim.x*gridDim.y){
int a=idx[i*m+j];
out[(i*m+j)*3+0]=inp[(i*n+a)*3+0];
out[(i*m+j)*3+1]=inp[(i*n+a)*3+1];
out[(i*m+j)*3+2]=inp[(i*n+a)*3+2];
}
}
}
__global__ void batchgatherpointKernel(int b,int n,int k,int f,const float * __restrict__ inp,const int * __restrict__ idx,float * __restrict__ out){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=blockIdx.y;j<n;j+=gridDim.y){
for (int ij = threadIdx.x; ij<k; ij+=blockDim.x){
int a=idx[i*n*k+j*k+ij];
for(int fi = 0; fi < f; fi++){
out[(i*n*k+j*k+ij)*f+fi]=inp[(i*n+a)*f+fi];
}
}
}
}
}
__global__ void scatteraddpointKernel(int b,int n,int m,const float * __restrict__ out_g,const int * __restrict__ idx,float * __restrict__ inp_g){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=blockIdx.y*blockDim.x+threadIdx.x;j<m;j+=blockDim.x*gridDim.y){
int a=idx[i*m+j];
atomicAdd(&inp_g[(i*n+a)*3+0],out_g[(i*m+j)*3+0]);
atomicAdd(&inp_g[(i*n+a)*3+1],out_g[(i*m+j)*3+1]);
atomicAdd(&inp_g[(i*n+a)*3+2],out_g[(i*m+j)*3+2]);
}
}
}
__global__ void batchscatteraddpointKernel(int b,int n,int k,int f,const float * __restrict__ out_g,const int * __restrict__ idx,float * __restrict__ inp_g){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=blockIdx.y;j<n;j+=gridDim.y){
for (int ij = threadIdx.x; ij<k; ij+=blockDim.x){
int a=idx[i*n*k+j*k+ij];
for(int fi = 0; fi < f; fi++){
atomicAdd(&inp_g[(i*n+a)*f+fi],out_g[(i*n*k+j*k+ij)*f+fi]);
}
}
}
}
}
void cumsumLauncher(int b,int n,const float * inp,float * out){
hipLaunchKernelGGL(( cumsumKernel), dim3(32),dim3(512), 0, 0, b,n,inp,out);
}
//require b*n working space
void probsampleLauncher(int b,int n,int m,const float * inp_p,const float * inp_r,float * temp,int * out){
hipLaunchKernelGGL(( cumsumKernel), dim3(32),dim3(512), 0, 0, b,n,inp_p,temp);
hipLaunchKernelGGL(( binarysearchKernel), dim3(dim3(32,8,1)),dim3(512), 0, 0, b,n,m,temp,inp_r,out);
}
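// probsampleLauncher implements inverse-CDF sampling: cumsumKernel writes the inclusive prefix
// sum of the unnormalised probabilities inp_p into temp (a block-wise shared-memory scan processed
// in 8192-element chunks with a compensated running sum), then binarysearchKernel scales each
// uniform random number inp_r[j] by the total mass temp[i*n + n-1] and binary-searches for the
// smallest index r with temp[i*n + r] >= q, so out[i*m + j] is drawn proportionally to inp_p.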
//require 32*n working space
void farthestpointsamplingLauncher(int b,int n,int m,const float * inp,float * temp,int * out){
hipLaunchKernelGGL(( farthestpointsamplingKernel), dim3(32),dim3(512), 0, 0, b,n,m,inp,temp,out);
}
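// farthestpointsamplingKernel greedily selects m indices per batch, starting from index 0:
// temp keeps every point's current minimum distance to the chosen set (initialised to 1e38),
// each thread scans a strided subset of the n points and updates temp, and a shared-memory
// max-reduction over dists/dists_i picks the farthest point, whose index is appended to idxs.
// The fixed <<<32, 512>>> launch processes up to 32 batches concurrently, which is why the
// comment above asks for 32*n floats of temp working space.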
void gatherpointLauncher(int b,int n,int m,const float * inp,const int * idx,float * out){
hipLaunchKernelGGL(( gatherpointKernel), dim3(dim3(2,8,1)),dim3(512), 0, 0, b,n,m,inp,idx,out);
}
void batchgatherpointLauncher(int b,int n,int k,int f,const float * inp,const int * idx,float * out){
hipLaunchKernelGGL(( batchgatherpointKernel), dim3(dim3(2,128,1)),dim3(512), 0, 0, b,n,k,f,inp,idx,out);
}
void scatteraddpointLauncher(int b,int n,int m,const float * out_g,const int * idx,float * inp_g){
hipLaunchKernelGGL(( scatteraddpointKernel), dim3(dim3(2,8,1)),dim3(512), 0, 0, b,n,m,out_g,idx,inp_g);
}
void batchscatteraddpointLauncher(int b,int n,int k,int f,const float * out_g,const int * idx,float * inp_g){
hipLaunchKernelGGL(( batchscatteraddpointKernel), dim3(dim3(2,128,1)),dim3(512), 0, 0, b,n,k,f,out_g,idx,inp_g);
}
| 95aed9dae242c0e36bd027cddedf02630dbceed1.cu | /* Furthest point sampling GPU implementation
* Original author: Haoqiang Fan
* Modified by Charles R. Qi
* All Rights Reserved. 2017.
*/
__global__ void cumsumKernel(int b,int n,const float * __restrict__ inp,float * __restrict__ out){
const int BlockSize=2048;
const int paddingLevel=5;
__shared__ float buffer4[BlockSize*4];
__shared__ float buffer[BlockSize+(BlockSize>>paddingLevel)];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
float runningsum=0,runningsum2=0;
for (int j=0;j<n;j+=BlockSize*4){
int n24_i=min(n-j,BlockSize*4);
int n24=(n24_i+3)&~3;
int n2=n24>>2;
for (int k=threadIdx.x*4;k<n24_i;k+=blockDim.x*4){
if (k+3<n24_i){
float v1=inp[i*n+j+k];
float v2=inp[i*n+j+k+1];
v2+=v1;
float v3=inp[i*n+j+k+2];
float v4=inp[i*n+j+k+3];
v4+=v3;
v3+=v2;
v4+=v2;
buffer4[k]=v1;
buffer4[k+1]=v2;
buffer4[k+2]=v3;
buffer4[k+3]=v4;
buffer[(k>>2)+(k>>(2+paddingLevel))]=v4;
}else{
float v=0;
for (int k2=k;k2<n24_i;k2++){
v+=inp[i*n+j+k2];
buffer4[k2]=v;
}
for (int k2=n24_i;k2<n24;k2++){
buffer4[k2]=v;
}
buffer[(k>>2)+(k>>(2+paddingLevel))]=v;
}
}
int u=0;
for (;(2<<u)<=n2;u++){
__syncthreads();
for (int k=threadIdx.x;k<int(n2>>(u+1));k+=blockDim.x){
int i1=(((k<<1)+2)<<u)-1;
int i2=(((k<<1)+1)<<u)-1;
i1+=i1>>paddingLevel;
i2+=i2>>paddingLevel;
buffer[i1]+=buffer[i2];
}
}
u--;
for (;u>=0;u--){
__syncthreads();
for (int k=threadIdx.x;k<int((n2-(1<<u))>>(u+1));k+=blockDim.x){
int i1=(((k<<1)+3)<<u)-1;
int i2=(((k<<1)+2)<<u)-1;
i1+=i1>>paddingLevel;
i2+=i2>>paddingLevel;
buffer[i1]+=buffer[i2];
}
}
__syncthreads();
for (int k=threadIdx.x*4;k<n24;k+=blockDim.x*4){
if (k!=0){
int k2=((k>>2)-1)+(((k>>2)-1)>>paddingLevel);
buffer4[k]+=buffer[k2];
buffer4[k+1]+=buffer[k2];
buffer4[k+2]+=buffer[k2];
buffer4[k+3]+=buffer[k2];
}
}
__syncthreads();
for (int k=threadIdx.x;k<n24_i;k+=blockDim.x){
out[i*n+j+k]=buffer4[k]+runningsum;
}
float t=buffer[(n2-1)+((n2-1)>>paddingLevel)]+runningsum2;
float r2=runningsum+t;
runningsum2=t-(r2-runningsum);
runningsum=r2;
__syncthreads();
}
}
}
__global__ void binarysearchKernel(int b,int n,int m,const float * __restrict__ dataset,const float * __restrict__ query, int * __restrict__ result){
int base=1;
while (base<n)
base<<=1;
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=blockIdx.y*blockDim.x+threadIdx.x;j<m;j+=blockDim.x*gridDim.y){
float q=query[i*m+j]*dataset[i*n+n-1];
int r=n-1;
for (int k=base;k>=1;k>>=1)
if (r>=k && dataset[i*n+r-k]>=q)
r-=k;
result[i*m+j]=r;
}
}
}
__global__ void farthestpointsamplingKernel(int b,int n,int m,const float * __restrict__ dataset,float * __restrict__ temp,int * __restrict__ idxs){
if (m<=0)
return;
const int BlockSize=512;
__shared__ float dists[BlockSize];
__shared__ int dists_i[BlockSize];
const int BufferSize=3072;
__shared__ float buf[BufferSize*3];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
int old=0;
if (threadIdx.x==0)
idxs[i*m+0]=old;
for (int j=threadIdx.x;j<n;j+=blockDim.x){
temp[blockIdx.x*n+j]=1e38;
}
for (int j=threadIdx.x;j<min(BufferSize,n)*3;j+=blockDim.x){
buf[j]=dataset[i*n*3+j];
}
__syncthreads();
for (int j=1;j<m;j++){
int besti=0;
float best=-1;
float x1=dataset[i*n*3+old*3+0];
float y1=dataset[i*n*3+old*3+1];
float z1=dataset[i*n*3+old*3+2];
for (int k=threadIdx.x;k<n;k+=blockDim.x){
float td=temp[blockIdx.x*n+k];
float x2,y2,z2;
if (k<BufferSize){
x2=buf[k*3+0];
y2=buf[k*3+1];
z2=buf[k*3+2];
}else{
x2=dataset[i*n*3+k*3+0];
y2=dataset[i*n*3+k*3+1];
z2=dataset[i*n*3+k*3+2];
}
float d=(x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1);
float d2=min(d,td);
if (d2!=td)
temp[blockIdx.x*n+k]=d2;
if (d2>best){
best=d2;
besti=k;
}
}
dists[threadIdx.x]=best;
dists_i[threadIdx.x]=besti;
for (int u=0;(1<<u)<blockDim.x;u++){
__syncthreads();
if (threadIdx.x<(blockDim.x>>(u+1))){
int i1=(threadIdx.x*2)<<u;
int i2=(threadIdx.x*2+1)<<u;
if (dists[i1]<dists[i2]){
dists[i1]=dists[i2];
dists_i[i1]=dists_i[i2];
}
}
}
__syncthreads();
old=dists_i[0];
if (threadIdx.x==0)
idxs[i*m+j]=old;
}
}
}
__global__ void gatherpointKernel(int b,int n,int m,const float * __restrict__ inp,const int * __restrict__ idx,float * __restrict__ out){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=blockIdx.y*blockDim.x+threadIdx.x;j<m;j+=blockDim.x*gridDim.y){
int a=idx[i*m+j];
out[(i*m+j)*3+0]=inp[(i*n+a)*3+0];
out[(i*m+j)*3+1]=inp[(i*n+a)*3+1];
out[(i*m+j)*3+2]=inp[(i*n+a)*3+2];
}
}
}
__global__ void batchgatherpointKernel(int b,int n,int k,int f,const float * __restrict__ inp,const int * __restrict__ idx,float * __restrict__ out){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=blockIdx.y;j<n;j+=gridDim.y){
for (int ij = threadIdx.x; ij<k; ij+=blockDim.x){
int a=idx[i*n*k+j*k+ij];
for(int fi = 0; fi < f; fi++){
out[(i*n*k+j*k+ij)*f+fi]=inp[(i*n+a)*f+fi];
}
}
}
}
}
__global__ void scatteraddpointKernel(int b,int n,int m,const float * __restrict__ out_g,const int * __restrict__ idx,float * __restrict__ inp_g){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=blockIdx.y*blockDim.x+threadIdx.x;j<m;j+=blockDim.x*gridDim.y){
int a=idx[i*m+j];
atomicAdd(&inp_g[(i*n+a)*3+0],out_g[(i*m+j)*3+0]);
atomicAdd(&inp_g[(i*n+a)*3+1],out_g[(i*m+j)*3+1]);
atomicAdd(&inp_g[(i*n+a)*3+2],out_g[(i*m+j)*3+2]);
}
}
}
__global__ void batchscatteraddpointKernel(int b,int n,int k,int f,const float * __restrict__ out_g,const int * __restrict__ idx,float * __restrict__ inp_g){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=blockIdx.y;j<n;j+=gridDim.y){
for (int ij = threadIdx.x; ij<k; ij+=blockDim.x){
int a=idx[i*n*k+j*k+ij];
for(int fi = 0; fi < f; fi++){
atomicAdd(&inp_g[(i*n+a)*f+fi],out_g[(i*n*k+j*k+ij)*f+fi]);
}
}
}
}
}
void cumsumLauncher(int b,int n,const float * inp,float * out){
cumsumKernel<<<32,512>>>(b,n,inp,out);
}
//require b*n working space
void probsampleLauncher(int b,int n,int m,const float * inp_p,const float * inp_r,float * temp,int * out){
cumsumKernel<<<32,512>>>(b,n,inp_p,temp);
binarysearchKernel<<<dim3(32,8,1),512>>>(b,n,m,temp,inp_r,out);
}
//require 32*n working space
void farthestpointsamplingLauncher(int b,int n,int m,const float * inp,float * temp,int * out){
farthestpointsamplingKernel<<<32,512>>>(b,n,m,inp,temp,out);
}
void gatherpointLauncher(int b,int n,int m,const float * inp,const int * idx,float * out){
gatherpointKernel<<<dim3(2,8,1),512>>>(b,n,m,inp,idx,out);
}
void batchgatherpointLauncher(int b,int n,int k,int f,const float * inp,const int * idx,float * out){
batchgatherpointKernel<<<dim3(2,128,1),512>>>(b,n,k,f,inp,idx,out);
}
void scatteraddpointLauncher(int b,int n,int m,const float * out_g,const int * idx,float * inp_g){
scatteraddpointKernel<<<dim3(2,8,1),512>>>(b,n,m,out_g,idx,inp_g);
}
void batchscatteraddpointLauncher(int b,int n,int k,int f,const float * out_g,const int * idx,float * inp_g){
batchscatteraddpointKernel<<<dim3(2,128,1),512>>>(b,n,k,f,out_g,idx,inp_g);
}
|
3c3249c1aa53076e8d560e56fbbd5f82e78df289.hip | // !!! This is a file automatically generated by hipify!!!
#include <cudaCompress/RunLength.h>
#include <cassert>
#include <string>
#include <hip/hip_runtime.h>
#include <cudaCompress/cudaUtil.h>
#include <cudaCompress/util.h>
#include <cudaCompress/InstanceImpl.h>
#include <cudaCompress/scan/scan_app.cui>
#include "RunLengthKernels.cui"
namespace cudaCompress {
size_t runLengthGetRequiredMemory(const Instance* pInstance)
{
uint streamCountMax = pInstance->m_streamCountMax;
uint symbolCountMax = pInstance->m_elemCountPerStreamMax;
size_t sizeDecode = 0;
// dpValidSymbolIndices
sizeDecode += getAlignedSize(streamCountMax * symbolCountMax * sizeof(uint), 128);
// dpUploads
sizeDecode += getAlignedSize(streamCountMax * (2 * sizeof(Symbol16*) + sizeof(uint)), 128);
size_t sizeEncode = 0;
// dpValidSymbolIndices
sizeEncode += getAlignedSize(symbolCountMax * sizeof(uint), 128);
// dpOutputIndices
sizeEncode += getAlignedSize(streamCountMax * (symbolCountMax + 1) * sizeof(uint), 128);
// dpScanTotal
sizeEncode += getAlignedSize(streamCountMax * sizeof(uint), 128);
size_t size = std::max<size_t>(sizeEncode, sizeDecode);
return size;
}
bool runLengthInit(Instance* pInstance)
{
uint streamCountMax = pInstance->m_streamCountMax;
cudaSafeCall(hipHostMalloc(&pInstance->RunLength.pReadback, streamCountMax * sizeof(uint)));
pInstance->RunLength.syncEventsReadback.resize(streamCountMax);
for(uint stream = 0; stream < streamCountMax; stream++) {
cudaSafeCall(hipEventCreateWithFlags(&pInstance->RunLength.syncEventsReadback[stream], hipEventDisableTiming));
}
cudaSafeCall(hipHostMalloc(&pInstance->RunLength.pUpload, streamCountMax * (2 * sizeof(Symbol16*) + sizeof(uint))));
cudaSafeCall(hipEventCreateWithFlags(&pInstance->RunLength.syncEventUpload, hipEventDisableTiming));
cudaSafeCall(hipEventRecord(pInstance->RunLength.syncEventUpload));
return true;
}
bool runLengthShutdown(Instance* pInstance)
{
cudaSafeCall(hipEventDestroy(pInstance->RunLength.syncEventUpload));
pInstance->RunLength.syncEventUpload = 0;
cudaSafeCall(hipHostFree(pInstance->RunLength.pUpload));
pInstance->RunLength.pUpload = NULL;
for(uint stream = 0; stream < pInstance->RunLength.syncEventsReadback.size(); stream++) {
cudaSafeCall(hipEventDestroy(pInstance->RunLength.syncEventsReadback[stream]));
}
pInstance->RunLength.syncEventsReadback.clear();
cudaSafeCall(hipHostFree(pInstance->RunLength.pReadback));
pInstance->RunLength.pReadback = NULL;
return true;
}
template<typename Symbol>
bool runLengthEncode(Instance* pInstance, Symbol** pdpSymbolsCompact, Symbol** pdpZeroCounts, const Symbol** pdpSymbols, const uint* pSymbolCount, uint streamCount, uint zeroCountMax, uint* pSymbolCountCompact)
{
assert(streamCount <= pInstance->m_streamCountMax);
uint streamCountMax = pInstance->m_streamCountMax;
uint symbolCountMax = pInstance->m_elemCountPerStreamMax;
size_t outputIndicesStride = getAlignedSize((symbolCountMax + 1) * sizeof(uint), 128) / sizeof(uint);
uint* dpOutputIndicesAll = pInstance->getBuffer<uint>(streamCountMax * outputIndicesStride);
uint* dpValidSymbolIndices = pInstance->getBuffer<uint>(symbolCountMax);
uint* dpScanTotal = pInstance->getBuffer<uint>(streamCountMax);
uint blockSize = 0;
uint blockCount = 0;
for(uint stream = 0; stream < streamCount; stream++) {
assert(pSymbolCount[stream] <= symbolCountMax);
uint* dpOutputIndices = dpOutputIndicesAll + stream * outputIndicesStride;
util::CudaScopedTimer timer(pInstance->RunLength.timerEncode);
timer("Scan Valid Flags");
// run prefix sum on symbol non-zero flags to get output indices
//TODO ballot scan!
scanArray<Symbol, uint, true, FunctorFlagTrue<Symbol, uint> >(dpOutputIndices, pdpSymbols[stream], pSymbolCount[stream] + 1, pInstance->m_pScanPlan, pInstance->m_stream);
cudaCheckMsg("runLengthEncode: Error in scanArray");
// last element of outputindices == compact symbol count, start readback
uint* dpCompactSymbolCount = dpOutputIndices + pSymbolCount[stream];
cudaSafeCall(hipMemcpyAsync(pInstance->RunLength.pReadback + stream, dpCompactSymbolCount, sizeof(uint), hipMemcpyDeviceToHost, pInstance->m_stream));
cudaSafeCall(hipEventRecord(pInstance->RunLength.syncEventsReadback[stream], pInstance->m_stream));
}
for(uint stream = 0; stream < streamCount; stream++) {
uint* dpOutputIndices = dpOutputIndicesAll + stream * outputIndicesStride;
uint* dpCompactSymbolCount = dpOutputIndices + pSymbolCount[stream];
util::CudaScopedTimer timer(pInstance->RunLength.timerEncode);
timer("Get Valid Symbol Indices");
// get indices of valid (non-zero) symbols
blockSize = 256;
blockCount = min((pSymbolCount[stream] + blockSize - 1) / blockSize, 256u);
hipLaunchKernelGGL(( runLengthEncodeGetValidSymbolIndices), dim3(blockCount), dim3(blockSize), 0, pInstance->m_stream, dpOutputIndices, dpValidSymbolIndices, pSymbolCount[stream]);
cudaCheckMsg("runLengthEncodeGetValidSymbolIndices execution failed");
timer("Get # Extra Zeros");
// compute number of extra zero symbols to insert in order to respect zeroCountMax
// choose blockCount based on original (non-compact) symbol count, so we can wait a bit longer before syncing on the download
blockSize = 256;
blockCount = min((pSymbolCount/*Compact*/[stream] + blockSize - 1) / blockSize, 256u);
hipLaunchKernelGGL(( runLengthEncodeExtraZeroSymbolCountsKernel), dim3(blockCount), dim3(blockSize), 0, pInstance->m_stream, dpValidSymbolIndices, dpOutputIndices, dpCompactSymbolCount, zeroCountMax);
cudaCheckMsg("runLengthEncodeExtraZeroSymbolCountsKernel execution failed");
timer("Sync Readback");
// wait for download of compacted symbol count - need it for the next scan
cudaSafeCall(hipEventSynchronize(pInstance->RunLength.syncEventsReadback[stream]));
pSymbolCountCompact[stream] = pInstance->RunLength.pReadback[stream];
timer("Scan # Extra Zeros");
// run prefix sum on extra zero symbol counts to get output offsets
scanArray<uint, uint, true>(dpOutputIndices, dpOutputIndices, pSymbolCountCompact[stream] + 1, pInstance->m_pScanPlan, pInstance->m_stream);
cudaCheckMsg("runLengthEncode: Error in scanArray");
timer("Download # Extra Zeros");
// last write offset == total number of extra zeroes to be inserted
cudaSafeCall(hipMemcpyAsync(dpScanTotal + stream, dpOutputIndices + pSymbolCountCompact[stream], sizeof(uint), hipMemcpyDeviceToDevice, pInstance->m_stream));
// if this was the last stream, start readback to cpu
if(stream == streamCount - 1) {
cudaSafeCall(hipMemcpyAsync(pInstance->RunLength.pReadback, dpScanTotal, streamCount * sizeof(uint), hipMemcpyDeviceToHost, pInstance->m_stream));
cudaSafeCall(hipEventRecord(pInstance->RunLength.syncEventsReadback[0], pInstance->m_stream));
}
// if there are no non-zero symbols, we can bail out here
if(pSymbolCountCompact[stream] == 0) {
continue;
}
timer("Compact");
// copy non-zero symbols to output, pad with extra zero symbols where necessary
blockSize = 256;
blockCount = min((pSymbolCountCompact[stream] + blockSize - 1) / blockSize, 256u);
hipLaunchKernelGGL(( runLengthEncodeCompactKernel<Symbol>), dim3(blockCount), dim3(blockSize), 0, pInstance->m_stream, pdpSymbols[stream], dpValidSymbolIndices, dpOutputIndices, pdpSymbolsCompact[stream], pdpZeroCounts[stream], pSymbolCountCompact[stream], zeroCountMax);
cudaCheckMsg("runLengthEncodeCompactKernel execution failed");
}
// add extra zeros to compacted symbol count
cudaSafeCall(hipEventSynchronize(pInstance->RunLength.syncEventsReadback[0]));
for(uint stream = 0; stream < streamCount; stream++) {
pSymbolCountCompact[stream] += pInstance->RunLength.pReadback[stream];
}
pInstance->releaseBuffers(3);
return true;
}
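// Illustrative example (descriptive comment added for clarity; the exact index
// conventions are defined by the kernels in RunLengthKernels.cui): for a single
// stream [5, 0, 0, 7, 9, 0] the encoder keeps only the non-zero symbols,
// pdpSymbolsCompact = [5, 7, 9], and stores in pdpZeroCounts the number of zeros
// preceding each kept symbol, here [0, 2, 0]; zeros after the last kept symbol are
// not stored at all. If a zero run is longer than zeroCountMax, the "extra zero
// symbols" computed above are emitted as literal zero entries so that every stored
// count stays within the allowed range.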
template<typename Symbol>
bool runLengthDecode(Instance* pInstance, const Symbol** pdpSymbolsCompact, const Symbol** pdpZeroCounts, const uint* pSymbolCountCompact, Symbol** pdpSymbols, const uint* pSymbolCount, uint streamCount)
{
assert(streamCount <= pInstance->m_streamCountMax);
uint symbolCountMax = pInstance->m_elemCountPerStreamMax;
uint* dpValidSymbolIndices = pInstance->getBuffer<uint>(streamCount * symbolCountMax);
byte* dpUploads = pInstance->getBuffer<byte>(streamCount * (sizeof(Symbol*) + sizeof(uint)));
util::CudaScopedTimer timer(pInstance->RunLength.timerDecode);
{
timer("Scan Zero Counts");
for(uint i = 0; i < streamCount; i++) {
// if there are no symbols, we're done here
if(pSymbolCountCompact[i] == 0) {
continue;
}
// run prefix sum on zero counts to get valid symbol indices
assert(pSymbolCountCompact[i] < symbolCountMax);
scanArray<Symbol, uint, false>(dpValidSymbolIndices + i * symbolCountMax, pdpZeroCounts[i], pSymbolCountCompact[i], pInstance->m_pScanPlan, pInstance->m_stream);
cudaCheckMsg("runLengthDecode: Error in scanArray");
}
}
uint symbolCountCompactMax = 0;
for(uint i = 0; i < streamCount; i++) {
symbolCountCompactMax = max(symbolCountCompactMax, pSymbolCountCompact[i]);
}
if(symbolCountCompactMax > 0)
{
timer("Scatter Symbols");
// upload symbol stream pointers and compact symbol counts
Symbol** ppSymbolsCompactUpload = (Symbol**)pInstance->RunLength.pUpload;
Symbol** ppSymbolsUpload = (Symbol**)(ppSymbolsCompactUpload + streamCount);
uint* pSymbolCountCompactUpload = (uint*) (ppSymbolsUpload + streamCount);
cudaSafeCall(hipEventSynchronize(pInstance->RunLength.syncEventUpload));
memcpy(ppSymbolsCompactUpload, pdpSymbolsCompact, streamCount * sizeof(Symbol*));
memcpy(ppSymbolsUpload, pdpSymbols, streamCount * sizeof(Symbol*));
memcpy(pSymbolCountCompactUpload, pSymbolCountCompact, streamCount * sizeof(uint));
cudaSafeCall(hipMemcpyAsync(dpUploads, pInstance->RunLength.pUpload, streamCount * (2 * sizeof(Symbol*) + sizeof(uint)), hipMemcpyHostToDevice, pInstance->m_stream));
cudaSafeCall(hipEventRecord(pInstance->RunLength.syncEventUpload, pInstance->m_stream));
// expand symbol stream - scattered write of non-zero symbols
Symbol** dppSymbolsCompact = (Symbol**)dpUploads;
Symbol** dppSymbols = (Symbol**)(dppSymbolsCompact + streamCount);
uint* dpSymbolCountCompact = (uint*) (dppSymbols + streamCount);
uint blockSize = 256;
dim3 blockCount(min((symbolCountCompactMax + blockSize - 1) / blockSize, 256u), streamCount);
hipLaunchKernelGGL(( runLengthDecodeMultiScatterKernel), dim3(blockCount), dim3(blockSize), 0, pInstance->m_stream, (const Symbol**)dppSymbolsCompact, dpValidSymbolIndices, symbolCountMax, dpSymbolCountCompact, dppSymbols);
cudaCheckMsg("runLengthDecodeMultiScatterKernel execution failed");
}
pInstance->releaseBuffers(2);
return true;
}
template<typename Symbol>
bool runLengthDecode(Instance* pInstance, const Symbol* dpSymbolsCompact, const Symbol* dpZeroCounts, const uint* pSymbolCountCompact, uint stride, Symbol** pdpSymbols, uint symbolCount, uint streamCount)
{
assert(streamCount <= pInstance->m_streamCountMax);
//assert(stride <= pInstance->m_elemCountPerStreamMax);
//TODO make version of scanArray that takes separate input and output stride, and then alloc only streamCount * symbolCount here
uint* dpValidSymbolIndices = pInstance->getBuffer<uint>(streamCount * stride);
byte* dpUploads = pInstance->getBuffer<byte>(streamCount * (sizeof(Symbol*) + sizeof(uint)));
util::CudaScopedTimer timer(pInstance->RunLength.timerDecode);
uint symbolCountCompactMax = 0;
for(uint i = 0; i < streamCount; i++) {
symbolCountCompactMax = max(symbolCountCompactMax, pSymbolCountCompact[i]);
}
if(symbolCountCompactMax > 0) {
timer("Scan Zero Counts");
// run prefix sum on zero counts to get valid symbol indices
// combine scans below cutoff into multi-row scans
const uint cutoff = 64 * 1024; // chosen quite arbitrarily; TODO: benchmark scanArray...
for(uint streamStart = 0; streamStart < streamCount; ) {
uint elemCount = pSymbolCountCompact[streamStart];
if(elemCount == 0) { streamStart++; continue; }
uint streamEnd = streamStart + 1;
if(elemCount <= cutoff) {
while(streamEnd < streamCount && pSymbolCountCompact[streamEnd] <= cutoff) {
elemCount = max(elemCount, pSymbolCountCompact[streamEnd]);
streamEnd++;
}
}
if(elemCount > 0) {
uint offset = streamStart * stride;
scanArray<Symbol, uint, false>(dpValidSymbolIndices + offset, dpZeroCounts + offset, elemCount, streamEnd - streamStart, stride, pInstance->m_pScanPlan, pInstance->m_stream);
cudaCheckMsg("runLengthDecode: Error in scanArray");
}
streamStart = streamEnd;
}
//// simple version that just scans all streams at once
//scanArray<Symbol, uint, false>(dpValidSymbolIndices, dpZeroCounts, symbolCountCompactMax, streamCount, stride, pInstance->m_pScanPlan, pInstance->m_stream);
//cudaCheckMsg("runLengthDecode: Error in scanArray");
timer("Scatter Symbols");
// upload symbol stream pointers and compact symbol counts
Symbol** ppSymbolsUpload = (Symbol**)pInstance->RunLength.pUpload;
uint* pSymbolCountCompactUpload = (uint*)(ppSymbolsUpload + streamCount);
cudaSafeCall(hipEventSynchronize(pInstance->RunLength.syncEventUpload));
memcpy(ppSymbolsUpload, pdpSymbols, streamCount * sizeof(Symbol*));
memcpy(pSymbolCountCompactUpload, pSymbolCountCompact, streamCount * sizeof(uint));
cudaSafeCall(hipMemcpyAsync(dpUploads, pInstance->RunLength.pUpload, streamCount * (sizeof(Symbol*) + sizeof(uint)), hipMemcpyHostToDevice, pInstance->m_stream));
cudaSafeCall(hipEventRecord(pInstance->RunLength.syncEventUpload, pInstance->m_stream));
// expand symbol stream - scattered write of non-zero symbols
Symbol** dppSymbols = (Symbol**)dpUploads;
uint* dpSymbolCountCompact = (uint*)(dppSymbols + streamCount);
uint blockSize = 256;
dim3 blockCount(min((symbolCountCompactMax + blockSize - 1) / blockSize, 256u), streamCount);
hipLaunchKernelGGL(( runLengthDecodeMultiScatterKernel), dim3(blockCount), dim3(blockSize), 0, pInstance->m_stream, dpSymbolsCompact, stride, dpValidSymbolIndices, stride, dpSymbolCountCompact, dppSymbols);
cudaCheckMsg("runLengthDecodeMultiScatterKernel execution failed");
}
pInstance->releaseBuffers(2);
return true;
}
bool runLengthEncode(Instance* pInstance, Symbol16** pdpSymbolsCompact, Symbol16** pdpZeroCounts, const Symbol16** pdpSymbols, const uint* pSymbolCount, uint streamCount, uint zeroCountMax, uint* pSymbolCountCompact)
{
return runLengthEncode<Symbol16>(pInstance, pdpSymbolsCompact, pdpZeroCounts, pdpSymbols, pSymbolCount, streamCount, zeroCountMax, pSymbolCountCompact);
}
bool runLengthDecode(Instance* pInstance, const Symbol16** pdpSymbolsCompact, const Symbol16** pdpZeroCounts, const uint* pSymbolCountCompact, Symbol16** pdpSymbols, const uint* pSymbolCount, uint streamCount)
{
return runLengthDecode<Symbol16>(pInstance, pdpSymbolsCompact, pdpZeroCounts, pSymbolCountCompact, pdpSymbols, pSymbolCount, streamCount);
}
bool runLengthDecode(Instance* pInstance, const Symbol16* dpSymbolsCompact, const Symbol16* dpZeroCounts, const uint* pSymbolCountCompact, uint stride, Symbol16** pdpSymbols, uint symbolCount, uint streamCount)
{
return runLengthDecode<Symbol16>(pInstance, dpSymbolsCompact, dpZeroCounts, pSymbolCountCompact, stride, pdpSymbols, symbolCount, streamCount);
}
bool runLengthEncode(Instance* pInstance, Symbol32** pdpSymbolsCompact, Symbol32** pdpZeroCounts, const Symbol32** pdpSymbols, const uint* pSymbolCount, uint streamCount, uint zeroCountMax, uint* pSymbolCountCompact)
{
return runLengthEncode<Symbol32>(pInstance, pdpSymbolsCompact, pdpZeroCounts, pdpSymbols, pSymbolCount, streamCount, zeroCountMax, pSymbolCountCompact);
}
bool runLengthDecode(Instance* pInstance, const Symbol32** pdpSymbolsCompact, const Symbol32** pdpZeroCounts, const uint* pSymbolCountCompact, Symbol32** pdpSymbols, const uint* pSymbolCount, uint streamCount)
{
return runLengthDecode<Symbol32>(pInstance, pdpSymbolsCompact, pdpZeroCounts, pSymbolCountCompact, pdpSymbols, pSymbolCount, streamCount);
}
bool runLengthDecode(Instance* pInstance, const Symbol32* dpSymbolsCompact, const Symbol32* dpZeroCounts, const uint* pSymbolCountCompact, uint stride, Symbol32** pdpSymbols, uint symbolCount, uint streamCount)
{
return runLengthDecode<Symbol32>(pInstance, dpSymbolsCompact, dpZeroCounts, pSymbolCountCompact, stride, pdpSymbols, symbolCount, streamCount);
}
}
| 3c3249c1aa53076e8d560e56fbbd5f82e78df289.cu | #include <cudaCompress/RunLength.h>
#include <cassert>
#include <string>
#include <cuda_runtime.h>
#include <cudaCompress/cudaUtil.h>
#include <cudaCompress/util.h>
#include <cudaCompress/InstanceImpl.h>
#include <cudaCompress/scan/scan_app.cui>
#include "RunLengthKernels.cui"
namespace cudaCompress {
size_t runLengthGetRequiredMemory(const Instance* pInstance)
{
uint streamCountMax = pInstance->m_streamCountMax;
uint symbolCountMax = pInstance->m_elemCountPerStreamMax;
size_t sizeDecode = 0;
// dpValidSymbolIndices
sizeDecode += getAlignedSize(streamCountMax * symbolCountMax * sizeof(uint), 128);
// dpUploads
sizeDecode += getAlignedSize(streamCountMax * (2 * sizeof(Symbol16*) + sizeof(uint)), 128);
size_t sizeEncode = 0;
// dpValidSymbolIndices
sizeEncode += getAlignedSize(symbolCountMax * sizeof(uint), 128);
// dpOutputIndices
sizeEncode += getAlignedSize(streamCountMax * (symbolCountMax + 1) * sizeof(uint), 128);
// dpScanTotal
sizeEncode += getAlignedSize(streamCountMax * sizeof(uint), 128);
size_t size = std::max<size_t>(sizeEncode, sizeDecode);
return size;
}
bool runLengthInit(Instance* pInstance)
{
uint streamCountMax = pInstance->m_streamCountMax;
cudaSafeCall(cudaMallocHost(&pInstance->RunLength.pReadback, streamCountMax * sizeof(uint)));
pInstance->RunLength.syncEventsReadback.resize(streamCountMax);
for(uint stream = 0; stream < streamCountMax; stream++) {
cudaSafeCall(cudaEventCreateWithFlags(&pInstance->RunLength.syncEventsReadback[stream], cudaEventDisableTiming));
}
cudaSafeCall(cudaMallocHost(&pInstance->RunLength.pUpload, streamCountMax * (2 * sizeof(Symbol16*) + sizeof(uint))));
cudaSafeCall(cudaEventCreateWithFlags(&pInstance->RunLength.syncEventUpload, cudaEventDisableTiming));
cudaSafeCall(cudaEventRecord(pInstance->RunLength.syncEventUpload));
return true;
}
bool runLengthShutdown(Instance* pInstance)
{
cudaSafeCall(cudaEventDestroy(pInstance->RunLength.syncEventUpload));
pInstance->RunLength.syncEventUpload = 0;
cudaSafeCall(cudaFreeHost(pInstance->RunLength.pUpload));
pInstance->RunLength.pUpload = NULL;
for(uint stream = 0; stream < pInstance->RunLength.syncEventsReadback.size(); stream++) {
cudaSafeCall(cudaEventDestroy(pInstance->RunLength.syncEventsReadback[stream]));
}
pInstance->RunLength.syncEventsReadback.clear();
cudaSafeCall(cudaFreeHost(pInstance->RunLength.pReadback));
pInstance->RunLength.pReadback = NULL;
return true;
}
template<typename Symbol>
bool runLengthEncode(Instance* pInstance, Symbol** pdpSymbolsCompact, Symbol** pdpZeroCounts, const Symbol** pdpSymbols, const uint* pSymbolCount, uint streamCount, uint zeroCountMax, uint* pSymbolCountCompact)
{
assert(streamCount <= pInstance->m_streamCountMax);
uint streamCountMax = pInstance->m_streamCountMax;
uint symbolCountMax = pInstance->m_elemCountPerStreamMax;
size_t outputIndicesStride = getAlignedSize((symbolCountMax + 1) * sizeof(uint), 128) / sizeof(uint);
uint* dpOutputIndicesAll = pInstance->getBuffer<uint>(streamCountMax * outputIndicesStride);
uint* dpValidSymbolIndices = pInstance->getBuffer<uint>(symbolCountMax);
uint* dpScanTotal = pInstance->getBuffer<uint>(streamCountMax);
uint blockSize = 0;
uint blockCount = 0;
for(uint stream = 0; stream < streamCount; stream++) {
assert(pSymbolCount[stream] <= symbolCountMax);
uint* dpOutputIndices = dpOutputIndicesAll + stream * outputIndicesStride;
util::CudaScopedTimer timer(pInstance->RunLength.timerEncode);
timer("Scan Valid Flags");
// run prefix sum on symbol non-zero flags to get output indices
//TODO ballot scan!
scanArray<Symbol, uint, true, FunctorFlagTrue<Symbol, uint> >(dpOutputIndices, pdpSymbols[stream], pSymbolCount[stream] + 1, pInstance->m_pScanPlan, pInstance->m_stream);
cudaCheckMsg("runLengthEncode: Error in scanArray");
// last element of outputindices == compact symbol count, start readback
uint* dpCompactSymbolCount = dpOutputIndices + pSymbolCount[stream];
cudaSafeCall(cudaMemcpyAsync(pInstance->RunLength.pReadback + stream, dpCompactSymbolCount, sizeof(uint), cudaMemcpyDeviceToHost, pInstance->m_stream));
cudaSafeCall(cudaEventRecord(pInstance->RunLength.syncEventsReadback[stream], pInstance->m_stream));
}
for(uint stream = 0; stream < streamCount; stream++) {
uint* dpOutputIndices = dpOutputIndicesAll + stream * outputIndicesStride;
uint* dpCompactSymbolCount = dpOutputIndices + pSymbolCount[stream];
util::CudaScopedTimer timer(pInstance->RunLength.timerEncode);
timer("Get Valid Symbol Indices");
// get indices of valid (non-zero) symbols
blockSize = 256;
blockCount = min((pSymbolCount[stream] + blockSize - 1) / blockSize, 256u);
runLengthEncodeGetValidSymbolIndices<<<blockCount, blockSize, 0, pInstance->m_stream>>>(dpOutputIndices, dpValidSymbolIndices, pSymbolCount[stream]);
cudaCheckMsg("runLengthEncodeGetValidSymbolIndices execution failed");
timer("Get # Extra Zeros");
// compute number of extra zero symbols to insert in order to respect zeroCountMax
// choose blockCount based on original (non-compact) symbol count, so we can wait a bit longer before syncing on the download
blockSize = 256;
blockCount = min((pSymbolCount/*Compact*/[stream] + blockSize - 1) / blockSize, 256u);
runLengthEncodeExtraZeroSymbolCountsKernel<<<blockCount, blockSize, 0, pInstance->m_stream>>>(dpValidSymbolIndices, dpOutputIndices, dpCompactSymbolCount, zeroCountMax);
cudaCheckMsg("runLengthEncodeExtraZeroSymbolCountsKernel execution failed");
timer("Sync Readback");
// wait for download of compacted symbol count - need it for the next scan
cudaSafeCall(cudaEventSynchronize(pInstance->RunLength.syncEventsReadback[stream]));
pSymbolCountCompact[stream] = pInstance->RunLength.pReadback[stream];
timer("Scan # Extra Zeros");
// run prefix sum on extra zero symbol counts to get output offsets
scanArray<uint, uint, true>(dpOutputIndices, dpOutputIndices, pSymbolCountCompact[stream] + 1, pInstance->m_pScanPlan, pInstance->m_stream);
cudaCheckMsg("runLengthEncode: Error in scanArray");
timer("Download # Extra Zeros");
// last write offset == total number of extra zeroes to be inserted
cudaSafeCall(cudaMemcpyAsync(dpScanTotal + stream, dpOutputIndices + pSymbolCountCompact[stream], sizeof(uint), cudaMemcpyDeviceToDevice, pInstance->m_stream));
// if this was the last stream, start readback to cpu
if(stream == streamCount - 1) {
cudaSafeCall(cudaMemcpyAsync(pInstance->RunLength.pReadback, dpScanTotal, streamCount * sizeof(uint), cudaMemcpyDeviceToHost, pInstance->m_stream));
cudaSafeCall(cudaEventRecord(pInstance->RunLength.syncEventsReadback[0], pInstance->m_stream));
}
// if there are no non-zero symbols, we can bail out here
if(pSymbolCountCompact[stream] == 0) {
continue;
}
timer("Compact");
// copy non-zero symbols to output, pad with extra zero symbols where necessary
blockSize = 256;
blockCount = min((pSymbolCountCompact[stream] + blockSize - 1) / blockSize, 256u);
runLengthEncodeCompactKernel<Symbol><<<blockCount, blockSize, 0, pInstance->m_stream>>>(pdpSymbols[stream], dpValidSymbolIndices, dpOutputIndices, pdpSymbolsCompact[stream], pdpZeroCounts[stream], pSymbolCountCompact[stream], zeroCountMax);
cudaCheckMsg("runLengthEncodeCompactKernel execution failed");
}
// add extra zeros to compacted symbol count
cudaSafeCall(cudaEventSynchronize(pInstance->RunLength.syncEventsReadback[0]));
for(uint stream = 0; stream < streamCount; stream++) {
pSymbolCountCompact[stream] += pInstance->RunLength.pReadback[stream];
}
pInstance->releaseBuffers(3);
return true;
}
template<typename Symbol>
bool runLengthDecode(Instance* pInstance, const Symbol** pdpSymbolsCompact, const Symbol** pdpZeroCounts, const uint* pSymbolCountCompact, Symbol** pdpSymbols, const uint* pSymbolCount, uint streamCount)
{
assert(streamCount <= pInstance->m_streamCountMax);
uint symbolCountMax = pInstance->m_elemCountPerStreamMax;
uint* dpValidSymbolIndices = pInstance->getBuffer<uint>(streamCount * symbolCountMax);
byte* dpUploads = pInstance->getBuffer<byte>(streamCount * (sizeof(Symbol*) + sizeof(uint)));
util::CudaScopedTimer timer(pInstance->RunLength.timerDecode);
{
timer("Scan Zero Counts");
for(uint i = 0; i < streamCount; i++) {
// if there are no symbols, we're done here
if(pSymbolCountCompact[i] == 0) {
continue;
}
// run prefix sum on zero counts to get valid symbol indices
assert(pSymbolCountCompact[i] < symbolCountMax);
scanArray<Symbol, uint, false>(dpValidSymbolIndices + i * symbolCountMax, pdpZeroCounts[i], pSymbolCountCompact[i], pInstance->m_pScanPlan, pInstance->m_stream);
cudaCheckMsg("runLengthDecode: Error in scanArray");
}
}
uint symbolCountCompactMax = 0;
for(uint i = 0; i < streamCount; i++) {
symbolCountCompactMax = max(symbolCountCompactMax, pSymbolCountCompact[i]);
}
if(symbolCountCompactMax > 0)
{
timer("Scatter Symbols");
// upload symbol stream pointers and compact symbol counts
Symbol** ppSymbolsCompactUpload = (Symbol**)pInstance->RunLength.pUpload;
Symbol** ppSymbolsUpload = (Symbol**)(ppSymbolsCompactUpload + streamCount);
uint* pSymbolCountCompactUpload = (uint*) (ppSymbolsUpload + streamCount);
cudaSafeCall(cudaEventSynchronize(pInstance->RunLength.syncEventUpload));
memcpy(ppSymbolsCompactUpload, pdpSymbolsCompact, streamCount * sizeof(Symbol*));
memcpy(ppSymbolsUpload, pdpSymbols, streamCount * sizeof(Symbol*));
memcpy(pSymbolCountCompactUpload, pSymbolCountCompact, streamCount * sizeof(uint));
cudaSafeCall(cudaMemcpyAsync(dpUploads, pInstance->RunLength.pUpload, streamCount * (2 * sizeof(Symbol*) + sizeof(uint)), cudaMemcpyHostToDevice, pInstance->m_stream));
cudaSafeCall(cudaEventRecord(pInstance->RunLength.syncEventUpload, pInstance->m_stream));
// expand symbol stream - scattered write of non-zero symbols
Symbol** dppSymbolsCompact = (Symbol**)dpUploads;
Symbol** dppSymbols = (Symbol**)(dppSymbolsCompact + streamCount);
uint* dpSymbolCountCompact = (uint*) (dppSymbols + streamCount);
uint blockSize = 256;
dim3 blockCount(min((symbolCountCompactMax + blockSize - 1) / blockSize, 256u), streamCount);
runLengthDecodeMultiScatterKernel<<<blockCount, blockSize, 0, pInstance->m_stream>>>((const Symbol**)dppSymbolsCompact, dpValidSymbolIndices, symbolCountMax, dpSymbolCountCompact, dppSymbols);
cudaCheckMsg("runLengthDecodeMultiScatterKernel execution failed");
}
pInstance->releaseBuffers(2);
return true;
}
template<typename Symbol>
bool runLengthDecode(Instance* pInstance, const Symbol* dpSymbolsCompact, const Symbol* dpZeroCounts, const uint* pSymbolCountCompact, uint stride, Symbol** pdpSymbols, uint symbolCount, uint streamCount)
{
assert(streamCount <= pInstance->m_streamCountMax);
//assert(stride <= pInstance->m_elemCountPerStreamMax);
//TODO make version of scanArray that takes separate input and output stride, and then alloc only streamCount * symbolCount here
uint* dpValidSymbolIndices = pInstance->getBuffer<uint>(streamCount * stride);
byte* dpUploads = pInstance->getBuffer<byte>(streamCount * (sizeof(Symbol*) + sizeof(uint)));
util::CudaScopedTimer timer(pInstance->RunLength.timerDecode);
uint symbolCountCompactMax = 0;
for(uint i = 0; i < streamCount; i++) {
symbolCountCompactMax = max(symbolCountCompactMax, pSymbolCountCompact[i]);
}
if(symbolCountCompactMax > 0) {
timer("Scan Zero Counts");
// run prefix sum on zero counts to get valid symbol indices
// combine scans below cutoff into multi-row scans
const uint cutoff = 64 * 1024; // chosen quite arbitrarily; TODO: benchmark scanArray...
for(uint streamStart = 0; streamStart < streamCount; ) {
uint elemCount = pSymbolCountCompact[streamStart];
if(elemCount == 0) { streamStart++; continue; }
uint streamEnd = streamStart + 1;
if(elemCount <= cutoff) {
while(streamEnd < streamCount && pSymbolCountCompact[streamEnd] <= cutoff) {
elemCount = max(elemCount, pSymbolCountCompact[streamEnd]);
streamEnd++;
}
}
if(elemCount > 0) {
uint offset = streamStart * stride;
scanArray<Symbol, uint, false>(dpValidSymbolIndices + offset, dpZeroCounts + offset, elemCount, streamEnd - streamStart, stride, pInstance->m_pScanPlan, pInstance->m_stream);
cudaCheckMsg("runLengthDecode: Error in scanArray");
}
streamStart = streamEnd;
}
//// simple version that just scans all streams at once
//scanArray<Symbol, uint, false>(dpValidSymbolIndices, dpZeroCounts, symbolCountCompactMax, streamCount, stride, pInstance->m_pScanPlan, pInstance->m_stream);
//cudaCheckMsg("runLengthDecode: Error in scanArray");
timer("Scatter Symbols");
// upload symbol stream pointers and compact symbol counts
Symbol** ppSymbolsUpload = (Symbol**)pInstance->RunLength.pUpload;
uint* pSymbolCountCompactUpload = (uint*)(ppSymbolsUpload + streamCount);
cudaSafeCall(cudaEventSynchronize(pInstance->RunLength.syncEventUpload));
memcpy(ppSymbolsUpload, pdpSymbols, streamCount * sizeof(Symbol*));
memcpy(pSymbolCountCompactUpload, pSymbolCountCompact, streamCount * sizeof(uint));
cudaSafeCall(cudaMemcpyAsync(dpUploads, pInstance->RunLength.pUpload, streamCount * (sizeof(Symbol*) + sizeof(uint)), cudaMemcpyHostToDevice, pInstance->m_stream));
cudaSafeCall(cudaEventRecord(pInstance->RunLength.syncEventUpload, pInstance->m_stream));
// expand symbol stream - scattered write of non-zero symbols
Symbol** dppSymbols = (Symbol**)dpUploads;
uint* dpSymbolCountCompact = (uint*)(dppSymbols + streamCount);
uint blockSize = 256;
dim3 blockCount(min((symbolCountCompactMax + blockSize - 1) / blockSize, 256u), streamCount);
runLengthDecodeMultiScatterKernel<<<blockCount, blockSize, 0, pInstance->m_stream>>>(dpSymbolsCompact, stride, dpValidSymbolIndices, stride, dpSymbolCountCompact, dppSymbols);
cudaCheckMsg("runLengthDecodeMultiScatterKernel execution failed");
}
pInstance->releaseBuffers(2);
return true;
}
bool runLengthEncode(Instance* pInstance, Symbol16** pdpSymbolsCompact, Symbol16** pdpZeroCounts, const Symbol16** pdpSymbols, const uint* pSymbolCount, uint streamCount, uint zeroCountMax, uint* pSymbolCountCompact)
{
return runLengthEncode<Symbol16>(pInstance, pdpSymbolsCompact, pdpZeroCounts, pdpSymbols, pSymbolCount, streamCount, zeroCountMax, pSymbolCountCompact);
}
bool runLengthDecode(Instance* pInstance, const Symbol16** pdpSymbolsCompact, const Symbol16** pdpZeroCounts, const uint* pSymbolCountCompact, Symbol16** pdpSymbols, const uint* pSymbolCount, uint streamCount)
{
return runLengthDecode<Symbol16>(pInstance, pdpSymbolsCompact, pdpZeroCounts, pSymbolCountCompact, pdpSymbols, pSymbolCount, streamCount);
}
bool runLengthDecode(Instance* pInstance, const Symbol16* dpSymbolsCompact, const Symbol16* dpZeroCounts, const uint* pSymbolCountCompact, uint stride, Symbol16** pdpSymbols, uint symbolCount, uint streamCount)
{
return runLengthDecode<Symbol16>(pInstance, dpSymbolsCompact, dpZeroCounts, pSymbolCountCompact, stride, pdpSymbols, symbolCount, streamCount);
}
bool runLengthEncode(Instance* pInstance, Symbol32** pdpSymbolsCompact, Symbol32** pdpZeroCounts, const Symbol32** pdpSymbols, const uint* pSymbolCount, uint streamCount, uint zeroCountMax, uint* pSymbolCountCompact)
{
return runLengthEncode<Symbol32>(pInstance, pdpSymbolsCompact, pdpZeroCounts, pdpSymbols, pSymbolCount, streamCount, zeroCountMax, pSymbolCountCompact);
}
bool runLengthDecode(Instance* pInstance, const Symbol32** pdpSymbolsCompact, const Symbol32** pdpZeroCounts, const uint* pSymbolCountCompact, Symbol32** pdpSymbols, const uint* pSymbolCount, uint streamCount)
{
return runLengthDecode<Symbol32>(pInstance, pdpSymbolsCompact, pdpZeroCounts, pSymbolCountCompact, pdpSymbols, pSymbolCount, streamCount);
}
bool runLengthDecode(Instance* pInstance, const Symbol32* dpSymbolsCompact, const Symbol32* dpZeroCounts, const uint* pSymbolCountCompact, uint stride, Symbol32** pdpSymbols, uint symbolCount, uint streamCount)
{
return runLengthDecode<Symbol32>(pInstance, dpSymbolsCompact, dpZeroCounts, pSymbolCountCompact, stride, pdpSymbols, symbolCount, streamCount);
}
}
|
effa9f1144c83830718b1754cc4d19cda4418b84.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "CuDeviceArrayCopyFromTexture.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float3 *dst = NULL;
hipMalloc(&dst, XSIZE*YSIZE);
int dstStep = 1;
int width = XSIZE;
int height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(( CuDeviceArrayCopyFromTexture), dim3(gridBlock),dim3(threadBlock), 0, 0, dst,dstStep,width,height);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(( CuDeviceArrayCopyFromTexture), dim3(gridBlock),dim3(threadBlock), 0, 0, dst,dstStep,width,height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(( CuDeviceArrayCopyFromTexture), dim3(gridBlock),dim3(threadBlock), 0, 0, dst,dstStep,width,height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | effa9f1144c83830718b1754cc4d19cda4418b84.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "CuDeviceArrayCopyFromTexture.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float3 *dst = NULL;
cudaMalloc(&dst, XSIZE*YSIZE);
int dstStep = 1;
int width = XSIZE;
int height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
CuDeviceArrayCopyFromTexture<<<gridBlock,threadBlock>>>(dst,dstStep,width,height);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
CuDeviceArrayCopyFromTexture<<<gridBlock,threadBlock>>>(dst,dstStep,width,height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
CuDeviceArrayCopyFromTexture<<<gridBlock,threadBlock>>>(dst,dstStep,width,height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
92155eb4ba6f11b1ecac25a2a0e2639e53c0828e.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2009-2016 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorHPMCMonoGPU.cuh"
#include "IntegratorHPMCMonoImplicitGPU.cuh"
#include "IntegratorHPMCMonoImplicitNewGPU.cuh"
#include "ShapeConvexPolyhedron.h"
namespace hpmc
{
namespace detail
{
//! HPMC kernels for ShapeConvexPolyhedron
template hipError_t gpu_hpmc_free_volume<ShapeConvexPolyhedron >(const hpmc_free_volume_args_t &args,
const typename ShapeConvexPolyhedron ::param_type *d_params);
template hipError_t gpu_hpmc_update<ShapeConvexPolyhedron >(const hpmc_args_t& args,
const typename ShapeConvexPolyhedron ::param_type *d_params);
template hipError_t gpu_hpmc_implicit_count_overlaps<ShapeConvexPolyhedron >(const hpmc_implicit_args_t& args,
const typename ShapeConvexPolyhedron ::param_type *d_params);
template hipError_t gpu_hpmc_implicit_accept_reject<ShapeConvexPolyhedron >(const hpmc_implicit_args_t& args,
const typename ShapeConvexPolyhedron ::param_type *d_params);
template hipError_t gpu_hpmc_insert_depletants_queue<ShapeConvexPolyhedron >(const hpmc_implicit_args_new_t& args,
const typename ShapeConvexPolyhedron ::param_type *d_params);
template hipError_t gpu_hpmc_implicit_accept_reject_new<ShapeConvexPolyhedron >(const hpmc_implicit_args_new_t& args,
const typename ShapeConvexPolyhedron ::param_type *d_params);
}; // end namespace detail
} // end namespace hpmc
| 92155eb4ba6f11b1ecac25a2a0e2639e53c0828e.cu | // Copyright (c) 2009-2016 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorHPMCMonoGPU.cuh"
#include "IntegratorHPMCMonoImplicitGPU.cuh"
#include "IntegratorHPMCMonoImplicitNewGPU.cuh"
#include "ShapeConvexPolyhedron.h"
namespace hpmc
{
namespace detail
{
//! HPMC kernels for ShapeConvexPolyhedron
template cudaError_t gpu_hpmc_free_volume<ShapeConvexPolyhedron >(const hpmc_free_volume_args_t &args,
const typename ShapeConvexPolyhedron ::param_type *d_params);
template cudaError_t gpu_hpmc_update<ShapeConvexPolyhedron >(const hpmc_args_t& args,
const typename ShapeConvexPolyhedron ::param_type *d_params);
template cudaError_t gpu_hpmc_implicit_count_overlaps<ShapeConvexPolyhedron >(const hpmc_implicit_args_t& args,
const typename ShapeConvexPolyhedron ::param_type *d_params);
template cudaError_t gpu_hpmc_implicit_accept_reject<ShapeConvexPolyhedron >(const hpmc_implicit_args_t& args,
const typename ShapeConvexPolyhedron ::param_type *d_params);
template cudaError_t gpu_hpmc_insert_depletants_queue<ShapeConvexPolyhedron >(const hpmc_implicit_args_new_t& args,
const typename ShapeConvexPolyhedron ::param_type *d_params);
template cudaError_t gpu_hpmc_implicit_accept_reject_new<ShapeConvexPolyhedron >(const hpmc_implicit_args_new_t& args,
const typename ShapeConvexPolyhedron ::param_type *d_params);
}; // end namespace detail
} // end namespace hpmc
|
1a84b9299a89c4d126c755ce323726bbdfd32bd5.hip | // !!! This is a file automatically generated by hipify!!!
/*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2008-2011 Ames Laboratory
Iowa State University and The Regents of the University of Michigan All rights
reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: joaander / Anyone is free to add their own pair potentials here
/*! \file AllDriverPotentialBondGPU.cu
\brief Defines the driver functions for computing all types of bond forces on the GPU
*/
#include "EvaluatorBondFENENOLJ.h"
#include "AllDriverPotentialBondExtGPU_hip.cuh"
hipError_t gpu_compute_fenenolj_forces(const bond_args_t& bond_args,
const float3 *d_params,
unsigned int *d_flags)
{
return gpu_compute_bond_forces<EvaluatorBondFENENOLJ>(bond_args,
d_params,
d_flags);
}
| 1a84b9299a89c4d126c755ce323726bbdfd32bd5.cu | /*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2008-2011 Ames Laboratory
Iowa State University and The Regents of the University of Michigan All rights
reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: joaander / Anyone is free to add their own pair potentials here
/*! \file AllDriverPotentialBondGPU.cu
\brief Defines the driver functions for computing all types of bond forces on the GPU
*/
#include "EvaluatorBondFENENOLJ.h"
#include "AllDriverPotentialBondExtGPU.cuh"
cudaError_t gpu_compute_fenenolj_forces(const bond_args_t& bond_args,
const float3 *d_params,
unsigned int *d_flags)
{
return gpu_compute_bond_forces<EvaluatorBondFENENOLJ>(bond_args,
d_params,
d_flags);
}
|
557528c31f9de8e75395572ee6f43a4d1b0979ef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright 2017-2020 by Contributors
*/
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <GPUTreeShap/gpu_treeshap.h>
#include <memory>
#include "xgboost/data.h"
#include "xgboost/predictor.h"
#include "xgboost/tree_model.h"
#include "xgboost/tree_updater.h"
#include "xgboost/host_device_vector.h"
#include "../gbm/gbtree_model.h"
#include "../data/ellpack_page.cuh"
#include "../data/device_adapter.cuh"
#include "../common/common.h"
#include "../common/bitfield.h"
#include "../common/categorical.h"
#include "../common/device_helpers.cuh"
namespace xgboost {
namespace predictor {
DMLC_REGISTRY_FILE_TAG(gpu_predictor);
struct SparsePageView {
common::Span<const Entry> d_data;
common::Span<const bst_row_t> d_row_ptr;
bst_feature_t num_features;
SparsePageView() = default;
XGBOOST_DEVICE SparsePageView(common::Span<const Entry> data,
common::Span<const bst_row_t> row_ptr,
bst_feature_t num_features)
: d_data{data}, d_row_ptr{row_ptr}, num_features(num_features) {}
__device__ float GetElement(size_t ridx, size_t fidx) const {
// Binary search
auto begin_ptr = d_data.begin() + d_row_ptr[ridx];
auto end_ptr = d_data.begin() + d_row_ptr[ridx + 1];
if (end_ptr - begin_ptr == this->NumCols()) {
// Bypass span check for dense data
return d_data.data()[d_row_ptr[ridx] + fidx].fvalue;
}
common::Span<const Entry>::iterator previous_middle;
while (end_ptr != begin_ptr) {
auto middle = begin_ptr + (end_ptr - begin_ptr) / 2;
if (middle == previous_middle) {
break;
} else {
previous_middle = middle;
}
if (middle->index == fidx) {
return middle->fvalue;
} else if (middle->index < fidx) {
begin_ptr = middle;
} else {
end_ptr = middle;
}
}
// Value is missing
return nanf("");
}
XGBOOST_DEVICE size_t NumRows() const { return d_row_ptr.size() - 1; }
XGBOOST_DEVICE size_t NumCols() const { return num_features; }
};
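// Layout note (descriptive comment added for clarity): SparsePageView is a CSR view.
// d_row_ptr holds NumRows()+1 offsets into d_data, and the Entry elements of a row are
// assumed to be sorted by feature index -- that ordering is what makes the binary search
// in GetElement() valid. Fully dense rows (row length == num_features) skip the search
// and address the entry directly as d_data[d_row_ptr[ridx] + fidx].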
struct SparsePageLoader {
bool use_shared;
SparsePageView data;
float* smem;
size_t entry_start;
__device__ SparsePageLoader(SparsePageView data, bool use_shared, bst_feature_t num_features,
bst_row_t num_rows, size_t entry_start)
: use_shared(use_shared),
data(data),
entry_start(entry_start) {
extern __shared__ float _smem[];
smem = _smem;
// Copy instances
if (use_shared) {
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
int shared_elements = blockDim.x * data.num_features;
dh::BlockFill(smem, shared_elements, nanf(""));
__syncthreads();
if (global_idx < num_rows) {
bst_uint elem_begin = data.d_row_ptr[global_idx];
bst_uint elem_end = data.d_row_ptr[global_idx + 1];
for (bst_uint elem_idx = elem_begin; elem_idx < elem_end; elem_idx++) {
Entry elem = data.d_data[elem_idx - entry_start];
smem[threadIdx.x * data.num_features + elem.index] = elem.fvalue;
}
}
__syncthreads();
}
}
__device__ float GetElement(size_t ridx, size_t fidx) const {
if (use_shared) {
return smem[threadIdx.x * data.num_features + fidx];
} else {
return data.GetElement(ridx, fidx);
}
}
};
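// Shared-memory note (descriptive comment added for clarity): when use_shared is set,
// each thread stages its own row at smem[threadIdx.x * num_features + fidx], so the
// kernel must be launched with blockDim.x * num_features floats of dynamic shared
// memory; entries absent from the sparse row stay NaN and are treated as missing.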
struct EllpackLoader {
EllpackDeviceAccessor const& matrix;
XGBOOST_DEVICE EllpackLoader(EllpackDeviceAccessor const& m, bool,
bst_feature_t, bst_row_t, size_t)
: matrix{m} {}
__device__ __forceinline__ float GetElement(size_t ridx, size_t fidx) const {
auto gidx = matrix.GetBinIndex(ridx, fidx);
if (gidx == -1) {
return nan("");
}
// The gradient index needs to be shifted by one as min values are not included in the
// cuts.
if (gidx == matrix.feature_segments[fidx]) {
return matrix.min_fvalue[fidx];
}
return matrix.gidx_fvalue_map[gidx - 1];
}
};
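// Worked example (descriptive comment added for clarity; numbers are hypothetical):
// suppose feature fidx has cut values {0.5, 1.5, 2.5} occupying gidx_fvalue_map starting
// at feature_segments[fidx], and min_fvalue[fidx] == 0.1. A bin index equal to
// feature_segments[fidx] (the feature's first bin) maps back to 0.1, while
// feature_segments[fidx] + 2 maps to gidx_fvalue_map[feature_segments[fidx] + 1] == 1.5,
// i.e. the lower edge of that bin. GetElement() therefore recovers a representative
// value inside the original bin rather than the exact training-time value.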
template <typename Batch>
struct DeviceAdapterLoader {
Batch batch;
bst_feature_t columns;
float* smem;
bool use_shared;
using BatchT = Batch;
XGBOOST_DEV_INLINE DeviceAdapterLoader(Batch const batch, bool use_shared,
bst_feature_t num_features, bst_row_t num_rows,
size_t entry_start) :
batch{batch},
columns{num_features},
use_shared{use_shared} {
extern __shared__ float _smem[];
smem = _smem;
if (use_shared) {
uint32_t global_idx = blockDim.x * blockIdx.x + threadIdx.x;
size_t shared_elements = blockDim.x * num_features;
dh::BlockFill(smem, shared_elements, nanf(""));
__syncthreads();
if (global_idx < num_rows) {
auto beg = global_idx * columns;
auto end = (global_idx + 1) * columns;
for (size_t i = beg; i < end; ++i) {
smem[threadIdx.x * num_features + (i - beg)] = batch.GetElement(i).value;
}
}
}
__syncthreads();
}
XGBOOST_DEV_INLINE float GetElement(size_t ridx, size_t fidx) const {
if (use_shared) {
return smem[threadIdx.x * columns + fidx];
}
return batch.GetElement(ridx * columns + fidx).value;
}
};
template <typename Loader>
__device__ float GetLeafWeight(bst_row_t ridx, const RegTree::Node* tree,
common::Span<FeatureType const> split_types,
common::Span<RegTree::Segment const> d_cat_ptrs,
common::Span<uint32_t const> d_categories,
Loader* loader) {
bst_node_t nidx = 0;
RegTree::Node n = tree[nidx];
while (!n.IsLeaf()) {
float fvalue = loader->GetElement(ridx, n.SplitIndex());
// Missing value
if (common::CheckNAN(fvalue)) {
nidx = n.DefaultChild();
} else {
bool go_left = true;
if (common::IsCat(split_types, nidx)) {
auto categories = d_categories.subspan(d_cat_ptrs[nidx].beg,
d_cat_ptrs[nidx].size);
go_left = Decision(categories, common::AsCat(fvalue));
} else {
go_left = fvalue < n.SplitCond();
}
if (go_left) {
nidx = n.LeftChild();
} else {
nidx = n.RightChild();
}
}
n = tree[nidx];
}
return tree[nidx].LeafValue();
}
template <typename Loader>
__device__ bst_node_t GetLeafIndex(bst_row_t ridx, const RegTree::Node* tree,
Loader const& loader) {
bst_node_t nidx = 0;
RegTree::Node n = tree[nidx];
while (!n.IsLeaf()) {
float fvalue = loader.GetElement(ridx, n.SplitIndex());
// Missing value
if (isnan(fvalue)) {
nidx = n.DefaultChild();
n = tree[nidx];
} else {
if (fvalue < n.SplitCond()) {
nidx = n.LeftChild();
n = tree[nidx];
} else {
nidx = n.RightChild();
n = tree[nidx];
}
}
}
return nidx;
}
template <typename Loader, typename Data>
__global__ void PredictLeafKernel(Data data,
common::Span<const RegTree::Node> d_nodes,
common::Span<float> d_out_predictions,
common::Span<size_t const> d_tree_segments,
size_t tree_begin, size_t tree_end, size_t num_features,
size_t num_rows, size_t entry_start, bool use_shared) {
bst_row_t ridx = blockDim.x * blockIdx.x + threadIdx.x;
if (ridx >= num_rows) {
return;
}
Loader loader(data, use_shared, num_features, num_rows, entry_start);
for (int tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
const RegTree::Node* d_tree = &d_nodes[d_tree_segments[tree_idx - tree_begin]];
auto leaf = GetLeafIndex(ridx, d_tree, loader);
d_out_predictions[ridx * (tree_end - tree_begin) + tree_idx] = leaf;
}
}
template <typename Loader, typename Data>
__global__ void
PredictKernel(Data data, common::Span<const RegTree::Node> d_nodes,
common::Span<float> d_out_predictions,
common::Span<size_t const> d_tree_segments,
common::Span<int const> d_tree_group,
common::Span<FeatureType const> d_tree_split_types,
common::Span<uint32_t const> d_cat_tree_segments,
common::Span<RegTree::Segment const> d_cat_node_segments,
common::Span<uint32_t const> d_categories, size_t tree_begin,
size_t tree_end, size_t num_features, size_t num_rows,
size_t entry_start, bool use_shared, int num_group) {
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
Loader loader(data, use_shared, num_features, num_rows, entry_start);
if (global_idx >= num_rows) return;
if (num_group == 1) {
float sum = 0;
for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
const RegTree::Node* d_tree =
&d_nodes[d_tree_segments[tree_idx - tree_begin]];
auto tree_cat_ptrs = d_cat_node_segments.subspan(
d_tree_segments[tree_idx - tree_begin],
d_tree_segments[tree_idx - tree_begin + 1] -
d_tree_segments[tree_idx - tree_begin]);
auto tree_categories =
d_categories.subspan(d_cat_tree_segments[tree_idx - tree_begin],
d_cat_tree_segments[tree_idx - tree_begin + 1] -
d_cat_tree_segments[tree_idx - tree_begin]);
auto tree_split_types =
d_tree_split_types.subspan(d_tree_segments[tree_idx - tree_begin],
d_tree_segments[tree_idx - tree_begin + 1] -
d_tree_segments[tree_idx - tree_begin]);
float leaf = GetLeafWeight(global_idx, d_tree, tree_split_types,
tree_cat_ptrs,
tree_categories,
&loader);
sum += leaf;
}
d_out_predictions[global_idx] += sum;
} else {
for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
int tree_group = d_tree_group[tree_idx];
const RegTree::Node* d_tree =
&d_nodes[d_tree_segments[tree_idx - tree_begin]];
bst_uint out_prediction_idx = global_idx * num_group + tree_group;
auto tree_cat_ptrs = d_cat_node_segments.subspan(
d_tree_segments[tree_idx - tree_begin],
d_tree_segments[tree_idx - tree_begin + 1] -
d_tree_segments[tree_idx - tree_begin]);
auto tree_categories =
d_categories.subspan(d_cat_tree_segments[tree_idx - tree_begin],
d_cat_tree_segments[tree_idx - tree_begin + 1] -
d_cat_tree_segments[tree_idx - tree_begin]);
d_out_predictions[out_prediction_idx] +=
GetLeafWeight(global_idx, d_tree, d_tree_split_types,
tree_cat_ptrs,
tree_categories,
&loader);
}
}
}
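// Device-resident, flattened copy of the tree ensemble used by the prediction
// kernels: node and stat arrays, per-tree segments, tree-to-group mapping and
// the categorical split data.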
class DeviceModel {
public:
// Need to lazily construct the vectors because GPU id is only known at runtime
HostDeviceVector<RTreeNodeStat> stats;
HostDeviceVector<size_t> tree_segments;
HostDeviceVector<RegTree::Node> nodes;
HostDeviceVector<int> tree_group;
HostDeviceVector<FeatureType> split_types;
  // Per-tree offsets segmenting the categories array.
  HostDeviceVector<uint32_t> categories_tree_segments;
  // Per-node (beg, size) segments into the categories array.
  HostDeviceVector<RegTree::Segment> categories_node_segments;
HostDeviceVector<uint32_t> categories;
size_t tree_beg_; // NOLINT
size_t tree_end_; // NOLINT
int num_group;
void Init(const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end, int32_t gpu_id) {
dh::safe_cuda(hipSetDevice(gpu_id));
CHECK_EQ(model.param.size_leaf_vector, 0);
// Copy decision trees to device
tree_segments = std::move(HostDeviceVector<size_t>({}, gpu_id));
auto& h_tree_segments = tree_segments.HostVector();
h_tree_segments.reserve((tree_end - tree_begin) + 1);
size_t sum = 0;
h_tree_segments.push_back(sum);
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
sum += model.trees.at(tree_idx)->GetNodes().size();
h_tree_segments.push_back(sum);
}
nodes = std::move(HostDeviceVector<RegTree::Node>(h_tree_segments.back(), RegTree::Node(),
gpu_id));
stats = std::move(HostDeviceVector<RTreeNodeStat>(h_tree_segments.back(),
RTreeNodeStat(), gpu_id));
auto d_nodes = nodes.DevicePointer();
auto d_stats = stats.DevicePointer();
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
auto& src_nodes = model.trees.at(tree_idx)->GetNodes();
auto& src_stats = model.trees.at(tree_idx)->GetStats();
dh::safe_cuda(hipMemcpyAsync(
d_nodes + h_tree_segments[tree_idx - tree_begin], src_nodes.data(),
sizeof(RegTree::Node) * src_nodes.size(), hipMemcpyDefault));
dh::safe_cuda(hipMemcpyAsync(
d_stats + h_tree_segments[tree_idx - tree_begin], src_stats.data(),
sizeof(RTreeNodeStat) * src_stats.size(), hipMemcpyDefault));
}
tree_group = std::move(HostDeviceVector<int>(model.tree_info.size(), 0, gpu_id));
auto& h_tree_group = tree_group.HostVector();
std::memcpy(h_tree_group.data(), model.tree_info.data(), sizeof(int) * model.tree_info.size());
// Initialize categorical splits.
split_types.SetDevice(gpu_id);
std::vector<FeatureType>& h_split_types = split_types.HostVector();
h_split_types.resize(h_tree_segments.back());
for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
auto const& src_st = model.trees.at(tree_idx)->GetSplitTypes();
std::copy(src_st.cbegin(), src_st.cend(),
h_split_types.begin() + h_tree_segments[tree_idx - tree_begin]);
}
categories = HostDeviceVector<uint32_t>({}, gpu_id);
categories_tree_segments = HostDeviceVector<uint32_t>(1, 0, gpu_id);
std::vector<uint32_t> &h_categories = categories.HostVector();
std::vector<uint32_t> &h_split_cat_segments = categories_tree_segments.HostVector();
for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
auto const& src_cats = model.trees.at(tree_idx)->GetSplitCategories();
size_t orig_size = h_categories.size();
h_categories.resize(orig_size + src_cats.size());
std::copy(src_cats.cbegin(), src_cats.cend(),
h_categories.begin() + orig_size);
h_split_cat_segments.push_back(h_categories.size());
}
categories_node_segments =
HostDeviceVector<RegTree::Segment>(h_tree_segments.back(), {}, gpu_id);
std::vector<RegTree::Segment> &h_categories_node_segments =
categories_node_segments.HostVector();
for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
auto const &src_cats_ptr = model.trees.at(tree_idx)->GetSplitCategoriesPtr();
std::copy(src_cats_ptr.cbegin(), src_cats_ptr.cend(),
h_categories_node_segments.begin() +
h_tree_segments[tree_idx - tree_begin]);
}
this->tree_beg_ = tree_begin;
this->tree_end_ = tree_end;
this->num_group = model.learner_model_param->num_output_group;
}
};
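// Per-leaf bookkeeping used while unrolling root-to-leaf paths for GPUTreeShap.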
struct PathInfo {
int64_t leaf_position; // -1 not a leaf
size_t length;
size_t tree_idx;
};
// Transform model into path element form for GPUTreeShap
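// A first pass computes the length of each root-to-leaf path, an exclusive
// scan turns the lengths into output offsets, and a second pass writes the
// PathElement entries (split bounds, cover fractions and leaf value).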
void ExtractPaths(dh::device_vector<gpu_treeshap::PathElement>* paths,
const gbm::GBTreeModel& model, size_t tree_limit,
int gpu_id) {
DeviceModel device_model;
device_model.Init(model, 0, tree_limit, gpu_id);
dh::caching_device_vector<PathInfo> info(device_model.nodes.Size());
dh::XGBCachingDeviceAllocator<PathInfo> alloc;
auto d_nodes = device_model.nodes.ConstDeviceSpan();
auto d_tree_segments = device_model.tree_segments.ConstDeviceSpan();
auto nodes_transform = dh::MakeTransformIterator<PathInfo>(
thrust::make_counting_iterator(0ull), [=] __device__(size_t idx) {
auto n = d_nodes[idx];
if (!n.IsLeaf() || n.IsDeleted()) {
return PathInfo{-1, 0, 0};
}
size_t tree_idx =
dh::SegmentId(d_tree_segments.begin(), d_tree_segments.end(), idx);
size_t tree_offset = d_tree_segments[tree_idx];
size_t path_length = 1;
while (!n.IsRoot()) {
n = d_nodes[n.Parent() + tree_offset];
path_length++;
}
return PathInfo{int64_t(idx), path_length, tree_idx};
});
auto end = thrust::copy_if(
thrust::hip::par(alloc), nodes_transform,
nodes_transform + d_nodes.size(), info.begin(),
[=] __device__(const PathInfo& e) { return e.leaf_position != -1; });
info.resize(end - info.begin());
auto length_iterator = dh::MakeTransformIterator<size_t>(
info.begin(),
[=] __device__(const PathInfo& info) { return info.length; });
dh::caching_device_vector<size_t> path_segments(info.size() + 1);
thrust::exclusive_scan(thrust::hip::par(alloc), length_iterator,
length_iterator + info.size() + 1,
path_segments.begin());
paths->resize(path_segments.back());
auto d_paths = paths->data().get();
auto d_info = info.data().get();
auto d_stats = device_model.stats.ConstDeviceSpan();
auto d_tree_group = device_model.tree_group.ConstDeviceSpan();
auto d_path_segments = path_segments.data().get();
dh::LaunchN(gpu_id, info.size(), [=] __device__(size_t idx) {
auto path_info = d_info[idx];
size_t tree_offset = d_tree_segments[path_info.tree_idx];
int group = d_tree_group[path_info.tree_idx];
size_t child_idx = path_info.leaf_position;
auto child = d_nodes[child_idx];
float v = child.LeafValue();
const float inf = std::numeric_limits<float>::infinity();
size_t output_position = d_path_segments[idx + 1] - 1;
while (!child.IsRoot()) {
size_t parent_idx = tree_offset + child.Parent();
double child_cover = d_stats[child_idx].sum_hess;
double parent_cover = d_stats[parent_idx].sum_hess;
double zero_fraction = child_cover / parent_cover;
auto parent = d_nodes[parent_idx];
bool is_left_path = (tree_offset + parent.LeftChild()) == child_idx;
bool is_missing_path = (!parent.DefaultLeft() && !is_left_path) ||
(parent.DefaultLeft() && is_left_path);
float lower_bound = is_left_path ? -inf : parent.SplitCond();
float upper_bound = is_left_path ? parent.SplitCond() : inf;
d_paths[output_position--] = {
idx, parent.SplitIndex(), group, lower_bound,
upper_bound, is_missing_path, zero_fraction, v};
child_idx = parent_idx;
child = parent;
}
// Root node has feature -1
d_paths[output_position] = {idx, -1, group, -inf, inf, false, 1.0, v};
});
}
namespace {
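// Shared memory needed to stage one row per thread; returns 0 when the
// requirement exceeds the device limit, in which case the kernels fall back
// to reading from global memory.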
template <size_t kBlockThreads>
size_t SharedMemoryBytes(size_t cols, size_t max_shared_memory_bytes) {
  // max_shared_memory_bytes must never be zero here.
CHECK_GT(max_shared_memory_bytes, 0);
size_t shared_memory_bytes =
static_cast<size_t>(sizeof(float) * cols * kBlockThreads);
if (shared_memory_bytes > max_shared_memory_bytes) {
shared_memory_bytes = 0;
}
return shared_memory_bytes;
}
} // anonymous namespace
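// GPU implementation of the Predictor interface. Predictions, leaf indices and
// (interaction) SHAP contributions are computed on the device selected by
// generic_param_->gpu_id.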
class GPUPredictor : public xgboost::Predictor {
private:
void PredictInternal(const SparsePage& batch,
size_t num_features,
HostDeviceVector<bst_float>* predictions,
size_t batch_offset) {
batch.offset.SetDevice(generic_param_->gpu_id);
batch.data.SetDevice(generic_param_->gpu_id);
const uint32_t BLOCK_THREADS = 128;
size_t num_rows = batch.Size();
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS));
size_t shared_memory_bytes =
SharedMemoryBytes<BLOCK_THREADS>(num_features, max_shared_memory_bytes_);
bool use_shared = shared_memory_bytes != 0;
size_t entry_start = 0;
SparsePageView data(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
num_features);
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} (
PredictKernel<SparsePageLoader, SparsePageView>, data,
model_.nodes.ConstDeviceSpan(),
predictions->DeviceSpan().subspan(batch_offset),
model_.tree_segments.ConstDeviceSpan(), model_.tree_group.ConstDeviceSpan(),
model_.split_types.ConstDeviceSpan(),
model_.categories_tree_segments.ConstDeviceSpan(),
model_.categories_node_segments.ConstDeviceSpan(),
model_.categories.ConstDeviceSpan(), model_.tree_beg_, model_.tree_end_,
num_features, num_rows, entry_start, use_shared, model_.num_group);
}
void PredictInternal(EllpackDeviceAccessor const& batch,
HostDeviceVector<bst_float>* out_preds,
size_t batch_offset) {
const uint32_t BLOCK_THREADS = 256;
size_t num_rows = batch.n_rows;
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS));
bool use_shared = false;
size_t entry_start = 0;
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS} (
PredictKernel<EllpackLoader, EllpackDeviceAccessor>, batch,
model_.nodes.ConstDeviceSpan(), out_preds->DeviceSpan().subspan(batch_offset),
model_.tree_segments.ConstDeviceSpan(), model_.tree_group.ConstDeviceSpan(),
model_.split_types.ConstDeviceSpan(),
model_.categories_tree_segments.ConstDeviceSpan(),
model_.categories_node_segments.ConstDeviceSpan(),
model_.categories.ConstDeviceSpan(), model_.tree_beg_, model_.tree_end_,
batch.NumFeatures(), num_rows, entry_start, use_shared,
model_.num_group);
}
void DevicePredictInternal(DMatrix* dmat, HostDeviceVector<float>* out_preds,
const gbm::GBTreeModel& model, size_t tree_begin,
size_t tree_end) {
dh::safe_cuda(hipSetDevice(generic_param_->gpu_id));
if (tree_end - tree_begin == 0) {
return;
}
model_.Init(model, tree_begin, tree_end, generic_param_->gpu_id);
out_preds->SetDevice(generic_param_->gpu_id);
auto const& info = dmat->Info();
if (dmat->PageExists<SparsePage>()) {
size_t batch_offset = 0;
for (auto &batch : dmat->GetBatches<SparsePage>()) {
this->PredictInternal(batch, model.learner_model_param->num_feature,
out_preds, batch_offset);
batch_offset += batch.Size() * model.learner_model_param->num_output_group;
}
} else {
size_t batch_offset = 0;
for (auto const& page : dmat->GetBatches<EllpackPage>()) {
this->PredictInternal(
page.Impl()->GetDeviceAccessor(generic_param_->gpu_id),
out_preds,
batch_offset);
batch_offset += page.Impl()->n_rows;
}
}
}
public:
explicit GPUPredictor(GenericParameter const* generic_param) :
Predictor::Predictor{generic_param} {}
~GPUPredictor() override {
if (generic_param_->gpu_id >= 0 && generic_param_->gpu_id < common::AllVisibleGPUs()) {
dh::safe_cuda(hipSetDevice(generic_param_->gpu_id));
}
}
void PredictBatch(DMatrix* dmat, PredictionCacheEntry* predts,
const gbm::GBTreeModel& model, int tree_begin,
unsigned ntree_limit = 0) override {
    // This function duplicates the CPU predictor's PredictBatch; see the comments there.
// FIXME(trivialfis): Remove the duplication.
std::lock_guard<std::mutex> const guard(lock_);
int device = generic_param_->gpu_id;
    CHECK_GE(device, 0) << "Set `gpu_id' to a non-negative value for processing GPU data.";
ConfigureDevice(device);
CHECK_EQ(tree_begin, 0);
auto* out_preds = &predts->predictions;
CHECK_GE(predts->version, tree_begin);
if (out_preds->Size() == 0 && dmat->Info().num_row_ != 0) {
CHECK_EQ(predts->version, 0);
}
if (predts->version == 0) {
this->InitOutPredictions(dmat->Info(), out_preds, model);
}
uint32_t const output_groups = model.learner_model_param->num_output_group;
CHECK_NE(output_groups, 0);
uint32_t real_ntree_limit = ntree_limit * output_groups;
if (real_ntree_limit == 0 || real_ntree_limit > model.trees.size()) {
real_ntree_limit = static_cast<uint32_t>(model.trees.size());
}
uint32_t const end_version = (tree_begin + real_ntree_limit) / output_groups;
if (predts->version > end_version) {
CHECK_NE(ntree_limit, 0);
this->InitOutPredictions(dmat->Info(), out_preds, model);
predts->version = 0;
}
uint32_t const beg_version = predts->version;
CHECK_LE(beg_version, end_version);
if (beg_version < end_version) {
this->DevicePredictInternal(dmat, out_preds, model,
beg_version * output_groups,
end_version * output_groups);
}
uint32_t delta = end_version - beg_version;
CHECK_LE(delta, model.trees.size());
predts->Update(delta);
CHECK(out_preds->Size() == output_groups * dmat->Info().num_row_ ||
out_preds->Size() == dmat->Info().num_row_);
}
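  // In-place prediction on adapter data (CuPy/CuDF): builds a DeviceModel for
  // the requested tree range and launches PredictKernel directly on the
  // adapter batch without materializing a DMatrix.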
template <typename Adapter, typename Loader>
void DispatchedInplacePredict(dmlc::any const &x,
const gbm::GBTreeModel &model, float,
PredictionCacheEntry *out_preds,
uint32_t tree_begin, uint32_t tree_end) const {
auto max_shared_memory_bytes = dh::MaxSharedMemory(this->generic_param_->gpu_id);
uint32_t const output_groups = model.learner_model_param->num_output_group;
DeviceModel d_model;
d_model.Init(model, tree_begin, tree_end, this->generic_param_->gpu_id);
auto m = dmlc::get<std::shared_ptr<Adapter>>(x);
CHECK_EQ(m->NumColumns(), model.learner_model_param->num_feature)
<< "Number of columns in data must equal to trained model.";
CHECK_EQ(this->generic_param_->gpu_id, m->DeviceIdx())
<< "XGBoost is running on device: " << this->generic_param_->gpu_id << ", "
<< "but data is on: " << m->DeviceIdx();
MetaInfo info;
info.num_col_ = m->NumColumns();
info.num_row_ = m->NumRows();
this->InitOutPredictions(info, &(out_preds->predictions), model);
const uint32_t BLOCK_THREADS = 128;
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(info.num_row_, BLOCK_THREADS));
size_t shared_memory_bytes =
SharedMemoryBytes<BLOCK_THREADS>(info.num_col_, max_shared_memory_bytes);
bool use_shared = shared_memory_bytes != 0;
size_t entry_start = 0;
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} (
PredictKernel<Loader, typename Loader::BatchT>, m->Value(),
d_model.nodes.ConstDeviceSpan(), out_preds->predictions.DeviceSpan(),
d_model.tree_segments.ConstDeviceSpan(), d_model.tree_group.ConstDeviceSpan(),
d_model.split_types.ConstDeviceSpan(),
d_model.categories_tree_segments.ConstDeviceSpan(),
d_model.categories_node_segments.ConstDeviceSpan(),
d_model.categories.ConstDeviceSpan(), tree_begin, tree_end, m->NumColumns(),
info.num_row_, entry_start, use_shared, output_groups);
}
void InplacePredict(dmlc::any const &x, const gbm::GBTreeModel &model,
float missing, PredictionCacheEntry *out_preds,
uint32_t tree_begin, unsigned tree_end) const override {
if (x.type() == typeid(std::shared_ptr<data::CupyAdapter>)) {
this->DispatchedInplacePredict<
data::CupyAdapter, DeviceAdapterLoader<data::CupyAdapterBatch>>(
x, model, missing, out_preds, tree_begin, tree_end);
} else if (x.type() == typeid(std::shared_ptr<data::CudfAdapter>)) {
this->DispatchedInplacePredict<
data::CudfAdapter, DeviceAdapterLoader<data::CudfAdapterBatch>>(
x, model, missing, out_preds, tree_begin, tree_end);
} else {
LOG(FATAL) << "Only CuPy and CuDF are supported by GPU Predictor.";
}
}
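  // Feature contributions (SHAP values) via GPUTreeShap; the last column of
  // each row holds the bias term (base margin or base score).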
void PredictContribution(DMatrix* p_fmat,
HostDeviceVector<bst_float>* out_contribs,
const gbm::GBTreeModel& model, unsigned ntree_limit,
std::vector<bst_float>*,
bool approximate, int,
unsigned) override {
if (approximate) {
LOG(FATAL) << "Approximated contribution is not implemented in GPU Predictor.";
}
dh::safe_cuda(hipSetDevice(generic_param_->gpu_id));
out_contribs->SetDevice(generic_param_->gpu_id);
uint32_t real_ntree_limit =
ntree_limit * model.learner_model_param->num_output_group;
if (real_ntree_limit == 0 || real_ntree_limit > model.trees.size()) {
real_ntree_limit = static_cast<uint32_t>(model.trees.size());
}
const int ngroup = model.learner_model_param->num_output_group;
CHECK_NE(ngroup, 0);
// allocate space for (number of features + bias) times the number of rows
size_t contributions_columns =
model.learner_model_param->num_feature + 1; // +1 for bias
out_contribs->Resize(p_fmat->Info().num_row_ * contributions_columns *
model.learner_model_param->num_output_group);
out_contribs->Fill(0.0f);
auto phis = out_contribs->DeviceSpan();
dh::device_vector<gpu_treeshap::PathElement> device_paths;
ExtractPaths(&device_paths, model, real_ntree_limit,
generic_param_->gpu_id);
for (auto& batch : p_fmat->GetBatches<SparsePage>()) {
batch.data.SetDevice(generic_param_->gpu_id);
batch.offset.SetDevice(generic_param_->gpu_id);
SparsePageView X(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
model.learner_model_param->num_feature);
gpu_treeshap::GPUTreeShap(
X, device_paths.begin(), device_paths.end(), ngroup,
phis.data() + batch.base_rowid * contributions_columns, phis.size());
}
// Add the base margin term to last column
p_fmat->Info().base_margin_.SetDevice(generic_param_->gpu_id);
const auto margin = p_fmat->Info().base_margin_.ConstDeviceSpan();
float base_score = model.learner_model_param->base_score;
dh::LaunchN(
generic_param_->gpu_id,
p_fmat->Info().num_row_ * model.learner_model_param->num_output_group,
[=] __device__(size_t idx) {
phis[(idx + 1) * contributions_columns - 1] +=
margin.empty() ? base_score : margin[idx];
});
}
void PredictInteractionContributions(DMatrix* p_fmat,
HostDeviceVector<bst_float>* out_contribs,
const gbm::GBTreeModel& model,
unsigned ntree_limit,
std::vector<bst_float>*,
bool approximate) override {
if (approximate) {
LOG(FATAL) << "[Internal error]: " << __func__
<< " approximate is not implemented in GPU Predictor.";
}
dh::safe_cuda(hipSetDevice(generic_param_->gpu_id));
out_contribs->SetDevice(generic_param_->gpu_id);
uint32_t real_ntree_limit =
ntree_limit * model.learner_model_param->num_output_group;
if (real_ntree_limit == 0 || real_ntree_limit > model.trees.size()) {
real_ntree_limit = static_cast<uint32_t>(model.trees.size());
}
const int ngroup = model.learner_model_param->num_output_group;
CHECK_NE(ngroup, 0);
    // allocate space for (number of features + bias)^2 times the number of rows
size_t contributions_columns =
model.learner_model_param->num_feature + 1; // +1 for bias
out_contribs->Resize(p_fmat->Info().num_row_ * contributions_columns *
contributions_columns *
model.learner_model_param->num_output_group);
out_contribs->Fill(0.0f);
auto phis = out_contribs->DeviceSpan();
dh::device_vector<gpu_treeshap::PathElement> device_paths;
ExtractPaths(&device_paths, model, real_ntree_limit,
generic_param_->gpu_id);
for (auto& batch : p_fmat->GetBatches<SparsePage>()) {
batch.data.SetDevice(generic_param_->gpu_id);
batch.offset.SetDevice(generic_param_->gpu_id);
SparsePageView X(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
model.learner_model_param->num_feature);
gpu_treeshap::GPUTreeShapInteractions(
X, device_paths.begin(), device_paths.end(), ngroup,
phis.data() + batch.base_rowid * contributions_columns, phis.size());
}
// Add the base margin term to last column
p_fmat->Info().base_margin_.SetDevice(generic_param_->gpu_id);
const auto margin = p_fmat->Info().base_margin_.ConstDeviceSpan();
float base_score = model.learner_model_param->base_score;
size_t n_features = model.learner_model_param->num_feature;
dh::LaunchN(
generic_param_->gpu_id,
p_fmat->Info().num_row_ * model.learner_model_param->num_output_group,
[=] __device__(size_t idx) {
size_t group = idx % ngroup;
size_t row_idx = idx / ngroup;
phis[gpu_treeshap::IndexPhiInteractions(
row_idx, ngroup, group, n_features, n_features, n_features)] +=
margin.empty() ? base_score : margin[idx];
});
}
protected:
void InitOutPredictions(const MetaInfo& info,
HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model) const {
size_t n_classes = model.learner_model_param->num_output_group;
size_t n = n_classes * info.num_row_;
const HostDeviceVector<bst_float>& base_margin = info.base_margin_;
out_preds->SetDevice(generic_param_->gpu_id);
out_preds->Resize(n);
if (base_margin.Size() != 0) {
CHECK_EQ(base_margin.Size(), n);
out_preds->Copy(base_margin);
} else {
out_preds->Fill(model.learner_model_param->base_score);
}
}
void PredictInstance(const SparsePage::Inst&,
std::vector<bst_float>*,
const gbm::GBTreeModel&, unsigned) override {
LOG(FATAL) << "[Internal error]: " << __func__
<< " is not implemented in GPU Predictor.";
}
void PredictLeaf(DMatrix* p_fmat, HostDeviceVector<bst_float>* predictions,
const gbm::GBTreeModel& model,
unsigned ntree_limit) override {
dh::safe_cuda(hipSetDevice(generic_param_->gpu_id));
ConfigureDevice(generic_param_->gpu_id);
const MetaInfo& info = p_fmat->Info();
constexpr uint32_t kBlockThreads = 128;
size_t shared_memory_bytes =
SharedMemoryBytes<kBlockThreads>(info.num_col_, max_shared_memory_bytes_);
bool use_shared = shared_memory_bytes != 0;
bst_feature_t num_features = info.num_col_;
bst_row_t num_rows = info.num_row_;
size_t entry_start = 0;
uint32_t real_ntree_limit = ntree_limit * model.learner_model_param->num_output_group;
if (real_ntree_limit == 0 || real_ntree_limit > model.trees.size()) {
real_ntree_limit = static_cast<uint32_t>(model.trees.size());
}
predictions->SetDevice(generic_param_->gpu_id);
predictions->Resize(num_rows * real_ntree_limit);
model_.Init(model, 0, real_ntree_limit, generic_param_->gpu_id);
if (p_fmat->PageExists<SparsePage>()) {
for (auto const& batch : p_fmat->GetBatches<SparsePage>()) {
batch.data.SetDevice(generic_param_->gpu_id);
batch.offset.SetDevice(generic_param_->gpu_id);
bst_row_t batch_offset = 0;
SparsePageView data{batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
model.learner_model_param->num_feature};
size_t num_rows = batch.Size();
auto grid =
static_cast<uint32_t>(common::DivRoundUp(num_rows, kBlockThreads));
dh::LaunchKernel {grid, kBlockThreads, shared_memory_bytes} (
PredictLeafKernel<SparsePageLoader, SparsePageView>, data,
model_.nodes.ConstDeviceSpan(),
predictions->DeviceSpan().subspan(batch_offset),
model_.tree_segments.ConstDeviceSpan(),
model_.tree_beg_, model_.tree_end_, num_features, num_rows,
entry_start, use_shared);
batch_offset += batch.Size();
}
} else {
for (auto const& batch : p_fmat->GetBatches<EllpackPage>()) {
bst_row_t batch_offset = 0;
EllpackDeviceAccessor data{batch.Impl()->GetDeviceAccessor(generic_param_->gpu_id)};
size_t num_rows = batch.Size();
auto grid =
static_cast<uint32_t>(common::DivRoundUp(num_rows, kBlockThreads));
dh::LaunchKernel {grid, kBlockThreads, shared_memory_bytes} (
PredictLeafKernel<EllpackLoader, EllpackDeviceAccessor>, data,
model_.nodes.ConstDeviceSpan(),
predictions->DeviceSpan().subspan(batch_offset),
model_.tree_segments.ConstDeviceSpan(),
model_.tree_beg_, model_.tree_end_, num_features, num_rows,
entry_start, use_shared);
batch_offset += batch.Size();
}
}
}
void Configure(const std::vector<std::pair<std::string, std::string>>& cfg) override {
Predictor::Configure(cfg);
}
private:
/*! \brief Reconfigure the device when GPU is changed. */
void ConfigureDevice(int device) {
if (device >= 0) {
max_shared_memory_bytes_ = dh::MaxSharedMemory(device);
}
}
std::mutex lock_;
DeviceModel model_;
size_t max_shared_memory_bytes_ { 0 };
};
XGBOOST_REGISTER_PREDICTOR(GPUPredictor, "gpu_predictor")
.describe("Make predictions using GPU.")
.set_body([](GenericParameter const* generic_param) {
return new GPUPredictor(generic_param);
});
} // namespace predictor
} // namespace xgboost
| 557528c31f9de8e75395572ee6f43a4d1b0979ef.cu | /*!
* Copyright 2017-2020 by Contributors
*/
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <GPUTreeShap/gpu_treeshap.h>
#include <memory>
#include "xgboost/data.h"
#include "xgboost/predictor.h"
#include "xgboost/tree_model.h"
#include "xgboost/tree_updater.h"
#include "xgboost/host_device_vector.h"
#include "../gbm/gbtree_model.h"
#include "../data/ellpack_page.cuh"
#include "../data/device_adapter.cuh"
#include "../common/common.h"
#include "../common/bitfield.h"
#include "../common/categorical.h"
#include "../common/device_helpers.cuh"
namespace xgboost {
namespace predictor {
DMLC_REGISTRY_FILE_TAG(gpu_predictor);
struct SparsePageView {
common::Span<const Entry> d_data;
common::Span<const bst_row_t> d_row_ptr;
bst_feature_t num_features;
SparsePageView() = default;
XGBOOST_DEVICE SparsePageView(common::Span<const Entry> data,
common::Span<const bst_row_t> row_ptr,
bst_feature_t num_features)
: d_data{data}, d_row_ptr{row_ptr}, num_features(num_features) {}
__device__ float GetElement(size_t ridx, size_t fidx) const {
// Binary search
auto begin_ptr = d_data.begin() + d_row_ptr[ridx];
auto end_ptr = d_data.begin() + d_row_ptr[ridx + 1];
if (end_ptr - begin_ptr == this->NumCols()) {
// Bypass span check for dense data
return d_data.data()[d_row_ptr[ridx] + fidx].fvalue;
}
common::Span<const Entry>::iterator previous_middle;
while (end_ptr != begin_ptr) {
auto middle = begin_ptr + (end_ptr - begin_ptr) / 2;
if (middle == previous_middle) {
break;
} else {
previous_middle = middle;
}
if (middle->index == fidx) {
return middle->fvalue;
} else if (middle->index < fidx) {
begin_ptr = middle;
} else {
end_ptr = middle;
}
}
// Value is missing
return nanf("");
}
XGBOOST_DEVICE size_t NumRows() const { return d_row_ptr.size() - 1; }
XGBOOST_DEVICE size_t NumCols() const { return num_features; }
};
struct SparsePageLoader {
bool use_shared;
SparsePageView data;
float* smem;
size_t entry_start;
__device__ SparsePageLoader(SparsePageView data, bool use_shared, bst_feature_t num_features,
bst_row_t num_rows, size_t entry_start)
: use_shared(use_shared),
data(data),
entry_start(entry_start) {
extern __shared__ float _smem[];
smem = _smem;
// Copy instances
if (use_shared) {
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
int shared_elements = blockDim.x * data.num_features;
dh::BlockFill(smem, shared_elements, nanf(""));
__syncthreads();
if (global_idx < num_rows) {
bst_uint elem_begin = data.d_row_ptr[global_idx];
bst_uint elem_end = data.d_row_ptr[global_idx + 1];
for (bst_uint elem_idx = elem_begin; elem_idx < elem_end; elem_idx++) {
Entry elem = data.d_data[elem_idx - entry_start];
smem[threadIdx.x * data.num_features + elem.index] = elem.fvalue;
}
}
__syncthreads();
}
}
__device__ float GetElement(size_t ridx, size_t fidx) const {
if (use_shared) {
return smem[threadIdx.x * data.num_features + fidx];
} else {
return data.GetElement(ridx, fidx);
}
}
};
struct EllpackLoader {
EllpackDeviceAccessor const& matrix;
XGBOOST_DEVICE EllpackLoader(EllpackDeviceAccessor const& m, bool,
bst_feature_t, bst_row_t, size_t)
: matrix{m} {}
__device__ __forceinline__ float GetElement(size_t ridx, size_t fidx) const {
auto gidx = matrix.GetBinIndex(ridx, fidx);
if (gidx == -1) {
return nan("");
}
// The gradient index needs to be shifted by one as min values are not included in the
// cuts.
if (gidx == matrix.feature_segments[fidx]) {
return matrix.min_fvalue[fidx];
}
return matrix.gidx_fvalue_map[gidx - 1];
}
};
template <typename Batch>
struct DeviceAdapterLoader {
Batch batch;
bst_feature_t columns;
float* smem;
bool use_shared;
using BatchT = Batch;
XGBOOST_DEV_INLINE DeviceAdapterLoader(Batch const batch, bool use_shared,
bst_feature_t num_features, bst_row_t num_rows,
size_t entry_start) :
batch{batch},
columns{num_features},
use_shared{use_shared} {
extern __shared__ float _smem[];
smem = _smem;
if (use_shared) {
uint32_t global_idx = blockDim.x * blockIdx.x + threadIdx.x;
size_t shared_elements = blockDim.x * num_features;
dh::BlockFill(smem, shared_elements, nanf(""));
__syncthreads();
if (global_idx < num_rows) {
auto beg = global_idx * columns;
auto end = (global_idx + 1) * columns;
for (size_t i = beg; i < end; ++i) {
smem[threadIdx.x * num_features + (i - beg)] = batch.GetElement(i).value;
}
}
}
__syncthreads();
}
XGBOOST_DEV_INLINE float GetElement(size_t ridx, size_t fidx) const {
if (use_shared) {
return smem[threadIdx.x * columns + fidx];
}
return batch.GetElement(ridx * columns + fidx).value;
}
};
template <typename Loader>
__device__ float GetLeafWeight(bst_row_t ridx, const RegTree::Node* tree,
common::Span<FeatureType const> split_types,
common::Span<RegTree::Segment const> d_cat_ptrs,
common::Span<uint32_t const> d_categories,
Loader* loader) {
bst_node_t nidx = 0;
RegTree::Node n = tree[nidx];
while (!n.IsLeaf()) {
float fvalue = loader->GetElement(ridx, n.SplitIndex());
// Missing value
if (common::CheckNAN(fvalue)) {
nidx = n.DefaultChild();
} else {
bool go_left = true;
if (common::IsCat(split_types, nidx)) {
auto categories = d_categories.subspan(d_cat_ptrs[nidx].beg,
d_cat_ptrs[nidx].size);
go_left = Decision(categories, common::AsCat(fvalue));
} else {
go_left = fvalue < n.SplitCond();
}
if (go_left) {
nidx = n.LeftChild();
} else {
nidx = n.RightChild();
}
}
n = tree[nidx];
}
return tree[nidx].LeafValue();
}
template <typename Loader>
__device__ bst_node_t GetLeafIndex(bst_row_t ridx, const RegTree::Node* tree,
Loader const& loader) {
bst_node_t nidx = 0;
RegTree::Node n = tree[nidx];
while (!n.IsLeaf()) {
float fvalue = loader.GetElement(ridx, n.SplitIndex());
// Missing value
if (isnan(fvalue)) {
nidx = n.DefaultChild();
n = tree[nidx];
} else {
if (fvalue < n.SplitCond()) {
nidx = n.LeftChild();
n = tree[nidx];
} else {
nidx = n.RightChild();
n = tree[nidx];
}
}
}
return nidx;
}
template <typename Loader, typename Data>
__global__ void PredictLeafKernel(Data data,
common::Span<const RegTree::Node> d_nodes,
common::Span<float> d_out_predictions,
common::Span<size_t const> d_tree_segments,
size_t tree_begin, size_t tree_end, size_t num_features,
size_t num_rows, size_t entry_start, bool use_shared) {
bst_row_t ridx = blockDim.x * blockIdx.x + threadIdx.x;
if (ridx >= num_rows) {
return;
}
Loader loader(data, use_shared, num_features, num_rows, entry_start);
for (int tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
const RegTree::Node* d_tree = &d_nodes[d_tree_segments[tree_idx - tree_begin]];
auto leaf = GetLeafIndex(ridx, d_tree, loader);
d_out_predictions[ridx * (tree_end - tree_begin) + tree_idx] = leaf;
}
}
template <typename Loader, typename Data>
__global__ void
PredictKernel(Data data, common::Span<const RegTree::Node> d_nodes,
common::Span<float> d_out_predictions,
common::Span<size_t const> d_tree_segments,
common::Span<int const> d_tree_group,
common::Span<FeatureType const> d_tree_split_types,
common::Span<uint32_t const> d_cat_tree_segments,
common::Span<RegTree::Segment const> d_cat_node_segments,
common::Span<uint32_t const> d_categories, size_t tree_begin,
size_t tree_end, size_t num_features, size_t num_rows,
size_t entry_start, bool use_shared, int num_group) {
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
Loader loader(data, use_shared, num_features, num_rows, entry_start);
if (global_idx >= num_rows) return;
if (num_group == 1) {
float sum = 0;
for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
const RegTree::Node* d_tree =
&d_nodes[d_tree_segments[tree_idx - tree_begin]];
auto tree_cat_ptrs = d_cat_node_segments.subspan(
d_tree_segments[tree_idx - tree_begin],
d_tree_segments[tree_idx - tree_begin + 1] -
d_tree_segments[tree_idx - tree_begin]);
auto tree_categories =
d_categories.subspan(d_cat_tree_segments[tree_idx - tree_begin],
d_cat_tree_segments[tree_idx - tree_begin + 1] -
d_cat_tree_segments[tree_idx - tree_begin]);
auto tree_split_types =
d_tree_split_types.subspan(d_tree_segments[tree_idx - tree_begin],
d_tree_segments[tree_idx - tree_begin + 1] -
d_tree_segments[tree_idx - tree_begin]);
float leaf = GetLeafWeight(global_idx, d_tree, tree_split_types,
tree_cat_ptrs,
tree_categories,
&loader);
sum += leaf;
}
d_out_predictions[global_idx] += sum;
} else {
for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
int tree_group = d_tree_group[tree_idx];
const RegTree::Node* d_tree =
&d_nodes[d_tree_segments[tree_idx - tree_begin]];
bst_uint out_prediction_idx = global_idx * num_group + tree_group;
auto tree_cat_ptrs = d_cat_node_segments.subspan(
d_tree_segments[tree_idx - tree_begin],
d_tree_segments[tree_idx - tree_begin + 1] -
d_tree_segments[tree_idx - tree_begin]);
auto tree_categories =
d_categories.subspan(d_cat_tree_segments[tree_idx - tree_begin],
d_cat_tree_segments[tree_idx - tree_begin + 1] -
d_cat_tree_segments[tree_idx - tree_begin]);
d_out_predictions[out_prediction_idx] +=
GetLeafWeight(global_idx, d_tree, d_tree_split_types,
tree_cat_ptrs,
tree_categories,
&loader);
}
}
}
class DeviceModel {
public:
// Need to lazily construct the vectors because GPU id is only known at runtime
HostDeviceVector<RTreeNodeStat> stats;
HostDeviceVector<size_t> tree_segments;
HostDeviceVector<RegTree::Node> nodes;
HostDeviceVector<int> tree_group;
HostDeviceVector<FeatureType> split_types;
  // Per-tree offsets segmenting the categories array.
  HostDeviceVector<uint32_t> categories_tree_segments;
  // Per-node (beg, size) segments into the categories array.
  HostDeviceVector<RegTree::Segment> categories_node_segments;
HostDeviceVector<uint32_t> categories;
size_t tree_beg_; // NOLINT
size_t tree_end_; // NOLINT
int num_group;
void Init(const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end, int32_t gpu_id) {
dh::safe_cuda(cudaSetDevice(gpu_id));
CHECK_EQ(model.param.size_leaf_vector, 0);
// Copy decision trees to device
tree_segments = std::move(HostDeviceVector<size_t>({}, gpu_id));
auto& h_tree_segments = tree_segments.HostVector();
h_tree_segments.reserve((tree_end - tree_begin) + 1);
size_t sum = 0;
h_tree_segments.push_back(sum);
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
sum += model.trees.at(tree_idx)->GetNodes().size();
h_tree_segments.push_back(sum);
}
nodes = std::move(HostDeviceVector<RegTree::Node>(h_tree_segments.back(), RegTree::Node(),
gpu_id));
stats = std::move(HostDeviceVector<RTreeNodeStat>(h_tree_segments.back(),
RTreeNodeStat(), gpu_id));
auto d_nodes = nodes.DevicePointer();
auto d_stats = stats.DevicePointer();
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
auto& src_nodes = model.trees.at(tree_idx)->GetNodes();
auto& src_stats = model.trees.at(tree_idx)->GetStats();
dh::safe_cuda(cudaMemcpyAsync(
d_nodes + h_tree_segments[tree_idx - tree_begin], src_nodes.data(),
sizeof(RegTree::Node) * src_nodes.size(), cudaMemcpyDefault));
dh::safe_cuda(cudaMemcpyAsync(
d_stats + h_tree_segments[tree_idx - tree_begin], src_stats.data(),
sizeof(RTreeNodeStat) * src_stats.size(), cudaMemcpyDefault));
}
tree_group = std::move(HostDeviceVector<int>(model.tree_info.size(), 0, gpu_id));
auto& h_tree_group = tree_group.HostVector();
std::memcpy(h_tree_group.data(), model.tree_info.data(), sizeof(int) * model.tree_info.size());
// Initialize categorical splits.
split_types.SetDevice(gpu_id);
std::vector<FeatureType>& h_split_types = split_types.HostVector();
h_split_types.resize(h_tree_segments.back());
for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
auto const& src_st = model.trees.at(tree_idx)->GetSplitTypes();
std::copy(src_st.cbegin(), src_st.cend(),
h_split_types.begin() + h_tree_segments[tree_idx - tree_begin]);
}
categories = HostDeviceVector<uint32_t>({}, gpu_id);
categories_tree_segments = HostDeviceVector<uint32_t>(1, 0, gpu_id);
std::vector<uint32_t> &h_categories = categories.HostVector();
std::vector<uint32_t> &h_split_cat_segments = categories_tree_segments.HostVector();
for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
auto const& src_cats = model.trees.at(tree_idx)->GetSplitCategories();
size_t orig_size = h_categories.size();
h_categories.resize(orig_size + src_cats.size());
std::copy(src_cats.cbegin(), src_cats.cend(),
h_categories.begin() + orig_size);
h_split_cat_segments.push_back(h_categories.size());
}
categories_node_segments =
HostDeviceVector<RegTree::Segment>(h_tree_segments.back(), {}, gpu_id);
std::vector<RegTree::Segment> &h_categories_node_segments =
categories_node_segments.HostVector();
for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
auto const &src_cats_ptr = model.trees.at(tree_idx)->GetSplitCategoriesPtr();
std::copy(src_cats_ptr.cbegin(), src_cats_ptr.cend(),
h_categories_node_segments.begin() +
h_tree_segments[tree_idx - tree_begin]);
}
this->tree_beg_ = tree_begin;
this->tree_end_ = tree_end;
this->num_group = model.learner_model_param->num_output_group;
}
};
struct PathInfo {
int64_t leaf_position; // -1 not a leaf
size_t length;
size_t tree_idx;
};
// Transform model into path element form for GPUTreeShap
void ExtractPaths(dh::device_vector<gpu_treeshap::PathElement>* paths,
const gbm::GBTreeModel& model, size_t tree_limit,
int gpu_id) {
DeviceModel device_model;
device_model.Init(model, 0, tree_limit, gpu_id);
dh::caching_device_vector<PathInfo> info(device_model.nodes.Size());
dh::XGBCachingDeviceAllocator<PathInfo> alloc;
auto d_nodes = device_model.nodes.ConstDeviceSpan();
auto d_tree_segments = device_model.tree_segments.ConstDeviceSpan();
auto nodes_transform = dh::MakeTransformIterator<PathInfo>(
thrust::make_counting_iterator(0ull), [=] __device__(size_t idx) {
auto n = d_nodes[idx];
if (!n.IsLeaf() || n.IsDeleted()) {
return PathInfo{-1, 0, 0};
}
size_t tree_idx =
dh::SegmentId(d_tree_segments.begin(), d_tree_segments.end(), idx);
size_t tree_offset = d_tree_segments[tree_idx];
size_t path_length = 1;
while (!n.IsRoot()) {
n = d_nodes[n.Parent() + tree_offset];
path_length++;
}
return PathInfo{int64_t(idx), path_length, tree_idx};
});
auto end = thrust::copy_if(
thrust::cuda::par(alloc), nodes_transform,
nodes_transform + d_nodes.size(), info.begin(),
[=] __device__(const PathInfo& e) { return e.leaf_position != -1; });
info.resize(end - info.begin());
auto length_iterator = dh::MakeTransformIterator<size_t>(
info.begin(),
[=] __device__(const PathInfo& info) { return info.length; });
dh::caching_device_vector<size_t> path_segments(info.size() + 1);
thrust::exclusive_scan(thrust::cuda::par(alloc), length_iterator,
length_iterator + info.size() + 1,
path_segments.begin());
paths->resize(path_segments.back());
auto d_paths = paths->data().get();
auto d_info = info.data().get();
auto d_stats = device_model.stats.ConstDeviceSpan();
auto d_tree_group = device_model.tree_group.ConstDeviceSpan();
auto d_path_segments = path_segments.data().get();
dh::LaunchN(gpu_id, info.size(), [=] __device__(size_t idx) {
auto path_info = d_info[idx];
size_t tree_offset = d_tree_segments[path_info.tree_idx];
int group = d_tree_group[path_info.tree_idx];
size_t child_idx = path_info.leaf_position;
auto child = d_nodes[child_idx];
float v = child.LeafValue();
const float inf = std::numeric_limits<float>::infinity();
size_t output_position = d_path_segments[idx + 1] - 1;
while (!child.IsRoot()) {
size_t parent_idx = tree_offset + child.Parent();
double child_cover = d_stats[child_idx].sum_hess;
double parent_cover = d_stats[parent_idx].sum_hess;
double zero_fraction = child_cover / parent_cover;
auto parent = d_nodes[parent_idx];
bool is_left_path = (tree_offset + parent.LeftChild()) == child_idx;
bool is_missing_path = (!parent.DefaultLeft() && !is_left_path) ||
(parent.DefaultLeft() && is_left_path);
float lower_bound = is_left_path ? -inf : parent.SplitCond();
float upper_bound = is_left_path ? parent.SplitCond() : inf;
d_paths[output_position--] = {
idx, parent.SplitIndex(), group, lower_bound,
upper_bound, is_missing_path, zero_fraction, v};
child_idx = parent_idx;
child = parent;
}
// Root node has feature -1
d_paths[output_position] = {idx, -1, group, -inf, inf, false, 1.0, v};
});
}
namespace {
template <size_t kBlockThreads>
size_t SharedMemoryBytes(size_t cols, size_t max_shared_memory_bytes) {
  // max_shared_memory_bytes must never be zero here.
CHECK_GT(max_shared_memory_bytes, 0);
size_t shared_memory_bytes =
static_cast<size_t>(sizeof(float) * cols * kBlockThreads);
if (shared_memory_bytes > max_shared_memory_bytes) {
shared_memory_bytes = 0;
}
return shared_memory_bytes;
}
} // anonymous namespace
class GPUPredictor : public xgboost::Predictor {
private:
void PredictInternal(const SparsePage& batch,
size_t num_features,
HostDeviceVector<bst_float>* predictions,
size_t batch_offset) {
batch.offset.SetDevice(generic_param_->gpu_id);
batch.data.SetDevice(generic_param_->gpu_id);
const uint32_t BLOCK_THREADS = 128;
size_t num_rows = batch.Size();
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS));
size_t shared_memory_bytes =
SharedMemoryBytes<BLOCK_THREADS>(num_features, max_shared_memory_bytes_);
bool use_shared = shared_memory_bytes != 0;
size_t entry_start = 0;
SparsePageView data(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
num_features);
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} (
PredictKernel<SparsePageLoader, SparsePageView>, data,
model_.nodes.ConstDeviceSpan(),
predictions->DeviceSpan().subspan(batch_offset),
model_.tree_segments.ConstDeviceSpan(), model_.tree_group.ConstDeviceSpan(),
model_.split_types.ConstDeviceSpan(),
model_.categories_tree_segments.ConstDeviceSpan(),
model_.categories_node_segments.ConstDeviceSpan(),
model_.categories.ConstDeviceSpan(), model_.tree_beg_, model_.tree_end_,
num_features, num_rows, entry_start, use_shared, model_.num_group);
}
void PredictInternal(EllpackDeviceAccessor const& batch,
HostDeviceVector<bst_float>* out_preds,
size_t batch_offset) {
const uint32_t BLOCK_THREADS = 256;
size_t num_rows = batch.n_rows;
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS));
bool use_shared = false;
size_t entry_start = 0;
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS} (
PredictKernel<EllpackLoader, EllpackDeviceAccessor>, batch,
model_.nodes.ConstDeviceSpan(), out_preds->DeviceSpan().subspan(batch_offset),
model_.tree_segments.ConstDeviceSpan(), model_.tree_group.ConstDeviceSpan(),
model_.split_types.ConstDeviceSpan(),
model_.categories_tree_segments.ConstDeviceSpan(),
model_.categories_node_segments.ConstDeviceSpan(),
model_.categories.ConstDeviceSpan(), model_.tree_beg_, model_.tree_end_,
batch.NumFeatures(), num_rows, entry_start, use_shared,
model_.num_group);
}
void DevicePredictInternal(DMatrix* dmat, HostDeviceVector<float>* out_preds,
const gbm::GBTreeModel& model, size_t tree_begin,
size_t tree_end) {
dh::safe_cuda(cudaSetDevice(generic_param_->gpu_id));
if (tree_end - tree_begin == 0) {
return;
}
model_.Init(model, tree_begin, tree_end, generic_param_->gpu_id);
out_preds->SetDevice(generic_param_->gpu_id);
auto const& info = dmat->Info();
if (dmat->PageExists<SparsePage>()) {
size_t batch_offset = 0;
for (auto &batch : dmat->GetBatches<SparsePage>()) {
this->PredictInternal(batch, model.learner_model_param->num_feature,
out_preds, batch_offset);
batch_offset += batch.Size() * model.learner_model_param->num_output_group;
}
} else {
size_t batch_offset = 0;
for (auto const& page : dmat->GetBatches<EllpackPage>()) {
this->PredictInternal(
page.Impl()->GetDeviceAccessor(generic_param_->gpu_id),
out_preds,
batch_offset);
batch_offset += page.Impl()->n_rows;
}
}
}
public:
explicit GPUPredictor(GenericParameter const* generic_param) :
Predictor::Predictor{generic_param} {}
~GPUPredictor() override {
if (generic_param_->gpu_id >= 0 && generic_param_->gpu_id < common::AllVisibleGPUs()) {
dh::safe_cuda(cudaSetDevice(generic_param_->gpu_id));
}
}
void PredictBatch(DMatrix* dmat, PredictionCacheEntry* predts,
const gbm::GBTreeModel& model, int tree_begin,
unsigned ntree_limit = 0) override {
    // This function duplicates the CPU predictor's PredictBatch; see the comments there.
// FIXME(trivialfis): Remove the duplication.
std::lock_guard<std::mutex> const guard(lock_);
int device = generic_param_->gpu_id;
    CHECK_GE(device, 0) << "Set `gpu_id' to a non-negative value for processing GPU data.";
ConfigureDevice(device);
CHECK_EQ(tree_begin, 0);
auto* out_preds = &predts->predictions;
CHECK_GE(predts->version, tree_begin);
if (out_preds->Size() == 0 && dmat->Info().num_row_ != 0) {
CHECK_EQ(predts->version, 0);
}
if (predts->version == 0) {
this->InitOutPredictions(dmat->Info(), out_preds, model);
}
uint32_t const output_groups = model.learner_model_param->num_output_group;
CHECK_NE(output_groups, 0);
uint32_t real_ntree_limit = ntree_limit * output_groups;
if (real_ntree_limit == 0 || real_ntree_limit > model.trees.size()) {
real_ntree_limit = static_cast<uint32_t>(model.trees.size());
}
uint32_t const end_version = (tree_begin + real_ntree_limit) / output_groups;
if (predts->version > end_version) {
CHECK_NE(ntree_limit, 0);
this->InitOutPredictions(dmat->Info(), out_preds, model);
predts->version = 0;
}
uint32_t const beg_version = predts->version;
CHECK_LE(beg_version, end_version);
if (beg_version < end_version) {
this->DevicePredictInternal(dmat, out_preds, model,
beg_version * output_groups,
end_version * output_groups);
}
uint32_t delta = end_version - beg_version;
CHECK_LE(delta, model.trees.size());
predts->Update(delta);
CHECK(out_preds->Size() == output_groups * dmat->Info().num_row_ ||
out_preds->Size() == dmat->Info().num_row_);
}
template <typename Adapter, typename Loader>
void DispatchedInplacePredict(dmlc::any const &x,
const gbm::GBTreeModel &model, float,
PredictionCacheEntry *out_preds,
uint32_t tree_begin, uint32_t tree_end) const {
auto max_shared_memory_bytes = dh::MaxSharedMemory(this->generic_param_->gpu_id);
uint32_t const output_groups = model.learner_model_param->num_output_group;
DeviceModel d_model;
d_model.Init(model, tree_begin, tree_end, this->generic_param_->gpu_id);
auto m = dmlc::get<std::shared_ptr<Adapter>>(x);
CHECK_EQ(m->NumColumns(), model.learner_model_param->num_feature)
<< "Number of columns in data must equal to trained model.";
CHECK_EQ(this->generic_param_->gpu_id, m->DeviceIdx())
<< "XGBoost is running on device: " << this->generic_param_->gpu_id << ", "
<< "but data is on: " << m->DeviceIdx();
MetaInfo info;
info.num_col_ = m->NumColumns();
info.num_row_ = m->NumRows();
this->InitOutPredictions(info, &(out_preds->predictions), model);
const uint32_t BLOCK_THREADS = 128;
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(info.num_row_, BLOCK_THREADS));
size_t shared_memory_bytes =
SharedMemoryBytes<BLOCK_THREADS>(info.num_col_, max_shared_memory_bytes);
bool use_shared = shared_memory_bytes != 0;
size_t entry_start = 0;
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} (
PredictKernel<Loader, typename Loader::BatchT>, m->Value(),
d_model.nodes.ConstDeviceSpan(), out_preds->predictions.DeviceSpan(),
d_model.tree_segments.ConstDeviceSpan(), d_model.tree_group.ConstDeviceSpan(),
d_model.split_types.ConstDeviceSpan(),
d_model.categories_tree_segments.ConstDeviceSpan(),
d_model.categories_node_segments.ConstDeviceSpan(),
d_model.categories.ConstDeviceSpan(), tree_begin, tree_end, m->NumColumns(),
info.num_row_, entry_start, use_shared, output_groups);
}
void InplacePredict(dmlc::any const &x, const gbm::GBTreeModel &model,
float missing, PredictionCacheEntry *out_preds,
uint32_t tree_begin, unsigned tree_end) const override {
if (x.type() == typeid(std::shared_ptr<data::CupyAdapter>)) {
this->DispatchedInplacePredict<
data::CupyAdapter, DeviceAdapterLoader<data::CupyAdapterBatch>>(
x, model, missing, out_preds, tree_begin, tree_end);
} else if (x.type() == typeid(std::shared_ptr<data::CudfAdapter>)) {
this->DispatchedInplacePredict<
data::CudfAdapter, DeviceAdapterLoader<data::CudfAdapterBatch>>(
x, model, missing, out_preds, tree_begin, tree_end);
} else {
LOG(FATAL) << "Only CuPy and CuDF are supported by GPU Predictor.";
}
}
void PredictContribution(DMatrix* p_fmat,
HostDeviceVector<bst_float>* out_contribs,
const gbm::GBTreeModel& model, unsigned ntree_limit,
std::vector<bst_float>*,
bool approximate, int,
unsigned) override {
if (approximate) {
LOG(FATAL) << "Approximated contribution is not implemented in GPU Predictor.";
}
dh::safe_cuda(cudaSetDevice(generic_param_->gpu_id));
out_contribs->SetDevice(generic_param_->gpu_id);
uint32_t real_ntree_limit =
ntree_limit * model.learner_model_param->num_output_group;
if (real_ntree_limit == 0 || real_ntree_limit > model.trees.size()) {
real_ntree_limit = static_cast<uint32_t>(model.trees.size());
}
const int ngroup = model.learner_model_param->num_output_group;
CHECK_NE(ngroup, 0);
// allocate space for (number of features + bias) times the number of rows
size_t contributions_columns =
model.learner_model_param->num_feature + 1; // +1 for bias
out_contribs->Resize(p_fmat->Info().num_row_ * contributions_columns *
model.learner_model_param->num_output_group);
out_contribs->Fill(0.0f);
auto phis = out_contribs->DeviceSpan();
dh::device_vector<gpu_treeshap::PathElement> device_paths;
ExtractPaths(&device_paths, model, real_ntree_limit,
generic_param_->gpu_id);
for (auto& batch : p_fmat->GetBatches<SparsePage>()) {
batch.data.SetDevice(generic_param_->gpu_id);
batch.offset.SetDevice(generic_param_->gpu_id);
SparsePageView X(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
model.learner_model_param->num_feature);
gpu_treeshap::GPUTreeShap(
X, device_paths.begin(), device_paths.end(), ngroup,
phis.data() + batch.base_rowid * contributions_columns, phis.size());
}
// Add the base margin term to last column
p_fmat->Info().base_margin_.SetDevice(generic_param_->gpu_id);
const auto margin = p_fmat->Info().base_margin_.ConstDeviceSpan();
float base_score = model.learner_model_param->base_score;
dh::LaunchN(
generic_param_->gpu_id,
p_fmat->Info().num_row_ * model.learner_model_param->num_output_group,
[=] __device__(size_t idx) {
phis[(idx + 1) * contributions_columns - 1] +=
margin.empty() ? base_score : margin[idx];
});
}
void PredictInteractionContributions(DMatrix* p_fmat,
HostDeviceVector<bst_float>* out_contribs,
const gbm::GBTreeModel& model,
unsigned ntree_limit,
std::vector<bst_float>*,
bool approximate) override {
if (approximate) {
LOG(FATAL) << "[Internal error]: " << __func__
<< " approximate is not implemented in GPU Predictor.";
}
dh::safe_cuda(cudaSetDevice(generic_param_->gpu_id));
out_contribs->SetDevice(generic_param_->gpu_id);
uint32_t real_ntree_limit =
ntree_limit * model.learner_model_param->num_output_group;
if (real_ntree_limit == 0 || real_ntree_limit > model.trees.size()) {
real_ntree_limit = static_cast<uint32_t>(model.trees.size());
}
const int ngroup = model.learner_model_param->num_output_group;
CHECK_NE(ngroup, 0);
    // allocate space for (number of features + bias)^2 times the number of rows
size_t contributions_columns =
model.learner_model_param->num_feature + 1; // +1 for bias
out_contribs->Resize(p_fmat->Info().num_row_ * contributions_columns *
contributions_columns *
model.learner_model_param->num_output_group);
out_contribs->Fill(0.0f);
auto phis = out_contribs->DeviceSpan();
dh::device_vector<gpu_treeshap::PathElement> device_paths;
ExtractPaths(&device_paths, model, real_ntree_limit,
generic_param_->gpu_id);
for (auto& batch : p_fmat->GetBatches<SparsePage>()) {
batch.data.SetDevice(generic_param_->gpu_id);
batch.offset.SetDevice(generic_param_->gpu_id);
SparsePageView X(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
model.learner_model_param->num_feature);
gpu_treeshap::GPUTreeShapInteractions(
X, device_paths.begin(), device_paths.end(), ngroup,
phis.data() + batch.base_rowid * contributions_columns, phis.size());
}
// Add the base margin term to last column
p_fmat->Info().base_margin_.SetDevice(generic_param_->gpu_id);
const auto margin = p_fmat->Info().base_margin_.ConstDeviceSpan();
float base_score = model.learner_model_param->base_score;
size_t n_features = model.learner_model_param->num_feature;
dh::LaunchN(
generic_param_->gpu_id,
p_fmat->Info().num_row_ * model.learner_model_param->num_output_group,
[=] __device__(size_t idx) {
size_t group = idx % ngroup;
size_t row_idx = idx / ngroup;
phis[gpu_treeshap::IndexPhiInteractions(
row_idx, ngroup, group, n_features, n_features, n_features)] +=
margin.empty() ? base_score : margin[idx];
});
}
protected:
void InitOutPredictions(const MetaInfo& info,
HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model) const {
size_t n_classes = model.learner_model_param->num_output_group;
size_t n = n_classes * info.num_row_;
const HostDeviceVector<bst_float>& base_margin = info.base_margin_;
out_preds->SetDevice(generic_param_->gpu_id);
out_preds->Resize(n);
if (base_margin.Size() != 0) {
CHECK_EQ(base_margin.Size(), n);
out_preds->Copy(base_margin);
} else {
out_preds->Fill(model.learner_model_param->base_score);
}
}
void PredictInstance(const SparsePage::Inst&,
std::vector<bst_float>*,
const gbm::GBTreeModel&, unsigned) override {
LOG(FATAL) << "[Internal error]: " << __func__
<< " is not implemented in GPU Predictor.";
}
void PredictLeaf(DMatrix* p_fmat, HostDeviceVector<bst_float>* predictions,
const gbm::GBTreeModel& model,
unsigned ntree_limit) override {
dh::safe_cuda(cudaSetDevice(generic_param_->gpu_id));
ConfigureDevice(generic_param_->gpu_id);
const MetaInfo& info = p_fmat->Info();
constexpr uint32_t kBlockThreads = 128;
size_t shared_memory_bytes =
SharedMemoryBytes<kBlockThreads>(info.num_col_, max_shared_memory_bytes_);
bool use_shared = shared_memory_bytes != 0;
bst_feature_t num_features = info.num_col_;
bst_row_t num_rows = info.num_row_;
size_t entry_start = 0;
uint32_t real_ntree_limit = ntree_limit * model.learner_model_param->num_output_group;
if (real_ntree_limit == 0 || real_ntree_limit > model.trees.size()) {
real_ntree_limit = static_cast<uint32_t>(model.trees.size());
}
predictions->SetDevice(generic_param_->gpu_id);
predictions->Resize(num_rows * real_ntree_limit);
model_.Init(model, 0, real_ntree_limit, generic_param_->gpu_id);
if (p_fmat->PageExists<SparsePage>()) {
for (auto const& batch : p_fmat->GetBatches<SparsePage>()) {
batch.data.SetDevice(generic_param_->gpu_id);
batch.offset.SetDevice(generic_param_->gpu_id);
bst_row_t batch_offset = 0;
SparsePageView data{batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
model.learner_model_param->num_feature};
size_t num_rows = batch.Size();
auto grid =
static_cast<uint32_t>(common::DivRoundUp(num_rows, kBlockThreads));
dh::LaunchKernel {grid, kBlockThreads, shared_memory_bytes} (
PredictLeafKernel<SparsePageLoader, SparsePageView>, data,
model_.nodes.ConstDeviceSpan(),
predictions->DeviceSpan().subspan(batch_offset),
model_.tree_segments.ConstDeviceSpan(),
model_.tree_beg_, model_.tree_end_, num_features, num_rows,
entry_start, use_shared);
batch_offset += batch.Size();
}
} else {
for (auto const& batch : p_fmat->GetBatches<EllpackPage>()) {
bst_row_t batch_offset = 0;
EllpackDeviceAccessor data{batch.Impl()->GetDeviceAccessor(generic_param_->gpu_id)};
size_t num_rows = batch.Size();
auto grid =
static_cast<uint32_t>(common::DivRoundUp(num_rows, kBlockThreads));
dh::LaunchKernel {grid, kBlockThreads, shared_memory_bytes} (
PredictLeafKernel<EllpackLoader, EllpackDeviceAccessor>, data,
model_.nodes.ConstDeviceSpan(),
predictions->DeviceSpan().subspan(batch_offset),
model_.tree_segments.ConstDeviceSpan(),
model_.tree_beg_, model_.tree_end_, num_features, num_rows,
entry_start, use_shared);
batch_offset += batch.Size();
}
}
}
void Configure(const std::vector<std::pair<std::string, std::string>>& cfg) override {
Predictor::Configure(cfg);
}
private:
/*! \brief Reconfigure the device when GPU is changed. */
void ConfigureDevice(int device) {
if (device >= 0) {
max_shared_memory_bytes_ = dh::MaxSharedMemory(device);
}
}
std::mutex lock_;
DeviceModel model_;
size_t max_shared_memory_bytes_ { 0 };
};
XGBOOST_REGISTER_PREDICTOR(GPUPredictor, "gpu_predictor")
.describe("Make predictions using GPU.")
.set_body([](GenericParameter const* generic_param) {
return new GPUPredictor(generic_param);
});
} // namespace predictor
} // namespace xgboost
|
eea8da7b4db32029c105ad80fe5c248532f1d668.hip | // !!! This is a file automatically generated by hipify!!!
/*
Defines the basic matrix operations for the AIJ (compressed row)
matrix storage format using the CUSPARSE library,
*/
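/*
   Usage sketch (illustrative only, not taken from this source; the sizes and values below are
   made up). An application typically just switches the matrix type, and MatMult() then runs the
   SpMV on the GPU through cuSPARSE:

     Mat            A;
     Vec            x,y;
     PetscInt       i,n = 4;
     PetscErrorCode ierr;

     ierr = MatCreate(PETSC_COMM_SELF,&A);CHKERRQ(ierr);
     ierr = MatSetSizes(A,n,n,n,n);CHKERRQ(ierr);
     ierr = MatSetType(A,MATSEQAIJCUSPARSE);CHKERRQ(ierr);      (or -mat_type seqaijcusparse with MatSetFromOptions())
     ierr = MatSeqAIJSetPreallocation(A,1,NULL);CHKERRQ(ierr);
     for (i=0; i<n; i++) {ierr = MatSetValue(A,i,i,2.0,INSERT_VALUES);CHKERRQ(ierr);}
     ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
     ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
     ierr = MatCreateVecs(A,&x,&y);CHKERRQ(ierr);
     ierr = VecSet(x,1.0);CHKERRQ(ierr);
     ierr = MatMult(A,x,y);CHKERRQ(ierr);
*/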
#define PETSC_SKIP_SPINLOCK
#define PETSC_SKIP_CXX_COMPLEX_FIX
#define PETSC_SKIP_IMMINTRIN_H_CUDAWORKAROUND 1
#include <petscconf.h>
#include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/
#include <../src/mat/impls/sbaij/seq/sbaij.h>
#include <../src/vec/vec/impls/dvecimpl.h>
#include <petsc/private/vecimpl.h>
#undef VecType
#include <../src/mat/impls/aij/seq/seqcusparse/cusparsematimpl.h>
const char *const MatCUSPARSEStorageFormats[] = {"CSR","ELL","HYB","MatCUSPARSEStorageFormat","MAT_CUSPARSE_",0};
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
/* The following are copied from cusparse.h in CUDA-11.0 (identifier names below were partially rewritten by hipify). In MatCUSPARSESpMVAlgorithms[] etc, we copy them in
0-based integer value order, since we want to use PetscOptionsEnum() to parse user command line options for them.
typedef enum {
HIPSPARSE_MV_ALG_DEFAULT = 0,
HIPSPARSE_COOMV_ALG = 1,
HIPSPARSE_CSRMV_ALG1 = 2,
HIPSPARSE_CSRMV_ALG2 = 3
} hipsparseSpMVAlg_t;
typedef enum {
HIPSPARSE_MM_ALG_DEFAULT CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_ALG_DEFAULT) = 0,
HIPSPARSE_COOMM_ALG1 CUSPARSE_DEPRECATED_ENUM(HIPSPARSE_SPMM_COO_ALG1) = 1,
HIPSPARSE_COOMM_ALG2 CUSPARSE_DEPRECATED_ENUM(HIPSPARSE_SPMM_COO_ALG2) = 2,
HIPSPARSE_COOMM_ALG3 CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_COO_ALG3) = 3,
HIPSPARSE_CSRMM_ALG1 CUSPARSE_DEPRECATED_ENUM(HIPSPARSE_CSRMM_ALG1) = 4,
CUSPARSE_SPMM_ALG_DEFAULT = 0,
HIPSPARSE_SPMM_COO_ALG1 = 1,
HIPSPARSE_SPMM_COO_ALG2 = 2,
CUSPARSE_SPMM_COO_ALG3 = 3,
CUSPARSE_SPMM_COO_ALG4 = 5,
HIPSPARSE_CSRMM_ALG1 = 4,
CUSPARSE_SPMM_CSR_ALG2 = 6,
} hipsparseSpMMAlg_t;
typedef enum {
HIPSPARSE_CSR2CSC_ALG1 = 1, // faster than V2 (in general), deterministic
HIPSPARSE_CSR2CSC_ALG2 = 2  // low memory requirement, non-deterministic
} hipsparseCsr2CscAlg_t;
*/
const char *const MatCUSPARSESpMVAlgorithms[] = {"MV_ALG_DEFAULT","COOMV_ALG", "CSRMV_ALG1","CSRMV_ALG2", "hipsparseSpMVAlg_t","CUSPARSE_",0};
const char *const MatCUSPARSESpMMAlgorithms[] = {"ALG_DEFAULT","COO_ALG1","COO_ALG2","COO_ALG3","CSR_ALG1","COO_ALG4","CSR_ALG2","hipsparseSpMMAlg_t","CUSPARSE_SPMM_",0};
const char *const MatCUSPARSECsr2CscAlgorithms[] = {"INVALID"/*cusparse does not have enum 0! We created one*/,"ALG1","ALG2","hipsparseCsr2CscAlg_t","CUSPARSE_CSR2CSC_",0};
#endif
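/*
   Note on the string arrays above: PetscOptionsEnum() reports the 0-based position of the string
   the user selected, so each array lists the names in the numeric order of the corresponding
   cuSPARSE enum (e.g. "CSRMV_ALG1" sits at index 2 because HIPSPARSE_CSRMV_ALG1 == 2).
   MatSetFromOptions_SeqAIJCUSPARSE() below re-checks this correspondence and raises an error if
   the library enum values ever change.
*/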
static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,const MatFactorInfo*);
static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,const MatFactorInfo*);
static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat,Mat,const MatFactorInfo*);
static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,IS,const MatFactorInfo*);
static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,IS,const MatFactorInfo*);
static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat,Mat,const MatFactorInfo*);
static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat,Vec,Vec);
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat,Vec,Vec);
static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(PetscOptionItems *PetscOptionsObject,Mat);
static PetscErrorCode MatAXPY_SeqAIJCUSPARSE(Mat,PetscScalar,Mat,MatStructure);
static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec);
static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec);
static PetscErrorCode MatMultHermitianTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatMultHermitianTransposeAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec);
static PetscErrorCode MatMultAddKernel_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec,PetscBool,PetscBool);
static PetscErrorCode CsrMatrix_Destroy(CsrMatrix**);
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct**);
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct**,MatCUSPARSEStorageFormat);
static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Reset(Mat_SeqAIJCUSPARSETriFactors**);
static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors**);
static PetscErrorCode MatSeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE**);
static PetscErrorCode MatSeqAIJCUSPARSECopyToGPU(Mat);
static PetscErrorCode MatSeqAIJCUSPARSECopyFromGPU(Mat);
PETSC_INTERN PetscErrorCode MatSetPreallocationCOO_SeqAIJCUSPARSE(Mat,PetscInt,const PetscInt[],const PetscInt[]);
PETSC_INTERN PetscErrorCode MatSetValuesCOO_SeqAIJCUSPARSE(Mat,const PetscScalar[],InsertMode);
static PetscErrorCode MatSeqAIJCopySubArray_SeqAIJCUSPARSE(Mat,PetscInt,const PetscInt[],PetscScalar[]);
PetscErrorCode MatCUSPARSESetStream(Mat A,const hipStream_t stream)
{
hipsparseStatus_t stat;
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
PetscFunctionBegin;
if (!cusparsestruct) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing spptr");
cusparsestruct->stream = stream;
stat = hipsparseSetStream(cusparsestruct->handle,cusparsestruct->stream);CHKERRCUSPARSE(stat);
PetscFunctionReturn(0);
}
PetscErrorCode MatCUSPARSESetHandle(Mat A,const hipsparseHandle_t handle)
{
hipsparseStatus_t stat;
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
PetscFunctionBegin;
if (!cusparsestruct) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing spptr");
if (cusparsestruct->handle != handle) {
if (cusparsestruct->handle) {
stat = hipsparseDestroy(cusparsestruct->handle);CHKERRCUSPARSE(stat);
}
cusparsestruct->handle = handle;
}
stat = hipsparseSetPointerMode(cusparsestruct->handle, HIPSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
PetscFunctionReturn(0);
}
PetscErrorCode MatCUSPARSEClearHandle(Mat A)
{
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
PetscBool flg;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg || !cusparsestruct) PetscFunctionReturn(0);
if (cusparsestruct->handle) cusparsestruct->handle = 0;
PetscFunctionReturn(0);
}
PetscErrorCode MatFactorGetSolverType_seqaij_cusparse(Mat A,MatSolverType *type)
{
PetscFunctionBegin;
*type = MATSOLVERCUSPARSE;
PetscFunctionReturn(0);
}
/*MC
MATSOLVERCUSPARSE = "cusparse" - A matrix type providing triangular solvers for seq matrices
of type seqaijcusparse, aijcusparse, or seqaijcusp, aijcusp on a single GPU. Currently supported
algorithms are ILU(k) and ICC(k). Typically, deeper factorizations (larger k) result in poorer
performance in the triangular solves. Full LU and Cholesky decompositions can be solved through the
CUSPARSE triangular solve algorithm. However, the performance can be quite poor, and thus these
algorithms are not recommended. This class does NOT support direct solver operations.
Level: beginner
.seealso: PCFactorSetMatSolverType(), MatSolverType, MatCreateSeqAIJCUSPARSE(), MATAIJCUSPARSE, MatCreateAIJCUSPARSE(), MatCUSPARSESetFormat(), MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation
M*/
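/*
   Usage sketch (illustrative only, not taken from this source; the ksp and pc objects are assumed
   to exist in the calling application):

     ierr = KSPGetPC(ksp,&pc);CHKERRQ(ierr);
     ierr = PCSetType(pc,PCILU);CHKERRQ(ierr);
     ierr = PCFactorSetMatSolverType(pc,MATSOLVERCUSPARSE);CHKERRQ(ierr);

   or, equivalently, from the command line when using an aijcusparse matrix:

     -pc_type ilu -pc_factor_mat_solver_type cusparse
*/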
PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse(Mat A,MatFactorType ftype,Mat *B)
{
PetscErrorCode ierr;
PetscInt n = A->rmap->n;
PetscFunctionBegin;
ierr = MatCreate(PetscObjectComm((PetscObject)A),B);CHKERRQ(ierr);
ierr = MatSetSizes(*B,n,n,n,n);CHKERRQ(ierr);
(*B)->factortype = ftype;
(*B)->useordering = PETSC_TRUE;
ierr = MatSetType(*B,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
if (ftype == MAT_FACTOR_LU || ftype == MAT_FACTOR_ILU || ftype == MAT_FACTOR_ILUDT) {
ierr = MatSetBlockSizesFromMats(*B,A,A);CHKERRQ(ierr);
(*B)->ops->ilufactorsymbolic = MatILUFactorSymbolic_SeqAIJCUSPARSE;
(*B)->ops->lufactorsymbolic = MatLUFactorSymbolic_SeqAIJCUSPARSE;
} else if (ftype == MAT_FACTOR_CHOLESKY || ftype == MAT_FACTOR_ICC) {
(*B)->ops->iccfactorsymbolic = MatICCFactorSymbolic_SeqAIJCUSPARSE;
(*B)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_SeqAIJCUSPARSE;
} else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Factor type not supported for CUSPARSE Matrix Types");
ierr = MatSeqAIJSetPreallocation(*B,MAT_SKIP_ALLOCATION,NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)(*B),"MatFactorGetSolverType_C",MatFactorGetSolverType_seqaij_cusparse);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PETSC_INTERN PetscErrorCode MatCUSPARSESetFormat_SeqAIJCUSPARSE(Mat A,MatCUSPARSEFormatOperation op,MatCUSPARSEStorageFormat format)
{
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
PetscFunctionBegin;
switch (op) {
case MAT_CUSPARSE_MULT:
cusparsestruct->format = format;
break;
case MAT_CUSPARSE_ALL:
cusparsestruct->format = format;
break;
default:
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unsupported operation %d for MatCUSPARSEFormatOperation. MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL are currently supported.",op);
}
PetscFunctionReturn(0);
}
/*@
MatCUSPARSESetFormat - Sets the storage format of CUSPARSE matrices for a particular
operation. Only the MatMult operation can use different GPU storage formats
for MPIAIJCUSPARSE matrices.
Not Collective
Input Parameters:
+ A - Matrix of type SEQAIJCUSPARSE
. op - MatCUSPARSEFormatOperation. SEQAIJCUSPARSE matrices support MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL. MPIAIJCUSPARSE matrices support MAT_CUSPARSE_MULT_DIAG, MAT_CUSPARSE_MULT_OFFDIAG, and MAT_CUSPARSE_ALL.
- format - MatCUSPARSEStorageFormat (one of MAT_CUSPARSE_CSR, MAT_CUSPARSE_ELL, MAT_CUSPARSE_HYB. The latter two require CUDA 4.2)
Output Parameter:
Level: intermediate
.seealso: MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation
@*/
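/*
   Example (illustrative; A is assumed to already be a SEQAIJCUSPARSE matrix, and the ELL/HYB
   formats are only available with older CUDA versions):

     ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_MULT,MAT_CUSPARSE_ELL);CHKERRQ(ierr);

   or from the command line: -mat_cusparse_mult_storage_format ell
*/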
PetscErrorCode MatCUSPARSESetFormat(Mat A,MatCUSPARSEFormatOperation op,MatCUSPARSEStorageFormat format)
{
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A, MAT_CLASSID,1);
ierr = PetscTryMethod(A,"MatCUSPARSESetFormat_C",(Mat,MatCUSPARSEFormatOperation,MatCUSPARSEStorageFormat),(A,op,format));CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/*@
MatSeqAIJCUSPARSESetGenerateTranspose - Sets the flag to explicitly generate the transpose matrix before calling MatMultTranspose
Collective on mat
Input Parameters:
+ A - Matrix of type SEQAIJCUSPARSE
- transgen - the boolean flag
Level: intermediate
.seealso: MATSEQAIJCUSPARSE, MatAIJCUSPARSESetGenerateTranspose()
@*/
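/*
   Example (illustrative; A is assumed to be a SEQAIJCUSPARSE matrix that will be used with
   MatMultTranspose()):

     ierr = MatSeqAIJCUSPARSESetGenerateTranspose(A,PETSC_TRUE);CHKERRQ(ierr);

   or from the command line: -mat_cusparse_transgen
*/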
PetscErrorCode MatSeqAIJCUSPARSESetGenerateTranspose(Mat A,PetscBool transgen)
{
PetscErrorCode ierr;
PetscBool flg;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
ierr = PetscObjectTypeCompare(((PetscObject)A),MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (flg) {
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
cusp->transgen = transgen;
if (!transgen) { /* destroy the transpose matrix if present, to prevent logic errors if transgen is set to true later */
ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&cusp->matTranspose,cusp->format);CHKERRQ(ierr);
}
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(PetscOptionItems *PetscOptionsObject,Mat A)
{
PetscErrorCode ierr;
MatCUSPARSEStorageFormat format;
PetscBool flg;
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
PetscFunctionBegin;
ierr = PetscOptionsHead(PetscOptionsObject,"SeqAIJCUSPARSE options");CHKERRQ(ierr);
if (A->factortype == MAT_FACTOR_NONE) {
PetscBool transgen = cusparsestruct->transgen;
ierr = PetscOptionsBool("-mat_cusparse_transgen","Generate explicit transpose for MatMultTranspose","MatSeqAIJCUSPARSESetGenerateTranspose",transgen,&transgen,&flg);CHKERRQ(ierr);
if (flg) {ierr = MatSeqAIJCUSPARSESetGenerateTranspose(A,transgen);CHKERRQ(ierr);}
ierr = PetscOptionsEnum("-mat_cusparse_mult_storage_format","sets storage format of (seq)aijcusparse gpu matrices for SpMV",
"MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparsestruct->format,(PetscEnum*)&format,&flg);CHKERRQ(ierr);
if (flg) {ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_MULT,format);CHKERRQ(ierr);}
ierr = PetscOptionsEnum("-mat_cusparse_storage_format","sets storage format of (seq)aijcusparse gpu matrices for SpMV and TriSolve",
"MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparsestruct->format,(PetscEnum*)&format,&flg);CHKERRQ(ierr);
if (flg) {ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_ALL,format);CHKERRQ(ierr);}
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
cusparsestruct->spmvAlg = HIPSPARSE_CSRMV_ALG1; /* default, since we only support csr */
ierr = PetscOptionsEnum("-mat_cusparse_spmv_alg","sets cuSPARSE algorithm used in sparse-mat dense-vector multiplication (SpMV)",
"hipsparseSpMVAlg_t",MatCUSPARSESpMVAlgorithms,(PetscEnum)cusparsestruct->spmvAlg,(PetscEnum*)&cusparsestruct->spmvAlg,&flg);CHKERRQ(ierr);
/* If the user did use this option, check its consistency with cuSPARSE, since PetscOptionsEnum() sets enum values based on their position in MatCUSPARSESpMVAlgorithms[] */
if (flg && HIPSPARSE_CSRMV_ALG1 != 2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE enum hipsparseSpMVAlg_t has been changed but PETSc has not been updated accordingly");
cusparsestruct->spmmAlg = HIPSPARSE_CSRMM_ALG1; /* default, only support column-major dense matrix B */
ierr = PetscOptionsEnum("-mat_cusparse_spmm_alg","sets cuSPARSE algorithm used in sparse-mat dense-mat multiplication (SpMM)",
"hipsparseSpMMAlg_t",MatCUSPARSESpMMAlgorithms,(PetscEnum)cusparsestruct->spmmAlg,(PetscEnum*)&cusparsestruct->spmmAlg,&flg);CHKERRQ(ierr);
if (flg && HIPSPARSE_CSRMM_ALG1 != 4) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE enum hipsparseSpMMAlg_t has been changed but PETSc has not been updated accordingly");
cusparsestruct->csr2cscAlg = HIPSPARSE_CSR2CSC_ALG1;
ierr = PetscOptionsEnum("-mat_cusparse_csr2csc_alg","sets cuSPARSE algorithm used in converting CSR matrices to CSC matrices",
"hipsparseCsr2CscAlg_t",MatCUSPARSECsr2CscAlgorithms,(PetscEnum)cusparsestruct->csr2cscAlg,(PetscEnum*)&cusparsestruct->csr2cscAlg,&flg);CHKERRQ(ierr);
if (flg && HIPSPARSE_CSR2CSC_ALG1 != 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE enum hipsparseCsr2CscAlg_t has been changed but PETSc has not been updated accordingly");
#endif
}
ierr = PetscOptionsTail();CHKERRQ(ierr);
PetscFunctionReturn(0);
}
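/*
   Run-time usage of the options registered above (values are illustrative; ELL/HYB storage and the
   CUDA-11-only algorithm options apply only where the corresponding library support exists):

     -mat_cusparse_transgen                   build an explicit transpose for MatMultTranspose
     -mat_cusparse_mult_storage_format ell    storage format used for SpMV only
     -mat_cusparse_storage_format csr         storage format used for SpMV and the triangular solves
     -mat_cusparse_spmv_alg csrmv_alg2        SpMV algorithm (CUDA 11 and later only)
     -mat_cusparse_csr2csc_alg alg2           CSR-to-CSC algorithm (CUDA 11 and later only)
*/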
static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS isrow,IS iscol,const MatFactorInfo *info)
{
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors);CHKERRQ(ierr);
ierr = MatILUFactorSymbolic_SeqAIJ(B,A,isrow,iscol,info);CHKERRQ(ierr);
B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE;
PetscFunctionReturn(0);
}
static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS isrow,IS iscol,const MatFactorInfo *info)
{
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors);CHKERRQ(ierr);
ierr = MatLUFactorSymbolic_SeqAIJ(B,A,isrow,iscol,info);CHKERRQ(ierr);
B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE;
PetscFunctionReturn(0);
}
static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS perm,const MatFactorInfo *info)
{
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors);CHKERRQ(ierr);
ierr = MatICCFactorSymbolic_SeqAIJ(B,A,perm,info);CHKERRQ(ierr);
B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE;
PetscFunctionReturn(0);
}
static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS perm,const MatFactorInfo *info)
{
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors);CHKERRQ(ierr);
ierr = MatCholeskyFactorSymbolic_SeqAIJ(B,A,perm,info);CHKERRQ(ierr);
B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE;
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSEBuildILULowerTriMatrix(Mat A)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
PetscInt n = A->rmap->n;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
hipsparseStatus_t stat;
const PetscInt *ai = a->i,*aj = a->j,*vi;
const MatScalar *aa = a->a,*v;
PetscInt *AiLo, *AjLo;
PetscInt i,nz, nzLower, offset, rowOffset;
PetscErrorCode ierr;
hipError_t cerr;
PetscFunctionBegin;
if (!n) PetscFunctionReturn(0);
if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
try {
/* first figure out the number of nonzeros in the lower triangular matrix including 1's on the diagonal. */
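/* rows 1..n-1 contribute ai[n]-ai[1] strictly lower entries from the factored storage (row 0 has none),
   and each of the n rows gets a unit diagonal entry, hence nzLower = n + ai[n] - ai[1] */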
nzLower=n+ai[n]-ai[1];
if (!loTriFactor) {
PetscScalar *AALo;
cerr = hipHostMalloc((void**) &AALo, nzLower*sizeof(PetscScalar));CHKERRCUDA(cerr);
/* Allocate Space for the lower triangular matrix */
cerr = hipHostMalloc((void**) &AiLo, (n+1)*sizeof(PetscInt));CHKERRCUDA(cerr);
cerr = hipHostMalloc((void**) &AjLo, nzLower*sizeof(PetscInt));CHKERRCUDA(cerr);
/* Fill the lower triangular matrix */
AiLo[0] = (PetscInt) 0;
AiLo[n] = nzLower;
AjLo[0] = (PetscInt) 0;
AALo[0] = (MatScalar) 1.0;
v = aa;
vi = aj;
offset = 1;
rowOffset= 1;
for (i=1; i<n; i++) {
nz = ai[i+1] - ai[i];
/* additional 1 for the term on the diagonal */
AiLo[i] = rowOffset;
rowOffset += nz+1;
ierr = PetscArraycpy(&(AjLo[offset]), vi, nz);CHKERRQ(ierr);
ierr = PetscArraycpy(&(AALo[offset]), v, nz);CHKERRQ(ierr);
offset += nz;
AjLo[offset] = (PetscInt) i;
AALo[offset] = (MatScalar) 1.0;
offset += 1;
v += nz;
vi += nz;
}
/* allocate space for the triangular factor information */
ierr = PetscNew(&loTriFactor);CHKERRQ(ierr);
loTriFactor->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL;
/* Create the matrix description */
stat = hipsparseCreateMatDescr(&loTriFactor->descr);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatIndexBase(loTriFactor->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = hipsparseSetMatType(loTriFactor->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
#else
stat = hipsparseSetMatType(loTriFactor->descr, HIPSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSPARSE(stat);
#endif
stat = hipsparseSetMatFillMode(loTriFactor->descr, HIPSPARSE_FILL_MODE_LOWER);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatDiagType(loTriFactor->descr, HIPSPARSE_DIAG_TYPE_UNIT);CHKERRCUSPARSE(stat);
/* set the operation */
loTriFactor->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE;
/* set the matrix */
loTriFactor->csrMat = new CsrMatrix;
loTriFactor->csrMat->num_rows = n;
loTriFactor->csrMat->num_cols = n;
loTriFactor->csrMat->num_entries = nzLower;
loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n+1);
loTriFactor->csrMat->row_offsets->assign(AiLo, AiLo+n+1);
loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzLower);
loTriFactor->csrMat->column_indices->assign(AjLo, AjLo+nzLower);
loTriFactor->csrMat->values = new THRUSTARRAY(nzLower);
loTriFactor->csrMat->values->assign(AALo, AALo+nzLower);
/* Create the solve analysis information */
ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_create_analysis_info(&loTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, loTriFactor->solveOp,
loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr,
loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(),
loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo,
&loTriFactor->solveBufferSize);CHKERRCUSPARSE(stat);
cerr = hipMalloc(&loTriFactor->solveBuffer,loTriFactor->solveBufferSize);CHKERRCUDA(cerr);
#endif
/* perform the solve analysis */
stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactor->solveOp,
loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr,
loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(),
loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,loTriFactor->solvePolicy, loTriFactor->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
/* assign the pointer */
((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtr = loTriFactor;
loTriFactor->AA_h = AALo;
cerr = hipHostFree(AiLo);CHKERRCUDA(cerr);
cerr = hipHostFree(AjLo);CHKERRCUDA(cerr);
ierr = PetscLogCpuToGpu((n+1+nzLower)*sizeof(int)+nzLower*sizeof(PetscScalar));CHKERRQ(ierr);
} else { /* update values only */
if (!loTriFactor->AA_h) {
cerr = hipHostMalloc((void**) &loTriFactor->AA_h, nzLower*sizeof(PetscScalar));CHKERRCUDA(cerr);
}
/* Fill the lower triangular matrix */
loTriFactor->AA_h[0] = 1.0;
v = aa;
vi = aj;
offset = 1;
for (i=1; i<n; i++) {
nz = ai[i+1] - ai[i];
ierr = PetscArraycpy(&(loTriFactor->AA_h[offset]), v, nz);CHKERRQ(ierr);
offset += nz;
loTriFactor->AA_h[offset] = 1.0;
offset += 1;
v += nz;
}
loTriFactor->csrMat->values->assign(loTriFactor->AA_h, loTriFactor->AA_h+nzLower);
ierr = PetscLogCpuToGpu(nzLower*sizeof(PetscScalar));CHKERRQ(ierr);
}
} catch(char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
}
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(Mat A)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
PetscInt n = A->rmap->n;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
hipsparseStatus_t stat;
const PetscInt *aj = a->j,*adiag = a->diag,*vi;
const MatScalar *aa = a->a,*v;
PetscInt *AiUp, *AjUp;
PetscInt i,nz, nzUpper, offset;
PetscErrorCode ierr;
hipError_t cerr;
PetscFunctionBegin;
if (!n) PetscFunctionReturn(0);
if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
try {
/* next, figure out the number of nonzeros in the upper triangular matrix. */
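/* adiag[] decreases with the row index in the factored storage: row i of U (diagonal included)
   occupies positions adiag[i+1]+1 .. adiag[i], so the per-row counts telescope to adiag[0]-adiag[n] */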
nzUpper = adiag[0]-adiag[n];
if (!upTriFactor) {
PetscScalar *AAUp;
cerr = hipHostMalloc((void**) &AAUp, nzUpper*sizeof(PetscScalar));CHKERRCUDA(cerr);
/* Allocate Space for the upper triangular matrix */
cerr = hipHostMalloc((void**) &AiUp, (n+1)*sizeof(PetscInt));CHKERRCUDA(cerr);
cerr = hipHostMalloc((void**) &AjUp, nzUpper*sizeof(PetscInt));CHKERRCUDA(cerr);
/* Fill the upper triangular matrix */
AiUp[0]=(PetscInt) 0;
AiUp[n]=nzUpper;
offset = nzUpper;
for (i=n-1; i>=0; i--) {
v = aa + adiag[i+1] + 1;
vi = aj + adiag[i+1] + 1;
/* number of elements NOT on the diagonal */
nz = adiag[i] - adiag[i+1]-1;
/* decrement the offset */
offset -= (nz+1);
/* first, set the diagonal elements */
AjUp[offset] = (PetscInt) i;
AAUp[offset] = (MatScalar)1./v[nz];
AiUp[i] = AiUp[i+1] - (nz+1);
ierr = PetscArraycpy(&(AjUp[offset+1]), vi, nz);CHKERRQ(ierr);
ierr = PetscArraycpy(&(AAUp[offset+1]), v, nz);CHKERRQ(ierr);
}
/* allocate space for the triangular factor information */
ierr = PetscNew(&upTriFactor);CHKERRQ(ierr);
upTriFactor->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL;
/* Create the matrix description */
stat = hipsparseCreateMatDescr(&upTriFactor->descr);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatIndexBase(upTriFactor->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = hipsparseSetMatType(upTriFactor->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
#else
stat = hipsparseSetMatType(upTriFactor->descr, HIPSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSPARSE(stat);
#endif
stat = hipsparseSetMatFillMode(upTriFactor->descr, HIPSPARSE_FILL_MODE_UPPER);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatDiagType(upTriFactor->descr, HIPSPARSE_DIAG_TYPE_NON_UNIT);CHKERRCUSPARSE(stat);
/* set the operation */
upTriFactor->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE;
/* set the matrix */
upTriFactor->csrMat = new CsrMatrix;
upTriFactor->csrMat->num_rows = n;
upTriFactor->csrMat->num_cols = n;
upTriFactor->csrMat->num_entries = nzUpper;
upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n+1);
upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+n+1);
upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzUpper);
upTriFactor->csrMat->column_indices->assign(AjUp, AjUp+nzUpper);
upTriFactor->csrMat->values = new THRUSTARRAY(nzUpper);
upTriFactor->csrMat->values->assign(AAUp, AAUp+nzUpper);
/* Create the solve analysis information */
ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_create_analysis_info(&upTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, upTriFactor->solveOp,
upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr,
upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(),
upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo,
&upTriFactor->solveBufferSize);CHKERRCUSPARSE(stat);
cerr = hipMalloc(&upTriFactor->solveBuffer,upTriFactor->solveBufferSize);CHKERRCUDA(cerr);
#endif
/* perform the solve analysis */
stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactor->solveOp,
upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr,
upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(),
upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,upTriFactor->solvePolicy, upTriFactor->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
/* assign the pointer */
((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtr = upTriFactor;
upTriFactor->AA_h = AAUp;
cerr = hipHostFree(AiUp);CHKERRCUDA(cerr);
cerr = hipHostFree(AjUp);CHKERRCUDA(cerr);
ierr = PetscLogCpuToGpu((n+1+nzUpper)*sizeof(int)+nzUpper*sizeof(PetscScalar));CHKERRQ(ierr);
} else {
if (!upTriFactor->AA_h) {
cerr = hipHostMalloc((void**) &upTriFactor->AA_h, nzUpper*sizeof(PetscScalar));CHKERRCUDA(cerr);
}
/* Fill the upper triangular matrix */
offset = nzUpper;
for (i=n-1; i>=0; i--) {
v = aa + adiag[i+1] + 1;
/* number of elements NOT on the diagonal */
nz = adiag[i] - adiag[i+1]-1;
/* decrement the offset */
offset -= (nz+1);
/* first, set the diagonal elements */
upTriFactor->AA_h[offset] = 1./v[nz];
ierr = PetscArraycpy(&(upTriFactor->AA_h[offset+1]), v, nz);CHKERRQ(ierr);
}
upTriFactor->csrMat->values->assign(upTriFactor->AA_h, upTriFactor->AA_h+nzUpper);
ierr = PetscLogCpuToGpu(nzUpper*sizeof(PetscScalar));CHKERRQ(ierr);
}
} catch(char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
}
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(Mat A)
{
PetscErrorCode ierr;
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
IS isrow = a->row,iscol = a->icol;
PetscBool row_identity,col_identity;
PetscInt n = A->rmap->n;
PetscFunctionBegin;
if (!cusparseTriFactors) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors");
ierr = MatSeqAIJCUSPARSEBuildILULowerTriMatrix(A);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(A);CHKERRQ(ierr);
if (!cusparseTriFactors->workVector) { cusparseTriFactors->workVector = new THRUSTARRAY(n); }
cusparseTriFactors->nnz=a->nz;
A->offloadmask = PETSC_OFFLOAD_BOTH;
/* lower triangular indices */
ierr = ISIdentity(isrow,&row_identity);CHKERRQ(ierr);
if (!row_identity && !cusparseTriFactors->rpermIndices) {
const PetscInt *r;
ierr = ISGetIndices(isrow,&r);CHKERRQ(ierr);
cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n);
cusparseTriFactors->rpermIndices->assign(r, r+n);
ierr = ISRestoreIndices(isrow,&r);CHKERRQ(ierr);
ierr = PetscLogCpuToGpu(n*sizeof(PetscInt));CHKERRQ(ierr);
}
/* upper triangular indices */
ierr = ISIdentity(iscol,&col_identity);CHKERRQ(ierr);
if (!col_identity && !cusparseTriFactors->cpermIndices) {
const PetscInt *c;
ierr = ISGetIndices(iscol,&c);CHKERRQ(ierr);
cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n);
cusparseTriFactors->cpermIndices->assign(c, c+n);
ierr = ISRestoreIndices(iscol,&c);CHKERRQ(ierr);
ierr = PetscLogCpuToGpu(n*sizeof(PetscInt));CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSEBuildICCTriMatrices(Mat A)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
hipsparseStatus_t stat;
PetscErrorCode ierr;
hipError_t cerr;
PetscInt *AiUp, *AjUp;
PetscScalar *AAUp;
PetscScalar *AALo;
PetscInt nzUpper = a->nz,n = A->rmap->n,i,offset,nz,j;
Mat_SeqSBAIJ *b = (Mat_SeqSBAIJ*)A->data;
const PetscInt *ai = b->i,*aj = b->j,*vj;
const MatScalar *aa = b->a,*v;
PetscFunctionBegin;
if (!n) PetscFunctionReturn(0);
if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
try {
cerr = hipHostMalloc((void**) &AAUp, nzUpper*sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = hipHostMalloc((void**) &AALo, nzUpper*sizeof(PetscScalar));CHKERRCUDA(cerr);
if (!upTriFactor && !loTriFactor) {
/* Allocate Space for the upper triangular matrix */
cerr = hipHostMalloc((void**) &AiUp, (n+1)*sizeof(PetscInt));CHKERRCUDA(cerr);
cerr = hipHostMalloc((void**) &AjUp, nzUpper*sizeof(PetscInt));CHKERRCUDA(cerr);
/* Fill the upper triangular matrix */
AiUp[0]=(PetscInt) 0;
AiUp[n]=nzUpper;
offset = 0;
for (i=0; i<n; i++) {
/* set the pointers */
v = aa + ai[i];
vj = aj + ai[i];
nz = ai[i+1] - ai[i] - 1; /* exclude diag[i] */
/* first, set the diagonal elements */
AjUp[offset] = (PetscInt) i;
AAUp[offset] = (MatScalar)1.0/v[nz];
AiUp[i] = offset;
AALo[offset] = (MatScalar)1.0/v[nz];
offset+=1;
if (nz>0) {
ierr = PetscArraycpy(&(AjUp[offset]), vj, nz);CHKERRQ(ierr);
ierr = PetscArraycpy(&(AAUp[offset]), v, nz);CHKERRQ(ierr);
for (j=offset; j<offset+nz; j++) {
AAUp[j] = -AAUp[j];
AALo[j] = AAUp[j]/v[nz];
}
offset+=nz;
}
}
/* allocate space for the triangular factor information */
ierr = PetscNew(&upTriFactor);CHKERRQ(ierr);
upTriFactor->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL;
/* Create the matrix description */
stat = hipsparseCreateMatDescr(&upTriFactor->descr);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatIndexBase(upTriFactor->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = hipsparseSetMatType(upTriFactor->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
#else
stat = hipsparseSetMatType(upTriFactor->descr, HIPSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSPARSE(stat);
#endif
stat = hipsparseSetMatFillMode(upTriFactor->descr, HIPSPARSE_FILL_MODE_UPPER);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatDiagType(upTriFactor->descr, HIPSPARSE_DIAG_TYPE_UNIT);CHKERRCUSPARSE(stat);
/* set the matrix */
upTriFactor->csrMat = new CsrMatrix;
upTriFactor->csrMat->num_rows = A->rmap->n;
upTriFactor->csrMat->num_cols = A->cmap->n;
upTriFactor->csrMat->num_entries = a->nz;
upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1);
upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+A->rmap->n+1);
upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz);
upTriFactor->csrMat->column_indices->assign(AjUp, AjUp+a->nz);
upTriFactor->csrMat->values = new THRUSTARRAY(a->nz);
upTriFactor->csrMat->values->assign(AAUp, AAUp+a->nz);
/* set the operation */
upTriFactor->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE;
/* Create the solve analysis information */
ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_create_analysis_info(&upTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, upTriFactor->solveOp,
upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr,
upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(),
upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo,
&upTriFactor->solveBufferSize);CHKERRCUSPARSE(stat);
cerr = hipMalloc(&upTriFactor->solveBuffer,upTriFactor->solveBufferSize);CHKERRCUDA(cerr);
#endif
/* perform the solve analysis */
stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactor->solveOp,
upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr,
upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(),
upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,upTriFactor->solvePolicy, upTriFactor->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
/* assign the pointer */
((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtr = upTriFactor;
/* allocate space for the triangular factor information */
ierr = PetscNew(&loTriFactor);CHKERRQ(ierr);
loTriFactor->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL;
/* Create the matrix description */
stat = hipsparseCreateMatDescr(&loTriFactor->descr);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatIndexBase(loTriFactor->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = hipsparseSetMatType(loTriFactor->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
#else
stat = hipsparseSetMatType(loTriFactor->descr, HIPSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSPARSE(stat);
#endif
stat = hipsparseSetMatFillMode(loTriFactor->descr, HIPSPARSE_FILL_MODE_UPPER);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatDiagType(loTriFactor->descr, HIPSPARSE_DIAG_TYPE_NON_UNIT);CHKERRCUSPARSE(stat);
/* set the operation */
loTriFactor->solveOp = HIPSPARSE_OPERATION_TRANSPOSE;
/* set the matrix */
loTriFactor->csrMat = new CsrMatrix;
loTriFactor->csrMat->num_rows = A->rmap->n;
loTriFactor->csrMat->num_cols = A->cmap->n;
loTriFactor->csrMat->num_entries = a->nz;
loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1);
loTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+A->rmap->n+1);
loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz);
loTriFactor->csrMat->column_indices->assign(AjUp, AjUp+a->nz);
loTriFactor->csrMat->values = new THRUSTARRAY(a->nz);
loTriFactor->csrMat->values->assign(AALo, AALo+a->nz);
/* Create the solve analysis information */
ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_create_analysis_info(&loTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, loTriFactor->solveOp,
loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr,
loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(),
loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo,
&loTriFactor->solveBufferSize);CHKERRCUSPARSE(stat);
cerr = hipMalloc(&loTriFactor->solveBuffer,loTriFactor->solveBufferSize);CHKERRCUDA(cerr);
#endif
/* perform the solve analysis */
stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactor->solveOp,
loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr,
loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(),
loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,loTriFactor->solvePolicy, loTriFactor->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
/* assign the pointer */
((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtr = loTriFactor;
ierr = PetscLogCpuToGpu(2*(((A->rmap->n+1)+(a->nz))*sizeof(int)+(a->nz)*sizeof(PetscScalar)));CHKERRQ(ierr);
cerr = hipHostFree(AiUp);CHKERRCUDA(cerr);
cerr = hipHostFree(AjUp);CHKERRCUDA(cerr);
} else {
/* Fill the upper triangular matrix */
offset = 0;
for (i=0; i<n; i++) {
/* set the pointers */
v = aa + ai[i];
nz = ai[i+1] - ai[i] - 1; /* exclude diag[i] */
/* first, set the diagonal elements */
AAUp[offset] = 1.0/v[nz];
AALo[offset] = 1.0/v[nz];
offset+=1;
if (nz>0) {
ierr = PetscArraycpy(&(AAUp[offset]), v, nz);CHKERRQ(ierr);
for (j=offset; j<offset+nz; j++) {
AAUp[j] = -AAUp[j];
AALo[j] = AAUp[j]/v[nz];
}
offset+=nz;
}
}
if (!upTriFactor) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors");
if (!loTriFactor) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors");
upTriFactor->csrMat->values->assign(AAUp, AAUp+a->nz);
loTriFactor->csrMat->values->assign(AALo, AALo+a->nz);
ierr = PetscLogCpuToGpu(2*(a->nz)*sizeof(PetscScalar));CHKERRQ(ierr);
}
cerr = hipHostFree(AAUp);CHKERRCUDA(cerr);
cerr = hipHostFree(AALo);CHKERRCUDA(cerr);
} catch(char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
}
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(Mat A)
{
PetscErrorCode ierr;
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
IS ip = a->row;
PetscBool perm_identity;
PetscInt n = A->rmap->n;
PetscFunctionBegin;
if (!cusparseTriFactors) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors");
ierr = MatSeqAIJCUSPARSEBuildICCTriMatrices(A);CHKERRQ(ierr);
if (!cusparseTriFactors->workVector) { cusparseTriFactors->workVector = new THRUSTARRAY(n); }
cusparseTriFactors->nnz=(a->nz-n)*2 + n;
A->offloadmask = PETSC_OFFLOAD_BOTH;
/* lower triangular indices */
ierr = ISIdentity(ip,&perm_identity);CHKERRQ(ierr);
if (!perm_identity) {
IS iip;
const PetscInt *irip,*rip;
ierr = ISInvertPermutation(ip,PETSC_DECIDE,&iip);CHKERRQ(ierr);
ierr = ISGetIndices(iip,&irip);CHKERRQ(ierr);
ierr = ISGetIndices(ip,&rip);CHKERRQ(ierr);
cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n);
cusparseTriFactors->rpermIndices->assign(rip, rip+n);
cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n);
cusparseTriFactors->cpermIndices->assign(irip, irip+n);
ierr = ISRestoreIndices(iip,&irip);CHKERRQ(ierr);
ierr = ISDestroy(&iip);CHKERRQ(ierr);
ierr = ISRestoreIndices(ip,&rip);CHKERRQ(ierr);
ierr = PetscLogCpuToGpu(2.*n*sizeof(PetscInt));CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat B,Mat A,const MatFactorInfo *info)
{
Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data;
IS isrow = b->row,iscol = b->col;
PetscBool row_identity,col_identity;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr);
ierr = MatLUFactorNumeric_SeqAIJ(B,A,info);CHKERRQ(ierr);
B->offloadmask = PETSC_OFFLOAD_CPU;
/* determine which version of MatSolve needs to be used. */
ierr = ISIdentity(isrow,&row_identity);CHKERRQ(ierr);
ierr = ISIdentity(iscol,&col_identity);CHKERRQ(ierr);
if (row_identity && col_identity) {
B->ops->solve = MatSolve_SeqAIJCUSPARSE_NaturalOrdering;
B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering;
B->ops->matsolve = NULL;
B->ops->matsolvetranspose = NULL;
} else {
B->ops->solve = MatSolve_SeqAIJCUSPARSE;
B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE;
B->ops->matsolve = NULL;
B->ops->matsolvetranspose = NULL;
}
/* get the triangular factors */
ierr = MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(B);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat B,Mat A,const MatFactorInfo *info)
{
Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data;
IS ip = b->row;
PetscBool perm_identity;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr);
ierr = MatCholeskyFactorNumeric_SeqAIJ(B,A,info);CHKERRQ(ierr);
B->offloadmask = PETSC_OFFLOAD_CPU;
/* determine which version of MatSolve needs to be used. */
ierr = ISIdentity(ip,&perm_identity);CHKERRQ(ierr);
if (perm_identity) {
B->ops->solve = MatSolve_SeqAIJCUSPARSE_NaturalOrdering;
B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering;
B->ops->matsolve = NULL;
B->ops->matsolvetranspose = NULL;
} else {
B->ops->solve = MatSolve_SeqAIJCUSPARSE;
B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE;
B->ops->matsolve = NULL;
B->ops->matsolvetranspose = NULL;
}
/* get the triangular factors */
ierr = MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(B);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(Mat A)
{
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT;
hipsparseStatus_t stat;
hipsparseIndexBase_t indexBase;
hipsparseMatrixType_t matrixType;
hipsparseFillMode_t fillMode;
hipsparseDiagType_t diagType;
hipError_t cerr;
PetscErrorCode ierr;
PetscFunctionBegin;
/* allocate space for the transpose of the lower triangular factor */
ierr = PetscNew(&loTriFactorT);CHKERRQ(ierr);
loTriFactorT->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL;
/* set the matrix descriptors of the lower triangular factor */
matrixType = cusparseGetMatType(loTriFactor->descr);
indexBase = cusparseGetMatIndexBase(loTriFactor->descr);
fillMode = cusparseGetMatFillMode(loTriFactor->descr)==HIPSPARSE_FILL_MODE_UPPER ?
HIPSPARSE_FILL_MODE_LOWER : HIPSPARSE_FILL_MODE_UPPER;
diagType = cusparseGetMatDiagType(loTriFactor->descr);
/* Create the matrix description */
stat = hipsparseCreateMatDescr(&loTriFactorT->descr);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatIndexBase(loTriFactorT->descr, indexBase);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatType(loTriFactorT->descr, matrixType);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatFillMode(loTriFactorT->descr, fillMode);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatDiagType(loTriFactorT->descr, diagType);CHKERRCUSPARSE(stat);
/* set the operation */
loTriFactorT->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE;
/* allocate GPU space for the CSC of the lower triangular factor*/
loTriFactorT->csrMat = new CsrMatrix;
loTriFactorT->csrMat->num_rows = loTriFactor->csrMat->num_cols;
loTriFactorT->csrMat->num_cols = loTriFactor->csrMat->num_rows;
loTriFactorT->csrMat->num_entries = loTriFactor->csrMat->num_entries;
loTriFactorT->csrMat->row_offsets = new THRUSTINTARRAY32(loTriFactorT->csrMat->num_rows+1);
loTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(loTriFactorT->csrMat->num_entries);
loTriFactorT->csrMat->values = new THRUSTARRAY(loTriFactorT->csrMat->num_entries);
/* compute the transpose of the lower triangular factor, i.e. the CSC */
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
stat = hipsparseCsr2cscEx2_bufferSize(cusparseTriFactors->handle, loTriFactor->csrMat->num_rows,
loTriFactor->csrMat->num_cols, loTriFactor->csrMat->num_entries,
loTriFactor->csrMat->values->data().get(),
loTriFactor->csrMat->row_offsets->data().get(),
loTriFactor->csrMat->column_indices->data().get(),
loTriFactorT->csrMat->values->data().get(),
loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype,
HIPSPARSE_ACTION_NUMERIC,indexBase,
HIPSPARSE_CSR2CSC_ALG1, &loTriFactor->csr2cscBufferSize);CHKERRCUSPARSE(stat);
cerr = hipMalloc(&loTriFactor->csr2cscBuffer,loTriFactor->csr2cscBufferSize);CHKERRCUDA(cerr);
#endif
ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_csr2csc(cusparseTriFactors->handle, loTriFactor->csrMat->num_rows,
loTriFactor->csrMat->num_cols, loTriFactor->csrMat->num_entries,
loTriFactor->csrMat->values->data().get(),
loTriFactor->csrMat->row_offsets->data().get(),
loTriFactor->csrMat->column_indices->data().get(),
loTriFactorT->csrMat->values->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype,
HIPSPARSE_ACTION_NUMERIC, indexBase,
HIPSPARSE_CSR2CSC_ALG1, loTriFactor->csr2cscBuffer
#else
loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->csrMat->row_offsets->data().get(),
HIPSPARSE_ACTION_NUMERIC, indexBase
#endif
);CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogEventEnd(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
/* Create the solve analysis information */
ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_create_analysis_info(&loTriFactorT->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, loTriFactorT->solveOp,
loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, loTriFactorT->descr,
loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(),
loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo,
&loTriFactorT->solveBufferSize);CHKERRCUSPARSE(stat);
cerr = hipMalloc(&loTriFactorT->solveBuffer,loTriFactorT->solveBufferSize);CHKERRCUDA(cerr);
#endif
/* perform the solve analysis */
stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactorT->solveOp,
loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, loTriFactorT->descr,
loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(),
loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,loTriFactorT->solvePolicy, loTriFactorT->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
/* assign the pointer */
((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtrTranspose = loTriFactorT;
/*********************************************/
/* Now the Transpose of the Upper Tri Factor */
/*********************************************/
/* allocate space for the transpose of the upper triangular factor */
ierr = PetscNew(&upTriFactorT);CHKERRQ(ierr);
upTriFactorT->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL;
/* set the matrix descriptors of the upper triangular factor */
matrixType = cusparseGetMatType(upTriFactor->descr);
indexBase = cusparseGetMatIndexBase(upTriFactor->descr);
fillMode = cusparseGetMatFillMode(upTriFactor->descr)==HIPSPARSE_FILL_MODE_UPPER ?
HIPSPARSE_FILL_MODE_LOWER : HIPSPARSE_FILL_MODE_UPPER;
diagType = cusparseGetMatDiagType(upTriFactor->descr);
/* Create the matrix description */
stat = hipsparseCreateMatDescr(&upTriFactorT->descr);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatIndexBase(upTriFactorT->descr, indexBase);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatType(upTriFactorT->descr, matrixType);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatFillMode(upTriFactorT->descr, fillMode);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatDiagType(upTriFactorT->descr, diagType);CHKERRCUSPARSE(stat);
/* set the operation */
upTriFactorT->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE;
/* allocate GPU space for the CSC of the upper triangular factor*/
upTriFactorT->csrMat = new CsrMatrix;
upTriFactorT->csrMat->num_rows = upTriFactor->csrMat->num_cols;
upTriFactorT->csrMat->num_cols = upTriFactor->csrMat->num_rows;
upTriFactorT->csrMat->num_entries = upTriFactor->csrMat->num_entries;
upTriFactorT->csrMat->row_offsets = new THRUSTINTARRAY32(upTriFactorT->csrMat->num_rows+1);
upTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(upTriFactorT->csrMat->num_entries);
upTriFactorT->csrMat->values = new THRUSTARRAY(upTriFactorT->csrMat->num_entries);
/* compute the transpose of the upper triangular factor, i.e. the CSC */
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
stat = hipsparseCsr2cscEx2_bufferSize(cusparseTriFactors->handle,upTriFactor->csrMat->num_rows,
upTriFactor->csrMat->num_cols, upTriFactor->csrMat->num_entries,
upTriFactor->csrMat->values->data().get(),
upTriFactor->csrMat->row_offsets->data().get(),
upTriFactor->csrMat->column_indices->data().get(),
upTriFactorT->csrMat->values->data().get(),
upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype,
HIPSPARSE_ACTION_NUMERIC,indexBase,
HIPSPARSE_CSR2CSC_ALG1, &upTriFactor->csr2cscBufferSize);CHKERRCUSPARSE(stat);
cerr = hipMalloc(&upTriFactor->csr2cscBuffer,upTriFactor->csr2cscBufferSize);CHKERRCUDA(cerr);
#endif
ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_csr2csc(cusparseTriFactors->handle, upTriFactor->csrMat->num_rows,
upTriFactor->csrMat->num_cols, upTriFactor->csrMat->num_entries,
upTriFactor->csrMat->values->data().get(),
upTriFactor->csrMat->row_offsets->data().get(),
upTriFactor->csrMat->column_indices->data().get(),
upTriFactorT->csrMat->values->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype,
HIPSPARSE_ACTION_NUMERIC, indexBase,
HIPSPARSE_CSR2CSC_ALG1, upTriFactor->csr2cscBuffer
#else
upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->csrMat->row_offsets->data().get(),
HIPSPARSE_ACTION_NUMERIC, indexBase
#endif
);CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogEventEnd(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
/* Create the solve analysis information */
ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_create_analysis_info(&upTriFactorT->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, upTriFactorT->solveOp,
upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, upTriFactorT->descr,
upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(),
upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo,
&upTriFactorT->solveBufferSize);CHKERRCUSPARSE(stat);
cerr = hipMalloc(&upTriFactorT->solveBuffer,upTriFactorT->solveBufferSize);CHKERRCUDA(cerr);
#endif
/* perform the solve analysis */
stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactorT->solveOp,
upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, upTriFactorT->descr,
upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(),
upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,upTriFactorT->solvePolicy, upTriFactorT->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
/* assign the pointer */
((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtrTranspose = upTriFactorT;
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSEGenerateTransposeForMult(Mat A)
{
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
Mat_SeqAIJCUSPARSEMultStruct *matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat;
Mat_SeqAIJCUSPARSEMultStruct *matstructT = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose;
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
hipsparseStatus_t stat;
hipsparseIndexBase_t indexBase;
hipError_t err;
PetscErrorCode ierr;
PetscFunctionBegin;
if (!cusparsestruct->transgen || cusparsestruct->matTranspose || !A->rmap->n || !A->cmap->n) PetscFunctionReturn(0);
ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
/* create cusparse matrix */
matstructT = new Mat_SeqAIJCUSPARSEMultStruct;
stat = hipsparseCreateMatDescr(&matstructT->descr);CHKERRCUSPARSE(stat);
indexBase = cusparseGetMatIndexBase(matstruct->descr);
stat = hipsparseSetMatIndexBase(matstructT->descr, indexBase);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatType(matstructT->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
/* set alpha and beta */
err = hipMalloc((void **)&(matstructT->alpha_one),sizeof(PetscScalar));CHKERRCUDA(err);
err = hipMalloc((void **)&(matstructT->beta_zero),sizeof(PetscScalar));CHKERRCUDA(err);
err = hipMalloc((void **)&(matstructT->beta_one), sizeof(PetscScalar));CHKERRCUDA(err);
err = hipMemcpy(matstructT->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(err);
err = hipMemcpy(matstructT->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(err);
err = hipMemcpy(matstructT->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(err);
stat = hipsparseSetPointerMode(cusparsestruct->handle, HIPSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
if (cusparsestruct->format==MAT_CUSPARSE_CSR) {
CsrMatrix *matrix = (CsrMatrix*)matstruct->mat;
CsrMatrix *matrixT= new CsrMatrix;
matrixT->num_rows = A->cmap->n;
matrixT->num_cols = A->rmap->n;
matrixT->num_entries = a->nz;
matrixT->row_offsets = new THRUSTINTARRAY32(matrixT->num_rows+1);
matrixT->column_indices = new THRUSTINTARRAY32(a->nz);
matrixT->values = new THRUSTARRAY(a->nz);
if (!cusparsestruct->rowoffsets_gpu) { cusparsestruct->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n+1); }
cusparsestruct->rowoffsets_gpu->assign(a->i,a->i+A->rmap->n+1);
/* compute the transpose, i.e. the CSC */
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
stat = hipsparseCsr2cscEx2_bufferSize(cusparsestruct->handle, A->rmap->n,
A->cmap->n, matrix->num_entries,
matrix->values->data().get(),
cusparsestruct->rowoffsets_gpu->data().get(),
matrix->column_indices->data().get(),
matrixT->values->data().get(),
matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), cusparse_scalartype,
HIPSPARSE_ACTION_NUMERIC,indexBase,
cusparsestruct->csr2cscAlg, &cusparsestruct->csr2cscBufferSize);CHKERRCUSPARSE(stat);
err = hipMalloc(&cusparsestruct->csr2cscBuffer,cusparsestruct->csr2cscBufferSize);CHKERRCUDA(err);
#endif
stat = cusparse_csr2csc(cusparsestruct->handle, A->rmap->n,
A->cmap->n, matrix->num_entries,
matrix->values->data().get(),
cusparsestruct->rowoffsets_gpu->data().get(),
matrix->column_indices->data().get(),
matrixT->values->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), cusparse_scalartype,
HIPSPARSE_ACTION_NUMERIC,indexBase,
cusparsestruct->csr2cscAlg, cusparsestruct->csr2cscBuffer
#else
matrixT->column_indices->data().get(), matrixT->row_offsets->data().get(),
HIPSPARSE_ACTION_NUMERIC, indexBase
#endif
);CHKERRCUSPARSE(stat);
matstructT->mat = matrixT;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
stat = hipsparseCreateCsr(&matstructT->matDescr,
matrixT->num_rows, matrixT->num_cols, matrixT->num_entries,
matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(),
matrixT->values->data().get(),
HIPSPARSE_INDEX_32I,HIPSPARSE_INDEX_32I, /* row offset, col idx type due to THRUSTINTARRAY32 */
indexBase,cusparse_scalartype);CHKERRCUSPARSE(stat);
#endif
} else if (cusparsestruct->format==MAT_CUSPARSE_ELL || cusparsestruct->format==MAT_CUSPARSE_HYB) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
CsrMatrix *temp = new CsrMatrix;
CsrMatrix *tempT = new CsrMatrix;
/* First convert HYB to CSR */
temp->num_rows = A->rmap->n;
temp->num_cols = A->cmap->n;
temp->num_entries = a->nz;
temp->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1);
temp->column_indices = new THRUSTINTARRAY32(a->nz);
temp->values = new THRUSTARRAY(a->nz);
stat = cusparse_hyb2csr(cusparsestruct->handle,
matstruct->descr, (cusparseHybMat_t)matstruct->mat,
temp->values->data().get(),
temp->row_offsets->data().get(),
temp->column_indices->data().get());CHKERRCUSPARSE(stat);
/* Next, convert CSR to CSC (i.e. the matrix transpose) */
tempT->num_rows = A->rmap->n;
tempT->num_cols = A->cmap->n;
tempT->num_entries = a->nz;
tempT->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1);
tempT->column_indices = new THRUSTINTARRAY32(a->nz);
tempT->values = new THRUSTARRAY(a->nz);
stat = cusparse_csr2csc(cusparsestruct->handle, temp->num_rows,
temp->num_cols, temp->num_entries,
temp->values->data().get(),
temp->row_offsets->data().get(),
temp->column_indices->data().get(),
tempT->values->data().get(),
tempT->column_indices->data().get(),
tempT->row_offsets->data().get(),
HIPSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUSPARSE(stat);
/* Last, convert CSC to HYB */
cusparseHybMat_t hybMat;
stat = cusparseCreateHybMat(&hybMat);CHKERRCUSPARSE(stat);
cusparseHybPartition_t partition = cusparsestruct->format==MAT_CUSPARSE_ELL ?
CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO;
stat = cusparse_csr2hyb(cusparsestruct->handle, A->rmap->n, A->cmap->n,
matstructT->descr, tempT->values->data().get(),
tempT->row_offsets->data().get(),
tempT->column_indices->data().get(),
hybMat, 0, partition);CHKERRCUSPARSE(stat);
/* assign the pointer */
matstructT->mat = hybMat;
/* delete temporaries */
if (tempT) {
if (tempT->values) delete (THRUSTARRAY*) tempT->values;
if (tempT->column_indices) delete (THRUSTINTARRAY32*) tempT->column_indices;
if (tempT->row_offsets) delete (THRUSTINTARRAY32*) tempT->row_offsets;
delete (CsrMatrix*) tempT;
}
if (temp) {
if (temp->values) delete (THRUSTARRAY*) temp->values;
if (temp->column_indices) delete (THRUSTINTARRAY32*) temp->column_indices;
if (temp->row_offsets) delete (THRUSTINTARRAY32*) temp->row_offsets;
delete (CsrMatrix*) temp;
}
#endif
}
err = WaitForCUDA();CHKERRCUDA(err);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogEventEnd(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
/* the compressed row indices is not used for matTranspose */
matstructT->cprowIndices = NULL;
/* assign the pointer */
((Mat_SeqAIJCUSPARSE*)A->spptr)->matTranspose = matstructT;
PetscFunctionReturn(0);
}
/* Why do we need to analyze the tranposed matrix again? Can't we just use op(A) = HIPSPARSE_OPERATION_TRANSPOSE in MatSolve_SeqAIJCUSPARSE? */
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat A,Vec bb,Vec xx)
{
PetscInt n = xx->map->n;
const PetscScalar *barray;
PetscScalar *xarray;
thrust::device_ptr<const PetscScalar> bGPU;
thrust::device_ptr<PetscScalar> xGPU;
hipsparseStatus_t stat;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose;
THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector;
PetscErrorCode ierr;
hipError_t cerr;
PetscFunctionBegin;
/* Analyze the matrix and create the transpose ... on the fly */
if (!loTriFactorT && !upTriFactorT) {
ierr = MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A);CHKERRQ(ierr);
loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose;
upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose;
}
/* Get the GPU pointers */
ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr);
xGPU = thrust::device_pointer_cast(xarray);
bGPU = thrust::device_pointer_cast(barray);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
/* First, reorder with the row permutation */
thrust::copy(thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()),
thrust::make_permutation_iterator(bGPU+n, cusparseTriFactors->rpermIndices->end()),
xGPU);
/* First, solve U */
stat = cusparse_solve(cusparseTriFactors->handle, upTriFactorT->solveOp,
upTriFactorT->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
upTriFactorT->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, upTriFactorT->descr,
upTriFactorT->csrMat->values->data().get(),
upTriFactorT->csrMat->row_offsets->data().get(),
upTriFactorT->csrMat->column_indices->data().get(),
upTriFactorT->solveInfo,
xarray, tempGPU->data().get()
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,upTriFactorT->solvePolicy, upTriFactorT->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
/* Then, solve L */
stat = cusparse_solve(cusparseTriFactors->handle, loTriFactorT->solveOp,
loTriFactorT->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
loTriFactorT->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, loTriFactorT->descr,
loTriFactorT->csrMat->values->data().get(),
loTriFactorT->csrMat->row_offsets->data().get(),
loTriFactorT->csrMat->column_indices->data().get(),
loTriFactorT->solveInfo,
tempGPU->data().get(), xarray
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,loTriFactorT->solvePolicy, loTriFactorT->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
/* Last, copy the solution, xGPU, into a temporary with the column permutation ... can't be done in place. */
thrust::copy(thrust::make_permutation_iterator(xGPU, cusparseTriFactors->cpermIndices->begin()),
thrust::make_permutation_iterator(xGPU+n, cusparseTriFactors->cpermIndices->end()),
tempGPU->begin());
/* Copy the temporary to the full solution. */
thrust::copy(tempGPU->begin(), tempGPU->end(), xGPU);
/* restore */
ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat A,Vec bb,Vec xx)
{
const PetscScalar *barray;
PetscScalar *xarray;
hipsparseStatus_t stat;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose;
THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector;
PetscErrorCode ierr;
hipError_t cerr;
PetscFunctionBegin;
/* Analyze the matrix and create the transpose ... on the fly */
if (!loTriFactorT && !upTriFactorT) {
ierr = MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A);CHKERRQ(ierr);
loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose;
upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose;
}
/* Get the GPU pointers */
ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
/* First, solve U */
stat = cusparse_solve(cusparseTriFactors->handle, upTriFactorT->solveOp,
upTriFactorT->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
upTriFactorT->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, upTriFactorT->descr,
upTriFactorT->csrMat->values->data().get(),
upTriFactorT->csrMat->row_offsets->data().get(),
upTriFactorT->csrMat->column_indices->data().get(),
upTriFactorT->solveInfo,
barray, tempGPU->data().get()
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,upTriFactorT->solvePolicy, upTriFactorT->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
/* Then, solve L */
stat = cusparse_solve(cusparseTriFactors->handle, loTriFactorT->solveOp,
loTriFactorT->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
loTriFactorT->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, loTriFactorT->descr,
loTriFactorT->csrMat->values->data().get(),
loTriFactorT->csrMat->row_offsets->data().get(),
loTriFactorT->csrMat->column_indices->data().get(),
loTriFactorT->solveInfo,
tempGPU->data().get(), xarray
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,loTriFactorT->solvePolicy, loTriFactorT->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
/* restore */
ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat A,Vec bb,Vec xx)
{
const PetscScalar *barray;
PetscScalar *xarray;
thrust::device_ptr<const PetscScalar> bGPU;
thrust::device_ptr<PetscScalar> xGPU;
hipsparseStatus_t stat;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector;
PetscErrorCode ierr;
hipError_t cerr;
PetscFunctionBegin;
/* Get the GPU pointers */
ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr);
xGPU = thrust::device_pointer_cast(xarray);
bGPU = thrust::device_pointer_cast(barray);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
/* First, reorder with the row permutation */
thrust::copy(thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()),
thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->end()),
tempGPU->begin());
/* Next, solve L */
stat = cusparse_solve(cusparseTriFactors->handle, loTriFactor->solveOp,
loTriFactor->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
loTriFactor->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, loTriFactor->descr,
loTriFactor->csrMat->values->data().get(),
loTriFactor->csrMat->row_offsets->data().get(),
loTriFactor->csrMat->column_indices->data().get(),
loTriFactor->solveInfo,
tempGPU->data().get(), xarray
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,loTriFactor->solvePolicy, loTriFactor->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
/* Then, solve U */
stat = cusparse_solve(cusparseTriFactors->handle, upTriFactor->solveOp,
upTriFactor->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
upTriFactor->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, upTriFactor->descr,
upTriFactor->csrMat->values->data().get(),
upTriFactor->csrMat->row_offsets->data().get(),
upTriFactor->csrMat->column_indices->data().get(),
upTriFactor->solveInfo,
xarray, tempGPU->data().get()
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,upTriFactor->solvePolicy, upTriFactor->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
/* Last, reorder with the column permutation */
thrust::copy(thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->begin()),
thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->end()),
xGPU);
ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat A,Vec bb,Vec xx)
{
const PetscScalar *barray;
PetscScalar *xarray;
hipsparseStatus_t stat;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector;
PetscErrorCode ierr;
hipError_t cerr;
PetscFunctionBegin;
/* Get the GPU pointers */
ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
/* First, solve L */
stat = cusparse_solve(cusparseTriFactors->handle, loTriFactor->solveOp,
loTriFactor->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
loTriFactor->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, loTriFactor->descr,
loTriFactor->csrMat->values->data().get(),
loTriFactor->csrMat->row_offsets->data().get(),
loTriFactor->csrMat->column_indices->data().get(),
loTriFactor->solveInfo,
barray, tempGPU->data().get()
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,loTriFactor->solvePolicy, loTriFactor->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
/* Next, solve U */
stat = cusparse_solve(cusparseTriFactors->handle, upTriFactor->solveOp,
upTriFactor->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
upTriFactor->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, upTriFactor->descr,
upTriFactor->csrMat->values->data().get(),
upTriFactor->csrMat->row_offsets->data().get(),
upTriFactor->csrMat->column_indices->data().get(),
upTriFactor->solveInfo,
tempGPU->data().get(), xarray
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,upTriFactor->solvePolicy, upTriFactor->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSECopyFromGPU(Mat A)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
hipError_t cerr;
PetscErrorCode ierr;
PetscFunctionBegin;
if (A->offloadmask == PETSC_OFFLOAD_GPU) {
CsrMatrix *matrix = (CsrMatrix*)cusp->mat->mat;
ierr = PetscLogEventBegin(MAT_CUSPARSECopyFromGPU,A,0,0,0);CHKERRQ(ierr);
cerr = hipMemcpy(a->a, matrix->values->data().get(), a->nz*sizeof(PetscScalar), hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuToCpu(a->nz*sizeof(PetscScalar));CHKERRQ(ierr);
ierr = PetscLogEventEnd(MAT_CUSPARSECopyFromGPU,A,0,0,0);CHKERRQ(ierr);
A->offloadmask = PETSC_OFFLOAD_BOTH;
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJGetArray_SeqAIJCUSPARSE(Mat A,PetscScalar *array[])
{
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr);
*array = a->a;
A->offloadmask = PETSC_OFFLOAD_CPU;
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSECopyToGPU(Mat A)
{
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
Mat_SeqAIJCUSPARSEMultStruct *matstruct = cusparsestruct->mat;
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
PetscInt m = A->rmap->n,*ii,*ridx,tmp;
PetscErrorCode ierr;
hipsparseStatus_t stat;
PetscBool both = PETSC_TRUE;
hipError_t err;
PetscFunctionBegin;
if (A->boundtocpu) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Cannot copy to GPU");
if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
if (A->nonzerostate == cusparsestruct->nonzerostate && cusparsestruct->format == MAT_CUSPARSE_CSR) {
/* Copy values only */
CsrMatrix *matrix,*matrixT;
matrix = (CsrMatrix*)cusparsestruct->mat->mat;
if (a->nz && !a->a) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CSR values");
ierr = PetscLogEventBegin(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr);
matrix->values->assign(a->a, a->a+a->nz);
err = WaitForCUDA();CHKERRCUDA(err);
ierr = PetscLogCpuToGpu((a->nz)*sizeof(PetscScalar));CHKERRQ(ierr);
ierr = PetscLogEventEnd(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr);
/* Update matT when it was built before */
if (cusparsestruct->matTranspose) {
hipsparseIndexBase_t indexBase = cusparseGetMatIndexBase(cusparsestruct->mat->descr);
matrixT = (CsrMatrix*)cusparsestruct->matTranspose->mat;
ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_csr2csc(cusparsestruct->handle, A->rmap->n,
A->cmap->n, matrix->num_entries,
matrix->values->data().get(),
cusparsestruct->rowoffsets_gpu->data().get(),
matrix->column_indices->data().get(),
matrixT->values->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), cusparse_scalartype,
HIPSPARSE_ACTION_NUMERIC,indexBase,
cusparsestruct->csr2cscAlg, cusparsestruct->csr2cscBuffer
#else
matrixT->column_indices->data().get(), matrixT->row_offsets->data().get(),
HIPSPARSE_ACTION_NUMERIC, indexBase
#endif
);CHKERRCUSPARSE(stat);
err = WaitForCUDA();CHKERRCUDA(err);
ierr = PetscLogEventEnd(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
}
} else {
PetscInt nnz;
ierr = PetscLogEventBegin(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&cusparsestruct->mat,cusparsestruct->format);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&cusparsestruct->matTranspose,cusparsestruct->format);CHKERRQ(ierr);
delete cusparsestruct->workVector;
delete cusparsestruct->rowoffsets_gpu;
try {
if (a->compressedrow.use) {
m = a->compressedrow.nrows;
ii = a->compressedrow.i;
ridx = a->compressedrow.rindex;
} else {
m = A->rmap->n;
ii = a->i;
ridx = NULL;
}
if (!ii) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CSR row data");
if (m && !a->j) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CSR column data");
if (!a->a) { nnz = ii[m]; both = PETSC_FALSE; }
else nnz = a->nz;
/* create cusparse matrix */
cusparsestruct->nrows = m;
matstruct = new Mat_SeqAIJCUSPARSEMultStruct;
stat = hipsparseCreateMatDescr(&matstruct->descr);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatIndexBase(matstruct->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatType(matstruct->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
err = hipMalloc((void **)&(matstruct->alpha_one),sizeof(PetscScalar));CHKERRCUDA(err);
err = hipMalloc((void **)&(matstruct->beta_zero),sizeof(PetscScalar));CHKERRCUDA(err);
err = hipMalloc((void **)&(matstruct->beta_one), sizeof(PetscScalar));CHKERRCUDA(err);
err = hipMemcpy(matstruct->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(err);
err = hipMemcpy(matstruct->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(err);
err = hipMemcpy(matstruct->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(err);
stat = hipsparseSetPointerMode(cusparsestruct->handle, HIPSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
/* Build a hybrid/ellpack matrix if this option is chosen for the storage */
if (cusparsestruct->format==MAT_CUSPARSE_CSR) {
/* set the matrix */
CsrMatrix *mat= new CsrMatrix;
mat->num_rows = m;
mat->num_cols = A->cmap->n;
mat->num_entries = nnz;
mat->row_offsets = new THRUSTINTARRAY32(m+1);
mat->row_offsets->assign(ii, ii + m+1);
mat->column_indices = new THRUSTINTARRAY32(nnz);
mat->column_indices->assign(a->j, a->j+nnz);
mat->values = new THRUSTARRAY(nnz);
if (a->a) mat->values->assign(a->a, a->a+nnz);
/* assign the pointer */
matstruct->mat = mat;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
if (mat->num_rows) { /* cusparse errors on empty matrices! */
stat = hipsparseCreateCsr(&matstruct->matDescr,
mat->num_rows, mat->num_cols, mat->num_entries,
mat->row_offsets->data().get(), mat->column_indices->data().get(),
mat->values->data().get(),
HIPSPARSE_INDEX_32I,HIPSPARSE_INDEX_32I, /* row offset, col idx types due to THRUSTINTARRAY32 */
HIPSPARSE_INDEX_BASE_ZERO,cusparse_scalartype);CHKERRCUSPARSE(stat);
}
#endif
} else if (cusparsestruct->format==MAT_CUSPARSE_ELL || cusparsestruct->format==MAT_CUSPARSE_HYB) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
CsrMatrix *mat= new CsrMatrix;
mat->num_rows = m;
mat->num_cols = A->cmap->n;
mat->num_entries = nnz;
mat->row_offsets = new THRUSTINTARRAY32(m+1);
mat->row_offsets->assign(ii, ii + m+1);
mat->column_indices = new THRUSTINTARRAY32(nnz);
mat->column_indices->assign(a->j, a->j+nnz);
mat->values = new THRUSTARRAY(nnz);
if (a->a) mat->values->assign(a->a, a->a+nnz);
cusparseHybMat_t hybMat;
stat = cusparseCreateHybMat(&hybMat);CHKERRCUSPARSE(stat);
cusparseHybPartition_t partition = cusparsestruct->format==MAT_CUSPARSE_ELL ?
CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO;
stat = cusparse_csr2hyb(cusparsestruct->handle, mat->num_rows, mat->num_cols,
matstruct->descr, mat->values->data().get(),
mat->row_offsets->data().get(),
mat->column_indices->data().get(),
hybMat, 0, partition);CHKERRCUSPARSE(stat);
/* assign the pointer */
matstruct->mat = hybMat;
if (mat) {
if (mat->values) delete (THRUSTARRAY*)mat->values;
if (mat->column_indices) delete (THRUSTINTARRAY32*)mat->column_indices;
if (mat->row_offsets) delete (THRUSTINTARRAY32*)mat->row_offsets;
delete (CsrMatrix*)mat;
}
#endif
}
/* assign the compressed row indices */
if (a->compressedrow.use) {
cusparsestruct->workVector = new THRUSTARRAY(m);
matstruct->cprowIndices = new THRUSTINTARRAY(m);
matstruct->cprowIndices->assign(ridx,ridx+m);
tmp = m;
} else {
cusparsestruct->workVector = NULL;
matstruct->cprowIndices = NULL;
tmp = 0;
}
ierr = PetscLogCpuToGpu(((m+1)+(a->nz))*sizeof(int)+tmp*sizeof(PetscInt)+(3+(a->nz))*sizeof(PetscScalar));CHKERRQ(ierr);
/* assign the pointer */
cusparsestruct->mat = matstruct;
} catch(char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
}
err = WaitForCUDA();CHKERRCUDA(err);
ierr = PetscLogEventEnd(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr);
cusparsestruct->nonzerostate = A->nonzerostate;
}
if (both) A->offloadmask = PETSC_OFFLOAD_BOTH;
}
PetscFunctionReturn(0);
}
struct VecCUDAPlusEquals
{
template <typename Tuple>
__host__ __device__
void operator()(Tuple t)
{
thrust::get<1>(t) = thrust::get<1>(t) + thrust::get<0>(t);
}
};
struct VecCUDAEquals
{
template <typename Tuple>
__host__ __device__
void operator()(Tuple t)
{
thrust::get<1>(t) = thrust::get<0>(t);
}
};
struct VecCUDAEqualsReverse
{
template <typename Tuple>
__host__ __device__
void operator()(Tuple t)
{
thrust::get<0>(t) = thrust::get<1>(t);
}
};
struct MatMatCusparse {
PetscBool cisdense;
PetscScalar *Bt;
Mat X;
PetscBool reusesym; /* Cusparse does not have split symbolic and numeric phases for sparse matmat operations */
PetscLogDouble flops;
CsrMatrix *Bcsr;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
hipsparseSpMatDescr_t matSpBDescr;
PetscBool initialized; /* C = alpha op(A) op(B) + beta C */
hipsparseDnMatDescr_t matBDescr;
hipsparseDnMatDescr_t matCDescr;
PetscInt Blda,Clda; /* Record leading dimensions of B and C here to detect changes*/
size_t mmBufferSize;
void *mmBuffer;
void *mmBuffer2; /* SpGEMM WorkEstimation buffer */
hipsparseSpGEMMDescr_t spgemmDesc;
#endif
};
static PetscErrorCode MatDestroy_MatMatCusparse(void *data)
{
PetscErrorCode ierr;
MatMatCusparse *mmdata = (MatMatCusparse *)data;
hipError_t cerr;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
hipsparseStatus_t stat;
#endif
PetscFunctionBegin;
cerr = hipFree(mmdata->Bt);CHKERRCUDA(cerr);
delete mmdata->Bcsr;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
if (mmdata->matSpBDescr) { stat = hipsparseDestroySpMat(mmdata->matSpBDescr);CHKERRCUSPARSE(stat); }
if (mmdata->mmBuffer) { cerr = hipFree(mmdata->mmBuffer);CHKERRCUDA(cerr); }
if (mmdata->mmBuffer2) { cerr = hipFree(mmdata->mmBuffer2);CHKERRCUDA(cerr); }
if (mmdata->matBDescr) { stat = hipsparseDestroyDnMat(mmdata->matBDescr);CHKERRCUSPARSE(stat); }
if (mmdata->matCDescr) { stat = hipsparseDestroyDnMat(mmdata->matCDescr);CHKERRCUSPARSE(stat); }
if (mmdata->spgemmDesc) { stat = hipsparseSpGEMM_destroyDescr(mmdata->spgemmDesc);CHKERRCUSPARSE(stat); }
#endif
ierr = MatDestroy(&mmdata->X);CHKERRQ(ierr);
ierr = PetscFree(data);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PETSC_INTERN PetscErrorCode MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(Mat,Mat,Mat,PetscBool,PetscBool);
static PetscErrorCode MatProductNumeric_SeqAIJCUSPARSE_SeqDENSECUDA(Mat C)
{
Mat_Product *product = C->product;
Mat A,B;
PetscInt m,n,blda,clda;
PetscBool flg,biscuda;
Mat_SeqAIJCUSPARSE *cusp;
hipsparseStatus_t stat;
hipsparseOperation_t opA;
const PetscScalar *barray;
PetscScalar *carray;
PetscErrorCode ierr;
MatMatCusparse *mmdata;
Mat_SeqAIJCUSPARSEMultStruct *mat;
CsrMatrix *csrmat;
hipError_t cerr;
PetscFunctionBegin;
MatCheckProduct(C,1);
if (!C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data empty");
mmdata = (MatMatCusparse*)product->data;
A = product->A;
B = product->B;
ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Not for type %s",((PetscObject)A)->type_name);
/* currently CopyToGpu does not copy if the matrix is bound to CPU
Instead of silently accepting the wrong answer, I prefer to raise the error */
if (A->boundtocpu) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases");
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
switch (product->type) {
case MATPRODUCT_AB:
case MATPRODUCT_PtAP:
mat = cusp->mat;
opA = HIPSPARSE_OPERATION_NON_TRANSPOSE;
m = A->rmap->n;
n = B->cmap->n;
break;
case MATPRODUCT_AtB:
if (!cusp->transgen) {
mat = cusp->mat;
opA = HIPSPARSE_OPERATION_TRANSPOSE;
} else {
ierr = MatSeqAIJCUSPARSEGenerateTransposeForMult(A);CHKERRQ(ierr);
mat = cusp->matTranspose;
opA = HIPSPARSE_OPERATION_NON_TRANSPOSE;
}
m = A->cmap->n;
n = B->cmap->n;
break;
case MATPRODUCT_ABt:
case MATPRODUCT_RARt:
mat = cusp->mat;
opA = HIPSPARSE_OPERATION_NON_TRANSPOSE;
m = A->rmap->n;
n = B->rmap->n;
break;
default:
SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Unsupported product type %s",MatProductTypes[product->type]);
}
if (!mat) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing Mat_SeqAIJCUSPARSEMultStruct");
csrmat = (CsrMatrix*)mat->mat;
/* if the user passed a CPU matrix, copy the data to the GPU */
ierr = PetscObjectTypeCompare((PetscObject)B,MATSEQDENSECUDA,&biscuda);CHKERRQ(ierr);
if (!biscuda) {ierr = MatConvert(B,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr);}
ierr = MatDenseCUDAGetArrayRead(B,&barray);CHKERRQ(ierr);
ierr = MatDenseGetLDA(B,&blda);CHKERRQ(ierr);
if (product->type == MATPRODUCT_RARt || product->type == MATPRODUCT_PtAP) {
ierr = MatDenseCUDAGetArrayWrite(mmdata->X,&carray);CHKERRQ(ierr);
ierr = MatDenseGetLDA(mmdata->X,&clda);CHKERRQ(ierr);
} else {
ierr = MatDenseCUDAGetArrayWrite(C,&carray);CHKERRQ(ierr);
ierr = MatDenseGetLDA(C,&clda);CHKERRQ(ierr);
}
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
hipsparseOperation_t opB = (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) ? HIPSPARSE_OPERATION_TRANSPOSE : HIPSPARSE_OPERATION_NON_TRANSPOSE;
/* (re)allcoate mmBuffer if not initialized or LDAs are different */
if (!mmdata->initialized || mmdata->Blda != blda || mmdata->Clda != clda) {
size_t mmBufferSize;
if (mmdata->initialized && mmdata->Blda != blda) {stat = hipsparseDestroyDnMat(mmdata->matBDescr);CHKERRCUSPARSE(stat); mmdata->matBDescr = NULL;}
if (!mmdata->matBDescr) {
stat = hipsparseCreateDnMat(&mmdata->matBDescr,B->rmap->n,B->cmap->n,blda,(void*)barray,cusparse_scalartype,HIPSPARSE_ORDER_COL);CHKERRCUSPARSE(stat);
mmdata->Blda = blda;
}
if (mmdata->initialized && mmdata->Clda != clda) {stat = hipsparseDestroyDnMat(mmdata->matCDescr);CHKERRCUSPARSE(stat); mmdata->matCDescr = NULL;}
if (!mmdata->matCDescr) { /* matCDescr is for C or mmdata->X */
stat = hipsparseCreateDnMat(&mmdata->matCDescr,m,n,clda,(void*)carray,cusparse_scalartype,HIPSPARSE_ORDER_COL);CHKERRCUSPARSE(stat);
mmdata->Clda = clda;
}
if (!mat->matDescr) {
stat = hipsparseCreateCsr(&mat->matDescr,
csrmat->num_rows, csrmat->num_cols, csrmat->num_entries,
csrmat->row_offsets->data().get(), csrmat->column_indices->data().get(),
csrmat->values->data().get(),
HIPSPARSE_INDEX_32I,HIPSPARSE_INDEX_32I, /* row offset, col idx types due to THRUSTINTARRAY32 */
HIPSPARSE_INDEX_BASE_ZERO,cusparse_scalartype);CHKERRCUSPARSE(stat);
}
stat = hipsparseSpMM_bufferSize(cusp->handle,opA,opB,mat->alpha_one,
mat->matDescr,mmdata->matBDescr,mat->beta_zero,
mmdata->matCDescr,cusparse_scalartype,
cusp->spmmAlg,&mmBufferSize);CHKERRCUSPARSE(stat);
if ((mmdata->mmBuffer && mmdata->mmBufferSize < mmBufferSize) || !mmdata->mmBuffer) {
cerr = hipFree(mmdata->mmBuffer);CHKERRCUDA(cerr);
cerr = hipMalloc(&mmdata->mmBuffer,mmBufferSize);CHKERRCUDA(cerr);
mmdata->mmBufferSize = mmBufferSize;
}
mmdata->initialized = PETSC_TRUE;
} else {
/* to be safe, always update pointers of the mats */
stat = hipsparseSpMatSetValues(mat->matDescr,csrmat->values->data().get());CHKERRCUSPARSE(stat);
stat = hipsparseDnMatSetValues(mmdata->matBDescr,(void*)barray);CHKERRCUSPARSE(stat);
stat = hipsparseDnMatSetValues(mmdata->matCDescr,(void*)carray);CHKERRCUSPARSE(stat);
}
/* do hipsparseSpMM, which supports transpose on B */
stat = hipsparseSpMM(cusp->handle,opA,opB,mat->alpha_one,
mat->matDescr,mmdata->matBDescr,mat->beta_zero,
mmdata->matCDescr,cusparse_scalartype,
cusp->spmmAlg,mmdata->mmBuffer);CHKERRCUSPARSE(stat);
#else
PetscInt k;
/* cusparseXcsrmm does not support transpose on B */
if (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) {
hipblasHandle_t cublasv2handle;
hipblasStatus_t cerr;
ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr);
cerr = cublasXgeam(cublasv2handle,HIPBLAS_OP_T,HIPBLAS_OP_T,
B->cmap->n,B->rmap->n,
&PETSC_CUSPARSE_ONE ,barray,blda,
&PETSC_CUSPARSE_ZERO,barray,blda,
mmdata->Bt,B->cmap->n);CHKERRCUBLAS(cerr);
blda = B->cmap->n;
k = B->cmap->n;
} else {
k = B->rmap->n;
}
/* perform the MatMat operation, op(A) is m x k, op(B) is k x n */
stat = cusparse_csr_spmm(cusp->handle,opA,m,n,k,
csrmat->num_entries,mat->alpha_one,mat->descr,
csrmat->values->data().get(),
csrmat->row_offsets->data().get(),
csrmat->column_indices->data().get(),
mmdata->Bt ? mmdata->Bt : barray,blda,mat->beta_zero,
carray,clda);CHKERRCUSPARSE(stat);
#endif
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogGpuFlops(n*2.0*csrmat->num_entries);CHKERRQ(ierr);
ierr = MatDenseCUDARestoreArrayRead(B,&barray);CHKERRQ(ierr);
if (product->type == MATPRODUCT_RARt) {
ierr = MatDenseCUDARestoreArrayWrite(mmdata->X,&carray);CHKERRQ(ierr);
ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(B,mmdata->X,C,PETSC_FALSE,PETSC_FALSE);CHKERRQ(ierr);
} else if (product->type == MATPRODUCT_PtAP) {
ierr = MatDenseCUDARestoreArrayWrite(mmdata->X,&carray);CHKERRQ(ierr);
ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(B,mmdata->X,C,PETSC_TRUE,PETSC_FALSE);CHKERRQ(ierr);
} else {
ierr = MatDenseCUDARestoreArrayWrite(C,&carray);CHKERRQ(ierr);
}
if (mmdata->cisdense) {
ierr = MatConvert(C,MATSEQDENSE,MAT_INPLACE_MATRIX,&C);CHKERRQ(ierr);
}
if (!biscuda) {
ierr = MatConvert(B,MATSEQDENSE,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatProductSymbolic_SeqAIJCUSPARSE_SeqDENSECUDA(Mat C)
{
Mat_Product *product = C->product;
Mat A,B;
PetscInt m,n;
PetscBool cisdense,flg;
PetscErrorCode ierr;
MatMatCusparse *mmdata;
Mat_SeqAIJCUSPARSE *cusp;
PetscFunctionBegin;
MatCheckProduct(C,1);
if (C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data not empty");
A = product->A;
B = product->B;
ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for type %s",((PetscObject)A)->type_name);
cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
if (cusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format");
switch (product->type) {
case MATPRODUCT_AB:
m = A->rmap->n;
n = B->cmap->n;
break;
case MATPRODUCT_AtB:
m = A->cmap->n;
n = B->cmap->n;
break;
case MATPRODUCT_ABt:
m = A->rmap->n;
n = B->rmap->n;
break;
case MATPRODUCT_PtAP:
m = B->cmap->n;
n = B->cmap->n;
break;
case MATPRODUCT_RARt:
m = B->rmap->n;
n = B->rmap->n;
break;
default:
SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Unsupported product type %s",MatProductTypes[product->type]);
}
ierr = MatSetSizes(C,m,n,m,n);CHKERRQ(ierr);
/* if C is of type MATSEQDENSE (CPU), perform the operation on the GPU and then copy on the CPU */
ierr = PetscObjectTypeCompare((PetscObject)C,MATSEQDENSE,&cisdense);CHKERRQ(ierr);
ierr = MatSetType(C,MATSEQDENSECUDA);CHKERRQ(ierr);
/* product data */
ierr = PetscNew(&mmdata);CHKERRQ(ierr);
mmdata->cisdense = cisdense;
#if PETSC_PKG_CUDA_VERSION_LT(11,0,0)
/* cusparseXcsrmm does not support transpose on B, so we allocate buffer to store B^T */
if (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) {
hipError_t cerr = hipMalloc((void**)&mmdata->Bt,(size_t)B->rmap->n*(size_t)B->cmap->n*sizeof(PetscScalar));CHKERRCUDA(cerr);
}
#endif
/* for these products we need intermediate storage */
if (product->type == MATPRODUCT_RARt || product->type == MATPRODUCT_PtAP) {
ierr = MatCreate(PetscObjectComm((PetscObject)C),&mmdata->X);CHKERRQ(ierr);
ierr = MatSetType(mmdata->X,MATSEQDENSECUDA);CHKERRQ(ierr);
if (product->type == MATPRODUCT_RARt) { /* do not preallocate, since the first call to MatDenseCUDAGetArray will preallocate on the GPU for us */
ierr = MatSetSizes(mmdata->X,A->rmap->n,B->rmap->n,A->rmap->n,B->rmap->n);CHKERRQ(ierr);
} else {
ierr = MatSetSizes(mmdata->X,A->rmap->n,B->cmap->n,A->rmap->n,B->cmap->n);CHKERRQ(ierr);
}
}
C->product->data = mmdata;
C->product->destroy = MatDestroy_MatMatCusparse;
C->ops->productnumeric = MatProductNumeric_SeqAIJCUSPARSE_SeqDENSECUDA;
PetscFunctionReturn(0);
}
static PetscErrorCode MatProductNumeric_SeqAIJCUSPARSE_SeqAIJCUSPARSE(Mat C)
{
Mat_Product *product = C->product;
Mat A,B;
Mat_SeqAIJCUSPARSE *Acusp,*Bcusp,*Ccusp;
Mat_SeqAIJ *c = (Mat_SeqAIJ*)C->data;
Mat_SeqAIJCUSPARSEMultStruct *Amat,*Bmat,*Cmat;
CsrMatrix *Acsr,*Bcsr,*Ccsr;
PetscBool flg;
PetscErrorCode ierr;
hipsparseStatus_t stat;
hipError_t cerr;
MatProductType ptype;
MatMatCusparse *mmdata;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
hipsparseSpMatDescr_t BmatSpDescr;
#endif
PetscFunctionBegin;
MatCheckProduct(C,1);
if (!C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data empty");
ierr = PetscObjectTypeCompare((PetscObject)C,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for C of type %s",((PetscObject)C)->type_name);
mmdata = (MatMatCusparse*)C->product->data;
A = product->A;
B = product->B;
if (mmdata->reusesym) { /* this happens when api_user is true, meaning that the matrix values have been already computed in the MatProductSymbolic phase */
mmdata->reusesym = PETSC_FALSE;
Ccusp = (Mat_SeqAIJCUSPARSE*)C->spptr;
if (Ccusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format");
Cmat = Ccusp->mat;
if (!Cmat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing C mult struct for product type %s",MatProductTypes[C->product->type]);
Ccsr = (CsrMatrix*)Cmat->mat;
if (!Ccsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing C CSR struct");
goto finalize;
}
if (!c->nz) goto finalize;
ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for type %s",((PetscObject)A)->type_name);
ierr = PetscObjectTypeCompare((PetscObject)B,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for B of type %s",((PetscObject)B)->type_name);
if (A->boundtocpu) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONG,"Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases");
if (B->boundtocpu) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONG,"Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases");
Acusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
Bcusp = (Mat_SeqAIJCUSPARSE*)B->spptr;
Ccusp = (Mat_SeqAIJCUSPARSE*)C->spptr;
if (Acusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format");
if (Bcusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format");
if (Ccusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format");
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSECopyToGPU(B);CHKERRQ(ierr);
ptype = product->type;
if (A->symmetric && ptype == MATPRODUCT_AtB) ptype = MATPRODUCT_AB;
if (B->symmetric && ptype == MATPRODUCT_ABt) ptype = MATPRODUCT_AB;
switch (ptype) {
case MATPRODUCT_AB:
Amat = Acusp->mat;
Bmat = Bcusp->mat;
break;
case MATPRODUCT_AtB:
Amat = Acusp->matTranspose;
Bmat = Bcusp->mat;
break;
case MATPRODUCT_ABt:
Amat = Acusp->mat;
Bmat = Bcusp->matTranspose;
break;
default:
SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Unsupported product type %s",MatProductTypes[product->type]);
}
Cmat = Ccusp->mat;
if (!Amat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing A mult struct for product type %s",MatProductTypes[ptype]);
if (!Bmat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing B mult struct for product type %s",MatProductTypes[ptype]);
if (!Cmat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing C mult struct for product type %s",MatProductTypes[ptype]);
Acsr = (CsrMatrix*)Amat->mat;
Bcsr = mmdata->Bcsr ? mmdata->Bcsr : (CsrMatrix*)Bmat->mat; /* B may be in compressed row storage */
Ccsr = (CsrMatrix*)Cmat->mat;
if (!Acsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing A CSR struct");
if (!Bcsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing B CSR struct");
if (!Ccsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing C CSR struct");
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
BmatSpDescr = mmdata->Bcsr ? mmdata->matSpBDescr : Bmat->matDescr; /* B may be in compressed row storage */
stat = hipsparseSpGEMM_compute(Ccusp->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT,
mmdata->spgemmDesc, &mmdata->mmBufferSize, mmdata->mmBuffer);CHKERRCUSPARSE(stat);
stat = hipsparseSpGEMM_copy(Ccusp->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc);CHKERRCUSPARSE(stat);
#else
stat = cusparse_csr_spgemm(Ccusp->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE,
Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols,
Amat->descr, Acsr->num_entries, Acsr->values->data().get(), Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(),
Bmat->descr, Bcsr->num_entries, Bcsr->values->data().get(), Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(),
Cmat->descr, Ccsr->values->data().get(), Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get());CHKERRCUSPARSE(stat);
#endif
ierr = PetscLogGpuFlops(mmdata->flops);CHKERRQ(ierr);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
C->offloadmask = PETSC_OFFLOAD_GPU;
finalize:
/* shorter version of MatAssemblyEnd_SeqAIJ */
ierr = PetscInfo3(C,"Matrix size: %D X %D; storage space: 0 unneeded,%D used\n",C->rmap->n,C->cmap->n,c->nz);CHKERRQ(ierr);
ierr = PetscInfo(C,"Number of mallocs during MatSetValues() is 0\n");CHKERRQ(ierr);
ierr = PetscInfo1(C,"Maximum nonzeros in any row is %D\n",c->rmax);CHKERRQ(ierr);
c->reallocs = 0;
C->info.mallocs += 0;
C->info.nz_unneeded = 0;
C->assembled = C->was_assembled = PETSC_TRUE;
C->num_ass++;
PetscFunctionReturn(0);
}
static PetscErrorCode MatProductSymbolic_SeqAIJCUSPARSE_SeqAIJCUSPARSE(Mat C)
{
Mat_Product *product = C->product;
Mat A,B;
Mat_SeqAIJCUSPARSE *Acusp,*Bcusp,*Ccusp;
Mat_SeqAIJ *a,*b,*c;
Mat_SeqAIJCUSPARSEMultStruct *Amat,*Bmat,*Cmat;
CsrMatrix *Acsr,*Bcsr,*Ccsr;
PetscInt i,j,m,n,k;
PetscBool flg;
PetscErrorCode ierr;
hipsparseStatus_t stat;
hipError_t cerr;
MatProductType ptype;
MatMatCusparse *mmdata;
PetscLogDouble flops;
PetscBool biscompressed,ciscompressed;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
int64_t C_num_rows1, C_num_cols1, C_nnz1;
size_t bufSize2;
hipsparseSpMatDescr_t BmatSpDescr;
#else
int cnz;
#endif
PetscFunctionBegin;
MatCheckProduct(C,1);
if (C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data not empty");
A = product->A;
B = product->B;
ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for type %s",((PetscObject)A)->type_name);
ierr = PetscObjectTypeCompare((PetscObject)B,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for B of type %s",((PetscObject)B)->type_name);
a = (Mat_SeqAIJ*)A->data;
b = (Mat_SeqAIJ*)B->data;
Acusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
Bcusp = (Mat_SeqAIJCUSPARSE*)B->spptr;
if (Acusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format");
if (Bcusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format");
/* product data */
ierr = PetscNew(&mmdata);CHKERRQ(ierr);
C->product->data = mmdata;
C->product->destroy = MatDestroy_MatMatCusparse;
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSECopyToGPU(B);CHKERRQ(ierr);
ptype = product->type;
if (A->symmetric && ptype == MATPRODUCT_AtB) ptype = MATPRODUCT_AB;
if (B->symmetric && ptype == MATPRODUCT_ABt) ptype = MATPRODUCT_AB;
biscompressed = PETSC_FALSE;
ciscompressed = PETSC_FALSE;
switch (ptype) {
case MATPRODUCT_AB:
m = A->rmap->n;
n = B->cmap->n;
k = A->cmap->n;
Amat = Acusp->mat;
Bmat = Bcusp->mat;
if (a->compressedrow.use) ciscompressed = PETSC_TRUE;
if (b->compressedrow.use) biscompressed = PETSC_TRUE;
break;
case MATPRODUCT_AtB:
m = A->cmap->n;
n = B->cmap->n;
k = A->rmap->n;
ierr = MatSeqAIJCUSPARSEGenerateTransposeForMult(A);CHKERRQ(ierr);
Amat = Acusp->matTranspose;
Bmat = Bcusp->mat;
if (b->compressedrow.use) biscompressed = PETSC_TRUE;
break;
case MATPRODUCT_ABt:
m = A->rmap->n;
n = B->rmap->n;
k = A->cmap->n;
ierr = MatSeqAIJCUSPARSEGenerateTransposeForMult(B);CHKERRQ(ierr);
Amat = Acusp->mat;
Bmat = Bcusp->matTranspose;
if (a->compressedrow.use) ciscompressed = PETSC_TRUE;
break;
default:
SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Unsupported product type %s",MatProductTypes[product->type]);
}
/* create cusparse matrix */
ierr = MatSetSizes(C,m,n,m,n);CHKERRQ(ierr);
ierr = MatSetType(C,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
c = (Mat_SeqAIJ*)C->data;
Ccusp = (Mat_SeqAIJCUSPARSE*)C->spptr;
Cmat = new Mat_SeqAIJCUSPARSEMultStruct;
Ccsr = new CsrMatrix;
c->compressedrow.use = ciscompressed;
if (c->compressedrow.use) { /* if a is in compressed row, than c will be in compressed row format */
c->compressedrow.nrows = a->compressedrow.nrows;
ierr = PetscMalloc2(c->compressedrow.nrows+1,&c->compressedrow.i,c->compressedrow.nrows,&c->compressedrow.rindex);CHKERRQ(ierr);
ierr = PetscArraycpy(c->compressedrow.rindex,a->compressedrow.rindex,c->compressedrow.nrows);CHKERRQ(ierr);
Ccusp->workVector = new THRUSTARRAY(c->compressedrow.nrows);
Cmat->cprowIndices = new THRUSTINTARRAY(c->compressedrow.nrows);
Cmat->cprowIndices->assign(c->compressedrow.rindex,c->compressedrow.rindex + c->compressedrow.nrows);
} else {
c->compressedrow.nrows = 0;
c->compressedrow.i = NULL;
c->compressedrow.rindex = NULL;
Ccusp->workVector = NULL;
Cmat->cprowIndices = NULL;
}
Ccusp->nrows = ciscompressed ? c->compressedrow.nrows : m;
Ccusp->mat = Cmat;
Ccusp->mat->mat = Ccsr;
Ccsr->num_rows = Ccusp->nrows;
Ccsr->num_cols = n;
Ccsr->row_offsets = new THRUSTINTARRAY32(Ccusp->nrows+1);
stat = hipsparseCreateMatDescr(&Cmat->descr);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatIndexBase(Cmat->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatType(Cmat->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
cerr = hipMalloc((void **)&(Cmat->alpha_one),sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = hipMalloc((void **)&(Cmat->beta_zero),sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = hipMalloc((void **)&(Cmat->beta_one), sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = hipMemcpy(Cmat->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = hipMemcpy(Cmat->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = hipMemcpy(Cmat->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr);
if (!Ccsr->num_rows || !Ccsr->num_cols || !a->nz || !b->nz) { /* cusparse raise errors in different calls when matrices have zero rows/columns! */
thrust::fill(thrust::device,Ccsr->row_offsets->begin(),Ccsr->row_offsets->end(),0);
c->nz = 0;
Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
Ccsr->values = new THRUSTARRAY(c->nz);
goto finalizesym;
}
if (!Amat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing A mult struct for product type %s",MatProductTypes[ptype]);
if (!Bmat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing B mult struct for product type %s",MatProductTypes[ptype]);
Acsr = (CsrMatrix*)Amat->mat;
if (!biscompressed) {
Bcsr = (CsrMatrix*)Bmat->mat;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
BmatSpDescr = Bmat->matDescr;
#endif
} else { /* we need to use row offsets for the full matrix */
CsrMatrix *cBcsr = (CsrMatrix*)Bmat->mat;
Bcsr = new CsrMatrix;
Bcsr->num_rows = B->rmap->n;
Bcsr->num_cols = cBcsr->num_cols;
Bcsr->num_entries = cBcsr->num_entries;
Bcsr->column_indices = cBcsr->column_indices;
Bcsr->values = cBcsr->values;
if (!Bcusp->rowoffsets_gpu) {
Bcusp->rowoffsets_gpu = new THRUSTINTARRAY32(B->rmap->n + 1);
Bcusp->rowoffsets_gpu->assign(b->i,b->i + B->rmap->n + 1);
ierr = PetscLogCpuToGpu((B->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr);
}
Bcsr->row_offsets = Bcusp->rowoffsets_gpu;
mmdata->Bcsr = Bcsr;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
if (Bcsr->num_rows && Bcsr->num_cols) {
stat = hipsparseCreateCsr(&mmdata->matSpBDescr, Bcsr->num_rows, Bcsr->num_cols, Bcsr->num_entries,
Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(),
Bcsr->values->data().get(),
HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I,
HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);CHKERRCUSPARSE(stat);
}
BmatSpDescr = mmdata->matSpBDescr;
#endif
}
if (!Acsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing A CSR struct");
if (!Bcsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing B CSR struct");
/* precompute flops count */
if (ptype == MATPRODUCT_AB) {
for (i=0, flops = 0; i<A->rmap->n; i++) {
const PetscInt st = a->i[i];
const PetscInt en = a->i[i+1];
for (j=st; j<en; j++) {
const PetscInt brow = a->j[j];
flops += 2.*(b->i[brow+1] - b->i[brow]);
}
}
} else if (ptype == MATPRODUCT_AtB) {
for (i=0, flops = 0; i<A->rmap->n; i++) {
const PetscInt anzi = a->i[i+1] - a->i[i];
const PetscInt bnzi = b->i[i+1] - b->i[i];
flops += (2.*anzi)*bnzi;
}
} else { /* TODO */
flops = 0.;
}
mmdata->flops = flops;
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
stat = hipsparseSetPointerMode(Ccusp->handle, HIPSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
stat = hipsparseCreateCsr(&Cmat->matDescr, Ccsr->num_rows, Ccsr->num_cols, 0,
NULL, NULL, NULL,
HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I,
HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);CHKERRCUSPARSE(stat);
stat = hipsparseSpGEMM_createDescr(&mmdata->spgemmDesc);CHKERRCUSPARSE(stat);
/* ask bufferSize bytes for external memory */
stat = hipsparseSpGEMM_workEstimation(Ccusp->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT,
mmdata->spgemmDesc, &bufSize2, NULL);CHKERRCUSPARSE(stat);
cerr = hipMalloc((void**) &mmdata->mmBuffer2, bufSize2);CHKERRCUDA(cerr);
/* inspect the matrices A and B to understand the memory requirement for the next step */
stat = hipsparseSpGEMM_workEstimation(Ccusp->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT,
mmdata->spgemmDesc, &bufSize2, mmdata->mmBuffer2);CHKERRCUSPARSE(stat);
/* ask bufferSize again bytes for external memory */
stat = hipsparseSpGEMM_compute(Ccusp->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT,
mmdata->spgemmDesc, &mmdata->mmBufferSize, NULL);CHKERRCUSPARSE(stat);
/* The CUSPARSE documentation is not clear, nor the API
We need both buffers to perform the operations properly!
mmdata->mmBuffer2 does not appear anywhere in the compute/copy API
it only appears for the workEstimation stuff, but it seems it is needed in compute, so probably the address
is stored in the descriptor! What a messy API... */
cerr = hipMalloc((void**) &mmdata->mmBuffer, mmdata->mmBufferSize);CHKERRCUDA(cerr);
/* compute the intermediate product of A * B */
stat = hipsparseSpGEMM_compute(Ccusp->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT,
mmdata->spgemmDesc, &mmdata->mmBufferSize, mmdata->mmBuffer);CHKERRCUSPARSE(stat);
/* get matrix C non-zero entries C_nnz1 */
stat = hipsparseSpMatGetSize(Cmat->matDescr, &C_num_rows1, &C_num_cols1, &C_nnz1);CHKERRCUSPARSE(stat);
c->nz = (PetscInt) C_nnz1;
ierr = PetscInfo9(C,"Buffer sizes for type %s, result %D x %D (k %D, nzA %D, nzB %D, nzC %D) are: %ldKB %ldKB\n",MatProductTypes[ptype],m,n,k,a->nz,b->nz,c->nz,bufSize2/1024,mmdata->mmBufferSize/1024);CHKERRQ(ierr);
Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
CHKERRCUDA(hipPeekAtLastError()); /* catch out of memory errors */
Ccsr->values = new THRUSTARRAY(c->nz);
CHKERRCUDA(hipPeekAtLastError()); /* catch out of memory errors */
stat = hipsparseCsrSetPointers(Cmat->matDescr, Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get(),
Ccsr->values->data().get());CHKERRCUSPARSE(stat);
stat = hipsparseSpGEMM_copy(Ccusp->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc);CHKERRCUSPARSE(stat);
#else
stat = hipsparseSetPointerMode(Ccusp->handle, HIPSPARSE_POINTER_MODE_HOST);CHKERRCUSPARSE(stat);
stat = hipsparseXcsrgemmNnz(Ccusp->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE,
Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols,
Amat->descr, Acsr->num_entries, Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(),
Bmat->descr, Bcsr->num_entries, Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(),
Cmat->descr, Ccsr->row_offsets->data().get(), &cnz);CHKERRCUSPARSE(stat);
c->nz = cnz;
Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
CHKERRCUDA(hipPeekAtLastError()); /* catch out of memory errors */
Ccsr->values = new THRUSTARRAY(c->nz);
CHKERRCUDA(hipPeekAtLastError()); /* catch out of memory errors */
stat = hipsparseSetPointerMode(Ccusp->handle, HIPSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
/* with the old gemm interface (removed from 11.0 on) we cannot compute the symbolic factorization only.
I have tried using the gemm2 interface (alpha * A * B + beta * D), which allows to do symbolic by passing NULL for values, but it seems quite buggy when
D is NULL, despite the fact that CUSPARSE documentation claims it is supported! */
stat = cusparse_csr_spgemm(Ccusp->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE,
Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols,
Amat->descr, Acsr->num_entries, Acsr->values->data().get(), Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(),
Bmat->descr, Bcsr->num_entries, Bcsr->values->data().get(), Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(),
Cmat->descr, Ccsr->values->data().get(), Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get());CHKERRCUSPARSE(stat);
#endif
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuFlops(mmdata->flops);CHKERRQ(ierr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
finalizesym:
c->singlemalloc = PETSC_FALSE;
c->free_a = PETSC_TRUE;
c->free_ij = PETSC_TRUE;
ierr = PetscMalloc1(m+1,&c->i);CHKERRQ(ierr);
ierr = PetscMalloc1(c->nz,&c->j);CHKERRQ(ierr);
if (PetscDefined(USE_64BIT_INDICES)) { /* 32 to 64 bit conversion on the GPU and then copy to host (lazy) */
PetscInt *d_i = c->i;
THRUSTINTARRAY ii(Ccsr->row_offsets->size());
THRUSTINTARRAY jj(Ccsr->column_indices->size());
ii = *Ccsr->row_offsets;
jj = *Ccsr->column_indices;
if (ciscompressed) d_i = c->compressedrow.i;
cerr = hipMemcpy(d_i,ii.data().get(),Ccsr->row_offsets->size()*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
cerr = hipMemcpy(c->j,jj.data().get(),Ccsr->column_indices->size()*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
} else {
PetscInt *d_i = c->i;
if (ciscompressed) d_i = c->compressedrow.i;
cerr = hipMemcpy(d_i,Ccsr->row_offsets->data().get(),Ccsr->row_offsets->size()*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
cerr = hipMemcpy(c->j,Ccsr->column_indices->data().get(),Ccsr->column_indices->size()*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
}
if (ciscompressed) { /* need to expand host row offsets */
PetscInt r = 0;
c->i[0] = 0;
for (k = 0; k < c->compressedrow.nrows; k++) {
const PetscInt next = c->compressedrow.rindex[k];
const PetscInt old = c->compressedrow.i[k];
for (; r < next; r++) c->i[r+1] = old;
}
for (; r < m; r++) c->i[r+1] = c->compressedrow.i[c->compressedrow.nrows];
}
ierr = PetscLogGpuToCpu((Ccsr->column_indices->size() + Ccsr->row_offsets->size())*sizeof(PetscInt));CHKERRQ(ierr);
ierr = PetscMalloc1(m,&c->ilen);CHKERRQ(ierr);
ierr = PetscMalloc1(m,&c->imax);CHKERRQ(ierr);
c->maxnz = c->nz;
c->nonzerorowcnt = 0;
c->rmax = 0;
for (k = 0; k < m; k++) {
const PetscInt nn = c->i[k+1] - c->i[k];
c->ilen[k] = c->imax[k] = nn;
c->nonzerorowcnt += (PetscInt)!!nn;
c->rmax = PetscMax(c->rmax,nn);
}
ierr = MatMarkDiagonal_SeqAIJ(C);CHKERRQ(ierr);
ierr = PetscMalloc1(c->nz,&c->a);CHKERRQ(ierr);
Ccsr->num_entries = c->nz;
C->nonzerostate++;
ierr = PetscLayoutSetUp(C->rmap);CHKERRQ(ierr);
ierr = PetscLayoutSetUp(C->cmap);CHKERRQ(ierr);
Ccusp->nonzerostate = C->nonzerostate;
C->offloadmask = PETSC_OFFLOAD_UNALLOCATED;
C->preallocated = PETSC_TRUE;
C->assembled = PETSC_FALSE;
C->was_assembled = PETSC_FALSE;
if (product->api_user && A->offloadmask == PETSC_OFFLOAD_BOTH && B->offloadmask == PETSC_OFFLOAD_BOTH) { /* flag the matrix C values as computed, so that the numeric phase will only call MatAssembly */
mmdata->reusesym = PETSC_TRUE;
C->offloadmask = PETSC_OFFLOAD_GPU;
}
C->ops->productnumeric = MatProductNumeric_SeqAIJCUSPARSE_SeqAIJCUSPARSE;
PetscFunctionReturn(0);
}
PETSC_INTERN PetscErrorCode MatProductSetFromOptions_SeqAIJ_SeqDense(Mat);
/* handles sparse or dense B */
static PetscErrorCode MatProductSetFromOptions_SeqAIJCUSPARSE(Mat mat)
{
Mat_Product *product = mat->product;
PetscErrorCode ierr;
PetscBool isdense = PETSC_FALSE,Biscusp = PETSC_FALSE,Ciscusp = PETSC_TRUE;
PetscFunctionBegin;
MatCheckProduct(mat,1);
ierr = PetscObjectBaseTypeCompare((PetscObject)product->B,MATSEQDENSE,&isdense);CHKERRQ(ierr);
if (!product->A->boundtocpu && !product->B->boundtocpu) {
ierr = PetscObjectTypeCompare((PetscObject)product->B,MATSEQAIJCUSPARSE,&Biscusp);CHKERRQ(ierr);
}
if (product->type == MATPRODUCT_ABC) {
Ciscusp = PETSC_FALSE;
if (!product->C->boundtocpu) {
ierr = PetscObjectTypeCompare((PetscObject)product->C,MATSEQAIJCUSPARSE,&Ciscusp);CHKERRQ(ierr);
}
}
if (isdense) {
switch (product->type) {
case MATPRODUCT_AB:
case MATPRODUCT_AtB:
case MATPRODUCT_ABt:
case MATPRODUCT_PtAP:
case MATPRODUCT_RARt:
if (product->A->boundtocpu) {
ierr = MatProductSetFromOptions_SeqAIJ_SeqDense(mat);CHKERRQ(ierr);
} else {
mat->ops->productsymbolic = MatProductSymbolic_SeqAIJCUSPARSE_SeqDENSECUDA;
}
break;
case MATPRODUCT_ABC:
mat->ops->productsymbolic = MatProductSymbolic_ABC_Basic;
break;
default:
break;
}
} else if (Biscusp && Ciscusp) {
switch (product->type) {
case MATPRODUCT_AB:
case MATPRODUCT_AtB:
case MATPRODUCT_ABt:
mat->ops->productsymbolic = MatProductSymbolic_SeqAIJCUSPARSE_SeqAIJCUSPARSE;
break;
case MATPRODUCT_PtAP:
case MATPRODUCT_RARt:
case MATPRODUCT_ABC:
mat->ops->productsymbolic = MatProductSymbolic_ABC_Basic;
break;
default:
break;
}
} else { /* fallback for AIJ */
ierr = MatProductSetFromOptions_SeqAIJ(mat);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,NULL,yy,PETSC_FALSE,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy, Vec zz)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,yy,zz,PETSC_FALSE,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatMultHermitianTranspose_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,NULL,yy,PETSC_TRUE,PETSC_TRUE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatMultHermitianTransposeAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,yy,zz,PETSC_TRUE,PETSC_TRUE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,NULL,yy,PETSC_TRUE,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* z = op(A) x + y. If trans & !herm, op = ^T; if trans & herm, op = ^H; if !trans, op = no-op */
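/* For example, MatMultTranspose_SeqAIJCUSPARSE() above calls this kernel with trans=PETSC_TRUE, herm=PETSC_FALSE to compute y = A^T x,
   while MatMultHermitianTransposeAdd_SeqAIJCUSPARSE() passes trans=PETSC_TRUE, herm=PETSC_TRUE to compute z = A^H x + y */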
static PetscErrorCode MatMultAddKernel_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz,PetscBool trans,PetscBool herm)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
Mat_SeqAIJCUSPARSEMultStruct *matstruct;
PetscScalar *xarray,*zarray,*dptr,*beta,*xptr;
PetscErrorCode ierr;
hipError_t cerr;
hipsparseStatus_t stat;
hipsparseOperation_t opA = HIPSPARSE_OPERATION_NON_TRANSPOSE;
PetscBool compressed;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
PetscInt nx,ny;
#endif
PetscFunctionBegin;
if (herm && !trans) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Hermitian and not transpose not supported");
if (!a->nonzerorowcnt) {
if (!yy) {ierr = VecSet_SeqCUDA(zz,0);CHKERRQ(ierr);}
else {ierr = VecCopy_SeqCUDA(yy,zz);CHKERRQ(ierr);}
PetscFunctionReturn(0);
}
/* The line below is necessary due to the operations that modify the matrix on the CPU (axpy, scale, etc) */
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
if (!trans) {
matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat;
if (!matstruct) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"SeqAIJCUSPARSE does not have a 'mat' (need to fix)");
} else {
if (herm || !cusparsestruct->transgen) {
opA = herm ? HIPSPARSE_OPERATION_CONJUGATE_TRANSPOSE : HIPSPARSE_OPERATION_TRANSPOSE;
matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat;
} else {
if (!cusparsestruct->matTranspose) {ierr = MatSeqAIJCUSPARSEGenerateTransposeForMult(A);CHKERRQ(ierr);}
matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose;
}
}
/* Does the matrix use compressed rows (i.e., drop zero rows)? */
compressed = matstruct->cprowIndices ? PETSC_TRUE : PETSC_FALSE;
try {
ierr = VecCUDAGetArrayRead(xx,(const PetscScalar**)&xarray);CHKERRQ(ierr);
if (yy == zz) {ierr = VecCUDAGetArray(zz,&zarray);CHKERRQ(ierr);} /* read & write zz, so we need an up-to-date zarray on the GPU */
else {ierr = VecCUDAGetArrayWrite(zz,&zarray);CHKERRQ(ierr);} /* write zz, so no need to init zarray on GPU */
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
if (opA == HIPSPARSE_OPERATION_NON_TRANSPOSE) {
/* z = A x + beta y.
If A is compressed (with fewer rows), then Ax is shorter than the full z, so we need a work vector to store Ax.
When A is non-compressed, and z = y, we can set beta=1 to compute y = Ax + y in one call.
*/
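/* Concretely: when A is compressed only its nonzero rows are stored, so the SpMV result has only as many entries as there are
   nonzero rows and is scattered (added) into the full-length z after the SpMV below; when A is not compressed and zz == yy,
   a single SpMV with beta = 1 computes z = A x + y in place */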
xptr = xarray;
dptr = compressed ? cusparsestruct->workVector->data().get() : zarray;
beta = (yy == zz && !compressed) ? matstruct->beta_one : matstruct->beta_zero;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
/* Get length of x, y for y=Ax. ny might be shorter than the work vector's allocated length, since the work vector is
allocated to accommodate different uses. So we get the length info directly from mat.
*/
if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
CsrMatrix *mat = (CsrMatrix*)matstruct->mat;
nx = mat->num_cols;
ny = mat->num_rows;
}
#endif
} else {
/* z = A^T x + beta y
If A is compressed, then we need a work vector as the shorter version of x to compute A^T x.
Note A^Tx is of full length, so we set beta to 1.0 if y exists.
*/
xptr = compressed ? cusparsestruct->workVector->data().get() : xarray;
dptr = zarray;
beta = yy ? matstruct->beta_one : matstruct->beta_zero;
if (compressed) { /* Scatter x to work vector */
thrust::device_ptr<PetscScalar> xarr = thrust::device_pointer_cast(xarray);
thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(xarr, matstruct->cprowIndices->begin()))),
thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(xarr, matstruct->cprowIndices->begin()))) + matstruct->cprowIndices->size(),
VecCUDAEqualsReverse());
}
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
CsrMatrix *mat = (CsrMatrix*)matstruct->mat;
nx = mat->num_rows;
ny = mat->num_cols;
}
#endif
}
/* csr_spmv does y = alpha op(A) x + beta y */
if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
if (opA < 0 || opA > 2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE ABI on hipsparseOperation_t has changed and PETSc has not been updated accordingly");
if (!matstruct->cuSpMV[opA].initialized) { /* built on demand */
stat = hipsparseCreateDnVec(&matstruct->cuSpMV[opA].vecXDescr,nx,xptr,cusparse_scalartype);CHKERRCUSPARSE(stat);
stat = hipsparseCreateDnVec(&matstruct->cuSpMV[opA].vecYDescr,ny,dptr,cusparse_scalartype);CHKERRCUSPARSE(stat);
stat = hipsparseSpMV_bufferSize(cusparsestruct->handle, opA, matstruct->alpha_one,
matstruct->matDescr,
matstruct->cuSpMV[opA].vecXDescr, beta,
matstruct->cuSpMV[opA].vecYDescr,
cusparse_scalartype,
cusparsestruct->spmvAlg,
&matstruct->cuSpMV[opA].spmvBufferSize);CHKERRCUSPARSE(stat);
cerr = hipMalloc(&matstruct->cuSpMV[opA].spmvBuffer,matstruct->cuSpMV[opA].spmvBufferSize);CHKERRCUDA(cerr);
matstruct->cuSpMV[opA].initialized = PETSC_TRUE;
} else {
/* x, y's value pointers might change between calls, but their shape is kept, so we just update pointers */
stat = hipsparseDnVecSetValues(matstruct->cuSpMV[opA].vecXDescr,xptr);CHKERRCUSPARSE(stat);
stat = hipsparseDnVecSetValues(matstruct->cuSpMV[opA].vecYDescr,dptr);CHKERRCUSPARSE(stat);
}
stat = hipsparseSpMV(cusparsestruct->handle, opA,
matstruct->alpha_one,
matstruct->matDescr, /* built in MatSeqAIJCUSPARSECopyToGPU() or MatSeqAIJCUSPARSEGenerateTransposeForMult() */
matstruct->cuSpMV[opA].vecXDescr,
beta,
matstruct->cuSpMV[opA].vecYDescr,
cusparse_scalartype,
cusparsestruct->spmvAlg,
matstruct->cuSpMV[opA].spmvBuffer);CHKERRCUSPARSE(stat);
#else
CsrMatrix *mat = (CsrMatrix*)matstruct->mat;
stat = cusparse_csr_spmv(cusparsestruct->handle, opA,
mat->num_rows, mat->num_cols,
mat->num_entries, matstruct->alpha_one, matstruct->descr,
mat->values->data().get(), mat->row_offsets->data().get(),
mat->column_indices->data().get(), xptr, beta,
dptr);CHKERRCUSPARSE(stat);
#endif
} else {
if (cusparsestruct->nrows) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
cusparseHybMat_t hybMat = (cusparseHybMat_t)matstruct->mat;
stat = cusparse_hyb_spmv(cusparsestruct->handle, opA,
matstruct->alpha_one, matstruct->descr, hybMat,
xptr, beta,
dptr);CHKERRCUSPARSE(stat);
#endif
}
}
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
if (opA == HIPSPARSE_OPERATION_NON_TRANSPOSE) {
if (yy) { /* MatMultAdd: zz = A*xx + yy */
if (compressed) { /* A is compressed. We first copy yy to zz, then ScatterAdd the work vector to zz */
ierr = VecCopy_SeqCUDA(yy,zz);CHKERRQ(ierr); /* zz = yy */
} else if (zz != yy) { /* A is not compressed. zz already contains A*xx, and we just need to add yy */
ierr = VecAXPY_SeqCUDA(zz,1.0,yy);CHKERRQ(ierr); /* zz += yy */
}
} else if (compressed) { /* MatMult: zz = A*xx. A is compressed, so we zero zz first, then ScatterAdd the work vector to zz */
ierr = VecSet_SeqCUDA(zz,0);CHKERRQ(ierr);
}
/* ScatterAdd the result from work vector into the full vector when A is compressed */
if (compressed) {
thrust::device_ptr<PetscScalar> zptr = thrust::device_pointer_cast(zarray);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))),
thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))) + matstruct->cprowIndices->size(),
VecCUDAPlusEquals());
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
}
} else {
if (yy && yy != zz) {
ierr = VecAXPY_SeqCUDA(zz,1.0,yy);CHKERRQ(ierr); /* zz += yy */
}
}
ierr = VecCUDARestoreArrayRead(xx,(const PetscScalar**)&xarray);CHKERRQ(ierr);
if (yy == zz) {ierr = VecCUDARestoreArray(zz,&zarray);CHKERRQ(ierr);}
else {ierr = VecCUDARestoreArrayWrite(zz,&zarray);CHKERRQ(ierr);}
} catch(char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
}
if (yy) {
ierr = PetscLogGpuFlops(2.0*a->nz);CHKERRQ(ierr);
} else {
ierr = PetscLogGpuFlops(2.0*a->nz-a->nonzerorowcnt);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,yy,zz,PETSC_TRUE,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatAssemblyEnd_SeqAIJCUSPARSE(Mat A,MatAssemblyType mode)
{
PetscErrorCode ierr;
PetscSplitCSRDataStructure *d_mat = NULL;
PetscFunctionBegin;
if (A->factortype == MAT_FACTOR_NONE) {
d_mat = ((Mat_SeqAIJCUSPARSE*)A->spptr)->deviceMat;
}
ierr = MatAssemblyEnd_SeqAIJ(A,mode);CHKERRQ(ierr); // this does very little if assembled on GPU - call it?
if (mode == MAT_FLUSH_ASSEMBLY || A->boundtocpu) PetscFunctionReturn(0);
if (d_mat) {
A->offloadmask = PETSC_OFFLOAD_GPU;
}
PetscFunctionReturn(0);
}
/* --------------------------------------------------------------------------------*/
/*@
MatCreateSeqAIJCUSPARSE - Creates a sparse matrix in AIJ (compressed row) format
(the default parallel PETSc format). This matrix will ultimately be pushed down
to NVIDIA GPUs and use the CUSPARSE library for calculations. For good matrix
assembly performance the user should preallocate the matrix storage by setting
the parameter nz (or the array nnz). By setting these parameters accurately,
performance during matrix assembly can be increased by more than a factor of 50.
Collective
Input Parameters:
+ comm - MPI communicator, set to PETSC_COMM_SELF
. m - number of rows
. n - number of columns
. nz - number of nonzeros per row (same for all rows)
- nnz - array containing the number of nonzeros in the various rows
(possibly different for each row) or NULL
Output Parameter:
. A - the matrix
It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
MatXXXXSetPreallocation() paradigm instead of calling this routine directly.
[MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation]
Notes:
If nnz is given then nz is ignored
The AIJ format (also called the Yale sparse matrix format or
compressed row storage) is fully compatible with standard Fortran 77
storage. That is, the stored row and column indices can begin at
either one (as in Fortran) or zero. See the users' manual for details.
Specify the preallocated storage with either nz or nnz (not both).
Set nz=PETSC_DEFAULT and nnz=NULL for PETSc to control dynamic memory
allocation. For large problems you MUST preallocate memory or you
will get TERRIBLE performance, see the users' manual chapter on matrices.
By default, this format uses inodes (identical nodes) when possible, to
improve numerical efficiency of matrix-vector products and solves. We
search for consecutive rows with the same nonzero structure, thereby
reusing matrix information to achieve increased efficiency.
Level: intermediate
.seealso: MatCreate(), MatCreateAIJ(), MatSetValues(), MatSeqAIJSetColumnIndices(), MatCreateSeqAIJWithArrays(), MatCreateAIJ(), MATSEQAIJCUSPARSE, MATAIJCUSPARSE
@*/
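/* A minimal usage sketch (not part of the original source), creating a 100x100 sequential AIJCUSPARSE matrix preallocated with
   at most 3 nonzeros per row; error checking follows the CHKERRQ() convention used throughout this file:

     Mat A;
     ierr = MatCreateSeqAIJCUSPARSE(PETSC_COMM_SELF,100,100,3,NULL,&A);CHKERRQ(ierr);
     // ... fill with MatSetValues(), then MatAssemblyBegin()/MatAssemblyEnd() as for any AIJ matrix ...
     ierr = MatDestroy(&A);CHKERRQ(ierr);

   As noted above, the MatCreate()/MatSetType()/MatSeqAIJSetPreallocation() paradigm is preferred over calling this routine directly. */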
PetscErrorCode MatCreateSeqAIJCUSPARSE(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt nz,const PetscInt nnz[],Mat *A)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatCreate(comm,A);CHKERRQ(ierr);
ierr = MatSetSizes(*A,m,n,m,n);CHKERRQ(ierr);
ierr = MatSetType(*A,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
ierr = MatSeqAIJSetPreallocation_SeqAIJ(*A,nz,(PetscInt*)nnz);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatDestroy_SeqAIJCUSPARSE(Mat A)
{
PetscErrorCode ierr;
PetscSplitCSRDataStructure *d_mat = NULL;
PetscFunctionBegin;
if (A->factortype == MAT_FACTOR_NONE) {
d_mat = ((Mat_SeqAIJCUSPARSE*)A->spptr)->deviceMat;
((Mat_SeqAIJCUSPARSE*)A->spptr)->deviceMat = NULL;
ierr = MatSeqAIJCUSPARSE_Destroy((Mat_SeqAIJCUSPARSE**)&A->spptr);CHKERRQ(ierr);
} else {
ierr = MatSeqAIJCUSPARSETriFactors_Destroy((Mat_SeqAIJCUSPARSETriFactors**)&A->spptr);CHKERRQ(ierr);
}
if (d_mat) {
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
hipError_t err;
PetscSplitCSRDataStructure h_mat;
ierr = PetscInfo(A,"Have device matrix\n");CHKERRQ(ierr);
err = hipMemcpy( &h_mat, d_mat, sizeof(PetscSplitCSRDataStructure), hipMemcpyDeviceToHost);CHKERRCUDA(err);
if (a->compressedrow.use) {
err = hipFree(h_mat.diag.i);CHKERRCUDA(err);
}
err = hipFree(d_mat);CHKERRCUDA(err);
}
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJCopySubArray_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatCUSPARSESetFormat_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdense_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatFactorGetSolverType_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetPreallocationCOO_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetValuesCOO_C",NULL);CHKERRQ(ierr);
ierr = MatDestroy_SeqAIJ(A);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PETSC_INTERN PetscErrorCode MatConvert_SeqAIJ_SeqAIJCUSPARSE(Mat,MatType,MatReuse,Mat*);
static PetscErrorCode MatBindToCPU_SeqAIJCUSPARSE(Mat,PetscBool);
static PetscErrorCode MatDuplicate_SeqAIJCUSPARSE(Mat A,MatDuplicateOption cpvalues,Mat *B)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatDuplicate_SeqAIJ(A,cpvalues,B);CHKERRQ(ierr);
ierr = MatConvert_SeqAIJ_SeqAIJCUSPARSE(*B,MATSEQAIJCUSPARSE,MAT_INPLACE_MATRIX,B);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatAXPY_SeqAIJCUSPARSE(Mat Y,PetscScalar a,Mat X,MatStructure str)
{
PetscErrorCode ierr;
Mat_SeqAIJ *x = (Mat_SeqAIJ*)X->data,*y = (Mat_SeqAIJ*)Y->data;
Mat_SeqAIJCUSPARSE *cy;
Mat_SeqAIJCUSPARSE *cx;
PetscScalar *ay;
const PetscScalar *ax;
CsrMatrix *csry,*csrx;
hipError_t cerr;
PetscFunctionBegin;
if (X->ops->axpy != Y->ops->axpy) {
ierr = MatAXPY_SeqAIJ(Y,a,X,str);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* if we are here, it means both matrices are bound to GPU */
ierr = MatSeqAIJCUSPARSECopyToGPU(Y);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSECopyToGPU(X);CHKERRQ(ierr);
cy = (Mat_SeqAIJCUSPARSE*)Y->spptr;
cx = (Mat_SeqAIJCUSPARSE*)X->spptr;
if (cy->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)Y),PETSC_ERR_PLIB,"only MAT_CUSPARSE_CSR supported");
if (cx->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)X),PETSC_ERR_PLIB,"only MAT_CUSPARSE_CSR supported");
csry = (CsrMatrix*)cy->mat->mat;
csrx = (CsrMatrix*)cx->mat->mat;
/* see if we can turn this into a cublas axpy */
if (str != SAME_NONZERO_PATTERN && x->nz == y->nz && !x->compressedrow.use && !y->compressedrow.use) {
bool eq = thrust::equal(thrust::device,csry->row_offsets->begin(),csry->row_offsets->end(),csrx->row_offsets->begin());
if (eq) {
eq = thrust::equal(thrust::device,csry->column_indices->begin(),csry->column_indices->end(),csrx->column_indices->begin());
}
if (eq) str = SAME_NONZERO_PATTERN;
}
if (str == SUBSET_NONZERO_PATTERN) {
hipsparseStatus_t stat;
PetscScalar b = 1.0;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
size_t bufferSize;
void *buffer;
#endif
ierr = MatSeqAIJCUSPARSEGetArrayRead(X,&ax);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEGetArray(Y,&ay);CHKERRQ(ierr);
stat = hipsparseSetPointerMode(cy->handle, HIPSPARSE_POINTER_MODE_HOST);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
stat = cusparse_csr_spgeam_bufferSize(cy->handle,Y->rmap->n,Y->cmap->n,
&a,cx->mat->descr,x->nz,ax,csrx->row_offsets->data().get(),csrx->column_indices->data().get(),
&b,cy->mat->descr,y->nz,ay,csry->row_offsets->data().get(),csry->column_indices->data().get(),
cy->mat->descr, ay,csry->row_offsets->data().get(),csry->column_indices->data().get(),&bufferSize);CHKERRCUSPARSE(stat);
cerr = hipMalloc(&buffer,bufferSize);CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
stat = cusparse_csr_spgeam(cy->handle,Y->rmap->n,Y->cmap->n,
&a,cx->mat->descr,x->nz,ax,csrx->row_offsets->data().get(),csrx->column_indices->data().get(),
&b,cy->mat->descr,y->nz,ay,csry->row_offsets->data().get(),csry->column_indices->data().get(),
cy->mat->descr, ay,csry->row_offsets->data().get(),csry->column_indices->data().get(),buffer);CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuFlops(x->nz + y->nz);CHKERRQ(ierr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
cerr = hipFree(buffer);CHKERRCUDA(cerr);
#else
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
stat = cusparse_csr_spgeam(cy->handle,Y->rmap->n,Y->cmap->n,
&a,cx->mat->descr,x->nz,ax,csrx->row_offsets->data().get(),csrx->column_indices->data().get(),
&b,cy->mat->descr,y->nz,ay,csry->row_offsets->data().get(),csry->column_indices->data().get(),
cy->mat->descr, ay,csry->row_offsets->data().get(),csry->column_indices->data().get());CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuFlops(x->nz + y->nz);CHKERRQ(ierr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
#endif
stat = hipsparseSetPointerMode(cy->handle, HIPSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
ierr = MatSeqAIJCUSPARSERestoreArrayRead(X,&ax);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSERestoreArray(Y,&ay);CHKERRQ(ierr);
ierr = MatSeqAIJInvalidateDiagonal(Y);CHKERRQ(ierr);
} else if (str == SAME_NONZERO_PATTERN) {
hipblasHandle_t cublasv2handle;
hipblasStatus_t berr;
PetscBLASInt one = 1, bnz = 1;
ierr = MatSeqAIJCUSPARSEGetArrayRead(X,&ax);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEGetArray(Y,&ay);CHKERRQ(ierr);
ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr);
ierr = PetscBLASIntCast(x->nz,&bnz);CHKERRQ(ierr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
berr = cublasXaxpy(cublasv2handle,bnz,&a,ax,one,ay,one);CHKERRCUBLAS(berr);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuFlops(2.0*bnz);CHKERRQ(ierr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSERestoreArrayRead(X,&ax);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSERestoreArray(Y,&ay);CHKERRQ(ierr);
ierr = MatSeqAIJInvalidateDiagonal(Y);CHKERRQ(ierr);
} else {
ierr = MatAXPY_SeqAIJ(Y,a,X,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatZeroEntries_SeqAIJCUSPARSE(Mat A)
{
PetscErrorCode ierr;
PetscBool both = PETSC_FALSE;
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
PetscFunctionBegin;
if (A->factortype == MAT_FACTOR_NONE) {
Mat_SeqAIJCUSPARSE *spptr = (Mat_SeqAIJCUSPARSE*)A->spptr;
if (spptr->mat) {
CsrMatrix* matrix = (CsrMatrix*)spptr->mat->mat;
if (matrix->values) {
both = PETSC_TRUE;
thrust::fill(thrust::device,matrix->values->begin(),matrix->values->end(),0.);
}
}
if (spptr->matTranspose) {
CsrMatrix* matrix = (CsrMatrix*)spptr->matTranspose->mat;
if (matrix->values) {
thrust::fill(thrust::device,matrix->values->begin(),matrix->values->end(),0.);
}
}
}
//ierr = MatZeroEntries_SeqAIJ(A);CHKERRQ(ierr);
ierr = PetscArrayzero(a->a,a->i[A->rmap->n]);CHKERRQ(ierr);
ierr = MatSeqAIJInvalidateDiagonal(A);CHKERRQ(ierr);
if (both) A->offloadmask = PETSC_OFFLOAD_BOTH;
else A->offloadmask = PETSC_OFFLOAD_CPU;
PetscFunctionReturn(0);
}
static PetscErrorCode MatBindToCPU_SeqAIJCUSPARSE(Mat A,PetscBool flg)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
PetscErrorCode ierr;
PetscFunctionBegin;
if (A->factortype != MAT_FACTOR_NONE) PetscFunctionReturn(0);
if (flg) {
ierr = MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr);
A->ops->axpy = MatAXPY_SeqAIJ;
A->ops->zeroentries = MatZeroEntries_SeqAIJ;
A->ops->mult = MatMult_SeqAIJ;
A->ops->multadd = MatMultAdd_SeqAIJ;
A->ops->multtranspose = MatMultTranspose_SeqAIJ;
A->ops->multtransposeadd = MatMultTransposeAdd_SeqAIJ;
A->ops->multhermitiantranspose = NULL;
A->ops->multhermitiantransposeadd = NULL;
A->ops->productsetfromoptions = MatProductSetFromOptions_SeqAIJ;
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJCopySubArray_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdense_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetPreallocationCOO_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetValuesCOO_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJGetArray_C",MatSeqAIJGetArray_SeqAIJ);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C",NULL);CHKERRQ(ierr);
} else {
A->ops->axpy = MatAXPY_SeqAIJCUSPARSE;
A->ops->zeroentries = MatZeroEntries_SeqAIJCUSPARSE;
A->ops->mult = MatMult_SeqAIJCUSPARSE;
A->ops->multadd = MatMultAdd_SeqAIJCUSPARSE;
A->ops->multtranspose = MatMultTranspose_SeqAIJCUSPARSE;
A->ops->multtransposeadd = MatMultTransposeAdd_SeqAIJCUSPARSE;
A->ops->multhermitiantranspose = MatMultHermitianTranspose_SeqAIJCUSPARSE;
A->ops->multhermitiantransposeadd = MatMultHermitianTransposeAdd_SeqAIJCUSPARSE;
A->ops->productsetfromoptions = MatProductSetFromOptions_SeqAIJCUSPARSE;
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJCopySubArray_C",MatSeqAIJCopySubArray_SeqAIJCUSPARSE);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C",MatProductSetFromOptions_SeqAIJCUSPARSE);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdense_C",MatProductSetFromOptions_SeqAIJCUSPARSE);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetPreallocationCOO_C",MatSetPreallocationCOO_SeqAIJCUSPARSE);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetValuesCOO_C",MatSetValuesCOO_SeqAIJCUSPARSE);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJGetArray_C",MatSeqAIJGetArray_SeqAIJCUSPARSE);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C",MatProductSetFromOptions_SeqAIJCUSPARSE);CHKERRQ(ierr);
}
A->boundtocpu = flg;
a->inode.use = flg;
PetscFunctionReturn(0);
}
PETSC_INTERN PetscErrorCode MatConvert_SeqAIJ_SeqAIJCUSPARSE(Mat A, MatType mtype, MatReuse reuse, Mat* newmat)
{
PetscErrorCode ierr;
hipsparseStatus_t stat;
Mat B;
PetscFunctionBegin;
ierr = PetscCUDAInitializeCheck();CHKERRQ(ierr); /* first use of CUSPARSE may be via MatConvert */
if (reuse == MAT_INITIAL_MATRIX) {
ierr = MatDuplicate(A,MAT_COPY_VALUES,newmat);CHKERRQ(ierr);
} else if (reuse == MAT_REUSE_MATRIX) {
ierr = MatCopy(A,*newmat,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
}
B = *newmat;
ierr = PetscFree(B->defaultvectype);CHKERRQ(ierr);
ierr = PetscStrallocpy(VECCUDA,&B->defaultvectype);CHKERRQ(ierr);
if (reuse != MAT_REUSE_MATRIX && !B->spptr) {
if (B->factortype == MAT_FACTOR_NONE) {
Mat_SeqAIJCUSPARSE *spptr;
ierr = PetscNew(&spptr);CHKERRQ(ierr);
spptr->format = MAT_CUSPARSE_CSR;
stat = hipsparseCreate(&spptr->handle);CHKERRCUSPARSE(stat);
B->spptr = spptr;
spptr->deviceMat = NULL;
} else {
Mat_SeqAIJCUSPARSETriFactors *spptr;
ierr = PetscNew(&spptr);CHKERRQ(ierr);
stat = hipsparseCreate(&spptr->handle);CHKERRCUSPARSE(stat);
B->spptr = spptr;
}
B->offloadmask = PETSC_OFFLOAD_UNALLOCATED;
}
B->ops->assemblyend = MatAssemblyEnd_SeqAIJCUSPARSE;
B->ops->destroy = MatDestroy_SeqAIJCUSPARSE;
B->ops->setfromoptions = MatSetFromOptions_SeqAIJCUSPARSE;
B->ops->bindtocpu = MatBindToCPU_SeqAIJCUSPARSE;
B->ops->duplicate = MatDuplicate_SeqAIJCUSPARSE;
ierr = MatBindToCPU_SeqAIJCUSPARSE(B,PETSC_FALSE);CHKERRQ(ierr);
ierr = PetscObjectChangeTypeName((PetscObject)B,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatCUSPARSESetFormat_C",MatCUSPARSESetFormat_SeqAIJCUSPARSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PETSC_EXTERN PetscErrorCode MatCreate_SeqAIJCUSPARSE(Mat B)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatCreate_SeqAIJ(B);CHKERRQ(ierr);
ierr = MatConvert_SeqAIJ_SeqAIJCUSPARSE(B,MATSEQAIJCUSPARSE,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr);
ierr = PetscObjectOptionsBegin((PetscObject)B);CHKERRQ(ierr);
ierr = MatSetFromOptions_SeqAIJCUSPARSE(PetscOptionsObject,B);CHKERRQ(ierr);
ierr = PetscOptionsEnd();CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/*MC
MATSEQAIJCUSPARSE - MATAIJCUSPARSE = "(seq)aijcusparse" - A matrix type to be used for sparse matrices.
A matrix type whose data resides on NVIDIA GPUs. These matrices can be in either
CSR, ELL, or Hybrid format. The ELL and HYB formats require CUDA 4.2 or later.
All matrix calculations are performed on NVIDIA GPUs using the CUSPARSE library.
Options Database Keys:
+ -mat_type aijcusparse - sets the matrix type to "seqaijcusparse" during a call to MatSetFromOptions()
. -mat_cusparse_storage_format csr - sets the storage format of matrices (for MatMult and factors in MatSolve) during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid).
- -mat_cusparse_mult_storage_format csr - sets the storage format of matrices (for MatMult) during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid).
Level: beginner
.seealso: MatCreateSeqAIJCUSPARSE(), MATAIJCUSPARSE, MatCreateAIJCUSPARSE(), MatCUSPARSESetFormat(), MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation
M*/
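/* A minimal sketch (assumed example, not part of the original source) of selecting this type at runtime: create the matrix with
   MatCreate()/MatSetFromOptions() and pass

     -mat_type aijcusparse -mat_cusparse_storage_format csr

   on the command line, or set the type explicitly in code with MatSetType(A,MATSEQAIJCUSPARSE) followed by
   MatSeqAIJSetPreallocation(A,nz,nnz). */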
PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse(Mat,MatFactorType,Mat*);
PETSC_EXTERN PetscErrorCode MatSolverTypeRegister_CUSPARSE(void)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSolverTypeRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_LU,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
ierr = MatSolverTypeRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_CHOLESKY,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
ierr = MatSolverTypeRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_ILU,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
ierr = MatSolverTypeRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_ICC,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE **cusparsestruct)
{
PetscErrorCode ierr;
hipsparseStatus_t stat;
PetscFunctionBegin;
if (*cusparsestruct) {
ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*cusparsestruct)->mat,(*cusparsestruct)->format);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*cusparsestruct)->matTranspose,(*cusparsestruct)->format);CHKERRQ(ierr);
delete (*cusparsestruct)->workVector;
delete (*cusparsestruct)->rowoffsets_gpu;
delete (*cusparsestruct)->cooPerm;
delete (*cusparsestruct)->cooPerm_a;
if ((*cusparsestruct)->handle) {stat = hipsparseDestroy((*cusparsestruct)->handle);CHKERRCUSPARSE(stat);}
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
hipError_t cerr = hipFree((*cusparsestruct)->csr2cscBuffer);CHKERRCUDA(cerr);
#endif
ierr = PetscFree(*cusparsestruct);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
static PetscErrorCode CsrMatrix_Destroy(CsrMatrix **mat)
{
PetscFunctionBegin;
if (*mat) {
delete (*mat)->values;
delete (*mat)->column_indices;
delete (*mat)->row_offsets;
delete *mat;
*mat = 0;
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct **trifactor)
{
hipsparseStatus_t stat;
PetscErrorCode ierr;
PetscFunctionBegin;
if (*trifactor) {
if ((*trifactor)->descr) { stat = hipsparseDestroyMatDescr((*trifactor)->descr);CHKERRCUSPARSE(stat); }
if ((*trifactor)->solveInfo) { stat = cusparse_destroy_analysis_info((*trifactor)->solveInfo);CHKERRCUSPARSE(stat); }
ierr = CsrMatrix_Destroy(&(*trifactor)->csrMat);CHKERRQ(ierr);
if ((*trifactor)->solveBuffer) {hipError_t cerr = hipFree((*trifactor)->solveBuffer);CHKERRCUDA(cerr);}
if ((*trifactor)->AA_h) {hipError_t cerr = hipHostFree((*trifactor)->AA_h);CHKERRCUDA(cerr);}
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
if ((*trifactor)->csr2cscBuffer) {hipError_t cerr = hipFree((*trifactor)->csr2cscBuffer);CHKERRCUDA(cerr);}
#endif
ierr = PetscFree(*trifactor);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct **matstruct,MatCUSPARSEStorageFormat format)
{
CsrMatrix *mat;
hipsparseStatus_t stat;
hipError_t err;
PetscFunctionBegin;
if (*matstruct) {
if ((*matstruct)->mat) {
if (format==MAT_CUSPARSE_ELL || format==MAT_CUSPARSE_HYB) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
cusparseHybMat_t hybMat = (cusparseHybMat_t)(*matstruct)->mat;
stat = cusparseDestroyHybMat(hybMat);CHKERRCUSPARSE(stat);
#endif
} else {
mat = (CsrMatrix*)(*matstruct)->mat;
CsrMatrix_Destroy(&mat);
}
}
if ((*matstruct)->descr) { stat = hipsparseDestroyMatDescr((*matstruct)->descr);CHKERRCUSPARSE(stat); }
delete (*matstruct)->cprowIndices;
if ((*matstruct)->alpha_one) { err=hipFree((*matstruct)->alpha_one);CHKERRCUDA(err); }
if ((*matstruct)->beta_zero) { err=hipFree((*matstruct)->beta_zero);CHKERRCUDA(err); }
if ((*matstruct)->beta_one) { err=hipFree((*matstruct)->beta_one);CHKERRCUDA(err); }
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
Mat_SeqAIJCUSPARSEMultStruct *mdata = *matstruct;
if (mdata->matDescr) {stat = hipsparseDestroySpMat(mdata->matDescr);CHKERRCUSPARSE(stat);}
for (int i=0; i<3; i++) {
if (mdata->cuSpMV[i].initialized) {
err = hipFree(mdata->cuSpMV[i].spmvBuffer);CHKERRCUDA(err);
stat = hipsparseDestroyDnVec(mdata->cuSpMV[i].vecXDescr);CHKERRCUSPARSE(stat);
stat = hipsparseDestroyDnVec(mdata->cuSpMV[i].vecYDescr);CHKERRCUSPARSE(stat);
}
}
#endif
delete *matstruct;
*matstruct = NULL;
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Reset(Mat_SeqAIJCUSPARSETriFactors** trifactors)
{
PetscErrorCode ierr;
PetscFunctionBegin;
if (*trifactors) {
ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*trifactors)->loTriFactorPtr);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*trifactors)->upTriFactorPtr);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*trifactors)->loTriFactorPtrTranspose);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*trifactors)->upTriFactorPtrTranspose);CHKERRQ(ierr);
delete (*trifactors)->rpermIndices;
delete (*trifactors)->cpermIndices;
delete (*trifactors)->workVector;
(*trifactors)->rpermIndices = NULL;
(*trifactors)->cpermIndices = NULL;
(*trifactors)->workVector = NULL;
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors** trifactors)
{
PetscErrorCode ierr;
hipsparseHandle_t handle;
hipsparseStatus_t stat;
PetscFunctionBegin;
if (*trifactors) {
ierr = MatSeqAIJCUSPARSETriFactors_Reset(trifactors);CHKERRQ(ierr);
if ((handle = (*trifactors)->handle)) {
stat = hipsparseDestroy(handle);CHKERRCUSPARSE(stat);
}
ierr = PetscFree(*trifactors);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
struct IJCompare
{
__host__ __device__
inline bool operator() (const thrust::tuple<PetscInt, PetscInt> &t1, const thrust::tuple<PetscInt, PetscInt> &t2)
{
if (t1.get<0>() < t2.get<0>()) return true;
if (t1.get<0>() == t2.get<0>()) return t1.get<1>() < t2.get<1>();
return false;
}
};
struct IJEqual
{
__host__ __device__
inline bool operator() (const thrust::tuple<PetscInt, PetscInt> &t1, const thrust::tuple<PetscInt, PetscInt> &t2)
{
if (t1.get<0>() != t2.get<0>() || t1.get<1>() != t2.get<1>()) return false;
return true;
}
};
struct IJDiff
{
__host__ __device__
inline PetscInt operator() (const PetscInt &t1, const PetscInt &t2)
{
return t1 == t2 ? 0 : 1;
}
};
struct IJSum
{
__host__ __device__
inline PetscInt operator() (const PetscInt &t1, const PetscInt &t2)
{
return t1||t2;
}
};
#include <thrust/iterator/discard_iterator.h>
PetscErrorCode MatSetValuesCOO_SeqAIJCUSPARSE(Mat A, const PetscScalar v[], InsertMode imode)
{
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
THRUSTARRAY *cooPerm_v = NULL;
thrust::device_ptr<const PetscScalar> d_v;
CsrMatrix *matrix;
PetscErrorCode ierr;
hipError_t cerr;
PetscInt n;
PetscFunctionBegin;
if (!cusp) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUSPARSE struct");
if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUSPARSE CsrMatrix");
if (!cusp->cooPerm) {
ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
matrix = (CsrMatrix*)cusp->mat->mat;
if (!matrix->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUDA memory");
if (!v) {
if (imode == INSERT_VALUES) thrust::fill(thrust::device,matrix->values->begin(),matrix->values->end(),0.);
goto finalize;
}
n = cusp->cooPerm->size();
if (isCudaMem(v)) {
d_v = thrust::device_pointer_cast(v);
} else {
cooPerm_v = new THRUSTARRAY(n);
cooPerm_v->assign(v,v+n);
d_v = cooPerm_v->data();
ierr = PetscLogCpuToGpu(n*sizeof(PetscScalar));CHKERRQ(ierr);
}
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
if (imode == ADD_VALUES) { /* ADD VALUES means add to existing ones */
if (cusp->cooPerm_a) {
THRUSTARRAY *cooPerm_w = new THRUSTARRAY(matrix->values->size());
auto vbit = thrust::make_permutation_iterator(d_v,cusp->cooPerm->begin());
thrust::reduce_by_key(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),vbit,thrust::make_discard_iterator(),cooPerm_w->begin(),thrust::equal_to<PetscInt>(),thrust::plus<PetscScalar>());
thrust::transform(cooPerm_w->begin(),cooPerm_w->end(),matrix->values->begin(),matrix->values->begin(),thrust::plus<PetscScalar>());
delete cooPerm_w;
} else {
auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->cooPerm->begin()),
matrix->values->begin()));
auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->cooPerm->end()),
matrix->values->end()));
thrust::for_each(zibit,zieit,VecCUDAPlusEquals());
}
} else {
if (cusp->cooPerm_a) { /* repeated entries in COO, with INSERT_VALUES -> reduce */
auto vbit = thrust::make_permutation_iterator(d_v,cusp->cooPerm->begin());
thrust::reduce_by_key(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),vbit,thrust::make_discard_iterator(),matrix->values->begin(),thrust::equal_to<PetscInt>(),thrust::plus<PetscScalar>());
} else {
auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->cooPerm->begin()),
matrix->values->begin()));
auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->cooPerm->end()),
matrix->values->end()));
thrust::for_each(zibit,zieit,VecCUDAEquals());
}
}
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
finalize:
delete cooPerm_v;
A->offloadmask = PETSC_OFFLOAD_GPU;
ierr = PetscObjectStateIncrease((PetscObject)A);CHKERRQ(ierr);
/* shorter version of MatAssemblyEnd_SeqAIJ */
ierr = PetscInfo3(A,"Matrix size: %D X %D; storage space: 0 unneeded,%D used\n",A->rmap->n,A->cmap->n,a->nz);CHKERRQ(ierr);
ierr = PetscInfo(A,"Number of mallocs during MatSetValues() is 0\n");CHKERRQ(ierr);
ierr = PetscInfo1(A,"Maximum nonzeros in any row is %D\n",a->rmax);CHKERRQ(ierr);
a->reallocs = 0;
A->info.mallocs += 0;
A->info.nz_unneeded = 0;
A->assembled = A->was_assembled = PETSC_TRUE;
A->num_ass++;
PetscFunctionReturn(0);
}
#include <thrust/binary_search.h>
PetscErrorCode MatSetPreallocationCOO_SeqAIJCUSPARSE(Mat A, PetscInt n, const PetscInt coo_i[], const PetscInt coo_j[])
{
PetscErrorCode ierr;
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
PetscInt cooPerm_n, nzr = 0;
hipError_t cerr;
PetscFunctionBegin;
ierr = PetscLayoutSetUp(A->rmap);CHKERRQ(ierr);
ierr = PetscLayoutSetUp(A->cmap);CHKERRQ(ierr);
cooPerm_n = cusp->cooPerm ? cusp->cooPerm->size() : 0;
if (n != cooPerm_n) {
delete cusp->cooPerm;
delete cusp->cooPerm_a;
cusp->cooPerm = NULL;
cusp->cooPerm_a = NULL;
}
if (n) {
THRUSTINTARRAY d_i(n);
THRUSTINTARRAY d_j(n);
THRUSTINTARRAY ii(A->rmap->n);
if (!cusp->cooPerm) { cusp->cooPerm = new THRUSTINTARRAY(n); }
if (!cusp->cooPerm_a) { cusp->cooPerm_a = new THRUSTINTARRAY(n); }
ierr = PetscLogCpuToGpu(2.*n*sizeof(PetscInt));CHKERRQ(ierr);
d_i.assign(coo_i,coo_i+n);
d_j.assign(coo_j,coo_j+n);
auto fkey = thrust::make_zip_iterator(thrust::make_tuple(d_i.begin(),d_j.begin()));
auto ekey = thrust::make_zip_iterator(thrust::make_tuple(d_i.end(),d_j.end()));
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
thrust::sequence(thrust::device, cusp->cooPerm->begin(), cusp->cooPerm->end(), 0);
thrust::sort_by_key(fkey, ekey, cusp->cooPerm->begin(), IJCompare());
*cusp->cooPerm_a = d_i;
THRUSTINTARRAY w = d_j;
auto nekey = thrust::unique(fkey, ekey, IJEqual());
if (nekey == ekey) { /* all entries are unique */
delete cusp->cooPerm_a;
cusp->cooPerm_a = NULL;
} else { /* I couldn't come up with a more elegant algorithm */
adjacent_difference(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),cusp->cooPerm_a->begin(),IJDiff());
adjacent_difference(w.begin(),w.end(),w.begin(),IJDiff());
(*cusp->cooPerm_a)[0] = 0;
w[0] = 0;
thrust::transform(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),w.begin(),cusp->cooPerm_a->begin(),IJSum());
thrust::inclusive_scan(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),cusp->cooPerm_a->begin(),thrust::plus<PetscInt>());
}
thrust::counting_iterator<PetscInt> search_begin(0);
thrust::upper_bound(d_i.begin(), nekey.get_iterator_tuple().get<0>(),
search_begin, search_begin + A->rmap->n,
ii.begin());
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = MatSeqXAIJFreeAIJ(A,&a->a,&a->j,&a->i);CHKERRQ(ierr);
a->singlemalloc = PETSC_FALSE;
a->free_a = PETSC_TRUE;
a->free_ij = PETSC_TRUE;
ierr = PetscMalloc1(A->rmap->n+1,&a->i);CHKERRQ(ierr);
a->i[0] = 0;
cerr = hipMemcpy(a->i+1,ii.data().get(),A->rmap->n*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
a->nz = a->maxnz = a->i[A->rmap->n];
a->rmax = 0;
ierr = PetscMalloc1(a->nz,&a->a);CHKERRQ(ierr);
ierr = PetscMalloc1(a->nz,&a->j);CHKERRQ(ierr);
cerr = hipMemcpy(a->j,d_j.data().get(),a->nz*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
if (!a->ilen) { ierr = PetscMalloc1(A->rmap->n,&a->ilen);CHKERRQ(ierr); }
if (!a->imax) { ierr = PetscMalloc1(A->rmap->n,&a->imax);CHKERRQ(ierr); }
for (PetscInt i = 0; i < A->rmap->n; i++) {
const PetscInt nnzr = a->i[i+1] - a->i[i];
nzr += (PetscInt)!!(nnzr);
a->ilen[i] = a->imax[i] = nnzr;
a->rmax = PetscMax(a->rmax,nnzr);
}
a->nonzerorowcnt = nzr;
A->preallocated = PETSC_TRUE;
ierr = PetscLogGpuToCpu((A->rmap->n+a->nz)*sizeof(PetscInt));CHKERRQ(ierr);
ierr = MatMarkDiagonal_SeqAIJ(A);CHKERRQ(ierr);
} else {
ierr = MatSeqAIJSetPreallocation(A,0,NULL);CHKERRQ(ierr);
}
ierr = MatSetOption(A,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_TRUE);CHKERRQ(ierr);
/* We want to allocate the CUSPARSE struct for matvec now.
The code is so convoluted now that I prefer to copy zeros */
ierr = PetscArrayzero(a->a,a->nz);CHKERRQ(ierr);
ierr = MatCheckCompressedRow(A,nzr,&a->compressedrow,a->i,A->rmap->n,0.6);CHKERRQ(ierr);
A->offloadmask = PETSC_OFFLOAD_CPU;
A->nonzerostate++;
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&cusp->matTranspose,cusp->format);CHKERRQ(ierr);
A->assembled = PETSC_FALSE;
A->was_assembled = PETSC_FALSE;
PetscFunctionReturn(0);
}
PetscErrorCode MatSeqAIJCUSPARSEGetArrayRead(Mat A, const PetscScalar** a)
{
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
CsrMatrix *csr;
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidPointer(a,2);
PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
if (cusp->format == MAT_CUSPARSE_ELL || cusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
csr = (CsrMatrix*)cusp->mat->mat;
if (!csr->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUDA memory");
*a = csr->values->data().get();
PetscFunctionReturn(0);
}
PetscErrorCode MatSeqAIJCUSPARSERestoreArrayRead(Mat A, const PetscScalar** a)
{
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidPointer(a,2);
PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
*a = NULL;
PetscFunctionReturn(0);
}
PetscErrorCode MatSeqAIJCUSPARSEGetArray(Mat A, PetscScalar** a)
{
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
CsrMatrix *csr;
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidPointer(a,2);
PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
if (cusp->format == MAT_CUSPARSE_ELL || cusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
csr = (CsrMatrix*)cusp->mat->mat;
if (!csr->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUDA memory");
*a = csr->values->data().get();
A->offloadmask = PETSC_OFFLOAD_GPU;
PetscFunctionReturn(0);
}
PetscErrorCode MatSeqAIJCUSPARSERestoreArray(Mat A, PetscScalar** a)
{
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidPointer(a,2);
PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
ierr = PetscObjectStateIncrease((PetscObject)A);CHKERRQ(ierr);
*a = NULL;
PetscFunctionReturn(0);
}
PetscErrorCode MatSeqAIJCUSPARSEGetArrayWrite(Mat A, PetscScalar** a)
{
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
CsrMatrix *csr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidPointer(a,2);
PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
if (cusp->format == MAT_CUSPARSE_ELL || cusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
csr = (CsrMatrix*)cusp->mat->mat;
if (!csr->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUDA memory");
*a = csr->values->data().get();
A->offloadmask = PETSC_OFFLOAD_GPU;
PetscFunctionReturn(0);
}
PetscErrorCode MatSeqAIJCUSPARSERestoreArrayWrite(Mat A, PetscScalar** a)
{
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidPointer(a,2);
PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
ierr = PetscObjectStateIncrease((PetscObject)A);CHKERRQ(ierr);
*a = NULL;
PetscFunctionReturn(0);
}
struct IJCompare4
{
__host__ __device__
inline bool operator() (const thrust::tuple<int, int, PetscScalar, int> &t1, const thrust::tuple<int, int, PetscScalar, int> &t2)
{
if (t1.get<0>() < t2.get<0>()) return true;
if (t1.get<0>() == t2.get<0>()) return t1.get<1>() < t2.get<1>();
return false;
}
};
struct Shift
{
int _shift;
Shift(int shift) : _shift(shift) {}
__host__ __device__
inline int operator() (const int &c)
{
return c + _shift;
}
};
/* merges two SeqAIJCUSPARSE matrices, the [A';B']' operation in MATLAB notation, i.e., the horizontal concatenation [A B] */
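/* A minimal calling sketch (assumed example, not part of the original source): for two MATSEQAIJCUSPARSE matrices A (m x nA) and
   B (m x nB) with the same number of rows,

     Mat C;
     ierr = MatSeqAIJCUSPARSEMergeMats(A,B,MAT_INITIAL_MATRIX,&C);CHKERRQ(ierr);

   builds the m x (nA+nB) concatenation C = [A B] on the GPU; MAT_REUSE_MATRIX only refreshes the numerical values of a previously
   created C with the same nonzero state. */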
PetscErrorCode MatSeqAIJCUSPARSEMergeMats(Mat A,Mat B,MatReuse reuse,Mat* C)
{
PetscErrorCode ierr;
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data, *b = (Mat_SeqAIJ*)B->data, *c;
Mat_SeqAIJCUSPARSE *Acusp = (Mat_SeqAIJCUSPARSE*)A->spptr, *Bcusp = (Mat_SeqAIJCUSPARSE*)B->spptr, *Ccusp;
Mat_SeqAIJCUSPARSEMultStruct *Cmat;
CsrMatrix *Acsr,*Bcsr,*Ccsr;
PetscInt Annz,Bnnz;
hipsparseStatus_t stat;
PetscInt i,m,n,zero = 0;
hipError_t cerr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidHeaderSpecific(B,MAT_CLASSID,2);
PetscValidPointer(C,4);
PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
PetscCheckTypeName(B,MATSEQAIJCUSPARSE);
if (A->rmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Invalid number of rows %D != %D",A->rmap->n,B->rmap->n);
if (reuse == MAT_INPLACE_MATRIX) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_INPLACE_MATRIX not supported");
if (Acusp->format == MAT_CUSPARSE_ELL || Acusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
if (Bcusp->format == MAT_CUSPARSE_ELL || Bcusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
if (reuse == MAT_INITIAL_MATRIX) {
m = A->rmap->n;
n = A->cmap->n + B->cmap->n;
ierr = MatCreate(PETSC_COMM_SELF,C);CHKERRQ(ierr);
ierr = MatSetSizes(*C,m,n,m,n);CHKERRQ(ierr);
ierr = MatSetType(*C,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
c = (Mat_SeqAIJ*)(*C)->data;
Ccusp = (Mat_SeqAIJCUSPARSE*)(*C)->spptr;
Cmat = new Mat_SeqAIJCUSPARSEMultStruct;
Ccsr = new CsrMatrix;
Cmat->cprowIndices = NULL;
c->compressedrow.use = PETSC_FALSE;
c->compressedrow.nrows = 0;
c->compressedrow.i = NULL;
c->compressedrow.rindex = NULL;
Ccusp->workVector = NULL;
Ccusp->nrows = m;
Ccusp->mat = Cmat;
Ccusp->mat->mat = Ccsr;
Ccsr->num_rows = m;
Ccsr->num_cols = n;
stat = hipsparseCreateMatDescr(&Cmat->descr);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatIndexBase(Cmat->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatType(Cmat->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
cerr = hipMalloc((void **)&(Cmat->alpha_one),sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = hipMalloc((void **)&(Cmat->beta_zero),sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = hipMalloc((void **)&(Cmat->beta_one), sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = hipMemcpy(Cmat->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = hipMemcpy(Cmat->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = hipMemcpy(Cmat->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr);
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSECopyToGPU(B);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEGenerateTransposeForMult(A);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEGenerateTransposeForMult(B);CHKERRQ(ierr);
if (!Acusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
if (!Bcusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
Acsr = (CsrMatrix*)Acusp->mat->mat;
Bcsr = (CsrMatrix*)Bcusp->mat->mat;
Annz = (PetscInt)Acsr->column_indices->size();
Bnnz = (PetscInt)Bcsr->column_indices->size();
c->nz = Annz + Bnnz;
Ccsr->row_offsets = new THRUSTINTARRAY32(m+1);
Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
Ccsr->values = new THRUSTARRAY(c->nz);
Ccsr->num_entries = c->nz;
Ccusp->cooPerm = new THRUSTINTARRAY(c->nz);
if (c->nz) {
auto Acoo = new THRUSTINTARRAY32(Annz);
auto Bcoo = new THRUSTINTARRAY32(Bnnz);
auto Ccoo = new THRUSTINTARRAY32(c->nz);
THRUSTINTARRAY32 *Aroff,*Broff;
if (a->compressedrow.use) { /* need full row offset */
if (!Acusp->rowoffsets_gpu) {
Acusp->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1);
Acusp->rowoffsets_gpu->assign(a->i,a->i + A->rmap->n + 1);
ierr = PetscLogCpuToGpu((A->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr);
}
Aroff = Acusp->rowoffsets_gpu;
} else Aroff = Acsr->row_offsets;
if (b->compressedrow.use) { /* need full row offset */
if (!Bcusp->rowoffsets_gpu) {
Bcusp->rowoffsets_gpu = new THRUSTINTARRAY32(B->rmap->n + 1);
Bcusp->rowoffsets_gpu->assign(b->i,b->i + B->rmap->n + 1);
ierr = PetscLogCpuToGpu((B->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr);
}
Broff = Bcusp->rowoffsets_gpu;
} else Broff = Bcsr->row_offsets;
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
stat = hipsparseXcsr2coo(Acusp->handle,
Aroff->data().get(),
Annz,
m,
Acoo->data().get(),
HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
stat = hipsparseXcsr2coo(Bcusp->handle,
Broff->data().get(),
Bnnz,
m,
Bcoo->data().get(),
HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
/* Issues when using bool with large matrices on SUMMIT 10.2.89 */
auto Aperm = thrust::make_constant_iterator(1);
auto Bperm = thrust::make_constant_iterator(0);
#if PETSC_PKG_CUDA_VERSION_GE(10,0,0)
auto Bcib = thrust::make_transform_iterator(Bcsr->column_indices->begin(),Shift(A->cmap->n));
auto Bcie = thrust::make_transform_iterator(Bcsr->column_indices->end(),Shift(A->cmap->n));
#else
/* there are issues instantiating the merge operation using a transform iterator for the columns of B */
auto Bcib = Bcsr->column_indices->begin();
auto Bcie = Bcsr->column_indices->end();
thrust::transform(Bcib,Bcie,Bcib,Shift(A->cmap->n));
#endif
auto wPerm = new THRUSTINTARRAY32(Annz+Bnnz);
auto Azb = thrust::make_zip_iterator(thrust::make_tuple(Acoo->begin(),Acsr->column_indices->begin(),Acsr->values->begin(),Aperm));
auto Aze = thrust::make_zip_iterator(thrust::make_tuple(Acoo->end(),Acsr->column_indices->end(),Acsr->values->end(),Aperm));
auto Bzb = thrust::make_zip_iterator(thrust::make_tuple(Bcoo->begin(),Bcib,Bcsr->values->begin(),Bperm));
auto Bze = thrust::make_zip_iterator(thrust::make_tuple(Bcoo->end(),Bcie,Bcsr->values->end(),Bperm));
auto Czb = thrust::make_zip_iterator(thrust::make_tuple(Ccoo->begin(),Ccsr->column_indices->begin(),Ccsr->values->begin(),wPerm->begin()));
auto p1 = Ccusp->cooPerm->begin();
auto p2 = Ccusp->cooPerm->begin();
thrust::advance(p2,Annz);
PetscStackCallThrust(thrust::merge(thrust::device,Azb,Aze,Bzb,Bze,Czb,IJCompare4()));
#if PETSC_PKG_CUDA_VERSION_LT(10,0,0)
thrust::transform(Bcib,Bcie,Bcib,Shift(-A->cmap->n));
#endif
auto cci = thrust::make_counting_iterator(zero);
auto cce = thrust::make_counting_iterator(c->nz);
#if 0 //Errors on SUMMIT cuda 11.1.0
PetscStackCallThrust(thrust::partition_copy(thrust::device,cci,cce,wPerm->begin(),p1,p2,thrust::identity<int>()));
#else
auto pred = thrust::identity<int>();
PetscStackCallThrust(thrust::copy_if(thrust::device,cci,cce,wPerm->begin(),p1,pred));
PetscStackCallThrust(thrust::remove_copy_if(thrust::device,cci,cce,wPerm->begin(),p2,pred));
#endif
stat = hipsparseXcoo2csr(Ccusp->handle,
Ccoo->data().get(),
c->nz,
m,
Ccsr->row_offsets->data().get(),
HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
delete wPerm;
delete Acoo;
delete Bcoo;
delete Ccoo;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
stat = hipsparseCreateCsr(&Cmat->matDescr, Ccsr->num_rows, Ccsr->num_cols, Ccsr->num_entries,
Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get(), Ccsr->values->data().get(),
HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I,
HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);CHKERRCUSPARSE(stat);
#endif
if (Acusp->transgen && Bcusp->transgen) { /* if A and B have the transpose, generate C transpose too */
PetscBool AT = Acusp->matTranspose ? PETSC_TRUE : PETSC_FALSE, BT = Bcusp->matTranspose ? PETSC_TRUE : PETSC_FALSE;
Mat_SeqAIJCUSPARSEMultStruct *CmatT = new Mat_SeqAIJCUSPARSEMultStruct;
CsrMatrix *CcsrT = new CsrMatrix;
CsrMatrix *AcsrT = AT ? (CsrMatrix*)Acusp->matTranspose->mat : NULL;
CsrMatrix *BcsrT = BT ? (CsrMatrix*)Bcusp->matTranspose->mat : NULL;
Ccusp->transgen = PETSC_TRUE;
CmatT->cprowIndices = NULL;
CmatT->mat = CcsrT;
CcsrT->num_rows = n;
CcsrT->num_cols = m;
CcsrT->num_entries = c->nz;
CcsrT->row_offsets = new THRUSTINTARRAY32(n+1);
CcsrT->column_indices = new THRUSTINTARRAY32(c->nz);
CcsrT->values = new THRUSTARRAY(c->nz);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
auto rT = CcsrT->row_offsets->begin();
if (AT) {
rT = thrust::copy(AcsrT->row_offsets->begin(),AcsrT->row_offsets->end(),rT);
thrust::advance(rT,-1);
}
if (BT) {
auto titb = thrust::make_transform_iterator(BcsrT->row_offsets->begin(),Shift(a->nz));
auto tite = thrust::make_transform_iterator(BcsrT->row_offsets->end(),Shift(a->nz));
thrust::copy(titb,tite,rT);
}
auto cT = CcsrT->column_indices->begin();
if (AT) cT = thrust::copy(AcsrT->column_indices->begin(),AcsrT->column_indices->end(),cT);
if (BT) thrust::copy(BcsrT->column_indices->begin(),BcsrT->column_indices->end(),cT);
auto vT = CcsrT->values->begin();
if (AT) vT = thrust::copy(AcsrT->values->begin(),AcsrT->values->end(),vT);
if (BT) thrust::copy(BcsrT->values->begin(),BcsrT->values->end(),vT);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
stat = hipsparseCreateMatDescr(&CmatT->descr);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatIndexBase(CmatT->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
stat = hipsparseSetMatType(CmatT->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
cerr = hipMalloc((void **)&(CmatT->alpha_one),sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = hipMalloc((void **)&(CmatT->beta_zero),sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = hipMalloc((void **)&(CmatT->beta_one), sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = hipMemcpy(CmatT->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = hipMemcpy(CmatT->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = hipMemcpy(CmatT->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
stat = hipsparseCreateCsr(&CmatT->matDescr, CcsrT->num_rows, CcsrT->num_cols, CcsrT->num_entries,
CcsrT->row_offsets->data().get(), CcsrT->column_indices->data().get(), CcsrT->values->data().get(),
HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I,
HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);CHKERRCUSPARSE(stat);
#endif
Ccusp->matTranspose = CmatT;
}
}
c->singlemalloc = PETSC_FALSE;
c->free_a = PETSC_TRUE;
c->free_ij = PETSC_TRUE;
ierr = PetscMalloc1(m+1,&c->i);CHKERRQ(ierr);
ierr = PetscMalloc1(c->nz,&c->j);CHKERRQ(ierr);
if (PetscDefined(USE_64BIT_INDICES)) { /* 32 to 64 bit conversion on the GPU and then copy to host (lazy) */
THRUSTINTARRAY ii(Ccsr->row_offsets->size());
THRUSTINTARRAY jj(Ccsr->column_indices->size());
ii = *Ccsr->row_offsets;
jj = *Ccsr->column_indices;
cerr = hipMemcpy(c->i,ii.data().get(),Ccsr->row_offsets->size()*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
cerr = hipMemcpy(c->j,jj.data().get(),Ccsr->column_indices->size()*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
} else {
cerr = hipMemcpy(c->i,Ccsr->row_offsets->data().get(),Ccsr->row_offsets->size()*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
cerr = hipMemcpy(c->j,Ccsr->column_indices->data().get(),Ccsr->column_indices->size()*sizeof(PetscInt),hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
}
ierr = PetscLogGpuToCpu((Ccsr->column_indices->size() + Ccsr->row_offsets->size())*sizeof(PetscInt));CHKERRQ(ierr);
ierr = PetscMalloc1(m,&c->ilen);CHKERRQ(ierr);
ierr = PetscMalloc1(m,&c->imax);CHKERRQ(ierr);
c->maxnz = c->nz;
c->nonzerorowcnt = 0;
c->rmax = 0;
for (i = 0; i < m; i++) {
const PetscInt nn = c->i[i+1] - c->i[i];
c->ilen[i] = c->imax[i] = nn;
c->nonzerorowcnt += (PetscInt)!!nn;
c->rmax = PetscMax(c->rmax,nn);
}
ierr = MatMarkDiagonal_SeqAIJ(*C);CHKERRQ(ierr);
ierr = PetscMalloc1(c->nz,&c->a);CHKERRQ(ierr);
(*C)->nonzerostate++;
ierr = PetscLayoutSetUp((*C)->rmap);CHKERRQ(ierr);
ierr = PetscLayoutSetUp((*C)->cmap);CHKERRQ(ierr);
Ccusp->nonzerostate = (*C)->nonzerostate;
(*C)->preallocated = PETSC_TRUE;
} else {
    if ((*C)->rmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Invalid number of rows %D != %D",(*C)->rmap->n,B->rmap->n);
c = (Mat_SeqAIJ*)(*C)->data;
if (c->nz) {
Ccusp = (Mat_SeqAIJCUSPARSE*)(*C)->spptr;
if (!Ccusp->cooPerm) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cooPerm");
if (Ccusp->format == MAT_CUSPARSE_ELL || Ccusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
if (Ccusp->nonzerostate != (*C)->nonzerostate) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Wrong nonzerostate");
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSECopyToGPU(B);CHKERRQ(ierr);
if (!Acusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
if (!Bcusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
Acsr = (CsrMatrix*)Acusp->mat->mat;
Bcsr = (CsrMatrix*)Bcusp->mat->mat;
Ccsr = (CsrMatrix*)Ccusp->mat->mat;
if (Acsr->num_entries != (PetscInt)Acsr->values->size()) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_COR,"A nnz %D != %D",Acsr->num_entries,(PetscInt)Acsr->values->size());
if (Bcsr->num_entries != (PetscInt)Bcsr->values->size()) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_COR,"B nnz %D != %D",Bcsr->num_entries,(PetscInt)Bcsr->values->size());
if (Ccsr->num_entries != (PetscInt)Ccsr->values->size()) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_COR,"C nnz %D != %D",Ccsr->num_entries,(PetscInt)Ccsr->values->size());
if (Ccsr->num_entries != Acsr->num_entries + Bcsr->num_entries) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_COR,"C nnz %D != %D + %D",Ccsr->num_entries,Acsr->num_entries,Bcsr->num_entries);
if (Ccusp->cooPerm->size() != Ccsr->values->size()) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_COR,"permSize %D != %D",(PetscInt)Ccusp->cooPerm->size(),(PetscInt)Ccsr->values->size());
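      /* cooPerm maps the concatenated [A values; B values] ordering to positions in C's CSR values: scatter A's entries first, then B's (starting at pmid) */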
auto pmid = Ccusp->cooPerm->begin();
thrust::advance(pmid,Acsr->num_entries);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
auto zibait = thrust::make_zip_iterator(thrust::make_tuple(Acsr->values->begin(),
thrust::make_permutation_iterator(Ccsr->values->begin(),Ccusp->cooPerm->begin())));
auto zieait = thrust::make_zip_iterator(thrust::make_tuple(Acsr->values->end(),
thrust::make_permutation_iterator(Ccsr->values->begin(),pmid)));
thrust::for_each(zibait,zieait,VecCUDAEquals());
auto zibbit = thrust::make_zip_iterator(thrust::make_tuple(Bcsr->values->begin(),
thrust::make_permutation_iterator(Ccsr->values->begin(),pmid)));
auto ziebit = thrust::make_zip_iterator(thrust::make_tuple(Bcsr->values->end(),
thrust::make_permutation_iterator(Ccsr->values->begin(),Ccusp->cooPerm->end())));
thrust::for_each(zibbit,ziebit,VecCUDAEquals());
if (Acusp->transgen && Bcusp->transgen && Ccusp->transgen) {
if (!Ccusp->matTranspose) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing transpose Mat_SeqAIJCUSPARSEMultStruct");
PetscBool AT = Acusp->matTranspose ? PETSC_TRUE : PETSC_FALSE, BT = Bcusp->matTranspose ? PETSC_TRUE : PETSC_FALSE;
CsrMatrix *AcsrT = AT ? (CsrMatrix*)Acusp->matTranspose->mat : NULL;
CsrMatrix *BcsrT = BT ? (CsrMatrix*)Bcusp->matTranspose->mat : NULL;
CsrMatrix *CcsrT = (CsrMatrix*)Ccusp->matTranspose->mat;
auto vT = CcsrT->values->begin();
if (AT) vT = thrust::copy(AcsrT->values->begin(),AcsrT->values->end(),vT);
if (BT) thrust::copy(BcsrT->values->begin(),BcsrT->values->end(),vT);
}
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
}
}
ierr = PetscObjectStateIncrease((PetscObject)*C);CHKERRQ(ierr);
(*C)->assembled = PETSC_TRUE;
(*C)->was_assembled = PETSC_FALSE;
(*C)->offloadmask = PETSC_OFFLOAD_GPU;
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCopySubArray_SeqAIJCUSPARSE(Mat A, PetscInt n, const PetscInt idx[], PetscScalar v[])
{
PetscErrorCode ierr;
bool dmem;
const PetscScalar *av;
hipError_t cerr;
PetscFunctionBegin;
dmem = isCudaMem(v);
ierr = MatSeqAIJCUSPARSEGetArrayRead(A,&av);CHKERRQ(ierr);
if (n && idx) {
THRUSTINTARRAY widx(n);
widx.assign(idx,idx+n);
ierr = PetscLogCpuToGpu(n*sizeof(PetscInt));CHKERRQ(ierr);
THRUSTARRAY *w = NULL;
thrust::device_ptr<PetscScalar> dv;
if (dmem) {
dv = thrust::device_pointer_cast(v);
} else {
w = new THRUSTARRAY(n);
dv = w->data();
}
thrust::device_ptr<const PetscScalar> dav = thrust::device_pointer_cast(av);
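    /* gather av[idx[i]] into dv[i] on the GPU: the permutation iterator reads the selected entries and VecCUDAEquals writes them into the destination */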
auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(dav,widx.begin()),dv));
auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(dav,widx.end()),dv+n));
thrust::for_each(zibit,zieit,VecCUDAEquals());
if (w) {
cerr = hipMemcpy(v,w->data().get(),n*sizeof(PetscScalar),hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
}
delete w;
} else {
cerr = hipMemcpy(v,av,n*sizeof(PetscScalar),dmem ? hipMemcpyDeviceToDevice : hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
}
  if (!dmem) { ierr = PetscLogGpuToCpu(n*sizeof(PetscScalar));CHKERRQ(ierr); }
ierr = MatSeqAIJCUSPARSERestoreArrayRead(A,&av);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
| eea8da7b4db32029c105ad80fe5c248532f1d668.cu | /*
Defines the basic matrix operations for the AIJ (compressed row)
matrix storage format using the CUSPARSE library,
*/
#define PETSC_SKIP_SPINLOCK
#define PETSC_SKIP_CXX_COMPLEX_FIX
#define PETSC_SKIP_IMMINTRIN_H_CUDAWORKAROUND 1
#include <petscconf.h>
#include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/
#include <../src/mat/impls/sbaij/seq/sbaij.h>
#include <../src/vec/vec/impls/dvecimpl.h>
#include <petsc/private/vecimpl.h>
#undef VecType
#include <../src/mat/impls/aij/seq/seqcusparse/cusparsematimpl.h>
const char *const MatCUSPARSEStorageFormats[] = {"CSR","ELL","HYB","MatCUSPARSEStorageFormat","MAT_CUSPARSE_",0};
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
/* The following are copied from cusparse.h in CUDA-11.0. In MatCUSPARSESpMVAlgorithms[] etc, we copy them in
0-based integer value order, since we want to use PetscOptionsEnum() to parse user command line options for them.
typedef enum {
CUSPARSE_MV_ALG_DEFAULT = 0,
CUSPARSE_COOMV_ALG = 1,
CUSPARSE_CSRMV_ALG1 = 2,
CUSPARSE_CSRMV_ALG2 = 3
} cusparseSpMVAlg_t;
typedef enum {
CUSPARSE_MM_ALG_DEFAULT CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_ALG_DEFAULT) = 0,
CUSPARSE_COOMM_ALG1 CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_COO_ALG1) = 1,
CUSPARSE_COOMM_ALG2 CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_COO_ALG2) = 2,
CUSPARSE_COOMM_ALG3 CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_COO_ALG3) = 3,
CUSPARSE_CSRMM_ALG1 CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_CSR_ALG1) = 4,
CUSPARSE_SPMM_ALG_DEFAULT = 0,
CUSPARSE_SPMM_COO_ALG1 = 1,
CUSPARSE_SPMM_COO_ALG2 = 2,
CUSPARSE_SPMM_COO_ALG3 = 3,
CUSPARSE_SPMM_COO_ALG4 = 5,
CUSPARSE_SPMM_CSR_ALG1 = 4,
CUSPARSE_SPMM_CSR_ALG2 = 6,
} cusparseSpMMAlg_t;
typedef enum {
  CUSPARSE_CSR2CSC_ALG1 = 1, // faster than V2 (in general), deterministic
  CUSPARSE_CSR2CSC_ALG2 = 2  // low memory requirement, non-deterministic
} cusparseCsr2CscAlg_t;
*/
const char *const MatCUSPARSESpMVAlgorithms[] = {"MV_ALG_DEFAULT","COOMV_ALG", "CSRMV_ALG1","CSRMV_ALG2", "cusparseSpMVAlg_t","CUSPARSE_",0};
const char *const MatCUSPARSESpMMAlgorithms[] = {"ALG_DEFAULT","COO_ALG1","COO_ALG2","COO_ALG3","CSR_ALG1","COO_ALG4","CSR_ALG2","cusparseSpMMAlg_t","CUSPARSE_SPMM_",0};
const char *const MatCUSPARSECsr2CscAlgorithms[] = {"INVALID"/*cusparse does not have enum 0! We created one*/,"ALG1","ALG2","cusparseCsr2CscAlg_t","CUSPARSE_CSR2CSC_",0};
#endif
static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,const MatFactorInfo*);
static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,const MatFactorInfo*);
static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat,Mat,const MatFactorInfo*);
static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,IS,const MatFactorInfo*);
static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,IS,const MatFactorInfo*);
static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat,Mat,const MatFactorInfo*);
static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat,Vec,Vec);
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat,Vec,Vec);
static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(PetscOptionItems *PetscOptionsObject,Mat);
static PetscErrorCode MatAXPY_SeqAIJCUSPARSE(Mat,PetscScalar,Mat,MatStructure);
static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec);
static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec);
static PetscErrorCode MatMultHermitianTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatMultHermitianTransposeAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec);
static PetscErrorCode MatMultAddKernel_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec,PetscBool,PetscBool);
static PetscErrorCode CsrMatrix_Destroy(CsrMatrix**);
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct**);
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct**,MatCUSPARSEStorageFormat);
static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Reset(Mat_SeqAIJCUSPARSETriFactors**);
static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors**);
static PetscErrorCode MatSeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE**);
static PetscErrorCode MatSeqAIJCUSPARSECopyToGPU(Mat);
static PetscErrorCode MatSeqAIJCUSPARSECopyFromGPU(Mat);
PETSC_INTERN PetscErrorCode MatSetPreallocationCOO_SeqAIJCUSPARSE(Mat,PetscInt,const PetscInt[],const PetscInt[]);
PETSC_INTERN PetscErrorCode MatSetValuesCOO_SeqAIJCUSPARSE(Mat,const PetscScalar[],InsertMode);
static PetscErrorCode MatSeqAIJCopySubArray_SeqAIJCUSPARSE(Mat,PetscInt,const PetscInt[],PetscScalar[]);
PetscErrorCode MatCUSPARSESetStream(Mat A,const cudaStream_t stream)
{
cusparseStatus_t stat;
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
PetscFunctionBegin;
if (!cusparsestruct) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing spptr");
cusparsestruct->stream = stream;
stat = cusparseSetStream(cusparsestruct->handle,cusparsestruct->stream);CHKERRCUSPARSE(stat);
PetscFunctionReturn(0);
}
PetscErrorCode MatCUSPARSESetHandle(Mat A,const cusparseHandle_t handle)
{
cusparseStatus_t stat;
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
PetscFunctionBegin;
if (!cusparsestruct) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing spptr");
if (cusparsestruct->handle != handle) {
if (cusparsestruct->handle) {
stat = cusparseDestroy(cusparsestruct->handle);CHKERRCUSPARSE(stat);
}
cusparsestruct->handle = handle;
}
stat = cusparseSetPointerMode(cusparsestruct->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
PetscFunctionReturn(0);
}
PetscErrorCode MatCUSPARSEClearHandle(Mat A)
{
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
PetscBool flg;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg || !cusparsestruct) PetscFunctionReturn(0);
if (cusparsestruct->handle) cusparsestruct->handle = 0;
PetscFunctionReturn(0);
}
PetscErrorCode MatFactorGetSolverType_seqaij_cusparse(Mat A,MatSolverType *type)
{
PetscFunctionBegin;
*type = MATSOLVERCUSPARSE;
PetscFunctionReturn(0);
}
/*MC
  MATSOLVERCUSPARSE = "cusparse" - A matrix solver type providing triangular solvers for sequential matrices
  on a single GPU, for matrices of type seqaijcusparse, aijcusparse, or seqaijcusp, aijcusp. Currently supported
  algorithms are ILU(k) and ICC(k). Typically, deeper factorizations (larger k) result in poorer
  performance in the triangular solves. Full LU and Cholesky decompositions can be solved through the
  CUSPARSE triangular solve algorithm. However, the performance can be quite poor and thus these
  algorithms are not recommended. This class does NOT support direct solver operations.
Level: beginner
.seealso: PCFactorSetMatSolverType(), MatSolverType, MatCreateSeqAIJCUSPARSE(), MATAIJCUSPARSE, MatCreateAIJCUSPARSE(), MatCUSPARSESetFormat(), MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation
M*/
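/* Illustrative usage sketch (hedged; not part of the library source): how an application selects this
   solver type for an ILU preconditioner, as suggested by the manual page above. It assumes the caller
   includes petscksp.h, so it is guarded out of compilation in this file. */
#if 0
static PetscErrorCode ExampleSolveWithCusparseILU(Mat A,Vec b,Vec x)
{
  KSP            ksp;
  PC             pc;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = KSPCreate(PetscObjectComm((PetscObject)A),&ksp);CHKERRQ(ierr);
  ierr = KSPSetOperators(ksp,A,A);CHKERRQ(ierr);
  ierr = KSPGetPC(ksp,&pc);CHKERRQ(ierr);
  ierr = PCSetType(pc,PCILU);CHKERRQ(ierr);                            /* ILU(k) factorization */
  ierr = PCFactorSetMatSolverType(pc,MATSOLVERCUSPARSE);CHKERRQ(ierr); /* triangular solves run on the GPU */
  ierr = KSPSetFromOptions(ksp);CHKERRQ(ierr);
  ierr = KSPSolve(ksp,b,x);CHKERRQ(ierr);
  ierr = KSPDestroy(&ksp);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#endif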
PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse(Mat A,MatFactorType ftype,Mat *B)
{
PetscErrorCode ierr;
PetscInt n = A->rmap->n;
PetscFunctionBegin;
ierr = MatCreate(PetscObjectComm((PetscObject)A),B);CHKERRQ(ierr);
ierr = MatSetSizes(*B,n,n,n,n);CHKERRQ(ierr);
(*B)->factortype = ftype;
(*B)->useordering = PETSC_TRUE;
ierr = MatSetType(*B,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
if (ftype == MAT_FACTOR_LU || ftype == MAT_FACTOR_ILU || ftype == MAT_FACTOR_ILUDT) {
ierr = MatSetBlockSizesFromMats(*B,A,A);CHKERRQ(ierr);
(*B)->ops->ilufactorsymbolic = MatILUFactorSymbolic_SeqAIJCUSPARSE;
(*B)->ops->lufactorsymbolic = MatLUFactorSymbolic_SeqAIJCUSPARSE;
} else if (ftype == MAT_FACTOR_CHOLESKY || ftype == MAT_FACTOR_ICC) {
(*B)->ops->iccfactorsymbolic = MatICCFactorSymbolic_SeqAIJCUSPARSE;
(*B)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_SeqAIJCUSPARSE;
} else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Factor type not supported for CUSPARSE Matrix Types");
ierr = MatSeqAIJSetPreallocation(*B,MAT_SKIP_ALLOCATION,NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)(*B),"MatFactorGetSolverType_C",MatFactorGetSolverType_seqaij_cusparse);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PETSC_INTERN PetscErrorCode MatCUSPARSESetFormat_SeqAIJCUSPARSE(Mat A,MatCUSPARSEFormatOperation op,MatCUSPARSEStorageFormat format)
{
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
PetscFunctionBegin;
switch (op) {
case MAT_CUSPARSE_MULT:
cusparsestruct->format = format;
break;
case MAT_CUSPARSE_ALL:
cusparsestruct->format = format;
break;
default:
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unsupported operation %d for MatCUSPARSEFormatOperation. MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL are currently supported.",op);
}
PetscFunctionReturn(0);
}
/*@
MatCUSPARSESetFormat - Sets the storage format of CUSPARSE matrices for a particular
operation. Only the MatMult operation can use different GPU storage formats
for MPIAIJCUSPARSE matrices.
Not Collective
Input Parameters:
+ A - Matrix of type SEQAIJCUSPARSE
. op - MatCUSPARSEFormatOperation. SEQAIJCUSPARSE matrices support MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL. MPIAIJCUSPARSE matrices support MAT_CUSPARSE_MULT_DIAG, MAT_CUSPARSE_MULT_OFFDIAG, and MAT_CUSPARSE_ALL.
- format - MatCUSPARSEStorageFormat (one of MAT_CUSPARSE_CSR, MAT_CUSPARSE_ELL, MAT_CUSPARSE_HYB. The latter two require CUDA 4.2)
Output Parameter:
Level: intermediate
.seealso: MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation
@*/
PetscErrorCode MatCUSPARSESetFormat(Mat A,MatCUSPARSEFormatOperation op,MatCUSPARSEStorageFormat format)
{
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A, MAT_CLASSID,1);
ierr = PetscTryMethod(A,"MatCUSPARSESetFormat_C",(Mat,MatCUSPARSEFormatOperation,MatCUSPARSEStorageFormat),(A,op,format));CHKERRQ(ierr);
PetscFunctionReturn(0);
}
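/* Illustrative usage sketch (hedged; not part of the library source): switch only the SpMV (MatMult)
   storage of an assembled MATSEQAIJCUSPARSE matrix A to ELL; the same effect is available at run time
   with -mat_cusparse_mult_storage_format ell. */
#if 0
  ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_MULT,MAT_CUSPARSE_ELL);CHKERRQ(ierr);
#endif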
/*@
MatSeqAIJCUSPARSESetGenerateTranspose - Sets the flag to explicitly generate the transpose matrix before calling MatMultTranspose
Collective on mat
Input Parameters:
+ A - Matrix of type SEQAIJCUSPARSE
- transgen - the boolean flag
Level: intermediate
.seealso: MATSEQAIJCUSPARSE, MatAIJCUSPARSESetGenerateTranspose()
@*/
PetscErrorCode MatSeqAIJCUSPARSESetGenerateTranspose(Mat A,PetscBool transgen)
{
PetscErrorCode ierr;
PetscBool flg;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
ierr = PetscObjectTypeCompare(((PetscObject)A),MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (flg) {
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
if (A->factortype) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
cusp->transgen = transgen;
    if (!transgen) { /* destroy the transpose matrix if present, to prevent logic errors if transgen is set to true later */
ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&cusp->matTranspose,cusp->format);CHKERRQ(ierr);
}
}
PetscFunctionReturn(0);
}
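/* Illustrative usage sketch (hedged; not part of the library source): cache an explicit transpose so
   that repeated MatMultTranspose() calls multiply with the stored A^T instead of using cuSPARSE's
   transposed-operation path. A, x and y are assumed to exist already. */
#if 0
  ierr = MatSeqAIJCUSPARSESetGenerateTranspose(A,PETSC_TRUE);CHKERRQ(ierr);
  ierr = MatMultTranspose(A,x,y);CHKERRQ(ierr); /* first call builds and caches the transpose on the GPU */
  ierr = MatMultTranspose(A,x,y);CHKERRQ(ierr); /* later calls reuse it */
#endif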
static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(PetscOptionItems *PetscOptionsObject,Mat A)
{
PetscErrorCode ierr;
MatCUSPARSEStorageFormat format;
PetscBool flg;
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
PetscFunctionBegin;
ierr = PetscOptionsHead(PetscOptionsObject,"SeqAIJCUSPARSE options");CHKERRQ(ierr);
if (A->factortype == MAT_FACTOR_NONE) {
PetscBool transgen = cusparsestruct->transgen;
ierr = PetscOptionsBool("-mat_cusparse_transgen","Generate explicit transpose for MatMultTranspose","MatSeqAIJCUSPARSESetGenerateTranspose",transgen,&transgen,&flg);CHKERRQ(ierr);
if (flg) {ierr = MatSeqAIJCUSPARSESetGenerateTranspose(A,transgen);CHKERRQ(ierr);}
ierr = PetscOptionsEnum("-mat_cusparse_mult_storage_format","sets storage format of (seq)aijcusparse gpu matrices for SpMV",
"MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparsestruct->format,(PetscEnum*)&format,&flg);CHKERRQ(ierr);
if (flg) {ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_MULT,format);CHKERRQ(ierr);}
ierr = PetscOptionsEnum("-mat_cusparse_storage_format","sets storage format of (seq)aijcusparse gpu matrices for SpMV and TriSolve",
"MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparsestruct->format,(PetscEnum*)&format,&flg);CHKERRQ(ierr);
if (flg) {ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_ALL,format);CHKERRQ(ierr);}
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
cusparsestruct->spmvAlg = CUSPARSE_CSRMV_ALG1; /* default, since we only support csr */
ierr = PetscOptionsEnum("-mat_cusparse_spmv_alg","sets cuSPARSE algorithm used in sparse-mat dense-vector multiplication (SpMV)",
"cusparseSpMVAlg_t",MatCUSPARSESpMVAlgorithms,(PetscEnum)cusparsestruct->spmvAlg,(PetscEnum*)&cusparsestruct->spmvAlg,&flg);CHKERRQ(ierr);
    /* If the user did use this option, check its consistency with cuSPARSE, since PetscOptionsEnum() sets enum values based on their position in MatCUSPARSESpMVAlgorithms[] */
if (flg && CUSPARSE_CSRMV_ALG1 != 2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE enum cusparseSpMVAlg_t has been changed but PETSc has not been updated accordingly");
cusparsestruct->spmmAlg = CUSPARSE_SPMM_CSR_ALG1; /* default, only support column-major dense matrix B */
ierr = PetscOptionsEnum("-mat_cusparse_spmm_alg","sets cuSPARSE algorithm used in sparse-mat dense-mat multiplication (SpMM)",
"cusparseSpMMAlg_t",MatCUSPARSESpMMAlgorithms,(PetscEnum)cusparsestruct->spmmAlg,(PetscEnum*)&cusparsestruct->spmmAlg,&flg);CHKERRQ(ierr);
if (flg && CUSPARSE_SPMM_CSR_ALG1 != 4) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE enum cusparseSpMMAlg_t has been changed but PETSc has not been updated accordingly");
cusparsestruct->csr2cscAlg = CUSPARSE_CSR2CSC_ALG1;
ierr = PetscOptionsEnum("-mat_cusparse_csr2csc_alg","sets cuSPARSE algorithm used in converting CSR matrices to CSC matrices",
"cusparseCsr2CscAlg_t",MatCUSPARSECsr2CscAlgorithms,(PetscEnum)cusparsestruct->csr2cscAlg,(PetscEnum*)&cusparsestruct->csr2cscAlg,&flg);CHKERRQ(ierr);
if (flg && CUSPARSE_CSR2CSC_ALG1 != 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE enum cusparseCsr2CscAlg_t has been changed but PETSc has not been updated accordingly");
#endif
}
ierr = PetscOptionsTail();CHKERRQ(ierr);
PetscFunctionReturn(0);
}
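/* Illustrative usage sketch (hedged; not part of the library source): the options handled above can also
   be set programmatically before MatSetFromOptions() is called; the equivalent command-line form is
   -mat_cusparse_transgen -mat_cusparse_mult_storage_format ell. A is assumed to be a MATSEQAIJCUSPARSE matrix. */
#if 0
  ierr = PetscOptionsSetValue(NULL,"-mat_cusparse_transgen","1");CHKERRQ(ierr);
  ierr = PetscOptionsSetValue(NULL,"-mat_cusparse_mult_storage_format","ell");CHKERRQ(ierr);
  ierr = MatSetFromOptions(A);CHKERRQ(ierr);
#endif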
static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS isrow,IS iscol,const MatFactorInfo *info)
{
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors);CHKERRQ(ierr);
ierr = MatILUFactorSymbolic_SeqAIJ(B,A,isrow,iscol,info);CHKERRQ(ierr);
B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE;
PetscFunctionReturn(0);
}
static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS isrow,IS iscol,const MatFactorInfo *info)
{
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors);CHKERRQ(ierr);
ierr = MatLUFactorSymbolic_SeqAIJ(B,A,isrow,iscol,info);CHKERRQ(ierr);
B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE;
PetscFunctionReturn(0);
}
static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS perm,const MatFactorInfo *info)
{
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors);CHKERRQ(ierr);
ierr = MatICCFactorSymbolic_SeqAIJ(B,A,perm,info);CHKERRQ(ierr);
B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE;
PetscFunctionReturn(0);
}
static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS perm,const MatFactorInfo *info)
{
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)B->spptr;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors);CHKERRQ(ierr);
ierr = MatCholeskyFactorSymbolic_SeqAIJ(B,A,perm,info);CHKERRQ(ierr);
B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE;
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSEBuildILULowerTriMatrix(Mat A)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
PetscInt n = A->rmap->n;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
cusparseStatus_t stat;
const PetscInt *ai = a->i,*aj = a->j,*vi;
const MatScalar *aa = a->a,*v;
PetscInt *AiLo, *AjLo;
PetscInt i,nz, nzLower, offset, rowOffset;
PetscErrorCode ierr;
cudaError_t cerr;
PetscFunctionBegin;
if (!n) PetscFunctionReturn(0);
if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
try {
/* first figure out the number of nonzeros in the lower triangular matrix including 1's on the diagonal. */
nzLower=n+ai[n]-ai[1];
if (!loTriFactor) {
PetscScalar *AALo;
cerr = cudaMallocHost((void**) &AALo, nzLower*sizeof(PetscScalar));CHKERRCUDA(cerr);
/* Allocate Space for the lower triangular matrix */
cerr = cudaMallocHost((void**) &AiLo, (n+1)*sizeof(PetscInt));CHKERRCUDA(cerr);
cerr = cudaMallocHost((void**) &AjLo, nzLower*sizeof(PetscInt));CHKERRCUDA(cerr);
/* Fill the lower triangular matrix */
AiLo[0] = (PetscInt) 0;
AiLo[n] = nzLower;
AjLo[0] = (PetscInt) 0;
AALo[0] = (MatScalar) 1.0;
v = aa;
vi = aj;
offset = 1;
rowOffset= 1;
for (i=1; i<n; i++) {
nz = ai[i+1] - ai[i];
/* additional 1 for the term on the diagonal */
AiLo[i] = rowOffset;
rowOffset += nz+1;
ierr = PetscArraycpy(&(AjLo[offset]), vi, nz);CHKERRQ(ierr);
ierr = PetscArraycpy(&(AALo[offset]), v, nz);CHKERRQ(ierr);
offset += nz;
AjLo[offset] = (PetscInt) i;
AALo[offset] = (MatScalar) 1.0;
offset += 1;
v += nz;
vi += nz;
}
/* allocate space for the triangular factor information */
ierr = PetscNew(&loTriFactor);CHKERRQ(ierr);
loTriFactor->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL;
/* Create the matrix description */
stat = cusparseCreateMatDescr(&loTriFactor->descr);CHKERRCUSPARSE(stat);
stat = cusparseSetMatIndexBase(loTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
#else
stat = cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSPARSE(stat);
#endif
stat = cusparseSetMatFillMode(loTriFactor->descr, CUSPARSE_FILL_MODE_LOWER);CHKERRCUSPARSE(stat);
stat = cusparseSetMatDiagType(loTriFactor->descr, CUSPARSE_DIAG_TYPE_UNIT);CHKERRCUSPARSE(stat);
/* set the operation */
loTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE;
/* set the matrix */
loTriFactor->csrMat = new CsrMatrix;
loTriFactor->csrMat->num_rows = n;
loTriFactor->csrMat->num_cols = n;
loTriFactor->csrMat->num_entries = nzLower;
loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n+1);
loTriFactor->csrMat->row_offsets->assign(AiLo, AiLo+n+1);
loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzLower);
loTriFactor->csrMat->column_indices->assign(AjLo, AjLo+nzLower);
loTriFactor->csrMat->values = new THRUSTARRAY(nzLower);
loTriFactor->csrMat->values->assign(AALo, AALo+nzLower);
/* Create the solve analysis information */
ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_create_analysis_info(&loTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, loTriFactor->solveOp,
loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr,
loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(),
loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo,
&loTriFactor->solveBufferSize);CHKERRCUSPARSE(stat);
cerr = cudaMalloc(&loTriFactor->solveBuffer,loTriFactor->solveBufferSize);CHKERRCUDA(cerr);
#endif
/* perform the solve analysis */
stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactor->solveOp,
loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr,
loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(),
loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,loTriFactor->solvePolicy, loTriFactor->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
/* assign the pointer */
((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtr = loTriFactor;
loTriFactor->AA_h = AALo;
cerr = cudaFreeHost(AiLo);CHKERRCUDA(cerr);
cerr = cudaFreeHost(AjLo);CHKERRCUDA(cerr);
ierr = PetscLogCpuToGpu((n+1+nzLower)*sizeof(int)+nzLower*sizeof(PetscScalar));CHKERRQ(ierr);
} else { /* update values only */
if (!loTriFactor->AA_h) {
cerr = cudaMallocHost((void**) &loTriFactor->AA_h, nzLower*sizeof(PetscScalar));CHKERRCUDA(cerr);
}
/* Fill the lower triangular matrix */
loTriFactor->AA_h[0] = 1.0;
v = aa;
vi = aj;
offset = 1;
for (i=1; i<n; i++) {
nz = ai[i+1] - ai[i];
ierr = PetscArraycpy(&(loTriFactor->AA_h[offset]), v, nz);CHKERRQ(ierr);
offset += nz;
loTriFactor->AA_h[offset] = 1.0;
offset += 1;
v += nz;
}
loTriFactor->csrMat->values->assign(loTriFactor->AA_h, loTriFactor->AA_h+nzLower);
ierr = PetscLogCpuToGpu(nzLower*sizeof(PetscScalar));CHKERRQ(ierr);
}
} catch(char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
}
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(Mat A)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
PetscInt n = A->rmap->n;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
cusparseStatus_t stat;
const PetscInt *aj = a->j,*adiag = a->diag,*vi;
const MatScalar *aa = a->a,*v;
PetscInt *AiUp, *AjUp;
PetscInt i,nz, nzUpper, offset;
PetscErrorCode ierr;
cudaError_t cerr;
PetscFunctionBegin;
if (!n) PetscFunctionReturn(0);
if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
try {
/* next, figure out the number of nonzeros in the upper triangular matrix. */
nzUpper = adiag[0]-adiag[n];
if (!upTriFactor) {
PetscScalar *AAUp;
cerr = cudaMallocHost((void**) &AAUp, nzUpper*sizeof(PetscScalar));CHKERRCUDA(cerr);
/* Allocate Space for the upper triangular matrix */
cerr = cudaMallocHost((void**) &AiUp, (n+1)*sizeof(PetscInt));CHKERRCUDA(cerr);
cerr = cudaMallocHost((void**) &AjUp, nzUpper*sizeof(PetscInt));CHKERRCUDA(cerr);
/* Fill the upper triangular matrix */
AiUp[0]=(PetscInt) 0;
AiUp[n]=nzUpper;
offset = nzUpper;
for (i=n-1; i>=0; i--) {
v = aa + adiag[i+1] + 1;
vi = aj + adiag[i+1] + 1;
/* number of elements NOT on the diagonal */
nz = adiag[i] - adiag[i+1]-1;
/* decrement the offset */
offset -= (nz+1);
/* first, set the diagonal elements */
AjUp[offset] = (PetscInt) i;
AAUp[offset] = (MatScalar)1./v[nz];
AiUp[i] = AiUp[i+1] - (nz+1);
ierr = PetscArraycpy(&(AjUp[offset+1]), vi, nz);CHKERRQ(ierr);
ierr = PetscArraycpy(&(AAUp[offset+1]), v, nz);CHKERRQ(ierr);
}
/* allocate space for the triangular factor information */
ierr = PetscNew(&upTriFactor);CHKERRQ(ierr);
upTriFactor->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL;
/* Create the matrix description */
stat = cusparseCreateMatDescr(&upTriFactor->descr);CHKERRCUSPARSE(stat);
stat = cusparseSetMatIndexBase(upTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
#else
stat = cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSPARSE(stat);
#endif
stat = cusparseSetMatFillMode(upTriFactor->descr, CUSPARSE_FILL_MODE_UPPER);CHKERRCUSPARSE(stat);
stat = cusparseSetMatDiagType(upTriFactor->descr, CUSPARSE_DIAG_TYPE_NON_UNIT);CHKERRCUSPARSE(stat);
/* set the operation */
upTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE;
/* set the matrix */
upTriFactor->csrMat = new CsrMatrix;
upTriFactor->csrMat->num_rows = n;
upTriFactor->csrMat->num_cols = n;
upTriFactor->csrMat->num_entries = nzUpper;
upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n+1);
upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+n+1);
upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzUpper);
upTriFactor->csrMat->column_indices->assign(AjUp, AjUp+nzUpper);
upTriFactor->csrMat->values = new THRUSTARRAY(nzUpper);
upTriFactor->csrMat->values->assign(AAUp, AAUp+nzUpper);
/* Create the solve analysis information */
ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_create_analysis_info(&upTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, upTriFactor->solveOp,
upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr,
upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(),
upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo,
&upTriFactor->solveBufferSize);CHKERRCUSPARSE(stat);
cerr = cudaMalloc(&upTriFactor->solveBuffer,upTriFactor->solveBufferSize);CHKERRCUDA(cerr);
#endif
/* perform the solve analysis */
stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactor->solveOp,
upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr,
upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(),
upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,upTriFactor->solvePolicy, upTriFactor->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
/* assign the pointer */
((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtr = upTriFactor;
upTriFactor->AA_h = AAUp;
cerr = cudaFreeHost(AiUp);CHKERRCUDA(cerr);
cerr = cudaFreeHost(AjUp);CHKERRCUDA(cerr);
ierr = PetscLogCpuToGpu((n+1+nzUpper)*sizeof(int)+nzUpper*sizeof(PetscScalar));CHKERRQ(ierr);
} else {
if (!upTriFactor->AA_h) {
cerr = cudaMallocHost((void**) &upTriFactor->AA_h, nzUpper*sizeof(PetscScalar));CHKERRCUDA(cerr);
}
/* Fill the upper triangular matrix */
offset = nzUpper;
for (i=n-1; i>=0; i--) {
v = aa + adiag[i+1] + 1;
/* number of elements NOT on the diagonal */
nz = adiag[i] - adiag[i+1]-1;
/* decrement the offset */
offset -= (nz+1);
/* first, set the diagonal elements */
upTriFactor->AA_h[offset] = 1./v[nz];
ierr = PetscArraycpy(&(upTriFactor->AA_h[offset+1]), v, nz);CHKERRQ(ierr);
}
upTriFactor->csrMat->values->assign(upTriFactor->AA_h, upTriFactor->AA_h+nzUpper);
ierr = PetscLogCpuToGpu(nzUpper*sizeof(PetscScalar));CHKERRQ(ierr);
}
} catch(char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
}
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(Mat A)
{
PetscErrorCode ierr;
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
IS isrow = a->row,iscol = a->icol;
PetscBool row_identity,col_identity;
PetscInt n = A->rmap->n;
PetscFunctionBegin;
if (!cusparseTriFactors) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors");
ierr = MatSeqAIJCUSPARSEBuildILULowerTriMatrix(A);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(A);CHKERRQ(ierr);
if (!cusparseTriFactors->workVector) { cusparseTriFactors->workVector = new THRUSTARRAY(n); }
cusparseTriFactors->nnz=a->nz;
A->offloadmask = PETSC_OFFLOAD_BOTH;
/* lower triangular indices */
ierr = ISIdentity(isrow,&row_identity);CHKERRQ(ierr);
if (!row_identity && !cusparseTriFactors->rpermIndices) {
const PetscInt *r;
ierr = ISGetIndices(isrow,&r);CHKERRQ(ierr);
cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n);
cusparseTriFactors->rpermIndices->assign(r, r+n);
ierr = ISRestoreIndices(isrow,&r);CHKERRQ(ierr);
ierr = PetscLogCpuToGpu(n*sizeof(PetscInt));CHKERRQ(ierr);
}
/* upper triangular indices */
ierr = ISIdentity(iscol,&col_identity);CHKERRQ(ierr);
if (!col_identity && !cusparseTriFactors->cpermIndices) {
const PetscInt *c;
ierr = ISGetIndices(iscol,&c);CHKERRQ(ierr);
cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n);
cusparseTriFactors->cpermIndices->assign(c, c+n);
ierr = ISRestoreIndices(iscol,&c);CHKERRQ(ierr);
ierr = PetscLogCpuToGpu(n*sizeof(PetscInt));CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSEBuildICCTriMatrices(Mat A)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
cusparseStatus_t stat;
PetscErrorCode ierr;
cudaError_t cerr;
PetscInt *AiUp, *AjUp;
PetscScalar *AAUp;
PetscScalar *AALo;
PetscInt nzUpper = a->nz,n = A->rmap->n,i,offset,nz,j;
Mat_SeqSBAIJ *b = (Mat_SeqSBAIJ*)A->data;
const PetscInt *ai = b->i,*aj = b->j,*vj;
const MatScalar *aa = b->a,*v;
PetscFunctionBegin;
if (!n) PetscFunctionReturn(0);
if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
try {
cerr = cudaMallocHost((void**) &AAUp, nzUpper*sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = cudaMallocHost((void**) &AALo, nzUpper*sizeof(PetscScalar));CHKERRCUDA(cerr);
if (!upTriFactor && !loTriFactor) {
/* Allocate Space for the upper triangular matrix */
cerr = cudaMallocHost((void**) &AiUp, (n+1)*sizeof(PetscInt));CHKERRCUDA(cerr);
cerr = cudaMallocHost((void**) &AjUp, nzUpper*sizeof(PetscInt));CHKERRCUDA(cerr);
/* Fill the upper triangular matrix */
AiUp[0]=(PetscInt) 0;
AiUp[n]=nzUpper;
offset = 0;
for (i=0; i<n; i++) {
/* set the pointers */
v = aa + ai[i];
vj = aj + ai[i];
nz = ai[i+1] - ai[i] - 1; /* exclude diag[i] */
/* first, set the diagonal elements */
AjUp[offset] = (PetscInt) i;
AAUp[offset] = (MatScalar)1.0/v[nz];
AiUp[i] = offset;
AALo[offset] = (MatScalar)1.0/v[nz];
offset+=1;
if (nz>0) {
ierr = PetscArraycpy(&(AjUp[offset]), vj, nz);CHKERRQ(ierr);
ierr = PetscArraycpy(&(AAUp[offset]), v, nz);CHKERRQ(ierr);
for (j=offset; j<offset+nz; j++) {
AAUp[j] = -AAUp[j];
AALo[j] = AAUp[j]/v[nz];
}
offset+=nz;
}
}
/* allocate space for the triangular factor information */
ierr = PetscNew(&upTriFactor);CHKERRQ(ierr);
upTriFactor->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL;
/* Create the matrix description */
stat = cusparseCreateMatDescr(&upTriFactor->descr);CHKERRCUSPARSE(stat);
stat = cusparseSetMatIndexBase(upTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
#else
stat = cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSPARSE(stat);
#endif
stat = cusparseSetMatFillMode(upTriFactor->descr, CUSPARSE_FILL_MODE_UPPER);CHKERRCUSPARSE(stat);
stat = cusparseSetMatDiagType(upTriFactor->descr, CUSPARSE_DIAG_TYPE_UNIT);CHKERRCUSPARSE(stat);
/* set the matrix */
upTriFactor->csrMat = new CsrMatrix;
upTriFactor->csrMat->num_rows = A->rmap->n;
upTriFactor->csrMat->num_cols = A->cmap->n;
upTriFactor->csrMat->num_entries = a->nz;
upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1);
upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+A->rmap->n+1);
upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz);
upTriFactor->csrMat->column_indices->assign(AjUp, AjUp+a->nz);
upTriFactor->csrMat->values = new THRUSTARRAY(a->nz);
upTriFactor->csrMat->values->assign(AAUp, AAUp+a->nz);
/* set the operation */
upTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE;
/* Create the solve analysis information */
ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_create_analysis_info(&upTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, upTriFactor->solveOp,
upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr,
upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(),
upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo,
&upTriFactor->solveBufferSize);CHKERRCUSPARSE(stat);
cerr = cudaMalloc(&upTriFactor->solveBuffer,upTriFactor->solveBufferSize);CHKERRCUDA(cerr);
#endif
/* perform the solve analysis */
stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactor->solveOp,
upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr,
upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(),
upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,upTriFactor->solvePolicy, upTriFactor->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
/* assign the pointer */
((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtr = upTriFactor;
/* allocate space for the triangular factor information */
ierr = PetscNew(&loTriFactor);CHKERRQ(ierr);
loTriFactor->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL;
/* Create the matrix description */
stat = cusparseCreateMatDescr(&loTriFactor->descr);CHKERRCUSPARSE(stat);
stat = cusparseSetMatIndexBase(loTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
#else
stat = cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSPARSE(stat);
#endif
stat = cusparseSetMatFillMode(loTriFactor->descr, CUSPARSE_FILL_MODE_UPPER);CHKERRCUSPARSE(stat);
stat = cusparseSetMatDiagType(loTriFactor->descr, CUSPARSE_DIAG_TYPE_NON_UNIT);CHKERRCUSPARSE(stat);
/* set the operation */
loTriFactor->solveOp = CUSPARSE_OPERATION_TRANSPOSE;
/* set the matrix */
loTriFactor->csrMat = new CsrMatrix;
loTriFactor->csrMat->num_rows = A->rmap->n;
loTriFactor->csrMat->num_cols = A->cmap->n;
loTriFactor->csrMat->num_entries = a->nz;
loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1);
loTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+A->rmap->n+1);
loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz);
loTriFactor->csrMat->column_indices->assign(AjUp, AjUp+a->nz);
loTriFactor->csrMat->values = new THRUSTARRAY(a->nz);
loTriFactor->csrMat->values->assign(AALo, AALo+a->nz);
/* Create the solve analysis information */
ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_create_analysis_info(&loTriFactor->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, loTriFactor->solveOp,
loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr,
loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(),
loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo,
&loTriFactor->solveBufferSize);CHKERRCUSPARSE(stat);
cerr = cudaMalloc(&loTriFactor->solveBuffer,loTriFactor->solveBufferSize);CHKERRCUDA(cerr);
#endif
/* perform the solve analysis */
stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactor->solveOp,
loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr,
loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(),
loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,loTriFactor->solvePolicy, loTriFactor->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
/* assign the pointer */
((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtr = loTriFactor;
ierr = PetscLogCpuToGpu(2*(((A->rmap->n+1)+(a->nz))*sizeof(int)+(a->nz)*sizeof(PetscScalar)));CHKERRQ(ierr);
cerr = cudaFreeHost(AiUp);CHKERRCUDA(cerr);
cerr = cudaFreeHost(AjUp);CHKERRCUDA(cerr);
} else {
/* Fill the upper triangular matrix */
offset = 0;
for (i=0; i<n; i++) {
/* set the pointers */
v = aa + ai[i];
nz = ai[i+1] - ai[i] - 1; /* exclude diag[i] */
/* first, set the diagonal elements */
AAUp[offset] = 1.0/v[nz];
AALo[offset] = 1.0/v[nz];
offset+=1;
if (nz>0) {
ierr = PetscArraycpy(&(AAUp[offset]), v, nz);CHKERRQ(ierr);
for (j=offset; j<offset+nz; j++) {
AAUp[j] = -AAUp[j];
AALo[j] = AAUp[j]/v[nz];
}
offset+=nz;
}
}
        if (!upTriFactor) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing upper triangular factor");
        if (!loTriFactor) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing lower triangular factor");
upTriFactor->csrMat->values->assign(AAUp, AAUp+a->nz);
loTriFactor->csrMat->values->assign(AALo, AALo+a->nz);
ierr = PetscLogCpuToGpu(2*(a->nz)*sizeof(PetscScalar));CHKERRQ(ierr);
}
cerr = cudaFreeHost(AAUp);CHKERRCUDA(cerr);
cerr = cudaFreeHost(AALo);CHKERRCUDA(cerr);
} catch(char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
}
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(Mat A)
{
PetscErrorCode ierr;
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
IS ip = a->row;
PetscBool perm_identity;
PetscInt n = A->rmap->n;
PetscFunctionBegin;
if (!cusparseTriFactors) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cusparseTriFactors");
ierr = MatSeqAIJCUSPARSEBuildICCTriMatrices(A);CHKERRQ(ierr);
if (!cusparseTriFactors->workVector) { cusparseTriFactors->workVector = new THRUSTARRAY(n); }
cusparseTriFactors->nnz=(a->nz-n)*2 + n;
A->offloadmask = PETSC_OFFLOAD_BOTH;
/* lower triangular indices */
ierr = ISIdentity(ip,&perm_identity);CHKERRQ(ierr);
if (!perm_identity) {
IS iip;
const PetscInt *irip,*rip;
ierr = ISInvertPermutation(ip,PETSC_DECIDE,&iip);CHKERRQ(ierr);
ierr = ISGetIndices(iip,&irip);CHKERRQ(ierr);
ierr = ISGetIndices(ip,&rip);CHKERRQ(ierr);
cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n);
cusparseTriFactors->rpermIndices->assign(rip, rip+n);
cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n);
cusparseTriFactors->cpermIndices->assign(irip, irip+n);
ierr = ISRestoreIndices(iip,&irip);CHKERRQ(ierr);
ierr = ISDestroy(&iip);CHKERRQ(ierr);
ierr = ISRestoreIndices(ip,&rip);CHKERRQ(ierr);
ierr = PetscLogCpuToGpu(2.*n*sizeof(PetscInt));CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat B,Mat A,const MatFactorInfo *info)
{
Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data;
IS isrow = b->row,iscol = b->col;
PetscBool row_identity,col_identity;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr);
ierr = MatLUFactorNumeric_SeqAIJ(B,A,info);CHKERRQ(ierr);
B->offloadmask = PETSC_OFFLOAD_CPU;
/* determine which version of MatSolve needs to be used. */
ierr = ISIdentity(isrow,&row_identity);CHKERRQ(ierr);
ierr = ISIdentity(iscol,&col_identity);CHKERRQ(ierr);
if (row_identity && col_identity) {
B->ops->solve = MatSolve_SeqAIJCUSPARSE_NaturalOrdering;
B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering;
B->ops->matsolve = NULL;
B->ops->matsolvetranspose = NULL;
} else {
B->ops->solve = MatSolve_SeqAIJCUSPARSE;
B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE;
B->ops->matsolve = NULL;
B->ops->matsolvetranspose = NULL;
}
/* get the triangular factors */
ierr = MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(B);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat B,Mat A,const MatFactorInfo *info)
{
Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data;
IS ip = b->row;
PetscBool perm_identity;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr);
ierr = MatCholeskyFactorNumeric_SeqAIJ(B,A,info);CHKERRQ(ierr);
B->offloadmask = PETSC_OFFLOAD_CPU;
/* determine which version of MatSolve needs to be used. */
ierr = ISIdentity(ip,&perm_identity);CHKERRQ(ierr);
if (perm_identity) {
B->ops->solve = MatSolve_SeqAIJCUSPARSE_NaturalOrdering;
B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering;
B->ops->matsolve = NULL;
B->ops->matsolvetranspose = NULL;
} else {
B->ops->solve = MatSolve_SeqAIJCUSPARSE;
B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE;
B->ops->matsolve = NULL;
B->ops->matsolvetranspose = NULL;
}
/* get the triangular factors */
ierr = MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(B);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(Mat A)
{
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT;
cusparseStatus_t stat;
cusparseIndexBase_t indexBase;
cusparseMatrixType_t matrixType;
cusparseFillMode_t fillMode;
cusparseDiagType_t diagType;
cudaError_t cerr;
PetscErrorCode ierr;
PetscFunctionBegin;
/* allocate space for the transpose of the lower triangular factor */
ierr = PetscNew(&loTriFactorT);CHKERRQ(ierr);
loTriFactorT->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL;
/* set the matrix descriptors of the lower triangular factor */
matrixType = cusparseGetMatType(loTriFactor->descr);
indexBase = cusparseGetMatIndexBase(loTriFactor->descr);
fillMode = cusparseGetMatFillMode(loTriFactor->descr)==CUSPARSE_FILL_MODE_UPPER ?
CUSPARSE_FILL_MODE_LOWER : CUSPARSE_FILL_MODE_UPPER;
diagType = cusparseGetMatDiagType(loTriFactor->descr);
/* Create the matrix description */
stat = cusparseCreateMatDescr(&loTriFactorT->descr);CHKERRCUSPARSE(stat);
stat = cusparseSetMatIndexBase(loTriFactorT->descr, indexBase);CHKERRCUSPARSE(stat);
stat = cusparseSetMatType(loTriFactorT->descr, matrixType);CHKERRCUSPARSE(stat);
stat = cusparseSetMatFillMode(loTriFactorT->descr, fillMode);CHKERRCUSPARSE(stat);
stat = cusparseSetMatDiagType(loTriFactorT->descr, diagType);CHKERRCUSPARSE(stat);
/* set the operation */
loTriFactorT->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE;
/* allocate GPU space for the CSC of the lower triangular factor*/
loTriFactorT->csrMat = new CsrMatrix;
loTriFactorT->csrMat->num_rows = loTriFactor->csrMat->num_cols;
loTriFactorT->csrMat->num_cols = loTriFactor->csrMat->num_rows;
loTriFactorT->csrMat->num_entries = loTriFactor->csrMat->num_entries;
loTriFactorT->csrMat->row_offsets = new THRUSTINTARRAY32(loTriFactorT->csrMat->num_rows+1);
loTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(loTriFactorT->csrMat->num_entries);
loTriFactorT->csrMat->values = new THRUSTARRAY(loTriFactorT->csrMat->num_entries);
/* compute the transpose of the lower triangular factor, i.e. the CSC */
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
stat = cusparseCsr2cscEx2_bufferSize(cusparseTriFactors->handle, loTriFactor->csrMat->num_rows,
loTriFactor->csrMat->num_cols, loTriFactor->csrMat->num_entries,
loTriFactor->csrMat->values->data().get(),
loTriFactor->csrMat->row_offsets->data().get(),
loTriFactor->csrMat->column_indices->data().get(),
loTriFactorT->csrMat->values->data().get(),
loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype,
CUSPARSE_ACTION_NUMERIC,indexBase,
CUSPARSE_CSR2CSC_ALG1, &loTriFactor->csr2cscBufferSize);CHKERRCUSPARSE(stat);
cerr = cudaMalloc(&loTriFactor->csr2cscBuffer,loTriFactor->csr2cscBufferSize);CHKERRCUDA(cerr);
#endif
ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_csr2csc(cusparseTriFactors->handle, loTriFactor->csrMat->num_rows,
loTriFactor->csrMat->num_cols, loTriFactor->csrMat->num_entries,
loTriFactor->csrMat->values->data().get(),
loTriFactor->csrMat->row_offsets->data().get(),
loTriFactor->csrMat->column_indices->data().get(),
loTriFactorT->csrMat->values->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype,
CUSPARSE_ACTION_NUMERIC, indexBase,
CUSPARSE_CSR2CSC_ALG1, loTriFactor->csr2cscBuffer
#else
loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->csrMat->row_offsets->data().get(),
CUSPARSE_ACTION_NUMERIC, indexBase
#endif
);CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
  ierr = PetscLogEventEnd(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
/* Create the solve analysis information */
ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_create_analysis_info(&loTriFactorT->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, loTriFactorT->solveOp,
loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, loTriFactorT->descr,
loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(),
loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo,
&loTriFactorT->solveBufferSize);CHKERRCUSPARSE(stat);
cerr = cudaMalloc(&loTriFactorT->solveBuffer,loTriFactorT->solveBufferSize);CHKERRCUDA(cerr);
#endif
/* perform the solve analysis */
stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactorT->solveOp,
loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, loTriFactorT->descr,
loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(),
loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,loTriFactorT->solvePolicy, loTriFactorT->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
/* assign the pointer */
((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtrTranspose = loTriFactorT;
/*********************************************/
/* Now the Transpose of the Upper Tri Factor */
/*********************************************/
/* allocate space for the transpose of the upper triangular factor */
ierr = PetscNew(&upTriFactorT);CHKERRQ(ierr);
upTriFactorT->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL;
/* set the matrix descriptors of the upper triangular factor */
matrixType = cusparseGetMatType(upTriFactor->descr);
indexBase = cusparseGetMatIndexBase(upTriFactor->descr);
fillMode = cusparseGetMatFillMode(upTriFactor->descr)==CUSPARSE_FILL_MODE_UPPER ?
CUSPARSE_FILL_MODE_LOWER : CUSPARSE_FILL_MODE_UPPER;
diagType = cusparseGetMatDiagType(upTriFactor->descr);
/* Create the matrix description */
stat = cusparseCreateMatDescr(&upTriFactorT->descr);CHKERRCUSPARSE(stat);
stat = cusparseSetMatIndexBase(upTriFactorT->descr, indexBase);CHKERRCUSPARSE(stat);
stat = cusparseSetMatType(upTriFactorT->descr, matrixType);CHKERRCUSPARSE(stat);
stat = cusparseSetMatFillMode(upTriFactorT->descr, fillMode);CHKERRCUSPARSE(stat);
stat = cusparseSetMatDiagType(upTriFactorT->descr, diagType);CHKERRCUSPARSE(stat);
/* set the operation */
upTriFactorT->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE;
/* allocate GPU space for the CSC of the upper triangular factor*/
upTriFactorT->csrMat = new CsrMatrix;
upTriFactorT->csrMat->num_rows = upTriFactor->csrMat->num_cols;
upTriFactorT->csrMat->num_cols = upTriFactor->csrMat->num_rows;
upTriFactorT->csrMat->num_entries = upTriFactor->csrMat->num_entries;
upTriFactorT->csrMat->row_offsets = new THRUSTINTARRAY32(upTriFactorT->csrMat->num_rows+1);
upTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(upTriFactorT->csrMat->num_entries);
upTriFactorT->csrMat->values = new THRUSTARRAY(upTriFactorT->csrMat->num_entries);
/* compute the transpose of the upper triangular factor, i.e. the CSC */
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
stat = cusparseCsr2cscEx2_bufferSize(cusparseTriFactors->handle,upTriFactor->csrMat->num_rows,
upTriFactor->csrMat->num_cols, upTriFactor->csrMat->num_entries,
upTriFactor->csrMat->values->data().get(),
upTriFactor->csrMat->row_offsets->data().get(),
upTriFactor->csrMat->column_indices->data().get(),
upTriFactorT->csrMat->values->data().get(),
upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype,
CUSPARSE_ACTION_NUMERIC,indexBase,
CUSPARSE_CSR2CSC_ALG1, &upTriFactor->csr2cscBufferSize);CHKERRCUSPARSE(stat);
cerr = cudaMalloc(&upTriFactor->csr2cscBuffer,upTriFactor->csr2cscBufferSize);CHKERRCUDA(cerr);
#endif
ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_csr2csc(cusparseTriFactors->handle, upTriFactor->csrMat->num_rows,
upTriFactor->csrMat->num_cols, upTriFactor->csrMat->num_entries,
upTriFactor->csrMat->values->data().get(),
upTriFactor->csrMat->row_offsets->data().get(),
upTriFactor->csrMat->column_indices->data().get(),
upTriFactorT->csrMat->values->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype,
CUSPARSE_ACTION_NUMERIC, indexBase,
CUSPARSE_CSR2CSC_ALG1, upTriFactor->csr2cscBuffer
#else
upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->csrMat->row_offsets->data().get(),
CUSPARSE_ACTION_NUMERIC, indexBase
#endif
);CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
  ierr = PetscLogEventEnd(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
/* Create the solve analysis information */
ierr = PetscLogEventBegin(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_create_analysis_info(&upTriFactorT->solveInfo);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
stat = cusparse_get_svbuffsize(cusparseTriFactors->handle, upTriFactorT->solveOp,
upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, upTriFactorT->descr,
upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(),
upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo,
&upTriFactorT->solveBufferSize);CHKERRCUSPARSE(stat);
cerr = cudaMalloc(&upTriFactorT->solveBuffer,upTriFactorT->solveBufferSize);CHKERRCUDA(cerr);
#endif
/* perform the solve analysis */
stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactorT->solveOp,
upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, upTriFactorT->descr,
upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(),
upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,upTriFactorT->solvePolicy, upTriFactorT->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogEventEnd(MAT_CUSPARSESolveAnalysis,A,0,0,0);CHKERRQ(ierr);
/* assign the pointer */
((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtrTranspose = upTriFactorT;
PetscFunctionReturn(0);
}
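/* Build an explicit transpose of A on the GPU (CSR of A^T via csr2csc, or HYB on pre-11 CUDA)
and cache it in cusparsestruct->matTranspose, so that products with A^T (e.g. MATPRODUCT_AtB
below) can be performed as non-transposed multiplies */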
static PetscErrorCode MatSeqAIJCUSPARSEGenerateTransposeForMult(Mat A)
{
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
Mat_SeqAIJCUSPARSEMultStruct *matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat;
Mat_SeqAIJCUSPARSEMultStruct *matstructT = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose;
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
cusparseStatus_t stat;
cusparseIndexBase_t indexBase;
cudaError_t err;
PetscErrorCode ierr;
PetscFunctionBegin;
if (!cusparsestruct->transgen || cusparsestruct->matTranspose || !A->rmap->n || !A->cmap->n) PetscFunctionReturn(0);
ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
/* create cusparse matrix */
matstructT = new Mat_SeqAIJCUSPARSEMultStruct;
stat = cusparseCreateMatDescr(&matstructT->descr);CHKERRCUSPARSE(stat);
indexBase = cusparseGetMatIndexBase(matstruct->descr);
stat = cusparseSetMatIndexBase(matstructT->descr, indexBase);CHKERRCUSPARSE(stat);
stat = cusparseSetMatType(matstructT->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
/* set alpha and beta */
err = cudaMalloc((void **)&(matstructT->alpha_one),sizeof(PetscScalar));CHKERRCUDA(err);
err = cudaMalloc((void **)&(matstructT->beta_zero),sizeof(PetscScalar));CHKERRCUDA(err);
err = cudaMalloc((void **)&(matstructT->beta_one), sizeof(PetscScalar));CHKERRCUDA(err);
err = cudaMemcpy(matstructT->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err);
err = cudaMemcpy(matstructT->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err);
err = cudaMemcpy(matstructT->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err);
stat = cusparseSetPointerMode(cusparsestruct->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
if (cusparsestruct->format==MAT_CUSPARSE_CSR) {
CsrMatrix *matrix = (CsrMatrix*)matstruct->mat;
CsrMatrix *matrixT= new CsrMatrix;
matrixT->num_rows = A->cmap->n;
matrixT->num_cols = A->rmap->n;
matrixT->num_entries = a->nz;
matrixT->row_offsets = new THRUSTINTARRAY32(matrixT->num_rows+1);
matrixT->column_indices = new THRUSTINTARRAY32(a->nz);
matrixT->values = new THRUSTARRAY(a->nz);
if (!cusparsestruct->rowoffsets_gpu) { cusparsestruct->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n+1); }
cusparsestruct->rowoffsets_gpu->assign(a->i,a->i+A->rmap->n+1);
/* compute the transpose, i.e. the CSC */
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
stat = cusparseCsr2cscEx2_bufferSize(cusparsestruct->handle, A->rmap->n,
A->cmap->n, matrix->num_entries,
matrix->values->data().get(),
cusparsestruct->rowoffsets_gpu->data().get(),
matrix->column_indices->data().get(),
matrixT->values->data().get(),
matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), cusparse_scalartype,
CUSPARSE_ACTION_NUMERIC,indexBase,
cusparsestruct->csr2cscAlg, &cusparsestruct->csr2cscBufferSize);CHKERRCUSPARSE(stat);
err = cudaMalloc(&cusparsestruct->csr2cscBuffer,cusparsestruct->csr2cscBufferSize);CHKERRCUDA(err);
#endif
stat = cusparse_csr2csc(cusparsestruct->handle, A->rmap->n,
A->cmap->n, matrix->num_entries,
matrix->values->data().get(),
cusparsestruct->rowoffsets_gpu->data().get(),
matrix->column_indices->data().get(),
matrixT->values->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), cusparse_scalartype,
CUSPARSE_ACTION_NUMERIC,indexBase,
cusparsestruct->csr2cscAlg, cusparsestruct->csr2cscBuffer
#else
matrixT->column_indices->data().get(), matrixT->row_offsets->data().get(),
CUSPARSE_ACTION_NUMERIC, indexBase
#endif
);CHKERRCUSPARSE(stat);
matstructT->mat = matrixT;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
stat = cusparseCreateCsr(&matstructT->matDescr,
matrixT->num_rows, matrixT->num_cols, matrixT->num_entries,
matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(),
matrixT->values->data().get(),
CUSPARSE_INDEX_32I,CUSPARSE_INDEX_32I, /* row offset, col idx type due to THRUSTINTARRAY32 */
indexBase,cusparse_scalartype);CHKERRCUSPARSE(stat);
#endif
} else if (cusparsestruct->format==MAT_CUSPARSE_ELL || cusparsestruct->format==MAT_CUSPARSE_HYB) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
CsrMatrix *temp = new CsrMatrix;
CsrMatrix *tempT = new CsrMatrix;
/* First convert HYB to CSR */
temp->num_rows = A->rmap->n;
temp->num_cols = A->cmap->n;
temp->num_entries = a->nz;
temp->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1);
temp->column_indices = new THRUSTINTARRAY32(a->nz);
temp->values = new THRUSTARRAY(a->nz);
stat = cusparse_hyb2csr(cusparsestruct->handle,
matstruct->descr, (cusparseHybMat_t)matstruct->mat,
temp->values->data().get(),
temp->row_offsets->data().get(),
temp->column_indices->data().get());CHKERRCUSPARSE(stat);
/* Next, convert CSR to CSC (i.e. the matrix transpose) */
tempT->num_rows = A->rmap->n;
tempT->num_cols = A->cmap->n;
tempT->num_entries = a->nz;
tempT->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1);
tempT->column_indices = new THRUSTINTARRAY32(a->nz);
tempT->values = new THRUSTARRAY(a->nz);
stat = cusparse_csr2csc(cusparsestruct->handle, temp->num_rows,
temp->num_cols, temp->num_entries,
temp->values->data().get(),
temp->row_offsets->data().get(),
temp->column_indices->data().get(),
tempT->values->data().get(),
tempT->column_indices->data().get(),
tempT->row_offsets->data().get(),
CUSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUSPARSE(stat);
/* Last, convert CSC to HYB */
cusparseHybMat_t hybMat;
stat = cusparseCreateHybMat(&hybMat);CHKERRCUSPARSE(stat);
cusparseHybPartition_t partition = cusparsestruct->format==MAT_CUSPARSE_ELL ?
CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO;
stat = cusparse_csr2hyb(cusparsestruct->handle, A->rmap->n, A->cmap->n,
matstructT->descr, tempT->values->data().get(),
tempT->row_offsets->data().get(),
tempT->column_indices->data().get(),
hybMat, 0, partition);CHKERRCUSPARSE(stat);
/* assign the pointer */
matstructT->mat = hybMat;
/* delete temporaries */
if (tempT) {
if (tempT->values) delete (THRUSTARRAY*) tempT->values;
if (tempT->column_indices) delete (THRUSTINTARRAY32*) tempT->column_indices;
if (tempT->row_offsets) delete (THRUSTINTARRAY32*) tempT->row_offsets;
delete (CsrMatrix*) tempT;
}
if (temp) {
if (temp->values) delete (THRUSTARRAY*) temp->values;
if (temp->column_indices) delete (THRUSTINTARRAY32*) temp->column_indices;
if (temp->row_offsets) delete (THRUSTINTARRAY32*) temp->row_offsets;
delete (CsrMatrix*) temp;
}
#endif
}
err = WaitForCUDA();CHKERRCUDA(err);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogEventEnd(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
/* the compressed row indices are not used for matTranspose */
matstructT->cprowIndices = NULL;
/* assign the pointer */
((Mat_SeqAIJCUSPARSE*)A->spptr)->matTranspose = matstructT;
PetscFunctionReturn(0);
}
/* Why do we need to analyze the transposed matrix again? Can't we just use op(A) = CUSPARSE_OPERATION_TRANSPOSE in MatSolve_SeqAIJCUSPARSE? */
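/* Solve A^T x = b with the explicitly transposed triangular factors (built on first use):
reorder b with the row permutation, solve with U^T, then with L^T, and finally apply the
column permutation */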
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat A,Vec bb,Vec xx)
{
PetscInt n = xx->map->n;
const PetscScalar *barray;
PetscScalar *xarray;
thrust::device_ptr<const PetscScalar> bGPU;
thrust::device_ptr<PetscScalar> xGPU;
cusparseStatus_t stat;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose;
THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector;
PetscErrorCode ierr;
cudaError_t cerr;
PetscFunctionBegin;
/* Analyze the matrix and create the transpose ... on the fly */
if (!loTriFactorT && !upTriFactorT) {
ierr = MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A);CHKERRQ(ierr);
loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose;
upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose;
}
/* Get the GPU pointers */
ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr);
xGPU = thrust::device_pointer_cast(xarray);
bGPU = thrust::device_pointer_cast(barray);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
/* First, reorder with the row permutation */
thrust::copy(thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()),
thrust::make_permutation_iterator(bGPU+n, cusparseTriFactors->rpermIndices->end()),
xGPU);
/* Next, solve U */
stat = cusparse_solve(cusparseTriFactors->handle, upTriFactorT->solveOp,
upTriFactorT->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
upTriFactorT->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, upTriFactorT->descr,
upTriFactorT->csrMat->values->data().get(),
upTriFactorT->csrMat->row_offsets->data().get(),
upTriFactorT->csrMat->column_indices->data().get(),
upTriFactorT->solveInfo,
xarray, tempGPU->data().get()
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,upTriFactorT->solvePolicy, upTriFactorT->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
/* Then, solve L */
stat = cusparse_solve(cusparseTriFactors->handle, loTriFactorT->solveOp,
loTriFactorT->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
loTriFactorT->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, loTriFactorT->descr,
loTriFactorT->csrMat->values->data().get(),
loTriFactorT->csrMat->row_offsets->data().get(),
loTriFactorT->csrMat->column_indices->data().get(),
loTriFactorT->solveInfo,
tempGPU->data().get(), xarray
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,loTriFactorT->solvePolicy, loTriFactorT->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
/* Last, copy the solution, xGPU, into a temporary with the column permutation ... can't be done in place. */
thrust::copy(thrust::make_permutation_iterator(xGPU, cusparseTriFactors->cpermIndices->begin()),
thrust::make_permutation_iterator(xGPU+n, cusparseTriFactors->cpermIndices->end()),
tempGPU->begin());
/* Copy the temporary to the full solution. */
thrust::copy(tempGPU->begin(), tempGPU->end(), xGPU);
/* restore */
ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
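/* Natural-ordering variant of the transposed solve above: no row/column permutations are
needed, so b feeds the U^T solve directly and the L^T solve writes the result into x */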
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat A,Vec bb,Vec xx)
{
const PetscScalar *barray;
PetscScalar *xarray;
cusparseStatus_t stat;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose;
THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector;
PetscErrorCode ierr;
cudaError_t cerr;
PetscFunctionBegin;
/* Analyze the matrix and create the transpose ... on the fly */
if (!loTriFactorT && !upTriFactorT) {
ierr = MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A);CHKERRQ(ierr);
loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose;
upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose;
}
/* Get the GPU pointers */
ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
/* First, solve U */
stat = cusparse_solve(cusparseTriFactors->handle, upTriFactorT->solveOp,
upTriFactorT->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
upTriFactorT->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, upTriFactorT->descr,
upTriFactorT->csrMat->values->data().get(),
upTriFactorT->csrMat->row_offsets->data().get(),
upTriFactorT->csrMat->column_indices->data().get(),
upTriFactorT->solveInfo,
barray, tempGPU->data().get()
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,upTriFactorT->solvePolicy, upTriFactorT->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
/* Then, solve L */
stat = cusparse_solve(cusparseTriFactors->handle, loTriFactorT->solveOp,
loTriFactorT->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
loTriFactorT->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, loTriFactorT->descr,
loTriFactorT->csrMat->values->data().get(),
loTriFactorT->csrMat->row_offsets->data().get(),
loTriFactorT->csrMat->column_indices->data().get(),
loTriFactorT->solveInfo,
tempGPU->data().get(), xarray
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,loTriFactorT->solvePolicy, loTriFactorT->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
/* restore */
ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
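/* Solve A x = b with the cached triangular factors: reorder b with the row permutation,
solve with L, then with U, and reorder the result with the column permutation */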
static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat A,Vec bb,Vec xx)
{
const PetscScalar *barray;
PetscScalar *xarray;
thrust::device_ptr<const PetscScalar> bGPU;
thrust::device_ptr<PetscScalar> xGPU;
cusparseStatus_t stat;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector;
PetscErrorCode ierr;
cudaError_t cerr;
PetscFunctionBegin;
/* Get the GPU pointers */
ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr);
xGPU = thrust::device_pointer_cast(xarray);
bGPU = thrust::device_pointer_cast(barray);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
/* First, reorder with the row permutation */
thrust::copy(thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()),
thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->end()),
tempGPU->begin());
/* Next, solve L */
stat = cusparse_solve(cusparseTriFactors->handle, loTriFactor->solveOp,
loTriFactor->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
loTriFactor->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, loTriFactor->descr,
loTriFactor->csrMat->values->data().get(),
loTriFactor->csrMat->row_offsets->data().get(),
loTriFactor->csrMat->column_indices->data().get(),
loTriFactor->solveInfo,
tempGPU->data().get(), xarray
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,loTriFactor->solvePolicy, loTriFactor->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
/* Then, solve U */
stat = cusparse_solve(cusparseTriFactors->handle, upTriFactor->solveOp,
upTriFactor->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
upTriFactor->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, upTriFactor->descr,
upTriFactor->csrMat->values->data().get(),
upTriFactor->csrMat->row_offsets->data().get(),
upTriFactor->csrMat->column_indices->data().get(),
upTriFactor->solveInfo,
xarray, tempGPU->data().get()
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,upTriFactor->solvePolicy, upTriFactor->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
/* Last, reorder with the column permutation */
thrust::copy(thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->begin()),
thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->end()),
xGPU);
ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
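/* Natural-ordering variant of MatSolve_SeqAIJCUSPARSE: no permutations, just the L solve
followed by the U solve */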
static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat A,Vec bb,Vec xx)
{
const PetscScalar *barray;
PetscScalar *xarray;
cusparseStatus_t stat;
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector;
PetscErrorCode ierr;
cudaError_t cerr;
PetscFunctionBegin;
/* Get the GPU pointers */
ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
/* First, solve L */
stat = cusparse_solve(cusparseTriFactors->handle, loTriFactor->solveOp,
loTriFactor->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
loTriFactor->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, loTriFactor->descr,
loTriFactor->csrMat->values->data().get(),
loTriFactor->csrMat->row_offsets->data().get(),
loTriFactor->csrMat->column_indices->data().get(),
loTriFactor->solveInfo,
barray, tempGPU->data().get()
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,loTriFactor->solvePolicy, loTriFactor->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
/* Next, solve U */
stat = cusparse_solve(cusparseTriFactors->handle, upTriFactor->solveOp,
upTriFactor->csrMat->num_rows,
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
upTriFactor->csrMat->num_entries,
#endif
&PETSC_CUSPARSE_ONE, upTriFactor->descr,
upTriFactor->csrMat->values->data().get(),
upTriFactor->csrMat->row_offsets->data().get(),
upTriFactor->csrMat->column_indices->data().get(),
upTriFactor->solveInfo,
tempGPU->data().get(), xarray
#if PETSC_PKG_CUDA_VERSION_GE(9,0,0)
,upTriFactor->solvePolicy, upTriFactor->solveBuffer
#endif
);CHKERRCUSPARSE(stat);
ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr);
ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogGpuFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
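/* Copy the matrix values from the GPU back into the host Mat_SeqAIJ array (the nonzero
pattern is already on the host) when the up-to-date data lives only on the GPU */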
static PetscErrorCode MatSeqAIJCUSPARSECopyFromGPU(Mat A)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
cudaError_t cerr;
PetscErrorCode ierr;
PetscFunctionBegin;
if (A->offloadmask == PETSC_OFFLOAD_GPU) {
CsrMatrix *matrix = (CsrMatrix*)cusp->mat->mat;
ierr = PetscLogEventBegin(MAT_CUSPARSECopyFromGPU,A,0,0,0);CHKERRQ(ierr);
cerr = cudaMemcpy(a->a, matrix->values->data().get(), a->nz*sizeof(PetscScalar), cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuToCpu(a->nz*sizeof(PetscScalar));CHKERRQ(ierr);
ierr = PetscLogEventEnd(MAT_CUSPARSECopyFromGPU,A,0,0,0);CHKERRQ(ierr);
A->offloadmask = PETSC_OFFLOAD_BOTH;
}
PetscFunctionReturn(0);
}
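/* Return the host values array: sync the values from the GPU first and mark the host copy
as the valid one, since the caller may modify the array */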
static PetscErrorCode MatSeqAIJGetArray_SeqAIJCUSPARSE(Mat A,PetscScalar *array[])
{
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr);
*array = a->a;
A->offloadmask = PETSC_OFFLOAD_CPU;
PetscFunctionReturn(0);
}
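/* Push the host Mat_SeqAIJ data to the GPU. If the nonzero pattern is unchanged only the
values (and the cached transpose, if present) are refreshed; otherwise the cusparse
structures are rebuilt, in CSR or (pre-CUDA 11) ELL/HYB format, honoring compressed row
storage */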
static PetscErrorCode MatSeqAIJCUSPARSECopyToGPU(Mat A)
{
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
Mat_SeqAIJCUSPARSEMultStruct *matstruct = cusparsestruct->mat;
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
PetscInt m = A->rmap->n,*ii,*ridx,tmp;
PetscErrorCode ierr;
cusparseStatus_t stat;
PetscBool both = PETSC_TRUE;
cudaError_t err;
PetscFunctionBegin;
if (A->boundtocpu) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Cannot copy to GPU");
if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) {
if (A->nonzerostate == cusparsestruct->nonzerostate && cusparsestruct->format == MAT_CUSPARSE_CSR) {
/* Copy values only */
CsrMatrix *matrix,*matrixT;
matrix = (CsrMatrix*)cusparsestruct->mat->mat;
if (a->nz && !a->a) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CSR values");
ierr = PetscLogEventBegin(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr);
matrix->values->assign(a->a, a->a+a->nz);
err = WaitForCUDA();CHKERRCUDA(err);
ierr = PetscLogCpuToGpu((a->nz)*sizeof(PetscScalar));CHKERRQ(ierr);
ierr = PetscLogEventEnd(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr);
/* Update the cached transpose if it has already been built */
if (cusparsestruct->matTranspose) {
cusparseIndexBase_t indexBase = cusparseGetMatIndexBase(cusparsestruct->mat->descr);
matrixT = (CsrMatrix*)cusparsestruct->matTranspose->mat;
ierr = PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
stat = cusparse_csr2csc(cusparsestruct->handle, A->rmap->n,
A->cmap->n, matrix->num_entries,
matrix->values->data().get(),
cusparsestruct->rowoffsets_gpu->data().get(),
matrix->column_indices->data().get(),
matrixT->values->data().get(),
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), cusparse_scalartype,
CUSPARSE_ACTION_NUMERIC,indexBase,
cusparsestruct->csr2cscAlg, cusparsestruct->csr2cscBuffer
#else
matrixT->column_indices->data().get(), matrixT->row_offsets->data().get(),
CUSPARSE_ACTION_NUMERIC, indexBase
#endif
);CHKERRCUSPARSE(stat);
err = WaitForCUDA();CHKERRCUDA(err);
ierr = PetscLogEventEnd(MAT_CUSPARSEGenerateTranspose,A,0,0,0);CHKERRQ(ierr);
}
} else {
PetscInt nnz;
ierr = PetscLogEventBegin(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&cusparsestruct->mat,cusparsestruct->format);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&cusparsestruct->matTranspose,cusparsestruct->format);CHKERRQ(ierr);
delete cusparsestruct->workVector;
delete cusparsestruct->rowoffsets_gpu;
try {
if (a->compressedrow.use) {
m = a->compressedrow.nrows;
ii = a->compressedrow.i;
ridx = a->compressedrow.rindex;
} else {
m = A->rmap->n;
ii = a->i;
ridx = NULL;
}
if (!ii) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CSR row data");
if (m && !a->j) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing CSR column data");
if (!a->a) { nnz = ii[m]; both = PETSC_FALSE; }
else nnz = a->nz;
/* create cusparse matrix */
cusparsestruct->nrows = m;
matstruct = new Mat_SeqAIJCUSPARSEMultStruct;
stat = cusparseCreateMatDescr(&matstruct->descr);CHKERRCUSPARSE(stat);
stat = cusparseSetMatIndexBase(matstruct->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
stat = cusparseSetMatType(matstruct->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
err = cudaMalloc((void **)&(matstruct->alpha_one),sizeof(PetscScalar));CHKERRCUDA(err);
err = cudaMalloc((void **)&(matstruct->beta_zero),sizeof(PetscScalar));CHKERRCUDA(err);
err = cudaMalloc((void **)&(matstruct->beta_one), sizeof(PetscScalar));CHKERRCUDA(err);
err = cudaMemcpy(matstruct->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err);
err = cudaMemcpy(matstruct->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err);
err = cudaMemcpy(matstruct->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err);
stat = cusparseSetPointerMode(cusparsestruct->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
/* Build a hybrid/ellpack matrix if this option is chosen for the storage */
if (cusparsestruct->format==MAT_CUSPARSE_CSR) {
/* set the matrix */
CsrMatrix *mat= new CsrMatrix;
mat->num_rows = m;
mat->num_cols = A->cmap->n;
mat->num_entries = nnz;
mat->row_offsets = new THRUSTINTARRAY32(m+1);
mat->row_offsets->assign(ii, ii + m+1);
mat->column_indices = new THRUSTINTARRAY32(nnz);
mat->column_indices->assign(a->j, a->j+nnz);
mat->values = new THRUSTARRAY(nnz);
if (a->a) mat->values->assign(a->a, a->a+nnz);
/* assign the pointer */
matstruct->mat = mat;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
if (mat->num_rows) { /* cusparse errors on empty matrices! */
stat = cusparseCreateCsr(&matstruct->matDescr,
mat->num_rows, mat->num_cols, mat->num_entries,
mat->row_offsets->data().get(), mat->column_indices->data().get(),
mat->values->data().get(),
CUSPARSE_INDEX_32I,CUSPARSE_INDEX_32I, /* row offset, col idx types due to THRUSTINTARRAY32 */
CUSPARSE_INDEX_BASE_ZERO,cusparse_scalartype);CHKERRCUSPARSE(stat);
}
#endif
} else if (cusparsestruct->format==MAT_CUSPARSE_ELL || cusparsestruct->format==MAT_CUSPARSE_HYB) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
CsrMatrix *mat= new CsrMatrix;
mat->num_rows = m;
mat->num_cols = A->cmap->n;
mat->num_entries = nnz;
mat->row_offsets = new THRUSTINTARRAY32(m+1);
mat->row_offsets->assign(ii, ii + m+1);
mat->column_indices = new THRUSTINTARRAY32(nnz);
mat->column_indices->assign(a->j, a->j+nnz);
mat->values = new THRUSTARRAY(nnz);
if (a->a) mat->values->assign(a->a, a->a+nnz);
cusparseHybMat_t hybMat;
stat = cusparseCreateHybMat(&hybMat);CHKERRCUSPARSE(stat);
cusparseHybPartition_t partition = cusparsestruct->format==MAT_CUSPARSE_ELL ?
CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO;
stat = cusparse_csr2hyb(cusparsestruct->handle, mat->num_rows, mat->num_cols,
matstruct->descr, mat->values->data().get(),
mat->row_offsets->data().get(),
mat->column_indices->data().get(),
hybMat, 0, partition);CHKERRCUSPARSE(stat);
/* assign the pointer */
matstruct->mat = hybMat;
if (mat) {
if (mat->values) delete (THRUSTARRAY*)mat->values;
if (mat->column_indices) delete (THRUSTINTARRAY32*)mat->column_indices;
if (mat->row_offsets) delete (THRUSTINTARRAY32*)mat->row_offsets;
delete (CsrMatrix*)mat;
}
#endif
}
/* assign the compressed row indices */
if (a->compressedrow.use) {
cusparsestruct->workVector = new THRUSTARRAY(m);
matstruct->cprowIndices = new THRUSTINTARRAY(m);
matstruct->cprowIndices->assign(ridx,ridx+m);
tmp = m;
} else {
cusparsestruct->workVector = NULL;
matstruct->cprowIndices = NULL;
tmp = 0;
}
ierr = PetscLogCpuToGpu(((m+1)+(a->nz))*sizeof(int)+tmp*sizeof(PetscInt)+(3+(a->nz))*sizeof(PetscScalar));CHKERRQ(ierr);
/* assign the pointer */
cusparsestruct->mat = matstruct;
} catch(char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
}
err = WaitForCUDA();CHKERRCUDA(err);
ierr = PetscLogEventEnd(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr);
cusparsestruct->nonzerostate = A->nonzerostate;
}
if (both) A->offloadmask = PETSC_OFFLOAD_BOTH;
}
PetscFunctionReturn(0);
}
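/* Elementwise functors applied to tuples (typically via thrust zip iterators):
accumulate the first component into the second, copy first to second, and copy
second to first, respectively */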
struct VecCUDAPlusEquals
{
template <typename Tuple>
__host__ __device__
void operator()(Tuple t)
{
thrust::get<1>(t) = thrust::get<1>(t) + thrust::get<0>(t);
}
};
struct VecCUDAEquals
{
template <typename Tuple>
__host__ __device__
void operator()(Tuple t)
{
thrust::get<1>(t) = thrust::get<0>(t);
}
};
struct VecCUDAEqualsReverse
{
template <typename Tuple>
__host__ __device__
void operator()(Tuple t)
{
thrust::get<0>(t) = thrust::get<1>(t);
}
};
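/* Per-product scratch data attached to C->product for the MatMat kernels below: the optional
dense work matrix X (for PtAP/RARt), a buffer for B^T (pre-CUDA 11 ABt/RARt), a possibly
decompressed copy of B, and, on CUDA >= 11, the generic-API descriptors and SpMM/SpGEMM
work buffers */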
struct MatMatCusparse {
PetscBool cisdense;
PetscScalar *Bt;
Mat X;
PetscBool reusesym; /* Cusparse does not have split symbolic and numeric phases for sparse matmat operations */
PetscLogDouble flops;
CsrMatrix *Bcsr;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
cusparseSpMatDescr_t matSpBDescr;
PetscBool initialized; /* C = alpha op(A) op(B) + beta C */
cusparseDnMatDescr_t matBDescr;
cusparseDnMatDescr_t matCDescr;
PetscInt Blda,Clda; /* Record leading dimensions of B and C here to detect changes */
size_t mmBufferSize;
void *mmBuffer;
void *mmBuffer2; /* SpGEMM WorkEstimation buffer */
cusparseSpGEMMDescr_t spgemmDesc;
#endif
};
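/* Destructor for the MatMatCusparse scratch data attached to C->product */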
static PetscErrorCode MatDestroy_MatMatCusparse(void *data)
{
PetscErrorCode ierr;
MatMatCusparse *mmdata = (MatMatCusparse *)data;
cudaError_t cerr;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
cusparseStatus_t stat;
#endif
PetscFunctionBegin;
cerr = cudaFree(mmdata->Bt);CHKERRCUDA(cerr);
delete mmdata->Bcsr;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
if (mmdata->matSpBDescr) { stat = cusparseDestroySpMat(mmdata->matSpBDescr);CHKERRCUSPARSE(stat); }
if (mmdata->mmBuffer) { cerr = cudaFree(mmdata->mmBuffer);CHKERRCUDA(cerr); }
if (mmdata->mmBuffer2) { cerr = cudaFree(mmdata->mmBuffer2);CHKERRCUDA(cerr); }
if (mmdata->matBDescr) { stat = cusparseDestroyDnMat(mmdata->matBDescr);CHKERRCUSPARSE(stat); }
if (mmdata->matCDescr) { stat = cusparseDestroyDnMat(mmdata->matCDescr);CHKERRCUSPARSE(stat); }
if (mmdata->spgemmDesc) { stat = cusparseSpGEMM_destroyDescr(mmdata->spgemmDesc);CHKERRCUSPARSE(stat); }
#endif
ierr = MatDestroy(&mmdata->X);CHKERRQ(ierr);
ierr = PetscFree(data);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PETSC_INTERN PetscErrorCode MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(Mat,Mat,Mat,PetscBool,PetscBool);
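/* Numeric phase of C = op(A)*op(B) with A sparse and B dense: uses cusparseSpMM on CUDA >= 11
(transposing B as needed) or csrmm on older CUDA (with an explicit B^T via cublasXgeam for
ABt/RARt), and completes PtAP/RARt with a dense-dense product against X */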
static PetscErrorCode MatProductNumeric_SeqAIJCUSPARSE_SeqDENSECUDA(Mat C)
{
Mat_Product *product = C->product;
Mat A,B;
PetscInt m,n,blda,clda;
PetscBool flg,biscuda;
Mat_SeqAIJCUSPARSE *cusp;
cusparseStatus_t stat;
cusparseOperation_t opA;
const PetscScalar *barray;
PetscScalar *carray;
PetscErrorCode ierr;
MatMatCusparse *mmdata;
Mat_SeqAIJCUSPARSEMultStruct *mat;
CsrMatrix *csrmat;
cudaError_t cerr;
PetscFunctionBegin;
MatCheckProduct(C,1);
if (!C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data empty");
mmdata = (MatMatCusparse*)product->data;
A = product->A;
B = product->B;
ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Not for type %s",((PetscObject)A)->type_name);
/* currently CopyToGPU does not copy if the matrix is bound to the CPU;
instead of silently accepting the wrong answer, we prefer to raise an error */
if (A->boundtocpu) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases");
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
switch (product->type) {
case MATPRODUCT_AB:
case MATPRODUCT_PtAP:
mat = cusp->mat;
opA = CUSPARSE_OPERATION_NON_TRANSPOSE;
m = A->rmap->n;
n = B->cmap->n;
break;
case MATPRODUCT_AtB:
if (!cusp->transgen) {
mat = cusp->mat;
opA = CUSPARSE_OPERATION_TRANSPOSE;
} else {
ierr = MatSeqAIJCUSPARSEGenerateTransposeForMult(A);CHKERRQ(ierr);
mat = cusp->matTranspose;
opA = CUSPARSE_OPERATION_NON_TRANSPOSE;
}
m = A->cmap->n;
n = B->cmap->n;
break;
case MATPRODUCT_ABt:
case MATPRODUCT_RARt:
mat = cusp->mat;
opA = CUSPARSE_OPERATION_NON_TRANSPOSE;
m = A->rmap->n;
n = B->rmap->n;
break;
default:
SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Unsupported product type %s",MatProductTypes[product->type]);
}
if (!mat) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing Mat_SeqAIJCUSPARSEMultStruct");
csrmat = (CsrMatrix*)mat->mat;
/* if the user passed a CPU matrix, copy the data to the GPU */
ierr = PetscObjectTypeCompare((PetscObject)B,MATSEQDENSECUDA,&biscuda);CHKERRQ(ierr);
if (!biscuda) {ierr = MatConvert(B,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr);}
ierr = MatDenseCUDAGetArrayRead(B,&barray);CHKERRQ(ierr);
ierr = MatDenseGetLDA(B,&blda);CHKERRQ(ierr);
if (product->type == MATPRODUCT_RARt || product->type == MATPRODUCT_PtAP) {
ierr = MatDenseCUDAGetArrayWrite(mmdata->X,&carray);CHKERRQ(ierr);
ierr = MatDenseGetLDA(mmdata->X,&clda);CHKERRQ(ierr);
} else {
ierr = MatDenseCUDAGetArrayWrite(C,&carray);CHKERRQ(ierr);
ierr = MatDenseGetLDA(C,&clda);CHKERRQ(ierr);
}
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
cusparseOperation_t opB = (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) ? CUSPARSE_OPERATION_TRANSPOSE : CUSPARSE_OPERATION_NON_TRANSPOSE;
/* (re)allocate mmBuffer if not initialized or LDAs are different */
if (!mmdata->initialized || mmdata->Blda != blda || mmdata->Clda != clda) {
size_t mmBufferSize;
if (mmdata->initialized && mmdata->Blda != blda) {stat = cusparseDestroyDnMat(mmdata->matBDescr);CHKERRCUSPARSE(stat); mmdata->matBDescr = NULL;}
if (!mmdata->matBDescr) {
stat = cusparseCreateDnMat(&mmdata->matBDescr,B->rmap->n,B->cmap->n,blda,(void*)barray,cusparse_scalartype,CUSPARSE_ORDER_COL);CHKERRCUSPARSE(stat);
mmdata->Blda = blda;
}
if (mmdata->initialized && mmdata->Clda != clda) {stat = cusparseDestroyDnMat(mmdata->matCDescr);CHKERRCUSPARSE(stat); mmdata->matCDescr = NULL;}
if (!mmdata->matCDescr) { /* matCDescr is for C or mmdata->X */
stat = cusparseCreateDnMat(&mmdata->matCDescr,m,n,clda,(void*)carray,cusparse_scalartype,CUSPARSE_ORDER_COL);CHKERRCUSPARSE(stat);
mmdata->Clda = clda;
}
if (!mat->matDescr) {
stat = cusparseCreateCsr(&mat->matDescr,
csrmat->num_rows, csrmat->num_cols, csrmat->num_entries,
csrmat->row_offsets->data().get(), csrmat->column_indices->data().get(),
csrmat->values->data().get(),
CUSPARSE_INDEX_32I,CUSPARSE_INDEX_32I, /* row offset, col idx types due to THRUSTINTARRAY32 */
CUSPARSE_INDEX_BASE_ZERO,cusparse_scalartype);CHKERRCUSPARSE(stat);
}
stat = cusparseSpMM_bufferSize(cusp->handle,opA,opB,mat->alpha_one,
mat->matDescr,mmdata->matBDescr,mat->beta_zero,
mmdata->matCDescr,cusparse_scalartype,
cusp->spmmAlg,&mmBufferSize);CHKERRCUSPARSE(stat);
if ((mmdata->mmBuffer && mmdata->mmBufferSize < mmBufferSize) || !mmdata->mmBuffer) {
cerr = cudaFree(mmdata->mmBuffer);CHKERRCUDA(cerr);
cerr = cudaMalloc(&mmdata->mmBuffer,mmBufferSize);CHKERRCUDA(cerr);
mmdata->mmBufferSize = mmBufferSize;
}
mmdata->initialized = PETSC_TRUE;
} else {
/* to be safe, always update pointers of the mats */
stat = cusparseSpMatSetValues(mat->matDescr,csrmat->values->data().get());CHKERRCUSPARSE(stat);
stat = cusparseDnMatSetValues(mmdata->matBDescr,(void*)barray);CHKERRCUSPARSE(stat);
stat = cusparseDnMatSetValues(mmdata->matCDescr,(void*)carray);CHKERRCUSPARSE(stat);
}
/* do cusparseSpMM, which supports transpose on B */
stat = cusparseSpMM(cusp->handle,opA,opB,mat->alpha_one,
mat->matDescr,mmdata->matBDescr,mat->beta_zero,
mmdata->matCDescr,cusparse_scalartype,
cusp->spmmAlg,mmdata->mmBuffer);CHKERRCUSPARSE(stat);
#else
PetscInt k;
/* cusparseXcsrmm does not support transpose on B */
if (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) {
cublasHandle_t cublasv2handle;
cublasStatus_t cerr;
ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr);
cerr = cublasXgeam(cublasv2handle,CUBLAS_OP_T,CUBLAS_OP_T,
B->cmap->n,B->rmap->n,
&PETSC_CUSPARSE_ONE ,barray,blda,
&PETSC_CUSPARSE_ZERO,barray,blda,
mmdata->Bt,B->cmap->n);CHKERRCUBLAS(cerr);
blda = B->cmap->n;
k = B->cmap->n;
} else {
k = B->rmap->n;
}
/* perform the MatMat operation, op(A) is m x k, op(B) is k x n */
stat = cusparse_csr_spmm(cusp->handle,opA,m,n,k,
csrmat->num_entries,mat->alpha_one,mat->descr,
csrmat->values->data().get(),
csrmat->row_offsets->data().get(),
csrmat->column_indices->data().get(),
mmdata->Bt ? mmdata->Bt : barray,blda,mat->beta_zero,
carray,clda);CHKERRCUSPARSE(stat);
#endif
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogGpuFlops(n*2.0*csrmat->num_entries);CHKERRQ(ierr);
ierr = MatDenseCUDARestoreArrayRead(B,&barray);CHKERRQ(ierr);
if (product->type == MATPRODUCT_RARt) {
ierr = MatDenseCUDARestoreArrayWrite(mmdata->X,&carray);CHKERRQ(ierr);
ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(B,mmdata->X,C,PETSC_FALSE,PETSC_FALSE);CHKERRQ(ierr);
} else if (product->type == MATPRODUCT_PtAP) {
ierr = MatDenseCUDARestoreArrayWrite(mmdata->X,&carray);CHKERRQ(ierr);
ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(B,mmdata->X,C,PETSC_TRUE,PETSC_FALSE);CHKERRQ(ierr);
} else {
ierr = MatDenseCUDARestoreArrayWrite(C,&carray);CHKERRQ(ierr);
}
if (mmdata->cisdense) {
ierr = MatConvert(C,MATSEQDENSE,MAT_INPLACE_MATRIX,&C);CHKERRQ(ierr);
}
if (!biscuda) {
ierr = MatConvert(B,MATSEQDENSE,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
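/* Symbolic phase for the sparse-dense products: set the sizes and (CUDA) dense type of C and
allocate the MatMatCusparse scratch data, including the intermediate dense matrix X needed
for PtAP and RARt */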
static PetscErrorCode MatProductSymbolic_SeqAIJCUSPARSE_SeqDENSECUDA(Mat C)
{
Mat_Product *product = C->product;
Mat A,B;
PetscInt m,n;
PetscBool cisdense,flg;
PetscErrorCode ierr;
MatMatCusparse *mmdata;
Mat_SeqAIJCUSPARSE *cusp;
PetscFunctionBegin;
MatCheckProduct(C,1);
if (C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data not empty");
A = product->A;
B = product->B;
ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for type %s",((PetscObject)A)->type_name);
cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
if (cusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format");
switch (product->type) {
case MATPRODUCT_AB:
m = A->rmap->n;
n = B->cmap->n;
break;
case MATPRODUCT_AtB:
m = A->cmap->n;
n = B->cmap->n;
break;
case MATPRODUCT_ABt:
m = A->rmap->n;
n = B->rmap->n;
break;
case MATPRODUCT_PtAP:
m = B->cmap->n;
n = B->cmap->n;
break;
case MATPRODUCT_RARt:
m = B->rmap->n;
n = B->rmap->n;
break;
default:
SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Unsupported product type %s",MatProductTypes[product->type]);
}
ierr = MatSetSizes(C,m,n,m,n);CHKERRQ(ierr);
/* if C is of type MATSEQDENSE (CPU), perform the operation on the GPU and then copy the result back to the CPU */
ierr = PetscObjectTypeCompare((PetscObject)C,MATSEQDENSE,&cisdense);CHKERRQ(ierr);
ierr = MatSetType(C,MATSEQDENSECUDA);CHKERRQ(ierr);
/* product data */
ierr = PetscNew(&mmdata);CHKERRQ(ierr);
mmdata->cisdense = cisdense;
#if PETSC_PKG_CUDA_VERSION_LT(11,0,0)
/* cusparseXcsrmm does not support transpose on B, so we allocate a buffer to store B^T */
if (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) {
cudaError_t cerr = cudaMalloc((void**)&mmdata->Bt,(size_t)B->rmap->n*(size_t)B->cmap->n*sizeof(PetscScalar));CHKERRCUDA(cerr);
}
#endif
/* for these products we need intermediate storage */
if (product->type == MATPRODUCT_RARt || product->type == MATPRODUCT_PtAP) {
ierr = MatCreate(PetscObjectComm((PetscObject)C),&mmdata->X);CHKERRQ(ierr);
ierr = MatSetType(mmdata->X,MATSEQDENSECUDA);CHKERRQ(ierr);
if (product->type == MATPRODUCT_RARt) { /* do not preallocate, since the first call to MatDenseCUDAGetArray will preallocate on the GPU for us */
ierr = MatSetSizes(mmdata->X,A->rmap->n,B->rmap->n,A->rmap->n,B->rmap->n);CHKERRQ(ierr);
} else {
ierr = MatSetSizes(mmdata->X,A->rmap->n,B->cmap->n,A->rmap->n,B->cmap->n);CHKERRQ(ierr);
}
}
C->product->data = mmdata;
C->product->destroy = MatDestroy_MatMatCusparse;
C->ops->productnumeric = MatProductNumeric_SeqAIJCUSPARSE_SeqDENSECUDA;
PetscFunctionReturn(0);
}
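/* Numeric phase of the sparse-sparse products (AB, and AtB/ABt via the cached transposes):
on CUDA >= 11 it reuses the SpGEMM descriptor and buffers created in the symbolic phase,
otherwise it calls csrgemm directly */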
static PetscErrorCode MatProductNumeric_SeqAIJCUSPARSE_SeqAIJCUSPARSE(Mat C)
{
Mat_Product *product = C->product;
Mat A,B;
Mat_SeqAIJCUSPARSE *Acusp,*Bcusp,*Ccusp;
Mat_SeqAIJ *c = (Mat_SeqAIJ*)C->data;
Mat_SeqAIJCUSPARSEMultStruct *Amat,*Bmat,*Cmat;
CsrMatrix *Acsr,*Bcsr,*Ccsr;
PetscBool flg;
PetscErrorCode ierr;
cusparseStatus_t stat;
cudaError_t cerr;
MatProductType ptype;
MatMatCusparse *mmdata;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
cusparseSpMatDescr_t BmatSpDescr;
#endif
PetscFunctionBegin;
MatCheckProduct(C,1);
if (!C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data empty");
ierr = PetscObjectTypeCompare((PetscObject)C,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for C of type %s",((PetscObject)C)->type_name);
mmdata = (MatMatCusparse*)C->product->data;
A = product->A;
B = product->B;
if (mmdata->reusesym) { /* this happens when api_user is true, meaning that the matrix values have already been computed in the MatProductSymbolic phase */
mmdata->reusesym = PETSC_FALSE;
Ccusp = (Mat_SeqAIJCUSPARSE*)C->spptr;
if (Ccusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format");
Cmat = Ccusp->mat;
if (!Cmat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing C mult struct for product type %s",MatProductTypes[C->product->type]);
Ccsr = (CsrMatrix*)Cmat->mat;
if (!Ccsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing C CSR struct");
goto finalize;
}
if (!c->nz) goto finalize;
ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for type %s",((PetscObject)A)->type_name);
ierr = PetscObjectTypeCompare((PetscObject)B,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for B of type %s",((PetscObject)B)->type_name);
if (A->boundtocpu) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONG,"Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases");
if (B->boundtocpu) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONG,"Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases");
Acusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
Bcusp = (Mat_SeqAIJCUSPARSE*)B->spptr;
Ccusp = (Mat_SeqAIJCUSPARSE*)C->spptr;
if (Acusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format");
if (Bcusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format");
if (Ccusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format");
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSECopyToGPU(B);CHKERRQ(ierr);
ptype = product->type;
if (A->symmetric && ptype == MATPRODUCT_AtB) ptype = MATPRODUCT_AB;
if (B->symmetric && ptype == MATPRODUCT_ABt) ptype = MATPRODUCT_AB;
switch (ptype) {
case MATPRODUCT_AB:
Amat = Acusp->mat;
Bmat = Bcusp->mat;
break;
case MATPRODUCT_AtB:
Amat = Acusp->matTranspose;
Bmat = Bcusp->mat;
break;
case MATPRODUCT_ABt:
Amat = Acusp->mat;
Bmat = Bcusp->matTranspose;
break;
default:
SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Unsupported product type %s",MatProductTypes[product->type]);
}
Cmat = Ccusp->mat;
if (!Amat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing A mult struct for product type %s",MatProductTypes[ptype]);
if (!Bmat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing B mult struct for product type %s",MatProductTypes[ptype]);
if (!Cmat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing C mult struct for product type %s",MatProductTypes[ptype]);
Acsr = (CsrMatrix*)Amat->mat;
Bcsr = mmdata->Bcsr ? mmdata->Bcsr : (CsrMatrix*)Bmat->mat; /* B may be in compressed row storage */
Ccsr = (CsrMatrix*)Cmat->mat;
if (!Acsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing A CSR struct");
if (!Bcsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing B CSR struct");
if (!Ccsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing C CSR struct");
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
BmatSpDescr = mmdata->Bcsr ? mmdata->matSpBDescr : Bmat->matDescr; /* B may be in compressed row storage */
stat = cusparseSpGEMM_compute(Ccusp->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT,
mmdata->spgemmDesc, &mmdata->mmBufferSize, mmdata->mmBuffer);CHKERRCUSPARSE(stat);
stat = cusparseSpGEMM_copy(Ccusp->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc);CHKERRCUSPARSE(stat);
#else
stat = cusparse_csr_spgemm(Ccusp->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE,
Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols,
Amat->descr, Acsr->num_entries, Acsr->values->data().get(), Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(),
Bmat->descr, Bcsr->num_entries, Bcsr->values->data().get(), Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(),
Cmat->descr, Ccsr->values->data().get(), Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get());CHKERRCUSPARSE(stat);
#endif
ierr = PetscLogGpuFlops(mmdata->flops);CHKERRQ(ierr);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
C->offloadmask = PETSC_OFFLOAD_GPU;
finalize:
/* shorter version of MatAssemblyEnd_SeqAIJ */
ierr = PetscInfo3(C,"Matrix size: %D X %D; storage space: 0 unneeded,%D used\n",C->rmap->n,C->cmap->n,c->nz);CHKERRQ(ierr);
ierr = PetscInfo(C,"Number of mallocs during MatSetValues() is 0\n");CHKERRQ(ierr);
ierr = PetscInfo1(C,"Maximum nonzeros in any row is %D\n",c->rmax);CHKERRQ(ierr);
c->reallocs = 0;
C->info.mallocs += 0;
C->info.nz_unneeded = 0;
C->assembled = C->was_assembled = PETSC_TRUE;
C->num_ass++;
PetscFunctionReturn(0);
}
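/* Symbolic phase of the sparse-sparse products: computes the nonzero pattern of C on the GPU
(SpGEMM on CUDA >= 11; on older CUDA csrgemm, which also computes the values), allocates the
CSR arrays, and mirrors the pattern back into the host Mat_SeqAIJ structure */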
static PetscErrorCode MatProductSymbolic_SeqAIJCUSPARSE_SeqAIJCUSPARSE(Mat C)
{
Mat_Product *product = C->product;
Mat A,B;
Mat_SeqAIJCUSPARSE *Acusp,*Bcusp,*Ccusp;
Mat_SeqAIJ *a,*b,*c;
Mat_SeqAIJCUSPARSEMultStruct *Amat,*Bmat,*Cmat;
CsrMatrix *Acsr,*Bcsr,*Ccsr;
PetscInt i,j,m,n,k;
PetscBool flg;
PetscErrorCode ierr;
cusparseStatus_t stat;
cudaError_t cerr;
MatProductType ptype;
MatMatCusparse *mmdata;
PetscLogDouble flops;
PetscBool biscompressed,ciscompressed;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
int64_t C_num_rows1, C_num_cols1, C_nnz1;
size_t bufSize2;
cusparseSpMatDescr_t BmatSpDescr;
#else
int cnz;
#endif
PetscFunctionBegin;
MatCheckProduct(C,1);
if (C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data not empty");
A = product->A;
B = product->B;
ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for type %s",((PetscObject)A)->type_name);
ierr = PetscObjectTypeCompare((PetscObject)B,MATSEQAIJCUSPARSE,&flg);CHKERRQ(ierr);
if (!flg) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Not for B of type %s",((PetscObject)B)->type_name);
a = (Mat_SeqAIJ*)A->data;
b = (Mat_SeqAIJ*)B->data;
Acusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
Bcusp = (Mat_SeqAIJCUSPARSE*)B->spptr;
if (Acusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format");
if (Bcusp->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Only for MAT_CUSPARSE_CSR format");
/* product data */
ierr = PetscNew(&mmdata);CHKERRQ(ierr);
C->product->data = mmdata;
C->product->destroy = MatDestroy_MatMatCusparse;
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSECopyToGPU(B);CHKERRQ(ierr);
ptype = product->type;
if (A->symmetric && ptype == MATPRODUCT_AtB) ptype = MATPRODUCT_AB;
if (B->symmetric && ptype == MATPRODUCT_ABt) ptype = MATPRODUCT_AB;
biscompressed = PETSC_FALSE;
ciscompressed = PETSC_FALSE;
switch (ptype) {
case MATPRODUCT_AB:
m = A->rmap->n;
n = B->cmap->n;
k = A->cmap->n;
Amat = Acusp->mat;
Bmat = Bcusp->mat;
if (a->compressedrow.use) ciscompressed = PETSC_TRUE;
if (b->compressedrow.use) biscompressed = PETSC_TRUE;
break;
case MATPRODUCT_AtB:
m = A->cmap->n;
n = B->cmap->n;
k = A->rmap->n;
ierr = MatSeqAIJCUSPARSEGenerateTransposeForMult(A);CHKERRQ(ierr);
Amat = Acusp->matTranspose;
Bmat = Bcusp->mat;
if (b->compressedrow.use) biscompressed = PETSC_TRUE;
break;
case MATPRODUCT_ABt:
m = A->rmap->n;
n = B->rmap->n;
k = A->cmap->n;
ierr = MatSeqAIJCUSPARSEGenerateTransposeForMult(B);CHKERRQ(ierr);
Amat = Acusp->mat;
Bmat = Bcusp->matTranspose;
if (a->compressedrow.use) ciscompressed = PETSC_TRUE;
break;
default:
SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Unsupported product type %s",MatProductTypes[product->type]);
}
/* create cusparse matrix */
ierr = MatSetSizes(C,m,n,m,n);CHKERRQ(ierr);
ierr = MatSetType(C,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
c = (Mat_SeqAIJ*)C->data;
Ccusp = (Mat_SeqAIJCUSPARSE*)C->spptr;
Cmat = new Mat_SeqAIJCUSPARSEMultStruct;
Ccsr = new CsrMatrix;
c->compressedrow.use = ciscompressed;
if (c->compressedrow.use) { /* if a is in compressed row format, then c will also be in compressed row format */
c->compressedrow.nrows = a->compressedrow.nrows;
ierr = PetscMalloc2(c->compressedrow.nrows+1,&c->compressedrow.i,c->compressedrow.nrows,&c->compressedrow.rindex);CHKERRQ(ierr);
ierr = PetscArraycpy(c->compressedrow.rindex,a->compressedrow.rindex,c->compressedrow.nrows);CHKERRQ(ierr);
Ccusp->workVector = new THRUSTARRAY(c->compressedrow.nrows);
Cmat->cprowIndices = new THRUSTINTARRAY(c->compressedrow.nrows);
Cmat->cprowIndices->assign(c->compressedrow.rindex,c->compressedrow.rindex + c->compressedrow.nrows);
} else {
c->compressedrow.nrows = 0;
c->compressedrow.i = NULL;
c->compressedrow.rindex = NULL;
Ccusp->workVector = NULL;
Cmat->cprowIndices = NULL;
}
Ccusp->nrows = ciscompressed ? c->compressedrow.nrows : m;
Ccusp->mat = Cmat;
Ccusp->mat->mat = Ccsr;
Ccsr->num_rows = Ccusp->nrows;
Ccsr->num_cols = n;
Ccsr->row_offsets = new THRUSTINTARRAY32(Ccusp->nrows+1);
stat = cusparseCreateMatDescr(&Cmat->descr);CHKERRCUSPARSE(stat);
stat = cusparseSetMatIndexBase(Cmat->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
stat = cusparseSetMatType(Cmat->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
cerr = cudaMalloc((void **)&(Cmat->alpha_one),sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = cudaMalloc((void **)&(Cmat->beta_zero),sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = cudaMalloc((void **)&(Cmat->beta_one), sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = cudaMemcpy(Cmat->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = cudaMemcpy(Cmat->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = cudaMemcpy(Cmat->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
if (!Ccsr->num_rows || !Ccsr->num_cols || !a->nz || !b->nz) { /* cusparse raises errors in various calls when matrices have zero rows/columns! */
thrust::fill(thrust::device,Ccsr->row_offsets->begin(),Ccsr->row_offsets->end(),0);
c->nz = 0;
Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
Ccsr->values = new THRUSTARRAY(c->nz);
goto finalizesym;
}
if (!Amat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing A mult struct for product type %s",MatProductTypes[ptype]);
if (!Bmat) SETERRQ1(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing B mult struct for product type %s",MatProductTypes[ptype]);
Acsr = (CsrMatrix*)Amat->mat;
if (!biscompressed) {
Bcsr = (CsrMatrix*)Bmat->mat;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
BmatSpDescr = Bmat->matDescr;
#endif
} else { /* we need to use row offsets for the full matrix */
CsrMatrix *cBcsr = (CsrMatrix*)Bmat->mat;
Bcsr = new CsrMatrix;
Bcsr->num_rows = B->rmap->n;
Bcsr->num_cols = cBcsr->num_cols;
Bcsr->num_entries = cBcsr->num_entries;
Bcsr->column_indices = cBcsr->column_indices;
Bcsr->values = cBcsr->values;
if (!Bcusp->rowoffsets_gpu) {
Bcusp->rowoffsets_gpu = new THRUSTINTARRAY32(B->rmap->n + 1);
Bcusp->rowoffsets_gpu->assign(b->i,b->i + B->rmap->n + 1);
ierr = PetscLogCpuToGpu((B->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr);
}
Bcsr->row_offsets = Bcusp->rowoffsets_gpu;
mmdata->Bcsr = Bcsr;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
if (Bcsr->num_rows && Bcsr->num_cols) {
stat = cusparseCreateCsr(&mmdata->matSpBDescr, Bcsr->num_rows, Bcsr->num_cols, Bcsr->num_entries,
Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(),
Bcsr->values->data().get(),
CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);CHKERRCUSPARSE(stat);
}
BmatSpDescr = mmdata->matSpBDescr;
#endif
}
if (!Acsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing A CSR struct");
if (!Bcsr) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Missing B CSR struct");
/* precompute flops count */
if (ptype == MATPRODUCT_AB) {
for (i=0, flops = 0; i<A->rmap->n; i++) {
const PetscInt st = a->i[i];
const PetscInt en = a->i[i+1];
for (j=st; j<en; j++) {
const PetscInt brow = a->j[j];
flops += 2.*(b->i[brow+1] - b->i[brow]);
}
}
} else if (ptype == MATPRODUCT_AtB) {
for (i=0, flops = 0; i<A->rmap->n; i++) {
const PetscInt anzi = a->i[i+1] - a->i[i];
const PetscInt bnzi = b->i[i+1] - b->i[i];
flops += (2.*anzi)*bnzi;
}
} else { /* TODO */
flops = 0.;
}
mmdata->flops = flops;
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
stat = cusparseSetPointerMode(Ccusp->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
stat = cusparseCreateCsr(&Cmat->matDescr, Ccsr->num_rows, Ccsr->num_cols, 0,
NULL, NULL, NULL,
CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);CHKERRCUSPARSE(stat);
stat = cusparseSpGEMM_createDescr(&mmdata->spgemmDesc);CHKERRCUSPARSE(stat);
/* ask bufferSize bytes for external memory */
stat = cusparseSpGEMM_workEstimation(Ccusp->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT,
mmdata->spgemmDesc, &bufSize2, NULL);CHKERRCUSPARSE(stat);
cerr = cudaMalloc((void**) &mmdata->mmBuffer2, bufSize2);CHKERRCUDA(cerr);
/* inspect the matrices A and B to understand the memory requirement for the next step */
stat = cusparseSpGEMM_workEstimation(Ccusp->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT,
mmdata->spgemmDesc, &bufSize2, mmdata->mmBuffer2);CHKERRCUSPARSE(stat);
/* ask for the buffer size again, this time for the compute step */
stat = cusparseSpGEMM_compute(Ccusp->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT,
mmdata->spgemmDesc, &mmdata->mmBufferSize, NULL);CHKERRCUSPARSE(stat);
/* Neither the CUSPARSE documentation nor the API is clear here: we need both buffers to
perform the operations properly! mmdata->mmBuffer2 does not appear anywhere in the
compute/copy API; it only appears in the workEstimation calls, yet it seems to be needed
in compute, so the address is probably stored in the descriptor! What a messy API... */
cerr = cudaMalloc((void**) &mmdata->mmBuffer, mmdata->mmBufferSize);CHKERRCUDA(cerr);
/* compute the intermediate product of A * B */
stat = cusparseSpGEMM_compute(Ccusp->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT,
mmdata->spgemmDesc, &mmdata->mmBufferSize, mmdata->mmBuffer);CHKERRCUSPARSE(stat);
/* get matrix C non-zero entries C_nnz1 */
stat = cusparseSpMatGetSize(Cmat->matDescr, &C_num_rows1, &C_num_cols1, &C_nnz1);CHKERRCUSPARSE(stat);
c->nz = (PetscInt) C_nnz1;
ierr = PetscInfo9(C,"Buffer sizes for type %s, result %D x %D (k %D, nzA %D, nzB %D, nzC %D) are: %ldKB %ldKB\n",MatProductTypes[ptype],m,n,k,a->nz,b->nz,c->nz,bufSize2/1024,mmdata->mmBufferSize/1024);CHKERRQ(ierr);
Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
CHKERRCUDA(cudaPeekAtLastError()); /* catch out of memory errors */
Ccsr->values = new THRUSTARRAY(c->nz);
CHKERRCUDA(cudaPeekAtLastError()); /* catch out of memory errors */
stat = cusparseCsrSetPointers(Cmat->matDescr, Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get(),
Ccsr->values->data().get());CHKERRCUSPARSE(stat);
stat = cusparseSpGEMM_copy(Ccusp->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE,
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr,
cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc);CHKERRCUSPARSE(stat);
#else
stat = cusparseSetPointerMode(Ccusp->handle, CUSPARSE_POINTER_MODE_HOST);CHKERRCUSPARSE(stat);
stat = cusparseXcsrgemmNnz(Ccusp->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE,
Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols,
Amat->descr, Acsr->num_entries, Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(),
Bmat->descr, Bcsr->num_entries, Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(),
Cmat->descr, Ccsr->row_offsets->data().get(), &cnz);CHKERRCUSPARSE(stat);
c->nz = cnz;
Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
CHKERRCUDA(cudaPeekAtLastError()); /* catch out of memory errors */
Ccsr->values = new THRUSTARRAY(c->nz);
CHKERRCUDA(cudaPeekAtLastError()); /* catch out of memory errors */
stat = cusparseSetPointerMode(Ccusp->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
/* with the old gemm interface (removed from 11.0 on) we cannot compute the symbolic factorization only.
   I have tried using the gemm2 interface (alpha * A * B + beta * D), which allows one to do the symbolic phase by passing NULL for the values, but it seems quite buggy when
   D is NULL, despite the fact that the CUSPARSE documentation claims it is supported! */
stat = cusparse_csr_spgemm(Ccusp->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE,
Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols,
Amat->descr, Acsr->num_entries, Acsr->values->data().get(), Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(),
Bmat->descr, Bcsr->num_entries, Bcsr->values->data().get(), Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(),
Cmat->descr, Ccsr->values->data().get(), Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get());CHKERRCUSPARSE(stat);
#endif
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuFlops(mmdata->flops);CHKERRQ(ierr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
finalizesym:
c->singlemalloc = PETSC_FALSE;
c->free_a = PETSC_TRUE;
c->free_ij = PETSC_TRUE;
ierr = PetscMalloc1(m+1,&c->i);CHKERRQ(ierr);
ierr = PetscMalloc1(c->nz,&c->j);CHKERRQ(ierr);
if (PetscDefined(USE_64BIT_INDICES)) { /* 32 to 64 bit conversion on the GPU and then copy to host (lazy) */
PetscInt *d_i = c->i;
THRUSTINTARRAY ii(Ccsr->row_offsets->size());
THRUSTINTARRAY jj(Ccsr->column_indices->size());
ii = *Ccsr->row_offsets;
jj = *Ccsr->column_indices;
if (ciscompressed) d_i = c->compressedrow.i;
cerr = cudaMemcpy(d_i,ii.data().get(),Ccsr->row_offsets->size()*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
cerr = cudaMemcpy(c->j,jj.data().get(),Ccsr->column_indices->size()*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
} else {
PetscInt *d_i = c->i;
if (ciscompressed) d_i = c->compressedrow.i;
cerr = cudaMemcpy(d_i,Ccsr->row_offsets->data().get(),Ccsr->row_offsets->size()*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
cerr = cudaMemcpy(c->j,Ccsr->column_indices->data().get(),Ccsr->column_indices->size()*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
}
if (ciscompressed) { /* need to expand host row offsets */
PetscInt r = 0;
c->i[0] = 0;
for (k = 0; k < c->compressedrow.nrows; k++) {
const PetscInt next = c->compressedrow.rindex[k];
const PetscInt old = c->compressedrow.i[k];
for (; r < next; r++) c->i[r+1] = old;
}
for (; r < m; r++) c->i[r+1] = c->compressedrow.i[c->compressedrow.nrows];
}
ierr = PetscLogGpuToCpu((Ccsr->column_indices->size() + Ccsr->row_offsets->size())*sizeof(PetscInt));CHKERRQ(ierr);
ierr = PetscMalloc1(m,&c->ilen);CHKERRQ(ierr);
ierr = PetscMalloc1(m,&c->imax);CHKERRQ(ierr);
c->maxnz = c->nz;
c->nonzerorowcnt = 0;
c->rmax = 0;
for (k = 0; k < m; k++) {
const PetscInt nn = c->i[k+1] - c->i[k];
c->ilen[k] = c->imax[k] = nn;
c->nonzerorowcnt += (PetscInt)!!nn;
c->rmax = PetscMax(c->rmax,nn);
}
ierr = MatMarkDiagonal_SeqAIJ(C);CHKERRQ(ierr);
ierr = PetscMalloc1(c->nz,&c->a);CHKERRQ(ierr);
Ccsr->num_entries = c->nz;
C->nonzerostate++;
ierr = PetscLayoutSetUp(C->rmap);CHKERRQ(ierr);
ierr = PetscLayoutSetUp(C->cmap);CHKERRQ(ierr);
Ccusp->nonzerostate = C->nonzerostate;
C->offloadmask = PETSC_OFFLOAD_UNALLOCATED;
C->preallocated = PETSC_TRUE;
C->assembled = PETSC_FALSE;
C->was_assembled = PETSC_FALSE;
if (product->api_user && A->offloadmask == PETSC_OFFLOAD_BOTH && B->offloadmask == PETSC_OFFLOAD_BOTH) { /* flag the matrix C values as computed, so that the numeric phase will only call MatAssembly */
mmdata->reusesym = PETSC_TRUE;
C->offloadmask = PETSC_OFFLOAD_GPU;
}
C->ops->productnumeric = MatProductNumeric_SeqAIJCUSPARSE_SeqAIJCUSPARSE;
PetscFunctionReturn(0);
}
PETSC_INTERN PetscErrorCode MatProductSetFromOptions_SeqAIJ_SeqDense(Mat);
/* handles sparse or dense B */
static PetscErrorCode MatProductSetFromOptions_SeqAIJCUSPARSE(Mat mat)
{
Mat_Product *product = mat->product;
PetscErrorCode ierr;
PetscBool isdense = PETSC_FALSE,Biscusp = PETSC_FALSE,Ciscusp = PETSC_TRUE;
PetscFunctionBegin;
MatCheckProduct(mat,1);
ierr = PetscObjectBaseTypeCompare((PetscObject)product->B,MATSEQDENSE,&isdense);CHKERRQ(ierr);
if (!product->A->boundtocpu && !product->B->boundtocpu) {
ierr = PetscObjectTypeCompare((PetscObject)product->B,MATSEQAIJCUSPARSE,&Biscusp);CHKERRQ(ierr);
}
if (product->type == MATPRODUCT_ABC) {
Ciscusp = PETSC_FALSE;
if (!product->C->boundtocpu) {
ierr = PetscObjectTypeCompare((PetscObject)product->C,MATSEQAIJCUSPARSE,&Ciscusp);CHKERRQ(ierr);
}
}
if (isdense) {
switch (product->type) {
case MATPRODUCT_AB:
case MATPRODUCT_AtB:
case MATPRODUCT_ABt:
case MATPRODUCT_PtAP:
case MATPRODUCT_RARt:
if (product->A->boundtocpu) {
ierr = MatProductSetFromOptions_SeqAIJ_SeqDense(mat);CHKERRQ(ierr);
} else {
mat->ops->productsymbolic = MatProductSymbolic_SeqAIJCUSPARSE_SeqDENSECUDA;
}
break;
case MATPRODUCT_ABC:
mat->ops->productsymbolic = MatProductSymbolic_ABC_Basic;
break;
default:
break;
}
} else if (Biscusp && Ciscusp) {
switch (product->type) {
case MATPRODUCT_AB:
case MATPRODUCT_AtB:
case MATPRODUCT_ABt:
mat->ops->productsymbolic = MatProductSymbolic_SeqAIJCUSPARSE_SeqAIJCUSPARSE;
break;
case MATPRODUCT_PtAP:
case MATPRODUCT_RARt:
case MATPRODUCT_ABC:
mat->ops->productsymbolic = MatProductSymbolic_ABC_Basic;
break;
default:
break;
}
} else { /* fallback for AIJ */
ierr = MatProductSetFromOptions_SeqAIJ(mat);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,NULL,yy,PETSC_FALSE,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy, Vec zz)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,yy,zz,PETSC_FALSE,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatMultHermitianTranspose_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,NULL,yy,PETSC_TRUE,PETSC_TRUE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatMultHermitianTransposeAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,yy,zz,PETSC_TRUE,PETSC_TRUE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,NULL,yy,PETSC_TRUE,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* z = op(A) x + y. If trans & !herm, op = ^T; if trans & herm, op = ^H; if !trans, op = no-op */
static PetscErrorCode MatMultAddKernel_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz,PetscBool trans,PetscBool herm)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
Mat_SeqAIJCUSPARSEMultStruct *matstruct;
PetscScalar *xarray,*zarray,*dptr,*beta,*xptr;
PetscErrorCode ierr;
cudaError_t cerr;
cusparseStatus_t stat;
cusparseOperation_t opA = CUSPARSE_OPERATION_NON_TRANSPOSE;
PetscBool compressed;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
PetscInt nx,ny;
#endif
PetscFunctionBegin;
if (herm && !trans) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"Hermitian without transpose not supported");
if (!a->nonzerorowcnt) {
if (!yy) {ierr = VecSet_SeqCUDA(zz,0);CHKERRQ(ierr);}
else {ierr = VecCopy_SeqCUDA(yy,zz);CHKERRQ(ierr);}
PetscFunctionReturn(0);
}
/* The line below is necessary due to the operations that modify the matrix on the CPU (axpy, scale, etc) */
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
if (!trans) {
matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat;
if (!matstruct) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_PLIB,"SeqAIJCUSPARSE does not have a 'mat' (need to fix)");
} else {
if (herm || !cusparsestruct->transgen) {
opA = herm ? CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE : CUSPARSE_OPERATION_TRANSPOSE;
matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat;
} else {
if (!cusparsestruct->matTranspose) {ierr = MatSeqAIJCUSPARSEGenerateTransposeForMult(A);CHKERRQ(ierr);}
matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose;
}
}
/* Does the matrix use compressed rows (i.e., drop zero rows)? */
compressed = matstruct->cprowIndices ? PETSC_TRUE : PETSC_FALSE;
try {
ierr = VecCUDAGetArrayRead(xx,(const PetscScalar**)&xarray);CHKERRQ(ierr);
if (yy == zz) {ierr = VecCUDAGetArray(zz,&zarray);CHKERRQ(ierr);} /* read & write zz, so need to get uptodate zarray on GPU */
else {ierr = VecCUDAGetArrayWrite(zz,&zarray);CHKERRQ(ierr);} /* write zz, so no need to init zarray on GPU */
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
if (opA == CUSPARSE_OPERATION_NON_TRANSPOSE) {
/* z = A x + beta y.
If A is compressed (with less rows), then Ax is shorter than the full z, so we need a work vector to store Ax.
When A is non-compressed, and z = y, we can set beta=1 to compute y = Ax + y in one call.
*/
xptr = xarray;
dptr = compressed ? cusparsestruct->workVector->data().get() : zarray;
beta = (yy == zz && !compressed) ? matstruct->beta_one : matstruct->beta_zero;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
/* Get length of x, y for y=Ax. ny might be shorter than the work vector's allocated length, since the work vector is
allocated to accommodate different uses. So we get the length info directly from mat.
*/
if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
CsrMatrix *mat = (CsrMatrix*)matstruct->mat;
nx = mat->num_cols;
ny = mat->num_rows;
}
#endif
} else {
/* z = A^T x + beta y
If A is compressed, then we need a work vector as the shorter version of x to compute A^T x.
Note A^Tx is of full length, so we set beta to 1.0 if y exists.
*/
xptr = compressed ? cusparsestruct->workVector->data().get() : xarray;
dptr = zarray;
beta = yy ? matstruct->beta_one : matstruct->beta_zero;
if (compressed) { /* Scatter x to work vector */
thrust::device_ptr<PetscScalar> xarr = thrust::device_pointer_cast(xarray);
thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(xarr, matstruct->cprowIndices->begin()))),
thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(xarr, matstruct->cprowIndices->begin()))) + matstruct->cprowIndices->size(),
VecCUDAEqualsReverse());
}
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
CsrMatrix *mat = (CsrMatrix*)matstruct->mat;
nx = mat->num_rows;
ny = mat->num_cols;
}
#endif
}
/* csr_spmv does y = alpha op(A) x + beta y */
if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
if (opA < 0 || opA > 2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cuSPARSE ABI on cusparseOperation_t has changed and PETSc has not been updated accordingly");
if (!matstruct->cuSpMV[opA].initialized) { /* built on demand */
stat = cusparseCreateDnVec(&matstruct->cuSpMV[opA].vecXDescr,nx,xptr,cusparse_scalartype);CHKERRCUSPARSE(stat);
stat = cusparseCreateDnVec(&matstruct->cuSpMV[opA].vecYDescr,ny,dptr,cusparse_scalartype);CHKERRCUSPARSE(stat);
stat = cusparseSpMV_bufferSize(cusparsestruct->handle, opA, matstruct->alpha_one,
matstruct->matDescr,
matstruct->cuSpMV[opA].vecXDescr, beta,
matstruct->cuSpMV[opA].vecYDescr,
cusparse_scalartype,
cusparsestruct->spmvAlg,
&matstruct->cuSpMV[opA].spmvBufferSize);CHKERRCUSPARSE(stat);
cerr = cudaMalloc(&matstruct->cuSpMV[opA].spmvBuffer,matstruct->cuSpMV[opA].spmvBufferSize);CHKERRCUDA(cerr);
matstruct->cuSpMV[opA].initialized = PETSC_TRUE;
} else {
/* x, y's value pointers might change between calls, but their shape is kept, so we just update pointers */
stat = cusparseDnVecSetValues(matstruct->cuSpMV[opA].vecXDescr,xptr);CHKERRCUSPARSE(stat);
stat = cusparseDnVecSetValues(matstruct->cuSpMV[opA].vecYDescr,dptr);CHKERRCUSPARSE(stat);
}
stat = cusparseSpMV(cusparsestruct->handle, opA,
matstruct->alpha_one,
matstruct->matDescr, /* built in MatSeqAIJCUSPARSECopyToGPU() or MatSeqAIJCUSPARSEGenerateTransposeForMult() */
matstruct->cuSpMV[opA].vecXDescr,
beta,
matstruct->cuSpMV[opA].vecYDescr,
cusparse_scalartype,
cusparsestruct->spmvAlg,
matstruct->cuSpMV[opA].spmvBuffer);CHKERRCUSPARSE(stat);
#else
CsrMatrix *mat = (CsrMatrix*)matstruct->mat;
stat = cusparse_csr_spmv(cusparsestruct->handle, opA,
mat->num_rows, mat->num_cols,
mat->num_entries, matstruct->alpha_one, matstruct->descr,
mat->values->data().get(), mat->row_offsets->data().get(),
mat->column_indices->data().get(), xptr, beta,
dptr);CHKERRCUSPARSE(stat);
#endif
} else {
if (cusparsestruct->nrows) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
cusparseHybMat_t hybMat = (cusparseHybMat_t)matstruct->mat;
stat = cusparse_hyb_spmv(cusparsestruct->handle, opA,
matstruct->alpha_one, matstruct->descr, hybMat,
xptr, beta,
dptr);CHKERRCUSPARSE(stat);
#endif
}
}
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
if (opA == CUSPARSE_OPERATION_NON_TRANSPOSE) {
if (yy) { /* MatMultAdd: zz = A*xx + yy */
if (compressed) { /* A is compressed. We first copy yy to zz, then ScatterAdd the work vector to zz */
ierr = VecCopy_SeqCUDA(yy,zz);CHKERRQ(ierr); /* zz = yy */
} else if (zz != yy) { /* A is not compressed. zz already contains A*xx, and we just need to add yy */
ierr = VecAXPY_SeqCUDA(zz,1.0,yy);CHKERRQ(ierr); /* zz += yy */
}
} else if (compressed) { /* MatMult: zz = A*xx. A is compressed, so we zero zz first, then ScatterAdd the work vector to zz */
ierr = VecSet_SeqCUDA(zz,0);CHKERRQ(ierr);
}
/* ScatterAdd the result from work vector into the full vector when A is compressed */
if (compressed) {
thrust::device_ptr<PetscScalar> zptr = thrust::device_pointer_cast(zarray);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))),
thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))) + matstruct->cprowIndices->size(),
VecCUDAPlusEquals());
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
}
} else {
if (yy && yy != zz) {
ierr = VecAXPY_SeqCUDA(zz,1.0,yy);CHKERRQ(ierr); /* zz += yy */
}
}
ierr = VecCUDARestoreArrayRead(xx,(const PetscScalar**)&xarray);CHKERRQ(ierr);
if (yy == zz) {ierr = VecCUDARestoreArray(zz,&zarray);CHKERRQ(ierr);}
else {ierr = VecCUDARestoreArrayWrite(zz,&zarray);CHKERRQ(ierr);}
} catch(char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
}
if (yy) {
ierr = PetscLogGpuFlops(2.0*a->nz);CHKERRQ(ierr);
} else {
ierr = PetscLogGpuFlops(2.0*a->nz-a->nonzerorowcnt);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAddKernel_SeqAIJCUSPARSE(A,xx,yy,zz,PETSC_TRUE,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatAssemblyEnd_SeqAIJCUSPARSE(Mat A,MatAssemblyType mode)
{
PetscErrorCode ierr;
PetscSplitCSRDataStructure *d_mat = NULL;
PetscFunctionBegin;
if (A->factortype == MAT_FACTOR_NONE) {
d_mat = ((Mat_SeqAIJCUSPARSE*)A->spptr)->deviceMat;
}
ierr = MatAssemblyEnd_SeqAIJ(A,mode);CHKERRQ(ierr); // this does very little if assembled on GPU - call it?
if (mode == MAT_FLUSH_ASSEMBLY || A->boundtocpu) PetscFunctionReturn(0);
if (d_mat) {
A->offloadmask = PETSC_OFFLOAD_GPU;
}
PetscFunctionReturn(0);
}
/* --------------------------------------------------------------------------------*/
/*@
MatCreateSeqAIJCUSPARSE - Creates a sparse matrix in AIJ (compressed row) format
(the default parallel PETSc format). This matrix will ultimately be pushed down
to NVidia GPUs and will use the CUSPARSE library for calculations. For good matrix
assembly performance the user should preallocate the matrix storage by setting
the parameter nz (or the array nnz). By setting these parameters accurately,
performance during matrix assembly can be increased by more than a factor of 50.
Collective
Input Parameters:
+ comm - MPI communicator, set to PETSC_COMM_SELF
. m - number of rows
. n - number of columns
. nz - number of nonzeros per row (same for all rows)
- nnz - array containing the number of nonzeros in the various rows
(possibly different for each row) or NULL
Output Parameter:
. A - the matrix
It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
MatXXXXSetPreallocation() paradigm instead of calling this routine directly.
[MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation]
Notes:
If nnz is given then nz is ignored
The AIJ format (also called the Yale sparse matrix format or
compressed row storage) is fully compatible with standard Fortran 77
storage. That is, the stored row and column indices can begin at
either one (as in Fortran) or zero. See the users' manual for details.
Specify the preallocated storage with either nz or nnz (not both).
Set nz=PETSC_DEFAULT and nnz=NULL for PETSc to control dynamic memory
allocation. For large problems you MUST preallocate memory or you
will get TERRIBLE performance, see the users' manual chapter on matrices.
By default, this format uses inodes (identical nodes) when possible, to
improve numerical efficiency of matrix-vector products and solves. We
search for consecutive rows with the same nonzero structure, thereby
reusing matrix information to achieve increased efficiency.
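Example usage (a minimal sketch; error checking omitted and the preallocation value of 5 is illustrative):
     Mat A;
     MatCreateSeqAIJCUSPARSE(PETSC_COMM_SELF,m,n,5,NULL,&A);
     MatSetValue(A,0,0,1.0,INSERT_VALUES);
     MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
     MatDestroy(&A);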
Level: intermediate
.seealso: MatCreate(), MatCreateAIJ(), MatSetValues(), MatSeqAIJSetColumnIndices(), MatCreateSeqAIJWithArrays(), MatCreateAIJ(), MATSEQAIJCUSPARSE, MATAIJCUSPARSE
@*/
PetscErrorCode MatCreateSeqAIJCUSPARSE(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt nz,const PetscInt nnz[],Mat *A)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatCreate(comm,A);CHKERRQ(ierr);
ierr = MatSetSizes(*A,m,n,m,n);CHKERRQ(ierr);
ierr = MatSetType(*A,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
ierr = MatSeqAIJSetPreallocation_SeqAIJ(*A,nz,(PetscInt*)nnz);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatDestroy_SeqAIJCUSPARSE(Mat A)
{
PetscErrorCode ierr;
PetscSplitCSRDataStructure *d_mat = NULL;
PetscFunctionBegin;
if (A->factortype == MAT_FACTOR_NONE) {
d_mat = ((Mat_SeqAIJCUSPARSE*)A->spptr)->deviceMat;
((Mat_SeqAIJCUSPARSE*)A->spptr)->deviceMat = NULL;
ierr = MatSeqAIJCUSPARSE_Destroy((Mat_SeqAIJCUSPARSE**)&A->spptr);CHKERRQ(ierr);
} else {
ierr = MatSeqAIJCUSPARSETriFactors_Destroy((Mat_SeqAIJCUSPARSETriFactors**)&A->spptr);CHKERRQ(ierr);
}
if (d_mat) {
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
cudaError_t err;
PetscSplitCSRDataStructure h_mat;
ierr = PetscInfo(A,"Have device matrix\n");CHKERRQ(ierr);
err = cudaMemcpy( &h_mat, d_mat, sizeof(PetscSplitCSRDataStructure), cudaMemcpyDeviceToHost);CHKERRCUDA(err);
if (a->compressedrow.use) {
err = cudaFree(h_mat.diag.i);CHKERRCUDA(err);
}
err = cudaFree(d_mat);CHKERRCUDA(err);
}
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJCopySubArray_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatCUSPARSESetFormat_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdense_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatFactorGetSolverType_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetPreallocationCOO_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetValuesCOO_C",NULL);CHKERRQ(ierr);
ierr = MatDestroy_SeqAIJ(A);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PETSC_INTERN PetscErrorCode MatConvert_SeqAIJ_SeqAIJCUSPARSE(Mat,MatType,MatReuse,Mat*);
static PetscErrorCode MatBindToCPU_SeqAIJCUSPARSE(Mat,PetscBool);
static PetscErrorCode MatDuplicate_SeqAIJCUSPARSE(Mat A,MatDuplicateOption cpvalues,Mat *B)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatDuplicate_SeqAIJ(A,cpvalues,B);CHKERRQ(ierr);
ierr = MatConvert_SeqAIJ_SeqAIJCUSPARSE(*B,MATSEQAIJCUSPARSE,MAT_INPLACE_MATRIX,B);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatAXPY_SeqAIJCUSPARSE(Mat Y,PetscScalar a,Mat X,MatStructure str)
{
PetscErrorCode ierr;
Mat_SeqAIJ *x = (Mat_SeqAIJ*)X->data,*y = (Mat_SeqAIJ*)Y->data;
Mat_SeqAIJCUSPARSE *cy;
Mat_SeqAIJCUSPARSE *cx;
PetscScalar *ay;
const PetscScalar *ax;
CsrMatrix *csry,*csrx;
cudaError_t cerr;
PetscFunctionBegin;
if (X->ops->axpy != Y->ops->axpy) {
ierr = MatAXPY_SeqAIJ(Y,a,X,str);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* if we are here, it means both matrices are bound to GPU */
ierr = MatSeqAIJCUSPARSECopyToGPU(Y);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSECopyToGPU(X);CHKERRQ(ierr);
cy = (Mat_SeqAIJCUSPARSE*)Y->spptr;
cx = (Mat_SeqAIJCUSPARSE*)X->spptr;
if (cy->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)Y),PETSC_ERR_PLIB,"only MAT_CUSPARSE_CSR supported");
if (cx->format != MAT_CUSPARSE_CSR) SETERRQ(PetscObjectComm((PetscObject)X),PETSC_ERR_PLIB,"only MAT_CUSPARSE_CSR supported");
csry = (CsrMatrix*)cy->mat->mat;
csrx = (CsrMatrix*)cx->mat->mat;
/* see if we can turn this into a cublas axpy */
if (str != SAME_NONZERO_PATTERN && x->nz == y->nz && !x->compressedrow.use && !y->compressedrow.use) {
bool eq = thrust::equal(thrust::device,csry->row_offsets->begin(),csry->row_offsets->end(),csrx->row_offsets->begin());
if (eq) {
eq = thrust::equal(thrust::device,csry->column_indices->begin(),csry->column_indices->end(),csrx->column_indices->begin());
}
if (eq) str = SAME_NONZERO_PATTERN;
}
if (str == SUBSET_NONZERO_PATTERN) {
cusparseStatus_t stat;
PetscScalar b = 1.0;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
size_t bufferSize;
void *buffer;
#endif
ierr = MatSeqAIJCUSPARSEGetArrayRead(X,&ax);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEGetArray(Y,&ay);CHKERRQ(ierr);
stat = cusparseSetPointerMode(cy->handle, CUSPARSE_POINTER_MODE_HOST);CHKERRCUSPARSE(stat);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
stat = cusparse_csr_spgeam_bufferSize(cy->handle,Y->rmap->n,Y->cmap->n,
&a,cx->mat->descr,x->nz,ax,csrx->row_offsets->data().get(),csrx->column_indices->data().get(),
&b,cy->mat->descr,y->nz,ay,csry->row_offsets->data().get(),csry->column_indices->data().get(),
cy->mat->descr, ay,csry->row_offsets->data().get(),csry->column_indices->data().get(),&bufferSize);CHKERRCUSPARSE(stat);
cerr = cudaMalloc(&buffer,bufferSize);CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
stat = cusparse_csr_spgeam(cy->handle,Y->rmap->n,Y->cmap->n,
&a,cx->mat->descr,x->nz,ax,csrx->row_offsets->data().get(),csrx->column_indices->data().get(),
&b,cy->mat->descr,y->nz,ay,csry->row_offsets->data().get(),csry->column_indices->data().get(),
cy->mat->descr, ay,csry->row_offsets->data().get(),csry->column_indices->data().get(),buffer);CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuFlops(x->nz + y->nz);CHKERRQ(ierr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
cerr = cudaFree(buffer);CHKERRCUDA(cerr);
#else
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
stat = cusparse_csr_spgeam(cy->handle,Y->rmap->n,Y->cmap->n,
&a,cx->mat->descr,x->nz,ax,csrx->row_offsets->data().get(),csrx->column_indices->data().get(),
&b,cy->mat->descr,y->nz,ay,csry->row_offsets->data().get(),csry->column_indices->data().get(),
cy->mat->descr, ay,csry->row_offsets->data().get(),csry->column_indices->data().get());CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuFlops(x->nz + y->nz);CHKERRQ(ierr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
#endif
stat = cusparseSetPointerMode(cy->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUSPARSE(stat);
ierr = MatSeqAIJCUSPARSERestoreArrayRead(X,&ax);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSERestoreArray(Y,&ay);CHKERRQ(ierr);
ierr = MatSeqAIJInvalidateDiagonal(Y);CHKERRQ(ierr);
} else if (str == SAME_NONZERO_PATTERN) {
cublasHandle_t cublasv2handle;
cublasStatus_t berr;
PetscBLASInt one = 1, bnz = 1;
ierr = MatSeqAIJCUSPARSEGetArrayRead(X,&ax);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEGetArray(Y,&ay);CHKERRQ(ierr);
ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr);
ierr = PetscBLASIntCast(x->nz,&bnz);CHKERRQ(ierr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
berr = cublasXaxpy(cublasv2handle,bnz,&a,ax,one,ay,one);CHKERRCUBLAS(berr);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuFlops(2.0*bnz);CHKERRQ(ierr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSERestoreArrayRead(X,&ax);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSERestoreArray(Y,&ay);CHKERRQ(ierr);
ierr = MatSeqAIJInvalidateDiagonal(Y);CHKERRQ(ierr);
} else {
ierr = MatAXPY_SeqAIJ(Y,a,X,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatZeroEntries_SeqAIJCUSPARSE(Mat A)
{
PetscErrorCode ierr;
PetscBool both = PETSC_FALSE;
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
PetscFunctionBegin;
if (A->factortype == MAT_FACTOR_NONE) {
Mat_SeqAIJCUSPARSE *spptr = (Mat_SeqAIJCUSPARSE*)A->spptr;
if (spptr->mat) {
CsrMatrix* matrix = (CsrMatrix*)spptr->mat->mat;
if (matrix->values) {
both = PETSC_TRUE;
thrust::fill(thrust::device,matrix->values->begin(),matrix->values->end(),0.);
}
}
if (spptr->matTranspose) {
CsrMatrix* matrix = (CsrMatrix*)spptr->matTranspose->mat;
if (matrix->values) {
thrust::fill(thrust::device,matrix->values->begin(),matrix->values->end(),0.);
}
}
}
//ierr = MatZeroEntries_SeqAIJ(A);CHKERRQ(ierr);
ierr = PetscArrayzero(a->a,a->i[A->rmap->n]);CHKERRQ(ierr);
ierr = MatSeqAIJInvalidateDiagonal(A);CHKERRQ(ierr);
if (both) A->offloadmask = PETSC_OFFLOAD_BOTH;
else A->offloadmask = PETSC_OFFLOAD_CPU;
PetscFunctionReturn(0);
}
static PetscErrorCode MatBindToCPU_SeqAIJCUSPARSE(Mat A,PetscBool flg)
{
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
PetscErrorCode ierr;
PetscFunctionBegin;
if (A->factortype != MAT_FACTOR_NONE) PetscFunctionReturn(0);
if (flg) {
ierr = MatSeqAIJCUSPARSECopyFromGPU(A);CHKERRQ(ierr);
A->ops->axpy = MatAXPY_SeqAIJ;
A->ops->zeroentries = MatZeroEntries_SeqAIJ;
A->ops->mult = MatMult_SeqAIJ;
A->ops->multadd = MatMultAdd_SeqAIJ;
A->ops->multtranspose = MatMultTranspose_SeqAIJ;
A->ops->multtransposeadd = MatMultTransposeAdd_SeqAIJ;
A->ops->multhermitiantranspose = NULL;
A->ops->multhermitiantransposeadd = NULL;
A->ops->productsetfromoptions = MatProductSetFromOptions_SeqAIJ;
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJCopySubArray_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdense_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetPreallocationCOO_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetValuesCOO_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJGetArray_C",MatSeqAIJGetArray_SeqAIJ);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C",NULL);CHKERRQ(ierr);
} else {
A->ops->axpy = MatAXPY_SeqAIJCUSPARSE;
A->ops->zeroentries = MatZeroEntries_SeqAIJCUSPARSE;
A->ops->mult = MatMult_SeqAIJCUSPARSE;
A->ops->multadd = MatMultAdd_SeqAIJCUSPARSE;
A->ops->multtranspose = MatMultTranspose_SeqAIJCUSPARSE;
A->ops->multtransposeadd = MatMultTransposeAdd_SeqAIJCUSPARSE;
A->ops->multhermitiantranspose = MatMultHermitianTranspose_SeqAIJCUSPARSE;
A->ops->multhermitiantransposeadd = MatMultHermitianTransposeAdd_SeqAIJCUSPARSE;
A->ops->productsetfromoptions = MatProductSetFromOptions_SeqAIJCUSPARSE;
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJCopySubArray_C",MatSeqAIJCopySubArray_SeqAIJCUSPARSE);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C",MatProductSetFromOptions_SeqAIJCUSPARSE);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqdense_C",MatProductSetFromOptions_SeqAIJCUSPARSE);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetPreallocationCOO_C",MatSetPreallocationCOO_SeqAIJCUSPARSE);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSetValuesCOO_C",MatSetValuesCOO_SeqAIJCUSPARSE);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatSeqAIJGetArray_C",MatSeqAIJGetArray_SeqAIJCUSPARSE);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C",MatProductSetFromOptions_SeqAIJCUSPARSE);CHKERRQ(ierr);
}
A->boundtocpu = flg;
a->inode.use = flg;
PetscFunctionReturn(0);
}
PETSC_INTERN PetscErrorCode MatConvert_SeqAIJ_SeqAIJCUSPARSE(Mat A, MatType mtype, MatReuse reuse, Mat* newmat)
{
PetscErrorCode ierr;
cusparseStatus_t stat;
Mat B;
PetscFunctionBegin;
ierr = PetscCUDAInitializeCheck();CHKERRQ(ierr); /* first use of CUSPARSE may be via MatConvert */
if (reuse == MAT_INITIAL_MATRIX) {
ierr = MatDuplicate(A,MAT_COPY_VALUES,newmat);CHKERRQ(ierr);
} else if (reuse == MAT_REUSE_MATRIX) {
ierr = MatCopy(A,*newmat,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
}
B = *newmat;
ierr = PetscFree(B->defaultvectype);CHKERRQ(ierr);
ierr = PetscStrallocpy(VECCUDA,&B->defaultvectype);CHKERRQ(ierr);
if (reuse != MAT_REUSE_MATRIX && !B->spptr) {
if (B->factortype == MAT_FACTOR_NONE) {
Mat_SeqAIJCUSPARSE *spptr;
ierr = PetscNew(&spptr);CHKERRQ(ierr);
spptr->format = MAT_CUSPARSE_CSR;
stat = cusparseCreate(&spptr->handle);CHKERRCUSPARSE(stat);
B->spptr = spptr;
spptr->deviceMat = NULL;
} else {
Mat_SeqAIJCUSPARSETriFactors *spptr;
ierr = PetscNew(&spptr);CHKERRQ(ierr);
stat = cusparseCreate(&spptr->handle);CHKERRCUSPARSE(stat);
B->spptr = spptr;
}
B->offloadmask = PETSC_OFFLOAD_UNALLOCATED;
}
B->ops->assemblyend = MatAssemblyEnd_SeqAIJCUSPARSE;
B->ops->destroy = MatDestroy_SeqAIJCUSPARSE;
B->ops->setfromoptions = MatSetFromOptions_SeqAIJCUSPARSE;
B->ops->bindtocpu = MatBindToCPU_SeqAIJCUSPARSE;
B->ops->duplicate = MatDuplicate_SeqAIJCUSPARSE;
ierr = MatBindToCPU_SeqAIJCUSPARSE(B,PETSC_FALSE);CHKERRQ(ierr);
ierr = PetscObjectChangeTypeName((PetscObject)B,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatCUSPARSESetFormat_C",MatCUSPARSESetFormat_SeqAIJCUSPARSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PETSC_EXTERN PetscErrorCode MatCreate_SeqAIJCUSPARSE(Mat B)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatCreate_SeqAIJ(B);CHKERRQ(ierr);
ierr = MatConvert_SeqAIJ_SeqAIJCUSPARSE(B,MATSEQAIJCUSPARSE,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr);
ierr = PetscObjectOptionsBegin((PetscObject)B);CHKERRQ(ierr);
ierr = MatSetFromOptions_SeqAIJCUSPARSE(PetscOptionsObject,B);CHKERRQ(ierr);
ierr = PetscOptionsEnd();CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/*MC
MATSEQAIJCUSPARSE - MATAIJCUSPARSE = "(seq)aijcusparse" - A matrix type to be used for sparse matrices.
A matrix type whose data resides on Nvidia GPUs. These matrices can be stored in
CSR, ELL, or Hybrid (HYB) format. The ELL and HYB formats require CUDA 4.2 or later and are no longer supported from CUDA 11 on.
All matrix calculations are performed on Nvidia GPUs using the CUSPARSE library.
Options Database Keys:
+ -mat_type aijcusparse - sets the matrix type to "seqaijcusparse" during a call to MatSetFromOptions()
. -mat_cusparse_storage_format csr - sets the storage format of matrices (for MatMult and factors in MatSolve) during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid).
- -mat_cusparse_mult_storage_format csr - sets the storage format of matrices (for MatMult) during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid).
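For example, an existing AIJ-based code can be switched to this GPU matrix type at runtime with options such as (the executable name is illustrative)
     ./myapp -mat_type aijcusparse -mat_cusparse_storage_format csr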
Level: beginner
.seealso: MatCreateSeqAIJCUSPARSE(), MATAIJCUSPARSE, MatCreateAIJCUSPARSE(), MatCUSPARSESetFormat(), MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation
M*/
PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse(Mat,MatFactorType,Mat*);
PETSC_EXTERN PetscErrorCode MatSolverTypeRegister_CUSPARSE(void)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSolverTypeRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_LU,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
ierr = MatSolverTypeRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_CHOLESKY,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
ierr = MatSolverTypeRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_ILU,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
ierr = MatSolverTypeRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_ICC,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE **cusparsestruct)
{
PetscErrorCode ierr;
cusparseStatus_t stat;
PetscFunctionBegin;
if (*cusparsestruct) {
ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*cusparsestruct)->mat,(*cusparsestruct)->format);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*cusparsestruct)->matTranspose,(*cusparsestruct)->format);CHKERRQ(ierr);
delete (*cusparsestruct)->workVector;
delete (*cusparsestruct)->rowoffsets_gpu;
delete (*cusparsestruct)->cooPerm;
delete (*cusparsestruct)->cooPerm_a;
if ((*cusparsestruct)->handle) {stat = cusparseDestroy((*cusparsestruct)->handle);CHKERRCUSPARSE(stat);}
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
cudaError_t cerr = cudaFree((*cusparsestruct)->csr2cscBuffer);CHKERRCUDA(cerr);
#endif
ierr = PetscFree(*cusparsestruct);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
static PetscErrorCode CsrMatrix_Destroy(CsrMatrix **mat)
{
PetscFunctionBegin;
if (*mat) {
delete (*mat)->values;
delete (*mat)->column_indices;
delete (*mat)->row_offsets;
delete *mat;
*mat = 0;
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct **trifactor)
{
cusparseStatus_t stat;
PetscErrorCode ierr;
PetscFunctionBegin;
if (*trifactor) {
if ((*trifactor)->descr) { stat = cusparseDestroyMatDescr((*trifactor)->descr);CHKERRCUSPARSE(stat); }
if ((*trifactor)->solveInfo) { stat = cusparse_destroy_analysis_info((*trifactor)->solveInfo);CHKERRCUSPARSE(stat); }
ierr = CsrMatrix_Destroy(&(*trifactor)->csrMat);CHKERRQ(ierr);
if ((*trifactor)->solveBuffer) {cudaError_t cerr = cudaFree((*trifactor)->solveBuffer);CHKERRCUDA(cerr);}
if ((*trifactor)->AA_h) {cudaError_t cerr = cudaFreeHost((*trifactor)->AA_h);CHKERRCUDA(cerr);}
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
if ((*trifactor)->csr2cscBuffer) {cudaError_t cerr = cudaFree((*trifactor)->csr2cscBuffer);CHKERRCUDA(cerr);}
#endif
ierr = PetscFree(*trifactor);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct **matstruct,MatCUSPARSEStorageFormat format)
{
CsrMatrix *mat;
cusparseStatus_t stat;
cudaError_t err;
PetscFunctionBegin;
if (*matstruct) {
if ((*matstruct)->mat) {
if (format==MAT_CUSPARSE_ELL || format==MAT_CUSPARSE_HYB) {
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0");
#else
cusparseHybMat_t hybMat = (cusparseHybMat_t)(*matstruct)->mat;
stat = cusparseDestroyHybMat(hybMat);CHKERRCUSPARSE(stat);
#endif
} else {
mat = (CsrMatrix*)(*matstruct)->mat;
CsrMatrix_Destroy(&mat);
}
}
if ((*matstruct)->descr) { stat = cusparseDestroyMatDescr((*matstruct)->descr);CHKERRCUSPARSE(stat); }
delete (*matstruct)->cprowIndices;
if ((*matstruct)->alpha_one) { err=cudaFree((*matstruct)->alpha_one);CHKERRCUDA(err); }
if ((*matstruct)->beta_zero) { err=cudaFree((*matstruct)->beta_zero);CHKERRCUDA(err); }
if ((*matstruct)->beta_one) { err=cudaFree((*matstruct)->beta_one);CHKERRCUDA(err); }
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
Mat_SeqAIJCUSPARSEMultStruct *mdata = *matstruct;
if (mdata->matDescr) {stat = cusparseDestroySpMat(mdata->matDescr);CHKERRCUSPARSE(stat);}
for (int i=0; i<3; i++) {
if (mdata->cuSpMV[i].initialized) {
err = cudaFree(mdata->cuSpMV[i].spmvBuffer);CHKERRCUDA(err);
stat = cusparseDestroyDnVec(mdata->cuSpMV[i].vecXDescr);CHKERRCUSPARSE(stat);
stat = cusparseDestroyDnVec(mdata->cuSpMV[i].vecYDescr);CHKERRCUSPARSE(stat);
}
}
#endif
delete *matstruct;
*matstruct = NULL;
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Reset(Mat_SeqAIJCUSPARSETriFactors** trifactors)
{
PetscErrorCode ierr;
PetscFunctionBegin;
if (*trifactors) {
ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*trifactors)->loTriFactorPtr);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*trifactors)->upTriFactorPtr);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*trifactors)->loTriFactorPtrTranspose);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&(*trifactors)->upTriFactorPtrTranspose);CHKERRQ(ierr);
delete (*trifactors)->rpermIndices;
delete (*trifactors)->cpermIndices;
delete (*trifactors)->workVector;
(*trifactors)->rpermIndices = NULL;
(*trifactors)->cpermIndices = NULL;
(*trifactors)->workVector = NULL;
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors** trifactors)
{
PetscErrorCode ierr;
cusparseHandle_t handle;
cusparseStatus_t stat;
PetscFunctionBegin;
if (*trifactors) {
ierr = MatSeqAIJCUSPARSETriFactors_Reset(trifactors);CHKERRQ(ierr);
if ((handle = (*trifactors)->handle)) {
stat = cusparseDestroy(handle);CHKERRCUSPARSE(stat);
}
ierr = PetscFree(*trifactors);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
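/* Helper functors for the thrust-based COO assembly below:
   - IJCompare:  lexicographic (row, column) ordering used to sort the COO entries
   - IJEqual:    detects duplicate (row, column) pairs after sorting
   - IJDiff/IJSum: mark the positions where the row or column index changes so that an
     inclusive scan can map every sorted COO entry to its unique nonzero slot; e.g. sorted
     (i,j) = (0,0),(0,0),(0,1),(1,1) gives change flags 0,0,1,1 and scanned slots 0,0,1,2 */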
struct IJCompare
{
__host__ __device__
inline bool operator() (const thrust::tuple<PetscInt, PetscInt> &t1, const thrust::tuple<PetscInt, PetscInt> &t2)
{
if (t1.get<0>() < t2.get<0>()) return true;
if (t1.get<0>() == t2.get<0>()) return t1.get<1>() < t2.get<1>();
return false;
}
};
struct IJEqual
{
__host__ __device__
inline bool operator() (const thrust::tuple<PetscInt, PetscInt> &t1, const thrust::tuple<PetscInt, PetscInt> &t2)
{
if (t1.get<0>() != t2.get<0>() || t1.get<1>() != t2.get<1>()) return false;
return true;
}
};
struct IJDiff
{
__host__ __device__
inline PetscInt operator() (const PetscInt &t1, const PetscInt &t2)
{
return t1 == t2 ? 0 : 1;
}
};
struct IJSum
{
__host__ __device__
inline PetscInt operator() (const PetscInt &t1, const PetscInt &t2)
{
return t1||t2;
}
};
#include <thrust/iterator/discard_iterator.h>
PetscErrorCode MatSetValuesCOO_SeqAIJCUSPARSE(Mat A, const PetscScalar v[], InsertMode imode)
{
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
THRUSTARRAY *cooPerm_v = NULL;
thrust::device_ptr<const PetscScalar> d_v;
CsrMatrix *matrix;
PetscErrorCode ierr;
cudaError_t cerr;
PetscInt n;
PetscFunctionBegin;
if (!cusp) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUSPARSE struct");
if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUSPARSE CsrMatrix");
if (!cusp->cooPerm) {
ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
matrix = (CsrMatrix*)cusp->mat->mat;
if (!matrix->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUDA memory");
if (!v) {
if (imode == INSERT_VALUES) thrust::fill(thrust::device,matrix->values->begin(),matrix->values->end(),0.);
goto finalize;
}
n = cusp->cooPerm->size();
if (isCudaMem(v)) {
d_v = thrust::device_pointer_cast(v);
} else {
cooPerm_v = new THRUSTARRAY(n);
cooPerm_v->assign(v,v+n);
d_v = cooPerm_v->data();
ierr = PetscLogCpuToGpu(n*sizeof(PetscScalar));CHKERRQ(ierr);
}
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
if (imode == ADD_VALUES) { /* ADD VALUES means add to existing ones */
if (cusp->cooPerm_a) {
THRUSTARRAY *cooPerm_w = new THRUSTARRAY(matrix->values->size());
auto vbit = thrust::make_permutation_iterator(d_v,cusp->cooPerm->begin());
thrust::reduce_by_key(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),vbit,thrust::make_discard_iterator(),cooPerm_w->begin(),thrust::equal_to<PetscInt>(),thrust::plus<PetscScalar>());
thrust::transform(cooPerm_w->begin(),cooPerm_w->end(),matrix->values->begin(),matrix->values->begin(),thrust::plus<PetscScalar>());
delete cooPerm_w;
} else {
auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->cooPerm->begin()),
matrix->values->begin()));
auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->cooPerm->end()),
matrix->values->end()));
thrust::for_each(zibit,zieit,VecCUDAPlusEquals());
}
} else {
if (cusp->cooPerm_a) { /* repeated entries in COO, with INSERT_VALUES -> reduce */
auto vbit = thrust::make_permutation_iterator(d_v,cusp->cooPerm->begin());
thrust::reduce_by_key(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),vbit,thrust::make_discard_iterator(),matrix->values->begin(),thrust::equal_to<PetscInt>(),thrust::plus<PetscScalar>());
} else {
auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->cooPerm->begin()),
matrix->values->begin()));
auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v,cusp->cooPerm->end()),
matrix->values->end()));
thrust::for_each(zibit,zieit,VecCUDAEquals());
}
}
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
finalize:
delete cooPerm_v;
A->offloadmask = PETSC_OFFLOAD_GPU;
ierr = PetscObjectStateIncrease((PetscObject)A);CHKERRQ(ierr);
/* shorter version of MatAssemblyEnd_SeqAIJ */
ierr = PetscInfo3(A,"Matrix size: %D X %D; storage space: 0 unneeded,%D used\n",A->rmap->n,A->cmap->n,a->nz);CHKERRQ(ierr);
ierr = PetscInfo(A,"Number of mallocs during MatSetValues() is 0\n");CHKERRQ(ierr);
ierr = PetscInfo1(A,"Maximum nonzeros in any row is %D\n",a->rmax);CHKERRQ(ierr);
a->reallocs = 0;
A->info.mallocs += 0;
A->info.nz_unneeded = 0;
A->assembled = A->was_assembled = PETSC_TRUE;
A->num_ass++;
PetscFunctionReturn(0);
}
#include <thrust/binary_search.h>
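/* A minimal usage sketch of the COO assembly path (MatSetValuesCOO_SeqAIJCUSPARSE above fills the values,
   MatSetPreallocationCOO_SeqAIJCUSPARSE below builds the nonzero pattern), assuming these routines are
   reached through the generic MatSetPreallocationCOO()/MatSetValuesCOO() entry points composed as
   "MatSetPreallocationCOO_C"/"MatSetValuesCOO_C"; error checking omitted:

     PetscInt    coo_i[] = {0,0,1};           (row indices)
     PetscInt    coo_j[] = {0,1,1};           (column indices)
     PetscScalar v[]     = {1.0,2.0,3.0};     (values; host or device memory both work)
     MatSetPreallocationCOO(A,3,coo_i,coo_j); (build the nonzero pattern once)
     MatSetValuesCOO(A,v,INSERT_VALUES);      (may be called repeatedly with new values)
*/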
PetscErrorCode MatSetPreallocationCOO_SeqAIJCUSPARSE(Mat A, PetscInt n, const PetscInt coo_i[], const PetscInt coo_j[])
{
PetscErrorCode ierr;
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
PetscInt cooPerm_n, nzr = 0;
cudaError_t cerr;
PetscFunctionBegin;
ierr = PetscLayoutSetUp(A->rmap);CHKERRQ(ierr);
ierr = PetscLayoutSetUp(A->cmap);CHKERRQ(ierr);
cooPerm_n = cusp->cooPerm ? cusp->cooPerm->size() : 0;
if (n != cooPerm_n) {
delete cusp->cooPerm;
delete cusp->cooPerm_a;
cusp->cooPerm = NULL;
cusp->cooPerm_a = NULL;
}
if (n) {
THRUSTINTARRAY d_i(n);
THRUSTINTARRAY d_j(n);
THRUSTINTARRAY ii(A->rmap->n);
if (!cusp->cooPerm) { cusp->cooPerm = new THRUSTINTARRAY(n); }
if (!cusp->cooPerm_a) { cusp->cooPerm_a = new THRUSTINTARRAY(n); }
ierr = PetscLogCpuToGpu(2.*n*sizeof(PetscInt));CHKERRQ(ierr);
d_i.assign(coo_i,coo_i+n);
d_j.assign(coo_j,coo_j+n);
auto fkey = thrust::make_zip_iterator(thrust::make_tuple(d_i.begin(),d_j.begin()));
auto ekey = thrust::make_zip_iterator(thrust::make_tuple(d_i.end(),d_j.end()));
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
thrust::sequence(thrust::device, cusp->cooPerm->begin(), cusp->cooPerm->end(), 0);
thrust::sort_by_key(fkey, ekey, cusp->cooPerm->begin(), IJCompare());
*cusp->cooPerm_a = d_i;
THRUSTINTARRAY w = d_j;
auto nekey = thrust::unique(fkey, ekey, IJEqual());
if (nekey == ekey) { /* all entries are unique */
delete cusp->cooPerm_a;
cusp->cooPerm_a = NULL;
} else { /* repeated entries: build cooPerm_a, which maps every sorted COO entry to the slot of its unique (i,j) pair.
              adjacent_difference() flags the positions where the row or column index changes, IJSum combines the two flags,
              and the inclusive scan turns the flags into destination indices. (Admittedly not the most elegant algorithm.) */
adjacent_difference(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),cusp->cooPerm_a->begin(),IJDiff());
adjacent_difference(w.begin(),w.end(),w.begin(),IJDiff());
(*cusp->cooPerm_a)[0] = 0;
w[0] = 0;
thrust::transform(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),w.begin(),cusp->cooPerm_a->begin(),IJSum());
thrust::inclusive_scan(cusp->cooPerm_a->begin(),cusp->cooPerm_a->end(),cusp->cooPerm_a->begin(),thrust::plus<PetscInt>());
}
thrust::counting_iterator<PetscInt> search_begin(0);
thrust::upper_bound(d_i.begin(), nekey.get_iterator_tuple().get<0>(),
search_begin, search_begin + A->rmap->n,
ii.begin());
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = MatSeqXAIJFreeAIJ(A,&a->a,&a->j,&a->i);CHKERRQ(ierr);
a->singlemalloc = PETSC_FALSE;
a->free_a = PETSC_TRUE;
a->free_ij = PETSC_TRUE;
ierr = PetscMalloc1(A->rmap->n+1,&a->i);CHKERRQ(ierr);
a->i[0] = 0;
cerr = cudaMemcpy(a->i+1,ii.data().get(),A->rmap->n*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
a->nz = a->maxnz = a->i[A->rmap->n];
a->rmax = 0;
ierr = PetscMalloc1(a->nz,&a->a);CHKERRQ(ierr);
ierr = PetscMalloc1(a->nz,&a->j);CHKERRQ(ierr);
cerr = cudaMemcpy(a->j,d_j.data().get(),a->nz*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
if (!a->ilen) { ierr = PetscMalloc1(A->rmap->n,&a->ilen);CHKERRQ(ierr); }
if (!a->imax) { ierr = PetscMalloc1(A->rmap->n,&a->imax);CHKERRQ(ierr); }
for (PetscInt i = 0; i < A->rmap->n; i++) {
const PetscInt nnzr = a->i[i+1] - a->i[i];
nzr += (PetscInt)!!(nnzr);
a->ilen[i] = a->imax[i] = nnzr;
a->rmax = PetscMax(a->rmax,nnzr);
}
a->nonzerorowcnt = nzr;
A->preallocated = PETSC_TRUE;
ierr = PetscLogGpuToCpu((A->rmap->n+a->nz)*sizeof(PetscInt));CHKERRQ(ierr);
ierr = MatMarkDiagonal_SeqAIJ(A);CHKERRQ(ierr);
} else {
ierr = MatSeqAIJSetPreallocation(A,0,NULL);CHKERRQ(ierr);
}
ierr = MatSetOption(A,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_TRUE);CHKERRQ(ierr);
/* We want to allocate the CUSPARSE struct for matvec now.
   The code is convoluted enough that it is simpler to just copy zeros */
ierr = PetscArrayzero(a->a,a->nz);CHKERRQ(ierr);
ierr = MatCheckCompressedRow(A,nzr,&a->compressedrow,a->i,A->rmap->n,0.6);CHKERRQ(ierr);
A->offloadmask = PETSC_OFFLOAD_CPU;
A->nonzerostate++;
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEMultStruct_Destroy(&cusp->matTranspose,cusp->format);CHKERRQ(ierr);
A->assembled = PETSC_FALSE;
A->was_assembled = PETSC_FALSE;
PetscFunctionReturn(0);
}
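/* Device-array accessors: each Get routine below returns a raw device pointer to the CSR values of
   the matrix (the read and read/write variants copy the matrix to the GPU first if needed, while the
   write variant assumes the caller will overwrite all values) and must be paired with the matching
   Restore routine. A small sketch (assuming A is an assembled MATSEQAIJCUSPARSE matrix; error
   checking omitted):

     const PetscScalar *val;
     MatSeqAIJCUSPARSEGetArrayRead(A,&val);      (read-only device pointer to the values)
     ... use val in a kernel or library call ...
     MatSeqAIJCUSPARSERestoreArrayRead(A,&val);
*/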
PetscErrorCode MatSeqAIJCUSPARSEGetArrayRead(Mat A, const PetscScalar** a)
{
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
CsrMatrix *csr;
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidPointer(a,2);
PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
if (cusp->format == MAT_CUSPARSE_ELL || cusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
csr = (CsrMatrix*)cusp->mat->mat;
if (!csr->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUDA memory");
*a = csr->values->data().get();
PetscFunctionReturn(0);
}
PetscErrorCode MatSeqAIJCUSPARSERestoreArrayRead(Mat A, const PetscScalar** a)
{
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidPointer(a,2);
PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
*a = NULL;
PetscFunctionReturn(0);
}
PetscErrorCode MatSeqAIJCUSPARSEGetArray(Mat A, PetscScalar** a)
{
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
CsrMatrix *csr;
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidPointer(a,2);
PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
if (cusp->format == MAT_CUSPARSE_ELL || cusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
csr = (CsrMatrix*)cusp->mat->mat;
if (!csr->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUDA memory");
*a = csr->values->data().get();
A->offloadmask = PETSC_OFFLOAD_GPU;
PetscFunctionReturn(0);
}
PetscErrorCode MatSeqAIJCUSPARSERestoreArray(Mat A, PetscScalar** a)
{
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidPointer(a,2);
PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
ierr = PetscObjectStateIncrease((PetscObject)A);CHKERRQ(ierr);
*a = NULL;
PetscFunctionReturn(0);
}
PetscErrorCode MatSeqAIJCUSPARSEGetArrayWrite(Mat A, PetscScalar** a)
{
Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
CsrMatrix *csr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidPointer(a,2);
PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
if (!cusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
if (cusp->format == MAT_CUSPARSE_ELL || cusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
csr = (CsrMatrix*)cusp->mat->mat;
if (!csr->values) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing CUDA memory");
*a = csr->values->data().get();
A->offloadmask = PETSC_OFFLOAD_GPU;
PetscFunctionReturn(0);
}
PetscErrorCode MatSeqAIJCUSPARSERestoreArrayWrite(Mat A, PetscScalar** a)
{
PetscErrorCode ierr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidPointer(a,2);
PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
ierr = PetscObjectStateIncrease((PetscObject)A);CHKERRQ(ierr);
*a = NULL;
PetscFunctionReturn(0);
}
/* lexicographic (row, column) ordering of (row, col, value, origin flag) tuples, used below to merge the COO forms of A and B */
struct IJCompare4
{
__host__ __device__
inline bool operator() (const thrust::tuple<int, int, PetscScalar, int> &t1, const thrust::tuple<int, int, PetscScalar, int> &t2)
{
if (t1.get<0>() < t2.get<0>()) return true;
if (t1.get<0>() == t2.get<0>()) return t1.get<1>() < t2.get<1>();
return false;
}
};
/* adds a fixed offset to an integer; used to shift B's column indices (by A->cmap->n) and its transposed row offsets (by a->nz) when concatenating with A */
struct Shift
{
int _shift;
Shift(int shift) : _shift(shift) {}
__host__ __device__
inline int operator() (const int &c)
{
return c + _shift;
}
};
/* merges two SeqAIJCUSPARSE matrices side by side, C = [A,B], i.e. the [A';B']' operation in MATLAB notation */
PetscErrorCode MatSeqAIJCUSPARSEMergeMats(Mat A,Mat B,MatReuse reuse,Mat* C)
{
PetscErrorCode ierr;
Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data, *b = (Mat_SeqAIJ*)B->data, *c;
Mat_SeqAIJCUSPARSE *Acusp = (Mat_SeqAIJCUSPARSE*)A->spptr, *Bcusp = (Mat_SeqAIJCUSPARSE*)B->spptr, *Ccusp;
Mat_SeqAIJCUSPARSEMultStruct *Cmat;
CsrMatrix *Acsr,*Bcsr,*Ccsr;
PetscInt Annz,Bnnz;
cusparseStatus_t stat;
PetscInt i,m,n,zero = 0;
cudaError_t cerr;
PetscFunctionBegin;
PetscValidHeaderSpecific(A,MAT_CLASSID,1);
PetscValidHeaderSpecific(B,MAT_CLASSID,2);
PetscValidPointer(C,4);
PetscCheckTypeName(A,MATSEQAIJCUSPARSE);
PetscCheckTypeName(B,MATSEQAIJCUSPARSE);
if (A->rmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Invalid number of rows %D != %D",A->rmap->n,B->rmap->n);
if (reuse == MAT_INPLACE_MATRIX) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_INPLACE_MATRIX not supported");
if (Acusp->format == MAT_CUSPARSE_ELL || Acusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
if (Bcusp->format == MAT_CUSPARSE_ELL || Bcusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
if (reuse == MAT_INITIAL_MATRIX) {
m = A->rmap->n;
n = A->cmap->n + B->cmap->n;
ierr = MatCreate(PETSC_COMM_SELF,C);CHKERRQ(ierr);
ierr = MatSetSizes(*C,m,n,m,n);CHKERRQ(ierr);
ierr = MatSetType(*C,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
c = (Mat_SeqAIJ*)(*C)->data;
Ccusp = (Mat_SeqAIJCUSPARSE*)(*C)->spptr;
Cmat = new Mat_SeqAIJCUSPARSEMultStruct;
Ccsr = new CsrMatrix;
Cmat->cprowIndices = NULL;
c->compressedrow.use = PETSC_FALSE;
c->compressedrow.nrows = 0;
c->compressedrow.i = NULL;
c->compressedrow.rindex = NULL;
Ccusp->workVector = NULL;
Ccusp->nrows = m;
Ccusp->mat = Cmat;
Ccusp->mat->mat = Ccsr;
Ccsr->num_rows = m;
Ccsr->num_cols = n;
stat = cusparseCreateMatDescr(&Cmat->descr);CHKERRCUSPARSE(stat);
stat = cusparseSetMatIndexBase(Cmat->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
stat = cusparseSetMatType(Cmat->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
cerr = cudaMalloc((void **)&(Cmat->alpha_one),sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = cudaMalloc((void **)&(Cmat->beta_zero),sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = cudaMalloc((void **)&(Cmat->beta_one), sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = cudaMemcpy(Cmat->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = cudaMemcpy(Cmat->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = cudaMemcpy(Cmat->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSECopyToGPU(B);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEGenerateTransposeForMult(A);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSEGenerateTransposeForMult(B);CHKERRQ(ierr);
if (!Acusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
if (!Bcusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
Acsr = (CsrMatrix*)Acusp->mat->mat;
Bcsr = (CsrMatrix*)Bcusp->mat->mat;
Annz = (PetscInt)Acsr->column_indices->size();
Bnnz = (PetscInt)Bcsr->column_indices->size();
c->nz = Annz + Bnnz;
Ccsr->row_offsets = new THRUSTINTARRAY32(m+1);
Ccsr->column_indices = new THRUSTINTARRAY32(c->nz);
Ccsr->values = new THRUSTARRAY(c->nz);
Ccsr->num_entries = c->nz;
Ccusp->cooPerm = new THRUSTINTARRAY(c->nz);
if (c->nz) {
auto Acoo = new THRUSTINTARRAY32(Annz);
auto Bcoo = new THRUSTINTARRAY32(Bnnz);
auto Ccoo = new THRUSTINTARRAY32(c->nz);
THRUSTINTARRAY32 *Aroff,*Broff;
if (a->compressedrow.use) { /* need full row offset */
if (!Acusp->rowoffsets_gpu) {
Acusp->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1);
Acusp->rowoffsets_gpu->assign(a->i,a->i + A->rmap->n + 1);
ierr = PetscLogCpuToGpu((A->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr);
}
Aroff = Acusp->rowoffsets_gpu;
} else Aroff = Acsr->row_offsets;
if (b->compressedrow.use) { /* need full row offset */
if (!Bcusp->rowoffsets_gpu) {
Bcusp->rowoffsets_gpu = new THRUSTINTARRAY32(B->rmap->n + 1);
Bcusp->rowoffsets_gpu->assign(b->i,b->i + B->rmap->n + 1);
ierr = PetscLogCpuToGpu((B->rmap->n + 1)*sizeof(PetscInt));CHKERRQ(ierr);
}
Broff = Bcusp->rowoffsets_gpu;
} else Broff = Bcsr->row_offsets;
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
stat = cusparseXcsr2coo(Acusp->handle,
Aroff->data().get(),
Annz,
m,
Acoo->data().get(),
CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
stat = cusparseXcsr2coo(Bcusp->handle,
Broff->data().get(),
Bnnz,
m,
Bcoo->data().get(),
CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
/* Issues when using bool with large matrices on SUMMIT 10.2.89 */
auto Aperm = thrust::make_constant_iterator(1);
auto Bperm = thrust::make_constant_iterator(0);
#if PETSC_PKG_CUDA_VERSION_GE(10,0,0)
auto Bcib = thrust::make_transform_iterator(Bcsr->column_indices->begin(),Shift(A->cmap->n));
auto Bcie = thrust::make_transform_iterator(Bcsr->column_indices->end(),Shift(A->cmap->n));
#else
/* there are issues instantiating the merge operation using a transform iterator for the columns of B */
auto Bcib = Bcsr->column_indices->begin();
auto Bcie = Bcsr->column_indices->end();
thrust::transform(Bcib,Bcie,Bcib,Shift(A->cmap->n));
#endif
auto wPerm = new THRUSTINTARRAY32(Annz+Bnnz);
auto Azb = thrust::make_zip_iterator(thrust::make_tuple(Acoo->begin(),Acsr->column_indices->begin(),Acsr->values->begin(),Aperm));
auto Aze = thrust::make_zip_iterator(thrust::make_tuple(Acoo->end(),Acsr->column_indices->end(),Acsr->values->end(),Aperm));
auto Bzb = thrust::make_zip_iterator(thrust::make_tuple(Bcoo->begin(),Bcib,Bcsr->values->begin(),Bperm));
auto Bze = thrust::make_zip_iterator(thrust::make_tuple(Bcoo->end(),Bcie,Bcsr->values->end(),Bperm));
auto Czb = thrust::make_zip_iterator(thrust::make_tuple(Ccoo->begin(),Ccsr->column_indices->begin(),Ccsr->values->begin(),wPerm->begin()));
auto p1 = Ccusp->cooPerm->begin();
auto p2 = Ccusp->cooPerm->begin();
thrust::advance(p2,Annz);
PetscStackCallThrust(thrust::merge(thrust::device,Azb,Aze,Bzb,Bze,Czb,IJCompare4()));
#if PETSC_PKG_CUDA_VERSION_LT(10,0,0)
thrust::transform(Bcib,Bcie,Bcib,Shift(-A->cmap->n));
#endif
auto cci = thrust::make_counting_iterator(zero);
auto cce = thrust::make_counting_iterator(c->nz);
#if 0 //Errors on SUMMIT cuda 11.1.0
PetscStackCallThrust(thrust::partition_copy(thrust::device,cci,cce,wPerm->begin(),p1,p2,thrust::identity<int>()));
#else
auto pred = thrust::identity<int>();
PetscStackCallThrust(thrust::copy_if(thrust::device,cci,cce,wPerm->begin(),p1,pred));
PetscStackCallThrust(thrust::remove_copy_if(thrust::device,cci,cce,wPerm->begin(),p2,pred));
#endif
stat = cusparseXcoo2csr(Ccusp->handle,
Ccoo->data().get(),
c->nz,
m,
Ccsr->row_offsets->data().get(),
CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
delete wPerm;
delete Acoo;
delete Bcoo;
delete Ccoo;
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
stat = cusparseCreateCsr(&Cmat->matDescr, Ccsr->num_rows, Ccsr->num_cols, Ccsr->num_entries,
Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get(), Ccsr->values->data().get(),
CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);CHKERRCUSPARSE(stat);
#endif
if (Acusp->transgen && Bcusp->transgen) { /* if A and B have the transpose, generate C transpose too */
PetscBool AT = Acusp->matTranspose ? PETSC_TRUE : PETSC_FALSE, BT = Bcusp->matTranspose ? PETSC_TRUE : PETSC_FALSE;
Mat_SeqAIJCUSPARSEMultStruct *CmatT = new Mat_SeqAIJCUSPARSEMultStruct;
CsrMatrix *CcsrT = new CsrMatrix;
CsrMatrix *AcsrT = AT ? (CsrMatrix*)Acusp->matTranspose->mat : NULL;
CsrMatrix *BcsrT = BT ? (CsrMatrix*)Bcusp->matTranspose->mat : NULL;
Ccusp->transgen = PETSC_TRUE;
CmatT->cprowIndices = NULL;
CmatT->mat = CcsrT;
CcsrT->num_rows = n;
CcsrT->num_cols = m;
CcsrT->num_entries = c->nz;
CcsrT->row_offsets = new THRUSTINTARRAY32(n+1);
CcsrT->column_indices = new THRUSTINTARRAY32(c->nz);
CcsrT->values = new THRUSTARRAY(c->nz);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
auto rT = CcsrT->row_offsets->begin();
if (AT) {
rT = thrust::copy(AcsrT->row_offsets->begin(),AcsrT->row_offsets->end(),rT);
thrust::advance(rT,-1);
}
if (BT) {
auto titb = thrust::make_transform_iterator(BcsrT->row_offsets->begin(),Shift(a->nz));
auto tite = thrust::make_transform_iterator(BcsrT->row_offsets->end(),Shift(a->nz));
thrust::copy(titb,tite,rT);
}
auto cT = CcsrT->column_indices->begin();
if (AT) cT = thrust::copy(AcsrT->column_indices->begin(),AcsrT->column_indices->end(),cT);
if (BT) thrust::copy(BcsrT->column_indices->begin(),BcsrT->column_indices->end(),cT);
auto vT = CcsrT->values->begin();
if (AT) vT = thrust::copy(AcsrT->values->begin(),AcsrT->values->end(),vT);
if (BT) thrust::copy(BcsrT->values->begin(),BcsrT->values->end(),vT);
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
stat = cusparseCreateMatDescr(&CmatT->descr);CHKERRCUSPARSE(stat);
stat = cusparseSetMatIndexBase(CmatT->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSPARSE(stat);
stat = cusparseSetMatType(CmatT->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSPARSE(stat);
cerr = cudaMalloc((void **)&(CmatT->alpha_one),sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = cudaMalloc((void **)&(CmatT->beta_zero),sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = cudaMalloc((void **)&(CmatT->beta_one), sizeof(PetscScalar));CHKERRCUDA(cerr);
cerr = cudaMemcpy(CmatT->alpha_one,&PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = cudaMemcpy(CmatT->beta_zero,&PETSC_CUSPARSE_ZERO,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = cudaMemcpy(CmatT->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
#if PETSC_PKG_CUDA_VERSION_GE(11,0,0)
stat = cusparseCreateCsr(&CmatT->matDescr, CcsrT->num_rows, CcsrT->num_cols, CcsrT->num_entries,
CcsrT->row_offsets->data().get(), CcsrT->column_indices->data().get(), CcsrT->values->data().get(),
CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);CHKERRCUSPARSE(stat);
#endif
Ccusp->matTranspose = CmatT;
}
}
c->singlemalloc = PETSC_FALSE;
c->free_a = PETSC_TRUE;
c->free_ij = PETSC_TRUE;
ierr = PetscMalloc1(m+1,&c->i);CHKERRQ(ierr);
ierr = PetscMalloc1(c->nz,&c->j);CHKERRQ(ierr);
if (PetscDefined(USE_64BIT_INDICES)) { /* 32 to 64 bit conversion on the GPU and then copy to host (lazy) */
THRUSTINTARRAY ii(Ccsr->row_offsets->size());
THRUSTINTARRAY jj(Ccsr->column_indices->size());
ii = *Ccsr->row_offsets;
jj = *Ccsr->column_indices;
cerr = cudaMemcpy(c->i,ii.data().get(),Ccsr->row_offsets->size()*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
cerr = cudaMemcpy(c->j,jj.data().get(),Ccsr->column_indices->size()*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
} else {
cerr = cudaMemcpy(c->i,Ccsr->row_offsets->data().get(),Ccsr->row_offsets->size()*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
cerr = cudaMemcpy(c->j,Ccsr->column_indices->data().get(),Ccsr->column_indices->size()*sizeof(PetscInt),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
}
ierr = PetscLogGpuToCpu((Ccsr->column_indices->size() + Ccsr->row_offsets->size())*sizeof(PetscInt));CHKERRQ(ierr);
ierr = PetscMalloc1(m,&c->ilen);CHKERRQ(ierr);
ierr = PetscMalloc1(m,&c->imax);CHKERRQ(ierr);
c->maxnz = c->nz;
c->nonzerorowcnt = 0;
c->rmax = 0;
for (i = 0; i < m; i++) {
const PetscInt nn = c->i[i+1] - c->i[i];
c->ilen[i] = c->imax[i] = nn;
c->nonzerorowcnt += (PetscInt)!!nn;
c->rmax = PetscMax(c->rmax,nn);
}
ierr = MatMarkDiagonal_SeqAIJ(*C);CHKERRQ(ierr);
ierr = PetscMalloc1(c->nz,&c->a);CHKERRQ(ierr);
(*C)->nonzerostate++;
ierr = PetscLayoutSetUp((*C)->rmap);CHKERRQ(ierr);
ierr = PetscLayoutSetUp((*C)->cmap);CHKERRQ(ierr);
Ccusp->nonzerostate = (*C)->nonzerostate;
(*C)->preallocated = PETSC_TRUE;
} else {
if ((*C)->rmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Invalid number of rows %D != %D",(*C)->rmap->n,B->rmap->n);
c = (Mat_SeqAIJ*)(*C)->data;
if (c->nz) {
Ccusp = (Mat_SeqAIJCUSPARSE*)(*C)->spptr;
if (!Ccusp->cooPerm) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing cooPerm");
if (Ccusp->format == MAT_CUSPARSE_ELL || Ccusp->format == MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not implemented");
if (Ccusp->nonzerostate != (*C)->nonzerostate) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Wrong nonzerostate");
ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
ierr = MatSeqAIJCUSPARSECopyToGPU(B);CHKERRQ(ierr);
if (!Acusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
if (!Bcusp->mat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing Mat_SeqAIJCUSPARSEMultStruct");
Acsr = (CsrMatrix*)Acusp->mat->mat;
Bcsr = (CsrMatrix*)Bcusp->mat->mat;
Ccsr = (CsrMatrix*)Ccusp->mat->mat;
if (Acsr->num_entries != (PetscInt)Acsr->values->size()) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_COR,"A nnz %D != %D",Acsr->num_entries,(PetscInt)Acsr->values->size());
if (Bcsr->num_entries != (PetscInt)Bcsr->values->size()) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_COR,"B nnz %D != %D",Bcsr->num_entries,(PetscInt)Bcsr->values->size());
if (Ccsr->num_entries != (PetscInt)Ccsr->values->size()) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_COR,"C nnz %D != %D",Ccsr->num_entries,(PetscInt)Ccsr->values->size());
if (Ccsr->num_entries != Acsr->num_entries + Bcsr->num_entries) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_COR,"C nnz %D != %D + %D",Ccsr->num_entries,Acsr->num_entries,Bcsr->num_entries);
if (Ccusp->cooPerm->size() != Ccsr->values->size()) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_COR,"permSize %D != %D",(PetscInt)Ccusp->cooPerm->size(),(PetscInt)Ccsr->values->size());
auto pmid = Ccusp->cooPerm->begin();
thrust::advance(pmid,Acsr->num_entries);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
auto zibait = thrust::make_zip_iterator(thrust::make_tuple(Acsr->values->begin(),
thrust::make_permutation_iterator(Ccsr->values->begin(),Ccusp->cooPerm->begin())));
auto zieait = thrust::make_zip_iterator(thrust::make_tuple(Acsr->values->end(),
thrust::make_permutation_iterator(Ccsr->values->begin(),pmid)));
thrust::for_each(zibait,zieait,VecCUDAEquals());
auto zibbit = thrust::make_zip_iterator(thrust::make_tuple(Bcsr->values->begin(),
thrust::make_permutation_iterator(Ccsr->values->begin(),pmid)));
auto ziebit = thrust::make_zip_iterator(thrust::make_tuple(Bcsr->values->end(),
thrust::make_permutation_iterator(Ccsr->values->begin(),Ccusp->cooPerm->end())));
thrust::for_each(zibbit,ziebit,VecCUDAEquals());
if (Acusp->transgen && Bcusp->transgen && Ccusp->transgen) {
if (!Ccusp->matTranspose) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_COR,"Missing transpose Mat_SeqAIJCUSPARSEMultStruct");
PetscBool AT = Acusp->matTranspose ? PETSC_TRUE : PETSC_FALSE, BT = Bcusp->matTranspose ? PETSC_TRUE : PETSC_FALSE;
CsrMatrix *AcsrT = AT ? (CsrMatrix*)Acusp->matTranspose->mat : NULL;
CsrMatrix *BcsrT = BT ? (CsrMatrix*)Bcusp->matTranspose->mat : NULL;
CsrMatrix *CcsrT = (CsrMatrix*)Ccusp->matTranspose->mat;
auto vT = CcsrT->values->begin();
if (AT) vT = thrust::copy(AcsrT->values->begin(),AcsrT->values->end(),vT);
if (BT) thrust::copy(BcsrT->values->begin(),BcsrT->values->end(),vT);
}
cerr = WaitForCUDA();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
}
}
ierr = PetscObjectStateIncrease((PetscObject)*C);CHKERRQ(ierr);
(*C)->assembled = PETSC_TRUE;
(*C)->was_assembled = PETSC_FALSE;
(*C)->offloadmask = PETSC_OFFLOAD_GPU;
PetscFunctionReturn(0);
}
static PetscErrorCode MatSeqAIJCopySubArray_SeqAIJCUSPARSE(Mat A, PetscInt n, const PetscInt idx[], PetscScalar v[])
{
PetscErrorCode ierr;
bool dmem;
const PetscScalar *av;
cudaError_t cerr;
PetscFunctionBegin;
dmem = isCudaMem(v);
ierr = MatSeqAIJCUSPARSEGetArrayRead(A,&av);CHKERRQ(ierr);
if (n && idx) {
THRUSTINTARRAY widx(n);
widx.assign(idx,idx+n);
ierr = PetscLogCpuToGpu(n*sizeof(PetscInt));CHKERRQ(ierr);
THRUSTARRAY *w = NULL;
thrust::device_ptr<PetscScalar> dv;
if (dmem) {
dv = thrust::device_pointer_cast(v);
} else {
w = new THRUSTARRAY(n);
dv = w->data();
}
thrust::device_ptr<const PetscScalar> dav = thrust::device_pointer_cast(av);
auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(dav,widx.begin()),dv));
auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(dav,widx.end()),dv+n));
thrust::for_each(zibit,zieit,VecCUDAEquals());
if (w) {
cerr = cudaMemcpy(v,w->data().get(),n*sizeof(PetscScalar),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
}
delete w;
} else {
cerr = cudaMemcpy(v,av,n*sizeof(PetscScalar),dmem ? cudaMemcpyDeviceToDevice : cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
}
if (!dmem) { ierr = PetscLogCpuToGpu(n*sizeof(PetscScalar));CHKERRQ(ierr); }
ierr = MatSeqAIJCUSPARSERestoreArrayRead(A,&av);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
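/* Usage note (sketch only): the zip/permutation-iterator loop above performs a plain device gather,
   dv[i] = dav[widx[i]], which could equivalently be written as

     thrust::gather(thrust::device, widx.begin(), widx.end(), dav, dv);

   The zip form keeps the assignment inside the same VecCUDAEquals functor used elsewhere in this
   file. */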
|
0914739afb0062e86ade8b73b68157d2e3180ba6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void printNumber(int number)
{
printf("%d\n", number);
}
int main()
{
for (int i = 0; i < 5; ++i)
{
hipStream_t stream;
hipStreamCreate(&stream);
hipLaunchKernelGGL(( printNumber), dim3(1), dim3(1), 0, stream, i);
hipStreamDestroy(stream);
}
hipDeviceSynchronize();
#if 0
hipStream_t stream[5];
for (int i = 0; i < 5; ++i)
{
hipStreamCreate(&stream[i]);
}
for (int i = 0; i < 5; ++i)
{
hipLaunchKernelGGL(( printNumber), dim3(1), dim3(1), 0, stream[i], i);
}
for (int i = 0; i < 5; ++i)
{
hipStreamDestroy(stream[i]);
}
hipDeviceSynchronize();
#endif
}
| 0914739afb0062e86ade8b73b68157d2e3180ba6.cu | #include <stdio.h>
__global__ void printNumber(int number)
{
printf("%d\n", number);
}
int main()
{
for (int i = 0; i < 5; ++i)
{
cudaStream_t stream;
cudaStreamCreate(&stream);
printNumber<<<1, 1, 0, stream>>>(i);
cudaStreamDestroy(stream);
}
cudaDeviceSynchronize();
#if 0
cudaStream_t stream[5];
for (int i = 0; i < 5; ++i)
{
cudaStreamCreate(&stream[i]);
}
for (int i = 0; i < 5; ++i)
{
printNumber<<<1, 1, 0, stream[i]>>>(i);
}
for (int i = 0; i < 5; ++i)
{
cudaStreamDestroy(stream[i]);
}
cudaDeviceSynchronize();
#endif
}
|
7d26bb273e4244f7593db1175c3227b1121c4ee0.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "reader_impl.hpp"
#include <io/comp/nvcomp_adapter.hpp>
#include <io/utilities/config_utils.hpp>
#include <io/utilities/time_utils.cuh>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/utilities/integer_utils.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/binary_search.h>
#include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/iterator_categories.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/logical.h>
#include <thrust/reduce.h>
#include <thrust/scan.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#include <thrust/unique.h>
#include <numeric>
namespace cudf::io::detail::parquet {
namespace {
/**
* @brief Generate depth remappings for repetition and definition levels.
*
* When dealing with columns that contain lists, we must examine incoming
* repetition and definition level pairs to determine what range of output nesting
* is indicated when adding new values. This function generates the mappings of
* the R/D levels to those start/end bounds
*
* @param remap Maps column schema index to the R/D remapping vectors for that column
* @param src_col_schema The column schema to generate the new mapping for
* @param md File metadata information
*/
void generate_depth_remappings(std::map<int, std::pair<std::vector<int>, std::vector<int>>>& remap,
int src_col_schema,
aggregate_reader_metadata const& md)
{
// already generated for this level
if (remap.find(src_col_schema) != remap.end()) { return; }
auto schema = md.get_schema(src_col_schema);
int max_depth = md.get_output_nesting_depth(src_col_schema);
CUDF_EXPECTS(remap.find(src_col_schema) == remap.end(),
"Attempting to remap a schema more than once");
auto inserted =
remap.insert(std::pair<int, std::pair<std::vector<int>, std::vector<int>>>{src_col_schema, {}});
auto& depth_remap = inserted.first->second;
std::vector<int>& rep_depth_remap = (depth_remap.first);
rep_depth_remap.resize(schema.max_repetition_level + 1);
std::vector<int>& def_depth_remap = (depth_remap.second);
def_depth_remap.resize(schema.max_definition_level + 1);
// the key:
// for incoming level values R/D
// add values starting at the shallowest nesting level X has repetition level R
// until you reach the deepest nesting level Y that corresponds to the repetition level R1
// held by the nesting level that has definition level D
//
// Example: a 3 level struct with a list at the bottom
//
// R / D Depth
// level0 0 / 1 0
// level1 0 / 2 1
// level2 0 / 3 2
// list 0 / 3 3
// element 1 / 4 4
//
// incoming R/D : 0, 0 -> add values from depth 0 to 3 (def level 0 always maps to depth 0)
// incoming R/D : 0, 1 -> add values from depth 0 to 3
// incoming R/D : 0, 2 -> add values from depth 0 to 3
// incoming R/D : 1, 4 -> add values from depth 4 to 4
//
// Note : the -validity- of values is simply checked by comparing the incoming D value against the
// D value of the given nesting level (incoming D >= the D for the nesting level == valid,
// otherwise NULL). The tricky part is determining what nesting levels to add values at.
//
// For schemas with no repetition level (no lists), X is always 0 and Y is always max nesting
// depth.
//
// compute "X" from above
for (int s_idx = schema.max_repetition_level; s_idx >= 0; s_idx--) {
auto find_shallowest = [&](int r) {
int shallowest = -1;
int cur_depth = max_depth - 1;
int schema_idx = src_col_schema;
while (schema_idx > 0) {
auto cur_schema = md.get_schema(schema_idx);
if (cur_schema.max_repetition_level == r) {
// if this is a repeated field, map it one level deeper
shallowest = cur_schema.is_stub() ? cur_depth + 1 : cur_depth;
}
// if it's one-level encoding list
else if (cur_schema.is_one_level_list(md.get_schema(cur_schema.parent_idx))) {
shallowest = cur_depth - 1;
}
if (!cur_schema.is_stub()) { cur_depth--; }
schema_idx = cur_schema.parent_idx;
}
return shallowest;
};
rep_depth_remap[s_idx] = find_shallowest(s_idx);
}
// compute "Y" from above
for (int s_idx = schema.max_definition_level; s_idx >= 0; s_idx--) {
auto find_deepest = [&](int d) {
SchemaElement prev_schema;
int schema_idx = src_col_schema;
int r1 = 0;
while (schema_idx > 0) {
SchemaElement cur_schema = md.get_schema(schema_idx);
if (cur_schema.max_definition_level == d) {
// if this is a repeated field, map it one level deeper
r1 = cur_schema.is_stub() ? prev_schema.max_repetition_level
: cur_schema.max_repetition_level;
break;
}
prev_schema = cur_schema;
schema_idx = cur_schema.parent_idx;
}
// we now know R1 from above. return the deepest nesting level that has the
// same repetition level
schema_idx = src_col_schema;
int depth = max_depth - 1;
while (schema_idx > 0) {
SchemaElement cur_schema = md.get_schema(schema_idx);
if (cur_schema.max_repetition_level == r1) {
// if this is a repeated field, map it one level deeper
depth = cur_schema.is_stub() ? depth + 1 : depth;
break;
}
if (!cur_schema.is_stub()) { depth--; }
prev_schema = cur_schema;
schema_idx = cur_schema.parent_idx;
}
return depth;
};
def_depth_remap[s_idx] = find_deepest(s_idx);
}
}
/**
* @brief Return the required number of bits to store a value.
*/
template <typename T = uint8_t>
[[nodiscard]] T required_bits(uint32_t max_level)
{
return static_cast<T>(CompactProtocolReader::NumRequiredBits(max_level));
}
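// Worked example (values for illustration only): required_bits(3) returns 2, since levels in the
// range 0..3 fit in two bits, while required_bits(0) returns 0 because a level that is always zero
// carries no encoded bits.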
/**
* @brief Converts cuDF units to Parquet units.
*
* @return A tuple of Parquet type width, Parquet clock rate and Parquet decimal type.
*/
[[nodiscard]] std::tuple<int32_t, int32_t, int8_t> conversion_info(type_id column_type_id,
type_id timestamp_type_id,
parquet::Type physical,
int8_t converted,
int32_t length)
{
int32_t type_width = (physical == parquet::FIXED_LEN_BYTE_ARRAY) ? length : 0;
int32_t clock_rate = 0;
if (column_type_id == type_id::INT8 or column_type_id == type_id::UINT8) {
type_width = 1; // I32 -> I8
} else if (column_type_id == type_id::INT16 or column_type_id == type_id::UINT16) {
type_width = 2; // I32 -> I16
} else if (column_type_id == type_id::INT32) {
type_width = 4; // str -> hash32
} else if (is_chrono(data_type{column_type_id})) {
clock_rate = to_clockrate(timestamp_type_id);
}
int8_t converted_type = converted;
if (converted_type == parquet::DECIMAL && column_type_id != type_id::FLOAT64 &&
not cudf::is_fixed_point(data_type{column_type_id})) {
converted_type = parquet::UNKNOWN; // Not converting to float64 or decimal
}
return std::make_tuple(type_width, clock_rate, converted_type);
}
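// Worked example (values for illustration only): an output column of type_id::INT16 yields
// {type_width = 2, clock_rate = 0}; a chrono output column gets clock_rate = to_clockrate(timestamp_type_id)
// so decoding can rescale stored timestamps; and a DECIMAL converted type whose output is neither
// FLOAT64 nor a fixed_point type is downgraded to UNKNOWN so no decimal conversion is attempted.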
/**
* @brief Reads compressed page data to device memory.
*
* @param sources Dataset sources
* @param page_data Buffers to hold compressed page data for each chunk
* @param chunks List of column chunk descriptors
* @param begin_chunk Index of first column chunk to read
* @param end_chunk Index after the last column chunk to read
* @param column_chunk_offsets File offset for all chunks
* @param chunk_source_map Association between each column chunk and its source
* @param stream CUDA stream used for device memory operations and kernel launches
*
* @return A future object for reading synchronization
*/
[[nodiscard]] std::future<void> read_column_chunks_async(
std::vector<std::unique_ptr<datasource>> const& sources,
std::vector<std::unique_ptr<datasource::buffer>>& page_data,
cudf::detail::hostdevice_vector<gpu::ColumnChunkDesc>& chunks,
size_t begin_chunk,
size_t end_chunk,
std::vector<size_t> const& column_chunk_offsets,
std::vector<size_type> const& chunk_source_map,
rmm::cuda_stream_view stream)
{
// Transfer chunk data, coalescing adjacent chunks
std::vector<std::future<size_t>> read_tasks;
for (size_t chunk = begin_chunk; chunk < end_chunk;) {
size_t const io_offset = column_chunk_offsets[chunk];
size_t io_size = chunks[chunk].compressed_size;
size_t next_chunk = chunk + 1;
bool const is_compressed = (chunks[chunk].codec != parquet::Compression::UNCOMPRESSED);
while (next_chunk < end_chunk) {
size_t const next_offset = column_chunk_offsets[next_chunk];
bool const is_next_compressed =
(chunks[next_chunk].codec != parquet::Compression::UNCOMPRESSED);
if (next_offset != io_offset + io_size || is_next_compressed != is_compressed ||
chunk_source_map[chunk] != chunk_source_map[next_chunk]) {
// Can't merge if not contiguous or mixing compressed and uncompressed
// Not coalescing uncompressed with compressed chunks is so that compressed buffers can be
// freed earlier (immediately after decompression stage) to limit peak memory requirements
break;
}
io_size += chunks[next_chunk].compressed_size;
next_chunk++;
}
if (io_size != 0) {
auto& source = sources[chunk_source_map[chunk]];
if (source->is_device_read_preferred(io_size)) {
// Buffer needs to be padded.
// Required by `gpuDecodePageData`.
auto buffer =
rmm::device_buffer(cudf::util::round_up_safe(io_size, BUFFER_PADDING_MULTIPLE), stream);
auto fut_read_size = source->device_read_async(
io_offset, io_size, static_cast<uint8_t*>(buffer.data()), stream);
read_tasks.emplace_back(std::move(fut_read_size));
page_data[chunk] = datasource::buffer::create(std::move(buffer));
} else {
auto const read_buffer = source->host_read(io_offset, io_size);
// Buffer needs to be padded.
// Required by `gpuDecodePageData`.
auto tmp_buffer = rmm::device_buffer(
cudf::util::round_up_safe(read_buffer->size(), BUFFER_PADDING_MULTIPLE), stream);
CUDF_CUDA_TRY(hipMemcpyAsync(
tmp_buffer.data(), read_buffer->data(), read_buffer->size(), hipMemcpyDefault, stream));
page_data[chunk] = datasource::buffer::create(std::move(tmp_buffer));
}
auto d_compdata = page_data[chunk]->data();
do {
chunks[chunk].compressed_data = d_compdata;
d_compdata += chunks[chunk].compressed_size;
} while (++chunk != next_chunk);
} else {
chunk = next_chunk;
}
}
auto sync_fn = [](decltype(read_tasks) read_tasks) {
for (auto& task : read_tasks) {
task.wait();
}
};
return std::async(std::launch::deferred, sync_fn, std::move(read_tasks));
}
/**
* @brief Return the number of total pages from the given column chunks.
*
* @param chunks List of column chunk descriptors
* @param stream CUDA stream used for device memory operations and kernel launches
*
* @return The total number of pages
*/
[[nodiscard]] size_t count_page_headers(
cudf::detail::hostdevice_vector<gpu::ColumnChunkDesc>& chunks, rmm::cuda_stream_view stream)
{
size_t total_pages = 0;
chunks.host_to_device_async(stream);
gpu::DecodePageHeaders(chunks.device_ptr(), chunks.size(), stream);
chunks.device_to_host_sync(stream);
for (size_t c = 0; c < chunks.size(); c++) {
total_pages += chunks[c].num_data_pages + chunks[c].num_dict_pages;
}
return total_pages;
}
// see setupLocalPageInfo() in page_data.cu for supported page encodings
constexpr bool is_supported_encoding(Encoding enc)
{
switch (enc) {
case Encoding::PLAIN:
case Encoding::PLAIN_DICTIONARY:
case Encoding::RLE:
case Encoding::RLE_DICTIONARY:
case Encoding::DELTA_BINARY_PACKED: return true;
default: return false;
}
}
/**
* @brief Decode the page information from the given column chunks.
*
* @param chunks List of column chunk descriptors
* @param pages List of page information
* @param stream CUDA stream used for device memory operations and kernel launches
* @returns The size in bytes of level type data required
*/
int decode_page_headers(cudf::detail::hostdevice_vector<gpu::ColumnChunkDesc>& chunks,
cudf::detail::hostdevice_vector<gpu::PageInfo>& pages,
rmm::cuda_stream_view stream)
{
// IMPORTANT : if you change how pages are stored within a chunk (dist pages, then data pages),
// please update preprocess_nested_columns to reflect this.
for (size_t c = 0, page_count = 0; c < chunks.size(); c++) {
chunks[c].max_num_pages = chunks[c].num_data_pages + chunks[c].num_dict_pages;
chunks[c].page_info = pages.device_ptr(page_count);
page_count += chunks[c].max_num_pages;
}
chunks.host_to_device_async(stream);
gpu::DecodePageHeaders(chunks.device_ptr(), chunks.size(), stream);
// compute max bytes needed for level data
auto level_bit_size =
cudf::detail::make_counting_transform_iterator(0, [chunks = chunks.begin()] __device__(int i) {
auto c = chunks[i];
return static_cast<int>(
max(c.level_bits[gpu::level_type::REPETITION], c.level_bits[gpu::level_type::DEFINITION]));
});
// max level data bit size.
int const max_level_bits = thrust::reduce(rmm::exec_policy(stream),
level_bit_size,
level_bit_size + chunks.size(),
0,
thrust::maximum<int>());
auto const level_type_size = ::max(1, cudf::util::div_rounding_up_safe(max_level_bits, 8));
pages.device_to_host_sync(stream);
// validate page encodings
CUDF_EXPECTS(std::all_of(pages.begin(),
pages.end(),
[](auto const& page) { return is_supported_encoding(page.encoding); }),
"Unsupported page encoding detected");
return level_type_size;
}
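// Worked example (values for illustration only): if the widest level encoding across all chunks
// uses 10 bits, then max_level_bits = 10 and level_type_size = max(1, ceil(10 / 8)) = 2, i.e. the
// level decode buffers are sized for 2-byte level values.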
/**
* @brief Decompresses the page data, at page granularity.
*
* @param chunks List of column chunk descriptors
* @param pages List of page information
* @param stream CUDA stream used for device memory operations and kernel launches
*
* @return Device buffer to decompressed page data
*/
[[nodiscard]] rmm::device_buffer decompress_page_data(
cudf::detail::hostdevice_vector<gpu::ColumnChunkDesc>& chunks,
cudf::detail::hostdevice_vector<gpu::PageInfo>& pages,
rmm::cuda_stream_view stream)
{
auto for_each_codec_page = [&](parquet::Compression codec, std::function<void(size_t)> const& f) {
for (size_t c = 0, page_count = 0; c < chunks.size(); c++) {
const auto page_stride = chunks[c].max_num_pages;
if (chunks[c].codec == codec) {
for (int k = 0; k < page_stride; k++) {
f(page_count + k);
}
}
page_count += page_stride;
}
};
// Brotli scratch memory for decompressing
rmm::device_buffer debrotli_scratch;
// Count the exact number of compressed pages
size_t num_comp_pages = 0;
size_t total_decomp_size = 0;
struct codec_stats {
parquet::Compression compression_type = UNCOMPRESSED;
size_t num_pages = 0;
int32_t max_decompressed_size = 0;
size_t total_decomp_size = 0;
};
std::array codecs{codec_stats{parquet::GZIP},
codec_stats{parquet::SNAPPY},
codec_stats{parquet::BROTLI},
codec_stats{parquet::ZSTD}};
auto is_codec_supported = [&codecs](int8_t codec) {
if (codec == parquet::UNCOMPRESSED) return true;
return std::find_if(codecs.begin(), codecs.end(), [codec](auto& cstats) {
return codec == cstats.compression_type;
}) != codecs.end();
};
CUDF_EXPECTS(std::all_of(chunks.begin(),
chunks.end(),
[&is_codec_supported](auto const& chunk) {
return is_codec_supported(chunk.codec);
}),
"Unsupported compression type");
for (auto& codec : codecs) {
for_each_codec_page(codec.compression_type, [&](size_t page) {
auto page_uncomp_size = pages[page].uncompressed_page_size;
total_decomp_size += page_uncomp_size;
codec.total_decomp_size += page_uncomp_size;
codec.max_decompressed_size = ::max(codec.max_decompressed_size, page_uncomp_size);
codec.num_pages++;
num_comp_pages++;
});
if (codec.compression_type == parquet::BROTLI && codec.num_pages > 0) {
debrotli_scratch.resize(get_gpu_debrotli_scratch_size(codec.num_pages), stream);
}
}
// Dispatch batches of pages to decompress for each codec.
// Buffer needs to be padded, required by `gpuDecodePageData`.
rmm::device_buffer decomp_pages(
cudf::util::round_up_safe(total_decomp_size, BUFFER_PADDING_MULTIPLE), stream);
std::vector<device_span<uint8_t const>> comp_in;
comp_in.reserve(num_comp_pages);
std::vector<device_span<uint8_t>> comp_out;
comp_out.reserve(num_comp_pages);
// vectors to save v2 def and rep level data, if any
std::vector<device_span<uint8_t const>> copy_in;
copy_in.reserve(num_comp_pages);
std::vector<device_span<uint8_t>> copy_out;
copy_out.reserve(num_comp_pages);
rmm::device_uvector<compression_result> comp_res(num_comp_pages, stream);
thrust::fill(rmm::exec_policy(stream),
comp_res.begin(),
comp_res.end(),
compression_result{0, compression_status::FAILURE});
size_t decomp_offset = 0;
int32_t start_pos = 0;
for (auto const& codec : codecs) {
if (codec.num_pages == 0) { continue; }
for_each_codec_page(codec.compression_type, [&](size_t page_idx) {
auto const dst_base = static_cast<uint8_t*>(decomp_pages.data()) + decomp_offset;
auto& page = pages[page_idx];
// offset will only be non-zero for V2 pages
auto const offset =
page.lvl_bytes[gpu::level_type::DEFINITION] + page.lvl_bytes[gpu::level_type::REPETITION];
// for V2 need to copy def and rep level info into place, and then offset the
// input and output buffers. otherwise we'd have to keep both the compressed
// and decompressed data.
if (offset != 0) {
copy_in.emplace_back(page.page_data, offset);
copy_out.emplace_back(dst_base, offset);
}
comp_in.emplace_back(page.page_data + offset,
static_cast<size_t>(page.compressed_page_size - offset));
comp_out.emplace_back(dst_base + offset,
static_cast<size_t>(page.uncompressed_page_size - offset));
page.page_data = dst_base;
decomp_offset += page.uncompressed_page_size;
});
host_span<device_span<uint8_t const> const> comp_in_view{comp_in.data() + start_pos,
codec.num_pages};
auto const d_comp_in = cudf::detail::make_device_uvector_async(
comp_in_view, stream, rmm::mr::get_current_device_resource());
host_span<device_span<uint8_t> const> comp_out_view(comp_out.data() + start_pos,
codec.num_pages);
auto const d_comp_out = cudf::detail::make_device_uvector_async(
comp_out_view, stream, rmm::mr::get_current_device_resource());
device_span<compression_result> d_comp_res_view(comp_res.data() + start_pos, codec.num_pages);
switch (codec.compression_type) {
case parquet::GZIP:
gpuinflate(d_comp_in, d_comp_out, d_comp_res_view, gzip_header_included::YES, stream);
break;
case parquet::SNAPPY:
if (nvcomp_integration::is_stable_enabled()) {
nvcomp::batched_decompress(nvcomp::compression_type::SNAPPY,
d_comp_in,
d_comp_out,
d_comp_res_view,
codec.max_decompressed_size,
codec.total_decomp_size,
stream);
} else {
gpu_unsnap(d_comp_in, d_comp_out, d_comp_res_view, stream);
}
break;
case parquet::ZSTD:
nvcomp::batched_decompress(nvcomp::compression_type::ZSTD,
d_comp_in,
d_comp_out,
d_comp_res_view,
codec.max_decompressed_size,
codec.total_decomp_size,
stream);
break;
case parquet::BROTLI:
gpu_debrotli(d_comp_in,
d_comp_out,
d_comp_res_view,
debrotli_scratch.data(),
debrotli_scratch.size(),
stream);
break;
default: CUDF_FAIL("Unexpected decompression dispatch"); break;
}
start_pos += codec.num_pages;
}
CUDF_EXPECTS(thrust::all_of(rmm::exec_policy(stream),
comp_res.begin(),
comp_res.end(),
[] __device__(auto const& res) {
return res.status == compression_status::SUCCESS;
}),
"Error during decompression");
// now copy the uncompressed V2 def and rep level data
if (not copy_in.empty()) {
auto const d_copy_in = cudf::detail::make_device_uvector_async(
copy_in, stream, rmm::mr::get_current_device_resource());
auto const d_copy_out = cudf::detail::make_device_uvector_async(
copy_out, stream, rmm::mr::get_current_device_resource());
gpu_copy_uncompressed_blocks(d_copy_in, d_copy_out, stream);
stream.synchronize();
}
// Update the page information in device memory with the updated value of
// page_data; it now points to the uncompressed data buffer
pages.host_to_device_async(stream);
return decomp_pages;
}
} // namespace
void reader::impl::allocate_nesting_info()
{
auto const& chunks = _file_itm_data.chunks;
auto& pages = _file_itm_data.pages_info;
auto& page_nesting_info = _file_itm_data.page_nesting_info;
auto& page_nesting_decode_info = _file_itm_data.page_nesting_decode_info;
// compute total # of page_nesting infos needed and allocate space. doing this in one
// buffer to keep it to a single gpu allocation
size_t const total_page_nesting_infos = std::accumulate(
chunks.host_ptr(), chunks.host_ptr() + chunks.size(), 0, [&](int total, auto& chunk) {
// the schema of the input column
auto const& schema = _metadata->get_schema(chunk.src_col_schema);
auto const per_page_nesting_info_size = max(
schema.max_definition_level + 1, _metadata->get_output_nesting_depth(chunk.src_col_schema));
return total + (per_page_nesting_info_size * chunk.num_data_pages);
});
page_nesting_info =
cudf::detail::hostdevice_vector<gpu::PageNestingInfo>{total_page_nesting_infos, _stream};
page_nesting_decode_info =
cudf::detail::hostdevice_vector<gpu::PageNestingDecodeInfo>{total_page_nesting_infos, _stream};
// update pointers in the PageInfos
int target_page_index = 0;
int src_info_index = 0;
for (size_t idx = 0; idx < chunks.size(); idx++) {
int src_col_schema = chunks[idx].src_col_schema;
auto& schema = _metadata->get_schema(src_col_schema);
auto const per_page_nesting_info_size = ::max(
schema.max_definition_level + 1, _metadata->get_output_nesting_depth(src_col_schema));
// skip my dict pages
target_page_index += chunks[idx].num_dict_pages;
for (int p_idx = 0; p_idx < chunks[idx].num_data_pages; p_idx++) {
pages[target_page_index + p_idx].nesting = page_nesting_info.device_ptr() + src_info_index;
pages[target_page_index + p_idx].nesting_decode =
page_nesting_decode_info.device_ptr() + src_info_index;
pages[target_page_index + p_idx].nesting_info_size = per_page_nesting_info_size;
pages[target_page_index + p_idx].num_output_nesting_levels =
_metadata->get_output_nesting_depth(src_col_schema);
src_info_index += per_page_nesting_info_size;
}
target_page_index += chunks[idx].num_data_pages;
}
// fill in
int nesting_info_index = 0;
std::map<int, std::pair<std::vector<int>, std::vector<int>>> depth_remapping;
for (size_t idx = 0; idx < chunks.size(); idx++) {
int src_col_schema = chunks[idx].src_col_schema;
// schema of the input column
auto& schema = _metadata->get_schema(src_col_schema);
// real depth of the output cudf column hierarchy (1 == no nesting, 2 == 1 level, etc)
int max_depth = _metadata->get_output_nesting_depth(src_col_schema);
// # of nesting infos stored per page for this column
auto const per_page_nesting_info_size = ::max(schema.max_definition_level + 1, max_depth);
// if this column has lists, generate depth remapping
std::map<int, std::pair<std::vector<int>, std::vector<int>>> depth_remapping;
if (schema.max_repetition_level > 0) {
generate_depth_remappings(depth_remapping, src_col_schema, *_metadata);
}
// fill in host-side nesting info
int schema_idx = src_col_schema;
auto cur_schema = _metadata->get_schema(schema_idx);
int cur_depth = max_depth - 1;
while (schema_idx > 0) {
// stub columns (basically the inner field of a list schema element) are not real columns.
// we can ignore them for the purposes of output nesting info
if (!cur_schema.is_stub()) {
// initialize each page within the chunk
for (int p_idx = 0; p_idx < chunks[idx].num_data_pages; p_idx++) {
gpu::PageNestingInfo* pni =
&page_nesting_info[nesting_info_index + (p_idx * per_page_nesting_info_size)];
gpu::PageNestingDecodeInfo* nesting_info =
&page_nesting_decode_info[nesting_info_index + (p_idx * per_page_nesting_info_size)];
// if we have lists, set our start and end depth remappings
if (schema.max_repetition_level > 0) {
auto remap = depth_remapping.find(src_col_schema);
CUDF_EXPECTS(remap != depth_remapping.end(),
"Could not find depth remapping for schema");
std::vector<int> const& rep_depth_remap = (remap->second.first);
std::vector<int> const& def_depth_remap = (remap->second.second);
for (size_t m = 0; m < rep_depth_remap.size(); m++) {
nesting_info[m].start_depth = rep_depth_remap[m];
}
for (size_t m = 0; m < def_depth_remap.size(); m++) {
nesting_info[m].end_depth = def_depth_remap[m];
}
}
// values indexed by output column index
nesting_info[cur_depth].max_def_level = cur_schema.max_definition_level;
pni[cur_depth].size = 0;
pni[cur_depth].type =
to_type_id(cur_schema, _strings_to_categorical, _timestamp_type.id());
pni[cur_depth].nullable = cur_schema.repetition_type == OPTIONAL;
}
// move up the hierarchy
cur_depth--;
}
// next schema
schema_idx = cur_schema.parent_idx;
cur_schema = _metadata->get_schema(schema_idx);
}
nesting_info_index += (per_page_nesting_info_size * chunks[idx].num_data_pages);
}
// copy nesting info to the device
page_nesting_info.host_to_device_async(_stream);
page_nesting_decode_info.host_to_device_async(_stream);
}
void reader::impl::allocate_level_decode_space()
{
auto& pages = _file_itm_data.pages_info;
// TODO: this could be made smaller if we ignored dictionary pages and pages with no
// repetition data.
size_t const per_page_decode_buf_size =
LEVEL_DECODE_BUF_SIZE * 2 * _file_itm_data.level_type_size;
auto const decode_buf_size = per_page_decode_buf_size * pages.size();
_file_itm_data.level_decode_data =
rmm::device_buffer(decode_buf_size, _stream, rmm::mr::get_current_device_resource());
// distribute the buffers
uint8_t* buf = static_cast<uint8_t*>(_file_itm_data.level_decode_data.data());
for (size_t idx = 0; idx < pages.size(); idx++) {
auto& p = pages[idx];
p.lvl_decode_buf[gpu::level_type::DEFINITION] = buf;
buf += (LEVEL_DECODE_BUF_SIZE * _file_itm_data.level_type_size);
p.lvl_decode_buf[gpu::level_type::REPETITION] = buf;
buf += (LEVEL_DECODE_BUF_SIZE * _file_itm_data.level_type_size);
}
}
std::pair<bool, std::vector<std::future<void>>> reader::impl::create_and_read_column_chunks(
cudf::host_span<row_group_info const> const row_groups_info, size_type num_rows)
{
auto& raw_page_data = _file_itm_data.raw_page_data;
auto& chunks = _file_itm_data.chunks;
// Descriptors for all the chunks that make up the selected columns
auto const num_input_columns = _input_columns.size();
auto const num_chunks = row_groups_info.size() * num_input_columns;
chunks = cudf::detail::hostdevice_vector<gpu::ColumnChunkDesc>(0, num_chunks, _stream);
// Association between each column chunk and its source
std::vector<size_type> chunk_source_map(num_chunks);
// Tracker for eventually deallocating compressed and uncompressed data
raw_page_data = std::vector<std::unique_ptr<datasource::buffer>>(num_chunks);
// Keep track of column chunk file offsets
std::vector<size_t> column_chunk_offsets(num_chunks);
// Initialize column chunk information
size_t total_decompressed_size = 0;
auto remaining_rows = num_rows;
std::vector<std::future<void>> read_rowgroup_tasks;
for (auto const& rg : row_groups_info) {
auto const& row_group = _metadata->get_row_group(rg.index, rg.source_index);
auto const row_group_start = rg.start_row;
auto const row_group_source = rg.source_index;
auto const row_group_rows = std::min<int>(remaining_rows, row_group.num_rows);
// generate ColumnChunkDesc objects for everything to be decoded (all input columns)
for (size_t i = 0; i < num_input_columns; ++i) {
auto col = _input_columns[i];
// look up metadata
auto& col_meta = _metadata->get_column_metadata(rg.index, rg.source_index, col.schema_idx);
auto& schema = _metadata->get_schema(col.schema_idx);
auto [type_width, clock_rate, converted_type] =
conversion_info(to_type_id(schema, _strings_to_categorical, _timestamp_type.id()),
_timestamp_type.id(),
schema.type,
schema.converted_type,
schema.type_length);
column_chunk_offsets[chunks.size()] =
(col_meta.dictionary_page_offset != 0)
? ::min(col_meta.data_page_offset, col_meta.dictionary_page_offset)
: col_meta.data_page_offset;
chunks.push_back(gpu::ColumnChunkDesc(col_meta.total_compressed_size,
nullptr,
col_meta.num_values,
schema.type,
type_width,
row_group_start,
row_group_rows,
schema.max_definition_level,
schema.max_repetition_level,
_metadata->get_output_nesting_depth(col.schema_idx),
required_bits(schema.max_definition_level),
required_bits(schema.max_repetition_level),
col_meta.codec,
converted_type,
schema.logical_type,
schema.decimal_precision,
clock_rate,
i,
col.schema_idx));
// Map each column chunk to its column index and its source index
chunk_source_map[chunks.size() - 1] = row_group_source;
if (col_meta.codec != Compression::UNCOMPRESSED) {
total_decompressed_size += col_meta.total_uncompressed_size;
}
}
remaining_rows -= row_group_rows;
}
// Read compressed chunk data to device memory
read_rowgroup_tasks.push_back(read_column_chunks_async(_sources,
raw_page_data,
chunks,
0,
chunks.size(),
column_chunk_offsets,
chunk_source_map,
_stream));
CUDF_EXPECTS(remaining_rows == 0, "All requested rows must be read.");
return {total_decompressed_size > 0, std::move(read_rowgroup_tasks)};
}
void reader::impl::load_and_decompress_data(
cudf::host_span<row_group_info const> const row_groups_info, size_type num_rows)
{
// This function should never be called if `num_rows == 0`.
CUDF_EXPECTS(num_rows > 0, "Number of rows to read must not be zero.");
auto& raw_page_data = _file_itm_data.raw_page_data;
auto& decomp_page_data = _file_itm_data.decomp_page_data;
auto& chunks = _file_itm_data.chunks;
auto& pages = _file_itm_data.pages_info;
auto const [has_compressed_data, read_rowgroup_tasks] =
create_and_read_column_chunks(row_groups_info, num_rows);
for (auto& task : read_rowgroup_tasks) {
task.wait();
}
// Process dataset chunk pages into output columns
auto const total_pages = count_page_headers(chunks, _stream);
pages = cudf::detail::hostdevice_vector<gpu::PageInfo>(total_pages, total_pages, _stream);
if (total_pages > 0) {
// decoding of column/page information
_file_itm_data.level_type_size = decode_page_headers(chunks, pages, _stream);
if (has_compressed_data) {
decomp_page_data = decompress_page_data(chunks, pages, _stream);
// Free compressed data
for (size_t c = 0; c < chunks.size(); c++) {
if (chunks[c].codec != parquet::Compression::UNCOMPRESSED) { raw_page_data[c].reset(); }
}
}
// build output column info
// walk the schema, building out_buffers that mirror what our final cudf columns will look
// like. important : there is not necessarily a 1:1 mapping between input columns and output
// columns. For example, parquet does not explicitly store a ColumnChunkDesc for struct
// columns. The "structiness" is simply implied by the schema. For example, this schema:
// required group field_id=1 name {
// required binary field_id=2 firstname (String);
// required binary field_id=3 middlename (String);
// required binary field_id=4 lastname (String);
// }
// will only contain 3 columns of data (firstname, middlename, lastname). But of course
// "name" is a struct column that we want to return, so we have to make sure that we
// create it ourselves.
// std::vector<output_column_info> output_info = build_output_column_info();
// the following two allocate functions modify the page data
pages.device_to_host_sync(_stream);
{
// nesting information (sizes, etc) stored -per page-
// note : even for flat schemas, we allocate 1 level of "nesting" info
allocate_nesting_info();
// level decode space
allocate_level_decode_space();
}
pages.host_to_device_async(_stream);
}
}
namespace {
struct cumulative_row_info {
size_t row_count; // cumulative row count
size_t size_bytes; // cumulative size in bytes
int key; // schema index
};
#if defined(PREPROCESS_DEBUG)
void print_pages(cudf::detail::hostdevice_vector<gpu::PageInfo>& pages,
rmm::cuda_stream_view _stream)
{
pages.device_to_host_sync(_stream);
for (size_t idx = 0; idx < pages.size(); idx++) {
auto const& p = pages[idx];
// skip dictionary pages
if (p.flags & gpu::PAGEINFO_FLAGS_DICTIONARY) { continue; }
printf(
"P(%lu, s:%d): chunk_row(%d), num_rows(%d), skipped_values(%d), skipped_leaf_values(%d), "
"str_bytes(%d)\n",
idx,
p.src_col_schema,
p.chunk_row,
p.num_rows,
p.skipped_values,
p.skipped_leaf_values,
p.str_bytes);
}
}
void print_cumulative_page_info(cudf::detail::hostdevice_vector<gpu::PageInfo>& pages,
rmm::device_uvector<int32_t> const& page_index,
rmm::device_uvector<cumulative_row_info> const& c_info,
rmm::cuda_stream_view stream)
{
pages.device_to_host_sync(stream);
printf("------------\nCumulative sizes by page\n");
std::vector<int> schemas(pages.size());
std::vector<int> h_page_index(pages.size());
CUDF_CUDA_TRY(hipMemcpy(
h_page_index.data(), page_index.data(), sizeof(int) * pages.size(), hipMemcpyDefault));
std::vector<cumulative_row_info> h_cinfo(pages.size());
CUDF_CUDA_TRY(hipMemcpy(
h_cinfo.data(), c_info.data(), sizeof(cumulative_row_info) * pages.size(), hipMemcpyDefault));
auto schema_iter = cudf::detail::make_counting_transform_iterator(
0, [&](size_type i) { return pages[h_page_index[i]].src_col_schema; });
thrust::copy(thrust::seq, schema_iter, schema_iter + pages.size(), schemas.begin());
auto last = thrust::unique(thrust::seq, schemas.begin(), schemas.end());
schemas.resize(last - schemas.begin());
printf("Num schemas: %lu\n", schemas.size());
for (size_t idx = 0; idx < schemas.size(); idx++) {
printf("Schema %d\n", schemas[idx]);
for (size_t pidx = 0; pidx < pages.size(); pidx++) {
auto const& page = pages[h_page_index[pidx]];
if (page.flags & gpu::PAGEINFO_FLAGS_DICTIONARY || page.src_col_schema != schemas[idx]) {
continue;
}
printf("\tP: {%lu, %lu}\n", h_cinfo[pidx].row_count, h_cinfo[pidx].size_bytes);
}
}
}
void print_cumulative_row_info(
host_span<cumulative_row_info const> sizes,
std::string const& label,
std::optional<std::vector<gpu::chunk_read_info>> splits = std::nullopt)
{
if (splits.has_value()) {
printf("------------\nSplits\n");
for (size_t idx = 0; idx < splits->size(); idx++) {
printf("{%lu, %lu}\n", splits.value()[idx].skip_rows, splits.value()[idx].num_rows);
}
}
printf("------------\nCumulative sizes %s\n", label.c_str());
for (size_t idx = 0; idx < sizes.size(); idx++) {
printf("{%lu, %lu, %d}", sizes[idx].row_count, sizes[idx].size_bytes, sizes[idx].key);
if (splits.has_value()) {
// if we have a split at this row count and this is the last instance of this row count
auto start = thrust::make_transform_iterator(
splits->begin(), [](gpu::chunk_read_info const& i) { return i.skip_rows; });
auto end = start + splits->size();
auto split = std::find(start, end, sizes[idx].row_count);
auto const split_index = [&]() -> int {
if (split != end &&
((idx == sizes.size() - 1) || (sizes[idx + 1].row_count > sizes[idx].row_count))) {
return static_cast<int>(std::distance(start, split));
}
return idx == 0 ? 0 : -1;
}();
if (split_index >= 0) {
printf(" <-- split {%lu, %lu}",
splits.value()[split_index].skip_rows,
splits.value()[split_index].num_rows);
}
}
printf("\n");
}
}
#endif // PREPROCESS_DEBUG
/**
* @brief Functor which reduces two cumulative_row_info structs of the same key.
*/
struct cumulative_row_sum {
cumulative_row_info operator()
__device__(cumulative_row_info const& a, cumulative_row_info const& b) const
{
return cumulative_row_info{a.row_count + b.row_count, a.size_bytes + b.size_bytes, a.key};
}
};
/**
* @brief Functor which computes the total data size for a given type of cudf column.
*
* In the case of strings, the return size does not include the chars themselves. That
* information is tracked separately (see PageInfo::str_bytes).
*/
struct row_size_functor {
__device__ size_t validity_size(size_t num_rows, bool nullable)
{
return nullable ? (cudf::util::div_rounding_up_safe(num_rows, size_t{32}) * 4) : 0;
}
template <typename T>
__device__ size_t operator()(size_t num_rows, bool nullable)
{
auto const element_size = sizeof(device_storage_type_t<T>);
return (element_size * num_rows) + validity_size(num_rows, nullable);
}
};
template <>
__device__ size_t row_size_functor::operator()<list_view>(size_t num_rows, bool nullable)
{
auto const offset_size = sizeof(size_type);
// NOTE: Adding the + 1 offset here isn't strictly correct. There will only be 1 extra offset
// for the entire column, whereas this is adding an extra offset per page. So we will get a
// small over-estimate of the real size of the order : # of pages * 4 bytes. It seems better
// to overestimate size somewhat than to underestimate it and potentially generate chunks
// that are too large.
return (offset_size * (num_rows + 1)) + validity_size(num_rows, nullable);
}
template <>
__device__ size_t row_size_functor::operator()<struct_view>(size_t num_rows, bool nullable)
{
return validity_size(num_rows, nullable);
}
template <>
__device__ size_t row_size_functor::operator()<string_view>(size_t num_rows, bool nullable)
{
// only returns the size of offsets and validity. the size of the actual string chars
// is tracked separately.
auto const offset_size = sizeof(size_type);
// see note about offsets in the list_view template.
return (offset_size * (num_rows + 1)) + validity_size(num_rows, nullable);
}
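// Worked example (numbers for illustration only): a page contributing 100 rows of a nullable LIST
// column is costed at 4 * (100 + 1) = 404 bytes of offsets plus ceil(100 / 32) * 4 = 16 bytes of
// validity, i.e. 420 bytes; the deliberate one-extra-offset-per-page over-estimate is described in
// the list_view specialization above, and string character data is accounted separately through
// PageInfo::str_bytes.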
/**
* @brief Functor which computes the total output cudf data size for all of
* the data in this page.
*
* Sums across all nesting levels.
*/
struct get_cumulative_row_info {
gpu::PageInfo const* const pages;
__device__ cumulative_row_info operator()(size_type index)
{
auto const& page = pages[index];
if (page.flags & gpu::PAGEINFO_FLAGS_DICTIONARY) {
return cumulative_row_info{0, 0, page.src_col_schema};
}
// total nested size, not counting string data
auto iter =
cudf::detail::make_counting_transform_iterator(0, [page, index] __device__(size_type i) {
auto const& pni = page.nesting[i];
return cudf::type_dispatcher(
data_type{pni.type}, row_size_functor{}, pni.size, pni.nullable);
});
size_t const row_count = static_cast<size_t>(page.nesting[0].size);
return {
row_count,
thrust::reduce(thrust::seq, iter, iter + page.num_output_nesting_levels) + page.str_bytes,
page.src_col_schema};
}
};
/**
* @brief Functor which computes the effective size of all input columns by page.
*
* For a given row, we want to find the cost of all pages for all columns involved
* in loading up to that row. The complication here is that not all pages are the
* same size between columns. Example:
*
* page row counts
* Column A: 0 <----> 100 <----> 200
* Column B: 0 <---------------> 200 <--------> 400
*                        |
* if we decide to split at row 100, we don't really know the actual amount of bytes in column B
* at that point. So we have to proceed as if we are taking the bytes from all 200 rows of that
* page. Essentially, a conservative over-estimate of the real size.
*/
struct row_total_size {
cumulative_row_info const* c_info;
size_type const* key_offsets;
size_t num_keys;
__device__ cumulative_row_info operator()(cumulative_row_info const& i)
{
// sum sizes for each input column at this row
size_t sum = 0;
for (int idx = 0; idx < num_keys; idx++) {
auto const start = key_offsets[idx];
auto const end = key_offsets[idx + 1];
auto iter = cudf::detail::make_counting_transform_iterator(
0, [&] __device__(size_type i) { return c_info[i].row_count; });
auto const page_index =
thrust::lower_bound(thrust::seq, iter + start, iter + end, i.row_count) - iter;
sum += c_info[page_index].size_bytes;
}
return {i.row_count, sum, i.key};
}
};
/**
* @brief Given a vector of cumulative {row_count, byte_size} pairs and a chunk read
* limit, determine the set of splits.
*
* @param sizes Vector of cumulative {row_count, byte_size} pairs
* @param num_rows Total number of rows to read
* @param chunk_read_limit Limit on total number of bytes to be returned per read, for all columns
*/
std::vector<gpu::chunk_read_info> find_splits(std::vector<cumulative_row_info> const& sizes,
size_t num_rows,
size_t chunk_read_limit)
{
// now we have an array of {row_count, real output bytes}. just walk through it and generate
// splits.
// TODO: come up with a clever way to do this entirely in parallel. For now, as long as batch
// sizes are reasonably large, this shouldn't iterate too many times
std::vector<gpu::chunk_read_info> splits;
{
size_t cur_pos = 0;
size_t cur_cumulative_size = 0;
size_t cur_row_count = 0;
auto start = thrust::make_transform_iterator(sizes.begin(), [&](cumulative_row_info const& i) {
return i.size_bytes - cur_cumulative_size;
});
auto end = start + sizes.size();
while (cur_row_count < num_rows) {
int64_t split_pos =
thrust::lower_bound(thrust::seq, start + cur_pos, end, chunk_read_limit) - start;
// if we're past the end, or if the returned bucket is > than the chunk_read_limit, move back
// one.
if (static_cast<size_t>(split_pos) >= sizes.size() ||
(sizes[split_pos].size_bytes - cur_cumulative_size > chunk_read_limit)) {
split_pos--;
}
// best-try. if we can't find something that'll fit, we have to go bigger. we're doing this in
// a loop because all of the cumulative sizes for all the pages are sorted into one big list.
// so if we had two columns, both of which had an entry {1000, 10000}, that entry would be in
// the list twice. so we have to iterate until we skip past all of them. The idea is that we
// either do this, or we have to call unique() on the input first.
while (split_pos < (static_cast<int64_t>(sizes.size()) - 1) &&
(split_pos < 0 || sizes[split_pos].row_count == cur_row_count)) {
split_pos++;
}
auto const start_row = cur_row_count;
cur_row_count = sizes[split_pos].row_count;
splits.push_back(gpu::chunk_read_info{start_row, cur_row_count - start_row});
cur_pos = split_pos;
cur_cumulative_size = sizes[split_pos].size_bytes;
}
}
// print_cumulative_row_info(sizes, "adjusted", splits);
return splits;
}
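// Worked example (hypothetical aggregated sizes): given entries {row_count 100, size_bytes 1000},
// {200, 1800}, {300, 4000} and chunk_read_limit = 2000, the first iteration accepts rows [0, 200)
// at ~1800 bytes, and the second is forced to take rows [200, 300) even though that page alone
// exceeds the limit, producing splits {skip_rows 0, num_rows 200} and {skip_rows 200, num_rows 100}.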
/**
* @brief Given a set of pages that have had their sizes computed by nesting level and
* a limit on total read size, generate a set of {skip_rows, num_rows} pairs representing
* a set of reads that will generate output columns of total size <= `chunk_read_limit` bytes.
*
* @param pages All pages in the file
* @param id Additional intermediate information required to process the pages
* @param num_rows Total number of rows to read
* @param chunk_read_limit Limit on total number of bytes to be returned per read, for all columns
* @param stream CUDA stream to use
*/
std::vector<gpu::chunk_read_info> compute_splits(
cudf::detail::hostdevice_vector<gpu::PageInfo>& pages,
gpu::chunk_intermediate_data const& id,
size_t num_rows,
size_t chunk_read_limit,
rmm::cuda_stream_view stream)
{
auto const& page_keys = id.page_keys;
auto const& page_index = id.page_index;
// generate cumulative row counts and sizes
rmm::device_uvector<cumulative_row_info> c_info(page_keys.size(), stream);
// convert PageInfo to cumulative_row_info
auto page_input = thrust::make_transform_iterator(page_index.begin(),
get_cumulative_row_info{pages.device_ptr()});
thrust::inclusive_scan_by_key(rmm::exec_policy(stream),
page_keys.begin(),
page_keys.end(),
page_input,
c_info.begin(),
thrust::equal_to{},
cumulative_row_sum{});
// print_cumulative_page_info(pages, page_index, c_info, stream);
// sort by row count
rmm::device_uvector<cumulative_row_info> c_info_sorted{c_info, stream};
thrust::sort(rmm::exec_policy(stream),
c_info_sorted.begin(),
c_info_sorted.end(),
[] __device__(cumulative_row_info const& a, cumulative_row_info const& b) {
return a.row_count < b.row_count;
});
// std::vector<cumulative_row_info> h_c_info_sorted(c_info_sorted.size());
// CUDF_CUDA_TRY(hipMemcpy(h_c_info_sorted.data(),
// c_info_sorted.data(),
// sizeof(cumulative_row_info) * c_info_sorted.size(),
// hipMemcpyDefault));
// print_cumulative_row_info(h_c_info_sorted, "raw");
// generate key offsets (offsets to the start of each partition of keys). worst case is 1 page per
// key
rmm::device_uvector<size_type> key_offsets(page_keys.size() + 1, stream);
auto const key_offsets_end = thrust::reduce_by_key(rmm::exec_policy(stream),
page_keys.begin(),
page_keys.end(),
thrust::make_constant_iterator(1),
thrust::make_discard_iterator(),
key_offsets.begin())
.second;
size_t const num_unique_keys = key_offsets_end - key_offsets.begin();
thrust::exclusive_scan(
rmm::exec_policy(stream), key_offsets.begin(), key_offsets.end(), key_offsets.begin());
// adjust the cumulative info such that for each row count, the size includes any pages that span
// that row count. this is so that if we have this case:
// page row counts
// Column A: 0 <----> 100 <----> 200
// Column B: 0 <---------------> 200 <--------> 400
// |
// if we decide to split at row 100, we don't really know the actual amount of bytes in column B
// at that point. So we have to proceed as if we are taking the bytes from all 200 rows of that
// page.
//
rmm::device_uvector<cumulative_row_info> aggregated_info(c_info.size(), stream);
thrust::transform(rmm::exec_policy(stream),
c_info_sorted.begin(),
c_info_sorted.end(),
aggregated_info.begin(),
row_total_size{c_info.data(), key_offsets.data(), num_unique_keys});
// bring back to the cpu
std::vector<cumulative_row_info> h_aggregated_info(aggregated_info.size());
CUDF_CUDA_TRY(hipMemcpyAsync(h_aggregated_info.data(),
aggregated_info.data(),
sizeof(cumulative_row_info) * c_info.size(),
hipMemcpyDefault,
stream.value()));
stream.synchronize();
return find_splits(h_aggregated_info, num_rows, chunk_read_limit);
}
struct get_page_chunk_idx {
__device__ size_type operator()(gpu::PageInfo const& page) { return page.chunk_idx; }
};
struct get_page_num_rows {
__device__ size_type operator()(gpu::PageInfo const& page) { return page.num_rows; }
};
struct get_page_column_index {
gpu::ColumnChunkDesc const* chunks;
__device__ size_type operator()(gpu::PageInfo const& page)
{
return chunks[page.chunk_idx].src_col_index;
}
};
struct input_col_info {
int const schema_idx;
size_type const nesting_depth;
};
/**
* @brief Converts a 1-dimensional index into page, depth and column indices used in
* allocate_columns to compute columns sizes.
*
* The input index will iterate through pages, nesting depth and column indices in that order.
*/
struct reduction_indices {
size_t const page_idx;
size_type const depth_idx;
size_type const col_idx;
__device__ reduction_indices(size_t index_, size_type max_depth_, size_t num_pages_)
: page_idx(index_ % num_pages_),
depth_idx((index_ / num_pages_) % max_depth_),
col_idx(index_ / (max_depth_ * num_pages_))
{
}
};
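// A small sketch (not part of the original source) of the same index decomposition using plain
// host arithmetic, so the layout is easy to verify: pages vary fastest, then nesting depth, then
// column. sketch_indices / sketch_decompose are names assumed for this example only.
struct sketch_indices {
  size_t page_idx;
  size_t depth_idx;
  size_t col_idx;
};
constexpr sketch_indices sketch_decompose(size_t index, size_t max_depth, size_t num_pages)
{
  return {index % num_pages, (index / num_pages) % max_depth, index / (max_depth * num_pages)};
}
// e.g. with 4 pages and a maximum nesting depth of 3, each column owns one contiguous block of
// 3 * 4 = 12 indices:
static_assert(sketch_decompose(5, 3, 4).page_idx == 1 && sketch_decompose(5, 3, 4).depth_idx == 1 &&
              sketch_decompose(5, 3, 4).col_idx == 0);
static_assert(sketch_decompose(13, 3, 4).page_idx == 1 &&
              sketch_decompose(13, 3, 4).depth_idx == 0 && sketch_decompose(13, 3, 4).col_idx == 1);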
/**
* @brief Returns the size field of a PageInfo struct for a given depth, keyed by schema.
*/
struct get_page_nesting_size {
input_col_info const* const input_cols;
size_type const max_depth;
size_t const num_pages;
gpu::PageInfo const* const pages;
int const* page_indices;
__device__ size_type operator()(size_t index) const
{
auto const indices = reduction_indices{index, max_depth, num_pages};
auto const& page = pages[page_indices[indices.page_idx]];
if (page.src_col_schema != input_cols[indices.col_idx].schema_idx ||
page.flags & gpu::PAGEINFO_FLAGS_DICTIONARY ||
indices.depth_idx >= input_cols[indices.col_idx].nesting_depth) {
return 0;
}
return page.nesting[indices.depth_idx].batch_size;
}
};
struct get_reduction_key {
size_t const num_pages;
__device__ size_t operator()(size_t index) const { return index / num_pages; }
};
/**
* @brief Writes to the chunk_row field of the PageInfo struct.
*/
struct chunk_row_output_iter {
gpu::PageInfo* p;
using value_type = size_type;
using difference_type = size_type;
using pointer = size_type*;
using reference = size_type&;
using iterator_category = thrust::output_device_iterator_tag;
__host__ __device__ chunk_row_output_iter operator+(int i)
{
return chunk_row_output_iter{p + i};
}
__host__ __device__ void operator++() { p++; }
__device__ reference operator[](int i) { return p[i].chunk_row; }
__device__ reference operator*() { return p->chunk_row; }
};
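// A sketch (not from the original source) of what the exclusive_scan_by_key over
// chunk_row_output_iter computes later in preprocess_pages, written as a plain host loop. Pages
// are grouped by chunk, and chunk_row becomes the running row offset of each page within its
// chunk. sketch_page_record / sketch_compute_chunk_rows are names assumed for this example.
struct sketch_page_record {
  int chunk_idx;
  int num_rows;
  int chunk_row;  // filled in below
};
inline void sketch_compute_chunk_rows(std::vector<sketch_page_record>& pages)
{
  int prev_chunk = -1;
  int running    = 0;
  for (auto& p : pages) {
    if (p.chunk_idx != prev_chunk) {  // new key: restart the exclusive scan
      prev_chunk = p.chunk_idx;
      running    = 0;
    }
    p.chunk_row = running;  // exclusive: rows that precede this page within the chunk
    running += p.num_rows;
  }
}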
/**
* @brief Writes to the page_start_value field of the PageNestingInfo struct, keyed by schema.
*/
struct start_offset_output_iterator {
gpu::PageInfo const* pages;
int const* page_indices;
size_t cur_index;
input_col_info const* input_cols;
size_type max_depth;
size_t num_pages;
int empty = 0;
using value_type = size_type;
using difference_type = size_type;
using pointer = size_type*;
using reference = size_type&;
using iterator_category = thrust::output_device_iterator_tag;
constexpr void operator=(start_offset_output_iterator const& other)
{
pages = other.pages;
page_indices = other.page_indices;
cur_index = other.cur_index;
input_cols = other.input_cols;
max_depth = other.max_depth;
num_pages = other.num_pages;
}
constexpr start_offset_output_iterator operator+(size_t i)
{
return start_offset_output_iterator{
pages, page_indices, cur_index + i, input_cols, max_depth, num_pages};
}
constexpr void operator++() { cur_index++; }
__device__ reference operator[](size_t i) { return dereference(cur_index + i); }
__device__ reference operator*() { return dereference(cur_index); }
private:
__device__ reference dereference(size_t index)
{
auto const indices = reduction_indices{index, max_depth, num_pages};
gpu::PageInfo const& p = pages[page_indices[indices.page_idx]];
if (p.src_col_schema != input_cols[indices.col_idx].schema_idx ||
p.flags & gpu::PAGEINFO_FLAGS_DICTIONARY ||
indices.depth_idx >= input_cols[indices.col_idx].nesting_depth) {
return empty;
}
return p.nesting_decode[indices.depth_idx].page_start_value;
}
};
struct flat_column_num_rows {
gpu::PageInfo const* pages;
gpu::ColumnChunkDesc const* chunks;
__device__ size_type operator()(size_type pindex) const
{
gpu::PageInfo const& page = pages[pindex];
// ignore dictionary pages and pages belonging to any column containing repetition (lists)
if ((page.flags & gpu::PAGEINFO_FLAGS_DICTIONARY) ||
(chunks[page.chunk_idx].max_level[gpu::level_type::REPETITION] > 0)) {
return 0;
}
return page.num_rows;
}
};
struct row_counts_nonzero {
__device__ bool operator()(size_type count) const { return count > 0; }
};
struct row_counts_different {
size_type const expected;
__device__ bool operator()(size_type count) const { return (count != 0) && (count != expected); }
};
/**
* @brief Detect malformed parquet input data.
*
* We have seen cases where parquet files can be oddly malformed. This function specifically
* detects one case in particular:
*
* - When you have a file containing N rows
* - For some reason, the sum total of the number of rows over all pages for a given column
* is != N
*
* @param pages All pages to be decoded
* @param chunks Chunk data
* @param page_keys Keys (schema id) associated with each page, sorted by column
* @param page_index Page indices for iteration, sorted by column
* @param expected_row_count Expected row count, if applicable
* @param stream CUDA stream used for device memory operations and kernel launches
*/
void detect_malformed_pages(cudf::detail::hostdevice_vector<gpu::PageInfo>& pages,
cudf::detail::hostdevice_vector<gpu::ColumnChunkDesc> const& chunks,
device_span<int const> page_keys,
device_span<int const> page_index,
std::optional<size_t> expected_row_count,
rmm::cuda_stream_view stream)
{
// sum row counts for all non-dictionary, non-list columns. other columns will be indicated as 0
rmm::device_uvector<size_type> row_counts(pages.size(),
stream); // worst case: num keys == num pages
auto const size_iter = thrust::make_transform_iterator(
page_index.begin(), flat_column_num_rows{pages.device_ptr(), chunks.device_ptr()});
auto const row_counts_begin = row_counts.begin();
auto const row_counts_end = thrust::reduce_by_key(rmm::exec_policy(stream),
page_keys.begin(),
page_keys.end(),
size_iter,
thrust::make_discard_iterator(),
row_counts_begin)
.second;
// make sure all non-zero row counts are the same
rmm::device_uvector<size_type> compacted_row_counts(pages.size(), stream);
auto const compacted_row_counts_begin = compacted_row_counts.begin();
auto const compacted_row_counts_end = thrust::copy_if(rmm::exec_policy(stream),
row_counts_begin,
row_counts_end,
compacted_row_counts_begin,
row_counts_nonzero{});
if (compacted_row_counts_end != compacted_row_counts_begin) {
size_t const found_row_count = static_cast<size_t>(compacted_row_counts.element(0, stream));
// if we somehow don't match the expected row count from the row groups themselves
if (expected_row_count.has_value()) {
CUDF_EXPECTS(expected_row_count.value() == found_row_count,
"Encountered malformed parquet page data (unexpected row count in page data)");
}
// all non-zero row counts must be the same
auto const chk =
thrust::count_if(rmm::exec_policy(stream),
compacted_row_counts_begin,
compacted_row_counts_end,
row_counts_different{static_cast<size_type>(found_row_count)});
CUDF_EXPECTS(chk == 0,
"Encountered malformed parquet page data (row count mismatch in page data)");
}
}
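// A sketch (not from the original source) of the same consistency rule on plain host data: for
// each flat (non-list) column, sum the row counts of its data pages; every non-zero total must
// be identical and, when provided, must equal the expected row-group row count.
// sketch_row_counts_consistent and its parameters are assumptions made for this example.
inline bool sketch_row_counts_consistent(std::vector<std::vector<int>> const& rows_per_page_by_col,
                                         std::optional<size_t> expected_rows)
{
  std::optional<size_t> found;
  for (auto const& col_pages : rows_per_page_by_col) {
    size_t const total = std::accumulate(col_pages.begin(), col_pages.end(), size_t{0});
    if (total == 0) { continue; }           // e.g. a column consisting only of dictionary pages
    if (!found) { found = total; }
    if (total != *found) { return false; }  // columns disagree with each other
  }
  return !expected_rows || !found || *expected_rows == *found;  // agree with row group metadata
}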
struct page_to_string_size {
gpu::PageInfo* pages;
gpu::ColumnChunkDesc const* chunks;
__device__ size_t operator()(size_type page_idx) const
{
auto const page = pages[page_idx];
auto const chunk = chunks[page.chunk_idx];
if (not is_string_col(chunk) || (page.flags & gpu::PAGEINFO_FLAGS_DICTIONARY) != 0) {
return 0;
}
return pages[page_idx].str_bytes;
}
};
struct page_offset_output_iter {
gpu::PageInfo* p;
size_type const* index;
using value_type = size_type;
using difference_type = size_type;
using pointer = size_type*;
using reference = size_type&;
using iterator_category = thrust::output_device_iterator_tag;
__host__ __device__ page_offset_output_iter operator+(int i)
{
return page_offset_output_iter{p, index + i};
}
__host__ __device__ void operator++() { index++; }
__device__ reference operator[](int i) { return p[index[i]].str_offset; }
__device__ reference operator*() { return p[*index].str_offset; }
};
} // anonymous namespace
void reader::impl::preprocess_pages(size_t skip_rows,
size_t num_rows,
bool uses_custom_row_bounds,
size_t chunk_read_limit)
{
auto& chunks = _file_itm_data.chunks;
auto& pages = _file_itm_data.pages_info;
// compute page ordering.
//
// ordering of pages is by input column schema, repeated across row groups. so
// if we had 3 columns, each with 2 pages, and 1 row group, our schema values might look like
//
// 1, 1, 2, 2, 3, 3
//
// However, if we had more than one row group, the pattern would be
//
// 1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3
// ^ row group 0 |
// ^ row group 1
//
// To process pages by key (exclusive_scan_by_key, reduce_by_key, etc), the ordering we actually
// want is
//
// 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3
//
// We also need to preserve key-relative page ordering, so we need to use a stable sort.
rmm::device_uvector<int> page_keys(pages.size(), _stream);
rmm::device_uvector<int> page_index(pages.size(), _stream);
{
thrust::transform(rmm::exec_policy(_stream),
pages.device_ptr(),
pages.device_ptr() + pages.size(),
page_keys.begin(),
get_page_column_index{chunks.device_ptr()});
thrust::sequence(rmm::exec_policy(_stream), page_index.begin(), page_index.end());
thrust::stable_sort_by_key(rmm::exec_policy(_stream),
page_keys.begin(),
page_keys.end(),
page_index.begin(),
thrust::less<int>());
}
// detect malformed columns.
// - we have seen some cases in the wild where we have a row group containing N
// rows, but the total number of rows in the pages for column X is != N. while it
// is possible to load this by just capping the number of rows read, we cannot tell
// which rows are invalid so we may be returning bad data. in addition, this mismatch
// confuses the chunked reader
detect_malformed_pages(pages,
chunks,
page_keys,
page_index,
uses_custom_row_bounds ? std::nullopt : std::make_optional(num_rows),
_stream);
// iterate over all input columns and determine if they contain lists so we can further
// preprocess them.
bool has_lists = false;
for (size_t idx = 0; idx < _input_columns.size(); idx++) {
auto const& input_col = _input_columns[idx];
size_t const max_depth = input_col.nesting_depth();
auto* cols = &_output_buffers;
for (size_t l_idx = 0; l_idx < max_depth; l_idx++) {
auto& out_buf = (*cols)[input_col.nesting[l_idx]];
cols = &out_buf.children;
// if this has a list parent, we have to get column sizes from the
// data computed during gpu::ComputePageSizes
if (out_buf.user_data & PARQUET_COLUMN_BUFFER_FLAG_HAS_LIST_PARENT) {
has_lists = true;
break;
}
}
if (has_lists) { break; }
}
// generate string dict indices if necessary
{
auto is_dict_chunk = [](gpu::ColumnChunkDesc const& chunk) {
return (chunk.data_type & 0x7) == BYTE_ARRAY && chunk.num_dict_pages > 0;
};
// Count the number of string dictionary entries
// NOTE: Assumes first page in the chunk is always the dictionary page
size_t total_str_dict_indexes = 0;
for (size_t c = 0, page_count = 0; c < chunks.size(); c++) {
if (is_dict_chunk(chunks[c])) {
total_str_dict_indexes += pages[page_count].num_input_values;
}
page_count += chunks[c].max_num_pages;
}
// Build index for string dictionaries since they can't be indexed
// directly due to variable-sized elements
_chunk_itm_data.str_dict_index =
cudf::detail::make_zeroed_device_uvector_async<string_index_pair>(
total_str_dict_indexes, _stream, rmm::mr::get_current_device_resource());
// Update chunks with pointers to string dict indices
for (size_t c = 0, page_count = 0, str_ofs = 0; c < chunks.size(); c++) {
input_column_info const& input_col = _input_columns[chunks[c].src_col_index];
CUDF_EXPECTS(input_col.schema_idx == chunks[c].src_col_schema,
"Column/page schema index mismatch");
if (is_dict_chunk(chunks[c])) {
chunks[c].str_dict_index = _chunk_itm_data.str_dict_index.data() + str_ofs;
str_ofs += pages[page_count].num_input_values;
}
// column_data_base will always point to leaf data, even for nested types.
page_count += chunks[c].max_num_pages;
}
if (total_str_dict_indexes > 0) {
chunks.host_to_device_async(_stream);
gpu::BuildStringDictionaryIndex(chunks.device_ptr(), chunks.size(), _stream);
}
}
// intermediate data we will need for further chunked reads
if (has_lists || chunk_read_limit > 0) {
// computes:
// PageNestingInfo::num_rows for each page. the true number of rows (taking repetition into
// account), not just the number of values. PageNestingInfo::size for each level of nesting, for
// each page.
//
// we will be applying a later "trim" pass if skip_rows/num_rows is being used, which can happen
// if:
// - user has passed custom row bounds
// - we will be doing a chunked read
gpu::ComputePageSizes(pages,
chunks,
0, // 0-max size_t. process all possible rows
std::numeric_limits<size_t>::max(),
true, // compute num_rows
chunk_read_limit > 0, // compute string sizes
_file_itm_data.level_type_size,
_stream);
// computes:
// PageInfo::chunk_row (the absolute start row index) for all pages
// Note: this does some redundant work for pages in flat hierarchies, since chunk_row has already
// been computed during header decoding. The overall amount of work here is very small though.
auto key_input = thrust::make_transform_iterator(pages.device_ptr(), get_page_chunk_idx{});
auto page_input = thrust::make_transform_iterator(pages.device_ptr(), get_page_num_rows{});
thrust::exclusive_scan_by_key(rmm::exec_policy(_stream),
key_input,
key_input + pages.size(),
page_input,
chunk_row_output_iter{pages.device_ptr()});
// retrieve pages back
pages.device_to_host_sync(_stream);
// print_pages(pages, _stream);
}
// preserve page ordering data for string decoder
_chunk_itm_data.page_keys = std::move(page_keys);
_chunk_itm_data.page_index = std::move(page_index);
// compute splits if necessary. otherwise return a single split representing
// the whole file.
_chunk_read_info = chunk_read_limit > 0
? compute_splits(pages, _chunk_itm_data, num_rows, chunk_read_limit, _stream)
: std::vector<gpu::chunk_read_info>{{skip_rows, num_rows}};
}
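// A sketch (not from the original source) of the page-ordering step in preprocess_pages above,
// expressed on the host. Given one key (input column index) per page, build an index array that
// visits pages column by column while keeping the original order within each column, which is
// why a stable sort is required. sketch_order_pages_by_column is a name assumed for this example.
inline std::vector<int> sketch_order_pages_by_column(std::vector<int> const& page_keys)
{
  std::vector<int> order(page_keys.size());
  std::iota(order.begin(), order.end(), 0);
  std::stable_sort(
    order.begin(), order.end(), [&](int a, int b) { return page_keys[a] < page_keys[b]; });
  // e.g. keys {1,1,2,2,3,3, 1,1,2,2,3,3} (two row groups) give an order that lists all pages of
  // column 1 first, then column 2, then column 3, preserving row-group order inside each column.
  return order;
}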
void reader::impl::allocate_columns(size_t skip_rows, size_t num_rows, bool uses_custom_row_bounds)
{
auto const& chunks = _file_itm_data.chunks;
auto& pages = _file_itm_data.pages_info;
// Should not reach here if there is no page data.
CUDF_EXPECTS(pages.size() > 0, "There is no page to parse");
// computes:
// PageNestingInfo::batch_size for each level of nesting, for each page, taking row bounds into
// account. PageInfo::skipped_values, which tells us where to start decoding in the input to
// respect the user bounds. It is only necessary to do this second pass if uses_custom_row_bounds
// is set (if the user has specified artificial bounds).
if (uses_custom_row_bounds) {
gpu::ComputePageSizes(pages,
chunks,
skip_rows,
num_rows,
false, // num_rows is already computed
false, // no need to compute string sizes
_file_itm_data.level_type_size,
_stream);
// print_pages(pages, _stream);
}
// iterate over all input columns and allocate any associated output
// buffers if they are not part of a list hierarchy. mark down
// if we have any list columns that need further processing.
bool has_lists = false;
for (size_t idx = 0; idx < _input_columns.size(); idx++) {
auto const& input_col = _input_columns[idx];
size_t const max_depth = input_col.nesting_depth();
auto* cols = &_output_buffers;
for (size_t l_idx = 0; l_idx < max_depth; l_idx++) {
auto& out_buf = (*cols)[input_col.nesting[l_idx]];
cols = &out_buf.children;
// if this has a list parent, we have to get column sizes from the
// data computed during gpu::ComputePageSizes
if (out_buf.user_data & PARQUET_COLUMN_BUFFER_FLAG_HAS_LIST_PARENT) {
has_lists = true;
}
// if we haven't already processed this column because it is part of a struct hierarchy
else if (out_buf.size == 0) {
// add 1 for the offset if this is a list column
out_buf.create(
out_buf.type.id() == type_id::LIST && l_idx < max_depth ? num_rows + 1 : num_rows,
_stream,
_mr);
}
}
}
// compute output column sizes by examining the pages of the -input- columns
if (has_lists) {
auto& page_index = _chunk_itm_data.page_index;
std::vector<input_col_info> h_cols_info;
h_cols_info.reserve(_input_columns.size());
std::transform(_input_columns.cbegin(),
_input_columns.cend(),
std::back_inserter(h_cols_info),
[](auto& col) -> input_col_info {
return {col.schema_idx, static_cast<size_type>(col.nesting_depth())};
});
auto const max_depth =
(*std::max_element(h_cols_info.cbegin(),
h_cols_info.cend(),
[](auto& l, auto& r) { return l.nesting_depth < r.nesting_depth; }))
.nesting_depth;
auto const d_cols_info = cudf::detail::make_device_uvector_async(
h_cols_info, _stream, rmm::mr::get_current_device_resource());
auto const num_keys = _input_columns.size() * max_depth * pages.size();
// size iterator. indexes pages by sorted order
rmm::device_uvector<size_type> size_input{num_keys, _stream};
thrust::transform(
rmm::exec_policy(_stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(num_keys),
size_input.begin(),
get_page_nesting_size{
d_cols_info.data(), max_depth, pages.size(), pages.device_ptr(), page_index.begin()});
auto const reduction_keys =
cudf::detail::make_counting_transform_iterator(0, get_reduction_key{pages.size()});
cudf::detail::hostdevice_vector<size_t> sizes{_input_columns.size() * max_depth, _stream};
// find the size of each column
thrust::reduce_by_key(rmm::exec_policy(_stream),
reduction_keys,
reduction_keys + num_keys,
size_input.cbegin(),
thrust::make_discard_iterator(),
sizes.d_begin());
// for nested hierarchies, compute per-page start offset
thrust::exclusive_scan_by_key(
rmm::exec_policy(_stream),
reduction_keys,
reduction_keys + num_keys,
size_input.cbegin(),
start_offset_output_iterator{
pages.device_ptr(), page_index.begin(), 0, d_cols_info.data(), max_depth, pages.size()});
sizes.device_to_host_sync(_stream);
for (size_type idx = 0; idx < static_cast<size_type>(_input_columns.size()); idx++) {
auto const& input_col = _input_columns[idx];
auto* cols = &_output_buffers;
for (size_type l_idx = 0; l_idx < static_cast<size_type>(input_col.nesting_depth());
l_idx++) {
auto& out_buf = (*cols)[input_col.nesting[l_idx]];
cols = &out_buf.children;
// if this buffer is part of a list hierarchy, we need to determine its
// final size and allocate it here.
//
// for struct columns, higher levels of the output columns are shared between input
// columns. so don't compute any given level more than once.
if ((out_buf.user_data & PARQUET_COLUMN_BUFFER_FLAG_HAS_LIST_PARENT) && out_buf.size == 0) {
auto size = sizes[(idx * max_depth) + l_idx];
// if this is a list column add 1 for non-leaf levels for the terminating offset
if (out_buf.type.id() == type_id::LIST && l_idx < max_depth) { size++; }
// allocate
out_buf.create(size, _stream, _mr);
}
}
}
}
}
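// Worked note on the reduction in allocate_columns above (a sketch, not part of the original
// source): with num_pages pages, max_depth nesting levels and C input columns, the reduce_by_key
// produces C * max_depth totals laid out as [column][depth]; e.g. for max_depth = 3, the size of
// column 2 at depth 1 lives at sizes[2 * 3 + 1] = sizes[7], matching the get_reduction_key
// grouping of index / num_pages.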
std::vector<size_t> reader::impl::calculate_page_string_offsets()
{
auto& chunks = _file_itm_data.chunks;
auto& pages = _file_itm_data.pages_info;
auto const& page_keys = _chunk_itm_data.page_keys;
auto const& page_index = _chunk_itm_data.page_index;
std::vector<size_t> col_sizes(_input_columns.size(), 0L);
rmm::device_uvector<size_t> d_col_sizes(col_sizes.size(), _stream);
// use page_index to fetch page string sizes in the proper order
auto val_iter = thrust::make_transform_iterator(
page_index.begin(), page_to_string_size{pages.device_ptr(), chunks.device_ptr()});
// do scan by key to calculate string offsets for each page
thrust::exclusive_scan_by_key(rmm::exec_policy(_stream),
page_keys.begin(),
page_keys.end(),
val_iter,
page_offset_output_iter{pages.device_ptr(), page_index.data()});
// now sum up page sizes
rmm::device_uvector<int> reduce_keys(col_sizes.size(), _stream);
thrust::reduce_by_key(rmm::exec_policy(_stream),
page_keys.begin(),
page_keys.end(),
val_iter,
reduce_keys.begin(),
d_col_sizes.begin());
hipMemcpyAsync(col_sizes.data(),
d_col_sizes.data(),
sizeof(size_t) * col_sizes.size(),
hipMemcpyDeviceToHost,
_stream);
_stream.synchronize();
return col_sizes;
}
} // namespace cudf::io::detail::parquet
| 7d26bb273e4244f7593db1175c3227b1121c4ee0.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "reader_impl.hpp"
#include <io/comp/nvcomp_adapter.hpp>
#include <io/utilities/config_utils.hpp>
#include <io/utilities/time_utils.cuh>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/utilities/integer_utils.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/binary_search.h>
#include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/iterator_categories.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/logical.h>
#include <thrust/reduce.h>
#include <thrust/scan.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#include <thrust/unique.h>
#include <numeric>
namespace cudf::io::detail::parquet {
namespace {
/**
* @brief Generate depth remappings for repetition and definition levels.
*
* When dealing with columns that contain lists, we must examine incoming
* repetition and definition level pairs to determine what range of output nesting
* is indicated when adding new values. This function generates the mappings of
* the R/D levels to those start/end bounds
*
* @param remap Maps column schema index to the R/D remapping vectors for that column
* @param src_col_schema The column schema to generate the new mapping for
* @param md File metadata information
*/
void generate_depth_remappings(std::map<int, std::pair<std::vector<int>, std::vector<int>>>& remap,
int src_col_schema,
aggregate_reader_metadata const& md)
{
// already generated for this level
if (remap.find(src_col_schema) != remap.end()) { return; }
auto schema = md.get_schema(src_col_schema);
int max_depth = md.get_output_nesting_depth(src_col_schema);
CUDF_EXPECTS(remap.find(src_col_schema) == remap.end(),
"Attempting to remap a schema more than once");
auto inserted =
remap.insert(std::pair<int, std::pair<std::vector<int>, std::vector<int>>>{src_col_schema, {}});
auto& depth_remap = inserted.first->second;
std::vector<int>& rep_depth_remap = (depth_remap.first);
rep_depth_remap.resize(schema.max_repetition_level + 1);
std::vector<int>& def_depth_remap = (depth_remap.second);
def_depth_remap.resize(schema.max_definition_level + 1);
// the key:
// for incoming level values R/D
// add values starting at the shallowest nesting level X has repetition level R
// until you reach the deepest nesting level Y that corresponds to the repetition level R1
// held by the nesting level that has definition level D
//
// Example: a 3 level struct with a list at the bottom
//
// R / D Depth
// level0 0 / 1 0
// level1 0 / 2 1
// level2 0 / 3 2
// list 0 / 3 3
// element 1 / 4 4
//
// incoming R/D : 0, 0 -> add values from depth 0 to 3 (def level 0 always maps to depth 0)
// incoming R/D : 0, 1 -> add values from depth 0 to 3
// incoming R/D : 0, 2 -> add values from depth 0 to 3
// incoming R/D : 1, 4 -> add values from depth 4 to 4
//
// Note : the -validity- of values is simply checked by comparing the incoming D value against the
// D value of the given nesting level (incoming D >= the D for the nesting level == valid,
// otherwise NULL). The tricky part is determining what nesting levels to add values at.
//
// For schemas with no repetition level (no lists), X is always 0 and Y is always max nesting
// depth.
//
// compute "X" from above
for (int s_idx = schema.max_repetition_level; s_idx >= 0; s_idx--) {
auto find_shallowest = [&](int r) {
int shallowest = -1;
int cur_depth = max_depth - 1;
int schema_idx = src_col_schema;
while (schema_idx > 0) {
auto cur_schema = md.get_schema(schema_idx);
if (cur_schema.max_repetition_level == r) {
// if this is a repeated field, map it one level deeper
shallowest = cur_schema.is_stub() ? cur_depth + 1 : cur_depth;
}
// if it's one-level encoding list
else if (cur_schema.is_one_level_list(md.get_schema(cur_schema.parent_idx))) {
shallowest = cur_depth - 1;
}
if (!cur_schema.is_stub()) { cur_depth--; }
schema_idx = cur_schema.parent_idx;
}
return shallowest;
};
rep_depth_remap[s_idx] = find_shallowest(s_idx);
}
// compute "Y" from above
for (int s_idx = schema.max_definition_level; s_idx >= 0; s_idx--) {
auto find_deepest = [&](int d) {
SchemaElement prev_schema;
int schema_idx = src_col_schema;
int r1 = 0;
while (schema_idx > 0) {
SchemaElement cur_schema = md.get_schema(schema_idx);
if (cur_schema.max_definition_level == d) {
// if this is a repeated field, map it one level deeper
r1 = cur_schema.is_stub() ? prev_schema.max_repetition_level
: cur_schema.max_repetition_level;
break;
}
prev_schema = cur_schema;
schema_idx = cur_schema.parent_idx;
}
// we now know R1 from above. return the deepest nesting level that has the
// same repetition level
schema_idx = src_col_schema;
int depth = max_depth - 1;
while (schema_idx > 0) {
SchemaElement cur_schema = md.get_schema(schema_idx);
if (cur_schema.max_repetition_level == r1) {
// if this is a repeated field, map it one level deeper
depth = cur_schema.is_stub() ? depth + 1 : depth;
break;
}
if (!cur_schema.is_stub()) { depth--; }
prev_schema = cur_schema;
schema_idx = cur_schema.parent_idx;
}
return depth;
};
def_depth_remap[s_idx] = find_deepest(s_idx);
}
}
/**
* @brief Return the required number of bits to store a value.
*/
template <typename T = uint8_t>
[[nodiscard]] T required_bits(uint32_t max_level)
{
return static_cast<T>(CompactProtocolReader::NumRequiredBits(max_level));
}
/**
* @brief Converts cuDF units to Parquet units.
*
* @return A tuple of Parquet type width, Parquet clock rate and Parquet decimal type.
*/
[[nodiscard]] std::tuple<int32_t, int32_t, int8_t> conversion_info(type_id column_type_id,
type_id timestamp_type_id,
parquet::Type physical,
int8_t converted,
int32_t length)
{
int32_t type_width = (physical == parquet::FIXED_LEN_BYTE_ARRAY) ? length : 0;
int32_t clock_rate = 0;
if (column_type_id == type_id::INT8 or column_type_id == type_id::UINT8) {
type_width = 1; // I32 -> I8
} else if (column_type_id == type_id::INT16 or column_type_id == type_id::UINT16) {
type_width = 2; // I32 -> I16
} else if (column_type_id == type_id::INT32) {
type_width = 4; // str -> hash32
} else if (is_chrono(data_type{column_type_id})) {
clock_rate = to_clockrate(timestamp_type_id);
}
int8_t converted_type = converted;
if (converted_type == parquet::DECIMAL && column_type_id != type_id::FLOAT64 &&
not cudf::is_fixed_point(data_type{column_type_id})) {
converted_type = parquet::UNKNOWN; // Not converting to float64 or decimal
}
return std::make_tuple(type_width, clock_rate, converted_type);
}
/**
* @brief Reads compressed page data to device memory.
*
* @param sources Dataset sources
* @param page_data Buffers to hold compressed page data for each chunk
* @param chunks List of column chunk descriptors
* @param begin_chunk Index of first column chunk to read
* @param end_chunk Index after the last column chunk to read
* @param column_chunk_offsets File offset for all chunks
* @param chunk_source_map Association between each column chunk and its source
* @param stream CUDA stream used for device memory operations and kernel launches
*
* @return A future object for reading synchronization
*/
[[nodiscard]] std::future<void> read_column_chunks_async(
std::vector<std::unique_ptr<datasource>> const& sources,
std::vector<std::unique_ptr<datasource::buffer>>& page_data,
cudf::detail::hostdevice_vector<gpu::ColumnChunkDesc>& chunks,
size_t begin_chunk,
size_t end_chunk,
std::vector<size_t> const& column_chunk_offsets,
std::vector<size_type> const& chunk_source_map,
rmm::cuda_stream_view stream)
{
// Transfer chunk data, coalescing adjacent chunks
std::vector<std::future<size_t>> read_tasks;
for (size_t chunk = begin_chunk; chunk < end_chunk;) {
size_t const io_offset = column_chunk_offsets[chunk];
size_t io_size = chunks[chunk].compressed_size;
size_t next_chunk = chunk + 1;
bool const is_compressed = (chunks[chunk].codec != parquet::Compression::UNCOMPRESSED);
while (next_chunk < end_chunk) {
size_t const next_offset = column_chunk_offsets[next_chunk];
bool const is_next_compressed =
(chunks[next_chunk].codec != parquet::Compression::UNCOMPRESSED);
if (next_offset != io_offset + io_size || is_next_compressed != is_compressed ||
chunk_source_map[chunk] != chunk_source_map[next_chunk]) {
// Can't merge if not contiguous or mixing compressed and uncompressed
// Not coalescing uncompressed with compressed chunks is so that compressed buffers can be
// freed earlier (immediately after decompression stage) to limit peak memory requirements
break;
}
io_size += chunks[next_chunk].compressed_size;
next_chunk++;
}
if (io_size != 0) {
auto& source = sources[chunk_source_map[chunk]];
if (source->is_device_read_preferred(io_size)) {
// Buffer needs to be padded.
// Required by `gpuDecodePageData`.
auto buffer =
rmm::device_buffer(cudf::util::round_up_safe(io_size, BUFFER_PADDING_MULTIPLE), stream);
auto fut_read_size = source->device_read_async(
io_offset, io_size, static_cast<uint8_t*>(buffer.data()), stream);
read_tasks.emplace_back(std::move(fut_read_size));
page_data[chunk] = datasource::buffer::create(std::move(buffer));
} else {
auto const read_buffer = source->host_read(io_offset, io_size);
// Buffer needs to be padded.
// Required by `gpuDecodePageData`.
auto tmp_buffer = rmm::device_buffer(
cudf::util::round_up_safe(read_buffer->size(), BUFFER_PADDING_MULTIPLE), stream);
CUDF_CUDA_TRY(cudaMemcpyAsync(
tmp_buffer.data(), read_buffer->data(), read_buffer->size(), cudaMemcpyDefault, stream));
page_data[chunk] = datasource::buffer::create(std::move(tmp_buffer));
}
auto d_compdata = page_data[chunk]->data();
do {
chunks[chunk].compressed_data = d_compdata;
d_compdata += chunks[chunk].compressed_size;
} while (++chunk != next_chunk);
} else {
chunk = next_chunk;
}
}
auto sync_fn = [](decltype(read_tasks) read_tasks) {
for (auto& task : read_tasks) {
task.wait();
}
};
return std::async(std::launch::deferred, sync_fn, std::move(read_tasks));
}
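// A sketch (not from the original source) of the coalescing rule above on plain host data:
// adjacent chunk reads are merged only while they are byte-contiguous, come from the same source
// file and agree on compressed vs. uncompressed. The types and names below are illustrative
// assumptions only.
struct sketch_chunk_read {
  size_t offset;
  size_t size;
  int source;
  bool compressed;
};
inline std::vector<sketch_chunk_read> sketch_coalesce(std::vector<sketch_chunk_read> const& in)
{
  std::vector<sketch_chunk_read> out;
  for (auto const& c : in) {
    if (!out.empty() && out.back().offset + out.back().size == c.offset &&
        out.back().source == c.source && out.back().compressed == c.compressed) {
      out.back().size += c.size;  // contiguous and compatible: extend the previous read
    } else {
      out.push_back(c);  // otherwise start a new read
    }
  }
  return out;
}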
/**
* @brief Return the number of total pages from the given column chunks.
*
* @param chunks List of column chunk descriptors
* @param stream CUDA stream used for device memory operations and kernel launches
*
* @return The total number of pages
*/
[[nodiscard]] size_t count_page_headers(
cudf::detail::hostdevice_vector<gpu::ColumnChunkDesc>& chunks, rmm::cuda_stream_view stream)
{
size_t total_pages = 0;
chunks.host_to_device_async(stream);
gpu::DecodePageHeaders(chunks.device_ptr(), chunks.size(), stream);
chunks.device_to_host_sync(stream);
for (size_t c = 0; c < chunks.size(); c++) {
total_pages += chunks[c].num_data_pages + chunks[c].num_dict_pages;
}
return total_pages;
}
// see setupLocalPageInfo() in page_data.cu for supported page encodings
constexpr bool is_supported_encoding(Encoding enc)
{
switch (enc) {
case Encoding::PLAIN:
case Encoding::PLAIN_DICTIONARY:
case Encoding::RLE:
case Encoding::RLE_DICTIONARY:
case Encoding::DELTA_BINARY_PACKED: return true;
default: return false;
}
}
/**
* @brief Decode the page information from the given column chunks.
*
* @param chunks List of column chunk descriptors
* @param pages List of page information
* @param stream CUDA stream used for device memory operations and kernel launches
* @returns The size in bytes of level type data required
*/
int decode_page_headers(cudf::detail::hostdevice_vector<gpu::ColumnChunkDesc>& chunks,
cudf::detail::hostdevice_vector<gpu::PageInfo>& pages,
rmm::cuda_stream_view stream)
{
// IMPORTANT : if you change how pages are stored within a chunk (dict pages, then data pages),
// please update preprocess_nested_columns to reflect this.
for (size_t c = 0, page_count = 0; c < chunks.size(); c++) {
chunks[c].max_num_pages = chunks[c].num_data_pages + chunks[c].num_dict_pages;
chunks[c].page_info = pages.device_ptr(page_count);
page_count += chunks[c].max_num_pages;
}
chunks.host_to_device_async(stream);
gpu::DecodePageHeaders(chunks.device_ptr(), chunks.size(), stream);
// compute max bytes needed for level data
auto level_bit_size =
cudf::detail::make_counting_transform_iterator(0, [chunks = chunks.begin()] __device__(int i) {
auto c = chunks[i];
return static_cast<int>(
max(c.level_bits[gpu::level_type::REPETITION], c.level_bits[gpu::level_type::DEFINITION]));
});
// max level data bit size.
int const max_level_bits = thrust::reduce(rmm::exec_policy(stream),
level_bit_size,
level_bit_size + chunks.size(),
0,
thrust::maximum<int>());
auto const level_type_size = std::max(1, cudf::util::div_rounding_up_safe(max_level_bits, 8));
pages.device_to_host_sync(stream);
// validate page encodings
CUDF_EXPECTS(std::all_of(pages.begin(),
pages.end(),
[](auto const& page) { return is_supported_encoding(page.encoding); }),
"Unsupported page encoding detected");
return level_type_size;
}
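// Worked example (a sketch, not part of the original source): if the widest column needs 9 bits
// for its definition/repetition levels, level_type_size = max(1, ceil(9 / 8)) = 2, i.e. level
// values are decoded through 2-byte storage; a purely flat schema (0 level bits) still gets the
// minimum of 1 byte per value.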
/**
* @brief Decompresses the page data, at page granularity.
*
* @param chunks List of column chunk descriptors
* @param pages List of page information
* @param stream CUDA stream used for device memory operations and kernel launches
*
* @return Device buffer to decompressed page data
*/
[[nodiscard]] rmm::device_buffer decompress_page_data(
cudf::detail::hostdevice_vector<gpu::ColumnChunkDesc>& chunks,
cudf::detail::hostdevice_vector<gpu::PageInfo>& pages,
rmm::cuda_stream_view stream)
{
auto for_each_codec_page = [&](parquet::Compression codec, std::function<void(size_t)> const& f) {
for (size_t c = 0, page_count = 0; c < chunks.size(); c++) {
const auto page_stride = chunks[c].max_num_pages;
if (chunks[c].codec == codec) {
for (int k = 0; k < page_stride; k++) {
f(page_count + k);
}
}
page_count += page_stride;
}
};
// Brotli scratch memory for decompressing
rmm::device_buffer debrotli_scratch;
// Count the exact number of compressed pages
size_t num_comp_pages = 0;
size_t total_decomp_size = 0;
struct codec_stats {
parquet::Compression compression_type = UNCOMPRESSED;
size_t num_pages = 0;
int32_t max_decompressed_size = 0;
size_t total_decomp_size = 0;
};
std::array codecs{codec_stats{parquet::GZIP},
codec_stats{parquet::SNAPPY},
codec_stats{parquet::BROTLI},
codec_stats{parquet::ZSTD}};
auto is_codec_supported = [&codecs](int8_t codec) {
if (codec == parquet::UNCOMPRESSED) return true;
return std::find_if(codecs.begin(), codecs.end(), [codec](auto& cstats) {
return codec == cstats.compression_type;
}) != codecs.end();
};
CUDF_EXPECTS(std::all_of(chunks.begin(),
chunks.end(),
[&is_codec_supported](auto const& chunk) {
return is_codec_supported(chunk.codec);
}),
"Unsupported compression type");
for (auto& codec : codecs) {
for_each_codec_page(codec.compression_type, [&](size_t page) {
auto page_uncomp_size = pages[page].uncompressed_page_size;
total_decomp_size += page_uncomp_size;
codec.total_decomp_size += page_uncomp_size;
codec.max_decompressed_size = std::max(codec.max_decompressed_size, page_uncomp_size);
codec.num_pages++;
num_comp_pages++;
});
if (codec.compression_type == parquet::BROTLI && codec.num_pages > 0) {
debrotli_scratch.resize(get_gpu_debrotli_scratch_size(codec.num_pages), stream);
}
}
// Dispatch batches of pages to decompress for each codec.
// Buffer needs to be padded, required by `gpuDecodePageData`.
rmm::device_buffer decomp_pages(
cudf::util::round_up_safe(total_decomp_size, BUFFER_PADDING_MULTIPLE), stream);
std::vector<device_span<uint8_t const>> comp_in;
comp_in.reserve(num_comp_pages);
std::vector<device_span<uint8_t>> comp_out;
comp_out.reserve(num_comp_pages);
// vectors to save v2 def and rep level data, if any
std::vector<device_span<uint8_t const>> copy_in;
copy_in.reserve(num_comp_pages);
std::vector<device_span<uint8_t>> copy_out;
copy_out.reserve(num_comp_pages);
rmm::device_uvector<compression_result> comp_res(num_comp_pages, stream);
thrust::fill(rmm::exec_policy(stream),
comp_res.begin(),
comp_res.end(),
compression_result{0, compression_status::FAILURE});
size_t decomp_offset = 0;
int32_t start_pos = 0;
for (auto const& codec : codecs) {
if (codec.num_pages == 0) { continue; }
for_each_codec_page(codec.compression_type, [&](size_t page_idx) {
auto const dst_base = static_cast<uint8_t*>(decomp_pages.data()) + decomp_offset;
auto& page = pages[page_idx];
// offset will only be non-zero for V2 pages
auto const offset =
page.lvl_bytes[gpu::level_type::DEFINITION] + page.lvl_bytes[gpu::level_type::REPETITION];
// for V2 need to copy def and rep level info into place, and then offset the
// input and output buffers. otherwise we'd have to keep both the compressed
// and decompressed data.
if (offset != 0) {
copy_in.emplace_back(page.page_data, offset);
copy_out.emplace_back(dst_base, offset);
}
comp_in.emplace_back(page.page_data + offset,
static_cast<size_t>(page.compressed_page_size - offset));
comp_out.emplace_back(dst_base + offset,
static_cast<size_t>(page.uncompressed_page_size - offset));
page.page_data = dst_base;
decomp_offset += page.uncompressed_page_size;
});
host_span<device_span<uint8_t const> const> comp_in_view{comp_in.data() + start_pos,
codec.num_pages};
auto const d_comp_in = cudf::detail::make_device_uvector_async(
comp_in_view, stream, rmm::mr::get_current_device_resource());
host_span<device_span<uint8_t> const> comp_out_view(comp_out.data() + start_pos,
codec.num_pages);
auto const d_comp_out = cudf::detail::make_device_uvector_async(
comp_out_view, stream, rmm::mr::get_current_device_resource());
device_span<compression_result> d_comp_res_view(comp_res.data() + start_pos, codec.num_pages);
switch (codec.compression_type) {
case parquet::GZIP:
gpuinflate(d_comp_in, d_comp_out, d_comp_res_view, gzip_header_included::YES, stream);
break;
case parquet::SNAPPY:
if (nvcomp_integration::is_stable_enabled()) {
nvcomp::batched_decompress(nvcomp::compression_type::SNAPPY,
d_comp_in,
d_comp_out,
d_comp_res_view,
codec.max_decompressed_size,
codec.total_decomp_size,
stream);
} else {
gpu_unsnap(d_comp_in, d_comp_out, d_comp_res_view, stream);
}
break;
case parquet::ZSTD:
nvcomp::batched_decompress(nvcomp::compression_type::ZSTD,
d_comp_in,
d_comp_out,
d_comp_res_view,
codec.max_decompressed_size,
codec.total_decomp_size,
stream);
break;
case parquet::BROTLI:
gpu_debrotli(d_comp_in,
d_comp_out,
d_comp_res_view,
debrotli_scratch.data(),
debrotli_scratch.size(),
stream);
break;
default: CUDF_FAIL("Unexpected decompression dispatch"); break;
}
start_pos += codec.num_pages;
}
CUDF_EXPECTS(thrust::all_of(rmm::exec_policy(stream),
comp_res.begin(),
comp_res.end(),
[] __device__(auto const& res) {
return res.status == compression_status::SUCCESS;
}),
"Error during decompression");
// now copy the uncompressed V2 def and rep level data
if (not copy_in.empty()) {
auto const d_copy_in = cudf::detail::make_device_uvector_async(
copy_in, stream, rmm::mr::get_current_device_resource());
auto const d_copy_out = cudf::detail::make_device_uvector_async(
copy_out, stream, rmm::mr::get_current_device_resource());
gpu_copy_uncompressed_blocks(d_copy_in, d_copy_out, stream);
stream.synchronize();
}
// Update the page information in device memory with the updated value of
// page_data; it now points to the uncompressed data buffer
pages.host_to_device_async(stream);
return decomp_pages;
}
} // namespace
void reader::impl::allocate_nesting_info()
{
auto const& chunks = _file_itm_data.chunks;
auto& pages = _file_itm_data.pages_info;
auto& page_nesting_info = _file_itm_data.page_nesting_info;
auto& page_nesting_decode_info = _file_itm_data.page_nesting_decode_info;
// compute total # of page_nesting infos needed and allocate space. doing this in one
// buffer to keep it to a single gpu allocation
size_t const total_page_nesting_infos = std::accumulate(
chunks.host_ptr(), chunks.host_ptr() + chunks.size(), 0, [&](int total, auto& chunk) {
// the schema of the input column
auto const& schema = _metadata->get_schema(chunk.src_col_schema);
auto const per_page_nesting_info_size = max(
schema.max_definition_level + 1, _metadata->get_output_nesting_depth(chunk.src_col_schema));
return total + (per_page_nesting_info_size * chunk.num_data_pages);
});
page_nesting_info =
cudf::detail::hostdevice_vector<gpu::PageNestingInfo>{total_page_nesting_infos, _stream};
page_nesting_decode_info =
cudf::detail::hostdevice_vector<gpu::PageNestingDecodeInfo>{total_page_nesting_infos, _stream};
// update pointers in the PageInfos
int target_page_index = 0;
int src_info_index = 0;
for (size_t idx = 0; idx < chunks.size(); idx++) {
int src_col_schema = chunks[idx].src_col_schema;
auto& schema = _metadata->get_schema(src_col_schema);
auto const per_page_nesting_info_size = std::max(
schema.max_definition_level + 1, _metadata->get_output_nesting_depth(src_col_schema));
// skip my dict pages
target_page_index += chunks[idx].num_dict_pages;
for (int p_idx = 0; p_idx < chunks[idx].num_data_pages; p_idx++) {
pages[target_page_index + p_idx].nesting = page_nesting_info.device_ptr() + src_info_index;
pages[target_page_index + p_idx].nesting_decode =
page_nesting_decode_info.device_ptr() + src_info_index;
pages[target_page_index + p_idx].nesting_info_size = per_page_nesting_info_size;
pages[target_page_index + p_idx].num_output_nesting_levels =
_metadata->get_output_nesting_depth(src_col_schema);
src_info_index += per_page_nesting_info_size;
}
target_page_index += chunks[idx].num_data_pages;
}
// fill in
int nesting_info_index = 0;
std::map<int, std::pair<std::vector<int>, std::vector<int>>> depth_remapping;
for (size_t idx = 0; idx < chunks.size(); idx++) {
int src_col_schema = chunks[idx].src_col_schema;
// schema of the input column
auto& schema = _metadata->get_schema(src_col_schema);
// real depth of the output cudf column hierarchy (1 == no nesting, 2 == 1 level, etc)
int max_depth = _metadata->get_output_nesting_depth(src_col_schema);
// # of nesting infos stored per page for this column
auto const per_page_nesting_info_size = std::max(schema.max_definition_level + 1, max_depth);
// if this column has lists, generate depth remapping
std::map<int, std::pair<std::vector<int>, std::vector<int>>> depth_remapping;
if (schema.max_repetition_level > 0) {
generate_depth_remappings(depth_remapping, src_col_schema, *_metadata);
}
// fill in host-side nesting info
int schema_idx = src_col_schema;
auto cur_schema = _metadata->get_schema(schema_idx);
int cur_depth = max_depth - 1;
while (schema_idx > 0) {
// stub columns (basically the inner field of a list scheme element) are not real columns.
// we can ignore them for the purposes of output nesting info
if (!cur_schema.is_stub()) {
// initialize each page within the chunk
for (int p_idx = 0; p_idx < chunks[idx].num_data_pages; p_idx++) {
gpu::PageNestingInfo* pni =
&page_nesting_info[nesting_info_index + (p_idx * per_page_nesting_info_size)];
gpu::PageNestingDecodeInfo* nesting_info =
&page_nesting_decode_info[nesting_info_index + (p_idx * per_page_nesting_info_size)];
// if we have lists, set our start and end depth remappings
if (schema.max_repetition_level > 0) {
auto remap = depth_remapping.find(src_col_schema);
CUDF_EXPECTS(remap != depth_remapping.end(),
"Could not find depth remapping for schema");
std::vector<int> const& rep_depth_remap = (remap->second.first);
std::vector<int> const& def_depth_remap = (remap->second.second);
for (size_t m = 0; m < rep_depth_remap.size(); m++) {
nesting_info[m].start_depth = rep_depth_remap[m];
}
for (size_t m = 0; m < def_depth_remap.size(); m++) {
nesting_info[m].end_depth = def_depth_remap[m];
}
}
// values indexed by output column index
nesting_info[cur_depth].max_def_level = cur_schema.max_definition_level;
pni[cur_depth].size = 0;
pni[cur_depth].type =
to_type_id(cur_schema, _strings_to_categorical, _timestamp_type.id());
pni[cur_depth].nullable = cur_schema.repetition_type == OPTIONAL;
}
// move up the hierarchy
cur_depth--;
}
// next schema
schema_idx = cur_schema.parent_idx;
cur_schema = _metadata->get_schema(schema_idx);
}
nesting_info_index += (per_page_nesting_info_size * chunks[idx].num_data_pages);
}
// copy nesting info to the device
page_nesting_info.host_to_device_async(_stream);
page_nesting_decode_info.host_to_device_async(_stream);
}
void reader::impl::allocate_level_decode_space()
{
auto& pages = _file_itm_data.pages_info;
// TODO: this could be made smaller if we ignored dictionary pages and pages with no
// repetition data.
size_t const per_page_decode_buf_size =
LEVEL_DECODE_BUF_SIZE * 2 * _file_itm_data.level_type_size;
auto const decode_buf_size = per_page_decode_buf_size * pages.size();
_file_itm_data.level_decode_data =
rmm::device_buffer(decode_buf_size, _stream, rmm::mr::get_current_device_resource());
// distribute the buffers
uint8_t* buf = static_cast<uint8_t*>(_file_itm_data.level_decode_data.data());
for (size_t idx = 0; idx < pages.size(); idx++) {
auto& p = pages[idx];
p.lvl_decode_buf[gpu::level_type::DEFINITION] = buf;
buf += (LEVEL_DECODE_BUF_SIZE * _file_itm_data.level_type_size);
p.lvl_decode_buf[gpu::level_type::REPETITION] = buf;
buf += (LEVEL_DECODE_BUF_SIZE * _file_itm_data.level_type_size);
}
}
std::pair<bool, std::vector<std::future<void>>> reader::impl::create_and_read_column_chunks(
cudf::host_span<row_group_info const> const row_groups_info, size_type num_rows)
{
auto& raw_page_data = _file_itm_data.raw_page_data;
auto& chunks = _file_itm_data.chunks;
// Descriptors for all the chunks that make up the selected columns
auto const num_input_columns = _input_columns.size();
auto const num_chunks = row_groups_info.size() * num_input_columns;
chunks = cudf::detail::hostdevice_vector<gpu::ColumnChunkDesc>(0, num_chunks, _stream);
// Association between each column chunk and its source
std::vector<size_type> chunk_source_map(num_chunks);
// Tracker for eventually deallocating compressed and uncompressed data
raw_page_data = std::vector<std::unique_ptr<datasource::buffer>>(num_chunks);
// Keep track of column chunk file offsets
std::vector<size_t> column_chunk_offsets(num_chunks);
// Initialize column chunk information
size_t total_decompressed_size = 0;
auto remaining_rows = num_rows;
std::vector<std::future<void>> read_rowgroup_tasks;
for (auto const& rg : row_groups_info) {
auto const& row_group = _metadata->get_row_group(rg.index, rg.source_index);
auto const row_group_start = rg.start_row;
auto const row_group_source = rg.source_index;
auto const row_group_rows = std::min<int>(remaining_rows, row_group.num_rows);
// generate ColumnChunkDesc objects for everything to be decoded (all input columns)
for (size_t i = 0; i < num_input_columns; ++i) {
auto col = _input_columns[i];
// look up metadata
auto& col_meta = _metadata->get_column_metadata(rg.index, rg.source_index, col.schema_idx);
auto& schema = _metadata->get_schema(col.schema_idx);
auto [type_width, clock_rate, converted_type] =
conversion_info(to_type_id(schema, _strings_to_categorical, _timestamp_type.id()),
_timestamp_type.id(),
schema.type,
schema.converted_type,
schema.type_length);
column_chunk_offsets[chunks.size()] =
(col_meta.dictionary_page_offset != 0)
? std::min(col_meta.data_page_offset, col_meta.dictionary_page_offset)
: col_meta.data_page_offset;
chunks.push_back(gpu::ColumnChunkDesc(col_meta.total_compressed_size,
nullptr,
col_meta.num_values,
schema.type,
type_width,
row_group_start,
row_group_rows,
schema.max_definition_level,
schema.max_repetition_level,
_metadata->get_output_nesting_depth(col.schema_idx),
required_bits(schema.max_definition_level),
required_bits(schema.max_repetition_level),
col_meta.codec,
converted_type,
schema.logical_type,
schema.decimal_precision,
clock_rate,
i,
col.schema_idx));
// Map each column chunk to its column index and its source index
chunk_source_map[chunks.size() - 1] = row_group_source;
if (col_meta.codec != Compression::UNCOMPRESSED) {
total_decompressed_size += col_meta.total_uncompressed_size;
}
}
remaining_rows -= row_group_rows;
}
// Read compressed chunk data to device memory
read_rowgroup_tasks.push_back(read_column_chunks_async(_sources,
raw_page_data,
chunks,
0,
chunks.size(),
column_chunk_offsets,
chunk_source_map,
_stream));
CUDF_EXPECTS(remaining_rows == 0, "All rows data must be read.");
return {total_decompressed_size > 0, std::move(read_rowgroup_tasks)};
}
void reader::impl::load_and_decompress_data(
cudf::host_span<row_group_info const> const row_groups_info, size_type num_rows)
{
// This function should never be called if `num_rows == 0`.
CUDF_EXPECTS(num_rows > 0, "Number of reading rows must not be zero.");
auto& raw_page_data = _file_itm_data.raw_page_data;
auto& decomp_page_data = _file_itm_data.decomp_page_data;
auto& chunks = _file_itm_data.chunks;
auto& pages = _file_itm_data.pages_info;
auto const [has_compressed_data, read_rowgroup_tasks] =
create_and_read_column_chunks(row_groups_info, num_rows);
for (auto& task : read_rowgroup_tasks) {
task.wait();
}
// Process dataset chunk pages into output columns
auto const total_pages = count_page_headers(chunks, _stream);
pages = cudf::detail::hostdevice_vector<gpu::PageInfo>(total_pages, total_pages, _stream);
if (total_pages > 0) {
// decoding of column/page information
_file_itm_data.level_type_size = decode_page_headers(chunks, pages, _stream);
if (has_compressed_data) {
decomp_page_data = decompress_page_data(chunks, pages, _stream);
// Free compressed data
for (size_t c = 0; c < chunks.size(); c++) {
if (chunks[c].codec != parquet::Compression::UNCOMPRESSED) { raw_page_data[c].reset(); }
}
}
// build output column info
// walk the schema, building out_buffers that mirror what our final cudf columns will look
// like. important : there is not necessarily a 1:1 mapping between input columns and output
// columns. For example, parquet does not explicitly store a ColumnChunkDesc for struct
// columns. The "structiness" is simply implied by the schema. For example, this schema:
// required group field_id=1 name {
// required binary field_id=2 firstname (String);
// required binary field_id=3 middlename (String);
// required binary field_id=4 lastname (String);
// }
// will only contain 3 columns of data (firstname, middlename, lastname). But of course
// "name" is a struct column that we want to return, so we have to make sure that we
// create it ourselves.
// std::vector<output_column_info> output_info = build_output_column_info();
// the following two allocate functions modify the page data
pages.device_to_host_sync(_stream);
{
// nesting information (sizes, etc) stored -per page-
// note : even for flat schemas, we allocate 1 level of "nesting" info
allocate_nesting_info();
// level decode space
allocate_level_decode_space();
}
pages.host_to_device_async(_stream);
}
}
namespace {
struct cumulative_row_info {
size_t row_count; // cumulative row count
size_t size_bytes; // cumulative size in bytes
int key; // schema index
};
#if defined(PREPROCESS_DEBUG)
void print_pages(cudf::detail::hostdevice_vector<gpu::PageInfo>& pages,
rmm::cuda_stream_view _stream)
{
pages.device_to_host_sync(_stream);
for (size_t idx = 0; idx < pages.size(); idx++) {
auto const& p = pages[idx];
// skip dictionary pages
if (p.flags & gpu::PAGEINFO_FLAGS_DICTIONARY) { continue; }
printf(
"P(%lu, s:%d): chunk_row(%d), num_rows(%d), skipped_values(%d), skipped_leaf_values(%d), "
"str_bytes(%d)\n",
idx,
p.src_col_schema,
p.chunk_row,
p.num_rows,
p.skipped_values,
p.skipped_leaf_values,
p.str_bytes);
}
}
void print_cumulative_page_info(cudf::detail::hostdevice_vector<gpu::PageInfo>& pages,
rmm::device_uvector<int32_t> const& page_index,
rmm::device_uvector<cumulative_row_info> const& c_info,
rmm::cuda_stream_view stream)
{
pages.device_to_host_sync(stream);
printf("------------\nCumulative sizes by page\n");
std::vector<int> schemas(pages.size());
std::vector<int> h_page_index(pages.size());
CUDF_CUDA_TRY(cudaMemcpy(
h_page_index.data(), page_index.data(), sizeof(int) * pages.size(), cudaMemcpyDefault));
std::vector<cumulative_row_info> h_cinfo(pages.size());
CUDF_CUDA_TRY(cudaMemcpy(
h_cinfo.data(), c_info.data(), sizeof(cumulative_row_info) * pages.size(), cudaMemcpyDefault));
auto schema_iter = cudf::detail::make_counting_transform_iterator(
0, [&](size_type i) { return pages[h_page_index[i]].src_col_schema; });
thrust::copy(thrust::seq, schema_iter, schema_iter + pages.size(), schemas.begin());
auto last = thrust::unique(thrust::seq, schemas.begin(), schemas.end());
schemas.resize(last - schemas.begin());
printf("Num schemas: %lu\n", schemas.size());
for (size_t idx = 0; idx < schemas.size(); idx++) {
printf("Schema %d\n", schemas[idx]);
for (size_t pidx = 0; pidx < pages.size(); pidx++) {
auto const& page = pages[h_page_index[pidx]];
if (page.flags & gpu::PAGEINFO_FLAGS_DICTIONARY || page.src_col_schema != schemas[idx]) {
continue;
}
printf("\tP: {%lu, %lu}\n", h_cinfo[pidx].row_count, h_cinfo[pidx].size_bytes);
}
}
}
void print_cumulative_row_info(
host_span<cumulative_row_info const> sizes,
std::string const& label,
std::optional<std::vector<gpu::chunk_read_info>> splits = std::nullopt)
{
if (splits.has_value()) {
printf("------------\nSplits\n");
for (size_t idx = 0; idx < splits->size(); idx++) {
printf("{%lu, %lu}\n", splits.value()[idx].skip_rows, splits.value()[idx].num_rows);
}
}
printf("------------\nCumulative sizes %s\n", label.c_str());
for (size_t idx = 0; idx < sizes.size(); idx++) {
printf("{%lu, %lu, %d}", sizes[idx].row_count, sizes[idx].size_bytes, sizes[idx].key);
if (splits.has_value()) {
// if we have a split at this row count and this is the last instance of this row count
auto start = thrust::make_transform_iterator(
splits->begin(), [](gpu::chunk_read_info const& i) { return i.skip_rows; });
auto end = start + splits->size();
auto split = std::find(start, end, sizes[idx].row_count);
auto const split_index = [&]() -> int {
if (split != end &&
((idx == sizes.size() - 1) || (sizes[idx + 1].row_count > sizes[idx].row_count))) {
return static_cast<int>(std::distance(start, split));
}
return idx == 0 ? 0 : -1;
}();
if (split_index >= 0) {
printf(" <-- split {%lu, %lu}",
splits.value()[split_index].skip_rows,
splits.value()[split_index].num_rows);
}
}
printf("\n");
}
}
#endif // PREPROCESS_DEBUG
/**
* @brief Functor which reduces two cumulative_row_info structs of the same key.
*/
struct cumulative_row_sum {
cumulative_row_info operator()
__device__(cumulative_row_info const& a, cumulative_row_info const& b) const
{
return cumulative_row_info{a.row_count + b.row_count, a.size_bytes + b.size_bytes, a.key};
}
};
/**
* @brief Functor which computes the total data size for a given type of cudf column.
*
* In the case of strings, the return size does not include the chars themselves. That
* information is tracked separately (see PageInfo::str_bytes).
*/
struct row_size_functor {
__device__ size_t validity_size(size_t num_rows, bool nullable)
{
return nullable ? (cudf::util::div_rounding_up_safe(num_rows, size_t{32}) * 4) : 0;
}
template <typename T>
__device__ size_t operator()(size_t num_rows, bool nullable)
{
auto const element_size = sizeof(device_storage_type_t<T>);
return (element_size * num_rows) + validity_size(num_rows, nullable);
}
};
template <>
__device__ size_t row_size_functor::operator()<list_view>(size_t num_rows, bool nullable)
{
auto const offset_size = sizeof(size_type);
// NOTE: Adding the + 1 offset here isn't strictly correct. There will only be 1 extra offset
// for the entire column, whereas this is adding an extra offset per page. So we will get a
// small over-estimate of the real size, on the order of (# of pages * 4 bytes). It seems better
// to overestimate the size somewhat than to underestimate it and potentially generate chunks
// that are too large.
return (offset_size * (num_rows + 1)) + validity_size(num_rows, nullable);
}
template <>
__device__ size_t row_size_functor::operator()<struct_view>(size_t num_rows, bool nullable)
{
return validity_size(num_rows, nullable);
}
template <>
__device__ size_t row_size_functor::operator()<string_view>(size_t num_rows, bool nullable)
{
// only returns the size of offsets and validity. the size of the actual string chars
// is tracked separately.
auto const offset_size = sizeof(size_type);
// see note about offsets in the list_view template.
return (offset_size * (num_rows + 1)) + validity_size(num_rows, nullable);
}
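// Worked example (a sketch, not part of the original source): for a nullable column and a page
// covering 1000 rows, the estimates above give
//   fixed-width INT32 : 1000 * 4               + ceil(1000/32) * 4 = 4000 + 128 = 4128 bytes
//   STRING / LIST     : (1000 + 1) * 4 offsets + ceil(1000/32) * 4 = 4004 + 128 = 4132 bytes
// with string character data accounted for separately through PageInfo::str_bytes.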
/**
* @brief Functor which computes the total output cudf data size for all of
* the data in this page.
*
* Sums across all nesting levels.
*/
struct get_cumulative_row_info {
gpu::PageInfo const* const pages;
__device__ cumulative_row_info operator()(size_type index)
{
auto const& page = pages[index];
if (page.flags & gpu::PAGEINFO_FLAGS_DICTIONARY) {
return cumulative_row_info{0, 0, page.src_col_schema};
}
// total nested size, not counting string data
auto iter =
cudf::detail::make_counting_transform_iterator(0, [page, index] __device__(size_type i) {
auto const& pni = page.nesting[i];
return cudf::type_dispatcher(
data_type{pni.type}, row_size_functor{}, pni.size, pni.nullable);
});
size_t const row_count = static_cast<size_t>(page.nesting[0].size);
return {
row_count,
thrust::reduce(thrust::seq, iter, iter + page.num_output_nesting_levels) + page.str_bytes,
page.src_col_schema};
}
};
/**
* @brief Functor which computes the effective size of all input columns by page.
*
* For a given row, we want to find the cost of all pages for all columns involved
* in loading up to that row. The complication here is that not all pages are the
* same size between columns. Example:
*
* page row counts
* Column A: 0 <----> 100 <----> 200
* Column B: 0 <---------------> 200 <--------> 400
 *                      |
* if we decide to split at row 100, we don't really know the actual amount of bytes in column B
* at that point. So we have to proceed as if we are taking the bytes from all 200 rows of that
* page. Essentially, a conservative over-estimate of the real size.
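 * For example (hypothetical sizes): if Column A's page covering rows 0-100 holds 1 MB and Column
 * B's page covering rows 0-200 holds 3 MB, the size charged to a split at row 100 is the full
 * 1 MB + 3 MB = 4 MB.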
*/
struct row_total_size {
cumulative_row_info const* c_info;
size_type const* key_offsets;
size_t num_keys;
__device__ cumulative_row_info operator()(cumulative_row_info const& i)
{
// sum sizes for each input column at this row
size_t sum = 0;
for (int idx = 0; idx < num_keys; idx++) {
auto const start = key_offsets[idx];
auto const end = key_offsets[idx + 1];
auto iter = cudf::detail::make_counting_transform_iterator(
0, [&] __device__(size_type i) { return c_info[i].row_count; });
auto const page_index =
thrust::lower_bound(thrust::seq, iter + start, iter + end, i.row_count) - iter;
sum += c_info[page_index].size_bytes;
}
return {i.row_count, sum, i.key};
}
};
/**
* @brief Given a vector of cumulative {row_count, byte_size} pairs and a chunk read
* limit, determine the set of splits.
*
* @param sizes Vector of cumulative {row_count, byte_size} pairs
* @param num_rows Total number of rows to read
* @param chunk_read_limit Limit on total number of bytes to be returned per read, for all columns
*/
std::vector<gpu::chunk_read_info> find_splits(std::vector<cumulative_row_info> const& sizes,
size_t num_rows,
size_t chunk_read_limit)
{
// now we have an array of {row_count, real output bytes}. just walk through it and generate
// splits.
// TODO: come up with a clever way to do this entirely in parallel. For now, as long as batch
// sizes are reasonably large, this shouldn't iterate too many times
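// Illustrative example (made-up numbers): with aggregated sizes {100 rows, 1 MB}, {200 rows, 2 MB},
// {300 rows, 3 MB} and a 2 MB chunk_read_limit, the loop below produces splits {0, 200} and
// {200, 100}.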
std::vector<gpu::chunk_read_info> splits;
{
size_t cur_pos = 0;
size_t cur_cumulative_size = 0;
size_t cur_row_count = 0;
auto start = thrust::make_transform_iterator(sizes.begin(), [&](cumulative_row_info const& i) {
return i.size_bytes - cur_cumulative_size;
});
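// note: the lambda above captures cur_cumulative_size by reference, so once the loop below moves
// past a split the iterator yields "bytes beyond the previous split" rather than absolute sizes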
auto end = start + sizes.size();
while (cur_row_count < num_rows) {
int64_t split_pos =
thrust::lower_bound(thrust::seq, start + cur_pos, end, chunk_read_limit) - start;
// if we're past the end, or if the returned bucket is > than the chunk_read_limit, move back
// one.
if (static_cast<size_t>(split_pos) >= sizes.size() ||
(sizes[split_pos].size_bytes - cur_cumulative_size > chunk_read_limit)) {
split_pos--;
}
// best-try. if we can't find something that'll fit, we have to go bigger. we're doing this in
// a loop because all of the cumulative sizes for all the pages are sorted into one big list.
// so if we had two columns, both of which had an entry {1000, 10000}, that entry would be in
// the list twice. so we have to iterate until we skip past all of them. The idea is that we
// either do this, or we have to call unique() on the input first.
while (split_pos < (static_cast<int64_t>(sizes.size()) - 1) &&
(split_pos < 0 || sizes[split_pos].row_count == cur_row_count)) {
split_pos++;
}
auto const start_row = cur_row_count;
cur_row_count = sizes[split_pos].row_count;
splits.push_back(gpu::chunk_read_info{start_row, cur_row_count - start_row});
cur_pos = split_pos;
cur_cumulative_size = sizes[split_pos].size_bytes;
}
}
// print_cumulative_row_info(sizes, "adjusted", splits);
return splits;
}
/**
* @brief Given a set of pages that have had their sizes computed by nesting level and
* a limit on total read size, generate a set of {skip_rows, num_rows} pairs representing
* a set of reads that will generate output columns of total size <= `chunk_read_limit` bytes.
*
* @param pages All pages in the file
* @param id Additional intermediate information required to process the pages
* @param num_rows Total number of rows to read
* @param chunk_read_limit Limit on total number of bytes to be returned per read, for all columns
* @param stream CUDA stream to use
*/
std::vector<gpu::chunk_read_info> compute_splits(
cudf::detail::hostdevice_vector<gpu::PageInfo>& pages,
gpu::chunk_intermediate_data const& id,
size_t num_rows,
size_t chunk_read_limit,
rmm::cuda_stream_view stream)
{
auto const& page_keys = id.page_keys;
auto const& page_index = id.page_index;
// generate cumulative row counts and sizes
rmm::device_uvector<cumulative_row_info> c_info(page_keys.size(), stream);
// convert PageInfo to cumulative_row_info
auto page_input = thrust::make_transform_iterator(page_index.begin(),
get_cumulative_row_info{pages.device_ptr()});
thrust::inclusive_scan_by_key(rmm::exec_policy(stream),
page_keys.begin(),
page_keys.end(),
page_input,
c_info.begin(),
thrust::equal_to{},
cumulative_row_sum{});
// print_cumulative_page_info(pages, page_index, c_info, stream);
// sort by row count
rmm::device_uvector<cumulative_row_info> c_info_sorted{c_info, stream};
thrust::sort(rmm::exec_policy(stream),
c_info_sorted.begin(),
c_info_sorted.end(),
[] __device__(cumulative_row_info const& a, cumulative_row_info const& b) {
return a.row_count < b.row_count;
});
// std::vector<cumulative_row_info> h_c_info_sorted(c_info_sorted.size());
// CUDF_CUDA_TRY(cudaMemcpy(h_c_info_sorted.data(),
// c_info_sorted.data(),
// sizeof(cumulative_row_info) * c_info_sorted.size(),
// cudaMemcpyDefault));
// print_cumulative_row_info(h_c_info_sorted, "raw");
// generate key offsets (offsets to the start of each partition of keys). worst case is 1 page per
// key
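// e.g., per-key page counts of {3, 2, 4} from the reduce_by_key below become start offsets
// {0, 3, 5} after the exclusive_scan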
rmm::device_uvector<size_type> key_offsets(page_keys.size() + 1, stream);
auto const key_offsets_end = thrust::reduce_by_key(rmm::exec_policy(stream),
page_keys.begin(),
page_keys.end(),
thrust::make_constant_iterator(1),
thrust::make_discard_iterator(),
key_offsets.begin())
.second;
size_t const num_unique_keys = key_offsets_end - key_offsets.begin();
thrust::exclusive_scan(
rmm::exec_policy(stream), key_offsets.begin(), key_offsets.end(), key_offsets.begin());
// adjust the cumulative info such that for each row count, the size includes any pages that span
// that row count. this is so that if we have this case:
// page row counts
// Column A: 0 <----> 100 <----> 200
// Column B: 0 <---------------> 200 <--------> 400
//                      |
// if we decide to split at row 100, we don't really know the actual amount of bytes in column B
// at that point. So we have to proceed as if we are taking the bytes from all 200 rows of that
// page.
//
rmm::device_uvector<cumulative_row_info> aggregated_info(c_info.size(), stream);
thrust::transform(rmm::exec_policy(stream),
c_info_sorted.begin(),
c_info_sorted.end(),
aggregated_info.begin(),
row_total_size{c_info.data(), key_offsets.data(), num_unique_keys});
// bring back to the cpu
std::vector<cumulative_row_info> h_aggregated_info(aggregated_info.size());
CUDF_CUDA_TRY(cudaMemcpyAsync(h_aggregated_info.data(),
aggregated_info.data(),
sizeof(cumulative_row_info) * c_info.size(),
cudaMemcpyDefault,
stream.value()));
stream.synchronize();
return find_splits(h_aggregated_info, num_rows, chunk_read_limit);
}
struct get_page_chunk_idx {
__device__ size_type operator()(gpu::PageInfo const& page) { return page.chunk_idx; }
};
struct get_page_num_rows {
__device__ size_type operator()(gpu::PageInfo const& page) { return page.num_rows; }
};
struct get_page_column_index {
gpu::ColumnChunkDesc const* chunks;
__device__ size_type operator()(gpu::PageInfo const& page)
{
return chunks[page.chunk_idx].src_col_index;
}
};
struct input_col_info {
int const schema_idx;
size_type const nesting_depth;
};
/**
* @brief Converts a 1-dimensional index into page, depth and column indices used in
 * allocate_columns to compute column sizes.
*
* The input index will iterate through pages, nesting depth and column indices in that order.
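 * For example, with num_pages = 4 and max_depth = 2, index 5 decomposes to
 * {page_idx = 1, depth_idx = 1, col_idx = 0}.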
*/
struct reduction_indices {
size_t const page_idx;
size_type const depth_idx;
size_type const col_idx;
__device__ reduction_indices(size_t index_, size_type max_depth_, size_t num_pages_)
: page_idx(index_ % num_pages_),
depth_idx((index_ / num_pages_) % max_depth_),
col_idx(index_ / (max_depth_ * num_pages_))
{
}
};
/**
* @brief Returns the size field of a PageInfo struct for a given depth, keyed by schema.
*/
struct get_page_nesting_size {
input_col_info const* const input_cols;
size_type const max_depth;
size_t const num_pages;
gpu::PageInfo const* const pages;
int const* page_indices;
__device__ size_type operator()(size_t index) const
{
auto const indices = reduction_indices{index, max_depth, num_pages};
auto const& page = pages[page_indices[indices.page_idx]];
if (page.src_col_schema != input_cols[indices.col_idx].schema_idx ||
page.flags & gpu::PAGEINFO_FLAGS_DICTIONARY ||
indices.depth_idx >= input_cols[indices.col_idx].nesting_depth) {
return 0;
}
return page.nesting[indices.depth_idx].batch_size;
}
};
struct get_reduction_key {
size_t const num_pages;
__device__ size_t operator()(size_t index) const { return index / num_pages; }
};
/**
* @brief Writes to the chunk_row field of the PageInfo struct.
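 *
 * Used as the output iterator of the exclusive_scan_by_key call in preprocess_pages; only the
 * iterator operations that usage requires (advance, increment, dereference, subscript) are
 * implemented.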
*/
struct chunk_row_output_iter {
gpu::PageInfo* p;
using value_type = size_type;
using difference_type = size_type;
using pointer = size_type*;
using reference = size_type&;
using iterator_category = thrust::output_device_iterator_tag;
__host__ __device__ chunk_row_output_iter operator+(int i)
{
return chunk_row_output_iter{p + i};
}
__host__ __device__ void operator++() { p++; }
__device__ reference operator[](int i) { return p[i].chunk_row; }
__device__ reference operator*() { return p->chunk_row; }
};
/**
* @brief Writes to the page_start_value field of the PageNestingInfo struct, keyed by schema.
*/
struct start_offset_output_iterator {
gpu::PageInfo const* pages;
int const* page_indices;
size_t cur_index;
input_col_info const* input_cols;
size_type max_depth;
size_t num_pages;
int empty = 0;
using value_type = size_type;
using difference_type = size_type;
using pointer = size_type*;
using reference = size_type&;
using iterator_category = thrust::output_device_iterator_tag;
constexpr void operator=(start_offset_output_iterator const& other)
{
pages = other.pages;
page_indices = other.page_indices;
cur_index = other.cur_index;
input_cols = other.input_cols;
max_depth = other.max_depth;
num_pages = other.num_pages;
}
constexpr start_offset_output_iterator operator+(size_t i)
{
return start_offset_output_iterator{
pages, page_indices, cur_index + i, input_cols, max_depth, num_pages};
}
constexpr void operator++() { cur_index++; }
__device__ reference operator[](size_t i) { return dereference(cur_index + i); }
__device__ reference operator*() { return dereference(cur_index); }
private:
__device__ reference dereference(size_t index)
{
auto const indices = reduction_indices{index, max_depth, num_pages};
gpu::PageInfo const& p = pages[page_indices[indices.page_idx]];
if (p.src_col_schema != input_cols[indices.col_idx].schema_idx ||
p.flags & gpu::PAGEINFO_FLAGS_DICTIONARY ||
indices.depth_idx >= input_cols[indices.col_idx].nesting_depth) {
return empty;
}
return p.nesting_decode[indices.depth_idx].page_start_value;
}
};
struct flat_column_num_rows {
gpu::PageInfo const* pages;
gpu::ColumnChunkDesc const* chunks;
__device__ size_type operator()(size_type pindex) const
{
gpu::PageInfo const& page = pages[pindex];
// ignore dictionary pages and pages belonging to any column containing repetition (lists)
if ((page.flags & gpu::PAGEINFO_FLAGS_DICTIONARY) ||
(chunks[page.chunk_idx].max_level[gpu::level_type::REPETITION] > 0)) {
return 0;
}
return page.num_rows;
}
};
struct row_counts_nonzero {
__device__ bool operator()(size_type count) const { return count > 0; }
};
struct row_counts_different {
size_type const expected;
__device__ bool operator()(size_type count) const { return (count != 0) && (count != expected); }
};
/**
* @brief Detect malformed parquet input data.
*
* We have seen cases where parquet files can be oddly malformed. This function specifically
* detects one case in particular:
*
* - When you have a file containing N rows
* - For some reason, the sum total of the number of rows over all pages for a given column
* is != N
*
* @param pages All pages to be decoded
* @param chunks Chunk data
* @param page_keys Keys (schema id) associated with each page, sorted by column
* @param page_index Page indices for iteration, sorted by column
* @param expected_row_count Expected row count, if applicable
* @param stream CUDA stream used for device memory operations and kernel launches
*/
void detect_malformed_pages(cudf::detail::hostdevice_vector<gpu::PageInfo>& pages,
cudf::detail::hostdevice_vector<gpu::ColumnChunkDesc> const& chunks,
device_span<int const> page_keys,
device_span<int const> page_index,
std::optional<size_t> expected_row_count,
rmm::cuda_stream_view stream)
{
// sum row counts for all non-dictionary, non-list columns. other columns will be indicated as 0
rmm::device_uvector<size_type> row_counts(pages.size(),
stream); // worst case: num keys == num pages
auto const size_iter = thrust::make_transform_iterator(
page_index.begin(), flat_column_num_rows{pages.device_ptr(), chunks.device_ptr()});
auto const row_counts_begin = row_counts.begin();
auto const row_counts_end = thrust::reduce_by_key(rmm::exec_policy(stream),
page_keys.begin(),
page_keys.end(),
size_iter,
thrust::make_discard_iterator(),
row_counts_begin)
.second;
// make sure all non-zero row counts are the same
rmm::device_uvector<size_type> compacted_row_counts(pages.size(), stream);
auto const compacted_row_counts_begin = compacted_row_counts.begin();
auto const compacted_row_counts_end = thrust::copy_if(rmm::exec_policy(stream),
row_counts_begin,
row_counts_end,
compacted_row_counts_begin,
row_counts_nonzero{});
if (compacted_row_counts_end != compacted_row_counts_begin) {
size_t const found_row_count = static_cast<size_t>(compacted_row_counts.element(0, stream));
// if we somehow don't match the expected row count from the row groups themselves
if (expected_row_count.has_value()) {
CUDF_EXPECTS(expected_row_count.value() == found_row_count,
"Encountered malformed parquet page data (unexpected row count in page data)");
}
// all non-zero row counts must be the same
auto const chk =
thrust::count_if(rmm::exec_policy(stream),
compacted_row_counts_begin,
compacted_row_counts_end,
row_counts_different{static_cast<size_type>(found_row_count)});
CUDF_EXPECTS(chk == 0,
"Encountered malformed parquet page data (row count mismatch in page data)");
}
}
struct page_to_string_size {
gpu::PageInfo* pages;
gpu::ColumnChunkDesc const* chunks;
__device__ size_t operator()(size_type page_idx) const
{
auto const page = pages[page_idx];
auto const chunk = chunks[page.chunk_idx];
if (not is_string_col(chunk) || (page.flags & gpu::PAGEINFO_FLAGS_DICTIONARY) != 0) {
return 0;
}
return pages[page_idx].str_bytes;
}
};
struct page_offset_output_iter {
gpu::PageInfo* p;
size_type const* index;
using value_type = size_type;
using difference_type = size_type;
using pointer = size_type*;
using reference = size_type&;
using iterator_category = thrust::output_device_iterator_tag;
__host__ __device__ page_offset_output_iter operator+(int i)
{
return page_offset_output_iter{p, index + i};
}
__host__ __device__ void operator++() { index++; }
__device__ reference operator[](int i) { return p[index[i]].str_offset; }
__device__ reference operator*() { return p[*index].str_offset; }
};
} // anonymous namespace
void reader::impl::preprocess_pages(size_t skip_rows,
size_t num_rows,
bool uses_custom_row_bounds,
size_t chunk_read_limit)
{
auto& chunks = _file_itm_data.chunks;
auto& pages = _file_itm_data.pages_info;
// compute page ordering.
//
// ordering of pages is by input column schema, repeated across row groups. so
// if we had 3 columns, each with 2 pages, and 1 row group, our schema values might look like
//
// 1, 1, 2, 2, 3, 3
//
// However, if we had more than one row group, the pattern would be
//
// 1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3
// ^ row group 0     |
//                   ^ row group 1
//
// To process pages by key (exclusive_scan_by_key, reduce_by_key, etc), the ordering we actually
// want is
//
// 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3
//
// We also need to preserve key-relative page ordering, so we need to use a stable sort.
rmm::device_uvector<int> page_keys(pages.size(), _stream);
rmm::device_uvector<int> page_index(pages.size(), _stream);
{
thrust::transform(rmm::exec_policy(_stream),
pages.device_ptr(),
pages.device_ptr() + pages.size(),
page_keys.begin(),
get_page_column_index{chunks.device_ptr()});
thrust::sequence(rmm::exec_policy(_stream), page_index.begin(), page_index.end());
thrust::stable_sort_by_key(rmm::exec_policy(_stream),
page_keys.begin(),
page_keys.end(),
page_index.begin(),
thrust::less<int>());
}
// detect malformed columns.
// - we have seen some cases in the wild where we have a row group containing N
// rows, but the total number of rows in the pages for column X is != N. while it
// is possible to load this by just capping the number of rows read, we cannot tell
// which rows are invalid so we may be returning bad data. in addition, this mismatch
// confuses the chunked reader
detect_malformed_pages(pages,
chunks,
page_keys,
page_index,
uses_custom_row_bounds ? std::nullopt : std::make_optional(num_rows),
_stream);
// iterate over all input columns and determine if they contain lists so we can further
// preprocess them.
bool has_lists = false;
for (size_t idx = 0; idx < _input_columns.size(); idx++) {
auto const& input_col = _input_columns[idx];
size_t const max_depth = input_col.nesting_depth();
auto* cols = &_output_buffers;
for (size_t l_idx = 0; l_idx < max_depth; l_idx++) {
auto& out_buf = (*cols)[input_col.nesting[l_idx]];
cols = &out_buf.children;
// if this has a list parent, we have to get column sizes from the
// data computed during gpu::ComputePageSizes
if (out_buf.user_data & PARQUET_COLUMN_BUFFER_FLAG_HAS_LIST_PARENT) {
has_lists = true;
break;
}
}
if (has_lists) { break; }
}
// generate string dict indices if necessary
{
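// data_type packs the parquet physical type in its low 3 bits (hence the & 0x7); only BYTE_ARRAY
// chunks that actually contain a dictionary page need string dictionary indices built for them.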
auto is_dict_chunk = [](gpu::ColumnChunkDesc const& chunk) {
return (chunk.data_type & 0x7) == BYTE_ARRAY && chunk.num_dict_pages > 0;
};
// Count the number of string dictionary entries
// NOTE: Assumes first page in the chunk is always the dictionary page
size_t total_str_dict_indexes = 0;
for (size_t c = 0, page_count = 0; c < chunks.size(); c++) {
if (is_dict_chunk(chunks[c])) {
total_str_dict_indexes += pages[page_count].num_input_values;
}
page_count += chunks[c].max_num_pages;
}
// Build index for string dictionaries since they can't be indexed
// directly due to variable-sized elements
_chunk_itm_data.str_dict_index =
cudf::detail::make_zeroed_device_uvector_async<string_index_pair>(
total_str_dict_indexes, _stream, rmm::mr::get_current_device_resource());
// Update chunks with pointers to string dict indices
for (size_t c = 0, page_count = 0, str_ofs = 0; c < chunks.size(); c++) {
input_column_info const& input_col = _input_columns[chunks[c].src_col_index];
CUDF_EXPECTS(input_col.schema_idx == chunks[c].src_col_schema,
"Column/page schema index mismatch");
if (is_dict_chunk(chunks[c])) {
chunks[c].str_dict_index = _chunk_itm_data.str_dict_index.data() + str_ofs;
str_ofs += pages[page_count].num_input_values;
}
// column_data_base will always point to leaf data, even for nested types.
page_count += chunks[c].max_num_pages;
}
if (total_str_dict_indexes > 0) {
chunks.host_to_device_async(_stream);
gpu::BuildStringDictionaryIndex(chunks.device_ptr(), chunks.size(), _stream);
}
}
// intermediate data we will need for further chunked reads
if (has_lists || chunk_read_limit > 0) {
// computes:
// PageNestingInfo::num_rows for each page. the true number of rows (taking repetition into
// account), not just the number of values. PageNestingInfo::size for each level of nesting, for
// each page.
//
// we will be applying a later "trim" pass if skip_rows/num_rows is being used, which can happen
// if:
// - user has passed custom row bounds
// - we will be doing a chunked read
gpu::ComputePageSizes(pages,
chunks,
0, // 0-max size_t. process all possible rows
std::numeric_limits<size_t>::max(),
true, // compute num_rows
chunk_read_limit > 0, // compute string sizes
_file_itm_data.level_type_size,
_stream);
// computes:
// PageInfo::chunk_row (the absolute start row index) for all pages
// Note: this is doing some redundant work for pages in flat hierarchies. chunk_row has already
// been computed during header decoding. the overall amount of work here is very small though.
auto key_input = thrust::make_transform_iterator(pages.device_ptr(), get_page_chunk_idx{});
auto page_input = thrust::make_transform_iterator(pages.device_ptr(), get_page_num_rows{});
thrust::exclusive_scan_by_key(rmm::exec_policy(_stream),
key_input,
key_input + pages.size(),
page_input,
chunk_row_output_iter{pages.device_ptr()});
// retrieve pages back
pages.device_to_host_sync(_stream);
// print_pages(pages, _stream);
}
// preserve page ordering data for string decoder
_chunk_itm_data.page_keys = std::move(page_keys);
_chunk_itm_data.page_index = std::move(page_index);
// compute splits if necessary. otherwise return a single split representing
// the whole file.
_chunk_read_info = chunk_read_limit > 0
? compute_splits(pages, _chunk_itm_data, num_rows, chunk_read_limit, _stream)
: std::vector<gpu::chunk_read_info>{{skip_rows, num_rows}};
}
void reader::impl::allocate_columns(size_t skip_rows, size_t num_rows, bool uses_custom_row_bounds)
{
auto const& chunks = _file_itm_data.chunks;
auto& pages = _file_itm_data.pages_info;
// Should not reach here if there is no page data.
CUDF_EXPECTS(pages.size() > 0, "There is no page to parse");
// computes:
// PageNestingInfo::batch_size for each level of nesting, for each page, taking row bounds into
// account. PageInfo::skipped_values, which tells us where to start decoding in the input to
// respect the user bounds. It is only necessary to do this second pass if uses_custom_row_bounds
// is set (if the user has specified artificial bounds).
if (uses_custom_row_bounds) {
gpu::ComputePageSizes(pages,
chunks,
skip_rows,
num_rows,
false, // num_rows is already computed
false, // no need to compute string sizes
_file_itm_data.level_type_size,
_stream);
// print_pages(pages, _stream);
}
// iterate over all input columns and allocate any associated output
// buffers if they are not part of a list hierarchy. mark down
// if we have any list columns that need further processing.
bool has_lists = false;
for (size_t idx = 0; idx < _input_columns.size(); idx++) {
auto const& input_col = _input_columns[idx];
size_t const max_depth = input_col.nesting_depth();
auto* cols = &_output_buffers;
for (size_t l_idx = 0; l_idx < max_depth; l_idx++) {
auto& out_buf = (*cols)[input_col.nesting[l_idx]];
cols = &out_buf.children;
// if this has a list parent, we have to get column sizes from the
// data computed during gpu::ComputePageSizes
if (out_buf.user_data & PARQUET_COLUMN_BUFFER_FLAG_HAS_LIST_PARENT) {
has_lists = true;
}
// if we haven't already processed this column because it is part of a struct hierarchy
else if (out_buf.size == 0) {
// add 1 for the offset if this is a list column
out_buf.create(
out_buf.type.id() == type_id::LIST && l_idx < max_depth ? num_rows + 1 : num_rows,
_stream,
_mr);
}
}
}
// compute output column sizes by examining the pages of the -input- columns
if (has_lists) {
auto& page_index = _chunk_itm_data.page_index;
std::vector<input_col_info> h_cols_info;
h_cols_info.reserve(_input_columns.size());
std::transform(_input_columns.cbegin(),
_input_columns.cend(),
std::back_inserter(h_cols_info),
[](auto& col) -> input_col_info {
return {col.schema_idx, static_cast<size_type>(col.nesting_depth())};
});
auto const max_depth =
(*std::max_element(h_cols_info.cbegin(),
h_cols_info.cend(),
[](auto& l, auto& r) { return l.nesting_depth < r.nesting_depth; }))
.nesting_depth;
auto const d_cols_info = cudf::detail::make_device_uvector_async(
h_cols_info, _stream, rmm::mr::get_current_device_resource());
auto const num_keys = _input_columns.size() * max_depth * pages.size();
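// one entry per (column, depth, page) triple; the reduce_by_key below collapses the page dimension
// to yield one size per (column, depth) pair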
// size iterator. indexes pages by sorted order
rmm::device_uvector<size_type> size_input{num_keys, _stream};
thrust::transform(
rmm::exec_policy(_stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(num_keys),
size_input.begin(),
get_page_nesting_size{
d_cols_info.data(), max_depth, pages.size(), pages.device_ptr(), page_index.begin()});
auto const reduction_keys =
cudf::detail::make_counting_transform_iterator(0, get_reduction_key{pages.size()});
cudf::detail::hostdevice_vector<size_t> sizes{_input_columns.size() * max_depth, _stream};
// find the size of each column
thrust::reduce_by_key(rmm::exec_policy(_stream),
reduction_keys,
reduction_keys + num_keys,
size_input.cbegin(),
thrust::make_discard_iterator(),
sizes.d_begin());
// for nested hierarchies, compute per-page start offset
thrust::exclusive_scan_by_key(
rmm::exec_policy(_stream),
reduction_keys,
reduction_keys + num_keys,
size_input.cbegin(),
start_offset_output_iterator{
pages.device_ptr(), page_index.begin(), 0, d_cols_info.data(), max_depth, pages.size()});
sizes.device_to_host_sync(_stream);
for (size_type idx = 0; idx < static_cast<size_type>(_input_columns.size()); idx++) {
auto const& input_col = _input_columns[idx];
auto* cols = &_output_buffers;
for (size_type l_idx = 0; l_idx < static_cast<size_type>(input_col.nesting_depth());
l_idx++) {
auto& out_buf = (*cols)[input_col.nesting[l_idx]];
cols = &out_buf.children;
// if this buffer is part of a list hierarchy, we need to determine its
// final size and allocate it here.
//
// for struct columns, higher levels of the output columns are shared between input
// columns. so don't compute any given level more than once.
if ((out_buf.user_data & PARQUET_COLUMN_BUFFER_FLAG_HAS_LIST_PARENT) && out_buf.size == 0) {
auto size = sizes[(idx * max_depth) + l_idx];
// if this is a list column add 1 for non-leaf levels for the terminating offset
if (out_buf.type.id() == type_id::LIST && l_idx < max_depth) { size++; }
// allocate
out_buf.create(size, _stream, _mr);
}
}
}
}
}
std::vector<size_t> reader::impl::calculate_page_string_offsets()
{
auto& chunks = _file_itm_data.chunks;
auto& pages = _file_itm_data.pages_info;
auto const& page_keys = _chunk_itm_data.page_keys;
auto const& page_index = _chunk_itm_data.page_index;
std::vector<size_t> col_sizes(_input_columns.size(), 0L);
rmm::device_uvector<size_t> d_col_sizes(col_sizes.size(), _stream);
// use page_index to fetch page string sizes in the proper order
auto val_iter = thrust::make_transform_iterator(
page_index.begin(), page_to_string_size{pages.device_ptr(), chunks.device_ptr()});
// do scan by key to calculate string offsets for each page
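// e.g., per-page string sizes {10, 20, 5} within one column become str_offset values {0, 10, 30}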
thrust::exclusive_scan_by_key(rmm::exec_policy(_stream),
page_keys.begin(),
page_keys.end(),
val_iter,
page_offset_output_iter{pages.device_ptr(), page_index.data()});
// now sum up page sizes
rmm::device_uvector<int> reduce_keys(col_sizes.size(), _stream);
thrust::reduce_by_key(rmm::exec_policy(_stream),
page_keys.begin(),
page_keys.end(),
val_iter,
reduce_keys.begin(),
d_col_sizes.begin());
cudaMemcpyAsync(col_sizes.data(),
d_col_sizes.data(),
sizeof(size_t) * col_sizes.size(),
cudaMemcpyDeviceToHost,
_stream);
_stream.synchronize();
return col_sizes;
}
} // namespace cudf::io::detail::parquet
|
5b06547d03c16010c069697ee92c4dd999ec206d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__device__ unsigned int getGid3d3d(){
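// flatten the 3D grid and 3D block coordinates into a single global thread index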
int blockId = blockIdx.x + blockIdx.y * gridDim.x
+ gridDim.x * gridDim.y * blockIdx.z;
int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z)
+ (threadIdx.y * blockDim.x)
+ (threadIdx.z * (blockDim.x * blockDim.y)) + threadIdx.x;
return threadId;
}
__global__ void ktest_Ax(double *x, double *y, double *z, double xMax, double yMax, double zMax, double omegaX, double omegaY, double omegaZ, double omega, double fudge, double *A){
int gid = getGid3d3d();
int yid = blockDim.y*blockIdx.y + threadIdx.y;
A[gid] = (sin(y[yid] * 100000)+1) * yMax * omega;
} | 5b06547d03c16010c069697ee92c4dd999ec206d.cu | #include "includes.h"
__device__ unsigned int getGid3d3d(){
int blockId = blockIdx.x + blockIdx.y * gridDim.x
+ gridDim.x * gridDim.y * blockIdx.z;
int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z)
+ (threadIdx.y * blockDim.x)
+ (threadIdx.z * (blockDim.x * blockDim.y)) + threadIdx.x;
return threadId;
}
__global__ void ktest_Ax(double *x, double *y, double *z, double xMax, double yMax, double zMax, double omegaX, double omegaY, double omegaZ, double omega, double fudge, double *A){
int gid = getGid3d3d();
int yid = blockDim.y*blockIdx.y + threadIdx.y;
A[gid] = (sin(y[yid] * 100000)+1) * yMax * omega;
} |
190c9ea1d7660acf1a01abd04f22d9182dad12d3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Perform the first step of Velocity Verlet integration.
*
* update displacements (posDelta) and velocities (velm)
*/
extern "C" __global__ void integrateVelocityVerletPart1(int numAtoms, int numPairs, int paddedNumAtoms, const mixed2* __restrict__ dt, const real4* __restrict__ posq,
const real4* __restrict__ posqCorrection, mixed4* __restrict__ velm, const long long* __restrict__ force, mixed4* __restrict__ posDelta,
const int* __restrict__ atomList, const int2* __restrict__ pairList) {
const mixed2 stepSize = dt[0];
const mixed dtPos = stepSize.y;
const mixed dtVel = 0.5f*(stepSize.x+stepSize.y);
const mixed scale = 0.5f*dtVel/(mixed) 0x100000000;
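// forces are accumulated as 64-bit fixed-point values; the 0x100000000 (2^32) divisor converts
// them back to real units, with the half-step velocity factor folded into the same constant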
for (int index = blockIdx.x*blockDim.x+threadIdx.x; index < numAtoms; index += blockDim.x*gridDim.x) {
int atom = atomList[index];
mixed4 velocity = velm[atom];
if (velocity.w != 0.0) {
#ifdef USE_MIXED_PRECISION
real4 pos1 = posq[atom];
real4 pos2 = posqCorrection[atom];
mixed4 pos = make_mixed4(pos1.x+(mixed)pos2.x, pos1.y+(mixed)pos2.y, pos1.z+(mixed)pos2.z, pos1.w);
#else
real4 pos = posq[atom];
#endif
velocity.x += scale*force[atom]*velocity.w;
velocity.y += scale*force[atom+paddedNumAtoms]*velocity.w;
velocity.z += scale*force[atom+paddedNumAtoms*2]*velocity.w;
pos.x = velocity.x*dtPos;
pos.y = velocity.y*dtPos;
pos.z = velocity.z*dtPos;
posDelta[atom] = pos;
velm[atom] = velocity;
}
}
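// Connected pairs (e.g., a Drude particle and its parent) are integrated in center-of-mass /
// relative coordinates: the COM velocity is advanced with the total force and total mass, the
// relative velocity with the relative force and reduced mass, and both are then mapped back to
// per-particle velocities and position deltas.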
for (int index = blockIdx.x*blockDim.x+threadIdx.x; index < numPairs; index += blockDim.x*gridDim.x) {
int atom1 = pairList[index].x;
int atom2 = pairList[index].y;
mixed4 v1 = velm[atom1];
mixed4 v2 = velm[atom2];
mixed m1 = v1.w == 0.0f ? 0.0f : 1.0f / v1.w;
mixed m2 = v2.w == 0.0f ? 0.0f : 1.0f / v2.w;
mixed mass1fract = m1 / (m1 + m2);
mixed mass2fract = m2 / (m1 + m2);
mixed invRedMass = (m1 * m2 != 0.0f) ? (m1 + m2)/(m1 * m2) : 0.0f;
mixed invTotMass = (m1 + m2 != 0.0f) ? 1.0f /(m1 + m2) : 0.0f;
mixed3 comVel;
comVel.x= v1.x*mass1fract + v2.x*mass2fract;
comVel.y= v1.y*mass1fract + v2.y*mass2fract;
comVel.z= v1.z*mass1fract + v2.z*mass2fract;
mixed3 relVel;
relVel.x= v2.x - v1.x;
relVel.y= v2.y - v1.y;
relVel.z= v2.z - v1.z;
//
mixed3 comFrc;
comFrc.x = force[atom1] + force[atom2];
comFrc.y = force[atom1 + paddedNumAtoms] + force[atom2 + paddedNumAtoms];
comFrc.z = force[atom1 + paddedNumAtoms*2] + force[atom2 + paddedNumAtoms*2];
mixed3 relFrc;
relFrc.x = mass1fract*force[atom2] - mass2fract*force[atom1];
relFrc.y = mass1fract*force[atom2+paddedNumAtoms] - mass2fract*force[atom1+paddedNumAtoms];
relFrc.z = mass1fract*force[atom2+paddedNumAtoms*2] - mass2fract*force[atom1+paddedNumAtoms*2];
comVel.x += comFrc.x * scale * invTotMass;
comVel.y += comFrc.y * scale * invTotMass;
comVel.z += comFrc.z * scale * invTotMass;
relVel.x += relFrc.x * scale * invRedMass;
relVel.y += relFrc.y * scale * invRedMass;
relVel.z += relFrc.z * scale * invRedMass;
#ifdef USE_MIXED_PRECISION
real4 posv1 = posq[atom1];
real4 posv2 = posq[atom2];
real4 posc1 = posqCorrection[atom1];
real4 posc2 = posqCorrection[atom2];
mixed4 pos1 = make_mixed4(posv1.x+(mixed)posc1.x, posv1.y+(mixed)posc1.y, posv1.z+(mixed)posc1.z, posv1.w);
mixed4 pos2 = make_mixed4(posv2.x+(mixed)posc2.x, posv2.y+(mixed)posc2.y, posv2.z+(mixed)posc2.z, posv2.w);
#else
real4 pos1 = posq[atom1];
real4 pos2 = posq[atom2];
#endif
if (v1.w != 0.0f) {
v1.x = comVel.x - relVel.x*mass2fract;
v1.y = comVel.y - relVel.y*mass2fract;
v1.z = comVel.z - relVel.z*mass2fract;
pos1.x = v1.x*dtPos;
pos1.y = v1.y*dtPos;
pos1.z = v1.z*dtPos;
posDelta[atom1] = pos1;
velm[atom1] = v1;
}
if (v2.w != 0.0f) {
v2.x = comVel.x + relVel.x*mass1fract;
v2.y = comVel.y + relVel.y*mass1fract;
v2.z = comVel.z + relVel.z*mass1fract;
pos2.x = v2.x*dtPos;
pos2.y = v2.y*dtPos;
pos2.z = v2.z*dtPos;
posDelta[atom2] = pos2;
velm[atom2] = v2;
}
}
}
/**
* Perform the second step of Velocity Verlet integration.
*
* apply displacements to positions (posq) after constraints have been enforced
*/
extern "C" __global__ void integrateVelocityVerletPart2(int numAtoms, mixed2* __restrict__ dt, real4* __restrict__ posq,
real4* __restrict__ posqCorrection, mixed4* __restrict__ velm, const mixed4* __restrict__ posDelta) {
mixed2 stepSize = dt[0];
int index = blockIdx.x*blockDim.x+threadIdx.x;
if (index == 0)
dt[0].x = stepSize.y;
for (; index < numAtoms; index += blockDim.x*gridDim.x) {
mixed4 velocity = velm[index];
if (velocity.w != 0.0) {
#ifdef USE_MIXED_PRECISION
real4 pos1 = posq[index];
real4 pos2 = posqCorrection[index];
mixed4 pos = make_mixed4(pos1.x+(mixed)pos2.x, pos1.y+(mixed)pos2.y, pos1.z+(mixed)pos2.z, pos1.w);
#else
real4 pos = posq[index];
#endif
mixed4 delta = posDelta[index];
pos.x += delta.x;
pos.y += delta.y;
pos.z += delta.z;
#ifdef USE_MIXED_PRECISION
posq[index] = make_real4((real) pos.x, (real) pos.y, (real) pos.z, (real) pos.w);
posqCorrection[index] = make_real4(pos.x-(real) pos.x, pos.y-(real) pos.y, pos.z-(real) pos.z, 0);
#else
posq[index] = pos;
#endif
}
}
}
/**
* Perform the third step of Velocity Verlet integration.
*
* modify the velocities (velm) after the force update
*/
extern "C" __global__ void integrateVelocityVerletPart3(int numAtoms, int numPairs, int paddedNumAtoms, mixed2* __restrict__ dt, real4* __restrict__ posq,
real4* __restrict__ posqCorrection, mixed4* __restrict__ velm, const long long* __restrict__ force, const mixed4* __restrict__ posDelta,
const int* __restrict__ atomList, const int2* __restrict__ pairList) {
mixed2 stepSize = dt[0];
#if __CUDA_ARCH__ >= 130
double oneOverDt = 1.0/stepSize.y;
#else
float oneOverDt = 1.0f/stepSize.y;
float correction = (1.0f-oneOverDt*stepSize.y)/stepSize.y;
#endif
const mixed dtVel = 0.5f*(stepSize.x+stepSize.y);
const mixed scale = 0.5f*dtVel/(mixed) 0x100000000;
int index = blockIdx.x*blockDim.x+threadIdx.x;
if (index == 0)
dt[0].x = stepSize.y;
for (int index = blockIdx.x*blockDim.x+threadIdx.x; index < numAtoms; index += blockDim.x*gridDim.x) {
int atom = atomList[index];
mixed4 velocity = velm[atom];
if (velocity.w != 0.0) {
mixed4 deltaXconstrained = posDelta[atom];
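// second velocity half-kick plus a correction of (constrained delta - unconstrained delta) / dt,
// which accounts for the adjustment the constraint solver made to the position update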
velocity.x += scale*force[atom]*velocity.w + (deltaXconstrained.x - velocity.x*stepSize.y)*oneOverDt;
velocity.y += scale*force[atom+paddedNumAtoms]*velocity.w + (deltaXconstrained.y - velocity.y*stepSize.y)*oneOverDt;
velocity.z += scale*force[atom+paddedNumAtoms*2]*velocity.w + (deltaXconstrained.z - velocity.z*stepSize.y)*oneOverDt;
#if __CUDA_ARCH__ < 130
velocity.x += (deltaXconstrained.x - velocity.x*stepSize.y)*correction;
velocity.y += (deltaXconstrained.y - velocity.y*stepSize.y)*correction;
velocity.z += (deltaXconstrained.z - velocity.z*stepSize.y)*correction;
#endif
velm[atom] = velocity;
}
}
for (int index = blockIdx.x*blockDim.x+threadIdx.x; index < numPairs; index += blockDim.x*gridDim.x) {
int atom1 = pairList[index].x;
int atom2 = pairList[index].y;
mixed4 v1 = velm[atom1];
mixed4 v2 = velm[atom2];
mixed m1 = v1.w == 0.0f ? 0.0f : 1.0f / v1.w;
mixed m2 = v2.w == 0.0f ? 0.0f : 1.0f / v2.w;
mixed mass1fract = m1 / (m1 + m2);
mixed mass2fract = m2 / (m1 + m2);
mixed invRedMass = (m1 * m2 != 0.0f) ? (m1 + m2)/(m1 * m2) : 0.0f;
mixed invTotMass = (m1 + m2 != 0.0f) ? 1.0f /(m1 + m2) : 0.0f;
mixed3 comVel;
comVel.x= v1.x*mass1fract + v2.x*mass2fract;
comVel.y= v1.y*mass1fract + v2.y*mass2fract;
comVel.z= v1.z*mass1fract + v2.z*mass2fract;
mixed3 relVel;
relVel.x= v2.x - v1.x;
relVel.y= v2.y - v1.y;
relVel.z= v2.z - v1.z;
//
mixed3 comFrc;
comFrc.x = force[atom1] + force[atom2];
comFrc.y = force[atom1 + paddedNumAtoms] + force[atom2 + paddedNumAtoms];
comFrc.z = force[atom1 + paddedNumAtoms*2] + force[atom2 + paddedNumAtoms*2];
mixed3 relFrc;
relFrc.x = mass1fract*force[atom2] - mass2fract*force[atom1];
relFrc.y = mass1fract*force[atom2+paddedNumAtoms] - mass2fract*force[atom1+paddedNumAtoms];
relFrc.z = mass1fract*force[atom2+paddedNumAtoms*2] - mass2fract*force[atom1+paddedNumAtoms*2];
comVel.x += comFrc.x * scale * invTotMass;
comVel.y += comFrc.y * scale * invTotMass;
comVel.z += comFrc.z * scale * invTotMass;
relVel.x += relFrc.x * scale * invRedMass;
relVel.y += relFrc.y * scale * invRedMass;
relVel.z += relFrc.z * scale * invRedMass;
if (v1.w != 0.0f) {
mixed4 deltaXconstrained = posDelta[atom1];
v1.x = comVel.x - relVel.x*mass2fract + (deltaXconstrained.x - v1.x*stepSize.y)*oneOverDt;
v1.y = comVel.y - relVel.y*mass2fract + (deltaXconstrained.y - v1.y*stepSize.y)*oneOverDt;
v1.z = comVel.z - relVel.z*mass2fract + (deltaXconstrained.z - v1.z*stepSize.y)*oneOverDt;
#if __CUDA_ARCH__ < 130
v1.x += (deltaXconstrained.x - v1.x*stepSize.y)*correction;
v1.y += (deltaXconstrained.y - v1.y*stepSize.y)*correction;
v1.z += (deltaXconstrained.z - v1.z*stepSize.y)*correction;
#endif
velm[atom1] = v1;
}
if (v2.w != 0.0f) {
mixed4 deltaXconstrained = posDelta[atom2];
v2.x = comVel.x + relVel.x*mass1fract + (deltaXconstrained.x - v2.x*stepSize.y)*oneOverDt;
v2.y = comVel.y + relVel.y*mass1fract + (deltaXconstrained.y - v2.y*stepSize.y)*oneOverDt;
v2.z = comVel.z + relVel.z*mass1fract + (deltaXconstrained.z - v2.z*stepSize.y)*oneOverDt;
#if __CUDA_ARCH__ < 130
v2.x += (deltaXconstrained.x - v2.x*stepSize.y)*correction;
v2.y += (deltaXconstrained.y - v2.y*stepSize.y)*correction;
v2.z += (deltaXconstrained.z - v2.z*stepSize.y)*correction;
#endif
velm[atom2] = v2;
}
}
}
/**
* Apply the hard wall constraint
*/
extern "C" __global__ void integrateVelocityVerletHardWall(int numPairs, const float* __restrict__ maxPairDistance, mixed2* __restrict__ dt, real4* __restrict__ posq,
real4* __restrict__ posqCorrection, mixed4* __restrict__ velm,
const int2* __restrict__ pairList, const float* __restrict__ pairTemperature) {
mixed dtPos = dt[0].y;
mixed maxDelta = (mixed) maxPairDistance[0];
// Apply hard wall constraints.
if (maxDelta > 0) {
for (int index = blockIdx.x*blockDim.x+threadIdx.x; index < numPairs; index += blockDim.x*gridDim.x) {
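// sqrt(kB * T) for this pair; divided by sqrt(mass) below, it sets the thermal speed given to the
// particle(s) when they bounce off the hard wall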
const mixed hardWallScale = sqrt( ((mixed) pairTemperature[index]) * ((mixed) BOLTZ));
int2 atom = make_int2(pairList[index].x, pairList[index].y);
#ifdef USE_MIXED_PRECISION
real4 posv1 = posq[atom.x];
real4 posc1 = posqCorrection[atom.x];
mixed4 pos1 = make_mixed4(posv1.x+(mixed)posc1.x, posv1.y+(mixed)posc1.y, posv1.z+(mixed)posc1.z, posv1.w);
real4 posv2 = posq[atom.y];
real4 posc2 = posqCorrection[atom.y];
mixed4 pos2 = make_mixed4(posv2.x+(mixed)posc2.x, posv2.y+(mixed)posc2.y, posv2.z+(mixed)posc2.z, posv2.w);
#else
real4 pos1 = posq[atom.x];
real4 pos2 = posq[atom.y];
#endif
mixed3 delta = make_mixed3(
mixed (pos1.x - pos2.x),
mixed (pos1.y - pos2.y),
mixed (pos1.z - pos2.z)
);
mixed r = sqrt(delta.x*delta.x + delta.y*delta.y + delta.z*delta.z);
mixed rInv = 1/r;
if (rInv*maxDelta < 1.0) {
// The constraint has been violated, so make the inter-particle distance "bounce"
// off the hard wall.
mixed3 bondDir = make_mixed3(delta.x * rInv, delta.y * rInv, delta.z * rInv);
mixed3 vel1 = make_mixed3(velm[atom.x].x, velm[atom.x].y, velm[atom.x].z);
mixed3 vel2 = make_mixed3(velm[atom.y].x, velm[atom.y].y, velm[atom.y].z);
mixed m1 = velm[atom.x].w != 0.0 ? 1.0/velm[atom.x].w : 0.0;
mixed m2 = velm[atom.y].w != 0.0 ? 1.0/velm[atom.y].w : 0.0;
mixed invTotMass = (m1 + m2 != 0.0) ? 1.0 /(m1 + m2) : 0.0;
mixed deltaR = r-maxDelta;
mixed deltaT = dtPos;
mixed dt = dtPos;
mixed dotvr1 = vel1.x*bondDir.x + vel1.y*bondDir.y + vel1.z*bondDir.z;
mixed3 vb1 = make_mixed3(bondDir.x*dotvr1, bondDir.y*dotvr1, bondDir.z*dotvr1);
mixed3 vp1 = make_mixed3(vel1.x-vb1.x, vel1.y-vb1.y, vel1.z-vb1.z);
if (m2 == 0) {
// The parent particle is massless, so move only the Drude particle.
if (dotvr1 != 0.0)
deltaT = deltaR/fabs(dotvr1);
if (deltaT > dtPos)
deltaT = dtPos;
dotvr1 = -dotvr1*hardWallScale/(fabs(dotvr1)*sqrt(m1));
mixed dr = -deltaR + deltaT*dotvr1;
pos1.x += bondDir.x*dr;
pos1.y += bondDir.y*dr;
pos1.z += bondDir.z*dr;
velm[atom.x] = make_mixed4(vp1.x + bondDir.x*dotvr1, vp1.y + bondDir.y*dotvr1, vp1.z + bondDir.z*dotvr1, velm[atom.x].w);
#ifdef USE_MIXED_PRECISION
posq[atom.x] = make_real4((real) pos1.x, (real) pos1.y, (real) pos1.z, (real) pos1.w);
posqCorrection[atom.x] = make_real4(pos1.x-(real) pos1.x, pos1.y-(real) pos1.y, pos1.z-(real) pos1.z, 0);
#else
posq[atom.x] = pos1;
#endif
}
else {
// Move both particles.
mixed dotvr2 = vel2.x*bondDir.x + vel2.y*bondDir.y + vel2.z*bondDir.z;
mixed3 vb2 = make_mixed3(bondDir.x*dotvr2, bondDir.y*dotvr2, bondDir.z*dotvr2);
mixed3 vp2 = make_mixed3(vel2.x-vb2.x, vel2.y-vb2.y, vel2.z-vb2.z);
mixed vbCMass = (m1*dotvr1 + m2*dotvr2)*invTotMass;
dotvr1 -= vbCMass;
dotvr2 -= vbCMass;
if (dotvr1 != dotvr2)
deltaT = deltaR/fabs(dotvr1-dotvr2);
if (deltaT > dt)
deltaT = dt;
mixed vBond = hardWallScale/sqrt(m1);
dotvr1 = -dotvr1*vBond*m2*invTotMass/fabs(dotvr1);
dotvr2 = -dotvr2*vBond*m1*invTotMass/fabs(dotvr2);
mixed dr1 = -deltaR*m2*invTotMass + deltaT*dotvr1;
mixed dr2 = deltaR*m1*invTotMass + deltaT*dotvr2;
dotvr1 += vbCMass;
dotvr2 += vbCMass;
pos1.x += bondDir.x*dr1;
pos1.y += bondDir.y*dr1;
pos1.z += bondDir.z*dr1;
pos2.x += bondDir.x*dr2;
pos2.y += bondDir.y*dr2;
pos2.z += bondDir.z*dr2;
velm[atom.x] = make_mixed4(vp1.x + bondDir.x*dotvr1, vp1.y + bondDir.y*dotvr1, vp1.z + bondDir.z*dotvr1, velm[atom.x].w);
velm[atom.y] = make_mixed4(vp2.x + bondDir.x*dotvr2, vp2.y + bondDir.y*dotvr2, vp2.z + bondDir.z*dotvr2, velm[atom.y].w);
#ifdef USE_MIXED_PRECISION
posq[atom.x] = make_real4((real) pos1.x, (real) pos1.y, (real) pos1.z, (real) pos1.w);
posq[atom.y] = make_real4((real) pos2.x, (real) pos2.y, (real) pos2.z, (real) pos2.w);
posqCorrection[atom.x] = make_real4(pos1.x-(real) pos1.x, pos1.y-(real) pos1.y, pos1.z-(real) pos1.z, 0);
posqCorrection[atom.y] = make_real4(pos2.x-(real) pos2.x, pos2.y-(real) pos2.y, pos2.z-(real) pos2.z, 0);
#else
posq[atom.x] = pos1;
posq[atom.y] = pos2;
#endif
}
}
}
} /* end of hard wall constraint part */
}
| 190c9ea1d7660acf1a01abd04f22d9182dad12d3.cu | /**
* Perform the first step of Velocity Verlet integration.
*
* update displacements (posDelta) and velocities (velm)
*/
extern "C" __global__ void integrateVelocityVerletPart1(int numAtoms, int numPairs, int paddedNumAtoms, const mixed2* __restrict__ dt, const real4* __restrict__ posq,
const real4* __restrict__ posqCorrection, mixed4* __restrict__ velm, const long long* __restrict__ force, mixed4* __restrict__ posDelta,
const int* __restrict__ atomList, const int2* __restrict__ pairList) {
const mixed2 stepSize = dt[0];
const mixed dtPos = stepSize.y;
const mixed dtVel = 0.5f*(stepSize.x+stepSize.y);
const mixed scale = 0.5f*dtVel/(mixed) 0x100000000;
for (int index = blockIdx.x*blockDim.x+threadIdx.x; index < numAtoms; index += blockDim.x*gridDim.x) {
int atom = atomList[index];
mixed4 velocity = velm[atom];
if (velocity.w != 0.0) {
#ifdef USE_MIXED_PRECISION
real4 pos1 = posq[atom];
real4 pos2 = posqCorrection[atom];
mixed4 pos = make_mixed4(pos1.x+(mixed)pos2.x, pos1.y+(mixed)pos2.y, pos1.z+(mixed)pos2.z, pos1.w);
#else
real4 pos = posq[atom];
#endif
velocity.x += scale*force[atom]*velocity.w;
velocity.y += scale*force[atom+paddedNumAtoms]*velocity.w;
velocity.z += scale*force[atom+paddedNumAtoms*2]*velocity.w;
pos.x = velocity.x*dtPos;
pos.y = velocity.y*dtPos;
pos.z = velocity.z*dtPos;
posDelta[atom] = pos;
velm[atom] = velocity;
}
}
for (int index = blockIdx.x*blockDim.x+threadIdx.x; index < numPairs; index += blockDim.x*gridDim.x) {
int atom1 = pairList[index].x;
int atom2 = pairList[index].y;
mixed4 v1 = velm[atom1];
mixed4 v2 = velm[atom2];
mixed m1 = v1.w == 0.0f ? 0.0f : 1.0f / v1.w;
mixed m2 = v2.w == 0.0f ? 0.0f : 1.0f / v2.w;
mixed mass1fract = m1 / (m1 + m2);
mixed mass2fract = m2 / (m1 + m2);
mixed invRedMass = (m1 * m2 != 0.0f) ? (m1 + m2)/(m1 * m2) : 0.0f;
mixed invTotMass = (m1 + m2 != 0.0f) ? 1.0f /(m1 + m2) : 0.0f;
mixed3 comVel;
comVel.x= v1.x*mass1fract + v2.x*mass2fract;
comVel.y= v1.y*mass1fract + v2.y*mass2fract;
comVel.z= v1.z*mass1fract + v2.z*mass2fract;
mixed3 relVel;
relVel.x= v2.x - v1.x;
relVel.y= v2.y - v1.y;
relVel.z= v2.z - v1.z;
//
mixed3 comFrc;
comFrc.x = force[atom1] + force[atom2];
comFrc.y = force[atom1 + paddedNumAtoms] + force[atom2 + paddedNumAtoms];
comFrc.z = force[atom1 + paddedNumAtoms*2] + force[atom2 + paddedNumAtoms*2];
mixed3 relFrc;
relFrc.x = mass1fract*force[atom2] - mass2fract*force[atom1];
relFrc.y = mass1fract*force[atom2+paddedNumAtoms] - mass2fract*force[atom1+paddedNumAtoms];
relFrc.z = mass1fract*force[atom2+paddedNumAtoms*2] - mass2fract*force[atom1+paddedNumAtoms*2];
comVel.x += comFrc.x * scale * invTotMass;
comVel.y += comFrc.y * scale * invTotMass;
comVel.z += comFrc.z * scale * invTotMass;
relVel.x += relFrc.x * scale * invRedMass;
relVel.y += relFrc.y * scale * invRedMass;
relVel.z += relFrc.z * scale * invRedMass;
#ifdef USE_MIXED_PRECISION
real4 posv1 = posq[atom1];
real4 posv2 = posq[atom2];
real4 posc1 = posqCorrection[atom1];
real4 posc2 = posqCorrection[atom2];
mixed4 pos1 = make_mixed4(posv1.x+(mixed)posc1.x, posv1.y+(mixed)posc1.y, posv1.z+(mixed)posc1.z, posv1.w);
mixed4 pos2 = make_mixed4(posv2.x+(mixed)posc2.x, posv2.y+(mixed)posc2.y, posv2.z+(mixed)posc2.z, posv2.w);
#else
real4 pos1 = posq[atom1];
real4 pos2 = posq[atom2];
#endif
if (v1.w != 0.0f) {
v1.x = comVel.x - relVel.x*mass2fract;
v1.y = comVel.y - relVel.y*mass2fract;
v1.z = comVel.z - relVel.z*mass2fract;
pos1.x = v1.x*dtPos;
pos1.y = v1.y*dtPos;
pos1.z = v1.z*dtPos;
posDelta[atom1] = pos1;
velm[atom1] = v1;
}
if (v2.w != 0.0f) {
v2.x = comVel.x + relVel.x*mass1fract;
v2.y = comVel.y + relVel.y*mass1fract;
v2.z = comVel.z + relVel.z*mass1fract;
pos2.x = v2.x*dtPos;
pos2.y = v2.y*dtPos;
pos2.z = v2.z*dtPos;
posDelta[atom2] = pos2;
velm[atom2] = v2;
}
}
}
/**
* Perform the second step of Velocity Verlet integration.
*
* apply displacements to positions (posq) after constraints have been enforced
*/
extern "C" __global__ void integrateVelocityVerletPart2(int numAtoms, mixed2* __restrict__ dt, real4* __restrict__ posq,
real4* __restrict__ posqCorrection, mixed4* __restrict__ velm, const mixed4* __restrict__ posDelta) {
mixed2 stepSize = dt[0];
int index = blockIdx.x*blockDim.x+threadIdx.x;
if (index == 0)
dt[0].x = stepSize.y;
for (; index < numAtoms; index += blockDim.x*gridDim.x) {
mixed4 velocity = velm[index];
if (velocity.w != 0.0) {
#ifdef USE_MIXED_PRECISION
real4 pos1 = posq[index];
real4 pos2 = posqCorrection[index];
mixed4 pos = make_mixed4(pos1.x+(mixed)pos2.x, pos1.y+(mixed)pos2.y, pos1.z+(mixed)pos2.z, pos1.w);
#else
real4 pos = posq[index];
#endif
mixed4 delta = posDelta[index];
pos.x += delta.x;
pos.y += delta.y;
pos.z += delta.z;
#ifdef USE_MIXED_PRECISION
posq[index] = make_real4((real) pos.x, (real) pos.y, (real) pos.z, (real) pos.w);
posqCorrection[index] = make_real4(pos.x-(real) pos.x, pos.y-(real) pos.y, pos.z-(real) pos.z, 0);
#else
posq[index] = pos;
#endif
}
}
}
/**
* Perform the third step of Velocity Verlet integration.
*
* modify the velocities (velm) after the force update
*/
extern "C" __global__ void integrateVelocityVerletPart3(int numAtoms, int numPairs, int paddedNumAtoms, mixed2* __restrict__ dt, real4* __restrict__ posq,
real4* __restrict__ posqCorrection, mixed4* __restrict__ velm, const long long* __restrict__ force, const mixed4* __restrict__ posDelta,
const int* __restrict__ atomList, const int2* __restrict__ pairList) {
mixed2 stepSize = dt[0];
#if __CUDA_ARCH__ >= 130
double oneOverDt = 1.0/stepSize.y;
#else
float oneOverDt = 1.0f/stepSize.y;
float correction = (1.0f-oneOverDt*stepSize.y)/stepSize.y;
#endif
const mixed dtVel = 0.5f*(stepSize.x+stepSize.y);
const mixed scale = 0.5f*dtVel/(mixed) 0x100000000;
int index = blockIdx.x*blockDim.x+threadIdx.x;
if (index == 0)
dt[0].x = stepSize.y;
for (int index = blockIdx.x*blockDim.x+threadIdx.x; index < numAtoms; index += blockDim.x*gridDim.x) {
int atom = atomList[index];
mixed4 velocity = velm[atom];
if (velocity.w != 0.0) {
mixed4 deltaXconstrained = posDelta[atom];
velocity.x += scale*force[atom]*velocity.w + (deltaXconstrained.x - velocity.x*stepSize.y)*oneOverDt;
velocity.y += scale*force[atom+paddedNumAtoms]*velocity.w + (deltaXconstrained.y - velocity.y*stepSize.y)*oneOverDt;
velocity.z += scale*force[atom+paddedNumAtoms*2]*velocity.w + (deltaXconstrained.z - velocity.z*stepSize.y)*oneOverDt;
#if __CUDA_ARCH__ < 130
velocity.x += (deltaXconstrained.x - velocity.x*stepSize.y)*correction;
velocity.y += (deltaXconstrained.y - velocity.y*stepSize.y)*correction;
velocity.z += (deltaXconstrained.z - velocity.z*stepSize.y)*correction;
#endif
velm[atom] = velocity;
}
}
for (int index = blockIdx.x*blockDim.x+threadIdx.x; index < numPairs; index += blockDim.x*gridDim.x) {
int atom1 = pairList[index].x;
int atom2 = pairList[index].y;
mixed4 v1 = velm[atom1];
mixed4 v2 = velm[atom2];
mixed m1 = v1.w == 0.0f ? 0.0f : 1.0f / v1.w;
mixed m2 = v2.w == 0.0f ? 0.0f : 1.0f / v2.w;
mixed mass1fract = m1 / (m1 + m2);
mixed mass2fract = m2 / (m1 + m2);
mixed invRedMass = (m1 * m2 != 0.0f) ? (m1 + m2)/(m1 * m2) : 0.0f;
mixed invTotMass = (m1 + m2 != 0.0f) ? 1.0f /(m1 + m2) : 0.0f;
mixed3 comVel;
comVel.x= v1.x*mass1fract + v2.x*mass2fract;
comVel.y= v1.y*mass1fract + v2.y*mass2fract;
comVel.z= v1.z*mass1fract + v2.z*mass2fract;
mixed3 relVel;
relVel.x= v2.x - v1.x;
relVel.y= v2.y - v1.y;
relVel.z= v2.z - v1.z;
//
mixed3 comFrc;
comFrc.x = force[atom1] + force[atom2];
comFrc.y = force[atom1 + paddedNumAtoms] + force[atom2 + paddedNumAtoms];
comFrc.z = force[atom1 + paddedNumAtoms*2] + force[atom2 + paddedNumAtoms*2];
mixed3 relFrc;
relFrc.x = mass1fract*force[atom2] - mass2fract*force[atom1];
relFrc.y = mass1fract*force[atom2+paddedNumAtoms] - mass2fract*force[atom1+paddedNumAtoms];
relFrc.z = mass1fract*force[atom2+paddedNumAtoms*2] - mass2fract*force[atom1+paddedNumAtoms*2];
comVel.x += comFrc.x * scale * invTotMass;
comVel.y += comFrc.y * scale * invTotMass;
comVel.z += comFrc.z * scale * invTotMass;
relVel.x += relFrc.x * scale * invRedMass;
relVel.y += relFrc.y * scale * invRedMass;
relVel.z += relFrc.z * scale * invRedMass;
if (v1.w != 0.0f) {
mixed4 deltaXconstrained = posDelta[atom1];
v1.x = comVel.x - relVel.x*mass2fract + (deltaXconstrained.x - v1.x*stepSize.y)*oneOverDt;
v1.y = comVel.y - relVel.y*mass2fract + (deltaXconstrained.y - v1.y*stepSize.y)*oneOverDt;
v1.z = comVel.z - relVel.z*mass2fract + (deltaXconstrained.z - v1.z*stepSize.y)*oneOverDt;
#if __CUDA_ARCH__ < 130
v1.x += (deltaXconstrained.x - v1.x*stepSize.y)*correction;
v1.y += (deltaXconstrained.y - v1.y*stepSize.y)*correction;
v1.z += (deltaXconstrained.z - v1.z*stepSize.y)*correction;
#endif
velm[atom1] = v1;
}
if (v2.w != 0.0f) {
mixed4 deltaXconstrained = posDelta[atom2];
v2.x = comVel.x + relVel.x*mass1fract + (deltaXconstrained.x - v2.x*stepSize.y)*oneOverDt;
v2.y = comVel.y + relVel.y*mass1fract + (deltaXconstrained.y - v2.y*stepSize.y)*oneOverDt;
v2.z = comVel.z + relVel.z*mass1fract + (deltaXconstrained.z - v2.z*stepSize.y)*oneOverDt;
#if __CUDA_ARCH__ < 130
v2.x += (deltaXconstrained.x - v2.x*stepSize.y)*correction;
v2.y += (deltaXconstrained.y - v2.y*stepSize.y)*correction;
v2.z += (deltaXconstrained.z - v2.z*stepSize.y)*correction;
#endif
velm[atom2] = v2;
}
}
}
/**
* Apply the hard wall constraint
*/
extern "C" __global__ void integrateVelocityVerletHardWall(int numPairs, const float* __restrict__ maxPairDistance, mixed2* __restrict__ dt, real4* __restrict__ posq,
real4* __restrict__ posqCorrection, mixed4* __restrict__ velm,
const int2* __restrict__ pairList, const float* __restrict__ pairTemperature) {
mixed dtPos = dt[0].y;
mixed maxDelta = (mixed) maxPairDistance[0];
// Apply hard wall constraints.
if (maxDelta > 0) {
for (int index = blockIdx.x*blockDim.x+threadIdx.x; index < numPairs; index += blockDim.x*gridDim.x) {
const mixed hardWallScale = sqrt( ((mixed) pairTemperature[index]) * ((mixed) BOLTZ));
int2 atom = make_int2(pairList[index].x, pairList[index].y);
#ifdef USE_MIXED_PRECISION
real4 posv1 = posq[atom.x];
real4 posc1 = posqCorrection[atom.x];
mixed4 pos1 = make_mixed4(posv1.x+(mixed)posc1.x, posv1.y+(mixed)posc1.y, posv1.z+(mixed)posc1.z, posv1.w);
real4 posv2 = posq[atom.y];
real4 posc2 = posqCorrection[atom.y];
mixed4 pos2 = make_mixed4(posv2.x+(mixed)posc2.x, posv2.y+(mixed)posc2.y, posv2.z+(mixed)posc2.z, posv2.w);
#else
real4 pos1 = posq[atom.x];
real4 pos2 = posq[atom.y];
#endif
mixed3 delta = make_mixed3(
mixed (pos1.x - pos2.x),
mixed (pos1.y - pos2.y),
mixed (pos1.z - pos2.z)
);
mixed r = sqrt(delta.x*delta.x + delta.y*delta.y + delta.z*delta.z);
mixed rInv = 1/r;
if (rInv*maxDelta < 1.0) {
// The constraint has been violated, so make the inter-particle distance "bounce"
// off the hard wall.
mixed3 bondDir = make_mixed3(delta.x * rInv, delta.y * rInv, delta.z * rInv);
mixed3 vel1 = make_mixed3(velm[atom.x].x, velm[atom.x].y, velm[atom.x].z);
mixed3 vel2 = make_mixed3(velm[atom.y].x, velm[atom.y].y, velm[atom.y].z);
mixed m1 = velm[atom.x].w != 0.0 ? 1.0/velm[atom.x].w : 0.0;
mixed m2 = velm[atom.y].w != 0.0 ? 1.0/velm[atom.y].w : 0.0;
mixed invTotMass = (m1 + m2 != 0.0) ? 1.0 /(m1 + m2) : 0.0;
mixed deltaR = r-maxDelta;
mixed deltaT = dtPos;
mixed dt = dtPos;
mixed dotvr1 = vel1.x*bondDir.x + vel1.y*bondDir.y + vel1.z*bondDir.z;
mixed3 vb1 = make_mixed3(bondDir.x*dotvr1, bondDir.y*dotvr1, bondDir.z*dotvr1);
mixed3 vp1 = make_mixed3(vel1.x-vb1.x, vel1.y-vb1.y, vel1.z-vb1.z);
if (m2 == 0) {
// The parent particle is massless, so move only the Drude particle.
if (dotvr1 != 0.0)
deltaT = deltaR/fabs(dotvr1);
if (deltaT > dtPos)
deltaT = dtPos;
dotvr1 = -dotvr1*hardWallScale/(fabs(dotvr1)*sqrt(m1));
mixed dr = -deltaR + deltaT*dotvr1;
pos1.x += bondDir.x*dr;
pos1.y += bondDir.y*dr;
pos1.z += bondDir.z*dr;
velm[atom.x] = make_mixed4(vp1.x + bondDir.x*dotvr1, vp1.y + bondDir.y*dotvr1, vp1.z + bondDir.z*dotvr1, velm[atom.x].w);
#ifdef USE_MIXED_PRECISION
posq[atom.x] = make_real4((real) pos1.x, (real) pos1.y, (real) pos1.z, (real) pos1.w);
posqCorrection[atom.x] = make_real4(pos1.x-(real) pos1.x, pos1.y-(real) pos1.y, pos1.z-(real) pos1.z, 0);
#else
posq[atom.x] = pos1;
#endif
}
else {
// Move both particles.
mixed dotvr2 = vel2.x*bondDir.x + vel2.y*bondDir.y + vel2.z*bondDir.z;
mixed3 vb2 = make_mixed3(bondDir.x*dotvr2, bondDir.y*dotvr2, bondDir.z*dotvr2);
mixed3 vp2 = make_mixed3(vel2.x-vb2.x, vel2.y-vb2.y, vel2.z-vb2.z);
mixed vbCMass = (m1*dotvr1 + m2*dotvr2)*invTotMass;
dotvr1 -= vbCMass;
dotvr2 -= vbCMass;
if (dotvr1 != dotvr2)
deltaT = deltaR/fabs(dotvr1-dotvr2);
if (deltaT > dt)
deltaT = dt;
mixed vBond = hardWallScale/sqrt(m1);
dotvr1 = -dotvr1*vBond*m2*invTotMass/fabs(dotvr1);
dotvr2 = -dotvr2*vBond*m1*invTotMass/fabs(dotvr2);
mixed dr1 = -deltaR*m2*invTotMass + deltaT*dotvr1;
mixed dr2 = deltaR*m1*invTotMass + deltaT*dotvr2;
dotvr1 += vbCMass;
dotvr2 += vbCMass;
pos1.x += bondDir.x*dr1;
pos1.y += bondDir.y*dr1;
pos1.z += bondDir.z*dr1;
pos2.x += bondDir.x*dr2;
pos2.y += bondDir.y*dr2;
pos2.z += bondDir.z*dr2;
velm[atom.x] = make_mixed4(vp1.x + bondDir.x*dotvr1, vp1.y + bondDir.y*dotvr1, vp1.z + bondDir.z*dotvr1, velm[atom.x].w);
velm[atom.y] = make_mixed4(vp2.x + bondDir.x*dotvr2, vp2.y + bondDir.y*dotvr2, vp2.z + bondDir.z*dotvr2, velm[atom.y].w);
#ifdef USE_MIXED_PRECISION
posq[atom.x] = make_real4((real) pos1.x, (real) pos1.y, (real) pos1.z, (real) pos1.w);
posq[atom.y] = make_real4((real) pos2.x, (real) pos2.y, (real) pos2.z, (real) pos2.w);
posqCorrection[atom.x] = make_real4(pos1.x-(real) pos1.x, pos1.y-(real) pos1.y, pos1.z-(real) pos1.z, 0);
posqCorrection[atom.y] = make_real4(pos2.x-(real) pos2.x, pos2.y-(real) pos2.y, pos2.z-(real) pos2.z, 0);
#else
posq[atom.x] = pos1;
posq[atom.y] = pos2;
#endif
}
}
}
} /* end of hard wall constraint part */
}
|
2ea18d6d307e2edafdf9552f92f1310790630d22.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
////////////////////////////////////////////////////////////////////////////////
/*
Hologram generating algorithms for CUDA Devices
Copyright 2009, 2010, 2011, 2012 Martin Persson
[email protected]
This file is part of GenerateHologramCUDA.
GenerateHologramCUDA is free software: you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
GenerateHologramCUDA is distributed in the hope that it will be
useful, but WITHOUT ANY WARRANTY; without even the implied warranty
of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with GenerateHologramCUDA. If not, see <http://www.gnu.org/licenses/>.
*/
///////////////////////////////////////////////////////////////////////////////////
//The function "GenerateHologram" contains two different algorithms for
//hologram generation. The last parameter in the function call selects which
//one to use:
//0: Complex addition of "Lenses and Prisms", no optimization (3D)
//1: Weighted Gerchberg-Saxton algorithm using Fresnel propagation (3D)
//2: Weighted Gerchberg-Saxton algorithm using Fast Fourier Transforms (2D)
//-(0) produces optimal holograms for 1 or 2 traps and is significantly faster.
// (0) is automatically selected if the number of spots is < 3.
////////////////////////////////////////////////////////////////////////////////
//Fresnel propagation based algorithm (1) described in:
//Roberto Di Leonardo, Francesca Ianni, and Giancarlo Ruocco
//"Computer generation of optimal holograms for optical trap arrays"
//Opt. Express 15, 1913-1922 (2007)
//
//The original algorithm has been modified to allow variable spot amplitudes
////////////////////////////////////////////////////////////////////////////////
//Naming convention for variables:
//-The prefix indicates where data is located
//--In host functions: h = host memory
// d = device memory
// c = constant memory
//--In global functions: g = global memory
// s = shared memory
// c = constant memory
// no prefix = registers
//-The suffix indicates the data type, no suffix usually indicates an integer
////////////////////////////////////////////////////////////////////////////////
//Possible improvements:
//-Improve convergence of the GS algorithms for 2 spots. *done
//-Compensate spot intensities for distance from center of field. *done
//-Put all arguments for device functions and trap positions in constant memory. *done
// (Requires all functions to be moved into the same file or the use of some
// workaround found on nVidia forum)
//-Put pSLMstart and aLaser in texture memory (may not improve performance on Fermi devices)
//-Use "zero-copy" to transfer pSLM to host.
//-Rename functions and variables for consistency and readability
//-Allow variable spot phases for Lenses and Prisms
////////////////////////////////////////////////////////////////////////////////
//#define M_CUDA_DEBUG //activates a number of custom debug macros//
float dt_milliseconds;
hipEvent_t start, stop;
////////////////////////////////////////////////////////////////////////////////
//Includes
////////////////////////////////////////////////////////////////////////////////
#ifndef M_PI
#define M_PI 3.14159265358979323846f
#endif
#define MAX_SPOTS 1024 //decrease this if your GPU keeps running out of memory
#define BLOCK_SIZE 256 //should be a power of 2
#define SLM_SIZE 512
#if ((SLM_SIZE==16)||(SLM_SIZE==32)||(SLM_SIZE==64)||(SLM_SIZE==128)||(SLM_SIZE==256)||(SLM_SIZE==512)||(SLM_SIZE==1024)||(SLM_SIZE==2048))
#define SLMPOW2 //Uses bitwise modulo operations if the SLM size is a power of 2
#endif
////////////////////////////////////////////////////////////////////////////////
// forward declarations
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//Set correction parameters
////////////////////////////////////////////////////////////////////////////////
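//uc2f maps an 8-bit correction/phase image to float radians: a pixel value uc in
//[0,255] becomes uc*2*pi/256 - pi, i.e. it is scaled linearly into [-pi, pi).
//A host-side launch could look roughly like the line below; d_f, d_uc and N are
//placeholder names for device buffers of N elements, not symbols from this file:
//hipLaunchKernelGGL(uc2f, dim3((N + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, d_f, d_uc, N);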
__global__ void uc2f(float *f, unsigned char *uc, int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<N)
{
f[idx] = uc[idx]*2.0f*M_PI/256.0f - M_PI;
}
} | 2ea18d6d307e2edafdf9552f92f1310790630d22.cu | #include "includes.h"
////////////////////////////////////////////////////////////////////////////////
/*
Hologram generating algorithms for CUDA Devices
Copyright 2009, 2010, 2011, 2012 Martin Persson
[email protected]
This file is part of GenerateHologramCUDA.
GenerateHologramCUDA is free software: you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
GenerateHologramCUDA is distributed in the hope that it will be
useful, but WITHOUT ANY WARRANTY; without even the implied warranty
of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with GenerateHologramCUDA. If not, see <http://www.gnu.org/licenses/>.
*/
///////////////////////////////////////////////////////////////////////////////////
//The function "GenerateHologram" contains two different algorithms for
//hologram generation. The last parameter in the function call selects which
//one to use:
//0: Complex addition of "Lenses and Prisms", no optimization (3D)
//1: Weighted Gerchberg-Saxton algorithm using Fresnel propagation (3D)
//2: Weighted Gerchberg-Saxton algorithm using Fast Fourier Transforms (2D)
//-(0) produces optimal holograms for 1 or 2 traps and is significantly faster.
// (0) is automatically selected if the number of spots is < 3.
////////////////////////////////////////////////////////////////////////////////
//Fresnel propagation based algorithm (1) described in:
//Roberto Di Leonardo, Francesca Ianni, and Giancarlo Ruocco
//"Computer generation of optimal holograms for optical trap arrays"
//Opt. Express 15, 1913-1922 (2007)
//
//The original algorithm has been modified to allow variable spot amplitudes
////////////////////////////////////////////////////////////////////////////////
//Naming convention for variables:
//-The prefix indicates where data is located
//--In host functions: h = host memory
// d = device memory
// c = constant memory
//--In global functions: g = global memory
// s = shared memory
// c = constant memory
// no prefix = registers
//-The suffix indicates the data type, no suffix usually indicates an integer
////////////////////////////////////////////////////////////////////////////////
//Possible improvements:
//-Improve convergence of the GS algorithms for 2 spots. *done
//-Compensate spot intensities for distance from center of field. *done
//-Put all arguments for device functions and trap positions in constant memory. *done
// (Requires all functions to be moved into the same file or the use of some
// workaround found on nVidia forum)
//-Put pSLMstart and aLaser in texture memory (may not improve performance on Fermi devices)
//-Use "zero-copy" to transfer pSLM to host.
//-Rename functions and variables for consistency and readability
//-Allow variable spot phases for Lenses and Prisms
////////////////////////////////////////////////////////////////////////////////
//#define M_CUDA_DEBUG //activates a number of custom debug macros//
float dt_milliseconds;
cudaEvent_t start, stop;
////////////////////////////////////////////////////////////////////////////////
//Includes
////////////////////////////////////////////////////////////////////////////////
#ifndef M_PI
#define M_PI 3.14159265358979323846f
#endif
#define MAX_SPOTS 1024 //decrease this if your GPU keeps running out of memory
#define BLOCK_SIZE 256 //should be a power of 2
#define SLM_SIZE 512
#if ((SLM_SIZE==16)||(SLM_SIZE==32)||(SLM_SIZE==64)||(SLM_SIZE==128)||(SLM_SIZE==256)||(SLM_SIZE==512)||(SLM_SIZE==1024)||(SLM_SIZE==2048))
#define SLMPOW2 //Uses bitwise modulo operations if the SLM size is a power of 2
#endif
////////////////////////////////////////////////////////////////////////////////
// forward declarations
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//Set correction parameters
////////////////////////////////////////////////////////////////////////////////
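//uc2f maps an 8-bit correction/phase image to float radians: a pixel value uc in
//[0,255] becomes uc*2*pi/256 - pi, i.e. it is scaled linearly into [-pi, pi).
//A host-side launch could look roughly like the line below; d_f, d_uc and N are
//placeholder names for device buffers of N elements, not symbols from this file:
//uc2f<<<(N + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(d_f, d_uc, N);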
__global__ void uc2f(float *f, unsigned char *uc, int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<N)
{
f[idx] = uc[idx]*2.0f*M_PI/256.0f - M_PI;
}
} |
27a51ae136b4e2746880b250c723f93a5ec71993.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define THREADS_MAX 512
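// divide_loop partitions the half-open range [start, end) among all threads: it first
// cuts the range into gridDim.x contiguous chunks (ceil division, one per block), then
// cuts this block's chunk into blockDim.x contiguous sub-ranges (one per thread).
// On return [start, end) is the calling thread's private sub-range and the return value
// tells whether it is non-empty. Example: [0, 100) with gridDim.x = 2 and blockDim.x = 4
// gives block 0 the chunk [0, 50), inside which thread 1 processes [13, 26).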
template<typename index>
__device__ inline bool divide_loop(index &start, index &end) {
index num = (end - start + gridDim.x - 1) / gridDim.x;
start = start + num * blockIdx.x;
end = (start + num < end ? start + num : end);
num = (end - start + blockDim.x - 1) / blockDim.x;
start = start + num * threadIdx.x;
end = (start + num < end ? start + num : end);
return (end > start);
}
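// The overloads below emulate atomicMax/atomicExch for floating-point types, which are
// not provided natively: the value's bits are reinterpreted as an integer word and the
// update spins on atomicCAS until it installs the max of the stored value and val
// without interference from other threads. The double atomicExch simply forwards to the
// 64-bit integer atomicExch with the bits reinterpreted.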
__device__ static float atomicMax(float* address, float val) {
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = atomicCAS(address_as_i, assumed,
__float_as_int(fmaxf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
__device__ static double atomicMax(double* address, double val) {
unsigned long long* address_as_i = (unsigned long long*) address;
unsigned long long old = *address_as_i, assumed;
do {
assumed = old;
old = atomicCAS(address_as_i, assumed,
__double_as_longlong(max(val, __longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
__device__ static double atomicExch(double* address, double val) {
return __longlong_as_double(atomicExch((unsigned long long*)address,
__double_as_longlong(val)));
}
#include "rasterize.h"
template<typename scalar, typename index>
__global__ void rasterize_kernel(index b, index nv, index nf, index h, index w,
bool repeat_v, bool repeat_f, bool perspective,
const scalar *v0, const index *f0, index *i, scalar *c,
scalar *zB, scalar eps = 1e-6) {
scalar Ainv[9], coeff[3], uv[2], det = 1;
index start = 0, end = b * nf;
int64_t bbox[4];
divide_loop(start, end);
for(index t_ = start; t_ < end; ++t_) {
index batch = t_ / nf, t = t_ % nf;
const scalar *v = (repeat_v || v0 == NULL ? v0 : v0 + batch*nv*3);
const index *f = (repeat_f || f0 == NULL ? f0 : f0 + batch*nf*3);
if(f == NULL || v == NULL
|| f[3*t]>=nv || f[3*t+1]>=nv || f[3*t+2]>=nv
|| f[3*t] < 0 || f[3*t+1] < 0 || f[3*t+2] < 0)
continue;
scalar v_[] = {
v[3*f[3*t]], v[3*f[3*t]+1], v[3*f[3*t]+2],
v[3*f[3*t+1]],v[3*f[3*t+1]+1],v[3*f[3*t+1]+2],
v[3*f[3*t+2]],v[3*f[3*t+2]+1],v[3*f[3*t+2]+2]};
if(barycentric<scalar>(v_, (int64_t)h, (int64_t)w, bbox, Ainv,
&det, perspective, eps))
for(index y = bbox[2]; y <= bbox[3]; ++y)
for(index x = bbox[0]; x <= bbox[1]; ++x) {
index ind = x + w * (y + h * batch);
uv[0] = (scalar)x;
uv[1] = (scalar)y;
coeff[0] = Ainv[0] + Ainv[3] * uv[0] + Ainv[6] * uv[1];
coeff[1] = Ainv[1] + Ainv[4] * uv[0] + Ainv[7] * uv[1];
coeff[2] = Ainv[2] + Ainv[5] * uv[0] + Ainv[8] * uv[1];
if(assign_buffer<scalar>(v_, coeff, uv,
zB== NULL ? NULL : zB+ ind,
c == NULL ? NULL : c + ind*3,
perspective, Ainv, det, eps)
&& i != NULL) {
i[ind*3] = f[3*t] + (repeat_v ? 0 : nv * batch);
i[ind*3+1]= f[3*t+1]+ (repeat_v ? 0 : nv * batch);
i[ind*3+2]= f[3*t+2]+ (repeat_v ? 0 : nv * batch);
}
}
}
__syncthreads();
}
template<typename scalar, typename index>
bool rasterize_gpu(index b, index nv, index nf, index h, index w,
bool repeat_v, bool repeat_f, bool perspective,
const scalar *v, const index *f, index *i, scalar *c,
scalar *zB, scalar eps = 1e-6) {
index threads = (THREADS_MAX < nf ? THREADS_MAX : nf);
hipLaunchKernelGGL(( rasterize_kernel<scalar,index>), dim3(b), dim3(threads), 0, 0,
b, nv, nf, h, w, repeat_v, repeat_f, perspective,
v, f, i, c, zB, eps);
hipError_t e = hipGetLastError();
if(e != hipSuccess) {
printf("%s\n", hipGetErrorString(e));
return false;
} else
return true;
}
template<typename scalar, typename index>
__global__ void rasterize_kernel_backward(index b, index n, index h, index w,
bool repeat_v, bool perspective, const scalar *v,
const index *i, scalar *dcoeff, scalar eps = 1e-6) {
scalar uv[2];
index start = 0, end = b * h * w;
divide_loop(start, end);
for(index t = start; t < end; ++t) {
if(i == NULL || v == NULL
|| i[3*t] == i[3*t+1] || i[3*t] == i[3*t+2] || i[3*t+1] == i[3*t+2]
|| i[3*t] < 0 || i[3*t+1] < 0 || i[3*t+2] < 0
|| i[3*t]>=n*b|| i[3*t+1]>=n*b|| i[3*t+2]>=n*b)
continue;
scalar v_[] = {
v[3*i[3*t]], v[3*i[3*t]+1], v[3*i[3*t]+2],
v[3*i[3*t+1]],v[3*i[3*t+1]+1],v[3*i[3*t+1]+2],
v[3*i[3*t+2]],v[3*i[3*t+2]+1],v[3*i[3*t+2]+2]};
uv[0] = (scalar)(t % w);
uv[1] = (scalar)((t%(h*w))/w);
barycentric_grad<scalar>(v_, uv, (scalar)h, (scalar)w,
dcoeff + t * 27, perspective, eps);
}
__syncthreads();
}
template<typename scalar, typename index>
bool rasterize_gpu_backward(index b, index n, index h, index w,
bool repeat_v, bool perspective, const scalar *v,
const index *i, scalar *dc, scalar eps = 1e-6) {
index threads = (THREADS_MAX < h*w ? THREADS_MAX : h*w);
hipLaunchKernelGGL(( rasterize_kernel_backward<scalar,index>), dim3(b), dim3(threads), 0, 0,
b, n, h, w, repeat_v, perspective,
v, i, dc, eps);
hipError_t e = hipGetLastError();
if(e != hipSuccess) {
printf("%s\n", hipGetErrorString(e));
return false;
} else
return true;
}
template __host__ bool barycentric<float>(float*,int64_t,int64_t,
int64_t*,float*,float*,bool,float);
template __host__ bool barycentric_grad<float>(const float*,float*,
float,float,float*,bool,float);
template __host__ bool assign_buffer<float>(const float*,
float*,const float*,float*,float*,bool,const float*,float,float);
template bool rasterize_gpu<float,int64_t>(int64_t,int64_t,int64_t,int64_t,int64_t,bool,bool,bool,
const float*,const int64_t*,int64_t*,float*,float*,float);
template bool rasterize_gpu_backward<float,int64_t>(int64_t,int64_t,int64_t,int64_t,
bool,bool,const float*,const int64_t*,float*,float);
template __host__ bool barycentric<double>(double*,int64_t,int64_t,
int64_t*,double*,double*,bool,double);
template __host__ bool barycentric_grad<double>(const double*,double*,
double,double,double*,bool,double);
template __host__ bool assign_buffer<double>(const double*,
double*,const double*,double*,double*,bool,const double*,double,double);
template bool rasterize_gpu<double,int64_t>(int64_t,int64_t,int64_t,int64_t,int64_t,bool,bool,bool,
const double*,const int64_t*,int64_t*,double*,double*,double);
template bool rasterize_gpu_backward<double,int64_t>(int64_t,int64_t,int64_t,int64_t,
bool,bool,const double*,const int64_t*,double*,double);
| 27a51ae136b4e2746880b250c723f93a5ec71993.cu | #include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define THREADS_MAX 512
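// divide_loop partitions the half-open range [start, end) among all threads: it first
// cuts the range into gridDim.x contiguous chunks (ceil division, one per block), then
// cuts this block's chunk into blockDim.x contiguous sub-ranges (one per thread).
// On return [start, end) is the calling thread's private sub-range and the return value
// tells whether it is non-empty. Example: [0, 100) with gridDim.x = 2 and blockDim.x = 4
// gives block 0 the chunk [0, 50), inside which thread 1 processes [13, 26).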
template<typename index>
__device__ inline bool divide_loop(index &start, index &end) {
index num = (end - start + gridDim.x - 1) / gridDim.x;
start = start + num * blockIdx.x;
end = (start + num < end ? start + num : end);
num = (end - start + blockDim.x - 1) / blockDim.x;
start = start + num * threadIdx.x;
end = (start + num < end ? start + num : end);
return (end > start);
}
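// The overloads below emulate atomicMax/atomicExch for floating-point types, which are
// not provided natively: the value's bits are reinterpreted as an integer word and the
// update spins on atomicCAS until it installs the max of the stored value and val
// without interference from other threads. The double atomicExch simply forwards to the
// 64-bit integer atomicExch with the bits reinterpreted.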
__device__ static float atomicMax(float* address, float val) {
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = atomicCAS(address_as_i, assumed,
__float_as_int(fmaxf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
__device__ static double atomicMax(double* address, double val) {
unsigned long long* address_as_i = (unsigned long long*) address;
unsigned long long old = *address_as_i, assumed;
do {
assumed = old;
old = atomicCAS(address_as_i, assumed,
__double_as_longlong(max(val, __longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
__device__ static double atomicExch(double* address, double val) {
return __longlong_as_double(atomicExch((unsigned long long*)address,
__double_as_longlong(val)));
}
#include "rasterize.h"
template<typename scalar, typename index>
__global__ void rasterize_kernel(index b, index nv, index nf, index h, index w,
bool repeat_v, bool repeat_f, bool perspective,
const scalar *v0, const index *f0, index *i, scalar *c,
scalar *zB, scalar eps = 1e-6) {
scalar Ainv[9], coeff[3], uv[2], det = 1;
index start = 0, end = b * nf;
int64_t bbox[4];
divide_loop(start, end);
for(index t_ = start; t_ < end; ++t_) {
index batch = t_ / nf, t = t_ % nf;
const scalar *v = (repeat_v || v0 == NULL ? v0 : v0 + batch*nv*3);
const index *f = (repeat_f || f0 == NULL ? f0 : f0 + batch*nf*3);
if(f == NULL || v == NULL
|| f[3*t]>=nv || f[3*t+1]>=nv || f[3*t+2]>=nv
|| f[3*t] < 0 || f[3*t+1] < 0 || f[3*t+2] < 0)
continue;
scalar v_[] = {
v[3*f[3*t]], v[3*f[3*t]+1], v[3*f[3*t]+2],
v[3*f[3*t+1]],v[3*f[3*t+1]+1],v[3*f[3*t+1]+2],
v[3*f[3*t+2]],v[3*f[3*t+2]+1],v[3*f[3*t+2]+2]};
if(barycentric<scalar>(v_, (int64_t)h, (int64_t)w, bbox, Ainv,
&det, perspective, eps))
for(index y = bbox[2]; y <= bbox[3]; ++y)
for(index x = bbox[0]; x <= bbox[1]; ++x) {
index ind = x + w * (y + h * batch);
uv[0] = (scalar)x;
uv[1] = (scalar)y;
coeff[0] = Ainv[0] + Ainv[3] * uv[0] + Ainv[6] * uv[1];
coeff[1] = Ainv[1] + Ainv[4] * uv[0] + Ainv[7] * uv[1];
coeff[2] = Ainv[2] + Ainv[5] * uv[0] + Ainv[8] * uv[1];
if(assign_buffer<scalar>(v_, coeff, uv,
zB== NULL ? NULL : zB+ ind,
c == NULL ? NULL : c + ind*3,
perspective, Ainv, det, eps)
&& i != NULL) {
i[ind*3] = f[3*t] + (repeat_v ? 0 : nv * batch);
i[ind*3+1]= f[3*t+1]+ (repeat_v ? 0 : nv * batch);
i[ind*3+2]= f[3*t+2]+ (repeat_v ? 0 : nv * batch);
}
}
}
__syncthreads();
}
template<typename scalar, typename index>
bool rasterize_gpu(index b, index nv, index nf, index h, index w,
bool repeat_v, bool repeat_f, bool perspective,
const scalar *v, const index *f, index *i, scalar *c,
scalar *zB, scalar eps = 1e-6) {
index threads = (THREADS_MAX < nf ? THREADS_MAX : nf);
rasterize_kernel<scalar,index><<<b, threads>>>(
b, nv, nf, h, w, repeat_v, repeat_f, perspective,
v, f, i, c, zB, eps);
cudaError_t e = cudaGetLastError();
if(e != cudaSuccess) {
printf("%s\n", cudaGetErrorString(e));
return false;
} else
return true;
}
template<typename scalar, typename index>
__global__ void rasterize_kernel_backward(index b, index n, index h, index w,
bool repeat_v, bool perspective, const scalar *v,
const index *i, scalar *dcoeff, scalar eps = 1e-6) {
scalar uv[2];
index start = 0, end = b * h * w;
divide_loop(start, end);
for(index t = start; t < end; ++t) {
if(i == NULL || v == NULL
|| i[3*t] == i[3*t+1] || i[3*t] == i[3*t+2] || i[3*t+1] == i[3*t+2]
|| i[3*t] < 0 || i[3*t+1] < 0 || i[3*t+2] < 0
|| i[3*t]>=n*b|| i[3*t+1]>=n*b|| i[3*t+2]>=n*b)
continue;
scalar v_[] = {
v[3*i[3*t]], v[3*i[3*t]+1], v[3*i[3*t]+2],
v[3*i[3*t+1]],v[3*i[3*t+1]+1],v[3*i[3*t+1]+2],
v[3*i[3*t+2]],v[3*i[3*t+2]+1],v[3*i[3*t+2]+2]};
uv[0] = (scalar)(t % w);
uv[1] = (scalar)((t%(h*w))/w);
barycentric_grad<scalar>(v_, uv, (scalar)h, (scalar)w,
dcoeff + t * 27, perspective, eps);
}
__syncthreads();
}
template<typename scalar, typename index>
bool rasterize_gpu_backward(index b, index n, index h, index w,
bool repeat_v, bool perspective, const scalar *v,
const index *i, scalar *dc, scalar eps = 1e-6) {
index threads = (THREADS_MAX < h*w ? THREADS_MAX : h*w);
rasterize_kernel_backward<scalar,index><<<b, threads>>>(
b, n, h, w, repeat_v, perspective,
v, i, dc, eps);
cudaError_t e = cudaGetLastError();
if(e != cudaSuccess) {
printf("%s\n", cudaGetErrorString(e));
return false;
} else
return true;
}
template __host__ bool barycentric<float>(float*,int64_t,int64_t,
int64_t*,float*,float*,bool,float);
template __host__ bool barycentric_grad<float>(const float*,float*,
float,float,float*,bool,float);
template __host__ bool assign_buffer<float>(const float*,
float*,const float*,float*,float*,bool,const float*,float,float);
template bool rasterize_gpu<float,int64_t>(int64_t,int64_t,int64_t,int64_t,int64_t,bool,bool,bool,
const float*,const int64_t*,int64_t*,float*,float*,float);
template bool rasterize_gpu_backward<float,int64_t>(int64_t,int64_t,int64_t,int64_t,
bool,bool,const float*,const int64_t*,float*,float);
template __host__ bool barycentric<double>(double*,int64_t,int64_t,
int64_t*,double*,double*,bool,double);
template __host__ bool barycentric_grad<double>(const double*,double*,
double,double,double*,bool,double);
template __host__ bool assign_buffer<double>(const double*,
double*,const double*,double*,double*,bool,const double*,double,double);
template bool rasterize_gpu<double,int64_t>(int64_t,int64_t,int64_t,int64_t,int64_t,bool,bool,bool,
const double*,const int64_t*,int64_t*,double*,double*,double);
template bool rasterize_gpu_backward<double,int64_t>(int64_t,int64_t,int64_t,int64_t,
bool,bool,const double*,const int64_t*,double*,double);
|
59a80468e2de37b91fc1cb9d40b389daacde41e9.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 16, 16, false,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| 59a80468e2de37b91fc1cb9d40b389daacde41e9.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 16, 16, false,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
430003c727be3558db7377a6f3edd04ced5d4125.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "simple.h"
#include <string>
#include <stdio.h>
__device__ float GetElementCuda(Matrix2* mat, int row, int col)
{
if (row >= *mat->height || col >= *mat->width) return -999999999;
return mat->elements[row * (*mat->width) + col];
}
__global__ void ConvolveKernel(Matrix3* input, Matrix3* output, Matrix3* filters, int filterCount, int stride,int blockSize){
int gridIdy = blockIdx.y;
int gridIdx = blockIdx.x;
int blockRow = threadIdx.y;
int blockCol = threadIdx.x;
int height = gridIdy * blockSize + blockRow;
int width = gridIdx * blockSize + blockCol;
//int idx = gridIdx * blockSize * blockSize + gridIdy * blockDim.x * blockSize * blockSize + blockCol + blockRow * blockSize;
if (height >= *input->height - *filters->height + 1 || width >= *input->width - *filters->width + 1) return;
float sum = 0;
for (int k = 0; k < *filters->depth; ++k)
{
for (int i = 0; i < *filters->height; ++i)
{
for (int j = 0; j < *filters->width; ++j)
{
//printf("%d,%d,%d\n",k,i,j);
sum += GetElementCuda(&input->mats[k], height + i, width + j) * GetElementCuda(&filters->mats[k], i, j);
}
}
}
output->mats[0].elements[(*output->width) * height + width] = sum;
//printf("%f \n",sum);
}
__global__ void MaxPoolKernel(Matrix3* input, Matrix3* output, int stride, int blockSize)
{
int gridIdy = blockIdx.y;
int gridIdx = blockIdx.x;
int blockRow = threadIdx.y;
int blockCol = threadIdx.x;
int height = gridIdy * blockSize + blockRow;
int width = gridIdx * blockSize + blockCol;
//printf("height: %d\n", *output->width);
if (height >= *output->height || width >= *output->width) return;
float value = -999999999;
for (int i = 0; i < stride; ++i)
{
for (int j = 0; j < stride; ++j)
{
float cur_value = GetElementCuda(&input->mats[0], stride*height+i, stride*width+j);
if (cur_value > value)
{
value = cur_value;
}
}
}
//printf("Setting %d %d of %d to %f\n", xIdx, yIdx, matIdx, value);
output->mats[0].elements[(*output->width)*height + width] = value;
//printf("MaxPooling: inside GPU\n");
//printf("%f\n",value);
}
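//The two kernels below are variants of ConvolveKernel that cache the filter weights in
//shared memory: the first 27 threads of each block (blockRow < 3 && blockCol < 9) copy
//the 3x3x3 filter into filter_shared, and after __syncthreads() every thread reads its
//filter taps from shared memory instead of global memory. Both variants therefore
//assume a single 3-channel 3x3 filter. The "_unroll" version additionally unrolls the
//innermost width-3 loop by hand.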
__global__ void ConvolveKernel_SharedMemory(Matrix3* input, Matrix3* output, Matrix3* filters, int filterCount, int stride,int blockSize){
int gridIdy = blockIdx.y;
int gridIdx = blockIdx.x;
int blockRow = threadIdx.y;
int blockCol = threadIdx.x;
int height = gridIdy * blockSize + blockRow;
int width = gridIdx * blockSize + blockCol;
__shared__ float filter_shared[3][3][3];
if(blockRow<3&&blockCol<9){
filter_shared[blockRow][blockCol/3][blockCol%3]=GetElementCuda(&filters->mats[blockRow],blockCol/3,blockCol%3);
}
__syncthreads();
//int idx = gridIdx * blockSize * blockSize + gridIdy * blockDim.x * blockSize * blockSize + blockCol + blockRow * blockSize;
if (height >= *input->height - *filters->height + 1 || width >= *input->width - *filters->width + 1) return;
float sum = 0;
for (int k = 0; k < *filters->depth; ++k)
{
for (int i = 0; i < *filters->height; ++i)
{
for (int j = 0; j < *filters->width; ++j)
{
//printf("%d,%d,%d\n",k,i,j);
sum += GetElementCuda(&input->mats[k], height + i, width + j) * filter_shared[k][i][j];
}
}
}
output->mats[0].elements[(*output->width) * height + width] = sum;
//printf("%f \n",sum);
}
__global__ void ConvolveKernel_SharedMemory_unroll(Matrix3* input, Matrix3* output, Matrix3* filters, int filterCount, int stride,int blockSize){
int gridIdy = blockIdx.y;
int gridIdx = blockIdx.x;
int blockRow = threadIdx.y;
int blockCol = threadIdx.x;
int height = gridIdy * blockSize + blockRow;
int width = gridIdx * blockSize + blockCol;
__shared__ float filter_shared[3][3][3];
if(blockRow<3&&blockCol<9){
filter_shared[blockRow][blockCol/3][blockCol%3]=GetElementCuda(&filters->mats[blockRow],blockCol/3,blockCol%3);
}
__syncthreads();
//int idx = gridIdx * blockSize * blockSize + gridIdy * blockDim.x * blockSize * blockSize + blockCol + blockRow * blockSize;
if (height >= *input->height - *filters->height + 1 || width >= *input->width - *filters->width + 1) return;
float sum = 0;
for (int k = 0; k < *filters->depth; ++k)
{
for (int i = 0; i < *filters->height; ++i)
{
sum += GetElementCuda(&input->mats[k], height + i, width ) * filter_shared[k][i][0];
sum += GetElementCuda(&input->mats[k], height + i, width + 1) * filter_shared[k][i][1];
sum += GetElementCuda(&input->mats[k], height + i, width + 2) * filter_shared[k][i][2];
}
}
output->mats[0].elements[(*output->width) * height + width] = sum;
//printf("%f \n",sum);
}
/*
__global__ void test(Matrix3* input){
int idx=threadIdx.x;
if(idx>=1) return;
printf("index = %d,%f\n",idx,input->mats[2].elements[0]);
}*/
| 430003c727be3558db7377a6f3edd04ced5d4125.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "simple.h"
#include <string>
#include <stdio.h>
__device__ float GetElementCuda(Matrix2* mat, int row, int col)
{
if (row >= *mat->height || col >= *mat->width) return -999999999;
return mat->elements[row * (*mat->width) + col];
}
__global__ void ConvolveKernel(Matrix3* input, Matrix3* output, Matrix3* filters, int filterCount, int stride,int blockSize){
int gridIdy = blockIdx.y;
int gridIdx = blockIdx.x;
int blockRow = threadIdx.y;
int blockCol = threadIdx.x;
int height = gridIdy * blockSize + blockRow;
int width = gridIdx * blockSize + blockCol;
//int idx = gridIdx * blockSize * blockSize + gridIdy * blockDim.x * blockSize * blockSize + blockCol + blockRow * blockSize;
if (height >= *input->height - *filters->height + 1 || width >= *input->width - *filters->width + 1) return;
float sum = 0;
for (int k = 0; k < *filters->depth; ++k)
{
for (int i = 0; i < *filters->height; ++i)
{
for (int j = 0; j < *filters->width; ++j)
{
//printf("%d,%d,%d\n",k,i,j);
sum += GetElementCuda(&input->mats[k], height + i, width + j) * GetElementCuda(&filters->mats[k], i, j);
}
}
}
output->mats[0].elements[(*output->width) * height + width] = sum;
//printf("%f \n",sum);
}
__global__ void MaxPoolKernel(Matrix3* input, Matrix3* output, int stride, int blockSize)
{
int gridIdy = blockIdx.y;
int gridIdx = blockIdx.x;
int blockRow = threadIdx.y;
int blockCol = threadIdx.x;
int height = gridIdy * blockSize + blockRow;
int width = gridIdx * blockSize + blockCol;
//printf("height: %d\n", *output->width);
if (height >= *output->height || width >= *output->width) return;
float value = -999999999;
for (int i = 0; i < stride; ++i)
{
for (int j = 0; j < stride; ++j)
{
float cur_value = GetElementCuda(&input->mats[0], stride*height+i, stride*width+j);
if (cur_value > value)
{
value = cur_value;
}
}
}
//printf("Setting %d %d of %d to %f\n", xIdx, yIdx, matIdx, value);
output->mats[0].elements[(*output->width)*height + width] = value;
//printf("MaxPooling: inside GPU\n");
//printf("%f\n",value);
}
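//The two kernels below are variants of ConvolveKernel that cache the filter weights in
//shared memory: the first 27 threads of each block (blockRow < 3 && blockCol < 9) copy
//the 3x3x3 filter into filter_shared, and after __syncthreads() every thread reads its
//filter taps from shared memory instead of global memory. Both variants therefore
//assume a single 3-channel 3x3 filter. The "_unroll" version additionally unrolls the
//innermost width-3 loop by hand.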
__global__ void ConvolveKernel_SharedMemory(Matrix3* input, Matrix3* output, Matrix3* filters, int filterCount, int stride,int blockSize){
int gridIdy = blockIdx.y;
int gridIdx = blockIdx.x;
int blockRow = threadIdx.y;
int blockCol = threadIdx.x;
int height = gridIdy * blockSize + blockRow;
int width = gridIdx * blockSize + blockCol;
__shared__ float filter_shared[3][3][3];
if(blockRow<3&&blockCol<9){
filter_shared[blockRow][blockCol/3][blockCol%3]=GetElementCuda(&filters->mats[blockRow],blockCol/3,blockCol%3);
}
__syncthreads();
//int idx = gridIdx * blockSize * blockSize + gridIdy * blockDim.x * blockSize * blockSize + blockCol + blockRow * blockSize;
if (height >= *input->height - *filters->height + 1 || width >= *input->width - *filters->width + 1) return;
float sum = 0;
for (int k = 0; k < *filters->depth; ++k)
{
for (int i = 0; i < *filters->height; ++i)
{
for (int j = 0; j < *filters->width; ++j)
{
//printf("%d,%d,%d\n",k,i,j);
sum += GetElementCuda(&input->mats[k], height + i, width + j) * filter_shared[k][i][j];
}
}
}
output->mats[0].elements[(*output->width) * height + width] = sum;
//printf("%f \n",sum);
}
__global__ void ConvolveKernel_SharedMemory_unroll(Matrix3* input, Matrix3* output, Matrix3* filters, int filterCount, int stride,int blockSize){
int gridIdy = blockIdx.y;
int gridIdx = blockIdx.x;
int blockRow = threadIdx.y;
int blockCol = threadIdx.x;
int height = gridIdy * blockSize + blockRow;
int width = gridIdx * blockSize + blockCol;
__shared__ float filter_shared[3][3][3];
if(blockRow<3&&blockCol<9){
filter_shared[blockRow][blockCol/3][blockCol%3]=GetElementCuda(&filters->mats[blockRow],blockCol/3,blockCol%3);
}
__syncthreads();
//int idx = gridIdx * blockSize * blockSize + gridIdy * blockDim.x * blockSize * blockSize + blockCol + blockRow * blockSize;
if (height >= *input->height - *filters->height + 1 || width >= *input->width - *filters->width + 1) return;
float sum = 0;
for (int k = 0; k < *filters->depth; ++k)
{
for (int i = 0; i < *filters->height; ++i)
{
sum += GetElementCuda(&input->mats[k], height + i, width ) * filter_shared[k][i][0];
sum += GetElementCuda(&input->mats[k], height + i, width + 1) * filter_shared[k][i][1];
sum += GetElementCuda(&input->mats[k], height + i, width + 2) * filter_shared[k][i][2];
}
}
output->mats[0].elements[(*output->width) * height + width] = sum;
//printf("%f \n",sum);
}
/*
__global__ void test(Matrix3* input){
int idx=threadIdx.x;
if(idx>=1) return;
printf("index = %d,%f\n",idx,input->mats[2].elements[0]);
}*/
|
bcea93112ca7bc181c5cd558c5a4439e10cacc4d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <loss.hpp>
#include <utils.cuh>
#include <vector>
namespace HugeCTR {
namespace {
template <typename T>
__forceinline__ __device__ void atomic_global_sum_div(T val, T *acc, float div) {
val = warpReduceSum(val);
if (threadIdx.x % warpSize == 0) {
atomicAdd(acc, (T)(val / div));
}
return;
}
} // namespace
template <typename T>
Loss<T>::Loss(const Tensor2<float> &label_tensor, const Tensor2<T> &input_tensor,
const Tensor2<float> &loss_tensor, const std::shared_ptr<Regularizer<T>> ®ularizer,
const std::shared_ptr<GPUResource> &gpu_resource, int total_gpu_count, float scaler)
: regularizer_(regularizer),
gpu_resource_(gpu_resource),
total_gpu_count_(total_gpu_count),
scaler_(scaler) {
label_tensors_.push_back(label_tensor);
input_tensors_.push_back(input_tensor);
loss_tensors_.push_back(loss_tensor);
if (regularizer_ == nullptr) {
CK_THROW_(Error_t::WrongInput,
"There is no regularizer specified. If you intend not to use any regularizer, pass a NoRegularizer object.");
}
}
template <typename T>
void Loss<T>::compute(bool is_train) {
Tensor2<T> &input_tensor = get_input_tensors(is_train)[0];
const auto &input_dim = input_tensor.get_dimensions();
int batch_size = input_dim[0];
compute(is_train, batch_size);
}
// Note: current_batchsize here is the batchsize on this device
template <typename T>
void Loss<T>::compute(bool is_train, long long current_batchsize) {
if (regularizer_ == nullptr) {
CK_THROW_(Error_t::WrongInput,
"Null regularizer is not allowed in calling Loss::compute().");
}
CudaDeviceContext context(get_device_id());
Tensor2<T> &input_tensor = get_input_tensors(is_train)[0];
const Tensor2<float> &label_tensor = get_label_tensors(is_train)[0];
Tensor2<float> &loss_tensor = loss_tensors_[0];
const auto &input_dim = input_tensor.get_dimensions();
const auto &label_dim = label_tensor.get_dimensions();
int batch_size = input_dim[0];
int feature_dim = input_dim[1];
T *input = input_tensor.get_ptr();
const float *label = label_tensor.get_ptr();
float *loss = loss_tensor.get_ptr();
float rterm = 0.0f;
regularizer_->compute_rterm();
rterm = regularizer_->get_rterm();
if (current_batchsize > batch_size || current_batchsize < 0) {
CK_THROW_(Error_t::WrongInput, "current_batchsize > batch_size || current_batchsize < 0");
}
do_compute(input, label, loss, current_batchsize, feature_dim, scaler_, rterm, is_train,
get_gpu().get_stream());
if (is_train) {
// once current_batchsize < batch_size in train we set the rest dgrad to 0
if (current_batchsize < batch_size) {
CK_CUDA_THROW_(hipMemsetAsync(
input + current_batchsize * feature_dim, 0,
(batch_size - current_batchsize) * feature_dim * sizeof(T),
get_gpu().get_stream()));
}
}
if (is_train) {
regularizer_->initialize_wgrad();
}
#ifndef NDEBUG
CK_CUDA_THROW_(hipDeviceSynchronize());
CK_CUDA_THROW_(hipGetLastError());
#endif
}
template <typename T>
CrossEntropyLoss<T>::CrossEntropyLoss(const Tensor2<float> &label_tensor,
const Tensor2<T> &input_tensor,
const Tensor2<float> &loss_tensor,
const std::shared_ptr<Regularizer<T>> ®ularizer,
const std::shared_ptr<GPUResource> &gpu_resource,
int total_gpu_count, float scaler)
: Loss<T>(label_tensor, input_tensor, loss_tensor, regularizer, gpu_resource, total_gpu_count,
scaler) {
const auto &input_dim = input_tensor.get_dimensions();
const auto &label_dim = label_tensor.get_dimensions();
int feature_dim = input_dim[1];
if (feature_dim != 2)
CK_THROW_(Error_t::WrongInput, "The feature dimension of CE loss input should be 2");
if (input_dim[0] != label_dim[0])
CK_THROW_(Error_t::WrongInput, "The batch sizes of input tensor and label tensor are not same");
}
// Suppose we use one thread to calculate one sample
template <typename T>
__global__ void CrossEntropy_Kernel(T *input, const float *label, float *cel_loss, int batch_size,
int total_gpu_count, int feature_dim, float scaler, float rterm,
bool is_train) {
int tid = threadIdx.x;
extern __shared__ float loss_s[];
loss_s[tid] = 0.0f;
float z0_exp, z1_exp, a0, a1;
int id1, id2;
for (int i = tid; i < batch_size; i += blockDim.x) {
id1 = i * feature_dim;
id2 = i * feature_dim + 1;
z0_exp = exp((double)input[id1]);
z1_exp = exp((double)input[id2]);
a0 = z0_exp / (z0_exp + z1_exp);
a1 = z1_exp / (z0_exp + z1_exp);
bool no_click = label[i] < 0.5f;
if (is_train) {
// calculate the grad
input[id1] = (a0 - (no_click ? 1.0f : 0.0f)) / batch_size * scaler / total_gpu_count;
input[id2] = (a1 - (!no_click ? 1.0f : 0.0f)) / batch_size * scaler / total_gpu_count;
}
loss_s[tid] += -1 * log(no_click ? a0 : a1);
}
__syncthreads();
float loss_tmp = 0.0f;
if (tid == 0) {
for (int i = 0; i < blockDim.x; ++i) loss_tmp += loss_s[i];
cel_loss[0] = loss_tmp / batch_size + rterm;
}
}
template <typename T>
void CrossEntropyLoss<T>::do_compute(T *input, const float *label, float *loss, int batch_size,
int feature_dim, float scaler, float rterm, bool is_train,
hipStream_t stream) {
int block_size = min(batch_size, 1024);
size_t smem_size = block_size * sizeof(float);
hipLaunchKernelGGL(( CrossEntropy_Kernel), dim3(1), dim3(block_size), smem_size, stream, input, label, loss, batch_size,
Loss<T>::get_total_gpu_count(),
feature_dim, scaler, rterm, is_train);
}
template <typename T>
BinaryCrossEntropyLoss<T>::BinaryCrossEntropyLoss(
const Tensor2<float> &label_tensor, const Tensor2<T> &input_tensor,
const Tensor2<float> &loss_tensor, const std::shared_ptr<Regularizer<T>> ®ularizer,
const std::shared_ptr<GPUResource> &gpu_resource, int total_gpu_count, float scaler)
: Loss<T>(label_tensor, input_tensor, loss_tensor, regularizer, gpu_resource, total_gpu_count,
scaler) {
const auto &input_dim = input_tensor.get_dimensions();
int feature_dim = input_dim[1];
if (feature_dim != 1)
CK_THROW_(Error_t::WrongInput, "The feature dimension of BCE loss input should be 1");
}
// Suppose we use one thread to calculate one sample
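// The kernel below evaluates the loss in a numerically stable form by branching on the
// sign of the logit x: for x >= 0 it uses x*(1-y) + log(1 + exp(-x)), otherwise
// -x*y + log(1 + exp(x)), so that exp() never overflows. The same branch writes either
// the scaled gradient (training) or the sigmoid output (inference) back into input[].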
template <typename T>
__global__ void BinaryCrossEntropy_Kernel(T *input, const float *label, float *bce_loss,
float scaler, int batch_size, int total_gpu_count,
float rterm, bool is_train) {
int tid = threadIdx.x;
extern __shared__ float loss_s[];
loss_s[tid] = 0.0f;
for (int i = tid; i < batch_size; i += blockDim.x) {
const float x = input[i];
const float y = label[i];
if (x >= 0) {
float exp_neg_x = exp(-x);
loss_s[tid] += x * (1 - y) + log(1 + exp_neg_x);
input[i] = is_train ? ((1 - y) - exp_neg_x / (1 + exp_neg_x)) * scaler / (float)batch_size /
total_gpu_count
: 1 / (1 + exp_neg_x);
} else {
float exp_x = exp(x);
loss_s[tid] += -x * y + log(1 + exp_x);
input[i] = is_train
? (-y + exp_x / (1 + exp_x)) * scaler / (float)batch_size / total_gpu_count
: exp_x / (exp_x + 1);
}
}
__syncthreads();
float loss_tmp = 0.0f;
if (tid == 0) {
for (int i = 0; i < blockDim.x; ++i) loss_tmp += loss_s[i];
bce_loss[0] = loss_tmp / batch_size + rterm;
}
}
template <typename T>
void BinaryCrossEntropyLoss<T>::do_compute(T *input, const float *label, float *loss,
int batch_size, int feature_dim, float scaler,
float rterm, bool is_train, hipStream_t stream) {
int block_size = min(batch_size, 1024);
size_t smem_size = block_size * sizeof(float);
hipLaunchKernelGGL(( BinaryCrossEntropy_Kernel), dim3(1), dim3(block_size), smem_size, stream,
input, label, loss, scaler, batch_size, Loss<T>::get_total_gpu_count(), rterm, is_train);
}
__forceinline__ __device__ __host__ float cross_entropy_loss(float x, float y) {
float loss = 0.f;
if (x >= 0) {
float exp_neg_x = exp(-x);
loss = x * (1 - y) + log(1 + exp_neg_x);
} else {
float exp_x = exp(x);
loss = -x * y + log(1 + exp_x);
}
return -loss;
}
__forceinline__ __device__ __host__ float cross_entropy_loss_backward(float x, float y) {
float grad = 0.f;
if (x >= 0) {
float exp_neg_x = exp(-x);
grad = ((1 - y) - exp_neg_x / (1 + exp_neg_x));
} else {
float exp_x = exp(x);
grad = (-y + exp_x / (1 + exp_x));
}
return grad;
}
template <typename T>
__global__ void MultiCrossEntropy_Kernel(T *input, const float *label, const float *target_weight,
float *bce_loss, int batchsize, int total_gpu_count,
int labels_per_sample, float scaler, float rterm,
bool is_train) {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int num_threads = blockDim.x * gridDim.x;
float loss_s = 0.f;
const int size = batchsize * labels_per_sample;
for (int i = tid; i < size; i += num_threads) {
int target_weight_idx = i % labels_per_sample;
const float x = input[i];
const float y = label[i];
float loss =
(label[i] < -0.5) ? 0.f : (target_weight[target_weight_idx] * cross_entropy_loss(x, y));
loss_s += loss;
if (is_train) {
input[i] = (label[i] < -0.5)
? 0.f
: (target_weight[target_weight_idx] * cross_entropy_loss_backward(x, y) /
size * scaler / total_gpu_count);
}
}
atomic_global_sum_div(-loss_s, bce_loss, size);
if (tid == 0) {
atomicAdd(bce_loss, rterm);
}
return;
}
template <typename T>
void MultiCrossEntropyLoss<T>::do_compute(T *input, const float *label, float *loss, int batch_size,
int feature_dim, float scaler, float rterm, bool is_train,
hipStream_t stream) {
int labels_per_sample = feature_dim;
CK_CUDA_THROW_(hipMemsetAsync(loss, 0, Loss<T>::get_loss_tensors()[0].get_size_in_bytes(), stream));
const int BLOCK_SIZE = 256;
const int GRID_SIZE = min(40, (batch_size * labels_per_sample - 1) / BLOCK_SIZE);
float *target_weight = target_weight_.get_ptr();
hipLaunchKernelGGL(( MultiCrossEntropy_Kernel), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, stream,
input, label, target_weight, loss, batch_size, Loss<T>::get_total_gpu_count(),
labels_per_sample, scaler, rterm, is_train);
}
template <typename T>
MultiCrossEntropyLoss<T>::MultiCrossEntropyLoss(const Tensor2<float> &label_tensor,
const Tensor2<T> &input_tensor,
const Tensor2<float> &loss_tensor,
const std::shared_ptr<Regularizer<T>> ®ularizer,
const std::vector<float> &target_weight,
const std::shared_ptr<GPUResource> &gpu_resource,
int total_gpu_count, float scaler)
: Loss<T>(label_tensor, input_tensor, loss_tensor, regularizer, gpu_resource, total_gpu_count,
scaler) {
if (label_tensor.get_dimensions().size() != 2 || input_tensor.get_dimensions().size() != 2 ||
label_tensor.get_dimensions()[0] != input_tensor.get_dimensions()[0] ||
label_tensor.get_dimensions()[1] != input_tensor.get_dimensions()[1]) {
CK_THROW_(Error_t::WrongInput, "Format of input tensor and label tensor don't match");
}
// verify the length of target_weight
if (target_weight.size() != input_tensor.get_dimensions()[1]) {
CK_THROW_(Error_t::WrongInput, "target_weight.size() != input_tensor.get_dims()[0]");
}
// load target_weight to internal Tensor
std::shared_ptr<GeneralBuffer2<CudaAllocator>> internal_buff =
GeneralBuffer2<CudaAllocator>::create();
std::vector<size_t> twdim = {1, label_tensor.get_dimensions()[1]};
internal_buff->reserve(twdim, &target_weight_);
CudaDeviceContext context(Loss<T>::get_device_id());
internal_buff->allocate();
CK_CUDA_THROW_(hipMemcpy(target_weight_.get_ptr(), target_weight.data(),
target_weight_.get_size_in_bytes(), hipMemcpyHostToDevice));
return;
}
template class Loss<__half>;
template class Loss<float>;
template class MultiCrossEntropyLoss<__half>;
template class MultiCrossEntropyLoss<float>;
template class CrossEntropyLoss<__half>;
template class CrossEntropyLoss<float>;
template class BinaryCrossEntropyLoss<__half>;
template class BinaryCrossEntropyLoss<float>;
} // namespace HugeCTR
| bcea93112ca7bc181c5cd558c5a4439e10cacc4d.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <loss.hpp>
#include <utils.cuh>
#include <vector>
namespace HugeCTR {
namespace {
template <typename T>
__forceinline__ __device__ void atomic_global_sum_div(T val, T *acc, float div) {
val = warpReduceSum(val);
if (threadIdx.x % warpSize == 0) {
atomicAdd(acc, (T)(val / div));
}
return;
}
} // namespace
template <typename T>
Loss<T>::Loss(const Tensor2<float> &label_tensor, const Tensor2<T> &input_tensor,
const Tensor2<float> &loss_tensor, const std::shared_ptr<Regularizer<T>> ®ularizer,
const std::shared_ptr<GPUResource> &gpu_resource, int total_gpu_count, float scaler)
: regularizer_(regularizer),
gpu_resource_(gpu_resource),
total_gpu_count_(total_gpu_count),
scaler_(scaler) {
label_tensors_.push_back(label_tensor);
input_tensors_.push_back(input_tensor);
loss_tensors_.push_back(loss_tensor);
if (regularizer_ == nullptr) {
CK_THROW_(Error_t::WrongInput,
"There is no regularizer specified. If you intend not to use any regularizer, pass a NoRegularizer object.");
}
}
template <typename T>
void Loss<T>::compute(bool is_train) {
Tensor2<T> &input_tensor = get_input_tensors(is_train)[0];
const auto &input_dim = input_tensor.get_dimensions();
int batch_size = input_dim[0];
compute(is_train, batch_size);
}
// Note: current_batchsize here is the batchsize on this device
template <typename T>
void Loss<T>::compute(bool is_train, long long current_batchsize) {
if (regularizer_ == nullptr) {
CK_THROW_(Error_t::WrongInput,
"Null regularizer is not allowed in calling Loss::compute().");
}
CudaDeviceContext context(get_device_id());
Tensor2<T> &input_tensor = get_input_tensors(is_train)[0];
const Tensor2<float> &label_tensor = get_label_tensors(is_train)[0];
Tensor2<float> &loss_tensor = loss_tensors_[0];
const auto &input_dim = input_tensor.get_dimensions();
const auto &label_dim = label_tensor.get_dimensions();
int batch_size = input_dim[0];
int feature_dim = input_dim[1];
T *input = input_tensor.get_ptr();
const float *label = label_tensor.get_ptr();
float *loss = loss_tensor.get_ptr();
float rterm = 0.0f;
regularizer_->compute_rterm();
rterm = regularizer_->get_rterm();
if (current_batchsize > batch_size || current_batchsize < 0) {
CK_THROW_(Error_t::WrongInput, "current_batchsize > batch_size || current_batchsize < 0");
}
do_compute(input, label, loss, current_batchsize, feature_dim, scaler_, rterm, is_train,
get_gpu().get_stream());
if (is_train) {
// once current_batchsize < batch_size in train we set the rest dgrad to 0
if (current_batchsize < batch_size) {
CK_CUDA_THROW_(cudaMemsetAsync(
input + current_batchsize * feature_dim, 0,
(batch_size - current_batchsize) * feature_dim * sizeof(T),
get_gpu().get_stream()));
}
}
if (is_train) {
regularizer_->initialize_wgrad();
}
#ifndef NDEBUG
CK_CUDA_THROW_(cudaDeviceSynchronize());
CK_CUDA_THROW_(cudaGetLastError());
#endif
}
template <typename T>
CrossEntropyLoss<T>::CrossEntropyLoss(const Tensor2<float> &label_tensor,
const Tensor2<T> &input_tensor,
const Tensor2<float> &loss_tensor,
const std::shared_ptr<Regularizer<T>> ®ularizer,
const std::shared_ptr<GPUResource> &gpu_resource,
int total_gpu_count, float scaler)
: Loss<T>(label_tensor, input_tensor, loss_tensor, regularizer, gpu_resource, total_gpu_count,
scaler) {
const auto &input_dim = input_tensor.get_dimensions();
const auto &label_dim = label_tensor.get_dimensions();
int feature_dim = input_dim[1];
if (feature_dim != 2)
CK_THROW_(Error_t::WrongInput, "The feature dimension of CE loss input should be 2");
if (input_dim[0] != label_dim[0])
CK_THROW_(Error_t::WrongInput, "The batch sizes of input tensor and label tensor are not same");
}
// Suppose we use one thread to calculate one sample
template <typename T>
__global__ void CrossEntropy_Kernel(T *input, const float *label, float *cel_loss, int batch_size,
int total_gpu_count, int feature_dim, float scaler, float rterm,
bool is_train) {
int tid = threadIdx.x;
extern __shared__ float loss_s[];
loss_s[tid] = 0.0f;
float z0_exp, z1_exp, a0, a1;
int id1, id2;
for (int i = tid; i < batch_size; i += blockDim.x) {
id1 = i * feature_dim;
id2 = i * feature_dim + 1;
z0_exp = exp((double)input[id1]);
z1_exp = exp((double)input[id2]);
a0 = z0_exp / (z0_exp + z1_exp);
a1 = z1_exp / (z0_exp + z1_exp);
bool no_click = label[i] < 0.5f;
if (is_train) {
// calculate the grad
input[id1] = (a0 - (no_click ? 1.0f : 0.0f)) / batch_size * scaler / total_gpu_count;
input[id2] = (a1 - (!no_click ? 1.0f : 0.0f)) / batch_size * scaler / total_gpu_count;
}
loss_s[tid] += -1 * log(no_click ? a0 : a1);
}
__syncthreads();
float loss_tmp = 0.0f;
if (tid == 0) {
for (int i = 0; i < blockDim.x; ++i) loss_tmp += loss_s[i];
cel_loss[0] = loss_tmp / batch_size + rterm;
}
}
template <typename T>
void CrossEntropyLoss<T>::do_compute(T *input, const float *label, float *loss, int batch_size,
int feature_dim, float scaler, float rterm, bool is_train,
cudaStream_t stream) {
int block_size = min(batch_size, 1024);
size_t smem_size = block_size * sizeof(float);
CrossEntropy_Kernel<<<1, block_size, smem_size, stream>>>(input, label, loss, batch_size,
Loss<T>::get_total_gpu_count(),
feature_dim, scaler, rterm, is_train);
}
template <typename T>
BinaryCrossEntropyLoss<T>::BinaryCrossEntropyLoss(
const Tensor2<float> &label_tensor, const Tensor2<T> &input_tensor,
const Tensor2<float> &loss_tensor, const std::shared_ptr<Regularizer<T>> ®ularizer,
const std::shared_ptr<GPUResource> &gpu_resource, int total_gpu_count, float scaler)
: Loss<T>(label_tensor, input_tensor, loss_tensor, regularizer, gpu_resource, total_gpu_count,
scaler) {
const auto &input_dim = input_tensor.get_dimensions();
int feature_dim = input_dim[1];
if (feature_dim != 1)
CK_THROW_(Error_t::WrongInput, "The feature dimension of BCE loss input should be 1");
}
// Suppose we use one thread to calculate one sample
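// The kernel below evaluates the loss in a numerically stable form by branching on the
// sign of the logit x: for x >= 0 it uses x*(1-y) + log(1 + exp(-x)), otherwise
// -x*y + log(1 + exp(x)), so that exp() never overflows. The same branch writes either
// the scaled gradient (training) or the sigmoid output (inference) back into input[].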
template <typename T>
__global__ void BinaryCrossEntropy_Kernel(T *input, const float *label, float *bce_loss,
float scaler, int batch_size, int total_gpu_count,
float rterm, bool is_train) {
int tid = threadIdx.x;
extern __shared__ float loss_s[];
loss_s[tid] = 0.0f;
for (int i = tid; i < batch_size; i += blockDim.x) {
const float x = input[i];
const float y = label[i];
if (x >= 0) {
float exp_neg_x = exp(-x);
loss_s[tid] += x * (1 - y) + log(1 + exp_neg_x);
input[i] = is_train ? ((1 - y) - exp_neg_x / (1 + exp_neg_x)) * scaler / (float)batch_size /
total_gpu_count
: 1 / (1 + exp_neg_x);
} else {
float exp_x = exp(x);
loss_s[tid] += -x * y + log(1 + exp_x);
input[i] = is_train
? (-y + exp_x / (1 + exp_x)) * scaler / (float)batch_size / total_gpu_count
: exp_x / (exp_x + 1);
}
}
__syncthreads();
float loss_tmp = 0.0f;
if (tid == 0) {
for (int i = 0; i < blockDim.x; ++i) loss_tmp += loss_s[i];
bce_loss[0] = loss_tmp / batch_size + rterm;
}
}
template <typename T>
void BinaryCrossEntropyLoss<T>::do_compute(T *input, const float *label, float *loss,
int batch_size, int feature_dim, float scaler,
float rterm, bool is_train, cudaStream_t stream) {
int block_size = min(batch_size, 1024);
size_t smem_size = block_size * sizeof(float);
BinaryCrossEntropy_Kernel<<<1, block_size, smem_size, stream>>>(
input, label, loss, scaler, batch_size, Loss<T>::get_total_gpu_count(), rterm, is_train);
}
__forceinline__ __device__ __host__ float cross_entropy_loss(float x, float y) {
float loss = 0.f;
if (x >= 0) {
float exp_neg_x = exp(-x);
loss = x * (1 - y) + log(1 + exp_neg_x);
} else {
float exp_x = exp(x);
loss = -x * y + log(1 + exp_x);
}
return -loss;
}
__forceinline__ __device__ __host__ float cross_entropy_loss_backward(float x, float y) {
float grad = 0.f;
if (x >= 0) {
float exp_neg_x = exp(-x);
grad = ((1 - y) - exp_neg_x / (1 + exp_neg_x));
} else {
float exp_x = exp(x);
grad = (-y + exp_x / (1 + exp_x));
}
return grad;
}
template <typename T>
__global__ void MultiCrossEntropy_Kernel(T *input, const float *label, const float *target_weight,
float *bce_loss, int batchsize, int total_gpu_count,
int labels_per_sample, float scaler, float rterm,
bool is_train) {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int num_threads = blockDim.x * gridDim.x;
float loss_s = 0.f;
const int size = batchsize * labels_per_sample;
for (int i = tid; i < size; i += num_threads) {
int target_weight_idx = i % labels_per_sample;
const float x = input[i];
const float y = label[i];
float loss =
(label[i] < -0.5) ? 0.f : (target_weight[target_weight_idx] * cross_entropy_loss(x, y));
loss_s += loss;
if (is_train) {
input[i] = (label[i] < -0.5)
? 0.f
: (target_weight[target_weight_idx] * cross_entropy_loss_backward(x, y) /
size * scaler / total_gpu_count);
}
}
atomic_global_sum_div(-loss_s, bce_loss, size);
if (tid == 0) {
atomicAdd(bce_loss, rterm);
}
return;
}
template <typename T>
void MultiCrossEntropyLoss<T>::do_compute(T *input, const float *label, float *loss, int batch_size,
int feature_dim, float scaler, float rterm, bool is_train,
cudaStream_t stream) {
int labels_per_sample = feature_dim;
CK_CUDA_THROW_(cudaMemsetAsync(loss, 0, Loss<T>::get_loss_tensors()[0].get_size_in_bytes(), stream));
const int BLOCK_SIZE = 256;
const int GRID_SIZE = min(40, (batch_size * labels_per_sample - 1) / BLOCK_SIZE);
float *target_weight = target_weight_.get_ptr();
MultiCrossEntropy_Kernel<<<GRID_SIZE, BLOCK_SIZE, 0, stream>>>(
input, label, target_weight, loss, batch_size, Loss<T>::get_total_gpu_count(),
labels_per_sample, scaler, rterm, is_train);
}
template <typename T>
MultiCrossEntropyLoss<T>::MultiCrossEntropyLoss(const Tensor2<float> &label_tensor,
const Tensor2<T> &input_tensor,
const Tensor2<float> &loss_tensor,
const std::shared_ptr<Regularizer<T>> ®ularizer,
const std::vector<float> &target_weight,
const std::shared_ptr<GPUResource> &gpu_resource,
int total_gpu_count, float scaler)
: Loss<T>(label_tensor, input_tensor, loss_tensor, regularizer, gpu_resource, total_gpu_count,
scaler) {
if (label_tensor.get_dimensions().size() != 2 || input_tensor.get_dimensions().size() != 2 ||
label_tensor.get_dimensions()[0] != input_tensor.get_dimensions()[0] ||
label_tensor.get_dimensions()[1] != input_tensor.get_dimensions()[1]) {
CK_THROW_(Error_t::WrongInput, "Format of input tensor and label tensor don't match");
}
// verify the length of target_weight
if (target_weight.size() != input_tensor.get_dimensions()[1]) {
CK_THROW_(Error_t::WrongInput, "target_weight.size() != input_tensor.get_dims()[0]");
}
// load target_weight to internal Tensor
std::shared_ptr<GeneralBuffer2<CudaAllocator>> internal_buff =
GeneralBuffer2<CudaAllocator>::create();
std::vector<size_t> twdim = {1, label_tensor.get_dimensions()[1]};
internal_buff->reserve(twdim, &target_weight_);
CudaDeviceContext context(Loss<T>::get_device_id());
internal_buff->allocate();
CK_CUDA_THROW_(cudaMemcpy(target_weight_.get_ptr(), target_weight.data(),
target_weight_.get_size_in_bytes(), cudaMemcpyHostToDevice));
return;
}
template class Loss<__half>;
template class Loss<float>;
template class MultiCrossEntropyLoss<__half>;
template class MultiCrossEntropyLoss<float>;
template class CrossEntropyLoss<__half>;
template class CrossEntropyLoss<float>;
template class BinaryCrossEntropyLoss<__half>;
template class BinaryCrossEntropyLoss<float>;
} // namespace HugeCTR
|
76703c91ff0e3097da2a87afdc966f2ee653cc79.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2009-2015 The Regents of
the University of Michigan All rights reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: joaander
/*! \file NeighborListGPUBinned.cu
\brief Defines GPU kernel code for neighbor list processing on the GPU
*/
#include "NeighborListGPUBinned.cuh"
#include "NeighborListGPU_hip.cuh"
#include <stdio.h>
/*! \param d_result Device pointer to a single uint. Will be set to 1 if an update is needed
\param d_last_pos Particle positions at the time the nlist was last updated
\param d_pos Current particle positions
\param N Number of particles
\param box Box dimensions
\param maxshiftsq The maximum drsq a particle can have before an update is needed
\param lambda Diagonal deformation tensor (for orthorhombic boundaries)
\param checkn
gpu_nlist_needs_update_check_new_kernel() executes one thread per particle. Every particle's current position is
compared to its last position. If the particle has moved a distance more than sqrt(\a maxshiftsq), then *d_result
is set to \a ncheck.
*/
__global__ void gpu_nlist_needs_update_check_new_kernel(unsigned int *d_result,
const Scalar4 *d_last_pos,
const Scalar4 *d_pos,
const unsigned int N,
const BoxDim box,
const Scalar maxshiftsq,
const Scalar3 lambda,
const unsigned int checkn)
{
// each thread will compare vs its old position to see if the list needs updating
// if that is true, write a 1 to nlist_needs_updating
// it is possible that writes will collide, but at least one will succeed and that is all that matters
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N)
{
Scalar4 cur_postype = d_pos[idx];
Scalar3 cur_pos = make_scalar3(cur_postype.x, cur_postype.y, cur_postype.z);
Scalar4 last_postype = d_last_pos[idx];
Scalar3 last_pos = make_scalar3(last_postype.x, last_postype.y, last_postype.z);
Scalar3 dx = cur_pos - lambda*last_pos;
dx = box.minImage(dx);
if (dot(dx, dx) >= maxshiftsq)
atomicMax(d_result, checkn);
}
}
hipError_t gpu_nlist_needs_update_check_new(unsigned int *d_result,
const Scalar4 *d_last_pos,
const Scalar4 *d_pos,
const unsigned int N,
const BoxDim& box,
const Scalar maxshiftsq,
const Scalar3 lambda,
const unsigned int checkn)
{
unsigned int block_size = 128;
int n_blocks = N/block_size+1;
hipLaunchKernelGGL(( gpu_nlist_needs_update_check_new_kernel), dim3(n_blocks), dim3(block_size), 0, 0, d_result,
d_last_pos,
d_pos,
N,
box,
maxshiftsq,
lambda,
checkn);
return hipSuccess;
}
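// A minimal host-side sketch of driving this distance check (buffer handling and the
// variable names are illustrative only, not part of the HOOMD-blue API; error checks omitted):
//
// unsigned int *d_result;
// hipMalloc(&d_result, sizeof(unsigned int));
// hipMemset(d_result, 0, sizeof(unsigned int));
// gpu_nlist_needs_update_check_new(d_result, d_last_pos, d_pos, N, box,
//                                  maxshiftsq, make_scalar3(1,1,1), 1);
// unsigned int result = 0;
// hipMemcpy(&result, d_result, sizeof(unsigned int), hipMemcpyDeviceToHost);
// bool needs_update = (result != 0); // kernel wrote checkn (here 1) if any particle moved too far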
//! Number of elements of the exclusion list to process in each batch
const unsigned int FILTER_BATCH_SIZE = 4;
/*! \param d_n_neigh Number of neighbors for each particle (read/write)
\param d_nlist Neighbor list for each particle (read/write)
\param nli Indexer for indexing into d_nlist
\param d_n_ex Number of exclusions for each particle
\param d_ex_list List of exclusions for each particle
\param exli Indexer for indexing into d_ex_list
\param N Number of particles
\param ex_start Start filtering the nlist from exclusion number \a ex_start
gpu_nlist_filter_kernel() processes the neighbor list \a d_nlist and removes any entries that are excluded. To allow
for an arbitrary large number of exclusions, these are processed in batch sizes of FILTER_BATCH_SIZE. The kernel
must be called multiple times in order to fully remove all exclusions from the nlist.
\note The driver gpu_nlist_filter properly makes as many calls as are necessary; it only needs to be called once.
\b Implementation
One thread is run for each particle. Exclusions \a ex_start, \a ex_start + 1, ... are loaded in for that particle
(or the thread returns if there are no exclusions past that point). The thread then loops over the neighbor list,
comparing each entry to the list of exclusions. If the entry is not excluded, it is written back out. \a d_n_neigh
is updated to reflect the current number of particles in the list at the end of the kernel call.
*/
__global__ void gpu_nlist_filter_kernel(unsigned int *d_n_neigh,
unsigned int *d_nlist,
const Index2D nli,
const unsigned int *d_n_ex,
const unsigned int *d_ex_list,
const Index2D exli,
const unsigned int N,
const unsigned int ex_start)
{
// compute the particle index this thread operates on
const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
// quit now if this thread is processing past the end of the particle list
if (idx >= N)
return;
const unsigned int n_neigh = d_n_neigh[idx];
const unsigned int n_ex = d_n_ex[idx];
unsigned int new_n_neigh = 0;
// quit now if the ex_start flag is past the end of n_ex
if (ex_start >= n_ex)
return;
// count the number of exclusions to process in this thread
const unsigned int n_ex_process = n_ex - ex_start;
// load the exclusion list into "local" memory - fully unrolled loops should dump this into registers
unsigned int l_ex_list[FILTER_BATCH_SIZE];
#pragma unroll
for (unsigned int cur_ex_idx = 0; cur_ex_idx < FILTER_BATCH_SIZE; cur_ex_idx++)
{
if (cur_ex_idx < n_ex_process)
l_ex_list[cur_ex_idx] = d_ex_list[exli(idx, cur_ex_idx + ex_start)];
else
l_ex_list[cur_ex_idx] = 0xffffffff;
}
// loop over the list, regenerating it as we go
for (unsigned int cur_neigh_idx = 0; cur_neigh_idx < n_neigh; cur_neigh_idx++)
{
unsigned int cur_neigh = d_nlist[nli(idx, cur_neigh_idx)];
// test if excluded
bool excluded = false;
#pragma unroll
for (unsigned int cur_ex_idx = 0; cur_ex_idx < FILTER_BATCH_SIZE; cur_ex_idx++)
{
if (cur_neigh == l_ex_list[cur_ex_idx])
excluded = true;
}
// add it back to the list if it is not excluded
if (!excluded)
{
if (new_n_neigh != cur_neigh_idx)
d_nlist[nli(idx, new_n_neigh)] = cur_neigh;
new_n_neigh++;
}
}
// update the number of neighbors
d_n_neigh[idx] = new_n_neigh;
}
hipError_t gpu_nlist_filter(unsigned int *d_n_neigh,
unsigned int *d_nlist,
const Index2D& nli,
const unsigned int *d_n_ex,
const unsigned int *d_ex_list,
const Index2D& exli,
const unsigned int N,
const unsigned int block_size)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void *)gpu_nlist_filter_kernel);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
// determine parameters for kernel launch
int n_blocks = N/run_block_size + 1;
// split the processing of the full exclusion list up into a number of batches
unsigned int n_batches = (unsigned int)ceil(double(exli.getH())/double(FILTER_BATCH_SIZE));
unsigned int ex_start = 0;
for (unsigned int batch = 0; batch < n_batches; batch++)
{
hipLaunchKernelGGL(( gpu_nlist_filter_kernel), dim3(n_blocks), dim3(run_block_size), 0, 0, d_n_neigh,
d_nlist,
nli,
d_n_ex,
d_ex_list,
exli,
N,
ex_start);
ex_start += FILTER_BATCH_SIZE;
}
return hipSuccess;
}
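// Worked example of the batching above: with an exclusion capacity of exli.getH() == 6 and
// FILTER_BATCH_SIZE == 4, n_batches = ceil(6/4) = 2. The first launch handles exclusions 0-3,
// the second handles exclusions 4-5; the kernel pads unused slots of its local batch with
// 0xffffffff (which never matches a real neighbor index), and threads whose particle has no
// exclusions past ex_start simply return.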
//! Compile time determined block size for the NSQ neighbor list calculation
const int NLIST_BLOCK_SIZE = 128;
//! Generate the neighbor list on the GPU in O(N^2) time
/*! \param d_nlist Neighbor list to write out
\param d_n_neigh Number of neighbors to write
\param d_last_updated_pos Particle positions will be written here
\param d_conditions Overflow condition flag
\param nli Indexer for indexing into d_nlist
\param d_pos Current particle positions
\param N number of particles
\param box Box dimensions for handling periodic boundary conditions
\param r_maxsq Precalculated value for r_max*r_max
each thread is to compute the neighborlist for a single particle i
each block will load a bunch of particles into shared mem and then each thread will compare its particle
to each particle in shmem to see if they are a neighbor. Since all threads in the block access the same
shmem element at the same time, the value is broadcast and there are no bank conflicts
*/
__global__
void gpu_compute_nlist_nsq_kernel(unsigned int *d_nlist,
unsigned int *d_n_neigh,
Scalar4 *d_last_updated_pos,
unsigned int *d_conditions,
const Index2D nli,
const Scalar4 *d_pos,
const unsigned int N,
const unsigned int n_ghost,
const BoxDim box,
const Scalar r_maxsq)
{
// shared data to store all of the particles we compare against
__shared__ Scalar sdata[NLIST_BLOCK_SIZE*4];
// load in the particle
int pidx = blockIdx.x * NLIST_BLOCK_SIZE + threadIdx.x;
// store the max number of neighbors needed for this thread
unsigned int n_neigh_needed = 0;
Scalar4 pos = make_scalar4(0, 0, 0, 0);
if (pidx < N)
pos = d_pos[pidx];
Scalar px = pos.x;
Scalar py = pos.y;
Scalar pz = pos.z;
// track the number of neighbors added so far
int n_neigh = 0;
// each block is going to loop over all N particles (this assumes memory is padded to a multiple of blockDim.x)
// in blocks of blockDim.x
// include ghosts as neighbors
for (int start = 0; start < N + n_ghost; start += NLIST_BLOCK_SIZE)
{
// load data
Scalar4 neigh_pos = make_scalar4(0, 0, 0, 0);
if (start + threadIdx.x < N + n_ghost)
neigh_pos = d_pos[start + threadIdx.x];
// make sure everybody is caught up before we stomp on the memory
__syncthreads();
sdata[threadIdx.x] = neigh_pos.x;
sdata[threadIdx.x + NLIST_BLOCK_SIZE] = neigh_pos.y;
sdata[threadIdx.x + 2*NLIST_BLOCK_SIZE] = neigh_pos.z;
sdata[threadIdx.x + 3*NLIST_BLOCK_SIZE] = neigh_pos.w; //< unused, but try to get compiler to fully coalesce reads
// ensure all data is loaded
__syncthreads();
// now each thread loops over every particle in shmem, but doesn't loop past the end of the particle list (since
// the block might extend that far)
int end_offset= NLIST_BLOCK_SIZE;
end_offset = min(end_offset, N + n_ghost - start);
if (pidx < N)
{
for (int cur_offset = 0; cur_offset < end_offset; cur_offset++)
{
// calculate dr
Scalar3 dx = make_scalar3(px - sdata[cur_offset],
py - sdata[cur_offset + NLIST_BLOCK_SIZE],
pz - sdata[cur_offset + 2*NLIST_BLOCK_SIZE]);
dx = box.minImage(dx);
// we don't add if we are comparing to ourselves, and we don't add if we are above the cut
if ((dot(dx,dx) <= r_maxsq) && ((start + cur_offset) != pidx))
{
unsigned int j = start + cur_offset;
if (n_neigh < nli.getH())
d_nlist[nli(pidx, n_neigh)] = j;
else
n_neigh_needed = n_neigh+1;
n_neigh++;
}
}
}
}
// now that we are done: update the first row that lists the number of neighbors
if (pidx < N)
{
d_n_neigh[pidx] = n_neigh;
d_last_updated_pos[pidx] = d_pos[pidx];
if (n_neigh_needed > 0)
atomicMax(&d_conditions[0], n_neigh_needed);
}
}
//! GPU kernel to update the exclusions list
__global__ void gpu_update_exclusion_list_kernel(const unsigned int *tags,
const unsigned int *rtags,
const unsigned int *n_ex_tag,
const unsigned int *ex_list_tag,
const Index2D ex_list_tag_indexer,
unsigned int *n_ex_idx,
unsigned int *ex_list_idx,
const Index2D ex_list_indexer,
const unsigned int N)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
unsigned int tag = tags[idx];
unsigned int n = n_ex_tag[tag];
// copy over number of exclusions
n_ex_idx[idx] = n;
for (unsigned int offset = 0; offset < n; offset++)
{
unsigned int ex_tag = ex_list_tag[ex_list_tag_indexer(tag, offset)];
unsigned int ex_idx = rtags[ex_tag];
ex_list_idx[ex_list_indexer(idx, offset)] = ex_idx;
}
}
//! GPU function to update the exclusion list on the device
/*! \param d_tag Array of particle tags
\param d_rtag Array of reverse-lookup tag->idx
\param d_n_ex_tag List of number of exclusions per tag
\param d_ex_list_tag 2D Exclusion list per tag
\param ex_list_tag_indexer Indexer for per-tag exclusion list
\param d_n_ex_idx List of number of exclusions per idx
\param d_ex_list_idx Exclusion list per idx
\param ex_list_indexer Indexer for per-idx exclusion list
\param N number of particles
*/
hipError_t gpu_update_exclusion_list(const unsigned int *d_tag,
const unsigned int *d_rtag,
const unsigned int *d_n_ex_tag,
const unsigned int *d_ex_list_tag,
const Index2D& ex_list_tag_indexer,
unsigned int *d_n_ex_idx,
unsigned int *d_ex_list_idx,
const Index2D& ex_list_indexer,
const unsigned int N)
{
unsigned int block_size = 512;
hipLaunchKernelGGL(( gpu_update_exclusion_list_kernel), dim3(N/block_size + 1), dim3(block_size), 0, 0, d_tag,
d_rtag,
d_n_ex_tag,
d_ex_list_tag,
ex_list_tag_indexer,
d_n_ex_idx,
d_ex_list_idx,
ex_list_indexer,
N);
return hipSuccess;
}
//! Generate the neighbor list on the GPU in O(N^2) time
hipError_t gpu_compute_nlist_nsq(unsigned int *d_nlist,
unsigned int *d_n_neigh,
Scalar4 *d_last_updated_pos,
unsigned int *d_conditions,
const Index2D& nli,
const Scalar4 *d_pos,
const unsigned int N,
const unsigned int n_ghost,
const BoxDim& box,
const Scalar r_maxsq)
{
// setup the grid to run the kernel
int block_size = NLIST_BLOCK_SIZE;
dim3 grid( (N/block_size) + 1, 1, 1);
dim3 threads(block_size, 1, 1);
// run the kernel
hipLaunchKernelGGL(( gpu_compute_nlist_nsq_kernel), dim3(grid), dim3(threads) , 0, 0, d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
nli,
d_pos,
N,
n_ghost,
box,
r_maxsq);
return hipSuccess;
}
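// Sizing note for the launch above (illustrative numbers): with N = 1000 and
// NLIST_BLOCK_SIZE = 128 the grid is 1000/128 + 1 = 8 blocks, i.e. 1024 threads; threads with
// pidx >= N still help stage position tiles into shared memory but write no neighbors. If a
// particle needs more than nli.getH() neighbors, the kernel records the required count in
// d_conditions[0] via atomicMax so the caller can detect the overflow and rebuild with a
// larger list.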
| 76703c91ff0e3097da2a87afdc966f2ee653cc79.cu | /*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2009-2015 The Regents of
the University of Michigan All rights reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: joaander
/*! \file NeighborListGPUBinned.cu
\brief Defines GPU kernel code for neighbor list processing on the GPU
*/
#include "NeighborListGPUBinned.cuh"
#include "NeighborListGPU.cuh"
#include <stdio.h>
/*! \param d_result Device pointer to a single uint. Will be set to 1 if an update is needed
\param d_last_pos Particle positions at the time the nlist was last updated
\param d_pos Current particle positions
\param N Number of particles
\param box Box dimensions
\param maxshiftsq The maximum drsq a particle can have before an update is needed
\param lambda Diagonal deformation tensor (for orthorhombic boundaries)
\param checkn
gpu_nlist_needs_update_check_new_kernel() executes one thread per particle. Every particle's current position is
compared to its last position. If the particle has moved a distance more than sqrt(\a maxshiftsq), then *d_result
is set to \a ncheck.
*/
__global__ void gpu_nlist_needs_update_check_new_kernel(unsigned int *d_result,
const Scalar4 *d_last_pos,
const Scalar4 *d_pos,
const unsigned int N,
const BoxDim box,
const Scalar maxshiftsq,
const Scalar3 lambda,
const unsigned int checkn)
{
// each thread will compare vs its old position to see if the list needs updating
// if that is true, write a 1 to nlist_needs_updating
// it is possible that writes will collide, but at least one will succeed and that is all that matters
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N)
{
Scalar4 cur_postype = d_pos[idx];
Scalar3 cur_pos = make_scalar3(cur_postype.x, cur_postype.y, cur_postype.z);
Scalar4 last_postype = d_last_pos[idx];
Scalar3 last_pos = make_scalar3(last_postype.x, last_postype.y, last_postype.z);
Scalar3 dx = cur_pos - lambda*last_pos;
dx = box.minImage(dx);
if (dot(dx, dx) >= maxshiftsq)
atomicMax(d_result, checkn);
}
}
cudaError_t gpu_nlist_needs_update_check_new(unsigned int *d_result,
const Scalar4 *d_last_pos,
const Scalar4 *d_pos,
const unsigned int N,
const BoxDim& box,
const Scalar maxshiftsq,
const Scalar3 lambda,
const unsigned int checkn)
{
unsigned int block_size = 128;
int n_blocks = N/block_size+1;
gpu_nlist_needs_update_check_new_kernel<<<n_blocks, block_size>>>(d_result,
d_last_pos,
d_pos,
N,
box,
maxshiftsq,
lambda,
checkn);
return cudaSuccess;
}
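// A minimal host-side sketch of driving this distance check (buffer handling and the
// variable names are illustrative only, not part of the HOOMD-blue API; error checks omitted):
//
// unsigned int *d_result;
// cudaMalloc(&d_result, sizeof(unsigned int));
// cudaMemset(d_result, 0, sizeof(unsigned int));
// gpu_nlist_needs_update_check_new(d_result, d_last_pos, d_pos, N, box,
//                                  maxshiftsq, make_scalar3(1,1,1), 1);
// unsigned int result = 0;
// cudaMemcpy(&result, d_result, sizeof(unsigned int), cudaMemcpyDeviceToHost);
// bool needs_update = (result != 0); // kernel wrote checkn (here 1) if any particle moved too far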
//! Number of elements of the exclusion list to process in each batch
const unsigned int FILTER_BATCH_SIZE = 4;
/*! \param d_n_neigh Number of neighbors for each particle (read/write)
\param d_nlist Neighbor list for each particle (read/write)
\param nli Indexer for indexing into d_nlist
\param d_n_ex Number of exclusions for each particle
\param d_ex_list List of exclusions for each particle
\param exli Indexer for indexing into d_ex_list
\param N Number of particles
\param ex_start Start filtering the nlist from exclusion number \a ex_start
gpu_nlist_filter_kernel() processes the neighbor list \a d_nlist and removes any entries that are excluded. To allow
for an arbitrary large number of exclusions, these are processed in batch sizes of FILTER_BATCH_SIZE. The kernel
must be called multiple times in order to fully remove all exclusions from the nlist.
\note The driver gpu_nlist_filter properly makes as many calls as are necessary; it only needs to be called once.
\b Implementation
One thread is run for each particle. Exclusions \a ex_start, \a ex_start + 1, ... are loaded in for that particle
(or the thread returns if there are no exclusions past that point). The thread then loops over the neighbor list,
comparing each entry to the list of exclusions. If the entry is not excluded, it is written back out. \a d_n_neigh
is updated to reflect the current number of particles in the list at the end of the kernel call.
*/
__global__ void gpu_nlist_filter_kernel(unsigned int *d_n_neigh,
unsigned int *d_nlist,
const Index2D nli,
const unsigned int *d_n_ex,
const unsigned int *d_ex_list,
const Index2D exli,
const unsigned int N,
const unsigned int ex_start)
{
// compute the particle index this thread operates on
const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
// quit now if this thread is processing past the end of the particle list
if (idx >= N)
return;
const unsigned int n_neigh = d_n_neigh[idx];
const unsigned int n_ex = d_n_ex[idx];
unsigned int new_n_neigh = 0;
// quit now if the ex_start flag is past the end of n_ex
if (ex_start >= n_ex)
return;
// count the number of exclusions to process in this thread
const unsigned int n_ex_process = n_ex - ex_start;
// load the exclusion list into "local" memory - fully unrolled loops should dump this into registers
unsigned int l_ex_list[FILTER_BATCH_SIZE];
#pragma unroll
for (unsigned int cur_ex_idx = 0; cur_ex_idx < FILTER_BATCH_SIZE; cur_ex_idx++)
{
if (cur_ex_idx < n_ex_process)
l_ex_list[cur_ex_idx] = d_ex_list[exli(idx, cur_ex_idx + ex_start)];
else
l_ex_list[cur_ex_idx] = 0xffffffff;
}
// loop over the list, regenerating it as we go
for (unsigned int cur_neigh_idx = 0; cur_neigh_idx < n_neigh; cur_neigh_idx++)
{
unsigned int cur_neigh = d_nlist[nli(idx, cur_neigh_idx)];
// test if excluded
bool excluded = false;
#pragma unroll
for (unsigned int cur_ex_idx = 0; cur_ex_idx < FILTER_BATCH_SIZE; cur_ex_idx++)
{
if (cur_neigh == l_ex_list[cur_ex_idx])
excluded = true;
}
// add it back to the list if it is not excluded
if (!excluded)
{
if (new_n_neigh != cur_neigh_idx)
d_nlist[nli(idx, new_n_neigh)] = cur_neigh;
new_n_neigh++;
}
}
// update the number of neighbors
d_n_neigh[idx] = new_n_neigh;
}
cudaError_t gpu_nlist_filter(unsigned int *d_n_neigh,
unsigned int *d_nlist,
const Index2D& nli,
const unsigned int *d_n_ex,
const unsigned int *d_ex_list,
const Index2D& exli,
const unsigned int N,
const unsigned int block_size)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void *)gpu_nlist_filter_kernel);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
// determine parameters for kernel launch
int n_blocks = N/run_block_size + 1;
// split the processing of the full exclusion list up into a number of batches
unsigned int n_batches = (unsigned int)ceil(double(exli.getH())/double(FILTER_BATCH_SIZE));
unsigned int ex_start = 0;
for (unsigned int batch = 0; batch < n_batches; batch++)
{
gpu_nlist_filter_kernel<<<n_blocks, run_block_size>>>(d_n_neigh,
d_nlist,
nli,
d_n_ex,
d_ex_list,
exli,
N,
ex_start);
ex_start += FILTER_BATCH_SIZE;
}
return cudaSuccess;
}
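// Worked example of the batching above: with an exclusion capacity of exli.getH() == 6 and
// FILTER_BATCH_SIZE == 4, n_batches = ceil(6/4) = 2. The first launch handles exclusions 0-3,
// the second handles exclusions 4-5; the kernel pads unused slots of its local batch with
// 0xffffffff (which never matches a real neighbor index), and threads whose particle has no
// exclusions past ex_start simply return.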
//! Compile time determined block size for the NSQ neighbor list calculation
const int NLIST_BLOCK_SIZE = 128;
//! Generate the neighbor list on the GPU in O(N^2) time
/*! \param d_nlist Neighbor list to write out
\param d_n_neigh Number of neighbors to write
\param d_last_updated_pos Particle positions will be written here
\param d_conditions Overflow condition flag
\param nli Indexer for indexing into d_nlist
\param d_pos Current particle positions
\param N number of particles
\param box Box dimensions for handling periodic boundary conditions
\param r_maxsq Precalculated value for r_max*r_max
each thread is to compute the neighborlist for a single particle i
each block will load a bunch of particles into shared mem and then each thread will compare it's particle
to each particle in shmem to see if they are a neighbor. Since all threads in the block access the same
shmem element at the same time, the value is broadcast and there are no bank conflicts
*/
__global__
void gpu_compute_nlist_nsq_kernel(unsigned int *d_nlist,
unsigned int *d_n_neigh,
Scalar4 *d_last_updated_pos,
unsigned int *d_conditions,
const Index2D nli,
const Scalar4 *d_pos,
const unsigned int N,
const unsigned int n_ghost,
const BoxDim box,
const Scalar r_maxsq)
{
// shared data to store all of the particles we compare against
__shared__ Scalar sdata[NLIST_BLOCK_SIZE*4];
// load in the particle
int pidx = blockIdx.x * NLIST_BLOCK_SIZE + threadIdx.x;
// store the max number of neighbors needed for this thread
unsigned int n_neigh_needed = 0;
Scalar4 pos = make_scalar4(0, 0, 0, 0);
if (pidx < N)
pos = d_pos[pidx];
Scalar px = pos.x;
Scalar py = pos.y;
Scalar pz = pos.z;
// track the number of neighbors added so far
int n_neigh = 0;
// each block is going to loop over all N particles (this assumes memory is padded to a multiple of blockDim.x)
// in blocks of blockDim.x
// include ghosts as neighbors
for (int start = 0; start < N + n_ghost; start += NLIST_BLOCK_SIZE)
{
// load data
Scalar4 neigh_pos = make_scalar4(0, 0, 0, 0);
if (start + threadIdx.x < N + n_ghost)
neigh_pos = d_pos[start + threadIdx.x];
// make sure everybody is caught up before we stomp on the memory
__syncthreads();
sdata[threadIdx.x] = neigh_pos.x;
sdata[threadIdx.x + NLIST_BLOCK_SIZE] = neigh_pos.y;
sdata[threadIdx.x + 2*NLIST_BLOCK_SIZE] = neigh_pos.z;
sdata[threadIdx.x + 3*NLIST_BLOCK_SIZE] = neigh_pos.w; //< unused, but try to get compiler to fully coalesce reads
// ensure all data is loaded
__syncthreads();
// now each thread loops over every particle in shmem, but doesn't loop past the end of the particle list (since
// the block might extend that far)
int end_offset= NLIST_BLOCK_SIZE;
end_offset = min(end_offset, N + n_ghost - start);
if (pidx < N)
{
for (int cur_offset = 0; cur_offset < end_offset; cur_offset++)
{
// calculate dr
Scalar3 dx = make_scalar3(px - sdata[cur_offset],
py - sdata[cur_offset + NLIST_BLOCK_SIZE],
pz - sdata[cur_offset + 2*NLIST_BLOCK_SIZE]);
dx = box.minImage(dx);
// we don't add if we are comparing to ourselves, and we don't add if we are above the cut
if ((dot(dx,dx) <= r_maxsq) && ((start + cur_offset) != pidx))
{
unsigned int j = start + cur_offset;
if (n_neigh < nli.getH())
d_nlist[nli(pidx, n_neigh)] = j;
else
n_neigh_needed = n_neigh+1;
n_neigh++;
}
}
}
}
// now that we are done: update the first row that lists the number of neighbors
if (pidx < N)
{
d_n_neigh[pidx] = n_neigh;
d_last_updated_pos[pidx] = d_pos[pidx];
if (n_neigh_needed > 0)
atomicMax(&d_conditions[0], n_neigh_needed);
}
}
//! GPU kernel to update the exclusions list
__global__ void gpu_update_exclusion_list_kernel(const unsigned int *tags,
const unsigned int *rtags,
const unsigned int *n_ex_tag,
const unsigned int *ex_list_tag,
const Index2D ex_list_tag_indexer,
unsigned int *n_ex_idx,
unsigned int *ex_list_idx,
const Index2D ex_list_indexer,
const unsigned int N)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
unsigned int tag = tags[idx];
unsigned int n = n_ex_tag[tag];
// copy over number of exclusions
n_ex_idx[idx] = n;
for (unsigned int offset = 0; offset < n; offset++)
{
unsigned int ex_tag = ex_list_tag[ex_list_tag_indexer(tag, offset)];
unsigned int ex_idx = rtags[ex_tag];
ex_list_idx[ex_list_indexer(idx, offset)] = ex_idx;
}
}
//! GPU function to update the exclusion list on the device
/*! \param d_tag Array of particle tags
\param d_rtag Array of reverse-lookup tag->idx
\param d_n_ex_tag List of number of exclusions per tag
\param d_ex_list_tag 2D Exclusion list per tag
\param ex_list_tag_indexer Indexer for per-tag exclusion list
\param d_n_ex_idx List of number of exclusions per idx
\param d_ex_list_idx Exclusion list per idx
\param ex_list_indexer Indexer for per-idx exclusion list
\param N number of particles
*/
cudaError_t gpu_update_exclusion_list(const unsigned int *d_tag,
const unsigned int *d_rtag,
const unsigned int *d_n_ex_tag,
const unsigned int *d_ex_list_tag,
const Index2D& ex_list_tag_indexer,
unsigned int *d_n_ex_idx,
unsigned int *d_ex_list_idx,
const Index2D& ex_list_indexer,
const unsigned int N)
{
unsigned int block_size = 512;
gpu_update_exclusion_list_kernel<<<N/block_size + 1, block_size>>>(d_tag,
d_rtag,
d_n_ex_tag,
d_ex_list_tag,
ex_list_tag_indexer,
d_n_ex_idx,
d_ex_list_idx,
ex_list_indexer,
N);
return cudaSuccess;
}
//! Generate the neighbor list on the GPU in O(N^2) time
cudaError_t gpu_compute_nlist_nsq(unsigned int *d_nlist,
unsigned int *d_n_neigh,
Scalar4 *d_last_updated_pos,
unsigned int *d_conditions,
const Index2D& nli,
const Scalar4 *d_pos,
const unsigned int N,
const unsigned int n_ghost,
const BoxDim& box,
const Scalar r_maxsq)
{
// setup the grid to run the kernel
int block_size = NLIST_BLOCK_SIZE;
dim3 grid( (N/block_size) + 1, 1, 1);
dim3 threads(block_size, 1, 1);
// run the kernel
gpu_compute_nlist_nsq_kernel<<< grid, threads >>>(d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
nli,
d_pos,
N,
n_ghost,
box,
r_maxsq);
return cudaSuccess;
}
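// Sizing note for the launch above (illustrative numbers): with N = 1000 and
// NLIST_BLOCK_SIZE = 128 the grid is 1000/128 + 1 = 8 blocks, i.e. 1024 threads; threads with
// pidx >= N still help stage position tiles into shared memory but write no neighbors. If a
// particle needs more than nli.getH() neighbors, the kernel records the required count in
// d_conditions[0] via atomicMax so the caller can detect the overflow and rebuild with a
// larger list.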
|
4011aa7f524028850cf884af78d152f8e730dcde.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/margin_ranking_criterion_op.h"
namespace caffe2 {
namespace {
__global__ void MRCKernel(
const int N, const int* Y, const float* X1, const float* X2, const float margin,
float* output) {
CUDA_1D_KERNEL_LOOP(i, N) {
output[i] = fmaxf(0.f, -Y[i] * (X1[i] - X2[i]) + margin);
}
}
__global__ void MRCGradientKernel(
const int N, const int* Y, const float* X1, const float* X2, const float* dOutput,
const float margin, float* dX1, float* dX2) {
CUDA_1D_KERNEL_LOOP(i, N) {
float dist = -Y[i] * (X1[i] - X2[i]) + margin;
if (dist < 0.f) {
dX1[i] = dX2[i] = 0.f;
} else {
dX1[i] = -Y[i] * dOutput[i];
dX2[i] = Y[i] * dOutput[i];
}
}
}
} // namespace
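// Worked example of the two kernels above with margin = 0.1 and Y = +1 (X1 should be ranked
// above X2):
//   X1 = 0.5, X2 = 0.3: output = max(0, -(0.5 - 0.3) + 0.1) = 0, so dX1 = dX2 = 0.
//   X1 = 0.3, X2 = 0.5: output = max(0, 0.2 + 0.1) = 0.3, dX1 = -dOutput, dX2 = +dOutput.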
template <>
bool MarginRankingCriterionOp<CUDAContext>::RunOnDevice() {
auto& X1 = Input(0);
auto& X2 = Input(1);
auto& Y = Input(2);
auto* loss = Output(0);
CAFFE_ENFORCE(
X1.size() == X2.size(),
"The two inputs for computing ranking loss should have the same size.");
CAFFE_ENFORCE(
X1.size() == Y.size(),
"The input and label should have the same size.");
loss->ResizeLike(X1);
const float* X1data = X1.data<float>();
const float* X2data = X2.data<float>();
const int* Ydata = Y.data<int>();
float* output_data = loss->template mutable_data<float>();
hipLaunchKernelGGL(( MRCKernel), dim3(CAFFE_GET_BLOCKS(X1.size())), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
X1.size(), Ydata, X1data, X2data, margin_, output_data);
return true;
}
template <>
bool MarginRankingCriterionGradientOp<CUDAContext>::RunOnDevice() {
auto& X1 = Input(0);
auto& X2 = Input(1);
auto& Y = Input(2);
auto& dOutput = Input(3);
auto* dX1 = Output(0);
auto* dX2 = Output(1);
dX1->ResizeLike(X1);
dX2->ResizeLike(X2);
const float* X1data = X1.data<float>();
const float* X2data = X2.data<float>();
const int* Ydata = Y.data<int>();
const float* dOutput_data = dOutput.data<float>();
float* dX1_data = dX1->template mutable_data<float>();
float* dX2_data = dX2->template mutable_data<float>();
hipLaunchKernelGGL(( MRCGradientKernel), dim3(CAFFE_GET_BLOCKS(X1.size())), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
X1.size(), Ydata, X1data, X2data,
dOutput_data, margin_, dX1_data, dX2_data);
return true;
}
REGISTER_CUDA_OPERATOR(
MarginRankingCriterion,
MarginRankingCriterionOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(
MarginRankingCriterionGradient,
MarginRankingCriterionGradientOp<CUDAContext>);
} // namespace caffe2
| 4011aa7f524028850cf884af78d152f8e730dcde.cu | #include "caffe2/core/context_gpu.h"
#include "caffe2/operators/margin_ranking_criterion_op.h"
namespace caffe2 {
namespace {
__global__ void MRCKernel(
const int N, const int* Y, const float* X1, const float* X2, const float margin,
float* output) {
CUDA_1D_KERNEL_LOOP(i, N) {
output[i] = fmaxf(0.f, -Y[i] * (X1[i] - X2[i]) + margin);
}
}
__global__ void MRCGradientKernel(
const int N, const int* Y, const float* X1, const float* X2, const float* dOutput,
const float margin, float* dX1, float* dX2) {
CUDA_1D_KERNEL_LOOP(i, N) {
float dist = -Y[i] * (X1[i] - X2[i]) + margin;
if (dist < 0.f) {
dX1[i] = dX2[i] = 0.f;
} else {
dX1[i] = -Y[i] * dOutput[i];
dX2[i] = Y[i] * dOutput[i];
}
}
}
} // namespace
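// Worked example of the two kernels above with margin = 0.1 and Y = +1 (X1 should be ranked
// above X2):
//   X1 = 0.5, X2 = 0.3: output = max(0, -(0.5 - 0.3) + 0.1) = 0, so dX1 = dX2 = 0.
//   X1 = 0.3, X2 = 0.5: output = max(0, 0.2 + 0.1) = 0.3, dX1 = -dOutput, dX2 = +dOutput.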
template <>
bool MarginRankingCriterionOp<CUDAContext>::RunOnDevice() {
auto& X1 = Input(0);
auto& X2 = Input(1);
auto& Y = Input(2);
auto* loss = Output(0);
CAFFE_ENFORCE(
X1.size() == X2.size(),
"The two inputs for computing ranking loss should have the same size.");
CAFFE_ENFORCE(
X1.size() == Y.size(),
"The input and label should have the same size.");
loss->ResizeLike(X1);
const float* X1data = X1.data<float>();
const float* X2data = X2.data<float>();
const int* Ydata = Y.data<int>();
float* output_data = loss->template mutable_data<float>();
MRCKernel<<<CAFFE_GET_BLOCKS(X1.size()), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
X1.size(), Ydata, X1data, X2data, margin_, output_data);
return true;
}
template <>
bool MarginRankingCriterionGradientOp<CUDAContext>::RunOnDevice() {
auto& X1 = Input(0);
auto& X2 = Input(1);
auto& Y = Input(2);
auto& dOutput = Input(3);
auto* dX1 = Output(0);
auto* dX2 = Output(1);
dX1->ResizeLike(X1);
dX2->ResizeLike(X2);
const float* X1data = X1.data<float>();
const float* X2data = X2.data<float>();
const int* Ydata = Y.data<int>();
const float* dOutput_data = dOutput.data<float>();
float* dX1_data = dX1->template mutable_data<float>();
float* dX2_data = dX2->template mutable_data<float>();
MRCGradientKernel<<<CAFFE_GET_BLOCKS(X1.size()), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
X1.size(), Ydata, X1data, X2data,
dOutput_data, margin_, dX1_data, dX2_data);
return true;
}
REGISTER_CUDA_OPERATOR(
MarginRankingCriterion,
MarginRankingCriterionOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(
MarginRankingCriterionGradient,
MarginRankingCriterionGradientOp<CUDAContext>);
} // namespace caffe2
|
be7cc02ee9de0b4f70bff904dd19c03dc4c7355b.hip | // !!! This is a file automatically generated by hipify!!!
#define M_PI 3.14159265358979323846
#include <cstdio>
#include <cstdlib>
#include <ctgmath>
#include <ctime>
//#include <complex>
// For the CUDA runtime routines (prefixed with "cuda_")
//#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include <hiprand/hiprand.h>
#include <hip/hip_complex.h>
//#include <hip/hip_runtime.h>
//#include <device_launch_parameters.h>
#define NUM_OF_THREADS 10000
#define THREADS_PER_BLOCK 256
#define TURNS 25000 /*number of revolution*/
#define NE 10000 /*number of electron*/
#define NUMOFZPT 300 /*number of segmentation of phase*/
//double numofzptdb = numofzpt;
//#define _C_m_s (2.99792458e8) /*c-m/s*/
//#define R56 (0.0512e-3) /*dispersion-m*/
#define ES (0.44e-3) /*energy spread*/
#define DAMPRATE (1.45e-4) /*damping rate*/
//#define EBE (0.629e9) /*beam energy-GeV*/
//#define MANONOR (0.3e6) /*modulation amplitude-GeV*/
#define MA (0.00047694753577106518) /*normalized modulation amplitude*/
//double my_gamma = EBE/(0.511e6); /*Lorentz factor*/
#define NUMMB (20.0) /*number of microbunch*/
#define QNEP (7.4929300010076163e-006) /*for quantum excitation*/
#define MODWL (1.0e-6) /*modulation wavelength-m*/
#define NORR56 (321.69908772759482) /*normalized dispersion*/
#define DN (1.3646097851959425e-005)
#define LPRANGE (125.66370614359172) /*phase range in longitudinal phase space*/
//double zposegdb;
__constant__ int seeding = 1;
__device__ double atomicDoubleAdd(double* address, double val){
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
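// The loop above is the standard compare-and-swap emulation of atomicAdd for doubles; a
// native double-precision atomicAdd only exists on newer hardware (CUDA compute capability
// 6.0+, and HIP also exposes an atomicAdd overload for double). A guarded alternative
// (sketch only, not used in this program) would be:
//
// #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 600
// return atomicAdd(address, val); // hardware double-precision atomicAdd
// #else
// ... fall back to the CAS loop above ...
// #endif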
__device__ hipDoubleComplex ft(double *x){
hipDoubleComplex value = make_cuDoubleComplex(0.0, 0.0);//, I = make_cuDoubleComplex(0.0, 1.0);
int l;
double y = -2.0*M_PI/(double)NUMOFZPT*20.0;
hipDoubleComplex tmp;
for(l = 0; l < NUMOFZPT; l++){
sincos(y*(double)l, &tmp.x, &tmp.y);
value.x += x[l]*tmp.x;
value.y += x[l]*tmp.y;
//printf("%f\n",x[l]);
x[l] = 0;
}
return value;
}
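// ft() evaluates one discrete-Fourier component of the NUMOFZPT-bin phase histogram at
// harmonic 20 (y = -2*pi*20/NUMOFZPT): it accumulates sum_l x[l]*sin(y*l) into value.x and
// sum_l x[l]*cos(y*l) into value.y, zeroing each bin for the next turn. The caller only uses
// the magnitude sqrt(value.x^2 + value.y^2)/NE (the bunching factor), so the sin/cos ordering
// of the two components is irrelevant.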
// Kernel
__global__ void CalKernel(double *lp, double *lpth, double *bf, hiprandStateXORWOW_t *states){
int tid = threadIdx.x, id = tid + blockIdx.x * THREADS_PER_BLOCK;
hiprandStateXORWOW_t localState = states[id];
__shared__ double zdis[NUMOFZPT];
double l_lp, l_lpth;
hipDoubleComplex l_bf;
int zposeg;
/*energy array*/
double iniu; /*for Gaussian random number*/
double iniv; /*for Gaussian random number*/
double err;
if(id < NE){
iniu = hiprand_uniform_double(&localState);
iniv = hiprand_uniform_double(&localState);
l_lp = hiprand_uniform_double(&localState)*LPRANGE;
l_lpth = sqrt(-2.0*log(iniu))*cos(2.0*M_PI*iniv)*ES;
//if(id == 0) printf("\nlp[id] = %f, lpth[id] = %f\n",lp[id],lpth[id]);
int i;
if(id == 0){
for(i = 0; i < NUMOFZPT; i++){
zdis[i] = 0;
}
}
for(i = 0; i < TURNS; i++){
__syncthreads();
//if(id == 0 && i < 10) printf("\n**TURN: %d**\n",i);
l_lp += MA*sin(l_lp);
iniu = hiprand_uniform_double(&localState);
iniv = hiprand_uniform_double(&localState);
err = -DAMPRATE*l_lpth + QNEP*sqrt(-2.0*log(iniu))*cos(2.0*M_PI*iniv);
//if(id == 0 && i < 10) printf("err = %f\n",err);
iniu = hiprand_uniform_double(&localState);
iniv = hiprand_uniform_double(&localState);
l_lp += NORR56*l_lpth+ (err + DN*sqrt(-2.0*log(iniu))*cos(2.0*M_PI*iniv))*NORR56/2.0;
l_lpth = l_lpth + err;
//if(id == 0 && i < 10) printf("lp[id] = %f, lpth[id] = %f\n",lp[id],lpth[id]);
l_lp = fmod(l_lp,LPRANGE);
zposeg = (l_lp/(LPRANGE/(double)NUMOFZPT));
if(zposeg >= 0) {atomicDoubleAdd(&zdis[zposeg], 1.0);}
__syncthreads();
if(threadIdx.x == 0){
l_bf = ft(zdis);
atomicDoubleAdd(&bf[i], l_bf.x);
atomicDoubleAdd(&bf[i+TURNS], l_bf.y);
}
}
if(l_lp < 0.0) l_lp = l_lp + LPRANGE;
lp[id] = l_lp;
lpth[id] = l_lpth;
}
}
__global__ void SetupKernel(hiprandStateXORWOW_t *states){
int id = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
if(id < NE){
hiprand_init(seeding, id, 0, &states[id]);
}
}
void Calculate(double *lp, double *lpth, double *bf, int blocksPerGrid, hiprandStateXORWOW_t *states){
hipError_t error;
double *d_lp, *d_lpth, *d_bf, bf_tmp[TURNS*2];
//hipMemcpyToSymbol(blockcounter, &counter, sizeof(int));
// Allocate memory for result on Device
hipMalloc(&d_lp, sizeof(double)*NE);
hipMalloc(&d_lpth, sizeof(double)*NE);
hipMalloc(&d_bf, sizeof(double)*TURNS*2);
hipMemset(d_bf, 0, sizeof(double)*TURNS*2);
// Launch Kernel
hipLaunchKernelGGL(( CalKernel), dim3(blocksPerGrid), dim3(THREADS_PER_BLOCK), 0, 0, d_lp, d_lpth, d_bf, states);
// check for error
error = hipGetLastError();
if(error != hipSuccess)
{
// print the CUDA error message and exit
printf("CUDA error: %s\n", hipGetErrorString(error));
exit(-1);
}
// Copy result to Host
error = hipMemcpy(lp, d_lp, sizeof(double)*NE, hipMemcpyDeviceToHost);
//printf("Error NO:%d\n", error);
printf("CUDA error: %s\n", hipGetErrorString(error));
hipMemcpy(lpth, d_lpth, sizeof(double)*NE, hipMemcpyDeviceToHost);
hipMemcpy(bf_tmp, d_bf, sizeof(double)*TURNS*2, hipMemcpyDeviceToHost);
int i;
for(i = 0; i < TURNS; i++){
//if(i < 10) printf("(%f, %f i)\n",bf_tmp[i],bf_tmp[i+TURNS]);
bf[i] = (bf_tmp[i]/NE)*(bf_tmp[i]/NE) + (bf_tmp[i+TURNS]/NE)*(bf_tmp[i+TURNS]/NE);
bf[i] = sqrt(bf[i]);
}
// Free Memory
hipFree(d_lp);
hipFree(d_lpth);
hipFree(d_bf);
}
/*void SetupConstant(){
// Calculate constant value
double l_ma = MANONOR/EBE;
double l_qnep = ES*sqrt(2.0*DAMPRATE);
double l_norr56 = 2.0*M_PI*R56/MODWL;
double l_dn = 1.0/sqrt(M_PI*my_gamma/137.0)*DAMPRATE/2.0;
double l_lprange = 2.0*M_PI*NUMMB;
printf("\nma = %f\nqnep = %f\nnorr56 = %f\ndn = %f\nlprange = %f\n",l_ma,l_qnep,l_norr56,l_dn,l_lprange);
// Copy constant value to device
hipMemcpyToSymbol(ma, &l_ma, sizeof(double));
hipMemcpyToSymbol(qnep, &l_qnep, sizeof(double));
hipMemcpyToSymbol(norr56, &l_norr56, sizeof(double));
hipMemcpyToSymbol(dn, &l_dn, sizeof(double));
hipMemcpyToSymbol(lprange, &l_lprange, sizeof(double));
}*/
void CalOnDevice(double *lp, double *lpth, double *bf){
int blocksPerGrid = (NUM_OF_THREADS + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
hiprandStateXORWOW_t *states;
// Allocate memory for Random Generator State
hipMalloc((void **)&states, THREADS_PER_BLOCK * blocksPerGrid * sizeof(hiprandStateXORWOW_t));
// Setup Constant
/*printf("Setup Constant...");
SetupConstant();
printf("Complete.\n");*/
// Setup Random Generator State
printf("Setup Random Generator State...");
hipLaunchKernelGGL(( SetupKernel), dim3(blocksPerGrid), dim3(THREADS_PER_BLOCK), 0, 0, states);
printf("Complete.\n");
// Start Calculation
printf("Start Calculation...");
Calculate(lp, lpth, bf, blocksPerGrid, states);
printf("Complete.\n");
hipFree(states);
}
int main() {
FILE *fpout;
fpout = fopen("out.txt","w");
FILE *fpoutt;
fpoutt = fopen("outt.txt","w");
FILE *fpouttt;
fpouttt = fopen("outtt.txt","w");
double lp[NE]; /*phase array*/
double lpth[NE];
double bf[TURNS];
//double segpb = ((double)numofzpt)/nummb;
printf("Execute calculation on the device.\n");
CalOnDevice(lp, lpth, bf);
int j;
printf("Output: out.txt\n");
for(j = 0; j < TURNS; j++){
fprintf(fpout,"%f\n",sqrt(bf[j]));
}
printf("Output: outt.txt\n");
for(j = 0; j < NE; j++){
fprintf(fpoutt,"%f\n",lp[j]);
}
printf("Output: outtt.txt\n");
for(j = 0; j < NE; j++){
fprintf(fpouttt,"%f\n",lpth[j]);
}
fclose(fpout);
fclose(fpoutt);
fclose(fpouttt);
return 0;
}
| be7cc02ee9de0b4f70bff904dd19c03dc4c7355b.cu | #define M_PI 3.14159265358979323846
#include <cstdio>
#include <cstdlib>
#include <ctgmath>
#include <ctime>
//#include <complex>
// For the CUDA runtime routines (prefixed with "cuda_")
//#include <cuda.h>
#include <curand_kernel.h>
#include <curand.h>
#include <cuComplex.h>
//#include <cuda_runtime.h>
//#include <device_launch_parameters.h>
#define NUM_OF_THREADS 10000
#define THREADS_PER_BLOCK 256
#define TURNS 25000 /*number of revolution*/
#define NE 10000 /*number of electron*/
#define NUMOFZPT 300 /*number of segmentation of phase*/
//double numofzptdb = numofzpt;
//#define _C_m_s (2.99792458e8) /*c-m/s*/
//#define R56 (0.0512e-3) /*dispersion-m*/
#define ES (0.44e-3) /*energy spread*/
#define DAMPRATE (1.45e-4) /*damping rate*/
//#define EBE (0.629e9) /*beam energy-GeV*/
//#define MANONOR (0.3e6) /*modulation amplitude-GeV*/
#define MA (0.00047694753577106518) /*normalized modulation amplitude*/
//double my_gamma = EBE/(0.511e6); /*Lorentz factor*/
#define NUMMB (20.0) /*number of microbunch*/
#define QNEP (7.4929300010076163e-006) /*for quantum excitation*/
#define MODWL (1.0e-6) /*modulation wavelength-m*/
#define NORR56 (321.69908772759482) /*normalized dispersion*/
#define DN (1.3646097851959425e-005)
#define LPRANGE (125.66370614359172) /*phase range in longitudinal phase space*/
//double zposegdb;
__constant__ int seeding = 1;
__device__ double atomicDoubleAdd(double* address, double val){
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
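// The loop above is the standard compare-and-swap emulation of atomicAdd for doubles; a
// native double-precision atomicAdd only exists on newer hardware (CUDA compute capability
// 6.0+). A guarded alternative (sketch only, not used in this program) would be:
//
// #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 600
// return atomicAdd(address, val); // hardware double-precision atomicAdd
// #else
// ... fall back to the CAS loop above ...
// #endif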
__device__ cuDoubleComplex ft(double *x){
cuDoubleComplex value = make_cuDoubleComplex(0.0, 0.0);//, I = make_cuDoubleComplex(0.0, 1.0);
int l;
double y = -2.0*M_PI/(double)NUMOFZPT*20.0;
cuDoubleComplex tmp;
for(l = 0; l < NUMOFZPT; l++){
sincos(y*(double)l, &tmp.x, &tmp.y);
value.x += x[l]*tmp.x;
value.y += x[l]*tmp.y;
//printf("%f\n",x[l]);
x[l] = 0;
}
return value;
}
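// ft() evaluates one discrete-Fourier component of the NUMOFZPT-bin phase histogram at
// harmonic 20 (y = -2*pi*20/NUMOFZPT): it accumulates sum_l x[l]*sin(y*l) into value.x and
// sum_l x[l]*cos(y*l) into value.y, zeroing each bin for the next turn. The caller only uses
// the magnitude sqrt(value.x^2 + value.y^2)/NE (the bunching factor), so the sin/cos ordering
// of the two components is irrelevant.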
// Kernel
__global__ void CalKernel(double *lp, double *lpth, double *bf, curandStateXORWOW_t *states){
int tid = threadIdx.x, id = tid + blockIdx.x * THREADS_PER_BLOCK;
curandStateXORWOW_t localState = states[id];
__shared__ double zdis[NUMOFZPT];
double l_lp, l_lpth;
cuDoubleComplex l_bf;
int zposeg;
/*energy array*/
double iniu; /*for Gaussian random number*/
double iniv; /*for Gaussian random number*/
double err;
if(id < NE){
iniu = curand_uniform_double(&localState);
iniv = curand_uniform_double(&localState);
l_lp = curand_uniform_double(&localState)*LPRANGE;
l_lpth = sqrt(-2.0*log(iniu))*cos(2.0*M_PI*iniv)*ES;
//if(id == 0) printf("\nlp[id] = %f, lpth[id] = %f\n",lp[id],lpth[id]);
int i;
if(id == 0){
for(i = 0; i < NUMOFZPT; i++){
zdis[i] = 0;
}
}
for(i = 0; i < TURNS; i++){
__syncthreads();
//if(id == 0 && i < 10) printf("\n**TURN: %d**\n",i);
l_lp += MA*sin(l_lp);
iniu = curand_uniform_double(&localState);
iniv = curand_uniform_double(&localState);
err = -DAMPRATE*l_lpth + QNEP*sqrt(-2.0*log(iniu))*cos(2.0*M_PI*iniv);
//if(id == 0 && i < 10) printf("err = %f\n",err);
iniu = curand_uniform_double(&localState);
iniv = curand_uniform_double(&localState);
l_lp += NORR56*l_lpth+ (err + DN*sqrt(-2.0*log(iniu))*cos(2.0*M_PI*iniv))*NORR56/2.0;
l_lpth = l_lpth + err;
//if(id == 0 && i < 10) printf("lp[id] = %f, lpth[id] = %f\n",lp[id],lpth[id]);
l_lp = fmod(l_lp,LPRANGE);
zposeg = (l_lp/(LPRANGE/(double)NUMOFZPT));
if(zposeg >= 0) {atomicDoubleAdd(&zdis[zposeg], 1.0);}
__syncthreads();
if(threadIdx.x == 0){
l_bf = ft(zdis);
atomicDoubleAdd(&bf[i], l_bf.x);
atomicDoubleAdd(&bf[i+TURNS], l_bf.y);
}
}
if(l_lp < 0.0) l_lp = l_lp + LPRANGE;
lp[id] = l_lp;
lpth[id] = l_lpth;
}
}
__global__ void SetupKernel(curandStateXORWOW_t *states){
int id = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
if(id < NE){
curand_init(seeding, id, 0, &states[id]);
}
}
void Calculate(double *lp, double *lpth, double *bf, int blocksPerGrid, curandStateXORWOW_t *states){
cudaError_t error;
double *d_lp, *d_lpth, *d_bf, bf_tmp[TURNS*2];
//cudaMemcpyToSymbol(blockcounter, &counter, sizeof(int));
// Allocate memory for result on Device
cudaMalloc(&d_lp, sizeof(double)*NE);
cudaMalloc(&d_lpth, sizeof(double)*NE);
cudaMalloc(&d_bf, sizeof(double)*TURNS*2);
cudaMemset(d_bf, 0, sizeof(double)*TURNS*2);
// Launch Kernel
CalKernel<<<blocksPerGrid, THREADS_PER_BLOCK>>>(d_lp, d_lpth, d_bf, states);
// check for error
error = cudaGetLastError();
if(error != cudaSuccess)
{
// print the CUDA error message and exit
printf("CUDA error: %s\n", cudaGetErrorString(error));
exit(-1);
}
// Copy result to Host
error = cudaMemcpy(lp, d_lp, sizeof(double)*NE, cudaMemcpyDeviceToHost);
//printf("Error NO:%d\n", error);
printf("CUDA error: %s\n", cudaGetErrorString(error));
cudaMemcpy(lpth, d_lpth, sizeof(double)*NE, cudaMemcpyDeviceToHost);
cudaMemcpy(bf_tmp, d_bf, sizeof(double)*TURNS*2, cudaMemcpyDeviceToHost);
int i;
for(i = 0; i < TURNS; i++){
//if(i < 10) printf("(%f, %f i)\n",bf_tmp[i],bf_tmp[i+TURNS]);
bf[i] = (bf_tmp[i]/NE)*(bf_tmp[i]/NE) + (bf_tmp[i+TURNS]/NE)*(bf_tmp[i+TURNS]/NE);
bf[i] = sqrt(bf[i]);
}
// Free Memory
cudaFree(d_lp);
cudaFree(d_lpth);
cudaFree(d_bf);
}
/*void SetupConstant(){
// Calculate constant value
double l_ma = MANONOR/EBE;
double l_qnep = ES*sqrt(2.0*DAMPRATE);
double l_norr56 = 2.0*M_PI*R56/MODWL;
double l_dn = 1.0/sqrt(M_PI*my_gamma/137.0)*DAMPRATE/2.0;
double l_lprange = 2.0*M_PI*NUMMB;
printf("\nma = %f\nqnep = %f\nnorr56 = %f\ndn = %f\nlprange = %f\n",l_ma,l_qnep,l_norr56,l_dn,l_lprange);
// Copy constant value to device
cudaMemcpyToSymbol(ma, &l_ma, sizeof(double));
cudaMemcpyToSymbol(qnep, &l_qnep, sizeof(double));
cudaMemcpyToSymbol(norr56, &l_norr56, sizeof(double));
cudaMemcpyToSymbol(dn, &l_dn, sizeof(double));
cudaMemcpyToSymbol(lprange, &l_lprange, sizeof(double));
}*/
void CalOnDevice(double *lp, double *lpth, double *bf){
int blocksPerGrid = (NUM_OF_THREADS + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
curandStateXORWOW_t *states;
// Allocate memory for Random Generator State
cudaMalloc((void **)&states, THREADS_PER_BLOCK * blocksPerGrid * sizeof(curandStateXORWOW_t));
// Setup Constant
/*printf("Setup Constant...");
SetupConstant();
printf("Complete.\n");*/
// Setup Random Generator State
printf("Setup Random Generator State...");
SetupKernel<<<blocksPerGrid, THREADS_PER_BLOCK>>>(states);
printf("Complete.\n");
// Start Calculation
printf("Start Calculation...");
Calculate(lp, lpth, bf, blocksPerGrid, states);
printf("Complete.\n");
cudaFree(states);
}
int main() {
FILE *fpout;
fpout = fopen("out.txt","w");
FILE *fpoutt;
fpoutt = fopen("outt.txt","w");
FILE *fpouttt;
fpouttt = fopen("outtt.txt","w");
double lp[NE]; /*phase array*/
double lpth[NE];
double bf[TURNS];
//double segpb = ((double)numofzpt)/nummb;
printf("Execute calculation on the device.\n");
CalOnDevice(lp, lpth, bf);
int j;
printf("Output: out.txt\n");
for(j = 0; j < TURNS; j++){
fprintf(fpout,"%f\n",sqrt(bf[j]));
}
printf("Output: outt.txt\n");
for(j = 0; j < NE; j++){
fprintf(fpoutt,"%f\n",lp[j]);
}
printf("Output: outtt.txt\n");
for(j = 0; j < NE; j++){
fprintf(fpouttt,"%f\n",lpth[j]);
}
fclose(fpout);
fclose(fpoutt);
fclose(fpouttt);
return 0;
}
|
539160eefb50281e719628f9e2137d2d30c10785.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "SimpleMOC-kernel_header.h"
/* My parallelization scheme here is to basically have a single
* block be a geometrical segment, with each thread within the
* block represent a single energy phase. On the CPU, the
 * inner SIMD-ized loop is over energy (i.e., 100 energy groups).
* This should allow for each BLOCK to have:
* - A single state variable for the RNG
* - A set of __shared__ SIMD vectors, each thread id being its idx
*/
__global__ void run_kernel( Input I, Source * S,
Source_Arrays SA, Table * table, hiprandState_t * state,
float * state_fluxes, int N_state_fluxes)
{
int blockId = blockIdx.y * gridDim.x + blockIdx.x; // geometric segment
if( blockId >= I.segments / I.seg_per_thread )
return;
// Assign RNG state
hiprandState_t * localState = &state[blockId % I.streams];
blockId *= I.seg_per_thread;
blockId--;
int g = threadIdx.x; // Each energy group (g) is one thread in a block
// Thread Local (i.e., specific to E group) variables
// Similar to SIMD vectors in CPU code
float q0 ;
float q1 ;
float q2 ;
float sigT ;
float tau ;
float sigT2 ;
float expVal ;
float reuse ;
float flux_integral;
float tally ;
float t1 ;
float t2 ;
float t3 ;
float t4 ;
// Randomized variables (common across all threads within a block)
extern __shared__ int shm[];
int * state_flux_id = &shm[0];
int * QSR_id = &shm[I.seg_per_thread];
int * FAI_id = &shm[I.seg_per_thread * 2];
if( threadIdx.x == 0 )
{
for( int i = 0; i < I.seg_per_thread; i++ )
{
state_flux_id[i] = hiprand(localState) % N_state_fluxes;
QSR_id[i] = hiprand(localState) % I.source_3D_regions;
FAI_id[i] = hiprand(localState) % I.fine_axial_intervals;
}
}
__syncthreads();
for( int i = 0; i < I.seg_per_thread; i++ )
{
blockId++;
float * state_flux = &state_fluxes[state_flux_id[i]];
__syncthreads();
//////////////////////////////////////////////////////////
// Attenuate Segment
//////////////////////////////////////////////////////////
// Some placeholder constants - In the full app some of these are
// calculated based off position in geometry. This treatment
// shaves off a few FLOPS, but is not significant compared to the
// rest of the function.
float dz = 0.1f;
float zin = 0.3f;
float weight = 0.5f;
float mu = 0.9f;
float mu2 = 0.3f;
float ds = 0.7f;
const int egroups = I.egroups;
// load fine source region flux vector
float * FSR_flux = &SA.fine_flux_arr[ S[QSR_id[i]].fine_flux_id + FAI_id[i] * egroups];
if( FAI_id[i] == 0 )
{
float * f2 = &SA.fine_source_arr[ S[QSR_id[i]].fine_source_id + (FAI_id[i])*egroups];
float * f3 = &SA.fine_source_arr[ S[QSR_id[i]].fine_source_id + (FAI_id[i]+1)*egroups];
// cycle over energy groups
// load neighboring sources
float y2 = __ldg(&f2[g]);
float y3 = __ldg(&f3[g]);
// do linear "fitting"
float c0 = y2;
float c1 = (y3 - y2) / dz;
// calculate q0, q1, q2
q0 = c0 + c1*zin;
q1 = c1;
q2 = 0;
}
else if ( FAI_id[i] == I.fine_axial_intervals - 1 )
{
float * f1 = &SA.fine_source_arr[ S[QSR_id[i]].fine_source_id + (FAI_id[i]-1)*egroups];
float * f2 = &SA.fine_source_arr[ S[QSR_id[i]].fine_source_id + (FAI_id[i])*egroups];
// cycle over energy groups
// load neighboring sources
float y1 = __ldg(&f1[g]);
float y2 = __ldg(&f2[g]);
// do linear "fitting"
float c0 = y2;
float c1 = (y2 - y1) / dz;
// calculate q0, q1, q2
q0 = c0 + c1*zin;
q1 = c1;
q2 = 0;
}
else
{
float * f1 = &SA.fine_source_arr[ S[QSR_id[i]].fine_source_id + (FAI_id[i]-1)*egroups];
float * f2 = &SA.fine_source_arr[ S[QSR_id[i]].fine_source_id + (FAI_id[i])*egroups];
float * f3 = &SA.fine_source_arr[ S[QSR_id[i]].fine_source_id + (FAI_id[i]+1)*egroups];
// cycle over energy groups
// load neighboring sources
float y1 = __ldg(&f1[g]);
float y2 = __ldg(&f2[g]);
float y3 = __ldg(&f3[g]);
// do quadratic "fitting"
float c0 = y2;
float c1 = (y1 - y3) / (2.f*dz);
float c2 = (y1 - 2.f*y2 + y3) / (2.f*dz*dz);
// calculate q0, q1, q2
q0 = c0 + c1*zin + c2*zin*zin;
q1 = c1 + 2.f*c2*zin;
q2 = c2;
}
// load total cross section
sigT = __ldg(&SA.sigT_arr[ S[QSR_id[i]].sigT_id + g]);
// calculate common values for efficiency
tau = sigT * ds;
sigT2 = sigT * sigT;
#ifdef TABLE
interpolateTable( table, tau, &expVal );
#else
expVal = 1.f - expf( -tau); // EXP function is faster than table lookup
#endif
// Flux Integral
// Re-used Term
reuse = tau * (tau - 2.f) + 2.f * expVal
/ (sigT * sigT2);
// add contribution to new source flux
flux_integral = (q0 * tau + (sigT * __ldg(&state_flux[g]) - q0)
* expVal) / sigT2 + q1 * mu * reuse + q2 * mu2
* (tau * (tau * (tau - 3.f) + 6.f) - 6.f * expVal)
/ (3.f * sigT2 * sigT2);
// Prepare tally
tally = weight * flux_integral;
// SHOULD BE ATOMIC HERE!
//FSR_flux[g] += tally;
atomicAdd(&FSR_flux[g], (float) tally);
// Term 1
t1 = q0 * expVal / sigT;
// Term 2
t2 = q1 * mu * (tau - expVal) / sigT2;
// Term 3
t3 = q2 * mu2 * reuse;
// Term 4
t4 = state_flux[g] * (1.f - expVal);
// Total psi
state_flux[g] = t1 + t2 + t3 + t4;
}
}
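// Indexing recap for run_kernel: each block owns I.seg_per_thread consecutive geometric
// segments and each thread owns one energy group, so the launch is expected to use
// blockDim.x == I.egroups. For example, with I.seg_per_thread = 4 the block with flattened
// index 7 walks segments 28, 29, 30 and 31 (blockId is pre-multiplied by seg_per_thread,
// decremented once, then incremented at the top of each loop iteration).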
/* Interpolates a formed exponential table to compute ( 1- exp(-x) )
* at the desired x value */
__device__ void interpolateTable(Table * table, float x, float * out)
{
// check to ensure value is in domain
if( x > table->maxVal )
*out = 1.0f;
else
{
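// table->values stores (slope, intercept) pairs per interval, hence the *2 stride below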
int interval = (int) ( x / table->dx + 0.5f * table->dx );
interval = interval * 2;
float slope = table->values[ interval ];
float intercept = table->values[ interval + 1 ];
float val = slope * x + intercept;
*out = val;
}
}
| 539160eefb50281e719628f9e2137d2d30c10785.cu | #include "SimpleMOC-kernel_header.h"
/* My parallelization scheme here is to basically have a single
* block be a geometrical segment, with each thread within the
 * block representing a single energy group. On the CPU, the
 * inner SIMD-ized loop is over energy (i.e., 100 energy groups).
* This should allow for each BLOCK to have:
* - A single state variable for the RNG
* - A set of __shared__ SIMD vectors, each thread id being its idx
*/
__global__ void run_kernel( Input I, Source * S,
Source_Arrays SA, Table * table, curandState * state,
float * state_fluxes, int N_state_fluxes)
{
int blockId = blockIdx.y * gridDim.x + blockIdx.x; // geometric segment
if( blockId >= I.segments / I.seg_per_thread )
return;
// Assign RNG state
curandState * localState = &state[blockId % I.streams];
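// Scale blockId to the first segment of this block's batch, then pre-decrement
// it: the segment loop below increments blockId at the top of each iteration.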
blockId *= I.seg_per_thread;
blockId--;
int g = threadIdx.x; // Each energy group (g) is one thread in a block
// Thread Local (i.e., specific to E group) variables
// Similar to SIMD vectors in CPU code
float q0 ;
float q1 ;
float q2 ;
float sigT ;
float tau ;
float sigT2 ;
float expVal ;
float reuse ;
float flux_integral;
float tally ;
float t1 ;
float t2 ;
float t3 ;
float t4 ;
// Randomized variables (common across all threads within a block)
extern __shared__ int shm[];
int * state_flux_id = &shm[0];
int * QSR_id = &shm[I.seg_per_thread];
int * FAI_id = &shm[I.seg_per_thread * 2];
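// Dynamic shared memory layout: three int arrays of seg_per_thread entries each
// (state_flux_id | QSR_id | FAI_id), so the launch is expected to supply at
// least 3 * I.seg_per_thread * sizeof(int) bytes of dynamic shared memory.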
if( threadIdx.x == 0 )
{
for( int i = 0; i < I.seg_per_thread; i++ )
{
state_flux_id[i] = curand(localState) % N_state_fluxes;
QSR_id[i] = curand(localState) % I.source_3D_regions;
FAI_id[i] = curand(localState) % I.fine_axial_intervals;
}
}
__syncthreads();
for( int i = 0; i < I.seg_per_thread; i++ )
{
blockId++;
float * state_flux = &state_fluxes[state_flux_id[i]];
__syncthreads();
//////////////////////////////////////////////////////////
// Attenuate Segment
//////////////////////////////////////////////////////////
// Some placeholder constants - In the full app some of these are
// calculated based off position in geometry. This treatment
// shaves off a few FLOPS, but is not significant compared to the
// rest of the function.
float dz = 0.1f;
float zin = 0.3f;
float weight = 0.5f;
float mu = 0.9f;
float mu2 = 0.3f;
float ds = 0.7f;
const int egroups = I.egroups;
// load fine source region flux vector
float * FSR_flux = &SA.fine_flux_arr[ S[QSR_id[i]].fine_flux_id + FAI_id[i] * egroups];
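// Axial source fit: the first and last fine axial intervals have only one
// neighbor, so they fall back to a linear fit (q2 = 0); interior intervals
// use a quadratic fit through the two neighboring intervals.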
if( FAI_id[i] == 0 )
{
float * f2 = &SA.fine_source_arr[ S[QSR_id[i]].fine_source_id + (FAI_id[i])*egroups];
float * f3 = &SA.fine_source_arr[ S[QSR_id[i]].fine_source_id + (FAI_id[i]+1)*egroups];
// cycle over energy groups
// load neighboring sources
float y2 = __ldg(&f2[g]);
float y3 = __ldg(&f3[g]);
// do linear "fitting"
float c0 = y2;
float c1 = (y3 - y2) / dz;
// calculate q0, q1, q2
q0 = c0 + c1*zin;
q1 = c1;
q2 = 0;
}
else if ( FAI_id[i] == I.fine_axial_intervals - 1 )
{
float * f1 = &SA.fine_source_arr[ S[QSR_id[i]].fine_source_id + (FAI_id[i]-1)*egroups];
float * f2 = &SA.fine_source_arr[ S[QSR_id[i]].fine_source_id + (FAI_id[i])*egroups];
// cycle over energy groups
// load neighboring sources
float y1 = __ldg(&f1[g]);
float y2 = __ldg(&f2[g]);
// do linear "fitting"
float c0 = y2;
float c1 = (y2 - y1) / dz;
// calculate q0, q1, q2
q0 = c0 + c1*zin;
q1 = c1;
q2 = 0;
}
else
{
float * f1 = &SA.fine_source_arr[ S[QSR_id[i]].fine_source_id + (FAI_id[i]-1)*egroups];
float * f2 = &SA.fine_source_arr[ S[QSR_id[i]].fine_source_id + (FAI_id[i])*egroups];
float * f3 = &SA.fine_source_arr[ S[QSR_id[i]].fine_source_id + (FAI_id[i]+1)*egroups];
// cycle over energy groups
// load neighboring sources
float y1 = __ldg(&f1[g]);
float y2 = __ldg(&f2[g]);
float y3 = __ldg(&f3[g]);
// do quadratic "fitting"
float c0 = y2;
float c1 = (y1 - y3) / (2.f*dz);
float c2 = (y1 - 2.f*y2 + y3) / (2.f*dz*dz);
// calculate q0, q1, q2
q0 = c0 + c1*zin + c2*zin*zin;
q1 = c1 + 2.f*c2*zin;
q2 = c2;
}
// load total cross section
sigT = __ldg(&SA.sigT_arr[ S[QSR_id[i]].sigT_id + g]);
// calculate common values for efficiency
tau = sigT * ds;
sigT2 = sigT * sigT;
#ifdef TABLE
interpolateTable( table, tau, &expVal );
#else
expVal = 1.f - expf( -tau); // EXP function is faster than table lookup
#endif
// Flux Integral
// Re-used Term
reuse = tau * (tau - 2.f) + 2.f * expVal
/ (sigT * sigT2);
// add contribution to new source flux
flux_integral = (q0 * tau + (sigT * __ldg(&state_flux[g]) - q0)
* expVal) / sigT2 + q1 * mu * reuse + q2 * mu2
* (tau * (tau * (tau - 3.f) + 6.f) - 6.f * expVal)
/ (3.f * sigT2 * sigT2);
// Prepare tally
tally = weight * flux_integral;
// SHOULD BE ATOMIC HERE!
//FSR_flux[g] += tally;
atomicAdd(&FSR_flux[g], (float) tally);
// Term 1
t1 = q0 * expVal / sigT;
// Term 2
t2 = q1 * mu * (tau - expVal) / sigT2;
// Term 3
t3 = q2 * mu2 * reuse;
// Term 4
t4 = state_flux[g] * (1.f - expVal);
// Total psi
state_flux[g] = t1 + t2 + t3 + t4;
}
}
/* Interpolates a formed exponential table to compute ( 1- exp(-x) )
* at the desired x value */
__device__ void interpolateTable(Table * table, float x, float * out)
{
// check to ensure value is in domain
if( x > table->maxVal )
*out = 1.0f;
else
{
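// table->values stores (slope, intercept) pairs per interval, hence the *2 stride below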
int interval = (int) ( x / table->dx + 0.5f * table->dx );
interval = interval * 2;
float slope = table->values[ interval ];
float intercept = table->values[ interval + 1 ];
float val = slope * x + intercept;
*out = val;
}
}
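/* Illustrative host-side launch (a sketch based on the indexing above, not
 * taken from the original SimpleMOC-kernel host code): one thread per energy
 * group, one block per batch of seg_per_thread segments, and dynamic shared
 * memory sized for the three shared index arrays. The 1D grid shape is an
 * assumption; the blockId computation in run_kernel also admits a 2D grid. */
static void example_launch( Input I, Source * S, Source_Arrays SA, Table * table,
	curandState * state, float * state_fluxes, int N_state_fluxes )
{
	dim3 grid( I.segments / I.seg_per_thread, 1, 1 );
	size_t shmem = 3 * I.seg_per_thread * sizeof(int); // state_flux_id | QSR_id | FAI_id
	run_kernel<<< grid, I.egroups, shmem >>>( I, S, SA, table, state,
		state_fluxes, N_state_fluxes );
}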
|
4f05f907e587b39a373666873232e21af73cbd87.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
namespace cv { namespace gpu { namespace cudev
{
namespace blend
{
template <typename T>
__global__ void blendLinearKernel(int rows, int cols, int cn, const PtrStep<T> img1, const PtrStep<T> img2,
const PtrStepf weights1, const PtrStepf weights2, PtrStep<T> result)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y < rows && x < cols)
{
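// x runs over interleaved channel elements (the caller launches cols * cn
// threads in x), so x_ recovers the pixel column used to index the
// single-channel weight maps; the 1e-5f term below keeps the normalization
// finite when both weights are zero.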
int x_ = x / cn;
float w1 = weights1.ptr(y)[x_];
float w2 = weights2.ptr(y)[x_];
T p1 = img1.ptr(y)[x];
T p2 = img2.ptr(y)[x];
result.ptr(y)[x] = (p1 * w1 + p2 * w2) / (w1 + w2 + 1e-5f);
}
}
template <typename T>
void blendLinearCaller(int rows, int cols, int cn, PtrStep<T> img1, PtrStep<T> img2, PtrStepf weights1, PtrStepf weights2, PtrStep<T> result, hipStream_t stream)
{
dim3 threads(16, 16);
dim3 grid(divUp(cols * cn, threads.x), divUp(rows, threads.y));
hipLaunchKernelGGL(( blendLinearKernel), dim3(grid), dim3(threads), 0, stream, rows, cols * cn, cn, img1, img2, weights1, weights2, result);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall(hipDeviceSynchronize());
}
template void blendLinearCaller<uchar>(int, int, int, PtrStep<uchar>, PtrStep<uchar>, PtrStepf, PtrStepf, PtrStep<uchar>, hipStream_t stream);
template void blendLinearCaller<float>(int, int, int, PtrStep<float>, PtrStep<float>, PtrStepf, PtrStepf, PtrStep<float>, hipStream_t stream);
__global__ void blendLinearKernel8UC4(int rows, int cols, const PtrStepb img1, const PtrStepb img2,
const PtrStepf weights1, const PtrStepf weights2, PtrStepb result)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y < rows && x < cols)
{
float w1 = weights1.ptr(y)[x];
float w2 = weights2.ptr(y)[x];
float sum_inv = 1.f / (w1 + w2 + 1e-5f);
w1 *= sum_inv;
w2 *= sum_inv;
uchar4 p1 = ((const uchar4*)img1.ptr(y))[x];
uchar4 p2 = ((const uchar4*)img2.ptr(y))[x];
((uchar4*)result.ptr(y))[x] = make_uchar4(p1.x * w1 + p2.x * w2, p1.y * w1 + p2.y * w2,
p1.z * w1 + p2.z * w2, p1.w * w1 + p2.w * w2);
}
}
void blendLinearCaller8UC4(int rows, int cols, PtrStepb img1, PtrStepb img2, PtrStepf weights1, PtrStepf weights2, PtrStepb result, hipStream_t stream)
{
dim3 threads(16, 16);
dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));
hipLaunchKernelGGL(( blendLinearKernel8UC4), dim3(grid), dim3(threads), 0, stream, rows, cols, img1, img2, weights1, weights2, result);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall(hipDeviceSynchronize());
}
} // namespace blend
}}} // namespace cv { namespace gpu { namespace cudev
#endif /* CUDA_DISABLER */
| 4f05f907e587b39a373666873232e21af73cbd87.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
namespace cv { namespace gpu { namespace cudev
{
namespace blend
{
template <typename T>
__global__ void blendLinearKernel(int rows, int cols, int cn, const PtrStep<T> img1, const PtrStep<T> img2,
const PtrStepf weights1, const PtrStepf weights2, PtrStep<T> result)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y < rows && x < cols)
{
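// x runs over interleaved channel elements (the caller launches cols * cn
// threads in x), so x_ recovers the pixel column used to index the
// single-channel weight maps; the 1e-5f term below keeps the normalization
// finite when both weights are zero.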
int x_ = x / cn;
float w1 = weights1.ptr(y)[x_];
float w2 = weights2.ptr(y)[x_];
T p1 = img1.ptr(y)[x];
T p2 = img2.ptr(y)[x];
result.ptr(y)[x] = (p1 * w1 + p2 * w2) / (w1 + w2 + 1e-5f);
}
}
template <typename T>
void blendLinearCaller(int rows, int cols, int cn, PtrStep<T> img1, PtrStep<T> img2, PtrStepf weights1, PtrStepf weights2, PtrStep<T> result, cudaStream_t stream)
{
dim3 threads(16, 16);
dim3 grid(divUp(cols * cn, threads.x), divUp(rows, threads.y));
blendLinearKernel<<<grid, threads, 0, stream>>>(rows, cols * cn, cn, img1, img2, weights1, weights2, result);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall(cudaDeviceSynchronize());
}
template void blendLinearCaller<uchar>(int, int, int, PtrStep<uchar>, PtrStep<uchar>, PtrStepf, PtrStepf, PtrStep<uchar>, cudaStream_t stream);
template void blendLinearCaller<float>(int, int, int, PtrStep<float>, PtrStep<float>, PtrStepf, PtrStepf, PtrStep<float>, cudaStream_t stream);
__global__ void blendLinearKernel8UC4(int rows, int cols, const PtrStepb img1, const PtrStepb img2,
const PtrStepf weights1, const PtrStepf weights2, PtrStepb result)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y < rows && x < cols)
{
float w1 = weights1.ptr(y)[x];
float w2 = weights2.ptr(y)[x];
float sum_inv = 1.f / (w1 + w2 + 1e-5f);
w1 *= sum_inv;
w2 *= sum_inv;
uchar4 p1 = ((const uchar4*)img1.ptr(y))[x];
uchar4 p2 = ((const uchar4*)img2.ptr(y))[x];
((uchar4*)result.ptr(y))[x] = make_uchar4(p1.x * w1 + p2.x * w2, p1.y * w1 + p2.y * w2,
p1.z * w1 + p2.z * w2, p1.w * w1 + p2.w * w2);
}
}
void blendLinearCaller8UC4(int rows, int cols, PtrStepb img1, PtrStepb img2, PtrStepf weights1, PtrStepf weights2, PtrStepb result, cudaStream_t stream)
{
dim3 threads(16, 16);
dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));
blendLinearKernel8UC4<<<grid, threads, 0, stream>>>(rows, cols, img1, img2, weights1, weights2, result);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall(cudaDeviceSynchronize());
}
} // namespace blend
}}} // namespace cv { namespace gpu { namespace cudev
#endif /* CUDA_DISABLER */
|
dbc33f6bf048698b10cc91b56e997f328be62760.hip | // !!! This is a file automatically generated by hipify!!!
/*
* This CUDA-Cusparse code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0]/prhs[1] := mxGPUArray or CPU Array}[double/complex double]
 * Sparse/Dense matrix-sparse/dense vector solve Z=CuMatlab_solve(Sparse/Dense(A),Sparse/Dense(Y)).
* AZ=Y -->Z=A\Y
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <cusparse_v2.h>
#include <cusolverSp.h>
#include <hip/hip_runtime_api.h>
#include "cusolverSp_LOWLEVEL_PREVIEW.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "SPARSEHELPER.h"
#include "ERRORCHK.h"
#include <omp.h>
// Input Arguments
#define INPUTDENSEA prhs[0]
#define INPUTSPARSEB prhs[1]
// Output Arguments
#define OUTPUTMATRIX plhs[0]
extern "C" static void mexCuMatlab_sparseDSR(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
int nDevices;
hipError_t errCode =hipGetDeviceCount(&nDevices);
//int nDevices;
//hipGetDeviceCount(&nDevices);
if (errCode != hipSuccess){
printf("Error! No CUDA devices found! \n");
return;
}
char const * const InputErrMsg = "Invalid input to MEX file, number of input arguments must be two.";
char const * const OutputErrMsg = "Invalid output to MEX file, number of output arguments must be one.";
if ((nrhs!=2)) {
mexErrMsgIdAndTxt("MATLAB:mexatexit:invalidInput", InputErrMsg);
}
if ((nlhs!=1)) {
mexErrMsgIdAndTxt("MATLAB:mexatexit:invalidInput", OutputErrMsg);
}
char *input_buf0;
input_buf0 = mxArrayToString(INPUTDENSEA);
if ((mxIsChar(INPUTDENSEA))){
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Input(FIRST ARGUMENT) must be array, or gpuArray object not %s\n",input_buf0);
}
char *input_buf1;
input_buf1 = mxArrayToString(INPUTSPARSEB);
if ((mxIsChar(INPUTSPARSEB))){
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Input(SECOND ARGUMENT) must be array, or gpuArray object not %s\n",input_buf1);
}
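/* Both branches below (gpuArray inputs and host mxArray inputs) follow the same
 * solve pipeline: scatter the sparse right-hand side B into a dense device vector,
 * count the nonzeros of the dense square matrix A and convert it to CSR, then run
 * cuSOLVER's low-level csrchol path (analysis, buffer sizing, factorization,
 * zero-pivot check, solve) to compute Z = A \ Y. */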
if (mxIsGPUArray(INPUTDENSEA) && mxIsGPUArray(INPUTSPARSEB)) {
mxGPUArray const *INPUTDENSEGPUA;
mxGPUArray const *INPUTSPARSEGPUB;
/* Initialize the MathWorks GPU API. */
mxInitGPU();
INPUTDENSEGPUA = mxGPUCreateFromMxArray(INPUTDENSEA);
INPUTSPARSEGPUB = mxGPUCreateFromMxArray(INPUTSPARSEB);
if((!mxGPUIsSparse(INPUTDENSEGPUA))&& (mxGPUIsSparse(INPUTSPARSEGPUB)) ){
const mwSize *dimsGPUSA;
dimsGPUSA=mxGPUGetDimensions(INPUTDENSEGPUA);
int numARows, numAColumns;
numARows = (int)dimsGPUSA[0]; /* gets number of rows of A */
numAColumns = (int)dimsGPUSA[1]; /* gets number of columns of A */
const mwSize *dimsGPUSB;
dimsGPUSB=mxGPUGetDimensions(INPUTSPARSEGPUB);
int numBRows, numBColumns;
numBRows = (int)dimsGPUSB[0]; /* gets number of rows of B */
numBColumns = (int)dimsGPUSB[1]; /* gets number of columns of B */
if ( numARows != numAColumns ) {
mxGPUDestroyGPUArray(INPUTDENSEGPUA);
mxGPUDestroyGPUArray(INPUTSPARSEGPUB);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file,first argument must be a sparse/dense square matrix.");
}
if ( (numBColumns!= 1) ) {
mxGPUDestroyGPUArray(INPUTDENSEGPUA);
mxGPUDestroyGPUArray(INPUTSPARSEGPUB);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, second argument must be a dense/sparse column vector.");
}
if ( (numBRows!= numARows) ) {
mxGPUDestroyGPUArray(INPUTDENSEGPUA);
mxGPUDestroyGPUArray(INPUTSPARSEGPUB);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, array (matrix-vector) dimensions must agree.");
}
double const *d_A_dense;
d_A_dense = (double const *)(mxGPUGetDataReadOnly(INPUTDENSEGPUA));
mwIndex nnz2;
mxArray * VLSXY2 = mxGPUCreateMxArrayOnCPU(INPUTSPARSEGPUB);
nnz2 = *(mxGetJc(VLSXY2) + numBColumns);
int nnzB = static_cast<int> (nnz2);
mxArray *row_sortB =mxCreateNumericMatrix(nnzB, 1, mxINT32_CLASS, mxREAL);
int *pointerrowB = (int *)mxGetInt32s(row_sortB);
Ir_DataGetSetIXY(VLSXY2 , pointerrowB, nnzB);
double *pointervalB = (double *)mxGetDoubles(VLSXY2);
size_t pivot_dimensionsrowB[1] = {nnzB};
size_t pivot_dimensionsvalueB[1] = {nnzB};
mxGPUArray *row_sortBB = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsrowB, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *xrow_sortB=(int *)mxGPUGetData(row_sortBB);
gpuErrchk(hipMemcpy(xrow_sortB, pointerrowB, nnzB * sizeof(*xrow_sortB), hipMemcpyHostToDevice));
mxGPUArray *val_sortBB = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueB, mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
double *xval_sortB=(double*)mxGPUGetData(val_sortBB);
gpuErrchk(hipMemcpy(xval_sortB, pointervalB, nnzB * sizeof(*xval_sortB), hipMemcpyHostToDevice));
mxGPUDestroyGPUArray(INPUTSPARSEGPUB);
mxDestroyArray(row_sortB);
mxDestroyArray(VLSXY2);
hipsparseHandle_t handle; cusparseSafeCall(hipsparseCreate(&handle));
hipsparseMatDescr_t descrA; cusparseSafeCall(hipsparseCreateMatDescr(&descrA));
hipsparseSetMatType(descrA, HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descrA, HIPSPARSE_INDEX_BASE_ONE);
size_t pivot_dimensionsvalueV[1] = {numBRows};
mxGPUArray *DB_dense = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueV, mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
double *d_B_dense = (double *)mxGPUGetData(DB_dense);
cusparseSafeCall(cusparseDsctr(handle, nnzB,
xval_sortB,
xrow_sortB, d_B_dense,
HIPSPARSE_INDEX_BASE_ONE));
mxGPUDestroyGPUArray(row_sortBB);
mxGPUDestroyGPUArray(val_sortBB);
int nnzA = 0; // --- Number of nonzero elements in dense matrix A
const int lda = numARows;
//int *d_nnzPerVectorA; // gpuErrchk(hipMalloc(&d_nnzPerVectorA, numARows * sizeof(*d_nnzPerVectorA)));
size_t pivot_pervect[1] = {numARows};
mxGPUArray *PerVect = mxGPUCreateGPUArray(1, (mwSize*) pivot_pervect, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_nnzPerVectorA = (int*)mxGPUGetData(PerVect);
//double *d_A_dense; gpuErrchk(hipMalloc(&d_A_dense, numARows * numAColumns * sizeof(*d_A_dense)));
//gpuErrchk(hipMemcpy(d_A_dense, h_A_dense1, numARows * numAColumns * sizeof(*d_A_dense), hipMemcpyHostToDevice));
cusparseSafeCall(hipsparseDnnz(handle, HIPSPARSE_DIRECTION_ROW, numARows, numAColumns, descrA, d_A_dense, lda, d_nnzPerVectorA, &nnzA));
// double *d_A; // gpuErrchk(hipMalloc(&d_A, nnzA * sizeof(*d_A)));
//int *d_A_RowIndices; //gpuErrchk(hipMalloc(&d_A_RowIndices, (numARows + 1) * sizeof(*d_A_RowIndices)));
//int *d_A_ColIndices; //gpuErrchk(hipMalloc(&d_A_ColIndices, nnzA * sizeof(*d_A_ColIndices)));
size_t pivot_dimensA[1] = {nnzA};
size_t pivot_dimensROW_A[1] = {numARows+1};
size_t pivot_dimensCOL_A[1] = {nnzA};
mxGPUArray *A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensA, mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
double *d_A = (double *)mxGPUGetData(A);
mxGPUArray * ROW_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensROW_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_A_RowIndices = (int *)mxGPUGetData(ROW_A);
mxGPUArray * COL_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensCOL_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_A_ColIndices = (int *)mxGPUGetData(COL_A);
cusparseSafeCall(hipsparseDdense2csr(handle, numARows, numAColumns, descrA, d_A_dense, lda, d_nnzPerVectorA, d_A, d_A_RowIndices, d_A_ColIndices));
//gpuErrchk(hipFree(d_A_dense));
mxGPUDestroyGPUArray(PerVect);
mxGPUDestroyGPUArray(INPUTDENSEGPUA);
cusolverSpHandle_t handle_cusolver;
cusolverSpCreate(&handle_cusolver);
csrcholInfo_t chl_info = NULL;
const double tol = 1.e-14;
int singularity = 0;
size_t size_internal = 0;
size_t size_chol = 0;
cusolverSafeCall(cusolverSpCreateCsrcholInfo(&chl_info));
cusolverSafeCall(cusolverSpXcsrcholAnalysis(
handle_cusolver, numARows, nnzA,
descrA, d_A_RowIndices, d_A_ColIndices,
chl_info));
cusolverSafeCall(cusolverSpDcsrcholBufferInfo(
handle_cusolver, numARows, nnzA,
descrA, d_A, d_A_RowIndices, d_A_ColIndices,
chl_info,
&size_internal,
&size_chol));
void *buffer_gpu = NULL;
gpuErrchk(hipMalloc(&buffer_gpu, sizeof(char)*size_chol));
cusolverSafeCall(cusolverSpDcsrcholFactor(
handle_cusolver, numARows, nnzA,
descrA, d_A, d_A_RowIndices, d_A_ColIndices,
chl_info,
buffer_gpu));
cusolverSafeCall(cusolverSpDcsrcholZeroPivot(
handle_cusolver, chl_info, tol, &singularity));
if ( 0 <= singularity){
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, (fatal error:) A is not invertible, singularity=%d\n", singularity);
}
size_t pivot_dimensionsvalueVa[1] = {numAColumns};
mxGPUArray *VAL = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueVa, mxDOUBLE_CLASS, mxREAL, MX_GPU_INITIALIZE_VALUES);
double *VALOUT = (double *)mxGPUGetData(VAL);
cusolverSafeCall(cusolverSpDcsrcholSolve(
handle_cusolver, numARows, d_B_dense, VALOUT, chl_info, buffer_gpu));
mxGPUDestroyGPUArray(A);
mxGPUDestroyGPUArray(ROW_A);
mxGPUDestroyGPUArray(COL_A);
mxGPUDestroyGPUArray(DB_dense);
OUTPUTMATRIX = mxGPUCreateMxArrayOnGPU(VAL);
gpuErrchk(hipFree(buffer_gpu));
mxGPUDestroyGPUArray(VAL);
cusolverSpDestroyCsrcholInfo(chl_info);
hipsparseDestroyMatDescr(descrA);
cusolverSpDestroy(handle_cusolver);
hipsparseDestroy(handle);
}
else{
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments! %s\n");
}
}
////////////////////////////////////////////////////////////////////////////////////
else if (!(mxIsGPUArray(INPUTDENSEA)) && !(mxIsGPUArray(INPUTSPARSEB))){
// if ((mxGetClassID(INPUTSPARSEA) != mxDOUBLE_CLASS) || (mxGetClassID(INPUTSPARSEB) != mxDOUBLE_CLASS)) {
// mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
// "Invalid input to MEX file, input(FIRST and SECOND ARGUMENTS) must be double precision.");
// }
if((!mxIsSparse(INPUTDENSEA))&& (mxIsSparse(INPUTSPARSEB)) ){
mxInitGPU();
const mwSize *dimsCPUA;
dimsCPUA=mxGetDimensions(INPUTDENSEA);
int numARows = (int)dimsCPUA[0]; /* gets number of rows of A */
int numAColumns = (int)dimsCPUA[1]; /* gets number of columns of A */
const mwSize *dimsCPUB;
dimsCPUB=mxGetDimensions(INPUTSPARSEB);
int numBRows = (int)dimsCPUB[0]; /* gets number of rows of B */
int numBColumns = (int)dimsCPUB[1]; /* gets number of columns of B */
if ( numARows != numAColumns ) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file,first argument must be a sparse/dense square matrix.");
}
if ( (numBColumns!= 1) ) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, second argument must be a dense/sparse column vector.");
}
if ( (numBRows!= numARows) ) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, array (matrix-vector) dimensions must agree.");
}
double *h_A_dense1;
h_A_dense1 = (double *)mxGetDoubles(INPUTDENSEA);
mwIndex nnz2;
nnz2 = *(mxGetJc(INPUTSPARSEB) + numBColumns);
int nnzB= static_cast<int> (nnz2);
mxArray *row_sortB =mxCreateNumericMatrix(nnzB, 1, mxINT32_CLASS, mxREAL);
int *pointerrowB = (int *)mxGetInt32s(row_sortB);
Ir_DataGetSetIXY(INPUTSPARSEB , pointerrowB, nnzB);
double *pointervalB = (double *)mxGetDoubles(INPUTSPARSEB);
size_t pivot_dimensionsrowB[1] = {nnzB};
size_t pivot_dimensionsvalueB[1] = {nnzB};
mxGPUArray *row_sortBB = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsrowB, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *xrow_sortB =(int *)mxGPUGetData(row_sortBB);
gpuErrchk(hipMemcpy(xrow_sortB, pointerrowB, nnzB * sizeof(*xrow_sortB), hipMemcpyHostToDevice));
mxGPUArray *val_sortBB = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueB, mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
double *xval_sortB=(double*)mxGPUGetData(val_sortBB);
gpuErrchk(hipMemcpy(xval_sortB, pointervalB, nnzB * sizeof(*xval_sortB), hipMemcpyHostToDevice));
hipsparseHandle_t handle; cusparseSafeCall(hipsparseCreate(&handle));
hipsparseMatDescr_t descrA; cusparseSafeCall(hipsparseCreateMatDescr(&descrA));
hipsparseSetMatType(descrA, HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descrA, HIPSPARSE_INDEX_BASE_ONE);
mxDestroyArray(row_sortB);
size_t pivot_dimensionsvalueV[1] = {numBRows};
mxGPUArray *DB_dense = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueV, mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
double *d_B_dense = (double *)mxGPUGetData(DB_dense);
cusparseSafeCall(cusparseDsctr(handle, nnzB,
xval_sortB,
xrow_sortB, d_B_dense,
HIPSPARSE_INDEX_BASE_ONE));
mxGPUDestroyGPUArray(row_sortBB);
mxGPUDestroyGPUArray(val_sortBB);
int nnzA = 0; // --- Number of nonzero elements in dense matrix A
const int lda = numARows;
//int *d_nnzPerVectorA; gpuErrchk(hipMalloc(&d_nnzPerVectorA, numARows * sizeof(*d_nnzPerVectorA)));
size_t pivot_pervect[1] = {numARows};
mxGPUArray *PerVect = mxGPUCreateGPUArray(1, (mwSize*) pivot_pervect, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_nnzPerVectorA = (int*)mxGPUGetData(PerVect);
//double *d_A_dense; gpuErrchk(hipMalloc(&d_A_dense, numARows * numAColumns * sizeof(*d_A_dense)));
size_t pivot_dimensionsvalueDA[2] = {numARows, numAColumns};
mxGPUArray *OUTMA = mxGPUCreateGPUArray(2, (mwSize*) pivot_dimensionsvalueDA, mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
double *d_A_dense = (double *)mxGPUGetData(OUTMA);
gpuErrchk(hipMemcpy(d_A_dense, h_A_dense1, numARows * numAColumns * sizeof(*d_A_dense), hipMemcpyHostToDevice));
cusparseSafeCall(hipsparseDnnz(handle, HIPSPARSE_DIRECTION_ROW, numARows, numAColumns, descrA, d_A_dense, lda, d_nnzPerVectorA, &nnzA));
// double *d_A; // gpuErrchk(hipMalloc(&d_A, nnzA * sizeof(*d_A)));
//int *d_A_RowIndices; // gpuErrchk(hipMalloc(&d_A_RowIndices, (numARows + 1) * sizeof(*d_A_RowIndices)));
//int *d_A_ColIndices; // gpuErrchk(hipMalloc(&d_A_ColIndices, nnzA * sizeof(*d_A_ColIndices)));
size_t pivot_dimensA[1] = {nnzA};
size_t pivot_dimensROW_A[1] = {numARows+1};
size_t pivot_dimensCOL_A[1] = {nnzA};
mxGPUArray *A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensA, mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
double *d_A = (double *)mxGPUGetData(A);
mxGPUArray * ROW_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensROW_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_A_RowIndices = (int *)mxGPUGetData(ROW_A);
mxGPUArray * COL_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensCOL_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_A_ColIndices = (int *)mxGPUGetData(COL_A);
cusparseSafeCall(hipsparseDdense2csr(handle, numARows, numAColumns, descrA, d_A_dense, lda, d_nnzPerVectorA, d_A, d_A_RowIndices, d_A_ColIndices));
mxGPUDestroyGPUArray(OUTMA);
//gpuErrchk(hipFree(d_nnzPerVectorA));
mxGPUDestroyGPUArray(PerVect);
cusolverSpHandle_t handle_cusolver;
cusolverSpCreate(&handle_cusolver);
csrcholInfo_t chl_info = NULL;
const double tol = 1.e-14;
int singularity = 0;
size_t size_internal = 0;
size_t size_chol = 0;
cusolverSafeCall(cusolverSpCreateCsrcholInfo(&chl_info));
cusolverSafeCall(cusolverSpXcsrcholAnalysis(
handle_cusolver, numARows, nnzA,
descrA, d_A_RowIndices, d_A_ColIndices,
chl_info));
cusolverSafeCall(cusolverSpDcsrcholBufferInfo(
handle_cusolver, numARows, nnzA,
descrA, d_A, d_A_RowIndices, d_A_ColIndices,
chl_info,
&size_internal,
&size_chol));
void *buffer_gpu = NULL;
gpuErrchk(hipMalloc(&buffer_gpu, sizeof(char)*size_chol));
cusolverSafeCall(cusolverSpDcsrcholFactor(
handle_cusolver, numARows, nnzA,
descrA, d_A, d_A_RowIndices, d_A_ColIndices,
chl_info,
buffer_gpu));
cusolverSafeCall(cusolverSpDcsrcholZeroPivot(
handle_cusolver, chl_info, tol, &singularity));
if ( 0 <= singularity){
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, (fatal error:) A is not invertible, singularity=%d\n", singularity);
}
size_t pivot_dimensionsvalueVa[1] = {numAColumns};
mxGPUArray *VAL = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueVa, mxDOUBLE_CLASS, mxREAL, MX_GPU_INITIALIZE_VALUES);
double *VALOUT = (double *)mxGPUGetData(VAL);
cusolverSafeCall(cusolverSpDcsrcholSolve(
handle_cusolver, numARows, d_B_dense, VALOUT, chl_info, buffer_gpu));
mxGPUDestroyGPUArray(A);
mxGPUDestroyGPUArray(ROW_A);
mxGPUDestroyGPUArray(COL_A);
mxGPUDestroyGPUArray(DB_dense);
OUTPUTMATRIX = mxGPUCreateMxArrayOnGPU(VAL);
gpuErrchk(hipFree(buffer_gpu));
mxGPUDestroyGPUArray(VAL);
cusolverSpDestroyCsrcholInfo(chl_info);
hipsparseDestroyMatDescr(descrA);
cusolverSpDestroy(handle_cusolver);
hipsparseDestroy(handle);
}
else{
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments! %s\n");
}
}
//
else{
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments! %s\n");
}
}
| dbc33f6bf048698b10cc91b56e997f328be62760.cu |
/*
* This CUDA-Cusparse code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0]/prhs[1] := mxGPUArray or CPU Array}[double/complex double]
 * Sparse/Dense matrix-sparse/dense vector solve Z=CuMatlab_solve(Sparse/Dense(A),Sparse/Dense(Y)).
* AZ=Y -->Z=A\Y
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <cusparse_v2.h>
#include <cusolverSp.h>
#include <cuda_runtime_api.h>
#include "cusolverSp_LOWLEVEL_PREVIEW.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include "SPARSEHELPER.h"
#include "ERRORCHK.h"
#include <omp.h>
// Input Arguments
#define INPUTDENSEA prhs[0]
#define INPUTSPARSEB prhs[1]
// Output Arguments
#define OUTPUTMATRIX plhs[0]
extern "C" static void mexCuMatlab_sparseDSR(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
int nDevices;
cudaError_t errCode =cudaGetDeviceCount(&nDevices);
//int nDevices;
//cudaGetDeviceCount(&nDevices);
if (errCode != cudaSuccess){
printf("Error! No CUDA devices found! \n");
return;
}
char const * const InputErrMsg = "Invalid input to MEX file, number of input arguments must be two.";
char const * const OutputErrMsg = "Invalid output to MEX file, number of output arguments must be one.";
if ((nrhs!=2)) {
mexErrMsgIdAndTxt("MATLAB:mexatexit:invalidInput", InputErrMsg);
}
if ((nlhs!=1)) {
mexErrMsgIdAndTxt("MATLAB:mexatexit:invalidInput", OutputErrMsg);
}
char *input_buf0;
input_buf0 = mxArrayToString(INPUTDENSEA);
if ((mxIsChar(INPUTDENSEA))){
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Input(FIRST ARGUMENT) must be array, or gpuArray object not %s\n",input_buf0);
}
char *input_buf1;
input_buf1 = mxArrayToString(INPUTSPARSEB);
if ((mxIsChar(INPUTSPARSEB))){
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Input(SECOND ARGUMENT) must be array, or gpuArray object not %s\n",input_buf1);
}
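/* Both branches below (gpuArray inputs and host mxArray inputs) follow the same
 * solve pipeline: scatter the sparse right-hand side B into a dense device vector,
 * count the nonzeros of the dense square matrix A and convert it to CSR, then run
 * cuSOLVER's low-level csrchol path (analysis, buffer sizing, factorization,
 * zero-pivot check, solve) to compute Z = A \ Y. */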
if (mxIsGPUArray(INPUTDENSEA) && mxIsGPUArray(INPUTSPARSEB)) {
mxGPUArray const *INPUTDENSEGPUA;
mxGPUArray const *INPUTSPARSEGPUB;
/* Initialize the MathWorks GPU API. */
mxInitGPU();
INPUTDENSEGPUA = mxGPUCreateFromMxArray(INPUTDENSEA);
INPUTSPARSEGPUB = mxGPUCreateFromMxArray(INPUTSPARSEB);
if((!mxGPUIsSparse(INPUTDENSEGPUA))&& (mxGPUIsSparse(INPUTSPARSEGPUB)) ){
const mwSize *dimsGPUSA;
dimsGPUSA=mxGPUGetDimensions(INPUTDENSEGPUA);
int numARows, numAColumns;
numARows = (int)dimsGPUSA[0]; /* gets number of rows of A */
numAColumns = (int)dimsGPUSA[1]; /* gets number of columns of A */
const mwSize *dimsGPUSB;
dimsGPUSB=mxGPUGetDimensions(INPUTSPARSEGPUB);
int numBRows, numBColumns;
numBRows = (int)dimsGPUSB[0]; /* gets number of rows of B */
numBColumns = (int)dimsGPUSB[1]; /* gets number of columns of B */
if ( numARows != numAColumns ) {
mxGPUDestroyGPUArray(INPUTDENSEGPUA);
mxGPUDestroyGPUArray(INPUTSPARSEGPUB);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file,first argument must be a sparse/dense square matrix.");
}
if ( (numBColumns!= 1) ) {
mxGPUDestroyGPUArray(INPUTDENSEGPUA);
mxGPUDestroyGPUArray(INPUTSPARSEGPUB);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, second argument must be a dense/sparse column vector.");
}
if ( (numBRows!= numARows) ) {
mxGPUDestroyGPUArray(INPUTDENSEGPUA);
mxGPUDestroyGPUArray(INPUTSPARSEGPUB);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, array (matrix-vector) dimensions must agree.");
}
double const *d_A_dense;
d_A_dense = (double const *)(mxGPUGetDataReadOnly(INPUTDENSEGPUA));
mwIndex nnz2;
mxArray * VLSXY2 = mxGPUCreateMxArrayOnCPU(INPUTSPARSEGPUB);
nnz2 = *(mxGetJc(VLSXY2) + numBColumns);
int nnzB = static_cast<int> (nnz2);
mxArray *row_sortB =mxCreateNumericMatrix(nnzB, 1, mxINT32_CLASS, mxREAL);
int *pointerrowB = (int *)mxGetInt32s(row_sortB);
Ir_DataGetSetIXY(VLSXY2 , pointerrowB, nnzB);
double *pointervalB = (double *)mxGetDoubles(VLSXY2);
size_t pivot_dimensionsrowB[1] = {nnzB};
size_t pivot_dimensionsvalueB[1] = {nnzB};
mxGPUArray *row_sortBB = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsrowB, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *xrow_sortB=(int *)mxGPUGetData(row_sortBB);
gpuErrchk(cudaMemcpy(xrow_sortB, pointerrowB, nnzB * sizeof(*xrow_sortB), cudaMemcpyHostToDevice));
mxGPUArray *val_sortBB = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueB, mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
double *xval_sortB=(double*)mxGPUGetData(val_sortBB);
gpuErrchk(cudaMemcpy(xval_sortB, pointervalB, nnzB * sizeof(*xval_sortB), cudaMemcpyHostToDevice));
mxGPUDestroyGPUArray(INPUTSPARSEGPUB);
mxDestroyArray(row_sortB);
mxDestroyArray(VLSXY2);
cusparseHandle_t handle; cusparseSafeCall(cusparseCreate(&handle));
cusparseMatDescr_t descrA; cusparseSafeCall(cusparseCreateMatDescr(&descrA));
cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ONE);
size_t pivot_dimensionsvalueV[1] = {numBRows};
mxGPUArray *DB_dense = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueV, mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
double *d_B_dense = (double *)mxGPUGetData(DB_dense);
cusparseSafeCall(cusparseDsctr(handle, nnzB,
xval_sortB,
xrow_sortB, d_B_dense,
CUSPARSE_INDEX_BASE_ONE));
mxGPUDestroyGPUArray(row_sortBB);
mxGPUDestroyGPUArray(val_sortBB);
int nnzA = 0; // --- Number of nonzero elements in dense matrix A
const int lda = numARows;
//int *d_nnzPerVectorA; // gpuErrchk(cudaMalloc(&d_nnzPerVectorA, numARows * sizeof(*d_nnzPerVectorA)));
size_t pivot_pervect[1] = {numARows};
mxGPUArray *PerVect = mxGPUCreateGPUArray(1, (mwSize*) pivot_pervect, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_nnzPerVectorA = (int*)mxGPUGetData(PerVect);
//double *d_A_dense; gpuErrchk(cudaMalloc(&d_A_dense, numARows * numAColumns * sizeof(*d_A_dense)));
//gpuErrchk(cudaMemcpy(d_A_dense, h_A_dense1, numARows * numAColumns * sizeof(*d_A_dense), cudaMemcpyHostToDevice));
cusparseSafeCall(cusparseDnnz(handle, CUSPARSE_DIRECTION_ROW, numARows, numAColumns, descrA, d_A_dense, lda, d_nnzPerVectorA, &nnzA));
// double *d_A; // gpuErrchk(cudaMalloc(&d_A, nnzA * sizeof(*d_A)));
//int *d_A_RowIndices; //gpuErrchk(cudaMalloc(&d_A_RowIndices, (numARows + 1) * sizeof(*d_A_RowIndices)));
//int *d_A_ColIndices; //gpuErrchk(cudaMalloc(&d_A_ColIndices, nnzA * sizeof(*d_A_ColIndices)));
size_t pivot_dimensA[1] = {nnzA};
size_t pivot_dimensROW_A[1] = {numARows+1};
size_t pivot_dimensCOL_A[1] = {nnzA};
mxGPUArray *A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensA, mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
double *d_A = (double *)mxGPUGetData(A);
mxGPUArray * ROW_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensROW_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_A_RowIndices = (int *)mxGPUGetData(ROW_A);
mxGPUArray * COL_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensCOL_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_A_ColIndices = (int *)mxGPUGetData(COL_A);
cusparseSafeCall(cusparseDdense2csr(handle, numARows, numAColumns, descrA, d_A_dense, lda, d_nnzPerVectorA, d_A, d_A_RowIndices, d_A_ColIndices));
//gpuErrchk(cudaFree(d_A_dense));
mxGPUDestroyGPUArray(PerVect);
mxGPUDestroyGPUArray(INPUTDENSEGPUA);
cusolverSpHandle_t handle_cusolver;
cusolverSpCreate(&handle_cusolver);
csrcholInfo_t chl_info = NULL;
const double tol = 1.e-14;
int singularity = 0;
size_t size_internal = 0;
size_t size_chol = 0;
cusolverSafeCall(cusolverSpCreateCsrcholInfo(&chl_info));
cusolverSafeCall(cusolverSpXcsrcholAnalysis(
handle_cusolver, numARows, nnzA,
descrA, d_A_RowIndices, d_A_ColIndices,
chl_info));
cusolverSafeCall(cusolverSpDcsrcholBufferInfo(
handle_cusolver, numARows, nnzA,
descrA, d_A, d_A_RowIndices, d_A_ColIndices,
chl_info,
&size_internal,
&size_chol));
void *buffer_gpu = NULL;
gpuErrchk(cudaMalloc(&buffer_gpu, sizeof(char)*size_chol));
cusolverSafeCall(cusolverSpDcsrcholFactor(
handle_cusolver, numARows, nnzA,
descrA, d_A, d_A_RowIndices, d_A_ColIndices,
chl_info,
buffer_gpu));
cusolverSafeCall(cusolverSpDcsrcholZeroPivot(
handle_cusolver, chl_info, tol, &singularity));
if ( 0 <= singularity){
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, (fatal error:) A is not invertible, singularity=%d\n", singularity);
}
size_t pivot_dimensionsvalueVa[1] = {numAColumns};
mxGPUArray *VAL = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueVa, mxDOUBLE_CLASS, mxREAL, MX_GPU_INITIALIZE_VALUES);
double *VALOUT = (double *)mxGPUGetData(VAL);
cusolverSafeCall(cusolverSpDcsrcholSolve(
handle_cusolver, numARows, d_B_dense, VALOUT, chl_info, buffer_gpu));
mxGPUDestroyGPUArray(A);
mxGPUDestroyGPUArray(ROW_A);
mxGPUDestroyGPUArray(COL_A);
mxGPUDestroyGPUArray(DB_dense);
OUTPUTMATRIX = mxGPUCreateMxArrayOnGPU(VAL);
gpuErrchk(cudaFree(buffer_gpu));
mxGPUDestroyGPUArray(VAL);
cusolverSpDestroyCsrcholInfo(chl_info);
cusparseDestroyMatDescr(descrA);
cusolverSpDestroy(handle_cusolver);
cusparseDestroy(handle);
}
else{
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments! %s\n");
}
}
////////////////////////////////////////////////////////////////////////////////////
else if (!(mxIsGPUArray(INPUTDENSEA)) && !(mxIsGPUArray(INPUTSPARSEB))){
// if ((mxGetClassID(INPUTSPARSEA) != mxDOUBLE_CLASS) || (mxGetClassID(INPUTSPARSEB) != mxDOUBLE_CLASS)) {
// mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
// "Invalid input to MEX file, input(FIRST and SECOND ARGUMENTS) must be double precision.");
// }
if((!mxIsSparse(INPUTDENSEA))&& (mxIsSparse(INPUTSPARSEB)) ){
mxInitGPU();
const mwSize *dimsCPUA;
dimsCPUA=mxGetDimensions(INPUTDENSEA);
int numARows = (int)dimsCPUA[0]; /* gets number of rows of A */
int numAColumns = (int)dimsCPUA[1]; /* gets number of columns of A */
const mwSize *dimsCPUB;
dimsCPUB=mxGetDimensions(INPUTSPARSEB);
int numBRows = (int)dimsCPUB[0]; /* gets number of rows of B */
int numBColumns = (int)dimsCPUB[1]; /* gets number of columns of B */
if ( numARows != numAColumns ) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file,first argument must be a sparse/dense square matrix.");
}
if ( (numBColumns!= 1) ) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, second argument must be a dense/sparse column vector.");
}
if ( (numBRows!= numARows) ) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, array (matrix-vector) dimensions must agree.");
}
double *h_A_dense1;
h_A_dense1 = (double *)mxGetDoubles(INPUTDENSEA);
mwIndex nnz2;
nnz2 = *(mxGetJc(INPUTSPARSEB) + numBColumns);
int nnzB= static_cast<int> (nnz2);
mxArray *row_sortB =mxCreateNumericMatrix(nnzB, 1, mxINT32_CLASS, mxREAL);
int *pointerrowB = (int *)mxGetInt32s(row_sortB);
Ir_DataGetSetIXY(INPUTSPARSEB , pointerrowB, nnzB);
double *pointervalB = (double *)mxGetDoubles(INPUTSPARSEB);
size_t pivot_dimensionsrowB[1] = {nnzB};
size_t pivot_dimensionsvalueB[1] = {nnzB};
mxGPUArray *row_sortBB = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsrowB, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *xrow_sortB =(int *)mxGPUGetData(row_sortBB);
gpuErrchk(cudaMemcpy(xrow_sortB, pointerrowB, nnzB * sizeof(*xrow_sortB), cudaMemcpyHostToDevice));
mxGPUArray *val_sortBB = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueB, mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
double *xval_sortB=(double*)mxGPUGetData(val_sortBB);
gpuErrchk(cudaMemcpy(xval_sortB, pointervalB, nnzB * sizeof(*xval_sortB), cudaMemcpyHostToDevice));
cusparseHandle_t handle; cusparseSafeCall(cusparseCreate(&handle));
cusparseMatDescr_t descrA; cusparseSafeCall(cusparseCreateMatDescr(&descrA));
cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ONE);
mxDestroyArray(row_sortB);
size_t pivot_dimensionsvalueV[1] = {numBRows};
mxGPUArray *DB_dense = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueV, mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
double *d_B_dense = (double *)mxGPUGetData(DB_dense);
cusparseSafeCall(cusparseDsctr(handle, nnzB,
xval_sortB,
xrow_sortB, d_B_dense,
CUSPARSE_INDEX_BASE_ONE));
mxGPUDestroyGPUArray(row_sortBB);
mxGPUDestroyGPUArray(val_sortBB);
int nnzA = 0; // --- Number of nonzero elements in dense matrix A
const int lda = numARows;
//int *d_nnzPerVectorA; gpuErrchk(cudaMalloc(&d_nnzPerVectorA, numARows * sizeof(*d_nnzPerVectorA)));
size_t pivot_pervect[1] = {numARows};
mxGPUArray *PerVect = mxGPUCreateGPUArray(1, (mwSize*) pivot_pervect, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_nnzPerVectorA = (int*)mxGPUGetData(PerVect);
//double *d_A_dense; gpuErrchk(cudaMalloc(&d_A_dense, numARows * numAColumns * sizeof(*d_A_dense)));
size_t pivot_dimensionsvalueDA[2] = {numARows, numAColumns};
mxGPUArray *OUTMA = mxGPUCreateGPUArray(2, (mwSize*) pivot_dimensionsvalueDA, mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
double *d_A_dense = (double *)mxGPUGetData(OUTMA);
gpuErrchk(cudaMemcpy(d_A_dense, h_A_dense1, numARows * numAColumns * sizeof(*d_A_dense), cudaMemcpyHostToDevice));
cusparseSafeCall(cusparseDnnz(handle, CUSPARSE_DIRECTION_ROW, numARows, numAColumns, descrA, d_A_dense, lda, d_nnzPerVectorA, &nnzA));
// double *d_A; // gpuErrchk(cudaMalloc(&d_A, nnzA * sizeof(*d_A)));
//int *d_A_RowIndices; // gpuErrchk(cudaMalloc(&d_A_RowIndices, (numARows + 1) * sizeof(*d_A_RowIndices)));
//int *d_A_ColIndices; // gpuErrchk(cudaMalloc(&d_A_ColIndices, nnzA * sizeof(*d_A_ColIndices)));
size_t pivot_dimensA[1] = {nnzA};
size_t pivot_dimensROW_A[1] = {numARows+1};
size_t pivot_dimensCOL_A[1] = {nnzA};
mxGPUArray *A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensA, mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
double *d_A = (double *)mxGPUGetData(A);
mxGPUArray * ROW_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensROW_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_A_RowIndices = (int *)mxGPUGetData(ROW_A);
mxGPUArray * COL_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensCOL_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_A_ColIndices = (int *)mxGPUGetData(COL_A);
cusparseSafeCall(cusparseDdense2csr(handle, numARows, numAColumns, descrA, d_A_dense, lda, d_nnzPerVectorA, d_A, d_A_RowIndices, d_A_ColIndices));
mxGPUDestroyGPUArray(OUTMA);
//gpuErrchk(cudaFree(d_nnzPerVectorA));
mxGPUDestroyGPUArray(PerVect);
cusolverSpHandle_t handle_cusolver;
cusolverSpCreate(&handle_cusolver);
csrcholInfo_t chl_info = NULL;
const double tol = 1.e-14;
int singularity = 0;
size_t size_internal = 0;
size_t size_chol = 0;
cusolverSafeCall(cusolverSpCreateCsrcholInfo(&chl_info));
cusolverSafeCall(cusolverSpXcsrcholAnalysis(
handle_cusolver, numARows, nnzA,
descrA, d_A_RowIndices, d_A_ColIndices,
chl_info));
cusolverSafeCall(cusolverSpDcsrcholBufferInfo(
handle_cusolver, numARows, nnzA,
descrA, d_A, d_A_RowIndices, d_A_ColIndices,
chl_info,
&size_internal,
&size_chol));
void *buffer_gpu = NULL;
gpuErrchk(cudaMalloc(&buffer_gpu, sizeof(char)*size_chol));
cusolverSafeCall(cusolverSpDcsrcholFactor(
handle_cusolver, numARows, nnzA,
descrA, d_A, d_A_RowIndices, d_A_ColIndices,
chl_info,
buffer_gpu));
cusolverSafeCall(cusolverSpDcsrcholZeroPivot(
handle_cusolver, chl_info, tol, &singularity));
if ( 0 <= singularity){
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, (fatal error:) A is not invertible, singularity=%d\n", singularity);
}
size_t pivot_dimensionsvalueVa[1] = {numAColumns};
mxGPUArray *VAL = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueVa, mxDOUBLE_CLASS, mxREAL, MX_GPU_INITIALIZE_VALUES);
double *VALOUT = (double *)mxGPUGetData(VAL);
cusolverSafeCall(cusolverSpDcsrcholSolve(
handle_cusolver, numARows, d_B_dense, VALOUT, chl_info, buffer_gpu));
mxGPUDestroyGPUArray(A);
mxGPUDestroyGPUArray(ROW_A);
mxGPUDestroyGPUArray(COL_A);
mxGPUDestroyGPUArray(DB_dense);
OUTPUTMATRIX = mxGPUCreateMxArrayOnGPU(VAL);
gpuErrchk(cudaFree(buffer_gpu));
mxGPUDestroyGPUArray(VAL);
cusolverSpDestroyCsrcholInfo(chl_info);
cusparseDestroyMatDescr(descrA);
cusolverSpDestroy(handle_cusolver);
cusparseDestroy(handle);
}
else{
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments! %s\n");
}
}
//
else{
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments! %s\n");
}
}
|
e203ba8348d8db71c7d3cb17da41e3194b149d18.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <exception>
#include <sstream>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <thrust/extrema.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include "NVStrings.h"
#include "NVStringsImpl.h"
#include "../custring_view.cuh"
#include "../regex/regex.cuh"
#include "../unicode/is_flags.h"
#include "../util.h"
// Like the other regex functors, this one has two modes: size/count calculation
// and then the operation itself (findall). This minimizes the inlining of
// the regex code while not causing divergence. Makes the code a bit messy
// but build times are reduced by half since only one regex find() is inlined.
// This column version is less intense than its record counterpart.
template<size_t stack_size>
struct findall_fn
{
dreprog* prog;
custring_view_array d_strings;
int* d_counts;
bool bcompute_size_only{true};
int column;
thrust::pair<const char*,size_t>* d_indexes;
//
__device__ void operator()(unsigned int idx)
{
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
if( !bcompute_size_only && (column >= d_counts[idx]) )
return;
u_char data1[stack_size], data2[stack_size];
prog->set_stack_mem(data1,data2);
if( !bcompute_size_only )
{
d_indexes[idx].first = nullptr; // initialize to
d_indexes[idx].second = 0; // null string
}
int spos = 0, nchars = (int)dstr->chars_count();
int epos = nchars, column_count = 0;
//prog->find(idx,dstr,spos,epos);
//for( int col=0; col <= column; ++c )
while( spos <= nchars )
{
if( prog->find(idx,dstr,spos,epos) <=0 )
break;
if( !bcompute_size_only && (column_count==column) )
break;
spos = epos > spos ? epos : spos + 1;
epos = nchars;
++column_count;
//prog->find(idx,dstr,spos,epos);
}
if( bcompute_size_only )
d_counts[idx] = column_count;
else
{
// this will be the string for this column
if( spos < epos )
{
spos = dstr->byte_offset_for(spos); // convert char pos
epos = dstr->byte_offset_for(epos); // to byte offset
d_indexes[idx].first = dstr->data() + spos;
d_indexes[idx].second = (epos-spos);
}
else
{ // create empty string instead of a null one
d_indexes[idx].first = dstr->data();
}
}
}
};
// same as findall but strings are returned organized in column-major
int NVStrings::findall( const char* pattern, std::vector<NVStrings*>& results )
{
if( pattern==0 )
return -1;
unsigned int count = size();
if( count==0 )
return 0;
auto execpol = rmm::exec_policy(0);
// compile regex into device object
const char32_t* ptn32 = to_char32(pattern);
dreprog* prog = dreprog::create_from(ptn32,get_unicode_flags());
delete ptn32;
// allocate regex working memory if necessary
int regex_insts = prog->inst_counts();
if( regex_insts > MAX_STACK_INSTS )
{
if( !prog->alloc_relists(count) )
{
std::ostringstream message;
message << "nvstrings::findall: number of instructions (" << prog->inst_counts() << ") ";
message << "and number of strings (" << count << ") ";
message << "exceeds available memory";
dreprog::destroy(prog);
throw std::invalid_argument(message.str());
}
}
// compute counts of each match and size of the buffers
custring_view_array d_strings = pImpl->getStringsPtr();
rmm::device_vector<int> counts(count,0);
int* d_counts = counts.data().get();
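// Pick the stack-size specialization from the compiled instruction count:
// up to 10 instructions use the small on-stack relists, up to 100 the medium,
// larger programs the large; programs above MAX_STACK_INSTS rely on the device
// relists allocated above and so take the small-stack path as well.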
if( (regex_insts > MAX_STACK_INSTS) || (regex_insts <= 10) )
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
findall_fn<RX_STACK_SMALL>{prog, d_strings, d_counts});
else if( regex_insts <= 100 )
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
findall_fn<RX_STACK_MEDIUM>{prog, d_strings, d_counts});
else
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
findall_fn<RX_STACK_LARGE>{prog, d_strings, d_counts});
int columns = *thrust::max_element(execpol->on(0), counts.begin(), counts.end() );
// boundary case: if no columns, return one null column (issue #119)
if( columns==0 )
results.push_back(new NVStrings(count));
// create columns of nvstrings
for( int col_idx=0; col_idx < columns; ++col_idx )
{
// build index for each string -- collect pointers and lengths
rmm::device_vector< thrust::pair<const char*,size_t> > indexes(count);
thrust::pair<const char*,size_t>* d_indexes = indexes.data().get();
if( (regex_insts > MAX_STACK_INSTS) || (regex_insts <= 10) )
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
findall_fn<RX_STACK_SMALL>{prog, d_strings, d_counts, false, col_idx, d_indexes});
else if( regex_insts <= 100 )
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
findall_fn<RX_STACK_MEDIUM>{prog, d_strings, d_counts, false, col_idx, d_indexes});
else
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
findall_fn<RX_STACK_LARGE>{prog, d_strings, d_counts, false, col_idx, d_indexes});
NVStrings* column = NVStrings::create_from_index((std::pair<const char*,size_t>*)d_indexes,count);
results.push_back(column);
}
dreprog::destroy(prog);
return (unsigned int)results.size();
}
| e203ba8348d8db71c7d3cb17da41e3194b149d18.cu | /*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <exception>
#include <sstream>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <thrust/extrema.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include "NVStrings.h"
#include "NVStringsImpl.h"
#include "../custring_view.cuh"
#include "../regex/regex.cuh"
#include "../unicode/is_flags.h"
#include "../util.h"
// Like the other regex functors, this one has two modes: size/count calculation
// and then the operation itself (findall). This minimizes the inlining of
// the regex code while not causing divergence. Makes the code a bit messy
// but build times are reduced by half since only one regex find() is inlined.
// This column version is less intense than its record counterpart.
template<size_t stack_size>
struct findall_fn
{
dreprog* prog;
custring_view_array d_strings;
int* d_counts;
bool bcompute_size_only{true};
int column;
thrust::pair<const char*,size_t>* d_indexes;
//
__device__ void operator()(unsigned int idx)
{
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
if( !bcompute_size_only && (column >= d_counts[idx]) )
return;
u_char data1[stack_size], data2[stack_size];
prog->set_stack_mem(data1,data2);
if( !bcompute_size_only )
{
d_indexes[idx].first = nullptr; // initialize to
d_indexes[idx].second = 0; // null string
}
int spos = 0, nchars = (int)dstr->chars_count();
int epos = nchars, column_count = 0;
//prog->find(idx,dstr,spos,epos);
//for( int col=0; col <= column; ++c )
while( spos <= nchars )
{
if( prog->find(idx,dstr,spos,epos) <=0 )
break;
if( !bcompute_size_only && (column_count==column) )
break;
spos = epos > spos ? epos : spos + 1;
epos = nchars;
++column_count;
//prog->find(idx,dstr,spos,epos);
}
if( bcompute_size_only )
d_counts[idx] = column_count;
else
{
// this will be the string for this column
if( spos < epos )
{
spos = dstr->byte_offset_for(spos); // convert char pos
epos = dstr->byte_offset_for(epos); // to byte offset
d_indexes[idx].first = dstr->data() + spos;
d_indexes[idx].second = (epos-spos);
}
else
{ // create empty string instead of a null one
d_indexes[idx].first = dstr->data();
}
}
}
};
// same as findall but strings are returned organized in column-major
int NVStrings::findall( const char* pattern, std::vector<NVStrings*>& results )
{
if( pattern==0 )
return -1;
unsigned int count = size();
if( count==0 )
return 0;
auto execpol = rmm::exec_policy(0);
// compile regex into device object
const char32_t* ptn32 = to_char32(pattern);
dreprog* prog = dreprog::create_from(ptn32,get_unicode_flags());
delete ptn32;
// allocate regex working memory if necessary
int regex_insts = prog->inst_counts();
if( regex_insts > MAX_STACK_INSTS )
{
if( !prog->alloc_relists(count) )
{
std::ostringstream message;
message << "nvstrings::findall: number of instructions (" << prog->inst_counts() << ") ";
message << "and number of strings (" << count << ") ";
message << "exceeds available memory";
dreprog::destroy(prog);
throw std::invalid_argument(message.str());
}
}
// compute counts of each match and size of the buffers
custring_view_array d_strings = pImpl->getStringsPtr();
rmm::device_vector<int> counts(count,0);
int* d_counts = counts.data().get();
if( (regex_insts > MAX_STACK_INSTS) || (regex_insts <= 10) )
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
findall_fn<RX_STACK_SMALL>{prog, d_strings, d_counts});
else if( regex_insts <= 100 )
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
findall_fn<RX_STACK_MEDIUM>{prog, d_strings, d_counts});
else
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
findall_fn<RX_STACK_LARGE>{prog, d_strings, d_counts});
int columns = *thrust::max_element(execpol->on(0), counts.begin(), counts.end() );
// boundary case: if no columns, return one null column (issue #119)
if( columns==0 )
results.push_back(new NVStrings(count));
// create columns of nvstrings
for( int col_idx=0; col_idx < columns; ++col_idx )
{
// build index for each string -- collect pointers and lengths
rmm::device_vector< thrust::pair<const char*,size_t> > indexes(count);
thrust::pair<const char*,size_t>* d_indexes = indexes.data().get();
if( (regex_insts > MAX_STACK_INSTS) || (regex_insts <= 10) )
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
findall_fn<RX_STACK_SMALL>{prog, d_strings, d_counts, false, col_idx, d_indexes});
else if( regex_insts <= 100 )
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
findall_fn<RX_STACK_MEDIUM>{prog, d_strings, d_counts, false, col_idx, d_indexes});
else
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
findall_fn<RX_STACK_LARGE>{prog, d_strings, d_counts, false, col_idx, d_indexes});
NVStrings* column = NVStrings::create_from_index((std::pair<const char*,size_t>*)d_indexes,count);
results.push_back(column);
}
dreprog::destroy(prog);
return (unsigned int)results.size();
}
|
302dfa11cdda431fa8299bc068c49da0a767b3b8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2019 Yan Yan
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <ATen/ATen.h>
#include <utils/spconv/spconv/indice.h>
#include <utils/spconv/spconv/mp_helper.h>
#include <utils/spconv/tensorview/helper_launch.h>
#include <utils/spconv/tensorview/tensorview.h>
#include <chrono>
#include <limits>
#include <spconv/indice.cuh>
#include <type_traits>
#include "../spconv_utils.h"
#include "pytorch_cuda_helper.hpp"
namespace functor {
template <typename Index, typename IndexGrid, unsigned NDim>
struct CreateConvIndicePairFunctorP1<tv::TorchGPU, Index, IndexGrid, NDim> {
Index operator()(const tv::TorchGPU &d, tv::TensorView<const Index> indicesIn,
tv::TensorView<Index> indicesOut,
tv::TensorView<IndexGrid> gridsOut,
tv::TensorView<Index> indicePairs,
tv::TensorView<Index> indiceNum,
tv::TensorView<Index> indicePairUnique,
const tv::SimpleVector<Index, NDim> kernelSize,
const tv::SimpleVector<Index, NDim> stride,
const tv::SimpleVector<Index, NDim> padding,
const tv::SimpleVector<Index, NDim> dilation,
const tv::SimpleVector<Index, NDim> outSpatialShape,
bool transpose) {
Index batchSize = gridsOut.dim(0);
auto numActIn = indicesIn.dim(0);
if (numActIn == 0) return 0;
if (transpose)
hipLaunchKernelGGL(( prepareDeConvIndicePairsKernel<Index, IndexGrid, NDim, 4096>)
, dim3(tv::launch::getBlocks(numActIn)), dim3(tv::launch::CUDA_NUM_THREADS), 0,
d.getStream(), indicesIn, indicesOut, gridsOut, indicePairs,
indiceNum, indicePairUnique, kernelSize, stride,
padding, dilation, outSpatialShape);
else
hipLaunchKernelGGL(( prepareIndicePairsKernel<Index, IndexGrid, NDim, 4096>)
, dim3(tv::launch::getBlocks(numActIn)), dim3(tv::launch::CUDA_NUM_THREADS), 0,
d.getStream(), indicesIn, indicesOut, gridsOut, indicePairs,
indiceNum, indicePairUnique, kernelSize, stride,
padding, dilation, outSpatialShape);
TV_CHECK_CUDA_ERR();
return 1;
}
};
template <typename Index, typename IndexGrid, unsigned NDim>
struct CreateConvIndicePairFunctorP2<tv::TorchGPU, Index, IndexGrid, NDim> {
Index operator()(const tv::TorchGPU &d, tv::TensorView<const Index> indicesIn,
tv::TensorView<Index> indicesOut,
tv::TensorView<IndexGrid> gridsOut,
tv::TensorView<Index> indicePairs,
tv::TensorView<Index> indiceNum,
tv::TensorView<Index> indicePairUnique,
const tv::SimpleVector<Index, NDim> outSpatialShape,
bool transpose, bool resetGrid) {
Index batchSize = gridsOut.dim(0);
auto kernelVolume = indicePairs.dim(0);
auto numActIn = indicesIn.dim(0);
if (numActIn == 0) return 0;
Index numAct = indicePairUnique.dim(0) - 1;
hipLaunchKernelGGL(( assignGridAndIndiceOutKernel<Index, IndexGrid, NDim>)
, dim3(tv::launch::getBlocks(numAct)), dim3(tv::launch::CUDA_NUM_THREADS), 0,
d.getStream(), indicesOut, gridsOut, numAct, indicePairs,
indicePairUnique, outSpatialShape, batchSize);
TV_CHECK_CUDA_ERR();
hipLaunchKernelGGL(( assignIndicePairsKernel<Index, IndexGrid, NDim>)
, dim3(tv::launch::getBlocks(numActIn)), dim3(tv::launch::CUDA_NUM_THREADS), 0,
d.getStream(), indicesOut, gridsOut, numActIn, indicePairs,
indicePairUnique, outSpatialShape);
TV_CHECK_CUDA_ERR();
if (resetGrid) {
hipLaunchKernelGGL(( resetGridKernel<Index, IndexGrid, NDim>)
, dim3(tv::launch::getBlocks(numAct)), dim3(tv::launch::CUDA_NUM_THREADS), 0,
d.getStream(), indicePairUnique.data(), gridsOut, numAct);
TV_CHECK_CUDA_ERR();
}
return numAct;
}
};
template <typename Index, typename IndexGrid, unsigned NDim>
struct CreateSubMIndicePairFunctor<tv::TorchGPU, Index, IndexGrid, NDim> {
Index operator()(const tv::TorchGPU &d, tv::TensorView<const Index> indicesIn,
tv::TensorView<IndexGrid> gridsOut,
tv::TensorView<Index> indicePairs,
tv::TensorView<Index> indiceNum,
const tv::SimpleVector<Index, NDim> kernelSize,
const tv::SimpleVector<Index, NDim> stride,
const tv::SimpleVector<Index, NDim> padding,
const tv::SimpleVector<Index, NDim> dilation,
const tv::SimpleVector<Index, NDim> outSpatialShape,
bool transpose, bool resetGrid) {
auto numActIn = indicesIn.dim(0);
if (numActIn == 0) return 0;
hipLaunchKernelGGL(( prepareSubMGridKernel<Index, IndexGrid, NDim>)
, dim3(tv::launch::getBlocks(numActIn)), dim3(tv::launch::CUDA_NUM_THREADS), 0,
d.getStream(), indicesIn, gridsOut, outSpatialShape);
TV_CHECK_CUDA_ERR();
hipLaunchKernelGGL(( getSubMIndicePairsKernel<Index, IndexGrid, NDim, 4096>)
, dim3(tv::launch::getBlocks(numActIn)), dim3(tv::launch::CUDA_NUM_THREADS), 0,
d.getStream(), indicesIn, gridsOut, indicePairs, indiceNum,
kernelSize, stride, padding, dilation,
outSpatialShape);
TV_CHECK_CUDA_ERR();
if (resetGrid) {
hipLaunchKernelGGL(( resetGridSubMKernel<Index, IndexGrid, NDim>)
, dim3(tv::launch::getBlocks(numActIn)), dim3(tv::launch::CUDA_NUM_THREADS), 0,
d.getStream(), indicesIn.data(), gridsOut, outSpatialShape,
numActIn);
TV_CHECK_CUDA_ERR();
}
return numActIn;
}
};
} // namespace functor
#define DECLARE_GPU_SPECS_INDEX_NDIM(Index, NDIM) \
template struct functor::CreateConvIndicePairFunctor<tv::TorchGPU, Index, \
int, NDIM>; \
template struct functor::CreateConvIndicePairFunctorP1<tv::TorchGPU, Index, \
int, NDIM>; \
template struct functor::CreateConvIndicePairFunctorP2<tv::TorchGPU, Index, \
int, NDIM>; \
template struct functor::CreateSubMIndicePairFunctor<tv::TorchGPU, Index, \
int, NDIM>;
#define DECLARE_GPU_INDEX(Index) \
DECLARE_GPU_SPECS_INDEX_NDIM(Index, 1); \
DECLARE_GPU_SPECS_INDEX_NDIM(Index, 2); \
DECLARE_GPU_SPECS_INDEX_NDIM(Index, 3); \
DECLARE_GPU_SPECS_INDEX_NDIM(Index, 4);
DECLARE_GPU_INDEX(int);
#undef DECLARE_GPU_INDEX
#undef DECLARE_GPU_SPECS_INDEX_NDIM
| 302dfa11cdda431fa8299bc068c49da0a767b3b8.cu | // Copyright 2019 Yan Yan
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <ATen/ATen.h>
#include <utils/spconv/spconv/indice.h>
#include <utils/spconv/spconv/mp_helper.h>
#include <utils/spconv/tensorview/helper_launch.h>
#include <utils/spconv/tensorview/tensorview.h>
#include <chrono>
#include <limits>
#include <spconv/indice.cuh>
#include <type_traits>
#include "../spconv_utils.h"
#include "pytorch_cuda_helper.hpp"
namespace functor {
template <typename Index, typename IndexGrid, unsigned NDim>
struct CreateConvIndicePairFunctorP1<tv::TorchGPU, Index, IndexGrid, NDim> {
Index operator()(const tv::TorchGPU &d, tv::TensorView<const Index> indicesIn,
tv::TensorView<Index> indicesOut,
tv::TensorView<IndexGrid> gridsOut,
tv::TensorView<Index> indicePairs,
tv::TensorView<Index> indiceNum,
tv::TensorView<Index> indicePairUnique,
const tv::SimpleVector<Index, NDim> kernelSize,
const tv::SimpleVector<Index, NDim> stride,
const tv::SimpleVector<Index, NDim> padding,
const tv::SimpleVector<Index, NDim> dilation,
const tv::SimpleVector<Index, NDim> outSpatialShape,
bool transpose) {
Index batchSize = gridsOut.dim(0);
auto numActIn = indicesIn.dim(0);
if (numActIn == 0) return 0;
if (transpose)
prepareDeConvIndicePairsKernel<Index, IndexGrid, NDim, 4096>
<<<tv::launch::getBlocks(numActIn), tv::launch::CUDA_NUM_THREADS, 0,
d.getStream()>>>(indicesIn, indicesOut, gridsOut, indicePairs,
indiceNum, indicePairUnique, kernelSize, stride,
padding, dilation, outSpatialShape);
else
prepareIndicePairsKernel<Index, IndexGrid, NDim, 4096>
<<<tv::launch::getBlocks(numActIn), tv::launch::CUDA_NUM_THREADS, 0,
d.getStream()>>>(indicesIn, indicesOut, gridsOut, indicePairs,
indiceNum, indicePairUnique, kernelSize, stride,
padding, dilation, outSpatialShape);
TV_CHECK_CUDA_ERR();
return 1;
}
};
template <typename Index, typename IndexGrid, unsigned NDim>
struct CreateConvIndicePairFunctorP2<tv::TorchGPU, Index, IndexGrid, NDim> {
Index operator()(const tv::TorchGPU &d, tv::TensorView<const Index> indicesIn,
tv::TensorView<Index> indicesOut,
tv::TensorView<IndexGrid> gridsOut,
tv::TensorView<Index> indicePairs,
tv::TensorView<Index> indiceNum,
tv::TensorView<Index> indicePairUnique,
const tv::SimpleVector<Index, NDim> outSpatialShape,
bool transpose, bool resetGrid) {
Index batchSize = gridsOut.dim(0);
auto kernelVolume = indicePairs.dim(0);
auto numActIn = indicesIn.dim(0);
if (numActIn == 0) return 0;
Index numAct = indicePairUnique.dim(0) - 1;
assignGridAndIndiceOutKernel<Index, IndexGrid, NDim>
<<<tv::launch::getBlocks(numAct), tv::launch::CUDA_NUM_THREADS, 0,
d.getStream()>>>(indicesOut, gridsOut, numAct, indicePairs,
indicePairUnique, outSpatialShape, batchSize);
TV_CHECK_CUDA_ERR();
assignIndicePairsKernel<Index, IndexGrid, NDim>
<<<tv::launch::getBlocks(numActIn), tv::launch::CUDA_NUM_THREADS, 0,
d.getStream()>>>(indicesOut, gridsOut, numActIn, indicePairs,
indicePairUnique, outSpatialShape);
TV_CHECK_CUDA_ERR();
if (resetGrid) {
resetGridKernel<Index, IndexGrid, NDim>
<<<tv::launch::getBlocks(numAct), tv::launch::CUDA_NUM_THREADS, 0,
d.getStream()>>>(indicePairUnique.data(), gridsOut, numAct);
TV_CHECK_CUDA_ERR();
}
return numAct;
}
};
template <typename Index, typename IndexGrid, unsigned NDim>
struct CreateSubMIndicePairFunctor<tv::TorchGPU, Index, IndexGrid, NDim> {
Index operator()(const tv::TorchGPU &d, tv::TensorView<const Index> indicesIn,
tv::TensorView<IndexGrid> gridsOut,
tv::TensorView<Index> indicePairs,
tv::TensorView<Index> indiceNum,
const tv::SimpleVector<Index, NDim> kernelSize,
const tv::SimpleVector<Index, NDim> stride,
const tv::SimpleVector<Index, NDim> padding,
const tv::SimpleVector<Index, NDim> dilation,
const tv::SimpleVector<Index, NDim> outSpatialShape,
bool transpose, bool resetGrid) {
auto numActIn = indicesIn.dim(0);
if (numActIn == 0) return 0;
prepareSubMGridKernel<Index, IndexGrid, NDim>
<<<tv::launch::getBlocks(numActIn), tv::launch::CUDA_NUM_THREADS, 0,
d.getStream()>>>(indicesIn, gridsOut, outSpatialShape);
TV_CHECK_CUDA_ERR();
getSubMIndicePairsKernel<Index, IndexGrid, NDim, 4096>
<<<tv::launch::getBlocks(numActIn), tv::launch::CUDA_NUM_THREADS, 0,
d.getStream()>>>(indicesIn, gridsOut, indicePairs, indiceNum,
kernelSize, stride, padding, dilation,
outSpatialShape);
TV_CHECK_CUDA_ERR();
if (resetGrid) {
resetGridSubMKernel<Index, IndexGrid, NDim>
<<<tv::launch::getBlocks(numActIn), tv::launch::CUDA_NUM_THREADS, 0,
d.getStream()>>>(indicesIn.data(), gridsOut, outSpatialShape,
numActIn);
TV_CHECK_CUDA_ERR();
}
return numActIn;
}
};
} // namespace functor
#define DECLARE_GPU_SPECS_INDEX_NDIM(Index, NDIM) \
template struct functor::CreateConvIndicePairFunctor<tv::TorchGPU, Index, \
int, NDIM>; \
template struct functor::CreateConvIndicePairFunctorP1<tv::TorchGPU, Index, \
int, NDIM>; \
template struct functor::CreateConvIndicePairFunctorP2<tv::TorchGPU, Index, \
int, NDIM>; \
template struct functor::CreateSubMIndicePairFunctor<tv::TorchGPU, Index, \
int, NDIM>;
#define DECLARE_GPU_INDEX(Index) \
DECLARE_GPU_SPECS_INDEX_NDIM(Index, 1); \
DECLARE_GPU_SPECS_INDEX_NDIM(Index, 2); \
DECLARE_GPU_SPECS_INDEX_NDIM(Index, 3); \
DECLARE_GPU_SPECS_INDEX_NDIM(Index, 4);
DECLARE_GPU_INDEX(int);
#undef DECLARE_GPU_INDEX
#undef DECLARE_GPU_SPECS_INDEX_NDIM
|
7c446dca1da8c0801eb5c8818736e71cc471b9e8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
//#include <sys/time.h>
#include <time.h>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <cmath>
#include <algorithm>
using std::cout;
using std::endl;
using std::cerr;
#define DECLINE_HORIZONTAL 0.1
#define DECLINE_VERTICAL 0.1
#define STEPS 1000 /* number of time steps */
/* error check on hip macro */
#define CUDA_CHECK(command) \
{ \
hipError_t status = command; \
if(status != hipSuccess) \
{ \
cerr << "Error : Cuda reports " << hipGetErrorString(status) << endl; \
heatCleanExit(-6); \
} \
}
/* Device(GPU) FUNCTIONS TO BE LAUNCHED AS KERNELS FROM HOST(CPU) */
/* This function is used to discover the local working area of each thread */
__global__ void heatDiscover(int * __restrict__ workRowS,
int * __restrict__ workRowE,
int * __restrict__ workColS,
int * __restrict__ workColE,
const int blockRows, const int blockCols,
const int threadsPerRow, const int threadsPerCol,
const int probCase,
const int gridRows, const int gridCols)
{
const int thread_Id = blockIdx.x * blockDim.x + threadIdx.x;
if(probCase == 1)//2 threads
{
if(thread_Id == 0)//west thread
{
workRowS[thread_Id] = 1;
workRowE[thread_Id] = blockRows - 2;
workColS[thread_Id] = 1;
workColE[thread_Id] = blockCols - 1;
}
else//east thread
{
workRowS[thread_Id] = 1;
workRowE[thread_Id] = blockRows - 2;
workColS[thread_Id] = blockCols;
workColE[thread_Id] = gridCols - 2;
}
}
else if(probCase == 2)//6, 8, 10, ... OR 4, 16, 64 ... threads
{
if(thread_Id == 0)//NW corner
{
workRowS[thread_Id] = 1;
workRowE[thread_Id] = blockRows - 1;
workColS[thread_Id] = 1;
workColE[thread_Id] = blockCols - 1;
}
else if(thread_Id == (threadsPerCol - 1))//NE corner
{
workRowS[thread_Id] = 1;
workRowE[thread_Id] = blockRows - 1;
workColS[thread_Id] = gridCols - blockCols;
workColE[thread_Id] = gridCols - 2;
}
else if(thread_Id == ((threadsPerRow * threadsPerCol) - threadsPerCol))//SW corner
{
workRowS[thread_Id] = gridRows - blockRows;
workRowE[thread_Id] = gridRows - 2;
workColS[thread_Id] = 1;
workColE[thread_Id] = blockCols - 1;
}
else if(thread_Id == ((threadsPerCol * threadsPerRow) - 1))//SE corner
{
workRowS[thread_Id] = gridRows - blockRows;
workRowE[thread_Id] = gridRows - 2;
workColS[thread_Id] = gridCols - blockCols;
workColE[thread_Id] = gridCols - 2;
}
else if(thread_Id < threadsPerCol)//NN side
{
workRowS[thread_Id] = 1;
workRowE[thread_Id] = blockRows - 1;
workColS[thread_Id] = thread_Id * blockCols;
workColE[thread_Id] = workColS[thread_Id] + blockCols - 1;
}
else if((thread_Id > ((threadsPerCol * threadsPerRow) - threadsPerCol)) &&
(thread_Id < ((threadsPerCol * threadsPerRow) - 1)))//SS side
{
workRowS[thread_Id] = gridRows - blockRows;
workRowE[thread_Id] = gridRows - 2;
workColS[thread_Id] = (thread_Id % threadsPerCol) * blockCols;
workColE[thread_Id] = workColS[thread_Id] + blockCols - 1;
}
else if((thread_Id % threadsPerCol) == 0)//WW side
{
workRowS[thread_Id] = (thread_Id / threadsPerCol) * blockRows;
workRowE[thread_Id] = workRowS[thread_Id] + blockRows - 1;
workColS[thread_Id] = 1;
workColE[thread_Id] = blockCols - 1;
}
else if((thread_Id + 1) % threadsPerCol == 0)//EE side
{
workRowS[thread_Id] = ((thread_Id + 1 - threadsPerCol) / threadsPerCol) *
blockRows;
workRowE[thread_Id] = workRowS[thread_Id] + blockRows - 1;
workColS[thread_Id] = gridCols - blockCols;
workColE[thread_Id] = gridCols - 2;
}
else//general case middle location
{
int rowMarginS = threadsPerCol;
int rowMarginE = rowMarginS + threadsPerCol - 1;
int rowOffset = 1;
while(1)
{
if((thread_Id > rowMarginS) && (thread_Id < rowMarginE))
{
workRowS[thread_Id] = rowOffset * blockRows;
break;
}
else
{
rowMarginS += threadsPerCol;
rowMarginE += threadsPerCol;
++rowOffset;
}
}
workRowE[thread_Id] = workRowS[thread_Id] + blockRows - 1;
workColS[thread_Id] = (thread_Id % threadsPerCol) * blockCols;
workColE[thread_Id] = workColS[thread_Id] + blockCols - 1;
}
}
}
/* This function updates the grid and is invoked on serial executions */
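/* Concretely, the update below is the explicit finite-difference step
       T_new(i,j) = T_old(i,j)
                  + Cx * ( T_old(i+1,j) + T_old(i-1,j) - 2*T_old(i,j) )
                  + Cy * ( T_old(i,j+1) + T_old(i,j-1) - 2*T_old(i,j) )
   with Cx = DECLINE_HORIZONTAL and Cy = DECLINE_VERTICAL (both 0.1 here),
   evaluated on interior points only; the one-cell border keeps its initial 0. */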
__global__ void heatUpdateSerial(const double * __restrict__ devOldHeatGrid,
double * __restrict__ devNewHeatGrid,
const int gridRows, const int gridCols)
{
for(int i = 1; i < gridRows - 1; ++i)
{
for(int j = 1; j < gridCols - 1; ++j)
{
devNewHeatGrid[i*gridCols+j] = devOldHeatGrid[i*gridCols+j] +
DECLINE_HORIZONTAL *
(devOldHeatGrid[(i+1)*gridCols+j] +
devOldHeatGrid[(i-1)*gridCols+j] -
(2 * devOldHeatGrid[i*gridCols+j])) +
DECLINE_VERTICAL *
(devOldHeatGrid[i*gridCols+j+1] +
devOldHeatGrid[i*gridCols+j-1] -
(2 * devOldHeatGrid[i*gridCols+j]));
}
}
}
/* This function updates the grid and is invoked on parallel executions */
__global__ void heatUpdateParallel(const double * __restrict__ devOldHeatGrid,
double * __restrict__ devNewHeatGrid,
const int * __restrict__ workRowS,
const int * __restrict__ workRowE,
const int * __restrict__ workColS,
const int * __restrict__ workColE,
const int gridCols)
{
const int thread_Id = blockIdx.x * blockDim.x + threadIdx.x;
//get the borders in registers for 1 cycle memory access
const int wRowS = workRowS[thread_Id];
const int wRowE = workRowE[thread_Id];
const int wColS = workColS[thread_Id];
const int wColE = workColE[thread_Id];
for(int i = wRowS; i <= wRowE; ++i)
{
for(int j = wColS; j <= wColE; ++j)
{
devNewHeatGrid[i*gridCols+j] = devOldHeatGrid[i*gridCols+j] +
DECLINE_HORIZONTAL *
(devOldHeatGrid[(i+1)*gridCols+j] +
devOldHeatGrid[(i-1)*gridCols+j] -
(2*devOldHeatGrid[i*gridCols+j])) +
DECLINE_VERTICAL *
(devOldHeatGrid[i*gridCols+j+1] +
devOldHeatGrid[i*gridCols+j-1] -
(2*devOldHeatGrid[i*gridCols+j]));
}
}
}
//declare dynamic variables
double * heatGrid = nullptr;
double * devNewHeatGrid = nullptr;
double * devOldHeatGrid = nullptr;
int * workRowS = nullptr;
int * workRowE = nullptr;
int * workColS = nullptr;
int * workColE = nullptr;
int * devWorkRowS = nullptr;
int * devWorkRowE = nullptr;
int * devWorkColS = nullptr;
int * devWorkColE = nullptr;
/* HOST(CPU) FUNCTIONS */
/* This function initialises the temperature on the given grid with higher
temperatures at the centre, progressively lower ones towards the sides
and 0s at the perimeter
*/
static inline void heatInit(double * heatGrid,
const int gridRows, const int gridCols)
{
for(int i = 0; i < gridRows; ++i)//avoid halo area
for(int j = 0; j < gridCols; ++j)
heatGrid[i*gridCols+j] = (double) (i * (gridRows - i - 1) * j * (gridCols - j - 1));
}
/* This function writes out the input grid to a .dat file in current path */
static inline int heatWrite(const double * heatGrid, const int flag,
const int gridRows, const int gridCols,
const int threadsPerBlock, const int blocksPerGrid)
{
char filePath[70] = "";
if(flag == 0)
{
sprintf(filePath, "%d_%d_cuda_%d_%d_Initial.dat", gridRows, gridCols,
threadsPerBlock, blocksPerGrid);
}
else
{
sprintf(filePath, "%d_%d_cuda_%d_%d_Final.dat", gridRows, gridCols,
threadsPerBlock, blocksPerGrid);
}
FILE * fp = fopen(filePath, "w");
if(fp == nullptr)
return -1;
for(int i = 0; i < gridRows; ++i)
{
for(int j = 0; j < gridCols; ++j)
{
fprintf(fp, "%-.1lf", fabs(heatGrid[i*gridCols+j]));//some 0.0s appear as -0.0s
if(j != (gridCols - 1))
fprintf(fp, " ");
}
fprintf(fp, "\n");
}
fclose(fp);
return 0;//all ok
}
/* This function swaps between the 2 grids to avoid assignments */
static inline void heatSwap(double ** a, double ** b)
{
double *temp = *a;
*a = *b;
*b = temp;
}
/* This function cleans up memory to prevent leaks on any exit error */
static inline void heatCleanExit(const int errorCode)
{
if(heatGrid != nullptr)
{
free(heatGrid);
heatGrid = nullptr;
}
if(devOldHeatGrid != nullptr)
{
CUDA_CHECK(hipFree(devOldHeatGrid));
devOldHeatGrid = nullptr;
}
if(devNewHeatGrid != nullptr)
{
CUDA_CHECK(hipFree(devNewHeatGrid));
devNewHeatGrid = nullptr;
}
if(workRowS != nullptr)
{
free(workRowS);
workRowS = nullptr;
}
if(devWorkRowS != nullptr)
{
CUDA_CHECK(hipFree(devWorkRowS));
devWorkRowS = nullptr;
}
if(workRowE != nullptr)
{
free(workRowE);
workRowE = nullptr;
}
if(devWorkRowE != nullptr)
{
CUDA_CHECK(hipFree(devWorkRowE));
devWorkRowE = nullptr;
}
if(workColS != nullptr)
{
free(workColS);
workColS = nullptr;
}
if(devWorkColS != nullptr)
{
CUDA_CHECK(hipFree(devWorkColS));
devWorkColS = nullptr;
}
if(workColE != nullptr)
{
free(workColE);
workColE = nullptr;
}
if(devWorkColE != nullptr)
{
CUDA_CHECK(hipFree(devWorkColE));
devWorkColE = nullptr;
}
exit(errorCode);
}
/* Main program function */
int main(int argc, char *argv[])
{
//get the properties
hipDeviceProp_t deviceProp;
CUDA_CHECK(hipGetDeviceProperties(&deviceProp, 0));
cout << "GPU PROPERTIES\n";
cout << "******************************************************************\n";
cout << "Cuda Device prop succeeded" << endl;
cout << "System minor " << deviceProp.minor << endl;
cout << "System major " << deviceProp.major << endl;
cout << "Agent Prop Name " << deviceProp.name << endl;
cout << "Total Global Memory " << deviceProp.totalGlobalMem << " bytes\n";
cout << "Shared Memory Per Block " << deviceProp.sharedMemPerBlock << " bytes\n";
cout << "Registers per block " << deviceProp.regsPerBlock << endl;
cout << "Warp size " << deviceProp.warpSize << endl;
cout << "Max Threads Per Block " << deviceProp.maxThreadsPerBlock << endl;
cout << "Max clock frequency of the multiProcessors " << deviceProp.clockRate << " kHz\n";
cout << "Size of shared memory region " << deviceProp.totalConstMem << " bytes\n";
cout << "Number of multi-processors (compute units) " << deviceProp.multiProcessorCount << endl;
cout << "******************************************************************\n\n";
//get properties to check on input data possible run scenarios
const int devMaxThreadsPerBlock = deviceProp.maxThreadsPerBlock;
const int devMaxConcurrentThreads = deviceProp.multiProcessorCount *
deviceProp.warpSize;
//get the command line input data and do initial checks
if(argc != 5)
{
cerr << "Not enough input data, need 4\n";
cerr << "Grid_Rows Grid_Columns Threads_Per_Block Blocks_Per_Grid\n";
cerr << "Aborting...\n";
heatCleanExit(-1);
}
const int gridRows = atoi(argv[1]);
const int gridCols = atoi(argv[2]);
const int threadsPerBlock = atoi(argv[3]);
const int blocksPerGrid = atoi(argv[4]);
const int gridSize = gridRows * gridCols;
const int totalThreads = threadsPerBlock * blocksPerGrid;
if(gridRows < 0 || gridCols < 0 || threadsPerBlock < 1 || blocksPerGrid < 1)
{
cerr << "Invalid Input Data\n";
cerr << "Grid Rows = " << gridRows << endl;
cerr << "Grid Cols = " << gridCols << endl;
cerr << "Threads Per Block = " << threadsPerBlock << endl;
cerr << "Blocks Per Grid = " << blocksPerGrid << endl;
cerr << "Aborting...\n";
heatCleanExit(-1);
}
//do checks based on device(GPU) capabilities
if(threadsPerBlock > devMaxThreadsPerBlock)
{
cerr << "Maximum threads per block exceeded for current device\n";
cerr << "Aborting...\n";
heatCleanExit(-2);
}
if(totalThreads > devMaxConcurrentThreads)
{
cerr << "Maximum concurrent threads exceeded for current device\n";
cerr << "Aborting...\n";
heatCleanExit(-2);
}
//allocate host(CPU) memory
heatGrid = (double *) malloc(gridSize * sizeof(double));
if(heatGrid == nullptr)
{
cerr << "Error, not enough memory...\nAborting...\n";
heatCleanExit(1);
}
//initialise with 0.0s the heat grid
for(int i = 0; i < gridRows; ++i)
for(int j = 0; j < gridCols; ++j)
heatGrid[i*gridCols+j] = 0.0;
//allocate device(GPU) global memory
CUDA_CHECK(hipMalloc((void **)&devNewHeatGrid, gridSize * sizeof(double)));
//transfer data from host(CPU) to device(GPU) memory
CUDA_CHECK(hipMemcpy(devNewHeatGrid, heatGrid, gridSize * sizeof(double),
hipMemcpyHostToDevice));
//initialise the heat grid with actual data
heatInit(heatGrid, gridRows, gridCols);
//allocate device(GPU) global memory
CUDA_CHECK(hipMalloc((void **)&devOldHeatGrid, gridSize * sizeof(double)));
//transfer data from host(CPU) to device(GPU) memory
CUDA_CHECK(hipMemcpy(devOldHeatGrid, heatGrid, gridSize * sizeof(double),
hipMemcpyHostToDevice));
//write out the initial grid to the corresponding file
// if(heatWrite(heatGrid, 0, gridRows, gridCols,
// threadsPerBlock, blocksPerGrid) == -1)
// {
// cerr << "Error, could not create the initial file...\nAborting...\n";
// heatCleanExit(2);
// }
//calculate the kernel dimensions (x,y,z) threads/block and blocks/grid
dim3 cudaThreads(threadsPerBlock, 1, 1);
dim3 cudaBlocks(blocksPerGrid, 1 ,1);
//define the timer structs to be used
// struct timespec start;
// struct timespec end;
// double totalTime = 0.0;
if(totalThreads == 1)//serial execution
{
cout << "Serial execution with 1 cuda thread\n";
//start the timer
//clock_gettime(CLOCK_MONOTONIC, &start);
clock_t begin = clock();
//solve the problem
for(int steps = 0; steps < STEPS; ++steps)
{
//launch the kernel
hipLaunchKernelGGL(( heatUpdateSerial), dim3(cudaBlocks), dim3(cudaThreads), 0, 0, devOldHeatGrid, devNewHeatGrid,
gridRows, gridCols);
//wait for device(GPU) to finish its work
CUDA_CHECK(hipDeviceSynchronize());
//old = new
heatSwap(&devNewHeatGrid, &devOldHeatGrid);
}
//stop the timer and print the result
//clock_gettime(CLOCK_MONOTONIC, &end);
clock_t end = clock();
double totalTime = (double)(end - begin) / CLOCKS_PER_SEC;
//totalTime = ((end.tv_sec - start.tv_sec) * 1000.0) +
// ((end.tv_nsec - start.tv_nsec) / 1000000.0);
cout << "\nElapsed time was " << totalTime << " s\n";
}
else//parallel execution
{
//initial check on number of threads
if(totalThreads % 2 != 0)
{
cout << "Can't partition grid fairly with odd number of threads = "
<< totalThreads
<< "\nAborting...\n";
heatCleanExit(4);
}
cout << "Parallel execution with ";
cout << "Threads Per Block : " << threadsPerBlock << endl;
cout << "Blocks Per Grid : " << blocksPerGrid << endl;
cout << "Total Cuda Threads : " << totalThreads << endl;
//allocate arrays for neighbour discovery
//working border rows
workRowS = (int *) malloc(totalThreads * sizeof(int));
if(workRowS == nullptr)
{
cerr << "Error, not enough memory...\nAborting...\n";
heatCleanExit(5);
}
workRowE = (int *) malloc(totalThreads * sizeof(int));
if(workRowE == nullptr)
{
cerr << "Error, not enough memory...\nAborting...\n";
heatCleanExit(5);
}
//working border columns
workColS = (int *) malloc(totalThreads * sizeof(int));
if(workColS == nullptr)
{
cerr << "Error, not enough memory...\nAborting...\n";
heatCleanExit(5);
}
workColE = (int *) malloc(totalThreads * sizeof(int));
if(workColE == nullptr)
{
cerr << "Error, not enough memory...\nAborting...\n";
heatCleanExit(5);
}
int blockRows = 0;//total rows for each block of threads data block
int blockCols = 0;//total cols for each block of threads data block
int threadsPerRow = 0, threadsPerCol = 0;//vertical and horizontal distrib
//classify problem cases based on total threads
int cut = (int) sqrt(totalThreads);
double cutF = sqrt(totalThreads);
int probCase;
if(totalThreads == 2)//case 1 : handling 2 threads
{
blockRows = gridRows;
blockCols = gridCols / totalThreads;
threadsPerRow = gridRows / blockRows;
threadsPerCol = gridCols / blockCols;
if((threadsPerRow * threadsPerCol) != totalThreads)
{
cout << "Grid partitioning leaves a remainder...\nAborting...\n";
heatCleanExit(6);
}
probCase = 1;
}
else if(cutF > (double) cut)//case 2.1 : handling 6, 8, 10, ... threads
{
if(gridSize % totalThreads != 0)//can't cut it without remains
{
cout << "Grid partitioning leaves a remainder...\nAborting...\n";
heatCleanExit(7);
}
const int localProbSize = gridSize / totalThreads;
int spread = gridSize;
//find the best possible partition
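//(the loop below keeps, among all blockRows x blockCols pairs whose product is
// localProbSize and which divide the grid evenly, the most square one, i.e. the
// pair with the smallest |blockRows - blockCols|)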
for(int i = gridRows; i > 0; --i)//priority to rows
{
for(int j = gridCols; j > 0; --j)
{
if((i * j) == localProbSize)
{
if(gridRows % i != 0 || gridCols % j != 0)
continue;
if(abs(i -j) < spread)
{
spread = abs(i - j);
blockRows = i;
blockCols = j;
}
}
}
}
threadsPerRow = gridRows / blockRows;
threadsPerCol = gridCols / blockCols;
if((threadsPerRow * threadsPerCol) != totalThreads)
{
cout << "Grid partitioning leaves a remainder...\nAborting...\n";
heatCleanExit(7);
}
probCase = 2;
}
else//case 2.2 : handling 4, 9, 16, ... threads
{
threadsPerRow = cut;
threadsPerCol = cut;
if((gridRows % cut != 0) || (gridCols % cut != 0))//can't cut even blocks
{
cout << "Grid partitioning leaves a remainder...\nAborting...\n";
heatCleanExit(8);
}
blockRows = gridRows / cut;
blockCols = gridCols / cut;
probCase = 2;
}
printf("Grid can be partitioned without remainder...\n"
"Rows per block : %d, Columns per block : %d\n"
"Vertical threads : %d, Horizontal threads : %d\n\n",
blockRows, blockCols, threadsPerRow, threadsPerCol);
//working and global discovery phase
//allocate device(GPU) global memory
CUDA_CHECK(hipMalloc((void **)&devWorkRowS, totalThreads * sizeof(int)));
CUDA_CHECK(hipMalloc((void **)&devWorkRowE, totalThreads * sizeof(int)));
CUDA_CHECK(hipMalloc((void **)&devWorkColS, totalThreads * sizeof(int)));
CUDA_CHECK(hipMalloc((void **)&devWorkColE, totalThreads * sizeof(int)));
//transfer data from host(CPU) to device(GPU) memory
CUDA_CHECK(hipMemcpy(devWorkRowS, workRowS, totalThreads * sizeof(int),
hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpy(devWorkRowE, workRowE, totalThreads * sizeof(int),
hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpy(devWorkColS, workColS, totalThreads * sizeof(int),
hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpy(devWorkColE, workColE, totalThreads * sizeof(int),
hipMemcpyHostToDevice));
hipLaunchKernelGGL(( heatDiscover), dim3(cudaBlocks), dim3(cudaThreads), 0, 0,
devWorkRowS, devWorkRowE, devWorkColS, devWorkColE,
blockRows, blockCols,
threadsPerRow, threadsPerCol, probCase,
gridRows, gridCols);
//wait for device(GPU) to finish its work
CUDA_CHECK(hipDeviceSynchronize());
//start the timer
// clock_gettime(CLOCK_MONOTONIC, &start);
clock_t begin = clock();
//launch the kernel
for(int steps = 0; steps < STEPS; ++steps)
{
hipLaunchKernelGGL(( heatUpdateParallel), dim3(cudaBlocks), dim3(cudaThreads), 0, 0,
devOldHeatGrid, devNewHeatGrid,
devWorkRowS, devWorkRowE, devWorkColS, devWorkColE,
gridCols);
//wait for device(GPU) to finish its work
CUDA_CHECK(hipDeviceSynchronize());
//old = new
heatSwap(&devNewHeatGrid, &devOldHeatGrid);
}
//stop the timer and print the result
clock_t end = clock();
double totalTime = (double)(end - begin) / CLOCKS_PER_SEC;
// clock_gettime(CLOCK_MONOTONIC, &end);
// totalTime = ((end.tv_sec - start.tv_sec) * 1000.0) +
// ((end.tv_nsec - start.tv_nsec) / 1000000.0);
cout << "\nElapsed time was " << totalTime << " s\n";
}
if(STEPS % 2 == 0)//get the correct version
{
CUDA_CHECK(hipMemcpy(heatGrid, devOldHeatGrid, gridSize * sizeof(double),
hipMemcpyDeviceToHost));
}
else
{
CUDA_CHECK(hipMemcpy(heatGrid, devNewHeatGrid, gridSize * sizeof(double),
hipMemcpyDeviceToHost));
}
//write out the final grid to the corresponding file
// if(heatWrite(heatGrid, 1, gridRows, gridCols,
// threadsPerBlock, blocksPerGrid) == -1)
// {
// cerr << "Error, could not create the initial file...\nAborting...\n";
// heatCleanExit(3);
// }
//clear memory and exit
heatCleanExit(0);
} | 7c446dca1da8c0801eb5c8818736e71cc471b9e8.cu | #include <assert.h>
//#include <sys/time.h>
#include <time.h>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <cmath>
#include <algorithm>
using std::cout;
using std::endl;
using std::cerr;
#define DECLINE_HORIZONTAL 0.1
#define DECLINE_VERTICAL 0.1
#define STEPS 1000 /* number of time steps */
/* error check on hip macro */
#define CUDA_CHECK(command) \
{ \
cudaError_t status = command; \
if(status != cudaSuccess) \
{ \
cerr << "Error : Cuda reports " << cudaGetErrorString(status) << endl; \
heatCleanExit(-6); \
} \
}
/* Device(GPU) FUNCTIONS TO BE LAUNCHED AS KERNELS FROM HOST(CPU) */
/* This function is used to discover the local working area of each thread */
__global__ void heatDiscover(int * __restrict__ workRowS,
int * __restrict__ workRowE,
int * __restrict__ workColS,
int * __restrict__ workColE,
const int blockRows, const int blockCols,
const int threadsPerRow, const int threadsPerCol,
const int probCase,
const int gridRows, const int gridCols)
{
const int thread_Id = blockIdx.x * blockDim.x + threadIdx.x;
if(probCase == 1)//2 threads
{
if(thread_Id == 0)//west thread
{
workRowS[thread_Id] = 1;
workRowE[thread_Id] = blockRows - 2;
workColS[thread_Id] = 1;
workColE[thread_Id] = blockCols - 1;
}
else//east thread
{
workRowS[thread_Id] = 1;
workRowE[thread_Id] = blockRows - 2;
workColS[thread_Id] = blockCols;
workColE[thread_Id] = gridCols - 2;
}
}
else if(probCase == 2)//6, 8, 10, ... OR 4, 16, 64 ... threads
{
if(thread_Id == 0)//NW corner
{
workRowS[thread_Id] = 1;
workRowE[thread_Id] = blockRows - 1;
workColS[thread_Id] = 1;
workColE[thread_Id] = blockCols - 1;
}
else if(thread_Id == (threadsPerCol - 1))//NE corner
{
workRowS[thread_Id] = 1;
workRowE[thread_Id] = blockRows - 1;
workColS[thread_Id] = gridCols - blockCols;
workColE[thread_Id] = gridCols - 2;
}
else if(thread_Id == ((threadsPerRow * threadsPerCol) - threadsPerCol))//SW corner
{
workRowS[thread_Id] = gridRows - blockRows;
workRowE[thread_Id] = gridRows - 2;
workColS[thread_Id] = 1;
workColE[thread_Id] = blockCols - 1;
}
else if(thread_Id == ((threadsPerCol * threadsPerRow) - 1))//SE corner
{
workRowS[thread_Id] = gridRows - blockRows;
workRowE[thread_Id] = gridRows - 2;
workColS[thread_Id] = gridCols - blockCols;
workColE[thread_Id] = gridCols - 2;
}
else if(thread_Id < threadsPerCol)//NN side
{
workRowS[thread_Id] = 1;
workRowE[thread_Id] = blockRows - 1;
workColS[thread_Id] = thread_Id * blockCols;
workColE[thread_Id] = workColS[thread_Id] + blockCols - 1;
}
else if((thread_Id > ((threadsPerCol * threadsPerRow) - threadsPerCol)) &&
(thread_Id < ((threadsPerCol * threadsPerRow) - 1)))//SS side
{
workRowS[thread_Id] = gridRows - blockRows;
workRowE[thread_Id] = gridRows - 2;
workColS[thread_Id] = (thread_Id % threadsPerCol) * blockCols;
workColE[thread_Id] = workColS[thread_Id] + blockCols - 1;
}
else if((thread_Id % threadsPerCol) == 0)//WW side
{
workRowS[thread_Id] = (thread_Id / threadsPerCol) * blockRows;
workRowE[thread_Id] = workRowS[thread_Id] + blockRows - 1;
workColS[thread_Id] = 1;
workColE[thread_Id] = blockCols - 1;
}
else if((thread_Id + 1) % threadsPerCol == 0)//EE side
{
workRowS[thread_Id] = ((thread_Id + 1 - threadsPerCol) / threadsPerCol) *
blockRows;
workRowE[thread_Id] = workRowS[thread_Id] + blockRows - 1;
workColS[thread_Id] = gridCols - blockCols;
workColE[thread_Id] = gridCols - 2;
}
else//general case middle location
{
int rowMarginS = threadsPerCol;
int rowMarginE = rowMarginS + threadsPerCol - 1;
int rowOffset = 1;
while(1)
{
if((thread_Id > rowMarginS) && (thread_Id < rowMarginE))
{
workRowS[thread_Id] = rowOffset * blockRows;
break;
}
else
{
rowMarginS += threadsPerCol;
rowMarginE += threadsPerCol;
++rowOffset;
}
}
workRowE[thread_Id] = workRowS[thread_Id] + blockRows - 1;
workColS[thread_Id] = (thread_Id % threadsPerCol) * blockCols;
workColE[thread_Id] = workColS[thread_Id] + blockCols - 1;
}
}
}
/* This function updates the grid and is invoked on serial executions */
__global__ void heatUpdateSerial(const double * __restrict__ devOldHeatGrid,
double * __restrict__ devNewHeatGrid,
const int gridRows, const int gridCols)
{
for(int i = 1; i < gridRows - 1; ++i)
{
for(int j = 1; j < gridCols - 1; ++j)
{
devNewHeatGrid[i*gridCols+j] = devOldHeatGrid[i*gridCols+j] +
DECLINE_HORIZONTAL *
(devOldHeatGrid[(i+1)*gridCols+j] +
devOldHeatGrid[(i-1)*gridCols+j] -
(2 * devOldHeatGrid[i*gridCols+j])) +
DECLINE_VERTICAL *
(devOldHeatGrid[i*gridCols+j+1] +
devOldHeatGrid[i*gridCols+j-1] -
(2 * devOldHeatGrid[i*gridCols+j]));
}
}
}
/* This function updates the grid and is invoked on parallel executions */
__global__ void heatUpdateParallel(const double * __restrict__ devOldHeatGrid,
double * __restrict__ devNewHeatGrid,
const int * __restrict__ workRowS,
const int * __restrict__ workRowE,
const int * __restrict__ workColS,
const int * __restrict__ workColE,
const int gridCols)
{
const int thread_Id = blockIdx.x * blockDim.x + threadIdx.x;
//get the borders in registers for 1 cycle memory access
const int wRowS = workRowS[thread_Id];
const int wRowE = workRowE[thread_Id];
const int wColS = workColS[thread_Id];
const int wColE = workColE[thread_Id];
for(int i = wRowS; i <= wRowE; ++i)
{
for(int j = wColS; j <= wColE; ++j)
{
devNewHeatGrid[i*gridCols+j] = devOldHeatGrid[i*gridCols+j] +
DECLINE_HORIZONTAL *
(devOldHeatGrid[(i+1)*gridCols+j] +
devOldHeatGrid[(i-1)*gridCols+j] -
(2*devOldHeatGrid[i*gridCols+j])) +
DECLINE_VERTICAL *
(devOldHeatGrid[i*gridCols+j+1] +
devOldHeatGrid[i*gridCols+j-1] -
(2*devOldHeatGrid[i*gridCols+j]));
}
}
}
//declare dynamic variables
double * heatGrid = nullptr;
double * devNewHeatGrid = nullptr;
double * devOldHeatGrid = nullptr;
int * workRowS = nullptr;
int * workRowE = nullptr;
int * workColS = nullptr;
int * workColE = nullptr;
int * devWorkRowS = nullptr;
int * devWorkRowE = nullptr;
int * devWorkColS = nullptr;
int * devWorkColE = nullptr;
/* HOST(CPU) FUNCTIONS */
/* This function initialises the temperature on the given grid with higher
temperatures at the centre, progressively lower ones towards the sides
and 0s at the perimeter
*/
static inline void heatInit(double * heatGrid,
const int gridRows, const int gridCols)
{
for(int i = 0; i < gridRows; ++i)//avoid halo area
for(int j = 0; j < gridCols; ++j)
heatGrid[i*gridCols+j] = (double) (i * (gridRows - i - 1) * j * (gridCols - j - 1));
}
/* This function writes out the input grid to a .dat file in current path */
static inline int heatWrite(const double * heatGrid, const int flag,
const int gridRows, const int gridCols,
const int threadsPerBlock, const int blocksPerGrid)
{
char filePath[70] = "";
if(flag == 0)
{
sprintf(filePath, "%d_%d_cuda_%d_%d_Initial.dat", gridRows, gridCols,
threadsPerBlock, blocksPerGrid);
}
else
{
sprintf(filePath, "%d_%d_cuda_%d_%d_Final.dat", gridRows, gridCols,
threadsPerBlock, blocksPerGrid);
}
FILE * fp = fopen(filePath, "w");
if(fp == nullptr)
return -1;
for(int i = 0; i < gridRows; ++i)
{
for(int j = 0; j < gridCols; ++j)
{
fprintf(fp, "%-.1lf", fabs(heatGrid[i*gridCols+j]));//some 0.0s appear as -0.0s
if(j != (gridCols - 1))
fprintf(fp, " ");
}
fprintf(fp, "\n");
}
fclose(fp);
return 0;//all ok
}
/* This function swaps between the 2 grids to avoid assignments */
static inline void heatSwap(double ** a, double ** b)
{
double *temp = *a;
*a = *b;
*b = temp;
}
/* This function cleans up memory to prevent leaks on any exit error */
static inline void heatCleanExit(const int errorCode)
{
if(heatGrid != nullptr)
{
free(heatGrid);
heatGrid = nullptr;
}
if(devOldHeatGrid != nullptr)
{
CUDA_CHECK(cudaFree(devOldHeatGrid));
devOldHeatGrid = nullptr;
}
if(devNewHeatGrid != nullptr)
{
CUDA_CHECK(cudaFree(devNewHeatGrid));
devNewHeatGrid = nullptr;
}
if(workRowS != nullptr)
{
free(workRowS);
workRowS = nullptr;
}
if(devWorkRowS != nullptr)
{
CUDA_CHECK(cudaFree(devWorkRowS));
devWorkRowS = nullptr;
}
if(workRowE != nullptr)
{
free(workRowE);
workRowE = nullptr;
}
if(devWorkRowE != nullptr)
{
CUDA_CHECK(cudaFree(devWorkRowE));
devWorkRowE = nullptr;
}
if(workColS != nullptr)
{
free(workColS);
workColS = nullptr;
}
if(devWorkColS != nullptr)
{
CUDA_CHECK(cudaFree(devWorkColS));
devWorkColS = nullptr;
}
if(workColE != nullptr)
{
free(workColE);
workColE = nullptr;
}
if(devWorkColE != nullptr)
{
CUDA_CHECK(cudaFree(devWorkColE));
devWorkColE = nullptr;
}
exit(errorCode);
}
/* Main program function */
int main(int argc, char *argv[])
{
//get the properties
cudaDeviceProp deviceProp;
CUDA_CHECK(cudaGetDeviceProperties(&deviceProp, 0));
cout << "GPU PROPERTIES\n";
cout << "******************************************************************\n";
cout << "Cuda Device prop succeeded" << endl;
cout << "System minor " << deviceProp.minor << endl;
cout << "System major " << deviceProp.major << endl;
cout << "Agent Prop Name " << deviceProp.name << endl;
cout << "Total Global Memory " << deviceProp.totalGlobalMem << " bytes\n";
cout << "Shared Memory Per Block " << deviceProp.sharedMemPerBlock << " bytes\n";
cout << "Registers per block " << deviceProp.regsPerBlock << endl;
cout << "Warp size " << deviceProp.warpSize << endl;
cout << "Max Threads Per Block " << deviceProp.maxThreadsPerBlock << endl;
cout << "Max clock frequency of the multiProcessors " << deviceProp.clockRate << " kHz\n";
cout << "Size of shared memory region " << deviceProp.totalConstMem << " bytes\n";
cout << "Number of multi-processors (compute units) " << deviceProp.multiProcessorCount << endl;
cout << "******************************************************************\n\n";
//get properties to check on input data possible run scenarios
const int devMaxThreadsPerBlock = deviceProp.maxThreadsPerBlock;
const int devMaxConcurrentThreads = deviceProp.multiProcessorCount *
deviceProp.warpSize;
//get the command line input data and do initial checks
if(argc != 5)
{
cerr << "Not enough input data, need 4\n";
cerr << "Grid_Rows Grid_Columns Threads_Per_Block Blocks_Per_Grid\n";
cerr << "Aborting...\n";
heatCleanExit(-1);
}
const int gridRows = atoi(argv[1]);
const int gridCols = atoi(argv[2]);
const int threadsPerBlock = atoi(argv[3]);
const int blocksPerGrid = atoi(argv[4]);
const int gridSize = gridRows * gridCols;
const int totalThreads = threadsPerBlock * blocksPerGrid;
if(gridRows < 0 || gridCols < 0 || threadsPerBlock < 1 || blocksPerGrid < 1)
{
cerr << "Invalid Input Data\n";
cerr << "Grid Rows = " << gridRows << endl;
cerr << "Grid Cols = " << gridCols << endl;
cerr << "Threads Per Block = " << threadsPerBlock << endl;
cerr << "Blocks Per Grid = " << blocksPerGrid << endl;
cerr << "Aborting...\n";
heatCleanExit(-1);
}
//do checks based on device(GPU) capabilities
if(threadsPerBlock > devMaxThreadsPerBlock)
{
cerr << "Maximum threads per block exceeded for current device\n";
cerr << "Aborting...\n";
heatCleanExit(-2);
}
if(totalThreads > devMaxConcurrentThreads)
{
cerr << "Maximum concurrent threads exceeded for current device\n";
cerr << "Aborting...\n";
heatCleanExit(-2);
}
//allocate host(CPU) memory
heatGrid = (double *) malloc(gridSize * sizeof(double));
if(heatGrid == nullptr)
{
cerr << "Error, not enough memory...\nAborting...\n";
heatCleanExit(1);
}
//initialise with 0.0s the heat grid
for(int i = 0; i < gridRows; ++i)
for(int j = 0; j < gridCols; ++j)
heatGrid[i*gridCols+j] = 0.0;
//allocate device(GPU) global memory
CUDA_CHECK(cudaMalloc((void **)&devNewHeatGrid, gridSize * sizeof(double)));
//transfer data from host(CPU) to device(GPU) memory
CUDA_CHECK(cudaMemcpy(devNewHeatGrid, heatGrid, gridSize * sizeof(double),
cudaMemcpyHostToDevice));
//initialise the heat grid with actual data
heatInit(heatGrid, gridRows, gridCols);
//allocate device(GPU) global memory
CUDA_CHECK(cudaMalloc((void **)&devOldHeatGrid, gridSize * sizeof(double)));
//transfer data from host(CPU) to device(GPU) memory
CUDA_CHECK(cudaMemcpy(devOldHeatGrid, heatGrid, gridSize * sizeof(double),
cudaMemcpyHostToDevice));
//write out the initial grid to the corresponding file
// if(heatWrite(heatGrid, 0, gridRows, gridCols,
// threadsPerBlock, blocksPerGrid) == -1)
// {
// cerr << "Error, could not create the initial file...\nAborting...\n";
// heatCleanExit(2);
// }
//calculate the kernel dimensions (x,y,z) threads/block and blocks/grid
dim3 cudaThreads(threadsPerBlock, 1, 1);
dim3 cudaBlocks(blocksPerGrid, 1 ,1);
//define the timer structs to be used
// struct timespec start;
// struct timespec end;
// double totalTime = 0.0;
if(totalThreads == 1)//serial execution
{
cout << "Serial execution with 1 cuda thread\n";
//start the timer
//clock_gettime(CLOCK_MONOTONIC, &start);
clock_t begin = clock();
//solve the problem
for(int steps = 0; steps < STEPS; ++steps)
{
//launch the kernel
heatUpdateSerial<<<cudaBlocks, cudaThreads>>>(devOldHeatGrid, devNewHeatGrid,
gridRows, gridCols);
//wait for device(GPU) to finish its work
CUDA_CHECK(cudaDeviceSynchronize());
//old = new
heatSwap(&devNewHeatGrid, &devOldHeatGrid);
}
//stop the timer and print the result
//clock_gettime(CLOCK_MONOTONIC, &end);
clock_t end = clock();
double totalTime = (double)(end - begin) / CLOCKS_PER_SEC;
//totalTime = ((end.tv_sec - start.tv_sec) * 1000.0) +
// ((end.tv_nsec - start.tv_nsec) / 1000000.0);
cout << "\nElapsed time was " << totalTime << " s\n";
}
else//parallel execution
{
//initial check on number of threads
if(totalThreads % 2 != 0)
{
cout << "Can't partition grid fairly with odd number of threads = "
<< totalThreads
<< "\nAborting...\n";
heatCleanExit(4);
}
cout << "Parallel execution with ";
cout << "Threads Per Block : " << threadsPerBlock << endl;
cout << "Blocks Per Grid : " << blocksPerGrid << endl;
cout << "Total Cuda Threads : " << totalThreads << endl;
//allocate arrays for neighbour discovery
//working border rows
workRowS = (int *) malloc(totalThreads * sizeof(int));
if(workRowS == nullptr)
{
cerr << "Error, not enough memory...\nAborting...\n";
heatCleanExit(5);
}
workRowE = (int *) malloc(totalThreads * sizeof(int));
if(workRowE == nullptr)
{
cerr << "Error, not enough memory...\nAborting...\n";
heatCleanExit(5);
}
//working border columns
workColS = (int *) malloc(totalThreads * sizeof(int));
if(workColS == nullptr)
{
cerr << "Error, not enough memory...\nAborting...\n";
heatCleanExit(5);
}
workColE = (int *) malloc(totalThreads * sizeof(int));
if(workColE == nullptr)
{
cerr << "Error, not enough memory...\nAborting...\n";
heatCleanExit(5);
}
int blockRows = 0;//total rows for each block of threads data block
int blockCols = 0;//total cols for each block of threads data block
int threadsPerRow = 0, threadsPerCol = 0;//vertical and horizontal distrib
//classify problem cases based on total threads
int cut = (int) sqrt(totalThreads);
double cutF = sqrt(totalThreads);
int probCase;
if(totalThreads == 2)//case 1 : handling 2 threads
{
blockRows = gridRows;
blockCols = gridCols / totalThreads;
threadsPerRow = gridRows / blockRows;
threadsPerCol = gridCols / blockCols;
if((threadsPerRow * threadsPerCol) != totalThreads)
{
cout << "Grid partitioning leaves a remainder...\nAborting...\n";
heatCleanExit(6);
}
probCase = 1;
}
else if(cutF > (double) cut)//case 2.1 : handling 6, 8, 10, ... threads
{
if(gridSize % totalThreads != 0)//can't cut it without remains
{
cout << "Grid partitioning leaves a remainder...\nAborting...\n";
heatCleanExit(7);
}
const int localProbSize = gridSize / totalThreads;
int spread = gridSize;
//find the best possible partition
for(int i = gridRows; i > 0; --i)//priority to rows
{
for(int j = gridCols; j > 0; --j)
{
if((i * j) == localProbSize)
{
if(gridRows % i != 0 || gridCols % j != 0)
continue;
if(abs(i -j) < spread)
{
spread = abs(i - j);
blockRows = i;
blockCols = j;
}
}
}
}
threadsPerRow = gridRows / blockRows;
threadsPerCol = gridCols / blockCols;
if((threadsPerRow * threadsPerCol) != totalThreads)
{
cout << "Grid partitioning leaves a remainder...\nAborting...\n";
heatCleanExit(7);
}
probCase = 2;
}
else//case 2.2 : handling 4, 9, 16, ... threads
{
threadsPerRow = cut;
threadsPerCol = cut;
if((gridRows % cut != 0) || (gridCols % cut != 0))//can't cut even blocks
{
cout << "Grid partitioning leaves a remainder...\nAborting...\n";
heatCleanExit(8);
}
blockRows = gridRows / cut;
blockCols = gridCols / cut;
probCase = 2;
}
printf("Grid can be partitioned without remainder...\n"
"Rows per block : %d, Columns per block : %d\n"
"Vertical threads : %d, Horizontal threads : %d\n\n",
blockRows, blockCols, threadsPerRow, threadsPerCol);
//working and global discovery phase
//allocate device(GPU) global memory
CUDA_CHECK(cudaMalloc((void **)&devWorkRowS, totalThreads * sizeof(int)));
CUDA_CHECK(cudaMalloc((void **)&devWorkRowE, totalThreads * sizeof(int)));
CUDA_CHECK(cudaMalloc((void **)&devWorkColS, totalThreads * sizeof(int)));
CUDA_CHECK(cudaMalloc((void **)&devWorkColE, totalThreads * sizeof(int)));
//transfer data from host(CPU) to device(GPU) memory
CUDA_CHECK(cudaMemcpy(devWorkRowS, workRowS, totalThreads * sizeof(int),
cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(devWorkRowE, workRowE, totalThreads * sizeof(int),
cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(devWorkColS, workColS, totalThreads * sizeof(int),
cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(devWorkColE, workColE, totalThreads * sizeof(int),
cudaMemcpyHostToDevice));
heatDiscover<<<cudaBlocks, cudaThreads>>>
(devWorkRowS, devWorkRowE, devWorkColS, devWorkColE,
blockRows, blockCols,
threadsPerRow, threadsPerCol, probCase,
gridRows, gridCols);
//wait for device(GPU) to finish its work
CUDA_CHECK(cudaDeviceSynchronize());
//start the timer
// clock_gettime(CLOCK_MONOTONIC, &start);
clock_t begin = clock();
//launch the kernel
for(int steps = 0; steps < STEPS; ++steps)
{
heatUpdateParallel<<<cudaBlocks, cudaThreads>>>
(devOldHeatGrid, devNewHeatGrid,
devWorkRowS, devWorkRowE, devWorkColS, devWorkColE,
gridCols);
//wait for device(GPU) to finish its work
CUDA_CHECK(cudaDeviceSynchronize());
//old = new
heatSwap(&devNewHeatGrid, &devOldHeatGrid);
}
//stop the timer and print the result
clock_t end = clock();
double totalTime = (double)(end - begin) / CLOCKS_PER_SEC;
// clock_gettime(CLOCK_MONOTONIC, &end);
// totalTime = ((end.tv_sec - start.tv_sec) * 1000.0) +
// ((end.tv_nsec - start.tv_nsec) / 1000000.0);
cout << "\nElapsed time was " << totalTime << " s\n";
}
if(STEPS % 2 == 0)//get the correct version
{
CUDA_CHECK(cudaMemcpy(heatGrid, devOldHeatGrid, gridSize * sizeof(double),
cudaMemcpyDeviceToHost));
}
else
{
CUDA_CHECK(cudaMemcpy(heatGrid, devNewHeatGrid, gridSize * sizeof(double),
cudaMemcpyDeviceToHost));
}
//write out the final grid to the corresponding file
// if(heatWrite(heatGrid, 1, gridRows, gridCols,
// threadsPerBlock, blocksPerGrid) == -1)
// {
// cerr << "Error, could not create the initial file...\nAborting...\n";
// heatCleanExit(3);
// }
//clear memory and exit
heatCleanExit(0);
} |
d7ee900fc20d74f1f515c8672097e43a49f3091b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
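// How the update below reads: z holds a two-component field per pixel and DIVZ
// is its (backward) divergence; f is then relaxed with the semi-implicit step
//     f <- ( f + tf * div(z) + tf * lambda * g ) / ( 1 + tf * lambda )
// (the commented-out line inside the kernel is the fully explicit variant).
// Judging by how they are used, tf acts as a time step and lambda as the weight
// tying f back to the data term g.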
__global__ void updF(float *f, float *z, float *g, float tf, float lambda, int nx, int ny)
{
int px = blockIdx.x * blockDim.x + threadIdx.x;
int py = blockIdx.y * blockDim.y + threadIdx.y;
int idx = px + py*nx;
float DIVZ;
if (px<nx && py<ny)
{
// compute the divergence
DIVZ = 0;
if ((px<(nx - 1))) DIVZ += z[2 * (idx)+0];
if ((px>0)) DIVZ -= z[2 * (idx - 1) + 0];
if ((py<(ny - 1))) DIVZ += z[2 * (idx)+1];
if ((py>0)) DIVZ -= z[2 * (idx - nx) + 1];
// update f
//f[idx] = (1.-tf*lambda)*f[idx] + tf * DIVZ + tf*lambda*g[idx];
f[idx] = (f[idx] + tf * DIVZ + tf*lambda*g[idx]) / (1 + tf*lambda);
}
} | d7ee900fc20d74f1f515c8672097e43a49f3091b.cu | #include "includes.h"
__global__ void updF(float *f, float *z, float *g, float tf, float lambda, int nx, int ny)
{
int px = blockIdx.x * blockDim.x + threadIdx.x;
int py = blockIdx.y * blockDim.y + threadIdx.y;
int idx = px + py*nx;
float DIVZ;
if (px<nx && py<ny)
{
// compute the divergence
DIVZ = 0;
if ((px<(nx - 1))) DIVZ += z[2 * (idx)+0];
if ((px>0)) DIVZ -= z[2 * (idx - 1) + 0];
if ((py<(ny - 1))) DIVZ += z[2 * (idx)+1];
if ((py>0)) DIVZ -= z[2 * (idx - nx) + 1];
// update f
//f[idx] = (1.-tf*lambda)*f[idx] + tf * DIVZ + tf*lambda*g[idx];
f[idx] = (f[idx] + tf * DIVZ + tf*lambda*g[idx]) / (1 + tf*lambda);
}
} |
39261be99632fc26bd89a9e3fc4396437f39e6b7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../common.h"
#define RADIUS 4
__constant__ float coef[RADIUS + 1];
void setup_coef_constant(void) {
const float h_coef[] = {a0, a1, a2, a3, a4};
hipMemcpyToSymbol(coef, h_coef, (RADIUS + 1) * sizeof(float));
}
__global__ void stencil_1d(float *in, float *out) {
__shared__ float smem[BDIM + 2 * RADIUS];
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int sidx = threadIdx.x + RADIUS;
smem[sidx] = in[idx];
if (threadIdx.x < RADIUS) {
smem[sidx - RADIUS] = in[idx - RADIUS];
smem[sidx + BDIM] = in[idx + BDIM];
}
__syncthreads();
float tmp = 0.0f;
#pragma unroll
for (int i=1; i<=RADIUS; i++) {
tmp += coef[i] * (smem[sidx+i] - smem[sidx-i]);
}
out[idx] = tmp;
}
__global__ void stencil_1d_read_only(float *in, float *out, const float *__restrict__ dcoef) {
__shared__ float smem[BDIM + 2 * RADIUS];
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int sidx = threadIdx.x + RADIUS;
smem[sidx] = in[idx];
if (threadIdx.x < RADIUS) {
smem[sidx - RADIUS] = in[idx - RADIUS];
smem[sidx + BDIM] = in[idx + BDIM];
}
__syncthreads();
float tmp = 0.0f;
#pragma unroll
for (int i=1; i<=RADIUS; i++) {
tmp += dcoef[i] * (smem[sidx+i] - smem[sidx-i]);
}
out[idx] = tmp;
} | 39261be99632fc26bd89a9e3fc4396437f39e6b7.cu | #include "../common.h"
#define RADIUS 4
__constant__ float coef[RADIUS + 1];
void setup_coef_constant(void) {
const float h_coef[] = {a0, a1, a2, a3, a4};
cudaMemcpyToSymbol(coef, h_coef, (RADIUS + 1) * sizeof(float));
}
__global__ void stencil_1d(float *in, float *out) {
__shared__ float smem[BDIM + 2 * RADIUS];
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int sidx = threadIdx.x + RADIUS;
smem[sidx] = in[idx];
if (threadIdx.x < RADIUS) {
smem[sidx - RADIUS] = in[idx - RADIUS];
smem[sidx + BDIM] = in[idx + BDIM];
}
__syncthreads();
float tmp = 0.0f;
#pragma unroll
for (int i=1; i<=RADIUS; i++) {
tmp += coef[i] * (smem[sidx+i] - smem[sidx-i]);
}
out[idx] = tmp;
}
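// Minimal host-side sketch of how these kernels are typically driven; BDIM and
// the a0..a4 taps are assumed to come from ../common.h, and isize is assumed to
// be a multiple of BDIM with the input padded by RADIUS cells on both ends:
//
//   float *d_in, *d_out;
//   cudaMalloc(&d_in,  (isize + 2 * RADIUS) * sizeof(float));
//   cudaMalloc(&d_out,  isize * sizeof(float));
//   setup_coef_constant();                          // load the taps into coef[]
//   stencil_1d<<<isize / BDIM, BDIM>>>(d_in + RADIUS, d_out);
//   cudaDeviceSynchronize();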
__global__ void stencil_1d_read_only(float *in, float *out, const float *__restrict__ dcoef) {
__shared__ float smem[BDIM + 2 * RADIUS];
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int sidx = threadIdx.x + RADIUS;
smem[sidx] = in[idx];
if (threadIdx.x < RADIUS) {
smem[sidx - RADIUS] = in[idx - RADIUS];
smem[sidx + BDIM] = in[idx + BDIM];
}
__syncthreads();
float tmp = 0.0f;
#pragma unroll
for (int i=1; i<=RADIUS; i++) {
tmp += dcoef[i] * (smem[sidx+i] - smem[sidx-i]);
}
out[idx] = tmp;
} |
c83c3c878cda2e695a914b7ba23f61e5c296cb54.hip | // !!! This is a file automatically generated by hipify!!!
// ######################################################
// ## Name: 刘羽丰
// ## File description: version 6. Unlike version 5, this version reads the input
// ##   array through 2D texture memory to exploit 2D spatial locality,
// ##   and stores the log lookup table in constant memory.
// ######################################################
#include <stdio.h>
#include <hip/hip_runtime_api.h>
#include <device_launch_parameters.h>
#include <cmath>
// Declare constant memory
__constant__ float mylog[25];
// ######################################################
// ## INIT: initializes the two host-side arrays
// ## A_host: input matrix, filled with random values 0-15
// ## B_host: output matrix, initialized to 0
// ## ROWS, COLS: height and width of the matrix
// ######################################################
void INIT(int* A_host,float* B_host,int ROWS,int COLS){
// srand(time(NULL));
for(int i = 0; i < ROWS; i++){
for(int j = 0; j < COLS; j++){
A_host[i*COLS+j] = rand() % 16;
B_host[i*COLS+j] = 0;
}
}
}
// ######################################################
// ## Kernel: computes the entropy of the window centered on each element of the 2D array
// ## A: input matrix
// ## B: output matrix
// ## rows, cols: height and width of the matrix
// ######################################################
__global__ void cal_entropy(hipTextureObject_t tex,float *B, int rows, int cols){
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx < rows*cols){
// Convert the linear index to 2D coordinates
int row = idx / cols;
int col = idx - row*cols;
// Clamp the four window boundaries once to avoid lots of repeated ifs
int up = max(row-2,0);
int down = min(row+2,rows-1);
int left = max(col-2,0);
int right = min(col+2,cols-1);
// Use char counters to reduce register pressure
char digit[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
// printf("(%d,%d) l:%d,r:%d,u:%d,d:%d\n",row,col,left,right,up,down);
// The window size follows directly from the clamped boundaries
int count = (right-left+1)*(down-up+1);
// Accumulated entropy
float result = 0;
// Visit the neighborhood and build the counts
for(int i = up; i <= down; i++){
for(int j = left; j <= right; j++){
++digit[tex2D<int>(tex,j,i)];
}
}
// Compute the entropy
for(int i = 0; i < 16; i++){
if(digit[i] != 0){
result += (((float)digit[i])/count)*(mylog[digit[i]-1]-mylog[count-1]);
}
}
// Write the result to the output matrix
B[idx] = -result;
// printf("c:%d idx:%d r:%f\n",count,idx,B[idx]);
}
}
// Main function
int main(int argc,char *argv[])
{
int i;
// Matrix height
int ROWS = 5;
// Matrix width
int COLS = 5;
// Threads per block
int block = 256;
for(i = 1; i < argc; i++)
{
if(i == 1){
ROWS = atoi(argv[i]);
}
else if(i == 2){
COLS = atoi(argv[i]);
}
else if(i == 3){
block = atoi(argv[i]);
}
}
// Input 2D array, values 0-15
int Bytes = ROWS*COLS*sizeof(int);
// Output 2D array of floats
int FBytes = ROWS*COLS*sizeof(float);
// Allocate host memory
int* A_host = (int*)malloc(Bytes);
float* B_host = (float*)malloc(FBytes);
// Initialize
INIT(A_host,B_host,ROWS,COLS);
// Allocate device memory
int* A_dev = NULL;
float* B_dev = NULL;
hipMalloc((void**)&A_dev, Bytes);
hipMalloc((void**)&B_dev, FBytes);
// size_t pitch,tex_ofs;
// hipMallocPitch((void**)&A_dev,&pitch,COLS*sizeof(int),ROWS);
// hipMemcpy2D(A_dev,pitch,A_host,COLS*ROWS*sizeof(int),COLS*sizeof(int),ROWS,hipMemcpyDeviceToHost);
// tex.normalized = false;
// hipBindTexture2D(&tex_ofs, &tex, A_dev, &tex.channelDesc,COLS, ROWS, pitch);
hipArray* carray;
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<int>();
hipMallocArray(&carray, &channelDesc, COLS, ROWS);
hipMemcpy2DToArray(carray, 0, 0, A_host, COLS*sizeof(int), COLS*sizeof(int), ROWS, hipMemcpyHostToDevice);
// Copy the input data from host memory to device memory
// hipMemcpy(A_dev, A_host, Bytes, hipMemcpyHostToDevice);
hipMemcpy(B_dev, B_host, FBytes, hipMemcpyHostToDevice);
// log lookup table
const float my_log[25] = {
0.000000,
0.693147,
1.098612,
1.386294,
1.609438,
1.791759,
1.945910,
2.079442,
2.197225,
2.302585,
2.397895,
2.484907,
2.564949,
2.639057,
2.708050,
2.772589,
2.833213,
2.890372,
2.944439,
2.995732,
3.044522,
3.091042,
3.135494,
3.178054,
3.218876};
// Copy the log table into constant memory
hipMemcpyToSymbol(mylog, (const float*)my_log, sizeof(my_log));
// Texture memory
// Describe the channel/resource and bind the texture to the array
hipResourceDesc resDesc;
memset(&resDesc,0,sizeof(resDesc));
resDesc.resType = hipResourceTypeArray;
resDesc.res.array.array = carray;
// Configure the texture read mode
hipTextureDesc texDesc;
memset(&texDesc,0,sizeof(texDesc));
texDesc.readMode = hipReadModeElementType;
// Create the texture object
hipTextureObject_t tex;
hipCreateTextureObject(&tex,&resDesc,&texDesc,NULL);
// GPU timing
hipEvent_t start, stop;
float elapsedTime = 0.0;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
// Launch the kernel
hipLaunchKernelGGL(( cal_entropy), dim3((COLS*ROWS-1)/block+1),dim3(block), 0, 0, tex,B_dev, ROWS, COLS);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
// Print the GPU execution time
printf("gpu_time:%fms\n",elapsedTime);
// Destroy the texture object
hipDestroyTextureObject(tex);
// Free memory
hipFree(B_dev);
hipFree(A_dev);
free(B_host);
free(A_host);
return 0;
} | c83c3c878cda2e695a914b7ba23f61e5c296cb54.cu | // ######################################################
// ## Name: 刘羽丰
// ## File description: version 6. Unlike version 5, this version reads the input
// ##   array through 2D texture memory to exploit 2D spatial locality,
// ##   and stores the log lookup table in constant memory.
// ######################################################
#include <stdio.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <cmath>
// Declare constant memory
__constant__ float mylog[25];
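// mylog[k] is intended to hold ln(k+1), so the per-window entropy in the kernel can
// be evaluated as H = -sum_i (d_i/c) * (ln(d_i) - ln(c)) for bin counts d_i and
// window size c, i.e. the classic -sum p*ln(p) without any device-side log() calls.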
// ######################################################
// ## INIT: initializes the two host-side arrays
// ## A_host: input matrix, filled with random values 0-15
// ## B_host: output matrix, initialized to 0
// ## ROWS, COLS: height and width of the matrix
// ######################################################
void INIT(int* A_host,float* B_host,int ROWS,int COLS){
// srand(time(NULL));
for(int i = 0; i < ROWS; i++){
for(int j = 0; j < COLS; j++){
A_host[i*COLS+j] = rand() % 16;
B_host[i*COLS+j] = 0;
}
}
}
// ######################################################
// ## Kernel: computes the entropy of the window centered on each element of the 2D array
// ## A: input matrix
// ## B: output matrix
// ## rows, cols: height and width of the matrix
// ######################################################
__global__ void cal_entropy(cudaTextureObject_t tex,float *B, int rows, int cols){
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx < rows*cols){
// Convert the linear index to 2D coordinates
int row = idx / cols;
int col = idx - row*cols;
// Clamp the four window boundaries once to avoid lots of repeated ifs
int up = max(row-2,0);
int down = min(row+2,rows-1);
int left = max(col-2,0);
int right = min(col+2,cols-1);
// Use char counters to reduce register pressure
char digit[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
// printf("(%d,%d) l:%d,r:%d,u:%d,d:%d\n",row,col,left,right,up,down);
// The window size follows directly from the clamped boundaries
int count = (right-left+1)*(down-up+1);
// Accumulated entropy
float result = 0;
// Visit the neighborhood and build the counts
for(int i = up; i <= down; i++){
for(int j = left; j <= right; j++){
++digit[tex2D<int>(tex,j,i)];
}
}
// Compute the entropy
for(int i = 0; i < 16; i++){
if(digit[i] != 0){
result += (((float)digit[i])/count)*(mylog[digit[i]-1]-mylog[count-1]);
}
}
// Write the result to the output matrix
B[idx] = -result;
// printf("c:%d idx:%d r:%f\n",count,idx,B[idx]);
}
}
// Main function
int main(int argc,char *argv[])
{
int i;
// Matrix height
int ROWS = 5;
// Matrix width
int COLS = 5;
// Threads per block
int block = 256;
for(i = 1; i < argc; i++)
{
if(i == 1){
ROWS = atoi(argv[i]);
}
else if(i == 2){
COLS = atoi(argv[i]);
}
else if(i == 3){
block = atoi(argv[i]);
}
}
// Input 2D array, values 0-15
int Bytes = ROWS*COLS*sizeof(int);
// Output 2D array of floats
int FBytes = ROWS*COLS*sizeof(float);
// Allocate host memory
int* A_host = (int*)malloc(Bytes);
float* B_host = (float*)malloc(FBytes);
// Initialize
INIT(A_host,B_host,ROWS,COLS);
// Allocate device memory
int* A_dev = NULL;
float* B_dev = NULL;
cudaMalloc((void**)&A_dev, Bytes);
cudaMalloc((void**)&B_dev, FBytes);
// size_t pitch,tex_ofs;
// cudaMallocPitch((void**)&A_dev,&pitch,COLS*sizeof(int),ROWS);
// cudaMemcpy2D(A_dev,pitch,A_host,COLS*ROWS*sizeof(int),COLS*sizeof(int),ROWS,cudaMemcpyDeviceToHost);
// tex.normalized = false;
// cudaBindTexture2D(&tex_ofs, &tex, A_dev, &tex.channelDesc,COLS, ROWS, pitch);
cudaArray* carray;
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<int>();
cudaMallocArray(&carray, &channelDesc, COLS, ROWS);
cudaMemcpy2DToArray(carray, 0, 0, A_host, COLS*sizeof(int), COLS*sizeof(int), ROWS, cudaMemcpyHostToDevice);
// Copy the input data from host memory to device memory
// cudaMemcpy(A_dev, A_host, Bytes, cudaMemcpyHostToDevice);
cudaMemcpy(B_dev, B_host, FBytes, cudaMemcpyHostToDevice);
// log lookup table
const float my_log[25] = {
0.000000,
0.693147,
1.098612,
1.386294,
1.609438,
1.791759,
1.945910,
2.079442,
2.197225,
2.302585,
2.397895,
2.484907,
2.564949,
2.639057,
2.708050,
2.772589,
2.833213,
2.890372,
2.944439,
2.995732,
3.044522,
3.091042,
3.135494,
3.178054,
3.218876};
// Copy the log table into constant memory
cudaMemcpyToSymbol(mylog, (const float*)my_log, sizeof(my_log));
// Texture memory
// Describe the channel/resource and bind the texture to the array
cudaResourceDesc resDesc;
memset(&resDesc,0,sizeof(resDesc));
resDesc.resType = cudaResourceTypeArray;
resDesc.res.array.array = carray;
// Configure the texture read mode
cudaTextureDesc texDesc;
memset(&texDesc,0,sizeof(texDesc));
texDesc.readMode = cudaReadModeElementType;
// Create the texture object
cudaTextureObject_t tex;
cudaCreateTextureObject(&tex,&resDesc,&texDesc,NULL);
// GPU timing
cudaEvent_t start, stop;
float elapsedTime = 0.0;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// Launch the kernel
cal_entropy<<<(COLS*ROWS-1)/block+1,block>>>(tex,B_dev, ROWS, COLS);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
// Print the GPU execution time
printf("gpu_time:%fms\n",elapsedTime);
// Destroy the texture object
cudaDestroyTextureObject(tex);
// Free memory
cudaFree(B_dev);
cudaFree(A_dev);
free(B_host);
free(A_host);
return 0;
} |
4cdd5b84ccc14c0dbe5ca37bc738bf0b17568a43.hip | // !!! This is a file automatically generated by hipify!!!
#include "memoryWrapper.cuh"
#include <cassert>
void hipMemcpy(Memory const& dest, Memory const& src) {
assert(dest.bytes >= src.bytes);
hipMemcpyKind kind = hipMemcpyDefault;
if (dest.kind == memKind::device) {
if (src.kind == memKind::device)
kind = hipMemcpyDeviceToDevice;
else if (src.kind == memKind::pinned || src.kind == memKind::host)
kind = hipMemcpyHostToDevice;
else
throw (std::logic_error{"unknown memory type encountered"});
} else if (dest.kind == memKind::pinned || dest.kind == memKind::host) {
if (src.kind == memKind::device)
kind = hipMemcpyDeviceToHost;
else if (src.kind == memKind::pinned || src.kind == memKind::host)
kind = hipMemcpyHostToHost;
else
throw (std::logic_error{"unknown memory type encountered"});
}
checkCuda(hipMemcpy(dest._mem, src._mem, dest.bytes, kind));
}
| 4cdd5b84ccc14c0dbe5ca37bc738bf0b17568a43.cu | #include "memoryWrapper.cuh"
#include <cassert>
void cudaMemcpy(Memory const& dest, Memory const& src) {
assert(dest.bytes >= src.bytes);
cudaMemcpyKind kind = cudaMemcpyDefault;
if (dest.kind == memKind::device) {
if (src.kind == memKind::device)
kind = cudaMemcpyDeviceToDevice;
else if (src.kind == memKind::pinned || src.kind == memKind::host)
kind = cudaMemcpyHostToDevice;
else
throw (std::logic_error{"unknown memory type encountered"});
} else if (dest.kind == memKind::pinned || dest.kind == memKind::host) {
if (src.kind == memKind::device)
kind = cudaMemcpyDeviceToHost;
else if (src.kind == memKind::pinned || src.kind == memKind::host)
kind = cudaMemcpyHostToHost;
else
throw (std::logic_error{"unknown memory type encountered"});
}
checkCuda(cudaMemcpy(dest._mem, src._mem, dest.bytes, kind));
}
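// Design note: this overload reuses the name cudaMemcpy and resolves the transfer
// direction from the wrappers' memKind tags. On UVA-capable systems the explicit
// dispatch could likely be replaced by passing cudaMemcpyDefault and letting the
// runtime infer the direction from the pointers; the explicit mapping is kept so
// that unknown memory kinds are rejected up front.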
|
ebb7fdf9c5387ca4ee00bc37ae07840cce07f7e3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
__global__ void print_from_gpu(void) {
printf("Hello World! from thread [%d,%d] \
From device\n", threadIdx.x,blockIdx.x);
}
int main(void) {
printf("Hello World from host!\n");
hipLaunchKernelGGL(( print_from_gpu), dim3(1),dim3(1), 0, 0, );
hipDeviceSynchronize();
return 0;
}
| ebb7fdf9c5387ca4ee00bc37ae07840cce07f7e3.cu | #include<stdio.h>
#include<stdlib.h>
__global__ void print_from_gpu(void) {
printf("Hello World! from thread [%d,%d] \
From device\n", threadIdx.x,blockIdx.x);
}
int main(void) {
printf("Hello World from host!\n");
print_from_gpu<<<1,1>>>();
cudaDeviceSynchronize();
return 0;
}
|
16842e0afe12751921c602236f5ab008ccf80e2a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@precisions normal z -> s d c
@author Stan Tomov
*/
#include "common_magmasparse.h"
#define NB 64
/* =====================================================================
Matrix is m x n, and is divided into block rows, each NB x n.
Each CUDA block has NB threads to handle one block row.
Each thread handles one row, iterating across all columns.
*/
__global__ void
zcompact_kernel(
int m, int n,
magmaDoubleComplex *dA,
int ldda,
double *dnorms,
double tol,
magma_int_t *active,
magma_int_t *cBlock)
{
// dA is processed across row i (by the current thread)
int i = blockIdx.x*blockDim.x + threadIdx.x;
int cBlockSize = 0;
if ( i < m ) {
dA += i;
for(int j = 0; j<n; j++){
if (dnorms[j] > tol && active[j]){
dA[ldda*cBlockSize] = dA[ldda*j];
cBlockSize++;
}
else if (i==0)
active[j] = 0;
}
}
if (i==0)
*cBlock = cBlockSize;
}
__global__ void
zcompactactive_kernel(
int m,
int n,
magmaDoubleComplex *dA,
int ldda,
magma_int_t *active)
{
// dA is processed across row i (by the current thread)
int i = blockIdx.x*blockDim.x + threadIdx.x;
int cBlockSize = 0;
if ( i < m ) {
dA += i;
for(int j = 0; j<n; j++){
if (active[j]){
dA[ldda*cBlockSize] = dA[ldda*j];
cBlockSize++;
}
}
}
}
/* ===================================================================== */
/**
Purpose
-------
ZCOMPACT takes a set of n vectors of size m (in dA) and their norms and
compacts them into the cBlock size<=n vectors that have norms > tol.
The active mask array has 1 or 0, showing if a vector remained or not
in the compacted resulting set of vectors.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in,out]
dA COMPLEX DOUBLE PRECISION array, dimension (LDDA,N)
The m by n matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
dnorms DOUBLE PRECISION array, dimension N
The norms of the N vectors in dA
@param[in]
tol DOUBLE PRECISON
The tolerance value used in the criteria to compact or not.
@param[in,out]
active INTEGER array, dimension N
A mask of 1s and 0s showing if a vector remains or has been removed
@param[in,out]
cBlock magmaInt_ptr
The number of vectors that remain in dA (i.e., with norms > tol).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zcompact(
magma_int_t m,
magma_int_t n,
magmaDoubleComplex_ptr dA,
magma_int_t ldda,
magmaDouble_ptr dnorms,
double tol,
magmaInt_ptr active,
magmaInt_ptr cBlock,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < max(1,m))
info = -4;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return info;
}
if ( m == 0 || n == 0 )
return info;
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ) );
hipLaunchKernelGGL(( zcompact_kernel), dim3(grid), dim3(threads), 0, queue ,
m, n, dA, ldda, dnorms, tol, active, active+n );
magma_igetvector( 1, active+n, 1, cBlock, 1 );
return info;
}
/* ===================================================================== */
/**
Purpose
-------
ZCOMPACTACTIVE takes a set of n vectors of size m (in dA) and an
array of 1s and 0s indicating which vectors to compact (for 1s) and
which to disregard (for 0s).
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in,out]
dA COMPLEX DOUBLE PRECISION array, dimension (LDDA,N)
The m by n matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
active INTEGER array, dimension N
A mask of 1s and 0s showing if a vector remains or has been removed
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_z
********************************************************************/
extern "C" magma_int_t
magma_zcompactActive(
magma_int_t m,
magma_int_t n,
magmaDoubleComplex_ptr dA,
magma_int_t ldda,
magmaInt_ptr active,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < max(1,m))
info = -4;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return info;
}
if ( m == 0 || n == 0 )
return info;
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ) );
hipLaunchKernelGGL(( zcompactactive_kernel), dim3(grid), dim3(threads), 0, queue ,
m, n, dA, ldda, active);
return info;
}
/* ===================================================================== */
| 16842e0afe12751921c602236f5ab008ccf80e2a.cu | /*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@precisions normal z -> s d c
@author Stan Tomov
*/
#include "common_magmasparse.h"
#define NB 64
/* =====================================================================
Matrix is m x n, and is divided into block rows, each NB x n.
Each CUDA block has NB threads to handle one block row.
Each thread handles one row, iterating across all columns.
*/
__global__ void
zcompact_kernel(
int m, int n,
magmaDoubleComplex *dA,
int ldda,
double *dnorms,
double tol,
magma_int_t *active,
magma_int_t *cBlock)
{
// dA is processed across row i (by the current thread)
int i = blockIdx.x*blockDim.x + threadIdx.x;
int cBlockSize = 0;
if ( i < m ) {
dA += i;
for(int j = 0; j<n; j++){
if (dnorms[j] > tol && active[j]){
dA[ldda*cBlockSize] = dA[ldda*j];
cBlockSize++;
}
else if (i==0)
active[j] = 0;
}
}
if (i==0)
*cBlock = cBlockSize;
}
__global__ void
zcompactactive_kernel(
int m,
int n,
magmaDoubleComplex *dA,
int ldda,
magma_int_t *active)
{
// dA is processed across row i (by the current thread)
int i = blockIdx.x*blockDim.x + threadIdx.x;
int cBlockSize = 0;
if ( i < m ) {
dA += i;
for(int j = 0; j<n; j++){
if (active[j]){
dA[ldda*cBlockSize] = dA[ldda*j];
cBlockSize++;
}
}
}
}
/* ===================================================================== */
/**
Purpose
-------
ZCOMPACT takes a set of n vectors of size m (in dA) and their norms and
compacts them into the cBlock size<=n vectors that have norms > tol.
The active mask array has 1 or 0, showing if a vector remained or not
in the compacted resulting set of vectors.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in,out]
dA COMPLEX DOUBLE PRECISION array, dimension (LDDA,N)
The m by n matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
dnorms DOUBLE PRECISION array, dimension N
The norms of the N vectors in dA
@param[in]
tol DOUBLE PRECISON
The tolerance value used in the criteria to compact or not.
@param[in,out]
active INTEGER array, dimension N
A mask of 1s and 0s showing if a vector remains or has been removed
@param[in,out]
cBlock magmaInt_ptr
The number of vectors that remain in dA (i.e., with norms > tol).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
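/* Minimal usage sketch (hypothetical sizes and tolerance; assumes dA, dnorms and
active are device arrays already set up by the caller, with room for n+1 entries
in active):

magma_int_t cBlock = 0;
magma_zcompact( m, n, dA, ldda, dnorms, tol, active, &cBlock, queue );
// on return, the first cBlock columns of dA hold the vectors with norm > tol

The routine itself copies the surviving-column count back into cBlock via
magma_igetvector, as shown in the implementation below. */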
extern "C" magma_int_t
magma_zcompact(
magma_int_t m,
magma_int_t n,
magmaDoubleComplex_ptr dA,
magma_int_t ldda,
magmaDouble_ptr dnorms,
double tol,
magmaInt_ptr active,
magmaInt_ptr cBlock,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < max(1,m))
info = -4;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return info;
}
if ( m == 0 || n == 0 )
return info;
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ) );
zcompact_kernel<<< grid, threads, 0, queue >>>(
m, n, dA, ldda, dnorms, tol, active, active+n );
magma_igetvector( 1, active+n, 1, cBlock, 1 );
return info;
}
/* ===================================================================== */
/**
Purpose
-------
ZCOMPACTACTIVE takes a set of n vectors of size m (in dA) and an
array of 1s and 0sindicating which vectors to compact (for 1s) and
which to disregard (for 0s).
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in,out]
dA COMPLEX DOUBLE PRECISION array, dimension (LDDA,N)
The m by n matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
active INTEGER array, dimension N
A mask of 1s and 0s showing if a vector remains or has been removed
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_z
********************************************************************/
extern "C" magma_int_t
magma_zcompactActive(
magma_int_t m,
magma_int_t n,
magmaDoubleComplex_ptr dA,
magma_int_t ldda,
magmaInt_ptr active,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < max(1,m))
info = -4;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return info;
}
if ( m == 0 || n == 0 )
return info;
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ) );
zcompactactive_kernel<<< grid, threads, 0, queue >>>(
m, n, dA, ldda, active);
return info;
}
/* ===================================================================== */
|
5d3f7a48e7ab3c97b7b513ab2e5a765906d981d5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "reference_calc.cpp"
#include "utils.h"
static int const threadLimit = 512;
// Adapted from udacity code snippets
__global__ void find_optimum(float * d_out, float * d_in, bool isMinimum, int numEntries)
{
int myId = threadIdx.x + blockDim.x * blockIdx.x;
if (myId >= numEntries) {
return;
}
int tid = threadIdx.x;
// do reduction in global mem
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s)
{
if (isMinimum) {
d_in[myId] = min(d_in[myId + s], d_in[myId]);
} else {
d_in[myId] = max(d_in[myId + s], d_in[myId]);
}
}
__syncthreads(); // make sure all adds at one stage are done!
}
// only thread 0 writes result for this block back to global mem
if (tid == 0)
{
d_out[blockIdx.x] = d_in[myId];
}
}
float calculateDifference(const float * const d_immutable_input, int numEntries, float * h_minValue) {
float h_maxValue;
float *d_input;
float *d_intermediate_out;
float *d_out;
int blockWidth = threadLimit;
int numBlocks = numEntries / threadLimit + (numEntries % threadLimit != 0);
checkCudaErrors(hipMalloc(&d_input, sizeof(float) * numEntries));
checkCudaErrors(hipMalloc(&d_intermediate_out, sizeof(float) * numBlocks));
checkCudaErrors(hipMalloc(&d_out, sizeof(float)));
checkCudaErrors(hipMemcpy(d_input, d_immutable_input, sizeof(float) * numEntries, hipMemcpyDeviceToDevice));
hipLaunchKernelGGL(( find_optimum), dim3(numBlocks), dim3(blockWidth), 0, 0, d_intermediate_out, d_input, true, numEntries);
hipLaunchKernelGGL(( find_optimum), dim3(1), dim3(numBlocks), 0, 0, d_out, d_intermediate_out, true, numBlocks);
checkCudaErrors(hipMemcpy(h_minValue, d_out, sizeof(float), hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(d_input, d_immutable_input, sizeof(float) * numEntries, hipMemcpyDeviceToDevice));
hipLaunchKernelGGL(( find_optimum), dim3(numBlocks), dim3(blockWidth), 0, 0, d_intermediate_out, d_input, false, numEntries);
hipLaunchKernelGGL(( find_optimum), dim3(1), dim3(numBlocks), 0, 0, d_out, d_intermediate_out, false, numBlocks);
checkCudaErrors(hipMemcpy(&h_maxValue, d_out, sizeof(float), hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(d_input));
checkCudaErrors(hipFree(d_intermediate_out));
checkCudaErrors(hipFree(d_out));
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
float difference = h_maxValue - *h_minValue;
return difference;
}
// Adapted from udacity code snippet
__global__ void simple_histo(int *d_bins, const float *d_in, float minLum, float range, const int numBins, const int numEntries)
{
int myId = threadIdx.x + blockDim.x * blockIdx.x;
if (myId >= numEntries) {
return;
}
float value = d_in[myId];
unsigned int bin = min(static_cast<unsigned int>(numBins - 1),
static_cast<unsigned int>((value - minLum) / range * numBins));
atomicAdd(&(d_bins[bin]), 1);
}
void histogram(const float* const d_immutable_input, float minLum, float range, int numBins, int numEntries, int *&d_bins) {
int blockWidth = threadLimit;
int numBlocks = numEntries / threadLimit + (numEntries % threadLimit != 0);
float *d_input;
checkCudaErrors(hipMalloc(&d_input, sizeof(float) * numEntries));
checkCudaErrors(hipMalloc(&d_bins, sizeof(int) * numBins));
checkCudaErrors(hipMemcpy(d_input, d_immutable_input, sizeof(float) * numEntries, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemset(d_bins, 0, sizeof(int) * numBins));
hipLaunchKernelGGL(( simple_histo), dim3(numBlocks), dim3(blockWidth), 0, 0, d_bins, d_input, minLum, range, numBins, numEntries);
checkCudaErrors(hipFree(d_input));
}
void your_histogram_and_prefixsum(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
int numEntries = numRows * numCols;
float difference = calculateDifference(d_logLuminance, numEntries, &min_logLum);
max_logLum = min_logLum + difference;
int *d_bins = NULL;
histogram(d_logLuminance, min_logLum, difference, numBins, numEntries, d_bins);
// build the cumulative distribution (exclusive scan) on the host, then copy it back
int *h_bins = new int[numBins];
unsigned int *h_cdf = new unsigned int[numBins]();
checkCudaErrors(hipMemcpy(h_bins, d_bins, sizeof(int) * numBins, hipMemcpyDeviceToHost));
for (size_t i = 1; i < numBins; i++) {
h_cdf[i] = h_cdf[i - 1] + h_bins[i - 1];
}
checkCudaErrors(hipMemcpy(d_cdf, h_cdf, sizeof(unsigned int) * numBins, hipMemcpyHostToDevice));
checkCudaErrors(hipFree(d_bins));
delete[] h_bins;
delete[] h_cdf;
}
| 5d3f7a48e7ab3c97b7b513ab2e5a765906d981d5.cu | #include "reference_calc.cpp"
#include "utils.h"
static int const threadLimit = 512;
// Adapted from udacity code snippets
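// Caveat: this in-place interleaved reduction assumes blockDim.x is a power of two
// and that every d_in[myId + s] it touches is a valid element; for a final partial
// block the reads can run past numEntries, so in practice the input is expected to
// be padded (or numEntries rounded up) before calling it.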
__global__ void find_optimum(float * d_out, float * d_in, bool isMinimum, int numEntries)
{
int myId = threadIdx.x + blockDim.x * blockIdx.x;
if (myId >= numEntries) {
return;
}
int tid = threadIdx.x;
// do reduction in global mem
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s)
{
if (isMinimum) {
d_in[myId] = min(d_in[myId + s], d_in[myId]);
} else {
d_in[myId] = max(d_in[myId + s], d_in[myId]);
}
}
__syncthreads(); // make sure all adds at one stage are done!
}
// only thread 0 writes result for this block back to global mem
if (tid == 0)
{
d_out[blockIdx.x] = d_in[myId];
}
}
float calculateDifference(const float * const d_immutable_input, int numEntries, float * h_minValue) {
float h_maxValue;
float *d_input;
float *d_intermediate_out;
float *d_out;
int blockWidth = threadLimit;
int numBlocks = numEntries / threadLimit + (numEntries % threadLimit != 0);
checkCudaErrors(cudaMalloc(&d_input, sizeof(float) * numEntries));
checkCudaErrors(cudaMalloc(&d_intermediate_out, sizeof(float) * numBlocks));
checkCudaErrors(cudaMalloc(&d_out, sizeof(float)));
checkCudaErrors(cudaMemcpy(d_input, d_immutable_input, sizeof(float) * numEntries, cudaMemcpyDeviceToDevice));
find_optimum<<<numBlocks, blockWidth>>>(d_intermediate_out, d_input, true, numEntries);
find_optimum<<<1, numBlocks>>>(d_out, d_intermediate_out, true, numBlocks);
checkCudaErrors(cudaMemcpy(h_minValue, d_out, sizeof(float), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(d_input, d_immutable_input, sizeof(float) * numEntries, cudaMemcpyDeviceToDevice));
find_optimum<<<numBlocks, blockWidth>>>(d_intermediate_out, d_input, false, numEntries);
find_optimum<<<1, numBlocks>>>(d_out, d_intermediate_out, false, numBlocks);
checkCudaErrors(cudaMemcpy(&h_maxValue, d_out, sizeof(float), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(d_input));
checkCudaErrors(cudaFree(d_intermediate_out));
checkCudaErrors(cudaFree(d_out));
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
float difference = h_maxValue - *h_minValue;
return difference;
}
// Adapted from udacity code snippet
__global__ void simple_histo(int *d_bins, const float *d_in, float minLum, float range, const int numBins, const int numEntries)
{
int myId = threadIdx.x + blockDim.x * blockIdx.x;
if (myId >= numEntries) {
return;
}
float value = d_in[myId];
unsigned int bin = min(static_cast<unsigned int>(numBins - 1),
static_cast<unsigned int>((value - minLum) / range * numBins));
atomicAdd(&(d_bins[bin]), 1);
}
void histogram(const float* const d_immutable_input, float minLum, float range, int numBins, int numEntries, int *&d_bins) {
int blockWidth = threadLimit;
int numBlocks = numEntries / threadLimit + (numEntries % threadLimit != 0);
float *d_input;
checkCudaErrors(cudaMalloc(&d_input, sizeof(float) * numEntries));
checkCudaErrors(cudaMalloc(&d_bins, sizeof(int) * numBins));
checkCudaErrors(cudaMemcpy(d_input, d_immutable_input, sizeof(float) * numEntries, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemset(d_bins, 0, sizeof(int) * numBins));
simple_histo<<<numBlocks, blockWidth>>>(d_bins, d_input, minLum, range, numBins, numEntries);
checkCudaErrors(cudaFree(d_input));
}
void your_histogram_and_prefixsum(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
int numEntries = numRows * numCols;
float difference = calculateDifference(d_logLuminance, numEntries, &min_logLum);
max_logLum = min_logLum + difference;
int *d_bins = NULL;
histogram(d_logLuminance, min_logLum, difference, numBins, numEntries, d_bins);
// build the cumulative distribution (exclusive scan) on the host, then copy it back
int *h_bins = new int[numBins];
unsigned int *h_cdf = new unsigned int[numBins]();
checkCudaErrors(cudaMemcpy(h_bins, d_bins, sizeof(int) * numBins, cudaMemcpyDeviceToHost));
for (size_t i = 1; i < numBins; i++) {
h_cdf[i] = h_cdf[i - 1] + h_bins[i - 1];
}
checkCudaErrors(cudaMemcpy(d_cdf, h_cdf, sizeof(unsigned int) * numBins, cudaMemcpyHostToDevice));
checkCudaErrors(cudaFree(d_bins));
delete[] h_bins;
delete[] h_cdf;
}
|
826cabcbb3ec66fe120ff63579922be230b8f985.hip | // !!! This is a file automatically generated by hipify!!!
// C++ headers
#include <algorithm>
#include <numeric>
// CUDA runtime
#include <hip/hip_runtime.h>
// CMSSW headers
#include "CUDACore/cudaCheck.h"
#include "CUDACore/device_unique_ptr.h"
#include "plugin-SiPixelClusterizer/SiPixelRawToClusterGPUKernel.h" // !
#include "plugin-SiPixelClusterizer/gpuClusteringConstants.h" // !
#include "PixelRecHits.h"
#include "gpuPixelRecHits.h"
namespace {
__global__ void setHitsLayerStart(uint32_t const* __restrict__ hitsModuleStart,
pixelCPEforGPU::ParamsOnGPU const* cpeParams,
uint32_t* hitsLayerStart) {
auto i = blockIdx.x * blockDim.x + threadIdx.x;
assert(0 == hitsModuleStart[0]);
if (i < 11) {
hitsLayerStart[i] = hitsModuleStart[cpeParams->layerGeometry().layerStart[i]];
#ifdef GPU_DEBUG
printf("LayerStart %d %d: %d\n", i, cpeParams->layerGeometry().layerStart[i], hitsLayerStart[i]);
#endif
}
}
} // namespace
namespace pixelgpudetails {
TrackingRecHit2DCUDA PixelRecHitGPUKernel::makeHitsAsync(SiPixelDigisCUDA const& digis_d,
SiPixelClustersCUDA const& clusters_d,
BeamSpotCUDA const& bs_d,
pixelCPEforGPU::ParamsOnGPU const* cpeParams,
hipStream_t stream) const {
auto nHits = clusters_d.nClusters();
TrackingRecHit2DCUDA hits_d(nHits, cpeParams, clusters_d.clusModuleStart(), stream);
int threadsPerBlock = 128;
int blocks = digis_d.nModules(); // active modules (with digis)
#ifdef GPU_DEBUG
std::cout << "launching getHits kernel for " << blocks << " blocks" << std::endl;
#endif
if (blocks) // protect from empty events
hipLaunchKernelGGL(( gpuPixelRecHits::getHits), dim3(blocks), dim3(threadsPerBlock), 0, stream,
cpeParams, bs_d.data(), digis_d.view(), digis_d.nDigis(), clusters_d.view(), hits_d.view());
cudaCheck(hipGetLastError());
#ifdef GPU_DEBUG
hipDeviceSynchronize();
cudaCheck(hipGetLastError());
#endif
// assuming full warp of threads is better than a smaller number...
if (nHits) {
hipLaunchKernelGGL(( setHitsLayerStart), dim3(1), dim3(32), 0, stream, clusters_d.clusModuleStart(), cpeParams, hits_d.hitsLayerStart());
cudaCheck(hipGetLastError());
}
if (nHits) {
cms::cuda::fillManyFromVector(hits_d.phiBinner(), 10, hits_d.iphi(), hits_d.hitsLayerStart(), nHits, 256, stream);
cudaCheck(hipGetLastError());
}
#ifdef GPU_DEBUG
hipDeviceSynchronize();
cudaCheck(hipGetLastError());
#endif
return hits_d;
}
} // namespace pixelgpudetails
| 826cabcbb3ec66fe120ff63579922be230b8f985.cu | // C++ headers
#include <algorithm>
#include <numeric>
// CUDA runtime
#include <cuda_runtime.h>
// CMSSW headers
#include "CUDACore/cudaCheck.h"
#include "CUDACore/device_unique_ptr.h"
#include "plugin-SiPixelClusterizer/SiPixelRawToClusterGPUKernel.h" // !
#include "plugin-SiPixelClusterizer/gpuClusteringConstants.h" // !
#include "PixelRecHits.h"
#include "gpuPixelRecHits.h"
namespace {
__global__ void setHitsLayerStart(uint32_t const* __restrict__ hitsModuleStart,
pixelCPEforGPU::ParamsOnGPU const* cpeParams,
uint32_t* hitsLayerStart) {
auto i = blockIdx.x * blockDim.x + threadIdx.x;
assert(0 == hitsModuleStart[0]);
if (i < 11) {
hitsLayerStart[i] = hitsModuleStart[cpeParams->layerGeometry().layerStart[i]];
#ifdef GPU_DEBUG
printf("LayerStart %d %d: %d\n", i, cpeParams->layerGeometry().layerStart[i], hitsLayerStart[i]);
#endif
}
}
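// The 11 entries written above are, presumably, the hit offsets of the 10 pixel
// layers/disks plus one past-the-end marker taken from the cumulative module-start
// array, which is why a single block of 32 threads suffices for the launch below.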
} // namespace
namespace pixelgpudetails {
TrackingRecHit2DCUDA PixelRecHitGPUKernel::makeHitsAsync(SiPixelDigisCUDA const& digis_d,
SiPixelClustersCUDA const& clusters_d,
BeamSpotCUDA const& bs_d,
pixelCPEforGPU::ParamsOnGPU const* cpeParams,
cudaStream_t stream) const {
auto nHits = clusters_d.nClusters();
TrackingRecHit2DCUDA hits_d(nHits, cpeParams, clusters_d.clusModuleStart(), stream);
int threadsPerBlock = 128;
int blocks = digis_d.nModules(); // active modules (with digis)
#ifdef GPU_DEBUG
std::cout << "launching getHits kernel for " << blocks << " blocks" << std::endl;
#endif
if (blocks) // protect from empty events
gpuPixelRecHits::getHits<<<blocks, threadsPerBlock, 0, stream>>>(
cpeParams, bs_d.data(), digis_d.view(), digis_d.nDigis(), clusters_d.view(), hits_d.view());
cudaCheck(cudaGetLastError());
#ifdef GPU_DEBUG
cudaDeviceSynchronize();
cudaCheck(cudaGetLastError());
#endif
// assuming full warp of threads is better than a smaller number...
if (nHits) {
setHitsLayerStart<<<1, 32, 0, stream>>>(clusters_d.clusModuleStart(), cpeParams, hits_d.hitsLayerStart());
cudaCheck(cudaGetLastError());
}
if (nHits) {
cms::cuda::fillManyFromVector(hits_d.phiBinner(), 10, hits_d.iphi(), hits_d.hitsLayerStart(), nHits, 256, stream);
cudaCheck(cudaGetLastError());
}
#ifdef GPU_DEBUG
cudaDeviceSynchronize();
cudaCheck(cudaGetLastError());
#endif
return hits_d;
}
} // namespace pixelgpudetails
|
a0211e2e003d56b4bdbf1b40727aa12c17eeaa8a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "LayerCalculation.cuh"
//Checks if an Error has occured
int CheckCudaError(hipError_t err) {
if (err != hipSuccess) {
return 1;
}
return 0;
}
int LayerCalculation::offsetCalculation(int *arraySizes, int* poolingLayers, int batchCount, int layerNum, int Layers) {
if (batchDim == 0) {
for (int i = 0; i < Layers + 1; i++) {
batchDim += arraySizes[i * 4 + 1] * (int)pow(arraySizes[i * 4], 2);
if (poolingLayers[i * 3] == 1) {
poolingBatchDim += arraySizes[i * 4 + 1] * (int)pow(arraySizes[i * 4], 2) / pow(poolingLayers[i * 3 + 1], 2);
}
}
return 0;
}
int offset = 0;
for (int i = 0; i < layerNum - 1; i++) {
offset += arraySizes[i * 4 + 1] * (int)pow(arraySizes[i * 4], 2);
}
offset += batchCount * batchDim;
return offset;
}
int LayerCalculation::poolingOffsetCalculation(int *arraySizes, int* poolingLayers, int batchCount, int layerNum) {
int offset = 0;
for (int i = 0; i < layerNum - 1; i++) {
if (poolingLayers[i * 3] == 1) {
offset += arraySizes[i * 4 + 1] * (int)pow(arraySizes[i * 4], 2) / pow(poolingLayers[i * 3 + 1], 2);
}
}
offset += poolingBatchDim * batchCount;
return offset;
}
//SoftMax Function (Probability for each Category with an exponential function)
__global__
void CudaSoftMaxCalculation1(float* results, float* softMaxResults, int s_offset, int r_offset) {
extern __shared__ float sdata[];
unsigned int tid = threadIdx.x;
sdata[tid] = exp(results[r_offset + tid]);
__syncthreads();
//Cuda Reduction Method
//Source: http://developer.download.nvidia.com/compute/cuda/1.1-Beta/x86_website/projects/reduction/doc/reduction.pdf (page 7)
for (unsigned int s = 1; s < blockDim.x; s *= 2) {
if (tid % (2 * s) == 0) {
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
softMaxResults[s_offset + tid] = (exp(results[r_offset + tid])) / sdata[0];
}
int LayerCalculation::cudaSoftMaxCalculation(float* results, float* softMaxResults, int* h_ArraySizes, int* poolingLayers, int batchCount, int layerNum, int softMaxOffset, dim3 KernelSizes[]) {
int r_offset = offsetCalculation(h_ArraySizes, poolingLayers, batchCount, layerNum, 0);
CudaSoftMaxCalculation1 << <1, KernelSizes[0], KernelSizes[0].x * sizeof(float) >> > (results, softMaxResults, softMaxOffset, r_offset);
int ret = CheckCudaError(hipGetLastError());
return ret;
}
| a0211e2e003d56b4bdbf1b40727aa12c17eeaa8a.cu | #include "LayerCalculation.cuh"
//Checks if an Error has occured
int CheckCudaError(cudaError_t err) {
if (err != cudaSuccess) {
return 1;
}
return 0;
}
int LayerCalculation::offsetCalculation(int *arraySizes, int* poolingLayers, int batchCount, int layerNum, int Layers) {
if (batchDim == 0) {
for (int i = 0; i < Layers + 1; i++) {
batchDim += arraySizes[i * 4 + 1] * (int)pow(arraySizes[i * 4], 2);
if (poolingLayers[i * 3] == 1) {
poolingBatchDim += arraySizes[i * 4 + 1] * (int)pow(arraySizes[i * 4], 2) / pow(poolingLayers[i * 3 + 1], 2);
}
}
return 0;
}
int offset = 0;
for (int i = 0; i < layerNum - 1; i++) {
offset += arraySizes[i * 4 + 1] * (int)pow(arraySizes[i * 4], 2);
}
offset += batchCount * batchDim;
return offset;
}
int LayerCalculation::poolingOffsetCalculation(int *arraySizes, int* poolingLayers, int batchCount, int layerNum) {
int offset = 0;
for (int i = 0; i < layerNum - 1; i++) {
if (poolingLayers[i * 3] == 1) {
offset += arraySizes[i * 4 + 1] * (int)pow(arraySizes[i * 4], 2) / pow(poolingLayers[i * 3 + 1], 2);
}
}
offset += poolingBatchDim * batchCount;
return offset;
}
//SoftMax Function (Probability for each Category with an exponential function)
__global__
void CudaSoftMaxCalculation1(float* results, float* softMaxResults, int s_offset, int r_offset) {
extern __shared__ float sdata[];
unsigned int tid = threadIdx.x;
sdata[tid] = exp(results[r_offset + tid]);
__syncthreads();
//Cuda Reduction Method
//Source: http://developer.download.nvidia.com/compute/cuda/1.1-Beta/x86_website/projects/reduction/doc/reduction.pdf (page 7)
for (unsigned int s = 1; s < blockDim.x; s *= 2) {
if (tid % (2 * s) == 0) {
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
softMaxResults[s_offset + tid] = (exp(results[r_offset + tid])) / sdata[0];
}
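// Note: the interleaved tree reduction above sums exp(results[...]) across the block
// and assumes blockDim.x (the number of categories) is a power of two; the dynamic
// shared-memory size in the launch below provides one float per category.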
int LayerCalculation::cudaSoftMaxCalculation(float* results, float* softMaxResults, int* h_ArraySizes, int* poolingLayers, int batchCount, int layerNum, int softMaxOffset, dim3 KernelSizes[]) {
int r_offset = offsetCalculation(h_ArraySizes, poolingLayers, batchCount, layerNum, 0);
CudaSoftMaxCalculation1 << <1, KernelSizes[0], KernelSizes[0].x * sizeof(float) >> > (results, softMaxResults, softMaxOffset, r_offset);
int ret = CheckCudaError(cudaGetLastError());
return ret;
}
|
0df61c25461e4313aaacb19ed9e1d981263507b2.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/matrix/math.cuh>
#include <raft/random/rng.cuh>
#include <raft/stats/mean.cuh>
#include <raft/stats/stddev.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace stats {
template <typename T>
struct StdDevInputs {
T tolerance, mean, stddev;
int rows, cols;
bool sample, rowMajor;
unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const StdDevInputs<T>& dims)
{
return os;
}
template <typename T>
class StdDevTest : public ::testing::TestWithParam<StdDevInputs<T>> {
public:
StdDevTest()
: params(::testing::TestWithParam<StdDevInputs<T>>::GetParam()),
stream(resource::get_cuda_stream(handle)),
rows(params.rows),
cols(params.cols),
data(rows * cols, stream),
mean_act(cols, stream),
stddev_act(cols, stream),
vars_act(cols, stream)
{
}
protected:
void SetUp() override
{
random::RngState r(params.seed);
int len = rows * cols;
data.resize(len, stream);
mean_act.resize(cols, stream);
stddev_act.resize(cols, stream);
vars_act.resize(cols, stream);
normal(handle, r, data.data(), len, params.mean, params.stddev);
stdVarSGtest(data.data(), stream);
resource::sync_stream(handle, stream);
}
void stdVarSGtest(T* data, hipStream_t stream)
{
int rows = params.rows, cols = params.cols;
if (params.rowMajor) {
using layout_t = raft::row_major;
mean(handle,
raft::make_device_matrix_view<const T, int, layout_t>(data, rows, cols),
raft::make_device_vector_view<T, int>(mean_act.data(), cols),
params.sample);
stddev(handle,
raft::make_device_matrix_view<const T, int, layout_t>(data, rows, cols),
raft::make_device_vector_view<const T, int>(mean_act.data(), cols),
raft::make_device_vector_view<T, int>(stddev_act.data(), cols),
params.sample);
vars(handle,
raft::make_device_matrix_view<const T, int, layout_t>(data, rows, cols),
raft::make_device_vector_view<const T, int>(mean_act.data(), cols),
raft::make_device_vector_view<T, int>(vars_act.data(), cols),
params.sample);
} else {
using layout_t = raft::col_major;
mean(handle,
raft::make_device_matrix_view<const T, int, layout_t>(data, rows, cols),
raft::make_device_vector_view<T>(mean_act.data(), cols),
params.sample);
stddev(handle,
raft::make_device_matrix_view<const T, int, layout_t>(data, rows, cols),
raft::make_device_vector_view<const T, int>(mean_act.data(), cols),
raft::make_device_vector_view<T, int>(stddev_act.data(), cols),
params.sample);
vars(handle,
raft::make_device_matrix_view<const T, int, layout_t>(data, rows, cols),
raft::make_device_vector_view<const T, int>(mean_act.data(), cols),
raft::make_device_vector_view<T, int>(vars_act.data(), cols),
params.sample);
}
raft::matrix::seqRoot(vars_act.data(), T(1), cols, stream);
}
protected:
raft::resources handle;
hipStream_t stream;
StdDevInputs<T> params;
int rows, cols;
rmm::device_uvector<T> data, mean_act, stddev_act, vars_act;
};
const std::vector<StdDevInputs<float>> inputsf = {
{0.1f, 1.f, 2.f, 1024, 32, true, false, 1234ULL},
{0.1f, 1.f, 2.f, 1024, 64, true, false, 1234ULL},
{0.1f, 1.f, 2.f, 1024, 128, true, false, 1234ULL},
{0.1f, 1.f, 2.f, 1024, 256, true, false, 1234ULL},
{0.1f, -1.f, 2.f, 1024, 32, false, false, 1234ULL},
{0.1f, -1.f, 2.f, 1024, 64, false, false, 1234ULL},
{0.1f, -1.f, 2.f, 1024, 128, false, false, 1234ULL},
{0.1f, -1.f, 2.f, 1024, 256, false, false, 1234ULL},
{0.1f, 1.f, 2.f, 1024, 32, true, true, 1234ULL},
{0.1f, 1.f, 2.f, 1024, 64, true, true, 1234ULL},
{0.1f, 1.f, 2.f, 1024, 128, true, true, 1234ULL},
{0.1f, 1.f, 2.f, 1024, 256, true, true, 1234ULL},
{0.1f, -1.f, 2.f, 1024, 32, false, true, 1234ULL},
{0.1f, -1.f, 2.f, 1024, 64, false, true, 1234ULL},
{0.1f, -1.f, 2.f, 1024, 128, false, true, 1234ULL},
{0.1f, -1.f, 2.f, 1024, 256, false, true, 1234ULL}};
const std::vector<StdDevInputs<double>> inputsd = {
{0.1, 1.0, 2.0, 1024, 32, true, false, 1234ULL},
{0.1, 1.0, 2.0, 1024, 64, true, false, 1234ULL},
{0.1, 1.0, 2.0, 1024, 128, true, false, 1234ULL},
{0.1, 1.0, 2.0, 1024, 256, true, false, 1234ULL},
{0.1, -1.0, 2.0, 1024, 32, false, false, 1234ULL},
{0.1, -1.0, 2.0, 1024, 64, false, false, 1234ULL},
{0.1, -1.0, 2.0, 1024, 128, false, false, 1234ULL},
{0.1, -1.0, 2.0, 1024, 256, false, false, 1234ULL},
{0.1, 1.0, 2.0, 1024, 32, true, true, 1234ULL},
{0.1, 1.0, 2.0, 1024, 64, true, true, 1234ULL},
{0.1, 1.0, 2.0, 1024, 128, true, true, 1234ULL},
{0.1, 1.0, 2.0, 1024, 256, true, true, 1234ULL},
{0.1, -1.0, 2.0, 1024, 32, false, true, 1234ULL},
{0.1, -1.0, 2.0, 1024, 64, false, true, 1234ULL},
{0.1, -1.0, 2.0, 1024, 128, false, true, 1234ULL},
{0.1, -1.0, 2.0, 1024, 256, false, true, 1234ULL}};
typedef StdDevTest<float> StdDevTestF;
TEST_P(StdDevTestF, Result)
{
ASSERT_TRUE(devArrMatch(
params.stddev, stddev_act.data(), params.cols, CompareApprox<float>(params.tolerance), stream));
ASSERT_TRUE(devArrMatch(stddev_act.data(),
vars_act.data(),
params.cols,
CompareApprox<float>(params.tolerance),
stream));
}
typedef StdDevTest<double> StdDevTestD;
TEST_P(StdDevTestD, Result)
{
ASSERT_TRUE(devArrMatch(params.stddev,
stddev_act.data(),
params.cols,
CompareApprox<double>(params.tolerance),
stream));
ASSERT_TRUE(devArrMatch(stddev_act.data(),
vars_act.data(),
params.cols,
CompareApprox<double>(params.tolerance),
stream));
}
INSTANTIATE_TEST_SUITE_P(StdDevTests, StdDevTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_SUITE_P(StdDevTests, StdDevTestD, ::testing::ValuesIn(inputsd));
} // end namespace stats
} // end namespace raft
| 0df61c25461e4313aaacb19ed9e1d981263507b2.cu | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/matrix/math.cuh>
#include <raft/random/rng.cuh>
#include <raft/stats/mean.cuh>
#include <raft/stats/stddev.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace stats {
template <typename T>
struct StdDevInputs {
T tolerance, mean, stddev;
int rows, cols;
bool sample, rowMajor;
unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const StdDevInputs<T>& dims)
{
return os;
}
template <typename T>
class StdDevTest : public ::testing::TestWithParam<StdDevInputs<T>> {
public:
StdDevTest()
: params(::testing::TestWithParam<StdDevInputs<T>>::GetParam()),
stream(resource::get_cuda_stream(handle)),
rows(params.rows),
cols(params.cols),
data(rows * cols, stream),
mean_act(cols, stream),
stddev_act(cols, stream),
vars_act(cols, stream)
{
}
protected:
void SetUp() override
{
random::RngState r(params.seed);
int len = rows * cols;
data.resize(len, stream);
mean_act.resize(cols, stream);
stddev_act.resize(cols, stream);
vars_act.resize(cols, stream);
normal(handle, r, data.data(), len, params.mean, params.stddev);
stdVarSGtest(data.data(), stream);
resource::sync_stream(handle, stream);
}
void stdVarSGtest(T* data, cudaStream_t stream)
{
int rows = params.rows, cols = params.cols;
if (params.rowMajor) {
using layout_t = raft::row_major;
mean(handle,
raft::make_device_matrix_view<const T, int, layout_t>(data, rows, cols),
raft::make_device_vector_view<T, int>(mean_act.data(), cols),
params.sample);
stddev(handle,
raft::make_device_matrix_view<const T, int, layout_t>(data, rows, cols),
raft::make_device_vector_view<const T, int>(mean_act.data(), cols),
raft::make_device_vector_view<T, int>(stddev_act.data(), cols),
params.sample);
vars(handle,
raft::make_device_matrix_view<const T, int, layout_t>(data, rows, cols),
raft::make_device_vector_view<const T, int>(mean_act.data(), cols),
raft::make_device_vector_view<T, int>(vars_act.data(), cols),
params.sample);
} else {
using layout_t = raft::col_major;
mean(handle,
raft::make_device_matrix_view<const T, int, layout_t>(data, rows, cols),
raft::make_device_vector_view<T>(mean_act.data(), cols),
params.sample);
stddev(handle,
raft::make_device_matrix_view<const T, int, layout_t>(data, rows, cols),
raft::make_device_vector_view<const T, int>(mean_act.data(), cols),
raft::make_device_vector_view<T, int>(stddev_act.data(), cols),
params.sample);
vars(handle,
raft::make_device_matrix_view<const T, int, layout_t>(data, rows, cols),
raft::make_device_vector_view<const T, int>(mean_act.data(), cols),
raft::make_device_vector_view<T, int>(vars_act.data(), cols),
params.sample);
}
raft::matrix::seqRoot(vars_act.data(), T(1), cols, stream);
}
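// seqRoot takes the element-wise square root in place, so after this call vars_act
// holds standard deviations and can be compared directly against stddev_act in the
// test bodies below.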
protected:
raft::resources handle;
cudaStream_t stream;
StdDevInputs<T> params;
int rows, cols;
rmm::device_uvector<T> data, mean_act, stddev_act, vars_act;
};
const std::vector<StdDevInputs<float>> inputsf = {
{0.1f, 1.f, 2.f, 1024, 32, true, false, 1234ULL},
{0.1f, 1.f, 2.f, 1024, 64, true, false, 1234ULL},
{0.1f, 1.f, 2.f, 1024, 128, true, false, 1234ULL},
{0.1f, 1.f, 2.f, 1024, 256, true, false, 1234ULL},
{0.1f, -1.f, 2.f, 1024, 32, false, false, 1234ULL},
{0.1f, -1.f, 2.f, 1024, 64, false, false, 1234ULL},
{0.1f, -1.f, 2.f, 1024, 128, false, false, 1234ULL},
{0.1f, -1.f, 2.f, 1024, 256, false, false, 1234ULL},
{0.1f, 1.f, 2.f, 1024, 32, true, true, 1234ULL},
{0.1f, 1.f, 2.f, 1024, 64, true, true, 1234ULL},
{0.1f, 1.f, 2.f, 1024, 128, true, true, 1234ULL},
{0.1f, 1.f, 2.f, 1024, 256, true, true, 1234ULL},
{0.1f, -1.f, 2.f, 1024, 32, false, true, 1234ULL},
{0.1f, -1.f, 2.f, 1024, 64, false, true, 1234ULL},
{0.1f, -1.f, 2.f, 1024, 128, false, true, 1234ULL},
{0.1f, -1.f, 2.f, 1024, 256, false, true, 1234ULL}};
const std::vector<StdDevInputs<double>> inputsd = {
{0.1, 1.0, 2.0, 1024, 32, true, false, 1234ULL},
{0.1, 1.0, 2.0, 1024, 64, true, false, 1234ULL},
{0.1, 1.0, 2.0, 1024, 128, true, false, 1234ULL},
{0.1, 1.0, 2.0, 1024, 256, true, false, 1234ULL},
{0.1, -1.0, 2.0, 1024, 32, false, false, 1234ULL},
{0.1, -1.0, 2.0, 1024, 64, false, false, 1234ULL},
{0.1, -1.0, 2.0, 1024, 128, false, false, 1234ULL},
{0.1, -1.0, 2.0, 1024, 256, false, false, 1234ULL},
{0.1, 1.0, 2.0, 1024, 32, true, true, 1234ULL},
{0.1, 1.0, 2.0, 1024, 64, true, true, 1234ULL},
{0.1, 1.0, 2.0, 1024, 128, true, true, 1234ULL},
{0.1, 1.0, 2.0, 1024, 256, true, true, 1234ULL},
{0.1, -1.0, 2.0, 1024, 32, false, true, 1234ULL},
{0.1, -1.0, 2.0, 1024, 64, false, true, 1234ULL},
{0.1, -1.0, 2.0, 1024, 128, false, true, 1234ULL},
{0.1, -1.0, 2.0, 1024, 256, false, true, 1234ULL}};
typedef StdDevTest<float> StdDevTestF;
TEST_P(StdDevTestF, Result)
{
ASSERT_TRUE(devArrMatch(
params.stddev, stddev_act.data(), params.cols, CompareApprox<float>(params.tolerance), stream));
ASSERT_TRUE(devArrMatch(stddev_act.data(),
vars_act.data(),
params.cols,
CompareApprox<float>(params.tolerance),
stream));
}
typedef StdDevTest<double> StdDevTestD;
TEST_P(StdDevTestD, Result)
{
ASSERT_TRUE(devArrMatch(params.stddev,
stddev_act.data(),
params.cols,
CompareApprox<double>(params.tolerance),
stream));
ASSERT_TRUE(devArrMatch(stddev_act.data(),
vars_act.data(),
params.cols,
CompareApprox<double>(params.tolerance),
stream));
}
INSTANTIATE_TEST_SUITE_P(StdDevTests, StdDevTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_SUITE_P(StdDevTests, StdDevTestD, ::testing::ValuesIn(inputsd));
} // end namespace stats
} // end namespace raft
|
bdfa978f8e72d08097f286e1724de59e499044cb.hip | // !!! This is a file automatically generated by hipify!!!
void cuda_query(const int dev){
int deviceCount;
hipGetDeviceCount(&deviceCount);
if(deviceCount == 0){
printf("\n deviceCount is zero. I quit!!!");
exit(EXIT_FAILURE);
}
//const int dev = (deviceCount == 1) ? 0 : 3;
hipSetDevice(dev);
/*hipDeviceProp_t devProp;
HANDLE_ERROR(hipGetDeviceProperties(&devProp, dev));
printf("\n Total number of device: %d", deviceCount);
printf("\n Using device Number: %d", dev);
printf("\n Device name: %s", devProp.name);
//printf("\n devProp.major: %d", devProp.major);
//printf("\n devProp.minor: %d", devProp.minor);
if(devProp.major==1){//Fermi
if(devProp.minor==1){
printf("\n SM Count: %d", devProp.multiProcessorCount*48);
}else{
printf("\n SM Count: %d", devProp.multiProcessorCount*32);
}
}else if(devProp.major==3){//Kepler
printf("\n SM Count: %d", devProp.multiProcessorCount*192);
}else if(devProp.major==5){//Maxwell
printf("\n SM Count: %d", devProp.multiProcessorCount*128);
}else if(devProp.major==6){//Pascal
if(devProp.minor==1){
printf("\n SM Count: %d", devProp.multiProcessorCount*128);
}else if(devProp.minor==0){
printf("\n SM Count: %d", devProp.multiProcessorCount*64);
}
}
printf("\n Compute Capability: v%d.%d", (int)devProp.major, (int)devProp.minor);
printf("\n Memory Clock Rate: %d(kHz)", devProp.memoryClockRate);
printf("\n Memory Bus Width: %d(bits)", devProp.memoryBusWidth);
const double maxBW = 2.0 * devProp.memoryClockRate*(devProp.memoryBusWidth/8.0)/1.0E3;
printf("\n Peak Memory Bandwidth: %f(MB/s)\n\n", maxBW);*/
} | bdfa978f8e72d08097f286e1724de59e499044cb.cu |
void cuda_query(const int dev){
int deviceCount;
cudaGetDeviceCount(&deviceCount);
if(deviceCount == 0){
printf("\n deviceCount is zero. I quit!!!");
exit(EXIT_FAILURE);
}
//const int dev = (deviceCount == 1) ? 0 : 3;
cudaSetDevice(dev);
/*cudaDeviceProp devProp;
HANDLE_ERROR(cudaGetDeviceProperties(&devProp, dev));
printf("\n Total number of device: %d", deviceCount);
printf("\n Using device Number: %d", dev);
printf("\n Device name: %s", devProp.name);
//printf("\n devProp.major: %d", devProp.major);
//printf("\n devProp.minor: %d", devProp.minor);
if(devProp.major==1){//Fermi
if(devProp.minor==1){
printf("\n SM Count: %d", devProp.multiProcessorCount*48);
}else{
printf("\n SM Count: %d", devProp.multiProcessorCount*32);
}
}else if(devProp.major==3){//Kepler
printf("\n SM Count: %d", devProp.multiProcessorCount*192);
}else if(devProp.major==5){//Maxwell
printf("\n SM Count: %d", devProp.multiProcessorCount*128);
}else if(devProp.major==6){//Pascal
if(devProp.minor==1){
printf("\n SM Count: %d", devProp.multiProcessorCount*128);
}else if(devProp.minor==0){
printf("\n SM Count: %d", devProp.multiProcessorCount*64);
}
}
printf("\n Compute Capability: v%d.%d", (int)devProp.major, (int)devProp.minor);
printf("\n Memory Clock Rate: %d(kHz)", devProp.memoryClockRate);
printf("\n Memory Bus Width: %d(bits)", devProp.memoryBusWidth);
const double maxBW = 2.0 * devProp.memoryClockRate*(devProp.memoryBusWidth/8.0)/1.0E3;
printf("\n Peak Memory Bandwidth: %f(MB/s)\n\n", maxBW);*/
} |
a83102c99592c43a75c8ee288e098bcbcbd75618.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "add.hip"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int N = XSIZE*YSIZE;
double *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
double *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
add), dim3(gridBlock),dim3(threadBlock), 0, 0, N,a,b);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
add), dim3(gridBlock),dim3(threadBlock), 0, 0, N,a,b);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
add), dim3(gridBlock),dim3(threadBlock), 0, 0, N,a,b);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | a83102c99592c43a75c8ee288e098bcbcbd75618.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "add.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int N = XSIZE*YSIZE;
double *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
double *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
add<<<gridBlock,threadBlock>>>(N,a,b);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
add<<<gridBlock,threadBlock>>>(N,a,b);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
add<<<gridBlock,threadBlock>>>(N,a,b);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
69f6f3272ecf244c46e3de50321f92a1c3cbe812.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/utility_ops.h"
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/system/hip/execution_policy.h>
#include <thrust/unique.h>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/flatten_op.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <>
bool WeightedSumOp<CUDAContext>::RunOnDevice() {
if (Input(0).IsType<float>()) {
return DoRunWithType<float>();
} else if (Input(0).IsType<at::Half>()) {
return DoRunWithType<at::Half>();
} else {
CAFFE_THROW("Unsupported inputs");
}
return false;
}
template <>
bool SumOp<CUDAContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<float, int32_t, int64_t>>::call(
this, Input(0));
}
REGISTER_CUDA_OPERATOR(Print, PrintOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(Flatten, FlattenOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(FlattenToVec, FlattenToVecOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(Alias, AliasOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(ResizeLike, ResizeLikeOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(Sum, SumOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(WeightedSum, WeightedSumOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(EnsureDense, EnsureDenseOp<CUDAContext>);
__global__ void NanCheckKernel(int N, const float* X, bool* result) {
bool has_nan = false;
CUDA_1D_KERNEL_LOOP(i, N) {
// Note: we have no need to do early return, since only if this fails
// will we not need to inspect all elements. No need to optimize the
// case that will fail.
has_nan = has_nan || isnan(X[i]) || isinf(X[i]);
}
__syncthreads();
if (has_nan) {
result[0] = true;
}
}
template <>
bool NanCheckOp<CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0);
const size_t N = X.numel();
const float* data_ptr = X.data<float>();
ReinitializeTensor(&scratch_, {1}, at::dtype<bool>().device(CUDA));
math::Set<bool, CUDAContext>(
1, false, scratch_.mutable_data<bool>(), &context_);
hipLaunchKernelGGL(( NanCheckKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N, X.data<float>(), scratch_.mutable_data<bool>());
bool result = false;
{
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
CUDA_ENFORCE(hipMemcpyAsync(
&result,
scratch_.raw_data(),
1,
hipMemcpyDefault,
context_.cuda_stream()));
}
// Note: we must synchronize here so we can inspect the result
context_.FinishDeviceComputation();
// Print out diagnostic info if we have a NaN or inf
if (result) {
std::cerr << "Tensor contained NaN or inf: " << this->debug_def().input(0)
<< std::endl;
for (int j = 0; j < InputSize(); j++) {
Tensor cpu_X(CPU);
cpu_X.ResizeLike(Input(j));
// Hack to cause allocation happen here, so it won't happen
// when we do CopyFrom. We need the mutex then because host->gpu
// copies seem to possibly lock with NCCL.
cpu_X.mutable_data<float>();
{
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
cpu_X.CopyFrom(Input(j)); // sync copy
}
std::cerr << "Input tensor: " << j << ": [" << this->debug_def().input(j)
<< "]" << std::endl;
tensorPrinter_.Print<float>(cpu_X);
if (j == 0) {
std::cerr << "NaN idxs:" << std::endl;
auto* cpu_X_data = cpu_X.data<float>();
for (size_t i = 0; i < cpu_X.numel(); ++i) {
if (std::isnan(cpu_X_data[i]) || std::isinf(cpu_X_data[i])) {
std::cerr << i << " ";
}
}
}
std::cerr << std::endl;
}
return false;
}
// This op should act as an identity matrix if we don't find any NaNs/infs.
// Copy over the data if we are not doing this in-place.
if (&X != Y) {
Y->CopyFrom(X, true /*async*/);
}
return true;
}
REGISTER_CUDA_OPERATOR(NanCheck, NanCheckOp<CUDAContext>);
/**
* @brief Update slices of Y in-place with a batch of weighted X's.
* Y[idx] = alpha[b] * X[b][i] + Y[idx]
* i=0,...,N-1
* b=0,...,B-1
* idx=Indices[i]
*/
template <typename T_INDEX>
__global__ void AxpySliceKernel(
const float* weight0,
const int64_t N,
const int64_t B,
const int64_t slice_size,
const float** alpha,
const float** X,
const T_INDEX* Indices,
float* Y,
const int64_t M) {
// This implementation requires that the first weight is 1.0
CUDA_KERNEL_ASSERT(weight0[0] == 1.0);
for (int i = blockIdx.x; i < N; i += gridDim.x) {
T_INDEX idx = Indices[i];
float* y_offset = Y + (idx * slice_size);
for (int b = 0; b < B; b++) {
float a = *alpha[b];
const float* x_offset = X[b] + (i * slice_size);
for (int j = threadIdx.x; j < slice_size; j += blockDim.x) {
atomicAdd(&y_offset[j], a * x_offset[j]);
}
}
}
}
// this kernel is a custom version of AxpySliceKernel
// to be used when there is only one weighted X to update
// slice of Y.
template <typename T_INDEX>
__global__ void AxpySliceKernel2(
const float* weight0,
const int64_t N,
const int64_t slice_size,
const float* alpha,
const float* X,
const T_INDEX* Indices,
float* Y,
const int64_t M) {
// This implementation requires that the first weight is 1.0
CUDA_KERNEL_ASSERT(weight0[0] == 1.0);
for (int i = blockIdx.x; i < N; i += gridDim.x) {
T_INDEX idx = Indices[i];
float* y_offset = Y + (idx * slice_size);
for (int j = threadIdx.x; j < slice_size; j += blockDim.x) {
atomicAdd(&y_offset[j], alpha[0] * X[(i * slice_size) + j]);
}
}
}
template <>
bool ScatterWeightedSumOp<float, CUDAContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(this, Input(2));
}
template <>
template <typename Index>
bool ScatterWeightedSumOp<float, CUDAContext>::DoRunWithType() {
CAFFE_ENFORCE_EQ(InputSize() % 2, 1);
auto& X0 = Input(0);
auto& weight0 = Input(1);
auto& indices = Input(2);
auto* output = Output(0);
CAFFE_ENFORCE_EQ(&X0, output, "In place operation is required");
CAFFE_ENFORCE_GT(X0.numel(), 0);
CAFFE_ENFORCE_GT(X0.dim(), 0, "X0 has to be at least the vector");
CAFFE_ENFORCE_EQ(weight0.numel(), 1);
int64_t M = X0.numel();
int64_t N = X0.dim(0);
int64_t K = indices.numel();
int64_t block_size = M / N;
float* data = output->template mutable_data<float>();
const int64_t B = (InputSize() - 3) / 2;
if (B > 1) {
// In order to have all device pointers of x_i (and weight_i similarly)
// consecutively in device memory, copy pointers to a host vector and then
// copy back into a device array.
ReinitializeTensor(&x_data_host_, {B}, at::dtype<float*>().device(CPU));
ReinitializeTensor(&weights_host_, {B}, at::dtype<float*>().device(CPU));
ReinitializeTensor(&x_data_device_, {B}, at::dtype<float*>().device(CUDA));
ReinitializeTensor(&weights_device_, {B}, at::dtype<float*>().device(CUDA));
float** x_data_host = x_data_host_.mutable_data<float*>();
float** weights_host = weights_host_.mutable_data<float*>();
float** x_data_device = x_data_device_.mutable_data<float*>();
float** weights_device = weights_device_.mutable_data<float*>();
for (int inp = 3; inp < InputSize(); inp += 2) {
int idx = (inp - 3) / 2;
x_data_host[idx] = static_cast<float*>(Input(inp).raw_data());
weights_host[idx] = static_cast<float*>(Input(inp + 1).raw_data());
}
context_.Copy<float*, CPUContext, CUDAContext>(
B, x_data_host, x_data_device);
context_.Copy<float*, CPUContext, CUDAContext>(
B, weights_host, weights_device);
hipLaunchKernelGGL(( AxpySliceKernel),
dim3(std::min<int64_t>(K, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
weight0.template data<float>(),
K,
B,
block_size,
const_cast<const float**>(weights_device),
const_cast<const float**>(x_data_device),
indices.template data<Index>(),
data,
M);
} else {
// when only one input exists to update data buffer,
// avoid copying pointers to device array to prevent
// copy overhead
auto& X1 = Input(3);
auto& weight1 = Input(4);
hipLaunchKernelGGL(( AxpySliceKernel2),
dim3(std::min<int64_t>(K, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
weight0.template data<float>(),
K,
block_size,
weight1.template data<float>(),
X1.template data<float>(),
indices.template data<Index>(),
data,
M);
}
return true;
}
REGISTER_CUDA_OPERATOR(
ScatterWeightedSum,
ScatterWeightedSumOp<float, CUDAContext>);
namespace {
template <typename Index, typename T>
__global__ void scatter_assign_kernel(
T* data,
const Index* idxs,
const T* slicesData,
int64_t N,
int64_t K,
int64_t block_size) {
for (int64_t i = blockIdx.x; i < K; i += gridDim.x) {
Index idx = idxs[i];
CUDA_KERNEL_ASSERT(0 <= idx && idx < N);
const T* src = slicesData + block_size * i;
T* dest = data + block_size * idx;
for (int64_t j = threadIdx.x; j < block_size; j += blockDim.x) {
dest[j] = src[j];
}
}
}
} // namespace
template <>
template <typename Index, typename T>
void ScatterAssignOp<CUDAContext>::DoScatterAssign(
T* data,
const Index* idxs,
const T* slicesData,
int64_t N,
int64_t K,
int64_t block_size) {
hipLaunchKernelGGL(( scatter_assign_kernel),
dim3(::min(K, static_cast<int64_t>(CAFFE_MAXIMUM_NUM_BLOCKS))),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), data, idxs, slicesData, N, K, block_size);
}
REGISTER_CUDA_OPERATOR(ScatterAssign, ScatterAssignOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(Size, SizeOp<CUDAContext>);
template <typename T>
__global__ void RangeKernel(const int n, T* Y, T offset, T step) {
CUDA_1D_KERNEL_LOOP(index, n) {
Y[index] = index * step + offset;
}
}
template <>
template <typename T>
bool RangeOp<CUDAContext>::DoRunOnDevice(
const T& start,
const T& step,
Tensor* output) {
int N = output->numel();
hipLaunchKernelGGL(( RangeKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N, output->template mutable_data<T>(), start, step);
return true;
}
REGISTER_CUDA_OPERATOR(Range, RangeOp<CUDAContext>);
} // namespace caffe2
| 69f6f3272ecf244c46e3de50321f92a1c3cbe812.cu | #include "caffe2/operators/utility_ops.h"
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/system/cuda/execution_policy.h>
#include <thrust/unique.h>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/flatten_op.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <>
bool WeightedSumOp<CUDAContext>::RunOnDevice() {
if (Input(0).IsType<float>()) {
return DoRunWithType<float>();
} else if (Input(0).IsType<at::Half>()) {
return DoRunWithType<at::Half>();
} else {
CAFFE_THROW("Unsupported inputs");
}
return false;
}
template <>
bool SumOp<CUDAContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<float, int32_t, int64_t>>::call(
this, Input(0));
}
REGISTER_CUDA_OPERATOR(Print, PrintOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(Flatten, FlattenOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(FlattenToVec, FlattenToVecOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(Alias, AliasOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(ResizeLike, ResizeLikeOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(Sum, SumOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(WeightedSum, WeightedSumOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(EnsureDense, EnsureDenseOp<CUDAContext>);
__global__ void NanCheckKernel(int N, const float* X, bool* result) {
bool has_nan = false;
CUDA_1D_KERNEL_LOOP(i, N) {
// Note: we have no need to do early return, since only if this fails
// will we not need to inspect all elements. No need to optimize the
// case that will fail.
has_nan = has_nan || isnan(X[i]) || isinf(X[i]);
}
__syncthreads();
if (has_nan) {
result[0] = true;
}
}
template <>
bool NanCheckOp<CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0);
const size_t N = X.numel();
const float* data_ptr = X.data<float>();
ReinitializeTensor(&scratch_, {1}, at::dtype<bool>().device(CUDA));
math::Set<bool, CUDAContext>(
1, false, scratch_.mutable_data<bool>(), &context_);
NanCheckKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N, X.data<float>(), scratch_.mutable_data<bool>());
bool result = false;
{
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
CUDA_ENFORCE(cudaMemcpyAsync(
&result,
scratch_.raw_data(),
1,
cudaMemcpyDefault,
context_.cuda_stream()));
}
// Note: we must synchronize here so we can inspect the result
context_.FinishDeviceComputation();
// Print out diagnostic info if we have a NaN or inf
if (result) {
std::cerr << "Tensor contained NaN or inf: " << this->debug_def().input(0)
<< std::endl;
for (int j = 0; j < InputSize(); j++) {
Tensor cpu_X(CPU);
cpu_X.ResizeLike(Input(j));
// Hack to cause allocation happen here, so it won't happen
// when we do CopyFrom. We need the mutex then because host->gpu
// copies seem to possibly lock with NCCL.
cpu_X.mutable_data<float>();
{
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
cpu_X.CopyFrom(Input(j)); // sync copy
}
std::cerr << "Input tensor: " << j << ": [" << this->debug_def().input(j)
<< "]" << std::endl;
tensorPrinter_.Print<float>(cpu_X);
if (j == 0) {
std::cerr << "NaN idxs:" << std::endl;
auto* cpu_X_data = cpu_X.data<float>();
for (size_t i = 0; i < cpu_X.numel(); ++i) {
if (std::isnan(cpu_X_data[i]) || std::isinf(cpu_X_data[i])) {
std::cerr << i << " ";
}
}
}
std::cerr << std::endl;
}
return false;
}
// This op should act as an identity matrix if we don't find any NaNs/infs.
// Copy over the data if we are not doing this in-place.
if (&X != Y) {
Y->CopyFrom(X, true /*async*/);
}
return true;
}
REGISTER_CUDA_OPERATOR(NanCheck, NanCheckOp<CUDAContext>);
/**
* @brief Update slices of Y in-place with a batch of weighted X's.
* Y[idx] = alpha[b] * X[b][i] + Y[idx]
* i=0,...,N-1
* b=0,...,B-1
* idx=Indices[i]
*/
template <typename T_INDEX>
__global__ void AxpySliceKernel(
const float* weight0,
const int64_t N,
const int64_t B,
const int64_t slice_size,
const float** alpha,
const float** X,
const T_INDEX* Indices,
float* Y,
const int64_t M) {
// This implementation requires that the first weight is 1.0
CUDA_KERNEL_ASSERT(weight0[0] == 1.0);
for (int i = blockIdx.x; i < N; i += gridDim.x) {
T_INDEX idx = Indices[i];
float* y_offset = Y + (idx * slice_size);
for (int b = 0; b < B; b++) {
float a = *alpha[b];
const float* x_offset = X[b] + (i * slice_size);
for (int j = threadIdx.x; j < slice_size; j += blockDim.x) {
atomicAdd(&y_offset[j], a * x_offset[j]);
}
}
}
}
// this kernel is a custom version of AxpySliceKernel
// to be used when there is only one weighted X to update
// slice of Y.
template <typename T_INDEX>
__global__ void AxpySliceKernel2(
const float* weight0,
const int64_t N,
const int64_t slice_size,
const float* alpha,
const float* X,
const T_INDEX* Indices,
float* Y,
const int64_t M) {
// This implementation requires that the first weight is 1.0
CUDA_KERNEL_ASSERT(weight0[0] == 1.0);
for (int i = blockIdx.x; i < N; i += gridDim.x) {
T_INDEX idx = Indices[i];
float* y_offset = Y + (idx * slice_size);
for (int j = threadIdx.x; j < slice_size; j += blockDim.x) {
atomicAdd(&y_offset[j], alpha[0] * X[(i * slice_size) + j]);
}
}
}
template <>
bool ScatterWeightedSumOp<float, CUDAContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(this, Input(2));
}
template <>
template <typename Index>
bool ScatterWeightedSumOp<float, CUDAContext>::DoRunWithType() {
CAFFE_ENFORCE_EQ(InputSize() % 2, 1);
auto& X0 = Input(0);
auto& weight0 = Input(1);
auto& indices = Input(2);
auto* output = Output(0);
CAFFE_ENFORCE_EQ(&X0, output, "In place operation is required");
CAFFE_ENFORCE_GT(X0.numel(), 0);
CAFFE_ENFORCE_GT(X0.dim(), 0, "X0 has to be at least the vector");
CAFFE_ENFORCE_EQ(weight0.numel(), 1);
int64_t M = X0.numel();
int64_t N = X0.dim(0);
int64_t K = indices.numel();
int64_t block_size = M / N;
float* data = output->template mutable_data<float>();
const int64_t B = (InputSize() - 3) / 2;
if (B > 1) {
// In order to have all device pointers of x_i (and weight_i similarly)
// consecutively in device memory, copy pointers to a host vector and then
// copy back into a device array.
ReinitializeTensor(&x_data_host_, {B}, at::dtype<float*>().device(CPU));
ReinitializeTensor(&weights_host_, {B}, at::dtype<float*>().device(CPU));
ReinitializeTensor(&x_data_device_, {B}, at::dtype<float*>().device(CUDA));
ReinitializeTensor(&weights_device_, {B}, at::dtype<float*>().device(CUDA));
float** x_data_host = x_data_host_.mutable_data<float*>();
float** weights_host = weights_host_.mutable_data<float*>();
float** x_data_device = x_data_device_.mutable_data<float*>();
float** weights_device = weights_device_.mutable_data<float*>();
for (int inp = 3; inp < InputSize(); inp += 2) {
int idx = (inp - 3) / 2;
x_data_host[idx] = static_cast<float*>(Input(inp).raw_data());
weights_host[idx] = static_cast<float*>(Input(inp + 1).raw_data());
}
context_.Copy<float*, CPUContext, CUDAContext>(
B, x_data_host, x_data_device);
context_.Copy<float*, CPUContext, CUDAContext>(
B, weights_host, weights_device);
AxpySliceKernel<<<
std::min<int64_t>(K, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
weight0.template data<float>(),
K,
B,
block_size,
const_cast<const float**>(weights_device),
const_cast<const float**>(x_data_device),
indices.template data<Index>(),
data,
M);
} else {
// when only one input exists to update data buffer,
// avoid copying pointers to device array to prevent
// copy overhead
auto& X1 = Input(3);
auto& weight1 = Input(4);
AxpySliceKernel2<<<
std::min<int64_t>(K, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
weight0.template data<float>(),
K,
block_size,
weight1.template data<float>(),
X1.template data<float>(),
indices.template data<Index>(),
data,
M);
}
return true;
}
REGISTER_CUDA_OPERATOR(
ScatterWeightedSum,
ScatterWeightedSumOp<float, CUDAContext>);
namespace {
template <typename Index, typename T>
__global__ void scatter_assign_kernel(
T* data,
const Index* idxs,
const T* slicesData,
int64_t N,
int64_t K,
int64_t block_size) {
for (int64_t i = blockIdx.x; i < K; i += gridDim.x) {
Index idx = idxs[i];
CUDA_KERNEL_ASSERT(0 <= idx && idx < N);
const T* src = slicesData + block_size * i;
T* dest = data + block_size * idx;
for (int64_t j = threadIdx.x; j < block_size; j += blockDim.x) {
dest[j] = src[j];
}
}
}
} // namespace
template <>
template <typename Index, typename T>
void ScatterAssignOp<CUDAContext>::DoScatterAssign(
T* data,
const Index* idxs,
const T* slicesData,
int64_t N,
int64_t K,
int64_t block_size) {
scatter_assign_kernel<<<
std::min(K, static_cast<int64_t>(CAFFE_MAXIMUM_NUM_BLOCKS)),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(data, idxs, slicesData, N, K, block_size);
}
REGISTER_CUDA_OPERATOR(ScatterAssign, ScatterAssignOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(Size, SizeOp<CUDAContext>);
template <typename T>
__global__ void RangeKernel(const int n, T* Y, T offset, T step) {
CUDA_1D_KERNEL_LOOP(index, n) {
Y[index] = index * step + offset;
}
}
template <>
template <typename T>
bool RangeOp<CUDAContext>::DoRunOnDevice(
const T& start,
const T& step,
Tensor* output) {
int N = output->numel();
RangeKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N, output->template mutable_data<T>(), start, step);
return true;
}
REGISTER_CUDA_OPERATOR(Range, RangeOp<CUDAContext>);
} // namespace caffe2
|
aba22e0d9c0ad6af93f41591a49d6a26c374a37e.hip | // !!! This is a file automatically generated by hipify!!!
#include "edge.cuh"
#include <hip/hip_runtime.h>
#include "../C/node.h"
#include "../C/kvec.h"
node_t* node_init_gpu(int _id){
node_t* e;
hipMallocManaged(&e, sizeof(node_t));
e->attr = ht_create(16);
//Add sample to hashtable
ht_set(e->attr, "", "");
kv_init(e->edges);
e->id = _id;
return e;
}
| aba22e0d9c0ad6af93f41591a49d6a26c374a37e.cu | #include "edge.cuh"
#include <cuda.h>
#include "../C/node.h"
#include "../C/kvec.h"
node_t* node_init_gpu(int _id){
node_t* e;
cudaMallocManaged(&e, sizeof(node_t));
e->attr = ht_create(16);
//Add sample to hashtable
ht_set(e->attr, "", "");
kv_init(e->edges);
e->id = _id;
return e;
}
|
8168c86d8914ce19d9b3c9f697a373b025cec88b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/********************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
__constant__ float M_c[FILTER_SIZE][FILTER_SIZE];
/*__device__ float getElement(Matrix *N, const int row, const int col)
{
return N->elements[row*N->width+col];
}
*/
/*__device__ void retElem(Matrix *P, const int row, const int col, float value)
{
P->elements[row*P->width+col] = value;
return;
}*/
__global__ void convolution(Matrix N, Matrix P)
{
/********************************************************************
Determine input and output indexes of each thread
Load a tile of the input image to shared memory
Apply the filter on the input image tile
Write the compute values to the output image at the correct indexes
********************************************************************/
//INSERT KERNEL CODE HERE
/*int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col_zeroIndex = col - FILTER_SIZE/2;
int row_zeroIndex = row - FILTER_SIZE/2;
float sum = 0;
for(int j = 0; j < FILTER_SIZE; ++j){
for(int k = 0; k < FILTER_SIZE; ++k){
if((row_zeroIndex + j >= 0) && (row_zeroIndex + j < N.height) &&
(col_zeroIndex + k >= 0) && (col_zeroIndex+ k < N.width)){
//sum = M_c[j][k] * getElement(&N, row_zeroIndex + j, col_zeroIndex + k);
sum += M_c[j][k] * N.elements[(row_zeroIndex + j)*N.width + col_zeroIndex +k];
}
}
}
if( row < P.height && col < P.width)
//retElem(&P, row, col, sum);
P.elements[row * P.width + col] = sum;*/
int row = blockIdx.y * TILE_SIZE + threadIdx.y;
int col = blockIdx.x * TILE_SIZE + threadIdx.x;
int rowZeroIndex = row - FILTER_SIZE/2;
int colZeroIndex = col - FILTER_SIZE/2;
__shared__ float N_ds[TILE_SIZE + FILTER_SIZE - 1][TILE_SIZE + FILTER_SIZE - 1];
if((rowZeroIndex >= 0) && (rowZeroIndex < N.height) && (colZeroIndex >= 0) && (colZeroIndex < N.width)){
N_ds[threadIdx.y][threadIdx.x] = N.elements[rowZeroIndex * N.width + colZeroIndex];
}
else{
N_ds[threadIdx.y][threadIdx.x] = 0.0f;
}
__syncthreads();
float sum = 0.0f;
if(threadIdx.y < TILE_SIZE && threadIdx.x < TILE_SIZE){
for(int dr = 0; dr < FILTER_SIZE; ++dr){
for(int dc = 0; dc < FILTER_SIZE; ++dc){
sum += M_c[dr][dc] * N_ds[threadIdx.y + dr][threadIdx.x + dc];
}
}
if(row < P.height && col < P.width){
P.elements[row * P.width + col] = sum;
}
}
}
| 8168c86d8914ce19d9b3c9f697a373b025cec88b.cu | /********************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
__constant__ float M_c[FILTER_SIZE][FILTER_SIZE];
/*__device__ float getElement(Matrix *N, const int row, const int col)
{
return N->elements[row*N->width+col];
}
*/
/*__device__ void retElem(Matrix *P, const int row, const int col, float value)
{
P->elements[row*P->width+col] = value;
return;
}*/
__global__ void convolution(Matrix N, Matrix P)
{
/********************************************************************
Determine input and output indexes of each thread
Load a tile of the input image to shared memory
Apply the filter on the input image tile
Write the compute values to the output image at the correct indexes
********************************************************************/
//INSERT KERNEL CODE HERE
/*int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col_zeroIndex = col - FILTER_SIZE/2;
int row_zeroIndex = row - FILTER_SIZE/2;
float sum = 0;
for(int j = 0; j < FILTER_SIZE; ++j){
for(int k = 0; k < FILTER_SIZE; ++k){
if((row_zeroIndex + j >= 0) && (row_zeroIndex + j < N.height) &&
(col_zeroIndex + k >= 0) && (col_zeroIndex+ k < N.width)){
//sum = M_c[j][k] * getElement(&N, row_zeroIndex + j, col_zeroIndex + k);
sum += M_c[j][k] * N.elements[(row_zeroIndex + j)*N.width + col_zeroIndex +k];
}
}
}
if( row < P.height && col < P.width)
//retElem(&P, row, col, sum);
P.elements[row * P.width + col] = sum;*/
int row = blockIdx.y * TILE_SIZE + threadIdx.y;
int col = blockIdx.x * TILE_SIZE + threadIdx.x;
int rowZeroIndex = row - FILTER_SIZE/2;
int colZeroIndex = col - FILTER_SIZE/2;
__shared__ float N_ds[TILE_SIZE + FILTER_SIZE - 1][TILE_SIZE + FILTER_SIZE - 1];
if((rowZeroIndex >= 0) && (rowZeroIndex < N.height) && (colZeroIndex >= 0) && (colZeroIndex < N.width)){
N_ds[threadIdx.y][threadIdx.x] = N.elements[rowZeroIndex * N.width + colZeroIndex];
}
else{
N_ds[threadIdx.y][threadIdx.x] = 0.0f;
}
__syncthreads();
float sum = 0.0f;
if(threadIdx.y < TILE_SIZE && threadIdx.x < TILE_SIZE){
for(int dr = 0; dr < FILTER_SIZE; ++dr){
for(int dc = 0; dc < FILTER_SIZE; ++dc){
sum += M_c[dr][dc] * N_ds[threadIdx.y + dr][threadIdx.x + dc];
}
}
if(row < P.height && col < P.width){
P.elements[row * P.width + col] = sum;
}
}
}
|