hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars)
---|---|---|---|
ef89ca9471bc70162e168c1a130567695a1ba14f.hip
|
// !!! This is a file automatically generated by hipify!!!
//standard libraries
#include <iostream>
#include <stdio.h>
#include <sys/time.h>
//opencv libraries
#include <opencv2/opencv.hpp>
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
//CUDA libraries
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "opencv2/core/cuda.hpp"
#include "opencv2/core/cuda_types.hpp"
#include "opencv2/core/cuda_stream_accessor.hpp"
#include <device_launch_parameters.h>
using namespace std;
using namespace cv;
__global__ void transform1080to480(Mat *image, string *result_image, int n);
int main(int argc, char** argv) {
if (argc < 5) {
// Tell the user how to run the program
cerr << "Uso:" << argv[0] << " Imagen-Entrada Imagen-Salida #Hilos #Bloques(Ejemplo:./reduccion4k 4k.jpg result.jpg 256 8)"<< endl;
/* "Usage messages" are a conventional way of telling the user
* how to run a program if they enter the command incorrectly.
*/
return 1;
}
if (atoi(argv[4]) <= 0)
{
printf("Por favor use un numero positivo de bloques\n");
return 1;
}
if (atoi(argv[3]) <= 0)
{
printf("Por favor use un numero positivo de hilos\n");
return 1;
}
//Size of vectors
long n = 100000;
// Size, in bytes, of each vector
size_t mat_size = n*sizeof(Mat);
size_t string_size = n*sizeof(string);
struct timeval tval_before, tval_after, tval_result;
gettimeofday(&tval_before, NULL);
// Allocate memory on host
h_image = (Mat*)malloc(mat_size);
h_result_image = (string*)malloc(string_size);
// Allocate memory on GPU
hipMalloc(&d_image, mat_size);
hipMalloc(&d_result_image, string_size);
// Copy host to device
hipMemcpy( d_image, h_image, mat_size, hipMemcpyHostToDevice);
int THREADS, BLOCKS;
//Host input
Mat *h_image = imread(argv[1], IMREAD_COLOR);
//Host output
string *h_result_image = argv[2];
//Device input
Mat *d_image = imread(argv[1], IMREAD_COLOR);
//Device output
string *d_result_image = argv[2];
// Number of threads in each thread block
THREADS = atoi(argv[3]);
// Number of thread blocks in grid
BLOCKS = atoi(argv[4]);
// Execute the kernel
hipLaunchKernelGGL(( transform1080to480), dim3(BLOCKS), dim3(THREADS), 0, 0, d_image, d_result_image, n);
// Copy array back to host
hipMemcpy( h_result_image, d_result_image, string_size, hipMemcpyDeviceToHost );
// Release device memory
hipFree(d_image);
hipFree(d_result_image);
// Release host memory
free(h_image);
free(h_result_image);
gettimeofday(&tval_after, NULL);
timersub(&tval_after,&tval_before,&tval_result);
FILE * pFile;
pFile = fopen("/../../resultados.txt", "a");
fprintf(pFile, "Time elapsed transforming a 1080p image to 480p using CUDA with %d threads and %d blocks: %ld.%06lds\n", THREADS, BLOCKS, (long int)tval_result.tv_sec, (long int)tval_result.tv_usec);
fclose(pFile);
return 0;
}
__global__ void transform1080to480(Mat *image, string *result_image, int n){
if(image.empty()) {
cout << "Error: the image has been incorrectly loaded." << endl;
}
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
Mat temp(image.rows + 2, image.cols + 2, CV_8UC3, Scalar(255,255, 255));
Mat copy( image.rows*2/9, image.cols/6, CV_8UC3, Scalar(255,255, 255));
Vec3b cpixel;
cpixel[0] = (uchar) 0;
cpixel[1] = (uchar) 0;
cpixel[2] = (uchar) 0;
temp.at<Vec3b>(0, 0) = cpixel;
temp.at<Vec3b>(temp.rows - 1, 0) = cpixel;
temp.at<Vec3b>(0, temp.cols - 1) = cpixel;
temp.at<Vec3b>(temp.rows - 1, temp.cols - 1) = cpixel;
for(int i = 0; i < image.rows ; i++) {
for(int j = 0; j < image.cols; j++) {
cpixel = image.at<Vec3b>(i, j);
temp.at<Vec3b>(i+1, j+1) = cpixel;
}
}
for(int i = 0; i < image.rows; i++){
cpixel = image.at<Vec3b>(i, 0);
temp.at<Vec3b>(i+1, 0) = cpixel;
}
for(int i = 0; i < image.rows; i++){
cpixel = image.at<Vec3b>(i, image.cols - 1);
temp.at<Vec3b>(i+1, temp.cols - 1) = cpixel;
}
for(int i = 0; i < image.cols; i++){
cpixel = image.at<Vec3b>(0, i);
temp.at<Vec3b>(0, i + 1) = cpixel;
}
for(int i = 0; i < image.cols; i++){
cpixel = image.at<Vec3b>(image.rows - 1, i);
temp.at<Vec3b>(temp.rows - 1, i + 1) = cpixel;
}
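// For every source pixel, average it with its four direct neighbours taken from
// the padded temp image, then scatter a subset of the smoothed pixels into the
// smaller copy image to produce the downscaled result.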
for(int i = 0; i < image.rows; i++){
for(int j = 0; j < image.cols; j++){
Vec3b mpixel = temp.at<Vec3b>(i+1, j+1);
Vec3b upixel = temp.at<Vec3b>(i, j+1);
Vec3b dpixel = temp.at<Vec3b>(i+2, j+1);
Vec3b lpixel = temp.at<Vec3b>(i+1, j);
Vec3b rpixel = temp.at<Vec3b>(i+1, j+2);
uchar a = (mpixel[0] + upixel[0] + dpixel[0] + lpixel[0] + rpixel[0])/5;
uchar b = (mpixel[1] + upixel[1] + dpixel[1] + lpixel[1] + rpixel[1])/5;
uchar c = (mpixel[2] + upixel[2] + dpixel[2] + lpixel[2] + rpixel[2])/5;
Vec3b ppixel;
ppixel[0] = a;
ppixel[1] = b;
ppixel[2] = c;
if((i+j)%2 == 0){
if(i%2 == 0)
copy.at<Vec3b>((i*2)/9,j/6) = ppixel;
else
copy.at<Vec3b>(((i*2)/9)+1, j/6+1) = ppixel;
}
}
}
//Write resized image
imwrite(result_image, copy);
}
|
ef89ca9471bc70162e168c1a130567695a1ba14f.cu
|
//standard libraries
#include <iostream>
#include <stdio.h>
#include <sys/time.h>
//opencv libraries
#include <opencv2/opencv.hpp>
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
//CUDA libraries
#include <cuda.h>
#include <cuda_runtime.h>
#include "opencv2/core/cuda.hpp"
#include "opencv2/core/cuda_types.hpp"
#include "opencv2/core/cuda_stream_accessor.hpp"
#include <device_launch_parameters.h>
using namespace std;
using namespace cv;
__global__ void transform1080to480(Mat *image, string *result_image, int n);
int main(int argc, char** argv) {
if (argc < 5) {
// Tell the user how to run the program
cerr << "Uso:" << argv[0] << " Imagen-Entrada Imagen-Salida #Hilos #Bloques(Ejemplo:./reduccion4k 4k.jpg result.jpg 256 8)"<< endl;
/* "Usage messages" are a conventional way of telling the user
* how to run a program if they enter the command incorrectly.
*/
return 1;
}
if (atoi(argv[4]) <= 0)
{
printf("Por favor use un numero positivo de bloques\n");
return 1;
}
if (atoi(argv[3]) <= 0)
{
printf("Por favor use un numero positivo de hilos\n");
return 1;
}
//Size of vectors
long n = 100000;
// Size, in bytes, of each vector
size_t mat_size = n*sizeof(Mat);
size_t string_size = n*sizeof(string);
struct timeval tval_before, tval_after, tval_result;
gettimeofday(&tval_before, NULL);
// Allocate memory on host
h_image = (Mat*)malloc(mat_size);
h_result_image = (string*)malloc(string_size);
// Allocate memory on GPU
cudaMalloc(&d_image, mat_size);
cudaMalloc(&d_result_image, string_size);
// Copy host to device
cudaMemcpy( d_image, h_image, mat_size, cudaMemcpyHostToDevice);
int THREADS, BLOCKS;
//Host input
Mat *h_image = imread(argv[1], IMREAD_COLOR);
//Host output
string *h_result_image = argv[2];
//Device input
Mat *d_image = imread(argv[1], IMREAD_COLOR);
//Device output
string *d_result_image = argv[2];
// Number of threads in each thread block
THREADS = atoi(argv[3]);
// Number of thread blocks in grid
BLOCKS = atoi(argv[4]);
// Execute the kernel
transform1080to480<<<BLOCKS, THREADS>>>(d_image, d_result_image, n);
// Copy array back to host
cudaMemcpy( h_result_image, d_result_image, string_size, cudaMemcpyDeviceToHost );
// Release device memory
cudaFree(d_image);
cudaFree(d_result_image);
// Release host memory
free(h_image);
free(h_result_image);
gettimeofday(&tval_after, NULL);
timersub(&tval_after,&tval_before,&tval_result);
FILE * pFile;
pFile = fopen("/../../resultados.txt", "a");
fprintf(pFile, "Time elapsed transforming a 1080p image to 480p using CUDA with %d threads and %d blocks: %ld.%06lds\n", THREADS, BLOCKS, (long int)tval_result.tv_sec, (long int)tval_result.tv_usec);
fclose(pFile);
return 0;
}
__global__ void transform1080to480(Mat *image, string *result_image, int n){
if(image.empty()) {
cout << "Error: the image has been incorrectly loaded." << endl;
}
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
Mat temp(image.rows + 2, image.cols + 2, CV_8UC3, Scalar(255,255, 255));
Mat copy( image.rows*2/9, image.cols/6, CV_8UC3, Scalar(255,255, 255));
Vec3b cpixel;
cpixel[0] = (uchar) 0;
cpixel[1] = (uchar) 0;
cpixel[2] = (uchar) 0;
temp.at<Vec3b>(0, 0) = cpixel;
temp.at<Vec3b>(temp.rows - 1, 0) = cpixel;
temp.at<Vec3b>(0, temp.cols - 1) = cpixel;
temp.at<Vec3b>(temp.rows - 1, temp.cols - 1) = cpixel;
for(int i = 0; i < image.rows ; i++) {
for(int j = 0; j < image.cols; j++) {
cpixel = image.at<Vec3b>(i, j);
temp.at<Vec3b>(i+1, j+1) = cpixel;
}
}
for(int i = 0; i < image.rows; i++){
cpixel = image.at<Vec3b>(i, 0);
temp.at<Vec3b>(i+1, 0) = cpixel;
}
for(int i = 0; i < image.rows; i++){
cpixel = image.at<Vec3b>(i, image.cols - 1);
temp.at<Vec3b>(i+1, temp.cols - 1) = cpixel;
}
for(int i = 0; i < image.cols; i++){
cpixel = image.at<Vec3b>(0, i);
temp.at<Vec3b>(0, i + 1) = cpixel;
}
for(int i = 0; i < image.cols; i++){
cpixel = image.at<Vec3b>(image.rows - 1, i);
temp.at<Vec3b>(temp.rows - 1, i + 1) = cpixel;
}
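// For every source pixel, average it with its four direct neighbours taken from
// the padded temp image, then scatter a subset of the smoothed pixels into the
// smaller copy image to produce the downscaled result.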
for(int i = 0; i < image.rows; i++){
for(int j = 0; j < image.cols; j++){
Vec3b mpixel = temp.at<Vec3b>(i+1, j+1);
Vec3b upixel = temp.at<Vec3b>(i, j+1);
Vec3b dpixel = temp.at<Vec3b>(i+2, j+1);
Vec3b lpixel = temp.at<Vec3b>(i+1, j);
Vec3b rpixel = temp.at<Vec3b>(i+1, j+2);
uchar a = (mpixel[0] + upixel[0] + dpixel[0] + lpixel[0] + rpixel[0])/5;
uchar b = (mpixel[1] + upixel[1] + dpixel[1] + lpixel[1] + rpixel[1])/5;
uchar c = (mpixel[2] + upixel[2] + dpixel[2] + lpixel[2] + rpixel[2])/5;
Vec3b ppixel;
ppixel[0] = a;
ppixel[1] = b;
ppixel[2] = c;
if((i+j)%2 == 0){
if(i%2 == 0)
copy.at<Vec3b>((i*2)/9,j/6) = ppixel;
else
copy.at<Vec3b>(((i*2)/9)+1, j/6+1) = ppixel;
}
}
}
//Write resized image
imwrite(result_image, copy);
}
|
a666628f965da0a02e05ffc785fd4468556eb0cd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* Copyright 2020 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
// ------------------------------------------------------------------
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License
// Modified from MATLAB Faster R-CNN
// (https://github.com/shaoqingren/faster_rcnn)
// ------------------------------------------------------------------
#include <algorithm>
// headers in local files
#include "modules/perception/lidar/lib/detection/lidar_point_pillars/nms_cuda.h"
namespace apollo {
namespace perception {
namespace lidar {
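// Intersection-over-union of two axis-aligned boxes stored as [x1, y1, x2, y2];
// the "+ 1" terms follow the inclusive pixel-coordinate convention of the
// original Faster R-CNN code.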
__device__ inline float devIoU(float const *const a, float const *const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, uint64_t *dev_mask,
const int NUM_BOX_CORNERS) {
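// Each block compares one tile of "row" boxes (blockIdx.y) against one tile of
// "column" boxes (blockIdx.x); every row box records, one bit per column box,
// whether their IoU exceeds the suppression threshold.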
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
const int block_threads = blockDim.x;
const int row_size = min(n_boxes - row_start * block_threads, block_threads);
const int col_size = min(n_boxes - col_start * block_threads, block_threads);
__shared__ float block_boxes[NUM_THREADS_MACRO * NUM_2D_BOX_CORNERS_MACRO];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * NUM_BOX_CORNERS + 0] =
dev_boxes[(block_threads * col_start + threadIdx.x) * NUM_BOX_CORNERS +
0];
block_boxes[threadIdx.x * NUM_BOX_CORNERS + 1] =
dev_boxes[(block_threads * col_start + threadIdx.x) * NUM_BOX_CORNERS +
1];
block_boxes[threadIdx.x * NUM_BOX_CORNERS + 2] =
dev_boxes[(block_threads * col_start + threadIdx.x) * NUM_BOX_CORNERS +
2];
block_boxes[threadIdx.x * NUM_BOX_CORNERS + 3] =
dev_boxes[(block_threads * col_start + threadIdx.x) * NUM_BOX_CORNERS +
3];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = block_threads * row_start + threadIdx.x;
const float cur_box[NUM_2D_BOX_CORNERS_MACRO] = {
dev_boxes[cur_box_idx * NUM_BOX_CORNERS + 0],
dev_boxes[cur_box_idx * NUM_BOX_CORNERS + 1],
dev_boxes[cur_box_idx * NUM_BOX_CORNERS + 2],
dev_boxes[cur_box_idx * NUM_BOX_CORNERS + 3]};
uint64_t t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (int i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * NUM_BOX_CORNERS) >
nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n_boxes, block_threads);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
NMSCuda::NMSCuda(const int NUM_THREADS, const int NUM_BOX_CORNERS,
const float nms_overlap_threshold)
: NUM_THREADS_(NUM_THREADS),
NUM_BOX_CORNERS_(NUM_BOX_CORNERS),
nms_overlap_threshold_(nms_overlap_threshold) {}
void NMSCuda::doNMSCuda(const int host_filter_count,
float *dev_sorted_box_for_nms, int *out_keep_inds,
int *out_num_to_keep) {
const int col_blocks = DIVUP(host_filter_count, NUM_THREADS_);
dim3 blocks(DIVUP(host_filter_count, NUM_THREADS_),
DIVUP(host_filter_count, NUM_THREADS_));
dim3 threads(NUM_THREADS_);
uint64_t *dev_mask = NULL;
GPU_CHECK(hipMalloc(
&dev_mask, host_filter_count * col_blocks * sizeof(uint64_t)));
hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, host_filter_count, nms_overlap_threshold_,
dev_sorted_box_for_nms, dev_mask,
NUM_BOX_CORNERS_);
// postprocess for nms output
std::vector<uint64_t> host_mask(host_filter_count * col_blocks);
GPU_CHECK(
hipMemcpy(&host_mask[0], dev_mask,
sizeof(uint64_t) * host_filter_count * col_blocks,
hipMemcpyDeviceToHost));
std::vector<uint64_t> remv(col_blocks);
memset(&remv[0], 0, sizeof(uint64_t) * col_blocks);
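// Boxes in dev_sorted_box_for_nms are in descending score order: keep box i only
// if no previously kept box has flagged it in the overlap mask, then fold box i's
// own mask row into remv so it can suppress later, lower-scoring boxes.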
for (int i = 0; i < host_filter_count; i++) {
int nblock = i / NUM_THREADS_;
int inblock = i % NUM_THREADS_;
if (!(remv[nblock] & (1ULL << inblock))) {
out_keep_inds[(*out_num_to_keep)++] = i;
uint64_t *p = &host_mask[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
GPU_CHECK(hipFree(dev_mask));
}
} // namespace lidar
} // namespace perception
} // namespace apollo
|
a666628f965da0a02e05ffc785fd4468556eb0cd.cu
|
/******************************************************************************
* Copyright 2020 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
// ------------------------------------------------------------------
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License
// Modified from MATLAB Faster R-CNN
// (https://github.com/shaoqingren/faster_rcnn)
// ------------------------------------------------------------------
#include <algorithm>
// headers in local files
#include "modules/perception/lidar/lib/detection/lidar_point_pillars/nms_cuda.h"
namespace apollo {
namespace perception {
namespace lidar {
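// Intersection-over-union of two axis-aligned boxes stored as [x1, y1, x2, y2];
// the "+ 1" terms follow the inclusive pixel-coordinate convention of the
// original Faster R-CNN code.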
__device__ inline float devIoU(float const *const a, float const *const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, uint64_t *dev_mask,
const int NUM_BOX_CORNERS) {
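// Each block compares one tile of "row" boxes (blockIdx.y) against one tile of
// "column" boxes (blockIdx.x); every row box records, one bit per column box,
// whether their IoU exceeds the suppression threshold.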
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
const int block_threads = blockDim.x;
const int row_size = min(n_boxes - row_start * block_threads, block_threads);
const int col_size = min(n_boxes - col_start * block_threads, block_threads);
__shared__ float block_boxes[NUM_THREADS_MACRO * NUM_2D_BOX_CORNERS_MACRO];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * NUM_BOX_CORNERS + 0] =
dev_boxes[(block_threads * col_start + threadIdx.x) * NUM_BOX_CORNERS +
0];
block_boxes[threadIdx.x * NUM_BOX_CORNERS + 1] =
dev_boxes[(block_threads * col_start + threadIdx.x) * NUM_BOX_CORNERS +
1];
block_boxes[threadIdx.x * NUM_BOX_CORNERS + 2] =
dev_boxes[(block_threads * col_start + threadIdx.x) * NUM_BOX_CORNERS +
2];
block_boxes[threadIdx.x * NUM_BOX_CORNERS + 3] =
dev_boxes[(block_threads * col_start + threadIdx.x) * NUM_BOX_CORNERS +
3];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = block_threads * row_start + threadIdx.x;
const float cur_box[NUM_2D_BOX_CORNERS_MACRO] = {
dev_boxes[cur_box_idx * NUM_BOX_CORNERS + 0],
dev_boxes[cur_box_idx * NUM_BOX_CORNERS + 1],
dev_boxes[cur_box_idx * NUM_BOX_CORNERS + 2],
dev_boxes[cur_box_idx * NUM_BOX_CORNERS + 3]};
uint64_t t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (int i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * NUM_BOX_CORNERS) >
nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n_boxes, block_threads);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
NMSCuda::NMSCuda(const int NUM_THREADS, const int NUM_BOX_CORNERS,
const float nms_overlap_threshold)
: NUM_THREADS_(NUM_THREADS),
NUM_BOX_CORNERS_(NUM_BOX_CORNERS),
nms_overlap_threshold_(nms_overlap_threshold) {}
void NMSCuda::doNMSCuda(const int host_filter_count,
float *dev_sorted_box_for_nms, int *out_keep_inds,
int *out_num_to_keep) {
const int col_blocks = DIVUP(host_filter_count, NUM_THREADS_);
dim3 blocks(DIVUP(host_filter_count, NUM_THREADS_),
DIVUP(host_filter_count, NUM_THREADS_));
dim3 threads(NUM_THREADS_);
uint64_t *dev_mask = NULL;
GPU_CHECK(cudaMalloc(
&dev_mask, host_filter_count * col_blocks * sizeof(uint64_t)));
nms_kernel<<<blocks, threads>>>(host_filter_count, nms_overlap_threshold_,
dev_sorted_box_for_nms, dev_mask,
NUM_BOX_CORNERS_);
// postprocess for nms output
std::vector<uint64_t> host_mask(host_filter_count * col_blocks);
GPU_CHECK(
cudaMemcpy(&host_mask[0], dev_mask,
sizeof(uint64_t) * host_filter_count * col_blocks,
cudaMemcpyDeviceToHost));
std::vector<uint64_t> remv(col_blocks);
memset(&remv[0], 0, sizeof(uint64_t) * col_blocks);
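// Boxes in dev_sorted_box_for_nms are in descending score order: keep box i only
// if no previously kept box has flagged it in the overlap mask, then fold box i's
// own mask row into remv so it can suppress later, lower-scoring boxes.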
for (int i = 0; i < host_filter_count; i++) {
int nblock = i / NUM_THREADS_;
int inblock = i % NUM_THREADS_;
if (!(remv[nblock] & (1ULL << inblock))) {
out_keep_inds[(*out_num_to_keep)++] = i;
uint64_t *p = &host_mask[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
GPU_CHECK(cudaFree(dev_mask));
}
} // namespace lidar
} // namespace perception
} // namespace apollo
|
fe99e396f84c41e583b6aa1fd6216a6988cf33c5.hip
|
// !!! This is a file automatically generated by hipify!!!
/**********************************************************************
Copyright 2013 Advanced Micro Devices, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
********************************************************************/
#include <chrono>
#include <hip/hip_runtime.h>
#include "scan.h"
/*
* ScanLargeArrays : Scan is done for each block and the sum of each
* block is stored in separate array (sumBuffer). SumBuffer is scanned
* and results are added to every value of next corresponding block to
* compute the scan of a large array (not limited to 2*MAX_GROUP_SIZE).
* Scan uses a balanced tree algorithm. See Blelloch, 1990, "Prefix Sums
* and Their Applications"
* @param output output data
* @param input input data
* @param block local memory used in the kernel
* @param sumBuffer sum of blocks
* @param length length of the input data
*/
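/*
 * Worked example of the scheme above (illustrative numbers only):
 *   input = [3 1 7 0 | 4 1 6 3] with blockSize = 4
 *   per-block exclusive scans = [0 3 4 11 | 0 4 5 11], sumBuffer = [11 14]
 *   scan(sumBuffer) = [0 11]; adding 0 to block 0 and 11 to block 1 gives the
 *   full exclusive scan [0 3 4 11 | 11 15 16 22].
 */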
__global__
void blockAddition(const float*__restrict__ input,
float*__restrict__ output)
{
int bid = blockIdx.x;
int tid = threadIdx.x;
int gid = bid * blockDim.x + tid;
__shared__ float value;
/* Only 1 thread of a group will read from global buffer */
if(tid == 0)
{
value = input[bid];
}
__syncthreads();
output[gid] += value;
}
__global__
void ScanLargeArrays(float *__restrict__ output,
const float *__restrict__ input,
const unsigned int block_size, // size of block
float *__restrict__ sumBuffer) // sum of blocks
{
extern __shared__ float block[]; // Size : block_size
int tid = threadIdx.x;
int bid = blockIdx.x;
int gid = bid * blockDim.x + tid;
/* Cache the computational window in shared memory */
block[2*tid] = input[2*gid];
block[2*tid + 1] = input[2*gid + 1];
__syncthreads();
float cache0 = block[0];
float cache1 = cache0 + block[1];
/* build the sum in place up the tree */
for(int stride = 1; stride < block_size; stride *=2)
{
if(2*tid>=stride)
{
cache0 = block[2*tid-stride]+block[2*tid];
cache1 = block[2*tid+1-stride]+block[2*tid+1];
}
__syncthreads();
block[2*tid] = cache0;
block[2*tid+1] = cache1;
__syncthreads();
}
/* store the block total in the sum buffer before the exclusive write-back drops it */
sumBuffer[bid] = block[block_size-1];
/*write the results back to global memory */
if(tid==0)
{
output[2*gid] = 0;
output[2*gid+1] = block[2*tid];
}
else
{
output[2*gid] = block[2*tid-1];
output[2*gid + 1] = block[2*tid];
}
}
__global__
void prefixSum(float *__restrict__ output,
const float *__restrict__ input,
const unsigned int block_size)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int gid = bid * blockDim.x + tid;
extern __shared__ float block[];
/* Cache the computational window in shared memory */
block[2*tid] = input[2*gid];
block[2*tid + 1] = input[2*gid + 1];
__syncthreads();
float cache0 = block[0];
float cache1 = cache0 + block[1];
/* build the sum in place up the tree */
for(int stride = 1; stride < block_size; stride *=2)
{
if(2*tid>=stride)
{
cache0 = block[2*tid-stride]+block[2*tid];
cache1 = block[2*tid+1-stride]+block[2*tid+1];
}
__syncthreads();
block[2*tid] = cache0;
block[2*tid+1] = cache1;
__syncthreads();
}
/*write the results back to global memory */
if(tid==0)
{
output[2*gid] = 0;
output[2*gid+1] = block[2*tid];
}
else
{
output[2*gid] = block[2*tid-1];
output[2*gid + 1] = block[2*tid];
}
}
void bScan(const unsigned int blockSize,
const unsigned int len,
const float *inputBuffer,
float *outputBuffer,
float *blockSumBuffer)
{
// set the block size
dim3 grid (len / blockSize);
dim3 block (blockSize / 2);
hipLaunchKernelGGL(( ScanLargeArrays), dim3(grid), dim3(block), sizeof(float)*blockSize, 0,
outputBuffer, inputBuffer, blockSize, blockSumBuffer);
}
void pScan(const unsigned int blockSize,
const unsigned int len,
const float *inputBuffer,
float *outputBuffer)
{
dim3 grid (1);
dim3 block (len / 2);
hipLaunchKernelGGL(prefixSum, grid, block, (len+1)*sizeof(float), 0, outputBuffer, inputBuffer, blockSize);
}
void bAddition(const unsigned int blockSize,
const unsigned int len,
float *inputBuffer,
float *outputBuffer)
{
// set the block size
dim3 grid (len / blockSize);
dim3 block (blockSize);
hipLaunchKernelGGL(blockAddition, grid, block, 0, 0, inputBuffer, outputBuffer);
}
/*
* Scan for verification
*/
void scanLargeArraysCPUReference(
float * output,
float * input,
const unsigned int length)
{
output[0] = 0;
for(unsigned int i = 1; i < length; ++i)
{
output[i] = input[i-1] + output[i-1];
}
}
int main(int argc, char * argv[])
{
if (argc != 4) {
std::cout << "Usage: " << argv[0] << " <repeat> <input length> <block size>\n";
return 1;
}
int iterations = atoi(argv[1]);
int length = atoi(argv[2]);
int blockSize = atoi(argv[3]);
if(iterations < 1)
{
std::cout << "Error, iterations cannot be 0 or negative. Exiting..\n";
return -1;
}
if(!isPowerOf2(length))
{
length = roundToPowerOf2(length);
}
if((length/blockSize>GROUP_SIZE)&&(((length)&(length-1))!=0))
{
std::cout << "Invalid length: " << length << std::endl;
return -1;
}
// input buffer size
unsigned int sizeBytes = length * sizeof(float);
float* input = (float*) malloc (sizeBytes);
// store device results for verification
float* output = (float*) malloc (sizeBytes);
// random initialisation of input
fillRandom<float>(input, length, 1, 0, 255);
blockSize = (blockSize < length/2) ? blockSize : length/2;
// Calculate number of passes required
float t = ::log((float)length) / ::log((float)blockSize);
unsigned int pass = (unsigned int)t;
// If t is equal to pass
if(::fabs(t - (float)pass) < 1e-7)
{
pass--;
}
// Create input buffer on device
float* inputBuffer;
hipMalloc((void**)&inputBuffer, sizeBytes);
hipMemcpy(inputBuffer, input, sizeBytes, hipMemcpyHostToDevice);
// Allocate output buffers
std::vector<float*> outputBuffers(pass);
for(unsigned int i = 0; i < pass; i++)
{
int size = (int)(length / ::pow((float)blockSize,(float)i));
float* outputBuffer;
hipMalloc((void**)&outputBuffer, size * sizeof(float));
outputBuffers[i] = outputBuffer;
}
// Allocate blockSumBuffers
std::vector<float*> blockSumBuffers(pass);
for(unsigned int i = 0; i < pass; i++)
{
int size = (int)(length / ::pow((float)blockSize,(float)(i + 1)));
float* sum;
hipMalloc((void**)&sum, size * sizeof(float));
blockSumBuffers[i] = sum;
}
// Create a tempBuffer on device
int tempLength = (int)(length / ::pow((float)blockSize, (float)pass));
float* tempBuffer;
hipMalloc((void**)&tempBuffer, tempLength * sizeof(float));
std::cout << "Executing kernel for " << iterations << " iterations\n";
std::cout << "-------------------------------------------\n";
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for(int n = 0; n < iterations; n++)
{
// Do block-wise sum
bScan(blockSize, length, inputBuffer, outputBuffers[0], blockSumBuffers[0]);
for(int i = 1; i < (int)pass; i++)
{
int size = (int)(length / ::pow((float)blockSize,(float)i));
bScan(blockSize, size, blockSumBuffers[i - 1], outputBuffers[i], blockSumBuffers[i]);
}
// Do scan to tempBuffer
pScan(blockSize, tempLength, blockSumBuffers[pass - 1], tempBuffer);
// Do block-addition on outputBuffers
bAddition(blockSize, (unsigned int)(length / ::pow((float)blockSize, (float)(pass - 1))),
tempBuffer, outputBuffers[pass - 1]);
for(int i = pass - 1; i > 0; i--)
{
bAddition(blockSize, (unsigned int)(length / ::pow((float)blockSize, (float)(i - 1))),
outputBuffers[i], outputBuffers[i - 1]);
}
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
std::cout << "Average execution time of scan kernels: " << time * 1e-3f / iterations
<< " (us)\n";
hipMemcpy(output, outputBuffers[0], sizeBytes, hipMemcpyDeviceToHost);
hipFree(inputBuffer);
for(unsigned int i = 0; i < pass; i++)
{
hipFree(outputBuffers[i]);
hipFree(blockSumBuffers[i]);
}
hipFree(tempBuffer);
// verification
float* verificationOutput = (float*)malloc(sizeBytes);
memset(verificationOutput, 0, sizeBytes);
// reference implementation
scanLargeArraysCPUReference(verificationOutput, input, length);
// compare the results and see if they match
if (compare<float>(output, verificationOutput, length, (float)0.001))
std::cout << "PASS" << std::endl;
else
std::cout << "FAIL" << std::endl;
free(input);
free(output);
free(verificationOutput);
return 0;
}
|
fe99e396f84c41e583b6aa1fd6216a6988cf33c5.cu
|
/**********************************************************************
Copyright ©2013 Advanced Micro Devices, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
• Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
• Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
********************************************************************/
#include <chrono>
#include <hip/hip_runtime.h>
#include "scan.h"
/*
* ScanLargeArrays : Scan is done for each block and the sum of each
* block is stored in separate array (sumBuffer). SumBuffer is scanned
* and results are added to every value of next corresponding block to
* compute the scan of a large array (not limited to 2*MAX_GROUP_SIZE).
* Scan uses a balanced tree algorithm. See Blelloch, 1990, "Prefix Sums
* and Their Applications"
* @param output output data
* @param input input data
* @param block local memory used in the kernel
* @param sumBuffer sum of blocks
* @param length length of the input data
*/
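/*
 * Worked example of the scheme above (illustrative numbers only):
 *   input = [3 1 7 0 | 4 1 6 3] with blockSize = 4
 *   per-block exclusive scans = [0 3 4 11 | 0 4 5 11], sumBuffer = [11 14]
 *   scan(sumBuffer) = [0 11]; adding 0 to block 0 and 11 to block 1 gives the
 *   full exclusive scan [0 3 4 11 | 11 15 16 22].
 */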
__global__
void blockAddition(const float*__restrict__ input,
float*__restrict__ output)
{
int bid = blockIdx.x;
int tid = threadIdx.x;
int gid = bid * blockDim.x + tid;
__shared__ float value;
/* Only 1 thread of a group will read from global buffer */
if(tid == 0)
{
value = input[bid];
}
__syncthreads();
output[gid] += value;
}
__global__
void ScanLargeArrays(float *__restrict__ output,
const float *__restrict__ input,
const unsigned int block_size, // size of block
float *__restrict__ sumBuffer) // sum of blocks
{
extern __shared__ float block[]; // Size : block_size
int tid = threadIdx.x;
int bid = blockIdx.x;
int gid = bid * blockDim.x + tid;
/* Cache the computational window in shared memory */
block[2*tid] = input[2*gid];
block[2*tid + 1] = input[2*gid + 1];
__syncthreads();
float cache0 = block[0];
float cache1 = cache0 + block[1];
/* build the sum in place up the tree */
for(int stride = 1; stride < block_size; stride *=2)
{
if(2*tid>=stride)
{
cache0 = block[2*tid-stride]+block[2*tid];
cache1 = block[2*tid+1-stride]+block[2*tid+1];
}
__syncthreads();
block[2*tid] = cache0;
block[2*tid+1] = cache1;
__syncthreads();
}
/* store the block total in the sum buffer before the exclusive write-back drops it */
sumBuffer[bid] = block[block_size-1];
/*write the results back to global memory */
if(tid==0)
{
output[2*gid] = 0;
output[2*gid+1] = block[2*tid];
}
else
{
output[2*gid] = block[2*tid-1];
output[2*gid + 1] = block[2*tid];
}
}
__global__
void prefixSum(float *__restrict__ output,
const float *__restrict__ input,
const unsigned int block_size)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int gid = bid * blockDim.x + tid;
extern __shared__ float block[];
/* Cache the computational window in shared memory */
block[2*tid] = input[2*gid];
block[2*tid + 1] = input[2*gid + 1];
__syncthreads();
float cache0 = block[0];
float cache1 = cache0 + block[1];
/* build the sum in place up the tree */
for(int stride = 1; stride < block_size; stride *=2)
{
if(2*tid>=stride)
{
cache0 = block[2*tid-stride]+block[2*tid];
cache1 = block[2*tid+1-stride]+block[2*tid+1];
}
__syncthreads();
block[2*tid] = cache0;
block[2*tid+1] = cache1;
__syncthreads();
}
/*write the results back to global memory */
if(tid==0)
{
output[2*gid] = 0;
output[2*gid+1] = block[2*tid];
}
else
{
output[2*gid] = block[2*tid-1];
output[2*gid + 1] = block[2*tid];
}
}
void bScan(const unsigned int blockSize,
const unsigned int len,
const float *inputBuffer,
float *outputBuffer,
float *blockSumBuffer)
{
// set the block size
dim3 grid (len / blockSize);
dim3 block (blockSize / 2);
ScanLargeArrays<<<grid, block, sizeof(float)*blockSize>>>(
outputBuffer, inputBuffer, blockSize, blockSumBuffer);
}
void pScan(const unsigned int blockSize,
const unsigned int len,
const float *inputBuffer,
float *outputBuffer)
{
dim3 grid (1);
dim3 block (len / 2);
hipLaunchKernelGGL(prefixSum, grid, block, (len+1)*sizeof(float), 0, outputBuffer, inputBuffer, blockSize);
}
void bAddition(const unsigned int blockSize,
const unsigned int len,
float *inputBuffer,
float *outputBuffer)
{
// set the block size
dim3 grid (len / blockSize);
dim3 block (blockSize);
hipLaunchKernelGGL(blockAddition, grid, block, 0, 0, inputBuffer, outputBuffer);
}
/*
* Scan for verification
*/
void scanLargeArraysCPUReference(
float * output,
float * input,
const unsigned int length)
{
output[0] = 0;
for(unsigned int i = 1; i < length; ++i)
{
output[i] = input[i-1] + output[i-1];
}
}
int main(int argc, char * argv[])
{
if (argc != 4) {
std::cout << "Usage: " << argv[0] << " <repeat> <input length> <block size>\n";
return 1;
}
int iterations = atoi(argv[1]);
int length = atoi(argv[2]);
int blockSize = atoi(argv[3]);
if(iterations < 1)
{
std::cout << "Error, iterations cannot be 0 or negative. Exiting..\n";
return -1;
}
if(!isPowerOf2(length))
{
length = roundToPowerOf2(length);
}
if((length/blockSize>GROUP_SIZE)&&(((length)&(length-1))!=0))
{
std::cout << "Invalid length: " << length << std::endl;
return -1;
}
// input buffer size
unsigned int sizeBytes = length * sizeof(float);
float* input = (float*) malloc (sizeBytes);
// store device results for verification
float* output = (float*) malloc (sizeBytes);
// random initialisation of input
fillRandom<float>(input, length, 1, 0, 255);
blockSize = (blockSize < length/2) ? blockSize : length/2;
// Calculate number of passes required
float t = std::log((float)length) / std::log((float)blockSize);
unsigned int pass = (unsigned int)t;
// If t is equal to pass
if(std::fabs(t - (float)pass) < 1e-7)
{
pass--;
}
// Create input buffer on device
float* inputBuffer;
hipMalloc((void**)&inputBuffer, sizeBytes);
hipMemcpy(inputBuffer, input, sizeBytes, hipMemcpyHostToDevice);
// Allocate output buffers
std::vector<float*> outputBuffers(pass);
for(unsigned int i = 0; i < pass; i++)
{
int size = (int)(length / std::pow((float)blockSize,(float)i));
float* outputBuffer;
hipMalloc((void**)&outputBuffer, size * sizeof(float));
outputBuffers[i] = outputBuffer;
}
// Allocate blockSumBuffers
std::vector<float*> blockSumBuffers(pass);
for(unsigned int i = 0; i < pass; i++)
{
int size = (int)(length / std::pow((float)blockSize,(float)(i + 1)));
float* sum;
hipMalloc((void**)&sum, size * sizeof(float));
blockSumBuffers[i] = sum;
}
// Create a tempBuffer on device
int tempLength = (int)(length / std::pow((float)blockSize, (float)pass));
float* tempBuffer;
hipMalloc((void**)&tempBuffer, tempLength * sizeof(float));
std::cout << "Executing kernel for " << iterations << " iterations\n";
std::cout << "-------------------------------------------\n";
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for(int n = 0; n < iterations; n++)
{
// Do block-wise sum
bScan(blockSize, length, inputBuffer, outputBuffers[0], blockSumBuffers[0]);
for(int i = 1; i < (int)pass; i++)
{
int size = (int)(length / std::pow((float)blockSize,(float)i));
bScan(blockSize, size, blockSumBuffers[i - 1], outputBuffers[i], blockSumBuffers[i]);
}
// Do scan to tempBuffer
pScan(blockSize, tempLength, blockSumBuffers[pass - 1], tempBuffer);
// Do block-addition on outputBuffers
bAddition(blockSize, (unsigned int)(length / std::pow((float)blockSize, (float)(pass - 1))),
tempBuffer, outputBuffers[pass - 1]);
for(int i = pass - 1; i > 0; i--)
{
bAddition(blockSize, (unsigned int)(length / std::pow((float)blockSize, (float)(i - 1))),
outputBuffers[i], outputBuffers[i - 1]);
}
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
std::cout << "Average execution time of scan kernels: " << time * 1e-3f / iterations
<< " (us)\n";
hipMemcpy(output, outputBuffers[0], sizeBytes, hipMemcpyDeviceToHost);
hipFree(inputBuffer);
for(unsigned int i = 0; i < pass; i++)
{
hipFree(outputBuffers[i]);
hipFree(blockSumBuffers[i]);
}
hipFree(tempBuffer);
// verification
float* verificationOutput = (float*)malloc(sizeBytes);
memset(verificationOutput, 0, sizeBytes);
// reference implementation
scanLargeArraysCPUReference(verificationOutput, input, length);
// compare the results and see if they match
if (compare<float>(output, verificationOutput, length, (float)0.001))
std::cout << "PASS" << std::endl;
else
std::cout << "FAIL" << std::endl;
free(input);
free(output);
free(verificationOutput);
return 0;
}
|
f7315c066c6303ebdfe39423cabf06f708925cc6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
/*
int main()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
hipError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
*/
// Helper function for using CUDA to add vectors in parallel.
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
return cudaStatus;
}
|
f7315c066c6303ebdfe39423cabf06f708925cc6.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
/*
int main()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
*/
// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
|
15d10968bcbf110ca83c2db45d2464dfb51dc230.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "./common/book.h"
#include "./common/cpu_bitmap.h"
#define WIDTH 800
#define HEIGHT 608
/**
* This method takes an x and y coordinate and subdivides up the values.
* It checks to see if the coordinate is at the top left edge of the square
* and will then subdivide the square into nine, removing the centermost.
* It then does the same to each of the other eight squares
*/
__device__ int sierpinski(int x, int y) {
for (;x > 0 || y > 0;) {
if (x % 3 == 1 && y % 3 == 1) {
return 0;
}
x /= 3;
y /= 3;
}
return 1;
}
/**
* Runs our GPU kernel
*/
__global__ void kernel(unsigned char *buffer) {
const int x = (blockIdx.x * blockDim.x) + threadIdx.x;
const int y = (blockIdx.y * blockDim.y) + threadIdx.y;
const int offset = (y * gridDim.x * blockDim.x) + x;
const int sier = sierpinski(x, y);
const int index = offset * 4;
buffer[index] = 0;
buffer[index + 1] = (x * 256) / 800 * sier;
buffer[index + 2] = (y * 256) / 608 * sier;
buffer[index + 3] = 255;
}
/**
* Main method to time our GPU kernel 1000x and display the bitmap
*/
int main(void) {
CPUBitmap bitmap(WIDTH, HEIGHT);
unsigned char *dev_bitmap;
float elapsed;
dim3 block_size(16, 16);
dim3 grid_size(WIDTH / block_size.x, HEIGHT / block_size.y);
hipEvent_t start, stop;
hipEvent_t bitmapCpy_start, bitmapCpy_stop;
HANDLE_ERROR(hipEventCreate(&start));
HANDLE_ERROR(hipEventCreate(&stop));
HANDLE_ERROR(hipEventCreate(&bitmapCpy_start));
HANDLE_ERROR(hipEventCreate(&bitmapCpy_stop));
HANDLE_ERROR(hipMalloc((void**) &dev_bitmap, bitmap.image_size()));
HANDLE_ERROR(hipEventRecord(start, 0));
for (int i = 0; i < 1000; i++) {
hipLaunchKernelGGL(( kernel), dim3(grid_size), dim3(block_size), 0, 0, dev_bitmap);
}
HANDLE_ERROR(hipMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), hipMemcpyDeviceToHost));
HANDLE_ERROR(hipEventRecord(stop, 0));
HANDLE_ERROR(hipEventSynchronize(stop));
HANDLE_ERROR(hipEventElapsedTime(&elapsed, start, stop));
printf("Sierpinski carpet fractal created 1000x and copied back to host memory in %3.1f ms\n", elapsed);
bitmap.display_and_exit();
HANDLE_ERROR(hipEventDestroy(start));
HANDLE_ERROR(hipEventDestroy(stop));
HANDLE_ERROR(hipEventDestroy(bitmapCpy_start));
HANDLE_ERROR(hipEventDestroy(bitmapCpy_stop));
HANDLE_ERROR(hipFree(dev_bitmap));
}
|
15d10968bcbf110ca83c2db45d2464dfb51dc230.cu
|
#include "./common/book.h"
#include "./common/cpu_bitmap.h"
#define WIDTH 800
#define HEIGHT 608
/**
* This method takes an x and y coordinate and subdivides up the values.
* It checks to see if the coordinate is at the top left edge of the square
* and will then subdivide the square into nine, removing the centermost.
* It then does the same to each of the other eight squares
*/
__device__ int sierpinski(int x, int y) {
for (;x > 0 || y > 0;) {
if (x % 3 == 1 && y % 3 == 1) {
return 0;
}
x /= 3;
y /= 3;
}
return 1;
}
/**
* Runs our GPU kernel
*/
__global__ void kernel(unsigned char *buffer) {
const int x = (blockIdx.x * blockDim.x) + threadIdx.x;
const int y = (blockIdx.y * blockDim.y) + threadIdx.y;
const int offset = (y * gridDim.x * blockDim.x) + x;
const int sier = sierpinski(x, y);
const int index = offset * 4;
buffer[index] = 0;
buffer[index + 1] = (x * 256) / 800 * sier;
buffer[index + 2] = (y * 256) / 608 * sier;
buffer[index + 3] = 255;
}
/**
* Main method to time our GPU kernel 1000x and display the bitmap
*/
int main(void) {
CPUBitmap bitmap(WIDTH, HEIGHT);
unsigned char *dev_bitmap;
float elapsed;
dim3 block_size(16, 16);
dim3 grid_size(WIDTH / block_size.x, HEIGHT / block_size.y);
cudaEvent_t start, stop;
cudaEvent_t bitmapCpy_start, bitmapCpy_stop;
HANDLE_ERROR(cudaEventCreate(&start));
HANDLE_ERROR(cudaEventCreate(&stop));
HANDLE_ERROR(cudaEventCreate(&bitmapCpy_start));
HANDLE_ERROR(cudaEventCreate(&bitmapCpy_stop));
HANDLE_ERROR(cudaMalloc((void**) &dev_bitmap, bitmap.image_size()));
HANDLE_ERROR(cudaEventRecord(start, 0));
for (int i = 0; i < 1000; i++) {
kernel<<<grid_size, block_size>>>(dev_bitmap);
}
HANDLE_ERROR(cudaMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaEventRecord(stop, 0));
HANDLE_ERROR(cudaEventSynchronize(stop));
HANDLE_ERROR(cudaEventElapsedTime(&elapsed, start, stop));
printf("Sierpinski carpet fractal created 1000x and copied back to host memory in %3.1f ms\n", elapsed);
bitmap.display_and_exit();
HANDLE_ERROR(cudaEventDestroy(start));
HANDLE_ERROR(cudaEventDestroy(stop));
HANDLE_ERROR(cudaEventDestroy(bitmapCpy_start));
HANDLE_ERROR(cudaEventDestroy(bitmapCpy_stop));
HANDLE_ERROR(cudaFree(dev_bitmap));
}
|
f477773b1584a103ab1798556a6f5b13b8c8ad06.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <cstdlib>
#include <cmath>
#include <vector>
#include <string>
#include "solver.h"
using namespace std;
typedef unsigned char uchar;
int num_train = 512, num_test = 500;
int reverseInt(int n) {
int bytes = 4;
unsigned char ch[bytes];
for (int i = 0; i < bytes; i++) {
ch[i] = (n >> i * 8) & 255;
}
int p = 0;
for (int i = 0; i < bytes; i++) {
p += (int) ch[i] << (bytes - i - 1) * 8;
}
return p;
}
void readMNIST(vector<vector<uchar> > &train_images, vector<vector<uchar> > &test_images, vector<uchar> &train_labels, vector<uchar> &test_labels) {
string filename_train_images = "data/train-images.idx3-ubyte";
string filename_train_labels = "data/train-labels.idx1-ubyte";
string filename_test_images = "data/t10k-images.idx3-ubyte";
string filename_test_labels = "data/t10k-labels.idx1-ubyte";
// read train/test images
for (int i = 0; i < 2; i++) {
string filename;
if (i == 0)
filename = filename_train_images;
else
filename = filename_test_images;
ifstream f(filename.c_str(), ios::binary);
if (!f.is_open())
printf("Cannot read MNIST from %s\n", filename.c_str());
// read metadata
int magic_number = 0, n_images = 0, n_rows = 0, n_cols = 0;
f.read((char *) &magic_number, sizeof(magic_number));
magic_number = reverseInt(magic_number);
f.read((char *) &n_images, sizeof(n_images));
n_images = reverseInt(n_images);
f.read((char *) &n_rows, sizeof(n_rows));
n_rows = reverseInt(n_rows);
f.read((char *) &n_cols, sizeof(n_cols));
n_cols = reverseInt(n_cols);
for (int k = 0; k < n_images; k++) {
vector<uchar> temp;
temp.reserve(n_rows * n_cols);
for (int j = 0; j < n_rows * n_cols; j++) {
uchar t = 0;
f.read((char *)&t, sizeof(t));
temp.push_back(t);
}
if (i == 0)
train_images.push_back(temp);
else
test_images.push_back(temp);
}
f.close();
}
// read train/test labels
for (int i = 0; i < 2; i++) {
string filename;
if (i == 0)
filename = filename_train_labels;
else
filename = filename_test_labels;
ifstream f(filename.c_str(), ios::binary);
if (!f.is_open())
printf("Cannot read MNIST from %s\n", filename.c_str());
// read metadata
int magic_number = 0, n_labels = 0;
f.read((char *) &magic_number, sizeof(magic_number));
magic_number = reverseInt(magic_number);
f.read((char *) &n_labels, sizeof(n_labels));
n_labels = reverseInt(n_labels);
for (int k = 0; k < n_labels; k++) {
uchar t = 0;
f.read((char *)&t, sizeof(t));
if (i == 0)
train_labels.push_back(t);
else
test_labels.push_back(t);
}
f.close();
}
}
void printTimes(vector<float> &time, string filename);
void printvDNNLag(vector<vector<float> > &fwd_vdnn_lag, vector<vector<float> > &bwd_vdnn_lag, string filename);
void printComputationTransferTimes(vector<vector<float> > &fwd_times, vector<vector<float> >&bwd_times, bool computation, string filename);
int main(int argc, char *argv[]) {
// int num_train = 100 * batch_size, num_val = batch_size;
// void *X_train = malloc(num_train * input_channels * sizeof(float));
// int *y_train = (int *)malloc(num_train * sizeof(int));
// void *X_val = malloc(num_val * input_channels * sizeof(float));
// int *y_val = (int *)malloc(num_val * sizeof(int));
// for (int i = 0; i < num_train; i++) {
// for (int j = 0; j < input_channels; j++)
// ((float *)X_train)[i * input_channels + j] = (rand() % 1000) * 1.0 / 1000;
// y_train[i] = 0;
// }
// for (int i = 0; i < num_val; i++) {
// for (int j = 0; j < input_channels; j++)
// ((float *)X_val)[i * input_channels + j] = (rand() % 1000) * 1.0 / 1000;
// y_val[i] = rand() % 2;
// }
// int rows = 28, cols = 28, channels = 1;
// vector<vector<uchar> > train_images, test_images;
// vector<uchar> train_labels, test_labels;
// readMNIST(train_images, test_images, train_labels, test_labels);
// float *f_train_images, *f_train_labels, *f_test_images, *f_test_labels;
float *f_train_images, *f_test_images;
int *f_train_labels, *f_test_labels;
int rows = 227, cols = 227, channels = 3;
int input_size = rows * cols * channels;
// f_train_images = (float *)malloc(num_train * input_size * sizeof(float));
// f_train_labels = (int *)malloc(num_train * sizeof(int));
checkCudaErrors(hipHostMalloc(&f_train_images, num_train * input_size * sizeof(float)));
checkCudaErrors(hipHostMalloc(&f_train_labels, num_train * sizeof(int)));
f_test_images = (float *)malloc(num_test * input_size * sizeof(float));
f_test_labels = (int *)malloc(num_test * sizeof(int));
float *mean_image;
mean_image = (float *)malloc(input_size * sizeof(float));
for (int i = 0; i < input_size; i++) {
mean_image[i] = 0;
for (int k = 0; k < num_train; k++) {
mean_image[i] += f_train_images[k * input_size + i];
}
mean_image[i] /= num_train;
}
for (int i = 0; i < num_train; i++) {
for (int j = 0; j < input_size; j++) {
f_train_images[i * input_size + j] -= mean_image[j];
}
}
for (int i = 0; i < num_test; i++) {
for (int j = 0; j < input_size; j++) {
f_test_images[i * input_size + j] -= mean_image[j];
}
}
// int input_channels = rows * cols * channels * 3, hidden_channels1 = 50, hidden_channels2 = 100, output_channels = 10;
// vector<LayerSpecifier> layer_specifier;
// ConvDescriptor layer0;
// LayerSpecifier temp;
// layer0.initializeValues(1, 3, 3, 3, rows, cols, 1, 1, 1, 1);
// temp.initPointer(CONV);
// *((ConvDescriptor *)temp.params) = layer0;
// layer_specifier.push_back(temp);
// ActivationDescriptor layer0_actv;
// layer0_actv.initializeValues(RELU, 3, rows, cols);
// temp.initPointer(ACTV);
// *((ActivationDescriptor *)temp.params) = layer0_actv;
// layer_specifier.push_back(temp);
// BatchNormDescriptor layer0_bn;
// for (int i = 0; i < 200; i++) {
// layer0_bn.initializeValues(BATCHNORM_SPATIAL, 1e-5, 0.1, 3, rows, cols);
// temp.initPointer(BATCHNORM);
// *((BatchNormDescriptor *)temp.params) = layer0_bn;
// layer_specifier.push_back(temp);
// layer0.initializeValues(3, 3, 3, 3, rows, cols, 1, 1, 1, 1);
// temp.initPointer(CONV);
// *((ConvDescriptor *)temp.params) = layer0;
// layer_specifier.push_back(temp);
// layer0_actv.initializeValues(RELU, 3, rows, cols);
// temp.initPointer(ACTV);
// *((ActivationDescriptor *)temp.params) = layer0_actv;
// layer_specifier.push_back(temp);
// }
// PoolingDescriptor layer0_pool;
// layer0_pool.initializeValues(3, 2, 2, rows, cols, 0, 0, 2, 2, POOLING_MAX);
// temp.initPointer(POOLING);
// *((PoolingDescriptor *)temp.params) = layer0_pool;
// layer_specifier.push_back(temp);
// layer0_bn.initializeValues(BATCHNORM_SPATIAL, 1e-5, 0.1, 3, rows / 2, cols / 2);
// temp.initPointer(BATCHNORM);
// *((BatchNormDescriptor *)temp.params) = layer0_bn;
// layer_specifier.push_back(temp);
// // DropoutDescriptor layer0_dropout;
// // layer0_dropout.initializeValues(0.2, 3, rows / 2, cols / 2);
// // temp.initPointer(DROPOUT);
// // *((DropoutDescriptor *)temp.params) = layer0_dropout;
// // layer_specifier.push_back(temp);
// layer0.initializeValues(3, 3, 3, 3, rows / 2, cols / 2, 1, 1, 1, 1);
// temp.initPointer(CONV);
// *((ConvDescriptor *)temp.params) = layer0;
// layer_specifier.push_back(temp);
// layer0_actv.initializeValues(RELU, 3, rows / 2, cols / 2);
// temp.initPointer(ACTV);
// *((ActivationDescriptor *)temp.params) = layer0_actv;
// layer_specifier.push_back(temp);
// layer0_bn.initializeValues(BATCHNORM_SPATIAL, 1e-5, 0.1, 3, rows / 2, cols / 2);
// temp.initPointer(BATCHNORM);
// *((BatchNormDescriptor *)temp.params) = layer0_bn;
// layer_specifier.push_back(temp);
// FCDescriptor layer1;
// layer1.initializeValues(input_channels, hidden_channels1);
// temp.initPointer(FULLY_CONNECTED);
// *((FCDescriptor *)(temp.params)) = layer1;
// layer_specifier.push_back(temp);
// temp.initPointer(ACTV);
// ActivationDescriptor layer1_actv;
// layer1_actv.initializeValues(RELU, hidden_channels1, 1, 1);
// *((ActivationDescriptor *)temp.params) = layer1_actv;
// layer_specifier.push_back(temp);
// layer0_bn.initializeValues(BATCHNORM_PER_ACTIVATION, 1e-5, 0.1, hidden_channels1, 1, 1);
// temp.initPointer(BATCHNORM);
// *((BatchNormDescriptor *)temp.params) = layer0_bn;
// layer_specifier.push_back(temp);
// temp.initPointer(FULLY_CONNECTED);
// FCDescriptor layer2;
// layer2.initializeValues(hidden_channels1, output_channels);
// *((FCDescriptor *)temp.params) = layer2;
// layer_specifier.push_back(temp);
// // temp.initPointer(FULLY_CONNECTED);
// // FCDescriptor layer3;
// // layer3.initializeValues(hidden_channels2, output_channels);
// // *((FCDescriptor *)temp.params) = layer3;
// // layer_specifier.push_back(temp);
// temp.initPointer(SOFTMAX);
// SoftmaxDescriptor smax;
// smax.initializeValues(SOFTMAX_ACCURATE, SOFTMAX_MODE_INSTANCE, output_channels, 1, 1);
// *((SoftmaxDescriptor *)(temp.params)) = smax;
// layer_specifier.push_back(temp);
// AlexNet
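// Layer shapes implied by the descriptors below (input 3x227x227, NCHW):
//   conv 11x11/4, 3->96      : 96x55x55
//   maxpool 3x3/2            : 96x27x27
//   conv 5x5/1 pad 2, ->256  : 256x27x27
//   maxpool 3x3/2            : 256x13x13
//   conv 3x3/1 pad 1, ->384  : 384x13x13
//   conv 3x3/1 pad 1, ->384  : 384x13x13
//   conv 3x3/1 pad 1, ->256  : 256x13x13
//   maxpool 3x3/2            : 256x6x6 = 9216
//   FC 9216->4096 -> FC 4096->4096 -> FC 4096->1000 -> softmax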
vector<LayerSpecifier> layer_specifier;
{
ConvDescriptor layer0;
layer0.initializeValues(3, 96, 11, 11, 227, 227, 0, 0, 4, 4, RELU);
LayerSpecifier temp;
temp.initPointer(CONV);
*((ConvDescriptor *)temp.params) = layer0;
layer_specifier.push_back(temp);
}
{
PoolingDescriptor layer1;
layer1.initializeValues(96, 3, 3, 55, 55, 0, 0, 2, 2, POOLING_MAX);
LayerSpecifier temp;
temp.initPointer(POOLING);
*((PoolingDescriptor *)temp.params) = layer1;
layer_specifier.push_back(temp);
}
{
ConvDescriptor layer2;
layer2.initializeValues(96, 256, 5, 5, 27, 27, 2, 2, 1, 1, RELU);
LayerSpecifier temp;
temp.initPointer(CONV);
*((ConvDescriptor *)temp.params) = layer2;
layer_specifier.push_back(temp);
}
{
PoolingDescriptor layer3;
layer3.initializeValues(256, 3, 3, 27, 27, 0, 0, 2, 2, POOLING_MAX);
LayerSpecifier temp;
temp.initPointer(POOLING);
*((PoolingDescriptor *)temp.params) = layer3;
layer_specifier.push_back(temp);
}
{
ConvDescriptor layer4;
layer4.initializeValues(256, 384, 3, 3, 13, 13, 1, 1, 1, 1, RELU);
LayerSpecifier temp;
temp.initPointer(CONV);
*((ConvDescriptor *)temp.params) = layer4;
layer_specifier.push_back(temp);
}
{
ConvDescriptor layer5;
layer5.initializeValues(384, 384, 3, 3, 13, 13, 1, 1, 1, 1, RELU);
LayerSpecifier temp;
temp.initPointer(CONV);
*((ConvDescriptor *)temp.params) = layer5;
layer_specifier.push_back(temp);
}
{
ConvDescriptor layer6;
layer6.initializeValues(384, 256, 3, 3, 13, 13, 1, 1, 1, 1, RELU);
LayerSpecifier temp;
temp.initPointer(CONV);
*((ConvDescriptor *)temp.params) = layer6;
layer_specifier.push_back(temp);
}
{
PoolingDescriptor layer7;
layer7.initializeValues(256, 3, 3, 13, 13, 0, 0, 2, 2, POOLING_MAX);
LayerSpecifier temp;
temp.initPointer(POOLING);
*((PoolingDescriptor *)temp.params) = layer7;
layer_specifier.push_back(temp);
}
{
FCDescriptor layer8;
layer8.initializeValues(9216, 4096, RELU);
LayerSpecifier temp;
temp.initPointer(FULLY_CONNECTED);
*((FCDescriptor *)temp.params) = layer8;
layer_specifier.push_back(temp);
}
{
FCDescriptor layer9;
layer9.initializeValues(4096, 4096, RELU);
LayerSpecifier temp;
temp.initPointer(FULLY_CONNECTED);
*((FCDescriptor *)temp.params) = layer9;
layer_specifier.push_back(temp);
}
{
FCDescriptor layer10;
layer10.initializeValues(4096, 1000);
LayerSpecifier temp;
temp.initPointer(FULLY_CONNECTED);
*((FCDescriptor *)temp.params) = layer10;
layer_specifier.push_back(temp);
}
{
SoftmaxDescriptor layer11;
layer11.initializeValues(SOFTMAX_ACCURATE, SOFTMAX_MODE_INSTANCE, 1000, 1, 1);
LayerSpecifier temp;
temp.initPointer(SOFTMAX);
*((SoftmaxDescriptor *)temp.params) = layer11;
layer_specifier.push_back(temp);
}
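// Command-line handling below: with two arguments, the first selects the vDNN
// offload policy (dyn | conv | all | alternate_conv) and the second the conv
// workspace policy (p = performance-optimal, m = memory-optimal); anything else
// falls back to the dynamic policy.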
vDNNConvAlgo vdnn_conv_algo = vDNN_PERFORMANCE_OPTIMAL;
vDNNType vdnn_type = vDNN_DYN;
string filename("vdnn_dyn");
if (argc == 3) {
filename.assign("vdnn");
// argv[1] - layers to offload, argv[2] - conv algo to use
if (strcmp(argv[1], "dyn") == 0) {
vdnn_type = vDNN_DYN;
filename.append("_dyn");
}
else if (strcmp(argv[1], "conv") == 0) {
vdnn_type = vDNN_CONV;
filename.append("_conv");
}
else if (strcmp(argv[1], "all") == 0) {
vdnn_type = vDNN_ALL;
filename.append("_all");
}
else if (strcmp(argv[1], "alternate_conv") == 0) {
vdnn_type = vDNN_ALTERNATE_CONV;
filename.append("_alternate_conv");
}
else {
printf("invalid argument.. using vdnn dynamic\n");
filename.assign("vdnn_dyn");
}
if ((strcmp(argv[1], "conv") == 0 or strcmp(argv[1], "all") == 0 or strcmp(argv[1], "alternate_conv") == 0)) {
if (strcmp(argv[2], "p") == 0) {
vdnn_conv_algo = vDNN_PERFORMANCE_OPTIMAL;
filename.append("_p");
}
else if (strcmp(argv[2], "m") == 0) {
vdnn_conv_algo = vDNN_MEMORY_OPTIMAL;
filename.append("_m");
}
else {
printf("invalid argument.. using vdnn dynamic\n");
filename.assign("vdnn_dyn");
}
}
}
int batch_size = 256;
long long dropout_seed = 1;
float softmax_eps = 1e-8;
float init_std_dev = 0.1;
NeuralNet net(layer_specifier, DATA_FLOAT, batch_size, TENSOR_NCHW, dropout_seed, softmax_eps, init_std_dev, vdnn_type, vdnn_conv_algo, SGD);
int num_epoch = 1000;
double learning_rate = 1e-3;
double learning_rate_decay = 0.9;
Solver solver(&net, (void *)f_train_images, f_train_labels, (void *)f_train_images, f_train_labels, num_epoch, SGD, learning_rate, learning_rate_decay, num_train, num_train);
vector<float> loss;
vector<float> time;
vector<vector<float> > fwd_vdnn_lag, bwd_vdnn_lag;
solver.getTrainTime(loss, time, 100, fwd_vdnn_lag, bwd_vdnn_lag);
printTimes(time, filename);
printvDNNLag(fwd_vdnn_lag, bwd_vdnn_lag, filename);
vector<vector<float> > fwd_computation_time, bwd_computation_time;
solver.getComputationTime(1, fwd_computation_time, bwd_computation_time);
vector<vector<float> > fwd_transfer_time, bwd_transfer_time;
solver.getTransferTime(1, fwd_transfer_time, bwd_transfer_time);
printComputationTransferTimes(fwd_computation_time, bwd_computation_time, true, filename);
printComputationTransferTimes(fwd_transfer_time, bwd_transfer_time, false, filename);
}
void printTimes(vector<float> &time, string filename) {
float mean_time = 0.0;
float std_dev = 0.0;
int N = time.size();
for (int i = 0; i < N; i++) {
mean_time += time[i];
}
mean_time /= N;
for (int i = 0; i < N; i++) {
std_dev += pow(time[i] - mean_time, 2);
}
std_dev /= N;
std_dev = pow(std_dev, 0.5);
cout << "Average time: " << mean_time << endl;
cout << "Standard deviation: " << std_dev << endl;
filename.append(".dat");
fstream f;
f.open(filename.c_str(), ios_base::out);
for (int i = 0; i < N; i++) {
f << time[i] << endl;
}
f << "mean_time: " << mean_time << endl;
f << "standard_deviation: " << std_dev << endl;
f.close();
filename.append(".bin");
fstream f_bin;
f_bin.open(filename.c_str(), ios_base::out);
f_bin.write((char *)&N, sizeof(N));
for (int i = 0; i < N; i++) {
f_bin.write((char *)&time[i], sizeof(time[i]));
}
f_bin.close();
}
void printvDNNLag(vector<vector<float> > &fwd_vdnn_lag, vector<vector<float> > &bwd_vdnn_lag, string filename) {
filename.append("_lag.dat");
fstream f;
f.open(filename.c_str(), ios_base::out);
int N = fwd_vdnn_lag.size();
for (int i = 0; i < N; i++) {
for (int j = 0; j < fwd_vdnn_lag[i].size(); j++) {
f << "fwd" << j << ": " << fwd_vdnn_lag[i][j] << endl;
}
for (int j = 0; j < bwd_vdnn_lag[i].size(); j++) {
f << "bwd" << j << ": " << bwd_vdnn_lag[i][j] << endl;
}
f << endl;
}
f.close();
}
void printComputationTransferTimes(vector<vector<float> > &fwd_times, vector<vector<float> >&bwd_times, bool computation, string filename) {
if (computation)
filename.append("_compute_time.dat");
else
filename.append("_transfer_time.dat");
fstream f;
f.open(filename.c_str(), ios_base::out);
int N = fwd_times.size();
for (int i = 0; i < N; i++) {
for (int j = 0; j < fwd_times[i].size(); j++) {
f << "fwd" << j << ": " << fwd_times[i][j] << endl;
}
for (int j = 0; j < bwd_times[i].size(); j++) {
f << "bwd" << j << ": " << bwd_times[i][j] << endl;
}
f << endl;
}
f.close();
}
|
f477773b1584a103ab1798556a6f5b13b8c8ad06.cu
|
#include <iostream>
#include <fstream>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <cmath>
#include <vector>
#include <string>
#include "solver.h"
using namespace std;
typedef unsigned char uchar;
int num_train = 512, num_test = 500;
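// MNIST IDX files store their 32-bit header fields big-endian; reverseInt flips
// the byte order (big-endian file -> little-endian host).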
int reverseInt(int n) {
int bytes = 4;
unsigned char ch[bytes];
for (int i = 0; i < bytes; i++) {
ch[i] = (n >> i * 8) & 255;
}
int p = 0;
for (int i = 0; i < bytes; i++) {
p += (int) ch[i] << (bytes - i - 1) * 8;
}
return p;
}
void readMNIST(vector<vector<uchar> > &train_images, vector<vector<uchar> > &test_images, vector<uchar> &train_labels, vector<uchar> &test_labels) {
string filename_train_images = "data/train-images.idx3-ubyte";
string filename_train_labels = "data/train-labels.idx1-ubyte";
string filename_test_images = "data/t10k-images.idx3-ubyte";
string filename_test_labels = "data/t10k-labels.idx1-ubyte";
// read train/test images
for (int i = 0; i < 2; i++) {
string filename;
if (i == 0)
filename = filename_train_images;
else
filename = filename_test_images;
ifstream f(filename.c_str(), ios::binary);
if (!f.is_open())
printf("Cannot read MNIST from %s\n", filename.c_str());
// read metadata
int magic_number = 0, n_images = 0, n_rows = 0, n_cols = 0;
f.read((char *) &magic_number, sizeof(magic_number));
magic_number = reverseInt(magic_number);
f.read((char *) &n_images, sizeof(n_images));
n_images = reverseInt(n_images);
f.read((char *) &n_rows, sizeof(n_rows));
n_rows = reverseInt(n_rows);
f.read((char *) &n_cols, sizeof(n_cols));
n_cols = reverseInt(n_cols);
for (int k = 0; k < n_images; k++) {
vector<uchar> temp;
temp.reserve(n_rows * n_cols);
for (int j = 0; j < n_rows * n_cols; j++) {
uchar t = 0;
f.read((char *)&t, sizeof(t));
temp.push_back(t);
}
if (i == 0)
train_images.push_back(temp);
else
test_images.push_back(temp);
}
f.close();
}
// read train/test labels
for (int i = 0; i < 2; i++) {
string filename;
if (i == 0)
filename = filename_train_labels;
else
filename = filename_test_labels;
ifstream f(filename.c_str(), ios::binary);
if (!f.is_open())
printf("Cannot read MNIST from %s\n", filename.c_str());
// read metadata
int magic_number = 0, n_labels = 0;
f.read((char *) &magic_number, sizeof(magic_number));
magic_number = reverseInt(magic_number);
f.read((char *) &n_labels, sizeof(n_labels));
n_labels = reverseInt(n_labels);
for (int k = 0; k < n_labels; k++) {
uchar t = 0;
f.read((char *)&t, sizeof(t));
if (i == 0)
train_labels.push_back(t);
else
test_labels.push_back(t);
}
f.close();
}
}
void printTimes(vector<float> &time, string filename);
void printvDNNLag(vector<vector<float> > &fwd_vdnn_lag, vector<vector<float> > &bwd_vdnn_lag, string filename);
void printComputationTransferTimes(vector<vector<float> > &fwd_times, vector<vector<float> >&bwd_times, bool computation, string filename);
int main(int argc, char *argv[]) {
// int num_train = 100 * batch_size, num_val = batch_size;
// void *X_train = malloc(num_train * input_channels * sizeof(float));
// int *y_train = (int *)malloc(num_train * sizeof(int));
// void *X_val = malloc(num_val * input_channels * sizeof(float));
// int *y_val = (int *)malloc(num_val * sizeof(int));
// for (int i = 0; i < num_train; i++) {
// for (int j = 0; j < input_channels; j++)
// ((float *)X_train)[i * input_channels + j] = (rand() % 1000) * 1.0 / 1000;
// y_train[i] = 0;
// }
// for (int i = 0; i < num_val; i++) {
// for (int j = 0; j < input_channels; j++)
// ((float *)X_val)[i * input_channels + j] = (rand() % 1000) * 1.0 / 1000;
// y_val[i] = rand() % 2;
// }
// int rows = 28, cols = 28, channels = 1;
// vector<vector<uchar> > train_images, test_images;
// vector<uchar> train_labels, test_labels;
// readMNIST(train_images, test_images, train_labels, test_labels);
// float *f_train_images, *f_train_labels, *f_test_images, *f_test_labels;
float *f_train_images, *f_test_images;
int *f_train_labels, *f_test_labels;
int rows = 227, cols = 227, channels = 3;
int input_size = rows * cols * channels;
// f_train_images = (float *)malloc(num_train * input_size * sizeof(float));
// f_train_labels = (int *)malloc(num_train * sizeof(int));
checkCudaErrors(cudaMallocHost(&f_train_images, num_train * input_size * sizeof(float)));
checkCudaErrors(cudaMallocHost(&f_train_labels, num_train * sizeof(int)));
f_test_images = (float *)malloc(num_test * input_size * sizeof(float));
f_test_labels = (int *)malloc(num_test * sizeof(int));
float *mean_image;
mean_image = (float *)malloc(input_size * sizeof(float));
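// Mean-centering: compute the per-pixel mean over the training buffer and
// subtract it from the train and test images. Note that the buffers are never
// filled from a real dataset here (the readMNIST call above is commented out),
// so this pass only matters for the timing measurements that follow.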
for (int i = 0; i < input_size; i++) {
mean_image[i] = 0;
for (int k = 0; k < num_train; k++) {
mean_image[i] += f_train_images[k * input_size + i];
}
mean_image[i] /= num_train;
}
for (int i = 0; i < num_train; i++) {
for (int j = 0; j < input_size; j++) {
f_train_images[i * input_size + j] -= mean_image[j];
}
}
for (int i = 0; i < num_test; i++) {
for (int j = 0; j < input_size; j++) {
f_test_images[i * input_size + j] -= mean_image[j];
}
}
// int input_channels = rows * cols * channels * 3, hidden_channels1 = 50, hidden_channels2 = 100, output_channels = 10;
// vector<LayerSpecifier> layer_specifier;
// ConvDescriptor layer0;
// LayerSpecifier temp;
// layer0.initializeValues(1, 3, 3, 3, rows, cols, 1, 1, 1, 1);
// temp.initPointer(CONV);
// *((ConvDescriptor *)temp.params) = layer0;
// layer_specifier.push_back(temp);
// ActivationDescriptor layer0_actv;
// layer0_actv.initializeValues(RELU, 3, rows, cols);
// temp.initPointer(ACTV);
// *((ActivationDescriptor *)temp.params) = layer0_actv;
// layer_specifier.push_back(temp);
// BatchNormDescriptor layer0_bn;
// for (int i = 0; i < 200; i++) {
// layer0_bn.initializeValues(BATCHNORM_SPATIAL, 1e-5, 0.1, 3, rows, cols);
// temp.initPointer(BATCHNORM);
// *((BatchNormDescriptor *)temp.params) = layer0_bn;
// layer_specifier.push_back(temp);
// layer0.initializeValues(3, 3, 3, 3, rows, cols, 1, 1, 1, 1);
// temp.initPointer(CONV);
// *((ConvDescriptor *)temp.params) = layer0;
// layer_specifier.push_back(temp);
// layer0_actv.initializeValues(RELU, 3, rows, cols);
// temp.initPointer(ACTV);
// *((ActivationDescriptor *)temp.params) = layer0_actv;
// layer_specifier.push_back(temp);
// }
// PoolingDescriptor layer0_pool;
// layer0_pool.initializeValues(3, 2, 2, rows, cols, 0, 0, 2, 2, POOLING_MAX);
// temp.initPointer(POOLING);
// *((PoolingDescriptor *)temp.params) = layer0_pool;
// layer_specifier.push_back(temp);
// layer0_bn.initializeValues(BATCHNORM_SPATIAL, 1e-5, 0.1, 3, rows / 2, cols / 2);
// temp.initPointer(BATCHNORM);
// *((BatchNormDescriptor *)temp.params) = layer0_bn;
// layer_specifier.push_back(temp);
// // DropoutDescriptor layer0_dropout;
// // layer0_dropout.initializeValues(0.2, 3, rows / 2, cols / 2);
// // temp.initPointer(DROPOUT);
// // *((DropoutDescriptor *)temp.params) = layer0_dropout;
// // layer_specifier.push_back(temp);
// layer0.initializeValues(3, 3, 3, 3, rows / 2, cols / 2, 1, 1, 1, 1);
// temp.initPointer(CONV);
// *((ConvDescriptor *)temp.params) = layer0;
// layer_specifier.push_back(temp);
// layer0_actv.initializeValues(RELU, 3, rows / 2, cols / 2);
// temp.initPointer(ACTV);
// *((ActivationDescriptor *)temp.params) = layer0_actv;
// layer_specifier.push_back(temp);
// layer0_bn.initializeValues(BATCHNORM_SPATIAL, 1e-5, 0.1, 3, rows / 2, cols / 2);
// temp.initPointer(BATCHNORM);
// *((BatchNormDescriptor *)temp.params) = layer0_bn;
// layer_specifier.push_back(temp);
// FCDescriptor layer1;
// layer1.initializeValues(input_channels, hidden_channels1);
// temp.initPointer(FULLY_CONNECTED);
// *((FCDescriptor *)(temp.params)) = layer1;
// layer_specifier.push_back(temp);
// temp.initPointer(ACTV);
// ActivationDescriptor layer1_actv;
// layer1_actv.initializeValues(RELU, hidden_channels1, 1, 1);
// *((ActivationDescriptor *)temp.params) = layer1_actv;
// layer_specifier.push_back(temp);
// layer0_bn.initializeValues(BATCHNORM_PER_ACTIVATION, 1e-5, 0.1, hidden_channels1, 1, 1);
// temp.initPointer(BATCHNORM);
// *((BatchNormDescriptor *)temp.params) = layer0_bn;
// layer_specifier.push_back(temp);
// temp.initPointer(FULLY_CONNECTED);
// FCDescriptor layer2;
// layer2.initializeValues(hidden_channels1, output_channels);
// *((FCDescriptor *)temp.params) = layer2;
// layer_specifier.push_back(temp);
// // temp.initPointer(FULLY_CONNECTED);
// // FCDescriptor layer3;
// // layer3.initializeValues(hidden_channels2, output_channels);
// // *((FCDescriptor *)temp.params) = layer3;
// // layer_specifier.push_back(temp);
// temp.initPointer(SOFTMAX);
// SoftmaxDescriptor smax;
// smax.initializeValues(SOFTMAX_ACCURATE, SOFTMAX_MODE_INSTANCE, output_channels, 1, 1);
// *((SoftmaxDescriptor *)(temp.params)) = smax;
// layer_specifier.push_back(temp);
// AlexNet
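// Layer shapes implied by the descriptors below (input 3x227x227, NCHW):
//   conv 11x11/4, 3->96      : 96x55x55
//   maxpool 3x3/2            : 96x27x27
//   conv 5x5/1 pad 2, ->256  : 256x27x27
//   maxpool 3x3/2            : 256x13x13
//   conv 3x3/1 pad 1, ->384  : 384x13x13
//   conv 3x3/1 pad 1, ->384  : 384x13x13
//   conv 3x3/1 pad 1, ->256  : 256x13x13
//   maxpool 3x3/2            : 256x6x6 = 9216
//   FC 9216->4096 -> FC 4096->4096 -> FC 4096->1000 -> softmax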
vector<LayerSpecifier> layer_specifier;
{
ConvDescriptor layer0;
layer0.initializeValues(3, 96, 11, 11, 227, 227, 0, 0, 4, 4, RELU);
LayerSpecifier temp;
temp.initPointer(CONV);
*((ConvDescriptor *)temp.params) = layer0;
layer_specifier.push_back(temp);
}
{
PoolingDescriptor layer1;
layer1.initializeValues(96, 3, 3, 55, 55, 0, 0, 2, 2, POOLING_MAX);
LayerSpecifier temp;
temp.initPointer(POOLING);
*((PoolingDescriptor *)temp.params) = layer1;
layer_specifier.push_back(temp);
}
{
ConvDescriptor layer2;
layer2.initializeValues(96, 256, 5, 5, 27, 27, 2, 2, 1, 1, RELU);
LayerSpecifier temp;
temp.initPointer(CONV);
*((ConvDescriptor *)temp.params) = layer2;
layer_specifier.push_back(temp);
}
{
PoolingDescriptor layer3;
layer3.initializeValues(256, 3, 3, 27, 27, 0, 0, 2, 2, POOLING_MAX);
LayerSpecifier temp;
temp.initPointer(POOLING);
*((PoolingDescriptor *)temp.params) = layer3;
layer_specifier.push_back(temp);
}
{
ConvDescriptor layer4;
layer4.initializeValues(256, 384, 3, 3, 13, 13, 1, 1, 1, 1, RELU);
LayerSpecifier temp;
temp.initPointer(CONV);
*((ConvDescriptor *)temp.params) = layer4;
layer_specifier.push_back(temp);
}
{
ConvDescriptor layer5;
layer5.initializeValues(384, 384, 3, 3, 13, 13, 1, 1, 1, 1, RELU);
LayerSpecifier temp;
temp.initPointer(CONV);
*((ConvDescriptor *)temp.params) = layer5;
layer_specifier.push_back(temp);
}
{
ConvDescriptor layer6;
layer6.initializeValues(384, 256, 3, 3, 13, 13, 1, 1, 1, 1, RELU);
LayerSpecifier temp;
temp.initPointer(CONV);
*((ConvDescriptor *)temp.params) = layer6;
layer_specifier.push_back(temp);
}
{
PoolingDescriptor layer7;
layer7.initializeValues(256, 3, 3, 13, 13, 0, 0, 2, 2, POOLING_MAX);
LayerSpecifier temp;
temp.initPointer(POOLING);
*((PoolingDescriptor *)temp.params) = layer7;
layer_specifier.push_back(temp);
}
{
FCDescriptor layer8;
layer8.initializeValues(9216, 4096, RELU);
LayerSpecifier temp;
temp.initPointer(FULLY_CONNECTED);
*((FCDescriptor *)temp.params) = layer8;
layer_specifier.push_back(temp);
}
{
FCDescriptor layer9;
layer9.initializeValues(4096, 4096, RELU);
LayerSpecifier temp;
temp.initPointer(FULLY_CONNECTED);
*((FCDescriptor *)temp.params) = layer9;
layer_specifier.push_back(temp);
}
{
FCDescriptor layer10;
layer10.initializeValues(4096, 1000);
LayerSpecifier temp;
temp.initPointer(FULLY_CONNECTED);
*((FCDescriptor *)temp.params) = layer10;
layer_specifier.push_back(temp);
}
{
SoftmaxDescriptor layer11;
layer11.initializeValues(SOFTMAX_ACCURATE, SOFTMAX_MODE_INSTANCE, 1000, 1, 1);
LayerSpecifier temp;
temp.initPointer(SOFTMAX);
*((SoftmaxDescriptor *)temp.params) = layer11;
layer_specifier.push_back(temp);
}
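// Command-line handling below: with two arguments, the first selects the vDNN
// offload policy (dyn | conv | all | alternate_conv) and the second the conv
// workspace policy (p = performance-optimal, m = memory-optimal); anything else
// falls back to the dynamic policy.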
vDNNConvAlgo vdnn_conv_algo = vDNN_PERFORMANCE_OPTIMAL;
vDNNType vdnn_type = vDNN_DYN;
string filename("vdnn_dyn");
if (argc == 3) {
filename.assign("vdnn");
// argv[1] - layers to offload, argv[2] - conv algo to use
if (strcmp(argv[1], "dyn") == 0) {
vdnn_type = vDNN_DYN;
filename.append("_dyn");
}
else if (strcmp(argv[1], "conv") == 0) {
vdnn_type = vDNN_CONV;
filename.append("_conv");
}
else if (strcmp(argv[1], "all") == 0) {
vdnn_type = vDNN_ALL;
filename.append("_all");
}
else if (strcmp(argv[1], "alternate_conv") == 0) {
vdnn_type = vDNN_ALTERNATE_CONV;
filename.append("_alternate_conv");
}
else {
printf("invalid argument.. using vdnn dynamic\n");
filename.assign("vdnn_dyn");
}
if ((strcmp(argv[1], "conv") == 0 or strcmp(argv[1], "all") == 0 or strcmp(argv[1], "alternate_conv") == 0)) {
if (strcmp(argv[2], "p") == 0) {
vdnn_conv_algo = vDNN_PERFORMANCE_OPTIMAL;
filename.append("_p");
}
else if (strcmp(argv[2], "m") == 0) {
vdnn_conv_algo = vDNN_MEMORY_OPTIMAL;
filename.append("_m");
}
else {
printf("invalid argument.. using vdnn dynamic\n");
filename.assign("vdnn_dyn");
}
}
}
int batch_size = 256;
long long dropout_seed = 1;
float softmax_eps = 1e-8;
float init_std_dev = 0.1;
NeuralNet net(layer_specifier, DATA_FLOAT, batch_size, TENSOR_NCHW, dropout_seed, softmax_eps, init_std_dev, vdnn_type, vdnn_conv_algo, SGD);
int num_epoch = 1000;
double learning_rate = 1e-3;
double learning_rate_decay = 0.9;
Solver solver(&net, (void *)f_train_images, f_train_labels, (void *)f_train_images, f_train_labels, num_epoch, SGD, learning_rate, learning_rate_decay, num_train, num_train);
vector<float> loss;
vector<float> time;
vector<vector<float> > fwd_vdnn_lag, bwd_vdnn_lag;
solver.getTrainTime(loss, time, 100, fwd_vdnn_lag, bwd_vdnn_lag);
printTimes(time, filename);
printvDNNLag(fwd_vdnn_lag, bwd_vdnn_lag, filename);
vector<vector<float> > fwd_computation_time, bwd_computation_time;
solver.getComputationTime(1, fwd_computation_time, bwd_computation_time);
vector<vector<float> > fwd_transfer_time, bwd_transfer_time;
solver.getTransferTime(1, fwd_transfer_time, bwd_transfer_time);
printComputationTransferTimes(fwd_computation_time, bwd_computation_time, true, filename);
printComputationTransferTimes(fwd_transfer_time, bwd_transfer_time, false, filename);
}
void printTimes(vector<float> &time, string filename) {
float mean_time = 0.0;
float std_dev = 0.0;
int N = time.size();
for (int i = 0; i < N; i++) {
mean_time += time[i];
}
mean_time /= N;
for (int i = 0; i < N; i++) {
std_dev += pow(time[i] - mean_time, 2);
}
std_dev /= N;
std_dev = pow(std_dev, 0.5);
cout << "Average time: " << mean_time << endl;
cout << "Standard deviation: " << std_dev << endl;
filename.append(".dat");
fstream f;
f.open(filename.c_str(), ios_base::out);
for (int i = 0; i < N; i++) {
f << time[i] << endl;
}
f << "mean_time: " << mean_time << endl;
f << "standard_deviation: " << std_dev << endl;
f.close();
filename.append(".bin");
fstream f_bin;
f_bin.open(filename.c_str(), ios_base::out);
f_bin.write((char *)&N, sizeof(N));
for (int i = 0; i < N; i++) {
f_bin.write((char *)&time[i], sizeof(time[i]));
}
f_bin.close();
}
void printvDNNLag(vector<vector<float> > &fwd_vdnn_lag, vector<vector<float> > &bwd_vdnn_lag, string filename) {
filename.append("_lag.dat");
fstream f;
f.open(filename.c_str(), ios_base::out);
int N = fwd_vdnn_lag.size();
for (int i = 0; i < N; i++) {
for (int j = 0; j < fwd_vdnn_lag[i].size(); j++) {
f << "fwd" << j << ": " << fwd_vdnn_lag[i][j] << endl;
}
for (int j = 0; j < bwd_vdnn_lag[i].size(); j++) {
f << "bwd" << j << ": " << bwd_vdnn_lag[i][j] << endl;
}
f << endl;
}
f.close();
}
void printComputationTransferTimes(vector<vector<float> > &fwd_times, vector<vector<float> >&bwd_times, bool computation, string filename) {
if (computation)
filename.append("_compute_time.dat");
else
filename.append("_transfer_time.dat");
fstream f;
f.open(filename.c_str(), ios_base::out);
int N = fwd_times.size();
for (int i = 0; i < N; i++) {
for (int j = 0; j < fwd_times[i].size(); j++) {
f << "fwd" << j << ": " << fwd_times[i][j] << endl;
}
for (int j = 0; j < bwd_times[i].size(); j++) {
f << "bwd" << j << ": " << bwd_times[i][j] << endl;
}
f << endl;
}
f.close();
}
|
370c89c6bee9d14be61dc1c34e0291b09c6177c4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@author Mark Gates
@author Azzam Haidar
@author Ichitaro Yamazaki
@generated from magmablas/zlacpy_sym_out.cu, normal z -> s, Thu Oct 8 23:05:33 2020
*/
#include "magma_internal.h"
#define BLK_X 64
#define BLK_Y 32
/******************************************************************************/
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to slaset, slacpy, slag2d, clag2z, sgeadd.
*/
static __device__
void slacpy_sym_out_full_device(
int m, int n,
const float *dA, int ldda,
float *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
/******************************************************************************/
/*
Similar to slacpy_full, but updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
Code similar to slaset, slacpy, zlat2c, clat2z.
*/
static __device__
void slacpy_sym_out_lower_device(
int m, int n, magma_int_t *rows, magma_int_t *perm,
const float *dA, int ldda,
float *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x; // row
int iby = blockIdx.y*BLK_Y; // col
/* check if full block-column && (below diag) */
bool full = (iby + BLK_Y <= n);
for (int jj=0; jj < n; jj++) {
perm[rows[2*jj+1]] = rows[2*jj+1];
}
/* do only rows inside matrix, and blocks not above diag */
if ( ind < m ) {
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int jj=0; jj < BLK_Y; ++jj ) {
int j = rows[2*(iby+jj)+1];
if (ind <= j)
dB[j + ind*ldda] = MAGMA_S_CONJ( dA[ind + (iby+jj)*lddb] );
else
dB[ind + j*ldda] = dA[ind + (iby+jj)*lddb];
}
}
else {
// either partial block-column or diagonal block
for( int jj=0; jj < BLK_Y && iby+jj < n; ++jj ) {
int j = rows[2*(iby+jj)+1];
if (ind <= j)
dB[j + ind*ldda] = MAGMA_S_CONJ( dA[ind + (iby+jj)*lddb] );
else
dB[ind + j*ldda] = dA[ind + (iby+jj)*lddb];
}
}
}
}
/******************************************************************************/
/*
Similar to slacpy_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
Code similar to slaset, slacpy, zlat2c, clat2z.
*/
static __device__
void slacpy_sym_out_upper_device(
int m, int n,
const float *dA, int ldda,
float *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (above diag) */
bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby));
/* do only rows inside matrix, and blocks not below diag */
if ( ind < m && ind < iby + BLK_Y ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( ind <= iby+j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
}
/******************************************************************************/
/*
kernel wrappers to call the device functions.
*/
__global__
void slacpy_sym_out_full_kernel(
int m, int n,
const float *dA, int ldda,
float *dB, int lddb )
{
slacpy_sym_out_full_device(m, n, dA, ldda, dB, lddb);
}
__global__
void slacpy_sym_out_lower_kernel(
int m, int n, magma_int_t *rows, magma_int_t *perm,
const float *dA, int ldda,
float *dB, int lddb )
{
slacpy_sym_out_lower_device(m, n, rows, perm, dA, ldda, dB, lddb);
}
__global__
void slacpy_sym_out_upper_kernel(
int m, int n,
const float *dA, int ldda,
float *dB, int lddb )
{
slacpy_sym_out_upper_device(m, n, dA, ldda, dB, lddb);
}
/***************************************************************************//**
Purpose
-------
SLACPY_SYM_OUT copies all or part of a two-dimensional matrix dA to another
matrix dB.
This is the same as SLACPY, but adds queue argument.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be copied to dB.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
- = MagmaFull: All of the matrix dA
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
rows INTEGER array, on GPU, dimension (2*n)
On entry, it stores the new pivots such that rows[i]-th and rows[n+i]-th
rows are swapped.
@param[in,out]
perm INTEGER array, on GPU, dimension (m)
On entry, it stores the permutation array such that i-th row will be
the original perm[i]-th row after the pivots are applied.
On exit, it is restored to be identity permutation.
@param[in,out]
dA REAL array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
On exit, the matrix after the symmetric pivoting is applied.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
dB REAL array, dimension (LDDB,N)
The M-by-N matrix dB.
On entry, dB stores the columns after row pivoting is applied.
@param[in]
lddb INTEGER
The leading dimension of the array dB. LDDB >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lacpy
*******************************************************************************/
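/*
    Example usage (a sketch, not part of the original source; assumes MAGMA has
    been initialized with magma_init() and that rows, perm, dA, dB are device
    buffers of the sizes documented above):

        magma_queue_t queue;
        magma_queue_create( 0, &queue );
        magmablas_slacpy_sym_out( MagmaLower, m, n, rows, perm,
                                  dA, ldda, dB, lddb, queue );
        magma_queue_sync( queue );
        magma_queue_destroy( queue );
*/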
extern "C" void
magmablas_slacpy_sym_out(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magma_int_t *rows, magma_int_t *perm,
magmaFloat_const_ptr dA, magma_int_t ldda,
magmaFloat_ptr dB, magma_int_t lddb,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( m == 0 || n == 0 ) {
return;
}
dim3 threads( BLK_X, 1 );
dim3 grid( magma_ceildiv(m, BLK_X), magma_ceildiv(n, BLK_Y) );
if ( uplo == MagmaLower ) {
hipLaunchKernelGGL(( slacpy_sym_out_lower_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, rows, perm, dA, ldda, dB, lddb );
}
else if ( uplo == MagmaUpper ) {
hipLaunchKernelGGL(( slacpy_sym_out_upper_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dA, ldda, dB, lddb );
}
else {
hipLaunchKernelGGL(( slacpy_sym_out_full_kernel) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dA, ldda, dB, lddb );
}
}
|
370c89c6bee9d14be61dc1c34e0291b09c6177c4.cu
|
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@author Mark Gates
@author Azzam Haidar
@author Ichitaro Yamazaki
@generated from magmablas/zlacpy_sym_out.cu, normal z -> s, Thu Oct 8 23:05:33 2020
*/
#include "magma_internal.h"
#define BLK_X 64
#define BLK_Y 32
/******************************************************************************/
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to slaset, slacpy, slag2d, clag2z, sgeadd.
*/
static __device__
void slacpy_sym_out_full_device(
int m, int n,
const float *dA, int ldda,
float *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
/******************************************************************************/
/*
Similar to slacpy_full, but updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
Code similar to slaset, slacpy, zlat2c, clat2z.
*/
static __device__
void slacpy_sym_out_lower_device(
int m, int n, magma_int_t *rows, magma_int_t *perm,
const float *dA, int ldda,
float *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x; // row
int iby = blockIdx.y*BLK_Y; // col
/* check if full block-column && (below diag) */
bool full = (iby + BLK_Y <= n);
for (int jj=0; jj < n; jj++) {
perm[rows[2*jj+1]] = rows[2*jj+1];
}
/* do only rows inside matrix, and blocks not above diag */
if ( ind < m ) {
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int jj=0; jj < BLK_Y; ++jj ) {
int j = rows[2*(iby+jj)+1];
if (ind <= j)
dB[j + ind*ldda] = MAGMA_S_CONJ( dA[ind + (iby+jj)*lddb] );
else
dB[ind + j*ldda] = dA[ind + (iby+jj)*lddb];
}
}
else {
// either partial block-column or diagonal block
for( int jj=0; jj < BLK_Y && iby+jj < n; ++jj ) {
int j = rows[2*(iby+jj)+1];
if (ind <= j)
dB[j + ind*ldda] = MAGMA_S_CONJ( dA[ind + (iby+jj)*lddb] );
else
dB[ind + j*ldda] = dA[ind + (iby+jj)*lddb];
}
}
}
}
/******************************************************************************/
/*
Similar to slacpy_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
Code similar to slaset, slacpy, zlat2c, clat2z.
*/
static __device__
void slacpy_sym_out_upper_device(
int m, int n,
const float *dA, int ldda,
float *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (above diag) */
bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby));
/* do only rows inside matrix, and blocks not below diag */
if ( ind < m && ind < iby + BLK_Y ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( ind <= iby+j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
}
/******************************************************************************/
/*
kernel wrappers to call the device functions.
*/
__global__
void slacpy_sym_out_full_kernel(
int m, int n,
const float *dA, int ldda,
float *dB, int lddb )
{
slacpy_sym_out_full_device(m, n, dA, ldda, dB, lddb);
}
__global__
void slacpy_sym_out_lower_kernel(
int m, int n, magma_int_t *rows, magma_int_t *perm,
const float *dA, int ldda,
float *dB, int lddb )
{
slacpy_sym_out_lower_device(m, n, rows, perm, dA, ldda, dB, lddb);
}
__global__
void slacpy_sym_out_upper_kernel(
int m, int n,
const float *dA, int ldda,
float *dB, int lddb )
{
slacpy_sym_out_upper_device(m, n, dA, ldda, dB, lddb);
}
/***************************************************************************//**
Purpose
-------
SLACPY_SYM_OUT copies all or part of a two-dimensional matrix dA to another
matrix dB.
This is the same as SLACPY, but adds queue argument.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be copied to dB.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
- = MagmaFull: All of the matrix dA
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
rows INTEGER array, on GPU, dimension (2*n)
On entry, it stores the new pivots such that rows[i]-th and rows[n+i]-th
rows are swapped.
@param[in,out]
perm INTEGER array, on GPU, dimension (m)
On entry, it stores the permutation array such that i-th row will be
the original perm[i]-th row after the pivots are applied.
On exit, it is restored to be identity permutation.
@param[in,out]
dA REAL array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
On exit, the matrix after the symmetric pivoting is applied.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
dB REAL array, dimension (LDDB,N)
The M-by-N matrix dB.
On entry, dB stores the columns after row pivoting is applied.
@param[in]
lddb INTEGER
The leading dimension of the array dB. LDDB >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lacpy
*******************************************************************************/
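/*
    Example usage (a sketch, not part of the original source; assumes MAGMA has
    been initialized with magma_init() and that rows, perm, dA, dB are device
    buffers of the sizes documented above):

        magma_queue_t queue;
        magma_queue_create( 0, &queue );
        magmablas_slacpy_sym_out( MagmaLower, m, n, rows, perm,
                                  dA, ldda, dB, lddb, queue );
        magma_queue_sync( queue );
        magma_queue_destroy( queue );
*/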
extern "C" void
magmablas_slacpy_sym_out(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magma_int_t *rows, magma_int_t *perm,
magmaFloat_const_ptr dA, magma_int_t ldda,
magmaFloat_ptr dB, magma_int_t lddb,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( m == 0 || n == 0 ) {
return;
}
dim3 threads( BLK_X, 1 );
dim3 grid( magma_ceildiv(m, BLK_X), magma_ceildiv(n, BLK_Y) );
if ( uplo == MagmaLower ) {
slacpy_sym_out_lower_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, rows, perm, dA, ldda, dB, lddb );
}
else if ( uplo == MagmaUpper ) {
slacpy_sym_out_upper_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, dA, ldda, dB, lddb );
}
else {
slacpy_sym_out_full_kernel <<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, dA, ldda, dB, lddb );
}
}
|
e892fe409562fde0607ae9f75a96e36b3094ef1d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/cudnn_wrappers.h"
#include "caffe2/operators/conv_pool_op_base.h"
#include <hipcub/hipcub.hpp>
namespace caffe2 {
namespace {
// Explicit fast paths for avg and max global pooling due to CuDNN global
// pooling performance bug which makes pooling extremely slow.
template <typename T>
__global__ void
global_avgpool_kernel_NCHW(const int NC, const int sz, const T* data, T* out) {
typedef hipcub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int j = blockIdx.x; j < NC; j += gridDim.x) {
T sum(0);
for (int k = threadIdx.x; k < sz; k += blockDim.x) {
sum += data[j * sz + k];
}
float totalsum = BlockReduce(temp_storage).Sum(sum);
if (threadIdx.x == 0) {
out[j] = totalsum / sz;
}
__syncthreads();
}
}
template <typename T>
__global__ void
global_avgpool_backward_NCHW(const int NC, const int sz, const T* dx, T* out) {
CUDA_1D_KERNEL_LOOP(i, NC * sz) {
out[i] = dx[i / sz] / sz;
}
}
template <typename T>
__global__ void
global_maxpool_kernel_NCHW(const int NC, const int sz, const T* data, T* out) {
typedef hipcub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int j = blockIdx.x; j < NC; j += gridDim.x) {
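// Each iteration reduces one (n, c) slice of sz elements to its maximum; the
// running max is seeded from the slice indexed by blockIdx.x, and each thread
// then strides over the slice in steps of blockDim.x.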
T max(data[blockIdx.x * sz + threadIdx.x]);
for (int k = threadIdx.x; k < sz; k += blockDim.x) {
max = data[j * sz + k] > max ? data[j * sz + k] : max;
}
float totalmax = BlockReduce(temp_storage).Reduce(max, hipcub::Max());
if (threadIdx.x == 0) {
out[j] = totalmax;
}
__syncthreads();
}
}
template <typename T>
__global__ void global_maxpool_backward_NCHW(
const int NC,
const int sz,
const T* dx,
T* out,
const T* x,
const T* in) {
CUDA_1D_KERNEL_LOOP(i, NC * sz) {
if (in[i] == x[i / sz]) {
out[i] = dx[i / sz];
} else {
out[i] = 0.0;
}
}
}
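// Builds a cuDNN tensor descriptor: the 4-D case uses the cuDNN tensor format
// directly, while the 5-D case (extra depth dimension) sets explicit dims and
// strides derived from the storage order (NCHW vs. NHWC).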
template <typename T>
void setTensorDescriptor(
const int size,
const StorageOrder order,
const int N,
const int C,
const int H,
const int W,
const int D,
cudnnTensorDescriptor_t& desc) {
if (size == 4) {
CUDNN_ENFORCE(cudnnSetTensor4dDescriptor(
desc,
GetCudnnTensorFormat(order),
cudnnTypeWrapper<T>::type,
N,
C,
H,
W));
} else {
vector<int> dims = {N, C, H, W, D};
vector<int> strides;
order == NCHW
? strides.insert(strides.end(), {C * H * W * D, H * W * D, W * D, D, 1})
: strides.insert(
strides.end(), {H * W * D * C, 1, W * D * C, D * C, C});
CUDNN_ENFORCE(cudnnSetTensorNdDescriptor(
desc,
cudnnTypeWrapper<T>::type,
size > 3 ? size : 4,
dims.data(),
strides.data()));
}
}
} // namespace
class CuDNNPoolOp : public ConvPoolOpBase<CUDAContext> {
public:
CuDNNPoolOp(const OperatorDef& operator_def, Workspace* ws)
: ConvPoolOpBase<CUDAContext>(operator_def, ws),
cudnn_wrapper_(&context_) {
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&bottom_desc_));
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&top_desc_));
CUDNN_ENFORCE(cudnnCreatePoolingDescriptor(&pooling_desc_));
// Figure out the pooling descriptor.
if (operator_def.type().substr(0, 7) == "MaxPool") {
bool deterministic =
OperatorBase::GetSingleArgument<bool>("deterministic", false);
#if CUDNN_VERSION_MIN(6, 0, 0)
mode_ =
deterministic ? CUDNN_POOLING_MAX_DETERMINISTIC : CUDNN_POOLING_MAX;
#else
mode_ = CUDNN_POOLING_MAX;
#endif
} else if (operator_def.type().substr(0, 11) == "AveragePool") {
mode_ = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
} else {
LOG(FATAL) << "Unsupported pooling method: " << operator_def.type();
}
}
~CuDNNPoolOp() {
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(bottom_desc_));
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(top_desc_));
CUDNN_ENFORCE(cudnnDestroyPoolingDescriptor(pooling_desc_));
}
template <typename T, typename M>
bool DoRunWithType() {
auto& X = Input(0);
auto* Y = Output(0);
int N = 0, C = 0, H = 0, W = 0, D = 0;
int H_out = 0, W_out = 0, D_out = 0;
// cuDNN pooling support only 2 and 3 spatial dimensions.
CAFFE_ENFORCE(X.ndim() >= 4 && X.ndim() <= 5);
switch (order_) {
case StorageOrder::NHWC:
N = X.dim32(0);
H = X.dim32(1);
W = X.ndim() > 3 ? X.dim32(2) : 1;
D = X.ndim() > 4 ? X.dim32(3) : 1;
C = X.dim32(X.ndim() - 1);
ConvPoolOpBase::SetOutputSize(X, Y, C);
H_out = Y->dim32(1);
W_out = Y->ndim() > 3 ? Y->dim32(2) : 1;
D_out = Y->ndim() > 4 ? Y->dim32(3) : 1;
break;
case StorageOrder::NCHW:
N = X.dim32(0);
C = X.dim32(1);
H = X.dim32(2);
W = X.ndim() > 3 ? X.dim32(3) : 1;
D = X.ndim() > 4 ? X.dim32(4) : 1;
ConvPoolOpBase::SetOutputSize(X, Y, C);
H_out = Y->dim32(2);
W_out = Y->ndim() > 3 ? Y->dim32(3) : 1;
D_out = Y->ndim() > 4 ? Y->dim32(4) : 1;
break;
default:
LOG(FATAL) << "Unknown storage order: " << order_;
}
// Fast path for global pooling, as cudnn is slow. But only
// on float, because fp16 not supported for CUB.
if (sizeof(T) == 4) {
if (order_ == StorageOrder::NCHW && Y->size() == N * C) {
if (mode_ == CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING) {
hipLaunchKernelGGL(( global_avgpool_kernel_NCHW<float>)
, dim3(::min(N * C, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N * C, H * W * D, X.data<float>(), Y->mutable_data<float>());
return true;
}
if (mode_ == CUDNN_POOLING_MAX) {
hipLaunchKernelGGL(( global_maxpool_kernel_NCHW<float>)
, dim3(::min(N * C, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N * C, H * W * D, X.data<float>(), Y->mutable_data<float>());
return true;
}
}
}
if (cudnn_input_dims_ != X.dims()) {
// Dimensions changed; we will need to re-initialize things.
VLOG(1) << "Changing the cudnn descriptor configurations.";
cudnn_input_dims_ = X.dims();
setTensorDescriptor<T>(X.ndim(), order_, N, C, H, W, D, bottom_desc_);
setTensorDescriptor<T>(
Y->ndim(), order_, N, C, H_out, W_out, D_out, top_desc_);
for (int i = 0; i < kernel_.size(); ++i) {
if (pads_[i] != pads_[kernel_.size() + i]) {
CAFFE_ENFORCE(
legacy_pad_ == LegacyPadding::CAFFE_LEGACY_POOLING,
"Cudnn pooling only supports even padding on both sides, with "
"the only exception of the caffe legacy pooling case where we "
"try to preserve backward compatibility with Caffe.");
}
}
if (kernel_.size() == 2) {
CUDNN_ENFORCE(cudnnSetPooling2dDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_h(),
kernel_w(),
pad_t(),
pad_l(),
stride_h(),
stride_w()));
} else {
CUDNN_ENFORCE(cudnnSetPoolingNdDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_.size(),
kernel_.data(),
pads_.data(),
stride_.data()));
}
}
// Carry out the pooling computation.
const T* Xdata = X.template data<T>();
T* Ydata = Y->template mutable_data<T>();
CUDNN_ENFORCE(cudnnPoolingForward(
cudnn_wrapper_.inline_cudnn_handle(),
pooling_desc_,
cudnnTypeWrapper<T>::kOne(),
bottom_desc_,
Xdata,
cudnnTypeWrapper<T>::kZero(),
top_desc_,
Ydata));
return true;
}
bool RunOnDevice() final {
auto& X = Input(0);
auto* Y = Output(0);
if (X.IsType<float>()) {
return DoRunWithType<float,float>();
} else if (X.IsType<float16>()) {
return DoRunWithType<float16,float>();
} else {
LOG(FATAL) << "Unsupported input types";
}
return true;
}
protected:
vector<TIndex> cudnn_input_dims_;
CuDNNWrapper cudnn_wrapper_;
cudnnTensorDescriptor_t bottom_desc_;
cudnnTensorDescriptor_t top_desc_;
cudnnPoolingDescriptor_t pooling_desc_;
cudnnPoolingMode_t mode_;
private:
};
class CuDNNPoolGradientOp : public ConvPoolOpBase<CUDAContext> {
public:
CuDNNPoolGradientOp(const OperatorDef& operator_def, Workspace* ws)
: ConvPoolOpBase<CUDAContext>(operator_def, ws),
cudnn_wrapper_(&context_) {
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&bottom_desc_));
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&top_desc_));
CUDNN_ENFORCE(cudnnCreatePoolingDescriptor(&pooling_desc_));
// Figure out the pooling descriptor.
if (operator_def.type() == "MaxPoolGradient" ||
operator_def.type() == "MaxPool1DGradient" ||
operator_def.type() == "MaxPool2DGradient" ||
operator_def.type() == "MaxPool3DGradient") {
bool deterministic =
OperatorBase::GetSingleArgument<bool>("deterministic", false);
#if CUDNN_VERSION_MIN(6, 0, 0)
mode_ =
deterministic ? CUDNN_POOLING_MAX_DETERMINISTIC : CUDNN_POOLING_MAX;
#else
mode_ = CUDNN_POOLING_MAX;
#endif
} else if (
operator_def.type() == "AveragePoolGradient" ||
operator_def.type() == "AveragePool1DGradient" ||
operator_def.type() == "AveragePool2DGradient" ||
operator_def.type() == "AveragePool3DGradient") {
mode_ = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
} else {
LOG(FATAL) << "Unsupported pooling method: " << operator_def.type();
}
}
~CuDNNPoolGradientOp() {
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(bottom_desc_));
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(top_desc_));
CUDNN_ENFORCE(cudnnDestroyPoolingDescriptor(pooling_desc_));
}
template <typename T, typename M>
bool DoRunWithType() {
auto& X = Input(0);
auto& Y = Input(1);
auto& dY = Input(2);
auto* dX = Output(0);
// cuDNN pooling support only 2 and 3 spatial dimensions.
CAFFE_ENFORCE(X.ndim() >= 4 && X.ndim() <= 5);
dX->ResizeLike(X);
int N = 0, C = 0, H = 0, W = 0, D = 0;
int H_out = 0, W_out = 0, D_out = 0;
switch (order_) {
case StorageOrder::NHWC:
N = X.dim32(0);
H = X.dim32(1);
W = X.ndim() > 3 ? X.dim32(2) : 1;
D = X.ndim() > 4 ? X.dim32(3) : 1;
C = X.dim32(X.ndim() - 1);
H_out = Y.dim32(1);
W_out = Y.ndim() > 3 ? Y.dim32(2) : 1;
D_out = Y.ndim() > 4 ? Y.dim32(3) : 1;
break;
case StorageOrder::NCHW:
N = X.dim32(0);
C = X.dim32(1);
H = X.dim32(2);
W = X.ndim() > 3 ? X.dim32(3) : 1;
D = X.ndim() > 4 ? X.dim32(4) : 1;
H_out = Y.dim32(2);
W_out = Y.ndim() > 3 ? Y.dim32(3) : 1;
D_out = Y.ndim() > 4 ? Y.dim32(4) : 1;
break;
default:
LOG(FATAL) << "Unknown storage order: " << order_;
}
// Fast path for global pooling, as cudnn is slow. But only
// on float, because fp16 not supported for CUB.
if (sizeof(T) == 4) {
if (order_ == StorageOrder::NCHW && dY.size() == N * C) {
if (mode_ == CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING) {
hipLaunchKernelGGL(( global_avgpool_backward_NCHW<float>)
, dim3(CAFFE_GET_BLOCKS(dX->size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N * C,
H * W * D,
dY.data<float>(),
dX->mutable_data<float>());
return true;
}
#if CUDNN_VERSION_MIN(6, 0, 0)
if (mode_ == CUDNN_POOLING_MAX ||
mode_ == CUDNN_POOLING_MAX_DETERMINISTIC) {
#else
if (mode_ == CUDNN_POOLING_MAX) {
#endif
hipLaunchKernelGGL(( global_maxpool_backward_NCHW<float>)
, dim3(CAFFE_GET_BLOCKS(dX->size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N * C,
H * W * D,
dY.data<float>(),
dX->mutable_data<float>(),
Y.data<float>(),
X.data<float>());
return true;
}
}
}
if (kernel_.size() == 1) {
ConvPoolOpBase<CUDAContext>::ComputePads({H});
} else if (kernel_.size() == 2) {
ConvPoolOpBase<CUDAContext>::ComputePads({H, W});
} else if (kernel_.size() == 3) {
ConvPoolOpBase<CUDAContext>::ComputePads({H, W, D});
} else {
CAFFE_THROW("Unsupported kernel size :", kernel_.size());
}
if (cudnn_input_dims_ != X.dims()) {
// Dimensions changed; we will need to re-initialize things.
VLOG(1) << "Changing the cudnn descriptor configurations.";
cudnn_input_dims_ = X.dims();
setTensorDescriptor<T>(X.ndim(), order_, N, C, H, W, D, bottom_desc_);
setTensorDescriptor<T>(
Y.ndim(), order_, N, C, H_out, W_out, D_out, top_desc_);
for (int i = 0; i < kernel_.size(); ++i) {
if (pads_[i] != pads_[kernel_.size() + i]) {
CAFFE_ENFORCE(
legacy_pad_ == LegacyPadding::CAFFE_LEGACY_POOLING,
"Cudnn pooling only supports even padding on both sides, with "
"the only exception of the caffe legacy pooling case where we "
"try to preserve backward compatibility with Caffe.");
}
}
if (kernel_.size() == 2) {
CUDNN_ENFORCE(cudnnSetPooling2dDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_h(),
kernel_w(),
pad_t(),
pad_l(),
stride_h(),
stride_w()));
} else {
CUDNN_ENFORCE(cudnnSetPoolingNdDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_.size(),
kernel_.data(),
pads_.data(),
stride_.data()));
}
}
// Carry out the pooling computation.
const T* Xdata = X.template data<T>();
const T* Ydata = Y.template data<T>();
const T* dYdata = dY.template data<T>();
T* dXdata = dX->template mutable_data<T>();
CUDNN_ENFORCE(cudnnPoolingBackward(
cudnn_wrapper_.inline_cudnn_handle(),
pooling_desc_,
cudnnTypeWrapper<T>::kOne(),
top_desc_,
Ydata,
top_desc_,
dYdata,
bottom_desc_,
Xdata,
cudnnTypeWrapper<T>::kZero(),
bottom_desc_,
dXdata));
return true;
}
bool RunOnDevice() final {
auto& X = Input(0);
auto& Y = Input(1);
auto& dY = Input(2);
auto* dX = Output(0);
dX->ResizeLike(X);
if (X.IsType<float>()) {
return DoRunWithType<float,float>();
} else if (X.IsType<float16>()) {
return DoRunWithType<float16,float>();
} else {
LOG(FATAL) << "Unsupported input types";
}
return true;
}
protected:
vector<TIndex> cudnn_input_dims_;
CuDNNWrapper cudnn_wrapper_;
cudnnTensorDescriptor_t bottom_desc_;
cudnnTensorDescriptor_t top_desc_;
cudnnPoolingDescriptor_t pooling_desc_;
cudnnPoolingMode_t mode_;
};
namespace {
REGISTER_CUDNN_OPERATOR(AveragePool, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePoolGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(AveragePool1D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePool1DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(AveragePool2D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePool2DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(AveragePool3D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePool3DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPoolGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool1D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPool1DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool2D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPool2DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool3D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPool3DGradient, CuDNNPoolGradientOp);
} // namespace
} // namespace caffe2
|
e892fe409562fde0607ae9f75a96e36b3094ef1d.cu
|
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/cudnn_wrappers.h"
#include "caffe2/operators/conv_pool_op_base.h"
#include <cub/cub.cuh>
namespace caffe2 {
namespace {
// Explicit fast paths for avg and max global pooling due to CuDNN global
// pooling performance bug which makes pooling extremely slow.
template <typename T>
__global__ void
global_avgpool_kernel_NCHW(const int NC, const int sz, const T* data, T* out) {
typedef cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int j = blockIdx.x; j < NC; j += gridDim.x) {
T sum(0);
for (int k = threadIdx.x; k < sz; k += blockDim.x) {
sum += data[j * sz + k];
}
float totalsum = BlockReduce(temp_storage).Sum(sum);
if (threadIdx.x == 0) {
out[j] = totalsum / sz;
}
__syncthreads();
}
}
template <typename T>
__global__ void
global_avgpool_backward_NCHW(const int NC, const int sz, const T* dx, T* out) {
CUDA_1D_KERNEL_LOOP(i, NC * sz) {
out[i] = dx[i / sz] / sz;
}
}
template <typename T>
__global__ void
global_maxpool_kernel_NCHW(const int NC, const int sz, const T* data, T* out) {
typedef cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int j = blockIdx.x; j < NC; j += gridDim.x) {
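// Each iteration reduces one (n, c) slice of sz elements to its maximum; the
// running max is seeded from the slice indexed by blockIdx.x, and each thread
// then strides over the slice in steps of blockDim.x.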
T max(data[blockIdx.x * sz + threadIdx.x]);
for (int k = threadIdx.x; k < sz; k += blockDim.x) {
max = data[j * sz + k] > max ? data[j * sz + k] : max;
}
float totalmax = BlockReduce(temp_storage).Reduce(max, cub::Max());
if (threadIdx.x == 0) {
out[j] = totalmax;
}
__syncthreads();
}
}
template <typename T>
__global__ void global_maxpool_backward_NCHW(
const int NC,
const int sz,
const T* dx,
T* out,
const T* x,
const T* in) {
CUDA_1D_KERNEL_LOOP(i, NC * sz) {
if (in[i] == x[i / sz]) {
out[i] = dx[i / sz];
} else {
out[i] = 0.0;
}
}
}
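// Builds a cuDNN tensor descriptor: the 4-D case uses the cuDNN tensor format
// directly, while the 5-D case (extra depth dimension) sets explicit dims and
// strides derived from the storage order (NCHW vs. NHWC).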
template <typename T>
void setTensorDescriptor(
const int size,
const StorageOrder order,
const int N,
const int C,
const int H,
const int W,
const int D,
cudnnTensorDescriptor_t& desc) {
if (size == 4) {
CUDNN_ENFORCE(cudnnSetTensor4dDescriptor(
desc,
GetCudnnTensorFormat(order),
cudnnTypeWrapper<T>::type,
N,
C,
H,
W));
} else {
vector<int> dims = {N, C, H, W, D};
vector<int> strides;
order == NCHW
? strides.insert(strides.end(), {C * H * W * D, H * W * D, W * D, D, 1})
: strides.insert(
strides.end(), {H * W * D * C, 1, W * D * C, D * C, C});
CUDNN_ENFORCE(cudnnSetTensorNdDescriptor(
desc,
cudnnTypeWrapper<T>::type,
size > 3 ? size : 4,
dims.data(),
strides.data()));
}
}
} // namespace
class CuDNNPoolOp : public ConvPoolOpBase<CUDAContext> {
public:
CuDNNPoolOp(const OperatorDef& operator_def, Workspace* ws)
: ConvPoolOpBase<CUDAContext>(operator_def, ws),
cudnn_wrapper_(&context_) {
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&bottom_desc_));
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&top_desc_));
CUDNN_ENFORCE(cudnnCreatePoolingDescriptor(&pooling_desc_));
// Figure out the pooling descriptor.
if (operator_def.type().substr(0, 7) == "MaxPool") {
bool deterministic =
OperatorBase::GetSingleArgument<bool>("deterministic", false);
#if CUDNN_VERSION_MIN(6, 0, 0)
mode_ =
deterministic ? CUDNN_POOLING_MAX_DETERMINISTIC : CUDNN_POOLING_MAX;
#else
mode_ = CUDNN_POOLING_MAX;
#endif
} else if (operator_def.type().substr(0, 11) == "AveragePool") {
mode_ = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
} else {
LOG(FATAL) << "Unsupported pooling method: " << operator_def.type();
}
}
~CuDNNPoolOp() {
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(bottom_desc_));
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(top_desc_));
CUDNN_ENFORCE(cudnnDestroyPoolingDescriptor(pooling_desc_));
}
template <typename T, typename M>
bool DoRunWithType() {
auto& X = Input(0);
auto* Y = Output(0);
int N = 0, C = 0, H = 0, W = 0, D = 0;
int H_out = 0, W_out = 0, D_out = 0;
    // cuDNN pooling supports only 2 and 3 spatial dimensions.
CAFFE_ENFORCE(X.ndim() >= 4 && X.ndim() <= 5);
switch (order_) {
case StorageOrder::NHWC:
N = X.dim32(0);
H = X.dim32(1);
W = X.ndim() > 3 ? X.dim32(2) : 1;
D = X.ndim() > 4 ? X.dim32(3) : 1;
C = X.dim32(X.ndim() - 1);
ConvPoolOpBase::SetOutputSize(X, Y, C);
H_out = Y->dim32(1);
W_out = Y->ndim() > 3 ? Y->dim32(2) : 1;
D_out = Y->ndim() > 4 ? Y->dim32(3) : 1;
break;
case StorageOrder::NCHW:
N = X.dim32(0);
C = X.dim32(1);
H = X.dim32(2);
W = X.ndim() > 3 ? X.dim32(3) : 1;
D = X.ndim() > 4 ? X.dim32(4) : 1;
ConvPoolOpBase::SetOutputSize(X, Y, C);
H_out = Y->dim32(2);
W_out = Y->ndim() > 3 ? Y->dim32(3) : 1;
D_out = Y->ndim() > 4 ? Y->dim32(4) : 1;
break;
default:
LOG(FATAL) << "Unknown storage order: " << order_;
}
    // Fast path for global pooling, since cuDNN is slow for this case. Applied only
    // to float, because fp16 is not supported by CUB.
if (sizeof(T) == 4) {
if (order_ == StorageOrder::NCHW && Y->size() == N * C) {
if (mode_ == CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING) {
global_avgpool_kernel_NCHW<float>
<<<std::min(N * C, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N * C, H * W * D, X.data<float>(), Y->mutable_data<float>());
return true;
}
if (mode_ == CUDNN_POOLING_MAX) {
global_maxpool_kernel_NCHW<float>
<<<std::min(N * C, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N * C, H * W * D, X.data<float>(), Y->mutable_data<float>());
return true;
}
}
}
if (cudnn_input_dims_ != X.dims()) {
// Dimensions changed; we will need to re-initialize things.
VLOG(1) << "Changing the cudnn descriptor configurations.";
cudnn_input_dims_ = X.dims();
setTensorDescriptor<T>(X.ndim(), order_, N, C, H, W, D, bottom_desc_);
setTensorDescriptor<T>(
Y->ndim(), order_, N, C, H_out, W_out, D_out, top_desc_);
for (int i = 0; i < kernel_.size(); ++i) {
if (pads_[i] != pads_[kernel_.size() + i]) {
CAFFE_ENFORCE(
legacy_pad_ == LegacyPadding::CAFFE_LEGACY_POOLING,
"Cudnn pooling only supports even padding on both sides, with "
"the only exception of the caffe legacy pooling case where we "
"try to preserve backward compatibility with Caffe.");
}
}
if (kernel_.size() == 2) {
CUDNN_ENFORCE(cudnnSetPooling2dDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_h(),
kernel_w(),
pad_t(),
pad_l(),
stride_h(),
stride_w()));
} else {
CUDNN_ENFORCE(cudnnSetPoolingNdDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_.size(),
kernel_.data(),
pads_.data(),
stride_.data()));
}
}
// Carry out the pooling computation.
const T* Xdata = X.template data<T>();
T* Ydata = Y->template mutable_data<T>();
CUDNN_ENFORCE(cudnnPoolingForward(
cudnn_wrapper_.inline_cudnn_handle(),
pooling_desc_,
cudnnTypeWrapper<T>::kOne(),
bottom_desc_,
Xdata,
cudnnTypeWrapper<T>::kZero(),
top_desc_,
Ydata));
return true;
}
bool RunOnDevice() final {
auto& X = Input(0);
auto* Y = Output(0);
if (X.IsType<float>()) {
return DoRunWithType<float,float>();
} else if (X.IsType<float16>()) {
return DoRunWithType<float16,float>();
} else {
LOG(FATAL) << "Unsupported input types";
}
return true;
}
protected:
vector<TIndex> cudnn_input_dims_;
CuDNNWrapper cudnn_wrapper_;
cudnnTensorDescriptor_t bottom_desc_;
cudnnTensorDescriptor_t top_desc_;
cudnnPoolingDescriptor_t pooling_desc_;
cudnnPoolingMode_t mode_;
private:
};
class CuDNNPoolGradientOp : public ConvPoolOpBase<CUDAContext> {
public:
CuDNNPoolGradientOp(const OperatorDef& operator_def, Workspace* ws)
: ConvPoolOpBase<CUDAContext>(operator_def, ws),
cudnn_wrapper_(&context_) {
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&bottom_desc_));
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&top_desc_));
CUDNN_ENFORCE(cudnnCreatePoolingDescriptor(&pooling_desc_));
// Figure out the pooling descriptor.
if (operator_def.type() == "MaxPoolGradient" ||
operator_def.type() == "MaxPool1DGradient" ||
operator_def.type() == "MaxPool2DGradient" ||
operator_def.type() == "MaxPool3DGradient") {
bool deterministic =
OperatorBase::GetSingleArgument<bool>("deterministic", false);
#if CUDNN_VERSION_MIN(6, 0, 0)
mode_ =
deterministic ? CUDNN_POOLING_MAX_DETERMINISTIC : CUDNN_POOLING_MAX;
#else
mode_ = CUDNN_POOLING_MAX;
#endif
} else if (
operator_def.type() == "AveragePoolGradient" ||
operator_def.type() == "AveragePool1DGradient" ||
operator_def.type() == "AveragePool2DGradient" ||
operator_def.type() == "AveragePool3DGradient") {
mode_ = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
} else {
LOG(FATAL) << "Unsupported pooling method: " << operator_def.type();
}
}
~CuDNNPoolGradientOp() {
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(bottom_desc_));
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(top_desc_));
CUDNN_ENFORCE(cudnnDestroyPoolingDescriptor(pooling_desc_));
}
template <typename T, typename M>
bool DoRunWithType() {
auto& X = Input(0);
auto& Y = Input(1);
auto& dY = Input(2);
auto* dX = Output(0);
    // cuDNN pooling supports only 2 and 3 spatial dimensions.
CAFFE_ENFORCE(X.ndim() >= 4 && X.ndim() <= 5);
dX->ResizeLike(X);
int N = 0, C = 0, H = 0, W = 0, D = 0;
int H_out = 0, W_out = 0, D_out = 0;
switch (order_) {
case StorageOrder::NHWC:
N = X.dim32(0);
H = X.dim32(1);
W = X.ndim() > 3 ? X.dim32(2) : 1;
D = X.ndim() > 4 ? X.dim32(3) : 1;
C = X.dim32(X.ndim() - 1);
H_out = Y.dim32(1);
W_out = Y.ndim() > 3 ? Y.dim32(2) : 1;
D_out = Y.ndim() > 4 ? Y.dim32(3) : 1;
break;
case StorageOrder::NCHW:
N = X.dim32(0);
C = X.dim32(1);
H = X.dim32(2);
W = X.ndim() > 3 ? X.dim32(3) : 1;
D = X.ndim() > 4 ? X.dim32(4) : 1;
H_out = Y.dim32(2);
W_out = Y.ndim() > 3 ? Y.dim32(3) : 1;
D_out = Y.ndim() > 4 ? Y.dim32(4) : 1;
break;
default:
LOG(FATAL) << "Unknown storage order: " << order_;
}
    // Fast path for global pooling, since cuDNN is slow for this case. Applied only
    // to float, because fp16 is not supported by CUB.
if (sizeof(T) == 4) {
if (order_ == StorageOrder::NCHW && dY.size() == N * C) {
if (mode_ == CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING) {
global_avgpool_backward_NCHW<float>
<<<CAFFE_GET_BLOCKS(dX->size()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N * C,
H * W * D,
dY.data<float>(),
dX->mutable_data<float>());
return true;
}
#if CUDNN_VERSION_MIN(6, 0, 0)
if (mode_ == CUDNN_POOLING_MAX ||
mode_ == CUDNN_POOLING_MAX_DETERMINISTIC) {
#else
if (mode_ == CUDNN_POOLING_MAX) {
#endif
global_maxpool_backward_NCHW<float>
<<<CAFFE_GET_BLOCKS(dX->size()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N * C,
H * W * D,
dY.data<float>(),
dX->mutable_data<float>(),
Y.data<float>(),
X.data<float>());
return true;
}
}
}
if (kernel_.size() == 1) {
ConvPoolOpBase<CUDAContext>::ComputePads({H});
} else if (kernel_.size() == 2) {
ConvPoolOpBase<CUDAContext>::ComputePads({H, W});
} else if (kernel_.size() == 3) {
ConvPoolOpBase<CUDAContext>::ComputePads({H, W, D});
} else {
CAFFE_THROW("Unsupported kernel size :", kernel_.size());
}
if (cudnn_input_dims_ != X.dims()) {
// Dimensions changed; we will need to re-initialize things.
VLOG(1) << "Changing the cudnn descriptor configurations.";
cudnn_input_dims_ = X.dims();
setTensorDescriptor<T>(X.ndim(), order_, N, C, H, W, D, bottom_desc_);
setTensorDescriptor<T>(
Y.ndim(), order_, N, C, H_out, W_out, D_out, top_desc_);
for (int i = 0; i < kernel_.size(); ++i) {
if (pads_[i] != pads_[kernel_.size() + i]) {
CAFFE_ENFORCE(
legacy_pad_ == LegacyPadding::CAFFE_LEGACY_POOLING,
"Cudnn pooling only supports even padding on both sides, with "
"the only exception of the caffe legacy pooling case where we "
"try to preserve backward compatibility with Caffe.");
}
}
if (kernel_.size() == 2) {
CUDNN_ENFORCE(cudnnSetPooling2dDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_h(),
kernel_w(),
pad_t(),
pad_l(),
stride_h(),
stride_w()));
} else {
CUDNN_ENFORCE(cudnnSetPoolingNdDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_.size(),
kernel_.data(),
pads_.data(),
stride_.data()));
}
}
// Carry out the pooling computation.
const T* Xdata = X.template data<T>();
const T* Ydata = Y.template data<T>();
const T* dYdata = dY.template data<T>();
T* dXdata = dX->template mutable_data<T>();
CUDNN_ENFORCE(cudnnPoolingBackward(
cudnn_wrapper_.inline_cudnn_handle(),
pooling_desc_,
cudnnTypeWrapper<T>::kOne(),
top_desc_,
Ydata,
top_desc_,
dYdata,
bottom_desc_,
Xdata,
cudnnTypeWrapper<T>::kZero(),
bottom_desc_,
dXdata));
return true;
}
bool RunOnDevice() final {
auto& X = Input(0);
auto& Y = Input(1);
auto& dY = Input(2);
auto* dX = Output(0);
dX->ResizeLike(X);
if (X.IsType<float>()) {
return DoRunWithType<float,float>();
} else if (X.IsType<float16>()) {
return DoRunWithType<float16,float>();
} else {
LOG(FATAL) << "Unsupported input types";
}
return true;
}
protected:
vector<TIndex> cudnn_input_dims_;
CuDNNWrapper cudnn_wrapper_;
cudnnTensorDescriptor_t bottom_desc_;
cudnnTensorDescriptor_t top_desc_;
cudnnPoolingDescriptor_t pooling_desc_;
cudnnPoolingMode_t mode_;
};
namespace {
REGISTER_CUDNN_OPERATOR(AveragePool, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePoolGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(AveragePool1D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePool1DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(AveragePool2D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePool2DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(AveragePool3D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePool3DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPoolGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool1D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPool1DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool2D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPool2DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool3D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPool3DGradient, CuDNNPoolGradientOp);
} // namespace
} // namespace caffe2
|
c33398c51bb0f8e30c483c03f40fd2284276b605.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <cuml/common/cuml_allocator.hpp>
#include <cuml/cuml.hpp>
#include <random>
#include <vector>
#include "linalg/batched/matrix.h"
#include "linalg_naive.h"
#include "sparse/batched/csr.h"
#include "test_utils.h"
namespace MLCommon {
namespace Sparse {
namespace Batched {
enum CSROperation { SpMV_op, SpMM_op };
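// SpMV_op: batched sparse matrix * dense vector product; SpMM_op: batched sparse matrix * dense matrix product.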
template <typename T>
struct CSRInputs {
CSROperation operation;
int batch_size;
int m; // Dimensions of A
int n;
int nnz; // Number of non-zero elements in A
int p; // Dimensions of B or x
int q;
T alpha; // Scalars
T beta;
T tolerance;
};
template <typename T>
class CSRTest : public ::testing::TestWithParam<CSRInputs<T>> {
protected:
void SetUp() override {
using std::vector;
params = ::testing::TestWithParam<CSRInputs<T>>::GetParam();
// Check if the dimensions are valid and compute the output dimensions
int m_r, n_r;
switch (params.operation) {
case SpMV_op:
ASSERT_TRUE(params.n == params.p);
ASSERT_TRUE(params.q == 1);
m_r = params.m;
n_r = 1;
break;
case SpMM_op:
ASSERT_TRUE(params.n == params.p);
m_r = params.m;
n_r = params.q;
break;
}
// Create test matrices/vectors
std::vector<T> A;
std::vector<T> Bx;
A.resize(params.batch_size * params.m * params.n, (T)0.0);
Bx.resize(params.batch_size * params.p * params.q);
std::random_device rd;
std::mt19937 gen(rd());
    std::uniform_int_distribution<int> idis(0, params.m * params.n - 1);
std::uniform_real_distribution<T> udis(-1.0, 3.0);
// Generate a random sparse matrix (with dense representation)
std::vector<bool> mask = std::vector<bool>(params.m * params.n, false);
for (int idx = 0; idx < params.nnz; idx++) {
int k;
do {
k = idis(gen);
} while (mask[k]);
mask[k] = true;
int i = k % params.m;
int j = k / params.m;
for (int bid = 0; bid < params.batch_size; bid++) {
A[bid * params.m * params.n + j * params.m + i] = udis(gen);
}
}
// Generate random dense matrices/vectors
for (int i = 0; i < Bx.size(); i++) Bx[i] = udis(gen);
res_h.resize(params.batch_size * m_r * n_r);
for (int i = 0; i < res_h.size(); i++) res_h[i] = udis(gen);
// Create handles, stream, allocator
CUBLAS_CHECK(hipblasCreate(&handle));
CUDA_CHECK(hipStreamCreate(&stream));
CUSOLVER_CHECK(cusolverSpCreate(&cusolverSpHandle));
auto allocator = std::make_shared<MLCommon::defaultDeviceAllocator>();
    // Create batched dense matrices
LinAlg::Batched::Matrix<T> AbM(params.m, params.n, params.batch_size,
handle, allocator, stream);
LinAlg::Batched::Matrix<T> BxbM(params.p, params.q, params.batch_size,
handle, allocator, stream);
// Create matrix that will hold the results
res_bM = new LinAlg::Batched::Matrix<T>(m_r, n_r, params.batch_size, handle,
allocator, stream);
// Copy the data to the device
updateDevice(AbM.raw_data(), A.data(), A.size(), stream);
updateDevice(BxbM.raw_data(), Bx.data(), Bx.size(), stream);
updateDevice(res_bM->raw_data(), res_h.data(), res_h.size(), stream);
// Create sparse matrix A from the dense A and the mask
CSR<T> AbS = CSR<T>::from_dense(AbM, mask, cusolverSpHandle);
// Compute the tested results
switch (params.operation) {
case SpMV_op:
b_spmv(params.alpha, AbS, BxbM, params.beta, *res_bM);
break;
case SpMM_op:
b_spmm(params.alpha, AbS, BxbM, params.beta, *res_bM);
break;
}
// Compute the expected results
switch (params.operation) {
case SpMV_op:
for (int bid = 0; bid < params.batch_size; bid++) {
LinAlg::Naive::matMul(res_h.data() + bid * m_r,
A.data() + bid * params.m * params.n,
Bx.data() + bid * params.p, params.m, params.n,
1, params.alpha, params.beta);
}
break;
case SpMM_op:
for (int bid = 0; bid < params.batch_size; bid++) {
LinAlg::Naive::matMul(res_h.data() + bid * m_r * n_r,
A.data() + bid * params.m * params.n,
Bx.data() + bid * params.p * params.q, params.m,
params.n, params.q, params.alpha, params.beta);
}
break;
}
CUDA_CHECK(hipStreamSynchronize(stream));
}
void TearDown() override {
delete res_bM;
CUBLAS_CHECK(hipblasDestroy(handle));
CUDA_CHECK(hipStreamDestroy(stream));
CUSOLVER_CHECK(cusolverSpDestroy(cusolverSpHandle));
}
protected:
CSRInputs<T> params;
LinAlg::Batched::Matrix<T> *res_bM;
std::vector<T> res_h;
hipblasHandle_t handle;
cusolverSpHandle_t cusolverSpHandle;
hipStream_t stream;
};
// Test parameters (op, batch_size, m, n, nnz, p, q, alpha, beta, tolerance)
const std::vector<CSRInputs<double>> inputsd = {
{SpMV_op, 1, 90, 150, 440, 150, 1, 1.0, 0.0, 1e-6},
{SpMV_op, 5, 13, 12, 75, 12, 1, -1.0, 1.0, 1e-6},
{SpMV_op, 15, 8, 4, 6, 4, 1, 0.5, 0.5, 1e-6},
{SpMV_op, 33, 7, 7, 23, 7, 1, -0.5, -0.5, 1e-6},
{SpMM_op, 1, 20, 15, 55, 15, 30, 1.0, 0.0, 1e-6},
{SpMM_op, 9, 10, 9, 31, 9, 11, -1.0, 0.5, 1e-6},
{SpMM_op, 20, 7, 12, 11, 12, 13, 0.5, 0.5, 1e-6}};
// Test parameters (op, batch_size, m, n, nnz, p, q, alpha, beta, tolerance)
const std::vector<CSRInputs<float>> inputsf = {
{SpMV_op, 1, 90, 150, 440, 150, 1, 1.0f, 0.0f, 1e-2},
{SpMV_op, 5, 13, 12, 75, 12, 1, -1.0f, 1.0f, 1e-2},
{SpMV_op, 15, 8, 4, 6, 4, 1, 0.5f, 0.5f, 1e-2},
{SpMV_op, 33, 7, 7, 23, 7, 1, -0.5f, -0.5f, 1e-2},
{SpMM_op, 1, 20, 15, 55, 15, 30, 1.0f, 0.0f, 1e-2},
{SpMM_op, 9, 10, 9, 31, 9, 11, -1.0f, 0.5f, 1e-2},
{SpMM_op, 20, 7, 12, 11, 12, 13, 0.5f, 0.5f, 1e-2}};
using BatchedCSRTestD = CSRTest<double>;
using BatchedCSRTestF = CSRTest<float>;
TEST_P(BatchedCSRTestD, Result) {
ASSERT_TRUE(devArrMatchHost(res_h.data(), res_bM->raw_data(), res_h.size(),
CompareApprox<double>(params.tolerance), stream));
}
TEST_P(BatchedCSRTestF, Result) {
ASSERT_TRUE(devArrMatchHost(res_h.data(), res_bM->raw_data(), res_h.size(),
CompareApprox<float>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(BatchedCSRTests, BatchedCSRTestD,
::testing::ValuesIn(inputsd));
INSTANTIATE_TEST_CASE_P(BatchedCSRTests, BatchedCSRTestF,
::testing::ValuesIn(inputsf));
} // namespace Batched
} // namespace Sparse
} // namespace MLCommon
|
c33398c51bb0f8e30c483c03f40fd2284276b605.cu
|
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <cuml/common/cuml_allocator.hpp>
#include <cuml/cuml.hpp>
#include <random>
#include <vector>
#include "linalg/batched/matrix.h"
#include "linalg_naive.h"
#include "sparse/batched/csr.h"
#include "test_utils.h"
namespace MLCommon {
namespace Sparse {
namespace Batched {
enum CSROperation { SpMV_op, SpMM_op };
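// SpMV_op: batched sparse matrix * dense vector product; SpMM_op: batched sparse matrix * dense matrix product.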
template <typename T>
struct CSRInputs {
CSROperation operation;
int batch_size;
int m; // Dimensions of A
int n;
int nnz; // Number of non-zero elements in A
int p; // Dimensions of B or x
int q;
T alpha; // Scalars
T beta;
T tolerance;
};
template <typename T>
class CSRTest : public ::testing::TestWithParam<CSRInputs<T>> {
protected:
void SetUp() override {
using std::vector;
params = ::testing::TestWithParam<CSRInputs<T>>::GetParam();
// Check if the dimensions are valid and compute the output dimensions
int m_r, n_r;
switch (params.operation) {
case SpMV_op:
ASSERT_TRUE(params.n == params.p);
ASSERT_TRUE(params.q == 1);
m_r = params.m;
n_r = 1;
break;
case SpMM_op:
ASSERT_TRUE(params.n == params.p);
m_r = params.m;
n_r = params.q;
break;
}
// Create test matrices/vectors
std::vector<T> A;
std::vector<T> Bx;
A.resize(params.batch_size * params.m * params.n, (T)0.0);
Bx.resize(params.batch_size * params.p * params.q);
std::random_device rd;
std::mt19937 gen(rd());
    std::uniform_int_distribution<int> idis(0, params.m * params.n - 1);
std::uniform_real_distribution<T> udis(-1.0, 3.0);
// Generate a random sparse matrix (with dense representation)
std::vector<bool> mask = std::vector<bool>(params.m * params.n, false);
for (int idx = 0; idx < params.nnz; idx++) {
int k;
do {
k = idis(gen);
} while (mask[k]);
mask[k] = true;
int i = k % params.m;
int j = k / params.m;
for (int bid = 0; bid < params.batch_size; bid++) {
A[bid * params.m * params.n + j * params.m + i] = udis(gen);
}
}
// Generate random dense matrices/vectors
for (int i = 0; i < Bx.size(); i++) Bx[i] = udis(gen);
res_h.resize(params.batch_size * m_r * n_r);
for (int i = 0; i < res_h.size(); i++) res_h[i] = udis(gen);
// Create handles, stream, allocator
CUBLAS_CHECK(cublasCreate(&handle));
CUDA_CHECK(cudaStreamCreate(&stream));
CUSOLVER_CHECK(cusolverSpCreate(&cusolverSpHandle));
auto allocator = std::make_shared<MLCommon::defaultDeviceAllocator>();
    // Create batched dense matrices
LinAlg::Batched::Matrix<T> AbM(params.m, params.n, params.batch_size,
handle, allocator, stream);
LinAlg::Batched::Matrix<T> BxbM(params.p, params.q, params.batch_size,
handle, allocator, stream);
// Create matrix that will hold the results
res_bM = new LinAlg::Batched::Matrix<T>(m_r, n_r, params.batch_size, handle,
allocator, stream);
// Copy the data to the device
updateDevice(AbM.raw_data(), A.data(), A.size(), stream);
updateDevice(BxbM.raw_data(), Bx.data(), Bx.size(), stream);
updateDevice(res_bM->raw_data(), res_h.data(), res_h.size(), stream);
// Create sparse matrix A from the dense A and the mask
CSR<T> AbS = CSR<T>::from_dense(AbM, mask, cusolverSpHandle);
// Compute the tested results
switch (params.operation) {
case SpMV_op:
b_spmv(params.alpha, AbS, BxbM, params.beta, *res_bM);
break;
case SpMM_op:
b_spmm(params.alpha, AbS, BxbM, params.beta, *res_bM);
break;
}
// Compute the expected results
switch (params.operation) {
case SpMV_op:
for (int bid = 0; bid < params.batch_size; bid++) {
LinAlg::Naive::matMul(res_h.data() + bid * m_r,
A.data() + bid * params.m * params.n,
Bx.data() + bid * params.p, params.m, params.n,
1, params.alpha, params.beta);
}
break;
case SpMM_op:
for (int bid = 0; bid < params.batch_size; bid++) {
LinAlg::Naive::matMul(res_h.data() + bid * m_r * n_r,
A.data() + bid * params.m * params.n,
Bx.data() + bid * params.p * params.q, params.m,
params.n, params.q, params.alpha, params.beta);
}
break;
}
CUDA_CHECK(cudaStreamSynchronize(stream));
}
void TearDown() override {
delete res_bM;
CUBLAS_CHECK(cublasDestroy(handle));
CUDA_CHECK(cudaStreamDestroy(stream));
CUSOLVER_CHECK(cusolverSpDestroy(cusolverSpHandle));
}
protected:
CSRInputs<T> params;
LinAlg::Batched::Matrix<T> *res_bM;
std::vector<T> res_h;
cublasHandle_t handle;
cusolverSpHandle_t cusolverSpHandle;
cudaStream_t stream;
};
// Test parameters (op, batch_size, m, n, nnz, p, q, alpha, beta, tolerance)
const std::vector<CSRInputs<double>> inputsd = {
{SpMV_op, 1, 90, 150, 440, 150, 1, 1.0, 0.0, 1e-6},
{SpMV_op, 5, 13, 12, 75, 12, 1, -1.0, 1.0, 1e-6},
{SpMV_op, 15, 8, 4, 6, 4, 1, 0.5, 0.5, 1e-6},
{SpMV_op, 33, 7, 7, 23, 7, 1, -0.5, -0.5, 1e-6},
{SpMM_op, 1, 20, 15, 55, 15, 30, 1.0, 0.0, 1e-6},
{SpMM_op, 9, 10, 9, 31, 9, 11, -1.0, 0.5, 1e-6},
{SpMM_op, 20, 7, 12, 11, 12, 13, 0.5, 0.5, 1e-6}};
// Test parameters (op, batch_size, m, n, nnz, p, q, alpha, beta, tolerance)
const std::vector<CSRInputs<float>> inputsf = {
{SpMV_op, 1, 90, 150, 440, 150, 1, 1.0f, 0.0f, 1e-2},
{SpMV_op, 5, 13, 12, 75, 12, 1, -1.0f, 1.0f, 1e-2},
{SpMV_op, 15, 8, 4, 6, 4, 1, 0.5f, 0.5f, 1e-2},
{SpMV_op, 33, 7, 7, 23, 7, 1, -0.5f, -0.5f, 1e-2},
{SpMM_op, 1, 20, 15, 55, 15, 30, 1.0f, 0.0f, 1e-2},
{SpMM_op, 9, 10, 9, 31, 9, 11, -1.0f, 0.5f, 1e-2},
{SpMM_op, 20, 7, 12, 11, 12, 13, 0.5f, 0.5f, 1e-2}};
using BatchedCSRTestD = CSRTest<double>;
using BatchedCSRTestF = CSRTest<float>;
TEST_P(BatchedCSRTestD, Result) {
ASSERT_TRUE(devArrMatchHost(res_h.data(), res_bM->raw_data(), res_h.size(),
CompareApprox<double>(params.tolerance), stream));
}
TEST_P(BatchedCSRTestF, Result) {
ASSERT_TRUE(devArrMatchHost(res_h.data(), res_bM->raw_data(), res_h.size(),
CompareApprox<float>(params.tolerance), stream));
}
INSTANTIATE_TEST_CASE_P(BatchedCSRTests, BatchedCSRTestD,
::testing::ValuesIn(inputsd));
INSTANTIATE_TEST_CASE_P(BatchedCSRTests, BatchedCSRTestF,
::testing::ValuesIn(inputsf));
} // namespace Batched
} // namespace Sparse
} // namespace MLCommon
|
aec5bdc28944b90c9da714fce40350562ed5f5d1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "GPUKernel_VpVm.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;
int matrix_len = strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int a = 2;
int v = 1;
double *in = NULL;
hipMalloc(&in, XSIZE*YSIZE*sizeof(double));
double *outp = NULL;
hipMalloc(&outp, XSIZE*YSIZE*sizeof(double));
double *outm = NULL;
hipMalloc(&outm, XSIZE*YSIZE*sizeof(double));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(GPUKernel_VpVm, dim3(gridBlock), dim3(threadBlock), 0, 0, a, v, in, outp, outm);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(GPUKernel_VpVm, dim3(gridBlock), dim3(threadBlock), 0, 0, a, v, in, outp, outm);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(GPUKernel_VpVm, dim3(gridBlock), dim3(threadBlock), 0, 0, a, v, in, outp, outm);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
hipFree(in); hipFree(outp); hipFree(outm); // release buffers before the next configuration
}
}}
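// Usage sketch (an assumption, not part of the original harness): compile with hipcc and pass
// the number of entries of matrices_ to sweep, e.g.
//   hipcc -O3 aec5bdc28944b90c9da714fce40350562ed5f5d1.hip -o bench_vpvm && ./bench_vpvm 7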
|
aec5bdc28944b90c9da714fce40350562ed5f5d1.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "GPUKernel_VpVm.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;
int matrix_len = strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int a = 2;
int v = 1;
double *in = NULL;
cudaMalloc(&in, XSIZE*YSIZE*sizeof(double));
double *outp = NULL;
cudaMalloc(&outp, XSIZE*YSIZE*sizeof(double));
double *outm = NULL;
cudaMalloc(&outm, XSIZE*YSIZE*sizeof(double));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
GPUKernel_VpVm<<<gridBlock,threadBlock>>>(a,v,in,outp,outm);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
GPUKernel_VpVm<<<gridBlock,threadBlock>>>(a,v,in,outp,outm);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
GPUKernel_VpVm<<<gridBlock,threadBlock>>>(a,v,in,outp,outm);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
cudaFree(in); cudaFree(outp); cudaFree(outm); // release buffers before the next configuration
}
}}
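// Usage sketch (an assumption, not part of the original harness): compile with nvcc and pass
// the number of entries of matrices_ to sweep, e.g.
//   nvcc -O3 aec5bdc28944b90c9da714fce40350562ed5f5d1.cu -o bench_vpvm && ./bench_vpvm 7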
|
70659876fecd7ab8d262caaf8d0958ee82f20322.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "sobel.cuh"
__global__ void gpu_sobel_kernel_naive(u_char *Source, u_char *Resultat, unsigned width, unsigned height) {
int j = blockIdx.x*blockDim.x + threadIdx.x;
int i = blockIdx.y*blockDim.y + threadIdx.y;
u_char val;
int globalIndex = i*width+j;
if ((i==0)||(i>=height-1)||(j==0)||(j>=width-1)) {Resultat[globalIndex]=0;}
else {
val = std::abs(Source[(i-1)*width+(j-1)] + Source[(i-1)*width+(j)] + Source[(i-1)*width+(j+1)] -\
(Source[(i+1)*width+(j-1)] + Source[(i+1)*width+(j)] + Source[(i+1)*width+(j+1)]));
Resultat[globalIndex] = val + std::abs(Source[(i-1)*width+(j-1)] + Source[(i)*width+(j-1)] + Source[(i+1)*width+(j-1)] -\
(Source[(i-1)*width+(j+1)] + Source[(i)*width+(j+1)] + Source[(i+1)*width+(j+1)]));
}
}
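// Tiled variant: each block stages a BLOCKDIM_X x BLOCKDIM_Y tile of the source image in shared
// memory; blocks advance by BLOCKDIM-2 so neighbouring tiles overlap by a one-pixel halo, and
// only the interior threads compute and write an output pixel.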
__global__ void gpu_sobel_kernel_shared(u_char *Source, u_char *Resultat, unsigned width, unsigned height) {
__shared__ u_char tuile[BLOCKDIM_X][BLOCKDIM_Y];
int x = threadIdx.x;
int y = threadIdx.y;
int i = blockIdx.y*(BLOCKDIM_Y-2) + y;
int j = blockIdx.x*(BLOCKDIM_X-2) + x;
int globalIndex = i*width+j;
if ((i==0)||(i>=height-1)||(j==0)||(j>=width-1)) {}
else {
//mainstream
tuile[x][y] = Source[globalIndex];
__syncthreads();
u_char val;
if ((x>0)&&(y>0)&&(x<BLOCKDIM_X-1)&&(y<BLOCKDIM_Y-1)) {
val = std::abs(tuile[x-1][y-1] + tuile[x-1][y] + tuile[x-1][y+1] -\
(tuile[x+1][y-1] + tuile[x+1][y] + tuile[x+1][y+1]));
Resultat[globalIndex] = val + std::abs(tuile[x-1][y-1] + tuile[x][y-1] + tuile[x+1][y-1] -\
(tuile[x-1][y+1] + tuile[x][y+1] + tuile[x+1][y+1]));
}
}
}
void cpu_sobel(u_char **Source, u_char **Resultat, unsigned width, unsigned height) {
#pragma omp parallel for num_threads(8)
for (auto i = 1; i < height-1; i++) {
for (auto j = 1; j < width-1; j++) {
if ((i==0)||(i==height-1)||(j==0)||(j==width-1)) {Resultat[i][j]=0;}
else {
Resultat[i][j] = std::abs(Source[i-1][j-1] + Source[i-1][j] + Source[i-1][j+1] - (Source[i+1][j-1] + Source[i+1][j] + Source[i+1][j+1]));
Resultat[i][j] += std::abs(Source[i-1][j-1] + Source[i][j-1] + Source[i+1][j-1] - (Source[i-1][j+1] + Source[i][j+1] + Source[i+1][j+1]));
}
}
}
}
|
70659876fecd7ab8d262caaf8d0958ee82f20322.cu
|
#include "sobel.cuh"
__global__ void gpu_sobel_kernel_naive(u_char *Source, u_char *Resultat, unsigned width, unsigned height) {
int j = blockIdx.x*blockDim.x + threadIdx.x;
int i = blockIdx.y*blockDim.y + threadIdx.y;
u_char val;
int globalIndex = i*width+j;
if ((i==0)||(i>=height-1)||(j==0)||(j>=width-1)) {Resultat[globalIndex]=0;}
else {
val = std::abs(Source[(i-1)*width+(j-1)] + Source[(i-1)*width+(j)] + Source[(i-1)*width+(j+1)] -\
(Source[(i+1)*width+(j-1)] + Source[(i+1)*width+(j)] + Source[(i+1)*width+(j+1)]));
Resultat[globalIndex] = val + std::abs(Source[(i-1)*width+(j-1)] + Source[(i)*width+(j-1)] + Source[(i+1)*width+(j-1)] -\
(Source[(i-1)*width+(j+1)] + Source[(i)*width+(j+1)] + Source[(i+1)*width+(j+1)]));
}
}
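// Tiled variant: each block stages a BLOCKDIM_X x BLOCKDIM_Y tile of the source image in shared
// memory; blocks advance by BLOCKDIM-2 so neighbouring tiles overlap by a one-pixel halo, and
// only the interior threads compute and write an output pixel.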
__global__ void gpu_sobel_kernel_shared(u_char *Source, u_char *Resultat, unsigned width, unsigned height) {
__shared__ u_char tuile[BLOCKDIM_X][BLOCKDIM_Y];
int x = threadIdx.x;
int y = threadIdx.y;
int i = blockIdx.y*(BLOCKDIM_Y-2) + y;
int j = blockIdx.x*(BLOCKDIM_X-2) + x;
int globalIndex = i*width+j;
if ((i==0)||(i>=height-1)||(j==0)||(j>=width-1)) {}
else {
//mainstream
tuile[x][y] = Source[globalIndex];
__syncthreads();
u_char val;
if ((x>0)&&(y>0)&&(x<BLOCKDIM_X-1)&&(y<BLOCKDIM_Y-1)) {
val = std::abs(tuile[x-1][y-1] + tuile[x-1][y] + tuile[x-1][y+1] -\
(tuile[x+1][y-1] + tuile[x+1][y] + tuile[x+1][y+1]));
Resultat[globalIndex] = val + std::abs(tuile[x-1][y-1] + tuile[x][y-1] + tuile[x+1][y-1] -\
(tuile[x-1][y+1] + tuile[x][y+1] + tuile[x+1][y+1]));
}
}
}
void cpu_sobel(u_char **Source, u_char **Resultat, unsigned width, unsigned height) {
#pragma omp parallel for num_threads(8)
for (auto i = 1; i < height-1; i++) {
for (auto j = 1; j < width-1; j++) {
if ((i==0)||(i==height-1)||(j==0)||(j==width-1)) {Resultat[i][j]=0;}
else {
Resultat[i][j] = std::abs(Source[i-1][j-1] + Source[i-1][j] + Source[i-1][j+1] - (Source[i+1][j-1] + Source[i+1][j] + Source[i+1][j+1]));
Resultat[i][j] += std::abs(Source[i-1][j-1] + Source[i][j-1] + Source[i+1][j-1] - (Source[i-1][j+1] + Source[i][j+1] + Source[i+1][j+1]));
}
}
}
}
|
ffb70357280d4fd207566a768057766cdae9f1a0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <math.h>
// #include <stdio.h>
#include "hip/hip_runtime.h"
#define NDEBUG1
#include <assert.h>
#define tx threadIdx.x
#define ty threadIdx.y
#define bx blockIdx.x
#define by blockIdx.y
#define NN 64
#define multi 1
#define BSZ0 16
#define BSZ (multi * (BSZ0))
#define GSZ (NN / BSZ)
#define MAX_NSZ 7
#define NSZ 5
#define NSZ2 (NSZ / 2)
#define BSZ_HL (BSZ + NSZ - 1)
#define SER(row, col, ncols) ((row) * (ncols) + (col))
__constant__ float gaussDistW[MAX_NSZ * MAX_NSZ];
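// Gaussian patch weights; expected to be filled by the host (e.g. via hipMemcpyToSymbol) before nlm is launched.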
// with 4*4 blocks and 1024 threads/block and 4 pixels/thread this comes out to a bit over 32k of shared memory
// but I cannot have 1024 threads, probably because the limit is 768.
// so split it into 256/block? -> work out the transfer cost.
__device__ __forceinline__ void getSharedBlock(float sharedImg[BSZ_HL][BSZ_HL], const float *globalImg, int I0, int N) {
int ii = SER(ty, tx, BSZ0); // 2d to 1d index of thread i in the block
do {
int I = ii % BSZ_HL; // x index in block including padding
int J = ii / BSZ_HL; // y index in block including padding
int IGlobal = I0 + SER(J, I, N); // global input index
assert(I < BSZ_HL);
if( (I < BSZ_HL) && (J < BSZ_HL) && (ii < N * N) ) {
sharedImg[I][J] = globalImg[IGlobal]; // download from global
}
ii += BSZ0 * BSZ0; // next iteration starts THREADNUM position after
} while ( ii < BSZ_HL * BSZ_HL ); // only J check needed ?
}
__device__ __forceinline__ void getWeight(float blockImg[BSZ_HL][BSZ_HL], float foreignBlockImg[BSZ_HL][BSZ_HL], float sigma, float weightSum[multi][multi], float fSum[multi][multi]) {
// Compute block weights with self
// new tx is (1) blockdim away
#define txM (tx + ( mx ) * blockDim.x)
#define tyM (ty + ( my ) * blockDim.y)
for (int mx = 0; mx < multi; mx++) { // Multiple pixels per thread
for (int my = 0; my < multi; my++) {
for(int k = 0; k < BSZ; k++ ) { // Other block
for(int l = 0; l < BSZ; l++ ) {
float partialW = 0;
                    for(int m = -NSZ2; m <= NSZ2; m++) // Neighbourhood
for(int n = -NSZ2; n <= NSZ2; n++)
partialW += gaussDistW[ SER((n + MAX_NSZ / 2), (m + MAX_NSZ / 2), MAX_NSZ)]
* powf( ( 0.7//blockImg[(txM + NSZ2) + m][(tyM + NSZ2) + n]
- foreignBlockImg[(k + NSZ2) + m][(l + NSZ2) + n] ), 2);
// if (!mx && !my && k==1) printf("%f\n",partialW);
partialW = expf((-partialW / sigma));
weightSum[mx][my] += partialW;
fSum[mx][my] += partialW * foreignBlockImg[k + NSZ2][l + NSZ2];
}
}
}
}
}
__device__ __forceinline__ void downloadAndCalculate(float blockImg[BSZ_HL][BSZ_HL], float foreignBlockImg[BSZ_HL][BSZ_HL],
const float *inputImg, float sigma, float weightSum[multi][multi], float fSum[multi][multi], int N, int I0) {
getSharedBlock(foreignBlockImg, inputImg, I0, N);
__syncthreads();
getWeight(blockImg, foreignBlockImg, sigma, weightSum, fSum);
}
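// Non-local means: each output pixel is a weighted average of pixels gathered from the other
// blocks of the image, using weights derived from a Gaussian-weighted patch distance over an
// NSZ x NSZ neighbourhood.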
__global__ void nlm(float const *inputImg, float *outputImg, int N, float sigma) {
// assert(NN == N);
int N2 = NN + NSZ - 1; // input image with padding
// assert(GSZ == gridDim.x);
// assert(BSZ0 == blockDim.x);
__shared__ float blockImg[BSZ_HL][BSZ_HL];
__shared__ float foreignBlockImg[BSZ_HL][BSZ_HL];
// if(!tx && !ty && !bx && !by) {
// for (int mu = 0; mu < 49; mu++) {
// printf("%f ",gaussDistW[mu]);
// }
// }
// __syncthreads();
float weightSum[multi][multi], fSum[multi][multi]; // Weightsums for multiple pixels per thread.
for (int mx = 0; mx < multi; mx++) {
for (int my = 0; my < multi; my++) {
weightSum[mx][my] = 0;
fSum[mx][my] = 0;
}
}
// put inside
int I0 = SER(by * BSZ, bx * BSZ, N2); // Download this block's pixels
//downloadAndCalculate(blockImg, blockImg, inputImg, sigma, weightSum, fSum, N2, I0);
    for (char i = 0; i < GSZ; i++) { // for each BLOCK in the original image (X)
for (char j = 0; j < GSZ; j++) {
if ( !(by == j && bx == i) ) {
// Download other blocks
int I1 = SER(j * BSZ, i * BSZ, N2); //first pixel in block. Used as a ref point to calculate the block.(pg21)
downloadAndCalculate(blockImg, foreignBlockImg, inputImg, sigma, weightSum, fSum, N2, I1);
}
}
}
for (int mx = 0; mx < multi; mx++) {
for (int my = 0; my < multi; my++) {
// add NSZ2 to skip the padding pixels
outputImg[SER(by * BSZ, bx * BSZ, NN) + SER(tyM, txM, NN)] = fSum[mx][my] / weightSum[mx][my];
//blockImg[(txM + NSZ2)][(tyM + NSZ2)]
//inputImg[I0 + SER(NSZ2, NSZ2, N2) + SER(tyM, txM, NN)]
}
}
}
// template __global__ void kernel<false>();
|
ffb70357280d4fd207566a768057766cdae9f1a0.cu
|
#include <math.h>
// #include <stdio.h>
#include "cuda_runtime.h"
#define NDEBUG1
#include <assert.h>
#define tx threadIdx.x
#define ty threadIdx.y
#define bx blockIdx.x
#define by blockIdx.y
#define NN 64
#define multi 1
#define BSZ0 16
#define BSZ (multi * (BSZ0))
#define GSZ (NN / BSZ)
#define MAX_NSZ 7
#define NSZ 5
#define NSZ2 (NSZ / 2)
#define BSZ_HL (BSZ + NSZ - 1)
#define SER(row, col, ncols) ((row) * (ncols) + (col))
__constant__ float gaussDistW[MAX_NSZ * MAX_NSZ];
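// Gaussian patch weights; expected to be filled by the host (e.g. via cudaMemcpyToSymbol) before nlm is launched.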
// with 4*4 blocks and 1024 threads/block and 4 pixels/thread this comes out to a bit over 32k of shared memory
// but I cannot have 1024 threads, probably because the limit is 768.
// so split it into 256/block? -> work out the transfer cost.
__device__ __forceinline__ void getSharedBlock(float sharedImg[BSZ_HL][BSZ_HL], const float *globalImg, int I0, int N) {
int ii = SER(ty, tx, BSZ0); // 2d to 1d index of thread i in the block
do {
int I = ii % BSZ_HL; // x index in block including padding
int J = ii / BSZ_HL; // y index in block including padding
int IGlobal = I0 + SER(J, I, N); // global input index
assert(I < BSZ_HL);
if( (I < BSZ_HL) && (J < BSZ_HL) && (ii < N * N) ) {
sharedImg[I][J] = globalImg[IGlobal]; // download from global
}
ii += BSZ0 * BSZ0; // next iteration starts THREADNUM position after
} while ( ii < BSZ_HL * BSZ_HL ); // only J check needed ?
}
__device__ __forceinline__ void getWeight(float blockImg[BSZ_HL][BSZ_HL], float foreignBlockImg[BSZ_HL][BSZ_HL], float sigma, float weightSum[multi][multi], float fSum[multi][multi]) {
// Compute block weights with self
// new tx is (1) blockdim away
#define txM (tx + ( mx ) * blockDim.x)
#define tyM (ty + ( my ) * blockDim.y)
for (int mx = 0; mx < multi; mx++) { // Multiple pixels per thread
for (int my = 0; my < multi; my++) {
for(int k = 0; k < BSZ; k++ ) { // Other block
for(int l = 0; l < BSZ; l++ ) {
float partialW = 0;
                    for(int m = -NSZ2; m <= NSZ2; m++) // Neighbourhood
for(int n = -NSZ2; n <= NSZ2; n++)
partialW += gaussDistW[ SER((n + MAX_NSZ / 2), (m + MAX_NSZ / 2), MAX_NSZ)]
* powf( ( 0.7//blockImg[(txM + NSZ2) + m][(tyM + NSZ2) + n]
- foreignBlockImg[(k + NSZ2) + m][(l + NSZ2) + n] ), 2);
// if (!mx && !my && k==1) printf("%f\n",partialW);
partialW = expf((-partialW / sigma));
weightSum[mx][my] += partialW;
fSum[mx][my] += partialW * foreignBlockImg[k + NSZ2][l + NSZ2];
}
}
}
}
}
__device__ __forceinline__ void downloadAndCalculate(float blockImg[BSZ_HL][BSZ_HL], float foreignBlockImg[BSZ_HL][BSZ_HL],
const float *inputImg, float sigma, float weightSum[multi][multi], float fSum[multi][multi], int N, int I0) {
getSharedBlock(foreignBlockImg, inputImg, I0, N);
__syncthreads();
getWeight(blockImg, foreignBlockImg, sigma, weightSum, fSum);
}
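// Non-local means: each output pixel is a weighted average of pixels gathered from the other
// blocks of the image, using weights derived from a Gaussian-weighted patch distance over an
// NSZ x NSZ neighbourhood.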
__global__ void nlm(float const *inputImg, float *outputImg, int N, float sigma) {
// assert(NN == N);
int N2 = NN + NSZ - 1; // input image with padding
// assert(GSZ == gridDim.x);
// assert(BSZ0 == blockDim.x);
__shared__ float blockImg[BSZ_HL][BSZ_HL];
__shared__ float foreignBlockImg[BSZ_HL][BSZ_HL];
// if(!tx && !ty && !bx && !by) {
// for (int mu = 0; mu < 49; mu++) {
// printf("%f ",gaussDistW[mu]);
// }
// }
// __syncthreads();
float weightSum[multi][multi], fSum[multi][multi]; // Weightsums for multiple pixels per thread.
for (int mx = 0; mx < multi; mx++) {
for (int my = 0; my < multi; my++) {
weightSum[mx][my] = 0;
fSum[mx][my] = 0;
}
}
// put inside
int I0 = SER(by * BSZ, bx * BSZ, N2); // Download this block's pixels
//downloadAndCalculate(blockImg, blockImg, inputImg, sigma, weightSum, fSum, N2, I0);
    for (char i = 0; i < GSZ; i++) { // for each BLOCK in the original image (X)
for (char j = 0; j < GSZ; j++) {
if ( !(by == j && bx == i) ) {
// Download other blocks
int I1 = SER(j * BSZ, i * BSZ, N2); //first pixel in block. Used as a ref point to calculate the block.(pg21)
downloadAndCalculate(blockImg, foreignBlockImg, inputImg, sigma, weightSum, fSum, N2, I1);
}
}
}
for (int mx = 0; mx < multi; mx++) {
for (int my = 0; my < multi; my++) {
// add NSZ2 to skip the padding pixels
outputImg[SER(by * BSZ, bx * BSZ, NN) + SER(tyM, txM, NN)] = fSum[mx][my] / weightSum[mx][my];
//blockImg[(txM + NSZ2)][(tyM + NSZ2)]
//inputImg[I0 + SER(NSZ2, NSZ2, N2) + SER(tyM, txM, NN)]
}
}
}
// template __global__ void kernel<false>();
|
f60e874c9af1fbd2d2928767b3ddcad3792eb221.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPEvent.h>
#include <ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h>
#include <ATen/native/Copy.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Loops.cuh>
#include <THH/THH.h>
#ifdef __HIP_PLATFORM_HCC__
#include <hip/hip_version.h>
#endif
namespace at {
namespace native {
using namespace at::cuda;
// device-to-device copy, does type conversion
void copy_device_to_device(TensorIterator& iter, bool non_blocking) {
int64_t numel = iter.numel();
// We can memcpy the memory if both tensors have the same type AND both
// tensors are contiguous after dimension coalescing and reordering.
bool same_type = iter.dtype(0) == iter.dtype(1);
bool memcpy_eligible = same_type && iter.is_contiguous();
Device dst_device = iter.device(0);
Device src_device = iter.device(1);
HIPGuardMasqueradingAsCUDA device_guard(src_device);
// We always perform the copy on the source device, using the current stream
// on the source device, and we fully synchronize on both src and dst's
// current streams for completion of the copy. We have to explicitly do this
// for non-contig copies. This mimics the behavior of cross-device
// hipMemcpyAsync on the default stream.
HIPStreamMasqueradingAsCUDA copy_stream = getCurrentHIPStreamMasqueradingAsCUDA(src_device.index());
if (src_device != dst_device) {
// This is a cross-device copy on the src current stream and dst current
// stream. We perform a two-way barrier between both devices' streams
// before the copy. This ensures that any write-after-write and
// write-after-read dependencies on the destination side are handled, so
// that no one is operating on the dst memory when we perform the copy.
// src waits on dst barrier (src already waits on src)
CUDAEvent dst_ready;
device_guard.set_device(dst_device);
dst_ready.record(getCurrentHIPStreamMasqueradingAsCUDA(dst_device.index()));
device_guard.set_device(src_device);
dst_ready.block(copy_stream);
}
if (memcpy_eligible) {
void *dst = iter.data_ptr(0);
void *src = iter.data_ptr(1);
size_t size = numel * iter.element_size(0);
if (src != dst || src_device != dst_device) {
// Perform the copy
AT_CUDA_CHECK(hipMemcpyAsync(
dst, src, size,
hipMemcpyDeviceToDevice,
copy_stream));
}
} else {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kHalf, kBool, kBFloat16, iter.dtype(0), "copy_", [&] {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t x) { return x; });
});
}
if (src_device != dst_device) {
// dst waits on src barrier (dst already waits on dst). We cannot
// operate on dst's copy until the copy is complete.
// Still on src_device, record stream event
CUDAEvent src_ready;
src_ready.record(copy_stream);
device_guard.set_device(dst_device);
src_ready.block(getCurrentHIPStreamMasqueradingAsCUDA(dst_device.index()));
}
AT_CUDA_CHECK(hipGetLastError());
}
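// A copy needs to be staged through contiguous temporaries unless it stays on one GPU, is a
// contiguous same-dtype copy, or is a GPU-to-GPU copy with peer-to-peer access enabled.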
static bool copy_requires_temporaries(TensorIterator& iter, bool p2p_enabled) {
Device dst_device = iter.device(0);
Device src_device = iter.device(1);
if (dst_device == src_device) {
// We never require temporaries for copies on the same GPU.
TORCH_INTERNAL_ASSERT(dst_device.is_cuda() && src_device.is_cuda());
return false;
}
bool same_dtype = iter.dtype(0) == iter.dtype(1);
if (same_dtype && iter.is_contiguous()) {
// Contiguous same-dtype copies can always use hipMemcpyAsync
return false;
} else if (dst_device.is_cuda() && src_device.is_cuda()) {
// Copies between GPUs can use the copy kernel if P2P is supported
return !p2p_enabled;
} else {
// The remaining cases require temporaries. For example, this includes
// non-contiguous copies between CPU and GPU.
return true;
}
}
static bool maybe_enable_p2p_access(Device dst_device, Device src_device) {
if (dst_device.is_cpu() || src_device.is_cpu()) {
return false;
}
return THCState_getPeerToPeerAccess(
globalContext().getTHCState(), src_device.index(), dst_device.index());
}
static void copy_kernel_cuda(TensorIterator& iter, bool non_blocking) {
AT_ASSERT(iter.ntensors() == 2);
Device dst_device = iter.device(0);
Device src_device = iter.device(1);
// Enable p2p access between devices. (No-op if it involves the CPU)
bool p2p_enabled = maybe_enable_p2p_access(dst_device, src_device);
if (copy_requires_temporaries(iter, p2p_enabled)) {
// NB: this involves recursive calls to copy. Be careful that those copies
// don't require temporaries or you will cause an infinite recursion!
auto& dst = iter.tensor(0);
Tensor dst_contig;
Tensor src_contig;
// Type conversions are performed on the CPU for CPU-GPU copies and on
// the src device for GPU-GPU copies.
if (iter.device_type(0) == kCUDA) {
dst_contig = dst.is_contiguous() ? dst : at::empty_like(dst, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
src_contig = iter.tensor(1).to(iter.dtype(0)).expand_as(dst).contiguous();
} else {
bool same_type = iter.dtype(0) == iter.dtype(1);
dst_contig = (dst.is_contiguous() && same_type) ? dst : at::empty_like(dst, iter.dtype(1), LEGACY_CONTIGUOUS_MEMORY_FORMAT);
src_contig = iter.tensor(1).expand_as(dst).contiguous();
}
// perform a same-dtype copy on contiguous tensors
TORCH_INTERNAL_ASSERT(dst_contig.sizes().equals(src_contig.sizes()));
TORCH_INTERNAL_ASSERT(dst_contig.scalar_type() == src_contig.scalar_type());
dst_contig.copy_(src_contig, non_blocking);
// if necessary, copy back into dst
if (!dst_contig.is_same(dst)) {
TORCH_INTERNAL_ASSERT(dst_contig.device() == dst.device());
dst.copy_(dst_contig, non_blocking);
}
return;
}
// Copy on GPU (or between GPUs)
if (dst_device.is_cuda() && src_device.is_cuda()) {
copy_device_to_device(iter, non_blocking);
return;
}
// Copy between CPU and GPU
hip::OptionalHIPGuardMasqueradingAsCUDA device_guard;
hipMemcpyKind kind;
if (dst_device.is_cuda() && src_device.is_cpu()) {
device_guard.set_device(dst_device);
kind = hipMemcpyHostToDevice;
} else if (dst_device.is_cpu() && src_device.is_cuda()) {
device_guard.set_device(src_device);
kind = hipMemcpyDeviceToHost;
} else {
TORCH_INTERNAL_ASSERT(false, "unsupported devices in GPU copy_()");
}
void* dst = iter.data_ptr(0);
void* src = iter.data_ptr(1);
int64_t nbytes = iter.numel() * iter.element_size(0);
HIPStreamMasqueradingAsCUDA stream = getCurrentHIPStreamMasqueradingAsCUDA();
if (non_blocking) {
AT_CUDA_CHECK(hipMemcpyAsync(dst, src, nbytes, kind, stream));
void* ptr = (dst_device == kCPU ? dst : src);
AT_CUDA_CHECK(THCCachingHostAllocator_recordEvent(ptr, stream));
} else {
#if HIP_VERSION >= 301
AT_CUDA_CHECK(hipMemcpyWithStream(dst, src, nbytes, kind, stream));
#else
AT_CUDA_CHECK(hipMemcpyAsync(dst, src, nbytes, kind, stream));
AT_CUDA_CHECK(hipStreamSynchronize(stream));
#endif
}
}
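// Same logic as copy_kernel_cuda, registered for the FN_copy_stub dispatch; the extra tID and
// csr arguments are accepted but not used in this code path.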
static void FN_copy_kernel_cuda(TensorIterator& iter, bool non_blocking, int tID, bool csr) {
AT_ASSERT(iter.ntensors() == 2);
Device dst_device = iter.device(0);
Device src_device = iter.device(1);
// Enable p2p access between devices. (No-op if it involves the CPU)
bool p2p_enabled = maybe_enable_p2p_access(dst_device, src_device);
if (copy_requires_temporaries(iter, p2p_enabled)) {
// NB: this involves recursive calls to copy. Be careful that those copies
// don't require temporaries or you will cause an infinite recursion!
auto& dst = iter.tensor(0);
Tensor dst_contig;
Tensor src_contig;
// Type conversions are performed on the CPU for CPU-GPU copies and on
// the src device for GPU-GPU copies.
if (iter.device_type(0) == kCUDA) {
dst_contig = dst.is_contiguous() ? dst : at::empty_like(dst, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
src_contig = iter.tensor(1).to(iter.dtype(0)).expand_as(dst).contiguous();
} else {
bool same_type = iter.dtype(0) == iter.dtype(1);
dst_contig = (dst.is_contiguous() && same_type) ? dst : at::empty_like(dst, iter.dtype(1), LEGACY_CONTIGUOUS_MEMORY_FORMAT);
src_contig = iter.tensor(1).expand_as(dst).contiguous();
}
// perform a same-dtype copy on contiguous tensors
TORCH_INTERNAL_ASSERT(dst_contig.sizes().equals(src_contig.sizes()));
TORCH_INTERNAL_ASSERT(dst_contig.scalar_type() == src_contig.scalar_type());
dst_contig.copy_(src_contig, non_blocking);
// if necessary, copy back into dst
if (!dst_contig.is_same(dst)) {
TORCH_INTERNAL_ASSERT(dst_contig.device() == dst.device());
dst.copy_(dst_contig, non_blocking);
}
return;
}
// Copy on GPU (or between GPUs)
if (dst_device.is_cuda() && src_device.is_cuda()) {
copy_device_to_device(iter, non_blocking);
return;
}
// Copy between CPU and GPU
hip::OptionalHIPGuardMasqueradingAsCUDA device_guard;
hipMemcpyKind kind;
if (dst_device.is_cuda() && src_device.is_cpu()) {
device_guard.set_device(dst_device);
kind = hipMemcpyHostToDevice;
} else if (dst_device.is_cpu() && src_device.is_cuda()) {
device_guard.set_device(src_device);
kind = hipMemcpyDeviceToHost;
} else {
TORCH_INTERNAL_ASSERT(false, "unsupported devices in GPU copy_()");
}
void* dst = iter.data_ptr(0);
void* src = iter.data_ptr(1);
int64_t nbytes = iter.numel() * iter.element_size(0);
HIPStreamMasqueradingAsCUDA stream = getCurrentHIPStreamMasqueradingAsCUDA();
if (non_blocking) {
AT_CUDA_CHECK(hipMemcpyAsync(dst, src, nbytes, kind, stream));
void* ptr = (dst_device == kCPU ? dst : src);
AT_CUDA_CHECK(THCCachingHostAllocator_recordEvent(ptr, stream));
} else {
#if HIP_VERSION >= 301
AT_CUDA_CHECK(hipMemcpyWithStream(dst, src, nbytes, kind, stream));
#else
AT_CUDA_CHECK(hipMemcpyAsync(dst, src, nbytes, kind, stream));
AT_CUDA_CHECK(hipStreamSynchronize(stream));
#endif
}
}
REGISTER_DISPATCH(copy_stub, &copy_kernel_cuda);
REGISTER_DISPATCH(FN_copy_stub, &FN_copy_kernel_cuda);
} // namespace native
} // namespace at
|
f60e874c9af1fbd2d2928767b3ddcad3792eb221.cu
|
#include <ATen/ATen.h>
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAEvent.h>
#include <c10/cuda/CUDAStream.h>
#include <ATen/native/Copy.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
#include <THC/THC.h>
#ifdef __HIP_PLATFORM_HCC__
#include <hip/hip_version.h>
#endif
namespace at {
namespace native {
using namespace at::cuda;
// device-to-device copy, does type conversion
void copy_device_to_device(TensorIterator& iter, bool non_blocking) {
int64_t numel = iter.numel();
// We can memcpy the memory if both tensors have the same type AND both
// tensors are contiguous after dimension coalescing and reordering.
bool same_type = iter.dtype(0) == iter.dtype(1);
bool memcpy_eligible = same_type && iter.is_contiguous();
Device dst_device = iter.device(0);
Device src_device = iter.device(1);
CUDAGuard device_guard(src_device);
// We always perform the copy on the source device, using the current stream
// on the source device, and we fully synchronize on both src and dst's
// current streams for completion of the copy. We have to explicitly do this
// for non-contig copies. This mimics the behavior of cross-device
// cudaMemcpyAsync on the default stream.
CUDAStream copy_stream = getCurrentCUDAStream(src_device.index());
if (src_device != dst_device) {
// This is a cross-device copy on the src current stream and dst current
// stream. We perform a two-way barrier between both devices' streams
// before the copy. This ensures that any write-after-write and
// write-after-read dependencies on the destination side are handled, so
// that no one is operating on the dst memory when we perform the copy.
// src waits on dst barrier (src already waits on src)
CUDAEvent dst_ready;
device_guard.set_device(dst_device);
dst_ready.record(getCurrentCUDAStream(dst_device.index()));
device_guard.set_device(src_device);
dst_ready.block(copy_stream);
}
if (memcpy_eligible) {
void *dst = iter.data_ptr(0);
void *src = iter.data_ptr(1);
size_t size = numel * iter.element_size(0);
if (src != dst || src_device != dst_device) {
// Perform the copy
AT_CUDA_CHECK(cudaMemcpyAsync(
dst, src, size,
cudaMemcpyDeviceToDevice,
copy_stream));
}
} else {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kHalf, kBool, kBFloat16, iter.dtype(0), "copy_", [&] {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t x) { return x; });
});
}
if (src_device != dst_device) {
// dst waits on src barrier (dst already waits on dst). We cannot
// operate on dst's copy until the copy is complete.
// Still on src_device, record stream event
CUDAEvent src_ready;
src_ready.record(copy_stream);
device_guard.set_device(dst_device);
src_ready.block(getCurrentCUDAStream(dst_device.index()));
}
AT_CUDA_CHECK(cudaGetLastError());
}
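// A copy needs to be staged through contiguous temporaries unless it stays on one GPU, is a
// contiguous same-dtype copy, or is a GPU-to-GPU copy with peer-to-peer access enabled.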
static bool copy_requires_temporaries(TensorIterator& iter, bool p2p_enabled) {
Device dst_device = iter.device(0);
Device src_device = iter.device(1);
if (dst_device == src_device) {
// We never require temporaries for copies on the same GPU.
TORCH_INTERNAL_ASSERT(dst_device.is_cuda() && src_device.is_cuda());
return false;
}
bool same_dtype = iter.dtype(0) == iter.dtype(1);
if (same_dtype && iter.is_contiguous()) {
// Contiguous same-dtype copies can always use cudaMemcpyAsync
return false;
} else if (dst_device.is_cuda() && src_device.is_cuda()) {
// Copies between GPUs can use the copy kernel if P2P is supported
return !p2p_enabled;
} else {
// The remaining cases require temporaries. For example, this includes
// non-contiguous copies between CPU and GPU.
return true;
}
}
static bool maybe_enable_p2p_access(Device dst_device, Device src_device) {
if (dst_device.is_cpu() || src_device.is_cpu()) {
return false;
}
return THCState_getPeerToPeerAccess(
globalContext().getTHCState(), src_device.index(), dst_device.index());
}
static void copy_kernel_cuda(TensorIterator& iter, bool non_blocking) {
AT_ASSERT(iter.ntensors() == 2);
Device dst_device = iter.device(0);
Device src_device = iter.device(1);
// Enable p2p access between devices. (No-op if it involves the CPU)
bool p2p_enabled = maybe_enable_p2p_access(dst_device, src_device);
if (copy_requires_temporaries(iter, p2p_enabled)) {
// NB: this involves recursive calls to copy. Be careful that those copies
// don't require temporaries or you will cause an infinite recursion!
auto& dst = iter.tensor(0);
Tensor dst_contig;
Tensor src_contig;
// Type conversions are performed on the CPU for CPU-GPU copies and on
// the src device for GPU-GPU copies.
if (iter.device_type(0) == kCUDA) {
dst_contig = dst.is_contiguous() ? dst : at::empty_like(dst, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
src_contig = iter.tensor(1).to(iter.dtype(0)).expand_as(dst).contiguous();
} else {
bool same_type = iter.dtype(0) == iter.dtype(1);
dst_contig = (dst.is_contiguous() && same_type) ? dst : at::empty_like(dst, iter.dtype(1), LEGACY_CONTIGUOUS_MEMORY_FORMAT);
src_contig = iter.tensor(1).expand_as(dst).contiguous();
}
// perform a same-dtype copy on contiguous tensors
TORCH_INTERNAL_ASSERT(dst_contig.sizes().equals(src_contig.sizes()));
TORCH_INTERNAL_ASSERT(dst_contig.scalar_type() == src_contig.scalar_type());
dst_contig.copy_(src_contig, non_blocking);
// if necessary, copy back into dst
if (!dst_contig.is_same(dst)) {
TORCH_INTERNAL_ASSERT(dst_contig.device() == dst.device());
dst.copy_(dst_contig, non_blocking);
}
return;
}
// Copy on GPU (or between GPUs)
if (dst_device.is_cuda() && src_device.is_cuda()) {
copy_device_to_device(iter, non_blocking);
return;
}
// Copy between CPU and GPU
cuda::OptionalCUDAGuard device_guard;
cudaMemcpyKind kind;
if (dst_device.is_cuda() && src_device.is_cpu()) {
device_guard.set_device(dst_device);
kind = cudaMemcpyHostToDevice;
} else if (dst_device.is_cpu() && src_device.is_cuda()) {
device_guard.set_device(src_device);
kind = cudaMemcpyDeviceToHost;
} else {
TORCH_INTERNAL_ASSERT(false, "unsupported devices in GPU copy_()");
}
void* dst = iter.data_ptr(0);
void* src = iter.data_ptr(1);
int64_t nbytes = iter.numel() * iter.element_size(0);
CUDAStream stream = getCurrentCUDAStream();
if (non_blocking) {
AT_CUDA_CHECK(cudaMemcpyAsync(dst, src, nbytes, kind, stream));
void* ptr = (dst_device == kCPU ? dst : src);
AT_CUDA_CHECK(THCCachingHostAllocator_recordEvent(ptr, stream));
} else {
#if HIP_VERSION >= 301
AT_CUDA_CHECK(hipMemcpyWithStream(dst, src, nbytes, kind, stream));
#else
AT_CUDA_CHECK(cudaMemcpyAsync(dst, src, nbytes, kind, stream));
AT_CUDA_CHECK(cudaStreamSynchronize(stream));
#endif
}
}
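// Same logic as copy_kernel_cuda, registered for the FN_copy_stub dispatch; the extra tID and
// csr arguments are accepted but not used in this code path.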
static void FN_copy_kernel_cuda(TensorIterator& iter, bool non_blocking, int tID, bool csr) {
AT_ASSERT(iter.ntensors() == 2);
Device dst_device = iter.device(0);
Device src_device = iter.device(1);
// Enable p2p access between devices. (No-op if it involves the CPU)
bool p2p_enabled = maybe_enable_p2p_access(dst_device, src_device);
if (copy_requires_temporaries(iter, p2p_enabled)) {
// NB: this involves recursive calls to copy. Be careful that those copies
// don't require temporaries or you will cause an infinite recursion!
auto& dst = iter.tensor(0);
Tensor dst_contig;
Tensor src_contig;
// Type conversions are performed on the CPU for CPU-GPU copies and on
// the src device for GPU-GPU copies.
if (iter.device_type(0) == kCUDA) {
dst_contig = dst.is_contiguous() ? dst : at::empty_like(dst, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
src_contig = iter.tensor(1).to(iter.dtype(0)).expand_as(dst).contiguous();
} else {
bool same_type = iter.dtype(0) == iter.dtype(1);
dst_contig = (dst.is_contiguous() && same_type) ? dst : at::empty_like(dst, iter.dtype(1), LEGACY_CONTIGUOUS_MEMORY_FORMAT);
src_contig = iter.tensor(1).expand_as(dst).contiguous();
}
// perform a same-dtype copy on contiguous tensors
TORCH_INTERNAL_ASSERT(dst_contig.sizes().equals(src_contig.sizes()));
TORCH_INTERNAL_ASSERT(dst_contig.scalar_type() == src_contig.scalar_type());
dst_contig.copy_(src_contig, non_blocking);
// if necessary, copy back into dst
if (!dst_contig.is_same(dst)) {
TORCH_INTERNAL_ASSERT(dst_contig.device() == dst.device());
dst.copy_(dst_contig, non_blocking);
}
return;
}
// Copy on GPU (or between GPUs)
if (dst_device.is_cuda() && src_device.is_cuda()) {
copy_device_to_device(iter, non_blocking);
return;
}
// Copy between CPU and GPU
cuda::OptionalCUDAGuard device_guard;
cudaMemcpyKind kind;
if (dst_device.is_cuda() && src_device.is_cpu()) {
device_guard.set_device(dst_device);
kind = cudaMemcpyHostToDevice;
} else if (dst_device.is_cpu() && src_device.is_cuda()) {
device_guard.set_device(src_device);
kind = cudaMemcpyDeviceToHost;
} else {
TORCH_INTERNAL_ASSERT(false, "unsupported devices in GPU copy_()");
}
void* dst = iter.data_ptr(0);
void* src = iter.data_ptr(1);
int64_t nbytes = iter.numel() * iter.element_size(0);
CUDAStream stream = getCurrentCUDAStream();
if (non_blocking) {
AT_CUDA_CHECK(cudaMemcpyAsync(dst, src, nbytes, kind, stream));
void* ptr = (dst_device == kCPU ? dst : src);
AT_CUDA_CHECK(THCCachingHostAllocator_recordEvent(ptr, stream));
} else {
#if HIP_VERSION >= 301
AT_CUDA_CHECK(hipMemcpyWithStream(dst, src, nbytes, kind, stream));
#else
AT_CUDA_CHECK(cudaMemcpyAsync(dst, src, nbytes, kind, stream));
AT_CUDA_CHECK(cudaStreamSynchronize(stream));
#endif
}
}
REGISTER_DISPATCH(copy_stub, &copy_kernel_cuda);
REGISTER_DISPATCH(FN_copy_stub, &FN_copy_kernel_cuda);
} // namespace native
} // namespace at
|
16fb107e89ba69d5c29e77ee30b931040f2bee7d.hip
|
// !!! This is a file automatically generated by hipify!!!
// "mpi + cuda reduction + timing"
#ifdef GPU
#include <hip/hip_runtime.h>
#endif
#include <mpi.h>
#include <iostream>
#include <vector>
#include "mpierr.h"
#include <cmath>
#include <algorithm>
#include <sstream>
#include <string>
#include <set>
#include <numeric>
#include <ctime>
// switches:
// #GPU : enable GPU computation
// #NO_LOG: do not print out log messages
// #REDUCE_CPU: perform final per-task reduction step on the CPU
// #DOUBLE_: double precision
// #MPI_RROBIN_: assume a round-robin layout, i.e. process 0 -> node 0, process 1 -> node 1 ...
// #NO_GPU_MALLOC_TIME: do not take into account malloc time; usually this is part of an initialization step
// compilation with mvapich2:
// nvcc -L/apps/eiger/mvapich2/1.6/mvapich2-gnu/lib -I/apps/eiger/mvapich2/1.6/mvapich2-gnu/include \
// -libumad -lmpich -lpthread -lrdmacm -libverbs -arch=sm_20 -DGPU \
// ~/projects/gpu-training/trunk/cuda_exercises_ugo/resources/mpiscratch/mpicuda2.cu
// run:
// 1) w/o scheduler: mpiexec -np ... -hosts ... ./a.out
// 2) w/ scheduler: see mpi_cuda_pbs_ref.sh script
// note: when using mvapich2/1.6 and *not* going through the pbs scheduler it seems
// the default behavior is rrobin, using the pbs launch script the default
// behavior is "bunch" (as defined by the mvapich2 documentation)
// note: using single precision floats because that's the only supported type
// for atomics on CUDA 4
// note: experiment with different numbers of MPI tasks per GPU/node; using
// 256 Mi floats, 16 MPI tasks on two nodes (8 per node, 4 per GPU)
// CUDA fails to allocate memory exactly for one task on each node;
// everything works fine with the same data and 8 tasks (4 per node, 2 per GPU).
// note: it is possible to implement a discovery step to find the current MPI layout
// by checking if MPI rank 0 and 1 are on the same processor ("bunch" layout) or
// not ("scatter" layout)
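// A minimal sketch of such a discovery step (illustrative comment only, assuming the
// task / numtasks / nodeid variables set up inside main() below): ranks 0 and 1 swap
// processor names, rank 0 compares them, and the resulting "bunch" flag is broadcast.
//
//   char peer[ MPI_MAX_PROCESSOR_NAME ] = { '\0' };
//   int bunch = 0; // 1 -> ranks 0 and 1 share a node ("bunch"), 0 -> "scatter"
//   if( numtasks > 1 && task < 2 ) {
//       MPI_( MPI_Sendrecv( &nodeid[ 0 ], MPI_MAX_PROCESSOR_NAME, MPI_CHAR, 1 - task, 0,
//                           peer, MPI_MAX_PROCESSOR_NAME, MPI_CHAR, 1 - task, 0,
//                           MPI_COMM_WORLD, MPI_STATUS_IGNORE ) );
//       if( task == 0 ) bunch = ( std::string( peer ) == std::string( &nodeid[ 0 ] ) ) ? 1 : 0;
//   }
//   MPI_( MPI_Bcast( &bunch, 1, MPI_INT, 0, MPI_COMM_WORLD ) );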
#ifndef DOUBLE_
// with CUDA 4.0 atomics are available for single precision only!!!
typedef float real_t;
#define MPI_REAL_T_ MPI_FLOAT
#else
typedef double real_t;
#define MPI_REAL_T_ MPI_DOUBLE
#endif
//------------------------------------------------------------------------------
#ifdef GPU
const int BLOCK_SIZE = 512;
#ifndef DOUBLE_ //atomics are available for single precision only!!!
__global__ void dot_product_kernel( const real_t* v1, const real_t* v2, int N, real_t* out ) {
__shared__ real_t cache[ BLOCK_SIZE ];
int i = blockIdx.x * blockDim.x + threadIdx.x;
    // no early return here: every thread must initialize its cache slot and reach
    // the __syncthreads() calls below, otherwise the reduction is undefined
    cache[ threadIdx.x ] = 0.f;
    while( i < N ) {
        cache[ threadIdx.x ] += v1[ i ] * v2[ i ];
        i += gridDim.x * blockDim.x;
    }
    __syncthreads(); // make every partial sum visible before the in-block reduction
    i = BLOCK_SIZE / 2;
while( i > 0 ) {
if( threadIdx.x < i ) cache[ threadIdx.x ] += cache[ threadIdx.x + i ];
__syncthreads();
i /= 2; //not sure bitwise operations are actually faster
}
if( threadIdx.x == 0 ) atomicAdd( out, cache[ 0 ] );
}
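// A hedged sketch (kept as a comment, not compiled here): on toolkits without a native
// double-precision atomicAdd, the atomic update above can be emulated with atomicCAS
// on the 64-bit bit pattern, e.g.:
//
//   __device__ double atomic_add_double( double* address, double val ) {
//       unsigned long long int* address_as_ull = (unsigned long long int*) address;
//       unsigned long long int old = *address_as_ull, assumed;
//       do {
//           assumed = old;
//           old = atomicCAS( address_as_ull, assumed,
//                            __double_as_longlong( val + __longlong_as_double( assumed ) ) );
//       } while( assumed != old );
//       return __longlong_as_double( old );
//   }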
#endif
__global__ void partial_dot_product_kernel( const real_t* v1, const real_t* v2, int N, real_t* out ) {
__shared__ real_t cache[ BLOCK_SIZE ];
int i = blockIdx.x * blockDim.x + threadIdx.x;
    // no early return here: every thread must initialize its cache slot and reach
    // the __syncthreads() calls below, otherwise the reduction is undefined
    cache[ threadIdx.x ] = 0.f;
    while( i < N ) {
        cache[ threadIdx.x ] += v1[ i ] * v2[ i ];
        i += gridDim.x * blockDim.x;
    }
    __syncthreads(); // make every partial sum visible before the in-block reduction
    i = BLOCK_SIZE / 2;
while( i > 0 ) {
if( threadIdx.x < i ) cache[ threadIdx.x ] += cache[ threadIdx.x + i ];
__syncthreads();
i /= 2; //not sure bitwise operations are actually faster
}
if( threadIdx.x == 0 ) out[ blockIdx.x ] = cache[ 0 ];
}
#endif
//------------------------------------------------------------------------------
int main( int argc, char** argv ) {
int numtasks = 0;
int task = 0;
// INIT ENV
MPI_( MPI_Init( &argc, &argv ) );
MPI_( MPI_Errhandler_set( MPI_COMM_WORLD, MPI_ERRORS_RETURN ) );
MPI_( MPI_Comm_size( MPI_COMM_WORLD, &numtasks ) );
MPI_( MPI_Comm_rank( MPI_COMM_WORLD, &task ) );
std::vector< char > nodeid( MPI_MAX_PROCESSOR_NAME, '\0' );
int len = 0;
MPI_( MPI_Get_processor_name( &nodeid[ 0 ], &len ) );
#ifdef MPI_RROBIN_
// RETRIEVE TOTAL NUMBER OF NODES USED, is there an easier way ?
// required to have each GPU assigned to the same number of processes
// on each node
const int SEND_NODE_TAG = 0x01;
//const int SEND_NUM_NODES = 0x10;
MPI_Request req;
MPI_( MPI_Isend( &nodeid[ 0 ], MPI_MAX_PROCESSOR_NAME, MPI_CHAR, 0, SEND_NODE_TAG,
MPI_COMM_WORLD, &req ) );
int node_count = -1;
if( task == 0 ) {
typedef std::set< std::string > NodeCount;
NodeCount ncount;
std::vector< char > n( MPI_MAX_PROCESSOR_NAME, '\0' );
MPI_Status s;
for( int r = 0; r != numtasks; ++r ) {
MPI_( MPI_Recv( &n[ 0 ], MPI_MAX_PROCESSOR_NAME, MPI_CHAR, r, SEND_NODE_TAG,
MPI_COMM_WORLD, &s ) );
ncount.insert( &n[ 0 ] );
}
node_count = int( ncount.size() );
#ifndef NO_LOG
std::cout << "Number of nodes: " << node_count << std::endl;
#endif
}
// SEND INFORMATION USED FOR GPU <-> RANK MAPPING TO EACH PROCESS
// Option 1: use scatter, useful only to send per-process specific information like e.g
// the GPU to use. It is in general a more robust method to have the root process
// compute the rank -> gpu map
//std::vector< int > sendbuf( numtasks, node_count );
// MPI Scatter parameters: address of send buffer,
// per-receiving process receive buffer size,...
// send buffer size = num tasks x per-receiving-process buffer size
//MPI_( MPI_Scatter( &sendbuf[ 0 ], 1, MPI_INT, &node_count, 1, MPI_INT, 0, MPI_COMM_WORLD ) );
// Option 2: simply broadcast the number of nodes
MPI_( MPI_Bcast( &node_count, 1, MPI_INT, 0, MPI_COMM_WORLD ) );
#endif
// PER TASK DATA INIT - in the real world this is the place where data are read from file
// through the MPI_File_ functions or, less likely received from the root process
const int ARRAY_SIZE = 1024 * 1024 * 256;// * 1024 * 256; // 256 Mi floats x 2 == 2 GiB total storage
// @WARNING: ARRAY_SIZE must be evenly divisible by the number of MPI processes
const int PER_MPI_TASK_ARRAY_SIZE = ARRAY_SIZE / numtasks;
if( ARRAY_SIZE % numtasks != 0 && task == 0 ) {
std::cerr << ARRAY_SIZE << " must be evenly divisible by the number of mpi processes" << std::endl;
MPI_( MPI_Abort( MPI_COMM_WORLD, 1 ) );
return 1;
}
std::vector< real_t > v1( ARRAY_SIZE / numtasks, 0. );
std::vector< real_t > v2( ARRAY_SIZE / numtasks, 0. );
for( int i = 0; i != PER_MPI_TASK_ARRAY_SIZE; ++i ) {
v1[ i ] = 1;
v2[ i ] = 1;
}
std::vector< double > begins( numtasks );
std::vector< double > ends( numtasks );
double begin = clock();
MPI_( MPI_Gather( &begin, 1, MPI_DOUBLE, &begins[ 0 ], 1, MPI_DOUBLE, 0, MPI_COMM_WORLD ) );
// PARALLEL DOT PRODUCT COMPUTATION
real_t partial_dot = 0.f;
#ifndef GPU
int t = 0;
for( t = 0; t != PER_MPI_TASK_ARRAY_SIZE; ++t ) {
partial_dot += v1[ t ] * v2[ t ];
}
//partial_dot = real_t( p );
#ifndef NO_LOG
std::ostringstream os;
os << &nodeid[ 0 ] << " - rank: " << task << " size: " << PER_MPI_TASK_ARRAY_SIZE
<< ' ' << t << " partial dot: " << partial_dot << '\n' ;
std::cout << os.str(); os.flush();
#endif
#else
// SELECT GPU = task % <num gpus on node>, note that with this
// approach it is possible to support nodes with different numbers of GPUs
int device_count = 0;
if( hipGetDeviceCount( &device_count ) != hipSuccess ) {
std::cerr << task << ' ' << hipGetErrorString( hipGetLastError() ) << " hipGetDeviceCount FAILED\n";
MPI_( MPI_Abort( MPI_COMM_WORLD, 1 ) );
return 1;
}
#ifdef MPI_RROBIN_
const int device = ( task / node_count ) % device_count;
#else
const int device = task % device_count;
#endif
#ifndef NO_LOG
{
std::ostringstream os;
os << &nodeid[ 0 ] << " - rank: " << task << "\tGPU: " << device << '\n';
std::cout << os.str(); os.flush();
}
#endif
if( hipSetDevice( device ) != hipSuccess ) {
std::cerr << task << ' ' << hipGetErrorString( hipGetLastError() ) << " hipSetDevice FAILED\n";
MPI_( MPI_Abort( MPI_COMM_WORLD, 1 ) );
return 1;
}
#ifdef NO_GPU_MALLOC_TIME
double malloc_begin = clock();
#endif
real_t* dev_v1 = 0;
real_t* dev_v2 = 0;
real_t* dev_dout = 0;
if( hipMalloc( &dev_v1, sizeof( real_t ) * PER_MPI_TASK_ARRAY_SIZE ) != hipSuccess ) {
std::cerr << task << ' ' << hipGetErrorString( hipGetLastError() ) << " hipMalloc FAILED\n";
MPI_( MPI_Abort( MPI_COMM_WORLD, 1 ) );
return 1;
}
if( hipMalloc( &dev_v2, sizeof( real_t ) * PER_MPI_TASK_ARRAY_SIZE ) != hipSuccess ) {
std::cerr << task << ' ' << hipGetErrorString( hipGetLastError() ) << " hipMalloc FAILED\n";
MPI_( MPI_Abort( MPI_COMM_WORLD, 1 ) );
return 1;
}
#ifdef NO_GPU_MALLOC_TIME
double malloc_end = clock();
begin += malloc_end - malloc_begin;
#endif
// MOVE DATA TO GPU
if( hipMemcpy( dev_v1, &v1[ 0 ], sizeof( real_t ) * PER_MPI_TASK_ARRAY_SIZE,
hipMemcpyHostToDevice ) != hipSuccess ) {
std::cerr << task << ' ' << __LINE__ << ' ' << hipGetErrorString( hipGetLastError() ) << " hipMemcpy FAILED\n";
MPI_( MPI_Abort( MPI_COMM_WORLD, 1 ) );
return 1;
}
if( hipMemcpy( dev_v2, &v2[ 0 ], sizeof( real_t ) * PER_MPI_TASK_ARRAY_SIZE,
hipMemcpyHostToDevice ) != hipSuccess ) {
std::cerr << task << ' ' << __LINE__ << ' ' << hipGetErrorString( hipGetLastError() ) << " hipMemcpy FAILED\n";
MPI_( MPI_Abort( MPI_COMM_WORLD, 1 ) );
return 1;
}
// INVOKE KERNEL
const int NUM_THREADS_PER_BLOCK = BLOCK_SIZE; // must match size of buffer used for reduction
const int NUM_BLOCKS = std::min( PER_MPI_TASK_ARRAY_SIZE / NUM_THREADS_PER_BLOCK,
0xffff ); // max number of blocks is 64k
#ifndef REDUCE_CPU
#ifdef NO_GPU_MALLOC_TIME
malloc_begin = clock();
#endif
if( hipMalloc( &dev_dout, sizeof( real_t ) * 1 ) != hipSuccess ) {
std::cerr << task << ' ' << __LINE__ << ' ' << hipGetErrorString( hipGetLastError() ) << " hipMalloc FAILED\n";
MPI_( MPI_Abort( MPI_COMM_WORLD, 1 ) );
return 1;
}
// initialize partial dot product to zero
if( hipMemset( dev_dout, 0, sizeof( real_t) ) != hipSuccess ) {
std::cerr << task << ' ' << hipGetErrorString( hipGetLastError() ) << " hipMemset FAILED\n";
MPI_( MPI_Abort( MPI_COMM_WORLD, 1 ) );
return 1;
}
#ifdef NO_GPU_MALLOC_TIME
malloc_end = clock();
begin += malloc_end - malloc_begin;
#endif
// actual on-device computation
hipLaunchKernelGGL(( dot_product_kernel), dim3(NUM_BLOCKS), dim3(NUM_THREADS_PER_BLOCK), 0, 0, dev_v1, dev_v2, PER_MPI_TASK_ARRAY_SIZE, dev_dout );
// check for kernel launch errors: it is not possible to catch on-device execution errors but only
// if there was an error launching the kernel
if( hipGetLastError() != hipSuccess ) {
std::cerr << task << ' ' << hipGetErrorString( hipGetLastError() ) << " kernel launch FAILED\n";
MPI_( MPI_Abort( MPI_COMM_WORLD, 1 ) );
return 1;
}
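// note (hedged aside): hipGetLastError only reports launch-time failures; on-device
// execution errors surface asynchronously, so one could additionally do
//   if( hipDeviceSynchronize() != hipSuccess ) { /* report and MPI_Abort as above */ }
// at this point. It is omitted here so the timing behaviour of the original example
// stays unchanged.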
// MOVE DATA TO CPU
hipMemcpy( &partial_dot, dev_dout, sizeof( real_t ) * 1, hipMemcpyDeviceToHost );
#else
const int PARTIAL_REDUCE_SIZE = NUM_BLOCKS;
if( hipMalloc( &dev_dout, sizeof( real_t ) * PARTIAL_REDUCE_SIZE ) != hipSuccess ) {
std::cerr << task << ' ' << __LINE__ << ' ' << hipGetErrorString( hipGetLastError() ) << " hipMalloc FAILED\n";
MPI_( MPI_Abort( MPI_COMM_WORLD, 1 ) );
return 1;
}
hipLaunchKernelGGL(( partial_dot_product_kernel), dim3(NUM_BLOCKS), dim3(NUM_THREADS_PER_BLOCK), 0, 0, dev_v1, dev_v2, PER_MPI_TASK_ARRAY_SIZE, dev_dout );
std::vector< real_t > rdot( PARTIAL_REDUCE_SIZE );
hipMemcpy( &rdot[ 0 ], dev_dout, sizeof( real_t ) * PARTIAL_REDUCE_SIZE, hipMemcpyDeviceToHost );
partial_dot = std::accumulate( rdot.begin(), rdot.end(), 0.f );
#endif
#ifndef NO_LOG
{
std::ostringstream os;
os << &nodeid[ 0 ] << " - rank: " << task << " partial dot: " << partial_dot << '\n' ;
std::cout << os.str(); os.flush();
}
#endif
#endif
// REDUCE (SUM) ALL ranks -> rank 0
real_t result = 0.;
MPI_( MPI_Reduce( &partial_dot, &result, 1, MPI_REAL_T_, MPI_SUM, 0, MPI_COMM_WORLD ) );
double end = clock();
MPI_( MPI_Gather( &end, 1, MPI_DOUBLE, &ends[ 0 ], 1, MPI_DOUBLE, 0, MPI_COMM_WORLD ) );
const std::pair< double, double > minmax( *std::min_element( begins.begin(), begins.end() ),
*std::max_element( ends.begin(), ends.end() ) );
// IF RANK == 0 -> PRINT RESULT
if( task == 0 ) {
std::cout << "dot product result: " << result << std::endl;
std::cout << "time: " << ( minmax.second - minmax.first ) / CLOCKS_PER_SEC << 's' << std::endl;
}
#ifdef GPU
// RELEASE GPU RESOURCES
hipFree( dev_v1 );
hipFree( dev_v2 );
hipFree( dev_dout );
hipDeviceReset();
#endif
// RELEASE MPI RESOURCES
MPI_( MPI_Finalize() );
return 0;
}
|
16fb107e89ba69d5c29e77ee30b931040f2bee7d.cu
|
// "mpi + cuda reduction + timing"
#ifdef GPU
#include <cuda.h>
#endif
#include <mpi.h>
#include <iostream>
#include <vector>
#include "mpierr.h"
#include <cmath>
#include <algorithm>
#include <sstream>
#include <string>
#include <set>
#include <numeric>
#include <ctime>
// switches:
// #GPU : enable GPU computation
// #NO_LOG: do not print out log messages
// #REDUCE_CPU: perform final per-task reduction step on the CPU
// #DOUBLE_: double precision
// #MPI_RROBIN_: assume a round-robin layout, i.e. process 0 -> node 0, process 1 -> node 1 ...
// #NO_GPU_MALLOC_TIME: do not take into account malloc time; usually this is part of an initialization step
// compilation with mvapich2:
// nvcc -L/apps/eiger/mvapich2/1.6/mvapich2-gnu/lib -I/apps/eiger/mvapich2/1.6/mvapich2-gnu/include \
// -libumad -lmpich -lpthread -lrdmacm -libverbs -arch=sm_20 -DGPU \
// ~/projects/gpu-training/trunk/cuda_exercises_ugo/resources/mpiscratch/mpicuda2.cu
// run:
// 1) w/o scheduler: mpiexec -np ... -hosts ... ./a.out
// 2) w/ scheduler: see mpi_cuda_pbs_ref.sh script
// note: when using mvapich2/1.6 and *not* going through the pbs scheduler it seems
// the default behavior is rrobin, using the pbs launch script the default
// behavior is "bunch" (as defined by the mvapich2 documentation)
// note: using single precision floats because that's the only supported type
// for atomics on CUDA 4
// note: experiment with different numbers of MPI tasks per GPU/node; using
// 256 Mi floats, 16 MPI tasks on two nodes (8 per node, 4 per GPU)
// CUDA fails to allocate memory exactly for one task on each node;
// everything works fine with the same data and 8 tasks (4 per node, 2 per GPU).
// note: it is possible to implement a discovery step to find the current MPI layout
// by checking if MPI rank 0 and 1 are on the same processor ("bunch" layout) or
// not ("scatter" layout)
#ifndef DOUBLE_
// with CUDA 4.0 atomics are available for single precision only!!!
typedef float real_t;
#define MPI_REAL_T_ MPI_FLOAT
#else
typedef double real_t;
#define MPI_REAL_T_ MPI_DOUBLE
#endif
//------------------------------------------------------------------------------
#ifdef GPU
const int BLOCK_SIZE = 512;
#ifndef DOUBLE_ //atomics are available for single precision only!!!
__global__ void dot_product_kernel( const real_t* v1, const real_t* v2, int N, real_t* out ) {
__shared__ real_t cache[ BLOCK_SIZE ];
int i = blockIdx.x * blockDim.x + threadIdx.x;
    // no early return here: every thread must initialize its cache slot and reach
    // the __syncthreads() calls below, otherwise the reduction is undefined
    cache[ threadIdx.x ] = 0.f;
    while( i < N ) {
        cache[ threadIdx.x ] += v1[ i ] * v2[ i ];
        i += gridDim.x * blockDim.x;
    }
    __syncthreads(); // make every partial sum visible before the in-block reduction
    i = BLOCK_SIZE / 2;
while( i > 0 ) {
if( threadIdx.x < i ) cache[ threadIdx.x ] += cache[ threadIdx.x + i ];
__syncthreads();
i /= 2; //not sure bitwise operations are actually faster
}
if( threadIdx.x == 0 ) atomicAdd( out, cache[ 0 ] );
}
#endif
__global__ void partial_dot_product_kernel( const real_t* v1, const real_t* v2, int N, real_t* out ) {
__shared__ real_t cache[ BLOCK_SIZE ];
int i = blockIdx.x * blockDim.x + threadIdx.x;
    // no early return here: every thread must initialize its cache slot and reach
    // the __syncthreads() calls below, otherwise the reduction is undefined
    cache[ threadIdx.x ] = 0.f;
    while( i < N ) {
        cache[ threadIdx.x ] += v1[ i ] * v2[ i ];
        i += gridDim.x * blockDim.x;
    }
    __syncthreads(); // make every partial sum visible before the in-block reduction
    i = BLOCK_SIZE / 2;
while( i > 0 ) {
if( threadIdx.x < i ) cache[ threadIdx.x ] += cache[ threadIdx.x + i ];
__syncthreads();
i /= 2; //not sure bitwise operations are actually faster
}
if( threadIdx.x == 0 ) out[ blockIdx.x ] = cache[ 0 ];
}
#endif
//------------------------------------------------------------------------------
int main( int argc, char** argv ) {
int numtasks = 0;
int task = 0;
// INIT ENV
MPI_( MPI_Init( &argc, &argv ) );
MPI_( MPI_Errhandler_set( MPI_COMM_WORLD, MPI_ERRORS_RETURN ) );
MPI_( MPI_Comm_size( MPI_COMM_WORLD, &numtasks ) );
MPI_( MPI_Comm_rank( MPI_COMM_WORLD, &task ) );
std::vector< char > nodeid( MPI_MAX_PROCESSOR_NAME, '\0' );
int len = 0;
MPI_( MPI_Get_processor_name( &nodeid[ 0 ], &len ) );
#ifdef MPI_RROBIN_
// RETRIEVE TOTAL NUMBER OF NODES USED, is there an easier way ?
// required to have each GPU assigned to the same number of processes
// on each node
const int SEND_NODE_TAG = 0x01;
//const int SEND_NUM_NODES = 0x10;
MPI_Request req;
MPI_( MPI_Isend( &nodeid[ 0 ], MPI_MAX_PROCESSOR_NAME, MPI_CHAR, 0, SEND_NODE_TAG,
MPI_COMM_WORLD, &req ) );
int node_count = -1;
if( task == 0 ) {
typedef std::set< std::string > NodeCount;
NodeCount ncount;
std::vector< char > n( MPI_MAX_PROCESSOR_NAME, '\0' );
MPI_Status s;
for( int r = 0; r != numtasks; ++r ) {
MPI_( MPI_Recv( &n[ 0 ], MPI_MAX_PROCESSOR_NAME, MPI_CHAR, r, SEND_NODE_TAG,
MPI_COMM_WORLD, &s ) );
ncount.insert( &n[ 0 ] );
}
node_count = int( ncount.size() );
#ifndef NO_LOG
std::cout << "Number of nodes: " << node_count << std::endl;
#endif
}
// SEND INFORMATION USED FOR GPU <-> RANK MAPPING TO EACH PROCESS
// Option 1: use scatter, useful only to send per-process specific information like e.g
// the GPU to use. It is in general a more robust method to have the root process
// compute the rank -> gpu map
//std::vector< int > sendbuf( numtasks, node_count );
// MPI Scatter parameters: address of send buffer,
// per-receiving process receive buffer size,...
// send buffer size = num tasks x per-receiving-process buffer size
//MPI_( MPI_Scatter( &sendbuf[ 0 ], 1, MPI_INT, &node_count, 1, MPI_INT, 0, MPI_COMM_WORLD ) );
// Option 2: simply broadcast the number of nodes
MPI_( MPI_Bcast( &node_count, 1, MPI_INT, 0, MPI_COMM_WORLD ) );
#endif
// PER TASK DATA INIT - in the real world this is the place where data are read from file
// through the MPI_File_ functions or, less likely received from the root process
const int ARRAY_SIZE = 1024 * 1024 * 256;// * 1024 * 256; // 256 Mi floats x 2 == 2 GiB total storage
// @WARNING: ARRAY_SIZE must be evenly divisible by the number of MPI processes
const int PER_MPI_TASK_ARRAY_SIZE = ARRAY_SIZE / numtasks;
if( ARRAY_SIZE % numtasks != 0 && task == 0 ) {
std::cerr << ARRAY_SIZE << " must be evenly divisible by the number of mpi processes" << std::endl;
MPI_( MPI_Abort( MPI_COMM_WORLD, 1 ) );
return 1;
}
std::vector< real_t > v1( ARRAY_SIZE / numtasks, 0. );
std::vector< real_t > v2( ARRAY_SIZE / numtasks, 0. );
for( int i = 0; i != PER_MPI_TASK_ARRAY_SIZE; ++i ) {
v1[ i ] = 1;
v2[ i ] = 1;
}
std::vector< double > begins( numtasks );
std::vector< double > ends( numtasks );
double begin = clock();
MPI_( MPI_Gather( &begin, 1, MPI_DOUBLE, &begins[ 0 ], 1, MPI_DOUBLE, 0, MPI_COMM_WORLD ) );
// PARALLEL DOT PRODUCT COMPUTATION
real_t partial_dot = 0.f;
#ifndef GPU
int t = 0;
for( t = 0; t != PER_MPI_TASK_ARRAY_SIZE; ++t ) {
partial_dot += v1[ t ] * v2[ t ];
}
//partial_dot = real_t( p );
#ifndef NO_LOG
std::ostringstream os;
os << &nodeid[ 0 ] << " - rank: " << task << " size: " << PER_MPI_TASK_ARRAY_SIZE
<< ' ' << t << " partial dot: " << partial_dot << '\n' ;
std::cout << os.str(); os.flush();
#endif
#else
// SELECT GPU = task % <num gpus on node>, note that with this
// approach it is possible to support nodes with different numbers of GPUs
int device_count = 0;
if( cudaGetDeviceCount( &device_count ) != cudaSuccess ) {
std::cerr << task << ' ' << cudaGetErrorString( cudaGetLastError() ) << " cudaGetDeviceCount FAILED\n";
MPI_( MPI_Abort( MPI_COMM_WORLD, 1 ) );
return 1;
}
#ifdef MPI_RROBIN_
const int device = ( task / node_count ) % device_count;
#else
const int device = task % device_count;
#endif
#ifndef NO_LOG
{
std::ostringstream os;
os << &nodeid[ 0 ] << " - rank: " << task << "\tGPU: " << device << '\n';
std::cout << os.str(); os.flush();
}
#endif
if( cudaSetDevice( device ) != cudaSuccess ) {
std::cerr << task << ' ' << cudaGetErrorString( cudaGetLastError() ) << " cudaSetDevice FAILED\n";
MPI_( MPI_Abort( MPI_COMM_WORLD, 1 ) );
return 1;
}
#ifdef NO_GPU_MALLOC_TIME
double malloc_begin = clock();
#endif
real_t* dev_v1 = 0;
real_t* dev_v2 = 0;
real_t* dev_dout = 0;
if( cudaMalloc( &dev_v1, sizeof( real_t ) * PER_MPI_TASK_ARRAY_SIZE ) != cudaSuccess ) {
std::cerr << task << ' ' << cudaGetErrorString( cudaGetLastError() ) << " cudaMalloc FAILED\n";
MPI_( MPI_Abort( MPI_COMM_WORLD, 1 ) );
return 1;
}
if( cudaMalloc( &dev_v2, sizeof( real_t ) * PER_MPI_TASK_ARRAY_SIZE ) != cudaSuccess ) {
std::cerr << task << ' ' << cudaGetErrorString( cudaGetLastError() ) << " cudaMalloc FAILED\n";
MPI_( MPI_Abort( MPI_COMM_WORLD, 1 ) );
return 1;
}
#ifdef NO_GPU_MALLOC_TIME
double malloc_end = clock();
begin += malloc_end - malloc_begin;
#endif
// MOVE DATA TO GPU
if( cudaMemcpy( dev_v1, &v1[ 0 ], sizeof( real_t ) * PER_MPI_TASK_ARRAY_SIZE,
cudaMemcpyHostToDevice ) != cudaSuccess ) {
std::cerr << task << ' ' << __LINE__ << ' ' << cudaGetErrorString( cudaGetLastError() ) << " cudaMemcpy FAILED\n";
MPI_( MPI_Abort( MPI_COMM_WORLD, 1 ) );
return 1;
}
if( cudaMemcpy( dev_v2, &v2[ 0 ], sizeof( real_t ) * PER_MPI_TASK_ARRAY_SIZE,
cudaMemcpyHostToDevice ) != cudaSuccess ) {
std::cerr << task << ' ' << __LINE__ << ' ' << cudaGetErrorString( cudaGetLastError() ) << " cudaMemcpy FAILED\n";
MPI_( MPI_Abort( MPI_COMM_WORLD, 1 ) );
return 1;
}
// INVOKE KERNEL
const int NUM_THREADS_PER_BLOCK = BLOCK_SIZE; // must match size of buffer used for reduction
const int NUM_BLOCKS = std::min( PER_MPI_TASK_ARRAY_SIZE / NUM_THREADS_PER_BLOCK,
0xffff ); // max number of blocks is 64k
#ifndef REDUCE_CPU
#ifdef NO_GPU_MALLOC_TIME
malloc_begin = clock();
#endif
if( cudaMalloc( &dev_dout, sizeof( real_t ) * 1 ) != cudaSuccess ) {
std::cerr << task << ' ' << __LINE__ << ' ' << cudaGetErrorString( cudaGetLastError() ) << " cudaMalloc FAILED\n";
MPI_( MPI_Abort( MPI_COMM_WORLD, 1 ) );
return 1;
}
// initialize partial dot product to zero
if( cudaMemset( dev_dout, 0, sizeof( real_t) ) != cudaSuccess ) {
std::cerr << task << ' ' << cudaGetErrorString( cudaGetLastError() ) << " cudaMemset FAILED\n";
MPI_( MPI_Abort( MPI_COMM_WORLD, 1 ) );
return 1;
}
#ifdef NO_GPU_MALLOC_TIME
malloc_end = clock();
begin += malloc_end - malloc_begin;
#endif
// actual on-device computation
dot_product_kernel<<<NUM_BLOCKS, NUM_THREADS_PER_BLOCK>>>( dev_v1, dev_v2, PER_MPI_TASK_ARRAY_SIZE, dev_dout );
// check for kernel launch errors: it is not possible to catch on-device execution errors but only
// if there was an error launching the kernel
if( cudaGetLastError() != cudaSuccess ) {
std::cerr << task << ' ' << cudaGetErrorString( cudaGetLastError() ) << " kernel launch FAILED\n";
MPI_( MPI_Abort( MPI_COMM_WORLD, 1 ) );
return 1;
}
// MOVE DATA TO CPU
cudaMemcpy( &partial_dot, dev_dout, sizeof( real_t ) * 1, cudaMemcpyDeviceToHost );
#else
const int PARTIAL_REDUCE_SIZE = NUM_BLOCKS;
if( cudaMalloc( &dev_dout, sizeof( real_t ) * PARTIAL_REDUCE_SIZE ) != cudaSuccess ) {
std::cerr << task << ' ' << __LINE__ << ' ' << cudaGetErrorString( cudaGetLastError() ) << " cudaMalloc FAILED\n";
MPI_( MPI_Abort( MPI_COMM_WORLD, 1 ) );
return 1;
}
partial_dot_product_kernel<<<NUM_BLOCKS, NUM_THREADS_PER_BLOCK>>>( dev_v1, dev_v2, PER_MPI_TASK_ARRAY_SIZE, dev_dout );
std::vector< real_t > rdot( PARTIAL_REDUCE_SIZE );
cudaMemcpy( &rdot[ 0 ], dev_dout, sizeof( real_t ) * PARTIAL_REDUCE_SIZE, cudaMemcpyDeviceToHost );
partial_dot = std::accumulate( rdot.begin(), rdot.end(), 0.f );
#endif
#ifndef NO_LOG
{
std::ostringstream os;
os << &nodeid[ 0 ] << " - rank: " << task << " partial dot: " << partial_dot << '\n' ;
std::cout << os.str(); os.flush();
}
#endif
#endif
// REDUCE (SUM) ALL ranks -> rank 0
real_t result = 0.;
MPI_( MPI_Reduce( &partial_dot, &result, 1, MPI_REAL_T_, MPI_SUM, 0, MPI_COMM_WORLD ) );
double end = clock();
MPI_( MPI_Gather( &end, 1, MPI_DOUBLE, &ends[ 0 ], 1, MPI_DOUBLE, 0, MPI_COMM_WORLD ) );
const std::pair< double, double > minmax( *std::min_element( begins.begin(), begins.end() ),
*std::max_element( ends.begin(), ends.end() ) );
// IF RANK == 0 -> PRINT RESULT
if( task == 0 ) {
std::cout << "dot product result: " << result << std::endl;
std::cout << "time: " << ( minmax.second - minmax.first ) / CLOCKS_PER_SEC << 's' << std::endl;
}
#ifdef GPU
// RELEASE GPU RESOURCES
cudaFree( dev_v1 );
cudaFree( dev_v2 );
cudaFree( dev_dout );
cudaDeviceReset();
#endif
// RELEASE MPI RESOURCES
MPI_( MPI_Finalize() );
return 0;
}
|
af9a2bb38af199c031b1dbb0a7984800f65fb99f.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* EDDL Library - European Distributed Deep Learning Library.
* Version: 0.7
 * copyright (c) 2020, Universidad Politécnica de Valencia (UPV), PRHLT Research Centre
* Date: April 2020
* Author: PRHLT Research Centre, UPV, ([email protected]), ([email protected])
* All rights reserved
*/
#include <string.h>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <hip/hip_runtime.h>
__global__ void gpu_isfinite(float *A, float *B, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
B[thread_id_x] = isfinite(A[thread_id_x]);
}
}
__global__ void gpu_isinf(float *A, float *B, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
B[thread_id_x] = isinf(A[thread_id_x]);
}
}
__global__ void gpu_isnan(float *A, float *B, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
B[thread_id_x] = isnan(A[thread_id_x]);
}
}
__global__ void gpu_isneginf(float *A, float *B, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
B[thread_id_x] = isinf(A[thread_id_x]) && A[thread_id_x] < 0.0f;
}
}
__global__ void gpu_isposinf(float *A, float *B, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
B[thread_id_x] = isinf(A[thread_id_x]) && A[thread_id_x] > 0.0f;
}
}
__global__ void gpu_logical_and(float *A, float *B, float *C, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
C[thread_id_x] = (bool)A[thread_id_x] & (bool)B[thread_id_x];
}
}
__global__ void gpu_logical_or(float *A, float *B, float *C, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
C[thread_id_x] = (bool)A[thread_id_x] | (bool)B[thread_id_x];
}
}
__global__ void gpu_logical_not(float *A, float *B, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
B[thread_id_x] = !((bool)A[thread_id_x]);
}
}
__global__ void gpu_logical_xor(float *A, float *B, float *C, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
C[thread_id_x] = (bool)A[thread_id_x] ^ (bool)B[thread_id_x];
}
}
__global__ void gpu_allclose(float *A, float *B, float rtol, float atol, bool equal_nan, long int size, bool &allclose){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
// if(!allclose) return; // Abort if there is a result
if (thread_id_x < size && allclose){
bool close = fabsf(A[thread_id_x] - B[thread_id_x]) <= (atol + rtol * fabsf(B[thread_id_x]));
if (!close){
allclose = false;
// return;
}
}
}
__global__ void gpu_isclose(float *A, float *B, float *C, float rtol, float atol, bool equal_nan, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
C[thread_id_x] = fabsf(A[thread_id_x] - B[thread_id_x]) <= (atol + rtol * fabsf(B[thread_id_x]));
}
}
__global__ void gpu_greater(float *A, float *B, float v, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
B[thread_id_x] = A[thread_id_x] > v;
}
}
__global__ void gpu_greater(float *A, float *B, float *C, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
C[thread_id_x] = A[thread_id_x] > B[thread_id_x];
}
}
__global__ void gpu_greater_equal(float *A, float *B, float v, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
B[thread_id_x] = A[thread_id_x] >= v;
}
}
__global__ void gpu_greater_equal(float *A, float *B, float *C, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
C[thread_id_x] = A[thread_id_x] >= B[thread_id_x];
}
}
__global__ void gpu_less(float *A, float *B, float v, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
B[thread_id_x] = A[thread_id_x] < v;
}
}
__global__ void gpu_less(float *A, float *B, float *C, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
C[thread_id_x] = A[thread_id_x] < B[thread_id_x];
}
}
__global__ void gpu_less_equal(float *A, float *B, float v, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
B[thread_id_x] = A[thread_id_x] <= v;
}
}
__global__ void gpu_less_equal(float *A, float *B, float *C, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
C[thread_id_x] = A[thread_id_x] <= B[thread_id_x];
}
}
__global__ void gpu_equal(float *A, float *B, float v, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
B[thread_id_x] = A[thread_id_x] == v;
}
}
__global__ void gpu_equal(float *A, float *B, float *C, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
C[thread_id_x] = A[thread_id_x] == B[thread_id_x];
}
}
__global__ void gpu_not_equal(float *A, float *B, float v, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
B[thread_id_x] = A[thread_id_x] != v;
}
}
__global__ void gpu_not_equal(float *A, float *B, float *C, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
C[thread_id_x] = A[thread_id_x] != B[thread_id_x];
}
}
|
af9a2bb38af199c031b1dbb0a7984800f65fb99f.cu
|
/*
* EDDL Library - European Distributed Deep Learning Library.
* Version: 0.7
* copyright (c) 2020, Universidad Politécnica de Valencia (UPV), PRHLT Research Centre
* Date: April 2020
* Author: PRHLT Research Centre, UPV, ([email protected]), ([email protected])
* All rights reserved
*/
#include <string.h>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <cuda.h>
__global__ void gpu_isfinite(float *A, float *B, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
B[thread_id_x] = isfinite(A[thread_id_x]);
}
}
__global__ void gpu_isinf(float *A, float *B, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
B[thread_id_x] = isinf(A[thread_id_x]);
}
}
__global__ void gpu_isnan(float *A, float *B, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
B[thread_id_x] = isnan(A[thread_id_x]);
}
}
__global__ void gpu_isneginf(float *A, float *B, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
B[thread_id_x] = isinf(A[thread_id_x]) && A[thread_id_x] < 0.0f;
}
}
__global__ void gpu_isposinf(float *A, float *B, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
B[thread_id_x] = isinf(A[thread_id_x]) && A[thread_id_x] > 0.0f;
}
}
__global__ void gpu_logical_and(float *A, float *B, float *C, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
C[thread_id_x] = (bool)A[thread_id_x] & (bool)B[thread_id_x];
}
}
__global__ void gpu_logical_or(float *A, float *B, float *C, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
C[thread_id_x] = (bool)A[thread_id_x] | (bool)B[thread_id_x];
}
}
__global__ void gpu_logical_not(float *A, float *B, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
B[thread_id_x] = !((bool)A[thread_id_x]);
}
}
__global__ void gpu_logical_xor(float *A, float *B, float *C, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
C[thread_id_x] = (bool)A[thread_id_x] ^ (bool)B[thread_id_x];
}
}
__global__ void gpu_allclose(float *A, float *B, float rtol, float atol, bool equal_nan, long int size, bool &allclose){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
// if(!allclose) return; // Abort if there is a result
if (thread_id_x < size && allclose){
bool close = fabsf(A[thread_id_x] - B[thread_id_x]) <= (atol + rtol * fabsf(B[thread_id_x]));
if (!close){
allclose = false;
// return;
}
}
}
__global__ void gpu_isclose(float *A, float *B, float *C, float rtol, float atol, bool equal_nan, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
C[thread_id_x] = fabsf(A[thread_id_x] - B[thread_id_x]) <= (atol + rtol * fabsf(B[thread_id_x]));
}
}
__global__ void gpu_greater(float *A, float *B, float v, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
B[thread_id_x] = A[thread_id_x] > v;
}
}
__global__ void gpu_greater(float *A, float *B, float *C, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
C[thread_id_x] = A[thread_id_x] > B[thread_id_x];
}
}
__global__ void gpu_greater_equal(float *A, float *B, float v, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
B[thread_id_x] = A[thread_id_x] >= v;
}
}
__global__ void gpu_greater_equal(float *A, float *B, float *C, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
C[thread_id_x] = A[thread_id_x] >= B[thread_id_x];
}
}
__global__ void gpu_less(float *A, float *B, float v, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
B[thread_id_x] = A[thread_id_x] < v;
}
}
__global__ void gpu_less(float *A, float *B, float *C, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
C[thread_id_x] = A[thread_id_x] < B[thread_id_x];
}
}
__global__ void gpu_less_equal(float *A, float *B, float v, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
B[thread_id_x] = A[thread_id_x] <= v;
}
}
__global__ void gpu_less_equal(float *A, float *B, float *C, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
C[thread_id_x] = A[thread_id_x] <= B[thread_id_x];
}
}
__global__ void gpu_equal(float *A, float *B, float v, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
B[thread_id_x] = A[thread_id_x] == v;
}
}
__global__ void gpu_equal(float *A, float *B, float *C, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
C[thread_id_x] = A[thread_id_x] == B[thread_id_x];
}
}
__global__ void gpu_not_equal(float *A, float *B, float v, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
B[thread_id_x] = A[thread_id_x] != v;
}
}
__global__ void gpu_not_equal(float *A, float *B, float *C, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id_x < size){
C[thread_id_x] = A[thread_id_x] != B[thread_id_x];
}
}
|
b7df33b5377b290f4a32f19dbf0d19bafd26f493.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <pthread.h>
#include <stdio.h>
#include "cuda_check.hpp"
const int N = 1 << 20;
__global__ void kernel(float *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < n; i += blockDim.x * gridDim.x) {
x[i] = sqrt(pow(3.14159,i));
}
}
void *launch_kernel(void *dummy)
{
float *data;
CUDA_CHECK(hipMalloc(&data, N * sizeof(float)));
hipLaunchKernelGGL(( kernel), dim3(1), dim3(64), 0, 0, data, N);
CUDA_CHECK(hipPeekAtLastError());
CUDA_CHECK(hipStreamSynchronize(0));
return NULL;
}
int main()
{
const int num_threads = 8;
pthread_t threads[num_threads];
for (int i = 0; i < num_threads; i++) {
if (pthread_create(&threads[i], NULL, launch_kernel, 0)) {
fprintf(stderr, "Error creating threadn");
return 1;
}
}
for (int i = 0; i < num_threads; i++) {
if(pthread_join(threads[i], NULL)) {
fprintf(stderr, "Error joining threadn");
return 2;
}
}
hipDeviceReset();
return 0;
}
|
b7df33b5377b290f4a32f19dbf0d19bafd26f493.cu
|
#include <pthread.h>
#include <stdio.h>
#include "cuda_check.hpp"
const int N = 1 << 20;
__global__ void kernel(float *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < n; i += blockDim.x * gridDim.x) {
x[i] = sqrt(pow(3.14159,i));
}
}
void *launch_kernel(void *dummy)
{
float *data;
CUDA_CHECK(cudaMalloc(&data, N * sizeof(float)));
kernel<<<1, 64>>>(data, N);
CUDA_CHECK(cudaPeekAtLastError());
CUDA_CHECK(cudaStreamSynchronize(0));
return NULL;
}
int main()
{
const int num_threads = 8;
pthread_t threads[num_threads];
for (int i = 0; i < num_threads; i++) {
if (pthread_create(&threads[i], NULL, launch_kernel, 0)) {
fprintf(stderr, "Error creating threadn");
return 1;
}
}
for (int i = 0; i < num_threads; i++) {
if(pthread_join(threads[i], NULL)) {
fprintf(stderr, "Error joining threadn");
return 2;
}
}
cudaDeviceReset();
return 0;
}
|
0eb5da880073172d92b65de867951179918ab0a8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "helper_cuda.h"
#include "helper_functions.h"
#include <stdio.h>
#include "precomputation.cuh"
__global__ void RGradient_kernel(const double *d_InputIMGR, const double *d_InputIMGT, const double* __restrict__ d_InputBiubicMatrix,
double *d_OutputIMGR, double *d_OutputIMGT,
double *d_OutputIMGRx, double *d_OutputIMGRy,
double *d_OutputIMGTx, double *d_OutputIMGTy, double *d_OutputIMGTxy, double *d_OutputdtBicubic,
int width, int height)
{
//The size of input images
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
//Temp arrays
double d_TaoT[16];
double d_AlphaT[16];
//The rows and cols of output matrix.
if((row < height) && (col < width)){
d_OutputIMGR[row*width+col] = d_InputIMGR[(row+1)*(width+2)+col+1];
d_OutputIMGRx[row*width+col] = 0.5 * (d_InputIMGR[(row+1)*(width+2)+col+2] - d_InputIMGR[(row+1)*(width+2)+col]);
d_OutputIMGRy[row*width+col] = 0.5 * (d_InputIMGR[(row+2)*(width+2)+col+1] - d_InputIMGR[(row)*(width+2)+col+1]);
d_OutputIMGT[row*width+col] = d_InputIMGT[(row+1)*(width+2)+col+1];
d_OutputIMGTx[row*width+col] = 0.5 * (d_InputIMGT[(row+1)*(width+2)+col+2] -d_InputIMGT[(row+1)*(width+2)+col]);
d_OutputIMGTy[row*width+col] = 0.5 * (d_InputIMGT[(row+2)*(width+2)+col+1] - d_InputIMGT[(row)*(width+2)+col+1]);
d_OutputIMGTxy[row*width+col]= 0.25 * (d_InputIMGT[(row+2)*(width+2)+col+2] - d_InputIMGT[(row)*(width+2)+col+2] -d_InputIMGT[(row+2)*(width+2)+col] + d_InputIMGT[(row)*(width+2)+col]);
}
__syncthreads();
if((row < height-1) && (col < width-1)){
d_TaoT[0] = d_OutputIMGT[row*(width)+col];
d_TaoT[1] = d_OutputIMGT[row*(width)+col+1];
d_TaoT[2] = d_OutputIMGT[(row+1)*(width)+col];
d_TaoT[3] = d_OutputIMGT[(row+1)*(width)+col+1];
d_TaoT[4] = d_OutputIMGTx[row*(width)+col];
d_TaoT[5] = d_OutputIMGTx[row*(width)+col+1];
d_TaoT[6] = d_OutputIMGTx[(row+1)*(width)+col];
d_TaoT[7] = d_OutputIMGTx[(row+1)*(width)+col+1];
d_TaoT[8] = d_OutputIMGTy[row*(width)+col];
d_TaoT[9] = d_OutputIMGTy[row*(width)+col+1];
d_TaoT[10] = d_OutputIMGTy[(row+1)*(width)+col];
d_TaoT[11] = d_OutputIMGTy[(row+1)*(width)+col+1];
d_TaoT[12] = d_OutputIMGTxy[row*(width)+col];
d_TaoT[13] = d_OutputIMGTxy[row*(width)+col+1];
d_TaoT[14] = d_OutputIMGTxy[(row+1)*(width)+col];
d_TaoT[15] = d_OutputIMGTxy[(row+1)*(width)+col+1];
for(int k=0; k<16; k++){
d_AlphaT[k] = 0.0;
for(int l=0; l<16; l++){
d_AlphaT[k] += (d_InputBiubicMatrix[k*16+l] * d_TaoT[l]);
}
}
d_OutputdtBicubic[((row*(width)+col)*4+0)*4+0] = d_AlphaT[0];
d_OutputdtBicubic[((row*(width)+col)*4+0)*4+1] = d_AlphaT[1];
d_OutputdtBicubic[((row*(width)+col)*4+0)*4+2] = d_AlphaT[2];
d_OutputdtBicubic[((row*(width)+col)*4+0)*4+3] = d_AlphaT[3];
d_OutputdtBicubic[((row*(width)+col)*4+1)*4+0] = d_AlphaT[4];
d_OutputdtBicubic[((row*(width)+col)*4+1)*4+1] = d_AlphaT[5];
d_OutputdtBicubic[((row*(width)+col)*4+1)*4+2] = d_AlphaT[6];
d_OutputdtBicubic[((row*(width)+col)*4+1)*4+3] = d_AlphaT[7];
d_OutputdtBicubic[((row*(width)+col)*4+2)*4+0] = d_AlphaT[8];
d_OutputdtBicubic[((row*(width)+col)*4+2)*4+1] = d_AlphaT[9];
d_OutputdtBicubic[((row*(width)+col)*4+2)*4+2] = d_AlphaT[10];
d_OutputdtBicubic[((row*(width)+col)*4+2)*4+3] = d_AlphaT[11];
d_OutputdtBicubic[((row*(width)+col)*4+3)*4+0] = d_AlphaT[12];
d_OutputdtBicubic[((row*(width)+col)*4+3)*4+1] = d_AlphaT[13];
d_OutputdtBicubic[((row*(width)+col)*4+3)*4+2] = d_AlphaT[14];
d_OutputdtBicubic[((row*(width)+col)*4+3)*4+3] = d_AlphaT[15];
}
else if((row < height) && (col < width)){ // only in-bounds threads may zero their output entries
d_OutputdtBicubic[((row*(width)+col)*4+0)*4+0] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+0)*4+1] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+0)*4+2] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+0)*4+3] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+1)*4+0] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+1)*4+1] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+1)*4+2] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+1)*4+3] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+2)*4+0] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+2)*4+1] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+2)*4+2] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+2)*4+3] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+3)*4+0] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+3)*4+1] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+3)*4+2] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+3)*4+3] = 0;
}
}
void launch_kernel(const double *h_InputIMGR, const double *h_InputIMGT,
double *h_OutputIMGR, double *h_OutputIMGT,
double *h_OutputIMGRx, double *h_OutputIMGRy,
double *h_OutputIMGTx, double *h_OutputIMGTy, double *h_OutputIMGTxy, double *h_OutputdTBicubic,
int width, int height)
{
float totalp_time, total_time, compute_time;
StopWatchWin totalp, total, compute;
double *d_InputIMGR, *d_InputIMGT, *d_InputBiubicMatrix;
double *d_OutputIMGR, *d_OutputIMGT, *d_OutputIMGRx, *d_OutputIMGRy, *d_OutputIMGTx, *d_OutputIMGTy, *d_OutputIMGTxy;
double *d_OutputdTBicubic;
const static double h_InputBicubicMatrix[16*16] = {
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ,
-3, 3, 0, 0, -2, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2, -2, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0 ,
0, 0, 0, 0, 0, 0, 0, 0, -3, 3, 0, 0, -2, -1, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 2, -2, 0, 0, 1, 1, 0, 0 ,
-3, 0, 3, 0, 0, 0, 0, 0, -2, 0, -1, 0, 0, 0, 0, 0,
0, 0, 0, 0, -3, 0, 3, 0, 0, 0, 0, 0, -2, 0, -1, 0,
9, -9, -9, 9, 6, 3, -6, -3, 6, -6, 3, -3, 4, 2, 2, 1 ,
-6, 6, 6, -6, -3, -3, 3, 3, -4, 4, -2, 2, -2, -2, -1, -1,
2, 0, -2, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0 ,
0, 0, 0, 0, 2, 0, -2, 0, 0, 0, 0, 0, 1, 0, 1, 0 ,
-6, 6, 6, -6, -4, -2, 4, 2, -3, 3, -3, 3, -2, -1, -2, -1,
4, -4, -4, 4, 2, 2, -2, -2, 2, -2, 2, -2, 1, 1, 1, 1
};
totalp.start();
checkCudaErrors(hipMalloc((void**)&d_InputIMGR, (width+2)*(height+2)*sizeof(double)));
checkCudaErrors(hipMalloc((void**)&d_InputIMGT, (width+2)*(height+2)*sizeof(double)));
checkCudaErrors(hipMalloc((void**)&d_InputBiubicMatrix, 16*16*sizeof(double)));
checkCudaErrors(hipMemcpy(d_InputIMGR,h_InputIMGR,(width+2)*(height+2)*sizeof(double),hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_InputIMGT,h_InputIMGT,(width+2)*(height+2)*sizeof(double),hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_InputBiubicMatrix,h_InputBicubicMatrix,16*16*sizeof(double),hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc((void**)&d_OutputIMGR, width*height*sizeof(double)));
checkCudaErrors(hipMalloc((void**)&d_OutputIMGT, width*height*sizeof(double)));
checkCudaErrors(hipMalloc((void**)&d_OutputIMGRx, width*height*sizeof(double)));
checkCudaErrors(hipMalloc((void**)&d_OutputIMGRy, width*height*sizeof(double)));
checkCudaErrors(hipMalloc((void**)&d_OutputIMGTx, width*height*sizeof(double)));
checkCudaErrors(hipMalloc((void**)&d_OutputIMGTy, width*height*sizeof(double)));
checkCudaErrors(hipMalloc((void**)&d_OutputIMGTxy, width*height*sizeof(double)));
checkCudaErrors(hipMalloc((void**)&d_OutputdTBicubic, width*height*4*4*sizeof(double)));
dim3 dimB(BLOCK_SIZE,BLOCK_SIZE,1);
dim3 dimG((width-1)/BLOCK_SIZE+1,(height-1)/BLOCK_SIZE+1,1);
compute.start();
hipLaunchKernelGGL(( RGradient_kernel), dim3(dimG), dim3(dimB), 0, 0, d_InputIMGR,d_InputIMGT,d_InputBiubicMatrix,
d_OutputIMGR, d_OutputIMGT,
d_OutputIMGRx, d_OutputIMGRy,
d_OutputIMGTx, d_OutputIMGTy, d_OutputIMGTxy,d_OutputdTBicubic,
width, height);
compute.stop();
compute_time = compute.getTime();
total.start();
checkCudaErrors(hipMemcpy(h_OutputIMGR,d_OutputIMGR,width*height*sizeof(double),hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(h_OutputIMGT,d_OutputIMGT,width*height*sizeof(double),hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(h_OutputIMGRx,d_OutputIMGRx,width*height*sizeof(double),hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(h_OutputIMGRy,d_OutputIMGRy,width*height*sizeof(double),hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(h_OutputIMGTx,d_OutputIMGTx,width*height*sizeof(double),hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(h_OutputIMGTy,d_OutputIMGTy,width*height*sizeof(double),hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(h_OutputIMGTxy,d_OutputIMGTxy,width*height*sizeof(double),hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(h_OutputdTBicubic,d_OutputdTBicubic,width*height*4*4*sizeof(double),hipMemcpyDeviceToHost));
total.stop();
total_time = total.getTime();
hipFree(d_InputIMGR);
hipFree(d_InputIMGT);
hipFree(d_InputBiubicMatrix);
checkCudaErrors(hipFree(d_OutputIMGR));
checkCudaErrors(hipFree(d_OutputIMGT));
checkCudaErrors(hipFree(d_OutputIMGRx));
checkCudaErrors(hipFree(d_OutputIMGRy));
checkCudaErrors(hipFree(d_OutputIMGTx));
checkCudaErrors(hipFree(d_OutputIMGTy));
checkCudaErrors(hipFree(d_OutputIMGTxy));
checkCudaErrors(hipFree(d_OutputdTBicubic));
totalp.stop();
totalp_time = totalp.getTime();
printf("Total total time: %f\n",totalp_time);
printf("\nTotal time: %f\n",total_time);
printf("Compute time: %f\n",compute_time);
}
void initialize_CUDA()
{
hipFree(0);
}
|
0eb5da880073172d92b65de867951179918ab0a8.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "helper_cuda.h"
#include "helper_functions.h"
#include <stdio.h>
#include "precomputation.cuh"
__global__ void RGradient_kernel(const double *d_InputIMGR, const double *d_InputIMGT, const double* __restrict__ d_InputBiubicMatrix,
double *d_OutputIMGR, double *d_OutputIMGT,
double *d_OutputIMGRx, double *d_OutputIMGRy,
double *d_OutputIMGTx, double *d_OutputIMGTy, double *d_OutputIMGTxy, double *d_OutputdtBicubic,
int width, int height)
{
//The size of input images
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
//Temp arrays
double d_TaoT[16];
double d_AlphaT[16];
//The rows and cols of output matrix.
if((row < height) && (col < width)){
d_OutputIMGR[row*width+col] = d_InputIMGR[(row+1)*(width+2)+col+1];
d_OutputIMGRx[row*width+col] = 0.5 * (d_InputIMGR[(row+1)*(width+2)+col+2] - d_InputIMGR[(row+1)*(width+2)+col]);
d_OutputIMGRy[row*width+col] = 0.5 * (d_InputIMGR[(row+2)*(width+2)+col+1] - d_InputIMGR[(row)*(width+2)+col+1]);
d_OutputIMGT[row*width+col] = d_InputIMGT[(row+1)*(width+2)+col+1];
d_OutputIMGTx[row*width+col] = 0.5 * (d_InputIMGT[(row+1)*(width+2)+col+2] -d_InputIMGT[(row+1)*(width+2)+col]);
d_OutputIMGTy[row*width+col] = 0.5 * (d_InputIMGT[(row+2)*(width+2)+col+1] - d_InputIMGT[(row)*(width+2)+col+1]);
d_OutputIMGTxy[row*width+col]= 0.25 * (d_InputIMGT[(row+2)*(width+2)+col+2] - d_InputIMGT[(row)*(width+2)+col+2] -d_InputIMGT[(row+2)*(width+2)+col] + d_InputIMGT[(row)*(width+2)+col]);
}
__syncthreads();
if((row < height-1) && (col < width-1)){
d_TaoT[0] = d_OutputIMGT[row*(width)+col];
d_TaoT[1] = d_OutputIMGT[row*(width)+col+1];
d_TaoT[2] = d_OutputIMGT[(row+1)*(width)+col];
d_TaoT[3] = d_OutputIMGT[(row+1)*(width)+col+1];
d_TaoT[4] = d_OutputIMGTx[row*(width)+col];
d_TaoT[5] = d_OutputIMGTx[row*(width)+col+1];
d_TaoT[6] = d_OutputIMGTx[(row+1)*(width)+col];
d_TaoT[7] = d_OutputIMGTx[(row+1)*(width)+col+1];
d_TaoT[8] = d_OutputIMGTy[row*(width)+col];
d_TaoT[9] = d_OutputIMGTy[row*(width)+col+1];
d_TaoT[10] = d_OutputIMGTy[(row+1)*(width)+col];
d_TaoT[11] = d_OutputIMGTy[(row+1)*(width)+col+1];
d_TaoT[12] = d_OutputIMGTxy[row*(width)+col];
d_TaoT[13] = d_OutputIMGTxy[row*(width)+col+1];
d_TaoT[14] = d_OutputIMGTxy[(row+1)*(width)+col];
d_TaoT[15] = d_OutputIMGTxy[(row+1)*(width)+col+1];
for(int k=0; k<16; k++){
d_AlphaT[k] = 0.0;
for(int l=0; l<16; l++){
d_AlphaT[k] += (d_InputBiubicMatrix[k*16+l] * d_TaoT[l]);
}
}
d_OutputdtBicubic[((row*(width)+col)*4+0)*4+0] = d_AlphaT[0];
d_OutputdtBicubic[((row*(width)+col)*4+0)*4+1] = d_AlphaT[1];
d_OutputdtBicubic[((row*(width)+col)*4+0)*4+2] = d_AlphaT[2];
d_OutputdtBicubic[((row*(width)+col)*4+0)*4+3] = d_AlphaT[3];
d_OutputdtBicubic[((row*(width)+col)*4+1)*4+0] = d_AlphaT[4];
d_OutputdtBicubic[((row*(width)+col)*4+1)*4+1] = d_AlphaT[5];
d_OutputdtBicubic[((row*(width)+col)*4+1)*4+2] = d_AlphaT[6];
d_OutputdtBicubic[((row*(width)+col)*4+1)*4+3] = d_AlphaT[7];
d_OutputdtBicubic[((row*(width)+col)*4+2)*4+0] = d_AlphaT[8];
d_OutputdtBicubic[((row*(width)+col)*4+2)*4+1] = d_AlphaT[9];
d_OutputdtBicubic[((row*(width)+col)*4+2)*4+2] = d_AlphaT[10];
d_OutputdtBicubic[((row*(width)+col)*4+2)*4+3] = d_AlphaT[11];
d_OutputdtBicubic[((row*(width)+col)*4+3)*4+0] = d_AlphaT[12];
d_OutputdtBicubic[((row*(width)+col)*4+3)*4+1] = d_AlphaT[13];
d_OutputdtBicubic[((row*(width)+col)*4+3)*4+2] = d_AlphaT[14];
d_OutputdtBicubic[((row*(width)+col)*4+3)*4+3] = d_AlphaT[15];
}
else if((row < height) && (col < width)){ // only in-bounds threads may zero their output entries
d_OutputdtBicubic[((row*(width)+col)*4+0)*4+0] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+0)*4+1] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+0)*4+2] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+0)*4+3] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+1)*4+0] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+1)*4+1] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+1)*4+2] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+1)*4+3] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+2)*4+0] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+2)*4+1] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+2)*4+2] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+2)*4+3] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+3)*4+0] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+3)*4+1] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+3)*4+2] = 0;
d_OutputdtBicubic[((row*(width)+col)*4+3)*4+3] = 0;
}
}
void launch_kernel(const double *h_InputIMGR, const double *h_InputIMGT,
double *h_OutputIMGR, double *h_OutputIMGT,
double *h_OutputIMGRx, double *h_OutputIMGRy,
double *h_OutputIMGTx, double *h_OutputIMGTy, double *h_OutputIMGTxy, double *h_OutputdTBicubic,
int width, int height)
{
float totalp_time, total_time, compute_time;
StopWatchWin totalp, total, compute;
double *d_InputIMGR, *d_InputIMGT, *d_InputBiubicMatrix;
double *d_OutputIMGR, *d_OutputIMGT, *d_OutputIMGRx, *d_OutputIMGRy, *d_OutputIMGTx, *d_OutputIMGTy, *d_OutputIMGTxy;
double *d_OutputdTBicubic;
const static double h_InputBicubicMatrix[16*16] = {
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ,
-3, 3, 0, 0, -2, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2, -2, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0 ,
0, 0, 0, 0, 0, 0, 0, 0, -3, 3, 0, 0, -2, -1, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 2, -2, 0, 0, 1, 1, 0, 0 ,
-3, 0, 3, 0, 0, 0, 0, 0, -2, 0, -1, 0, 0, 0, 0, 0,
0, 0, 0, 0, -3, 0, 3, 0, 0, 0, 0, 0, -2, 0, -1, 0,
9, -9, -9, 9, 6, 3, -6, -3, 6, -6, 3, -3, 4, 2, 2, 1 ,
-6, 6, 6, -6, -3, -3, 3, 3, -4, 4, -2, 2, -2, -2, -1, -1,
2, 0, -2, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0 ,
0, 0, 0, 0, 2, 0, -2, 0, 0, 0, 0, 0, 1, 0, 1, 0 ,
-6, 6, 6, -6, -4, -2, 4, 2, -3, 3, -3, 3, -2, -1, -2, -1,
4, -4, -4, 4, 2, 2, -2, -2, 2, -2, 2, -2, 1, 1, 1, 1
};
totalp.start();
checkCudaErrors(cudaMalloc((void**)&d_InputIMGR, (width+2)*(height+2)*sizeof(double)));
checkCudaErrors(cudaMalloc((void**)&d_InputIMGT, (width+2)*(height+2)*sizeof(double)));
checkCudaErrors(cudaMalloc((void**)&d_InputBiubicMatrix, 16*16*sizeof(double)));
checkCudaErrors(cudaMemcpy(d_InputIMGR,h_InputIMGR,(width+2)*(height+2)*sizeof(double),cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_InputIMGT,h_InputIMGT,(width+2)*(height+2)*sizeof(double),cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_InputBiubicMatrix,h_InputBicubicMatrix,16*16*sizeof(double),cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc((void**)&d_OutputIMGR, width*height*sizeof(double)));
checkCudaErrors(cudaMalloc((void**)&d_OutputIMGT, width*height*sizeof(double)));
checkCudaErrors(cudaMalloc((void**)&d_OutputIMGRx, width*height*sizeof(double)));
checkCudaErrors(cudaMalloc((void**)&d_OutputIMGRy, width*height*sizeof(double)));
checkCudaErrors(cudaMalloc((void**)&d_OutputIMGTx, width*height*sizeof(double)));
checkCudaErrors(cudaMalloc((void**)&d_OutputIMGTy, width*height*sizeof(double)));
checkCudaErrors(cudaMalloc((void**)&d_OutputIMGTxy, width*height*sizeof(double)));
checkCudaErrors(cudaMalloc((void**)&d_OutputdTBicubic, width*height*4*4*sizeof(double)));
dim3 dimB(BLOCK_SIZE,BLOCK_SIZE,1);
dim3 dimG((width-1)/BLOCK_SIZE+1,(height-1)/BLOCK_SIZE+1,1);
compute.start();
RGradient_kernel<<<dimG, dimB>>>(d_InputIMGR,d_InputIMGT,d_InputBiubicMatrix,
d_OutputIMGR, d_OutputIMGT,
d_OutputIMGRx, d_OutputIMGRy,
d_OutputIMGTx, d_OutputIMGTy, d_OutputIMGTxy,d_OutputdTBicubic,
width, height);
compute.stop();
compute_time = compute.getTime();
total.start();
checkCudaErrors(cudaMemcpy(h_OutputIMGR,d_OutputIMGR,width*height*sizeof(double),cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(h_OutputIMGT,d_OutputIMGT,width*height*sizeof(double),cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(h_OutputIMGRx,d_OutputIMGRx,width*height*sizeof(double),cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(h_OutputIMGRy,d_OutputIMGRy,width*height*sizeof(double),cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(h_OutputIMGTx,d_OutputIMGTx,width*height*sizeof(double),cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(h_OutputIMGTy,d_OutputIMGTy,width*height*sizeof(double),cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(h_OutputIMGTxy,d_OutputIMGTxy,width*height*sizeof(double),cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(h_OutputdTBicubic,d_OutputdTBicubic,width*height*4*4*sizeof(double),cudaMemcpyDeviceToHost));
total.stop();
total_time = total.getTime();
cudaFree(d_InputIMGR);
cudaFree(d_InputIMGT);
cudaFree(d_InputBiubicMatrix);
checkCudaErrors(cudaFree(d_OutputIMGR));
checkCudaErrors(cudaFree(d_OutputIMGT));
checkCudaErrors(cudaFree(d_OutputIMGRx));
checkCudaErrors(cudaFree(d_OutputIMGRy));
checkCudaErrors(cudaFree(d_OutputIMGTx));
checkCudaErrors(cudaFree(d_OutputIMGTy));
checkCudaErrors(cudaFree(d_OutputIMGTxy));
checkCudaErrors(cudaFree(d_OutputdTBicubic));
totalp.stop();
totalp_time = totalp.getTime();
printf("Total total time: %f\n",totalp_time);
printf("\nTotal time: %f\n",total_time);
printf("Compute time: %f\n",compute_time);
}
void initialize_CUDA()
{
//A no-op cudaFree(0) forces the CUDA context to be created up front,
//so the one-time initialization cost is not charged to the timers above.
cudaFree(0);
}
|
d0b5fe464612d624d386546d25f4c03e2151049d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "sieve_cuda.cuh"
#include "../support/cuda_error_output.h"
//Private------------------------------------------------------------------------------------------
//Protected----------------------------------------------------------------------------------------
void SieveCUDA::AllocateGPUMemory() {
//CUDA Memory Notes:
// A single CUDA Block can run 1024 threads.
// Each block shares:
// - The global memory (slow)
// - The constant memory (fast)
// Each block has:
// - A shared memory that can be accessed by the threads in the block
// - A set of registers (for variables and the like, I presume)
// > NTS: Keep in mind that since each thread can write into the registers
// > the numbers of variables declared in the kernel functions are multiplied
// > by the number of threads. Over-declaration of variables eats memory fast.
//
// Global memory capacity (bytes): 3221225472
// Shared memory capacity (bytes): 49152
//Step 0.1:
// NTS: Do we benefit from contiguous allocation?
//Allocate memory on device
CUDAErrorOutput(
hipMalloc(
(void**)&(this->device_mem_ptr_),
this->sieve_mem_ptr_->BytesAllocated()
),
"hipMalloc()", __FUNCTION__
);
}
void SieveCUDA::DeallocateGPUMemory() {
//Deallocate the memory on device
CUDAErrorOutput(
hipFree(this->device_mem_ptr_),
"hipFree()", __FUNCTION__
);
this->device_mem_ptr_ = nullptr;
}
void SieveCUDA::UploadMemory() {
//Copy data to memory
CUDAErrorOutput(
hipMemcpy(
this->device_mem_ptr_, //Target
this->sieve_mem_ptr_->getMemPtr(), //Source
this->sieve_mem_ptr_->BytesAllocated(), //Byte count
hipMemcpyHostToDevice //Transfer type
),
"hipMemcpy()", __FUNCTION__
);
}
void SieveCUDA::DownloadMemory() {
//Download data into memory structure
CUDAErrorOutput(
hipMemcpy(
this->sieve_mem_ptr_->getMemPtr(), //Target
this->device_mem_ptr_, //Source
this->sieve_mem_ptr_->BytesAllocated(), //Byte count
hipMemcpyDeviceToHost //Transfer type
),
"hipMemcpy()", __FUNCTION__
);
}
void SieveCUDA::LaunchKernel(size_t in_sieve_start) {
// Launch a kernel on the GPU with one thread for each element.
// -> block
// -> threads per block (max 1024)
// -> size of shared memory
//NTS: unsigned int, not size_t. Need to fix safe conversion?
// Excess threads are fine; the remainder is always less than 1024, so it fits in one block
unsigned int full_blocks = this->sieve_mem_ptr_->NumberCapacity() / 1024; //Number of full blocks
unsigned int excess_threads = this->sieve_mem_ptr_->NumberCapacity() % 1024; //Number of threads not handled by full blocks
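//Illustrative arithmetic (hypothetical capacity, not a value from this project): a
//NumberCapacity() of 2500000 splits into 2441 full 1024-thread blocks plus 416 excess
//threads, which are handled by the single partial block launched further down.
static_assert(2500000 / 1024 == 2441 && 2500000 % 1024 == 416,
"illustrative launch-split arithmetic");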
//size_t bytes = this->mem_class_ptr_->BytesAllocated(); //Number of bytes to be in shared memory //NTS: Everything is in global, no shared needed
//Get where sieving should end
size_t n = in_sieve_start + this->sieve_mem_ptr_->NumberCapacity();
//If there are to be several kernel launches we need to figure out
//where the subsequent blocks should start
size_t alt_start = in_sieve_start;
//Launch full blocks with 1024 threads
//NTS: A kernel is not limited to 48 blocks; the grid x-dimension limit is 2^31 - 1
unsigned int max_blocks = 2147483647;
//size_t max_blocks = 2;
while (full_blocks > 0) {
//Determine number of blocks in launch
unsigned int blocks_in_launch = (full_blocks > max_blocks) ? max_blocks : full_blocks;
//Launch kernel
//std::cout << ">>\tLaunching [" << blocks_in_launch << " of " << full_blocks << "] full blocks\n";
//SundaramKernel <<<blocks_in_launch, 1024, 0>>> (alt_start, n, this->device_mem_ptr_);
this->SieveKernel(blocks_in_launch, 1024, alt_start, n, this->device_mem_ptr_);
//Decrease number of remaining blocks
//Move kernel starting value
full_blocks -= blocks_in_launch;
alt_start += blocks_in_launch * 1024;
// Check for any errors launching the kernel
CUDAErrorOutput(
hipGetLastError(),
"<full blocks launch>",
__FUNCTION__
);
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
CUDAErrorOutput(
hipDeviceSynchronize(),
"hipDeviceSynchronize()",
__FUNCTION__
);
}
//Launch leftover threads in 1 block //NTS: Will run sequentially, thus start and end must be altered
if (excess_threads > 0) {
//std::cout << ">>\tLaunching [" << excess_threads << "] excess threads\n";
//SundaramKernel <<<1, excess_threads, 0>>> (alt_start, n, this->device_mem_ptr_);
this->SieveKernel(1, excess_threads, alt_start, n, this->device_mem_ptr_);
// Check for any errors launching the kernel
CUDAErrorOutput(
hipGetLastError(),
"<excess thread launch>",
__FUNCTION__
);
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
CUDAErrorOutput(
hipDeviceSynchronize(),
"hipDeviceSynchronize()",
__FUNCTION__
);
}
}
//Public-------------------------------------------------------------------------------------------
SieveCUDA::SieveCUDA() {
}
SieveCUDA::~SieveCUDA() {
//NTS: Do not delete this ptr here
this->sieve_mem_ptr_ = nullptr;
}
void SieveCUDA::LinkMemory(PrimeMemoryBool * in_ptr) {
this->sieve_mem_ptr_ = in_ptr;
}
|
d0b5fe464612d624d386546d25f4c03e2151049d.cu
|
#include "sieve_cuda.cuh"
#include "../support/cuda_error_output.h"
//Private------------------------------------------------------------------------------------------
//Protected----------------------------------------------------------------------------------------
void SieveCUDA::AllocateGPUMemory() {
//CUDA Memory Notes:
// A single CUDA Block can run 1024 threads.
// Each block shares:
// - The global memory (slow)
// - The constant memory (fast)
// Each block has:
// - A shared memory that can be accessed by the threads in the block
// - A set of registers (for variables and the like, I presume)
// > NTS: Keep in mind that since each thread can write into the registers
// > the numbers of variables declared in the kernel functions are multiplied
// > by the number of threads. Over-declaration of variables eats memory fast.
//
// Global memory capacity (bytes): 3221225472
// Shared memory capacity (bytes): 49152
//Step 0.1:
// NTS: Do we benefit from contiguous allocation?
//Allocate memory on device
CUDAErrorOutput(
cudaMalloc(
(void**)&(this->device_mem_ptr_),
this->sieve_mem_ptr_->BytesAllocated()
),
"cudaMalloc()", __FUNCTION__
);
}
void SieveCUDA::DeallocateGPUMemory() {
//Deallocate the memory on device
CUDAErrorOutput(
cudaFree(this->device_mem_ptr_),
"cudaFree()", __FUNCTION__
);
this->device_mem_ptr_ = nullptr;
}
void SieveCUDA::UploadMemory() {
//Copy data to memory
CUDAErrorOutput(
cudaMemcpy(
this->device_mem_ptr_, //Target
this->sieve_mem_ptr_->getMemPtr(), //Source
this->sieve_mem_ptr_->BytesAllocated(), //Byte count
cudaMemcpyHostToDevice //Transfer type
),
"cudaMemcpy()", __FUNCTION__
);
}
void SieveCUDA::DownloadMemory() {
//Download data into memory structure
CUDAErrorOutput(
cudaMemcpy(
this->sieve_mem_ptr_->getMemPtr(), //Target
this->device_mem_ptr_, //Source
this->sieve_mem_ptr_->BytesAllocated(), //Byte count
cudaMemcpyDeviceToHost //Transfer type
),
"cudaMemcpy()", __FUNCTION__
);
}
void SieveCUDA::LaunchKernel(size_t in_sieve_start) {
// Launch a kernel on the GPU with one thread for each element.
// -> block
// -> threads per block (max 1024)
// -> size of shared memory
//NTS: unsigned int, not size_t. Need to fix safe conversion?
// Excess threads are fine; the remainder is always less than 1024, so it fits in one block
unsigned int full_blocks = this->sieve_mem_ptr_->NumberCapacity() / 1024; //Number of full blocks
unsigned int excess_threads = this->sieve_mem_ptr_->NumberCapacity() % 1024; //Number of threads not handled by full blocks
//size_t bytes = this->mem_class_ptr_->BytesAllocated(); //Number of bytes to be in shared memory //NTS: Everything is in global, no shared needed
//Get where sieving should end
size_t n = in_sieve_start + this->sieve_mem_ptr_->NumberCapacity();
//If there are to be several kernel launches we need to figure out
//where the subsequent blocks should start
size_t alt_start = in_sieve_start;
//Launch full blocks with 1024 threads
//NTS: A kernel is not limited to 48 blocks; the grid x-dimension limit is 2^31 - 1
unsigned int max_blocks = 2147483647;
//size_t max_blocks = 2;
while (full_blocks > 0) {
//Determine number of blocks in launch
unsigned int blocks_in_launch = (full_blocks > max_blocks) ? max_blocks : full_blocks;
//Launch kernel
//std::cout << ">>\tLaunching [" << blocks_in_launch << " of " << full_blocks << "] full blocks\n";
//SundaramKernel <<<blocks_in_launch, 1024, 0>>> (alt_start, n, this->device_mem_ptr_);
this->SieveKernel(blocks_in_launch, 1024, alt_start, n, this->device_mem_ptr_);
//Decrease number of remaining blocks
//Move kernel starting value
full_blocks -= blocks_in_launch;
alt_start += blocks_in_launch * 1024;
// Check for any errors launching the kernel
CUDAErrorOutput(
cudaGetLastError(),
"<full blocks launch>",
__FUNCTION__
);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
CUDAErrorOutput(
cudaDeviceSynchronize(),
"cudaDeviceSynchronize()",
__FUNCTION__
);
}
//Launch leftover threads in 1 block //NTS: Will run sequentially, thus start and end must be altered
if (excess_threads > 0) {
//std::cout << ">>\tLaunching [" << excess_threads << "] excess threads\n";
//SundaramKernel <<<1, excess_threads, 0>>> (alt_start, n, this->device_mem_ptr_);
this->SieveKernel(1, excess_threads, alt_start, n, this->device_mem_ptr_);
// Check for any errors launching the kernel
CUDAErrorOutput(
cudaGetLastError(),
"<excess thread launch>",
__FUNCTION__
);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
CUDAErrorOutput(
cudaDeviceSynchronize(),
"cudaDeviceSynchronize()",
__FUNCTION__
);
}
}
//Public-------------------------------------------------------------------------------------------
SieveCUDA::SieveCUDA() {
}
SieveCUDA::~SieveCUDA() {
//NTS: Do not delete this ptr here
this->sieve_mem_ptr_ = nullptr;
}
void SieveCUDA::LinkMemory(PrimeMemoryBool * in_ptr) {
this->sieve_mem_ptr_ = in_ptr;
}
|
8457dd60410ba5b24bde5f04c988508d6f1edd27.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@author Tingxing Dong
@author Azzam Haidar
@generated from magmablas/ztrsv.cu, normal z -> c, Wed Jan 2 14:18:51 2019
*/
#include "magma_internal.h"
#include "magma_templates.h"
#define PRECISION_c
#define NB 256 //NB is the 1st level blocking in recursive blocking, NUM_THREADS is the 2nd level, NB=256, NUM_THREADS=64 is optimal for batched
#define NUM_THREADS 128 //64 //128
#define BLOCK_SIZE_N 128
#define DIM_X_N 128
#define DIM_Y_N 1
#define BLOCK_SIZE_T 32
#define DIM_X_T 16
#define DIM_Y_T 8
#include "ctrsv_template_device.cuh"
#define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column
extern __shared__ magmaFloatComplex shared_data[];
/******************************************************************************/
template< const int BLOCK_SIZE, const int DIM_X, const int DIM_Y,
const int TILE_SIZE, const int flag, const magma_uplo_t uplo,
const magma_trans_t trans, const magma_diag_t diag >
__global__ void
ctrsv_notrans_kernel_outplace(
int n,
const magmaFloatComplex * __restrict__ A, int lda,
magmaFloatComplex *b, int incb,
magmaFloatComplex *x)
{
ctrsv_notrans_device< BLOCK_SIZE, DIM_X, DIM_Y, TILE_SIZE, flag, uplo, trans, diag >( n, A, lda, b, incb, x);
}
/******************************************************************************/
template< const int BLOCK_SIZE, const int DIM_X, const int DIM_Y,
const int TILE_SIZE, const int flag, const magma_uplo_t uplo,
const magma_trans_t trans, const magma_diag_t diag >
__global__ void
ctrsv_trans_kernel_outplace(
int n,
const magmaFloatComplex * __restrict__ A, int lda,
magmaFloatComplex *b, int incb,
magmaFloatComplex *x)
{
ctrsv_trans_device< BLOCK_SIZE, DIM_X, DIM_Y, TILE_SIZE, flag, uplo, trans, diag >( n, A, lda, b, incb, x);
}
/******************************************************************************/
extern "C" void
magmablas_ctrsv_outofplace(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
magma_int_t n,
magmaFloatComplex_const_ptr A, magma_int_t lda,
magmaFloatComplex_ptr b, magma_int_t incb,
magmaFloatComplex_ptr x,
magma_queue_t queue,
magma_int_t flag=0)
{
/* Check arguments */
magma_int_t info = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower ) {
info = -1;
} else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) {
info = -2;
} else if ( diag != MagmaUnit && diag != MagmaNonUnit ) {
info = -3;
} else if (n < 0) {
info = -5;
} else if (lda < max(1,n)) {
info = -8;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return;
}
// quick return if possible.
if (n == 0)
return;
dim3 threads( NUM_THREADS );
dim3 blocks( 1, 1, 1 );
size_t shmem = n * sizeof(magmaFloatComplex);
if (trans == MagmaNoTrans)
{
if (uplo == MagmaUpper)
{
if (diag == MagmaNonUnit)
{
if (flag == 0) {
hipLaunchKernelGGL(( ctrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaUpper, MagmaNoTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( ctrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaUpper, MagmaNoTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit)
{
if (flag == 0) {
hipLaunchKernelGGL(( ctrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaUpper, MagmaNoTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( ctrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaUpper, MagmaNoTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
}
else //Lower
{
if (diag == MagmaNonUnit)
{
if (flag == 0)
{
hipLaunchKernelGGL(( ctrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaLower, MagmaNoTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( ctrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaLower, MagmaNoTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit)
{
if (flag == 0)
{
hipLaunchKernelGGL(( ctrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaLower, MagmaNoTrans, MagmaUnit>)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( ctrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaLower, MagmaNoTrans, MagmaUnit>)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
}
}
else if (trans == MagmaTrans)
{
if (uplo == MagmaUpper)
{
if (diag == MagmaNonUnit) {
if (flag == 0)
{
hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit) {
if (flag == 0)
{
hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
}
else
{
if (diag == MagmaNonUnit) {
if (flag == 0)
{
hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit) {
if (flag == 0)
{
hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
}
}
else if (trans == MagmaConjTrans)
{
if (uplo == MagmaUpper)
{
if (diag == MagmaNonUnit) {
if (flag == 0)
{
hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaConjTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaConjTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit) {
if (flag == 0)
{
hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaConjTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaConjTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
}
else
{
if (diag == MagmaNonUnit) {
if (flag == 0)
{
hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaConjTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaConjTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit) {
if (flag == 0)
{
hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaConjTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaConjTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
}
}
}
/******************************************************************************/
/*
README: flag decides if the ctrsv_outplace see an updated x or not. 0: No; other: Yes
In recursive, flag must be nonzero except the 1st call
*/
extern "C" void
magmablas_ctrsv_recursive_outofplace(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
magma_int_t n,
magmaFloatComplex_const_ptr A, magma_int_t lda,
magmaFloatComplex_ptr b, magma_int_t incb,
magmaFloatComplex_ptr x,
magma_queue_t queue)
{
/* Check arguments */
magma_int_t info = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower ) {
info = -1;
} else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) {
info = -2;
} else if ( diag != MagmaUnit && diag != MagmaNonUnit ) {
info = -3;
} else if (n < 0) {
info = -5;
} else if (lda < max(1,n)) {
info = -8;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return;
}
// quick return if possible.
if (n == 0)
return;
//Init x with zero
//magmablas_claset( MagmaFull, n, incb, MAGMA_C_ZERO, MAGMA_C_ZERO, x, n, queue );
magma_int_t col = n;
if (trans == MagmaNoTrans)
{
for (magma_int_t i=0; i < n; i+= NB)
{
magma_int_t jb = min(NB, n-i);
if (uplo == MagmaUpper)
{
col -= jb;
//assume x_array contains zero elements, magmablas_cgemv will cause slow down
magma_cgemv( MagmaNoTrans, jb, i, MAGMA_C_ONE, A(col, col+jb), lda,
x+col+jb, 1, MAGMA_C_ONE, x+col, 1, queue );
}
else
{
col = i;
magma_cgemv( MagmaNoTrans, jb, i, MAGMA_C_ONE, A(col, 0), lda,
x, 1, MAGMA_C_ONE, x+col, 1, queue );
}
magmablas_ctrsv_outofplace( uplo, trans, diag, jb, A(col, col), lda, b+col, incb, x+col, queue, i );
}
}
else
{
for (magma_int_t i=0; i < n; i += NB)
{
magma_int_t jb = min(NB, n-i);
if (uplo == MagmaLower)
{
col -= jb;
magma_cgemv( MagmaConjTrans, i, jb, MAGMA_C_ONE, A(col+jb, col), lda, x+col+jb, 1, MAGMA_C_ONE, x+col, 1, queue );
}
else
{
col = i;
magma_cgemv( MagmaConjTrans, i, jb, MAGMA_C_ONE, A(0, col), lda, x, 1, MAGMA_C_ONE, x+col, 1, queue );
}
magmablas_ctrsv_outofplace( uplo, trans, diag, jb, A(col, col), lda, b+col, incb, x+col, queue, i );
}
}
}
/***************************************************************************//**
Purpose
-------
ctrsv solves one of the matrix equations on gpu
op(A)*x = B, or
x*op(A) = B,
where x and b are n-element vectors, A is a unit, or
non-unit, upper or lower triangular matrix and op(A) is one of
op(A) = A, or
op(A) = A^T, or
op(A) = A^H.
The vector x is overwritten on b.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
- = MagmaUpper: A is an upper triangular matrix.
- = MagmaLower: A is a lower triangular matrix.
@param[in]
trans magma_trans_t.
On entry, trans specifies the form of op(A) to be used in
the matrix multiplication as follows:
- = MagmaNoTrans: op(A) = A.
- = MagmaTrans: op(A) = A^T.
- = MagmaConjTrans: op(A) = A^H.
@param[in]
diag magma_diag_t.
On entry, diag specifies whether or not A is unit triangular
as follows:
- = MagmaUnit: A is assumed to be unit triangular.
- = MagmaNonUnit: A is not assumed to be unit triangular.
@param[in]
n INTEGER.
On entry, n specifies the order of the matrix A. n >= 0.
@param[in]
dA COMPLEX array of dimension ( lda, n )
Before entry with uplo = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular matrix and the strictly lower triangular part of
A is not referenced.
Before entry with uplo = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular matrix and the strictly upper triangular part of
A is not referenced.
Note that when diag = MagmaUnit, the diagonal elements of
A are not referenced either, but are assumed to be unity.
@param[in]
ldda INTEGER.
On entry, lda specifies the first dimension of A.
lda >= max( 1, n ).
@param[in]
db COMPLEX array of dimension n
On exit, b is overwritten with the solution vector X.
@param[in]
incb INTEGER.
On entry, incb specifies the increment for the elements of
b. incb must not be zero.
Unchanged on exit.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_trsv
*******************************************************************************/
extern "C" void
magmablas_ctrsv(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
magma_int_t n,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_ptr db, magma_int_t incb,
magma_queue_t queue)
{
magma_int_t size_x = n * incb;
magmaFloatComplex_ptr dx=NULL;
magma_cmalloc( &dx, size_x );
magmablas_claset( MagmaFull, n, 1, MAGMA_C_ZERO, MAGMA_C_ZERO, dx, n, queue );
magmablas_ctrsv_recursive_outofplace( uplo, trans, diag, n, dA, ldda, db, incb, dx, queue );
magmablas_clacpy( MagmaFull, n, 1, dx, n, db, n, queue );
magma_free( dx );
}
|
8457dd60410ba5b24bde5f04c988508d6f1edd27.cu
|
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@author Tingxing Dong
@author Azzam Haidar
@generated from magmablas/ztrsv.cu, normal z -> c, Wed Jan 2 14:18:51 2019
*/
#include "magma_internal.h"
#include "magma_templates.h"
#define PRECISION_c
#define NB 256 //NB is the 1st level blocking in recursive blocking, NUM_THREADS is the 2nd level, NB=256, NUM_THREADS=64 is optimal for batched
#define NUM_THREADS 128 //64 //128
#define BLOCK_SIZE_N 128
#define DIM_X_N 128
#define DIM_Y_N 1
#define BLOCK_SIZE_T 32
#define DIM_X_T 16
#define DIM_Y_T 8
#include "ctrsv_template_device.cuh"
#define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column
extern __shared__ magmaFloatComplex shared_data[];
/******************************************************************************/
template< const int BLOCK_SIZE, const int DIM_X, const int DIM_Y,
const int TILE_SIZE, const int flag, const magma_uplo_t uplo,
const magma_trans_t trans, const magma_diag_t diag >
__global__ void
ctrsv_notrans_kernel_outplace(
int n,
const magmaFloatComplex * __restrict__ A, int lda,
magmaFloatComplex *b, int incb,
magmaFloatComplex *x)
{
ctrsv_notrans_device< BLOCK_SIZE, DIM_X, DIM_Y, TILE_SIZE, flag, uplo, trans, diag >( n, A, lda, b, incb, x);
}
/******************************************************************************/
template< const int BLOCK_SIZE, const int DIM_X, const int DIM_Y,
const int TILE_SIZE, const int flag, const magma_uplo_t uplo,
const magma_trans_t trans, const magma_diag_t diag >
__global__ void
ctrsv_trans_kernel_outplace(
int n,
const magmaFloatComplex * __restrict__ A, int lda,
magmaFloatComplex *b, int incb,
magmaFloatComplex *x)
{
ctrsv_trans_device< BLOCK_SIZE, DIM_X, DIM_Y, TILE_SIZE, flag, uplo, trans, diag >( n, A, lda, b, incb, x);
}
/******************************************************************************/
extern "C" void
magmablas_ctrsv_outofplace(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
magma_int_t n,
magmaFloatComplex_const_ptr A, magma_int_t lda,
magmaFloatComplex_ptr b, magma_int_t incb,
magmaFloatComplex_ptr x,
magma_queue_t queue,
magma_int_t flag=0)
{
/* Check arguments */
magma_int_t info = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower ) {
info = -1;
} else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) {
info = -2;
} else if ( diag != MagmaUnit && diag != MagmaNonUnit ) {
info = -3;
} else if (n < 0) {
info = -5;
} else if (lda < max(1,n)) {
info = -8;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return;
}
// quick return if possible.
if (n == 0)
return;
dim3 threads( NUM_THREADS );
dim3 blocks( 1, 1, 1 );
size_t shmem = n * sizeof(magmaFloatComplex);
if (trans == MagmaNoTrans)
{
if (uplo == MagmaUpper)
{
if (diag == MagmaNonUnit)
{
if (flag == 0) {
ctrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaUpper, MagmaNoTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
ctrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaUpper, MagmaNoTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit)
{
if (flag == 0) {
ctrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaUpper, MagmaNoTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
ctrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaUpper, MagmaNoTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
}
else //Lower
{
if (diag == MagmaNonUnit)
{
if (flag == 0)
{
ctrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaLower, MagmaNoTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
ctrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaLower, MagmaNoTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit)
{
if (flag == 0)
{
ctrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaLower, MagmaNoTrans, MagmaUnit>
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
ctrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaLower, MagmaNoTrans, MagmaUnit>
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
}
}
else if (trans == MagmaTrans)
{
if (uplo == MagmaUpper)
{
if (diag == MagmaNonUnit) {
if (flag == 0)
{
ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit) {
if (flag == 0)
{
ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
}
else
{
if (diag == MagmaNonUnit) {
if (flag == 0)
{
ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit) {
if (flag == 0)
{
ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
}
}
else if (trans == MagmaConjTrans)
{
if (uplo == MagmaUpper)
{
if (diag == MagmaNonUnit) {
if (flag == 0)
{
ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaConjTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaConjTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit) {
if (flag == 0)
{
ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaConjTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaConjTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
}
else
{
if (diag == MagmaNonUnit) {
if (flag == 0)
{
ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaConjTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaConjTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit) {
if (flag == 0)
{
ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaConjTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
ctrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaConjTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
}
}
}
/******************************************************************************/
/*
README: flag decides if the ctrsv_outplace see an updated x or not. 0: No; other: Yes
In recursive, flag must be nonzero except the 1st call
*/
extern "C" void
magmablas_ctrsv_recursive_outofplace(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
magma_int_t n,
magmaFloatComplex_const_ptr A, magma_int_t lda,
magmaFloatComplex_ptr b, magma_int_t incb,
magmaFloatComplex_ptr x,
magma_queue_t queue)
{
/* Check arguments */
magma_int_t info = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower ) {
info = -1;
} else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) {
info = -2;
} else if ( diag != MagmaUnit && diag != MagmaNonUnit ) {
info = -3;
} else if (n < 0) {
info = -5;
} else if (lda < max(1,n)) {
info = -8;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return;
}
// quick return if possible.
if (n == 0)
return;
//Init x with zero
//magmablas_claset( MagmaFull, n, incb, MAGMA_C_ZERO, MAGMA_C_ZERO, x, n, queue );
magma_int_t col = n;
if (trans == MagmaNoTrans)
{
for (magma_int_t i=0; i < n; i+= NB)
{
magma_int_t jb = min(NB, n-i);
if (uplo == MagmaUpper)
{
col -= jb;
//assume x_array contains zero elements, magmablas_cgemv will cause slow down
magma_cgemv( MagmaNoTrans, jb, i, MAGMA_C_ONE, A(col, col+jb), lda,
x+col+jb, 1, MAGMA_C_ONE, x+col, 1, queue );
}
else
{
col = i;
magma_cgemv( MagmaNoTrans, jb, i, MAGMA_C_ONE, A(col, 0), lda,
x, 1, MAGMA_C_ONE, x+col, 1, queue );
}
magmablas_ctrsv_outofplace( uplo, trans, diag, jb, A(col, col), lda, b+col, incb, x+col, queue, i );
}
}
else
{
for (magma_int_t i=0; i < n; i += NB)
{
magma_int_t jb = min(NB, n-i);
if (uplo == MagmaLower)
{
col -= jb;
magma_cgemv( MagmaConjTrans, i, jb, MAGMA_C_ONE, A(col+jb, col), lda, x+col+jb, 1, MAGMA_C_ONE, x+col, 1, queue );
}
else
{
col = i;
magma_cgemv( MagmaConjTrans, i, jb, MAGMA_C_ONE, A(0, col), lda, x, 1, MAGMA_C_ONE, x+col, 1, queue );
}
magmablas_ctrsv_outofplace( uplo, trans, diag, jb, A(col, col), lda, b+col, incb, x+col, queue, i );
}
}
}
/***************************************************************************//**
Purpose
-------
ctrsv solves one of the matrix equations on gpu
op(A)*x = B, or
x*op(A) = B,
where x and b are n-element vectors, A is a unit, or
non-unit, upper or lower triangular matrix and op(A) is one of
op(A) = A, or
op(A) = A^T, or
op(A) = A^H.
The vector x is overwritten on b.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
- = MagmaUpper: A is an upper triangular matrix.
- = MagmaLower: A is a lower triangular matrix.
@param[in]
trans magma_trans_t.
On entry, trans specifies the form of op(A) to be used in
the matrix multiplication as follows:
- = MagmaNoTrans: op(A) = A.
- = MagmaTrans: op(A) = A^T.
- = MagmaConjTrans: op(A) = A^H.
@param[in]
diag magma_diag_t.
On entry, diag specifies whether or not A is unit triangular
as follows:
- = MagmaUnit: A is assumed to be unit triangular.
- = MagmaNonUnit: A is not assumed to be unit triangular.
@param[in]
n INTEGER.
On entry, n specifies the order of the matrix A. n >= 0.
@param[in]
dA COMPLEX array of dimension ( lda, n )
Before entry with uplo = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular matrix and the strictly lower triangular part of
A is not referenced.
Before entry with uplo = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular matrix and the strictly upper triangular part of
A is not referenced.
Note that when diag = MagmaUnit, the diagonal elements of
A are not referenced either, but are assumed to be unity.
@param[in]
ldda INTEGER.
On entry, lda specifies the first dimension of A.
lda >= max( 1, n ).
@param[in]
db COMPLEX array of dimension n
On exit, b is overwritten with the solution vector X.
@param[in]
incb INTEGER.
On entry, incb specifies the increment for the elements of
b. incb must not be zero.
Unchanged on exit.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_trsv
*******************************************************************************/
extern "C" void
magmablas_ctrsv(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
magma_int_t n,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_ptr db, magma_int_t incb,
magma_queue_t queue)
{
magma_int_t size_x = n * incb;
magmaFloatComplex_ptr dx=NULL;
magma_cmalloc( &dx, size_x );
magmablas_claset( MagmaFull, n, 1, MAGMA_C_ZERO, MAGMA_C_ZERO, dx, n, queue );
magmablas_ctrsv_recursive_outofplace( uplo, trans, diag, n, dA, ldda, db, incb, dx, queue );
magmablas_clacpy( MagmaFull, n, 1, dx, n, db, n, queue );
magma_free( dx );
}
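/*
    Usage sketch (illustrative, not part of MAGMA): solve U*x = b on the GPU for an
    upper-triangular, non-unit-diagonal matrix already resident in device memory.
    n, dA, ldda, db and queue are assumed to be set up by the caller; on return db
    holds the solution x.

        magmablas_ctrsv( MagmaUpper, MagmaNoTrans, MagmaNonUnit,
                         n, dA, ldda, db, 1, queue );
*/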
|
588ebff4a781715220a422ae1589546e9c0cf943.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <nori/normalMap.h>
#include <filesystem/resolver.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "opencv2/imgproc/imgproc.hpp"
NORI_NAMESPACE_BEGIN
__device__ Vector3f NormalMap::eval(const Point2f& uv, const Frame& frame) {
// rescale and shift
float x = (uv.x()/m_scale.x()) - m_delta.x();
float y = (uv.y()/m_scale.y()) - m_delta.y();
x = x - floor(x);
y = y - floor(y);
float4 rgb = tex2D<float4>(image_tex, y, x);
Vector3f n((2.0f*rgb.x)-1.0f, (2.0f*rgb.y)-1.0f, rgb.z);
n.normalize();
return frame.toWorld(n);
}
#ifndef __CUDA_ARCH__
__host__ NormalMap::NormalMap(const PropertyList &props) {
modifierType = ENormalMap;
m_delta = props.getPoint2("delta", Point2f(0));
m_scale = props.getVector2("scale", Vector2f(1));
filesystem::path filename = getFileResolver()->resolve(props.getString("filename"));
image = cv::imread(filename.str(), CV_LOAD_IMAGE_COLOR); // BGR format
if (!image.data)
{
throw NoriException("Image %s could not be found!", filename);
}
cout << getSize() << endl;
}
__host__ std::string NormalMap::toString() const {
return tfm::format(
"NormalMap[]");
}
__host__ void NormalMap::transferImage(unsigned char** target, hipTextureObject_t* image_tex, cv::Mat& img)
{
CHECK_ERROR( hipMallocPitch(target, &gpu_step, img.elemSize() * img.cols, img.rows));
CHECK_ERROR( hipMemcpy2D(*target, gpu_step, img.data, img.step, img.cols * img.elemSize(), img.rows, hipMemcpyHostToDevice));
hipChannelFormatDesc desc = hipCreateChannelDesc(8,8,8,8, hipChannelFormatKindUnsigned);
// Specify texture
struct hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = hipResourceTypePitch2D;
resDesc.res.pitch2D.devPtr = *target;
resDesc.res.pitch2D.pitchInBytes = gpu_step;
resDesc.res.pitch2D.width = img.cols;
resDesc.res.pitch2D.height = img.rows;
resDesc.res.pitch2D.desc = desc;
// Specify texture object parameters
struct hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = hipAddressModeWrap;
texDesc.addressMode[1] = hipAddressModeWrap;
texDesc.filterMode = hipFilterModeLinear; //use linear filtering when sampling the normal map
texDesc.readMode = hipReadModeNormalizedFloat; //return normalized floats, as tex2D<float4> in eval() expects
texDesc.normalizedCoords = 1;
CHECK_ERROR(hipCreateTextureObject(image_tex, &resDesc, &texDesc, NULL));
}
__host__ void NormalMap::gpuTransfer(NoriObject ** objects) {
uchar* rgbaData = new uchar[image.total()*4];
cv::Mat image_rgba(image.size(), CV_8UC4, rgbaData);
cv::cvtColor(image, image_rgba, CV_BGR2RGBA, 4);
/*cv::namedWindow("test");
cv::imshow("image", image_rgba);
cv::waitKey();*/
transferImage(&gpu_data, &image_tex, image_rgba);
}
#endif
#ifndef __CUDA_ARCH__
NORI_REGISTER_CLASS(NormalMap, "normal_map")
#endif
NORI_NAMESPACE_END
|
588ebff4a781715220a422ae1589546e9c0cf943.cu
|
#include <nori/normalMap.h>
#include <filesystem/resolver.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "opencv2/imgproc/imgproc.hpp"
NORI_NAMESPACE_BEGIN
__device__ Vector3f NormalMap::eval(const Point2f& uv, const Frame& frame) {
// rescale and shift
float x = (uv.x()/m_scale.x()) - m_delta.x();
float y = (uv.y()/m_scale.y()) - m_delta.y();
x = x - floor(x);
y = y - floor(y);
float4 rgb = tex2D<float4>(image_tex, y, x);
Vector3f n((2.0f*rgb.x)-1.0f, (2.0f*rgb.y)-1.0f, rgb.z);
n.normalize();
return frame.toWorld(n);
}
#ifndef __CUDA_ARCH__
__host__ NormalMap::NormalMap(const PropertyList &props) {
modifierType = ENormalMap;
m_delta = props.getPoint2("delta", Point2f(0));
m_scale = props.getVector2("scale", Vector2f(1));
filesystem::path filename = getFileResolver()->resolve(props.getString("filename"));
image = cv::imread(filename.str(), CV_LOAD_IMAGE_COLOR); // BGR format
if (!image.data)
{
throw NoriException("Image %s could not be found!", filename);
}
cout << getSize() << endl;
}
__host__ std::string NormalMap::toString() const {
return tfm::format(
"NormalMap[]");
}
__host__ void NormalMap::transferImage(unsigned char** target, cudaTextureObject_t* image_tex, cv::Mat& img)
{
CHECK_ERROR( cudaMallocPitch(target, &gpu_step, img.elemSize() * img.cols, img.rows));
CHECK_ERROR( cudaMemcpy2D(*target, gpu_step, img.data, img.step, img.cols * img.elemSize(), img.rows, cudaMemcpyHostToDevice));
cudaChannelFormatDesc desc = cudaCreateChannelDesc(8,8,8,8, cudaChannelFormatKindUnsigned);
// Specify texture
struct cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypePitch2D;
resDesc.res.pitch2D.devPtr = *target;
resDesc.res.pitch2D.pitchInBytes = gpu_step;
resDesc.res.pitch2D.width = img.cols;
resDesc.res.pitch2D.height = img.rows;
resDesc.res.pitch2D.desc = desc;
// Specify texture object parameters
struct cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = cudaAddressModeWrap;
texDesc.addressMode[1] = cudaAddressModeWrap;
texDesc.filterMode = cudaFilterModeLinear; //use linear filtering when sampling the normal map
texDesc.readMode = cudaReadModeNormalizedFloat; //return normalized floats, as tex2D<float4> in eval() expects
texDesc.normalizedCoords = 1;
CHECK_ERROR(cudaCreateTextureObject(image_tex, &resDesc, &texDesc, NULL));
}
__host__ void NormalMap::gpuTransfer(NoriObject ** objects) {
uchar* rgbaData = new uchar[image.total()*4];
cv::Mat image_rgba(image.size(), CV_8UC4, rgbaData);
cv::cvtColor(image, image_rgba, CV_BGR2RGBA, 4);
/*cv::namedWindow("test");
cv::imshow("image", image_rgba);
cv::waitKey();*/
transferImage(&gpu_data, &image_tex, image_rgba);
}
#endif
#ifndef __CUDA_ARCH__
NORI_REGISTER_CLASS(NormalMap, "normal_map")
#endif
NORI_NAMESPACE_END
|
0e143399400bea6595113fea5521b5920c6d6126.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "predicate.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *d_array = NULL;
hipMalloc(&d_array, XSIZE*YSIZE);
int d_numberOfElements = 1;
int *d_predicateArray = NULL;
hipMalloc(&d_predicateArray, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(predicate, dim3(gridBlock), dim3(threadBlock), 0, 0, d_array, d_numberOfElements, d_predicateArray);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(predicate, dim3(gridBlock), dim3(threadBlock), 0, 0, d_array, d_numberOfElements, d_predicateArray);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(predicate, dim3(gridBlock), dim3(threadBlock), 0, 0, d_array, d_numberOfElements, d_predicateArray);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
0e143399400bea6595113fea5521b5920c6d6126.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "predicate.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *d_array = NULL;
cudaMalloc(&d_array, XSIZE*YSIZE);
int d_numberOfElements = 1;
int *d_predicateArray = NULL;
cudaMalloc(&d_predicateArray, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
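//Worked example of the rounding above, using values from the tables at the top of this
//file: XSIZE = 1016 with BLOCKX = 24 is rounded up to iXSIZE = 1032, giving 1032/24 = 43
//blocks along x, so the grid always covers the matrix even when its size is not a
//multiple of the block size.
static_assert(1016 % 24 != 0 && 1032 % 24 == 0 && 1032 / 24 == 43,
"illustrative grid-rounding arithmetic");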
cudaFree(0);
predicate<<<gridBlock,threadBlock>>>(d_array,d_numberOfElements,d_predicateArray);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
predicate<<<gridBlock,threadBlock>>>(d_array,d_numberOfElements,d_predicateArray);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
predicate<<<gridBlock,threadBlock>>>(d_array,d_numberOfElements,d_predicateArray);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
c6e6f6d6aa2fdd40bb439611af94e11b9798d216.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "count3D.h"
#include "utility.h"
#include <stdio.h>
#include "cuda_check_error.h"
#define TPB 64
__global__ void count3DKernel(float3 *pc, int len, float3 lower, float3 upper, int m, int n, int p,int* counts, aabb* cells,int *mutex)
{
const int t= blockIdx.x*blockDim.x+threadIdx.x;
if(t>=len)
return;
//Normalize each coordinate to [0,1) within the bounding box, then scale to the grid size before truncating to an index.
int i= (int)((pc[t].x-lower.x)/(upper.x-lower.x)*m);
int j= (int)((pc[t].y-lower.y)/(upper.y-lower.y)*n);
int k= (int)((pc[t].z-lower.z)/(upper.z-lower.z)*p);
//printf("Thread %2d: point(%f,%f,%f) is in cell(%d,%d,%d)\n", t,pc[t].x,pc[t].y,pc[t].z,i,j,k);
int cell_index=i+j*m+k*m*n;
if(i>=m||j>=n||k>=p)
return;
//mutex
bool leave=true;
while(leave)
{
if (0 == (atomicCAS(&mutex[cell_index],0,1)))
{
counts[cell_index]++;
cells[cell_index].density=cells[cell_index].density+1;
//printf("counts[%d,%d,%d]=%d\n", i,j,k, counts[cell_index]);
//printf("cells[%d,%d,%d].density=%f\n", i,j,k, cells[cell_index].density);
leave=false;
atomicExch(&mutex[cell_index], 0);
}
}
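//Lock-free alternative (sketch, assuming aabb::density is a float; not used here):
//  atomicAdd(&counts[cell_index], 1);
//  atomicAdd(&cells[cell_index].density, 1.0f);
//would perform the same two per-cell increments without the CAS spin loop above.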
}
//m,n,p: number of grid cells along x, y and z
void count3D(const std::vector<float3>pc, int m, int n,int p, int *counts, aabb* cells)
{
int len = pc.size();
aabb box=point_cloud_bounds(pc);
box.print();
float3* d_pc;
CudaSafeCall(hipMalloc(&d_pc, len*sizeof(float3)));
CudaSafeCall(hipMemcpy(d_pc, &pc[0], len*sizeof(float3),hipMemcpyHostToDevice));
int* d_counts;
CudaSafeCall(hipMalloc(&d_counts, m*n*p*sizeof(int)));
int blocks=(len+TPB-1)/TPB;
int* mutex; //one lock per grid cell; threads updating the same cell serialize on it
CudaSafeCall(hipMallocManaged((void**)&mutex, m*n*p*sizeof(int)));
CudaSafeCall(hipMemset(mutex,0,m*n*p*sizeof(int)));
hipLaunchKernelGGL(( count3DKernel), dim3(blocks), dim3(TPB), 0, 0, d_pc, len, box.min(), box.max(), m,n,p,d_counts,cells, mutex);
CudaCheckError();
CudaSafeCall(hipMemcpy(counts, d_counts, m*n*p*sizeof(int), hipMemcpyDeviceToHost));
CudaSafeCall(hipFree(d_pc));
CudaSafeCall(hipFree(d_counts));
CudaSafeCall(hipFree(mutex));
}
|
c6e6f6d6aa2fdd40bb439611af94e11b9798d216.cu
|
#include "count3D.h"
#include "utility.h"
#include <stdio.h>
#include "cuda_check_error.h"
#define TPB 64
__global__ void count3DKernel(float3 *pc, int len, float3 lower, float3 upper, int m, int n, int p,int* counts, aabb* cells,int *mutex)
{
const int t= blockIdx.x*blockDim.x+threadIdx.x;
if(t>=len)
return;
//Normalize each coordinate to [0,1) within the bounding box, then scale to the grid size before truncating to an index.
int i= (int)((pc[t].x-lower.x)/(upper.x-lower.x)*m);
int j= (int)((pc[t].y-lower.y)/(upper.y-lower.y)*n);
int k= (int)((pc[t].z-lower.z)/(upper.z-lower.z)*p);
//printf("Thread %2d: point(%f,%f,%f) is in cell(%d,%d,%d)\n", t,pc[t].x,pc[t].y,pc[t].z,i,j,k);
int cell_index=i+j*m+k*m*n;
if(i>=m||j>=n||k>=p)
return;
//mutex
bool leave=true;
while(leave)
{
if (0 == (atomicCAS(&mutex[cell_index],0,1)))
{
counts[cell_index]++;
cells[cell_index].density=cells[cell_index].density+1;
//printf("counts[%d,%d,%d]=%d\n", i,j,k, counts[cell_index]);
//printf("cells[%d,%d,%d].density=%f\n", i,j,k, cells[cell_index].density);
leave=false;
atomicExch(&mutex[cell_index], 0);
}
}
}
//m,n,p: number of grid cells along x, y and z
void count3D(const std::vector<float3>pc, int m, int n,int p, int *counts, aabb* cells)
{
int len = pc.size();
aabb box=point_cloud_bounds(pc);
box.print();
float3* d_pc;
CudaSafeCall(cudaMalloc(&d_pc, len*sizeof(float3)));
CudaSafeCall(cudaMemcpy(d_pc, &pc[0], len*sizeof(float3),cudaMemcpyHostToDevice));
int* d_counts;
CudaSafeCall(cudaMalloc(&d_counts, m*n*p*sizeof(int)));
int blocks=(len+TPB-1)/TPB;
int* mutex; //one lock per grid cell; threads updating the same cell serialize on it
CudaSafeCall(cudaMallocManaged((void**)&mutex, m*n*p*sizeof(int)));
CudaSafeCall(cudaMemset(mutex,0,m*n*p*sizeof(int)));
count3DKernel<<<blocks, TPB>>>(d_pc, len, box.min(), box.max(), m,n,p,d_counts,cells, mutex);
CudaCheckError();
CudaSafeCall(cudaMemcpy(counts, d_counts, m*n*p*sizeof(int), cudaMemcpyDeviceToHost));
CudaSafeCall(cudaFree(d_pc));
CudaSafeCall(cudaFree(d_counts));
CudaSafeCall(cudaFree(mutex));
}
|
7874281a277158b02ce6f3038eaeab116b290383.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright 2017-2020 XGBoost contributors
*/
#include <thrust/copy.h>
#include <thrust/reduce.h>
#include <xgboost/tree_updater.h>
#include <algorithm>
#include <cmath>
#include <memory>
#include <limits>
#include <queue>
#include <utility>
#include <vector>
#include "xgboost/host_device_vector.h"
#include "xgboost/parameter.h"
#include "xgboost/span.h"
#include "xgboost/json.h"
#include "../common/io.h"
#include "../common/device_helpers.cuh"
#include "../common/hist_util.h"
#include "../common/timer.h"
#include "../data/ellpack_page.cuh"
#include "param.h"
#include "updater_gpu_common.cuh"
#include "constraints.cuh"
#include "gpu_hist/gradient_based_sampler.cuh"
#include "gpu_hist/row_partitioner.cuh"
#include "gpu_hist/histogram.cuh"
namespace xgboost {
namespace tree {
#if !defined(GTEST_TEST)
DMLC_REGISTRY_FILE_TAG(updater_gpu_hist);
#endif // !defined(GTEST_TEST)
// training parameters specific to this algorithm
struct GPUHistMakerTrainParam
: public XGBoostParameter<GPUHistMakerTrainParam> {
bool single_precision_histogram;
bool deterministic_histogram;
bool debug_synchronize;
// declare parameters
DMLC_DECLARE_PARAMETER(GPUHistMakerTrainParam) {
DMLC_DECLARE_FIELD(single_precision_histogram).set_default(false).describe(
"Use single precision to build histograms.");
DMLC_DECLARE_FIELD(deterministic_histogram).set_default(true).describe(
"Pre-round the gradient for obtaining deterministic gradient histogram.");
DMLC_DECLARE_FIELD(debug_synchronize).set_default(false).describe(
"Check if all distributed tree are identical after tree construction.");
}
};
#if !defined(GTEST_TEST)
DMLC_REGISTER_PARAMETER(GPUHistMakerTrainParam);
#endif // !defined(GTEST_TEST)
struct ExpandEntry {
int nid;
int depth;
DeviceSplitCandidate split;
uint64_t timestamp;
ExpandEntry() = default;
ExpandEntry(int nid, int depth, DeviceSplitCandidate split,
uint64_t timestamp)
: nid(nid), depth(depth), split(std::move(split)), timestamp(timestamp) {}
bool IsValid(const TrainParam& param, int num_leaves) const {
if (split.loss_chg <= kRtEps) return false;
if (split.left_sum.GetHess() == 0 || split.right_sum.GetHess() == 0) {
return false;
}
if (split.loss_chg < param.min_split_loss) { return false; }
if (param.max_depth > 0 && depth == param.max_depth) {return false; }
if (param.max_leaves > 0 && num_leaves == param.max_leaves) { return false; }
return true;
}
static bool ChildIsValid(const TrainParam& param, int depth, int num_leaves) {
if (param.max_depth > 0 && depth >= param.max_depth) return false;
if (param.max_leaves > 0 && num_leaves >= param.max_leaves) return false;
return true;
}
friend std::ostream& operator<<(std::ostream& os, const ExpandEntry& e) {
os << "ExpandEntry: \n";
os << "nidx: " << e.nid << "\n";
os << "depth: " << e.depth << "\n";
os << "loss: " << e.split.loss_chg << "\n";
os << "left_sum: " << e.split.left_sum << "\n";
os << "right_sum: " << e.split.right_sum << "\n";
return os;
}
};
inline static bool DepthWise(const ExpandEntry& lhs, const ExpandEntry& rhs) {
if (lhs.depth == rhs.depth) {
return lhs.timestamp > rhs.timestamp; // favor small timestamp
} else {
return lhs.depth > rhs.depth; // favor small depth
}
}
inline static bool LossGuide(const ExpandEntry& lhs, const ExpandEntry& rhs) {
if (lhs.split.loss_chg == rhs.split.loss_chg) {
return lhs.timestamp > rhs.timestamp; // favor small timestamp
} else {
return lhs.split.loss_chg < rhs.split.loss_chg; // favor large loss_chg
}
}
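//Illustrative sketch (not from the original source): the two comparators above are meant
//to order a priority queue of candidate splits; LossGuide pops the entry with the largest
//loss_chg first, DepthWise pops the shallowest node first. For example:
using ExpandQueueExample =
    std::priority_queue<ExpandEntry, std::vector<ExpandEntry>,
                        bool (*)(const ExpandEntry&, const ExpandEntry&)>;
// e.g. ExpandQueueExample qexpand(LossGuide); or ExpandQueueExample qexpand(DepthWise);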
// With constraints
template <typename GradientPairT>
XGBOOST_DEVICE float inline LossChangeMissing(
const GradientPairT& scan, const GradientPairT& missing, const GradientPairT& parent_sum,
const float& parent_gain, const GPUTrainingParam& param, int constraint,
const ValueConstraint& value_constraint,
bool& missing_left_out) { // NOLINT
float missing_left_gain = value_constraint.CalcSplitGain(
param, constraint, GradStats(scan + missing),
GradStats(parent_sum - (scan + missing)));
float missing_right_gain = value_constraint.CalcSplitGain(
param, constraint, GradStats(scan), GradStats(parent_sum - scan));
if (missing_left_gain >= missing_right_gain) {
missing_left_out = true;
return missing_left_gain - parent_gain;
} else {
missing_left_out = false;
return missing_right_gain - parent_gain;
}
}
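//Worked example with made-up numbers: if routing the missing-value gradient to the left
//child gives a split gain of 0.42, routing it right gives 0.35, and parent_gain is 0.10,
//then missing_left_out is set to true and the function returns 0.42 - 0.10 = 0.28.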
/*!
* \brief Sum the histogram bins of a single feature with a block-wide reduction.
*
* \tparam ReduceT      BlockReduce type.
* \tparam TempStorageT Cub shared memory.
*
* \param feature_histogram Histogram bins belonging to one feature.
* \param temp_storage      Shared memory for the intermediate result.
*/
template <int BLOCK_THREADS, typename ReduceT, typename TempStorageT, typename GradientSumT>
__device__ GradientSumT ReduceFeature(common::Span<const GradientSumT> feature_histogram,
TempStorageT* temp_storage) {
  __shared__ hipcub::Uninitialized<GradientSumT> uninitialized_sum;
GradientSumT& shared_sum = uninitialized_sum.Alias();
GradientSumT local_sum = GradientSumT();
// For loop sums features into one block size
auto begin = feature_histogram.data();
auto end = begin + feature_histogram.size();
for (auto itr = begin; itr < end; itr += BLOCK_THREADS) {
bool thread_active = itr + threadIdx.x < end;
// Scan histogram
GradientSumT bin = thread_active ? *(itr + threadIdx.x) : GradientSumT();
local_sum += bin;
}
local_sum = ReduceT(temp_storage->sum_reduce).Reduce(local_sum, hipcub::Sum());
// Reduction result is stored in thread 0.
if (threadIdx.x == 0) {
shared_sum = local_sum;
}
__syncthreads();
return shared_sum;
}
/*! \brief Find the thread with best gain. */
template <int BLOCK_THREADS, typename ReduceT, typename ScanT,
typename MaxReduceT, typename TempStorageT, typename GradientSumT>
__device__ void EvaluateFeature(
int fidx, common::Span<const GradientSumT> node_histogram,
const EllpackDeviceAccessor& matrix,
DeviceSplitCandidate* best_split, // shared memory storing best split
const DeviceNodeStats& node, const GPUTrainingParam& param,
TempStorageT* temp_storage, // temp memory for cub operations
int constraint, // monotonic_constraints
const ValueConstraint& value_constraint) {
// Use pointer from cut to indicate begin and end of bins for each feature.
  uint32_t gidx_begin = matrix.feature_segments[fidx];  // beginning bin
uint32_t gidx_end = matrix.feature_segments[fidx + 1]; // end bin for i^th feature
// Sum histogram bins for current feature
GradientSumT const feature_sum = ReduceFeature<BLOCK_THREADS, ReduceT>(
node_histogram.subspan(gidx_begin, gidx_end - gidx_begin), temp_storage);
GradientSumT const parent_sum = GradientSumT(node.sum_gradients);
GradientSumT const missing = parent_sum - feature_sum;
float const null_gain = -std::numeric_limits<bst_float>::infinity();
SumCallbackOp<GradientSumT> prefix_op =
SumCallbackOp<GradientSumT>();
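  // Walk the feature's bins in tiles of BLOCK_THREADS. The running exclusive
  // prefix sum (carried across tiles by prefix_op) gives, for each candidate
  // split bin, the gradient sum of every bin strictly to its left.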
for (int scan_begin = gidx_begin; scan_begin < gidx_end;
scan_begin += BLOCK_THREADS) {
bool thread_active = (scan_begin + threadIdx.x) < gidx_end;
// Gradient value for current bin.
GradientSumT bin =
thread_active ? node_histogram[scan_begin + threadIdx.x] : GradientSumT();
ScanT(temp_storage->scan).ExclusiveScan(bin, bin, hipcub::Sum(), prefix_op);
// Whether the gradient of missing values is put to the left side.
bool missing_left = true;
float gain = null_gain;
if (thread_active) {
gain = LossChangeMissing(bin, missing, parent_sum, node.root_gain, param,
constraint, value_constraint, missing_left);
}
__syncthreads();
// Find thread with best gain
hipcub::KeyValuePair<int, float> tuple(threadIdx.x, gain);
hipcub::KeyValuePair<int, float> best =
MaxReduceT(temp_storage->max_reduce).Reduce(tuple, hipcub::ArgMax());
__shared__ hipcub::KeyValuePair<int, float> block_max;
if (threadIdx.x == 0) {
block_max = best;
}
__syncthreads();
// Best thread updates split
if (threadIdx.x == block_max.key) {
int split_gidx = (scan_begin + threadIdx.x) - 1;
float fvalue;
if (split_gidx < static_cast<int>(gidx_begin)) {
fvalue = matrix.min_fvalue[fidx];
} else {
fvalue = matrix.gidx_fvalue_map[split_gidx];
}
GradientSumT left = missing_left ? bin + missing : bin;
GradientSumT right = parent_sum - left;
best_split->Update(gain, missing_left ? kLeftDir : kRightDir, fvalue,
fidx, GradientPair(left), GradientPair(right), param);
}
__syncthreads();
}
}
template <int BLOCK_THREADS, typename GradientSumT>
__global__ void EvaluateSplitKernel(
common::Span<const GradientSumT> node_histogram, // histogram for gradients
common::Span<const bst_feature_t> feature_set, // Selected features
DeviceNodeStats node,
xgboost::EllpackDeviceAccessor matrix,
GPUTrainingParam gpu_param,
common::Span<DeviceSplitCandidate> split_candidates, // resulting split
ValueConstraint value_constraint,
common::Span<int> d_monotonic_constraints) {
// KeyValuePair here used as threadIdx.x -> gain_value
using ArgMaxT = hipcub::KeyValuePair<int, float>;
using BlockScanT =
      hipcub::BlockScan<GradientSumT, BLOCK_THREADS, hipcub::BLOCK_SCAN_WARP_SCANS>;
using MaxReduceT = hipcub::BlockReduce<ArgMaxT, BLOCK_THREADS>;
using SumReduceT = hipcub::BlockReduce<GradientSumT, BLOCK_THREADS>;
union TempStorage {
typename BlockScanT::TempStorage scan;
typename MaxReduceT::TempStorage max_reduce;
typename SumReduceT::TempStorage sum_reduce;
};
// Aligned && shared storage for best_split
  __shared__ hipcub::Uninitialized<DeviceSplitCandidate> uninitialized_split;
DeviceSplitCandidate& best_split = uninitialized_split.Alias();
__shared__ TempStorage temp_storage;
if (threadIdx.x == 0) {
best_split = DeviceSplitCandidate();
}
__syncthreads();
// One block for each feature. Features are sampled, so fidx != blockIdx.x
int fidx = feature_set[blockIdx.x];
int constraint = d_monotonic_constraints[fidx];
EvaluateFeature<BLOCK_THREADS, SumReduceT, BlockScanT, MaxReduceT>(
fidx, node_histogram, matrix, &best_split, node, gpu_param, &temp_storage,
constraint, value_constraint);
__syncthreads();
if (threadIdx.x == 0) {
// Record best loss for each feature
split_candidates[blockIdx.x] = best_split;
}
}
/**
* \struct DeviceHistogram
*
* \summary Data storage for node histograms on device. Automatically expands.
*
* \tparam GradientSumT histogram entry type.
* \tparam kStopGrowingSize Do not grow beyond this size
*
* \author Rory
* \date 28/07/2018
*/
template <typename GradientSumT, size_t kStopGrowingSize = 1 << 26>
class DeviceHistogram {
private:
/*! \brief Map nidx to starting index of its histogram. */
std::map<int, size_t> nidx_map_;
dh::device_vector<typename GradientSumT::ValueT> data_;
int n_bins_;
int device_id_;
static constexpr size_t kNumItemsInGradientSum =
sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT);
static_assert(kNumItemsInGradientSum == 2,
"Number of items in gradient type should be 2.");
public:
void Init(int device_id, int n_bins) {
this->n_bins_ = n_bins;
this->device_id_ = device_id;
}
void Reset() {
auto d_data = data_.data().get();
dh::LaunchN(device_id_, data_.size(),
[=] __device__(size_t idx) { d_data[idx] = 0.0f; });
nidx_map_.clear();
}
bool HistogramExists(int nidx) const {
return nidx_map_.find(nidx) != nidx_map_.cend();
}
int Bins() const {
return n_bins_;
}
size_t HistogramSize() const {
return n_bins_ * kNumItemsInGradientSum;
}
dh::device_vector<typename GradientSumT::ValueT>& Data() {
return data_;
}
void AllocateHistogram(int nidx) {
if (HistogramExists(nidx)) return;
// Number of items currently used in data
const size_t used_size = nidx_map_.size() * HistogramSize();
const size_t new_used_size = used_size + HistogramSize();
if (data_.size() >= kStopGrowingSize) {
// Recycle histogram memory
if (new_used_size <= data_.size()) {
// no need to remove old node, just insert the new one.
nidx_map_[nidx] = used_size;
        // the assigned slot is zeroed below
} else {
std::pair<int, size_t> old_entry = *nidx_map_.begin();
nidx_map_.erase(old_entry.first);
nidx_map_[nidx] = old_entry.second;
}
// Zero recycled memory
auto d_data = data_.data().get() + nidx_map_[nidx];
dh::LaunchN(device_id_, n_bins_ * 2,
[=] __device__(size_t idx) { d_data[idx] = 0.0f; });
} else {
// Append new node histogram
nidx_map_[nidx] = used_size;
// Check there is enough memory for another histogram node
if (data_.size() < new_used_size + HistogramSize()) {
size_t new_required_memory =
            std::max(data_.size() * 2, HistogramSize());
data_.resize(new_required_memory);
}
}
CHECK_GE(data_.size(), nidx_map_.size() * HistogramSize());
}
/**
* \summary Return pointer to histogram memory for a given node.
* \param nidx Tree node index.
* \return hist pointer.
*/
common::Span<GradientSumT> GetNodeHistogram(int nidx) {
CHECK(this->HistogramExists(nidx));
auto ptr = data_.data().get() + nidx_map_[nidx];
return common::Span<GradientSumT>(
reinterpret_cast<GradientSumT*>(ptr), n_bins_);
}
};
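// Minimal usage sketch (mirrors how GPUHistMakerDevice below drives this
// class; shown for orientation only):
//   DeviceHistogram<GradientSumT> hist;
//   hist.Init(device_id, page->Cuts().TotalBins());  // once per device
//   hist.AllocateHistogram(nidx);                    // per tree node
//   auto bins = hist.GetNodeHistogram(nidx);         // Span of n_bins entries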
struct CalcWeightTrainParam {
float min_child_weight;
float reg_alpha;
float reg_lambda;
float max_delta_step;
float learning_rate;
XGBOOST_DEVICE explicit CalcWeightTrainParam(const TrainParam& p)
: min_child_weight(p.min_child_weight),
reg_alpha(p.reg_alpha),
reg_lambda(p.reg_lambda),
max_delta_step(p.max_delta_step),
learning_rate(p.learning_rate) {}
};
// Manage memory for a single GPU
template <typename GradientSumT>
struct GPUHistMakerDevice {
int device_id;
EllpackPageImpl* page;
BatchParam batch_param;
std::unique_ptr<RowPartitioner> row_partitioner;
DeviceHistogram<GradientSumT> hist{};
/*! \brief Gradient pair for each row. */
common::Span<GradientPair> gpair;
dh::caching_device_vector<int> monotone_constraints;
dh::caching_device_vector<bst_float> prediction_cache;
/*! \brief Sum gradient for each node. */
std::vector<GradientPair> host_node_sum_gradients;
dh::caching_device_vector<GradientPair> node_sum_gradients;
bst_uint n_rows;
TrainParam param;
bool deterministic_histogram;
GradientSumT histogram_rounding;
dh::PinnedMemory pinned_memory;
std::vector<hipStream_t> streams{};
common::Monitor monitor;
std::vector<ValueConstraint> node_value_constraints;
common::ColumnSampler column_sampler;
FeatureInteractionConstraintDevice interaction_constraints;
using ExpandQueue =
std::priority_queue<ExpandEntry, std::vector<ExpandEntry>,
std::function<bool(ExpandEntry, ExpandEntry)>>;
std::unique_ptr<ExpandQueue> qexpand;
std::unique_ptr<GradientBasedSampler> sampler;
GPUHistMakerDevice(int _device_id,
EllpackPageImpl* _page,
bst_uint _n_rows,
TrainParam _param,
uint32_t column_sampler_seed,
uint32_t n_features,
bool deterministic_histogram,
BatchParam _batch_param)
: device_id(_device_id),
page(_page),
n_rows(_n_rows),
param(std::move(_param)),
column_sampler(column_sampler_seed),
interaction_constraints(param, n_features),
deterministic_histogram{deterministic_histogram},
batch_param(_batch_param) {
sampler.reset(new GradientBasedSampler(page,
n_rows,
batch_param,
param.subsample,
param.sampling_method));
monitor.Init(std::string("GPUHistMakerDevice") + std::to_string(device_id));
}
void InitHistogram();
~GPUHistMakerDevice() { // NOLINT
dh::safe_cuda(hipSetDevice(device_id));
for (auto& stream : streams) {
dh::safe_cuda(hipStreamDestroy(stream));
}
}
// Get vector of at least n initialised streams
std::vector<hipStream_t>& GetStreams(int n) {
if (n > streams.size()) {
for (auto& stream : streams) {
dh::safe_cuda(hipStreamDestroy(stream));
}
streams.clear();
streams.resize(n);
for (auto& stream : streams) {
dh::safe_cuda(hipStreamCreate(&stream));
}
}
return streams;
}
// Reset values for each update iteration
// Note that the column sampler must be passed by value because it is not
// thread safe
void Reset(HostDeviceVector<GradientPair>* dh_gpair, DMatrix* dmat, int64_t num_columns) {
if (param.grow_policy == TrainParam::kLossGuide) {
qexpand.reset(new ExpandQueue(LossGuide));
} else {
qexpand.reset(new ExpandQueue(DepthWise));
}
this->column_sampler.Init(num_columns, param.colsample_bynode,
param.colsample_bylevel, param.colsample_bytree);
dh::safe_cuda(hipSetDevice(device_id));
this->interaction_constraints.Reset();
std::fill(host_node_sum_gradients.begin(), host_node_sum_gradients.end(),
GradientPair());
auto sample = sampler->Sample(dh_gpair->DeviceSpan(), dmat);
n_rows = sample.sample_rows;
page = sample.page;
gpair = sample.gpair;
if (deterministic_histogram) {
histogram_rounding = CreateRoundingFactor<GradientSumT>(this->gpair);
} else {
histogram_rounding = GradientSumT{0.0, 0.0};
}
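    // Rounding the gradients to a fixed factor before accumulation makes the
    // device-side atomic additions insensitive to summation order, so repeated
    // runs build bit-identical histograms (see the deterministic_histogram
    // parameter above).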
row_partitioner.reset(); // Release the device memory first before reallocating
row_partitioner.reset(new RowPartitioner(device_id, n_rows));
hist.Reset();
}
std::vector<DeviceSplitCandidate> EvaluateSplits(
std::vector<int> nidxs, const RegTree& tree,
size_t num_columns) {
auto result_all = pinned_memory.GetSpan<DeviceSplitCandidate>(nidxs.size());
// Work out cub temporary memory requirement
GPUTrainingParam gpu_param(param);
DeviceSplitCandidateReduceOp op(gpu_param);
dh::TemporaryArray<DeviceSplitCandidate> d_result_all(nidxs.size());
dh::TemporaryArray<DeviceSplitCandidate> split_candidates_all(nidxs.size()*num_columns);
auto& streams = this->GetStreams(nidxs.size());
for (auto i = 0ull; i < nidxs.size(); i++) {
auto nidx = nidxs[i];
auto p_feature_set = column_sampler.GetFeatureSet(tree.GetDepth(nidx));
p_feature_set->SetDevice(device_id);
common::Span<bst_feature_t> d_sampled_features =
p_feature_set->DeviceSpan();
common::Span<bst_feature_t> d_feature_set =
interaction_constraints.Query(d_sampled_features, nidx);
common::Span<DeviceSplitCandidate> d_split_candidates(
split_candidates_all.data().get() + i * num_columns,
d_feature_set.size());
DeviceNodeStats node(host_node_sum_gradients[nidx], nidx, param);
common::Span<DeviceSplitCandidate> d_result(d_result_all.data().get() + i, 1);
if (d_feature_set.empty()) {
// Acting as a device side constructor for DeviceSplitCandidate.
// DeviceSplitCandidate::IsValid is false so that ApplySplit can reject this
// candidate.
auto worst_candidate = DeviceSplitCandidate();
dh::safe_cuda(hipMemcpyAsync(d_result.data(), &worst_candidate,
sizeof(DeviceSplitCandidate),
hipMemcpyHostToDevice));
continue;
}
// One block for each feature
uint32_t constexpr kBlockThreads = 256;
dh::LaunchKernel {uint32_t(d_feature_set.size()), kBlockThreads, 0, streams[i]} (
EvaluateSplitKernel<kBlockThreads, GradientSumT>,
hist.GetNodeHistogram(nidx), d_feature_set, node, page->GetDeviceAccessor(device_id),
gpu_param, d_split_candidates, node_value_constraints[nidx],
dh::ToSpan(monotone_constraints));
// Reduce over features to find best feature
size_t cub_bytes = 0;
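      // DeviceReduce::Reduce follows the usual CUB two-phase pattern: the
      // first call with a null workspace only reports the required temporary
      // storage size in cub_bytes; the second call performs the reduction on
      // streams[i].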
hipcub::DeviceReduce::Reduce(nullptr,
cub_bytes, d_split_candidates.data(),
d_result.data(), d_split_candidates.size(), op,
DeviceSplitCandidate(), streams[i]);
dh::TemporaryArray<char> cub_temp(cub_bytes);
hipcub::DeviceReduce::Reduce(reinterpret_cast<void*>(cub_temp.data().get()),
cub_bytes, d_split_candidates.data(),
d_result.data(), d_split_candidates.size(), op,
DeviceSplitCandidate(), streams[i]);
}
dh::safe_cuda(hipMemcpy(result_all.data(), d_result_all.data().get(),
sizeof(DeviceSplitCandidate) * d_result_all.size(),
hipMemcpyDeviceToHost));
return std::vector<DeviceSplitCandidate>(result_all.begin(), result_all.end());
}
void BuildHist(int nidx) {
hist.AllocateHistogram(nidx);
auto d_node_hist = hist.GetNodeHistogram(nidx);
auto d_ridx = row_partitioner->GetRows(nidx);
BuildGradientHistogram(page->GetDeviceAccessor(device_id), gpair, d_ridx, d_node_hist,
histogram_rounding);
}
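  // Histogram subtraction trick: every training row of a parent node lands in
  // exactly one of its children, so parent_hist[bin] = left_hist[bin] +
  // right_hist[bin]. Given the parent and one child, the sibling's histogram
  // can therefore be obtained bin-wise by subtraction instead of another pass
  // over the data, which is what SubtractionTrick below does.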
void SubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
auto d_node_hist_parent = hist.GetNodeHistogram(nidx_parent);
auto d_node_hist_histogram = hist.GetNodeHistogram(nidx_histogram);
auto d_node_hist_subtraction = hist.GetNodeHistogram(nidx_subtraction);
dh::LaunchN(device_id, page->Cuts().TotalBins(), [=] __device__(size_t idx) {
d_node_hist_subtraction[idx] =
d_node_hist_parent[idx] - d_node_hist_histogram[idx];
});
}
bool CanDoSubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
// Make sure histograms are already allocated
hist.AllocateHistogram(nidx_subtraction);
return hist.HistogramExists(nidx_histogram) &&
hist.HistogramExists(nidx_parent);
}
void UpdatePosition(int nidx, RegTree::Node split_node) {
auto d_matrix = page->GetDeviceAccessor(device_id);
row_partitioner->UpdatePosition(
nidx, split_node.LeftChild(), split_node.RightChild(),
[=] __device__(bst_uint ridx) {
// given a row index, returns the node id it belongs to
bst_float cut_value =
d_matrix.GetFvalue(ridx, split_node.SplitIndex());
// Missing value
int new_position = 0;
if (isnan(cut_value)) {
new_position = split_node.DefaultChild();
} else {
if (cut_value <= split_node.SplitCond()) {
new_position = split_node.LeftChild();
} else {
new_position = split_node.RightChild();
}
}
return new_position;
});
}
// After tree update is finished, update the position of all training
// instances to their final leaf. This information is used later to update the
// prediction cache
void FinalisePosition(RegTree const* p_tree, DMatrix* p_fmat) {
dh::TemporaryArray<RegTree::Node> d_nodes(p_tree->GetNodes().size());
dh::safe_cuda(hipMemcpy(d_nodes.data().get(), p_tree->GetNodes().data(),
d_nodes.size() * sizeof(RegTree::Node),
hipMemcpyHostToDevice));
if (row_partitioner->GetRows().size() != p_fmat->Info().num_row_) {
row_partitioner.reset(); // Release the device memory first before reallocating
row_partitioner.reset(new RowPartitioner(device_id, p_fmat->Info().num_row_));
}
if (page->n_rows == p_fmat->Info().num_row_) {
FinalisePositionInPage(page, dh::ToSpan(d_nodes));
} else {
for (auto& batch : p_fmat->GetBatches<EllpackPage>(batch_param)) {
FinalisePositionInPage(batch.Impl(), dh::ToSpan(d_nodes));
}
}
}
void FinalisePositionInPage(EllpackPageImpl* page, const common::Span<RegTree::Node> d_nodes) {
auto d_matrix = page->GetDeviceAccessor(device_id);
row_partitioner->FinalisePosition(
[=] __device__(size_t row_id, int position) {
if (!d_matrix.IsInRange(row_id)) {
return RowPartitioner::kIgnoredTreePosition;
}
auto node = d_nodes[position];
while (!node.IsLeaf()) {
bst_float element = d_matrix.GetFvalue(row_id, node.SplitIndex());
// Missing value
if (isnan(element)) {
position = node.DefaultChild();
} else {
if (element <= node.SplitCond()) {
position = node.LeftChild();
} else {
position = node.RightChild();
}
}
node = d_nodes[position];
}
return position;
});
}
void UpdatePredictionCache(bst_float* out_preds_d) {
dh::safe_cuda(hipSetDevice(device_id));
auto d_ridx = row_partitioner->GetRows();
if (prediction_cache.size() != d_ridx.size()) {
prediction_cache.resize(d_ridx.size());
dh::safe_cuda(hipMemcpyAsync(prediction_cache.data().get(), out_preds_d,
prediction_cache.size() * sizeof(bst_float),
hipMemcpyDefault));
}
CalcWeightTrainParam param_d(param);
dh::safe_cuda(
hipMemcpyAsync(node_sum_gradients.data().get(), host_node_sum_gradients.data(),
sizeof(GradientPair) * host_node_sum_gradients.size(),
hipMemcpyHostToDevice));
auto d_position = row_partitioner->GetPosition();
auto d_node_sum_gradients = node_sum_gradients.data().get();
auto d_prediction_cache = prediction_cache.data().get();
dh::LaunchN(
device_id, prediction_cache.size(), [=] __device__(int local_idx) {
int pos = d_position[local_idx];
bst_float weight = CalcWeight(param_d, d_node_sum_gradients[pos]);
d_prediction_cache[d_ridx[local_idx]] +=
weight * param_d.learning_rate;
});
dh::safe_cuda(hipMemcpy(
out_preds_d, prediction_cache.data().get(),
prediction_cache.size() * sizeof(bst_float), hipMemcpyDefault));
row_partitioner.reset();
}
void AllReduceHist(int nidx, dh::AllReducer* reducer) {
monitor.StartCuda("AllReduce");
auto d_node_hist = hist.GetNodeHistogram(nidx).data();
reducer->AllReduceSum(
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
page->Cuts().TotalBins() * (sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT)));
reducer->Synchronize();
monitor.StopCuda("AllReduce");
}
/**
* \brief Build GPU local histograms for the left and right child of some parent node
*/
void BuildHistLeftRight(const ExpandEntry &candidate, int nidx_left,
int nidx_right, dh::AllReducer* reducer) {
auto build_hist_nidx = nidx_left;
auto subtraction_trick_nidx = nidx_right;
// Decide whether to build the left histogram or right histogram
// Use sum of Hessian as a heuristic to select node with fewest training instances
bool fewer_right = candidate.split.right_sum.GetHess() < candidate.split.left_sum.GetHess();
if (fewer_right) {
std::swap(build_hist_nidx, subtraction_trick_nidx);
}
this->BuildHist(build_hist_nidx);
this->AllReduceHist(build_hist_nidx, reducer);
// Check whether we can use the subtraction trick to calculate the other
bool do_subtraction_trick = this->CanDoSubtractionTrick(
candidate.nid, build_hist_nidx, subtraction_trick_nidx);
if (do_subtraction_trick) {
// Calculate other histogram using subtraction trick
this->SubtractionTrick(candidate.nid, build_hist_nidx,
subtraction_trick_nidx);
} else {
// Calculate other histogram manually
this->BuildHist(subtraction_trick_nidx);
this->AllReduceHist(subtraction_trick_nidx, reducer);
}
}
void ApplySplit(const ExpandEntry& candidate, RegTree* p_tree) {
RegTree& tree = *p_tree;
GradStats left_stats{};
left_stats.Add(candidate.split.left_sum);
GradStats right_stats{};
right_stats.Add(candidate.split.right_sum);
GradStats parent_sum{};
parent_sum.Add(left_stats);
parent_sum.Add(right_stats);
node_value_constraints.resize(tree.GetNodes().size());
auto base_weight = node_value_constraints[candidate.nid].CalcWeight(param, parent_sum);
auto left_weight =
node_value_constraints[candidate.nid].CalcWeight(param, left_stats)*param.learning_rate;
auto right_weight =
node_value_constraints[candidate.nid].CalcWeight(param, right_stats)*param.learning_rate;
tree.ExpandNode(candidate.nid, candidate.split.findex,
candidate.split.fvalue, candidate.split.dir == kLeftDir,
base_weight, left_weight, right_weight,
candidate.split.loss_chg, parent_sum.sum_hess,
left_stats.GetHess(), right_stats.GetHess());
// Set up child constraints
node_value_constraints.resize(tree.GetNodes().size());
node_value_constraints[candidate.nid].SetChild(
param, tree[candidate.nid].SplitIndex(), left_stats, right_stats,
&node_value_constraints[tree[candidate.nid].LeftChild()],
&node_value_constraints[tree[candidate.nid].RightChild()]);
host_node_sum_gradients[tree[candidate.nid].LeftChild()] =
candidate.split.left_sum;
host_node_sum_gradients[tree[candidate.nid].RightChild()] =
candidate.split.right_sum;
interaction_constraints.Split(candidate.nid, tree[candidate.nid].SplitIndex(),
tree[candidate.nid].LeftChild(),
tree[candidate.nid].RightChild());
}
void InitRoot(RegTree* p_tree, dh::AllReducer* reducer, int64_t num_columns) {
constexpr bst_node_t kRootNIdx = 0;
dh::XGBCachingDeviceAllocator<char> alloc;
GradientPair root_sum = thrust::reduce(
thrust::hip::par(alloc),
thrust::device_ptr<GradientPair const>(gpair.data()),
thrust::device_ptr<GradientPair const>(gpair.data() + gpair.size()));
dh::safe_cuda(hipMemcpyAsync(node_sum_gradients.data().get(), &root_sum, sizeof(root_sum),
hipMemcpyHostToDevice));
reducer->AllReduceSum(
reinterpret_cast<float*>(node_sum_gradients.data().get()),
reinterpret_cast<float*>(node_sum_gradients.data().get()), 2);
reducer->Synchronize();
dh::safe_cuda(hipMemcpyAsync(host_node_sum_gradients.data(),
node_sum_gradients.data().get(), sizeof(GradientPair),
hipMemcpyDeviceToHost));
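    // host_node_sum_gradients[0] now mirrors the root (grad, hess) pair summed
    // over the sampled rows and all-reduced across workers; it is what
    // EvaluateSplits and CalcWeight below read for the root node.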
this->BuildHist(kRootNIdx);
this->AllReduceHist(kRootNIdx, reducer);
// Remember root stats
p_tree->Stat(kRootNIdx).sum_hess = host_node_sum_gradients[kRootNIdx].GetHess();
auto weight = CalcWeight(param, host_node_sum_gradients[kRootNIdx]);
p_tree->Stat(kRootNIdx).base_weight = weight;
(*p_tree)[kRootNIdx].SetLeaf(param.learning_rate * weight);
// Initialise root constraint
node_value_constraints.resize(p_tree->GetNodes().size());
// Generate first split
auto split = this->EvaluateSplits({kRootNIdx}, *p_tree, num_columns);
qexpand->push(
ExpandEntry(kRootNIdx, p_tree->GetDepth(kRootNIdx), split.at(0), 0));
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair_all, DMatrix* p_fmat,
RegTree* p_tree, dh::AllReducer* reducer) {
auto& tree = *p_tree;
monitor.StartCuda("Reset");
this->Reset(gpair_all, p_fmat, p_fmat->Info().num_col_);
monitor.StopCuda("Reset");
monitor.StartCuda("InitRoot");
this->InitRoot(p_tree, reducer, p_fmat->Info().num_col_);
monitor.StopCuda("InitRoot");
auto timestamp = qexpand->size();
auto num_leaves = 1;
while (!qexpand->empty()) {
ExpandEntry candidate = qexpand->top();
qexpand->pop();
if (!candidate.IsValid(param, num_leaves)) {
continue;
}
this->ApplySplit(candidate, p_tree);
num_leaves++;
int left_child_nidx = tree[candidate.nid].LeftChild();
int right_child_nidx = tree[candidate.nid].RightChild();
// Only create child entries if needed
if (ExpandEntry::ChildIsValid(param, tree.GetDepth(left_child_nidx),
num_leaves)) {
monitor.StartCuda("UpdatePosition");
this->UpdatePosition(candidate.nid, (*p_tree)[candidate.nid]);
monitor.StopCuda("UpdatePosition");
monitor.StartCuda("BuildHist");
this->BuildHistLeftRight(candidate, left_child_nidx, right_child_nidx, reducer);
monitor.StopCuda("BuildHist");
monitor.StartCuda("EvaluateSplits");
auto splits = this->EvaluateSplits({left_child_nidx, right_child_nidx},
*p_tree, p_fmat->Info().num_col_);
monitor.StopCuda("EvaluateSplits");
qexpand->push(ExpandEntry(left_child_nidx,
tree.GetDepth(left_child_nidx), splits.at(0),
timestamp++));
qexpand->push(ExpandEntry(right_child_nidx,
tree.GetDepth(right_child_nidx),
splits.at(1), timestamp++));
}
}
monitor.StartCuda("FinalisePosition");
this->FinalisePosition(p_tree, p_fmat);
monitor.StopCuda("FinalisePosition");
}
};
template <typename GradientSumT>
inline void GPUHistMakerDevice<GradientSumT>::InitHistogram() {
if (!param.monotone_constraints.empty()) {
// Copy assigning an empty vector causes an exception in MSVC debug builds
monotone_constraints = param.monotone_constraints;
}
host_node_sum_gradients.resize(param.MaxNodes());
node_sum_gradients.resize(param.MaxNodes());
// Init histogram
hist.Init(device_id, page->Cuts().TotalBins());
}
template <typename GradientSumT>
class GPUHistMakerSpecialised {
public:
GPUHistMakerSpecialised() = default;
void Configure(const Args& args, GenericParameter const* generic_param) {
param_.UpdateAllowUnknown(args);
generic_param_ = generic_param;
hist_maker_param_.UpdateAllowUnknown(args);
dh::CheckComputeCapability();
monitor_.Init("updater_gpu_hist");
}
~GPUHistMakerSpecialised() { // NOLINT
dh::GlobalMemoryLogger().Log();
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) {
monitor_.StartCuda("Update");
// rescale learning rate according to size of trees
float lr = param_.learning_rate;
param_.learning_rate = lr / trees.size();
ValueConstraint::Init(¶m_, dmat->Info().num_col_);
// build tree
try {
for (xgboost::RegTree* tree : trees) {
this->UpdateTree(gpair, dmat, tree);
if (hist_maker_param_.debug_synchronize) {
this->CheckTreesSynchronized(tree);
}
}
dh::safe_cuda(hipGetLastError());
} catch (const std::exception& e) {
LOG(FATAL) << "Exception in gpu_hist: " << e.what() << std::endl;
}
param_.learning_rate = lr;
monitor_.StopCuda("Update");
}
void InitDataOnce(DMatrix* dmat) {
device_ = generic_param_->gpu_id;
CHECK_GE(device_, 0) << "Must have at least one device";
info_ = &dmat->Info();
reducer_.Init({device_}); // NOLINT
// Synchronise the column sampling seed
uint32_t column_sampling_seed = common::GlobalRandom()();
rabit::Broadcast(&column_sampling_seed, sizeof(column_sampling_seed), 0);
BatchParam batch_param{
device_,
param_.max_bin,
generic_param_->gpu_page_size
};
auto page = (*dmat->GetBatches<EllpackPage>(batch_param).begin()).Impl();
dh::safe_cuda(hipSetDevice(device_));
maker.reset(new GPUHistMakerDevice<GradientSumT>(device_,
page,
info_->num_row_,
param_,
column_sampling_seed,
info_->num_col_,
hist_maker_param_.deterministic_histogram,
batch_param));
monitor_.StartCuda("InitHistogram");
dh::safe_cuda(hipSetDevice(device_));
maker->InitHistogram();
monitor_.StopCuda("InitHistogram");
p_last_fmat_ = dmat;
initialised_ = true;
}
void InitData(DMatrix* dmat) {
if (!initialised_) {
monitor_.StartCuda("InitDataOnce");
this->InitDataOnce(dmat);
monitor_.StopCuda("InitDataOnce");
}
}
// Only call this method for testing
void CheckTreesSynchronized(RegTree* local_tree) const {
std::string s_model;
common::MemoryBufferStream fs(&s_model);
int rank = rabit::GetRank();
if (rank == 0) {
local_tree->Save(&fs);
}
fs.Seek(0);
rabit::Broadcast(&s_model, 0);
RegTree reference_tree {}; // rank 0 tree
reference_tree.Load(&fs);
CHECK(*local_tree == reference_tree);
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat,
RegTree* p_tree) {
monitor_.StartCuda("InitData");
this->InitData(p_fmat);
monitor_.StopCuda("InitData");
gpair->SetDevice(device_);
maker->UpdateTree(gpair, p_fmat, p_tree, &reducer_);
}
bool UpdatePredictionCache(const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) {
if (maker == nullptr || p_last_fmat_ == nullptr || p_last_fmat_ != data) {
return false;
}
monitor_.StartCuda("UpdatePredictionCache");
p_out_preds->SetDevice(device_);
maker->UpdatePredictionCache(p_out_preds->DevicePointer());
monitor_.StopCuda("UpdatePredictionCache");
return true;
}
TrainParam param_; // NOLINT
MetaInfo* info_{}; // NOLINT
std::unique_ptr<GPUHistMakerDevice<GradientSumT>> maker; // NOLINT
private:
bool initialised_ { false };
GPUHistMakerTrainParam hist_maker_param_;
GenericParameter const* generic_param_;
dh::AllReducer reducer_;
DMatrix* p_last_fmat_ { nullptr };
int device_{-1};
common::Monitor monitor_;
};
class GPUHistMaker : public TreeUpdater {
public:
void Configure(const Args& args) override {
// Used in test to count how many configurations are performed
LOG(DEBUG) << "[GPU Hist]: Configure";
hist_maker_param_.UpdateAllowUnknown(args);
    // The passed-in args can be empty; if we simply purged the old maker
    // without preserving its parameters, we could not call Update on it
    // afterwards.
TrainParam param;
if (float_maker_) {
param = float_maker_->param_;
} else if (double_maker_) {
param = double_maker_->param_;
}
if (hist_maker_param_.single_precision_histogram) {
float_maker_.reset(new GPUHistMakerSpecialised<GradientPair>());
float_maker_->param_ = param;
float_maker_->Configure(args, tparam_);
} else {
double_maker_.reset(new GPUHistMakerSpecialised<GradientPairPrecise>());
double_maker_->param_ = param;
double_maker_->Configure(args, tparam_);
}
}
void LoadConfig(Json const& in) override {
auto const& config = get<Object const>(in);
FromJson(config.at("gpu_hist_train_param"), &this->hist_maker_param_);
if (hist_maker_param_.single_precision_histogram) {
float_maker_.reset(new GPUHistMakerSpecialised<GradientPair>());
FromJson(config.at("train_param"), &float_maker_->param_);
} else {
double_maker_.reset(new GPUHistMakerSpecialised<GradientPairPrecise>());
FromJson(config.at("train_param"), &double_maker_->param_);
}
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["gpu_hist_train_param"] = ToJson(hist_maker_param_);
if (hist_maker_param_.single_precision_histogram) {
out["train_param"] = ToJson(float_maker_->param_);
} else {
out["train_param"] = ToJson(double_maker_->param_);
}
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) override {
if (hist_maker_param_.single_precision_histogram) {
float_maker_->Update(gpair, dmat, trees);
} else {
double_maker_->Update(gpair, dmat, trees);
}
}
bool UpdatePredictionCache(
const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) override {
if (hist_maker_param_.single_precision_histogram) {
return float_maker_->UpdatePredictionCache(data, p_out_preds);
} else {
return double_maker_->UpdatePredictionCache(data, p_out_preds);
}
}
char const* Name() const override {
return "grow_gpu_hist";
}
private:
GPUHistMakerTrainParam hist_maker_param_;
std::unique_ptr<GPUHistMakerSpecialised<GradientPair>> float_maker_;
std::unique_ptr<GPUHistMakerSpecialised<GradientPairPrecise>> double_maker_;
};
#if !defined(GTEST_TEST)
XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist")
.describe("Grow tree with GPU.")
.set_body([]() { return new GPUHistMaker(); });
#endif // !defined(GTEST_TEST)
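// The registration above exposes this updater under the name "grow_gpu_hist".
// A caller obtains an instance through the TreeUpdater factory, roughly
// (sketch; "generic_param" and "args" stand in for the caller's objects):
//   std::unique_ptr<TreeUpdater> up{TreeUpdater::Create("grow_gpu_hist", &generic_param)};
//   up->Configure(args);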
} // namespace tree
} // namespace xgboost
|
7874281a277158b02ce6f3038eaeab116b290383.cu
|
/*!
* Copyright 2017-2020 XGBoost contributors
*/
#include <thrust/copy.h>
#include <thrust/reduce.h>
#include <xgboost/tree_updater.h>
#include <algorithm>
#include <cmath>
#include <memory>
#include <limits>
#include <queue>
#include <utility>
#include <vector>
#include "xgboost/host_device_vector.h"
#include "xgboost/parameter.h"
#include "xgboost/span.h"
#include "xgboost/json.h"
#include "../common/io.h"
#include "../common/device_helpers.cuh"
#include "../common/hist_util.h"
#include "../common/timer.h"
#include "../data/ellpack_page.cuh"
#include "param.h"
#include "updater_gpu_common.cuh"
#include "constraints.cuh"
#include "gpu_hist/gradient_based_sampler.cuh"
#include "gpu_hist/row_partitioner.cuh"
#include "gpu_hist/histogram.cuh"
namespace xgboost {
namespace tree {
#if !defined(GTEST_TEST)
DMLC_REGISTRY_FILE_TAG(updater_gpu_hist);
#endif // !defined(GTEST_TEST)
// training parameters specific to this algorithm
struct GPUHistMakerTrainParam
: public XGBoostParameter<GPUHistMakerTrainParam> {
bool single_precision_histogram;
bool deterministic_histogram;
bool debug_synchronize;
// declare parameters
DMLC_DECLARE_PARAMETER(GPUHistMakerTrainParam) {
DMLC_DECLARE_FIELD(single_precision_histogram).set_default(false).describe(
"Use single precision to build histograms.");
DMLC_DECLARE_FIELD(deterministic_histogram).set_default(true).describe(
"Pre-round the gradient for obtaining deterministic gradient histogram.");
DMLC_DECLARE_FIELD(debug_synchronize).set_default(false).describe(
"Check if all distributed tree are identical after tree construction.");
}
};
#if !defined(GTEST_TEST)
DMLC_REGISTER_PARAMETER(GPUHistMakerTrainParam);
#endif // !defined(GTEST_TEST)
struct ExpandEntry {
int nid;
int depth;
DeviceSplitCandidate split;
uint64_t timestamp;
ExpandEntry() = default;
ExpandEntry(int nid, int depth, DeviceSplitCandidate split,
uint64_t timestamp)
: nid(nid), depth(depth), split(std::move(split)), timestamp(timestamp) {}
bool IsValid(const TrainParam& param, int num_leaves) const {
if (split.loss_chg <= kRtEps) return false;
if (split.left_sum.GetHess() == 0 || split.right_sum.GetHess() == 0) {
return false;
}
if (split.loss_chg < param.min_split_loss) { return false; }
if (param.max_depth > 0 && depth == param.max_depth) {return false; }
if (param.max_leaves > 0 && num_leaves == param.max_leaves) { return false; }
return true;
}
static bool ChildIsValid(const TrainParam& param, int depth, int num_leaves) {
if (param.max_depth > 0 && depth >= param.max_depth) return false;
if (param.max_leaves > 0 && num_leaves >= param.max_leaves) return false;
return true;
}
friend std::ostream& operator<<(std::ostream& os, const ExpandEntry& e) {
os << "ExpandEntry: \n";
os << "nidx: " << e.nid << "\n";
os << "depth: " << e.depth << "\n";
os << "loss: " << e.split.loss_chg << "\n";
os << "left_sum: " << e.split.left_sum << "\n";
os << "right_sum: " << e.split.right_sum << "\n";
return os;
}
};
inline static bool DepthWise(const ExpandEntry& lhs, const ExpandEntry& rhs) {
if (lhs.depth == rhs.depth) {
return lhs.timestamp > rhs.timestamp; // favor small timestamp
} else {
return lhs.depth > rhs.depth; // favor small depth
}
}
inline static bool LossGuide(const ExpandEntry& lhs, const ExpandEntry& rhs) {
if (lhs.split.loss_chg == rhs.split.loss_chg) {
return lhs.timestamp > rhs.timestamp; // favor small timestamp
} else {
return lhs.split.loss_chg < rhs.split.loss_chg; // favor large loss_chg
}
}
// With constraints
template <typename GradientPairT>
XGBOOST_DEVICE float inline LossChangeMissing(
const GradientPairT& scan, const GradientPairT& missing, const GradientPairT& parent_sum,
const float& parent_gain, const GPUTrainingParam& param, int constraint,
const ValueConstraint& value_constraint,
bool& missing_left_out) { // NOLINT
float missing_left_gain = value_constraint.CalcSplitGain(
param, constraint, GradStats(scan + missing),
GradStats(parent_sum - (scan + missing)));
float missing_right_gain = value_constraint.CalcSplitGain(
param, constraint, GradStats(scan), GradStats(parent_sum - scan));
if (missing_left_gain >= missing_right_gain) {
missing_left_out = true;
return missing_left_gain - parent_gain;
} else {
missing_left_out = false;
return missing_right_gain - parent_gain;
}
}
/*!
 * \brief Sum the gradient histogram bins of a single feature across the thread block.
 *
 * \tparam ReduceT BlockReduce type.
 * \tparam TempStorageT Cub shared memory type.
 *
 * \param feature_histogram Histogram bins belonging to the feature being reduced.
 * \param temp_storage Shared memory for intermediate result.
 */
template <int BLOCK_THREADS, typename ReduceT, typename TempStorageT, typename GradientSumT>
__device__ GradientSumT ReduceFeature(common::Span<const GradientSumT> feature_histogram,
TempStorageT* temp_storage) {
__shared__ cub::Uninitialized<GradientSumT> uninitialized_sum;
GradientSumT& shared_sum = uninitialized_sum.Alias();
GradientSumT local_sum = GradientSumT();
// For loop sums features into one block size
auto begin = feature_histogram.data();
auto end = begin + feature_histogram.size();
for (auto itr = begin; itr < end; itr += BLOCK_THREADS) {
bool thread_active = itr + threadIdx.x < end;
// Scan histogram
GradientSumT bin = thread_active ? *(itr + threadIdx.x) : GradientSumT();
local_sum += bin;
}
local_sum = ReduceT(temp_storage->sum_reduce).Reduce(local_sum, cub::Sum());
// Reduction result is stored in thread 0.
if (threadIdx.x == 0) {
shared_sum = local_sum;
}
__syncthreads();
return shared_sum;
}
/*! \brief Find the thread with best gain. */
template <int BLOCK_THREADS, typename ReduceT, typename ScanT,
typename MaxReduceT, typename TempStorageT, typename GradientSumT>
__device__ void EvaluateFeature(
int fidx, common::Span<const GradientSumT> node_histogram,
const EllpackDeviceAccessor& matrix,
DeviceSplitCandidate* best_split, // shared memory storing best split
const DeviceNodeStats& node, const GPUTrainingParam& param,
TempStorageT* temp_storage, // temp memory for cub operations
int constraint, // monotonic_constraints
const ValueConstraint& value_constraint) {
// Use pointer from cut to indicate begin and end of bins for each feature.
  uint32_t gidx_begin = matrix.feature_segments[fidx];  // beginning bin
uint32_t gidx_end = matrix.feature_segments[fidx + 1]; // end bin for i^th feature
// Sum histogram bins for current feature
GradientSumT const feature_sum = ReduceFeature<BLOCK_THREADS, ReduceT>(
node_histogram.subspan(gidx_begin, gidx_end - gidx_begin), temp_storage);
GradientSumT const parent_sum = GradientSumT(node.sum_gradients);
GradientSumT const missing = parent_sum - feature_sum;
float const null_gain = -std::numeric_limits<bst_float>::infinity();
SumCallbackOp<GradientSumT> prefix_op =
SumCallbackOp<GradientSumT>();
for (int scan_begin = gidx_begin; scan_begin < gidx_end;
scan_begin += BLOCK_THREADS) {
bool thread_active = (scan_begin + threadIdx.x) < gidx_end;
// Gradient value for current bin.
GradientSumT bin =
thread_active ? node_histogram[scan_begin + threadIdx.x] : GradientSumT();
ScanT(temp_storage->scan).ExclusiveScan(bin, bin, cub::Sum(), prefix_op);
// Whether the gradient of missing values is put to the left side.
bool missing_left = true;
float gain = null_gain;
if (thread_active) {
gain = LossChangeMissing(bin, missing, parent_sum, node.root_gain, param,
constraint, value_constraint, missing_left);
}
__syncthreads();
// Find thread with best gain
cub::KeyValuePair<int, float> tuple(threadIdx.x, gain);
cub::KeyValuePair<int, float> best =
MaxReduceT(temp_storage->max_reduce).Reduce(tuple, cub::ArgMax());
__shared__ cub::KeyValuePair<int, float> block_max;
if (threadIdx.x == 0) {
block_max = best;
}
__syncthreads();
// Best thread updates split
if (threadIdx.x == block_max.key) {
int split_gidx = (scan_begin + threadIdx.x) - 1;
float fvalue;
if (split_gidx < static_cast<int>(gidx_begin)) {
fvalue = matrix.min_fvalue[fidx];
} else {
fvalue = matrix.gidx_fvalue_map[split_gidx];
}
GradientSumT left = missing_left ? bin + missing : bin;
GradientSumT right = parent_sum - left;
best_split->Update(gain, missing_left ? kLeftDir : kRightDir, fvalue,
fidx, GradientPair(left), GradientPair(right), param);
}
__syncthreads();
}
}
template <int BLOCK_THREADS, typename GradientSumT>
__global__ void EvaluateSplitKernel(
common::Span<const GradientSumT> node_histogram, // histogram for gradients
common::Span<const bst_feature_t> feature_set, // Selected features
DeviceNodeStats node,
xgboost::EllpackDeviceAccessor matrix,
GPUTrainingParam gpu_param,
common::Span<DeviceSplitCandidate> split_candidates, // resulting split
ValueConstraint value_constraint,
common::Span<int> d_monotonic_constraints) {
// KeyValuePair here used as threadIdx.x -> gain_value
using ArgMaxT = cub::KeyValuePair<int, float>;
using BlockScanT =
cub::BlockScan<GradientSumT, BLOCK_THREADS, cub::BLOCK_SCAN_WARP_SCANS>;
using MaxReduceT = cub::BlockReduce<ArgMaxT, BLOCK_THREADS>;
using SumReduceT = cub::BlockReduce<GradientSumT, BLOCK_THREADS>;
union TempStorage {
typename BlockScanT::TempStorage scan;
typename MaxReduceT::TempStorage max_reduce;
typename SumReduceT::TempStorage sum_reduce;
};
// Aligned && shared storage for best_split
__shared__ cub::Uninitialized<DeviceSplitCandidate> uninitialized_split;
DeviceSplitCandidate& best_split = uninitialized_split.Alias();
__shared__ TempStorage temp_storage;
if (threadIdx.x == 0) {
best_split = DeviceSplitCandidate();
}
__syncthreads();
// One block for each feature. Features are sampled, so fidx != blockIdx.x
int fidx = feature_set[blockIdx.x];
int constraint = d_monotonic_constraints[fidx];
EvaluateFeature<BLOCK_THREADS, SumReduceT, BlockScanT, MaxReduceT>(
fidx, node_histogram, matrix, &best_split, node, gpu_param, &temp_storage,
constraint, value_constraint);
__syncthreads();
if (threadIdx.x == 0) {
// Record best loss for each feature
split_candidates[blockIdx.x] = best_split;
}
}
/**
* \struct DeviceHistogram
*
* \summary Data storage for node histograms on device. Automatically expands.
*
* \tparam GradientSumT histogram entry type.
* \tparam kStopGrowingSize Do not grow beyond this size
*
* \author Rory
* \date 28/07/2018
*/
template <typename GradientSumT, size_t kStopGrowingSize = 1 << 26>
class DeviceHistogram {
private:
/*! \brief Map nidx to starting index of its histogram. */
std::map<int, size_t> nidx_map_;
dh::device_vector<typename GradientSumT::ValueT> data_;
int n_bins_;
int device_id_;
static constexpr size_t kNumItemsInGradientSum =
sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT);
static_assert(kNumItemsInGradientSum == 2,
"Number of items in gradient type should be 2.");
public:
void Init(int device_id, int n_bins) {
this->n_bins_ = n_bins;
this->device_id_ = device_id;
}
void Reset() {
auto d_data = data_.data().get();
dh::LaunchN(device_id_, data_.size(),
[=] __device__(size_t idx) { d_data[idx] = 0.0f; });
nidx_map_.clear();
}
bool HistogramExists(int nidx) const {
return nidx_map_.find(nidx) != nidx_map_.cend();
}
int Bins() const {
return n_bins_;
}
size_t HistogramSize() const {
return n_bins_ * kNumItemsInGradientSum;
}
dh::device_vector<typename GradientSumT::ValueT>& Data() {
return data_;
}
void AllocateHistogram(int nidx) {
if (HistogramExists(nidx)) return;
// Number of items currently used in data
const size_t used_size = nidx_map_.size() * HistogramSize();
const size_t new_used_size = used_size + HistogramSize();
if (data_.size() >= kStopGrowingSize) {
// Recycle histogram memory
if (new_used_size <= data_.size()) {
// no need to remove old node, just insert the new one.
nidx_map_[nidx] = used_size;
// memset histogram size in bytes
} else {
std::pair<int, size_t> old_entry = *nidx_map_.begin();
nidx_map_.erase(old_entry.first);
nidx_map_[nidx] = old_entry.second;
}
// Zero recycled memory
auto d_data = data_.data().get() + nidx_map_[nidx];
dh::LaunchN(device_id_, n_bins_ * 2,
[=] __device__(size_t idx) { d_data[idx] = 0.0f; });
} else {
// Append new node histogram
nidx_map_[nidx] = used_size;
// Check there is enough memory for another histogram node
if (data_.size() < new_used_size + HistogramSize()) {
size_t new_required_memory =
std::max(data_.size() * 2, HistogramSize());
data_.resize(new_required_memory);
}
}
CHECK_GE(data_.size(), nidx_map_.size() * HistogramSize());
}
/**
* \summary Return pointer to histogram memory for a given node.
* \param nidx Tree node index.
* \return hist pointer.
*/
common::Span<GradientSumT> GetNodeHistogram(int nidx) {
CHECK(this->HistogramExists(nidx));
auto ptr = data_.data().get() + nidx_map_[nidx];
return common::Span<GradientSumT>(
reinterpret_cast<GradientSumT*>(ptr), n_bins_);
}
};
struct CalcWeightTrainParam {
float min_child_weight;
float reg_alpha;
float reg_lambda;
float max_delta_step;
float learning_rate;
XGBOOST_DEVICE explicit CalcWeightTrainParam(const TrainParam& p)
: min_child_weight(p.min_child_weight),
reg_alpha(p.reg_alpha),
reg_lambda(p.reg_lambda),
max_delta_step(p.max_delta_step),
learning_rate(p.learning_rate) {}
};
// Manage memory for a single GPU
template <typename GradientSumT>
struct GPUHistMakerDevice {
int device_id;
EllpackPageImpl* page;
BatchParam batch_param;
std::unique_ptr<RowPartitioner> row_partitioner;
DeviceHistogram<GradientSumT> hist{};
/*! \brief Gradient pair for each row. */
common::Span<GradientPair> gpair;
dh::caching_device_vector<int> monotone_constraints;
dh::caching_device_vector<bst_float> prediction_cache;
/*! \brief Sum gradient for each node. */
std::vector<GradientPair> host_node_sum_gradients;
dh::caching_device_vector<GradientPair> node_sum_gradients;
bst_uint n_rows;
TrainParam param;
bool deterministic_histogram;
GradientSumT histogram_rounding;
dh::PinnedMemory pinned_memory;
std::vector<cudaStream_t> streams{};
common::Monitor monitor;
std::vector<ValueConstraint> node_value_constraints;
common::ColumnSampler column_sampler;
FeatureInteractionConstraintDevice interaction_constraints;
using ExpandQueue =
std::priority_queue<ExpandEntry, std::vector<ExpandEntry>,
std::function<bool(ExpandEntry, ExpandEntry)>>;
std::unique_ptr<ExpandQueue> qexpand;
std::unique_ptr<GradientBasedSampler> sampler;
GPUHistMakerDevice(int _device_id,
EllpackPageImpl* _page,
bst_uint _n_rows,
TrainParam _param,
uint32_t column_sampler_seed,
uint32_t n_features,
bool deterministic_histogram,
BatchParam _batch_param)
: device_id(_device_id),
page(_page),
n_rows(_n_rows),
param(std::move(_param)),
column_sampler(column_sampler_seed),
interaction_constraints(param, n_features),
deterministic_histogram{deterministic_histogram},
batch_param(_batch_param) {
sampler.reset(new GradientBasedSampler(page,
n_rows,
batch_param,
param.subsample,
param.sampling_method));
monitor.Init(std::string("GPUHistMakerDevice") + std::to_string(device_id));
}
void InitHistogram();
~GPUHistMakerDevice() { // NOLINT
dh::safe_cuda(cudaSetDevice(device_id));
for (auto& stream : streams) {
dh::safe_cuda(cudaStreamDestroy(stream));
}
}
// Get vector of at least n initialised streams
std::vector<cudaStream_t>& GetStreams(int n) {
if (n > streams.size()) {
for (auto& stream : streams) {
dh::safe_cuda(cudaStreamDestroy(stream));
}
streams.clear();
streams.resize(n);
for (auto& stream : streams) {
dh::safe_cuda(cudaStreamCreate(&stream));
}
}
return streams;
}
// Reset values for each update iteration
// Note that the column sampler must be passed by value because it is not
// thread safe
void Reset(HostDeviceVector<GradientPair>* dh_gpair, DMatrix* dmat, int64_t num_columns) {
if (param.grow_policy == TrainParam::kLossGuide) {
qexpand.reset(new ExpandQueue(LossGuide));
} else {
qexpand.reset(new ExpandQueue(DepthWise));
}
this->column_sampler.Init(num_columns, param.colsample_bynode,
param.colsample_bylevel, param.colsample_bytree);
dh::safe_cuda(cudaSetDevice(device_id));
this->interaction_constraints.Reset();
std::fill(host_node_sum_gradients.begin(), host_node_sum_gradients.end(),
GradientPair());
auto sample = sampler->Sample(dh_gpair->DeviceSpan(), dmat);
n_rows = sample.sample_rows;
page = sample.page;
gpair = sample.gpair;
if (deterministic_histogram) {
histogram_rounding = CreateRoundingFactor<GradientSumT>(this->gpair);
} else {
histogram_rounding = GradientSumT{0.0, 0.0};
}
row_partitioner.reset(); // Release the device memory first before reallocating
row_partitioner.reset(new RowPartitioner(device_id, n_rows));
hist.Reset();
}
std::vector<DeviceSplitCandidate> EvaluateSplits(
std::vector<int> nidxs, const RegTree& tree,
size_t num_columns) {
auto result_all = pinned_memory.GetSpan<DeviceSplitCandidate>(nidxs.size());
// Work out cub temporary memory requirement
GPUTrainingParam gpu_param(param);
DeviceSplitCandidateReduceOp op(gpu_param);
dh::TemporaryArray<DeviceSplitCandidate> d_result_all(nidxs.size());
dh::TemporaryArray<DeviceSplitCandidate> split_candidates_all(nidxs.size()*num_columns);
auto& streams = this->GetStreams(nidxs.size());
for (auto i = 0ull; i < nidxs.size(); i++) {
auto nidx = nidxs[i];
auto p_feature_set = column_sampler.GetFeatureSet(tree.GetDepth(nidx));
p_feature_set->SetDevice(device_id);
common::Span<bst_feature_t> d_sampled_features =
p_feature_set->DeviceSpan();
common::Span<bst_feature_t> d_feature_set =
interaction_constraints.Query(d_sampled_features, nidx);
common::Span<DeviceSplitCandidate> d_split_candidates(
split_candidates_all.data().get() + i * num_columns,
d_feature_set.size());
DeviceNodeStats node(host_node_sum_gradients[nidx], nidx, param);
common::Span<DeviceSplitCandidate> d_result(d_result_all.data().get() + i, 1);
if (d_feature_set.empty()) {
// Acting as a device side constructor for DeviceSplitCandidate.
// DeviceSplitCandidate::IsValid is false so that ApplySplit can reject this
// candidate.
auto worst_candidate = DeviceSplitCandidate();
dh::safe_cuda(cudaMemcpyAsync(d_result.data(), &worst_candidate,
sizeof(DeviceSplitCandidate),
cudaMemcpyHostToDevice));
continue;
}
// One block for each feature
uint32_t constexpr kBlockThreads = 256;
dh::LaunchKernel {uint32_t(d_feature_set.size()), kBlockThreads, 0, streams[i]} (
EvaluateSplitKernel<kBlockThreads, GradientSumT>,
hist.GetNodeHistogram(nidx), d_feature_set, node, page->GetDeviceAccessor(device_id),
gpu_param, d_split_candidates, node_value_constraints[nidx],
dh::ToSpan(monotone_constraints));
// Reduce over features to find best feature
size_t cub_bytes = 0;
cub::DeviceReduce::Reduce(nullptr,
cub_bytes, d_split_candidates.data(),
d_result.data(), d_split_candidates.size(), op,
DeviceSplitCandidate(), streams[i]);
dh::TemporaryArray<char> cub_temp(cub_bytes);
cub::DeviceReduce::Reduce(reinterpret_cast<void*>(cub_temp.data().get()),
cub_bytes, d_split_candidates.data(),
d_result.data(), d_split_candidates.size(), op,
DeviceSplitCandidate(), streams[i]);
}
dh::safe_cuda(cudaMemcpy(result_all.data(), d_result_all.data().get(),
sizeof(DeviceSplitCandidate) * d_result_all.size(),
cudaMemcpyDeviceToHost));
return std::vector<DeviceSplitCandidate>(result_all.begin(), result_all.end());
}
void BuildHist(int nidx) {
hist.AllocateHistogram(nidx);
auto d_node_hist = hist.GetNodeHistogram(nidx);
auto d_ridx = row_partitioner->GetRows(nidx);
BuildGradientHistogram(page->GetDeviceAccessor(device_id), gpair, d_ridx, d_node_hist,
histogram_rounding);
}
void SubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
auto d_node_hist_parent = hist.GetNodeHistogram(nidx_parent);
auto d_node_hist_histogram = hist.GetNodeHistogram(nidx_histogram);
auto d_node_hist_subtraction = hist.GetNodeHistogram(nidx_subtraction);
dh::LaunchN(device_id, page->Cuts().TotalBins(), [=] __device__(size_t idx) {
d_node_hist_subtraction[idx] =
d_node_hist_parent[idx] - d_node_hist_histogram[idx];
});
}
bool CanDoSubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
// Make sure histograms are already allocated
hist.AllocateHistogram(nidx_subtraction);
return hist.HistogramExists(nidx_histogram) &&
hist.HistogramExists(nidx_parent);
}
void UpdatePosition(int nidx, RegTree::Node split_node) {
auto d_matrix = page->GetDeviceAccessor(device_id);
row_partitioner->UpdatePosition(
nidx, split_node.LeftChild(), split_node.RightChild(),
[=] __device__(bst_uint ridx) {
// given a row index, returns the node id it belongs to
bst_float cut_value =
d_matrix.GetFvalue(ridx, split_node.SplitIndex());
// Missing value
int new_position = 0;
if (isnan(cut_value)) {
new_position = split_node.DefaultChild();
} else {
if (cut_value <= split_node.SplitCond()) {
new_position = split_node.LeftChild();
} else {
new_position = split_node.RightChild();
}
}
return new_position;
});
}
// After tree update is finished, update the position of all training
// instances to their final leaf. This information is used later to update the
// prediction cache
void FinalisePosition(RegTree const* p_tree, DMatrix* p_fmat) {
dh::TemporaryArray<RegTree::Node> d_nodes(p_tree->GetNodes().size());
dh::safe_cuda(cudaMemcpy(d_nodes.data().get(), p_tree->GetNodes().data(),
d_nodes.size() * sizeof(RegTree::Node),
cudaMemcpyHostToDevice));
if (row_partitioner->GetRows().size() != p_fmat->Info().num_row_) {
row_partitioner.reset(); // Release the device memory first before reallocating
row_partitioner.reset(new RowPartitioner(device_id, p_fmat->Info().num_row_));
}
if (page->n_rows == p_fmat->Info().num_row_) {
FinalisePositionInPage(page, dh::ToSpan(d_nodes));
} else {
for (auto& batch : p_fmat->GetBatches<EllpackPage>(batch_param)) {
FinalisePositionInPage(batch.Impl(), dh::ToSpan(d_nodes));
}
}
}
void FinalisePositionInPage(EllpackPageImpl* page, const common::Span<RegTree::Node> d_nodes) {
auto d_matrix = page->GetDeviceAccessor(device_id);
row_partitioner->FinalisePosition(
[=] __device__(size_t row_id, int position) {
if (!d_matrix.IsInRange(row_id)) {
return RowPartitioner::kIgnoredTreePosition;
}
auto node = d_nodes[position];
while (!node.IsLeaf()) {
bst_float element = d_matrix.GetFvalue(row_id, node.SplitIndex());
// Missing value
if (isnan(element)) {
position = node.DefaultChild();
} else {
if (element <= node.SplitCond()) {
position = node.LeftChild();
} else {
position = node.RightChild();
}
}
node = d_nodes[position];
}
return position;
});
}
void UpdatePredictionCache(bst_float* out_preds_d) {
dh::safe_cuda(cudaSetDevice(device_id));
auto d_ridx = row_partitioner->GetRows();
if (prediction_cache.size() != d_ridx.size()) {
prediction_cache.resize(d_ridx.size());
dh::safe_cuda(cudaMemcpyAsync(prediction_cache.data().get(), out_preds_d,
prediction_cache.size() * sizeof(bst_float),
cudaMemcpyDefault));
}
CalcWeightTrainParam param_d(param);
dh::safe_cuda(
cudaMemcpyAsync(node_sum_gradients.data().get(), host_node_sum_gradients.data(),
sizeof(GradientPair) * host_node_sum_gradients.size(),
cudaMemcpyHostToDevice));
auto d_position = row_partitioner->GetPosition();
auto d_node_sum_gradients = node_sum_gradients.data().get();
auto d_prediction_cache = prediction_cache.data().get();
dh::LaunchN(
device_id, prediction_cache.size(), [=] __device__(int local_idx) {
int pos = d_position[local_idx];
bst_float weight = CalcWeight(param_d, d_node_sum_gradients[pos]);
d_prediction_cache[d_ridx[local_idx]] +=
weight * param_d.learning_rate;
});
dh::safe_cuda(cudaMemcpy(
out_preds_d, prediction_cache.data().get(),
prediction_cache.size() * sizeof(bst_float), cudaMemcpyDefault));
row_partitioner.reset();
}
void AllReduceHist(int nidx, dh::AllReducer* reducer) {
monitor.StartCuda("AllReduce");
auto d_node_hist = hist.GetNodeHistogram(nidx).data();
reducer->AllReduceSum(
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
page->Cuts().TotalBins() * (sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT)));
reducer->Synchronize();
monitor.StopCuda("AllReduce");
}
/**
* \brief Build GPU local histograms for the left and right child of some parent node
*/
void BuildHistLeftRight(const ExpandEntry &candidate, int nidx_left,
int nidx_right, dh::AllReducer* reducer) {
auto build_hist_nidx = nidx_left;
auto subtraction_trick_nidx = nidx_right;
// Decide whether to build the left histogram or right histogram
// Use sum of Hessian as a heuristic to select node with fewest training instances
bool fewer_right = candidate.split.right_sum.GetHess() < candidate.split.left_sum.GetHess();
if (fewer_right) {
std::swap(build_hist_nidx, subtraction_trick_nidx);
}
this->BuildHist(build_hist_nidx);
this->AllReduceHist(build_hist_nidx, reducer);
// Check whether we can use the subtraction trick to calculate the other
bool do_subtraction_trick = this->CanDoSubtractionTrick(
candidate.nid, build_hist_nidx, subtraction_trick_nidx);
if (do_subtraction_trick) {
// Calculate other histogram using subtraction trick
this->SubtractionTrick(candidate.nid, build_hist_nidx,
subtraction_trick_nidx);
} else {
// Calculate other histogram manually
this->BuildHist(subtraction_trick_nidx);
this->AllReduceHist(subtraction_trick_nidx, reducer);
}
}
void ApplySplit(const ExpandEntry& candidate, RegTree* p_tree) {
RegTree& tree = *p_tree;
GradStats left_stats{};
left_stats.Add(candidate.split.left_sum);
GradStats right_stats{};
right_stats.Add(candidate.split.right_sum);
GradStats parent_sum{};
parent_sum.Add(left_stats);
parent_sum.Add(right_stats);
node_value_constraints.resize(tree.GetNodes().size());
auto base_weight = node_value_constraints[candidate.nid].CalcWeight(param, parent_sum);
auto left_weight =
node_value_constraints[candidate.nid].CalcWeight(param, left_stats)*param.learning_rate;
auto right_weight =
node_value_constraints[candidate.nid].CalcWeight(param, right_stats)*param.learning_rate;
tree.ExpandNode(candidate.nid, candidate.split.findex,
candidate.split.fvalue, candidate.split.dir == kLeftDir,
base_weight, left_weight, right_weight,
candidate.split.loss_chg, parent_sum.sum_hess,
left_stats.GetHess(), right_stats.GetHess());
// Set up child constraints
node_value_constraints.resize(tree.GetNodes().size());
node_value_constraints[candidate.nid].SetChild(
param, tree[candidate.nid].SplitIndex(), left_stats, right_stats,
&node_value_constraints[tree[candidate.nid].LeftChild()],
&node_value_constraints[tree[candidate.nid].RightChild()]);
host_node_sum_gradients[tree[candidate.nid].LeftChild()] =
candidate.split.left_sum;
host_node_sum_gradients[tree[candidate.nid].RightChild()] =
candidate.split.right_sum;
interaction_constraints.Split(candidate.nid, tree[candidate.nid].SplitIndex(),
tree[candidate.nid].LeftChild(),
tree[candidate.nid].RightChild());
}
void InitRoot(RegTree* p_tree, dh::AllReducer* reducer, int64_t num_columns) {
constexpr bst_node_t kRootNIdx = 0;
dh::XGBCachingDeviceAllocator<char> alloc;
GradientPair root_sum = thrust::reduce(
thrust::cuda::par(alloc),
thrust::device_ptr<GradientPair const>(gpair.data()),
thrust::device_ptr<GradientPair const>(gpair.data() + gpair.size()));
dh::safe_cuda(cudaMemcpyAsync(node_sum_gradients.data().get(), &root_sum, sizeof(root_sum),
cudaMemcpyHostToDevice));
reducer->AllReduceSum(
reinterpret_cast<float*>(node_sum_gradients.data().get()),
reinterpret_cast<float*>(node_sum_gradients.data().get()), 2);
reducer->Synchronize();
dh::safe_cuda(cudaMemcpyAsync(host_node_sum_gradients.data(),
node_sum_gradients.data().get(), sizeof(GradientPair),
cudaMemcpyDeviceToHost));
this->BuildHist(kRootNIdx);
this->AllReduceHist(kRootNIdx, reducer);
// Remember root stats
p_tree->Stat(kRootNIdx).sum_hess = host_node_sum_gradients[kRootNIdx].GetHess();
auto weight = CalcWeight(param, host_node_sum_gradients[kRootNIdx]);
p_tree->Stat(kRootNIdx).base_weight = weight;
(*p_tree)[kRootNIdx].SetLeaf(param.learning_rate * weight);
// Initialise root constraint
node_value_constraints.resize(p_tree->GetNodes().size());
// Generate first split
auto split = this->EvaluateSplits({kRootNIdx}, *p_tree, num_columns);
qexpand->push(
ExpandEntry(kRootNIdx, p_tree->GetDepth(kRootNIdx), split.at(0), 0));
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair_all, DMatrix* p_fmat,
RegTree* p_tree, dh::AllReducer* reducer) {
auto& tree = *p_tree;
monitor.StartCuda("Reset");
this->Reset(gpair_all, p_fmat, p_fmat->Info().num_col_);
monitor.StopCuda("Reset");
monitor.StartCuda("InitRoot");
this->InitRoot(p_tree, reducer, p_fmat->Info().num_col_);
monitor.StopCuda("InitRoot");
auto timestamp = qexpand->size();
auto num_leaves = 1;
while (!qexpand->empty()) {
ExpandEntry candidate = qexpand->top();
qexpand->pop();
if (!candidate.IsValid(param, num_leaves)) {
continue;
}
this->ApplySplit(candidate, p_tree);
num_leaves++;
int left_child_nidx = tree[candidate.nid].LeftChild();
int right_child_nidx = tree[candidate.nid].RightChild();
// Only create child entries if needed
if (ExpandEntry::ChildIsValid(param, tree.GetDepth(left_child_nidx),
num_leaves)) {
monitor.StartCuda("UpdatePosition");
this->UpdatePosition(candidate.nid, (*p_tree)[candidate.nid]);
monitor.StopCuda("UpdatePosition");
monitor.StartCuda("BuildHist");
this->BuildHistLeftRight(candidate, left_child_nidx, right_child_nidx, reducer);
monitor.StopCuda("BuildHist");
monitor.StartCuda("EvaluateSplits");
auto splits = this->EvaluateSplits({left_child_nidx, right_child_nidx},
*p_tree, p_fmat->Info().num_col_);
monitor.StopCuda("EvaluateSplits");
qexpand->push(ExpandEntry(left_child_nidx,
tree.GetDepth(left_child_nidx), splits.at(0),
timestamp++));
qexpand->push(ExpandEntry(right_child_nidx,
tree.GetDepth(right_child_nidx),
splits.at(1), timestamp++));
}
}
monitor.StartCuda("FinalisePosition");
this->FinalisePosition(p_tree, p_fmat);
monitor.StopCuda("FinalisePosition");
}
};
template <typename GradientSumT>
inline void GPUHistMakerDevice<GradientSumT>::InitHistogram() {
if (!param.monotone_constraints.empty()) {
// Copy assigning an empty vector causes an exception in MSVC debug builds
monotone_constraints = param.monotone_constraints;
}
host_node_sum_gradients.resize(param.MaxNodes());
node_sum_gradients.resize(param.MaxNodes());
// Init histogram
hist.Init(device_id, page->Cuts().TotalBins());
}
template <typename GradientSumT>
class GPUHistMakerSpecialised {
public:
GPUHistMakerSpecialised() = default;
void Configure(const Args& args, GenericParameter const* generic_param) {
param_.UpdateAllowUnknown(args);
generic_param_ = generic_param;
hist_maker_param_.UpdateAllowUnknown(args);
dh::CheckComputeCapability();
monitor_.Init("updater_gpu_hist");
}
~GPUHistMakerSpecialised() { // NOLINT
dh::GlobalMemoryLogger().Log();
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) {
monitor_.StartCuda("Update");
// rescale learning rate according to size of trees
float lr = param_.learning_rate;
param_.learning_rate = lr / trees.size();
ValueConstraint::Init(¶m_, dmat->Info().num_col_);
// build tree
try {
for (xgboost::RegTree* tree : trees) {
this->UpdateTree(gpair, dmat, tree);
if (hist_maker_param_.debug_synchronize) {
this->CheckTreesSynchronized(tree);
}
}
dh::safe_cuda(cudaGetLastError());
} catch (const std::exception& e) {
LOG(FATAL) << "Exception in gpu_hist: " << e.what() << std::endl;
}
param_.learning_rate = lr;
monitor_.StopCuda("Update");
}
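// The rescaling above splits one boosting round's step across every tree built
// in that round. For example, with learning_rate = 0.3 and a 3-class problem
// (three trees per round), each tree is grown with an effective rate of 0.1 and
// the original value is restored once the round finishes.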
void InitDataOnce(DMatrix* dmat) {
device_ = generic_param_->gpu_id;
CHECK_GE(device_, 0) << "Must have at least one device";
info_ = &dmat->Info();
reducer_.Init({device_}); // NOLINT
// Synchronise the column sampling seed
uint32_t column_sampling_seed = common::GlobalRandom()();
rabit::Broadcast(&column_sampling_seed, sizeof(column_sampling_seed), 0);
BatchParam batch_param{
device_,
param_.max_bin,
generic_param_->gpu_page_size
};
auto page = (*dmat->GetBatches<EllpackPage>(batch_param).begin()).Impl();
dh::safe_cuda(cudaSetDevice(device_));
maker.reset(new GPUHistMakerDevice<GradientSumT>(device_,
page,
info_->num_row_,
param_,
column_sampling_seed,
info_->num_col_,
hist_maker_param_.deterministic_histogram,
batch_param));
monitor_.StartCuda("InitHistogram");
dh::safe_cuda(cudaSetDevice(device_));
maker->InitHistogram();
monitor_.StopCuda("InitHistogram");
p_last_fmat_ = dmat;
initialised_ = true;
}
void InitData(DMatrix* dmat) {
if (!initialised_) {
monitor_.StartCuda("InitDataOnce");
this->InitDataOnce(dmat);
monitor_.StopCuda("InitDataOnce");
}
}
// Only call this method for testing
void CheckTreesSynchronized(RegTree* local_tree) const {
std::string s_model;
common::MemoryBufferStream fs(&s_model);
int rank = rabit::GetRank();
if (rank == 0) {
local_tree->Save(&fs);
}
fs.Seek(0);
rabit::Broadcast(&s_model, 0);
RegTree reference_tree {}; // rank 0 tree
reference_tree.Load(&fs);
CHECK(*local_tree == reference_tree);
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat,
RegTree* p_tree) {
monitor_.StartCuda("InitData");
this->InitData(p_fmat);
monitor_.StopCuda("InitData");
gpair->SetDevice(device_);
maker->UpdateTree(gpair, p_fmat, p_tree, &reducer_);
}
bool UpdatePredictionCache(const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) {
if (maker == nullptr || p_last_fmat_ == nullptr || p_last_fmat_ != data) {
return false;
}
monitor_.StartCuda("UpdatePredictionCache");
p_out_preds->SetDevice(device_);
maker->UpdatePredictionCache(p_out_preds->DevicePointer());
monitor_.StopCuda("UpdatePredictionCache");
return true;
}
TrainParam param_; // NOLINT
MetaInfo* info_{}; // NOLINT
std::unique_ptr<GPUHistMakerDevice<GradientSumT>> maker; // NOLINT
private:
bool initialised_ { false };
GPUHistMakerTrainParam hist_maker_param_;
GenericParameter const* generic_param_;
dh::AllReducer reducer_;
DMatrix* p_last_fmat_ { nullptr };
int device_{-1};
common::Monitor monitor_;
};
class GPUHistMaker : public TreeUpdater {
public:
void Configure(const Args& args) override {
// Used in test to count how many configurations are performed
LOG(DEBUG) << "[GPU Hist]: Configure";
hist_maker_param_.UpdateAllowUnknown(args);
// The passed-in args can be empty; if we simply purged the old maker without
// preserving its parameters, we could not call Update on it afterwards.
TrainParam param;
if (float_maker_) {
param = float_maker_->param_;
} else if (double_maker_) {
param = double_maker_->param_;
}
if (hist_maker_param_.single_precision_histogram) {
float_maker_.reset(new GPUHistMakerSpecialised<GradientPair>());
float_maker_->param_ = param;
float_maker_->Configure(args, tparam_);
} else {
double_maker_.reset(new GPUHistMakerSpecialised<GradientPairPrecise>());
double_maker_->param_ = param;
double_maker_->Configure(args, tparam_);
}
}
void LoadConfig(Json const& in) override {
auto const& config = get<Object const>(in);
FromJson(config.at("gpu_hist_train_param"), &this->hist_maker_param_);
if (hist_maker_param_.single_precision_histogram) {
float_maker_.reset(new GPUHistMakerSpecialised<GradientPair>());
FromJson(config.at("train_param"), &float_maker_->param_);
} else {
double_maker_.reset(new GPUHistMakerSpecialised<GradientPairPrecise>());
FromJson(config.at("train_param"), &double_maker_->param_);
}
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["gpu_hist_train_param"] = ToJson(hist_maker_param_);
if (hist_maker_param_.single_precision_histogram) {
out["train_param"] = ToJson(float_maker_->param_);
} else {
out["train_param"] = ToJson(double_maker_->param_);
}
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) override {
if (hist_maker_param_.single_precision_histogram) {
float_maker_->Update(gpair, dmat, trees);
} else {
double_maker_->Update(gpair, dmat, trees);
}
}
bool UpdatePredictionCache(
const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) override {
if (hist_maker_param_.single_precision_histogram) {
return float_maker_->UpdatePredictionCache(data, p_out_preds);
} else {
return double_maker_->UpdatePredictionCache(data, p_out_preds);
}
}
char const* Name() const override {
return "grow_gpu_hist";
}
private:
GPUHistMakerTrainParam hist_maker_param_;
std::unique_ptr<GPUHistMakerSpecialised<GradientPair>> float_maker_;
std::unique_ptr<GPUHistMakerSpecialised<GradientPairPrecise>> double_maker_;
};
#if !defined(GTEST_TEST)
XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist")
.describe("Grow tree with GPU.")
.set_body([]() { return new GPUHistMaker(); });
#endif // !defined(GTEST_TEST)
} // namespace tree
} // namespace xgboost
|
c804b96fa7db46b085031c72752d55ae53b4f312.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "expm.cuh"
#include <cublasLt.h>
#include <stdexcept>
#include <thrust/device_free.h>
#include <thrust/functional.h>
#include <thrust/transform.h>
#include <iostream>
inline void checkCudaStatus(hipError_t status) {
if (status != hipSuccess) {
printf("cuda API failed with status %d: %s\n",
status,
hipGetErrorString(status));
throw std::logic_error("cuda API failed");
}
}
inline void checkCublasStatus(hipblasStatus_t status) {
if (status != HIPBLAS_STATUS_SUCCESS) {
printf("cuBLAS API failed with status %d\n", status);
throw std::logic_error("cuBLAS API failed");
}
}
inline void checkCuSolverStatus(cusolverStatus_t status) {
if (status != CUSOLVER_STATUS_SUCCESS) {
printf("cuSOLVER API failed with status %d\n", status);
throw std::logic_error("cuSolver API failed");
}
}
inline void print_matrix(const thrust::host_vector<double> &a,
size_t row_size) {
std::cout << "[ ";
for (size_t i = 0; i < row_size; i++) {
if (i != 0) { std::cout << " "; }
for (size_t j = 0; j < row_size; j++) {
std::cout << a[i * row_size + j];
if (j != row_size - 1) {
std::cout << " ";
} else {
if (i == row_size - 1) { std::cout << "]"; }
std::cout << "\n";
}
}
}
std::cout << "\n";
}
template <typename T>
struct identity_matrix_functor : public thrust::unary_function<T, T> {
const size_t _row_size;
__host__ __device__ identity_matrix_functor(size_t row_size) :
_row_size{row_size} {}
__host__ __device__ T operator()(T a) {
size_t i = static_cast<size_t>(a + 0.5);
size_t r = i / _row_size;
size_t c = i % _row_size;
return 1.0 * (r == c);
}
};
template <typename T>
struct scale_functor : public thrust::unary_function<T, T> {
const T _scale;
__host__ __device__ scale_functor(T scale) : _scale{scale} {}
__host__ __device__ T operator()(T x) { return x * _scale; }
};
template <typename T> struct inf_norm {
__host__ __device__ bool operator()(const T &lhs, const T &rhs) {
return fabs(lhs) < fabs(rhs);
}
};
template <typename T> struct saxpy : public thrust::binary_function<T, T, T> {
const T _c;
__host__ __device__ saxpy(T c) : _c{c} {}
__host__ __device__ T operator()(const T &x, const T &y) const {
return x + _c * y;
}
};
CudaRateMatrix::CudaRateMatrix(size_t n) {
_row_size = n;
_matrix_size = _row_size * _row_size;
_workspace_size = 8096;
_A_host.resize(_matrix_size, 0.0);
_eA_host.resize(_matrix_size, 0.0);
_eA_dev.resize(_matrix_size, 0.0);
_A_dev.resize(_matrix_size, 0.0);
_X_dev.resize(_matrix_size, 0.0);
_N_dev.resize(_matrix_size, 0.0);
_D_dev.resize(_matrix_size, 0.0);
_I_dev.resize(_matrix_size, 0.0);
_P_dev.resize(_row_size);
init_I();
_workspace = thrust::device_malloc<uint8_t>(_workspace_size);
_info = thrust::device_malloc<int>(1);
init_cublas();
init_cublaslt();
init_cusolver();
}
CudaRateMatrix::~CudaRateMatrix() { thrust::device_free(_workspace); }
void CudaRateMatrix::set_identity(thrust::device_vector<double> &I) {
thrust::sequence(I.begin(), I.end(), 0);
thrust::transform(I.begin(),
I.end(),
I.begin(),
identity_matrix_functor<double>(_row_size));
}
void CudaRateMatrix::init_I() {
for (size_t i = 0; i < _matrix_size; i += _row_size + 1) { _I_dev[i] = 1.0; }
}
void CudaRateMatrix::init_cublas() {
checkCublasStatus(hipblasCreate(&_blas_handle));
}
void CudaRateMatrix::init_cublaslt() {
checkCublasStatus(cublasLtCreate(&_lthandle));
checkCublasStatus(
cublasLtMatmulDescCreate(&_operation, CUBLAS_COMPUTE_64F, HIP_R_64F));
checkCublasStatus(cublasLtMatmulDescSetAttribute(_operation,
CUBLASLT_MATMUL_DESC_TRANSA,
&_transpose,
sizeof(_transpose)));
checkCublasStatus(cublasLtMatmulDescSetAttribute(_operation,
CUBLASLT_MATMUL_DESC_TRANSB,
&_transpose,
sizeof(_transpose)));
checkCublasStatus(cublasLtMatrixLayoutCreate(
&_layout, // layout structure
HIP_R_64F, // Datatype
_row_size, // rows
_row_size, // cols
_row_size //"leading dim". the number of elements to
// skip to get to the next col. except in
// our case we specify the row size,
// because we are row major
));
checkCublasStatus(cublasLtMatrixLayoutSetAttribute( // set the matrix layout
_layout, // which layout to set
CUBLASLT_MATRIX_LAYOUT_ORDER, // what we are doing
&_row_order, // we are setting to a row major order
sizeof(_row_order) // Size of the attribute
));
checkCublasStatus(cublasLtMatmulPreferenceCreate(&_preference));
checkCublasStatus(cublasLtMatmulPreferenceSetAttribute(
_preference,
CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES,
&_workspace_size,
sizeof(_workspace_size)));
int returned_results = 0;
checkCublasStatus(cublasLtMatmulAlgoGetHeuristic(_lthandle,
_operation,
_layout,
_layout,
_layout,
_layout,
_preference,
1,
&_heuristics,
&returned_results));
if (returned_results == 0) {
throw std::runtime_error{"failed to find an algorithm"};
}
}
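// With the row-major order selected above, element (r, c) of an n x n matrix
// sits at offset r * n + c and the "leading dimension" handed to the layout is
// the row length n. A minimal sketch (assumed helper name):
#if 0
inline size_t rm_index(size_t r, size_t c, size_t n) { return r * n + c; }
#endif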
void CudaRateMatrix::init_cusolver() {
checkCuSolverStatus(hipsolverDnCreate(&_solve_handle));
int required_work_size = 0;
checkCuSolverStatus(
hipsolverDnDgetrf_bufferSize(_solve_handle,
_row_size,
_row_size,
thrust::raw_pointer_cast(_D_dev.data()),
_row_size,
&required_work_size));
if (required_work_size > _workspace_size) {
_workspace_size = required_work_size;
thrust::device_free(_workspace);
_workspace = thrust::device_malloc<uint8_t>(_workspace_size);
}
}
void CudaRateMatrix::expm_ss(double t) {
_A_dev = _A_host;
thrust::transform(
_A_dev.begin(), _A_dev.end(), _A_dev.begin(), scale_functor<double>(t));
int scale =
*thrust::max_element(_A_dev.begin(), _A_dev.end(), inf_norm<double>());
scale = ::max(0, 1 + scale);
thrust::transform(_A_dev.begin(),
_A_dev.end(),
_A_dev.begin(),
scale_functor<double>(1 / ::pow(2.0, scale)));
static constexpr int q = 3;
double c = 0.5;
double sign = -1.0;
thrust::transform(_I_dev.begin(),
_I_dev.end(),
_A_dev.begin(),
_N_dev.begin(),
saxpy<double>(c));
thrust::transform(_I_dev.begin(),
_I_dev.end(),
_A_dev.begin(),
_D_dev.begin(),
saxpy<double>(-c));
_X_dev = _A_dev;
static constexpr double alpha = 1.0;
static constexpr double beta = 0.0;
for (int i = 2; i < q; i++) {
c = c * (q - i + 1) / (i * (2 * q - i + 1));
/* X = A * X */
checkCublasStatus(cublasLtMatmul(_lthandle,
_operation,
&alpha,
thrust::raw_pointer_cast(_A_dev.data()),
_layout,
thrust::raw_pointer_cast(_X_dev.data()),
_layout,
&beta,
thrust::raw_pointer_cast(_X_dev.data()),
_layout,
thrust::raw_pointer_cast(_X_dev.data()),
_layout,
&_heuristics.algo,
thrust::raw_pointer_cast(_workspace),
_workspace_size,
0));
/* N += c * X */
thrust::transform(_N_dev.begin(),
_N_dev.end(),
_X_dev.begin(),
_N_dev.begin(),
saxpy<double>(c));
sign *= -1.0;
/* D += sign * c * X */
thrust::transform(_D_dev.begin(),
_D_dev.end(),
_X_dev.begin(),
_D_dev.begin(),
saxpy<double>(sign * c));
}
set_identity(_eA_dev);
/* factorize D */
checkCuSolverStatus(
hipsolverDnDgetrf(_solve_handle,
_row_size,
_row_size,
thrust::raw_pointer_cast(_D_dev.data()),
_row_size,
(double *)thrust::raw_pointer_cast(_workspace),
thrust::raw_pointer_cast(_P_dev.data()),
thrust::raw_pointer_cast(_info)));
if (*_info != 0) {
throw std::runtime_error{"LU factorization was unsuccsessful"};
}
/*Solve D * A = N */
checkCuSolverStatus(hipsolverDnDgetrs(_solve_handle,
HIPBLAS_OP_N,
_row_size,
_row_size,
thrust::raw_pointer_cast(_D_dev.data()),
_row_size,
thrust::raw_pointer_cast(_P_dev.data()),
thrust::raw_pointer_cast(_N_dev.data()),
_row_size,
thrust::raw_pointer_cast(_info)));
if (*_info != 0) {
throw std::runtime_error{"LU factorization was unsuccsessful"};
}
for (int i = 0; i < scale; i++) {
/* N *= N */
checkCublasStatus(cublasLtMatmul(_lthandle,
_operation,
&alpha,
thrust::raw_pointer_cast(_N_dev.data()),
_layout,
thrust::raw_pointer_cast(_N_dev.data()),
_layout,
&beta,
thrust::raw_pointer_cast(_N_dev.data()),
_layout,
thrust::raw_pointer_cast(_N_dev.data()),
_layout,
&_heuristics.algo,
thrust::raw_pointer_cast(_workspace),
_workspace_size,
0));
}
_eA_host = _N_dev;
std::cout << "eA dev: " << std::endl;
print_matrix(_eA_host, _row_size);
}
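// expm_ss above is the classic scaling-and-squaring recipe: choose s so that
// B = (t / 2^s) * A is small, build the [q/q] Pade approximant
//   exp(B) ~= D(B)^{-1} * N(B)
// (N accumulates the +c_k B^k terms, D the alternating-sign ones), then square
// the result s times, since exp(tA) = exp(B)^(2^s). The getrs solve above stands
// in for the explicit inverse of D.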
|
c804b96fa7db46b085031c72752d55ae53b4f312.cu
|
#include "expm.cuh"
#include <cublasLt.h>
#include <stdexcept>
#include <thrust/device_free.h>
#include <thrust/functional.h>
#include <thrust/transform.h>
#include <iostream>
inline void checkCudaStatus(cudaError_t status) {
if (status != cudaSuccess) {
printf("cuda API failed with status %d: %s\n",
status,
cudaGetErrorString(status));
throw std::logic_error("cuda API failed");
}
}
inline void checkCublasStatus(cublasStatus_t status) {
if (status != CUBLAS_STATUS_SUCCESS) {
printf("cuBLAS API failed with status %d\n", status);
throw std::logic_error("cuBLAS API failed");
}
}
inline void checkCuSolverStatus(cusolverStatus_t status) {
if (status != CUSOLVER_STATUS_SUCCESS) {
printf("cuSOLVER API failed with status %d\n", status);
throw std::logic_error("cuSolver API failed");
}
}
inline void print_matrix(const thrust::host_vector<double> &a,
size_t row_size) {
std::cout << "[ ";
for (size_t i = 0; i < row_size; i++) {
if (i != 0) { std::cout << " "; }
for (size_t j = 0; j < row_size; j++) {
std::cout << a[i * row_size + j];
if (j != row_size - 1) {
std::cout << " ";
} else {
if (i == row_size - 1) { std::cout << "]"; }
std::cout << "\n";
}
}
}
std::cout << "\n";
}
template <typename T>
struct identity_matrix_functor : public thrust::unary_function<T, T> {
const size_t _row_size;
__host__ __device__ identity_matrix_functor(size_t row_size) :
_row_size{row_size} {}
__host__ __device__ T operator()(T a) {
size_t i = static_cast<size_t>(a + 0.5);
size_t r = i / _row_size;
size_t c = i % _row_size;
return 1.0 * (r == c);
}
};
template <typename T>
struct scale_functor : public thrust::unary_function<T, T> {
const T _scale;
__host__ __device__ scale_functor(T scale) : _scale{scale} {}
__host__ __device__ T operator()(T x) { return x * _scale; }
};
template <typename T> struct inf_norm {
__host__ __device__ bool operator()(const T &lhs, const T &rhs) {
return fabs(lhs) < fabs(rhs);
}
};
template <typename T> struct saxpy : public thrust::binary_function<T, T, T> {
const T _c;
__host__ __device__ saxpy(T c) : _c{c} {}
__host__ __device__ T operator()(const T &x, const T &y) const {
return x + _c * y;
}
};
CudaRateMatrix::CudaRateMatrix(size_t n) {
_row_size = n;
_matrix_size = _row_size * _row_size;
_workspace_size = 8096;
_A_host.resize(_matrix_size, 0.0);
_eA_host.resize(_matrix_size, 0.0);
_eA_dev.resize(_matrix_size, 0.0);
_A_dev.resize(_matrix_size, 0.0);
_X_dev.resize(_matrix_size, 0.0);
_N_dev.resize(_matrix_size, 0.0);
_D_dev.resize(_matrix_size, 0.0);
_I_dev.resize(_matrix_size, 0.0);
_P_dev.resize(_row_size);
init_I();
_workspace = thrust::device_malloc<uint8_t>(_workspace_size);
_info = thrust::device_malloc<int>(1);
init_cublas();
init_cublaslt();
init_cusolver();
}
CudaRateMatrix::~CudaRateMatrix() { thrust::device_free(_workspace); }
void CudaRateMatrix::set_identity(thrust::device_vector<double> &I) {
thrust::sequence(I.begin(), I.end(), 0);
thrust::transform(I.begin(),
I.end(),
I.begin(),
identity_matrix_functor<double>(_row_size));
}
void CudaRateMatrix::init_I() {
for (size_t i = 0; i < _matrix_size; i += _row_size + 1) { _I_dev[i] = 1.0; }
}
void CudaRateMatrix::init_cublas() {
checkCublasStatus(cublasCreate_v2(&_blas_handle));
}
void CudaRateMatrix::init_cublaslt() {
checkCublasStatus(cublasLtCreate(&_lthandle));
checkCublasStatus(
cublasLtMatmulDescCreate(&_operation, CUBLAS_COMPUTE_64F, CUDA_R_64F));
checkCublasStatus(cublasLtMatmulDescSetAttribute(_operation,
CUBLASLT_MATMUL_DESC_TRANSA,
&_transpose,
sizeof(_transpose)));
checkCublasStatus(cublasLtMatmulDescSetAttribute(_operation,
CUBLASLT_MATMUL_DESC_TRANSB,
&_transpose,
sizeof(_transpose)));
checkCublasStatus(cublasLtMatrixLayoutCreate(
&_layout, // layout structure
CUDA_R_64F, // Datatype
_row_size, // rows
_row_size, // cols
_row_size //"leading dim". the number of elements to
// skip to get to the next col. except in
// our case we specify the row size,
// because we are row major
));
checkCublasStatus(cublasLtMatrixLayoutSetAttribute( // set the matrix layout
_layout, // which layout to set
CUBLASLT_MATRIX_LAYOUT_ORDER, // what we are doing
&_row_order, // we are setting to a row major order
sizeof(_row_order) // Size of the attribute
));
checkCublasStatus(cublasLtMatmulPreferenceCreate(&_preference));
checkCublasStatus(cublasLtMatmulPreferenceSetAttribute(
_preference,
CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES,
&_workspace_size,
sizeof(_workspace_size)));
int returned_results = 0;
checkCublasStatus(cublasLtMatmulAlgoGetHeuristic(_lthandle,
_operation,
_layout,
_layout,
_layout,
_layout,
_preference,
1,
&_heuristics,
&returned_results));
if (returned_results == 0) {
throw std::runtime_error{"failed to find an algorithm"};
}
}
void CudaRateMatrix::init_cusolver() {
checkCuSolverStatus(cusolverDnCreate(&_solve_handle));
int required_work_size = 0;
checkCuSolverStatus(
cusolverDnDgetrf_bufferSize(_solve_handle,
_row_size,
_row_size,
thrust::raw_pointer_cast(_D_dev.data()),
_row_size,
&required_work_size));
if (required_work_size > _workspace_size) {
_workspace_size = required_work_size;
thrust::device_free(_workspace);
_workspace = thrust::device_malloc<uint8_t>(_workspace_size);
}
}
void CudaRateMatrix::expm_ss(double t) {
_A_dev = _A_host;
thrust::transform(
_A_dev.begin(), _A_dev.end(), _A_dev.begin(), scale_functor<double>(t));
int scale =
*thrust::max_element(_A_dev.begin(), _A_dev.end(), inf_norm<double>());
scale = std::max(0, 1 + scale);
thrust::transform(_A_dev.begin(),
_A_dev.end(),
_A_dev.begin(),
scale_functor<double>(1 / std::pow(2.0, scale)));
static constexpr int q = 3;
double c = 0.5;
double sign = -1.0;
thrust::transform(_I_dev.begin(),
_I_dev.end(),
_A_dev.begin(),
_N_dev.begin(),
saxpy<double>(c));
thrust::transform(_I_dev.begin(),
_I_dev.end(),
_A_dev.begin(),
_D_dev.begin(),
saxpy<double>(-c));
_X_dev = _A_dev;
static constexpr double alpha = 1.0;
static constexpr double beta = 0.0;
for (int i = 2; i < q; i++) {
c = c * (q - i + 1) / (i * (2 * q - i + 1));
/* X = A * X */
checkCublasStatus(cublasLtMatmul(_lthandle,
_operation,
&alpha,
thrust::raw_pointer_cast(_A_dev.data()),
_layout,
thrust::raw_pointer_cast(_X_dev.data()),
_layout,
&beta,
thrust::raw_pointer_cast(_X_dev.data()),
_layout,
thrust::raw_pointer_cast(_X_dev.data()),
_layout,
&_heuristics.algo,
thrust::raw_pointer_cast(_workspace),
_workspace_size,
0));
/* N += c * X */
thrust::transform(_N_dev.begin(),
_N_dev.end(),
_X_dev.begin(),
_N_dev.begin(),
saxpy<double>(c));
sign *= -1.0;
/* D += sign * c * X */
thrust::transform(_D_dev.begin(),
_D_dev.end(),
_X_dev.begin(),
_D_dev.begin(),
saxpy<double>(sign * c));
}
set_identity(_eA_dev);
/* factorize D */
checkCuSolverStatus(
cusolverDnDgetrf(_solve_handle,
_row_size,
_row_size,
thrust::raw_pointer_cast(_D_dev.data()),
_row_size,
(double *)thrust::raw_pointer_cast(_workspace),
thrust::raw_pointer_cast(_P_dev.data()),
thrust::raw_pointer_cast(_info)));
if (*_info != 0) {
throw std::runtime_error{"LU factorization was unsuccsessful"};
}
/*Solve D * A = N */
checkCuSolverStatus(cusolverDnDgetrs(_solve_handle,
CUBLAS_OP_N,
_row_size,
_row_size,
thrust::raw_pointer_cast(_D_dev.data()),
_row_size,
thrust::raw_pointer_cast(_P_dev.data()),
thrust::raw_pointer_cast(_N_dev.data()),
_row_size,
thrust::raw_pointer_cast(_info)));
if (*_info != 0) {
throw std::runtime_error{"LU factorization was unsuccsessful"};
}
for (int i = 0; i < scale; i++) {
/* N *= N */
checkCublasStatus(cublasLtMatmul(_lthandle,
_operation,
&alpha,
thrust::raw_pointer_cast(_N_dev.data()),
_layout,
thrust::raw_pointer_cast(_N_dev.data()),
_layout,
&beta,
thrust::raw_pointer_cast(_N_dev.data()),
_layout,
thrust::raw_pointer_cast(_N_dev.data()),
_layout,
&_heuristics.algo,
thrust::raw_pointer_cast(_workspace),
_workspace_size,
0));
}
_eA_host = _N_dev;
std::cout << "eA dev: " << std::endl;
print_matrix(_eA_host, _row_size);
}
|
9a17b2c0a359856bcdda1fd90e19943bf9e33545.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This is a simple test program to measure the memcopy bandwidth of the GPU.
* It can measure device to device copy bandwidth, host to device copy bandwidth
* for pageable and pinned memory, and device to host copy bandwidth for pageable
* and pinned memory.
*
* Usage:
* ./bandwidthTest [option]...
*/
// CUDA runtime
#include <hip/hip_runtime.h>
// includes
#include "helper_functions.h" // helper for shared functions common to CUDA Samples
#include "helper_cuda.h" // helper functions for CUDA error checking and initialization
#include <hip/hip_runtime.h>
#include <memory>
#include <iostream>
#include <cassert>
static const char *sSDKsample = "CUDA Bandwidth Test";
// defines, project
#define MEMCOPY_ITERATIONS 100
#define DEFAULT_SIZE ( 32 * (1e6) ) //32 M
#define DEFAULT_INCREMENT ( 4 * (1e6) ) //4 M
#define CACHE_CLEAR_SIZE ( 16 * (1e6) ) //16 M
//shmoo mode defines
#define SHMOO_MEMSIZE_MAX (64 * (1e6)) //64 M
#define SHMOO_MEMSIZE_START (1e3) //1 KB
#define SHMOO_INCREMENT_1KB (1e3) //1 KB
#define SHMOO_INCREMENT_2KB (2 * 1e3) //2 KB
#define SHMOO_INCREMENT_10KB (10 * (1e3)) //10KB
#define SHMOO_INCREMENT_100KB (100 * (1e3)) //100 KB
#define SHMOO_INCREMENT_1MB (1e6) //1 MB
#define SHMOO_INCREMENT_2MB (2 * 1e6) //2 MB
#define SHMOO_INCREMENT_4MB (4 * 1e6) //4 MB
#define SHMOO_LIMIT_20KB (20 * (1e3)) //20 KB
#define SHMOO_LIMIT_50KB (50 * (1e3)) //50 KB
#define SHMOO_LIMIT_100KB (100 * (1e3)) //100 KB
#define SHMOO_LIMIT_1MB (1e6) //1 MB
#define SHMOO_LIMIT_16MB (16 * 1e6) //16 MB
#define SHMOO_LIMIT_32MB (32 * 1e6) //32 MB
//enums, project
enum testMode { QUICK_MODE, RANGE_MODE, SHMOO_MODE };
enum memcpyKind { DEVICE_TO_HOST, HOST_TO_DEVICE, DEVICE_TO_DEVICE };
enum printMode { USER_READABLE, CSV };
enum memoryMode { PINNED, PAGEABLE };
const char *sMemoryCopyKind[] =
{
"Device to Host",
"Host to Device",
"Device to Device",
NULL
};
const char *sMemoryMode[] =
{
"PINNED",
"PAGEABLE",
NULL
};
// if true, use CPU based timing for everything
static bool bDontUseGPUTiming;
int *pArgc = NULL;
char **pArgv = NULL;
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
int runTest(const int argc, const char **argv);
void testBandwidth(unsigned int start, unsigned int end, unsigned int increment,
testMode mode, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc);
void testBandwidthQuick(unsigned int size, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc);
void testBandwidthRange(unsigned int start, unsigned int end, unsigned int increment,
memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc);
void testBandwidthShmoo(memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc);
float testDeviceToHostTransfer(unsigned int memSize, memoryMode memMode, bool wc);
float testHostToDeviceTransfer(unsigned int memSize, memoryMode memMode, bool wc);
float testDeviceToDeviceTransfer(unsigned int memSize);
void printResultsReadable(unsigned int *memSizes, double *bandwidths, unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs, bool wc);
void printResultsCSV(unsigned int *memSizes, double *bandwidths, unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs, bool wc);
void printHelp(void);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
pArgc = &argc;
pArgv = argv;
// set logfile name and start logs
printf("[%s] - Starting...\n", sSDKsample);
int iRetVal = runTest(argc, (const char **)argv);
if (iRetVal < 0)
{
checkCudaErrors(hipSetDevice(0));
}
// finish
printf("%s\n", (iRetVal==0) ? "Result = PASS" : "Result = FAIL");
printf("\nNOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.\n");
exit((iRetVal==0) ? EXIT_SUCCESS : EXIT_FAILURE);
}
///////////////////////////////////////////////////////////////////////////////
//Parse args, run the appropriate tests
///////////////////////////////////////////////////////////////////////////////
int runTest(const int argc, const char **argv)
{
int start = DEFAULT_SIZE;
int end = DEFAULT_SIZE;
int startDevice = 0;
int endDevice = 0;
int increment = DEFAULT_INCREMENT;
testMode mode = QUICK_MODE;
bool htod = false;
bool dtoh = false;
bool dtod = false;
bool wc = false;
char *modeStr;
char *device = NULL;
printMode printmode = USER_READABLE;
char *memModeStr = NULL;
memoryMode memMode = PINNED;
//process command line args
if (checkCmdLineFlag(argc, argv, "help"))
{
printHelp();
return 0;
}
if (checkCmdLineFlag(argc, argv, "csv"))
{
printmode = CSV;
}
if (getCmdLineArgumentString(argc, argv, "memory", &memModeStr))
{
if (strcmp(memModeStr, "pageable") == 0)
{
memMode = PAGEABLE;
}
else if (strcmp(memModeStr, "pinned") == 0)
{
memMode = PINNED;
}
else
{
printf("Invalid memory mode - valid modes are pageable or pinned\n");
printf("See --help for more information\n");
return -1000;
}
}
else
{
//default - pinned memory
memMode = PINNED;
}
if (getCmdLineArgumentString(argc, argv, "device", &device))
{
int deviceCount;
hipError_t error_id = hipGetDeviceCount(&deviceCount);
if (error_id != hipSuccess)
{
printf("hipGetDeviceCount returned %d\n-> %s\n", (int)error_id, hipGetErrorString(error_id));
exit(EXIT_FAILURE);
}
if (deviceCount == 0)
{
printf("!!!!!No devices found!!!!!\n");
return -2000;
}
if (strcmp(device, "all") == 0)
{
printf("\n!!!!!Cumulative Bandwidth to be computed from all the devices !!!!!!\n\n");
startDevice = 0;
endDevice = deviceCount-1;
}
else
{
startDevice = endDevice = atoi(device);
if (startDevice >= deviceCount || startDevice < 0)
{
printf("\n!!!!!Invalid GPU number %d given hence default gpu %d will be used !!!!!\n", startDevice,0);
startDevice = endDevice = 0;
}
}
}
printf("Running on...\n\n");
for (int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++)
{
hipDeviceProp_t deviceProp;
hipError_t error_id = hipGetDeviceProperties(&deviceProp, currentDevice);
if (error_id == hipSuccess)
{
printf(" Device %d: %s\n", currentDevice, deviceProp.name);
if (deviceProp.computeMode == hipComputeModeProhibited)
{
fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n");
checkCudaErrors(hipSetDevice(currentDevice));
exit(EXIT_FAILURE);
}
}
else
{
printf("hipGetDeviceProperties returned %d\n-> %s\n", (int)error_id, hipGetErrorString(error_id));
checkCudaErrors(hipSetDevice(currentDevice));
exit(EXIT_FAILURE);
}
}
if (getCmdLineArgumentString(argc, argv, "mode", &modeStr))
{
//figure out the mode
if (strcmp(modeStr, "quick") == 0)
{
printf(" Quick Mode\n\n");
mode = QUICK_MODE;
}
else if (strcmp(modeStr, "shmoo") == 0)
{
printf(" Shmoo Mode\n\n");
mode = SHMOO_MODE;
}
else if (strcmp(modeStr, "range") == 0)
{
printf(" Range Mode\n\n");
mode = RANGE_MODE;
}
else
{
printf("Invalid mode - valid modes are quick, range, or shmoo\n");
printf("See --help for more information\n");
return -3000;
}
}
else
{
//default mode - quick
printf(" Quick Mode\n\n");
mode = QUICK_MODE;
}
if (checkCmdLineFlag(argc, argv, "htod"))
{
htod = true;
}
if (checkCmdLineFlag(argc, argv, "dtoh"))
{
dtoh = true;
}
if (checkCmdLineFlag(argc, argv, "dtod"))
{
dtod = true;
}
#if CUDART_VERSION >= 2020
if (checkCmdLineFlag(argc, argv, "wc"))
{
wc = true;
}
#endif
if (checkCmdLineFlag(argc, argv, "cputiming"))
{
bDontUseGPUTiming = true;
}
if (!htod && !dtoh && !dtod)
{
//default: All
htod = true;
dtoh = true;
dtod = true;
}
if (RANGE_MODE == mode)
{
if (checkCmdLineFlag(argc, (const char **)argv, "start"))
{
start = getCmdLineArgumentInt(argc, argv, "start");
if (start <= 0)
{
printf("Illegal argument - start must be greater than zero\n");
return -4000;
}
}
else
{
printf("Must specify a starting size in range mode\n");
printf("See --help for more information\n");
return -5000;
}
if (checkCmdLineFlag(argc, (const char **)argv, "end"))
{
end = getCmdLineArgumentInt(argc, argv, "end");
if (end <= 0)
{
printf("Illegal argument - end must be greater than zero\n");
return -6000;
}
if (start > end)
{
printf("Illegal argument - start is greater than end\n");
return -7000;
}
}
else
{
printf("Must specify an end size in range mode.\n");
printf("See --help for more information\n");
return -8000;
}
if (checkCmdLineFlag(argc, argv, "increment"))
{
increment = getCmdLineArgumentInt(argc, argv, "increment");
if (increment <= 0)
{
printf("Illegal argument - increment must be greater than zero\n");
return -9000;
}
}
else
{
printf("Must specify an increment in user mode\n");
printf("See --help for more information\n");
return -10000;
}
}
if (htod)
{
testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment,
mode, HOST_TO_DEVICE, printmode, memMode, startDevice, endDevice, wc);
}
if (dtoh)
{
testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment,
mode, DEVICE_TO_HOST, printmode, memMode, startDevice, endDevice, wc);
}
if (dtod)
{
testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment,
mode, DEVICE_TO_DEVICE, printmode, memMode, startDevice, endDevice, wc);
}
// Ensure that we reset all CUDA Devices in question
for (int nDevice = startDevice; nDevice <= endDevice; nDevice++)
{
hipSetDevice(nDevice);
}
return 0;
}
///////////////////////////////////////////////////////////////////////////////
// Run a bandwidth test
///////////////////////////////////////////////////////////////////////////////
void
testBandwidth(unsigned int start, unsigned int end, unsigned int increment,
testMode mode, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc)
{
switch (mode)
{
case QUICK_MODE:
testBandwidthQuick(DEFAULT_SIZE, kind, printmode, memMode, startDevice, endDevice, wc);
break;
case RANGE_MODE:
testBandwidthRange(start, end, increment, kind, printmode, memMode, startDevice, endDevice, wc);
break;
case SHMOO_MODE:
testBandwidthShmoo(kind, printmode, memMode, startDevice, endDevice, wc);
break;
default:
break;
}
}
//////////////////////////////////////////////////////////////////////
// Run a quick mode bandwidth test
//////////////////////////////////////////////////////////////////////
void
testBandwidthQuick(unsigned int size, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc)
{
testBandwidthRange(size, size, DEFAULT_INCREMENT, kind, printmode, memMode, startDevice, endDevice, wc);
}
///////////////////////////////////////////////////////////////////////
// Run a range mode bandwidth test
//////////////////////////////////////////////////////////////////////
void
testBandwidthRange(unsigned int start, unsigned int end, unsigned int increment,
memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc)
{
//count the number of copies we're going to run
unsigned int count = 1 + ((end - start) / increment);
unsigned int *memSizes = (unsigned int *)malloc(count * sizeof(unsigned int));
double *bandwidths = (double *) malloc(count * sizeof(double));
// Before calculating the cumulative bandwidth, initialize bandwidths array to NULL
for (unsigned int i = 0; i < count; i++)
{
bandwidths[i] = 0.0;
}
// Use the device asked by the user
for (int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++)
{
hipSetDevice(currentDevice);
//run each of the copies
for (unsigned int i = 0; i < count; i++)
{
memSizes[i] = start + i * increment;
switch (kind)
{
case DEVICE_TO_HOST:
bandwidths[i] += testDeviceToHostTransfer(memSizes[i], memMode, wc);
break;
case HOST_TO_DEVICE:
bandwidths[i] += testHostToDeviceTransfer(memSizes[i], memMode, wc);
break;
case DEVICE_TO_DEVICE:
bandwidths[i] += testDeviceToDeviceTransfer(memSizes[i]);
break;
}
}
} // Complete the bandwidth computation on all the devices
//print results
if (printmode == CSV)
{
printResultsCSV(memSizes, bandwidths, count, kind, memMode, (1 + endDevice - startDevice), wc);
}
else
{
printResultsReadable(memSizes, bandwidths, count, kind, memMode, (1 + endDevice - startDevice), wc);
}
//clean up
free(memSizes);
free(bandwidths);
}
//////////////////////////////////////////////////////////////////////////////
// Intense shmoo mode - covers a large range of values with varying increments
//////////////////////////////////////////////////////////////////////////////
void
testBandwidthShmoo(memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc)
{
//count the number of copies to make
unsigned int count = 1 + (SHMOO_LIMIT_20KB / SHMOO_INCREMENT_1KB)
+ ((SHMOO_LIMIT_50KB - SHMOO_LIMIT_20KB) / SHMOO_INCREMENT_2KB)
+ ((SHMOO_LIMIT_100KB - SHMOO_LIMIT_50KB) / SHMOO_INCREMENT_10KB)
+ ((SHMOO_LIMIT_1MB - SHMOO_LIMIT_100KB) / SHMOO_INCREMENT_100KB)
+ ((SHMOO_LIMIT_16MB - SHMOO_LIMIT_1MB) / SHMOO_INCREMENT_1MB)
+ ((SHMOO_LIMIT_32MB - SHMOO_LIMIT_16MB) / SHMOO_INCREMENT_2MB)
+ ((SHMOO_MEMSIZE_MAX - SHMOO_LIMIT_32MB) / SHMOO_INCREMENT_4MB);
unsigned int *memSizes = (unsigned int *)malloc(count * sizeof(unsigned int));
double *bandwidths = (double *) malloc(count * sizeof(double));
// Before calculating the cumulative bandwidth, initialize bandwidths array to NULL
for (unsigned int i = 0; i < count; i++)
{
bandwidths[i] = 0.0;
}
// Use the device asked by the user
for (int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++)
{
hipSetDevice(currentDevice);
//Run the shmoo
int iteration = 0;
unsigned int memSize = 0;
while (memSize <= SHMOO_MEMSIZE_MAX)
{
if (memSize < SHMOO_LIMIT_20KB)
{
memSize += SHMOO_INCREMENT_1KB;
}
else if (memSize < SHMOO_LIMIT_50KB)
{
memSize += SHMOO_INCREMENT_2KB;
}
else if (memSize < SHMOO_LIMIT_100KB)
{
memSize += SHMOO_INCREMENT_10KB;
}
else if (memSize < SHMOO_LIMIT_1MB)
{
memSize += SHMOO_INCREMENT_100KB;
}
else if (memSize < SHMOO_LIMIT_16MB)
{
memSize += SHMOO_INCREMENT_1MB;
}
else if (memSize < SHMOO_LIMIT_32MB)
{
memSize += SHMOO_INCREMENT_2MB;
}
else
{
memSize += SHMOO_INCREMENT_4MB;
}
memSizes[iteration] = memSize;
switch (kind)
{
case DEVICE_TO_HOST:
bandwidths[iteration] += testDeviceToHostTransfer(memSizes[iteration], memMode, wc);
break;
case HOST_TO_DEVICE:
bandwidths[iteration] += testHostToDeviceTransfer(memSizes[iteration], memMode, wc);
break;
case DEVICE_TO_DEVICE:
bandwidths[iteration] += testDeviceToDeviceTransfer(memSizes[iteration]);
break;
}
iteration++;
printf(".");
}
} // Complete the bandwidth computation on all the devices
//print results
printf("\n");
if (CSV == printmode)
{
printResultsCSV(memSizes, bandwidths, count, kind, memMode, (1 + endDevice - startDevice), wc);
}
else
{
printResultsReadable(memSizes, bandwidths, count, kind, memMode, (1 + endDevice - startDevice), wc);
}
//clean up
free(memSizes);
free(bandwidths);
}
///////////////////////////////////////////////////////////////////////////////
// test the bandwidth of a device to host memcopy of a specific size
///////////////////////////////////////////////////////////////////////////////
float
testDeviceToHostTransfer(unsigned int memSize, memoryMode memMode, bool wc)
{
StopWatchInterface *timer = NULL;
float elapsedTimeInMs = 0.0f;
float bandwidthInGBs = 0.0f;
unsigned char *h_idata = NULL;
unsigned char *h_odata = NULL;
hipEvent_t start, stop;
sdkCreateTimer(&timer);
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
//allocate host memory
if (PINNED == memMode)
{
//pinned memory mode - use special function to get OS-pinned memory
#if CUDART_VERSION >= 2020
checkCudaErrors(hipHostMalloc((void **)&h_idata, memSize, (wc) ? hipHostMallocWriteCombined : 0));
checkCudaErrors(hipHostMalloc((void **)&h_odata, memSize, (wc) ? hipHostMallocWriteCombined : 0));
#else
checkCudaErrors(hipHostMalloc((void **)&h_idata, memSize));
checkCudaErrors(hipHostMalloc((void **)&h_odata, memSize));
#endif
}
else
{
//pageable memory mode - use malloc
h_idata = (unsigned char *)malloc(memSize);
h_odata = (unsigned char *)malloc(memSize);
if (h_idata == 0 || h_odata == 0)
{
fprintf(stderr, "Not enough memory avaialable on host to run test!\n");
exit(EXIT_FAILURE);
}
}
//initialize the memory
for (unsigned int i = 0; i < memSize/sizeof(unsigned char); i++)
{
h_idata[i] = (unsigned char)(i & 0xff);
}
// allocate device memory
unsigned char *d_idata;
checkCudaErrors(hipMalloc((void **) &d_idata, memSize));
//initialize the device memory
checkCudaErrors(hipMemcpy(d_idata, h_idata, memSize,
hipMemcpyHostToDevice));
//copy data from GPU to Host
sdkStartTimer(&timer);
checkCudaErrors(hipEventRecord(start, 0));
if (PINNED == memMode)
{
for (unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++)
{
checkCudaErrors(hipMemcpyAsync(h_odata, d_idata, memSize,
hipMemcpyDeviceToHost, 0));
}
}
else
{
for (unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++)
{
checkCudaErrors(hipMemcpy(h_odata, d_idata, memSize,
hipMemcpyDeviceToHost));
}
}
checkCudaErrors(hipEventRecord(stop, 0));
// make sure GPU has finished copying
checkCudaErrors(hipDeviceSynchronize());
//get the total elapsed time in ms
sdkStopTimer(&timer);
checkCudaErrors(hipEventElapsedTime(&elapsedTimeInMs, start, stop));
if (PINNED != memMode || bDontUseGPUTiming)
{
elapsedTimeInMs = sdkGetTimerValue(&timer);
}
//calculate bandwidth in GB/s
double time_s = elapsedTimeInMs / 1e3;
bandwidthInGBs = (memSize * (float)MEMCOPY_ITERATIONS) / (double)1e9;
bandwidthInGBs = bandwidthInGBs / time_s;
//clean up memory
checkCudaErrors(hipEventDestroy(stop));
checkCudaErrors(hipEventDestroy(start));
sdkDeleteTimer(&timer);
if (PINNED == memMode)
{
checkCudaErrors(hipHostFree(h_idata));
checkCudaErrors(hipHostFree(h_odata));
}
else
{
free(h_idata);
free(h_odata);
}
checkCudaErrors(hipFree(d_idata));
return bandwidthInGBs;
}
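// The figure above is (bytes per copy * MEMCOPY_ITERATIONS) / 1e9 divided by the
// elapsed time in seconds. For example, at the default 32 MB transfer size, 100
// iterations move 3.2 GB; if that takes 0.4 s, the reported bandwidth is
// 8.0 GB/s (timings here are illustrative only).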
///////////////////////////////////////////////////////////////////////////////
//! test the bandwidth of a host to device memcopy of a specific size
///////////////////////////////////////////////////////////////////////////////
float
testHostToDeviceTransfer(unsigned int memSize, memoryMode memMode, bool wc)
{
StopWatchInterface *timer = NULL;
float elapsedTimeInMs = 0.0f;
float bandwidthInGBs = 0.0f;
hipEvent_t start, stop;
sdkCreateTimer(&timer);
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
//allocate host memory
unsigned char *h_odata = NULL;
if (PINNED == memMode)
{
#if CUDART_VERSION >= 2020
//pinned memory mode - use special function to get OS-pinned memory
checkCudaErrors(hipHostMalloc((void **)&h_odata, memSize, (wc) ? hipHostMallocWriteCombined : 0));
#else
//pinned memory mode - use special function to get OS-pinned memory
checkCudaErrors(hipHostMalloc((void **)&h_odata, memSize));
#endif
}
else
{
//pageable memory mode - use malloc
h_odata = (unsigned char *)malloc(memSize);
if (h_odata == 0)
{
fprintf(stderr, "Not enough memory available on host to run test!\n");
exit(EXIT_FAILURE);
}
}
unsigned char *h_cacheClear1 = (unsigned char *)malloc(CACHE_CLEAR_SIZE);
unsigned char *h_cacheClear2 = (unsigned char *)malloc(CACHE_CLEAR_SIZE);
if (h_cacheClear1 == 0 || h_cacheClear2 == 0)
{
fprintf(stderr, "Not enough memory available on host to run test!\n");
exit(EXIT_FAILURE);
}
//initialize the memory
for (unsigned int i = 0; i < memSize/sizeof(unsigned char); i++)
{
h_odata[i] = (unsigned char)(i & 0xff);
}
for (unsigned int i = 0; i < CACHE_CLEAR_SIZE / sizeof(unsigned char); i++)
{
h_cacheClear1[i] = (unsigned char)(i & 0xff);
h_cacheClear2[i] = (unsigned char)(0xff - (i & 0xff));
}
//allocate device memory
unsigned char *d_idata;
checkCudaErrors(hipMalloc((void **) &d_idata, memSize));
sdkStartTimer(&timer);
checkCudaErrors(hipEventRecord(start, 0));
//copy host memory to device memory
if (PINNED == memMode)
{
for (unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++)
{
checkCudaErrors(hipMemcpyAsync(d_idata, h_odata, memSize,
hipMemcpyHostToDevice, 0));
}
}
else
{
for (unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++)
{
checkCudaErrors(hipMemcpy(d_idata, h_odata, memSize,
hipMemcpyHostToDevice));
}
}
checkCudaErrors(hipEventRecord(stop, 0));
checkCudaErrors(hipDeviceSynchronize());
//total elapsed time in ms
sdkStopTimer(&timer);
checkCudaErrors(hipEventElapsedTime(&elapsedTimeInMs, start, stop));
if (PINNED != memMode || bDontUseGPUTiming)
{
elapsedTimeInMs = sdkGetTimerValue(&timer);
}
sdkResetTimer(&timer);
//calculate bandwidth in GB/s
double time_s = elapsedTimeInMs / 1e3;
bandwidthInGBs = (memSize * (float)MEMCOPY_ITERATIONS) / (double)1e9;
bandwidthInGBs = bandwidthInGBs / time_s;
//clean up memory
checkCudaErrors(hipEventDestroy(stop));
checkCudaErrors(hipEventDestroy(start));
sdkDeleteTimer(&timer);
if (PINNED == memMode)
{
checkCudaErrors(hipHostFree(h_odata));
}
else
{
free(h_odata);
}
free(h_cacheClear1);
free(h_cacheClear2);
checkCudaErrors(hipFree(d_idata));
return bandwidthInGBs;
}
///////////////////////////////////////////////////////////////////////////////
//! test the bandwidth of a device to device memcopy of a specific size
///////////////////////////////////////////////////////////////////////////////
float
testDeviceToDeviceTransfer(unsigned int memSize)
{
StopWatchInterface *timer = NULL;
float elapsedTimeInMs = 0.0f;
float bandwidthInGBs = 0.0f;
hipEvent_t start, stop;
sdkCreateTimer(&timer);
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
//allocate host memory
unsigned char *h_idata = (unsigned char *)malloc(memSize);
if (h_idata == 0)
{
fprintf(stderr, "Not enough memory avaialable on host to run test!\n");
exit(EXIT_FAILURE);
}
//initialize the host memory
for (unsigned int i = 0; i < memSize/sizeof(unsigned char); i++)
{
h_idata[i] = (unsigned char)(i & 0xff);
}
//allocate device memory
unsigned char *d_idata;
checkCudaErrors(hipMalloc((void **) &d_idata, memSize));
unsigned char *d_odata;
checkCudaErrors(hipMalloc((void **) &d_odata, memSize));
//initialize memory
checkCudaErrors(hipMemcpy(d_idata, h_idata, memSize,
hipMemcpyHostToDevice));
//run the memcopy
sdkStartTimer(&timer);
checkCudaErrors(hipEventRecord(start, 0));
for (unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++)
{
checkCudaErrors(hipMemcpy(d_odata, d_idata, memSize,
hipMemcpyDeviceToDevice));
}
checkCudaErrors(hipEventRecord(stop, 0));
//Since device to device memory copies are non-blocking,
//hipDeviceSynchronize() is required in order to get
//proper timing.
checkCudaErrors(hipDeviceSynchronize());
//get the total elapsed time in ms
sdkStopTimer(&timer);
checkCudaErrors(hipEventElapsedTime(&elapsedTimeInMs, start, stop));
if (bDontUseGPUTiming)
{
elapsedTimeInMs = sdkGetTimerValue(&timer);
}
//calculate bandwidth in GB/s
double time_s = elapsedTimeInMs / 1e3;
bandwidthInGBs = (2.0f * memSize * (float)MEMCOPY_ITERATIONS) / (double)1e9;
bandwidthInGBs = bandwidthInGBs / time_s;
//clean up memory
sdkDeleteTimer(&timer);
free(h_idata);
checkCudaErrors(hipEventDestroy(stop));
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipFree(d_idata));
checkCudaErrors(hipFree(d_odata));
return bandwidthInGBs;
}
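// The 2.0f factor above counts each device-to-device copy as both a read and a
// write of memSize bytes on the same memory, so the figure is total traffic
// rather than copy throughput. With the default 32 MB size and 100 iterations
// that is 6.4 GB of traffic; 0.4 s elapsed would print 16.0 GB/s (example
// numbers only).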
/////////////////////////////////////////////////////////
//print results in an easily read format
////////////////////////////////////////////////////////
void printResultsReadable(unsigned int *memSizes, double *bandwidths, unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs, bool wc)
{
printf(" %s Bandwidth, %i Device(s)\n", sMemoryCopyKind[kind], iNumDevs);
printf(" %s Memory Transfers\n", sMemoryMode[memMode]);
if (wc)
{
printf(" Write-Combined Memory Writes are Enabled");
}
printf(" Transfer Size (Bytes)\tBandwidth(GB/s)\n");
unsigned int i;
for (i = 0; i < (count - 1); i++)
{
printf(" %u\t\t\t%s%.1f\n", memSizes[i], (memSizes[i] < 10000)? "\t" : "", bandwidths[i]);
}
printf(" %u\t\t\t%s%.1f\n\n", memSizes[i], (memSizes[i] < 10000)? "\t" : "", bandwidths[i]);
}
///////////////////////////////////////////////////////////////////////////
//print results in a database format
///////////////////////////////////////////////////////////////////////////
void printResultsCSV(unsigned int *memSizes, double *bandwidths, unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs, bool wc)
{
std::string sConfig;
// log config information
if (kind == DEVICE_TO_DEVICE)
{
sConfig += "D2D";
}
else
{
if (kind == DEVICE_TO_HOST)
{
sConfig += "D2H";
}
else if (kind == HOST_TO_DEVICE)
{
sConfig += "H2D";
}
if (memMode == PAGEABLE)
{
sConfig += "-Paged";
}
else if (memMode == PINNED)
{
sConfig += "-Pinned";
if (wc)
{
sConfig += "-WriteCombined";
}
}
}
unsigned int i;
double dSeconds = 0.0;
for (i = 0; i < count; i++)
{
dSeconds = (double)memSizes[i] / (bandwidths[i] * (double)1e9);
printf("bandwidthTest-%s, Bandwidth = %.1f GB/s, Time = %.5f s, Size = %u bytes, NumDevsUsed = %d\n",
sConfig.c_str(), bandwidths[i], dSeconds, memSizes[i], iNumDevs);
}
}
///////////////////////////////////////////////////////////////////////////
//Print help screen
///////////////////////////////////////////////////////////////////////////
void printHelp(void)
{
printf("Usage: bandwidthTest [OPTION]...\n");
printf("Test the bandwidth for device to host, host to device, and device to device transfers\n");
printf("\n");
printf("Example: measure the bandwidth of device to host pinned memory copies in the range 1024 Bytes to 102400 Bytes in 1024 Byte increments\n");
printf("./bandwidthTest --memory=pinned --mode=range --start=1024 --end=102400 --increment=1024 --dtoh\n");
printf("\n");
printf("Options:\n");
printf("--help\tDisplay this help menu\n");
printf("--csv\tPrint results as a CSV\n");
printf("--device=[deviceno]\tSpecify the device device to be used\n");
printf(" all - compute cumulative bandwidth on all the devices\n");
printf(" 0,1,2,...,n - Specify any particular device to be used\n");
printf("--memory=[MEMMODE]\tSpecify which memory mode to use\n");
printf(" pageable - pageable memory\n");
printf(" pinned - non-pageable system memory\n");
printf("--mode=[MODE]\tSpecify the mode to use\n");
printf(" quick - performs a quick measurement\n");
printf(" range - measures a user-specified range of values\n");
printf(" shmoo - performs an intense shmoo of a large range of values\n");
printf("--htod\tMeasure host to device transfers\n");
printf("--dtoh\tMeasure device to host transfers\n");
printf("--dtod\tMeasure device to device transfers\n");
#if CUDART_VERSION >= 2020
printf("--wc\tAllocate pinned memory as write-combined\n");
#endif
printf("--cputiming\tForce CPU-based timing always\n");
printf("Range mode options\n");
printf("--start=[SIZE]\tStarting transfer size in bytes\n");
printf("--end=[SIZE]\tEnding transfer size in bytes\n");
printf("--increment=[SIZE]\tIncrement size in bytes\n");
}
|
9a17b2c0a359856bcdda1fd90e19943bf9e33545.cu
|
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This is a simple test program to measure the memcopy bandwidth of the GPU.
* It can measure device to device copy bandwidth, host to device copy bandwidth
* for pageable and pinned memory, and device to host copy bandwidth for pageable
* and pinned memory.
*
* Usage:
* ./bandwidthTest [option]...
*/
// CUDA runtime
#include <cuda_runtime.h>
// includes
#include "helper_functions.h" // helper for shared functions common to CUDA Samples
#include "helper_cuda.h" // helper functions for CUDA error checking and initialization
#include <cuda.h>
#include <memory>
#include <iostream>
#include <cassert>
static const char *sSDKsample = "CUDA Bandwidth Test";
// defines, project
#define MEMCOPY_ITERATIONS 100
#define DEFAULT_SIZE ( 32 * (1e6) ) //32 M
#define DEFAULT_INCREMENT ( 4 * (1e6) ) //4 M
#define CACHE_CLEAR_SIZE ( 16 * (1e6) ) //16 M
//shmoo mode defines
#define SHMOO_MEMSIZE_MAX (64 * (1e6)) //64 M
#define SHMOO_MEMSIZE_START (1e3) //1 KB
#define SHMOO_INCREMENT_1KB (1e3) //1 KB
#define SHMOO_INCREMENT_2KB (2 * 1e3) //2 KB
#define SHMOO_INCREMENT_10KB (10 * (1e3)) //10KB
#define SHMOO_INCREMENT_100KB (100 * (1e3)) //100 KB
#define SHMOO_INCREMENT_1MB (1e6) //1 MB
#define SHMOO_INCREMENT_2MB (2 * 1e6) //2 MB
#define SHMOO_INCREMENT_4MB (4 * 1e6) //4 MB
#define SHMOO_LIMIT_20KB (20 * (1e3)) //20 KB
#define SHMOO_LIMIT_50KB (50 * (1e3)) //50 KB
#define SHMOO_LIMIT_100KB (100 * (1e3)) //100 KB
#define SHMOO_LIMIT_1MB (1e6) //1 MB
#define SHMOO_LIMIT_16MB (16 * 1e6) //16 MB
#define SHMOO_LIMIT_32MB (32 * 1e6) //32 MB
//enums, project
enum testMode { QUICK_MODE, RANGE_MODE, SHMOO_MODE };
enum memcpyKind { DEVICE_TO_HOST, HOST_TO_DEVICE, DEVICE_TO_DEVICE };
enum printMode { USER_READABLE, CSV };
enum memoryMode { PINNED, PAGEABLE };
const char *sMemoryCopyKind[] =
{
"Device to Host",
"Host to Device",
"Device to Device",
NULL
};
const char *sMemoryMode[] =
{
"PINNED",
"PAGEABLE",
NULL
};
// if true, use CPU based timing for everything
static bool bDontUseGPUTiming;
int *pArgc = NULL;
char **pArgv = NULL;
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
int runTest(const int argc, const char **argv);
void testBandwidth(unsigned int start, unsigned int end, unsigned int increment,
testMode mode, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc);
void testBandwidthQuick(unsigned int size, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc);
void testBandwidthRange(unsigned int start, unsigned int end, unsigned int increment,
memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc);
void testBandwidthShmoo(memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc);
float testDeviceToHostTransfer(unsigned int memSize, memoryMode memMode, bool wc);
float testHostToDeviceTransfer(unsigned int memSize, memoryMode memMode, bool wc);
float testDeviceToDeviceTransfer(unsigned int memSize);
void printResultsReadable(unsigned int *memSizes, double *bandwidths, unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs, bool wc);
void printResultsCSV(unsigned int *memSizes, double *bandwidths, unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs, bool wc);
void printHelp(void);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
pArgc = &argc;
pArgv = argv;
// set logfile name and start logs
printf("[%s] - Starting...\n", sSDKsample);
int iRetVal = runTest(argc, (const char **)argv);
if (iRetVal < 0)
{
checkCudaErrors(cudaSetDevice(0));
}
// finish
printf("%s\n", (iRetVal==0) ? "Result = PASS" : "Result = FAIL");
printf("\nNOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.\n");
exit((iRetVal==0) ? EXIT_SUCCESS : EXIT_FAILURE);
}
///////////////////////////////////////////////////////////////////////////////
//Parse args, run the appropriate tests
///////////////////////////////////////////////////////////////////////////////
int runTest(const int argc, const char **argv)
{
int start = DEFAULT_SIZE;
int end = DEFAULT_SIZE;
int startDevice = 0;
int endDevice = 0;
int increment = DEFAULT_INCREMENT;
testMode mode = QUICK_MODE;
bool htod = false;
bool dtoh = false;
bool dtod = false;
bool wc = false;
char *modeStr;
char *device = NULL;
printMode printmode = USER_READABLE;
char *memModeStr = NULL;
memoryMode memMode = PINNED;
//process command line args
if (checkCmdLineFlag(argc, argv, "help"))
{
printHelp();
return 0;
}
if (checkCmdLineFlag(argc, argv, "csv"))
{
printmode = CSV;
}
if (getCmdLineArgumentString(argc, argv, "memory", &memModeStr))
{
if (strcmp(memModeStr, "pageable") == 0)
{
memMode = PAGEABLE;
}
else if (strcmp(memModeStr, "pinned") == 0)
{
memMode = PINNED;
}
else
{
printf("Invalid memory mode - valid modes are pageable or pinned\n");
printf("See --help for more information\n");
return -1000;
}
}
else
{
//default - pinned memory
memMode = PINNED;
}
if (getCmdLineArgumentString(argc, argv, "device", &device))
{
int deviceCount;
cudaError_t error_id = cudaGetDeviceCount(&deviceCount);
if (error_id != cudaSuccess)
{
printf("cudaGetDeviceCount returned %d\n-> %s\n", (int)error_id, cudaGetErrorString(error_id));
exit(EXIT_FAILURE);
}
if (deviceCount == 0)
{
printf("!!!!!No devices found!!!!!\n");
return -2000;
}
if (strcmp(device, "all") == 0)
{
printf("\n!!!!!Cumulative Bandwidth to be computed from all the devices !!!!!!\n\n");
startDevice = 0;
endDevice = deviceCount-1;
}
else
{
startDevice = endDevice = atoi(device);
if (startDevice >= deviceCount || startDevice < 0)
{
printf("\n!!!!!Invalid GPU number %d given hence default gpu %d will be used !!!!!\n", startDevice,0);
startDevice = endDevice = 0;
}
}
}
printf("Running on...\n\n");
for (int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++)
{
cudaDeviceProp deviceProp;
cudaError_t error_id = cudaGetDeviceProperties(&deviceProp, currentDevice);
if (error_id == cudaSuccess)
{
printf(" Device %d: %s\n", currentDevice, deviceProp.name);
if (deviceProp.computeMode == cudaComputeModeProhibited)
{
fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n");
checkCudaErrors(cudaSetDevice(currentDevice));
exit(EXIT_FAILURE);
}
}
else
{
printf("cudaGetDeviceProperties returned %d\n-> %s\n", (int)error_id, cudaGetErrorString(error_id));
checkCudaErrors(cudaSetDevice(currentDevice));
exit(EXIT_FAILURE);
}
}
if (getCmdLineArgumentString(argc, argv, "mode", &modeStr))
{
//figure out the mode
if (strcmp(modeStr, "quick") == 0)
{
printf(" Quick Mode\n\n");
mode = QUICK_MODE;
}
else if (strcmp(modeStr, "shmoo") == 0)
{
printf(" Shmoo Mode\n\n");
mode = SHMOO_MODE;
}
else if (strcmp(modeStr, "range") == 0)
{
printf(" Range Mode\n\n");
mode = RANGE_MODE;
}
else
{
printf("Invalid mode - valid modes are quick, range, or shmoo\n");
printf("See --help for more information\n");
return -3000;
}
}
else
{
//default mode - quick
printf(" Quick Mode\n\n");
mode = QUICK_MODE;
}
if (checkCmdLineFlag(argc, argv, "htod"))
{
htod = true;
}
if (checkCmdLineFlag(argc, argv, "dtoh"))
{
dtoh = true;
}
if (checkCmdLineFlag(argc, argv, "dtod"))
{
dtod = true;
}
#if CUDART_VERSION >= 2020
if (checkCmdLineFlag(argc, argv, "wc"))
{
wc = true;
}
#endif
if (checkCmdLineFlag(argc, argv, "cputiming"))
{
bDontUseGPUTiming = true;
}
if (!htod && !dtoh && !dtod)
{
//default: All
htod = true;
dtoh = true;
dtod = true;
}
if (RANGE_MODE == mode)
{
if (checkCmdLineFlag(argc, (const char **)argv, "start"))
{
start = getCmdLineArgumentInt(argc, argv, "start");
if (start <= 0)
{
printf("Illegal argument - start must be greater than zero\n");
return -4000;
}
}
else
{
printf("Must specify a starting size in range mode\n");
printf("See --help for more information\n");
return -5000;
}
if (checkCmdLineFlag(argc, (const char **)argv, "end"))
{
end = getCmdLineArgumentInt(argc, argv, "end");
if (end <= 0)
{
printf("Illegal argument - end must be greater than zero\n");
return -6000;
}
if (start > end)
{
printf("Illegal argument - start is greater than end\n");
return -7000;
}
}
else
{
printf("Must specify an end size in range mode.\n");
printf("See --help for more information\n");
return -8000;
}
if (checkCmdLineFlag(argc, argv, "increment"))
{
increment = getCmdLineArgumentInt(argc, argv, "increment");
if (increment <= 0)
{
printf("Illegal argument - increment must be greater than zero\n");
return -9000;
}
}
else
{
printf("Must specify an increment in user mode\n");
printf("See --help for more information\n");
return -10000;
}
}
if (htod)
{
testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment,
mode, HOST_TO_DEVICE, printmode, memMode, startDevice, endDevice, wc);
}
if (dtoh)
{
testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment,
mode, DEVICE_TO_HOST, printmode, memMode, startDevice, endDevice, wc);
}
if (dtod)
{
testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment,
mode, DEVICE_TO_DEVICE, printmode, memMode, startDevice, endDevice, wc);
}
// Ensure that we reset all CUDA Devices in question
for (int nDevice = startDevice; nDevice <= endDevice; nDevice++)
{
cudaSetDevice(nDevice);
}
return 0;
}
///////////////////////////////////////////////////////////////////////////////
// Run a bandwidth test
///////////////////////////////////////////////////////////////////////////////
void
testBandwidth(unsigned int start, unsigned int end, unsigned int increment,
testMode mode, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc)
{
switch (mode)
{
case QUICK_MODE:
testBandwidthQuick(DEFAULT_SIZE, kind, printmode, memMode, startDevice, endDevice, wc);
break;
case RANGE_MODE:
testBandwidthRange(start, end, increment, kind, printmode, memMode, startDevice, endDevice, wc);
break;
case SHMOO_MODE:
testBandwidthShmoo(kind, printmode, memMode, startDevice, endDevice, wc);
break;
default:
break;
}
}
//////////////////////////////////////////////////////////////////////
// Run a quick mode bandwidth test
//////////////////////////////////////////////////////////////////////
void
testBandwidthQuick(unsigned int size, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc)
{
testBandwidthRange(size, size, DEFAULT_INCREMENT, kind, printmode, memMode, startDevice, endDevice, wc);
}
///////////////////////////////////////////////////////////////////////
// Run a range mode bandwidth test
//////////////////////////////////////////////////////////////////////
void
testBandwidthRange(unsigned int start, unsigned int end, unsigned int increment,
memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc)
{
//count the number of copies we're going to run
unsigned int count = 1 + ((end - start) / increment);
unsigned int *memSizes = (unsigned int *)malloc(count * sizeof(unsigned int));
double *bandwidths = (double *) malloc(count * sizeof(double));
// Before calculating the cumulative bandwidth, initialize bandwidths array to NULL
for (unsigned int i = 0; i < count; i++)
{
bandwidths[i] = 0.0;
}
// Use the device asked by the user
for (int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++)
{
cudaSetDevice(currentDevice);
//run each of the copies
for (unsigned int i = 0; i < count; i++)
{
memSizes[i] = start + i * increment;
switch (kind)
{
case DEVICE_TO_HOST:
bandwidths[i] += testDeviceToHostTransfer(memSizes[i], memMode, wc);
break;
case HOST_TO_DEVICE:
bandwidths[i] += testHostToDeviceTransfer(memSizes[i], memMode, wc);
break;
case DEVICE_TO_DEVICE:
bandwidths[i] += testDeviceToDeviceTransfer(memSizes[i]);
break;
}
}
} // Complete the bandwidth computation on all the devices
//print results
if (printmode == CSV)
{
printResultsCSV(memSizes, bandwidths, count, kind, memMode, (1 + endDevice - startDevice), wc);
}
else
{
printResultsReadable(memSizes, bandwidths, count, kind, memMode, (1 + endDevice - startDevice), wc);
}
//clean up
free(memSizes);
free(bandwidths);
}
//////////////////////////////////////////////////////////////////////////////
// Intense shmoo mode - covers a large range of values with varying increments
//////////////////////////////////////////////////////////////////////////////
void
testBandwidthShmoo(memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc)
{
//count the number of copies to make
unsigned int count = 1 + (SHMOO_LIMIT_20KB / SHMOO_INCREMENT_1KB)
+ ((SHMOO_LIMIT_50KB - SHMOO_LIMIT_20KB) / SHMOO_INCREMENT_2KB)
+ ((SHMOO_LIMIT_100KB - SHMOO_LIMIT_50KB) / SHMOO_INCREMENT_10KB)
+ ((SHMOO_LIMIT_1MB - SHMOO_LIMIT_100KB) / SHMOO_INCREMENT_100KB)
+ ((SHMOO_LIMIT_16MB - SHMOO_LIMIT_1MB) / SHMOO_INCREMENT_1MB)
+ ((SHMOO_LIMIT_32MB - SHMOO_LIMIT_16MB) / SHMOO_INCREMENT_2MB)
+ ((SHMOO_MEMSIZE_MAX - SHMOO_LIMIT_32MB) / SHMOO_INCREMENT_4MB);
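// (the terms above mirror the increment schedule of the while loop below:
//  1 KB steps to 20 KB, 2 KB to 50 KB, 10 KB to 100 KB, 100 KB to 1 MB,
//  1 MB to 16 MB, 2 MB to 32 MB, then 4 MB steps up to the 64 MB maximum)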
unsigned int *memSizes = (unsigned int *)malloc(count * sizeof(unsigned int));
double *bandwidths = (double *) malloc(count * sizeof(double));
// Before calculating the cumulative bandwidth, initialize bandwidths array to NULL
for (unsigned int i = 0; i < count; i++)
{
bandwidths[i] = 0.0;
}
// Use the device asked by the user
for (int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++)
{
cudaSetDevice(currentDevice);
//Run the shmoo
int iteration = 0;
unsigned int memSize = 0;
while (memSize <= SHMOO_MEMSIZE_MAX)
{
if (memSize < SHMOO_LIMIT_20KB)
{
memSize += SHMOO_INCREMENT_1KB;
}
else if (memSize < SHMOO_LIMIT_50KB)
{
memSize += SHMOO_INCREMENT_2KB;
}
else if (memSize < SHMOO_LIMIT_100KB)
{
memSize += SHMOO_INCREMENT_10KB;
}
else if (memSize < SHMOO_LIMIT_1MB)
{
memSize += SHMOO_INCREMENT_100KB;
}
else if (memSize < SHMOO_LIMIT_16MB)
{
memSize += SHMOO_INCREMENT_1MB;
}
else if (memSize < SHMOO_LIMIT_32MB)
{
memSize += SHMOO_INCREMENT_2MB;
}
else
{
memSize += SHMOO_INCREMENT_4MB;
}
memSizes[iteration] = memSize;
switch (kind)
{
case DEVICE_TO_HOST:
bandwidths[iteration] += testDeviceToHostTransfer(memSizes[iteration], memMode, wc);
break;
case HOST_TO_DEVICE:
bandwidths[iteration] += testHostToDeviceTransfer(memSizes[iteration], memMode, wc);
break;
case DEVICE_TO_DEVICE:
bandwidths[iteration] += testDeviceToDeviceTransfer(memSizes[iteration]);
break;
}
iteration++;
printf(".");
}
} // Complete the bandwidth computation on all the devices
//print results
printf("\n");
if (CSV == printmode)
{
printResultsCSV(memSizes, bandwidths, count, kind, memMode, (1 + endDevice - startDevice), wc);
}
else
{
printResultsReadable(memSizes, bandwidths, count, kind, memMode, (1 + endDevice - startDevice), wc);
}
//clean up
free(memSizes);
free(bandwidths);
}
///////////////////////////////////////////////////////////////////////////////
// test the bandwidth of a device to host memcopy of a specific size
///////////////////////////////////////////////////////////////////////////////
float
testDeviceToHostTransfer(unsigned int memSize, memoryMode memMode, bool wc)
{
StopWatchInterface *timer = NULL;
float elapsedTimeInMs = 0.0f;
float bandwidthInGBs = 0.0f;
unsigned char *h_idata = NULL;
unsigned char *h_odata = NULL;
cudaEvent_t start, stop;
sdkCreateTimer(&timer);
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
//allocate host memory
if (PINNED == memMode)
{
//pinned memory mode - use special function to get OS-pinned memory
#if CUDART_VERSION >= 2020
checkCudaErrors(cudaHostAlloc((void **)&h_idata, memSize, (wc) ? cudaHostAllocWriteCombined : 0));
checkCudaErrors(cudaHostAlloc((void **)&h_odata, memSize, (wc) ? cudaHostAllocWriteCombined : 0));
#else
checkCudaErrors(cudaMallocHost((void **)&h_idata, memSize));
checkCudaErrors(cudaMallocHost((void **)&h_odata, memSize));
#endif
}
else
{
//pageable memory mode - use malloc
h_idata = (unsigned char *)malloc(memSize);
h_odata = (unsigned char *)malloc(memSize);
if (h_idata == 0 || h_odata == 0)
{
fprintf(stderr, "Not enough memory avaialable on host to run test!\n");
exit(EXIT_FAILURE);
}
}
//initialize the memory
for (unsigned int i = 0; i < memSize/sizeof(unsigned char); i++)
{
h_idata[i] = (unsigned char)(i & 0xff);
}
// allocate device memory
unsigned char *d_idata;
checkCudaErrors(cudaMalloc((void **) &d_idata, memSize));
//initialize the device memory
checkCudaErrors(cudaMemcpy(d_idata, h_idata, memSize,
cudaMemcpyHostToDevice));
//copy data from GPU to Host
sdkStartTimer(&timer);
checkCudaErrors(cudaEventRecord(start, 0));
if (PINNED == memMode)
{
for (unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++)
{
checkCudaErrors(cudaMemcpyAsync(h_odata, d_idata, memSize,
cudaMemcpyDeviceToHost, 0));
}
}
else
{
for (unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++)
{
checkCudaErrors(cudaMemcpy(h_odata, d_idata, memSize,
cudaMemcpyDeviceToHost));
}
}
checkCudaErrors(cudaEventRecord(stop, 0));
// make sure GPU has finished copying
checkCudaErrors(cudaDeviceSynchronize());
//get the total elapsed time in ms
sdkStopTimer(&timer);
checkCudaErrors(cudaEventElapsedTime(&elapsedTimeInMs, start, stop));
if (PINNED != memMode || bDontUseGPUTiming)
{
elapsedTimeInMs = sdkGetTimerValue(&timer);
}
//calculate bandwidth in GB/s
double time_s = elapsedTimeInMs / 1e3;
bandwidthInGBs = (memSize * (float)MEMCOPY_ITERATIONS) / (double)1e9;
bandwidthInGBs = bandwidthInGBs / time_s;
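// e.g. (a sketch): 32 MB copied 100 times in 400 ms gives (32e6 * 100 / 1e9) / 0.4 = 8 GB/s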
//clean up memory
checkCudaErrors(cudaEventDestroy(stop));
checkCudaErrors(cudaEventDestroy(start));
sdkDeleteTimer(&timer);
if (PINNED == memMode)
{
checkCudaErrors(cudaFreeHost(h_idata));
checkCudaErrors(cudaFreeHost(h_odata));
}
else
{
free(h_idata);
free(h_odata);
}
checkCudaErrors(cudaFree(d_idata));
return bandwidthInGBs;
}
///////////////////////////////////////////////////////////////////////////////
//! test the bandwidth of a host to device memcopy of a specific size
///////////////////////////////////////////////////////////////////////////////
float
testHostToDeviceTransfer(unsigned int memSize, memoryMode memMode, bool wc)
{
StopWatchInterface *timer = NULL;
float elapsedTimeInMs = 0.0f;
float bandwidthInGBs = 0.0f;
cudaEvent_t start, stop;
sdkCreateTimer(&timer);
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
//allocate host memory
unsigned char *h_odata = NULL;
if (PINNED == memMode)
{
#if CUDART_VERSION >= 2020
//pinned memory mode - use special function to get OS-pinned memory
checkCudaErrors(cudaHostAlloc((void **)&h_odata, memSize, (wc) ? cudaHostAllocWriteCombined : 0));
#else
//pinned memory mode - use special function to get OS-pinned memory
checkCudaErrors(cudaMallocHost((void **)&h_odata, memSize));
#endif
}
else
{
//pageable memory mode - use malloc
h_odata = (unsigned char *)malloc(memSize);
if (h_odata == 0)
{
fprintf(stderr, "Not enough memory available on host to run test!\n");
exit(EXIT_FAILURE);
}
}
unsigned char *h_cacheClear1 = (unsigned char *)malloc(CACHE_CLEAR_SIZE);
unsigned char *h_cacheClear2 = (unsigned char *)malloc(CACHE_CLEAR_SIZE);
if (h_cacheClear1 == 0 || h_cacheClear2 == 0)
{
fprintf(stderr, "Not enough memory available on host to run test!\n");
exit(EXIT_FAILURE);
}
//initialize the memory
for (unsigned int i = 0; i < memSize/sizeof(unsigned char); i++)
{
h_odata[i] = (unsigned char)(i & 0xff);
}
for (unsigned int i = 0; i < CACHE_CLEAR_SIZE / sizeof(unsigned char); i++)
{
h_cacheClear1[i] = (unsigned char)(i & 0xff);
h_cacheClear2[i] = (unsigned char)(0xff - (i & 0xff));
}
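// note: the cache-clear buffers are only written here (presumably to flush the CPU cache
// before timing); they are not referenced again before being freed below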
//allocate device memory
unsigned char *d_idata;
checkCudaErrors(cudaMalloc((void **) &d_idata, memSize));
sdkStartTimer(&timer);
checkCudaErrors(cudaEventRecord(start, 0));
//copy host memory to device memory
if (PINNED == memMode)
{
for (unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++)
{
checkCudaErrors(cudaMemcpyAsync(d_idata, h_odata, memSize,
cudaMemcpyHostToDevice, 0));
}
}
else
{
for (unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++)
{
checkCudaErrors(cudaMemcpy(d_idata, h_odata, memSize,
cudaMemcpyHostToDevice));
}
}
checkCudaErrors(cudaEventRecord(stop, 0));
checkCudaErrors(cudaDeviceSynchronize());
//total elapsed time in ms
sdkStopTimer(&timer);
checkCudaErrors(cudaEventElapsedTime(&elapsedTimeInMs, start, stop));
if (PINNED != memMode || bDontUseGPUTiming)
{
elapsedTimeInMs = sdkGetTimerValue(&timer);
}
sdkResetTimer(&timer);
//calculate bandwidth in GB/s
double time_s = elapsedTimeInMs / 1e3;
bandwidthInGBs = (memSize * (float)MEMCOPY_ITERATIONS) / (double)1e9;
bandwidthInGBs = bandwidthInGBs / time_s;
//clean up memory
checkCudaErrors(cudaEventDestroy(stop));
checkCudaErrors(cudaEventDestroy(start));
sdkDeleteTimer(&timer);
if (PINNED == memMode)
{
checkCudaErrors(cudaFreeHost(h_odata));
}
else
{
free(h_odata);
}
free(h_cacheClear1);
free(h_cacheClear2);
checkCudaErrors(cudaFree(d_idata));
return bandwidthInGBs;
}
///////////////////////////////////////////////////////////////////////////////
//! test the bandwidth of a device to device memcopy of a specific size
///////////////////////////////////////////////////////////////////////////////
float
testDeviceToDeviceTransfer(unsigned int memSize)
{
StopWatchInterface *timer = NULL;
float elapsedTimeInMs = 0.0f;
float bandwidthInGBs = 0.0f;
cudaEvent_t start, stop;
sdkCreateTimer(&timer);
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
//allocate host memory
unsigned char *h_idata = (unsigned char *)malloc(memSize);
if (h_idata == 0)
{
fprintf(stderr, "Not enough memory avaialable on host to run test!\n");
exit(EXIT_FAILURE);
}
//initialize the host memory
for (unsigned int i = 0; i < memSize/sizeof(unsigned char); i++)
{
h_idata[i] = (unsigned char)(i & 0xff);
}
//allocate device memory
unsigned char *d_idata;
checkCudaErrors(cudaMalloc((void **) &d_idata, memSize));
unsigned char *d_odata;
checkCudaErrors(cudaMalloc((void **) &d_odata, memSize));
//initialize memory
checkCudaErrors(cudaMemcpy(d_idata, h_idata, memSize,
cudaMemcpyHostToDevice));
//run the memcopy
sdkStartTimer(&timer);
checkCudaErrors(cudaEventRecord(start, 0));
for (unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++)
{
checkCudaErrors(cudaMemcpy(d_odata, d_idata, memSize,
cudaMemcpyDeviceToDevice));
}
checkCudaErrors(cudaEventRecord(stop, 0));
//Since device to device memory copies are non-blocking,
//cudaDeviceSynchronize() is required in order to get
//proper timing.
checkCudaErrors(cudaDeviceSynchronize());
//get the total elapsed time in ms
sdkStopTimer(&timer);
checkCudaErrors(cudaEventElapsedTime(&elapsedTimeInMs, start, stop));
if (bDontUseGPUTiming)
{
elapsedTimeInMs = sdkGetTimerValue(&timer);
}
//calculate bandwidth in GB/s
double time_s = elapsedTimeInMs / 1e3;
bandwidthInGBs = (2.0f * memSize * (float)MEMCOPY_ITERATIONS) / (double)1e9;
bandwidthInGBs = bandwidthInGBs / time_s;
//clean up memory
sdkDeleteTimer(&timer);
free(h_idata);
checkCudaErrors(cudaEventDestroy(stop));
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaFree(d_idata));
checkCudaErrors(cudaFree(d_odata));
return bandwidthInGBs;
}
/////////////////////////////////////////////////////////
//print results in an easily read format
////////////////////////////////////////////////////////
void printResultsReadable(unsigned int *memSizes, double *bandwidths, unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs, bool wc)
{
printf(" %s Bandwidth, %i Device(s)\n", sMemoryCopyKind[kind], iNumDevs);
printf(" %s Memory Transfers\n", sMemoryMode[memMode]);
if (wc)
{
printf(" Write-Combined Memory Writes are Enabled");
}
printf(" Transfer Size (Bytes)\tBandwidth(GB/s)\n");
unsigned int i;
for (i = 0; i < (count - 1); i++)
{
printf(" %u\t\t\t%s%.1f\n", memSizes[i], (memSizes[i] < 10000)? "\t" : "", bandwidths[i]);
}
printf(" %u\t\t\t%s%.1f\n\n", memSizes[i], (memSizes[i] < 10000)? "\t" : "", bandwidths[i]);
}
///////////////////////////////////////////////////////////////////////////
//print results in a database format
///////////////////////////////////////////////////////////////////////////
void printResultsCSV(unsigned int *memSizes, double *bandwidths, unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs, bool wc)
{
std::string sConfig;
// log config information
if (kind == DEVICE_TO_DEVICE)
{
sConfig += "D2D";
}
else
{
if (kind == DEVICE_TO_HOST)
{
sConfig += "D2H";
}
else if (kind == HOST_TO_DEVICE)
{
sConfig += "H2D";
}
if (memMode == PAGEABLE)
{
sConfig += "-Paged";
}
else if (memMode == PINNED)
{
sConfig += "-Pinned";
if (wc)
{
sConfig += "-WriteCombined";
}
}
}
unsigned int i;
double dSeconds = 0.0;
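// recover the transfer time from the measured bandwidth: seconds = bytes / (GB/s * 1e9 bytes per GB)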
for (i = 0; i < count; i++)
{
dSeconds = (double)memSizes[i] / (bandwidths[i] * (double)(1e9));
printf("bandwidthTest-%s, Bandwidth = %.1f GB/s, Time = %.5f s, Size = %u bytes, NumDevsUsed = %d\n",
sConfig.c_str(), bandwidths[i], dSeconds, memSizes[i], iNumDevs);
}
}
///////////////////////////////////////////////////////////////////////////
//Print help screen
///////////////////////////////////////////////////////////////////////////
void printHelp(void)
{
printf("Usage: bandwidthTest [OPTION]...\n");
printf("Test the bandwidth for device to host, host to device, and device to device transfers\n");
printf("\n");
printf("Example: measure the bandwidth of device to host pinned memory copies in the range 1024 Bytes to 102400 Bytes in 1024 Byte increments\n");
printf("./bandwidthTest --memory=pinned --mode=range --start=1024 --end=102400 --increment=1024 --dtoh\n");
printf("\n");
printf("Options:\n");
printf("--help\tDisplay this help menu\n");
printf("--csv\tPrint results as a CSV\n");
printf("--device=[deviceno]\tSpecify the device device to be used\n");
printf(" all - compute cumulative bandwidth on all the devices\n");
printf(" 0,1,2,...,n - Specify any particular device to be used\n");
printf("--memory=[MEMMODE]\tSpecify which memory mode to use\n");
printf(" pageable - pageable memory\n");
printf(" pinned - non-pageable system memory\n");
printf("--mode=[MODE]\tSpecify the mode to use\n");
printf(" quick - performs a quick measurement\n");
printf(" range - measures a user-specified range of values\n");
printf(" shmoo - performs an intense shmoo of a large range of values\n");
printf("--htod\tMeasure host to device transfers\n");
printf("--dtoh\tMeasure device to host transfers\n");
printf("--dtod\tMeasure device to device transfers\n");
#if CUDART_VERSION >= 2020
printf("--wc\tAllocate pinned memory as write-combined\n");
#endif
printf("--cputiming\tForce CPU-based timing always\n");
printf("Range mode options\n");
printf("--start=[SIZE]\tStarting transfer size in bytes\n");
printf("--end=[SIZE]\tEnding transfer size in bytes\n");
printf("--increment=[SIZE]\tIncrement size in bytes\n");
}
|
7b6d1f2c7ad2cc085e4830d44fb5d3763885821c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "hip/hip_runtime.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
void check_error (const char* message) {
hipError_t error = hipGetLastError ();
if (error != hipSuccess) {
printf ("CUDA error : %s, %s\n", message, hipGetErrorString (error));
exit(-1);
}
}
__global__ void hypterm (double * __restrict__ flux_in_0, double * __restrict__ flux_in_1, double * __restrict__ flux_in_2, double * __restrict__ flux_in_3, double * __restrict__ flux_in_4, double * __restrict__ cons_in_1, double * __restrict__ cons_in_2, double * __restrict__ cons_in_3, double * __restrict__ cons_in_4, double * __restrict__ q_in_1, double * __restrict__ q_in_2, double * __restrict__ q_in_3, double * __restrict__ q_in_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
//Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
int k0 = (int)(blockIdx.z)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.z);
double (*flux_0)[308][308] = (double (*)[308][308])flux_in_0;
double (*flux_1)[308][308] = (double (*)[308][308])flux_in_1;
double (*flux_2)[308][308] = (double (*)[308][308])flux_in_2;
double (*flux_3)[308][308] = (double (*)[308][308])flux_in_3;
double (*flux_4)[308][308] = (double (*)[308][308])flux_in_4;
double (*q_1)[308][308] = (double (*)[308][308])q_in_1;
double (*q_2)[308][308] = (double (*)[308][308])q_in_2;
double (*q_3)[308][308] = (double (*)[308][308])q_in_3;
double (*q_4)[308][308] = (double (*)[308][308])q_in_4;
double (*cons_1)[308][308] = (double (*)[308][308])cons_in_1;
double (*cons_2)[308][308] = (double (*)[308][308])cons_in_2;
double (*cons_3)[308][308] = (double (*)[308][308])cons_in_3;
double (*cons_4)[308][308] = (double (*)[308][308])cons_in_4;
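// The weights 0.8, 0.2, 0.038 and 0.0035 used below appear to be truncated 8th-order
// central-difference coefficients (4/5, 1/5, 4/105 and 1/280) applied along each axis.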
if (i>=4 && j>=4 && k>=4 && i<=N-5 && j<=N-5 && k<=N-5) {
double _t_1_ = cons_1[k][j][i+1];
_t_1_ -= cons_1[k][j][i-1];
double flux_0kc0jc0ic0 = dxinv0 * 0.8 * _t_1_;
double _t_2_ = cons_1[k][j][i+2];
_t_2_ -= cons_1[k][j][i-2];
flux_0kc0jc0ic0 -= dxinv0 * 0.2 * _t_2_;
double _t_3_ = cons_1[k][j][i+3];
_t_3_ -= cons_1[k][j][i-3];
flux_0kc0jc0ic0 += dxinv0 * 0.038 * _t_3_;
double _t_4_ = cons_1[k][j][i+4];
_t_4_ -= cons_1[k][j][i-4];
flux_0kc0jc0ic0 -= dxinv0 * 0.0035 * _t_4_;
double _t_6_ = cons_1[k][j][i+1] * q_1[k][j][i+1];
_t_6_ -= cons_1[k][j][i-1] * q_1[k][j][i-1];
_t_6_ += q_4[k][j][i+1];
_t_6_ -= q_4[k][j][i-1];
double flux_1kc0jc0ic0 = dxinv0 * 0.8 * _t_6_;
double _t_7_ = cons_1[k][j][i+2] * q_1[k][j][i+2];
_t_7_ -= cons_1[k][j][i-2] * q_1[k][j][i-2];
_t_7_ += q_4[k][j][i+2];
_t_7_ -= q_4[k][j][i-2];
flux_1kc0jc0ic0 -= dxinv0 * 0.2 * _t_7_;
double _t_8_ = cons_1[k][j][i+3] * q_1[k][j][i+3];
_t_8_ -= cons_1[k][j][i-3] * q_1[k][j][i-3];
_t_8_ += q_4[k][j][i+3];
_t_8_ -= q_4[k][j][i-3];
flux_1kc0jc0ic0 += dxinv0 * 0.038 * _t_8_;
double _t_9_ = cons_1[k][j][i+4] * q_1[k][j][i+4];
_t_9_ -= cons_1[k][j][i-4] * q_1[k][j][i-4];
_t_9_ += q_4[k][j][i+4];
_t_9_ -= q_4[k][j][i-4];
flux_1kc0jc0ic0 -= dxinv0 * 0.0035 * _t_9_;
double _t_11_ = cons_2[k][j][i+1] * q_1[k][j][i+1];
_t_11_ -= cons_2[k][j][i-1] * q_1[k][j][i-1];
double flux_2kc0jc0ic0 = dxinv0 * 0.8 * _t_11_;
double _t_12_ = cons_2[k][j][i+2] * q_1[k][j][i+2];
_t_12_ -= cons_2[k][j][i-2] * q_1[k][j][i-2];
flux_2kc0jc0ic0 -= dxinv0 * 0.2 * _t_12_;
double _t_13_ = cons_2[k][j][i+3] * q_1[k][j][i+3];
_t_13_ -= cons_2[k][j][i-3] * q_1[k][j][i-3];
flux_2kc0jc0ic0 += dxinv0 * 0.038 * _t_13_;
double _t_14_ = cons_2[k][j][i+4] * q_1[k][j][i+4];
_t_14_ -= cons_2[k][j][i-4] * q_1[k][j][i-4];
flux_2kc0jc0ic0 -= dxinv0 * 0.0035 * _t_14_;
double _t_16_ = cons_3[k][j][i+1] * q_1[k][j][i+1];
_t_16_ -= cons_3[k][j][i-1] * q_1[k][j][i-1];
double flux_3kc0jc0ic0 = dxinv0 * 0.8 * _t_16_;
double _t_17_ = cons_3[k][j][i+2] * q_1[k][j][i+2];
_t_17_ -= cons_3[k][j][i-2] * q_1[k][j][i-2];
flux_3kc0jc0ic0 -= dxinv0 * 0.2 * _t_17_;
double _t_18_ = cons_3[k][j][i+3] * q_1[k][j][i+3];
_t_18_ -= cons_3[k][j][i-3] * q_1[k][j][i-3];
flux_3kc0jc0ic0 += dxinv0 * 0.038 * _t_18_;
double _t_19_ = cons_3[k][j][i+4] * q_1[k][j][i+4];
_t_19_ -= cons_3[k][j][i-4] * q_1[k][j][i-4];
flux_3kc0jc0ic0 -= dxinv0 * 0.0035 * _t_19_;
double _t_21_ = q_4[k][j][i+1] * q_1[k][j][i+1];
double _v_24_ = cons_4[k][j][i+1] * q_1[k][j][i+1];
_t_21_ += _v_24_;
_t_21_ -= cons_4[k][j][i-1] * q_1[k][j][i-1];
double _v_27_ = q_4[k][j][i-1] * q_1[k][j][i-1];
_t_21_ -= _v_27_;
double flux_4kc0jc0ic0 = dxinv0 * 0.8 * _t_21_;
double _t_22_ = q_4[k][j][i+2] * q_1[k][j][i+2];
double _v_28_ = cons_4[k][j][i+2] * q_1[k][j][i+2];
_t_22_ += _v_28_;
_t_22_ -= cons_4[k][j][i-2] * q_1[k][j][i-2];
double _v_31_ = q_4[k][j][i-2] * q_1[k][j][i-2];
_t_22_ -= _v_31_;
flux_4kc0jc0ic0 -= dxinv0 * 0.2 * _t_22_;
double _t_23_ = q_4[k][j][i+3] * q_1[k][j][i+3];
double _v_32_ = cons_4[k][j][i+3] * q_1[k][j][i+3];
_t_23_ += _v_32_;
_t_23_ -= cons_4[k][j][i-3] * q_1[k][j][i-3];
double _v_35_ = q_4[k][j][i-3] * q_1[k][j][i-3];
_t_23_ -= _v_35_;
flux_4kc0jc0ic0 += dxinv0 * 0.038 * _t_23_;
double _t_24_ = q_4[k][j][i+4] * q_1[k][j][i+4];
double _v_36_ = cons_4[k][j][i+4] * q_1[k][j][i+4];
_t_24_ += _v_36_;
_t_24_ -= cons_4[k][j][i-4] * q_1[k][j][i-4];
double _v_39_ = q_4[k][j][i-4] * q_1[k][j][i-4];
_t_24_ -= _v_39_;
flux_4kc0jc0ic0 -= dxinv0 * 0.0035 * _t_24_;
double _t_27_ = cons_2[k][j+1][i];
_t_27_ -= cons_2[k][j-1][i];
double _t_25_ = dxinv1 * 0.8 * _t_27_;
double _t_28_ = cons_2[k][j+2][i];
_t_28_ -= cons_2[k][j-2][i];
_t_25_ -= dxinv1 * 0.2 * _t_28_;
double _t_29_ = cons_2[k][j+3][i];
_t_29_ -= cons_2[k][j-3][i];
_t_25_ += dxinv1 * 0.038 * _t_29_;
double _t_30_ = cons_2[k][j+4][i];
_t_30_ -= cons_2[k][j-4][i];
_t_25_ -= dxinv1 * 0.0035 * _t_30_;
flux_0kc0jc0ic0 -= _t_25_;
double _t_33_ = cons_1[k][j+1][i] * q_2[k][j+1][i];
_t_33_ -= cons_1[k][j-1][i] * q_2[k][j-1][i];
double _t_31_ = dxinv1 * 0.8 * _t_33_;
double _t_34_ = cons_1[k][j+2][i] * q_2[k][j+2][i];
_t_34_ -= cons_1[k][j-2][i] * q_2[k][j-2][i];
_t_31_ -= dxinv1 * 0.2 * _t_34_;
double _t_35_ = cons_1[k][j+3][i] * q_2[k][j+3][i];
_t_35_ -= cons_1[k][j-3][i] * q_2[k][j-3][i];
_t_31_ += dxinv1 * 0.038 * _t_35_;
double _t_36_ = cons_1[k][j+4][i] * q_2[k][j+4][i];
_t_36_ -= cons_1[k][j-4][i] * q_2[k][j-4][i];
_t_31_ -= dxinv1 * 0.0035 * _t_36_;
flux_1kc0jc0ic0 -= _t_31_;
double _t_39_ = cons_2[k][j+1][i] * q_2[k][j+1][i];
_t_39_ -= cons_2[k][j-1][i] * q_2[k][j-1][i];
_t_39_ += q_4[k][j+1][i];
_t_39_ -= q_4[k][j-1][i];
double _t_37_ = dxinv1 * 0.8 * _t_39_;
double _t_40_ = cons_2[k][j+2][i] * q_2[k][j+2][i];
_t_40_ -= cons_2[k][j-2][i] * q_2[k][j-2][i];
_t_40_ += q_4[k][j+2][i];
_t_40_ -= q_4[k][j-2][i];
_t_37_ -= dxinv1 * 0.2 * _t_40_;
double _t_41_ = cons_2[k][j+3][i] * q_2[k][j+3][i];
_t_41_ -= cons_2[k][j-3][i] * q_2[k][j-3][i];
_t_41_ += q_4[k][j+3][i];
_t_41_ -= q_4[k][j-3][i];
_t_37_ += dxinv1 * 0.038 * _t_41_;
double _t_42_ = cons_2[k][j+4][i] * q_2[k][j+4][i];
_t_42_ -= cons_2[k][j-4][i] * q_2[k][j-4][i];
_t_42_ += q_4[k][j+4][i];
_t_42_ -= q_4[k][j-4][i];
_t_37_ -= dxinv1 * 0.0035 * _t_42_;
flux_2kc0jc0ic0 -= _t_37_;
double _t_45_ = cons_3[k][j+1][i] * q_2[k][j+1][i];
_t_45_ -= cons_3[k][j-1][i] * q_2[k][j-1][i];
double _t_43_ = dxinv1 * 0.8 * _t_45_;
double _t_46_ = cons_3[k][j+2][i] * q_2[k][j+2][i];
_t_46_ -= cons_3[k][j-2][i] * q_2[k][j-2][i];
_t_43_ -= dxinv1 * 0.2 * _t_46_;
double _t_47_ = cons_3[k][j+3][i] * q_2[k][j+3][i];
_t_47_ -= cons_3[k][j-3][i] * q_2[k][j-3][i];
_t_43_ += dxinv1 * 0.038 * _t_47_;
double _t_48_ = cons_3[k][j+4][i] * q_2[k][j+4][i];
_t_48_ -= cons_3[k][j-4][i] * q_2[k][j-4][i];
_t_43_ -= dxinv1 * 0.0035 * _t_48_;
flux_3kc0jc0ic0 -= _t_43_;
double _t_51_ = q_4[k][j+1][i] * q_2[k][j+1][i];
double _v_64_ = cons_4[k][j+1][i] * q_2[k][j+1][i];
_t_51_ += _v_64_;
_t_51_ -= cons_4[k][j-1][i] * q_2[k][j-1][i];
double _v_67_ = q_4[k][j-1][i] * q_2[k][j-1][i];
_t_51_ -= _v_67_;
double _t_49_ = dxinv1 * 0.8 * _t_51_;
double _t_52_ = q_4[k][j+2][i] * q_2[k][j+2][i];
double _v_68_ = cons_4[k][j+2][i] * q_2[k][j+2][i];
_t_52_ += _v_68_;
_t_52_ -= cons_4[k][j-2][i] * q_2[k][j-2][i];
double _v_71_ = q_4[k][j-2][i] * q_2[k][j-2][i];
_t_52_ -= _v_71_;
_t_49_ -= dxinv1 * 0.2 * _t_52_;
double _t_53_ = q_4[k][j+3][i] * q_2[k][j+3][i];
double _v_72_ = cons_4[k][j+3][i] * q_2[k][j+3][i];
_t_53_ += _v_72_;
_t_53_ -= cons_4[k][j-3][i] * q_2[k][j-3][i];
double _v_75_ = q_4[k][j-3][i] * q_2[k][j-3][i];
_t_53_ -= _v_75_;
_t_49_ += dxinv1 * 0.038 * _t_53_;
double _t_54_ = q_4[k][j+4][i] * q_2[k][j+4][i];
double _v_76_ = cons_4[k][j+4][i] * q_2[k][j+4][i];
_t_54_ += _v_76_;
_t_54_ -= cons_4[k][j-4][i] * q_2[k][j-4][i];
double _v_79_ = q_4[k][j-4][i] * q_2[k][j-4][i];
_t_54_ -= _v_79_;
_t_49_ -= dxinv1 * 0.0035 * _t_54_;
flux_4kc0jc0ic0 -= _t_49_;
double _t_57_ = cons_3[k+1][j][i];
_t_57_ -= cons_3[k-1][j][i];
double _t_55_ = dxinv2 * 0.8 * _t_57_;
double _t_58_ = cons_3[k+2][j][i];
_t_58_ -= cons_3[k-2][j][i];
_t_55_ -= dxinv2 * 0.2 * _t_58_;
double _t_59_ = cons_3[k+3][j][i];
_t_59_ -= cons_3[k-3][j][i];
_t_55_ += dxinv2 * 0.038 * _t_59_;
double _t_60_ = cons_3[k+4][j][i];
_t_60_ -= cons_3[k-4][j][i];
_t_55_ -= dxinv2 * 0.0035 * _t_60_;
flux_0kc0jc0ic0 -= _t_55_;
double _t_63_ = cons_1[k+1][j][i] * q_3[k+1][j][i];
_t_63_ -= cons_1[k-1][j][i] * q_3[k-1][j][i];
double _t_61_ = dxinv2 * 0.8 * _t_63_;
double _t_64_ = cons_1[k+2][j][i] * q_3[k+2][j][i];
_t_64_ -= cons_1[k-2][j][i] * q_3[k-2][j][i];
_t_61_ -= dxinv2 * 0.2 * _t_64_;
double _t_65_ = cons_1[k+3][j][i] * q_3[k+3][j][i];
_t_65_ -= cons_1[k-3][j][i] * q_3[k-3][j][i];
_t_61_ += dxinv2 * 0.038 * _t_65_;
double _t_66_ = cons_1[k+4][j][i] * q_3[k+4][j][i];
_t_66_ -= cons_1[k-4][j][i] * q_3[k-4][j][i];
_t_61_ -= dxinv2 * 0.0035 * _t_66_;
flux_1kc0jc0ic0 -= _t_61_;
double _t_69_ = cons_2[k+1][j][i] * q_3[k+1][j][i];
_t_69_ -= cons_2[k-1][j][i] * q_3[k-1][j][i];
double _t_67_ = dxinv2 * 0.8 * _t_69_;
double _t_70_ = cons_2[k+2][j][i] * q_3[k+2][j][i];
_t_70_ -= cons_2[k-2][j][i] * q_3[k-2][j][i];
_t_67_ -= dxinv2 * 0.2 * _t_70_;
double _t_71_ = cons_2[k+3][j][i] * q_3[k+3][j][i];
_t_71_ -= cons_2[k-3][j][i] * q_3[k-3][j][i];
_t_67_ += dxinv2 * 0.038 * _t_71_;
double _t_72_ = cons_2[k+4][j][i] * q_3[k+4][j][i];
_t_72_ -= cons_2[k-4][j][i] * q_3[k-4][j][i];
_t_67_ -= dxinv2 * 0.0035 * _t_72_;
flux_2kc0jc0ic0 -= _t_67_;
double _t_75_ = cons_3[k+1][j][i] * q_3[k+1][j][i];
_t_75_ -= cons_3[k-1][j][i] * q_3[k-1][j][i];
_t_75_ += q_4[k+1][j][i];
_t_75_ -= q_4[k-1][j][i];
double _t_73_ = dxinv2 * 0.8 * _t_75_;
double _t_76_ = cons_3[k+2][j][i] * q_3[k+2][j][i];
_t_76_ -= cons_3[k-2][j][i] * q_3[k-2][j][i];
_t_76_ += q_4[k+2][j][i];
_t_76_ -= q_4[k-2][j][i];
_t_73_ -= dxinv2 * 0.2 * _t_76_;
double _t_77_ = cons_3[k+3][j][i] * q_3[k+3][j][i];
_t_77_ -= cons_3[k-3][j][i] * q_3[k-3][j][i];
_t_77_ += q_4[k+3][j][i];
_t_77_ -= q_4[k-3][j][i];
_t_73_ += dxinv2 * 0.038 * _t_77_;
double _t_78_ = cons_3[k+4][j][i] * q_3[k+4][j][i];
_t_78_ -= cons_3[k-4][j][i] * q_3[k-4][j][i];
_t_78_ += q_4[k+4][j][i];
_t_78_ -= q_4[k-4][j][i];
_t_73_ -= dxinv2 * 0.0035 * _t_78_;
flux_3kc0jc0ic0 -= _t_73_;
double _t_81_ = q_4[k+1][j][i] * q_3[k+1][j][i];
double _v_104_ = cons_4[k+1][j][i] * q_3[k+1][j][i];
_t_81_ += _v_104_;
_t_81_ -= cons_4[k-1][j][i] * q_3[k-1][j][i];
double _v_107_ = q_4[k-1][j][i] * q_3[k-1][j][i];
_t_81_ -= _v_107_;
double _t_79_ = dxinv2 * 0.8 * _t_81_;
double _t_82_ = q_4[k+2][j][i] * q_3[k+2][j][i];
double _v_108_ = cons_4[k+2][j][i] * q_3[k+2][j][i];
_t_82_ += _v_108_;
_t_82_ -= cons_4[k-2][j][i] * q_3[k-2][j][i];
double _v_111_ = q_4[k-2][j][i] * q_3[k-2][j][i];
_t_82_ -= _v_111_;
_t_79_ -= dxinv2 * 0.2 * _t_82_;
double _t_83_ = q_4[k+3][j][i] * q_3[k+3][j][i];
double _v_112_ = cons_4[k+3][j][i] * q_3[k+3][j][i];
_t_83_ += _v_112_;
_t_83_ -= cons_4[k-3][j][i] * q_3[k-3][j][i];
double _v_115_ = q_4[k-3][j][i] * q_3[k-3][j][i];
_t_83_ -= _v_115_;
_t_79_ += dxinv2 * 0.038 * _t_83_;
double _t_84_ = q_4[k+4][j][i] * q_3[k+4][j][i];
double _v_116_ = cons_4[k+4][j][i] * q_3[k+4][j][i];
_t_84_ += _v_116_;
_t_84_ -= cons_4[k-4][j][i] * q_3[k-4][j][i];
double _v_119_ = q_4[k-4][j][i] * q_3[k-4][j][i];
_t_84_ -= _v_119_;
_t_79_ -= dxinv2 * 0.0035 * _t_84_;
flux_4kc0jc0ic0 -= _t_79_;
flux_0[k][j][i] = flux_0kc0jc0ic0;
flux_1[k][j][i] = flux_1kc0jc0ic0;
flux_2[k][j][i] = flux_2kc0jc0ic0;
flux_3[k][j][i] = flux_3kc0jc0ic0;
flux_4[k][j][i] = flux_4kc0jc0ic0;
}
}
extern "C" void host_code (double *h_flux_0, double *h_flux_1, double *h_flux_2, double *h_flux_3, double *h_flux_4, double *h_cons_1, double *h_cons_2, double *h_cons_3, double *h_cons_4, double *h_q_1, double *h_q_2, double *h_q_3, double *h_q_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
double *flux_0;
hipMalloc (&flux_0, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_0\n");
hipMemcpy (flux_0, h_flux_0, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *flux_1;
hipMalloc (&flux_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_1\n");
hipMemcpy (flux_1, h_flux_1, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *flux_2;
hipMalloc (&flux_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_2\n");
hipMemcpy (flux_2, h_flux_2, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *flux_3;
hipMalloc (&flux_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_3\n");
hipMemcpy (flux_3, h_flux_3, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *flux_4;
hipMalloc (&flux_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_4\n");
hipMemcpy (flux_4, h_flux_4, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *cons_1;
hipMalloc (&cons_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_1\n");
hipMemcpy (cons_1, h_cons_1, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *cons_2;
hipMalloc (&cons_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_2\n");
hipMemcpy (cons_2, h_cons_2, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *cons_3;
hipMalloc (&cons_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_3\n");
hipMemcpy (cons_3, h_cons_3, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *cons_4;
hipMalloc (&cons_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_4\n");
hipMemcpy (cons_4, h_cons_4, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *q_1;
hipMalloc (&q_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_1\n");
hipMemcpy (q_1, h_q_1, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *q_2;
hipMalloc (&q_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_2\n");
hipMemcpy (q_2, h_q_2, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *q_3;
hipMalloc (&q_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_3\n");
hipMemcpy (q_3, h_q_3, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *q_4;
hipMalloc (&q_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_4\n");
hipMemcpy (q_4, h_q_4, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
dim3 blockconfig (16, 4, 4);
dim3 gridconfig (ceil(N, blockconfig.x), ceil(M, blockconfig.y), ceil(L, blockconfig.z));
hipLaunchKernelGGL(( hypterm) , dim3(gridconfig), dim3(blockconfig), 0, 0, flux_0, flux_1, flux_2, flux_3, flux_4, cons_1, cons_2, cons_3, cons_4, q_1, q_2, q_3, q_4, -dxinv0, dxinv1, dxinv2, L, M, N);
hipMemcpy (h_flux_0, flux_0, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
hipMemcpy (h_flux_1, flux_1, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
hipMemcpy (h_flux_3, flux_3, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
hipMemcpy (h_flux_4, flux_4, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
hipMemcpy (h_flux_2, flux_2, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
}
|
7b6d1f2c7ad2cc085e4830d44fb5d3763885821c.cu
|
#include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
void check_error (const char* message) {
cudaError_t error = cudaGetLastError ();
if (error != cudaSuccess) {
printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error));
exit(-1);
}
}
__global__ void hypterm (double * __restrict__ flux_in_0, double * __restrict__ flux_in_1, double * __restrict__ flux_in_2, double * __restrict__ flux_in_3, double * __restrict__ flux_in_4, double * __restrict__ cons_in_1, double * __restrict__ cons_in_2, double * __restrict__ cons_in_3, double * __restrict__ cons_in_4, double * __restrict__ q_in_1, double * __restrict__ q_in_2, double * __restrict__ q_in_3, double * __restrict__ q_in_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
//Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
int k0 = (int)(blockIdx.z)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.z);
double (*flux_0)[308][308] = (double (*)[308][308])flux_in_0;
double (*flux_1)[308][308] = (double (*)[308][308])flux_in_1;
double (*flux_2)[308][308] = (double (*)[308][308])flux_in_2;
double (*flux_3)[308][308] = (double (*)[308][308])flux_in_3;
double (*flux_4)[308][308] = (double (*)[308][308])flux_in_4;
double (*q_1)[308][308] = (double (*)[308][308])q_in_1;
double (*q_2)[308][308] = (double (*)[308][308])q_in_2;
double (*q_3)[308][308] = (double (*)[308][308])q_in_3;
double (*q_4)[308][308] = (double (*)[308][308])q_in_4;
double (*cons_1)[308][308] = (double (*)[308][308])cons_in_1;
double (*cons_2)[308][308] = (double (*)[308][308])cons_in_2;
double (*cons_3)[308][308] = (double (*)[308][308])cons_in_3;
double (*cons_4)[308][308] = (double (*)[308][308])cons_in_4;
if (i>=4 && j>=4 && k>=4 && i<=N-5 && j<=N-5 && k<=N-5) {
double _t_1_ = cons_1[k][j][i+1];
_t_1_ -= cons_1[k][j][i-1];
double flux_0kc0jc0ic0 = dxinv0 * 0.8 * _t_1_;
double _t_2_ = cons_1[k][j][i+2];
_t_2_ -= cons_1[k][j][i-2];
flux_0kc0jc0ic0 -= dxinv0 * 0.2 * _t_2_;
double _t_3_ = cons_1[k][j][i+3];
_t_3_ -= cons_1[k][j][i-3];
flux_0kc0jc0ic0 += dxinv0 * 0.038 * _t_3_;
double _t_4_ = cons_1[k][j][i+4];
_t_4_ -= cons_1[k][j][i-4];
flux_0kc0jc0ic0 -= dxinv0 * 0.0035 * _t_4_;
double _t_6_ = cons_1[k][j][i+1] * q_1[k][j][i+1];
_t_6_ -= cons_1[k][j][i-1] * q_1[k][j][i-1];
_t_6_ += q_4[k][j][i+1];
_t_6_ -= q_4[k][j][i-1];
double flux_1kc0jc0ic0 = dxinv0 * 0.8 * _t_6_;
double _t_7_ = cons_1[k][j][i+2] * q_1[k][j][i+2];
_t_7_ -= cons_1[k][j][i-2] * q_1[k][j][i-2];
_t_7_ += q_4[k][j][i+2];
_t_7_ -= q_4[k][j][i-2];
flux_1kc0jc0ic0 -= dxinv0 * 0.2 * _t_7_;
double _t_8_ = cons_1[k][j][i+3] * q_1[k][j][i+3];
_t_8_ -= cons_1[k][j][i-3] * q_1[k][j][i-3];
_t_8_ += q_4[k][j][i+3];
_t_8_ -= q_4[k][j][i-3];
flux_1kc0jc0ic0 += dxinv0 * 0.038 * _t_8_;
double _t_9_ = cons_1[k][j][i+4] * q_1[k][j][i+4];
_t_9_ -= cons_1[k][j][i-4] * q_1[k][j][i-4];
_t_9_ += q_4[k][j][i+4];
_t_9_ -= q_4[k][j][i-4];
flux_1kc0jc0ic0 -= dxinv0 * 0.0035 * _t_9_;
double _t_11_ = cons_2[k][j][i+1] * q_1[k][j][i+1];
_t_11_ -= cons_2[k][j][i-1] * q_1[k][j][i-1];
double flux_2kc0jc0ic0 = dxinv0 * 0.8 * _t_11_;
double _t_12_ = cons_2[k][j][i+2] * q_1[k][j][i+2];
_t_12_ -= cons_2[k][j][i-2] * q_1[k][j][i-2];
flux_2kc0jc0ic0 -= dxinv0 * 0.2 * _t_12_;
double _t_13_ = cons_2[k][j][i+3] * q_1[k][j][i+3];
_t_13_ -= cons_2[k][j][i-3] * q_1[k][j][i-3];
flux_2kc0jc0ic0 += dxinv0 * 0.038 * _t_13_;
double _t_14_ = cons_2[k][j][i+4] * q_1[k][j][i+4];
_t_14_ -= cons_2[k][j][i-4] * q_1[k][j][i-4];
flux_2kc0jc0ic0 -= dxinv0 * 0.0035 * _t_14_;
double _t_16_ = cons_3[k][j][i+1] * q_1[k][j][i+1];
_t_16_ -= cons_3[k][j][i-1] * q_1[k][j][i-1];
double flux_3kc0jc0ic0 = dxinv0 * 0.8 * _t_16_;
double _t_17_ = cons_3[k][j][i+2] * q_1[k][j][i+2];
_t_17_ -= cons_3[k][j][i-2] * q_1[k][j][i-2];
flux_3kc0jc0ic0 -= dxinv0 * 0.2 * _t_17_;
double _t_18_ = cons_3[k][j][i+3] * q_1[k][j][i+3];
_t_18_ -= cons_3[k][j][i-3] * q_1[k][j][i-3];
flux_3kc0jc0ic0 += dxinv0 * 0.038 * _t_18_;
double _t_19_ = cons_3[k][j][i+4] * q_1[k][j][i+4];
_t_19_ -= cons_3[k][j][i-4] * q_1[k][j][i-4];
flux_3kc0jc0ic0 -= dxinv0 * 0.0035 * _t_19_;
double _t_21_ = q_4[k][j][i+1] * q_1[k][j][i+1];
double _v_24_ = cons_4[k][j][i+1] * q_1[k][j][i+1];
_t_21_ += _v_24_;
_t_21_ -= cons_4[k][j][i-1] * q_1[k][j][i-1];
double _v_27_ = q_4[k][j][i-1] * q_1[k][j][i-1];
_t_21_ -= _v_27_;
double flux_4kc0jc0ic0 = dxinv0 * 0.8 * _t_21_;
double _t_22_ = q_4[k][j][i+2] * q_1[k][j][i+2];
double _v_28_ = cons_4[k][j][i+2] * q_1[k][j][i+2];
_t_22_ += _v_28_;
_t_22_ -= cons_4[k][j][i-2] * q_1[k][j][i-2];
double _v_31_ = q_4[k][j][i-2] * q_1[k][j][i-2];
_t_22_ -= _v_31_;
flux_4kc0jc0ic0 -= dxinv0 * 0.2 * _t_22_;
double _t_23_ = q_4[k][j][i+3] * q_1[k][j][i+3];
double _v_32_ = cons_4[k][j][i+3] * q_1[k][j][i+3];
_t_23_ += _v_32_;
_t_23_ -= cons_4[k][j][i-3] * q_1[k][j][i-3];
double _v_35_ = q_4[k][j][i-3] * q_1[k][j][i-3];
_t_23_ -= _v_35_;
flux_4kc0jc0ic0 += dxinv0 * 0.038 * _t_23_;
double _t_24_ = q_4[k][j][i+4] * q_1[k][j][i+4];
double _v_36_ = cons_4[k][j][i+4] * q_1[k][j][i+4];
_t_24_ += _v_36_;
_t_24_ -= cons_4[k][j][i-4] * q_1[k][j][i-4];
double _v_39_ = q_4[k][j][i-4] * q_1[k][j][i-4];
_t_24_ -= _v_39_;
flux_4kc0jc0ic0 -= dxinv0 * 0.0035 * _t_24_;
double _t_27_ = cons_2[k][j+1][i];
_t_27_ -= cons_2[k][j-1][i];
double _t_25_ = dxinv1 * 0.8 * _t_27_;
double _t_28_ = cons_2[k][j+2][i];
_t_28_ -= cons_2[k][j-2][i];
_t_25_ -= dxinv1 * 0.2 * _t_28_;
double _t_29_ = cons_2[k][j+3][i];
_t_29_ -= cons_2[k][j-3][i];
_t_25_ += dxinv1 * 0.038 * _t_29_;
double _t_30_ = cons_2[k][j+4][i];
_t_30_ -= cons_2[k][j-4][i];
_t_25_ -= dxinv1 * 0.0035 * _t_30_;
flux_0kc0jc0ic0 -= _t_25_;
double _t_33_ = cons_1[k][j+1][i] * q_2[k][j+1][i];
_t_33_ -= cons_1[k][j-1][i] * q_2[k][j-1][i];
double _t_31_ = dxinv1 * 0.8 * _t_33_;
double _t_34_ = cons_1[k][j+2][i] * q_2[k][j+2][i];
_t_34_ -= cons_1[k][j-2][i] * q_2[k][j-2][i];
_t_31_ -= dxinv1 * 0.2 * _t_34_;
double _t_35_ = cons_1[k][j+3][i] * q_2[k][j+3][i];
_t_35_ -= cons_1[k][j-3][i] * q_2[k][j-3][i];
_t_31_ += dxinv1 * 0.038 * _t_35_;
double _t_36_ = cons_1[k][j+4][i] * q_2[k][j+4][i];
_t_36_ -= cons_1[k][j-4][i] * q_2[k][j-4][i];
_t_31_ -= dxinv1 * 0.0035 * _t_36_;
flux_1kc0jc0ic0 -= _t_31_;
double _t_39_ = cons_2[k][j+1][i] * q_2[k][j+1][i];
_t_39_ -= cons_2[k][j-1][i] * q_2[k][j-1][i];
_t_39_ += q_4[k][j+1][i];
_t_39_ -= q_4[k][j-1][i];
double _t_37_ = dxinv1 * 0.8 * _t_39_;
double _t_40_ = cons_2[k][j+2][i] * q_2[k][j+2][i];
_t_40_ -= cons_2[k][j-2][i] * q_2[k][j-2][i];
_t_40_ += q_4[k][j+2][i];
_t_40_ -= q_4[k][j-2][i];
_t_37_ -= dxinv1 * 0.2 * _t_40_;
double _t_41_ = cons_2[k][j+3][i] * q_2[k][j+3][i];
_t_41_ -= cons_2[k][j-3][i] * q_2[k][j-3][i];
_t_41_ += q_4[k][j+3][i];
_t_41_ -= q_4[k][j-3][i];
_t_37_ += dxinv1 * 0.038 * _t_41_;
double _t_42_ = cons_2[k][j+4][i] * q_2[k][j+4][i];
_t_42_ -= cons_2[k][j-4][i] * q_2[k][j-4][i];
_t_42_ += q_4[k][j+4][i];
_t_42_ -= q_4[k][j-4][i];
_t_37_ -= dxinv1 * 0.0035 * _t_42_;
flux_2kc0jc0ic0 -= _t_37_;
double _t_45_ = cons_3[k][j+1][i] * q_2[k][j+1][i];
_t_45_ -= cons_3[k][j-1][i] * q_2[k][j-1][i];
double _t_43_ = dxinv1 * 0.8 * _t_45_;
double _t_46_ = cons_3[k][j+2][i] * q_2[k][j+2][i];
_t_46_ -= cons_3[k][j-2][i] * q_2[k][j-2][i];
_t_43_ -= dxinv1 * 0.2 * _t_46_;
double _t_47_ = cons_3[k][j+3][i] * q_2[k][j+3][i];
_t_47_ -= cons_3[k][j-3][i] * q_2[k][j-3][i];
_t_43_ += dxinv1 * 0.038 * _t_47_;
double _t_48_ = cons_3[k][j+4][i] * q_2[k][j+4][i];
_t_48_ -= cons_3[k][j-4][i] * q_2[k][j-4][i];
_t_43_ -= dxinv1 * 0.0035 * _t_48_;
flux_3kc0jc0ic0 -= _t_43_;
double _t_51_ = q_4[k][j+1][i] * q_2[k][j+1][i];
double _v_64_ = cons_4[k][j+1][i] * q_2[k][j+1][i];
_t_51_ += _v_64_;
_t_51_ -= cons_4[k][j-1][i] * q_2[k][j-1][i];
double _v_67_ = q_4[k][j-1][i] * q_2[k][j-1][i];
_t_51_ -= _v_67_;
double _t_49_ = dxinv1 * 0.8 * _t_51_;
double _t_52_ = q_4[k][j+2][i] * q_2[k][j+2][i];
double _v_68_ = cons_4[k][j+2][i] * q_2[k][j+2][i];
_t_52_ += _v_68_;
_t_52_ -= cons_4[k][j-2][i] * q_2[k][j-2][i];
double _v_71_ = q_4[k][j-2][i] * q_2[k][j-2][i];
_t_52_ -= _v_71_;
_t_49_ -= dxinv1 * 0.2 * _t_52_;
double _t_53_ = q_4[k][j+3][i] * q_2[k][j+3][i];
double _v_72_ = cons_4[k][j+3][i] * q_2[k][j+3][i];
_t_53_ += _v_72_;
_t_53_ -= cons_4[k][j-3][i] * q_2[k][j-3][i];
double _v_75_ = q_4[k][j-3][i] * q_2[k][j-3][i];
_t_53_ -= _v_75_;
_t_49_ += dxinv1 * 0.038 * _t_53_;
double _t_54_ = q_4[k][j+4][i] * q_2[k][j+4][i];
double _v_76_ = cons_4[k][j+4][i] * q_2[k][j+4][i];
_t_54_ += _v_76_;
_t_54_ -= cons_4[k][j-4][i] * q_2[k][j-4][i];
double _v_79_ = q_4[k][j-4][i] * q_2[k][j-4][i];
_t_54_ -= _v_79_;
_t_49_ -= dxinv1 * 0.0035 * _t_54_;
flux_4kc0jc0ic0 -= _t_49_;
double _t_57_ = cons_3[k+1][j][i];
_t_57_ -= cons_3[k-1][j][i];
double _t_55_ = dxinv2 * 0.8 * _t_57_;
double _t_58_ = cons_3[k+2][j][i];
_t_58_ -= cons_3[k-2][j][i];
_t_55_ -= dxinv2 * 0.2 * _t_58_;
double _t_59_ = cons_3[k+3][j][i];
_t_59_ -= cons_3[k-3][j][i];
_t_55_ += dxinv2 * 0.038 * _t_59_;
double _t_60_ = cons_3[k+4][j][i];
_t_60_ -= cons_3[k-4][j][i];
_t_55_ -= dxinv2 * 0.0035 * _t_60_;
flux_0kc0jc0ic0 -= _t_55_;
double _t_63_ = cons_1[k+1][j][i] * q_3[k+1][j][i];
_t_63_ -= cons_1[k-1][j][i] * q_3[k-1][j][i];
double _t_61_ = dxinv2 * 0.8 * _t_63_;
double _t_64_ = cons_1[k+2][j][i] * q_3[k+2][j][i];
_t_64_ -= cons_1[k-2][j][i] * q_3[k-2][j][i];
_t_61_ -= dxinv2 * 0.2 * _t_64_;
double _t_65_ = cons_1[k+3][j][i] * q_3[k+3][j][i];
_t_65_ -= cons_1[k-3][j][i] * q_3[k-3][j][i];
_t_61_ += dxinv2 * 0.038 * _t_65_;
double _t_66_ = cons_1[k+4][j][i] * q_3[k+4][j][i];
_t_66_ -= cons_1[k-4][j][i] * q_3[k-4][j][i];
_t_61_ -= dxinv2 * 0.0035 * _t_66_;
flux_1kc0jc0ic0 -= _t_61_;
double _t_69_ = cons_2[k+1][j][i] * q_3[k+1][j][i];
_t_69_ -= cons_2[k-1][j][i] * q_3[k-1][j][i];
double _t_67_ = dxinv2 * 0.8 * _t_69_;
double _t_70_ = cons_2[k+2][j][i] * q_3[k+2][j][i];
_t_70_ -= cons_2[k-2][j][i] * q_3[k-2][j][i];
_t_67_ -= dxinv2 * 0.2 * _t_70_;
double _t_71_ = cons_2[k+3][j][i] * q_3[k+3][j][i];
_t_71_ -= cons_2[k-3][j][i] * q_3[k-3][j][i];
_t_67_ += dxinv2 * 0.038 * _t_71_;
double _t_72_ = cons_2[k+4][j][i] * q_3[k+4][j][i];
_t_72_ -= cons_2[k-4][j][i] * q_3[k-4][j][i];
_t_67_ -= dxinv2 * 0.0035 * _t_72_;
flux_2kc0jc0ic0 -= _t_67_;
double _t_75_ = cons_3[k+1][j][i] * q_3[k+1][j][i];
_t_75_ -= cons_3[k-1][j][i] * q_3[k-1][j][i];
_t_75_ += q_4[k+1][j][i];
_t_75_ -= q_4[k-1][j][i];
double _t_73_ = dxinv2 * 0.8 * _t_75_;
double _t_76_ = cons_3[k+2][j][i] * q_3[k+2][j][i];
_t_76_ -= cons_3[k-2][j][i] * q_3[k-2][j][i];
_t_76_ += q_4[k+2][j][i];
_t_76_ -= q_4[k-2][j][i];
_t_73_ -= dxinv2 * 0.2 * _t_76_;
double _t_77_ = cons_3[k+3][j][i] * q_3[k+3][j][i];
_t_77_ -= cons_3[k-3][j][i] * q_3[k-3][j][i];
_t_77_ += q_4[k+3][j][i];
_t_77_ -= q_4[k-3][j][i];
_t_73_ += dxinv2 * 0.038 * _t_77_;
double _t_78_ = cons_3[k+4][j][i] * q_3[k+4][j][i];
_t_78_ -= cons_3[k-4][j][i] * q_3[k-4][j][i];
_t_78_ += q_4[k+4][j][i];
_t_78_ -= q_4[k-4][j][i];
_t_73_ -= dxinv2 * 0.0035 * _t_78_;
flux_3kc0jc0ic0 -= _t_73_;
double _t_81_ = q_4[k+1][j][i] * q_3[k+1][j][i];
double _v_104_ = cons_4[k+1][j][i] * q_3[k+1][j][i];
_t_81_ += _v_104_;
_t_81_ -= cons_4[k-1][j][i] * q_3[k-1][j][i];
double _v_107_ = q_4[k-1][j][i] * q_3[k-1][j][i];
_t_81_ -= _v_107_;
double _t_79_ = dxinv2 * 0.8 * _t_81_;
double _t_82_ = q_4[k+2][j][i] * q_3[k+2][j][i];
double _v_108_ = cons_4[k+2][j][i] * q_3[k+2][j][i];
_t_82_ += _v_108_;
_t_82_ -= cons_4[k-2][j][i] * q_3[k-2][j][i];
double _v_111_ = q_4[k-2][j][i] * q_3[k-2][j][i];
_t_82_ -= _v_111_;
_t_79_ -= dxinv2 * 0.2 * _t_82_;
double _t_83_ = q_4[k+3][j][i] * q_3[k+3][j][i];
double _v_112_ = cons_4[k+3][j][i] * q_3[k+3][j][i];
_t_83_ += _v_112_;
_t_83_ -= cons_4[k-3][j][i] * q_3[k-3][j][i];
double _v_115_ = q_4[k-3][j][i] * q_3[k-3][j][i];
_t_83_ -= _v_115_;
_t_79_ += dxinv2 * 0.038 * _t_83_;
double _t_84_ = q_4[k+4][j][i] * q_3[k+4][j][i];
double _v_116_ = cons_4[k+4][j][i] * q_3[k+4][j][i];
_t_84_ += _v_116_;
_t_84_ -= cons_4[k-4][j][i] * q_3[k-4][j][i];
double _v_119_ = q_4[k-4][j][i] * q_3[k-4][j][i];
_t_84_ -= _v_119_;
_t_79_ -= dxinv2 * 0.0035 * _t_84_;
flux_4kc0jc0ic0 -= _t_79_;
flux_0[k][j][i] = flux_0kc0jc0ic0;
flux_1[k][j][i] = flux_1kc0jc0ic0;
flux_2[k][j][i] = flux_2kc0jc0ic0;
flux_3[k][j][i] = flux_3kc0jc0ic0;
flux_4[k][j][i] = flux_4kc0jc0ic0;
}
}
extern "C" void host_code (double *h_flux_0, double *h_flux_1, double *h_flux_2, double *h_flux_3, double *h_flux_4, double *h_cons_1, double *h_cons_2, double *h_cons_3, double *h_cons_4, double *h_q_1, double *h_q_2, double *h_q_3, double *h_q_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
double *flux_0;
cudaMalloc (&flux_0, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_0\n");
cudaMemcpy (flux_0, h_flux_0, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *flux_1;
cudaMalloc (&flux_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_1\n");
cudaMemcpy (flux_1, h_flux_1, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *flux_2;
cudaMalloc (&flux_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_2\n");
cudaMemcpy (flux_2, h_flux_2, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *flux_3;
cudaMalloc (&flux_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_3\n");
cudaMemcpy (flux_3, h_flux_3, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *flux_4;
cudaMalloc (&flux_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_4\n");
cudaMemcpy (flux_4, h_flux_4, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *cons_1;
cudaMalloc (&cons_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_1\n");
cudaMemcpy (cons_1, h_cons_1, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *cons_2;
cudaMalloc (&cons_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_2\n");
cudaMemcpy (cons_2, h_cons_2, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *cons_3;
cudaMalloc (&cons_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_3\n");
cudaMemcpy (cons_3, h_cons_3, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *cons_4;
cudaMalloc (&cons_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_4\n");
cudaMemcpy (cons_4, h_cons_4, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *q_1;
cudaMalloc (&q_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_1\n");
cudaMemcpy (q_1, h_q_1, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *q_2;
cudaMalloc (&q_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_2\n");
cudaMemcpy (q_2, h_q_2, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *q_3;
cudaMalloc (&q_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_3\n");
cudaMemcpy (q_3, h_q_3, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *q_4;
cudaMalloc (&q_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_4\n");
cudaMemcpy (q_4, h_q_4, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
dim3 blockconfig (16, 4, 4);
dim3 gridconfig (ceil(N, blockconfig.x), ceil(M, blockconfig.y), ceil(L, blockconfig.z));
hypterm <<<gridconfig, blockconfig>>> (flux_0, flux_1, flux_2, flux_3, flux_4, cons_1, cons_2, cons_3, cons_4, q_1, q_2, q_3, q_4, -dxinv0, dxinv1, dxinv2, L, M, N);
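// The blocking cudaMemcpy calls below run on the default stream, so they wait for the
// kernel above to finish before copying the flux arrays back; no explicit
// cudaDeviceSynchronize() is needed here.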
cudaMemcpy (h_flux_0, flux_0, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_1, flux_1, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_3, flux_3, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_4, flux_4, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_2, flux_2, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
}
|
2d5b8ba14f37ae3b79a7143f9acd6701ef59fd58.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <limits.h>
#include <stdlib.h>
#include <ctime>
#include <sstream>
#include <string>
#include <stdint.h>
#include "gpu_hashtable.hpp"
/* INIT HASH
*/
GpuHashTable::GpuHashTable(int size) {
hash_size = size; // actual size of hashtable
num_entries = 0; // number of occupied slots
hipMalloc((void **) &hashtable, size * sizeof(entry));
hipMemset(hashtable, KEY_INVALID, size * sizeof(entry));
}
/* DESTROY HASH
*/
GpuHashTable::~GpuHashTable() {
hipFree(hashtable);
}
/* Hash function used by hashtable
*/
__device__ uint32_t hash_func(int data, int limit) {
return ((long)abs(data) * 105359939) % 1685759167 % limit;
}
/* resize function that will be run by GPU
*/
__global__ void resize(GpuHashTable::entry *hashtable, GpuHashTable::entry *new_hash,
int hash_size, int numBucketsReshape) {
/* each thread will copy one element from hashtable to new_hash */
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < hash_size) {
if (hashtable[tid].key == KEY_INVALID)
return;
/* rehash each key */
uint32_t key = hash_func(hashtable[tid].key, numBucketsReshape);
while (true) {
/* find empty slot and add pair */
uint32_t prev = atomicCAS(&new_hash[key].key, KEY_INVALID, hashtable[tid].key);
if (prev == hashtable[tid].key || prev == KEY_INVALID) {
new_hash[key].value = hashtable[tid].value;
break;
}
key++;
key %= numBucketsReshape;
}
}
}
/* RESHAPE HASH
*/
void GpuHashTable::reshape(int numBucketsReshape) {
uint32_t block_size = 100;
uint32_t blocks_no = hash_size / block_size;
if (hash_size % block_size)
++blocks_no;
struct entry *new_hash;
/* alloc new hash */
hipMalloc((void **) &new_hash, numBucketsReshape * sizeof(entry));
hipMemset(new_hash, KEY_INVALID, numBucketsReshape * sizeof(entry));
hipLaunchKernelGGL(( resize), dim3(blocks_no), dim3(block_size), 0, 0, hashtable, new_hash, hash_size, numBucketsReshape);
hipDeviceSynchronize();
hipFree(hashtable);
hashtable = new_hash;
hash_size = numBucketsReshape;
}
/* insert function that will be run by GPU
*/
__global__ void insert(GpuHashTable::entry *hashtable, int hash_size,
int *keys, int* values, int numKeys) {
/* each thread will insert one element into hashtable */
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < numKeys) {
/* compute hash for key */
uint32_t key = hash_func(keys[tid], hash_size);
while (true) {
/* find empty spot or update value if the key already exists */
uint32_t prev = atomicCAS(&hashtable[key].key, KEY_INVALID, keys[tid]);
if (prev == keys[tid] || prev == KEY_INVALID) {
hashtable[key].value = values[tid];
return;
}
key++;
key %= hash_size;
}
}
}
/* INSERT BATCH
*/
bool GpuHashTable::insertBatch(int *keys, int* values, int numKeys) {
int *new_values;
/* compute number of entries before calling insert in order to perform
* reshape if needed
*/
new_values = getBatch(keys, numKeys);
for (int i = 0; i < numKeys; i++)
if (new_values[i] == KEY_INVALID)
num_entries++;
if ((float)(num_entries) / hash_size >= 0.9)
reshape(num_entries + (int)(0.1 * num_entries));
uint32_t block_size = 100;
uint32_t blocks_no = numKeys / block_size;
if (numKeys % block_size)
++blocks_no;
int *dev_keys = 0;
int *dev_values = 0;
/* alloc memory for GPU and copy keys and values arrays into GPU memory */
hipMalloc((void **) &dev_keys, numKeys * sizeof(int));
hipMalloc((void **) &dev_values, numKeys * sizeof(int));
hipMemcpy(dev_keys, keys, numKeys * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_values, values, numKeys * sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( insert), dim3(blocks_no), dim3(block_size), 0, 0, hashtable, hash_size, dev_keys, dev_values, numKeys);
hipDeviceSynchronize();
hipFree(dev_keys);
hipFree(dev_values);
free(new_values);
return true;
}
/* get function that will be run by GPU
*/
__global__ void get(GpuHashTable::entry *hashtable, int hash_size,
int *keys, int *values, int numKeys) {
/* each thread will add to the result array one element from hashtable
* corresponding to one key from the keys array
*/
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < numKeys) {
/* compute hash for key */
uint32_t key = hash_func(keys[tid], hash_size);
while (true) {
if (hashtable[key].key == keys[tid]) {
values[tid] = hashtable[key].value;
break;
}
if (hashtable[key].key == KEY_INVALID) {
values[tid] = KEY_INVALID;
break;
}
key++;
key %= hash_size;
}
}
}
/* GET BATCH
*/
int* GpuHashTable::getBatch(int* keys, int numKeys) {
int *results = (int *)malloc(numKeys * sizeof(int));
uint32_t block_size = 100;
uint32_t blocks_no = numKeys / block_size;
if (numKeys % block_size)
++blocks_no;
int *dev_keys = 0;
int *dev_values = 0;
/* alloc memory for GPU and copy keys and values arrays into GPU memory */
hipMalloc((void **) &dev_keys, numKeys * sizeof(int));
hipMalloc((void **) &dev_values, numKeys * sizeof(int));
hipMemcpy(dev_keys, keys, numKeys * sizeof(int), hipMemcpyHostToDevice);
hipMemset(dev_values, KEY_INVALID, numKeys * sizeof(int));
hipLaunchKernelGGL(( get), dim3(blocks_no), dim3(block_size), 0, 0, hashtable, hash_size, dev_keys, dev_values, numKeys);
hipDeviceSynchronize();
/* copy values array from GPU memory into results array (CPU memory) */
hipMemcpy(results, dev_values, numKeys * sizeof(int), hipMemcpyDeviceToHost);
hipFree(dev_keys);
hipFree(dev_values);
return results;
}
/* GET LOAD FACTOR
* number of occupied slots / total number of hash slots
*/
float GpuHashTable::loadFactor() {
return (float)num_entries / hash_size; // no larger than 1.0f = 100%
}
/*********************************************************/
#define HASH_INIT GpuHashTable GpuHashTable(1);
#define HASH_RESERVE(size) GpuHashTable.reshape(size);
#define HASH_BATCH_INSERT(keys, values, numKeys) GpuHashTable.insertBatch(keys, values, numKeys)
#define HASH_BATCH_GET(keys, numKeys) GpuHashTable.getBatch(keys, numKeys)
#define HASH_LOAD_FACTOR GpuHashTable.loadFactor()
#define HASH_DESTROY GpuHashTable.~GpuHashTable();
#include "test_map.cpp"
|
2d5b8ba14f37ae3b79a7143f9acd6701ef59fd58.cu
|
#include <iostream>
#include <limits.h>
#include <stdlib.h>
#include <ctime>
#include <sstream>
#include <string>
#include <stdint.h>
#include "gpu_hashtable.hpp"
/* INIT HASH
*/
GpuHashTable::GpuHashTable(int size) {
hash_size = size; // actual size of hashtable
num_entries = 0; // number of occupied slots
cudaMalloc((void **) &hashtable, size * sizeof(entry));
cudaMemset(hashtable, KEY_INVALID, size * sizeof(entry));
}
/* DESTROY HASH
*/
GpuHashTable::~GpuHashTable() {
cudaFree(hashtable);
}
/* Hash function used by hashtable
*/
__device__ uint32_t hash_func(int data, int limit) {
return ((long)abs(data) * 105359939) % 1685759167 % limit;
}
/* resize function that will be run by GPU
*/
__global__ void resize(GpuHashTable::entry *hashtable, GpuHashTable::entry *new_hash,
int hash_size, int numBucketsReshape) {
/* each thread will copy one element from hashtable to new_hash */
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < hash_size) {
if (hashtable[tid].key == KEY_INVALID)
return;
/* rehash each key */
uint32_t key = hash_func(hashtable[tid].key, numBucketsReshape);
while (true) {
/* find empty slot and add pair */
uint32_t prev = atomicCAS(&new_hash[key].key, KEY_INVALID, hashtable[tid].key);
if (prev == hashtable[tid].key || prev == KEY_INVALID) {
new_hash[key].value = hashtable[tid].value;
break;
}
key++;
key %= numBucketsReshape;
}
}
}
/* RESHAPE HASH
*/
void GpuHashTable::reshape(int numBucketsReshape) {
uint32_t block_size = 100;
uint32_t blocks_no = hash_size / block_size;
if (hash_size % block_size)
++blocks_no;
struct entry *new_hash;
/* alloc new hash */
cudaMalloc((void **) &new_hash, numBucketsReshape * sizeof(entry));
cudaMemset(new_hash, KEY_INVALID, numBucketsReshape * sizeof(entry));
resize<<<blocks_no, block_size>>>(hashtable, new_hash, hash_size, numBucketsReshape);
cudaDeviceSynchronize();
cudaFree(hashtable);
hashtable = new_hash;
hash_size = numBucketsReshape;
}
/* insert function that will be run by GPU
*/
__global__ void insert(GpuHashTable::entry *hashtable, int hash_size,
int *keys, int* values, int numKeys) {
/* each thread will insert one element into hashtable */
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < numKeys) {
/* compute hash for key */
uint32_t key = hash_func(keys[tid], hash_size);
while (true) {
/* find empty spot or update value if the key already exists */
uint32_t prev = atomicCAS(&hashtable[key].key, KEY_INVALID, keys[tid]);
if (prev == keys[tid] || prev == KEY_INVALID) {
hashtable[key].value = values[tid];
return;
}
key++;
key %= hash_size;
}
}
}
/* INSERT BATCH
*/
bool GpuHashTable::insertBatch(int *keys, int* values, int numKeys) {
int *new_values;
/* compute number of entries before calling insert in order to perform
* reshape if needed
*/
new_values = getBatch(keys, numKeys);
for (int i = 0; i < numKeys; i++)
if (new_values[i] == KEY_INVALID)
num_entries++;
if ((float)(num_entries) / hash_size >= 0.9)
reshape(num_entries + (int)(0.1 * num_entries));
uint32_t block_size = 100;
uint32_t blocks_no = numKeys / block_size;
if (numKeys % block_size)
++blocks_no;
int *dev_keys = 0;
int *dev_values = 0;
/* alloc memory for GPU and copy keys and values arrays into GPU memory */
cudaMalloc((void **) &dev_keys, numKeys * sizeof(int));
cudaMalloc((void **) &dev_values, numKeys * sizeof(int));
cudaMemcpy(dev_keys, keys, numKeys * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_values, values, numKeys * sizeof(int), cudaMemcpyHostToDevice);
insert<<<blocks_no, block_size>>>(hashtable, hash_size, dev_keys, dev_values, numKeys);
cudaDeviceSynchronize();
cudaFree(dev_keys);
cudaFree(dev_values);
free(new_values);
return true;
}
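/* Growth-policy note (illustrative numbers, not from the original tests): with
 * hash_size == 100, the table grows as soon as num_entries reaches 90 (load factor
 * 0.9); the requested size is num_entries + 0.1 * num_entries, i.e. reshape(99). */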
/* get function that will be run by GPU
*/
__global__ void get(GpuHashTable::entry *hashtable, int hash_size,
int *keys, int *values, int numKeys) {
/* each thread will add to the result array one element from hashtable
* corresponding to one key from the keys array
*/
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < numKeys) {
/* compute hash for key */
uint32_t key = hash_func(keys[tid], hash_size);
while (true) {
if (hashtable[key].key == keys[tid]) {
values[tid] = hashtable[key].value;
break;
}
if (hashtable[key].key == KEY_INVALID) {
values[tid] = KEY_INVALID;
break;
}
key++;
key %= hash_size;
}
}
}
/* GET BATCH
*/
int* GpuHashTable::getBatch(int* keys, int numKeys) {
int *results = (int *)malloc(numKeys * sizeof(int));
uint32_t block_size = 100;
uint32_t blocks_no = numKeys / block_size;
if (numKeys % block_size)
++blocks_no;
int *dev_keys = 0;
int *dev_values = 0;
/* alloc memory for GPU and copy keys and values arrays into GPU memory */
cudaMalloc((void **) &dev_keys, numKeys * sizeof(int));
cudaMalloc((void **) &dev_values, numKeys * sizeof(int));
cudaMemcpy(dev_keys, keys, numKeys * sizeof(int), cudaMemcpyHostToDevice);
cudaMemset(dev_values, KEY_INVALID, numKeys * sizeof(int));
get<<<blocks_no, block_size>>>(hashtable, hash_size, dev_keys, dev_values, numKeys);
cudaDeviceSynchronize();
/* copy values array from GPU memory into results array (CPU memory) */
cudaMemcpy(results, dev_values, numKeys * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dev_keys);
cudaFree(dev_values);
return results;
}
/* GET LOAD FACTOR
* number of occupied slots / total number of hash slots
*/
float GpuHashTable::loadFactor() {
return (float)num_entries / hash_size; // no larger than 1.0f = 100%
}
/*********************************************************/
#define HASH_INIT GpuHashTable GpuHashTable(1);
#define HASH_RESERVE(size) GpuHashTable.reshape(size);
#define HASH_BATCH_INSERT(keys, values, numKeys) GpuHashTable.insertBatch(keys, values, numKeys)
#define HASH_BATCH_GET(keys, numKeys) GpuHashTable.getBatch(keys, numKeys)
#define HASH_LOAD_FACTOR GpuHashTable.loadFactor()
#define HASH_DESTROY GpuHashTable.~GpuHashTable();
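/* Minimal usage sketch of the macros above (illustrative only; the keys, values and
 * sizes are made up, and test_map.cpp is the actual driver):
 *
 *   HASH_INIT
 *   HASH_RESERVE(1024)
 *   int keys[3]   = {1, 2, 3};
 *   int values[3] = {10, 20, 30};
 *   HASH_BATCH_INSERT(keys, values, 3);
 *   int *out = HASH_BATCH_GET(keys, 3);   // caller owns and frees out
 *   float lf = HASH_LOAD_FACTOR;          // 3 / 1024 after the insert above
 *   HASH_DESTROY
 */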
#include "test_map.cpp"
|
48eece5db3e1d3c1b05e72e558b243dba99f1ecb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <cstdio>
#define ITER 1000000
__global__
void vecAddKernel(float* d_A, float* d_B, float* d_C, int n)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n)
{
d_C[i] = d_A[i] + d_B[i];
}
}
void vecAdd(float* h_A, float* h_B, float* h_C, int n)
{
int size = n * sizeof(float);
float *d_A, *d_B, *d_C;
hipMalloc((void **) &d_A, size);
hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
hipMalloc((void **) &d_B, size);
hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
hipMalloc((void **) &d_C, size);
hipLaunchKernelGGL(( vecAddKernel), dim3(ceil(n/256.0)), dim3(256), 0, 0, d_A, d_B, d_C, n);
hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
int main()
{
float *h_a, *h_b, *h_c;
h_a = (float *)malloc(ITER * sizeof(float));
h_b = (float *)malloc(ITER * sizeof(float));
h_c = (float *)malloc(ITER * sizeof(float));
for (int i = 0; i < ITER; ++i)
{
h_a[i] = i;
h_b[i] = i;
h_c[i] = i;
}
vecAdd(h_a, h_b, h_c, ITER);
free(h_a);
free(h_b);
free(h_c);
return 0;
}
|
48eece5db3e1d3c1b05e72e558b243dba99f1ecb.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <cstdio>
#define ITER 1000000
__global__
void vecAddKernel(float* d_A, float* d_B, float* d_C, int n)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n)
{
d_C[i] = d_A[i] + d_B[i];
}
}
void vecAdd(float* h_A, float* h_B, float* h_C, int n)
{
int size = n * sizeof(float);
float *d_A, *d_B, *d_C;
cudaMalloc((void **) &d_A, size);
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMalloc((void **) &d_B, size);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
cudaMalloc((void **) &d_C, size);
vecAddKernel<<<ceil(n/256.0), 256>>>(d_A, d_B, d_C, n);
cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
int main()
{
float *h_a, *h_b, *h_c;
h_a = (float *)malloc(ITER * sizeof(float));
h_b = (float *)malloc(ITER * sizeof(float));
h_c = (float *)malloc(ITER * sizeof(float));
for (int i = 0; i < ITER; ++i)
{
h_a[i] = i;
h_b[i] = i;
h_c[i] = i;
}
vecAdd(h_a, h_b, h_c, ITER);
free(h_a);
free(h_b);
free(h_c);
return 0;
}
|
3f3128409f0fc7b13911688bcfc95515d5c8bc5e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is, instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "reference_calc.cpp"
#include "utils.h"
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to the sequential reference solution for the exact clamping semantics you should follow.
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
float result = 0.f;
//For every value in the filter around the pixel (thread_2D_pos.x, thread_2D_pos.y)
for (int filter_r = -filterWidth/2; filter_r <= filterWidth/2; ++filter_r) {
for (int filter_c = -filterWidth/2; filter_c <= filterWidth/2; ++filter_c) {
//Find the global image position for this filter position
//clamp to boundary of the image
int image_r = min(max(thread_2D_pos.y + filter_r, 0), static_cast<int>(numRows - 1));
int image_c = min(max(thread_2D_pos.x + filter_c, 0), static_cast<int>(numCols - 1));
float image_value = static_cast<float>(inputChannel[image_r * numCols + image_c]);
float filter_value = filter[(filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2];
result += image_value * filter_value;
}
}
outputChannel[thread_1D_pos] = result;
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
//place each color into the proper array
redChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].x;
greenChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].y;
blueChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with hipMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc
int num_filter_bytes = sizeof(float) * filterWidth * filterWidth;
checkCudaErrors(hipMalloc(&d_filter, num_filter_bytes));
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(hipMemcpy(d_filter, h_filter, num_filter_bytes, hipMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
// Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(1, 1, 1);
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and block size.
const dim3 gridSize(numCols, numRows, 1);
// Launch a kernel for separating the RGBA image into different color channels
hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA,
numRows,
numCols,
d_red,
d_green,
d_blue);
// Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Call your convolution kernel here 3 times, once for each color channel.
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red,
d_redBlurred,
numRows,
numCols,
d_filter,
filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_green,
d_greenBlurred,
numRows,
numCols,
d_filter,
filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue,
d_blueBlurred,
numRows,
numCols,
d_filter,
filterWidth);
// Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
//Free all the memory that we allocated
//make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
checkCudaErrors(hipFree(d_filter));
}
|
3f3128409f0fc7b13911688bcfc95515d5c8bc5e.cu
|
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is, instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "reference_calc.cpp"
#include "utils.h"
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to the sequential reference solution for the exact clamping semantics you should follow.
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
float result = 0.f;
//For every value in the filter around the pixel (thread_2D_pos.x, thread_2D_pos.y)
for (int filter_r = -filterWidth/2; filter_r <= filterWidth/2; ++filter_r) {
for (int filter_c = -filterWidth/2; filter_c <= filterWidth/2; ++filter_c) {
//Find the global image position for this filter position
//clamp to boundary of the image
int image_r = min(max(thread_2D_pos.y + filter_r, 0), static_cast<int>(numRows - 1));
int image_c = min(max(thread_2D_pos.x + filter_c, 0), static_cast<int>(numCols - 1));
float image_value = static_cast<float>(inputChannel[image_r * numCols + image_c]);
float filter_value = filter[(filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2];
result += image_value * filter_value;
}
}
outputChannel[thread_1D_pos] = result;
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
//place each color into the proper array
redChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].x;
greenChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].y;
blueChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with cudaMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc
int num_filter_bytes = sizeof(float) * filterWidth * filterWidth;
checkCudaErrors(cudaMalloc(&d_filter, num_filter_bytes));
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(cudaMemcpy(d_filter, h_filter, num_filter_bytes, cudaMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
// Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(1, 1, 1);
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and block size.
const dim3 gridSize(numCols, numRows, 1);
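// A denser launch configuration (hedged sketch, not part of the original submission):
// one thread per pixel with 16x16 blocks and a ceil-divided grid gives the same pixel
// mapping while launching far fewer blocks:
//   const dim3 blockSize(16, 16, 1);
//   const dim3 gridSize((numCols + blockSize.x - 1) / blockSize.x,
//                       (numRows + blockSize.y - 1) / blockSize.y, 1);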
// Launch a kernel for separating the RGBA image into different color channels
separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA,
numRows,
numCols,
d_red,
d_green,
d_blue);
// Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Call your convolution kernel here 3 times, once for each color channel.
gaussian_blur<<<gridSize, blockSize>>>(d_red,
d_redBlurred,
numRows,
numCols,
d_filter,
filterWidth);
gaussian_blur<<<gridSize, blockSize>>>(d_green,
d_greenBlurred,
numRows,
numCols,
d_filter,
filterWidth);
gaussian_blur<<<gridSize, blockSize>>>(d_blue,
d_blueBlurred,
numRows,
numCols,
d_filter,
filterWidth);
// Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
//Free all the memory that we allocated
//make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
checkCudaErrors(cudaFree(d_filter));
}
|
01510d3965482dc9c2085625ba76744f845c242b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "book.h"
#include "cpu_anim.h"
#define DIM 1024
#define PI 3.1415926535897932f
#define MAX_VOL 1.0f
#define MIN_VOL 0.00001f
#define SPEED 0.25f
struct DataBlock{
unsigned char *output_bitmap;
float *dev_inSrc;
float *dev_outSrc;
float *dev_constSrc;
CPUAnimBitmap *bitmap;
hipEvent_t start, stop;
float totalTime;
float frames;
};
__global__ void copy_const_kernel(float *iptr, const float *cptr){
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
if(cptr[offset] != 0){
iptr[offset] = cptr[offset];
}
}
__global__ void blend_kernel(float *outSrc, const float *inSrc){
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
int left = offset - 1;
int right = offset + 1;
if(x==0) left++;
if(x==DIM - 1) right--;
int top = offset - DIM;
int bottom = offset + DIM;
if (y==0) top += DIM;
if (y==DIM - 1) bottom -= DIM;
outSrc[offset] = inSrc[offset] + SPEED * (inSrc[top]
+ inSrc[bottom] + inSrc[right] + inSrc[left]
- 4 * inSrc[offset]);
}
void anim_gpu(DataBlock *d, int ticks){
HANDLE_ERROR(hipEventRecord(d->start, 0) );
dim3 blocks(DIM / 16, DIM / 16);
dim3 threads(16, 16);
CPUAnimBitmap *bitmap = d->bitmap;
for(int i=0;i<90;i++){
hipLaunchKernelGGL(( copy_const_kernel), dim3(blocks), dim3(threads), 0, 0, d->dev_inSrc, d->dev_constSrc);
hipLaunchKernelGGL(( blend_kernel), dim3(blocks), dim3(threads), 0, 0, d->dev_outSrc, d->dev_inSrc);
swap(d->dev_inSrc, d->dev_outSrc);
}
hipLaunchKernelGGL(( float_to_color), dim3(blocks), dim3(threads), 0, 0, d->output_bitmap, d->dev_inSrc);
HANDLE_ERROR(hipMemcpy(bitmap->get_ptr(), d->output_bitmap, bitmap->image_size(), hipMemcpyDeviceToHost));
HANDLE_ERROR(hipEventRecord(d->stop, 0) );
HANDLE_ERROR(hipEventSynchronize(d->stop));
float elapsedTime;
HANDLE_ERROR(hipEventElapsedTime(&elapsedTime, d->start, d->stop));
d->totalTime +=elapsedTime;
d->frames +=1;
printf("Average time per frame: %3.1f ms\n", d->totalTime / d->frames);
}
void anim_exit(DataBlock *d){
hipFree(d->dev_inSrc);
hipFree(d->dev_outSrc);
hipFree(d->dev_constSrc);
HANDLE_ERROR(hipEventDestroy(d->start) );
HANDLE_ERROR(hipEventDestroy(d->stop) );
}
int main(){
DataBlock data;
CPUAnimBitmap bitmap(DIM, DIM, &data);
data.bitmap = &bitmap;
data.totalTime = 0;
data.frames = 0;
HANDLE_ERROR(hipEventCreate(&data.start, 1) );
HANDLE_ERROR(hipEventCreate(&data.stop, 1) );
HANDLE_ERROR(hipMalloc( (void **) &data.output_bitmap, bitmap.image_size() ));
HANDLE_ERROR(hipMalloc( (void **) &data.dev_inSrc, bitmap.image_size() ));
HANDLE_ERROR(hipMalloc( (void **) &data.dev_outSrc, bitmap.image_size() ));
HANDLE_ERROR(hipMalloc( (void **) &data.dev_constSrc, bitmap.image_size() ));
float *temp = (float *) malloc(bitmap.image_size() );
int i = 0;
for (i= 0; i<DIM * DIM; i++){
temp[i] = 0;
int x = i % DIM;
int y = i / DIM;
if( (x > 300) && (x < 600) && (y> 310) && (y<610) ){
temp[i] = MAX_VOL;
}
}
HANDLE_ERROR(hipMemcpy(data.dev_constSrc, temp, bitmap.image_size(), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(data.dev_inSrc, temp, bitmap.image_size(), hipMemcpyHostToDevice));
free(temp);
bitmap.anim_and_exit( (void (*)(void *, int)) anim_gpu, (void (*)(void *)) anim_exit);
}
|
01510d3965482dc9c2085625ba76744f845c242b.cu
|
#include "cuda.h"
#include "book.h"
#include "cpu_anim.h"
#define DIM 1024
#define PI 3.1415926535897932f
#define MAX_VOL 1.0f
#define MIN_VOL 0.00001f
#define SPEED 0.25f
struct DataBlock{
unsigned char *output_bitmap;
float *dev_inSrc;
float *dev_outSrc;
float *dev_constSrc;
CPUAnimBitmap *bitmap;
cudaEvent_t start, stop;
float totalTime;
float frames;
};
__global__ void copy_const_kernel(float *iptr, const float *cptr){
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
if(cptr[offset] != 0){
iptr[offset] = cptr[offset];
}
}
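// blend_kernel below performs one explicit diffusion step: each cell moves toward the
// average of its four (edge-clamped) neighbors, scaled by SPEED, a Jacobi-style update.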
__global__ void blend_kernel(float *outSrc, const float *inSrc){
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
int left = offset - 1;
int right = offset + 1;
if(x==0) left++;
if(x==DIM - 1) right--;
int top = offset - DIM;
int bottom = offset + DIM;
if (y==0) top += DIM;
if (y==DIM - 1) bottom -= DIM;
outSrc[offset] = inSrc[offset] + SPEED * (inSrc[top]
+ inSrc[bottom] + inSrc[right] + inSrc[left]
- 4 * inSrc[offset]);
}
void anim_gpu(DataBlock *d, int ticks){
HANDLE_ERROR(cudaEventRecord(d->start, 0) );
dim3 blocks(DIM / 16, DIM / 16);
dim3 threads(16, 16);
CPUAnimBitmap *bitmap = d->bitmap;
for(int i=0;i<90;i++){
copy_const_kernel<<<blocks, threads>>>(d->dev_inSrc, d->dev_constSrc);
blend_kernel<<<blocks, threads>>>(d->dev_outSrc, d->dev_inSrc);
swap(d->dev_inSrc, d->dev_outSrc);
}
float_to_color<<<blocks, threads>>> (d->output_bitmap, d->dev_inSrc);
HANDLE_ERROR(cudaMemcpy(bitmap->get_ptr(), d->output_bitmap, bitmap->image_size(), cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaEventRecord(d->stop, 0) );
HANDLE_ERROR(cudaEventSynchronize(d->stop));
float elapsedTime;
HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime, d->start, d->stop));
d->totalTime +=elapsedTime;
d->frames +=1;
printf("Average time per frame: %3.1f ms\n", d->totalTime / d->frames);
}
void anim_exit(DataBlock *d){
cudaFree(d->dev_inSrc);
cudaFree(d->dev_outSrc);
cudaFree(d->dev_constSrc);
HANDLE_ERROR(cudaEventDestroy(d->start) );
HANDLE_ERROR(cudaEventDestroy(d->stop) );
}
int main(){
DataBlock data;
CPUAnimBitmap bitmap(DIM, DIM, &data);
data.bitmap = &bitmap;
data.totalTime = 0;
data.frames = 0;
HANDLE_ERROR(cudaEventCreate(&data.start, 1) );
HANDLE_ERROR(cudaEventCreate(&data.stop, 1) );
HANDLE_ERROR(cudaMalloc( (void **) &data.output_bitmap, bitmap.image_size() ));
HANDLE_ERROR(cudaMalloc( (void **) &data.dev_inSrc, bitmap.image_size() ));
HANDLE_ERROR(cudaMalloc( (void **) &data.dev_outSrc, bitmap.image_size() ));
HANDLE_ERROR(cudaMalloc( (void **) &data.dev_constSrc, bitmap.image_size() ));
float *temp = (float *) malloc(bitmap.image_size() );
int i = 0;
for (i= 0; i<DIM * DIM; i++){
temp[i] = 0;
int x = i % DIM;
int y = i / DIM;
if( (x > 300) && (x < 600) && (y> 310) && (y<610) ){
temp[i] = MAX_VOL;
}
}
HANDLE_ERROR(cudaMemcpy(data.dev_constSrc, temp, bitmap.image_size(), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(data.dev_inSrc, temp, bitmap.image_size(), cudaMemcpyHostToDevice));
free(temp);
bitmap.anim_and_exit( (void (*)(void *, int)) anim_gpu, (void (*)(void *)) anim_exit);
}
|
ab1bb0d5a1185c3e760e0dea393ed2f8f06ba21a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/DeviceUtils.cuh>
#include <ATen/native/ForeachUtils.h>
#include <ATen/native/hip/block_reduce.cuh>
#include <ATen/native/hip/ForeachFunctors.cuh>
#include <ATen/native/hip/MultiTensorApply.cuh>
namespace at {
namespace native {
template<typename T, int NormType, int depth=1, int r_args_depth=1, int res_arg_index=0>
struct LpNormFunctor {
static_assert(NormType == 1 || NormType == 2, "foreach_norm supports only L1 and L2 norm");
__device__ __forceinline__ void operator() (
int chunk_size,
TensorListMetadata<depth>& tl,
T* output_per_tensor,
const int max_chunks_per_tensor
) {
using opmath_t = typename at::opmath_type<T>;
int tensor_loc = tl.block_to_tensor[blockIdx.x];
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.numel_for_tensor[tensor_loc];
T* x = (T*)tl.addresses[0][tensor_loc];
x += chunk_idx * chunk_size;
n -= chunk_idx * chunk_size;
__shared__ opmath_t s_vals[512];
opmath_t vals[kILP];
T r_x[kILP];
for (int i = 0; i < kILP; i++) {
vals[i] = opmath_t(0);
r_x[i] = T(0);
}
if (n % kILP == 0 && (chunk_size & kILP) == 0 && is_aligned(x)) {
for (int i_start = threadIdx.x; i_start * kILP < n && i_start * kILP < chunk_size; i_start += blockDim.x) {
// load
load_store(r_x, x, 0, i_start);
#pragma unroll
for (int ii = 0; ii < kILP; ii++) {
opmath_t next = static_cast<opmath_t>(r_x[ii]);
vals[ii] += NormType == 1 ? ::abs(next) : next * next;
}
}
} else {
for (int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x * kILP) {
#pragma unroll
for (int ii = 0; ii < kILP; ii++) {
int i = i_start + threadIdx.x + ii * blockDim.x;
if (i < n && i < chunk_size) {
opmath_t next = static_cast<opmath_t>(x[i]);
vals[ii] += NormType == 1 ? ::abs(next) : next * next;
}
}
}
}
auto val = opmath_t(0);
for (int i = 0; i < kILP; i++) {
val += vals[i];
}
auto final = at::native::cuda_utils::BlockReduceSum(val, s_vals);
if (threadIdx.x == 0) {
output_per_tensor[(tl.start_tensor_this_launch + tensor_loc) * max_chunks_per_tensor + chunk_idx] = final;
}
}
};
template<typename T, int NormType>
__global__ void lpnorm_cleanup(
T* output_per_tensor,
T* ret_per_tensor,
int max_chunks_per_tensor) {
using opmath_t = typename at::opmath_type<T>;
__shared__ opmath_t vals[512];
T* output_this_tensor = output_per_tensor + blockIdx.x*max_chunks_per_tensor;
T val = 0;
for (int i = threadIdx.x; i < max_chunks_per_tensor; i += blockDim.x) {
val += output_this_tensor[i];
}
opmath_t final = at::native::cuda_utils::BlockReduceSum<opmath_t>(val, vals);
if(threadIdx.x == 0) {
ret_per_tensor[blockIdx.x] = NormType == 1 ? final : ::sqrt(final);
}
}
// note(mkozuki): Why excluding Int and Complex from fast path
// - Int: at::norm does not support.
// - Complex: __shfl_down_sync does not support complex and foreach does not support functions whose input dtypes and output dtype differ.
std::vector<Tensor> foreach_tensor_norm_cuda(TensorList tensors, const Scalar& ord) {
double p;
if (ord.isIntegral(false)) {
p = ord.to<int64_t>();
} else if (ord.isFloatingPoint()) {
p = ord.to<double>();
} else {
AT_ERROR("foreach_tensor_norm_cuda expects ord to be integer or float");
}
check_foreach_api_restrictions(tensors);
const bool has_int_or_complex = std::any_of(tensors.begin(), tensors.end(), [](const auto & t) {
const auto scalar_type = t.scalar_type();
return at::isIntegralType(scalar_type, /*includeBool*/true) || at::isComplexType(scalar_type);
});
if (!can_use_fast_route(tensors) ||
has_int_or_complex ||
!(p == static_cast<double>(1) || p == static_cast<double>(2))) {
return foreach_tensor_norm_slow(tensors, ord);
}
const int ntensors = tensors.size();
int max_chunks_per_tensor = -1;
for (int t = 0; t < ntensors; t++) {
int max_chunks_this_tensor = (tensors[t].numel() + kChunkSize - 1) / kChunkSize;
if(max_chunks_this_tensor > max_chunks_per_tensor) {
max_chunks_per_tensor = max_chunks_this_tensor;
}
}
const auto options = tensors[0].options();
auto output_per_tensor = at::zeros({ntensors*max_chunks_per_tensor}, options);
auto ret_per_tensor = at::empty({ntensors}, options);
auto tensor_lists = std::vector<std::vector<Tensor>>{tensors.vec()};
if (p == static_cast<double>(1)) {
AT_DISPATCH_FLOATING_TYPES_AND2(
kHalf, kBFloat16, tensor_lists[0][0].scalar_type(), "foreach_tensor_norm_cuda", [&]() {
using opmath_t = typename at::opmath_type<scalar_t>;
multi_tensor_apply<1>(
tensor_lists,
LpNormFunctor<scalar_t, 1>(),
output_per_tensor.data_ptr<scalar_t>(),
max_chunks_per_tensor);
C10_HIP_KERNEL_LAUNCH_CHECK();
const at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard(device_of(output_per_tensor));
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( lpnorm_cleanup<scalar_t, 1>), dim3(ntensors), dim3(512), 0, stream,
output_per_tensor.data_ptr<scalar_t>(),
ret_per_tensor.data_ptr<scalar_t>(),
max_chunks_per_tensor);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
} else if (p == static_cast<double>(2)) {
AT_DISPATCH_FLOATING_TYPES_AND2(
kHalf, kBFloat16, tensor_lists[0][0].scalar_type(), "foreach_tensor_norm_cuda", [&]() {
using opmath_t = typename at::opmath_type<scalar_t>;
multi_tensor_apply<1>(
tensor_lists,
LpNormFunctor<scalar_t, 2>(),
output_per_tensor.data_ptr<scalar_t>(),
max_chunks_per_tensor);
C10_HIP_KERNEL_LAUNCH_CHECK();
const at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard(device_of(output_per_tensor));
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( lpnorm_cleanup<scalar_t, 2>), dim3(ntensors), dim3(512), 0, stream,
output_per_tensor.data_ptr<scalar_t>(),
ret_per_tensor.data_ptr<scalar_t>(),
max_chunks_per_tensor);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
} else {
AT_ERROR("foreach_tensor_norm_cuda fast path got unexpected ord value: ", p);
}
std::vector<Tensor> result;
result.reserve(ntensors);
for (const auto& i : c10::irange(ntensors)) {
result.emplace_back(ret_per_tensor[i]);
}
return result;
}
} // namespace native
} // namespace at
|
ab1bb0d5a1185c3e760e0dea393ed2f8f06ba21a.cu
|
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/DeviceUtils.cuh>
#include <ATen/native/ForeachUtils.h>
#include <ATen/native/cuda/block_reduce.cuh>
#include <ATen/native/cuda/ForeachFunctors.cuh>
#include <ATen/native/cuda/MultiTensorApply.cuh>
namespace at {
namespace native {
template<typename T, int NormType, int depth=1, int r_args_depth=1, int res_arg_index=0>
struct LpNormFunctor {
static_assert(NormType == 1 || NormType == 2, "foreach_norm supports only L1 and L2 norm");
__device__ __forceinline__ void operator() (
int chunk_size,
TensorListMetadata<depth>& tl,
T* output_per_tensor,
const int max_chunks_per_tensor
) {
using opmath_t = typename at::opmath_type<T>;
int tensor_loc = tl.block_to_tensor[blockIdx.x];
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.numel_for_tensor[tensor_loc];
T* x = (T*)tl.addresses[0][tensor_loc];
x += chunk_idx * chunk_size;
n -= chunk_idx * chunk_size;
__shared__ opmath_t s_vals[512];
opmath_t vals[kILP];
T r_x[kILP];
for (int i = 0; i < kILP; i++) {
vals[i] = opmath_t(0);
r_x[i] = T(0);
}
if (n % kILP == 0 && (chunk_size & kILP) == 0 && is_aligned(x)) {
for (int i_start = threadIdx.x; i_start * kILP < n && i_start * kILP < chunk_size; i_start += blockDim.x) {
// load
load_store(r_x, x, 0, i_start);
#pragma unroll
for (int ii = 0; ii < kILP; ii++) {
opmath_t next = static_cast<opmath_t>(r_x[ii]);
vals[ii] += NormType == 1 ? ::abs(next) : next * next;
}
}
} else {
for (int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x * kILP) {
#pragma unroll
for (int ii = 0; ii < kILP; ii++) {
int i = i_start + threadIdx.x + ii * blockDim.x;
if (i < n && i < chunk_size) {
opmath_t next = static_cast<opmath_t>(x[i]);
vals[ii] += NormType == 1 ? ::abs(next) : next * next;
}
}
}
}
auto val = opmath_t(0);
for (int i = 0; i < kILP; i++) {
val += vals[i];
}
auto final = at::native::cuda_utils::BlockReduceSum(val, s_vals);
if (threadIdx.x == 0) {
output_per_tensor[(tl.start_tensor_this_launch + tensor_loc) * max_chunks_per_tensor + chunk_idx] = final;
}
}
};
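// Second pass of the reduction: lpnorm_cleanup sums the per-chunk partial results of one
// tensor per block and, for the L2 norm, applies the final sqrt.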
template<typename T, int NormType>
__global__ void lpnorm_cleanup(
T* output_per_tensor,
T* ret_per_tensor,
int max_chunks_per_tensor) {
using opmath_t = typename at::opmath_type<T>;
__shared__ opmath_t vals[512];
T* output_this_tensor = output_per_tensor + blockIdx.x*max_chunks_per_tensor;
T val = 0;
for (int i = threadIdx.x; i < max_chunks_per_tensor; i += blockDim.x) {
val += output_this_tensor[i];
}
opmath_t final = at::native::cuda_utils::BlockReduceSum<opmath_t>(val, vals);
if(threadIdx.x == 0) {
ret_per_tensor[blockIdx.x] = NormType == 1 ? final : ::sqrt(final);
}
}
// note(mkozuki): Why excluding Int and Complex from fast path
// - Int: at::norm does not support.
// - Complex: __shfl_down_sync does not support complex and foreach does not support functions whose input dtypes and output dtype differ.
std::vector<Tensor> foreach_tensor_norm_cuda(TensorList tensors, const Scalar& ord) {
double p;
if (ord.isIntegral(false)) {
p = ord.to<int64_t>();
} else if (ord.isFloatingPoint()) {
p = ord.to<double>();
} else {
AT_ERROR("foreach_tensor_norm_cuda expects ord to be integer or float");
}
check_foreach_api_restrictions(tensors);
const bool has_int_or_complex = std::any_of(tensors.begin(), tensors.end(), [](const auto & t) {
const auto scalar_type = t.scalar_type();
return at::isIntegralType(scalar_type, /*includeBool*/true) || at::isComplexType(scalar_type);
});
if (!can_use_fast_route(tensors) ||
has_int_or_complex ||
!(p == static_cast<double>(1) || p == static_cast<double>(2))) {
return foreach_tensor_norm_slow(tensors, ord);
}
const int ntensors = tensors.size();
int max_chunks_per_tensor = -1;
for (int t = 0; t < ntensors; t++) {
int max_chunks_this_tensor = (tensors[t].numel() + kChunkSize - 1) / kChunkSize;
if(max_chunks_this_tensor > max_chunks_per_tensor) {
max_chunks_per_tensor = max_chunks_this_tensor;
}
}
const auto options = tensors[0].options();
auto output_per_tensor = at::zeros({ntensors*max_chunks_per_tensor}, options);
auto ret_per_tensor = at::empty({ntensors}, options);
auto tensor_lists = std::vector<std::vector<Tensor>>{tensors.vec()};
if (p == static_cast<double>(1)) {
AT_DISPATCH_FLOATING_TYPES_AND2(
kHalf, kBFloat16, tensor_lists[0][0].scalar_type(), "foreach_tensor_norm_cuda", [&]() {
using opmath_t = typename at::opmath_type<scalar_t>;
multi_tensor_apply<1>(
tensor_lists,
LpNormFunctor<scalar_t, 1>(),
output_per_tensor.data_ptr<scalar_t>(),
max_chunks_per_tensor);
C10_CUDA_KERNEL_LAUNCH_CHECK();
const at::cuda::OptionalCUDAGuard device_guard(device_of(output_per_tensor));
auto stream = at::cuda::getCurrentCUDAStream();
lpnorm_cleanup<scalar_t, 1><<<ntensors, 512, 0, stream>>>(
output_per_tensor.data_ptr<scalar_t>(),
ret_per_tensor.data_ptr<scalar_t>(),
max_chunks_per_tensor);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
} else if (p == static_cast<double>(2)) {
AT_DISPATCH_FLOATING_TYPES_AND2(
kHalf, kBFloat16, tensor_lists[0][0].scalar_type(), "foreach_tensor_norm_cuda", [&]() {
using opmath_t = typename at::opmath_type<scalar_t>;
multi_tensor_apply<1>(
tensor_lists,
LpNormFunctor<scalar_t, 2>(),
output_per_tensor.data_ptr<scalar_t>(),
max_chunks_per_tensor);
C10_CUDA_KERNEL_LAUNCH_CHECK();
const at::cuda::OptionalCUDAGuard device_guard(device_of(output_per_tensor));
auto stream = at::cuda::getCurrentCUDAStream();
lpnorm_cleanup<scalar_t, 2><<<ntensors, 512, 0, stream>>>(
output_per_tensor.data_ptr<scalar_t>(),
ret_per_tensor.data_ptr<scalar_t>(),
max_chunks_per_tensor);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
} else {
AT_ERROR("foreach_tensor_norm_cuda fast path got unexpected ord value: ", p);
}
std::vector<Tensor> result;
result.reserve(ntensors);
for (const auto& i : c10::irange(ntensors)) {
result.emplace_back(ret_per_tensor[i]);
}
return result;
}
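// Hedged usage sketch, not part of this file: assuming this function is registered as the CUDA
// backend of at::_foreach_norm, per-tensor L2 norms would be computed roughly as
//   std::vector<at::Tensor> norms = at::_foreach_norm(tensors, /*ord=*/2);
// Float/half/bfloat16 inputs with ord 1 or 2 that pass can_use_fast_route take the fast path
// above; everything else falls back to foreach_tensor_norm_slow.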
} // namespace native
} // namespace at
|
302b2f7d423c09d538e9158767eef44c43038795.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include "NumCpp.hpp"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <thrust/iterator/transform_iterator.h>
#include <math.h>
#include <algorithm>
#include <cstdlib>
#include <iostream>
#include <iomanip>
#include <thrust/extrema.h>
#include <thrust/execution_policy.h>
#include <thrust/remove.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/sequence.h>
#include <stdio.h>
hipError_t addWithCuda(int* c, const int* a, const int* b, unsigned int size);
__global__ void addKernel(int* c, const int* a, const int* b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
nc::NdArray<float> generateDataTestingForCLass(int datapoints, int constraintX, int constraintY, bool garunteedGoodClustering, int numclusters = 3) {
nc::NdArray<float> generatedDataset = nc::empty<float>(nc::Shape(1, 2));
for (int i = 0; i < datapoints; i++) {
if (garunteedGoodClustering) {
// nc::NdArray<float> randomPointX = nc::random::uniform<float>(nc::Shape(1), 0, constraintX);
// nc::NdArray<float> randomPointY = nc::random::uniform<float>(nc::Shape(1), 0, constraintY);
// nc::NdArray<float> randomPoint = nc::append<float>(randomPointX, randomPointY, nc::Axis::NONE);
// nc::take()
// if
// generatedDataset = nc::append<float>(generatedDataset, randomPoint, nc::Axis::ROW);
// nc::norm();
}
else {
nc::NdArray<float> randomPointX = nc::random::uniform<float>(nc::Shape(1), (float)0.0, (float)constraintX);
nc::NdArray<float> randomPointY = nc::random::uniform<float>(nc::Shape(1), (float)0.0, (float)constraintY);
nc::NdArray<float> randomPoint = nc::append<float>(randomPointX, randomPointY, nc::Axis::NONE);
generatedDataset = nc::append<float>(generatedDataset, randomPoint, nc::Axis::ROW);
}
}
generatedDataset = nc::deleteIndices(generatedDataset, 0, nc::Axis::ROW);
return generatedDataset;
}
nc::NdArray<float> euclidianDistanceMatrix(nc::NdArray<float> dataset) {
nc::NdArray<float> xPoints = nc::deleteIndices(dataset, 1, nc::Axis::COL);
nc::NdArray<float> yPoints = nc::deleteIndices(dataset, 0, nc::Axis::COL);
std::pair<nc::NdArray<float>, nc::NdArray<float>> meshpairX = nc::meshgrid(xPoints, xPoints);
std::pair<nc::NdArray<float>, nc::NdArray<float>> meshpairY = nc::meshgrid(yPoints, yPoints);
nc::NdArray<float> xDistances = nc::abs(std::get<0>(meshpairX) - std::get<1>(meshpairX));
nc::NdArray<float> yDistances = nc::abs(std::get<0>(meshpairY) - std::get<1>(meshpairY));
nc::NdArray<float> euclidianDistances = nc::sqrt(nc::power(xDistances, 2) + nc::power(yDistances, 2));
euclidianDistances = nc::replace(euclidianDistances, (float)0.0, (float)999999.9);
return euclidianDistances;
}
nc::NdArray<int> initialClusterAssignment(int datapoints, bool garunteedGoodClustering, int numclusters = 3) {
nc::NdArray<int> clusterAssignment = nc::arange<int>(0, datapoints);
clusterAssignment = clusterAssignment.reshape(datapoints, 1);
nc::NdArray<int> clusterZeros = nc::zeros<int>(datapoints, datapoints - 1);
clusterZeros = nc::where(clusterZeros == 0, -1, -1);
clusterAssignment = nc::append<int>(clusterAssignment, clusterZeros, nc::Axis::COL);
return clusterAssignment;
}
void agglomerativeShortestLinkSequential(int datapoints, int numClusters, nc::NdArray<float> distances, nc::NdArray<int> clusterAssignments) {
//Find minimum distance and record index and value
nc::NdArray<float> distanceAssessment = nc::where(distances > (float)0.0, distances, (float)999999.9);
nc::NdArray<float> min = nc::min(distanceAssessment);
float minValue = min(0, 0);
nc::NdArray<nc::uint32> minIndicies = nc::argmin(distanceAssessment, nc::Axis::NONE);
int minInt = int(minIndicies(0, 0));
//Always cluster left
int row = minInt / datapoints;
int column = minInt % datapoints;
int removal = 0;
int rewrite = 0;
if (row >= column) {
removal = row;
rewrite = column;
}
else {
removal = column;
rewrite = row;
}
//Merges removed columns
nc::NdArray<float> firstMergePointDistances = distances(distances.rSlice(), removal);
nc::NdArray<float> secondMergePointDistances = distances(distances.rSlice(), rewrite);
nc::NdArray<float> mergeSet = nc::stack({ firstMergePointDistances, secondMergePointDistances }, nc::Axis::COL);
mergeSet = nc::amin(mergeSet, nc::Axis::COL);
nc::NdArray<float> mergeSetRow = nc::deleteIndices(mergeSet, removal, nc::Axis::COL);
mergeSetRow = nc::deleteIndices(mergeSetRow, rewrite, nc::Axis::COL);
nc::NdArray<float> negitiveOne = { -1.0 };
mergeSetRow = nc::append<float>(negitiveOne, mergeSetRow, nc::Axis::NONE);
nc::NdArray<float> mergeSetCol = nc::deleteIndices(mergeSetRow, 0, nc::Axis::COL);
//Clusters points together based on min distance calculated
int clustersOG = clusterAssignments.shape().cols;
nc::NdArray<int> clusterZeros = nc::zeros<int>(1, clustersOG);
clusterZeros = nc::where(clusterZeros == 0, -1, -1);
nc::NdArray<int> mergeInClusterOne = clusterAssignments.row(removal);
for (int value : mergeInClusterOne) {
if (value > -1) {
nc::NdArray<int> valueint = { value };
clusterZeros = nc::deleteIndices(clusterZeros, clustersOG - 1, nc::Axis::COL);
clusterZeros = nc::append<int>(valueint, clusterZeros, nc::Axis::COL);
}
}
nc::NdArray<int> mergeInClusterTwo = clusterAssignments.row(rewrite);
for (int value : mergeInClusterTwo) {
if (value > -1) {
nc::NdArray<int> valueint = { value };
clusterZeros = nc::deleteIndices(clusterZeros, clustersOG - 1, nc::Axis::COL);
clusterZeros = nc::append<int>(valueint, clusterZeros, nc::Axis::COL);
}
}
//Remove all values we no longer need because they were in a row or col with min distance
//Replace 2 rows and 2 cols removed with 1 row and col for new cluster
clusterAssignments = nc::deleteIndices(clusterAssignments, removal, nc::Axis::ROW);
clusterAssignments = nc::deleteIndices(clusterAssignments, rewrite, nc::Axis::ROW);
clusterAssignments = nc::append<int>(clusterZeros, clusterAssignments, nc::Axis::ROW);
distances = nc::deleteIndices(distances, removal, nc::Axis::ROW);
distances = nc::deleteIndices(distances, removal, nc::Axis::COL);
distances = nc::deleteIndices(distances, rewrite, nc::Axis::ROW);
distances = nc::deleteIndices(distances, rewrite, nc::Axis::COL);
distances = nc::stack({ mergeSetCol.reshape(datapoints - 2,1), distances }, nc::Axis::COL);
distances = nc::stack({ mergeSetRow, distances }, nc::Axis::ROW);
if (datapoints - 1 > numClusters) {
datapoints = datapoints - 1;
agglomerativeShortestLinkSequential(datapoints, numClusters, distances, clusterAssignments);
}
else {
clusterAssignments.print();
}
}
struct gtz {
__device__ bool operator() (double x) { return x > 0.; }
};
struct delPoint {
__device__ bool operator() (int x) { return (x == 1); }
};
typedef thrust::tuple<int, float> argMinType;
void agglomerativeShortestLinkCuda(int numPoints, int originalNumPoints, int numCluster, float* distancePointer, int* clusterPointer) {
//Convert Distance Vector to Thrust Vector for parallel computation
//https://github.com/NVIDIA/thrust/
//WARNING: ACTIVELY BUGGED IN NEWEST VERSION OF CUDA
//IF YOU HAVE CUDA 11.0 OR 11.1, THIS WILL NOT WORK
//FOLLOW WORKAROUND HERE:
thrust::device_vector<float> cudaDistanceVector(distancePointer, distancePointer + numPoints * numPoints);
thrust::device_vector<int> cudaClusterVector(clusterPointer, clusterPointer + originalNumPoints * numPoints);
//Find min distance using the thrust min_element divide-and-conquer approach on the device
thrust::device_ptr<float> CDVPtr = cudaDistanceVector.data();
thrust::device_ptr<int> CCVPtr = cudaClusterVector.data();
thrust::device_vector<float>::iterator minIterator = thrust::min_element(thrust::device, CDVPtr, CDVPtr + cudaDistanceVector.size());
//Get value for index of vector
unsigned int index = minIterator - cudaDistanceVector.begin();
//Transform index into row/column data using divide and modulo
//No need for cuda since these are 1 step
unsigned int row = index / numPoints;
unsigned int col = index % numPoints;
//To avoid indexing issues, always remove the rightmost column and bottommost row first
//The smaller of row and column is named leftIndex
//The larger of row and column is named rightIndex
//No need for cuda since these are O(1)
unsigned int rightIndex = 0;
unsigned int leftIndex = 0;
if (row >= col) {
rightIndex = row;
leftIndex = col;
}
else {
rightIndex = col;
leftIndex = row;
}
//Declaring keys to delete from distance vector
//Could not find a way to do this more efficiently using thrust in time
//Issue could potentially be solved by setting two thrust sequences and combining them, but order matters
thrust::device_vector<int> deleteKeys(numPoints * numPoints);
for (int i = 0; i < (numPoints * numPoints); i++) {
if (i % numPoints == leftIndex || i / numPoints == leftIndex || i % numPoints == rightIndex || i / numPoints == rightIndex) {
deleteKeys[i] = 1;
}
}
thrust::device_vector<int> deleteKeysLabels(originalNumPoints * (numPoints));
for (int i = 0; i < (originalNumPoints * (numPoints)); i++) {
if (i / originalNumPoints == leftIndex || i / originalNumPoints == rightIndex) {
deleteKeysLabels[i] = 1;
}
}
//Get columns to merge together
thrust::device_vector<float> mergeRowOne(numPoints);
thrust::copy(thrust::device, CDVPtr + rightIndex * numPoints, CDVPtr + (rightIndex * numPoints) + numPoints, mergeRowOne.begin());
thrust::device_vector<float> mergeRowTwo(numPoints);
thrust::copy(thrust::device, CDVPtr + leftIndex * numPoints, CDVPtr + (leftIndex * numPoints) + numPoints, mergeRowTwo.begin());
//Create new vector containing those two columns
mergeRowOne.insert(mergeRowOne.begin() + numPoints, mergeRowTwo.begin(), mergeRowTwo.begin() + numPoints);
//Get min from each column of mergeRowOne, merge into new vector of minimums
//With help from advice on this thread:
//https://stackoverflow.com/questions/17698969/determining-the-least-element-and-its-position-in-each-matrix-column-with-cuda-t/29841094#29841094
thrust::device_vector<float> distanceMinVector(numPoints);
thrust::device_vector<int> distanceMinIndicies(numPoints);
thrust::reduce_by_key(
thrust::make_transform_iterator(
thrust::make_counting_iterator((int)0),
thrust::placeholders::_1 / 2),
thrust::make_transform_iterator(
thrust::make_counting_iterator((int)0),
thrust::placeholders::_1 / 2) + 2 * numPoints,
thrust::make_zip_iterator(
thrust::make_tuple(
thrust::make_permutation_iterator(
mergeRowOne.begin(),
thrust::make_transform_iterator(
thrust::make_counting_iterator((int)0), (thrust::device, thrust::placeholders::_1 % 2) * numPoints + thrust::placeholders::_1 / 2)),
thrust::make_transform_iterator(
thrust::make_counting_iterator((int)0), thrust::placeholders::_1 % 2))),
thrust::make_discard_iterator(),
thrust::make_zip_iterator(
thrust::make_tuple(
distanceMinVector.begin(),
distanceMinIndicies.begin())),
thrust::equal_to<int>(),
thrust::minimum<thrust::tuple<float, int> >()
);
//Get clusters to merge together; can't use the min-column comparison trick here because we need all values
thrust::device_vector<int> mergeCRowOne(cudaClusterVector.begin() + (rightIndex * originalNumPoints), cudaClusterVector.begin() + (rightIndex * originalNumPoints) + originalNumPoints);
thrust::device_vector<int> mergeCRowTwo(cudaClusterVector.begin() + (leftIndex * originalNumPoints), cudaClusterVector.begin() + (leftIndex * originalNumPoints) + originalNumPoints);
thrust::device_vector<int> newClusterLabels(originalNumPoints);
int externalCountClustering = 0;
for (int i = 0; i < originalNumPoints; i++) {
if (mergeCRowOne[i] != -1) {
newClusterLabels[externalCountClustering] = ((int) mergeCRowOne[i]);
externalCountClustering++;
}
if (mergeCRowTwo[i] != -1) {
newClusterLabels[externalCountClustering] = ((int) mergeCRowTwo[i]);
externalCountClustering++;
}
}
for (int i = externalCountClustering; i < originalNumPoints; i++) {
newClusterLabels[i] = ((int)-1);
}
//Make new cluster vector
thrust::device_vector<int> cudaClusterVectorNew(originalNumPoints * numPoints-1);
thrust::device_vector<int>::iterator delIteratorLabel = thrust::remove_if(cudaClusterVector.begin(), cudaClusterVector.begin() + cudaClusterVector.size(), deleteKeysLabels.begin(), delPoint());
newClusterLabels.insert(newClusterLabels.begin() + originalNumPoints, cudaClusterVector.begin(), cudaClusterVector.begin() + cudaClusterVector.size() - (2*originalNumPoints));
//Remove the minvalue from minarray of new cluster, top left most value will always be 999999.9 once inserted.
distanceMinVector.erase(distanceMinVector.begin() + rightIndex);
distanceMinVector.erase(distanceMinVector.begin() + leftIndex);
//Delete old clusters from distance vector
thrust::device_vector<float>::iterator delIterator = thrust::remove_if(cudaDistanceVector.begin(), cudaDistanceVector.begin() + cudaDistanceVector.size(), deleteKeys.begin(), delPoint());
//Insert new min row for new cluster into distance matrix
distanceMinVector.insert(distanceMinVector.begin() + numPoints - 2, cudaDistanceVector.begin(), cudaDistanceVector.begin() + cudaDistanceVector.size());
//Creating the new distance vector
//Fill the new vector with data (I know of no way in CUDA to insert a column on the left without a costlier approach),
//so a sequential for loop is used here due to time constraints on the project
numPoints = numPoints - 1;
thrust::device_vector<float> cudaDistanceVectorNew((numPoints) * (numPoints));
for (int i = 0; i < ((numPoints) * (numPoints)); i++) {
int rowTemp = i / numPoints;
int colTemp = i % numPoints;
if (colTemp == 0) {
if (rowTemp == 0) {
cudaDistanceVectorNew[i]=((float) 999999.9);
}
else {
cudaDistanceVectorNew[i]=(distanceMinVector[rowTemp-1]);
}
}
else {
cudaDistanceVectorNew[i] = (distanceMinVector[(rowTemp)*(numPoints-1)+(colTemp-1)]);
}
}
//Send to next iteration
float* distanceNewPtr = thrust::raw_pointer_cast(cudaDistanceVectorNew.data());
int* clusterNewPtr = thrust::raw_pointer_cast(newClusterLabels.data());
if (numPoints != numCluster) {
agglomerativeShortestLinkCuda(numPoints, originalNumPoints, numCluster, distanceNewPtr, clusterNewPtr);
}
else {
for (int i = 0; i < numPoints; i++) {
for (int j = 0; j < originalNumPoints; j++) {
std::cout << newClusterLabels[(i*originalNumPoints)+j] << " ";
}
std::cout << "\n";
}
}
}
//Prompts users for dataset generation and then starts clustering on service specified
void setup(bool cuda) {
std::cout << "Enter number of datapoints to generate: ";
int numPoints = -1;
std::cin >> numPoints;
std::cout << "Enter cluster number to stop generating at: ";
int numCluster = -1;
std::cin >> numCluster;
std::cout << "Enter x max (less than 500000): ";
int xMax = -1;
std::cin >> xMax;
std::cout << "Enter y max (less than 500000): ";
int yMax = -1;
std::cin >> yMax;
if (numPoints < 0 || yMax > 500000 || yMax < 0 || xMax > 500000 || xMax < 0) {
std::cout << "Unacceptable Values, try again \n";
return;
}
//Setup data
nc::NdArray<float> dataSet = generateDataTestingForCLass(numPoints, xMax, yMax, false);
nc::NdArray<float> euclidianDistances = euclidianDistanceMatrix(dataSet);
nc::NdArray<int> clusterAssignments = initialClusterAssignment(numPoints, false);
clock_t timer;
std::cout << "\nStarting with euclidian distance matrix: \n";
euclidianDistances.print();
std::cout << "\nStarting with each point in seperate clustering. \n";
if (!cuda) {
std::cout << "\nStarting sequential. \n";
timer = clock();
agglomerativeShortestLinkSequential(numPoints, numCluster, euclidianDistances, clusterAssignments);
float dt = clock() - timer;
std::cout << "took " << dt << " ms \n";
}
else {
//Prepare data for cuda
std::vector<float> distanceVector = euclidianDistances.toStlVector();
std::vector<int> clusterVector = clusterAssignments.toStlVector();
float* distancePointer = distanceVector.data();
int* clusterPointer = clusterVector.data();
//Calling this apparently makes CUDA start faster for loading Thrust
hipFree(0);
std::cout << "\nStarting CUDA. \n";
timer = clock();
agglomerativeShortestLinkCuda(numPoints, numPoints, numCluster, distancePointer, clusterPointer);
float dt = clock() - timer;
std::cout << "took " << dt << " ms \n";
}
}
//Main GUI loop
int main()
{
bool exitLoop = false;
while (!exitLoop) {
std::cout << "Enter 1 to run sequential, Enter 2 to run parallel, Other key to exit: ";
int option = -1;
std::cin >> option;
if (std::cin.good()) {
if (option == 1) {
setup(false);
}
else if (option == 2) {
setup(true);
}
else {
return 0;
}
}
else {
return 0;
}
}
return 0;
}
|
302b2f7d423c09d538e9158767eef44c43038795.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <curand.h>
#include <curand_kernel.h>
#include "NumCpp.hpp"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <thrust/iterator/transform_iterator.h>
#include <math.h>
#include <algorithm>
#include <cstdlib>
#include <iostream>
#include <iomanip>
#include <thrust/extrema.h>
#include <thrust/execution_policy.h>
#include <thrust/remove.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/sequence.h>
#include <stdio.h>
cudaError_t addWithCuda(int* c, const int* a, const int* b, unsigned int size);
__global__ void addKernel(int* c, const int* a, const int* b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
nc::NdArray<float> generateDataTestingForCLass(int datapoints, int constraintX, int constraintY, bool garunteedGoodClustering, int numclusters = 3) {
nc::NdArray<float> generatedDataset = nc::empty<float>(nc::Shape(1, 2));
for (int i = 0; i < datapoints; i++) {
if (garunteedGoodClustering) {
// nc::NdArray<float> randomPointX = nc::random::uniform<float>(nc::Shape(1), 0, constraintX);
// nc::NdArray<float> randomPointY = nc::random::uniform<float>(nc::Shape(1), 0, constraintY);
// nc::NdArray<float> randomPoint = nc::append<float>(randomPointX, randomPointY, nc::Axis::NONE);
// nc::take()
// if
// generatedDataset = nc::append<float>(generatedDataset, randomPoint, nc::Axis::ROW);
// nc::norm();
}
else {
nc::NdArray<float> randomPointX = nc::random::uniform<float>(nc::Shape(1), (float)0.0, (float)constraintX);
nc::NdArray<float> randomPointY = nc::random::uniform<float>(nc::Shape(1), (float)0.0, (float)constraintY);
nc::NdArray<float> randomPoint = nc::append<float>(randomPointX, randomPointY, nc::Axis::NONE);
generatedDataset = nc::append<float>(generatedDataset, randomPoint, nc::Axis::ROW);
}
}
generatedDataset = nc::deleteIndices(generatedDataset, 0, nc::Axis::ROW);
return generatedDataset;
}
nc::NdArray<float> euclidianDistanceMatrix(nc::NdArray<float> dataset) {
nc::NdArray<float> xPoints = nc::deleteIndices(dataset, 1, nc::Axis::COL);
nc::NdArray<float> yPoints = nc::deleteIndices(dataset, 0, nc::Axis::COL);
std::pair<nc::NdArray<float>, nc::NdArray<float>> meshpairX = nc::meshgrid(xPoints, xPoints);
std::pair<nc::NdArray<float>, nc::NdArray<float>> meshpairY = nc::meshgrid(yPoints, yPoints);
nc::NdArray<float> xDistances = nc::abs(std::get<0>(meshpairX) - std::get<1>(meshpairX));
nc::NdArray<float> yDistances = nc::abs(std::get<0>(meshpairY) - std::get<1>(meshpairY));
nc::NdArray<float> euclidianDistances = nc::sqrt(nc::power(xDistances, 2) + nc::power(yDistances, 2));
euclidianDistances = nc::replace(euclidianDistances, (float)0.0, (float)999999.9);
return euclidianDistances;
}
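//Note on the function above (derived from the code): for n input points it returns an n x n
//symmetric matrix of pairwise Euclidean distances built from the meshgrid pairs; exact-zero
//distances (in particular the diagonal self-distances) are replaced with the 999999.9 sentinel
//so they are never selected later as the minimum link.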
nc::NdArray<int> initialClusterAssignment(int datapoints, bool garunteedGoodClustering, int numclusters = 3) {
nc::NdArray<int> clusterAssignment = nc::arange<int>(0, datapoints);
clusterAssignment = clusterAssignment.reshape(datapoints, 1);
nc::NdArray<int> clusterZeros = nc::zeros<int>(datapoints, datapoints - 1);
clusterZeros = nc::where(clusterZeros == 0, -1, -1);
clusterAssignment = nc::append<int>(clusterAssignment, clusterZeros, nc::Axis::COL);
return clusterAssignment;
}
void agglomerativeShortestLinkSequential(int datapoints, int numClusters, nc::NdArray<float> distances, nc::NdArray<int> clusterAssignments) {
//Find minimum distance and record index and value
nc::NdArray<float> distanceAssessment = nc::where(distances > (float)0.0, distances, (float)999999.9);
nc::NdArray<float> min = nc::min(distanceAssessment);
float minValue = min(0, 0);
nc::NdArray<nc::uint32> minIndicies = nc::argmin(distanceAssessment, nc::Axis::NONE);
int minInt = int(minIndicies(0, 0));
//Always cluster left
int row = minInt / datapoints;
int column = minInt % datapoints;
int removal = 0;
int rewrite = 0;
if (row >= column) {
removal = row;
rewrite = column;
}
else {
removal = column;
rewrite = row;
}
//Merges removed columns
nc::NdArray<float> firstMergePointDistances = distances(distances.rSlice(), removal);
nc::NdArray<float> secondMergePointDistances = distances(distances.rSlice(), rewrite);
nc::NdArray<float> mergeSet = nc::stack({ firstMergePointDistances, secondMergePointDistances }, nc::Axis::COL);
mergeSet = nc::amin(mergeSet, nc::Axis::COL);
nc::NdArray<float> mergeSetRow = nc::deleteIndices(mergeSet, removal, nc::Axis::COL);
mergeSetRow = nc::deleteIndices(mergeSetRow, rewrite, nc::Axis::COL);
nc::NdArray<float> negitiveOne = { -1.0 };
mergeSetRow = nc::append<float>(negitiveOne, mergeSetRow, nc::Axis::NONE);
nc::NdArray<float> mergeSetCol = nc::deleteIndices(mergeSetRow, 0, nc::Axis::COL);
//Clusters points together based on min distance calculated
int clustersOG = clusterAssignments.shape().cols;
nc::NdArray<int> clusterZeros = nc::zeros<int>(1, clustersOG);
clusterZeros = nc::where(clusterZeros == 0, -1, -1);
nc::NdArray<int> mergeInClusterOne = clusterAssignments.row(removal);
for (int value : mergeInClusterOne) {
if (value > -1) {
nc::NdArray<int> valueint = { value };
clusterZeros = nc::deleteIndices(clusterZeros, clustersOG - 1, nc::Axis::COL);
clusterZeros = nc::append<int>(valueint, clusterZeros, nc::Axis::COL);
}
}
nc::NdArray<int> mergeInClusterTwo = clusterAssignments.row(rewrite);
for (int value : mergeInClusterTwo) {
if (value > -1) {
nc::NdArray<int> valueint = { value };
clusterZeros = nc::deleteIndices(clusterZeros, clustersOG - 1, nc::Axis::COL);
clusterZeros = nc::append<int>(valueint, clusterZeros, nc::Axis::COL);
}
}
//Remove all values we no longer need because they were in a row or col with min distance
//Replace 2 rows and 2 cols removed with 1 row and col for new cluster
clusterAssignments = nc::deleteIndices(clusterAssignments, removal, nc::Axis::ROW);
clusterAssignments = nc::deleteIndices(clusterAssignments, rewrite, nc::Axis::ROW);
clusterAssignments = nc::append<int>(clusterZeros, clusterAssignments, nc::Axis::ROW);
distances = nc::deleteIndices(distances, removal, nc::Axis::ROW);
distances = nc::deleteIndices(distances, removal, nc::Axis::COL);
distances = nc::deleteIndices(distances, rewrite, nc::Axis::ROW);
distances = nc::deleteIndices(distances, rewrite, nc::Axis::COL);
distances = nc::stack({ mergeSetCol.reshape(datapoints - 2,1), distances }, nc::Axis::COL);
distances = nc::stack({ mergeSetRow, distances }, nc::Axis::ROW);
if (datapoints - 1 > numClusters) {
datapoints = datapoints - 1;
agglomerativeShortestLinkSequential(datapoints, numClusters, distances, clusterAssignments);
}
else {
clusterAssignments.print();
}
}
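//Worked example of one merge step above (illustrative numbers only): with 4 points whose closest
//pair is points 1 and 3, removal = 3 and rewrite = 1. Rows/columns 1 and 3 are deleted from the
//4x4 distance matrix, and a new row/column 0 is prepended whose entries are the element-wise
//minimum of the two deleted columns (the single-link distance from the merged cluster to every
//remaining point); the new (0,0) self-entry is set to -1, which the next pass treats as the
//999999.9 sentinel. The recursion then continues on the resulting 3x3 matrix.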
struct gtz {
__device__ bool operator() (double x) { return x > 0.; }
};
struct delPoint {
__device__ bool operator() (int x) { return (x == 1); }
};
typedef thrust::tuple<int, float> argMinType;
void agglomerativeShortestLinkCuda(int numPoints, int originalNumPoints, int numCluster, float* distancePointer, int* clusterPointer) {
//Convert Distance Vector to Thrust Vector for parallel computation
//https://github.com/NVIDIA/thrust/
//WARNING: ACTIVELY BUGGED IN NEWEST VERSION OF CUDA
//IF YOU HAVE CUDA 11.0 OR 11.1, THIS WILL NOT WORK
//FOLLOW WORKAROUND HERE:
thrust::device_vector<float> cudaDistanceVector(distancePointer, distancePointer + numPoints * numPoints);
thrust::device_vector<int> cudaClusterVector(clusterPointer, clusterPointer + originalNumPoints * numPoints);
//Find min distance using the thrust min_element divide-and-conquer approach on the device
thrust::device_ptr<float> CDVPtr = cudaDistanceVector.data();
thrust::device_ptr<int> CCVPtr = cudaClusterVector.data();
thrust::device_vector<float>::iterator minIterator = thrust::min_element(thrust::device, CDVPtr, CDVPtr + cudaDistanceVector.size());
//Get value for index of vector
unsigned int index = minIterator - cudaDistanceVector.begin();
//Transform index into row/column data using divide and modulo
//No need for cuda since these are 1 step
unsigned int row = index / numPoints;
unsigned int col = index % numPoints;
//To avoid indexing issues, always remove the rightmost column and bottommost row first
//The smaller of row and column is named leftIndex
//The larger of row and column is named rightIndex
//No need for cuda since these are O(1)
unsigned int rightIndex = 0;
unsigned int leftIndex = 0;
if (row >= col) {
rightIndex = row;
leftIndex = col;
}
else {
rightIndex = col;
leftIndex = row;
}
//Declaring keys to delete from distance vector
//Could not find a way to do this more efficiently using thrust in time
//The issue could potentially be solved by setting two thrust sequences and combining them, but order
//matters; see the sketch after this function for one possible on-device alternative
thrust::device_vector<int> deleteKeys(numPoints * numPoints);
for (int i = 0; i < (numPoints * numPoints); i++) {
if (i % numPoints == leftIndex || i / numPoints == leftIndex || i % numPoints == rightIndex || i / numPoints == rightIndex) {
deleteKeys[i] = 1;
}
}
thrust::device_vector<int> deleteKeysLabels(originalNumPoints * (numPoints));
for (int i = 0; i < (originalNumPoints * (numPoints)); i++) {
if (i / originalNumPoints == leftIndex || i / originalNumPoints == rightIndex) {
deleteKeysLabels[i] = 1;
}
}
//Get columns to merge together
thrust::device_vector<float> mergeRowOne(numPoints);
thrust::copy(thrust::device, CDVPtr + rightIndex * numPoints, CDVPtr + (rightIndex * numPoints) + numPoints, mergeRowOne.begin());
thrust::device_vector<float> mergeRowTwo(numPoints);
thrust::copy(thrust::device, CDVPtr + leftIndex * numPoints, CDVPtr + (leftIndex * numPoints) + numPoints, mergeRowTwo.begin());
//Create new vector containing those two columns
mergeRowOne.insert(mergeRowOne.begin() + numPoints, mergeRowTwo.begin(), mergeRowTwo.begin() + numPoints);
//Get min from each column of mergeRowOne, merge into new vector of minimums
//With help from advice on this thread:
//https://stackoverflow.com/questions/17698969/determining-the-least-element-and-its-position-in-each-matrix-column-with-cuda-t/29841094#29841094
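//How the reduce_by_key call below computes per-column minimums (derived from the code):
//mergeRowOne now holds the two candidate rows back to back (2 * numPoints values). The key
//iterator emits 0,0,1,1,2,2,... so consecutive elements share a key, and the permutation
//iterator pairs element j of the first row with element j of the second row under key j.
//Reducing each pair with thrust::minimum over (distance, row-flag) tuples writes the smaller
//distance of every column into distanceMinVector and the row it came from into distanceMinIndicies.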
thrust::device_vector<float> distanceMinVector(numPoints);
thrust::device_vector<int> distanceMinIndicies(numPoints);
thrust::reduce_by_key(
thrust::make_transform_iterator(
thrust::make_counting_iterator((int)0),
thrust::placeholders::_1 / 2),
thrust::make_transform_iterator(
thrust::make_counting_iterator((int)0),
thrust::placeholders::_1 / 2) + 2 * numPoints,
thrust::make_zip_iterator(
thrust::make_tuple(
thrust::make_permutation_iterator(
mergeRowOne.begin(),
thrust::make_transform_iterator(
thrust::make_counting_iterator((int)0), (thrust::device, thrust::placeholders::_1 % 2) * numPoints + thrust::placeholders::_1 / 2)),
thrust::make_transform_iterator(
thrust::make_counting_iterator((int)0), thrust::placeholders::_1 % 2))),
thrust::make_discard_iterator(),
thrust::make_zip_iterator(
thrust::make_tuple(
distanceMinVector.begin(),
distanceMinIndicies.begin())),
thrust::equal_to<int>(),
thrust::minimum<thrust::tuple<float, int> >()
);
//Get clusters to merge together; can't use the min-column comparison trick here because we need all values
thrust::device_vector<int> mergeCRowOne(cudaClusterVector.begin() + (rightIndex * originalNumPoints), cudaClusterVector.begin() + (rightIndex * originalNumPoints) + originalNumPoints);
thrust::device_vector<int> mergeCRowTwo(cudaClusterVector.begin() + (leftIndex * originalNumPoints), cudaClusterVector.begin() + (leftIndex * originalNumPoints) + originalNumPoints);
thrust::device_vector<int> newClusterLabels(originalNumPoints);
int externalCountClustering = 0;
for (int i = 0; i < originalNumPoints; i++) {
if (mergeCRowOne[i] != -1) {
newClusterLabels[externalCountClustering] = ((int) mergeCRowOne[i]);
externalCountClustering++;
}
if (mergeCRowTwo[i] != -1) {
newClusterLabels[externalCountClustering] = ((int) mergeCRowTwo[i]);
externalCountClustering++;
}
}
for (int i = externalCountClustering; i < originalNumPoints; i++) {
newClusterLabels[i] = ((int)-1);
}
//Make new cluster vector
thrust::device_vector<int> cudaClusterVectorNew(originalNumPoints * numPoints-1);
thrust::device_vector<int>::iterator delIteratorLabel = thrust::remove_if(cudaClusterVector.begin(), cudaClusterVector.begin() + cudaClusterVector.size(), deleteKeysLabels.begin(), delPoint());
newClusterLabels.insert(newClusterLabels.begin() + originalNumPoints, cudaClusterVector.begin(), cudaClusterVector.begin() + cudaClusterVector.size() - (2*originalNumPoints));
//Remove the minvalue from minarray of new cluster, top left most value will always be 999999.9 once inserted.
distanceMinVector.erase(distanceMinVector.begin() + rightIndex);
distanceMinVector.erase(distanceMinVector.begin() + leftIndex);
//Delete old clusters from distance vector
thrust::device_vector<float>::iterator delIterator = thrust::remove_if(cudaDistanceVector.begin(), cudaDistanceVector.begin() + cudaDistanceVector.size(), deleteKeys.begin(), delPoint());
//Insert new min row for new cluster into distance matrix
distanceMinVector.insert(distanceMinVector.begin() + numPoints - 2, cudaDistanceVector.begin(), cudaDistanceVector.begin() + cudaDistanceVector.size());
//Creating the new distance vector
//Fill the new vector with data (I know of no way in CUDA to insert a column on the left without a costlier approach),
//so a sequential for loop is used here due to time constraints on the project
numPoints = numPoints - 1;
thrust::device_vector<float> cudaDistanceVectorNew((numPoints) * (numPoints));
for (int i = 0; i < ((numPoints) * (numPoints)); i++) {
int rowTemp = i / numPoints;
int colTemp = i % numPoints;
if (colTemp == 0) {
if (rowTemp == 0) {
cudaDistanceVectorNew[i]=((float) 999999.9);
}
else {
cudaDistanceVectorNew[i]=(distanceMinVector[rowTemp-1]);
}
}
else {
cudaDistanceVectorNew[i] = (distanceMinVector[(rowTemp)*(numPoints-1)+(colTemp-1)]);
}
}
//Send to next iteration
float* distanceNewPtr = thrust::raw_pointer_cast(cudaDistanceVectorNew.data());
int* clusterNewPtr = thrust::raw_pointer_cast(newClusterLabels.data());
if (numPoints != numCluster) {
agglomerativeShortestLinkCuda(numPoints, originalNumPoints, numCluster, distanceNewPtr, clusterNewPtr);
}
else {
for (int i = 0; i < numPoints; i++) {
for (int j = 0; j < originalNumPoints; j++) {
std::cout << newClusterLabels[(i*originalNumPoints)+j] << " ";
}
std::cout << "\n";
}
}
}
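//Hedged sketch, not called anywhere in this program: one possible on-device replacement for the
//host-side deleteKeys loops above, assuming the same row/column marking semantics. The names
//markRowCol and buildDeleteKeysOnDevice are illustrative only.
#include <thrust/transform.h>
#include <thrust/iterator/counting_iterator.h>
struct markRowCol {
    int numPoints;
    int leftIndex;
    int rightIndex;
    markRowCol(int n, int l, int r) : numPoints(n), leftIndex(l), rightIndex(r) {}
    __host__ __device__ int operator()(int i) const {
        int row = i / numPoints;
        int col = i % numPoints;
        //Mark every entry that shares a row or column with either merged index
        return (row == leftIndex || col == leftIndex || row == rightIndex || col == rightIndex) ? 1 : 0;
    }
};
void buildDeleteKeysOnDevice(thrust::device_vector<int>& deleteKeys, int numPoints, int leftIndex, int rightIndex) {
    thrust::transform(thrust::device,
        thrust::make_counting_iterator(0),
        thrust::make_counting_iterator(numPoints * numPoints),
        deleteKeys.begin(),
        markRowCol(numPoints, leftIndex, rightIndex));
}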
//Prompts users for dataset generation and then starts clustering on service specified
void setup(bool cuda) {
std::cout << "Enter number of datapoints to generate: ";
int numPoints = -1;
std::cin >> numPoints;
std::cout << "Enter cluster number to stop generating at: ";
int numCluster = -1;
std::cin >> numCluster;
std::cout << "Enter x max (less than 500000): ";
int xMax = -1;
std::cin >> xMax;
std::cout << "Enter y max (less than 500000): ";
int yMax = -1;
std::cin >> yMax;
if (numPoints < 0 || yMax > 500000 || yMax < 0 || xMax > 500000 || xMax < 0) {
std::cout << "Unacceptable Values, try again \n";
return;
}
//Setup data
nc::NdArray<float> dataSet = generateDataTestingForCLass(numPoints, xMax, yMax, false);
nc::NdArray<float> euclidianDistances = euclidianDistanceMatrix(dataSet);
nc::NdArray<int> clusterAssignments = initialClusterAssignment(numPoints, false);
clock_t timer;
std::cout << "\nStarting with euclidian distance matrix: \n";
euclidianDistances.print();
std::cout << "\nStarting with each point in seperate clustering. \n";
if (!cuda) {
std::cout << "\nStarting sequential. \n";
timer = clock();
agglomerativeShortestLinkSequential(numPoints, numCluster, euclidianDistances, clusterAssignments);
float dt = clock() - timer;
std::cout << "took " << dt << " ms \n";
}
else {
//Prepare data for cuda
std::vector<float> distanceVector = euclidianDistances.toStlVector();
std::vector<int> clusterVector = clusterAssignments.toStlVector();
float* distancePointer = distanceVector.data();
int* clusterPointer = clusterVector.data();
//Calling this apparently makes CUDA start faster for loading Thrust
cudaFree(0);
std::cout << "\nStarting CUDA. \n";
timer = clock();
agglomerativeShortestLinkCuda(numPoints, numPoints, numCluster, distancePointer, clusterPointer);
float dt = clock() - timer;
std::cout << "took " << dt << " ms \n";
}
}
//Main GUI loop
int main()
{
bool exitLoop = false;
while (!exitLoop) {
std::cout << "Enter 1 to run sequential, Enter 2 to run parallel, Other key to exit: ";
int option = -1;
std::cin >> option;
if (std::cin.good()) {
if (option == 1) {
setup(false);
}
else if (option == 2) {
setup(true);
}
else {
return 0;
}
}
else {
return 0;
}
}
return 0;
}
|
7802f2b645469b3ee03a99b4ed68bedce26dcbce.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// MIT License
// Copyright (c) 2021 Mike Gowanlock
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#include "kernel.h"
#include "structs.h"
#include <math.h>
#include <thrust/execution_policy.h>
#include <thrust/binary_search.h>
#include "params.h"
__device__ void evaluateCellEstimate(unsigned int* nCells, unsigned int* indexes, struct gridCellLookup * gridCellLookupArr, unsigned int* nNonEmptyCells, DTYPE* database, DTYPE *epsilon, struct grid * index, unsigned int * indexLookupArr, DTYPE* point, unsigned int* cnt, bool differentCell);
__device__ void evaluateCell(unsigned int* nCells, unsigned int* indexes, struct gridCellLookup * gridCellLookupArr, unsigned int* nNonEmptyCells, DTYPE* database, DTYPE* epsilon, struct grid * index, unsigned int * indexLookupArr, DTYPE* point, unsigned int* cnt,int* pointIDKey, int* pointInDistVal, int pointIdx, bool differentCell, unsigned int* nDCellIDs, CTYPE* workCounts);
__device__ void swap(unsigned int* a, unsigned int* b) {
unsigned int temp = *a;
*a = *b;
*b= temp;
}
__device__ void sortCell(unsigned int* list, DTYPE* database, int length, int tid){
bool odd=false;
for(int i=0; i<length; i++) {
for(int j=(tid*2)+(int)odd; j<length-1; j+=32) {
if(database[list[j]*GPUNUMDIM] > database[list[j+1]*GPUNUMDIM]) {
swap(&list[j], &list[j+1]);
}
}
odd = !odd;
}
}
__device__ void seqSortCell(unsigned int* list, DTYPE* database, int length){
DTYPE min;
int minIdx;
for(int i=0; i<length-1; i++ ) {
min = database[list[i]*GPUNUMDIM];
minIdx=i;
for(int j=i+1; j<length; j++) {
if(database[list[j]*GPUNUMDIM] < min) {
min = database[list[j]*GPUNUMDIM];
minIdx = j;
}
}
swap(&list[i], &list[minIdx]);
}
}
__global__ void kernelSortPointsInCells(DTYPE* database, struct grid * index, unsigned int* indexLookupArr, unsigned int nNonEmptyCells) {
int tid = threadIdx.x + (blockIdx.x*BLOCKSIZE);
int warpId = tid/32;
int totalWarps = (gridDim.x*BLOCKSIZE)/32;
int sortDim=0;
if(GPUNUMDIM > NUMINDEXEDDIM)
sortDim = NUMINDEXEDDIM;
for(int i=warpId; i<nNonEmptyCells; i+=totalWarps) {
if(index[i].indexmin < index[i].indexmax) {
sortCell(indexLookupArr+index[i].indexmin, database+sortDim, (index[i].indexmax-index[i].indexmin)+1, threadIdx.x%32);
}
}
}
/////////////////////////////////////////
//THE RESULTS GET GENERATED AS KEY/VALUE PAIRS IN TWO ARRAYS
//KEY- THE POINT ID BEING SEARCHED
//VALUE- A POINT ID WITHIN EPSILON OF THE KEY POINT THAT WAS SEARCHED
//THE RESULTS ARE SORTED IN SITU ON THE DEVICE BY THRUST AFTER THE KERNEL FINISHES
/////////////////////////////////////////
__device__ uint64_t getLinearID_nDimensionsGPU(unsigned int * indexes, unsigned int * dimLen, unsigned int nDimensions) {
uint64_t offset = 0;
uint64_t multiplier = 1;
for (int i = 0; i<nDimensions; i++){
offset += (uint64_t)indexes[i] * multiplier;
multiplier *= dimLen[i];
}
return offset;
}
//This version is the same as the batch estimator
//One query point per GPU thread
// unsigned int *debug1, unsigned int *debug2 - ignore, debug values
// unsigned int *N - total GPU threads for the kernel
// unsigned int * queryPts -- the Query Points to be searched on the GPU
// unsigned int * offset - This is to offset into every nth data point, e.g., every 100th point calculates its neighbors
// unsigned int *batchNum - The batch number being executed, used to calculate the point being processed
// DTYPE* database - The points in the database as 1 array
// DTYPE* epsilon - distance threshold
// struct grid * index - each non-empty grid cell is one of these, stores the indices into indexLookupArray that coincide with the data points in the database that are in the cell
// unsigned int * indexLookupArr - array of the size of database, has the indices of the datapoints in the database stored contiguously for each grid cell. each grid index cell references this
// struct gridCellLookup * gridCellLookupArr, - lookup array to the grid cells, needed to find if a grid cell exists (this is binary searched). Maps the location of the non-empty grid cells in grid * index to their linearized (1-D) array
// DTYPE* minArr - The minimum edge of the grid in each dimension
// unsigned int * nCells - The total number of cells in each dimension (if all were indexed), can compute the spatial extent, with minArr[0]+nCells[0]*epsilon, in the 1st dimension
// unsigned int * cnt - the result set size
// unsigned int * nNonEmptyCells - the number of non-empty cells in total, this is the size of the gridCellLookupArr
// int * pointIDKey, int * pointInDistVal - result set to be sorted as key/value pairs
//unsigned int * refPointBeginId -- the id that begins the reference points all ids < the value are normal data points (from the queryPts Array)
__global__ void kernelNDGridIndexGlobal(unsigned int *debug1, unsigned int *debug2, unsigned int *N, unsigned int * queryPts,
unsigned int * offset, unsigned int *batchNum, DTYPE* database, DTYPE* epsilon, struct grid * index, unsigned int * indexLookupArr,
struct gridCellLookup * gridCellLookupArr, DTYPE* minArr, unsigned int * nCells, unsigned int * cnt,
unsigned int * nNonEmptyCells, int * pointIDKey, int * pointInDistVal, CTYPE* workCounts, unsigned int * refPointBeginId)
{
unsigned int tid=threadIdx.x+ (blockIdx.x*BLOCKSIZE);
if (tid>=*N){
return;
}
//original
// unsigned int pointIdx=tid*(*offset)+(*batchNum);
//considering the query point array
unsigned int pointIdx=queryPts[tid*(*offset)+(*batchNum)];
//The offset into the database, taking into consideration the length of each dimension
//original
// unsigned int pointOffset=tid*(GPUNUMDIM)*(*offset)+(*batchNum)*(GPUNUMDIM);
unsigned int pointOffset=pointIdx*(GPUNUMDIM);
//1.5 epsilon for the reference points
DTYPE eps=*epsilon;
if (pointIdx>=(*refPointBeginId))
{
eps=*epsilon*1.5;
// printf("gpu pnt: %u, %f\n",pointIdx,eps);
// unsigned int idx=atomicAdd(debug1,int(1));
}
//make a local copy of the point
DTYPE point[GPUNUMDIM];
for (int i=0; i<GPUNUMDIM; i++){
point[i]=database[pointOffset+i];
}
//calculate the coords of the Cell for the point
//and the min/max ranges in each dimension
unsigned int nDCellIDs[NUMINDEXEDDIM];
unsigned int nDMinCellIDs[NUMINDEXEDDIM];
unsigned int nDMaxCellIDs[NUMINDEXEDDIM];
for (int i=0; i<NUMINDEXEDDIM; i++){
nDCellIDs[i]=(point[i]-minArr[i])/(*epsilon);
nDMinCellIDs[i]=max(0,nDCellIDs[i]-1); //boundary conditions (don't go beyond cell 0)
nDMaxCellIDs[i]=min(nCells[i]-1,nDCellIDs[i]+1); //boundary conditions (don't go beyond the maximum number of cells)
}
unsigned int indexes[NUMINDEXEDDIM];
unsigned int loopRng[NUMINDEXEDDIM];
// for (loopRng[0]=rangeFilteredCellIdsMin[0]; loopRng[0]<=rangeFilteredCellIdsMax[0]; loopRng[0]++)
// for (loopRng[1]=rangeFilteredCellIdsMin[1]; loopRng[1]<=rangeFilteredCellIdsMax[1]; loopRng[1]++)
for (loopRng[0]=nDMinCellIDs[0]; loopRng[0]<=nDMaxCellIDs[0]; loopRng[0]++)
for (loopRng[1]=nDMinCellIDs[1]; loopRng[1]<=nDMaxCellIDs[1]; loopRng[1]++)
{ //beginning of loop body
for (int x=0; x<NUMINDEXEDDIM; x++){
indexes[x]=loopRng[x];
}
//original
// evaluateCell(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr, point, cnt, pointIDKey, pointInDistVal, pointIdx, false, nDCellIDs, workCounts);
//with 1.5 eps for detecting merges if the point is a reference point
evaluateCell(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, &eps, index, indexLookupArr, point, cnt, pointIDKey, pointInDistVal, pointIdx, false, nDCellIDs, workCounts);
} //end loop body
}
__forceinline__ __device__ void evalPoint(unsigned int* indexLookupArr, int k, DTYPE* database, DTYPE* epsilon, DTYPE* point, unsigned int* cnt, int* pointIDKey, int* pointInDistVal, int pointIdx)
{
DTYPE runningTotalDist=0;
unsigned int dataIdx=indexLookupArr[k];
for (int l=0; l<GPUNUMDIM; l++){
runningTotalDist+=(database[dataIdx*GPUNUMDIM+l]-point[l])*(database[dataIdx*GPUNUMDIM+l]-point[l]);
}
if (sqrt(runningTotalDist)<=(*epsilon)){
unsigned int idx=atomicAdd(cnt,int(1));
pointIDKey[idx]=pointIdx;
pointInDistVal[idx]=dataIdx;
}
}
__device__ void evaluateCell(unsigned int* nCells, unsigned int* indexes, struct gridCellLookup * gridCellLookupArr, unsigned int* nNonEmptyCells, DTYPE* database, DTYPE* epsilon, struct grid * index, unsigned int * indexLookupArr, DTYPE* point, unsigned int* cnt, int* pointIDKey, int* pointInDistVal, int pointIdx, bool differentCell, unsigned int* nDCellIDs, CTYPE* workCounts) {
uint64_t calcLinearID=getLinearID_nDimensionsGPU(indexes, nCells, NUMINDEXEDDIM);
//compare the linear ID with the gridCellLookupArr to determine if the cell is non-empty: this can happen because one point says
//a cell in a particular dimension is non-empty, but that's because it was related to a different point (not adjacent to the query point)
struct gridCellLookup tmp;
tmp.gridLinearID=calcLinearID;
//find if the cell is non-empty
if (thrust::binary_search(thrust::seq, gridCellLookupArr, gridCellLookupArr+ (*nNonEmptyCells), gridCellLookup(tmp))){
//compute the neighbors for the adjacent non-empty cell
struct gridCellLookup * resultBinSearch=thrust::lower_bound(thrust::seq, gridCellLookupArr, gridCellLookupArr+(*nNonEmptyCells), gridCellLookup(tmp));
unsigned int GridIndex=resultBinSearch->idx;
for (int k=index[GridIndex].indexmin; k<=index[GridIndex].indexmax; k++){
evalPoint(indexLookupArr, k, database, epsilon, point, cnt, pointIDKey, pointInDistVal, pointIdx);
}
}//end if binary search
}
//Kernel brute forces to generate the neighbor table for each point in the database
__global__ void kernelBruteForce(unsigned int *N, unsigned int *debug1, unsigned int *debug2, DTYPE* epsilon, unsigned long long int * cnt, DTYPE* database, int * pointIDKey, int * pointInDistVal) {
unsigned int tid=threadIdx.x+ (blockIdx.x*BLOCKSIZE);
if (tid>=*N){
return;
}
int dataOffset=tid*GPUNUMDIM;
DTYPE runningDist=0;
//compare my point to every other point
for (int i=0; i<(*N); i++)
{
runningDist=0;
for (int j=0; j<GPUNUMDIM; j++){
runningDist+=(database[(i*GPUNUMDIM)+j]-database[dataOffset+j])*(database[(i*GPUNUMDIM)+j]-database[dataOffset+j]);
}
//if within epsilon:
if ((sqrt(runningDist))<=(*epsilon)){
atomicAdd(cnt, (unsigned long long int)1);
}
}
return;
}
//Need to use the query points to get a good estimate of the total result set size
__global__ void kernelNDGridIndexBatchEstimator(unsigned int *debug1, unsigned int *debug2, unsigned int *N,
unsigned int * sampleOffset, DTYPE* database, unsigned int *queryPts, DTYPE* epsilon, struct grid * index, unsigned int * indexLookupArr,
struct gridCellLookup * gridCellLookupArr, DTYPE* minArr, unsigned int * nCells, unsigned int * cnt,
unsigned int * nNonEmptyCells)
{
unsigned int tid=threadIdx.x+ (blockIdx.x*BLOCKSIZE);
if (tid>=*N){
return;
}
//Added this because of the offset, we may go beyond the end of the array
// if (tid*(*sampleOffset)>=*N)
// {
// return;
// }
//original
// unsigned int pointID=tid*(*sampleOffset)*(GPUNUMDIM);
//considering the query point array
unsigned int pointID=queryPts[tid*(*sampleOffset)];
//The offset into the database, taking into consideration the length of each dimension
unsigned int pointOffset=pointID*(GPUNUMDIM);
//make a local copy of the point
DTYPE point[GPUNUMDIM];
for (int i=0; i<GPUNUMDIM; i++){
point[i]=database[pointOffset+i];
}
//calculate the coords of the Cell for the point
//and the min/max ranges in each dimension
unsigned int nDCellIDs[NUMINDEXEDDIM];
unsigned int nDMinCellIDs[NUMINDEXEDDIM];
unsigned int nDMaxCellIDs[NUMINDEXEDDIM];
for (int i=0; i<NUMINDEXEDDIM; i++){
nDCellIDs[i]=(point[i]-minArr[i])/(*epsilon);
nDMinCellIDs[i]=max(0,nDCellIDs[i]-1); //boundary conditions (don't go beyond cell 0)
nDMaxCellIDs[i]=min(nCells[i]-1,nDCellIDs[i]+1); //boundary conditions (don't go beyond the maximum number of cells)
}
unsigned int indexes[NUMINDEXEDDIM];
unsigned int loopRng[NUMINDEXEDDIM];
for (loopRng[0]=nDMinCellIDs[0]; loopRng[0]<=nDMaxCellIDs[0]; loopRng[0]++)
for (loopRng[1]=nDMinCellIDs[1]; loopRng[1]<=nDMaxCellIDs[1]; loopRng[1]++)
{ //beginning of loop body
for (int x=0; x<NUMINDEXEDDIM; x++){
indexes[x]=loopRng[x];
}
uint64_t calcLinearID=getLinearID_nDimensionsGPU(indexes, nCells, NUMINDEXEDDIM);
//compare the linear ID with the gridCellLookupArr to determine if the cell is non-empty: this can happen because one point says
//a cell in a particular dimension is non-empty, but that's because it was related to a different point (not adjacent to the query point)
struct gridCellLookup tmp;
tmp.gridLinearID=calcLinearID;
if (thrust::binary_search(thrust::seq, gridCellLookupArr, gridCellLookupArr+ (*nNonEmptyCells), gridCellLookup(tmp))){
struct gridCellLookup * resultBinSearch=thrust::lower_bound(thrust::seq, gridCellLookupArr, gridCellLookupArr+(*nNonEmptyCells), gridCellLookup(tmp));
unsigned int GridIndex=resultBinSearch->idx;
for (int k=index[GridIndex].indexmin; k<=index[GridIndex].indexmax; k++){
DTYPE runningTotalDist=0;
unsigned int dataIdx=indexLookupArr[k];
for (int l=0; l<GPUNUMDIM; l++){
runningTotalDist+=(database[dataIdx*GPUNUMDIM+l]-point[l])*(database[dataIdx*GPUNUMDIM+l]-point[l]);
}
if (sqrt(runningTotalDist)<=(*epsilon)){
unsigned int idx=atomicAdd(cnt,int(1));
}
}
}
//printf("\nLinear id: %d",calcLinearID);
} //end loop body
}
|
7802f2b645469b3ee03a99b4ed68bedce26dcbce.cu
|
// MIT License
// Copyright (c) 2021 Mike Gowanlock
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#include "kernel.h"
#include "structs.h"
#include <math.h>
#include <thrust/execution_policy.h>
#include <thrust/binary_search.h>
#include "params.h"
__device__ void evaluateCellEstimate(unsigned int* nCells, unsigned int* indexes, struct gridCellLookup * gridCellLookupArr, unsigned int* nNonEmptyCells, DTYPE* database, DTYPE *epsilon, struct grid * index, unsigned int * indexLookupArr, DTYPE* point, unsigned int* cnt, bool differentCell);
__device__ void evaluateCell(unsigned int* nCells, unsigned int* indexes, struct gridCellLookup * gridCellLookupArr, unsigned int* nNonEmptyCells, DTYPE* database, DTYPE* epsilon, struct grid * index, unsigned int * indexLookupArr, DTYPE* point, unsigned int* cnt,int* pointIDKey, int* pointInDistVal, int pointIdx, bool differentCell, unsigned int* nDCellIDs, CTYPE* workCounts);
__device__ void swap(unsigned int* a, unsigned int* b) {
unsigned int temp = *a;
*a = *b;
*b= temp;
}
__device__ void sortCell(unsigned int* list, DTYPE* database, int length, int tid){
bool odd=false;
for(int i=0; i<length; i++) {
for(int j=(tid*2)+(int)odd; j<length-1; j+=32) {
if(database[list[j]*GPUNUMDIM] > database[list[j+1]*GPUNUMDIM]) {
swap(&list[j], &list[j+1]);
}
}
odd = !odd;
}
}
__device__ void seqSortCell(unsigned int* list, DTYPE* database, int length){
DTYPE min;
int minIdx;
for(int i=0; i<length-1; i++ ) {
min = database[list[i]*GPUNUMDIM];
minIdx=i;
for(int j=i+1; j<length; j++) {
if(database[list[j]*GPUNUMDIM] < min) {
min = database[list[j]*GPUNUMDIM];
minIdx = j;
}
}
swap(&list[i], &list[minIdx]);
}
}
__global__ void kernelSortPointsInCells(DTYPE* database, struct grid * index, unsigned int* indexLookupArr, unsigned int nNonEmptyCells) {
int tid = threadIdx.x + (blockIdx.x*BLOCKSIZE);
int warpId = tid/32;
int totalWarps = (gridDim.x*BLOCKSIZE)/32;
int sortDim=0;
if(GPUNUMDIM > NUMINDEXEDDIM)
sortDim = NUMINDEXEDDIM;
for(int i=warpId; i<nNonEmptyCells; i+=totalWarps) {
if(index[i].indexmin < index[i].indexmax) {
sortCell(indexLookupArr+index[i].indexmin, database+sortDim, (index[i].indexmax-index[i].indexmin)+1, threadIdx.x%32);
}
}
}
/////////////////////////////////////////
//THE RESULTS GET GENERATED AS KEY/VALUE PAIRS IN TWO ARRAYS
//KEY- THE POINT ID BEING SEARCHED
//VALUE- A POINT ID WITHIN EPSILON OF THE KEY POINT THAT WAS SEARCHED
//THE RESULTS ARE SORTED IN SITU ON THE DEVICE BY THRUST AFTER THE KERNEL FINISHES
/////////////////////////////////////////
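//Hedged illustration, not called in this file: the banner above refers to a host-side sort that
//happens after the kernel. Assuming pointIDKey and pointInDistVal are device allocations of
//length resultSetSize, that sort would look roughly like
// thrust::device_ptr<int> keys = thrust::device_pointer_cast(pointIDKey);
// thrust::device_ptr<int> vals = thrust::device_pointer_cast(pointInDistVal);
// thrust::sort_by_key(thrust::device, keys, keys + resultSetSize, vals);
//so that all neighbors of a given query point end up contiguous in the value array.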
__device__ uint64_t getLinearID_nDimensionsGPU(unsigned int * indexes, unsigned int * dimLen, unsigned int nDimensions) {
uint64_t offset = 0;
uint64_t multiplier = 1;
for (int i = 0; i<nDimensions; i++){
offset += (uint64_t)indexes[i] * multiplier;
multiplier *= dimLen[i];
}
return offset;
}
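//Worked example (derived from the code): with nDimensions = 2, indexes = {3, 5} and
//dimLen = {10, 20}, the linear ID is 3 * 1 + 5 * 10 = 53; the first dimension varies fastest.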
//This version is the same as the batch estimator
//One query point per GPU thread
// unsigned int *debug1, unsigned int *debug2 – ignore, debug values
// unsigned int *N – total GPU threads for the kernel
// unsigned int * queryPts -- the Query Points to be searched on the GPU
// unsigned int * offset - This is to offset into every nth data point, e.g., every 100th point calculates its neighbors
// unsigned int *batchNum - The batch number being executed, used to calculate the point being processed
// DTYPE* database – The points in the database as 1 array
// DTYPE* epsilon – distance threshold
// struct grid * index – each non-empty grid cell is one of these, stores the indices into indexLookupArray that coincide with the data points in the database that are in the cell
// unsigned int * indexLookupArr – array of the size of database, has the indices of the datapoints in the database stored contiguously for each grid cell. each grid index cell references this
// struct gridCellLookup * gridCellLookupArr, - lookup array to the grid cells, needed to find if a grid cell exists (this is binary searched). Maps the location of the non-empty grid cells in grid * index to their linearized (1-D) array
// DTYPE* minArr – The minimum “edge” of the grid in each dimension
// unsigned int * nCells –The total number of cells in each dimension (if all were indexed), can compute the spatial extent, with minArr[0]+nCells[0]*epsilon, in the 1st dimension
// unsigned int * cnt – the result set size
// unsigned int * nNonEmptyCells – the number of non-empty cells in total, this is the size of the gridCellLookupArr
// int * pointIDKey, int * pointInDistVal - result set to be sorted as key/value pairs
//unsigned int * refPointBeginId -- the id that begins the reference points all ids < the value are normal data points (from the queryPts Array)
__global__ void kernelNDGridIndexGlobal(unsigned int *debug1, unsigned int *debug2, unsigned int *N, unsigned int * queryPts,
unsigned int * offset, unsigned int *batchNum, DTYPE* database, DTYPE* epsilon, struct grid * index, unsigned int * indexLookupArr,
struct gridCellLookup * gridCellLookupArr, DTYPE* minArr, unsigned int * nCells, unsigned int * cnt,
unsigned int * nNonEmptyCells, int * pointIDKey, int * pointInDistVal, CTYPE* workCounts, unsigned int * refPointBeginId)
{
unsigned int tid=threadIdx.x+ (blockIdx.x*BLOCKSIZE);
if (tid>=*N){
return;
}
//original
// unsigned int pointIdx=tid*(*offset)+(*batchNum);
//considering the query point array
unsigned int pointIdx=queryPts[tid*(*offset)+(*batchNum)];
//The offset into the database, taking into consideration the length of each dimension
//original
// unsigned int pointOffset=tid*(GPUNUMDIM)*(*offset)+(*batchNum)*(GPUNUMDIM);
unsigned int pointOffset=pointIdx*(GPUNUMDIM);
//1.5 epsilon for the reference points
DTYPE eps=*epsilon;
if (pointIdx>=(*refPointBeginId))
{
eps=*epsilon*1.5;
// printf("gpu pnt: %u, %f\n",pointIdx,eps);
// unsigned int idx=atomicAdd(debug1,int(1));
}
//make a local copy of the point
DTYPE point[GPUNUMDIM];
for (int i=0; i<GPUNUMDIM; i++){
point[i]=database[pointOffset+i];
}
//calculate the coords of the Cell for the point
//and the min/max ranges in each dimension
unsigned int nDCellIDs[NUMINDEXEDDIM];
unsigned int nDMinCellIDs[NUMINDEXEDDIM];
unsigned int nDMaxCellIDs[NUMINDEXEDDIM];
for (int i=0; i<NUMINDEXEDDIM; i++){
nDCellIDs[i]=(point[i]-minArr[i])/(*epsilon);
nDMinCellIDs[i]=max(0,nDCellIDs[i]-1); //boundary conditions (don't go beyond cell 0)
nDMaxCellIDs[i]=min(nCells[i]-1,nDCellIDs[i]+1); //boundary conditions (don't go beyond the maximum number of cells)
}
unsigned int indexes[NUMINDEXEDDIM];
unsigned int loopRng[NUMINDEXEDDIM];
// for (loopRng[0]=rangeFilteredCellIdsMin[0]; loopRng[0]<=rangeFilteredCellIdsMax[0]; loopRng[0]++)
// for (loopRng[1]=rangeFilteredCellIdsMin[1]; loopRng[1]<=rangeFilteredCellIdsMax[1]; loopRng[1]++)
for (loopRng[0]=nDMinCellIDs[0]; loopRng[0]<=nDMaxCellIDs[0]; loopRng[0]++)
for (loopRng[1]=nDMinCellIDs[1]; loopRng[1]<=nDMaxCellIDs[1]; loopRng[1]++)
{ //beginning of loop body
for (int x=0; x<NUMINDEXEDDIM; x++){
indexes[x]=loopRng[x];
}
//original
// evaluateCell(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr, point, cnt, pointIDKey, pointInDistVal, pointIdx, false, nDCellIDs, workCounts);
//with 1.5 eps for detecting merges if the point is a reference point
evaluateCell(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, &eps, index, indexLookupArr, point, cnt, pointIDKey, pointInDistVal, pointIdx, false, nDCellIDs, workCounts);
} //end loop body
}
__forceinline__ __device__ void evalPoint(unsigned int* indexLookupArr, int k, DTYPE* database, DTYPE* epsilon, DTYPE* point, unsigned int* cnt, int* pointIDKey, int* pointInDistVal, int pointIdx)
{
DTYPE runningTotalDist=0;
unsigned int dataIdx=indexLookupArr[k];
for (int l=0; l<GPUNUMDIM; l++){
runningTotalDist+=(database[dataIdx*GPUNUMDIM+l]-point[l])*(database[dataIdx*GPUNUMDIM+l]-point[l]);
}
if (sqrt(runningTotalDist)<=(*epsilon)){
unsigned int idx=atomicAdd(cnt,int(1));
pointIDKey[idx]=pointIdx;
pointInDistVal[idx]=dataIdx;
}
}
__device__ void evaluateCell(unsigned int* nCells, unsigned int* indexes, struct gridCellLookup * gridCellLookupArr, unsigned int* nNonEmptyCells, DTYPE* database, DTYPE* epsilon, struct grid * index, unsigned int * indexLookupArr, DTYPE* point, unsigned int* cnt, int* pointIDKey, int* pointInDistVal, int pointIdx, bool differentCell, unsigned int* nDCellIDs, CTYPE* workCounts) {
uint64_t calcLinearID=getLinearID_nDimensionsGPU(indexes, nCells, NUMINDEXEDDIM);
//compare the linear ID against gridCellLookupArr to determine whether this adjacent cell is actually non-empty:
//a candidate cell ID generated from the neighborhood ranges may correspond to an empty cell, so it must be checked against the list of non-empty cells
struct gridCellLookup tmp;
tmp.gridLinearID=calcLinearID;
//find if the cell is non-empty
if (thrust::binary_search(thrust::seq, gridCellLookupArr, gridCellLookupArr+ (*nNonEmptyCells), gridCellLookup(tmp))){
//compute the neighbors for the adjacent non-empty cell
struct gridCellLookup * resultBinSearch=thrust::lower_bound(thrust::seq, gridCellLookupArr, gridCellLookupArr+(*nNonEmptyCells), gridCellLookup(tmp));
unsigned int GridIndex=resultBinSearch->idx;
for (int k=index[GridIndex].indexmin; k<=index[GridIndex].indexmax; k++){
evalPoint(indexLookupArr, k, database, epsilon, point, cnt, pointIDKey, pointInDistVal, pointIdx);
}
}//end if binary search
}
//Kernel brute forces to generate the neighbor table for each point in the database
__global__ void kernelBruteForce(unsigned int *N, unsigned int *debug1, unsigned int *debug2, DTYPE* epsilon, unsigned long long int * cnt, DTYPE* database, int * pointIDKey, int * pointInDistVal) {
unsigned int tid=threadIdx.x+ (blockIdx.x*BLOCKSIZE);
if (tid>=*N){
return;
}
int dataOffset=tid*GPUNUMDIM;
DTYPE runningDist=0;
//compare my point to every other point
for (int i=0; i<(*N); i++)
{
runningDist=0;
for (int j=0; j<GPUNUMDIM; j++){
runningDist+=(database[(i*GPUNUMDIM)+j]-database[dataOffset+j])*(database[(i*GPUNUMDIM)+j]-database[dataOffset+j]);
}
//if within epsilon:
if ((sqrt(runningDist))<=(*epsilon)){
atomicAdd(cnt, (unsigned long long int)1);
}
}
return;
}
//Need to use the query points to get a good estimate of the total result set size
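//(Hedged reading, the host-side code is not shown here: the kernel below atomically counts the
//neighbors of every sampled query point, and the host presumably scales that count by the
//sampling rate (*sampleOffset) to estimate the size of the full result set and the batch sizes.)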
__global__ void kernelNDGridIndexBatchEstimator(unsigned int *debug1, unsigned int *debug2, unsigned int *N,
unsigned int * sampleOffset, DTYPE* database, unsigned int *queryPts, DTYPE* epsilon, struct grid * index, unsigned int * indexLookupArr,
struct gridCellLookup * gridCellLookupArr, DTYPE* minArr, unsigned int * nCells, unsigned int * cnt,
unsigned int * nNonEmptyCells)
{
unsigned int tid=threadIdx.x+ (blockIdx.x*BLOCKSIZE);
if (tid>=*N){
return;
}
//Added this because of the offset, we may go beyond the end of the array
// if (tid*(*sampleOffset)>=*N)
// {
// return;
// }
//original
// unsigned int pointID=tid*(*sampleOffset)*(GPUNUMDIM);
//considering the query point array
unsigned int pointID=queryPts[tid*(*sampleOffset)];
//The offset into the database, taking into consideration the length of each dimension
unsigned int pointOffset=pointID*(GPUNUMDIM);
//make a local copy of the point
DTYPE point[GPUNUMDIM];
for (int i=0; i<GPUNUMDIM; i++){
point[i]=database[pointOffset+i];
}
//calculate the coords of the Cell for the point
//and the min/max ranges in each dimension
unsigned int nDCellIDs[NUMINDEXEDDIM];
unsigned int nDMinCellIDs[NUMINDEXEDDIM];
unsigned int nDMaxCellIDs[NUMINDEXEDDIM];
for (int i=0; i<NUMINDEXEDDIM; i++){
nDCellIDs[i]=(point[i]-minArr[i])/(*epsilon);
nDMinCellIDs[i]=(nDCellIDs[i]==0)? 0 : nDCellIDs[i]-1; //boundary conditions (don't go below cell 0; written this way to avoid unsigned underflow when the point is in cell 0)
nDMaxCellIDs[i]=min(nCells[i]-1,nDCellIDs[i]+1); //boundary conditions (don't go beyond the maximum number of cells)
}
unsigned int indexes[NUMINDEXEDDIM];
unsigned int loopRng[NUMINDEXEDDIM];
for (loopRng[0]=nDMinCellIDs[0]; loopRng[0]<=nDMaxCellIDs[0]; loopRng[0]++)
for (loopRng[1]=nDMinCellIDs[1]; loopRng[1]<=nDMaxCellIDs[1]; loopRng[1]++)
{ //beginning of loop body
for (int x=0; x<NUMINDEXEDDIM; x++){
indexes[x]=loopRng[x];
}
uint64_t calcLinearID=getLinearID_nDimensionsGPU(indexes, nCells, NUMINDEXEDDIM);
//compare the linear ID against gridCellLookupArr to determine whether this adjacent cell is actually non-empty:
//a candidate cell ID generated from the neighborhood ranges may correspond to an empty cell, so it must be checked against the list of non-empty cells
struct gridCellLookup tmp;
tmp.gridLinearID=calcLinearID;
if (thrust::binary_search(thrust::seq, gridCellLookupArr, gridCellLookupArr+ (*nNonEmptyCells), gridCellLookup(tmp))){
struct gridCellLookup * resultBinSearch=thrust::lower_bound(thrust::seq, gridCellLookupArr, gridCellLookupArr+(*nNonEmptyCells), gridCellLookup(tmp));
unsigned int GridIndex=resultBinSearch->idx;
for (int k=index[GridIndex].indexmin; k<=index[GridIndex].indexmax; k++){
DTYPE runningTotalDist=0;
unsigned int dataIdx=indexLookupArr[k];
for (int l=0; l<GPUNUMDIM; l++){
runningTotalDist+=(database[dataIdx*GPUNUMDIM+l]-point[l])*(database[dataIdx*GPUNUMDIM+l]-point[l]);
}
if (sqrt(runningTotalDist)<=(*epsilon)){
atomicAdd(cnt,int(1)); //only the count is needed for the estimate
}
}
}
//printf("\nLinear id: %d",calcLinearID);
} //end loop body
}
|
6e3ee2b540d4452ea1dd4039abf28a4bc0413e4b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "multilayer.h"
#include "hip/hip_runtime.h"
#include "hipfft.h"
#include "kernels.h"
#include <vector>
#include <iostream>
#include <math.h>
#include <hip/hip_runtime_api.h>
#include <assert.h>
#include "stdio.h"
#include "cudaDebug.h"
#include "blur.h"
MultiLayer::MultiLayer(int width,
int height,
std::vector<double> z,
std::vector<double> rconstr,
std::vector<double> iconstr,
double mu,
double dx,
double lambda,
double n) :width(width), height(height), z(z), rconstr(rconstr), iconstr(iconstr), mu(mu)
{
numLayers = (int)z.size();
count = width*height;
m_count = width*height*numLayers;
blur = new Blur();
allocate();
multilayerPropagator(dx, lambda, n);
hipLaunchKernelGGL(( conjugate), dim3(N_BLOCKS),dim3(N_THREADS), 0, 0, m_count, Hq, Hn);
}
void MultiLayer::allocate(){
hipMalloc(&model, count*sizeof(hipfftDoubleComplex));
hipMalloc(&Hq, m_count*sizeof(hipfftComplex));
hipMalloc(&Hn, m_count*sizeof(hipfftComplex));
hipMalloc(&propagation, m_count*sizeof(hipfftComplex));
hipMalloc(&guess, m_count*sizeof(hipfftDoubleComplex));
hipMalloc(&newGuess, m_count*sizeof(hipfftDoubleComplex));
hipMalloc(&u, m_count*sizeof(hipfftDoubleComplex));
hipMalloc(&temporary, m_count*sizeof(hipfftDoubleComplex));
hipMalloc(&sumArr, 2*N_BLOCKS*sizeof(double));
hipMalloc(&c, sizeof(double));
hipMalloc(&image, count*sizeof(double));
hipMalloc(&Imodel, count*sizeof(double));
hipMalloc(&temporaryf, 2*m_count*sizeof(double));
int n[2] = {height, width};
hipfftPlanMany(&fftPlan, 2, n, NULL, 1, count, NULL, 1, count, HIPFFT_C2C, 2);
}
void MultiLayer::multilayerPropagator(double dx, double lambda, double n){
hipfftComplex *placeHolder;
for(int i = 0; i < numLayers; i++){
placeHolder = &Hq[i*count];
hipLaunchKernelGGL(( propagator), dim3(N_BLOCKS),dim3(N_THREADS), 0, 0, width, height, z[i], dx, n, lambda, placeHolder);
}
}
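//propagate(): frequency-domain propagation - forward FFT, pointwise multiply by the per-layer
//transfer function in 'kernel', then inverse FFT. Note (hedged): hipFFT inverse transforms are
//unnormalized, so any 1/(width*height) scaling would have to happen elsewhere (e.g. inside C2Z)
//if normalized output is expected.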
void MultiLayer::propagate(hipfftComplex* kernel, hipfftDoubleComplex* input, hipfftDoubleComplex* out){
hipLaunchKernelGGL(( Z2C), dim3(N_BLOCKS),dim3(N_THREADS), 0, 0, m_count, input, propagation);
hipfftExecC2C(fftPlan, propagation, propagation, HIPFFT_FORWARD);
hipLaunchKernelGGL(( multiply), dim3(N_BLOCKS), dim3(N_THREADS), 0, 0, m_count, kernel, propagation);
hipfftExecC2C(fftPlan, propagation, propagation, HIPFFT_BACKWARD);
hipLaunchKernelGGL(( C2Z), dim3(N_BLOCKS),dim3(N_THREADS), 0, 0, m_count, propagation, out);
}
void MultiLayer::calculateCost(double mu, double* model, hipfftDoubleComplex* guess, double* temp, double* out){
hipLaunchKernelGGL(( absolute), dim3(N_BLOCKS),dim3(N_THREADS), 0, 0, m_count, guess, &temp[m_count]);
hipLaunchKernelGGL(( square), dim3(N_BLOCKS),dim3(N_THREADS), 0, 0, count, model, &temp[count]);
h_sum(m_count, &temp[m_count], sumArr);
h_sum(count, &temp[count], &sumArr[N_BLOCKS]);
hipLaunchKernelGGL(( scalef), dim3(1),dim3(1), 0, 0, 1,mu,sumArr,sumArr);
hipLaunchKernelGGL(( simpleSum), dim3(1),dim3(1), 0, 0, &sumArr[N_BLOCKS],sumArr,&out[0]);
}
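//normalize(): appears to rescale arr to [0, 1] by subtracting its minimum and then dividing by
//the resulting maximum (min-max normalization), ahead of the 8-bit conversion in update().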
void MultiLayer::normalize(int cnt, double* arr){
h_minimum(cnt, arr, sumArr);
double temp;
hipMemcpy(&temp, sumArr, sizeof(double), hipMemcpyDeviceToHost);
hipLaunchKernelGGL(( offsetf), dim3(N_BLOCKS),dim3(N_THREADS), 0, 0, cnt, -temp, arr, arr, true);
h_maximum(cnt, arr, sumArr);
hipLaunchKernelGGL(( contractf_p), dim3(N_BLOCKS),dim3(N_THREADS), 0, 0, cnt, sumArr, arr, arr);
}
void MultiLayer::iterate(double *input, int iters, bool b_cost, bool warm){
// Initialization of variables
s = 1;
if(b_cost){
hipMalloc(&cost, (1+iters)*sizeof(double));
h_cost = (double*)malloc((iters+1)*sizeof(double));
}
//Copying the input image from host to device memory - a relatively costly host-to-device transfer
gpuErrchk(hipMemcpy(image, input, count*sizeof(double), hipMemcpyHostToDevice));
blur->gaussianBlur(width,height, 5, 3, image, temporaryf, image);
//Copying the device memory image to device memory guesses
for(int i = 0; i < numLayers; i++){
hipLaunchKernelGGL(( F2C), dim3(N_BLOCKS),dim3(N_THREADS), 0, 0, count, image, &u[i*count]);
if (!warm)
hipLaunchKernelGGL(( F2C), dim3(N_BLOCKS),dim3(N_THREADS), 0, 0, count, image, &guess[i*count]);
}
for(int iter = 0; iter < iters; iter++){
//Calculating the current iteration model
propagate(Hq, u, temporary);
//Calculation of Imodel and model arrays
hipLaunchKernelGGL(( modelFunc), dim3(N_BLOCKS),dim3(N_THREADS), 0, 0, count, numLayers, 1.0f, 0, temporary, model, Imodel);
//Calculation of the optimal scaling parameter c
h_sumOfProducts(count, image, Imodel, sumArr);
h_sumOfProducts(count, Imodel, Imodel, &sumArr[N_BLOCKS]);
hipLaunchKernelGGL(( contractf_p), dim3(1),dim3(1), 0, 0, 1, &sumArr[N_BLOCKS], sumArr, c);
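//The two reductions above plus this division appear to implement the least-squares optimal
//scaling c = <image, Imodel> / <Imodel, Imodel>, i.e. the c minimizing ||image - c*Imodel||^2
//(hedged reading of h_sumOfProducts/contractf_p).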
double t_cost[1];
hipMemcpy(t_cost, c, sizeof(double), hipMemcpyDeviceToHost);
std::cout << "Current optimal scaling factor is " << t_cost[0] << std::endl;
//Cost calculation with sparsity constraint
hipLaunchKernelGGL(( linear), dim3(N_BLOCKS),dim3(N_THREADS), 0, 0, count, c, image, Imodel, temporaryf, false);
if(b_cost){
calculateCost(mu, temporaryf, guess, temporaryf, &cost[iter]);
double t_cost[1];
hipMemcpy(t_cost, &cost[iter], sizeof(double), hipMemcpyDeviceToHost);
std::cout << t_cost[0] << std::endl;
}
//Calculating residues
hipLaunchKernelGGL(( multiplyfc), dim3(N_BLOCKS),dim3(N_THREADS), 0, 0, count, temporaryf, model);
hipLaunchKernelGGL(( extend), dim3(N_BLOCKS),dim3(N_THREADS), 0, 0, count, numLayers, model, temporary);
propagate(Hn, temporary, temporary);
hipLaunchKernelGGL(( F2C), dim3(1),dim3(1), 0, 0, 1,c,newGuess);
hipLaunchKernelGGL(( scale_p), dim3(N_BLOCKS),dim3(N_THREADS), 0, 0, m_count, newGuess, temporary, temporary);
hipLaunchKernelGGL(( add), dim3(N_BLOCKS),dim3(N_THREADS), 0, 0, m_count, u, temporary, newGuess, false);
//Applying strict bounds
for(int i = 0 ; i < numLayers ; i++){
hipLaunchKernelGGL(( strictBounds), dim3(N_BLOCKS),dim3(N_THREADS), 0, 0, count, &newGuess[count*i], rconstr[i*2], rconstr[i*2+1], iconstr[i*2], iconstr[i*2+1]);
}
//Applying soft thresholding bounds
hipLaunchKernelGGL(( softBounds), dim3(N_BLOCKS),dim3(N_THREADS), 0, 0, m_count, newGuess, mu, 0.5f);
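//FISTA-style acceleration (hedged reading of the add/scale kernels): s_new follows the Nesterov
//momentum sequence s_new = (1 + sqrt(1 + 4*s^2))/2, and the three launches below appear to form
//the extrapolated iterate u = newGuess + ((s-1)/s_new)*(newGuess - guess).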
double s_new = 0.5*(1+std::sqrt(1+4*s*s));
hipfftDoubleComplex temp = make_cuDoubleComplex((s-1)/s_new,0);
hipLaunchKernelGGL(( add), dim3(N_BLOCKS),dim3(N_THREADS), 0, 0, m_count, newGuess, guess, temporary, false);
hipLaunchKernelGGL(( scale), dim3(N_BLOCKS),dim3(N_THREADS), 0, 0, m_count, temp, temporary, temporary);
hipLaunchKernelGGL(( add), dim3(N_BLOCKS),dim3(N_THREADS), 0, 0, m_count, newGuess, temporary, u, true);
s = s_new;
hipMemcpy(guess, newGuess, m_count*sizeof(hipfftDoubleComplex), hipMemcpyDeviceToDevice);
}
// Final cost calculation
if(b_cost){
propagate(Hq, u, newGuess);
//Calculation of Imodel and model arrays
hipLaunchKernelGGL(( modelFunc), dim3(N_BLOCKS),dim3(N_THREADS), 0, 0, count, numLayers, 1.0f, 0, newGuess, model, Imodel);
//Calculation of the optimal scaling parameter c
h_sumOfProducts(count, image, Imodel, sumArr);
h_sumOfProducts(count, Imodel, Imodel, &sumArr[N_BLOCKS]);
hipLaunchKernelGGL(( contractf_p), dim3(1),dim3(1), 0, 0, 1, &sumArr[N_BLOCKS], sumArr, c);
//Cost calculation with sparsity constraint
hipLaunchKernelGGL(( linear), dim3(N_BLOCKS),dim3(N_THREADS), 0, 0, count, c, image, Imodel, temporaryf, false);
calculateCost(mu, temporaryf, guess, temporaryf, &cost[iters]);
double t_cost[1];
hipMemcpy(t_cost, &cost[iters], sizeof(double), hipMemcpyDeviceToHost);
std::cout << t_cost[0] << std::endl;
gpuErrchk(hipMemcpy(h_cost, cost, (iters+1)*sizeof(double), hipMemcpyDeviceToHost));
hipFree(cost);
}
// Moving results to host memory
// Adding one to get the light wavefront (otherwise we only have the disturbance by the particles and electrodes)
hipLaunchKernelGGL(( offset), dim3(N_BLOCKS),dim3(N_THREADS), 0, 0, m_count, 1.0f, 0.0f, guess, temporary);
// Check if any error occurred - note that unchecked kernel failures can surface later as errors at hipMemcpy calls
gpuErrchk(hipPeekAtLastError());
}
void MultiLayer::update(uint8_t* modulus, uint8_t* phase){
// temporary contains the latest results in complex form
// Processing the modulus of both layers
hipLaunchKernelGGL(( absolute), dim3(N_BLOCKS),dim3(N_THREADS), 0, 0, m_count,temporary,temporaryf);
for(int i = 0; i < numLayers; i++){
normalize(count, &temporaryf[i*count]);
}
hipLaunchKernelGGL(( D2u8), dim3(N_BLOCKS),dim3(N_THREADS), 0, 0, m_count,temporaryf,modulus);
// Processing the phase of both layers
hipLaunchKernelGGL(( angle), dim3(N_BLOCKS),dim3(N_THREADS), 0, 0, m_count,temporary,temporaryf);
for(int i = 0; i < numLayers; i++){
normalize(count, &temporaryf[i*count]);
}
hipLaunchKernelGGL(( D2u8), dim3(N_BLOCKS),dim3(N_THREADS), 0, 0, m_count,temporaryf,phase);
}
MultiLayer::~MultiLayer(){
hipFree(Hq);
hipFree(Hn);
hipFree(temporary);
hipFree(image);
hipFree(model);
hipFree(guess);
hipFree(newGuess);
hipFree(u);
hipFree(temporaryf);
hipFree(c);
hipFree(propagation);
hipFree(Imodel);
hipFree(sumArr);
delete blur;
hipfftDestroy(fftPlan);
}
|
6e3ee2b540d4452ea1dd4039abf28a4bc0413e4b.cu
|
#include "multilayer.h"
#include "cuda.h"
#include "cufft.h"
#include "kernels.h"
#include <vector>
#include <iostream>
#include <math.h>
#include <cuda_runtime_api.h>
#include <assert.h>
#include "stdio.h"
#include "cudaDebug.h"
#include "blur.h"
MultiLayer::MultiLayer(int width,
int height,
std::vector<double> z,
std::vector<double> rconstr,
std::vector<double> iconstr,
double mu,
double dx,
double lambda,
double n) :width(width), height(height), z(z), rconstr(rconstr), iconstr(iconstr), mu(mu)
{
numLayers = (int)z.size();
count = width*height;
m_count = width*height*numLayers;
blur = new Blur();
allocate();
multilayerPropagator(dx, lambda, n);
conjugate<<<N_BLOCKS,N_THREADS>>>(m_count, Hq, Hn);
}
void MultiLayer::allocate(){
cudaMalloc(&model, count*sizeof(cufftDoubleComplex));
cudaMalloc(&Hq, m_count*sizeof(cufftComplex));
cudaMalloc(&Hn, m_count*sizeof(cufftComplex));
cudaMalloc(&propagation, m_count*sizeof(cufftComplex));
cudaMalloc(&guess, m_count*sizeof(cufftDoubleComplex));
cudaMalloc(&newGuess, m_count*sizeof(cufftDoubleComplex));
cudaMalloc(&u, m_count*sizeof(cufftDoubleComplex));
cudaMalloc(&temporary, m_count*sizeof(cufftDoubleComplex));
cudaMalloc(&sumArr, 2*N_BLOCKS*sizeof(double));
cudaMalloc(&c, sizeof(double));
cudaMalloc(&image, count*sizeof(double));
cudaMalloc(&Imodel, count*sizeof(double));
cudaMalloc(&temporaryf, 2*m_count*sizeof(double));
int n[2] = {height, width};
cufftPlanMany(&fftPlan, 2, n, NULL, 1, count, NULL, 1, count, CUFFT_C2C, 2);
}
void MultiLayer::multilayerPropagator(double dx, double lambda, double n){
cufftComplex *placeHolder;
for(int i = 0; i < numLayers; i++){
placeHolder = &Hq[i*count];
propagator<<<N_BLOCKS,N_THREADS>>>(width, height, z[i], dx, n, lambda, placeHolder);
}
}
void MultiLayer::propagate(cufftComplex* kernel, cufftDoubleComplex* input, cufftDoubleComplex* out){
Z2C<<<N_BLOCKS,N_THREADS>>>(m_count, input, propagation);
cufftExecC2C(fftPlan, propagation, propagation, CUFFT_FORWARD);
multiply<<<N_BLOCKS, N_THREADS>>>(m_count, kernel, propagation);
cufftExecC2C(fftPlan, propagation, propagation, CUFFT_INVERSE);
C2Z<<<N_BLOCKS,N_THREADS>>>(m_count, propagation, out);
}
void MultiLayer::calculateCost(double mu, double* model, cufftDoubleComplex* guess, double* temp, double* out){
absolute<<<N_BLOCKS,N_THREADS>>>(m_count, guess, &temp[m_count]);
square<<<N_BLOCKS,N_THREADS>>>(count, model, &temp[count]);
h_sum(m_count, &temp[m_count], sumArr);
h_sum(count, &temp[count], &sumArr[N_BLOCKS]);
scalef<<<1,1>>>(1,mu,sumArr,sumArr);
simpleSum<<<1,1>>>(&sumArr[N_BLOCKS],sumArr,&out[0]);
}
void MultiLayer::normalize(int cnt, double* arr){
h_minimum(cnt, arr, sumArr);
double temp;
cudaMemcpy(&temp, sumArr, sizeof(double), cudaMemcpyDeviceToHost);
offsetf<<<N_BLOCKS,N_THREADS>>>(cnt, -temp, arr, arr, true);
h_maximum(cnt, arr, sumArr);
contractf_p<<<N_BLOCKS,N_THREADS>>>(cnt, sumArr, arr, arr);
}
void MultiLayer::iterate(double *input, int iters, bool b_cost, bool warm){
// Initialization of variables
s = 1;
if(b_cost){
cudaMalloc(&cost, (1+iters)*sizeof(double));
h_cost = (double*)malloc((iters+1)*sizeof(double));
}
//Copying the input image from host to device memory - a relatively costly host-to-device transfer
gpuErrchk(cudaMemcpy(image, input, count*sizeof(double), cudaMemcpyHostToDevice));
blur->gaussianBlur(width,height, 5, 3, image, temporaryf, image);
//Copying the device memory image to device memory guesses
for(int i = 0; i < numLayers; i++){
F2C<<<N_BLOCKS,N_THREADS>>>(count, image, &u[i*count]);
if (!warm)
F2C<<<N_BLOCKS,N_THREADS>>>(count, image, &guess[i*count]);
}
for(int iter = 0; iter < iters; iter++){
//Calculating the current iteration model
propagate(Hq, u, temporary);
//Calculation of Imodel and model arrays
modelFunc<<<N_BLOCKS,N_THREADS>>>(count, numLayers, 1.0f, 0, temporary, model, Imodel);
//Calculation of the optimal scaling parameter c
h_sumOfProducts(count, image, Imodel, sumArr);
h_sumOfProducts(count, Imodel, Imodel, &sumArr[N_BLOCKS]);
contractf_p<<<1,1>>>(1, &sumArr[N_BLOCKS], sumArr, c);
double t_cost[1];
cudaMemcpy(t_cost, c, sizeof(double), cudaMemcpyDeviceToHost);
std::cout << "Current optimal scaling factor is " << t_cost[0] << std::endl;
//Cost calculation with sparsity constraint
linear<<<N_BLOCKS,N_THREADS>>>(count, c, image, Imodel, temporaryf, false);
if(b_cost){
calculateCost(mu, temporaryf, guess, temporaryf, &cost[iter]);
double t_cost[1];
cudaMemcpy(t_cost, &cost[iter], sizeof(double), cudaMemcpyDeviceToHost);
std::cout << t_cost[0] << std::endl;
}
//Calculating residues
multiplyfc<<<N_BLOCKS,N_THREADS>>>(count, temporaryf, model);
extend<<<N_BLOCKS,N_THREADS>>>(count, numLayers, model, temporary);
propagate(Hn, temporary, temporary);
F2C<<<1,1>>>(1,c,newGuess);
scale_p<<<N_BLOCKS,N_THREADS>>>(m_count, newGuess, temporary, temporary);
add<<<N_BLOCKS,N_THREADS>>>(m_count, u, temporary, newGuess, false);
//Applying strict bounds
for(int i = 0 ; i < numLayers ; i++){
strictBounds<<<N_BLOCKS,N_THREADS>>>(count, &newGuess[count*i], rconstr[i*2], rconstr[i*2+1], iconstr[i*2], iconstr[i*2+1]);
}
//Applying soft thresholding bounds
softBounds<<<N_BLOCKS,N_THREADS>>>(m_count, newGuess, mu, 0.5f);
double s_new = 0.5*(1+std::sqrt(1+4*s*s));
cufftDoubleComplex temp = make_cuDoubleComplex((s-1)/s_new,0);
add<<<N_BLOCKS,N_THREADS>>>(m_count, newGuess, guess, temporary, false);
scale<<<N_BLOCKS,N_THREADS>>>(m_count, temp, temporary, temporary);
add<<<N_BLOCKS,N_THREADS>>>(m_count, newGuess, temporary, u, true);
s = s_new;
cudaMemcpy(guess, newGuess, m_count*sizeof(cufftDoubleComplex), cudaMemcpyDeviceToDevice);
}
// Final cost calculation
if(b_cost){
propagate(Hq, u, newGuess);
//Calculation of Imodel and model arrays
modelFunc<<<N_BLOCKS,N_THREADS>>>(count, numLayers, 1.0f, 0, newGuess, model, Imodel);
//Calculation of the optimal scaling parameter c
h_sumOfProducts(count, image, Imodel, sumArr);
h_sumOfProducts(count, Imodel, Imodel, &sumArr[N_BLOCKS]);
contractf_p<<<1,1>>>(1, &sumArr[N_BLOCKS], sumArr, c);
//Cost calculation with sparsity constraint
linear<<<N_BLOCKS,N_THREADS>>>(count, c, image, Imodel, temporaryf, false);
calculateCost(mu, temporaryf, guess, temporaryf, &cost[iters]);
double t_cost[1];
cudaMemcpy(t_cost, &cost[iters], sizeof(double), cudaMemcpyDeviceToHost);
std::cout << t_cost[0] << std::endl;
gpuErrchk(cudaMemcpy(h_cost, cost, (iters+1)*sizeof(double), cudaMemcpyDeviceToHost));
cudaFree(cost);
}
// Moving results to host memory
// Adding one to get the light wavefront (otherwise we only have the disturbance by the particles and electrodes)
offset<<<N_BLOCKS,N_THREADS>>>(m_count, 1.0f, 0.0f, guess, temporary);
// Check if any error occurred - note that unchecked kernel failures can surface later as errors at cudaMemcpy calls
gpuErrchk(cudaPeekAtLastError());
}
void MultiLayer::update(uint8_t* modulus, uint8_t* phase){
// temporary contains the latest results in complex form
// Processing the modulus of both layers
absolute<<<N_BLOCKS,N_THREADS>>>(m_count,temporary,temporaryf);
for(int i = 0; i < numLayers; i++){
normalize(count, &temporaryf[i*count]);
}
D2u8<<<N_BLOCKS,N_THREADS>>>(m_count,temporaryf,modulus);
// Processing the phase of both layers
angle<<<N_BLOCKS,N_THREADS>>>(m_count,temporary,temporaryf);
for(int i = 0; i < numLayers; i++){
normalize(count, &temporaryf[i*count]);
}
D2u8<<<N_BLOCKS,N_THREADS>>>(m_count,temporaryf,phase);
}
MultiLayer::~MultiLayer(){
cudaFree(Hq);
cudaFree(Hn);
cudaFree(temporary);
cudaFree(image);
cudaFree(model);
cudaFree(guess);
cudaFree(newGuess);
cudaFree(u);
cudaFree(temporaryf);
cudaFree(c);
cudaFree(propagation);
cudaFree(Imodel);
cudaFree(sumArr);
delete blur;
cufftDestroy(fftPlan);
}
|
422f938f7ac9af229f9a955640d901c4177267de.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "functions.h"
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
__global__ void func_kernel1d(float * dy, float* a, float* base, float * params, int n)
{
uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t offset = idx;
// printf("%d, %d, %d, %d\n", idx, idy, idz, offset);
// ensure we are within bounds
float x[3] = {a[0] + base[0] * (0.5f + idx)};
if (idx< n) {
dy[offset] = F0(x, params);
}
}
__global__ void func_kernel2d(float * dy, float* a, float* base, float * params, int n)
{
uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t idy = blockIdx.y * blockDim.y + threadIdx.y;
uint32_t offset = idx + idy * blockDim.x * gridDim.x;
// printf("%d, %d, %d\n", idx, idy, offset);
// ensure we are within bounds
float x[2] = {a[0] + base[0] * (0.5f + idx), a[1] + base[1] * (0.5f + idy)};
if (idx< n && idy<n) {
dy[offset] = F1(x, params);
}
}
__global__ void func_kernel3dF2(float * dy, float* a, float* base, float * params, int n)
{
uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t idy = blockIdx.y * blockDim.y + threadIdx.y;
uint32_t idz = blockIdx.z * blockDim.z + threadIdx.z;
uint32_t offset = idx + (idy + (blockDim.x * idz * gridDim.x)) * blockDim.x * gridDim.x;
// printf("%d, %d, %d, %d\n", idx, idy, idz, offset);
// ensure we are within bounds
float x[3] = {a[0] + base[0] * (0.5f + idx), a[1] + base[1] * (0.5f + idy), a[2] + base[2] * (0.5f + idz)};
if (idx< n) {
dy[offset] = F2(x, params) ;
}
}
__global__ void func_kernel3dF3(float * dy, float* a, float* base, float * params, int n)
{
uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t idy = blockIdx.y * blockDim.y + threadIdx.y;
uint32_t idz = blockIdx.z * blockDim.z + threadIdx.z;
uint32_t offset = idx + (idy + (blockDim.x * idz * gridDim.x)) * blockDim.x * gridDim.x;
// printf("%d, %d, %d, %d\n", idx, idy, idz, offset);
// ensure we are within bounds
float x[3] = {a[0] + base[0] * (0.5f + idx), a[1] + base[1] * (0.5f + idy), a[2] + base[2] * (0.5f + idz)};
if (idx< n) {
dy[offset] = F3(x, params) ;
}
}
__global__ void func_kernel3dF4(float * dy, float* a, float* base, float * params, int n)
{
uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t idy = blockIdx.y * blockDim.y + threadIdx.y;
uint32_t idz = blockIdx.z * blockDim.z + threadIdx.z;
uint32_t offset = idx + (idy + (blockDim.x * idz * gridDim.x)) * blockDim.x * gridDim.x;
// printf("%d, %d, %d, %d\n", idx, idy, idz, offset);
// ensure we are within bounds
float x[3] = {a[0] + base[0] * (0.5f + idx), a[1] + base[1] * (0.5f + idy), a[2] + base[2] * (0.5f + idz)};
if (idx< n) {
dy[offset] = F4(x, params) ;
}
}
__global__ void func_kernel3dF5(float * dy, float* a, float* base, float * params, int n)
{
uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t idy = blockIdx.y * blockDim.y + threadIdx.y;
uint32_t idz = blockIdx.z * blockDim.z + threadIdx.z;
uint32_t offset = idx + (idy + (blockDim.x * idz * gridDim.x)) * blockDim.x * gridDim.x;
// printf("%d, %d, %d, %d\n", idx, idy, idz, offset);
// ensure we are within bounds
float x[3] = {a[0] + base[0] * (0.5f + idx), a[1] + base[1] * (0.5f + idy), a[2] + base[2] * (0.5f + idz)};
if (idx< n) {
dy[offset] = F5(x, params) ;
}
}
__global__ void func_kernel3dF6(float * dy, float* a, float* base, float * params, int n)
{
uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t idy = blockIdx.y * blockDim.y + threadIdx.y;
uint32_t idz = blockIdx.z * blockDim.z + threadIdx.z;
uint32_t offset = idx + (idy + (blockDim.x * idz * gridDim.x)) * blockDim.x * gridDim.x;
// printf("%d, %d, %d, %d\n", idx, idy, idz, offset);
// printf("%d, %d, %d, %d, %d\n", idx, idy, idz, offset, n);
// ensure we are within bounds
float x[3] = {a[0] + base[0] * (0.5f + idx), a[1] + base[1] * (0.5f + idy), a[2] + base[2] * (0.5f + idz)};
if (idx< n) {
dy[offset] = F6(x, params) ;
}
}
__global__ void func_kernel3dF9(float * dy, float* a, float* base, float * params, int n)
{
uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t idy = blockIdx.y * blockDim.y + threadIdx.y;
uint32_t idz = blockIdx.z * blockDim.z + threadIdx.z;
uint32_t offset = idx + (idy + (blockDim.x * idz * gridDim.x)) * blockDim.x * gridDim.x;
// printf("%d, %d, %d, %d\n", idx, idy, idz, offset);
// printf("%d, %d, %d, %d, %d\n", idx, idy, idz, offset, n);
// ensure we are within bounds
float x[3] = {a[0] + base[0] * (0.5f + idx), a[1] + base[1] * (0.5f + idy), a[2] + base[2] * (0.5f + idz)};
// printf("%0.10f, %0.10f, %0.10f\n", x[0], x[1],x[2]);
// printf("%0.10f, %0.10f, %0.10f\n", a[0], a[1],a[2]);
if (idx< n) {
dy[offset] = myfunc(x, params) ;
}
}
void cudasafe( hipError_t error, char* message)
{
if(error!=hipSuccess) { fprintf(stderr,"ERROR: %s : %i\n",message,error); exit(-1); }
}
extern "C" double integrate_cu(
int functionCode, // Identifies the function (and dimensionality k)
const float *a, // An array of k lower bounds
const float *b, // An array of k upper bounds
float eps, // A target accuracy
const float *params, // Parameters to function
float *errorEstimate // Estimated error in integral
)
{
int mult = 1; // multiplier
*errorEstimate = 100; // set error to high value
double sum = 0;
double sum_temp = 0;
while (*errorEstimate > eps) {
size_t freeMem = 0;
size_t totalMem = 0;
hipMemGetInfo(&freeMem, &totalMem);
// printf("Memory avaliable: Free: %lu, Total: %lu\n",freeMem, totalMem);
const int nsize = 10000000;
const int sz = sizeof(float) * nsize;
float *devicemem;
hipMalloc((void **)&devicemem, sz);
hipMemset(devicemem, 0, sz); // zeros all the bytes in devicemem
int n;
int k; int p = 0 ;
switch(functionCode){
case 0: k=1; p=0; n=32*mult; break;
case 1: k=2; p=2; n=32*mult; break;
case 2: k=3; p=0; n=8*mult; break;
case 3: k=3; p=1; n=8*mult; break;
case 4: k=3; p=10; n=8*mult; break;
case 5: k=3; p=0; n=8*mult; break;
case 6: k=3; p=2; n=128*mult; break;
case 9: k=3; p=0; n=8*mult; break;
default:
fprintf(stderr, "Invalid function code.");
exit(1);
}
int n0=n, n1=n, n2=n; // By default use n points in each dimension
// Collapse any dimensions we don't use
if(k<3){
n2=1;
}
if(k<2){
n1=1;
}
// size, in bytes, of each vector
size_t bytes = (n0*n1*n2)*sizeof(float);
size_t bytes_temp = (pow(2,k)*n0*n1*n2)*sizeof(float);
float *y = (float*)malloc(bytes);
float *y_temp = (float*)malloc(bytes_temp);
float base[3] = {(b[0] - a[0])/n, (b[1] - a[1])/n, (b[2] - a[2])/n};
float base_temp[3] = {(b[0] - a[0])/(n*2), (b[1] - a[1])/(n*2), (b[2] - a[2])/(n*2)};
// printf("base: %0.10f, %0.10f, %0.10f\n", base[0], base[1], base[2]);
// allocate memory for each vector on GPU
float * dy;
float * dy_temp;
float * dbase;
float * dbase_temp;
float * da;
float * dparams;
// int * dn;
hipMalloc(&dy, bytes);
hipMalloc(&dy_temp, bytes_temp);
hipMalloc(&dbase, 3*sizeof(float));
hipMalloc(&dbase_temp, 3*sizeof(float));
// hipMalloc((void**)&dn, sizeof(int));
hipMalloc(&da, k*sizeof(float));
hipMalloc(&dparams, p*sizeof(float));
hipMemcpy(dbase, base, 3*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dbase_temp, base_temp, 3*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(da, a, k*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dparams, params, p*sizeof(float), hipMemcpyHostToDevice);
// hipMemcpy(dn,&n,sizeof(int), hipMemcpyHostToDevice);
//kernel execute
if (k==1) {
// printf("1D\n");
// number of threads in each thread block
int blockSize = 32;
dim3 dimBlock(blockSize);
// number of thread blocks in grid
int gridSize = (int) ceil((float)n/blockSize);
dim3 dimGrid(gridSize);
hipLaunchKernelGGL(( func_kernel1d), dim3(dimGrid), dim3(dimBlock), 0, 0, dy, da, dbase, dparams, n);
int gridSize_temp = (int) ceil((float)n*2.0/blockSize);
dim3 dimGrid_temp(gridSize_temp);
hipLaunchKernelGGL(( func_kernel1d), dim3(dimGrid_temp), dim3(dimBlock), 0, 0, dy_temp, da, dbase_temp, dparams, 2*n);
}
else if (k==2) {
// number of threads in each thread block
// printf("2D\n");
int blockSize = 32;
dim3 dimBlock(blockSize, blockSize);
// number of thread blocks in grid
int gridSize = (int) ceil((float)n/blockSize);
dim3 dimGrid(gridSize, gridSize);
hipLaunchKernelGGL(( func_kernel2d), dim3(dimGrid), dim3(dimBlock), 0, 0, dy, da, dbase, dparams, n);
int gridSize_temp = (int) ceil((float)n*2.0/blockSize);
dim3 dimGrid_temp(gridSize_temp, gridSize_temp);
hipLaunchKernelGGL(( func_kernel2d), dim3(dimGrid_temp), dim3(dimBlock), 0, 0, dy_temp, da, dbase_temp, dparams, 2*n);
}
else {
// number of threads in each thread block
// printf("3D\n");
int blockSize = 8;
dim3 dimBlock(blockSize, blockSize, blockSize);
// number of thread blocks in grid
int gridSize = (int) ceil((float)n/blockSize);
dim3 dimGrid(gridSize, gridSize, gridSize);
int gridSize_temp = (int) ceil((float)n*2.0/blockSize);
dim3 dimGrid_temp(gridSize_temp, gridSize_temp, gridSize_temp);
if (functionCode==2) {
hipLaunchKernelGGL(( func_kernel3dF2), dim3(dimGrid), dim3(dimBlock), 0, 0, dy, da, dbase, dparams, n);
hipLaunchKernelGGL(( func_kernel3dF2), dim3(dimGrid_temp), dim3(dimBlock), 0, 0, dy_temp, da, dbase_temp, dparams, 2*n);
}
else if (functionCode==3) {
hipLaunchKernelGGL(( func_kernel3dF3), dim3(dimGrid), dim3(dimBlock), 0, 0, dy, da, dbase, dparams, n);
hipLaunchKernelGGL(( func_kernel3dF3), dim3(dimGrid_temp), dim3(dimBlock), 0, 0, dy_temp, da, dbase_temp, dparams, 2*n);
}
else if (functionCode==4) {
hipLaunchKernelGGL(( func_kernel3dF4), dim3(dimGrid), dim3(dimBlock), 0, 0, dy, da, dbase, dparams, n);
hipLaunchKernelGGL(( func_kernel3dF4), dim3(dimGrid_temp), dim3(dimBlock), 0, 0, dy_temp, da, dbase_temp, dparams, 2*n);
}
else if (functionCode==5) {
hipLaunchKernelGGL(( func_kernel3dF5), dim3(dimGrid), dim3(dimBlock), 0, 0, dy, da, dbase, dparams, n);
hipLaunchKernelGGL(( func_kernel3dF5), dim3(dimGrid_temp), dim3(dimBlock), 0, 0, dy_temp, da, dbase_temp, dparams, 2*n);
}
else if (functionCode==6) {
hipLaunchKernelGGL(( func_kernel3dF6), dim3(dimGrid), dim3(dimBlock), 0, 0, dy, da, dbase, dparams, n);
hipLaunchKernelGGL(( func_kernel3dF6), dim3(dimGrid_temp), dim3(dimBlock), 0, 0, dy_temp, da, dbase_temp, dparams, 2*n);
}
else if (functionCode==9) {
hipLaunchKernelGGL(( func_kernel3dF9), dim3(dimGrid), dim3(dimBlock), 0, 0, dy, da, dbase, dparams, n);
hipLaunchKernelGGL(( func_kernel3dF9), dim3(dimGrid_temp), dim3(dimBlock), 0, 0, dy_temp, da, dbase_temp, dparams, 2*n);
}
else {
fprintf(stderr, "Invalid function code.");
}
}
//copy array back
hipMemcpy(y, dy, bytes, hipMemcpyDeviceToHost);
hipMemcpy(y_temp, dy_temp, bytes_temp, hipMemcpyDeviceToHost);
sum = 0;
sum_temp = 0;
for(uint32_t i=0; i<n0*n1*n2; i++) {
sum += y[i];
}
for(uint32_t i=0; i<pow(2,k)*n0*n1*n2; i++) {
sum_temp += y_temp[i];
}
for(int j=0; j<k; j++) {
sum *= base[j];
sum_temp *= base_temp[j];
}
// printf("len: %0.10f\n", pow(2,k)*n0*n1*n2);
// printf("sum: %0.10f\n", sum);
// printf("sum_temp: %0.10f\n", sum_temp);
hipFree(dy);
hipFree(dy_temp);
hipFree(da);
hipFree(dbase);
hipFree(dbase_temp);
hipFree(dparams);
// hipFree(dn);
free(y);
free(y_temp);
hipMemset(devicemem, 0, sz); // zeros all the bytes in devicemem
hipFree(devicemem); // release the scratch allocation before the next refinement pass
*errorEstimate = fabs(sum - sum_temp);
mult += 1;
}
return sum;
}
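/* Hedged usage sketch (added for illustration, not part of the original file): how integrate_cu
is meant to be called. The bounds, target accuracy and parameter values below are illustrative
assumptions only; 3-element bound arrays are passed even for lower-dimensional integrands because
base[]/base_temp[] always read three entries. */
void exampleIntegrateUsage()
{
const float a[3] = {0.0f, 0.0f, 0.0f}; // lower bounds
const float b[3] = {1.0f, 1.0f, 1.0f}; // upper bounds
const float params[2] = {0.5f, 2.0f}; // parameters forwarded to the integrand (functionCode 1 uses p=2)
float err = 0.0f;
double result = integrate_cu(1, a, b, 1e-3f, params, &err); // functionCode 1 -> 2-D integrand F1
printf("integral=%f, estimated error=%f\n", result, err);
}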
|
422f938f7ac9af229f9a955640d901c4177267de.cu
|
#include "functions.h"
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
__global__ void func_kernel1d(float * dy, float* a, float* base, float * params, int n)
{
uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t offset = idx;
// printf("%d, %d, %d, %d\n", idx, idy, idz, offset);
// ensure we are within bounds
float x[3] = {a[0] + base[0] * (0.5f + idx)};
if (idx< n) {
dy[offset] = F0(x, params);
}
}
__global__ void func_kernel2d(float * dy, float* a, float* base, float * params, int n)
{
uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t idy = blockIdx.y * blockDim.y + threadIdx.y;
uint32_t offset = idx + idy * blockDim.x * gridDim.x;
// printf("%d, %d, %d\n", idx, idy, offset);
// ensure we are within bounds
float x[2] = {a[0] + base[0] * (0.5f + idx), a[1] + base[1] * (0.5f + idy)};
if (idx< n && idy<n) {
dy[offset] = F1(x, params);
}
}
__global__ void func_kernel3dF2(float * dy, float* a, float* base, float * params, int n)
{
uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t idy = blockIdx.y * blockDim.y + threadIdx.y;
uint32_t idz = blockIdx.z * blockDim.z + threadIdx.z;
uint32_t offset = idx + (idy + (blockDim.x * idz * gridDim.x)) * blockDim.x * gridDim.x;
// printf("%d, %d, %d, %d\n", idx, idy, idz, offset);
// ensure we are within bounds
float x[3] = {a[0] + base[0] * (0.5f + idx), a[1] + base[1] * (0.5f + idy), a[2] + base[2] * (0.5f + idz)};
if (idx< n) {
dy[offset] = F2(x, params) ;
}
}
__global__ void func_kernel3dF3(float * dy, float* a, float* base, float * params, int n)
{
uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t idy = blockIdx.y * blockDim.y + threadIdx.y;
uint32_t idz = blockIdx.z * blockDim.z + threadIdx.z;
uint32_t offset = idx + (idy + (blockDim.x * idz * gridDim.x)) * blockDim.x * gridDim.x;
// printf("%d, %d, %d, %d\n", idx, idy, idz, offset);
// ensure we are within bounds
float x[3] = {a[0] + base[0] * (0.5f + idx), a[1] + base[1] * (0.5f + idy), a[2] + base[2] * (0.5f + idz)};
if (idx< n) {
dy[offset] = F3(x, params) ;
}
}
__global__ void func_kernel3dF4(float * dy, float* a, float* base, float * params, int n)
{
uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t idy = blockIdx.y * blockDim.y + threadIdx.y;
uint32_t idz = blockIdx.z * blockDim.z + threadIdx.z;
uint32_t offset = idx + (idy + (blockDim.x * idz * gridDim.x)) * blockDim.x * gridDim.x;
// printf("%d, %d, %d, %d\n", idx, idy, idz, offset);
// ensure we are within bounds
float x[3] = {a[0] + base[0] * (0.5f + idx), a[1] + base[1] * (0.5f + idy), a[2] + base[2] * (0.5f + idz)};
if (idx< n) {
dy[offset] = F4(x, params) ;
}
}
__global__ void func_kernel3dF5(float * dy, float* a, float* base, float * params, int n)
{
uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t idy = blockIdx.y * blockDim.y + threadIdx.y;
uint32_t idz = blockIdx.z * blockDim.z + threadIdx.z;
uint32_t offset = idx + (idy + (blockDim.x * idz * gridDim.x)) * blockDim.x * gridDim.x;
// printf("%d, %d, %d, %d\n", idx, idy, idz, offset);
// ensure we are within bounds
float x[3] = {a[0] + base[0] * (0.5f + idx), a[1] + base[1] * (0.5f + idy), a[2] + base[2] * (0.5f + idz)};
if (idx< n) {
dy[offset] = F5(x, params) ;
}
}
__global__ void func_kernel3dF6(float * dy, float* a, float* base, float * params, int n)
{
uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t idy = blockIdx.y * blockDim.y + threadIdx.y;
uint32_t idz = blockIdx.z * blockDim.z + threadIdx.z;
uint32_t offset = idx + (idy + (blockDim.x * idz * gridDim.x)) * blockDim.x * gridDim.x;
// printf("%d, %d, %d, %d\n", idx, idy, idz, offset);
// printf("%d, %d, %d, %d, %d\n", idx, idy, idz, offset, n);
// ensure we are within bounds
float x[3] = {a[0] + base[0] * (0.5f + idx), a[1] + base[1] * (0.5f + idy), a[2] + base[2] * (0.5f + idz)};
if (idx< n) {
dy[offset] = F6(x, params) ;
}
}
__global__ void func_kernel3dF9(float * dy, float* a, float* base, float * params, int n)
{
uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t idy = blockIdx.y * blockDim.y + threadIdx.y;
uint32_t idz = blockIdx.z * blockDim.z + threadIdx.z;
uint32_t offset = idx + (idy + (blockDim.x * idz * gridDim.x)) * blockDim.x * gridDim.x;
// printf("%d, %d, %d, %d\n", idx, idy, idz, offset);
// printf("%d, %d, %d, %d, %d\n", idx, idy, idz, offset, n);
// ensure we are within bounds
float x[3] = {a[0] + base[0] * (0.5f + idx), a[1] + base[1] * (0.5f + idy), a[2] + base[2] * (0.5f + idz)};
// printf("%0.10f, %0.10f, %0.10f\n", x[0], x[1],x[2]);
// printf("%0.10f, %0.10f, %0.10f\n", a[0], a[1],a[2]);
if (idx< n) {
dy[offset] = myfunc(x, params) ;
}
}
void cudasafe( cudaError_t error, char* message)
{
if(error!=cudaSuccess) { fprintf(stderr,"ERROR: %s : %i\n",message,error); exit(-1); }
}
extern "C" double integrate_cu(
int functionCode, // Identifies the function (and dimensionality k)
const float *a, // An array of k lower bounds
const float *b, // An array of k upper bounds
float eps, // A target accuracy
const float *params, // Parameters to function
float *errorEstimate // Estimated error in integral
)
{
int mult = 1; // multiplier
*errorEstimate = 100; // set error to high value
double sum = 0;
double sum_temp = 0;
while (*errorEstimate > eps) {
size_t freeMem = 0;
size_t totalMem = 0;
cudaMemGetInfo(&freeMem, &totalMem);
// printf("Memory avaliable: Free: %lu, Total: %lu\n",freeMem, totalMem);
const int nsize = 10000000;
const int sz = sizeof(float) * nsize;
float *devicemem;
cudaMalloc((void **)&devicemem, sz);
cudaMemset(devicemem, 0, sz); // zeros all the bytes in devicemem
int n;
int k; int p = 0 ;
switch(functionCode){
case 0: k=1; p=0; n=32*mult; break;
case 1: k=2; p=2; n=32*mult; break;
case 2: k=3; p=0; n=8*mult; break;
case 3: k=3; p=1; n=8*mult; break;
case 4: k=3; p=10; n=8*mult; break;
case 5: k=3; p=0; n=8*mult; break;
case 6: k=3; p=2; n=128*mult; break;
case 9: k=3; p=0; n=8*mult; break;
default:
fprintf(stderr, "Invalid function code.");
exit(1);
}
int n0=n, n1=n, n2=n; // By default use n points in each dimension
// Collapse any dimensions we don't use
if(k<3){
n2=1;
}
if(k<2){
n1=1;
}
// size, in bytes, of each vector
size_t bytes = (n0*n1*n2)*sizeof(float);
size_t bytes_temp = (pow(2,k)*n0*n1*n2)*sizeof(float);
float *y = (float*)malloc(bytes);
float *y_temp = (float*)malloc(bytes_temp);
float base[3] = {(b[0] - a[0])/n, (b[1] - a[1])/n, (b[2] - a[2])/n};
float base_temp[3] = {(b[0] - a[0])/(n*2), (b[1] - a[1])/(n*2), (b[2] - a[2])/(n*2)};
// printf("base: %0.10f, %0.10f, %0.10f\n", base[0], base[1], base[2]);
// allocate memory for each vector on GPU
float * dy;
float * dy_temp;
float * dbase;
float * dbase_temp;
float * da;
float * dparams;
// int * dn;
cudaMalloc(&dy, bytes);
cudaMalloc(&dy_temp, bytes_temp);
cudaMalloc(&dbase, 3*sizeof(float));
cudaMalloc(&dbase_temp, 3*sizeof(float));
// cudaMalloc((void**)&dn, sizeof(int));
cudaMalloc(&da, k*sizeof(float));
cudaMalloc(&dparams, p*sizeof(float));
cudaMemcpy(dbase, base, 3*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dbase_temp, base_temp, 3*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(da, a, k*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dparams, params, p*sizeof(float), cudaMemcpyHostToDevice);
// cudaMemcpy(dn,&n,sizeof(int), cudaMemcpyHostToDevice);
//kernel execute
if (k==1) {
// printf("1D\n");
// number of threads in each thread block
int blockSize = 32;
dim3 dimBlock(blockSize);
// number of thread blocks in grid
int gridSize = (int) ceil((float)n/blockSize);
dim3 dimGrid(gridSize);
func_kernel1d<<<dimGrid, dimBlock>>>(dy, da, dbase, dparams, n);
int gridSize_temp = (int) ceil((float)n*2.0/blockSize);
dim3 dimGrid_temp(gridSize_temp);
func_kernel1d<<<dimGrid_temp, dimBlock>>>(dy_temp, da, dbase_temp, dparams, 2*n);
}
else if (k==2) {
// number of threads in each thread block
// printf("2D\n");
int blockSize = 32;
dim3 dimBlock(blockSize, blockSize);
// number of thread blocks in grid
int gridSize = (int) ceil((float)n/blockSize);
dim3 dimGrid(gridSize, gridSize);
func_kernel2d<<<dimGrid, dimBlock>>>(dy, da, dbase, dparams, n);
int gridSize_temp = (int) ceil((float)n*2.0/blockSize);
dim3 dimGrid_temp(gridSize_temp, gridSize_temp);
func_kernel2d<<<dimGrid_temp, dimBlock>>>(dy_temp, da, dbase_temp, dparams, 2*n);
}
else {
// number of threads in each thread block
// printf("3D\n");
int blockSize = 8;
dim3 dimBlock(blockSize, blockSize, blockSize);
// number of thread blocks in grid
int gridSize = (int) ceil((float)n/blockSize);
dim3 dimGrid(gridSize, gridSize, gridSize);
int gridSize_temp = (int) ceil((float)n*2.0/blockSize);
dim3 dimGrid_temp(gridSize_temp, gridSize_temp, gridSize_temp);
if (functionCode==2) {
func_kernel3dF2<<<dimGrid, dimBlock>>>(dy, da, dbase, dparams, n);
func_kernel3dF2<<<dimGrid_temp, dimBlock>>>(dy_temp, da, dbase_temp, dparams, 2*n);
}
else if (functionCode==3) {
func_kernel3dF3<<<dimGrid, dimBlock>>>(dy, da, dbase, dparams, n);
func_kernel3dF3<<<dimGrid_temp, dimBlock>>>(dy_temp, da, dbase_temp, dparams, 2*n);
}
else if (functionCode==4) {
func_kernel3dF4<<<dimGrid, dimBlock>>>(dy, da, dbase, dparams, n);
func_kernel3dF4<<<dimGrid_temp, dimBlock>>>(dy_temp, da, dbase_temp, dparams, 2*n);
}
else if (functionCode==5) {
func_kernel3dF5<<<dimGrid, dimBlock>>>(dy, da, dbase, dparams, n);
func_kernel3dF5<<<dimGrid_temp, dimBlock>>>(dy_temp, da, dbase_temp, dparams, 2*n);
}
else if (functionCode==6) {
func_kernel3dF6<<<dimGrid, dimBlock>>>(dy, da, dbase, dparams, n);
func_kernel3dF6<<<dimGrid_temp, dimBlock>>>(dy_temp, da, dbase_temp, dparams, 2*n);
}
else if (functionCode==9) {
func_kernel3dF9<<<dimGrid, dimBlock>>>(dy, da, dbase, dparams, n);
func_kernel3dF9<<<dimGrid_temp, dimBlock>>>(dy_temp, da, dbase_temp, dparams, 2*n);
}
else {
fprintf(stderr, "Invalid function code.");
}
}
//copy array back
cudaMemcpy(y, dy, bytes, cudaMemcpyDeviceToHost);
cudaMemcpy(y_temp, dy_temp, bytes_temp, cudaMemcpyDeviceToHost);
sum = 0;
sum_temp = 0;
for(uint32_t i=0; i<n0*n1*n2; i++) {
sum += y[i];
}
for(uint32_t i=0; i<pow(2,k)*n0*n1*n2; i++) {
sum_temp += y_temp[i];
}
for(int j=0; j<k; j++) {
sum *= base[j];
sum_temp *= base_temp[j];
}
// printf("len: %0.10f\n", pow(2,k)*n0*n1*n2);
// printf("sum: %0.10f\n", sum);
// printf("sum_temp: %0.10f\n", sum_temp);
cudaFree(dy);
cudaFree(dy_temp);
cudaFree(da);
cudaFree(dbase);
cudaFree(dbase_temp);
cudaFree(dparams);
// cudaFree(dn);
free(y);
free(y_temp);
cudaMemset(devicemem, 0, sz); // zeros all the bytes in devicemem
cudaFree(devicemem); // release the scratch allocation before the next refinement pass
*errorEstimate = fabs(sum - sum_temp);
mult += 1;
}
return sum;
}
|
9d9d059bf5cc290d3011cdd59450eea2f680c7bf.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "copyCol.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *out = NULL;
hipMalloc(&out, XSIZE*YSIZE*sizeof(float));
float *in = NULL;
hipMalloc(&in, XSIZE*YSIZE*sizeof(float));
const int nx = 1;
const int ny = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(( copyCol), dim3(gridBlock),dim3(threadBlock), 0, 0, out,in,nx,ny);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(( copyCol), dim3(gridBlock),dim3(threadBlock), 0, 0, out,in,nx,ny);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(( copyCol), dim3(gridBlock),dim3(threadBlock), 0, 0, out,in,nx,ny);
}
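//Note (hedged): kernel launches are asynchronous, so without a hipDeviceSynchronize() before
//taking 'end' the measured interval largely reflects launch/queueing overhead rather than the
//full kernel execution time.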
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
hipFree(out);
hipFree(in);
}
}}
|
9d9d059bf5cc290d3011cdd59450eea2f680c7bf.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "copyCol.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *out = NULL;
cudaMalloc(&out, XSIZE*YSIZE*sizeof(float));
float *in = NULL;
cudaMalloc(&in, XSIZE*YSIZE*sizeof(float));
const int nx = 1;
const int ny = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
copyCol<<<gridBlock,threadBlock>>>(out,in,nx,ny);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
copyCol<<<gridBlock,threadBlock>>>(out,in,nx,ny);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
copyCol<<<gridBlock,threadBlock>>>(out,in,nx,ny);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
cudaFree(out);
cudaFree(in);
}
}}
|
e2c40d82b99001530dced11f089948ce23eb5581.hip
|
// !!! This is a file automatically generated by hipify!!!
#ifndef _USE_MATH_DEFINES
#define _USE_MATH_DEFINES
#include <math.h>
#endif
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <vector>
#include <assert.h>
#include "CudaHOG.h"
#include "gpu_utils.h"
#include "cuPrintf.hip"
#include "persondetectorwt.tcc"
#include "Classifier.h"
using namespace std;
extern const double PERSON_WEIGHT_VEC[];
extern const int PERSON_WEIGHT_VEC_LENGTH;
#define fmin( a, b ) ( (a)<(b)? (a):(b) )
template<typename T> inline bool isnan(T value){
return value != value;
}
#define M_PI_DIV_2 2.5066282746310005024 // note: numerically this is sqrt(2*pi), used to normalize the Gaussians below
__constant__ double SVM_VECTOR[PERSON_WEIGHT_VEC_LENGTH];
__constant__ float DESCRIPTOR_GAUSS[ 16 * 16 ];
float hostGaussian( float x, float y, float mx, float my, float sigma ) {
float dist_, dx = x - mx, dy = y - my;
dist_ = sqrt( ( dx * dx ) + ( dy * dy ) );
return exp( -dist_ * dist_ / ( sigma * sigma ) ) / ( M_PI_DIV_2 * sigma );
}
#define STEP_WIDTH 8
#define STEP_HEIGHT 16
CudaHOG::CudaHOG(int imageWidth_, int imageHeight_, int frameW_ , int frameH_ , bool oneThreadWindow_, Classifier *classifier_, bool fullCircle_ ){
assert( hipFree( 0 ) == hipSuccess );
this->imageWidth = imageWidth_;
this->imageHeight = imageHeight_;
this->frameW = frameW_;
this->frameH = frameH_;
//Best parameter values found by Dalal (standard HOG settings)...
this->blockStride = 8;
this->descWidth = 16;
this->descHeight = 16;
this->blockWidth = 2;
this->blockHeight = 2;
this->numHistBins = 9;
this->fullCircle = fullCircle_;
this->oneThreadWindow = oneThreadWindow_;
this->classifier = classifier_;
this->blocks = (( imageHeight - frameH )/ STEP_HEIGHT ) + 1;
this->threads = (( imageWidth - frameW )/ STEP_WIDTH ) + 1;
if( this->fullCircle ) this->histBinSpace = (int)( 360 / this->numHistBins );
else this->histBinSpace = (int)( 180 / this->numHistBins );
this->cellWidth = (int)( (float) descWidth / (float) blockWidth );
///allocate final feature vector
this->numBlocksW = int( (float) ( this->frameW - this->descWidth + this->blockStride ) / (float) this->blockStride );
this->numBlocksH = int( (float) ( this->frameH - this->descHeight + this->blockStride ) / (float) this->blockStride );
this->blockSize = this->numHistBins * this->blockWidth * this->blockHeight;
this->hogSize = this->blockSize * this->numBlocksW * this->numBlocksH;
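//Worked example (assuming the usual 64x128 detection window with the defaults above):
//numBlocksW = (64 - 16 + 8)/8 = 7, numBlocksH = (128 - 16 + 8)/8 = 15, blockSize = 9*2*2 = 36,
//so hogSize = 36*7*15 = 3780, which matches the commented-out "float hog_row[ 3780 ]" further down.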
/* Allocating the HOG feature vector */
size_t hogSizeWidth = this->hogSize * sizeof( float );
size_t hogSizeHeight = blocks*threads;
//hipMalloc( (void**)&this->device_hog, hogSizeAlloc );
assert( hipMallocPitch( (void**)&this->device_hog, &hogS.pitch_hog, hogSizeWidth, hogSizeHeight) == hipSuccess );
assert( hipMemset2D( this->device_hog, hogS.pitch_hog, 0, hogSizeWidth, hogSizeHeight ) == hipSuccess );
/* Allocating the magnitude and gradient buffers */
size_t magGradSize = sizeof( float ) * this->imageWidth * this->imageHeight ;
assert( hipMalloc( (void**)&this->device_mag, magGradSize ) == hipSuccess );
assert( hipMalloc( (void**)&this->device_grad, magGradSize ) == hipSuccess );
assert( hipMemset( this->device_mag, 0, magGradSize ) == hipSuccess );
assert( hipMemset( this->device_grad, 0, magGradSize ) == hipSuccess );
//size_t descriptorWidth = sizeof(float) * this->numHistBins * this->blockHeight * this->blockWidth;
//size_t descriptorHeight= blocks*threads;
//if( oneThreadWindow ){
// assert( hipMallocPitch( (void**)&device_desc, &hogS.pitch_descriptor, descriptorWidth, descriptorHeight ) == hipSuccess );
// assert( hipMemset2D( this->device_desc, hogS.pitch_descriptor, 0, descriptorWidth, descriptorHeight ) == hipSuccess );
//}
assert( hipMalloc( (void**)&gray, this->imageHeight * imageWidth ) == hipSuccess );
svm.linearbias_ = 6.6657914910925990525925044494215;
//assert( hipMalloc( (void**)&svm.linearwt_, PERSON_WEIGHT_VEC_LENGTH * sizeof(double) ) == hipSuccess );
hipMemcpyToSymbol( SVM_VECTOR, PERSON_WEIGHT_VEC, PERSON_WEIGHT_VEC_LENGTH* sizeof(double) );
assert( hipMalloc( (void**)&svm.scores, blocks*threads * sizeof(double) ) == hipSuccess );
assert( hipMemset( svm.scores, 0, blocks*threads * sizeof(double) ) == hipSuccess );
int* angle;
assert( hipMalloc( (void**)&angle, this->imageWidth * this->imageHeight * sizeof(int) ) == hipSuccess );
assert( hipMemset( angle, 0, this->imageWidth * this->imageHeight * sizeof(int) ) == hipSuccess );
hogS.angle = angle;
hogS.blockHeight = blockHeight;
hogS.blockSize = blockSize;
hogS.blockStride = blockStride;
hogS.blockWidth = blockWidth;
hogS.cellWidth = cellWidth;
hogS.descHeight = descHeight;
hogS.descWidth = descWidth;
hogS.frameH = frameH;
hogS.frameW = frameW;
hogS.histBinSpace = histBinSpace;
hogS.imageHeight = imageHeight;
hogS.imageWidth = imageWidth;
hogS.numBlocksH = numBlocksH;
hogS.numBlocksW = numBlocksW;
hogS.numHistBins = numHistBins;
hogS.mag = device_mag;
hogS.grad = device_grad;
hogS.descriptor = device_desc;
hogS.hog = device_hog;
hogS.hogSize = hogSize;
hogS.bWHSize = blockWidth * blockHeight;
/* The Gaussian weighting was being recomputed identically for every block; as an optimization,
a constant-memory array with the precomputed Gaussian values is used instead */
float gauss[ 16 * 16];
for( int i = 0; i < 16; i++)
for(int j = 0; j < 16; j++){
gauss[i*16 +j] = hostGaussian( float(i), float(j), 8, 8, 8 );
//printf("Gaussian[%d][%d] = %f\n",i,j,gauss[i*16+j]);
}
assert( hipMemcpyToSymbol( DESCRIPTOR_GAUSS, gauss, 16*16*sizeof(float) ) == hipSuccess );
//cudaPrintfInit();
}
CudaHOG::~CudaHOG(){
assert( hipFree( this->hogS.angle ) == hipSuccess );
assert( hipFree( this->hogS.mag ) == hipSuccess );
assert( hipFree( this->hogS.grad ) == hipSuccess );
//if( this->oneThreadWindow ) assert( hipFree( this->hogS.descriptor ) == hipSuccess );
assert( hipFree( this->hogS.hog ) == hipSuccess );
assert( hipFree( this->gray ) == hipSuccess );
assert( hipFree( svm.scores ) == hipSuccess );
//cudaPrintfEnd();
assert( hipDeviceReset() == hipSuccess );
}
__device__ float deviceGaussian( float x, float y, float mx, float my, float sigma ) {
float dist_, dx = x - mx, dy = y - my;
dist_ = sqrt( ( dx * dx ) + ( dy * dy ) );
//dist_ = dx + dy;
return exp( -dist_ * dist_ / ( sigma * sigma ) ) / ( M_PI_DIV_2 * sigma );
}
__device__ void deviceCircularInterpBin( DeviceHOG hog, float value, int curBin, float *outCoef, int *outInterpBin ) {
int halfSize = int( hog.histBinSpace >> 1 );
if( value > halfSize ) { // range: (halfSize, binsize]
*outInterpBin = ( curBin + 1 ) % hog.numHistBins;
*outCoef = 1.0 - ( ( value - halfSize ) / hog.histBinSpace );
} else { // range: [0, halfsize]
*outInterpBin = ( curBin - 1 ) % hog.numHistBins;
if( *outInterpBin < 0 ) *outInterpBin += hog.numHistBins;
*outCoef = ( ( value + halfSize ) / hog.histBinSpace );
}
}
#define DESC_AT_ELEMENT(desc,i,j,k) ( desc[ (( (i) * ( (hog.blockWidth) * (hog.blockHeight) ) + (j) * (hog.blockWidth) + (k)) ) ] )
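//Layout note: the block descriptor is addressed as [bin][cellRow][cellCol]; element (i,j,k)
//lives at offset i*(blockWidth*blockHeight) + j*blockWidth + k, giving
//numHistBins * blockHeight * blockWidth floats per block descriptor (2*2*9 = 36 here).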
__device__ void deviceCalculateL2Hys( DeviceHOG hog , float* descriptor) {
float norm = 0.0, eps = 1.0;
//compute norm
for( int i = 0; i < hog.numHistBins; i++ )
for( int j = 0; j < hog.blockHeight; j++ )
for( int k = 0; k < hog.blockWidth; k++ )
norm += DESC_AT_ELEMENT( descriptor,i,j,k ) * DESC_AT_ELEMENT( descriptor,i,j,k );
//L2-norm
norm = sqrt( norm + eps );
if ( !norm ) norm = 1.0;
// Normalize and threshold ...
for( int i = 0; i < hog.numHistBins; i++ )
for( int j = 0; j < hog.blockHeight; j++ )
for( int k = 0; k < hog.blockWidth; k++ ) {
DESC_AT_ELEMENT( descriptor,i,j,k ) /= norm;
if( DESC_AT_ELEMENT( descriptor,i,j,k ) > 0.2 ) DESC_AT_ELEMENT( descriptor,i,j,k ) = 0.2;
}
norm = 0.0;
for( int i = 0; i < hog.numHistBins; i++ )
for( int j = 0; j < hog.blockHeight; j++ )
for( int k = 0; k < hog.blockWidth; k++ )
norm += DESC_AT_ELEMENT( descriptor,i,j,k ) * DESC_AT_ELEMENT( descriptor,i,j,k );
norm = sqrt( norm + eps );
if ( !norm ) norm = 1.0;
// and normalize again
for( int i = 0; i < hog.numHistBins; i++ )
for( int j = 0; j < hog.blockHeight; j++ )
for( int k = 0; k < hog.blockWidth; k++ )
DESC_AT_ELEMENT( descriptor,i,j,k ) /= norm;
}
__device__ void deviceWriteToVector( DeviceHOG hog, float *output, float* descriptor ) {
float feat = 0.0;
for( int b = 0; b < hog.numHistBins; b++ ) {
for( int y = 0; y < hog.blockHeight; y++ ) {
for( int x = 0; x < hog.blockWidth; x++ ) {
feat = DESC_AT_ELEMENT( descriptor,b,y,x );
if( isnan( feat ) ) {
feat = 0;
}
*output++ = feat;
}
}
}
}
__device__ void deviceComputeDescriptor( int bx, int by, DeviceHOG hog, float* descriptor ){
float curGrad = 0.0, curMag = 0.0, gWeight = 1.0,
cellWeight = 1.0, binWeight = 0.0,
dist = 0.0;
int angle = 0, iBin = 0,
stepx = 0, stepy = 0,
dx = 0, dy = 0;
for( int y = 0; y < hog.descHeight; y++ ) {
for( int x = 0; x < hog.descWidth; x++ ) {
int offset = ((by+y)*hog.imageWidth) + bx+x;
curGrad = hog.grad[ offset ];
curMag = hog.mag[ offset ];
angle = hog.angle[ offset ];
gWeight = DESCRIPTOR_GAUSS[ y * hog.descWidth + x ];
// histogram bin weighting
iBin = 0; binWeight = 0;
int halfSize = int( hog.histBinSpace >> 1 );
float value = curGrad - hog.histBinSpace * angle;
if( value > halfSize ) { // range: (halfSize, binsize]
iBin = ( angle + 1 ) % hog.numHistBins;
binWeight = 1.0 - ( ( value - halfSize ) / hog.histBinSpace );
} else { // range: [0, halfsize]
iBin = ( angle - 1 ) % hog.numHistBins;
if( iBin < 0 ) iBin += hog.numHistBins;
binWeight = ( ( value + halfSize ) / hog.histBinSpace );
}
int offset1_ = (angle) * ( hog.bWHSize );
int offset2_ = (iBin) * ( hog.bWHSize );
float gW_curMag = gWeight * curMag;
float bin_gW_curMag = gW_curMag * binWeight;
float oneMinusbin_gW_curMag = gW_curMag * ( 1.0 - binWeight );
for( int iy = 0; iy < hog.blockHeight; iy++ ) {
for( int ix = 0; ix < hog.blockWidth; ix++ ) {
dx = x - ( 8 + ( ix << 3 ) );
dy = y - ( 8 + ( iy << 3 ) );
dist = sqrt( (float) ( dx * dx ) + ( dy * dy ) );
//dist = abs(dx + dy);
// //cell weighting
cellWeight = 1.0 - fmin( (float) ( (float) dist / (float) hog.cellWidth ), (float) 1.0 );
int offset1 = (offset1_ + (iy) * (hog.blockWidth) + (ix)) ;
int offset2 = (( offset2_ + (iy) * (hog.blockWidth) + (ix)) );
descriptor[offset1] += bin_gW_curMag * cellWeight;
descriptor[offset2] += oneMinusbin_gW_curMag * cellWeight;
}
}
}
}
}
#define BLOCK_DESCRIPTOR_SIZE 2*2*9
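// One thread per detection window: each thread walks every block of its window, builds and normalizes the block descriptors, and accumulates the linear SVM score for that window.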
__global__ void cudaSlidingWindow( DeviceHOG hog, LINEAR_CLASSIFY_SVM svm ){
int py = blockIdx.x * STEP_HEIGHT;
int px = threadIdx.x * STEP_WIDTH;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
//cuPrintf("LinearBias %f\n",svm.linearbias_);
//float hog_row[ 3780 ];
size_t descriptorSize = sizeof( float ) * hog.numHistBins * hog.blockHeight * hog.blockWidth ;
if( (py < hog.imageHeight - hog.frameH + 1 ) && ( px < hog.imageWidth - hog.frameW + 1) ){
int i = 0;
float descriptor_row[BLOCK_DESCRIPTOR_SIZE];
float* hog_row = (float*)((char*)hog.hog + tid * hog.pitch_hog);
for( int by = 0; by <= hog.frameH - hog.descHeight; by += hog.blockStride ) {
for( int bx = 0; bx <= hog.frameW - hog.descWidth; bx += hog.blockStride ) {
memset( descriptor_row, 0, descriptorSize );
deviceComputeDescriptor( bx+px, by+py, hog, descriptor_row );
deviceCalculateL2Hys( hog ,descriptor_row);
deviceWriteToVector( hog, &hog_row[i], descriptor_row );
i += hog.blockSize;
}
}
double sum = 0;
for (int i= 0; i< hog.hogSize; ++i)
sum += SVM_VECTOR[i]*hog_row[i];
svm.scores[tid] = sum - svm.linearbias_;
//cuPrintf( "Score[%d] - %f\n",tid,sum);
}
}
/* One thread per HOG block */
__global__ void cudaHogThreadBlock( DeviceHOG hog, LINEAR_CLASSIFY_SVM svm ){
int py = blockIdx.y * STEP_HEIGHT;
int px = blockIdx.x * STEP_WIDTH;
int tid = blockIdx.y * gridDim.x + blockIdx.x;
int by = threadIdx.y * hog.blockStride;
int bx = threadIdx.x * hog.blockStride;
int i = (threadIdx.y * blockDim.x + threadIdx.x) * hog.blockSize;
//cuPrintf("LinearBias %f\n",svm.linearbias_);
//float hog_row[ 3780 ];
size_t descriptorSize = sizeof( float ) * BLOCK_DESCRIPTOR_SIZE ;
if( (py < hog.imageHeight - hog.frameH + 1 ) && ( px < hog.imageWidth - hog.frameW + 1) ){
float descriptor_row[BLOCK_DESCRIPTOR_SIZE];
float* hog_row = (float*)((char*)hog.hog + tid * hog.pitch_hog);
memset( descriptor_row, 0, descriptorSize );
deviceComputeDescriptor( bx+px, by+py, hog, descriptor_row );
deviceCalculateL2Hys( hog ,descriptor_row);
deviceWriteToVector( hog, &hog_row[i], descriptor_row );
// }
//}
//__syncthreads();
//if( threadIdx.x == 0 && threadIdx.y == 0 ){
// double sum = 0;
// for (int i= 0; i< hog.hogSize; ++i)
// sum += SVM_VECTOR[i]*hog_row[i];
// svm.scores[tid] = sum - svm.linearbias_;
// cuPrintf( "Score[%d] - %f\n",tid,sum);
//}
}
}
__global__ void showMatrixLinear(double *device){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.x;
int j = threadIdx.x;
cuPrintf(" show matrixLinear[%d] = %f \n",tid,device[tid]);
}
void CudaHOG::extractFeatures( unsigned char *data, int channels ){
assert( hipDeviceSynchronize() == hipSuccess );
computeGradients( hogS, data, channels );
//printf(" blocks %d threads %d \n",blocks, threads );
if( this->oneThreadWindow ){
hipLaunchKernelGGL(( cudaSlidingWindow), dim3(blocks), dim3(threads), 0, 0, hogS , svm );
}else{
int threads_height = (( hogS.frameH - hogS.descHeight )/ hogS.blockStride ) + 1;
int threads_width = (( hogS.frameW - hogS.descWidth )/ hogS.blockStride ) + 1;
dim3 blocks_windows( threads, blocks );
dim3 threads_blocks( threads_width, threads_height );
hipLaunchKernelGGL(( cudaHogThreadBlock), dim3(blocks_windows), dim3(threads_blocks) , 0, 0, hogS, svm );
}
//cudaPrintfDisplay( stdout );
}
/* ComputeGradientsInCuda */
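// Central-difference gradients, launched with one block per image row and one thread per column; writes per-pixel magnitude, orientation (wrapped to 0-180 degrees in the unsigned case) and the quantized histogram bin.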
__global__ void cudaComputeGradients( DeviceHOG hog, unsigned char* gray, int width, int height, float *mag,float *grad ){
bool fullCircle = false;
int p1 = 0, p2 = 0, p3 = 0, p4 = 0;
int hor = 0, ver = 0;
float curGrad = 0.0, curMag = 0.0;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if( ( blockIdx.x > 1 && blockIdx.x < (height - 1) )
&& ( threadIdx.x > 1 && threadIdx.x < (width - 1) ) ){
if( tid < width * height ){
p1 = (int) gray[ blockIdx.x*blockDim.x + threadIdx.x+1 ];
p2 = (int) gray[ blockIdx.x*blockDim.x + threadIdx.x-1 ];
p3 = (int) gray[ (blockIdx.x-1)*blockDim.x + threadIdx.x ];
p4 = (int) gray[ (blockIdx.x+1)*blockDim.x + threadIdx.x ];
hor = p1 - p2;
ver = p3 - p4;
curMag = (float) sqrt( (double)( hor * hor ) + ( ver * ver ) );
mag[tid] = curMag;
// make sure we don't divide by zero when calculating the gradient orientation
if( curMag > 0.0 ) {
curGrad = ( (float) ( (float) 180 * acos( (float) hor / (float) curMag ) ) / (float) M_PI );
if( !fullCircle )
curGrad = float( (int) curGrad % 180 ); //if unsigned, then range it over 0-180 (pedestrian)
grad[tid]=curGrad;
}else {
grad[tid]=0;
}
int angle = int( curGrad / hog.histBinSpace );
hog.angle[tid] = ( angle >= hog.numHistBins ? hog.numHistBins - 1 : angle );
}
}
}
__global__ void cudaBGR2Gray_(unsigned char* bgr, unsigned char* gray,int width, int height){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if( tid < width * height ){
gray[tid] = ( 2989 * bgr[tid*3+2] + 5870 * bgr[tid*3+1] + 1140 * bgr[tid*3+0] ) / 10000;
}
}
void CudaHOG::computeGradients(DeviceHOG hog, unsigned char* data, int channels){
unsigned char *bgr;
if( channels == 1 ){
assert( hipMemcpy( gray, data, this->imageHeight * imageWidth, hipMemcpyHostToDevice ) == hipSuccess );
}else{
assert( hipMalloc( (void**)&bgr, this->imageHeight * imageWidth * channels ) == hipSuccess );
assert( hipMemcpy( bgr, data, this->imageHeight * imageWidth * channels, hipMemcpyHostToDevice ) == hipSuccess );
hipLaunchKernelGGL(( cudaBGR2Gray_), dim3(imageHeight), dim3(imageWidth) , 0, 0, bgr, gray, this->imageWidth, this->imageHeight );
assert( hipFree( bgr ) == hipSuccess );
}
hipLaunchKernelGGL(( cudaComputeGradients), dim3(imageHeight), dim3(imageWidth) , 0, 0, hog, gray, this->imageWidth, this->imageHeight, this->device_mag, this->device_grad );
}
float* CudaHOG::getMagnitudeN(){
float *host_magnitude = new float[ this->imageWidth * this->imageHeight ];
assert( hipMemcpy( host_magnitude, this->device_mag, this->imageWidth * this->imageHeight * sizeof( float ), hipMemcpyDeviceToHost ) == hipSuccess );
return host_magnitude;
}
float* CudaHOG::getGradientN(){
float *host_gradient = new float[ this->imageWidth * this->imageHeight ];
assert( hipMemcpy( host_gradient, this->device_grad, this->imageWidth * this->imageHeight * sizeof( float ), hipMemcpyDeviceToHost ) == hipSuccess );
return host_gradient;
}
void CudaHOG::getFoundLocations( vector<CudaPoint> &founds ){
float **hogVector = new float*[ getWindowsCount() ];
for( int i = 0; i < getWindowsCount(); i++ ){
hogVector[i] = new float[ getHOGVectorSize() ];
}
getHOGVectorN( hogVector );
int px=0,py=0,count=0;
for( int i = 0; i < getBlocksCount(); i++){
for( int j = 0; j < getThreadsPerBlockCount(); j++ ){
px = j * STEP_WIDTH;
py = i * STEP_HEIGHT;
float scoreHOGSVM = classifier->run( hogVector[count++], getHOGVectorSize(), LIGHTSVM );
if( scoreHOGSVM > 0 ){
founds.push_back(CudaPoint(px,py));
}
}
}
for( int i = 0; i < getWindowsCount(); i++ ) delete[] hogVector[i];
delete[] hogVector;
}
void CudaHOG::getHOGVectorN(float **matrix){
matrixCpyDeviceToHost( matrix, device_hog,hogS.pitch_hog, hogSize, getWindowsCount());
}
double *CudaHOG::getScoresN(){
double *scores = new double[ getWindowsCount() ];
assert( hipMemcpy( scores, svm.scores, getWindowsCount() * sizeof(double), hipMemcpyDeviceToHost ) == hipSuccess );
return scores;
}
|
e2c40d82b99001530dced11f089948ce23eb5581.cu
|
#ifndef _USE_MATH_DEFINES
#define _USE_MATH_DEFINES
#include <math.h>
#endif
#include <stdio.h>
#include <cuda_runtime.h>
#include <vector>
#include <assert.h>
#include "CudaHOG.h"
#include "gpu_utils.h"
#include "cuPrintf.cu"
#include "persondetectorwt.tcc"
#include "Classifier.h"
using namespace std;
extern const double PERSON_WEIGHT_VEC[];
extern const int PERSON_WEIGHT_VEC_LENGTH;
#define fmin( a, b ) ( (a)<(b)? (a):(b) )
template<typename T> inline bool isnan(T value){
return value != value;
}
#define M_PI_DIV_2 2.5066282746310005024
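// Note: despite its name, this constant is sqrt(2*pi) ~ 2.5066, the normalization factor used by hostGaussian/deviceGaussian.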
__constant__ double SVM_VECTOR[PERSON_WEIGHT_VEC_LENGTH];
__constant__ float DESCRIPTOR_GAUSS[ 16 * 16 ];
float hostGaussian( float x, float y, float mx, float my, float sigma ) {
float dist_, dx = x - mx, dy = y - my;
dist_ = sqrt( ( dx * dx ) + ( dy * dy ) );
return exp( -dist_ * dist_ / ( sigma * sigma ) ) / ( M_PI_DIV_2 * sigma );
}
#define STEP_WIDTH 8
#define STEP_HEIGHT 16
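// Detection windows are slid with an 8-pixel horizontal stride and a 16-pixel vertical stride.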
CudaHOG::CudaHOG(int imageWidth_, int imageHeight_, int frameW_ , int frameH_ , bool oneThreadWindow_, Classifier *classifier_, bool fullCircle_ ){
assert( cudaFree( 0 ) == cudaSuccess );
this->imageWidth = imageWidth_;
this->imageHeight = imageHeight_;
this->frameW = frameW_;
this->frameH = frameH_;
//Best parameter values reported by Dalal...
this->blockStride = 8;
this->descWidth = 16;
this->descHeight = 16;
this->blockWidth = 2;
this->blockHeight = 2;
this->numHistBins = 9;
this->fullCircle = fullCircle_;
this->oneThreadWindow = oneThreadWindow_;
this->classifier = classifier_;
this->blocks = (( imageHeight - frameH )/ STEP_HEIGHT ) + 1;
this->threads = (( imageWidth - frameW )/ STEP_WIDTH ) + 1;
if( this->fullCircle ) this->histBinSpace = (int)( 360 / this->numHistBins );
else this->histBinSpace = (int)( 180 / this->numHistBins );
this->cellWidth = (int)( (float) descWidth / (float) blockWidth );
///allocate final feature vector
this->numBlocksW = int( (float) ( this->frameW - this->descWidth + this->blockStride ) / (float) this->blockStride );
this->numBlocksH = int( (float) ( this->frameH - this->descHeight + this->blockStride ) / (float) this->blockStride );
this->blockSize = this->numHistBins * this->blockWidth * this->blockHeight;
this->hogSize = this->blockSize * this->numBlocksW * this->numBlocksH;
/* Allocating the HOG feature vector */
size_t hogSizeWidth = this->hogSize * sizeof( float );
size_t hogSizeHeight = blocks*threads;
//cudaMalloc( (void**)&this->device_hog, hogSizeAlloc );
assert( cudaMallocPitch( (void**)&this->device_hog, &hogS.pitch_hog, hogSizeWidth, hogSizeHeight) == cudaSuccess );
assert( cudaMemset2D( this->device_hog, hogS.pitch_hog, 0, hogSizeWidth, hogSizeHeight ) == cudaSuccess );
/* Allocating magnitudes and gradients */
size_t magGradSize = sizeof( float ) * this->imageWidth * this->imageHeight ;
assert( cudaMalloc( (void**)&this->device_mag, magGradSize ) == cudaSuccess );
assert( cudaMalloc( (void**)&this->device_grad, magGradSize ) == cudaSuccess );
assert( cudaMemset( this->device_mag, 0, magGradSize ) == cudaSuccess );
assert( cudaMemset( this->device_grad, 0, magGradSize ) == cudaSuccess );
//size_t descriptorWidth = sizeof(float) * this->numHistBins * this->blockHeight * this->blockWidth;
//size_t descriptorHeight= blocks*threads;
//if( oneThreadWindow ){
// assert( cudaMallocPitch( (void**)&device_desc, &hogS.pitch_descriptor, descriptorWidth, descriptorHeight ) == cudaSuccess );
// assert( cudaMemset2D( this->device_desc, hogS.pitch_descriptor, 0, descriptorWidth, descriptorHeight ) == cudaSuccess );
//}
assert( cudaMalloc( (void**)&gray, this->imageHeight * imageWidth ) == cudaSuccess );
svm.linearbias_ = 6.6657914910925990525925044494215;
//assert( cudaMalloc( (void**)&svm.linearwt_, PERSON_WEIGHT_VEC_LENGTH * sizeof(double) ) == cudaSuccess );
cudaMemcpyToSymbol( SVM_VECTOR, PERSON_WEIGHT_VEC, PERSON_WEIGHT_VEC_LENGTH* sizeof(double) );
assert( cudaMalloc( (void**)&svm.scores, blocks*threads * sizeof(double) ) == cudaSuccess );
assert( cudaMemset( svm.scores, 0, blocks*threads * sizeof(double) ) == cudaSuccess );
int* angle;
assert( cudaMalloc( (void**)&angle, this->imageWidth * this->imageHeight * sizeof(int) ) == cudaSuccess );
assert( cudaMemset( angle, 0, this->imageWidth * this->imageHeight * sizeof(int) ) == cudaSuccess );
hogS.angle = angle;
hogS.blockHeight = blockHeight;
hogS.blockSize = blockSize;
hogS.blockStride = blockStride;
hogS.blockWidth = blockWidth;
hogS.cellWidth = cellWidth;
hogS.descHeight = descHeight;
hogS.descWidth = descWidth;
hogS.frameH = frameH;
hogS.frameW = frameW;
hogS.histBinSpace = histBinSpace;
hogS.imageHeight = imageHeight;
hogS.imageWidth = imageWidth;
hogS.numBlocksH = numBlocksH;
hogS.numBlocksW = numBlocksW;
hogS.numHistBins = numHistBins;
hogS.mag = device_mag;
hogS.grad = device_grad;
hogS.descriptor = device_desc;
hogS.hog = device_hog;
hogS.hogSize = hogSize;
hogS.bWHSize = blockWidth * blockHeight;
/* The Gaussian weights were being recomputed identically for every block; as an optimization they are
precomputed once into a static table. */
float gauss[ 16 * 16];
for( int i = 0; i < 16; i++)
for(int j = 0; j < 16; j++){
gauss[i*16 +j] = hostGaussian( float(i), float(j), 8, 8, 8 );
//printf("Gaussian[%d][%d] = %f\n",i,j,gauss[i*16+j]);
}
assert( cudaMemcpyToSymbol( DESCRIPTOR_GAUSS, gauss, 16*16*sizeof(float) ) == cudaSuccess );
//cudaPrintfInit();
}
CudaHOG::~CudaHOG(){
assert( cudaFree( this->hogS.angle ) == cudaSuccess );
assert( cudaFree( this->hogS.mag ) == cudaSuccess );
assert( cudaFree( this->hogS.grad ) == cudaSuccess );
//if( this->oneThreadWindow ) assert( cudaFree( this->hogS.descriptor ) == cudaSuccess );
assert( cudaFree( this->hogS.hog ) == cudaSuccess );
assert( cudaFree( this->gray ) == cudaSuccess );
assert( cudaFree( svm.scores ) == cudaSuccess );
//cudaPrintfEnd();
assert( cudaDeviceReset() == cudaSuccess );
}
__device__ float deviceGaussian( float x, float y, float mx, float my, float sigma ) {
float dist_, dx = x - mx, dy = y - my;
dist_ = sqrt( ( dx * dx ) + ( dy * dy ) );
//dist_ = dx + dy;
return exp( -dist_ * dist_ / ( sigma * sigma ) ) / ( M_PI_DIV_2 * sigma );
}
__device__ void deviceCircularInterpBin( DeviceHOG hog, float value, int curBin, float *outCoef, int *outInterpBin ) {
int halfSize = int( hog.histBinSpace >> 1 );
if( value > halfSize ) { // range: (halfSize, binsize]
*outInterpBin = ( curBin + 1 ) % hog.numHistBins;
*outCoef = 1.0 - ( ( value - halfSize ) / hog.histBinSpace );
} else { // range: [0, halfsize]
*outInterpBin = ( curBin - 1 ) % hog.numHistBins;
if( *outInterpBin < 0 ) *outInterpBin += hog.numHistBins;
*outCoef = ( ( value + halfSize ) / hog.histBinSpace );
}
}
#define DESC_AT_ELEMENT(desc,i,j,k) ( desc[ (( (i) * ( (hog.blockWidth) * (hog.blockHeight) ) + (j) * (hog.blockWidth) + (k)) ) ] )
__device__ void deviceCalculateL2Hys( DeviceHOG hog , float* descriptor) {
float norm = 0.0, eps = 1.0;
//compute norm
for( int i = 0; i < hog.numHistBins; i++ )
for( int j = 0; j < hog.blockHeight; j++ )
for( int k = 0; k < hog.blockWidth; k++ )
norm += DESC_AT_ELEMENT( descriptor,i,j,k ) * DESC_AT_ELEMENT( descriptor,i,j,k );
//L2-norm
norm = sqrt( norm + eps );
if ( !norm ) norm = 1.0;
// Normalize and threshold ...
for( int i = 0; i < hog.numHistBins; i++ )
for( int j = 0; j < hog.blockHeight; j++ )
for( int k = 0; k < hog.blockWidth; k++ ) {
DESC_AT_ELEMENT( descriptor,i,j,k ) /= norm;
if( DESC_AT_ELEMENT( descriptor,i,j,k ) > 0.2 ) DESC_AT_ELEMENT( descriptor,i,j,k ) = 0.2;
}
norm = 0.0;
for( int i = 0; i < hog.numHistBins; i++ )
for( int j = 0; j < hog.blockHeight; j++ )
for( int k = 0; k < hog.blockWidth; k++ )
norm += DESC_AT_ELEMENT( descriptor,i,j,k ) * DESC_AT_ELEMENT( descriptor,i,j,k );
norm = sqrt( norm + eps );
if ( !norm ) norm = 1.0;
// and normalize again
for( int i = 0; i < hog.numHistBins; i++ )
for( int j = 0; j < hog.blockHeight; j++ )
for( int k = 0; k < hog.blockWidth; k++ )
DESC_AT_ELEMENT( descriptor,i,j,k ) /= norm;
}
__device__ void deviceWriteToVector( DeviceHOG hog, float *output, float* descriptor ) {
float feat = 0.0;
for( int b = 0; b < hog.numHistBins; b++ ) {
for( int y = 0; y < hog.blockHeight; y++ ) {
for( int x = 0; x < hog.blockWidth; x++ ) {
feat = DESC_AT_ELEMENT( descriptor,b,y,x );
if( isnan( feat ) ) {
feat = 0;
}
*output++ = feat;
}
}
}
}
__device__ void deviceComputeDescriptor( int bx, int by, DeviceHOG hog, float* descriptor ){
float curGrad = 0.0, curMag = 0.0, gWeight = 1.0,
cellWeight = 1.0, binWeight = 0.0,
dist = 0.0;
int angle = 0, iBin = 0,
stepx = 0, stepy = 0,
dx = 0, dy = 0;
for( int y = 0; y < hog.descHeight; y++ ) {
for( int x = 0; x < hog.descWidth; x++ ) {
int offset = ((by+y)*hog.imageWidth) + bx+x;
curGrad = hog.grad[ offset ];
curMag = hog.mag[ offset ];
angle = hog.angle[ offset ];
gWeight = DESCRIPTOR_GAUSS[ y * hog.descWidth + x ];
// histogram bin weighting
iBin = 0; binWeight = 0;
int halfSize = int( hog.histBinSpace >> 1 );
float value = curGrad - hog.histBinSpace * angle;
if( value > halfSize ) { // range: (halfSize, binsize]
iBin = ( angle + 1 ) % hog.numHistBins;
binWeight = 1.0 - ( ( value - halfSize ) / hog.histBinSpace );
} else { // range: [0, halfsize]
iBin = ( angle - 1 ) % hog.numHistBins;
if( iBin < 0 ) iBin += hog.numHistBins;
binWeight = ( ( value + halfSize ) / hog.histBinSpace );
}
int offset1_ = (angle) * ( hog.bWHSize );
int offset2_ = (iBin) * ( hog.bWHSize );
float gW_curMag = gWeight * curMag;
float bin_gW_curMag = gW_curMag * binWeight;
float oneMinusbin_gW_curMag = gW_curMag * ( 1.0 - binWeight );
for( int iy = 0; iy < hog.blockHeight; iy++ ) {
for( int ix = 0; ix < hog.blockWidth; ix++ ) {
dx = x - ( 8 + ( ix << 3 ) );
dy = y - ( 8 + ( iy << 3 ) );
dist = sqrt( (float) ( dx * dx ) + ( dy * dy ) );
//dist = abs(dx + dy);
// //cell weighting
cellWeight = 1.0 - fmin( (float) ( (float) dist / (float) hog.cellWidth ), (float) 1.0 );
int offset1 = (offset1_ + (iy) * (hog.blockWidth) + (ix)) ;
int offset2 = (( offset2_ + (iy) * (hog.blockWidth) + (ix)) );
descriptor[offset1] += bin_gW_curMag * cellWeight;
descriptor[offset2] += oneMinusbin_gW_curMag * cellWeight;
}
}
}
}
}
#define BLOCK_DESCRIPTOR_SIZE 2*2*9
__global__ void cudaSlidingWindow( DeviceHOG hog, LINEAR_CLASSIFY_SVM svm ){
int py = blockIdx.x * STEP_HEIGHT;
int px = threadIdx.x * STEP_WIDTH;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
//cuPrintf("LinearBias %f\n",svm.linearbias_);
//float hog_row[ 3780 ];
size_t descriptorSize = sizeof( float ) * hog.numHistBins * hog.blockHeight * hog.blockWidth ;
if( (py < hog.imageHeight - hog.frameH + 1 ) && ( px < hog.imageWidth - hog.frameW + 1) ){
int i = 0;
float descriptor_row[BLOCK_DESCRIPTOR_SIZE];
float* hog_row = (float*)((char*)hog.hog + tid * hog.pitch_hog);
for( int by = 0; by <= hog.frameH - hog.descHeight; by += hog.blockStride ) {
for( int bx = 0; bx <= hog.frameW - hog.descWidth; bx += hog.blockStride ) {
memset( descriptor_row, 0, descriptorSize );
deviceComputeDescriptor( bx+px, by+py, hog, descriptor_row );
deviceCalculateL2Hys( hog ,descriptor_row);
deviceWriteToVector( hog, &hog_row[i], descriptor_row );
i += hog.blockSize;
}
}
double sum = 0;
for (int i= 0; i< hog.hogSize; ++i)
sum += SVM_VECTOR[i]*hog_row[i];
svm.scores[tid] = sum - svm.linearbias_;
//cuPrintf( "Score[%d] - %f\n",tid,sum);
}
}
/* One thread per HOG block */
__global__ void cudaHogThreadBlock( DeviceHOG hog, LINEAR_CLASSIFY_SVM svm ){
int py = blockIdx.y * STEP_HEIGHT;
int px = blockIdx.x * STEP_WIDTH;
int tid = blockIdx.y * gridDim.x + blockIdx.x;
int by = threadIdx.y * hog.blockStride;
int bx = threadIdx.x * hog.blockStride;
int i = (threadIdx.y * blockDim.x + threadIdx.x) * hog.blockSize;
//cuPrintf("LinearBias %f\n",svm.linearbias_);
//float hog_row[ 3780 ];
size_t descriptorSize = sizeof( float ) * BLOCK_DESCRIPTOR_SIZE ;
if( (py < hog.imageHeight - hog.frameH + 1 ) && ( px < hog.imageWidth - hog.frameW + 1) ){
float descriptor_row[BLOCK_DESCRIPTOR_SIZE];
float* hog_row = (float*)((char*)hog.hog + tid * hog.pitch_hog);
memset( descriptor_row, 0, descriptorSize );
deviceComputeDescriptor( bx+px, by+py, hog, descriptor_row );
deviceCalculateL2Hys( hog ,descriptor_row);
deviceWriteToVector( hog, &hog_row[i], descriptor_row );
// }
//}
//__syncthreads();
//if( threadIdx.x == 0 && threadIdx.y == 0 ){
// double sum = 0;
// for (int i= 0; i< hog.hogSize; ++i)
// sum += SVM_VECTOR[i]*hog_row[i];
// svm.scores[tid] = sum - svm.linearbias_;
// cuPrintf( "Score[%d] - %f\n",tid,sum);
//}
}
}
__global__ void showMatrixLinear(double *device){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.x;
int j = threadIdx.x;
cuPrintf(" show matrixLinear[%d] = %f \n",tid,device[tid]);
}
void CudaHOG::extractFeatures( unsigned char *data, int channels ){
assert( cudaDeviceSynchronize() == cudaSuccess );
computeGradients( hogS, data, channels );
//printf(" blocks %d threads %d \n",blocks, threads );
if( this->oneThreadWindow ){
cudaSlidingWindow<<< blocks, threads>>>( hogS , svm );
}else{
int threads_height = (( hogS.frameH - hogS.descHeight )/ hogS.blockStride ) + 1;
int threads_width = (( hogS.frameW - hogS.descWidth )/ hogS.blockStride ) + 1;
dim3 blocks_windows( threads, blocks );
dim3 threads_blocks( threads_width, threads_height );
cudaHogThreadBlock<<< blocks_windows, threads_blocks >>>( hogS, svm );
}
//cudaPrintfDisplay( stdout );
}
/* ComputeGradientsInCuda */
__global__ void cudaComputeGradients( DeviceHOG hog, unsigned char* gray, int width, int height, float *mag,float *grad ){
bool fullCircle = false;
int p1 = 0, p2 = 0, p3 = 0, p4 = 0;
int hor = 0, ver = 0;
float curGrad = 0.0, curMag = 0.0;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if( ( blockIdx.x > 1 && blockIdx.x < (height - 1) )
&& ( threadIdx.x > 1 && threadIdx.x < (width - 1) ) ){
if( tid < width * height ){
p1 = (int) gray[ blockIdx.x*blockDim.x + threadIdx.x+1 ];
p2 = (int) gray[ blockIdx.x*blockDim.x + threadIdx.x-1 ];
p3 = (int) gray[ (blockIdx.x-1)*blockDim.x + threadIdx.x ];
p4 = (int) gray[ (blockIdx.x+1)*blockDim.x + threadIdx.x ];
hor = p1 - p2;
ver = p3 - p4;
curMag = (float) sqrt( (double)( hor * hor ) + ( ver * ver ) );
mag[tid] = curMag;
// make sure we don't divide by zero when calculating the gradient orientation
if( curMag > 0.0 ) {
curGrad = ( (float) ( (float) 180 * acos( (float) hor / (float) curMag ) ) / (float) M_PI );
if( !fullCircle )
curGrad = float( (int) curGrad % 180 ); //if unsigned, then range it over 0-180 (pedestrian)
grad[tid]=curGrad;
}else {
grad[tid]=0;
}
int angle = int( curGrad / hog.histBinSpace );
hog.angle[tid] = ( angle >= hog.numHistBins ? hog.numHistBins - 1 : angle );
}
}
}
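// Fixed-point BT.601 luma conversion: weights 0.2989/0.5870/0.1140 scaled by 10000 to stay in integer arithmetic.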
__global__ void cudaBGR2Gray_(unsigned char* bgr, unsigned char* gray,int width, int height){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if( tid < width * height ){
gray[tid] = ( 2989 * bgr[tid*3+2] + 5870 * bgr[tid*3+1] + 1140 * bgr[tid*3+0] ) / 10000;
}
}
void CudaHOG::computeGradients(DeviceHOG hog, unsigned char* data, int channels){
unsigned char *bgr;
if( channels == 1 ){
assert( cudaMemcpy( gray, data, this->imageHeight * imageWidth, cudaMemcpyHostToDevice ) == cudaSuccess );
}else{
assert( cudaMalloc( (void**)&bgr, this->imageHeight * imageWidth * channels ) == cudaSuccess );
assert( cudaMemcpy( bgr, data, this->imageHeight * imageWidth * channels, cudaMemcpyHostToDevice ) == cudaSuccess );
cudaBGR2Gray_<<< imageHeight, imageWidth >>>( bgr, gray, this->imageWidth, this->imageHeight );
assert( cudaFree( bgr ) == cudaSuccess );
}
cudaComputeGradients<<< imageHeight, imageWidth >>>(hog, gray, this->imageWidth, this->imageHeight, this->device_mag, this->device_grad );
}
float* CudaHOG::getMagnitudeN(){
float *host_magnitude = new float[ this->imageWidth * this->imageHeight ];
assert( cudaMemcpy( host_magnitude, this->device_mag, this->imageWidth * this->imageHeight * sizeof( float ), cudaMemcpyDeviceToHost ) == cudaSuccess );
return host_magnitude;
}
float* CudaHOG::getGradientN(){
float *host_gradient = new float[ this->imageWidth * this->imageHeight ];
assert( cudaMemcpy( host_gradient, this->device_grad, this->imageWidth * this->imageHeight * sizeof( float ), cudaMemcpyDeviceToHost ) == cudaSuccess );
return host_gradient;
}
void CudaHOG::getFoundLocations( vector<CudaPoint> &founds ){
float **hogVector = new float*[ getWindowsCount() ];
for( int i = 0; i < getWindowsCount(); i++ ){
hogVector[i] = new float[ getHOGVectorSize() ];
}
getHOGVectorN( hogVector );
int px=0,py=0,count=0;
for( int i = 0; i < getBlocksCount(); i++){
for( int j = 0; j < getThreadsPerBlockCount(); j++ ){
px = j * STEP_WIDTH;
py = i * STEP_HEIGHT;
float scoreHOGSVM = classifier->run( hogVector[count++], getHOGVectorSize(), LIGHTSVM );
if( scoreHOGSVM > 0 ){
founds.push_back(CudaPoint(px,py));
}
}
}
for( int i = 0; i < getWindowsCount(); i++ ) delete[] hogVector[i];
delete[] hogVector;
}
void CudaHOG::getHOGVectorN(float **matrix){
matrixCpyDeviceToHost( matrix, device_hog,hogS.pitch_hog, hogSize, getWindowsCount());
}
double *CudaHOG::getScoresN(){
double *scores = new double[ getWindowsCount() ];
assert( cudaMemcpy( scores, svm.scores, getWindowsCount() * sizeof(double), cudaMemcpyDeviceToHost ) == cudaSuccess );
return scores;
}
|
47fdf07761e4c1ef6bb69482a9d79ae063b38ab7.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef VERBOSE
#define VERBOSE 0
#endif
#include "nvcomp/lz4.hpp"
#include "benchmark_common.h"
#include <fstream>
#include <iostream>
#include <string.h>
#include <string>
#include <thrust/device_vector.h>
#include <vector>
using namespace nvcomp;
static size_t compute_batch_size(
const std::vector<std::vector<char>>& data, const size_t chunk_size)
{
size_t batch_size = 0;
for (size_t i = 0; i < data.size(); ++i) {
const size_t num_chunks = (data[i].size() + chunk_size - 1) / chunk_size;
batch_size += num_chunks;
}
return batch_size;
}
std::vector<size_t> compute_chunk_sizes(
const std::vector<std::vector<char>>& data,
const size_t batch_size,
const size_t chunk_size)
{
std::vector<size_t> sizes(batch_size, chunk_size);
size_t offset = 0;
for (size_t i = 0; i < data.size(); ++i) {
const size_t num_chunks = (data[i].size() + chunk_size - 1) / chunk_size;
if (data[i].size() % chunk_size != 0) {
sizes[offset] = data[i].size() % chunk_size;
}
offset += num_chunks;
}
return sizes;
}
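// BatchData owns a flat device buffer plus per-chunk pointer and size arrays, matching the layout expected by the batched LZ4 API.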
class BatchData
{
public:
BatchData(
const std::vector<std::vector<char>>& host_data,
const size_t chunk_size) :
m_ptrs(),
m_sizes(),
m_data(),
m_size(0)
{
m_size = compute_batch_size(host_data, chunk_size);
m_data = thrust::device_vector<uint8_t>(chunk_size * size());
std::vector<void*> uncompressed_ptrs(size());
for (size_t i = 0; i < size(); ++i) {
uncompressed_ptrs[i] = static_cast<void*>(data() + chunk_size * i);
}
m_ptrs = thrust::device_vector<void*>(uncompressed_ptrs);
std::vector<size_t> sizes
= compute_chunk_sizes(host_data, size(), chunk_size);
m_sizes = thrust::device_vector<size_t>(sizes);
// copy data to GPU
size_t offset = 0;
for (size_t i = 0; i < host_data.size(); ++i) {
CUDA_CHECK(hipMemcpy(
uncompressed_ptrs[offset],
host_data[i].data(),
host_data[i].size(),
hipMemcpyHostToDevice));
const size_t num_chunks
= (host_data[i].size() + chunk_size - 1) / chunk_size;
offset += num_chunks;
}
}
BatchData(const size_t max_output_size, const size_t batch_size) :
m_ptrs(),
m_sizes(),
m_data(),
m_size(batch_size)
{
m_data = thrust::device_vector<uint8_t>(max_output_size * size());
std::vector<size_t> sizes(size(), max_output_size);
m_sizes = thrust::device_vector<size_t>(sizes);
std::vector<void*> ptrs(batch_size);
for (size_t i = 0; i < batch_size; ++i) {
ptrs[i] = data() + max_output_size * i;
}
m_ptrs = thrust::device_vector<void*>(ptrs);
}
BatchData(BatchData&& other) = default;
// disable copying
BatchData(const BatchData& other) = delete;
BatchData& operator=(const BatchData& other) = delete;
void** ptrs()
{
return m_ptrs.data().get();
}
size_t* sizes()
{
return m_sizes.data().get();
}
uint8_t* data()
{
return m_data.data().get();
}
size_t size() const
{
return m_size;
}
private:
thrust::device_vector<void*> m_ptrs;
thrust::device_vector<size_t> m_sizes;
thrust::device_vector<uint8_t> m_data;
size_t m_size;
};
// Benchmark performance from the binary data file fname
static void
run_benchmark(const std::vector<std::vector<char>>& data, const bool warmup)
{
size_t total_bytes = 0;
for (const std::vector<char>& part : data) {
total_bytes += part.size();
}
if (!warmup) {
std::cout << "----------" << std::endl;
std::cout << "files: " << data.size() << std::endl;
std::cout << "uncompressed (B): " << total_bytes << std::endl;
}
const size_t chunk_size = 1 << 16;
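// 64 KiB chunks: every input file is split into chunks of this size for the batched compressor.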
// build up metadata
BatchData input_data(data, chunk_size);
// compression
nvcompError_t status;
// Compress on the GPU using batched API
size_t comp_temp_bytes;
status = nvcompBatchedLZ4CompressGetTempSize(
input_data.size(), chunk_size, &comp_temp_bytes);
if (status != nvcompSuccess) {
throw std::runtime_error("nvcompBatchedLZ4CompressGetTempSize() failed.");
}
void* d_comp_temp;
CUDA_CHECK(hipMalloc(&d_comp_temp, comp_temp_bytes));
size_t max_out_bytes;
status = nvcompBatchedLZ4CompressGetMaxOutputChunkSize(
chunk_size, &max_out_bytes);
if (status != nvcompSuccess) {
throw std::runtime_error("nvcompBatchedLZ4GetMaxOutputChunkSize() failed.");
}
BatchData compress_data(max_out_bytes, input_data.size());
hipStream_t stream;
hipStreamCreate(&stream);
hipEvent_t start, end;
hipEventCreate(&start);
hipEventCreate(&end);
hipEventRecord(start, stream);
status = nvcompBatchedLZ4CompressAsync(
input_data.ptrs(),
input_data.sizes(),
chunk_size,
input_data.size(),
d_comp_temp,
comp_temp_bytes,
compress_data.ptrs(),
compress_data.sizes(),
stream);
if (status != nvcompSuccess) {
throw std::runtime_error("nvcompBatchedLZ4CompressAsync() failed.");
}
hipEventRecord(end, stream);
CUDA_CHECK(hipStreamSynchronize(stream));
// free compression memory
hipFree(d_comp_temp);
float ms;
hipEventElapsedTime(&ms, start, end);
if (!warmup) {
// compute compression ratio
std::vector<size_t> compressed_sizes_host(compress_data.size());
hipMemcpy(
compressed_sizes_host.data(),
compress_data.sizes(),
compress_data.size() * sizeof(*compress_data.sizes()),
hipMemcpyDeviceToHost);
size_t comp_bytes = 0;
for (const size_t s : compressed_sizes_host) {
comp_bytes += s;
}
std::cout << "comp_size: " << comp_bytes
<< ", compressed ratio: " << std::fixed << std::setprecision(2)
<< (double)total_bytes / comp_bytes << std::endl;
std::cout << "compression throughput (GB/s): "
<< (double)total_bytes / (1.0e6 * ms) << std::endl;
}
// overwrite our uncompressed data so we can test for correctness
CUDA_CHECK(hipMemset(input_data.data(), 0, chunk_size * input_data.size()));
// LZ4 decompression
size_t decomp_temp_bytes;
status = nvcompBatchedLZ4DecompressGetTempSize(
compress_data.size(), chunk_size, &decomp_temp_bytes);
if (status != nvcompSuccess) {
throw std::runtime_error("nvcompBatchedLZ4DecompressGetTempSize() failed.");
}
void* d_decomp_temp;
CUDA_CHECK(hipMalloc(&d_decomp_temp, decomp_temp_bytes));
hipEventRecord(start, stream);
status = nvcompBatchedLZ4DecompressAsync(
compress_data.ptrs(),
compress_data.sizes(),
input_data.sizes(),
chunk_size,
compress_data.size(),
d_decomp_temp,
decomp_temp_bytes,
input_data.ptrs(),
stream);
benchmark_assert(
status == nvcompSuccess,
"nvcompBatchedLZ4DecompressAsync() not successful");
hipEventRecord(end, stream);
CUDA_CHECK(hipStreamSynchronize(stream));
hipEventElapsedTime(&ms, start, end);
if (!warmup) {
std::cout << "decompression throughput (GB/s): "
<< (double)total_bytes / (1.0e6 * ms) << std::endl;
}
hipFree(d_decomp_temp);
hipStreamDestroy(stream);
(void)status;
}
std::vector<char> readFile(const std::string& filename)
{
std::vector<char> buffer(4096);
std::vector<char> host_data;
std::ifstream fin(filename, std::ifstream::binary);
fin.exceptions(std::ifstream::failbit | std::ifstream::badbit);
size_t num;
do {
num = fin.readsome(buffer.data(), buffer.size());
host_data.insert(host_data.end(), buffer.begin(), buffer.begin() + num);
} while (num > 0);
return host_data;
}
std::vector<std::vector<char>>
multi_file(const std::vector<std::string>& filenames)
{
std::vector<std::vector<char>> split_data;
for (auto const& filename : filenames) {
split_data.emplace_back(readFile(filename));
}
return split_data;
}
int main(int argc, char* argv[])
{
std::vector<std::string> file_names(argc - 1);
if (argc == 1) {
std::cerr << "Must specify at least one file." << std::endl;
return 1;
}
// if `-f` is specified, assume single file mode
if (strcmp(argv[1], "-f") == 0) {
if (argc == 2) {
std::cerr << "Missing file name following '-f'" << std::endl;
return 1;
} else if (argc > 3) {
std::cerr << "Unknown extra arguments with '-f'." << std::endl;
return 1;
}
file_names = {argv[2]};
} else {
// multi-file mode
for (int i = 1; i < argc; ++i) {
file_names[i - 1] = argv[i];
}
}
auto data = multi_file(file_names);
// one warmup to allow cuda to initialize
run_benchmark(data, true);
// second run to report times
run_benchmark(data, false);
return 0;
}
|
47fdf07761e4c1ef6bb69482a9d79ae063b38ab7.cu
|
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef VERBOSE
#define VERBOSE 0
#endif
#include "nvcomp/lz4.hpp"
#include "benchmark_common.h"
#include <fstream>
#include <iostream>
#include <string.h>
#include <string>
#include <thrust/device_vector.h>
#include <vector>
using namespace nvcomp;
static size_t compute_batch_size(
const std::vector<std::vector<char>>& data, const size_t chunk_size)
{
size_t batch_size = 0;
for (size_t i = 0; i < data.size(); ++i) {
const size_t num_chunks = (data[i].size() + chunk_size - 1) / chunk_size;
batch_size += num_chunks;
}
return batch_size;
}
std::vector<size_t> compute_chunk_sizes(
const std::vector<std::vector<char>>& data,
const size_t batch_size,
const size_t chunk_size)
{
std::vector<size_t> sizes(batch_size, chunk_size);
size_t offset = 0;
for (size_t i = 0; i < data.size(); ++i) {
const size_t num_chunks = (data[i].size() + chunk_size - 1) / chunk_size;
if (data[i].size() % chunk_size != 0) {
sizes[offset] = data[i].size() % chunk_size;
}
offset += num_chunks;
}
return sizes;
}
class BatchData
{
public:
BatchData(
const std::vector<std::vector<char>>& host_data,
const size_t chunk_size) :
m_ptrs(),
m_sizes(),
m_data(),
m_size(0)
{
m_size = compute_batch_size(host_data, chunk_size);
m_data = thrust::device_vector<uint8_t>(chunk_size * size());
std::vector<void*> uncompressed_ptrs(size());
for (size_t i = 0; i < size(); ++i) {
uncompressed_ptrs[i] = static_cast<void*>(data() + chunk_size * i);
}
m_ptrs = thrust::device_vector<void*>(uncompressed_ptrs);
std::vector<size_t> sizes
= compute_chunk_sizes(host_data, size(), chunk_size);
m_sizes = thrust::device_vector<size_t>(sizes);
// copy data to GPU
size_t offset = 0;
for (size_t i = 0; i < host_data.size(); ++i) {
CUDA_CHECK(cudaMemcpy(
uncompressed_ptrs[offset],
host_data[i].data(),
host_data[i].size(),
cudaMemcpyHostToDevice));
const size_t num_chunks
= (host_data[i].size() + chunk_size - 1) / chunk_size;
offset += num_chunks;
}
}
BatchData(const size_t max_output_size, const size_t batch_size) :
m_ptrs(),
m_sizes(),
m_data(),
m_size(batch_size)
{
m_data = thrust::device_vector<uint8_t>(max_output_size * size());
std::vector<size_t> sizes(size(), max_output_size);
m_sizes = thrust::device_vector<size_t>(sizes);
std::vector<void*> ptrs(batch_size);
for (size_t i = 0; i < batch_size; ++i) {
ptrs[i] = data() + max_output_size * i;
}
m_ptrs = thrust::device_vector<void*>(ptrs);
}
BatchData(BatchData&& other) = default;
// disable copying
BatchData(const BatchData& other) = delete;
BatchData& operator=(const BatchData& other) = delete;
void** ptrs()
{
return m_ptrs.data().get();
}
size_t* sizes()
{
return m_sizes.data().get();
}
uint8_t* data()
{
return m_data.data().get();
}
size_t size() const
{
return m_size;
}
private:
thrust::device_vector<void*> m_ptrs;
thrust::device_vector<size_t> m_sizes;
thrust::device_vector<uint8_t> m_data;
size_t m_size;
};
// Benchmark performance from the binary data file fname
static void
run_benchmark(const std::vector<std::vector<char>>& data, const bool warmup)
{
size_t total_bytes = 0;
for (const std::vector<char>& part : data) {
total_bytes += part.size();
}
if (!warmup) {
std::cout << "----------" << std::endl;
std::cout << "files: " << data.size() << std::endl;
std::cout << "uncompressed (B): " << total_bytes << std::endl;
}
const size_t chunk_size = 1 << 16;
// build up metadata
BatchData input_data(data, chunk_size);
// compression
nvcompError_t status;
// Compress on the GPU using batched API
size_t comp_temp_bytes;
status = nvcompBatchedLZ4CompressGetTempSize(
input_data.size(), chunk_size, &comp_temp_bytes);
if (status != nvcompSuccess) {
throw std::runtime_error("nvcompBatchedLZ4CompressGetTempSize() failed.");
}
void* d_comp_temp;
CUDA_CHECK(cudaMalloc(&d_comp_temp, comp_temp_bytes));
size_t max_out_bytes;
status = nvcompBatchedLZ4CompressGetMaxOutputChunkSize(
chunk_size, &max_out_bytes);
if (status != nvcompSuccess) {
throw std::runtime_error("nvcompBatchedLZ4GetMaxOutputChunkSize() failed.");
}
BatchData compress_data(max_out_bytes, input_data.size());
cudaStream_t stream;
cudaStreamCreate(&stream);
cudaEvent_t start, end;
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start, stream);
status = nvcompBatchedLZ4CompressAsync(
input_data.ptrs(),
input_data.sizes(),
chunk_size,
input_data.size(),
d_comp_temp,
comp_temp_bytes,
compress_data.ptrs(),
compress_data.sizes(),
stream);
if (status != nvcompSuccess) {
throw std::runtime_error("nvcompBatchedLZ4CompressAsync() failed.");
}
cudaEventRecord(end, stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
// free compression memory
cudaFree(d_comp_temp);
float ms;
cudaEventElapsedTime(&ms, start, end);
if (!warmup) {
// compute compression ratio
std::vector<size_t> compressed_sizes_host(compress_data.size());
cudaMemcpy(
compressed_sizes_host.data(),
compress_data.sizes(),
compress_data.size() * sizeof(*compress_data.sizes()),
cudaMemcpyDeviceToHost);
size_t comp_bytes = 0;
for (const size_t s : compressed_sizes_host) {
comp_bytes += s;
}
std::cout << "comp_size: " << comp_bytes
<< ", compressed ratio: " << std::fixed << std::setprecision(2)
<< (double)total_bytes / comp_bytes << std::endl;
std::cout << "compression throughput (GB/s): "
<< (double)total_bytes / (1.0e6 * ms) << std::endl;
}
// overwrite our uncompressed data so we can test for correctness
CUDA_CHECK(cudaMemset(input_data.data(), 0, chunk_size * input_data.size()));
// LZ4 decompression
size_t decomp_temp_bytes;
status = nvcompBatchedLZ4DecompressGetTempSize(
compress_data.size(), chunk_size, &decomp_temp_bytes);
if (status != nvcompSuccess) {
throw std::runtime_error("nvcompBatchedLZ4DecompressGetTempSize() failed.");
}
void* d_decomp_temp;
CUDA_CHECK(cudaMalloc(&d_decomp_temp, decomp_temp_bytes));
cudaEventRecord(start, stream);
status = nvcompBatchedLZ4DecompressAsync(
compress_data.ptrs(),
compress_data.sizes(),
input_data.sizes(),
chunk_size,
compress_data.size(),
d_decomp_temp,
decomp_temp_bytes,
input_data.ptrs(),
stream);
benchmark_assert(
status == nvcompSuccess,
"nvcompBatchedLZ4DecompressAsync() not successful");
cudaEventRecord(end, stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
cudaEventElapsedTime(&ms, start, end);
if (!warmup) {
std::cout << "decompression throughput (GB/s): "
<< (double)total_bytes / (1.0e6 * ms) << std::endl;
}
cudaFree(d_decomp_temp);
cudaStreamDestroy(stream);
(void)status;
}
std::vector<char> readFile(const std::string& filename)
{
std::vector<char> buffer(4096);
std::vector<char> host_data;
std::ifstream fin(filename, std::ifstream::binary);
fin.exceptions(std::ifstream::failbit | std::ifstream::badbit);
size_t num;
do {
num = fin.readsome(buffer.data(), buffer.size());
host_data.insert(host_data.end(), buffer.begin(), buffer.begin() + num);
} while (num > 0);
return host_data;
}
std::vector<std::vector<char>>
multi_file(const std::vector<std::string>& filenames)
{
std::vector<std::vector<char>> split_data;
for (auto const& filename : filenames) {
split_data.emplace_back(readFile(filename));
}
return split_data;
}
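// Example invocation (binary name is illustrative, not part of this file): ./lz4_benchmark -f data.bin   or   ./lz4_benchmark part0.bin part1.bin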
int main(int argc, char* argv[])
{
std::vector<std::string> file_names(argc - 1);
if (argc == 1) {
std::cerr << "Must specify at least one file." << std::endl;
return 1;
}
// if `-f` is specified, assume single file mode
if (strcmp(argv[1], "-f") == 0) {
if (argc == 2) {
std::cerr << "Missing file name following '-f'" << std::endl;
return 1;
} else if (argc > 3) {
std::cerr << "Unknown extra arguments with '-f'." << std::endl;
return 1;
}
file_names = {argv[2]};
} else {
// multi-file mode
for (int i = 1; i < argc; ++i) {
file_names[i - 1] = argv[i];
}
}
auto data = multi_file(file_names);
// one warmup to allow cuda to initialize
run_benchmark(data, true);
// second run to report times
run_benchmark(data, false);
return 0;
}
|
a883e8ecf7739a6793a84fda738a523b40f6f5d0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "cudaLib.h"
#include "../Options/utils.h"
/*
* Constructor for CudaLib
*/
CudaLib::CudaLib(MonteCarlo* mc){
/*
* Compute the maximum number of available threads
*/
int deviceCount;
hipGetDeviceCount(&deviceCount);
for (int dev = 0; dev < deviceCount; dev++) {
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
if(dev == 0){
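// dev == 0 here, so maxThreadsDim[dev] reads maxThreadsDim[0]: the maximum block size along x.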
this->maxDevice = deviceProp.maxThreadsDim[dev];
}
}
/*
* Handling of the options' "Strike"
*/
int idOpt = mc->opt_->id_;
if(idOpt == 1){ //Option Asian
this->strike = (dynamic_cast<OptionAsian *>(mc->opt_))->strike_;
}
else if(idOpt == 2){ //Barrier option
this->strike = (dynamic_cast<OptionBarrier *>(mc->opt_))->strike_;
}
else if(idOpt == 3){ //Lower barrier option
this->strike = (dynamic_cast<OptionBarrierLow *>(mc->opt_))->strike_;
}
else if(idOpt == 4){ //Upper barrier option
this->strike = (dynamic_cast<OptionBarrierUp *>(mc->opt_))->strike_;
}
else if(idOpt == 5){ //Option Basket
this->strike = (dynamic_cast<OptionBasket *>(mc->opt_))->strike_;
}
else if(idOpt == 6){ //Option Performance
//This option has no "Strike"
this->strike = 0.0;
}
else{ //Should never happen
exit(EXIT_FAILURE);
}
/* Align the number of samples to a multiple of maxDevice */
int nbTourModifie = (int)(mc->samples_/this->maxDevice) * this->maxDevice;
mc->samples_ = nbTourModifie;
/* Allocate and copy the pnl objects into GPU memory */
this->allocMonteCarlo(mc);
this->memcpyMonteCarlo(mc);
}
/*
* Destructor for CudaLib
*/
CudaLib::~CudaLib(){
hipFree(this->trend);
hipFree(this->sigma);
hipFree(this->spot);
hipFree(this->chol);
hipFree(this->tabPath);
hipFree(this->tabPrice);
hipFree(this->tabVar);
if(this->payoffCoeff != NULL){
hipFree(this->payoffCoeff);
}
if(this->lowerBarrier != NULL){
hipFree(this->lowerBarrier);
}
if(this->upperBarrier != NULL){
hipFree(this->upperBarrier);
}
}
void CudaLib::allocOption(Option* opt){
hipError_t err;
//Allocate the array for the PayoffCoeff vector
err = hipMalloc((void **) &(this->payoffCoeff) ,sizeof(double)*opt->size_);
if(err != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
//Allocate the array for the lower barrier vector
err = hipMalloc((void **) &(this->lowerBarrier) ,sizeof(double)*opt->size_);
if(err != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
//Allocate the array for the upper barrier vector
err = hipMalloc((void **) &(this->upperBarrier) ,sizeof(double)*opt->size_);
if(err != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
}
void CudaLib::allocBS(BS* bs){
hipError_t err;
//*trend array
err = hipMalloc( (void**) &(this->trend), sizeof(float)*bs->size_);
if(err != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
//*sigma_ array
err = hipMalloc( (void**) &(this->sigma), sizeof(float)*bs->size_);
if(err != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
//*spot_ array
err = hipMalloc( (void**) &(this->spot), sizeof(float)*bs->size_);
if(err != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
//*chol_ array
err = hipMalloc( (void**) &(this->chol), sizeof(float)*bs->chol->m*bs->chol->n);
if(err != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
}
void CudaLib::allocMonteCarlo(MonteCarlo* mc){
//Allocate the option
allocOption(mc->opt_);
//Allocate the Black&Scholes model
allocBS(mc->mod_);
//Allocate the array holding one path matrix per device
hipError_t err;
//tabPath
err = hipMalloc( (void**) &(this->tabPath), sizeof(float)*mc->samples_*(mc->opt_->TimeSteps_ + 1)*mc->mod_->size_);
if(err != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
//tabPrice
err = hipMalloc( (void**) &(this->tabPrice), sizeof(float)*mc->samples_);
if(err != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
//tabVar
err = hipMalloc( (void**) &(this->tabVar), sizeof(float)*mc->samples_);
if(err != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
}
void CudaLib::memcpyOption(Option* opt){
hipError_t err;
int idOpt = opt->id_;
if(idOpt == 1){ //Option Asian
//Nothing to copy for this option
}
else if(idOpt == 2){ //Barrier option
OptionBarrier *barrier = dynamic_cast<OptionBarrier *>(opt);
//Copy the PayoffCoeff vector array to device memory
err = hipMemcpy(this->payoffCoeff, utils::convertPnlVectToFloat(barrier->payoffCoeff_->array,barrier->payoffCoeff_->size), barrier->payoffCoeff_->size*sizeof(float), hipMemcpyHostToDevice);
if(err != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
//Copy the LowerBarrier vector array to device memory
err = hipMemcpy(this->lowerBarrier, utils::convertPnlVectToFloat(barrier->lowerBarrier_->array,barrier->lowerBarrier_->size), barrier->lowerBarrier_->size*sizeof(float), hipMemcpyHostToDevice);
if(err != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
//Copy the UpperBarrier vector array to device memory
err = hipMemcpy(this->upperBarrier, utils::convertPnlVectToFloat(barrier->upperBarrier_->array,barrier->upperBarrier_->size), barrier->upperBarrier_->size*sizeof(float), hipMemcpyHostToDevice);
if(err != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
}
else if(idOpt == 3){ //Lower barrier option
OptionBarrierLow *barrierLow = dynamic_cast<OptionBarrierLow *>(opt);
//Copy the PayoffCoeff vector array to device memory
err = hipMemcpy(this->payoffCoeff, utils::convertPnlVectToFloat(barrierLow->payoffCoeff_->array,barrierLow->payoffCoeff_->size), barrierLow->payoffCoeff_->size*sizeof(float), hipMemcpyHostToDevice);
if(err != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
//Copy the LowerBarrier vector array to device memory
err = hipMemcpy(this->lowerBarrier, utils::convertPnlVectToFloat(barrierLow->lowerBarrier_->array,barrierLow->lowerBarrier_->size), barrierLow->lowerBarrier_->size*sizeof(float), hipMemcpyHostToDevice);
if(err != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
}
else if(idOpt == 4){ //Upper barrier option
OptionBarrierUp *barrierUp = dynamic_cast<OptionBarrierUp *>(opt);
//Copy the PayoffCoeff vector array to device memory
err = hipMemcpy(this->payoffCoeff, utils::convertPnlVectToFloat(barrierUp->payoffCoeff_->array,barrierUp->payoffCoeff_->size), barrierUp->payoffCoeff_->size*sizeof(float), hipMemcpyHostToDevice);
if(err != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
//Copy the UpperBarrier vector array to device memory
err = hipMemcpy(this->upperBarrier, utils::convertPnlVectToFloat(barrierUp->upperBarrier_->array,barrierUp->upperBarrier_->size), barrierUp->upperBarrier_->size*sizeof(float), hipMemcpyHostToDevice);
if(err != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
}
else if(idOpt == 5){ //Option Basket
OptionBasket *basket = dynamic_cast<OptionBasket *>(opt);
//Copy the PayoffCoeff vector array to device memory
err = hipMemcpy(this->payoffCoeff, utils::convertPnlVectToFloat(basket->payoffCoeff_->array,basket->payoffCoeff_->size), basket->payoffCoeff_->size*sizeof(float), hipMemcpyHostToDevice);
if(err != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
}
else if(idOpt == 6){ //Option Performance
OptionPerformance *performance = dynamic_cast<OptionPerformance *>(opt);
//Copy the PayoffCoeff vector array to device memory
err = hipMemcpy(this->payoffCoeff, utils::convertPnlVectToFloat(performance->payoffCoeff_->array,performance->payoffCoeff_->size), performance->payoffCoeff_->size*sizeof(float), hipMemcpyHostToDevice);
if(err != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
}
else{ //This id does not exist, should never happen
exit(EXIT_FAILURE);
}
}
void CudaLib::memcpyBS(BS* bs){
hipError_t err;
//*trend array
err = hipMemcpy(this->trend, utils::convertPnlVectToFloat(bs->trend->array,bs->size_), bs->trend->size*sizeof(float), hipMemcpyHostToDevice);
if(err != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
//*sigma_ array
err = hipMemcpy(this->sigma, utils::convertPnlVectToFloat(bs->sigma_->array,bs->size_), bs->sigma_->size*sizeof(float), hipMemcpyHostToDevice);
if(err != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
//*spot_ array
err = hipMemcpy(this->spot, utils::convertPnlVectToFloat(bs->spot_->array,bs->size_), bs->spot_->size*sizeof(float), hipMemcpyHostToDevice);
if(err != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
//*chol_ array
err = hipMemcpy(this->chol, utils::convertPnlVectToFloat(bs->chol->array,bs->chol->m*bs->chol->n), bs->chol->m*bs->chol->n*sizeof(float), hipMemcpyHostToDevice);
if(err != hipSuccess){
printf("%s in %s at line %d\n", hipGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
}
void CudaLib::memcpyMonteCarlo(MonteCarlo* mc){
//Copy the option to device memory
memcpyOption(mc->opt_);
//Copy the Black&Scholes model to device memory
memcpyBS(mc->mod_);
}
|
a883e8ecf7739a6793a84fda738a523b40f6f5d0.cu
|
#include "cudaLib.h"
#include "../Options/utils.h"
/*
* Constructor for CudaLib
*/
CudaLib::CudaLib(MonteCarlo* mc){
/*
* Compute the maximum number of available threads
*/
int deviceCount;
cudaGetDeviceCount(&deviceCount);
for (int dev = 0; dev < deviceCount; dev++) {
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
if(dev == 0){
this->maxDevice = deviceProp.maxThreadsDim[dev];
}
}
/*
* Handling of the options' "Strike"
*/
int idOpt = mc->opt_->id_;
if(idOpt == 1){ //Option Asian
this->strike = (dynamic_cast<OptionAsian *>(mc->opt_))->strike_;
}
else if(idOpt == 2){ //Barrier option
this->strike = (dynamic_cast<OptionBarrier *>(mc->opt_))->strike_;
}
else if(idOpt == 3){ //Lower barrier option
this->strike = (dynamic_cast<OptionBarrierLow *>(mc->opt_))->strike_;
}
else if(idOpt == 4){ //Upper barrier option
this->strike = (dynamic_cast<OptionBarrierUp *>(mc->opt_))->strike_;
}
else if(idOpt == 5){ //Option Basket
this->strike = (dynamic_cast<OptionBasket *>(mc->opt_))->strike_;
}
else if(idOpt == 6){ //Option Performance
//This option has no "Strike"
this->strike = 0.0;
}
else{ //Should never happen
exit(EXIT_FAILURE);
}
/* Align the number of samples to a multiple of maxDevice */
int nbTourModifie = (int)(mc->samples_/this->maxDevice) * this->maxDevice;
mc->samples_ = nbTourModifie;
/* Allocate and copy the pnl objects into GPU memory */
this->allocMonteCarlo(mc);
this->memcpyMonteCarlo(mc);
}
/*
* Destructor for CudaLib
*/
CudaLib::~CudaLib(){
cudaFree(this->trend);
cudaFree(this->sigma);
cudaFree(this->spot);
cudaFree(this->chol);
cudaFree(this->tabPath);
cudaFree(this->tabPrice);
cudaFree(this->tabVar);
if(this->payoffCoeff != NULL){
cudaFree(this->payoffCoeff);
}
if(this->lowerBarrier != NULL){
cudaFree(this->lowerBarrier);
}
if(this->upperBarrier != NULL){
cudaFree(this->upperBarrier);
}
}
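// Every cudaMalloc/cudaMemcpy below is checked; on failure the CUDA error string, file and line are printed and the process exits.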
void CudaLib::allocOption(Option* opt){
cudaError_t err;
//Allocate the array for the PayoffCoeff vector
err = cudaMalloc((void **) &(this->payoffCoeff) ,sizeof(double)*opt->size_);
if(err != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
//Allocate the array for the lower barrier vector
err = cudaMalloc((void **) &(this->lowerBarrier) ,sizeof(double)*opt->size_);
if(err != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
//Allocate the array for the upper barrier vector
err = cudaMalloc((void **) &(this->upperBarrier) ,sizeof(double)*opt->size_);
if(err != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
}
void CudaLib::allocBS(BS* bs){
cudaError_t err;
//*trend array
err = cudaMalloc( (void**) &(this->trend), sizeof(float)*bs->size_);
if(err != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
//*sigma_ array
err = cudaMalloc( (void**) &(this->sigma), sizeof(float)*bs->size_);
if(err != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
//*spot_ array
err = cudaMalloc( (void**) &(this->spot), sizeof(float)*bs->size_);
if(err != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
//*chol_ array
err = cudaMalloc( (void**) &(this->chol), sizeof(float)*bs->chol->m*bs->chol->n);
if(err != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
}
void CudaLib::allocMonteCarlo(MonteCarlo* mc){
//Allocate the option
allocOption(mc->opt_);
//Allocate the Black&Scholes model
allocBS(mc->mod_);
//Allocate the array holding one path matrix per device
cudaError_t err;
//tabPath
err = cudaMalloc( (void**) &(this->tabPath), sizeof(float)*mc->samples_*(mc->opt_->TimeSteps_ + 1)*mc->mod_->size_);
if(err != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
//tabPrice
err = cudaMalloc( (void**) &(this->tabPrice), sizeof(float)*mc->samples_);
if(err != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
//tabVar
err = cudaMalloc( (void**) &(this->tabVar), sizeof(float)*mc->samples_);
if(err != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
}
void CudaLib::memcpyOption(Option* opt){
cudaError_t err;
int idOpt = opt->id_;
if(idOpt == 1){ //Asian option
//Nothing to copy for this kind of option
}
else if(idOpt == 2){ //Barrier option
OptionBarrier *barrier = dynamic_cast<OptionBarrier *>(opt);
//Copy the PayoffCoeff vector array to GPU memory
err = cudaMemcpy(this->payoffCoeff, utils::convertPnlVectToFloat(barrier->payoffCoeff_->array,barrier->payoffCoeff_->size), barrier->payoffCoeff_->size*sizeof(float), cudaMemcpyHostToDevice);
if(err != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
//Copy the LowerBarrier vector array to GPU memory
err = cudaMemcpy(this->lowerBarrier, utils::convertPnlVectToFloat(barrier->lowerBarrier_->array,barrier->lowerBarrier_->size), barrier->lowerBarrier_->size*sizeof(float), cudaMemcpyHostToDevice);
if(err != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
//Copy the UpperBarrier vector array to GPU memory
err = cudaMemcpy(this->upperBarrier, utils::convertPnlVectToFloat(barrier->upperBarrier_->array,barrier->upperBarrier_->size), barrier->upperBarrier_->size*sizeof(float), cudaMemcpyHostToDevice);
if(err != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
}
else if(idOpt == 3){ //Lower-barrier option
OptionBarrierLow *barrierLow = dynamic_cast<OptionBarrierLow *>(opt);
//Copy the PayoffCoeff vector array to GPU memory
err = cudaMemcpy(this->payoffCoeff, utils::convertPnlVectToFloat(barrierLow->payoffCoeff_->array,barrierLow->payoffCoeff_->size), barrierLow->payoffCoeff_->size*sizeof(float), cudaMemcpyHostToDevice);
if(err != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
//Copy the LowerBarrier vector array to GPU memory
err = cudaMemcpy(this->lowerBarrier, utils::convertPnlVectToFloat(barrierLow->lowerBarrier_->array,barrierLow->lowerBarrier_->size), barrierLow->lowerBarrier_->size*sizeof(float), cudaMemcpyHostToDevice);
if(err != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
}
else if(idOpt == 4){ //Upper-barrier option
OptionBarrierUp *barrierUp = dynamic_cast<OptionBarrierUp *>(opt);
//Copy the PayoffCoeff vector array to GPU memory
err = cudaMemcpy(this->payoffCoeff, utils::convertPnlVectToFloat(barrierUp->payoffCoeff_->array,barrierUp->payoffCoeff_->size), barrierUp->payoffCoeff_->size*sizeof(float), cudaMemcpyHostToDevice);
if(err != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
//Copy the UpperBarrier vector array to GPU memory
err = cudaMemcpy(this->upperBarrier, utils::convertPnlVectToFloat(barrierUp->upperBarrier_->array,barrierUp->upperBarrier_->size), barrierUp->upperBarrier_->size*sizeof(float), cudaMemcpyHostToDevice);
if(err != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
}
else if(idOpt == 5){ //Basket option
OptionBasket *basket = dynamic_cast<OptionBasket *>(opt);
//Copy the PayoffCoeff vector array to GPU memory
err = cudaMemcpy(this->payoffCoeff, utils::convertPnlVectToFloat(basket->payoffCoeff_->array,basket->payoffCoeff_->size), basket->payoffCoeff_->size*sizeof(float), cudaMemcpyHostToDevice);
if(err != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
}
else if(idOpt == 6){ //Performance option
OptionPerformance *performance = dynamic_cast<OptionPerformance *>(opt);
//Copy the PayoffCoeff vector array to GPU memory
err = cudaMemcpy(this->payoffCoeff, utils::convertPnlVectToFloat(performance->payoffCoeff_->array,performance->payoffCoeff_->size), performance->payoffCoeff_->size*sizeof(float), cudaMemcpyHostToDevice);
if(err != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
}
else{ //This id does not exist; should never happen
exit(EXIT_FAILURE);
}
}
void CudaLib::memcpyBS(BS* bs){
cudaError_t err;
//*trend array
err = cudaMemcpy(this->trend, utils::convertPnlVectToFloat(bs->trend->array,bs->size_), bs->trend->size*sizeof(float), cudaMemcpyHostToDevice);
if(err != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
//*sigma_ array
err = cudaMemcpy(this->sigma, utils::convertPnlVectToFloat(bs->sigma_->array,bs->size_), bs->sigma_->size*sizeof(float), cudaMemcpyHostToDevice);
if(err != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
//*spot_ array
err = cudaMemcpy(this->spot, utils::convertPnlVectToFloat(bs->spot_->array,bs->size_), bs->spot_->size*sizeof(float), cudaMemcpyHostToDevice);
if(err != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
//*chol_ array
err = cudaMemcpy(this->chol, utils::convertPnlVectToFloat(bs->chol->array,bs->chol->m*bs->chol->n), bs->chol->m*bs->chol->n*sizeof(float), cudaMemcpyHostToDevice);
if(err != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err),__FILE__,__LINE__);
exit(EXIT_FAILURE);
}
}
void CudaLib::memcpyMonteCarlo(MonteCarlo* mc){
//Copy the option to GPU memory
memcpyOption(mc->opt_);
//Copy the Black&Scholes model to GPU memory
memcpyBS(mc->mod_);
}
|
40ef72f5f8080aac8cfb2575ad5682264343faac.hip
|
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
// This file is used to check the version of NCCL detected.
#include <tuple>
#include <rccl.h>
std::tuple<int, int> get_nccl_version() {
return { int(NCCL_MAJOR), int(NCCL_MINOR) };
}
|
40ef72f5f8080aac8cfb2575ad5682264343faac.cu
|
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
// This file is used to check the version of NCCL detected.
#include <tuple>
#include <nccl.h>
std::tuple<int, int> get_nccl_version() {
return { int(NCCL_MAJOR), int(NCCL_MINOR) };
}
|
13d0844eaaf33f4b5dbfd216872be1f033d3fc32.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2016 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cross_entropy_layer_tester_cuda.h"
#include <hip/hip_runtime.h>
#include "../cross_entropy_layer.h"
namespace nnforge
{
namespace cuda
{
extern __shared__ float arr_sh[];
__global__ void cross_entropy_kernel(
float * __restrict output,
const float * __restrict predicted,
const float * __restrict actual,
const float * __restrict scale_mask,
int input_feature_map_count,
int elem_count_per_feature_map,
float scale,
int entry_count)
{
int feature_map_id = threadIdx.x;
int neuron_id = blockIdx.x;
int entry_id = blockIdx.y;
int threadblock_size = blockDim.x;
float err = 0.0F;
int output_offset = entry_id * elem_count_per_feature_map + neuron_id;
float mask = 1.0F;
if (scale_mask)
mask = scale_mask[output_offset];
int thread_id = threadIdx.x;
if (mask != 0.0F)
{
int input_offset = (entry_id * input_feature_map_count + feature_map_id) * elem_count_per_feature_map + neuron_id;
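// Each thread strides over the feature maps, accumulating the binary cross-entropy -t*log(p) - (1-t)*log(1-p); both p and 1-p are clamped at 1e-20 to avoid log(0)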
while (feature_map_id < input_feature_map_count)
{
float actual_val = actual[input_offset];
float predicted_val = predicted[input_offset];
if (actual_val > 0.0F)
{
err -= actual_val * __logf(max(predicted_val, 1.0e-20F));
}
if (actual_val < 1.0F)
{
err -= (1.0F - actual_val) * __logf(max(1.0F - predicted_val, 1.0e-20F));
}
feature_map_id += threadblock_size;
input_offset += threadblock_size * elem_count_per_feature_map;
}
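// Two-stage reduction: a shift-down shuffle sums err within each warp, then lane 0 of every warp writes its partial sum to shared memory and the first warp combines them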
int lane_id = thread_id & 31;
#pragma unroll
for(int tx = 16; tx > 0; tx >>= 1)
err += __shfl_down(err, tx);
int warp_count = threadblock_size >> 5;
if (warp_count > 1)
{
if (lane_id == 0)
arr_sh[thread_id >> 5] = err;
__syncthreads();
if (thread_id < 32)
{
err = 0.0F;
if (thread_id < warp_count)
err = arr_sh[thread_id];
#pragma unroll
for(int tx = 4; tx > 0; tx >>= 1)
err += __shfl_down(err, tx);
}
}
}
if (thread_id == 0)
output[output_offset] = err * (mask * scale);
}
void cross_entropy_layer_tester_cuda::enqueue_forward_propagation(
hipStream_t stream_id,
cuda_linear_buffer_device::ptr output_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
unsigned int entry_count)
{
int threadblock_size = get_threadblock_size(input_configuration_specific_list[0].feature_map_count);
const float * scale_mask = 0;
if (input_buffers.size() > 2)
scale_mask = *input_buffers[2];
int smem_size = ((threadblock_size + 32 - 1) / 32) * sizeof(float);
hipLaunchKernelGGL(( cross_entropy_kernel), dim3(dim3(input_elem_count_per_feature_map_list[0], entry_count)), dim3(threadblock_size), smem_size, stream_id,
*output_buffer,
*input_buffers[0],
*input_buffers[1],
scale_mask,
input_configuration_specific_list[0].feature_map_count,
input_elem_count_per_feature_map_list[0],
scale,
entry_count);
}
void cross_entropy_layer_tester_cuda::tester_configured()
{
std::shared_ptr<const cross_entropy_layer> layer_derived = std::dynamic_pointer_cast<const cross_entropy_layer>(layer_schema);
scale = layer_derived->scale;
}
int cross_entropy_layer_tester_cuda::get_threadblock_size(int input_feature_map_count)
{
int threadblock_size;
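// Block sizes are rounded up to whole warps (multiples of 32); with 256 or more feature maps the block stays near 256 threads and each thread covers several maps via the kernel's strided loop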
if (input_feature_map_count < 256)
{
threadblock_size = (input_feature_map_count + 32 - 1) / 32 * 32;
}
else
{
int threadblock_count = (input_feature_map_count + 256 - 1) / 256;
threadblock_size = (input_feature_map_count + threadblock_count - 1) / threadblock_count;
threadblock_size = (threadblock_size + 32 - 1) / 32 * 32;
}
return threadblock_size;
}
}
}
|
13d0844eaaf33f4b5dbfd216872be1f033d3fc32.cu
|
/*
* Copyright 2011-2016 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cross_entropy_layer_tester_cuda.h"
#include <cuda_runtime.h>
#include "../cross_entropy_layer.h"
namespace nnforge
{
namespace cuda
{
extern __shared__ float arr_sh[];
__global__ void cross_entropy_kernel(
float * __restrict output,
const float * __restrict predicted,
const float * __restrict actual,
const float * __restrict scale_mask,
int input_feature_map_count,
int elem_count_per_feature_map,
float scale,
int entry_count)
{
int feature_map_id = threadIdx.x;
int neuron_id = blockIdx.x;
int entry_id = blockIdx.y;
int threadblock_size = blockDim.x;
float err = 0.0F;
int output_offset = entry_id * elem_count_per_feature_map + neuron_id;
float mask = 1.0F;
if (scale_mask)
mask = scale_mask[output_offset];
int thread_id = threadIdx.x;
if (mask != 0.0F)
{
int input_offset = (entry_id * input_feature_map_count + feature_map_id) * elem_count_per_feature_map + neuron_id;
while (feature_map_id < input_feature_map_count)
{
float actual_val = actual[input_offset];
float predicted_val = predicted[input_offset];
if (actual_val > 0.0F)
{
err -= actual_val * __logf(max(predicted_val, 1.0e-20F));
}
if (actual_val < 1.0F)
{
err -= (1.0F - actual_val) * __logf(max(1.0F - predicted_val, 1.0e-20F));
}
feature_map_id += threadblock_size;
input_offset += threadblock_size * elem_count_per_feature_map;
}
int lane_id = thread_id & 31;
#pragma unroll
for(int tx = 16; tx > 0; tx >>= 1)
err += __shfl_down(err, tx);
int warp_count = threadblock_size >> 5;
if (warp_count > 1)
{
if (lane_id == 0)
arr_sh[thread_id >> 5] = err;
__syncthreads();
if (thread_id < 32)
{
err = 0.0F;
if (thread_id < warp_count)
err = arr_sh[thread_id];
#pragma unroll
for(int tx = 4; tx > 0; tx >>= 1)
err += __shfl_down(err, tx);
}
}
}
if (thread_id == 0)
output[output_offset] = err * (mask * scale);
}
void cross_entropy_layer_tester_cuda::enqueue_forward_propagation(
cudaStream_t stream_id,
cuda_linear_buffer_device::ptr output_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
unsigned int entry_count)
{
int threadblock_size = get_threadblock_size(input_configuration_specific_list[0].feature_map_count);
const float * scale_mask = 0;
if (input_buffers.size() > 2)
scale_mask = *input_buffers[2];
int smem_size = ((threadblock_size + 32 - 1) / 32) * sizeof(float);
cross_entropy_kernel<<<dim3(input_elem_count_per_feature_map_list[0], entry_count), threadblock_size, smem_size, stream_id>>>(
*output_buffer,
*input_buffers[0],
*input_buffers[1],
scale_mask,
input_configuration_specific_list[0].feature_map_count,
input_elem_count_per_feature_map_list[0],
scale,
entry_count);
}
void cross_entropy_layer_tester_cuda::tester_configured()
{
std::shared_ptr<const cross_entropy_layer> layer_derived = std::dynamic_pointer_cast<const cross_entropy_layer>(layer_schema);
scale = layer_derived->scale;
}
int cross_entropy_layer_tester_cuda::get_threadblock_size(int input_feature_map_count)
{
int threadblock_size;
if (input_feature_map_count < 256)
{
threadblock_size = (input_feature_map_count + 32 - 1) / 32 * 32;
}
else
{
int threadblock_count = (input_feature_map_count + 256 - 1) / 256;
threadblock_size = (input_feature_map_count + threadblock_count - 1) / threadblock_count;
threadblock_size = (threadblock_size + 32 - 1) / 32 * 32;
}
return threadblock_size;
}
}
}
|
a81cca42cab894b93f5a6203e0b7ccc0fe6f2c43.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <cassert>
#include <chrono>
#include "../constants_bench_3d.h"
#include <hip/hip_runtime.h>
#include <iostream>
#include "../../../../utils.h"
// Dataset
constexpr auto N = mean_shift::cuda::bench_3d::case_5000::N;
constexpr auto D = mean_shift::cuda::bench_3d::D;
constexpr auto M = mean_shift::cuda::bench_3d::M;
const auto PATH_TO_DATA = mean_shift::cuda::bench_3d::case_5000::PATH_TO_DATA;
const auto PATH_TO_CENTROIDS = mean_shift::cuda::bench_3d::case_5000::PATH_TO_CENTROIDS;
const auto LOG_NAIVE = mean_shift::cuda::bench_3d::case_5000::LOG_NAIVE;
// Hyperparams
constexpr auto RADIUS = mean_shift::cuda::bench_3d::case_5000::RADIUS;
constexpr auto NUM_ITER = mean_shift::cuda::bench_3d::NUM_ITER;
constexpr auto DBL_SIGMA_SQ = mean_shift::cuda::bench_3d::case_5000::DBL_SIGMA_SQ;
constexpr auto MIN_DISTANCE = mean_shift::cuda::bench_3d::case_5000::MIN_DISTANCE;
// Device
constexpr auto THREADS = mean_shift::cuda::bench_3d::THREADS;
constexpr auto BLOCKS = mean_shift::cuda::bench_3d::case_5000::BLOCKS;
// Benchmarking
constexpr auto NUM_TRIALS = mean_shift::cuda::bench_3d::NUM_TRIALS;
__global__ void mean_shift_naive(float *data, float *data_next) {
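// One thread per point: every point whose squared distance is within RADIUS contributes a Gaussian weight, and the weighted mean becomes this point's new position in data_next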
size_t tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (tid < N) {
size_t row = tid * D;
float new_position[D] = {0.};
float tot_weight = 0.;
for (size_t i = 0; i < N; ++i) {
size_t row_n = i * D;
float sq_dist = 0.;
for (size_t j = 0; j < D; ++j) {
sq_dist += (data[row + j] - data[row_n + j]) * (data[row + j] - data[row_n + j]);
}
if (sq_dist <= RADIUS) {
float weight = expf(-sq_dist / DBL_SIGMA_SQ);
for (size_t j = 0; j < D; ++j) {
new_position[j] += weight * data[row_n + j];
}
tot_weight += weight;
}
}
for (size_t j = 0; j < D; ++j) {
data_next[row + j] = new_position[j] / tot_weight;
}
}
return;
}
double run_once() {
// Load data
std::array<float, N * D> data = mean_shift::cuda::utils::load_csv<N, D>(PATH_TO_DATA, ',');
std::array<float, N * D> data_next {};
float *dev_data;
float *dev_data_next;
// Allocate GPU memory
size_t data_bytes = N * D * sizeof(float);
hipMalloc(&dev_data, data_bytes);
hipMalloc(&dev_data_next, data_bytes);
// Copy to GPU memory
hipMemcpy(dev_data, data.data(), data_bytes, hipMemcpyHostToDevice);
hipMemcpy(dev_data_next, data_next.data(), data_bytes, hipMemcpyHostToDevice);
// Run mean shift clustering
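// dev_data and dev_data_next are used as ping-pong buffers: each iteration reads the old positions and writes the shifted ones, then the pointers are swapped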
auto start = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < NUM_ITER; ++i) {
hipLaunchKernelGGL(( mean_shift_naive), dim3(BLOCKS), dim3(THREADS), 0, 0, dev_data, dev_data_next);
hipDeviceSynchronize();
mean_shift::cuda::utils::swap(dev_data, dev_data_next);
}
hipMemcpy(data.data(), dev_data, data_bytes, hipMemcpyDeviceToHost);
const auto centroids = mean_shift::cuda::utils::reduce_to_centroids<N, D>(data, MIN_DISTANCE);
auto end = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
// Check if correct number
assert(centroids.size() == M);
return duration;
}
int main() {
std::array<double, NUM_TRIALS> exec_times;
for (auto i = 0; i < NUM_TRIALS; ++i)
exec_times[i] = run_once();
mean_shift::cuda::utils::write_csv<double, NUM_TRIALS>(exec_times, LOG_NAIVE, ',');
return 0;
}
|
a81cca42cab894b93f5a6203e0b7ccc0fe6f2c43.cu
|
#include <cassert>
#include <chrono>
#include "../constants_bench_3d.h"
#include <cuda.h>
#include <iostream>
#include "../../../../utils.h"
// Dataset
constexpr auto N = mean_shift::cuda::bench_3d::case_5000::N;
constexpr auto D = mean_shift::cuda::bench_3d::D;
constexpr auto M = mean_shift::cuda::bench_3d::M;
const auto PATH_TO_DATA = mean_shift::cuda::bench_3d::case_5000::PATH_TO_DATA;
const auto PATH_TO_CENTROIDS = mean_shift::cuda::bench_3d::case_5000::PATH_TO_CENTROIDS;
const auto LOG_NAIVE = mean_shift::cuda::bench_3d::case_5000::LOG_NAIVE;
// Hyperparams
constexpr auto RADIUS = mean_shift::cuda::bench_3d::case_5000::RADIUS;
constexpr auto NUM_ITER = mean_shift::cuda::bench_3d::NUM_ITER;
constexpr auto DBL_SIGMA_SQ = mean_shift::cuda::bench_3d::case_5000::DBL_SIGMA_SQ;
constexpr auto MIN_DISTANCE = mean_shift::cuda::bench_3d::case_5000::MIN_DISTANCE;
// Device
constexpr auto THREADS = mean_shift::cuda::bench_3d::THREADS;
constexpr auto BLOCKS = mean_shift::cuda::bench_3d::case_5000::BLOCKS;
// Benchmarking
constexpr auto NUM_TRIALS = mean_shift::cuda::bench_3d::NUM_TRIALS;
__global__ void mean_shift_naive(float *data, float *data_next) {
size_t tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (tid < N) {
size_t row = tid * D;
float new_position[D] = {0.};
float tot_weight = 0.;
for (size_t i = 0; i < N; ++i) {
size_t row_n = i * D;
float sq_dist = 0.;
for (size_t j = 0; j < D; ++j) {
sq_dist += (data[row + j] - data[row_n + j]) * (data[row + j] - data[row_n + j]);
}
if (sq_dist <= RADIUS) {
float weight = expf(-sq_dist / DBL_SIGMA_SQ);
for (size_t j = 0; j < D; ++j) {
new_position[j] += weight * data[row_n + j];
}
tot_weight += weight;
}
}
for (size_t j = 0; j < D; ++j) {
data_next[row + j] = new_position[j] / tot_weight;
}
}
return;
}
double run_once() {
// Load data
std::array<float, N * D> data = mean_shift::cuda::utils::load_csv<N, D>(PATH_TO_DATA, ',');
std::array<float, N * D> data_next {};
float *dev_data;
float *dev_data_next;
// Allocate GPU memory
size_t data_bytes = N * D * sizeof(float);
cudaMalloc(&dev_data, data_bytes);
cudaMalloc(&dev_data_next, data_bytes);
// Copy to GPU memory
cudaMemcpy(dev_data, data.data(), data_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(dev_data_next, data_next.data(), data_bytes, cudaMemcpyHostToDevice);
// Run mean shift clustering
auto start = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < NUM_ITER; ++i) {
mean_shift_naive<<<BLOCKS, THREADS>>>(dev_data, dev_data_next);
cudaDeviceSynchronize();
mean_shift::cuda::utils::swap(dev_data, dev_data_next);
}
cudaMemcpy(data.data(), dev_data, data_bytes, cudaMemcpyDeviceToHost);
const auto centroids = mean_shift::cuda::utils::reduce_to_centroids<N, D>(data, MIN_DISTANCE);
auto end = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
// Check if correct number
assert(centroids.size() == M);
return duration;
}
int main() {
std::array<double, NUM_TRIALS> exec_times;
for (auto i = 0; i < NUM_TRIALS; ++i)
exec_times[i] = run_once();
mean_shift::cuda::utils::write_csv<double, NUM_TRIALS>(exec_times, LOG_NAIVE, ',');
return 0;
}
|
464591ea885301efc036524a7bf9bcc787ac59b0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#pragma once
#include "markEmbedding.cuh"
__global__ void kernelMarkEmbedding(cHistory **dH,struct_Q *device_arr_Q,int lastColumn,int n,unsigned int maxOfVer,int *d_O,int *d_N){
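// Each thread walks one embedding backwards through the chain of Q columns, marking its vertices in d_arr_HO and its edges in d_arr_HLN of that embedding's history structure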
int i= blockDim.x*blockIdx.x + threadIdx.x; //each thread i processes one embedding
if(i<n){
int vid = device_arr_Q[lastColumn]._d_arr_Q[i].vid; // starting from the last Q column, thread i handles the i-th embedding
int indexOfFirstVertexInGraph = vid-(vid%maxOfVer);
int toVid = vid;//"to" vertex of the current edge of the embedding
int idxOfdH= (vid%maxOfVer);
dH[i]->d_arr_HO[idxOfdH]=2;
int prevQ=device_arr_Q[lastColumn]._prevQ;
int newi=device_arr_Q[lastColumn]._d_arr_Q[i].idx;
while (true)
{
//printf("\nd_arr_Q[%d]: (prevQ:%d, idx:%d,vid:%d)",prevQ,device_arr_Q[prevQ]._prevQ,device_arr_Q[prevQ]._d_arr_Q[newi].idx,device_arr_Q[prevQ]._d_arr_Q[newi].vid);
vid = device_arr_Q[prevQ]._d_arr_Q[newi].vid;
int fromVid=vid; //"from" vertex of the current edge of the embedding
int idxEdge = d_O[vid]-d_O[indexOfFirstVertexInGraph]; //position of the edge to update, initialised as the index of the current vid minus the index of the first vertex of that graph.
int indexOfdN=d_O[fromVid];
while (d_N[indexOfdN]!=toVid){
idxEdge=idxEdge+1;
indexOfdN++;
}
int fromVidR=toVid;
int toVidR=fromVid;
int indexOfEdgeR=d_O[fromVidR]-d_O[indexOfFirstVertexInGraph];
indexOfdN=d_O[fromVidR];
while(d_N[indexOfdN]!=toVidR){
indexOfEdgeR++;
indexOfdN++;
}
//If this is not the first vertex, idxEdge must be increased by the sum of the degrees of the preceding vertices
//The sum of the degrees of the preceding vertices equals
idxOfdH = (vid%maxOfVer); //mark the vertex as part of the embedding
dH[i]->d_arr_HO[idxOfdH]=2;
dH[i]->d_arr_HLN[idxEdge]=2;//mark the edge as part of the embedding; since this is a simple undirected graph edge AB equals edge BA, so edge BA must be marked as part of the embedding as well.
dH[i]->d_arr_HLN[indexOfEdgeR]=2;
if(device_arr_Q[prevQ]._prevQ==-1) return; //if this is the first Q column, stop: the embedding has been fully traversed
newi=device_arr_Q[prevQ]._d_arr_Q[i].idx; //otherwise take the index from the previous Q column
prevQ=device_arr_Q[prevQ]._prevQ; //move to the previous Q column
toVid=fromVid; //update the "to" vertex.
}
}
}
hipError_t markEmbedding(cHistory **dH,struct_Q *device_arr_Q,int lastColumn,int n,unsigned int maxOfVer,int *d_O,int *d_N){
hipError_t cudaStatus;
dim3 block(1024);
dim3 grid((n+block.x-1)/block.x);
/*printf("\****************ndH arr***********"); //check whether the dH array data on the device is correct
kernelPrintdeviceH<<<1,1>>>(dH,n);*/
hipLaunchKernelGGL(( kernelMarkEmbedding), dim3(grid),dim3(block), 0, 0, dH,device_arr_Q,lastColumn,n,maxOfVer,d_O,d_N);
hipDeviceSynchronize();
cudaStatus=hipGetLastError();
if(cudaStatus!=hipSuccess){
fprintf(stderr,"\ncudaDeviceSynchronize markEmbedding failed");
goto Error;
}
//printf("\****************ndH arr***********"); //check the dH array data on the device after marking the embeddings on the rightmost path
//kernelPrintdeviceH<<<1,1>>>(dH,n);
hipDeviceSynchronize();
cudaStatus=hipGetLastError();
if(cudaStatus!=hipSuccess){
fprintf(stderr,"\ncudaDeviceSynchronize markEmbedding failed");
goto Error;
}
Error:
return cudaStatus;
}
|
464591ea885301efc036524a7bf9bcc787ac59b0.cu
|
#pragma once
#include "markEmbedding.cuh"
__global__ void kernelMarkEmbedding(cHistory **dH,struct_Q *device_arr_Q,int lastColumn,int n,unsigned int maxOfVer,int *d_O,int *d_N){
int i= blockDim.x*blockIdx.x + threadIdx.x; //each thread i processes one embedding
if(i<n){
int vid = device_arr_Q[lastColumn]._d_arr_Q[i].vid; // starting from the last Q column, thread i handles the i-th embedding
int indexOfFirstVertexInGraph = vid-(vid%maxOfVer);
int toVid = vid;//"to" vertex of the current edge of the embedding
int idxOfdH= (vid%maxOfVer);
dH[i]->d_arr_HO[idxOfdH]=2;
int prevQ=device_arr_Q[lastColumn]._prevQ;
int newi=device_arr_Q[lastColumn]._d_arr_Q[i].idx;
while (true)
{
//printf("\nd_arr_Q[%d]: (prevQ:%d, idx:%d,vid:%d)",prevQ,device_arr_Q[prevQ]._prevQ,device_arr_Q[prevQ]._d_arr_Q[newi].idx,device_arr_Q[prevQ]._d_arr_Q[newi].vid);
vid = device_arr_Q[prevQ]._d_arr_Q[newi].vid;
int fromVid=vid; //"from" vertex of the current edge of the embedding
int idxEdge = d_O[vid]-d_O[indexOfFirstVertexInGraph]; //position of the edge to update, initialised as the index of the current vid minus the index of the first vertex of that graph.
int indexOfdN=d_O[fromVid];
while (d_N[indexOfdN]!=toVid){
idxEdge=idxEdge+1;
indexOfdN++;
}
int fromVidR=toVid;
int toVidR=fromVid;
int indexOfEdgeR=d_O[fromVidR]-d_O[indexOfFirstVertexInGraph];
indexOfdN=d_O[fromVidR];
while(d_N[indexOfdN]!=toVidR){
indexOfEdgeR++;
indexOfdN++;
}
//If this is not the first vertex, idxEdge must be increased by the sum of the degrees of the preceding vertices
//The sum of the degrees of the preceding vertices equals
idxOfdH = (vid%maxOfVer); //mark the vertex as part of the embedding
dH[i]->d_arr_HO[idxOfdH]=2;
dH[i]->d_arr_HLN[idxEdge]=2;//mark the edge as part of the embedding; since this is a simple undirected graph edge AB equals edge BA, so edge BA must be marked as part of the embedding as well.
dH[i]->d_arr_HLN[indexOfEdgeR]=2;
if(device_arr_Q[prevQ]._prevQ==-1) return; //if this is the first Q column, stop: the embedding has been fully traversed
newi=device_arr_Q[prevQ]._d_arr_Q[i].idx; //otherwise take the index from the previous Q column
prevQ=device_arr_Q[prevQ]._prevQ; //move to the previous Q column
toVid=fromVid; //update the "to" vertex.
}
}
}
cudaError_t markEmbedding(cHistory **dH,struct_Q *device_arr_Q,int lastColumn,int n,unsigned int maxOfVer,int *d_O,int *d_N){
cudaError_t cudaStatus;
dim3 block(1024);
dim3 grid((n+block.x-1)/block.x);
/*printf("\****************ndH arr***********"); //check whether the dH array data on the device is correct
kernelPrintdeviceH<<<1,1>>>(dH,n);*/
kernelMarkEmbedding<<<grid,block>>>(dH,device_arr_Q,lastColumn,n,maxOfVer,d_O,d_N);
cudaDeviceSynchronize();
cudaStatus=cudaGetLastError();
if(cudaStatus!=cudaSuccess){
fprintf(stderr,"\ncudaDeviceSynchronize markEmbedding failed");
goto Error;
}
//printf("\****************ndH arr***********"); //check the dH array data on the device after marking the embeddings on the rightmost path
//kernelPrintdeviceH<<<1,1>>>(dH,n);
cudaDeviceSynchronize();
cudaStatus=cudaGetLastError();
if(cudaStatus!=cudaSuccess){
fprintf(stderr,"\ncudaDeviceSynchronize markEmbedding failed");
goto Error;
}
Error:
return cudaStatus;
}
|
91b52a407e0759682a23f9501c0e62ef84b557dc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "calcul_min.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
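// For each requested matrix size, every block shape is benchmarked: one launch plus 10 untimed warm-up iterations, then 1000 timed launches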
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned long *ord = NULL;
hipMalloc(&ord, XSIZE*YSIZE);
int ind_start = 1;
int ind_end = 1;
unsigned long long *ymin = NULL;
hipMalloc(&ymin, XSIZE*YSIZE);
int *ind_min = NULL;
hipMalloc(&ind_min, XSIZE*YSIZE);
int size_max_parallel = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((calcul_min), dim3(gridBlock), dim3(threadBlock), 0, 0, ord, ind_start, ind_end, ymin, ind_min, size_max_parallel);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((calcul_min), dim3(gridBlock), dim3(threadBlock), 0, 0, ord, ind_start, ind_end, ymin, ind_min, size_max_parallel);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((calcul_min), dim3(gridBlock), dim3(threadBlock), 0, 0, ord, ind_start, ind_end, ymin, ind_min, size_max_parallel);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
91b52a407e0759682a23f9501c0e62ef84b557dc.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "calcul_min.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned long *ord = NULL;
cudaMalloc(&ord, XSIZE*YSIZE);
int ind_start = 1;
int ind_end = 1;
unsigned long long *ymin = NULL;
cudaMalloc(&ymin, XSIZE*YSIZE);
int *ind_min = NULL;
cudaMalloc(&ind_min, XSIZE*YSIZE);
int size_max_parallel = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
calcul_min<<<gridBlock,threadBlock>>>(ord,ind_start,ind_end,ymin,ind_min,size_max_parallel);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
calcul_min<<<gridBlock,threadBlock>>>(ord,ind_start,ind_end,ymin,ind_min,size_max_parallel);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
calcul_min<<<gridBlock,threadBlock>>>(ord,ind_start,ind_end,ymin,ind_min,size_max_parallel);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
d9389b77b09fd7c795ab5d16d1c2e4dbb0afcff4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <THHUNN/THHUNN.h>
#include <THHUNN/common.h>
#include <THH/THHDeviceTensor.cuh>
#include <THH/THHDeviceTensorUtils.cuh>
#include <THH/THHDeviceUtils.cuh>
#include <TH/THHalf.h>
#include <THHUNN/THHHalfAutoNumerics.cuh>
#include <THH/THHAtomics.cuh>
#include <cfloat>
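// getInterval maps an output index to the left edge of its pooling window: the per-plane random sample in [0,1) jitters the window starts, and the last output window is pinned to the end of the input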
template <typename Dtype, typename Acctype>
__device__ inline int getInterval(Acctype sample,
int index,
int inputSize,
int outputSize,
int poolSize) {
Acctype alpha = (Acctype)(inputSize - poolSize) / (Acctype) (outputSize - 1);
if (index == outputSize - 1) {
return inputSize - poolSize;
} else {
return (int) ((index + sample) * alpha) - (int) (sample * alpha);
}
}
// We template on poolSizeW to allow the innermost loop to be unrolled
template <int PoolSizeWStatic, typename Dtype, typename Acctype>
__global__ void SpatialFractionalMaxPooling_updateOutput(
THCDeviceTensor<Dtype, 4> input,
THCDeviceTensor<Dtype, 4> output,
THCDeviceTensor<THCIndex_t, 4> indices,
THCDeviceTensor<Dtype, 3> samples,
int poolSizeW, int poolSizeH) {
// Output (h, w) point that this thread is responsible for
int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
// Each thread generates a specific output point
if (ourOutputPoint < output.getSize(2) * output.getSize(3)) {
int outputW = ourOutputPoint % output.getSize(3);
int outputH = ourOutputPoint / output.getSize(3);
int poolW = getInterval<Dtype, Acctype>(ScalarConvert<Dtype, Acctype>::to(samples[batch][plane][0]), outputW,
input.getSize(3), output.getSize(3), poolSizeW);
int poolH = getInterval<Dtype, Acctype>(ScalarConvert<Dtype, Acctype>::to(samples[batch][plane][1]), outputH,
input.getSize(2), output.getSize(2), poolSizeH);
Dtype maxVal = THCNumerics<Dtype>::min();
int maxIndex = -1;
for (int h = poolH; h < poolH + poolSizeH; ++h) {
if (PoolSizeWStatic == -1) {
for (int w = poolW; w < poolW + poolSizeW; ++w) {
Dtype val = input[batch][plane][h][w];
// for consistency with THNN, favor the first max
if (val > maxVal) {
maxIndex = h * input.getSize(3) + w;
maxVal = val;
}
}
} else {
#pragma unroll
for (int i = 0; i < PoolSizeWStatic; ++i) {
int w = i + poolW;
Dtype val = input[batch][plane][h][w];
// for consistency with THNN, favor the first max
if (val > maxVal) {
maxIndex = h * input.getSize(3) + w;
maxVal = val;
}
}
}
}
assert(THCNumerics<Dtype>::ne(maxVal, THCNumerics<Dtype>::min()));
assert(maxIndex != -1);
// +1 for Lua index
indices[batch][plane][outputH][outputW] = maxIndex + TH_INDEX_BASE;
output[batch][plane][outputH][outputW] = maxVal;
}
}
template <typename Dtype>
__global__ void SpatialFractionalMaxPooling_updateGradInput(
THCDeviceTensor<Dtype, 4> gradInput,
THCDeviceTensor<Dtype, 4> gradOutput,
THCDeviceTensor<THCIndex_t, 4> indices) {
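// Scatter each output gradient back to the recorded argmax input location; atomicAdd is required because fractional pooling windows can overlap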
// Output (h, w) point that this thread is responsible for
int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
// Each thread generates a specific output point
if (ourOutputPoint < gradOutput.getSize(2) * gradOutput.getSize(3)) {
int outputW = ourOutputPoint % gradOutput.getSize(3);
int outputH = ourOutputPoint / gradOutput.getSize(3);
int index = indices[batch][plane][outputH][outputW] - TH_INDEX_BASE;
assert(index >= 0);
int inputW = index % gradInput.getSize(3);
int inputH = index / gradInput.getSize(3);
assert(inputH < gradInput.getSize(2));
atomicAdd(gradInput[batch][plane][inputH][inputW].data(),
gradOutput[batch][plane][outputH][outputW]);
}
}
#include <THHUNN/generic/SpatialFractionalMaxPooling.hip>
#include <THH/THHGenerateFloatTypes.h>
|
d9389b77b09fd7c795ab5d16d1c2e4dbb0afcff4.cu
|
#include <THCUNN/THCUNN.h>
#include <THCUNN/common.h>
#include <THC/THCDeviceTensor.cuh>
#include <THC/THCDeviceTensorUtils.cuh>
#include <THC/THCDeviceUtils.cuh>
#include <TH/THHalf.h>
#include <THCUNN/THCHalfAutoNumerics.cuh>
#include <THC/THCAtomics.cuh>
#include <cfloat>
template <typename Dtype, typename Acctype>
__device__ inline int getInterval(Acctype sample,
int index,
int inputSize,
int outputSize,
int poolSize) {
Acctype alpha = (Acctype)(inputSize - poolSize) / (Acctype) (outputSize - 1);
if (index == outputSize - 1) {
return inputSize - poolSize;
} else {
return (int) ((index + sample) * alpha) - (int) (sample * alpha);
}
}
// We template on poolSizeW to allow the innermost loop to be unrolled
template <int PoolSizeWStatic, typename Dtype, typename Acctype>
__global__ void SpatialFractionalMaxPooling_updateOutput(
THCDeviceTensor<Dtype, 4> input,
THCDeviceTensor<Dtype, 4> output,
THCDeviceTensor<THCIndex_t, 4> indices,
THCDeviceTensor<Dtype, 3> samples,
int poolSizeW, int poolSizeH) {
// Output (h, w) point that this thread is responsible for
int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
// Each thread generates a specific output point
if (ourOutputPoint < output.getSize(2) * output.getSize(3)) {
int outputW = ourOutputPoint % output.getSize(3);
int outputH = ourOutputPoint / output.getSize(3);
int poolW = getInterval<Dtype, Acctype>(ScalarConvert<Dtype, Acctype>::to(samples[batch][plane][0]), outputW,
input.getSize(3), output.getSize(3), poolSizeW);
int poolH = getInterval<Dtype, Acctype>(ScalarConvert<Dtype, Acctype>::to(samples[batch][plane][1]), outputH,
input.getSize(2), output.getSize(2), poolSizeH);
Dtype maxVal = THCNumerics<Dtype>::min();
int maxIndex = -1;
for (int h = poolH; h < poolH + poolSizeH; ++h) {
if (PoolSizeWStatic == -1) {
for (int w = poolW; w < poolW + poolSizeW; ++w) {
Dtype val = input[batch][plane][h][w];
// for consistency with THNN, favor the first max
if (val > maxVal) {
maxIndex = h * input.getSize(3) + w;
maxVal = val;
}
}
} else {
#pragma unroll
for (int i = 0; i < PoolSizeWStatic; ++i) {
int w = i + poolW;
Dtype val = input[batch][plane][h][w];
// for consistency with THNN, favor the first max
if (val > maxVal) {
maxIndex = h * input.getSize(3) + w;
maxVal = val;
}
}
}
}
assert(THCNumerics<Dtype>::ne(maxVal, THCNumerics<Dtype>::min()));
assert(maxIndex != -1);
// +1 for Lua index
indices[batch][plane][outputH][outputW] = maxIndex + TH_INDEX_BASE;
output[batch][plane][outputH][outputW] = maxVal;
}
}
template <typename Dtype>
__global__ void SpatialFractionalMaxPooling_updateGradInput(
THCDeviceTensor<Dtype, 4> gradInput,
THCDeviceTensor<Dtype, 4> gradOutput,
THCDeviceTensor<THCIndex_t, 4> indices) {
// Output (h, w) point that this thread is responsible for
int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
// Each thread generates a specific output point
if (ourOutputPoint < gradOutput.getSize(2) * gradOutput.getSize(3)) {
int outputW = ourOutputPoint % gradOutput.getSize(3);
int outputH = ourOutputPoint / gradOutput.getSize(3);
int index = indices[batch][plane][outputH][outputW] - TH_INDEX_BASE;
assert(index >= 0);
int inputW = index % gradInput.getSize(3);
int inputH = index / gradInput.getSize(3);
assert(inputH < gradInput.getSize(2));
atomicAdd(gradInput[batch][plane][inputH][inputW].data(),
gradOutput[batch][plane][outputH][outputW]);
}
}
#include <THCUNN/generic/SpatialFractionalMaxPooling.cu>
#include <THC/THCGenerateFloatTypes.h>
|
0842ce281e0342bdc602c66df3d4f460a1176384.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <hip/hip_runtime.h>
#define ThreadNum 256
__global__ void printBase(int **base, int length) {
int t_id = threadIdx.x;
int b_id = blockIdx.x;
if (t_id < length) {
printf("block:%d-%d : %d\n", b_id, t_id, base[b_id][t_id]);
}
}
int main(int agrc, char *argv[]) {
int limit = atoi(argv[1]);
int **base;
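// Managed (unified) memory: the same pointers are valid on host and device, so the host loop below fills the arrays that the kernel later prints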
hipMallocManaged(&base, sizeof(int*) * limit);
hipDeviceSynchronize();
int i, j;
for (i = 0; i < limit; i ++) {
hipMallocManaged(&base[i], sizeof(int) * 256);
for (j = 0; j < ThreadNum; j ++) {
base[i][j] = i * 1000 + j;
}
}
int block_num = limit;
hipLaunchKernelGGL(( printBase), dim3(block_num), dim3(ThreadNum), 0, 0, base, ThreadNum);
hipDeviceSynchronize();
hipDeviceReset();
hipFree(base);
return 0;
}
|
0842ce281e0342bdc602c66df3d4f460a1176384.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda.h>
#define ThreadNum 256
__global__ void printBase(int **base, int length) {
int t_id = threadIdx.x;
int b_id = blockIdx.x;
if (t_id < length) {
printf("block:%d-%d : %d\n", b_id, t_id, base[b_id][t_id]);
}
}
int main(int agrc, char *argv[]) {
int limit = atoi(argv[1]);
int **base;
cudaMallocManaged(&base, sizeof(int*) * limit);
cudaDeviceSynchronize();
int i, j;
for (i = 0; i < limit; i ++) {
cudaMallocManaged(&base[i], sizeof(int) * 256);
for (j = 0; j < ThreadNum; j ++) {
base[i][j] = i * 1000 + j;
}
}
int block_num = limit;
printBase<<<block_num, ThreadNum>>>(base, ThreadNum);
cudaDeviceSynchronize();
cudaDeviceReset();
cudaFree(base);
return 0;
}
|
cce59e77c7980b8d9158c79bb6316493716143cd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void add(int *a, int *b, int *c)
{
c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
}
|
cce59e77c7980b8d9158c79bb6316493716143cd.cu
|
#include "includes.h"
__global__ void add(int *a, int *b, int *c)
{
c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
}
|
79af622f385ec1f339c66e2cc432dda313a5f2b1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include "cuda_util.h"
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/extrema.h>
#define THREAD_NUM 128
#define BLOCKS_SCHED 2
#define SIZE_WARP 32
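// row_index/column_index map a linear index i in [0, M*(M+1)/2) back to the (row, column)
// coordinates of the upper triangle of an M x M matrix by inverting the triangular-number formula;
// e.g. for M = 4, i = 0..9 maps to (0,0)(0,1)(0,2)(0,3)(1,1)(1,2)(1,3)(2,2)(2,3)(3,3)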
__device__ inline int row_index( unsigned int i, unsigned int M ){
double m = M;
double row = (-2*m - 1 + sqrt( (4*m*(m+1) - 8*(double)i - 7) )) / -2;
if( row == (double)(int) row ) row -= 1;
return (unsigned int) row;
}
__device__ inline int column_index( unsigned int i, unsigned int M ){
unsigned int row = row_index( i, M);
return i - M * row + row*(row+1) / 2;
}
__global__ void computeCgh(int * vetor, int * result, int numlinhas, int dev_id, int dev_count) {
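// One thread per column pair (c,h): Kadane's algorithm runs down the rows of the column-wise prefix sums, the per-block best is reduced in shared memory, and the final maximum is folded into result[0] with atomicMax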
int total_comp = numlinhas*(numlinhas+1)/2;
int tid = ((total_comp/dev_count)*dev_id) + threadIdx.x + blockIdx.x * blockDim.x; //thread id;
int c = row_index(tid,numlinhas);
int h = column_index(tid,numlinhas);
int max_so_far = INT_MIN, max_ending_here = INT_MIN;
extern __shared__ int max_block[];
if(threadIdx.x == 0)
max_block[0] = INT_MIN;
__syncthreads();
if(tid < total_comp && h >= c) {
for(int i = 0; i < numlinhas; i++) {
int value = vetor[i*numlinhas + h] - (c == 0 ? 0 : vetor[i*numlinhas + c - 1]);
if(max_ending_here < 0) {
max_ending_here = value;
}
else {
max_ending_here += value;
}
if(max_ending_here >= max_so_far ) {
max_so_far = max_ending_here;
}
}
atomicMax(&max_block[0],max_so_far);
}
__syncthreads();
if(threadIdx.x == 0)
atomicMax(&result[0],max_block[0]);
}
int main() {
int el;
scanf("%d",&el);
el *= el;
int * vetor = (int*)malloc(el*sizeof(int));
int * keys = (int*)malloc(el*sizeof(int));
int numlinhas = (int)sqrt(el);
int j = 0;
for(int i = 1; i < el+1; i++)
{
keys[i-1] = j;
scanf("%d",&vetor[i-1]);
if(i % numlinhas == 0)
j++;
}
int devCount;
HANDLE_ERROR( hipGetDeviceCount(&devCount));
thrust::host_vector<int> max_device(devCount);
int global_max = -1;
#pragma omp parallel num_threads(devCount) default(shared)
{
const int dev_id = omp_get_thread_num();
HANDLE_ERROR( hipSetDevice(dev_id) );
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, dev_id);
int total_comp = numlinhas*(numlinhas+1)/2;
unsigned tnumb = total_comp / THREAD_NUM > 0 ? THREAD_NUM : 32;
unsigned bnumb = ((int)(total_comp / tnumb / devCount)) + 1;
dim3 threadsPorBloco(tnumb);
dim3 blocosPorGrid(bnumb);
thrust::device_vector<int> d_vetor(vetor, vetor + el);
thrust::device_vector<int> d_keys(keys, keys + el);
thrust::device_vector<int> d_preffixsum(el);
thrust::device_vector<int> d_result(1);
float time;
hipEvent_t start,stop;
HANDLE_ERROR( hipEventCreate(&start) );
HANDLE_ERROR( hipEventCreate(&stop) );
HANDLE_ERROR( hipEventRecord(start, 0) );
thrust::inclusive_scan_by_key(d_keys.begin(), d_keys.end(),d_vetor.begin(),d_preffixsum.begin());
CudaCheckError();
hipLaunchKernelGGL(( computeCgh), dim3(blocosPorGrid),dim3(threadsPorBloco),sizeof(int), 0, thrust::raw_pointer_cast(d_preffixsum.data()),thrust::raw_pointer_cast(d_result.data()),numlinhas, dev_id, devCount);
HANDLE_ERROR( hipDeviceSynchronize() );
max_device[dev_id] = d_result[0];
#pragma omp barrier
#pragma omp single
{
for(int i = 0; i < devCount; i++) {
if(global_max < max_device[i])
global_max = max_device[i];
}
}
HANDLE_ERROR( hipEventRecord(stop, 0) );
HANDLE_ERROR( hipEventSynchronize(stop) );
HANDLE_ERROR( hipEventElapsedTime(&time, start, stop) );
#pragma omp single
{
//printf("\nO resultado e: %d\n",global_max);
//printf("O tempo foi de: %.9f ms para a mmax2d\n", time);
printf("mmax2d_multigpu: %d, tempo: %.9fms\n",global_max,time);
}
}
return 0;
}
|
79af622f385ec1f339c66e2cc432dda313a5f2b1.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include "cuda_util.h"
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/extrema.h>
#define THREAD_NUM 128
#define BLOCKS_SCHED 2
#define SIZE_WARP 32
__device__ inline int row_index( unsigned int i, unsigned int M ){
double m = M;
double row = (-2*m - 1 + sqrt( (4*m*(m+1) - 8*(double)i - 7) )) / -2;
if( row == (double)(int) row ) row -= 1;
return (unsigned int) row;
}
__device__ inline int column_index( unsigned int i, unsigned int M ){
unsigned int row = row_index( i, M);
return i - M * row + row*(row+1) / 2;
}
__global__ void computeCgh(int * vetor, int * result, int numlinhas, int dev_id, int dev_count) {
int total_comp = numlinhas*(numlinhas+1)/2;
int tid = ((total_comp/dev_count)*dev_id) + threadIdx.x + blockIdx.x * blockDim.x; //thread id;
int c = row_index(tid,numlinhas);
int h = column_index(tid,numlinhas);
int max_so_far = INT_MIN, max_ending_here = INT_MIN;
extern __shared__ int max_block[];
if(threadIdx.x == 0)
max_block[0] = INT_MIN;
__syncthreads();
if(tid < total_comp && h >= c) {
for(int i = 0; i < numlinhas; i++) {
int value = vetor[i*numlinhas + h] - (c == 0 ? 0 : vetor[i*numlinhas + c - 1]);
if(max_ending_here < 0) {
max_ending_here = value;
}
else {
max_ending_here += value;
}
if(max_ending_here >= max_so_far ) {
max_so_far = max_ending_here;
}
}
atomicMax(&max_block[0],max_so_far);
}
__syncthreads();
if(threadIdx.x == 0)
atomicMax(&result[0],max_block[0]);
}
int main() {
int el;
scanf("%d",&el);
el *= el;
int * vetor = (int*)malloc(el*sizeof(int));
int * keys = (int*)malloc(el*sizeof(int));
int numlinhas = (int)sqrt(el);
int j = 0;
for(int i = 1; i < el+1; i++)
{
keys[i-1] = j;
scanf("%d",&vetor[i-1]);
if(i % numlinhas == 0)
j++;
}
int devCount;
HANDLE_ERROR( cudaGetDeviceCount(&devCount));
thrust::host_vector<int> max_device(devCount);
int global_max = -1;
#pragma omp parallel num_threads(devCount) default(shared)
{
const int dev_id = omp_get_thread_num();
HANDLE_ERROR( cudaSetDevice(dev_id) );
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, dev_id);
int total_comp = numlinhas*(numlinhas+1)/2;
unsigned tnumb = total_comp / THREAD_NUM > 0 ? THREAD_NUM : 32;
unsigned bnumb = ((int)(total_comp / tnumb / devCount)) + 1;
dim3 threadsPorBloco(tnumb);
dim3 blocosPorGrid(bnumb);
thrust::device_vector<int> d_vetor(vetor, vetor + el);
thrust::device_vector<int> d_keys(keys, keys + el);
thrust::device_vector<int> d_preffixsum(el);
thrust::device_vector<int> d_result(1);
float time;
cudaEvent_t start,stop;
HANDLE_ERROR( cudaEventCreate(&start) );
HANDLE_ERROR( cudaEventCreate(&stop) );
HANDLE_ERROR( cudaEventRecord(start, 0) );
thrust::inclusive_scan_by_key(d_keys.begin(), d_keys.end(),d_vetor.begin(),d_preffixsum.begin());
CudaCheckError();
computeCgh<<<blocosPorGrid,threadsPorBloco,sizeof(int)>>>(thrust::raw_pointer_cast(d_preffixsum.data()),thrust::raw_pointer_cast(d_result.data()),numlinhas, dev_id, devCount);
HANDLE_ERROR( cudaThreadSynchronize() );
max_device[dev_id] = d_result[0];
#pragma omp barrier
#pragma omp single
{
for(int i = 0; i < devCount; i++) {
if(global_max < max_device[i])
global_max = max_device[i];
}
}
HANDLE_ERROR( cudaEventRecord(stop, 0) );
HANDLE_ERROR( cudaEventSynchronize(stop) );
HANDLE_ERROR( cudaEventElapsedTime(&time, start, stop) );
#pragma omp single
{
//printf("\nO resultado e: %d\n",global_max);
//printf("O tempo foi de: %.9f ms para a mmax2d\n", time);
printf("mmax2d_multigpu: %d, tempo: %.9fms\n",global_max,time);
}
}
return 0;
}
|
f1f835ac9cd550aefb5be7f754d7d3314d2b8aa1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "calculate_IMC.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *norm = NULL;
hipMalloc(&norm, XSIZE*YSIZE);
float *IMC = NULL;
hipMalloc(&IMC, XSIZE*YSIZE);
float *HX = NULL;
hipMalloc(&HX, XSIZE*YSIZE);
float *HY = NULL;
hipMalloc(&HY, XSIZE*YSIZE);
float *entropy = NULL;
hipMalloc(&entropy, XSIZE*YSIZE);
float *px = NULL;
hipMalloc(&px, XSIZE*YSIZE);
float *py = NULL;
hipMalloc(&py, XSIZE*YSIZE);
float *HXY = NULL;
hipMalloc(&HXY, XSIZE*YSIZE);
int max = 1;
float sum = 1;
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((calculate_IMC), dim3(gridBlock), dim3(threadBlock), 0, 0, norm, IMC, HX, HY, entropy, px, py, HXY, max, sum, size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((calculate_IMC), dim3(gridBlock), dim3(threadBlock), 0, 0, norm, IMC, HX, HY, entropy, px, py, HXY, max, sum, size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((calculate_IMC), dim3(gridBlock), dim3(threadBlock), 0, 0, norm, IMC, HX, HY, entropy, px, py, HXY, max, sum, size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
f1f835ac9cd550aefb5be7f754d7d3314d2b8aa1.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "calculate_IMC.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *norm = NULL;
cudaMalloc(&norm, XSIZE*YSIZE);
float *IMC = NULL;
cudaMalloc(&IMC, XSIZE*YSIZE);
float *HX = NULL;
cudaMalloc(&HX, XSIZE*YSIZE);
float *HY = NULL;
cudaMalloc(&HY, XSIZE*YSIZE);
float *entropy = NULL;
cudaMalloc(&entropy, XSIZE*YSIZE);
float *px = NULL;
cudaMalloc(&px, XSIZE*YSIZE);
float *py = NULL;
cudaMalloc(&py, XSIZE*YSIZE);
float *HXY = NULL;
cudaMalloc(&HXY, XSIZE*YSIZE);
int max = 1;
float sum = 1;
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
calculate_IMC<<<gridBlock,threadBlock>>>(norm,IMC,HX,HY,entropy,px,py,HXY,max,sum,size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
calculate_IMC<<<gridBlock,threadBlock>>>(norm,IMC,HX,HY,entropy,px,py,HXY,max,sum,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
calculate_IMC<<<gridBlock,threadBlock>>>(norm,IMC,HX,HY,entropy,px,py,HXY,max,sum,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
54780d4989cdaf76f324d97c2e343721a670ffbd.hip
|
// !!! This is a file automatically generated by hipify!!!
/* Paraller Markov Chain Monte Carlo for Hard-Sphere-Particle Simulations on GPUs
Numerical Precision: Single Point
Author: Xin Yan
Credit: Part of the algorithms in this program is adapted from Joshua A. Anderson et. al
as in Anderson, J. A. et. al, Massively parallel Monte Carlo for many-particle
simulations on GPUs. Journal of Computational Physics 2013, 254, 27.
Date: 12/06/2014
*/
#define FP float
#define NMAX 4 //max number of ptcs in a cell
#define HANDLE_ERROR(err) (HandleError(err, __FILE__, __LINE__))
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include <assert.h>
#include <time.h>
#include <hiprand/hiprand_kernel.h>
#include "/home/fas/hpcprog/ahs3/cpsc424/utils/timing/timing.h"
void genbox(struct ptc *rsys, int *n, FP diameter, FP w, int N, int m);
double MCsweep(struct ptc *rsys_d, int *n_d, struct ptc *rsys_update, int *n_update, int bdim, int m,\
int iter, hiprandState_t *state);
int randint(int n);
void timing(double* wcTime, double* cpuTime);
// Check for cuda errors
// credit: http://stackoverflow.com/questions/25702573
static void HandleError(hipError_t err, const char *file, int line) {
if (err != hipSuccess) {
printf("hipError_t: %s in %s at line %d. Aborting...\n", hipGetErrorString(err), file, line);
exit(EXIT_FAILURE);
}
}
// structure definition for particle position
struct ptc {
FP x;
FP y;
};
__device__ unsigned int hiprand (hiprandState_t *state); // declare state on glb mem
__device__ float hiprand_uniform (hiprandState_t *state); // declare state on glb mem
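// Layout note (assumed intent): particles are stored as NMAX planes of m*m cells; the
// even/odd fold of x below groups columns of one checkerboard colour together so that
// cells updated in the same sub-sweep sit in nearby global-memory locations.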
// function to calculate the index of a ptc in glb mem
__host__ __device__ int cellIdx(int m, int x, int y, int i) {
int q;
if (x%2) q = (x+m)/2;
else q = x/2;
return (i*m + y)*m + q;
}
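// Checkerboard scheme: the m*m grid of cells is split into four sets selected by
// (off_x, off_y) in {0,1}x{0,1}; each launch updates every other cell in x and y, so no two
// concurrently updated cells are neighbours and neighbour reads need no synchronization.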
// Sub-sweep kernel: Update cells in each set
__global__ void subsweep(struct ptc *rsys, int *n, int off_x, int off_y, int m, int iter, hiprandState_t *state) {
// initialization
int x, y; // cell indices
int nptc; // number of particles in the current cell
struct ptc rcell[NMAX]; // array for ptc coord in a cell
struct ptc rmove; // new ptc coord after the move
int overlap; // 0 for no overlap, 1 otherwise
struct ptc vec; // vector pointing to the current neighbor
unsigned int nrand; // random number
int i, j, s, nb; // ptc index, sweep index, neighbor index
int xnb, ynb; // x, y of neighbor cell
int iptc, inb; // idx of ptc in glb mem
struct ptc rnb;
FP dist2; // distance between two ptcs
int nnb; // number of ptcs in the neighboring cell
int nblist[8][2] = {{-1,0}, {1,0}, {0,-1}, {0,1}, {-1,-1}, {1,-1}, {-1,1}, {1,1}};
__shared__ FP diameter; // sphere diameter
__shared__ int nsweeps; // max number of sub-sweeps per cell
__shared__ FP d; // perturbation size
__shared__ FP w; // cell width
__shared__ float pi2; // 2*pi
diameter = 1.0;
nsweeps = 4;
d = 0.16;
w = 1.4142*diameter;
pi2 = 6.28318530718;
// load coord in glb mem to the shared mem
x = 2*(blockDim.x*blockIdx.x + threadIdx.x) + off_x;
y = 2*(blockDim.y*blockIdx.y + threadIdx.y) + off_y;
// initialize ptc # in each cell
nptc = n[y*m+x];
if (nptc == 0) {
return;
}
// initialize rcell to -10.
for (i=0; i<NMAX; i++) {
rcell[i].x = -10.;
rcell[i].y = -10.;
}
// copy ptc in a cell from global memory to the register
for (i=0; i<nptc; i++) {
iptc = cellIdx(m, x, y, i); // call func cellIdx to calc iptc in glb mem
rcell[i] = rsys[iptc];
}
// Fisher-Yates shuffling
// initialize hiprand. Each thread will use the same seed for each iter but with diff seq.
hiprand_init(iter, y*m+x, 0, &state[y*m+x]);
// copy state to local memory
hiprandState_t localState = state[y*m+x];
// shuffle ptcs in the current cell
for (i=nptc-1; i>0; i--) {
nrand = hiprand(&localState)%(i+1); // Generate pseudo-rand unsigned ints from [0,i]
struct ptc temp;
temp = rcell[i];
rcell[i] = rcell[nrand];
rcell[nrand] = temp;
}
i = 0;
for (s=0; s<nsweeps; s++) {
// perturb the ptc
float angrand = hiprand_uniform(&localState)*pi2; // gen rand number from [0,2*pi)
angrand = (float) cos(double(angrand));
rmove.x = rcell[i].x + d * angrand;
rmove.y = rcell[i].y + d * sqrt(1-angrand*angrand);
overlap = 0;
// check if moved out of the cell
if ((rmove.x>0) && (rmove.y>0) && (rmove.x<=w) && (rmove.y<=w)) {
// check for overlap within the cell
for (j=0; j<nptc; j++) {
if (i == j) continue;
dist2 = (rmove.x - rcell[j].x)*(rmove.x - rcell[j].x);
dist2 += (rmove.y - rcell[j].y)*(rmove.y - rcell[j].y);
if (dist2 < diameter*diameter) {
overlap = 1;
break;
}
}
// check for overlap with ptcs in neighboring cells
for (nb=0; nb<8; nb++) {
xnb = x + nblist[nb][0]; // indices of neighboring cells
ynb = y + nblist[nb][1];
if ((xnb<0) || (ynb<0) || (xnb>=m) || (ynb>=m)) continue;
vec.x = nblist[nb][0]*w;
vec.y = nblist[nb][1]*w;
nnb = n[ynb*m + xnb];
for (j=0; j<nnb; j++) {
inb = cellIdx(m, xnb, ynb, j); // call func cellIdx to calc inb in glb mem
rnb = rsys[inb];
dist2 = (rmove.x-rnb.x-vec.x)*(rmove.x-rnb.x-vec.x);
dist2 += (rmove.y-rnb.y-vec.y)*(rmove.y-rnb.y-vec.y);
if (dist2 < diameter*diameter) {
overlap = 1;
nb = 8;
j = nnb;
break;
}
}
}
if (!overlap) {
rcell[i] = rmove; // if rmove is still in the cell, accept the move
}
}
i++;
if (i == nptc) i = 0;
}
// copy state back to global mem
state[y*m+x] = localState;
for (i=0; i<nptc; i++) { // write updated cell info to the global memory
iptc = cellIdx(m, x, y, i);
rsys[iptc] = rcell[i];
}
return;
} // Done with subsweep kernel
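// Cell shift: between sweeps every particle coordinate is shifted by a random fraction of the
// cell width along a random axis, so particles near cell boundaries can eventually interact
// across cells (following the scheme of Anderson et al. cited above).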
// cell shift GPU kernel
__global__ void shift_cells(int fx, int fy, FP d, struct ptc *rsys, int *n, struct ptc *rsys_update, \
int *n_update, int m) {
// initializations
int x, y; // cell index
int nptc, nnew; // ptc # in the current cell, ptc # in the cell after the shift
struct ptc rcell[NMAX]; // ptc coords in the current cell after shift
struct ptc rshft; // ptc coord after the shift
struct ptc vec; // vector pointing to the neighbor
int i; // ptc index
int iptc, inb; // index of ptc in global memory
int xnb, ynb; // cell index in direction of f
int nnb; // number of ptcs in the neighboring cell
__shared__ FP diameter; // sphere diameter
__shared__ FP w;
diameter = 1.0;
w = 1.4142*diameter;
x = blockDim.x*blockIdx.x + threadIdx.x;
y = blockDim.y*blockIdx.y + threadIdx.y;
nptc = n[y*m+x];
// initialize all ptc coord to -10
for (i=0; i<NMAX; i++) {
rcell[i].x = -10.;
rcell[i].y = -10.;
}
nnew = 0;
// perform cell move
for (i=0; i<nptc; i++) {
iptc = cellIdx(m, x, y, i);
rshft = rsys[iptc];
rshft.x -= fx*d;
rshft.y -= fy*d;
// update ptc that remains in the current cell
if ((rshft.x>0) && (rshft.y>0) && (rshft.x<=w) && (rshft.y<=w)) {
rcell[nnew] = rshft;
nnew++;
}
}
// update ptc that moved into the current cell from neighboring cell
xnb = (x+fx+m)%m;
ynb = (y+fy+m)%m;
// if ((xnb>=0) && (ynb>=0) && (xnb<m) && (ynb<m)) {
vec.x = fx*w;
vec.y = fy*w;
nnb = n[ynb*m + xnb];
for (i=0; i<nnb; i++) {
inb = cellIdx(m, xnb, ynb, i);
rshft = rsys[inb];
rshft.x -= fx*d;
rshft.y -= fy*d;
if ((rshft.x<=0) || (rshft.y<=0) || (rshft.x>w) || (rshft.y>w)) {
rshft.x += vec.x;
rshft.y += vec.y;
rcell[nnew] = rshft;
nnew++;
}
}
// }
// update the coord and ptc # info to a new buffer
n_update[y*m+x] = nnew; // update the ptc # to the new buffer
for (i=0; i<nnew; i++) {
iptc = cellIdx(m, x, y, i);
rsys_update[iptc] = rcell[i];
}
return;
}
int main(int argc, char *argv[]) {
// Declaration and Initialization
int m, N; // number of cells in each dim, total ptc #
const FP diameter = 1.; // diameter of the ptc sphere
const FP w = 1.4142*diameter; // set the width of the cell to 2*sqrt(2)*diameter
int bdim; // blockDim
struct ptc *rsys; // rsys on CPU host
int *n; // n array on CPU host
struct ptc *rsys_d, *rsys_update; // rsys on GPU glb mem
int *n_d, *n_update; // n on GPU glb mem
int sizer, sizen; // size of rsys and n in bytes
struct ptc temp; // ptc generated by rng
int x, y; // cell index
int iter, maxiter; // the current MC sweeps and the max number of iterations
int i;
int gpucount;
hiprandState_t *state;
double wctime, totwctime; // timers
//Read command line args
if (argc < 5) {
printf("Error: Incorrect command line format. Aborting...\n");
printf("Usage: MCMCpar <N> <m> <bdim> <maxiter>\n");
exit(-1);
}
N = atoi(argv[1]);
m = atoi(argv[2]);
bdim = atoi(argv[3]);
maxiter = atoi(argv[4]);
if (N > m*m*NMAX) {
printf("Error: Too many particles in the system. Aborting...\n");
exit(-1);
}
// output basic simulation information
printf("###### Markov Chain Monte Carlo Simulation on GPUs ######\n\n");
printf("### Basic Info: \n");
printf("# Total NO. of particles = %d\n", N);
printf("# NO. of cells in each dimension = %d\n", m);
printf("# Number of Monte Carlo sweeps = %d\n", maxiter);
// Check GPU device
HANDLE_ERROR(hipGetDeviceCount(&gpucount));
printf("# GPU device count = %d.\n\n", gpucount);
if (bdim*bdim > 1024) {
printf("Error: Too many threads in a block. Aborting...\n");
exit(-1);
}
// allocate n, rsys arrays and initialize them to zero
int nsize = m*m;
int rsyssize = nsize*NMAX;
n = (int *) malloc(nsize*sizeof(int));
rsys = (struct ptc *) malloc(rsyssize*sizeof(struct ptc));
for (i=0; i<nsize; i++) n[i] = 0;
for (i=0; i<rsyssize; i++) {
rsys[i].x = -10.;
rsys[i].y = -10.;
}
// generate initial simulation system
printf("### Start generating intial simulation box...\n");
genbox(rsys, n, diameter, w, N, m);
printf("# Box generation successfully finished!\n");
int ncount = 0; // ptc counter
for (y=0; y<m; y++) {
for (x=0; x<m; x++) {
ncount += n[m*y+x];
}
}
printf("# %d particles generated in the system\n", ncount);
// print initial particle positions
printf("# Initial particle positions:\n");
for (i=0; i<NMAX; i++) {
for (y=0; y<m; y++) {
for (x=0; x<m; x++) {
temp = rsys[(i*m+y)*m+x];
printf("%10.4f\t%10.4f\n",temp.x, temp.y);
}
}
}
printf("\n");
// allocate and initialize global memory on device
sizer = m * m * NMAX * sizeof(struct ptc);
sizen = m * m * sizeof(int);
HANDLE_ERROR(hipMalloc((void **) &rsys_d, sizer));
HANDLE_ERROR(hipMemcpy(rsys_d, rsys, sizer, hipMemcpyHostToDevice));
HANDLE_ERROR(hipMalloc((void **) &rsys_update, sizer)); // allocate an update buffer
HANDLE_ERROR(hipMemcpy(rsys_update, rsys, sizer, hipMemcpyHostToDevice));
HANDLE_ERROR(hipMalloc((void **) &n_d, sizen));
HANDLE_ERROR(hipMemcpy(n_d, n, sizen, hipMemcpyHostToDevice));
HANDLE_ERROR(hipMalloc((void **) &n_update, sizen)); // allocate an update buffer
HANDLE_ERROR(hipMemcpy(n_update, n, sizen, hipMemcpyHostToDevice));
HANDLE_ERROR(hipMalloc((void **) &state, m*m*sizeof(hiprandState_t)));
// Loop for Monte Carlo sweep
totwctime = 0.;
printf("### Entering Monte Carlo sweep loops:\n");
for (iter=0; iter<maxiter; iter++) {
printf("# MC sweep iteration # %d\n", iter);
wctime = MCsweep(rsys_d, n_d, rsys_update, n_update, bdim, m, iter, state);
totwctime += wctime;
printf("# Iteration # %d finished, total wctime: %f\n", iter, wctime);
// Compute physical properties and output
}
printf("# Monte Carlo sweep finished in %f sec. Writing results to the output...\n\n", totwctime);
// Copy data from GPU glb mem back to CPU host
if (maxiter%2) {
HANDLE_ERROR(hipMemcpy(rsys, rsys_update, sizer, hipMemcpyDeviceToHost));
HANDLE_ERROR(hipMemcpy(n, n_update, sizen, hipMemcpyDeviceToHost));
}
else {
HANDLE_ERROR(hipMemcpy(rsys, rsys_d, sizer, hipMemcpyDeviceToHost));
HANDLE_ERROR(hipMemcpy(n, n_d, sizen, hipMemcpyDeviceToHost));
}
// print the coordinate of all particles after each MC sweep
ncount = 0; // ptc counter
for (y=0; y<m; y++) {
for (x=0; x<m; x++) {
ncount += n[m*y+x];
}
}
printf("# Final number of particles in the system: %d\n", ncount);
printf("# Final position of particles:\n");
for (i=0; i<NMAX; i++) {
for (y=0; y<m; y++) {
for (x=0; x<m; x++) {
temp = rsys[(i*m+y)*m+x];
printf("%10.4f\t%10.4f\n",temp.x, temp.y);
}
}
}
// Free GPU glb mem
HANDLE_ERROR(hipFree(rsys_d));
HANDLE_ERROR(hipFree(rsys_update));
HANDLE_ERROR(hipFree(n_d));
HANDLE_ERROR(hipFree(n_update));
HANDLE_ERROR(hipFree(state));
printf("\n### Successful Termination of the Markov Chain Monte Carlo Simulation!\n");
return(0);
}
// set up a box of 8*8 cells, m should be multiples of 8, N should be multiples of m*m
void genbox(struct ptc *rsys, int *n, FP diameter, FP w, int N, int m) {
// Declarations
struct ptc test; // test particle
int ptcbox = N*64/(m*m); // number of ptcs per box
FP lb2 = diameter*diameter; // shortest allowed distance between two ptcs
int nbox = 0; // actual number of ptcs in the box
int success; // successfully generated a new ptc
FP dist2; // distance^2 between two particles
struct ptc vec;
int nb, xnb, ynb, inb, nnb; // index of neighboring cell and total ptc # in nb cell
struct ptc rnb; // ptc in nb cell
int xbox, ybox; // the index of cell in box
int idx; //idx in rsys
int i, j, x, y;
int nblist[8][2] = {{-1,0}, {1,0}, {0,-1}, {0,1}, {-1,-1}, {1,-1}, {-1,1}, {1,1}};
int ncell;
// loop over all cells to generate ptc positions
for (i=0; i<NMAX; i++) {
for (y=0; y<8; y++) {
for (x=0; x<8; x++) {
if (nbox >= ptcbox) { // enough ptc has been gen, break out of nested loops
x = y = 8;
i = NMAX;
break;
}
else {
success = 0;
while (!success) { // loop until successfully generated a new ptc
test.x = (FP)rand()/(FP)RAND_MAX*w; // gen test ptc within [0,w)
test.y = (FP)rand()/(FP)RAND_MAX*w; // gen test ptc within [0,w)
// check for overlap within the cell
ncell = n[y*m+x];
if (ncell == 0) {
success = 1;
}
else {
for (j=0; j<ncell; j++) { //loop over all previously generated ptcs
idx = cellIdx(m, x, y, j); //very bad memory retrieving
dist2 = (test.x-rsys[idx].x)*(test.x-rsys[idx].x);
dist2 += (test.y-rsys[idx].y)*(test.y-rsys[idx].y);
if (dist2 < lb2) { //overlap
success = 0;
break;
}
else {
success = 1;
}
}
}
//if no overlap within the cell, check for overlap with neighbor cells
if (success) {
for (nb=0; nb<8; nb++) {
xnb = (x + nblist[nb][0]+8)%8; // indices of neighboring cells
ynb = (y + nblist[nb][1]+8)%8;
vec.x = nblist[nb][0]*w;
vec.y = nblist[nb][1]*w;
nnb = n[ynb*m + xnb];
if (nnb == 0) continue;
for (j=0; j<nnb; j++) { // loop over all ptcs in a neighbor cell
inb = cellIdx(m, xnb, ynb, j); // call func cellIdx to calc inb in glb mem
rnb = rsys[inb];
dist2 = (test.x-rnb.x-vec.x)*(test.x-rnb.x-vec.x);
dist2 += (test.y-rnb.y-vec.y)*(test.y-rnb.y-vec.y);
if (dist2 < lb2) {
success = 0;
nb = 8;
j = nnb;
break;
}
}
}
}
}
// successful generation of a test ptc, store it in the host memory
idx = cellIdx(m, x, y, i);
rsys[idx] = test;
n[m*y+x]++;
nbox++;
}
}
}
}
int iptcbox, iptc;
// replicate the 8*8 box to all other cells in the system
for (i=0; i<NMAX; i++) {
for (y=0; y<m; y++) {
ybox = y%8;
for (x=0; x<m; x++) {
xbox = x%8;
iptcbox = cellIdx(m, xbox, ybox, i);
iptc = cellIdx(m, x, y, i);
n[y*m+x] = n[ybox*m+xbox];
rsys[iptc] = rsys[iptcbox];
}
}
}
return;
} // Done with genbox
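// One Monte Carlo sweep = visit the four checkerboard sets in random order (sub-sweep kernel),
// then apply a random cell shift; rsys/n are double-buffered and the roles of the two buffers
// alternate with the parity of the iteration counter.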
// Monte Carlo Sweep
double MCsweep(struct ptc *rsys_d, int *n_d, struct ptc *rsys_update, int *n_update, int bdim, int m,\
int iter, hiprandState_t *state) {
// initialization
int chksets[] = {'a', 'b', 'c', 'd'}; // collection of checkerboard sets
int set; // checkerboard set
int i; // index
unsigned int nrand; // random number
int off_x, off_y; // cell index offset to the lower-leftmost active cell in the current set
FP d; // cell shift distance
int shftvec[4][2] = {{-1, 0}, {1, 0}, {0, -1}, {0, 1}}; // unit vectors for cell shift
int fx, fy; // vector to perform cell shift;
int bx, by, gx, gy; // blockdim and griddim, bx=by=bdim
const FP diameter = 1.0;
const FP w = 1.4142*diameter;
double start, end, cput;
// start timing
timing(&start, &cput);
// Fisher-Yates shuffling
srand(time(NULL));
for (i=3; i>0; i--) {
//pick a rand number in [0, i]
nrand = randint(i+1);
//swap chksets[i] and chksets[nrand]
int temp;
temp = chksets[nrand];
chksets[nrand] = chksets[i];
chksets[i] = temp;
}
// define grid and block size
bx = by = bdim; // bx and by set from command line input
gx = gy = m/(bdim*2); // total # threads=m/2, each thread controls every other r/c of cells
if (bx*gx < m/2) {
printf("Error: number of threads in x dimension less than half the number of cells. \
Aborting...\n");
exit(-1);
}
dim3 dimBlock(bx, by, 1);
dim3 dimGrid(gx, gy, 1);
printf("# sub-sweeps: Block x = %d, Block y = %d, Grid x = %d, Grid y = %d.\n", bx, by, gx, \
gy);
// Loop over checkerboard sets
for(i=0; i<4; i++) {
set = chksets[i];
switch(set) {
case 'a':
off_x = 0;
off_y = 0;
break;
case 'b':
off_x = 1;
off_y = 0;
break;
case 'c':
off_x = 0;
off_y = 1;
break;
case 'd':
off_x = 1;
off_y = 1;
break;
default:
printf("Error: set not in the checkerboard sets. Aborting...\n");
exit(-1);
}
// Sub-sweep GPU kernel
// need to swap buffer each iteration
if (iter%2)
hipLaunchKernelGGL(( subsweep), dim3(dimGrid), dim3(dimBlock), 0, 0, rsys_update, n_update, off_x, off_y, m, iter, state);
else
hipLaunchKernelGGL(( subsweep), dim3(dimGrid), dim3(dimBlock), 0, 0, rsys_d, n_d, off_x, off_y, m, iter, state);
// synchronize all threads in the device
HANDLE_ERROR(hipDeviceSynchronize());
} // Done with sub-sweeps
// Shift cells
d = (float) rand()/(float)(RAND_MAX)*w/2.;//generate random floating point number [0, w/2.]
nrand = randint(4); // randomly select a direction to perform cell shift
fx = shftvec[nrand][0];
fy = shftvec[nrand][1];
gx = gy = m/bdim; // total # threads=m, each thread controls a cell
if (bx*gx < m) {
printf("Error: number of threads in x dimension less than the nubmer of the number of cells. \
Aborting...\n");
exit(-1);
}
printf("# shift cells: Block x = %d, Block y = %d, Grid x = %d, Grid y = %d.\n", bx, by, gx, gy);
dim3 dimGrid2(gx, gy, 1);
// need to swap buffer each iteration
if (iter%2)
hipLaunchKernelGGL(( shift_cells), dim3(dimGrid2), dim3(dimBlock), 0, 0, fx, fy, d, rsys_update, n_update, rsys_d, n_d, m);
else
hipLaunchKernelGGL(( shift_cells), dim3(dimGrid2), dim3(dimBlock), 0, 0, fx, fy, d, rsys_d, n_d, rsys_update, n_update, m);
HANDLE_ERROR(hipDeviceSynchronize());
//end timing
timing(&end, &cput);
return(end - start);
} // Done with MC sweep
// random number generator, returns an integer in the range [0, n)
// credit: http://stackoverflow.com/questions/822323
int randint(int n) {
if ((n-1) == RAND_MAX) {
return rand();
} else {
// chop off all values that would cause skew
long end = RAND_MAX / n;
assert (end > 0L);
end *= n;
//ignore results from rand() that fall above the limit
int r;
while ((r=rand()) >= end) ;
return r%n; // obtain rand number that gives a uniform distribution
}
}
// timer function
// credit: Dr. Andrew Sherman, Yale University
void timing(double* wcTime, double* cpuTime)
{
struct timeval tp;
struct rusage ruse;
gettimeofday(&tp, NULL);
*wcTime=(double) (tp.tv_sec + tp.tv_usec/1000000.0);
getrusage(RUSAGE_SELF, &ruse);
*cpuTime=(double)(ruse.ru_utime.tv_sec+ruse.ru_utime.tv_usec / 1000000.0);
}
|
54780d4989cdaf76f324d97c2e343721a670ffbd.cu
|
/* Parallel Markov Chain Monte Carlo for Hard-Sphere-Particle Simulations on GPUs
Numerical Precision: Single Precision
Author: Xin Yan
Credit: Part of the algorithms in this program are adapted from Joshua A. Anderson et al.
as in Anderson, J. A. et al., Massively parallel Monte Carlo for many-particle
simulations on GPUs. Journal of Computational Physics 2013, 254, 27.
Date: 12/06/2014
*/
#define FP float
#define NMAX 4 //max number of ptcs in a cell
#define HANDLE_ERROR(err) (HandleError(err, __FILE__, __LINE__))
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <math.h>
#include <assert.h>
#include <time.h>
#include <sys/time.h>     // gettimeofday() used in timing()
#include <sys/resource.h> // getrusage() used in timing()
#include <curand_kernel.h>
#include "/home/fas/hpcprog/ahs3/cpsc424/utils/timing/timing.h"
void genbox(struct ptc *rsys, int *n, FP diameter, FP w, int N, int m);
double MCsweep(struct ptc *rsys_d, int *n_d, struct ptc *rsys_update, int *n_update, int bdim, int m,\
int iter, curandState *state);
int randint(int n);
void timing(double* wcTime, double* cpuTime);
// Check for cuda errors
// credit: http://stackoverflow.com/questions/25702573
static void HandleError(cudaError_t err, const char *file, int line) {
if (err != cudaSuccess) {
printf("cudaError: %s in %s at line %d. Aborting...\n", cudaGetErrorString(err), file, line);
exit(EXIT_FAILURE);
}
}
// structure definition for particle position
struct ptc {
FP x;
FP y;
};
__device__ unsigned int curand (curandState_t *state); // declare state on glb mem
__device__ float curand_uniform (curandState_t *state); // declare state on glb mem
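// Layout note (assumed intent): particles are stored as NMAX planes of m*m cells; the
// even/odd fold of x below groups columns of one checkerboard colour together so that
// cells updated in the same sub-sweep sit in nearby global-memory locations.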
// function to calculate the index of a ptc in glb mem
__host__ __device__ int cellIdx(int m, int x, int y, int i) {
int q;
if (x%2) q = (x+m)/2;
else q = x/2;
return (i*m + y)*m + q;
}
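// Checkerboard scheme: the m*m grid of cells is split into four sets selected by
// (off_x, off_y) in {0,1}x{0,1}; each launch updates every other cell in x and y, so no two
// concurrently updated cells are neighbours and neighbour reads need no synchronization.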
// Sub-sweep kernel: Update cells in each set
__global__ void subsweep(struct ptc *rsys, int *n, int off_x, int off_y, int m, int iter, curandState *state) {
// initialization
int x, y; // cell indices
int nptc; // number of particles in the current cell
struct ptc rcell[NMAX]; // array for ptc coord in a cell
struct ptc rmove; // new ptc coord after the move
int overlap; // 0 for no overlap, 1 otherwise
struct ptc vec; // vector pointing to the current neighbor
unsigned int nrand; // random number
int i, j, s, nb; // ptc index, sweep index, neighbor index
int xnb, ynb; // x, y of neighbor cell
int iptc, inb; // idx of ptc in glb mem
struct ptc rnb;
FP dist2; // distance between two ptcs
int nnb; // number of ptcs in the neighboring cell
int nblist[8][2] = {{-1,0}, {1,0}, {0,-1}, {0,1}, {-1,-1}, {1,-1}, {-1,1}, {1,1}};
__shared__ FP diameter; // sphere diameter
__shared__ int nsweeps; // max number of sub-sweeps per cell
__shared__ FP d; // perturbation size
__shared__ FP w; // cell width
__shared__ float pi2; // 2*pi
diameter = 1.0;
nsweeps = 4;
d = 0.16;
w = 1.4142*diameter;
pi2 = 6.28318530718;
// load coord in glb mem to the shared mem
x = 2*(blockDim.x*blockIdx.x + threadIdx.x) + off_x;
y = 2*(blockDim.y*blockIdx.y + threadIdx.y) + off_y;
// initialize ptc # in each cell
nptc = n[y*m+x];
if (nptc == 0) {
return;
}
// initialize rcell to -10.
for (i=0; i<NMAX; i++) {
rcell[i].x = -10.;
rcell[i].y = -10.;
}
// copy ptc in a cell from global memory to the register
for (i=0; i<nptc; i++) {
iptc = cellIdx(m, x, y, i); // call func cellIdx to calc iptc in glb mem
rcell[i] = rsys[iptc];
}
// Fisher-Yates shuffling
// initialize curand. Each thread will use the same seed for each iter but with diff seq.
curand_init(iter, y*m+x, 0, &state[y*m+x]);
// copy state to local memory
curandState localState = state[y*m+x];
// shuffle ptcs in the current cell
for (i=nptc-1; i>0; i--) {
nrand = curand(&localState)%(i+1); // Generate pseudo-rand unsigned ints from [0,i]
struct ptc temp;
temp = rcell[i];
rcell[i] = rcell[nrand];
rcell[nrand] = temp;
}
i = 0;
for (s=0; s<nsweeps; s++) {
// perturb the ptc
float angrand = curand_uniform(&localState)*pi2; // gen rand number from [0,2*pi)
angrand = (float) cos(double(angrand));
rmove.x = rcell[i].x + d * angrand;
rmove.y = rcell[i].y + d * sqrt(1-angrand*angrand);
overlap = 0;
// check if moved out of the cell
if ((rmove.x>0) && (rmove.y>0) && (rmove.x<=w) && (rmove.y<=w)) {
// check for overlap within the cell
for (j=0; j<nptc; j++) {
if (i == j) continue;
dist2 = (rmove.x - rcell[j].x)*(rmove.x - rcell[j].x);
dist2 += (rmove.y - rcell[j].y)*(rmove.y - rcell[j].y);
if (dist2 < diameter*diameter) {
overlap = 1;
break;
}
}
// check for overlap with ptcs in neighboring cells
for (nb=0; nb<8; nb++) {
xnb = x + nblist[nb][0]; // indices of neighboring cells
ynb = y + nblist[nb][1];
if ((xnb<0) || (ynb<0) || (xnb>=m) || (ynb>=m)) continue;
vec.x = nblist[nb][0]*w;
vec.y = nblist[nb][1]*w;
nnb = n[ynb*m + xnb];
for (j=0; j<nnb; j++) {
inb = cellIdx(m, xnb, ynb, j); // call func cellIdx to calc inb in glb mem
rnb = rsys[inb];
dist2 = (rmove.x-rnb.x-vec.x)*(rmove.x-rnb.x-vec.x);
dist2 += (rmove.y-rnb.y-vec.y)*(rmove.y-rnb.y-vec.y);
if (dist2 < diameter*diameter) {
overlap = 1;
nb = 8;
j = nnb;
break;
}
}
}
if (!overlap) {
rcell[i] = rmove; // if rmove is still in the cell, accept the move
}
}
i++;
if (i == nptc) i = 0;
}
// copy state back to global mem
state[y*m+x] = localState;
for (i=0; i<nptc; i++) { // write updated cell info to the global memory
iptc = cellIdx(m, x, y, i);
rsys[iptc] = rcell[i];
}
return;
} // Done with subsweep kernel
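// Cell shift: between sweeps every particle coordinate is shifted by a random fraction of the
// cell width along a random axis, so particles near cell boundaries can eventually interact
// across cells (following the scheme of Anderson et al. cited above).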
// cell shift GPU kernel
__global__ void shift_cells(int fx, int fy, FP d, struct ptc *rsys, int *n, struct ptc *rsys_update, \
int *n_update, int m) {
// initializations
int x, y; // cell index
int nptc, nnew; // ptc # in the current cell, ptc # in the cell after the shift
struct ptc rcell[NMAX]; // ptc coords in the current cell after shift
struct ptc rshft; // ptc coord after the shift
struct ptc vec; // vector pointing to the neighbor
int i; // ptc index
int iptc, inb; // index of ptc in global memory
int xnb, ynb; // cell index in direction of f
int nnb; // number of ptcs in the neighboring cell
__shared__ FP diameter; // sphere diameter
__shared__ FP w;
diameter = 1.0;
w = 1.4142*diameter;
x = blockDim.x*blockIdx.x + threadIdx.x;
y = blockDim.y*blockIdx.y + threadIdx.y;
nptc = n[y*m+x];
// initialize all ptc coord to -10
for (i=0; i<NMAX; i++) {
rcell[i].x = -10.;
rcell[i].y = -10.;
}
nnew = 0;
// perform cell move
for (i=0; i<nptc; i++) {
iptc = cellIdx(m, x, y, i);
rshft = rsys[iptc];
rshft.x -= fx*d;
rshft.y -= fy*d;
// update ptc that remains in the current cell
if ((rshft.x>0) && (rshft.y>0) && (rshft.x<=w) && (rshft.y<=w)) {
rcell[nnew] = rshft;
nnew++;
}
}
// update ptc that moved into the current cell from neighboring cell
xnb = (x+fx+m)%m;
ynb = (y+fy+m)%m;
// if ((xnb>=0) && (ynb>=0) && (xnb<m) && (ynb<m)) {
vec.x = fx*w;
vec.y = fy*w;
nnb = n[ynb*m + xnb];
for (i=0; i<nnb; i++) {
inb = cellIdx(m, xnb, ynb, i);
rshft = rsys[inb];
rshft.x -= fx*d;
rshft.y -= fy*d;
if ((rshft.x<=0) || (rshft.y<=0) || (rshft.x>w) || (rshft.y>w)) {
rshft.x += vec.x;
rshft.y += vec.y;
rcell[nnew] = rshft;
nnew++;
}
}
// }
// update the coord and ptc # info to a new buffer
n_update[y*m+x] = nnew; // update the ptc # to the new buffer
for (i=0; i<nnew; i++) {
iptc = cellIdx(m, x, y, i);
rsys_update[iptc] = rcell[i];
}
return;
}
int main(int argc, char *argv[]) {
// Declaration and Initialization
int m, N; // number of cells in each dim, total ptc #
const FP diameter = 1.; // diameter of the ptc sphere
const FP w = 1.4142*diameter; // set the width of the cell to 2*sqrt(2)*diameter
int bdim; // blockDim
struct ptc *rsys; // rsys on CPU host
int *n; // n array on CPU host
struct ptc *rsys_d, *rsys_update; // rsys on GPU glb mem
int *n_d, *n_update; // n on GPU glb mem
int sizer, sizen; // size of rsys and n in bytes
struct ptc temp; // ptc generated by rng
int x, y; // cell index
int iter, maxiter; // the current MC sweeps and the max number of iterations
int i;
int gpucount;
curandState *state;
double wctime, totwctime; // timers
//Read command line args
if (argc < 5) {
printf("Error: Incorrect command line format. Aborting...\n");
printf("Usage: MCMCpar <N> <m> <bdim> <maxiter>\n");
exit(-1);
}
N = atoi(argv[1]);
m = atoi(argv[2]);
bdim = atoi(argv[3]);
maxiter = atoi(argv[4]);
if (N > m*m*NMAX) {
printf("Error: Too many particles in the system. Aborting...\n");
exit(-1);
}
// output basic simulation information
printf("###### Markov Chain Monte Carlo Simulation on GPUs ######\n\n");
printf("### Basic Info: \n");
printf("# Total NO. of particles = %d\n", N);
printf("# NO. of cells in each dimension = %d\n", m);
printf("# Number of Monte Carlo sweeps = %d\n", maxiter);
// Check GPU device
HANDLE_ERROR(cudaGetDeviceCount(&gpucount));
printf("# GPU device count = %d.\n\n", gpucount);
if (bdim*bdim > 1024) {
printf("Error: Too many threads in a block. Aborting...\n");
exit(-1);
}
// allocate n, rsys arrays and initialize them to zero
int nsize = m*m;
int rsyssize = nsize*NMAX;
n = (int *) malloc(nsize*sizeof(int));
rsys = (struct ptc *) malloc(rsyssize*sizeof(struct ptc));
for (i=0; i<nsize; i++) n[i] = 0;
for (i=0; i<rsyssize; i++) {
rsys[i].x = -10.;
rsys[i].y = -10.;
}
// generate initial simulation system
printf("### Start generating intial simulation box...\n");
genbox(rsys, n, diameter, w, N, m);
printf("# Box generation successfully finished!\n");
int ncount = 0; // ptc counter
for (y=0; y<m; y++) {
for (x=0; x<m; x++) {
ncount += n[m*y+x];
}
}
printf("# %d particles generated in the system\n", ncount);
// print initial particle positions
printf("# Initial particle positions:\n");
for (i=0; i<NMAX; i++) {
for (y=0; y<m; y++) {
for (x=0; x<m; x++) {
temp = rsys[(i*m+y)*m+x];
printf("%10.4f\t%10.4f\n",temp.x, temp.y);
}
}
}
printf("\n");
// allocate and initialize global memory on device
sizer = m * m * NMAX * sizeof(struct ptc);
sizen = m * m * sizeof(int);
HANDLE_ERROR(cudaMalloc((void **) &rsys_d, sizer));
HANDLE_ERROR(cudaMemcpy(rsys_d, rsys, sizer, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMalloc((void **) &rsys_update, sizer)); // allocate an update buffer
HANDLE_ERROR(cudaMemcpy(rsys_update, rsys, sizer, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMalloc((void **) &n_d, sizen));
HANDLE_ERROR(cudaMemcpy(n_d, n, sizen, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMalloc((void **) &n_update, sizen)); // allocate an update buffer
HANDLE_ERROR(cudaMemcpy(n_update, n, sizen, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMalloc((void **) &state, m*m*sizeof(curandState)));
// Loop for Monte Carlo sweep
totwctime = 0.;
printf("### Entering Monte Carlo sweep loops:\n");
for (iter=0; iter<maxiter; iter++) {
printf("# MC sweep iteration # %d\n", iter);
wctime = MCsweep(rsys_d, n_d, rsys_update, n_update, bdim, m, iter, state);
totwctime += wctime;
printf("# Iteration # %d finished, total wctime: %f\n", iter, wctime);
// Compute physical properties and output
}
printf("# Monte Carlo sweep finished in %f sec. Writing results to the output...\n\n", totwctime);
// Copy data from GPU glb mem back to CPU host
if (maxiter%2) {
HANDLE_ERROR(cudaMemcpy(rsys, rsys_update, sizer, cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaMemcpy(n, n_update, sizen, cudaMemcpyDeviceToHost));
}
else {
HANDLE_ERROR(cudaMemcpy(rsys, rsys_d, sizer, cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaMemcpy(n, n_d, sizen, cudaMemcpyDeviceToHost));
}
// print the coordinate of all particles after each MC sweep
ncount = 0; // ptc counter
for (y=0; y<m; y++) {
for (x=0; x<m; x++) {
ncount += n[m*y+x];
}
}
printf("# Final number of particles in the system: %d\n", ncount);
printf("# Final position of particles:\n");
for (i=0; i<NMAX; i++) {
for (y=0; y<m; y++) {
for (x=0; x<m; x++) {
temp = rsys[(i*m+y)*m+x];
printf("%10.4f\t%10.4f\n",temp.x, temp.y);
}
}
}
// Free GPU glb mem
HANDLE_ERROR(cudaFree(rsys_d));
HANDLE_ERROR(cudaFree(rsys_update));
HANDLE_ERROR(cudaFree(n_d));
HANDLE_ERROR(cudaFree(n_update));
HANDLE_ERROR(cudaFree(state));
printf("\n### Successful Termination of the Markov Chain Monte Carlo Simulation!\n");
return(0);
}
// set up a box of 8*8 cells, m should be multiples of 8, N should be multiples of m*m
void genbox(struct ptc *rsys, int *n, FP diameter, FP w, int N, int m) {
// Declarations
struct ptc test; // test particle
int ptcbox = N*64/(m*m); // number of ptcs per box
FP lb2 = diameter*diameter; // shortest allowed distance between two ptcs
int nbox = 0; // actual number of ptcs in the box
int success; // successfully generated a new ptc
FP dist2; // distance^2 between two particles
struct ptc vec;
int nb, xnb, ynb, inb, nnb; // index of neighboring cell and total ptc # in nb cell
struct ptc rnb; // ptc in nb cell
int xbox, ybox; // the index of cell in box
int idx; //idx in rsys
int i, j, x, y;
int nblist[8][2] = {{-1,0}, {1,0}, {0,-1}, {0,1}, {-1,-1}, {1,-1}, {-1,1}, {1,1}};
int ncell;
// loop over all cells to generate ptc positions
for (i=0; i<NMAX; i++) {
for (y=0; y<8; y++) {
for (x=0; x<8; x++) {
if (nbox >= ptcbox) { // enough ptc has been gen, break out of nested loops
x = y = 8;
i = NMAX;
break;
}
else {
success = 0;
while (!success) { // loop until successfully generated a new ptc
test.x = (FP)rand()/(FP)RAND_MAX*w; // gen test ptc within [0,w)
test.y = (FP)rand()/(FP)RAND_MAX*w; // gen test ptc within [0,w)
// check for overlap within the cell
ncell = n[y*m+x];
if (ncell == 0) {
success = 1;
}
else {
for (j=0; j<ncell; j++) { //loop over all previously generated ptcs
idx = cellIdx(m, x, y, j); //very bad memory retrieving
dist2 = (test.x-rsys[idx].x)*(test.x-rsys[idx].x);
dist2 += (test.y-rsys[idx].y)*(test.y-rsys[idx].y);
if (dist2 < lb2) { //overlap
success = 0;
break;
}
else {
success = 1;
}
}
}
//if no overlap within the cell, check for overlap with neighbor cells
if (success) {
for (nb=0; nb<8; nb++) {
xnb = (x + nblist[nb][0]+8)%8; // indices of neighboring cells
ynb = (y + nblist[nb][1]+8)%8;
vec.x = nblist[nb][0]*w;
vec.y = nblist[nb][1]*w;
nnb = n[ynb*m + xnb];
if (nnb == 0) continue;
for (j=0; j<nnb; j++) { // loop over all ptcs in a neighbor cell
inb = cellIdx(m, xnb, ynb, j); // call func cellIdx to calc inb in glb mem
rnb = rsys[inb];
dist2 = (test.x-rnb.x-vec.x)*(test.x-rnb.x-vec.x);
dist2 += (test.y-rnb.y-vec.y)*(test.y-rnb.y-vec.y);
if (dist2 < lb2) {
success = 0;
nb = 8;
j = nnb;
break;
}
}
}
}
}
// successful generation of a test ptc, store it in the host memory
idx = cellIdx(m, x, y, i);
rsys[idx] = test;
n[m*y+x]++;
nbox++;
}
}
}
}
int iptcbox, iptc;
// replicate the 8*8 box to all other cells in the system
for (i=0; i<NMAX; i++) {
for (y=0; y<m; y++) {
ybox = y%8;
for (x=0; x<m; x++) {
xbox = x%8;
iptcbox = cellIdx(m, xbox, ybox, i);
iptc = cellIdx(m, x, y, i);
n[y*m+x] = n[ybox*m+xbox];
rsys[iptc] = rsys[iptcbox];
}
}
}
return;
} // Done with genbox
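// One Monte Carlo sweep = visit the four checkerboard sets in random order (sub-sweep kernel),
// then apply a random cell shift; rsys/n are double-buffered and the roles of the two buffers
// alternate with the parity of the iteration counter.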
// Monte Carlo Sweep
double MCsweep(struct ptc *rsys_d, int *n_d, struct ptc *rsys_update, int *n_update, int bdim, int m,\
int iter, curandState *state) {
// initialization
int chksets[] = {'a', 'b', 'c', 'd'}; // collection of checkerboard sets
int set; // checkerboard set
int i; // index
unsigned int nrand; // random number
int off_x, off_y; // cell index offset to the lower-leftmost active cell in the current set
FP d; // cell shift distance
int shftvec[4][2] = {{-1, 0}, {1, 0}, {0, -1}, {0, 1}}; // unit vectors for cell shift
int fx, fy; // vector to perform cell shift;
int bx, by, gx, gy; // blockdim and griddim, bx=by=bdim
const FP diameter = 1.0;
const FP w = 1.4142*diameter;
double start, end, cput;
// start timing
timing(&start, &cput);
// Fisher-Yates shuffling
srand(time(NULL));
for (i=3; i>0; i--) {
//pick a rand number in [0, i]
nrand = randint(i+1);
//swap chksets[i] and chksets[nrand]
int temp;
temp = chksets[nrand];
chksets[nrand] = chksets[i];
chksets[i] = temp;
}
// define grid and block size
bx = by = bdim; // bx and by set from command line input
gx = gy = m/(bdim*2); // total # threads=m/2, each thread controls every other r/c of cells
if (bx*gx < m/2) {
printf("Error: number of threads in x dimension less than half the number of cells. \
Aborting...\n");
exit(-1);
}
dim3 dimBlock(bx, by, 1);
dim3 dimGrid(gx, gy, 1);
printf("# sub-sweeps: Block x = %d, Block y = %d, Grid x = %d, Grid y = %d.\n", bx, by, gx, \
gy);
// Loop over checkerboard sets
for(i=0; i<4; i++) {
set = chksets[i];
switch(set) {
case 'a':
off_x = 0;
off_y = 0;
break;
case 'b':
off_x = 1;
off_y = 0;
break;
case 'c':
off_x = 0;
off_y = 1;
break;
case 'd':
off_x = 1;
off_y = 1;
break;
default:
printf("Error: set not in the checkerboard sets. Aborting...\n");
exit(-1);
}
// Sub-sweep GPU kernel
// need to swap buffer each iteration
if (iter%2)
subsweep<<<dimGrid, dimBlock>>>(rsys_update, n_update, off_x, off_y, m, iter, state);
else
subsweep<<<dimGrid, dimBlock>>>(rsys_d, n_d, off_x, off_y, m, iter, state);
// synchronize all threads in the device
HANDLE_ERROR(cudaDeviceSynchronize());
} // Done with sub-sweeps
// Shift cells
d = (float) rand()/(float)(RAND_MAX)*w/2.;//generate random floating point number [0, w/2.]
nrand = randint(4); // randomly select a direction to perform cell shift
fx = shftvec[nrand][0];
fy = shftvec[nrand][1];
gx = gy = m/bdim; // total # threads=m, each thread controls a cell
if (bx*gx < m) {
printf("Error: number of threads in x dimension less than the nubmer of the number of cells. \
Aborting...\n");
exit(-1);
}
printf("# shift cells: Block x = %d, Block y = %d, Grid x = %d, Grid y = %d.\n", bx, by, gx, gy);
dim3 dimGrid2(gx, gy, 1);
// need to swap buffer each iteration
if (iter%2)
shift_cells<<<dimGrid2, dimBlock>>>(fx, fy, d, rsys_update, n_update, rsys_d, n_d, m);
else
shift_cells<<<dimGrid2, dimBlock>>>(fx, fy, d, rsys_d, n_d, rsys_update, n_update, m);
HANDLE_ERROR(cudaDeviceSynchronize());
//end timing
timing(&end, &cput);
return(end - start);
} // Done with MC sweep
// random number generator, returns an integer in the range [0, n)
// credit: http://stackoverflow.com/questions/822323
int randint(int n) {
if ((n-1) == RAND_MAX) {
return rand();
} else {
// chop off all values that would cause skew
long end = RAND_MAX / n;
assert (end > 0L);
end *= n;
//ignore results from rand() that fall above the limit
int r;
while ((r=rand()) >= end) ;
return r%n; // obtain rand number that gives a uniform distribution
}
}
// timer function
// credit: Dr. Andrew Sherman, Yale University
void timing(double* wcTime, double* cpuTime)
{
struct timeval tp;
struct rusage ruse;
gettimeofday(&tp, NULL);
*wcTime=(double) (tp.tv_sec + tp.tv_usec/1000000.0);
getrusage(RUSAGE_SELF, &ruse);
*cpuTime=(double)(ruse.ru_utime.tv_sec+ruse.ru_utime.tv_usec / 1000000.0);
}
|
01d0c816db4559a0161dc771f1ce289210cd6e19.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <cstring>
#include <string>
using namespace std;
#define NUM_DATA 512
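// The kernel below is launched as a single block with one thread per element, so NUM_DATA
// must not exceed the per-block thread limit (1024 on current GPUs).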
__global__ void vecAdd(int *a,int *b,int *c)
{
int tid = threadIdx.x;
c[tid] = a[tid] + b[tid];
}
int main()
{
int *a,*b,*c;
int *d_a,*d_b,*d_c;
int memSize = sizeof(int)*NUM_DATA;
cout << "elements : " << NUM_DATA <<"\n";
a = new int[NUM_DATA]; memset(a,0,memSize);
b = new int[NUM_DATA]; memset(b,0,memSize);
c = new int[NUM_DATA]; memset(c,0,memSize);
for(int i = 0 ; i < NUM_DATA; i++)
{
a[i] = rand() % 10;
b[i] = rand() % 10;
}
hipMalloc(&d_a,memSize);
hipMalloc(&d_b,memSize);
hipMalloc(&d_c,memSize);
hipMemcpy(d_a,a,memSize,hipMemcpyHostToDevice);
hipMemcpy(d_b,b,memSize,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( vecAdd), dim3(1),dim3(NUM_DATA), 0, 0, d_a,d_b,d_c);
hipDeviceSynchronize();
hipMemcpy(c,d_c,memSize,hipMemcpyDeviceToHost);
bool result = true;
for(int i = 0 ; i < NUM_DATA; i++)
{
if(a[i] + b[i] != c[i]){
cout << "Gpu has error in vecAdd\n";
result = false;
}
}
if(result)
cout << "GPU WORKS WELL \n";
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
delete[] a; delete[] b; delete[] c;
return 0;
}
|
01d0c816db4559a0161dc771f1ce289210cd6e19.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <cstring>
#include <string>
using namespace std;
#define NUM_DATA 512
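// The kernel below is launched as a single block with one thread per element, so NUM_DATA
// must not exceed the per-block thread limit (1024 on current GPUs).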
__global__ void vecAdd(int *a,int *b,int *c)
{
int tid = threadIdx.x;
c[tid] = a[tid] + b[tid];
}
int main()
{
int *a,*b,*c;
int *d_a,*d_b,*d_c;
int memSize = sizeof(int)*NUM_DATA;
cout << "elements : " << NUM_DATA <<"\n";
a = new int[NUM_DATA]; memset(a,0,memSize);
b = new int[NUM_DATA]; memset(b,0,memSize);
c = new int[NUM_DATA]; memset(c,0,memSize);
for(int i = 0 ; i < NUM_DATA; i++)
{
a[i] = rand() % 10;
b[i] = rand() % 10;
}
cudaMalloc(&d_a,memSize);
cudaMalloc(&d_b,memSize);
cudaMalloc(&d_c,memSize);
cudaMemcpy(d_a,a,memSize,cudaMemcpyHostToDevice);
cudaMemcpy(d_b,b,memSize,cudaMemcpyHostToDevice);
vecAdd<<<1,NUM_DATA>>>(d_a,d_b,d_c);
cudaDeviceSynchronize();
cudaMemcpy(c,d_c,memSize,cudaMemcpyDeviceToHost);
bool result = true;
for(int i = 0 ; i < NUM_DATA; i++)
{
if(a[i] + b[i] != c[i]){
cout << "Gpu has error in vecAdd\n";
result = false;
}
}
if(result)
cout << "GPU WORKS WELL \n";
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
delete[] a; delete[] b; delete[] c;
return 0;
}
|
pyrlk.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/utility.hpp"
#include "opencv2/gpu/device/functional.hpp"
#include "opencv2/gpu/device/limits.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
#include "opencv2/gpu/device/reduce.hpp"
using namespace cv::gpu;
using namespace cv::gpu::device;
namespace pyrlk
{
__constant__ int c_winSize_x;
__constant__ int c_winSize_y;
__constant__ int c_halfWin_x;
__constant__ int c_halfWin_y;
__constant__ int c_iters;
texture<float, hipTextureType2D, hipReadModeElementType> tex_If(false, hipFilterModeLinear, hipAddressModeClamp);
texture<float4, hipTextureType2D, hipReadModeElementType> tex_If4(false, hipFilterModeLinear, hipAddressModeClamp);
texture<uchar, hipTextureType2D, hipReadModeElementType> tex_Ib(false, hipFilterModePoint, hipAddressModeClamp);
texture<float, hipTextureType2D, hipReadModeElementType> tex_Jf(false, hipFilterModeLinear, hipAddressModeClamp);
texture<float4, hipTextureType2D, hipReadModeElementType> tex_Jf4(false, hipFilterModeLinear, hipAddressModeClamp);
template <int cn> struct Tex_I;
template <> struct Tex_I<1>
{
static __device__ __forceinline__ float read(float x, float y)
{
return tex2D(tex_If, x, y);
}
};
template <> struct Tex_I<4>
{
static __device__ __forceinline__ float4 read(float x, float y)
{
return tex2D(tex_If4, x, y);
}
};
template <int cn> struct Tex_J;
template <> struct Tex_J<1>
{
static __device__ __forceinline__ float read(float x, float y)
{
return tex2D(tex_Jf, x, y);
}
};
template <> struct Tex_J<4>
{
static __device__ __forceinline__ float4 read(float x, float y)
{
return tex2D(tex_Jf4, x, y);
}
};
__device__ __forceinline__ void accum(float& dst, float val)
{
dst += val;
}
__device__ __forceinline__ void accum(float& dst, const float4& val)
{
dst += val.x + val.y + val.z;
}
__device__ __forceinline__ float abs_(float a)
{
return ::fabsf(a);
}
__device__ __forceinline__ float4 abs_(const float4& a)
{
return abs(a);
}
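// Sparse pyramidal Lucas-Kanade: one thread block per tracked point. The block accumulates the
// 2x2 normal matrix (A11, A12, A22) from Scharr image derivatives over the patch, then
// iteratively solves the 2x2 system for the displacement update until it falls below 0.01 px
// or c_iters is reached, refining nextPts level by level from coarse to fine.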
template <int cn, int PATCH_X, int PATCH_Y, bool calcErr>
__global__ void sparseKernel(const float2* prevPts, float2* nextPts, uchar* status, float* err, const int level, const int rows, const int cols)
{
#if __CUDA_ARCH__ <= 110
const int BLOCK_SIZE = 128;
#else
const int BLOCK_SIZE = 256;
#endif
__shared__ float smem1[BLOCK_SIZE];
__shared__ float smem2[BLOCK_SIZE];
__shared__ float smem3[BLOCK_SIZE];
const unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x;
float2 prevPt = prevPts[blockIdx.x];
prevPt.x *= (1.0f / (1 << level));
prevPt.y *= (1.0f / (1 << level));
if (prevPt.x < 0 || prevPt.x >= cols || prevPt.y < 0 || prevPt.y >= rows)
{
if (tid == 0 && level == 0)
status[blockIdx.x] = 0;
return;
}
prevPt.x -= c_halfWin_x;
prevPt.y -= c_halfWin_y;
// extract the patch from the first image, compute covariance matrix of derivatives
float A11 = 0;
float A12 = 0;
float A22 = 0;
typedef typename TypeVec<float, cn>::vec_type work_type;
work_type I_patch [PATCH_Y][PATCH_X];
work_type dIdx_patch[PATCH_Y][PATCH_X];
work_type dIdy_patch[PATCH_Y][PATCH_X];
for (int yBase = threadIdx.y, i = 0; yBase < c_winSize_y; yBase += blockDim.y, ++i)
{
for (int xBase = threadIdx.x, j = 0; xBase < c_winSize_x; xBase += blockDim.x, ++j)
{
float x = prevPt.x + xBase + 0.5f;
float y = prevPt.y + yBase + 0.5f;
I_patch[i][j] = Tex_I<cn>::read(x, y);
// Scharr Deriv
work_type dIdx = 3.0f * Tex_I<cn>::read(x+1, y-1) + 10.0f * Tex_I<cn>::read(x+1, y) + 3.0f * Tex_I<cn>::read(x+1, y+1) -
(3.0f * Tex_I<cn>::read(x-1, y-1) + 10.0f * Tex_I<cn>::read(x-1, y) + 3.0f * Tex_I<cn>::read(x-1, y+1));
work_type dIdy = 3.0f * Tex_I<cn>::read(x-1, y+1) + 10.0f * Tex_I<cn>::read(x, y+1) + 3.0f * Tex_I<cn>::read(x+1, y+1) -
(3.0f * Tex_I<cn>::read(x-1, y-1) + 10.0f * Tex_I<cn>::read(x, y-1) + 3.0f * Tex_I<cn>::read(x+1, y-1));
dIdx_patch[i][j] = dIdx;
dIdy_patch[i][j] = dIdy;
accum(A11, dIdx * dIdx);
accum(A12, dIdx * dIdy);
accum(A22, dIdy * dIdy);
}
}
reduce<BLOCK_SIZE>(smem_tuple(smem1, smem2, smem3), thrust::tie(A11, A12, A22), tid, thrust::make_tuple(plus<float>(), plus<float>(), plus<float>()));
#if __CUDA_ARCH__ >= 300
if (tid == 0)
{
smem1[0] = A11;
smem2[0] = A12;
smem3[0] = A22;
}
#endif
__syncthreads();
A11 = smem1[0];
A12 = smem2[0];
A22 = smem3[0];
float D = A11 * A22 - A12 * A12;
if (D < numeric_limits<float>::epsilon())
{
if (tid == 0 && level == 0)
status[blockIdx.x] = 0;
return;
}
D = 1.f / D;
A11 *= D;
A12 *= D;
A22 *= D;
float2 nextPt = nextPts[blockIdx.x];
nextPt.x *= 2.f;
nextPt.y *= 2.f;
nextPt.x -= c_halfWin_x;
nextPt.y -= c_halfWin_y;
for (int k = 0; k < c_iters; ++k)
{
if (nextPt.x < -c_halfWin_x || nextPt.x >= cols || nextPt.y < -c_halfWin_y || nextPt.y >= rows)
{
if (tid == 0 && level == 0)
status[blockIdx.x] = 0;
return;
}
float b1 = 0;
float b2 = 0;
for (int y = threadIdx.y, i = 0; y < c_winSize_y; y += blockDim.y, ++i)
{
for (int x = threadIdx.x, j = 0; x < c_winSize_x; x += blockDim.x, ++j)
{
work_type I_val = I_patch[i][j];
work_type J_val = Tex_J<cn>::read(nextPt.x + x + 0.5f, nextPt.y + y + 0.5f);
work_type diff = (J_val - I_val) * 32.0f;
accum(b1, diff * dIdx_patch[i][j]);
accum(b2, diff * dIdy_patch[i][j]);
}
}
reduce<BLOCK_SIZE>(smem_tuple(smem1, smem2), thrust::tie(b1, b2), tid, thrust::make_tuple(plus<float>(), plus<float>()));
#if __CUDA_ARCH__ >= 300
if (tid == 0)
{
smem1[0] = b1;
smem2[0] = b2;
}
#endif
__syncthreads();
b1 = smem1[0];
b2 = smem2[0];
float2 delta;
delta.x = A12 * b2 - A22 * b1;
delta.y = A12 * b1 - A11 * b2;
nextPt.x += delta.x;
nextPt.y += delta.y;
if (::fabs(delta.x) < 0.01f && ::fabs(delta.y) < 0.01f)
break;
}
float errval = 0;
if (calcErr)
{
for (int y = threadIdx.y, i = 0; y < c_winSize_y; y += blockDim.y, ++i)
{
for (int x = threadIdx.x, j = 0; x < c_winSize_x; x += blockDim.x, ++j)
{
work_type I_val = I_patch[i][j];
work_type J_val = Tex_J<cn>::read(nextPt.x + x + 0.5f, nextPt.y + y + 0.5f);
work_type diff = J_val - I_val;
accum(errval, abs_(diff));
}
}
reduce<BLOCK_SIZE>(smem1, errval, tid, plus<float>());
}
if (tid == 0)
{
nextPt.x += c_halfWin_x;
nextPt.y += c_halfWin_y;
nextPts[blockIdx.x] = nextPt;
if (calcErr)
err[blockIdx.x] = static_cast<float>(errval) / (cn * c_winSize_x * c_winSize_y);
}
}
template <int cn, int PATCH_X, int PATCH_Y>
void sparse_caller(int rows, int cols, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount,
int level, dim3 block, hipStream_t stream)
{
dim3 grid(ptcount);
if (level == 0 && err)
hipLaunchKernelGGL(( sparseKernel<cn, PATCH_X, PATCH_Y, true>), dim3(grid), dim3(block), 0, 0, prevPts, nextPts, status, err, level, rows, cols);
else
hipLaunchKernelGGL(( sparseKernel<cn, PATCH_X, PATCH_Y, false>), dim3(grid), dim3(block), 0, 0, prevPts, nextPts, status, err, level, rows, cols);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
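// Dense variant: each 16x16 block caches its image patch plus the window halo (I, dIdx, dIdy)
// in shared memory and runs the same iterative 2x2 solve for every pixel, seeding the flow
// estimate from the half-resolution prevU/prevV fields.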
template <bool calcErr>
__global__ void denseKernel(PtrStepf u, PtrStepf v, const PtrStepf prevU, const PtrStepf prevV, PtrStepf err, const int rows, const int cols)
{
extern __shared__ int smem[];
const int patchWidth = blockDim.x + 2 * c_halfWin_x;
const int patchHeight = blockDim.y + 2 * c_halfWin_y;
int* I_patch = smem;
int* dIdx_patch = I_patch + patchWidth * patchHeight;
int* dIdy_patch = dIdx_patch + patchWidth * patchHeight;
const int xBase = blockIdx.x * blockDim.x;
const int yBase = blockIdx.y * blockDim.y;
for (int i = threadIdx.y; i < patchHeight; i += blockDim.y)
{
for (int j = threadIdx.x; j < patchWidth; j += blockDim.x)
{
float x = xBase - c_halfWin_x + j + 0.5f;
float y = yBase - c_halfWin_y + i + 0.5f;
I_patch[i * patchWidth + j] = tex2D(tex_Ib, x, y);
// Scharr Deriv
dIdx_patch[i * patchWidth + j] = 3 * tex2D(tex_Ib, x+1, y-1) + 10 * tex2D(tex_Ib, x+1, y) + 3 * tex2D(tex_Ib, x+1, y+1) -
(3 * tex2D(tex_Ib, x-1, y-1) + 10 * tex2D(tex_Ib, x-1, y) + 3 * tex2D(tex_Ib, x-1, y+1));
dIdy_patch[i * patchWidth + j] = 3 * tex2D(tex_Ib, x-1, y+1) + 10 * tex2D(tex_Ib, x, y+1) + 3 * tex2D(tex_Ib, x+1, y+1) -
(3 * tex2D(tex_Ib, x-1, y-1) + 10 * tex2D(tex_Ib, x, y-1) + 3 * tex2D(tex_Ib, x+1, y-1));
}
}
__syncthreads();
const int x = xBase + threadIdx.x;
const int y = yBase + threadIdx.y;
if (x >= cols || y >= rows)
return;
int A11i = 0;
int A12i = 0;
int A22i = 0;
for (int i = 0; i < c_winSize_y; ++i)
{
for (int j = 0; j < c_winSize_x; ++j)
{
int dIdx = dIdx_patch[(threadIdx.y + i) * patchWidth + (threadIdx.x + j)];
int dIdy = dIdy_patch[(threadIdx.y + i) * patchWidth + (threadIdx.x + j)];
A11i += dIdx * dIdx;
A12i += dIdx * dIdy;
A22i += dIdy * dIdy;
}
}
float A11 = A11i;
float A12 = A12i;
float A22 = A22i;
float D = A11 * A22 - A12 * A12;
if (D < numeric_limits<float>::epsilon())
{
if (calcErr)
err(y, x) = numeric_limits<float>::max();
return;
}
D = 1.f / D;
A11 *= D;
A12 *= D;
A22 *= D;
float2 nextPt;
nextPt.x = x + prevU(y/2, x/2) * 2.0f;
nextPt.y = y + prevV(y/2, x/2) * 2.0f;
for (int k = 0; k < c_iters; ++k)
{
if (nextPt.x < 0 || nextPt.x >= cols || nextPt.y < 0 || nextPt.y >= rows)
{
if (calcErr)
err(y, x) = numeric_limits<float>::max();
return;
}
int b1 = 0;
int b2 = 0;
for (int i = 0; i < c_winSize_y; ++i)
{
for (int j = 0; j < c_winSize_x; ++j)
{
int I = I_patch[(threadIdx.y + i) * patchWidth + threadIdx.x + j];
int J = tex2D(tex_Jf, nextPt.x - c_halfWin_x + j + 0.5f, nextPt.y - c_halfWin_y + i + 0.5f);
int diff = (J - I) * 32;
int dIdx = dIdx_patch[(threadIdx.y + i) * patchWidth + (threadIdx.x + j)];
int dIdy = dIdy_patch[(threadIdx.y + i) * patchWidth + (threadIdx.x + j)];
b1 += diff * dIdx;
b2 += diff * dIdy;
}
}
float2 delta;
delta.x = A12 * b2 - A22 * b1;
delta.y = A12 * b1 - A11 * b2;
nextPt.x += delta.x;
nextPt.y += delta.y;
if (::fabs(delta.x) < 0.01f && ::fabs(delta.y) < 0.01f)
break;
}
u(y, x) = nextPt.x - x;
v(y, x) = nextPt.y - y;
if (calcErr)
{
int errval = 0;
for (int i = 0; i < c_winSize_y; ++i)
{
for (int j = 0; j < c_winSize_x; ++j)
{
int I = I_patch[(threadIdx.y + i) * patchWidth + threadIdx.x + j];
int J = tex2D(tex_Jf, nextPt.x - c_halfWin_x + j + 0.5f, nextPt.y - c_halfWin_y + i + 0.5f);
errval += ::abs(J - I);
}
}
err(y, x) = static_cast<float>(errval) / (c_winSize_x * c_winSize_y);
}
}
void loadConstants(int2 winSize, int iters)
{
cudaSafeCall( hipMemcpyToSymbol(c_winSize_x, &winSize.x, sizeof(int)) );
cudaSafeCall( hipMemcpyToSymbol(c_winSize_y, &winSize.y, sizeof(int)) );
int2 halfWin = make_int2((winSize.x - 1) / 2, (winSize.y - 1) / 2);
cudaSafeCall( hipMemcpyToSymbol(c_halfWin_x, &halfWin.x, sizeof(int)) );
cudaSafeCall( hipMemcpyToSymbol(c_halfWin_y, &halfWin.y, sizeof(int)) );
cudaSafeCall( hipMemcpyToSymbol(c_iters, &iters, sizeof(int)) );
}
void sparse1(PtrStepSzf I, PtrStepSzf J, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount,
int level, dim3 block, dim3 patch, hipStream_t stream)
{
typedef void (*func_t)(int rows, int cols, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount,
int level, dim3 block, hipStream_t stream);
static const func_t funcs[5][5] =
{
{sparse_caller<1, 1, 1>, sparse_caller<1, 2, 1>, sparse_caller<1, 3, 1>, sparse_caller<1, 4, 1>, sparse_caller<1, 5, 1>},
{sparse_caller<1, 1, 2>, sparse_caller<1, 2, 2>, sparse_caller<1, 3, 2>, sparse_caller<1, 4, 2>, sparse_caller<1, 5, 2>},
{sparse_caller<1, 1, 3>, sparse_caller<1, 2, 3>, sparse_caller<1, 3, 3>, sparse_caller<1, 4, 3>, sparse_caller<1, 5, 3>},
{sparse_caller<1, 1, 4>, sparse_caller<1, 2, 4>, sparse_caller<1, 3, 4>, sparse_caller<1, 4, 4>, sparse_caller<1, 5, 4>},
{sparse_caller<1, 1, 5>, sparse_caller<1, 2, 5>, sparse_caller<1, 3, 5>, sparse_caller<1, 4, 5>, sparse_caller<1, 5, 5>}
};
bindTexture(&tex_If, I);
bindTexture(&tex_Jf, J);
funcs[patch.y - 1][patch.x - 1](I.rows, I.cols, prevPts, nextPts, status, err, ptcount,
level, block, stream);
}
void sparse4(PtrStepSz<float4> I, PtrStepSz<float4> J, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount,
int level, dim3 block, dim3 patch, hipStream_t stream)
{
typedef void (*func_t)(int rows, int cols, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount,
int level, dim3 block, hipStream_t stream);
static const func_t funcs[5][5] =
{
{sparse_caller<4, 1, 1>, sparse_caller<4, 2, 1>, sparse_caller<4, 3, 1>, sparse_caller<4, 4, 1>, sparse_caller<4, 5, 1>},
{sparse_caller<4, 1, 2>, sparse_caller<4, 2, 2>, sparse_caller<4, 3, 2>, sparse_caller<4, 4, 2>, sparse_caller<4, 5, 2>},
{sparse_caller<4, 1, 3>, sparse_caller<4, 2, 3>, sparse_caller<4, 3, 3>, sparse_caller<4, 4, 3>, sparse_caller<4, 5, 3>},
{sparse_caller<4, 1, 4>, sparse_caller<4, 2, 4>, sparse_caller<4, 3, 4>, sparse_caller<4, 4, 4>, sparse_caller<4, 5, 4>},
{sparse_caller<4, 1, 5>, sparse_caller<4, 2, 5>, sparse_caller<4, 3, 5>, sparse_caller<4, 4, 5>, sparse_caller<4, 5, 5>}
};
bindTexture(&tex_If4, I);
bindTexture(&tex_Jf4, J);
funcs[patch.y - 1][patch.x - 1](I.rows, I.cols, prevPts, nextPts, status, err, ptcount,
level, block, stream);
}
void dense(PtrStepSzb I, PtrStepSzf J, PtrStepSzf u, PtrStepSzf v, PtrStepSzf prevU, PtrStepSzf prevV, PtrStepSzf err, int2 winSize, hipStream_t stream)
{
dim3 block(16, 16);
dim3 grid(divUp(I.cols, block.x), divUp(I.rows, block.y));
bindTexture(&tex_Ib, I);
bindTexture(&tex_Jf, J);
int2 halfWin = make_int2((winSize.x - 1) / 2, (winSize.y - 1) / 2);
const int patchWidth = block.x + 2 * halfWin.x;
const int patchHeight = block.y + 2 * halfWin.y;
size_t smem_size = 3 * patchWidth * patchHeight * sizeof(int);
if (err.data)
{
hipLaunchKernelGGL(( denseKernel<true>), dim3(grid), dim3(block), smem_size, stream, u, v, prevU, prevV, err, I.rows, I.cols);
cudaSafeCall( hipGetLastError() );
}
else
{
hipLaunchKernelGGL(( denseKernel<false>), dim3(grid), dim3(block), smem_size, stream, u, v, prevU, prevV, PtrStepf(), I.rows, I.cols);
cudaSafeCall( hipGetLastError() );
}
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
}
#endif /* CUDA_DISABLER */
|
pyrlk.cu
|
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/utility.hpp"
#include "opencv2/gpu/device/functional.hpp"
#include "opencv2/gpu/device/limits.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
#include "opencv2/gpu/device/reduce.hpp"
using namespace cv::gpu;
using namespace cv::gpu::device;
namespace pyrlk
{
__constant__ int c_winSize_x;
__constant__ int c_winSize_y;
__constant__ int c_halfWin_x;
__constant__ int c_halfWin_y;
__constant__ int c_iters;
texture<float, cudaTextureType2D, cudaReadModeElementType> tex_If(false, cudaFilterModeLinear, cudaAddressModeClamp);
texture<float4, cudaTextureType2D, cudaReadModeElementType> tex_If4(false, cudaFilterModeLinear, cudaAddressModeClamp);
texture<uchar, cudaTextureType2D, cudaReadModeElementType> tex_Ib(false, cudaFilterModePoint, cudaAddressModeClamp);
texture<float, cudaTextureType2D, cudaReadModeElementType> tex_Jf(false, cudaFilterModeLinear, cudaAddressModeClamp);
texture<float4, cudaTextureType2D, cudaReadModeElementType> tex_Jf4(false, cudaFilterModeLinear, cudaAddressModeClamp);
template <int cn> struct Tex_I;
template <> struct Tex_I<1>
{
static __device__ __forceinline__ float read(float x, float y)
{
return tex2D(tex_If, x, y);
}
};
template <> struct Tex_I<4>
{
static __device__ __forceinline__ float4 read(float x, float y)
{
return tex2D(tex_If4, x, y);
}
};
template <int cn> struct Tex_J;
template <> struct Tex_J<1>
{
static __device__ __forceinline__ float read(float x, float y)
{
return tex2D(tex_Jf, x, y);
}
};
template <> struct Tex_J<4>
{
static __device__ __forceinline__ float4 read(float x, float y)
{
return tex2D(tex_Jf4, x, y);
}
};
__device__ __forceinline__ void accum(float& dst, float val)
{
dst += val;
}
__device__ __forceinline__ void accum(float& dst, const float4& val)
{
dst += val.x + val.y + val.z;
}
__device__ __forceinline__ float abs_(float a)
{
return ::fabsf(a);
}
__device__ __forceinline__ float4 abs_(const float4& a)
{
return abs(a);
}
template <int cn, int PATCH_X, int PATCH_Y, bool calcErr>
__global__ void sparseKernel(const float2* prevPts, float2* nextPts, uchar* status, float* err, const int level, const int rows, const int cols)
{
#if __CUDA_ARCH__ <= 110
const int BLOCK_SIZE = 128;
#else
const int BLOCK_SIZE = 256;
#endif
__shared__ float smem1[BLOCK_SIZE];
__shared__ float smem2[BLOCK_SIZE];
__shared__ float smem3[BLOCK_SIZE];
const unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x;
float2 prevPt = prevPts[blockIdx.x];
prevPt.x *= (1.0f / (1 << level));
prevPt.y *= (1.0f / (1 << level));
if (prevPt.x < 0 || prevPt.x >= cols || prevPt.y < 0 || prevPt.y >= rows)
{
if (tid == 0 && level == 0)
status[blockIdx.x] = 0;
return;
}
prevPt.x -= c_halfWin_x;
prevPt.y -= c_halfWin_y;
// extract the patch from the first image, compute the covariance matrix of derivatives
float A11 = 0;
float A12 = 0;
float A22 = 0;
typedef typename TypeVec<float, cn>::vec_type work_type;
work_type I_patch [PATCH_Y][PATCH_X];
work_type dIdx_patch[PATCH_Y][PATCH_X];
work_type dIdy_patch[PATCH_Y][PATCH_X];
for (int yBase = threadIdx.y, i = 0; yBase < c_winSize_y; yBase += blockDim.y, ++i)
{
for (int xBase = threadIdx.x, j = 0; xBase < c_winSize_x; xBase += blockDim.x, ++j)
{
float x = prevPt.x + xBase + 0.5f;
float y = prevPt.y + yBase + 0.5f;
I_patch[i][j] = Tex_I<cn>::read(x, y);
// Scharr derivative
work_type dIdx = 3.0f * Tex_I<cn>::read(x+1, y-1) + 10.0f * Tex_I<cn>::read(x+1, y) + 3.0f * Tex_I<cn>::read(x+1, y+1) -
(3.0f * Tex_I<cn>::read(x-1, y-1) + 10.0f * Tex_I<cn>::read(x-1, y) + 3.0f * Tex_I<cn>::read(x-1, y+1));
work_type dIdy = 3.0f * Tex_I<cn>::read(x-1, y+1) + 10.0f * Tex_I<cn>::read(x, y+1) + 3.0f * Tex_I<cn>::read(x+1, y+1) -
(3.0f * Tex_I<cn>::read(x-1, y-1) + 10.0f * Tex_I<cn>::read(x, y-1) + 3.0f * Tex_I<cn>::read(x+1, y-1));
dIdx_patch[i][j] = dIdx;
dIdy_patch[i][j] = dIdy;
accum(A11, dIdx * dIdx);
accum(A12, dIdx * dIdy);
accum(A22, dIdy * dIdy);
}
}
reduce<BLOCK_SIZE>(smem_tuple(smem1, smem2, smem3), thrust::tie(A11, A12, A22), tid, thrust::make_tuple(plus<float>(), plus<float>(), plus<float>()));
#if __CUDA_ARCH__ >= 300
if (tid == 0)
{
smem1[0] = A11;
smem2[0] = A12;
smem3[0] = A22;
}
#endif
__syncthreads();
A11 = smem1[0];
A12 = smem2[0];
A22 = smem3[0];
float D = A11 * A22 - A12 * A12;
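// Determinant of the 2x2 gradient (structure tensor) matrix; a near-zero value means the point cannot be tracked reliably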
if (D < numeric_limits<float>::epsilon())
{
if (tid == 0 && level == 0)
status[blockIdx.x] = 0;
return;
}
D = 1.f / D;
A11 *= D;
A12 *= D;
A22 *= D;
float2 nextPt = nextPts[blockIdx.x];
nextPt.x *= 2.f;
nextPt.y *= 2.f;
nextPt.x -= c_halfWin_x;
nextPt.y -= c_halfWin_y;
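// Iterative Lucas-Kanade refinement: accumulate the image-difference vector b over the window and apply the pre-inverted gradient matrix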
for (int k = 0; k < c_iters; ++k)
{
if (nextPt.x < -c_halfWin_x || nextPt.x >= cols || nextPt.y < -c_halfWin_y || nextPt.y >= rows)
{
if (tid == 0 && level == 0)
status[blockIdx.x] = 0;
return;
}
float b1 = 0;
float b2 = 0;
for (int y = threadIdx.y, i = 0; y < c_winSize_y; y += blockDim.y, ++i)
{
for (int x = threadIdx.x, j = 0; x < c_winSize_x; x += blockDim.x, ++j)
{
work_type I_val = I_patch[i][j];
work_type J_val = Tex_J<cn>::read(nextPt.x + x + 0.5f, nextPt.y + y + 0.5f);
work_type diff = (J_val - I_val) * 32.0f;
accum(b1, diff * dIdx_patch[i][j]);
accum(b2, diff * dIdy_patch[i][j]);
}
}
reduce<BLOCK_SIZE>(smem_tuple(smem1, smem2), thrust::tie(b1, b2), tid, thrust::make_tuple(plus<float>(), plus<float>()));
#if __CUDA_ARCH__ >= 300
if (tid == 0)
{
smem1[0] = b1;
smem2[0] = b2;
}
#endif
__syncthreads();
b1 = smem1[0];
b2 = smem2[0];
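// delta = -G^-1 * b; the 1/det factor is already folded into A11/A12/A22 above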
float2 delta;
delta.x = A12 * b2 - A22 * b1;
delta.y = A12 * b1 - A11 * b2;
nextPt.x += delta.x;
nextPt.y += delta.y;
if (::fabs(delta.x) < 0.01f && ::fabs(delta.y) < 0.01f)
break;
}
float errval = 0;
if (calcErr)
{
for (int y = threadIdx.y, i = 0; y < c_winSize_y; y += blockDim.y, ++i)
{
for (int x = threadIdx.x, j = 0; x < c_winSize_x; x += blockDim.x, ++j)
{
work_type I_val = I_patch[i][j];
work_type J_val = Tex_J<cn>::read(nextPt.x + x + 0.5f, nextPt.y + y + 0.5f);
work_type diff = J_val - I_val;
accum(errval, abs_(diff));
}
}
reduce<BLOCK_SIZE>(smem1, errval, tid, plus<float>());
}
if (tid == 0)
{
nextPt.x += c_halfWin_x;
nextPt.y += c_halfWin_y;
nextPts[blockIdx.x] = nextPt;
if (calcErr)
err[blockIdx.x] = static_cast<float>(errval) / (cn * c_winSize_x * c_winSize_y);
}
}
template <int cn, int PATCH_X, int PATCH_Y>
void sparse_caller(int rows, int cols, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount,
int level, dim3 block, cudaStream_t stream)
{
dim3 grid(ptcount);
if (level == 0 && err)
sparseKernel<cn, PATCH_X, PATCH_Y, true><<<grid, block>>>(prevPts, nextPts, status, err, level, rows, cols);
else
sparseKernel<cn, PATCH_X, PATCH_Y, false><<<grid, block>>>(prevPts, nextPts, status, err, level, rows, cols);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <bool calcErr>
__global__ void denseKernel(PtrStepf u, PtrStepf v, const PtrStepf prevU, const PtrStepf prevV, PtrStepf err, const int rows, const int cols)
{
extern __shared__ int smem[];
const int patchWidth = blockDim.x + 2 * c_halfWin_x;
const int patchHeight = blockDim.y + 2 * c_halfWin_y;
int* I_patch = smem;
int* dIdx_patch = I_patch + patchWidth * patchHeight;
int* dIdy_patch = dIdx_patch + patchWidth * patchHeight;
const int xBase = blockIdx.x * blockDim.x;
const int yBase = blockIdx.y * blockDim.y;
for (int i = threadIdx.y; i < patchHeight; i += blockDim.y)
{
for (int j = threadIdx.x; j < patchWidth; j += blockDim.x)
{
float x = xBase - c_halfWin_x + j + 0.5f;
float y = yBase - c_halfWin_y + i + 0.5f;
I_patch[i * patchWidth + j] = tex2D(tex_Ib, x, y);
// Scharr derivative
dIdx_patch[i * patchWidth + j] = 3 * tex2D(tex_Ib, x+1, y-1) + 10 * tex2D(tex_Ib, x+1, y) + 3 * tex2D(tex_Ib, x+1, y+1) -
(3 * tex2D(tex_Ib, x-1, y-1) + 10 * tex2D(tex_Ib, x-1, y) + 3 * tex2D(tex_Ib, x-1, y+1));
dIdy_patch[i * patchWidth + j] = 3 * tex2D(tex_Ib, x-1, y+1) + 10 * tex2D(tex_Ib, x, y+1) + 3 * tex2D(tex_Ib, x+1, y+1) -
(3 * tex2D(tex_Ib, x-1, y-1) + 10 * tex2D(tex_Ib, x, y-1) + 3 * tex2D(tex_Ib, x+1, y-1));
}
}
__syncthreads();
const int x = xBase + threadIdx.x;
const int y = yBase + threadIdx.y;
if (x >= cols || y >= rows)
return;
int A11i = 0;
int A12i = 0;
int A22i = 0;
for (int i = 0; i < c_winSize_y; ++i)
{
for (int j = 0; j < c_winSize_x; ++j)
{
int dIdx = dIdx_patch[(threadIdx.y + i) * patchWidth + (threadIdx.x + j)];
int dIdy = dIdy_patch[(threadIdx.y + i) * patchWidth + (threadIdx.x + j)];
A11i += dIdx * dIdx;
A12i += dIdx * dIdy;
A22i += dIdy * dIdy;
}
}
float A11 = A11i;
float A12 = A12i;
float A22 = A22i;
float D = A11 * A22 - A12 * A12;
if (D < numeric_limits<float>::epsilon())
{
if (calcErr)
err(y, x) = numeric_limits<float>::max();
return;
}
D = 1.f / D;
A11 *= D;
A12 *= D;
A22 *= D;
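// Seed the estimate with the flow from the coarser pyramid level, upscaled by a factor of 2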
float2 nextPt;
nextPt.x = x + prevU(y/2, x/2) * 2.0f;
nextPt.y = y + prevV(y/2, x/2) * 2.0f;
for (int k = 0; k < c_iters; ++k)
{
if (nextPt.x < 0 || nextPt.x >= cols || nextPt.y < 0 || nextPt.y >= rows)
{
if (calcErr)
err(y, x) = numeric_limits<float>::max();
return;
}
int b1 = 0;
int b2 = 0;
for (int i = 0; i < c_winSize_y; ++i)
{
for (int j = 0; j < c_winSize_x; ++j)
{
int I = I_patch[(threadIdx.y + i) * patchWidth + threadIdx.x + j];
int J = tex2D(tex_Jf, nextPt.x - c_halfWin_x + j + 0.5f, nextPt.y - c_halfWin_y + i + 0.5f);
int diff = (J - I) * 32;
int dIdx = dIdx_patch[(threadIdx.y + i) * patchWidth + (threadIdx.x + j)];
int dIdy = dIdy_patch[(threadIdx.y + i) * patchWidth + (threadIdx.x + j)];
b1 += diff * dIdx;
b2 += diff * dIdy;
}
}
float2 delta;
delta.x = A12 * b2 - A22 * b1;
delta.y = A12 * b1 - A11 * b2;
nextPt.x += delta.x;
nextPt.y += delta.y;
if (::fabs(delta.x) < 0.01f && ::fabs(delta.y) < 0.01f)
break;
}
u(y, x) = nextPt.x - x;
v(y, x) = nextPt.y - y;
if (calcErr)
{
int errval = 0;
for (int i = 0; i < c_winSize_y; ++i)
{
for (int j = 0; j < c_winSize_x; ++j)
{
int I = I_patch[(threadIdx.y + i) * patchWidth + threadIdx.x + j];
int J = tex2D(tex_Jf, nextPt.x - c_halfWin_x + j + 0.5f, nextPt.y - c_halfWin_y + i + 0.5f);
errval += ::abs(J - I);
}
}
err(y, x) = static_cast<float>(errval) / (c_winSize_x * c_winSize_y);
}
}
void loadConstants(int2 winSize, int iters)
{
cudaSafeCall( cudaMemcpyToSymbol(c_winSize_x, &winSize.x, sizeof(int)) );
cudaSafeCall( cudaMemcpyToSymbol(c_winSize_y, &winSize.y, sizeof(int)) );
int2 halfWin = make_int2((winSize.x - 1) / 2, (winSize.y - 1) / 2);
cudaSafeCall( cudaMemcpyToSymbol(c_halfWin_x, &halfWin.x, sizeof(int)) );
cudaSafeCall( cudaMemcpyToSymbol(c_halfWin_y, &halfWin.y, sizeof(int)) );
cudaSafeCall( cudaMemcpyToSymbol(c_iters, &iters, sizeof(int)) );
}
void sparse1(PtrStepSzf I, PtrStepSzf J, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount,
int level, dim3 block, dim3 patch, cudaStream_t stream)
{
typedef void (*func_t)(int rows, int cols, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount,
int level, dim3 block, cudaStream_t stream);
static const func_t funcs[5][5] =
{
{sparse_caller<1, 1, 1>, sparse_caller<1, 2, 1>, sparse_caller<1, 3, 1>, sparse_caller<1, 4, 1>, sparse_caller<1, 5, 1>},
{sparse_caller<1, 1, 2>, sparse_caller<1, 2, 2>, sparse_caller<1, 3, 2>, sparse_caller<1, 4, 2>, sparse_caller<1, 5, 2>},
{sparse_caller<1, 1, 3>, sparse_caller<1, 2, 3>, sparse_caller<1, 3, 3>, sparse_caller<1, 4, 3>, sparse_caller<1, 5, 3>},
{sparse_caller<1, 1, 4>, sparse_caller<1, 2, 4>, sparse_caller<1, 3, 4>, sparse_caller<1, 4, 4>, sparse_caller<1, 5, 4>},
{sparse_caller<1, 1, 5>, sparse_caller<1, 2, 5>, sparse_caller<1, 3, 5>, sparse_caller<1, 4, 5>, sparse_caller<1, 5, 5>}
};
bindTexture(&tex_If, I);
bindTexture(&tex_Jf, J);
funcs[patch.y - 1][patch.x - 1](I.rows, I.cols, prevPts, nextPts, status, err, ptcount,
level, block, stream);
}
void sparse4(PtrStepSz<float4> I, PtrStepSz<float4> J, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount,
int level, dim3 block, dim3 patch, cudaStream_t stream)
{
typedef void (*func_t)(int rows, int cols, const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount,
int level, dim3 block, cudaStream_t stream);
static const func_t funcs[5][5] =
{
{sparse_caller<4, 1, 1>, sparse_caller<4, 2, 1>, sparse_caller<4, 3, 1>, sparse_caller<4, 4, 1>, sparse_caller<4, 5, 1>},
{sparse_caller<4, 1, 2>, sparse_caller<4, 2, 2>, sparse_caller<4, 3, 2>, sparse_caller<4, 4, 2>, sparse_caller<4, 5, 2>},
{sparse_caller<4, 1, 3>, sparse_caller<4, 2, 3>, sparse_caller<4, 3, 3>, sparse_caller<4, 4, 3>, sparse_caller<4, 5, 3>},
{sparse_caller<4, 1, 4>, sparse_caller<4, 2, 4>, sparse_caller<4, 3, 4>, sparse_caller<4, 4, 4>, sparse_caller<4, 5, 4>},
{sparse_caller<4, 1, 5>, sparse_caller<4, 2, 5>, sparse_caller<4, 3, 5>, sparse_caller<4, 4, 5>, sparse_caller<4, 5, 5>}
};
bindTexture(&tex_If4, I);
bindTexture(&tex_Jf4, J);
funcs[patch.y - 1][patch.x - 1](I.rows, I.cols, prevPts, nextPts, status, err, ptcount,
level, block, stream);
}
void dense(PtrStepSzb I, PtrStepSzf J, PtrStepSzf u, PtrStepSzf v, PtrStepSzf prevU, PtrStepSzf prevV, PtrStepSzf err, int2 winSize, cudaStream_t stream)
{
dim3 block(16, 16);
dim3 grid(divUp(I.cols, block.x), divUp(I.rows, block.y));
bindTexture(&tex_Ib, I);
bindTexture(&tex_Jf, J);
int2 halfWin = make_int2((winSize.x - 1) / 2, (winSize.y - 1) / 2);
const int patchWidth = block.x + 2 * halfWin.x;
const int patchHeight = block.y + 2 * halfWin.y;
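// Dynamic shared memory: three int planes (image patch plus its x and y derivatives) sized to the block plus the window border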
size_t smem_size = 3 * patchWidth * patchHeight * sizeof(int);
if (err.data)
{
denseKernel<true><<<grid, block, smem_size, stream>>>(u, v, prevU, prevV, err, I.rows, I.cols);
cudaSafeCall( cudaGetLastError() );
}
else
{
denseKernel<false><<<grid, block, smem_size, stream>>>(u, v, prevU, prevV, PtrStepf(), I.rows, I.cols);
cudaSafeCall( cudaGetLastError() );
}
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
}
#endif /* CUDA_DISABLER */
|
53c19870f83ae00d09f2b20a174fcf466cb5b524.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
/*int *dev_idata;*/
int *dev_odata;
thrust::device_vector<int> dev_thrust_idata;
thrust::device_vector<int> dev_thrust_odata;
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
/*
hipMalloc((void**)&dev_idata, n * sizeof(int));
checkCUDAError("hipMalloc dev_idata failed!");*/
hipMalloc((void**)&dev_odata, n * sizeof(int));
checkCUDAError("hipMalloc dev_odata failed!");
/*hipMemcpy(dev_idata, idata, n * sizeof(int), hipMemcpyHostToDevice);
checkCUDAError("hipMemcpy idata to dev_idata failed!");*/
thrust::device_vector<int> dev_thrust_idata(idata, idata + n);
thrust::device_vector<int> dev_thrust_odata(odata, odata + n);
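// The exclusive scan runs entirely on the device; the device_vector constructors above handle the host-to-device copies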
thrust::exclusive_scan(dev_thrust_idata.begin(), dev_thrust_idata.end(),
dev_thrust_odata.begin());
thrust::copy(dev_thrust_odata.begin(), dev_thrust_odata.end(), dev_odata);
hipMemcpy(odata, dev_odata, n*sizeof(int), hipMemcpyDeviceToHost);
checkCUDAError("hipMemcpy dev_odata to odata failed!");
}
}
}
|
53c19870f83ae00d09f2b20a174fcf466cb5b524.cu
|
#include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
/*int *dev_idata;*/
int *dev_odata;
thrust::device_vector<int> dev_thrust_idata;
thrust::device_vector<int> dev_thrust_odata;
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
/*
cudaMalloc((void**)&dev_idata, n * sizeof(int));
checkCUDAError("cudaMalloc dev_idata failed!");*/
cudaMalloc((void**)&dev_odata, n * sizeof(int));
checkCUDAError("cudaMalloc dev_odata failed!");
/*cudaMemcpy(dev_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice);
checkCUDAError("cudaMemcpy idata to dev_idata failed!");*/
thrust::device_vector<int> dev_thrust_idata(idata, idata + n);
thrust::device_vector<int> dev_thrust_odata(odata, odata + n);
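// The exclusive scan runs entirely on the device; the device_vector constructors above handle the host-to-device copies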
thrust::exclusive_scan(dev_thrust_idata.begin(), dev_thrust_idata.end(),
dev_thrust_odata.begin());
thrust::copy(dev_thrust_odata.begin(), dev_thrust_odata.end(), dev_odata);
cudaMemcpy(odata, dev_odata, n*sizeof(int), cudaMemcpyDeviceToHost);
checkCUDAError("cudaMemcpy dev_odata to odata failed!");
}
}
}
|
e9ad0672518a5df75bcedfc159fb8863241f16f2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "scan.cuh"
#include "segmented_scan_helpers.cuh"
#include "fill.cuh"
#include <contrib/libs/nvidia/cub/hipcub/hipcub.hpp>
namespace NKernel {
template <typename T, typename TOut>
hipError_t ScanVector(const T* input, TOut* output, ui32 size, bool inclusive, TScanKernelContext<T, TOut>& context, TCudaStream stream) {
using TKernelContext = TScanKernelContext<T, TOut>;
if (inclusive) {
return hipcub::DeviceScan::InclusiveSum(context.PartResults, context.NumParts, input, output, size, stream);
} else {
return hipcub::DeviceScan::ExclusiveSum(context.PartResults, context.NumParts, input, output, size, stream);
}
}
template <class T>
struct TToSignedConversion {
using TSignedType = T;
};
template <>
struct TToSignedConversion<ui32> {
using TSignedType = int;
};
template <typename T_, typename TOut_>
hipError_t SegmentedScanNonNegativeVector(const T_* input, TOut_* output, ui32 size, bool inclusive, TScanKernelContext<T_, TOut_>& context, TCudaStream stream) {
using TKernelContext = TScanKernelContext<T_, TOut_>;
using T = typename TToSignedConversion<T_>::TSignedType;
using TOut = typename TToSignedConversion<TOut_>::TSignedType;
T zeroValue = 0.0f;
if (inclusive) {
return hipcub::DeviceScan::InclusiveScan((TOut*)context.PartResults.Get(), context.NumParts, (const T*)input, (TOut*)output, TNonNegativeSegmentedSum(), size, stream);
} else {
return hipcub::DeviceScan::ExclusiveScan((TOut*)context.PartResults.Get(), context.NumParts, (const T*) input, (TOut*)output, TNonNegativeSegmentedSum(), zeroValue, size, stream);
}
}
template <typename T_>
hipError_t SegmentedScanAndScatterNonNegativeVector(const T_* input, const ui32* indices, T_* output,
ui32 size, bool inclusive,
TScanKernelContext<T_, T_>& context,
TCudaStream stream) {
using TKernelContext = TScanKernelContext<T_, T_>;
using T = typename TToSignedConversion<T_>::TSignedType;
if (inclusive) {
TNonNegativeSegmentedScanOutputIterator<cub::STORE_CS, T, ptrdiff_t, true> outputIterator((T*)output, indices, indices + size);
return hipcub::DeviceScan::InclusiveScan((T*)context.PartResults.Get(), context.NumParts, (const T*)input, outputIterator, TNonNegativeSegmentedSum(), size, stream);
} else {
TNonNegativeSegmentedScanOutputIterator<cub::STORE_CS, T, ptrdiff_t, false> outputIterator((T*)output, indices, indices + size);
FillBuffer<T>((T*)output, 0, size, stream);
return hipcub::DeviceScan::InclusiveScan((T*)context.PartResults.Get(), context.NumParts, (const T*) input, outputIterator, TNonNegativeSegmentedSum(), size, stream);
}
}
template <class T, class TOut>
ui64 ScanVectorTempSize(ui32 size, bool inclusive) {
ui64 sizeInBytes = 0;
if (inclusive) {
hipcub::DeviceScan::InclusiveSum<const T*, TOut*>(nullptr, sizeInBytes, nullptr, nullptr, size);
} else {
hipcub::DeviceScan::ExclusiveSum<const T*, TOut*>(nullptr, sizeInBytes, nullptr, nullptr, size);
}
return sizeInBytes;
}
#define SCAN_VECTOR(Type, TypeOut) \
template hipError_t ScanVector<Type, TypeOut>(const Type *input, TypeOut *output, ui32 size, bool inclusive, TScanKernelContext<Type, TypeOut>& context, TCudaStream stream); \
template hipError_t SegmentedScanNonNegativeVector<Type>(const Type *input, TypeOut *output, ui32 size, bool inclusive, TScanKernelContext<Type, TypeOut>& context, TCudaStream stream); \
template ui64 ScanVectorTempSize<Type, TypeOut>(ui32, bool);
SCAN_VECTOR(int, int)
SCAN_VECTOR(ui32, ui32)
SCAN_VECTOR(float, float)
SCAN_VECTOR(double, double)
SCAN_VECTOR(ui32, ui64)
#define SEGMENTED_SCAN_VECTOR(Type) \
template hipError_t SegmentedScanAndScatterNonNegativeVector<Type>(const Type *input, const ui32* indices, Type *output, ui32 size, bool inclusive, TScanKernelContext<Type, Type>& context, TCudaStream stream);
SEGMENTED_SCAN_VECTOR(int)
SEGMENTED_SCAN_VECTOR(ui32)
SEGMENTED_SCAN_VECTOR(float)
SEGMENTED_SCAN_VECTOR(double)
}
|
e9ad0672518a5df75bcedfc159fb8863241f16f2.cu
|
#include "scan.cuh"
#include "segmented_scan_helpers.cuh"
#include "fill.cuh"
#include <contrib/libs/nvidia/cub/cub/device/device_scan.cuh>
namespace NKernel {
template <typename T, typename TOut>
cudaError_t ScanVector(const T* input, TOut* output, ui32 size, bool inclusive, TScanKernelContext<T, TOut>& context, TCudaStream stream) {
using TKernelContext = TScanKernelContext<T, TOut>;
if (inclusive) {
return cub::DeviceScan::InclusiveSum(context.PartResults, context.NumParts, input, output, size, stream);
} else {
return cub::DeviceScan::ExclusiveSum(context.PartResults, context.NumParts, input, output, size, stream);
}
}
template <class T>
struct TToSignedConversion {
using TSignedType = T;
};
template <>
struct TToSignedConversion<ui32> {
using TSignedType = int;
};
template <typename T_, typename TOut_>
cudaError_t SegmentedScanNonNegativeVector(const T_* input, TOut_* output, ui32 size, bool inclusive, TScanKernelContext<T_, TOut_>& context, TCudaStream stream) {
using TKernelContext = TScanKernelContext<T_, TOut_>;
using T = typename TToSignedConversion<T_>::TSignedType;
using TOut = typename TToSignedConversion<TOut_>::TSignedType;
T zeroValue = 0.0f;
if (inclusive) {
return cub::DeviceScan::InclusiveScan((TOut*)context.PartResults.Get(), context.NumParts, (const T*)input, (TOut*)output, TNonNegativeSegmentedSum(), size, stream);
} else {
return cub::DeviceScan::ExclusiveScan((TOut*)context.PartResults.Get(), context.NumParts, (const T*) input, (TOut*)output, TNonNegativeSegmentedSum(), zeroValue, size, stream);
}
}
template <typename T_>
cudaError_t SegmentedScanAndScatterNonNegativeVector(const T_* input, const ui32* indices, T_* output,
ui32 size, bool inclusive,
TScanKernelContext<T_, T_>& context,
TCudaStream stream) {
using TKernelContext = TScanKernelContext<T_, T_>;
using T = typename TToSignedConversion<T_>::TSignedType;
if (inclusive) {
TNonNegativeSegmentedScanOutputIterator<cub::STORE_CS, T, ptrdiff_t, true> outputIterator((T*)output, indices, indices + size);
return cub::DeviceScan::InclusiveScan((T*)context.PartResults.Get(), context.NumParts, (const T*)input, outputIterator, TNonNegativeSegmentedSum(), size, stream);
} else {
TNonNegativeSegmentedScanOutputIterator<cub::STORE_CS, T, ptrdiff_t, false> outputIterator((T*)output, indices, indices + size);
FillBuffer<T>((T*)output, 0, size, stream);
return cub::DeviceScan::InclusiveScan((T*)context.PartResults.Get(), context.NumParts, (const T*) input, outputIterator, TNonNegativeSegmentedSum(), size, stream);
}
}
template <class T, class TOut>
ui64 ScanVectorTempSize(ui32 size, bool inclusive) {
ui64 sizeInBytes = 0;
if (inclusive) {
cub::DeviceScan::InclusiveSum<const T*, TOut*>(nullptr, sizeInBytes, nullptr, nullptr, size);
} else {
cub::DeviceScan::ExclusiveSum<const T*, TOut*>(nullptr, sizeInBytes, nullptr, nullptr, size);
}
return sizeInBytes;
}
#define SCAN_VECTOR(Type, TypeOut) \
template cudaError_t ScanVector<Type, TypeOut>(const Type *input, TypeOut *output, ui32 size, bool inclusive, TScanKernelContext<Type, TypeOut>& context, TCudaStream stream); \
template cudaError_t SegmentedScanNonNegativeVector<Type>(const Type *input, TypeOut *output, ui32 size, bool inclusive, TScanKernelContext<Type, TypeOut>& context, TCudaStream stream); \
template ui64 ScanVectorTempSize<Type, TypeOut>(ui32, bool);
SCAN_VECTOR(int, int)
SCAN_VECTOR(ui32, ui32)
SCAN_VECTOR(float, float)
SCAN_VECTOR(double, double)
SCAN_VECTOR(ui32, ui64)
#define SEGMENTED_SCAN_VECTOR(Type) \
template cudaError_t SegmentedScanAndScatterNonNegativeVector<Type>(const Type *input, const ui32* indices, Type *output, ui32 size, bool inclusive, TScanKernelContext<Type, Type>& context, TCudaStream stream);
SEGMENTED_SCAN_VECTOR(int)
SEGMENTED_SCAN_VECTOR(ui32)
SEGMENTED_SCAN_VECTOR(float)
SEGMENTED_SCAN_VECTOR(double)
}
|
c2a8a378f5929cc639c13bd1dcfe8f45664def33.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "gpuinflate.h"
#include <io/utilities/block_utils.cuh>
namespace cudf {
namespace io {
#define HASH_BITS 12
// TBD: Tentatively limits to 2-byte codes to prevent long copy search followed by long literal encoding
#define MAX_LITERAL_LENGTH 256
#define MAX_COPY_LENGTH 64 // Syntax limit
#define MAX_COPY_DISTANCE 32768 // Matches encoder limit as described in snappy format description
struct snap_state_s
{
const uint8_t *src;
uint32_t src_len;
uint8_t *dst_base;
uint8_t *dst;
uint8_t *end;
volatile uint32_t literal_length;
volatile uint32_t copy_length;
volatile uint32_t copy_distance;
uint16_t hash_map[1 << HASH_BITS];
};
static inline __device__ uint32_t snap_hash(uint32_t v)
{
return (v * ((1 << 20) + (0x2a00) + (0x6a) + 1)) >> (32 - HASH_BITS);
}
static inline __device__ uint32_t fetch4(const uint8_t *src)
{
uint32_t src_align = 3 & reinterpret_cast<uintptr_t>(src);
const uint32_t *src32 = reinterpret_cast<const uint32_t *>(src - src_align);
uint32_t v = src32[0];
return (src_align) ? __funnelshift_r(v, src32[1], src_align * 8) : v;
}
static __device__ uint8_t * StoreLiterals(uint8_t *dst, uint8_t *end, const uint8_t *src, uint32_t len_minus1, uint32_t t)
{
if (len_minus1 < 60)
{
if (!t && dst < end)
dst[0] = (len_minus1 << 2);
dst += 1;
}
else if (len_minus1 <= 0xff)
{
if (!t && dst + 1 < end)
{
dst[0] = 60 << 2;
dst[1] = len_minus1;
}
dst += 2;
}
else if (len_minus1 <= 0xffff)
{
if (!t && dst + 2 < end)
{
dst[0] = 61 << 2;
dst[1] = len_minus1;
dst[2] = len_minus1 >> 8;
}
dst += 3;
}
else if (len_minus1 <= 0xffffff)
{
if (!t && dst + 3 < end)
{
dst[0] = 62 << 2;
dst[1] = len_minus1;
dst[2] = len_minus1 >> 8;
dst[3] = len_minus1 >> 16;
}
dst += 4;
}
else
{
if (!t && dst + 4 < end)
{
dst[0] = 63 << 2;
dst[1] = len_minus1;
dst[2] = len_minus1 >> 8;
dst[3] = len_minus1 >> 16;
dst[4] = len_minus1 >> 24;
}
dst += 5;
}
for (uint32_t i = t; i <= len_minus1; i += 32)
{
if (dst + i < end)
dst[i] = src[i];
}
return dst + len_minus1 + 1;
}
static __device__ uint8_t * StoreCopy(uint8_t *dst, uint8_t *end, uint32_t copy_len, uint32_t distance)
{
if (copy_len < 12 && distance < 2048)
{
// xxxxxx01.oooooooo: copy with 3-bit length, 11-bit offset
if (dst + 2 <= end)
{
dst[0] = ((distance & 0x700) >> 3) | ((copy_len - 4) << 2) | 0x01;
dst[1] = distance;
}
return dst + 2;
}
else
{
// xxxxxx1x: copy with 6-bit length, 16-bit offset
if (dst + 3 <= end)
{
dst[0] = ((copy_len - 1) << 2) | 0x2;
dst[1] = distance;
dst[2] = distance >> 8;
}
return dst + 3;
}
}
static inline __device__ uint32_t HashMatchAny(uint32_t v, uint32_t t)
{
#if (__CUDA_ARCH__ >= 700)
return __match_any_sync(~0, v);
#else
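// Pre-Volta fallback: emulate __match_any_sync by ballotting each of the HASH_BITS bits across the warp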
uint32_t err_map = 0;
for (uint32_t i = 0; i < HASH_BITS; i++, v >>= 1)
{
uint32_t b = v & 1;
uint32_t match_b = BALLOT(b);
err_map |= match_b ^ -(int32_t)b;
}
return ~err_map;
#endif
}
static __device__ uint32_t FindFourByteMatch(snap_state_s *s, const uint8_t *src, uint32_t pos0, uint32_t t)
{
uint32_t len = s->src_len;
uint32_t pos = pos0;
uint32_t maxpos = pos0 + MAX_LITERAL_LENGTH - 31;
uint32_t match_mask, literal_cnt;
if (t == 0)
{
s->copy_length = 0;
}
do
{
bool valid4 = (pos + t + 4 <= len);
uint32_t data32 = (valid4) ? fetch4(src + pos + t) : 0;
uint32_t hash = (valid4) ? snap_hash(data32) : 0;
uint32_t local_match = HashMatchAny(hash, t);
uint32_t local_match_lane = 31 - __clz(local_match & ((1 << t) - 1));
uint32_t local_match_data = SHFL(data32, min(local_match_lane, t));
uint32_t offset, match;
if (valid4)
{
if (local_match_lane < t && local_match_data == data32)
{
match = 1;
offset = pos + local_match_lane;
}
else
{
offset = (pos & ~0xffff) | s->hash_map[hash];
if (offset >= pos)
{
offset = (offset >= 0x10000) ? offset - 0x10000 : pos;
}
match = (offset < pos && offset + MAX_COPY_DISTANCE >= pos + t && fetch4(src + offset) == data32);
}
}
else
{
match = 0;
local_match = 0;
offset = pos + t;
}
match_mask = BALLOT(match);
if (match_mask != 0)
{
literal_cnt = __ffs(match_mask) - 1;
if (t == literal_cnt)
{
s->copy_distance = pos + t - offset;
s->copy_length = 4;
}
}
else
{
literal_cnt = 32;
}
// Update hash up to the first 4 bytes of the copy length
local_match &= (0x2 << literal_cnt) - 1;
if (t <= literal_cnt && t == 31 - __clz(local_match))
{
s->hash_map[hash] = pos + t;
}
pos += literal_cnt;
} while (literal_cnt == 32 && pos < maxpos);
return min(pos, len) - pos0;
}
// @brief Returns the number of matching bytes for two byte sequences up to 63 bytes
static __device__ uint32_t Match60(const uint8_t *src1, const uint8_t *src2, uint32_t len, uint32_t t)
{
uint32_t mismatch = BALLOT(t >= len || src1[t] != src2[t]);
if (mismatch == 0)
{
mismatch = BALLOT(32 + t >= len || src1[32 + t] != src2[32 + t]);
return 31 + __ffs(mismatch); // mismatch cannot be zero here if len <= 63
}
else
{
return __ffs(mismatch) - 1;
}
}
// blockDim {128,1,1}
extern "C" __global__ void __launch_bounds__(128)
snap_kernel(gpu_inflate_input_s *inputs, gpu_inflate_status_s *outputs, int count)
{
__shared__ __align__(16) snap_state_s state_g;
snap_state_s * const s = &state_g;
uint32_t t = threadIdx.x;
uint32_t pos;
const uint8_t *src;
if (!t)
{
const uint8_t *src = reinterpret_cast<const uint8_t *>(inputs[blockIdx.x].srcDevice);
uint32_t src_len = static_cast<uint32_t>(inputs[blockIdx.x].srcSize);
uint8_t *dst = reinterpret_cast<uint8_t *>(inputs[blockIdx.x].dstDevice);
uint32_t dst_len = static_cast<uint32_t>(inputs[blockIdx.x].dstSize);
uint8_t *end = dst + dst_len;
s->src = src;
s->src_len = src_len;
s->dst_base = dst;
s->end = end;
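// Emit the uncompressed length as a little-endian base-128 varint (Snappy stream preamble)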
while (src_len > 0x7f)
{
if (dst < end)
{
dst[0] = src_len | 0x80;
}
dst++;
src_len >>= 7;
}
if (dst < end)
{
dst[0] = src_len;
}
s->dst = dst + 1;
s->literal_length = 0;
s->copy_length = 0;
s->copy_distance = 0;
}
for (uint32_t i = t; i < sizeof(s->hash_map) / sizeof(uint32_t); i += 128)
{
*reinterpret_cast<volatile uint32_t *>(&s->hash_map[i*2]) = 0;
}
__syncthreads();
src = s->src;
pos = 0;
while (pos < s->src_len)
{
uint32_t literal_len = s->literal_length;
uint32_t copy_len = s->copy_length;
uint32_t distance = s->copy_distance;
__syncthreads();
if (t < 32)
{
// WARP0: Encode literals and copies
uint8_t *dst = s->dst;
uint8_t *end = s->end;
if (literal_len > 0)
{
dst = StoreLiterals(dst, end, src + pos, literal_len - 1, t);
pos += literal_len;
}
if (copy_len > 0)
{
if (t == 0)
{
dst = StoreCopy(dst, end, copy_len, distance);
}
pos += copy_len;
}
SYNCWARP();
if (t == 0)
{
s->dst = dst;
}
}
else
{
pos += literal_len + copy_len;
if (t < 32 * 2)
{
// WARP1: Find a match using 12-bit hashes of 4-byte blocks
uint32_t t5 = t & 0x1f;
literal_len = FindFourByteMatch(s, src, pos, t5);
if (t5 == 0)
{
s->literal_length = literal_len;
}
copy_len = s->copy_length;
if (copy_len != 0)
{
uint32_t match_pos = pos + literal_len + copy_len; // NOTE: copy_len is always 4 here
copy_len += Match60(src + match_pos, src + match_pos - s->copy_distance, min(s->src_len - match_pos, 64 - copy_len), t5);
if (t5 == 0)
{
s->copy_length = copy_len;
}
}
}
}
__syncthreads();
}
__syncthreads();
if (!t)
{
outputs[blockIdx.x].bytes_written = s->dst - s->dst_base;
outputs[blockIdx.x].status = (s->dst > s->end) ? 1 : 0;
outputs[blockIdx.x].reserved = 0;
}
}
hipError_t __host__ gpu_snap(gpu_inflate_input_s *inputs, gpu_inflate_status_s *outputs, int count, hipStream_t stream)
{
dim3 dim_block(128, 1); // 4 warps per stream, 1 stream per block
dim3 dim_grid(count, 1);
if (count > 0)
{
hipLaunchKernelGGL(( snap_kernel) , dim3(dim_grid), dim3(dim_block), 0, stream , inputs, outputs, count);
}
return hipSuccess;
}
} // namespace io
} // namespace cudf
|
c2a8a378f5929cc639c13bd1dcfe8f45664def33.cu
|
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "gpuinflate.h"
#include <io/utilities/block_utils.cuh>
namespace cudf {
namespace io {
#define HASH_BITS 12
// TBD: Tentatively limits to 2-byte codes to prevent long copy search followed by long literal encoding
#define MAX_LITERAL_LENGTH 256
#define MAX_COPY_LENGTH 64 // Syntax limit
#define MAX_COPY_DISTANCE 32768 // Matches encoder limit as described in snappy format description
struct snap_state_s
{
const uint8_t *src;
uint32_t src_len;
uint8_t *dst_base;
uint8_t *dst;
uint8_t *end;
volatile uint32_t literal_length;
volatile uint32_t copy_length;
volatile uint32_t copy_distance;
uint16_t hash_map[1 << HASH_BITS];
};
static inline __device__ uint32_t snap_hash(uint32_t v)
{
return (v * ((1 << 20) + (0x2a00) + (0x6a) + 1)) >> (32 - HASH_BITS);
}
static inline __device__ uint32_t fetch4(const uint8_t *src)
{
uint32_t src_align = 3 & reinterpret_cast<uintptr_t>(src);
const uint32_t *src32 = reinterpret_cast<const uint32_t *>(src - src_align);
uint32_t v = src32[0];
return (src_align) ? __funnelshift_r(v, src32[1], src_align * 8) : v;
}
static __device__ uint8_t * StoreLiterals(uint8_t *dst, uint8_t *end, const uint8_t *src, uint32_t len_minus1, uint32_t t)
{
if (len_minus1 < 60)
{
if (!t && dst < end)
dst[0] = (len_minus1 << 2);
dst += 1;
}
else if (len_minus1 <= 0xff)
{
if (!t && dst + 1 < end)
{
dst[0] = 60 << 2;
dst[1] = len_minus1;
}
dst += 2;
}
else if (len_minus1 <= 0xffff)
{
if (!t && dst + 2 < end)
{
dst[0] = 61 << 2;
dst[1] = len_minus1;
dst[2] = len_minus1 >> 8;
}
dst += 3;
}
else if (len_minus1 <= 0xffffff)
{
if (!t && dst + 3 < end)
{
dst[0] = 62 << 2;
dst[1] = len_minus1;
dst[2] = len_minus1 >> 8;
dst[3] = len_minus1 >> 16;
}
dst += 4;
}
else
{
if (!t && dst + 4 < end)
{
dst[0] = 63 << 2;
dst[1] = len_minus1;
dst[2] = len_minus1 >> 8;
dst[3] = len_minus1 >> 16;
dst[4] = len_minus1 >> 24;
}
dst += 5;
}
for (uint32_t i = t; i <= len_minus1; i += 32)
{
if (dst + i < end)
dst[i] = src[i];
}
return dst + len_minus1 + 1;
}
static __device__ uint8_t * StoreCopy(uint8_t *dst, uint8_t *end, uint32_t copy_len, uint32_t distance)
{
if (copy_len < 12 && distance < 2048)
{
// xxxxxx01.oooooooo: copy with 3-bit length, 11-bit offset
if (dst + 2 <= end)
{
dst[0] = ((distance & 0x700) >> 3) | ((copy_len - 4) << 2) | 0x01;
dst[1] = distance;
}
return dst + 2;
}
else
{
// xxxxxx1x: copy with 6-bit length, 16-bit offset
if (dst + 3 <= end)
{
dst[0] = ((copy_len - 1) << 2) | 0x2;
dst[1] = distance;
dst[2] = distance >> 8;
}
return dst + 3;
}
}
static inline __device__ uint32_t HashMatchAny(uint32_t v, uint32_t t)
{
#if (__CUDA_ARCH__ >= 700)
return __match_any_sync(~0, v);
#else
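// Pre-Volta fallback: emulate __match_any_sync by ballotting each of the HASH_BITS bits across the warp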
uint32_t err_map = 0;
for (uint32_t i = 0; i < HASH_BITS; i++, v >>= 1)
{
uint32_t b = v & 1;
uint32_t match_b = BALLOT(b);
err_map |= match_b ^ -(int32_t)b;
}
return ~err_map;
#endif
}
static __device__ uint32_t FindFourByteMatch(snap_state_s *s, const uint8_t *src, uint32_t pos0, uint32_t t)
{
uint32_t len = s->src_len;
uint32_t pos = pos0;
uint32_t maxpos = pos0 + MAX_LITERAL_LENGTH - 31;
uint32_t match_mask, literal_cnt;
if (t == 0)
{
s->copy_length = 0;
}
do
{
bool valid4 = (pos + t + 4 <= len);
uint32_t data32 = (valid4) ? fetch4(src + pos + t) : 0;
uint32_t hash = (valid4) ? snap_hash(data32) : 0;
uint32_t local_match = HashMatchAny(hash, t);
uint32_t local_match_lane = 31 - __clz(local_match & ((1 << t) - 1));
uint32_t local_match_data = SHFL(data32, min(local_match_lane, t));
uint32_t offset, match;
if (valid4)
{
if (local_match_lane < t && local_match_data == data32)
{
match = 1;
offset = pos + local_match_lane;
}
else
{
offset = (pos & ~0xffff) | s->hash_map[hash];
if (offset >= pos)
{
offset = (offset >= 0x10000) ? offset - 0x10000 : pos;
}
match = (offset < pos && offset + MAX_COPY_DISTANCE >= pos + t && fetch4(src + offset) == data32);
}
}
else
{
match = 0;
local_match = 0;
offset = pos + t;
}
match_mask = BALLOT(match);
if (match_mask != 0)
{
literal_cnt = __ffs(match_mask) - 1;
if (t == literal_cnt)
{
s->copy_distance = pos + t - offset;
s->copy_length = 4;
}
}
else
{
literal_cnt = 32;
}
// Update hash up to the first 4 bytes of the copy length
local_match &= (0x2 << literal_cnt) - 1;
if (t <= literal_cnt && t == 31 - __clz(local_match))
{
s->hash_map[hash] = pos + t;
}
pos += literal_cnt;
} while (literal_cnt == 32 && pos < maxpos);
return min(pos, len) - pos0;
}
// @brief Returns the number of matching bytes for two byte sequences up to 63 bytes
static __device__ uint32_t Match60(const uint8_t *src1, const uint8_t *src2, uint32_t len, uint32_t t)
{
uint32_t mismatch = BALLOT(t >= len || src1[t] != src2[t]);
if (mismatch == 0)
{
mismatch = BALLOT(32 + t >= len || src1[32 + t] != src2[32 + t]);
return 31 + __ffs(mismatch); // mismatch cannot be zero here if len <= 63
}
else
{
return __ffs(mismatch) - 1;
}
}
// blockDim {128,1,1}
extern "C" __global__ void __launch_bounds__(128)
snap_kernel(gpu_inflate_input_s *inputs, gpu_inflate_status_s *outputs, int count)
{
__shared__ __align__(16) snap_state_s state_g;
snap_state_s * const s = &state_g;
uint32_t t = threadIdx.x;
uint32_t pos;
const uint8_t *src;
if (!t)
{
const uint8_t *src = reinterpret_cast<const uint8_t *>(inputs[blockIdx.x].srcDevice);
uint32_t src_len = static_cast<uint32_t>(inputs[blockIdx.x].srcSize);
uint8_t *dst = reinterpret_cast<uint8_t *>(inputs[blockIdx.x].dstDevice);
uint32_t dst_len = static_cast<uint32_t>(inputs[blockIdx.x].dstSize);
uint8_t *end = dst + dst_len;
s->src = src;
s->src_len = src_len;
s->dst_base = dst;
s->end = end;
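// Emit the uncompressed length as a little-endian base-128 varint (Snappy stream preamble)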
while (src_len > 0x7f)
{
if (dst < end)
{
dst[0] = src_len | 0x80;
}
dst++;
src_len >>= 7;
}
if (dst < end)
{
dst[0] = src_len;
}
s->dst = dst + 1;
s->literal_length = 0;
s->copy_length = 0;
s->copy_distance = 0;
}
for (uint32_t i = t; i < sizeof(s->hash_map) / sizeof(uint32_t); i += 128)
{
*reinterpret_cast<volatile uint32_t *>(&s->hash_map[i*2]) = 0;
}
__syncthreads();
src = s->src;
pos = 0;
while (pos < s->src_len)
{
uint32_t literal_len = s->literal_length;
uint32_t copy_len = s->copy_length;
uint32_t distance = s->copy_distance;
__syncthreads();
if (t < 32)
{
// WARP0: Encode literals and copies
uint8_t *dst = s->dst;
uint8_t *end = s->end;
if (literal_len > 0)
{
dst = StoreLiterals(dst, end, src + pos, literal_len - 1, t);
pos += literal_len;
}
if (copy_len > 0)
{
if (t == 0)
{
dst = StoreCopy(dst, end, copy_len, distance);
}
pos += copy_len;
}
SYNCWARP();
if (t == 0)
{
s->dst = dst;
}
}
else
{
pos += literal_len + copy_len;
if (t < 32 * 2)
{
// WARP1: Find a match using 12-bit hashes of 4-byte blocks
uint32_t t5 = t & 0x1f;
literal_len = FindFourByteMatch(s, src, pos, t5);
if (t5 == 0)
{
s->literal_length = literal_len;
}
copy_len = s->copy_length;
if (copy_len != 0)
{
uint32_t match_pos = pos + literal_len + copy_len; // NOTE: copy_len is always 4 here
copy_len += Match60(src + match_pos, src + match_pos - s->copy_distance, min(s->src_len - match_pos, 64 - copy_len), t5);
if (t5 == 0)
{
s->copy_length = copy_len;
}
}
}
}
__syncthreads();
}
__syncthreads();
if (!t)
{
outputs[blockIdx.x].bytes_written = s->dst - s->dst_base;
outputs[blockIdx.x].status = (s->dst > s->end) ? 1 : 0;
outputs[blockIdx.x].reserved = 0;
}
}
cudaError_t __host__ gpu_snap(gpu_inflate_input_s *inputs, gpu_inflate_status_s *outputs, int count, cudaStream_t stream)
{
dim3 dim_block(128, 1); // 4 warps per stream, 1 stream per block
dim3 dim_grid(count, 1);
if (count > 0)
{
snap_kernel <<< dim_grid, dim_block, 0, stream >>>(inputs, outputs, count);
}
return cudaSuccess;
}
} // namespace io
} // namespace cudf
|
351bceb938fb3b710ff1b9a457258227f7c01701.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "uplo_linear_frac.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int sd = 1;
const int unit = 1;
const int bottom = 1;
const REAL *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
const int offset_a = 1;
const int ld_a = 1;
const REAL *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
const int offset_b = 1;
const int ld_b = 1;
const REAL scalea = 1;
const REAL shifta = 1;
const REAL scaleb = 1;
const REAL shiftb = 1;
REAL *c = NULL;
hipMalloc(&c, XSIZE*YSIZE);
const int offset_c = 1;
const int ld_c = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
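// Round the launch dimensions up to multiples of the block size so the grid covers the whole matrix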
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(( uplo_linear_frac), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b,scalea,shifta,scaleb,shiftb,c,offset_c,ld_c);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(( uplo_linear_frac), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b,scalea,shifta,scaleb,shiftb,c,offset_c,ld_c);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(( uplo_linear_frac), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b,scalea,shifta,scaleb,shiftb,c,offset_c,ld_c);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
351bceb938fb3b710ff1b9a457258227f7c01701.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "uplo_linear_frac.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int sd = 1;
const int unit = 1;
const int bottom = 1;
const REAL *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
const int offset_a = 1;
const int ld_a = 1;
const REAL *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
const int offset_b = 1;
const int ld_b = 1;
const REAL scalea = 1;
const REAL shifta = 1;
const REAL scaleb = 1;
const REAL shiftb = 1;
REAL *c = NULL;
cudaMalloc(&c, XSIZE*YSIZE);
const int offset_c = 1;
const int ld_c = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
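// Round the launch dimensions up to multiples of the block size so the grid covers the whole matrix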
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
uplo_linear_frac<<<gridBlock,threadBlock>>>(sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b,scalea,shifta,scaleb,shiftb,c,offset_c,ld_c);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
uplo_linear_frac<<<gridBlock,threadBlock>>>(sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b,scalea,shifta,scaleb,shiftb,c,offset_c,ld_c);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
uplo_linear_frac<<<gridBlock,threadBlock>>>(sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b,scalea,shifta,scaleb,shiftb,c,offset_c,ld_c);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
76e5e43ae0fe4d03096ce5fa802e5cef8e390a68.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
#define N 100000000
__global__ void daxpy(int n, double alpha, double *x, double *y) {
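// Grid-stride loop: each thread strides across the array so any grid size covers all n elements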
for (int idx = blockIdx.x * blockDim.x + threadIdx.x;
idx < n;
idx += blockDim.x * gridDim.x) {
y[idx] += alpha * x[idx];
}
}
|
76e5e43ae0fe4d03096ce5fa802e5cef8e390a68.cu
|
#include "includes.h"
#define N 100000000
__global__ void daxpy(int n, double alpha, double *x, double *y) {
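// Grid-stride loop: each thread strides across the array so any grid size covers all n elements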
for (int idx = blockIdx.x * blockDim.x + threadIdx.x;
idx < n;
idx += blockDim.x * gridDim.x) {
y[idx] += alpha * x[idx];
}
}
|
b63138d3745a169dae214198b2020ccb115ccde3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <time.h>
#define PerThread 1024*16 // number of series terms computed per thread
#define N 64*256*1024*16 // total number of series terms summed to approximate PI
#define BlockNum 64 // number of blocks
#define ThreadNum 256 // threads per block
__global__ void Gpu_calPI(double* Gpu_list)
{
int tid=blockIdx.x*blockDim.x*blockDim.y+threadIdx.x;
int begin=tid*PerThread+1;
int end=begin+PerThread;
double temp=0;
int flag=1;
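// Accumulate this thread's share of the Leibniz series: pi/4 = 1 - 1/3 + 1/5 - 1/7 + ...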
for(int i=begin;i<end;i++){
temp+=flag*(1.0/(2*i-1));
flag=flag*(-1);
}
Gpu_list[tid]=temp;
}
int main(void)
{
double * cpu_list;
double * Gpu_list;
double outcome=0;
cpu_list=(double*)malloc(sizeof(double)*BlockNum*ThreadNum);
hipMalloc((void**)&Gpu_list,sizeof(double)*BlockNum*ThreadNum);
// dim3 blocksize=dim3(1,ThreadNum);
// dim3 gridsize=dim3(1,BlockNum);
double begin=clock();
hipLaunchKernelGGL(( Gpu_calPI), dim3(BlockNum),dim3(ThreadNum), 0, 0, Gpu_list);
hipMemcpy(cpu_list,Gpu_list,sizeof(double)*BlockNum*ThreadNum,hipMemcpyDeviceToHost);
for(int i=0;i<BlockNum*ThreadNum;i++){
outcome+=cpu_list[i];
}
outcome=4*outcome;
double end=clock();
printf("Cu2: N=%d, outcome=%.10f,time spend %.10f\n",N,outcome,(end-begin)/(CLOCKS_PER_SEC));
// printf("block x=%d,y=%d\n",blocksize.x,blocksize.y);
// printf("grid x=%d,y=%d\n",gridsize.x,gridsize.y);
}
|
b63138d3745a169dae214198b2020ccb115ccde3.cu
|
#include <stdio.h>
#include <time.h>
#define PerThread 1024*16 // number of series terms computed per thread
#define N 64*256*1024*16 // total number of series terms summed to approximate PI
#define BlockNum 64 // number of blocks
#define ThreadNum 256 // threads per block
__global__ void Gpu_calPI(double* Gpu_list)
{
int tid=blockIdx.x*blockDim.x*blockDim.y+threadIdx.x;
int begin=tid*PerThread+1;
int end=begin+PerThread;
double temp=0;
int flag=1;
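// Accumulate this thread's share of the Leibniz series: pi/4 = 1 - 1/3 + 1/5 - 1/7 + ...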
for(int i=begin;i<end;i++){
temp+=flag*(1.0/(2*i-1));
flag=flag*(-1);
}
Gpu_list[tid]=temp;
}
int main(void)
{
double * cpu_list;
double * Gpu_list;
double outcome=0;
cpu_list=(double*)malloc(sizeof(double)*BlockNum*ThreadNum);
cudaMalloc((void**)&Gpu_list,sizeof(double)*BlockNum*ThreadNum);
// dim3 blocksize=dim3(1,ThreadNum);
// dim3 gridsize=dim3(1,BlockNum);
double begin=clock();
Gpu_calPI<<<BlockNum,ThreadNum>>>(Gpu_list);
cudaMemcpy(cpu_list,Gpu_list,sizeof(double)*BlockNum*ThreadNum,cudaMemcpyDeviceToHost);
for(int i=0;i<BlockNum*ThreadNum;i++){
outcome+=cpu_list[i];
}
outcome=4*outcome;
double end=clock();
printf("Cu2: N=%d, outcome=%.10f,time spend %.10f\n",N,outcome,(end-begin)/(CLOCKS_PER_SEC));
// printf("block x=%d,y=%d\n",blocksize.x,blocksize.y);
// printf("grid x=%d,y=%d\n",gridsize.x,gridsize.y);
}
|
7347327e84227bd2acddfcd9c43fb883d1609d2f.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* The MIT License (MIT)
*
* Copyright (c) 2015 Kyle Hollins Wray, University of Massachusetts
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <nova/pomdp/utilities/pomdp_model_gpu.h>
#include <stdio.h>
#include <nova/error_codes.h>
#include <nova/constants.h>
namespace nova {
int pomdp_initialize_gpu(POMDP *pomdp)
{
if (pomdp == nullptr) {
fprintf(stderr, "Error[pomdp_initialize_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
int result = 0;
result += pomdp_initialize_successors_gpu(pomdp);
result += pomdp_initialize_state_transitions_gpu(pomdp);
result += pomdp_initialize_observation_transitions_gpu(pomdp);
result += pomdp_initialize_rewards_gpu(pomdp);
if (pomdp->r > 0) {
result += pomdp_initialize_nonzero_beliefs_gpu(pomdp);
result += pomdp_initialize_belief_points_gpu(pomdp);
}
return result;
}
int pomdp_uninitialize_gpu(POMDP *pomdp)
{
if (pomdp == nullptr) {
fprintf(stderr, "Error[pomdp_uninitialize_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
int result = 0;
result += pomdp_uninitialize_successors_gpu(pomdp);
result += pomdp_uninitialize_state_transitions_gpu(pomdp);
result += pomdp_uninitialize_observation_transitions_gpu(pomdp);
result += pomdp_uninitialize_rewards_gpu(pomdp);
if (pomdp->r > 0) {
result += pomdp_uninitialize_nonzero_beliefs_gpu(pomdp);
result += pomdp_uninitialize_belief_points_gpu(pomdp);
}
return result;
}
int pomdp_initialize_successors_gpu(POMDP *pomdp)
{
// Ensure the data is valid.
if (pomdp == nullptr || pomdp->n == 0 || pomdp->m == 0 || pomdp->ns == 0 || pomdp->S == nullptr) {
fprintf(stderr, "Error[pomdp_initialize_successors_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
// Allocate the memory on the device.
if (hipMalloc(&pomdp->d_S, pomdp->n * pomdp->m * pomdp->ns * sizeof(int)) != hipSuccess) {
fprintf(stderr, "Error[pomdp_initialize_successors_gpu]: %s\n",
"Failed to allocate device-side memory for the successor states.");
return NOVA_ERROR_DEVICE_MALLOC;
}
// Copy the data from the host to the device.
if (hipMemcpy(pomdp->d_S, pomdp->S, pomdp->n * pomdp->m * pomdp->ns * sizeof(int),
hipMemcpyHostToDevice) != hipSuccess) {
fprintf(stderr, "Error[pomdp_initialize_successors_gpu]: %s\n",
"Failed to copy memory from host to device for the successor states.");
return NOVA_ERROR_MEMCPY_TO_DEVICE;
}
return NOVA_SUCCESS;
}
int pomdp_uninitialize_successors_gpu(POMDP *pomdp)
{
if (pomdp == nullptr) {
fprintf(stderr, "Error[pomdp_uninitialize_successors_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
if (pomdp->d_S != nullptr) {
if (hipFree(pomdp->d_S) != hipSuccess) {
fprintf(stderr, "Error[pomdp_uninitialize_successors_gpu]: %s\n",
"Failed to free device-side memory for the successor states.");
return NOVA_ERROR_DEVICE_FREE;
}
}
pomdp->d_S = nullptr;
return NOVA_SUCCESS;
}
int pomdp_initialize_state_transitions_gpu(POMDP *pomdp)
{
// Ensure the data is valid.
if (pomdp == nullptr || pomdp->n == 0 || pomdp->m == 0 || pomdp->ns == 0 || pomdp->T == nullptr) {
fprintf(stderr, "Error[pomdp_initialize_state_transitions_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
// Allocate the memory on the device.
if (hipMalloc(&pomdp->d_T, pomdp->n * pomdp->m * pomdp->ns * sizeof(float)) != hipSuccess) {
fprintf(stderr, "Error[pomdp_initialize_state_transitions_gpu]: %s\n",
"Failed to allocate device-side memory for the state transitions.");
return NOVA_ERROR_DEVICE_MALLOC;
}
// Copy the data from the host to the device.
if (hipMemcpy(pomdp->d_T, pomdp->T, pomdp->n * pomdp->m * pomdp->ns * sizeof(float), hipMemcpyHostToDevice) != hipSuccess) {
fprintf(stderr, "Error[nova_pomdp_pbvi_initialize_state_transitions]: %s\n",
"Failed to copy memory from host to device for the state transitions.");
return NOVA_ERROR_MEMCPY_TO_DEVICE;
}
return NOVA_SUCCESS;
}
int pomdp_uninitialize_state_transitions_gpu(POMDP *pomdp)
{
if (pomdp == nullptr) {
fprintf(stderr, "Error[pomdp_uninitialize_state_transitions_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
if (pomdp->d_T != nullptr) {
if (hipFree(pomdp->d_T) != hipSuccess) {
fprintf(stderr, "Error[pomdp_uninitialize_state_transitions_gpu]: %s\n",
"Failed to free device-side memory for the state transitions.");
return NOVA_ERROR_DEVICE_FREE;
}
}
pomdp->d_T = nullptr;
return NOVA_SUCCESS;
}
int pomdp_initialize_observation_transitions_gpu(POMDP *pomdp)
{
// Ensure the data is valid.
if (pomdp == nullptr || pomdp->n == 0 || pomdp->m == 0 || pomdp->z == 0 || pomdp->O == nullptr) {
fprintf(stderr, "Error[pomdp_initialize_observation_transitions_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
// Allocate the memory on the device.
if (hipMalloc(&pomdp->d_O, pomdp->m * pomdp->n * pomdp->z * sizeof(float)) != hipSuccess) {
fprintf(stderr, "Error[pomdp_initialize_observation_transitions_gpu]: %s\n",
"Failed to allocate device-side memory for the observation transitions.");
return NOVA_ERROR_DEVICE_MALLOC;
}
// Copy the data from the host to the device.
if (hipMemcpy(pomdp->d_O, pomdp->O, pomdp->m * pomdp->n * pomdp->z * sizeof(float), hipMemcpyHostToDevice) != hipSuccess) {
fprintf(stderr, "Error[pomdp_initialize_observation_transitions_gpu]: %s\n",
"Failed to copy memory from host to device for the observation transitions.");
return NOVA_ERROR_MEMCPY_TO_DEVICE;
}
return NOVA_SUCCESS;
}
int pomdp_uninitialize_observation_transitions_gpu(POMDP *pomdp)
{
if (pomdp == nullptr) {
fprintf(stderr, "Error[pomdp_uninitialize_observation_transitions_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
if (pomdp->d_O != nullptr) {
if (hipFree(pomdp->d_O) != hipSuccess) {
fprintf(stderr, "Error[pomdp_uninitialize_observation_transitions_gpu]: %s\n",
"Failed to free device-side memory for the observation transitions.");
return NOVA_ERROR_DEVICE_FREE;
}
}
pomdp->d_O = nullptr;
return NOVA_SUCCESS;
}
int pomdp_initialize_rewards_gpu(POMDP *pomdp)
{
// Ensure the data is valid.
if (pomdp == nullptr || pomdp->n == 0 || pomdp->m == 0 || pomdp->R == nullptr) {
fprintf(stderr, "Error[pomdp_initialize_rewards_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
// Allocate the memory on the device.
if (hipMalloc(&pomdp->d_R, pomdp->n * pomdp->m * sizeof(float)) != hipSuccess) {
fprintf(stderr, "Error[pomdp_initialize_rewards_gpu]: %s\n",
"Failed to allocate device-side memory for the rewards.");
return NOVA_ERROR_DEVICE_MALLOC;
}
// Copy the data from the host to the device.
if (hipMemcpy(pomdp->d_R, pomdp->R, pomdp->n * pomdp->m * sizeof(float), hipMemcpyHostToDevice) != hipSuccess) {
fprintf(stderr, "Error[pomdp_initialize_rewards_gpu]: %s\n",
"Failed to copy memory from host to device for the rewards.");
return NOVA_ERROR_MEMCPY_TO_DEVICE;
}
return NOVA_SUCCESS;
}
int pomdp_uninitialize_rewards_gpu(POMDP *pomdp)
{
if (pomdp == nullptr) {
fprintf(stderr, "Error[pomdp_uninitialize_rewards_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
if (pomdp->d_R != nullptr) {
if (hipFree(pomdp->d_R) != hipSuccess) {
fprintf(stderr, "Error[pomdp_uninitialize_rewards_gpu]: %s\n",
"Failed to free device-side memory for the rewards.");
return NOVA_ERROR_DEVICE_FREE;
}
}
pomdp->d_R = nullptr;
return NOVA_SUCCESS;
}
int pomdp_initialize_nonzero_beliefs_gpu(POMDP *pomdp)
{
// Ensure the data is valid.
if (pomdp == nullptr || pomdp->r == 0 || pomdp->rz == 0 || pomdp->Z == nullptr) {
fprintf(stderr, "Error[pomdp_initialize_nonzero_beliefs_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
// Allocate the memory on the device.
if (hipMalloc(&pomdp->d_Z, pomdp->r * pomdp->rz * sizeof(int)) != hipSuccess) {
fprintf(stderr, "Error[pomdp_initialize_nonzero_beliefs_gpu]: %s\n",
"Failed to allocate device-side memory for the non-zero belief states.");
return NOVA_ERROR_DEVICE_MALLOC;
}
// Copy the data from the host to the device.
if (hipMemcpy(pomdp->d_Z, pomdp->Z, pomdp->r * pomdp->rz * sizeof(int),
hipMemcpyHostToDevice) != hipSuccess) {
fprintf(stderr, "Error[pomdp_initialize_nonzero_beliefs_gpu]: %s\n",
"Failed to copy memory from host to device for the non-zero belief states.");
return NOVA_ERROR_MEMCPY_TO_DEVICE;
}
return NOVA_SUCCESS;
}
int pomdp_uninitialize_nonzero_beliefs_gpu(POMDP *pomdp)
{
if (pomdp == nullptr) {
fprintf(stderr, "Error[pomdp_uninitialize_nonzero_beliefs_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
if (pomdp->d_Z != nullptr) {
if (hipFree(pomdp->d_Z) != hipSuccess) {
fprintf(stderr, "Error[pomdp_uninitialize_nonzero_beliefs_gpu]: %s\n",
"Failed to free device-side memory for the non-zero belief states.");
return NOVA_ERROR_DEVICE_FREE;
}
}
pomdp->d_Z = nullptr;
return NOVA_SUCCESS;
}
int pomdp_initialize_belief_points_gpu(POMDP *pomdp)
{
// Ensure the data is valid.
if (pomdp == nullptr || pomdp->r == 0 || pomdp->rz == 0 || pomdp->B == nullptr) {
fprintf(stderr, "Error[pomdp_initialize_belief_points_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
// Allocate the memory on the device.
if (hipMalloc(&pomdp->d_B, pomdp->r * pomdp->rz * sizeof(float)) != hipSuccess) {
fprintf(stderr, "Error[pomdp_initialize_belief_points_gpu]: %s\n",
"Failed to allocate device-side memory for the belief points.");
return NOVA_ERROR_DEVICE_MALLOC;
}
// Copy the data from the host to the device.
if (hipMemcpy(pomdp->d_B, pomdp->B, pomdp->r * pomdp->rz * sizeof(float), hipMemcpyHostToDevice) != hipSuccess) {
fprintf(stderr, "Error[pomdp_initialize_belief_points_gpu]: %s\n",
"Failed to copy memory from host to device for the belief points.");
return NOVA_ERROR_MEMCPY_TO_DEVICE;
}
return NOVA_SUCCESS;
}
int pomdp_uninitialize_belief_points_gpu(POMDP *pomdp)
{
if (pomdp == nullptr) {
fprintf(stderr, "Error[pomdp_uninitialize_belief_points_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
if (pomdp->d_B != nullptr) {
if (hipFree(pomdp->d_B) != hipSuccess) {
fprintf(stderr, "Error[pomdp_uninitialize_belief_points_gpu]: %s\n",
"Failed to free device-side memory for the belief points.");
return NOVA_ERROR_DEVICE_FREE;
}
}
pomdp->d_B = nullptr;
return NOVA_SUCCESS;
}
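// Usage sketch (host side), assuming the POMDP struct has already been populated
// on the host with n, m, ns, z, S, T, O, R (and r, rz, Z, B when belief data is used):
//     POMDP pomdp;
//     /* ... fill the host-side model ... */
//     if (pomdp_initialize_gpu(&pomdp) != NOVA_SUCCESS) { /* handle the error */ }
//     /* ... run a GPU solver against the device-side copies ... */
//     pomdp_uninitialize_gpu(&pomdp);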
}; // namespace nova
|
7347327e84227bd2acddfcd9c43fb883d1609d2f.cu
|
/**
* The MIT License (MIT)
*
* Copyright (c) 2015 Kyle Hollins Wray, University of Massachusetts
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <nova/pomdp/utilities/pomdp_model_gpu.h>
#include <stdio.h>
#include <nova/error_codes.h>
#include <nova/constants.h>
namespace nova {
int pomdp_initialize_gpu(POMDP *pomdp)
{
if (pomdp == nullptr) {
fprintf(stderr, "Error[pomdp_initialize_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
int result = 0;
result += pomdp_initialize_successors_gpu(pomdp);
result += pomdp_initialize_state_transitions_gpu(pomdp);
result += pomdp_initialize_observation_transitions_gpu(pomdp);
result += pomdp_initialize_rewards_gpu(pomdp);
if (pomdp->r > 0) {
result += pomdp_initialize_nonzero_beliefs_gpu(pomdp);
result += pomdp_initialize_belief_points_gpu(pomdp);
}
return result;
}
int pomdp_uninitialize_gpu(POMDP *pomdp)
{
if (pomdp == nullptr) {
fprintf(stderr, "Error[pomdp_uninitialize_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
int result = 0;
result += pomdp_uninitialize_successors_gpu(pomdp);
result += pomdp_uninitialize_state_transitions_gpu(pomdp);
result += pomdp_uninitialize_observation_transitions_gpu(pomdp);
result += pomdp_uninitialize_rewards_gpu(pomdp);
if (pomdp->r > 0) {
result += pomdp_uninitialize_nonzero_beliefs_gpu(pomdp);
result += pomdp_uninitialize_belief_points_gpu(pomdp);
}
return result;
}
int pomdp_initialize_successors_gpu(POMDP *pomdp)
{
// Ensure the data is valid.
if (pomdp == nullptr || pomdp->n == 0 || pomdp->m == 0 || pomdp->ns == 0 || pomdp->S == nullptr) {
fprintf(stderr, "Error[pomdp_initialize_successors_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
// Allocate the memory on the device.
if (cudaMalloc(&pomdp->d_S, pomdp->n * pomdp->m * pomdp->ns * sizeof(int)) != cudaSuccess) {
fprintf(stderr, "Error[pomdp_initialize_successors_gpu]: %s\n",
"Failed to allocate device-side memory for the successor states.");
return NOVA_ERROR_DEVICE_MALLOC;
}
// Copy the data from the host to the device.
if (cudaMemcpy(pomdp->d_S, pomdp->S, pomdp->n * pomdp->m * pomdp->ns * sizeof(int),
cudaMemcpyHostToDevice) != cudaSuccess) {
fprintf(stderr, "Error[pomdp_initialize_successors_gpu]: %s\n",
"Failed to copy memory from host to device for the successor states.");
return NOVA_ERROR_MEMCPY_TO_DEVICE;
}
return NOVA_SUCCESS;
}
int pomdp_uninitialize_successors_gpu(POMDP *pomdp)
{
if (pomdp == nullptr) {
fprintf(stderr, "Error[pomdp_uninitialize_successors_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
if (pomdp->d_S != nullptr) {
if (cudaFree(pomdp->d_S) != cudaSuccess) {
fprintf(stderr, "Error[pomdp_uninitialize_successors_gpu]: %s\n",
"Failed to free device-side memory for the successor states.");
return NOVA_ERROR_DEVICE_FREE;
}
}
pomdp->d_S = nullptr;
return NOVA_SUCCESS;
}
int pomdp_initialize_state_transitions_gpu(POMDP *pomdp)
{
// Ensure the data is valid.
if (pomdp == nullptr || pomdp->n == 0 || pomdp->m == 0 || pomdp->ns == 0 || pomdp->T == nullptr) {
fprintf(stderr, "Error[pomdp_initialize_state_transitions_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
// Allocate the memory on the device.
if (cudaMalloc(&pomdp->d_T, pomdp->n * pomdp->m * pomdp->ns * sizeof(float)) != cudaSuccess) {
fprintf(stderr, "Error[pomdp_initialize_state_transitions_gpu]: %s\n",
"Failed to allocate device-side memory for the state transitions.");
return NOVA_ERROR_DEVICE_MALLOC;
}
// Copy the data from the host to the device.
if (cudaMemcpy(pomdp->d_T, pomdp->T, pomdp->n * pomdp->m * pomdp->ns * sizeof(float), cudaMemcpyHostToDevice) != cudaSuccess) {
fprintf(stderr, "Error[nova_pomdp_pbvi_initialize_state_transitions]: %s\n",
"Failed to copy memory from host to device for the state transitions.");
return NOVA_ERROR_MEMCPY_TO_DEVICE;
}
return NOVA_SUCCESS;
}
int pomdp_uninitialize_state_transitions_gpu(POMDP *pomdp)
{
if (pomdp == nullptr) {
fprintf(stderr, "Error[pomdp_uninitialize_state_transitions_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
if (pomdp->d_T != nullptr) {
if (cudaFree(pomdp->d_T) != cudaSuccess) {
fprintf(stderr, "Error[pomdp_uninitialize_state_transitions_gpu]: %s\n",
"Failed to free device-side memory for the state transitions.");
return NOVA_ERROR_DEVICE_FREE;
}
}
pomdp->d_T = nullptr;
return NOVA_SUCCESS;
}
int pomdp_initialize_observation_transitions_gpu(POMDP *pomdp)
{
// Ensure the data is valid.
if (pomdp == nullptr || pomdp->n == 0 || pomdp->m == 0 || pomdp->z == 0 || pomdp->O == nullptr) {
fprintf(stderr, "Error[pomdp_initialize_observation_transitions_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
// Allocate the memory on the device.
if (cudaMalloc(&pomdp->d_O, pomdp->m * pomdp->n * pomdp->z * sizeof(float)) != cudaSuccess) {
fprintf(stderr, "Error[pomdp_initialize_observation_transitions_gpu]: %s\n",
"Failed to allocate device-side memory for the observation transitions.");
return NOVA_ERROR_DEVICE_MALLOC;
}
// Copy the data from the host to the device.
if (cudaMemcpy(pomdp->d_O, pomdp->O, pomdp->m * pomdp->n * pomdp->z * sizeof(float), cudaMemcpyHostToDevice) != cudaSuccess) {
fprintf(stderr, "Error[pomdp_initialize_observation_transitions_gpu]: %s\n",
"Failed to copy memory from host to device for the observation transitions.");
return NOVA_ERROR_MEMCPY_TO_DEVICE;
}
return NOVA_SUCCESS;
}
int pomdp_uninitialize_observation_transitions_gpu(POMDP *pomdp)
{
if (pomdp == nullptr) {
fprintf(stderr, "Error[pomdp_uninitialize_observation_transitions_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
if (pomdp->d_O != nullptr) {
if (cudaFree(pomdp->d_O) != cudaSuccess) {
fprintf(stderr, "Error[pomdp_uninitialize_observation_transitions_gpu]: %s\n",
"Failed to free device-side memory for the observation transitions.");
return NOVA_ERROR_DEVICE_FREE;
}
}
pomdp->d_O = nullptr;
return NOVA_SUCCESS;
}
int pomdp_initialize_rewards_gpu(POMDP *pomdp)
{
// Ensure the data is valid.
if (pomdp == nullptr || pomdp->n == 0 || pomdp->m == 0 || pomdp->R == nullptr) {
fprintf(stderr, "Error[pomdp_initialize_rewards_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
// Allocate the memory on the device.
if (cudaMalloc(&pomdp->d_R, pomdp->n * pomdp->m * sizeof(float)) != cudaSuccess) {
fprintf(stderr, "Error[pomdp_initialize_rewards_gpu]: %s\n",
"Failed to allocate device-side memory for the rewards.");
return NOVA_ERROR_DEVICE_MALLOC;
}
// Copy the data from the host to the device.
if (cudaMemcpy(pomdp->d_R, pomdp->R, pomdp->n * pomdp->m * sizeof(float), cudaMemcpyHostToDevice) != cudaSuccess) {
fprintf(stderr, "Error[pomdp_initialize_rewards_gpu]: %s\n",
"Failed to copy memory from host to device for the rewards.");
return NOVA_ERROR_MEMCPY_TO_DEVICE;
}
return NOVA_SUCCESS;
}
int pomdp_uninitialize_rewards_gpu(POMDP *pomdp)
{
if (pomdp == nullptr) {
fprintf(stderr, "Error[pomdp_uninitialize_rewards_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
if (pomdp->d_R != nullptr) {
if (cudaFree(pomdp->d_R) != cudaSuccess) {
fprintf(stderr, "Error[pomdp_uninitialize_rewards_gpu]: %s\n",
"Failed to free device-side memory for the rewards.");
return NOVA_ERROR_DEVICE_FREE;
}
}
pomdp->d_R = nullptr;
return NOVA_SUCCESS;
}
int pomdp_initialize_nonzero_beliefs_gpu(POMDP *pomdp)
{
// Ensure the data is valid.
if (pomdp == nullptr || pomdp->r == 0 || pomdp->rz == 0 || pomdp->Z == nullptr) {
fprintf(stderr, "Error[pomdp_initialize_nonzero_beliefs_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
// Allocate the memory on the device.
if (cudaMalloc(&pomdp->d_Z, pomdp->r * pomdp->rz * sizeof(int)) != cudaSuccess) {
fprintf(stderr, "Error[pomdp_initialize_nonzero_beliefs_gpu]: %s\n",
"Failed to allocate device-side memory for the non-zero belief states.");
return NOVA_ERROR_DEVICE_MALLOC;
}
// Copy the data from the host to the device.
if (cudaMemcpy(pomdp->d_Z, pomdp->Z, pomdp->r * pomdp->rz * sizeof(int),
cudaMemcpyHostToDevice) != cudaSuccess) {
fprintf(stderr, "Error[pomdp_initialize_nonzero_beliefs_gpu]: %s\n",
"Failed to copy memory from host to device for the non-zero belief states.");
return NOVA_ERROR_MEMCPY_TO_DEVICE;
}
return NOVA_SUCCESS;
}
int pomdp_uninitialize_nonzero_beliefs_gpu(POMDP *pomdp)
{
if (pomdp == nullptr) {
fprintf(stderr, "Error[pomdp_uninitialize_nonzero_beliefs_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
if (pomdp->d_Z != nullptr) {
if (cudaFree(pomdp->d_Z) != cudaSuccess) {
fprintf(stderr, "Error[pomdp_uninitialize_nonzero_beliefs_gpu]: %s\n",
"Failed to free device-side memory for the non-zero belief states.");
return NOVA_ERROR_DEVICE_FREE;
}
}
pomdp->d_Z = nullptr;
return NOVA_SUCCESS;
}
int pomdp_initialize_belief_points_gpu(POMDP *pomdp)
{
// Ensure the data is valid.
if (pomdp == nullptr || pomdp->r == 0 || pomdp->rz == 0 || pomdp->B == nullptr) {
fprintf(stderr, "Error[pomdp_initialize_belief_points_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
// Allocate the memory on the device.
if (cudaMalloc(&pomdp->d_B, pomdp->r * pomdp->rz * sizeof(float)) != cudaSuccess) {
fprintf(stderr, "Error[pomdp_initialize_belief_points_gpu]: %s\n",
"Failed to allocate device-side memory for the belief points.");
return NOVA_ERROR_DEVICE_MALLOC;
}
// Copy the data from the host to the device.
if (cudaMemcpy(pomdp->d_B, pomdp->B, pomdp->r * pomdp->rz * sizeof(float), cudaMemcpyHostToDevice) != cudaSuccess) {
fprintf(stderr, "Error[pomdp_initialize_belief_points_gpu]: %s\n",
"Failed to copy memory from host to device for the belief points.");
return NOVA_ERROR_MEMCPY_TO_DEVICE;
}
return NOVA_SUCCESS;
}
int pomdp_uninitialize_belief_points_gpu(POMDP *pomdp)
{
if (pomdp == nullptr) {
fprintf(stderr, "Error[pomdp_uninitialize_belief_points_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
if (pomdp->d_B != nullptr) {
if (cudaFree(pomdp->d_B) != cudaSuccess) {
fprintf(stderr, "Error[pomdp_uninitialize_belief_points_gpu]: %s\n",
"Failed to free device-side memory for the belief points.");
return NOVA_ERROR_DEVICE_FREE;
}
}
pomdp->d_B = nullptr;
return NOVA_SUCCESS;
}
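// Usage sketch (host side), assuming the POMDP struct has already been populated
// on the host with n, m, ns, z, S, T, O, R (and r, rz, Z, B when belief data is used):
//     POMDP pomdp;
//     /* ... fill the host-side model ... */
//     if (pomdp_initialize_gpu(&pomdp) != NOVA_SUCCESS) { /* handle the error */ }
//     /* ... run a GPU solver against the device-side copies ... */
//     pomdp_uninitialize_gpu(&pomdp);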
}; // namespace nova
|
ed27ee8a158e19d621736692dd684fe2de0e87e2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file sat.cu
* @brief CUDA device code for GPU-Efficient Summed-Area Tables
* @author Andre Maximo
* @date September, 2011
*/
//== INCLUDES =================================================================
#include <symbol.h>
#include <dvector.h>
#include <gpufilter.h>
#include <gpuconsts.cuh>
#include <sat.cuh>
//== NAMESPACES ===============================================================
namespace gpufilter {
//== IMPLEMENTATION ===========================================================
//-- Algorithm SAT Stage 1 ----------------------------------------------------
__global__ __launch_bounds__( WS * SOW, MBO )
void algSAT_stage1( const float *g_in,
float *g_ybar,
float *g_vhat ) {
const int tx = threadIdx.x, ty = threadIdx.y,
bx = blockIdx.x, by = blockIdx.y, col = bx*WS+tx, row0 = by*WS;
__shared__ float s_block[ WS ][ WS+1 ];
float (*bdata)[WS+1] = (float (*)[WS+1]) &s_block[ty][tx];
g_in += (row0+ty)*c_width+col;
g_ybar += by*c_width+col;
g_vhat += bx*c_height+row0+tx;
#pragma unroll
for (int i = 0; i < WS-(WS%SOW); i+=SOW) {
**bdata = *g_in;
bdata += SOW;
g_in += SOW * c_width;
}
if( ty < WS%SOW ) {
**bdata = *g_in;
}
__syncthreads();
if( ty == 0 ) {
{ // calculate ybar -----------------------
float (*bdata)[WS+1] = (float (*)[WS+1]) &s_block[0][tx];
float prev = **bdata;
++bdata;
#pragma unroll
for (int i = 1; i < WS; ++i, ++bdata)
**bdata = prev = **bdata + prev;
*g_ybar = prev;
}
{ // calculate vhat -----------------------
float *bdata = s_block[tx];
float prev = *bdata;
++bdata;
#pragma unroll
for (int i = 1; i < WS; ++i, ++bdata)
prev = *bdata + prev;
*g_vhat = prev;
}
}
}
//-- Algorithm SAT Stage 2 ----------------------------------------------------
__global__ __launch_bounds__( WS * MW, MBO )
void algSAT_stage2( float *g_ybar,
float *g_ysum ) {
const int tx = threadIdx.x, ty = threadIdx.y,
bx = blockIdx.x, col0 = bx*MW+ty, col = col0*WS+tx;
if( col >= c_width ) return;
g_ybar += col;
float y = *g_ybar;
int ln = HWS+tx;
if( tx == WS-1 )
g_ysum += col0;
volatile __shared__ float s_block[ MW ][ HWS+WS+1 ];
if( tx < HWS ) s_block[ty][tx] = 0.f;
else s_block[ty][ln] = 0.f;
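// The loop below performs a Kogge-Stone style inclusive scan over the 32 values
// held by this warp (offsets 1, 2, 4, 8, 16); the zero-filled first HWS entries
// make the out-of-range reads harmless, and the last lane ends up with the total.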
for (int n = 1; n < c_n_size; ++n) {
// calculate ysum -----------------------
s_block[ty][ln] = y;
s_block[ty][ln] += s_block[ty][ln-1];
s_block[ty][ln] += s_block[ty][ln-2];
s_block[ty][ln] += s_block[ty][ln-4];
s_block[ty][ln] += s_block[ty][ln-8];
s_block[ty][ln] += s_block[ty][ln-16];
if( tx == WS-1 ) {
*g_ysum = s_block[ty][ln];
g_ysum += c_m_size;
}
// fix ybar -> y -------------------------
g_ybar += c_width;
y = *g_ybar += y;
}
}
//-- Algorithm SAT Stage 3 ----------------------------------------------------
__global__ __launch_bounds__( WS * MW, MBO )
void algSAT_stage3( const float *g_ysum,
float *g_vhat ) {
const int tx = threadIdx.x, ty = threadIdx.y,
by = blockIdx.y, row0 = by*MW+ty, row = row0*WS+tx;
if( row >= c_height ) return;
g_vhat += row;
float y = 0.f, v = 0.f;
if( row0 > 0 )
g_ysum += (row0-1)*c_m_size;
for (int m = 0; m < c_m_size; ++m) {
// fix vhat -> v -------------------------
if( row0 > 0 ) {
y = *g_ysum;
g_ysum += 1;
}
v = *g_vhat += v + y;
g_vhat += c_height;
}
}
//-- Algorithm SAT Stage 4 ----------------------------------------------------
__global__ __launch_bounds__( WS * SOW, MBO )
void algSAT_stage4( float *g_inout,
const float *g_y,
const float *g_v ) {
const int tx = threadIdx.x, ty = threadIdx.y,
bx = blockIdx.x, by = blockIdx.y, col = bx*WS+tx, row0 = by*WS;
__shared__ float s_block[ WS ][ WS+1 ];
float (*bdata)[WS+1] = (float (*)[WS+1]) &s_block[ty][tx];
g_inout += (row0+ty)*c_width+col;
if( by > 0 ) g_y += (by-1)*c_width+col;
if( bx > 0 ) g_v += (bx-1)*c_height+row0+tx;
#pragma unroll
for (int i = 0; i < WS-(WS%SOW); i+=SOW) {
**bdata = *g_inout;
bdata += SOW;
g_inout += SOW * c_width;
}
if( ty < WS%SOW ) {
**bdata = *g_inout;
}
__syncthreads();
if( ty == 0 ) {
{ // calculate y -----------------------
float (*bdata)[WS+1] = (float (*)[WS+1]) &s_block[0][tx];
float prev;
if( by > 0 ) prev = *g_y;
else prev = 0.f;
#pragma unroll
for (int i = 0; i < WS; ++i, ++bdata)
**bdata = prev = **bdata + prev;
}
{ // calculate x -----------------------
float *bdata = s_block[tx];
float prev;
if( bx > 0 ) prev = *g_v;
else prev = 0.f;
#pragma unroll
for (int i = 0; i < WS; ++i, ++bdata)
*bdata = prev = *bdata + prev;
}
}
__syncthreads();
bdata = (float (*)[WS+1]) &s_block[ty][tx];
g_inout -= (WS-(WS%SOW))*c_width;
#pragma unroll
for (int i = 0; i < WS-(WS%SOW); i+=SOW) {
*g_inout = **bdata;
bdata += SOW;
g_inout += SOW * c_width;
}
if( ty < WS%SOW ) {
*g_inout = **bdata;
}
}
//-- Algorithm SAT Stage 4 (not-in-place) -------------------------------------
__global__ __launch_bounds__( WS * SOW, MBO )
void algSAT_stage4( float *g_out,
const float *g_in,
const float *g_y,
const float *g_v ) {
const int tx = threadIdx.x, ty = threadIdx.y, bx = blockIdx.x, by = blockIdx.y, col = bx*WS+tx, row0 = by*WS;
__shared__ float s_block[ WS ][ WS+1 ];
float (*bdata)[WS+1] = (float (*)[WS+1]) &s_block[ty][tx];
g_in += (row0+ty)*c_width+col;
if( by > 0 ) g_y += (by-1)*c_width+col;
if( bx > 0 ) g_v += (bx-1)*c_height+row0+tx;
#pragma unroll
for (int i = 0; i < WS-(WS%SOW); i+=SOW) {
**bdata = *g_in;
bdata += SOW;
g_in += SOW * c_width;
}
if( ty < WS%SOW ) {
**bdata = *g_in;
}
__syncthreads();
if( ty == 0 ) {
{ // calculate y -----------------------
float (*bdata)[WS+1] = (float (*)[WS+1]) &s_block[0][tx];
float prev;
if( by > 0 ) prev = *g_y;
else prev = 0.f;
#pragma unroll
for (int i = 0; i < WS; ++i, ++bdata)
**bdata = prev = **bdata + prev;
}
{ // calculate x -----------------------
float *bdata = s_block[tx];
float prev;
if( bx > 0 ) prev = *g_v;
else prev = 0.f;
#pragma unroll
for (int i = 0; i < WS; ++i, ++bdata)
*bdata = prev = *bdata + prev;
}
}
__syncthreads();
bdata = (float (*)[WS+1]) &s_block[ty][tx];
g_out += (row0+ty)*c_width+col;
#pragma unroll
for (int i = 0; i < WS-(WS%SOW); i+=SOW) {
*g_out = **bdata;
bdata += SOW;
g_out += SOW * c_width;
}
if( ty < WS%SOW ) {
*g_out = **bdata;
}
}
__global__ __launch_bounds__( WS * WS/4, MBO )
void algSAT_box( float *g_out,
float *g_sat,
float *g_in,
const int box_filter_radius)
{
const int tx = threadIdx.x, ty = threadIdx.y, bx = blockIdx.x, by = blockIdx.y;
#pragma unroll
for (int y=0; y<4; y++) {
int col = bx*WS+tx;
int row = by*WS+(4*ty+y);
int z = box_filter_radius;
int w = c_width-1;
int h = c_height-1;
int row_a = max(0,row-z-1), col_a = max(0,col-z-1);
int row_b = min(h,row+z), col_b = max(0,col-z-1);
int row_c = max(0,row-z-1), col_c = min(w,col+z);
int row_d = min(h,row+z), col_d = min(w,col+z);
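// SAT box-filter identity: the sum over the (2z+1)x(2z+1) window around (row, col)
// is sat(d) + sat(a) - sat(b) - sat(c), where a lies just above-left of the window
// and b, c, d are the bottom-left, top-right and bottom-right corners.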
float u = g_in [ row*c_width+col ];
float a = g_sat[ row_a*c_width+col_a ];
float b = g_sat[ row_b*c_width+col_b ];
float c = g_sat[ row_c*c_width+col_c ];
float d = g_sat[ row_d*c_width+col_d ];
if (row<z+1 || row>c_height-z-1 || col<z+1 || col>c_width-z-1) {
g_out[row*c_width+col] = u;
} else {
g_out[row*c_width+col] = (a+d-c-b) / ((2*z+1)*(2*z+1));
}
}
}
//-- Host ---------------------------------------------------------------------
__host__
void prepare_algSAT( alg_setup& algs,
dvector<float>& d_inout,
dvector<float>& d_ybar,
dvector<float>& d_vhat,
dvector<float>& d_ysum,
const float *h_in,
const int& w,
const int& h ) {
algs.width = w;
algs.height = h;
if( w % 32 > 0 ) algs.width += (32 - (w % 32));
if( h % 32 > 0 ) algs.height += (32 - (h % 32));
calc_alg_setup( algs, algs.width, algs.height );
up_alg_setup( algs );
d_inout.copy_from( h_in, w, h, algs.width, algs.height );
d_ybar.resize( algs.n_size * algs.width );
d_vhat.resize( algs.m_size * algs.height );
d_ysum.resize( algs.m_size * algs.n_size );
}
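// The SAT is built in four passes: stage 1 computes per-block partial column sums
// (ybar) and row sums (vhat); stage 2 turns ybar into completed block-row carries
// and the per-block totals ysum; stage 3 uses ysum to complete vhat into the
// block-column carries; stage 4 re-scans each block seeded with those carries.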
__host__
void algSAT( dvector<float>& d_out,
dvector<float>& d_ybar,
dvector<float>& d_vhat,
dvector<float>& d_ysum,
const dvector<float>& d_in,
const alg_setup& algs ) {
const int nWm = (algs.width+MTS-1)/MTS, nHm = (algs.height+MTS-1)/MTS;
const dim3 cg_img( algs.m_size, algs.n_size );
const dim3 cg_ybar( nWm, 1 );
const dim3 cg_vhat( 1, nHm );
hipLaunchKernelGGL(( algSAT_stage1), dim3(cg_img), dim3(dim3(WS, SOW)) , 0, 0, d_in, d_ybar, d_vhat );
hipLaunchKernelGGL(( algSAT_stage2), dim3(cg_ybar), dim3(dim3(WS, MW)) , 0, 0, d_ybar, d_ysum );
hipLaunchKernelGGL(( algSAT_stage3), dim3(cg_vhat), dim3(dim3(WS, MW)) , 0, 0, d_ysum, d_vhat );
hipLaunchKernelGGL(( algSAT_stage4), dim3(cg_img), dim3(dim3(WS, SOW)) , 0, 0, d_out, d_in, d_ybar, d_vhat );
}
__host__
void algSAT( dvector<float>& d_inout,
dvector<float>& d_ybar,
dvector<float>& d_vhat,
dvector<float>& d_ysum,
const alg_setup& algs ) {
const int nWm = (algs.width+MTS-1)/MTS, nHm = (algs.height+MTS-1)/MTS;
const dim3 cg_img( algs.m_size, algs.n_size );
const dim3 cg_ybar( nWm, 1 );
const dim3 cg_vhat( 1, nHm );
hipLaunchKernelGGL(( algSAT_stage1), dim3(cg_img), dim3(dim3(WS, SOW)) , 0, 0, d_inout, d_ybar, d_vhat );
hipLaunchKernelGGL(( algSAT_stage2), dim3(cg_ybar), dim3(dim3(WS, MW)) , 0, 0, d_ybar, d_ysum );
hipLaunchKernelGGL(( algSAT_stage3), dim3(cg_vhat), dim3(dim3(WS, MW)) , 0, 0, d_ysum, d_vhat );
hipLaunchKernelGGL(( algSAT_stage4), dim3(cg_img), dim3(dim3(WS, SOW)) , 0, 0, d_inout, d_ybar, d_vhat );
}
__host__
void algSAT( float *h_inout,
const int& w,
const int& h ) {
alg_setup algs;
dvector<float> d_out, d_ybar, d_vhat, d_ysum;
prepare_algSAT( algs, d_out, d_ybar, d_vhat, d_ysum, h_inout, w, h );
algSAT( d_out, d_ybar, d_vhat, d_ysum, algs );
d_out.copy_to( h_inout, algs.width, algs.height, w, h );
}
__host__
void algBox( const int& box_filter_radius,
dvector<float>& d_tmp,
dvector<float>& d_box,
dvector<float>& d_ybar,
dvector<float>& d_vhat,
dvector<float>& d_ysum,
dvector<float>& d_in,
const alg_setup& algs ) {
const int nWm = (algs.width+MTS-1)/MTS, nHm = (algs.height+MTS-1)/MTS;
const dim3 cg_img( algs.m_size, algs.n_size );
const dim3 cg_ybar( nWm, 1 );
const dim3 cg_vhat( 1, nHm );
hipLaunchKernelGGL(( algSAT_stage1), dim3(cg_img), dim3(dim3(WS, SOW)) , 0, 0, d_in, d_ybar, d_vhat );
hipLaunchKernelGGL(( algSAT_stage2), dim3(cg_ybar), dim3(dim3(WS, MW)) , 0, 0, d_ybar, d_ysum );
hipLaunchKernelGGL(( algSAT_stage3), dim3(cg_vhat), dim3(dim3(WS, MW)) , 0, 0, d_ysum, d_vhat );
hipLaunchKernelGGL(( algSAT_stage4), dim3(cg_img), dim3(dim3(WS, SOW)) , 0, 0, d_tmp, d_in, d_ybar, d_vhat );
hipLaunchKernelGGL(( algSAT_box) , dim3(cg_img), dim3(dim3(WS, WS/4)) , 0, 0, d_box, d_tmp, d_in, box_filter_radius);
}
//=============================================================================
} // namespace gpufilter
//=============================================================================
|
ed27ee8a158e19d621736692dd684fe2de0e87e2.cu
|
/**
* @file sat.cu
* @brief CUDA device code for GPU-Efficient Summed-Area Tables
* @author Andre Maximo
* @date September, 2011
*/
//== INCLUDES =================================================================
#include <symbol.h>
#include <dvector.h>
#include <gpufilter.h>
#include <gpuconsts.cuh>
#include <sat.cuh>
//== NAMESPACES ===============================================================
namespace gpufilter {
//== IMPLEMENTATION ===========================================================
//-- Algorithm SAT Stage 1 ----------------------------------------------------
__global__ __launch_bounds__( WS * SOW, MBO )
void algSAT_stage1( const float *g_in,
float *g_ybar,
float *g_vhat ) {
const int tx = threadIdx.x, ty = threadIdx.y,
bx = blockIdx.x, by = blockIdx.y, col = bx*WS+tx, row0 = by*WS;
__shared__ float s_block[ WS ][ WS+1 ];
float (*bdata)[WS+1] = (float (*)[WS+1]) &s_block[ty][tx];
g_in += (row0+ty)*c_width+col;
g_ybar += by*c_width+col;
g_vhat += bx*c_height+row0+tx;
#pragma unroll
for (int i = 0; i < WS-(WS%SOW); i+=SOW) {
**bdata = *g_in;
bdata += SOW;
g_in += SOW * c_width;
}
if( ty < WS%SOW ) {
**bdata = *g_in;
}
__syncthreads();
if( ty == 0 ) {
{ // calculate ybar -----------------------
float (*bdata)[WS+1] = (float (*)[WS+1]) &s_block[0][tx];
float prev = **bdata;
++bdata;
#pragma unroll
for (int i = 1; i < WS; ++i, ++bdata)
**bdata = prev = **bdata + prev;
*g_ybar = prev;
}
{ // calculate vhat -----------------------
float *bdata = s_block[tx];
float prev = *bdata;
++bdata;
#pragma unroll
for (int i = 1; i < WS; ++i, ++bdata)
prev = *bdata + prev;
*g_vhat = prev;
}
}
}
//-- Algorithm SAT Stage 2 ----------------------------------------------------
__global__ __launch_bounds__( WS * MW, MBO )
void algSAT_stage2( float *g_ybar,
float *g_ysum ) {
const int tx = threadIdx.x, ty = threadIdx.y,
bx = blockIdx.x, col0 = bx*MW+ty, col = col0*WS+tx;
if( col >= c_width ) return;
g_ybar += col;
float y = *g_ybar;
int ln = HWS+tx;
if( tx == WS-1 )
g_ysum += col0;
volatile __shared__ float s_block[ MW ][ HWS+WS+1 ];
if( tx < HWS ) s_block[ty][tx] = 0.f;
else s_block[ty][ln] = 0.f;
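// The loop below performs a Kogge-Stone style inclusive scan over the 32 values
// held by this warp (offsets 1, 2, 4, 8, 16); the zero-filled first HWS entries
// make the out-of-range reads harmless, and the last lane ends up with the total.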
for (int n = 1; n < c_n_size; ++n) {
// calculate ysum -----------------------
s_block[ty][ln] = y;
s_block[ty][ln] += s_block[ty][ln-1];
s_block[ty][ln] += s_block[ty][ln-2];
s_block[ty][ln] += s_block[ty][ln-4];
s_block[ty][ln] += s_block[ty][ln-8];
s_block[ty][ln] += s_block[ty][ln-16];
if( tx == WS-1 ) {
*g_ysum = s_block[ty][ln];
g_ysum += c_m_size;
}
// fix ybar -> y -------------------------
g_ybar += c_width;
y = *g_ybar += y;
}
}
//-- Algorithm SAT Stage 3 ----------------------------------------------------
__global__ __launch_bounds__( WS * MW, MBO )
void algSAT_stage3( const float *g_ysum,
float *g_vhat ) {
const int tx = threadIdx.x, ty = threadIdx.y,
by = blockIdx.y, row0 = by*MW+ty, row = row0*WS+tx;
if( row >= c_height ) return;
g_vhat += row;
float y = 0.f, v = 0.f;
if( row0 > 0 )
g_ysum += (row0-1)*c_m_size;
for (int m = 0; m < c_m_size; ++m) {
// fix vhat -> v -------------------------
if( row0 > 0 ) {
y = *g_ysum;
g_ysum += 1;
}
v = *g_vhat += v + y;
g_vhat += c_height;
}
}
//-- Algorithm SAT Stage 4 ----------------------------------------------------
__global__ __launch_bounds__( WS * SOW, MBO )
void algSAT_stage4( float *g_inout,
const float *g_y,
const float *g_v ) {
const int tx = threadIdx.x, ty = threadIdx.y,
bx = blockIdx.x, by = blockIdx.y, col = bx*WS+tx, row0 = by*WS;
__shared__ float s_block[ WS ][ WS+1 ];
float (*bdata)[WS+1] = (float (*)[WS+1]) &s_block[ty][tx];
g_inout += (row0+ty)*c_width+col;
if( by > 0 ) g_y += (by-1)*c_width+col;
if( bx > 0 ) g_v += (bx-1)*c_height+row0+tx;
#pragma unroll
for (int i = 0; i < WS-(WS%SOW); i+=SOW) {
**bdata = *g_inout;
bdata += SOW;
g_inout += SOW * c_width;
}
if( ty < WS%SOW ) {
**bdata = *g_inout;
}
__syncthreads();
if( ty == 0 ) {
{ // calculate y -----------------------
float (*bdata)[WS+1] = (float (*)[WS+1]) &s_block[0][tx];
float prev;
if( by > 0 ) prev = *g_y;
else prev = 0.f;
#pragma unroll
for (int i = 0; i < WS; ++i, ++bdata)
**bdata = prev = **bdata + prev;
}
{ // calculate x -----------------------
float *bdata = s_block[tx];
float prev;
if( bx > 0 ) prev = *g_v;
else prev = 0.f;
#pragma unroll
for (int i = 0; i < WS; ++i, ++bdata)
*bdata = prev = *bdata + prev;
}
}
__syncthreads();
bdata = (float (*)[WS+1]) &s_block[ty][tx];
g_inout -= (WS-(WS%SOW))*c_width;
#pragma unroll
for (int i = 0; i < WS-(WS%SOW); i+=SOW) {
*g_inout = **bdata;
bdata += SOW;
g_inout += SOW * c_width;
}
if( ty < WS%SOW ) {
*g_inout = **bdata;
}
}
//-- Algorithm SAT Stage 4 (not-in-place) -------------------------------------
__global__ __launch_bounds__( WS * SOW, MBO )
void algSAT_stage4( float *g_out,
const float *g_in,
const float *g_y,
const float *g_v ) {
const int tx = threadIdx.x, ty = threadIdx.y, bx = blockIdx.x, by = blockIdx.y, col = bx*WS+tx, row0 = by*WS;
__shared__ float s_block[ WS ][ WS+1 ];
float (*bdata)[WS+1] = (float (*)[WS+1]) &s_block[ty][tx];
g_in += (row0+ty)*c_width+col;
if( by > 0 ) g_y += (by-1)*c_width+col;
if( bx > 0 ) g_v += (bx-1)*c_height+row0+tx;
#pragma unroll
for (int i = 0; i < WS-(WS%SOW); i+=SOW) {
**bdata = *g_in;
bdata += SOW;
g_in += SOW * c_width;
}
if( ty < WS%SOW ) {
**bdata = *g_in;
}
__syncthreads();
if( ty == 0 ) {
{ // calculate y -----------------------
float (*bdata)[WS+1] = (float (*)[WS+1]) &s_block[0][tx];
float prev;
if( by > 0 ) prev = *g_y;
else prev = 0.f;
#pragma unroll
for (int i = 0; i < WS; ++i, ++bdata)
**bdata = prev = **bdata + prev;
}
{ // calculate x -----------------------
float *bdata = s_block[tx];
float prev;
if( bx > 0 ) prev = *g_v;
else prev = 0.f;
#pragma unroll
for (int i = 0; i < WS; ++i, ++bdata)
*bdata = prev = *bdata + prev;
}
}
__syncthreads();
bdata = (float (*)[WS+1]) &s_block[ty][tx];
g_out += (row0+ty)*c_width+col;
#pragma unroll
for (int i = 0; i < WS-(WS%SOW); i+=SOW) {
*g_out = **bdata;
bdata += SOW;
g_out += SOW * c_width;
}
if( ty < WS%SOW ) {
*g_out = **bdata;
}
}
__global__ __launch_bounds__( WS * WS/4, MBO )
void algSAT_box( float *g_out,
float *g_sat,
float *g_in,
const int box_filter_radius)
{
const int tx = threadIdx.x, ty = threadIdx.y, bx = blockIdx.x, by = blockIdx.y;
#pragma unroll
for (int y=0; y<4; y++) {
int col = bx*WS+tx;
int row = by*WS+(4*ty+y);
int z = box_filter_radius;
int w = c_width-1;
int h = c_height-1;
int row_a = max(0,row-z-1), col_a = max(0,col-z-1);
int row_b = min(h,row+z), col_b = max(0,col-z-1);
int row_c = max(0,row-z-1), col_c = min(w,col+z);
int row_d = min(h,row+z), col_d = min(w,col+z);
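// SAT box-filter identity: the sum over the (2z+1)x(2z+1) window around (row, col)
// is sat(d) + sat(a) - sat(b) - sat(c), where a lies just above-left of the window
// and b, c, d are the bottom-left, top-right and bottom-right corners.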
float u = g_in [ row*c_width+col ];
float a = g_sat[ row_a*c_width+col_a ];
float b = g_sat[ row_b*c_width+col_b ];
float c = g_sat[ row_c*c_width+col_c ];
float d = g_sat[ row_d*c_width+col_d ];
if (row<z+1 || row>c_height-z-1 || col<z+1 || col>c_width-z-1) {
g_out[row*c_width+col] = u;
} else {
g_out[row*c_width+col] = (a+d-c-b) / ((2*z+1)*(2*z+1));
}
}
}
//-- Host ---------------------------------------------------------------------
__host__
void prepare_algSAT( alg_setup& algs,
dvector<float>& d_inout,
dvector<float>& d_ybar,
dvector<float>& d_vhat,
dvector<float>& d_ysum,
const float *h_in,
const int& w,
const int& h ) {
algs.width = w;
algs.height = h;
if( w % 32 > 0 ) algs.width += (32 - (w % 32));
if( h % 32 > 0 ) algs.height += (32 - (h % 32));
calc_alg_setup( algs, algs.width, algs.height );
up_alg_setup( algs );
d_inout.copy_from( h_in, w, h, algs.width, algs.height );
d_ybar.resize( algs.n_size * algs.width );
d_vhat.resize( algs.m_size * algs.height );
d_ysum.resize( algs.m_size * algs.n_size );
}
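// The SAT is built in four passes: stage 1 computes per-block partial column sums
// (ybar) and row sums (vhat); stage 2 turns ybar into completed block-row carries
// and the per-block totals ysum; stage 3 uses ysum to complete vhat into the
// block-column carries; stage 4 re-scans each block seeded with those carries.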
__host__
void algSAT( dvector<float>& d_out,
dvector<float>& d_ybar,
dvector<float>& d_vhat,
dvector<float>& d_ysum,
const dvector<float>& d_in,
const alg_setup& algs ) {
const int nWm = (algs.width+MTS-1)/MTS, nHm = (algs.height+MTS-1)/MTS;
const dim3 cg_img( algs.m_size, algs.n_size );
const dim3 cg_ybar( nWm, 1 );
const dim3 cg_vhat( 1, nHm );
algSAT_stage1<<< cg_img, dim3(WS, SOW) >>>( d_in, d_ybar, d_vhat );
algSAT_stage2<<< cg_ybar, dim3(WS, MW) >>>( d_ybar, d_ysum );
algSAT_stage3<<< cg_vhat, dim3(WS, MW) >>>( d_ysum, d_vhat );
algSAT_stage4<<< cg_img, dim3(WS, SOW) >>>( d_out, d_in, d_ybar, d_vhat );
}
__host__
void algSAT( dvector<float>& d_inout,
dvector<float>& d_ybar,
dvector<float>& d_vhat,
dvector<float>& d_ysum,
const alg_setup& algs ) {
const int nWm = (algs.width+MTS-1)/MTS, nHm = (algs.height+MTS-1)/MTS;
const dim3 cg_img( algs.m_size, algs.n_size );
const dim3 cg_ybar( nWm, 1 );
const dim3 cg_vhat( 1, nHm );
algSAT_stage1<<< cg_img, dim3(WS, SOW) >>>( d_inout, d_ybar, d_vhat );
algSAT_stage2<<< cg_ybar, dim3(WS, MW) >>>( d_ybar, d_ysum );
algSAT_stage3<<< cg_vhat, dim3(WS, MW) >>>( d_ysum, d_vhat );
algSAT_stage4<<< cg_img, dim3(WS, SOW) >>>( d_inout, d_ybar, d_vhat );
}
__host__
void algSAT( float *h_inout,
const int& w,
const int& h ) {
alg_setup algs;
dvector<float> d_out, d_ybar, d_vhat, d_ysum;
prepare_algSAT( algs, d_out, d_ybar, d_vhat, d_ysum, h_inout, w, h );
algSAT( d_out, d_ybar, d_vhat, d_ysum, algs );
d_out.copy_to( h_inout, algs.width, algs.height, w, h );
}
__host__
void algBox( const int& box_filter_radius,
dvector<float>& d_tmp,
dvector<float>& d_box,
dvector<float>& d_ybar,
dvector<float>& d_vhat,
dvector<float>& d_ysum,
dvector<float>& d_in,
const alg_setup& algs ) {
const int nWm = (algs.width+MTS-1)/MTS, nHm = (algs.height+MTS-1)/MTS;
const dim3 cg_img( algs.m_size, algs.n_size );
const dim3 cg_ybar( nWm, 1 );
const dim3 cg_vhat( 1, nHm );
algSAT_stage1<<< cg_img, dim3(WS, SOW) >>>( d_in, d_ybar, d_vhat );
algSAT_stage2<<< cg_ybar, dim3(WS, MW) >>>( d_ybar, d_ysum );
algSAT_stage3<<< cg_vhat, dim3(WS, MW) >>>( d_ysum, d_vhat );
algSAT_stage4<<< cg_img, dim3(WS, SOW) >>>( d_tmp, d_in, d_ybar, d_vhat );
algSAT_box <<< cg_img, dim3(WS, WS/4) >>>( d_box, d_tmp, d_in, box_filter_radius);
}
//=============================================================================
} // namespace gpufilter
//=============================================================================
|
8b5a94ebb6f3172ec0ad07f672f32fc1846d4459.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include "compute_disparity.hip"
// #include "computeHalfDisparity.cu"
// #include "computeFullDisparity.cu"
__global__ void eventScheduler(
const struct pixel * imageR,
const struct pixel * imageL,
const int window_size,
const int image_height,
const int image_width,
const bool * foregroundR,
const bool * foregroundL,
float * disparity_output)
{
dim3 blockSize(8,64); // Each thread handles 8 pixels, so one block covers a 64 x 64 pixel tile
dim3 fullGridSize( (image_height + 63) / 64, (image_width + 63) / 64 ); // ceiling division so the grid covers the whole image
hipLaunchKernelGGL(( computeDisparity) , dim3(fullGridSize), dim3(blockSize), 0, 0,
imageR,
imageL,
window_size,
image_height,
image_width,
foregroundR,
foregroundL,
disparity_output);
}
|
8b5a94ebb6f3172ec0ad07f672f32fc1846d4459.cu
|
#include <math.h>
#include "compute_disparity.cu"
// #include "computeHalfDisparity.cu"
// #include "computeFullDisparity.cu"
__global__ void eventScheduler(
const struct pixel * imageR,
const struct pixel * imageL,
const int window_size,
const int image_height,
const int image_width,
const bool * foregroundR,
const bool * foregroundL,
float * disparity_output)
{
dim3 blockSize(8,64); // Each thread handles 8 pixels, so one block covers a 64 x 64 pixel tile
dim3 fullGridSize( (image_height + 63) / 64, (image_width + 63) / 64 ); // ceiling division so the grid covers the whole image
computeDisparity <<<fullGridSize, blockSize>>>
(imageR,
imageL,
window_size,
image_height,
image_width,
foregroundR,
foregroundL,
disparity_output);
}
|
4b9ef68f4ce112c90419bb1cd52a4085ca5c4d1b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
@generated s Tue Dec 17 13:18:45 2013
@author Mark Gates
*/
#include "common_magma.h"
#include <assert.h>
#define NB 64
/* =====================================================================
Batches slacpy of multiple arrays;
y-dimension of grid is different arrays,
x-dimension of grid is blocks for each array.
Matrix is m x n, and is divided into block rows, each NB x n.
Each CUDA block has NB threads to handle one block row.
Each thread copies one row, iterating across all columns.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
*/
__global__ void
slacpy_batched_kernel(
int m, int n,
const float * const *dAarray, int ldda,
float **dBarray, int lddb )
{
// dA and dB iterate across row i
const float *dA = dAarray[ blockIdx.y ];
float *dB = dBarray[ blockIdx.y ];
int i = blockIdx.x*blockDim.x + threadIdx.x;
if ( i < m ) {
dA += i;
dB += i;
const float *dAend = dA + n*ldda;
while( dA < dAend ) {
*dB = *dA;
dA += ldda;
dB += lddb;
}
}
}
/* ===================================================================== */
extern "C" void
magmablas_slacpy_batched(
char uplo, magma_int_t m, magma_int_t n,
const float * const *dAarray, magma_int_t ldda,
float **dBarray, magma_int_t lddb,
magma_int_t batchCount )
{
/*
Note
========
- UPLO Parameter is disabled
- Do we want to provide a generic function to the user with all the options?
Purpose
=======
SLACPY copies all or part of a set of two-dimensional matrices dAarray[i]
to another set of matrices dBarray[i], for i = 0, ..., batchCount-1.
Arguments
=========
UPLO (input) CHARACTER*1
Specifies the part of each matrix dAarray[i] to be copied to dBarray[i].
= 'U': Upper triangular part
= 'L': Lower triangular part
Otherwise: All of each matrix dAarray[i]
M (input) INTEGER
The number of rows of each matrix dAarray[i]. M >= 0.
N (input) INTEGER
The number of columns of each matrix dAarray[i]. N >= 0.
dAarray (input) array on GPU, dimension(batchCount), of pointers to arrays,
with each array a COMPLEX REAL array, dimension (LDDA,N)
The m by n matrices dAarray[i].
If UPLO = 'U', only the upper triangle or trapezoid is accessed;
if UPLO = 'L', only the lower triangle or trapezoid is accessed.
LDDA (input) INTEGER
The leading dimension of each array dAarray[i]. LDDA >= max(1,M).
dBarray (output) array on GPU, dimension(batchCount), of pointers to arrays,
with each array a COMPLEX REAL array, dimension (LDDB,N)
The m by n matrices dBarray[i].
On exit, matrix dBarray[i] = matrix dAarray[i] in the locations
specified by UPLO.
LDDB (input) INTEGER
The leading dimension of each array dBarray[i]. LDDB >= max(1,M).
batchCount (input) INTEGER
The number of matrices to add; length of dAarray and dBarray.
batchCount >= 0.
===================================================================== */
magma_int_t info = 0;
if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
else if ( batchCount < 0 )
info = -8;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || n == 0 || batchCount == 0 )
return;
dim3 threads( NB );
dim3 grid( (m + NB - 1)/NB, batchCount );
if ( (uplo == 'U') || (uplo == 'u') ) {
fprintf(stderr, "lacpy upper is not implemented\n");
}
else if ( (uplo == 'L') || (uplo == 'l') ) {
fprintf(stderr, "lacpy lower is not implemented\n");
}
else {
hipLaunchKernelGGL(( slacpy_batched_kernel), dim3(grid), dim3(threads), 0, magma_stream ,
m, n, dAarray, ldda, dBarray, lddb );
}
}
|
4b9ef68f4ce112c90419bb1cd52a4085ca5c4d1b.cu
|
/*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
@generated s Tue Dec 17 13:18:45 2013
@author Mark Gates
*/
#include "common_magma.h"
#include <assert.h>
#define NB 64
/* =====================================================================
Batches slacpy of multiple arrays;
y-dimension of grid is different arrays,
x-dimension of grid is blocks for each array.
Matrix is m x n, and is divided into block rows, each NB x n.
Each CUDA block has NB threads to handle one block row.
Each thread copies one row, iterating across all columns.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
*/
__global__ void
slacpy_batched_kernel(
int m, int n,
const float * const *dAarray, int ldda,
float **dBarray, int lddb )
{
// dA and dB iterate across row i
const float *dA = dAarray[ blockIdx.y ];
float *dB = dBarray[ blockIdx.y ];
int i = blockIdx.x*blockDim.x + threadIdx.x;
if ( i < m ) {
dA += i;
dB += i;
const float *dAend = dA + n*ldda;
while( dA < dAend ) {
*dB = *dA;
dA += ldda;
dB += lddb;
}
}
}
/* ===================================================================== */
extern "C" void
magmablas_slacpy_batched(
char uplo, magma_int_t m, magma_int_t n,
const float * const *dAarray, magma_int_t ldda,
float **dBarray, magma_int_t lddb,
magma_int_t batchCount )
{
/*
Note
========
- UPLO Parameter is disabled
- Do we want to provide a generic function to the user with all the options?
Purpose
=======
SLACPY copies all or part of a set of two-dimensional matrices dAarray[i]
to another set of matrices dBarray[i], for i = 0, ..., batchCount-1.
Arguments
=========
UPLO (input) CHARACTER*1
Specifies the part of each matrix dAarray[i] to be copied to dBarray[i].
= 'U': Upper triangular part
= 'L': Lower triangular part
Otherwise: All of each matrix dAarray[i]
M (input) INTEGER
The number of rows of each matrix dAarray[i]. M >= 0.
N (input) INTEGER
The number of columns of each matrix dAarray[i]. N >= 0.
dAarray (input) array on GPU, dimension(batchCount), of pointers to arrays,
with each array a COMPLEX REAL array, dimension (LDDA,N)
The m by n matrices dAarray[i].
If UPLO = 'U', only the upper triangle or trapezoid is accessed;
if UPLO = 'L', only the lower triangle or trapezoid is accessed.
LDDA (input) INTEGER
The leading dimension of each array dAarray[i]. LDDA >= max(1,M).
dBarray (output) array on GPU, dimension(batchCount), of pointers to arrays,
with each array a COMPLEX REAL array, dimension (LDDB,N)
The m by n matrices dBarray[i].
On exit, matrix dBarray[i] = matrix dAarray[i] in the locations
specified by UPLO.
LDDB (input) INTEGER
The leading dimension of each array dBarray[i]. LDDB >= max(1,M).
batchCount (input) INTEGER
The number of matrices to add; length of dAarray and dBarray.
batchCount >= 0.
===================================================================== */
magma_int_t info = 0;
if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
else if ( batchCount < 0 )
info = -8;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || n == 0 || batchCount == 0 )
return;
dim3 threads( NB );
dim3 grid( (m + NB - 1)/NB, batchCount );
if ( (uplo == 'U') || (uplo == 'u') ) {
fprintf(stderr, "lacpy upper is not implemented\n");
}
else if ( (uplo == 'L') || (uplo == 'l') ) {
fprintf(stderr, "lacpy lower is not implemented\n");
}
else {
slacpy_batched_kernel<<< grid, threads, 0, magma_stream >>>(
m, n, dAarray, ldda, dBarray, lddb );
}
}
|
accd0e3f1f22357f038524883e6c537fb8cec326.hip
|
// !!! This is a file automatically generated by hipify!!!
// RUN: %run_test hipify "%s" "%t" %cuda_args
// Taken from Jonathan Hui blog https://jhui.github.io/2017/03/06/CUDA
#include <stdio.h>
// CHECK: #include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
__global__ void staticReverse(int *d, int n)
{
__shared__ int s[64];
int t = threadIdx.x;
int tr = n-t-1;
s[t] = d[t];
// Will not continue until all threads have completed.
__syncthreads();
d[t] = s[tr];
}
int main(void)
{
const int n = 64;
int a[n], r[n], d[n];
for (int i = 0; i < n; i++) {
a[i] = i;
r[i] = n-i-1;
d[i] = 0;
}
int *d_d;
// CHECK: hipMalloc(&d_d, n * sizeof(int));
hipMalloc(&d_d, n * sizeof(int));
// run version with static shared memory
// CHECK: hipMemcpy(d_d, a, n*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_d, a, n*sizeof(int), hipMemcpyHostToDevice);
// CHECK: hipLaunchKernelGGL(staticReverse, dim3(1), dim3(n), 0, 0, d_d, n);
hipLaunchKernelGGL(( staticReverse), dim3(1),dim3(n), 0, 0, d_d, n);
// CHECK: hipMemcpy(d, d_d, n*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(d, d_d, n*sizeof(int), hipMemcpyDeviceToHost);
for (int i = 0; i < n; i++)
if (d[i] != r[i]) printf("Error: d[%d]!=r[%d] (%d, %d)\n", i, i, d[i], r[i]);
}
|
accd0e3f1f22357f038524883e6c537fb8cec326.cu
|
// RUN: %run_test hipify "%s" "%t" %cuda_args
// Taken from Jonathan Hui blog https://jhui.github.io/2017/03/06/CUDA
#include <stdio.h>
// CHECK: #include <hip/hip_runtime.h>
#include <cuda.h>
__global__ void staticReverse(int *d, int n)
{
__shared__ int s[64];
int t = threadIdx.x;
int tr = n-t-1;
s[t] = d[t];
// Will not continue until all threads have completed.
__syncthreads();
d[t] = s[tr];
}
int main(void)
{
const int n = 64;
int a[n], r[n], d[n];
for (int i = 0; i < n; i++) {
a[i] = i;
r[i] = n-i-1;
d[i] = 0;
}
int *d_d;
// CHECK: hipMalloc(&d_d, n * sizeof(int));
cudaMalloc(&d_d, n * sizeof(int));
// run version with static shared memory
// CHECK: hipMemcpy(d_d, a, n*sizeof(int), hipMemcpyHostToDevice);
cudaMemcpy(d_d, a, n*sizeof(int), cudaMemcpyHostToDevice);
// CHECK: hipLaunchKernelGGL(staticReverse, dim3(1), dim3(n), 0, 0, d_d, n);
staticReverse<<<1,n>>>(d_d, n);
// CHECK: hipMemcpy(d, d_d, n*sizeof(int), hipMemcpyDeviceToHost);
cudaMemcpy(d, d_d, n*sizeof(int), cudaMemcpyDeviceToHost);
for (int i = 0; i < n; i++)
if (d[i] != r[i]) printf("Error: d[%d]!=r[%d] (%d, %d)\n", i, i, d[i], r[i]);
}
|
c710437788d44b407a21d98f500be7c5c3785f38.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include <string>
using namespace std;
#include <opencv2/opencv.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
__global__ void process(unsigned char* input_img, unsigned char* output_img, int radius, int img_x, int img_y) {
int img_row = blockIdx.x * blockDim.x + threadIdx.x;
int img_col = blockIdx.y * blockDim.y + threadIdx.y;
if (img_row < img_x && img_col < img_y) {
int hist[256][3] = {{0}};
int sum = 0, local_sum_r = 0, local_sum_g = 0, local_sum_b = 0;
int x_start = max(0, img_row - radius);
int x_end = min(img_x - 1, img_row + radius);
int y_start = max(0, img_col - radius);
int y_end = min(img_y - 1, img_col + radius);
for (int i = x_start; i <= x_end; i++) {
for (int j = y_start; j <= y_end; j++) {
sum++;
hist[input_img[(i * img_y + j) * 3]][0]++;
hist[input_img[(i * img_y + j) * 3 + 1]][1]++;
hist[input_img[(i * img_y + j) * 3 + 2]][2]++;
}
}
bool is_r_enable = true, is_g_enable = true, is_b_enable = true;
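// Walk each channel's histogram; the first intensity whose cumulative count
// reaches half of the window population is taken as that channel's median.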
for (int i = 0; i < 256; ++i) {
local_sum_r += hist[i][0];
local_sum_g += hist[i][1];
local_sum_b += hist[i][2];
if (is_r_enable && local_sum_r >= sum / 2) {
output_img[(img_row * img_y + img_col) * 3] = i;
is_r_enable = false;
}
if (is_g_enable && local_sum_g >= sum / 2) {
output_img[(img_row * img_y + img_col) * 3 + 1] = i;
is_g_enable = false;
}
if (is_b_enable && local_sum_b >= sum / 2) {
output_img[(img_row * img_y + img_col) * 3 + 2] = i;
is_b_enable = false;
}
}
}
}
int main(int argc, char* argv[]) {
using namespace cv;
cv::Mat img_load = cv::imread(argv[1], CV_LOAD_IMAGE_COLOR);
auto img_size = img_load.size();
int radius = 0;
sscanf(argv[2], "%d", &radius);
int grid_size = 32;
int rows = img_size.height, cols = img_size.width;
dim3 grid_dim(grid_size, grid_size);
dim3 block_dim(rows / grid_size + 1, cols / grid_size + 1);
unsigned char* input_img = new unsigned char[rows * cols * 3];
unsigned char* output_img = new unsigned char[rows * cols * 3];
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
input_img[(i * cols + j) * 3] = img_load.at<cv::Vec3b>(i, j).val[0];
input_img[(i * cols + j) * 3 + 1] = img_load.at<cv::Vec3b>(i, j).val[1];
input_img[(i * cols + j) * 3 + 2] = img_load.at<cv::Vec3b>(i, j).val[2];
}
}
unsigned char* device_input_img;
unsigned char* device_output_img;
hipMalloc((void**)(&device_input_img), rows *cols * 3 * sizeof(unsigned char));
hipMemcpy(device_input_img, input_img, rows*cols*3 * sizeof(unsigned char), hipMemcpyHostToDevice);
hipMalloc((void**)(&device_output_img), rows*cols *3 *sizeof(unsigned char));
hipLaunchKernelGGL(( process), dim3(grid_dim), dim3(block_dim) , 0, 0, device_input_img, device_output_img, radius, rows, cols);
hipMemcpy(output_img, device_output_img, rows*cols*3*sizeof(unsigned char), hipMemcpyDeviceToHost);
//std::cout << img_result.size().height << " " << img_result.size().width << " " << rows << " " << cols << endl;
cv::Mat img_result(img_load);
cv::resize(img_result, img_result, cv::Size(cols, rows));
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
cv::Vec3b new_color(output_img[(i * cols + j) * 3], output_img[(i * cols + j) * 3 + 1], output_img[(i * cols + j) * 3 + 2]);
//cout << static_cast<int>(output_img[(i * cols + j) * 3]) << " " << static_cast<int>(output_img[(i * cols + j) * 3 + 1]) << " " << static_cast<int>(output_img[(i * cols + j) * 3 + 2]) << " " << i << " " << j << endl;
//cout << i << " " << j << endl;
img_result.at<Vec3b>(i, j) = new_color;//cv::Vec3b(1, 1, 1);
}
}
//return 0;
imwrite( "result_img.jpg", img_result );
hipFree(device_input_img);
hipFree(device_output_img);
delete[] input_img;
delete[] output_img;
return 0;
}
|
c710437788d44b407a21d98f500be7c5c3785f38.cu
|
#include <iostream>
#include <cuda.h>
#include <string>
using namespace std;
#include <opencv2/opencv.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
__global__ void process(unsigned char* input_img, unsigned char* output_img, int radius, int img_x, int img_y) {
int img_row = blockIdx.x * blockDim.x + threadIdx.x;
int img_col = blockIdx.y * blockDim.y + threadIdx.y;
if (img_row < img_x && img_col < img_y) {
int hist[256][3] = {{0}};
int sum = 0, local_sum_r = 0, local_sum_g = 0, local_sum_b = 0;
int x_start = max(0, img_row - radius);
int x_end = min(img_x - 1, img_row + radius);
int y_start = max(0, img_col - radius);
int y_end = min(img_y - 1, img_col + radius);
for (int i = x_start; i <= x_end; i++) {
for (int j = y_start; j <= y_end; j++) {
sum++;
hist[input_img[(i * img_y + j) * 3]][0]++;
hist[input_img[(i * img_y + j) * 3 + 1]][1]++;
hist[input_img[(i * img_y + j) * 3 + 2]][2]++;
}
}
bool is_r_enable = true, is_g_enable = true, is_b_enable = true;
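// Walk each channel's histogram; the first intensity whose cumulative count
// reaches half of the window population is taken as that channel's median.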
for (int i = 0; i < 256; ++i) {
local_sum_r += hist[i][0];
local_sum_g += hist[i][1];
local_sum_b += hist[i][2];
if (is_r_enable && local_sum_r >= sum / 2) {
output_img[(img_row * img_y + img_col) * 3] = i;
is_r_enable = false;
}
if (is_g_enable && local_sum_g >= sum / 2) {
output_img[(img_row * img_y + img_col) * 3 + 1] = i;
is_g_enable = false;
}
if (is_b_enable && local_sum_b >= sum / 2) {
output_img[(img_row * img_y + img_col) * 3 + 2] = i;
is_b_enable = false;
}
}
}
}
int main(int argc, char* argv[]) {
using namespace cv;
cv::Mat img_load = cv::imread(argv[1], CV_LOAD_IMAGE_COLOR);
auto img_size = img_load.size();
int radius = 0;
sscanf(argv[2], "%d", &radius);
int grid_size = 32;
int rows = img_size.height, cols = img_size.width;
dim3 grid_dim(grid_size, grid_size);
dim3 block_dim(rows / grid_size + 1, cols / grid_size + 1);
unsigned char* input_img = new unsigned char[rows * cols * 3];
unsigned char* output_img = new unsigned char[rows * cols * 3];
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
input_img[(i * cols + j) * 3] = img_load.at<cv::Vec3b>(i, j).val[0];
input_img[(i * cols + j) * 3 + 1] = img_load.at<cv::Vec3b>(i, j).val[1];
input_img[(i * cols + j) * 3 + 2] = img_load.at<cv::Vec3b>(i, j).val[2];
}
}
unsigned char* device_input_img;
unsigned char* device_output_img;
cudaMalloc((void**)(&device_input_img), rows *cols * 3 * sizeof(unsigned char));
cudaMemcpy(device_input_img, input_img, rows*cols*3 * sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaMalloc((void**)(&device_output_img), rows*cols *3 *sizeof(unsigned char));
process<<< grid_dim, block_dim >>>(device_input_img, device_output_img, radius, rows, cols);
cudaMemcpy(output_img, device_output_img, rows*cols*3*sizeof(unsigned char), cudaMemcpyDeviceToHost);
//std::cout << img_result.size().height << " " << img_result.size().width << " " << rows << " " << cols << endl;
cv::Mat img_result(img_load);
cv::resize(img_result, img_result, cv::Size(cols, rows));
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
cv::Vec3b new_color(output_img[(i * cols + j) * 3], output_img[(i * cols + j) * 3 + 1], output_img[(i * cols + j) * 3 + 2]);
//cout << static_cast<int>(output_img[(i * cols + j) * 3]) << " " << static_cast<int>(output_img[(i * cols + j) * 3 + 1]) << " " << static_cast<int>(output_img[(i * cols + j) * 3 + 2]) << " " << i << " " << j << endl;
//cout << i << " " << j << endl;
img_result.at<Vec3b>(i, j) = new_color;//cv::Vec3b(1, 1, 1);
}
}
//return 0;
imwrite( "result_img.jpg", img_result );
cudaFree(device_input_img);
cudaFree(device_output_img);
delete[] input_img;
delete[] output_img;
return 0;
}
|
b740438a520c2e7d322098b8ba45b89824599a5e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/operators/roi_align_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaxinumNumBlocks = 4096;
static inline int NumBlocks(const int N) {
return ::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
kNumMaxinumNumBlocks);
}
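// Grid-stride loop: each thread handles indices i, i + blockDim.x * gridDim.x, ...
// so a grid capped at kNumMaxinumNumBlocks blocks still covers any n.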
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
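// Bilinearly interpolates input_data at the continuous location (y, x), clamping
// coordinates to the image border and returning 0 for samples that fall more than
// one pixel outside the feature map.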
template <class T>
__device__ T BilinearInterpolate(const T* input_data, const int height,
const int width, T y, T x) {
if (y < -1.0 || y > height || x < -1.0 || x > width) {
return 0;
}
y = y <= 0 ? 0 : y;
x = x <= 0 ? 0 : x;
int y_low = static_cast<int>(y);
int x_low = static_cast<int>(x);
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = static_cast<T>(y_low);
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = static_cast<T>(x_low);
} else {
x_high = x_low + 1;
}
T ly = y - y_low, lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
T v1 = input_data[y_low * width + x_low];
T v2 = input_data[y_low * width + x_high];
T v3 = input_data[y_high * width + x_low];
T v4 = input_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <class T>
__device__ void BilinearInterpolateGradient(const int height, const int width,
T y, T x, T* w1, T* w2, T* w3,
T* w4, int* x_low, int* x_high,
int* y_low, int* y_high) {
if (y < -1.0 || y > height || x < -1.0 || x > width) {
return;
}
y = y <= 0 ? 0 : y;
x = x <= 0 ? 0 : x;
*y_low = static_cast<int>(y);
*x_low = static_cast<int>(x);
if (*y_low >= height - 1) {
*y_high = *y_low = height - 1;
y = static_cast<T>(*y_low);
} else {
*y_high = *y_low + 1;
}
if (*x_low >= width - 1) {
*x_high = *x_low = width - 1;
x = static_cast<T>(*x_low);
} else {
*x_high = *x_low + 1;
}
T ly = y - *y_low, lx = x - *x_low;
T hy = 1. - ly, hx = 1. - lx;
*w1 = hy * hx, *w2 = hy * lx, *w3 = ly * hx, *w4 = ly * lx;
return;
}
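// Forward kernel: one loop iteration per output element. The flat index i is
// decoded into (n, c, ph, pw); the (ph, pw) bin of ROI n is covered with a
// roi_bin_grid_h x roi_bin_grid_w grid of sample points, each bilinearly
// interpolated from the input feature map, and the samples are averaged.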
template <class T>
__global__ void GPUROIAlignForward(
const int nthreads, const T* input_data, const T* input_rois,
const float spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int sampling_ratio, int* roi_batch_id_data, T* output_data) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % channels;
int n = i / pooled_width / pooled_height / channels;
const T* offset_input_rois = input_rois + n * kROISize;
int roi_batch_ind = roi_batch_id_data[n];
T roi_xmin = offset_input_rois[0] * spatial_scale;
T roi_ymin = offset_input_rois[1] * spatial_scale;
T roi_xmax = offset_input_rois[2] * spatial_scale;
T roi_ymax = offset_input_rois[3] * spatial_scale;
T roi_width = max(roi_xmax - roi_xmin, static_cast<T>(1.));
T roi_height = max(roi_ymax - roi_ymin, static_cast<T>(1.));
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T* offset_input_data =
input_data + (roi_batch_ind * channels + c) * height * width;
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height);
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
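    // With sampling_ratio <= 0 the grid adapts to the bin size, e.g. a ROI of
    // height 20 pooled to 7 rows uses ceil(20 / 7) = 3 sample rows per bin
    // (illustrative numbers only).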
const T count = roi_bin_grid_h * roi_bin_grid_w;
T output_val = 0;
for (int iy = 0; iy < roi_bin_grid_h; iy++) {
const T y = roi_ymin + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h);
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_xmin + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T val = BilinearInterpolate(offset_input_data, height, width, y, x);
output_val += val;
}
}
output_val /= count;
output_data[i] = output_val;
}
}
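// Backward kernel: mirrors the forward sampling pattern. For every sample
// point, the incoming gradient of its (ph, pw) bin is split across the four
// bilinear corners (weights from BilinearInterpolateGradient) and accumulated
// into the input gradient with atomic adds, since different samples and ROIs
// can touch the same input cell.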
template <typename T>
__global__ void GPUROIAlignBackward(const int nthreads, const T* input_rois,
const T* out_grad, const int num_rois,
const float spatial_scale,
const int channels, const int height,
const int width, const int pooled_height,
const int pooled_width,
const int sampling_ratio,
int* roi_batch_id_data, T* input_grad) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % channels;
int n = i / pooled_width / pooled_height / channels;
const T* offset_input_rois = input_rois + n * kROISize;
int roi_batch_ind = roi_batch_id_data[n];
T roi_xmin = offset_input_rois[0] * spatial_scale;
T roi_ymin = offset_input_rois[1] * spatial_scale;
T roi_xmax = offset_input_rois[2] * spatial_scale;
T roi_ymax = offset_input_rois[3] * spatial_scale;
T roi_width = max(roi_xmax - roi_xmin, static_cast<T>(1.));
T roi_height = max(roi_ymax - roi_ymin, static_cast<T>(1.));
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T* offset_input_grad =
input_grad + (roi_batch_ind * channels + c) * height * width;
const T* offset_out_grad =
out_grad + (n * channels + c) * pooled_height * pooled_width;
const T out_grad_this_bin = offset_out_grad[ph * pooled_width + pw];
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height);
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
const T count = roi_bin_grid_h * roi_bin_grid_w;
for (int iy = 0; iy < roi_bin_grid_h; iy++) {
const T y = roi_ymin + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h);
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_xmin + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T w1 = 0, w2 = 0, w3 = 0, w4 = 0;
int x_low = -1, x_high = -1, y_low = -1, y_high = -1;
BilinearInterpolateGradient(height, width, y, x, &w1, &w2, &w3, &w4,
&x_low, &x_high, &y_low, &y_high);
T diff1 = out_grad_this_bin * w1 / count;
T diff2 = out_grad_this_bin * w2 / count;
T diff3 = out_grad_this_bin * w3 / count;
T diff4 = out_grad_this_bin * w4 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
platform::CudaAtomicAdd(offset_input_grad + y_low * width + x_low,
diff1);
platform::CudaAtomicAdd(offset_input_grad + y_low * width + x_high,
diff2);
platform::CudaAtomicAdd(offset_input_grad + y_high * width + x_low,
diff3);
platform::CudaAtomicAdd(offset_input_grad + y_high * width + x_high,
diff4);
}
}
}
}
}
template <typename Place, typename T>
class GPUROIAlignOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<Tensor>("X");
auto* rois = ctx.Input<LoDTensor>("ROIs");
auto* out = ctx.Output<Tensor>("Out");
auto pooled_height = ctx.Attr<int>("pooled_height");
auto pooled_width = ctx.Attr<int>("pooled_width");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
auto sampling_ratio = ctx.Attr<int>("sampling_ratio");
auto in_dims = in->dims();
int batch_size = in_dims[0];
int channels = in_dims[1];
int height = in_dims[2];
int width = in_dims[3];
int rois_num = rois->dims()[0];
if (rois_num == 0) return;
int output_size = out->numel();
int blocks = NumBlocks(output_size);
int threads = kNumCUDAThreads;
Tensor roi_batch_id_list;
roi_batch_id_list.Resize({rois_num});
auto cplace = platform::CPUPlace();
int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(cplace);
auto rois_lod = rois->lod().back();
int rois_batch_size = rois_lod.size() - 1;
PADDLE_ENFORCE_EQ(
rois_batch_size, batch_size,
"The rois_batch_size and imgs batch_size must be the same.");
int rois_num_with_lod = rois_lod[rois_batch_size];
PADDLE_ENFORCE_EQ(rois_num, rois_num_with_lod,
"The rois_num from input and lod must be the same.");
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
roi_batch_id_data[i] = n;
}
}
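    // roi_batch_id_data[i] now holds the image index of ROI i. For example
    // (illustrative values only), a lod of {0, 2, 5} maps ROIs 0-1 to image 0
    // and ROIs 2-4 to image 1.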
auto& dev_ctx = ctx.cuda_device_context();
auto& allocator =
platform::DeviceTemporaryAllocator::Instance().Get(dev_ctx);
int bytes = roi_batch_id_list.numel() * sizeof(int);
auto roi_ptr = allocator.Allocate(bytes);
int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
const auto gplace = boost::get<platform::CUDAPlace>(ctx.GetPlace());
memory::Copy(gplace, roi_id_data, cplace, roi_batch_id_data, bytes,
dev_ctx.stream());
hipLaunchKernelGGL(( GPUROIAlignForward<T>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
output_size, in->data<T>(), rois->data<T>(), spatial_scale, channels,
height, width, pooled_height, pooled_width, sampling_ratio, roi_id_data,
out->mutable_data<T>(ctx.GetPlace()));
}
};
template <typename Place, typename T>
class GPUROIAlignGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<Tensor>("X");
auto* rois = ctx.Input<LoDTensor>("ROIs");
auto* out_grad = ctx.Input<Tensor>(framework::GradVarName("Out"));
auto* in_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
auto pooled_height = ctx.Attr<int>("pooled_height");
auto pooled_width = ctx.Attr<int>("pooled_width");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
auto sampling_ratio = ctx.Attr<int>("sampling_ratio");
int rois_num = rois->dims()[0];
int channels = in->dims()[1];
int height = in->dims()[2];
int width = in->dims()[3];
if (!in_grad) {
return;
}
Tensor roi_batch_id_list;
roi_batch_id_list.Resize({rois_num});
auto cplace = platform::CPUPlace();
int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(cplace);
auto rois_lod = rois->lod().back();
int rois_batch_size = rois_lod.size() - 1;
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
roi_batch_id_data[i] = n;
}
}
auto& dev_ctx = ctx.cuda_device_context();
auto& allocator =
platform::DeviceTemporaryAllocator::Instance().Get(dev_ctx);
auto roi_ptr = allocator.Allocate(roi_batch_id_list.numel() * sizeof(int));
int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
int bytes = roi_batch_id_list.numel() * sizeof(int);
const auto gplace = boost::get<platform::CUDAPlace>(ctx.GetPlace());
memory::Copy(gplace, roi_id_data, cplace, roi_batch_id_data, bytes,
dev_ctx.stream());
in_grad->mutable_data<T>(ctx.GetPlace());
math::SetConstant<Place, T> set_zero;
set_zero(dev_ctx, in_grad, static_cast<T>(0));
int output_grad_size = out_grad->numel();
int blocks = NumBlocks(output_grad_size);
int threads = kNumCUDAThreads;
if (output_grad_size > 0) {
hipLaunchKernelGGL(( GPUROIAlignBackward<T>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
output_grad_size, rois->data<T>(), out_grad->data<T>(), rois_num,
spatial_scale, channels, height, width, pooled_height, pooled_width,
sampling_ratio, roi_id_data,
in_grad->mutable_data<T>(ctx.GetPlace()));
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
roi_align,
ops::GPUROIAlignOpKernel<paddle::platform::CUDADeviceContext, float>,
ops::GPUROIAlignOpKernel<paddle::platform::CUDADeviceContext, double>);
REGISTER_OP_CUDA_KERNEL(
roi_align_grad,
ops::GPUROIAlignGradOpKernel<paddle::platform::CUDADeviceContext, float>,
ops::GPUROIAlignGradOpKernel<paddle::platform::CUDADeviceContext, double>);
|
b740438a520c2e7d322098b8ba45b89824599a5e.cu
|
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/operators/roi_align_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaxinumNumBlocks = 4096;
static inline int NumBlocks(const int N) {
return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
kNumMaxinumNumBlocks);
}
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
template <class T>
__device__ T BilinearInterpolate(const T* input_data, const int height,
const int width, T y, T x) {
if (y < -1.0 || y > height || x < -1.0 || x > width) {
return 0;
}
y = y <= 0 ? 0 : y;
x = x <= 0 ? 0 : x;
int y_low = static_cast<int>(y);
int x_low = static_cast<int>(x);
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = static_cast<T>(y_low);
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = static_cast<T>(x_low);
} else {
x_high = x_low + 1;
}
T ly = y - y_low, lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
T v1 = input_data[y_low * width + x_low];
T v2 = input_data[y_low * width + x_high];
T v3 = input_data[y_high * width + x_low];
T v4 = input_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <class T>
__device__ void BilinearInterpolateGradient(const int height, const int width,
T y, T x, T* w1, T* w2, T* w3,
T* w4, int* x_low, int* x_high,
int* y_low, int* y_high) {
if (y < -1.0 || y > height || x < -1.0 || x > width) {
return;
}
y = y <= 0 ? 0 : y;
x = x <= 0 ? 0 : x;
*y_low = static_cast<int>(y);
*x_low = static_cast<int>(x);
if (*y_low >= height - 1) {
*y_high = *y_low = height - 1;
y = static_cast<T>(*y_low);
} else {
*y_high = *y_low + 1;
}
if (*x_low >= width - 1) {
*x_high = *x_low = width - 1;
x = static_cast<T>(*x_low);
} else {
*x_high = *x_low + 1;
}
T ly = y - *y_low, lx = x - *x_low;
T hy = 1. - ly, hx = 1. - lx;
*w1 = hy * hx, *w2 = hy * lx, *w3 = ly * hx, *w4 = ly * lx;
return;
}
template <class T>
__global__ void GPUROIAlignForward(
const int nthreads, const T* input_data, const T* input_rois,
const float spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int sampling_ratio, int* roi_batch_id_data, T* output_data) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % channels;
int n = i / pooled_width / pooled_height / channels;
const T* offset_input_rois = input_rois + n * kROISize;
int roi_batch_ind = roi_batch_id_data[n];
T roi_xmin = offset_input_rois[0] * spatial_scale;
T roi_ymin = offset_input_rois[1] * spatial_scale;
T roi_xmax = offset_input_rois[2] * spatial_scale;
T roi_ymax = offset_input_rois[3] * spatial_scale;
T roi_width = max(roi_xmax - roi_xmin, static_cast<T>(1.));
T roi_height = max(roi_ymax - roi_ymin, static_cast<T>(1.));
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T* offset_input_data =
input_data + (roi_batch_ind * channels + c) * height * width;
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height);
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
const T count = roi_bin_grid_h * roi_bin_grid_w;
T output_val = 0;
for (int iy = 0; iy < roi_bin_grid_h; iy++) {
const T y = roi_ymin + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h);
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_xmin + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T val = BilinearInterpolate(offset_input_data, height, width, y, x);
output_val += val;
}
}
output_val /= count;
output_data[i] = output_val;
}
}
template <typename T>
__global__ void GPUROIAlignBackward(const int nthreads, const T* input_rois,
const T* out_grad, const int num_rois,
const float spatial_scale,
const int channels, const int height,
const int width, const int pooled_height,
const int pooled_width,
const int sampling_ratio,
int* roi_batch_id_data, T* input_grad) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % channels;
int n = i / pooled_width / pooled_height / channels;
const T* offset_input_rois = input_rois + n * kROISize;
int roi_batch_ind = roi_batch_id_data[n];
T roi_xmin = offset_input_rois[0] * spatial_scale;
T roi_ymin = offset_input_rois[1] * spatial_scale;
T roi_xmax = offset_input_rois[2] * spatial_scale;
T roi_ymax = offset_input_rois[3] * spatial_scale;
T roi_width = max(roi_xmax - roi_xmin, static_cast<T>(1.));
T roi_height = max(roi_ymax - roi_ymin, static_cast<T>(1.));
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T* offset_input_grad =
input_grad + (roi_batch_ind * channels + c) * height * width;
const T* offset_out_grad =
out_grad + (n * channels + c) * pooled_height * pooled_width;
const T out_grad_this_bin = offset_out_grad[ph * pooled_width + pw];
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height);
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
const T count = roi_bin_grid_h * roi_bin_grid_w;
for (int iy = 0; iy < roi_bin_grid_h; iy++) {
const T y = roi_ymin + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h);
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_xmin + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T w1 = 0, w2 = 0, w3 = 0, w4 = 0;
int x_low = -1, x_high = -1, y_low = -1, y_high = -1;
BilinearInterpolateGradient(height, width, y, x, &w1, &w2, &w3, &w4,
&x_low, &x_high, &y_low, &y_high);
T diff1 = out_grad_this_bin * w1 / count;
T diff2 = out_grad_this_bin * w2 / count;
T diff3 = out_grad_this_bin * w3 / count;
T diff4 = out_grad_this_bin * w4 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
platform::CudaAtomicAdd(offset_input_grad + y_low * width + x_low,
diff1);
platform::CudaAtomicAdd(offset_input_grad + y_low * width + x_high,
diff2);
platform::CudaAtomicAdd(offset_input_grad + y_high * width + x_low,
diff3);
platform::CudaAtomicAdd(offset_input_grad + y_high * width + x_high,
diff4);
}
}
}
}
}
template <typename Place, typename T>
class GPUROIAlignOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<Tensor>("X");
auto* rois = ctx.Input<LoDTensor>("ROIs");
auto* out = ctx.Output<Tensor>("Out");
auto pooled_height = ctx.Attr<int>("pooled_height");
auto pooled_width = ctx.Attr<int>("pooled_width");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
auto sampling_ratio = ctx.Attr<int>("sampling_ratio");
auto in_dims = in->dims();
int batch_size = in_dims[0];
int channels = in_dims[1];
int height = in_dims[2];
int width = in_dims[3];
int rois_num = rois->dims()[0];
if (rois_num == 0) return;
int output_size = out->numel();
int blocks = NumBlocks(output_size);
int threads = kNumCUDAThreads;
Tensor roi_batch_id_list;
roi_batch_id_list.Resize({rois_num});
auto cplace = platform::CPUPlace();
int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(cplace);
auto rois_lod = rois->lod().back();
int rois_batch_size = rois_lod.size() - 1;
PADDLE_ENFORCE_EQ(
rois_batch_size, batch_size,
"The rois_batch_size and imgs batch_size must be the same.");
int rois_num_with_lod = rois_lod[rois_batch_size];
PADDLE_ENFORCE_EQ(rois_num, rois_num_with_lod,
"The rois_num from input and lod must be the same.");
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
roi_batch_id_data[i] = n;
}
}
auto& dev_ctx = ctx.cuda_device_context();
auto& allocator =
platform::DeviceTemporaryAllocator::Instance().Get(dev_ctx);
int bytes = roi_batch_id_list.numel() * sizeof(int);
auto roi_ptr = allocator.Allocate(bytes);
int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
const auto gplace = boost::get<platform::CUDAPlace>(ctx.GetPlace());
memory::Copy(gplace, roi_id_data, cplace, roi_batch_id_data, bytes,
dev_ctx.stream());
GPUROIAlignForward<T><<<blocks, threads, 0, dev_ctx.stream()>>>(
output_size, in->data<T>(), rois->data<T>(), spatial_scale, channels,
height, width, pooled_height, pooled_width, sampling_ratio, roi_id_data,
out->mutable_data<T>(ctx.GetPlace()));
}
};
template <typename Place, typename T>
class GPUROIAlignGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<Tensor>("X");
auto* rois = ctx.Input<LoDTensor>("ROIs");
auto* out_grad = ctx.Input<Tensor>(framework::GradVarName("Out"));
auto* in_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
auto pooled_height = ctx.Attr<int>("pooled_height");
auto pooled_width = ctx.Attr<int>("pooled_width");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
auto sampling_ratio = ctx.Attr<int>("sampling_ratio");
int rois_num = rois->dims()[0];
int channels = in->dims()[1];
int height = in->dims()[2];
int width = in->dims()[3];
if (!in_grad) {
return;
}
Tensor roi_batch_id_list;
roi_batch_id_list.Resize({rois_num});
auto cplace = platform::CPUPlace();
int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(cplace);
auto rois_lod = rois->lod().back();
int rois_batch_size = rois_lod.size() - 1;
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
roi_batch_id_data[i] = n;
}
}
auto& dev_ctx = ctx.cuda_device_context();
auto& allocator =
platform::DeviceTemporaryAllocator::Instance().Get(dev_ctx);
auto roi_ptr = allocator.Allocate(roi_batch_id_list.numel() * sizeof(int));
int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
int bytes = roi_batch_id_list.numel() * sizeof(int);
const auto gplace = boost::get<platform::CUDAPlace>(ctx.GetPlace());
memory::Copy(gplace, roi_id_data, cplace, roi_batch_id_data, bytes,
dev_ctx.stream());
in_grad->mutable_data<T>(ctx.GetPlace());
math::SetConstant<Place, T> set_zero;
set_zero(dev_ctx, in_grad, static_cast<T>(0));
int output_grad_size = out_grad->numel();
int blocks = NumBlocks(output_grad_size);
int threads = kNumCUDAThreads;
if (output_grad_size > 0) {
GPUROIAlignBackward<T><<<blocks, threads, 0, dev_ctx.stream()>>>(
output_grad_size, rois->data<T>(), out_grad->data<T>(), rois_num,
spatial_scale, channels, height, width, pooled_height, pooled_width,
sampling_ratio, roi_id_data,
in_grad->mutable_data<T>(ctx.GetPlace()));
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
roi_align,
ops::GPUROIAlignOpKernel<paddle::platform::CUDADeviceContext, float>,
ops::GPUROIAlignOpKernel<paddle::platform::CUDADeviceContext, double>);
REGISTER_OP_CUDA_KERNEL(
roi_align_grad,
ops::GPUROIAlignGradOpKernel<paddle::platform::CUDADeviceContext, float>,
ops::GPUROIAlignGradOpKernel<paddle::platform::CUDADeviceContext, double>);
|
11c42abc52da55a091ea652bd9d75e76a43e0f8e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#define LBANN_TOP_K_CATEGORICAL_ACCURACY_LAYER_INSTANTIATE
#include "lbann/layers/loss/top_k_categorical_accuracy.hpp"
#include "lbann/utils/cuda.hpp"
#include "lbann/utils/exception.hpp"
#include "lbann/utils/distconv.hpp"
#include <thrust/sort.h>
#include <thrust/iterator/discard_iterator.h>
namespace lbann {
namespace {
/** Sparse vector entry. */
template <typename TensorDataType>
struct entry {
/** Vector entry value. */
TensorDataType value;
/** Vector entry index. */
El::Int index;
};
/** Comparison operation to sort sparse vector entries.
* Entries are sorted by value in decreasing order, with ties broken
* in favor of entries with smaller indices.
*/
template <typename TensorDataType>
struct entry_compare : ::thrust::binary_function<entry<TensorDataType>,entry<TensorDataType>,bool> {
__host__ __device__ bool operator()(const entry<TensorDataType>& a, const entry<TensorDataType>& b) const {
return a.value > b.value || (a.value == b.value && a.index < b.index);
}
};
/** Convert columns of a dense matrix into sparse vectors.
* The matrix and vectors are both distributed, so entry indices in
* the sparse vectors correspond to global row indices in the dense
* matrix.
*/
template <typename TensorDataType>
__global__ void dense_matrix_to_sparse_vectors(El::Int local_vector_size,
El::Int local_matrix_height,
El::Int local_matrix_width,
El::Int global_matrix_height,
El::Int global_matrix_col_shift,
El::Int global_matrix_col_stride,
const TensorDataType* __restrict__ local_matrix,
El::Int local_matrix_ldim,
entry<TensorDataType>* __restrict__ local_entries,
El::Int local_entries_ldim) {
const El::Int gid = threadIdx.x + blockIdx.x * blockDim.x;
const El::Int num_threads = blockDim.x * gridDim.x;
const El::Int num_local_entries = local_vector_size * local_matrix_width;
for (El::Int i = gid; i < num_local_entries; i += num_threads) {
const auto& local_row = i % local_vector_size;
const auto& local_col = i / local_vector_size;
auto& current_entry = local_entries[local_row + local_col * local_entries_ldim];
if (local_row < local_matrix_height) {
const auto& global_row = (global_matrix_col_shift
+ local_row * global_matrix_col_stride);
current_entry.value = local_matrix[local_row + local_col * local_matrix_ldim];
current_entry.index = global_row;
} else {
current_entry.value = -cuda::infinity<TensorDataType>();
current_entry.index = global_matrix_height;
}
}
}
/** Fill an array with a corresponding tensor index.
* Consider a d(1) x d(2) x ... x d(n) tensor with entry indices
* denoted with (i(1), ..., i(n)). This tensor is contiguous in
* memory with d(1) as the most major dimension and d(n) as the most
* minor (e.g. d(1) is the width and d(2) is the height for a
* column-major matrix). Given some k, this kernel sets each entry in
* the tensor to i(k). Using this notation:
* tensor_size = d(1) * ... * d(n)
* dim = d(k)
* dim_stride = d(k+1) * ... * d(n)
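 *
 * Example (illustrative only): with dim = 3 columns and dim_stride = 4
 * entries per column, entry i = 9 gets value (9 / 4) % 3 = 2, i.e. the
 * index of the column it belongs to.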
*/
__global__ void fill_with_tensor_index(El::Int tensor_size,
El::Int dim,
El::Int dim_stride,
El::Int* tensor) {
const El::Int gid = threadIdx.x + blockIdx.x * blockDim.x;
const El::Int num_threads = blockDim.x * gridDim.x;
for (El::Int i = gid; i < tensor_size; i += num_threads) {
tensor[i] = (i / dim_stride) % dim;
}
}
/** Get indices corresponding to one-hot matrix.
* Each column of the input matrix is interpreted as a one-hot
* vector. Note that we may get race conditions if a matrix column is
* not a one-hot vector.
*/
template <typename TensorDataType>
__global__ void one_hot_matrix_to_indices(El::Int local_height,
El::Int local_width,
El::Int global_matrix_col_shift,
El::Int global_matrix_col_stride,
const TensorDataType* __restrict__ local_matrix,
El::Int local_matrix_ldim,
El::Int* __restrict__ indices) {
const El::Int gid = threadIdx.x + blockIdx.x * blockDim.x;
const El::Int num_threads = blockDim.x * gridDim.x;
const El::Int local_size = local_height * local_width;
for (El::Int i = gid; i < local_size; i += num_threads) {
const auto& local_row = i % local_height;
const auto& local_col = i / local_height;
if (local_matrix[local_row + local_col * local_matrix_ldim] > TensorDataType(0.0)) {
const auto& global_row = (global_matrix_col_shift
+ local_row * global_matrix_col_stride);
indices[local_col] = global_row;
}
}
}
/** Compute categorical accuracy for each matrix column.
* Loss is one if the label index matches one of the top-k entries
* and is otherwise zero.
*/
template <typename TensorDataType>
__global__ void compute_categorical_accuracy(El::Int k,
El::Int width,
El::Int max_entry,
const entry<TensorDataType>* __restrict__ top_entries,
El::Int top_entries_ldim,
const El::Int* __restrict__ label_indices,
TensorDataType* __restrict__ loss,
El::Int loss_stride) {
const El::Int gid = threadIdx.x + blockIdx.x * blockDim.x;
const El::Int num_threads = blockDim.x * gridDim.x;
const El::Int num_entries = width * k;
for (El::Int i = gid; i < num_entries; i += num_threads) {
const auto& ind = i % k;
const auto& col = i / k;
const auto& label_index = label_indices[col];
if (top_entries[ind + col * top_entries_ldim].index == label_index
&& label_index <= max_entry) {
loss[col * loss_stride] = TensorDataType(1.0);
}
}
}
/** GPU implementation of top-k categorical accuracy layer forward prop. */
template <typename TensorDataType>
void fp_gpu(lbann_comm& comm,
El::Int k,
const El::AbstractDistMatrix<TensorDataType>& predictions,
const El::AbstractDistMatrix<TensorDataType>& labels,
El::AbstractDistMatrix<TensorDataType>& loss) {
#ifdef LBANN_HAS_DISTCONV
if (dc::evaluate_performance()) {
El::Zero(loss);
return;
}
#endif
// Local matrices
const auto& local_predictions = predictions.LockedMatrix();
const auto& local_labels = labels.LockedMatrix();
auto& local_loss = loss.Matrix();
const auto& height = predictions.Height();
const auto& local_height = local_predictions.Height();
const auto& local_width = local_predictions.Width();
// Trivial cases
if (k < 1) {
El::Zero(loss);
return;
} else if (k >= height) {
El::Fill(loss, El::TypeTraits<TensorDataType>::One());
return;
} else if (local_width < 1) {
return;
}
// Column communicator
auto&& col_comm = predictions.ColComm();
const auto& col_comm_rank = El::mpi::Rank(col_comm);
const auto& col_comm_size = El::mpi::Size(col_comm);
const auto& col_comm_root = loss.RowOwner(0);
// GPU objects
auto&& stream = El::GPUManager::Stream();
auto&& event = El::GPUManager::Event();
El::SyncInfo<El::Device::GPU> syncInfo{stream, event};
cuda::thrust::allocator<> alloc(stream);
// Get label indices
cuda::thrust::vector<El::Int> label_indices(local_width, height);
{
const auto& local_size = local_height * local_width;
const auto& block_dim = 256;
const auto& grid_dim = (local_size + block_dim - 1) / block_dim;
hipLaunchKernelGGL(( one_hot_matrix_to_indices), dim3(grid_dim), dim3(block_dim), 0, stream,
local_height, local_width,
labels.ColShift(), labels.ColStride(),
local_labels.LockedBuffer(), local_labels.LDim(),
label_indices.data().get());
/// @todo The LBANN Aluminum interface doesn't gracefully handle
/// GPU data that is not TensorDataType.
El::mpi::AllReduce(label_indices.data().get(),
label_indices.size(),
El::mpi::MIN,
col_comm, syncInfo);
}
// Find top-k entries in each column of local prediction matrix
cuda::thrust::vector<entry<TensorDataType>> top_entries(local_width * k);
{
const auto& num_local_entries_per_col = ::max(local_height, k);
const auto& num_local_entries = local_width * num_local_entries_per_col;
const auto& block_dim = 256;
const auto& grid_dim = (num_local_entries + block_dim - 1) / block_dim;
cuda::thrust::vector<entry<TensorDataType>> local_entries(num_local_entries);
cuda::thrust::vector<El::Int> local_entries_cols(num_local_entries);
hipLaunchKernelGGL(( dense_matrix_to_sparse_vectors), dim3(grid_dim), dim3(block_dim), 0, stream,
num_local_entries_per_col, local_height, local_width, height,
predictions.ColShift(), predictions.ColStride(),
local_predictions.LockedBuffer(), local_predictions.LDim(),
local_entries.data().get(), num_local_entries_per_col);
hipLaunchKernelGGL(( fill_with_tensor_index), dim3(grid_dim), dim3(block_dim), 0, stream,
num_local_entries, local_width, num_local_entries_per_col,
local_entries_cols.data().get());
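    // Sort all local entries by value (entry_compare breaks ties by index),
    // then stable-sort by column id so each column's entries become a
    // contiguous, value-ordered run; the strided copy below then keeps only
    // the first k entries of every column.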
::thrust::sort_by_key(alloc.system(),
local_entries.begin(),
local_entries.end(),
local_entries_cols.begin(),
entry_compare<TensorDataType>());
::thrust::stable_sort_by_key(alloc.system(),
local_entries_cols.begin(),
local_entries_cols.end(),
local_entries.begin());
CHECK_CUDA(hipMemcpy2DAsync(top_entries.data().get(),
k * sizeof(entry<TensorDataType>),
local_entries.data().get(),
num_local_entries_per_col * sizeof(entry<TensorDataType>),
k * sizeof(entry<TensorDataType>),
local_width,
hipMemcpyDeviceToDevice,
stream));
}
// Find top-k entries in each column of global prediction matrix
if (col_comm_size > 1) {
const auto& num_entries_per_rank = local_width * k;
const auto& num_entries = col_comm_size * num_entries_per_rank;
const auto& block_dim = 256;
const auto& grid_dim = (num_entries + block_dim - 1) / block_dim;
if (col_comm_rank != col_comm_root) {
comm.gather(reinterpret_cast<El::byte*>(top_entries.data().get()),
top_entries.size() * sizeof(entry<TensorDataType>),
col_comm_root,
col_comm, syncInfo);
} else {
cuda::thrust::vector<entry<TensorDataType>> global_top_entries(num_entries);
cuda::thrust::vector<El::Int> global_top_entries_cols(num_entries);
comm.gather(reinterpret_cast<El::byte*>(top_entries.data().get()),
top_entries.size() * sizeof(entry<TensorDataType>),
reinterpret_cast<El::byte*>(global_top_entries.data().get()),
col_comm, syncInfo);
hipLaunchKernelGGL(( fill_with_tensor_index), dim3(grid_dim), dim3(block_dim), 0, stream,
num_entries, local_width, k, global_top_entries_cols.data().get());
::thrust::sort_by_key(alloc.system(),
global_top_entries.begin(),
global_top_entries.end(),
global_top_entries_cols.begin(),
entry_compare<TensorDataType>());
::thrust::stable_sort_by_key(alloc.system(),
global_top_entries_cols.begin(),
global_top_entries_cols.end(),
global_top_entries.begin());
CHECK_CUDA(hipMemcpy2DAsync(top_entries.data().get(),
k * sizeof(entry<TensorDataType>),
global_top_entries.data().get(),
col_comm_size * k * sizeof(entry<TensorDataType>),
k * sizeof(entry<TensorDataType>),
local_width,
hipMemcpyDeviceToDevice,
stream));
}
}
// Compute categorical accuracy
El::Zero(loss);
if (col_comm_rank == col_comm_root) {
const auto& num_entries = local_width * k;
const auto& block_dim = 256;
const auto& grid_dim = (num_entries + block_dim - 1) / block_dim;
hipLaunchKernelGGL(( compute_categorical_accuracy), dim3(grid_dim), dim3(block_dim), 0, stream,
k, local_width, height-1,
top_entries.data().get(), k,
label_indices.data().get(),
local_loss.Buffer(), local_loss.LDim());
}
}
} // namespace
template <typename TensorDataType, data_layout T_layout, El::Device Dev>
void top_k_categorical_accuracy_layer<TensorDataType, T_layout, Dev>::fp_compute() {
fp_gpu(*this->get_comm(),
this->m_k,
this->get_prev_activations(0),
this->get_prev_activations(1),
this->get_activations());
}
#define PROTO(T) \
template class top_k_categorical_accuracy_layer< \
T, data_layout::DATA_PARALLEL, El::Device::GPU>; \
template class top_k_categorical_accuracy_layer< \
T, data_layout::MODEL_PARALLEL, El::Device::GPU>
#define LBANN_INSTANTIATE_GPU_HALF
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
|
11c42abc52da55a091ea652bd9d75e76a43e0f8e.cu
|
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#define LBANN_TOP_K_CATEGORICAL_ACCURACY_LAYER_INSTANTIATE
#include "lbann/layers/loss/top_k_categorical_accuracy.hpp"
#include "lbann/utils/cuda.hpp"
#include "lbann/utils/exception.hpp"
#include "lbann/utils/distconv.hpp"
#include <thrust/sort.h>
#include <thrust/iterator/discard_iterator.h>
namespace lbann {
namespace {
/** Sparse vector entry. */
template <typename TensorDataType>
struct entry {
/** Vector entry value. */
TensorDataType value;
/** Vector entry index. */
El::Int index;
};
/** Comparison operation to sort sparse vector entries.
* Entries are sorted by value in decreasing order, with ties broken
* in favor of entries with smaller indices.
*/
template <typename TensorDataType>
struct entry_compare : ::thrust::binary_function<entry<TensorDataType>,entry<TensorDataType>,bool> {
__host__ __device__ bool operator()(const entry<TensorDataType>& a, const entry<TensorDataType>& b) const {
return a.value > b.value || (a.value == b.value && a.index < b.index);
}
};
/** Convert columns of a dense matrix into sparse vectors.
* The matrix and vectors are both distributed, so entry indices in
* the sparse vectors correspond to global row indices in the dense
* matrix.
*/
template <typename TensorDataType>
__global__ void dense_matrix_to_sparse_vectors(El::Int local_vector_size,
El::Int local_matrix_height,
El::Int local_matrix_width,
El::Int global_matrix_height,
El::Int global_matrix_col_shift,
El::Int global_matrix_col_stride,
const TensorDataType* __restrict__ local_matrix,
El::Int local_matrix_ldim,
entry<TensorDataType>* __restrict__ local_entries,
El::Int local_entries_ldim) {
const El::Int gid = threadIdx.x + blockIdx.x * blockDim.x;
const El::Int num_threads = blockDim.x * gridDim.x;
const El::Int num_local_entries = local_vector_size * local_matrix_width;
for (El::Int i = gid; i < num_local_entries; i += num_threads) {
const auto& local_row = i % local_vector_size;
const auto& local_col = i / local_vector_size;
auto& current_entry = local_entries[local_row + local_col * local_entries_ldim];
if (local_row < local_matrix_height) {
const auto& global_row = (global_matrix_col_shift
+ local_row * global_matrix_col_stride);
current_entry.value = local_matrix[local_row + local_col * local_matrix_ldim];
current_entry.index = global_row;
} else {
current_entry.value = -cuda::infinity<TensorDataType>();
current_entry.index = global_matrix_height;
}
}
}
/** Fill an array with a corresponding tensor index.
* Consider a d(1) x d(2) x ... x d(n) tensor with entry indices
* denoted with (i(1), ..., i(n)). This tensor is contiguous in
* memory with d(1) as the most major dimension and d(n) as the most
* minor (e.g. d(1) is the width and d(2) is the height for a
* column-major matrix). Given some k, this kernel sets each entry in
* the tensor to i(k). Using this notation:
* tensor_size = d(1) * ... * d(n)
* dim = d(k)
* dim_stride = d(k+1) * ... * d(n)
*/
__global__ void fill_with_tensor_index(El::Int tensor_size,
El::Int dim,
El::Int dim_stride,
El::Int* tensor) {
const El::Int gid = threadIdx.x + blockIdx.x * blockDim.x;
const El::Int num_threads = blockDim.x * gridDim.x;
for (El::Int i = gid; i < tensor_size; i += num_threads) {
tensor[i] = (i / dim_stride) % dim;
}
}
/** Get indices corresponding to one-hot matrix.
* Each column of the input matrix is interpreted as a one-hot
* vector. Note that we may get race conditions if a matrix column is
* not a one-hot vector.
*/
template <typename TensorDataType>
__global__ void one_hot_matrix_to_indices(El::Int local_height,
El::Int local_width,
El::Int global_matrix_col_shift,
El::Int global_matrix_col_stride,
const TensorDataType* __restrict__ local_matrix,
El::Int local_matrix_ldim,
El::Int* __restrict__ indices) {
const El::Int gid = threadIdx.x + blockIdx.x * blockDim.x;
const El::Int num_threads = blockDim.x * gridDim.x;
const El::Int local_size = local_height * local_width;
for (El::Int i = gid; i < local_size; i += num_threads) {
const auto& local_row = i % local_height;
const auto& local_col = i / local_height;
if (local_matrix[local_row + local_col * local_matrix_ldim] > TensorDataType(0.0)) {
const auto& global_row = (global_matrix_col_shift
+ local_row * global_matrix_col_stride);
indices[local_col] = global_row;
}
}
}
/** Compute categorical accuracy for each matrix column.
* Loss is one if the label index matches one of the top-k entries
* and is otherwise zero.
*/
template <typename TensorDataType>
__global__ void compute_categorical_accuracy(El::Int k,
El::Int width,
El::Int max_entry,
const entry<TensorDataType>* __restrict__ top_entries,
El::Int top_entries_ldim,
const El::Int* __restrict__ label_indices,
TensorDataType* __restrict__ loss,
El::Int loss_stride) {
const El::Int gid = threadIdx.x + blockIdx.x * blockDim.x;
const El::Int num_threads = blockDim.x * gridDim.x;
const El::Int num_entries = width * k;
for (El::Int i = gid; i < num_entries; i += num_threads) {
const auto& ind = i % k;
const auto& col = i / k;
const auto& label_index = label_indices[col];
if (top_entries[ind + col * top_entries_ldim].index == label_index
&& label_index <= max_entry) {
loss[col * loss_stride] = TensorDataType(1.0);
}
}
}
/** GPU implementation of top-k categorical accuracy layer forward prop. */
template <typename TensorDataType>
void fp_gpu(lbann_comm& comm,
El::Int k,
const El::AbstractDistMatrix<TensorDataType>& predictions,
const El::AbstractDistMatrix<TensorDataType>& labels,
El::AbstractDistMatrix<TensorDataType>& loss) {
#ifdef LBANN_HAS_DISTCONV
if (dc::evaluate_performance()) {
El::Zero(loss);
return;
}
#endif
// Local matrices
const auto& local_predictions = predictions.LockedMatrix();
const auto& local_labels = labels.LockedMatrix();
auto& local_loss = loss.Matrix();
const auto& height = predictions.Height();
const auto& local_height = local_predictions.Height();
const auto& local_width = local_predictions.Width();
// Trivial cases
if (k < 1) {
El::Zero(loss);
return;
} else if (k >= height) {
El::Fill(loss, El::TypeTraits<TensorDataType>::One());
return;
} else if (local_width < 1) {
return;
}
// Column communicator
auto&& col_comm = predictions.ColComm();
const auto& col_comm_rank = El::mpi::Rank(col_comm);
const auto& col_comm_size = El::mpi::Size(col_comm);
const auto& col_comm_root = loss.RowOwner(0);
// GPU objects
auto&& stream = El::GPUManager::Stream();
auto&& event = El::GPUManager::Event();
El::SyncInfo<El::Device::GPU> syncInfo{stream, event};
cuda::thrust::allocator<> alloc(stream);
// Get label indices
cuda::thrust::vector<El::Int> label_indices(local_width, height);
{
const auto& local_size = local_height * local_width;
const auto& block_dim = 256;
const auto& grid_dim = (local_size + block_dim - 1) / block_dim;
one_hot_matrix_to_indices<<<grid_dim, block_dim, 0, stream>>>(
local_height, local_width,
labels.ColShift(), labels.ColStride(),
local_labels.LockedBuffer(), local_labels.LDim(),
label_indices.data().get());
/// @todo The LBANN Aluminum interface doesn't gracefully handle
/// GPU data that is not TensorDataType.
El::mpi::AllReduce(label_indices.data().get(),
label_indices.size(),
El::mpi::MIN,
col_comm, syncInfo);
}
// Find top-k entries in each column of local prediction matrix
cuda::thrust::vector<entry<TensorDataType>> top_entries(local_width * k);
{
const auto& num_local_entries_per_col = std::max(local_height, k);
const auto& num_local_entries = local_width * num_local_entries_per_col;
const auto& block_dim = 256;
const auto& grid_dim = (num_local_entries + block_dim - 1) / block_dim;
cuda::thrust::vector<entry<TensorDataType>> local_entries(num_local_entries);
cuda::thrust::vector<El::Int> local_entries_cols(num_local_entries);
dense_matrix_to_sparse_vectors<<<grid_dim, block_dim, 0, stream>>>(
num_local_entries_per_col, local_height, local_width, height,
predictions.ColShift(), predictions.ColStride(),
local_predictions.LockedBuffer(), local_predictions.LDim(),
local_entries.data().get(), num_local_entries_per_col);
fill_with_tensor_index<<<grid_dim, block_dim, 0, stream>>>(
num_local_entries, local_width, num_local_entries_per_col,
local_entries_cols.data().get());
::thrust::sort_by_key(alloc.system(),
local_entries.begin(),
local_entries.end(),
local_entries_cols.begin(),
entry_compare<TensorDataType>());
::thrust::stable_sort_by_key(alloc.system(),
local_entries_cols.begin(),
local_entries_cols.end(),
local_entries.begin());
CHECK_CUDA(cudaMemcpy2DAsync(top_entries.data().get(),
k * sizeof(entry<TensorDataType>),
local_entries.data().get(),
num_local_entries_per_col * sizeof(entry<TensorDataType>),
k * sizeof(entry<TensorDataType>),
local_width,
cudaMemcpyDeviceToDevice,
stream));
}
// Find top-k entries in each column of global prediction matrix
if (col_comm_size > 1) {
const auto& num_entries_per_rank = local_width * k;
const auto& num_entries = col_comm_size * num_entries_per_rank;
const auto& block_dim = 256;
const auto& grid_dim = (num_entries + block_dim - 1) / block_dim;
if (col_comm_rank != col_comm_root) {
comm.gather(reinterpret_cast<El::byte*>(top_entries.data().get()),
top_entries.size() * sizeof(entry<TensorDataType>),
col_comm_root,
col_comm, syncInfo);
} else {
cuda::thrust::vector<entry<TensorDataType>> global_top_entries(num_entries);
cuda::thrust::vector<El::Int> global_top_entries_cols(num_entries);
comm.gather(reinterpret_cast<El::byte*>(top_entries.data().get()),
top_entries.size() * sizeof(entry<TensorDataType>),
reinterpret_cast<El::byte*>(global_top_entries.data().get()),
col_comm, syncInfo);
fill_with_tensor_index<<<grid_dim, block_dim, 0, stream>>>(
num_entries, local_width, k, global_top_entries_cols.data().get());
::thrust::sort_by_key(alloc.system(),
global_top_entries.begin(),
global_top_entries.end(),
global_top_entries_cols.begin(),
entry_compare<TensorDataType>());
::thrust::stable_sort_by_key(alloc.system(),
global_top_entries_cols.begin(),
global_top_entries_cols.end(),
global_top_entries.begin());
CHECK_CUDA(cudaMemcpy2DAsync(top_entries.data().get(),
k * sizeof(entry<TensorDataType>),
global_top_entries.data().get(),
col_comm_size * k * sizeof(entry<TensorDataType>),
k * sizeof(entry<TensorDataType>),
local_width,
cudaMemcpyDeviceToDevice,
stream));
}
}
// Compute categorical accuracy
El::Zero(loss);
if (col_comm_rank == col_comm_root) {
const auto& num_entries = local_width * k;
const auto& block_dim = 256;
const auto& grid_dim = (num_entries + block_dim - 1) / block_dim;
compute_categorical_accuracy<<<grid_dim, block_dim, 0, stream>>>(
k, local_width, height-1,
top_entries.data().get(), k,
label_indices.data().get(),
local_loss.Buffer(), local_loss.LDim());
}
}
} // namespace
template <typename TensorDataType, data_layout T_layout, El::Device Dev>
void top_k_categorical_accuracy_layer<TensorDataType, T_layout, Dev>::fp_compute() {
fp_gpu(*this->get_comm(),
this->m_k,
this->get_prev_activations(0),
this->get_prev_activations(1),
this->get_activations());
}
#define PROTO(T) \
template class top_k_categorical_accuracy_layer< \
T, data_layout::DATA_PARALLEL, El::Device::GPU>; \
template class top_k_categorical_accuracy_layer< \
T, data_layout::MODEL_PARALLEL, El::Device::GPU>
#define LBANN_INSTANTIATE_GPU_HALF
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
|
6be5d68e3f75a19eeb3d789d877a8d417794d361.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/native/TensorAdvancedIndexing.h>
#include <ATen/ATen.h>
#include <ATen/Dispatch.h>
#include <ATen/native/ScatterGatherChecks.h>
#include <ATen/native/ReduceOpsUtils.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/hip/detail/OffsetCalculator.cuh>
#include <ATen/hip/HIPContext.h>
#include <THH/THHAtomics.cuh>
namespace at { namespace native {
// The kernels are implemented on an opaque,
// self-aligned type of the correct size,
// to avoid redundant kernels for different types
// of the same size.
template <int N> struct alignas(N) OpaqueType { char data[N]; };
// essentially a rewrite of the legacy::launch_kernel parts
template <int nt, int vt, typename func_t>
C10_LAUNCH_BOUNDS_2(nt, vt)
__global__ void _scatter_gather_elementwise_kernel(int N, func_t f) {
constexpr int nv = nt * vt;
int idx = nv * blockIdx.x + threadIdx.x;
#pragma unroll
for (int i = 0; i < vt; ++i) {
if (idx < N) {
f(idx);
idx += nt;
}
}
}
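// Each block processes nt * vt elements: nt threads, each handling up to vt
// items spaced nt apart, so consecutive threads touch consecutive elements on
// every step. _launch_scatter_gather_kernel below sizes the grid accordingly
// as ceil(N / (nt * vt)) blocks.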
template <int nt, int vt, typename func_t>
static void _launch_scatter_gather_kernel(int64_t N, const func_t& f) {
TORCH_INTERNAL_ASSERT(N >= 0 && N <= std::numeric_limits<int32_t>::max());
if (N == 0) {
return;
}
dim3 block(nt);
dim3 grid((N + block.x * vt - 1) / (block.x * vt));
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( _scatter_gather_elementwise_kernel<nt, vt, func_t>), dim3(grid), dim3(block), 0, stream, N, f);
AT_CUDA_CHECK(hipGetLastError());
}
template <bool is_scatter_like, typename scalar_t>
struct _cuda_scatter_gather_internal_kernel {
template <typename func_t>
void operator() (
TensorIterator& iter,
int64_t index_size,
int64_t index_stride,
const func_t& f
) {
if (iter.numel() == 0) {
return;
}
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
_cuda_scatter_gather_internal_kernel<is_scatter_like, scalar_t>()(
sub_iter, index_size, index_stride, f
);
}
return;
}
char* self_ptr = (char*)iter.data_ptr(0);
char* src_ptr = (char*)iter.data_ptr(1);
char* index_ptr = (char*)iter.data_ptr(2);
auto offset_calc = make_offset_calculator<3>(iter);
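    // offset_calc maps a flat element index to byte offsets into the three
    // iterator operands: offsets[0] -> self, offsets[1] -> src,
    // offsets[2] -> index. idx_dim (read from the index tensor) then selects
    // the position along the scatter/gather dimension via index_stride.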
auto loop = [=]C10_DEVICE(int i) {
auto offsets = offset_calc.get(i);
int64_t idx_dim = *(int64_t*)(index_ptr + offsets[2]);
CUDA_KERNEL_ASSERT(idx_dim >= 0 && idx_dim < index_size
&& "index out of bounds");
char* self_data = self_ptr + offsets[0];
char* src_data = src_ptr + offsets[1];
f(
(scalar_t*)self_data + (is_scatter_like ? idx_dim * index_stride : 0),
(scalar_t*)src_data + (is_scatter_like ? 0 : idx_dim * index_stride)
);
};
_launch_scatter_gather_kernel<num_threads, thread_work_size>(iter.numel(), loop);
}
}; // struct _cuda_scatter_gather_internal_kernel
template <bool is_scatter_like = true, bool cast_to_opaque = true>
struct cuda_scatter_gather_base_kernel {
template <typename func_t>
void operator()(
Tensor& self, int64_t dim,
const Tensor& index, const Tensor& src,
const std::string& method_name,
const func_t& f
) {
// no-op if index is empty
if (index.numel() == 0) {
return;
}
dim = maybe_wrap_dim(dim, self.dim());
scatter_gather_dtype_check(method_name, self, index, src);
if (is_scatter_like) {
scatter_shape_check(self, dim, index, src);
}
else {
gather_shape_check(self, dim, index, src);
}
auto index_sizes = ensure_nonempty_vec(index.sizes().vec());
auto self_strides = ensure_nonempty_vec(self.strides().vec());
auto src_strides = ensure_nonempty_vec(src.strides().vec());
// restride self and src such that
// self.shape = src.shape = index.shape
//
// restride stride[dim] such that
// if (is_scatter_like) self.stride[dim] = 0
// else src.stride[dim] = 0
auto self_restrided = is_scatter_like ?
restride_dim(self, dim, index_sizes)
: self.as_strided(index_sizes, self_strides);
auto src_restrided = is_scatter_like ?
src.as_strided(index_sizes, src_strides)
: restride_dim(src, dim, index_sizes);
auto iter = TensorIterator();
iter.check_all_same_dtype(false);
iter.dont_resize_outputs();
iter.add_output(self_restrided);
iter.add_input(src_restrided);
iter.add_input(index);
iter.build();
auto self_dim_stride = ensure_nonempty_stride(self, dim);
auto self_dim_size = ensure_nonempty_size(self, dim);
auto src_dim_stride = ensure_nonempty_stride(src, dim);
auto src_dim_size = ensure_nonempty_size(src, dim);
auto index_size = is_scatter_like ? self_dim_size : src_dim_size;
auto index_stride = is_scatter_like ? self_dim_stride : src_dim_stride;
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
iter.dtype(),
method_name, [&] {
using dtype = typename std::conditional<cast_to_opaque,
OpaqueType<sizeof(scalar_t)>, scalar_t>::type;
_cuda_scatter_gather_internal_kernel<is_scatter_like, dtype>()(
iter, index_size, index_stride, f
);
}
);
}
}; // struct cuda_scatter_gather_base_kernel
template <typename scalar_t>
struct _cuda_scatter_fill_internal_kernel {
template <typename func_t>
void operator()(
TensorIterator& iter,
scalar_t src_val,
int64_t index_size,
int64_t index_stride,
const func_t& f
) {
if (iter.numel() == 0) {
return;
}
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
_cuda_scatter_fill_internal_kernel<scalar_t>()(
sub_iter, src_val, index_size, index_stride, f
);
}
return;
}
char* self_ptr = (char*)iter.data_ptr(0);
char* index_ptr = (char*)iter.data_ptr(1);
auto offset_calc = make_offset_calculator<2>(iter);
auto loop = [=]C10_DEVICE(int i) {
auto offsets = offset_calc.get(i);
int64_t idx_dim = *(int64_t*)(index_ptr + offsets[1]);
CUDA_KERNEL_ASSERT(idx_dim >= 0 && idx_dim < index_size
&& "index out of bounds"
);
char* self_data = self_ptr + offsets[0];
f(
(scalar_t*)self_data + idx_dim * index_stride,
&src_val
);
};
_launch_scatter_gather_kernel<num_threads, thread_work_size>(iter.numel(), loop);
}
}; // struct _cuda_scatter_fill_internal_kernel
template <bool cast_to_opaque = true>
struct cuda_scatter_fill_base_kernel {
template <typename func_t>
void operator()(
Tensor& self, int64_t dim,
const Tensor& index, Scalar src,
const std::string& method_name,
const func_t& f
) {
// no-op if index is empty
if (index.numel() == 0) {
return;
}
dim = maybe_wrap_dim(dim, self.dim());
scatter_gather_dtype_check(method_name, self, index);
scatter_shape_check(self, dim, index);
auto index_sizes = ensure_nonempty_vec(index.sizes().vec());
// restride self such that
// self.shape = index.shape and
// self.stride[dim] = 0
auto self_restrided = restride_dim(self, dim, index_sizes);
auto iter = TensorIterator();
iter.check_all_same_dtype(false);
iter.dont_resize_outputs();
iter.add_output(self_restrided);
iter.add_input(index);
iter.build();
auto index_size = ensure_nonempty_size(self, dim);
auto index_stride = ensure_nonempty_stride(self, dim);
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
iter.dtype(),
method_name, [&] {
using dtype = typename std::conditional<cast_to_opaque,
OpaqueType<sizeof(scalar_t)>, scalar_t>::type;
auto src_scalar_val = src.to<scalar_t>();
auto src_val = *(dtype*)&src_scalar_val;
_cuda_scatter_fill_internal_kernel<dtype>()(
iter, src_val, index_size, index_stride, f
);
}
);
}
}; // struct cuda_scatter_fill_base_kernel
void gather_cuda_kernel(Tensor& result, const Tensor& self, int64_t dim, const Tensor& index) {
cuda_scatter_gather_base_kernel</*is_scatter_like=*/false>()(
result, dim, index, self,
"gather_out_cuda", []C10_DEVICE(auto* lhs, const auto* rhs) {
*lhs = *rhs;
}
);
}
void scatter_cuda_kernel(Tensor& self, int64_t dim, const Tensor& index, const Tensor& src) {
cuda_scatter_gather_base_kernel<>()(
self, dim, index, src,
"scatter_cuda_", []C10_DEVICE(auto* lhs, const auto* rhs) {
*lhs = *rhs;
}
);
}
void scatter_fill_cuda_kernel(Tensor& self, int64_t dim, const Tensor& index, Scalar src) {
cuda_scatter_fill_base_kernel<>()(
self, dim, index, src,
"scatter_fill_cuda_", []C10_DEVICE(auto* lhs, const auto* rhs) {
*lhs = *rhs;
}
);
}
void scatter_add_cuda_kernel(Tensor& self, int64_t dim, const Tensor& index, const Tensor& src) {
cuda_scatter_gather_base_kernel</*is_scatter_like=*/true, /*cast_to_opaque=*/false>()(
self, dim, index, src,
"scatter_add_cuda_", []C10_DEVICE(auto* lhs, const auto* rhs) {
gpuAtomicAdd(lhs, *rhs);
}
);
}
REGISTER_DISPATCH(gather_stub, &gather_cuda_kernel);
REGISTER_DISPATCH(scatter_stub, &scatter_cuda_kernel);
REGISTER_DISPATCH(scatter_fill_stub, &scatter_fill_cuda_kernel);
REGISTER_DISPATCH(scatter_add_stub, &scatter_add_cuda_kernel);
}} // namespace at::native
|
6be5d68e3f75a19eeb3d789d877a8d417794d361.cu
|
#include <ATen/native/TensorAdvancedIndexing.h>
#include <ATen/ATen.h>
#include <ATen/Dispatch.h>
#include <ATen/native/ScatterGatherChecks.h>
#include <ATen/native/ReduceOpsUtils.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THCAtomics.cuh>
namespace at { namespace native {
// The kernels are implemented on an opaque,
// self-aligned type of the correct size,
// to avoid redundant kernels for different types
// of the same size.
template <int N> struct alignas(N) OpaqueType { char data[N]; };
// essentially a rewrite of the legacy::launch_kernel parts
template <int nt, int vt, typename func_t>
C10_LAUNCH_BOUNDS_2(nt, vt)
__global__ void _scatter_gather_elementwise_kernel(int N, func_t f) {
constexpr int nv = nt * vt;
int idx = nv * blockIdx.x + threadIdx.x;
#pragma unroll
for (int i = 0; i < vt; ++i) {
if (idx < N) {
f(idx);
idx += nt;
}
}
}
template <int nt, int vt, typename func_t>
static void _launch_scatter_gather_kernel(int64_t N, const func_t& f) {
TORCH_INTERNAL_ASSERT(N >= 0 && N <= std::numeric_limits<int32_t>::max());
if (N == 0) {
return;
}
dim3 block(nt);
dim3 grid((N + block.x * vt - 1) / (block.x * vt));
auto stream = at::cuda::getCurrentCUDAStream();
_scatter_gather_elementwise_kernel<nt, vt, func_t><<<grid, block, 0, stream>>>(N, f);
AT_CUDA_CHECK(cudaGetLastError());
}
template <bool is_scatter_like, typename scalar_t>
struct _cuda_scatter_gather_internal_kernel {
template <typename func_t>
void operator() (
TensorIterator& iter,
int64_t index_size,
int64_t index_stride,
const func_t& f
) {
if (iter.numel() == 0) {
return;
}
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
_cuda_scatter_gather_internal_kernel<is_scatter_like, scalar_t>()(
sub_iter, index_size, index_stride, f
);
}
return;
}
char* self_ptr = (char*)iter.data_ptr(0);
char* src_ptr = (char*)iter.data_ptr(1);
char* index_ptr = (char*)iter.data_ptr(2);
auto offset_calc = make_offset_calculator<3>(iter);
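// offsets[0], offsets[1] and offsets[2] are the byte offsets of self, src and index for element i (same order as the add_output/add_input calls below)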
auto loop = [=]C10_DEVICE(int i) {
auto offsets = offset_calc.get(i);
int64_t idx_dim = *(int64_t*)(index_ptr + offsets[2]);
CUDA_KERNEL_ASSERT(idx_dim >= 0 && idx_dim < index_size
&& "index out of bounds");
char* self_data = self_ptr + offsets[0];
char* src_data = src_ptr + offsets[1];
f(
(scalar_t*)self_data + (is_scatter_like ? idx_dim * index_stride : 0),
(scalar_t*)src_data + (is_scatter_like ? 0 : idx_dim * index_stride)
);
};
_launch_scatter_gather_kernel<num_threads, thread_work_size>(iter.numel(), loop);
}
}; // struct _cuda_scatter_gather_internal_kernel
template <bool is_scatter_like = true, bool cast_to_opaque = true>
struct cuda_scatter_gather_base_kernel {
template <typename func_t>
void operator()(
Tensor& self, int64_t dim,
const Tensor& index, const Tensor& src,
const std::string& method_name,
const func_t& f
) {
// no-op if index is empty
if (index.numel() == 0) {
return;
}
dim = maybe_wrap_dim(dim, self.dim());
scatter_gather_dtype_check(method_name, self, index, src);
if (is_scatter_like) {
scatter_shape_check(self, dim, index, src);
}
else {
gather_shape_check(self, dim, index, src);
}
auto index_sizes = ensure_nonempty_vec(index.sizes().vec());
auto self_strides = ensure_nonempty_vec(self.strides().vec());
auto src_strides = ensure_nonempty_vec(src.strides().vec());
// restride self and src such that
// self.shape = src.shape = index.shape
//
// restride stride[dim] such that
// if (is_scatter_like) self.stride[dim] = 0
// else src.stride[dim] = 0
auto self_restrided = is_scatter_like ?
restride_dim(self, dim, index_sizes)
: self.as_strided(index_sizes, self_strides);
auto src_restrided = is_scatter_like ?
src.as_strided(index_sizes, src_strides)
: restride_dim(src, dim, index_sizes);
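// the zeroed stride lets the TensorIterator walk index.shape without advancing along dim; the kernel adds idx_dim * stride[dim] explicitly to reach the real element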
auto iter = TensorIterator();
iter.check_all_same_dtype(false);
iter.dont_resize_outputs();
iter.add_output(self_restrided);
iter.add_input(src_restrided);
iter.add_input(index);
iter.build();
auto self_dim_stride = ensure_nonempty_stride(self, dim);
auto self_dim_size = ensure_nonempty_size(self, dim);
auto src_dim_stride = ensure_nonempty_stride(src, dim);
auto src_dim_size = ensure_nonempty_size(src, dim);
auto index_size = is_scatter_like ? self_dim_size : src_dim_size;
auto index_stride = is_scatter_like ? self_dim_stride : src_dim_stride;
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
iter.dtype(),
method_name, [&] {
using dtype = typename std::conditional<cast_to_opaque,
OpaqueType<sizeof(scalar_t)>, scalar_t>::type;
_cuda_scatter_gather_internal_kernel<is_scatter_like, dtype>()(
iter, index_size, index_stride, f
);
}
);
}
}; // struct cuda_scatter_gather_base_kernel
template <typename scalar_t>
struct _cuda_scatter_fill_internal_kernel {
template <typename func_t>
void operator()(
TensorIterator& iter,
scalar_t src_val,
int64_t index_size,
int64_t index_stride,
const func_t& f
) {
if (iter.numel() == 0) {
return;
}
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
_cuda_scatter_fill_internal_kernel<scalar_t>()(
sub_iter, src_val, index_size, index_stride, f
);
}
return;
}
char* self_ptr = (char*)iter.data_ptr(0);
char* index_ptr = (char*)iter.data_ptr(1);
auto offset_calc = make_offset_calculator<2>(iter);
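// offsets[0] is the byte offset into self, offsets[1] the byte offset into index (matching the add_output/add_input order above)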
auto loop = [=]C10_DEVICE(int i) {
auto offsets = offset_calc.get(i);
int64_t idx_dim = *(int64_t*)(index_ptr + offsets[1]);
CUDA_KERNEL_ASSERT(idx_dim >= 0 && idx_dim < index_size
&& "index out of bounds"
);
char* self_data = self_ptr + offsets[0];
f(
(scalar_t*)self_data + idx_dim * index_stride,
&src_val
);
};
_launch_scatter_gather_kernel<num_threads, thread_work_size>(iter.numel(), loop);
}
}; // struct _cuda_scatter_fill_internal_kernel
template <bool cast_to_opaque = true>
struct cuda_scatter_fill_base_kernel {
template <typename func_t>
void operator()(
Tensor& self, int64_t dim,
const Tensor& index, Scalar src,
const std::string& method_name,
const func_t& f
) {
// no-op if index is empty
if (index.numel() == 0) {
return;
}
dim = maybe_wrap_dim(dim, self.dim());
scatter_gather_dtype_check(method_name, self, index);
scatter_shape_check(self, dim, index);
auto index_sizes = ensure_nonempty_vec(index.sizes().vec());
// restride self such that
// self.shape = index.shape and
// self.stride[dim] = 0
auto self_restrided = restride_dim(self, dim, index_sizes);
auto iter = TensorIterator();
iter.check_all_same_dtype(false);
iter.dont_resize_outputs();
iter.add_output(self_restrided);
iter.add_input(index);
iter.build();
auto index_size = ensure_nonempty_size(self, dim);
auto index_stride = ensure_nonempty_stride(self, dim);
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
iter.dtype(),
method_name, [&] {
using dtype = typename std::conditional<cast_to_opaque,
OpaqueType<sizeof(scalar_t)>, scalar_t>::type;
auto src_scalar_val = src.to<scalar_t>();
auto src_val = *(dtype*)&src_scalar_val;
_cuda_scatter_fill_internal_kernel<dtype>()(
iter, src_val, index_size, index_stride, f
);
}
);
}
}; // struct cuda_scatter_fill_base_kernel
void gather_cuda_kernel(Tensor& result, const Tensor& self, int64_t dim, const Tensor& index) {
cuda_scatter_gather_base_kernel</*is_scatter_like=*/false>()(
result, dim, index, self,
"gather_out_cuda", []C10_DEVICE(auto* lhs, const auto* rhs) {
*lhs = *rhs;
}
);
}
void scatter_cuda_kernel(Tensor& self, int64_t dim, const Tensor& index, const Tensor& src) {
cuda_scatter_gather_base_kernel<>()(
self, dim, index, src,
"scatter_cuda_", []C10_DEVICE(auto* lhs, const auto* rhs) {
*lhs = *rhs;
}
);
}
void scatter_fill_cuda_kernel(Tensor& self, int64_t dim, const Tensor& index, Scalar src) {
cuda_scatter_fill_base_kernel<>()(
self, dim, index, src,
"scatter_fill_cuda_", []C10_DEVICE(auto* lhs, const auto* rhs) {
*lhs = *rhs;
}
);
}
void scatter_add_cuda_kernel(Tensor& self, int64_t dim, const Tensor& index, const Tensor& src) {
cuda_scatter_gather_base_kernel</*is_scatter_like=*/true, /*cast_to_opaque=*/false>()(
self, dim, index, src,
"scatter_add_cuda_", []C10_DEVICE(auto* lhs, const auto* rhs) {
gpuAtomicAdd(lhs, *rhs);
}
);
}
REGISTER_DISPATCH(gather_stub, &gather_cuda_kernel);
REGISTER_DISPATCH(scatter_stub, &scatter_cuda_kernel);
REGISTER_DISPATCH(scatter_fill_stub, &scatter_fill_cuda_kernel);
REGISTER_DISPATCH(scatter_add_stub, &scatter_add_cuda_kernel);
}} // namespace at::native
|
c6b3d92ffd040a91a8fb9129338592f6221f23d0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include<iostream>
#include <hip/hip_runtime.h>
#define N 16
__global__ void sumThredBlk(int* dA) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
dA[index] = threadIdx.x + blockIdx.x;
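// each element stores threadIdx.x + blockIdx.x, so the printed array shows how threads map onto blocks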
}
int main(void) {
int* hA; // host copy of the array
int* dA; // device copy of the array
int size = N * sizeof(int);
// Alloc space for the device copy dA
hipMalloc((void**)& dA, size);
// Alloc space for the host copy hA
hA = (int*)malloc(size);
// Copy inputs to device
hipMemcpy(dA, hA, size, hipMemcpyHostToDevice);
// Launch sumThredBlk() kernel on GPU with 2 blocks and 8 threads
hipLaunchKernelGGL(( sumThredBlk), dim3(2),dim3(8), 0, 0, dA);
hipDeviceSynchronize();
// Copy result back to host
hipMemcpy(hA, dA, size, hipMemcpyDeviceToHost);
// prints the dA array
for (int i = 0; i < N; i++)
{
std::printf("%d ", hA[i]);
}
// Cleanup
free(hA);
hipFree(dA);
return 0;
}
|
c6b3d92ffd040a91a8fb9129338592f6221f23d0.cu
|
#include<iostream>
#include <cuda.h>
#define N 16
__global__ void sumThredBlk(int* dA) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
dA[index] = threadIdx.x + blockIdx.x;
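// each element stores threadIdx.x + blockIdx.x, so the printed array shows how threads map onto blocks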
}
int main(void) {
int* hA; // host copy of the array
int* dA; // device copy of the array
int size = N * sizeof(int);
// Alloc space for the device copy dA
cudaMalloc((void**)& dA, size);
// Alloc space for the host copy hA
hA = (int*)malloc(size);
// Copy inputs to device
cudaMemcpy(dA, hA, size, cudaMemcpyHostToDevice);
// Launch sumThredBlk() kernel on GPU with 2 blocks and 8 threads
sumThredBlk<<<2,8>>>(dA);
cudaDeviceSynchronize();
// Copy result back to host
cudaMemcpy(hA, dA, size, cudaMemcpyDeviceToHost);
// prints the dA array
for (int i = 0; i < N; i++)
{
std::printf("%d ", hA[i]);
}
// Cleanup
free(hA);
cudaFree(dA);
return 0;
}
|
25d01beaf6817737a90f6ea9ebeebb8304097341.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil.h>
// includes, kernels
#include "CUDA_renderBase.h"
#include "CUDA_typeRange.h"
#include "CUDA_matrix_math.h"
#include "CUDA_zbuffer_math.h"
#include "vtkType.h"
//#define USE_TIMER
#define BLOCK_DIM2D 8 // this must be set to 4 or more
#define ACC(X,Y,Z) ( ( (Z)*(sizeX)*(sizeY) ) + ( (Y)*(sizeX) ) + (X) )
#define SQR(X) ((X) * (X) )
__constant__ cudaRendererInformation cRenInfo;
__constant__ cudaVolumeInformation cVolInfo;
__constant__ float colorTF[256*3];
__constant__ float alphaTF[256];
template <typename T>
__device__ T CUDAkernel_InterpolateNN(T * sourceData,
float posX,
float posY,
float posZ){
return sourceData[(int)(__float2int_rn(posZ)*cVolInfo.VolumeSize.x*cVolInfo.VolumeSize.y+__float2int_rn(posY)*cVolInfo.VolumeSize.x+__float2int_rn(posX))];
}
template <typename T>
__device__ T CUDAkernel_InterpolateTrilinear(T * sourceData,
float posX,
float posY,
float posZ){
float fracX=posX-(int)posX;
float fracY=posY-(int)posY;
float fracZ=posZ-(int)posZ;
float revX=1-fracX;
float revY=1-fracY;
float revZ=1-fracZ;
int base=(int)((int)(posZ)*cVolInfo.VolumeSize.x*cVolInfo.VolumeSize.y+(int)(posY)*cVolInfo.VolumeSize.x+(int)(posX));
return ((T) (revX*(revY*(revZ* (sourceData)[(int)(base)]+
fracZ* (sourceData)[(int)(base+cVolInfo.VolumeSize.x*cVolInfo.VolumeSize.y)])+
fracY*(revZ* (sourceData)[(int)(base+cVolInfo.VolumeSize.x)]+
fracZ* (sourceData)[(int)(base+cVolInfo.VolumeSize.x*cVolInfo.VolumeSize.y+cVolInfo.VolumeSize.x)]))+
fracX*(revY*(revZ* (sourceData)[(int)(base+1)]+
fracZ* (sourceData)[(int)(base+cVolInfo.VolumeSize.x*cVolInfo.VolumeSize.y+1)])+
fracY*(revZ* (sourceData)[(int)(base+cVolInfo.VolumeSize.x+1)]+
fracZ* (sourceData)[(int)(base+cVolInfo.VolumeSize.x*cVolInfo.VolumeSize.y+cVolInfo.VolumeSize.x+1)])))
);
}
template <typename T>
__device__ T CUDAkernel_Interpolate(T * sourceData,
float posX,
float posY,
float posZ){
if(cRenInfo.interpolationMethod == 0){
return CUDAkernel_InterpolateNN(sourceData, posX, posY, posZ);
}else if(cRenInfo.interpolationMethod == 1){
return CUDAkernel_InterpolateTrilinear(sourceData, posX, posY, posZ);
}
return 0;
}
#include "CUDA_renderRayCastComposite.h"
#include "CUDA_renderRayCastCompositeShaded.h"
#include "CUDA_renderRayCastMIP.h"
#include "CUDA_renderRayCastIsosurface.h"
template <typename T>
__device__ void CUDAkernel_RayCast(cudaRendererInformation& renInfo,
cudaVolumeInformation& volInfo,
float* colorTF,
float* alphaTF,
float3* s_rayMap,
float2* s_minmaxTrace,
float3* s_clippingPoints,
int tempacc,
int xIndex,
int yIndex){
if(renInfo.rayCastingMethod==0){
CUDAkernel_RayCastIsosurface<T>(renInfo, volInfo, colorTF, alphaTF, s_rayMap, s_minmaxTrace, s_clippingPoints, tempacc, xIndex, yIndex);
}else if(renInfo.rayCastingMethod==1){
CUDAkernel_RayCastMIP<T>(renInfo, volInfo, colorTF, alphaTF, s_rayMap, s_minmaxTrace, s_clippingPoints, tempacc, xIndex, yIndex);
}else if(renInfo.rayCastingMethod==2){
CUDAkernel_RayCastComposite<T>(renInfo, volInfo, colorTF, alphaTF, s_rayMap, s_minmaxTrace, s_clippingPoints, tempacc, xIndex, yIndex);
}else if(renInfo.rayCastingMethod==3){
CUDAkernel_RayCastCompositeShaded<T>(renInfo, volInfo, colorTF, alphaTF, s_rayMap, s_minmaxTrace, s_clippingPoints, tempacc, xIndex, yIndex);
}
}
__device__ void CUDAkernel_SetRayMapVolumeRendering(long int base, float3* rayMap, float3* clippingPoints, long int index, float* lensMap, int xIndex, int yIndex){
float3 start;
float3 end;
start.x=cRenInfo.CameraRayStart.x+
(float)xIndex/(cRenInfo.ActualResolution.x-1)*cRenInfo.CameraRayStartX.x+
(float)yIndex/(cRenInfo.ActualResolution.y-1)*cRenInfo.CameraRayStartY.x;
start.y=cRenInfo.CameraRayStart.y+
(float)xIndex/(cRenInfo.ActualResolution.x-1)*cRenInfo.CameraRayStartX.y+
(float)yIndex/(cRenInfo.ActualResolution.y-1)*cRenInfo.CameraRayStartY.y;
start.z=cRenInfo.CameraRayStart.z+
(float)xIndex/(cRenInfo.ActualResolution.x-1)*cRenInfo.CameraRayStartX.z+
(float)yIndex/(cRenInfo.ActualResolution.y-1)*cRenInfo.CameraRayStartY.z;
end.x=cRenInfo.CameraRayEnd.x+
(float)xIndex/(cRenInfo.ActualResolution.x-1)*cRenInfo.CameraRayEndX.x+
(float)yIndex/(cRenInfo.ActualResolution.y-1)*cRenInfo.CameraRayEndY.x;
end.y=cRenInfo.CameraRayEnd.y+
(float)xIndex/(cRenInfo.ActualResolution.x-1)*cRenInfo.CameraRayEndX.y+
(float)yIndex/(cRenInfo.ActualResolution.y-1)*cRenInfo.CameraRayEndY.y;
end.z=cRenInfo.CameraRayEnd.z+
(float)xIndex/(cRenInfo.ActualResolution.x-1)*cRenInfo.CameraRayEndX.z+
(float)yIndex/(cRenInfo.ActualResolution.y-1)*cRenInfo.CameraRayEndY.z;
rayMap[base*2].x=start.x;
rayMap[base*2].y=start.y;
rayMap[base*2].z=start.z;
rayMap[base*2+1].x=end.x-start.x;
rayMap[base*2+1].y=end.y-start.y;
rayMap[base*2+1].z=end.z-start.z;
rayMap[base*2]=MatMul(cVolInfo.Transform, rayMap[base*2]);
rayMap[base*2+1]=MatMul(cVolInfo.Transform, rayMap[base*2+1], 0.0f);
clippingPoints[base*2].x=rayMap[base*2].x;
clippingPoints[base*2].y=rayMap[base*2].y;
clippingPoints[base*2].z=rayMap[base*2].z;
clippingPoints[base*2+1].x=rayMap[base*2].x+rayMap[base*2+1].x;
clippingPoints[base*2+1].y=rayMap[base*2].y+rayMap[base*2+1].y;
clippingPoints[base*2+1].z=rayMap[base*2].z+rayMap[base*2+1].z;
float getmax = fabs(rayMap[base*2+1].x);
if(fabs(rayMap[base*2+1].y)>getmax) getmax = fabs(rayMap[base*2+1].y);
if(fabs(rayMap[base*2+1].z)>getmax) getmax = fabs(rayMap[base*2+1].z);
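// scale the direction so its largest absolute component becomes 1: one ray step advances at most one voxel per axis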
if(getmax!=0){
float temp= 1.0f/getmax;
rayMap[base*2+1].x*=temp;
rayMap[base*2+1].y*=temp;
rayMap[base*2+1].z*=temp;
}
}
__device__ void CUDAkernel_CalculateMinmax(long int tempacc, float3* rayMap, float2* minmax, int xIndex, int yIndex){
float test;
minmax[tempacc].x=-100000.0f;
minmax[tempacc].y=100000.0f;
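// slab test: clip the ray against each pair of ROI planes and keep the tightest [entry, exit] parameter interval in minmax[tempacc]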
if(rayMap[tempacc*2+1].x > 0){
minmax[tempacc].y = ( ((cVolInfo.maxROI.x-2)-rayMap[tempacc*2].x)/rayMap[tempacc*2+1].x );
minmax[tempacc].x = ( ((cVolInfo.minROI.x+2)-rayMap[tempacc*2].x)/rayMap[tempacc*2+1].x );
}
else if(rayMap[tempacc*2+1].x < 0){
minmax[tempacc].x = ( ((cVolInfo.maxROI.x-2)-rayMap[tempacc*2].x)/rayMap[tempacc*2+1].x );
minmax[tempacc].y = ( ((cVolInfo.minROI.x+2)-rayMap[tempacc*2].x)/rayMap[tempacc*2+1].x );
}
if(rayMap[tempacc*2+1].y > 0){
test = ( ((cVolInfo.maxROI.y-2)-rayMap[tempacc*2].y)/rayMap[tempacc*2+1].y );
if( test < minmax[tempacc].y){
minmax[tempacc].y = test;
}
test = ( ((cVolInfo.minROI.y+2)-rayMap[tempacc*2].y)/rayMap[tempacc*2+1].y );
if( test > minmax[tempacc].x){
minmax[tempacc].x = test;
}
}
else if(rayMap[tempacc*2+1].y < 0){
test = ( ((cVolInfo.maxROI.y-2)-rayMap[tempacc*2].y)/rayMap[tempacc*2+1].y );
if( test > minmax[tempacc].x){
minmax[tempacc].x = test;
}
test = ( ((cVolInfo.minROI.y+2)-rayMap[tempacc*2].y)/rayMap[tempacc*2+1].y );
if( test < minmax[tempacc].y){
minmax[tempacc].y = test;
}
}
if(rayMap[tempacc*2+1].z > 0){
test = ( ((cVolInfo.maxROI.z-2)-rayMap[tempacc*2].z)/rayMap[tempacc*2+1].z );
if( test < minmax[tempacc].y){
minmax[tempacc].y = test;
}
test = ( ((cVolInfo.minROI.z+2)-rayMap[tempacc*2].z)/rayMap[tempacc*2+1].z );
if( test > minmax[tempacc].x){
minmax[tempacc].x = test;
}
}
else if(rayMap[tempacc*2+1].z < 0){
test = ( ((cVolInfo.maxROI.z-2)-rayMap[tempacc*2].z)/rayMap[tempacc*2+1].z );
if( test > minmax[tempacc].x){
minmax[tempacc].x = test;
}
test = ( ((cVolInfo.minROI.z+2)-rayMap[tempacc*2].z)/rayMap[tempacc*2+1].z );
if( test < minmax[tempacc].y){
minmax[tempacc].y = test;
}
}
minmax[tempacc].x-=2;
minmax[tempacc].y+=2;
float3 zVec;
float3 normalVec;
float3 newOrigin;
zVec.x=0;
zVec.y=0;
zVec.z=1;
normalVec=MatMul(cVolInfo.SliceMatrix, zVec, 0.0f);
newOrigin=MatMul(cVolInfo.SliceMatrix, make_float3(0,0,0), 1.0f);
float3 transformedOrigin;
float3 transformedVector;
transformedVector=MatMul(cVolInfo.OrientationMatrix, rayMap[2*tempacc+1], 0.0f);
transformedOrigin=MatMul(cVolInfo.OrientationMatrix, rayMap[2*tempacc], 1.0f);
float length=sqrt(normalVec.x*normalVec.x+
normalVec.y*normalVec.y+
normalVec.z*normalVec.z);
normalVec.x/=length;
normalVec.y/=length;
normalVec.z/=length;
float3 relPos;
relPos.x=newOrigin.x-transformedOrigin.x;
relPos.y=newOrigin.y-transformedOrigin.y;
relPos.z=newOrigin.z-transformedOrigin.z;
float dot = (transformedVector.x*normalVec.x+
transformedVector.y*normalVec.y+
transformedVector.z*normalVec.z);
float unit=(relPos.x*normalVec.x+
relPos.y*normalVec.y+
relPos.z*normalVec.z)/dot;
if(dot>=0){
if(cVolInfo.VolumeRenderDirection == 1){
if(unit>minmax[tempacc].x)minmax[tempacc].x=unit;
}else if(cVolInfo.VolumeRenderDirection == 2){
if(unit<minmax[tempacc].y)minmax[tempacc].y=unit;
}
}else{
if(cVolInfo.VolumeRenderDirection == 1){
if(unit<minmax[tempacc].y)minmax[tempacc].y=unit;
}else if(cVolInfo.VolumeRenderDirection == 2){
if(unit>minmax[tempacc].x)minmax[tempacc].x=unit;
}
}
}
template <typename T>
__global__ void CUDAkernel_renderBase_calculateShadeField()
{
int xIndex = (blockDim.x*blockIdx.x + threadIdx.x) % (int)cVolInfo.VolumeSize.x;
int yIndex = (blockDim.x*blockIdx.x + threadIdx.x) / (int)cVolInfo.VolumeSize.x;
int zIndex = blockDim.y*blockIdx.y+ threadIdx.y;
long int index = (xIndex+yIndex*cVolInfo.VolumeSize.x+zIndex*cVolInfo.VolumeSize.x*cVolInfo.VolumeSize.y);
float3 tempShade;
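// central-difference estimate of the volume gradient, written to cVolInfo.shadeField below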
if(xIndex>0 && xIndex < cVolInfo.VolumeSize.x-1 && yIndex>0 && yIndex < cVolInfo.VolumeSize.y-1 && zIndex>0 && zIndex < cVolInfo.VolumeSize.z-1){
tempShade.x = (float)((T*)cVolInfo.SourceData)[(int)(__float2int_rn(zIndex)*cVolInfo.VolumeSize.x*cVolInfo.VolumeSize.y+__float2int_rn(yIndex)*cVolInfo.VolumeSize.x+__float2int_rn(xIndex+1))];
tempShade.y = (float)((T*)cVolInfo.SourceData)[(int)(__float2int_rn(zIndex)*cVolInfo.VolumeSize.x*cVolInfo.VolumeSize.y+__float2int_rn(yIndex+1)*cVolInfo.VolumeSize.x+__float2int_rn(xIndex))];
tempShade.z = (float)((T*)cVolInfo.SourceData)[(int)(__float2int_rn(zIndex+1)*cVolInfo.VolumeSize.x*cVolInfo.VolumeSize.y+__float2int_rn(yIndex)*cVolInfo.VolumeSize.x+__float2int_rn(xIndex))];
tempShade.x-=(float)((T*)cVolInfo.SourceData)[(int)(__float2int_rn(zIndex)*cVolInfo.VolumeSize.x*cVolInfo.VolumeSize.y+__float2int_rn(yIndex)*cVolInfo.VolumeSize.x+__float2int_rn(xIndex-1))];
tempShade.y-=(float)((T*)cVolInfo.SourceData)[(int)(__float2int_rn(zIndex)*cVolInfo.VolumeSize.x*cVolInfo.VolumeSize.y+__float2int_rn(yIndex-1)*cVolInfo.VolumeSize.x+__float2int_rn(xIndex))];
tempShade.z-=(float)((T*)cVolInfo.SourceData)[(int)(__float2int_rn(zIndex-1)*cVolInfo.VolumeSize.x*cVolInfo.VolumeSize.y+__float2int_rn(yIndex)*cVolInfo.VolumeSize.x+__float2int_rn(xIndex))];
}else if((xIndex==0 || xIndex == cVolInfo.VolumeSize.x-1) && (yIndex==0 || yIndex == cVolInfo.VolumeSize.y-1) && (zIndex==0 || zIndex == cVolInfo.VolumeSize.z-1)){
tempShade.x=0;
tempShade.y=0;
tempShade.z=0;
}else{
index=-1;
}
if(index!=-1){
float range=(cVolInfo.TypeRange[1]-cVolInfo.TypeRange[0])*2;
cVolInfo.shadeField[index].x=tempShade.x/range;
cVolInfo.shadeField[index].y=tempShade.y/range;
cVolInfo.shadeField[index].z=tempShade.z/range;
}
}
template <typename T>
__global__ void CUDAkernel_renderBase_doRendering()
{
int xIndex = blockDim.x *blockIdx.x + threadIdx.x;
int yIndex = blockDim.y *blockIdx.y + threadIdx.y;
__shared__ float2 s_minmaxTrace[BLOCK_DIM2D*BLOCK_DIM2D];
__shared__ float3 s_rayMap[BLOCK_DIM2D*BLOCK_DIM2D*2];
__shared__ float3 s_clippingPoints[BLOCK_DIM2D*BLOCK_DIM2D*2];
int tempacc=threadIdx.x+threadIdx.y*BLOCK_DIM2D;
__syncthreads();
long int index = (xIndex+yIndex*cRenInfo.ActualResolution.x)*4;
if(xIndex<cRenInfo.ActualResolution.x && yIndex <cRenInfo.ActualResolution.y){
CUDAkernel_SetRayMapVolumeRendering(tempacc, s_rayMap, s_clippingPoints, index, cRenInfo.LensMap, xIndex, yIndex);
CUDAkernel_CalculateMinmax(tempacc, s_rayMap, s_minmaxTrace, xIndex, yIndex);
CUDAkernel_RayCast<T>(cRenInfo,
cVolInfo,
colorTF,
alphaTF,
s_rayMap,
s_minmaxTrace,
s_clippingPoints,
tempacc,
xIndex, yIndex);
}
}
void CUDArenderBase_doRender(cudaRendererInformation& renInfo, cudaVolumeInformation& volInfo)
{
int blockX=(((int)renInfo.ActualResolution.x-1)/ BLOCK_DIM2D) + 1;
int blockY=(((int)renInfo.ActualResolution.y-1)/ BLOCK_DIM2D) + 1;
// setup execution parameters
dim3 grid(blockX, blockY, 1);
dim3 threads(BLOCK_DIM2D, BLOCK_DIM2D, 1);
blockX=((int)(volInfo.VolumeSize.x*volInfo.VolumeSize.y-1)/ BLOCK_DIM2D) + 1;
blockY=((int)(volInfo.VolumeSize.z-1)/ BLOCK_DIM2D) + 1;
dim3 grid2(blockX, blockY, 1);
dim3 threads2(BLOCK_DIM2D, BLOCK_DIM2D, 1);
// copy host memory to device
prepareShadeField(renInfo, volInfo);
CUDA_SAFE_CALL( hipMemcpyToSymbol(cRenInfo, &renInfo, sizeof(cudaRendererInformation)));
CUDA_SAFE_CALL( hipMemcpyToSymbol(cVolInfo, &volInfo, sizeof(cudaVolumeInformation)));
CUDA_SAFE_CALL( hipMemcpyToSymbol(colorTF, volInfo.ColorTransferFunction, sizeof(float)*256*3));
CUDA_SAFE_CALL( hipMemcpyToSymbol(alphaTF, volInfo.AlphaTransferFunction, sizeof(float)*256));
//execute the kernel
#define CALL_KERNEL_DO_RENDER(ID, TYPE) \
(ID==volInfo.InputDataType) \
hipLaunchKernelGGL(( CUDAkernel_renderBase_doRendering<TYPE>), dim3(grid), dim3(threads), 0, 0, )
if CALL_KERNEL_DO_RENDER(VTK_CHAR, char);
else if CALL_KERNEL_DO_RENDER(VTK_UNSIGNED_CHAR, unsigned char);
else if CALL_KERNEL_DO_RENDER(VTK_SHORT, short);
else if CALL_KERNEL_DO_RENDER(VTK_UNSIGNED_SHORT, unsigned short);
else if CALL_KERNEL_DO_RENDER(VTK_INT, int);
else if CALL_KERNEL_DO_RENDER(VTK_FLOAT, float);
deleteShadeField(renInfo, volInfo);
CUT_CHECK_ERROR("Kernel execution failed");
return;
}
void prepareShadeField(cudaRendererInformation& renInfo, cudaVolumeInformation& volInfo){
CUDA_SAFE_CALL(hipMalloc((void**)&volInfo.shadeField, (int)(volInfo.VolumeSize.x*volInfo.VolumeSize.y*volInfo.VolumeSize.z*sizeof(float3))));
}
void deleteShadeField(cudaRendererInformation& renInfo, cudaVolumeInformation& volInfo){
CUDA_SAFE_CALL( hipFree(volInfo.shadeField));
}
|
25d01beaf6817737a90f6ea9ebeebb8304097341.cu
|
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil.h>
// includes, kernels
#include "CUDA_renderBase.h"
#include "CUDA_typeRange.h"
#include "CUDA_matrix_math.h"
#include "CUDA_zbuffer_math.h"
#include "vtkType.h"
//#define USE_TIMER
#define BLOCK_DIM2D 8 // this must be set to 4 or more
#define ACC(X,Y,Z) ( ( (Z)*(sizeX)*(sizeY) ) + ( (Y)*(sizeX) ) + (X) )
#define SQR(X) ((X) * (X) )
__constant__ cudaRendererInformation cRenInfo;
__constant__ cudaVolumeInformation cVolInfo;
__constant__ float colorTF[256*3];
__constant__ float alphaTF[256];
template <typename T>
__device__ T CUDAkernel_InterpolateNN(T * sourceData,
float posX,
float posY,
float posZ){
return sourceData[(int)(__float2int_rn(posZ)*cVolInfo.VolumeSize.x*cVolInfo.VolumeSize.y+__float2int_rn(posY)*cVolInfo.VolumeSize.x+__float2int_rn(posX))];
}
template <typename T>
__device__ T CUDAkernel_InterpolateTrilinear(T * sourceData,
float posX,
float posY,
float posZ){
float fracX=posX-(int)posX;
float fracY=posY-(int)posY;
float fracZ=posZ-(int)posZ;
float revX=1-fracX;
float revY=1-fracY;
float revZ=1-fracZ;
int base=(int)((int)(posZ)*cVolInfo.VolumeSize.x*cVolInfo.VolumeSize.y+(int)(posY)*cVolInfo.VolumeSize.x+(int)(posX));
return ((T) (revX*(revY*(revZ* (sourceData)[(int)(base)]+
fracZ* (sourceData)[(int)(base+cVolInfo.VolumeSize.x*cVolInfo.VolumeSize.y)])+
fracY*(revZ* (sourceData)[(int)(base+cVolInfo.VolumeSize.x)]+
fracZ* (sourceData)[(int)(base+cVolInfo.VolumeSize.x*cVolInfo.VolumeSize.y+cVolInfo.VolumeSize.x)]))+
fracX*(revY*(revZ* (sourceData)[(int)(base+1)]+
fracZ* (sourceData)[(int)(base+cVolInfo.VolumeSize.x*cVolInfo.VolumeSize.y+1)])+
fracY*(revZ* (sourceData)[(int)(base+cVolInfo.VolumeSize.x+1)]+
fracZ* (sourceData)[(int)(base+cVolInfo.VolumeSize.x*cVolInfo.VolumeSize.y+cVolInfo.VolumeSize.x+1)])))
);
}
template <typename T>
__device__ T CUDAkernel_Interpolate(T * sourceData,
float posX,
float posY,
float posZ){
if(cRenInfo.interpolationMethod == 0){
return CUDAkernel_InterpolateNN(sourceData, posX, posY, posZ);
}else if(cRenInfo.interpolationMethod == 1){
return CUDAkernel_InterpolateTrilinear(sourceData, posX, posY, posZ);
}
return 0;
}
#include "CUDA_renderRayCastComposite.h"
#include "CUDA_renderRayCastCompositeShaded.h"
#include "CUDA_renderRayCastMIP.h"
#include "CUDA_renderRayCastIsosurface.h"
template <typename T>
__device__ void CUDAkernel_RayCast(cudaRendererInformation& renInfo,
cudaVolumeInformation& volInfo,
float* colorTF,
float* alphaTF,
float3* s_rayMap,
float2* s_minmaxTrace,
float3* s_clippingPoints,
int tempacc,
int xIndex,
int yIndex){
if(renInfo.rayCastingMethod==0){
CUDAkernel_RayCastIsosurface<T>(renInfo, volInfo, colorTF, alphaTF, s_rayMap, s_minmaxTrace, s_clippingPoints, tempacc, xIndex, yIndex);
}else if(renInfo.rayCastingMethod==1){
CUDAkernel_RayCastMIP<T>(renInfo, volInfo, colorTF, alphaTF, s_rayMap, s_minmaxTrace, s_clippingPoints, tempacc, xIndex, yIndex);
}else if(renInfo.rayCastingMethod==2){
CUDAkernel_RayCastComposite<T>(renInfo, volInfo, colorTF, alphaTF, s_rayMap, s_minmaxTrace, s_clippingPoints, tempacc, xIndex, yIndex);
}else if(renInfo.rayCastingMethod==3){
CUDAkernel_RayCastCompositeShaded<T>(renInfo, volInfo, colorTF, alphaTF, s_rayMap, s_minmaxTrace, s_clippingPoints, tempacc, xIndex, yIndex);
}
}
__device__ void CUDAkernel_SetRayMapVolumeRendering(long int base, float3* rayMap, float3* clippingPoints, long int index, float* lensMap, int xIndex, int yIndex){
float3 start;
float3 end;
start.x=cRenInfo.CameraRayStart.x+
(float)xIndex/(cRenInfo.ActualResolution.x-1)*cRenInfo.CameraRayStartX.x+
(float)yIndex/(cRenInfo.ActualResolution.y-1)*cRenInfo.CameraRayStartY.x;
start.y=cRenInfo.CameraRayStart.y+
(float)xIndex/(cRenInfo.ActualResolution.x-1)*cRenInfo.CameraRayStartX.y+
(float)yIndex/(cRenInfo.ActualResolution.y-1)*cRenInfo.CameraRayStartY.y;
start.z=cRenInfo.CameraRayStart.z+
(float)xIndex/(cRenInfo.ActualResolution.x-1)*cRenInfo.CameraRayStartX.z+
(float)yIndex/(cRenInfo.ActualResolution.y-1)*cRenInfo.CameraRayStartY.z;
end.x=cRenInfo.CameraRayEnd.x+
(float)xIndex/(cRenInfo.ActualResolution.x-1)*cRenInfo.CameraRayEndX.x+
(float)yIndex/(cRenInfo.ActualResolution.y-1)*cRenInfo.CameraRayEndY.x;
end.y=cRenInfo.CameraRayEnd.y+
(float)xIndex/(cRenInfo.ActualResolution.x-1)*cRenInfo.CameraRayEndX.y+
(float)yIndex/(cRenInfo.ActualResolution.y-1)*cRenInfo.CameraRayEndY.y;
end.z=cRenInfo.CameraRayEnd.z+
(float)xIndex/(cRenInfo.ActualResolution.x-1)*cRenInfo.CameraRayEndX.z+
(float)yIndex/(cRenInfo.ActualResolution.y-1)*cRenInfo.CameraRayEndY.z;
rayMap[base*2].x=start.x;
rayMap[base*2].y=start.y;
rayMap[base*2].z=start.z;
rayMap[base*2+1].x=end.x-start.x;
rayMap[base*2+1].y=end.y-start.y;
rayMap[base*2+1].z=end.z-start.z;
rayMap[base*2]=MatMul(cVolInfo.Transform, rayMap[base*2]);
rayMap[base*2+1]=MatMul(cVolInfo.Transform, rayMap[base*2+1], 0.0f);
clippingPoints[base*2].x=rayMap[base*2].x;
clippingPoints[base*2].y=rayMap[base*2].y;
clippingPoints[base*2].z=rayMap[base*2].z;
clippingPoints[base*2+1].x=rayMap[base*2].x+rayMap[base*2+1].x;
clippingPoints[base*2+1].y=rayMap[base*2].y+rayMap[base*2+1].y;
clippingPoints[base*2+1].z=rayMap[base*2].z+rayMap[base*2+1].z;
float getmax = fabs(rayMap[base*2+1].x);
if(fabs(rayMap[base*2+1].y)>getmax) getmax = fabs(rayMap[base*2+1].y);
if(fabs(rayMap[base*2+1].z)>getmax) getmax = fabs(rayMap[base*2+1].z);
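// scale the direction so its largest absolute component becomes 1: one ray step advances at most one voxel per axis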
if(getmax!=0){
float temp= 1.0f/getmax;
rayMap[base*2+1].x*=temp;
rayMap[base*2+1].y*=temp;
rayMap[base*2+1].z*=temp;
}
}
__device__ void CUDAkernel_CalculateMinmax(long int tempacc, float3* rayMap, float2* minmax, int xIndex, int yIndex){
float test;
minmax[tempacc].x=-100000.0f;
minmax[tempacc].y=100000.0f;
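// slab test: clip the ray against each pair of ROI planes and keep the tightest [entry, exit] parameter interval in minmax[tempacc]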
if(rayMap[tempacc*2+1].x > 0){
minmax[tempacc].y = ( ((cVolInfo.maxROI.x-2)-rayMap[tempacc*2].x)/rayMap[tempacc*2+1].x );
minmax[tempacc].x = ( ((cVolInfo.minROI.x+2)-rayMap[tempacc*2].x)/rayMap[tempacc*2+1].x );
}
else if(rayMap[tempacc*2+1].x < 0){
minmax[tempacc].x = ( ((cVolInfo.maxROI.x-2)-rayMap[tempacc*2].x)/rayMap[tempacc*2+1].x );
minmax[tempacc].y = ( ((cVolInfo.minROI.x+2)-rayMap[tempacc*2].x)/rayMap[tempacc*2+1].x );
}
if(rayMap[tempacc*2+1].y > 0){
test = ( ((cVolInfo.maxROI.y-2)-rayMap[tempacc*2].y)/rayMap[tempacc*2+1].y );
if( test < minmax[tempacc].y){
minmax[tempacc].y = test;
}
test = ( ((cVolInfo.minROI.y+2)-rayMap[tempacc*2].y)/rayMap[tempacc*2+1].y );
if( test > minmax[tempacc].x){
minmax[tempacc].x = test;
}
}
else if(rayMap[tempacc*2+1].y < 0){
test = ( ((cVolInfo.maxROI.y-2)-rayMap[tempacc*2].y)/rayMap[tempacc*2+1].y );
if( test > minmax[tempacc].x){
minmax[tempacc].x = test;
}
test = ( ((cVolInfo.minROI.y+2)-rayMap[tempacc*2].y)/rayMap[tempacc*2+1].y );
if( test < minmax[tempacc].y){
minmax[tempacc].y = test;
}
}
if(rayMap[tempacc*2+1].z > 0){
test = ( ((cVolInfo.maxROI.z-2)-rayMap[tempacc*2].z)/rayMap[tempacc*2+1].z );
if( test < minmax[tempacc].y){
minmax[tempacc].y = test;
}
test = ( ((cVolInfo.minROI.z+2)-rayMap[tempacc*2].z)/rayMap[tempacc*2+1].z );
if( test > minmax[tempacc].x){
minmax[tempacc].x = test;
}
}
else if(rayMap[tempacc*2+1].z < 0){
test = ( ((cVolInfo.maxROI.z-2)-rayMap[tempacc*2].z)/rayMap[tempacc*2+1].z );
if( test > minmax[tempacc].x){
minmax[tempacc].x = test;
}
test = ( ((cVolInfo.minROI.z+2)-rayMap[tempacc*2].z)/rayMap[tempacc*2+1].z );
if( test < minmax[tempacc].y){
minmax[tempacc].y = test;
}
}
minmax[tempacc].x-=2;
minmax[tempacc].y+=2;
float3 zVec;
float3 normalVec;
float3 newOrigin;
zVec.x=0;
zVec.y=0;
zVec.z=1;
normalVec=MatMul(cVolInfo.SliceMatrix, zVec, 0.0f);
newOrigin=MatMul(cVolInfo.SliceMatrix, make_float3(0,0,0), 1.0f);
float3 transformedOrigin;
float3 transformedVector;
transformedVector=MatMul(cVolInfo.OrientationMatrix, rayMap[2*tempacc+1], 0.0f);
transformedOrigin=MatMul(cVolInfo.OrientationMatrix, rayMap[2*tempacc], 1.0f);
float length=sqrt(normalVec.x*normalVec.x+
normalVec.y*normalVec.y+
normalVec.z*normalVec.z);
normalVec.x/=length;
normalVec.y/=length;
normalVec.z/=length;
float3 relPos;
relPos.x=newOrigin.x-transformedOrigin.x;
relPos.y=newOrigin.y-transformedOrigin.y;
relPos.z=newOrigin.z-transformedOrigin.z;
float dot = (transformedVector.x*normalVec.x+
transformedVector.y*normalVec.y+
transformedVector.z*normalVec.z);
float unit=(relPos.x*normalVec.x+
relPos.y*normalVec.y+
relPos.z*normalVec.z)/dot;
if(dot>=0){
if(cVolInfo.VolumeRenderDirection == 1){
if(unit>minmax[tempacc].x)minmax[tempacc].x=unit;
}else if(cVolInfo.VolumeRenderDirection == 2){
if(unit<minmax[tempacc].y)minmax[tempacc].y=unit;
}
}else{
if(cVolInfo.VolumeRenderDirection == 1){
if(unit<minmax[tempacc].y)minmax[tempacc].y=unit;
}else if(cVolInfo.VolumeRenderDirection == 2){
if(unit>minmax[tempacc].x)minmax[tempacc].x=unit;
}
}
}
template <typename T>
__global__ void CUDAkernel_renderBase_calculateShadeField()
{
int xIndex = (blockDim.x*blockIdx.x + threadIdx.x) % (int)cVolInfo.VolumeSize.x;
int yIndex = (blockDim.x*blockIdx.x + threadIdx.x) / (int)cVolInfo.VolumeSize.x;
int zIndex = blockDim.y*blockIdx.y+ threadIdx.y;
long int index = (xIndex+yIndex*cVolInfo.VolumeSize.x+zIndex*cVolInfo.VolumeSize.x*cVolInfo.VolumeSize.y);
float3 tempShade;
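// central-difference estimate of the volume gradient, written to cVolInfo.shadeField below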
if(xIndex>0 && xIndex < cVolInfo.VolumeSize.x-1 && yIndex>0 && yIndex < cVolInfo.VolumeSize.y-1 && zIndex>0 && zIndex < cVolInfo.VolumeSize.z-1){
tempShade.x = (float)((T*)cVolInfo.SourceData)[(int)(__float2int_rn(zIndex)*cVolInfo.VolumeSize.x*cVolInfo.VolumeSize.y+__float2int_rn(yIndex)*cVolInfo.VolumeSize.x+__float2int_rn(xIndex+1))];
tempShade.y = (float)((T*)cVolInfo.SourceData)[(int)(__float2int_rn(zIndex)*cVolInfo.VolumeSize.x*cVolInfo.VolumeSize.y+__float2int_rn(yIndex+1)*cVolInfo.VolumeSize.x+__float2int_rn(xIndex))];
tempShade.z = (float)((T*)cVolInfo.SourceData)[(int)(__float2int_rn(zIndex+1)*cVolInfo.VolumeSize.x*cVolInfo.VolumeSize.y+__float2int_rn(yIndex)*cVolInfo.VolumeSize.x+__float2int_rn(xIndex))];
tempShade.x-=(float)((T*)cVolInfo.SourceData)[(int)(__float2int_rn(zIndex)*cVolInfo.VolumeSize.x*cVolInfo.VolumeSize.y+__float2int_rn(yIndex)*cVolInfo.VolumeSize.x+__float2int_rn(xIndex-1))];
tempShade.y-=(float)((T*)cVolInfo.SourceData)[(int)(__float2int_rn(zIndex)*cVolInfo.VolumeSize.x*cVolInfo.VolumeSize.y+__float2int_rn(yIndex-1)*cVolInfo.VolumeSize.x+__float2int_rn(xIndex))];
tempShade.z-=(float)((T*)cVolInfo.SourceData)[(int)(__float2int_rn(zIndex-1)*cVolInfo.VolumeSize.x*cVolInfo.VolumeSize.y+__float2int_rn(yIndex)*cVolInfo.VolumeSize.x+__float2int_rn(xIndex))];
}else if((xIndex==0 || xIndex == cVolInfo.VolumeSize.x-1) && (yIndex==0 || yIndex == cVolInfo.VolumeSize.y-1) && (zIndex==0 || zIndex == cVolInfo.VolumeSize.z-1)){
tempShade.x=0;
tempShade.y=0;
tempShade.z=0;
}else{
index=-1;
}
if(index!=-1){
float range=(cVolInfo.TypeRange[1]-cVolInfo.TypeRange[0])*2;
cVolInfo.shadeField[index].x=tempShade.x/range;
cVolInfo.shadeField[index].y=tempShade.y/range;
cVolInfo.shadeField[index].z=tempShade.z/range;
}
}
template <typename T>
__global__ void CUDAkernel_renderBase_doRendering()
{
int xIndex = blockDim.x *blockIdx.x + threadIdx.x;
int yIndex = blockDim.y *blockIdx.y + threadIdx.y;
__shared__ float2 s_minmaxTrace[BLOCK_DIM2D*BLOCK_DIM2D];
__shared__ float3 s_rayMap[BLOCK_DIM2D*BLOCK_DIM2D*2];
__shared__ float3 s_clippingPoints[BLOCK_DIM2D*BLOCK_DIM2D*2];
int tempacc=threadIdx.x+threadIdx.y*BLOCK_DIM2D;
__syncthreads();
long int index = (xIndex+yIndex*cRenInfo.ActualResolution.x)*4;
if(xIndex<cRenInfo.ActualResolution.x && yIndex <cRenInfo.ActualResolution.y){
CUDAkernel_SetRayMapVolumeRendering(tempacc, s_rayMap, s_clippingPoints, index, cRenInfo.LensMap, xIndex, yIndex);
CUDAkernel_CalculateMinmax(tempacc, s_rayMap, s_minmaxTrace, xIndex, yIndex);
CUDAkernel_RayCast<T>(cRenInfo,
cVolInfo,
colorTF,
alphaTF,
s_rayMap,
s_minmaxTrace,
s_clippingPoints,
tempacc,
xIndex, yIndex);
}
}
void CUDArenderBase_doRender(cudaRendererInformation& renInfo, cudaVolumeInformation& volInfo)
{
int blockX=(((int)renInfo.ActualResolution.x-1)/ BLOCK_DIM2D) + 1;
int blockY=(((int)renInfo.ActualResolution.y-1)/ BLOCK_DIM2D) + 1;
// setup execution parameters
dim3 grid(blockX, blockY, 1);
dim3 threads(BLOCK_DIM2D, BLOCK_DIM2D, 1);
blockX=((int)(volInfo.VolumeSize.x*volInfo.VolumeSize.y-1)/ BLOCK_DIM2D) + 1;
blockY=((int)(volInfo.VolumeSize.z-1)/ BLOCK_DIM2D) + 1;
dim3 grid2(blockX, blockY, 1);
dim3 threads2(BLOCK_DIM2D, BLOCK_DIM2D, 1);
// copy host memory to device
prepareShadeField(renInfo, volInfo);
CUDA_SAFE_CALL( cudaMemcpyToSymbol(cRenInfo, &renInfo, sizeof(cudaRendererInformation)));
CUDA_SAFE_CALL( cudaMemcpyToSymbol(cVolInfo, &volInfo, sizeof(cudaVolumeInformation)));
CUDA_SAFE_CALL( cudaMemcpyToSymbol(colorTF, volInfo.ColorTransferFunction, sizeof(float)*256*3));
CUDA_SAFE_CALL( cudaMemcpyToSymbol(alphaTF, volInfo.AlphaTransferFunction, sizeof(float)*256));
//execute the kernel
#define CALL_KERNEL_DO_RENDER(ID, TYPE) \
(ID==volInfo.InputDataType) \
CUDAkernel_renderBase_doRendering<TYPE><<<grid, threads>>>()
if CALL_KERNEL_DO_RENDER(VTK_CHAR, char);
else if CALL_KERNEL_DO_RENDER(VTK_UNSIGNED_CHAR, unsigned char);
else if CALL_KERNEL_DO_RENDER(VTK_SHORT, short);
else if CALL_KERNEL_DO_RENDER(VTK_UNSIGNED_SHORT, unsigned short);
else if CALL_KERNEL_DO_RENDER(VTK_INT, int);
else if CALL_KERNEL_DO_RENDER(VTK_FLOAT, float);
deleteShadeField(renInfo, volInfo);
CUT_CHECK_ERROR("Kernel execution failed");
return;
}
void prepareShadeField(cudaRendererInformation& renInfo, cudaVolumeInformation& volInfo){
CUDA_SAFE_CALL(cudaMalloc((void**)&volInfo.shadeField, (int)(volInfo.VolumeSize.x*volInfo.VolumeSize.y*volInfo.VolumeSize.z*sizeof(float3))));
}
void deleteShadeField(cudaRendererInformation& renInfo, cudaVolumeInformation& volInfo){
CUDA_SAFE_CALL( cudaFree(volInfo.shadeField));
}
|
0d5226119a6010e062817b062cd2f6a8b82fefad.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdio.h>
#include <cstdlib>
using namespace std;
__global__ void verticalOperation(int size, float *deviceArray, float *deviceResult) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int blockSize = blockDim.x; //NOTE: this is also the number of values each block loads into shared memory
//allocated shared memory to reduce global memory access overhead
extern __shared__ float sdata[];
//move each value from deviceArray pointer into shared_memory_array
sdata[threadIdx.x] = deviceArray[index];
__syncthreads();
//stride is currently the length of the unsorted array that still needs to be compared
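//pairwise max reduction in shared memory: each pass keeps the larger of sdata[i] and sdata[i + stride/2], halving the active range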
for (int stride = blockSize; stride >= 1; stride /= 2) {
if (threadIdx.x < stride/2) {
if (sdata[threadIdx.x + (stride/2)] > sdata[threadIdx.x]) {
sdata[threadIdx.x] = sdata[threadIdx.x + (stride/2)];
}
}
__syncthreads();
}
if (threadIdx.x == 0) { deviceResult[blockIdx.x] = sdata[0]; }
//stride is currently the length of the unsorted array that still needs to be compared
for (int stride = gridDim.x; stride >= 1; stride /= 2) {
if (index < stride/2) {
if (deviceResult[index + (stride/2)] > deviceResult[index]) {
deviceResult[index] = deviceResult[index + (stride/2)];
}
}
}
}
void testVerticalOperation() {
int number_of_values = 1 << 18;
int memSize = number_of_values*sizeof(float);
int blockSize = 256;
int numBlocks = (number_of_values + blockSize - 1)/blockSize; //ceil division: avoids launching an extra block that would read past the end of deviceArray
float *deviceValue, *deviceResult; //device copies
float initialValue[number_of_values], result[number_of_values]; //host copies
for (int x = 0; x < number_of_values; x++) {
initialValue[x] = 0.0f;
}
initialValue[2] = 500.0f;
initialValue[3] = 600.0f;
initialValue[66] = 998.0f;
initialValue[30000] = 1000.0f;
//Allocates device memory, accessible from the GPU (hipMallocManaged would be needed for Unified Memory).
hipError_t cudaMallocErr1 = hipMalloc(&deviceValue, memSize);
if (cudaMallocErr1 != hipSuccess) {
cout << "CUDA Error" << endl;
}
//Allocates device memory, accessible from the GPU (hipMallocManaged would be needed for Unified Memory).
hipError_t cudaMallocErr2 = hipMalloc(&deviceResult, memSize);
if (cudaMallocErr2 != hipSuccess) {
cout << "CUDA Error" << endl;
}
//copy memory to device from host and print error if found
hipError_t cudaMemcpy1Err = hipMemcpy(deviceValue, &initialValue, memSize, hipMemcpyHostToDevice);
if (cudaMemcpy1Err != hipSuccess) {
cout << "Memcpy to Device Error: " << cudaMemcpy1Err << endl;
}
hipLaunchKernelGGL(( verticalOperation), dim3(numBlocks), dim3(blockSize), memSize/blockSize, 0, number_of_values, deviceValue, deviceResult);
//Forces CPU to wait for GPU to finish before accessing
hipDeviceSynchronize();
//copy memory to host from device and print error if found
hipError_t cudaMemcpy2Err = hipMemcpy(&result, deviceResult, memSize, hipMemcpyDeviceToHost);
if (cudaMemcpy2Err != hipSuccess) {
cout << "Memcpy to Host Error: " << cudaMemcpy2Err << endl;
}
cout << result[0] << endl;
cout << "Done!" << endl;
// Free memory
hipFree(deviceValue);
hipFree(deviceResult);
}
int main() {
//Runs test for verticalOperation kernel on GPU
testVerticalOperation();
return 0;
}
|
0d5226119a6010e062817b062cd2f6a8b82fefad.cu
|
#include <iostream>
#include <stdio.h>
#include <cstdlib>
using namespace std;
__global__ void verticalOperation(int size, float *deviceArray, float *deviceResult) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int blockSize = blockDim.x; //NOTE: this is also the number of values each block loads into shared memory
//allocated shared memory to reduce global memory access overhead
extern __shared__ float sdata[];
//move each value from deviceArray pointer into shared_memory_array
sdata[threadIdx.x] = deviceArray[index];
__syncthreads();
//stride is currently the length of the unsorted array that still needs to be compared
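//pairwise max reduction in shared memory: each pass keeps the larger of sdata[i] and sdata[i + stride/2], halving the active range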
for (int stride = blockSize; stride >= 1; stride /= 2) {
if (threadIdx.x < stride/2) {
if (sdata[threadIdx.x + (stride/2)] > sdata[threadIdx.x]) {
sdata[threadIdx.x] = sdata[threadIdx.x + (stride/2)];
}
}
__syncthreads();
}
if (threadIdx.x == 0) { deviceResult[blockIdx.x] = sdata[0]; }
//stride is currently the length of the unsorted array that still needs to be compared
for (int stride = gridDim.x; stride >= 1; stride /= 2) {
if (index < stride/2) {
if (deviceResult[index + (stride/2)] > deviceResult[index]) {
deviceResult[index] = deviceResult[index + (stride/2)];
}
}
}
}
void testVerticalOperation() {
int number_of_values = 1 << 18;
int memSize = number_of_values*sizeof(float);
int blockSize = 256;
int numBlocks = (number_of_values + blockSize - 1)/blockSize; //ceil division: avoids launching an extra block that would read past the end of deviceArray
float *deviceValue, *deviceResult; //device copies
float initialValue[number_of_values], result[number_of_values]; //host copies
for (int x = 0; x < number_of_values; x++) {
initialValue[x] = 0.0f;
}
initialValue[2] = 500.0f;
initialValue[3] = 600.0f;
initialValue[66] = 998.0f;
initialValue[30000] = 1000.0f;
//Allocates device memory, accessible from the GPU (cudaMallocManaged would be needed for Unified Memory).
cudaError_t cudaMallocErr1 = cudaMalloc(&deviceValue, memSize);
if (cudaMallocErr1 != cudaSuccess) {
cout << "CUDA Error" << endl;
}
//Allocates device memory, accessible from the GPU (cudaMallocManaged would be needed for Unified Memory).
cudaError_t cudaMallocErr2 = cudaMalloc(&deviceResult, memSize);
if (cudaMallocErr2 != cudaSuccess) {
cout << "CUDA Error" << endl;
}
//copy memory to device from host and print error if found
cudaError_t cudaMemcpy1Err = cudaMemcpy(deviceValue, &initialValue, memSize, cudaMemcpyHostToDevice);
if (cudaMemcpy1Err != cudaSuccess) {
cout << "Memcpy to Device Error: " << cudaMemcpy1Err << endl;
}
verticalOperation<<<numBlocks, blockSize, memSize/blockSize>>>(number_of_values, deviceValue, deviceResult);
//Forces CPU to wait for GPU to finish before accessing
cudaDeviceSynchronize();
//copy memory to host from device and print error if found
cudaError_t cudaMemcpy2Err = cudaMemcpy(&result, deviceResult, memSize, cudaMemcpyDeviceToHost);
if (cudaMemcpy2Err != cudaSuccess) {
cout << "Memcpy to Host Error: " << cudaMemcpy2Err << endl;
}
cout << result[0] << endl;
cout << "Done!" << endl;
// Free memory
cudaFree(deviceValue);
cudaFree(deviceResult);
}
int main() {
//Runs test for verticalOperation kernel on GPU
testVerticalOperation();
return 0;
}
|
77f5b7ecde95629f4516cb884302b74dfed73d92.hip
|
// !!! This is a file automatically generated by hipify!!!
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THH/generic/THHTensorCopy.hip"
#else
void THCTensor_(copy)(THCState* state, THCTensor* dst, THCTensor* src) {
if (dst == src) return;
at::Tensor dst_wrap = THTensor_wrap(dst);
at::Tensor src_wrap = THTensor_wrap(src);
at::native::copy_(dst_wrap, src_wrap);
}
template <>
THCTensor *THCTensor_newClone<scalar_t>(THCState *state, THCTensor *self) {
THCTensor* tensor =
// THCTensor_new(state, THTensor_getStoragePtr(self)->dtype());
THCTensor_new(state, self->dtype());
THCTensor_resizeAs(state, tensor, self);
at::Tensor tensor_wrap = THTensor_wrap(tensor);
at::Tensor self_wrap = THTensor_wrap(self);
at::native::copy_(tensor_wrap, self_wrap);
return tensor;
}
template <>
THCTensor *THCTensor_newContiguous<scalar_t>(THCState *state, THCTensor *self)
{
if(!self->is_contiguous()) {
return THCTensor_newClone<scalar_t>(state, self);
} else {
THCTensor_retain(state, self);
return self;
}
}
template <>
void THCTensor_freeCopyTo<scalar_t>(THCState *state, THCTensor *self, THCTensor *dst) {
if(self != dst) {
at::Tensor dst_wrap = THTensor_wrap(dst);
at::Tensor self_wrap = THTensor_wrap(self);
at::native::copy_(dst_wrap, self_wrap);
}
THCTensor_free(state, self);
}
template <>
void THCTensor_copyIgnoringOverlaps<scalar_t>(THCState* state, THCTensor* dst, THCTensor* src) {
// Called when we are copying into an overlapping index `dst`, but
// we don't care which writer wins. Hacky but it works.
// This is itself invoked by pointwiseApply2 / THCTensor_copy in
// case that there are write overlaps.
// FIXME: really, overlapping writes should be illegal/an error in Torch
THC_pointwiseApply2<scalar_t, scalar_t>(
state, dst, src,
CopyOp<scalar_t>(),
ReadOnly, /* ignore overwrites */
ReadOnly);
}
void THCTensor_(copyIgnoringOverlaps)(THCState* state, THCTensor* dst, THCTensor* src) {
THCTensor_copyIgnoringOverlaps<scalar_t>(state, dst, src);
}
#endif
|
77f5b7ecde95629f4516cb884302b74dfed73d92.cu
|
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THC/generic/THCTensorCopy.cu"
#else
void THCTensor_(copy)(THCState* state, THCTensor* dst, THCTensor* src) {
if (dst == src) return;
at::Tensor dst_wrap = THTensor_wrap(dst);
at::Tensor src_wrap = THTensor_wrap(src);
at::native::copy_(dst_wrap, src_wrap);
}
template <>
THCTensor *THCTensor_newClone<scalar_t>(THCState *state, THCTensor *self) {
THCTensor* tensor =
// THCTensor_new(state, THTensor_getStoragePtr(self)->dtype());
THCTensor_new(state, self->dtype());
THCTensor_resizeAs(state, tensor, self);
at::Tensor tensor_wrap = THTensor_wrap(tensor);
at::Tensor self_wrap = THTensor_wrap(self);
at::native::copy_(tensor_wrap, self_wrap);
return tensor;
}
template <>
THCTensor *THCTensor_newContiguous<scalar_t>(THCState *state, THCTensor *self)
{
if(!self->is_contiguous()) {
return THCTensor_newClone<scalar_t>(state, self);
} else {
THCTensor_retain(state, self);
return self;
}
}
template <>
void THCTensor_freeCopyTo<scalar_t>(THCState *state, THCTensor *self, THCTensor *dst) {
if(self != dst) {
at::Tensor dst_wrap = THTensor_wrap(dst);
at::Tensor self_wrap = THTensor_wrap(self);
at::native::copy_(dst_wrap, self_wrap);
}
THCTensor_free(state, self);
}
template <>
void THCTensor_copyIgnoringOverlaps<scalar_t>(THCState* state, THCTensor* dst, THCTensor* src) {
// Called when we are copying into an overlapping index `dst`, but
// we don't care which writer wins. Hacky but it works.
// This is itself invoked by pointwiseApply2 / THCTensor_copy in
// case that there are write overlaps.
// FIXME: really, overlapping writes should be illegal/an error in Torch
THC_pointwiseApply2<scalar_t, scalar_t>(
state, dst, src,
CopyOp<scalar_t>(),
ReadOnly, /* ignore overwrites */
ReadOnly);
}
void THCTensor_(copyIgnoringOverlaps)(THCState* state, THCTensor* dst, THCTensor* src) {
THCTensor_copyIgnoringOverlaps<scalar_t>(state, dst, src);
}
#endif
|
63c15809c02eb70d307eefe770b16881bcc93614.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "recombiner.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *rands = NULL;
hipMalloc(&rands, XSIZE*YSIZE);
unsigned int *parents = NULL;
hipMalloc(&parents, XSIZE*YSIZE);
unsigned int parent_rows = 1;
unsigned int parent_cols = 1;
unsigned int *off = NULL;
hipMalloc(&off, XSIZE*YSIZE);
unsigned int cols = 1;
unsigned int seq_offset = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(recombiner, dim3(gridBlock), dim3(threadBlock), 0, 0, rands,parents,parent_rows,parent_cols,off,cols,seq_offset);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(recombiner, dim3(gridBlock), dim3(threadBlock), 0, 0, rands,parents,parent_rows,parent_cols,off,cols,seq_offset);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(recombiner, dim3(gridBlock), dim3(threadBlock), 0, 0, rands,parents,parent_rows,parent_cols,off,cols,seq_offset);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
63c15809c02eb70d307eefe770b16881bcc93614.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "recombiner.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *rands = NULL;
cudaMalloc(&rands, XSIZE*YSIZE);
unsigned int *parents = NULL;
cudaMalloc(&parents, XSIZE*YSIZE);
unsigned int parent_rows = 1;
unsigned int parent_cols = 1;
unsigned int *off = NULL;
cudaMalloc(&off, XSIZE*YSIZE);
unsigned int cols = 1;
unsigned int seq_offset = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
recombiner<<<gridBlock,threadBlock>>>(rands,parents,parent_rows,parent_cols,off,cols,seq_offset);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
recombiner<<<gridBlock,threadBlock>>>(rands,parents,parent_rows,parent_cols,off,cols,seq_offset);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
recombiner<<<gridBlock,threadBlock>>>(rands,parents,parent_rows,parent_cols,off,cols,seq_offset);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
44c5bc289a4f8b63d7202b24deeb94e7140ed200.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cmath>
#include <cstring>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <memory>
#include <numeric>
#include <string>
#include <unistd.h>
#include "mgOnGpuConfig.h"
#include "mgOnGpuTypes.h"
#ifdef __HIPCC__
#include "rambo.cc"
#else
#include "rambo.h"
#endif
#ifdef MGONGPU_COMMONRAND_ONHOST
#include "CommonRandomNumbers.h"
#endif
#include "gCPPProcess.h"
#include "timermap.h"
bool is_number(const char *s) {
const char *t = s;
while (*t != '\0' && isdigit(*t))
++t;
return (int)strlen(s) == t - s;
}
int usage(char* argv0, int ret = 1) {
std::cout << "Usage: " << argv0
<< " [--verbose|-v] [--debug|-d] [--performance|-p] [--json|-j]"
<< " [#gpuBlocksPerGrid #gpuThreadsPerBlock] #iterations" << std::endl << std::endl;
std::cout << "The number of events per iteration is #gpuBlocksPerGrid * #gpuThreadsPerBlock" << std::endl;
std::cout << "(also in CPU/C++ code, where only the product of these two parameters counts)" << std::endl << std::endl;
std::cout << "Summary stats are always computed: '-p' and '-j' only control their printout" << std::endl;
std::cout << "The '-d' flag only controls if nan's emit warnings" << std::endl;
return ret;
}
#ifdef __HIPCC__
template<typename T = fptype>
struct CudaDevDeleter {
void operator()(T* mem) {
checkCuda( hipFree( mem ) );
}
};
template<typename T = fptype>
std::unique_ptr<T, CudaDevDeleter<T>> devMakeUnique(std::size_t N) {
T* tmp = nullptr;
checkCuda( hipMalloc( &tmp, N * sizeof(T) ) );
return std::unique_ptr<T, CudaDevDeleter<T>>{ tmp };
}
template<typename T = fptype>
struct CudaHstDeleter {
void operator()(T* mem) {
checkCuda( hipHostFree( mem ) );
}
};
template<typename T = fptype>
std::unique_ptr<T[], CudaHstDeleter<T>> hstMakeUnique(std::size_t N) {
T* tmp = nullptr;
checkCuda( hipHostMalloc( &tmp, N * sizeof(T) ) );
return std::unique_ptr<T[], CudaHstDeleter<T>>{ tmp };
};
#else
template<typename T = fptype>
std::unique_ptr<T[]> hstMakeUnique(std::size_t N) { return std::unique_ptr<T[]>{ new T[N] }; };
#endif
int main(int argc, char **argv)
{
// READ COMMAND LINE ARGUMENTS
bool verbose = false;
bool debug = false;
bool perf = false;
bool json = false;
int niter = 0;
int gpublocks = 1;
int gputhreads = 32;
int jsondate = 0;
int jsonrun = 0;
int numvec[5] = {0,0,0,0,0};
int nnum = 0;
for (int argn = 1; argn < argc; ++argn) {
if (strcmp(argv[argn], "--verbose") == 0 || strcmp(argv[argn], "-v") == 0)
verbose = true;
else if (strcmp(argv[argn], "--debug") == 0 ||
strcmp(argv[argn], "-d") == 0)
debug = true;
else if (strcmp(argv[argn], "--performance") == 0 ||
strcmp(argv[argn], "-p") == 0)
perf = true;
else if (strcmp(argv[argn], "--json") == 0 ||
strcmp(argv[argn], "-j") == 0)
json = true;
else if (is_number(argv[argn]) && nnum<5)
numvec[nnum++] = atoi(argv[argn]);
else
return usage(argv[0]);
}
if (nnum == 3 || nnum == 5) {
gpublocks = numvec[0];
gputhreads = numvec[1];
niter = numvec[2];
if (nnum == 5){
jsondate = numvec[3];
jsonrun = numvec[4];
}
} else if (nnum == 1) {
niter = numvec[0];
} else {
return usage(argv[0]);
}
if (niter == 0)
return usage(argv[0]);
const int neppR = mgOnGpu::neppR; // ASA layout: constant at compile-time
if ( gputhreads%neppR != 0 )
{
std::cout << "ERROR! #threads/block should be a multiple of neppR=" << neppR << std::endl;
return usage(argv[0]);
}
const int neppM = mgOnGpu::neppM; // ASA layout: constant at compile-time
if ( gputhreads%neppM != 0 )
{
std::cout << "ERROR! #threads/block should be a multiple of neppM=" << neppM << std::endl;
return usage(argv[0]);
}
using mgOnGpu::ntpbMAX;
if ( gputhreads > ntpbMAX )
{
std::cout << "ERROR! #threads/block should be <= " << ntpbMAX << std::endl;
return usage(argv[0]);
}
const int ndim = gpublocks * gputhreads; // number of threads in one GPU grid
const int nevt = ndim; // number of events in one iteration == number of GPU threads
const int nevtALL = niter*nevt; // total number of ALL events in all iterations
if (verbose)
std::cout << "# iterations: " << niter << std::endl;
// *** START THE NEW TIMERS ***
mgOnGpu::TimerMap timermap;
// === STEP 0 - INITIALISE
#ifdef __HIPCC__
// --- 00. Initialise cuda (call hipFree to ease cuda profile analysis)
const std::string cdfrKey = "00 CudaFree";
timermap.start( cdfrKey );
//std::cout << "Calling hipFree... " << std::endl;
checkCuda( hipFree( 0 ) ); // SLOW!
//std::cout << "Calling hipFree... done" << std::endl;
// --- Book the tear down at the end of main:
struct CudaTearDown {
CudaTearDown(bool print) : _print(print) { }
~CudaTearDown() {
if ( _print ) std::cout << "Calling hipDeviceReset()." << std::endl;
checkCuda( hipDeviceReset() ); // this is needed by cuda-memcheck --leak-check full
}
bool _print{false};
} cudaTearDown(debug);
#endif
// --- 0a. Initialise physics process
const std::string procKey = "0a ProcInit";
timermap.start( procKey );
// Create a process object
#ifdef __HIPCC__
gProc::CPPProcess process( niter, gpublocks, gputhreads, verbose );
#else
Proc::CPPProcess process( niter, gpublocks, gputhreads, verbose );
#endif
// Read param_card and set parameters
process.initProc("../../Cards/param_card.dat");
const fptype energy = 1500; // historical default, Ecms = 1500 GeV = 1.5 TeV (above the Z peak)
//const fptype energy = 91.2; // Ecms = 91.2 GeV (Z peak)
//const fptype energy = 0.100; // Ecms = 100 MeV (well below the Z peak, pure em scattering)
const int meGeVexponent = -(2 * process.nexternal - 8);
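// (Note: the squared amplitude for a process with nexternal external legs has mass
// dimension 8 - 2*nexternal, so it is dimensionless for a 2->2 process; meGeVexponent
// is only used below to label the "GeV^..." units in the printouts.)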
// --- 0b. Allocate memory structures
const std::string alloKey = "0b MemAlloc";
timermap.start( alloKey );
// Memory structures for random numbers, momenta, matrix elements and weights on host and device
using mgOnGpu::np4;
using mgOnGpu::nparf;
using mgOnGpu::npar;
using mgOnGpu::ncomb; // Number of helicity combinations
const int nRnarray = np4*nparf*nevt; // (NB: ASA layout with nevt=npagR*neppR events per iteration)
const int nMomenta = np4*npar*nevt; // (NB: nevt=npagM*neppM for ASA layouts)
const int nWeights = nevt;
const int nMEs = nevt;
#if defined MGONGPU_CURAND_ONHOST or defined MGONGPU_COMMONRAND_ONHOST or not defined __HIPCC__
auto hstRnarray = hstMakeUnique<fptype>( nRnarray ); // AOSOA[npagR][nparf][np4][neppR] (NB: nevt=npagR*neppR)
#endif
auto hstMomenta = hstMakeUnique<fptype>( nMomenta ); // AOSOA[npagM][npar][np4][neppM] (previously was: lp)
auto hstIsGoodHel = hstMakeUnique<bool >( ncomb );
auto hstWeights = hstMakeUnique<fptype>( nWeights ); // (previously was: meHostPtr)
auto hstMEs = hstMakeUnique<fptype>( nMEs ); // (previously was: meHostPtr)
#ifdef __HIPCC__
auto devRnarray = devMakeUnique<fptype>( nRnarray ); // AOSOA[npagR][nparf][np4][neppR] (NB: nevt=npagR*neppR)
auto devMomenta = devMakeUnique<fptype>( nMomenta ); // (previously was: allMomenta)
auto devIsGoodHel = devMakeUnique<bool >( ncomb );
auto devWeights = devMakeUnique<fptype>( nWeights ); // (previously was: meDevPtr)
auto devMEs = devMakeUnique<fptype>( nMEs ); // (previously was: meDevPtr)
#if defined MGONGPU_CURAND_ONHOST or defined MGONGPU_COMMONRAND_ONHOST
const int nbytesRnarray = nRnarray * sizeof(fptype);
#endif
const int nbytesMomenta = nMomenta * sizeof(fptype);
const int nbytesIsGoodHel = ncomb * sizeof(bool);
const int nbytesWeights = nWeights * sizeof(fptype);
const int nbytesMEs = nMEs * sizeof(fptype);
#endif
std::unique_ptr<double[]> genrtimes( new double[niter] );
std::unique_ptr<double[]> rambtimes( new double[niter] );
std::unique_ptr<double[]> wavetimes( new double[niter] );
std::unique_ptr<fptype[]> matrixelementALL( new fptype[nevtALL] ); // FIXME: assume process.nprocesses == 1
std::unique_ptr<fptype[]> weightALL( new fptype[nevtALL] );
// --- 0c. Create hiprand or common generator
const std::string cgenKey = "0c GenCreat";
timermap.start( cgenKey );
#ifdef MGONGPU_COMMONRAND_ONHOST
std::vector<std::promise<std::vector<fptype>>> commonRandomPromises;
CommonRandomNumbers::startGenerateAsync(commonRandomPromises, nRnarray, niter);
#else
hiprandGenerator_t rnGen;
#ifdef __HIPCC__
grambo2toNm0::createGenerator( &rnGen );
#else
rambo2toNm0::createGenerator( &rnGen );
#endif
#endif
// **************************************
// *** START MAIN LOOP ON #ITERATIONS ***
// **************************************
for (int iiter = 0; iiter < niter; ++iiter)
{
//std::cout << "Iteration #" << iiter+1 << " of " << niter << std::endl;
// === STEP 1 OF 3
// *** START THE OLD-STYLE TIMER FOR RANDOM GEN ***
double genrtime = 0;
#if defined MGONGPU_CURAND_ONHOST or defined MGONGPU_CURAND_ONDEVICE
// --- 1a. Seed hiprand generator (to get same results on host and device)
// [NB This should not be necessary using the host API: "Generation functions
// can be called multiple times on the same generator to generate successive
// blocks of results. For pseudorandom generators, multiple calls to generation
// functions will yield the same result as a single call with a large size."]
const std::string sgenKey = "1a GenSeed ";
timermap.start( sgenKey );
const unsigned long long seed = 20200805;
#ifdef __HIPCC__
grambo2toNm0::seedGenerator( rnGen, seed+iiter );
#else
rambo2toNm0::seedGenerator( rnGen, seed+iiter );
#endif
genrtime += timermap.stop();
#endif
// --- 1b. Generate all relevant numbers to build nevt events (i.e. nevt phase space points) on the host
const std::string rngnKey = "1b GenRnGen";
timermap.start( rngnKey );
#ifdef MGONGPU_COMMONRAND_ONHOST
std::vector<double> commonRnd = commonRandomPromises[iiter].get_future().get();
assert( nRnarray == static_cast<int>( commonRnd.size() ) );
// NB (PR #45): memcpy is strictly needed only in CUDA (copy to pinned memory), but keep it also in C++ for consistency
memcpy( hstRnarray.get(), commonRnd.data(), nRnarray * sizeof(hstRnarray[0]) );
#elif defined __HIPCC__
#ifdef MGONGPU_CURAND_ONDEVICE
grambo2toNm0::generateRnarray( rnGen, devRnarray.get(), nevt );
#elif defined MGONGPU_CURAND_ONHOST
grambo2toNm0::generateRnarray( rnGen, hstRnarray.get(), nevt );
#endif
#else
rambo2toNm0::generateRnarray( rnGen, hstRnarray.get(), nevt );
#endif
//std::cout << "Got random numbers" << std::endl;
#ifdef __HIPCC__
#ifndef MGONGPU_CURAND_ONDEVICE
// --- 1c. Copy rnarray from host to device
const std::string htodKey = "1c CpHTDrnd";
genrtime += timermap.start( htodKey );
// NB (PR #45): this hipMemcpy would involve an intermediate memcpy to pinned memory, if hstRnarray was not already hipMalloc'ed
checkCuda( hipMemcpy( devRnarray.get(), hstRnarray.get(), nbytesRnarray, hipMemcpyHostToDevice ) );
#endif
#endif
// *** STOP THE OLD-STYLE TIMER FOR RANDOM GEN ***
genrtime += timermap.stop();
// === STEP 2 OF 3
// Fill in particle momenta for each of nevt events on the device
// *** START THE OLD-STYLE TIMER FOR RAMBO ***
double rambtime = 0;
// --- 2a. Fill in momenta of initial state particles on the device
const std::string riniKey = "2a RamboIni";
timermap.start( riniKey );
#ifdef __HIPCC__
hipLaunchKernelGGL(( grambo2toNm0::getMomentaInitial), dim3(gpublocks), dim3(gputhreads), 0, 0, energy, devMomenta.get() );
#else
rambo2toNm0::getMomentaInitial( energy, hstMomenta.get(), nevt );
#endif
//std::cout << "Got initial momenta" << std::endl;
// --- 2b. Fill in momenta of final state particles using the RAMBO algorithm on the device
// (i.e. map random numbers to final-state particle momenta for each of nevt events)
const std::string rfinKey = "2b RamboFin";
rambtime += timermap.start( rfinKey );
#ifdef __HIPCC__
hipLaunchKernelGGL(( grambo2toNm0::getMomentaFinal), dim3(gpublocks), dim3(gputhreads), 0, 0, energy, devRnarray.get(), devMomenta.get(), devWeights.get() );
#else
rambo2toNm0::getMomentaFinal( energy, hstRnarray.get(), hstMomenta.get(), hstWeights.get(), nevt );
#endif
//std::cout << "Got final momenta" << std::endl;
#ifdef __HIPCC__
// --- 2c. CopyDToH Weights
const std::string cwgtKey = "2c CpDTHwgt";
rambtime += timermap.start( cwgtKey );
checkCuda( hipMemcpy( hstWeights.get(), devWeights.get(), nbytesWeights, hipMemcpyDeviceToHost ) );
// --- 2d. CopyDToH Momenta
const std::string cmomKey = "2d CpDTHmom";
rambtime += timermap.start( cmomKey );
checkCuda( hipMemcpy( hstMomenta.get(), devMomenta.get(), nbytesMomenta, hipMemcpyDeviceToHost ) );
#endif
// *** STOP THE OLD-STYLE TIMER FOR RAMBO ***
rambtime += timermap.stop();
// === STEP 3 OF 3
// Evaluate matrix elements for all nevt events
// 0d. (Only on the first iteration) Get good helicities [renamed as 0d: this is initialisation!]
// 3a. Evaluate MEs on the device
// 3b. Copy MEs back from device to host
// --- 0d. SGoodHel
#ifdef __HIPCC__
if ( iiter == 0 )
{
const std::string ghelKey = "0d SGoodHel";
timermap.start( ghelKey );
// ... 0d1. Compute good helicity mask on the device
hipLaunchKernelGGL(( gProc::sigmaKin_getGoodHel), dim3(gpublocks), dim3(gputhreads), 0, 0, devMomenta.get(), devIsGoodHel.get());
checkCuda( hipPeekAtLastError() );
// ... 0d2. Copy back good helicity mask to the host
checkCuda( hipMemcpy( hstIsGoodHel.get(), devIsGoodHel.get(), nbytesIsGoodHel, hipMemcpyDeviceToHost ) );
// ... 0d3. Copy back good helicity list to constant memory on the device
gProc::sigmaKin_setGoodHel(hstIsGoodHel.get());
}
#endif
// *** START THE OLD TIMER FOR MATRIX ELEMENTS (WAVEFUNCTIONS) ***
double wavetime = 0;
// --- 3a. SigmaKin
const std::string skinKey = "3a SigmaKin";
timermap.start( skinKey );
#ifdef __HIPCC__
#ifndef MGONGPU_NSIGHT_DEBUG
hipLaunchKernelGGL(( gProc::sigmaKin), dim3(gpublocks), dim3(gputhreads), 0, 0, devMomenta.get(), devMEs.get());
#else
hipLaunchKernelGGL(( gProc::sigmaKin), dim3(gpublocks), dim3(gputhreads), ntpbMAX*sizeof(float), 0, devMomenta.get(), devMEs.get());
#endif
checkCuda( hipPeekAtLastError() );
#else
Proc::sigmaKin(hstMomenta.get(), hstMEs.get(), nevt);
#endif
#ifdef __HIPCC__
// --- 3b. CopyDToH MEs
const std::string cmesKey = "3b CpDTHmes";
wavetime += timermap.start( cmesKey );
checkCuda( hipMemcpy( hstMEs.get(), devMEs.get(), nbytesMEs, hipMemcpyDeviceToHost ) );
#endif
// *** STOP THE OLD TIMER FOR MATRIX ELEMENTS (WAVEFUNCTIONS) ***
wavetime += timermap.stop();
// === STEP 4 FINALISE LOOP
// --- 4a Dump within the loop
const std::string loopKey = "4a DumpLoop";
timermap.start(loopKey);
genrtimes[iiter] = genrtime;
rambtimes[iiter] = rambtime;
wavetimes[iiter] = wavetime;
if (verbose)
{
std::cout << "***********************************************************************" << std::endl
<< "Iteration #" << iiter+1 << " of " << niter << std::endl;
if (perf) std::cout << "Wave function time: " << wavetime << std::endl;
}
for (int ievt = 0; ievt < nevt; ++ievt) // Loop over all events in this iteration
{
if (verbose)
{
// Display momenta
const int ipagM = ievt/neppM; // #eventpage in this iteration
const int ieppM = ievt%neppM; // #event in the current eventpage in this iteration
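// (The flat AOSOA index used in the four operands below is
// ((ipagM*npar + ipar)*np4 + ip4)*neppM + ieppM, for ip4 = 0..3.)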
std::cout << "Momenta:" << std::endl;
for (int ipar = 0; ipar < npar; ipar++)
{
// NB: 'setw' affects only the next field (of any type)
std::cout << std::scientific // scientific format: affects all floats (default precision: 6)
<< std::setw(4) << ipar + 1
<< std::setw(14) << hstMomenta[ipagM*npar*np4*neppM + ipar*neppM*np4 + 0*neppM + ieppM] // AOSOA[ipagM][ipar][0][ieppM]
<< std::setw(14) << hstMomenta[ipagM*npar*np4*neppM + ipar*neppM*np4 + 1*neppM + ieppM] // AOSOA[ipagM][ipar][1][ieppM]
<< std::setw(14) << hstMomenta[ipagM*npar*np4*neppM + ipar*neppM*np4 + 2*neppM + ieppM] // AOSOA[ipagM][ipar][2][ieppM]
<< std::setw(14) << hstMomenta[ipagM*npar*np4*neppM + ipar*neppM*np4 + 3*neppM + ieppM] // AOSOA[ipagM][ipar][3][ieppM]
<< std::endl
<< std::defaultfloat; // default format: affects all floats
}
std::cout << std::string(80, '-') << std::endl;
// Display matrix elements
std::cout << " Matrix element = "
<< hstMEs[ievt] << " GeV^" << meGeVexponent << std::endl; // FIXME: assume process.nprocesses == 1
std::cout << std::string(80, '-') << std::endl;
}
// Fill the arrays with ALL MEs and weights
matrixelementALL[iiter*nevt + ievt] = hstMEs[ievt]; // FIXME: assume process.nprocesses == 1
weightALL[iiter*nevt + ievt] = hstWeights[ievt];
}
if (!(verbose || debug || perf))
{
std::cout << ".";
}
}
// **************************************
// *** END MAIN LOOP ON #ITERATIONS ***
// **************************************
// === STEP 8 ANALYSIS
// --- 8a Analysis: compute stats after the loop
const std::string statKey = "8a CompStat";
timermap.start(statKey);
double sumgtim = 0;
double sqsgtim = 0;
double mingtim = genrtimes[0];
double maxgtim = genrtimes[0];
for ( int iiter = 0; iiter < niter; ++iiter )
{
sumgtim += genrtimes[iiter];
sqsgtim += genrtimes[iiter]*genrtimes[iiter];
mingtim = std::min( mingtim, genrtimes[iiter] );
maxgtim = std::max( maxgtim, genrtimes[iiter] );
}
double sumrtim = 0;
double sqsrtim = 0;
double minrtim = rambtimes[0];
double maxrtim = rambtimes[0];
for ( int iiter = 0; iiter < niter; ++iiter )
{
sumrtim += rambtimes[iiter];
sqsrtim += rambtimes[iiter]*rambtimes[iiter];
minrtim = std::min( minrtim, rambtimes[iiter] );
maxrtim = std::max( maxrtim, rambtimes[iiter] );
}
double sumwtim = 0;
double sqswtim = 0;
double minwtim = wavetimes[0];
double maxwtim = wavetimes[0];
for ( int iiter = 0; iiter < niter; ++iiter )
{
sumwtim += wavetimes[iiter];
sqswtim += wavetimes[iiter]*wavetimes[iiter];
minwtim = std::min( minwtim, wavetimes[iiter] );
maxwtim = std::max( maxwtim, wavetimes[iiter] );
}
double meanwtim = sumwtim / niter;
double stdwtim = std::sqrt( sqswtim / niter - meanwtim * meanwtim );
int nnan = 0;
double minelem = matrixelementALL[0];
double maxelem = matrixelementALL[0];
double minweig = weightALL[0];
double maxweig = weightALL[0];
for ( int ievtALL = 0; ievtALL < nevtALL; ++ievtALL )
{
// Compute min/max
if ( std::isnan( matrixelementALL[ievtALL] ) )
{
if ( debug ) // only printed out with "-p -d" (matrixelementALL is not filled without -p)
std::cout << "WARNING! ME[" << ievtALL << "} is nan" << std::endl;
nnan++;
continue;
}
minelem = std::min( minelem, (double)matrixelementALL[ievtALL] );
maxelem = std::max( maxelem, (double)matrixelementALL[ievtALL] );
minweig = std::min( minweig, (double)weightALL[ievtALL] );
maxweig = std::max( maxweig, (double)weightALL[ievtALL] );
}
double sumelemdiff = 0;
double sumweigdiff = 0;
for ( int ievtALL = 0; ievtALL < nevtALL; ++ievtALL )
{
// Compute mean from the sum of diff to min
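// (Accumulating differences to the minimum keeps the running sum small; presumably
// intended to reduce floating-point cancellation when all values are of similar size.)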
if ( std::isnan( matrixelementALL[ievtALL] ) ) continue;
sumelemdiff += ( matrixelementALL[ievtALL] - minelem );
sumweigdiff += ( weightALL[ievtALL] - minweig );
}
double meanelem = minelem + sumelemdiff / ( nevtALL - nnan );
double meanweig = minweig + sumweigdiff / ( nevtALL - nnan );
double sqselemdiff = 0;
double sqsweigdiff = 0;
for ( int ievtALL = 0; ievtALL < nevtALL; ++ievtALL )
{
// Compute stddev from the squared sum of diff to mean
if ( std::isnan( matrixelementALL[ievtALL] ) ) continue;
sqselemdiff += std::pow( matrixelementALL[ievtALL] - meanelem, 2 );
sqsweigdiff += std::pow( weightALL[ievtALL] - meanweig, 2 );
}
double stdelem = std::sqrt( sqselemdiff / ( nevtALL - nnan ) );
double stdweig = std::sqrt( sqsweigdiff / ( nevtALL - nnan ) );
// === STEP 9 FINALISE
// --- 9a. Destroy hiprand generator
const std::string dgenKey = "9a GenDestr";
timermap.start( dgenKey );
#ifndef MGONGPU_COMMONRAND_ONHOST
#ifdef __HIPCC__
grambo2toNm0::destroyGenerator( rnGen );
#else
rambo2toNm0::destroyGenerator( rnGen );
#endif
#endif
// --- 9b Dump to screen
const std::string dumpKey = "9b DumpScrn";
timermap.start(dumpKey);
if (!(verbose || debug || perf))
{
std::cout << std::endl;
}
if (perf)
{
std::cout << "***********************************************************************" << std::endl
<< "NumBlocksPerGrid = " << gpublocks << std::endl
<< "NumThreadsPerBlock = " << gputhreads << std::endl
<< "NumIterations = " << niter << std::endl
<< "-----------------------------------------------------------------------" << std::endl
#if defined MGONGPU_FPTYPE_DOUBLE
<< "FP precision = DOUBLE (nan=" << nnan << ")" << std::endl
#elif defined MGONGPU_FPTYPE_FLOAT
<< "FP precision = FLOAT (nan=" << nnan << ")" << std::endl
#endif
#ifdef __HIPCC__
#if defined MGONGPU_CXTYPE_CUCOMPLEX
<< "Complex type = CUCOMPLEX" << std::endl
#elif defined MGONGPU_CXTYPE_THRUST
<< "Complex type = THRUST::COMPLEX" << std::endl
#endif
#else
<< "Complex type = STD::COMPLEX" << std::endl
#endif
<< "RanNumb memory layout = AOSOA[" << neppR << "]"
<< ( neppR == 1 ? " == AOS" : "" ) << std::endl
<< "Momenta memory layout = AOSOA[" << neppM << "]"
<< ( neppM == 1 ? " == AOS" : "" ) << std::endl
#ifdef __HIPCC__
<< "Wavefunction GPU memory = LOCAL" << std::endl
#endif
#ifdef __HIPCC__
#if defined MGONGPU_COMMONRAND_ONHOST
<< "Random number generation = COMMON RANDOM HOST (CUDA code)" << std::endl
#elif defined MGONGPU_CURAND_ONDEVICE
<< "Random number generation = CURAND DEVICE (CUDA code)" << std::endl
#elif defined MGONGPU_CURAND_ONHOST
<< "Random number generation = CURAND HOST (CUDA code)" << std::endl
#endif
#else
#if defined MGONGPU_COMMONRAND_ONHOST
<< "Random number generation = COMMON RANDOM (C++ code)" << std::endl
#else
<< "Random number generation = CURAND (C++ code)" << std::endl
#endif
#endif
<< "-----------------------------------------------------------------------" << std::endl
<< "NumberOfEntries = " << niter << std::endl
<< std::scientific // scientific format: affects all floats (default precision: 6)
<< "TotalTime[Rnd+Rmb+ME] (123)= ( " << sumgtim+sumrtim+sumwtim << std::string(16, ' ') << " ) sec" << std::endl
<< "TotalTime[Rambo+ME] (23)= ( " << sumrtim+sumwtim << std::string(16, ' ') << " ) sec" << std::endl
<< "TotalTime[RndNumGen] (1)= ( " << sumgtim << std::string(16, ' ') << " ) sec" << std::endl
<< "TotalTime[Rambo] (2)= ( " << sumrtim << std::string(16, ' ') << " ) sec" << std::endl
<< "TotalTime[MatrixElems] (3)= ( " << sumwtim << std::string(16, ' ') << " ) sec" << std::endl
<< "MeanTimeInMatrixElems = ( " << meanwtim << std::string(16, ' ') << " ) sec" << std::endl
<< "[Min,Max]TimeInMatrixElems = [ " << minwtim
<< " , " << maxwtim << " ] sec" << std::endl
//<< "StdDevTimeInWaveFuncs = ( " << stdwtim << std::string(16, ' ') << " ) sec" << std::endl
<< "-----------------------------------------------------------------------" << std::endl
//<< "ProcessID: = " << getpid() << std::endl
//<< "NProcesses = " << process.nprocesses << std::endl
<< "TotalEventsComputed = " << nevtALL << std::endl
<< "EvtsPerSec[Rnd+Rmb+ME](123)= ( " << nevtALL/(sumgtim+sumrtim+sumwtim)
<< std::string(16, ' ') << " ) sec^-1" << std::endl
<< "EvtsPerSec[Rmb+ME] (23)= ( " << nevtALL/(sumrtim+sumwtim)
<< std::string(16, ' ') << " ) sec^-1" << std::endl
//<< "EvtsPerSec[RndNumbGen] (1)= ( " << nevtALL/sumgtim
//<< std::string(16, ' ') << " ) sec^-1" << std::endl
//<< "EvtsPerSec[Rambo] (2)= ( " << nevtALL/sumrtim
//<< std::string(16, ' ') << " ) sec^-1" << std::endl
<< "EvtsPerSec[MatrixElems] (3)= ( " << nevtALL/sumwtim
<< std::string(16, ' ') << " ) sec^-1" << std::endl
<< std::defaultfloat; // default format: affects all floats
std::cout << "***********************************************************************" << std::endl
<< "NumMatrixElements(notNan) = " << nevtALL - nnan << std::endl
<< std::scientific // scientific format: affects all floats (default precision: 6)
<< "MeanMatrixElemValue = ( " << meanelem
<< " +- " << stdelem/sqrt(nevtALL - nnan) << " ) GeV^" << meGeVexponent << std::endl // standard error
<< "[Min,Max]MatrixElemValue = [ " << minelem
<< " , " << maxelem << " ] GeV^" << meGeVexponent << std::endl
<< "StdDevMatrixElemValue = ( " << stdelem << std::string(16, ' ') << " ) GeV^" << meGeVexponent << std::endl
<< "MeanWeight = ( " << meanweig
<< " +- " << stdweig/sqrt(nevtALL - nnan) << " )" << std::endl // standard error
<< "[Min,Max]Weight = [ " << minweig
<< " , " << maxweig << " ]" << std::endl
<< "StdDevWeight = ( " << stdweig << std::string(16, ' ') << " )" << std::endl
<< std::defaultfloat; // default format: affects all floats
}
// --- 9c Dump to json
const std::string jsonKey = "9c DumpJson";
timermap.start(jsonKey);
if(json)
{
std::string jsonFileName = std::to_string(jsondate) + "-perf-test-run" + std::to_string(jsonrun) + ".json";
jsonFileName = "./perf/data/" + jsonFileName;
//Checks if file exists
std::ifstream fileCheck;
bool fileExists = false;
fileCheck.open(jsonFileName);
if(fileCheck){
fileExists = true;
fileCheck.close();
}
std::ofstream jsonFile;
jsonFile.open(jsonFileName, std::ios_base::app);
if(!fileExists){
jsonFile << "[" << std::endl;
}
else{
//deleting the last bracket and outputting a ", "
std::string temp = "truncate -s-1 " + jsonFileName;
const char *command = temp.c_str();
system(command);
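// (The existing file ends with ']': shelling out to the 'truncate' utility strips that
// single byte so the new entry can be appended and the array re-closed at the end.)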
jsonFile << ", " << std::endl;
}
jsonFile << "{" << std::endl
<< "\"NumIterations\": " << niter << ", " << std::endl
<< "\"NumThreadsPerBlock\": " << gputhreads << ", " << std::endl
<< "\"NumBlocksPerGrid\": " << gpublocks << ", " << std::endl
#if defined MGONGPU_FPTYPE_DOUBLE
<< "\"FP precision\": "
<< "\"DOUBLE (nan=" << nnan << ")\"," << std::endl
#elif defined MGONGPU_FPTYPE_FLOAT
<< "\"FP precision\": " << "FLOAT (nan=" << nnan << ")," << std::endl
#endif
<< "\"Complex type\": "
#ifdef __HIPCC__
#if defined MGONGPU_CXTYPE_CUCOMPLEX
<< "\"CUCOMPLEX\"," << std::endl
#elif defined MGONGPU_CXTYPE_THRUST
<< "\"THRUST::COMPLEX\"," << std::endl
#endif
#else
<< "\"STD::COMPLEX\"," << std::endl
#endif
<< "\"RanNumb memory layout\": " << "\"AOSOA[" << neppR << "]\""
<< ( neppR == 1 ? " == AOS" : "" ) << ", " << std::endl
<< "\"Momenta memory layout\": " << "\"AOSOA[" << neppM << "]\""
<< ( neppM == 1 ? " == AOS" : "" ) << ", " << std::endl
#ifdef __HIPCC__
<< "\"Wavefunction GPU memory\": " << "\"LOCAL\"," << std::endl
#endif
<< "\"Curand generation\": "
#ifdef __HIPCC__
#if defined MGONGPU_CURAND_ONDEVICE
<< "\"DEVICE (CUDA code)\"," << std::endl
#elif defined MGONGPU_CURAND_ONHOST
<< "\"HOST (CUDA code)\"," << std::endl
#endif
#else
<< "\"HOST (C++ code)\"," << std::endl
#endif
<< "\"NumberOfEntries\": " << niter << "," << std::endl
//<< std::scientific // Not sure about this
<< "\"TotalTimeInWaveFuncs\": "
<< "\"" << std::to_string(sumwtim) << " sec\"," << std::endl
<< "\"MeanTimeInWaveFuncs\": "
<< "\"" << std::to_string(meanwtim) << " sec\"," << std::endl
<< "\"StdDevTimeInWaveFuncs\": "
<< "\"" << std::to_string(stdwtim) << " sec\"," << std::endl
<< "\"MinTimeInWaveFuncs\": "
<< "\"" << std::to_string(minwtim) << " sec\"," << std::endl
<< "\"MaxTimeInWaveFuncs\": "
<< "\"" << std::to_string(maxwtim) << " sec\"," << std::endl
//<< "ProcessID: = " << getpid() << std::endl
//<< "NProcesses = " << process.nprocesses << std::endl
<< "\"TotalEventsComputed\": " << nevtALL << "," << std::endl
<< "\"RamboEventsPerSec\": "
<< "\"" << std::to_string(nevtALL/sumrtim) << " sec^-1\"," << std::endl
<< "\"MatrixElemEventsPerSec\": "
<< "\"" << std::to_string(nevtALL/sumwtim) << " sec^-1\"," << std::endl
<< "\"NumMatrixElements(notNan)\": " << nevtALL - nnan << "," << std::endl
<< std::scientific
<< "\"MeanMatrixElemValue\": "
<< "\"" << std::to_string(meanelem) << " GeV^"
<< std::to_string(meGeVexponent) << "\"," << std::endl
<< "\"StdErrMatrixElemValue\": "
<< "\"" << std::to_string(stdelem/sqrt(nevtALL)) << " GeV^"
<< std::to_string(meGeVexponent) << "\"," << std::endl
<< "\"StdDevMatrixElemValue\": "
<< "\"" << std::to_string(stdelem)
<< " GeV^" << std::to_string(meGeVexponent) << "\"," << std::endl
<< "\"MinMatrixElemValue\": "
<< "\"" << std::to_string(minelem) << " GeV^"
<< std::to_string(meGeVexponent) << "\"," << std::endl
<< "\"MaxMatrixElemValue\": "
<< "\"" << std::to_string(maxelem) << " GeV^"
<< std::to_string(meGeVexponent) << "\"," << std::endl;
timermap.dump(jsonFile, true); // NB For the active json timer this dumps a partial total
jsonFile << "}" << std::endl << "]";
jsonFile.close();
}
// *** STOP THE NEW TIMERS ***
timermap.stop();
if (perf)
{
std::cout << "***********************************************************************" << std::endl;
timermap.dump();
std::cout << "***********************************************************************" << std::endl;
}
//std::cout << "ALL OK" << std::endl;
}
|
44c5bc289a4f8b63d7202b24deeb94e7140ed200.cu
|
#include <algorithm>
#include <cmath>
#include <cstring>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <memory>
#include <numeric>
#include <string>
#include <unistd.h>
#include "mgOnGpuConfig.h"
#include "mgOnGpuTypes.h"
#ifdef __CUDACC__
#include "rambo.cc"
#else
#include "rambo.h"
#endif
#ifdef MGONGPU_COMMONRAND_ONHOST
#include "CommonRandomNumbers.h"
#endif
#include "gCPPProcess.h"
#include "timermap.h"
bool is_number(const char *s) {
const char *t = s;
while (*t != '\0' && isdigit(*t))
++t;
return (int)strlen(s) == t - s;
}
int usage(char* argv0, int ret = 1) {
std::cout << "Usage: " << argv0
<< " [--verbose|-v] [--debug|-d] [--performance|-p] [--json|-j]"
<< " [#gpuBlocksPerGrid #gpuThreadsPerBlock] #iterations" << std::endl << std::endl;
std::cout << "The number of events per iteration is #gpuBlocksPerGrid * #gpuThreadsPerBlock" << std::endl;
std::cout << "(also in CPU/C++ code, where only the product of these two parameters counts)" << std::endl << std::endl;
std::cout << "Summary stats are always computed: '-p' and '-j' only control their printout" << std::endl;
std::cout << "The '-d' flag only controls if nan's emit warnings" << std::endl;
return ret;
}
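// Example: "-p 64 256 10" requests the performance printout and runs 64 blocks x 256
// threads (16384 events per iteration) for 10 iterations, provided 256 passes the
// neppR/neppM/ntpbMAX checks in main(); "-p 10" keeps the default 1 block x 32 threads.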
#ifdef __CUDACC__
template<typename T = fptype>
struct CudaDevDeleter {
void operator()(T* mem) {
checkCuda( cudaFree( mem ) );
}
};
template<typename T = fptype>
std::unique_ptr<T, CudaDevDeleter<T>> devMakeUnique(std::size_t N) {
T* tmp = nullptr;
checkCuda( cudaMalloc( &tmp, N * sizeof(T) ) );
return std::unique_ptr<T, CudaDevDeleter<T>>{ tmp };
}
template<typename T = fptype>
struct CudaHstDeleter {
void operator()(T* mem) {
checkCuda( cudaFreeHost( mem ) );
}
};
template<typename T = fptype>
std::unique_ptr<T[], CudaHstDeleter<T>> hstMakeUnique(std::size_t N) {
T* tmp = nullptr;
checkCuda( cudaMallocHost( &tmp, N * sizeof(T) ) );
return std::unique_ptr<T[], CudaHstDeleter<T>>{ tmp };
};
#else
template<typename T = fptype>
std::unique_ptr<T[]> hstMakeUnique(std::size_t N) { return std::unique_ptr<T[]>{ new T[N] }; };
#endif
int main(int argc, char **argv)
{
// READ COMMAND LINE ARGUMENTS
bool verbose = false;
bool debug = false;
bool perf = false;
bool json = false;
int niter = 0;
int gpublocks = 1;
int gputhreads = 32;
int jsondate = 0;
int jsonrun = 0;
int numvec[5] = {0,0,0,0,0};
int nnum = 0;
for (int argn = 1; argn < argc; ++argn) {
if (strcmp(argv[argn], "--verbose") == 0 || strcmp(argv[argn], "-v") == 0)
verbose = true;
else if (strcmp(argv[argn], "--debug") == 0 ||
strcmp(argv[argn], "-d") == 0)
debug = true;
else if (strcmp(argv[argn], "--performance") == 0 ||
strcmp(argv[argn], "-p") == 0)
perf = true;
else if (strcmp(argv[argn], "--json") == 0 ||
strcmp(argv[argn], "-j") == 0)
json = true;
else if (is_number(argv[argn]) && nnum<5)
numvec[nnum++] = atoi(argv[argn]);
else
return usage(argv[0]);
}
if (nnum == 3 || nnum == 5) {
gpublocks = numvec[0];
gputhreads = numvec[1];
niter = numvec[2];
if (nnum == 5){
jsondate = numvec[3];
jsonrun = numvec[4];
}
} else if (nnum == 1) {
niter = numvec[0];
} else {
return usage(argv[0]);
}
if (niter == 0)
return usage(argv[0]);
const int neppR = mgOnGpu::neppR; // ASA layout: constant at compile-time
if ( gputhreads%neppR != 0 )
{
std::cout << "ERROR! #threads/block should be a multiple of neppR=" << neppR << std::endl;
return usage(argv[0]);
}
const int neppM = mgOnGpu::neppM; // ASA layout: constant at compile-time
if ( gputhreads%neppM != 0 )
{
std::cout << "ERROR! #threads/block should be a multiple of neppM=" << neppM << std::endl;
return usage(argv[0]);
}
using mgOnGpu::ntpbMAX;
if ( gputhreads > ntpbMAX )
{
std::cout << "ERROR! #threads/block should be <= " << ntpbMAX << std::endl;
return usage(argv[0]);
}
const int ndim = gpublocks * gputhreads; // number of threads in one GPU grid
const int nevt = ndim; // number of events in one iteration == number of GPU threads
const int nevtALL = niter*nevt; // total number of ALL events in all iterations
if (verbose)
std::cout << "# iterations: " << niter << std::endl;
// *** START THE NEW TIMERS ***
mgOnGpu::TimerMap timermap;
// === STEP 0 - INITIALISE
#ifdef __CUDACC__
// --- 00. Initialise cuda (call cudaFree to ease cuda profile analysis)
const std::string cdfrKey = "00 CudaFree";
timermap.start( cdfrKey );
//std::cout << "Calling cudaFree... " << std::endl;
checkCuda( cudaFree( 0 ) ); // SLOW!
//std::cout << "Calling cudaFree... done" << std::endl;
// --- Book the tear down at the end of main:
struct CudaTearDown {
CudaTearDown(bool print) : _print(print) { }
~CudaTearDown() {
if ( _print ) std::cout << "Calling cudaDeviceReset()." << std::endl;
checkCuda( cudaDeviceReset() ); // this is needed by cuda-memcheck --leak-check full
}
bool _print{false};
} cudaTearDown(debug);
#endif
// --- 0a. Initialise physics process
const std::string procKey = "0a ProcInit";
timermap.start( procKey );
// Create a process object
#ifdef __CUDACC__
gProc::CPPProcess process( niter, gpublocks, gputhreads, verbose );
#else
Proc::CPPProcess process( niter, gpublocks, gputhreads, verbose );
#endif
// Read param_card and set parameters
process.initProc("../../Cards/param_card.dat");
const fptype energy = 1500; // historical default, Ecms = 1500 GeV = 1.5 TeV (above the Z peak)
//const fptype energy = 91.2; // Ecms = 91.2 GeV (Z peak)
//const fptype energy = 0.100; // Ecms = 100 MeV (well below the Z peak, pure em scattering)
const int meGeVexponent = -(2 * process.nexternal - 8);
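// (Note: the squared amplitude for a process with nexternal external legs has mass
// dimension 8 - 2*nexternal, so it is dimensionless for a 2->2 process; meGeVexponent
// is only used below to label the "GeV^..." units in the printouts.)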
// --- 0b. Allocate memory structures
const std::string alloKey = "0b MemAlloc";
timermap.start( alloKey );
// Memory structures for random numbers, momenta, matrix elements and weights on host and device
using mgOnGpu::np4;
using mgOnGpu::nparf;
using mgOnGpu::npar;
using mgOnGpu::ncomb; // Number of helicity combinations
const int nRnarray = np4*nparf*nevt; // (NB: ASA layout with nevt=npagR*neppR events per iteration)
const int nMomenta = np4*npar*nevt; // (NB: nevt=npagM*neppM for ASA layouts)
const int nWeights = nevt;
const int nMEs = nevt;
#if defined MGONGPU_CURAND_ONHOST or defined MGONGPU_COMMONRAND_ONHOST or not defined __CUDACC__
auto hstRnarray = hstMakeUnique<fptype>( nRnarray ); // AOSOA[npagR][nparf][np4][neppR] (NB: nevt=npagR*neppR)
#endif
auto hstMomenta = hstMakeUnique<fptype>( nMomenta ); // AOSOA[npagM][npar][np4][neppM] (previously was: lp)
auto hstIsGoodHel = hstMakeUnique<bool >( ncomb );
auto hstWeights = hstMakeUnique<fptype>( nWeights ); // (previously was: meHostPtr)
auto hstMEs = hstMakeUnique<fptype>( nMEs ); // (previously was: meHostPtr)
#ifdef __CUDACC__
auto devRnarray = devMakeUnique<fptype>( nRnarray ); // AOSOA[npagR][nparf][np4][neppR] (NB: nevt=npagR*neppR)
auto devMomenta = devMakeUnique<fptype>( nMomenta ); // (previously was: allMomenta)
auto devIsGoodHel = devMakeUnique<bool >( ncomb );
auto devWeights = devMakeUnique<fptype>( nWeights ); // (previously was: meDevPtr)
auto devMEs = devMakeUnique<fptype>( nMEs ); // (previously was: meDevPtr)
#if defined MGONGPU_CURAND_ONHOST or defined MGONGPU_COMMONRAND_ONHOST
const int nbytesRnarray = nRnarray * sizeof(fptype);
#endif
const int nbytesMomenta = nMomenta * sizeof(fptype);
const int nbytesIsGoodHel = ncomb * sizeof(bool);
const int nbytesWeights = nWeights * sizeof(fptype);
const int nbytesMEs = nMEs * sizeof(fptype);
#endif
std::unique_ptr<double[]> genrtimes( new double[niter] );
std::unique_ptr<double[]> rambtimes( new double[niter] );
std::unique_ptr<double[]> wavetimes( new double[niter] );
std::unique_ptr<fptype[]> matrixelementALL( new fptype[nevtALL] ); // FIXME: assume process.nprocesses == 1
std::unique_ptr<fptype[]> weightALL( new fptype[nevtALL] );
// --- 0c. Create curand or common generator
const std::string cgenKey = "0c GenCreat";
timermap.start( cgenKey );
#ifdef MGONGPU_COMMONRAND_ONHOST
std::vector<std::promise<std::vector<fptype>>> commonRandomPromises;
CommonRandomNumbers::startGenerateAsync(commonRandomPromises, nRnarray, niter);
#else
curandGenerator_t rnGen;
#ifdef __CUDACC__
grambo2toNm0::createGenerator( &rnGen );
#else
rambo2toNm0::createGenerator( &rnGen );
#endif
#endif
// **************************************
// *** START MAIN LOOP ON #ITERATIONS ***
// **************************************
for (int iiter = 0; iiter < niter; ++iiter)
{
//std::cout << "Iteration #" << iiter+1 << " of " << niter << std::endl;
// === STEP 1 OF 3
// *** START THE OLD-STYLE TIMER FOR RANDOM GEN ***
double genrtime = 0;
#if defined MGONGPU_CURAND_ONHOST or defined MGONGPU_CURAND_ONDEVICE
// --- 1a. Seed curand generator (to get same results on host and device)
// [NB This should not be necessary using the host API: "Generation functions
// can be called multiple times on the same generator to generate successive
// blocks of results. For pseudorandom generators, multiple calls to generation
// functions will yield the same result as a single call with a large size."]
const std::string sgenKey = "1a GenSeed ";
timermap.start( sgenKey );
const unsigned long long seed = 20200805;
#ifdef __CUDACC__
grambo2toNm0::seedGenerator( rnGen, seed+iiter );
#else
rambo2toNm0::seedGenerator( rnGen, seed+iiter );
#endif
genrtime += timermap.stop();
#endif
// --- 1b. Generate all relevant numbers to build nevt events (i.e. nevt phase space points) on the host
const std::string rngnKey = "1b GenRnGen";
timermap.start( rngnKey );
#ifdef MGONGPU_COMMONRAND_ONHOST
std::vector<double> commonRnd = commonRandomPromises[iiter].get_future().get();
assert( nRnarray == static_cast<int>( commonRnd.size() ) );
// NB (PR #45): memcpy is strictly needed only in CUDA (copy to pinned memory), but keep it also in C++ for consistency
memcpy( hstRnarray.get(), commonRnd.data(), nRnarray * sizeof(hstRnarray[0]) );
#elif defined __CUDACC__
#ifdef MGONGPU_CURAND_ONDEVICE
grambo2toNm0::generateRnarray( rnGen, devRnarray.get(), nevt );
#elif defined MGONGPU_CURAND_ONHOST
grambo2toNm0::generateRnarray( rnGen, hstRnarray.get(), nevt );
#endif
#else
rambo2toNm0::generateRnarray( rnGen, hstRnarray.get(), nevt );
#endif
//std::cout << "Got random numbers" << std::endl;
#ifdef __CUDACC__
#ifndef MGONGPU_CURAND_ONDEVICE
// --- 1c. Copy rnarray from host to device
const std::string htodKey = "1c CpHTDrnd";
genrtime += timermap.start( htodKey );
// NB (PR #45): this cudaMemcpy would involve an intermediate memcpy to pinned memory, if hstRnarray was not already cudaMalloc'ed
checkCuda( cudaMemcpy( devRnarray.get(), hstRnarray.get(), nbytesRnarray, cudaMemcpyHostToDevice ) );
#endif
#endif
// *** STOP THE OLD-STYLE TIMER FOR RANDOM GEN ***
genrtime += timermap.stop();
// === STEP 2 OF 3
// Fill in particle momenta for each of nevt events on the device
// *** START THE OLD-STYLE TIMER FOR RAMBO ***
double rambtime = 0;
// --- 2a. Fill in momenta of initial state particles on the device
const std::string riniKey = "2a RamboIni";
timermap.start( riniKey );
#ifdef __CUDACC__
grambo2toNm0::getMomentaInitial<<<gpublocks, gputhreads>>>( energy, devMomenta.get() );
#else
rambo2toNm0::getMomentaInitial( energy, hstMomenta.get(), nevt );
#endif
//std::cout << "Got initial momenta" << std::endl;
// --- 2b. Fill in momenta of final state particles using the RAMBO algorithm on the device
// (i.e. map random numbers to final-state particle momenta for each of nevt events)
const std::string rfinKey = "2b RamboFin";
rambtime += timermap.start( rfinKey );
#ifdef __CUDACC__
grambo2toNm0::getMomentaFinal<<<gpublocks, gputhreads>>>( energy, devRnarray.get(), devMomenta.get(), devWeights.get() );
#else
rambo2toNm0::getMomentaFinal( energy, hstRnarray.get(), hstMomenta.get(), hstWeights.get(), nevt );
#endif
//std::cout << "Got final momenta" << std::endl;
#ifdef __CUDACC__
// --- 2c. CopyDToH Weights
const std::string cwgtKey = "2c CpDTHwgt";
rambtime += timermap.start( cwgtKey );
checkCuda( cudaMemcpy( hstWeights.get(), devWeights.get(), nbytesWeights, cudaMemcpyDeviceToHost ) );
// --- 2d. CopyDToH Momenta
const std::string cmomKey = "2d CpDTHmom";
rambtime += timermap.start( cmomKey );
checkCuda( cudaMemcpy( hstMomenta.get(), devMomenta.get(), nbytesMomenta, cudaMemcpyDeviceToHost ) );
#endif
// *** STOP THE OLD-STYLE TIMER FOR RAMBO ***
rambtime += timermap.stop();
// === STEP 3 OF 3
// Evaluate matrix elements for all nevt events
// 0d. (Only on the first iteration) Get good helicities [renamed as 0d: this is initialisation!]
// 3a. Evaluate MEs on the device
// 3b. Copy MEs back from device to host
// --- 0d. SGoodHel
#ifdef __CUDACC__
if ( iiter == 0 )
{
const std::string ghelKey = "0d SGoodHel";
timermap.start( ghelKey );
// ... 0d1. Compute good helicity mask on the device
gProc::sigmaKin_getGoodHel<<<gpublocks, gputhreads>>>(devMomenta.get(), devIsGoodHel.get());
checkCuda( cudaPeekAtLastError() );
// ... 0d2. Copy back good helicity mask to the host
checkCuda( cudaMemcpy( hstIsGoodHel.get(), devIsGoodHel.get(), nbytesIsGoodHel, cudaMemcpyDeviceToHost ) );
// ... 0d3. Copy back good helicity list to constant memory on the device
gProc::sigmaKin_setGoodHel(hstIsGoodHel.get());
}
#endif
// *** START THE OLD TIMER FOR MATRIX ELEMENTS (WAVEFUNCTIONS) ***
double wavetime = 0;
// --- 3a. SigmaKin
const std::string skinKey = "3a SigmaKin";
timermap.start( skinKey );
#ifdef __CUDACC__
#ifndef MGONGPU_NSIGHT_DEBUG
gProc::sigmaKin<<<gpublocks, gputhreads>>>(devMomenta.get(), devMEs.get());
#else
gProc::sigmaKin<<<gpublocks, gputhreads, ntpbMAX*sizeof(float)>>>(devMomenta.get(), devMEs.get());
#endif
checkCuda( cudaPeekAtLastError() );
#else
Proc::sigmaKin(hstMomenta.get(), hstMEs.get(), nevt);
#endif
#ifdef __CUDACC__
// --- 3b. CopyDToH MEs
const std::string cmesKey = "3b CpDTHmes";
wavetime += timermap.start( cmesKey );
checkCuda( cudaMemcpy( hstMEs.get(), devMEs.get(), nbytesMEs, cudaMemcpyDeviceToHost ) );
#endif
// *** STOP THE OLD TIMER FOR MATRIX ELEMENTS (WAVEFUNCTIONS) ***
wavetime += timermap.stop();
// === STEP 4 FINALISE LOOP
// --- 4a Dump within the loop
const std::string loopKey = "4a DumpLoop";
timermap.start(loopKey);
genrtimes[iiter] = genrtime;
rambtimes[iiter] = rambtime;
wavetimes[iiter] = wavetime;
if (verbose)
{
std::cout << "***********************************************************************" << std::endl
<< "Iteration #" << iiter+1 << " of " << niter << std::endl;
if (perf) std::cout << "Wave function time: " << wavetime << std::endl;
}
for (int ievt = 0; ievt < nevt; ++ievt) // Loop over all events in this iteration
{
if (verbose)
{
// Display momenta
const int ipagM = ievt/neppM; // #eventpage in this iteration
const int ieppM = ievt%neppM; // #event in the current eventpage in this iteration
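// (The flat AOSOA index used in the four operands below is
// ((ipagM*npar + ipar)*np4 + ip4)*neppM + ieppM, for ip4 = 0..3.)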
std::cout << "Momenta:" << std::endl;
for (int ipar = 0; ipar < npar; ipar++)
{
// NB: 'setw' affects only the next field (of any type)
std::cout << std::scientific // scientific format: affects all floats (default precision: 6)
<< std::setw(4) << ipar + 1
<< std::setw(14) << hstMomenta[ipagM*npar*np4*neppM + ipar*neppM*np4 + 0*neppM + ieppM] // AOSOA[ipagM][ipar][0][ieppM]
<< std::setw(14) << hstMomenta[ipagM*npar*np4*neppM + ipar*neppM*np4 + 1*neppM + ieppM] // AOSOA[ipagM][ipar][1][ieppM]
<< std::setw(14) << hstMomenta[ipagM*npar*np4*neppM + ipar*neppM*np4 + 2*neppM + ieppM] // AOSOA[ipagM][ipar][2][ieppM]
<< std::setw(14) << hstMomenta[ipagM*npar*np4*neppM + ipar*neppM*np4 + 3*neppM + ieppM] // AOSOA[ipagM][ipar][3][ieppM]
<< std::endl
<< std::defaultfloat; // default format: affects all floats
}
std::cout << std::string(80, '-') << std::endl;
// Display matrix elements
std::cout << " Matrix element = "
<< hstMEs[ievt] << " GeV^" << meGeVexponent << std::endl; // FIXME: assume process.nprocesses == 1
std::cout << std::string(80, '-') << std::endl;
}
// Fill the arrays with ALL MEs and weights
matrixelementALL[iiter*nevt + ievt] = hstMEs[ievt]; // FIXME: assume process.nprocesses == 1
weightALL[iiter*nevt + ievt] = hstWeights[ievt];
}
if (!(verbose || debug || perf))
{
std::cout << ".";
}
}
// **************************************
// *** END MAIN LOOP ON #ITERATIONS ***
// **************************************
// === STEP 8 ANALYSIS
// --- 8a Analysis: compute stats after the loop
const std::string statKey = "8a CompStat";
timermap.start(statKey);
double sumgtim = 0;
double sqsgtim = 0;
double mingtim = genrtimes[0];
double maxgtim = genrtimes[0];
for ( int iiter = 0; iiter < niter; ++iiter )
{
sumgtim += genrtimes[iiter];
sqsgtim += genrtimes[iiter]*genrtimes[iiter];
mingtim = std::min( mingtim, genrtimes[iiter] );
maxgtim = std::max( maxgtim, genrtimes[iiter] );
}
double sumrtim = 0;
double sqsrtim = 0;
double minrtim = rambtimes[0];
double maxrtim = rambtimes[0];
for ( int iiter = 0; iiter < niter; ++iiter )
{
sumrtim += rambtimes[iiter];
sqsrtim += rambtimes[iiter]*rambtimes[iiter];
minrtim = std::min( minrtim, rambtimes[iiter] );
maxrtim = std::max( maxrtim, rambtimes[iiter] );
}
double sumwtim = 0;
double sqswtim = 0;
double minwtim = wavetimes[0];
double maxwtim = wavetimes[0];
for ( int iiter = 0; iiter < niter; ++iiter )
{
sumwtim += wavetimes[iiter];
sqswtim += wavetimes[iiter]*wavetimes[iiter];
minwtim = std::min( minwtim, wavetimes[iiter] );
maxwtim = std::max( maxwtim, wavetimes[iiter] );
}
double meanwtim = sumwtim / niter;
double stdwtim = std::sqrt( sqswtim / niter - meanwtim * meanwtim );
int nnan = 0;
double minelem = matrixelementALL[0];
double maxelem = matrixelementALL[0];
double minweig = weightALL[0];
double maxweig = weightALL[0];
for ( int ievtALL = 0; ievtALL < nevtALL; ++ievtALL )
{
// Compute min/max
if ( std::isnan( matrixelementALL[ievtALL] ) )
{
if ( debug ) // only printed out with "-p -d" (matrixelementALL is not filled without -p)
std::cout << "WARNING! ME[" << ievtALL << "} is nan" << std::endl;
nnan++;
continue;
}
minelem = std::min( minelem, (double)matrixelementALL[ievtALL] );
maxelem = std::max( maxelem, (double)matrixelementALL[ievtALL] );
minweig = std::min( minweig, (double)weightALL[ievtALL] );
maxweig = std::max( maxweig, (double)weightALL[ievtALL] );
}
double sumelemdiff = 0;
double sumweigdiff = 0;
for ( int ievtALL = 0; ievtALL < nevtALL; ++ievtALL )
{
// Compute mean from the sum of diff to min
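// (Accumulating differences to the minimum keeps the running sum small; presumably
// intended to reduce floating-point cancellation when all values are of similar size.)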
if ( std::isnan( matrixelementALL[ievtALL] ) ) continue;
sumelemdiff += ( matrixelementALL[ievtALL] - minelem );
sumweigdiff += ( weightALL[ievtALL] - minweig );
}
double meanelem = minelem + sumelemdiff / ( nevtALL - nnan );
double meanweig = minweig + sumweigdiff / ( nevtALL - nnan );
double sqselemdiff = 0;
double sqsweigdiff = 0;
for ( int ievtALL = 0; ievtALL < nevtALL; ++ievtALL )
{
// Compute stddev from the squared sum of diff to mean
if ( std::isnan( matrixelementALL[ievtALL] ) ) continue;
sqselemdiff += std::pow( matrixelementALL[ievtALL] - meanelem, 2 );
sqsweigdiff += std::pow( weightALL[ievtALL] - meanweig, 2 );
}
double stdelem = std::sqrt( sqselemdiff / ( nevtALL - nnan ) );
double stdweig = std::sqrt( sqsweigdiff / ( nevtALL - nnan ) );
// === STEP 9 FINALISE
// --- 9a. Destroy curand generator
const std::string dgenKey = "9a GenDestr";
timermap.start( dgenKey );
#ifndef MGONGPU_COMMONRAND_ONHOST
#ifdef __CUDACC__
grambo2toNm0::destroyGenerator( rnGen );
#else
rambo2toNm0::destroyGenerator( rnGen );
#endif
#endif
// --- 9b Dump to screen
const std::string dumpKey = "9b DumpScrn";
timermap.start(dumpKey);
if (!(verbose || debug || perf))
{
std::cout << std::endl;
}
if (perf)
{
std::cout << "***********************************************************************" << std::endl
<< "NumBlocksPerGrid = " << gpublocks << std::endl
<< "NumThreadsPerBlock = " << gputhreads << std::endl
<< "NumIterations = " << niter << std::endl
<< "-----------------------------------------------------------------------" << std::endl
#if defined MGONGPU_FPTYPE_DOUBLE
<< "FP precision = DOUBLE (nan=" << nnan << ")" << std::endl
#elif defined MGONGPU_FPTYPE_FLOAT
<< "FP precision = FLOAT (nan=" << nnan << ")" << std::endl
#endif
#ifdef __CUDACC__
#if defined MGONGPU_CXTYPE_CUCOMPLEX
<< "Complex type = CUCOMPLEX" << std::endl
#elif defined MGONGPU_CXTYPE_THRUST
<< "Complex type = THRUST::COMPLEX" << std::endl
#endif
#else
<< "Complex type = STD::COMPLEX" << std::endl
#endif
<< "RanNumb memory layout = AOSOA[" << neppR << "]"
<< ( neppR == 1 ? " == AOS" : "" ) << std::endl
<< "Momenta memory layout = AOSOA[" << neppM << "]"
<< ( neppM == 1 ? " == AOS" : "" ) << std::endl
#ifdef __CUDACC__
<< "Wavefunction GPU memory = LOCAL" << std::endl
#endif
#ifdef __CUDACC__
#if defined MGONGPU_COMMONRAND_ONHOST
<< "Random number generation = COMMON RANDOM HOST (CUDA code)" << std::endl
#elif defined MGONGPU_CURAND_ONDEVICE
<< "Random number generation = CURAND DEVICE (CUDA code)" << std::endl
#elif defined MGONGPU_CURAND_ONHOST
<< "Random number generation = CURAND HOST (CUDA code)" << std::endl
#endif
#else
#if defined MGONGPU_COMMONRAND_ONHOST
<< "Random number generation = COMMON RANDOM (C++ code)" << std::endl
#else
<< "Random number generation = CURAND (C++ code)" << std::endl
#endif
#endif
<< "-----------------------------------------------------------------------" << std::endl
<< "NumberOfEntries = " << niter << std::endl
<< std::scientific // scientific format: affects all floats (default precision: 6)
<< "TotalTime[Rnd+Rmb+ME] (123)= ( " << sumgtim+sumrtim+sumwtim << std::string(16, ' ') << " ) sec" << std::endl
<< "TotalTime[Rambo+ME] (23)= ( " << sumrtim+sumwtim << std::string(16, ' ') << " ) sec" << std::endl
<< "TotalTime[RndNumGen] (1)= ( " << sumgtim << std::string(16, ' ') << " ) sec" << std::endl
<< "TotalTime[Rambo] (2)= ( " << sumrtim << std::string(16, ' ') << " ) sec" << std::endl
<< "TotalTime[MatrixElems] (3)= ( " << sumwtim << std::string(16, ' ') << " ) sec" << std::endl
<< "MeanTimeInMatrixElems = ( " << meanwtim << std::string(16, ' ') << " ) sec" << std::endl
<< "[Min,Max]TimeInMatrixElems = [ " << minwtim
<< " , " << maxwtim << " ] sec" << std::endl
//<< "StdDevTimeInWaveFuncs = ( " << stdwtim << std::string(16, ' ') << " ) sec" << std::endl
<< "-----------------------------------------------------------------------" << std::endl
//<< "ProcessID: = " << getpid() << std::endl
//<< "NProcesses = " << process.nprocesses << std::endl
<< "TotalEventsComputed = " << nevtALL << std::endl
<< "EvtsPerSec[Rnd+Rmb+ME](123)= ( " << nevtALL/(sumgtim+sumrtim+sumwtim)
<< std::string(16, ' ') << " ) sec^-1" << std::endl
<< "EvtsPerSec[Rmb+ME] (23)= ( " << nevtALL/(sumrtim+sumwtim)
<< std::string(16, ' ') << " ) sec^-1" << std::endl
//<< "EvtsPerSec[RndNumbGen] (1)= ( " << nevtALL/sumgtim
//<< std::string(16, ' ') << " ) sec^-1" << std::endl
//<< "EvtsPerSec[Rambo] (2)= ( " << nevtALL/sumrtim
//<< std::string(16, ' ') << " ) sec^-1" << std::endl
<< "EvtsPerSec[MatrixElems] (3)= ( " << nevtALL/sumwtim
<< std::string(16, ' ') << " ) sec^-1" << std::endl
<< std::defaultfloat; // default format: affects all floats
std::cout << "***********************************************************************" << std::endl
<< "NumMatrixElements(notNan) = " << nevtALL - nnan << std::endl
<< std::scientific // scientific format: affects all floats (default precision: 6)
<< "MeanMatrixElemValue = ( " << meanelem
<< " +- " << stdelem/sqrt(nevtALL - nnan) << " ) GeV^" << meGeVexponent << std::endl // standard error
<< "[Min,Max]MatrixElemValue = [ " << minelem
<< " , " << maxelem << " ] GeV^" << meGeVexponent << std::endl
<< "StdDevMatrixElemValue = ( " << stdelem << std::string(16, ' ') << " ) GeV^" << meGeVexponent << std::endl
<< "MeanWeight = ( " << meanweig
<< " +- " << stdweig/sqrt(nevtALL - nnan) << " )" << std::endl // standard error
<< "[Min,Max]Weight = [ " << minweig
<< " , " << maxweig << " ]" << std::endl
<< "StdDevWeight = ( " << stdweig << std::string(16, ' ') << " )" << std::endl
<< std::defaultfloat; // default format: affects all floats
}
// --- 9c Dump to json
const std::string jsonKey = "9c DumpJson";
timermap.start(jsonKey);
if(json)
{
std::string jsonFileName = std::to_string(jsondate) + "-perf-test-run" + std::to_string(jsonrun) + ".json";
jsonFileName = "./perf/data/" + jsonFileName;
//Checks if file exists
std::ifstream fileCheck;
bool fileExists = false;
fileCheck.open(jsonFileName);
if(fileCheck){
fileExists = true;
fileCheck.close();
}
std::ofstream jsonFile;
jsonFile.open(jsonFileName, std::ios_base::app);
if(!fileExists){
jsonFile << "[" << std::endl;
}
else{
//deleting the last bracket and outputting a ", "
std::string temp = "truncate -s-1 " + jsonFileName;
const char *command = temp.c_str();
system(command);
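// (The existing file ends with ']': shelling out to the 'truncate' utility strips that
// single byte so the new entry can be appended and the array re-closed at the end.)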
jsonFile << ", " << std::endl;
}
jsonFile << "{" << std::endl
<< "\"NumIterations\": " << niter << ", " << std::endl
<< "\"NumThreadsPerBlock\": " << gputhreads << ", " << std::endl
<< "\"NumBlocksPerGrid\": " << gpublocks << ", " << std::endl
#if defined MGONGPU_FPTYPE_DOUBLE
<< "\"FP precision\": "
<< "\"DOUBLE (nan=" << nnan << ")\"," << std::endl
#elif defined MGONGPU_FPTYPE_FLOAT
<< "\"FP precision\": " << "FLOAT (nan=" << nnan << ")," << std::endl
#endif
<< "\"Complex type\": "
#ifdef __CUDACC__
#if defined MGONGPU_CXTYPE_CUCOMPLEX
<< "\"CUCOMPLEX\"," << std::endl
#elif defined MGONGPU_CXTYPE_THRUST
<< "\"THRUST::COMPLEX\"," << std::endl
#endif
#else
<< "\"STD::COMPLEX\"," << std::endl
#endif
<< "\"RanNumb memory layout\": " << "\"AOSOA[" << neppR << "]\""
<< ( neppR == 1 ? " == AOS" : "" ) << ", " << std::endl
<< "\"Momenta memory layout\": " << "\"AOSOA[" << neppM << "]\""
<< ( neppM == 1 ? " == AOS" : "" ) << ", " << std::endl
#ifdef __CUDACC__
<< "\"Wavefunction GPU memory\": " << "\"LOCAL\"," << std::endl
#endif
<< "\"Curand generation\": "
#ifdef __CUDACC__
#if defined MGONGPU_CURAND_ONDEVICE
<< "\"DEVICE (CUDA code)\"," << std::endl
#elif defined MGONGPU_CURAND_ONHOST
<< "\"HOST (CUDA code)\"," << std::endl
#endif
#else
<< "\"HOST (C++ code)\"," << std::endl
#endif
<< "\"NumberOfEntries\": " << niter << "," << std::endl
//<< std::scientific // Not sure about this
<< "\"TotalTimeInWaveFuncs\": "
<< "\"" << std::to_string(sumwtim) << " sec\"," << std::endl
<< "\"MeanTimeInWaveFuncs\": "
<< "\"" << std::to_string(meanwtim) << " sec\"," << std::endl
<< "\"StdDevTimeInWaveFuncs\": "
<< "\"" << std::to_string(stdwtim) << " sec\"," << std::endl
<< "\"MinTimeInWaveFuncs\": "
<< "\"" << std::to_string(minwtim) << " sec\"," << std::endl
<< "\"MaxTimeInWaveFuncs\": "
<< "\"" << std::to_string(maxwtim) << " sec\"," << std::endl
//<< "ProcessID: = " << getpid() << std::endl
//<< "NProcesses = " << process.nprocesses << std::endl
<< "\"TotalEventsComputed\": " << nevtALL << "," << std::endl
<< "\"RamboEventsPerSec\": "
<< "\"" << std::to_string(nevtALL/sumrtim) << " sec^-1\"," << std::endl
<< "\"MatrixElemEventsPerSec\": "
<< "\"" << std::to_string(nevtALL/sumwtim) << " sec^-1\"," << std::endl
<< "\"NumMatrixElements(notNan)\": " << nevtALL - nnan << "," << std::endl
<< std::scientific
<< "\"MeanMatrixElemValue\": "
<< "\"" << std::to_string(meanelem) << " GeV^"
<< std::to_string(meGeVexponent) << "\"," << std::endl
<< "\"StdErrMatrixElemValue\": "
<< "\"" << std::to_string(stdelem/sqrt(nevtALL)) << " GeV^"
<< std::to_string(meGeVexponent) << "\"," << std::endl
<< "\"StdDevMatrixElemValue\": "
<< "\"" << std::to_string(stdelem)
<< " GeV^" << std::to_string(meGeVexponent) << "\"," << std::endl
<< "\"MinMatrixElemValue\": "
<< "\"" << std::to_string(minelem) << " GeV^"
<< std::to_string(meGeVexponent) << "\"," << std::endl
<< "\"MaxMatrixElemValue\": "
<< "\"" << std::to_string(maxelem) << " GeV^"
<< std::to_string(meGeVexponent) << "\"," << std::endl;
timermap.dump(jsonFile, true); // NB For the active json timer this dumps a partial total
jsonFile << "}" << std::endl << "]";
jsonFile.close();
}
// *** STOP THE NEW TIMERS ***
timermap.stop();
if (perf)
{
std::cout << "***********************************************************************" << std::endl;
timermap.dump();
std::cout << "***********************************************************************" << std::endl;
}
//std::cout << "ALL OK" << std::endl;
}
|
343d07a560734a614f38b4f0f7fa3f5aab14605b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cassert>
#include <cfloat>
#include "private.h"
#define MAX_BLOCK_SIZE 1024
/// The number of dimensions. Constant on every device.
__constant__ uint32_t d_dim;
/// Calculates the gamma distribution of the specified size from two uniform
/// distributions.
/// @param size The number of samples to write.
/// @param v1 in The first array with uniformly distributed values in [0, 1].
/// @param v2 in,out The second array with uniformly distributed values in [0, 1].
/// The output is written to it.
/// @note v1 and v2 must be independent (e.g., not the same), otherwise you will
/// get an invalid result.
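/// @details -logf(v1[i]*v2[i]) = -log(v1[i]) - log(v2[i]) is the sum of two independent
/// Exp(1) draws, which is exactly a Gamma(2,1) sample.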
__global__ void gamma_cuda(uint32_t size, const float *__restrict__ v1,
float *__restrict__ v2) {
uint32_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= size) {
return;
}
v2[index] = -logf(v1[index] * v2[index]);
}
/// Calculates the natural logarithm of the array.
/// @param size The length of the array.
/// @param v in,out The array to read and write.
__global__ void log_cuda(uint32_t size, float *v) {
uint32_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= size) {
return;
}
v[index] = logf(v[index]);
}
/// Weighted MinHash kernel. The argument names follow the paper:
/// http://static.googleusercontent.com/media/research.google.com/en//pubs/archive/36928.pdf
/// @param rs Gamma(2,1)-random samples. The length must be the product of
/// number of processed samples (vectors) by the number of dimensions.
/// @param ln_cs Logarithm over the gamma(2,1) distribution. Same length as rs.
/// @param betas Uniformly [0, 1] distributed samples. Same length as rs.
/// @param weights CSR's data.
/// @param cols CSR's indices.
/// @param rows CSR's indptrs.
/// @param plan Execution plan, consists of 2 parts: the first is the offset
/// table and the second is the row indices
/// @param sample_delta How many hashes to process in a single thread. Depends
/// on the shared memory size.
/// @param device_row_offset Shard offset in rows. Specific to every device.
/// @param device_wc_offset Shard offset in weights and cols. Specific to every
/// device.
/// @param hashes The output of size number of vectors x number of hashes for
/// each x 2.
__global__ void weighted_minhash_cuda(
const float *__restrict__ rs, const float *__restrict__ ln_cs,
const float *__restrict__ betas, const float *__restrict__ weights,
const uint32_t *__restrict__ cols, const uint32_t *__restrict__ rows,
const int32_t *__restrict__ plan, const int sample_delta,
const uint32_t device_row_offset, const uint32_t device_wc_offset,
uint32_t *__restrict__ hashes) {
const uint32_t thread_index = blockIdx.y * blockDim.y + threadIdx.y;
const uint32_t sample_index = threadIdx.x;
int32_t row_offset = plan[thread_index];
int32_t row_border = plan[thread_index + 1];
if (row_offset == row_border) {
return;
}
const uint32_t sample_offset = sample_index * sample_delta;
const uint32_t samples = blockDim.x * sample_delta;
extern __shared__ float shmem[];
float *volatile lnmins = &shmem[(threadIdx.y * blockDim.x + sample_index) * 3 * sample_delta];
uint2 *volatile dtmins = reinterpret_cast<uint2 *>(lnmins + sample_delta);
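// Per-thread shared-memory slice: sample_delta floats of running minima (lnmins)
// followed by sample_delta uint2 hash candidates (dtmins), i.e. 3*sample_delta floats
// in total, matching the "3 * sizeof(float) * MINHASH_BLOCK_SIZE * sample_delta"
// shared-memory size requested at launch.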
int32_t row = -1;
for (uint32_t index = 0, border = 0;; index++) {
if (index >= border) {
for (uint32_t s = 0; s < sample_delta; s++) {
lnmins[s] = FLT_MAX;
}
if (row >= 0) {
for (int s = 0; s < sample_delta; s++) {
auto hash = reinterpret_cast<uint2 *>(hashes +
((row - device_row_offset) * samples + s + sample_offset) * 2);
*hash = dtmins[s];
}
}
if (row_offset >= row_border) {
break;
}
row = plan[row_offset++];
index = rows[row - device_row_offset];
border = rows[row - device_row_offset + 1];
}
const float w = logf(weights[index - device_wc_offset]);
const uint32_t d = cols[index - device_wc_offset];
volatile int64_t ci = static_cast<int64_t>(sample_offset) * d_dim + d;
#pragma unroll 4
for (int s = 0; s < sample_delta; s++, ci += d_dim) {
// We apply the logarithm trick here: log (a / z) = log a - log z
float r = rs[ci];
float beta = betas[ci];
float t = floorf(w / r + beta);
float ln_y = (t - beta) * r;
float ln_a = ln_cs[ci] - ln_y - r;
if (ln_a < lnmins[s]) {
lnmins[s] = ln_a;
dtmins[s] = {d, static_cast<uint32_t>(static_cast<int32_t>(t))};
}
}
}
}
extern "C" {
/// Calls gamma_cuda() kernel.
hipError_t gamma_(uint32_t size, const float *v1, float *v2) {
dim3 block(MAX_BLOCK_SIZE, 1, 1);
dim3 grid(size / block.x + 1, 1, 1);
hipLaunchKernelGGL(( gamma_cuda), dim3(grid), dim3(block), 0, 0, size, v1, v2);
RETERR(hipDeviceSynchronize());
return hipSuccess;
}
/// Calls log_cuda() kernel.
hipError_t log_(uint32_t size, float *v) {
dim3 block(MAX_BLOCK_SIZE, 1, 1);
dim3 grid(size / block.x + 1, 1, 1);
hipLaunchKernelGGL(( log_cuda), dim3(grid), dim3(block), 0, 0, size, v);
RETERR(hipDeviceSynchronize());
return hipSuccess;
}
/// Copies the number of dimensions (size of each sample) to a symbol on each
/// device.
MHCUDAResult setup_weighted_minhash(
uint32_t dim, const std::vector<int> &devs, int verbosity) {
FOR_EACH_DEV(
CUCH(hipMemcpyToSymbol(d_dim, &dim, sizeof(dim)),
mhcudaMemoryCopyError);
);
return mhcudaSuccess;
}
/// Calls the corresponding kernel.
MHCUDAResult weighted_minhash(
const udevptrs<float> &rs, const udevptrs<float> &ln_cs,
const udevptrs<float> &betas, const udevptrs<float> &weights,
const udevptrs<uint32_t> &cols, const udevptrs<uint32_t> &rows,
int samples, const std::vector<int> &sample_deltas,
const udevptrs<int32_t> &plan, const std::vector<uint32_t> &split,
const uint32_t *original_rows, const std::vector<uint32_t> &grid_sizes,
const std::vector<int> &devs, int verbosity, udevptrs<uint32_t> *hashes) {
FOR_EACH_DEVI(
int sample_delta = sample_deltas[devi];
int spt = samples / sample_delta;
assert(MINHASH_BLOCK_SIZE % spt == 0);
dim3 block(spt, MINHASH_BLOCK_SIZE / spt, 1);
dim3 grid(1, grid_sizes[devi], 1);
int shmem = 3 * sizeof(float) * MINHASH_BLOCK_SIZE * sample_delta;
uint32_t row_offset = (devi > 0)? split[devi - 1] : 0;
DEBUG("dev #%d: <<<%d, [%d, %d], %d>>>(%u, %u)\n",
devs[devi], grid.x, block.x, block.y, shmem,
static_cast<unsigned>(row_offset),
static_cast<unsigned>(original_rows[row_offset]));
hipLaunchKernelGGL(( weighted_minhash_cuda), dim3(grid), dim3(block), shmem, 0,
rs[devi].get(), ln_cs[devi].get(), betas[devi].get(),
weights[devi].get(), cols[devi].get(), rows[devi].get(),
plan[devi].get(), sample_delta, row_offset, original_rows[row_offset],
(*hashes)[devi].get());
);
return mhcudaSuccess;
}
} // extern "C"
|
343d07a560734a614f38b4f0f7fa3f5aab14605b.cu
|
#include <cassert>
#include <cfloat>
#include "private.h"
#define MAX_BLOCK_SIZE 1024
/// The number of dimensions. Constant on every device.
__constant__ uint32_t d_dim;
/// Calculates the gamma distribution of the specified size from two uniform
/// distributions.
/// @param size The number of samples to write.
/// @param v1 in The first array with uniformly distributed values in [0, 1].
/// @param v2 in,out The second array with uniformly distributed values in [0, 1].
/// The output is written to it.
/// @note v1 and v2 must be independent (e.g., not the same), otherwise you will
/// get an invalid result.
__global__ void gamma_cuda(uint32_t size, const float *__restrict__ v1,
float *__restrict__ v2) {
uint32_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= size) {
return;
}
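// -logf(u1 * u2) = -logf(u1) - logf(u2): the sum of two independent Exp(1) samples,
// i.e. a single draw from Gamma(shape = 2, scale = 1) via inverse-CDF sampling.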
v2[index] = -logf(v1[index] * v2[index]);
}
/// Calculates the natural logarithm of the array.
/// @param size The length of the array.
/// @param v in,out The array to read and write.
__global__ void log_cuda(uint32_t size, float *v) {
uint32_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= size) {
return;
}
v[index] = logf(v[index]);
}
/// Weighted MinHash kernel. The argument names follow the paper:
/// http://static.googleusercontent.com/media/research.google.com/en//pubs/archive/36928.pdf
/// @param rs Gamma(2,1)-random samples. The length must be the product of
/// number of processed samples (vectors) by the number of dimensions.
/// @param ln_cs Logarithm over the gamma(2,1) distribution. Same length as rs.
/// @param betas Uniformly [0, 1] distributed samples. Same length as rs.
/// @param weights CSR's data.
/// @param cols CSR's indices.
/// @param rows CSR's indptrs.
/// @param plan Execution plan, consists of 2 parts: the first is the offset
/// table and the second is the row indices
/// @param sample_delta How many hashes to process in a single thread. Depends
/// on the shared memory size.
/// @param device_row_offset Shard offset in rows. Specific to every device.
/// @param device_wc_offset Shard offset in weights and cols. Specific to every
/// device.
/// @param hashes The output array of size (number of vectors) x (number of hashes
/// per vector) x 2.
__global__ void weighted_minhash_cuda(
const float *__restrict__ rs, const float *__restrict__ ln_cs,
const float *__restrict__ betas, const float *__restrict__ weights,
const uint32_t *__restrict__ cols, const uint32_t *__restrict__ rows,
const int32_t *__restrict__ plan, const int sample_delta,
const uint32_t device_row_offset, const uint32_t device_wc_offset,
uint32_t *__restrict__ hashes) {
const uint32_t thread_index = blockIdx.y * blockDim.y + threadIdx.y;
const uint32_t sample_index = threadIdx.x;
int32_t row_offset = plan[thread_index];
int32_t row_border = plan[thread_index + 1];
if (row_offset == row_border) {
return;
}
const uint32_t sample_offset = sample_index * sample_delta;
const uint32_t samples = blockDim.x * sample_delta;
extern __shared__ float shmem[];
float *volatile lnmins = &shmem[(threadIdx.y * blockDim.x + sample_index) * 3 * sample_delta];
uint2 *volatile dtmins = reinterpret_cast<uint2 *>(lnmins + sample_delta);
int32_t row = -1;
for (uint32_t index = 0, border = 0;; index++) {
if (index >= border) {
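// Row boundary reached: reset the running minima, flush the previous row's best (d, t)
// pairs to `hashes` (if there was a previous row), and advance to the next row that the
// plan assigns to this thread.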
for (uint32_t s = 0; s < sample_delta; s++) {
lnmins[s] = FLT_MAX;
}
if (row >= 0) {
for (int s = 0; s < sample_delta; s++) {
auto hash = reinterpret_cast<uint2 *>(hashes +
((row - device_row_offset) * samples + s + sample_offset) * 2);
*hash = dtmins[s];
}
}
if (row_offset >= row_border) {
break;
}
row = plan[row_offset++];
index = rows[row - device_row_offset];
border = rows[row - device_row_offset + 1];
}
const float w = logf(weights[index - device_wc_offset]);
const uint32_t d = cols[index - device_wc_offset];
volatile int64_t ci = static_cast<int64_t>(sample_offset) * d_dim + d;
#pragma unroll 4
for (int s = 0; s < sample_delta; s++, ci += d_dim) {
// We apply the logarithm trick here: log (a / z) = log a - log z
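// Consistent weighted sampling, following the paper referenced above; per (sample, dimension)
// r is a Gamma(2,1) draw, ln_c the log of a Gamma(2,1) draw, beta ~ Uniform(0,1), and
// w = ln(weight):
//   t    = floor(w / r + beta)
//   ln y = (t - beta) * r
//   ln a = ln c - ln y - r
// The hash for a sample is the (d, t) pair that minimizes ln a over all dimensions d.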
float r = rs[ci];
float beta = betas[ci];
float t = floorf(w / r + beta);
float ln_y = (t - beta) * r;
float ln_a = ln_cs[ci] - ln_y - r;
if (ln_a < lnmins[s]) {
lnmins[s] = ln_a;
dtmins[s] = {d, static_cast<uint32_t>(static_cast<int32_t>(t))};
}
}
}
}
extern "C" {
/// Calls gamma_cuda() kernel.
cudaError_t gamma_(uint32_t size, const float *v1, float *v2) {
dim3 block(MAX_BLOCK_SIZE, 1, 1);
dim3 grid(size / block.x + 1, 1, 1);
gamma_cuda<<<grid, block>>>(size, v1, v2);
RETERR(cudaDeviceSynchronize());
return cudaSuccess;
}
/// Calls log_cuda() kernel.
cudaError_t log_(uint32_t size, float *v) {
dim3 block(MAX_BLOCK_SIZE, 1, 1);
dim3 grid(size / block.x + 1, 1, 1);
log_cuda<<<grid, block>>>(size, v);
RETERR(cudaDeviceSynchronize());
return cudaSuccess;
}
/// Copies the number of dimensions (size of each sample) to a symbol on each
/// device.
MHCUDAResult setup_weighted_minhash(
uint32_t dim, const std::vector<int> &devs, int verbosity) {
FOR_EACH_DEV(
CUCH(cudaMemcpyToSymbol(d_dim, &dim, sizeof(dim)),
mhcudaMemoryCopyError);
);
return mhcudaSuccess;
}
/// Calls the corresponding kernel.
MHCUDAResult weighted_minhash(
const udevptrs<float> &rs, const udevptrs<float> &ln_cs,
const udevptrs<float> &betas, const udevptrs<float> &weights,
const udevptrs<uint32_t> &cols, const udevptrs<uint32_t> &rows,
int samples, const std::vector<int> &sample_deltas,
const udevptrs<int32_t> &plan, const std::vector<uint32_t> &split,
const uint32_t *original_rows, const std::vector<uint32_t> &grid_sizes,
const std::vector<int> &devs, int verbosity, udevptrs<uint32_t> *hashes) {
FOR_EACH_DEVI(
int sample_delta = sample_deltas[devi];
int spt = samples / sample_delta;
assert(MINHASH_BLOCK_SIZE % spt == 0);
dim3 block(spt, MINHASH_BLOCK_SIZE / spt, 1);
dim3 grid(1, grid_sizes[devi], 1);
int shmem = 3 * sizeof(float) * MINHASH_BLOCK_SIZE * sample_delta;
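// Per (thread, sample) the kernel keeps one float (lnmins) plus one uint2 (dtmins, two
// 32-bit words) in shared memory, i.e. 3 * sizeof(float) bytes each.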
uint32_t row_offset = (devi > 0)? split[devi - 1] : 0;
DEBUG("dev #%d: <<<%d, [%d, %d], %d>>>(%u, %u)\n",
devs[devi], grid.x, block.x, block.y, shmem,
static_cast<unsigned>(row_offset),
static_cast<unsigned>(original_rows[row_offset]));
weighted_minhash_cuda<<<grid, block, shmem>>>(
rs[devi].get(), ln_cs[devi].get(), betas[devi].get(),
weights[devi].get(), cols[devi].get(), rows[devi].get(),
plan[devi].get(), sample_delta, row_offset, original_rows[row_offset],
(*hashes)[devi].get());
);
return mhcudaSuccess;
}
} // extern "C"
|
1aa44fdc06db6c9e659ae65c568e462068de81c8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hipcub/hipcub.hpp>
#include "kernels_hip.cuh"
// Unroller
template <int N>
struct sequence {
template <typename Lambda>
static __forceinline__ __device__ void run(const Lambda& f) {
sequence<N-1>::run(f);
f(N-1);
}
template <typename Lambda>
static __forceinline__ __device__ void reverse(const Lambda& f) {
f(N-1);
sequence<N-1>::reverse(f);
}
};
template <>
struct sequence<0> {
template <typename Lambda>
static __forceinline__ __device__ void run(const Lambda& f) {}
template <typename Lambda>
static __forceinline__ __device__ void reverse(const Lambda& f) {}
};
template __global__ void reduce<0>(Point*, Norm*, size_t, const Point*, const Norm*, size_t);
template __global__ void reduce<1>(Point*, Norm*, size_t, const Point*, const Norm*, size_t);
template __global__ void reduce<2>(Point*, Norm*, size_t, const Point*, const Norm*, size_t);
const int NumPrefetch = CUB_QUOTIENT_FLOOR(4 * BlockDim, Pitch);
using shared_t = union
{
float block[BlockDim][4];
float linear[NumPrefetch][Pitch];
};
template <int step>
__global__
void reduce(Point* gs, Norm* gns, size_t g_size, const Point* hs, const Norm* hns, size_t h_size)
{
__shared__ bool check;
if (step == 0)
{
if (threadIdx.x == 0) check = false;
__syncthreads();
}
const int subidx = threadIdx.x % RakeWidth;
const int subinst = threadIdx.x / RakeWidth;
float* g_ptr = reinterpret_cast<float*>(gs);
const float* h_ptr = reinterpret_cast<const float*>(hs);
cub::CacheModifiedInputIterator<cub::LOAD_LDG, float> g_in(g_ptr);
cub::CacheModifiedInputIterator<cub::LOAD_LDG, float> h_in(h_ptr);
using BlockLoadT = cub::BlockLoad<decltype(g_in), BlockDim, NT, cub::BLOCK_LOAD_VECTORIZE>;
using BlockStoreT = cub::BlockStore<float*, BlockDim, NT, cub::BLOCK_STORE_VECTORIZE>;
using BlockLoadVT = cub::BlockLoad<decltype(h_in), BlockDim, 4, cub::BLOCK_LOAD_VECTORIZE>;
union {
typename BlockLoadT::TempStorage load;
typename BlockStoreT::TempStorage store;
typename BlockLoadVT::TempStorage loadv;
} shared;
for (int g_base = blockIdx.x * InstPerBlock; g_base < g_size; g_base += GridDim * InstPerBlock)
{
const auto g_idx = g_base + subinst;
float g[NT], gg;
float reduced {}; // Flag: 0 <-> Not reduced.
float min_norm;
BlockLoadT(shared.load).Load(g_in + g_base * Pitch, g);
gg = gns[g_idx];
auto t = (g_in + g_base * Pitch)[0];
min_norm = gg + P * t * t;
__shared__ alignas(128) shared_t prefetch;
__shared__ float prefetch_n[BlockDim];
for (int h_base = 0; h_base < h_size; h_base += NumPrefetch)
{
BlockLoadVT(shared.loadv).Load(h_in + h_base * Pitch, prefetch.block[threadIdx.x]);
if (threadIdx.x < NumPrefetch)
prefetch_n[threadIdx.x] = hns[h_base + threadIdx.x];
__syncthreads();
if (step == 0)
{
if (__all(gg < prefetch_n[0]) && threadIdx.x == 0)
check = true;
__syncthreads();
if (check) break;
}
for (int i = 0; i < NumPrefetch && h_base + i < h_size; ++i) // could this bound check be omitted?
{
__syncthreads();
const int h_idx = h_base + i;
const float hh = prefetch_n[i];
if (step != 0 && hh < 10) continue; // h is already reduced
// h_buf has no zero padding
using sep = float[RakeWidth][NT];
__shared__ float h_buf[BlockDim]; // might not be enough for 126 dimensions?
h_buf[threadIdx.x] = threadIdx.x < P ? prefetch.linear[i][threadIdx.x] : 0;
for (int rot = 0; rot < CUB_ROUND_DOWN_NEAREST(P, ILP) - ILP; rot += ILP)
{
__syncthreads();
float q_best {};
float gh[ILP] {};
float h[NT + (ILP - 1)];
for (int j = 0; j < NT; ++j)
h[j] = (*(volatile sep*)(&h_buf[rot]))[subidx][j];
sequence<ILP - 1>::run([&](int k)
{
h[NT + k] = h_buf[rot + (subidx + 1) * NT + k]; // careful: may go out of bounds
});
sequence<ILP>::run([&](int k)
{
for (int j = 0; j < NT; ++j)
gh[k] += g[j] * h[j + k];
if (subidx == RakeWidth - 1)
h[NT - Padding + k] = h_buf[rot + k];
});
for (int j = 1; j < RakeWidth; j *= 2)
sequence<ILP>::run([&](int k)
{
gh[k] += __shfl_xor(gh[k], j);
});
int from {};
for (int j = 0; j < Times; ++j) // j can range from 1 up to NT
{
float uu = gg + (P * g[j]) * g[j];
sequence<ILP>::run([&](int k)
{
float uv = gh[k] + (P * g[j]) * h[j + k],
vv = hh + P * h[j + k] * h[j + k];
float q = rintf(uv / uu);
if (step == 1 && gg < 0) q = 0;
if (step == 1 && g_idx == h_idx && rot == 0 && k == 0) q = 0;
float new_norm = uu + q * (q * vv - 2 * uv);
if (new_norm < min_norm) // if j gets close to NT, also need && subidx * NT + j < P)
{
q_best = q;
from = k + 1; // the default value is 0; otherwise extra work would be done later
}
min_norm = min(new_norm, min_norm);
});
}
for (int j = 1; j < RakeWidth; j *= 2)
{
float min_norm_t = __shfl_xor(min_norm, j);
float q_best_t = __shfl_xor(q_best, j);
int from_t = __shfl_xor(from, j);
// best to also enable the commented-out clause below to avoid an extremely unlikely error
if (min_norm_t < min_norm) // || min_norm_t == min_norm && (subidx ^ j) >= subidx)
{
q_best = q_best_t;
from = from_t;
}
min_norm = min(min_norm_t, min_norm);
}
if (step == 0 || __any(q_best != 0)) // this line was not in the old version
{
sequence<ILP>::reverse([&](int k) // iterate in reverse
{
if (from == k + 1)
{
gg += q_best * (q_best * hh - 2 * gh[k]);
for (int j = 0; j < NT; ++j)
g[j] -= q_best * h[j + k];
}
if (subidx == RakeWidth - 1)
h[NT - Padding + k] = 0;
});
reduced += q_best * q_best;
}
__syncthreads();
if (threadIdx.x == 0)
sequence<ILP>::run([&](int k)
{
h_buf[P + rot + k] = h[k];
});
}
}
__syncthreads();
}
BlockStoreT(shared.store).Store(g_ptr + g_base * Pitch, g);
if (reduced > 0.5) gns[g_idx] = -1;
__syncthreads();
if (step == 0 && check) break;
}
}
|
1aa44fdc06db6c9e659ae65c568e462068de81c8.cu
|
#include <cub/cub.cuh>
#include "kernels.cuh"
// Unroller
template <int N>
struct sequence {
template <typename Lambda>
static __forceinline__ __device__ void run(const Lambda& f) {
sequence<N-1>::run(f);
f(N-1);
}
template <typename Lambda>
static __forceinline__ __device__ void reverse(const Lambda& f) {
f(N-1);
sequence<N-1>::reverse(f);
}
};
template <>
struct sequence<0> {
template <typename Lambda>
static __forceinline__ __device__ void run(const Lambda& f) {}
template <typename Lambda>
static __forceinline__ __device__ void reverse(const Lambda& f) {}
};
template __global__ void reduce<0>(Point*, Norm*, size_t, const Point*, const Norm*, size_t);
template __global__ void reduce<1>(Point*, Norm*, size_t, const Point*, const Norm*, size_t);
template __global__ void reduce<2>(Point*, Norm*, size_t, const Point*, const Norm*, size_t);
const int NumPrefetch = CUB_QUOTIENT_FLOOR(4 * BlockDim, Pitch);
using shared_t = union
{
float block[BlockDim][4];
float linear[NumPrefetch][Pitch];
};
template <int step>
__global__
void reduce(Point* gs, Norm* gns, size_t g_size, const Point* hs, const Norm* hns, size_t h_size)
{
__shared__ bool check;
if (step == 0)
{
if (threadIdx.x == 0) check = false;
__syncthreads();
}
const int subidx = threadIdx.x % RakeWidth;
const int subinst = threadIdx.x / RakeWidth;
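// Each vector is raked across RakeWidth consecutive lanes (NT coordinates per lane):
// subidx is this thread's lane within its vector, subinst selects which vector of the
// block the thread works on; lane-partial dot products are combined with __shfl_xor below.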
float* g_ptr = reinterpret_cast<float*>(gs);
const float* h_ptr = reinterpret_cast<const float*>(hs);
cub::CacheModifiedInputIterator<cub::LOAD_LDG, float> g_in(g_ptr);
cub::CacheModifiedInputIterator<cub::LOAD_LDG, float> h_in(h_ptr);
using BlockLoadT = cub::BlockLoad<decltype(g_in), BlockDim, NT, cub::BLOCK_LOAD_VECTORIZE>;
using BlockStoreT = cub::BlockStore<float*, BlockDim, NT, cub::BLOCK_STORE_VECTORIZE>;
using BlockLoadVT = cub::BlockLoad<decltype(h_in), BlockDim, 4, cub::BLOCK_LOAD_VECTORIZE>;
union {
typename BlockLoadT::TempStorage load;
typename BlockStoreT::TempStorage store;
typename BlockLoadVT::TempStorage loadv;
} shared;
for (int g_base = blockIdx.x * InstPerBlock; g_base < g_size; g_base += GridDim * InstPerBlock)
{
const auto g_idx = g_base + subinst;
float g[NT], gg;
float reduced {}; // Flag: 0 <-> Not reduced.
float min_norm;
BlockLoadT(shared.load).Load(g_in + g_base * Pitch, g);
gg = gns[g_idx];
auto t = (g_in + g_base * Pitch)[0];
min_norm = gg + P * t * t;
__shared__ alignas(128) shared_t prefetch;
__shared__ float prefetch_n[BlockDim];
for (int h_base = 0; h_base < h_size; h_base += NumPrefetch)
{
BlockLoadVT(shared.loadv).Load(h_in + h_base * Pitch, prefetch.block[threadIdx.x]);
if (threadIdx.x < NumPrefetch)
prefetch_n[threadIdx.x] = hns[h_base + threadIdx.x];
__syncthreads();
if (step == 0)
{
if (__all(gg < prefetch_n[0]) && threadIdx.x == 0)
check = true;
__syncthreads();
if (check) break;
}
for (int i = 0; i < NumPrefetch && h_base + i < h_size; ++i) // could this bound check be omitted?
{
__syncthreads();
const int h_idx = h_base + i;
const float hh = prefetch_n[i];
if (step != 0 && hh < 10) continue; // h is already reduced
// h_buf has no zero padding
using sep = float[RakeWidth][NT];
__shared__ float h_buf[BlockDim]; // might not be enough for 126 dimensions?
h_buf[threadIdx.x] = threadIdx.x < P ? prefetch.linear[i][threadIdx.x] : 0;
for (int rot = 0; rot < CUB_ROUND_DOWN_NEAREST(P, ILP) - ILP; rot += ILP)
{
__syncthreads();
float q_best {};
float gh[ILP] {};
float h[NT + (ILP - 1)];
for (int j = 0; j < NT; ++j)
h[j] = (*(volatile sep*)(&h_buf[rot]))[subidx][j];
sequence<ILP - 1>::run([&](int k)
{
h[NT + k] = h_buf[rot + (subidx + 1) * NT + k]; // careful: may go out of bounds
});
sequence<ILP>::run([&](int k)
{
for (int j = 0; j < NT; ++j)
gh[k] += g[j] * h[j + k];
if (subidx == RakeWidth - 1)
h[NT - Padding + k] = h_buf[rot + k];
});
for (int j = 1; j < RakeWidth; j *= 2)
sequence<ILP>::run([&](int k)
{
gh[k] += __shfl_xor(gh[k], j);
});
int from {};
for (int j = 0; j < Times; ++j) // j can range from 1 up to NT
{
float uu = gg + (P * g[j]) * g[j];
sequence<ILP>::run([&](int k)
{
float uv = gh[k] + (P * g[j]) * h[j + k],
vv = hh + P * h[j + k] * h[j + k];
float q = rintf(uv / uu);
if (step == 1 && gg < 0) q = 0;
if (step == 1 && g_idx == h_idx && rot == 0 && k == 0) q = 0;
float new_norm = uu + q * (q * vv - 2 * uv);
if (new_norm < min_norm) // if j gets close to NT, also need && subidx * NT + j < P)
{
q_best = q;
from = k + 1; // the default value is 0; otherwise extra work would be done later
}
min_norm = min(new_norm, min_norm);
});
}
for (int j = 1; j < RakeWidth; j *= 2)
{
float min_norm_t = __shfl_xor(min_norm, j);
float q_best_t = __shfl_xor(q_best, j);
int from_t = __shfl_xor(from, j);
// best to also enable the commented-out clause below to avoid an extremely unlikely error
if (min_norm_t < min_norm) // || min_norm_t == min_norm && (subidx ^ j) >= subidx)
{
q_best = q_best_t;
from = from_t;
}
min_norm = min(min_norm_t, min_norm);
}
if (step == 0 || __any(q_best != 0)) // this line was not in the old version
{
sequence<ILP>::reverse([&](int k) // iterate in reverse
{
if (from == k + 1)
{
gg += q_best * (q_best * hh - 2 * gh[k]);
for (int j = 0; j < NT; ++j)
g[j] -= q_best * h[j + k];
}
if (subidx == RakeWidth - 1)
h[NT - Padding + k] = 0;
});
reduced += q_best * q_best;
}
__syncthreads();
if (threadIdx.x == 0)
sequence<ILP>::run([&](int k)
{
h_buf[P + rot + k] = h[k];
});
}
}
__syncthreads();
}
BlockStoreT(shared.store).Store(g_ptr + g_base * Pitch, g);
if (reduced > 0.5) gns[g_idx] = -1;
__syncthreads();
if (step == 0 && check) break;
}
}
|
1bad212a469a4864de6a1ed808c82ab6306ced89.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iostream>
#include <numeric>
#include <map>
#include <time.h>
#include <valarray>
#include <string>
#include <hdf5.h>
#include "range.hpp"
#include "utils.hpp"
#define NUM_ROWS 28
#define NUM_COLS 28
#define NUM_CHANNELS 1
#define NUM_DIGITS 10
#define NUM_STREAMS 32
#define TILEDIM 32
#define POOL_SIZE 2
#define KERNEL_WIDTH 5
#define TILE_WIDTH 12
static int FLAGS_batch_size = 10000;
static std::string FLAGS_testdata{};
static std::string FLAGS_model{};
// Data and reference data dimensions
static int xdims[] = {FLAGS_batch_size, NUM_ROWS, NUM_COLS, NUM_CHANNELS};
static int rdims[] = {FLAGS_batch_size, NUM_DIGITS};
// Model dimensions
static int conv1dims[] = {5, 5, 1, 32};
static int conv2dims[] = {5, 5, 32, 64};
static int fc1dims[] = {1024, 128};
static int fc2dims[] = {128, 10};
__constant__ float filter1[KERNEL_WIDTH * KERNEL_WIDTH * TILEDIM];
static int loadData(float *x, float *y) {
// Open the data file
const auto file_id =
H5Fopen(FLAGS_testdata.c_str(), H5F_ACC_RDWR, H5P_DEFAULT);
// Open the dataset x and y
const auto x_id = H5Dopen2(file_id, "/x", H5P_DEFAULT);
const auto y_id = H5Dopen2(file_id, "/y", H5P_DEFAULT);
// Get the dataset x dimensions
const auto xspace = H5Dget_space(x_id);
const auto xndims = H5Sget_simple_extent_ndims(xspace);
assert(xndims == 4);
hsize_t *input_dims = allocate<hsize_t>(xdims);
H5Sget_simple_extent_dims(xspace, input_dims, NULL);
if (input_dims[0] != FLAGS_batch_size) {
std::cout << "data size does not match batch size specified!\n";
delete[] input_dims;
return 1; // return error
}
std::cout << "input dimensions = " << input_dims[0] << " x " << input_dims[1]
<< " x " << input_dims[2] << " x " << input_dims[3] << "\n";
// Read the dataset x and y
check_success(
H5Dread(x_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, x));
check_success(
H5Dread(y_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, y));
// Close the dataset x and y
check_success(H5Dclose(x_id));
check_success(H5Dclose(y_id));
// Close the file
check_success(H5Fclose(file_id));
// return success
delete[] input_dims;
return 0;
}
static void loadModel(float *conv1, float *conv2, float *fc1, float *fc2) {
// Open the model file
const auto file_id = H5Fopen(FLAGS_model.c_str(), H5F_ACC_RDWR, H5P_DEFAULT);
// Open the dataset
const auto conv1_id = H5Dopen2(file_id, "/conv1", H5P_DEFAULT);
const auto conv2_id = H5Dopen2(file_id, "/conv2", H5P_DEFAULT);
const auto fc1_id = H5Dopen2(file_id, "/fc1", H5P_DEFAULT);
const auto fc2_id = H5Dopen2(file_id, "/fc2", H5P_DEFAULT);
// Read the dataset
check_success(H5Dread(conv1_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL,
H5P_DEFAULT, conv1));
check_success(H5Dread(conv2_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL,
H5P_DEFAULT, conv2));
check_success(
H5Dread(fc1_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, fc1));
check_success(
H5Dread(fc2_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, fc2));
// Close the dataset x and y
check_success(H5Dclose(conv1_id));
check_success(H5Dclose(conv2_id));
check_success(H5Dclose(fc1_id));
check_success(H5Dclose(fc2_id));
// Close the file
check_success(H5Fclose(file_id));
}
/*----------------------------START OF KERNELS---------------------------------*/
/*
* convolution
* DESCRIPTION: Performs the convolution of the input map with the given filters
* INPUTS: in_width, out_width, C, out_channel, X, W, Y
* OUTPUTS: none
* RETURN VALUE: none
*/
__global__ void convolution(int in_width, int out_width, int C, int out_channel,
float* X, float* W, float* Y)
{
int i, j, n, m, h0, w0, h_base, w_base, h, w;
int X_tile_width = TILE_WIDTH + KERNEL_WIDTH-1;
extern __shared__ float shmem[];
float* X_shared = &shmem[0];
float* W_shared = &shmem[X_tile_width * X_tile_width];
n = blockIdx.x;
m = blockIdx.y;
h0 = threadIdx.x;
w0 = threadIdx.y;
h_base = (blockIdx.z / POOL_SIZE) * TILE_WIDTH;
w_base = (blockIdx.z % POOL_SIZE) * TILE_WIDTH;
h = h_base+ h0;
w = w_base+ w0;
float acc = 0.0;
for (int c = 0; c < C; c++)
{
if (( h0 < KERNEL_WIDTH) && ( w0 < KERNEL_WIDTH))
W_shared[h0 * KERNEL_WIDTH + w0]= W[ (h0 * KERNEL_WIDTH * C * out_channel) + (w0 * C * out_channel) + (c * out_channel) + m];
__syncthreads();
for (i = h; i < h_base+ X_tile_width; i += TILE_WIDTH)
{
for (j = w; j < w_base + X_tile_width; j += TILE_WIDTH)
{
if(i < in_width && j < in_width)
X_shared[(i-h_base) * X_tile_width + (j-w_base)] = X[(n * in_width * in_width * C) + (i * in_width * C) + (j * C) + c];
}
}
__syncthreads();
for (i = 0; i < KERNEL_WIDTH; i++)
{
for (j = 0; j < KERNEL_WIDTH; j++)
{
if(h < out_width && w < out_width)
acc = acc + X_shared[(h0 + i) * X_tile_width + (w0 + j)] * W_shared[i * KERNEL_WIDTH + j];
}
}
__syncthreads();
}
if(h < out_width && w < out_width)
{
int Yoffset = ((n * out_width + h) * out_width + w) * out_channel + m;
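// Fused ReLU: (acc > 0) is 1 for positive sums and 0 otherwise, so negative results are stored as 0.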
Y[Yoffset] = (int)(acc > 0) * acc;
}
}
/*
* unroll_input
* DESCRIPTION: The kernel to perform the unrolling of the input map.
* INPUTS: H_out, W_out, C, H, W, unrolled_height, unrolled_width, in, unrolled
* OUTPUTS: none
* RETURN VALUE: none
*/
__global__ void unroll_input(int H_out, int W_out, int C, int H, int W, int unrolled_height, int unrolled_width, const float * in, float * unrolled) {
int c, s, h_out, w_out, h_unroll, w_unroll, w_base, p, q;
int t = blockIdx.x * blockDim.x + threadIdx.x;
int W_unroll = H_out * W_out;
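// im2col-style unroll: thread t handles one (channel c, output position s) pair and copies the
// KERNEL_WIDTH x KERNEL_WIDTH input patch into the c-th block of KERNEL_WIDTH*KERNEL_WIDTH rows
// of column s in the (C*KERNEL_WIDTH*KERNEL_WIDTH) x (H_out*W_out) unrolled matrix, so the
// convolution becomes a single matrix multiply.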
if (t < C * W_unroll) {
c = t / W_unroll;
s = t % W_unroll;
h_out = s / W_out;
w_out = s % W_out;
h_unroll = h_out * W_out + w_out;
w_base = c * KERNEL_WIDTH * KERNEL_WIDTH;
for(p = 0; p < KERNEL_WIDTH; p++)
{
for(q = 0; q < KERNEL_WIDTH; q++)
{
w_unroll = w_base + p * KERNEL_WIDTH + q;
unrolled[w_unroll * W_unroll + h_unroll] = in[(h_out + p) * W * C + (w_out + q) * C + c];
}
}
}
}
/*
* reroll
* DESCRIPTION: The kernel to perform the rerolling of the output map.
* INPUTS: y_unroll, y_reroll, H_out, W_out, M
* OUTPUTS: none
* RETURN VALUE: none
*/
__global__ void reroll(float* y_unroll, float* y_reroll, int H_out, int W_out, int M)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int m = idx / (H_out*W_out);
int position = idx % (H_out*W_out);
int row = position / W_out;
int col = position % W_out;
if(idx < H_out * W_out * M)
{
y_reroll[row * W_out * M + col * M + m] = y_unroll[m * H_out * W_out + row * W_out + col];
}
}
/*
* pooling
* DESCRIPTION: The kernel to perform the average pooling required for the forward step
* INPUTS: in_width, out_width, in_channel, out_channel, X, Y
* OUTPUTS: none
* RETURN VALUE: none
*/
__global__ void pooling(int in_width, int out_width, int in_channel, int out_channel, float* X, float* Y)
{
int p, q, bx, by, tx, ty;
bx = blockIdx.x;
by = blockIdx.y;
tx = threadIdx.x;
ty = threadIdx.y;
float acc = 0.0;
int Yoffset = (bx * out_width * out_width * out_channel) + (ty * out_width * out_channel) + (tx * out_channel) + by;
int pool_s = 2;
for (p = 0; p < POOL_SIZE; p++)
{
for (q = 0; q < POOL_SIZE; q++)
acc += X[(bx * in_width * in_width * in_channel) + (((POOL_SIZE * ty) + p) * in_width * in_channel)
+ ((POOL_SIZE * tx + q) * in_channel) + by]/(1.0f * pool_s * pool_s);
}
Y[Yoffset] = acc;
}
/*
* matrixMultiplyShared
* DESCRIPTION: The kernel to perform matrix multiplication using shared memory
* INPUTS: A, B, C, numARows, numAColumns, numBRows, numBColumns
* OUTPUTS: none
* RETURN VALUE: none
*/
__global__ void matrixMultiplyShared(float *A, float *B, float *C,
int numARows, int numAColumns,
int numBRows, int numBColumns)
{
float CValue = 0;
int Row = blockIdx.y * blockDim.y + threadIdx.y;
int Col = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float subTileM[TILEDIM][TILEDIM];
__shared__ float subTileN[TILEDIM][TILEDIM];
for (int i = 0; i < (ceil((float)numBRows/TILEDIM)); i++)
{
if (i*TILEDIM + threadIdx.x < numAColumns && Row < numARows)
subTileM[threadIdx.y][threadIdx.x] = A[Row * numAColumns + i * TILEDIM + threadIdx.x];
else
subTileM[threadIdx.y][threadIdx.x] = 0.0;
if (i*TILEDIM + threadIdx.y < numBRows && Col < numBColumns)
subTileN[threadIdx.y][threadIdx.x] = B[(i * TILEDIM + threadIdx.y) * numBColumns + Col];
else
subTileN[threadIdx.y][threadIdx.x] = 0.0;
__syncthreads();
for (int j = 0; j < TILEDIM; j++)
CValue += subTileM[threadIdx.y][j] * subTileN[j][threadIdx.x];
__syncthreads();
}
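// Fused ReLU on the result: negative values are written out as zero.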
if (Row < numARows && Col < numBColumns && CValue < 0)
C[((blockIdx.y * blockDim.y + threadIdx.y) * numBColumns)+ (blockIdx.x * blockDim.x) + threadIdx.x] = 0;
else if(Row < numARows && Col < numBColumns)
C[((blockIdx.y * blockDim.y + threadIdx.y) * numBColumns) + (blockIdx.x * blockDim.x) + threadIdx.x] = CValue;
}
/*
* matrixMultiplyShared1
* DESCRIPTION: The kernel to perform matrix multiplication using shared memory,
*              with the filter operand read from constant memory (filter1)
* INPUTS: B, C, numARows, numAColumns, numBRows, numBColumns
* OUTPUTS: none
* RETURN VALUE: none
*/
__global__ void matrixMultiplyShared1(float *B, float *C,
int numARows, int numAColumns,
int numBRows, int numBColumns)
{
float CValue = 0;
int Row = blockIdx.y*blockDim.y + threadIdx.y;
int Col = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ float subTileM[TILEDIM][TILEDIM];
__shared__ float subTileN[TILEDIM][TILEDIM];
for (int i = 0; i < (ceil((float)numBRows/TILEDIM)); i++)
{
if (i*TILEDIM + threadIdx.x < numAColumns && Row < numARows)
subTileM[threadIdx.y][threadIdx.x] = filter1[Row * numAColumns + i * TILEDIM + threadIdx.x];
else
subTileM[threadIdx.y][threadIdx.x] = 0.0;
if (i*TILEDIM + threadIdx.y < numBRows && Col < numBColumns)
subTileN[threadIdx.y][threadIdx.x] = B[(i * TILEDIM + threadIdx.y) * numBColumns + Col];
else
subTileN[threadIdx.y][threadIdx.x] = 0.0;
__syncthreads();
for (int j = 0; j < TILEDIM; j++)
CValue += subTileM[threadIdx.y][j] * subTileN[j][threadIdx.x];
__syncthreads();
}
if (Row < numARows && Col < numBColumns && CValue < 0)
C[((blockIdx.y * blockDim.y + threadIdx.y) * numBColumns) + (blockIdx.x * blockDim.x) + threadIdx.x] = 0;
else if(Row < numARows && Col < numBColumns)
C[((blockIdx.y * blockDim.y + threadIdx.y) * numBColumns) + (blockIdx.x * blockDim.x) + threadIdx.x] = CValue;
}
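/*
 * matrixMultiplyShared_norm
 * DESCRIPTION: The kernel to perform tiled matrix multiplication without the fused ReLU
 *              (used for the final fully-connected layer)
 * INPUTS: A, B, C, numARows, numAColumns, numBRows, numBColumns
 * OUTPUTS: none
 * RETURN VALUE: none
 */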
__global__ void matrixMultiplyShared_norm(float *A, float *B, float *C,
int numARows, int numAColumns,
int numBRows, int numBColumns)
{
float CValue = 0;
int Row = blockIdx.y * blockDim.y + threadIdx.y;
int Col = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float subTileM[TILEDIM][TILEDIM];
__shared__ float subTileN[TILEDIM][TILEDIM];
for (int i = 0; i < (ceil((float)numBRows/TILEDIM)); i++)
{
if (i*TILEDIM + threadIdx.x < numAColumns && Row < numARows)
subTileM[threadIdx.y][threadIdx.x] = A[Row * numAColumns + i * TILEDIM + threadIdx.x];
else
subTileM[threadIdx.y][threadIdx.x] = 0.0;
if (i*TILEDIM + threadIdx.y < numBRows && Col < numBColumns)
subTileN[threadIdx.y][threadIdx.x] = B[(i * TILEDIM + threadIdx.y) * numBColumns + Col];
else
subTileN[threadIdx.y][threadIdx.x] = 0.0;
__syncthreads();
for (int j = 0; j < TILEDIM; j++)
CValue += subTileM[threadIdx.y][j] * subTileN[j][threadIdx.x];
__syncthreads();
}
if (Row < numARows && Col < numBColumns)
C[((blockIdx.y * blockDim.y + threadIdx.y) * numBColumns)+(blockIdx.x * blockDim.x) + threadIdx.x] = CValue;
}
/*-----------------------------END OF KERNELS---------------------------------*/
/*----------------------START OF SEQUENTIAL FUNCTIONS-------------------------*/
void unroll_filter(const float * W, float * w, int M, int C) {
for(int row = 0; row < KERNEL_WIDTH; row++)
{
for(int col = 0; col< KERNEL_WIDTH; col++)
{
for(int i = 0; i < C; i++)
{
for(int j = 0; j < M; j++)
w[j * C * KERNEL_WIDTH * KERNEL_WIDTH + i * KERNEL_WIDTH * KERNEL_WIDTH + row * KERNEL_WIDTH + col] = W[row * KERNEL_WIDTH * C * M + col * C * M + i * M + j];
}
}
}
}
// Rectified linear unit (4D)
static void relu4(float *X, const int xdims[4])
{
for (const auto i : range(0, xdims[0] * xdims[1] * xdims[2] * xdims[3]))
X[i] = (X[i] < 0) ? 0 : X[i];
}
// Choose the guess with largest score
void argmax(const float *X, const int xdims[2], int *Y)
{
for (const auto i : range(0, xdims[0])) {
auto max_idx = 0;
auto max = X[i * xdims[1]];
for (const auto j : range(0, xdims[1])) {
const auto elem = X[(i * xdims[1]) + j];
if (elem > max) {
max_idx = j;
max = elem;
}
}
Y[i] = max_idx;
}
}
/*------------------------END OF SEQUENTIAL FUNCTIONS-------------------------*/
/*------------------------- START OF KERNEL CALLS ----------------------------*/
/*
* conv_forward_valid
* DESCRIPTION: Calls the kernel for unrolling the input map, matrix multiplication
* and rerolling of the output map to perform the convolution.
* Only used if the input map is above a given threshold.
* INPUTS: X, xdims, W, wdims, Y, ydims
* OUTPUTS: none
* RETURN VALUE: none
*/
static void conv_forward_valid(const float *X, const int xdims[4],
const float *W, const int wdims[4], float *Y,
const int ydims[4])
{
int i, j;
int C = wdims[2];
int M = wdims[3];
int H = xdims[1];
int w = xdims[2];
int H_out = ydims[1];
int W_out = ydims[2];
int x_batch_size = xdims[1] * xdims[2] * xdims[3];
int y_batch_size = ydims[1] * ydims[2] * ydims[3];
int x_unrolled_height = C * KERNEL_WIDTH * KERNEL_WIDTH;
int x_unrolled_width = H_out * W_out;
int filter_height = M;
int filter_width = C * KERNEL_WIDTH * KERNEL_WIDTH;
float * device_input[NUM_STREAMS];
float * device_output;
float * device_X[NUM_STREAMS];
float * device_Y[NUM_STREAMS];
float * device_roll_Y[NUM_STREAMS];
float * filter;
hipStream_t streams[NUM_STREAMS];
for(i=0; i < NUM_STREAMS; i++)
{
hipStreamCreate(&streams[i]);
hipMalloc((void **) &device_X[i], x_unrolled_height * x_unrolled_width * sizeof(float));
hipMalloc((void **) &device_Y[i], y_batch_size * sizeof(float));
// device_roll_Y[i] is assigned below to alias a slice of device_output; no separate allocation is needed
hipMalloc((void **) &device_input[i], x_batch_size * sizeof(float));
}
hipMalloc((void**) &device_output, ydims[0] * y_batch_size * sizeof(float));
float * w_unrolled = (float *) malloc(filter_height * filter_width * sizeof(float));
unroll_filter(W, w_unrolled, M, C);
if(wdims[2] == 1)
hipMemcpyToSymbol(filter1, w_unrolled, filter_height * filter_width * sizeof(float));
else
{
hipMalloc((void **) &filter, filter_height * filter_width * sizeof(float));
hipMemcpy(filter, w_unrolled, filter_height * filter_width * sizeof(float), hipMemcpyHostToDevice);
}
int unroll_block = 1024;
int unroll_grid = ceil( (float) (C * x_unrolled_width) / 1024);
dim3 mult_block(TILEDIM, TILEDIM, 1);
dim3 mult_grid(ceil( (float) (H_out * W_out) / TILEDIM), ceil((float) M / TILEDIM), 1);
int reroll_block = 1024;
int reroll_grid = ceil( (float) y_batch_size / 1024);
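// Pipeline: images are processed in groups of NUM_STREAMS. Per stream: asynchronous copy
// of one image, im2col unroll, tiled GEMM against the unrolled filter, and a reroll of the
// result into that image's slice of device_output.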
for (i = 0; i < ydims[0]; i += NUM_STREAMS)
{
for(j = 0; (i + j < ydims[0]) && (j < NUM_STREAMS); j++)
{
int xoffset = (i + j) * x_batch_size;
hipMemcpyAsync(device_input[j], &X[xoffset], x_batch_size * sizeof(float), hipMemcpyHostToDevice, streams[j]);
hipLaunchKernelGGL(( unroll_input), dim3(unroll_grid), dim3(unroll_block), 0, streams[j] , H_out, W_out, C, H, w,
x_unrolled_height, x_unrolled_width,
device_input[j], device_X[j]);
}
for(j = 0; (i + j < ydims[0]) && (j < NUM_STREAMS); j++)
{
if(wdims[2] == 1)
hipLaunchKernelGGL(( matrixMultiplyShared1), dim3(mult_grid), dim3(mult_block), 0, streams[j] , device_X[j], device_Y[j],
filter_height, filter_width,
x_unrolled_height, x_unrolled_width);
else
hipLaunchKernelGGL(( matrixMultiplyShared), dim3(mult_grid), dim3(mult_block), 0, streams[j] , filter, device_X[j], device_Y[j],
filter_height, filter_width,
x_unrolled_height, x_unrolled_width);
}
for(j = 0; (i + j < ydims[0]) && (j < NUM_STREAMS); j++)
{
device_roll_Y[j] = device_output + (i + j) * y_batch_size;
hipLaunchKernelGGL(( reroll), dim3(reroll_grid), dim3(reroll_block), 0, streams[j] , device_Y[j], device_roll_Y[j],
ydims[1], ydims[2], ydims[3]);
}
}
hipMemcpy(Y, device_output, ydims[0] * y_batch_size * sizeof(float), hipMemcpyDeviceToHost);
free(w_unrolled);
for(i = 0; i < NUM_STREAMS; i++)
{
hipStreamDestroy(streams[i]);
hipFree(device_input[i]);
hipFree(device_X[i]);
hipFree(device_Y[i]);
// device_roll_Y[i] aliases device_output, so it is not freed separately
}
hipFree(device_output);
if(wdims[2] != 1)
hipFree(filter);
}
/*
* conv_forward_valid2
* DESCRIPTION: Calls the kernel for basic convolution for the forward step.
* Only used if the input map is below a given threshold.
* INPUTS: X, xdims, W, wdims, Y, ydims
* OUTPUTS: none
* RETURN VALUE: none
*/
void conv_forward_valid2(const float *X, const int xdims[4],
const float *W, const int wdims[4], float *Y,
const int ydims[4])
{
float* device_input;
float* device_output;
float* filter_conv;
hipMalloc((void **) &filter_conv, sizeof(float) * conv2dims[0] * conv2dims[1] * conv2dims[2] * conv2dims[3]);
hipMalloc((void **) &device_input, sizeof(float) * xdims[0] * xdims[1] * xdims[2] * xdims[3]);
hipMalloc((void **) &device_output, sizeof(float) * ydims[0] * ydims[1] * ydims[2] * ydims[3]);
hipMemcpy(device_input, X, sizeof(float) * xdims[0] * xdims[1] * xdims[2] * xdims[3], hipMemcpyHostToDevice);
dim3 dimGrid(xdims[0], ydims[3], ceil(ydims[1]/(float)TILE_WIDTH) * ceil(ydims[1]/(float)TILE_WIDTH));
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
size_t shmem_size = sizeof(float) * ( (TILE_WIDTH + KERNEL_WIDTH-1)*(TILE_WIDTH + KERNEL_WIDTH-1) + KERNEL_WIDTH*KERNEL_WIDTH );
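// Matches the kernel's shared-memory layout: X_shared, an input tile of side
// TILE_WIDTH + KERNEL_WIDTH - 1, followed by W_shared, one KERNEL_WIDTH x KERNEL_WIDTH filter slice.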
hipMemcpy(filter_conv, W, sizeof(float) * conv2dims[0] * conv2dims[1] * conv2dims[2] * conv2dims[3], hipMemcpyHostToDevice);
hipLaunchKernelGGL(( convolution), dim3(dimGrid), dim3(dimBlock), shmem_size, 0, xdims[1], ydims[1], xdims[3], ydims[3],
device_input, filter_conv, device_output);
hipMemcpy(Y, device_output, sizeof(float) * ydims[0] * ydims[1] * ydims[2] * ydims[3], hipMemcpyDeviceToHost);
hipFree(filter_conv);
hipFree(device_output);
hipFree(device_input);
return;
}
/*
* average_pool
* DESCRIPTION: Calls the kernel for pooling
* INPUTS: X, xdims, Y, ydims
* OUTPUTS: none
* RETURN VALUE: none
*/
void average_pool(const float *X, const int xdims[4],
float *Y, const int ydims[4])
{
float* device_input;
float* device_output;
hipMalloc((void **) &device_input, sizeof(float) * xdims[0] * xdims[1] * xdims[2] * xdims[3]);
hipMalloc((void **) &device_output, sizeof(float) * ydims[0] * ydims[1] * ydims[2] * ydims[3]);
hipMemcpy(device_input, X, sizeof(float) * xdims[0] * xdims[1] * xdims[2] * xdims[3], hipMemcpyHostToDevice);
dim3 dimGrid(xdims[0], ydims[3], 1);
dim3 dimBlock(ydims[1], ydims[1]);
hipLaunchKernelGGL(( pooling), dim3(dimGrid), dim3(dimBlock), 0, 0, xdims[1], ydims[1], xdims[3], ydims[3], device_input, device_output);
hipMemcpy(Y, device_output, sizeof(float) * ydims[0] * ydims[1] * ydims[2] * ydims[3], hipMemcpyDeviceToHost);
hipFree(device_output);
hipFree(device_input);
return;
}
/*
* fully_forward
* DESCRIPTION: Calls the kernel for matrix multiplication
* INPUTS: X, xdims, W, wdims, Y, ydims
* OUTPUTS: none
* RETURN VALUE: none
*/
void fully_forward(const float *X, const int xdims[2], float *W,
const int wdims[2], float *Y, const int ydims[2], int ver)
{
float* device_input;
float* device_output;
float* device_w;
hipMalloc((void **) &device_input, sizeof(float) * xdims[0] * xdims[1]);
hipMalloc((void **) &device_output, sizeof(float) * ydims[0] * ydims[1]);
hipMalloc((void **) &device_w, sizeof(float)*wdims[0] * wdims[1]);
hipMemcpy(device_input, X, sizeof(float) * xdims[0] * xdims[1], hipMemcpyHostToDevice);
hipMemcpy(device_w, W, sizeof(float) * wdims[0]*wdims[1], hipMemcpyHostToDevice);
dim3 dimGrid(ceil(ydims[1]/(float)TILEDIM), ceil(ydims[0]/(float)TILEDIM));
dim3 dimBlock(TILEDIM, TILEDIM);
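// ver == 1 selects the ReLU-fusing GEMM (used after fc1); otherwise the plain GEMM is used
// for the final fc2 layer.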
if(ver == 1)
hipLaunchKernelGGL(( matrixMultiplyShared), dim3(dimGrid),dim3(dimBlock), 0, 0, device_input, device_w, device_output,
xdims[0], xdims[1],
wdims[0], wdims[1]);
else
hipLaunchKernelGGL(( matrixMultiplyShared_norm), dim3(dimGrid),dim3(dimBlock), 0, 0, device_input, device_w, device_output,
xdims[0], xdims[1],
wdims[0], wdims[1]);
hipMemcpy(Y, device_output, sizeof(float) * ydims[0] * ydims[1], hipMemcpyDeviceToHost);
hipFree(device_input);
hipFree(device_output);
hipFree(device_w);
}
/*------------------------- END OF KERNEL CALLS ----------------------------*/
// Forward operation for the CNN, a combination of conv layer + average pooling
// + relu
void forward_operation(float *x, float *conv1, float *conv2, float *fc1, float *fc2, int *out)
{
// conv layer
const int adims[] = {xdims[0], (xdims[1] - conv1dims[0] + 1), (xdims[2] - conv1dims[1] + 1), conv1dims[3]};
auto a = zeros<float>(adims);
if(xdims[0] >= 100)
conv_forward_valid(x, xdims, conv1, conv1dims, a, adims);
else
{
conv_forward_valid2(x, xdims, conv1, conv1dims, a, adims);
// relu layer
relu4(a, adims);
}
// average pooling
const int pool_size = 2;
const int bdims[] = {adims[0], adims[1] / pool_size, adims[2] / pool_size, adims[3]};
auto b = zeros<float>(bdims);
average_pool(a, adims, b, bdims);
// conv layer
const int cdims[] = {bdims[0], (bdims[1] - conv2dims[0] + 1), (bdims[2] - conv2dims[1] + 1), conv2dims[3]};
auto c = zeros<float>(cdims);
if(bdims[0] >= 100)
conv_forward_valid(b, bdims, conv2, conv2dims, c, cdims);
else
{
conv_forward_valid2(b, bdims, conv2, conv2dims, c, cdims);
// relu
relu4(c, cdims);
}
// average pooling
const int ddims[] = {cdims[0], cdims[1] / pool_size, cdims[2] / pool_size, cdims[3]};
auto d = zeros<float>(ddims);
average_pool(c, cdims, d, ddims);
// reshape
const int ddims2[] = {ddims[0], ddims[1] * ddims[2] * ddims[3]};
// matrix multiplication
const int edims[] = {ddims[0], fc1dims[1]};
auto e = zeros<float>(edims);
fully_forward(d, ddims2, fc1, fc1dims, e, edims, 1);
// relu
//relu2(e, edims);
// matrix multiplication
const int fdims[] = {edims[0], fc2dims[1]};
auto f = zeros<float>(fdims);
fully_forward(e, edims, fc2, fc2dims, f, fdims, 0);
argmax(f, fdims, out);
delete[] a;
delete[] b;
delete[] c;
delete[] d;
delete[] e;
delete[] f;
}
int main(int argc, char **argv)
{
if (argc != 3 && argc != 4) {
std::cerr << "\n"
<< "This program performs the forward operation step for "
"a Convolutional Neural Network (CNN). "
"Sample usage: \n"
<< argv[0]
<< " [../data/test10.hdf5] [../data/model.hdf5] [10]\n";
return -1;
}
FLAGS_testdata = std::string(argv[1]);
FLAGS_model = std::string(argv[2]);
if (argc == 3) {
const std::map<std::string, int> default_batch_sizes{
{"../data/test2.hdf5", 2},
{"../data/test10.hdf5", 10},
{"../data/test100.hdf5", 100},
{"../data/testfull.hdf5", 10000}};
const auto batch_size_in_map = default_batch_sizes.find(FLAGS_testdata);
if (batch_size_in_map == default_batch_sizes.end()) {
std::cerr << "\nERROR:: Unrecognized file " << FLAGS_testdata << " batch_size must be specified.\n";
return -1;
}
FLAGS_batch_size = batch_size_in_map->second;
} else if (argc == 4) {
FLAGS_batch_size = atoi(argv[3]);
}
xdims[0] = FLAGS_batch_size;
rdims[0] = FLAGS_batch_size;
// Load data into x and y
float *x = allocate<float>(xdims);
float *y = allocate<float>(rdims);
loadData(x, y);
// Load model
float *conv1 = allocate<float>(conv1dims);
float *conv2 = allocate<float>(conv2dims);
float *fc1 = allocate<float>(fc1dims);
float *fc2 = allocate<float>(fc2dims);
loadModel(conv1, conv2, fc1, fc2);
// Perform forward operation
int *out = zeros<int>(FLAGS_batch_size);
// get start time
const auto start = now();
forward_operation(x, conv1, conv2, fc1, fc2, out);
// get end time
const auto end = now();
// get elapsed time in milliseconds
const auto elapsed =
std::chrono::duration<double, std::milli>(end - start).count();
// Get reference
int *ref = zeros<int>(FLAGS_batch_size);
argmax(y, rdims, ref);
// Calculate correctness
int num_correct = 0;
for (const auto i : range(0, FLAGS_batch_size)) {
if (out[i] == ref[i]) {
num_correct++;
}
}
std::cout << "Done with " << FLAGS_batch_size << " queries in "
<< "elapsed = " << elapsed << " milliseconds. Correctness: "
<< static_cast<float>(num_correct) / FLAGS_batch_size << "\n";
delete[] x;
delete[] y;
delete[] conv1;
delete[] conv2;
delete[] fc1;
delete[] fc2;
delete[] out;
delete[] ref;
return 0;
}
|
1bad212a469a4864de6a1ed808c82ab6306ced89.cu
|
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iostream>
#include <numeric>
#include <map>
#include <time.h>
#include <valarray>
#include <string>
#include <hdf5.h>
#include "range.hpp"
#include "utils.hpp"
#define NUM_ROWS 28
#define NUM_COLS 28
#define NUM_CHANNELS 1
#define NUM_DIGITS 10
#define NUM_STREAMS 32
#define TILEDIM 32
#define POOL_SIZE 2
#define KERNEL_WIDTH 5
#define TILE_WIDTH 12
static int FLAGS_batch_size = 10000;
static std::string FLAGS_testdata{};
static std::string FLAGS_model{};
// Data and reference data dimensions
static int xdims[] = {FLAGS_batch_size, NUM_ROWS, NUM_COLS, NUM_CHANNELS};
static int rdims[] = {FLAGS_batch_size, NUM_DIGITS};
// Model dimensions
static int conv1dims[] = {5, 5, 1, 32};
static int conv2dims[] = {5, 5, 32, 64};
static int fc1dims[] = {1024, 128};
static int fc2dims[] = {128, 10};
__constant__ float filter1[KERNEL_WIDTH * KERNEL_WIDTH * TILEDIM];
static int loadData(float *x, float *y) {
// Open the data file
const auto file_id =
H5Fopen(FLAGS_testdata.c_str(), H5F_ACC_RDWR, H5P_DEFAULT);
// Open the dataset x and y
const auto x_id = H5Dopen2(file_id, "/x", H5P_DEFAULT);
const auto y_id = H5Dopen2(file_id, "/y", H5P_DEFAULT);
// Get the dataset x dimensions
const auto xspace = H5Dget_space(x_id);
const auto xndims = H5Sget_simple_extent_ndims(xspace);
assert(xndims == 4);
hsize_t *input_dims = allocate<hsize_t>(xdims);
H5Sget_simple_extent_dims(xspace, input_dims, NULL);
if (input_dims[0] != FLAGS_batch_size) {
std::cout << "data size does not match batch size specified!\n";
delete[] input_dims;
return 1; // return error
}
std::cout << "input dimensions = " << input_dims[0] << " x " << input_dims[1]
<< " x " << input_dims[2] << " x " << input_dims[3] << "\n";
// Read the dataset x and y
check_success(
H5Dread(x_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, x));
check_success(
H5Dread(y_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, y));
// Close the dataset x and y
check_success(H5Dclose(x_id));
check_success(H5Dclose(y_id));
// Close the file
check_success(H5Fclose(file_id));
// return success
delete[] input_dims;
return 0;
}
static void loadModel(float *conv1, float *conv2, float *fc1, float *fc2) {
// Open the model file
const auto file_id = H5Fopen(FLAGS_model.c_str(), H5F_ACC_RDWR, H5P_DEFAULT);
// Open the dataset
const auto conv1_id = H5Dopen2(file_id, "/conv1", H5P_DEFAULT);
const auto conv2_id = H5Dopen2(file_id, "/conv2", H5P_DEFAULT);
const auto fc1_id = H5Dopen2(file_id, "/fc1", H5P_DEFAULT);
const auto fc2_id = H5Dopen2(file_id, "/fc2", H5P_DEFAULT);
// Read the dataset
check_success(H5Dread(conv1_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL,
H5P_DEFAULT, conv1));
check_success(H5Dread(conv2_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL,
H5P_DEFAULT, conv2));
check_success(
H5Dread(fc1_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, fc1));
check_success(
H5Dread(fc2_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, fc2));
// Close the dataset x and y
check_success(H5Dclose(conv1_id));
check_success(H5Dclose(conv2_id));
check_success(H5Dclose(fc1_id));
check_success(H5Dclose(fc2_id));
// Close the file
check_success(H5Fclose(file_id));
}
/*----------------------------START OF KERNELS---------------------------------*/
/*
* convolution
* DESCRIPTION: Performs the convolution of the input map with the given filters
* INPUTS: in_width, out_width, C, out_channel, X, W, Y
* OUTPUTS: none
* RETURN VALUE: none
*/
__global__ void convolution(int in_width, int out_width, int C, int out_channel,
float* X, float* W, float* Y)
{
int i, j, n, m, h0, w0, h_base, w_base, h, w;
int X_tile_width = TILE_WIDTH + KERNEL_WIDTH-1;
extern __shared__ float shmem[];
float* X_shared = &shmem[0];
float* W_shared = &shmem[X_tile_width * X_tile_width];
n = blockIdx.x;
m = blockIdx.y;
h0 = threadIdx.x;
w0 = threadIdx.y;
h_base = (blockIdx.z / POOL_SIZE) * TILE_WIDTH;
w_base = (blockIdx.z % POOL_SIZE) * TILE_WIDTH;
h = h_base+ h0;
w = w_base+ w0;
float acc = 0.0;
for (int c = 0; c < C; c++)
{
if (( h0 < KERNEL_WIDTH) && ( w0 < KERNEL_WIDTH))
W_shared[h0 * KERNEL_WIDTH + w0]= W[ (h0 * KERNEL_WIDTH * C * out_channel) + (w0 * C * out_channel) + (c * out_channel) + m];
__syncthreads();
for (i = h; i < h_base+ X_tile_width; i += TILE_WIDTH)
{
for (j = w; j < w_base + X_tile_width; j += TILE_WIDTH)
{
if(i < in_width && j < in_width)
X_shared[(i-h_base) * X_tile_width + (j-w_base)] = X[(n * in_width * in_width * C) + (i * in_width * C) + (j * C) + c];
}
}
__syncthreads();
for (i = 0; i < KERNEL_WIDTH; i++)
{
for (j = 0; j < KERNEL_WIDTH; j++)
{
if(h < out_width && w < out_width)
acc = acc + X_shared[(h0 + i) * X_tile_width + (w0 + j)] * W_shared[i * KERNEL_WIDTH + j];
}
}
__syncthreads();
}
if(h < out_width && w < out_width)
{
int Yoffset = ((n * out_width + h) * out_width + w) * out_channel + m;
Y[Yoffset] = (int)(acc > 0) * acc;
}
}
/*
* unroll_input
* DESCRIPTION: The kernel to perform the unrolling of the input map.
* INPUTS: H_out, W_out, C, H, W, unrolled_height, unrolled_width, in, unrolled
* OUTPUTS: none
* RETURN VALUE: none
*/
__global__ void unroll_input(int H_out, int W_out, int C, int H, int W, int unrolled_height, int unrolled_width, const float * in, float * unrolled) {
int c, s, h_out, w_out, h_unroll, w_unroll, w_base, p, q;
int t = blockIdx.x * blockDim.x + threadIdx.x;
int W_unroll = H_out * W_out;
if (t < C * W_unroll) {
c = t / W_unroll;
s = t % W_unroll;
h_out = s / W_out;
w_out = s % W_out;
h_unroll = h_out * W_out + w_out;
w_base = c * KERNEL_WIDTH * KERNEL_WIDTH;
for(p = 0; p < KERNEL_WIDTH; p++)
{
for(q = 0; q < KERNEL_WIDTH; q++)
{
w_unroll = w_base + p * KERNEL_WIDTH + q;
unrolled[w_unroll * W_unroll + h_unroll] = in[(h_out + p) * W * C + (w_out + q) * C + c];
}
}
}
}
/*
* reroll
* DESCRIPTION: The kernel to perform the rerolling of the output map.
* INPUTS: y_unroll, y_reroll, H_out, W_out, M
* OUTPUTS: none
* RETURN VALUE: none
*/
__global__ void reroll(float* y_unroll, float* y_reroll, int H_out, int W_out, int M)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int m = idx / (H_out*W_out);
int position = idx % (H_out*W_out);
int row = position / W_out;
int col = position % W_out;
if(idx < H_out * W_out * M)
{
y_reroll[row * W_out * M + col * M + m] = y_unroll[m * H_out * W_out + row * W_out + col];
}
}
/*
* pooling
* DESCRIPTION: The kernel to perform the average pooling required for the forward step
* INPUTS: in_width, out_width, in_channel, out_channel, X, Y
* OUTPUTS: none
* RETURN VALUE: none
*/
__global__ void pooling(int in_width, int out_width, int in_channel, int out_channel, float* X, float* Y)
{
int p, q, bx, by, tx, ty;
bx = blockIdx.x;
by = blockIdx.y;
tx = threadIdx.x;
ty = threadIdx.y;
float acc = 0.0;
int Yoffset = (bx * out_width * out_width * out_channel) + (ty * out_width * out_channel) + (tx * out_channel) + by;
int pool_s = 2;
for (p = 0; p < POOL_SIZE; p++)
{
for (q = 0; q < POOL_SIZE; q++)
acc += X[(bx * in_width * in_width * in_channel) + (((POOL_SIZE * ty) + p) * in_width * in_channel)
+ ((POOL_SIZE * tx + q) * in_channel) + by]/(1.0f * pool_s * pool_s);
}
Y[Yoffset] = acc;
}
/*
* matrixMultiplyShared
* DESCRIPTION: The kernel to perform matrix multiplication using shared memory
* INPUTS: A, B, C, numARows, numAColumns, numBRows, numBColumns
* OUTPUTS: none
* RETURN VALUE: none
*/
__global__ void matrixMultiplyShared(float *A, float *B, float *C,
int numARows, int numAColumns,
int numBRows, int numBColumns)
{
float CValue = 0;
int Row = blockIdx.y * blockDim.y + threadIdx.y;
int Col = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float subTileM[TILEDIM][TILEDIM];
__shared__ float subTileN[TILEDIM][TILEDIM];
for (int i = 0; i < (ceil((float)numBRows/TILEDIM)); i++)
{
if (i*TILEDIM + threadIdx.x < numAColumns && Row < numARows)
subTileM[threadIdx.y][threadIdx.x] = A[Row * numAColumns + i * TILEDIM + threadIdx.x];
else
subTileM[threadIdx.y][threadIdx.x] = 0.0;
if (i*TILEDIM + threadIdx.y < numBRows && Col < numBColumns)
subTileN[threadIdx.y][threadIdx.x] = B[(i * TILEDIM + threadIdx.y) * numBColumns + Col];
else
subTileN[threadIdx.y][threadIdx.x] = 0.0;
__syncthreads();
for (int j = 0; j < TILEDIM; j++)
CValue += subTileM[threadIdx.y][j] * subTileN[j][threadIdx.x];
__syncthreads();
}
if (Row < numARows && Col < numBColumns && CValue < 0)
C[((blockIdx.y * blockDim.y + threadIdx.y) * numBColumns)+ (blockIdx.x * blockDim.x) + threadIdx.x] = 0;
else if(Row < numARows && Col < numBColumns)
C[((blockIdx.y * blockDim.y + threadIdx.y) * numBColumns) + (blockIdx.x * blockDim.x) + threadIdx.x] = CValue;
}
/*
* matrixMultiplyShared1
* DESCRIPTION: The kernel to perform matrix multiplication using shared memory,
*              with the filter operand read from constant memory (filter1)
* INPUTS: B, C, numARows, numAColumns, numBRows, numBColumns
* OUTPUTS: none
* RETURN VALUE: none
*/
__global__ void matrixMultiplyShared1(float *B, float *C,
int numARows, int numAColumns,
int numBRows, int numBColumns)
{
float CValue = 0;
int Row = blockIdx.y*blockDim.y + threadIdx.y;
int Col = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ float subTileM[TILEDIM][TILEDIM];
__shared__ float subTileN[TILEDIM][TILEDIM];
for (int i = 0; i < (ceil((float)numBRows/TILEDIM)); i++)
{
if (i*TILEDIM + threadIdx.x < numAColumns && Row < numARows)
subTileM[threadIdx.y][threadIdx.x] = filter1[Row * numAColumns + i * TILEDIM + threadIdx.x];
else
subTileM[threadIdx.y][threadIdx.x] = 0.0;
if (i*TILEDIM + threadIdx.y < numBRows && Col < numBColumns)
subTileN[threadIdx.y][threadIdx.x] = B[(i * TILEDIM + threadIdx.y) * numBColumns + Col];
else
subTileN[threadIdx.y][threadIdx.x] = 0.0;
__syncthreads();
for (int j = 0; j < TILEDIM; j++)
CValue += subTileM[threadIdx.y][j] * subTileN[j][threadIdx.x];
__syncthreads();
}
if (Row < numARows && Col < numBColumns && CValue < 0)
C[((blockIdx.y * blockDim.y + threadIdx.y) * numBColumns) + (blockIdx.x * blockDim.x) + threadIdx.x] = 0;
else if(Row < numARows && Col < numBColumns)
C[((blockIdx.y * blockDim.y + threadIdx.y) * numBColumns) + (blockIdx.x * blockDim.x) + threadIdx.x] = CValue;
}
__global__ void matrixMultiplyShared_norm(float *A, float *B, float *C,
int numARows, int numAColumns,
int numBRows, int numBColumns)
{
float CValue = 0;
int Row = blockIdx.y * blockDim.y + threadIdx.y;
int Col = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float subTileM[TILEDIM][TILEDIM];
__shared__ float subTileN[TILEDIM][TILEDIM];
for (int i = 0; i < (ceil((float)numBRows/TILEDIM)); i++)
{
if (i*TILEDIM + threadIdx.x < numAColumns && Row < numARows)
subTileM[threadIdx.y][threadIdx.x] = A[Row * numAColumns + i * TILEDIM + threadIdx.x];
else
subTileM[threadIdx.y][threadIdx.x] = 0.0;
if (i*TILEDIM + threadIdx.y < numBRows && Col < numBColumns)
subTileN[threadIdx.y][threadIdx.x] = B[(i * TILEDIM + threadIdx.y) * numBColumns + Col];
else
subTileN[threadIdx.y][threadIdx.x] = 0.0;
__syncthreads();
for (int j = 0; j < TILEDIM; j++)
CValue += subTileM[threadIdx.y][j] * subTileN[j][threadIdx.x];
__syncthreads();
}
if (Row < numARows && Col < numBColumns)
C[((blockIdx.y * blockDim.y + threadIdx.y) * numBColumns)+(blockIdx.x * blockDim.x) + threadIdx.x] = CValue;
}
/*-----------------------------END OF KERNELS---------------------------------*/
/*----------------------START OF SEQUENTIAL FUNCTIONS-------------------------*/
void unroll_filter(const float * W, float * w, int M, int C) {
for(int row = 0; row < KERNEL_WIDTH; row++)
{
for(int col = 0; col< KERNEL_WIDTH; col++)
{
for(int i = 0; i < C; i++)
{
for(int j = 0; j < M; j++)
w[j * C * KERNEL_WIDTH * KERNEL_WIDTH + i * KERNEL_WIDTH * KERNEL_WIDTH + row * KERNEL_WIDTH + col] = W[row * KERNEL_WIDTH * C * M + col * C * M + i * M + j];
}
}
}
}
// Rectified linear unit (4D)
static void relu4(float *X, const int xdims[4])
{
for (const auto i : range(0, xdims[0] * xdims[1] * xdims[2] * xdims[3]))
X[i] = (X[i] < 0) ? 0 : X[i];
}
// Choose the guess with largest score
void argmax(const float *X, const int xdims[2], int *Y)
{
for (const auto i : range(0, xdims[0])) {
auto max_idx = 0;
auto max = X[i * xdims[1]];
for (const auto j : range(0, xdims[1])) {
const auto elem = X[(i * xdims[1]) + j];
if (elem > max) {
max_idx = j;
max = elem;
}
}
Y[i] = max_idx;
}
}
/*------------------------END OF SEQUENTIAL FUNCTIONS-------------------------*/
/*------------------------- START OF KERNEL CALLS ----------------------------*/
/*
* conv_forward_valid
* DESCRIPTION: Calls the kernel for unrolling the input map, matrix multiplication
* and rerolling of the output map to perform the convolution.
* Only used if the input map is above a given threshold.
* INPUTS: X, xdims, W, wdims, Y, ydims
* OUTPUTS: none
* RETURN VALUE: none
*/
static void conv_forward_valid(const float *X, const int xdims[4],
const float *W, const int wdims[4], float *Y,
const int ydims[4])
{
int i, j;
int C = wdims[2];
int M = wdims[3];
int H = xdims[1];
int w = xdims[2];
int H_out = ydims[1];
int W_out = ydims[2];
int x_batch_size = xdims[1] * xdims[2] * xdims[3];
int y_batch_size = ydims[1] * ydims[2] * ydims[3];
int x_unrolled_height = C * KERNEL_WIDTH * KERNEL_WIDTH;
int x_unrolled_width = H_out * W_out;
int filter_height = M;
int filter_width = C * KERNEL_WIDTH * KERNEL_WIDTH;
float * device_input[NUM_STREAMS];
float * device_output;
float * device_X[NUM_STREAMS];
float * device_Y[NUM_STREAMS];
float * device_roll_Y[NUM_STREAMS];
float * filter;
cudaStream_t streams[NUM_STREAMS];
for(i=0; i < NUM_STREAMS; i++)
{
cudaStreamCreate(&streams[i]);
cudaMalloc((void **) &device_X[i], x_unrolled_height * x_unrolled_width * sizeof(float));
cudaMalloc((void **) &device_Y[i], y_batch_size * sizeof(float));
// device_roll_Y[i] is assigned below to alias a slice of device_output; no separate allocation is needed
cudaMalloc((void **) &device_input[i], x_batch_size * sizeof(float));
}
cudaMalloc((void**) &device_output, ydims[0] * y_batch_size * sizeof(float));
float * w_unrolled = (float *) malloc(filter_height * filter_width * sizeof(float));
unroll_filter(W, w_unrolled, M, C);
if(wdims[2] == 1)
cudaMemcpyToSymbol(filter1, w_unrolled, filter_height * filter_width * sizeof(float));
else
{
cudaMalloc((void **) &filter, filter_height * filter_width * sizeof(float));
cudaMemcpy(filter, w_unrolled, filter_height * filter_width * sizeof(float), cudaMemcpyHostToDevice);
}
int unroll_block = 1024;
int unroll_grid = ceil( (float) (C * x_unrolled_width) / 1024);
dim3 mult_block(TILEDIM, TILEDIM, 1);
dim3 mult_grid(ceil( (float) (H_out * W_out) / TILEDIM), ceil((float) M / TILEDIM), 1);
int reroll_block = 1024;
int reroll_grid = ceil( (float) y_batch_size / 1024);
for (i = 0; i < ydims[0]; i += NUM_STREAMS)
{
for(j = 0; (i + j < ydims[0]) && (j < NUM_STREAMS); j++)
{
int xoffset = (i + j) * x_batch_size;
cudaMemcpyAsync(device_input[j], &X[xoffset], x_batch_size * sizeof(float), cudaMemcpyHostToDevice, streams[j]);
unroll_input<<< unroll_grid, unroll_block, 0, streams[j] >>> (H_out, W_out, C, H, w,
x_unrolled_height, x_unrolled_width,
device_input[j], device_X[j]);
}
for(j = 0; (i + j < ydims[0]) && (j < NUM_STREAMS); j++)
{
if(wdims[2] == 1)
matrixMultiplyShared1<<< mult_grid, mult_block, 0, streams[j] >>>(device_X[j], device_Y[j],
filter_height, filter_width,
x_unrolled_height, x_unrolled_width);
else
matrixMultiplyShared<<< mult_grid, mult_block, 0, streams[j] >>>(filter, device_X[j], device_Y[j],
filter_height, filter_width,
x_unrolled_height, x_unrolled_width);
}
for(j = 0; (i + j < ydims[0]) && (j < NUM_STREAMS); j++)
{
device_roll_Y[j] = device_output + (i + j) * y_batch_size;
reroll<<< reroll_grid, reroll_block, 0, streams[j] >>> (device_Y[j], device_roll_Y[j],
ydims[1], ydims[2], ydims[3]);
}
}
cudaMemcpy(Y, device_output, ydims[0] * y_batch_size * sizeof(float), cudaMemcpyDeviceToHost);
free(w_unrolled);
for(i = 0; i < NUM_STREAMS; i++)
{
cudaFree(device_input[i]);
cudaFree(device_X[i]);
cudaFree(device_Y[i]);
cudaFree(device_roll_Y[i]);
}
  // filter is only cudaMalloc'd when constant memory is NOT used, so free it on that path only
  if(wdims[2] != 1)
    cudaFree(filter);
}
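/*
 * Host-side sketch of the unrolling (im2col) step that unroll_input performs on
 * the device. The real unroll_input/unroll_filter kernels are defined elsewhere
 * in this file; this reference only illustrates the index math and assumes an
 * NHWC input layout (xdims = {N, H, W, C}, as implied by x_batch_size above)
 * and the row ordering row = c*K*K + p*K + q. Both orderings are assumptions
 * and would have to match unroll_filter for the reference to be usable.
 */
static void unroll_input_host_ref(const float *x, float *x_unrolled,
                                  int C, int W_in, int H_out, int W_out)
{
  for (int c = 0; c < C; c++)
    for (int p = 0; p < KERNEL_WIDTH; p++)
      for (int q = 0; q < KERNEL_WIDTH; q++) {
        int row = c * KERNEL_WIDTH * KERNEL_WIDTH + p * KERNEL_WIDTH + q;
        for (int h = 0; h < H_out; h++)
          for (int w_o = 0; w_o < W_out; w_o++) {
            int col = h * W_out + w_o;
            // NHWC address of the input element covered by this filter tap
            x_unrolled[row * (H_out * W_out) + col] =
                x[((h + p) * W_in + (w_o + q)) * C + c];
          }
      }
}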
/*
* conv_forward_valid2
* DESCRIPTION: Calls the kernel for basic convolution for the forward step.
* Only used if the input map is below a given threshold.
* INPUTS: X, xdims, W, wdims, Y, ydims
* OUTPUTS: none
* RETURN VALUE: none
*/
void conv_forward_valid2(const float *X, const int xdims[4],
const float *W, const int wdims[4], float *Y,
const int ydims[4])
{
float* device_input;
float* device_output;
float* filter_conv;
cudaMalloc((void **) &filter_conv, sizeof(float) * conv2dims[0] * conv2dims[1] * conv2dims[2] * conv2dims[3]);
cudaMalloc((void **) &device_input, sizeof(float) * xdims[0] * xdims[1] * xdims[2] * xdims[3]);
cudaMalloc((void **) &device_output, sizeof(float) * ydims[0] * ydims[1] * ydims[2] * ydims[3]);
cudaMemcpy(device_input, X, sizeof(float) * xdims[0] * xdims[1] * xdims[2] * xdims[3], cudaMemcpyHostToDevice);
  // Note: the z-dimension appears to assume square output maps (ydims[1] == ydims[2])
  dim3 dimGrid(xdims[0], ydims[3], ceil(ydims[1]/(float)TILE_WIDTH) * ceil(ydims[1]/(float)TILE_WIDTH));
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
size_t shmem_size = sizeof(float) * ( (TILE_WIDTH + KERNEL_WIDTH-1)*(TILE_WIDTH + KERNEL_WIDTH-1) + KERNEL_WIDTH*KERNEL_WIDTH );
cudaMemcpy(filter_conv, W, sizeof(float) * conv2dims[0] * conv2dims[1] * conv2dims[2] * conv2dims[3], cudaMemcpyHostToDevice);
convolution<<<dimGrid, dimBlock, shmem_size>>>(xdims[1], ydims[1], xdims[3], ydims[3],
device_input, filter_conv, device_output);
cudaMemcpy(Y, device_output, sizeof(float) * ydims[0] * ydims[1] * ydims[2] * ydims[3], cudaMemcpyDeviceToHost);
cudaFree(filter_conv);
cudaFree(device_output);
cudaFree(device_input);
return;
}
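/*
 * Worked example for the shared-memory request above, assuming TILE_WIDTH = 16
 * and KERNEL_WIDTH = 5 (typical for this LeNet-style network; both constants
 * are defined elsewhere in the file):
 *   shmem_size = 4 * ((16 + 5 - 1)^2 + 5*5) = 4 * (400 + 25) = 1700 bytes,
 * i.e. one (TILE_WIDTH + K - 1)^2 input tile plus one K x K filter per block.
 */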
/*
* average_pool
* DESCRIPTION: Calls the kernel for pooling
* INPUTS: X, xdims, Y, ydims
* OUTPUTS: none
* RETURN VALUE: none
*/
void average_pool(const float *X, const int xdims[4],
float *Y, const int ydims[4])
{
float* device_input;
float* device_output;
cudaMalloc((void **) &device_input, sizeof(float) * xdims[0] * xdims[1] * xdims[2] * xdims[3]);
cudaMalloc((void **) &device_output, sizeof(float) * ydims[0] * ydims[1] * ydims[2] * ydims[3]);
cudaMemcpy(device_input, X, sizeof(float) * xdims[0] * xdims[1] * xdims[2] * xdims[3], cudaMemcpyHostToDevice);
dim3 dimGrid(xdims[0], ydims[3], 1);
dim3 dimBlock(ydims[1], ydims[1]);
pooling<<<dimGrid, dimBlock>>>(xdims[1], ydims[1], xdims[3], ydims[3], device_input, device_output);
cudaMemcpy(Y, device_output, sizeof(float) * ydims[0] * ydims[1] * ydims[2] * ydims[3], cudaMemcpyDeviceToHost);
cudaFree(device_output);
cudaFree(device_input);
return;
}
/*
* fully_forward
* DESCRIPTION: Calls the kernel for matrix multiplication
* INPUTS: X, xdims, W, wdims, Y, ydims
* OUTPUTS: none
* RETURN VALUE: none
*/
void fully_forward(const float *X, const int xdims[2], float *W,
const int wdims[2], float *Y, const int ydims[2], int ver)
{
float* device_input;
float* device_output;
float* device_w;
cudaMalloc((void **) &device_input, sizeof(float) * xdims[0] * xdims[1]);
cudaMalloc((void **) &device_output, sizeof(float) * ydims[0] * ydims[1]);
cudaMalloc((void **) &device_w, sizeof(float)*wdims[0] * wdims[1]);
cudaMemcpy(device_input, X, sizeof(float) * xdims[0] * xdims[1], cudaMemcpyHostToDevice);
cudaMemcpy(device_w, W, sizeof(float) * wdims[0]*wdims[1], cudaMemcpyHostToDevice);
dim3 dimGrid(ceil(ydims[1]/(float)TILEDIM), ceil(ydims[0]/(float)TILEDIM));
dim3 dimBlock(TILEDIM, TILEDIM);
if(ver == 1)
matrixMultiplyShared<<<dimGrid,dimBlock>>>(device_input, device_w, device_output,
xdims[0], xdims[1],
wdims[0], wdims[1]);
else
matrixMultiplyShared_norm<<<dimGrid,dimBlock>>>(device_input, device_w, device_output,
xdims[0], xdims[1],
wdims[0], wdims[1]);
cudaMemcpy(Y, device_output, sizeof(float) * ydims[0] * ydims[1], cudaMemcpyDeviceToHost);
cudaFree(device_input);
cudaFree(device_output);
cudaFree(device_w);
}
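/*
 * Minimal host reference for the matrix product computed by fully_forward,
 * assuming row-major X (xdims[0] x xdims[1]) and W (wdims[0] x wdims[1]).
 * The ver == 1 device kernel appears to also fuse a ReLU; that part is
 * intentionally left out of this sketch, which is for checking only.
 */
static void fully_forward_host_ref(const float *X, const int xdims[2],
                                   const float *W, const int wdims[2],
                                   float *Y, const int ydims[2])
{
  for (int r = 0; r < ydims[0]; r++)
    for (int c = 0; c < ydims[1]; c++) {
      float sum = 0.0f;
      for (int k = 0; k < xdims[1]; k++)
        sum += X[r * xdims[1] + k] * W[k * wdims[1] + c];
      Y[r * ydims[1] + c] = sum;
    }
}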
/*------------------------- END OF KERNEL CALLS ----------------------------*/
// Forward operation for the CNN, a combination of conv layer + average pooling
// + relu
void forward_operation(float *x, float *conv1, float *conv2, float *fc1, float *fc2, int *out)
{
// conv layer
const int adims[] = {xdims[0], (xdims[1] - conv1dims[0] + 1), (xdims[2] - conv1dims[1] + 1), conv1dims[3]};
auto a = zeros<float>(adims);
if(xdims[0] >= 100)
conv_forward_valid(x, xdims, conv1, conv1dims, a, adims);
else
{
conv_forward_valid2(x, xdims, conv1, conv1dims, a, adims);
// relu layer
relu4(a, adims);
}
// average pooling
const int pool_size = 2;
const int bdims[] = {adims[0], adims[1] / pool_size, adims[2] / pool_size, adims[3]};
auto b = zeros<float>(bdims);
average_pool(a, adims, b, bdims);
// conv layer
const int cdims[] = {bdims[0], (bdims[1] - conv2dims[0] + 1), (bdims[2] - conv2dims[1] + 1), conv2dims[3]};
auto c = zeros<float>(cdims);
if(bdims[0] >= 100)
conv_forward_valid(b, bdims, conv2, conv2dims, c, cdims);
else
{
conv_forward_valid2(b, bdims, conv2, conv2dims, c, cdims);
// relu
relu4(c, cdims);
}
// average pooling
const int ddims[] = {cdims[0], cdims[1] / pool_size, cdims[2] / pool_size, cdims[3]};
auto d = zeros<float>(ddims);
average_pool(c, cdims, d, ddims);
// reshape
const int ddims2[] = {ddims[0], ddims[1] * ddims[2] * ddims[3]};
// matrix multiplication
const int edims[] = {ddims[0], fc1dims[1]};
auto e = zeros<float>(edims);
fully_forward(d, ddims2, fc1, fc1dims, e, edims, 1);
// relu
//relu2(e, edims);
// matrix multiplication
const int fdims[] = {edims[0], fc2dims[1]};
auto f = zeros<float>(fdims);
fully_forward(e, edims, fc2, fc2dims, f, fdims, 0);
argmax(f, fdims, out);
delete[] a;
delete[] b;
delete[] c;
delete[] d;
delete[] e;
delete[] f;
}
int main(int argc, char **argv)
{
if (argc != 3 && argc != 4) {
std::cerr << "\n"
<< "This program performs the forward opertion step for "
"Convolutional Neural Network(CNN). "
"Sample usage: \n"
<< argv[0]
<< " [../data/test10.hdf5] [../data/model.hdf5] [10]\n";
return -1;
}
FLAGS_testdata = std::string(argv[1]);
FLAGS_model = std::string(argv[2]);
if (argc == 3) {
const std::map<std::string, int> default_batch_sizes{
{"../data/test2.hdf5", 2},
{"../data/test10.hdf5", 10},
{"../data/test100.hdf5", 100},
{"../data/testfull.hdf5", 10000}};
const auto batch_size_in_map = default_batch_sizes.find(FLAGS_testdata);
if (batch_size_in_map == default_batch_sizes.end()) {
std::cerr << "\nERROR:: Unrecognized file " << FLAGS_testdata << " batch_size must be specified.\n";
return -1;
}
FLAGS_batch_size = batch_size_in_map->second;
} else if (argc == 4) {
FLAGS_batch_size = atoi(argv[3]);
}
xdims[0] = FLAGS_batch_size;
rdims[0] = FLAGS_batch_size;
// Load data into x and y
float *x = allocate<float>(xdims);
float *y = allocate<float>(rdims);
loadData(x, y);
// Load model
float *conv1 = allocate<float>(conv1dims);
float *conv2 = allocate<float>(conv2dims);
float *fc1 = allocate<float>(fc1dims);
float *fc2 = allocate<float>(fc2dims);
loadModel(conv1, conv2, fc1, fc2);
  // Perform forward operation
int *out = zeros<int>(FLAGS_batch_size);
// get start time
const auto start = now();
forward_operation(x, conv1, conv2, fc1, fc2, out);
// get end time
const auto end = now();
// get elapsed time in milliseconds
const auto elapsed =
std::chrono::duration<double, std::milli>(end - start).count();
// Get reference
int *ref = zeros<int>(FLAGS_batch_size);
argmax(y, rdims, ref);
// Calculate correctness
int num_correct = 0;
for (const auto i : range(0, FLAGS_batch_size)) {
if (out[i] == ref[i]) {
num_correct++;
}
}
std::cout << "Done with " << FLAGS_batch_size << " queries in "
<< "elapsed = " << elapsed << " milliseconds. Correctness: "
<< static_cast<float>(num_correct) / FLAGS_batch_size << "\n";
delete[] x;
delete[] y;
delete[] conv1;
delete[] conv2;
delete[] fc1;
delete[] fc2;
delete[] out;
delete[] ref;
return 0;
}
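/*
 * The CUDA runtime calls above are not checked for errors. A minimal checking
 * wrapper such as the one below could be applied to each cudaMalloc/cudaMemcpy
 * call; it is only a sketch (assuming <cstdio> and <cstdlib> are available)
 * and is not wired into the code above.
 */
#define CUDA_TRY(call)                                                        \
  do {                                                                        \
    cudaError_t err__ = (call);                                               \
    if (err__ != cudaSuccess) {                                               \
      fprintf(stderr, "CUDA error %s at %s:%d\n",                             \
              cudaGetErrorString(err__), __FILE__, __LINE__);                 \
      exit(EXIT_FAILURE);                                                     \
    }                                                                         \
  } while (0)
// Example: CUDA_TRY(cudaMalloc((void **) &device_output, ydims[0] * y_batch_size * sizeof(float)));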
|
a55502797bb8628d8fa1bd387e861b0e304be4be.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@precisions normal z -> s d c
@author Stan Tomov
@author Mark Gates
*/
#include "magma_internal.h"
#define NB 16
/******************************************************************************/
// grid is (n/nb) x ((n/nb)/2 + 1), where n/nb is odd.
// lower indicates blocks in lower triangle of grid, including diagonal.
// lower blocks cover left side of matrix, including diagonal.
// upper blocks swap block indices (x,y) and shift by grid width (or width-1)
// to cover right side of matrix.
// [ A00 A01 A02 ] [ A00 . . | . . ]
// [ A10 A11 A12 ] [ A10 A11 . | . . ]
// grid [ A20 A21 A22 ] covers matrix as [ A20 A21 A22 | . . ]
// [ A30 A31 A32 ] [ A30 A31 A32 | A01 . ]
// [ A40 A41 A42 ] [ A40 A41 A42 | A02 A12 ]
//
// See ztranspose_inplace_even for description of threads.
__global__ void ztranspose_inplace_odd(
int n,
magmaDoubleComplex *matrix, int lda )
{
__shared__ magmaDoubleComplex sA[ NB ][ NB+1 ];
__shared__ magmaDoubleComplex sB[ NB ][ NB+1 ];
int i = threadIdx.x;
int j = threadIdx.y;
bool lower = (blockIdx.x >= blockIdx.y);
int ii = (lower ? blockIdx.x : (blockIdx.y + gridDim.y - 1));
int jj = (lower ? blockIdx.y : (blockIdx.x + gridDim.y ));
ii *= NB;
jj *= NB;
magmaDoubleComplex *A = matrix + ii+i + (jj+j)*lda;
if ( ii == jj ) {
if ( ii+i < n && jj+j < n ) {
sA[j][i] = *A;
}
__syncthreads();
if ( ii+i < n && jj+j < n ) {
*A = sA[i][j];
}
}
else {
magmaDoubleComplex *B = matrix + jj+i + (ii+j)*lda;
if ( ii+i < n && jj+j < n ) {
sA[j][i] = *A;
}
if ( jj+i < n && ii+j < n ) {
sB[j][i] = *B;
}
__syncthreads();
if ( ii+i < n && jj+j < n ) {
*A = sB[i][j];
}
if ( jj+i < n && ii+j < n ) {
*B = sA[i][j];
}
}
}
/******************************************************************************/
// grid is ((n/nb) + 1) x (n/nb)/2, where n/nb is even.
// lower indicates blocks in strictly lower triangle of grid, excluding diagonal.
// lower blocks shift up by one to cover left side of matrix including diagonal.
// upper blocks swap block indices (x,y) and shift by grid width
// to cover right side of matrix.
// [ A00 A01 ] [ A10 . | . . ]
// [ A10 A11 ] [ A20 A21 | . . ]
// grid [ A20 A21 ] covers matrix as [ A30 A31 | A00 . ]
// [ A30 A31 ] [ A40 A41 | A01 A11 ]
// [ A40 A41 ]
//
// Each block is NB x NB threads.
// For non-diagonal block A, block B is symmetric block.
// Thread (i,j) loads A(i,j) into sA(j,i) and B(i,j) into sB(j,i), i.e., transposed,
// syncs, then saves sA(i,j) to B(i,j) and sB(i,j) to A(i,j).
// Threads outside the matrix do not touch memory.
__global__ void ztranspose_inplace_even(
int n,
magmaDoubleComplex *matrix, int lda )
{
__shared__ magmaDoubleComplex sA[ NB ][ NB+1 ];
__shared__ magmaDoubleComplex sB[ NB ][ NB+1 ];
int i = threadIdx.x;
int j = threadIdx.y;
bool lower = (blockIdx.x > blockIdx.y);
int ii = (lower ? (blockIdx.x - 1) : (blockIdx.y + gridDim.y));
int jj = (lower ? (blockIdx.y ) : (blockIdx.x + gridDim.y));
ii *= NB;
jj *= NB;
magmaDoubleComplex *A = matrix + ii+i + (jj+j)*lda;
if ( ii == jj ) {
if ( ii+i < n && jj+j < n ) {
sA[j][i] = *A;
}
__syncthreads();
if ( ii+i < n && jj+j < n ) {
*A = sA[i][j];
}
}
else {
magmaDoubleComplex *B = matrix + jj+i + (ii+j)*lda;
if ( ii+i < n && jj+j < n ) {
sA[j][i] = *A;
}
if ( jj+i < n && ii+j < n ) {
sB[j][i] = *B;
}
__syncthreads();
if ( ii+i < n && jj+j < n ) {
*A = sB[i][j];
}
if ( jj+i < n && ii+j < n ) {
*B = sA[i][j];
}
}
}
/***************************************************************************//**
Purpose
-------
ztranspose_inplace_q transposes a square N-by-N matrix in-place.
Same as ztranspose_inplace, but adds queue argument.
Arguments
---------
@param[in]
n INTEGER
The number of rows & columns of the matrix dA. N >= 0.
@param[in]
dA COMPLEX_16 array, dimension (LDDA,N)
The N-by-N matrix dA.
On exit, dA(j,i) = dA_original(i,j), for 0 <= i,j < N.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= N.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_transpose
*******************************************************************************/
extern "C" void
magmablas_ztranspose_inplace(
magma_int_t n,
magmaDoubleComplex_ptr dA, magma_int_t ldda,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( n < 0 )
info = -1;
else if ( ldda < n )
info = -3;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return; //info;
}
dim3 threads( NB, NB );
int nblock = magma_ceildiv( n, NB );
// need 1/2 * (nblock+1) * nblock to cover lower triangle and diagonal of matrix.
// block assignment differs depending on whether nblock is odd or even.
if ( nblock % 2 == 1 ) {
dim3 grid( nblock, (nblock+1)/2 );
hipLaunchKernelGGL(( ztranspose_inplace_odd), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda );
}
else {
dim3 grid( nblock+1, nblock/2 );
hipLaunchKernelGGL(( ztranspose_inplace_even), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda );
}
}
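/*
 * Standalone host sketch of the block-index remapping used by
 * ztranspose_inplace_odd (see the diagram above that kernel). For nblock = 5
 * the grid is 5 x 3; lower-triangle blocks keep their indices, upper blocks
 * are reflected to cover the right side of the matrix. Illustrative only;
 * this helper is not called anywhere in MAGMA.
 */
#include <cstdio>
static void print_ztranspose_odd_mapping()
{
    const int gx = 5, gy = 3;   // nblock = 5 (odd): grid is nblock x (nblock+1)/2
    for (int by = 0; by < gy; ++by)
        for (int bx = 0; bx < gx; ++bx) {
            bool lower = (bx >= by);
            int ii = lower ? bx : (by + gy - 1);   // block-row of the tile handled
            int jj = lower ? by : (bx + gy);       // block-col of the tile handled
            printf("grid block (%d,%d) -> matrix tile (%d,%d)\n", bx, by, ii, jj);
        }
}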
|
a55502797bb8628d8fa1bd387e861b0e304be4be.cu
|
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@precisions normal z -> s d c
@author Stan Tomov
@author Mark Gates
*/
#include "magma_internal.h"
#define NB 16
/******************************************************************************/
// grid is (n/nb) x ((n/nb)/2 + 1), where n/nb is odd.
// lower indicates blocks in lower triangle of grid, including diagonal.
// lower blocks cover left side of matrix, including diagonal.
// upper blocks swap block indices (x,y) and shift by grid width (or width-1)
// to cover right side of matrix.
// [ A00 A01 A02 ] [ A00 . . | . . ]
// [ A10 A11 A12 ] [ A10 A11 . | . . ]
// grid [ A20 A21 A22 ] covers matrix as [ A20 A21 A22 | . . ]
// [ A30 A31 A32 ] [ A30 A31 A32 | A01 . ]
// [ A40 A41 A42 ] [ A40 A41 A42 | A02 A12 ]
//
// See ztranspose_inplace_even for description of threads.
__global__ void ztranspose_inplace_odd(
int n,
magmaDoubleComplex *matrix, int lda )
{
__shared__ magmaDoubleComplex sA[ NB ][ NB+1 ];
__shared__ magmaDoubleComplex sB[ NB ][ NB+1 ];
int i = threadIdx.x;
int j = threadIdx.y;
bool lower = (blockIdx.x >= blockIdx.y);
int ii = (lower ? blockIdx.x : (blockIdx.y + gridDim.y - 1));
int jj = (lower ? blockIdx.y : (blockIdx.x + gridDim.y ));
ii *= NB;
jj *= NB;
magmaDoubleComplex *A = matrix + ii+i + (jj+j)*lda;
if ( ii == jj ) {
if ( ii+i < n && jj+j < n ) {
sA[j][i] = *A;
}
__syncthreads();
if ( ii+i < n && jj+j < n ) {
*A = sA[i][j];
}
}
else {
magmaDoubleComplex *B = matrix + jj+i + (ii+j)*lda;
if ( ii+i < n && jj+j < n ) {
sA[j][i] = *A;
}
if ( jj+i < n && ii+j < n ) {
sB[j][i] = *B;
}
__syncthreads();
if ( ii+i < n && jj+j < n ) {
*A = sB[i][j];
}
if ( jj+i < n && ii+j < n ) {
*B = sA[i][j];
}
}
}
/******************************************************************************/
// grid is ((n/nb) + 1) x (n/nb)/2, where n/nb is even.
// lower indicates blocks in strictly lower triangle of grid, excluding diagonal.
// lower blocks shift up by one to cover left side of matrix including diagonal.
// upper blocks swap block indices (x,y) and shift by grid width
// to cover right side of matrix.
// [ A00 A01 ] [ A10 . | . . ]
// [ A10 A11 ] [ A20 A21 | . . ]
// grid [ A20 A21 ] covers matrix as [ A30 A31 | A00 . ]
// [ A30 A31 ] [ A40 A41 | A01 A11 ]
// [ A40 A41 ]
//
// Each block is NB x NB threads.
// For non-diagonal block A, block B is symmetric block.
// Thread (i,j) loads A(i,j) into sA(j,i) and B(i,j) into sB(j,i), i.e., transposed,
// syncs, then saves sA(i,j) to B(i,j) and sB(i,j) to A(i,j).
// Threads outside the matrix do not touch memory.
__global__ void ztranspose_inplace_even(
int n,
magmaDoubleComplex *matrix, int lda )
{
__shared__ magmaDoubleComplex sA[ NB ][ NB+1 ];
__shared__ magmaDoubleComplex sB[ NB ][ NB+1 ];
int i = threadIdx.x;
int j = threadIdx.y;
bool lower = (blockIdx.x > blockIdx.y);
int ii = (lower ? (blockIdx.x - 1) : (blockIdx.y + gridDim.y));
int jj = (lower ? (blockIdx.y ) : (blockIdx.x + gridDim.y));
ii *= NB;
jj *= NB;
magmaDoubleComplex *A = matrix + ii+i + (jj+j)*lda;
if ( ii == jj ) {
if ( ii+i < n && jj+j < n ) {
sA[j][i] = *A;
}
__syncthreads();
if ( ii+i < n && jj+j < n ) {
*A = sA[i][j];
}
}
else {
magmaDoubleComplex *B = matrix + jj+i + (ii+j)*lda;
if ( ii+i < n && jj+j < n ) {
sA[j][i] = *A;
}
if ( jj+i < n && ii+j < n ) {
sB[j][i] = *B;
}
__syncthreads();
if ( ii+i < n && jj+j < n ) {
*A = sB[i][j];
}
if ( jj+i < n && ii+j < n ) {
*B = sA[i][j];
}
}
}
/***************************************************************************//**
Purpose
-------
ztranspose_inplace_q transposes a square N-by-N matrix in-place.
Same as ztranspose_inplace, but adds queue argument.
Arguments
---------
@param[in]
n INTEGER
The number of rows & columns of the matrix dA. N >= 0.
@param[in]
dA COMPLEX_16 array, dimension (LDDA,N)
The N-by-N matrix dA.
On exit, dA(j,i) = dA_original(i,j), for 0 <= i,j < N.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= N.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_transpose
*******************************************************************************/
extern "C" void
magmablas_ztranspose_inplace(
magma_int_t n,
magmaDoubleComplex_ptr dA, magma_int_t ldda,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( n < 0 )
info = -1;
else if ( ldda < n )
info = -3;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return; //info;
}
dim3 threads( NB, NB );
int nblock = magma_ceildiv( n, NB );
// need 1/2 * (nblock+1) * nblock to cover lower triangle and diagonal of matrix.
// block assignment differs depending on whether nblock is odd or even.
if ( nblock % 2 == 1 ) {
dim3 grid( nblock, (nblock+1)/2 );
ztranspose_inplace_odd<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda );
}
else {
dim3 grid( nblock+1, nblock/2 );
ztranspose_inplace_even<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda );
}
}
|
8144208564385790f9b825550255310bfe645442.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/softmax_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
counts[index] = 1;
}
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SoftmaxLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_,
valid_count);
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
counts[index] = 1;
}
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is never used for anything else,
    // we use it to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SoftmaxLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
const Dtype loss_weight = top[0]->cpu_diff()[0] /
get_normalizer(normalization_, valid_count);
caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer);
} // namespace caffe
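#include <cmath>   // std::log for the reference below
/*
 * Host sketch of the per-position math implemented by the two kernels above,
 * for a single (n, s) location with float data: loss = -log(prob[label])
 * clamped at FLT_MIN, and the gradient is prob with 1 subtracted at the label
 * channel. Illustrative only; it mirrors the device code and is not used by
 * Caffe itself.
 */
static float softmax_loss_host_ref(const float* prob, int channels, int label,
                                   float* grad /* length channels */) {
  for (int c = 0; c < channels; ++c) grad[c] = prob[c];
  grad[label] -= 1.0f;                                  // dL/dz for softmax + NLL
  float p = prob[label] > FLT_MIN ? prob[label] : FLT_MIN;
  return -std::log(p);
}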
|
8144208564385790f9b825550255310bfe645442.cu
|
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/softmax_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
counts[index] = 1;
}
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
SoftmaxLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_,
valid_count);
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
counts[index] = 1;
}
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is never used for anything else,
    // we use it to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
SoftmaxLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
const Dtype loss_weight = top[0]->cpu_diff()[0] /
get_normalizer(normalization_, valid_count);
caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer);
} // namespace caffe
|
0ea290c140ec7c72ff3088ad631749a865745dc9.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include "../test_utils.h"
#include <raft/sparse/utils.h>
#include <raft/sparse/selection/selection.cuh>
namespace raft {
namespace sparse {
namespace selection {
using namespace raft;
using namespace raft::sparse;
template <typename value_idx, typename value_t>
struct SparseSelectionInputs {
value_idx n_rows;
value_idx n_cols;
std::vector<value_t> dists_h;
std::vector<value_t> out_dists_ref_h;
std::vector<value_idx> out_indices_ref_h;
int k;
bool select_min;
};
template <typename value_idx, typename value_t>
::std::ostream &operator<<(
::std::ostream &os, const SparseSelectionInputs<value_idx, value_t> &dims) {
return os;
}
template <typename value_idx, typename value_t>
class SparseSelectionTest
: public ::testing::TestWithParam<SparseSelectionInputs<value_idx, value_t>> {
protected:
void make_data() {
std::vector<value_t> dists_h = params.dists_h;
allocate(dists, n_rows * n_cols);
update_device(dists, dists_h.data(), dists_h.size(), stream);
allocate(inds, n_rows * n_cols);
iota_fill(inds, n_rows, n_cols, stream);
std::vector<value_t> out_dists_ref_h = params.out_dists_ref_h;
std::vector<value_idx> out_indices_ref_h = params.out_indices_ref_h;
allocate(out_indices_ref, out_indices_ref_h.size());
allocate(out_dists_ref, out_dists_ref_h.size());
update_device(out_indices_ref, out_indices_ref_h.data(),
out_indices_ref_h.size(), stream);
update_device(out_dists_ref, out_dists_ref_h.data(), out_dists_ref_h.size(),
stream);
allocate(out_dists, n_rows * k);
allocate(out_indices, n_rows * k);
}
void SetUp() override {
params = ::testing::TestWithParam<
SparseSelectionInputs<value_idx, value_t>>::GetParam();
std::shared_ptr<raft::mr::device::allocator> alloc(
new raft::mr::device::default_allocator);
CUDA_CHECK(hipStreamCreate(&stream));
n_rows = params.n_rows;
n_cols = params.n_cols;
k = params.k;
make_data();
raft::sparse::selection::select_k(dists, inds, n_rows, n_cols, out_dists,
out_indices, params.select_min, k,
stream);
CUDA_CHECK(hipStreamSynchronize(stream));
}
void TearDown() override {
CUDA_CHECK(hipStreamSynchronize(stream));
CUDA_CHECK(hipFree(dists));
CUDA_CHECK(hipFree(inds));
CUDA_CHECK(hipFree(out_indices));
CUDA_CHECK(hipFree(out_dists));
CUDA_CHECK(hipFree(out_indices_ref));
CUDA_CHECK(hipFree(out_dists_ref));
CUDA_CHECK(hipStreamDestroy(stream));
}
void compare() {
ASSERT_TRUE(
devArrMatch(out_dists_ref, out_dists, n_rows * k, Compare<value_t>()));
ASSERT_TRUE(devArrMatch(out_indices_ref, out_indices, n_rows * k,
Compare<value_idx>()));
}
protected:
hipStream_t stream;
int n_rows, n_cols, k;
// input data
value_t *dists;
value_idx *inds;
// output data
value_idx *out_indices;
value_t *out_dists;
value_idx *out_indices_ref;
value_t *out_dists_ref;
SparseSelectionInputs<value_idx, value_t> params;
};
const std::vector<SparseSelectionInputs<int, float>> inputs_i32_f = {
{5,
5,
{5.0, 4.0, 3.0, 2.0, 1.0, 1.0, 2.0, 3.0, 4.0, 5.0, 2.0, 3.0, 5.0,
1.0, 4.0, 5.0, 3.0, 2.0, 4.0, 1.0, 1.0, 3.0, 2.0, 5.0, 4.0},
{1.0, 2.0, 3.0, 4.0, 5.0, 1.0, 2.0, 3.0, 4.0, 5.0, 1.0, 2.0, 3.0,
4.0, 5.0, 1.0, 2.0, 3.0, 4.0, 5.0, 1.0, 2.0, 3.0, 4.0, 5.0},
{4, 3, 2, 1, 0, 0, 1, 2, 3, 4, 3, 0, 1, 4, 2, 4, 2, 1, 3, 0, 0, 2, 1, 4, 3},
5,
true}};
typedef SparseSelectionTest<int, float> SparseSelectionTestF;
TEST_P(SparseSelectionTestF, Result) { compare(); }
INSTANTIATE_TEST_CASE_P(SparseSelectionTest, SparseSelectionTestF,
::testing::ValuesIn(inputs_i32_f));
}; // end namespace selection
}; // end namespace sparse
}; // end namespace raft
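/*
 * Host sketch of the behaviour the test above expects from select_k with
 * select_min = true: for each row, the k smallest distances in ascending order
 * together with their column indices (inds is filled with 0..n_cols-1 per row
 * by iota_fill). Illustrative only; the test compares against the hard-coded
 * reference vectors rather than this function.
 */
#include <algorithm>
#include <numeric>
#include <vector>
static void select_k_host_ref(const std::vector<float>& dists, int n_rows,
                              int n_cols, int k, std::vector<float>* out_dists,
                              std::vector<int>* out_idx) {
  out_dists->assign(n_rows * k, 0.f);
  out_idx->assign(n_rows * k, 0);
  for (int r = 0; r < n_rows; ++r) {
    std::vector<int> cols(n_cols);
    std::iota(cols.begin(), cols.end(), 0);
    std::sort(cols.begin(), cols.end(), [&](int a, int b) {
      return dists[r * n_cols + a] < dists[r * n_cols + b];
    });
    for (int i = 0; i < k; ++i) {
      (*out_idx)[r * k + i] = cols[i];
      (*out_dists)[r * k + i] = dists[r * n_cols + cols[i]];
    }
  }
}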
|
0ea290c140ec7c72ff3088ad631749a865745dc9.cu
|
/*
* Copyright (c) 2018-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include "../test_utils.h"
#include <raft/sparse/utils.h>
#include <raft/sparse/selection/selection.cuh>
namespace raft {
namespace sparse {
namespace selection {
using namespace raft;
using namespace raft::sparse;
template <typename value_idx, typename value_t>
struct SparseSelectionInputs {
value_idx n_rows;
value_idx n_cols;
std::vector<value_t> dists_h;
std::vector<value_t> out_dists_ref_h;
std::vector<value_idx> out_indices_ref_h;
int k;
bool select_min;
};
template <typename value_idx, typename value_t>
::std::ostream &operator<<(
::std::ostream &os, const SparseSelectionInputs<value_idx, value_t> &dims) {
return os;
}
template <typename value_idx, typename value_t>
class SparseSelectionTest
: public ::testing::TestWithParam<SparseSelectionInputs<value_idx, value_t>> {
protected:
void make_data() {
std::vector<value_t> dists_h = params.dists_h;
allocate(dists, n_rows * n_cols);
update_device(dists, dists_h.data(), dists_h.size(), stream);
allocate(inds, n_rows * n_cols);
iota_fill(inds, n_rows, n_cols, stream);
std::vector<value_t> out_dists_ref_h = params.out_dists_ref_h;
std::vector<value_idx> out_indices_ref_h = params.out_indices_ref_h;
allocate(out_indices_ref, out_indices_ref_h.size());
allocate(out_dists_ref, out_dists_ref_h.size());
update_device(out_indices_ref, out_indices_ref_h.data(),
out_indices_ref_h.size(), stream);
update_device(out_dists_ref, out_dists_ref_h.data(), out_dists_ref_h.size(),
stream);
allocate(out_dists, n_rows * k);
allocate(out_indices, n_rows * k);
}
void SetUp() override {
params = ::testing::TestWithParam<
SparseSelectionInputs<value_idx, value_t>>::GetParam();
std::shared_ptr<raft::mr::device::allocator> alloc(
new raft::mr::device::default_allocator);
CUDA_CHECK(cudaStreamCreate(&stream));
n_rows = params.n_rows;
n_cols = params.n_cols;
k = params.k;
make_data();
raft::sparse::selection::select_k(dists, inds, n_rows, n_cols, out_dists,
out_indices, params.select_min, k,
stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
}
void TearDown() override {
CUDA_CHECK(cudaStreamSynchronize(stream));
CUDA_CHECK(cudaFree(dists));
CUDA_CHECK(cudaFree(inds));
CUDA_CHECK(cudaFree(out_indices));
CUDA_CHECK(cudaFree(out_dists));
CUDA_CHECK(cudaFree(out_indices_ref));
CUDA_CHECK(cudaFree(out_dists_ref));
CUDA_CHECK(cudaStreamDestroy(stream));
}
void compare() {
ASSERT_TRUE(
devArrMatch(out_dists_ref, out_dists, n_rows * k, Compare<value_t>()));
ASSERT_TRUE(devArrMatch(out_indices_ref, out_indices, n_rows * k,
Compare<value_idx>()));
}
protected:
cudaStream_t stream;
int n_rows, n_cols, k;
// input data
value_t *dists;
value_idx *inds;
// output data
value_idx *out_indices;
value_t *out_dists;
value_idx *out_indices_ref;
value_t *out_dists_ref;
SparseSelectionInputs<value_idx, value_t> params;
};
const std::vector<SparseSelectionInputs<int, float>> inputs_i32_f = {
{5,
5,
{5.0, 4.0, 3.0, 2.0, 1.0, 1.0, 2.0, 3.0, 4.0, 5.0, 2.0, 3.0, 5.0,
1.0, 4.0, 5.0, 3.0, 2.0, 4.0, 1.0, 1.0, 3.0, 2.0, 5.0, 4.0},
{1.0, 2.0, 3.0, 4.0, 5.0, 1.0, 2.0, 3.0, 4.0, 5.0, 1.0, 2.0, 3.0,
4.0, 5.0, 1.0, 2.0, 3.0, 4.0, 5.0, 1.0, 2.0, 3.0, 4.0, 5.0},
{4, 3, 2, 1, 0, 0, 1, 2, 3, 4, 3, 0, 1, 4, 2, 4, 2, 1, 3, 0, 0, 2, 1, 4, 3},
5,
true}};
typedef SparseSelectionTest<int, float> SparseSelectionTestF;
TEST_P(SparseSelectionTestF, Result) { compare(); }
INSTANTIATE_TEST_CASE_P(SparseSelectionTest, SparseSelectionTestF,
::testing::ValuesIn(inputs_i32_f));
}; // end namespace selection
}; // end namespace sparse
}; // end namespace raft
|
8a4f1dcece2f890acd39119b28f7b6a2b9629994.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2017-2023 by XGBoost Contributors
*/
#include <GPUTreeShap/gpu_treeshap.h>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/host_vector.h>
#include <any> // for any, any_cast
#include <memory>
#include "../common/bitfield.h"
#include "../common/categorical.h"
#include "../common/common.h"
#include "../common/device_helpers.cuh"
#include "../data/device_adapter.cuh"
#include "../data/ellpack_page.cuh"
#include "../data/proxy_dmatrix.h"
#include "../gbm/gbtree_model.h"
#include "predict_fn.h"
#include "xgboost/data.h"
#include "xgboost/host_device_vector.h"
#include "xgboost/predictor.h"
#include "xgboost/tree_model.h"
#include "xgboost/tree_updater.h"
namespace xgboost::predictor {
DMLC_REGISTRY_FILE_TAG(gpu_predictor);
struct TreeView {
RegTree::CategoricalSplitMatrix cats;
common::Span<RegTree::Node const> d_tree;
XGBOOST_DEVICE
TreeView(size_t tree_begin, size_t tree_idx, common::Span<const RegTree::Node> d_nodes,
common::Span<size_t const> d_tree_segments,
common::Span<FeatureType const> d_tree_split_types,
common::Span<uint32_t const> d_cat_tree_segments,
common::Span<RegTree::CategoricalSplitMatrix::Segment const> d_cat_node_segments,
common::Span<uint32_t const> d_categories) {
auto begin = d_tree_segments[tree_idx - tree_begin];
auto n_nodes = d_tree_segments[tree_idx - tree_begin + 1] -
d_tree_segments[tree_idx - tree_begin];
d_tree = d_nodes.subspan(begin, n_nodes);
auto tree_cat_ptrs = d_cat_node_segments.subspan(begin, n_nodes);
auto tree_split_types = d_tree_split_types.subspan(begin, n_nodes);
auto tree_categories =
d_categories.subspan(d_cat_tree_segments[tree_idx - tree_begin],
d_cat_tree_segments[tree_idx - tree_begin + 1] -
d_cat_tree_segments[tree_idx - tree_begin]);
cats.split_type = tree_split_types;
cats.categories = tree_categories;
cats.node_ptr = tree_cat_ptrs;
}
__device__ bool HasCategoricalSplit() const {
return !cats.categories.empty();
}
};
struct SparsePageView {
common::Span<const Entry> d_data;
common::Span<const bst_row_t> d_row_ptr;
bst_feature_t num_features;
SparsePageView() = default;
XGBOOST_DEVICE SparsePageView(common::Span<const Entry> data,
common::Span<const bst_row_t> row_ptr,
bst_feature_t num_features)
: d_data{data}, d_row_ptr{row_ptr}, num_features(num_features) {}
__device__ float GetElement(size_t ridx, size_t fidx) const {
// Binary search
auto begin_ptr = d_data.begin() + d_row_ptr[ridx];
auto end_ptr = d_data.begin() + d_row_ptr[ridx + 1];
if (end_ptr - begin_ptr == this->NumCols()) {
// Bypass span check for dense data
return d_data.data()[d_row_ptr[ridx] + fidx].fvalue;
}
common::Span<const Entry>::iterator previous_middle;
while (end_ptr != begin_ptr) {
auto middle = begin_ptr + (end_ptr - begin_ptr) / 2;
if (middle == previous_middle) {
break;
} else {
previous_middle = middle;
}
if (middle->index == fidx) {
return middle->fvalue;
} else if (middle->index < fidx) {
begin_ptr = middle;
} else {
end_ptr = middle;
}
}
// Value is missing
return nanf("");
}
XGBOOST_DEVICE size_t NumRows() const { return d_row_ptr.size() - 1; }
XGBOOST_DEVICE size_t NumCols() const { return num_features; }
};
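// Worked CSR example for GetElement above (values are hypothetical): for a
// 2 x 3 matrix with rows [10, _, 30] and [_, 20, _],
//   d_row_ptr = {0, 2, 3}
//   d_data    = {(0,10), (2,30), (1,20)}   // (index, fvalue) Entry pairs
// GetElement(0, 2) binary-searches d_data[0..2) for column index 2 and returns 30;
// GetElement(1, 0) finds no entry with column index 0 in d_data[2..3) and returns NaN.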
struct SparsePageLoader {
bool use_shared;
SparsePageView data;
float* smem;
size_t entry_start;
__device__ SparsePageLoader(SparsePageView data, bool use_shared, bst_feature_t num_features,
bst_row_t num_rows, size_t entry_start, float)
: use_shared(use_shared),
data(data),
entry_start(entry_start) {
extern __shared__ float _smem[];
smem = _smem;
// Copy instances
if (use_shared) {
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
int shared_elements = blockDim.x * data.num_features;
dh::BlockFill(smem, shared_elements, nanf(""));
__syncthreads();
if (global_idx < num_rows) {
bst_uint elem_begin = data.d_row_ptr[global_idx];
bst_uint elem_end = data.d_row_ptr[global_idx + 1];
for (bst_uint elem_idx = elem_begin; elem_idx < elem_end; elem_idx++) {
Entry elem = data.d_data[elem_idx - entry_start];
smem[threadIdx.x * data.num_features + elem.index] = elem.fvalue;
}
}
__syncthreads();
}
}
__device__ float GetElement(size_t ridx, size_t fidx) const {
if (use_shared) {
return smem[threadIdx.x * data.num_features + fidx];
} else {
return data.GetElement(ridx, fidx);
}
}
};
struct EllpackLoader {
EllpackDeviceAccessor const& matrix;
XGBOOST_DEVICE EllpackLoader(EllpackDeviceAccessor const& m, bool, bst_feature_t, bst_row_t,
size_t, float)
: matrix{m} {}
__device__ __forceinline__ float GetElement(size_t ridx, size_t fidx) const {
auto gidx = matrix.GetBinIndex(ridx, fidx);
if (gidx == -1) {
return nan("");
}
if (common::IsCat(matrix.feature_types, fidx)) {
return matrix.gidx_fvalue_map[gidx];
}
// The gradient index needs to be shifted by one as min values are not included in the
// cuts.
if (gidx == matrix.feature_segments[fidx]) {
return matrix.min_fvalue[fidx];
}
return matrix.gidx_fvalue_map[gidx - 1];
}
};
template <typename Batch>
struct DeviceAdapterLoader {
Batch batch;
bst_feature_t columns;
float* smem;
bool use_shared;
data::IsValidFunctor is_valid;
using BatchT = Batch;
XGBOOST_DEV_INLINE DeviceAdapterLoader(Batch const batch, bool use_shared,
bst_feature_t num_features, bst_row_t num_rows,
size_t entry_start, float missing) :
batch{batch},
columns{num_features},
use_shared{use_shared},
is_valid{missing} {
extern __shared__ float _smem[];
smem = _smem;
if (use_shared) {
uint32_t global_idx = blockDim.x * blockIdx.x + threadIdx.x;
size_t shared_elements = blockDim.x * num_features;
dh::BlockFill(smem, shared_elements, nanf(""));
__syncthreads();
if (global_idx < num_rows) {
auto beg = global_idx * columns;
auto end = (global_idx + 1) * columns;
for (size_t i = beg; i < end; ++i) {
auto value = batch.GetElement(i).value;
if (is_valid(value)) {
smem[threadIdx.x * num_features + (i - beg)] = value;
}
}
}
}
__syncthreads();
}
XGBOOST_DEV_INLINE float GetElement(size_t ridx, size_t fidx) const {
if (use_shared) {
return smem[threadIdx.x * columns + fidx];
}
auto value = batch.GetElement(ridx * columns + fidx).value;
if (is_valid(value)) {
return value;
} else {
return nan("");
}
}
};
template <bool has_missing, bool has_categorical, typename Loader>
__device__ bst_node_t GetLeafIndex(bst_row_t ridx, TreeView const &tree,
Loader *loader) {
bst_node_t nidx = 0;
RegTree::Node n = tree.d_tree[nidx];
while (!n.IsLeaf()) {
float fvalue = loader->GetElement(ridx, n.SplitIndex());
bool is_missing = common::CheckNAN(fvalue);
nidx = GetNextNode<has_missing, has_categorical>(n, nidx, fvalue,
is_missing, tree.cats);
n = tree.d_tree[nidx];
}
return nidx;
}
template <bool has_missing, typename Loader>
__device__ float GetLeafWeight(bst_row_t ridx, TreeView const &tree,
Loader *loader) {
bst_node_t nidx = -1;
if (tree.HasCategoricalSplit()) {
nidx = GetLeafIndex<has_missing, true>(ridx, tree, loader);
} else {
nidx = GetLeafIndex<has_missing, false>(ridx, tree, loader);
}
return tree.d_tree[nidx].LeafValue();
}
template <typename Loader, typename Data>
__global__ void
PredictLeafKernel(Data data, common::Span<const RegTree::Node> d_nodes,
common::Span<float> d_out_predictions,
common::Span<size_t const> d_tree_segments,
common::Span<FeatureType const> d_tree_split_types,
common::Span<uint32_t const> d_cat_tree_segments,
common::Span<RegTree::CategoricalSplitMatrix::Segment const> d_cat_node_segments,
common::Span<uint32_t const> d_categories,
size_t tree_begin, size_t tree_end, size_t num_features,
size_t num_rows, size_t entry_start, bool use_shared,
float missing) {
bst_row_t ridx = blockDim.x * blockIdx.x + threadIdx.x;
if (ridx >= num_rows) {
return;
}
Loader loader(data, use_shared, num_features, num_rows, entry_start, missing);
for (size_t tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
TreeView d_tree{
tree_begin, tree_idx, d_nodes,
d_tree_segments, d_tree_split_types, d_cat_tree_segments,
d_cat_node_segments, d_categories};
bst_node_t leaf = -1;
if (d_tree.HasCategoricalSplit()) {
leaf = GetLeafIndex<true, true>(ridx, d_tree, &loader);
} else {
leaf = GetLeafIndex<true, false>(ridx, d_tree, &loader);
}
d_out_predictions[ridx * (tree_end - tree_begin) + tree_idx] = leaf;
}
}
template <typename Loader, typename Data, bool has_missing = true>
__global__ void
PredictKernel(Data data, common::Span<const RegTree::Node> d_nodes,
common::Span<float> d_out_predictions,
common::Span<size_t const> d_tree_segments,
common::Span<int const> d_tree_group,
common::Span<FeatureType const> d_tree_split_types,
common::Span<uint32_t const> d_cat_tree_segments,
common::Span<RegTree::CategoricalSplitMatrix::Segment const> d_cat_node_segments,
common::Span<uint32_t const> d_categories, size_t tree_begin,
size_t tree_end, size_t num_features, size_t num_rows,
size_t entry_start, bool use_shared, int num_group, float missing) {
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
Loader loader(data, use_shared, num_features, num_rows, entry_start, missing);
if (global_idx >= num_rows) return;
if (num_group == 1) {
float sum = 0;
for (size_t tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
TreeView d_tree{
tree_begin, tree_idx, d_nodes,
d_tree_segments, d_tree_split_types, d_cat_tree_segments,
d_cat_node_segments, d_categories};
float leaf = GetLeafWeight<has_missing>(global_idx, d_tree, &loader);
sum += leaf;
}
d_out_predictions[global_idx] += sum;
} else {
for (size_t tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
int tree_group = d_tree_group[tree_idx];
TreeView d_tree{
tree_begin, tree_idx, d_nodes,
d_tree_segments, d_tree_split_types, d_cat_tree_segments,
d_cat_node_segments, d_categories};
bst_uint out_prediction_idx = global_idx * num_group + tree_group;
d_out_predictions[out_prediction_idx] +=
GetLeafWeight<has_missing>(global_idx, d_tree, &loader);
}
}
}
class DeviceModel {
public:
// Need to lazily construct the vectors because GPU id is only known at runtime
HostDeviceVector<RTreeNodeStat> stats;
HostDeviceVector<size_t> tree_segments;
HostDeviceVector<RegTree::Node> nodes;
HostDeviceVector<int> tree_group;
HostDeviceVector<FeatureType> split_types;
// Pointer to each tree, segmenting the node array.
HostDeviceVector<uint32_t> categories_tree_segments;
// Pointer to each node, segmenting categories array.
HostDeviceVector<RegTree::CategoricalSplitMatrix::Segment> categories_node_segments;
HostDeviceVector<uint32_t> categories;
size_t tree_beg_; // NOLINT
size_t tree_end_; // NOLINT
int num_group;
void Init(const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end, int32_t gpu_id) {
dh::safe_cuda(hipSetDevice(gpu_id));
// Copy decision trees to device
tree_segments = std::move(HostDeviceVector<size_t>({}, gpu_id));
auto& h_tree_segments = tree_segments.HostVector();
h_tree_segments.reserve((tree_end - tree_begin) + 1);
size_t sum = 0;
h_tree_segments.push_back(sum);
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
sum += model.trees.at(tree_idx)->GetNodes().size();
h_tree_segments.push_back(sum);
}
nodes = std::move(HostDeviceVector<RegTree::Node>(h_tree_segments.back(), RegTree::Node(),
gpu_id));
stats = std::move(HostDeviceVector<RTreeNodeStat>(h_tree_segments.back(),
RTreeNodeStat(), gpu_id));
auto d_nodes = nodes.DevicePointer();
auto d_stats = stats.DevicePointer();
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
auto& src_nodes = model.trees.at(tree_idx)->GetNodes();
auto& src_stats = model.trees.at(tree_idx)->GetStats();
dh::safe_cuda(hipMemcpyAsync(
d_nodes + h_tree_segments[tree_idx - tree_begin], src_nodes.data(),
sizeof(RegTree::Node) * src_nodes.size(), hipMemcpyDefault));
dh::safe_cuda(hipMemcpyAsync(
d_stats + h_tree_segments[tree_idx - tree_begin], src_stats.data(),
sizeof(RTreeNodeStat) * src_stats.size(), hipMemcpyDefault));
}
tree_group = std::move(HostDeviceVector<int>(model.tree_info.size(), 0, gpu_id));
auto& h_tree_group = tree_group.HostVector();
std::memcpy(h_tree_group.data(), model.tree_info.data(), sizeof(int) * model.tree_info.size());
// Initialize categorical splits.
split_types.SetDevice(gpu_id);
std::vector<FeatureType>& h_split_types = split_types.HostVector();
h_split_types.resize(h_tree_segments.back());
for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
auto const& src_st = model.trees.at(tree_idx)->GetSplitTypes();
std::copy(src_st.cbegin(), src_st.cend(),
h_split_types.begin() + h_tree_segments[tree_idx - tree_begin]);
}
categories = HostDeviceVector<uint32_t>({}, gpu_id);
categories_tree_segments = HostDeviceVector<uint32_t>(1, 0, gpu_id);
std::vector<uint32_t> &h_categories = categories.HostVector();
std::vector<uint32_t> &h_split_cat_segments = categories_tree_segments.HostVector();
for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
auto const& src_cats = model.trees.at(tree_idx)->GetSplitCategories();
size_t orig_size = h_categories.size();
h_categories.resize(orig_size + src_cats.size());
std::copy(src_cats.cbegin(), src_cats.cend(),
h_categories.begin() + orig_size);
h_split_cat_segments.push_back(h_categories.size());
}
categories_node_segments = HostDeviceVector<RegTree::CategoricalSplitMatrix::Segment>(
h_tree_segments.back(), {}, gpu_id);
std::vector<RegTree::CategoricalSplitMatrix::Segment>& h_categories_node_segments =
categories_node_segments.HostVector();
for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
auto const &src_cats_ptr = model.trees.at(tree_idx)->GetSplitCategoriesPtr();
std::copy(src_cats_ptr.cbegin(), src_cats_ptr.cend(),
h_categories_node_segments.begin() +
h_tree_segments[tree_idx - tree_begin]);
}
this->tree_beg_ = tree_begin;
this->tree_end_ = tree_end;
this->num_group = model.learner_model_param->OutputLength();
}
};
struct ShapSplitCondition {
ShapSplitCondition() = default;
XGBOOST_DEVICE
ShapSplitCondition(float feature_lower_bound, float feature_upper_bound,
bool is_missing_branch, common::CatBitField cats)
: feature_lower_bound(feature_lower_bound),
feature_upper_bound(feature_upper_bound),
is_missing_branch(is_missing_branch), categories{std::move(cats)} {
assert(feature_lower_bound <= feature_upper_bound);
}
/*! Feature values >= lower and < upper flow down this path. */
float feature_lower_bound;
float feature_upper_bound;
/*! Feature value set to true flow down this path. */
common::CatBitField categories;
/*! Do missing values flow down this path? */
bool is_missing_branch;
// Does this instance flow down this path?
XGBOOST_DEVICE bool EvaluateSplit(float x) const {
// is nan
if (isnan(x)) {
return is_missing_branch;
}
if (categories.Size() != 0) {
auto cat = static_cast<uint32_t>(x);
return categories.Check(cat);
} else {
return x >= feature_lower_bound && x < feature_upper_bound;
}
}
  // the &= op on the bitfield acts per CUDA thread; this helper loops over the entire
// bitfield.
XGBOOST_DEVICE static common::CatBitField Intersect(common::CatBitField l,
common::CatBitField r) {
if (l.Data() == r.Data()) {
return l;
}
if (l.Size() > r.Size()) {
thrust::swap(l, r);
}
for (size_t i = 0; i < r.Bits().size(); ++i) {
l.Bits()[i] &= r.Bits()[i];
}
return l;
}
// Combine two split conditions on the same feature
XGBOOST_DEVICE void Merge(ShapSplitCondition other) {
// Combine duplicate features
if (categories.Size() != 0 || other.categories.Size() != 0) {
categories = Intersect(categories, other.categories);
} else {
feature_lower_bound = max(feature_lower_bound, other.feature_lower_bound);
feature_upper_bound = min(feature_upper_bound, other.feature_upper_bound);
}
is_missing_branch = is_missing_branch && other.is_missing_branch;
}
};
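// Example of Merge on two numeric conditions for the same feature:
// [2, +inf) merged with (-inf, 5) gives [2, 5), and the merged path is a
// missing-value branch only if both inputs were missing-value branches.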
struct PathInfo {
int64_t leaf_position; // -1 not a leaf
size_t length;
size_t tree_idx;
};
// Transform model into path element form for GPUTreeShap
void ExtractPaths(
dh::device_vector<gpu_treeshap::PathElement<ShapSplitCondition>> *paths,
DeviceModel *model, dh::device_vector<uint32_t> *path_categories,
int gpu_id) {
dh::safe_cuda(hipSetDevice(gpu_id));
auto& device_model = *model;
dh::caching_device_vector<PathInfo> info(device_model.nodes.Size());
dh::XGBCachingDeviceAllocator<PathInfo> alloc;
auto d_nodes = device_model.nodes.ConstDeviceSpan();
auto d_tree_segments = device_model.tree_segments.ConstDeviceSpan();
auto nodes_transform = dh::MakeTransformIterator<PathInfo>(
thrust::make_counting_iterator(0ull), [=] __device__(size_t idx) {
auto n = d_nodes[idx];
if (!n.IsLeaf() || n.IsDeleted()) {
return PathInfo{-1, 0, 0};
}
size_t tree_idx =
dh::SegmentId(d_tree_segments.begin(), d_tree_segments.end(), idx);
size_t tree_offset = d_tree_segments[tree_idx];
size_t path_length = 1;
while (!n.IsRoot()) {
n = d_nodes[n.Parent() + tree_offset];
path_length++;
}
return PathInfo{static_cast<int64_t>(idx), path_length, tree_idx};
});
auto end = thrust::copy_if(
thrust::hip::par(alloc), nodes_transform,
nodes_transform + d_nodes.size(), info.begin(),
[=] __device__(const PathInfo& e) { return e.leaf_position != -1; });
info.resize(end - info.begin());
auto length_iterator = dh::MakeTransformIterator<size_t>(
info.begin(),
[=] __device__(const PathInfo& info) { return info.length; });
dh::caching_device_vector<size_t> path_segments(info.size() + 1);
thrust::exclusive_scan(thrust::hip::par(alloc), length_iterator,
length_iterator + info.size() + 1,
path_segments.begin());
paths->resize(path_segments.back());
auto d_paths = dh::ToSpan(*paths);
auto d_info = info.data().get();
auto d_stats = device_model.stats.ConstDeviceSpan();
auto d_tree_group = device_model.tree_group.ConstDeviceSpan();
auto d_path_segments = path_segments.data().get();
auto d_split_types = device_model.split_types.ConstDeviceSpan();
auto d_cat_segments = device_model.categories_tree_segments.ConstDeviceSpan();
auto d_cat_node_segments = device_model.categories_node_segments.ConstDeviceSpan();
size_t max_cat = 0;
if (thrust::any_of(dh::tbegin(d_split_types), dh::tend(d_split_types),
common::IsCatOp{})) {
dh::PinnedMemory pinned;
auto h_max_cat = pinned.GetSpan<RegTree::CategoricalSplitMatrix::Segment>(1);
auto max_elem_it = dh::MakeTransformIterator<size_t>(
dh::tbegin(d_cat_node_segments),
[] __device__(RegTree::CategoricalSplitMatrix::Segment seg) { return seg.size; });
size_t max_cat_it =
thrust::max_element(thrust::device, max_elem_it,
max_elem_it + d_cat_node_segments.size()) -
max_elem_it;
dh::safe_cuda(hipMemcpy(h_max_cat.data(),
d_cat_node_segments.data() + max_cat_it,
h_max_cat.size_bytes(), hipMemcpyDeviceToHost));
max_cat = h_max_cat[0].size;
CHECK_GE(max_cat, 1);
path_categories->resize(max_cat * paths->size());
}
auto d_model_categories = device_model.categories.DeviceSpan();
common::Span<uint32_t> d_path_categories = dh::ToSpan(*path_categories);
dh::LaunchN(info.size(), [=] __device__(size_t idx) {
auto path_info = d_info[idx];
size_t tree_offset = d_tree_segments[path_info.tree_idx];
TreeView tree{0, path_info.tree_idx, d_nodes,
d_tree_segments, d_split_types, d_cat_segments,
d_cat_node_segments, d_model_categories};
int group = d_tree_group[path_info.tree_idx];
size_t child_idx = path_info.leaf_position;
auto child = d_nodes[child_idx];
float v = child.LeafValue();
const float inf = std::numeric_limits<float>::infinity();
size_t output_position = d_path_segments[idx + 1] - 1;
while (!child.IsRoot()) {
size_t parent_idx = tree_offset + child.Parent();
double child_cover = d_stats[child_idx].sum_hess;
double parent_cover = d_stats[parent_idx].sum_hess;
double zero_fraction = child_cover / parent_cover;
auto parent = tree.d_tree[child.Parent()];
bool is_left_path = (tree_offset + parent.LeftChild()) == child_idx;
bool is_missing_path = (!parent.DefaultLeft() && !is_left_path) ||
(parent.DefaultLeft() && is_left_path);
float lower_bound = -inf;
float upper_bound = inf;
common::CatBitField bits;
if (common::IsCat(tree.cats.split_type, child.Parent())) {
auto path_cats = d_path_categories.subspan(max_cat * output_position, max_cat);
size_t size = tree.cats.node_ptr[child.Parent()].size;
auto node_cats = tree.cats.categories.subspan(tree.cats.node_ptr[child.Parent()].beg, size);
SPAN_CHECK(path_cats.size() >= node_cats.size());
for (size_t i = 0; i < node_cats.size(); ++i) {
path_cats[i] = is_left_path ? ~node_cats[i] : node_cats[i];
}
bits = common::CatBitField{path_cats};
} else {
lower_bound = is_left_path ? -inf : parent.SplitCond();
upper_bound = is_left_path ? parent.SplitCond() : inf;
}
d_paths[output_position--] =
gpu_treeshap::PathElement<ShapSplitCondition>{
idx, parent.SplitIndex(),
group, ShapSplitCondition{lower_bound, upper_bound, is_missing_path, bits},
zero_fraction, v};
child_idx = parent_idx;
child = parent;
}
// Root node has feature -1
d_paths[output_position] = {idx, -1, group, ShapSplitCondition{-inf, inf, false, {}}, 1.0, v};
});
}
namespace {
template <size_t kBlockThreads>
size_t SharedMemoryBytes(size_t cols, size_t max_shared_memory_bytes) {
  // max_shared_memory_bytes must never be 0 at this point.
CHECK_GT(max_shared_memory_bytes, 0);
size_t shared_memory_bytes =
static_cast<size_t>(sizeof(float) * cols * kBlockThreads);
if (shared_memory_bytes > max_shared_memory_bytes) {
shared_memory_bytes = 0;
}
return shared_memory_bytes;
}
} // anonymous namespace
class GPUPredictor : public xgboost::Predictor {
private:
void PredictInternal(const SparsePage& batch,
DeviceModel const& model,
size_t num_features,
HostDeviceVector<bst_float>* predictions,
size_t batch_offset, bool is_dense) const {
batch.offset.SetDevice(ctx_->gpu_id);
batch.data.SetDevice(ctx_->gpu_id);
const uint32_t BLOCK_THREADS = 128;
size_t num_rows = batch.Size();
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS));
auto max_shared_memory_bytes = ConfigureDevice(ctx_->gpu_id);
size_t shared_memory_bytes =
SharedMemoryBytes<BLOCK_THREADS>(num_features, max_shared_memory_bytes);
bool use_shared = shared_memory_bytes != 0;
size_t entry_start = 0;
SparsePageView data(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
num_features);
auto const kernel = [&](auto predict_fn) {
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} (
predict_fn, data, model.nodes.ConstDeviceSpan(),
predictions->DeviceSpan().subspan(batch_offset),
model.tree_segments.ConstDeviceSpan(),
model.tree_group.ConstDeviceSpan(),
model.split_types.ConstDeviceSpan(),
model.categories_tree_segments.ConstDeviceSpan(),
model.categories_node_segments.ConstDeviceSpan(),
model.categories.ConstDeviceSpan(), model.tree_beg_, model.tree_end_,
num_features, num_rows, entry_start, use_shared, model.num_group,
nan(""));
};
if (is_dense) {
kernel(PredictKernel<SparsePageLoader, SparsePageView, false>);
} else {
kernel(PredictKernel<SparsePageLoader, SparsePageView, true>);
}
}
void PredictInternal(EllpackDeviceAccessor const& batch,
DeviceModel const& model,
HostDeviceVector<bst_float>* out_preds,
size_t batch_offset) const {
const uint32_t BLOCK_THREADS = 256;
size_t num_rows = batch.n_rows;
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS));
DeviceModel d_model;
bool use_shared = false;
size_t entry_start = 0;
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS} (
PredictKernel<EllpackLoader, EllpackDeviceAccessor>, batch,
model.nodes.ConstDeviceSpan(), out_preds->DeviceSpan().subspan(batch_offset),
model.tree_segments.ConstDeviceSpan(), model.tree_group.ConstDeviceSpan(),
model.split_types.ConstDeviceSpan(),
model.categories_tree_segments.ConstDeviceSpan(),
model.categories_node_segments.ConstDeviceSpan(),
model.categories.ConstDeviceSpan(), model.tree_beg_, model.tree_end_,
batch.NumFeatures(), num_rows, entry_start, use_shared,
model.num_group, nan(""));
}
void DevicePredictInternal(DMatrix* dmat, HostDeviceVector<float>* out_preds,
const gbm::GBTreeModel& model, size_t tree_begin,
size_t tree_end) const {
if (tree_end - tree_begin == 0) {
return;
}
out_preds->SetDevice(ctx_->gpu_id);
auto const& info = dmat->Info();
DeviceModel d_model;
d_model.Init(model, tree_begin, tree_end, ctx_->gpu_id);
if (dmat->PageExists<SparsePage>()) {
size_t batch_offset = 0;
for (auto &batch : dmat->GetBatches<SparsePage>()) {
this->PredictInternal(batch, d_model, model.learner_model_param->num_feature,
out_preds, batch_offset, dmat->IsDense());
batch_offset += batch.Size() * model.learner_model_param->num_output_group;
}
} else {
size_t batch_offset = 0;
for (auto const& page : dmat->GetBatches<EllpackPage>(BatchParam{})) {
dmat->Info().feature_types.SetDevice(ctx_->gpu_id);
auto feature_types = dmat->Info().feature_types.ConstDeviceSpan();
this->PredictInternal(
page.Impl()->GetDeviceAccessor(ctx_->gpu_id, feature_types),
d_model,
out_preds,
batch_offset);
batch_offset += page.Impl()->n_rows;
}
}
}
public:
explicit GPUPredictor(Context const* ctx) : Predictor::Predictor{ctx} {}
~GPUPredictor() override {
if (ctx_->gpu_id >= 0 && ctx_->gpu_id < common::AllVisibleGPUs()) {
dh::safe_cuda(hipSetDevice(ctx_->gpu_id));
}
}
void PredictBatch(DMatrix* dmat, PredictionCacheEntry* predts,
const gbm::GBTreeModel& model, uint32_t tree_begin,
uint32_t tree_end = 0) const override {
int device = ctx_->gpu_id;
CHECK_GE(device, 0) << "Set `gpu_id' to a non-negative value for processing GPU data.";
auto* out_preds = &predts->predictions;
if (tree_end == 0) {
tree_end = model.trees.size();
}
this->DevicePredictInternal(dmat, out_preds, model, tree_begin, tree_end);
}
template <typename Adapter, typename Loader>
void DispatchedInplacePredict(std::any const& x, std::shared_ptr<DMatrix> p_m,
const gbm::GBTreeModel& model, float missing,
PredictionCacheEntry* out_preds, uint32_t tree_begin,
uint32_t tree_end) const {
uint32_t const output_groups = model.learner_model_param->num_output_group;
auto m = std::any_cast<std::shared_ptr<Adapter>>(x);
CHECK_EQ(m->NumColumns(), model.learner_model_param->num_feature)
<< "Number of columns in data must equal to trained model.";
CHECK_EQ(dh::CurrentDevice(), m->DeviceIdx())
<< "XGBoost is running on device: " << this->ctx_->gpu_id << ", "
<< "but data is on: " << m->DeviceIdx();
if (p_m) {
p_m->Info().num_row_ = m->NumRows();
this->InitOutPredictions(p_m->Info(), &(out_preds->predictions), model);
} else {
MetaInfo info;
info.num_row_ = m->NumRows();
this->InitOutPredictions(info, &(out_preds->predictions), model);
}
out_preds->predictions.SetDevice(m->DeviceIdx());
const uint32_t BLOCK_THREADS = 128;
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(m->NumRows(), BLOCK_THREADS));
auto max_shared_memory_bytes = dh::MaxSharedMemory(m->DeviceIdx());
size_t shared_memory_bytes =
SharedMemoryBytes<BLOCK_THREADS>(m->NumColumns(), max_shared_memory_bytes);
DeviceModel d_model;
d_model.Init(model, tree_begin, tree_end, m->DeviceIdx());
bool use_shared = shared_memory_bytes != 0;
size_t entry_start = 0;
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} (
PredictKernel<Loader, typename Loader::BatchT>, m->Value(),
d_model.nodes.ConstDeviceSpan(), out_preds->predictions.DeviceSpan(),
d_model.tree_segments.ConstDeviceSpan(), d_model.tree_group.ConstDeviceSpan(),
d_model.split_types.ConstDeviceSpan(),
d_model.categories_tree_segments.ConstDeviceSpan(),
d_model.categories_node_segments.ConstDeviceSpan(),
d_model.categories.ConstDeviceSpan(), tree_begin, tree_end, m->NumColumns(),
m->NumRows(), entry_start, use_shared, output_groups, missing);
}
bool InplacePredict(std::shared_ptr<DMatrix> p_m, const gbm::GBTreeModel& model, float missing,
PredictionCacheEntry* out_preds, uint32_t tree_begin,
unsigned tree_end) const override {
auto proxy = dynamic_cast<data::DMatrixProxy*>(p_m.get());
CHECK(proxy)<< "Inplace predict accepts only DMatrixProxy as input.";
auto x = proxy->Adapter();
if (x.type() == typeid(std::shared_ptr<data::CupyAdapter>)) {
this->DispatchedInplacePredict<data::CupyAdapter,
DeviceAdapterLoader<data::CupyAdapterBatch>>(
x, p_m, model, missing, out_preds, tree_begin, tree_end);
} else if (x.type() == typeid(std::shared_ptr<data::CudfAdapter>)) {
this->DispatchedInplacePredict<data::CudfAdapter,
DeviceAdapterLoader<data::CudfAdapterBatch>>(
x, p_m, model, missing, out_preds, tree_begin, tree_end);
} else {
return false;
}
return true;
}
void PredictContribution(DMatrix* p_fmat,
HostDeviceVector<bst_float>* out_contribs,
const gbm::GBTreeModel& model, unsigned tree_end,
std::vector<bst_float> const* tree_weights,
bool approximate, int,
unsigned) const override {
std::string not_implemented{"contribution is not implemented in GPU "
"predictor, use `cpu_predictor` instead."};
if (approximate) {
LOG(FATAL) << "Approximated " << not_implemented;
}
if (tree_weights != nullptr) {
LOG(FATAL) << "Dart booster feature " << not_implemented;
}
dh::safe_cuda(hipSetDevice(ctx_->gpu_id));
out_contribs->SetDevice(ctx_->gpu_id);
if (tree_end == 0 || tree_end > model.trees.size()) {
tree_end = static_cast<uint32_t>(model.trees.size());
}
const int ngroup = model.learner_model_param->num_output_group;
CHECK_NE(ngroup, 0);
// allocate space for (number of features + bias) times the number of rows
size_t contributions_columns =
model.learner_model_param->num_feature + 1; // +1 for bias
out_contribs->Resize(p_fmat->Info().num_row_ * contributions_columns *
model.learner_model_param->num_output_group);
out_contribs->Fill(0.0f);
auto phis = out_contribs->DeviceSpan();
dh::device_vector<gpu_treeshap::PathElement<ShapSplitCondition>>
device_paths;
DeviceModel d_model;
d_model.Init(model, 0, tree_end, ctx_->gpu_id);
dh::device_vector<uint32_t> categories;
ExtractPaths(&device_paths, &d_model, &categories, ctx_->gpu_id);
for (auto& batch : p_fmat->GetBatches<SparsePage>()) {
batch.data.SetDevice(ctx_->gpu_id);
batch.offset.SetDevice(ctx_->gpu_id);
SparsePageView X(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
model.learner_model_param->num_feature);
auto begin = dh::tbegin(phis) + batch.base_rowid * contributions_columns;
gpu_treeshap::GPUTreeShap<dh::XGBDeviceAllocator<int>>(
X, device_paths.begin(), device_paths.end(), ngroup, begin,
dh::tend(phis));
}
// Add the base margin term to last column
p_fmat->Info().base_margin_.SetDevice(ctx_->gpu_id);
const auto margin = p_fmat->Info().base_margin_.Data()->ConstDeviceSpan();
auto base_score = model.learner_model_param->BaseScore(ctx_);
dh::LaunchN(p_fmat->Info().num_row_ * model.learner_model_param->num_output_group,
[=] __device__(size_t idx) {
phis[(idx + 1) * contributions_columns - 1] +=
margin.empty() ? base_score(0) : margin[idx];
});
}
void PredictInteractionContributions(DMatrix* p_fmat,
HostDeviceVector<bst_float>* out_contribs,
const gbm::GBTreeModel& model,
unsigned tree_end,
std::vector<bst_float> const* tree_weights,
bool approximate) const override {
std::string not_implemented{"contribution is not implemented in GPU "
"predictor, use `cpu_predictor` instead."};
if (approximate) {
LOG(FATAL) << "Approximated " << not_implemented;
}
if (tree_weights != nullptr) {
LOG(FATAL) << "Dart booster feature " << not_implemented;
}
dh::safe_cuda(hipSetDevice(ctx_->gpu_id));
out_contribs->SetDevice(ctx_->gpu_id);
if (tree_end == 0 || tree_end > model.trees.size()) {
tree_end = static_cast<uint32_t>(model.trees.size());
}
const int ngroup = model.learner_model_param->num_output_group;
CHECK_NE(ngroup, 0);
// allocate space for (number of features + bias) times the number of rows
size_t contributions_columns =
model.learner_model_param->num_feature + 1; // +1 for bias
out_contribs->Resize(p_fmat->Info().num_row_ * contributions_columns *
contributions_columns *
model.learner_model_param->num_output_group);
out_contribs->Fill(0.0f);
auto phis = out_contribs->DeviceSpan();
dh::device_vector<gpu_treeshap::PathElement<ShapSplitCondition>>
device_paths;
DeviceModel d_model;
d_model.Init(model, 0, tree_end, ctx_->gpu_id);
dh::device_vector<uint32_t> categories;
ExtractPaths(&device_paths, &d_model, &categories, ctx_->gpu_id);
for (auto& batch : p_fmat->GetBatches<SparsePage>()) {
batch.data.SetDevice(ctx_->gpu_id);
batch.offset.SetDevice(ctx_->gpu_id);
SparsePageView X(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
model.learner_model_param->num_feature);
auto begin = dh::tbegin(phis) + batch.base_rowid * contributions_columns;
gpu_treeshap::GPUTreeShapInteractions<dh::XGBDeviceAllocator<int>>(
X, device_paths.begin(), device_paths.end(), ngroup, begin,
dh::tend(phis));
}
// Add the base margin term to last column
p_fmat->Info().base_margin_.SetDevice(ctx_->gpu_id);
const auto margin = p_fmat->Info().base_margin_.Data()->ConstDeviceSpan();
auto base_score = model.learner_model_param->BaseScore(ctx_);
size_t n_features = model.learner_model_param->num_feature;
dh::LaunchN(p_fmat->Info().num_row_ * model.learner_model_param->num_output_group,
[=] __device__(size_t idx) {
size_t group = idx % ngroup;
size_t row_idx = idx / ngroup;
phis[gpu_treeshap::IndexPhiInteractions(row_idx, ngroup, group, n_features,
n_features, n_features)] +=
margin.empty() ? base_score(0) : margin[idx];
});
}
void PredictInstance(const SparsePage::Inst&,
std::vector<bst_float>*,
const gbm::GBTreeModel&, unsigned) const override {
LOG(FATAL) << "[Internal error]: " << __func__
<< " is not implemented in GPU Predictor.";
}
void PredictLeaf(DMatrix *p_fmat, HostDeviceVector<bst_float> *predictions,
const gbm::GBTreeModel &model,
unsigned tree_end) const override {
dh::safe_cuda(hipSetDevice(ctx_->gpu_id));
auto max_shared_memory_bytes = ConfigureDevice(ctx_->gpu_id);
const MetaInfo& info = p_fmat->Info();
constexpr uint32_t kBlockThreads = 128;
size_t shared_memory_bytes = SharedMemoryBytes<kBlockThreads>(
info.num_col_, max_shared_memory_bytes);
bool use_shared = shared_memory_bytes != 0;
bst_feature_t num_features = info.num_col_;
bst_row_t num_rows = info.num_row_;
size_t entry_start = 0;
if (tree_end == 0 || tree_end > model.trees.size()) {
tree_end = static_cast<uint32_t>(model.trees.size());
}
predictions->SetDevice(ctx_->gpu_id);
predictions->Resize(num_rows * tree_end);
DeviceModel d_model;
d_model.Init(model, 0, tree_end, this->ctx_->gpu_id);
if (p_fmat->PageExists<SparsePage>()) {
for (auto const& batch : p_fmat->GetBatches<SparsePage>()) {
batch.data.SetDevice(ctx_->gpu_id);
batch.offset.SetDevice(ctx_->gpu_id);
bst_row_t batch_offset = 0;
SparsePageView data{batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
model.learner_model_param->num_feature};
size_t num_rows = batch.Size();
auto grid =
static_cast<uint32_t>(common::DivRoundUp(num_rows, kBlockThreads));
dh::LaunchKernel {grid, kBlockThreads, shared_memory_bytes} (
PredictLeafKernel<SparsePageLoader, SparsePageView>, data,
d_model.nodes.ConstDeviceSpan(),
predictions->DeviceSpan().subspan(batch_offset),
d_model.tree_segments.ConstDeviceSpan(),
d_model.split_types.ConstDeviceSpan(),
d_model.categories_tree_segments.ConstDeviceSpan(),
d_model.categories_node_segments.ConstDeviceSpan(),
d_model.categories.ConstDeviceSpan(),
d_model.tree_beg_, d_model.tree_end_, num_features, num_rows,
entry_start, use_shared, nan(""));
batch_offset += batch.Size();
}
} else {
for (auto const& batch : p_fmat->GetBatches<EllpackPage>(BatchParam{})) {
bst_row_t batch_offset = 0;
EllpackDeviceAccessor data{batch.Impl()->GetDeviceAccessor(ctx_->gpu_id)};
size_t num_rows = batch.Size();
auto grid =
static_cast<uint32_t>(common::DivRoundUp(num_rows, kBlockThreads));
dh::LaunchKernel {grid, kBlockThreads, shared_memory_bytes} (
PredictLeafKernel<EllpackLoader, EllpackDeviceAccessor>, data,
d_model.nodes.ConstDeviceSpan(),
predictions->DeviceSpan().subspan(batch_offset),
d_model.tree_segments.ConstDeviceSpan(),
d_model.split_types.ConstDeviceSpan(),
d_model.categories_tree_segments.ConstDeviceSpan(),
d_model.categories_node_segments.ConstDeviceSpan(),
d_model.categories.ConstDeviceSpan(),
d_model.tree_beg_, d_model.tree_end_, num_features, num_rows,
entry_start, use_shared, nan(""));
batch_offset += batch.Size();
}
}
}
void Configure(const std::vector<std::pair<std::string, std::string>>& cfg) override {
Predictor::Configure(cfg);
}
private:
/*! \brief Reconfigure the device when GPU is changed. */
static size_t ConfigureDevice(int device) {
if (device >= 0) {
return dh::MaxSharedMemory(device);
}
return 0;
}
};
XGBOOST_REGISTER_PREDICTOR(GPUPredictor, "gpu_predictor")
.describe("Make predictions using GPU.")
.set_body([](Context const* ctx) { return new GPUPredictor(ctx); });
} // namespace xgboost::predictor
|
8a4f1dcece2f890acd39119b28f7b6a2b9629994.cu
|
/**
* Copyright 2017-2023 by XGBoost Contributors
*/
#include <GPUTreeShap/gpu_treeshap.h>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/host_vector.h>
#include <any> // for any, any_cast
#include <memory>
#include "../common/bitfield.h"
#include "../common/categorical.h"
#include "../common/common.h"
#include "../common/device_helpers.cuh"
#include "../data/device_adapter.cuh"
#include "../data/ellpack_page.cuh"
#include "../data/proxy_dmatrix.h"
#include "../gbm/gbtree_model.h"
#include "predict_fn.h"
#include "xgboost/data.h"
#include "xgboost/host_device_vector.h"
#include "xgboost/predictor.h"
#include "xgboost/tree_model.h"
#include "xgboost/tree_updater.h"
namespace xgboost::predictor {
DMLC_REGISTRY_FILE_TAG(gpu_predictor);
struct TreeView {
RegTree::CategoricalSplitMatrix cats;
common::Span<RegTree::Node const> d_tree;
XGBOOST_DEVICE
TreeView(size_t tree_begin, size_t tree_idx, common::Span<const RegTree::Node> d_nodes,
common::Span<size_t const> d_tree_segments,
common::Span<FeatureType const> d_tree_split_types,
common::Span<uint32_t const> d_cat_tree_segments,
common::Span<RegTree::CategoricalSplitMatrix::Segment const> d_cat_node_segments,
common::Span<uint32_t const> d_categories) {
auto begin = d_tree_segments[tree_idx - tree_begin];
auto n_nodes = d_tree_segments[tree_idx - tree_begin + 1] -
d_tree_segments[tree_idx - tree_begin];
d_tree = d_nodes.subspan(begin, n_nodes);
auto tree_cat_ptrs = d_cat_node_segments.subspan(begin, n_nodes);
auto tree_split_types = d_tree_split_types.subspan(begin, n_nodes);
auto tree_categories =
d_categories.subspan(d_cat_tree_segments[tree_idx - tree_begin],
d_cat_tree_segments[tree_idx - tree_begin + 1] -
d_cat_tree_segments[tree_idx - tree_begin]);
cats.split_type = tree_split_types;
cats.categories = tree_categories;
cats.node_ptr = tree_cat_ptrs;
}
__device__ bool HasCategoricalSplit() const {
return !cats.categories.empty();
}
};
struct SparsePageView {
common::Span<const Entry> d_data;
common::Span<const bst_row_t> d_row_ptr;
bst_feature_t num_features;
SparsePageView() = default;
XGBOOST_DEVICE SparsePageView(common::Span<const Entry> data,
common::Span<const bst_row_t> row_ptr,
bst_feature_t num_features)
: d_data{data}, d_row_ptr{row_ptr}, num_features(num_features) {}
__device__ float GetElement(size_t ridx, size_t fidx) const {
// Binary search
auto begin_ptr = d_data.begin() + d_row_ptr[ridx];
auto end_ptr = d_data.begin() + d_row_ptr[ridx + 1];
if (end_ptr - begin_ptr == this->NumCols()) {
// Bypass span check for dense data
return d_data.data()[d_row_ptr[ridx] + fidx].fvalue;
}
common::Span<const Entry>::iterator previous_middle;
while (end_ptr != begin_ptr) {
auto middle = begin_ptr + (end_ptr - begin_ptr) / 2;
if (middle == previous_middle) {
break;
} else {
previous_middle = middle;
}
if (middle->index == fidx) {
return middle->fvalue;
} else if (middle->index < fidx) {
begin_ptr = middle;
} else {
end_ptr = middle;
}
}
// Value is missing
return nanf("");
}
XGBOOST_DEVICE size_t NumRows() const { return d_row_ptr.size() - 1; }
XGBOOST_DEVICE size_t NumCols() const { return num_features; }
};
struct SparsePageLoader {
bool use_shared;
SparsePageView data;
float* smem;
size_t entry_start;
__device__ SparsePageLoader(SparsePageView data, bool use_shared, bst_feature_t num_features,
bst_row_t num_rows, size_t entry_start, float)
: use_shared(use_shared),
data(data),
entry_start(entry_start) {
extern __shared__ float _smem[];
smem = _smem;
// Copy instances
if (use_shared) {
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
int shared_elements = blockDim.x * data.num_features;
dh::BlockFill(smem, shared_elements, nanf(""));
__syncthreads();
if (global_idx < num_rows) {
bst_uint elem_begin = data.d_row_ptr[global_idx];
bst_uint elem_end = data.d_row_ptr[global_idx + 1];
for (bst_uint elem_idx = elem_begin; elem_idx < elem_end; elem_idx++) {
Entry elem = data.d_data[elem_idx - entry_start];
smem[threadIdx.x * data.num_features + elem.index] = elem.fvalue;
}
}
__syncthreads();
}
}
__device__ float GetElement(size_t ridx, size_t fidx) const {
if (use_shared) {
return smem[threadIdx.x * data.num_features + fidx];
} else {
return data.GetElement(ridx, fidx);
}
}
};
struct EllpackLoader {
EllpackDeviceAccessor const& matrix;
XGBOOST_DEVICE EllpackLoader(EllpackDeviceAccessor const& m, bool, bst_feature_t, bst_row_t,
size_t, float)
: matrix{m} {}
__device__ __forceinline__ float GetElement(size_t ridx, size_t fidx) const {
auto gidx = matrix.GetBinIndex(ridx, fidx);
if (gidx == -1) {
return nan("");
}
if (common::IsCat(matrix.feature_types, fidx)) {
return matrix.gidx_fvalue_map[gidx];
}
// The gradient index needs to be shifted by one as min values are not included in the
// cuts.
if (gidx == matrix.feature_segments[fidx]) {
return matrix.min_fvalue[fidx];
}
return matrix.gidx_fvalue_map[gidx - 1];
}
};
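// Editor-added illustrative note (not part of the original source): a worked
// example of the bin-index shift above, under the assumption of a feature whose
// cut points are {0.5, 1.5, 2.5}, whose min_fvalue is 0.1, and whose
// feature_segments[fidx] is 0. A row falling into the first bin yields
// gidx == 0 == feature_segments[fidx], so the loader returns min_fvalue[fidx] (0.1);
// a row in the second bin yields gidx == 1 and the loader returns
// gidx_fvalue_map[0] (0.5), i.e. the cut that closes the previous bin.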
template <typename Batch>
struct DeviceAdapterLoader {
Batch batch;
bst_feature_t columns;
float* smem;
bool use_shared;
data::IsValidFunctor is_valid;
using BatchT = Batch;
XGBOOST_DEV_INLINE DeviceAdapterLoader(Batch const batch, bool use_shared,
bst_feature_t num_features, bst_row_t num_rows,
size_t entry_start, float missing) :
batch{batch},
columns{num_features},
use_shared{use_shared},
is_valid{missing} {
extern __shared__ float _smem[];
smem = _smem;
if (use_shared) {
uint32_t global_idx = blockDim.x * blockIdx.x + threadIdx.x;
size_t shared_elements = blockDim.x * num_features;
dh::BlockFill(smem, shared_elements, nanf(""));
__syncthreads();
if (global_idx < num_rows) {
auto beg = global_idx * columns;
auto end = (global_idx + 1) * columns;
for (size_t i = beg; i < end; ++i) {
auto value = batch.GetElement(i).value;
if (is_valid(value)) {
smem[threadIdx.x * num_features + (i - beg)] = value;
}
}
}
}
__syncthreads();
}
XGBOOST_DEV_INLINE float GetElement(size_t ridx, size_t fidx) const {
if (use_shared) {
return smem[threadIdx.x * columns + fidx];
}
auto value = batch.GetElement(ridx * columns + fidx).value;
if (is_valid(value)) {
return value;
} else {
return nan("");
}
}
};
template <bool has_missing, bool has_categorical, typename Loader>
__device__ bst_node_t GetLeafIndex(bst_row_t ridx, TreeView const &tree,
Loader *loader) {
bst_node_t nidx = 0;
RegTree::Node n = tree.d_tree[nidx];
while (!n.IsLeaf()) {
float fvalue = loader->GetElement(ridx, n.SplitIndex());
bool is_missing = common::CheckNAN(fvalue);
nidx = GetNextNode<has_missing, has_categorical>(n, nidx, fvalue,
is_missing, tree.cats);
n = tree.d_tree[nidx];
}
return nidx;
}
template <bool has_missing, typename Loader>
__device__ float GetLeafWeight(bst_row_t ridx, TreeView const &tree,
Loader *loader) {
bst_node_t nidx = -1;
if (tree.HasCategoricalSplit()) {
nidx = GetLeafIndex<has_missing, true>(ridx, tree, loader);
} else {
nidx = GetLeafIndex<has_missing, false>(ridx, tree, loader);
}
return tree.d_tree[nidx].LeafValue();
}
template <typename Loader, typename Data>
__global__ void
PredictLeafKernel(Data data, common::Span<const RegTree::Node> d_nodes,
common::Span<float> d_out_predictions,
common::Span<size_t const> d_tree_segments,
common::Span<FeatureType const> d_tree_split_types,
common::Span<uint32_t const> d_cat_tree_segments,
common::Span<RegTree::CategoricalSplitMatrix::Segment const> d_cat_node_segments,
common::Span<uint32_t const> d_categories,
size_t tree_begin, size_t tree_end, size_t num_features,
size_t num_rows, size_t entry_start, bool use_shared,
float missing) {
bst_row_t ridx = blockDim.x * blockIdx.x + threadIdx.x;
if (ridx >= num_rows) {
return;
}
Loader loader(data, use_shared, num_features, num_rows, entry_start, missing);
for (size_t tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
TreeView d_tree{
tree_begin, tree_idx, d_nodes,
d_tree_segments, d_tree_split_types, d_cat_tree_segments,
d_cat_node_segments, d_categories};
bst_node_t leaf = -1;
if (d_tree.HasCategoricalSplit()) {
leaf = GetLeafIndex<true, true>(ridx, d_tree, &loader);
} else {
leaf = GetLeafIndex<true, false>(ridx, d_tree, &loader);
}
d_out_predictions[ridx * (tree_end - tree_begin) + tree_idx] = leaf;
}
}
template <typename Loader, typename Data, bool has_missing = true>
__global__ void
PredictKernel(Data data, common::Span<const RegTree::Node> d_nodes,
common::Span<float> d_out_predictions,
common::Span<size_t const> d_tree_segments,
common::Span<int const> d_tree_group,
common::Span<FeatureType const> d_tree_split_types,
common::Span<uint32_t const> d_cat_tree_segments,
common::Span<RegTree::CategoricalSplitMatrix::Segment const> d_cat_node_segments,
common::Span<uint32_t const> d_categories, size_t tree_begin,
size_t tree_end, size_t num_features, size_t num_rows,
size_t entry_start, bool use_shared, int num_group, float missing) {
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
Loader loader(data, use_shared, num_features, num_rows, entry_start, missing);
if (global_idx >= num_rows) return;
if (num_group == 1) {
float sum = 0;
for (size_t tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
TreeView d_tree{
tree_begin, tree_idx, d_nodes,
d_tree_segments, d_tree_split_types, d_cat_tree_segments,
d_cat_node_segments, d_categories};
float leaf = GetLeafWeight<has_missing>(global_idx, d_tree, &loader);
sum += leaf;
}
d_out_predictions[global_idx] += sum;
} else {
for (size_t tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
int tree_group = d_tree_group[tree_idx];
TreeView d_tree{
tree_begin, tree_idx, d_nodes,
d_tree_segments, d_tree_split_types, d_cat_tree_segments,
d_cat_node_segments, d_categories};
bst_uint out_prediction_idx = global_idx * num_group + tree_group;
d_out_predictions[out_prediction_idx] +=
GetLeafWeight<has_missing>(global_idx, d_tree, &loader);
}
}
}
class DeviceModel {
public:
// Need to lazily construct the vectors because GPU id is only known at runtime
HostDeviceVector<RTreeNodeStat> stats;
HostDeviceVector<size_t> tree_segments;
HostDeviceVector<RegTree::Node> nodes;
HostDeviceVector<int> tree_group;
HostDeviceVector<FeatureType> split_types;
// Pointer to each tree, segmenting the node array.
HostDeviceVector<uint32_t> categories_tree_segments;
// Pointer to each node, segmenting categories array.
HostDeviceVector<RegTree::CategoricalSplitMatrix::Segment> categories_node_segments;
HostDeviceVector<uint32_t> categories;
size_t tree_beg_; // NOLINT
size_t tree_end_; // NOLINT
int num_group;
void Init(const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end, int32_t gpu_id) {
dh::safe_cuda(cudaSetDevice(gpu_id));
// Copy decision trees to device
tree_segments = std::move(HostDeviceVector<size_t>({}, gpu_id));
auto& h_tree_segments = tree_segments.HostVector();
h_tree_segments.reserve((tree_end - tree_begin) + 1);
size_t sum = 0;
h_tree_segments.push_back(sum);
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
sum += model.trees.at(tree_idx)->GetNodes().size();
h_tree_segments.push_back(sum);
}
nodes = std::move(HostDeviceVector<RegTree::Node>(h_tree_segments.back(), RegTree::Node(),
gpu_id));
stats = std::move(HostDeviceVector<RTreeNodeStat>(h_tree_segments.back(),
RTreeNodeStat(), gpu_id));
auto d_nodes = nodes.DevicePointer();
auto d_stats = stats.DevicePointer();
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
auto& src_nodes = model.trees.at(tree_idx)->GetNodes();
auto& src_stats = model.trees.at(tree_idx)->GetStats();
dh::safe_cuda(cudaMemcpyAsync(
d_nodes + h_tree_segments[tree_idx - tree_begin], src_nodes.data(),
sizeof(RegTree::Node) * src_nodes.size(), cudaMemcpyDefault));
dh::safe_cuda(cudaMemcpyAsync(
d_stats + h_tree_segments[tree_idx - tree_begin], src_stats.data(),
sizeof(RTreeNodeStat) * src_stats.size(), cudaMemcpyDefault));
}
tree_group = std::move(HostDeviceVector<int>(model.tree_info.size(), 0, gpu_id));
auto& h_tree_group = tree_group.HostVector();
std::memcpy(h_tree_group.data(), model.tree_info.data(), sizeof(int) * model.tree_info.size());
// Initialize categorical splits.
split_types.SetDevice(gpu_id);
std::vector<FeatureType>& h_split_types = split_types.HostVector();
h_split_types.resize(h_tree_segments.back());
for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
auto const& src_st = model.trees.at(tree_idx)->GetSplitTypes();
std::copy(src_st.cbegin(), src_st.cend(),
h_split_types.begin() + h_tree_segments[tree_idx - tree_begin]);
}
categories = HostDeviceVector<uint32_t>({}, gpu_id);
categories_tree_segments = HostDeviceVector<uint32_t>(1, 0, gpu_id);
std::vector<uint32_t> &h_categories = categories.HostVector();
std::vector<uint32_t> &h_split_cat_segments = categories_tree_segments.HostVector();
for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
auto const& src_cats = model.trees.at(tree_idx)->GetSplitCategories();
size_t orig_size = h_categories.size();
h_categories.resize(orig_size + src_cats.size());
std::copy(src_cats.cbegin(), src_cats.cend(),
h_categories.begin() + orig_size);
h_split_cat_segments.push_back(h_categories.size());
}
categories_node_segments = HostDeviceVector<RegTree::CategoricalSplitMatrix::Segment>(
h_tree_segments.back(), {}, gpu_id);
std::vector<RegTree::CategoricalSplitMatrix::Segment>& h_categories_node_segments =
categories_node_segments.HostVector();
for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
auto const &src_cats_ptr = model.trees.at(tree_idx)->GetSplitCategoriesPtr();
std::copy(src_cats_ptr.cbegin(), src_cats_ptr.cend(),
h_categories_node_segments.begin() +
h_tree_segments[tree_idx - tree_begin]);
}
this->tree_beg_ = tree_begin;
this->tree_end_ = tree_end;
this->num_group = model.learner_model_param->OutputLength();
}
};
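// Editor-added illustrative note (not part of the original source): with a
// hypothetical forest of three trees holding 7, 5 and 3 nodes, Init() above builds
// h_tree_segments = {0, 7, 12, 15}; tree t then owns nodes
// [h_tree_segments[t], h_tree_segments[t + 1]) of the flattened `nodes` array, and
// the same offsets segment `split_types` and `categories_node_segments`.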
struct ShapSplitCondition {
ShapSplitCondition() = default;
XGBOOST_DEVICE
ShapSplitCondition(float feature_lower_bound, float feature_upper_bound,
bool is_missing_branch, common::CatBitField cats)
: feature_lower_bound(feature_lower_bound),
feature_upper_bound(feature_upper_bound),
is_missing_branch(is_missing_branch), categories{std::move(cats)} {
assert(feature_lower_bound <= feature_upper_bound);
}
/*! Feature values >= lower and < upper flow down this path. */
float feature_lower_bound;
float feature_upper_bound;
/*! Feature values whose category bit is set flow down this path. */
common::CatBitField categories;
/*! Do missing values flow down this path? */
bool is_missing_branch;
// Does this instance flow down this path?
XGBOOST_DEVICE bool EvaluateSplit(float x) const {
// is nan
if (isnan(x)) {
return is_missing_branch;
}
if (categories.Size() != 0) {
auto cat = static_cast<uint32_t>(x);
return categories.Check(cat);
} else {
return x >= feature_lower_bound && x < feature_upper_bound;
}
}
// The &= op in the bitfield is applied per CUDA thread; this one loops over the
// entire bitfield.
XGBOOST_DEVICE static common::CatBitField Intersect(common::CatBitField l,
common::CatBitField r) {
if (l.Data() == r.Data()) {
return l;
}
if (l.Size() > r.Size()) {
thrust::swap(l, r);
}
for (size_t i = 0; i < r.Bits().size(); ++i) {
l.Bits()[i] &= r.Bits()[i];
}
return l;
}
// Combine two split conditions on the same feature
XGBOOST_DEVICE void Merge(ShapSplitCondition other) {
// Combine duplicate features
if (categories.Size() != 0 || other.categories.Size() != 0) {
categories = Intersect(categories, other.categories);
} else {
feature_lower_bound = max(feature_lower_bound, other.feature_lower_bound);
feature_upper_bound = min(feature_upper_bound, other.feature_upper_bound);
}
is_missing_branch = is_missing_branch && other.is_missing_branch;
}
};
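// Editor-added illustrative example (not part of the original source): merging two
// numerical conditions on the same feature keeps the tighter interval, e.g.
//   ShapSplitCondition a{-inf, 3.0f, false, {}};
//   ShapSplitCondition b{1.0f,  inf, false, {}};
//   a.Merge(b);   // a now admits 1.0f <= x < 3.0f, and is_missing_branch stays false
// For categorical splits the two bitfields are intersected via Intersect() instead.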
struct PathInfo {
int64_t leaf_position; // -1 not a leaf
size_t length;
size_t tree_idx;
};
// Transform model into path element form for GPUTreeShap
void ExtractPaths(
dh::device_vector<gpu_treeshap::PathElement<ShapSplitCondition>> *paths,
DeviceModel *model, dh::device_vector<uint32_t> *path_categories,
int gpu_id) {
dh::safe_cuda(cudaSetDevice(gpu_id));
auto& device_model = *model;
dh::caching_device_vector<PathInfo> info(device_model.nodes.Size());
dh::XGBCachingDeviceAllocator<PathInfo> alloc;
auto d_nodes = device_model.nodes.ConstDeviceSpan();
auto d_tree_segments = device_model.tree_segments.ConstDeviceSpan();
auto nodes_transform = dh::MakeTransformIterator<PathInfo>(
thrust::make_counting_iterator(0ull), [=] __device__(size_t idx) {
auto n = d_nodes[idx];
if (!n.IsLeaf() || n.IsDeleted()) {
return PathInfo{-1, 0, 0};
}
size_t tree_idx =
dh::SegmentId(d_tree_segments.begin(), d_tree_segments.end(), idx);
size_t tree_offset = d_tree_segments[tree_idx];
size_t path_length = 1;
while (!n.IsRoot()) {
n = d_nodes[n.Parent() + tree_offset];
path_length++;
}
return PathInfo{static_cast<int64_t>(idx), path_length, tree_idx};
});
auto end = thrust::copy_if(
thrust::cuda::par(alloc), nodes_transform,
nodes_transform + d_nodes.size(), info.begin(),
[=] __device__(const PathInfo& e) { return e.leaf_position != -1; });
info.resize(end - info.begin());
auto length_iterator = dh::MakeTransformIterator<size_t>(
info.begin(),
[=] __device__(const PathInfo& info) { return info.length; });
dh::caching_device_vector<size_t> path_segments(info.size() + 1);
thrust::exclusive_scan(thrust::cuda::par(alloc), length_iterator,
length_iterator + info.size() + 1,
path_segments.begin());
paths->resize(path_segments.back());
auto d_paths = dh::ToSpan(*paths);
auto d_info = info.data().get();
auto d_stats = device_model.stats.ConstDeviceSpan();
auto d_tree_group = device_model.tree_group.ConstDeviceSpan();
auto d_path_segments = path_segments.data().get();
auto d_split_types = device_model.split_types.ConstDeviceSpan();
auto d_cat_segments = device_model.categories_tree_segments.ConstDeviceSpan();
auto d_cat_node_segments = device_model.categories_node_segments.ConstDeviceSpan();
size_t max_cat = 0;
if (thrust::any_of(dh::tbegin(d_split_types), dh::tend(d_split_types),
common::IsCatOp{})) {
dh::PinnedMemory pinned;
auto h_max_cat = pinned.GetSpan<RegTree::CategoricalSplitMatrix::Segment>(1);
auto max_elem_it = dh::MakeTransformIterator<size_t>(
dh::tbegin(d_cat_node_segments),
[] __device__(RegTree::CategoricalSplitMatrix::Segment seg) { return seg.size; });
size_t max_cat_it =
thrust::max_element(thrust::device, max_elem_it,
max_elem_it + d_cat_node_segments.size()) -
max_elem_it;
dh::safe_cuda(cudaMemcpy(h_max_cat.data(),
d_cat_node_segments.data() + max_cat_it,
h_max_cat.size_bytes(), cudaMemcpyDeviceToHost));
max_cat = h_max_cat[0].size;
CHECK_GE(max_cat, 1);
path_categories->resize(max_cat * paths->size());
}
auto d_model_categories = device_model.categories.DeviceSpan();
common::Span<uint32_t> d_path_categories = dh::ToSpan(*path_categories);
dh::LaunchN(info.size(), [=] __device__(size_t idx) {
auto path_info = d_info[idx];
size_t tree_offset = d_tree_segments[path_info.tree_idx];
TreeView tree{0, path_info.tree_idx, d_nodes,
d_tree_segments, d_split_types, d_cat_segments,
d_cat_node_segments, d_model_categories};
int group = d_tree_group[path_info.tree_idx];
size_t child_idx = path_info.leaf_position;
auto child = d_nodes[child_idx];
float v = child.LeafValue();
const float inf = std::numeric_limits<float>::infinity();
size_t output_position = d_path_segments[idx + 1] - 1;
while (!child.IsRoot()) {
size_t parent_idx = tree_offset + child.Parent();
double child_cover = d_stats[child_idx].sum_hess;
double parent_cover = d_stats[parent_idx].sum_hess;
double zero_fraction = child_cover / parent_cover;
auto parent = tree.d_tree[child.Parent()];
bool is_left_path = (tree_offset + parent.LeftChild()) == child_idx;
bool is_missing_path = (!parent.DefaultLeft() && !is_left_path) ||
(parent.DefaultLeft() && is_left_path);
float lower_bound = -inf;
float upper_bound = inf;
common::CatBitField bits;
if (common::IsCat(tree.cats.split_type, child.Parent())) {
auto path_cats = d_path_categories.subspan(max_cat * output_position, max_cat);
size_t size = tree.cats.node_ptr[child.Parent()].size;
auto node_cats = tree.cats.categories.subspan(tree.cats.node_ptr[child.Parent()].beg, size);
SPAN_CHECK(path_cats.size() >= node_cats.size());
for (size_t i = 0; i < node_cats.size(); ++i) {
path_cats[i] = is_left_path ? ~node_cats[i] : node_cats[i];
}
bits = common::CatBitField{path_cats};
} else {
lower_bound = is_left_path ? -inf : parent.SplitCond();
upper_bound = is_left_path ? parent.SplitCond() : inf;
}
d_paths[output_position--] =
gpu_treeshap::PathElement<ShapSplitCondition>{
idx, parent.SplitIndex(),
group, ShapSplitCondition{lower_bound, upper_bound, is_missing_path, bits},
zero_fraction, v};
child_idx = parent_idx;
child = parent;
}
// Root node has feature -1
d_paths[output_position] = {idx, -1, group, ShapSplitCondition{-inf, inf, false, {}}, 1.0, v};
});
}
namespace {
template <size_t kBlockThreads>
size_t SharedMemoryBytes(size_t cols, size_t max_shared_memory_bytes) {
// There is no way max_shared_memory_bytes can be equal to 0 here.
CHECK_GT(max_shared_memory_bytes, 0);
size_t shared_memory_bytes =
static_cast<size_t>(sizeof(float) * cols * kBlockThreads);
if (shared_memory_bytes > max_shared_memory_bytes) {
shared_memory_bytes = 0;
}
return shared_memory_bytes;
}
} // anonymous namespace
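// Editor-added worked example (not part of the original source), assuming a
// hypothetical 100-feature dataset, kBlockThreads == 128 and a 48 KiB per-block
// shared-memory limit: sizeof(float) * 100 * 128 = 51,200 bytes > 49,152 bytes,
// so SharedMemoryBytes() returns 0 and the loaders above fall back to reading
// rows directly from global memory instead of staging them in shared memory.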
class GPUPredictor : public xgboost::Predictor {
private:
void PredictInternal(const SparsePage& batch,
DeviceModel const& model,
size_t num_features,
HostDeviceVector<bst_float>* predictions,
size_t batch_offset, bool is_dense) const {
batch.offset.SetDevice(ctx_->gpu_id);
batch.data.SetDevice(ctx_->gpu_id);
const uint32_t BLOCK_THREADS = 128;
size_t num_rows = batch.Size();
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS));
auto max_shared_memory_bytes = ConfigureDevice(ctx_->gpu_id);
size_t shared_memory_bytes =
SharedMemoryBytes<BLOCK_THREADS>(num_features, max_shared_memory_bytes);
bool use_shared = shared_memory_bytes != 0;
size_t entry_start = 0;
SparsePageView data(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
num_features);
auto const kernel = [&](auto predict_fn) {
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} (
predict_fn, data, model.nodes.ConstDeviceSpan(),
predictions->DeviceSpan().subspan(batch_offset),
model.tree_segments.ConstDeviceSpan(),
model.tree_group.ConstDeviceSpan(),
model.split_types.ConstDeviceSpan(),
model.categories_tree_segments.ConstDeviceSpan(),
model.categories_node_segments.ConstDeviceSpan(),
model.categories.ConstDeviceSpan(), model.tree_beg_, model.tree_end_,
num_features, num_rows, entry_start, use_shared, model.num_group,
nan(""));
};
if (is_dense) {
kernel(PredictKernel<SparsePageLoader, SparsePageView, false>);
} else {
kernel(PredictKernel<SparsePageLoader, SparsePageView, true>);
}
}
void PredictInternal(EllpackDeviceAccessor const& batch,
DeviceModel const& model,
HostDeviceVector<bst_float>* out_preds,
size_t batch_offset) const {
const uint32_t BLOCK_THREADS = 256;
size_t num_rows = batch.n_rows;
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS));
DeviceModel d_model;
bool use_shared = false;
size_t entry_start = 0;
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS} (
PredictKernel<EllpackLoader, EllpackDeviceAccessor>, batch,
model.nodes.ConstDeviceSpan(), out_preds->DeviceSpan().subspan(batch_offset),
model.tree_segments.ConstDeviceSpan(), model.tree_group.ConstDeviceSpan(),
model.split_types.ConstDeviceSpan(),
model.categories_tree_segments.ConstDeviceSpan(),
model.categories_node_segments.ConstDeviceSpan(),
model.categories.ConstDeviceSpan(), model.tree_beg_, model.tree_end_,
batch.NumFeatures(), num_rows, entry_start, use_shared,
model.num_group, nan(""));
}
void DevicePredictInternal(DMatrix* dmat, HostDeviceVector<float>* out_preds,
const gbm::GBTreeModel& model, size_t tree_begin,
size_t tree_end) const {
if (tree_end - tree_begin == 0) {
return;
}
out_preds->SetDevice(ctx_->gpu_id);
auto const& info = dmat->Info();
DeviceModel d_model;
d_model.Init(model, tree_begin, tree_end, ctx_->gpu_id);
if (dmat->PageExists<SparsePage>()) {
size_t batch_offset = 0;
for (auto &batch : dmat->GetBatches<SparsePage>()) {
this->PredictInternal(batch, d_model, model.learner_model_param->num_feature,
out_preds, batch_offset, dmat->IsDense());
batch_offset += batch.Size() * model.learner_model_param->num_output_group;
}
} else {
size_t batch_offset = 0;
for (auto const& page : dmat->GetBatches<EllpackPage>(BatchParam{})) {
dmat->Info().feature_types.SetDevice(ctx_->gpu_id);
auto feature_types = dmat->Info().feature_types.ConstDeviceSpan();
this->PredictInternal(
page.Impl()->GetDeviceAccessor(ctx_->gpu_id, feature_types),
d_model,
out_preds,
batch_offset);
batch_offset += page.Impl()->n_rows;
}
}
}
public:
explicit GPUPredictor(Context const* ctx) : Predictor::Predictor{ctx} {}
~GPUPredictor() override {
if (ctx_->gpu_id >= 0 && ctx_->gpu_id < common::AllVisibleGPUs()) {
dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));
}
}
void PredictBatch(DMatrix* dmat, PredictionCacheEntry* predts,
const gbm::GBTreeModel& model, uint32_t tree_begin,
uint32_t tree_end = 0) const override {
int device = ctx_->gpu_id;
CHECK_GE(device, 0) << "Set `gpu_id' to a non-negative value for processing GPU data.";
auto* out_preds = &predts->predictions;
if (tree_end == 0) {
tree_end = model.trees.size();
}
this->DevicePredictInternal(dmat, out_preds, model, tree_begin, tree_end);
}
template <typename Adapter, typename Loader>
void DispatchedInplacePredict(std::any const& x, std::shared_ptr<DMatrix> p_m,
const gbm::GBTreeModel& model, float missing,
PredictionCacheEntry* out_preds, uint32_t tree_begin,
uint32_t tree_end) const {
uint32_t const output_groups = model.learner_model_param->num_output_group;
auto m = std::any_cast<std::shared_ptr<Adapter>>(x);
CHECK_EQ(m->NumColumns(), model.learner_model_param->num_feature)
<< "Number of columns in data must equal to trained model.";
CHECK_EQ(dh::CurrentDevice(), m->DeviceIdx())
<< "XGBoost is running on device: " << this->ctx_->gpu_id << ", "
<< "but data is on: " << m->DeviceIdx();
if (p_m) {
p_m->Info().num_row_ = m->NumRows();
this->InitOutPredictions(p_m->Info(), &(out_preds->predictions), model);
} else {
MetaInfo info;
info.num_row_ = m->NumRows();
this->InitOutPredictions(info, &(out_preds->predictions), model);
}
out_preds->predictions.SetDevice(m->DeviceIdx());
const uint32_t BLOCK_THREADS = 128;
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(m->NumRows(), BLOCK_THREADS));
auto max_shared_memory_bytes = dh::MaxSharedMemory(m->DeviceIdx());
size_t shared_memory_bytes =
SharedMemoryBytes<BLOCK_THREADS>(m->NumColumns(), max_shared_memory_bytes);
DeviceModel d_model;
d_model.Init(model, tree_begin, tree_end, m->DeviceIdx());
bool use_shared = shared_memory_bytes != 0;
size_t entry_start = 0;
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} (
PredictKernel<Loader, typename Loader::BatchT>, m->Value(),
d_model.nodes.ConstDeviceSpan(), out_preds->predictions.DeviceSpan(),
d_model.tree_segments.ConstDeviceSpan(), d_model.tree_group.ConstDeviceSpan(),
d_model.split_types.ConstDeviceSpan(),
d_model.categories_tree_segments.ConstDeviceSpan(),
d_model.categories_node_segments.ConstDeviceSpan(),
d_model.categories.ConstDeviceSpan(), tree_begin, tree_end, m->NumColumns(),
m->NumRows(), entry_start, use_shared, output_groups, missing);
}
bool InplacePredict(std::shared_ptr<DMatrix> p_m, const gbm::GBTreeModel& model, float missing,
PredictionCacheEntry* out_preds, uint32_t tree_begin,
unsigned tree_end) const override {
auto proxy = dynamic_cast<data::DMatrixProxy*>(p_m.get());
CHECK(proxy)<< "Inplace predict accepts only DMatrixProxy as input.";
auto x = proxy->Adapter();
if (x.type() == typeid(std::shared_ptr<data::CupyAdapter>)) {
this->DispatchedInplacePredict<data::CupyAdapter,
DeviceAdapterLoader<data::CupyAdapterBatch>>(
x, p_m, model, missing, out_preds, tree_begin, tree_end);
} else if (x.type() == typeid(std::shared_ptr<data::CudfAdapter>)) {
this->DispatchedInplacePredict<data::CudfAdapter,
DeviceAdapterLoader<data::CudfAdapterBatch>>(
x, p_m, model, missing, out_preds, tree_begin, tree_end);
} else {
return false;
}
return true;
}
void PredictContribution(DMatrix* p_fmat,
HostDeviceVector<bst_float>* out_contribs,
const gbm::GBTreeModel& model, unsigned tree_end,
std::vector<bst_float> const* tree_weights,
bool approximate, int,
unsigned) const override {
std::string not_implemented{"contribution is not implemented in GPU "
"predictor, use `cpu_predictor` instead."};
if (approximate) {
LOG(FATAL) << "Approximated " << not_implemented;
}
if (tree_weights != nullptr) {
LOG(FATAL) << "Dart booster feature " << not_implemented;
}
dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));
out_contribs->SetDevice(ctx_->gpu_id);
if (tree_end == 0 || tree_end > model.trees.size()) {
tree_end = static_cast<uint32_t>(model.trees.size());
}
const int ngroup = model.learner_model_param->num_output_group;
CHECK_NE(ngroup, 0);
// allocate space for (number of features + bias) times the number of rows
size_t contributions_columns =
model.learner_model_param->num_feature + 1; // +1 for bias
out_contribs->Resize(p_fmat->Info().num_row_ * contributions_columns *
model.learner_model_param->num_output_group);
out_contribs->Fill(0.0f);
auto phis = out_contribs->DeviceSpan();
dh::device_vector<gpu_treeshap::PathElement<ShapSplitCondition>>
device_paths;
DeviceModel d_model;
d_model.Init(model, 0, tree_end, ctx_->gpu_id);
dh::device_vector<uint32_t> categories;
ExtractPaths(&device_paths, &d_model, &categories, ctx_->gpu_id);
for (auto& batch : p_fmat->GetBatches<SparsePage>()) {
batch.data.SetDevice(ctx_->gpu_id);
batch.offset.SetDevice(ctx_->gpu_id);
SparsePageView X(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
model.learner_model_param->num_feature);
auto begin = dh::tbegin(phis) + batch.base_rowid * contributions_columns;
gpu_treeshap::GPUTreeShap<dh::XGBDeviceAllocator<int>>(
X, device_paths.begin(), device_paths.end(), ngroup, begin,
dh::tend(phis));
}
// Add the base margin term to last column
p_fmat->Info().base_margin_.SetDevice(ctx_->gpu_id);
const auto margin = p_fmat->Info().base_margin_.Data()->ConstDeviceSpan();
auto base_score = model.learner_model_param->BaseScore(ctx_);
dh::LaunchN(p_fmat->Info().num_row_ * model.learner_model_param->num_output_group,
[=] __device__(size_t idx) {
phis[(idx + 1) * contributions_columns - 1] +=
margin.empty() ? base_score(0) : margin[idx];
});
}
void PredictInteractionContributions(DMatrix* p_fmat,
HostDeviceVector<bst_float>* out_contribs,
const gbm::GBTreeModel& model,
unsigned tree_end,
std::vector<bst_float> const* tree_weights,
bool approximate) const override {
std::string not_implemented{"contribution is not implemented in GPU "
"predictor, use `cpu_predictor` instead."};
if (approximate) {
LOG(FATAL) << "Approximated " << not_implemented;
}
if (tree_weights != nullptr) {
LOG(FATAL) << "Dart booster feature " << not_implemented;
}
dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));
out_contribs->SetDevice(ctx_->gpu_id);
if (tree_end == 0 || tree_end > model.trees.size()) {
tree_end = static_cast<uint32_t>(model.trees.size());
}
const int ngroup = model.learner_model_param->num_output_group;
CHECK_NE(ngroup, 0);
// allocate space for (number of features + bias) times the number of rows
size_t contributions_columns =
model.learner_model_param->num_feature + 1; // +1 for bias
out_contribs->Resize(p_fmat->Info().num_row_ * contributions_columns *
contributions_columns *
model.learner_model_param->num_output_group);
out_contribs->Fill(0.0f);
auto phis = out_contribs->DeviceSpan();
dh::device_vector<gpu_treeshap::PathElement<ShapSplitCondition>>
device_paths;
DeviceModel d_model;
d_model.Init(model, 0, tree_end, ctx_->gpu_id);
dh::device_vector<uint32_t> categories;
ExtractPaths(&device_paths, &d_model, &categories, ctx_->gpu_id);
for (auto& batch : p_fmat->GetBatches<SparsePage>()) {
batch.data.SetDevice(ctx_->gpu_id);
batch.offset.SetDevice(ctx_->gpu_id);
SparsePageView X(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
model.learner_model_param->num_feature);
auto begin = dh::tbegin(phis) + batch.base_rowid * contributions_columns;
gpu_treeshap::GPUTreeShapInteractions<dh::XGBDeviceAllocator<int>>(
X, device_paths.begin(), device_paths.end(), ngroup, begin,
dh::tend(phis));
}
// Add the base margin term to last column
p_fmat->Info().base_margin_.SetDevice(ctx_->gpu_id);
const auto margin = p_fmat->Info().base_margin_.Data()->ConstDeviceSpan();
auto base_score = model.learner_model_param->BaseScore(ctx_);
size_t n_features = model.learner_model_param->num_feature;
dh::LaunchN(p_fmat->Info().num_row_ * model.learner_model_param->num_output_group,
[=] __device__(size_t idx) {
size_t group = idx % ngroup;
size_t row_idx = idx / ngroup;
phis[gpu_treeshap::IndexPhiInteractions(row_idx, ngroup, group, n_features,
n_features, n_features)] +=
margin.empty() ? base_score(0) : margin[idx];
});
}
void PredictInstance(const SparsePage::Inst&,
std::vector<bst_float>*,
const gbm::GBTreeModel&, unsigned) const override {
LOG(FATAL) << "[Internal error]: " << __func__
<< " is not implemented in GPU Predictor.";
}
void PredictLeaf(DMatrix *p_fmat, HostDeviceVector<bst_float> *predictions,
const gbm::GBTreeModel &model,
unsigned tree_end) const override {
dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));
auto max_shared_memory_bytes = ConfigureDevice(ctx_->gpu_id);
const MetaInfo& info = p_fmat->Info();
constexpr uint32_t kBlockThreads = 128;
size_t shared_memory_bytes = SharedMemoryBytes<kBlockThreads>(
info.num_col_, max_shared_memory_bytes);
bool use_shared = shared_memory_bytes != 0;
bst_feature_t num_features = info.num_col_;
bst_row_t num_rows = info.num_row_;
size_t entry_start = 0;
if (tree_end == 0 || tree_end > model.trees.size()) {
tree_end = static_cast<uint32_t>(model.trees.size());
}
predictions->SetDevice(ctx_->gpu_id);
predictions->Resize(num_rows * tree_end);
DeviceModel d_model;
d_model.Init(model, 0, tree_end, this->ctx_->gpu_id);
if (p_fmat->PageExists<SparsePage>()) {
for (auto const& batch : p_fmat->GetBatches<SparsePage>()) {
batch.data.SetDevice(ctx_->gpu_id);
batch.offset.SetDevice(ctx_->gpu_id);
bst_row_t batch_offset = 0;
SparsePageView data{batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
model.learner_model_param->num_feature};
size_t num_rows = batch.Size();
auto grid =
static_cast<uint32_t>(common::DivRoundUp(num_rows, kBlockThreads));
dh::LaunchKernel {grid, kBlockThreads, shared_memory_bytes} (
PredictLeafKernel<SparsePageLoader, SparsePageView>, data,
d_model.nodes.ConstDeviceSpan(),
predictions->DeviceSpan().subspan(batch_offset),
d_model.tree_segments.ConstDeviceSpan(),
d_model.split_types.ConstDeviceSpan(),
d_model.categories_tree_segments.ConstDeviceSpan(),
d_model.categories_node_segments.ConstDeviceSpan(),
d_model.categories.ConstDeviceSpan(),
d_model.tree_beg_, d_model.tree_end_, num_features, num_rows,
entry_start, use_shared, nan(""));
batch_offset += batch.Size();
}
} else {
for (auto const& batch : p_fmat->GetBatches<EllpackPage>(BatchParam{})) {
bst_row_t batch_offset = 0;
EllpackDeviceAccessor data{batch.Impl()->GetDeviceAccessor(ctx_->gpu_id)};
size_t num_rows = batch.Size();
auto grid =
static_cast<uint32_t>(common::DivRoundUp(num_rows, kBlockThreads));
dh::LaunchKernel {grid, kBlockThreads, shared_memory_bytes} (
PredictLeafKernel<EllpackLoader, EllpackDeviceAccessor>, data,
d_model.nodes.ConstDeviceSpan(),
predictions->DeviceSpan().subspan(batch_offset),
d_model.tree_segments.ConstDeviceSpan(),
d_model.split_types.ConstDeviceSpan(),
d_model.categories_tree_segments.ConstDeviceSpan(),
d_model.categories_node_segments.ConstDeviceSpan(),
d_model.categories.ConstDeviceSpan(),
d_model.tree_beg_, d_model.tree_end_, num_features, num_rows,
entry_start, use_shared, nan(""));
batch_offset += batch.Size();
}
}
}
void Configure(const std::vector<std::pair<std::string, std::string>>& cfg) override {
Predictor::Configure(cfg);
}
private:
/*! \brief Reconfigure the device when GPU is changed. */
static size_t ConfigureDevice(int device) {
if (device >= 0) {
return dh::MaxSharedMemory(device);
}
return 0;
}
};
XGBOOST_REGISTER_PREDICTOR(GPUPredictor, "gpu_predictor")
.describe("Make predictions using GPU.")
.set_body([](Context const* ctx) { return new GPUPredictor(ctx); });
} // namespace xgboost::predictor
|
4e49b1b1fc4075528f39f7955145ff01d5cf7c62.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
#include <stdio.h>
#include <math.h>
#include <float.h>
#include <assert.h>
/// Get the (batch,row,col) indices corresponding to a given thread index (3D point index)
__device__ void getCoordinates_1(const int tid, const int nrows, const int ncols,
int &batch, int &row, int &col)
{
// Get col id
int id = tid;
col = id % ncols;
id = id / ncols;
// Get row id
row = id % nrows;
id = id / nrows;
// Get batch id
batch = id;
}
/*
* Projects points and does the depth test for each of the input points. For each output point:
* (xout,yout)_i = (xpix,ypix)_i for each input point i
* zout_i = z of the closest point that projects onto it (After projection & depth test)
*/
__global__ void projectPointsAndDepthTest(const float *input_data, float *output_data,
const float fx, const float fy, const float cx, const float cy,
const int batchSize, const int nrows, const int ncols, const int npoints,
const int is0, const int is1, const int is2, const int is3)
{
// Get the index of the point
int id = blockIdx.x * blockDim.x + threadIdx.x; // Since they are 1D only
if (id >= npoints) return;
// Get the batch, row and column indices
int b,r,c;
getCoordinates_1(id, nrows, ncols, b, r, c);
// Get the 3D input point
long vali = b*is0 + r*is2 + c*is3; // Don't add stride along 3D dim
float x = *(input_data + 0*is1 + vali);
float y = *(input_data + 1*is1 + vali);
float z = *(input_data + 2*is1 + vali);
if (z <= 0) return; // No valid projection : Z <= 0
// Do a perspective transform, scale by focal length & add principal point
float xpix = ((x/z) * fx) + cx;// + 1; // Points go from [0, row-1] & [0, col-1] in original data
float ypix = ((y/z) * fy) + cy;// + 1;
// Check projection success / Check limits / Do the depth test
float xpixr = round(xpix); // Rounded off pixel col
float ypixr = round(ypix); // Rounded off pixel row
if (xpixr >= 0 && xpixr < ncols && ypixr >= 0 && ypixr < nrows)
{
// Do depth test:
// If z >= z at pixel, discard this point
// Else z at pixel = z
// Note: We use ATOMICMIN here considering the float as an int
// This works since our float values are always positive
// See: https://devtalk.nvidia.com/default/topic/492068/atomicmin-with-float/
// See: http://stereopsis.com/radix.html
long valo = b*is0 + ypixr*is2 + xpixr*is3; // y = row, x = col
atomicMin((unsigned int*)(output_data + 2*is1 + valo), __float_as_int(z));
//fatomicMin(output_data + 2*is1 + valo, z);
}
}
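// Editor-added sketch (not part of the original file): the atomicMin-on-int trick
// above relies on positive IEEE-754 floats having monotonically increasing bit
// patterns, so comparing them as integers preserves float ordering. A float atomic
// min built on the same idea could look like this (only valid because z > 0 is
// guaranteed by the early return above):
__device__ __forceinline__ float fatomicMinPositive(float *addr, float value)
{
// Reinterpret the positive float as an int, take the integer atomic min, and
// reinterpret the previous value back to float for the caller.
int old = atomicMin((int *)addr, __float_as_int(value));
return __int_as_float(old);
}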
/*
* Refines the projected points. For each input point, this finds if that point has a valid projection:
* i.e. if that point is closest to the camera and visible. If so, this point has its index set.
* If not, that point's values are set to (0,0,0)
*/
__global__ void refineOutput(const float *input_data, float *output_data, float *indexMap_data,
const float fx, const float fy, const float cx, const float cy,
const int batchSize, const int nrows, const int ncols, const int npoints,
const int is0, const int is1, const int is2, const int is3,
const int iMs0, const int iMs1, const int iMs2, const int iMs3)
{
// Get the index of the point
int id = blockIdx.x * blockDim.x + threadIdx.x; // Since they are 1D only
if (id >= npoints) return;
// Get the batch, row and column indices
int b,r,c;
getCoordinates_1(id, nrows, ncols, b, r, c);
long vali = b*is0 + r*is2 + c*is3; // Don't add stride along 3D dim
// Check the z-value of the output at the present point. If it is still HUGE_VALF
// (no input point projected onto this pixel), reset the stored z to zero
if (*(output_data + 2*is1 + vali) == HUGE_VALF)
{
*(output_data + 2*is1 + vali) = 0;
}
// Get the 3D input point
float x = *(input_data + 0*is1 + vali);
float y = *(input_data + 1*is1 + vali);
float z = *(input_data + 2*is1 + vali);
if (z <= 0) return; // No valid projection : Z <= 0
// Do a perspective transform, scale by focal length & add principal point
float xpix = ((x/z) * fx) + cx;// + 1; // Points go from [0, row-1] & [0, col-1] in original data
float ypix = ((y/z) * fy) + cy;// + 1;
// Check projection success / Check limits / Do the depth test
float xpixr = round(xpix); // Rounded off pixel col
float ypixr = round(ypix); // Rounded off pixel row
if (xpixr >= 0 && xpixr < ncols && ypixr >= 0 && ypixr < nrows)
{
// Get the z-value at the pixel corresponding to this input point
long valo = b*is0 + ypixr*is2 + xpixr*is3; // y = row, x = col
float zo = *(output_data + 2*is1 + valo); // z at output
// If the z values do not match, this point is not visible. Else:
// Update the index map (at the output pixel)
if (zo == z)
{
// Set X and Y values to the interpolated pixel values
*(output_data + 0*is1 + valo) = xpix;
*(output_data + 1*is1 + valo) = ypix;
// Set index map value
long valim = b*iMs0 + ypixr*iMs2 + xpixr*iMs3; // y = row, x = col
*(indexMap_data + valim) = vali; // ID of input point for that pixel
}
}
}
/*
* Computes the gradient for the perspective projection + depth test function
*/
__global__ void projectionGradient(const float *input_data, const float *gradOutput_data,
const float *indexMap_data, float *gradInput_data,
const float fx, const float fy,
const int batchSize, const int nrows, const int ncols, const int npoints,
const int is0, const int is1, const int is2, const int is3,
const int iMs0, const int iMs1, const int iMs2, const int iMs3)
{
// Get the index of the point
int id = blockIdx.x * blockDim.x + threadIdx.x; // Since they are 1D only
if (id >= npoints) return;
// Get the batch, row and column indices
int b,r,c;
getCoordinates_1(id, nrows, ncols, b, r, c);
// Get the index map value (for that output pixel)
long valim = b*iMs0 + r*iMs2 + c*iMs3; // y = row, x = col
long vali = (long)(*(indexMap_data + valim));
if (vali == -1) return; // In case this point has no corresponding output index, return
// Get input point (from set of all input points)
float x = *(input_data + 0*is1 + vali);
float y = *(input_data + 1*is1 + vali);
float z = *(input_data + 2*is1 + vali);
// Get gradOutput value (for that output pixel)
long valgo = b*is0 + r*is2 + c*is3; // y = row, x = col
float gx = *(gradOutput_data + 0*is1 + valgo);
float gy = *(gradOutput_data + 1*is1 + valgo);
float gz = *(gradOutput_data + 2*is1 + valgo);
// Gradient w.r.t x = (fx/z) * gx
// Gradient w.r.t y = (fy/z) * gy
// Gradient w.r.t z = (-x/z^2) * fx * gx + (-y/z^2) * fy * gy + gz
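    // (Derivation, for reference: from xpix = fx*(x/z) + cx and ypix = fy*(y/z) + cy we get
    //  dxpix/dx = fx/z, dxpix/dz = -fx*x/z^2, dypix/dy = fy/z, dypix/dz = -fy*y/z^2, and the
    //  projected depth is z itself, which contributes gz directly.)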
*(gradInput_data + 0*is1 + vali) = (fx/z) * gx;
*(gradInput_data + 1*is1 + vali) = (fy/z) * gy;
*(gradInput_data + 2*is1 + vali) = ((-x/pow(z,2)) * fx * gx) + ((-y/pow(z,2)) * fy * gy) + gz;
}
// =============== FWD PASS ================== //
int Project3DPointsToSubPixelDepth_ForwardLauncher(const float *input, float *indexMap, float *output,
int batchSize, int nrows, int ncols,
float fx, float fy, float cx, float cy,
const long *is, const long *iMs,
hipStream_t stream)
{
// Block and thread structure - we have one large set of points, so use 1d block/threads
long npoints = batchSize * nrows * ncols;
int numBlocks = ceil(npoints * (1.0/256));
dim3 blocks(numBlocks);
dim3 threads(256);
// Project the points and run the depth test first (parallelize across number of points)
hipLaunchKernelGGL(( projectPointsAndDepthTest) , dim3(blocks), dim3(threads), 0, stream ,
input, output,
fx, fy, cx, cy,
batchSize, nrows, ncols, (int)npoints,
(int) is[0], (int) is[1], (int) is[2], (int)is[3]);
// Refine the output - only visible points get valid projections. Other points are all zeros.
hipLaunchKernelGGL(( refineOutput) , dim3(blocks), dim3(threads), 0, stream ,
input, output, indexMap,
fx, fy, cx, cy,
batchSize, nrows, ncols, (int)npoints,
(int) is[0], (int) is[1], (int) is[2], (int)is[3],
(int) iMs[0], (int) iMs[1], (int) iMs[2], (int)iMs[3]);
// check for errors
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in Project3DPointsToSubPixelDepth_ForwardLauncher: %s\n", hipGetErrorString(err));
assert(false);
}
return 1;
}
// =============== BWD PASS ================== //
int Project3DPointsToSubPixelDepth_BackwardLauncher(const float *input, const float *indexMap,
float *gradInput, const float *gradOutput,
int batchSize, int nrows, int ncols,
float fx, float fy, float cx, float cy,
const long *is, const long *iMs,
hipStream_t stream)
{
// Block and thread structure - we have one large set of points, so use 1d block/threads
long npoints = batchSize * nrows * ncols;
int numBlocks = ceil(npoints * (1.0/256));
dim3 blocks(numBlocks);
dim3 threads(256);
// Run the kernel (parallelize across number of points)
hipLaunchKernelGGL(( projectionGradient) , dim3(blocks), dim3(threads), 0, stream ,
input, gradOutput, indexMap, gradInput,
fx, fy,
batchSize, nrows, ncols, (int)npoints,
(int) is[0], (int) is[1], (int) is[2], (int)is[3],
(int) iMs[0], (int) iMs[1], (int) iMs[2], (int)iMs[3]);
// check for errors
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in Project3DPointsToSubPixelDepth_BackwardLauncher: %s\n", hipGetErrorString(err));
assert(false);
}
return 1;
}
#ifdef __cplusplus
}
#endif
|
4e49b1b1fc4075528f39f7955145ff01d5cf7c62.cu
|
#ifdef __cplusplus
extern "C" {
#endif
#include <stdio.h>
#include <math.h>
#include <float.h>
#include <assert.h>
/// Get the (batch,row,col) indices corresponding to a given thread index (3D point index)
__device__ void getCoordinates_1(const int tid, const int nrows, const int ncols,
int &batch, int &row, int &col)
{
// Get col id
int id = tid;
col = id % ncols;
id = id / ncols;
// Get row id
row = id % nrows;
id = id / nrows;
// Get batch id
batch = id;
}
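// Example: with nrows = 3 and ncols = 4, tid = 7 maps to (batch, row, col) = (0, 1, 3) and
// tid = 13 maps to (1, 0, 1), i.e. the flat index is laid out batch-major, then row, then col.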
/*
 * Projects each input point into the image plane and runs a depth test. After this kernel, the z
 * channel of each output pixel holds the depth of the closest input point that projects onto it
 * (via an atomic min); the sub-pixel (x,y) values are filled in afterwards by refineOutput.
 */
__global__ void projectPointsAndDepthTest(const float *input_data, float *output_data,
const float fx, const float fy, const float cx, const float cy,
const int batchSize, const int nrows, const int ncols, const int npoints,
const int is0, const int is1, const int is2, const int is3)
{
// Get the index of the point
int id = blockIdx.x * blockDim.x + threadIdx.x; // Since they are 1D only
if (id >= npoints) return;
// Get the batch, row and column indices
int b,r,c;
getCoordinates_1(id, nrows, ncols, b, r, c);
// Get the 3D input point
long vali = b*is0 + r*is2 + c*is3; // Don't add stride along 3D dim
float x = *(input_data + 0*is1 + vali);
float y = *(input_data + 1*is1 + vali);
float z = *(input_data + 2*is1 + vali);
if (z <= 0) return; // No valid projection : Z <= 0
// Do a perspective transform, scale by focal length & add principal point
float xpix = ((x/z) * fx) + cx;// + 1; // Points go from [0, row-1] & [0, col-1] in original data
float ypix = ((y/z) * fy) + cy;// + 1;
// Check projection success / Check limits / Do the depth test
float xpixr = round(xpix); // Rounded off pixel col
float ypixr = round(ypix); // Rounded off pixel row
if (xpixr >= 0 && xpixr < ncols && ypixr >= 0 && ypixr < nrows)
{
// Do depth test:
// If z >= z at pixel, discard this point
// Else z at pixel = z
// Note: We use ATOMICMIN here considering the float as an int
// This works since our float values are always positive
// See: https://devtalk.nvidia.com/default/topic/492068/atomicmin-with-float/
// See: http://stereopsis.com/radix.html
long valo = b*is0 + ypixr*is2 + xpixr*is3; // y = row, x = col
atomicMin((unsigned int*)(output_data + 2*is1 + valo), __float_as_int(z));
//fatomicMin(output_data + 2*is1 + valo, z);
}
}
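/*
 * [Illustrative addition, not part of the original source] The depth test above relies on the fact
 * that for non-negative IEEE-754 floats the bit pattern, read as an integer, is ordered the same way
 * as the float values, so an integer atomicMin yields the float minimum. The commented-out fatomicMin
 * presumably wraps an equivalent compare-and-swap loop; a minimal sketch (assuming all stored depths
 * are non-negative, as noted above) is:
 */
__device__ float atomicMinPositiveFloatSketch(float *addr, float value)
{
    int old = __float_as_int(*addr);
    // Keep trying to store the smaller value until we succeed, or another
    // thread has already stored something smaller than ours.
    while (__int_as_float(old) > value)
    {
        int assumed = old;
        old = atomicCAS((int *)addr, assumed, __float_as_int(value));
        if (old == assumed) break;
    }
    return __int_as_float(old);
}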
/*
 * Refines the projected points. For each input point, this checks whether the point has a valid
 * projection, i.e. whether it is the point closest to the camera at its output pixel and therefore
 * visible. If so, the pixel's (x,y) are set to the sub-pixel projection and the index map records
 * that input point's index. Output pixels that no point projects onto have their depth reset to zero.
 */
__global__ void refineOutput(const float *input_data, float *output_data, float *indexMap_data,
const float fx, const float fy, const float cx, const float cy,
const int batchSize, const int nrows, const int ncols, const int npoints,
const int is0, const int is1, const int is2, const int is3,
const int iMs0, const int iMs1, const int iMs2, const int iMs3)
{
// Get the index of the point
int id = blockIdx.x * blockDim.x + threadIdx.x; // Since they are 1D only
if (id >= npoints) return;
// Get the batch, row and column indices
int b,r,c;
getCoordinates_1(id, nrows, ncols, b, r, c);
long vali = b*is0 + r*is2 + c*is3; // Don't add stride along 3D dim
    // Check the z-value of the output at this pixel. If it is still HUGE_VALF (no point projected here), reset the depth to zero
if (*(output_data + 2*is1 + vali) == HUGE_VALF)
{
*(output_data + 2*is1 + vali) = 0;
}
// Get the 3D input point
float x = *(input_data + 0*is1 + vali);
float y = *(input_data + 1*is1 + vali);
float z = *(input_data + 2*is1 + vali);
if (z <= 0) return; // No valid projection : Z <= 0
// Do a perspective transform, scale by focal length & add principal point
float xpix = ((x/z) * fx) + cx;// + 1; // Points go from [0, row-1] & [0, col-1] in original data
float ypix = ((y/z) * fy) + cy;// + 1;
// Check projection success / Check limits / Do the depth test
float xpixr = round(xpix); // Rounded off pixel col
float ypixr = round(ypix); // Rounded off pixel row
if (xpixr >= 0 && xpixr < ncols && ypixr >= 0 && ypixr < nrows)
{
// Get the z-value at the pixel corresponding to this input point
long valo = b*is0 + ypixr*is2 + xpixr*is3; // y = row, x = col
float zo = *(output_data + 2*is1 + valo); // z at output
// If the z values do not match, this point is not visible. Else:
// Update the index map (at the output pixel)
if (zo == z)
{
// Set X and Y values to the interpolated pixel values
*(output_data + 0*is1 + valo) = xpix;
*(output_data + 1*is1 + valo) = ypix;
// Set index map value
long valim = b*iMs0 + ypixr*iMs2 + xpixr*iMs3; // y = row, x = col
*(indexMap_data + valim) = vali; // ID of input point for that pixel
}
}
}
/*
* Computes the gradient for the perspective projection + depth test function
*/
__global__ void projectionGradient(const float *input_data, const float *gradOutput_data,
const float *indexMap_data, float *gradInput_data,
const float fx, const float fy,
const int batchSize, const int nrows, const int ncols, const int npoints,
const int is0, const int is1, const int is2, const int is3,
const int iMs0, const int iMs1, const int iMs2, const int iMs3)
{
// Get the index of the point
int id = blockIdx.x * blockDim.x + threadIdx.x; // Since they are 1D only
if (id >= npoints) return;
// Get the batch, row and column indices
int b,r,c;
getCoordinates_1(id, nrows, ncols, b, r, c);
// Get the index map value (for that output pixel)
long valim = b*iMs0 + r*iMs2 + c*iMs3; // y = row, x = col
long vali = (long)(*(indexMap_data + valim));
if (vali == -1) return; // In case this point has no corresponding output index, return
// Get input point (from set of all input points)
float x = *(input_data + 0*is1 + vali);
float y = *(input_data + 1*is1 + vali);
float z = *(input_data + 2*is1 + vali);
// Get gradOutput value (for that output pixel)
long valgo = b*is0 + r*is2 + c*is3; // y = row, x = col
float gx = *(gradOutput_data + 0*is1 + valgo);
float gy = *(gradOutput_data + 1*is1 + valgo);
float gz = *(gradOutput_data + 2*is1 + valgo);
// Gradient w.r.t x = (fx/z) * gx
// Gradient w.r.t y = (fy/z) * gy
// Gradient w.r.t z = (-x/z^2) * fx * gx + (-y/z^2) * fy * gy + gz
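    // (Derivation, for reference: from xpix = fx*(x/z) + cx and ypix = fy*(y/z) + cy we get
    //  dxpix/dx = fx/z, dxpix/dz = -fx*x/z^2, dypix/dy = fy/z, dypix/dz = -fy*y/z^2, and the
    //  projected depth is z itself, which contributes gz directly.)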
*(gradInput_data + 0*is1 + vali) = (fx/z) * gx;
*(gradInput_data + 1*is1 + vali) = (fy/z) * gy;
*(gradInput_data + 2*is1 + vali) = ((-x/pow(z,2)) * fx * gx) + ((-y/pow(z,2)) * fy * gy) + gz;
}
// =============== FWD PASS ================== //
int Project3DPointsToSubPixelDepth_ForwardLauncher(const float *input, float *indexMap, float *output,
int batchSize, int nrows, int ncols,
float fx, float fy, float cx, float cy,
const long *is, const long *iMs,
cudaStream_t stream)
{
// Block and thread structure - we have one large set of points, so use 1d block/threads
long npoints = batchSize * nrows * ncols;
int numBlocks = ceil(npoints * (1.0/256));
dim3 blocks(numBlocks);
dim3 threads(256);
// Project the points and run the depth test first (parallelize across number of points)
projectPointsAndDepthTest <<< blocks, threads, 0, stream >>>(
input, output,
fx, fy, cx, cy,
batchSize, nrows, ncols, (int)npoints,
(int) is[0], (int) is[1], (int) is[2], (int)is[3]);
// Refine the output - only visible points get valid projections. Other points are all zeros.
refineOutput <<< blocks, threads, 0, stream >>>(
input, output, indexMap,
fx, fy, cx, cy,
batchSize, nrows, ncols, (int)npoints,
(int) is[0], (int) is[1], (int) is[2], (int)is[3],
(int) iMs[0], (int) iMs[1], (int) iMs[2], (int)iMs[3]);
// check for errors
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in Project3DPointsToSubPixelDepth_ForwardLauncher: %s\n", cudaGetErrorString(err));
assert(false);
}
return 1;
}
// =============== BWD PASS ================== //
int Project3DPointsToSubPixelDepth_BackwardLauncher(const float *input, const float *indexMap,
float *gradInput, const float *gradOutput,
int batchSize, int nrows, int ncols,
float fx, float fy, float cx, float cy,
const long *is, const long *iMs,
cudaStream_t stream)
{
// Block and thread structure - we have one large set of points, so use 1d block/threads
long npoints = batchSize * nrows * ncols;
int numBlocks = ceil(npoints * (1.0/256));
dim3 blocks(numBlocks);
dim3 threads(256);
// Run the kernel (parallelize across number of points)
projectionGradient <<< blocks, threads, 0, stream >>>(
input, gradOutput, indexMap, gradInput,
fx, fy,
batchSize, nrows, ncols, (int)npoints,
(int) is[0], (int) is[1], (int) is[2], (int)is[3],
(int) iMs[0], (int) iMs[1], (int) iMs[2], (int)iMs[3]);
// check for errors
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in Project3DPointsToSubPixelDepth_BackwardLauncher: %s\n", cudaGetErrorString(err));
assert(false);
}
return 1;
}
#ifdef __cplusplus
}
#endif
|
06e896fc8a8a0237045d125e0df7c298909549d8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<cuda.h>
#include<cuda_runtime.h>
void print_matrix(float *A,int m,int n)
{
for(int i =0;i<m;i++)
{
for(int j=0;j<n;j++)
printf("%.1f ",A[i*n+j]);
printf("\n");
}
}
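/*
 * swapReflect: each thread handles one column. First every even-indexed column is swapped with the
 * column to its right (this assumes an even number of columns), then the result is reflected across
 * the main diagonal, i.e. transposed in place (this step assumes a square matrix, m == n).
 */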
__global__ void swapReflect(float *input, float *output, int M, int N)
{
int j = threadIdx.x;
for(int i=0; i<M; i++)
{
if(j%2 == 0)
{
output[i*N+j] = input[i*N+j+1];
output[i*N+j+1] = input[i*N+j];
}
}
__syncthreads();
for(int i = 0; i<j; i++)
{
        float val = output[j*N + i];
output[j*N + i] = output[i*N + j];
output[i*N + j] = val;
}
}
int main(void)
{
hipError_t err = hipSuccess;
int t; // No of test Cases
scanf("%d", &t);
while(t--)
{
int m, n;
scanf("%d %d", &m, &n);
size_t size = m*n * sizeof(float);
//Allocate host input
float *h_input = (float*)malloc(size);
//Allocate host output
float *h_output = (float*)malloc(size);
// Verify that allocations succeeded
if (h_input == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input
for (int i = 0; i < n*m; ++i)
{
scanf("%f",&h_input[i]);
}
float *d_input = NULL, *d_output = NULL;
//Allocate device input
hipMalloc((void**)&d_input, size);
//Allocate device output
hipMalloc((void**)&d_output, size);
//Copy data from host to device
hipMemcpy(d_input, h_input, size, hipMemcpyHostToDevice);
hipMemcpy(d_output, h_output, size, hipMemcpyHostToDevice);
dim3 grid(1, 1, 1);
dim3 block(n, 1, 1);
hipLaunchKernelGGL(( swapReflect), dim3(grid), dim3(block), 0, 0, d_input, d_output, m, n);
err = hipGetLastError();
if(err != hipSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy output of device to host
hipMemcpy(h_output, d_output, size, hipMemcpyDeviceToHost);
print_matrix(h_output, m, n);
}
return 0;
}
|
06e896fc8a8a0237045d125e0df7c298909549d8.cu
|
#include<stdio.h>
#include<cuda.h>
#include<cuda_runtime.h>
void print_matrix(float *A,int m,int n)
{
for(int i =0;i<m;i++)
{
for(int j=0;j<n;j++)
printf("%.1f ",A[i*n+j]);
printf("\n");
}
}
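/*
 * swapReflect: each thread handles one column. First every even-indexed column is swapped with the
 * column to its right (this assumes an even number of columns), then the result is reflected across
 * the main diagonal, i.e. transposed in place (this step assumes a square matrix, m == n).
 */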
__global__ void swapReflect(float *input, float *output, int M, int N)
{
int j = threadIdx.x;
for(int i=0; i<M; i++)
{
if(j%2 == 0)
{
output[i*N+j] = input[i*N+j+1];
output[i*N+j+1] = input[i*N+j];
}
}
__syncthreads();
for(int i = 0; i<j; i++)
{
        float val = output[j*N + i];
output[j*N + i] = output[i*N + j];
output[i*N + j] = val;
}
}
int main(void)
{
cudaError_t err = cudaSuccess;
int t; // No of test Cases
scanf("%d", &t);
while(t--)
{
int m, n;
scanf("%d %d", &m, &n);
size_t size = m*n * sizeof(float);
//Allocate host input
float *h_input = (float*)malloc(size);
//Allocate host output
float *h_output = (float*)malloc(size);
// Verify that allocations succeeded
if (h_input == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input
for (int i = 0; i < n*m; ++i)
{
scanf("%f",&h_input[i]);
}
float *d_input = NULL, *d_output = NULL;
//Allocate device input
cudaMalloc((void**)&d_input, size);
//Allocate device output
cudaMalloc((void**)&d_output, size);
//Copy data from host to device
cudaMemcpy(d_input, h_input, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_output, h_output, size, cudaMemcpyHostToDevice);
dim3 grid(1, 1, 1);
dim3 block(n, 1, 1);
swapReflect<<<grid, block>>>(d_input, d_output, m, n);
err = cudaGetLastError();
if(err != cudaSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy output of device to host
cudaMemcpy(h_output, d_output, size, cudaMemcpyDeviceToHost);
print_matrix(h_output, m, n);
}
return 0;
}
|
0e9fecc8e396fa0f7a3edce945467a88d4fd4392.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "MandelbrotExplorerCuda.cuh"
__global__
void cudaFractalLoop(int *buffer, int iterations, double scaleX, double scaleY, double offsetX, double offsetY) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int it = 0;
double coordX = (double)(col) / scaleX + offsetX;
double coordY = (double)(row) / scaleY + offsetY;
double x = 0.0f;
double y = 0.0f;
double xtemp;
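    // Standard Mandelbrot escape-time iteration: z <- z^2 + c with c = (coordX, coordY),
    // counting iterations until |z|^2 exceeds 4 or the iteration cap is reached.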
while (x * x + y * y <= 4 && it < iterations)
{
xtemp = x * x - y * y + coordX;
y = 2 * x * y + coordY;
x = xtemp;
it++;
}
int index = row * WIDTH + col;
buffer[index] = it;
}
__global__
void cudaJuliaLoop(int* buffer, int iterations, double scaleX, double scaleY, double offsetX, double offsetY) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int it = 0;
double coordX = (double)(col) / scaleX + offsetX;
double coordY = (double)(row) / scaleY + offsetY;
double x = 0.0f;
double y = 0.0f;
double xtemp;
    while (coordX * coordX + coordY * coordY < 4 && it < iterations) {
xtemp = coordX * coordX - coordY * coordY;
coordY = 2 * coordX * coordY + 0.58;
coordX = xtemp + 0.282;
it++;
}
int index = row * WIDTH + col;
buffer[index] = it;
}
void MandelbrotExplorer::cudaSimpleFractal(int tlX, int tlY, int brX, int brY, double f_tlX, double f_tlY, double f_brX, double f_brY, int iterations)
{
dim3 block_size(16, 16);
dim3 grid_size(WIDTH / block_size.x, HEIGHT / block_size.y);
hipLaunchKernelGGL(( cudaFractalLoop), dim3(grid_size), dim3(block_size), 0, 0, m_pFractal, iterations, vScale.x, vScale.y, vOffset.x, vOffset.y);
hipDeviceSynchronize();
}
void MandelbrotExplorer::cudaSimpleJulia(int tlX, int tlY, int brX, int brY, double f_tlX, double f_tlY, double f_brX, double f_brY, int iterations)
{
dim3 block_size(16, 16);
dim3 grid_size(WIDTH / block_size.x, HEIGHT / block_size.y);
    hipLaunchKernelGGL(( cudaJuliaLoop), dim3(grid_size), dim3(block_size), 0, 0, m_pFractal, iterations, vScale.x, vScale.y, vOffset.x, vOffset.y);
hipDeviceSynchronize();
}
bool MandelbrotExplorer::OnUserCreate()
{
hipMallocManaged(&m_pFractal, size_t(ScreenWidth()) * size_t(ScreenHeight()) * sizeof(int));
m_pVector = new std::vector<std::vector<int>> (ScreenWidth(), std::vector<int>(ScreenHeight()));
for (int i = 0; i < ScreenWidth(); i++) {
for (int j = 0; j < ScreenHeight(); j++) {
m_pVector->at(i).at(j) = 0;
}
}
return true;
}
bool MandelbrotExplorer::OnUserDestroy()
{
hipFree(m_pFractal);
return true;
}
bool MandelbrotExplorer::OnUserUpdate(float fElapsedTime)
{
olc::vd2d vMouse = { (double)GetMouseX(), (double)GetMouseY() };
if (GetMouse(0).bPressed)
{
vStartPan = vMouse;
}
if (GetMouse(0).bHeld)
{
vOffset -= (vMouse - vStartPan) / vScale;
vStartPan = vMouse;
}
olc::vd2d vMouseBeforeZoom;
ScreenToWorld(vMouse, vMouseBeforeZoom);
if (GetKey(olc::Key::Q).bHeld || GetMouseWheel() > 0) vScale *= 1.1;
if (GetKey(olc::Key::A).bHeld || GetMouseWheel() < 0) vScale *= 0.9;
if (GetKey(olc::Key::K).bPressed) m_bDebug = !m_bDebug;
if (GetKey(olc::Key::J).bPressed) m_Set = 1;
if (GetKey(olc::Key::M).bPressed) m_Set = 0;
if (GetKey(olc::Key::K0).bPressed) m_Mode = 0;
if (GetKey(olc::Key::K1).bPressed) m_Mode = 1;
if (GetKey(olc::Key::K2).bPressed) m_Mode = 2;
if (GetKey(olc::Key::K7).bPressed) m_drawMode = 0;
if (GetKey(olc::Key::K8).bPressed) m_drawMode = 1;
if (GetKey(olc::Key::K9).bPressed) m_drawMode = 2;
if (GetKey(olc::UP).bPressed) m_Iterations += 64;
if (GetKey(olc::DOWN).bPressed) m_Iterations -= 64;
if (m_Iterations < 64) m_Iterations = 64;
olc::vd2d vMouseAfterZoom;
ScreenToWorld(vMouse, vMouseAfterZoom);
vOffset += (vMouseBeforeZoom - vMouseAfterZoom);
olc::vi2d c_pixTopLeft = { 0,0 };
olc::vi2d c_pixBotRight = { ScreenWidth(), ScreenHeight() };
olc::vd2d c_fracTopLeft = { -2.0, -1.0 };
olc::vd2d c_fracBotRight = { 1.0, 1.0 };
ScreenToWorld(c_pixTopLeft, c_fracTopLeft);
ScreenToWorld(c_pixBotRight, c_fracBotRight);
//Start timer
auto tp1 = std::chrono::high_resolution_clock::now();
switch (m_Set) {
case 0:
switch (m_Mode) {
case 0:
simpleFractal(c_pixTopLeft, c_pixBotRight, c_fracTopLeft, c_fracBotRight, m_Iterations);
break;
case 1:
fractalWithThreads(c_pixTopLeft, c_pixBotRight, c_fracTopLeft, c_fracBotRight, m_Iterations);
break;
case 2:
int tlX, tlY, brX, brY;
double f_tlX, f_tlY, f_brX, f_brY;
tlX = c_pixTopLeft.x;
tlY = c_pixTopLeft.y;
brX = c_pixBotRight.x;
brY = c_pixBotRight.y;
f_tlX = c_fracTopLeft.x;
f_tlY = c_fracTopLeft.y;
f_brX = c_fracBotRight.x;
f_brY = c_fracBotRight.y;
cudaSimpleFractal(tlX, tlY, brX, brY, f_tlX, f_tlY, f_brX, f_brY, m_Iterations);
break;
}
break;
case 1:
int tlX, tlY, brX, brY;
double f_tlX, f_tlY, f_brX, f_brY;
tlX = c_pixTopLeft.x;
tlY = c_pixTopLeft.y;
brX = c_pixBotRight.x;
brY = c_pixBotRight.y;
f_tlX = c_fracTopLeft.x;
f_tlY = c_fracTopLeft.y;
f_brX = c_fracBotRight.x;
f_brY = c_fracBotRight.y;
cudaSimpleJulia(tlX, tlY, brX, brY, f_tlX, f_tlY, f_brX, f_brY, m_Iterations);
break;
}
for (int x = 0; x < ScreenWidth(); x++)
{
for (int y = 0; y < ScreenHeight(); y++)
{
float a = 0.1f;
float n;
if (m_Mode != 2) {
n = m_pVector->at(x).at(y);
}
else {
n = (float)m_pFractal[y * ScreenWidth() + x];
}
switch (m_drawMode) {
case 0:
Draw(x, y, olc::PixelF(0.5f * sin(a * n) + 0.5f, 0.5f * sin(a * n + 2.094f) + 0.5f, 0.5f * sin(a * n + 4.188f) + 0.5f));
break;
case 1:
Draw(x, y, olc::PixelF(255 - n, 255 - n, 255 - n));
break;
            case 2:
                Draw(x, y, olc::PixelF(n, n, n));
                break;
}
}
}
auto tp2 = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsedTime = tp2 - tp1;
if (m_bDebug) {
DrawString(0, 20, "Time Taken: " + std::to_string(elapsedTime.count()) + "s", olc::WHITE, 2);
DrawString(0, 70, "Iterations: " + std::to_string(m_Iterations));
DrawString(0, 80, "Top Left Fractal: " + std::to_string(c_fracTopLeft.x) + " - " + std::to_string(c_fracTopLeft.y));
DrawString(0, 90, "Bot Right Fractal: " + std::to_string(c_fracBotRight.x) + " - " + std::to_string(c_fracBotRight.y));
DrawString(0, 100, "Top Left Fractal: " + std::to_string(c_pixTopLeft.x) + " - " + std::to_string(c_pixTopLeft.y));
DrawString(0, 110, "Bot Right Fractal: " + std::to_string(c_pixBotRight.x) + " - " + std::to_string(c_pixBotRight.y));
if (m_Set == 0) {
DrawString(0, 50, "Mandelbrot Set ");
if (m_Mode == 0) {
DrawString(0, 60, "Mode: Simple");
}
else if (m_Mode == 1) {
DrawString(0, 60, "Mode: Threads");
}
else {
DrawString(0, 60, "Mode: Cuda");
}
}
else {
DrawString(0, 60, "Julia Set ");
}
}
return true;
}
void MandelbrotExplorer::simpleFractal(const olc::vi2d& t_pixTopLeft, const olc::vi2d& t_pixBotRight, const olc::vd2d& t_fracTopLeft, const olc::vd2d& t_fracBotRight, const int& iterations)
{
double x0 = (t_fracBotRight.x - t_fracTopLeft.x) / (double(t_pixBotRight.x) - double(t_pixTopLeft.x));
double y0 = (t_fracBotRight.y - t_fracTopLeft.y) / (double(t_pixBotRight.y) - double(t_pixTopLeft.y));
double coordX = t_fracTopLeft.x, coordY = t_fracTopLeft.y;
double x = 0, y = 0, cIteration = 0, xtemp;
for (int i = t_pixTopLeft.x; i < t_pixBotRight.x; i++) {
coordY = t_fracTopLeft.y;
for (int j = t_pixTopLeft.y; j < t_pixBotRight.y; j++) {
x = 0;
y = 0;
cIteration = 0;
while (x * x + y * y <= 4 && cIteration < iterations)
{
xtemp = x * x - y * y + coordX;
y = 2 * x * y + coordY;
x = xtemp;
cIteration += 1;
}
m_pVector->at(i).at(j) = cIteration;
coordY += y0;
}
coordX += x0;
}
}
void MandelbrotExplorer::fractalWithThreads(const olc::vi2d& t_pixTopLeft, const olc::vi2d& t_pixBotRight, const olc::vd2d& t_fracTopLeft, const olc::vd2d& t_fracBotRight, const int& iterations)
{
const int maxThreads = 32;
std::thread threads[maxThreads];
int widthFactor = (t_pixBotRight.x - t_pixTopLeft.x) / maxThreads;
double fracWidthFactor = (t_fracBotRight.x - t_fracTopLeft.x) / double(maxThreads);
for (int i = 0; i < maxThreads; i++) {
threads[i] = std::thread(&MandelbrotExplorer::simpleFractal, this,
olc::vi2d(t_pixTopLeft.x + widthFactor * (i), t_pixTopLeft.y),
olc::vi2d(t_pixTopLeft.x + widthFactor * (i + 1), t_pixBotRight.y),
olc::vd2d(t_fracTopLeft.x + fracWidthFactor * double(i), t_fracTopLeft.y),
olc::vd2d(t_fracTopLeft.x + fracWidthFactor * double(i + 1), t_fracBotRight.y),
iterations);
}
for (int i = 0; i < maxThreads; i++) {
threads[i].join();
}
}
|
0e9fecc8e396fa0f7a3edce945467a88d4fd4392.cu
|
#include "MandelbrotExplorerCuda.cuh"
__global__
void cudaFractalLoop(int *buffer, int iterations, double scaleX, double scaleY, double offsetX, double offsetY) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int it = 0;
double coordX = (double)(col) / scaleX + offsetX;
double coordY = (double)(row) / scaleY + offsetY;
double x = 0.0f;
double y = 0.0f;
double xtemp;
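    // Standard Mandelbrot escape-time iteration: z <- z^2 + c with c = (coordX, coordY),
    // counting iterations until |z|^2 exceeds 4 or the iteration cap is reached.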
while (x * x + y * y <= 4 && it < iterations)
{
xtemp = x * x - y * y + coordX;
y = 2 * x * y + coordY;
x = xtemp;
it++;
}
int index = row * WIDTH + col;
buffer[index] = it;
}
__global__
void cudaJuliaLoop(int* buffer, int iterations, double scaleX, double scaleY, double offsetX, double offsetY) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int it = 0;
double coordX = (double)(col) / scaleX + offsetX;
double coordY = (double)(row) / scaleY + offsetY;
double x = 0.0f;
double y = 0.0f;
double xtemp;
    while (coordX * coordX + coordY * coordY < 4 && it < iterations) {
xtemp = coordX * coordX - coordY * coordY;
coordY = 2 * coordX * coordY + 0.58;
coordX = xtemp + 0.282;
it++;
}
int index = row * WIDTH + col;
buffer[index] = it;
}
void MandelbrotExplorer::cudaSimpleFractal(int tlX, int tlY, int brX, int brY, double f_tlX, double f_tlY, double f_brX, double f_brY, int iterations)
{
dim3 block_size(16, 16);
dim3 grid_size(WIDTH / block_size.x, HEIGHT / block_size.y);
cudaFractalLoop<<<grid_size, block_size>>>(m_pFractal, iterations, vScale.x, vScale.y, vOffset.x, vOffset.y);
cudaDeviceSynchronize();
}
void MandelbrotExplorer::cudaSimpleJulia(int tlX, int tlY, int brX, int brY, double f_tlX, double f_tlY, double f_brX, double f_brY, int iterations)
{
dim3 block_size(16, 16);
dim3 grid_size(WIDTH / block_size.x, HEIGHT / block_size.y);
    cudaJuliaLoop<<<grid_size, block_size>>>(m_pFractal, iterations, vScale.x, vScale.y, vOffset.x, vOffset.y);
cudaDeviceSynchronize();
}
bool MandelbrotExplorer::OnUserCreate()
{
cudaMallocManaged(&m_pFractal, size_t(ScreenWidth()) * size_t(ScreenHeight()) * sizeof(int));
m_pVector = new std::vector<std::vector<int>> (ScreenWidth(), std::vector<int>(ScreenHeight()));
for (int i = 0; i < ScreenWidth(); i++) {
for (int j = 0; j < ScreenHeight(); j++) {
m_pVector->at(i).at(j) = 0;
}
}
return true;
}
bool MandelbrotExplorer::OnUserDestroy()
{
cudaFree(m_pFractal);
return true;
}
bool MandelbrotExplorer::OnUserUpdate(float fElapsedTime)
{
olc::vd2d vMouse = { (double)GetMouseX(), (double)GetMouseY() };
if (GetMouse(0).bPressed)
{
vStartPan = vMouse;
}
if (GetMouse(0).bHeld)
{
vOffset -= (vMouse - vStartPan) / vScale;
vStartPan = vMouse;
}
olc::vd2d vMouseBeforeZoom;
ScreenToWorld(vMouse, vMouseBeforeZoom);
if (GetKey(olc::Key::Q).bHeld || GetMouseWheel() > 0) vScale *= 1.1;
if (GetKey(olc::Key::A).bHeld || GetMouseWheel() < 0) vScale *= 0.9;
if (GetKey(olc::Key::K).bPressed) m_bDebug = !m_bDebug;
if (GetKey(olc::Key::J).bPressed) m_Set = 1;
if (GetKey(olc::Key::M).bPressed) m_Set = 0;
if (GetKey(olc::Key::K0).bPressed) m_Mode = 0;
if (GetKey(olc::Key::K1).bPressed) m_Mode = 1;
if (GetKey(olc::Key::K2).bPressed) m_Mode = 2;
if (GetKey(olc::Key::K7).bPressed) m_drawMode = 0;
if (GetKey(olc::Key::K8).bPressed) m_drawMode = 1;
if (GetKey(olc::Key::K9).bPressed) m_drawMode = 2;
if (GetKey(olc::UP).bPressed) m_Iterations += 64;
if (GetKey(olc::DOWN).bPressed) m_Iterations -= 64;
if (m_Iterations < 64) m_Iterations = 64;
olc::vd2d vMouseAfterZoom;
ScreenToWorld(vMouse, vMouseAfterZoom);
vOffset += (vMouseBeforeZoom - vMouseAfterZoom);
olc::vi2d c_pixTopLeft = { 0,0 };
olc::vi2d c_pixBotRight = { ScreenWidth(), ScreenHeight() };
olc::vd2d c_fracTopLeft = { -2.0, -1.0 };
olc::vd2d c_fracBotRight = { 1.0, 1.0 };
ScreenToWorld(c_pixTopLeft, c_fracTopLeft);
ScreenToWorld(c_pixBotRight, c_fracBotRight);
//Start timer
auto tp1 = std::chrono::high_resolution_clock::now();
switch (m_Set) {
case 0:
switch (m_Mode) {
case 0:
simpleFractal(c_pixTopLeft, c_pixBotRight, c_fracTopLeft, c_fracBotRight, m_Iterations);
break;
case 1:
fractalWithThreads(c_pixTopLeft, c_pixBotRight, c_fracTopLeft, c_fracBotRight, m_Iterations);
break;
case 2:
int tlX, tlY, brX, brY;
double f_tlX, f_tlY, f_brX, f_brY;
tlX = c_pixTopLeft.x;
tlY = c_pixTopLeft.y;
brX = c_pixBotRight.x;
brY = c_pixBotRight.y;
f_tlX = c_fracTopLeft.x;
f_tlY = c_fracTopLeft.y;
f_brX = c_fracBotRight.x;
f_brY = c_fracBotRight.y;
cudaSimpleFractal(tlX, tlY, brX, brY, f_tlX, f_tlY, f_brX, f_brY, m_Iterations);
break;
}
break;
case 1:
int tlX, tlY, brX, brY;
double f_tlX, f_tlY, f_brX, f_brY;
tlX = c_pixTopLeft.x;
tlY = c_pixTopLeft.y;
brX = c_pixBotRight.x;
brY = c_pixBotRight.y;
f_tlX = c_fracTopLeft.x;
f_tlY = c_fracTopLeft.y;
f_brX = c_fracBotRight.x;
f_brY = c_fracBotRight.y;
cudaSimpleJulia(tlX, tlY, brX, brY, f_tlX, f_tlY, f_brX, f_brY, m_Iterations);
break;
}
for (int x = 0; x < ScreenWidth(); x++)
{
for (int y = 0; y < ScreenHeight(); y++)
{
float a = 0.1f;
float n;
if (m_Mode != 2) {
n = m_pVector->at(x).at(y);
}
else {
n = (float)m_pFractal[y * ScreenWidth() + x];
}
switch (m_drawMode) {
case 0:
Draw(x, y, olc::PixelF(0.5f * sin(a * n) + 0.5f, 0.5f * sin(a * n + 2.094f) + 0.5f, 0.5f * sin(a * n + 4.188f) + 0.5f));
break;
case 1:
Draw(x, y, olc::PixelF(255 - n, 255 - n, 255 - n));
break;
            case 2:
                Draw(x, y, olc::PixelF(n, n, n));
                break;
}
}
}
auto tp2 = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsedTime = tp2 - tp1;
if (m_bDebug) {
DrawString(0, 20, "Time Taken: " + std::to_string(elapsedTime.count()) + "s", olc::WHITE, 2);
DrawString(0, 70, "Iterations: " + std::to_string(m_Iterations));
DrawString(0, 80, "Top Left Fractal: " + std::to_string(c_fracTopLeft.x) + " - " + std::to_string(c_fracTopLeft.y));
DrawString(0, 90, "Bot Right Fractal: " + std::to_string(c_fracBotRight.x) + " - " + std::to_string(c_fracBotRight.y));
DrawString(0, 100, "Top Left Fractal: " + std::to_string(c_pixTopLeft.x) + " - " + std::to_string(c_pixTopLeft.y));
DrawString(0, 110, "Bot Right Fractal: " + std::to_string(c_pixBotRight.x) + " - " + std::to_string(c_pixBotRight.y));
if (m_Set == 0) {
DrawString(0, 50, "Mandelbrot Set ");
if (m_Mode == 0) {
DrawString(0, 60, "Mode: Simple");
}
else if (m_Mode == 1) {
DrawString(0, 60, "Mode: Threads");
}
else {
DrawString(0, 60, "Mode: Cuda");
}
}
else {
DrawString(0, 60, "Julia Set ");
}
}
return true;
}
void MandelbrotExplorer::simpleFractal(const olc::vi2d& t_pixTopLeft, const olc::vi2d& t_pixBotRight, const olc::vd2d& t_fracTopLeft, const olc::vd2d& t_fracBotRight, const int& iterations)
{
double x0 = (t_fracBotRight.x - t_fracTopLeft.x) / (double(t_pixBotRight.x) - double(t_pixTopLeft.x));
double y0 = (t_fracBotRight.y - t_fracTopLeft.y) / (double(t_pixBotRight.y) - double(t_pixTopLeft.y));
double coordX = t_fracTopLeft.x, coordY = t_fracTopLeft.y;
double x = 0, y = 0, cIteration = 0, xtemp;
for (int i = t_pixTopLeft.x; i < t_pixBotRight.x; i++) {
coordY = t_fracTopLeft.y;
for (int j = t_pixTopLeft.y; j < t_pixBotRight.y; j++) {
x = 0;
y = 0;
cIteration = 0;
while (x * x + y * y <= 4 && cIteration < iterations)
{
xtemp = x * x - y * y + coordX;
y = 2 * x * y + coordY;
x = xtemp;
cIteration += 1;
}
m_pVector->at(i).at(j) = cIteration;
coordY += y0;
}
coordX += x0;
}
}
void MandelbrotExplorer::fractalWithThreads(const olc::vi2d& t_pixTopLeft, const olc::vi2d& t_pixBotRight, const olc::vd2d& t_fracTopLeft, const olc::vd2d& t_fracBotRight, const int& iterations)
{
const int maxThreads = 32;
std::thread threads[maxThreads];
int widthFactor = (t_pixBotRight.x - t_pixTopLeft.x) / maxThreads;
double fracWidthFactor = (t_fracBotRight.x - t_fracTopLeft.x) / double(maxThreads);
for (int i = 0; i < maxThreads; i++) {
threads[i] = std::thread(&MandelbrotExplorer::simpleFractal, this,
olc::vi2d(t_pixTopLeft.x + widthFactor * (i), t_pixTopLeft.y),
olc::vi2d(t_pixTopLeft.x + widthFactor * (i + 1), t_pixBotRight.y),
olc::vd2d(t_fracTopLeft.x + fracWidthFactor * double(i), t_fracTopLeft.y),
olc::vd2d(t_fracTopLeft.x + fracWidthFactor * double(i + 1), t_fracBotRight.y),
iterations);
}
for (int i = 0; i < maxThreads; i++) {
threads[i].join();
}
}
|
35325386d97a57a51a258c406b2529308e13aaff.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "../NativeOps.h"
#include <hip/hip_runtime.h>
#include <cuda_launch_config.h>
#include <buffer.h>
#include <shape.h>
#include <reduce3.h>
#include <reduce.h>
#include <indexreduce.h>
#include <pairwise_transform.h>
#include <transform.h>
#include <scalar.h>
#include <broadcasting.h>
#include <summarystatsreduce.h>
#include <thread>
#include <map>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <cuda_device_runtime_api.h>
#include <pointercast.h>
#include <stdio.h>
hipDeviceProp_t *deviceProperties;
hipFuncAttributes *funcAttributes = new hipFuncAttributes[28];
template <typename T>
dim3 getOptimalDimensions(int n,hipFuncAttributes attributes, hipDeviceProp_t properties) {
// we can combine the two to compute a block size
int num_threads = block_size_with_maximum_potential_occupancy(attributes, properties);
// no real sense launching more threads, then number of elements we have
if (num_threads > n) num_threads = n;
// compute the number of blocks of size num_threads to launch
int num_blocks = n / num_threads;
// check for partial block at the end
if(n % num_threads) ++num_blocks;
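    // Note: the dim3 returned below is not a launch geometry; it packs (grid size, block size,
    // shared-memory bytes) into .x, .y and .z, and the kernel launches in this file unpack it that way.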
return dim3(num_blocks,num_threads, (num_threads * sizeof(T)) + (attributes.sharedSizeBytes < 1024 ? 1024 : attributes.sharedSizeBytes));
}
/**
* Returns optimal launch parameters
* given the extra pointers passed in.
* The extra pointer should be
* the host pointer for the shape information
* associated with the data.
* From there it is used to obtain the length
* from which we can derive the optimal launch parameters.
*
*/
template <typename T>
dim3 getOptimalLaunchParameters(Nd4jPointer *extraPointers, hipFuncAttributes attributes, hipDeviceProp_t properties) {
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int n = shape::length(hostXShapeInfo);
dim3 launchDims = getOptimalDimensions<T>(n,attributes, properties);
//printf("Params: gridSize: [%i], blockSize: [%i], shMem: [%i], problemLength: [%i], totalThreads:[%i]\n", launchDims.x, launchDims.y, launchDims.z, n, (launchDims.x * launchDims.y));
return launchDims;
}
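/*
 * Layout of the extraPointers array, as inferred from the call sites in this file:
 * extraPointers[0] = host-side shape information buffer, extraPointers[1] = the stream handle,
 * extraPointers[2] = the device ordinal used to index deviceProperties.
 */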
nd4j::buffer::Buffer<int> * createScalarBuffer(hipStream_t stream) {
int *scalarShapeInfo = shape::createScalarShapeInfo();
nd4j::buffer::Buffer<int> *buff = nd4j::buffer::createBuffer(scalarShapeInfo,shape::shapeInfoLength(2), stream);
nd4j::buffer::copyDataToGpu(&buff, stream);
return buff;
}
class ScalarShapeInformation {
private:
nd4j::buffer::Buffer<int> *scalarDimension;
nd4j::buffer::Buffer<int> *scalarShapeInfo;
std::thread::id threadId;
public:
ScalarShapeInformation(hipStream_t stream) {
int *scalarDimensionBuff = (int *) malloc(sizeof(int));
scalarDimensionBuff[0] = shape::MAX_DIMENSION;
scalarDimension = nd4j::buffer::createBuffer(scalarDimensionBuff,1, stream);
scalarShapeInfo = createScalarBuffer(stream);
threadId = std::this_thread::get_id();
}
~ScalarShapeInformation() {
nd4j::buffer::freeBuffer(&scalarShapeInfo);
nd4j::buffer::freeBuffer(&scalarDimension);
}
int *getShapeInfoHostPointer() {
return scalarShapeInfo->data;
}
int * getShapeInfoGpuPointer() {
return scalarShapeInfo->gData;
}
int * getDimensionHostPointer() {
return scalarDimension->data;
}
int * getDimensionGpuPointer() {
return scalarDimension->gData;
}
};
template <typename T>
class ScalarInfo {
nd4j::buffer::Buffer<T> *scalarData;
ScalarShapeInformation *shapeInfo;
T finalResult;
hipStream_t streamRef;
public:
ScalarInfo(hipStream_t stream) {
T *scalarResult = (T*)malloc(sizeof(T));
shapeInfo = new ScalarShapeInformation(stream);
scalarData = nd4j::buffer::createBuffer(scalarResult,1, stream);
streamRef = stream;
nd4j::buffer::copyDataToGpu(&scalarData, stream);
}
T getFinalResultFromDevice() {
nd4j::buffer::copyDataFromGpu(&scalarData, streamRef);
return scalarData->data[0];
}
/**
* Get the device shape information
* representing a scalar
*/
int *getDeviceShapeInfo() {
return shapeInfo->getShapeInfoGpuPointer();
}
/**
* Get the result pointers
*/
T *getDevicePointer() {
return scalarData->gData;
}
/**
* Get the infinite dimension device pointer
*/
int *getDimensionDevicePointer() {
return shapeInfo->getDimensionGpuPointer();
}
~ScalarInfo() {
nd4j::buffer::freeBuffer(&scalarData);
delete shapeInfo;
}
};
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
*/
double NativeOps::execIndexReduceScalarDouble(Nd4jPointer *extraPointers,int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams) {
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[27], deviceProperties[(int) extraPointers[2]]);
ScalarInfo<double> *scalarInfo = new ScalarInfo<double>(*stream);
hipLaunchKernelGGL(( indexReduceDouble), dim3(1),dim3(launchDims.y),launchDims.z * 4, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
scalarInfo->getDevicePointer(),
scalarInfo->getDeviceShapeInfo(),
scalarInfo->getDimensionDevicePointer(),
1,
1);
checkCudaErrors(hipStreamSynchronize(*stream));
double result = scalarInfo->getFinalResultFromDevice();
delete scalarInfo;
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfoBuffer
* @param dimension
* @param dimensionLength
*/
void NativeOps::execIndexReduceDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension, int dimensionLength) {
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[27], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( indexReduceDouble), dim3(1),dim3(launchDims.y),launchDims.z * 2, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execBroadcastDouble(Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer dimension, int dimensionLength){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *yPointer = reinterpret_cast<double *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[26], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( broadcastDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xStride
* @param y
* @param yStride
* @param result
* @param resultStride
* @param extraParams
* @param n
*/
void NativeOps::execPairwiseTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
int xStride,
Nd4jPointer y,
int yStride,
Nd4jPointer result,
int resultStride,
Nd4jPointer extraParams, int n) {
double *xPointer = reinterpret_cast<double *>(dx);
double *yPointer = reinterpret_cast<double *>(y);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[25], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( pairWiseTransformStridedDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
n,
xPointer,
yPointer,
xStride,
yStride,
extraParamsPointer,
resultPointer,
resultStride);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
* @param xIndexes
* @param yIndexes
* @param resultIndexes
*/
void NativeOps::execPairwiseTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer xIndexes,
Nd4jPointer yIndexes,
Nd4jPointer resultIndexes) {
double *xPointer = reinterpret_cast<double *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *yPointer = reinterpret_cast<double *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *xIndexesPointer = reinterpret_cast<int *>(xIndexes);
int *yIndexesPointer = reinterpret_cast<int *>(yIndexes);
int *resultIndexesPointer = reinterpret_cast<int *>(resultIndexes);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[24], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( pairWiseTransformDoubleIndex) , dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream,
opNum,
xPointer,
yPointer,
extraParamsPointer,
resultPointer,
xShapeInfoPointer,
yShapeInfoPointer,
resultShapeInfoPointer,
xIndexesPointer,
yIndexesPointer,
resultIndexesPointer);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execPairwiseTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams) {
double *xPointer = reinterpret_cast<double *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *yPointer = reinterpret_cast<double *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[23], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( pairWiseTransformDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
yPointer,
extraParamsPointer,
resultPointer,
xShapeInfoPointer,
yShapeInfoPointer,
resultShapeInfoPointer);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduceDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo) {
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[22], deviceProperties[(int) extraPointers[2]]);
ScalarInfo<double> *scalarInfo = new ScalarInfo<double>(*stream);
hipLaunchKernelGGL(( reduceDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer
,extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
scalarInfo->getDimensionDevicePointer(),
1,
1);
checkCudaErrors(hipStreamSynchronize(*stream));
delete scalarInfo;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduceDouble(
Nd4jPointer *extraPointers
,int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer dimension,
int dimensionLength) {
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[22], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( reduceDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer
,extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @return
*/
double NativeOps::execReduceScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[22], deviceProperties[(int) extraPointers[2]]);
ScalarInfo<double> *scalarInfo = new ScalarInfo<double>(*stream);
hipLaunchKernelGGL(( reduceDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer
,extraParamsPointer,
scalarInfo->getDevicePointer(),
scalarInfo->getDeviceShapeInfo(),
scalarInfo->getDimensionDevicePointer(),
1,
1);
checkCudaErrors(hipStreamSynchronize(*stream));
double result = scalarInfo->getFinalResultFromDevice();
delete scalarInfo;
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduce3Double(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo) {
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *yPointer = reinterpret_cast<double *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParamsVals);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[21], deviceProperties[(int) extraPointers[2]]);
ScalarInfo<double> *scalarInfo = new ScalarInfo<double>(*stream);
hipLaunchKernelGGL(( reduce3Double), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
scalarInfo->getDimensionDevicePointer(),
1,
1);
checkCudaErrors(hipStreamSynchronize(*stream));
delete scalarInfo;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
*/
double NativeOps::execReduce3ScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *yPointer = reinterpret_cast<double *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParamsVals);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[21], deviceProperties[(int) extraPointers[2]]);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
ScalarInfo<double> *scalarInfo = new ScalarInfo<double>(*stream);
hipLaunchKernelGGL(( reduce3Double), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
scalarInfo->getDevicePointer(),
scalarInfo->getDeviceShapeInfo(),
scalarInfo->getDimensionDevicePointer(),
1,
1);
checkCudaErrors(hipStreamSynchronize(*stream));
double result = scalarInfo->getFinalResultFromDevice();
delete scalarInfo;
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfoBuffer
* @param dimension
* @param dimensionLength
*/
void NativeOps::execReduce3Double(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension,
int dimensionLength){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *yPointer = reinterpret_cast<double *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
double *extraParamsPointer = reinterpret_cast<double *>(extraParamsVals);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[21], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( reduce3Double), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xStride
* @param result
* @param resultStride
* @param scalar
* @param extraParams
* @param n
*/
void NativeOps::execScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
int xStride,
Nd4jPointer result,
int resultStride,
double scalar,
Nd4jPointer extraParams,
int n) {
double *xPointer = reinterpret_cast<double *>(x);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[20], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( scalarDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
n,
scalar,
xPointer,
xStride,
extraParamsPointer,
resultPointer,resultStride);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param scalar
* @param extraParams
* @param n
*/
void NativeOps::execScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
double scalar,
Nd4jPointer extraParams){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[19], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( scalarDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
scalar,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,resultShapeInfoPointer);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param scalar
* @param extraParams
* @param n
* @param xIndexes
* @param resultIndexes
*/
void NativeOps::execScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
double scalar,
Nd4jPointer extraParams,
int n,
Nd4jPointer xIndexes,
Nd4jPointer resultIndexes){
double *xPointer = reinterpret_cast<double *>(x);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *resultIndexesPointer = reinterpret_cast<int *>(resultIndexes);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[18], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( scalarDoubleIndexes), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
n,
scalar,
xPointer,
extraParamsPointer,
resultPointer,
resultIndexesPointer);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
*/
double NativeOps::execSummaryStatsScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,bool biasCorrected){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
ScalarInfo<double> *scalarShapeInformation = new ScalarInfo<double>(*stream);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[17], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( summaryStatsReduceDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z * 10, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
scalarShapeInformation->getDevicePointer(),
scalarShapeInformation->getDeviceShapeInfo(),
scalarShapeInformation->getDimensionDevicePointer(),
1,
1,biasCorrected);
checkCudaErrors(hipStreamSynchronize(*stream));
double result = scalarShapeInformation->getFinalResultFromDevice();
delete scalarShapeInformation;
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execSummaryStatsDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,bool biasCorrected) {
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[17], deviceProperties[(int) extraPointers[2]]);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
ScalarInfo<double> *scalarShapeInformation = new ScalarInfo<double>(*stream);
hipLaunchKernelGGL(( summaryStatsReduceDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
scalarShapeInformation->getDimensionDevicePointer(),
1,
1,biasCorrected);
checkCudaErrors(hipStreamSynchronize(*stream));
delete scalarShapeInformation;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfoBuffer
* @param dimension
* @param dimensionLength
*/
void NativeOps::execSummaryStatsDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension, int dimensionLength,bool biasCorrected){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[17], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( summaryStatsReduceDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1,biasCorrected);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xStride
* @param result
* @param resultStride
* @param extraParams
* @param n
*/
void NativeOps::execTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
int xStride,
Nd4jPointer result,
int resultStride,
Nd4jPointer extraParams,
int n) {
double *xPointer = reinterpret_cast<double *>(dx);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[16], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( transformDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
n,
xPointer,
xStride,
extraParamsPointer,
resultPointer,resultStride);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams){
double *xPointer = reinterpret_cast<double *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[15], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( transformDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,resultShapeInfoPointer);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer xIndexes,
Nd4jPointer resultIndexes) {
double *xPointer = reinterpret_cast<double *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *resultIndexesPointer = reinterpret_cast<int *>(resultIndexes);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[14], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( transformDoubleIndexes), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultIndexesPointer);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
*/
float NativeOps::execIndexReduceScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[13], deviceProperties[(int) extraPointers[2]]);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
ScalarInfo<float> *scalarInfo = new ScalarInfo<float>(*stream);
hipLaunchKernelGGL(( indexReduceFloat), dim3(1),dim3(launchDims.y), launchDims.z * 2, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
scalarInfo->getDevicePointer(),
scalarInfo->getDeviceShapeInfo(),
scalarInfo->getDimensionDevicePointer(),
1,
1);
checkCudaErrors(hipStreamSynchronize(*stream));
float result = scalarInfo->getFinalResultFromDevice();
delete scalarInfo;
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfoBuffer
* @param dimension
* @param dimensionLength
*/
void NativeOps::execIndexReduceFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension,
int dimensionLength){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[13], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( indexReduceFloat), dim3(1),dim3(launchDims.y),launchDims.z * 2, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execBroadcastFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer dimension, int dimensionLength){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *yPointer = reinterpret_cast<float *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[12], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( broadcastFloat), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xStride
* @param y
* @param yStride
* @param result
* @param resultStride
* @param extraParams
* @param n
*/
void NativeOps::execPairwiseTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
int xStride,
Nd4jPointer y,
int yStride,
Nd4jPointer result,
int resultStride,
Nd4jPointer extraParams, int n){
float *xPointer = reinterpret_cast<float *>(dx);
float *yPointer = reinterpret_cast<float *>(y);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[11], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( pairWiseTransformStridedFloat), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
n,
xPointer,
yPointer,
xStride,
yStride,
extraParamsPointer,
resultPointer,
resultStride);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
* @param xIndexes
* @param yIndexes
* @param resultIndexes
*/
void NativeOps::execPairwiseTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer xIndexes,
Nd4jPointer yIndexes,
Nd4jPointer resultIndexes){
float *xPointer = reinterpret_cast<float *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *yPointer = reinterpret_cast<float *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *xIndexesPointer = reinterpret_cast<int *>(xIndexes);
int *yIndexesPointer = reinterpret_cast<int *>(yIndexes);
int *resultIndexesPointer = reinterpret_cast<int *>(resultIndexes);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[10], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( pairWiseTransformFloatIndex), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
yPointer,
extraParamsPointer,
resultPointer,
xShapeInfoPointer,
yShapeInfoPointer,
resultShapeInfoPointer,
xIndexesPointer,
yIndexesPointer,
resultIndexesPointer);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execPairwiseTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams){
float *xPointer = reinterpret_cast<float *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *yPointer = reinterpret_cast<float *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[9], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( pairWiseTransformFloat), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
yPointer,
extraParamsPointer,
resultPointer,
xShapeInfoPointer,
yShapeInfoPointer,
resultShapeInfoPointer);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduceFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo) {
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
ScalarInfo<float> *scalarInfo = new ScalarInfo<float>(*stream);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[8], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( reduceFloat), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer
,extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
scalarInfo->getDimensionDevicePointer(),
1,
1);
checkCudaErrors(hipStreamSynchronize(*stream));
delete scalarInfo;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduceFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer dimension,int dimensionLength){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[8], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( reduceFloat), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer
,extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @return
*/
float NativeOps::execReduceScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
ScalarInfo<float> *scalarInfo = new ScalarInfo<float>(*stream);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[8], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( reduceFloat), dim3(1),dim3(launchDims.y), launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer
,extraParamsPointer,
scalarInfo->getDevicePointer(),
scalarInfo->getDeviceShapeInfo(),
scalarInfo->getDimensionDevicePointer(),
1,
1);
checkCudaErrors(hipStreamSynchronize(*stream));
float result = scalarInfo->getFinalResultFromDevice();
delete scalarInfo;
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduce3Float(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *yPointer = reinterpret_cast<float *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParamsVals);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
ScalarInfo<float> *scalarInfo = new ScalarInfo<float>(*stream);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[7], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( reduce3Float), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
scalarInfo->getDimensionDevicePointer(),
1,
1);
checkCudaErrors(hipStreamSynchronize(*stream));
delete scalarInfo;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
*/
float NativeOps::execReduce3ScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo) {
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *yPointer = reinterpret_cast<float *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParamsVals);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
ScalarInfo<float> *scalarInfo = new ScalarInfo<float>(*stream);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[7], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( reduce3Float), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
scalarInfo->getDevicePointer(),
scalarInfo->getDeviceShapeInfo(),
scalarInfo->getDimensionDevicePointer(),
1,
1);
checkCudaErrors(hipStreamSynchronize(*stream));
float result = scalarInfo->getFinalResultFromDevice();
delete scalarInfo;
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfoBuffer
* @param dimension
* @param dimensionLength
*/
void NativeOps::execReduce3Float(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension,
int dimensionLength){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *yPointer = reinterpret_cast<float *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
float *extraParamsPointer = reinterpret_cast<float *>(extraParamsVals);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[7], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( reduce3Float), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xStride
* @param result
* @param resultStride
* @param scalar
* @param extraParams
* @param n
*/
void NativeOps::execScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
int xStride,
Nd4jPointer result,
int resultStride,
double scalar,
Nd4jPointer extraParams,
int n){
float *xPointer = reinterpret_cast<float *>(x);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[6], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( scalarFloat), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
n,
scalar,
xPointer,
xStride,
extraParamsPointer,
resultPointer,resultStride);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param scalar
* @param extraParams
* @param n
*/
void NativeOps::execScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
float scalar,
Nd4jPointer extraParams){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int n = shape::length(hostXShapeInfo);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[5], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( scalarFloat), dim3(launchDims.x), dim3(launchDims.y),launchDims.z, *stream,
opNum,
scalar,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,resultShapeInfoPointer);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param scalar
* @param extraParams
* @param n
* @param xIndexes
* @param resultIndexes
*/
void NativeOps::execScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
double scalar,
Nd4jPointer extraParams,
Nd4jPointer xIndexes,
Nd4jPointer resultIndexes){
float *xPointer = reinterpret_cast<float *>(x);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *resultIndexesPointer = reinterpret_cast<int *>(resultIndexes);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int n = shape::length(hostShapeInfo);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[4], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( scalarFloatIndexes), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
n,
scalar,
xPointer,
extraParamsPointer,
resultPointer,
resultIndexesPointer);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
*/
float NativeOps::execSummaryStatsScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,bool biasCorrected){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
ScalarInfo<float> *scalarShapeInformation = new ScalarInfo<float>(*stream);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[3], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( summaryStatsReduceFloat), dim3(launchDims.x),dim3(launchDims.y),launchDims.z * 10, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
scalarShapeInformation->getDevicePointer(),
scalarShapeInformation->getDeviceShapeInfo(),
scalarShapeInformation->getDimensionDevicePointer(),
1,
1,biasCorrected);
checkCudaErrors(hipStreamSynchronize(*stream));
float result = scalarShapeInformation->getFinalResultFromDevice();
delete scalarShapeInformation;
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execSummaryStatsFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,bool biasCorrected){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
ScalarInfo<float> *scalarShapeInformation = new ScalarInfo<float>(*stream);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[3], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( summaryStatsReduceFloat), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
scalarShapeInformation->getDimensionDevicePointer(),
1,
1,biasCorrected);
checkCudaErrors(hipStreamSynchronize(*stream));
delete scalarShapeInformation;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfoBuffer
* @param dimension
* @param dimensionLength
*/
void NativeOps::execSummaryStatsFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension,
int dimensionLength,bool biasCorrected){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[3], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( summaryStatsReduceFloat), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1,biasCorrected);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xStride
* @param result
* @param resultStride
* @param extraParams
* @param n
*/
void NativeOps::execTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
int xStride,
Nd4jPointer result,
int resultStride,
Nd4jPointer extraParams,
int n) {
float *xPointer = reinterpret_cast<float *>(dx);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[2], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( transformFloat), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
n,
xPointer,
xStride,
extraParamsPointer,
resultPointer,resultStride);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execTransformFloat(Nd4jPointer *extraPointers,int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams) {
float *xPointer = reinterpret_cast<float *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[1], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( transformFloat), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,resultShapeInfoPointer);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer xIndexes,
Nd4jPointer resultIndexes) {
float *xPointer = reinterpret_cast<float *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *resultIndexesPointer = reinterpret_cast<int *>(resultIndexes);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[0], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( transformFloatIndexes), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultIndexesPointer);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
* Append an input array
* to the end of a flat array
* in a particular order
* @param offset the offset of the array to start at
* @param order the order
* @param result the result array
* @param resultShapeInfo the shape info for the array
* @param input the input for the array
* @param inputShapeInfo the shape information for that array
*/
void NativeOps::flattenFloat(
int offset,
char order,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer input,
Nd4jPointer inputShapeInfo) {
printf("Cuda no op atm\n");
}
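// Illustration only, not the library's implementation (flattenFloat above is currently a
// no-op): one possible sketch under the simplifying assumptions that input and result are
// contiguous device buffers already laid out in the requested order, and that
// inputShapeInfo is a host-side shape buffer readable by shape::length.
static void flattenFloatContiguousSketch(int offset, Nd4jPointer result,
        Nd4jPointer input, Nd4jPointer inputShapeInfo) {
    float *resultPointer = reinterpret_cast<float *>(result);
    float *inputPointer = reinterpret_cast<float *>(input);
    int *inputShapeInfoPointer = reinterpret_cast<int *>(inputShapeInfo);
    int length = shape::length(inputShapeInfoPointer);
    // append the whole input after `offset` elements of the flat result buffer
    hipMemcpy(resultPointer + offset, inputPointer,
            length * sizeof(float), hipMemcpyDeviceToDevice);
}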
/**
* Append an input array
* to the end of a flat array
* in a particular order
* @param offset the offset of the array to start at
* @param order the order
* @param result the result array
* @param resultShapeInfo the shape info for the array
* @param input the input for the array
* @param inputShapeInfo the shape information for that array
*/
void NativeOps::flattenDouble(
int offset,
char order,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer input,
Nd4jPointer inputShapeInfo) {
printf("Cuda no op atm\n");
}
void NativeOps::initializeDevicesAndFunctions() {
int devCnt = 0;
hipGetDeviceCount(&devCnt);
deviceProperties = new hipDeviceProp_t[devCnt];
for (int i = 0; i < devCnt; i++) {
hipGetDeviceProperties(&deviceProperties[i], i);
}
hipFuncGetAttributes(&funcAttributes[0], (void *)transformFloatIndexes);
void (*transformFloatPointer1)(int opNum, float *dy,int *shapeInfo, float *params, float *result,int *resultShapeInfo) = transformFloat;
hipFuncGetAttributes(&funcAttributes[1], transformFloatPointer1);
void (*transformFloatPointer2)(int opNum, int n, float *dy, int incy, float *params, float *result,int resultStride) = transformFloat;
hipFuncGetAttributes(&funcAttributes[2], transformFloatPointer2);
hipFuncGetAttributes(&funcAttributes[3], (void *)summaryStatsReduceFloat);
hipFuncGetAttributes(&funcAttributes[4], (void *)scalarFloatIndexes);
void (*scalarFloatPointer1)(int opNum, float dx,float *dy, int *shapeInfo,float *params, float *result,int *resultShapeInfo) = scalarFloat;
hipFuncGetAttributes(&funcAttributes[5], scalarFloatPointer1);
void (*scalarFloatPointer2)(int opNum, int n,float dx, float *dy, int incy, float *params, float *result,int resultStride) = scalarFloat;
hipFuncGetAttributes(&funcAttributes[6], scalarFloatPointer2);
hipFuncGetAttributes(&funcAttributes[7], reduce3Float);
hipFuncGetAttributes(&funcAttributes[8], reduceFloat);
hipFuncGetAttributes(&funcAttributes[9], pairWiseTransformFloat);
hipFuncGetAttributes(&funcAttributes[10], pairWiseTransformFloatIndex);
hipFuncGetAttributes(&funcAttributes[11], pairWiseTransformStridedFloat);
hipFuncGetAttributes(&funcAttributes[12], broadcastFloat);
hipFuncGetAttributes(&funcAttributes[13], indexReduceFloat);
///////////////////////////////////////// Doubles are separate, just in case of...
hipFuncGetAttributes(&funcAttributes[14], transformDoubleIndexes);
void (*transformDoublePointer1)(int opNum, double *dy, int *shapeInfo, double *params, double *result,int *resultShapeInfo) = transformDouble;
hipFuncGetAttributes(&funcAttributes[15], transformDoublePointer1);
void (*transformDoublePointer2)(int opNum, int n, double *dy, int incy, double *params, double *result,int resultStride) = transformDouble;
hipFuncGetAttributes(&funcAttributes[16], transformDoublePointer2);
hipFuncGetAttributes(&funcAttributes[17], summaryStatsReduceDouble);
hipFuncGetAttributes(&funcAttributes[18], scalarDoubleIndexes);
void (*scalarDoublePointer1)(int opNum, double dx,double *dy, int *shapeInfo,double *params, double *result,int *resultShapeInfo) = scalarDouble;
hipFuncGetAttributes(&funcAttributes[19], scalarDoublePointer1);
void (*scalarDoublePointer2)(int opNum, int n,double dx, double *dy, int incy, double *params, double *result,int resultStride) = scalarDouble;
hipFuncGetAttributes(&funcAttributes[20], scalarDoublePointer2);
hipFuncGetAttributes(&funcAttributes[21], reduce3Double);
hipFuncGetAttributes(&funcAttributes[22], reduceDouble);
hipFuncGetAttributes(&funcAttributes[23], pairWiseTransformDouble);
hipFuncGetAttributes(&funcAttributes[24], pairWiseTransformDoubleIndex);
hipFuncGetAttributes(&funcAttributes[25], pairWiseTransformStridedDouble);
hipFuncGetAttributes(&funcAttributes[26], broadcastDouble);
hipFuncGetAttributes(&funcAttributes[27], indexReduceDouble);
}
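// Usage note (illustrative): initializeDevicesAndFunctions() must run once before any
// exec* call above, because every kernel launch reads deviceProperties[deviceId] and the
// funcAttributes[] slots populated here. A typical host sequence would be:
//
//     NativeOps ops;
//     ops.initializeDevicesAndFunctions();
//     // ... exec* calls using the extraPointers convention ...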
/**
* This method acquires a memory chunk of the requested size on the host side
*
* @param memorySize memory size, in bytes
* @param flags optional parameter
* @return pointer to the allocated chunk, or 0L if the allocation failed
*/
Nd4jPointer NativeOps::mallocHost(long memorySize, int flags) {
Nd4jPointer pointer;
hipError_t res = hipHostMalloc((void **)&pointer, memorySize, hipHostMallocMapped |hipHostMallocPortable );
if (res != 0)
pointer = 0L;
return pointer;
}
/**
* This method acquires a memory chunk of the requested size on the specified device
*
* @param memorySize memory size, in bytes
* @param ptrToDeviceId pointer to deviceId. For cuda that's just an int, for OpenCL that's a pointer to device_id, etc
* @param flags optional parameter
* @return pointer to the allocated chunk, or 0L if the allocation failed
*/
Nd4jPointer NativeOps::mallocDevice(long memorySize, Nd4jPointer ptrToDeviceId, int flags) {
Nd4jPointer pointer;
hipError_t res = hipMalloc((void **)&pointer, memorySize);
if (res != 0)
pointer = 0L;
return pointer;
}
/**
* This method releases previously allocated host memory space
*
* @param pointer pointer that'll be freed
*/
Nd4jPointer NativeOps::freeHost(Nd4jPointer pointer) {
hipError_t res = hipHostFree((void *) pointer);
if (res != 0)
pointer = 0L;
return 1L;
}
/**
* This method releases previously allocated memory space on device
*
* @param pointer pointer that'll be freed
* @param ptrToDeviceId pointer to deviceId.
*/
Nd4jPointer NativeOps::freeDevice(Nd4jPointer pointer, Nd4jPointer ptrToDeviceId) {
hipError_t res = hipFree((void *)pointer);
if (res != 0)
pointer = 0L;
return 1L;
}
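// Illustration only, not part of the library: a minimal allocate/copy/free round trip with
// the four memory helpers above. Error handling is reduced to checking the 0L sentinel the
// allocators return on failure; ptrToDeviceId is passed as 0 since it is unused above.
static int exampleMemoryRoundTrip(NativeOps &ops) {
    long bytes = 1024 * sizeof(float);
    Nd4jPointer host = ops.mallocHost(bytes, 0);
    Nd4jPointer device = ops.mallocDevice(bytes, (Nd4jPointer) 0, 0);
    if (host == 0L || device == 0L)
        return -1;
    hipMemcpy(reinterpret_cast<void *>(device), reinterpret_cast<void *>(host),
            bytes, hipMemcpyHostToDevice);
    ops.freeDevice(device, (Nd4jPointer) 0);
    ops.freeHost(host);
    return 0;
}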
|
35325386d97a57a51a258c406b2529308e13aaff.cu
|
#include "../NativeOps.h"
#include <cuda.h>
#include <cuda_launch_config.h>
#include <buffer.h>
#include <shape.h>
#include <reduce3.h>
#include <reduce.h>
#include <indexreduce.h>
#include <pairwise_transform.h>
#include <transform.h>
#include <scalar.h>
#include <broadcasting.h>
#include <summarystatsreduce.h>
#include <thread>
#include <map>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <cuda_runtime.h>
#include <cuda_device_runtime_api.h>
#include <pointercast.h>
#include <stdio.h>
cudaDeviceProp *deviceProperties;
cudaFuncAttributes *funcAttributes = new cudaFuncAttributes[28];
template <typename T>
dim3 getOptimalDimensions(int n,cudaFuncAttributes attributes, cudaDeviceProp properties) {
// we can combine the two to compute a block size
int num_threads = block_size_with_maximum_potential_occupancy(attributes, properties);
// no real sense launching more threads than the number of elements we have
if (num_threads > n) num_threads = n;
// compute the number of blocks of size num_threads to launch
int num_blocks = n / num_threads;
// check for partial block at the end
if(n % num_threads) ++num_blocks;
return dim3(num_blocks,num_threads, (num_threads * sizeof(T)) + (attributes.sharedSizeBytes < 1024 ? 1024 : attributes.sharedSizeBytes));
}
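// Worked example (illustrative numbers): for n = 100000 elements and an occupancy-derived
// block size of 512 threads, num_threads stays at 512, num_blocks = 100000 / 512 = 195 with
// a remainder, so it is bumped to 196, and the dynamic shared memory request is
// 512 * sizeof(float) = 2048 bytes plus max(1024, attributes.sharedSizeBytes).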
/**
* Returns optimal launch parameters
* given the extra pointers passed in.
* The extra pointer should be
* the host pointer for the shape information
* associated with the data.
* From there it is used to obtain the length
* from which we can derive the optimal launch parameters.
*
*/
template <typename T>
dim3 getOptimalLaunchParameters(Nd4jPointer *extraPointers, cudaFuncAttributes attributes, cudaDeviceProp properties) {
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int n = shape::length(hostXShapeInfo);
dim3 launchDims = getOptimalDimensions<T>(n,attributes, properties);
//printf("Params: gridSize: [%i], blockSize: [%i], shMem: [%i], problemLength: [%i], totalThreads:[%i]\n", launchDims.x, launchDims.y, launchDims.z, n, (launchDims.x * launchDims.y));
return launchDims;
}
nd4j::buffer::Buffer<int> * createScalarBuffer(cudaStream_t stream) {
int *scalarShapeInfo = shape::createScalarShapeInfo();
nd4j::buffer::Buffer<int> *buff = nd4j::buffer::createBuffer(scalarShapeInfo,shape::shapeInfoLength(2), stream);
nd4j::buffer::copyDataToGpu(&buff, stream);
return buff;
}
class ScalarShapeInformation {
private:
nd4j::buffer::Buffer<int> *scalarDimension;
nd4j::buffer::Buffer<int> *scalarShapeInfo;
std::thread::id threadId;
public:
ScalarShapeInformation(cudaStream_t stream) {
int *scalarDimensionBuff = (int *) malloc(sizeof(int));
scalarDimensionBuff[0] = shape::MAX_DIMENSION;
scalarDimension = nd4j::buffer::createBuffer(scalarDimensionBuff,1, stream);
scalarShapeInfo = createScalarBuffer(stream);
threadId = std::this_thread::get_id();
}
~ScalarShapeInformation() {
nd4j::buffer::freeBuffer(&scalarShapeInfo);
nd4j::buffer::freeBuffer(&scalarDimension);
}
int *getShapeInfoHostPointer() {
return scalarShapeInfo->data;
}
int * getShapeInfoGpuPointer() {
return scalarShapeInfo->gData;
}
int * getDimensionHostPointer() {
return scalarDimension->data;
}
int * getDimensionGpuPointer() {
return scalarDimension->gData;
}
};
template <typename T>
class ScalarInfo {
nd4j::buffer::Buffer<T> *scalarData;
ScalarShapeInformation *shapeInfo;
T finalResult;
cudaStream_t streamRef;
public:
ScalarInfo(cudaStream_t stream) {
T *scalarResult = (T*)malloc(sizeof(T));
shapeInfo = new ScalarShapeInformation(stream);
scalarData = nd4j::buffer::createBuffer(scalarResult,1, stream);
streamRef = stream;
nd4j::buffer::copyDataToGpu(&scalarData, stream);
}
T getFinalResultFromDevice() {
nd4j::buffer::copyDataFromGpu(&scalarData, streamRef);
return scalarData->data[0];
}
/**
* Get the device shape information
* representing a scalar
*/
int *getDeviceShapeInfo() {
return shapeInfo->getShapeInfoGpuPointer();
}
/**
* Get the result pointers
*/
T *getDevicePointer() {
return scalarData->gData;
}
/**
* Get the infinite dimension device pointer
*/
int *getDimensionDevicePointer() {
return shapeInfo->getDimensionGpuPointer();
}
~ScalarInfo() {
nd4j::buffer::freeBuffer(&scalarData);
delete shapeInfo;
}
};
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
*/
double NativeOps::execIndexReduceScalarDouble(Nd4jPointer *extraPointers,int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams) {
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[27], deviceProperties[(int) extraPointers[2]]);
ScalarInfo<double> *scalarInfo = new ScalarInfo<double>(*stream);
indexReduceDouble<<<1,launchDims.y,launchDims.z * 4, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
scalarInfo->getDevicePointer(),
scalarInfo->getDeviceShapeInfo(),
scalarInfo->getDimensionDevicePointer(),
1,
1);
checkCudaErrors(cudaStreamSynchronize(*stream));
double result = scalarInfo->getFinalResultFromDevice();
delete scalarInfo;
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfoBuffer
* @param dimension
* @param dimensionLength
*/
void NativeOps::execIndexReduceDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension, int dimensionLength) {
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[27], deviceProperties[(int) extraPointers[2]]);
indexReduceDouble<<<1,launchDims.y,launchDims.z * 2, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execBroadcastDouble(Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer dimension, int dimensionLength){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *yPointer = reinterpret_cast<double *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[26], deviceProperties[(int) extraPointers[2]]);
broadcastDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xStride
* @param y
* @param yStride
* @param result
* @param resultStride
* @param extraParams
* @param n
*/
void NativeOps::execPairwiseTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
int xStride,
Nd4jPointer y,
int yStride,
Nd4jPointer result,
int resultStride,
Nd4jPointer extraParams, int n) {
double *xPointer = reinterpret_cast<double *>(dx);
double *yPointer = reinterpret_cast<double *>(y);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[25], deviceProperties[(int) extraPointers[2]]);
pairWiseTransformStridedDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>> (
opNum,
n,
xPointer,
yPointer,
xStride,
yStride,
extraParamsPointer,
resultPointer,
resultStride);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
* @param xIndexes
* @param yIndexes
* @param resultIndexes
*/
void NativeOps::execPairwiseTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer xIndexes,
Nd4jPointer yIndexes,
Nd4jPointer resultIndexes) {
double *xPointer = reinterpret_cast<double *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *yPointer = reinterpret_cast<double *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *xIndexesPointer = reinterpret_cast<int *>(xIndexes);
int *yIndexesPointer = reinterpret_cast<int *>(yIndexes);
int *resultIndexesPointer = reinterpret_cast<int *>(resultIndexes);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[24], deviceProperties[(int) extraPointers[2]]);
pairWiseTransformDoubleIndex <<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(
opNum,
xPointer,
yPointer,
extraParamsPointer,
resultPointer,
xShapeInfoPointer,
yShapeInfoPointer,
resultShapeInfoPointer,
xIndexesPointer,
yIndexesPointer,
resultIndexesPointer);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execPairwiseTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams) {
double *xPointer = reinterpret_cast<double *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *yPointer = reinterpret_cast<double *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[23], deviceProperties[(int) extraPointers[2]]);
pairWiseTransformDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
yPointer,
extraParamsPointer,
resultPointer,
xShapeInfoPointer,
yShapeInfoPointer,
resultShapeInfoPointer);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduceDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo) {
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[22], deviceProperties[(int) extraPointers[2]]);
ScalarInfo<double> *scalarInfo = new ScalarInfo<double>(*stream);
reduceDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer
,extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
scalarInfo->getDimensionDevicePointer(),
1,
1);
checkCudaErrors(cudaStreamSynchronize(*stream));
delete scalarInfo;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduceDouble(
Nd4jPointer *extraPointers
,int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer dimension,
int dimensionLength) {
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[22], deviceProperties[(int) extraPointers[2]]);
reduceDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer
,extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @return
*/
double NativeOps::execReduceScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[22], deviceProperties[(int) extraPointers[2]]);
ScalarInfo<double> *scalarInfo = new ScalarInfo<double>(*stream);
reduceDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer
,extraParamsPointer,
scalarInfo->getDevicePointer(),
scalarInfo->getDeviceShapeInfo(),
scalarInfo->getDimensionDevicePointer(),
1,
1);
checkCudaErrors(cudaStreamSynchronize(*stream));
double result = scalarInfo->getFinalResultFromDevice();
delete scalarInfo;
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduce3Double(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo) {
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *yPointer = reinterpret_cast<double *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParamsVals);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[21], deviceProperties[(int) extraPointers[2]]);
ScalarInfo<double> *scalarInfo = new ScalarInfo<double>(*stream);
reduce3Double<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
scalarInfo->getDimensionDevicePointer(),
1,
1);
checkCudaErrors(cudaStreamSynchronize(*stream));
delete scalarInfo;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
*/
double NativeOps::execReduce3ScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *yPointer = reinterpret_cast<double *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParamsVals);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[21], deviceProperties[(int) extraPointers[2]]);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
ScalarInfo<double> *scalarInfo = new ScalarInfo<double>(*stream);
reduce3Double<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
scalarInfo->getDevicePointer(),
scalarInfo->getDeviceShapeInfo(),
scalarInfo->getDimensionDevicePointer(),
1,
1);
checkCudaErrors(cudaStreamSynchronize(*stream));
double result = scalarInfo->getFinalResultFromDevice();
delete scalarInfo;
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfoBuffer
* @param dimension
* @param dimensionLength
*/
void NativeOps::execReduce3Double(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension,
int dimensionLength){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *yPointer = reinterpret_cast<double *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
double *extraParamsPointer = reinterpret_cast<double *>(extraParamsVals);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[21], deviceProperties[(int) extraPointers[2]]);
reduce3Double<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xStride
* @param result
* @param resultStride
* @param scalar
* @param extraParams
* @param n
*/
void NativeOps::execScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
int xStride,
Nd4jPointer result,
int resultStride,
double scalar,
Nd4jPointer extraParams,
int n) {
double *xPointer = reinterpret_cast<double *>(x);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[20], deviceProperties[(int) extraPointers[2]]);
scalarDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
n,
scalar,
xPointer,
xStride,
extraParamsPointer,
resultPointer,resultStride);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param scalar
* @param extraParams
* @param n
*/
void NativeOps::execScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
double scalar,
Nd4jPointer extraParams){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[19], deviceProperties[(int) extraPointers[2]]);
scalarDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
scalar,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,resultShapeInfoPointer);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param scalar
* @param extraParams
* @param n
* @param xIndexes
* @param resultIndexes
*/
void NativeOps::execScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
double scalar,
Nd4jPointer extraParams,
int n,
Nd4jPointer xIndexes,
Nd4jPointer resultIndexes){
double *xPointer = reinterpret_cast<double *>(x);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *resultIndexesPointer = reinterpret_cast<int *>(resultIndexes);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[18], deviceProperties[(int) extraPointers[2]]);
scalarDoubleIndexes<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
n,
scalar,
xPointer,
extraParamsPointer,
resultPointer,
resultIndexesPointer);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
*/
double NativeOps::execSummaryStatsScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,bool biasCorrected){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
ScalarInfo<double> *scalarShapeInformation = new ScalarInfo<double>(*stream);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[17], deviceProperties[(int) extraPointers[2]]);
summaryStatsReduceDouble<<<launchDims.x,launchDims.y,launchDims.z * 10, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
scalarShapeInformation->getDevicePointer(),
scalarShapeInformation->getDeviceShapeInfo(),
scalarShapeInformation->getDimensionDevicePointer(),
1,
1,biasCorrected);
checkCudaErrors(cudaStreamSynchronize(*stream));
double result = scalarShapeInformation->getFinalResultFromDevice();
delete scalarShapeInformation;
return result;
}
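/**
* Illustrative, hypothetical caller-side use of the scalar summary-stats path
* (the names below are placeholders, not part of this file), e.g. to compute a
* bias-corrected variance of a device buffer dX described by dXShapeInfo:
*   NativeOps ops;
*   double variance = ops.execSummaryStatsScalarDouble(
*       extras, varianceOpNum,                      // placeholder op id
*       (Nd4jPointer) dX, (Nd4jPointer) dXShapeInfo,
*       (Nd4jPointer) 0, true);                     // no extra params, bias corrected
*/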
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execSummaryStatsDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,bool biasCorrected) {
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[17], deviceProperties[(int) extraPointers[2]]);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
ScalarInfo<double> *scalarShapeInformation = new ScalarInfo<double>(*stream);
summaryStatsReduceDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
scalarShapeInformation->getDimensionDevicePointer(),
1,
1,biasCorrected);
checkCudaErrors(cudaStreamSynchronize(*stream));
delete scalarShapeInformation;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfoBuffer
* @param dimension
* @param dimensionLength
*/
void NativeOps::execSummaryStatsDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension, int dimensionLength,bool biasCorrected){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[17], deviceProperties[(int) extraPointers[2]]);
summaryStatsReduceDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1,biasCorrected);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xStride
* @param result
* @param resultStride
* @param extraParams
* @param n
*/
void NativeOps::execTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
int xStride,
Nd4jPointer result,
int resultStride,
Nd4jPointer extraParams,
int n) {
double *xPointer = reinterpret_cast<double *>(dx);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[16], deviceProperties[(int) extraPointers[2]]);
transformDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
n,
xPointer,
xStride,
extraParamsPointer,
resultPointer,resultStride);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams){
double *xPointer = reinterpret_cast<double *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[15], deviceProperties[(int) extraPointers[2]]);
transformDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,resultShapeInfoPointer);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer xIndexes,
Nd4jPointer resultIndexes) {
double *xPointer = reinterpret_cast<double *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *resultIndexesPointer = reinterpret_cast<int *>(resultIndexes);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[14], deviceProperties[(int) extraPointers[2]]);
transformDoubleIndexes<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultIndexesPointer);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
*/
float NativeOps::execIndexReduceScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[13], deviceProperties[(int) extraPointers[2]]);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
ScalarInfo<float> *scalarInfo = new ScalarInfo<float>(*stream);
indexReduceFloat<<<1,launchDims.y, launchDims.z * 2, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
scalarInfo->getDevicePointer(),
scalarInfo->getDeviceShapeInfo(),
scalarInfo->getDimensionDevicePointer(),
1,
1);
checkCudaErrors(cudaStreamSynchronize(*stream));
float result = scalarInfo->getFinalResultFromDevice();
delete scalarInfo;
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfoBuffer
* @param dimension
* @param dimensionLength
*/
void NativeOps::execIndexReduceFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension,
int dimensionLength){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[13], deviceProperties[(int) extraPointers[2]]);
indexReduceFloat<<<1,launchDims.y,launchDims.z * 2, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execBroadcastFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer dimension, int dimensionLength){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *yPointer = reinterpret_cast<float *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[12], deviceProperties[(int) extraPointers[2]]);
broadcastFloat<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xStride
* @param y
* @param yStride
* @param result
* @param resultStride
* @param extraParams
* @param n
*/
void NativeOps::execPairwiseTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
int xStride,
Nd4jPointer y,
int yStride,
Nd4jPointer result,
int resultStride,
Nd4jPointer extraParams, int n){
float *xPointer = reinterpret_cast<float *>(dx);
float *yPointer = reinterpret_cast<float *>(y);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[11], deviceProperties[(int) extraPointers[2]]);
pairWiseTransformStridedFloat<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
n,
xPointer,
yPointer,
xStride,
yStride,
extraParamsPointer,
resultPointer,
resultStride);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
* @param xIndexes
* @param yIndexes
* @param resultIndexes
*/
void NativeOps::execPairwiseTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer xIndexes,
Nd4jPointer yIndexes,
Nd4jPointer resultIndexes){
float *xPointer = reinterpret_cast<float *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *yPointer = reinterpret_cast<float *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *xIndexesPointer = reinterpret_cast<int *>(xIndexes);
int *yIndexesPointer = reinterpret_cast<int *>(yIndexes);
int *resultIndexesPointer = reinterpret_cast<int *>(resultIndexes);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[10], deviceProperties[(int) extraPointers[2]]);
pairWiseTransformFloatIndex<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
yPointer,
extraParamsPointer,
resultPointer,
xShapeInfoPointer,
yShapeInfoPointer,
resultShapeInfoPointer,
xIndexesPointer,
yIndexesPointer,
resultIndexesPointer);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execPairwiseTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams){
float *xPointer = reinterpret_cast<float *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *yPointer = reinterpret_cast<float *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[9], deviceProperties[(int) extraPointers[2]]);
pairWiseTransformFloat<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
yPointer,
extraParamsPointer,
resultPointer,
xShapeInfoPointer,
yShapeInfoPointer,
resultShapeInfoPointer);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduceFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo) {
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
ScalarInfo<float> *scalarInfo = new ScalarInfo<float>(*stream);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[8], deviceProperties[(int) extraPointers[2]]);
reduceFloat<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer
,extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
scalarInfo->getDimensionDevicePointer(),
1,
1);
checkCudaErrors(cudaStreamSynchronize(*stream));
delete scalarInfo;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduceFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer dimension,int dimensionLength){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[8], deviceProperties[(int) extraPointers[2]]);
reduceFloat<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer
,extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @return
*/
float NativeOps::execReduceScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
ScalarInfo<float> *scalarInfo = new ScalarInfo<float>(*stream);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[8], deviceProperties[(int) extraPointers[2]]);
reduceFloat<<< 1,launchDims.y, launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer
,extraParamsPointer,
scalarInfo->getDevicePointer(),
scalarInfo->getDeviceShapeInfo(),
scalarInfo->getDimensionDevicePointer(),
1,
1);
checkCudaErrors(cudaStreamSynchronize(*stream));
float result = scalarInfo->getFinalResultFromDevice();
delete scalarInfo;
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduce3Float(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *yPointer = reinterpret_cast<float *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParamsVals);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
ScalarInfo<float> *scalarInfo = new ScalarInfo<float>(*stream);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[7], deviceProperties[(int) extraPointers[2]]);
reduce3Float<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
scalarInfo->getDimensionDevicePointer(),
1,
1);
checkCudaErrors(cudaStreamSynchronize(*stream));
delete scalarInfo;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
*/
float NativeOps::execReduce3ScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo) {
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *yPointer = reinterpret_cast<float *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParamsVals);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
ScalarInfo<float> *scalarInfo = new ScalarInfo<float>(*stream);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[7], deviceProperties[(int) extraPointers[2]]);
reduce3Float<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
scalarInfo->getDevicePointer(),
scalarInfo->getDeviceShapeInfo(),
scalarInfo->getDimensionDevicePointer(),
1,
1);
checkCudaErrors(cudaStreamSynchronize(*stream));
float result = scalarInfo->getFinalResultFromDevice();
delete scalarInfo;
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfoBuffer
* @param dimension
* @param dimensionLength
*/
void NativeOps::execReduce3Float(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension,
int dimensionLength){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *yPointer = reinterpret_cast<float *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
float *extraParamsPointer = reinterpret_cast<float *>(extraParamsVals);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[7], deviceProperties[(int) extraPointers[2]]);
reduce3Float<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xStride
* @param result
* @param resultStride
* @param scalar
* @param extraParams
* @param n
*/
void NativeOps::execScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
int xStride,
Nd4jPointer result,
int resultStride,
double scalar,
Nd4jPointer extraParams,
int n){
float *xPointer = reinterpret_cast<float *>(x);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[6], deviceProperties[(int) extraPointers[2]]);
scalarFloat<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
n,
scalar,
xPointer,
xStride,
extraParamsPointer,
resultPointer,resultStride);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param scalar
* @param extraParams
* @param n
*/
void NativeOps::execScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
float scalar,
Nd4jPointer extraParams){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int n = shape::length(hostXShapeInfo);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[5], deviceProperties[(int) extraPointers[2]]);
scalarFloat<<<launchDims.x, launchDims.y,launchDims.z, *stream>>>(
opNum,
scalar,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,resultShapeInfoPointer);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param scalar
* @param extraParams
* @param n
* @param xIndexes
* @param resultIndexes
*/
void NativeOps::execScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
double scalar,
Nd4jPointer extraParams,
Nd4jPointer xIndexes,
Nd4jPointer resultIndexes){
float *xPointer = reinterpret_cast<float *>(x);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *resultIndexesPointer = reinterpret_cast<int *>(resultIndexes);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
int n = shape::length(hostShapeInfo);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[4], deviceProperties[(int) extraPointers[2]]);
scalarFloatIndexes<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
n,
scalar,
xPointer,
extraParamsPointer,
resultPointer,
resultIndexesPointer);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
*/
float NativeOps::execSummaryStatsScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,bool biasCorrected){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
ScalarInfo<float> *scalarShapeInformation = new ScalarInfo<float>(*stream);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[3], deviceProperties[(int) extraPointers[2]]);
summaryStatsReduceFloat<<<launchDims.x,launchDims.y,launchDims.z * 10, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
scalarShapeInformation->getDevicePointer(),
scalarShapeInformation->getDeviceShapeInfo(),
scalarShapeInformation->getDimensionDevicePointer(),
1,
1,biasCorrected);
checkCudaErrors(cudaStreamSynchronize(*stream));
float result = scalarShapeInformation->getFinalResultFromDevice();
delete scalarShapeInformation;
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execSummaryStatsFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,bool biasCorrected){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
ScalarInfo<float> *scalarShapeInformation = new ScalarInfo<float>(*stream);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[3], deviceProperties[(int) extraPointers[2]]);
summaryStatsReduceFloat<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
scalarShapeInformation->getDimensionDevicePointer(),
1,
1,biasCorrected);
checkCudaErrors(cudaStreamSynchronize(*stream));
delete scalarShapeInformation;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfoBuffer
* @param dimension
* @param dimensionLength
*/
void NativeOps::execSummaryStatsFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension,
int dimensionLength,bool biasCorrected){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[3], deviceProperties[(int) extraPointers[2]]);
summaryStatsReduceFloat<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1,biasCorrected);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xStride
* @param result
* @param resultStride
* @param extraParams
* @param n
*/
void NativeOps::execTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
int xStride,
Nd4jPointer result,
int resultStride,
Nd4jPointer extraParams,
int n) {
float *xPointer = reinterpret_cast<float *>(dx);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[2], deviceProperties[(int) extraPointers[2]]);
transformFloat<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
n,
xPointer,
xStride,
extraParamsPointer,
resultPointer,resultStride);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execTransformFloat(Nd4jPointer *extraPointers,int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams) {
float *xPointer = reinterpret_cast<float *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[1], deviceProperties[(int) extraPointers[2]]);
transformFloat<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,resultShapeInfoPointer);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer xIndexes,
Nd4jPointer resultIndexes) {
float *xPointer = reinterpret_cast<float *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *resultIndexesPointer = reinterpret_cast<int *>(resultIndexes);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[0], deviceProperties[(int) extraPointers[2]]);
transformFloatIndexes<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultIndexesPointer);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
* Append an input array
* to the end of a flat array
* in a particular order
* @param offset the offset of the array to start at
* @param order the order
* @param result the result array
* @param resultShapeInfo the shape info for the array
* @param input the input for the array
* @param inputShapeInfo the shape information for that array
*/
void NativeOps::flattenFloat(
int offset,
char order,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer input,
Nd4jPointer inputShapeInfo) {
printf("Cuda no op atm\n");
}
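/**
* The CUDA path above is currently a no-op. For reference, a minimal host-side
* sketch of the intended semantics for a contiguous, C-ordered input (an
* assumption; a real implementation must honour `order` and the shape info):
*   float *out = reinterpret_cast<float *>(result);
*   float *in = reinterpret_cast<float *>(input);
*   for (int i = 0; i < inputLength; i++)
*       out[offset + i] = in[i];
*/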
/**
* Append an input array
* to the end of a flat array
* in a particular order
* @param offset the offset of the array to start at
* @param order the order
* @param result the result array
* @param resultShapeInfo the shape info for the array
* @param input the input for the array
* @param inputShapeInfo the shape information for that array
*/
void NativeOps::flattenDouble(
int offset,
char order,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer input,
Nd4jPointer inputShapeInfo) {
printf("Cuda no op atm\n");
}
void NativeOps::initializeDevicesAndFunctions() {
int devCnt = 0;
cudaGetDeviceCount(&devCnt);
deviceProperties = new cudaDeviceProp[devCnt];
for (int i = 0; i < devCnt; i++) {
cudaGetDeviceProperties(&deviceProperties[i], i);
}
cudaFuncGetAttributes(&funcAttributes[0], (void *)transformFloatIndexes);
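// transformFloat (and scalarFloat below) are overloaded, so an explicit
// function-pointer variable with the desired signature is taken first to give
// cudaFuncGetAttributes an unambiguous kernel address.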
void (*transformFloatPointer1)(int opNum, float *dy,int *shapeInfo, float *params, float *result,int *resultShapeInfo) = transformFloat;
cudaFuncGetAttributes(&funcAttributes[1], transformFloatPointer1);
void (*transformFloatPointer2)(int opNum, int n, float *dy, int incy, float *params, float *result,int resultStride) = transformFloat;
cudaFuncGetAttributes(&funcAttributes[2], transformFloatPointer2);
cudaFuncGetAttributes(&funcAttributes[3], (void *)summaryStatsReduceFloat);
cudaFuncGetAttributes(&funcAttributes[4], (void *)scalarFloatIndexes);
void (*scalarFloatPointer1)(int opNum, float dx,float *dy, int *shapeInfo,float *params, float *result,int *resultShapeInfo) = scalarFloat;
cudaFuncGetAttributes(&funcAttributes[5], scalarFloatPointer1);
void (*scalarFloatPointer2)(int opNum, int n,float dx, float *dy, int incy, float *params, float *result,int resultStride) = scalarFloat;
cudaFuncGetAttributes(&funcAttributes[6], scalarFloatPointer2);
cudaFuncGetAttributes(&funcAttributes[7], reduce3Float);
cudaFuncGetAttributes(&funcAttributes[8], reduceFloat);
cudaFuncGetAttributes(&funcAttributes[9], pairWiseTransformFloat);
cudaFuncGetAttributes(&funcAttributes[10], pairWiseTransformFloatIndex);
cudaFuncGetAttributes(&funcAttributes[11], pairWiseTransformStridedFloat);
cudaFuncGetAttributes(&funcAttributes[12], broadcastFloat);
cudaFuncGetAttributes(&funcAttributes[13], indexReduceFloat);
///////////////////////////////////////// Doubles are handled separately, just in case
cudaFuncGetAttributes(&funcAttributes[14], transformDoubleIndexes);
void (*transformDoublePointer1)(int opNum, double *dy, int *shapeInfo, double *params, double *result,int *resultShapeInfo) = transformDouble;
cudaFuncGetAttributes(&funcAttributes[15], transformDoublePointer1);
void (*transformDoublePointer2)(int opNum, int n, double *dy, int incy, double *params, double *result,int resultStride) = transformDouble;
cudaFuncGetAttributes(&funcAttributes[16], transformDoublePointer2);
cudaFuncGetAttributes(&funcAttributes[17], summaryStatsReduceDouble);
cudaFuncGetAttributes(&funcAttributes[18], scalarDoubleIndexes);
void (*scalarDoublePointer1)(int opNum, double dx,double *dy, int *shapeInfo,double *params, double *result,int *resultShapeInfo) = scalarDouble;
cudaFuncGetAttributes(&funcAttributes[19], scalarDoublePointer1);
void (*scalarDoublePointer2)(int opNum, int n,double dx, double *dy, int incy, double *params, double *result,int resultStride) = scalarDouble;
cudaFuncGetAttributes(&funcAttributes[20], scalarDoublePointer2);
cudaFuncGetAttributes(&funcAttributes[21], reduce3Double);
cudaFuncGetAttributes(&funcAttributes[22], reduceDouble);
cudaFuncGetAttributes(&funcAttributes[23], pairWiseTransformDouble);
cudaFuncGetAttributes(&funcAttributes[24], pairWiseTransformDoubleIndex);
cudaFuncGetAttributes(&funcAttributes[25], pairWiseTransformStridedDouble);
cudaFuncGetAttributes(&funcAttributes[26], broadcastDouble);
cudaFuncGetAttributes(&funcAttributes[27], indexReduceDouble);
}
/**
* This method acquires a memory chunk of the requested size in pinned host memory.
*
* @param memorySize memory size, in bytes
* @param flags optional parameter
* @return pointer to the allocated chunk, or 0L on failure
*/
Nd4jPointer NativeOps::mallocHost(long memorySize, int flags) {
Nd4jPointer pointer;
cudaError_t res = cudaHostAlloc((void **)&pointer, memorySize, cudaHostAllocMapped | cudaHostAllocPortable);
if (res != cudaSuccess)
pointer = 0L;
return pointer;
}
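/**
* Hypothetical usage sketch (not part of the original API surface): allocate
* pinned host memory through NativeOps and release it with freeHost below.
*   NativeOps ops;
*   Nd4jPointer buf = ops.mallocHost(1024 * sizeof(float), 0);
*   if (buf != 0L) {
*       // ... fill or read the buffer from host code ...
*       ops.freeHost(buf);
*   }
*/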
/**
* This method acquires a memory chunk of the requested size on the specified device.
*
* @param memorySize memory size, in bytes
* @param ptrToDeviceId pointer to deviceId. For CUDA that's just an int, for OpenCL that's a pointer to device_id, etc.
* @param flags optional parameter
* @return pointer to the allocated chunk, or 0L on failure
*/
Nd4jPointer NativeOps::mallocDevice(long memorySize, Nd4jPointer ptrToDeviceId, int flags) {
Nd4jPointer pointer;
cudaError_t res = cudaMalloc((void **)&pointer, memorySize);
if (res != cudaSuccess)
pointer = 0L;
return pointer;
}
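// Note: ptrToDeviceId is not consulted here, so the allocation lands on the
// currently active CUDA device. A caller that wants a specific device would
// have to select it first (illustrative sketch, not enforced by this file):
//   cudaSetDevice(deviceOrdinal);
//   Nd4jPointer dBuf = ops.mallocDevice(bytes, (Nd4jPointer) deviceOrdinal, 0);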
/**
* This method releases previously allocated host memory.
*
* @param pointer pointer that'll be freed
* @return 1L on success, 0L on failure
*/
Nd4jPointer NativeOps::freeHost(Nd4jPointer pointer) {
cudaError_t res = cudaFreeHost((void *) pointer);
if (res != cudaSuccess)
return 0L;
return 1L;
}
/**
* This method releases previously allocated memory space on the device.
*
* @param pointer pointer that'll be freed
* @param ptrToDeviceId pointer to deviceId
* @return 1L on success, 0L on failure
*/
Nd4jPointer NativeOps::freeDevice(Nd4jPointer pointer, Nd4jPointer ptrToDeviceId) {
cudaError_t res = cudaFree((void *)pointer);
if (res != cudaSuccess)
return 0L;
return 1L;
}
|
3faf123fdf6008b3ea611a3c7402a4f0ebc41b2d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "hip/hip_runtime.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
void check_error (const char* message) {
hipError_t error = hipGetLastError ();
if (error != hipSuccess) {
printf ("CUDA error : %s, %s\n", message, hipGetErrorString (error));
exit(-1);
}
}
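// Typical usage is to call check_error right after a kernel launch, e.g.:
//   hipLaunchKernelGGL(sw4_1, grid, block, 0, 0, ...);
//   check_error ("sw4_1");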
__global__ void __launch_bounds__ (128,2) sw4_1 (double * uacc_in_0, double * uacc_in_1, double * uacc_in_2, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * strx, double * stry, double * strz, int N) {
// Determine the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
// Assumptions
int a1 = 1;
double h = 3.7;
double cof = 1e0 / ( h * h);
double (*uacc_0)[304][304] = (double (*)[304][304])uacc_in_0;
double (*uacc_1)[304][304] = (double (*)[304][304])uacc_in_1;
double (*uacc_2)[304][304] = (double (*)[304][304])uacc_in_2;
double (*u_0)[304][304] = (double (*)[304][304])u_in_0;
double (*u_1)[304][304] = (double (*)[304][304])u_in_1;
double (*u_2)[304][304] = (double (*)[304][304])u_in_2;
double (*mu)[304][304] = (double (*)[304][304])mu_in;
double (*la)[304][304] = (double (*)[304][304])la_in;
double a_mux1, a_mux2, a_mux3, a_mux4, a_muy1, a_muy2, a_muy3, a_muy4, a_muz1, a_muz2, a_muz3, a_muz4;
double b_mux1, b_mux2, b_mux3, b_mux4, b_muy1, b_muy2, b_muy3, b_muy4, b_muz1, b_muz2, b_muz3, b_muz4;
double a_r1, b_r1;
if (i>=2 & j>=2 & i<=N-3 & j<=N-3) {
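// Each iteration computes two consecutive k-planes (k and k+1), which is why
// the loop below advances k by 2.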
#pragma unroll 3
for (int k=2; k<=N-3; k+=2) {
a_mux1 = mu[k][j][i-1] * strx[i-1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i-2] * strx[i-2];
a_mux2 = mu[k][j][i-2] * strx[i-2] + mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i] + 3.0 * mu[k][j][i-1] * strx[i-1];
a_mux3 = mu[k][j][i-1] * strx[i-1] + mu[k][j][i+2] * strx[i+2] + 3.0 * mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i];
a_mux4 = mu[k][j][i+1] * strx[i+1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i+2] * strx[i+2];
a_muy1 = mu[k][j-1][i] * stry[j-1] - 3e0 / 4 * mu[k][j][i] * stry[j] -3e0 / 4 * mu[k][j-2][i] * stry[j-2];
a_muy2 = mu[k][j-2][i] * stry[j-2] + mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j] + 3.0 * mu[k][j-1][i] * stry[j-1];
a_muy3 = mu[k][j-1][i] * stry[j-1] + mu[k][j+2][i] * stry[j+2] + 3.0 * mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j];
a_muy4 = mu[k][j+1][i] * stry[j+1] - 3e0 / 4 * mu[k][j][i] * stry[j] - 3e0 / 4 * mu[k][j+2][i] * stry[j+2];
a_muz1 = mu[k-1][j][i] * strz[k-1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 / 4 * mu[k-2][j][i] * strz[k-2];
a_muz2 = mu[k-2][j][i] * strz[k-2] + mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k] + 3.0 * mu[k-1][j][i] * strz[k-1];
a_muz3 = mu[k-1][j][i] * strz[k-1] + mu[k+2][j][i] * strz[k+2] + 3.0 * mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k];
a_muz4 = mu[k+1][j][i] * strz[k+1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 /4 * mu[k+2][j][i] * strz[k+2];
a_r1 = 1e0 / 6 * (strx[i] * ((2 * a_mux1 + la[k][j][i-1] * strx[i-1] - 3e0 / 4 * la[k][j][i] * strx[i] - 3e0 / 4 * la[k][j][i-2] * strx[i-2]) * (u_0[k][j][i-2] - u_0[k][j][i]) +
(2 * a_mux2 + la[k][j][i-2] * strx[i-2] + la[k][j][i+1] * strx[i+1] + 3 * la[k][j][i] * strx[i] + 3 * la[k][j][i-1] * strx[i-1]) * (u_0[k][j][i-1] - u_0[k][j][i]) +
(2 * a_mux3 + la[k][j][i-1] * strx[i-1] + la[k][j][i+2] * strx[i+2] + 3 * la[k][j][i+1] * strx[i+1] + 3 * la[k][j][i] * strx[i]) * (u_0[k][j][i+1] - u_0[k][j][i]) +
(2 * a_mux4 + la[k][j][i+1] * strx[i+1] - 3e0 / 4 * la[k][j][i] * strx[i] - 3e0 / 4 * la[k][j][i+2] * strx[i+2]) * (u_0[k][j][i+2] - u_0[k][j][i]))
+ stry[j] * (a_muy1 * (u_0[k][j-2][i] - u_0[k][j][i]) + a_muy2 * (u_0[k][j-1][i] - u_0[k][j][i]) + a_muy3 * (u_0[k][j+1][i] - u_0[k][j][i]) + a_muy4 * (u_0[k][j+2][i] - u_0[k][j][i])) + strz[k] * (a_muz1 * (u_0[k-2][j][i] - u_0[k][j][i]) + a_muz2 * (u_0[k-1][j][i] - u_0[k][j][i]) + a_muz3 * (u_0[k+1][j][i] - u_0[k][j][i]) + a_muz4 * (u_0[k+2][j][i] - u_0[k][j][i])));
a_r1 += strx[i] * stry[j] * (1e0 / 144) * (la[k][j][i-2] * (u_1[k][j-2][i-2] - u_1[k][j+2][i-2] + 8 * (-u_1[k][j-1][i-2] + u_1[k][j+1][i-2])) - 8 * (la[k][j][i-1] * (u_1[k][j-2][i-1] - u_1[k][j+2][i-1] + 8 * (-u_1[k][j-1][i-1] + u_1[k][j+1][i-1]))) + 8 * (la[k][j][i+1] * (u_1[k][j-2][i+1] - u_1[k][j+2][i+1] + 8 * (-u_1[k][j-1][i+1] + u_1[k][j+1][i+1]))) - (la[k][j][i+2] * (u_1[k][j-2][i+2] - u_1[k][j+2][i+2] + 8 * (-u_1[k][j-1][i+2] + u_1[k][j+1][i+2]))));
a_r1 += strx[i] * strz[k] * (1e0 / 144) * (la[k][j][i-2] * (u_2[k-2][j][i-2] - u_2[k+2][j][i-2] + 8 * (-u_2[k-1][j][i-2] + u_2[k+1][j][i-2])) - 8 * (la[k][j][i-1] * (u_2[k-2][j][i-1] - u_2[k+2][j][i-1] + 8 * (-u_2[k-1][j][i-1] + u_2[k+1][j][i-1]))) + 8 * (la[k][j][i+1] * (u_2[k-2][j][i+1] - u_2[k+2][j][i+1] + 8 * (-u_2[k-1][j][i+1] + u_2[k+1][j][i+1]))) - (la[k][j][i+2] * (u_2[k-2][j][i+2] - u_2[k+2][j][i+2] + 8 * (-u_2[k-1][j][i+2] + u_2[k+1][j][i+2]))));
a_r1 += strx[i] * stry[j] * (1e0 / 144) * (mu[k][j-2][i] * (u_1[k][j-2][i-2] - u_1[k][j-2][i+2] + 8 * (-u_1[k][j-2][i-1] + u_1[k][j-2][i+1])) - 8 * (mu[k][j-1][i] * (u_1[k][j-1][i-2] - u_1[k][j-1][i+2] + 8 * (-u_1[k][j-1][i-1] + u_1[k][j-1][i+1]))) + 8 * (mu[k][j+1][i] * (u_1[k][j+1][i-2] - u_1[k][j+1][i+2] + 8 * (-u_1[k][j+1][i-1] + u_1[k][j+1][i+1]))) - (mu[k][j+2][i] * (u_1[k][j+2][i-2] - u_1[k][j+2][i+2] + 8 * (-u_1[k][j+2][i-1] + u_1[k][j+2][i+1]))));
a_r1 += strx[i] * strz[k] * (1e0 / 144) * (mu[k-2][j][i] * (u_2[k-2][j][i-2] - u_2[k-2][j][i+2] + 8 * (-u_2[k-2][j][i-1] + u_2[k-2][j][i+1])) - 8 * (mu[k-1][j][i] * (u_2[k-1][j][i-2] - u_2[k-1][j][i+2] + 8 * (-u_2[k-1][j][i-1] + u_2[k-1][j][i+1]))) + 8 * (mu[k+1][j][i] * (u_2[k+1][j][i-2] - u_2[k+1][j][i+2] + 8 * (-u_2[k+1][j][i-1] + u_2[k+1][j][i+1]))) - (mu[k+2][j][i] * (u_2[k+2][j][i-2] - u_2[k+2][j][i+2] + 8 * (-u_2[k+2][j][i-1] + u_2[k+2][j][i+1]))));
uacc_0[k][j][i] = a1 * uacc_0[k][j][i] + cof * a_r1;
b_mux1 = mu[k+1][j][i-1] * strx[i-1] - 3e0 / 4 * mu[k+1][j][i] * strx[i] - 3e0 / 4 * mu[k+1][j][i-2] * strx[i-2];
b_mux2 = mu[k+1][j][i-2] * strx[i-2] + mu[k+1][j][i+1] * strx[i+1] + 3.0 * mu[k+1][j][i] * strx[i] + 3.0 * mu[k+1][j][i-1] * strx[i-1];
b_mux3 = mu[k+1][j][i-1] * strx[i-1] + mu[k+1][j][i+2] * strx[i+2] + 3.0 * mu[k+1][j][i+1] * strx[i+1] + 3.0 * mu[k+1][j][i] * strx[i];
b_mux4 = mu[k+1][j][i+1] * strx[i+1] - 3e0 / 4 * mu[k+1][j][i] * strx[i] - 3e0 / 4 * mu[k+1][j][i+2] * strx[i+2];
b_muy1 = mu[k+1][j-1][i] * stry[j-1] - 3e0 / 4 * mu[k+1][j][i] * stry[j] -3e0 / 4 * mu[k+1][j-2][i] * stry[j-2];
b_muy2 = mu[k+1][j-2][i] * stry[j-2] + mu[k+1][j+1][i] * stry[j+1] + 3.0 * mu[k+1][j][i] * stry[j] + 3.0 * mu[k+1][j-1][i] * stry[j-1];
b_muy3 = mu[k+1][j-1][i] * stry[j-1] + mu[k+1][j+2][i] * stry[j+2] + 3.0 * mu[k+1][j+1][i] * stry[j+1] + 3.0 * mu[k+1][j][i] * stry[j];
b_muy4 = mu[k+1][j+1][i] * stry[j+1] - 3e0 / 4 * mu[k+1][j][i] * stry[j] - 3e0 / 4 * mu[k+1][j+2][i] * stry[j+2];
b_muz1 = mu[k+1-1][j][i] * strz[k+1-1] - 3e0 / 4 * mu[k+1][j][i] * strz[k+1] - 3e0 / 4 * mu[k+1-2][j][i] * strz[k+1-2];
b_muz2 = mu[k+1-2][j][i] * strz[k+1-2] + mu[k+1+1][j][i] * strz[k+1+1] + 3.0 * mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k+1-1][j][i] * strz[k+1-1];
b_muz3 = mu[k+1-1][j][i] * strz[k+1-1] + mu[k+1+2][j][i] * strz[k+1+2] + 3.0 * mu[k+1+1][j][i] * strz[k+1+1] + 3.0 * mu[k+1][j][i] * strz[k+1];
b_muz4 = mu[k+1+1][j][i] * strz[k+1+1] - 3e0 / 4 * mu[k+1][j][i] * strz[k+1] - 3e0 /4 * mu[k+1+2][j][i] * strz[k+1+2];
b_r1 = 1e0 / 6 * (strx[i] * ((2 * b_mux1 + la[k+1][j][i-1] * strx[i-1] - 3e0 / 4 * la[k+1][j][i] * strx[i] - 3e0 / 4 * la[k+1][j][i-2] * strx[i-2]) * (u_0[k+1][j][i-2] - u_0[k+1][j][i]) +
(2 * b_mux2 + la[k+1][j][i-2] * strx[i-2] + la[k+1][j][i+1] * strx[i+1] + 3 * la[k+1][j][i] * strx[i] + 3 * la[k+1][j][i-1] * strx[i-1]) * (u_0[k+1][j][i-1] - u_0[k+1][j][i]) +
(2 * b_mux3 + la[k+1][j][i-1] * strx[i-1] + la[k+1][j][i+2] * strx[i+2] + 3 * la[k+1][j][i+1] * strx[i+1] + 3 * la[k+1][j][i] * strx[i]) * (u_0[k+1][j][i+1] - u_0[k+1][j][i]) +
(2 * b_mux4 + la[k+1][j][i+1] * strx[i+1] - 3e0 / 4 * la[k+1][j][i] * strx[i] - 3e0 / 4 * la[k+1][j][i+2] * strx[i+2]) * (u_0[k+1][j][i+2] - u_0[k+1][j][i]))
+ stry[j] * (b_muy1 * (u_0[k+1][j-2][i] - u_0[k+1][j][i]) + b_muy2 * (u_0[k+1][j-1][i] - u_0[k+1][j][i]) + b_muy3 * (u_0[k+1][j+1][i] - u_0[k+1][j][i]) + b_muy4 * (u_0[k+1][j+2][i] - u_0[k+1][j][i])) + strz[k+1] * (b_muz1 * (u_0[k+1-2][j][i] - u_0[k+1][j][i]) + b_muz2 * (u_0[k+1-1][j][i] - u_0[k+1][j][i]) + b_muz3 * (u_0[k+1+1][j][i] - u_0[k+1][j][i]) + b_muz4 * (u_0[k+1+2][j][i] - u_0[k+1][j][i])));
b_r1 += strx[i] * stry[j] * (1e0 / 144) * (la[k+1][j][i-2] * (u_1[k+1][j-2][i-2] - u_1[k+1][j+2][i-2] + 8 * (-u_1[k+1][j-1][i-2] + u_1[k+1][j+1][i-2])) - 8 * (la[k+1][j][i-1] * (u_1[k+1][j-2][i-1] - u_1[k+1][j+2][i-1] + 8 * (-u_1[k+1][j-1][i-1] + u_1[k+1][j+1][i-1]))) + 8 * (la[k+1][j][i+1] * (u_1[k+1][j-2][i+1] - u_1[k+1][j+2][i+1] + 8 * (-u_1[k+1][j-1][i+1] + u_1[k+1][j+1][i+1]))) - (la[k+1][j][i+2] * (u_1[k+1][j-2][i+2] - u_1[k+1][j+2][i+2] + 8 * (-u_1[k+1][j-1][i+2] + u_1[k+1][j+1][i+2]))));
b_r1 += strx[i] * strz[k+1] * (1e0 / 144) * (la[k+1][j][i-2] * (u_2[k+1-2][j][i-2] - u_2[k+1+2][j][i-2] + 8 * (-u_2[k+1-1][j][i-2] + u_2[k+1+1][j][i-2])) - 8 * (la[k+1][j][i-1] * (u_2[k+1-2][j][i-1] - u_2[k+1+2][j][i-1] + 8 * (-u_2[k+1-1][j][i-1] + u_2[k+1+1][j][i-1]))) + 8 * (la[k+1][j][i+1] * (u_2[k+1-2][j][i+1] - u_2[k+1+2][j][i+1] + 8 * (-u_2[k+1-1][j][i+1] + u_2[k+1+1][j][i+1]))) - (la[k+1][j][i+2] * (u_2[k+1-2][j][i+2] - u_2[k+1+2][j][i+2] + 8 * (-u_2[k+1-1][j][i+2] + u_2[k+1+1][j][i+2]))));
b_r1 += strx[i] * stry[j] * (1e0 / 144) * (mu[k+1][j-2][i] * (u_1[k+1][j-2][i-2] - u_1[k+1][j-2][i+2] + 8 * (-u_1[k+1][j-2][i-1] + u_1[k+1][j-2][i+1])) - 8 * (mu[k+1][j-1][i] * (u_1[k+1][j-1][i-2] - u_1[k+1][j-1][i+2] + 8 * (-u_1[k+1][j-1][i-1] + u_1[k+1][j-1][i+1]))) + 8 * (mu[k+1][j+1][i] * (u_1[k+1][j+1][i-2] - u_1[k+1][j+1][i+2] + 8 * (-u_1[k+1][j+1][i-1] + u_1[k+1][j+1][i+1]))) - (mu[k+1][j+2][i] * (u_1[k+1][j+2][i-2] - u_1[k+1][j+2][i+2] + 8 * (-u_1[k+1][j+2][i-1] + u_1[k+1][j+2][i+1]))));
b_r1 += strx[i] * strz[k+1] * (1e0 / 144) * (mu[k+1-2][j][i] * (u_2[k+1-2][j][i-2] - u_2[k+1-2][j][i+2] + 8 * (-u_2[k+1-2][j][i-1] + u_2[k+1-2][j][i+1])) - 8 * (mu[k+1-1][j][i] * (u_2[k+1-1][j][i-2] - u_2[k+1-1][j][i+2] + 8 * (-u_2[k+1-1][j][i-1] + u_2[k+1-1][j][i+1]))) + 8 * (mu[k+1+1][j][i] * (u_2[k+1+1][j][i-2] - u_2[k+1+1][j][i+2] + 8 * (-u_2[k+1+1][j][i-1] + u_2[k+1+1][j][i+1]))) - (mu[k+1+2][j][i] * (u_2[k+1+2][j][i-2] - u_2[k+1+2][j][i+2] + 8 * (-u_2[k+1+2][j][i-1] + u_2[k+1+2][j][i+1]))));
uacc_0[k+1][j][i] = a1 * uacc_0[k+1][j][i] + cof * b_r1;
}
}
}
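// Illustrative host-side launch for sw4_1 (assumptions: N = 304 so the arrays
// match the 304x304 casts above, and a 16x8 thread block, which matches the
// __launch_bounds__ (128,2) annotation):
//   dim3 block (16, 8);
//   dim3 grid (ceil(N, block.x), ceil(N, block.y));
//   hipLaunchKernelGGL(sw4_1, grid, block, 0, 0,
//                      uacc_0, uacc_1, uacc_2, u_0, u_1, u_2,
//                      mu, la, strx, stry, strz, N);
//   check_error ("sw4_1");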
__global__ void __launch_bounds__ (128,2) sw4_2 (double * uacc_in_0, double * uacc_in_1, double * uacc_in_2, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * strx, double * stry, double * strz, int N) {
// Determine the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
// Assumptions
int a1 = 1;
double h = 3.7;
double cof = 1e0 / ( h * h);
double (*uacc_0)[304][304] = (double (*)[304][304])uacc_in_0;
double (*uacc_1)[304][304] = (double (*)[304][304])uacc_in_1;
double (*uacc_2)[304][304] = (double (*)[304][304])uacc_in_2;
double (*u_0)[304][304] = (double (*)[304][304])u_in_0;
double (*u_1)[304][304] = (double (*)[304][304])u_in_1;
double (*u_2)[304][304] = (double (*)[304][304])u_in_2;
double (*mu)[304][304] = (double (*)[304][304])mu_in;
double (*la)[304][304] = (double (*)[304][304])la_in;
double a_mux1, a_mux2, a_mux3, a_mux4, a_muy1, a_muy2, a_muy3, a_muy4, a_muz1, a_muz2, a_muz3, a_muz4;
double b_mux1, b_mux2, b_mux3, b_mux4, b_muy1, b_muy2, b_muy3, b_muy4, b_muz1, b_muz2, b_muz3, b_muz4;
double a_r2, b_r2;
if (i>=2 & j>=2 & i<=N-3 & j<=N-3) {
#pragma unroll 3
for (int k=2; k<=N-3; k+=2) {
double a_mux1;
double a_mux2;
double a_mux3;
double a_mux4;
double a_muy1;
double a_muy2;
double a_muy3;
double _t_7_;
double _t_9_;
double a_muy4;
double _t_11_;
double _t_13_;
double _t_1_;
double a_r2;
double _t_10_;
double _t_6_;
double _t_12_;
double _t_14_;
double _t_8_;
double a_muz3;
double a_muz2;
double a_muz4;
double a_muz1;
double _t_15_;
double _t_21_;
double _t_34_;
double _t_47_;
double _t_60_;
double _t_22_;
double _t_35_;
double _t_48_;
double _t_61_;
double _t_53_;
double _t_64_;
double _t_69_;
double _t_56_;
double _t_51_;
double _t_59_;
double _t_72_;
double _t_66_;
double _t_20_;
double _t_27_;
double _t_38_;
double _t_30_;
double _t_40_;
double _t_43_;
double _t_46_;
double _t_25_;
double _t_33_;
double uacc_1kc0jc0ic0;
double uacc_1kp1jc0ic0;
double b_mux1;
double b_mux2;
double b_mux3;
double b_mux4;
double b_muy1;
double b_muy2;
double b_muy3;
double _t_80_;
double _t_82_;
double b_muy4;
double _t_84_;
double _t_86_;
double _t_74_;
double b_r2;
double _t_81_;
double _t_79_;
double _t_83_;
double _t_85_;
double _t_87_;
double b_muz2;
double _t_88_;
double b_muz1;
double b_muz3;
double b_muz4;
double _t_142_;
double _t_134_;
double _t_121_;
double _t_126_;
double _t_129_;
double _t_137_;
double _t_124_;
double _t_132_;
double _t_139_;
double _t_145_;
double _t_107_;
double _t_94_;
double _t_120_;
double _t_93_;
double _t_133_;
double _t_95_;
double _t_108_;
double _t_100_;
double _t_111_;
double _t_103_;
double _t_113_;
double _t_116_;
double _t_119_;
double _t_106_;
double _t_98_;
a_mux1 = -3.0 / 4.0 * mu[k][j][i-2] * strx[i-2];
a_mux1 += mu[k][j][i-1] * strx[i-1];
a_mux1 -= 3.0 / 4.0 * mu[k][j][i] * strx[i];
a_mux2 = mu[k][j][i-2] * strx[i-2];
a_mux2 += 3.0 * mu[k][j][i] * strx[i];
a_mux2 += 3.0 * mu[k][j][i-1] * strx[i-1];
a_mux2 += mu[k][j][i+1] * strx[i+1];
a_mux3 = mu[k][j][i-1] * strx[i-1];
a_mux3 += 3.0 * mu[k][j][i+1] * strx[i+1];
a_mux3 += 3.0 * mu[k][j][i] * strx[i];
a_mux3 += mu[k][j][i+2] * strx[i+2];
a_mux4 = mu[k][j][i+1] * strx[i+1];
a_mux4 -= 3.0 / 4.0 * mu[k][j][i] * strx[i];
a_mux4 -= 3.0 / 4.0 * mu[k][j][i+2] * strx[i+2];
a_muy1 = -3.0 / 4.0 * mu[k][j][i] * stry[j];
a_muy1 += mu[k][j-1][i] * stry[j-1];
a_muy1 -= 3.0 / 4.0 * mu[k][j-2][i] * stry[j-2];
a_muy2 = mu[k][j-2][i] * stry[j-2];
a_muy2 += 3.0 * mu[k][j][i] * stry[j];
a_muy2 += 3.0 * mu[k][j-1][i] * stry[j-1];
a_muy3 = mu[k][j-1][i] * stry[j-1];
a_muy3 += 3.0 * mu[k][j][i] * stry[j];
_t_7_ = 2.0 * a_muy1;
a_muy2 += mu[k][j+1][i] * stry[j+1];
a_muy3 += 3.0 * mu[k][j+1][i] * stry[j+1];
_t_9_ = 2.0 * a_muy2;
a_muy4 = mu[k][j+1][i] * stry[j+1];
a_muy4 -= 3.0 / 4.0 * mu[k][j][i] * stry[j];
a_muy3 += mu[k][j+2][i] * stry[j+2];
a_muy4 -= 3.0 / 4.0 * mu[k][j+2][i] * stry[j+2];
_t_11_ = 2.0 * a_muy3;
_t_13_ = 2.0 * a_muy4;
_t_7_ -= 3.0 / 4.0 * la[k][j][i] * stry[j];
_t_9_ += 3.0 * la[k][j][i] * stry[j];
_t_11_ += 3.0 * la[k][j][i] * stry[j];
_t_13_ -= 3.0 / 4.0 * la[k][j][i] * stry[j];
_t_7_ += la[k][j-1][i] * stry[j-1];
_t_9_ += 3.0 * la[k][j-1][i] * stry[j-1];
_t_11_ += la[k][j-1][i] * stry[j-1];
_t_9_ += la[k][j+1][i] * stry[j+1];
_t_11_ += 3.0 * la[k][j+1][i] * stry[j+1];
_t_13_ += la[k][j+1][i] * stry[j+1];
_t_7_ -= 3.0 / 4.0 * la[k][j-2][i] * stry[j-2];
_t_9_ += la[k][j-2][i] * stry[j-2];
_t_11_ += la[k][j+2][i] * stry[j+2];
_t_13_ -= 3.0 / 4.0 * la[k][j+2][i] * stry[j+2];
_t_1_ = a_mux1 * u_1[k][j][i-2];
_t_1_ -= a_mux1 * u_1[k][j][i];
_t_1_ -= a_mux2 * u_1[k][j][i];
_t_1_ -= a_mux3 * u_1[k][j][i];
_t_1_ -= a_mux4 * u_1[k][j][i];
_t_1_ += a_mux2 * u_1[k][j][i-1];
_t_1_ += a_mux3 * u_1[k][j][i+1];
_t_1_ += a_mux4 * u_1[k][j][i+2];
a_r2 = 1.0 / 6.0 * strx[i] * _t_1_;
_t_10_ = -u_1[k][j][i];
_t_10_ += u_1[k][j-1][i];
_t_6_ = _t_9_ * _t_10_;
_t_12_ = -u_1[k][j][i];
_t_12_ += u_1[k][j+1][i];
_t_6_ += _t_11_ * _t_12_;
_t_14_ = -u_1[k][j][i];
_t_14_ += u_1[k][j+2][i];
_t_6_ += _t_13_ * _t_14_;
_t_8_ = -u_1[k][j][i];
_t_8_ += u_1[k][j-2][i];
_t_6_ += _t_7_ * _t_8_;
a_r2 += 1.0 / 6.0 * stry[j] * _t_6_;
a_muz3 = 3.0 * mu[k][j][i] * strz[k];
a_muz2 = 3.0 * mu[k][j][i] * strz[k];
a_muz4 = -3.0 / 4.0 * mu[k][j][i] * strz[k];
a_muz1 = -3.0 / 4.0 * mu[k][j][i] * strz[k];
a_muz1 -= 3.0 / 4.0 * mu[k-2][j][i] * strz[k-2];
a_muz2 += mu[k-2][j][i] * strz[k-2];
a_muz2 += mu[k+1][j][i] * strz[k+1];
a_muz3 += 3.0 * mu[k+1][j][i] * strz[k+1];
a_muz4 += mu[k+1][j][i] * strz[k+1];
a_muz1 += mu[k-1][j][i] * strz[k-1];
a_muz2 += 3.0 * mu[k-1][j][i] * strz[k-1];
a_muz3 += mu[k-1][j][i] * strz[k-1];
_t_15_ = -a_muz1 * u_1[k][j][i];
_t_15_ -= a_muz2 * u_1[k][j][i];
_t_15_ += a_muz1 * u_1[k-2][j][i];
_t_15_ += a_muz2 * u_1[k-1][j][i];
a_muz3 += mu[k+2][j][i] * strz[k+2];
_t_15_ -= a_muz3 * u_1[k][j][i];
_t_15_ += a_muz3 * u_1[k+1][j][i];
a_muz4 -= 3.0 / 4.0 * mu[k+2][j][i] * strz[k+2];
_t_15_ -= a_muz4 * u_1[k][j][i];
_t_15_ += a_muz4 * u_1[k+2][j][i];
a_r2 += 1.0 / 6.0 * strz[k] * _t_15_;
_t_21_ = 1.0 / 144.0 * strx[i] * stry[j];
_t_34_ = 1.0 / 144.0 * strx[i] * stry[j];
_t_47_ = 1.0 / 144.0 * stry[j] * strz[k];
_t_60_ = 1.0 / 144.0 * stry[j] * strz[k];
_t_22_ = mu[k][j][i-2] * u_0[k][j-2][i-2];
_t_35_ = la[k][j-2][i] * u_0[k][j-2][i-2];
_t_22_ -= mu[k][j][i+2] * u_0[k][j-2][i+2];
_t_35_ -= la[k][j-2][i] * u_0[k][j-2][i+2];
_t_22_ -= mu[k][j][i-2] * u_0[k][j+2][i-2];
_t_35_ -= la[k][j+2][i] * u_0[k][j+2][i-2];
_t_22_ += mu[k][j][i+2] * u_0[k][j+2][i+2];
_t_35_ += la[k][j+2][i] * u_0[k][j+2][i+2];
_t_48_ = la[k][j-2][i] * u_2[k-2][j-2][i];
_t_61_ = mu[k-2][j][i] * u_2[k-2][j-2][i];
_t_48_ -= la[k][j+2][i] * u_2[k-2][j+2][i];
_t_61_ -= mu[k-2][j][i] * u_2[k-2][j+2][i];
_t_48_ -= la[k][j-2][i] * u_2[k+2][j-2][i];
_t_61_ -= mu[k+2][j][i] * u_2[k+2][j-2][i];
_t_48_ += la[k][j+2][i] * u_2[k+2][j+2][i];
_t_61_ += mu[k+2][j][i] * u_2[k+2][j+2][i];
_t_53_ = -u_2[k+2][j-1][i];
_t_53_ += u_2[k-2][j-1][i];
_t_64_ = -u_2[k-2][j-1][i];
_t_53_ += 8.0 * u_2[k+1][j-1][i];
_t_69_ = 8.0 * -u_2[k+1][j-1][i];
_t_56_ = u_2[k-2][j+1][i];
_t_64_ += u_2[k-2][j+1][i];
_t_61_ += mu[k-2][j][i] * 8.0 * _t_64_;
_t_56_ += 8.0 * u_2[k+1][j+1][i];
_t_69_ += 8.0 * u_2[k+1][j+1][i];
_t_69_ += u_2[k+1][j-2][i];
_t_51_ = u_2[k+1][j-2][i];
_t_69_ -= u_2[k+1][j+2][i];
_t_61_ += 8.0 * mu[k+1][j][i] * _t_69_;
_t_59_ = u_2[k+1][j+2][i];
_t_56_ -= u_2[k+2][j+1][i];
_t_72_ = -u_2[k+2][j-1][i];
_t_72_ += u_2[k+2][j+1][i];
_t_61_ -= mu[k+2][j][i] * 8.0 * _t_72_;
_t_53_ += 8.0 * -u_2[k-1][j-1][i];
_t_48_ -= 8.0 * la[k][j-1][i] * _t_53_;
_t_66_ = 8.0 * -u_2[k-1][j-1][i];
_t_56_ += 8.0 * -u_2[k-1][j+1][i];
_t_48_ += 8.0 * la[k][j+1][i] * _t_56_;
_t_66_ += 8.0 * u_2[k-1][j+1][i];
_t_51_ += -u_2[k-1][j-2][i];
_t_48_ += la[k][j-2][i] * 8.0 * _t_51_;
_t_66_ += u_2[k-1][j-2][i];
_t_59_ += -u_2[k-1][j+2][i];
_t_48_ -= la[k][j+2][i] * 8.0 * _t_59_;
_t_66_ -= u_2[k-1][j+2][i];
_t_61_ -= 8.0 * mu[k-1][j][i] * _t_66_;
_t_20_ = _t_47_ * _t_48_;
_t_20_ += _t_60_ * _t_61_;
_t_27_ = u_0[k][j-2][i-1];
_t_38_ = -u_0[k][j-2][i-1];
_t_38_ += u_0[k][j-2][i+1];
_t_35_ += la[k][j-2][i] * 8.0 * _t_38_;
_t_30_ = u_0[k][j-2][i+1];
_t_27_ += 8.0 * -u_0[k][j-1][i-1];
_t_40_ = 8.0 * -u_0[k][j-1][i-1];
_t_30_ += 8.0 * -u_0[k][j-1][i+1];
_t_40_ += 8.0 * u_0[k][j-1][i+1];
_t_27_ += 8.0 * u_0[k][j+1][i-1];
_t_43_ = 8.0 * -u_0[k][j+1][i-1];
_t_30_ += 8.0 * u_0[k][j+1][i+1];
_t_43_ += 8.0 * u_0[k][j+1][i+1];
_t_27_ -= u_0[k][j+2][i-1];
_t_22_ -= 8.0 * mu[k][j][i-1] * _t_27_;
_t_46_ = -u_0[k][j+2][i-1];
_t_30_ -= u_0[k][j+2][i+1];
_t_22_ += 8.0 * mu[k][j][i+1] * _t_30_;
_t_46_ += u_0[k][j+2][i+1];
_t_35_ -= la[k][j+2][i] * 8.0 * _t_46_;
_t_40_ += u_0[k][j-1][i-2];
_t_25_ = -u_0[k][j-1][i-2];
_t_25_ += u_0[k][j+1][i-2];
_t_22_ += mu[k][j][i-2] * 8.0 * _t_25_;
_t_43_ += u_0[k][j+1][i-2];
_t_40_ -= u_0[k][j-1][i+2];
_t_35_ -= 8.0 * la[k][j-1][i] * _t_40_;
_t_33_ = -u_0[k][j-1][i+2];
_t_33_ += u_0[k][j+1][i+2];
_t_22_ -= mu[k][j][i+2] * 8.0 * _t_33_;
_t_20_ += _t_21_ * _t_22_;
_t_43_ -= u_0[k][j+1][i+2];
_t_35_ += 8.0 * la[k][j+1][i] * _t_43_;
_t_20_ += _t_34_ * _t_35_;
a_r2 += _t_20_;
uacc_1kc0jc0ic0 = a1 * uacc_1[k][j][i];
uacc_1kc0jc0ic0 += cof * a_r2;
uacc_1[k][j][i] = uacc_1kc0jc0ic0;
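// The statements below repeat the update for the k+1 plane, accumulating b_r2
// from the b_* coefficients before storing uacc_1[k+1][j][i].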
uacc_1kp1jc0ic0 = a1 * uacc_1[k+1][j][i];
b_mux1 = -3.0 / 4.0 * mu[k+1][j][i] * strx[i];
b_mux1 -= 3.0 / 4.0 * mu[k+1][j][i-2] * strx[i-2];
b_mux2 = mu[k+1][j][i-2] * strx[i-2];
b_mux2 += 3.0 * mu[k+1][j][i] * strx[i];
b_mux1 += mu[k+1][j][i-1] * strx[i-1];
b_mux2 += 3.0 * mu[k+1][j][i-1] * strx[i-1];
b_mux3 = mu[k+1][j][i-1] * strx[i-1];
b_mux3 += 3.0 * mu[k+1][j][i] * strx[i];
b_mux2 += mu[k+1][j][i+1] * strx[i+1];
b_mux3 += 3.0 * mu[k+1][j][i+1] * strx[i+1];
b_mux4 = mu[k+1][j][i+1] * strx[i+1];
b_mux4 -= 3.0 / 4.0 * mu[k+1][j][i] * strx[i];
b_mux3 += mu[k+1][j][i+2] * strx[i+2];
b_mux4 -= 3.0 / 4.0 * mu[k+1][j][i+2] * strx[i+2];
b_muy1 = -3.0 / 4.0 * mu[k+1][j][i] * stry[j];
b_muy1 -= 3.0 / 4.0 * mu[k+1][j-2][i] * stry[j-2];
b_muy2 = mu[k+1][j-2][i] * stry[j-2];
b_muy2 += 3.0 * mu[k+1][j][i] * stry[j];
b_muy1 += mu[k+1][j-1][i] * stry[j-1];
b_muy2 += 3.0 * mu[k+1][j-1][i] * stry[j-1];
b_muy3 = mu[k+1][j-1][i] * stry[j-1];
b_muy3 += 3.0 * mu[k+1][j][i] * stry[j];
_t_80_ = 2.0 * b_muy1;
b_muy2 += mu[k+1][j+1][i] * stry[j+1];
b_muy3 += 3.0 * mu[k+1][j+1][i] * stry[j+1];
_t_82_ = 2.0 * b_muy2;
b_muy4 = mu[k+1][j+1][i] * stry[j+1];
b_muy4 -= 3.0 / 4.0 * mu[k+1][j][i] * stry[j];
b_muy3 += mu[k+1][j+2][i] * stry[j+2];
b_muy4 -= 3.0 / 4.0 * mu[k+1][j+2][i] * stry[j+2];
_t_84_ = 2.0 * b_muy3;
_t_86_ = 2.0 * b_muy4;
_t_80_ -= 3.0 / 4.0 * la[k+1][j-2][i] * stry[j-2];
_t_82_ += la[k+1][j-2][i] * stry[j-2];
_t_84_ += la[k+1][j+2][i] * stry[j+2];
_t_86_ -= 3.0 / 4.0 * la[k+1][j+2][i] * stry[j+2];
_t_80_ += la[k+1][j-1][i] * stry[j-1];
_t_82_ += 3.0 * la[k+1][j-1][i] * stry[j-1];
_t_84_ += la[k+1][j-1][i] * stry[j-1];
_t_82_ += la[k+1][j+1][i] * stry[j+1];
_t_84_ += 3.0 * la[k+1][j+1][i] * stry[j+1];
_t_86_ += la[k+1][j+1][i] * stry[j+1];
_t_80_ -= 3.0 / 4.0 * la[k+1][j][i] * stry[j];
_t_82_ += 3.0 * la[k+1][j][i] * stry[j];
_t_84_ += 3.0 * la[k+1][j][i] * stry[j];
_t_86_ -= 3.0 / 4.0 * la[k+1][j][i] * stry[j];
_t_74_ = -b_mux1 * u_1[k+1][j][i];
_t_74_ -= b_mux2 * u_1[k+1][j][i];
_t_74_ -= b_mux3 * u_1[k+1][j][i];
_t_74_ -= b_mux4 * u_1[k+1][j][i];
_t_74_ += b_mux1 * u_1[k+1][j][i-2];
_t_74_ += b_mux2 * u_1[k+1][j][i-1];
_t_74_ += b_mux3 * u_1[k+1][j][i+1];
_t_74_ += b_mux4 * u_1[k+1][j][i+2];
b_r2 = 1.0 / 6.0 * strx[i] * _t_74_;
_t_81_ = -u_1[k+1][j][i];
_t_81_ += u_1[k+1][j-2][i];
_t_79_ = _t_80_ * _t_81_;
_t_83_ = -u_1[k+1][j][i];
_t_83_ += u_1[k+1][j-1][i];
_t_79_ += _t_82_ * _t_83_;
_t_85_ = -u_1[k+1][j][i];
_t_85_ += u_1[k+1][j+1][i];
_t_79_ += _t_84_ * _t_85_;
_t_87_ = -u_1[k+1][j][i];
_t_87_ += u_1[k+1][j+2][i];
_t_79_ += _t_86_ * _t_87_;
b_r2 += 1.0 / 6.0 * stry[j] * _t_79_;
b_muz2 = mu[k-1][j][i] * strz[k-1];
b_muz2 += mu[k+2][j][i] * strz[k+2];
b_muz2 += 3.0 * mu[k+1][j][i] * strz[k+1];
b_muz2 += 3.0 * mu[k][j][i] * strz[k];
_t_88_ = -b_muz2 * u_1[k+1][j][i];
_t_88_ += b_muz2 * u_1[k][j][i];
b_muz1 = mu[k][j][i] * strz[k];
b_muz1 -= 3.0 / 4.0 * mu[k+1][j][i] * strz[k+1];
b_muz1 -= 3.0 / 4.0 * mu[k-1][j][i] * strz[k-1];
_t_88_ -= b_muz1 * u_1[k+1][j][i];
_t_88_ += b_muz1 * u_1[k-1][j][i];
b_muz3 = mu[k][j][i] * strz[k];
b_muz3 += 3.0 * mu[k+2][j][i] * strz[k+2];
b_muz3 += 3.0 * mu[k+1][j][i] * strz[k+1];
b_muz4 = mu[k+2][j][i] * strz[k+2];
b_muz4 -= 3.0 / 4.0 * mu[k+1][j][i] * strz[k+1];
b_muz3 += mu[k+3][j][i] * strz[k+3];
_t_88_ -= b_muz3 * u_1[k+1][j][i];
_t_88_ += b_muz3 * u_1[k+2][j][i];
b_muz4 -= 3.0 / 4.0 * mu[k+3][j][i] * strz[k+3];
_t_88_ -= b_muz4 * u_1[k+1][j][i];
_t_88_ += b_muz4 * u_1[k+3][j][i];
b_r2 += 1.0 / 6.0 * strz[k+1] * _t_88_;
_t_142_ = u_2[k+2][j-2][i];
_t_142_ -= u_2[k+2][j+2][i];
_t_142_ += 8.0 * -u_2[k+2][j-1][i];
_t_142_ += 8.0 * u_2[k+2][j+1][i];
_t_134_ = mu[k-1][j][i] * u_2[k-1][j-2][i];
_t_134_ -= mu[k-1][j][i] * u_2[k-1][j+2][i];
_t_134_ += 8.0 * mu[k+2][j][i] * _t_142_;
_t_121_ = la[k+1][j-2][i] * u_2[k-1][j-2][i];
_t_121_ -= la[k+1][j+2][i] * u_2[k-1][j+2][i];
_t_126_ = u_2[k-1][j-1][i];
_t_126_ += 8.0 * u_2[k+2][j-1][i];
_t_129_ = u_2[k-1][j+1][i];
_t_129_ += 8.0 * u_2[k+2][j+1][i];
_t_137_ = -u_2[k-1][j-1][i];
_t_137_ += u_2[k-1][j+1][i];
_t_134_ += mu[k-1][j][i] * 8.0 * _t_137_;
_t_124_ = u_2[k+2][j-2][i];
_t_132_ = u_2[k+2][j+2][i];
_t_121_ -= la[k+1][j-2][i] * u_2[k+3][j-2][i];
_t_134_ -= mu[k+3][j][i] * u_2[k+3][j-2][i];
_t_121_ += la[k+1][j+2][i] * u_2[k+3][j+2][i];
_t_134_ += mu[k+3][j][i] * u_2[k+3][j+2][i];
_t_126_ += 8.0 * -u_2[k][j-1][i];
_t_139_ = 8.0 * -u_2[k][j-1][i];
_t_129_ += 8.0 * -u_2[k][j+1][i];
_t_139_ += 8.0 * u_2[k][j+1][i];
_t_124_ += -u_2[k][j-2][i];
_t_121_ += la[k+1][j-2][i] * 8.0 * _t_124_;
_t_139_ += u_2[k][j-2][i];
_t_132_ += -u_2[k][j+2][i];
_t_121_ -= la[k+1][j+2][i] * 8.0 * _t_132_;
_t_139_ -= u_2[k][j+2][i];
_t_134_ -= 8.0 * mu[k][j][i] * _t_139_;
_t_126_ -= u_2[k+3][j-1][i];
_t_121_ -= 8.0 * la[k+1][j-1][i] * _t_126_;
_t_145_ = -u_2[k+3][j-1][i];
_t_129_ -= u_2[k+3][j+1][i];
_t_121_ += 8.0 * la[k+1][j+1][i] * _t_129_;
_t_145_ += u_2[k+3][j+1][i];
_t_134_ -= mu[k+3][j][i] * 8.0 * _t_145_;
_t_107_ = 1.0 / 144.0 * strx[i] * stry[j];
_t_94_ = 1.0 / 144.0 * strx[i] * stry[j];
_t_120_ = 1.0 / 144.0 * stry[j] * strz[k+1];
_t_93_ = _t_120_ * _t_121_;
_t_133_ = 1.0 / 144.0 * stry[j] * strz[k+1];
_t_93_ += _t_133_ * _t_134_;
_t_95_ = mu[k+1][j][i-2] * u_0[k+1][j-2][i-2];
_t_108_ = la[k+1][j-2][i] * u_0[k+1][j-2][i-2];
_t_95_ -= mu[k+1][j][i+2] * u_0[k+1][j-2][i+2];
_t_108_ -= la[k+1][j-2][i] * u_0[k+1][j-2][i+2];
_t_95_ -= mu[k+1][j][i-2] * u_0[k+1][j+2][i-2];
_t_108_ -= la[k+1][j+2][i] * u_0[k+1][j+2][i-2];
_t_95_ += mu[k+1][j][i+2] * u_0[k+1][j+2][i+2];
_t_108_ += la[k+1][j+2][i] * u_0[k+1][j+2][i+2];
_t_100_ = u_0[k+1][j-2][i-1];
_t_111_ = -u_0[k+1][j-2][i-1];
_t_111_ += u_0[k+1][j-2][i+1];
_t_108_ += la[k+1][j-2][i] * 8.0 * _t_111_;
_t_103_ = u_0[k+1][j-2][i+1];
_t_100_ += 8.0 * -u_0[k+1][j-1][i-1];
_t_113_ = 8.0 * -u_0[k+1][j-1][i-1];
_t_103_ += 8.0 * -u_0[k+1][j-1][i+1];
_t_113_ += 8.0 * u_0[k+1][j-1][i+1];
_t_100_ += 8.0 * u_0[k+1][j+1][i-1];
_t_116_ = 8.0 * -u_0[k+1][j+1][i-1];
_t_103_ += 8.0 * u_0[k+1][j+1][i+1];
_t_116_ += 8.0 * u_0[k+1][j+1][i+1];
_t_100_ -= u_0[k+1][j+2][i-1];
_t_95_ -= 8.0 * mu[k+1][j][i-1] * _t_100_;
_t_119_ = -u_0[k+1][j+2][i-1];
_t_103_ -= u_0[k+1][j+2][i+1];
_t_95_ += 8.0 * mu[k+1][j][i+1] * _t_103_;
_t_119_ += u_0[k+1][j+2][i+1];
_t_108_ -= la[k+1][j+2][i] * 8.0 * _t_119_;
_t_106_ = -u_0[k+1][j-1][i+2];
_t_113_ -= u_0[k+1][j-1][i+2];
_t_106_ += u_0[k+1][j+1][i+2];
_t_95_ -= mu[k+1][j][i+2] * 8.0 * _t_106_;
_t_116_ -= u_0[k+1][j+1][i+2];
_t_98_ = -u_0[k+1][j-1][i-2];
_t_113_ += u_0[k+1][j-1][i-2];
_t_108_ -= 8.0 * la[k+1][j-1][i] * _t_113_;
_t_98_ += u_0[k+1][j+1][i-2];
_t_95_ += mu[k+1][j][i-2] * 8.0 * _t_98_;
_t_93_ += _t_94_ * _t_95_;
_t_116_ += u_0[k+1][j+1][i-2];
_t_108_ += 8.0 * la[k+1][j+1][i] * _t_116_;
_t_93_ += _t_107_ * _t_108_;
b_r2 += _t_93_;
uacc_1kp1jc0ic0 += cof * b_r2;
uacc_1[k+1][j][i] = uacc_1kp1jc0ic0;
}
}
}
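// sw4_3: updates the z-component of the acceleration. For every interior point
// (i, j) handled by this thread, the k-loop evaluates the fourth-order stencil
// contribution r3 from u_0, u_1 and u_2 weighted by mu, la and the stretching
// arrays, then writes uacc_2[k][j][i] = a1 * uacc_2[k][j][i] + cof * r3.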
__global__ void __launch_bounds__ (128,2) sw4_3 (double * uacc_in_0, double * uacc_in_1, double * uacc_in_2, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * strx, double * stry, double * strz, int N) {
// Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
// Assumptions
int a1 = 1;
double h = 3.7;
double cof = 1e0 / ( h * h);
double (*uacc_0)[304][304] = (double (*)[304][304])uacc_in_0;
double (*uacc_1)[304][304] = (double (*)[304][304])uacc_in_1;
double (*uacc_2)[304][304] = (double (*)[304][304])uacc_in_2;
double (*u_0)[304][304] = (double (*)[304][304])u_in_0;
double (*u_1)[304][304] = (double (*)[304][304])u_in_1;
double (*u_2)[304][304] = (double (*)[304][304])u_in_2;
double (*mu)[304][304] = (double (*)[304][304])mu_in;
double (*la)[304][304] = (double (*)[304][304])la_in;
double mux1, mux2, mux3, mux4, muy1, muy2, muy3, muy4, muz1, muz2, muz3, muz4;
double r1, r2, r3;
if (i>=2 & j>=2 & i<=N-3 & j<=N-3) {
#pragma unroll 10
for (int k=2; k<=N-3; k++) {
mux1 = mu[k][j][i-1] * strx[i-1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i-2] * strx[i-2];
mux2 = mu[k][j][i-2] * strx[i-2] + mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i] + 3.0 * mu[k][j][i-1] * strx[i-1];
mux3 = mu[k][j][i-1] * strx[i-1] + mu[k][j][i+2] * strx[i+2] + 3.0 * mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i];
mux4 = mu[k][j][i+1] * strx[i+1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i+2] * strx[i+2];
muy1 = mu[k][j-1][i] * stry[j-1] - 3e0 / 4 * mu[k][j][i] * stry[j] -3e0 / 4 * mu[k][j-2][i] * stry[j-2];
muy2 = mu[k][j-2][i] * stry[j-2] + mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j] + 3.0 * mu[k][j-1][i] * stry[j-1];
muy3 = mu[k][j-1][i] * stry[j-1] + mu[k][j+2][i] * stry[j+2] + 3.0 * mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j];
muy4 = mu[k][j+1][i] * stry[j+1] - 3e0 / 4 * mu[k][j][i] * stry[j] - 3e0 / 4 * mu[k][j+2][i] * stry[j+2];
muz1 = mu[k-1][j][i] * strz[k-1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 / 4 * mu[k-2][j][i] * strz[k-2];
muz2 = mu[k-2][j][i] * strz[k-2] + mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k] + 3.0 * mu[k-1][j][i] * strz[k-1];
muz3 = mu[k-1][j][i] * strz[k-1] + mu[k+2][j][i] * strz[k+2] + 3.0 * mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k];
muz4 = mu[k+1][j][i] * strz[k+1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 /4 * mu[k+2][j][i] * strz[k+2];
r3 = 1e0 / 6 * (strx[i] * (mux1 * (u_2[k][j][i-2] - u_2[k][j][i]) + mux2 * (u_2[k][j][i-1] - u_2[k][j][i]) + mux3 * (u_2[k][j][i+1] - u_2[k][j][i]) + mux4 * (u_2[k][j][i+2] - u_2[k][j][i])) +
stry[j] * (muy1 * (u_2[k][j-2][i] - u_2[k][j][i]) + muy2 * (u_2[k][j-1][i] - u_2[k][j][i]) + muy3 * (u_2[k][j+1][i] - u_2[k][j][i]) + muy4 * (u_2[k][j+2][i] - u_2[k][j][i])) +
strz[k] * ((2 * muz1 + la[k-1][j][i] * strz[k-1] - 3e0 / 4 * la[k][j][i] * strz[k] - 3e0 / 4 * la[k-2][j][i] * strz[k-2]) * (u_2[k-2][j][i] - u_2[k][j][i]) +
(2 * muz2 + la[k-2][j][i] * strz[k-2] + la[k+1][j][i] * strz[k+1] + 3 * la[k][j][i] * strz[k] + 3 * la[k-1][j][i] * strz[k-1]) * (u_2[k-1][j][i] - u_2[k][j][i]) +
(2 * muz3 + la[k-1][j][i] * strz[k-1] + la[k+2][j][i] * strz[k+2] + 3 * la[k+1][j][i] * strz[k+1] + 3 * la[k][j][i] * strz[k]) * (u_2[k+1][j][i] - u_2[k][j][i]) +
(2 * muz4 + la[k+1][j][i] * strz[k+1] - 3e0 / 4 * la[k][j][i] * strz[k] - 3e0 / 4 * la[k+2][j][i] * strz[k+2]) * (u_2[k+2][j][i] - u_2[k][j][i])));
r3 += strx[i] * strz[k] * (1e0 / 144) * (mu[k][j][i-2] * (u_0[k-2][j][i-2] - u_0[k+2][j][i-2] + 8 * (-u_0[k-1][j][i-2] + u_0[k+1][j][i-2])) - 8 * (mu[k][j][i-1] * (u_0[k-2][j][i-1] - u_0[k+2][j][i-1] + 8 * (-u_0[k-1][j][i-1] + u_0[k+1][j][i-1]))) + 8 * (mu[k][j][i+1] * (u_0[k-2][j][i+1] - u_0[k+2][j][i+1] + 8 * (-u_0[k-1][j][i+1] + u_0[k+1][j][i+1]))) - (mu[k][j][i+2] * (u_0[k-2][j][i+2] - u_0[k+2][j][i+2] + 8 * (-u_0[k-1][j][i+2] + u_0[k+1][j][i+2]))));
r3 += stry[j] * strz[k] * (1e0 / 144) * (mu[k][j-2][i] * (u_1[k-2][j-2][i] - u_1[k+2][j-2][i] + 8 * (-u_1[k-1][j-2][i] + u_1[k+1][j-2][i])) - 8 * (mu[k][j-1][i] * (u_1[k-2][j-1][i] - u_1[k+2][j-1][i] + 8 * (-u_1[k-1][j-1][i] + u_1[k+1][j-1][i]))) + 8 * (mu[k][j+1][i] * (u_1[k-2][j+1][i] - u_1[k+2][j+1][i] + 8 * (-u_1[k-1][j+1][i] + u_1[k+1][j+1][i]))) - (mu[k][j+2][i] * (u_1[k-2][j+2][i] - u_1[k+2][j+2][i] + 8 * (-u_1[k-1][j+2][i] + u_1[k+1][j+2][i]))));
r3 += strx[i] * strz[k] * (1e0 / 144) * (la[k-2][j][i] * (u_0[k-2][j][i-2] - u_0[k-2][j][i+2] + 8 * (-u_0[k-2][j][i-1] + u_0[k-2][j][i+1])) - 8 * (la[k-1][j][i] * (u_0[k-1][j][i-2] - u_0[k-1][j][i+2] + 8 * (-u_0[k-1][j][i-1] + u_0[k-1][j][i+1]))) + 8 * (la[k+1][j][i] * (u_0[k+1][j][i-2] - u_0[k+1][j][i+2] + 8 * (-u_0[k+1][j][i-1] + u_0[k+1][j][i+1]))) - (la[k+2][j][i] * (u_0[k+2][j][i-2] - u_0[k+2][j][i+2] + 8 * (-u_0[k+2][j][i-1] + u_0[k+2][j][i+1]))));
r3 += stry[j] * strz[k] * (1e0 / 144) * (la[k-2][j][i] * (u_1[k-2][j-2][i] - u_1[k-2][j+2][i] + 8 * (-u_1[k-2][j-1][i] + u_1[k-2][j+1][i])) - 8 * (la[k-1][j][i] * (u_1[k-1][j-2][i] - u_1[k-1][j+2][i] + 8 * (-u_1[k-1][j-1][i] + u_1[k-1][j+1][i]))) + 8 * (la[k+1][j][i] * (u_1[k+1][j-2][i] - u_1[k+1][j+2][i] + 8 * (-u_1[k+1][j-1][i] + u_1[k+1][j+1][i]))) - (la[k+2][j][i] * (u_1[k+2][j-2][i] - u_1[k+2][j+2][i] + 8 * (-u_1[k+2][j-1][i] + u_1[k+2][j+1][i]))));
uacc_2[k][j][i] = a1 * uacc_2[k][j][i] + cof * r3;
}
}
}
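// host_code: host-side driver. It allocates device buffers for the three
// acceleration and displacement components, the material arrays mu and la and the
// stretching vectors, copies the host data in, launches sw4_1, sw4_2 and sw4_3 with
// 16x8 thread blocks, copies the updated accelerations back and frees the buffers.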
extern "C" void host_code (double *h_uacc_0, double *h_uacc_1, double *h_uacc_2, double *h_u_0, double *h_u_1, double *h_u_2, double *h_mu, double *h_la, double *h_strx, double *h_stry, double *h_strz, int N) {
double *uacc_0;
hipMalloc (&uacc_0, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for uacc_0\n");
hipMemcpy (uacc_0, h_uacc_0, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *uacc_1;
hipMalloc (&uacc_1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for uacc_1\n");
hipMemcpy (uacc_1, h_uacc_1, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *uacc_2;
hipMalloc (&uacc_2, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for uacc_2\n");
hipMemcpy (uacc_2, h_uacc_2, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *u_0;
hipMalloc (&u_0, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u_0\n");
hipMemcpy (u_0, h_u_0, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *u_1;
hipMalloc (&u_1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u_1\n");
hipMemcpy (u_1, h_u_1, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *u_2;
hipMalloc (&u_2, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u_2\n");
hipMemcpy (u_2, h_u_2, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *mu;
hipMalloc (&mu, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for mu\n");
hipMemcpy (mu, h_mu, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *la;
hipMalloc (&la, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for la\n");
hipMemcpy (la, h_la, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *strx;
hipMalloc (&strx, sizeof(double)*N);
check_error ("Failed to allocate device memory for strx\n");
hipMemcpy (strx, h_strx, sizeof(double)*N, hipMemcpyHostToDevice);
double *stry;
hipMalloc (&stry, sizeof(double)*N);
check_error ("Failed to allocate device memory for stry\n");
hipMemcpy (stry, h_stry, sizeof(double)*N, hipMemcpyHostToDevice);
double *strz;
hipMalloc (&strz, sizeof(double)*N);
check_error ("Failed to allocate device memory for strz\n");
hipMemcpy (strz, h_strz, sizeof(double)*N, hipMemcpyHostToDevice);
dim3 blockconfig (16, 8);
dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), 1);
hipLaunchKernelGGL(( sw4_1) , dim3(gridconfig), dim3(blockconfig), 0, 0, uacc_0, uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N);
hipLaunchKernelGGL(( sw4_2) , dim3(gridconfig), dim3(blockconfig), 0, 0, uacc_0, uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N);
hipLaunchKernelGGL(( sw4_3) , dim3(gridconfig), dim3(blockconfig), 0, 0, uacc_0, uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N);
hipMemcpy (h_uacc_0, uacc_0, sizeof(double)*N*N*N, hipMemcpyDeviceToHost);
hipMemcpy (h_uacc_1, uacc_1, sizeof(double)*N*N*N, hipMemcpyDeviceToHost);
hipMemcpy (h_uacc_2, uacc_2, sizeof(double)*N*N*N, hipMemcpyDeviceToHost);
hipFree (uacc_0);
hipFree (uacc_1);
hipFree (uacc_2);
hipFree (u_0);
hipFree (u_1);
hipFree (u_2);
hipFree (mu);
hipFree (la);
hipFree (strx);
hipFree (stry);
hipFree (strz);
}
|
3faf123fdf6008b3ea611a3c7402a4f0ebc41b2d.cu
|
#include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
void check_error (const char* message) {
cudaError_t error = cudaGetLastError ();
if (error != cudaSuccess) {
printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error));
exit(-1);
}
}
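// sw4_1: updates the x-component uacc_0. The k-loop advances by two planes per
// iteration; the a_* coefficients handle plane k and the b_* coefficients handle
// plane k+1, so each thread writes uacc_0[k][j][i] and uacc_0[k+1][j][i].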
__global__ void __launch_bounds__ (128,2) sw4_1 (double * uacc_in_0, double * uacc_in_1, double * uacc_in_2, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * strx, double * stry, double * strz, int N) {
// Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
// Assumptions
int a1 = 1;
double h = 3.7;
double cof = 1e0 / ( h * h);
double (*uacc_0)[304][304] = (double (*)[304][304])uacc_in_0;
double (*uacc_1)[304][304] = (double (*)[304][304])uacc_in_1;
double (*uacc_2)[304][304] = (double (*)[304][304])uacc_in_2;
double (*u_0)[304][304] = (double (*)[304][304])u_in_0;
double (*u_1)[304][304] = (double (*)[304][304])u_in_1;
double (*u_2)[304][304] = (double (*)[304][304])u_in_2;
double (*mu)[304][304] = (double (*)[304][304])mu_in;
double (*la)[304][304] = (double (*)[304][304])la_in;
double a_mux1, a_mux2, a_mux3, a_mux4, a_muy1, a_muy2, a_muy3, a_muy4, a_muz1, a_muz2, a_muz3, a_muz4;
double b_mux1, b_mux2, b_mux3, b_mux4, b_muy1, b_muy2, b_muy3, b_muy4, b_muz1, b_muz2, b_muz3, b_muz4;
double a_r1, b_r1;
if (i>=2 & j>=2 & i<=N-3 & j<=N-3) {
#pragma unroll 3
for (int k=2; k<=N-3; k+=2) {
a_mux1 = mu[k][j][i-1] * strx[i-1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i-2] * strx[i-2];
a_mux2 = mu[k][j][i-2] * strx[i-2] + mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i] + 3.0 * mu[k][j][i-1] * strx[i-1];
a_mux3 = mu[k][j][i-1] * strx[i-1] + mu[k][j][i+2] * strx[i+2] + 3.0 * mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i];
a_mux4 = mu[k][j][i+1] * strx[i+1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i+2] * strx[i+2];
a_muy1 = mu[k][j-1][i] * stry[j-1] - 3e0 / 4 * mu[k][j][i] * stry[j] -3e0 / 4 * mu[k][j-2][i] * stry[j-2];
a_muy2 = mu[k][j-2][i] * stry[j-2] + mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j] + 3.0 * mu[k][j-1][i] * stry[j-1];
a_muy3 = mu[k][j-1][i] * stry[j-1] + mu[k][j+2][i] * stry[j+2] + 3.0 * mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j];
a_muy4 = mu[k][j+1][i] * stry[j+1] - 3e0 / 4 * mu[k][j][i] * stry[j] - 3e0 / 4 * mu[k][j+2][i] * stry[j+2];
a_muz1 = mu[k-1][j][i] * strz[k-1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 / 4 * mu[k-2][j][i] * strz[k-2];
a_muz2 = mu[k-2][j][i] * strz[k-2] + mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k] + 3.0 * mu[k-1][j][i] * strz[k-1];
a_muz3 = mu[k-1][j][i] * strz[k-1] + mu[k+2][j][i] * strz[k+2] + 3.0 * mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k];
a_muz4 = mu[k+1][j][i] * strz[k+1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 /4 * mu[k+2][j][i] * strz[k+2];
a_r1 = 1e0 / 6 * (strx[i] * ((2 * a_mux1 + la[k][j][i-1] * strx[i-1] - 3e0 / 4 * la[k][j][i] * strx[i] - 3e0 / 4 * la[k][j][i-2] * strx[i-2]) * (u_0[k][j][i-2] - u_0[k][j][i]) +
(2 * a_mux2 + la[k][j][i-2] * strx[i-2] + la[k][j][i+1] * strx[i+1] + 3 * la[k][j][i] * strx[i] + 3 * la[k][j][i-1] * strx[i-1]) * (u_0[k][j][i-1] - u_0[k][j][i]) +
(2 * a_mux3 + la[k][j][i-1] * strx[i-1] + la[k][j][i+2] * strx[i+2] + 3 * la[k][j][i+1] * strx[i+1] + 3 * la[k][j][i] * strx[i]) * (u_0[k][j][i+1] - u_0[k][j][i]) +
(2 * a_mux4 + la[k][j][i+1] * strx[i+1] - 3e0 / 4 * la[k][j][i] * strx[i] - 3e0 / 4 * la[k][j][i+2] * strx[i+2]) * (u_0[k][j][i+2] - u_0[k][j][i]))
+ stry[j] * (a_muy1 * (u_0[k][j-2][i] - u_0[k][j][i]) + a_muy2 * (u_0[k][j-1][i] - u_0[k][j][i]) + a_muy3 * (u_0[k][j+1][i] - u_0[k][j][i]) + a_muy4 * (u_0[k][j+2][i] - u_0[k][j][i])) + strz[k] * (a_muz1 * (u_0[k-2][j][i] - u_0[k][j][i]) + a_muz2 * (u_0[k-1][j][i] - u_0[k][j][i]) + a_muz3 * (u_0[k+1][j][i] - u_0[k][j][i]) + a_muz4 * (u_0[k+2][j][i] - u_0[k][j][i])));
a_r1 += strx[i] * stry[j] * (1e0 / 144) * (la[k][j][i-2] * (u_1[k][j-2][i-2] - u_1[k][j+2][i-2] + 8 * (-u_1[k][j-1][i-2] + u_1[k][j+1][i-2])) - 8 * (la[k][j][i-1] * (u_1[k][j-2][i-1] - u_1[k][j+2][i-1] + 8 * (-u_1[k][j-1][i-1] + u_1[k][j+1][i-1]))) + 8 * (la[k][j][i+1] * (u_1[k][j-2][i+1] - u_1[k][j+2][i+1] + 8 * (-u_1[k][j-1][i+1] + u_1[k][j+1][i+1]))) - (la[k][j][i+2] * (u_1[k][j-2][i+2] - u_1[k][j+2][i+2] + 8 * (-u_1[k][j-1][i+2] + u_1[k][j+1][i+2]))));
a_r1 += strx[i] * strz[k] * (1e0 / 144) * (la[k][j][i-2] * (u_2[k-2][j][i-2] - u_2[k+2][j][i-2] + 8 * (-u_2[k-1][j][i-2] + u_2[k+1][j][i-2])) - 8 * (la[k][j][i-1] * (u_2[k-2][j][i-1] - u_2[k+2][j][i-1] + 8 * (-u_2[k-1][j][i-1] + u_2[k+1][j][i-1]))) + 8 * (la[k][j][i+1] * (u_2[k-2][j][i+1] - u_2[k+2][j][i+1] + 8 * (-u_2[k-1][j][i+1] + u_2[k+1][j][i+1]))) - (la[k][j][i+2] * (u_2[k-2][j][i+2] - u_2[k+2][j][i+2] + 8 * (-u_2[k-1][j][i+2] + u_2[k+1][j][i+2]))));
a_r1 += strx[i] * stry[j] * (1e0 / 144) * (mu[k][j-2][i] * (u_1[k][j-2][i-2] - u_1[k][j-2][i+2] + 8 * (-u_1[k][j-2][i-1] + u_1[k][j-2][i+1])) - 8 * (mu[k][j-1][i] * (u_1[k][j-1][i-2] - u_1[k][j-1][i+2] + 8 * (-u_1[k][j-1][i-1] + u_1[k][j-1][i+1]))) + 8 * (mu[k][j+1][i] * (u_1[k][j+1][i-2] - u_1[k][j+1][i+2] + 8 * (-u_1[k][j+1][i-1] + u_1[k][j+1][i+1]))) - (mu[k][j+2][i] * (u_1[k][j+2][i-2] - u_1[k][j+2][i+2] + 8 * (-u_1[k][j+2][i-1] + u_1[k][j+2][i+1]))));
a_r1 += strx[i] * strz[k] * (1e0 / 144) * (mu[k-2][j][i] * (u_2[k-2][j][i-2] - u_2[k-2][j][i+2] + 8 * (-u_2[k-2][j][i-1] + u_2[k-2][j][i+1])) - 8 * (mu[k-1][j][i] * (u_2[k-1][j][i-2] - u_2[k-1][j][i+2] + 8 * (-u_2[k-1][j][i-1] + u_2[k-1][j][i+1]))) + 8 * (mu[k+1][j][i] * (u_2[k+1][j][i-2] - u_2[k+1][j][i+2] + 8 * (-u_2[k+1][j][i-1] + u_2[k+1][j][i+1]))) - (mu[k+2][j][i] * (u_2[k+2][j][i-2] - u_2[k+2][j][i+2] + 8 * (-u_2[k+2][j][i-1] + u_2[k+2][j][i+1]))));
uacc_0[k][j][i] = a1 * uacc_0[k][j][i] + cof * a_r1;
b_mux1 = mu[k+1][j][i-1] * strx[i-1] - 3e0 / 4 * mu[k+1][j][i] * strx[i] - 3e0 / 4 * mu[k+1][j][i-2] * strx[i-2];
b_mux2 = mu[k+1][j][i-2] * strx[i-2] + mu[k+1][j][i+1] * strx[i+1] + 3.0 * mu[k+1][j][i] * strx[i] + 3.0 * mu[k+1][j][i-1] * strx[i-1];
b_mux3 = mu[k+1][j][i-1] * strx[i-1] + mu[k+1][j][i+2] * strx[i+2] + 3.0 * mu[k+1][j][i+1] * strx[i+1] + 3.0 * mu[k+1][j][i] * strx[i];
b_mux4 = mu[k+1][j][i+1] * strx[i+1] - 3e0 / 4 * mu[k+1][j][i] * strx[i] - 3e0 / 4 * mu[k+1][j][i+2] * strx[i+2];
b_muy1 = mu[k+1][j-1][i] * stry[j-1] - 3e0 / 4 * mu[k+1][j][i] * stry[j] -3e0 / 4 * mu[k+1][j-2][i] * stry[j-2];
b_muy2 = mu[k+1][j-2][i] * stry[j-2] + mu[k+1][j+1][i] * stry[j+1] + 3.0 * mu[k+1][j][i] * stry[j] + 3.0 * mu[k+1][j-1][i] * stry[j-1];
b_muy3 = mu[k+1][j-1][i] * stry[j-1] + mu[k+1][j+2][i] * stry[j+2] + 3.0 * mu[k+1][j+1][i] * stry[j+1] + 3.0 * mu[k+1][j][i] * stry[j];
b_muy4 = mu[k+1][j+1][i] * stry[j+1] - 3e0 / 4 * mu[k+1][j][i] * stry[j] - 3e0 / 4 * mu[k+1][j+2][i] * stry[j+2];
b_muz1 = mu[k+1-1][j][i] * strz[k+1-1] - 3e0 / 4 * mu[k+1][j][i] * strz[k+1] - 3e0 / 4 * mu[k+1-2][j][i] * strz[k+1-2];
b_muz2 = mu[k+1-2][j][i] * strz[k+1-2] + mu[k+1+1][j][i] * strz[k+1+1] + 3.0 * mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k+1-1][j][i] * strz[k+1-1];
b_muz3 = mu[k+1-1][j][i] * strz[k+1-1] + mu[k+1+2][j][i] * strz[k+1+2] + 3.0 * mu[k+1+1][j][i] * strz[k+1+1] + 3.0 * mu[k+1][j][i] * strz[k+1];
b_muz4 = mu[k+1+1][j][i] * strz[k+1+1] - 3e0 / 4 * mu[k+1][j][i] * strz[k+1] - 3e0 /4 * mu[k+1+2][j][i] * strz[k+1+2];
b_r1 = 1e0 / 6 * (strx[i] * ((2 * b_mux1 + la[k+1][j][i-1] * strx[i-1] - 3e0 / 4 * la[k+1][j][i] * strx[i] - 3e0 / 4 * la[k+1][j][i-2] * strx[i-2]) * (u_0[k+1][j][i-2] - u_0[k+1][j][i]) +
(2 * b_mux2 + la[k+1][j][i-2] * strx[i-2] + la[k+1][j][i+1] * strx[i+1] + 3 * la[k+1][j][i] * strx[i] + 3 * la[k+1][j][i-1] * strx[i-1]) * (u_0[k+1][j][i-1] - u_0[k+1][j][i]) +
(2 * b_mux3 + la[k+1][j][i-1] * strx[i-1] + la[k+1][j][i+2] * strx[i+2] + 3 * la[k+1][j][i+1] * strx[i+1] + 3 * la[k+1][j][i] * strx[i]) * (u_0[k+1][j][i+1] - u_0[k+1][j][i]) +
(2 * b_mux4 + la[k+1][j][i+1] * strx[i+1] - 3e0 / 4 * la[k+1][j][i] * strx[i] - 3e0 / 4 * la[k+1][j][i+2] * strx[i+2]) * (u_0[k+1][j][i+2] - u_0[k+1][j][i]))
+ stry[j] * (b_muy1 * (u_0[k+1][j-2][i] - u_0[k+1][j][i]) + b_muy2 * (u_0[k+1][j-1][i] - u_0[k+1][j][i]) + b_muy3 * (u_0[k+1][j+1][i] - u_0[k+1][j][i]) + b_muy4 * (u_0[k+1][j+2][i] - u_0[k+1][j][i])) + strz[k+1] * (b_muz1 * (u_0[k+1-2][j][i] - u_0[k+1][j][i]) + b_muz2 * (u_0[k+1-1][j][i] - u_0[k+1][j][i]) + b_muz3 * (u_0[k+1+1][j][i] - u_0[k+1][j][i]) + b_muz4 * (u_0[k+1+2][j][i] - u_0[k+1][j][i])));
b_r1 += strx[i] * stry[j] * (1e0 / 144) * (la[k+1][j][i-2] * (u_1[k+1][j-2][i-2] - u_1[k+1][j+2][i-2] + 8 * (-u_1[k+1][j-1][i-2] + u_1[k+1][j+1][i-2])) - 8 * (la[k+1][j][i-1] * (u_1[k+1][j-2][i-1] - u_1[k+1][j+2][i-1] + 8 * (-u_1[k+1][j-1][i-1] + u_1[k+1][j+1][i-1]))) + 8 * (la[k+1][j][i+1] * (u_1[k+1][j-2][i+1] - u_1[k+1][j+2][i+1] + 8 * (-u_1[k+1][j-1][i+1] + u_1[k+1][j+1][i+1]))) - (la[k+1][j][i+2] * (u_1[k+1][j-2][i+2] - u_1[k+1][j+2][i+2] + 8 * (-u_1[k+1][j-1][i+2] + u_1[k+1][j+1][i+2]))));
b_r1 += strx[i] * strz[k+1] * (1e0 / 144) * (la[k+1][j][i-2] * (u_2[k+1-2][j][i-2] - u_2[k+1+2][j][i-2] + 8 * (-u_2[k+1-1][j][i-2] + u_2[k+1+1][j][i-2])) - 8 * (la[k+1][j][i-1] * (u_2[k+1-2][j][i-1] - u_2[k+1+2][j][i-1] + 8 * (-u_2[k+1-1][j][i-1] + u_2[k+1+1][j][i-1]))) + 8 * (la[k+1][j][i+1] * (u_2[k+1-2][j][i+1] - u_2[k+1+2][j][i+1] + 8 * (-u_2[k+1-1][j][i+1] + u_2[k+1+1][j][i+1]))) - (la[k+1][j][i+2] * (u_2[k+1-2][j][i+2] - u_2[k+1+2][j][i+2] + 8 * (-u_2[k+1-1][j][i+2] + u_2[k+1+1][j][i+2]))));
b_r1 += strx[i] * stry[j] * (1e0 / 144) * (mu[k+1][j-2][i] * (u_1[k+1][j-2][i-2] - u_1[k+1][j-2][i+2] + 8 * (-u_1[k+1][j-2][i-1] + u_1[k+1][j-2][i+1])) - 8 * (mu[k+1][j-1][i] * (u_1[k+1][j-1][i-2] - u_1[k+1][j-1][i+2] + 8 * (-u_1[k+1][j-1][i-1] + u_1[k+1][j-1][i+1]))) + 8 * (mu[k+1][j+1][i] * (u_1[k+1][j+1][i-2] - u_1[k+1][j+1][i+2] + 8 * (-u_1[k+1][j+1][i-1] + u_1[k+1][j+1][i+1]))) - (mu[k+1][j+2][i] * (u_1[k+1][j+2][i-2] - u_1[k+1][j+2][i+2] + 8 * (-u_1[k+1][j+2][i-1] + u_1[k+1][j+2][i+1]))));
b_r1 += strx[i] * strz[k+1] * (1e0 / 144) * (mu[k+1-2][j][i] * (u_2[k+1-2][j][i-2] - u_2[k+1-2][j][i+2] + 8 * (-u_2[k+1-2][j][i-1] + u_2[k+1-2][j][i+1])) - 8 * (mu[k+1-1][j][i] * (u_2[k+1-1][j][i-2] - u_2[k+1-1][j][i+2] + 8 * (-u_2[k+1-1][j][i-1] + u_2[k+1-1][j][i+1]))) + 8 * (mu[k+1+1][j][i] * (u_2[k+1+1][j][i-2] - u_2[k+1+1][j][i+2] + 8 * (-u_2[k+1+1][j][i-1] + u_2[k+1+1][j][i+1]))) - (mu[k+1+2][j][i] * (u_2[k+1+2][j][i-2] - u_2[k+1+2][j][i+2] + 8 * (-u_2[k+1+2][j][i-1] + u_2[k+1+2][j][i+1]))));
uacc_0[k+1][j][i] = a1 * uacc_0[k+1][j][i] + cof * b_r1;
}
}
}
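// sw4_2: updates the y-component uacc_1. The stencil is fully scalarized into the
// _t_* temporaries to expose instruction-level parallelism; like sw4_1, each loop
// iteration processes plane k (a_* / a_r2) and plane k+1 (b_* / b_r2).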
__global__ void __launch_bounds__ (128,2) sw4_2 (double * uacc_in_0, double * uacc_in_1, double * uacc_in_2, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * strx, double * stry, double * strz, int N) {
// Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
// Assumptions
int a1 = 1;
double h = 3.7;
double cof = 1e0 / ( h * h);
double (*uacc_0)[304][304] = (double (*)[304][304])uacc_in_0;
double (*uacc_1)[304][304] = (double (*)[304][304])uacc_in_1;
double (*uacc_2)[304][304] = (double (*)[304][304])uacc_in_2;
double (*u_0)[304][304] = (double (*)[304][304])u_in_0;
double (*u_1)[304][304] = (double (*)[304][304])u_in_1;
double (*u_2)[304][304] = (double (*)[304][304])u_in_2;
double (*mu)[304][304] = (double (*)[304][304])mu_in;
double (*la)[304][304] = (double (*)[304][304])la_in;
double a_mux1, a_mux2, a_mux3, a_mux4, a_muy1, a_muy2, a_muy3, a_muy4, a_muz1, a_muz2, a_muz3, a_muz4;
double b_mux1, b_mux2, b_mux3, b_mux4, b_muy1, b_muy2, b_muy3, b_muy4, b_muz1, b_muz2, b_muz3, b_muz4;
double a_r2, b_r2;
if (i>=2 & j>=2 & i<=N-3 & j<=N-3) {
#pragma unroll 3
for (int k=2; k<=N-3; k+=2) {
double a_mux1;
double a_mux2;
double a_mux3;
double a_mux4;
double a_muy1;
double a_muy2;
double a_muy3;
double _t_7_;
double _t_9_;
double a_muy4;
double _t_11_;
double _t_13_;
double _t_1_;
double a_r2;
double _t_10_;
double _t_6_;
double _t_12_;
double _t_14_;
double _t_8_;
double a_muz3;
double a_muz2;
double a_muz4;
double a_muz1;
double _t_15_;
double _t_21_;
double _t_34_;
double _t_47_;
double _t_60_;
double _t_22_;
double _t_35_;
double _t_48_;
double _t_61_;
double _t_53_;
double _t_64_;
double _t_69_;
double _t_56_;
double _t_51_;
double _t_59_;
double _t_72_;
double _t_66_;
double _t_20_;
double _t_27_;
double _t_38_;
double _t_30_;
double _t_40_;
double _t_43_;
double _t_46_;
double _t_25_;
double _t_33_;
double uacc_1kc0jc0ic0;
double uacc_1kp1jc0ic0;
double b_mux1;
double b_mux2;
double b_mux3;
double b_mux4;
double b_muy1;
double b_muy2;
double b_muy3;
double _t_80_;
double _t_82_;
double b_muy4;
double _t_84_;
double _t_86_;
double _t_74_;
double b_r2;
double _t_81_;
double _t_79_;
double _t_83_;
double _t_85_;
double _t_87_;
double b_muz2;
double _t_88_;
double b_muz1;
double b_muz3;
double b_muz4;
double _t_142_;
double _t_134_;
double _t_121_;
double _t_126_;
double _t_129_;
double _t_137_;
double _t_124_;
double _t_132_;
double _t_139_;
double _t_145_;
double _t_107_;
double _t_94_;
double _t_120_;
double _t_93_;
double _t_133_;
double _t_95_;
double _t_108_;
double _t_100_;
double _t_111_;
double _t_103_;
double _t_113_;
double _t_116_;
double _t_119_;
double _t_106_;
double _t_98_;
a_mux1 = -3.0 / 4.0 * mu[k][j][i-2] * strx[i-2];
a_mux1 += mu[k][j][i-1] * strx[i-1];
a_mux1 -= 3.0 / 4.0 * mu[k][j][i] * strx[i];
a_mux2 = mu[k][j][i-2] * strx[i-2];
a_mux2 += 3.0 * mu[k][j][i] * strx[i];
a_mux2 += 3.0 * mu[k][j][i-1] * strx[i-1];
a_mux2 += mu[k][j][i+1] * strx[i+1];
a_mux3 = mu[k][j][i-1] * strx[i-1];
a_mux3 += 3.0 * mu[k][j][i+1] * strx[i+1];
a_mux3 += 3.0 * mu[k][j][i] * strx[i];
a_mux3 += mu[k][j][i+2] * strx[i+2];
a_mux4 = mu[k][j][i+1] * strx[i+1];
a_mux4 -= 3.0 / 4.0 * mu[k][j][i] * strx[i];
a_mux4 -= 3.0 / 4.0 * mu[k][j][i+2] * strx[i+2];
a_muy1 = -3.0 / 4.0 * mu[k][j][i] * stry[j];
a_muy1 += mu[k][j-1][i] * stry[j-1];
a_muy1 -= 3.0 / 4.0 * mu[k][j-2][i] * stry[j-2];
a_muy2 = mu[k][j-2][i] * stry[j-2];
a_muy2 += 3.0 * mu[k][j][i] * stry[j];
a_muy2 += 3.0 * mu[k][j-1][i] * stry[j-1];
a_muy3 = mu[k][j-1][i] * stry[j-1];
a_muy3 += 3.0 * mu[k][j][i] * stry[j];
_t_7_ = 2.0 * a_muy1;
a_muy2 += mu[k][j+1][i] * stry[j+1];
a_muy3 += 3.0 * mu[k][j+1][i] * stry[j+1];
_t_9_ = 2.0 * a_muy2;
a_muy4 = mu[k][j+1][i] * stry[j+1];
a_muy4 -= 3.0 / 4.0 * mu[k][j][i] * stry[j];
a_muy3 += mu[k][j+2][i] * stry[j+2];
a_muy4 -= 3.0 / 4.0 * mu[k][j+2][i] * stry[j+2];
_t_11_ = 2.0 * a_muy3;
_t_13_ = 2.0 * a_muy4;
_t_7_ -= 3.0 / 4.0 * la[k][j][i] * stry[j];
_t_9_ += 3.0 * la[k][j][i] * stry[j];
_t_11_ += 3.0 * la[k][j][i] * stry[j];
_t_13_ -= 3.0 / 4.0 * la[k][j][i] * stry[j];
_t_7_ += la[k][j-1][i] * stry[j-1];
_t_9_ += 3.0 * la[k][j-1][i] * stry[j-1];
_t_11_ += la[k][j-1][i] * stry[j-1];
_t_9_ += la[k][j+1][i] * stry[j+1];
_t_11_ += 3.0 * la[k][j+1][i] * stry[j+1];
_t_13_ += la[k][j+1][i] * stry[j+1];
_t_7_ -= 3.0 / 4.0 * la[k][j-2][i] * stry[j-2];
_t_9_ += la[k][j-2][i] * stry[j-2];
_t_11_ += la[k][j+2][i] * stry[j+2];
_t_13_ -= 3.0 / 4.0 * la[k][j+2][i] * stry[j+2];
_t_1_ = a_mux1 * u_1[k][j][i-2];
_t_1_ -= a_mux1 * u_1[k][j][i];
_t_1_ -= a_mux2 * u_1[k][j][i];
_t_1_ -= a_mux3 * u_1[k][j][i];
_t_1_ -= a_mux4 * u_1[k][j][i];
_t_1_ += a_mux2 * u_1[k][j][i-1];
_t_1_ += a_mux3 * u_1[k][j][i+1];
_t_1_ += a_mux4 * u_1[k][j][i+2];
a_r2 = 1.0 / 6.0 * strx[i] * _t_1_;
_t_10_ = -u_1[k][j][i];
_t_10_ += u_1[k][j-1][i];
_t_6_ = _t_9_ * _t_10_;
_t_12_ = -u_1[k][j][i];
_t_12_ += u_1[k][j+1][i];
_t_6_ += _t_11_ * _t_12_;
_t_14_ = -u_1[k][j][i];
_t_14_ += u_1[k][j+2][i];
_t_6_ += _t_13_ * _t_14_;
_t_8_ = -u_1[k][j][i];
_t_8_ += u_1[k][j-2][i];
_t_6_ += _t_7_ * _t_8_;
a_r2 += 1.0 / 6.0 * stry[j] * _t_6_;
a_muz3 = 3.0 * mu[k][j][i] * strz[k];
a_muz2 = 3.0 * mu[k][j][i] * strz[k];
a_muz4 = -3.0 / 4.0 * mu[k][j][i] * strz[k];
a_muz1 = -3.0 / 4.0 * mu[k][j][i] * strz[k];
a_muz1 -= 3.0 / 4.0 * mu[k-2][j][i] * strz[k-2];
a_muz2 += mu[k-2][j][i] * strz[k-2];
a_muz2 += mu[k+1][j][i] * strz[k+1];
a_muz3 += 3.0 * mu[k+1][j][i] * strz[k+1];
a_muz4 += mu[k+1][j][i] * strz[k+1];
a_muz1 += mu[k-1][j][i] * strz[k-1];
a_muz2 += 3.0 * mu[k-1][j][i] * strz[k-1];
a_muz3 += mu[k-1][j][i] * strz[k-1];
_t_15_ = -a_muz1 * u_1[k][j][i];
_t_15_ -= a_muz2 * u_1[k][j][i];
_t_15_ += a_muz1 * u_1[k-2][j][i];
_t_15_ += a_muz2 * u_1[k-1][j][i];
a_muz3 += mu[k+2][j][i] * strz[k+2];
_t_15_ -= a_muz3 * u_1[k][j][i];
_t_15_ += a_muz3 * u_1[k+1][j][i];
a_muz4 -= 3.0 / 4.0 * mu[k+2][j][i] * strz[k+2];
_t_15_ -= a_muz4 * u_1[k][j][i];
_t_15_ += a_muz4 * u_1[k+2][j][i];
a_r2 += 1.0 / 6.0 * strz[k] * _t_15_;
_t_21_ = 1.0 / 144.0 * strx[i] * stry[j];
_t_34_ = 1.0 / 144.0 * strx[i] * stry[j];
_t_47_ = 1.0 / 144.0 * stry[j] * strz[k];
_t_60_ = 1.0 / 144.0 * stry[j] * strz[k];
_t_22_ = mu[k][j][i-2] * u_0[k][j-2][i-2];
_t_35_ = la[k][j-2][i] * u_0[k][j-2][i-2];
_t_22_ -= mu[k][j][i+2] * u_0[k][j-2][i+2];
_t_35_ -= la[k][j-2][i] * u_0[k][j-2][i+2];
_t_22_ -= mu[k][j][i-2] * u_0[k][j+2][i-2];
_t_35_ -= la[k][j+2][i] * u_0[k][j+2][i-2];
_t_22_ += mu[k][j][i+2] * u_0[k][j+2][i+2];
_t_35_ += la[k][j+2][i] * u_0[k][j+2][i+2];
_t_48_ = la[k][j-2][i] * u_2[k-2][j-2][i];
_t_61_ = mu[k-2][j][i] * u_2[k-2][j-2][i];
_t_48_ -= la[k][j+2][i] * u_2[k-2][j+2][i];
_t_61_ -= mu[k-2][j][i] * u_2[k-2][j+2][i];
_t_48_ -= la[k][j-2][i] * u_2[k+2][j-2][i];
_t_61_ -= mu[k+2][j][i] * u_2[k+2][j-2][i];
_t_48_ += la[k][j+2][i] * u_2[k+2][j+2][i];
_t_61_ += mu[k+2][j][i] * u_2[k+2][j+2][i];
_t_53_ = -u_2[k+2][j-1][i];
_t_53_ += u_2[k-2][j-1][i];
_t_64_ = -u_2[k-2][j-1][i];
_t_53_ += 8.0 * u_2[k+1][j-1][i];
_t_69_ = 8.0 * -u_2[k+1][j-1][i];
_t_56_ = u_2[k-2][j+1][i];
_t_64_ += u_2[k-2][j+1][i];
_t_61_ += mu[k-2][j][i] * 8.0 * _t_64_;
_t_56_ += 8.0 * u_2[k+1][j+1][i];
_t_69_ += 8.0 * u_2[k+1][j+1][i];
_t_69_ += u_2[k+1][j-2][i];
_t_51_ = u_2[k+1][j-2][i];
_t_69_ -= u_2[k+1][j+2][i];
_t_61_ += 8.0 * mu[k+1][j][i] * _t_69_;
_t_59_ = u_2[k+1][j+2][i];
_t_56_ -= u_2[k+2][j+1][i];
_t_72_ = -u_2[k+2][j-1][i];
_t_72_ += u_2[k+2][j+1][i];
_t_61_ -= mu[k+2][j][i] * 8.0 * _t_72_;
_t_53_ += 8.0 * -u_2[k-1][j-1][i];
_t_48_ -= 8.0 * la[k][j-1][i] * _t_53_;
_t_66_ = 8.0 * -u_2[k-1][j-1][i];
_t_56_ += 8.0 * -u_2[k-1][j+1][i];
_t_48_ += 8.0 * la[k][j+1][i] * _t_56_;
_t_66_ += 8.0 * u_2[k-1][j+1][i];
_t_51_ += -u_2[k-1][j-2][i];
_t_48_ += la[k][j-2][i] * 8.0 * _t_51_;
_t_66_ += u_2[k-1][j-2][i];
_t_59_ += -u_2[k-1][j+2][i];
_t_48_ -= la[k][j+2][i] * 8.0 * _t_59_;
_t_66_ -= u_2[k-1][j+2][i];
_t_61_ -= 8.0 * mu[k-1][j][i] * _t_66_;
_t_20_ = _t_47_ * _t_48_;
_t_20_ += _t_60_ * _t_61_;
_t_27_ = u_0[k][j-2][i-1];
_t_38_ = -u_0[k][j-2][i-1];
_t_38_ += u_0[k][j-2][i+1];
_t_35_ += la[k][j-2][i] * 8.0 * _t_38_;
_t_30_ = u_0[k][j-2][i+1];
_t_27_ += 8.0 * -u_0[k][j-1][i-1];
_t_40_ = 8.0 * -u_0[k][j-1][i-1];
_t_30_ += 8.0 * -u_0[k][j-1][i+1];
_t_40_ += 8.0 * u_0[k][j-1][i+1];
_t_27_ += 8.0 * u_0[k][j+1][i-1];
_t_43_ = 8.0 * -u_0[k][j+1][i-1];
_t_30_ += 8.0 * u_0[k][j+1][i+1];
_t_43_ += 8.0 * u_0[k][j+1][i+1];
_t_27_ -= u_0[k][j+2][i-1];
_t_22_ -= 8.0 * mu[k][j][i-1] * _t_27_;
_t_46_ = -u_0[k][j+2][i-1];
_t_30_ -= u_0[k][j+2][i+1];
_t_22_ += 8.0 * mu[k][j][i+1] * _t_30_;
_t_46_ += u_0[k][j+2][i+1];
_t_35_ -= la[k][j+2][i] * 8.0 * _t_46_;
_t_40_ += u_0[k][j-1][i-2];
_t_25_ = -u_0[k][j-1][i-2];
_t_25_ += u_0[k][j+1][i-2];
_t_22_ += mu[k][j][i-2] * 8.0 * _t_25_;
_t_43_ += u_0[k][j+1][i-2];
_t_40_ -= u_0[k][j-1][i+2];
_t_35_ -= 8.0 * la[k][j-1][i] * _t_40_;
_t_33_ = -u_0[k][j-1][i+2];
_t_33_ += u_0[k][j+1][i+2];
_t_22_ -= mu[k][j][i+2] * 8.0 * _t_33_;
_t_20_ += _t_21_ * _t_22_;
_t_43_ -= u_0[k][j+1][i+2];
_t_35_ += 8.0 * la[k][j+1][i] * _t_43_;
_t_20_ += _t_34_ * _t_35_;
a_r2 += _t_20_;
uacc_1kc0jc0ic0 = a1 * uacc_1[k][j][i];
uacc_1kc0jc0ic0 += cof * a_r2;
uacc_1[k][j][i] = uacc_1kc0jc0ic0;
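// Second half of the unrolled iteration: recompute the coefficients for plane k+1
// (b_mux*, b_muy*, b_muz*) and accumulate b_r2 to update uacc_1[k+1][j][i].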
uacc_1kp1jc0ic0 = a1 * uacc_1[k+1][j][i];
b_mux1 = -3.0 / 4.0 * mu[k+1][j][i] * strx[i];
b_mux1 -= 3.0 / 4.0 * mu[k+1][j][i-2] * strx[i-2];
b_mux2 = mu[k+1][j][i-2] * strx[i-2];
b_mux2 += 3.0 * mu[k+1][j][i] * strx[i];
b_mux1 += mu[k+1][j][i-1] * strx[i-1];
b_mux2 += 3.0 * mu[k+1][j][i-1] * strx[i-1];
b_mux3 = mu[k+1][j][i-1] * strx[i-1];
b_mux3 += 3.0 * mu[k+1][j][i] * strx[i];
b_mux2 += mu[k+1][j][i+1] * strx[i+1];
b_mux3 += 3.0 * mu[k+1][j][i+1] * strx[i+1];
b_mux4 = mu[k+1][j][i+1] * strx[i+1];
b_mux4 -= 3.0 / 4.0 * mu[k+1][j][i] * strx[i];
b_mux3 += mu[k+1][j][i+2] * strx[i+2];
b_mux4 -= 3.0 / 4.0 * mu[k+1][j][i+2] * strx[i+2];
b_muy1 = -3.0 / 4.0 * mu[k+1][j][i] * stry[j];
b_muy1 -= 3.0 / 4.0 * mu[k+1][j-2][i] * stry[j-2];
b_muy2 = mu[k+1][j-2][i] * stry[j-2];
b_muy2 += 3.0 * mu[k+1][j][i] * stry[j];
b_muy1 += mu[k+1][j-1][i] * stry[j-1];
b_muy2 += 3.0 * mu[k+1][j-1][i] * stry[j-1];
b_muy3 = mu[k+1][j-1][i] * stry[j-1];
b_muy3 += 3.0 * mu[k+1][j][i] * stry[j];
_t_80_ = 2.0 * b_muy1;
b_muy2 += mu[k+1][j+1][i] * stry[j+1];
b_muy3 += 3.0 * mu[k+1][j+1][i] * stry[j+1];
_t_82_ = 2.0 * b_muy2;
b_muy4 = mu[k+1][j+1][i] * stry[j+1];
b_muy4 -= 3.0 / 4.0 * mu[k+1][j][i] * stry[j];
b_muy3 += mu[k+1][j+2][i] * stry[j+2];
b_muy4 -= 3.0 / 4.0 * mu[k+1][j+2][i] * stry[j+2];
_t_84_ = 2.0 * b_muy3;
_t_86_ = 2.0 * b_muy4;
_t_80_ -= 3.0 / 4.0 * la[k+1][j-2][i] * stry[j-2];
_t_82_ += la[k+1][j-2][i] * stry[j-2];
_t_84_ += la[k+1][j+2][i] * stry[j+2];
_t_86_ -= 3.0 / 4.0 * la[k+1][j+2][i] * stry[j+2];
_t_80_ += la[k+1][j-1][i] * stry[j-1];
_t_82_ += 3.0 * la[k+1][j-1][i] * stry[j-1];
_t_84_ += la[k+1][j-1][i] * stry[j-1];
_t_82_ += la[k+1][j+1][i] * stry[j+1];
_t_84_ += 3.0 * la[k+1][j+1][i] * stry[j+1];
_t_86_ += la[k+1][j+1][i] * stry[j+1];
_t_80_ -= 3.0 / 4.0 * la[k+1][j][i] * stry[j];
_t_82_ += 3.0 * la[k+1][j][i] * stry[j];
_t_84_ += 3.0 * la[k+1][j][i] * stry[j];
_t_86_ -= 3.0 / 4.0 * la[k+1][j][i] * stry[j];
_t_74_ = -b_mux1 * u_1[k+1][j][i];
_t_74_ -= b_mux2 * u_1[k+1][j][i];
_t_74_ -= b_mux3 * u_1[k+1][j][i];
_t_74_ -= b_mux4 * u_1[k+1][j][i];
_t_74_ += b_mux1 * u_1[k+1][j][i-2];
_t_74_ += b_mux2 * u_1[k+1][j][i-1];
_t_74_ += b_mux3 * u_1[k+1][j][i+1];
_t_74_ += b_mux4 * u_1[k+1][j][i+2];
b_r2 = 1.0 / 6.0 * strx[i] * _t_74_;
_t_81_ = -u_1[k+1][j][i];
_t_81_ += u_1[k+1][j-2][i];
_t_79_ = _t_80_ * _t_81_;
_t_83_ = -u_1[k+1][j][i];
_t_83_ += u_1[k+1][j-1][i];
_t_79_ += _t_82_ * _t_83_;
_t_85_ = -u_1[k+1][j][i];
_t_85_ += u_1[k+1][j+1][i];
_t_79_ += _t_84_ * _t_85_;
_t_87_ = -u_1[k+1][j][i];
_t_87_ += u_1[k+1][j+2][i];
_t_79_ += _t_86_ * _t_87_;
b_r2 += 1.0 / 6.0 * stry[j] * _t_79_;
b_muz2 = mu[k-1][j][i] * strz[k-1];
b_muz2 += mu[k+2][j][i] * strz[k+2];
b_muz2 += 3.0 * mu[k+1][j][i] * strz[k+1];
b_muz2 += 3.0 * mu[k][j][i] * strz[k];
_t_88_ = -b_muz2 * u_1[k+1][j][i];
_t_88_ += b_muz2 * u_1[k][j][i];
b_muz1 = mu[k][j][i] * strz[k];
b_muz1 -= 3.0 / 4.0 * mu[k+1][j][i] * strz[k+1];
b_muz1 -= 3.0 / 4.0 * mu[k-1][j][i] * strz[k-1];
_t_88_ -= b_muz1 * u_1[k+1][j][i];
_t_88_ += b_muz1 * u_1[k-1][j][i];
b_muz3 = mu[k][j][i] * strz[k];
b_muz3 += 3.0 * mu[k+2][j][i] * strz[k+2];
b_muz3 += 3.0 * mu[k+1][j][i] * strz[k+1];
b_muz4 = mu[k+2][j][i] * strz[k+2];
b_muz4 -= 3.0 / 4.0 * mu[k+1][j][i] * strz[k+1];
b_muz3 += mu[k+3][j][i] * strz[k+3];
_t_88_ -= b_muz3 * u_1[k+1][j][i];
_t_88_ += b_muz3 * u_1[k+2][j][i];
b_muz4 -= 3.0 / 4.0 * mu[k+3][j][i] * strz[k+3];
_t_88_ -= b_muz4 * u_1[k+1][j][i];
_t_88_ += b_muz4 * u_1[k+3][j][i];
b_r2 += 1.0 / 6.0 * strz[k+1] * _t_88_;
_t_142_ = u_2[k+2][j-2][i];
_t_142_ -= u_2[k+2][j+2][i];
_t_142_ += 8.0 * -u_2[k+2][j-1][i];
_t_142_ += 8.0 * u_2[k+2][j+1][i];
_t_134_ = mu[k-1][j][i] * u_2[k-1][j-2][i];
_t_134_ -= mu[k-1][j][i] * u_2[k-1][j+2][i];
_t_134_ += 8.0 * mu[k+2][j][i] * _t_142_;
_t_121_ = la[k+1][j-2][i] * u_2[k-1][j-2][i];
_t_121_ -= la[k+1][j+2][i] * u_2[k-1][j+2][i];
_t_126_ = u_2[k-1][j-1][i];
_t_126_ += 8.0 * u_2[k+2][j-1][i];
_t_129_ = u_2[k-1][j+1][i];
_t_129_ += 8.0 * u_2[k+2][j+1][i];
_t_137_ = -u_2[k-1][j-1][i];
_t_137_ += u_2[k-1][j+1][i];
_t_134_ += mu[k-1][j][i] * 8.0 * _t_137_;
_t_124_ = u_2[k+2][j-2][i];
_t_132_ = u_2[k+2][j+2][i];
_t_121_ -= la[k+1][j-2][i] * u_2[k+3][j-2][i];
_t_134_ -= mu[k+3][j][i] * u_2[k+3][j-2][i];
_t_121_ += la[k+1][j+2][i] * u_2[k+3][j+2][i];
_t_134_ += mu[k+3][j][i] * u_2[k+3][j+2][i];
_t_126_ += 8.0 * -u_2[k][j-1][i];
_t_139_ = 8.0 * -u_2[k][j-1][i];
_t_129_ += 8.0 * -u_2[k][j+1][i];
_t_139_ += 8.0 * u_2[k][j+1][i];
_t_124_ += -u_2[k][j-2][i];
_t_121_ += la[k+1][j-2][i] * 8.0 * _t_124_;
_t_139_ += u_2[k][j-2][i];
_t_132_ += -u_2[k][j+2][i];
_t_121_ -= la[k+1][j+2][i] * 8.0 * _t_132_;
_t_139_ -= u_2[k][j+2][i];
_t_134_ -= 8.0 * mu[k][j][i] * _t_139_;
_t_126_ -= u_2[k+3][j-1][i];
_t_121_ -= 8.0 * la[k+1][j-1][i] * _t_126_;
_t_145_ = -u_2[k+3][j-1][i];
_t_129_ -= u_2[k+3][j+1][i];
_t_121_ += 8.0 * la[k+1][j+1][i] * _t_129_;
_t_145_ += u_2[k+3][j+1][i];
_t_134_ -= mu[k+3][j][i] * 8.0 * _t_145_;
_t_107_ = 1.0 / 144.0 * strx[i] * stry[j];
_t_94_ = 1.0 / 144.0 * strx[i] * stry[j];
_t_120_ = 1.0 / 144.0 * stry[j] * strz[k+1];
_t_93_ = _t_120_ * _t_121_;
_t_133_ = 1.0 / 144.0 * stry[j] * strz[k+1];
_t_93_ += _t_133_ * _t_134_;
_t_95_ = mu[k+1][j][i-2] * u_0[k+1][j-2][i-2];
_t_108_ = la[k+1][j-2][i] * u_0[k+1][j-2][i-2];
_t_95_ -= mu[k+1][j][i+2] * u_0[k+1][j-2][i+2];
_t_108_ -= la[k+1][j-2][i] * u_0[k+1][j-2][i+2];
_t_95_ -= mu[k+1][j][i-2] * u_0[k+1][j+2][i-2];
_t_108_ -= la[k+1][j+2][i] * u_0[k+1][j+2][i-2];
_t_95_ += mu[k+1][j][i+2] * u_0[k+1][j+2][i+2];
_t_108_ += la[k+1][j+2][i] * u_0[k+1][j+2][i+2];
_t_100_ = u_0[k+1][j-2][i-1];
_t_111_ = -u_0[k+1][j-2][i-1];
_t_111_ += u_0[k+1][j-2][i+1];
_t_108_ += la[k+1][j-2][i] * 8.0 * _t_111_;
_t_103_ = u_0[k+1][j-2][i+1];
_t_100_ += 8.0 * -u_0[k+1][j-1][i-1];
_t_113_ = 8.0 * -u_0[k+1][j-1][i-1];
_t_103_ += 8.0 * -u_0[k+1][j-1][i+1];
_t_113_ += 8.0 * u_0[k+1][j-1][i+1];
_t_100_ += 8.0 * u_0[k+1][j+1][i-1];
_t_116_ = 8.0 * -u_0[k+1][j+1][i-1];
_t_103_ += 8.0 * u_0[k+1][j+1][i+1];
_t_116_ += 8.0 * u_0[k+1][j+1][i+1];
_t_100_ -= u_0[k+1][j+2][i-1];
_t_95_ -= 8.0 * mu[k+1][j][i-1] * _t_100_;
_t_119_ = -u_0[k+1][j+2][i-1];
_t_103_ -= u_0[k+1][j+2][i+1];
_t_95_ += 8.0 * mu[k+1][j][i+1] * _t_103_;
_t_119_ += u_0[k+1][j+2][i+1];
_t_108_ -= la[k+1][j+2][i] * 8.0 * _t_119_;
_t_106_ = -u_0[k+1][j-1][i+2];
_t_113_ -= u_0[k+1][j-1][i+2];
_t_106_ += u_0[k+1][j+1][i+2];
_t_95_ -= mu[k+1][j][i+2] * 8.0 * _t_106_;
_t_116_ -= u_0[k+1][j+1][i+2];
_t_98_ = -u_0[k+1][j-1][i-2];
_t_113_ += u_0[k+1][j-1][i-2];
_t_108_ -= 8.0 * la[k+1][j-1][i] * _t_113_;
_t_98_ += u_0[k+1][j+1][i-2];
_t_95_ += mu[k+1][j][i-2] * 8.0 * _t_98_;
_t_93_ += _t_94_ * _t_95_;
_t_116_ += u_0[k+1][j+1][i-2];
_t_108_ += 8.0 * la[k+1][j+1][i] * _t_116_;
_t_93_ += _t_107_ * _t_108_;
b_r2 += _t_93_;
uacc_1kp1jc0ic0 += cof * b_r2;
uacc_1[k+1][j][i] = uacc_1kp1jc0ic0;
}
}
}
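// sw4_3: updates the z-component uacc_2 one k-plane at a time (the loop relies on
// the unroll pragma rather than the manual k/k+1 split used in sw4_1 and sw4_2).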
__global__ void __launch_bounds__ (128,2) sw4_3 (double * uacc_in_0, double * uacc_in_1, double * uacc_in_2, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * strx, double * stry, double * strz, int N) {
// Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
// Assumptions
int a1 = 1;
double h = 3.7;
double cof = 1e0 / ( h * h);
double (*uacc_0)[304][304] = (double (*)[304][304])uacc_in_0;
double (*uacc_1)[304][304] = (double (*)[304][304])uacc_in_1;
double (*uacc_2)[304][304] = (double (*)[304][304])uacc_in_2;
double (*u_0)[304][304] = (double (*)[304][304])u_in_0;
double (*u_1)[304][304] = (double (*)[304][304])u_in_1;
double (*u_2)[304][304] = (double (*)[304][304])u_in_2;
double (*mu)[304][304] = (double (*)[304][304])mu_in;
double (*la)[304][304] = (double (*)[304][304])la_in;
double mux1, mux2, mux3, mux4, muy1, muy2, muy3, muy4, muz1, muz2, muz3, muz4;
double r1, r2, r3;
if (i>=2 & j>=2 & i<=N-3 & j<=N-3) {
#pragma unroll 10
for (int k=2; k<=N-3; k++) {
mux1 = mu[k][j][i-1] * strx[i-1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i-2] * strx[i-2];
mux2 = mu[k][j][i-2] * strx[i-2] + mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i] + 3.0 * mu[k][j][i-1] * strx[i-1];
mux3 = mu[k][j][i-1] * strx[i-1] + mu[k][j][i+2] * strx[i+2] + 3.0 * mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i];
mux4 = mu[k][j][i+1] * strx[i+1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i+2] * strx[i+2];
muy1 = mu[k][j-1][i] * stry[j-1] - 3e0 / 4 * mu[k][j][i] * stry[j] -3e0 / 4 * mu[k][j-2][i] * stry[j-2];
muy2 = mu[k][j-2][i] * stry[j-2] + mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j] + 3.0 * mu[k][j-1][i] * stry[j-1];
muy3 = mu[k][j-1][i] * stry[j-1] + mu[k][j+2][i] * stry[j+2] + 3.0 * mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j];
muy4 = mu[k][j+1][i] * stry[j+1] - 3e0 / 4 * mu[k][j][i] * stry[j] - 3e0 / 4 * mu[k][j+2][i] * stry[j+2];
muz1 = mu[k-1][j][i] * strz[k-1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 / 4 * mu[k-2][j][i] * strz[k-2];
muz2 = mu[k-2][j][i] * strz[k-2] + mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k] + 3.0 * mu[k-1][j][i] * strz[k-1];
muz3 = mu[k-1][j][i] * strz[k-1] + mu[k+2][j][i] * strz[k+2] + 3.0 * mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k];
muz4 = mu[k+1][j][i] * strz[k+1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 /4 * mu[k+2][j][i] * strz[k+2];
r3 = 1e0 / 6 * (strx[i] * (mux1 * (u_2[k][j][i-2] - u_2[k][j][i]) + mux2 * (u_2[k][j][i-1] - u_2[k][j][i]) + mux3 * (u_2[k][j][i+1] - u_2[k][j][i]) + mux4 * (u_2[k][j][i+2] - u_2[k][j][i])) +
stry[j] * (muy1 * (u_2[k][j-2][i] - u_2[k][j][i]) + muy2 * (u_2[k][j-1][i] - u_2[k][j][i]) + muy3 * (u_2[k][j+1][i] - u_2[k][j][i]) + muy4 * (u_2[k][j+2][i] - u_2[k][j][i])) +
strz[k] * ((2 * muz1 + la[k-1][j][i] * strz[k-1] - 3e0 / 4 * la[k][j][i] * strz[k] - 3e0 / 4 * la[k-2][j][i] * strz[k-2]) * (u_2[k-2][j][i] - u_2[k][j][i]) +
(2 * muz2 + la[k-2][j][i] * strz[k-2] + la[k+1][j][i] * strz[k+1] + 3 * la[k][j][i] * strz[k] + 3 * la[k-1][j][i] * strz[k-1]) * (u_2[k-1][j][i] - u_2[k][j][i]) +
(2 * muz3 + la[k-1][j][i] * strz[k-1] + la[k+2][j][i] * strz[k+2] + 3 * la[k+1][j][i] * strz[k+1] + 3 * la[k][j][i] * strz[k]) * (u_2[k+1][j][i] - u_2[k][j][i]) +
(2 * muz4 + la[k+1][j][i] * strz[k+1] - 3e0 / 4 * la[k][j][i] * strz[k] - 3e0 / 4 * la[k+2][j][i] * strz[k+2]) * (u_2[k+2][j][i] - u_2[k][j][i])));
r3 += strx[i] * strz[k] * (1e0 / 144) * (mu[k][j][i-2] * (u_0[k-2][j][i-2] - u_0[k+2][j][i-2] + 8 * (-u_0[k-1][j][i-2] + u_0[k+1][j][i-2])) - 8 * (mu[k][j][i-1] * (u_0[k-2][j][i-1] - u_0[k+2][j][i-1] + 8 * (-u_0[k-1][j][i-1] + u_0[k+1][j][i-1]))) + 8 * (mu[k][j][i+1] * (u_0[k-2][j][i+1] - u_0[k+2][j][i+1] + 8 * (-u_0[k-1][j][i+1] + u_0[k+1][j][i+1]))) - (mu[k][j][i+2] * (u_0[k-2][j][i+2] - u_0[k+2][j][i+2] + 8 * (-u_0[k-1][j][i+2] + u_0[k+1][j][i+2]))));
r3 += stry[j] * strz[k] * (1e0 / 144) * (mu[k][j-2][i] * (u_1[k-2][j-2][i] - u_1[k+2][j-2][i] + 8 * (-u_1[k-1][j-2][i] + u_1[k+1][j-2][i])) - 8 * (mu[k][j-1][i] * (u_1[k-2][j-1][i] - u_1[k+2][j-1][i] + 8 * (-u_1[k-1][j-1][i] + u_1[k+1][j-1][i]))) + 8 * (mu[k][j+1][i] * (u_1[k-2][j+1][i] - u_1[k+2][j+1][i] + 8 * (-u_1[k-1][j+1][i] + u_1[k+1][j+1][i]))) - (mu[k][j+2][i] * (u_1[k-2][j+2][i] - u_1[k+2][j+2][i] + 8 * (-u_1[k-1][j+2][i] + u_1[k+1][j+2][i]))));
r3 += strx[i] * strz[k] * (1e0 / 144) * (la[k-2][j][i] * (u_0[k-2][j][i-2] - u_0[k-2][j][i+2] + 8 * (-u_0[k-2][j][i-1] + u_0[k-2][j][i+1])) - 8 * (la[k-1][j][i] * (u_0[k-1][j][i-2] - u_0[k-1][j][i+2] + 8 * (-u_0[k-1][j][i-1] + u_0[k-1][j][i+1]))) + 8 * (la[k+1][j][i] * (u_0[k+1][j][i-2] - u_0[k+1][j][i+2] + 8 * (-u_0[k+1][j][i-1] + u_0[k+1][j][i+1]))) - (la[k+2][j][i] * (u_0[k+2][j][i-2] - u_0[k+2][j][i+2] + 8 * (-u_0[k+2][j][i-1] + u_0[k+2][j][i+1]))));
r3 += stry[j] * strz[k] * (1e0 / 144) * (la[k-2][j][i] * (u_1[k-2][j-2][i] - u_1[k-2][j+2][i] + 8 * (-u_1[k-2][j-1][i] + u_1[k-2][j+1][i])) - 8 * (la[k-1][j][i] * (u_1[k-1][j-2][i] - u_1[k-1][j+2][i] + 8 * (-u_1[k-1][j-1][i] + u_1[k-1][j+1][i]))) + 8 * (la[k+1][j][i] * (u_1[k+1][j-2][i] - u_1[k+1][j+2][i] + 8 * (-u_1[k+1][j-1][i] + u_1[k+1][j+1][i]))) - (la[k+2][j][i] * (u_1[k+2][j-2][i] - u_1[k+2][j+2][i] + 8 * (-u_1[k+2][j-1][i] + u_1[k+2][j+1][i]))));
uacc_2[k][j][i] = a1 * uacc_2[k][j][i] + cof * r3;
}
}
}
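// host_code: allocates and populates the device arrays, launches the three kernels
// back to back on the default stream, then copies the accelerations back to the host
// and releases the device memory. Note that the kernels index the flat buffers as
// fixed [304][304] planes, so N is assumed to be consistent with that layout.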
extern "C" void host_code (double *h_uacc_0, double *h_uacc_1, double *h_uacc_2, double *h_u_0, double *h_u_1, double *h_u_2, double *h_mu, double *h_la, double *h_strx, double *h_stry, double *h_strz, int N) {
double *uacc_0;
cudaMalloc (&uacc_0, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for uacc_0\n");
cudaMemcpy (uacc_0, h_uacc_0, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *uacc_1;
cudaMalloc (&uacc_1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for uacc_1\n");
cudaMemcpy (uacc_1, h_uacc_1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *uacc_2;
cudaMalloc (&uacc_2, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for uacc_2\n");
cudaMemcpy (uacc_2, h_uacc_2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *u_0;
cudaMalloc (&u_0, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u_0\n");
cudaMemcpy (u_0, h_u_0, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *u_1;
cudaMalloc (&u_1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u_1\n");
cudaMemcpy (u_1, h_u_1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *u_2;
cudaMalloc (&u_2, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u_2\n");
cudaMemcpy (u_2, h_u_2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *mu;
cudaMalloc (&mu, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for mu\n");
cudaMemcpy (mu, h_mu, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *la;
cudaMalloc (&la, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for la\n");
cudaMemcpy (la, h_la, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *strx;
cudaMalloc (&strx, sizeof(double)*N);
check_error ("Failed to allocate device memory for strx\n");
cudaMemcpy (strx, h_strx, sizeof(double)*N, cudaMemcpyHostToDevice);
double *stry;
cudaMalloc (&stry, sizeof(double)*N);
check_error ("Failed to allocate device memory for stry\n");
cudaMemcpy (stry, h_stry, sizeof(double)*N, cudaMemcpyHostToDevice);
double *strz;
cudaMalloc (&strz, sizeof(double)*N);
check_error ("Failed to allocate device memory for strz\n");
cudaMemcpy (strz, h_strz, sizeof(double)*N, cudaMemcpyHostToDevice);
dim3 blockconfig (16, 8);
dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), 1);
sw4_1 <<<gridconfig, blockconfig>>> (uacc_0, uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N);
sw4_2 <<<gridconfig, blockconfig>>> (uacc_0, uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N);
sw4_3 <<<gridconfig, blockconfig>>> (uacc_0, uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N);
cudaMemcpy (h_uacc_0, uacc_0, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_uacc_1, uacc_1, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_uacc_2, uacc_2, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost);
cudaFree (uacc_0);
cudaFree (uacc_1);
cudaFree (uacc_2);
cudaFree (u_0);
cudaFree (u_1);
cudaFree (u_2);
cudaFree (mu);
cudaFree (la);
cudaFree (strx);
cudaFree (stry);
cudaFree (strz);
}
|
27d9e604032151c0075e05f1f7e798741134dd4a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* ldcp_decoder.h
* ldpc3
*
* Created by legal on 02/04/11.
* Copyright 2011 ENSEIRB. All rights reserved.
*
*/
/*----------------------------------------------------------------------------*/
#include "ADMM_GPU_Decoder.h"
#include "../gpu/ADMM_shared.h"
#include "../gpu/ADMM_GPU_32b.h"
#if 0
#include "../codes/Constantes_4000x2000.h"
#else
#include "./admm/admm_2640x1320.h"
#endif
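// Constructor: sizes the decoder for `frames` codewords per load and allocates the
// host/device buffers (input LLRs, internal LLRs, hard decisions, node degrees, the
// row/column index tables and the LZr message workspace), copying the constant
// index tables to the device once.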
ADMM_GPU_Decoder::ADMM_GPU_Decoder( int _frames )
{
hipError_t Status;
frames = _frames;
VNs_per_frame = NOEUD;
CNs_per_frame = PARITE;
MSGs_per_frame = MESSAGES;
VNs_per_load = frames * VNs_per_frame;
CNs_per_load = frames * CNs_per_frame;
MSGs_per_load = frames * MSGs_per_frame;
//
// LLRs entering the decoder
//
CUDA_MALLOC_HOST (&h_iLLR, VNs_per_load);
CUDA_MALLOC_DEVICE(&d_iLLR, VNs_per_load);
//
// LLRs internal to the decoder
//
CUDA_MALLOC_DEVICE(&d_oLLR, VNs_per_load);
//
// LLRs (hard decisions) leaving the decoder
//
CUDA_MALLOC_HOST (&h_hDecision, VNs_per_load);
CUDA_MALLOC_DEVICE(&d_hDecision, VNs_per_load);
// Table giving the degree of the VN nodes
CUDA_MALLOC_DEVICE(&d_degVNs, VNs_per_frame);
// Status = hipMemcpy(d_degVNs, t_degVN, nb_Node * sizeof(unsigned int), hipMemcpyHostToDevice);
// ERROR_CHECK(Status, (char*)__FILE__, __LINE__);
// Table giving the degree of the CN nodes
CUDA_MALLOC_DEVICE(&d_degCNs, CNs_per_frame);
// Status = hipMemcpy(d_degCNs, t_degCN, nb_Check * sizeof(unsigned int), hipMemcpyHostToDevice);
// ERROR_CHECK(Status, (char*)__FILE__, __LINE__);
#if 0
CUDA_MALLOC_DEVICE(&d_t_row, nb_Msg);
Status = hipMemcpy(d_t_row, t_row, nb_Msg * sizeof(unsigned int), hipMemcpyHostToDevice);
ERROR_CHECK(Status, (char*)__FILE__, __LINE__);
#else
CUDA_MALLOC_DEVICE(&d_t_row, MSGs_per_frame);
Status = hipMemcpy(d_t_row, t_row_pad_4, MSGs_per_frame * sizeof(unsigned int), hipMemcpyHostToDevice);
ERROR_CHECK(Status, (char*)__FILE__, __LINE__);
#endif
CUDA_MALLOC_DEVICE(&d_t_col, MSGs_per_frame);
Status = hipMemcpy(d_t_col, t_col, MSGs_per_frame * sizeof(unsigned int), hipMemcpyHostToDevice);
ERROR_CHECK(Status, (char*)__FILE__, __LINE__);
// hipMemcpyToSymbol (cst_t_row, t_row, nb_Msg * sizeof(unsigned int));
// hipMemcpyToSymbol (cst_t_col, t_col, nb_Msg * sizeof(unsigned int));
// CUDA_MALLOC_DEVICE(&d_MSG_C_2_V, nb_Msg + 512);
// CUDA_MALLOC_DEVICE(&d_MSG_V_2_C, nb_Msg + 512);
// Memory space for message exchange inside the decoder
CUDA_MALLOC_DEVICE(&LZr, 2 * MSGs_per_load);
// exit( 0 );
}
ADMM_GPU_Decoder::~ADMM_GPU_Decoder()
{
hipError_t Status;
Status = hipHostFree(h_iLLR); ERROR_CHECK(Status, (char*)__FILE__, __LINE__);
Status = hipFree(d_iLLR); ERROR_CHECK(Status, (char*)__FILE__, __LINE__);
Status = hipFree(d_oLLR); ERROR_CHECK(Status, (char*)__FILE__, __LINE__);
Status = hipHostFree(h_hDecision); ERROR_CHECK(Status, (char*)__FILE__, __LINE__);
Status = hipFree(d_hDecision); ERROR_CHECK(Status, (char*)__FILE__, __LINE__);
Status = hipFree(d_degCNs); ERROR_CHECK(Status, (char*)__FILE__, __LINE__);
Status = hipFree(d_degVNs); ERROR_CHECK(Status, (char*)__FILE__, __LINE__);
Status = hipFree(d_t_row); ERROR_CHECK(Status, (char*)__FILE__, __LINE__);
Status = hipFree(d_t_col); ERROR_CHECK(Status, (char*)__FILE__, __LINE__);
Status = hipFree(LZr); ERROR_CHECK(Status, (char*)__FILE__, __LINE__);
}
void ADMM_GPU_Decoder::decode(float* llrs, int* bits, int nb_iters, float _alpha, float _mu, float _rho)
{
hipError_t Status;
/* for(int m = 0; m < 100; m++){
printf("llr[%d] %f\n", m, llrs[m]);
};*/
/*for(int k=1; k<frames+1; k++){
for(int i=0; i<VNs_per_frame; i++){
llrs[VNs_per_frame * k + i] = llrs[i];
printf(",%f\n", llrs[VNs_per_frame * k + i] );
}
}*/
/*
VNs_per_frame = NOEUD;
CNs_per_frame = PARITE;
MSGs_per_frame = MESSAGES;
VNs_per_load = frames * VNs_per_frame;
CNs_per_load = frames * CNs_per_frame;
MSGs_per_load = frames * MSGs_per_frame;
*/
const float mu = _mu;
const float alpha = _alpha;
const float rho = _rho;
int threadsPerBlock = 128;
int blocksPerGridNode = (VNs_per_load + threadsPerBlock - 1) / threadsPerBlock;
int blocksPerGridCheck = (CNs_per_load + threadsPerBlock - 1) / threadsPerBlock;
int blocksPerGridMsgs = (MSGs_per_load + threadsPerBlock - 1) / threadsPerBlock;
/* Copy the decoder input data */
hipMemcpyAsync(d_iLLR, llrs, VNs_per_load * sizeof(float), hipMemcpyHostToDevice);
// hipMemcpyAsync(d_iLLR, data_1, VNs_per_load * sizeof(float), hipMemcpyHostToDevice);
/* INITIALIZE THE LDPC DECODER ON THE GPU */
hipLaunchKernelGGL(( ADMM_InitArrays_32b), dim3(blocksPerGridMsgs), dim3(threadsPerBlock), 0, 0, LZr, MSGs_per_load);
ERROR_CHECK(hipGetLastError( ), __FILE__, __LINE__);
hipLaunchKernelGGL(( ADMM_ScaleLLRs), dim3(blocksPerGridNode), dim3(threadsPerBlock), 0, 0, d_iLLR, VNs_per_load);
ERROR_CHECK(hipGetLastError( ), __FILE__, __LINE__);
// RUN THE DECODING PROCESS OVER n ITERATIONS
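// Note: the loop below always runs up to 200 iterations and ignores the nb_iters
// argument; every 5 iterations it checks the reduced parity results and exits early
// once their sum is zero.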
for(int k = 0; k < 200; k++)
{
hipLaunchKernelGGL(( ADMM_VN_kernel_deg3), dim3(blocksPerGridNode), dim3(threadsPerBlock), 0, 0,
d_iLLR, d_oLLR, LZr, d_t_row, VNs_per_load, alpha, mu);
//print d_iLLR
Status = hipMemcpy(h_iLLR, d_oLLR, VNs_per_load * sizeof(float), hipMemcpyDeviceToHost);
ERROR_CHECK(Status, __FILE__, __LINE__);
/*FILE* f1 = fopen("h_iLLR_32.json", "w");
for(int m=1; m<frames+1; m++){
for(int i=0; i<VNs_per_frame; i++){
//int off = VNs_per_frame * k;
fprintf(f1, "frames %d iter %d bit %4d value %4f\n", m, k, i, h_iLLR[i]);
}
}
fclose( f1 );*/
//
ERROR_CHECK(hipGetLastError( ), __FILE__, __LINE__);
hipLaunchKernelGGL(( ADMM_CN_kernel_deg6), dim3(blocksPerGridCheck), dim3(threadsPerBlock), 0, 0,
d_oLLR, LZr, d_t_col, d_hDecision, CNs_per_load, rho);
ERROR_CHECK(hipGetLastError( ), __FILE__, __LINE__);
// CODEWORD STOPPING-CRITERION HANDLING
if( (k%5) == 0 )
{
hipLaunchKernelGGL(( reduce), dim3(blocksPerGridCheck), dim3(threadsPerBlock), 0, 0, d_hDecision, CNs_per_load);
ERROR_CHECK(hipGetLastError( ), __FILE__, __LINE__);
Status = hipMemcpy(h_hDecision, d_hDecision, blocksPerGridCheck * sizeof(int), hipMemcpyDeviceToHost);
ERROR_CHECK(Status, __FILE__, __LINE__);
int sum = 0;
for(int p=0; p<blocksPerGridCheck; p++){
sum += h_hDecision[p];
}
if( sum == 0 ) break;
}
}
// FINAL HARD-DECISION STAGE
// printf("ADMM_HardDecision(%d)\n", VNs_per_load);
hipLaunchKernelGGL(( ADMM_HardDecision), dim3(blocksPerGridNode), dim3(threadsPerBlock), 0, 0, d_oLLR, d_hDecision, VNs_per_load);
ERROR_CHECK(hipGetLastError(), __FILE__, __LINE__);
// COPY THE HARD DECISIONS BACK TO THE HOST
// printf("h_hDecision = %p, d_hDecision = %p, VNs_per_load = %d\n", h_hDecision, d_hDecision, VNs_per_load);
Status = hipMemcpy(bits, d_hDecision, VNs_per_load * sizeof(int), hipMemcpyDeviceToHost);
ERROR_CHECK(Status, __FILE__, __LINE__);
//FILE* f2 = fopen("bits.txt", "r");
/* for(int m = 0; m < 100; m++){
printf ("bit[%d]: %d", m, bits[m]);
};
*/
// fclose( f2 );
/*for (int i=0; i<VNs_per_load; i++){
bits[i] = h_hDecision[i];
}*/
//
/* FILE* fp = fopen("bits64.txt", "w");
for(int m=1; m<frames+1; m++){
for(int i=0; i<VNs_per_frame; i++){
//int off = VNs_per_frame * k;
fprintf(fp, "frame %d bit %4d value %d\n", m, i, bits[i]);
}
}
fclose(fp);*/
/*for(int k=1; k<frames+1; k++){
bool error = false;
for(int i=0; i<VNs_per_frame; i++){
if( bits[VNs_per_frame * k + i] != bits[i] )
{
int off = VNs_per_frame * k;
printf("frame %d : bit %4d : value mismatch (%d != %d | %d != %d)\n", k, i, bits[i], bits[off + i], h_hDecision[i], h_hDecision[off + i]);
error = true;
}
}
if( error ) exit( 0 );
}*/
}
|
27d9e604032151c0075e05f1f7e798741134dd4a.cu
|
/*
* ldcp_decoder.h
* ldpc3
*
* Created by legal on 02/04/11.
* Copyright 2011 ENSEIRB. All rights reserved.
*
*/
/*----------------------------------------------------------------------------*/
#include "ADMM_GPU_Decoder.h"
#include "../gpu/ADMM_shared.h"
#include "../gpu/ADMM_GPU_32b.h"
#if 0
#include "../codes/Constantes_4000x2000.h"
#else
#include "./admm/admm_2640x1320.h"
#endif
ADMM_GPU_Decoder::ADMM_GPU_Decoder( int _frames )
{
cudaError_t Status;
frames = _frames;
VNs_per_frame = NOEUD;
CNs_per_frame = PARITE;
MSGs_per_frame = MESSAGES;
VNs_per_load = frames * VNs_per_frame;
CNs_per_load = frames * CNs_per_frame;
MSGs_per_load = frames * MSGs_per_frame;
//
// LLRs entering the decoder
//
CUDA_MALLOC_HOST (&h_iLLR, VNs_per_load);
CUDA_MALLOC_DEVICE(&d_iLLR, VNs_per_load);
//
// LLRs internal to the decoder
//
CUDA_MALLOC_DEVICE(&d_oLLR, VNs_per_load);
//
// LLRs (hard decisions) leaving the decoder
//
CUDA_MALLOC_HOST (&h_hDecision, VNs_per_load);
CUDA_MALLOC_DEVICE(&d_hDecision, VNs_per_load);
// Table giving the degree of the VN nodes
CUDA_MALLOC_DEVICE(&d_degVNs, VNs_per_frame);
// Status = cudaMemcpy(d_degVNs, t_degVN, nb_Node * sizeof(unsigned int), cudaMemcpyHostToDevice);
// ERROR_CHECK(Status, (char*)__FILE__, __LINE__);
// Table giving the degree of the CN nodes
CUDA_MALLOC_DEVICE(&d_degCNs, CNs_per_frame);
// Status = cudaMemcpy(d_degCNs, t_degCN, nb_Check * sizeof(unsigned int), cudaMemcpyHostToDevice);
// ERROR_CHECK(Status, (char*)__FILE__, __LINE__);
#if 0
CUDA_MALLOC_DEVICE(&d_t_row, nb_Msg);
Status = cudaMemcpy(d_t_row, t_row, nb_Msg * sizeof(unsigned int), cudaMemcpyHostToDevice);
ERROR_CHECK(Status, (char*)__FILE__, __LINE__);
#else
CUDA_MALLOC_DEVICE(&d_t_row, MSGs_per_frame);
Status = cudaMemcpy(d_t_row, t_row_pad_4, MSGs_per_frame * sizeof(unsigned int), cudaMemcpyHostToDevice);
ERROR_CHECK(Status, (char*)__FILE__, __LINE__);
#endif
CUDA_MALLOC_DEVICE(&d_t_col, MSGs_per_frame);
Status = cudaMemcpy(d_t_col, t_col, MSGs_per_frame * sizeof(unsigned int), cudaMemcpyHostToDevice);
ERROR_CHECK(Status, (char*)__FILE__, __LINE__);
// cudaMemcpyToSymbol (cst_t_row, t_row, nb_Msg * sizeof(unsigned int));
// cudaMemcpyToSymbol (cst_t_col, t_col, nb_Msg * sizeof(unsigned int));
// CUDA_MALLOC_DEVICE(&d_MSG_C_2_V, nb_Msg + 512);
// CUDA_MALLOC_DEVICE(&d_MSG_V_2_C, nb_Msg + 512);
// Memory space for message exchange inside the decoder
CUDA_MALLOC_DEVICE(&LZr, 2 * MSGs_per_load);
// exit( 0 );
}
ADMM_GPU_Decoder::~ADMM_GPU_Decoder()
{
cudaError_t Status;
Status = cudaFreeHost(h_iLLR); ERROR_CHECK(Status, (char*)__FILE__, __LINE__);
Status = cudaFree(d_iLLR); ERROR_CHECK(Status, (char*)__FILE__, __LINE__);
Status = cudaFree(d_oLLR); ERROR_CHECK(Status, (char*)__FILE__, __LINE__);
Status = cudaFreeHost(h_hDecision); ERROR_CHECK(Status, (char*)__FILE__, __LINE__);
Status = cudaFree(d_hDecision); ERROR_CHECK(Status, (char*)__FILE__, __LINE__);
Status = cudaFree(d_degCNs); ERROR_CHECK(Status, (char*)__FILE__, __LINE__);
Status = cudaFree(d_degVNs); ERROR_CHECK(Status, (char*)__FILE__, __LINE__);
Status = cudaFree(d_t_row); ERROR_CHECK(Status, (char*)__FILE__, __LINE__);
Status = cudaFree(d_t_col); ERROR_CHECK(Status, (char*)__FILE__, __LINE__);
Status = cudaFree(LZr); ERROR_CHECK(Status, (char*)__FILE__, __LINE__);
}
void ADMM_GPU_Decoder::decode(float* llrs, int* bits, int nb_iters, float _alpha, float _mu, float _rho)
{
cudaError_t Status;
/* for(int m = 0; m < 100; m++){
printf("llr[%d] %f\n", m, llrs[m]);
};*/
/*for(int k=1; k<frames+1; k++){
for(int i=0; i<VNs_per_frame; i++){
llrs[VNs_per_frame * k + i] = llrs[i];
printf(",%f\n", llrs[VNs_per_frame * k + i] );
}
}*/
/*
VNs_per_frame = NOEUD;
CNs_per_frame = PARITE;
MSGs_per_frame = MESSAGES;
VNs_per_load = frames * VNs_per_frame;
CNs_per_load = frames * CNs_per_frame;
MSGs_per_load = frames * MSGs_per_frame;
*/
const float mu = _mu;
const float alpha = _alpha;
const float rho = _rho;
int threadsPerBlock = 128;
int blocksPerGridNode = (VNs_per_load + threadsPerBlock - 1) / threadsPerBlock;
int blocksPerGridCheck = (CNs_per_load + threadsPerBlock - 1) / threadsPerBlock;
int blocksPerGridMsgs = (MSGs_per_load + threadsPerBlock - 1) / threadsPerBlock;
/* Copy the decoder input data */
cudaMemcpyAsync(d_iLLR, llrs, VNs_per_load * sizeof(float), cudaMemcpyHostToDevice);
// cudaMemcpyAsync(d_iLLR, data_1, VNs_per_load * sizeof(float), cudaMemcpyHostToDevice);
/* INITIALIZE THE LDPC DECODER ON THE GPU */
ADMM_InitArrays_32b<<<blocksPerGridMsgs, threadsPerBlock>>>(LZr, MSGs_per_load);
ERROR_CHECK(cudaGetLastError( ), __FILE__, __LINE__);
ADMM_ScaleLLRs<<<blocksPerGridNode, threadsPerBlock>>>(d_iLLR, VNs_per_load);
ERROR_CHECK(cudaGetLastError( ), __FILE__, __LINE__);
// RUN THE DECODING PROCESS OVER n ITERATIONS
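// Note: the loop below always runs up to 200 iterations and ignores the nb_iters
// argument; every 5 iterations it checks the reduced parity results and exits early
// once their sum is zero.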
for(int k = 0; k < 200; k++)
{
ADMM_VN_kernel_deg3<<<blocksPerGridNode, threadsPerBlock>>>
(d_iLLR, d_oLLR, LZr, d_t_row, VNs_per_load, alpha, mu);
//print d_iLLR
Status = cudaMemcpy(h_iLLR, d_oLLR, VNs_per_load * sizeof(float), cudaMemcpyDeviceToHost);
ERROR_CHECK(Status, __FILE__, __LINE__);
/*FILE* f1 = fopen("h_iLLR_32.json", "w");
for(int m=1; m<frames+1; m++){
for(int i=0; i<VNs_per_frame; i++){
//int off = VNs_per_frame * k;
fprintf(f1, "frames %d iter %d bit %4d value %4f\n", m, k, i, h_iLLR[i]);
}
}
fclose( f1 );*/
//
ERROR_CHECK(cudaGetLastError( ), __FILE__, __LINE__);
ADMM_CN_kernel_deg6<<<blocksPerGridCheck, threadsPerBlock>>>
(d_oLLR, LZr, d_t_col, d_hDecision, CNs_per_load, rho);
ERROR_CHECK(cudaGetLastError( ), __FILE__, __LINE__);
// CODEWORD STOPPING-CRITERION HANDLING
if( (k%5) == 0 )
{
reduce<<<blocksPerGridCheck, threadsPerBlock>>>(d_hDecision, CNs_per_load);
ERROR_CHECK(cudaGetLastError( ), __FILE__, __LINE__);
Status = cudaMemcpy(h_hDecision, d_hDecision, blocksPerGridCheck * sizeof(int), cudaMemcpyDeviceToHost);
ERROR_CHECK(Status, __FILE__, __LINE__);
int sum = 0;
for(int p=0; p<blocksPerGridCheck; p++){
sum += h_hDecision[p];
}
if( sum == 0 ) break;
}
}
// FINAL HARD-DECISION STAGE
// printf("ADMM_HardDecision(%d)\n", VNs_per_load);
ADMM_HardDecision<<<blocksPerGridNode, threadsPerBlock>>>(d_oLLR, d_hDecision, VNs_per_load);
ERROR_CHECK(cudaGetLastError(), __FILE__, __LINE__);
// COPY THE HARD DECISIONS BACK TO THE HOST
// printf("h_hDecision = %p, d_hDecision = %p, VNs_per_load = %d\n", h_hDecision, d_hDecision, VNs_per_load);
Status = cudaMemcpy(bits, d_hDecision, VNs_per_load * sizeof(int), cudaMemcpyDeviceToHost);
ERROR_CHECK(Status, __FILE__, __LINE__);
//FILE* f2 = fopen("bits.txt", "r");
/* for(int m = 0; m < 100; m++){
printf ("bit[%d]: %d", m, bits[m]);
};
*/
// fclose( f2 );
/*for (int i=0; i<VNs_per_load; i++){
bits[i] = h_hDecision[i];
}*/
//
/* FILE* fp = fopen("bits64.txt", "w");
for(int m=1; m<frames+1; m++){
for(int i=0; i<VNs_per_frame; i++){
//int off = VNs_per_frame * k;
fprintf(fp, "frame %d bit %4d value %d\n", m, i, bits[i]);
}
}
fclose(fp);*/
/*for(int k=1; k<frames+1; k++){
bool error = false;
for(int i=0; i<VNs_per_frame; i++){
if( bits[VNs_per_frame * k + i] != bits[i] )
{
int off = VNs_per_frame * k;
printf("frame %d : bit %4d : value mismatch (%d != %d | %d != %d)\n", k, i, bits[i], bits[off + i], h_hDecision[i], h_hDecision[off + i]);
error = true;
}
}
if( error ) exit( 0 );
}*/
}
|
171fa91f706eb5b0f4b0022cb816379d3738018b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright 2017-2018 by Contributors
*/
#include <dmlc/parameter.h>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <xgboost/data.h>
#include <xgboost/predictor.h>
#include <xgboost/tree_model.h>
#include <xgboost/tree_updater.h>
#include <memory>
#include "../common/common.h"
#include "../common/device_helpers.cuh"
#include "../common/host_device_vector.h"
namespace xgboost {
namespace predictor {
DMLC_REGISTRY_FILE_TAG(gpu_predictor);
template <typename IterT>
void IncrementOffset(IterT begin_itr, IterT end_itr, size_t amount) {
thrust::transform(begin_itr, end_itr, begin_itr,
[=] __device__(size_t elem) { return elem + amount; });
}
/**
* \struct DevicePredictionNode
*
* \brief Packed 16 byte representation of a tree node for use in device
* prediction
*/
struct DevicePredictionNode {
XGBOOST_DEVICE DevicePredictionNode()
: fidx{-1}, left_child_idx{-1}, right_child_idx{-1} {}
union NodeValue {
float leaf_weight;
float fvalue;
};
int fidx;
int left_child_idx;
int right_child_idx;
NodeValue val;
DevicePredictionNode(const RegTree::Node& n) { // NOLINT
static_assert(sizeof(DevicePredictionNode) == 16, "Size is not 16 bytes");
this->left_child_idx = n.LeftChild();
this->right_child_idx = n.RightChild();
this->fidx = n.SplitIndex();
if (n.DefaultLeft()) {
fidx |= (1U << 31);
}
if (n.IsLeaf()) {
this->val.leaf_weight = n.LeafValue();
} else {
this->val.fvalue = n.SplitCond();
}
}
XGBOOST_DEVICE bool IsLeaf() const { return left_child_idx == -1; }
XGBOOST_DEVICE int GetFidx() const { return fidx & ((1U << 31) - 1U); }
XGBOOST_DEVICE bool MissingLeft() const { return (fidx >> 31) != 0; }
XGBOOST_DEVICE int MissingIdx() const {
if (MissingLeft()) {
return this->left_child_idx;
} else {
return this->right_child_idx;
}
}
XGBOOST_DEVICE float GetFvalue() const { return val.fvalue; }
XGBOOST_DEVICE float GetWeight() const { return val.leaf_weight; }
};
struct ElementLoader {
bool use_shared;
common::Span<const size_t> d_row_ptr;
common::Span<const Entry> d_data;
int num_features;
float* smem;
size_t entry_start;
__device__ ElementLoader(bool use_shared, common::Span<const size_t> row_ptr,
common::Span<const Entry> entry, int num_features,
float* smem, int num_rows, size_t entry_start)
: use_shared(use_shared),
d_row_ptr(row_ptr),
d_data(entry),
num_features(num_features),
smem(smem),
entry_start(entry_start) {
// Copy instances
if (use_shared) {
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
int shared_elements = blockDim.x * num_features;
dh::BlockFill(smem, shared_elements, nanf(""));
__syncthreads();
if (global_idx < num_rows) {
bst_uint elem_begin = d_row_ptr[global_idx];
bst_uint elem_end = d_row_ptr[global_idx + 1];
for (bst_uint elem_idx = elem_begin; elem_idx < elem_end; elem_idx++) {
Entry elem = d_data[elem_idx - entry_start];
smem[threadIdx.x * num_features + elem.index] = elem.fvalue;
}
}
__syncthreads();
}
}
__device__ float GetFvalue(int ridx, int fidx) {
if (use_shared) {
return smem[threadIdx.x * num_features + fidx];
} else {
// Binary search
auto begin_ptr = d_data.begin() + (d_row_ptr[ridx] - entry_start);
auto end_ptr = d_data.begin() + (d_row_ptr[ridx + 1] - entry_start);
common::Span<const Entry>::iterator previous_middle;
while (end_ptr != begin_ptr) {
auto middle = begin_ptr + (end_ptr - begin_ptr) / 2;
if (middle == previous_middle) {
break;
} else {
previous_middle = middle;
}
if (middle->index == fidx) {
return middle->fvalue;
} else if (middle->index < fidx) {
begin_ptr = middle;
} else {
end_ptr = middle;
}
}
// Value is missing
return nanf("");
}
}
};
__device__ float GetLeafWeight(bst_uint ridx, const DevicePredictionNode* tree,
ElementLoader* loader) {
DevicePredictionNode n = tree[0];
while (!n.IsLeaf()) {
float fvalue = loader->GetFvalue(ridx, n.GetFidx());
// Missing value
if (isnan(fvalue)) {
n = tree[n.MissingIdx()];
} else {
if (fvalue < n.GetFvalue()) {
n = tree[n.left_child_idx];
} else {
n = tree[n.right_child_idx];
}
}
}
return n.GetWeight();
}
template <int BLOCK_THREADS>
__global__ void PredictKernel(common::Span<const DevicePredictionNode> d_nodes,
common::Span<float> d_out_predictions,
common::Span<size_t> d_tree_segments,
common::Span<int> d_tree_group,
common::Span<const size_t> d_row_ptr,
common::Span<const Entry> d_data, size_t tree_begin,
size_t tree_end, size_t num_features,
size_t num_rows, size_t entry_start,
bool use_shared, int num_group) {
extern __shared__ float smem[];
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
ElementLoader loader(use_shared, d_row_ptr, d_data, num_features, smem,
num_rows, entry_start);
if (global_idx >= num_rows) return;
if (num_group == 1) {
float sum = 0;
for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
const DevicePredictionNode* d_tree =
&d_nodes[d_tree_segments[tree_idx - tree_begin]];
sum += GetLeafWeight(global_idx, d_tree, &loader);
}
d_out_predictions[global_idx] += sum;
} else {
for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
int tree_group = d_tree_group[tree_idx];
const DevicePredictionNode* d_tree =
&d_nodes[d_tree_segments[tree_idx - tree_begin]];
bst_uint out_prediction_idx = global_idx * num_group + tree_group;
d_out_predictions[out_prediction_idx] +=
GetLeafWeight(global_idx, d_tree, &loader);
}
}
}
class GPUPredictor : public xgboost::Predictor {
protected:
struct DevicePredictionCacheEntry {
std::shared_ptr<DMatrix> data;
HostDeviceVector<bst_float> predictions;
};
private:
void DeviceOffsets(const HostDeviceVector<size_t>& data,
size_t total_size,
std::vector<size_t>* out_offsets) {
auto& offsets = *out_offsets;
offsets.resize(devices_.Size() + 1);
offsets[0] = 0;
#pragma omp parallel for schedule(static, 1) if (devices_.Size() > 1)
for (int shard = 0; shard < devices_.Size(); ++shard) {
int device = devices_.DeviceId(shard);
auto data_span = data.DeviceSpan(device);
dh::safe_cuda(hipSetDevice(device));
if (data_span.size() == 0) {
offsets[shard + 1] = total_size;
} else {
// copy the last element from every shard
dh::safe_cuda(hipMemcpy(&offsets.at(shard + 1),
&data_span[data_span.size()-1],
sizeof(size_t), hipMemcpyDeviceToHost));
}
}
}
// This function populates the explicit offsets that can be used to create a window into the
// underlying host vector. The window starts from the `batch_offset` and has a size of
// `batch_size`, and is sharded across all the devices. Each shard is granular depending on
// the number of output classes `n_classes`.
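// Illustrative example (assumed values): with batch_size = 100, two devices,
// n_classes = 3 and batch_offset = 0, rows_per_shard = 50 and the computed
// offsets become {0, 150, 300, total_size}.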
void PredictionDeviceOffsets(size_t total_size, size_t batch_offset, size_t batch_size,
int n_classes, std::vector<size_t>* out_offsets) {
auto& offsets = *out_offsets;
size_t n_shards = devices_.Size();
offsets.resize(n_shards + 2);
size_t rows_per_shard = common::DivRoundUp(batch_size, n_shards);
for (size_t shard = 0; shard < devices_.Size(); ++shard) {
size_t n_rows = std::min(batch_size, shard * rows_per_shard);
offsets[shard] = batch_offset + n_rows * n_classes;
}
offsets[n_shards] = batch_offset + batch_size * n_classes;
offsets[n_shards + 1] = total_size;
}
struct DeviceShard {
DeviceShard() : device_{-1} {}
void Init(int device) {
this->device_ = device;
max_shared_memory_bytes_ = dh::MaxSharedMemory(this->device_);
}
void InitModel(const gbm::GBTreeModel& model,
const thrust::host_vector<size_t>& h_tree_segments,
const thrust::host_vector<DevicePredictionNode>& h_nodes,
size_t tree_begin, size_t tree_end) {
dh::safe_cuda(hipSetDevice(device_));
nodes_.resize(h_nodes.size());
dh::safe_cuda(hipMemcpyAsync(nodes_.data().get(), h_nodes.data(),
sizeof(DevicePredictionNode) * h_nodes.size(),
hipMemcpyHostToDevice));
tree_segments_.resize(h_tree_segments.size());
dh::safe_cuda(hipMemcpyAsync(tree_segments_.data().get(), h_tree_segments.data(),
sizeof(size_t) * h_tree_segments.size(),
hipMemcpyHostToDevice));
tree_group_.resize(model.tree_info.size());
dh::safe_cuda(hipMemcpyAsync(tree_group_.data().get(), model.tree_info.data(),
sizeof(int) * model.tree_info.size(),
hipMemcpyHostToDevice));
this->tree_begin_ = tree_begin;
this->tree_end_ = tree_end;
this->num_group_ = model.param.num_output_group;
}
void PredictInternal
(const SparsePage& batch, size_t num_features,
HostDeviceVector<bst_float>* predictions) {
if (predictions->DeviceSize(device_) == 0) { return; }
dh::safe_cuda(hipSetDevice(device_));
const int BLOCK_THREADS = 128;
size_t num_rows = batch.offset.DeviceSize(device_) - 1;
const int GRID_SIZE = static_cast<int>(common::DivRoundUp(num_rows, BLOCK_THREADS));
int shared_memory_bytes = static_cast<int>
(sizeof(float) * num_features * BLOCK_THREADS);
bool use_shared = true;
if (shared_memory_bytes > max_shared_memory_bytes_) {
shared_memory_bytes = 0;
use_shared = false;
}
const auto& data_distr = batch.data.Distribution();
size_t entry_start = data_distr.ShardStart(batch.data.Size(),
data_distr.Devices().Index(device_));
hipLaunchKernelGGL(( PredictKernel<BLOCK_THREADS>), dim3(GRID_SIZE), dim3(BLOCK_THREADS), shared_memory_bytes, 0,
dh::ToSpan(nodes_), predictions->DeviceSpan(device_), dh::ToSpan(tree_segments_),
dh::ToSpan(tree_group_), batch.offset.DeviceSpan(device_),
batch.data.DeviceSpan(device_), this->tree_begin_, this->tree_end_, num_features,
num_rows, entry_start, use_shared, this->num_group_);
}
private:
int device_;
dh::device_vector<DevicePredictionNode> nodes_;
dh::device_vector<size_t> tree_segments_;
dh::device_vector<int> tree_group_;
size_t max_shared_memory_bytes_;
size_t tree_begin_;
size_t tree_end_;
int num_group_;
};
void InitModel(const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end) {
CHECK_EQ(model.param.size_leaf_vector, 0);
// Copy decision trees to device
thrust::host_vector<size_t> h_tree_segments;
h_tree_segments.reserve((tree_end - tree_begin) + 1);
size_t sum = 0;
h_tree_segments.push_back(sum);
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
sum += model.trees.at(tree_idx)->GetNodes().size();
h_tree_segments.push_back(sum);
}
thrust::host_vector<DevicePredictionNode> h_nodes(h_tree_segments.back());
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
auto& src_nodes = model.trees.at(tree_idx)->GetNodes();
std::copy(src_nodes.begin(), src_nodes.end(),
h_nodes.begin() + h_tree_segments[tree_idx - tree_begin]);
}
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard &shard) {
shard.InitModel(model, h_tree_segments, h_nodes, tree_begin, tree_end);
});
}
void DevicePredictInternal(DMatrix* dmat,
HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model, size_t tree_begin,
size_t tree_end) {
if (tree_end - tree_begin == 0) { return; }
monitor_.StartCuda("DevicePredictInternal");
InitModel(model, tree_begin, tree_end);
size_t batch_offset = 0;
for (auto &batch : dmat->GetRowBatches()) {
bool is_external_memory = batch.Size() < dmat->Info().num_row_;
if (is_external_memory) {
std::vector<size_t> out_preds_offsets;
PredictionDeviceOffsets(out_preds->Size(), batch_offset, batch.Size(),
model.param.num_output_group, &out_preds_offsets);
out_preds->Reshard(GPUDistribution::Explicit(devices_, out_preds_offsets));
}
batch.offset.Shard(GPUDistribution::Overlap(devices_, 1));
std::vector<size_t> device_offsets;
DeviceOffsets(batch.offset, batch.data.Size(), &device_offsets);
batch.data.Reshard(GPUDistribution::Explicit(devices_, device_offsets));
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
shard.PredictInternal(batch, model.param.num_feature, out_preds);
});
batch_offset += batch.Size() * model.param.num_output_group;
}
out_preds->Reshard(GPUDistribution::Granular(devices_, model.param.num_output_group));
monitor_.StopCuda("DevicePredictInternal");
}
public:
GPUPredictor() // NOLINT
: cpu_predictor_(Predictor::Create("cpu_predictor", learner_param_)) {}
void PredictBatch(DMatrix* dmat, HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model, int tree_begin,
unsigned ntree_limit = 0) override {
GPUSet devices = GPUSet::All(learner_param_->gpu_id, learner_param_->n_gpus,
dmat->Info().num_row_);
CHECK_NE(devices.Size(), 0);
ConfigureShards(devices);
if (this->PredictFromCache(dmat, out_preds, model, ntree_limit)) {
return;
}
this->InitOutPredictions(dmat->Info(), out_preds, model);
int tree_end = ntree_limit * model.param.num_output_group;
if (ntree_limit == 0 || ntree_limit > model.trees.size()) {
tree_end = static_cast<unsigned>(model.trees.size());
}
DevicePredictInternal(dmat, out_preds, model, tree_begin, tree_end);
}
protected:
void InitOutPredictions(const MetaInfo& info,
HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model) const {
size_t n_classes = model.param.num_output_group;
size_t n = n_classes * info.num_row_;
const HostDeviceVector<bst_float>& base_margin = info.base_margin_;
out_preds->Shard(GPUDistribution::Granular(devices_, n_classes));
out_preds->Resize(n);
if (base_margin.Size() != 0) {
CHECK_EQ(out_preds->Size(), n);
out_preds->Copy(base_margin);
} else {
out_preds->Fill(model.base_margin);
}
}
bool PredictFromCache(DMatrix* dmat, HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model, unsigned ntree_limit) {
if (ntree_limit == 0 ||
ntree_limit * model.param.num_output_group >= model.trees.size()) {
auto it = cache_.find(dmat);
if (it != cache_.end()) {
const HostDeviceVector<bst_float>& y = it->second.predictions;
if (y.Size() != 0) {
monitor_.StartCuda("PredictFromCache");
out_preds->Shard(y.Distribution());
out_preds->Resize(y.Size());
out_preds->Copy(y);
monitor_.StopCuda("PredictFromCache");
return true;
}
}
}
return false;
}
void UpdatePredictionCache(
const gbm::GBTreeModel& model,
std::vector<std::unique_ptr<TreeUpdater>>* updaters,
int num_new_trees) override {
auto old_ntree = model.trees.size() - num_new_trees;
// update cache entry
for (auto& kv : cache_) {
PredictionCacheEntry& e = kv.second;
DMatrix* dmat = kv.first;
HostDeviceVector<bst_float>& predictions = e.predictions;
if (predictions.Size() == 0) {
this->InitOutPredictions(dmat->Info(), &predictions, model);
}
if (model.param.num_output_group == 1 && updaters->size() > 0 &&
num_new_trees == 1 &&
updaters->back()->UpdatePredictionCache(e.data.get(), &predictions)) {
// do nothing
} else {
DevicePredictInternal(dmat, &predictions, model, old_ntree, model.trees.size());
}
}
}
void PredictInstance(const SparsePage::Inst& inst,
std::vector<bst_float>* out_preds,
const gbm::GBTreeModel& model, unsigned ntree_limit,
unsigned root_index) override {
cpu_predictor_->PredictInstance(inst, out_preds, model, root_index);
}
void PredictLeaf(DMatrix* p_fmat, std::vector<bst_float>* out_preds,
const gbm::GBTreeModel& model,
unsigned ntree_limit) override {
cpu_predictor_->PredictLeaf(p_fmat, out_preds, model, ntree_limit);
}
void PredictContribution(DMatrix* p_fmat,
std::vector<bst_float>* out_contribs,
const gbm::GBTreeModel& model, unsigned ntree_limit,
bool approximate, int condition,
unsigned condition_feature) override {
cpu_predictor_->PredictContribution(p_fmat, out_contribs, model, ntree_limit,
approximate, condition,
condition_feature);
}
void PredictInteractionContributions(DMatrix* p_fmat,
std::vector<bst_float>* out_contribs,
const gbm::GBTreeModel& model,
unsigned ntree_limit,
bool approximate) override {
cpu_predictor_->PredictInteractionContributions(p_fmat, out_contribs, model,
ntree_limit, approximate);
}
void Init(const std::vector<std::pair<std::string, std::string>>& cfg,
const std::vector<std::shared_ptr<DMatrix>>& cache) override {
Predictor::Init(cfg, cache);
cpu_predictor_->Init(cfg, cache);
GPUSet devices = GPUSet::All(learner_param_->gpu_id, learner_param_->n_gpus);
ConfigureShards(devices);
}
private:
/*! \brief Re configure shards when GPUSet is changed. */
void ConfigureShards(GPUSet devices) {
if (devices_ == devices) return;
devices_ = devices;
shards_.clear();
shards_.resize(devices_.Size());
dh::ExecuteIndexShards(&shards_, [=](size_t i, DeviceShard& shard){
shard.Init(devices_.DeviceId(i));
});
}
std::unique_ptr<Predictor> cpu_predictor_;
std::vector<DeviceShard> shards_;
GPUSet devices_;
common::Monitor monitor_;
};
XGBOOST_REGISTER_PREDICTOR(GPUPredictor, "gpu_predictor")
.describe("Make predictions using GPU.")
.set_body([]() { return new GPUPredictor(); });
} // namespace predictor
} // namespace xgboost
|
171fa91f706eb5b0f4b0022cb816379d3738018b.cu
|
/*!
* Copyright 2017-2018 by Contributors
*/
#include <dmlc/parameter.h>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <xgboost/data.h>
#include <xgboost/predictor.h>
#include <xgboost/tree_model.h>
#include <xgboost/tree_updater.h>
#include <memory>
#include "../common/common.h"
#include "../common/device_helpers.cuh"
#include "../common/host_device_vector.h"
namespace xgboost {
namespace predictor {
DMLC_REGISTRY_FILE_TAG(gpu_predictor);
template <typename IterT>
void IncrementOffset(IterT begin_itr, IterT end_itr, size_t amount) {
thrust::transform(begin_itr, end_itr, begin_itr,
[=] __device__(size_t elem) { return elem + amount; });
}
/**
* \struct DevicePredictionNode
*
* \brief Packed 16 byte representation of a tree node for use in device
* prediction
*/
struct DevicePredictionNode {
XGBOOST_DEVICE DevicePredictionNode()
: fidx{-1}, left_child_idx{-1}, right_child_idx{-1} {}
union NodeValue {
float leaf_weight;
float fvalue;
};
int fidx;
int left_child_idx;
int right_child_idx;
NodeValue val;
DevicePredictionNode(const RegTree::Node& n) { // NOLINT
static_assert(sizeof(DevicePredictionNode) == 16, "Size is not 16 bytes");
this->left_child_idx = n.LeftChild();
this->right_child_idx = n.RightChild();
this->fidx = n.SplitIndex();
if (n.DefaultLeft()) {
fidx |= (1U << 31);
}
if (n.IsLeaf()) {
this->val.leaf_weight = n.LeafValue();
} else {
this->val.fvalue = n.SplitCond();
}
}
XGBOOST_DEVICE bool IsLeaf() const { return left_child_idx == -1; }
XGBOOST_DEVICE int GetFidx() const { return fidx & ((1U << 31) - 1U); }
XGBOOST_DEVICE bool MissingLeft() const { return (fidx >> 31) != 0; }
XGBOOST_DEVICE int MissingIdx() const {
if (MissingLeft()) {
return this->left_child_idx;
} else {
return this->right_child_idx;
}
}
XGBOOST_DEVICE float GetFvalue() const { return val.fvalue; }
XGBOOST_DEVICE float GetWeight() const { return val.leaf_weight; }
};
struct ElementLoader {
bool use_shared;
common::Span<const size_t> d_row_ptr;
common::Span<const Entry> d_data;
int num_features;
float* smem;
size_t entry_start;
__device__ ElementLoader(bool use_shared, common::Span<const size_t> row_ptr,
common::Span<const Entry> entry, int num_features,
float* smem, int num_rows, size_t entry_start)
: use_shared(use_shared),
d_row_ptr(row_ptr),
d_data(entry),
num_features(num_features),
smem(smem),
entry_start(entry_start) {
// Copy instances
if (use_shared) {
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
int shared_elements = blockDim.x * num_features;
dh::BlockFill(smem, shared_elements, nanf(""));
__syncthreads();
if (global_idx < num_rows) {
bst_uint elem_begin = d_row_ptr[global_idx];
bst_uint elem_end = d_row_ptr[global_idx + 1];
for (bst_uint elem_idx = elem_begin; elem_idx < elem_end; elem_idx++) {
Entry elem = d_data[elem_idx - entry_start];
smem[threadIdx.x * num_features + elem.index] = elem.fvalue;
}
}
__syncthreads();
}
}
__device__ float GetFvalue(int ridx, int fidx) {
if (use_shared) {
return smem[threadIdx.x * num_features + fidx];
} else {
// Binary search
auto begin_ptr = d_data.begin() + (d_row_ptr[ridx] - entry_start);
auto end_ptr = d_data.begin() + (d_row_ptr[ridx + 1] - entry_start);
common::Span<const Entry>::iterator previous_middle;
while (end_ptr != begin_ptr) {
auto middle = begin_ptr + (end_ptr - begin_ptr) / 2;
if (middle == previous_middle) {
break;
} else {
previous_middle = middle;
}
if (middle->index == fidx) {
return middle->fvalue;
} else if (middle->index < fidx) {
begin_ptr = middle;
} else {
end_ptr = middle;
}
}
// Value is missing
return nanf("");
}
}
};
__device__ float GetLeafWeight(bst_uint ridx, const DevicePredictionNode* tree,
ElementLoader* loader) {
DevicePredictionNode n = tree[0];
while (!n.IsLeaf()) {
float fvalue = loader->GetFvalue(ridx, n.GetFidx());
// Missing value
if (isnan(fvalue)) {
n = tree[n.MissingIdx()];
} else {
if (fvalue < n.GetFvalue()) {
n = tree[n.left_child_idx];
} else {
n = tree[n.right_child_idx];
}
}
}
return n.GetWeight();
}
template <int BLOCK_THREADS>
__global__ void PredictKernel(common::Span<const DevicePredictionNode> d_nodes,
common::Span<float> d_out_predictions,
common::Span<size_t> d_tree_segments,
common::Span<int> d_tree_group,
common::Span<const size_t> d_row_ptr,
common::Span<const Entry> d_data, size_t tree_begin,
size_t tree_end, size_t num_features,
size_t num_rows, size_t entry_start,
bool use_shared, int num_group) {
extern __shared__ float smem[];
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
ElementLoader loader(use_shared, d_row_ptr, d_data, num_features, smem,
num_rows, entry_start);
if (global_idx >= num_rows) return;
if (num_group == 1) {
float sum = 0;
for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
const DevicePredictionNode* d_tree =
&d_nodes[d_tree_segments[tree_idx - tree_begin]];
sum += GetLeafWeight(global_idx, d_tree, &loader);
}
d_out_predictions[global_idx] += sum;
} else {
for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
int tree_group = d_tree_group[tree_idx];
const DevicePredictionNode* d_tree =
&d_nodes[d_tree_segments[tree_idx - tree_begin]];
bst_uint out_prediction_idx = global_idx * num_group + tree_group;
d_out_predictions[out_prediction_idx] +=
GetLeafWeight(global_idx, d_tree, &loader);
}
}
}
class GPUPredictor : public xgboost::Predictor {
protected:
struct DevicePredictionCacheEntry {
std::shared_ptr<DMatrix> data;
HostDeviceVector<bst_float> predictions;
};
private:
void DeviceOffsets(const HostDeviceVector<size_t>& data,
size_t total_size,
std::vector<size_t>* out_offsets) {
auto& offsets = *out_offsets;
offsets.resize(devices_.Size() + 1);
offsets[0] = 0;
#pragma omp parallel for schedule(static, 1) if (devices_.Size() > 1)
for (int shard = 0; shard < devices_.Size(); ++shard) {
int device = devices_.DeviceId(shard);
auto data_span = data.DeviceSpan(device);
dh::safe_cuda(cudaSetDevice(device));
if (data_span.size() == 0) {
offsets[shard + 1] = total_size;
} else {
// copy the last element from every shard
dh::safe_cuda(cudaMemcpy(&offsets.at(shard + 1),
&data_span[data_span.size()-1],
sizeof(size_t), cudaMemcpyDeviceToHost));
}
}
}
// This function populates the explicit offsets that can be used to create a window into the
// underlying host vector. The window starts from the `batch_offset` and has a size of
// `batch_size`, and is sharded across all the devices. Each shard is granular depending on
// the number of output classes `n_classes`.
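// Illustrative example (assumed values): with batch_size = 100, two devices,
// n_classes = 3 and batch_offset = 0, rows_per_shard = 50 and the computed
// offsets become {0, 150, 300, total_size}.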
void PredictionDeviceOffsets(size_t total_size, size_t batch_offset, size_t batch_size,
int n_classes, std::vector<size_t>* out_offsets) {
auto& offsets = *out_offsets;
size_t n_shards = devices_.Size();
offsets.resize(n_shards + 2);
size_t rows_per_shard = common::DivRoundUp(batch_size, n_shards);
for (size_t shard = 0; shard < devices_.Size(); ++shard) {
size_t n_rows = std::min(batch_size, shard * rows_per_shard);
offsets[shard] = batch_offset + n_rows * n_classes;
}
offsets[n_shards] = batch_offset + batch_size * n_classes;
offsets[n_shards + 1] = total_size;
}
struct DeviceShard {
DeviceShard() : device_{-1} {}
void Init(int device) {
this->device_ = device;
max_shared_memory_bytes_ = dh::MaxSharedMemory(this->device_);
}
void InitModel(const gbm::GBTreeModel& model,
const thrust::host_vector<size_t>& h_tree_segments,
const thrust::host_vector<DevicePredictionNode>& h_nodes,
size_t tree_begin, size_t tree_end) {
dh::safe_cuda(cudaSetDevice(device_));
nodes_.resize(h_nodes.size());
dh::safe_cuda(cudaMemcpyAsync(nodes_.data().get(), h_nodes.data(),
sizeof(DevicePredictionNode) * h_nodes.size(),
cudaMemcpyHostToDevice));
tree_segments_.resize(h_tree_segments.size());
dh::safe_cuda(cudaMemcpyAsync(tree_segments_.data().get(), h_tree_segments.data(),
sizeof(size_t) * h_tree_segments.size(),
cudaMemcpyHostToDevice));
tree_group_.resize(model.tree_info.size());
dh::safe_cuda(cudaMemcpyAsync(tree_group_.data().get(), model.tree_info.data(),
sizeof(int) * model.tree_info.size(),
cudaMemcpyHostToDevice));
this->tree_begin_ = tree_begin;
this->tree_end_ = tree_end;
this->num_group_ = model.param.num_output_group;
}
void PredictInternal
(const SparsePage& batch, size_t num_features,
HostDeviceVector<bst_float>* predictions) {
if (predictions->DeviceSize(device_) == 0) { return; }
dh::safe_cuda(cudaSetDevice(device_));
const int BLOCK_THREADS = 128;
size_t num_rows = batch.offset.DeviceSize(device_) - 1;
const int GRID_SIZE = static_cast<int>(common::DivRoundUp(num_rows, BLOCK_THREADS));
int shared_memory_bytes = static_cast<int>
(sizeof(float) * num_features * BLOCK_THREADS);
bool use_shared = true;
if (shared_memory_bytes > max_shared_memory_bytes_) {
shared_memory_bytes = 0;
use_shared = false;
}
const auto& data_distr = batch.data.Distribution();
size_t entry_start = data_distr.ShardStart(batch.data.Size(),
data_distr.Devices().Index(device_));
PredictKernel<BLOCK_THREADS><<<GRID_SIZE, BLOCK_THREADS, shared_memory_bytes>>>
(dh::ToSpan(nodes_), predictions->DeviceSpan(device_), dh::ToSpan(tree_segments_),
dh::ToSpan(tree_group_), batch.offset.DeviceSpan(device_),
batch.data.DeviceSpan(device_), this->tree_begin_, this->tree_end_, num_features,
num_rows, entry_start, use_shared, this->num_group_);
}
private:
int device_;
dh::device_vector<DevicePredictionNode> nodes_;
dh::device_vector<size_t> tree_segments_;
dh::device_vector<int> tree_group_;
size_t max_shared_memory_bytes_;
size_t tree_begin_;
size_t tree_end_;
int num_group_;
};
void InitModel(const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end) {
CHECK_EQ(model.param.size_leaf_vector, 0);
// Copy decision trees to device
thrust::host_vector<size_t> h_tree_segments;
h_tree_segments.reserve((tree_end - tree_begin) + 1);
size_t sum = 0;
h_tree_segments.push_back(sum);
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
sum += model.trees.at(tree_idx)->GetNodes().size();
h_tree_segments.push_back(sum);
}
thrust::host_vector<DevicePredictionNode> h_nodes(h_tree_segments.back());
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
auto& src_nodes = model.trees.at(tree_idx)->GetNodes();
std::copy(src_nodes.begin(), src_nodes.end(),
h_nodes.begin() + h_tree_segments[tree_idx - tree_begin]);
}
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard &shard) {
shard.InitModel(model, h_tree_segments, h_nodes, tree_begin, tree_end);
});
}
void DevicePredictInternal(DMatrix* dmat,
HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model, size_t tree_begin,
size_t tree_end) {
if (tree_end - tree_begin == 0) { return; }
monitor_.StartCuda("DevicePredictInternal");
InitModel(model, tree_begin, tree_end);
size_t batch_offset = 0;
for (auto &batch : dmat->GetRowBatches()) {
bool is_external_memory = batch.Size() < dmat->Info().num_row_;
if (is_external_memory) {
std::vector<size_t> out_preds_offsets;
PredictionDeviceOffsets(out_preds->Size(), batch_offset, batch.Size(),
model.param.num_output_group, &out_preds_offsets);
out_preds->Reshard(GPUDistribution::Explicit(devices_, out_preds_offsets));
}
batch.offset.Shard(GPUDistribution::Overlap(devices_, 1));
std::vector<size_t> device_offsets;
DeviceOffsets(batch.offset, batch.data.Size(), &device_offsets);
batch.data.Reshard(GPUDistribution::Explicit(devices_, device_offsets));
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
shard.PredictInternal(batch, model.param.num_feature, out_preds);
});
batch_offset += batch.Size() * model.param.num_output_group;
}
out_preds->Reshard(GPUDistribution::Granular(devices_, model.param.num_output_group));
monitor_.StopCuda("DevicePredictInternal");
}
public:
GPUPredictor() // NOLINT
: cpu_predictor_(Predictor::Create("cpu_predictor", learner_param_)) {}
void PredictBatch(DMatrix* dmat, HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model, int tree_begin,
unsigned ntree_limit = 0) override {
GPUSet devices = GPUSet::All(learner_param_->gpu_id, learner_param_->n_gpus,
dmat->Info().num_row_);
CHECK_NE(devices.Size(), 0);
ConfigureShards(devices);
if (this->PredictFromCache(dmat, out_preds, model, ntree_limit)) {
return;
}
this->InitOutPredictions(dmat->Info(), out_preds, model);
int tree_end = ntree_limit * model.param.num_output_group;
if (ntree_limit == 0 || ntree_limit > model.trees.size()) {
tree_end = static_cast<unsigned>(model.trees.size());
}
DevicePredictInternal(dmat, out_preds, model, tree_begin, tree_end);
}
protected:
void InitOutPredictions(const MetaInfo& info,
HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model) const {
size_t n_classes = model.param.num_output_group;
size_t n = n_classes * info.num_row_;
const HostDeviceVector<bst_float>& base_margin = info.base_margin_;
out_preds->Shard(GPUDistribution::Granular(devices_, n_classes));
out_preds->Resize(n);
if (base_margin.Size() != 0) {
CHECK_EQ(out_preds->Size(), n);
out_preds->Copy(base_margin);
} else {
out_preds->Fill(model.base_margin);
}
}
bool PredictFromCache(DMatrix* dmat, HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model, unsigned ntree_limit) {
if (ntree_limit == 0 ||
ntree_limit * model.param.num_output_group >= model.trees.size()) {
auto it = cache_.find(dmat);
if (it != cache_.end()) {
const HostDeviceVector<bst_float>& y = it->second.predictions;
if (y.Size() != 0) {
monitor_.StartCuda("PredictFromCache");
out_preds->Shard(y.Distribution());
out_preds->Resize(y.Size());
out_preds->Copy(y);
monitor_.StopCuda("PredictFromCache");
return true;
}
}
}
return false;
}
void UpdatePredictionCache(
const gbm::GBTreeModel& model,
std::vector<std::unique_ptr<TreeUpdater>>* updaters,
int num_new_trees) override {
auto old_ntree = model.trees.size() - num_new_trees;
// update cache entry
for (auto& kv : cache_) {
PredictionCacheEntry& e = kv.second;
DMatrix* dmat = kv.first;
HostDeviceVector<bst_float>& predictions = e.predictions;
if (predictions.Size() == 0) {
this->InitOutPredictions(dmat->Info(), &predictions, model);
}
if (model.param.num_output_group == 1 && updaters->size() > 0 &&
num_new_trees == 1 &&
updaters->back()->UpdatePredictionCache(e.data.get(), &predictions)) {
// do nothing
} else {
DevicePredictInternal(dmat, &predictions, model, old_ntree, model.trees.size());
}
}
}
void PredictInstance(const SparsePage::Inst& inst,
std::vector<bst_float>* out_preds,
const gbm::GBTreeModel& model, unsigned ntree_limit,
unsigned root_index) override {
cpu_predictor_->PredictInstance(inst, out_preds, model, root_index);
}
void PredictLeaf(DMatrix* p_fmat, std::vector<bst_float>* out_preds,
const gbm::GBTreeModel& model,
unsigned ntree_limit) override {
cpu_predictor_->PredictLeaf(p_fmat, out_preds, model, ntree_limit);
}
void PredictContribution(DMatrix* p_fmat,
std::vector<bst_float>* out_contribs,
const gbm::GBTreeModel& model, unsigned ntree_limit,
bool approximate, int condition,
unsigned condition_feature) override {
cpu_predictor_->PredictContribution(p_fmat, out_contribs, model, ntree_limit,
approximate, condition,
condition_feature);
}
void PredictInteractionContributions(DMatrix* p_fmat,
std::vector<bst_float>* out_contribs,
const gbm::GBTreeModel& model,
unsigned ntree_limit,
bool approximate) override {
cpu_predictor_->PredictInteractionContributions(p_fmat, out_contribs, model,
ntree_limit, approximate);
}
void Init(const std::vector<std::pair<std::string, std::string>>& cfg,
const std::vector<std::shared_ptr<DMatrix>>& cache) override {
Predictor::Init(cfg, cache);
cpu_predictor_->Init(cfg, cache);
GPUSet devices = GPUSet::All(learner_param_->gpu_id, learner_param_->n_gpus);
ConfigureShards(devices);
}
private:
/*! \brief Re configure shards when GPUSet is changed. */
void ConfigureShards(GPUSet devices) {
if (devices_ == devices) return;
devices_ = devices;
shards_.clear();
shards_.resize(devices_.Size());
dh::ExecuteIndexShards(&shards_, [=](size_t i, DeviceShard& shard){
shard.Init(devices_.DeviceId(i));
});
}
std::unique_ptr<Predictor> cpu_predictor_;
std::vector<DeviceShard> shards_;
GPUSet devices_;
common::Monitor monitor_;
};
XGBOOST_REGISTER_PREDICTOR(GPUPredictor, "gpu_predictor")
.describe("Make predictions using GPU.")
.set_body([]() { return new GPUPredictor(); });
} // namespace predictor
} // namespace xgboost
|
8629f95638b6e397fe221ce3fa73ab8b1172070c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green, and Blue is in it.
//The 'A' stands for Alpha and is used for transparency; it will be
//ignored in this homework.
//Each channel Red, Blue, Green, and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
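//For example (illustrative): a pure-red pixel (R=255, G=0, B=0) maps to an intensity
//of about .299f * 255 ~ 76, while a pure-green pixel maps to about .587f * 255 ~ 150,
//reflecting the eye's stronger response to green.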
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int row = blockIdx.x;
int col = threadIdx.x;
uchar4 mColor = rgbaImage[row * numCols + col];
greyImage[row * numCols + col] = .299f * mColor.x + .587f * mColor.y + .114f * mColor.z;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//here one block is launched per image row, with one thread per pixel column
const dim3 blockSize(numCols, 1, 1); //TODO
const dim3 gridSize(numRows,1, 1); //TODO
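//Note (assumption): this launch configuration only works while numCols does not
//exceed the device's maximum threads per block (typically 1024); larger images
//would need a tiled 2D launch.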
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
|
8629f95638b6e397fe221ce3fa73ab8b1172070c.cu
|
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green, and Blue is in it.
//The 'A' stands for Alpha and is used for transparency; it will be
//ignored in this homework.
//Each channel Red, Blue, Green, and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
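//For example (illustrative): a pure-red pixel (R=255, G=0, B=0) maps to an intensity
//of about .299f * 255 ~ 76, while a pure-green pixel maps to about .587f * 255 ~ 150,
//reflecting the eye's stronger response to green.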
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int row = blockIdx.x;
int col = threadIdx.x;
uchar4 mColor = rgbaImage[row * numCols + col];
greyImage[row * numCols + col] = .299f * mColor.x + .587f * mColor.y + .114f * mColor.z;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//here one block is launched per image row, with one thread per pixel column
const dim3 blockSize(numCols, 1, 1); //TODO
const dim3 gridSize(numRows,1, 1); //TODO
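//Note (assumption): this launch configuration only works while numCols does not
//exceed the device's maximum threads per block (typically 1024); larger images
//would need a tiled 2D launch.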
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
0969097bcd4c05e08d62d73e2cadcdaae263309f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <time.h>
#include <fstream>
#include <string>
#include <rocblas.h>
#include <hip/device_functions.h>
using namespace std;
#define TILE_WIDTH 32
void init(double* A, int x, int y)
{
srand(time(NULL));
int i, j;
for (i = 0; i < x; ++i) {
for (j = 0; j < y; ++j) {
A[i * y + j] = (double)(rand() % 100) + ((double)rand() / RAND_MAX);
}
}
}
void init_from_file(double* A, int x, int y)
{
int i = 0, j = 0;
ifstream file;
file.open("input.txt");
if (!file.is_open()) return;
string word;
while (file >> word)
{
A[i * y + j] = atof(word.c_str());
j = j + 1;
if (j % y == 0) {
j = 0;
i = i + 1;
}
}
}
//=-=-=-=-=-=-=-=-= Function of Kernel =-=-=-=-=-=-=-=-=-=-=-=-=-=-=
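// The kernel below effectively computes C = A^T * A for a rows x columns input,
// using shared-memory tiling; since the result is symmetric, thread blocks below
// the diagonal return early and each remaining block writes both C[Row][Col] and
// C[Col][Row].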
__global__ void MatrixMulKernel(double* device_a, double* device_c, int rows, int columns)
{
__shared__ double At_s[TILE_WIDTH][TILE_WIDTH];
__shared__ double A_s[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int Row = blockIdx.y * blockDim.y + threadIdx.y;
int Col = blockIdx.x * blockDim.x + threadIdx.x;
// the result matrix is symmetric: skip thread blocks below the diagonal
if (blockIdx.x < blockIdx.y) return;
double Pvalue;
Pvalue = 0.0;
int m;
int loop;
int mod = rows % TILE_WIDTH;
// iterate over the first N-1 tiling steps
if (mod > 0) loop = (rows / TILE_WIDTH);
else loop = (rows / TILE_WIDTH) - 1;
//printf("Loop %d", loop);
for (m = 0; m < loop; m++) {
//initializing the shared memory matrices
if (Row < rows) {
At_s[ty][tx] = device_a[(m * TILE_WIDTH + tx) * columns + Row];
}
else {
At_s[ty][tx] = 0;
}
if (Col < columns) {
A_s[ty][tx] = device_a[(m * TILE_WIDTH + ty) * columns + Col];
}
else {
A_s[ty][tx] = 0;
}
__syncthreads();
//accumulating the partial results
for (int k = 0; k < TILE_WIDTH; k++) {
Pvalue += At_s[ty][k] * A_s[k][tx];
}
__syncthreads();
}
//The last step of tiling (special treatment)
int remaining_tile_length = rows - m * TILE_WIDTH;
if (ty >= remaining_tile_length) {
A_s[ty][tx] = 0;
}
else {
A_s[ty][tx] = device_a[(m * TILE_WIDTH + ty) * columns + Col];
}
if (tx >= remaining_tile_length) {
At_s[ty][tx] = 0;
}
else {
At_s[ty][tx] = device_a[(m * TILE_WIDTH + tx) * columns + Row];
}
__syncthreads();
//final results calculation
for (int k = 0; k < remaining_tile_length; k++) {
Pvalue += At_s[ty][k] * A_s[k][tx];
}
// transferring results to global memory
if (Row * columns + Col < columns * rows && Col * columns + Row < columns * rows) {
device_c[Row * columns + Col] = Pvalue;
device_c[Col * columns + Row] = Pvalue;
}
}
//=-=-=-=-=-=-=-=-=-=-=-=-=-END OF KERNEL FUNCTION=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
int main(int argc, char* argv[])
{
if (argc < 3) {
fprintf(stderr, "Usage: %s rows columns\n", argv[0]);
exit(1);
}
// variable initiation-----------------------------
hipEvent_t start, stop;
hipDeviceProp_t prop;
float kernel_time;
int BLOCK_SIZE_PER_DIM = 32;
int rows = atoi(argv[1]), columns = atoi(argv[2]);
int Blocks_number;
int size = rows * columns;
int size_result = columns * columns;
double* host_c, * host_b, * host_a; // host matrices: host_b for the cuBLAS result, host_c for our kernel's result
double* dev_c, * dev_b, * dev_a; // device matrices
//-------------------------------------------------
hipError_t test = hipGetDeviceProperties(&prop, 0);
// Array size allocation --------------------------
host_a = (double*)malloc(size * sizeof(double));
host_b = (double*)malloc(size_result * sizeof(double));
host_c = (double*)malloc(size_result * sizeof(double));
// initialize randomly the array A
init(host_a, rows, columns);
//allocate the needed arrays in GPU global memory and copy the input array over
hipMalloc((void**)&dev_c, size_result * sizeof(double));
hipMalloc((void**)&dev_a, size * sizeof(double));
hipMemcpy(dev_a, host_a, size * sizeof(double), hipMemcpyHostToDevice);
//Find the grid and block sizes
unsigned int numBlocksX = ((double)(columns - 1) / BLOCK_SIZE_PER_DIM + 1);
unsigned int numBlocksY = ((double)(rows - 1) / BLOCK_SIZE_PER_DIM + 1);
dim3 dimGrid(numBlocksX, numBlocksY);
dim3 dimBlock(BLOCK_SIZE_PER_DIM, BLOCK_SIZE_PER_DIM);
//-------------------------------------------------
// save the A array for checking whether the result is correct
/*ofstream input_stream;
input_stream.open("input2.txt");
for (int r = 0; r < rows; r++) {
for (int c = 0; c < columns; c++) {
input_stream << host_a[r * columns + c] << "\t";
}
input_stream << endl;
}
input_stream.close();*/
//===============KERNEL===================
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
MatrixMulKernel << < dimGrid, dimBlock >> > (dev_a, dev_c, rows, columns);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&kernel_time, start, stop);
//===============KERNEL END===================
//print the kernel time and copy the result array back to the host
cout << "Time for our kernel : " << kernel_time << endl;
hipMemcpy(host_c, dev_c, size_result * sizeof(double), hipMemcpyDeviceToHost);
//Save output file for testing
/*ofstream output_stream;
output_stream.open("outputer3.txt");
for (int r = 0; r < columns; r++) {
for (int c = 0; c < columns; c++) {
output_stream << host_c[r * columns + c] << "\t";
}
output_stream << endl;
}
output_stream.close();*/
// Free allocated space for arrays
hipFree(dev_a);
hipFree(dev_c);
free(host_a);
free(host_c);
//--------------------------------
return 0;
}
|
0969097bcd4c05e08d62d73e2cadcdaae263309f.cu
|
#include <cuda_runtime.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <time.h>
#include <fstream>
#include <string>
#include <cublas_v2.h>
#include <device_functions.h>
using namespace std;
#define TILE_WIDTH 32
void init(double* A, int x, int y)
{
srand(time(NULL));
int i, j;
for (i = 0; i < x; ++i) {
for (j = 0; j < y; ++j) {
A[i * y + j] = (double)(rand() % 100) + ((double)rand() / RAND_MAX);
}
}
}
void init_from_file(double* A, int x, int y)
{
int i = 0, j = 0;
ifstream file;
file.open("input.txt");
if (!file.is_open()) return;
string word;
while (file >> word)
{
A[i * y + j] = atof(word.c_str());
j = j + 1;
if (j % y == 0) {
j = 0;
i = i + 1;
}
}
}
//=-=-=-=-=-=-=-=-= Function of Kernel =-=-=-=-=-=-=-=-=-=-=-=-=-=-=
__global__ void MatrixMulKernel(double* device_a, double* device_c, int rows, int columns)
{
__shared__ double At_s[TILE_WIDTH][TILE_WIDTH];
__shared__ double A_s[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int Row = blockIdx.y * blockDim.y + threadIdx.y;
int Col = blockIdx.x * blockDim.x + threadIdx.x;
// the result matrix is symmetric, so skip thread blocks below the diagonal
if (blockIdx.x < blockIdx.y) return;
double Pvalue;
Pvalue = 0.0;
int m;
int loop;
int mod = rows % TILE_WIDTH;
// number of tiling steps handled by the main loop; the final (possibly partial) tile is handled separately below
if (mod > 0) loop = (rows / TILE_WIDTH);
else loop = (rows / TILE_WIDTH) - 1;
//printf("Loop %d", loop);
for (m = 0; m < loop; m++) {
//load the shared-memory tiles
if (Row < rows) {
At_s[ty][tx] = device_a[(m * TILE_WIDTH + tx) * columns + Row];
}
else {
At_s[ty][tx] = 0;
}
if (Col < columns) {
A_s[ty][tx] = device_a[(m * TILE_WIDTH + ty) * columns + Col];
}
else {
A_s[ty][tx] = 0;
}
__syncthreads();
//accumulate partial products for this tile
for (int k = 0; k < TILE_WIDTH; k++) {
Pvalue += At_s[ty][k] * A_s[k][tx];
}
__syncthreads();
}
//The last step of tiling (special treatment)
int remaining_tile_length = rows - m * TILE_WIDTH;
if (ty >= remaining_tile_length) {
A_s[ty][tx] = 0;
}
else {
A_s[ty][tx] = device_a[(m * TILE_WIDTH + ty) * columns + Col];
}
if (tx >= remaining_tile_length) {
At_s[ty][tx] = 0;
}
else {
At_s[ty][tx] = device_a[(m * TILE_WIDTH + tx) * columns + Row];
}
__syncthreads();
//final results calculation
for (int k = 0; k < remaining_tile_length; k++) {
Pvalue += At_s[ty][k] * A_s[k][tx];
}
// transfer the result and its symmetric counterpart to global memory
if (Row < columns && Col < columns) {
device_c[Row * columns + Col] = Pvalue;
device_c[Col * columns + Row] = Pvalue;
}
}
//=-=-=-=-=-=-=-=-=-=-=-=-=-END OF KERNEL FUNCTION=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
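// Hedged verification sketch (not part of the original program): a naive CPU reference for
// C = A^T * A. It could be called on host_a and host_c after the device-to-host copy in
// main(); the function name and the tolerance are illustrative assumptions only.
bool verify_ata(const double* A, const double* C, int rows, int columns, double tol = 1e-6)
{
    for (int i = 0; i < columns; ++i) {
        for (int j = 0; j < columns; ++j) {
            double ref = 0.0;
            for (int r = 0; r < rows; ++r) {
                ref += A[r * columns + i] * A[r * columns + j]; // (A^T A)_{ij} = sum_r A_{ri} * A_{rj}
            }
            double diff = ref - C[i * columns + j];
            if (diff < 0.0) diff = -diff;
            if (diff > tol * (1.0 + (ref < 0.0 ? -ref : ref))) {
                printf("Mismatch at (%d,%d): reference %f, kernel %f\n", i, j, ref, C[i * columns + j]);
                return false;
            }
        }
    }
    return true;
}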
int main(int argc, char* argv[])
{
if (argc < 3) {
fprintf(stderr, "Usage: %s rows columns\n", argv[0]);
exit(1);
}
// variable initialization-----------------------------
cudaEvent_t start, stop;
cudaDeviceProp prop;
float kernel_time;
int BLOCK_SIZE_PER_DIM = 32;
int rows = atoi(argv[1]), columns = atoi(argv[2]);
int Blocks_number;
int size = rows * columns;
int size_result = columns * columns;
double* host_c, * host_b, * host_a; // host matrices: b is reserved for a cuBLAS comparison, c holds our kernel's result
double* dev_c, * dev_b, * dev_a; // device matrices
//-------------------------------------------------
cudaError_t test = cudaGetDeviceProperties(&prop, 0);
// Array size allocation --------------------------
host_a = (double*)malloc(size * sizeof(double));
host_b = (double*)malloc(size_result * sizeof(double));
host_c = (double*)malloc(size_result * sizeof(double));
// randomly initialize array A
init(host_a, rows, columns);
// copy the arrays needed by the kernel to GPU global memory
cudaMalloc((void**)&dev_c, size_result * sizeof(double));
cudaMalloc((void**)&dev_a, size * sizeof(double));
cudaMemcpy(dev_a, host_a, size * sizeof(double), cudaMemcpyHostToDevice);
//Find the grid and block sizes
unsigned int numBlocksX = ((double)(columns - 1) / BLOCK_SIZE_PER_DIM + 1);
unsigned int numBlocksY = ((double)(rows - 1) / BLOCK_SIZE_PER_DIM + 1);
dim3 dimGrid(numBlocksX, numBlocksY);
dim3 dimBlock(BLOCK_SIZE_PER_DIM, BLOCK_SIZE_PER_DIM);
//-------------------------------------------------
// optionally dump array A so the result can be checked offline
/*ofstream input_stream;
input_stream.open("input2.txt");
for (int r = 0; r < rows; r++) {
for (int c = 0; c < columns; c++) {
input_stream << host_a[r * columns + c] << "\t";
}
input_stream << endl;
}
input_stream.close();*/
//===============KERNEL===================
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
MatrixMulKernel << < dimGrid, dimBlock >> > (dev_a, dev_c, rows, columns);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&kernel_time, start, stop);
//===============KERNEL END===================
//print the kernel time and copy the result array back to the host
cout << "Time for our kernel : " << kernel_time << endl;
cudaMemcpy(host_c, dev_c, size_result * sizeof(double), cudaMemcpyDeviceToHost);
//Save output file for testing
/*ofstream output_stream;
output_stream.open("outputer3.txt");
for (int r = 0; r < columns; r++) {
for (int c = 0; c < columns; c++) {
output_stream << host_c[r * columns + c] << "\t";
}
output_stream << endl;
}
output_stream.close();*/
// Free allocated space for arrays
cudaFree(dev_a);
cudaFree(dev_c);
free(host_a);
free(host_c);
//--------------------------------
return 0;
}
|
184ee88d5d6e519614de0998cfed8b6616c79c22.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include "rism3d.h"
void RISM3D :: initialize_g() {
__global__ void set_g(double4 * dgv, double * dg2,
double bx, double by, double bz,
int nx, int ny, int nz);
indga = new int[ce -> ngrid];
double * g2 = new double[ce -> ngrid];
int * indg2 = new int[ce -> ngrid];
double * dg2;
hipMalloc(&dgv, ce -> ngrid * sizeof(double4));
hipMalloc(&dg2, ce -> ngrid * sizeof(double));
hipLaunchKernelGGL(( set_g) , dim3(g), dim3(b) , 0, 0, dgv, dg2, ce -> box[0], ce -> box[1], ce -> box[2],
ce -> grid[0], ce -> grid[1], ce -> grid[2]);
hipMemcpyAsync(g2, dg2, ce -> ngrid * sizeof(double), hipMemcpyDefault);
thrust::device_vector<int> indg(ce -> ngrid);
thrust::device_ptr<double> dg2_ptr(dg2);
thrust::sequence(indg.begin(), indg.end());
thrust::sort_by_key(dg2_ptr, dg2_ptr + ce -> ngrid, indg.begin());
thrust::copy(indg.begin(), indg.end(), indg2);
double ga2o = - 1.0;
nga = 0;
for (int igk = 0; igk < ce -> ngrid; ++igk) {
int igs = indg2[igk];
double ga2 = g2[igs];
if (ga2 > ga2o) {
++nga;
ga . push_back (sqrt(ga2));
ga2o = ga2;
}
indga[igs] = nga - 1;
}
hipFree(dg2);
delete[] g2;
delete[] indg2;
}
__global__ void set_g(double4 * dgv, double * dg2,
double bx, double by, double bz,
int nx, int ny, int nz) {
unsigned int ip = threadIdx.x + blockIdx.x * blockDim.x
+ blockIdx.y * blockDim.x * gridDim.x;
dgv[ip].x = 2.0 * M_PI * (threadIdx.x - nx / 2.0 + 0.5) / bx;
dgv[ip].y = 2.0 * M_PI * (blockIdx.x - ny / 2.0 + 0.5) / by;
dgv[ip].z = 2.0 * M_PI * (blockIdx.y - nz / 2.0 + 0.5) / bz;
dg2[ip] = dgv[ip].x * dgv[ip].x + dgv[ip].y * dgv[ip].y
+ dgv[ip].z * dgv[ip].z;
}
|
184ee88d5d6e519614de0998cfed8b6616c79c22.cu
|
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include "rism3d.h"
void RISM3D :: initialize_g() {
__global__ void set_g(double4 * dgv, double * dg2,
double bx, double by, double bz,
int nx, int ny, int nz);
indga = new int[ce -> ngrid];
double * g2 = new double[ce -> ngrid];
int * indg2 = new int[ce -> ngrid];
double * dg2;
cudaMalloc(&dgv, ce -> ngrid * sizeof(double4));
cudaMalloc(&dg2, ce -> ngrid * sizeof(double));
set_g <<< g, b >>> (dgv, dg2, ce -> box[0], ce -> box[1], ce -> box[2],
ce -> grid[0], ce -> grid[1], ce -> grid[2]);
cudaMemcpyAsync(g2, dg2, ce -> ngrid * sizeof(double), cudaMemcpyDefault);
thrust::device_vector<int> indg(ce -> ngrid);
thrust::device_ptr<double> dg2_ptr(dg2);
thrust::sequence(indg.begin(), indg.end());
thrust::sort_by_key(dg2_ptr, dg2_ptr + ce -> ngrid, indg.begin());
thrust::copy(indg.begin(), indg.end(), indg2);
double ga2o = - 1.0;
nga = 0;
for (int igk = 0; igk < ce -> ngrid; ++igk) {
int igs = indg2[igk];
double ga2 = g2[igs];
if (ga2 > ga2o) {
++nga;
ga . push_back (sqrt(ga2));
ga2o = ga2;
}
indga[igs] = nga - 1;
}
cudaFree(dg2);
delete[] g2;
delete[] indg2;
}
__global__ void set_g(double4 * dgv, double * dg2,
double bx, double by, double bz,
int nx, int ny, int nz) {
unsigned int ip = threadIdx.x + blockIdx.x * blockDim.x
+ blockIdx.y * blockDim.x * gridDim.x;
dgv[ip].x = 2.0 * M_PI * (threadIdx.x - nx / 2.0 + 0.5) / bx;
dgv[ip].y = 2.0 * M_PI * (blockIdx.x - ny / 2.0 + 0.5) / by;
dgv[ip].z = 2.0 * M_PI * (blockIdx.y - nz / 2.0 + 0.5) / bz;
dg2[ip] = dgv[ip].x * dgv[ip].x + dgv[ip].y * dgv[ip].y
+ dgv[ip].z * dgv[ip].z;
}
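// Hedged illustration (an assumption, not called by the solver): how thrust builds the
// index permutation used above in initialize_g(), i.e. grid indices sorted by ascending key,
// shown here on a tiny hand-made key vector.
void example_sort_by_key()
{
  thrust::device_vector<double> key(4);
  key[0] = 3.0; key[1] = 1.0; key[2] = 2.0; key[3] = 0.5;
  thrust::device_vector<int> idx(4);
  thrust::sequence(idx.begin(), idx.end());            // idx = {0, 1, 2, 3}
  thrust::sort_by_key(key.begin(), key.end(), idx.begin());
  // key is now {0.5, 1.0, 2.0, 3.0} and idx holds the original positions {3, 1, 2, 0}
}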
|
bd3da88a1abc5db0930e6df2eeaa172129e49b61.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 1024
#define DATA_TYPE float
// GPU error check
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true){
if (code != hipSuccess) {
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__global__ void power_microbench(float *data1, float *data2, uint32_t *data3, uint32_t *data4, float *res, int div, unsigned iterations) {
int gid = blockIdx.x*blockDim.x + threadIdx.x;
register float s1 = data1[gid];
register float s2 = data2[gid];
register uint32_t s3 = data3[gid];
register uint32_t s4 = data4[gid];
register float result = 0;
register float Value1=0;
register uint32_t Value2=0;
register float Value3=0;
// synchronize all threads
asm volatile ("bar.sync 0;");
if((gid%32)<div){
//ROI
#pragma unroll 100
for (unsigned j=0 ; j<iterations ; ++j) {
asm volatile ("{\t\n"
"add.f32 %0, %1, %0;\n\t"
"add.u32 %2, %3, %2;\n\t"
"add.u32 %2, %3, %2;\n\t"
// "add.u32 %2, %2, %0;\n\t"
// "mul.lo.u32 %1, %0, %2;\n\t"
"sin.approx.f32 %4, %5;\n\t"
"lg2.approx.f32 %5, %4;\n\t"
"sqrt.rn.f32 %5, %5;\n\t"
"ex2.approx.f32 %5, %5;\n\t"
"fma.rn.f32 %1, %1, %1 , %0;\n\t"
"mad.lo.u32 %3, %3, %3 , %2;\n\t"
"}" : "+f"(Value1),"+f"(s1),"+r"(s3),"+r"(Value2),"+f"(Value3),"+f"(s2)
);
// result=s1+s2;
// Value2=s1-s2;
// result+=Value1;
// result*=Value1;
// Value1=Value2+result;
// result=Value1+Value2;
}
}
// synchronize all threads
asm volatile("bar.sync 0;");
// write data back to memory
res[gid] = Value1 + (float)Value2 + Value3;
}
int main(int argc, char** argv){
unsigned iterations;
int blocks;
int div;
if (argc != 4){
fprintf(stderr,"usage: %s #iterations #cores #ActiveThreadsperWarp\n",argv[0]);
exit(1);
}
else {
iterations = atoi(argv[1]);
blocks = atoi(argv[2]);
div = atoi(argv[3]);
}
printf("Power Microbenchmarks with iterations %lu\n",iterations);
int total_threads = THREADS_PER_BLOCK*blocks;
DATA_TYPE *data1 = (DATA_TYPE*) malloc(total_threads*sizeof(DATA_TYPE));
DATA_TYPE *data2 = (DATA_TYPE*) malloc(total_threads*sizeof(DATA_TYPE));
uint32_t *data3 = (uint32_t*) malloc(total_threads*sizeof(uint32_t));
uint32_t *data4 = (uint32_t*) malloc(total_threads*sizeof(uint32_t));
DATA_TYPE *res = (DATA_TYPE*) malloc(total_threads*sizeof(DATA_TYPE));
DATA_TYPE *data1_g;
DATA_TYPE *data2_g;
uint32_t *data3_g;
uint32_t *data4_g;
DATA_TYPE *res_g;
// seed once; re-seeding with time(0) inside the loop would make every element identical
srand((unsigned)time(0));
for (int i=0; i<total_threads; i++) {
data1[i] = (DATA_TYPE) rand() / RAND_MAX;
data2[i] = (DATA_TYPE) rand() / RAND_MAX;
data3[i] = (uint32_t) rand() / RAND_MAX;
data4[i] = (uint32_t) rand() / RAND_MAX;
}
gpuErrchk( hipMalloc(&data1_g, total_threads*sizeof(DATA_TYPE)) );
gpuErrchk( hipMalloc(&data2_g, total_threads*sizeof(DATA_TYPE)) );
gpuErrchk( hipMalloc(&data3_g, total_threads*sizeof(uint32_t)) );
gpuErrchk( hipMalloc(&data4_g, total_threads*sizeof(uint32_t)) );
gpuErrchk( hipMalloc(&res_g, total_threads*sizeof(DATA_TYPE)) );
gpuErrchk( hipMemcpy(data1_g, data1, total_threads*sizeof(DATA_TYPE), hipMemcpyHostToDevice) );
gpuErrchk( hipMemcpy(data2_g, data2, total_threads*sizeof(DATA_TYPE), hipMemcpyHostToDevice) );
gpuErrchk( hipMemcpy(data3_g, data3, total_threads*sizeof(uint32_t), hipMemcpyHostToDevice) );
gpuErrchk( hipMemcpy(data4_g, data4, total_threads*sizeof(uint32_t), hipMemcpyHostToDevice) );
hipLaunchKernelGGL((power_microbench), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, data1_g, data2_g, data3_g, data4_g, res_g, div, iterations);
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipMemcpy(res, res_g, total_threads*sizeof(DATA_TYPE), hipMemcpyDeviceToHost) );
hipFree(data1_g);
hipFree(data2_g);
hipFree(data3_g);
hipFree(data4_g);
hipFree(res_g);
free(data1);
free(data2);
free(data3);
free(data4);
free(res);
return 0;
}
|
bd3da88a1abc5db0930e6df2eeaa172129e49b61.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define THREADS_PER_BLOCK 1024
#define DATA_TYPE float
// GPU error check
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true){
if (code != cudaSuccess) {
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__global__ void power_microbench(float *data1, float *data2, uint32_t *data3, uint32_t *data4, float *res, int div, unsigned iterations) {
int gid = blockIdx.x*blockDim.x + threadIdx.x;
register float s1 = data1[gid];
register float s2 = data2[gid];
register uint32_t s3 = data3[gid];
register uint32_t s4 = data4[gid];
register float result = 0;
register float Value1=0;
register uint32_t Value2=0;
register float Value3=0;
// synchronize all threads
asm volatile ("bar.sync 0;");
if((gid%32)<div){
//ROI
#pragma unroll 100
for (unsigned j=0 ; j<iterations ; ++j) {
asm volatile ("{\t\n"
"add.f32 %0, %1, %0;\n\t"
"add.u32 %2, %3, %2;\n\t"
"add.u32 %2, %3, %2;\n\t"
// "add.u32 %2, %2, %0;\n\t"
// "mul.lo.u32 %1, %0, %2;\n\t"
"sin.approx.f32 %4, %5;\n\t"
"lg2.approx.f32 %5, %4;\n\t"
"sqrt.rn.f32 %5, %5;\n\t"
"ex2.approx.f32 %5, %5;\n\t"
"fma.rn.f32 %1, %1, %1 , %0;\n\t"
"mad.lo.u32 %3, %3, %3 , %2;\n\t"
"}" : "+f"(Value1),"+f"(s1),"+r"(s3),"+r"(Value2),"+f"(Value3),"+f"(s2)
);
// result=s1+s2;
// Value2=s1-s2;
// result+=Value1;
// result*=Value1;
// Value1=Value2+result;
// result=Value1+Value2;
}
}
// synchronize all threads
asm volatile("bar.sync 0;");
// write data back to memory
res[gid] = Value1 + (float)Value2 + Value3;
}
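// Hedged sketch (an assumption, not part of the original benchmark): a helper that times a
// single launch with CUDA events, reusing the gpuErrchk macro defined above; it is not called
// from main() and is included only to show how the kernel's runtime could be measured.
float time_power_microbench(float* d1, float* d2, uint32_t* d3, uint32_t* d4,
                            float* r, int div, unsigned iters, int blocks) {
  cudaEvent_t start, stop;
  float ms = 0.0f;
  gpuErrchk( cudaEventCreate(&start) );
  gpuErrchk( cudaEventCreate(&stop) );
  gpuErrchk( cudaEventRecord(start) );
  power_microbench<<<blocks, THREADS_PER_BLOCK>>>(d1, d2, d3, d4, r, div, iters);
  gpuErrchk( cudaEventRecord(stop) );
  gpuErrchk( cudaEventSynchronize(stop) );
  gpuErrchk( cudaEventElapsedTime(&ms, start, stop) );
  gpuErrchk( cudaEventDestroy(start) );
  gpuErrchk( cudaEventDestroy(stop) );
  return ms;
}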
int main(int argc, char** argv){
unsigned iterations;
int blocks;
int div;
if (argc != 4){
fprintf(stderr,"usage: %s #iterations #cores #ActiveThreadsperWarp\n",argv[0]);
exit(1);
}
else {
iterations = atoi(argv[1]);
blocks = atoi(argv[2]);
div = atoi(argv[3]);
}
printf("Power Microbenchmarks with iterations %lu\n",iterations);
int total_threads = THREADS_PER_BLOCK*blocks;
DATA_TYPE *data1 = (DATA_TYPE*) malloc(total_threads*sizeof(DATA_TYPE));
DATA_TYPE *data2 = (DATA_TYPE*) malloc(total_threads*sizeof(DATA_TYPE));
uint32_t *data3 = (uint32_t*) malloc(total_threads*sizeof(uint32_t));
uint32_t *data4 = (uint32_t*) malloc(total_threads*sizeof(uint32_t));
DATA_TYPE *res = (DATA_TYPE*) malloc(total_threads*sizeof(DATA_TYPE));
DATA_TYPE *data1_g;
DATA_TYPE *data2_g;
uint32_t *data3_g;
uint32_t *data4_g;
DATA_TYPE *res_g;
// seed once; re-seeding with time(0) inside the loop would make every element identical
srand((unsigned)time(0));
for (int i=0; i<total_threads; i++) {
data1[i] = (DATA_TYPE) rand() / RAND_MAX;
data2[i] = (DATA_TYPE) rand() / RAND_MAX;
data3[i] = (uint32_t) rand() / RAND_MAX;
data4[i] = (uint32_t) rand() / RAND_MAX;
}
gpuErrchk( cudaMalloc(&data1_g, total_threads*sizeof(DATA_TYPE)) );
gpuErrchk( cudaMalloc(&data2_g, total_threads*sizeof(DATA_TYPE)) );
gpuErrchk( cudaMalloc(&data3_g, total_threads*sizeof(uint32_t)) );
gpuErrchk( cudaMalloc(&data4_g, total_threads*sizeof(uint32_t)) );
gpuErrchk( cudaMalloc(&res_g, total_threads*sizeof(DATA_TYPE)) );
gpuErrchk( cudaMemcpy(data1_g, data1, total_threads*sizeof(DATA_TYPE), cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(data2_g, data2, total_threads*sizeof(DATA_TYPE), cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(data3_g, data3, total_threads*sizeof(uint32_t), cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(data4_g, data4, total_threads*sizeof(uint32_t), cudaMemcpyHostToDevice) );
power_microbench<<<blocks,THREADS_PER_BLOCK>>>(data1_g, data2_g, data3_g, data4_g, res_g, div, iterations);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaMemcpy(res, res_g, total_threads*sizeof(DATA_TYPE), cudaMemcpyDeviceToHost) );
cudaFree(data1_g);
cudaFree(data2_g);
cudaFree(data3_g);
cudaFree(data4_g);
cudaFree(res_g);
free(data1);
free(data2);
free(data3);
free(data4);
free(res);
return 0;
}
|
7a47aa90b88f22c4011af31f1c0fc7bf64c6c22a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void kernel(int a,int b,int *c)
{
*c = a + b;
}
int main()
{
int c;
int *dev_c;
hipMalloc((void**)&dev_c, sizeof(int));
hipLaunchKernelGGL(( kernel) , dim3(1), dim3(1) , 0, 0, 2,7,dev_c);
hipMemcpy(&c, dev_c, sizeof(int), hipMemcpyDeviceToHost);
printf("2+7=%d\n",c);
hipFree(dev_c);
return 0;
}
|
7a47aa90b88f22c4011af31f1c0fc7bf64c6c22a.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void kernel(int a,int b,int *c)
{
*c = a + b;
}
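// Hedged sketch (an assumption, not part of the original sample): a minimal error-reporting
// helper that the runtime calls in main() could be wrapped with, e.g.
// check(cudaMalloc((void**)&dev_c, sizeof(int)), "cudaMalloc").
void check(cudaError_t err, const char* what)
{
    if (err != cudaSuccess) {
        printf("%s failed: %s\n", what, cudaGetErrorString(err));
    }
}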
int main()
{
int c;
int *dev_c;
cudaMalloc((void**)&dev_c, sizeof(int));
kernel <<<1, 1 >>> (2,7,dev_c);
cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost);
printf("2+7=%d\n",c);
cudaFree(dev_c);
return 0;
}
|
b46fd9c02108929ad898739f9a6c045a1dab4195.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstdio>
#include <layers/fused_relu_bias_fully_connected_layer.hpp>
#include <linalg/reduce.cuh>
#include <utils.cuh>
#include <utils.hpp>
#include "common.hpp"
namespace HugeCTR {
namespace {
template <int BLOCK_WIDTH>
__global__ void reverse_add_bias_and_re_kernel(float* bias, __half* dRelu, __half* middle,
const __half* top, int ldn) {
__shared__ __half2 elem[32][BLOCK_WIDTH + 1];
__shared__ __half2 accu[BLOCK_WIDTH];
const __half2 zero = TypeFunc<__half2>::zero();
__half2* middle2 = reinterpret_cast<__half2*>(middle);
__half2* dRelu2 = reinterpret_cast<__half2*>(dRelu);
const __half2* top2 = reinterpret_cast<const __half2*>(top);
int lx, ly, gi;
int gx_offset = blockIdx.x * BLOCK_WIDTH;
int gy_offset = blockIdx.y * 32;
for (int i = 0; i < BLOCK_WIDTH * 32; i += blockDim.x) {
lx = threadIdx.x % BLOCK_WIDTH;
ly = (i + threadIdx.x) / BLOCK_WIDTH;
gi = (ly + gy_offset) * ldn + (lx + gx_offset);
__half2 t = middle2[gi];
__half2 mask = __hgt2(t, zero);
t = __hmul2(__ldg(top2 + gi), mask);
dRelu2[gi] = t;
elem[ly][lx] = t;
}
__syncthreads();
for (int i = 0; i < BLOCK_WIDTH * 32; i += blockDim.x) {
lx = (i + threadIdx.x) / 32;
ly = threadIdx.x % 32;
__half2 val = warpReduceSum(elem[ly][lx]);
if (ly == 0) {
accu[lx] = val;
}
}
__syncthreads();
if (threadIdx.x < BLOCK_WIDTH * 2) {
__half2 val = accu[threadIdx.x / 2];
float fval = (threadIdx.x % 2 == 0) ? __low2float(val) : __high2float(val);
atomicAdd(bias + gx_offset * 2 + threadIdx.x, fval);
}
}
} // namespace
FusedReluBiasFullyConnectedLayer::FusedReluBiasFullyConnectedLayer(
const std::shared_ptr<BufferBlock2<float>>& master_weights_buff,
const std::shared_ptr<BufferBlock2<__half>>& weights_buff,
const std::shared_ptr<BufferBlock2<__half>>& weights_grad_buff,
const std::shared_ptr<GeneralBuffer2<CudaAllocator>>& blobs_buff,
const Tensor2<__half>& train_in_tensor, const Tensor2<__half>& mask_in_tensor,
const Tensor2<__half>& dRelu_in_tensor, const Tensor2<__half>& db_in_tensor,
const Tensor2<__half>& train_out_tensor, const Tensor2<__half>& mask_out_tensor,
const Tensor2<__half>& dRelu_out_tensor, Tensor2<__half>& db_out_tensor,
const std::shared_ptr<GPUResource>& gpu_resource, const FcPosition_t& pos,
const Activation_t& act, const bool& skip_dgrad, std::vector<Initializer_t> initializer_types)
: Layer(gpu_resource, initializer_types),
balgo_k_(CUBLAS_GEMM_DEFAULT_TENSOR_OP),
balgo_x_(CUBLAS_GEMM_DEFAULT_TENSOR_OP),
balgo_b_(CUBLAS_GEMM_DEFAULT_TENSOR_OP),
pos_(pos),
act_(act),
skip_dgrad_(skip_dgrad) {
const auto& bottom_tensor_dim = train_in_tensor.get_dimensions();
const auto& top_tensor_dim = train_out_tensor.get_dimensions();
if (bottom_tensor_dim.size() != 2 || top_tensor_dim.size() != 2) {
CK_THROW_(Error_t::WrongInput, "input or output tensor doesn't have two dimensions");
}
size_t m = bottom_tensor_dim[0];
size_t n = top_tensor_dim[1];
size_t k = bottom_tensor_dim[1];
if ((pos_ == FcPosition_t::Tail || pos_ == FcPosition_t::Isolated) &&
act_ != Activation_t::None) {
if (m % 32 != 0 || n % 64 != 0) {
CK_THROW_(
Error_t::WrongInput,
"The first dimension of bottom tensor must be a multiple of 32, the second dimension "
"of top tensor must be a multiple of 64.");
}
}
std::vector<size_t> kernel_dim = {k, n};
std::vector<size_t> bias_dim = {1, n};
std::vector<size_t> identity_dim = {1, m};
{
Tensor2<float> tensor;
master_weights_buff->reserve(kernel_dim, &tensor);
weights_.push_back(tensor);
}
{
Tensor2<float> tensor;
master_weights_buff->reserve(bias_dim, &tensor);
weights_.push_back(tensor);
}
{
Tensor2<__half> tensor;
weights_buff->reserve(kernel_dim, &tensor);
weights_half_.push_back(tensor);
}
{
Tensor2<__half> tensor;
weights_buff->reserve(bias_dim, &tensor);
weights_half_.push_back(tensor);
}
{
Tensor2<__half> tensor;
weights_grad_buff->reserve(kernel_dim, &tensor);
weights_grad_.push_back(tensor);
}
{
Tensor2<__half> tensor;
weights_grad_buff->reserve(bias_dim, &db_out_tensor);
weights_grad_.push_back(db_out_tensor);
}
blobs_buff->reserve(identity_dim, &identity_tensor_);
train_in_tensor_ = train_in_tensor;
if (pos_ == FcPosition_t::Head || pos_ == FcPosition_t::Isolated)
mask_in_tensor_ = train_in_tensor;
else {
mask_in_tensor_ = mask_in_tensor;
dRelu_in_tensor_ = dRelu_in_tensor;
db_in_tensor_ = db_in_tensor;
}
train_out_tensor_ = train_out_tensor;
mask_out_tensor_ = mask_out_tensor;
dRelu_out_tensor_ = dRelu_out_tensor;
db_out_tensor_ = db_out_tensor;
blobs_buff->reserve(kernel_dim, &bias_grad_tensor_);
std::vector<size_t> mask_dim = {m, n};
blobs_buff->reserve(mask_dim, &mask_in_tensor_temp_);
}
void FusedReluBiasFullyConnectedLayer::initialize() {
// TODO: We need different bottom desc based on is_train or not
const auto& bottom_tensor_dim = get_bottom_tensor_fprop(true).get_dimensions();
const auto& top_tensor_dim = train_out_tensor_.get_dimensions();
__half* identity = identity_tensor_.get_ptr();
int m = bottom_tensor_dim[0];
int n = top_tensor_dim[1];
int k = bottom_tensor_dim[1];
hipLaunchKernelGGL(( initialize_array), dim3((m - 1) / 1024 + 1), dim3(1024), 0, get_gpu().get_stream(), identity, m,
__float2half(1.0f));
CK_CUBLAS_THROW_(cublasLtMatmulDescCreate(&cublas_op_desc_, CUBLAS_COMPUTE_32F, HIP_R_32F));
hipblasOperation_t trans = HIPBLAS_OP_N;
CK_CUBLAS_THROW_(cublasLtMatmulDescSetAttribute(cublas_op_desc_, CUBLASLT_MATMUL_DESC_TRANSA,
&trans, sizeof(trans)));
CK_CUBLAS_THROW_(cublasLtMatmulDescSetAttribute(cublas_op_desc_, CUBLASLT_MATMUL_DESC_TRANSB,
&trans, sizeof(trans)));
cublasLtEpilogue_t epi = CUBLASLT_EPILOGUE_RELU_AUX_BIAS;
if (act_ == Activation_t::None) epi = CUBLASLT_EPILOGUE_BIAS;
CK_CUBLAS_THROW_(cublasLtMatmulDescSetAttribute(cublas_op_desc_, CUBLASLT_MATMUL_DESC_EPILOGUE,
&epi, sizeof(epi)));
const __half* bias = weights_half_[1].get_ptr();
CK_CUBLAS_THROW_(cublasLtMatmulDescSetAttribute(
cublas_op_desc_, CUBLASLT_MATMUL_DESC_BIAS_POINTER, &bias, sizeof(bias)));
if (act_ != Activation_t::None) {
__half* reluMask = mask_out_tensor_.get_ptr();
cublasLtMatmulDescSetAttribute(cublas_op_desc_, CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER,
&reluMask, sizeof(reluMask));
long reluMaskLd = n;
cublasLtMatmulDescSetAttribute(cublas_op_desc_, CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_LD,
&reluMaskLd, sizeof(reluMaskLd));
}
CK_CUBLAS_THROW_(cublasLtMatrixLayoutCreate(&cublas_kernel_desc_, HIP_R_16F, n, k, n));
CK_CUBLAS_THROW_(cublasLtMatrixLayoutCreate(&cublas_bottom_desc_, HIP_R_16F, k, m, k));
CK_CUBLAS_THROW_(cublasLtMatrixLayoutCreate(&cublas_top_desc_, HIP_R_16F, n, m, n));
CK_CUBLAS_THROW_(cublasLtMatmulPreferenceCreate(&cublas_preference_));
cublaslt_workspace_size_ = 1024 * 1024 * 16;  // Set it to 16 MB for now
CK_CUDA_THROW_(hipMalloc(&cublaslt_workspace_, cublaslt_workspace_size_));
CK_CUBLAS_THROW_(cublasLtMatmulPreferenceSetAttribute(
cublas_preference_, CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES, &cublaslt_workspace_size_,
sizeof(cublaslt_workspace_size_)));
uint32_t pointer_mode = CUBLASLT_POINTER_MODE_MASK_HOST;
CK_CUBLAS_THROW_(cublasLtMatmulPreferenceSetAttribute(cublas_preference_,
CUBLASLT_MATMUL_PREF_POINTER_MODE_MASK,
&pointer_mode, sizeof(pointer_mode)));
// By default, set the algo to the best estimated heuristic
cublasLtMatmulHeuristicResult_t heuristic_result;
int returned_res = 0;
CK_CUBLAS_THROW_(cublasLtMatmulAlgoGetHeuristic(
get_gpu().get_cublaslt_handle(), cublas_op_desc_, cublas_kernel_desc_, cublas_bottom_desc_,
cublas_top_desc_, cublas_top_desc_, cublas_preference_, 1, &heuristic_result, &returned_res));
memcpy(&falgo_k_, &heuristic_result.algo, sizeof(falgo_k_));
if (returned_res == 0) {
CK_CUBLAS_THROW_(HIPBLAS_STATUS_NOT_SUPPORTED);
}
initialize_bprop();
}
void FusedReluBiasFullyConnectedLayer::initialize_bprop() {
// TODO: We need different bottom desc based on is_train or not
const auto& bottom_tensor_dim = get_bottom_tensor_fprop(true).get_dimensions();
const auto& top_tensor_dim = train_out_tensor_.get_dimensions();
size_t m = bottom_tensor_dim[0];
size_t n = top_tensor_dim[1];
size_t k = bottom_tensor_dim[1];
CK_CUBLAS_THROW_(
cublasLtMatmulDescCreate(&cublas_op_desc_bprop_, CUBLAS_COMPUTE_32F, HIP_R_32F));
hipblasOperation_t transA = HIPBLAS_OP_T;
hipblasOperation_t transB = HIPBLAS_OP_N;
CK_CUBLAS_THROW_(cublasLtMatmulDescSetAttribute(
cublas_op_desc_bprop_, CUBLASLT_MATMUL_DESC_TRANSA, &transA, sizeof(transA)));
CK_CUBLAS_THROW_(cublasLtMatmulDescSetAttribute(
cublas_op_desc_bprop_, CUBLASLT_MATMUL_DESC_TRANSB, &transB, sizeof(transB)));
if (pos_ == FcPosition_t::Head || pos_ == FcPosition_t::Isolated) {
cublasLtEpilogue_t epi = CUBLASLT_EPILOGUE_DEFAULT;
CK_CUBLAS_THROW_(cublasLtMatmulDescSetAttribute(
cublas_op_desc_bprop_, CUBLASLT_MATMUL_DESC_EPILOGUE, &epi, sizeof(epi)));
} else if (pos_ == FcPosition_t::Body || pos_ == FcPosition_t::Tail) {
cublasLtEpilogue_t epi = CUBLASLT_EPILOGUE_DRELU_BGRAD;
cublasLtMatmulDescSetAttribute(cublas_op_desc_bprop_, CUBLASLT_MATMUL_DESC_EPILOGUE, &epi,
sizeof(epi));
__half* bgrad = db_in_tensor_.get_ptr();
cublasLtMatmulDescSetAttribute(cublas_op_desc_bprop_, CUBLASLT_MATMUL_DESC_BIAS_POINTER, &bgrad,
sizeof(bgrad));
__half* reluMask = mask_in_tensor_.get_ptr();
cublasLtMatmulDescSetAttribute(cublas_op_desc_bprop_, CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER,
&reluMask, sizeof(reluMask));
long reluMaskLd = k;
cublasLtMatmulDescSetAttribute(cublas_op_desc_bprop_, CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_LD,
&reluMaskLd, sizeof(reluMaskLd));
}
CK_CUBLAS_THROW_(cublasLtMatrixLayoutCreate(&cublas_dRelu_top_desc_, HIP_R_16F, n, m, n));
CK_CUBLAS_THROW_(cublasLtMatrixLayoutCreate(&cublas_dRelu_bottom_desc_, HIP_R_16F, k, m, k));
CK_CUBLAS_THROW_(cublasLtMatmulPreferenceCreate(&cublas_preference_dRelu_));
cublaslt_workspace_size_ = 1024 * 1024 * 8; // Set it to 8MB for now
CK_CUDA_THROW_(hipMalloc(&cublaslt_workspace_dRelu_, cublaslt_workspace_size_));
CK_CUBLAS_THROW_(cublasLtMatmulPreferenceSetAttribute(
cublas_preference_dRelu_, CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES, &cublaslt_workspace_size_,
sizeof(cublaslt_workspace_size_)));
uint32_t pointer_mode = CUBLASLT_POINTER_MODE_MASK_HOST;
CK_CUBLAS_THROW_(cublasLtMatmulPreferenceSetAttribute(cublas_preference_,
CUBLASLT_MATMUL_PREF_POINTER_MODE_MASK,
&pointer_mode, sizeof(pointer_mode)));
// By default, set the algo to the best estimated heuristic
cublasLtMatmulHeuristicResult_t heuristic_result;
int returned_res = 0;
CK_CUBLAS_THROW_(cublasLtMatmulAlgoGetHeuristic(
get_gpu().get_cublaslt_handle(), cublas_op_desc_bprop_, cublas_kernel_desc_,
cublas_dRelu_top_desc_, cublas_dRelu_bottom_desc_, cublas_dRelu_bottom_desc_,
cublas_preference_dRelu_, 1, &heuristic_result, &returned_res));
memcpy(&balgo_dRelu_, &heuristic_result.algo, sizeof(balgo_dRelu_));
if (returned_res == 0) {
CK_CUBLAS_THROW_(HIPBLAS_STATUS_NOT_SUPPORTED);
}
}
void FusedReluBiasFullyConnectedLayer::fprop(bool is_train) {
CudaDeviceContext context(get_device_id());
PROFILE_RECORD("fused_relu_bias_fully_connected.fprop.start", get_gpu().get_stream());
const __half* kernel = weights_half_[0].get_ptr();
const __half* bias = weights_half_[1].get_ptr();
const __half* bottom = get_bottom_tensor_fprop(is_train).get_ptr();
__half* top_fprop = train_out_tensor_.get_ptr();
__half* mask_out = mask_out_tensor_.get_ptr();
const auto& bottom_tensor_dim = get_bottom_tensor_fprop(is_train).get_dimensions();
const auto& top_tensor_dim = train_out_tensor_.get_dimensions();
size_t m = bottom_tensor_dim[0];
size_t n = top_tensor_dim[1];
size_t k = bottom_tensor_dim[1];
const float alpha = 1.0f;
const float beta = 0.0f;
PROFILE_RECORD("fused_relu_bias_fully_connected.fprop.cublasLtMatmul.start",
get_gpu().get_stream());
CK_CUBLAS_THROW_(cublasLtMatmul(
get_gpu().get_cublaslt_handle(), cublas_op_desc_, &alpha, kernel, cublas_kernel_desc_, bottom,
cublas_bottom_desc_, &beta, top_fprop, cublas_top_desc_, top_fprop, cublas_top_desc_,
&falgo_k_, cublaslt_workspace_, cublaslt_workspace_size_, get_gpu().get_stream()));
PROFILE_RECORD("fused_relu_bias_fully_connected.fprop.cublasLtMatmul.stop",
get_gpu().get_stream());
if ((pos_ == FcPosition_t::Tail || pos_ == FcPosition_t::Isolated) &&
act_ != Activation_t::None) {
size_t len = train_out_tensor_.get_num_elements();
CK_CUDA_THROW_(hipMemcpyAsync(mask_out, top_fprop, len * sizeof(__half),
hipMemcpyDeviceToDevice, get_gpu().get_stream()));
}
PROFILE_RECORD("fused_relu_bias_fully_connected.fprop.stop", get_gpu().get_stream());
#ifndef NDEBUG
hipDeviceSynchronize();
CK_CUDA_THROW_(hipGetLastError());
#endif
}
void FusedReluBiasFullyConnectedLayer::bprop() {
CudaDeviceContext context(get_device_id());
PROFILE_RECORD("fused_relu_bias_fully_connected.bprop.start", get_gpu().get_stream());
const __half* kernel = weights_half_[0].get_ptr();
__half* mask_in = mask_in_tensor_.get_ptr();
const __half* train_out = train_out_tensor_.get_ptr();
__half* mask_out = mask_out_tensor_.get_ptr();
__half* kernel_grad = weights_grad_[0].get_ptr();
__half* bias_grad = weights_grad_[1].get_ptr();
const __half* bottom = get_bottom_tensor_fprop(true).get_ptr();
__half* bottom_bprop = get_bottom_tensor_bprop(true).get_ptr();
float* bias_grad_float = bias_grad_tensor_.get_ptr();
__half* dRelu_top = dRelu_out_tensor_.get_ptr();
const __half* identity = identity_tensor_.get_ptr();
const auto& bottom_tensor_dim = get_bottom_tensor_bprop(true).get_dimensions();
const auto& top_tensor_dim = train_out_tensor_.get_dimensions();
int m = bottom_tensor_dim[0];
int n = top_tensor_dim[1];
int k = bottom_tensor_dim[1];
const float alpha = 1.0f;
const float beta_k = 1.0f;
const float beta_x = 0.0f;
const float beta_b = 0.0f;
PROFILE_RECORD("fused_relu_bias_fully_connected.bprop.reverse_add_bias_and_re_kernel.start",
get_gpu().get_stream());
if (pos_ == FcPosition_t::Tail || pos_ == FcPosition_t::Isolated) {
if (act_ == Activation_t::None) {
CK_CUBLAS_THROW_(hipblasGemmEx(get_gpu().get_cublas_handle(), HIPBLAS_OP_N, HIPBLAS_OP_N, n, 1,
m, &alpha, train_out, HIP_R_16F, n, identity, HIP_R_16F, m,
&beta_b, bias_grad, HIP_R_16F, n, HIP_R_32F, balgo_b_));
} else {
hipLaunchKernelGGL(( initialize_array), dim3((n - 1) / 1024 + 1), dim3(1024), 0, get_gpu().get_stream(), bias_grad_float, n,
0.0f);
dim3 blocks(n / 64, m / 32);
hipLaunchKernelGGL(( reverse_add_bias_and_re_kernel<32>), dim3(blocks), dim3(512), 0, get_gpu().get_stream(),
bias_grad_float, dRelu_top, mask_out, train_out, n / 2);
hipLaunchKernelGGL(( convert_array), dim3((n - 1) / 1024 + 1), dim3(1024), 0, get_gpu().get_stream(), bias_grad,
bias_grad_float, n);
}
}
PROFILE_RECORD("fused_relu_bias_fully_connected.bprop.reverse_add_bias_and_re_kernel.stop",
get_gpu().get_stream());
if (act_ == Activation_t::None) {
dRelu_top = train_out_tensor_.get_ptr();
}
PROFILE_RECORD("fused_relu_bias_fully_connected.bprop.cublasGemmEx_1.start",
get_gpu().get_stream());
CK_CUBLAS_THROW_(hipblasGemmEx(get_gpu().get_cublas_handle(), HIPBLAS_OP_N, HIPBLAS_OP_T, n, k, m,
&alpha, dRelu_top, HIP_R_16F, n, bottom, HIP_R_16F, k, &beta_k,
kernel_grad, HIP_R_16F, n, HIP_R_32F, balgo_k_));
PROFILE_RECORD("fused_relu_bias_fully_connected.bprop.cublasGemmEx_1.stop",
get_gpu().get_stream());
if (skip_dgrad_) return;
if (pos_ == FcPosition_t::Body || pos_ == FcPosition_t::Tail) {
bottom_bprop = dRelu_in_tensor_.get_ptr();
}
PROFILE_RECORD("fused_relu_bias_fully_connected.bprop.cublasGemmEx_2.start",
get_gpu().get_stream());
CK_CUBLAS_THROW_(cublasLtMatmul(
get_gpu().get_cublaslt_handle(), cublas_op_desc_bprop_, &alpha, kernel, cublas_kernel_desc_,
dRelu_top, cublas_dRelu_top_desc_, &beta_x, bottom_bprop, cublas_dRelu_bottom_desc_,
bottom_bprop, cublas_dRelu_bottom_desc_, &balgo_dRelu_, cublaslt_workspace_dRelu_,
cublaslt_workspace_size_, get_gpu().get_stream()));
PROFILE_RECORD("fused_relu_bias_fully_connected.bprop.cublasGemmEx_2.stop",
get_gpu().get_stream());
PROFILE_RECORD("fused_relu_bias_fully_connected.bprop.stop", get_gpu().get_stream());
#ifndef NDEBUG
hipDeviceSynchronize();
CK_CUDA_THROW_(hipGetLastError());
#endif
}
void FusedReluBiasFullyConnectedLayer::search_algorithm() {
// Set to the CUDA device where this layer assigned to
CudaDeviceContext context(get_device_id());
const size_t repeat_num = 100;
const int max_algo_count = 16;
// Device Tensors to be used
__half* bottom = get_bottom_tensor_fprop(true).get_ptr();
__half* top = train_out_tensor_.get_ptr();
__half* kernel = weights_half_[0].get_ptr();
__half* bias = weights_half_[1].get_ptr();
__half* kernel_grad = weights_grad_[0].get_ptr();
__half* bias_grad = weights_grad_[1].get_ptr();
__half* identity = identity_tensor_.get_ptr();
// Tensor dim
const auto& bottom_tensor_dim = get_bottom_tensor_fprop(true).get_dimensions();
const auto& top_tensor_dim = train_out_tensor_.get_dimensions();
int m = bottom_tensor_dim[0];
int n = top_tensor_dim[1];
int k = bottom_tensor_dim[1];
// Record time for each algorithm
float shortestTime = std::numeric_limits<float>::max();
float time;
hipEvent_t start, stop;
CK_CUDA_THROW_(hipEventCreate(&start));
CK_CUDA_THROW_(hipEventCreate(&stop));
cublasLtMatmulHeuristicResult_t heuristic_result[max_algo_count] = {0};
int algo_count = 0;
CK_CUBLAS_THROW_(cublasLtMatmulAlgoGetHeuristic(
get_gpu().get_cublaslt_handle(), cublas_op_desc_, cublas_kernel_desc_, cublas_bottom_desc_,
cublas_top_desc_, cublas_top_desc_, cublas_preference_, max_algo_count, heuristic_result,
&algo_count));
if (algo_count == 0) {
CK_CUBLAS_THROW_(HIPBLAS_STATUS_NOT_SUPPORTED);
}
// if(get_device_id()==0) printf("M: %d, N: %d, K: %d\n", m, n, k);
for (int algoIdx = 0; algoIdx < algo_count; algoIdx++) {
hipblasStatus_t status = HIPBLAS_STATUS_SUCCESS;
const float alpha = 1.0f;
const float beta = 0.0f;
CK_CUDA_THROW_(hipEventRecord(start, get_gpu().get_stream()));
for (size_t i = 0; i < repeat_num && status == HIPBLAS_STATUS_SUCCESS; ++i) {
status =
cublasLtMatmul(get_gpu().get_cublaslt_handle(), cublas_op_desc_, &alpha, kernel,
cublas_kernel_desc_, bottom, cublas_bottom_desc_, &beta, top,
cublas_top_desc_, top, cublas_top_desc_, &heuristic_result[algoIdx].algo,
cublaslt_workspace_, cublaslt_workspace_size_, get_gpu().get_stream());
}
CK_CUDA_THROW_(hipEventRecord(stop, get_gpu().get_stream()));
CK_CUDA_THROW_(hipEventSynchronize(stop));
CK_CUDA_THROW_(hipEventElapsedTime(&time, start, stop));
// Average time (ms) of this algorithm for the fprop GEMM
time = time / repeat_num;
// Skip if the algorithm is not supported for the fprop configuration
if (status != HIPBLAS_STATUS_SUCCESS) {
// printf("The algorithms %d is not supported for fprop, skipped.\n", testAlgo);
continue;
}
// if(get_device_id()==0) printf("Algo: %d, wavesCount: %f, time: %f\n",
// (int)heuristic_result[algoIdx].algo,
// heuristic_result[algoIdx].wavesCount,
// time);
// Record the optimal time and algorithm
if (time < shortestTime) {
shortestTime = time;
memcpy(&falgo_k_, &heuristic_result[algoIdx].algo, sizeof(falgo_k_));
// if(get_device_id()==0) printf("Picked algorithm: %d", heuristic_result[algoIdx].algo);
}
}
// dRelu in backward pass
// Reset shortestTime
shortestTime = std::numeric_limits<float>::max();
cublasLtMatmulHeuristicResult_t heuristic_result_dRelu[max_algo_count] = {0};
int algo_count_dRelu = 0;
CK_CUBLAS_THROW_(cublasLtMatmulAlgoGetHeuristic(
get_gpu().get_cublaslt_handle(), cublas_op_desc_bprop_, cublas_kernel_desc_,
cublas_dRelu_top_desc_, cublas_dRelu_bottom_desc_, cublas_dRelu_bottom_desc_,
cublas_preference_dRelu_, max_algo_count, heuristic_result_dRelu, &algo_count_dRelu));
if (algo_count_dRelu == 0) {
CK_CUBLAS_THROW_(HIPBLAS_STATUS_NOT_SUPPORTED);
}
for (int algoIdx = 0; algoIdx < algo_count_dRelu; algoIdx++) {
hipblasStatus_t status = HIPBLAS_STATUS_SUCCESS;
const float alpha = 1.0f;
const float beta = 0.0f;
CK_CUDA_THROW_(hipEventRecord(start, get_gpu().get_stream()));
for (size_t i = 0; i < repeat_num && status == HIPBLAS_STATUS_SUCCESS; ++i) {
status = cublasLtMatmul(get_gpu().get_cublaslt_handle(), cublas_op_desc_bprop_, &alpha,
kernel, cublas_kernel_desc_, top, cublas_dRelu_top_desc_, &beta,
bottom, cublas_dRelu_bottom_desc_, bottom, cublas_dRelu_bottom_desc_,
&heuristic_result_dRelu[algoIdx].algo, cublaslt_workspace_dRelu_,
cublaslt_workspace_size_, get_gpu().get_stream());
}
CK_CUDA_THROW_(hipEventRecord(stop, get_gpu().get_stream()));
CK_CUDA_THROW_(hipEventSynchronize(stop));
CK_CUDA_THROW_(hipEventElapsedTime(&time, start, stop));
// Average time (ms) of this algorithm for the bprop dRelu matmul
time = time / repeat_num;
// Skip if the algorithm is not supported for this configuration
if (status != HIPBLAS_STATUS_SUCCESS) {
// printf("The algorithms %d is not supported for fprop, skipped.\n", testAlgo);
continue;
}
// Record the optimal time and algorithm
if (time < shortestTime) {
shortestTime = time;
memcpy(&balgo_dRelu_, &heuristic_result_dRelu[algoIdx].algo, sizeof(balgo_dRelu_));
}
}
// Reset shortestTime
shortestTime = std::numeric_limits<float>::max();
// Start, end for search
const hipblasGemmAlgo_t startAlgo = CUBLAS_GEMM_DEFAULT_TENSOR_OP;
const hipblasGemmAlgo_t endAlgo = CUBLAS_GEMM_ALGO15_TENSOR_OP;
// Search all the algorithm for balgo_k_
for (int testAlgo = startAlgo; testAlgo <= endAlgo; testAlgo++) {
hipblasStatus_t status = HIPBLAS_STATUS_SUCCESS;
const float alpha = 1.0f;
const float beta = 1.0f;
// Record start event
CK_CUDA_THROW_(hipEventRecord(start, get_gpu().get_stream()));
for (size_t i = 0; i < repeat_num && status == HIPBLAS_STATUS_SUCCESS; ++i) {
status = hipblasGemmEx(get_gpu().get_cublas_handle(), HIPBLAS_OP_N, HIPBLAS_OP_T, n, k, m,
&alpha, top, HIP_R_16F, n, bottom, HIP_R_16F, k, &beta, kernel_grad,
HIP_R_16F, n, HIP_R_32F, static_cast<hipblasGemmAlgo_t>(testAlgo));
}
CK_CUDA_THROW_(hipEventRecord(stop, get_gpu().get_stream()));
CK_CUDA_THROW_(hipEventSynchronize(stop));
CK_CUDA_THROW_(hipEventElapsedTime(&time, start, stop));
// Average time (ms) of this algorithm for the bprop weight-gradient GEMM
time = time / repeat_num;
// Skip if the algorithm is not supported for this configuration
if (status != HIPBLAS_STATUS_SUCCESS) {
// printf("The algorithms %d is not supported for bprop_W, skipped.\n", testAlgo);
continue;
}
// Record the optimal time and algorithm
if (time < shortestTime) {
shortestTime = time;
balgo_k_ = static_cast<hipblasGemmAlgo_t>(testAlgo);
}
}
// Reset shortestTime
shortestTime = std::numeric_limits<float>::max();
// Search all the algorithm for balgo_b_
for (int testAlgo = startAlgo; testAlgo <= endAlgo; testAlgo++) {
hipblasStatus_t status = HIPBLAS_STATUS_SUCCESS;
const float alpha = 1.0f;
const float beta = 0.0f;
// Record start event
CK_CUDA_THROW_(hipEventRecord(start, get_gpu().get_stream()));
for (size_t i = 0; i < repeat_num && status == HIPBLAS_STATUS_SUCCESS; ++i) {
status = hipblasGemmEx(get_gpu().get_cublas_handle(), HIPBLAS_OP_N, HIPBLAS_OP_N, n, 1, m,
&alpha, top, HIP_R_16F, n, identity, HIP_R_16F, m, &beta, bias_grad,
HIP_R_16F, n, HIP_R_32F, static_cast<hipblasGemmAlgo_t>(testAlgo));
}
CK_CUDA_THROW_(hipEventRecord(stop, get_gpu().get_stream()));
CK_CUDA_THROW_(hipEventSynchronize(stop));
CK_CUDA_THROW_(hipEventElapsedTime(&time, start, stop));
// Average time (ms) of this algorithm for the bprop bias-gradient GEMM
time = time / repeat_num;
// Skip if the algorithm is not supported for this configuration
if (status != HIPBLAS_STATUS_SUCCESS) {
// printf("The algorithms %d is not supported for bprop_W, skipped.\n", testAlgo);
continue;
}
// Record the optimal time and algorithm
if (time < shortestTime) {
shortestTime = time;
balgo_b_ = static_cast<hipblasGemmAlgo_t>(testAlgo);
}
}
// Reset shortestTime
shortestTime = std::numeric_limits<float>::max();
// Search all the algorithm for balgo_x_
for (int testAlgo = startAlgo; testAlgo <= endAlgo; testAlgo++) {
hipblasStatus_t status = HIPBLAS_STATUS_SUCCESS;
const __half alpha = 1.0f;
const __half beta = 0.0f;
// Record start event
CK_CUDA_THROW_(hipEventRecord(start, get_gpu().get_stream()));
for (size_t i = 0; i < repeat_num && status == HIPBLAS_STATUS_SUCCESS; ++i) {
status = hipblasGemmEx(get_gpu().get_cublas_handle(), HIPBLAS_OP_T, HIPBLAS_OP_N, k, m, n,
&alpha, kernel, HIP_R_16F, n, top, HIP_R_16F, n, &beta, bottom,
HIP_R_16F, k, HIP_R_32F, static_cast<hipblasGemmAlgo_t>(testAlgo));
}
CK_CUDA_THROW_(hipEventRecord(stop, get_gpu().get_stream()));
CK_CUDA_THROW_(hipEventSynchronize(stop));
CK_CUDA_THROW_(hipEventElapsedTime(&time, start, stop));
// Average time (ms) of this algorithm for the bprop data-gradient GEMM
time = time / repeat_num;
// Skip if the algorithm is not supported for this configuration
if (status != HIPBLAS_STATUS_SUCCESS) {
// printf("The algorithms %d is not supported for bprop_Xn, skipped.\n", testAlgo);
continue;
}
// Record the optimal time and algorithm
if (time < shortestTime) {
shortestTime = time;
balgo_x_ = static_cast<hipblasGemmAlgo_t>(testAlgo);
}
}
// Print selection information
// printf("The algorithm selection for falgo_k_, balgo_k_, balgo_x_ are: %d, %d and %d.\n",
// (int)falgo_k_ - CUBLAS_GEMM_DEFAULT_TENSOR_OP,
// (int)balgo_k_ - CUBLAS_GEMM_DEFAULT_TENSOR_OP,
// (int)balgo_x_ - CUBLAS_GEMM_DEFAULT_TENSOR_OP);
// Output msg
// MESSAGE_("The fully-connected layer has finished choosing the algorithm for cublas Gemm.");
// Clean-up
CK_CUDA_THROW_(hipEventDestroy(start));
CK_CUDA_THROW_(hipEventDestroy(stop));
}
std::unique_ptr<DataSimulator> FusedReluBiasFullyConnectedLayer::get_uniform_initializer(
const int index) {
size_t bottom_dim = get_bottom_tensor_fprop(true).get_dimensions()[1];
size_t top_dim = train_out_tensor_.get_dimensions()[1];
float limit = 1.0f / ((0 == index ? bottom_dim : 0) + top_dim);
return std::make_unique<UniformDataSimulator>(-1 * limit, limit);
}
std::unique_ptr<DataSimulator> FusedReluBiasFullyConnectedLayer::get_xavier_uniform_initializer(
const int index) {
size_t bottom_dim = get_bottom_tensor_fprop(true).get_dimensions()[1];
size_t top_dim = train_out_tensor_.get_dimensions()[1];
return std::make_unique<VarianceScalingSimulator>(1.f, data_simu::Mode_t::Fan_avg,
data_simu::Distribution_t::Uniform,
0 == index ? bottom_dim : 0, top_dim);
}
std::unique_ptr<DataSimulator> FusedReluBiasFullyConnectedLayer::get_xavier_norm_initializer(
const int index) {
size_t bottom_dim = get_bottom_tensor_fprop(true).get_dimensions()[1];
size_t top_dim = train_out_tensor_.get_dimensions()[1];
return std::make_unique<VarianceScalingSimulator>(1.f, data_simu::Mode_t::Fan_avg,
data_simu::Distribution_t::Norm,
0 == index ? bottom_dim : 0, top_dim);
}
std::unique_ptr<DataSimulator> FusedReluBiasFullyConnectedLayer::get_default_initializer(
const int index) {
size_t bottom_dim = get_bottom_tensor_fprop(true).get_dimensions()[1];
size_t top_dim = train_out_tensor_.get_dimensions()[1];
std::unique_ptr<DataSimulator> simu(nullptr);
if (0 == index) {
simu.reset(new VarianceScalingSimulator(1.f, data_simu::Mode_t::Fan_avg,
data_simu::Distribution_t::Norm, bottom_dim, top_dim));
} else if (1 == index) {
float stddev = sqrt(1.f / top_dim);
simu.reset(new GaussianDataSimulator(0, stddev, -2 * stddev, 2 * stddev));
} else {
CK_THROW_(Error_t::OutOfBound, "index != {0, 1}.");
}
return simu;
}
} // namespace HugeCTR
|
b46fd9c02108929ad898739f9a6c045a1dab4195.cu
|
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstdio>
#include <layers/fused_relu_bias_fully_connected_layer.hpp>
#include <linalg/reduce.cuh>
#include <utils.cuh>
#include <utils.hpp>
#include "common.hpp"
namespace HugeCTR {
namespace {
template <int BLOCK_WIDTH>
__global__ void reverse_add_bias_and_re_kernel(float* bias, __half* dRelu, __half* middle,
const __half* top, int ldn) {
__shared__ __half2 elem[32][BLOCK_WIDTH + 1];
__shared__ __half2 accu[BLOCK_WIDTH];
const __half2 zero = TypeFunc<__half2>::zero();
__half2* middle2 = reinterpret_cast<__half2*>(middle);
__half2* dRelu2 = reinterpret_cast<__half2*>(dRelu);
const __half2* top2 = reinterpret_cast<const __half2*>(top);
int lx, ly, gi;
int gx_offset = blockIdx.x * BLOCK_WIDTH;
int gy_offset = blockIdx.y * 32;
for (int i = 0; i < BLOCK_WIDTH * 32; i += blockDim.x) {
lx = threadIdx.x % BLOCK_WIDTH;
ly = (i + threadIdx.x) / BLOCK_WIDTH;
gi = (ly + gy_offset) * ldn + (lx + gx_offset);
__half2 t = middle2[gi];
__half2 mask = __hgt2(t, zero);
t = __hmul2(__ldg(top2 + gi), mask);
dRelu2[gi] = t;
elem[ly][lx] = t;
}
__syncthreads();
for (int i = 0; i < BLOCK_WIDTH * 32; i += blockDim.x) {
lx = (i + threadIdx.x) / 32;
ly = threadIdx.x % 32;
__half2 val = warpReduceSum(elem[ly][lx]);
if (ly == 0) {
accu[lx] = val;
}
}
__syncthreads();
if (threadIdx.x < BLOCK_WIDTH * 2) {
__half2 val = accu[threadIdx.x / 2];
float fval = (threadIdx.x % 2 == 0) ? __low2float(val) : __high2float(val);
atomicAdd(bias + gx_offset * 2 + threadIdx.x, fval);
}
}
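// Hedged illustration (an assumption, not used by the layer): the half2 ReLU-mask idiom from
// the kernel above, isolated. __hgt2 returns 1.0 in each lane where the forward output was
// positive, so multiplying the incoming gradient by it applies dReLU to two half values at once.
__device__ __forceinline__ __half2 drelu2_example(__half2 forward_out, __half2 top_grad) {
  const __half2 zero = TypeFunc<__half2>::zero();
  return __hmul2(top_grad, __hgt2(forward_out, zero));
}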
} // namespace
FusedReluBiasFullyConnectedLayer::FusedReluBiasFullyConnectedLayer(
const std::shared_ptr<BufferBlock2<float>>& master_weights_buff,
const std::shared_ptr<BufferBlock2<__half>>& weights_buff,
const std::shared_ptr<BufferBlock2<__half>>& weights_grad_buff,
const std::shared_ptr<GeneralBuffer2<CudaAllocator>>& blobs_buff,
const Tensor2<__half>& train_in_tensor, const Tensor2<__half>& mask_in_tensor,
const Tensor2<__half>& dRelu_in_tensor, const Tensor2<__half>& db_in_tensor,
const Tensor2<__half>& train_out_tensor, const Tensor2<__half>& mask_out_tensor,
const Tensor2<__half>& dRelu_out_tensor, Tensor2<__half>& db_out_tensor,
const std::shared_ptr<GPUResource>& gpu_resource, const FcPosition_t& pos,
const Activation_t& act, const bool& skip_dgrad, std::vector<Initializer_t> initializer_types)
: Layer(gpu_resource, initializer_types),
balgo_k_(CUBLAS_GEMM_DEFAULT_TENSOR_OP),
balgo_x_(CUBLAS_GEMM_DEFAULT_TENSOR_OP),
balgo_b_(CUBLAS_GEMM_DEFAULT_TENSOR_OP),
pos_(pos),
act_(act),
skip_dgrad_(skip_dgrad) {
const auto& bottom_tensor_dim = train_in_tensor.get_dimensions();
const auto& top_tensor_dim = train_out_tensor.get_dimensions();
if (bottom_tensor_dim.size() != 2 || top_tensor_dim.size() != 2) {
CK_THROW_(Error_t::WrongInput, "input or output tensor doesn't have two dimensions");
}
size_t m = bottom_tensor_dim[0];
size_t n = top_tensor_dim[1];
size_t k = bottom_tensor_dim[1];
if ((pos_ == FcPosition_t::Tail || pos_ == FcPosition_t::Isolated) &&
act_ != Activation_t::None) {
if (m % 32 != 0 || n % 64 != 0) {
CK_THROW_(
Error_t::WrongInput,
"The first dimension of bottom tensor must be a multiple of 32, the second dimension "
"of top tensor must be a multiple of 64.");
}
}
std::vector<size_t> kernel_dim = {k, n};
std::vector<size_t> bias_dim = {1, n};
std::vector<size_t> identity_dim = {1, m};
{
Tensor2<float> tensor;
master_weights_buff->reserve(kernel_dim, &tensor);
weights_.push_back(tensor);
}
{
Tensor2<float> tensor;
master_weights_buff->reserve(bias_dim, &tensor);
weights_.push_back(tensor);
}
{
Tensor2<__half> tensor;
weights_buff->reserve(kernel_dim, &tensor);
weights_half_.push_back(tensor);
}
{
Tensor2<__half> tensor;
weights_buff->reserve(bias_dim, &tensor);
weights_half_.push_back(tensor);
}
{
Tensor2<__half> tensor;
weights_grad_buff->reserve(kernel_dim, &tensor);
weights_grad_.push_back(tensor);
}
{
Tensor2<__half> tensor;
weights_grad_buff->reserve(bias_dim, &db_out_tensor);
weights_grad_.push_back(db_out_tensor);
}
blobs_buff->reserve(identity_dim, &identity_tensor_);
train_in_tensor_ = train_in_tensor;
if (pos_ == FcPosition_t::Head || pos_ == FcPosition_t::Isolated)
mask_in_tensor_ = train_in_tensor;
else {
mask_in_tensor_ = mask_in_tensor;
dRelu_in_tensor_ = dRelu_in_tensor;
db_in_tensor_ = db_in_tensor;
}
train_out_tensor_ = train_out_tensor;
mask_out_tensor_ = mask_out_tensor;
dRelu_out_tensor_ = dRelu_out_tensor;
db_out_tensor_ = db_out_tensor;
blobs_buff->reserve(kernel_dim, &bias_grad_tensor_);
std::vector<size_t> mask_dim = {m, n};
blobs_buff->reserve(mask_dim, &mask_in_tensor_temp_);
}
void FusedReluBiasFullyConnectedLayer::initialize() {
// TODO: We need different bottom desc based on is_train or not
const auto& bottom_tensor_dim = get_bottom_tensor_fprop(true).get_dimensions();
const auto& top_tensor_dim = train_out_tensor_.get_dimensions();
__half* identity = identity_tensor_.get_ptr();
int m = bottom_tensor_dim[0];
int n = top_tensor_dim[1];
int k = bottom_tensor_dim[1];
initialize_array<<<(m - 1) / 1024 + 1, 1024, 0, get_gpu().get_stream()>>>(identity, m,
__float2half(1.0f));
CK_CUBLAS_THROW_(cublasLtMatmulDescCreate(&cublas_op_desc_, CUBLAS_COMPUTE_32F, CUDA_R_32F));
cublasOperation_t trans = CUBLAS_OP_N;
CK_CUBLAS_THROW_(cublasLtMatmulDescSetAttribute(cublas_op_desc_, CUBLASLT_MATMUL_DESC_TRANSA,
&trans, sizeof(trans)));
CK_CUBLAS_THROW_(cublasLtMatmulDescSetAttribute(cublas_op_desc_, CUBLASLT_MATMUL_DESC_TRANSB,
&trans, sizeof(trans)));
cublasLtEpilogue_t epi = CUBLASLT_EPILOGUE_RELU_AUX_BIAS;
if (act_ == Activation_t::None) epi = CUBLASLT_EPILOGUE_BIAS;
CK_CUBLAS_THROW_(cublasLtMatmulDescSetAttribute(cublas_op_desc_, CUBLASLT_MATMUL_DESC_EPILOGUE,
&epi, sizeof(epi)));
const __half* bias = weights_half_[1].get_ptr();
CK_CUBLAS_THROW_(cublasLtMatmulDescSetAttribute(
cublas_op_desc_, CUBLASLT_MATMUL_DESC_BIAS_POINTER, &bias, sizeof(bias)));
if (act_ != Activation_t::None) {
__half* reluMask = mask_out_tensor_.get_ptr();
cublasLtMatmulDescSetAttribute(cublas_op_desc_, CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER,
&reluMask, sizeof(reluMask));
long reluMaskLd = n;
cublasLtMatmulDescSetAttribute(cublas_op_desc_, CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_LD,
&reluMaskLd, sizeof(reluMaskLd));
}
CK_CUBLAS_THROW_(cublasLtMatrixLayoutCreate(&cublas_kernel_desc_, CUDA_R_16F, n, k, n));
CK_CUBLAS_THROW_(cublasLtMatrixLayoutCreate(&cublas_bottom_desc_, CUDA_R_16F, k, m, k));
CK_CUBLAS_THROW_(cublasLtMatrixLayoutCreate(&cublas_top_desc_, CUDA_R_16F, n, m, n));
CK_CUBLAS_THROW_(cublasLtMatmulPreferenceCreate(&cublas_preference_));
cublaslt_workspace_size_ = 1024 * 1024 * 16;  // Set it to 16 MB for now
CK_CUDA_THROW_(cudaMalloc(&cublaslt_workspace_, cublaslt_workspace_size_));
CK_CUBLAS_THROW_(cublasLtMatmulPreferenceSetAttribute(
cublas_preference_, CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES, &cublaslt_workspace_size_,
sizeof(cublaslt_workspace_size_)));
uint32_t pointer_mode = CUBLASLT_POINTER_MODE_MASK_HOST;
CK_CUBLAS_THROW_(cublasLtMatmulPreferenceSetAttribute(cublas_preference_,
CUBLASLT_MATMUL_PREF_POINTER_MODE_MASK,
&pointer_mode, sizeof(pointer_mode)));
// By default, set the algo to the best estimated heuristic
cublasLtMatmulHeuristicResult_t heuristic_result;
int returned_res = 0;
CK_CUBLAS_THROW_(cublasLtMatmulAlgoGetHeuristic(
get_gpu().get_cublaslt_handle(), cublas_op_desc_, cublas_kernel_desc_, cublas_bottom_desc_,
cublas_top_desc_, cublas_top_desc_, cublas_preference_, 1, &heuristic_result, &returned_res));
memcpy(&falgo_k_, &heuristic_result.algo, sizeof(falgo_k_));
if (returned_res == 0) {
CK_CUBLAS_THROW_(CUBLAS_STATUS_NOT_SUPPORTED);
}
initialize_bprop();
}
void FusedReluBiasFullyConnectedLayer::initialize_bprop() {
// TODO: We need different bottom desc based on is_train or not
const auto& bottom_tensor_dim = get_bottom_tensor_fprop(true).get_dimensions();
const auto& top_tensor_dim = train_out_tensor_.get_dimensions();
size_t m = bottom_tensor_dim[0];
size_t n = top_tensor_dim[1];
size_t k = bottom_tensor_dim[1];
CK_CUBLAS_THROW_(
cublasLtMatmulDescCreate(&cublas_op_desc_bprop_, CUBLAS_COMPUTE_32F, CUDA_R_32F));
cublasOperation_t transA = CUBLAS_OP_T;
cublasOperation_t transB = CUBLAS_OP_N;
CK_CUBLAS_THROW_(cublasLtMatmulDescSetAttribute(
cublas_op_desc_bprop_, CUBLASLT_MATMUL_DESC_TRANSA, &transA, sizeof(transA)));
CK_CUBLAS_THROW_(cublasLtMatmulDescSetAttribute(
cublas_op_desc_bprop_, CUBLASLT_MATMUL_DESC_TRANSB, &transB, sizeof(transB)));
if (pos_ == FcPosition_t::Head || pos_ == FcPosition_t::Isolated) {
cublasLtEpilogue_t epi = CUBLASLT_EPILOGUE_DEFAULT;
CK_CUBLAS_THROW_(cublasLtMatmulDescSetAttribute(
cublas_op_desc_bprop_, CUBLASLT_MATMUL_DESC_EPILOGUE, &epi, sizeof(epi)));
} else if (pos_ == FcPosition_t::Body || pos_ == FcPosition_t::Tail) {
cublasLtEpilogue_t epi = CUBLASLT_EPILOGUE_DRELU_BGRAD;
cublasLtMatmulDescSetAttribute(cublas_op_desc_bprop_, CUBLASLT_MATMUL_DESC_EPILOGUE, &epi,
sizeof(epi));
__half* bgrad = db_in_tensor_.get_ptr();
cublasLtMatmulDescSetAttribute(cublas_op_desc_bprop_, CUBLASLT_MATMUL_DESC_BIAS_POINTER, &bgrad,
sizeof(bgrad));
__half* reluMask = mask_in_tensor_.get_ptr();
cublasLtMatmulDescSetAttribute(cublas_op_desc_bprop_, CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER,
&reluMask, sizeof(reluMask));
long reluMaskLd = k;
cublasLtMatmulDescSetAttribute(cublas_op_desc_bprop_, CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_LD,
&reluMaskLd, sizeof(reluMaskLd));
}
CK_CUBLAS_THROW_(cublasLtMatrixLayoutCreate(&cublas_dRelu_top_desc_, CUDA_R_16F, n, m, n));
CK_CUBLAS_THROW_(cublasLtMatrixLayoutCreate(&cublas_dRelu_bottom_desc_, CUDA_R_16F, k, m, k));
CK_CUBLAS_THROW_(cublasLtMatmulPreferenceCreate(&cublas_preference_dRelu_));
cublaslt_workspace_size_ = 1024 * 1024 * 8; // Set it to 8MB for now
CK_CUDA_THROW_(cudaMalloc(&cublaslt_workspace_dRelu_, cublaslt_workspace_size_));
CK_CUBLAS_THROW_(cublasLtMatmulPreferenceSetAttribute(
cublas_preference_dRelu_, CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES, &cublaslt_workspace_size_,
sizeof(cublaslt_workspace_size_)));
uint32_t pointer_mode = CUBLASLT_POINTER_MODE_MASK_HOST;
CK_CUBLAS_THROW_(cublasLtMatmulPreferenceSetAttribute(cublas_preference_,
CUBLASLT_MATMUL_PREF_POINTER_MODE_MASK,
&pointer_mode, sizeof(pointer_mode)));
// By default set algo to the best estimated heuristic
cublasLtMatmulHeuristicResult_t heuristic_result;
int returned_res = 0;
CK_CUBLAS_THROW_(cublasLtMatmulAlgoGetHeuristic(
get_gpu().get_cublaslt_handle(), cublas_op_desc_bprop_, cublas_kernel_desc_,
cublas_dRelu_top_desc_, cublas_dRelu_bottom_desc_, cublas_dRelu_bottom_desc_,
cublas_preference_dRelu_, 1, &heuristic_result, &returned_res));
memcpy(&balgo_dRelu_, &heuristic_result.algo, sizeof(balgo_dRelu_));
if (returned_res == 0) {
CK_CUBLAS_THROW_(CUBLAS_STATUS_NOT_SUPPORTED);
}
}
void FusedReluBiasFullyConnectedLayer::fprop(bool is_train) {
CudaDeviceContext context(get_device_id());
PROFILE_RECORD("fused_relu_bias_fully_connected.fprop.start", get_gpu().get_stream());
const __half* kernel = weights_half_[0].get_ptr();
const __half* bias = weights_half_[1].get_ptr();
const __half* bottom = get_bottom_tensor_fprop(is_train).get_ptr();
__half* top_fprop = train_out_tensor_.get_ptr();
__half* mask_out = mask_out_tensor_.get_ptr();
const auto& bottom_tensor_dim = get_bottom_tensor_fprop(is_train).get_dimensions();
const auto& top_tensor_dim = train_out_tensor_.get_dimensions();
size_t m = bottom_tensor_dim[0];
size_t n = top_tensor_dim[1];
size_t k = bottom_tensor_dim[1];
const float alpha = 1.0f;
const float beta = 0.0f;
PROFILE_RECORD("fused_relu_bias_fully_connected.fprop.cublasLtMatmul.start",
get_gpu().get_stream());
CK_CUBLAS_THROW_(cublasLtMatmul(
get_gpu().get_cublaslt_handle(), cublas_op_desc_, &alpha, kernel, cublas_kernel_desc_, bottom,
cublas_bottom_desc_, &beta, top_fprop, cublas_top_desc_, top_fprop, cublas_top_desc_,
&falgo_k_, cublaslt_workspace_, cublaslt_workspace_size_, get_gpu().get_stream()));
PROFILE_RECORD("fused_relu_bias_fully_connected.fprop.cublasLtMatmul.stop",
get_gpu().get_stream());
if ((pos_ == FcPosition_t::Tail || pos_ == FcPosition_t::Isolated) &&
act_ != Activation_t::None) {
size_t len = train_out_tensor_.get_num_elements();
CK_CUDA_THROW_(cudaMemcpyAsync(mask_out, top_fprop, len * sizeof(__half),
cudaMemcpyDeviceToDevice, get_gpu().get_stream()));
}
PROFILE_RECORD("fused_relu_bias_fully_connected.fprop.stop", get_gpu().get_stream());
#ifndef NDEBUG
cudaDeviceSynchronize();
CK_CUDA_THROW_(cudaGetLastError());
#endif
}
void FusedReluBiasFullyConnectedLayer::bprop() {
CudaDeviceContext context(get_device_id());
PROFILE_RECORD("fused_relu_bias_fully_connected.bprop.start", get_gpu().get_stream());
const __half* kernel = weights_half_[0].get_ptr();
__half* mask_in = mask_in_tensor_.get_ptr();
const __half* train_out = train_out_tensor_.get_ptr();
__half* mask_out = mask_out_tensor_.get_ptr();
__half* kernel_grad = weights_grad_[0].get_ptr();
__half* bias_grad = weights_grad_[1].get_ptr();
const __half* bottom = get_bottom_tensor_fprop(true).get_ptr();
__half* bottom_bprop = get_bottom_tensor_bprop(true).get_ptr();
float* bias_grad_float = bias_grad_tensor_.get_ptr();
__half* dRelu_top = dRelu_out_tensor_.get_ptr();
const __half* identity = identity_tensor_.get_ptr();
const auto& bottom_tensor_dim = get_bottom_tensor_bprop(true).get_dimensions();
const auto& top_tensor_dim = train_out_tensor_.get_dimensions();
int m = bottom_tensor_dim[0];
int n = top_tensor_dim[1];
int k = bottom_tensor_dim[1];
const float alpha = 1.0f;
const float beta_k = 1.0f;
const float beta_x = 0.0f;
const float beta_b = 0.0f;
PROFILE_RECORD("fused_relu_bias_fully_connected.bprop.reverse_add_bias_and_re_kernel.start",
get_gpu().get_stream());
if (pos_ == FcPosition_t::Tail || pos_ == FcPosition_t::Isolated) {
if (act_ == Activation_t::None) {
CK_CUBLAS_THROW_(cublasGemmEx(get_gpu().get_cublas_handle(), CUBLAS_OP_N, CUBLAS_OP_N, n, 1,
m, &alpha, train_out, CUDA_R_16F, n, identity, CUDA_R_16F, m,
&beta_b, bias_grad, CUDA_R_16F, n, CUDA_R_32F, balgo_b_));
} else {
initialize_array<<<(n - 1) / 1024 + 1, 1024, 0, get_gpu().get_stream()>>>(bias_grad_float, n,
0.0f);
dim3 blocks(n / 64, m / 32);
reverse_add_bias_and_re_kernel<32><<<blocks, 512, 0, get_gpu().get_stream()>>>(
bias_grad_float, dRelu_top, mask_out, train_out, n / 2);
convert_array<<<(n - 1) / 1024 + 1, 1024, 0, get_gpu().get_stream()>>>(bias_grad,
bias_grad_float, n);
}
}
PROFILE_RECORD("fused_relu_bias_fully_connected.bprop.reverse_add_bias_and_re_kernel.stop",
get_gpu().get_stream());
if (act_ == Activation_t::None) {
dRelu_top = train_out_tensor_.get_ptr();
}
PROFILE_RECORD("fused_relu_bias_fully_connected.bprop.cublasGemmEx_1.start",
get_gpu().get_stream());
CK_CUBLAS_THROW_(cublasGemmEx(get_gpu().get_cublas_handle(), CUBLAS_OP_N, CUBLAS_OP_T, n, k, m,
&alpha, dRelu_top, CUDA_R_16F, n, bottom, CUDA_R_16F, k, &beta_k,
kernel_grad, CUDA_R_16F, n, CUDA_R_32F, balgo_k_));
PROFILE_RECORD("fused_relu_bias_fully_connected.bprop.cublasGemmEx_1.stop",
get_gpu().get_stream());
if (skip_dgrad_) return;
if (pos_ == FcPosition_t::Body || pos_ == FcPosition_t::Tail) {
bottom_bprop = dRelu_in_tensor_.get_ptr();
}
PROFILE_RECORD("fused_relu_bias_fully_connected.bprop.cublasGemmEx_2.start",
get_gpu().get_stream());
CK_CUBLAS_THROW_(cublasLtMatmul(
get_gpu().get_cublaslt_handle(), cublas_op_desc_bprop_, &alpha, kernel, cublas_kernel_desc_,
dRelu_top, cublas_dRelu_top_desc_, &beta_x, bottom_bprop, cublas_dRelu_bottom_desc_,
bottom_bprop, cublas_dRelu_bottom_desc_, &balgo_dRelu_, cublaslt_workspace_dRelu_,
cublaslt_workspace_size_, get_gpu().get_stream()));
PROFILE_RECORD("fused_relu_bias_fully_connected.bprop.cublasGemmEx_2.stop",
get_gpu().get_stream());
PROFILE_RECORD("fused_relu_bias_fully_connected.bprop.stop", get_gpu().get_stream());
#ifndef NDEBUG
cudaDeviceSynchronize();
CK_CUDA_THROW_(cudaGetLastError());
#endif
}
void FusedReluBiasFullyConnectedLayer::search_algorithm() {
// Set to the CUDA device that this layer is assigned to
CudaDeviceContext context(get_device_id());
const size_t repeat_num = 100;
const int max_algo_count = 16;
// Device Tensors to be used
__half* bottom = get_bottom_tensor_fprop(true).get_ptr();
__half* top = train_out_tensor_.get_ptr();
__half* kernel = weights_half_[0].get_ptr();
__half* bias = weights_half_[1].get_ptr();
__half* kernel_grad = weights_grad_[0].get_ptr();
__half* bias_grad = weights_grad_[1].get_ptr();
__half* identity = identity_tensor_.get_ptr();
// Tensor dim
const auto& bottom_tensor_dim = get_bottom_tensor_fprop(true).get_dimensions();
const auto& top_tensor_dim = train_out_tensor_.get_dimensions();
int m = bottom_tensor_dim[0];
int n = top_tensor_dim[1];
int k = bottom_tensor_dim[1];
// Record time for each algorithm
float shortestTime = std::numeric_limits<float>::max();
float time;
cudaEvent_t start, stop;
CK_CUDA_THROW_(cudaEventCreate(&start));
CK_CUDA_THROW_(cudaEventCreate(&stop));
cublasLtMatmulHeuristicResult_t heuristic_result[max_algo_count] = {0};
int algo_count = 0;
CK_CUBLAS_THROW_(cublasLtMatmulAlgoGetHeuristic(
get_gpu().get_cublaslt_handle(), cublas_op_desc_, cublas_kernel_desc_, cublas_bottom_desc_,
cublas_top_desc_, cublas_top_desc_, cublas_preference_, max_algo_count, heuristic_result,
&algo_count));
if (algo_count == 0) {
CK_CUBLAS_THROW_(CUBLAS_STATUS_NOT_SUPPORTED);
}
// if(get_device_id()==0) printf("M: %d, N: %d, K: %d\n", m, n, k);
for (int algoIdx = 0; algoIdx < algo_count; algoIdx++) {
cublasStatus_t status = CUBLAS_STATUS_SUCCESS;
const float alpha = 1.0f;
const float beta = 0.0f;
CK_CUDA_THROW_(cudaEventRecord(start, get_gpu().get_stream()));
for (size_t i = 0; i < repeat_num && status == CUBLAS_STATUS_SUCCESS; ++i) {
status =
cublasLtMatmul(get_gpu().get_cublaslt_handle(), cublas_op_desc_, &alpha, kernel,
cublas_kernel_desc_, bottom, cublas_bottom_desc_, &beta, top,
cublas_top_desc_, top, cublas_top_desc_, &heuristic_result[algoIdx].algo,
cublaslt_workspace_, cublaslt_workspace_size_, get_gpu().get_stream());
}
CK_CUDA_THROW_(cudaEventRecord(stop, get_gpu().get_stream()));
CK_CUDA_THROW_(cudaEventSynchronize(stop));
CK_CUDA_THROW_(cudaEventElapsedTime(&time, start, stop));
// Avg Time(ms) for this algorithm for the fprop GEMM
time = time / repeat_num;
// Skip if the algorithm is not supported for the fprop configuration
if (status != CUBLAS_STATUS_SUCCESS) {
// printf("The algorithms %d is not supported for fprop, skipped.\n", testAlgo);
continue;
}
// if(get_device_id()==0) printf("Algo: %d, wavesCount: %f, time: %f\n",
// (int)heuristic_result[algoIdx].algo,
// heuristic_result[algoIdx].wavesCount,
// time);
// Record the optimal time and algorithm
if (time < shortestTime) {
shortestTime = time;
memcpy(&falgo_k_, &heuristic_result[algoIdx].algo, sizeof(falgo_k_));
// if(get_device_id()==0) printf("Picked algorithm: %d", heuristic_result[algoIdx].algo);
}
}
// dRelu in backward pass
// Reset shortestTime
shortestTime = std::numeric_limits<float>::max();
cublasLtMatmulHeuristicResult_t heuristic_result_dRelu[max_algo_count] = {0};
int algo_count_dRelu = 0;
CK_CUBLAS_THROW_(cublasLtMatmulAlgoGetHeuristic(
get_gpu().get_cublaslt_handle(), cublas_op_desc_bprop_, cublas_kernel_desc_,
cublas_dRelu_top_desc_, cublas_dRelu_bottom_desc_, cublas_dRelu_bottom_desc_,
cublas_preference_dRelu_, max_algo_count, heuristic_result_dRelu, &algo_count_dRelu));
if (algo_count_dRelu == 0) {
CK_CUBLAS_THROW_(CUBLAS_STATUS_NOT_SUPPORTED);
}
for (int algoIdx = 0; algoIdx < algo_count_dRelu; algoIdx++) {
cublasStatus_t status = CUBLAS_STATUS_SUCCESS;
const float alpha = 1.0f;
const float beta = 0.0f;
CK_CUDA_THROW_(cudaEventRecord(start, get_gpu().get_stream()));
for (size_t i = 0; i < repeat_num && status == CUBLAS_STATUS_SUCCESS; ++i) {
status = cublasLtMatmul(get_gpu().get_cublaslt_handle(), cublas_op_desc_bprop_, &alpha,
kernel, cublas_kernel_desc_, top, cublas_dRelu_top_desc_, &beta,
bottom, cublas_dRelu_bottom_desc_, bottom, cublas_dRelu_bottom_desc_,
&heuristic_result_dRelu[algoIdx].algo, cublaslt_workspace_dRelu_,
cublaslt_workspace_size_, get_gpu().get_stream());
}
CK_CUDA_THROW_(cudaEventRecord(stop, get_gpu().get_stream()));
CK_CUDA_THROW_(cudaEventSynchronize(stop));
CK_CUDA_THROW_(cudaEventElapsedTime(&time, start, stop));
// Avg Time(ms) for this algorithm for the dRelu bprop GEMM
time = time / repeat_num;
// Skip if the algorithm is not supported for this configuration
if (status != CUBLAS_STATUS_SUCCESS) {
// printf("The algorithms %d is not supported for fprop, skipped.\n", testAlgo);
continue;
}
// Record the optimal time and algorithm
if (time < shortestTime) {
shortestTime = time;
memcpy(&balgo_dRelu_, &heuristic_result_dRelu[algoIdx].algo, sizeof(balgo_dRelu_));
}
}
// Reset shortestTime
shortestTime = std::numeric_limits<float>::max();
// Start, end for search
const cublasGemmAlgo_t startAlgo = CUBLAS_GEMM_DEFAULT_TENSOR_OP;
const cublasGemmAlgo_t endAlgo = CUBLAS_GEMM_ALGO15_TENSOR_OP;
// Search all the algorithm for balgo_k_
for (int testAlgo = startAlgo; testAlgo <= endAlgo; testAlgo++) {
cublasStatus_t status = CUBLAS_STATUS_SUCCESS;
const float alpha = 1.0f;
const float beta = 1.0f;
// Record start event
CK_CUDA_THROW_(cudaEventRecord(start, get_gpu().get_stream()));
for (size_t i = 0; i < repeat_num && status == CUBLAS_STATUS_SUCCESS; ++i) {
status = cublasGemmEx(get_gpu().get_cublas_handle(), CUBLAS_OP_N, CUBLAS_OP_T, n, k, m,
&alpha, top, CUDA_R_16F, n, bottom, CUDA_R_16F, k, &beta, kernel_grad,
CUDA_R_16F, n, CUDA_R_32F, static_cast<cublasGemmAlgo_t>(testAlgo));
}
CK_CUDA_THROW_(cudaEventRecord(stop, get_gpu().get_stream()));
CK_CUDA_THROW_(cudaEventSynchronize(stop));
CK_CUDA_THROW_(cudaEventElapsedTime(&time, start, stop));
// Avg Time(ms) for this algorithm for the bprop wgrad GEMM
time = time / repeat_num;
// Skip if the algorithm is not supported for the bprop_W configuration
if (status != CUBLAS_STATUS_SUCCESS) {
// printf("The algorithms %d is not supported for bprop_W, skipped.\n", testAlgo);
continue;
}
// Record the optimal time and algorithm
if (time < shortestTime) {
shortestTime = time;
balgo_k_ = static_cast<cublasGemmAlgo_t>(testAlgo);
}
}
// Reset shortestTime
shortestTime = std::numeric_limits<float>::max();
// Search all the algorithm for balgo_b_
for (int testAlgo = startAlgo; testAlgo <= endAlgo; testAlgo++) {
cublasStatus_t status = CUBLAS_STATUS_SUCCESS;
const float alpha = 1.0f;
const float beta = 0.0f;
// Record start event
CK_CUDA_THROW_(cudaEventRecord(start, get_gpu().get_stream()));
for (size_t i = 0; i < repeat_num && status == CUBLAS_STATUS_SUCCESS; ++i) {
status = cublasGemmEx(get_gpu().get_cublas_handle(), CUBLAS_OP_N, CUBLAS_OP_N, n, 1, m,
&alpha, top, CUDA_R_16F, n, identity, CUDA_R_16F, m, &beta, bias_grad,
CUDA_R_16F, n, CUDA_R_32F, static_cast<cublasGemmAlgo_t>(testAlgo));
}
CK_CUDA_THROW_(cudaEventRecord(stop, get_gpu().get_stream()));
CK_CUDA_THROW_(cudaEventSynchronize(stop));
CK_CUDA_THROW_(cudaEventElapsedTime(&time, start, stop));
// Avg Time(ms) for this algorithm for the bias gradient GEMM
time = time / repeat_num;
// Skip if the algorithm is not supported for the bias gradient configuration
if (status != CUBLAS_STATUS_SUCCESS) {
// printf("The algorithms %d is not supported for bprop_W, skipped.\n", testAlgo);
continue;
}
// Record the optimal time and algorithm
if (time < shortestTime) {
shortestTime = time;
balgo_b_ = static_cast<cublasGemmAlgo_t>(testAlgo);
}
}
// Reset shortestTime
shortestTime = std::numeric_limits<float>::max();
// Search all the algorithm for balgo_x_
for (int testAlgo = startAlgo; testAlgo <= endAlgo; testAlgo++) {
cublasStatus_t status = CUBLAS_STATUS_SUCCESS;
const __half alpha = 1.0f;
const __half beta = 0.0f;
// Record start event
CK_CUDA_THROW_(cudaEventRecord(start, get_gpu().get_stream()));
for (size_t i = 0; i < repeat_num && status == CUBLAS_STATUS_SUCCESS; ++i) {
status = cublasGemmEx(get_gpu().get_cublas_handle(), CUBLAS_OP_T, CUBLAS_OP_N, k, m, n,
&alpha, kernel, CUDA_R_16F, n, top, CUDA_R_16F, n, &beta, bottom,
CUDA_R_16F, k, CUDA_R_32F, static_cast<cublasGemmAlgo_t>(testAlgo));
}
CK_CUDA_THROW_(cudaEventRecord(stop, get_gpu().get_stream()));
CK_CUDA_THROW_(cudaEventSynchronize(stop));
CK_CUDA_THROW_(cudaEventElapsedTime(&time, start, stop));
// Avg Time(ms) for this algorithm for the bprop dgrad GEMM
time = time / repeat_num;
// Skip if the algorithm is not supported for the bprop_Xn configuration
if (status != CUBLAS_STATUS_SUCCESS) {
// printf("The algorithms %d is not supported for bprop_Xn, skipped.\n", testAlgo);
continue;
}
// Record the optimal time and algorithm
if (time < shortestTime) {
shortestTime = time;
balgo_x_ = static_cast<cublasGemmAlgo_t>(testAlgo);
}
}
// Print selection information
// printf("The algorithm selection for falgo_k_, balgo_k_, balgo_x_ are: %d, %d and %d.\n",
// (int)falgo_k_ - CUBLAS_GEMM_DEFAULT_TENSOR_OP,
// (int)balgo_k_ - CUBLAS_GEMM_DEFAULT_TENSOR_OP,
// (int)balgo_x_ - CUBLAS_GEMM_DEFAULT_TENSOR_OP);
// Output msg
// MESSAGE_("The fully-connected layer has finished choosing the algorithm for cublas Gemm.");
// Clean-up
CK_CUDA_THROW_(cudaEventDestroy(start));
CK_CUDA_THROW_(cudaEventDestroy(stop));
}
std::unique_ptr<DataSimulator> FusedReluBiasFullyConnectedLayer::get_uniform_initializer(
const int index) {
size_t bottom_dim = get_bottom_tensor_fprop(true).get_dimensions()[1];
size_t top_dim = train_out_tensor_.get_dimensions()[1];
float limit = 1.0f / ((0 == index ? bottom_dim : 0) + top_dim);
return std::make_unique<UniformDataSimulator>(-1 * limit, limit);
}
std::unique_ptr<DataSimulator> FusedReluBiasFullyConnectedLayer::get_xavier_uniform_initializer(
const int index) {
size_t bottom_dim = get_bottom_tensor_fprop(true).get_dimensions()[1];
size_t top_dim = train_out_tensor_.get_dimensions()[1];
return std::make_unique<VarianceScalingSimulator>(1.f, data_simu::Mode_t::Fan_avg,
data_simu::Distribution_t::Uniform,
0 == index ? bottom_dim : 0, top_dim);
}
std::unique_ptr<DataSimulator> FusedReluBiasFullyConnectedLayer::get_xavier_norm_initializer(
const int index) {
size_t bottom_dim = get_bottom_tensor_fprop(true).get_dimensions()[1];
size_t top_dim = train_out_tensor_.get_dimensions()[1];
return std::make_unique<VarianceScalingSimulator>(1.f, data_simu::Mode_t::Fan_avg,
data_simu::Distribution_t::Norm,
0 == index ? bottom_dim : 0, top_dim);
}
std::unique_ptr<DataSimulator> FusedReluBiasFullyConnectedLayer::get_default_initializer(
const int index) {
size_t bottom_dim = get_bottom_tensor_fprop(true).get_dimensions()[1];
size_t top_dim = train_out_tensor_.get_dimensions()[1];
std::unique_ptr<DataSimulator> simu(nullptr);
if (0 == index) {
simu.reset(new VarianceScalingSimulator(1.f, data_simu::Mode_t::Fan_avg,
data_simu::Distribution_t::Norm, bottom_dim, top_dim));
} else if (1 == index) {
float stddev = sqrt(1.f / top_dim);
simu.reset(new GaussianDataSimulator(0, stddev, -2 * stddev, 2 * stddev));
} else {
CK_THROW_(Error_t::OutOfBound, "index != {0, 1}.");
}
return simu;
}
} // namespace HugeCTR
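// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original layer): the minimal cuBLASLt
// call sequence that the fprop path above is built on, reduced to one function.
// It assumes <cublasLt.h> and <cuda_fp16.h>, which this translation unit
// already requires; `handle`, `stream`, the device pointers and the workspace
// are assumed to exist, all names below are illustrative only, and error
// checking is omitted for brevity.
void cublaslt_matmul_sketch(cublasLtHandle_t handle, cudaStream_t stream,
                            const __half* kernel, const __half* bottom, __half* top,
                            size_t m, size_t n, size_t k,
                            void* workspace, size_t workspace_size) {
  // 1. Describe the operation (compute / scale types).
  cublasLtMatmulDesc_t op_desc;
  cublasLtMatmulDescCreate(&op_desc, CUBLAS_COMPUTE_32F, CUDA_R_32F);
  // 2. Describe the operands: column-major (rows, cols, leading dimension),
  //    matching the (n,k,n) / (k,m,k) / (n,m,n) layouts used above.
  cublasLtMatrixLayout_t kernel_desc, bottom_desc, top_desc;
  cublasLtMatrixLayoutCreate(&kernel_desc, CUDA_R_16F, n, k, n);
  cublasLtMatrixLayoutCreate(&bottom_desc, CUDA_R_16F, k, m, k);
  cublasLtMatrixLayoutCreate(&top_desc, CUDA_R_16F, n, m, n);
  // 3. Ask the heuristic for one algorithm that fits the workspace budget.
  cublasLtMatmulPreference_t preference;
  cublasLtMatmulPreferenceCreate(&preference);
  cublasLtMatmulPreferenceSetAttribute(preference, CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES,
                                       &workspace_size, sizeof(workspace_size));
  cublasLtMatmulHeuristicResult_t heuristic;
  int returned = 0;
  cublasLtMatmulAlgoGetHeuristic(handle, op_desc, kernel_desc, bottom_desc, top_desc,
                                 top_desc, preference, 1, &heuristic, &returned);
  if (returned > 0) {
    // 4. Run the GEMM with the selected algorithm.
    const float alpha = 1.0f, beta = 0.0f;
    cublasLtMatmul(handle, op_desc, &alpha, kernel, kernel_desc, bottom, bottom_desc,
                   &beta, top, top_desc, top, top_desc, &heuristic.algo,
                   workspace, workspace_size, stream);
  }
  cublasLtMatmulPreferenceDestroy(preference);
  cublasLtMatrixLayoutDestroy(top_desc);
  cublasLtMatrixLayoutDestroy(bottom_desc);
  cublasLtMatrixLayoutDestroy(kernel_desc);
  cublasLtMatmulDescDestroy(op_desc);
}
// ---------------------------------------------------------------------------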
|
242cf7f490adae7c4ac5e4ebadd77d2f1d36d90a.hip
|
// !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 1993-2015, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <hip/hip_runtime.h>
#include "transpose.cuh"
#include "transpose.hh"
#include "die.h"
// 256 threads altogether
const int BLOCK_ROWS = 8;
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call.
static hipError_t checkCuda(hipError_t result) {
if (result != hipSuccess)
die("CUDA Runtime Error: %s\n", hipGetErrorString(result));
return result;
}
Match::CudaTranspose::CudaTranspose(const char *ib, char *ob, int d):d_obuf(nullptr),d_ibuf(nullptr),ibuf(ib),obuf(ob),dim(d),err(nullptr) {
if (d % TRANSPOSE_TILE_DIM == 0) {
hipError_t rc;
if ((rc=hipMalloc(&d_ibuf, dim*dim))!=0)
err = hipGetErrorString(rc);
if ((rc=hipMalloc(&d_obuf, dim*dim))!=0)
err = hipGetErrorString(rc);
} else {
err = "Dimention must be divisible by TRANSPOSE_TILE_DIM";
}
}
Match::CudaTranspose::operator bool() const {
return err == nullptr;
}
Match::CudaTranspose::~CudaTranspose() {
if (d_obuf)
checkCuda(hipFree((void*)d_obuf));
if (d_ibuf)
checkCuda(hipFree((void*)d_ibuf));
}
void Match::CudaTranspose::run() {
dim3 dimGrid(dim/TRANSPOSE_TILE_DIM, dim/TRANSPOSE_TILE_DIM, 1);
dim3 dimBlock(TRANSPOSE_TILE_DIM, BLOCK_ROWS, 1);
checkCuda(hipMemcpy(d_ibuf, ibuf, dim*dim, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( transposeNoBankConflicts), dim3(dimGrid), dim3(dimBlock), 0, 0, d_obuf, d_ibuf);
checkCuda(hipGetLastError());
//checkCuda(hipMemcpy(obuf, d_obuf, dim*dim, hipMemcpyDeviceToHost));
}
|
242cf7f490adae7c4ac5e4ebadd77d2f1d36d90a.cu
|
/* Copyright (c) 1993-2015, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <cuda_runtime.h>
#include "transpose.cuh"
#include "transpose.hh"
#include "die.h"
// 256 threads altogether
const int BLOCK_ROWS = 8;
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call.
static cudaError_t checkCuda(cudaError_t result) {
if (result != cudaSuccess)
die("CUDA Runtime Error: %s\n", cudaGetErrorString(result));
return result;
}
Match::CudaTranspose::CudaTranspose(const char *ib, char *ob, int d):d_obuf(nullptr),d_ibuf(nullptr),ibuf(ib),obuf(ob),dim(d),err(nullptr) {
if (d % TRANSPOSE_TILE_DIM == 0) {
cudaError_t rc;
if ((rc=cudaMalloc(&d_ibuf, dim*dim))!=0)
err = cudaGetErrorString(rc);
if ((rc=cudaMalloc(&d_obuf, dim*dim))!=0)
err = cudaGetErrorString(rc);
} else {
err = "Dimention must be divisible by TRANSPOSE_TILE_DIM";
}
}
Match::CudaTranspose::operator bool() const {
return err == nullptr;
}
Match::CudaTranspose::~CudaTranspose() {
if (d_obuf)
checkCuda(cudaFree((void*)d_obuf));
if (d_ibuf)
checkCuda(cudaFree((void*)d_ibuf));
}
void Match::CudaTranspose::run() {
dim3 dimGrid(dim/TRANSPOSE_TILE_DIM, dim/TRANSPOSE_TILE_DIM, 1);
dim3 dimBlock(TRANSPOSE_TILE_DIM, BLOCK_ROWS, 1);
checkCuda(cudaMemcpy(d_ibuf, ibuf, dim*dim, cudaMemcpyHostToDevice));
transposeNoBankConflicts<<<dimGrid, dimBlock>>>(d_obuf, d_ibuf);
checkCuda(cudaGetLastError());
//checkCuda(cudaMemcpy(obuf, d_obuf, dim*dim, cudaMemcpyDeviceToHost));
}
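// Illustrative sketch (not the kernel shipped in transpose.cuh): the standard
// shared-memory tiled transpose that the launch configuration above implies,
// i.e. TRANSPOSE_TILE_DIM x BLOCK_ROWS threads per block and one
// TRANSPOSE_TILE_DIM x TRANSPOSE_TILE_DIM tile per block. The extra padding
// column keeps the transposed reads out of a single shared-memory bank in the
// classic 4-byte formulation. The kernel name is an assumption; the real
// transposeNoBankConflicts used above may differ in detail.
__global__ void transposeNoBankConflictsSketch(char *odata, const char *idata) {
  __shared__ char tile[TRANSPOSE_TILE_DIM][TRANSPOSE_TILE_DIM + 1];
  int x = blockIdx.x * TRANSPOSE_TILE_DIM + threadIdx.x;
  int y = blockIdx.y * TRANSPOSE_TILE_DIM + threadIdx.y;
  int width = gridDim.x * TRANSPOSE_TILE_DIM;
  // Each thread copies TRANSPOSE_TILE_DIM / BLOCK_ROWS rows of the tile.
  for (int j = 0; j < TRANSPOSE_TILE_DIM; j += BLOCK_ROWS)
    tile[threadIdx.y + j][threadIdx.x] = idata[(y + j) * width + x];
  __syncthreads();
  // Swap block coordinates so the tile is written back transposed.
  x = blockIdx.y * TRANSPOSE_TILE_DIM + threadIdx.x;
  y = blockIdx.x * TRANSPOSE_TILE_DIM + threadIdx.y;
  for (int j = 0; j < TRANSPOSE_TILE_DIM; j += BLOCK_ROWS)
    odata[(y + j) * width + x] = tile[threadIdx.x][threadIdx.y + j];
}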
|
b88928c7d5097f69b5fd4fee26be706f899a420b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cuda/atomic>
#include <stdio.h>
#include "common_hip.cuh"
#include <string>
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <thread>
#include <errno.h>
#define DONE_MSG "done"
#define DONE_MSG_LEN 5
using std::string;
using std::thread;
template <typename message_t>
struct ThreeElementGPipe;
template <typename message_t>
__host__ void producer_mediator(ThreeElementGPipe<message_t>* p)
{
dbg_printf("Producer mediator has started\n");
while (true)
{
size_t head = p->_head->load(cuda::memory_order::memory_order_relaxed);
while (p->_tail->load(cuda::memory_order::memory_order_relaxed) - head == 0)
{
if (p->terminateFlag)
{
if (p->_tail->load(cuda::memory_order::memory_order_acquire) - head == 0)
{
cuda::atomic_thread_fence(cuda::memory_order_acquire, cuda::thread_scope_system);
dbg_printf("producer_mediator terminated\n");
return;
}
}
}
cuda::atomic_thread_fence(cuda::memory_order_acquire, cuda::thread_scope_system);
size_t writeSize = write(p->_fd, &(p->_messagesQueue[head % p->_queue_size]), sizeof(message_t));
if (writeSize != sizeof(message_t))
{
perror("producer_mediator: ");
dbg_printf("Error: producer_mediator: Wrong write size: %lu\n", writeSize);
}
else
{
p->_head->store(head + 1, cuda::memory_order::memory_order_release);
}
}
}
template <typename message_t>
__host__ void consumer_mediator(ThreeElementGPipe<message_t>* p)
{
dbg_printf("Consumer mediator: start\n");
while (true)
{
const size_t tail = p->_tail->load(cuda::memory_order::memory_order_relaxed);
while (tail - p->_head->load(cuda::memory_order::memory_order_relaxed) == p->_queue_size) { }
cuda::atomic_thread_fence(cuda::memory_order_acquire, cuda::thread_scope_system);
switch(read(p->_fd, &(p->_messagesQueue[tail % p->_queue_size]), sizeof(message_t)))
{
case 0:
break;
case sizeof(message_t):
dbg_printf("Consumer mediator transfer message: %s\n", p->_messagesQueue[tail % p->_queue_size].content);
p->_tail->store(tail + 1, cuda::memory_order::memory_order_release);
break;
case -1:
perror("Error: consumer_mediator: named pipe read failed\n");
exit(-1);
default:
char* receivedMessage = (char*)(p->_messagesQueue + tail % p->_queue_size);
if (strcmp(receivedMessage, "done") == 0)
{
dbg_printf("consumer_mediator terminated\n");
return;
}
dbg_printf("Error: consumer_mediator: Read partial message %s\n", receivedMessage);
break;
}
}
}
template <typename message_t>
struct ThreeElementGPipe
{
message_t *_messagesQueue;
size_t _queue_size;
size_t _threadsCount;
cuda::atomic<size_t, cuda::thread_scope_system>* _head;
cuda::atomic<size_t, cuda::thread_scope_system>* _tail;
const char* _fullPath;
bool _isConsumer;
int _fd;
thread *mediator_thread;
bool terminateFlag;
ThreeElementGPipe(const char* pipe_name, size_t queue_size, size_t threadsCount, bool isConsumer)
{
_fullPath = pipe_name;
_threadsCount = threadsCount;
_queue_size = queue_size;
_isConsumer = isConsumer;
_messagesQueue = nullptr;
mediator_thread = nullptr;
_head = nullptr;
_tail = nullptr;
_fd = -1;
terminateFlag = false;
}
__host__ void gclose()
{
terminateFlag = true;
mediator_thread->join();
if (!_isConsumer)
write(_fd, "done", 5*sizeof(char));
delete(mediator_thread);
CUDA_CHECK(hipHostFree((void*)_messagesQueue));
close(_fd);
}
__host__ void initConsumer()
{
dbg_printf("Init consumer with pipe %s\n", _fullPath);
mkfifo(_fullPath, 0666); // S_IRUSR | S_IWOTH
_fd = open(_fullPath, O_RDONLY);
dbg_printf("Consumer pipe is connected\n");
// Validate pipe parameters
size_t producer_queue_size, producer_readers_count;
if (read(_fd, &producer_queue_size, sizeof(size_t)) != sizeof(size_t))
{
perror("Error: Consumer queue size");
dbg_printf("Error: Consumer queue size: Read smaller than expected");
}
dbg_printf("Consumer: Read queue size %d\n", (int)producer_queue_size);
if (read(_fd, &producer_readers_count, sizeof(size_t)) != sizeof(size_t))
{
perror("Error: Consumer readers count");
dbg_printf("Error: Consumer readers count: Read smaller than expected");
}
dbg_printf("Consumer: Read readers count %d\n", (int)producer_readers_count);
if (producer_queue_size != _queue_size || producer_readers_count != _threadsCount)
{
perror("Error: Consumer compare args");
dbg_printf("Error: Mismatching pipe arguments!");
}
CUDA_CHECK(hipHostMalloc((void**)&_messagesQueue, _queue_size * sizeof(message_t)));
mediator_thread = new thread(consumer_mediator<message_t>, this);
}
__host__ void initProducer()
{
dbg_printf("Init producer with pipe %s\n", _fullPath);
mkfifo(_fullPath, 0666); // S_IWUSR | S_IROTH
_fd = open(_fullPath, O_WRONLY);
dbg_printf("Producer pipe is connected\n");
// Validate pipe parameters
dbg_printf("Producer queue size %d\n", (int)_queue_size);
int write_size = write(_fd, (void*)(&_queue_size), sizeof(size_t));
if (write_size != sizeof(size_t))
{
perror("Error: Producer write size");
dbg_printf("Error: Write smaller than expected: %d\n", write_size);
}
dbg_printf("Producer readers count %d\n", (int)_threadsCount);
if (write(_fd, (void*)(&_threadsCount), sizeof(size_t)) != sizeof(size_t))
{
perror("Error: Producer readers count");
dbg_printf("Error: Write smaller than expected\n");
}
dbg_printf("Producer pipe passed all arguments\n");
CUDA_CHECK(hipHostMalloc((void**)&_messagesQueue, _queue_size * sizeof(message_t)));
mediator_thread = new thread(producer_mediator<message_t>, this);
dbg_printf("Producer pipe finished\n");
}
__host__ void init()
{
CUDA_CHECK(hipHostMalloc((void **)(&_head), sizeof(cuda::atomic<size_t>)));
new(_head) cuda::atomic<size_t, cuda::thread_scope_system>(0);
CUDA_CHECK(hipHostMalloc((void **)(&_tail), sizeof(cuda::atomic<size_t>)));
new(_tail) cuda::atomic<size_t, cuda::thread_scope_system>(0);
if (_isConsumer)
initConsumer();
else
initProducer();
}
__device__ void gread(message_t* message)
{
__shared__ int _shared_head;
const int tid = GetThreadNum();
if (tid == 0)
{
_shared_head = _head->load(cuda::memory_order::memory_order_relaxed);
while (_tail->load(cuda::memory_order::memory_order_relaxed) - _shared_head < _threadsCount);
cuda::atomic_thread_fence(cuda::memory_order_acquire, cuda::thread_scope_system);
}
__syncthreads();
memcpy(message + tid, &(_messagesQueue[(_shared_head + tid) % _queue_size]), sizeof(message_t));
__syncthreads();
if (tid == 0)
{
_head->store(_shared_head + _threadsCount, cuda::memory_order::memory_order_release);
}
}
__device__ void gwrite(message_t* message)
{
__shared__ int _shared_tail;
const int tid = GetThreadNum();
if (tid == 0)
{
_shared_tail = _tail->load(cuda::memory_order::memory_order_relaxed);
while (_shared_tail - _head->load(cuda::memory_order::memory_order_relaxed) > _queue_size - _threadsCount);
cuda::atomic_thread_fence(cuda::memory_order_acquire, cuda::thread_scope_system);
}
__syncthreads();
memcpy(&(_messagesQueue[(_shared_tail + tid) % _queue_size]), message + tid, sizeof(message_t));
__syncthreads();
if (tid == 0)
{
_tail->store(_shared_tail + _threadsCount, cuda::memory_order::memory_order_release);
}
}
__device__ void write_many(message_t** messages, int number_of_messages)
{
dbg_printf("write many start\n");
for (int i = 0; i < number_of_messages; i++)
{
gwrite(messages[i]);
}
}
__device__ void clean_queue(message_t* emptyMessage)
{
for(int i = 0; i < this->_queue_size; i++)
_messagesQueue[i] = *emptyMessage;
}
};
|
b88928c7d5097f69b5fd4fee26be706f899a420b.cu
|
#include <cuda/atomic>
#include <stdio.h>
#include "common.cuh"
#include <string>
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <thread>
#include <errno.h>
#define DONE_MSG "done"
#define DONE_MSG_LEN 5
using std::string;
using std::thread;
template <typename message_t>
struct ThreeElementGPipe;
template <typename message_t>
__host__ void producer_mediator(ThreeElementGPipe<message_t>* p)
{
dbg_printf("Producer mediator has started\n");
while (true)
{
size_t head = p->_head->load(cuda::memory_order::memory_order_relaxed);
while (p->_tail->load(cuda::memory_order::memory_order_relaxed) - head == 0)
{
if (p->terminateFlag)
{
if (p->_tail->load(cuda::memory_order::memory_order_acquire) - head == 0)
{
cuda::atomic_thread_fence(cuda::memory_order_acquire, cuda::thread_scope_system);
dbg_printf("producer_mediator terminated\n");
return;
}
}
}
cuda::atomic_thread_fence(cuda::memory_order_acquire, cuda::thread_scope_system);
size_t writeSize = write(p->_fd, &(p->_messagesQueue[head % p->_queue_size]), sizeof(message_t));
if (writeSize != sizeof(message_t))
{
perror("producer_mediator: ");
dbg_printf("Error: producer_mediator: Wrong write size: %lu\n", writeSize);
}
else
{
p->_head->store(head + 1, cuda::memory_order::memory_order_release);
}
}
}
template <typename message_t>
__host__ void consumer_mediator(ThreeElementGPipe<message_t>* p)
{
dbg_printf("Consumer mediator: start\n");
while (true)
{
const size_t tail = p->_tail->load(cuda::memory_order::memory_order_relaxed);
while (tail - p->_head->load(cuda::memory_order::memory_order_relaxed) == p->_queue_size) { }
cuda::atomic_thread_fence(cuda::memory_order_acquire, cuda::thread_scope_system);
switch(read(p->_fd, &(p->_messagesQueue[tail % p->_queue_size]), sizeof(message_t)))
{
case 0:
break;
case sizeof(message_t):
dbg_printf("Consumer mediator transfer message: %s\n", p->_messagesQueue[tail % p->_queue_size].content);
p->_tail->store(tail + 1, cuda::memory_order::memory_order_release);
break;
case -1:
perror("Error: consumer_mediator: named pipe read failed\n");
exit(-1);
default:
char* receivedMessage = (char*)(p->_messagesQueue + tail % p->_queue_size);
if (strcmp(receivedMessage, "done") == 0)
{
dbg_printf("consumer_mediator terminated\n");
return;
}
dbg_printf("Error: consumer_mediator: Read partial message %s\n", receivedMessage);
break;
}
}
}
template <typename message_t>
struct ThreeElementGPipe
{
message_t *_messagesQueue;
size_t _queue_size;
size_t _threadsCount;
cuda::atomic<size_t, cuda::thread_scope_system>* _head;
cuda::atomic<size_t, cuda::thread_scope_system>* _tail;
const char* _fullPath;
bool _isConsumer;
int _fd;
thread *mediator_thread;
bool terminateFlag;
ThreeElementGPipe(const char* pipe_name, size_t queue_size, size_t threadsCount, bool isConsumer)
{
_fullPath = pipe_name;
_threadsCount = threadsCount;
_queue_size = queue_size;
_isConsumer = isConsumer;
_messagesQueue = nullptr;
mediator_thread = nullptr;
_head = nullptr;
_tail = nullptr;
_fd = -1;
terminateFlag = false;
}
__host__ void gclose()
{
terminateFlag = true;
mediator_thread->join();
if (!_isConsumer)
write(_fd, "done", 5*sizeof(char));
delete(mediator_thread);
CUDA_CHECK(cudaFreeHost((void*)_messagesQueue));
close(_fd);
}
__host__ void initConsumer()
{
dbg_printf("Init consumer with pipe %s\n", _fullPath);
mkfifo(_fullPath, 0666); // S_IRUSR | S_IWOTH
_fd = open(_fullPath, O_RDONLY);
dbg_printf("Consumer pipe is connected\n");
// Validate pipe parameters
size_t producer_queue_size, producer_readers_count;
if (read(_fd, &producer_queue_size, sizeof(size_t)) != sizeof(size_t))
{
perror("Error: Consumer queue size");
dbg_printf("Error: Consumer queue size: Read smaller than expected");
}
dbg_printf("Consumer: Read queue size %d\n", (int)producer_queue_size);
if (read(_fd, &producer_readers_count, sizeof(size_t)) != sizeof(size_t))
{
perror("Error: Consumer readers count");
dbg_printf("Error: Consumer readers count: Read smaller than expected");
}
dbg_printf("Consumer: Read readers count %d\n", (int)producer_readers_count);
if (producer_queue_size != _queue_size || producer_readers_count != _threadsCount)
{
perror("Error: Consumer compare args");
dbg_printf("Error: Mismatching pipe arguments!");
}
CUDA_CHECK(cudaMallocHost((void**)&_messagesQueue, _queue_size * sizeof(message_t)));
mediator_thread = new thread(consumer_mediator<message_t>, this);
}
__host__ void initProducer()
{
dbg_printf("Init producer with pipe %s\n", _fullPath);
mkfifo(_fullPath, 0666); // S_IWUSR | S_IROTH
_fd = open(_fullPath, O_WRONLY);
dbg_printf("Producer pipe is connected\n");
// Validate pipe parameters
dbg_printf("Producer queue size %d\n", (int)_queue_size);
int write_size = write(_fd, (void*)(&_queue_size), sizeof(size_t));
if (write_size != sizeof(size_t))
{
perror("Error: Producer write size");
dbg_printf("Error: Write smaller than expected: %d\n", write_size);
}
dbg_printf("Producer readers count %d\n", (int)_threadsCount);
if (write(_fd, (void*)(&_threadsCount), sizeof(size_t)) != sizeof(size_t))
{
perror("Error: Producer readers count");
dbg_printf("Error: Write smaller than expected\n");
}
dbg_printf("Producer pipe passed all arguments\n");
CUDA_CHECK(cudaMallocHost((void**)&_messagesQueue, _queue_size * sizeof(message_t)));
mediator_thread = new thread(producer_mediator<message_t>, this);
dbg_printf("Producer pipe finished\n");
}
__host__ void init()
{
CUDA_CHECK(cudaMallocHost((void **)(&_head), sizeof(cuda::atomic<size_t>)));
new(_head) cuda::atomic<size_t, cuda::thread_scope_system>(0);
CUDA_CHECK(cudaMallocHost((void **)(&_tail), sizeof(cuda::atomic<size_t>)));
new(_tail) cuda::atomic<size_t, cuda::thread_scope_system>(0);
if (_isConsumer)
initConsumer();
else
initProducer();
}
__device__ void gread(message_t* message)
{
__shared__ int _shared_head;
const int tid = GetThreadNum();
if (tid == 0)
{
_shared_head = _head->load(cuda::memory_order::memory_order_relaxed);
while (_tail->load(cuda::memory_order::memory_order_relaxed) - _shared_head < _threadsCount);
cuda::atomic_thread_fence(cuda::memory_order_acquire, cuda::thread_scope_system);
}
__syncthreads();
memcpy(message + tid, &(_messagesQueue[(_shared_head + tid) % _queue_size]), sizeof(message_t));
__syncthreads();
if (tid == 0)
{
_head->store(_shared_head + _threadsCount, cuda::memory_order::memory_order_release);
}
}
__device__ void gwrite(message_t* message)
{
__shared__ int _shared_tail;
const int tid = GetThreadNum();
if (tid == 0)
{
_shared_tail = _tail->load(cuda::memory_order::memory_order_relaxed);
while (_shared_tail - _head->load(cuda::memory_order::memory_order_relaxed) > _queue_size - _threadsCount);
cuda::atomic_thread_fence(cuda::memory_order_acquire, cuda::thread_scope_system);
}
__syncthreads();
memcpy(&(_messagesQueue[(_shared_tail + tid) % _queue_size]), message + tid, sizeof(message_t));
__syncthreads();
if (tid == 0)
{
_tail->store(_shared_tail + _threadsCount, cuda::memory_order::memory_order_release);
}
}
__device__ void write_many(message_t** messages, int number_of_messages)
{
dbg_printf("write many start\n");
for (int i = 0; i < number_of_messages; i++)
{
gwrite(messages[i]);
}
}
__device__ void clean_queue(message_t* emptyMessage)
{
for(int i = 0; i < this->_queue_size; i++)
_messagesQueue[i] = *emptyMessage;
}
};
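// Illustrative host-side sketch (not part of the original file): one way a
// producer process could wire the pipe up. DemoMessage, the fifo path and the
// sizes are assumptions for illustration only; a consumer process would pass
// isConsumer = true and have its kernels call gread(). Because the queue and
// the head/tail counters come from cudaMallocHost, the same pointers are
// usable from device code on UVA-capable systems, e.g. when the pipe object is
// passed to a kernel by value.
struct DemoMessage { char content[64]; };
__host__ void demo_producer_setup() {
  ThreeElementGPipe<DemoMessage> pipe("/tmp/gpipe_demo", /*queue_size=*/1024,
                                      /*threadsCount=*/32, /*isConsumer=*/false);
  pipe.init();   // allocates the pinned ring buffer and starts producer_mediator
  // ... launch kernels in which 32 cooperating threads call pipe.gwrite(msgs),
  //     where msgs points at one DemoMessage per thread ...
  pipe.gclose(); // joins the mediator thread, then sends the "done" marker
}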
|
ce59566f10b2a6f20cf33ce1a6aaff499de322d6.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Université Pierre et Marie Curie
* Neutron transport computation
* Sequential version
*/
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include "neutron.h"
#include <utility>
#include <chrono>
#include <iostream>
#include "neutron_gpu_kernel.h"
using namespace std::chrono;
/**
* Returns the integer quotient rounded up, i.e. the ceiling of "a/b".
*/
template<typename T>
inline static T iDivUp(T a, T b){
return ((a % b != 0) ? (a / b + 1) : (a / b));
}
ExperimentalResults neutron_gpu_caller(float* absorbed, long n,
const ProblemParameters& params,
const std::vector<unsigned long long>& seeds,
int threadsPerBlock, int neutronsPerThread) {
const auto threads = threadsPerBlock*iDivUp<long>(n, threadsPerBlock*neutronsPerThread);
auto t1 = system_clock::now();
unsigned long long* d_seeds;
hipMalloc((void**)&d_seeds, seeds.size()*sizeof(unsigned long long));
hipMemcpy(d_seeds, seeds.data(), seeds.size()*sizeof(unsigned long long), hipMemcpyHostToDevice);
// launching cuda kernel
ProblemParameters* d_params;
hipMalloc((void**)&d_params, sizeof(ProblemParameters));
hipMemcpy(d_params, ¶ms, sizeof(ProblemParameters), hipMemcpyHostToDevice);
unsigned long long int* d_next_absorbed;
hipMalloc((void**)&d_next_absorbed, sizeof(unsigned long long int));
hipMemset(d_next_absorbed, 0, sizeof(unsigned long long int));
float* d_absorbed;
hipMalloc((void**)&d_absorbed, n*sizeof(float));
#ifdef TEST
hipMemcpy(d_params, absorbed, n*sizeof(float), hipMemcpyHostToDevice);
#endif
unsigned long long int* d_r, * d_b, * d_t;
hipMalloc((void**)&d_r, sizeof(unsigned long long int));
hipMalloc((void**)&d_b, sizeof(unsigned long long int));
hipMalloc((void**)&d_t, sizeof(unsigned long long int));
hipMemset(d_r, 0, sizeof(unsigned long long int));
hipMemset(d_b, 0, sizeof(unsigned long long int));
hipMemset(d_t, 0, sizeof(unsigned long long int));
hiprandState_t* d_states;
hipMalloc((void**)&d_states, threads*sizeof(hiprandState_t));
hipDeviceSynchronize();
auto t2 = system_clock::now();
std::cout << "Temps de la copie CPU -> GPU: " << std::chrono::duration_cast<milliseconds>(t2 - t1).count()/1000. << " sec" << std::endl;
const dim3 nthreads(threadsPerBlock);
const dim3 nblocs(iDivUp<long>(n, threadsPerBlock*neutronsPerThread));
std::cout << "Nombre de blocs GPU: " << nblocs.x << std::endl;
std::cout << "Nombre de threads par bloc: " << nthreads.x << std::endl;
std::cout << "Mmoire utilise: " << (n*4.)/(1024.*1024.) << "Mo" << std::endl;
auto t3 = system_clock::now();
hipLaunchKernelGGL(( neutron_gpu_kernel), dim3(nthreads), dim3(nblocs), 0, 0, n, neutronsPerThread, d_params,
d_next_absorbed, d_absorbed,
d_r, d_b, d_t, d_seeds, d_states);
// retrieving results
hipDeviceSynchronize();
auto t4 = system_clock::now();
std::cout << "Temps du kernel: " << std::chrono::duration_cast<milliseconds>(t4 - t3).count()/1000. << " sec" << std::endl;
hipFree(d_next_absorbed),
hipFree(d_seeds);
ExperimentalResults res;
unsigned long long int r, b, t;
hipMemcpy(&r, d_r, sizeof(unsigned long long int), hipMemcpyDeviceToHost);
hipMemcpy(&b, d_b, sizeof(unsigned long long int), hipMemcpyDeviceToHost);
hipMemcpy(&t, d_t, sizeof(unsigned long long int), hipMemcpyDeviceToHost);
res.r = static_cast<long>(r);
res.b = static_cast<long>(b);
res.t = static_cast<long>(t);
hipFree(d_r);
hipFree(d_b);
hipFree(d_t);
if (res.r+res.b+res.t != n)
exit(1);
t1 = system_clock::now();
res.absorbed = absorbed;
hipMemcpy(res.absorbed, d_absorbed, res.b*sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_absorbed);
t2 = system_clock::now();
std::cout << "Temps de la copie GPU -> CPU: " << std::chrono::duration_cast<milliseconds>(t2 - t1).count()/1000. << " sec" << std::endl;
return res;
}
|
ce59566f10b2a6f20cf33ce1a6aaff499de322d6.cu
|
/*
* Université Pierre et Marie Curie
* Neutron transport computation
* Sequential version
*/
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#include "neutron.h"
#include <utility>
#include <chrono>
#include <iostream>
#include "neutron_gpu_kernel.h"
using namespace std::chrono;
/**
* Returns the integer quotient rounded up, i.e. the ceiling of "a/b".
*/
template<typename T>
inline static T iDivUp(T a, T b){
return ((a % b != 0) ? (a / b + 1) : (a / b));
}
ExperimentalResults neutron_gpu_caller(float* absorbed, long n,
const ProblemParameters& params,
const std::vector<unsigned long long>& seeds,
int threadsPerBlock, int neutronsPerThread) {
const auto threads = threadsPerBlock*iDivUp<long>(n, threadsPerBlock*neutronsPerThread);
auto t1 = system_clock::now();
unsigned long long* d_seeds;
cudaMalloc((void**)&d_seeds, seeds.size()*sizeof(unsigned long long));
cudaMemcpy(d_seeds, seeds.data(), seeds.size()*sizeof(unsigned long long), cudaMemcpyHostToDevice);
// launching cuda kernel
ProblemParameters* d_params;
cudaMalloc((void**)&d_params, sizeof(ProblemParameters));
cudaMemcpy(d_params, ¶ms, sizeof(ProblemParameters), cudaMemcpyHostToDevice);
unsigned long long int* d_next_absorbed;
cudaMalloc((void**)&d_next_absorbed, sizeof(unsigned long long int));
cudaMemset(d_next_absorbed, 0, sizeof(unsigned long long int));
float* d_absorbed;
cudaMalloc((void**)&d_absorbed, n*sizeof(float));
#ifdef TEST
cudaMemcpy(d_params, absorbed, n*sizeof(float), cudaMemcpyHostToDevice);
#endif
unsigned long long int* d_r, * d_b, * d_t;
cudaMalloc((void**)&d_r, sizeof(unsigned long long int));
cudaMalloc((void**)&d_b, sizeof(unsigned long long int));
cudaMalloc((void**)&d_t, sizeof(unsigned long long int));
cudaMemset(d_r, 0, sizeof(unsigned long long int));
cudaMemset(d_b, 0, sizeof(unsigned long long int));
cudaMemset(d_t, 0, sizeof(unsigned long long int));
curandState* d_states;
cudaMalloc((void**)&d_states, threads*sizeof(curandState));
cudaDeviceSynchronize();
auto t2 = system_clock::now();
std::cout << "Temps de la copie CPU -> GPU: " << std::chrono::duration_cast<milliseconds>(t2 - t1).count()/1000. << " sec" << std::endl;
const dim3 nthreads(threadsPerBlock);
const dim3 nblocs(iDivUp<long>(n, threadsPerBlock*neutronsPerThread));
std::cout << "Nombre de blocs GPU: " << nblocs.x << std::endl;
std::cout << "Nombre de threads par bloc: " << nthreads.x << std::endl;
std::cout << "Mémoire utilisée: " << (n*4.)/(1024.*1024.) << "Mo" << std::endl;
auto t3 = system_clock::now();
neutron_gpu_kernel<<<nthreads, nblocs>>>(n, neutronsPerThread, d_params,
d_next_absorbed, d_absorbed,
d_r, d_b, d_t, d_seeds, d_states);
// retrieving results
cudaDeviceSynchronize();
auto t4 = system_clock::now();
std::cout << "Temps du kernel: " << std::chrono::duration_cast<milliseconds>(t4 - t3).count()/1000. << " sec" << std::endl;
cudaFree(d_next_absorbed),
cudaFree(d_seeds);
ExperimentalResults res;
unsigned long long int r, b, t;
cudaMemcpy(&r, d_r, sizeof(unsigned long long int), cudaMemcpyDeviceToHost);
cudaMemcpy(&b, d_b, sizeof(unsigned long long int), cudaMemcpyDeviceToHost);
cudaMemcpy(&t, d_t, sizeof(unsigned long long int), cudaMemcpyDeviceToHost);
res.r = static_cast<long>(r);
res.b = static_cast<long>(b);
res.t = static_cast<long>(t);
cudaFree(d_r);
cudaFree(d_b);
cudaFree(d_t);
if (res.r+res.b+res.t != n)
exit(1);
t1 = system_clock::now();
res.absorbed = absorbed;
cudaMemcpy(res.absorbed, d_absorbed, res.b*sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_absorbed);
t2 = system_clock::now();
std::cout << "Temps de la copie GPU -> CPU: " << std::chrono::duration_cast<milliseconds>(t2 - t1).count()/1000. << " sec" << std::endl;
return res;
}
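// Worked example of the sizing arithmetic above (illustrative numbers only):
// with n = 1000000 neutrons, threadsPerBlock = 128 and neutronsPerThread = 16,
// iDivUp(1000000, 128*16) = iDivUp(1000000, 2048) = 489, so the grid holds 489
// blocks, threads = 128 * 489 = 62592 curandState slots are allocated, and each
// thread processes roughly 16 neutrons.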
|
0aef01e88639517c11d89901faa5fc25cd575eed.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "convolve.hip"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned char *source = NULL;
hipMalloc(&source, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
int paddingX = 1;
int paddingY = 1;
size_t kOffset = 1;
int kWidth = XSIZE;
int kHeight = YSIZE;
unsigned char *destination = NULL;
hipMalloc(&destination, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((convolve), dim3(gridBlock), dim3(threadBlock), 0, 0, source, width, height, paddingX, paddingY, kOffset, kWidth, kHeight, destination);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((convolve), dim3(gridBlock), dim3(threadBlock), 0, 0, source, width, height, paddingX, paddingY, kOffset, kWidth, kHeight, destination);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((convolve), dim3(gridBlock), dim3(threadBlock), 0, 0, source, width, height, paddingX, paddingY, kOffset, kWidth, kHeight, destination);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
0aef01e88639517c11d89901faa5fc25cd575eed.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "convolve.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned char *source = NULL;
cudaMalloc(&source, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
int paddingX = 1;
int paddingY = 1;
size_t kOffset = 1;
int kWidth = XSIZE;
int kHeight = YSIZE;
unsigned char *destination = NULL;
cudaMalloc(&destination, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
convolve<<<gridBlock,threadBlock>>>(source,width,height,paddingX,paddingY,kOffset,kWidth,kHeight,destination);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
convolve<<<gridBlock,threadBlock>>>(source,width,height,paddingX,paddingY,kOffset,kWidth,kHeight,destination);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
convolve<<<gridBlock,threadBlock>>>(source,width,height,paddingX,paddingY,kOffset,kWidth,kHeight,destination);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
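// Illustrative alternative (not part of the original benchmark): kernel
// launches are asynchronous, so the steady_clock interval above can close
// before the launched work has finished executing. A cudaEvent pair records
// timestamps in the stream itself and yields device execution time; the helper
// name and iteration count below are assumptions for illustration only.
float time_convolve_ms(dim3 gridBlock, dim3 threadBlock,
                       unsigned char* source, int width, int height,
                       int paddingX, int paddingY, size_t kOffset,
                       int kWidth, int kHeight, unsigned char* destination,
                       int iterations) {
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start);
  for (int i = 0; i < iterations; ++i) {
    convolve<<<gridBlock, threadBlock>>>(source, width, height, paddingX, paddingY,
                                         kOffset, kWidth, kHeight, destination);
  }
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);      // wait until all recorded work has completed
  float elapsed_ms = 0.0f;
  cudaEventElapsedTime(&elapsed_ms, start, stop);
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  return elapsed_ms / iterations;  // average time per launch, in milliseconds
}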
|
d1c4008e2ff8fc839f6c7c1930c7b50aa8d640d0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "cudaBMTKernel_MultiDim.cuh"
#include <hip/hip_runtime.h>
#include <iostream>
#include <stdint.h>
#include <stdio.h>
//#define _DEBUG
using namespace std;
PRECISION * fa_d, * fb_d, * fc_d,
* fp_d, * fwrk1_d, * fwrk2_d, * fbnd_d,
**** a_d, **** b_d, **** c_d,
*** p_d, *** wrk1_d, *** wrk2_d, *** bnd_d,
* gosa_d,
**** a_h, **** b_h, **** c_h,
*** p_h, *** wrk1_h, *** wrk2_h, *** bnd_h,
* gosa_h;
typedef void * PtrObj;
__global__ void bmtJacobiKernel(
PRECISION **** a, PRECISION **** b, PRECISION **** c,
PRECISION *** p, PRECISION *** wrk1, PRECISION *** wrk2,
PRECISION *** bnd, PRECISION * gosa,
int imax, int jmax, int kmax) {
int i, j, k ,i_s, j_s, k_s, i_strides, j_strides, k_strides;
PRECISION s0, ss, omega = 0.8;
// __shared__ PRECISION wgosa;
int boffset_x = blockIdx.x * blockDim.x;
int boffset_y = blockIdx.y * blockDim.y;
int boffset_z = 0;
int totThreadsx = gridDim.x * blockDim.x;
int gThreadIdxx = boffset_x + threadIdx.x;
int totThreadsy = gridDim.y * blockDim.y;
int gThreadIdxy = boffset_y + threadIdx.y;
int totThreadsz = blockDim.z;
int gThreadIdxz = boffset_z + threadIdx.z;
// int tid = (threadIdx.z * (blockDim.y * blockDim.x)) +
// (threadIdx.y * blockDim.x) +
// threadIdx.x;
// if (tid == 0)
// wgosa = 0.0;
// __syncthreads();
i_strides = (imax / totThreadsx) + 1;
j_strides = (jmax / totThreadsy) + 1;
k_strides = (kmax / totThreadsz) + 1;
for (int xxx=0;xxx<8;xxx++) {
for (i_s=0;i_s<i_strides;i_s++) {
i = (i_s * totThreadsx) + gThreadIdxx;
if ((i < 1) || (i > imax - 2))
continue;
for(int yyy=0;yyy<8;yyy++) {
for (j_s=0;j_s<j_strides;j_s++) {
j = (j_s * totThreadsy) + gThreadIdxy;
if ((j < 1) || (j > jmax - 2))
continue;
for (int zzz=0;zzz<8;zzz++) {
for (k_s=0;k_s<k_strides;k_s++) {
k = (k_s * totThreadsz) + gThreadIdxz;
if ((k < 1) || (k > kmax - 2))
continue;
s0 = a[0][i][j][k] * p[i+1][j ][k ]
+ a[1][i][j][k] * p[i ][j+1][k ]
+ a[2][i][j][k] * p[i ][j ][k+1]
+ b[0][i][j][k] * ( p[i+1][j+1][k ] - p[i+1][j-1][k ]
- p[i-1][j+1][k ] + p[i-1][j-1][k ] )
+ b[1][i][j][k] * ( p[i ][j+1][k+1] - p[i ][j-1][k+1]
- p[i ][j+1][k-1] + p[i ][j-1][k-1] )
+ b[2][i][j][k] * ( p[i+1][j ][k+1] - p[i-1][j ][k+1]
- p[i+1][j ][k-1] + p[i-1][j ][k-1] )
+ c[0][i][j][k] * p[i-1][j ][k ]
+ c[1][i][j][k] * p[i ][j-1][k ]
+ c[2][i][j][k] * p[i ][j ][k-1]
+ wrk1[i][j][k];
ss = ( s0 * a[3][i][j][k] - p[i][j][k] ) * bnd[i][j][k];
atomicAdd(gosa, ss*ss);
wrk2[i][j][k] = p[i][j][k] + omega * ss;
}
}
}
}
}
}
// __syncthreads();
/*
for (i=1;i<imax-1;++i) {
for (j=1;j<jmax-1;++j) {
for (k=1;k<kmax-1;++k) {
*/
#if 0
for (i_s=0;i_s<i_strides;i_s++) {
i = (i_s * totThreadsx) + gThreadIdxx;
if ((i < 1) || (i > imax - 2))
continue;
for (j_s=0;j_s<j_strides;j_s++) {
j = (j_s * totThreadsy) + gThreadIdxy;
if ((j < 1) || (j > jmax - 2))
continue;
for (k_s=0;k_s<k_strides;k_s++) {
k = (k_s * totThreadsz) + gThreadIdxz;
if ((k < 1) || (k > kmax - 2))
continue;
p[i][j][k] = wrk2[i][j][k];
}
}
}
#endif
#if 0
if (tid == 0) {
printf("gosa: %f\n", wgosa);
atomicAdd(gosa, wgosa);
}
#endif
}
__global__ void bmtUpdatePressureKernel(
PRECISION *** p, PRECISION *** wrk2, int imax, int jmax, int kmax) {
int i, j, k ,i_s, j_s, k_s, i_strides, j_strides, k_strides;
int boffset_x = blockIdx.x * blockDim.x;
int boffset_y = blockIdx.y * blockDim.y;
int boffset_z = 0;
int totThreadsx = gridDim.x * blockDim.x;
int gThreadIdxx = boffset_x + threadIdx.x;
int totThreadsy = gridDim.y * blockDim.y;
int gThreadIdxy = boffset_y + threadIdx.y;
int totThreadsz = blockDim.z;
int gThreadIdxz = boffset_z + threadIdx.z;
i_strides = (imax / totThreadsx) + 1;
j_strides = (jmax / totThreadsy) + 1;
k_strides = (kmax / totThreadsz) + 1;
for (i_s=0;i_s<i_strides;i_s++) {
i = (i_s * totThreadsx) + gThreadIdxx;
if ((i < 1) || (i > imax - 2))
continue;
for (j_s=0;j_s<j_strides;j_s++) {
j = (j_s * totThreadsy) + gThreadIdxy;
if ((j < 1) || (j > jmax - 2))
continue;
for (k_s=0;k_s<k_strides;k_s++) {
k = (k_s * totThreadsz) + gThreadIdxz;
if ((k < 1) || (k > kmax - 2))
continue;
p[i][j][k] = wrk2[i][j][k];
}
}
}
}
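// Note on the indexing scheme shared by the two kernels above: each thread owns
// a strided subset of the (i, j, k) grid. For example, with gridDim.x = 4 and
// blockDim.x = 8 (totThreadsx = 32) and imax = 65, i_strides = 65/32 + 1 = 3, so
// the thread with gThreadIdxx = 5 visits i = 5, 37 and 69; the guards skip any
// index outside [1, imax-2], which also leaves the boundary planes untouched.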
#ifdef _DEBUG
__global__ void DebugKernel(PRECISION **** a) {
a[0][1][2][3] = 100;
a[3][0][2][1] = 200;
}
#endif
#define CHK_ERR(str) \
do { \
hipError_t ce = str; \
if (ce != hipSuccess) \
return ce; \
} while (0)
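// The do { ... } while (0) wrapper makes CHK_ERR expand to a single statement,
// so it composes safely with unbraced if/else. On failure it returns the
// hipError_t to the caller, so it is only usable inside functions whose return
// type is hipError_t or something it converts to (such as the int helpers below).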
int bmtAssign_MultiDimension_Space_Rec(
PtrObj * ptrobj, PtrObj * ptrobj_d, PRECISION * flat_d, int dim,
int mdim, int * adim, int poffset, int doffset,
int * blocks) {
#ifdef _DEBUG
#define INDENT for (int i=0;i<dim;i++) cout << "\t";
#endif
int iIdx, offset = doffset;
if (dim < mdim - 2) {
int nloffset = 1;
for (int idx=0;idx<=dim;idx++)
nloffset *= adim[idx];
nloffset += doffset;
for (iIdx=0;iIdx<adim[dim];iIdx++) {
blocks[dim] = iIdx;
int loffset = 0;
if (dim > 0) {
int b=0;
for (int i=0;i<dim;i++) {
if (i != dim - 1)
b += blocks[i] * adim[i+1];
else
b += blocks[i];
}
loffset += adim[dim] * b;
}
#ifdef _DEBUG
INDENT;
cout << "[" << dim << ", " << iIdx << "]:" << adim[dim]
<< ": " << offset + loffset<< endl;
#endif
bmtAssign_MultiDimension_Space_Rec(
ptrobj, ptrobj_d, flat_d, dim + 1,
mdim, adim, offset + loffset, nloffset, blocks);
if ((poffset != -1) && (iIdx == 0))
ptrobj[poffset] = ptrobj_d + offset + loffset;
/*reinterpret_cast<PtrObj>(offset+loffset);*/
offset++;
}
}
else {
if (dim > 0) {
int b=0;
for (int i=0;i<dim;i++) {
if (i != dim - 1)
b += blocks[i] * adim[i+1];
else
b += blocks[i];
}
offset += adim[dim] * b;
}
for (iIdx=0;iIdx<adim[dim];iIdx++) {
#ifdef _DEBUG
INDENT;
cout << "[" << dim << ", " << iIdx << "]:" << adim[dim]
<< ": " << offset << endl;
#endif
if ((poffset != -1) && (iIdx == 0))
ptrobj[poffset] = ptrobj_d + offset;
/*reinterpret_cast<PtrObj>(offset);*/
int foffset = 0;
for (int i=0;i<mdim-1;i++) {
int ele = 1;
for (int j=i+1;j<mdim;j++)
ele *= adim[j];
if (i < mdim - 2)
foffset += blocks[i] * ele;
else
foffset += iIdx * ele;
}
ptrobj[offset] = flat_d + foffset;
/*reinterpret_cast<PtrObj>(foffset);*/
offset++;
}
}
return 0;
}
int bmtCreateDevice_MultiDimension_Space(
PRECISION ** m_h, PRECISION ** m_d, PRECISION * fm_d,
int dim, int * adim) {
int iIdx, jIdx, cnt = 1;
//Determine the number of blocks for storing pointer objects
for (iIdx=0;iIdx<dim-1;iIdx++)
cnt *= adim[iIdx];
for (iIdx=dim-3;iIdx>=0;iIdx--) {
int tcnt = 1;
for (jIdx=iIdx;jIdx>=0;jIdx--)
tcnt *= adim[jIdx];
cnt += tcnt;
}
#ifdef _DEBUG
cout << "***" << cnt << endl;
#endif
//Allocate blocks for storing pointer objects on both host and device
PtrObj * tm_h, * tm_d;
tm_h = new PtrObj[cnt];
CHK_ERR( hipMalloc(&tm_d, cnt * sizeof(PtrObj)));
//Assign pointer values to blocks
int blocks[4];
bmtAssign_MultiDimension_Space_Rec(
tm_h, tm_d, fm_d, 0, dim,
adim, -1, 0, blocks);
//Transfer the created multidimentional array to device
CHK_ERR( hipMemcpy(tm_d, tm_h,
cnt * sizeof(PtrObj), hipMemcpyHostToDevice));
*m_h = reinterpret_cast<PRECISION *>(tm_h);
*m_d = reinterpret_cast<PRECISION *>(tm_d);
#ifdef _DEBUG
cout << endl << "Origin:\t" << tm_d << endl;
for (iIdx=0;iIdx<cnt;iIdx++)
cout << iIdx << ":\t" << tm_h[iIdx] << endl;
#endif
return 0;
}
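// Note on the two helpers above: for a matrix backed by a single flat device
// buffer they build one contiguous block of PtrObj entries that holds every
// intermediate pointer level (outer entries point back into the same block,
// innermost entries point into the flat data buffer), fill it on the host with
// device addresses, and ship it to the device with a single hipMemcpy. Kernels
// can then use plain a[l][i][j][k] indexing instead of hand-computed flat
// offsets.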
hipError_t bmtInitDeviceMemory(
Matrix * pa, Matrix * pb, Matrix * pc,
Matrix * pp, Matrix * pwrk1, Matrix * pwrk2,
Matrix * pbnd, int peid) {
int devCnt = 0;
CHK_ERR( hipGetDeviceCount(&devCnt));
CHK_ERR( hipSetDevice(peid % devCnt));
gosa_h = new PRECISION();
CHK_ERR( hipMalloc(&gosa_d, sizeof(PRECISION)));
size_t memreq_3d = config.mimax * config.mjmax *
config.mkmax * sizeof(PRECISION);
CHK_ERR( hipMalloc(&fa_d, 4 * memreq_3d));
CHK_ERR( hipMalloc(&fb_d, 3 * memreq_3d));
CHK_ERR( hipMalloc(&fc_d, 3 * memreq_3d));
CHK_ERR( hipMalloc(&fp_d, memreq_3d));
CHK_ERR( hipMalloc(&fwrk1_d, memreq_3d));
CHK_ERR( hipMalloc(&fwrk2_d, memreq_3d));
CHK_ERR( hipMalloc(&fbnd_d, memreq_3d));
CHK_ERR( hipMemcpy(fa_d, pa->mpVal,
4 * memreq_3d, hipMemcpyHostToDevice));
CHK_ERR( hipMemcpy(fb_d, pb->mpVal,
3 * memreq_3d, hipMemcpyHostToDevice));
CHK_ERR( hipMemcpy(fc_d, pc->mpVal,
3 * memreq_3d, hipMemcpyHostToDevice));
CHK_ERR( hipMemcpy(fp_d, pp->mpVal,
memreq_3d, hipMemcpyHostToDevice));
CHK_ERR( hipMemcpy(fwrk1_d, pwrk1->mpVal,
memreq_3d, hipMemcpyHostToDevice));
CHK_ERR( hipMemcpy(fwrk2_d, pwrk2->mpVal,
memreq_3d, hipMemcpyHostToDevice));
CHK_ERR( hipMemcpy(fbnd_d, pbnd->mpVal,
memreq_3d, hipMemcpyHostToDevice));
#ifndef _DEBUG
//Construct multi-dimensional space for matrices
bmtCreateDevice_MultiDimension_Space(
reinterpret_cast<PRECISION **>(&a_h),
reinterpret_cast<PRECISION **>(&a_d),
fa_d, pa->mDim, pa->mpDim);
bmtCreateDevice_MultiDimension_Space(
reinterpret_cast<PRECISION **>(&b_h),
reinterpret_cast<PRECISION **>(&b_d),
fb_d, pb->mDim, pb->mpDim);
bmtCreateDevice_MultiDimension_Space(
reinterpret_cast<PRECISION **>(&c_h),
reinterpret_cast<PRECISION **>(&c_d),
fc_d, pc->mDim, pc->mpDim);
bmtCreateDevice_MultiDimension_Space(
reinterpret_cast<PRECISION **>(&p_h),
reinterpret_cast<PRECISION **>(&p_d),
fp_d, pp->mDim, pp->mpDim);
bmtCreateDevice_MultiDimension_Space(
reinterpret_cast<PRECISION **>(&wrk1_h),
reinterpret_cast<PRECISION **>(&wrk1_d),
fwrk1_d, pwrk1->mDim, pwrk1->mpDim);
bmtCreateDevice_MultiDimension_Space(
reinterpret_cast<PRECISION **>(&wrk2_h),
reinterpret_cast<PRECISION **>(&wrk2_d),
fwrk2_d, pwrk2->mDim, pwrk2->mpDim);
bmtCreateDevice_MultiDimension_Space(
reinterpret_cast<PRECISION **>(&bnd_h),
reinterpret_cast<PRECISION **>(&bnd_d),
fbnd_d, pbnd->mDim, pbnd->mpDim);
#else
PRECISION **** fake_h, **** fake_d, * ffake_d;
Matrix * pfake;
pfake = new Matrix(4,2,3,4);
CHK_ERR( hipMalloc(&ffake_d, 4 * 2 * 3 * 4 * sizeof(PRECISION)));
CHK_ERR( hipMemset(ffake_d, 0, 4 * 2 * 3 * 4 * sizeof(PRECISION)));
bmtCreateDevice_MultiDimension_Space(
reinterpret_cast<PRECISION **>(&fake_h),
reinterpret_cast<PRECISION **>(&fake_d),
ffake_d, pfake->mDim, pfake->mpDim);
hipLaunchKernelGGL(( DebugKernel) , dim3(256), dim3(512), 0, 0, fake_d);
CHK_ERR( hipDeviceSynchronize());
CHK_ERR( hipMemcpy(pfake->mpVal, ffake_d, 4 * 2 * 3 * 4 *
sizeof(PRECISION), hipMemcpyDeviceToHost));
for (int i=0;i<4;i++) {
cout << "[0, " << i << "]" << endl;
for (int j=0;j<2;j++) {
cout << "\t[1, " << j << "]" << endl;
for (int k=0;k<3;k++) {
cout << "\t\t[2, " << k << "]" << endl;
cout << "\t\t";
for (int l=0;l<4;l++) {
cout << pfake->mpVal[(i*24)+(j*12)+(k*4)+l] << "\t";
}
cout << endl;
}
cout << endl;
}
}
#endif
return hipSuccess;
}
hipError_t bmtCudaJacobi(PRECISION * gosa, Matrix * pp,
int imax, int jmax, int kmax) {
dim3 grid(16, 16, 1);
dim3 block(1, 1, 64);
size_t memreq_3d = config.mimax * config.mjmax *
config.mkmax * sizeof(PRECISION);
// for (int idx=0;idx<nn;idx++) {
//Jacobi
CHK_ERR( hipMemset(gosa_d, 0, sizeof(PRECISION)));
hipLaunchKernelGGL(( bmtJacobiKernel) , dim3(grid), dim3(block), 0, 0,
a_d, b_d, c_d, p_d, wrk1_d, wrk2_d, bnd_d, gosa_d,
imax, jmax, kmax);
CHK_ERR( hipDeviceSynchronize());
//Update Pressure Matrix
hipLaunchKernelGGL(( bmtUpdatePressureKernel) , dim3(grid), dim3(block), 0, 0,
p_d, wrk2_d,
imax, jmax, kmax);
CHK_ERR( hipDeviceSynchronize());
CHK_ERR( hipMemcpy(gosa_h, gosa_d,
sizeof(PRECISION), hipMemcpyDeviceToHost));
CHK_ERR( hipMemcpy(pp->mpVal, fp_d,
memreq_3d, hipMemcpyDeviceToHost));
*gosa = *gosa_h;
// cout << idx << ": " << *gosa_h << endl;
// }
// CHK_ERR( hipMemcpy(gosa_h, gosa_d,
// sizeof(PRECISION), hipMemcpyDeviceToHost));
return hipSuccess;
}
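// Minimal host-side usage sketch (hypothetical driver code; the Matrix objects
// a, b, c, p, wrk1, wrk2, bnd, the iteration count nn and the interior extents
// imax/jmax/kmax are assumed to be set up by the surrounding benchmark):
//
//   PRECISION gosa = 0.0;
//   bmtInitDeviceMemory(&a, &b, &c, &p, &wrk1, &wrk2, &bnd, /*peid=*/0);
//   for (int it = 0; it < nn; ++it)
//     bmtCudaJacobi(&gosa, &p, imax, jmax, kmax);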
|
d1c4008e2ff8fc839f6c7c1930c7b50aa8d640d0.cu
|
#include "cudaBMTKernel_MultiDim.cuh"
#include <cuda.h>
#include <iostream>
#include <stdint.h>
#include <stdio.h>
//#define _DEBUG
using namespace std;
PRECISION * fa_d, * fb_d, * fc_d,
* fp_d, * fwrk1_d, * fwrk2_d, * fbnd_d,
**** a_d, **** b_d, **** c_d,
*** p_d, *** wrk1_d, *** wrk2_d, *** bnd_d,
* gosa_d,
**** a_h, **** b_h, **** c_h,
*** p_h, *** wrk1_h, *** wrk2_h, *** bnd_h,
* gosa_h;
typedef void * PtrObj;
__global__ void bmtJacobiKernel(
PRECISION **** a, PRECISION **** b, PRECISION **** c,
PRECISION *** p, PRECISION *** wrk1, PRECISION *** wrk2,
PRECISION *** bnd, PRECISION * gosa,
int imax, int jmax, int kmax) {
int i, j, k ,i_s, j_s, k_s, i_strides, j_strides, k_strides;
PRECISION s0, ss, omega = 0.8;
// __shared__ PRECISION wgosa;
int boffset_x = blockIdx.x * blockDim.x;
int boffset_y = blockIdx.y * blockDim.y;
int boffset_z = 0;
int totThreadsx = gridDim.x * blockDim.x;
int gThreadIdxx = boffset_x + threadIdx.x;
int totThreadsy = gridDim.y * blockDim.y;
int gThreadIdxy = boffset_y + threadIdx.y;
int totThreadsz = blockDim.z;
int gThreadIdxz = boffset_z + threadIdx.z;
// int tid = (threadIdx.z * (blockDim.y * blockDim.x)) +
// (threadIdx.y * blockDim.x) +
// threadIdx.x;
// if (tid == 0)
// wgosa = 0.0;
// __syncthreads();
i_strides = (imax / totThreadsx) + 1;
j_strides = (jmax / totThreadsy) + 1;
k_strides = (kmax / totThreadsz) + 1;
for (int xxx=0;xxx<8;xxx++) {
for (i_s=0;i_s<i_strides;i_s++) {
i = (i_s * totThreadsx) + gThreadIdxx;
if ((i < 1) || (i > imax - 2))
continue;
for(int yyy=0;yyy<8;yyy++) {
for (j_s=0;j_s<j_strides;j_s++) {
j = (j_s * totThreadsy) + gThreadIdxy;
if ((j < 1) || (j > jmax - 2))
continue;
for (int zzz=0;zzz<8;zzz++) {
for (k_s=0;k_s<k_strides;k_s++) {
k = (k_s * totThreadsz) + gThreadIdxz;
if ((k < 1) || (k > kmax - 2))
continue;
s0 = a[0][i][j][k] * p[i+1][j ][k ]
+ a[1][i][j][k] * p[i ][j+1][k ]
+ a[2][i][j][k] * p[i ][j ][k+1]
+ b[0][i][j][k] * ( p[i+1][j+1][k ] - p[i+1][j-1][k ]
- p[i-1][j+1][k ] + p[i-1][j-1][k ] )
+ b[1][i][j][k] * ( p[i ][j+1][k+1] - p[i ][j-1][k+1]
- p[i ][j+1][k-1] + p[i ][j-1][k-1] )
+ b[2][i][j][k] * ( p[i+1][j ][k+1] - p[i-1][j ][k+1]
- p[i+1][j ][k-1] + p[i-1][j ][k-1] )
+ c[0][i][j][k] * p[i-1][j ][k ]
+ c[1][i][j][k] * p[i ][j-1][k ]
+ c[2][i][j][k] * p[i ][j ][k-1]
+ wrk1[i][j][k];
ss = ( s0 * a[3][i][j][k] - p[i][j][k] ) * bnd[i][j][k];
atomicAdd(gosa, ss*ss);
wrk2[i][j][k] = p[i][j][k] + omega * ss;
}
}
}
}
}
}
// __syncthreads();
/*
for (i=1;i<imax-1;++i) {
for (j=1;j<jmax-1;++j) {
for (k=1;k<kmax-1;++k) {
*/
#if 0
for (i_s=0;i_s<i_strides;i_s++) {
i = (i_s * totThreadsx) + gThreadIdxx;
if ((i < 1) || (i > imax - 2))
continue;
for (j_s=0;j_s<j_strides;j_s++) {
j = (j_s * totThreadsy) + gThreadIdxy;
if ((j < 1) || (j > jmax - 2))
continue;
for (k_s=0;k_s<k_strides;k_s++) {
k = (k_s * totThreadsz) + gThreadIdxz;
if ((k < 1) || (k > kmax - 2))
continue;
p[i][j][k] = wrk2[i][j][k];
}
}
}
#endif
#if 0
if (tid == 0) {
printf("gosa: %f\n", wgosa);
atomicAdd(gosa, wgosa);
}
#endif
}
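// Note: each interior cell evaluates the a/b/c-weighted stencil of neighbouring
// p values (s0), forms the scaled residual ss, accumulates ss*ss into *gosa via
// atomicAdd, and stores the relaxed value p[i][j][k] + omega*ss (omega = 0.8)
// into wrk2. The pressure array itself is rewritten afterwards by
// bmtUpdatePressureKernel, so the whole sweep reads a consistent copy of p.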
__global__ void bmtUpdatePressureKernel(
PRECISION *** p, PRECISION *** wrk2, int imax, int jmax, int kmax) {
int i, j, k ,i_s, j_s, k_s, i_strides, j_strides, k_strides;
int boffset_x = blockIdx.x * blockDim.x;
int boffset_y = blockIdx.y * blockDim.y;
int boffset_z = 0;
int totThreadsx = gridDim.x * blockDim.x;
int gThreadIdxx = boffset_x + threadIdx.x;
int totThreadsy = gridDim.y * blockDim.y;
int gThreadIdxy = boffset_y + threadIdx.y;
int totThreadsz = blockDim.z;
int gThreadIdxz = boffset_z + threadIdx.z;
i_strides = (imax / totThreadsx) + 1;
j_strides = (jmax / totThreadsy) + 1;
k_strides = (kmax / totThreadsz) + 1;
for (i_s=0;i_s<i_strides;i_s++) {
i = (i_s * totThreadsx) + gThreadIdxx;
if ((i < 1) || (i > imax - 2))
continue;
for (j_s=0;j_s<j_strides;j_s++) {
j = (j_s * totThreadsy) + gThreadIdxy;
if ((j < 1) || (j > jmax - 2))
continue;
for (k_s=0;k_s<k_strides;k_s++) {
k = (k_s * totThreadsz) + gThreadIdxz;
if ((k < 1) || (k > kmax - 2))
continue;
p[i][j][k] = wrk2[i][j][k];
}
}
}
}
#ifdef _DEBUG
__global__ void DebugKernel(PRECISION **** a) {
a[0][1][2][3] = 100;
a[3][0][2][1] = 200;
}
#endif
#define CHK_ERR(str) \
do { \
cudaError_t ce = str; \
if (ce != cudaSuccess) \
return ce; \
} while (0)
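// Usage note: CHK_ERR evaluates a CUDA runtime call once and, if it fails,
// makes the enclosing function return the error code, so it is only meant for
// functions whose return type can carry a cudaError_t, e.g.
//   CHK_ERR( cudaMalloc(&buf, bytes) );
// where buf and bytes stand for whatever the caller allocates.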
int bmtAssign_MultiDimension_Space_Rec(
PtrObj * ptrobj, PtrObj * ptrobj_d, PRECISION * flat_d, int dim,
int mdim, int * adim, int poffset, int doffset,
int * blocks) {
#ifdef _DEBUG
#define INDENT for (int i=0;i<dim;i++) cout << "\t";
#endif
int iIdx, offset = doffset;
if (dim < mdim - 2) {
int nloffset = 1;
for (int idx=0;idx<=dim;idx++)
nloffset *= adim[idx];
nloffset += doffset;
for (iIdx=0;iIdx<adim[dim];iIdx++) {
blocks[dim] = iIdx;
int loffset = 0;
if (dim > 0) {
int b=0;
for (int i=0;i<dim;i++) {
if (i != dim - 1)
b += blocks[i] * adim[i+1];
else
b += blocks[i];
}
loffset += adim[dim] * b;
}
#ifdef _DEBUG
INDENT;
cout << "[" << dim << ", " << iIdx << "]:" << adim[dim]
<< ": " << offset + loffset<< endl;
#endif
bmtAssign_MultiDimension_Space_Rec(
ptrobj, ptrobj_d, flat_d, dim + 1,
mdim, adim, offset + loffset, nloffset, blocks);
if ((poffset != -1) && (iIdx == 0))
ptrobj[poffset] = ptrobj_d + offset + loffset;
/*reinterpret_cast<PtrObj>(offset+loffset);*/
offset++;
}
}
else {
if (dim > 0) {
int b=0;
for (int i=0;i<dim;i++) {
if (i != dim - 1)
b += blocks[i] * adim[i+1];
else
b += blocks[i];
}
offset += adim[dim] * b;
}
for (iIdx=0;iIdx<adim[dim];iIdx++) {
#ifdef _DEBUG
INDENT;
cout << "[" << dim << ", " << iIdx << "]:" << adim[dim]
<< ": " << offset << endl;
#endif
if ((poffset != -1) && (iIdx == 0))
ptrobj[poffset] = ptrobj_d + offset;
/*reinterpret_cast<PtrObj>(offset);*/
int foffset = 0;
for (int i=0;i<mdim-1;i++) {
int ele = 1;
for (int j=i+1;j<mdim;j++)
ele *= adim[j];
if (i < mdim - 2)
foffset += blocks[i] * ele;
else
foffset += iIdx * ele;
}
ptrobj[offset] = flat_d + foffset;
/*reinterpret_cast<PtrObj>(foffset);*/
offset++;
}
}
return 0;
}
int bmtCreateDevice_MultiDimension_Space(
PRECISION ** m_h, PRECISION ** m_d, PRECISION * fm_d,
int dim, int * adim) {
int iIdx, jIdx, cnt = 1;
//Determine the number of blocks for storing pointer objects
for (iIdx=0;iIdx<dim-1;iIdx++)
cnt *= adim[iIdx];
for (iIdx=dim-3;iIdx>=0;iIdx--) {
int tcnt = 1;
for (jIdx=iIdx;jIdx>=0;jIdx--)
tcnt *= adim[jIdx];
cnt += tcnt;
}
#ifdef _DEBUG
cout << "***" << cnt << endl;
#endif
//Allocate blocks for storing pointer objects on both host and device
PtrObj * tm_h, * tm_d;
tm_h = new PtrObj[cnt];
CHK_ERR( cudaMalloc(&tm_d, cnt * sizeof(PtrObj)));
//Assign pointer values to blocks
int blocks[4];
bmtAssign_MultiDimension_Space_Rec(
tm_h, tm_d, fm_d, 0, dim,
adim, -1, 0, blocks);
//Transfer the created multidimensional array to device
CHK_ERR( cudaMemcpy(tm_d, tm_h,
cnt * sizeof(PtrObj), cudaMemcpyHostToDevice));
*m_h = reinterpret_cast<PRECISION *>(tm_h);
*m_d = reinterpret_cast<PRECISION *>(tm_d);
#ifdef _DEBUG
cout << endl << "Origin:\t" << tm_d << endl;
for (iIdx=0;iIdx<cnt;iIdx++)
cout << iIdx << ":\t" << tm_h[iIdx] << endl;
#endif
return 0;
}
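// Worked example of the pointer-object count computed above: for the 4-D
// coefficient array a with adim = {4, mimax, mjmax, mkmax}, cnt evaluates to
// 4*mimax*mjmax + 4*mimax + 4, i.e. 4 top-level pointers a[l], 4*mimax
// second-level pointers a[l][i], and 4*mimax*mjmax row pointers a[l][i][j]
// into the flat buffer; the 4x2x3x4 _DEBUG matrix used below yields
// 24 + 8 + 4 = 36 entries.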
cudaError_t bmtInitDeviceMemory(
Matrix * pa, Matrix * pb, Matrix * pc,
Matrix * pp, Matrix * pwrk1, Matrix * pwrk2,
Matrix * pbnd, int peid) {
int devCnt = 0;
CHK_ERR( cudaGetDeviceCount(&devCnt));
CHK_ERR( cudaSetDevice(peid % devCnt));
gosa_h = new PRECISION();
CHK_ERR( cudaMalloc(&gosa_d, sizeof(PRECISION)));
size_t memreq_3d = config.mimax * config.mjmax *
config.mkmax * sizeof(PRECISION);
CHK_ERR( cudaMalloc(&fa_d, 4 * memreq_3d));
CHK_ERR( cudaMalloc(&fb_d, 3 * memreq_3d));
CHK_ERR( cudaMalloc(&fc_d, 3 * memreq_3d));
CHK_ERR( cudaMalloc(&fp_d, memreq_3d));
CHK_ERR( cudaMalloc(&fwrk1_d, memreq_3d));
CHK_ERR( cudaMalloc(&fwrk2_d, memreq_3d));
CHK_ERR( cudaMalloc(&fbnd_d, memreq_3d));
CHK_ERR( cudaMemcpy(fa_d, pa->mpVal,
4 * memreq_3d, cudaMemcpyHostToDevice));
CHK_ERR( cudaMemcpy(fb_d, pb->mpVal,
3 * memreq_3d, cudaMemcpyHostToDevice));
CHK_ERR( cudaMemcpy(fc_d, pc->mpVal,
3 * memreq_3d, cudaMemcpyHostToDevice));
CHK_ERR( cudaMemcpy(fp_d, pp->mpVal,
memreq_3d, cudaMemcpyHostToDevice));
CHK_ERR( cudaMemcpy(fwrk1_d, pwrk1->mpVal,
memreq_3d, cudaMemcpyHostToDevice));
CHK_ERR( cudaMemcpy(fwrk2_d, pwrk2->mpVal,
memreq_3d, cudaMemcpyHostToDevice));
CHK_ERR( cudaMemcpy(fbnd_d, pbnd->mpVal,
memreq_3d, cudaMemcpyHostToDevice));
#ifndef _DEBUG
//Construct multi-dimensional space for matrices
bmtCreateDevice_MultiDimension_Space(
reinterpret_cast<PRECISION **>(&a_h),
reinterpret_cast<PRECISION **>(&a_d),
fa_d, pa->mDim, pa->mpDim);
bmtCreateDevice_MultiDimension_Space(
reinterpret_cast<PRECISION **>(&b_h),
reinterpret_cast<PRECISION **>(&b_d),
fb_d, pb->mDim, pb->mpDim);
bmtCreateDevice_MultiDimension_Space(
reinterpret_cast<PRECISION **>(&c_h),
reinterpret_cast<PRECISION **>(&c_d),
fc_d, pc->mDim, pc->mpDim);
bmtCreateDevice_MultiDimension_Space(
reinterpret_cast<PRECISION **>(&p_h),
reinterpret_cast<PRECISION **>(&p_d),
fp_d, pp->mDim, pp->mpDim);
bmtCreateDevice_MultiDimension_Space(
reinterpret_cast<PRECISION **>(&wrk1_h),
reinterpret_cast<PRECISION **>(&wrk1_d),
fwrk1_d, pwrk1->mDim, pwrk1->mpDim);
bmtCreateDevice_MultiDimension_Space(
reinterpret_cast<PRECISION **>(&wrk2_h),
reinterpret_cast<PRECISION **>(&wrk2_d),
fwrk2_d, pwrk2->mDim, pwrk2->mpDim);
bmtCreateDevice_MultiDimension_Space(
reinterpret_cast<PRECISION **>(&bnd_h),
reinterpret_cast<PRECISION **>(&bnd_d),
fbnd_d, pbnd->mDim, pbnd->mpDim);
#else
PRECISION **** fake_h, **** fake_d, * ffake_d;
Matrix * pfake;
pfake = new Matrix(4,2,3,4);
CHK_ERR( cudaMalloc(&ffake_d, 4 * 2 * 3 * 4 * sizeof(PRECISION)));
CHK_ERR( cudaMemset(ffake_d, 0, 4 * 2 * 3 * 4 * sizeof(PRECISION)));
bmtCreateDevice_MultiDimension_Space(
reinterpret_cast<PRECISION **>(&fake_h),
reinterpret_cast<PRECISION **>(&fake_d),
ffake_d, pfake->mDim, pfake->mpDim);
DebugKernel <<<256, 512>>> (fake_d);
CHK_ERR( cudaDeviceSynchronize());
CHK_ERR( cudaMemcpy(pfake->mpVal, ffake_d, 4 * 2 * 3 * 4 *
sizeof(PRECISION), cudaMemcpyDeviceToHost));
for (int i=0;i<4;i++) {
cout << "[0, " << i << "]" << endl;
for (int j=0;j<2;j++) {
cout << "\t[1, " << j << "]" << endl;
for (int k=0;k<3;k++) {
cout << "\t\t[2, " << k << "]" << endl;
cout << "\t\t";
for (int l=0;l<4;l++) {
cout << pfake->mpVal[(i*24)+(j*12)+(k*4)+l] << "\t";
}
cout << endl;
}
cout << endl;
}
}
#endif
return cudaSuccess;
}
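// Note on the _DEBUG branch above: instead of the benchmark matrices it builds
// a small 4x2x3x4 test array, launches DebugKernel to write the markers 100
// and 200 through the pointer table (a[0][1][2][3] and a[3][0][2][1]), copies
// the flat buffer back, and prints it so the flat offsets reached by those two
// writes can be verified by eye.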
cudaError_t bmtCudaJacobi(PRECISION * gosa, Matrix * pp,
int imax, int jmax, int kmax) {
dim3 grid(16, 16, 1);
dim3 block(1, 1, 64);
size_t memreq_3d = config.mimax * config.mjmax *
config.mkmax * sizeof(PRECISION);
// for (int idx=0;idx<nn;idx++) {
//Jacobi
CHK_ERR( cudaMemset(gosa_d, 0, sizeof(PRECISION)));
bmtJacobiKernel <<<grid, block>>> (
a_d, b_d, c_d, p_d, wrk1_d, wrk2_d, bnd_d, gosa_d,
imax, jmax, kmax);
CHK_ERR( cudaDeviceSynchronize());
//Update Pressure Matrix
bmtUpdatePressureKernel <<<grid, block>>> (
p_d, wrk2_d,
imax, jmax, kmax);
CHK_ERR( cudaDeviceSynchronize());
CHK_ERR( cudaMemcpy(gosa_h, gosa_d,
sizeof(PRECISION), cudaMemcpyDeviceToHost));
CHK_ERR( cudaMemcpy(pp->mpVal, fp_d,
memreq_3d, cudaMemcpyDeviceToHost));
*gosa = *gosa_h;
// cout << idx << ": " << *gosa_h << endl;
// }
// CHK_ERR( cudaMemcpy(gosa_h, gosa_d,
// sizeof(PRECISION), cudaMemcpyDeviceToHost));
return cudaSuccess;
}
|
086af92c11376d8e1e1a4a5477d915d7dd4b8b97.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda/mgard_cuda_common_internal.h"
#include "cuda/mgard_cuda_pi_Ql.h"
namespace mgard_cuda {
template <typename T>
__global__ void _pi_Ql(int nrow, int ncol, int nr, int nc, int row_stride,
int col_stride, int *irow, int *icol, T *dcoords_y,
T *dcoords_x, T *dv, int lddv) {
int row_Cstride = row_stride * 2;
int col_Cstride = col_stride * 2;
int y0 = (blockIdx.y * blockDim.y + threadIdx.y) * row_Cstride;
int x0 = (blockIdx.x * blockDim.x + threadIdx.x) * col_Cstride;
// In most cases these loops only need to iterate once, unless the input is
// really large
for (int y = y0; y + row_Cstride <= nr - 1;
y += blockDim.y * gridDim.y * row_Cstride) {
for (int x = x0; x + col_Cstride <= nc - 1;
x += blockDim.x * gridDim.x * col_Cstride) {
register T a00 = dv[get_idx(lddv, irow[y], icol[x])];
register T a01 = dv[get_idx(lddv, irow[y], icol[x + col_stride])];
register T a02 = dv[get_idx(lddv, irow[y], icol[x + col_Cstride])];
register T a10 = dv[get_idx(lddv, irow[y + row_stride], icol[x])];
register T a11 =
dv[get_idx(lddv, irow[y + row_stride], icol[x + col_stride])];
register T a12 =
dv[get_idx(lddv, irow[y + row_stride], icol[x + col_Cstride])];
register T a20 = dv[get_idx(lddv, irow[y + row_Cstride], icol[x])];
register T a21 =
dv[get_idx(lddv, irow[y + row_Cstride], icol[x + col_stride])];
register T a22 =
dv[get_idx(lddv, irow[y + row_Cstride], icol[x + col_Cstride])];
int h1_col =
_get_dist(dcoords_x, icol[x],
icol[x + col_stride]); // icol[x+col_stride] - icol[x];
int h2_col = _get_dist(
dcoords_x, icol[x + col_stride],
icol[x + col_Cstride]); // icol[x+col_Cstride] - icol[x+col_stride];
int hsum_col = h1_col + h2_col;
int h1_row =
_get_dist(dcoords_y, irow[y],
irow[y + row_stride]); // irow[y+row_stride] - irow[y];
int h2_row = _get_dist(
dcoords_y, irow[y + row_stride],
irow[y + row_Cstride]); // irow[y+row_Cstride] - irow[y+row_stride];
int hsum_row = h1_row + h2_row;
a01 -= (h1_col * a02 + h2_col * a00) / hsum_col;
a10 -= (h1_row * a20 + h2_row * a00) / hsum_row;
a11 -= 1.0 / (hsum_row * hsum_col) *
(a00 * h2_col * h2_row + a02 * h1_col * h2_row +
a20 * h2_col * h1_row + a22 * h1_col * h1_row);
dv[get_idx(lddv, irow[y], icol[x + col_stride])] = a01;
dv[get_idx(lddv, irow[y + row_stride], icol[x])] = a10;
dv[get_idx(lddv, irow[y + row_stride], icol[x + col_stride])] = a11;
if (x + col_Cstride == nc - 1) {
a12 -= (h1_row * a22 + h2_row * a02) / hsum_row;
dv[get_idx(lddv, irow[y + row_stride], icol[x + col_Cstride])] = a12;
}
if (y + row_Cstride == nr - 1) {
a21 -= (h1_col * a22 + h2_col * a20) / hsum_col;
dv[get_idx(lddv, irow[y + row_Cstride], icol[x + col_stride])] = a21;
}
}
}
}
template <typename T>
void pi_Ql(mgard_cuda_handle<T> &handle, int nrow, int ncol, int nr, int nc,
int row_stride, int col_stride, int *dirow, int *dicol, T *dcoords_y,
T *dcoords_x, T *dv, int lddv, int queue_idx) {
int total_thread_y = floor((double)nr / (row_stride * 2));
int total_thread_x = floor((double)nc / (col_stride * 2));
int tby = min(handle.B, total_thread_y);
int tbx = min(handle.B, total_thread_x);
int gridy = ceil((float)total_thread_y / tby);
int gridx = ceil((float)total_thread_x / tbx);
dim3 threadsPerBlock(tbx, tby);
dim3 blockPerGrid(gridx, gridy);
hipLaunchKernelGGL(( _pi_Ql), dim3(blockPerGrid), dim3(threadsPerBlock), 0,
*(hipStream_t *)handle.get(queue_idx),
nrow, ncol, nr, nc, row_stride, col_stride, dirow, dicol, dcoords_y,
dcoords_x, dv, lddv);
gpuErrchk(hipGetLastError());
#ifdef MGARD_CUDA_DEBUG
gpuErrchk(hipDeviceSynchronize());
#endif
}
template void pi_Ql<double>(mgard_cuda_handle<double> &handle, int nrow,
int ncol, int nr, int nc, int row_stride,
int col_stride, int *dirow, int *dicol,
double *dcoords_y, double *dcoords_x, double *dv,
int lddv, int queue_idx);
template void pi_Ql<float>(mgard_cuda_handle<float> &handle, int nrow, int ncol,
int nr, int nc, int row_stride, int col_stride,
int *dirow, int *dicol, float *dcoords_y,
float *dcoords_x, float *dv, int lddv,
int queue_idx);
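// Launch-configuration pattern shared by the host wrappers in this file:
// compute the logical work size (here one thread per 2x2 coarse cell, i.e.
// floor(n / (stride * 2)) per dimension), cap the block edge at handle.B, and
// round the grid up, along the lines of
//   int tb   = min(handle.B, total_thread);
//   int grid = ceil((float)total_thread / tb);
// (tb / grid / total_thread are generic stand-ins for the per-axis variables).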
template <typename T>
__global__ void _pi_Ql_cpt(int nr, int nc, int row_stride, int col_stride,
T *ddist_y, T *ddist_x, T *dv, int lddv) {
register int c0 = blockIdx.x * blockDim.x;
// register int c0_stride = c0 * col_stride;
register int r0 = blockIdx.y * blockDim.y;
// register int r0_stride = r0 * row_stride;
register int total_row = ceil((double)nr / (row_stride));
register int total_col = ceil((double)nc / (col_stride));
register int c_sm = threadIdx.x;
register int r_sm = threadIdx.y;
// extern __shared__ __align__(sizeof(T)) unsigned char smem[];
// T * sm = reinterpret_cast<T *>(smem);
T *sm = SharedMemory<T>();
// extern __shared__ double sm[]; // size: (blockDim.x + 1) * (blockDim.y + 1)
int ldsm = blockDim.x + 1;
T *v_sm = sm;
T *dist_x_sm = sm + (blockDim.x + 1) * (blockDim.y + 1);
T *dist_y_sm = dist_x_sm + blockDim.x;
for (int r = r0; r < total_row - 1; r += blockDim.y * gridDim.y) {
for (int c = c0; c < total_col - 1; c += blockDim.x * gridDim.x) {
/* Load v */
if (c + c_sm < total_col && r + r_sm < total_row) {
v_sm[r_sm * ldsm + c_sm] =
dv[(r + r_sm) * row_stride * lddv + (c + c_sm) * col_stride];
if (r_sm == 0 && r + blockDim.y < total_row) {
v_sm[blockDim.y * ldsm + c_sm] =
dv[(r + blockDim.y) * row_stride * lddv +
(c + c_sm) * col_stride];
}
if (c_sm == 0 && c + blockDim.x < total_col) {
v_sm[r_sm * ldsm + blockDim.x] = dv[(r + r_sm) * row_stride * lddv +
(c + blockDim.x) * col_stride];
}
if (r_sm == 0 && c_sm == 0 && r + blockDim.y < total_row &&
c + blockDim.x < total_col) {
v_sm[blockDim.y * ldsm + blockDim.x] =
dv[(r + blockDim.y) * row_stride * lddv +
(c + blockDim.x) * col_stride];
}
}
/* Load dist_x */
// if (c + c_sm < total_col) {
if (r_sm == 0 && c + c_sm < total_col) {
dist_x_sm[c_sm] = ddist_x[c + c_sm];
}
/* Load dist_y */
// if (r + r_sm < total_row) {
if (c_sm == 0 && r + r_sm < total_row) {
dist_y_sm[r_sm] = ddist_y[r + r_sm];
// printf("load ddist_y[%d] %f\n", r_sm, dist_y_sm[r_sm]);
}
__syncthreads();
/* Compute */
if (r_sm % 2 == 0 && c_sm % 2 != 0) {
T h1 = dist_x_sm[c_sm - 1];
T h2 = dist_x_sm[c_sm];
v_sm[r_sm * ldsm + c_sm] -= (h2 * v_sm[r_sm * ldsm + (c_sm - 1)] +
h1 * v_sm[r_sm * ldsm + (c_sm + 1)]) /
(h1 + h2);
dv[(r + r_sm) * row_stride * lddv + (c + c_sm) * col_stride] =
v_sm[r_sm * ldsm + c_sm];
}
if (r_sm % 2 != 0 && c_sm % 2 == 0) {
T h1 = dist_y_sm[r_sm - 1];
T h2 = dist_y_sm[r_sm];
v_sm[r_sm * ldsm + c_sm] -= (h2 * v_sm[(r_sm - 1) * ldsm + c_sm] +
h1 * v_sm[(r_sm + 1) * ldsm + c_sm]) /
(h1 + h2);
dv[(r + r_sm) * row_stride * lddv + (c + c_sm) * col_stride] =
v_sm[r_sm * ldsm + c_sm];
}
if (r_sm % 2 != 0 && c_sm % 2 != 0) {
T h1_col = dist_x_sm[c_sm - 1];
T h2_col = dist_x_sm[c_sm];
T h1_row = dist_y_sm[r_sm - 1];
T h2_row = dist_y_sm[r_sm];
v_sm[r_sm * ldsm + c_sm] -=
(v_sm[(r_sm - 1) * ldsm + (c_sm - 1)] * h2_col * h2_row +
v_sm[(r_sm - 1) * ldsm + (c_sm + 1)] * h1_col * h2_row +
v_sm[(r_sm + 1) * ldsm + (c_sm - 1)] * h2_col * h1_row +
v_sm[(r_sm + 1) * ldsm + (c_sm + 1)] * h1_col * h1_row) /
((h1_col + h2_col) * (h1_row + h2_row));
dv[(r + r_sm) * row_stride * lddv + (c + c_sm) * col_stride] =
v_sm[r_sm * ldsm + c_sm];
}
/* extra computation for the global border */
if (c + blockDim.x == total_col - 1) {
if (r_sm % 2 != 0 && c_sm == 0) {
T h1 = dist_y_sm[r_sm - 1];
T h2 = dist_y_sm[r_sm];
v_sm[r_sm * ldsm + blockDim.x] -=
(h2 * v_sm[(r_sm - 1) * ldsm + blockDim.x] +
h1 * v_sm[(r_sm + 1) * ldsm + blockDim.x]) /
(h1 + h2);
dv[(r + r_sm) * row_stride * lddv + (c + blockDim.x) * col_stride] =
v_sm[r_sm * ldsm + blockDim.x];
}
}
if (r + blockDim.y == total_row - 1) {
if (r_sm == 0 && c_sm % 2 != 0) {
T h1 = dist_x_sm[c_sm - 1];
T h2 = dist_x_sm[c_sm];
v_sm[blockDim.y * ldsm + c_sm] -=
(h2 * v_sm[blockDim.y * ldsm + (c_sm - 1)] +
h1 * v_sm[blockDim.y * ldsm + (c_sm + 1)]) /
(h1 + h2);
dv[(r + blockDim.y) * row_stride * lddv + (c + c_sm) * col_stride] =
v_sm[blockDim.y * ldsm + c_sm];
}
}
__syncthreads();
}
}
}
template <typename T>
void pi_Ql_cpt(mgard_cuda_handle<T> &handle, int nr, int nc, int row_stride,
int col_stride, T *ddist_y, T *ddist_x, T *dv, int lddv,
int queue_idx) {
int total_row = ceil((double)nr / (row_stride));
int total_col = ceil((double)nc / (col_stride));
int total_thread_y = total_row - 1;
int total_thread_x = total_col - 1;
int tby = min(handle.B, total_thread_y);
int tbx = min(handle.B, total_thread_x);
size_t sm_size = ((handle.B + 1) * (handle.B + 1) + 2 * handle.B) * sizeof(T);
int gridy = ceil((float)total_thread_y / tby);
int gridx = ceil((float)total_thread_x / tbx);
dim3 threadsPerBlock(tbx, tby);
dim3 blockPerGrid(gridx, gridy);
hipLaunchKernelGGL(( _pi_Ql_cpt), dim3(blockPerGrid), dim3(threadsPerBlock), sm_size,
*(hipStream_t *)handle.get(queue_idx),
nr, nc, row_stride, col_stride, ddist_y, ddist_x, dv, lddv);
gpuErrchk(hipGetLastError());
#ifdef MGARD_CUDA_DEBUG
gpuErrchk(hipDeviceSynchronize());
#endif
}
template void pi_Ql_cpt<double>(mgard_cuda_handle<double> &handle, int nr,
int nc, int row_stride, int col_stride,
double *ddist_y, double *ddist_x, double *dv,
int lddv, int queue_idx);
template void pi_Ql_cpt<float>(mgard_cuda_handle<float> &handle, int nr, int nc,
int row_stride, int col_stride, float *ddist_y,
float *ddist_x, float *dv, int lddv,
int queue_idx);
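// Shared-memory layout for the 2-D compact kernel above: sm_size =
// ((B+1)*(B+1) + 2*B) * sizeof(T) holds one (B+1) x (B+1) tile of v (the extra
// row and column are the halo loaded from the neighbouring block) followed by
// the length-B distance arrays dist_x_sm and dist_y_sm.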
template <typename T>
__global__ void _pi_Ql_first_1(const int nrow, const int ncol, const int nr,
const int nc, int *irow, int *icol_p, T *dist_r,
T *dist_c, T *v, int ldv) {
int x0 = blockIdx.x * blockDim.x + threadIdx.x;
int y0 = blockIdx.y * blockDim.y + threadIdx.y;
for (int y = y0; y < nr; y += blockDim.y * gridDim.y) {
for (int x = x0; x < ncol - nc; x += blockDim.x * gridDim.x) {
int r = irow[y];
int c = icol_p[x];
register T center = v[get_idx(ldv, r, c)];
register T left = v[get_idx(ldv, r, c - 1)];
register T right = v[get_idx(ldv, r, c + 1)];
register T h1 = dist_c[c - 1];
register T h2 = dist_c[c];
center -= (h2 * left + h1 * right) / (h1 + h2);
v[get_idx(ldv, r, c)] = center;
}
}
}
template <typename T>
void pi_Ql_first_1(mgard_cuda_handle<T> &handle, const int nrow, const int ncol,
const int nr, const int nc, int *dirow, int *dicol_p,
T *ddist_r, T *ddist_c, T *dv, int lddv, int queue_idx) {
int total_thread_x = ncol - nc;
int total_thread_y = nr;
if (total_thread_y == 0 || total_thread_x == 0)
return;
int tbx = min(handle.B, total_thread_x);
int tby = min(handle.B, total_thread_y);
int gridx = ceil((float)total_thread_x / tbx);
int gridy = ceil((float)total_thread_y / tby);
dim3 threadsPerBlock(tbx, tby);
dim3 blockPerGrid(gridx, gridy);
hipLaunchKernelGGL(( _pi_Ql_first_1), dim3(blockPerGrid), dim3(threadsPerBlock), 0,
*(hipStream_t *)handle.get(queue_idx),
nrow, ncol, nr, nc, dirow, dicol_p, ddist_r, ddist_c, dv, lddv);
gpuErrchk(hipGetLastError());
#ifdef MGARD_CUDA_DEBUG
gpuErrchk(hipDeviceSynchronize());
#endif
}
template void pi_Ql_first_1<double>(mgard_cuda_handle<double> &handle,
const int nrow, const int ncol,
const int nr, const int nc, int *dirow,
int *dicol_p, double *ddist_r,
double *ddist_c, double *dv, int lddv,
int queue_idx);
template void pi_Ql_first_1<float>(mgard_cuda_handle<float> &handle,
const int nrow, const int ncol, const int nr,
const int nc, int *dirow, int *dicol_p,
float *ddist_r, float *ddist_c, float *dv,
int lddv, int queue_idx);
template <typename T>
__global__ void _pi_Ql_first_2(const int nrow, const int ncol, const int nr,
const int nc, int *irow_p, int *icol, T *dist_r,
T *dist_c, T *v, int ldv) {
int x0 = blockIdx.x * blockDim.x + threadIdx.x;
int y0 = blockIdx.y * blockDim.y + threadIdx.y;
for (int y = y0; y < nrow - nr; y += blockDim.y * gridDim.y) {
for (int x = x0; x < nc; x += blockDim.x * gridDim.x) {
int r = irow_p[y];
int c = icol[x];
register T center = v[get_idx(ldv, r, c)];
register T up = v[get_idx(ldv, r - 1, c)];
register T down = v[get_idx(ldv, r + 1, c)];
register T h1 = dist_r[r - 1];
register T h2 = dist_r[r];
center -= (h2 * up + h1 * down) / (h1 + h2);
v[get_idx(ldv, r, c)] = center;
}
}
}
template <typename T>
void pi_Ql_first_2(mgard_cuda_handle<T> &handle, const int nrow, const int ncol,
const int nr, const int nc, int *dirow_p, int *dicol,
T *ddist_r, T *ddist_c, T *dv, int lddv, int queue_idx) {
int total_thread_x = nc;
int total_thread_y = nrow - nr;
if (total_thread_y == 0 || total_thread_x == 0)
return;
int tbx = min(handle.B, total_thread_x);
int tby = min(handle.B, total_thread_y);
int gridx = ceil((float)total_thread_x / tbx);
int gridy = ceil((float)total_thread_y / tby);
dim3 threadsPerBlock(tbx, tby);
dim3 blockPerGrid(gridx, gridy);
hipLaunchKernelGGL(( _pi_Ql_first_2), dim3(blockPerGrid), dim3(threadsPerBlock), 0,
*(hipStream_t *)handle.get(queue_idx),
nrow, ncol, nr, nc, dirow_p, dicol, ddist_r, ddist_c, dv, lddv);
gpuErrchk(hipGetLastError());
#ifdef MGARD_CUDA_DEBUG
gpuErrchk(hipDeviceSynchronize());
#endif
}
template void pi_Ql_first_2<double>(mgard_cuda_handle<double> &handle,
const int nrow, const int ncol,
const int nr, const int nc, int *dirow_p,
int *dicol, double *ddist_r,
double *ddist_c, double *dv, int lddv,
int queue_idx);
template void pi_Ql_first_2<float>(mgard_cuda_handle<float> &handle,
const int nrow, const int ncol, const int nr,
const int nc, int *dirow_p, int *dicol,
float *ddist_r, float *ddist_c, float *dv,
int lddv, int queue_idx);
template <typename T>
__global__ void _pi_Ql_first_12(const int nrow, const int ncol, const int nr,
const int nc, int *irow_p, int *icol_p,
T *dist_r, T *dist_c, T *v, int ldv) {
int x0 = blockIdx.x * blockDim.x + threadIdx.x;
int y0 = blockIdx.y * blockDim.y + threadIdx.y;
for (int y = y0; y < nrow - nr; y += blockDim.y * gridDim.y) {
for (int x = x0; x < ncol - nc; x += blockDim.x * gridDim.x) {
int r = irow_p[y];
int c = icol_p[x];
register T center = v[get_idx(ldv, r, c)];
register T upleft = v[get_idx(ldv, r - 1, c - 1)];
register T upright = v[get_idx(ldv, r - 1, c + 1)];
register T downleft = v[get_idx(ldv, r + 1, c - 1)];
register T downright = v[get_idx(ldv, r + 1, c + 1)];
register T h1_c = dist_c[c - 1];
register T h2_c = dist_c[c];
register T h1_r = dist_r[r - 1];
register T h2_r = dist_r[r];
center -= (upleft * h2_c * h2_r + upright * h1_c * h2_r +
downleft * h2_c * h1_r + downright * h1_c * h1_r) /
((h1_c + h2_c) * (h1_r + h2_r));
v[get_idx(ldv, r, c)] = center;
}
}
}
template <typename T>
void pi_Ql_first_12(mgard_cuda_handle<T> &handle, const int nrow,
const int ncol, const int nr, const int nc, int *dirow_p,
int *dicol_p, T *ddist_r, T *ddist_c, T *dv, int lddv,
int queue_idx) {
int total_thread_x = ncol - nc;
int total_thread_y = nrow - nr;
if (total_thread_y == 0 || total_thread_x == 0)
return;
int tbx = min(handle.B, total_thread_x);
int tby = min(handle.B, total_thread_y);
int gridx = ceil((float)total_thread_x / tbx);
int gridy = ceil((float)total_thread_y / tby);
dim3 threadsPerBlock(tbx, tby);
dim3 blockPerGrid(gridx, gridy);
hipLaunchKernelGGL(( _pi_Ql_first_12), dim3(blockPerGrid), dim3(threadsPerBlock), 0,
*(hipStream_t *)handle.get(queue_idx),
nrow, ncol, nr, nc, dirow_p, dicol_p, ddist_r, ddist_c, dv, lddv);
gpuErrchk(hipGetLastError());
#ifdef MGARD_CUDA_DEBUG
gpuErrchk(hipDeviceSynchronize());
#endif
}
template void pi_Ql_first_12<double>(mgard_cuda_handle<double> &handle,
const int nrow, const int ncol,
const int nr, const int nc, int *dirow_p,
int *dicol_p, double *ddist_r,
double *ddist_c, double *dv, int lddv,
int queue_idx);
template void pi_Ql_first_12<float>(mgard_cuda_handle<float> &handle,
const int nrow, const int ncol,
const int nr, const int nc, int *dirow_p,
int *dicol_p, float *ddist_r,
float *ddist_c, float *dv, int lddv,
int queue_idx);
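// Note on the _pi_Ql_first_* family: the *_p index arrays select the rows,
// columns or fibers that exist in the full (nrow x ncol) grid but not in the
// coarse subset (nr x nc here, nr x nc x nf for the 3-D variants below), hence
// loop bounds such as ncol - nc. Each kernel subtracts from such a node the
// distance-weighted linear (or bilinear) interpolation of its retained
// neighbours along the dropped dimension(s), complementing the correction that
// _pi_Ql / _pi_Ql_cpt apply on the coarse grid.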
template <typename T>
__global__ void _pi_Ql_cpt(int nr, int nc, int nf, int row_stride,
int col_stride, int fib_stride, T *ddist_r,
T *ddist_c, T *ddist_f, T *dv, int lddv1,
int lddv2) {
register int r0 = blockIdx.z * blockDim.z;
register int c0 = blockIdx.y * blockDim.y;
register int f0 = blockIdx.x * blockDim.x;
register int total_row = ceil((double)nr / (row_stride));
register int total_col = ceil((double)nc / (col_stride));
register int total_fib = ceil((double)nf / (fib_stride));
register int r_sm = threadIdx.z;
register int c_sm = threadIdx.y;
register int f_sm = threadIdx.x;
register int r_sm_ex = blockDim.z;
register int c_sm_ex = blockDim.y;
register int f_sm_ex = blockDim.x;
register int r_gl;
register int c_gl;
register int f_gl;
register int r_gl_ex;
register int c_gl_ex;
register int f_gl_ex;
// extern __shared__ __align__(sizeof(T)) unsigned char smem[];
// T * sm = reinterpret_cast<T *>(smem);
T *sm = SharedMemory<T>();
// extern __shared__ double sm[]; // size: (blockDim.x + 1) * (blockDim.y + 1)
// * (blockDim.z + 1)
int ldsm1 = blockDim.x + 1;
int ldsm2 = blockDim.y + 1;
T *v_sm = sm;
T *dist_f_sm = sm + (blockDim.x + 1) * (blockDim.y + 1) * (blockDim.z + 1);
T *dist_c_sm = dist_f_sm + blockDim.x;
T *dist_r_sm = dist_c_sm + blockDim.y;
for (int r = r0; r < total_row - 1; r += blockDim.z * gridDim.z) {
r_gl = (r + r_sm) * row_stride;
r_gl_ex = (r + blockDim.z) * row_stride;
for (int c = c0; c < total_col - 1; c += blockDim.y * gridDim.y) {
c_gl = (c + c_sm) * col_stride;
c_gl_ex = (c + blockDim.y) * col_stride;
for (int f = f0; f < total_fib - 1; f += blockDim.x * gridDim.x) {
f_gl = (f + f_sm) * fib_stride;
f_gl_ex = (f + blockDim.x) * fib_stride;
/* Load v */
if (r + r_sm < total_row && c + c_sm < total_col &&
f + f_sm < total_fib) {
// load the cubic tile of v
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm, f_sm)] =
dv[get_idx(lddv1, lddv2, r_gl, c_gl, f_gl)];
// load extra surfaces
if (r + blockDim.z < total_row && r_sm == 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm, f_sm)] =
dv[get_idx(lddv1, lddv2, r_gl_ex, c_gl, f_gl)];
}
if (c + blockDim.y < total_col && c_sm == 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm_ex, f_sm)] =
dv[get_idx(lddv1, lddv2, r_gl, c_gl_ex, f_gl)];
}
if (f + blockDim.x < total_fib && f_sm == 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm, f_sm_ex)] =
dv[get_idx(lddv1, lddv2, r_gl, c_gl, f_gl_ex)];
}
// load extra edges
if (c + blockDim.y < total_col && f + blockDim.x < total_fib &&
c_sm == 0 && f_sm == 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm_ex, f_sm_ex)] =
dv[get_idx(lddv1, lddv2, r_gl, c_gl_ex, f_gl_ex)];
}
if (r + blockDim.z < total_row && f + blockDim.x < total_fib &&
r_sm == 0 && f_sm == 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm, f_sm_ex)] =
dv[get_idx(lddv1, lddv2, r_gl_ex, c_gl, f_gl_ex)];
}
if (r + blockDim.z < total_row && c + blockDim.y < total_col &&
r_sm == 0 && c_sm == 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm_ex, f_sm)] =
dv[get_idx(lddv1, lddv2, r_gl_ex, c_gl_ex, f_gl)];
}
// load extra vertex
if (r + blockDim.z < total_row && c + blockDim.y < total_col &&
f + blockDim.x < total_fib && r_sm == 0 && c_sm == 0 &&
f_sm == 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm_ex, f_sm_ex)] =
dv[get_idx(lddv1, lddv2, r_gl_ex, c_gl_ex, f_gl_ex)];
}
// load dist
if (c_sm == 0 && f_sm == 0 && r + r_sm < total_row) {
dist_r_sm[r_sm] = ddist_r[r + r_sm];
}
if (r_sm == 0 && f_sm == 0 && c + c_sm < total_col) {
dist_c_sm[c_sm] = ddist_c[c + c_sm];
}
if (c_sm == 0 && r_sm == 0 && f + f_sm < total_fib) {
dist_f_sm[f_sm] = ddist_f[f + f_sm];
}
__syncthreads();
T h1_row = dist_r_sm[r_sm - 1];
T h2_row = dist_r_sm[r_sm];
T h1_col = dist_c_sm[c_sm - 1];
T h2_col = dist_c_sm[c_sm];
T h1_fib = dist_f_sm[f_sm - 1];
T h2_fib = dist_f_sm[f_sm];
/* Compute */
// edges
if (r_sm % 2 != 0 && c_sm % 2 == 0 && f_sm % 2 == 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm, f_sm)] -=
(v_sm[get_idx(ldsm1, ldsm2, r_sm - 1, c_sm, f_sm)] * h2_row +
v_sm[get_idx(ldsm1, ldsm2, r_sm + 1, c_sm, f_sm)] * h1_row) /
(h1_row + h2_row);
}
if (r_sm % 2 == 0 && c_sm % 2 != 0 && f_sm % 2 == 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm, f_sm)] -=
(v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm - 1, f_sm)] * h2_col +
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm + 1, f_sm)] * h1_col) /
(h1_col + h2_col);
}
if (r_sm % 2 == 0 && c_sm % 2 == 0 && f_sm % 2 != 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm, f_sm)] -=
(v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm, f_sm - 1)] * h2_fib +
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm, f_sm + 1)] * h1_fib) /
(h1_fib + h2_fib);
}
// surfaces
if (r_sm % 2 == 0 && c_sm % 2 != 0 && f_sm % 2 != 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm, f_sm)] -=
(v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm - 1, f_sm - 1)] *
h2_col * h2_fib +
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm + 1, f_sm - 1)] *
h1_col * h2_fib +
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm - 1, f_sm + 1)] *
h2_col * h1_fib +
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm + 1, f_sm + 1)] *
h1_col * h1_fib) /
((h1_col + h2_col) * (h1_fib + h2_fib));
}
if (r_sm % 2 != 0 && c_sm % 2 == 0 && f_sm % 2 != 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm, f_sm)] -=
(v_sm[get_idx(ldsm1, ldsm2, r_sm - 1, c_sm, f_sm - 1)] *
h2_row * h2_fib +
v_sm[get_idx(ldsm1, ldsm2, r_sm + 1, c_sm, f_sm - 1)] *
h1_row * h2_fib +
v_sm[get_idx(ldsm1, ldsm2, r_sm - 1, c_sm, f_sm + 1)] *
h2_row * h1_fib +
v_sm[get_idx(ldsm1, ldsm2, r_sm + 1, c_sm, f_sm + 1)] *
h1_row * h1_fib) /
((h1_row + h2_row) * (h1_fib + h2_fib));
}
if (r_sm % 2 != 0 && c_sm % 2 != 0 && f_sm % 2 == 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm, f_sm)] -=
(v_sm[get_idx(ldsm1, ldsm2, r_sm - 1, c_sm - 1, f_sm)] *
h2_row * h2_col +
v_sm[get_idx(ldsm1, ldsm2, r_sm + 1, c_sm - 1, f_sm)] *
h1_row * h2_col +
v_sm[get_idx(ldsm1, ldsm2, r_sm - 1, c_sm + 1, f_sm)] *
h2_row * h1_col +
v_sm[get_idx(ldsm1, ldsm2, r_sm + 1, c_sm + 1, f_sm)] *
h1_row * h1_col) /
((h1_row + h2_row) * (h1_col + h2_col));
}
// core
if (r_sm % 2 != 0 && c_sm % 2 != 0 && f_sm % 2 != 0) {
T x00 = (v_sm[get_idx(ldsm1, ldsm2, r_sm - 1, c_sm - 1, f_sm - 1)] *
h2_fib +
v_sm[get_idx(ldsm1, ldsm2, r_sm - 1, c_sm - 1, f_sm + 1)] *
h1_fib) /
(h2_fib + h1_fib);
T x01 = (v_sm[get_idx(ldsm1, ldsm2, r_sm - 1, c_sm + 1, f_sm - 1)] *
h2_fib +
v_sm[get_idx(ldsm1, ldsm2, r_sm - 1, c_sm + 1, f_sm + 1)] *
h1_fib) /
(h2_fib + h1_fib);
T x10 = (v_sm[get_idx(ldsm1, ldsm2, r_sm + 1, c_sm - 1, f_sm - 1)] *
h2_fib +
v_sm[get_idx(ldsm1, ldsm2, r_sm + 1, c_sm - 1, f_sm + 1)] *
h1_fib) /
(h2_fib + h1_fib);
T x11 = (v_sm[get_idx(ldsm1, ldsm2, r_sm + 1, c_sm + 1, f_sm - 1)] *
h2_fib +
v_sm[get_idx(ldsm1, ldsm2, r_sm + 1, c_sm + 1, f_sm + 1)] *
h1_fib) /
(h2_fib + h1_fib);
T y0 = (h2_col * x00 + h1_col * x01) / (h2_col + h1_col);
T y1 = (h2_col * x10 + h1_col * x11) / (h2_col + h1_col);
T z = (h2_row * y0 + h1_row * y1) / (h2_row + h1_row);
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm, f_sm)] -= z;
}
// store
dv[get_idx(lddv1, lddv2, r_gl, c_gl, f_gl)] =
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm, f_sm)];
/* extra computation for the global border */
// extra surface
if (r + blockDim.z == total_row - 1) {
if (r_sm == 0) {
// edge
if (c_sm % 2 != 0 && f_sm % 2 == 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm, f_sm)] -=
(v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm - 1, f_sm)] *
h2_col +
v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm + 1, f_sm)] *
h1_col) /
(h1_col + h2_col);
}
if (c_sm % 2 == 0 && f_sm % 2 != 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm, f_sm)] -=
(v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm, f_sm - 1)] *
h2_fib +
v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm, f_sm + 1)] *
h1_fib) /
(h1_fib + h2_fib);
}
// surface
if (c_sm % 2 != 0 && f_sm % 2 != 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm, f_sm)] -=
(v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm - 1, f_sm - 1)] *
h2_col * h2_fib +
v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm + 1, f_sm - 1)] *
h1_col * h2_fib +
v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm - 1, f_sm + 1)] *
h2_col * h1_fib +
v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm + 1, f_sm + 1)] *
h1_col * h1_fib) /
((h1_col + h2_col) * (h1_fib + h2_fib));
}
dv[get_idx(lddv1, lddv2, r_gl_ex, c_gl, f_gl)] =
v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm, f_sm)];
}
}
if (c + blockDim.y == total_col - 1) {
if (c_sm == 0) {
// edge
if (r_sm % 2 != 0 && f_sm % 2 == 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm_ex, f_sm)] -=
(v_sm[get_idx(ldsm1, ldsm2, r_sm - 1, c_sm_ex, f_sm)] *
h2_row +
v_sm[get_idx(ldsm1, ldsm2, r_sm + 1, c_sm_ex, f_sm)] *
h1_row) /
(h1_row + h2_row);
}
if (r_sm % 2 == 0 && f_sm % 2 != 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm_ex, f_sm)] -=
(v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm_ex, f_sm - 1)] *
h2_fib +
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm_ex, f_sm + 1)] *
h1_fib) /
(h1_fib + h2_fib);
}
// surface
if (r_sm % 2 != 0 && f_sm % 2 != 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm_ex, f_sm)] -=
(v_sm[get_idx(ldsm1, ldsm2, r_sm - 1, c_sm_ex, f_sm - 1)] *
h2_row * h2_fib +
v_sm[get_idx(ldsm1, ldsm2, r_sm + 1, c_sm_ex, f_sm - 1)] *
h1_row * h2_fib +
v_sm[get_idx(ldsm1, ldsm2, r_sm - 1, c_sm_ex, f_sm + 1)] *
h2_row * h1_fib +
v_sm[get_idx(ldsm1, ldsm2, r_sm + 1, c_sm_ex, f_sm + 1)] *
h1_row * h1_fib) /
((h1_row + h2_row) * (h1_fib + h2_fib));
}
dv[get_idx(lddv1, lddv2, r_gl, c_gl_ex, f_gl)] =
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm_ex, f_sm)];
}
}
if (f + blockDim.x == total_fib - 1) {
if (f_sm == 0) {
// edge
if (r_sm % 2 != 0 && c_sm % 2 == 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm, f_sm_ex)] -=
(v_sm[get_idx(ldsm1, ldsm2, r_sm - 1, c_sm, f_sm_ex)] *
h2_row +
v_sm[get_idx(ldsm1, ldsm2, r_sm + 1, c_sm, f_sm_ex)] *
h1_row) /
(h1_row + h2_row);
}
if (r_sm % 2 == 0 && c_sm % 2 != 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm, f_sm_ex)] -=
(v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm - 1, f_sm_ex)] *
h2_col +
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm + 1, f_sm_ex)] *
h1_col) /
(h1_col + h2_col);
}
// surface
if (r_sm % 2 != 0 && c_sm % 2 != 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm, f_sm_ex)] -=
(v_sm[get_idx(ldsm1, ldsm2, r_sm - 1, c_sm - 1, f_sm_ex)] *
h2_row * h2_col +
v_sm[get_idx(ldsm1, ldsm2, r_sm + 1, c_sm - 1, f_sm_ex)] *
h1_row * h2_col +
v_sm[get_idx(ldsm1, ldsm2, r_sm - 1, c_sm + 1, f_sm_ex)] *
h2_row * h1_col +
v_sm[get_idx(ldsm1, ldsm2, r_sm + 1, c_sm + 1, f_sm_ex)] *
h1_row * h1_col) /
((h1_row + h2_row) * (h1_col + h2_col));
}
dv[get_idx(lddv1, lddv2, r_gl, c_gl, f_gl_ex)] =
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm, f_sm_ex)];
}
}
// edge
if (c + blockDim.y == total_col - 1 &&
f + blockDim.x == total_fib - 1) {
if (c_sm == 0 && f_sm == 0) {
if (r_sm % 2 != 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm_ex, f_sm_ex)] -=
(v_sm[get_idx(ldsm1, ldsm2, r_sm - 1, c_sm_ex, f_sm_ex)] *
h2_row +
v_sm[get_idx(ldsm1, ldsm2, r_sm + 1, c_sm_ex, f_sm_ex)] *
h1_row) /
(h1_row + h2_row);
}
dv[get_idx(lddv1, lddv2, r_gl, c_gl_ex, f_gl_ex)] =
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm_ex, f_sm_ex)];
}
}
if (r + blockDim.z == total_row - 1 &&
f + blockDim.x == total_fib - 1) {
if (r_sm == 0 && f_sm == 0) {
if (c_sm % 2 != 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm, f_sm_ex)] -=
(v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm - 1, f_sm_ex)] *
h2_col +
v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm + 1, f_sm_ex)] *
h1_col) /
(h1_col + h2_col);
}
dv[get_idx(lddv1, lddv2, r_gl_ex, c_gl, f_gl_ex)] =
v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm, f_sm_ex)];
}
}
if (r + blockDim.z == total_row - 1 &&
c + blockDim.y == total_col - 1) {
if (r_sm == 0 && c_sm == 0) {
if (f_sm % 2 != 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm_ex, f_sm)] -=
(v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm_ex, f_sm - 1)] *
h2_fib +
v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm_ex, f_sm + 1)] *
h1_fib) /
(h1_fib + h2_fib);
}
dv[get_idx(lddv1, lddv2, r_gl_ex, c_gl_ex, f_gl)] =
v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm_ex, f_sm)];
}
}
} // restrict boundary
} // end f
} // end c
} // end r
}
template <typename T>
void pi_Ql_cpt(mgard_cuda_handle<T> &handle, int nr, int nc, int nf,
int row_stride, int col_stride, int fib_stride, T *ddist_r,
T *ddist_c, T *ddist_f, T *dv, int lddv1, int lddv2,
int queue_idx) {
int B_adjusted = min(8, handle.B);
int total_row = ceil((double)nr / (row_stride));
int total_col = ceil((double)nc / (col_stride));
int total_fib = ceil((double)nf / (fib_stride));
int total_thread_z = total_row - 1;
int total_thread_y = total_col - 1;
int total_thread_x = total_fib - 1;
int tbz = min(B_adjusted, total_thread_z);
int tby = min(B_adjusted, total_thread_y);
int tbx = min(B_adjusted, total_thread_x);
size_t sm_size = ((B_adjusted + 1) * (B_adjusted + 1) * (B_adjusted + 1) +
3 * B_adjusted) *
sizeof(T);
int gridz = ceil((float)total_thread_z / tbz);
int gridy = ceil((float)total_thread_y / tby);
int gridx = ceil((float)total_thread_x / tbx);
dim3 threadsPerBlock(tbx, tby, tbz);
dim3 blockPerGrid(gridx, gridy, gridz);
hipLaunchKernelGGL(( _pi_Ql_cpt), dim3(blockPerGrid), dim3(threadsPerBlock), sm_size,
*(hipStream_t *)handle.get(queue_idx),
nr, nc, nf, row_stride, col_stride, fib_stride, ddist_r, ddist_c, ddist_f,
dv, lddv1, lddv2);
gpuErrchk(hipGetLastError());
#ifdef MGARD_CUDA_DEBUG
gpuErrchk(hipDeviceSynchronize());
#endif
}
template void pi_Ql_cpt<double>(mgard_cuda_handle<double> &handle, int nr,
int nc, int nf, int row_stride, int col_stride,
int fib_stride, double *ddist_r,
double *ddist_c, double *ddist_f, double *dv,
int lddv1, int lddv2, int queue_idx);
template void pi_Ql_cpt<float>(mgard_cuda_handle<float> &handle, int nr, int nc,
int nf, int row_stride, int col_stride,
int fib_stride, float *ddist_r, float *ddist_c,
float *ddist_f, float *dv, int lddv1, int lddv2,
int queue_idx);
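// Note on the 3-D compact kernel above: the block edge is capped at
// min(8, handle.B), presumably so that the shared buffer of
// ((B+1)^3 + 3*B) * sizeof(T) bytes (a (B+1)^3 tile of v plus one length-B
// distance array per dimension) stays small; at B = 8 and double precision
// that is (729 + 24) * 8 = 6024 bytes.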
template <typename T>
__global__ void _pi_Ql_first_1(int nrow, int ncol, int nfib, int nr, int nc,
int nf, int *irow, int *icol, int *ifib_p,
T *dist_r, T *dist_c, T *dist_f, T *v, int ldv1,
int ldv2) {
int x0 = blockIdx.x * blockDim.x + threadIdx.x;
int y0 = blockIdx.y * blockDim.y + threadIdx.y;
int z0 = blockIdx.z * blockDim.z + threadIdx.z;
for (int z = z0; z < nr; z += blockDim.z * gridDim.z) {
for (int y = y0; y < nc; y += blockDim.y * gridDim.y) {
for (int x = x0; x < nfib - nf; x += blockDim.x * gridDim.x) {
int f = ifib_p[x];
int c = icol[y];
int r = irow[z];
register T left = v[get_idx(ldv1, ldv2, r, c, f - 1)];
register T right = v[get_idx(ldv1, ldv2, r, c, f + 1)];
register T center = v[get_idx(ldv1, ldv2, r, c, f)];
register T h1 = dist_f[f - 1];
register T h2 = dist_f[f];
center -= (h2 * left + h1 * right) / (h1 + h2);
v[get_idx(ldv1, ldv2, r, c, f)] = center;
}
}
}
}
template <typename T>
void pi_Ql_first_1(mgard_cuda_handle<T> &handle, int nrow, int ncol, int nfib,
int nr, int nc, int nf, int *dirow, int *dicol, int *difib_p,
T *ddist_r, T *ddist_c, T *ddist_f, T *dv, int lddv1,
int lddv2, int queue_idx) {
int B_adjusted = min(8, handle.B);
int total_thread_z = nr;
int total_thread_y = nc;
int total_thread_x = nfib - nf;
if (total_thread_z == 0 || total_thread_y == 0 || total_thread_x == 0)
return;
int tbz = min(B_adjusted, total_thread_z);
int tby = min(B_adjusted, total_thread_y);
int tbx = min(B_adjusted, total_thread_x);
int gridz = ceil((float)total_thread_z / tbz);
int gridy = ceil((float)total_thread_y / tby);
int gridx = ceil((float)total_thread_x / tbx);
dim3 threadsPerBlock(tbx, tby, tbz);
dim3 blockPerGrid(gridx, gridy, gridz);
hipLaunchKernelGGL(( _pi_Ql_first_1), dim3(blockPerGrid), dim3(threadsPerBlock), 0,
*(hipStream_t *)handle.get(queue_idx),
nrow, ncol, nfib, nr, nc, nf, dirow, dicol, difib_p, ddist_r, ddist_c,
ddist_f, dv, lddv1, lddv2);
gpuErrchk(hipGetLastError());
#ifdef MGARD_CUDA_DEBUG
gpuErrchk(hipDeviceSynchronize());
#endif
}
template void pi_Ql_first_1<double>(mgard_cuda_handle<double> &handle, int nrow,
int ncol, int nfib, int nr, int nc, int nf,
int *dirow, int *dicol, int *difib_p,
double *ddist_r, double *ddist_c,
double *ddist_f, double *dv, int lddv1,
int lddv2, int queue_idx);
template void pi_Ql_first_1<float>(mgard_cuda_handle<float> &handle, int nrow,
int ncol, int nfib, int nr, int nc, int nf,
int *dirow, int *dicol, int *difib_p,
float *ddist_r, float *ddist_c,
float *ddist_f, float *dv, int lddv1,
int lddv2, int queue_idx);
template <typename T>
__global__ void _pi_Ql_first_2(int nrow, int ncol, int nfib, int nr, int nc,
int nf, int *irow, int *icol_p, int *ifib,
T *dist_r, T *dist_c, T *dist_f, T *v, int ldv1,
int ldv2) {
int x0 = blockIdx.x * blockDim.x + threadIdx.x;
int y0 = blockIdx.y * blockDim.y + threadIdx.y;
int z0 = blockIdx.z * blockDim.z + threadIdx.z;
for (int z = z0; z < nr; z += blockDim.z * gridDim.z) {
for (int y = y0; y < ncol - nc; y += blockDim.y * gridDim.y) {
for (int x = x0; x < nf; x += blockDim.x * gridDim.x) {
int f = ifib[x];
int c = icol_p[y];
int r = irow[z];
register T front = v[get_idx(ldv1, ldv2, r, c - 1, f)];
register T back = v[get_idx(ldv1, ldv2, r, c + 1, f)];
register T center = v[get_idx(ldv1, ldv2, r, c, f)];
register T h1 = dist_c[c - 1];
register T h2 = dist_c[c];
center -= (h2 * front + h1 * back) / (h1 + h2);
v[get_idx(ldv1, ldv2, r, c, f)] = center;
}
}
}
}
template <typename T>
void pi_Ql_first_2(mgard_cuda_handle<T> &handle, int nrow, int ncol, int nfib,
int nr, int nc, int nf, int *dirow, int *dicol_p, int *difib,
T *ddist_r, T *ddist_c, T *ddist_f, T *dv, int lddv1,
int lddv2, int queue_idx) {
int B_adjusted = min(8, handle.B);
int total_thread_z = nr;
int total_thread_y = ncol - nc;
int total_thread_x = nf;
if (total_thread_z == 0 || total_thread_y == 0 || total_thread_x == 0)
return;
int tbz = min(B_adjusted, total_thread_z);
int tby = min(B_adjusted, total_thread_y);
int tbx = min(B_adjusted, total_thread_x);
int gridz = ceil((float)total_thread_z / tbz);
int gridy = ceil((float)total_thread_y / tby);
int gridx = ceil((float)total_thread_x / tbx);
dim3 threadsPerBlock(tbx, tby, tbz);
dim3 blockPerGrid(gridx, gridy, gridz);
hipLaunchKernelGGL(( _pi_Ql_first_2), dim3(blockPerGrid), dim3(threadsPerBlock), 0,
*(hipStream_t *)handle.get(queue_idx),
nrow, ncol, nfib, nr, nc, nf, dirow, dicol_p, difib, ddist_r, ddist_c,
ddist_f, dv, lddv1, lddv2);
gpuErrchk(hipGetLastError());
#ifdef MGARD_CUDA_DEBUG
gpuErrchk(hipDeviceSynchronize());
#endif
}
template void pi_Ql_first_2<double>(mgard_cuda_handle<double> &handle, int nrow,
int ncol, int nfib, int nr, int nc, int nf,
int *dirow, int *dicol_p, int *difib,
double *ddist_r, double *ddist_c,
double *ddist_f, double *dv, int lddv1,
int lddv2, int queue_idx);
template void pi_Ql_first_2<float>(mgard_cuda_handle<float> &handle, int nrow,
int ncol, int nfib, int nr, int nc, int nf,
int *dirow, int *dicol_p, int *difib,
float *ddist_r, float *ddist_c,
float *ddist_f, float *dv, int lddv1,
int lddv2, int queue_idx);
template <typename T>
__global__ void _pi_Ql_first_3(int nrow, int ncol, int nfib, int nr, int nc,
int nf, int *irow_p, int *icol, int *ifib,
T *dist_r, T *dist_c, T *dist_f, T *v, int ldv1,
int ldv2) {
int x0 = blockIdx.x * blockDim.x + threadIdx.x;
int y0 = blockIdx.y * blockDim.y + threadIdx.y;
int z0 = blockIdx.z * blockDim.z + threadIdx.z;
for (int z = z0; z < nrow - nr; z += blockDim.z * gridDim.z) {
for (int y = y0; y < nc; y += blockDim.y * gridDim.y) {
for (int x = x0; x < nf; x += blockDim.x * gridDim.x) {
int f = ifib[x];
int c = icol[y];
int r = irow_p[z];
register T up = v[get_idx(ldv1, ldv2, r - 1, c, f)];
register T down = v[get_idx(ldv1, ldv2, r + 1, c, f)];
register T center = v[get_idx(ldv1, ldv2, r, c, f)];
register T h1 = dist_r[r - 1];
register T h2 = dist_r[r];
center -= (h2 * up + h1 * down) / (h1 + h2);
v[get_idx(ldv1, ldv2, r, c, f)] = center;
}
}
}
}
template <typename T>
void pi_Ql_first_3(mgard_cuda_handle<T> &handle, int nrow, int ncol, int nfib,
int nr, int nc, int nf, int *dirow_p, int *dicol, int *difib,
T *ddist_r, T *ddist_c, T *ddist_f, T *dv, int lddv1,
int lddv2, int queue_idx) {
int B_adjusted = min(8, handle.B);
int total_thread_z = nrow - nr;
int total_thread_y = nc;
int total_thread_x = nf;
if (total_thread_z == 0 || total_thread_y == 0 || total_thread_x == 0)
return;
int tbz = min(B_adjusted, total_thread_z);
int tby = min(B_adjusted, total_thread_y);
int tbx = min(B_adjusted, total_thread_x);
int gridz = ceil((float)total_thread_z / tbz);
int gridy = ceil((float)total_thread_y / tby);
int gridx = ceil((float)total_thread_x / tbx);
dim3 threadsPerBlock(tbx, tby, tbz);
dim3 blockPerGrid(gridx, gridy, gridz);
hipLaunchKernelGGL(( _pi_Ql_first_3), dim3(blockPerGrid), dim3(threadsPerBlock), 0,
*(hipStream_t *)handle.get(queue_idx),
nrow, ncol, nfib, nr, nc, nf, dirow_p, dicol, difib, ddist_r, ddist_c,
ddist_f, dv, lddv1, lddv2);
gpuErrchk(hipGetLastError());
#ifdef MGARD_CUDA_DEBUG
gpuErrchk(hipDeviceSynchronize());
#endif
}
template void pi_Ql_first_3<double>(mgard_cuda_handle<double> &handle, int nrow,
int ncol, int nfib, int nr, int nc, int nf,
int *dirow_p, int *dicol, int *difib,
double *ddist_r, double *ddist_c,
double *ddist_f, double *dv, int lddv1,
int lddv2, int queue_idx);
template void pi_Ql_first_3<float>(mgard_cuda_handle<float> &handle, int nrow,
int ncol, int nfib, int nr, int nc, int nf,
int *dirow_p, int *dicol, int *difib,
float *ddist_r, float *ddist_c,
float *ddist_f, float *dv, int lddv1,
int lddv2, int queue_idx);
template <typename T>
__global__ void _pi_Ql_first_12(int nrow, int ncol, int nfib, int nr, int nc,
int nf, int *irow, int *icol_p, int *ifib_p,
T *dist_r, T *dist_c, T *dist_f, T *v, int ldv1,
int ldv2) {
int x0 = blockIdx.x * blockDim.x + threadIdx.x;
int y0 = blockIdx.y * blockDim.y + threadIdx.y;
int z0 = blockIdx.z * blockDim.z + threadIdx.z;
for (int z = z0; z < nr; z += blockDim.z * gridDim.z) {
for (int y = y0; y < ncol - nc; y += blockDim.y * gridDim.y) {
for (int x = x0; x < nfib - nf; x += blockDim.x * gridDim.x) {
int f = ifib_p[x];
int c = icol_p[y];
int r = irow[z];
register T leftfront = v[get_idx(ldv1, ldv2, r, c - 1, f - 1)];
register T rightfront = v[get_idx(ldv1, ldv2, r, c - 1, f + 1)];
register T leftback = v[get_idx(ldv1, ldv2, r, c + 1, f - 1)];
register T rightback = v[get_idx(ldv1, ldv2, r, c + 1, f + 1)];
register T center = v[get_idx(ldv1, ldv2, r, c, f)];
register T h1_f = dist_f[f - 1];
register T h2_f = dist_f[f];
register T h1_c = dist_c[c - 1];
register T h2_c = dist_c[c];
center -= (leftfront * h2_f * h2_c + rightfront * h1_f * h2_c +
leftback * h2_f * h1_c + rightback * h1_f * h1_c) /
((h1_f + h2_f) * (h1_c + h2_c));
v[get_idx(ldv1, ldv2, r, c, f)] = center;
}
}
}
}
template <typename T>
void pi_Ql_first_12(mgard_cuda_handle<T> &handle, int nrow, int ncol, int nfib,
int nr, int nc, int nf, int *dirow, int *dicol_p,
int *difib_p, T *ddist_r, T *ddist_c, T *ddist_f, T *dv,
int lddv1, int lddv2, int queue_idx) {
int B_adjusted = min(8, handle.B);
int total_thread_z = nr;
int total_thread_y = ncol - nc;
int total_thread_x = nfib - nf;
if (total_thread_z == 0 || total_thread_y == 0 || total_thread_x == 0)
return;
int tbz = min(B_adjusted, total_thread_z);
int tby = min(B_adjusted, total_thread_y);
int tbx = min(B_adjusted, total_thread_x);
int gridz = ceil((float)total_thread_z / tbz);
int gridy = ceil((float)total_thread_y / tby);
int gridx = ceil((float)total_thread_x / tbx);
dim3 threadsPerBlock(tbx, tby, tbz);
dim3 blockPerGrid(gridx, gridy, gridz);
hipLaunchKernelGGL(( _pi_Ql_first_12), dim3(blockPerGrid), dim3(threadsPerBlock), 0,
*(hipStream_t *)handle.get(queue_idx),
nrow, ncol, nfib, nr, nc, nf, dirow, dicol_p, difib_p, ddist_r, ddist_c,
ddist_f, dv, lddv1, lddv2);
gpuErrchk(hipGetLastError());
#ifdef MGARD_CUDA_DEBUG
gpuErrchk(hipDeviceSynchronize());
#endif
}
template void pi_Ql_first_12<double>(mgard_cuda_handle<double> &handle,
int nrow, int ncol, int nfib, int nr,
int nc, int nf, int *dirow, int *dicol_p,
int *difib_p, double *ddist_r,
double *ddist_c, double *ddist_f,
double *dv, int lddv1, int lddv2,
int queue_idx);
template void pi_Ql_first_12<float>(mgard_cuda_handle<float> &handle, int nrow,
int ncol, int nfib, int nr, int nc, int nf,
int *dirow, int *dicol_p, int *difib_p,
float *ddist_r, float *ddist_c,
float *ddist_f, float *dv, int lddv1,
int lddv2, int queue_idx);
template <typename T>
__global__ void _pi_Ql_first_13(int nrow, int ncol, int nfib, int nr, int nc,
int nf, int *irow_p, int *icol, int *ifib_p,
T *dist_r, T *dist_c, T *dist_f, T *v, int ldv1,
int ldv2) {
int x0 = blockIdx.x * blockDim.x + threadIdx.x;
int y0 = blockIdx.y * blockDim.y + threadIdx.y;
int z0 = blockIdx.z * blockDim.z + threadIdx.z;
for (int z = z0; z < nrow - nr; z += blockDim.z * gridDim.z) {
for (int y = y0; y < nc; y += blockDim.y * gridDim.y) {
for (int x = x0; x < nfib - nf; x += blockDim.x * gridDim.x) {
int f = ifib_p[x];
int c = icol[y];
int r = irow_p[z];
register T leftup = v[get_idx(ldv1, ldv2, r - 1, c, f - 1)];
register T rightup = v[get_idx(ldv1, ldv2, r - 1, c, f + 1)];
register T leftdown = v[get_idx(ldv1, ldv2, r + 1, c, f - 1)];
register T rightdown = v[get_idx(ldv1, ldv2, r + 1, c, f + 1)];
register T center = v[get_idx(ldv1, ldv2, r, c, f)];
register T h1_f = dist_f[f - 1];
register T h2_f = dist_f[f];
register T h1_r = dist_r[r - 1];
register T h2_r = dist_r[r];
center -= (leftup * h2_f * h2_r + rightup * h1_f * h2_r +
leftdown * h2_f * h1_r + rightdown * h1_f * h1_r) /
((h1_f + h2_f) * (h1_r + h2_r));
v[get_idx(ldv1, ldv2, r, c, f)] = center;
}
}
}
}
template <typename T>
void pi_Ql_first_13(mgard_cuda_handle<T> &handle, int nrow, int ncol, int nfib,
int nr, int nc, int nf, int *dirow_p, int *dicol,
int *difib_p, T *ddist_r, T *ddist_c, T *ddist_f, T *dv,
int lddv1, int lddv2, int queue_idx) {
int B_adjusted = min(8, handle.B);
int total_thread_z = nrow - nr;
int total_thread_y = nc;
int total_thread_x = nfib - nf;
if (total_thread_z == 0 || total_thread_y == 0 || total_thread_x == 0)
return;
int tbz = min(B_adjusted, total_thread_z);
int tby = min(B_adjusted, total_thread_y);
int tbx = min(B_adjusted, total_thread_x);
int gridz = ceil((float)total_thread_z / tbz);
int gridy = ceil((float)total_thread_y / tby);
int gridx = ceil((float)total_thread_x / tbx);
dim3 threadsPerBlock(tbx, tby, tbz);
dim3 blockPerGrid(gridx, gridy, gridz);
hipLaunchKernelGGL(( _pi_Ql_first_13), dim3(blockPerGrid), dim3(threadsPerBlock), 0,
*(hipStream_t *)handle.get(queue_idx),
nrow, ncol, nfib, nr, nc, nf, dirow_p, dicol, difib_p, ddist_r, ddist_c,
ddist_f, dv, lddv1, lddv2);
gpuErrchk(hipGetLastError());
#ifdef MGARD_CUDA_DEBUG
gpuErrchk(hipDeviceSynchronize());
#endif
}
template void pi_Ql_first_13<double>(mgard_cuda_handle<double> &handle,
int nrow, int ncol, int nfib, int nr,
int nc, int nf, int *dirow_p, int *dicol,
int *difib_p, double *ddist_r,
double *ddist_c, double *ddist_f,
double *dv, int lddv1, int lddv2,
int queue_idx);
template void pi_Ql_first_13<float>(mgard_cuda_handle<float> &handle, int nrow,
int ncol, int nfib, int nr, int nc, int nf,
int *dirow_p, int *dicol, int *difib_p,
float *ddist_r, float *ddist_c,
float *ddist_f, float *dv, int lddv1,
int lddv2, int queue_idx);
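// _pi_Ql_first_23: same pattern as _pi_Ql_first_13, but for nodes indexed by
// irow_p/icol_p with the fiber index taken from ifib; the bilinear interpolant
// is formed in the r-c plane from dist_r and dist_c.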
template <typename T>
__global__ void _pi_Ql_first_23(int nrow, int ncol, int nfib, int nr, int nc,
int nf, int *irow_p, int *icol_p, int *ifib,
T *dist_r, T *dist_c, T *dist_f, T *v, int ldv1,
int ldv2) {
int x0 = blockIdx.x * blockDim.x + threadIdx.x;
int y0 = blockIdx.y * blockDim.y + threadIdx.y;
int z0 = blockIdx.z * blockDim.z + threadIdx.z;
for (int z = z0; z < nrow - nr; z += blockDim.z * gridDim.z) {
for (int y = y0; y < ncol - nc; y += blockDim.y * gridDim.y) {
for (int x = x0; x < nf; x += blockDim.x * gridDim.x) {
int f = ifib[x];
int c = icol_p[y];
int r = irow_p[z];
register T frontup = v[get_idx(ldv1, ldv2, r - 1, c - 1, f)];
register T frontdown = v[get_idx(ldv1, ldv2, r + 1, c - 1, f)];
register T backup = v[get_idx(ldv1, ldv2, r - 1, c + 1, f)];
register T backdown = v[get_idx(ldv1, ldv2, r + 1, c + 1, f)];
register T center = v[get_idx(ldv1, ldv2, r, c, f)];
register T h1_c = dist_c[c - 1];
register T h2_c = dist_c[c];
register T h1_r = dist_r[r - 1];
register T h2_r = dist_r[r];
center -= (frontup * h2_c * h2_r + frontdown * h1_c * h2_r +
backup * h2_c * h1_r + backdown * h1_c * h1_r) /
((h1_c + h2_c) * (h1_r + h2_r));
v[get_idx(ldv1, ldv2, r, c, f)] = center;
}
}
}
}
template <typename T>
void pi_Ql_first_23(mgard_cuda_handle<T> &handle, int nrow, int ncol, int nfib,
int nr, int nc, int nf, int *dirow_p, int *dicol_p,
int *difib, T *ddist_r, T *ddist_c, T *ddist_f, T *dv,
int lddv1, int lddv2, int queue_idx) {
int B_adjusted = min(8, handle.B);
int total_thread_z = nrow - nr;
int total_thread_y = ncol - nc;
int total_thread_x = nf;
if (total_thread_z == 0 || total_thread_y == 0 || total_thread_x == 0)
return;
int tbz = min(B_adjusted, total_thread_z);
int tby = min(B_adjusted, total_thread_y);
int tbx = min(B_adjusted, total_thread_x);
int gridz = ceil((float)total_thread_z / tbz);
int gridy = ceil((float)total_thread_y / tby);
int gridx = ceil((float)total_thread_x / tbx);
dim3 threadsPerBlock(tbx, tby, tbz);
dim3 blockPerGrid(gridx, gridy, gridz);
hipLaunchKernelGGL(( _pi_Ql_first_23), dim3(blockPerGrid), dim3(threadsPerBlock), 0,
*(hipStream_t *)handle.get(queue_idx),
nrow, ncol, nfib, nr, nc, nf, dirow_p, dicol_p, difib, ddist_r, ddist_c,
ddist_f, dv, lddv1, lddv2);
gpuErrchk(hipGetLastError());
#ifdef MGARD_CUDA_DEBUG
gpuErrchk(hipDeviceSynchronize());
#endif
}
template void pi_Ql_first_23<double>(mgard_cuda_handle<double> &handle,
int nrow, int ncol, int nfib, int nr,
int nc, int nf, int *dirow_p, int *dicol_p,
int *difib, double *ddist_r,
double *ddist_c, double *ddist_f,
double *dv, int lddv1, int lddv2,
int queue_idx);
template void pi_Ql_first_23<float>(mgard_cuda_handle<float> &handle, int nrow,
int ncol, int nfib, int nr, int nc, int nf,
int *dirow_p, int *dicol_p, int *difib,
float *ddist_r, float *ddist_c,
float *ddist_f, float *dv, int lddv1,
int lddv2, int queue_idx);
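// _pi_Ql_first_123: handles nodes indexed by irow_p/icol_p/ifib_p in all three
// directions. The correction is the trilinear interpolant of the eight diagonal
// neighbors, built as nested 1-D interpolations: along f (x00..x11), then along
// c (y0, y1), then along r (z).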
template <typename T>
__global__ void _pi_Ql_first_123(int nrow, int ncol, int nfib, int nr, int nc,
int nf, int *irow_p, int *icol_p, int *ifib_p,
T *dist_r, T *dist_c, T *dist_f, T *v,
int ldv1, int ldv2) {
int x0 = blockIdx.x * blockDim.x + threadIdx.x;
int y0 = blockIdx.y * blockDim.y + threadIdx.y;
int z0 = blockIdx.z * blockDim.z + threadIdx.z;
for (int z = z0; z < nrow - nr; z += blockDim.z * gridDim.z) {
for (int y = y0; y < ncol - nc; y += blockDim.y * gridDim.y) {
for (int x = x0; x < nfib - nf; x += blockDim.x * gridDim.x) {
int f = ifib_p[x];
int c = icol_p[y];
int r = irow_p[z];
register T rightfrontup = v[get_idx(ldv1, ldv2, r - 1, c - 1, f - 1)];
register T rightfrontdown = v[get_idx(ldv1, ldv2, r + 1, c - 1, f - 1)];
register T rightbackup = v[get_idx(ldv1, ldv2, r - 1, c + 1, f - 1)];
register T rightbackdown = v[get_idx(ldv1, ldv2, r + 1, c + 1, f - 1)];
register T leftfrontup = v[get_idx(ldv1, ldv2, r - 1, c - 1, f + 1)];
register T leftfrontdown = v[get_idx(ldv1, ldv2, r + 1, c - 1, f + 1)];
register T leftbackup = v[get_idx(ldv1, ldv2, r - 1, c + 1, f + 1)];
register T leftbackdown = v[get_idx(ldv1, ldv2, r + 1, c + 1, f + 1)];
register T center = v[get_idx(ldv1, ldv2, r, c, f)];
register T h1_f = dist_f[f - 1];
register T h2_f = dist_f[f];
register T h1_c = dist_c[c - 1];
register T h2_c = dist_c[c];
register T h1_r = dist_r[r - 1];
register T h2_r = dist_r[r];
T x00 = (rightfrontup * h2_f + leftfrontup * h1_f) / (h2_f + h1_f);
T x01 = (rightbackup * h2_f + leftbackup * h1_f) / (h2_f + h1_f);
T x10 = (rightfrontdown * h2_f + leftfrontdown * h1_f) / (h2_f + h1_f);
T x11 = (rightbackdown * h2_f + leftbackdown * h1_f) / (h2_f + h1_f);
T y0 = (h2_c * x00 + h1_c * x01) / (h2_c + h1_c);
T y1 = (h2_c * x10 + h1_c * x11) / (h2_c + h1_c);
T z = (h2_r * y0 + h1_r * y1) / (h2_r + h1_r);
center -= z;
v[get_idx(ldv1, ldv2, r, c, f)] = center;
}
}
}
}
template <typename T>
void pi_Ql_first_123(mgard_cuda_handle<T> &handle, int nrow, int ncol, int nfib,
int nr, int nc, int nf, int *dirow_p, int *dicol_p,
int *difib_p, T *ddist_r, T *ddist_c, T *ddist_f, T *dv,
int lddv1, int lddv2, int queue_idx) {
int B_adjusted = min(8, handle.B);
int total_thread_z = nrow - nr;
int total_thread_y = ncol - nc;
int total_thread_x = nfib - nf;
if (total_thread_z == 0 || total_thread_y == 0 || total_thread_x == 0)
return;
int tbz = min(B_adjusted, total_thread_z);
int tby = min(B_adjusted, total_thread_y);
int tbx = min(B_adjusted, total_thread_x);
int gridz = ceil((float)total_thread_z / tbz);
int gridy = ceil((float)total_thread_y / tby);
int gridx = ceil((float)total_thread_x / tbx);
dim3 threadsPerBlock(tbx, tby, tbz);
dim3 blockPerGrid(gridx, gridy, gridz);
hipLaunchKernelGGL(( _pi_Ql_first_123), dim3(blockPerGrid), dim3(threadsPerBlock), 0,
*(hipStream_t *)handle.get(queue_idx),
nrow, ncol, nfib, nr, nc, nf, dirow_p, dicol_p, difib_p, ddist_r, ddist_c,
ddist_f, dv, lddv1, lddv2);
gpuErrchk(hipGetLastError());
#ifdef MGARD_CUDA_DEBUG
gpuErrchk(hipDeviceSynchronize());
#endif
}
template void pi_Ql_first_123<double>(mgard_cuda_handle<double> &handle,
int nrow, int ncol, int nfib, int nr,
int nc, int nf, int *dirow_p,
int *dicol_p, int *difib_p,
double *ddist_r, double *ddist_c,
double *ddist_f, double *dv, int lddv1,
int lddv2, int queue_idx);
template void pi_Ql_first_123<float>(mgard_cuda_handle<float> &handle, int nrow,
int ncol, int nfib, int nr, int nc, int nf,
int *dirow_p, int *dicol_p, int *difib_p,
float *ddist_r, float *ddist_c,
float *ddist_f, float *dv, int lddv1,
int lddv2, int queue_idx);
} // namespace mgard_cuda
|
086af92c11376d8e1e1a4a5477d915d7dd4b8b97.cu
|
#include "cuda/mgard_cuda_common_internal.h"
#include "cuda/mgard_cuda_pi_Ql.h"
namespace mgard_cuda {
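// _pi_Ql (2-D): for each coarse cell spanned by (irow[y], icol[x]) ..
// (irow[y + 2*row_stride], icol[x + 2*col_stride]) the kernel subtracts the
// interpolant of the surrounding coarse nodes from the cell's new nodes: a01
// (row edge, 1-D in x), a10 (column edge, 1-D in y) and a11 (center, bilinear).
// The trailing if-blocks patch the last column/row of the domain, and node
// spacings are obtained from dcoords_x/dcoords_y through _get_dist.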
template <typename T>
__global__ void _pi_Ql(int nrow, int ncol, int nr, int nc, int row_stride,
int col_stride, int *irow, int *icol, T *dcoords_y,
T *dcoords_x, T *dv, int lddv) {
int row_Cstride = row_stride * 2;
int col_Cstride = col_stride * 2;
int y0 = (blockIdx.y * blockDim.y + threadIdx.y) * row_Cstride;
int x0 = (blockIdx.x * blockDim.x + threadIdx.x) * col_Cstride;
// in most cases it only needs to iterate once unless the input is really
// large
for (int y = y0; y + row_Cstride <= nr - 1;
y += blockDim.y * gridDim.y * row_Cstride) {
for (int x = x0; x + col_Cstride <= nc - 1;
x += blockDim.x * gridDim.x * col_Cstride) {
register T a00 = dv[get_idx(lddv, irow[y], icol[x])];
register T a01 = dv[get_idx(lddv, irow[y], icol[x + col_stride])];
register T a02 = dv[get_idx(lddv, irow[y], icol[x + col_Cstride])];
register T a10 = dv[get_idx(lddv, irow[y + row_stride], icol[x])];
register T a11 =
dv[get_idx(lddv, irow[y + row_stride], icol[x + col_stride])];
register T a12 =
dv[get_idx(lddv, irow[y + row_stride], icol[x + col_Cstride])];
register T a20 = dv[get_idx(lddv, irow[y + row_Cstride], icol[x])];
register T a21 =
dv[get_idx(lddv, irow[y + row_Cstride], icol[x + col_stride])];
register T a22 =
dv[get_idx(lddv, irow[y + row_Cstride], icol[x + col_Cstride])];
int h1_col =
_get_dist(dcoords_x, icol[x],
icol[x + col_stride]); // icol[x+col_stride] - icol[x];
int h2_col = _get_dist(
dcoords_x, icol[x + col_stride],
icol[x + col_Cstride]); // icol[x+col_Cstride] - icol[x+col_stride];
int hsum_col = h1_col + h2_col;
int h1_row =
_get_dist(dcoords_y, irow[y],
irow[y + row_stride]); // irow[y+row_stride] - irow[y];
int h2_row = _get_dist(
dcoords_y, irow[y + row_stride],
irow[y + row_Cstride]); // irow[y+row_Cstride] - irow[y+row_stride];
int hsum_row = h1_row + h2_row;
a01 -= (h1_col * a02 + h2_col * a00) / hsum_col;
a10 -= (h1_row * a20 + h2_row * a00) / hsum_row;
a11 -= 1.0 / (hsum_row * hsum_col) *
(a00 * h2_col * h2_row + a02 * h1_col * h2_row +
a20 * h2_col * h1_row + a22 * h1_col * h1_row);
dv[get_idx(lddv, irow[y], icol[x + col_stride])] = a01;
dv[get_idx(lddv, irow[y + row_stride], icol[x])] = a10;
dv[get_idx(lddv, irow[y + row_stride], icol[x + col_stride])] = a11;
if (x + col_Cstride == nc - 1) {
a12 -= (h1_row * a22 + h2_row * a02) / hsum_row;
dv[get_idx(lddv, irow[y + row_stride], icol[x + col_Cstride])] = a12;
}
if (y + row_Cstride == nr - 1) {
a21 -= (h1_col * a22 + h2_col * a20) / hsum_col;
dv[get_idx(lddv, irow[y + row_Cstride], icol[x + col_stride])] = a21;
}
}
}
}
template <typename T>
void pi_Ql(mgard_cuda_handle<T> &handle, int nrow, int ncol, int nr, int nc,
int row_stride, int col_stride, int *dirow, int *dicol, T *dcoords_y,
T *dcoords_x, T *dv, int lddv, int queue_idx) {
int total_thread_y = floor((double)nr / (row_stride * 2));
int total_thread_x = floor((double)nc / (col_stride * 2));
int tby = min(handle.B, total_thread_y);
int tbx = min(handle.B, total_thread_x);
int gridy = ceil((float)total_thread_y / tby);
int gridx = ceil((float)total_thread_x / tbx);
dim3 threadsPerBlock(tbx, tby);
dim3 blockPerGrid(gridx, gridy);
_pi_Ql<<<blockPerGrid, threadsPerBlock, 0,
*(cudaStream_t *)handle.get(queue_idx)>>>(
nrow, ncol, nr, nc, row_stride, col_stride, dirow, dicol, dcoords_y,
dcoords_x, dv, lddv);
gpuErrchk(cudaGetLastError());
#ifdef MGARD_CUDA_DEBUG
gpuErrchk(cudaDeviceSynchronize());
#endif
}
template void pi_Ql<double>(mgard_cuda_handle<double> &handle, int nrow,
int ncol, int nr, int nc, int row_stride,
int col_stride, int *dirow, int *dicol,
double *dcoords_y, double *dcoords_x, double *dv,
int lddv, int queue_idx);
template void pi_Ql<float>(mgard_cuda_handle<float> &handle, int nrow, int ncol,
int nr, int nc, int row_stride, int col_stride,
int *dirow, int *dicol, float *dcoords_y,
float *dcoords_x, float *dv, int lddv,
int queue_idx);
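// _pi_Ql_cpt (2-D, "compact"): same correction as _pi_Ql, staged through shared
// memory. Each block caches a (blockDim.y + 1) x (blockDim.x + 1) tile of v plus
// blockDim.x entries of ddist_x and blockDim.y entries of ddist_y; the parity of
// (r_sm, c_sm) then selects interpolation along x, along y, or bilinear, with
// extra passes for the last global row/column. The host wrapper below sizes the
// shared-memory allocation to match this layout.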
template <typename T>
__global__ void _pi_Ql_cpt(int nr, int nc, int row_stride, int col_stride,
T *ddist_y, T *ddist_x, T *dv, int lddv) {
register int c0 = blockIdx.x * blockDim.x;
// register int c0_stride = c0 * col_stride;
register int r0 = blockIdx.y * blockDim.y;
// register int r0_stride = r0 * row_stride;
register int total_row = ceil((double)nr / (row_stride));
register int total_col = ceil((double)nc / (col_stride));
register int c_sm = threadIdx.x;
register int r_sm = threadIdx.y;
// extern __shared__ __align__(sizeof(T)) unsigned char smem[];
// T * sm = reinterpret_cast<T *>(smem);
T *sm = SharedMemory<T>();
// extern __shared__ double sm[]; // size: (blockDim.x + 1) * (blockDim.y + 1)
int ldsm = blockDim.x + 1;
T *v_sm = sm;
T *dist_x_sm = sm + (blockDim.x + 1) * (blockDim.y + 1);
T *dist_y_sm = dist_x_sm + blockDim.x;
for (int r = r0; r < total_row - 1; r += blockDim.y * gridDim.y) {
for (int c = c0; c < total_col - 1; c += blockDim.x * gridDim.x) {
/* Load v */
if (c + c_sm < total_col && r + r_sm < total_row) {
v_sm[r_sm * ldsm + c_sm] =
dv[(r + r_sm) * row_stride * lddv + (c + c_sm) * col_stride];
if (r_sm == 0 && r + blockDim.y < total_row) {
v_sm[blockDim.y * ldsm + c_sm] =
dv[(r + blockDim.y) * row_stride * lddv +
(c + c_sm) * col_stride];
}
if (c_sm == 0 && c + blockDim.x < total_col) {
v_sm[r_sm * ldsm + blockDim.x] = dv[(r + r_sm) * row_stride * lddv +
(c + blockDim.x) * col_stride];
}
if (r_sm == 0 && c_sm == 0 && r + blockDim.y < total_row &&
c + blockDim.x < total_col) {
v_sm[blockDim.y * ldsm + blockDim.x] =
dv[(r + blockDim.y) * row_stride * lddv +
(c + blockDim.x) * col_stride];
}
}
/* Load dist_x */
// if (c + c_sm < total_col) {
if (r_sm == 0 && c + c_sm < total_col) {
dist_x_sm[c_sm] = ddist_x[c + c_sm];
}
/* Load dist_y */
// if (r + r_sm < total_row) {
if (c_sm == 0 && r + r_sm < total_row) {
dist_y_sm[r_sm] = ddist_y[r + r_sm];
// printf("load ddist_y[%d] %f\n", r_sm, dist_y_sm[r_sm]);
}
__syncthreads();
/* Compute */
if (r_sm % 2 == 0 && c_sm % 2 != 0) {
T h1 = dist_x_sm[c_sm - 1];
T h2 = dist_x_sm[c_sm];
v_sm[r_sm * ldsm + c_sm] -= (h2 * v_sm[r_sm * ldsm + (c_sm - 1)] +
h1 * v_sm[r_sm * ldsm + (c_sm + 1)]) /
(h1 + h2);
dv[(r + r_sm) * row_stride * lddv + (c + c_sm) * col_stride] =
v_sm[r_sm * ldsm + c_sm];
}
if (r_sm % 2 != 0 && c_sm % 2 == 0) {
T h1 = dist_y_sm[r_sm - 1];
T h2 = dist_y_sm[r_sm];
v_sm[r_sm * ldsm + c_sm] -= (h2 * v_sm[(r_sm - 1) * ldsm + c_sm] +
h1 * v_sm[(r_sm + 1) * ldsm + c_sm]) /
(h1 + h2);
dv[(r + r_sm) * row_stride * lddv + (c + c_sm) * col_stride] =
v_sm[r_sm * ldsm + c_sm];
}
if (r_sm % 2 != 0 && c_sm % 2 != 0) {
T h1_col = dist_x_sm[c_sm - 1];
T h2_col = dist_x_sm[c_sm];
T h1_row = dist_y_sm[r_sm - 1];
T h2_row = dist_y_sm[r_sm];
v_sm[r_sm * ldsm + c_sm] -=
(v_sm[(r_sm - 1) * ldsm + (c_sm - 1)] * h2_col * h2_row +
v_sm[(r_sm - 1) * ldsm + (c_sm + 1)] * h1_col * h2_row +
v_sm[(r_sm + 1) * ldsm + (c_sm - 1)] * h2_col * h1_row +
v_sm[(r_sm + 1) * ldsm + (c_sm + 1)] * h1_col * h1_row) /
((h1_col + h2_col) * (h1_row + h2_row));
dv[(r + r_sm) * row_stride * lddv + (c + c_sm) * col_stride] =
v_sm[r_sm * ldsm + c_sm];
}
      /* extra computation for the global border */
if (c + blockDim.x == total_col - 1) {
if (r_sm % 2 != 0 && c_sm == 0) {
T h1 = dist_y_sm[r_sm - 1];
T h2 = dist_y_sm[r_sm];
v_sm[r_sm * ldsm + blockDim.x] -=
(h2 * v_sm[(r_sm - 1) * ldsm + blockDim.x] +
h1 * v_sm[(r_sm + 1) * ldsm + blockDim.x]) /
(h1 + h2);
dv[(r + r_sm) * row_stride * lddv + (c + blockDim.x) * col_stride] =
v_sm[r_sm * ldsm + blockDim.x];
}
}
if (r + blockDim.y == total_row - 1) {
if (r_sm == 0 && c_sm % 2 != 0) {
T h1 = dist_x_sm[c_sm - 1];
T h2 = dist_x_sm[c_sm];
v_sm[blockDim.y * ldsm + c_sm] -=
(h2 * v_sm[blockDim.y * ldsm + (c_sm - 1)] +
h1 * v_sm[blockDim.y * ldsm + (c_sm + 1)]) /
(h1 + h2);
dv[(r + blockDim.y) * row_stride * lddv + (c + c_sm) * col_stride] =
v_sm[blockDim.y * ldsm + c_sm];
}
}
__syncthreads();
}
}
}
template <typename T>
void pi_Ql_cpt(mgard_cuda_handle<T> &handle, int nr, int nc, int row_stride,
int col_stride, T *ddist_y, T *ddist_x, T *dv, int lddv,
int queue_idx) {
int total_row = ceil((double)nr / (row_stride));
int total_col = ceil((double)nc / (col_stride));
int total_thread_y = total_row - 1;
int total_thread_x = total_col - 1;
int tby = min(handle.B, total_thread_y);
int tbx = min(handle.B, total_thread_x);
size_t sm_size = ((handle.B + 1) * (handle.B + 1) + 2 * handle.B) * sizeof(T);
int gridy = ceil((float)total_thread_y / tby);
int gridx = ceil((float)total_thread_x / tbx);
dim3 threadsPerBlock(tbx, tby);
dim3 blockPerGrid(gridx, gridy);
_pi_Ql_cpt<<<blockPerGrid, threadsPerBlock, sm_size,
*(cudaStream_t *)handle.get(queue_idx)>>>(
nr, nc, row_stride, col_stride, ddist_y, ddist_x, dv, lddv);
gpuErrchk(cudaGetLastError());
#ifdef MGARD_CUDA_DEBUG
gpuErrchk(cudaDeviceSynchronize());
#endif
}
template void pi_Ql_cpt<double>(mgard_cuda_handle<double> &handle, int nr,
int nc, int row_stride, int col_stride,
double *ddist_y, double *ddist_x, double *dv,
int lddv, int queue_idx);
template void pi_Ql_cpt<float>(mgard_cuda_handle<float> &handle, int nr, int nc,
int row_stride, int col_stride, float *ddist_y,
float *ddist_x, float *dv, int lddv,
int queue_idx);
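// _pi_Ql_first_1 / _pi_Ql_first_2 / _pi_Ql_first_12 (2-D): corrections for the
// first level, where the affected nodes are addressed through the *_p index
// arrays. _first_1 interpolates along the column (c) direction for nodes at
// (irow[y], icol_p[x]), _first_2 along the row (r) direction for
// (irow_p[y], icol[x]), and _first_12 bilinearly for nodes off the coarse grid
// in both directions.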
template <typename T>
__global__ void _pi_Ql_first_1(const int nrow, const int ncol, const int nr,
const int nc, int *irow, int *icol_p, T *dist_r,
T *dist_c, T *v, int ldv) {
int x0 = blockIdx.x * blockDim.x + threadIdx.x;
int y0 = blockIdx.y * blockDim.y + threadIdx.y;
for (int y = y0; y < nr; y += blockDim.y * gridDim.y) {
for (int x = x0; x < ncol - nc; x += blockDim.x * gridDim.x) {
int r = irow[y];
int c = icol_p[x];
register T center = v[get_idx(ldv, r, c)];
register T left = v[get_idx(ldv, r, c - 1)];
register T right = v[get_idx(ldv, r, c + 1)];
register T h1 = dist_c[c - 1];
register T h2 = dist_c[c];
center -= (h2 * left + h1 * right) / (h1 + h2);
v[get_idx(ldv, r, c)] = center;
}
}
}
template <typename T>
void pi_Ql_first_1(mgard_cuda_handle<T> &handle, const int nrow, const int ncol,
const int nr, const int nc, int *dirow, int *dicol_p,
T *ddist_r, T *ddist_c, T *dv, int lddv, int queue_idx) {
int total_thread_x = ncol - nc;
int total_thread_y = nr;
if (total_thread_y == 0 || total_thread_x == 0)
return;
int tbx = min(handle.B, total_thread_x);
int tby = min(handle.B, total_thread_y);
int gridx = ceil((float)total_thread_x / tbx);
int gridy = ceil((float)total_thread_y / tby);
dim3 threadsPerBlock(tbx, tby);
dim3 blockPerGrid(gridx, gridy);
_pi_Ql_first_1<<<blockPerGrid, threadsPerBlock, 0,
*(cudaStream_t *)handle.get(queue_idx)>>>(
nrow, ncol, nr, nc, dirow, dicol_p, ddist_r, ddist_c, dv, lddv);
gpuErrchk(cudaGetLastError());
#ifdef MGARD_CUDA_DEBUG
gpuErrchk(cudaDeviceSynchronize());
#endif
}
template void pi_Ql_first_1<double>(mgard_cuda_handle<double> &handle,
const int nrow, const int ncol,
const int nr, const int nc, int *dirow,
int *dicol_p, double *ddist_r,
double *ddist_c, double *dv, int lddv,
int queue_idx);
template void pi_Ql_first_1<float>(mgard_cuda_handle<float> &handle,
const int nrow, const int ncol, const int nr,
const int nc, int *dirow, int *dicol_p,
float *ddist_r, float *ddist_c, float *dv,
int lddv, int queue_idx);
template <typename T>
__global__ void _pi_Ql_first_2(const int nrow, const int ncol, const int nr,
const int nc, int *irow_p, int *icol, T *dist_r,
T *dist_c, T *v, int ldv) {
int x0 = blockIdx.x * blockDim.x + threadIdx.x;
int y0 = blockIdx.y * blockDim.y + threadIdx.y;
for (int y = y0; y < nrow - nr; y += blockDim.y * gridDim.y) {
for (int x = x0; x < nc; x += blockDim.x * gridDim.x) {
int r = irow_p[y];
int c = icol[x];
register T center = v[get_idx(ldv, r, c)];
register T up = v[get_idx(ldv, r - 1, c)];
register T down = v[get_idx(ldv, r + 1, c)];
register T h1 = dist_r[r - 1];
register T h2 = dist_r[r];
center -= (h2 * up + h1 * down) / (h1 + h2);
v[get_idx(ldv, r, c)] = center;
}
}
}
template <typename T>
void pi_Ql_first_2(mgard_cuda_handle<T> &handle, const int nrow, const int ncol,
const int nr, const int nc, int *dirow_p, int *dicol,
T *ddist_r, T *ddist_c, T *dv, int lddv, int queue_idx) {
int total_thread_x = nc;
int total_thread_y = nrow - nr;
if (total_thread_y == 0 || total_thread_x == 0)
return;
int tbx = min(handle.B, total_thread_x);
int tby = min(handle.B, total_thread_y);
int gridx = ceil((float)total_thread_x / tbx);
int gridy = ceil((float)total_thread_y / tby);
dim3 threadsPerBlock(tbx, tby);
dim3 blockPerGrid(gridx, gridy);
_pi_Ql_first_2<<<blockPerGrid, threadsPerBlock, 0,
*(cudaStream_t *)handle.get(queue_idx)>>>(
nrow, ncol, nr, nc, dirow_p, dicol, ddist_r, ddist_c, dv, lddv);
gpuErrchk(cudaGetLastError());
#ifdef MGARD_CUDA_DEBUG
gpuErrchk(cudaDeviceSynchronize());
#endif
}
template void pi_Ql_first_2<double>(mgard_cuda_handle<double> &handle,
const int nrow, const int ncol,
const int nr, const int nc, int *dirow_p,
int *dicol, double *ddist_r,
double *ddist_c, double *dv, int lddv,
int queue_idx);
template void pi_Ql_first_2<float>(mgard_cuda_handle<float> &handle,
const int nrow, const int ncol, const int nr,
const int nc, int *dirow_p, int *dicol,
float *ddist_r, float *ddist_c, float *dv,
int lddv, int queue_idx);
template <typename T>
__global__ void _pi_Ql_first_12(const int nrow, const int ncol, const int nr,
const int nc, int *irow_p, int *icol_p,
T *dist_r, T *dist_c, T *v, int ldv) {
int x0 = blockIdx.x * blockDim.x + threadIdx.x;
int y0 = blockIdx.y * blockDim.y + threadIdx.y;
for (int y = y0; y < nrow - nr; y += blockDim.y * gridDim.y) {
for (int x = x0; x < ncol - nc; x += blockDim.x * gridDim.x) {
int r = irow_p[y];
int c = icol_p[x];
register T center = v[get_idx(ldv, r, c)];
register T upleft = v[get_idx(ldv, r - 1, c - 1)];
register T upright = v[get_idx(ldv, r - 1, c + 1)];
register T downleft = v[get_idx(ldv, r + 1, c - 1)];
register T downright = v[get_idx(ldv, r + 1, c + 1)];
register T h1_c = dist_c[c - 1];
register T h2_c = dist_c[c];
register T h1_r = dist_r[r - 1];
register T h2_r = dist_r[r];
center -= (upleft * h2_c * h2_r + upright * h1_c * h2_r +
downleft * h2_c * h1_r + downright * h1_c * h1_r) /
((h1_c + h2_c) * (h1_r + h2_r));
v[get_idx(ldv, r, c)] = center;
}
}
}
template <typename T>
void pi_Ql_first_12(mgard_cuda_handle<T> &handle, const int nrow,
const int ncol, const int nr, const int nc, int *dirow_p,
int *dicol_p, T *ddist_r, T *ddist_c, T *dv, int lddv,
int queue_idx) {
int total_thread_x = ncol - nc;
int total_thread_y = nrow - nr;
if (total_thread_y == 0 || total_thread_x == 0)
return;
int tbx = min(handle.B, total_thread_x);
int tby = min(handle.B, total_thread_y);
int gridx = ceil((float)total_thread_x / tbx);
int gridy = ceil((float)total_thread_y / tby);
dim3 threadsPerBlock(tbx, tby);
dim3 blockPerGrid(gridx, gridy);
_pi_Ql_first_12<<<blockPerGrid, threadsPerBlock, 0,
*(cudaStream_t *)handle.get(queue_idx)>>>(
nrow, ncol, nr, nc, dirow_p, dicol_p, ddist_r, ddist_c, dv, lddv);
gpuErrchk(cudaGetLastError());
#ifdef MGARD_CUDA_DEBUG
gpuErrchk(cudaDeviceSynchronize());
#endif
}
template void pi_Ql_first_12<double>(mgard_cuda_handle<double> &handle,
const int nrow, const int ncol,
const int nr, const int nc, int *dirow_p,
int *dicol_p, double *ddist_r,
double *ddist_c, double *dv, int lddv,
int queue_idx);
template void pi_Ql_first_12<float>(mgard_cuda_handle<float> &handle,
const int nrow, const int ncol,
const int nr, const int nc, int *dirow_p,
int *dicol_p, float *ddist_r,
float *ddist_c, float *dv, int lddv,
int queue_idx);
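// _pi_Ql_cpt (3-D): shared-memory variant of the 3-D correction. Each block
// caches a (blockDim.x + 1) x (blockDim.y + 1) x (blockDim.z + 1) brick of v
// plus blockDim.x/y/z entries of ddist_f/ddist_c/ddist_r; the parity of
// (r_sm, c_sm, f_sm) selects edge (1-D), face (2-D) or interior (trilinear)
// interpolation, and the trailing if-blocks repeat the work on the global
// boundary faces and edges of the domain.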
template <typename T>
__global__ void _pi_Ql_cpt(int nr, int nc, int nf, int row_stride,
int col_stride, int fib_stride, T *ddist_r,
T *ddist_c, T *ddist_f, T *dv, int lddv1,
int lddv2) {
register int r0 = blockIdx.z * blockDim.z;
register int c0 = blockIdx.y * blockDim.y;
register int f0 = blockIdx.x * blockDim.x;
register int total_row = ceil((double)nr / (row_stride));
register int total_col = ceil((double)nc / (col_stride));
register int total_fib = ceil((double)nf / (fib_stride));
register int r_sm = threadIdx.z;
register int c_sm = threadIdx.y;
register int f_sm = threadIdx.x;
register int r_sm_ex = blockDim.z;
register int c_sm_ex = blockDim.y;
register int f_sm_ex = blockDim.x;
register int r_gl;
register int c_gl;
register int f_gl;
register int r_gl_ex;
register int c_gl_ex;
register int f_gl_ex;
// extern __shared__ __align__(sizeof(T)) unsigned char smem[];
// T * sm = reinterpret_cast<T *>(smem);
T *sm = SharedMemory<T>();
// extern __shared__ double sm[]; // size: (blockDim.x + 1) * (blockDim.y + 1)
// * (blockDim.z + 1)
int ldsm1 = blockDim.x + 1;
int ldsm2 = blockDim.y + 1;
T *v_sm = sm;
T *dist_f_sm = sm + (blockDim.x + 1) * (blockDim.y + 1) * (blockDim.z + 1);
T *dist_c_sm = dist_f_sm + blockDim.x;
T *dist_r_sm = dist_c_sm + blockDim.y;
for (int r = r0; r < total_row - 1; r += blockDim.z * gridDim.z) {
r_gl = (r + r_sm) * row_stride;
r_gl_ex = (r + blockDim.z) * row_stride;
for (int c = c0; c < total_col - 1; c += blockDim.y * gridDim.y) {
c_gl = (c + c_sm) * col_stride;
c_gl_ex = (c + blockDim.y) * col_stride;
for (int f = f0; f < total_fib - 1; f += blockDim.x * gridDim.x) {
f_gl = (f + f_sm) * fib_stride;
f_gl_ex = (f + blockDim.x) * fib_stride;
/* Load v */
if (r + r_sm < total_row && c + c_sm < total_col &&
f + f_sm < total_fib) {
          // load the core cube of values
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm, f_sm)] =
dv[get_idx(lddv1, lddv2, r_gl, c_gl, f_gl)];
// load extra surfaces
if (r + blockDim.z < total_row && r_sm == 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm, f_sm)] =
dv[get_idx(lddv1, lddv2, r_gl_ex, c_gl, f_gl)];
}
if (c + blockDim.y < total_col && c_sm == 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm_ex, f_sm)] =
dv[get_idx(lddv1, lddv2, r_gl, c_gl_ex, f_gl)];
}
if (f + blockDim.x < total_fib && f_sm == 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm, f_sm_ex)] =
dv[get_idx(lddv1, lddv2, r_gl, c_gl, f_gl_ex)];
}
// load extra edges
if (c + blockDim.y < total_col && f + blockDim.x < total_fib &&
c_sm == 0 && f_sm == 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm_ex, f_sm_ex)] =
dv[get_idx(lddv1, lddv2, r_gl, c_gl_ex, f_gl_ex)];
}
if (r + blockDim.z < total_row && f + blockDim.x < total_fib &&
r_sm == 0 && f_sm == 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm, f_sm_ex)] =
dv[get_idx(lddv1, lddv2, r_gl_ex, c_gl, f_gl_ex)];
}
if (r + blockDim.z < total_row && c + blockDim.y < total_col &&
r_sm == 0 && c_sm == 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm_ex, f_sm)] =
dv[get_idx(lddv1, lddv2, r_gl_ex, c_gl_ex, f_gl)];
}
// load extra vertex
if (r + blockDim.z < total_row && c + blockDim.y < total_col &&
f + blockDim.x < total_fib && r_sm == 0 && c_sm == 0 &&
f_sm == 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm_ex, f_sm_ex)] =
dv[get_idx(lddv1, lddv2, r_gl_ex, c_gl_ex, f_gl_ex)];
}
// load dist
if (c_sm == 0 && f_sm == 0 && r + r_sm < total_row) {
dist_r_sm[r_sm] = ddist_r[r + r_sm];
}
if (r_sm == 0 && f_sm == 0 && c + c_sm < total_col) {
dist_c_sm[c_sm] = ddist_c[c + c_sm];
}
if (c_sm == 0 && r_sm == 0 && f + f_sm < total_fib) {
dist_f_sm[f_sm] = ddist_f[f + f_sm];
}
__syncthreads();
T h1_row = dist_r_sm[r_sm - 1];
T h2_row = dist_r_sm[r_sm];
T h1_col = dist_c_sm[c_sm - 1];
T h2_col = dist_c_sm[c_sm];
T h1_fib = dist_f_sm[f_sm - 1];
T h2_fib = dist_f_sm[f_sm];
/* Compute */
// edges
if (r_sm % 2 != 0 && c_sm % 2 == 0 && f_sm % 2 == 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm, f_sm)] -=
(v_sm[get_idx(ldsm1, ldsm2, r_sm - 1, c_sm, f_sm)] * h2_row +
v_sm[get_idx(ldsm1, ldsm2, r_sm + 1, c_sm, f_sm)] * h1_row) /
(h1_row + h2_row);
}
if (r_sm % 2 == 0 && c_sm % 2 != 0 && f_sm % 2 == 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm, f_sm)] -=
(v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm - 1, f_sm)] * h2_col +
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm + 1, f_sm)] * h1_col) /
(h1_col + h2_col);
}
if (r_sm % 2 == 0 && c_sm % 2 == 0 && f_sm % 2 != 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm, f_sm)] -=
(v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm, f_sm - 1)] * h2_fib +
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm, f_sm + 1)] * h1_fib) /
(h1_fib + h2_fib);
}
// surfaces
if (r_sm % 2 == 0 && c_sm % 2 != 0 && f_sm % 2 != 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm, f_sm)] -=
(v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm - 1, f_sm - 1)] *
h2_col * h2_fib +
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm + 1, f_sm - 1)] *
h1_col * h2_fib +
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm - 1, f_sm + 1)] *
h2_col * h1_fib +
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm + 1, f_sm + 1)] *
h1_col * h1_fib) /
((h1_col + h2_col) * (h1_fib + h2_fib));
}
if (r_sm % 2 != 0 && c_sm % 2 == 0 && f_sm % 2 != 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm, f_sm)] -=
(v_sm[get_idx(ldsm1, ldsm2, r_sm - 1, c_sm, f_sm - 1)] *
h2_row * h2_fib +
v_sm[get_idx(ldsm1, ldsm2, r_sm + 1, c_sm, f_sm - 1)] *
h1_row * h2_fib +
v_sm[get_idx(ldsm1, ldsm2, r_sm - 1, c_sm, f_sm + 1)] *
h2_row * h1_fib +
v_sm[get_idx(ldsm1, ldsm2, r_sm + 1, c_sm, f_sm + 1)] *
h1_row * h1_fib) /
((h1_row + h2_row) * (h1_fib + h2_fib));
}
if (r_sm % 2 != 0 && c_sm % 2 != 0 && f_sm % 2 == 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm, f_sm)] -=
(v_sm[get_idx(ldsm1, ldsm2, r_sm - 1, c_sm - 1, f_sm)] *
h2_row * h2_col +
v_sm[get_idx(ldsm1, ldsm2, r_sm + 1, c_sm - 1, f_sm)] *
h1_row * h2_col +
v_sm[get_idx(ldsm1, ldsm2, r_sm - 1, c_sm + 1, f_sm)] *
h2_row * h1_col +
v_sm[get_idx(ldsm1, ldsm2, r_sm + 1, c_sm + 1, f_sm)] *
h1_row * h1_col) /
((h1_row + h2_row) * (h1_col + h2_col));
}
// core
if (r_sm % 2 != 0 && c_sm % 2 != 0 && f_sm % 2 != 0) {
T x00 = (v_sm[get_idx(ldsm1, ldsm2, r_sm - 1, c_sm - 1, f_sm - 1)] *
h2_fib +
v_sm[get_idx(ldsm1, ldsm2, r_sm - 1, c_sm - 1, f_sm + 1)] *
h1_fib) /
(h2_fib + h1_fib);
T x01 = (v_sm[get_idx(ldsm1, ldsm2, r_sm - 1, c_sm + 1, f_sm - 1)] *
h2_fib +
v_sm[get_idx(ldsm1, ldsm2, r_sm - 1, c_sm + 1, f_sm + 1)] *
h1_fib) /
(h2_fib + h1_fib);
T x10 = (v_sm[get_idx(ldsm1, ldsm2, r_sm + 1, c_sm - 1, f_sm - 1)] *
h2_fib +
v_sm[get_idx(ldsm1, ldsm2, r_sm + 1, c_sm - 1, f_sm + 1)] *
h1_fib) /
(h2_fib + h1_fib);
T x11 = (v_sm[get_idx(ldsm1, ldsm2, r_sm + 1, c_sm + 1, f_sm - 1)] *
h2_fib +
v_sm[get_idx(ldsm1, ldsm2, r_sm + 1, c_sm + 1, f_sm + 1)] *
h1_fib) /
(h2_fib + h1_fib);
T y0 = (h2_col * x00 + h1_col * x01) / (h2_col + h1_col);
T y1 = (h2_col * x10 + h1_col * x11) / (h2_col + h1_col);
T z = (h2_row * y0 + h1_row * y1) / (h2_row + h1_row);
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm, f_sm)] -= z;
}
// store
dv[get_idx(lddv1, lddv2, r_gl, c_gl, f_gl)] =
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm, f_sm)];
          /* extra computation for the global border */
// extra surface
if (r + blockDim.z == total_row - 1) {
if (r_sm == 0) {
// edge
if (c_sm % 2 != 0 && f_sm % 2 == 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm, f_sm)] -=
(v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm - 1, f_sm)] *
h2_col +
v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm + 1, f_sm)] *
h1_col) /
(h1_col + h2_col);
}
if (c_sm % 2 == 0 && f_sm % 2 != 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm, f_sm)] -=
(v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm, f_sm - 1)] *
h2_fib +
v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm, f_sm + 1)] *
h1_fib) /
(h1_fib + h2_fib);
}
// surface
if (c_sm % 2 != 0 && f_sm % 2 != 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm, f_sm)] -=
(v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm - 1, f_sm - 1)] *
h2_col * h2_fib +
v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm + 1, f_sm - 1)] *
h1_col * h2_fib +
v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm - 1, f_sm + 1)] *
h2_col * h1_fib +
v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm + 1, f_sm + 1)] *
h1_col * h1_fib) /
((h1_col + h2_col) * (h1_fib + h2_fib));
}
dv[get_idx(lddv1, lddv2, r_gl_ex, c_gl, f_gl)] =
v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm, f_sm)];
}
}
if (c + blockDim.y == total_col - 1) {
if (c_sm == 0) {
// edge
if (r_sm % 2 != 0 && f_sm % 2 == 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm_ex, f_sm)] -=
(v_sm[get_idx(ldsm1, ldsm2, r_sm - 1, c_sm_ex, f_sm)] *
h2_row +
v_sm[get_idx(ldsm1, ldsm2, r_sm + 1, c_sm_ex, f_sm)] *
h1_row) /
(h1_row + h2_row);
}
if (r_sm % 2 == 0 && f_sm % 2 != 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm_ex, f_sm)] -=
(v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm_ex, f_sm - 1)] *
h2_fib +
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm_ex, f_sm + 1)] *
h1_fib) /
(h1_fib + h2_fib);
}
// surface
if (r_sm % 2 != 0 && f_sm % 2 != 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm_ex, f_sm)] -=
(v_sm[get_idx(ldsm1, ldsm2, r_sm - 1, c_sm_ex, f_sm - 1)] *
h2_row * h2_fib +
v_sm[get_idx(ldsm1, ldsm2, r_sm + 1, c_sm_ex, f_sm - 1)] *
h1_row * h2_fib +
v_sm[get_idx(ldsm1, ldsm2, r_sm - 1, c_sm_ex, f_sm + 1)] *
h2_row * h1_fib +
v_sm[get_idx(ldsm1, ldsm2, r_sm + 1, c_sm_ex, f_sm + 1)] *
h1_row * h1_fib) /
((h1_row + h2_row) * (h1_fib + h2_fib));
}
dv[get_idx(lddv1, lddv2, r_gl, c_gl_ex, f_gl)] =
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm_ex, f_sm)];
}
}
if (f + blockDim.x == total_fib - 1) {
if (f_sm == 0) {
// edge
if (r_sm % 2 != 0 && c_sm % 2 == 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm, f_sm_ex)] -=
(v_sm[get_idx(ldsm1, ldsm2, r_sm - 1, c_sm, f_sm_ex)] *
h2_row +
v_sm[get_idx(ldsm1, ldsm2, r_sm + 1, c_sm, f_sm_ex)] *
h1_row) /
(h1_row + h2_row);
}
if (r_sm % 2 == 0 && c_sm % 2 != 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm, f_sm_ex)] -=
(v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm - 1, f_sm_ex)] *
h2_col +
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm + 1, f_sm_ex)] *
h1_col) /
(h1_col + h2_col);
}
// surface
if (r_sm % 2 != 0 && c_sm % 2 != 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm, f_sm_ex)] -=
(v_sm[get_idx(ldsm1, ldsm2, r_sm - 1, c_sm - 1, f_sm_ex)] *
h2_row * h2_col +
v_sm[get_idx(ldsm1, ldsm2, r_sm + 1, c_sm - 1, f_sm_ex)] *
h1_row * h2_col +
v_sm[get_idx(ldsm1, ldsm2, r_sm - 1, c_sm + 1, f_sm_ex)] *
h2_row * h1_col +
v_sm[get_idx(ldsm1, ldsm2, r_sm + 1, c_sm + 1, f_sm_ex)] *
h1_row * h1_col) /
((h1_row + h2_row) * (h1_col + h2_col));
}
dv[get_idx(lddv1, lddv2, r_gl, c_gl, f_gl_ex)] =
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm, f_sm_ex)];
}
}
// edge
if (c + blockDim.y == total_col - 1 &&
f + blockDim.x == total_fib - 1) {
if (c_sm == 0 && f_sm == 0) {
if (r_sm % 2 != 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm_ex, f_sm_ex)] -=
(v_sm[get_idx(ldsm1, ldsm2, r_sm - 1, c_sm_ex, f_sm_ex)] *
h2_row +
v_sm[get_idx(ldsm1, ldsm2, r_sm + 1, c_sm_ex, f_sm_ex)] *
h1_row) /
(h1_row + h2_row);
}
dv[get_idx(lddv1, lddv2, r_gl, c_gl_ex, f_gl_ex)] =
v_sm[get_idx(ldsm1, ldsm2, r_sm, c_sm_ex, f_sm_ex)];
}
}
if (r + blockDim.z == total_row - 1 &&
f + blockDim.x == total_fib - 1) {
if (r_sm == 0 && f_sm == 0) {
if (c_sm % 2 != 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm, f_sm_ex)] -=
(v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm - 1, f_sm_ex)] *
h2_col +
v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm + 1, f_sm_ex)] *
h1_col) /
(h1_col + h2_col);
}
dv[get_idx(lddv1, lddv2, r_gl_ex, c_gl, f_gl_ex)] =
v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm, f_sm_ex)];
}
}
if (r + blockDim.z == total_row - 1 &&
c + blockDim.y == total_col - 1) {
if (r_sm == 0 && c_sm == 0) {
if (f_sm % 2 != 0) {
v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm_ex, f_sm)] -=
(v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm_ex, f_sm - 1)] *
h2_fib +
v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm_ex, f_sm + 1)] *
h1_fib) /
(h1_fib + h2_fib);
}
dv[get_idx(lddv1, lddv2, r_gl_ex, c_gl_ex, f_gl)] =
v_sm[get_idx(ldsm1, ldsm2, r_sm_ex, c_sm_ex, f_sm)];
}
}
} // restrict boundary
} // end f
} // end c
} // end r
}
template <typename T>
void pi_Ql_cpt(mgard_cuda_handle<T> &handle, int nr, int nc, int nf,
int row_stride, int col_stride, int fib_stride, T *ddist_r,
T *ddist_c, T *ddist_f, T *dv, int lddv1, int lddv2,
int queue_idx) {
int B_adjusted = min(8, handle.B);
int total_row = ceil((double)nr / (row_stride));
int total_col = ceil((double)nc / (col_stride));
int total_fib = ceil((double)nf / (fib_stride));
int total_thread_z = total_row - 1;
int total_thread_y = total_col - 1;
int total_thread_x = total_fib - 1;
int tbz = min(B_adjusted, total_thread_z);
int tby = min(B_adjusted, total_thread_y);
int tbx = min(B_adjusted, total_thread_x);
size_t sm_size = ((B_adjusted + 1) * (B_adjusted + 1) * (B_adjusted + 1) +
3 * B_adjusted) *
sizeof(T);
int gridz = ceil((float)total_thread_z / tbz);
int gridy = ceil((float)total_thread_y / tby);
int gridx = ceil((float)total_thread_x / tbx);
dim3 threadsPerBlock(tbx, tby, tbz);
dim3 blockPerGrid(gridx, gridy, gridz);
_pi_Ql_cpt<<<blockPerGrid, threadsPerBlock, sm_size,
*(cudaStream_t *)handle.get(queue_idx)>>>(
nr, nc, nf, row_stride, col_stride, fib_stride, ddist_r, ddist_c, ddist_f,
dv, lddv1, lddv2);
gpuErrchk(cudaGetLastError());
#ifdef MGARD_CUDA_DEBUG
gpuErrchk(cudaDeviceSynchronize());
#endif
}
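// Usage sketch (illustrative only): the handle, the device spacing arrays
// ddist_r/ddist_c/ddist_f and the data buffer dv with leading dimensions
// lddv1/lddv2 are assumed to have been set up elsewhere.
//
//   mgard_cuda_handle<float> handle(/* ... */);
//   // correct all interior nodes of the current 3-D level with unit strides
//   pi_Ql_cpt(handle, nr, nc, nf,
//             /*row_stride=*/1, /*col_stride=*/1, /*fib_stride=*/1,
//             ddist_r, ddist_c, ddist_f, dv, lddv1, lddv2, /*queue_idx=*/0);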
template void pi_Ql_cpt<double>(mgard_cuda_handle<double> &handle, int nr,
int nc, int nf, int row_stride, int col_stride,
int fib_stride, double *ddist_r,
double *ddist_c, double *ddist_f, double *dv,
int lddv1, int lddv2, int queue_idx);
template void pi_Ql_cpt<float>(mgard_cuda_handle<float> &handle, int nr, int nc,
int nf, int row_stride, int col_stride,
int fib_stride, float *ddist_r, float *ddist_c,
float *ddist_f, float *dv, int lddv1, int lddv2,
int queue_idx);
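// The remaining 3-D kernels (_pi_Ql_first_1/_2/_3/_12/_13/_23/_123) mirror the
// 2-D "first" kernels above: each handles one combination of kept
// (irow/icol/ifib) and off-grid (irow_p/icol_p/ifib_p) index sets and subtracts
// the corresponding 1-D, 2-D or trilinear interpolant, weighted by
// ddist_r/ddist_c/ddist_f.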
template <typename T>
__global__ void _pi_Ql_first_1(int nrow, int ncol, int nfib, int nr, int nc,
int nf, int *irow, int *icol, int *ifib_p,
T *dist_r, T *dist_c, T *dist_f, T *v, int ldv1,
int ldv2) {
int x0 = blockIdx.x * blockDim.x + threadIdx.x;
int y0 = blockIdx.y * blockDim.y + threadIdx.y;
int z0 = blockIdx.z * blockDim.z + threadIdx.z;
for (int z = z0; z < nr; z += blockDim.z * gridDim.z) {
for (int y = y0; y < nc; y += blockDim.y * gridDim.y) {
for (int x = x0; x < nfib - nf; x += blockDim.x * gridDim.x) {
int f = ifib_p[x];
int c = icol[y];
int r = irow[z];
register T left = v[get_idx(ldv1, ldv2, r, c, f - 1)];
register T right = v[get_idx(ldv1, ldv2, r, c, f + 1)];
register T center = v[get_idx(ldv1, ldv2, r, c, f)];
register T h1 = dist_f[f - 1];
register T h2 = dist_f[f];
center -= (h2 * left + h1 * right) / (h1 + h2);
v[get_idx(ldv1, ldv2, r, c, f)] = center;
}
}
}
}
template <typename T>
void pi_Ql_first_1(mgard_cuda_handle<T> &handle, int nrow, int ncol, int nfib,
int nr, int nc, int nf, int *dirow, int *dicol, int *difib_p,
T *ddist_r, T *ddist_c, T *ddist_f, T *dv, int lddv1,
int lddv2, int queue_idx) {
int B_adjusted = min(8, handle.B);
int total_thread_z = nr;
int total_thread_y = nc;
int total_thread_x = nfib - nf;
if (total_thread_z == 0 || total_thread_y == 0 || total_thread_x == 0)
return;
int tbz = min(B_adjusted, total_thread_z);
int tby = min(B_adjusted, total_thread_y);
int tbx = min(B_adjusted, total_thread_x);
int gridz = ceil((float)total_thread_z / tbz);
int gridy = ceil((float)total_thread_y / tby);
int gridx = ceil((float)total_thread_x / tbx);
dim3 threadsPerBlock(tbx, tby, tbz);
dim3 blockPerGrid(gridx, gridy, gridz);
_pi_Ql_first_1<<<blockPerGrid, threadsPerBlock, 0,
*(cudaStream_t *)handle.get(queue_idx)>>>(
nrow, ncol, nfib, nr, nc, nf, dirow, dicol, difib_p, ddist_r, ddist_c,
ddist_f, dv, lddv1, lddv2);
gpuErrchk(cudaGetLastError());
#ifdef MGARD_CUDA_DEBUG
gpuErrchk(cudaDeviceSynchronize());
#endif
}
template void pi_Ql_first_1<double>(mgard_cuda_handle<double> &handle, int nrow,
int ncol, int nfib, int nr, int nc, int nf,
int *dirow, int *dicol, int *difib_p,
double *ddist_r, double *ddist_c,
double *ddist_f, double *dv, int lddv1,
int lddv2, int queue_idx);
template void pi_Ql_first_1<float>(mgard_cuda_handle<float> &handle, int nrow,
int ncol, int nfib, int nr, int nc, int nf,
int *dirow, int *dicol, int *difib_p,
float *ddist_r, float *ddist_c,
float *ddist_f, float *dv, int lddv1,
int lddv2, int queue_idx);
template <typename T>
__global__ void _pi_Ql_first_2(int nrow, int ncol, int nfib, int nr, int nc,
int nf, int *irow, int *icol_p, int *ifib,
T *dist_r, T *dist_c, T *dist_f, T *v, int ldv1,
int ldv2) {
int x0 = blockIdx.x * blockDim.x + threadIdx.x;
int y0 = blockIdx.y * blockDim.y + threadIdx.y;
int z0 = blockIdx.z * blockDim.z + threadIdx.z;
for (int z = z0; z < nr; z += blockDim.z * gridDim.z) {
for (int y = y0; y < ncol - nc; y += blockDim.y * gridDim.y) {
for (int x = x0; x < nf; x += blockDim.x * gridDim.x) {
int f = ifib[x];
int c = icol_p[y];
int r = irow[z];
register T front = v[get_idx(ldv1, ldv2, r, c - 1, f)];
register T back = v[get_idx(ldv1, ldv2, r, c + 1, f)];
register T center = v[get_idx(ldv1, ldv2, r, c, f)];
register T h1 = dist_c[c - 1];
register T h2 = dist_c[c];
center -= (h2 * front + h1 * back) / (h1 + h2);
v[get_idx(ldv1, ldv2, r, c, f)] = center;
}
}
}
}
template <typename T>
void pi_Ql_first_2(mgard_cuda_handle<T> &handle, int nrow, int ncol, int nfib,
int nr, int nc, int nf, int *dirow, int *dicol_p, int *difib,
T *ddist_r, T *ddist_c, T *ddist_f, T *dv, int lddv1,
int lddv2, int queue_idx) {
int B_adjusted = min(8, handle.B);
int total_thread_z = nr;
int total_thread_y = ncol - nc;
int total_thread_x = nf;
if (total_thread_z == 0 || total_thread_y == 0 || total_thread_x == 0)
return;
int tbz = min(B_adjusted, total_thread_z);
int tby = min(B_adjusted, total_thread_y);
int tbx = min(B_adjusted, total_thread_x);
int gridz = ceil((float)total_thread_z / tbz);
int gridy = ceil((float)total_thread_y / tby);
int gridx = ceil((float)total_thread_x / tbx);
dim3 threadsPerBlock(tbx, tby, tbz);
dim3 blockPerGrid(gridx, gridy, gridz);
_pi_Ql_first_2<<<blockPerGrid, threadsPerBlock, 0,
*(cudaStream_t *)handle.get(queue_idx)>>>(
nrow, ncol, nfib, nr, nc, nf, dirow, dicol_p, difib, ddist_r, ddist_c,
ddist_f, dv, lddv1, lddv2);
gpuErrchk(cudaGetLastError());
#ifdef MGARD_CUDA_DEBUG
gpuErrchk(cudaDeviceSynchronize());
#endif
}
template void pi_Ql_first_2<double>(mgard_cuda_handle<double> &handle, int nrow,
int ncol, int nfib, int nr, int nc, int nf,
int *dirow, int *dicol_p, int *difib,
double *ddist_r, double *ddist_c,
double *ddist_f, double *dv, int lddv1,
int lddv2, int queue_idx);
template void pi_Ql_first_2<float>(mgard_cuda_handle<float> &handle, int nrow,
int ncol, int nfib, int nr, int nc, int nf,
int *dirow, int *dicol_p, int *difib,
float *ddist_r, float *ddist_c,
float *ddist_f, float *dv, int lddv1,
int lddv2, int queue_idx);
template <typename T>
__global__ void _pi_Ql_first_3(int nrow, int ncol, int nfib, int nr, int nc,
int nf, int *irow_p, int *icol, int *ifib,
T *dist_r, T *dist_c, T *dist_f, T *v, int ldv1,
int ldv2) {
int x0 = blockIdx.x * blockDim.x + threadIdx.x;
int y0 = blockIdx.y * blockDim.y + threadIdx.y;
int z0 = blockIdx.z * blockDim.z + threadIdx.z;
for (int z = z0; z < nrow - nr; z += blockDim.z * gridDim.z) {
for (int y = y0; y < nc; y += blockDim.y * gridDim.y) {
for (int x = x0; x < nf; x += blockDim.x * gridDim.x) {
int f = ifib[x];
int c = icol[y];
int r = irow_p[z];
register T up = v[get_idx(ldv1, ldv2, r - 1, c, f)];
register T down = v[get_idx(ldv1, ldv2, r + 1, c, f)];
register T center = v[get_idx(ldv1, ldv2, r, c, f)];
register T h1 = dist_r[r - 1];
register T h2 = dist_r[r];
center -= (h2 * up + h1 * down) / (h1 + h2);
v[get_idx(ldv1, ldv2, r, c, f)] = center;
}
}
}
}
template <typename T>
void pi_Ql_first_3(mgard_cuda_handle<T> &handle, int nrow, int ncol, int nfib,
int nr, int nc, int nf, int *dirow_p, int *dicol, int *difib,
T *ddist_r, T *ddist_c, T *ddist_f, T *dv, int lddv1,
int lddv2, int queue_idx) {
int B_adjusted = min(8, handle.B);
int total_thread_z = nrow - nr;
int total_thread_y = nc;
int total_thread_x = nf;
if (total_thread_z == 0 || total_thread_y == 0 || total_thread_x == 0)
return;
int tbz = min(B_adjusted, total_thread_z);
int tby = min(B_adjusted, total_thread_y);
int tbx = min(B_adjusted, total_thread_x);
int gridz = ceil((float)total_thread_z / tbz);
int gridy = ceil((float)total_thread_y / tby);
int gridx = ceil((float)total_thread_x / tbx);
dim3 threadsPerBlock(tbx, tby, tbz);
dim3 blockPerGrid(gridx, gridy, gridz);
_pi_Ql_first_3<<<blockPerGrid, threadsPerBlock, 0,
*(cudaStream_t *)handle.get(queue_idx)>>>(
nrow, ncol, nfib, nr, nc, nf, dirow_p, dicol, difib, ddist_r, ddist_c,
ddist_f, dv, lddv1, lddv2);
gpuErrchk(cudaGetLastError());
#ifdef MGARD_CUDA_DEBUG
gpuErrchk(cudaDeviceSynchronize());
#endif
}
template void pi_Ql_first_3<double>(mgard_cuda_handle<double> &handle, int nrow,
int ncol, int nfib, int nr, int nc, int nf,
int *dirow_p, int *dicol, int *difib,
double *ddist_r, double *ddist_c,
double *ddist_f, double *dv, int lddv1,
int lddv2, int queue_idx);
template void pi_Ql_first_3<float>(mgard_cuda_handle<float> &handle, int nrow,
int ncol, int nfib, int nr, int nc, int nf,
int *dirow_p, int *dicol, int *difib,
float *ddist_r, float *ddist_c,
float *ddist_f, float *dv, int lddv1,
int lddv2, int queue_idx);
template <typename T>
__global__ void _pi_Ql_first_12(int nrow, int ncol, int nfib, int nr, int nc,
int nf, int *irow, int *icol_p, int *ifib_p,
T *dist_r, T *dist_c, T *dist_f, T *v, int ldv1,
int ldv2) {
int x0 = blockIdx.x * blockDim.x + threadIdx.x;
int y0 = blockIdx.y * blockDim.y + threadIdx.y;
int z0 = blockIdx.z * blockDim.z + threadIdx.z;
for (int z = z0; z < nr; z += blockDim.z * gridDim.z) {
for (int y = y0; y < ncol - nc; y += blockDim.y * gridDim.y) {
for (int x = x0; x < nfib - nf; x += blockDim.x * gridDim.x) {
int f = ifib_p[x];
int c = icol_p[y];
int r = irow[z];
register T leftfront = v[get_idx(ldv1, ldv2, r, c - 1, f - 1)];
register T rightfront = v[get_idx(ldv1, ldv2, r, c - 1, f + 1)];
register T leftback = v[get_idx(ldv1, ldv2, r, c + 1, f - 1)];
register T rightback = v[get_idx(ldv1, ldv2, r, c + 1, f + 1)];
register T center = v[get_idx(ldv1, ldv2, r, c, f)];
register T h1_f = dist_f[f - 1];
register T h2_f = dist_f[f];
register T h1_c = dist_c[c - 1];
register T h2_c = dist_c[c];
center -= (leftfront * h2_f * h2_c + rightfront * h1_f * h2_c +
leftback * h2_f * h1_c + rightback * h1_f * h1_c) /
((h1_f + h2_f) * (h1_c + h2_c));
v[get_idx(ldv1, ldv2, r, c, f)] = center;
}
}
}
}
template <typename T>
void pi_Ql_first_12(mgard_cuda_handle<T> &handle, int nrow, int ncol, int nfib,
int nr, int nc, int nf, int *dirow, int *dicol_p,
int *difib_p, T *ddist_r, T *ddist_c, T *ddist_f, T *dv,
int lddv1, int lddv2, int queue_idx) {
int B_adjusted = min(8, handle.B);
int total_thread_z = nr;
int total_thread_y = ncol - nc;
int total_thread_x = nfib - nf;
if (total_thread_z == 0 || total_thread_y == 0 || total_thread_x == 0)
return;
int tbz = min(B_adjusted, total_thread_z);
int tby = min(B_adjusted, total_thread_y);
int tbx = min(B_adjusted, total_thread_x);
int gridz = ceil((float)total_thread_z / tbz);
int gridy = ceil((float)total_thread_y / tby);
int gridx = ceil((float)total_thread_x / tbx);
dim3 threadsPerBlock(tbx, tby, tbz);
dim3 blockPerGrid(gridx, gridy, gridz);
_pi_Ql_first_12<<<blockPerGrid, threadsPerBlock, 0,
*(cudaStream_t *)handle.get(queue_idx)>>>(
nrow, ncol, nfib, nr, nc, nf, dirow, dicol_p, difib_p, ddist_r, ddist_c,
ddist_f, dv, lddv1, lddv2);
gpuErrchk(cudaGetLastError());
#ifdef MGARD_CUDA_DEBUG
gpuErrchk(cudaDeviceSynchronize());
#endif
}
template void pi_Ql_first_12<double>(mgard_cuda_handle<double> &handle,
int nrow, int ncol, int nfib, int nr,
int nc, int nf, int *dirow, int *dicol_p,
int *difib_p, double *ddist_r,
double *ddist_c, double *ddist_f,
double *dv, int lddv1, int lddv2,
int queue_idx);
template void pi_Ql_first_12<float>(mgard_cuda_handle<float> &handle, int nrow,
int ncol, int nfib, int nr, int nc, int nf,
int *dirow, int *dicol_p, int *difib_p,
float *ddist_r, float *ddist_c,
float *ddist_f, float *dv, int lddv1,
int lddv2, int queue_idx);
template <typename T>
__global__ void _pi_Ql_first_13(int nrow, int ncol, int nfib, int nr, int nc,
int nf, int *irow_p, int *icol, int *ifib_p,
T *dist_r, T *dist_c, T *dist_f, T *v, int ldv1,
int ldv2) {
int x0 = blockIdx.x * blockDim.x + threadIdx.x;
int y0 = blockIdx.y * blockDim.y + threadIdx.y;
int z0 = blockIdx.z * blockDim.z + threadIdx.z;
for (int z = z0; z < nrow - nr; z += blockDim.z * gridDim.z) {
for (int y = y0; y < nc; y += blockDim.y * gridDim.y) {
for (int x = x0; x < nfib - nf; x += blockDim.x * gridDim.x) {
int f = ifib_p[x];
int c = icol[y];
int r = irow_p[z];
register T leftup = v[get_idx(ldv1, ldv2, r - 1, c, f - 1)];
register T rightup = v[get_idx(ldv1, ldv2, r - 1, c, f + 1)];
register T leftdown = v[get_idx(ldv1, ldv2, r + 1, c, f - 1)];
register T rightdown = v[get_idx(ldv1, ldv2, r + 1, c, f + 1)];
register T center = v[get_idx(ldv1, ldv2, r, c, f)];
register T h1_f = dist_f[f - 1];
register T h2_f = dist_f[f];
register T h1_r = dist_r[r - 1];
register T h2_r = dist_r[r];
center -= (leftup * h2_f * h2_r + rightup * h1_f * h2_r +
leftdown * h2_f * h1_r + rightdown * h1_f * h1_r) /
((h1_f + h2_f) * (h1_r + h2_r));
v[get_idx(ldv1, ldv2, r, c, f)] = center;
}
}
}
}
template <typename T>
void pi_Ql_first_13(mgard_cuda_handle<T> &handle, int nrow, int ncol, int nfib,
int nr, int nc, int nf, int *dirow_p, int *dicol,
int *difib_p, T *ddist_r, T *ddist_c, T *ddist_f, T *dv,
int lddv1, int lddv2, int queue_idx) {
int B_adjusted = min(8, handle.B);
int total_thread_z = nrow - nr;
int total_thread_y = nc;
int total_thread_x = nfib - nf;
if (total_thread_z == 0 || total_thread_y == 0 || total_thread_x == 0)
return;
int tbz = min(B_adjusted, total_thread_z);
int tby = min(B_adjusted, total_thread_y);
int tbx = min(B_adjusted, total_thread_x);
int gridz = ceil((float)total_thread_z / tbz);
int gridy = ceil((float)total_thread_y / tby);
int gridx = ceil((float)total_thread_x / tbx);
dim3 threadsPerBlock(tbx, tby, tbz);
dim3 blockPerGrid(gridx, gridy, gridz);
_pi_Ql_first_13<<<blockPerGrid, threadsPerBlock, 0,
*(cudaStream_t *)handle.get(queue_idx)>>>(
nrow, ncol, nfib, nr, nc, nf, dirow_p, dicol, difib_p, ddist_r, ddist_c,
ddist_f, dv, lddv1, lddv2);
gpuErrchk(cudaGetLastError());
#ifdef MGARD_CUDA_DEBUG
gpuErrchk(cudaDeviceSynchronize());
#endif
}
template void pi_Ql_first_13<double>(mgard_cuda_handle<double> &handle,
int nrow, int ncol, int nfib, int nr,
int nc, int nf, int *dirow_p, int *dicol,
int *difib_p, double *ddist_r,
double *ddist_c, double *ddist_f,
double *dv, int lddv1, int lddv2,
int queue_idx);
template void pi_Ql_first_13<float>(mgard_cuda_handle<float> &handle, int nrow,
int ncol, int nfib, int nr, int nc, int nf,
int *dirow_p, int *dicol, int *difib_p,
float *ddist_r, float *ddist_c,
float *ddist_f, float *dv, int lddv1,
int lddv2, int queue_idx);
template <typename T>
__global__ void _pi_Ql_first_23(int nrow, int ncol, int nfib, int nr, int nc,
int nf, int *irow_p, int *icol_p, int *ifib,
T *dist_r, T *dist_c, T *dist_f, T *v, int ldv1,
int ldv2) {
int x0 = blockIdx.x * blockDim.x + threadIdx.x;
int y0 = blockIdx.y * blockDim.y + threadIdx.y;
int z0 = blockIdx.z * blockDim.z + threadIdx.z;
for (int z = z0; z < nrow - nr; z += blockDim.z * gridDim.z) {
for (int y = y0; y < ncol - nc; y += blockDim.y * gridDim.y) {
for (int x = x0; x < nf; x += blockDim.x * gridDim.x) {
int f = ifib[x];
int c = icol_p[y];
int r = irow_p[z];
register T frontup = v[get_idx(ldv1, ldv2, r - 1, c - 1, f)];
register T frontdown = v[get_idx(ldv1, ldv2, r + 1, c - 1, f)];
register T backup = v[get_idx(ldv1, ldv2, r - 1, c + 1, f)];
register T backdown = v[get_idx(ldv1, ldv2, r + 1, c + 1, f)];
register T center = v[get_idx(ldv1, ldv2, r, c, f)];
register T h1_c = dist_c[c - 1];
register T h2_c = dist_c[c];
register T h1_r = dist_r[r - 1];
register T h2_r = dist_r[r];
center -= (frontup * h2_c * h2_r + frontdown * h1_c * h2_r +
backup * h2_c * h1_r + backdown * h1_c * h1_r) /
((h1_c + h2_c) * (h1_r + h2_r));
v[get_idx(ldv1, ldv2, r, c, f)] = center;
}
}
}
}
template <typename T>
void pi_Ql_first_23(mgard_cuda_handle<T> &handle, int nrow, int ncol, int nfib,
int nr, int nc, int nf, int *dirow_p, int *dicol_p,
int *difib, T *ddist_r, T *ddist_c, T *ddist_f, T *dv,
int lddv1, int lddv2, int queue_idx) {
int B_adjusted = min(8, handle.B);
int total_thread_z = nrow - nr;
int total_thread_y = ncol - nc;
int total_thread_x = nf;
if (total_thread_z == 0 || total_thread_y == 0 || total_thread_x == 0)
return;
int tbz = min(B_adjusted, total_thread_z);
int tby = min(B_adjusted, total_thread_y);
int tbx = min(B_adjusted, total_thread_x);
int gridz = ceil((float)total_thread_z / tbz);
int gridy = ceil((float)total_thread_y / tby);
int gridx = ceil((float)total_thread_x / tbx);
dim3 threadsPerBlock(tbx, tby, tbz);
dim3 blockPerGrid(gridx, gridy, gridz);
_pi_Ql_first_23<<<blockPerGrid, threadsPerBlock, 0,
*(cudaStream_t *)handle.get(queue_idx)>>>(
nrow, ncol, nfib, nr, nc, nf, dirow_p, dicol_p, difib, ddist_r, ddist_c,
ddist_f, dv, lddv1, lddv2);
gpuErrchk(cudaGetLastError());
#ifdef MGARD_CUDA_DEBUG
gpuErrchk(cudaDeviceSynchronize());
#endif
}
template void pi_Ql_first_23<double>(mgard_cuda_handle<double> &handle,
int nrow, int ncol, int nfib, int nr,
int nc, int nf, int *dirow_p, int *dicol_p,
int *difib, double *ddist_r,
double *ddist_c, double *ddist_f,
double *dv, int lddv1, int lddv2,
int queue_idx);
template void pi_Ql_first_23<float>(mgard_cuda_handle<float> &handle, int nrow,
int ncol, int nfib, int nr, int nc, int nf,
int *dirow_p, int *dicol_p, int *difib,
float *ddist_r, float *ddist_c,
float *ddist_f, float *dv, int lddv1,
int lddv2, int queue_idx);
template <typename T>
__global__ void _pi_Ql_first_123(int nrow, int ncol, int nfib, int nr, int nc,
int nf, int *irow_p, int *icol_p, int *ifib_p,
T *dist_r, T *dist_c, T *dist_f, T *v,
int ldv1, int ldv2) {
int x0 = blockIdx.x * blockDim.x + threadIdx.x;
int y0 = blockIdx.y * blockDim.y + threadIdx.y;
int z0 = blockIdx.z * blockDim.z + threadIdx.z;
for (int z = z0; z < nrow - nr; z += blockDim.z * gridDim.z) {
for (int y = y0; y < ncol - nc; y += blockDim.y * gridDim.y) {
for (int x = x0; x < nfib - nf; x += blockDim.x * gridDim.x) {
int f = ifib_p[x];
int c = icol_p[y];
int r = irow_p[z];
register T rightfrontup = v[get_idx(ldv1, ldv2, r - 1, c - 1, f - 1)];
register T rightfrontdown = v[get_idx(ldv1, ldv2, r + 1, c - 1, f - 1)];
register T rightbackup = v[get_idx(ldv1, ldv2, r - 1, c + 1, f - 1)];
register T rightbackdown = v[get_idx(ldv1, ldv2, r + 1, c + 1, f - 1)];
register T leftfrontup = v[get_idx(ldv1, ldv2, r - 1, c - 1, f + 1)];
register T leftfrontdown = v[get_idx(ldv1, ldv2, r + 1, c - 1, f + 1)];
register T leftbackup = v[get_idx(ldv1, ldv2, r - 1, c + 1, f + 1)];
register T leftbackdown = v[get_idx(ldv1, ldv2, r + 1, c + 1, f + 1)];
register T center = v[get_idx(ldv1, ldv2, r, c, f)];
register T h1_f = dist_f[f - 1];
register T h2_f = dist_f[f];
register T h1_c = dist_c[c - 1];
register T h2_c = dist_c[c];
register T h1_r = dist_r[r - 1];
register T h2_r = dist_r[r];
T x00 = (rightfrontup * h2_f + leftfrontup * h1_f) / (h2_f + h1_f);
T x01 = (rightbackup * h2_f + leftbackup * h1_f) / (h2_f + h1_f);
T x10 = (rightfrontdown * h2_f + leftfrontdown * h1_f) / (h2_f + h1_f);
T x11 = (rightbackdown * h2_f + leftbackdown * h1_f) / (h2_f + h1_f);
T y0 = (h2_c * x00 + h1_c * x01) / (h2_c + h1_c);
T y1 = (h2_c * x10 + h1_c * x11) / (h2_c + h1_c);
T z = (h2_r * y0 + h1_r * y1) / (h2_r + h1_r);
center -= z;
v[get_idx(ldv1, ldv2, r, c, f)] = center;
}
}
}
}
template <typename T>
void pi_Ql_first_123(mgard_cuda_handle<T> &handle, int nrow, int ncol, int nfib,
int nr, int nc, int nf, int *dirow_p, int *dicol_p,
int *difib_p, T *ddist_r, T *ddist_c, T *ddist_f, T *dv,
int lddv1, int lddv2, int queue_idx) {
int B_adjusted = min(8, handle.B);
int total_thread_z = nrow - nr;
int total_thread_y = ncol - nc;
int total_thread_x = nfib - nf;
if (total_thread_z == 0 || total_thread_y == 0 || total_thread_x == 0)
return;
int tbz = min(B_adjusted, total_thread_z);
int tby = min(B_adjusted, total_thread_y);
int tbx = min(B_adjusted, total_thread_x);
int gridz = ceil((float)total_thread_z / tbz);
int gridy = ceil((float)total_thread_y / tby);
int gridx = ceil((float)total_thread_x / tbx);
dim3 threadsPerBlock(tbx, tby, tbz);
dim3 blockPerGrid(gridx, gridy, gridz);
_pi_Ql_first_123<<<blockPerGrid, threadsPerBlock, 0,
*(cudaStream_t *)handle.get(queue_idx)>>>(
nrow, ncol, nfib, nr, nc, nf, dirow_p, dicol_p, difib_p, ddist_r, ddist_c,
ddist_f, dv, lddv1, lddv2);
gpuErrchk(cudaGetLastError());
#ifdef MGARD_CUDA_DEBUG
gpuErrchk(cudaDeviceSynchronize());
#endif
}
template void pi_Ql_first_123<double>(mgard_cuda_handle<double> &handle,
int nrow, int ncol, int nfib, int nr,
int nc, int nf, int *dirow_p,
int *dicol_p, int *difib_p,
double *ddist_r, double *ddist_c,
double *ddist_f, double *dv, int lddv1,
int lddv2, int queue_idx);
template void pi_Ql_first_123<float>(mgard_cuda_handle<float> &handle, int nrow,
int ncol, int nfib, int nr, int nc, int nf,
int *dirow_p, int *dicol_p, int *difib_p,
float *ddist_r, float *ddist_c,
float *ddist_f, float *dv, int lddv1,
int lddv2, int queue_idx);
} // namespace mgard_cuda
|
11f6786e53402ec61ac1e3136570a74c8be3f85d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/kthvalue_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/eigen/common.h"
#include "paddle/phi/kernels/funcs/eigen/eigen_function.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/top_k_function_cuda.h"
namespace phi {
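// Rounds the column count up to the next power-of-two block size,
// bounded below by 64 and capped at 1024 threads.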
inline int getBlockSize(int col) {
if (col > 512)
return 1024;
else if (col > 256 && col <= 512)
return 512;
else if (col > 128 && col <= 256)
return 256;
else if (col > 64 && col <= 128)
return 128;
else
return 64;
}
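// Sorts every row of a [num_rows, num_cols] view with a segmented radix sort
// (hipCUB) and then slices out column k-1, i.e. the k-th smallest value of
// each row together with its original index.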
template <typename T>
bool SortKthvalue(const phi::GPUContext& dev_ctx,
const DenseTensor* input_tensor,
const int64_t num_cols,
const int64_t num_rows,
const int k,
DenseTensor* out_tensor,
DenseTensor* indices_tensor) {
auto cu_stream = dev_ctx.stream();
DenseTensor input_indices;
const std::vector<int64_t> dims = {num_rows, num_cols};
auto dim = phi::make_ddim(dims);
input_indices.Resize(dim);
dev_ctx.template Alloc<int64_t>(&input_indices);
size_t temp_storage_bytes = -1;
int block_size = getBlockSize(num_cols);
unsigned int maxGridDimX = dev_ctx.GetCUDAMaxGridDimSize()[0];
unsigned int grid_size = num_rows < maxGridDimX
? static_cast<unsigned int>(num_rows)
: maxGridDimX;
hipLaunchKernelGGL(( phi::funcs::InitIndex<int64_t>), dim3(grid_size), dim3(block_size), 0, cu_stream,
input_indices.data<int64_t>(), num_rows, num_cols);
hipcub::CountingInputIterator<int64_t> counting_iter(0);
hipcub::TransformInputIterator<int64_t,
phi::funcs::SegmentOffsetIter,
hipcub::CountingInputIterator<int64_t>>
segment_offsets_t(counting_iter, phi::funcs::SegmentOffsetIter(num_cols));
T* sorted_values_ptr;
int64_t* sorted_indices_ptr;
DenseTensor temp_values, temp_indices;
const T* input = input_tensor->data<T>();
T* values = out_tensor->data<T>();
int64_t* indices = dev_ctx.template Alloc<int64_t>(indices_tensor);
temp_values.Resize(dim);
temp_indices.Resize(dim);
sorted_values_ptr = dev_ctx.template Alloc<T>(&temp_values);
sorted_indices_ptr = dev_ctx.template Alloc<int64_t>(&temp_indices);
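  // First SortPairs call passes a null temp-storage pointer: it only reports
  // the required scratch size in temp_storage_bytes and performs no sorting.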
auto err =
hipcub::DeviceSegmentedRadixSort::SortPairs(nullptr,
temp_storage_bytes,
input,
sorted_values_ptr,
input_indices.data<int64_t>(),
sorted_indices_ptr,
num_cols * num_rows,
num_rows,
segment_offsets_t,
segment_offsets_t + 1,
0,
sizeof(T) * 8,
cu_stream);
#ifdef __HIPCC__
if (err != hipSuccess) {
LOG(ERROR) << "KthvalueOP failed as could not launch "
"hipcub::DeviceSegmentedRadixSort::SortPairs, status: "
<< hipGetErrorString(err);
return false;
}
#else
if (err != hipSuccess) {
LOG(ERROR) << "KthvalueOP failed as could not launch "
"hipcub::DeviceSegmentedRadixSort::SortPairs, status: "
<< hipGetErrorString(err);
return false;
}
#endif
DenseTensor temp_storage;
temp_storage.Resize({static_cast<int>(temp_storage_bytes / sizeof(uint8_t))});
uint8_t* temp_storage_data = dev_ctx.template Alloc<uint8_t>(&temp_storage);
err = hipcub::DeviceSegmentedRadixSort::SortPairs(temp_storage_data,
temp_storage_bytes,
input,
sorted_values_ptr,
input_indices.data<int64_t>(),
sorted_indices_ptr,
num_cols * num_rows,
num_rows,
segment_offsets_t,
segment_offsets_t + 1,
0,
sizeof(T) * 8,
cu_stream);
#ifdef __HIPCC__
if (err != hipSuccess) {
LOG(ERROR) << "KthvalueOP failed as could not launch "
"hipcub::DeviceSegmentedRadixSort::SortPairs, "
<< temp_storage_bytes << ", status: " << hipGetErrorString(err);
return false;
}
#else
if (err != hipSuccess) {
LOG(ERROR) << "KthvalueOP failed as could not launch "
"hipcub::DeviceSegmentedRadixSort::SortPairs, "
<< temp_storage_bytes << ", status: " << hipGetErrorString(err);
return false;
}
#endif
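  // Rows are sorted ascending, so column k-1 of the sorted values/indices is
  // the k-th smallest element of each row.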
auto& dev = *dev_ctx.eigen_device();
const Eigen::DSizes<Eigen::DenseIndex, 2> slice_indices{0, k - 1};
const Eigen::DSizes<Eigen::DenseIndex, 2> slice_sizes{num_rows, 1};
auto e_indices = EigenMatrix<int64_t>::From(*indices_tensor, dim);
auto e_tmp_indices =
EigenMatrix<int64_t>::From(static_cast<const DenseTensor>(temp_indices));
std::vector<int> odims = {static_cast<int>(num_rows), static_cast<int>(1)};
dim = phi::make_ddim(odims);
auto e_values = EigenMatrix<T>::From(*out_tensor, dim);
auto e_tmp_values =
EigenMatrix<T>::From(static_cast<const DenseTensor>(temp_values));
funcs::EigenSlice<std::decay_t<decltype(dev)>, int64_t, 2>::Eval(
dev, e_indices, e_tmp_indices, slice_indices, slice_sizes);
funcs::EigenSlice<std::decay_t<decltype(dev)>, T, 2>::Eval(
dev, e_values, e_tmp_values, slice_indices, slice_sizes);
return true;
}
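// Kernel entry point: when the reduction axis is already the innermost
// dimension the rows are sorted directly; otherwise the input is transposed
// so the target axis becomes innermost, sorted, and the results transposed
// back.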
template <typename T, typename Context>
void KthvalueKernel(const Context& dev_ctx,
const DenseTensor& x,
int k,
int axis,
bool keepdim,
DenseTensor* output,
DenseTensor* indices) {
const auto& in_dims = x.dims();
if (axis < 0) axis += in_dims.size();
auto out_dims = output->dims();
const T* input_data = x.data<T>();
T* output_data = dev_ctx.template Alloc<T>(output);
int64_t* indices_data = dev_ctx.template Alloc<int64_t>(indices);
// For 0D Tensor
if (in_dims.size() == 0) {
PADDLE_ENFORCE_EQ(k,
1,
phi::errors::InvalidArgument(
"the k in the kthvalue must less equal than the "
"elemenents number of the input X, but received %d .",
k));
phi::Copy<Context>(dev_ctx, x, dev_ctx.GetPlace(), false, output);
phi::funcs::set_constant(dev_ctx, indices, 0);
return;
}
if (axis == in_dims.size() - 1) {
const int64_t& input_height =
phi::product(phi::slice_ddim(in_dims, 0, in_dims.size() - 1));
const int64_t& input_width = in_dims[in_dims.size() - 1];
PADDLE_ENFORCE_EQ(
SortKthvalue<T>(
dev_ctx, &x, input_width, input_height, k, output, indices),
true,
phi::errors::External("KthvalueOP: Error when use cub sorting"));
return;
} else {
std::vector<int> trans;
for (int i = 0; i < axis; i++) {
trans.emplace_back(i);
}
trans.emplace_back(in_dims.size() - 1);
for (int i = axis + 1; i < in_dims.size() - 1; i++) {
trans.emplace_back(i);
}
trans.emplace_back(axis);
if (!keepdim) {
std::vector<int> tmp_out_shape;
for (int i = 0; i < axis; i++) {
tmp_out_shape.emplace_back(in_dims[i]);
}
tmp_out_shape.emplace_back(1);
for (int i = axis + 1; i < in_dims.size(); i++) {
tmp_out_shape.emplace_back(in_dims[i]);
}
DDim tmp_out_dims = phi::make_ddim(tmp_out_shape);
output->Resize(tmp_out_dims);
indices->Resize(tmp_out_dims);
}
DDim trans_dims(in_dims);
DDim trans_out_dims(in_dims);
for (int i = 0; i < trans.size(); i++) {
trans_dims[i] = in_dims[trans[i]];
trans_out_dims[i] = in_dims[trans[i]];
}
trans_out_dims[in_dims.size() - 1] = 1;
DenseTensor trans_input;
trans_input.Resize(trans_dims);
dev_ctx.template Alloc<T>(&trans_input);
int ndims = trans.size();
funcs::TransCompute<phi::GPUContext, T>(
ndims, dev_ctx, x, &trans_input, trans);
DenseTensor trans_ind, trans_out;
trans_ind.Resize(trans_out_dims);
trans_out.Resize(trans_out_dims);
dev_ctx.template Alloc<int64_t>(&trans_ind);
dev_ctx.template Alloc<T>(&trans_out);
const int64_t input_height =
phi::product(phi::slice_ddim(trans_dims, 0, trans_dims.size() - 1));
const int64_t input_width = trans_dims[trans_dims.size() - 1];
PADDLE_ENFORCE_EQ(
SortKthvalue<T>(dev_ctx,
&trans_input,
input_width,
input_height,
k,
&trans_out,
&trans_ind),
true,
phi::errors::External("KthvalueOP: Error when use cub sorting"));
funcs::TransCompute<phi::GPUContext, int64_t>(
ndims, dev_ctx, trans_ind, indices, trans);
funcs::TransCompute<phi::GPUContext, T>(
ndims, dev_ctx, trans_out, output, trans);
if (!keepdim) {
output->Resize(out_dims);
indices->Resize(out_dims);
}
}
}
} // namespace phi
PD_REGISTER_KERNEL(kthvalue,
GPU,
ALL_LAYOUT,
phi::KthvalueKernel,
float,
double,
int,
int64_t) {}
|
11f6786e53402ec61ac1e3136570a74c8be3f85d.cu
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/kthvalue_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/eigen/common.h"
#include "paddle/phi/kernels/funcs/eigen/eigen_function.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/top_k_function_cuda.h"
namespace phi {
inline int getBlockSize(int col) {
if (col > 512)
return 1024;
else if (col > 256 && col <= 512)
return 512;
else if (col > 128 && col <= 256)
return 256;
else if (col > 64 && col <= 128)
return 128;
else
return 64;
}
template <typename T>
bool SortKthvalue(const phi::GPUContext& dev_ctx,
const DenseTensor* input_tensor,
const int64_t num_cols,
const int64_t num_rows,
const int k,
DenseTensor* out_tensor,
DenseTensor* indices_tensor) {
auto cu_stream = dev_ctx.stream();
DenseTensor input_indices;
const std::vector<int64_t> dims = {num_rows, num_cols};
auto dim = phi::make_ddim(dims);
input_indices.Resize(dim);
dev_ctx.template Alloc<int64_t>(&input_indices);
size_t temp_storage_bytes = -1;
int block_size = getBlockSize(num_cols);
unsigned int maxGridDimX = dev_ctx.GetCUDAMaxGridDimSize()[0];
unsigned int grid_size = num_rows < maxGridDimX
? static_cast<unsigned int>(num_rows)
: maxGridDimX;
phi::funcs::InitIndex<int64_t><<<grid_size, block_size, 0, cu_stream>>>(
input_indices.data<int64_t>(), num_rows, num_cols);
cub::CountingInputIterator<int64_t> counting_iter(0);
cub::TransformInputIterator<int64_t,
phi::funcs::SegmentOffsetIter,
cub::CountingInputIterator<int64_t>>
segment_offsets_t(counting_iter, phi::funcs::SegmentOffsetIter(num_cols));
T* sorted_values_ptr;
int64_t* sorted_indices_ptr;
DenseTensor temp_values, temp_indices;
const T* input = input_tensor->data<T>();
T* values = out_tensor->data<T>();
int64_t* indices = dev_ctx.template Alloc<int64_t>(indices_tensor);
temp_values.Resize(dim);
temp_indices.Resize(dim);
sorted_values_ptr = dev_ctx.template Alloc<T>(&temp_values);
sorted_indices_ptr = dev_ctx.template Alloc<int64_t>(&temp_indices);
auto err =
cub::DeviceSegmentedRadixSort::SortPairs(nullptr,
temp_storage_bytes,
input,
sorted_values_ptr,
input_indices.data<int64_t>(),
sorted_indices_ptr,
num_cols * num_rows,
num_rows,
segment_offsets_t,
segment_offsets_t + 1,
0,
sizeof(T) * 8,
cu_stream);
#ifdef __HIPCC__
if (err != hipSuccess) {
LOG(ERROR) << "KthvalueOP failed as could not launch "
"hipcub::DeviceSegmentedRadixSort::SortPairs, status: "
<< hipGetErrorString(err);
return false;
}
#else
if (err != cudaSuccess) {
LOG(ERROR) << "KthvalueOP failed as could not launch "
"cub::DeviceSegmentedRadixSort::SortPairs, status: "
<< cudaGetErrorString(err);
return false;
}
#endif
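  // Allocate the scratch buffer of the size reported by the query above and
  // run the actual segmented sort.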
DenseTensor temp_storage;
temp_storage.Resize({static_cast<int>(temp_storage_bytes / sizeof(uint8_t))});
uint8_t* temp_storage_data = dev_ctx.template Alloc<uint8_t>(&temp_storage);
err = cub::DeviceSegmentedRadixSort::SortPairs(temp_storage_data,
temp_storage_bytes,
input,
sorted_values_ptr,
input_indices.data<int64_t>(),
sorted_indices_ptr,
num_cols * num_rows,
num_rows,
segment_offsets_t,
segment_offsets_t + 1,
0,
sizeof(T) * 8,
cu_stream);
#ifdef __HIPCC__
if (err != hipSuccess) {
LOG(ERROR) << "KthvalueOP failed as could not launch "
"hipcub::DeviceSegmentedRadixSort::SortPairs, "
<< temp_storage_bytes << ", status: " << hipGetErrorString(err);
return false;
}
#else
if (err != cudaSuccess) {
LOG(ERROR) << "KthvalueOP failed as could not launch "
"cub::DeviceSegmentedRadixSort::SortPairs, "
<< temp_storage_bytes << ", status: " << cudaGetErrorString(err);
return false;
}
#endif
auto& dev = *dev_ctx.eigen_device();
const Eigen::DSizes<Eigen::DenseIndex, 2> slice_indices{0, k - 1};
const Eigen::DSizes<Eigen::DenseIndex, 2> slice_sizes{num_rows, 1};
auto e_indices = EigenMatrix<int64_t>::From(*indices_tensor, dim);
auto e_tmp_indices =
EigenMatrix<int64_t>::From(static_cast<const DenseTensor>(temp_indices));
std::vector<int> odims = {static_cast<int>(num_rows), static_cast<int>(1)};
dim = phi::make_ddim(odims);
auto e_values = EigenMatrix<T>::From(*out_tensor, dim);
auto e_tmp_values =
EigenMatrix<T>::From(static_cast<const DenseTensor>(temp_values));
funcs::EigenSlice<std::decay_t<decltype(dev)>, int64_t, 2>::Eval(
dev, e_indices, e_tmp_indices, slice_indices, slice_sizes);
funcs::EigenSlice<std::decay_t<decltype(dev)>, T, 2>::Eval(
dev, e_values, e_tmp_values, slice_indices, slice_sizes);
return true;
}
template <typename T, typename Context>
void KthvalueKernel(const Context& dev_ctx,
const DenseTensor& x,
int k,
int axis,
bool keepdim,
DenseTensor* output,
DenseTensor* indices) {
const auto& in_dims = x.dims();
if (axis < 0) axis += in_dims.size();
auto out_dims = output->dims();
const T* input_data = x.data<T>();
T* output_data = dev_ctx.template Alloc<T>(output);
int64_t* indices_data = dev_ctx.template Alloc<int64_t>(indices);
// For 0D Tensor
if (in_dims.size() == 0) {
PADDLE_ENFORCE_EQ(k,
1,
phi::errors::InvalidArgument(
"the k in the kthvalue must less equal than the "
"elemenents number of the input X, but received %d .",
k));
phi::Copy<Context>(dev_ctx, x, dev_ctx.GetPlace(), false, output);
phi::funcs::set_constant(dev_ctx, indices, 0);
return;
}
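  // Fast path: the reduction axis is already the innermost dimension, so each
  // row of the flattened [input_height, input_width] view is sorted directly.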
if (axis == in_dims.size() - 1) {
const int64_t& input_height =
phi::product(phi::slice_ddim(in_dims, 0, in_dims.size() - 1));
const int64_t& input_width = in_dims[in_dims.size() - 1];
PADDLE_ENFORCE_EQ(
SortKthvalue<T>(
dev_ctx, &x, input_width, input_height, k, output, indices),
true,
phi::errors::External("KthvalueOP: Error when use cub sorting"));
return;
} else {
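    // General case: build a permutation that moves `axis` to the innermost
    // position, transpose, sort the transposed rows, then transpose the
    // k-th values and indices back to the original layout.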
std::vector<int> trans;
for (int i = 0; i < axis; i++) {
trans.emplace_back(i);
}
trans.emplace_back(in_dims.size() - 1);
for (int i = axis + 1; i < in_dims.size() - 1; i++) {
trans.emplace_back(i);
}
trans.emplace_back(axis);
if (!keepdim) {
std::vector<int> tmp_out_shape;
for (int i = 0; i < axis; i++) {
tmp_out_shape.emplace_back(in_dims[i]);
}
tmp_out_shape.emplace_back(1);
for (int i = axis + 1; i < in_dims.size(); i++) {
tmp_out_shape.emplace_back(in_dims[i]);
}
DDim tmp_out_dims = phi::make_ddim(tmp_out_shape);
output->Resize(tmp_out_dims);
indices->Resize(tmp_out_dims);
}
DDim trans_dims(in_dims);
DDim trans_out_dims(in_dims);
for (int i = 0; i < trans.size(); i++) {
trans_dims[i] = in_dims[trans[i]];
trans_out_dims[i] = in_dims[trans[i]];
}
trans_out_dims[in_dims.size() - 1] = 1;
DenseTensor trans_input;
trans_input.Resize(trans_dims);
dev_ctx.template Alloc<T>(&trans_input);
int ndims = trans.size();
funcs::TransCompute<phi::GPUContext, T>(
ndims, dev_ctx, x, &trans_input, trans);
DenseTensor trans_ind, trans_out;
trans_ind.Resize(trans_out_dims);
trans_out.Resize(trans_out_dims);
dev_ctx.template Alloc<int64_t>(&trans_ind);
dev_ctx.template Alloc<T>(&trans_out);
const int64_t input_height =
phi::product(phi::slice_ddim(trans_dims, 0, trans_dims.size() - 1));
const int64_t input_width = trans_dims[trans_dims.size() - 1];
PADDLE_ENFORCE_EQ(
SortKthvalue<T>(dev_ctx,
&trans_input,
input_width,
input_height,
k,
&trans_out,
&trans_ind),
true,
phi::errors::External("KthvalueOP: Error when use cub sorting"));
funcs::TransCompute<phi::GPUContext, int64_t>(
ndims, dev_ctx, trans_ind, indices, trans);
funcs::TransCompute<phi::GPUContext, T>(
ndims, dev_ctx, trans_out, output, trans);
if (!keepdim) {
output->Resize(out_dims);
indices->Resize(out_dims);
}
}
}
} // namespace phi
PD_REGISTER_KERNEL(kthvalue,
GPU,
ALL_LAYOUT,
phi::KthvalueKernel,
float,
double,
int,
int64_t) {}
|