hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars) |
---|---|---|---|
cd971d4077805a73813b1f036fb432de1658762f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
@generated c Wed Aug 14 12:16:41 2013
@author Stan Tomov
*/
#include "common_magma.h"
#define PRECISION_c
#include "commonblas.h"
__global__ void ctranspose_32( magmaFloatComplex *B, int ldb, const magmaFloatComplex *A, int lda )
{
__shared__ magmaFloatComplex a[32][CSIZE_1SHARED+1];
int inx = threadIdx.x;
int iny = threadIdx.y;
int ibx = blockIdx.x*32;
int iby = blockIdx.y*32;
A += ibx + inx + __mul24( iby + iny, lda );
B += iby + inx + __mul24( ibx + iny, ldb );
a[iny+0][inx] = A[0*lda];
a[iny+8][inx] = A[8*lda];
a[iny+16][inx] = A[16*lda];
a[iny+24][inx] = A[24*lda];
__syncthreads();
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c)
B[0*ldb] = a[inx][iny+0];
B[8*ldb] = a[inx][iny+8];
B[16*ldb] = a[inx][iny+16];
B[24*ldb] = a[inx][iny+24];
#else /* defined(PRECISION_z) */
B[0*ldb] = a[inx][iny+0];
B[8*ldb] = a[inx][iny+8];
B[0*ldb+16] = a[inx+16][iny+0];
B[8*ldb+16] = a[inx+16][iny+8];
__syncthreads();
A += CSIZE_1SHARED;
B += __mul24( 16, ldb);
a[iny+0][inx] = A[0*lda];
a[iny+8][inx] = A[8*lda];
a[iny+16][inx] = A[16*lda];
a[iny+24][inx] = A[24*lda];
__syncthreads();
B[0*ldb] = a[inx][iny+0];
B[8*ldb] = a[inx][iny+8];
B[0*ldb+16] = a[inx+16][iny+0];
B[8*ldb+16] = a[inx+16][iny+8];
#endif
}
//
// m, n - dimensions in the source matrix
// This version works when m and n are divisible by 32.
//
extern "C" void
magmablas_ctranspose(magmaFloatComplex *odata, magma_int_t ldo,
const magmaFloatComplex *idata, magma_int_t ldi,
magma_int_t m, magma_int_t n )
{
//assert( (m%32) == 0 && (n%32) == 0, "misaligned transpose" );
dim3 threads( CSIZE_1SHARED, 8, 1 );
dim3 grid( m/32, n/32, 1 );
hipLaunchKernelGGL(( ctranspose_32), dim3(grid), dim3(threads), 0, magma_stream , odata, ldo, idata, ldi );
}
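//
// A minimal usage sketch (an illustration, not taken from MAGMA itself):
// both dimensions must be multiples of 32, and ldo/ldi are the leading
// dimensions of the device arrays. The pointer names below are hypothetical.
//
//   // transpose a 64 x 64 single-complex matrix held in d_A into d_B
//   magmablas_ctranspose( d_B, 64, d_A, 64, 64, 64 );
//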
| cd971d4077805a73813b1f036fb432de1658762f.cu | /*
-- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
@generated c Wed Aug 14 12:16:41 2013
@author Stan Tomov
*/
#include "common_magma.h"
#define PRECISION_c
#include "commonblas.h"
__global__ void ctranspose_32( magmaFloatComplex *B, int ldb, const magmaFloatComplex *A, int lda )
{
__shared__ magmaFloatComplex a[32][CSIZE_1SHARED+1];
int inx = threadIdx.x;
int iny = threadIdx.y;
int ibx = blockIdx.x*32;
int iby = blockIdx.y*32;
A += ibx + inx + __mul24( iby + iny, lda );
B += iby + inx + __mul24( ibx + iny, ldb );
a[iny+0][inx] = A[0*lda];
a[iny+8][inx] = A[8*lda];
a[iny+16][inx] = A[16*lda];
a[iny+24][inx] = A[24*lda];
__syncthreads();
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c)
B[0*ldb] = a[inx][iny+0];
B[8*ldb] = a[inx][iny+8];
B[16*ldb] = a[inx][iny+16];
B[24*ldb] = a[inx][iny+24];
#else /* defined(PRECISION_z) */
B[0*ldb] = a[inx][iny+0];
B[8*ldb] = a[inx][iny+8];
B[0*ldb+16] = a[inx+16][iny+0];
B[8*ldb+16] = a[inx+16][iny+8];
__syncthreads();
A += CSIZE_1SHARED;
B += __mul24( 16, ldb);
a[iny+0][inx] = A[0*lda];
a[iny+8][inx] = A[8*lda];
a[iny+16][inx] = A[16*lda];
a[iny+24][inx] = A[24*lda];
__syncthreads();
B[0*ldb] = a[inx][iny+0];
B[8*ldb] = a[inx][iny+8];
B[0*ldb+16] = a[inx+16][iny+0];
B[8*ldb+16] = a[inx+16][iny+8];
#endif
}
//
// m, n - dimensions in the source matrix
// This version works when m and n are divisible by 32.
//
extern "C" void
magmablas_ctranspose(magmaFloatComplex *odata, magma_int_t ldo,
const magmaFloatComplex *idata, magma_int_t ldi,
magma_int_t m, magma_int_t n )
{
//assert( (m%32) == 0 && (n%32) == 0, "misaligned transpose" );
dim3 threads( CSIZE_1SHARED, 8, 1 );
dim3 grid( m/32, n/32, 1 );
ctranspose_32<<< grid, threads, 0, magma_stream >>>( odata, ldo, idata, ldi );
}
|
76c664efe306f9ad712507cdc5e2f81ba4aab912.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hipblas.h>
#include <stdio.h>
#include <cmath>
#include <iostream>
int main(void)
{
const size_t N = 100000;
const size_t K = 16;
//
// Initialize CUBLAS:
//
std::cout << "Init CUBLAS..." << std::endl;
hipblasHandle_t h;
hipblasCreate(&h);
//
// allocate host memory:
//
std::cout << "Allocating host arrays..." << std::endl;
double *x = (double*)malloc(sizeof(double) * N);
double **y = (double**)malloc(sizeof(double*) * K);
for (size_t i=0; i<K; ++i) {
y[i] = (double*)malloc(sizeof(double) * N);
}
double *results = (double*)malloc(sizeof(double) * K);
double *results2 = (double*)malloc(sizeof(double) * K);
//
// allocate device memory
//
std::cout << "Allocating CUDA arrays..." << std::endl;
double *cuda_x; hipMalloc( (void **)(&cuda_x), sizeof(double)*N);
double **cuda_y = (double**)malloc(sizeof(double*) * K); // storing CUDA pointers on host!
for (size_t i=0; i<K; ++i) {
hipMalloc( (void **)(&cuda_y[i]), sizeof(double)*N);
}
//
// fill host arrays with values
//
for (size_t j=0; j<N; ++j) {
x[j] = 1 + j%K;
}
for (size_t i=0; i<K; ++i) {
for (size_t j=0; j<N; ++j) {
y[i][j] = 1 + rand() / (1.1 * RAND_MAX);
}
}
//
// Reference calculation on CPU:
//
for (size_t i=0; i<K; ++i) {
results[i] = 0;
results2[i] = 0;
for (size_t j=0; j<N; ++j) {
results[i] += x[j] * y[i][j];
}
}
//
// Copy data to GPU
//
std::cout << "Copying data to GPU..." << std::endl;
hipMemcpy(cuda_x, x, sizeof(double)*N, hipMemcpyHostToDevice);
for (size_t i=0; i<K; ++i) {
hipMemcpy(cuda_y[i], y[i], sizeof(double)*N, hipMemcpyHostToDevice);
}
//
// Let CUBLAS do the work:
//
std::cout << "Running dot products with CUBLAS..." << std::endl;
for (size_t i=0; i<K; ++i) {
hipblasDdot(h, N, cuda_x, 1, cuda_y[i], 1, results2 + i);
}
//
// Compare results
//
std::cout << "Copying results back to host..." << std::endl;
for (size_t i=0; i<K; ++i) {
std::cout << results[i] << " on CPU, " << results2[i] << " on GPU. Relative difference: " << fabs(results[i] - results2[i]) / results[i] << std::endl;
}
//
// Clean up:
//
std::cout << "Cleaning up..." << std::endl;
free(x);
hipFree(cuda_x);
for (size_t i=0; i<K; ++i) {
free(y[i]);
hipFree(cuda_y[i]);
}
free(y);
free(cuda_y);
free(results);
free(results2);
hipblasDestroy(h);
return 0;
}
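//
// A minimal sketch (not part of the original program) of checking the
// hipBLAS return codes that the calls above ignore; the variable name
// "status" is chosen here purely for illustration:
//
//   hipblasStatus_t status = hipblasCreate(&h);
//   if (status != HIPBLAS_STATUS_SUCCESS)
//     std::cout << "hipblasCreate failed with status " << status << std::endl;
//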
| 76c664efe306f9ad712507cdc5e2f81ba4aab912.cu | #include <cuda_runtime.h>
#include <cublas_v2.h>
#include <stdio.h>
#include <cmath>
#include <iostream>
int main(void)
{
const size_t N = 100000;
const size_t K = 16;
//
// Initialize CUBLAS:
//
std::cout << "Init CUBLAS..." << std::endl;
cublasHandle_t h;
cublasCreate(&h);
//
// allocate host memory:
//
std::cout << "Allocating host arrays..." << std::endl;
double *x = (double*)malloc(sizeof(double) * N);
double **y = (double**)malloc(sizeof(double*) * K);
for (size_t i=0; i<K; ++i) {
y[i] = (double*)malloc(sizeof(double) * N);
}
double *results = (double*)malloc(sizeof(double) * K);
double *results2 = (double*)malloc(sizeof(double) * K);
//
// allocate device memory
//
std::cout << "Allocating CUDA arrays..." << std::endl;
double *cuda_x; cudaMalloc( (void **)(&cuda_x), sizeof(double)*N);
double **cuda_y = (double**)malloc(sizeof(double*) * K); // storing CUDA pointers on host!
for (size_t i=0; i<K; ++i) {
cudaMalloc( (void **)(&cuda_y[i]), sizeof(double)*N);
}
//
// fill host arrays with values
//
for (size_t j=0; j<N; ++j) {
x[j] = 1 + j%K;
}
for (size_t i=0; i<K; ++i) {
for (size_t j=0; j<N; ++j) {
y[i][j] = 1 + rand() / (1.1 * RAND_MAX);
}
}
//
// Reference calculation on CPU:
//
for (size_t i=0; i<K; ++i) {
results[i] = 0;
results2[i] = 0;
for (size_t j=0; j<N; ++j) {
results[i] += x[j] * y[i][j];
}
}
//
// Copy data to GPU
//
std::cout << "Copying data to GPU..." << std::endl;
cudaMemcpy(cuda_x, x, sizeof(double)*N, cudaMemcpyHostToDevice);
for (size_t i=0; i<K; ++i) {
cudaMemcpy(cuda_y[i], y[i], sizeof(double)*N, cudaMemcpyHostToDevice);
}
//
// Let CUBLAS do the work:
//
std::cout << "Running dot products with CUBLAS..." << std::endl;
for (size_t i=0; i<K; ++i) {
cublasDdot(h, N, cuda_x, 1, cuda_y[i], 1, results2 + i);
}
//
// Compare results
//
std::cout << "Copying results back to host..." << std::endl;
for (size_t i=0; i<K; ++i) {
std::cout << results[i] << " on CPU, " << results2[i] << " on GPU. Relative difference: " << fabs(results[i] - results2[i]) / results[i] << std::endl;
}
//
// Clean up:
//
std::cout << "Cleaning up..." << std::endl;
free(x);
cudaFree(cuda_x);
for (size_t i=0; i<K; ++i) {
free(y[i]);
cudaFree(cuda_y[i]);
}
free(y);
free(cuda_y);
free(results);
free(results2);
cublasDestroy(h);
return 0;
}
|
7dc6d316587c2cdf2e05f5c435fc3b89bbdcc585.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <time.h>
//#include <hip/hip_runtime.h>
typedef struct {
int width;
int height;
float* elements;
}Matrix;
float rand(float a,float b)
{
//return( a + rand()%(b-a+1.0) );
return(b - a) * ((float)rand() / RAND_MAX) + a;
}
#define BLOCK_SIZE 16
#define BLOCK_SIZE_SINGLE 1
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
/*
We couldn't find the proper library for the functions:
cutCreateTimer, cutResetTimer, hipEventCreate, hipEventRecord, hipEventSynchronize, hipEventElapsedTime
That's why we have left them in the comments for now, for clarity of the program
*/
//uint kernelTime;
//cutCreateTimer(&kernelTime);
//cutResetTimer(kernelTime);
Matrix d_A;
d_A.width = A.width;
d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
hipError_t err = hipMalloc(&d_A.elements, size);
printf("CUDA malloc A: %s\n",hipGetErrorString(err));
err = hipMemcpy(d_A.elements, A.elements, size, hipMemcpyHostToDevice);
printf("Copy A to device: %s\n",hipGetErrorString(err));
Matrix d_B;
d_B.width = B.width;
d_B.height = B.height;
size = B.width * B.height * sizeof(float);
err = hipMalloc(&d_B.elements, size);
printf("CUDA malloc B: %s\n",hipGetErrorString(err));
err = hipMemcpy(d_B.elements, B.elements, size, hipMemcpyHostToDevice);
printf("Copy B to device: %s\n",hipGetErrorString(err));
Matrix d_C;
d_C.width = C.width;
d_C.height = C.height;
size = C.width * C.height * sizeof(float);
err = hipMalloc(&d_C.elements, size);
printf("CUDA malloc C: %s\n",hipGetErrorString(err));
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((B.width + dimBlock.x - 1) / dimBlock.x,
(A.height + dimBlock.y - 1) / dimBlock.y);
printf("Block size= %d\n",BLOCK_SIZE);
printf("DimBlock = %d\n",dimBlock);
//hipEvent_t start, stop;
//float time;
//hipEventCreate(&start);
//hipEventCreate(&stop);
hipLaunchKernelGGL(( MatMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C);
err = hipDeviceSynchronize();
//hipEventRecord(stop,0);
//hipEventSynchronize(stop);
printf("Run kernel: %s\n", hipGetErrorString(err));
err = hipMemcpy(C.elements, d_C.elements, size, hipMemcpyDeviceToHost);
printf("Copy C off of device: %s\n",hipGetErrorString(err));
hipFree(d_A.elements);
hipFree(d_B.elements);
//hipEventElapsedTime(&time, start, stop);
//printf("Time : %f mss\n",time);
}
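//
// A minimal timing sketch (an assumption, not part of the original code):
// the event functions commented out above do exist in the HIP runtime
// (declared in <hip/hip_runtime.h>); only cutCreateTimer/cutResetTimer came
// from the old cutil helper library. Hypothetical usage around the launch:
//
//   hipEvent_t start, stop;
//   float time;
//   hipEventCreate(&start);
//   hipEventCreate(&stop);
//   hipEventRecord(start, 0);
//   hipLaunchKernelGGL(( MatMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C);
//   hipEventRecord(stop, 0);
//   hipEventSynchronize(stop);
//   hipEventElapsedTime(&time, start, stop);
//   printf("Time : %f ms\n", time);
//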
void MatMulSingleThreaded(const Matrix A, const Matrix B, Matrix C)
{
Matrix d_A;
d_A.width = A.width;
d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
hipError_t err = hipMalloc(&d_A.elements, size);
printf("CUDA malloc A: %s\n",hipGetErrorString(err));
err = hipMemcpy(d_A.elements, A.elements, size, hipMemcpyHostToDevice);
printf("Copy A to device: %s\n",hipGetErrorString(err));
Matrix d_B;
d_B.width = B.width;
d_B.height = B.height;
size = B.width * B.height * sizeof(float);
err = hipMalloc(&d_B.elements, size);
printf("CUDA malloc B: %s\n",hipGetErrorString(err));
err = hipMemcpy(d_B.elements, B.elements, size, hipMemcpyHostToDevice);
printf("Copy B to device: %s\n",hipGetErrorString(err));
Matrix d_C;
d_C.width = C.width;
d_C.height = C.height;
size = C.width * C.height * sizeof(float);
err = hipMalloc(&d_C.elements, size);
printf("CUDA malloc C: %s\n",hipGetErrorString(err));
dim3 dimBlock(BLOCK_SIZE_SINGLE, BLOCK_SIZE_SINGLE);
dim3 dimGrid((B.width + dimBlock.x - 1) / dimBlock.x,
(A.height + dimBlock.y - 1) / dimBlock.y);
printf("Block size= %d\n",BLOCK_SIZE_SINGLE);
printf("DimBlock = %d\n",dimBlock);
//hipEvent_t start, stop;
//float time;
//hipEventCreate(&start);
//hipEventCreate(&stop);
hipLaunchKernelGGL(( MatMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C);
err = hipDeviceSynchronize();
//hipEventRecord(stop,0);
//hipEventSynchronize(stop);
printf("Run kernel: %s\n", hipGetErrorString(err));
err = hipMemcpy(C.elements, d_C.elements, size, hipMemcpyDeviceToHost);
printf("Copy C off of device: %s\n",hipGetErrorString(err));
hipFree(d_A.elements);
hipFree(d_B.elements);
//hipEventElapsedTime(&time, start, stop);
//printf("Time : %f mss\n",time);
}
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C) {
float Cvalue = 0.0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if(row > A.height || col > B.width) return;
for (int e = 0; e < A.width; ++e)
Cvalue += (A.elements[row * A.width + e]) * (B.elements[e * B.width + col]);
C.elements[row * C.width + col] = Cvalue;
}
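//
// Note (an observation added for clarity, not part of the original code): the
// bounds check above uses ">" rather than ">=", so a thread with
// col == B.width is not filtered out and writes to
// C.elements[row * C.width + C.width], i.e. the first element of the next
// row. This may well be the "first value of each row" problem reported in
// main() below. A tighter guard would be:
//
//   if (row >= A.height || col >= B.width) return;
//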
int main(int argc, char* argv[]){
time_t czas;
srand( (unsigned int)time(&czas));
Matrix A, B, C,C1;
int a1, a2, b1, b2;
a1 = atoi(argv[1]); /* Height of A */
a2 = atoi(argv[2]); /* Width of A */
b1 = a2; /* Height of B */
b2 = atoi(argv[3]); /* Width of B */
A.height = a1;
A.width = a2;
A.elements = (float*)malloc(A.width * A.height * sizeof(float));
B.height = b1;
B.width = b2;
B.elements = (float*)malloc(B.width * B.height * sizeof(float));
C.height = A.height;
C.width = B.width;
C.elements = (float*)malloc(C.width * C.height * sizeof(float));
C1.height = A.height;
C1.width = B.width;
C1.elements = (float*)malloc(C.width * C.height * sizeof(float));
printf("\n");
for(int i = 0; i < A.height; i++) {
for(int j = 0; j < A.width; j++)
A.elements[i*A.width + j] = rand(1.0,50.0);
}
for(int i = 0; i < B.height; i++){
for(int j = 0; j < B.width; j++)
B.elements[i*B.width + j] = rand(1.0,50.0);
}
MatMul(A, B, C);
MatMulSingleThreaded(A,B,C1);
int checksum=0;
for(int i = 0; i < C.height; i++){
for(int j = 0; j < min(10, C.width); j++){
if(C.elements[i*C.width + j]!= C1.elements[i*C1.width + j])
checksum++;
}
printf("checksum= %d\n",checksum);
}
// HERE we had a problem with the first value of every row; we couldn't solve it.
if (checksum == 0)
printf("\nResults for single-threaded and multiple-threaded are the same! : ) \n");
else
printf("\nResults for single-threaded and multiple-threaded are different! : ( \n");
printf("Unfortunately we had a problem with every first value of each row (from the second row)");
printf("\n");
printf("Matrix A:\n");
for(int i = 0; i < min(10, A.height); i++){
for(int j = 0; j < min(10, A.width); j++)
printf("%5.1f ", A.elements[i*A.width + j]);
printf("\n");
}
printf("\n");
printf("Matrix B:\n");
for(int i = 0; i < min(10, B.height); i++){
for(int j = 0; j < min(10, B.width); j++)
printf("%5.1f ", B.elements[i*B.width + j]);
printf("\n");
}
printf("\n");
printf("Result matrix C (multiple-threaded):\n");
for(int i = 0; i < min(10, C.height); i++){
for(int j = 0; j < min(10, C.width); j++)
printf("%7.1f ", C.elements[i*C.width + j]);
printf("\n");
}
printf("\n");
printf("Result matrix C (single-threaded):\n");
for(int i = 0; i < min(10, C1.height); i++){
for(int j = 0; j < min(10, C1.width); j++)
printf("%7.1f ", C1.elements[i*C1.width + j]);
printf("\n");
}
}
| 7dc6d316587c2cdf2e05f5c435fc3b89bbdcc585.cu | #include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <time.h>
//#include <cuda.h>
typedef struct {
int width;
int height;
float* elements;
}Matrix;
float rand(float a,float b)
{
//return( a + rand()%(b-a+1.0) );
return(b - a) * ((float)rand() / RAND_MAX) + a;
}
#define BLOCK_SIZE 16
#define BLOCK_SIZE_SINGLE 1
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
/*
We couldn't find the proper library for the functions:
cutCreateTimer, cutResetTimer, cudaEventCreate, cudaEventRecord, cudaEventSynchronize, cudaEventElapsedTime
That's why we have left them in the comments for now, for clarity of the program
*/
//uint kernelTime;
//cutCreateTimer(&kernelTime);
//cutResetTimer(kernelTime);
Matrix d_A;
d_A.width = A.width;
d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
cudaError_t err = cudaMalloc(&d_A.elements, size);
printf("CUDA malloc A: %s\n",cudaGetErrorString(err));
err = cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
printf("Copy A to device: %s\n",cudaGetErrorString(err));
Matrix d_B;
d_B.width = B.width;
d_B.height = B.height;
size = B.width * B.height * sizeof(float);
err = cudaMalloc(&d_B.elements, size);
printf("CUDA malloc B: %s\n",cudaGetErrorString(err));
err = cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);
printf("Copy B to device: %s\n",cudaGetErrorString(err));
Matrix d_C;
d_C.width = C.width;
d_C.height = C.height;
size = C.width * C.height * sizeof(float);
err = cudaMalloc(&d_C.elements, size);
printf("CUDA malloc C: %s\n",cudaGetErrorString(err));
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((B.width + dimBlock.x - 1) / dimBlock.x,
(A.height + dimBlock.y - 1) / dimBlock.y);
printf("Block size= %d\n",BLOCK_SIZE);
printf("DimBlock = %d\n",dimBlock);
//cudaEvent_t start, stop;
//float time;
//cudaEventCreate(&start);
//cudaEventCreate(&stop);
MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
err = cudaDeviceSynchronize();
//cudaEventRecord(stop,0);
//cudaEventSynchronize(stop);
printf("Run kernel: %s\n", cudaGetErrorString(err));
err = cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);
printf("Copy C off of device: %s\n",cudaGetErrorString(err));
cudaFree(d_A.elements);
cudaFree(d_B.elements);
//cudaEventElapsedTime(&time, start, stop);
//printf("Time : %f mss\n",time);
}
void MatMulSingleThreaded(const Matrix A, const Matrix B, Matrix C)
{
Matrix d_A;
d_A.width = A.width;
d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
cudaError_t err = cudaMalloc(&d_A.elements, size);
printf("CUDA malloc A: %s\n",cudaGetErrorString(err));
err = cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
printf("Copy A to device: %s\n",cudaGetErrorString(err));
Matrix d_B;
d_B.width = B.width;
d_B.height = B.height;
size = B.width * B.height * sizeof(float);
err = cudaMalloc(&d_B.elements, size);
printf("CUDA malloc B: %s\n",cudaGetErrorString(err));
err = cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);
printf("Copy B to device: %s\n",cudaGetErrorString(err));
Matrix d_C;
d_C.width = C.width;
d_C.height = C.height;
size = C.width * C.height * sizeof(float);
err = cudaMalloc(&d_C.elements, size);
printf("CUDA malloc C: %s\n",cudaGetErrorString(err));
dim3 dimBlock(BLOCK_SIZE_SINGLE, BLOCK_SIZE_SINGLE);
dim3 dimGrid((B.width + dimBlock.x - 1) / dimBlock.x,
(A.height + dimBlock.y - 1) / dimBlock.y);
printf("Block size= %d\n",BLOCK_SIZE_SINGLE);
printf("DimBlock = %d\n",dimBlock);
//cudaEvent_t start, stop;
//float time;
//cudaEventCreate(&start);
//cudaEventCreate(&stop);
MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
err = cudaDeviceSynchronize();
//cudaEventRecord(stop,0);
//cudaEventSynchronize(stop);
printf("Run kernel: %s\n", cudaGetErrorString(err));
err = cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);
printf("Copy C off of device: %s\n",cudaGetErrorString(err));
cudaFree(d_A.elements);
cudaFree(d_B.elements);
//cudaEventElapsedTime(&time, start, stop);
//printf("Time : %f mss\n",time);
}
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C) {
float Cvalue = 0.0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if(row > A.height || col > B.width) return;
for (int e = 0; e < A.width; ++e)
Cvalue += (A.elements[row * A.width + e]) * (B.elements[e * B.width + col]);
C.elements[row * C.width + col] = Cvalue;
}
int main(int argc, char* argv[]){
time_t czas;
srand( (unsigned int)time(&czas));
Matrix A, B, C,C1;
int a1, a2, b1, b2;
a1 = atoi(argv[1]); /* Height of A */
a2 = atoi(argv[2]); /* Width of A */
b1 = a2; /* Height of B */
b2 = atoi(argv[3]); /* Width of B */
A.height = a1;
A.width = a2;
A.elements = (float*)malloc(A.width * A.height * sizeof(float));
B.height = b1;
B.width = b2;
B.elements = (float*)malloc(B.width * B.height * sizeof(float));
C.height = A.height;
C.width = B.width;
C.elements = (float*)malloc(C.width * C.height * sizeof(float));
C1.height = A.height;
C1.width = B.width;
C1.elements = (float*)malloc(C.width * C.height * sizeof(float));
printf("\n");
for(int i = 0; i < A.height; i++) {
for(int j = 0; j < A.width; j++)
A.elements[i*A.width + j] = rand(1.0,50.0);
}
for(int i = 0; i < B.height; i++){
for(int j = 0; j < B.width; j++)
B.elements[i*B.width + j] = rand(1.0,50.0);
}
MatMul(A, B, C);
MatMulSingleThreaded(A,B,C1);
int checksum=0;
for(int i = 0; i < C.height; i++){
for(int j = 0; j < min(10, C.width); j++){
if(C.elements[i*C.width + j]!= C1.elements[i*C1.width + j])
checksum++;
}
printf("checksum= %d\n",checksum);
}
// HERE we had a problem with the first value of every row; we couldn't solve it.
if (checksum == 0)
printf("\nResults for single-threaded and multiple-threaded are the same! : ) \n");
else
printf("\nResults for single-threaded and multiple-threaded are different! : ( \n");
printf("Unfortunately we had a problem with every first value of each row (from the second row)");
printf("\n");
printf("Matrix A:\n");
for(int i = 0; i < min(10, A.height); i++){
for(int j = 0; j < min(10, A.width); j++)
printf("%5.1f ", A.elements[i*A.width + j]);
printf("\n");
}
printf("\n");
printf("Matrix B:\n");
for(int i = 0; i < min(10, B.height); i++){
for(int j = 0; j < min(10, B.width); j++)
printf("%5.1f ", B.elements[i*B.width + j]);
printf("\n");
}
printf("\n");
printf("Result matrix C (multiple-threaded):\n");
for(int i = 0; i < min(10, C.height); i++){
for(int j = 0; j < min(10, C.width); j++)
printf("%7.1f ", C.elements[i*C.width + j]);
printf("\n");
}
printf("\n");
printf("Result matrix C (single-threaded):\n");
for(int i = 0; i < min(10, C1.height); i++){
for(int j = 0; j < min(10, C1.width); j++)
printf("%7.1f ", C1.elements[i*C1.width + j]);
printf("\n");
}
}
|
de46531421ec090a80e203c461204d9bdaa8fa03.hip | // !!! This is a file automatically generated by hipify!!!
/*! Copyright 2019-2021 by XGBoost Contributors */
#include <gtest/gtest.h>
#include <thrust/device_vector.h>
#include <xgboost/context.h>
#include <xgboost/data.h>
#include <xgboost/json.h>
#include "../../../src/common/device_helpers.cuh"
#include "test_array_interface.h"
#include "test_metainfo.h"
namespace xgboost {
template <typename T>
std::string PrepareData(std::string typestr, thrust::device_vector<T>* out, const size_t kRows=16) {
out->resize(kRows);
auto& d_data = *out;
for (size_t i = 0; i < d_data.size(); ++i) {
d_data[i] = i * 2.0;
}
Json column { Object() };
std::vector<Json> j_shape {Json(Integer(static_cast<Integer::Int>(kRows)))};
column["shape"] = Array(j_shape);
column["strides"] = Array(std::vector<Json>{Json(Integer{static_cast<Integer::Int>(sizeof(T))})});
column["version"] = 3;
column["typestr"] = String(typestr);
auto p_d_data = d_data.data().get();
std::vector<Json> j_data{Json(Integer{reinterpret_cast<Integer::Int>(p_d_data)}),
Json(Boolean(false))};
column["data"] = j_data;
column["stream"] = nullptr;
Json array(std::vector<Json>{column});
std::string str;
Json::Dump(array, &str);
return str;
}
TEST(MetaInfo, FromInterface) {
hipSetDevice(0);
Context ctx;
thrust::device_vector<float> d_data;
std::string str = PrepareData<float>("<f4", &d_data);
MetaInfo info;
info.SetInfo(ctx, "label", str.c_str());
auto const& h_label = info.labels.HostView();
ASSERT_EQ(h_label.Size(), d_data.size());
for (size_t i = 0; i < d_data.size(); ++i) {
ASSERT_EQ(h_label(i), d_data[i]);
}
info.SetInfo(ctx, "weight", str.c_str());
auto const& h_weight = info.weights_.HostVector();
for (size_t i = 0; i < d_data.size(); ++i) {
ASSERT_EQ(h_weight[i], d_data[i]);
}
info.SetInfo(ctx, "base_margin", str.c_str());
auto const h_base_margin = info.base_margin_.View(DeviceOrd::CPU());
ASSERT_EQ(h_base_margin.Size(), d_data.size());
for (size_t i = 0; i < d_data.size(); ++i) {
ASSERT_EQ(h_base_margin(i), d_data[i]);
}
thrust::device_vector<int> d_group_data;
std::string group_str = PrepareData<int>("<i4", &d_group_data, 4);
d_group_data[0] = 4;
d_group_data[1] = 3;
d_group_data[2] = 2;
d_group_data[3] = 1;
info.SetInfo(ctx, "group", group_str.c_str());
std::vector<bst_group_t> expected_group_ptr = {0, 4, 7, 9, 10};
EXPECT_EQ(info.group_ptr_, expected_group_ptr);
}
TEST(MetaInfo, GPUStridedData) {
TestMetaInfoStridedData(DeviceOrd::CUDA(0));
}
TEST(MetaInfo, Group) {
hipSetDevice(0);
MetaInfo info;
Context ctx;
thrust::device_vector<uint32_t> d_uint;
std::string uint_str = PrepareData<uint32_t>("<u4", &d_uint);
info.SetInfo(ctx, "group", uint_str.c_str());
auto& h_group = info.group_ptr_;
ASSERT_EQ(h_group.size(), d_uint.size() + 1);
for (size_t i = 1; i < h_group.size(); ++i) {
ASSERT_EQ(h_group[i], d_uint[i - 1] + h_group[i - 1]) << "i: " << i;
}
thrust::device_vector<int64_t> d_int64;
std::string int_str = PrepareData<int64_t>("<i8", &d_int64);
info = MetaInfo();
info.SetInfo(ctx, "group", int_str.c_str());
h_group = info.group_ptr_;
ASSERT_EQ(h_group.size(), d_uint.size() + 1);
for (size_t i = 1; i < h_group.size(); ++i) {
ASSERT_EQ(h_group[i], d_uint[i - 1] + h_group[i - 1]) << "i: " << i;
}
// Incorrect type
thrust::device_vector<float> d_float;
std::string float_str = PrepareData<float>("<f4", &d_float);
info = MetaInfo();
EXPECT_ANY_THROW(info.SetInfo(ctx, "group", float_str.c_str()));
}
TEST(MetaInfo, GPUQid) {
xgboost::MetaInfo info;
Context ctx;
info.num_row_ = 100;
thrust::device_vector<uint32_t> qid(info.num_row_, 0);
for (size_t i = 0; i < qid.size(); ++i) {
qid[i] = i;
}
auto column = Generate2dArrayInterface(info.num_row_, 1, "<u4", &qid);
Json array{std::vector<Json>{column}};
std::string array_str;
Json::Dump(array, &array_str);
info.SetInfo(ctx, "qid", array_str.c_str());
ASSERT_EQ(info.group_ptr_.size(), info.num_row_ + 1);
ASSERT_EQ(info.group_ptr_.front(), 0);
ASSERT_EQ(info.group_ptr_.back(), info.num_row_);
for (size_t i = 0; i < info.num_row_ + 1; ++i) {
ASSERT_EQ(info.group_ptr_[i], i);
}
}
TEST(MetaInfo, DeviceExtend) {
dh::safe_cuda(hipSetDevice(0));
size_t const kRows = 100;
MetaInfo lhs, rhs;
Context ctx;
thrust::device_vector<float> d_data;
std::string str = PrepareData<float>("<f4", &d_data, kRows);
lhs.SetInfo(ctx, "label", str.c_str());
rhs.SetInfo(ctx, "label", str.c_str());
ASSERT_FALSE(rhs.labels.Data()->HostCanRead());
lhs.num_row_ = kRows;
rhs.num_row_ = kRows;
lhs.Extend(rhs, true, true);
ASSERT_EQ(lhs.num_row_, kRows * 2);
ASSERT_FALSE(lhs.labels.Data()->HostCanRead());
ASSERT_FALSE(lhs.labels.Data()->HostCanRead());
ASSERT_FALSE(rhs.labels.Data()->HostCanRead());
}
} // namespace xgboost
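//
// For reference (reconstructed from PrepareData above, so treat the exact
// formatting as an approximation): the dumped string is a one-element JSON
// array whose entry follows the __cuda_array_interface__ version 3 layout,
// roughly
//
//   [{"shape": [16], "strides": [4], "version": 3, "typestr": "<f4",
//     "data": [<device pointer as integer>, false], "stream": null}]
//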
| de46531421ec090a80e203c461204d9bdaa8fa03.cu | /*! Copyright 2019-2021 by XGBoost Contributors */
#include <gtest/gtest.h>
#include <thrust/device_vector.h>
#include <xgboost/context.h>
#include <xgboost/data.h>
#include <xgboost/json.h>
#include "../../../src/common/device_helpers.cuh"
#include "test_array_interface.h"
#include "test_metainfo.h"
namespace xgboost {
template <typename T>
std::string PrepareData(std::string typestr, thrust::device_vector<T>* out, const size_t kRows=16) {
out->resize(kRows);
auto& d_data = *out;
for (size_t i = 0; i < d_data.size(); ++i) {
d_data[i] = i * 2.0;
}
Json column { Object() };
std::vector<Json> j_shape {Json(Integer(static_cast<Integer::Int>(kRows)))};
column["shape"] = Array(j_shape);
column["strides"] = Array(std::vector<Json>{Json(Integer{static_cast<Integer::Int>(sizeof(T))})});
column["version"] = 3;
column["typestr"] = String(typestr);
auto p_d_data = d_data.data().get();
std::vector<Json> j_data{Json(Integer{reinterpret_cast<Integer::Int>(p_d_data)}),
Json(Boolean(false))};
column["data"] = j_data;
column["stream"] = nullptr;
Json array(std::vector<Json>{column});
std::string str;
Json::Dump(array, &str);
return str;
}
TEST(MetaInfo, FromInterface) {
cudaSetDevice(0);
Context ctx;
thrust::device_vector<float> d_data;
std::string str = PrepareData<float>("<f4", &d_data);
MetaInfo info;
info.SetInfo(ctx, "label", str.c_str());
auto const& h_label = info.labels.HostView();
ASSERT_EQ(h_label.Size(), d_data.size());
for (size_t i = 0; i < d_data.size(); ++i) {
ASSERT_EQ(h_label(i), d_data[i]);
}
info.SetInfo(ctx, "weight", str.c_str());
auto const& h_weight = info.weights_.HostVector();
for (size_t i = 0; i < d_data.size(); ++i) {
ASSERT_EQ(h_weight[i], d_data[i]);
}
info.SetInfo(ctx, "base_margin", str.c_str());
auto const h_base_margin = info.base_margin_.View(DeviceOrd::CPU());
ASSERT_EQ(h_base_margin.Size(), d_data.size());
for (size_t i = 0; i < d_data.size(); ++i) {
ASSERT_EQ(h_base_margin(i), d_data[i]);
}
thrust::device_vector<int> d_group_data;
std::string group_str = PrepareData<int>("<i4", &d_group_data, 4);
d_group_data[0] = 4;
d_group_data[1] = 3;
d_group_data[2] = 2;
d_group_data[3] = 1;
info.SetInfo(ctx, "group", group_str.c_str());
std::vector<bst_group_t> expected_group_ptr = {0, 4, 7, 9, 10};
EXPECT_EQ(info.group_ptr_, expected_group_ptr);
}
TEST(MetaInfo, GPUStridedData) {
TestMetaInfoStridedData(DeviceOrd::CUDA(0));
}
TEST(MetaInfo, Group) {
cudaSetDevice(0);
MetaInfo info;
Context ctx;
thrust::device_vector<uint32_t> d_uint;
std::string uint_str = PrepareData<uint32_t>("<u4", &d_uint);
info.SetInfo(ctx, "group", uint_str.c_str());
auto& h_group = info.group_ptr_;
ASSERT_EQ(h_group.size(), d_uint.size() + 1);
for (size_t i = 1; i < h_group.size(); ++i) {
ASSERT_EQ(h_group[i], d_uint[i - 1] + h_group[i - 1]) << "i: " << i;
}
thrust::device_vector<int64_t> d_int64;
std::string int_str = PrepareData<int64_t>("<i8", &d_int64);
info = MetaInfo();
info.SetInfo(ctx, "group", int_str.c_str());
h_group = info.group_ptr_;
ASSERT_EQ(h_group.size(), d_uint.size() + 1);
for (size_t i = 1; i < h_group.size(); ++i) {
ASSERT_EQ(h_group[i], d_uint[i - 1] + h_group[i - 1]) << "i: " << i;
}
// Incorrect type
thrust::device_vector<float> d_float;
std::string float_str = PrepareData<float>("<f4", &d_float);
info = MetaInfo();
EXPECT_ANY_THROW(info.SetInfo(ctx, "group", float_str.c_str()));
}
TEST(MetaInfo, GPUQid) {
xgboost::MetaInfo info;
Context ctx;
info.num_row_ = 100;
thrust::device_vector<uint32_t> qid(info.num_row_, 0);
for (size_t i = 0; i < qid.size(); ++i) {
qid[i] = i;
}
auto column = Generate2dArrayInterface(info.num_row_, 1, "<u4", &qid);
Json array{std::vector<Json>{column}};
std::string array_str;
Json::Dump(array, &array_str);
info.SetInfo(ctx, "qid", array_str.c_str());
ASSERT_EQ(info.group_ptr_.size(), info.num_row_ + 1);
ASSERT_EQ(info.group_ptr_.front(), 0);
ASSERT_EQ(info.group_ptr_.back(), info.num_row_);
for (size_t i = 0; i < info.num_row_ + 1; ++i) {
ASSERT_EQ(info.group_ptr_[i], i);
}
}
TEST(MetaInfo, DeviceExtend) {
dh::safe_cuda(cudaSetDevice(0));
size_t const kRows = 100;
MetaInfo lhs, rhs;
Context ctx;
thrust::device_vector<float> d_data;
std::string str = PrepareData<float>("<f4", &d_data, kRows);
lhs.SetInfo(ctx, "label", str.c_str());
rhs.SetInfo(ctx, "label", str.c_str());
ASSERT_FALSE(rhs.labels.Data()->HostCanRead());
lhs.num_row_ = kRows;
rhs.num_row_ = kRows;
lhs.Extend(rhs, true, true);
ASSERT_EQ(lhs.num_row_, kRows * 2);
ASSERT_FALSE(lhs.labels.Data()->HostCanRead());
ASSERT_FALSE(lhs.labels.Data()->HostCanRead());
ASSERT_FALSE(rhs.labels.Data()->HostCanRead());
}
} // namespace xgboost
|
b31d532862760a429e01c83327ac323013dd4e76.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../include/cudapars.h"
#include "../include/paramssteeringtest1.h"
#include "../include/iobparams.h"
/////////////////////////////////////
// standard imports
/////////////////////////////////////
#include <stdio.h>
#include <math.h>
#include "../include/smaugcukernels.h"
/////////////////////////////////////
// kernel function (CUDA device)
/////////////////////////////////////
#include "../include/gradops_hdbne1.cuh"
__global__ void hyperdifbsourcene6_parallel(struct params *p, real *wmod,
real *dwn1, real *wd, int order,int ordero, real *wtemp, int field, int dim, int jj, int ii0,int mm,real sb, real dt)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int m,ii1;
real fip,fim1,tmpc;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
//real dt=p->dt;
real dy=p->dx[1];
real dx=p->dx[0];
int ip,jp;
int iia[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
int shift=order*NVAR*dimp;
iia[0]=ip;
iia[1]=jp;
i=iia[0];
j=iia[1];
k=0;
#ifdef USE_SAC_3D
iia[2]=kp;
k=iia[2];
#endif
#ifdef USE_SAC_3D
if(i<((p->n[0])) && j<((p->n[1])) && k<((p->n[2])))
#else
if(i<((p->n[0])) && j<((p->n[1])))
#endif
//if(i<((p->n[0])) && j<((p->n[1])))
{
//dwn1[fencode3_hdbne1(p,iia,energy)]=sb*wtemp[fencode3_hdbne1(p,iia,tmp6)];
dwn1[fencode3_hdbne1(p,iia,b1+ii0)]=sb*wtemp[fencode3_hdbne1(p,iia,tmp4)];
}
//__syncthreads();
iia[0]=ip;
iia[1]=jp;
i=iia[0];
j=iia[1];
k=0;
#ifdef USE_SAC_3D
iia[2]=kp;
k=iia[2];
#endif
#ifdef USE_SAC_3D
if(i<((p->n[0])) && j<((p->n[1])) && k<((p->n[2])))
#else
if(i<((p->n[0])) && j<((p->n[1])))
#endif
//if(i<(ni) && j<(nj))
{
// - sign here same as vac maybe a +
wmod[fencode3_hdbne1(p,iia,b1+ii0)+(ordero*NVAR*dimp)]=wmod[fencode3_hdbne1(p,iia,b1+ii0)+(ordero*NVAR*dimp)]+dt*dwn1[fencode3_hdbne1(p,iia,b1+ii0)];
//wmod[fencode3_hdbne1(p,iia,energy)+(ordero*NVAR*dimp)]=wmod[fencode3_hdbne1(p,iia,energy)+(ordero*NVAR*dimp)]+dt*dwn1[fencode3_hdbne1(p,iia,energy)];
}
// }
//__syncthreads();
}
__global__ void hyperdifbsourcene6a_parallel(struct params *p, real *wmod,
real *dwn1, real *wd, int order,int ordero, real *wtemp, int field, int dim, int jj, int ii0,int mm,real sb, real dt)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int m,ii1;
real fip,fim1,tmpc;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
//real dt=p->dt;
real dy=p->dx[1];
real dx=p->dx[0];
int ip,jp;
int iia[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
int shift=order*NVAR*dimp;
iia[0]=ip;
iia[1]=jp;
i=iia[0];
j=iia[1];
k=0;
#ifdef USE_SAC_3D
iia[2]=kp;
k=iia[2];
#endif
#ifdef USE_SAC_3D
if(i<((p->n[0])) && j<((p->n[1])) && k<((p->n[2])))
#else
if(i<((p->n[0])) && j<((p->n[1])))
#endif
//if(i<((p->n[0])) && j<((p->n[1])))
{
dwn1[fencode3_hdbne1(p,iia,energy)]=sb*wtemp[fencode3_hdbne1(p,iia,tmp6)];
//dwn1[fencode3_hdbne1(p,iia,b1+ii0)]=sb*wtemp[fencode3_hdbne1(p,iia,tmp4)];
}
//__syncthreads();
iia[0]=ip;
iia[1]=jp;
i=iia[0];
j=iia[1];
k=0;
#ifdef USE_SAC_3D
iia[2]=kp;
k=iia[2];
#endif
#ifdef USE_SAC_3D
if(i<((p->n[0])) && j<((p->n[1])) && k<((p->n[2])))
#else
if(i<((p->n[0])) && j<((p->n[1])))
#endif
//if(i<(ni) && j<(nj))
{
// - sign here same as vac maybe a +
//wmod[fencode3_hdbne1(p,iia,b1+ii0)+(ordero*NVAR*dimp)]=wmod[fencode3_hdbne1(p,iia,b1+ii0)+(ordero*NVAR*dimp)]+dt*dwn1[fencode3_hdbne1(p,iia,b1+ii0)];
wmod[fencode3_hdbne1(p,iia,energy)+(ordero*NVAR*dimp)]=wmod[fencode3_hdbne1(p,iia,energy)+(ordero*NVAR*dimp)]+dt*dwn1[fencode3_hdbne1(p,iia,energy)];
// if(i==127 && j==252)
// p->test=wmod[fencode3_hdbne1(p,iia,energy)+(ordero*NVAR*dimp)];
}
// }
//__syncthreads();
}
__global__ void hyperdifbsourcene5_parallel(struct params *p, real *wmod,
real *dwn1, real *wd, int order,int ordero, real *wtemp, int field, int dim, int jj, int ii0,int mm,real sb, real dt)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int m,ii1;
real fip,fim1,tmpc;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ip,jp;
int iia[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
//int ip,jp,ipg,jpg;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
int shift=order*NVAR*dimp;
iia[0]=ip;
iia[1]=jp;
i=iia[0];
j=iia[1];
k=0;
#ifdef USE_SAC_3D
iia[2]=kp;
k=iia[2];
#endif
#ifdef USE_SAC_3D
if(i>0 && j >0 && i<((p->n[0])-1) && j<((p->n[1])-1) && k<((p->n[2])-1))
#else
if(i>0 && j >0 && i<((p->n[0])-1) && j<((p->n[1])-1))
#endif
//if(i>0 && j >0 && i<((p->n[0])-1) && j<((p->n[1])-1))
{
wtemp[fencode3_hdbne1(p,iia,tmp6)]=grad13n_hdbne1(wtemp,wd,p,iia,tmp5,mm);
}
//__syncthreads();
}
__global__ void hyperdifbsourcene4_parallel(struct params *p, real *wmod,
real *dwn1, real *wd, int order,int ordero, real *wtemp, int field, int dim, int jj, int ii0,int mm,real sb, real dt)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int m,ii1;
real fip,fim1,tmpc;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
//real dt=p->dt;
real dy=p->dx[1];
real dx=p->dx[0];
int ip,jp;
int iia[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
//int ip,jp,ipg,jpg;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
int shift=order*NVAR*dimp;
iia[0]=ip;
iia[1]=jp;
i=iia[0];
j=iia[1];
k=0;
#ifdef USE_SAC_3D
iia[2]=kp;
k=iia[2];
#endif
#ifdef USE_SAC_3D
if( i<((p->n[0])) && j<((p->n[1])) && k<((p->n[2])))
#else
if( i<((p->n[0])) && j<((p->n[1])))
#endif
//if( i<((p->n[0])) && j<((p->n[1])))
{
wtemp[fencode3_hdbne1(p,iia,tmp5)]=wtemp[fencode3_hdbne1(p,iia,tmp3)]*wmod[(shift)+fencode3_hdbne1(p,iia,b1+jj)];
}
//__syncthreads();
}
__global__ void hyperdifbsourcene3_parallel(struct params *p, real *wmod,
real *dwn1, real *wd, int order,int ordero, real *wtemp, int field, int dim, int jj, int ii0,int mm,real sb)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int m,ii1;
real fip,fim1,tmpc;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dt=p->dt;
real dy=p->dx[1];
real dx=p->dx[0];
int ip,jp;
int iia[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
//int ip,jp,ipg,jpg;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
int shift=order*NVAR*dimp;
iia[0]=ip;
iia[1]=jp;
i=iia[0];
j=iia[1];
k=0;
#ifdef USE_SAC_3D
iia[2]=kp;
k=iia[2];
#endif
#ifdef USE_SAC_3D
if(i>0 && j>0 && k>0 && i<((p->n[0])-1) && j<((p->n[1])-1) && k<((p->n[2])-1))
#else
if(i>0 && j >0 && i<((p->n[0])-1) && j<((p->n[1])-1))
#endif
//if(i>0 && j >0 && i<((p->n[0])-1) && j<((p->n[1])-1))
{
wtemp[fencode3_hdbne1(p,iia,tmp4)]=grad13n_hdbne1(wtemp,wd,p,iia,tmp3,mm);
// if(i==252 && j==127)
// p->test=wtemp[fencode3_hdbne1(p,iia,tmp3)];
}
//__syncthreads();
}
__global__ void hyperdifbsourcene2_parallel(struct params *p, real *wmod,
real *dwn1, real *wd, int order,int ordero, real *wtemp, int field, int dim, int jj, int ii0,int mm,real sb)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j,k;
int m,ii1;
//real fip,fim1,tmpc;
//int index,k;
int ni=p->n[0];
int nj=p->n[1];
//real dt=p->dt;
//real dy=p->dx[1];
//real dx=p->dx[0];
int ip,jp;
int iia[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
//int ip,jp,ipg,jpg;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
int shift=order*NVAR*dimp;
iia[0]=ip;
iia[1]=jp;
i=iia[0];
j=iia[1];
k=0;
#ifdef USE_SAC_3D
iia[2]=kp;
k=iia[2];
#endif
#ifdef USE_SAC_3D
if(i<((p->n[0])) && j<((p->n[1]))&& k<((p->n[2])))
#else
if(i<((p->n[0])) && j<((p->n[1])))
#endif
{
wtemp[fencode3_hdbne1(p,iia,tmp3)]=wtemp[fencode3_hdbne1(p,iia,tmp2)]*(wd[fencode3_hdbne1(p,iia,hdnul)]+wd[fencode3_hdbne1(p,iia,hdnur)])/2;
//wtemp[fencode3_hdbne1(p,iia,tmp3)]=wtemp[fencode3_hdbne1(p,iia,tmp2)]*3.75;
//if(i==127 && j==252)
// p->test=wtemp[fencode3_hdbne1(p,iia,tmp2)];
// if(i==127 && j==252)
// p->test=wtemp[fencode3_hdbne1(p,iia,tmp3)];
}
//__syncthreads();
}
__global__ void hyperdifbsourcene1b_parallel(struct params *p, real *wmod,
real *dwn1, real *wd, int order,int ordero, real *wtemp, int field, int dim, int jj, int ii0,int mm,real sb)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j,k;
int m,ii1;
//real fip,fim1,tmpc;
//int index,k;
int ni=p->n[0];
int nj=p->n[1];
//real dt=p->dt;
//real dy=p->dx[1];
//real dx=p->dx[0];
int ip,jp;
int iia[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
//int ip,jp,ipg,jpg;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
int shift=order*NVAR*dimp;
iia[0]=ip;
iia[1]=jp;
i=iia[0];
j=iia[1];
k=0;
#ifdef USE_SAC_3D
iia[2]=kp;
k=iia[2];
#endif
#ifdef USE_SAC_3D
if(i>0 && j >0 && k>0 && i<((p->n[0])-1) && j<((p->n[1])-1) && k<((p->n[2])-1))
#else
if(i>0 && j >0 && i<((p->n[0])-1) && j<((p->n[1])-1))
#endif
// if(i>0 && j >0 && i<((p->n[0])-1) && j<((p->n[1])-1))
{
wtemp[fencode3_hdbne1(p,iia,tmp2)]=/*0.25**/grad13n_hdbne1(wtemp,wd,p,iia,tmp1,dim);
// wtemp[fencode3_hdbne1(p,iia,tmp2)]=/*0.25**/grad13n_hdbne1(wmod+shift,wd,p,iia,b1+field,dim);
//wmod[(shift)+fencode3_hdbne1(p,iia,b1+field)]
//if(i==127 && j==252)
// p->test=grad13n_hdbne1(wtemp,wd,p,iia,tmp2,dim);
//if(i==127 && j==252)
// p->test=grad13n_hdbne1(wmod+shift,wd,p,iia,b1+field,dim);
//if(i==127 && j==252)
// p->test=wtemp[fencode3_hdbne1(p,iia,tmp2)];
}
//__syncthreads();
}
__global__ void hyperdifbsourcene1a_parallel(struct params *p, real *wmod,
real *dwn1, real *wd, int order,int ordero, real *wtemp, int field, int dim, int jj, int ii0,int mm,real sb)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j,k;
int m,ii1;
//real fip,fim1,tmpc;
//int index,k;
int ni=p->n[0];
int nj=p->n[1];
//real dt=p->dt;
//real dy=p->dx[1];
//real dx=p->dx[0];
int ip,jp;
int iia[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
//int ip,jp,ipg,jpg;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
int shift=order*NVAR*dimp;
iia[0]=ip;
iia[1]=jp;
i=iia[0];
j=iia[1];
k=0;
#ifdef USE_SAC_3D
iia[2]=kp;
k=iia[2];
#endif
#ifdef USE_SAC_3D
if(i<((p->n[0])) && j<((p->n[1])) && k<((p->n[2])))
#else
if(i<((p->n[0])) && j<((p->n[1])))
#endif
//if( i<((p->n[0])) && j<((p->n[1])))
{
wtemp[fencode3_hdbne1(p,iia,tmp1)]=wmod[(shift)+fencode3_hdbne1(p,iia,b1+field)];
//wtemp[fencode3_hdbne1(p,iia,tmp1)]=wmod[fencode3_hdbne1(p,iia,b1+field)];
// if(i==127 && j==127)
// p->test=wmod[shift+fencode3_hdbne1(p,iia,b1+field)];
}
//__syncthreads();
}
__global__ void hyperdifbsourcene1_parallel(struct params *p, real *wmod,
real *dwn1, real *wd, int order,int ordero, real *wtemp, int field, int dim, int jj, int ii0,int mm,real sb)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j,k;
int m,ii1;
//real fip,fim1,tmpc;
//int index,k;
int ni=p->n[0];
int nj=p->n[1];
//real dt=p->dt;
//real dy=p->dx[1];
//real dx=p->dx[0];
int ip,jp;
int iia[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
//int ip,jp,ipg,jpg;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
int shift=order*NVAR*dimp;
iia[0]=ip;
iia[1]=jp;
i=iia[0];
j=iia[1];
k=0;
#ifdef USE_SAC_3D
iia[2]=kp;
k=iia[2];
#endif
#ifdef USE_SAC_3D
if(i<((p->n[0])) && j<((p->n[1])) && k<((p->n[2])))
#else
if(i<((p->n[0])) && j<((p->n[1])))
#endif
//init rhol and rhor
//if(i<((p->n[0])) && j<((p->n[1])))
{
for(int f=tmp1; f<=tmp8; f++)
wtemp[fencode3_hdbne1(p,iia,f)]=0.0;
dwn1[fencode3_hdbne1(p,iia,energy)]=0.0;
dwn1[fencode3_hdbne1(p,iia,b1+ii0)]=0.0;
}
//__syncthreads();
}
/////////////////////////////////////
// error checking routine
/////////////////////////////////////
void checkErrors_hdbne1(char *label)
{
// we need to synchronise first to catch errors due to
// asynchroneous operations that would otherwise
// potentially go unnoticed
hipError_t err;
err = hipDeviceSynchronize();
if (err != hipSuccess)
{
char *e = (char*) hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
err = hipGetLastError();
if (err != hipSuccess)
{
char *e = (char*) hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
}
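// A possible usage sketch (hypothetical; nothing in this file actually calls
// the helper): place it right after a kernel launch or memcpy, e.g.
//
//   checkErrors_hdbne1((char *)"hyperdifbsourcene1 kernels");
//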
int cuhyperdifbsourcene1(struct params **p, struct params **d_p, real **d_wmod, real **d_dwn1, real **d_wd, int order,int ordero, real **d_wtemp, int field, int dim, int jj, int ii0,int mm,real sb,real dt)
{
int dimp=(((*p)->n[0]))*(((*p)->n[1]));
#ifdef USE_SAC_3D
dimp=(((*p)->n[0]))*(((*p)->n[1]))*(((*p)->n[2]));
#endif
int numBlocks = (dimp+numThreadsPerBlock-1) / numThreadsPerBlock;
hipLaunchKernelGGL(( hyperdifbsourcene1_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p, *d_wmod, *d_dwn1, *d_wd, order,ordero,*d_wtemp, field, dim,jj,ii0,mm,sb);
hipDeviceSynchronize();
hipLaunchKernelGGL(( hyperdifbsourcene1a_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p, *d_wmod, *d_dwn1, *d_wd, order,ordero,*d_wtemp, field, dim,jj,ii0,mm,sb);
hipDeviceSynchronize();
hipLaunchKernelGGL(( hyperdifbsourcene1b_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p, *d_wmod, *d_dwn1, *d_wd, order,ordero,*d_wtemp, field, dim,jj,ii0,mm,sb);
hipDeviceSynchronize();
hipLaunchKernelGGL(( hyperdifbsourcene2_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p, *d_wmod, *d_dwn1, *d_wd, order,ordero,*d_wtemp, field, dim,jj,ii0,mm,sb);
hipDeviceSynchronize();
hipLaunchKernelGGL(( hyperdifbsourcene3_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p, *d_wmod, *d_dwn1, *d_wd, order,ordero,*d_wtemp, field, dim,jj,ii0,mm,sb);
hipDeviceSynchronize();
hipLaunchKernelGGL(( hyperdifbsourcene4_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p, *d_wmod, *d_dwn1, *d_wd, order,ordero,*d_wtemp, field, dim,jj,ii0,mm,sb,dt);
hipDeviceSynchronize();
hipLaunchKernelGGL(( hyperdifbsourcene6_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p, *d_wmod, *d_dwn1, *d_wd, order,ordero,*d_wtemp, field, dim,jj,ii0,mm,sb,dt);
hipDeviceSynchronize();
hipLaunchKernelGGL(( hyperdifbsourcene5_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p, *d_wmod, *d_dwn1, *d_wd, order,ordero,*d_wtemp, field, dim,jj,ii0,mm,sb,dt);
hipDeviceSynchronize();
hipLaunchKernelGGL(( hyperdifbsourcene6a_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p, *d_wmod, *d_dwn1, *d_wd, order,ordero,*d_wtemp, field, dim,jj,ii0,mm,sb,dt);
hipDeviceSynchronize();
hipMemcpy(*p, *d_p, sizeof(struct params), hipMemcpyDeviceToHost);
//printf("e %d %10.20g\n",mm,(*p)->test);
return 0;
}
| b31d532862760a429e01c83327ac323013dd4e76.cu | #include "../include/cudapars.h"
#include "../include/paramssteeringtest1.h"
#include "../include/iobparams.h"
/////////////////////////////////////
// standard imports
/////////////////////////////////////
#include <stdio.h>
#include <math.h>
#include "../include/smaugcukernels.h"
/////////////////////////////////////
// kernel function (CUDA device)
/////////////////////////////////////
#include "../include/gradops_hdbne1.cuh"
__global__ void hyperdifbsourcene6_parallel(struct params *p, real *wmod,
real *dwn1, real *wd, int order,int ordero, real *wtemp, int field, int dim, int jj, int ii0,int mm,real sb, real dt)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int m,ii1;
real fip,fim1,tmpc;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
//real dt=p->dt;
real dy=p->dx[1];
real dx=p->dx[0];
int ip,jp;
int iia[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
int shift=order*NVAR*dimp;
iia[0]=ip;
iia[1]=jp;
i=iia[0];
j=iia[1];
k=0;
#ifdef USE_SAC_3D
iia[2]=kp;
k=iia[2];
#endif
#ifdef USE_SAC_3D
if(i<((p->n[0])) && j<((p->n[1])) && k<((p->n[2])))
#else
if(i<((p->n[0])) && j<((p->n[1])))
#endif
//if(i<((p->n[0])) && j<((p->n[1])))
{
//dwn1[fencode3_hdbne1(p,iia,energy)]=sb*wtemp[fencode3_hdbne1(p,iia,tmp6)];
dwn1[fencode3_hdbne1(p,iia,b1+ii0)]=sb*wtemp[fencode3_hdbne1(p,iia,tmp4)];
}
//__syncthreads();
iia[0]=ip;
iia[1]=jp;
i=iia[0];
j=iia[1];
k=0;
#ifdef USE_SAC_3D
iia[2]=kp;
k=iia[2];
#endif
#ifdef USE_SAC_3D
if(i<((p->n[0])) && j<((p->n[1])) && k<((p->n[2])))
#else
if(i<((p->n[0])) && j<((p->n[1])))
#endif
//if(i<(ni) && j<(nj))
{
// - sign here same as vac maybe a +
wmod[fencode3_hdbne1(p,iia,b1+ii0)+(ordero*NVAR*dimp)]=wmod[fencode3_hdbne1(p,iia,b1+ii0)+(ordero*NVAR*dimp)]+dt*dwn1[fencode3_hdbne1(p,iia,b1+ii0)];
//wmod[fencode3_hdbne1(p,iia,energy)+(ordero*NVAR*dimp)]=wmod[fencode3_hdbne1(p,iia,energy)+(ordero*NVAR*dimp)]+dt*dwn1[fencode3_hdbne1(p,iia,energy)];
}
// }
//__syncthreads();
}
__global__ void hyperdifbsourcene6a_parallel(struct params *p, real *wmod,
real *dwn1, real *wd, int order,int ordero, real *wtemp, int field, int dim, int jj, int ii0,int mm,real sb, real dt)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int m,ii1;
real fip,fim1,tmpc;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
//real dt=p->dt;
real dy=p->dx[1];
real dx=p->dx[0];
int ip,jp;
int iia[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
int shift=order*NVAR*dimp;
iia[0]=ip;
iia[1]=jp;
i=iia[0];
j=iia[1];
k=0;
#ifdef USE_SAC_3D
iia[2]=kp;
k=iia[2];
#endif
#ifdef USE_SAC_3D
if(i<((p->n[0])) && j<((p->n[1])) && k<((p->n[2])))
#else
if(i<((p->n[0])) && j<((p->n[1])))
#endif
//if(i<((p->n[0])) && j<((p->n[1])))
{
dwn1[fencode3_hdbne1(p,iia,energy)]=sb*wtemp[fencode3_hdbne1(p,iia,tmp6)];
//dwn1[fencode3_hdbne1(p,iia,b1+ii0)]=sb*wtemp[fencode3_hdbne1(p,iia,tmp4)];
}
//__syncthreads();
iia[0]=ip;
iia[1]=jp;
i=iia[0];
j=iia[1];
k=0;
#ifdef USE_SAC_3D
iia[2]=kp;
k=iia[2];
#endif
#ifdef USE_SAC_3D
if(i<((p->n[0])) && j<((p->n[1])) && k<((p->n[2])))
#else
if(i<((p->n[0])) && j<((p->n[1])))
#endif
//if(i<(ni) && j<(nj))
{
// - sign here same as vac maybe a +
//wmod[fencode3_hdbne1(p,iia,b1+ii0)+(ordero*NVAR*dimp)]=wmod[fencode3_hdbne1(p,iia,b1+ii0)+(ordero*NVAR*dimp)]+dt*dwn1[fencode3_hdbne1(p,iia,b1+ii0)];
wmod[fencode3_hdbne1(p,iia,energy)+(ordero*NVAR*dimp)]=wmod[fencode3_hdbne1(p,iia,energy)+(ordero*NVAR*dimp)]+dt*dwn1[fencode3_hdbne1(p,iia,energy)];
// if(i==127 && j==252)
// p->test=wmod[fencode3_hdbne1(p,iia,energy)+(ordero*NVAR*dimp)];
}
// }
//__syncthreads();
}
__global__ void hyperdifbsourcene5_parallel(struct params *p, real *wmod,
real *dwn1, real *wd, int order,int ordero, real *wtemp, int field, int dim, int jj, int ii0,int mm,real sb, real dt)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int m,ii1;
real fip,fim1,tmpc;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ip,jp;
int iia[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
//int ip,jp,ipg,jpg;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
int shift=order*NVAR*dimp;
iia[0]=ip;
iia[1]=jp;
i=iia[0];
j=iia[1];
k=0;
#ifdef USE_SAC_3D
iia[2]=kp;
k=iia[2];
#endif
#ifdef USE_SAC_3D
if(i>0 && j >0 && i<((p->n[0])-1) && j<((p->n[1])-1) && k<((p->n[2])-1))
#else
if(i>0 && j >0 && i<((p->n[0])-1) && j<((p->n[1])-1))
#endif
//if(i>0 && j >0 && i<((p->n[0])-1) && j<((p->n[1])-1))
{
wtemp[fencode3_hdbne1(p,iia,tmp6)]=grad13n_hdbne1(wtemp,wd,p,iia,tmp5,mm);
}
//__syncthreads();
}
__global__ void hyperdifbsourcene4_parallel(struct params *p, real *wmod,
real *dwn1, real *wd, int order,int ordero, real *wtemp, int field, int dim, int jj, int ii0,int mm,real sb, real dt)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int m,ii1;
real fip,fim1,tmpc;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
//real dt=p->dt;
real dy=p->dx[1];
real dx=p->dx[0];
int ip,jp;
int iia[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
//int ip,jp,ipg,jpg;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
int shift=order*NVAR*dimp;
iia[0]=ip;
iia[1]=jp;
i=iia[0];
j=iia[1];
k=0;
#ifdef USE_SAC_3D
iia[2]=kp;
k=iia[2];
#endif
#ifdef USE_SAC_3D
if( i<((p->n[0])) && j<((p->n[1])) && k<((p->n[2])))
#else
if( i<((p->n[0])) && j<((p->n[1])))
#endif
//if( i<((p->n[0])) && j<((p->n[1])))
{
wtemp[fencode3_hdbne1(p,iia,tmp5)]=wtemp[fencode3_hdbne1(p,iia,tmp3)]*wmod[(shift)+fencode3_hdbne1(p,iia,b1+jj)];
}
//__syncthreads();
}
__global__ void hyperdifbsourcene3_parallel(struct params *p, real *wmod,
real *dwn1, real *wd, int order,int ordero, real *wtemp, int field, int dim, int jj, int ii0,int mm,real sb)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int m,ii1;
real fip,fim1,tmpc;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dt=p->dt;
real dy=p->dx[1];
real dx=p->dx[0];
int ip,jp;
int iia[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
//int ip,jp,ipg,jpg;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
int shift=order*NVAR*dimp;
iia[0]=ip;
iia[1]=jp;
i=iia[0];
j=iia[1];
k=0;
#ifdef USE_SAC_3D
iia[2]=kp;
k=iia[2];
#endif
#ifdef USE_SAC_3D
if(i>0 && j>0 && k>0 && i<((p->n[0])-1) && j<((p->n[1])-1) && k<((p->n[2])-1))
#else
if(i>0 && j >0 && i<((p->n[0])-1) && j<((p->n[1])-1))
#endif
//if(i>0 && j >0 && i<((p->n[0])-1) && j<((p->n[1])-1))
{
wtemp[fencode3_hdbne1(p,iia,tmp4)]=grad13n_hdbne1(wtemp,wd,p,iia,tmp3,mm);
// if(i==252 && j==127)
// p->test=wtemp[fencode3_hdbne1(p,iia,tmp3)];
}
//__syncthreads();
}
__global__ void hyperdifbsourcene2_parallel(struct params *p, real *wmod,
real *dwn1, real *wd, int order,int ordero, real *wtemp, int field, int dim, int jj, int ii0,int mm,real sb)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j,k;
int m,ii1;
//real fip,fim1,tmpc;
//int index,k;
int ni=p->n[0];
int nj=p->n[1];
//real dt=p->dt;
//real dy=p->dx[1];
//real dx=p->dx[0];
int ip,jp;
int iia[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
//int ip,jp,ipg,jpg;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
int shift=order*NVAR*dimp;
iia[0]=ip;
iia[1]=jp;
i=iia[0];
j=iia[1];
k=0;
#ifdef USE_SAC_3D
iia[2]=kp;
k=iia[2];
#endif
#ifdef USE_SAC_3D
if(i<((p->n[0])) && j<((p->n[1]))&& k<((p->n[2])))
#else
if(i<((p->n[0])) && j<((p->n[1])))
#endif
{
wtemp[fencode3_hdbne1(p,iia,tmp3)]=wtemp[fencode3_hdbne1(p,iia,tmp2)]*(wd[fencode3_hdbne1(p,iia,hdnul)]+wd[fencode3_hdbne1(p,iia,hdnur)])/2;
//wtemp[fencode3_hdbne1(p,iia,tmp3)]=wtemp[fencode3_hdbne1(p,iia,tmp2)]*3.75;
//if(i==127 && j==252)
// p->test=wtemp[fencode3_hdbne1(p,iia,tmp2)];
// if(i==127 && j==252)
// p->test=wtemp[fencode3_hdbne1(p,iia,tmp3)];
}
//__syncthreads();
}
__global__ void hyperdifbsourcene1b_parallel(struct params *p, real *wmod,
real *dwn1, real *wd, int order,int ordero, real *wtemp, int field, int dim, int jj, int ii0,int mm,real sb)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j,k;
int m,ii1;
//real fip,fim1,tmpc;
//int index,k;
int ni=p->n[0];
int nj=p->n[1];
//real dt=p->dt;
//real dy=p->dx[1];
//real dx=p->dx[0];
int ip,jp;
int iia[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
//int ip,jp,ipg,jpg;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
int shift=order*NVAR*dimp;
iia[0]=ip;
iia[1]=jp;
i=iia[0];
j=iia[1];
k=0;
#ifdef USE_SAC_3D
iia[2]=kp;
k=iia[2];
#endif
#ifdef USE_SAC_3D
if(i>0 && j >0 && k>0 && i<((p->n[0])-1) && j<((p->n[1])-1) && k<((p->n[2])-1))
#else
if(i>0 && j >0 && i<((p->n[0])-1) && j<((p->n[1])-1))
#endif
// if(i>0 && j >0 && i<((p->n[0])-1) && j<((p->n[1])-1))
{
wtemp[fencode3_hdbne1(p,iia,tmp2)]=/*0.25**/grad13n_hdbne1(wtemp,wd,p,iia,tmp1,dim);
// wtemp[fencode3_hdbne1(p,iia,tmp2)]=/*0.25**/grad13n_hdbne1(wmod+shift,wd,p,iia,b1+field,dim);
//wmod[(shift)+fencode3_hdbne1(p,iia,b1+field)]
//if(i==127 && j==252)
// p->test=grad13n_hdbne1(wtemp,wd,p,iia,tmp2,dim);
//if(i==127 && j==252)
// p->test=grad13n_hdbne1(wmod+shift,wd,p,iia,b1+field,dim);
//if(i==127 && j==252)
// p->test=wtemp[fencode3_hdbne1(p,iia,tmp2)];
}
//__syncthreads();
}
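// hyperdifbsourcene1a: copy the magnetic-field component b1+field of the current
// order from wmod into the work array tmp1.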
__global__ void hyperdifbsourcene1a_parallel(struct params *p, real *wmod,
real *dwn1, real *wd, int order,int ordero, real *wtemp, int field, int dim, int jj, int ii0,int mm,real sb)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j,k;
int m,ii1;
//real fip,fim1,tmpc;
//int index,k;
int ni=p->n[0];
int nj=p->n[1];
//real dt=p->dt;
//real dy=p->dx[1];
//real dx=p->dx[0];
int ip,jp;
int iia[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
//int ip,jp,ipg,jpg;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
int shift=order*NVAR*dimp;
iia[0]=ip;
iia[1]=jp;
i=iia[0];
j=iia[1];
k=0;
#ifdef USE_SAC_3D
iia[2]=kp;
k=iia[2];
#endif
#ifdef USE_SAC_3D
if(i<((p->n[0])) && j<((p->n[1])) && k<((p->n[2])))
#else
if(i<((p->n[0])) && j<((p->n[1])))
#endif
//if( i<((p->n[0])) && j<((p->n[1])))
{
wtemp[fencode3_hdbne1(p,iia,tmp1)]=wmod[(shift)+fencode3_hdbne1(p,iia,b1+field)];
//wtemp[fencode3_hdbne1(p,iia,tmp1)]=wmod[fencode3_hdbne1(p,iia,b1+field)];
// if(i==127 && j==127)
// p->test=wmod[shift+fencode3_hdbne1(p,iia,b1+field)];
}
//__syncthreads();
}
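// hyperdifbsourcene1: initialisation pass that zeroes the work arrays tmp1..tmp8
// and the dwn1 entries for energy and b1+ii0 at every grid point.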
__global__ void hyperdifbsourcene1_parallel(struct params *p, real *wmod,
real *dwn1, real *wd, int order,int ordero, real *wtemp, int field, int dim, int jj, int ii0,int mm,real sb)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j,k;
int m,ii1;
//real fip,fim1,tmpc;
//int index,k;
int ni=p->n[0];
int nj=p->n[1];
//real dt=p->dt;
//real dy=p->dx[1];
//real dx=p->dx[0];
int ip,jp;
int iia[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
//int ip,jp,ipg,jpg;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
int shift=order*NVAR*dimp;
iia[0]=ip;
iia[1]=jp;
i=iia[0];
j=iia[1];
k=0;
#ifdef USE_SAC_3D
iia[2]=kp;
k=iia[2];
#endif
#ifdef USE_SAC_3D
if(i<((p->n[0])) && j<((p->n[1])) && k<((p->n[2])))
#else
if(i<((p->n[0])) && j<((p->n[1])))
#endif
//init rhol and rhor
//if(i<((p->n[0])) && j<((p->n[1])))
{
for(int f=tmp1; f<=tmp8; f++)
wtemp[fencode3_hdbne1(p,iia,f)]=0.0;
dwn1[fencode3_hdbne1(p,iia,energy)]=0.0;
dwn1[fencode3_hdbne1(p,iia,b1+ii0)]=0.0;
}
//__syncthreads();
}
/////////////////////////////////////
// error checking routine
/////////////////////////////////////
void checkErrors_hdbne1(char *label)
{
// we need to synchronise first to catch errors due to
// asynchronous operations that would otherwise
// potentially go unnoticed
cudaError_t err;
err = cudaThreadSynchronize();
if (err != cudaSuccess)
{
const char *e = cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)\n", e, label);
}
err = cudaGetLastError();
if (err != cudaSuccess)
{
const char *e = cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)\n", e, label);
}
}
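// Host driver: launches the kernel pipeline ne1 -> ne1a -> ne1b -> ne2 -> ne3 ->
// ne4 -> ne6 -> ne5 -> ne6a (synchronising after each launch) and finally copies
// the params struct back to the host for diagnostics.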
int cuhyperdifbsourcene1(struct params **p, struct params **d_p, real **d_wmod, real **d_dwn1, real **d_wd, int order,int ordero, real **d_wtemp, int field, int dim, int jj, int ii0,int mm,real sb,real dt)
{
int dimp=(((*p)->n[0]))*(((*p)->n[1]));
#ifdef USE_SAC_3D
dimp=(((*p)->n[0]))*(((*p)->n[1]))*(((*p)->n[2]));
#endif
int numBlocks = (dimp+numThreadsPerBlock-1) / numThreadsPerBlock;
hyperdifbsourcene1_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_wmod, *d_dwn1, *d_wd, order,ordero,*d_wtemp, field, dim,jj,ii0,mm,sb);
cudaThreadSynchronize();
hyperdifbsourcene1a_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_wmod, *d_dwn1, *d_wd, order,ordero,*d_wtemp, field, dim,jj,ii0,mm,sb);
cudaThreadSynchronize();
hyperdifbsourcene1b_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_wmod, *d_dwn1, *d_wd, order,ordero,*d_wtemp, field, dim,jj,ii0,mm,sb);
cudaThreadSynchronize();
hyperdifbsourcene2_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_wmod, *d_dwn1, *d_wd, order,ordero,*d_wtemp, field, dim,jj,ii0,mm,sb);
cudaThreadSynchronize();
hyperdifbsourcene3_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_wmod, *d_dwn1, *d_wd, order,ordero,*d_wtemp, field, dim,jj,ii0,mm,sb);
cudaThreadSynchronize();
hyperdifbsourcene4_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_wmod, *d_dwn1, *d_wd, order,ordero,*d_wtemp, field, dim,jj,ii0,mm,sb,dt);
cudaThreadSynchronize();
hyperdifbsourcene6_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_wmod, *d_dwn1, *d_wd, order,ordero,*d_wtemp, field, dim,jj,ii0,mm,sb,dt);
cudaThreadSynchronize();
hyperdifbsourcene5_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_wmod, *d_dwn1, *d_wd, order,ordero,*d_wtemp, field, dim,jj,ii0,mm,sb,dt);
cudaThreadSynchronize();
hyperdifbsourcene6a_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_wmod, *d_dwn1, *d_wd, order,ordero,*d_wtemp, field, dim,jj,ii0,mm,sb,dt);
cudaThreadSynchronize();
cudaMemcpy(*p, *d_p, sizeof(struct params), cudaMemcpyDeviceToHost);
//printf("e %d %10.20g\n",mm,(*p)->test);
return 0;
}
|
ef8e66b120ed2e65ef1282d28b7ca91a38a1b500.hip | // !!! This is a file automatically generated by hipify!!!
#include "A.h"
#include <A.xti>
#include "B.h"
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <typeinfo>
namespace A {
template < typename T >
__global__ void addKernel(T *c, const T *a, const T *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
template < typename T >
bool addWithCuda(T *c, const T *a, const T *b, int size)
{
printf("%s -- T = %s\n", __FUNCSIG__, typeid(T).name());
T *dev_a = 0;
T *dev_b = 0;
T *dev_c = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(T));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(T));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(T));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(T), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, b, size * sizeof(T), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
addKernel << <1, size >> > (dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(T), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
return cudaStatus == hipSuccess && B::addWithCuda(c, a, b, size);
}
} // namespace A | ef8e66b120ed2e65ef1282d28b7ca91a38a1b500.cu | #include "A.h"
#include <A.xti>
#include "B.h"
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <typeinfo>
namespace A {
template < typename T >
__global__ void addKernel(T *c, const T *a, const T *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
template < typename T >
bool addWithCuda(T *c, const T *a, const T *b, int size)
{
printf("%s -- T = %s\n", __FUNCSIG__, typeid(T).name());
T *dev_a = 0;
T *dev_b = 0;
T *dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(T));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(T));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(T));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(T), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(T), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
addKernel << <1, size >> > (dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(T), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus == cudaSuccess && B::addWithCuda(c, a, b, size);
}
} // namespace A |
66f9d8815805181ca538d3a962a30fe0be882bc3.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2020, the YACCLAB contributors, as
// shown by the AUTHORS file. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include <opencv2/cudafeatures2d.hpp>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "labeling_algorithms.h"
#include "register.h"
// 3D Oliveira 3D (we think we are the first)
#define BLOCK_X 4
#define BLOCK_Y 4
#define BLOCK_Z 4
using namespace cv;
namespace {
// Returns the root index of the UFTree
__device__ unsigned Find(const int *s_buf, unsigned n) {
// Warning: do not call Find on a background pixel
unsigned label = s_buf[n];
assert(label > 0);
while (label - 1 != n) {
n = label - 1;
label = s_buf[n];
assert(label > 0);
}
return n;
}
// Merges the UFTrees of a and b, linking one root to the other
__device__ void Union(int *s_buf, unsigned a, unsigned b) {
bool done;
do {
a = Find(s_buf, a);
b = Find(s_buf, b);
if (a < b) {
int old = atomicMin(s_buf + b, a + 1);
done = (old == b + 1);
b = old - 1;
}
else if (b < a) {
int old = atomicMin(s_buf + a, b + 1);
done = (old == a + 1);
a = old - 1;
}
else {
done = true;
}
} while (!done);
}
__global__ void Initialization(const cuda::PtrStepSz3b img, cuda::PtrStepSz3i labels) {
unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned z = blockIdx.z * blockDim.z + threadIdx.z;
unsigned img_index = z * (img.stepz / img.elem_size) + y * (img.stepy / img.elem_size) + x;
unsigned labels_index = z * (labels.stepz / labels.elem_size) + y * (labels.stepy / labels.elem_size) + x;
if (x < labels.x && y < labels.y && z < labels.z) {
if (img[img_index]) {
labels[labels_index] = labels_index + 1;
}
else {
labels[labels_index] = 0;
}
}
}
__global__ void Merge(cuda::PtrStepSz3i labels) {
unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned z = blockIdx.z * blockDim.z + threadIdx.z;
unsigned labels_index = z * (labels.stepz / labels.elem_size) + y * (labels.stepy / labels.elem_size) + x;
if (x < labels.x && y < labels.y && z < labels.z) {
if (labels[labels_index]) {
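// Union with the 13 neighbours already visited in raster order: the full 3x3
// block of the z-1 plane, the y-1 row of the current plane, and the x-1 voxel.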
if (z > 0) {
unsigned current_plane = labels_index - (labels.stepz / labels.elem_size);
if (y > 0) {
unsigned current_row = current_plane - (labels.stepy / labels.elem_size);
if (x > 0 && labels[current_row - 1]) {
Union(labels.data, labels_index, current_row - 1);
}
if (labels[current_row]) {
Union(labels.data, labels_index, current_row);
}
if (x + 1 < labels.x && labels[current_row + 1]) {
Union(labels.data, labels_index, current_row + 1);
}
}
{
unsigned current_row = current_plane;
if (x > 0 && labels[current_row - 1]) {
Union(labels.data, labels_index, current_row - 1);
}
if (labels[current_row]) {
Union(labels.data, labels_index, current_row);
}
if (x + 1 < labels.x && labels[current_row + 1]) {
Union(labels.data, labels_index, current_row + 1);
}
}
if (y + 1 < labels.y) {
unsigned current_row = current_plane + (labels.stepy / labels.elem_size);
if (x > 0 && labels[current_row - 1]) {
Union(labels.data, labels_index, current_row - 1);
}
if (labels[current_row]) {
Union(labels.data, labels_index, current_row);
}
if (x + 1 < labels.x && labels[current_row + 1]) {
Union(labels.data, labels_index, current_row + 1);
}
}
}
{
if (y > 0) {
unsigned current_row = labels_index - (labels.stepy / labels.elem_size);
if (x > 0 && labels[current_row - 1]) {
Union(labels.data, labels_index, current_row - 1);
}
if (labels[current_row]) {
Union(labels.data, labels_index, current_row);
}
if (x + 1 < labels.x && labels[current_row + 1]) {
Union(labels.data, labels_index, current_row + 1);
}
}
{
if (x > 0 && labels[labels_index - 1]) {
Union(labels.data, labels_index, labels_index - 1);
}
}
}
}
}
}
__global__ void PathCompression(cuda::PtrStepSz3i labels) {
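// Flatten each foreground label to the root of its union-find tree
// (+1 keeps 0 reserved for background).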
unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned z = blockIdx.z * blockDim.z + threadIdx.z;
unsigned labels_index = z * (labels.stepz / labels.elem_size) + y * (labels.stepy / labels.elem_size) + x;
if (x < labels.x && y < labels.y && z < labels.z) {
unsigned int val = labels[labels_index];
if (val) {
labels[labels_index] = Find(labels.data, labels_index) + 1;
}
}
}
}
class UF_3D : public GpuLabeling3D<Connectivity3D::CONN_26> {
private:
dim3 grid_size_;
dim3 block_size_;
public:
UF_3D() {}
void PerformLabeling() {
d_img_labels_.create(d_img_.x, d_img_.y, d_img_.z, CV_32SC1);
grid_size_ = dim3((d_img_.x + BLOCK_X - 1) / BLOCK_X, (d_img_.y + BLOCK_Y - 1) / BLOCK_Y, (d_img_.z + BLOCK_Z - 1) / BLOCK_Z);
block_size_ = dim3(BLOCK_X, BLOCK_Y, BLOCK_Z);
//cuda::PtrStep3b ptr_step_prima(d_img_labels_);
// Phase 1
Initialization << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
//cuda::PtrStepSz3i ptr_step_size(d_img_labels_);
// Debug image of the first phase
//cuda::GpuMat d_local_labels;
//d_img_labels_.copyTo(d_local_labels);
//PathCompression << <grid_size_, block_size_ >> > (d_img_, d_local_labels);
//// ZeroBackground << <grid_size_, block_size_ >> > (d_img_, d_local_labels);
//Mat1i local_labels(img_.size());
//d_local_labels.download(local_labels);
// Phase 2
Merge << <grid_size_, block_size_ >> > (d_img_labels_);
// Debug image of the second phase
//cuda::GpuMat d_global_labels;
//d_img_labels_.copyTo(d_global_labels);
//PathCompression << <grid_size_, block_size_ >> > (d_img_, d_global_labels);
//// ZeroBackground << <grid_size_, block_size_ >> > (d_img_, d_global_labels);
//Mat1i global_labels(img_.size());
//d_global_labels.download(global_labels);
// Phase 3
PathCompression << <grid_size_, block_size_ >> > (d_img_labels_);
hipDeviceSynchronize();
//d_img_labels_.download(img_labels_);
//Mat errors;
//bool correct = CheckLabeledVolume(img_, img_labels_, errors);
//volwrite("C:\\Users\\Stefano\\Desktop\\debug\\UF_errors", errors);
}
void PerformLabelingBlocksize(int x, int y, int z) override {
d_img_labels_.create(d_img_.x, d_img_.y, d_img_.z, CV_32SC1);
grid_size_ = dim3((d_img_.x + x - 1) / x, (d_img_.y + y - 1) / y, (d_img_.z + z - 1) / z);
block_size_ = dim3(x, y, z);
// Phase 1
BLOCKSIZE_KERNEL(Initialization, grid_size_, block_size_, 0, d_img_, d_img_labels_)
// Phase 2
BLOCKSIZE_KERNEL(Merge, grid_size_, block_size_, 0, d_img_labels_)
// Phase 3
BLOCKSIZE_KERNEL(PathCompression, grid_size_, block_size_, 0, d_img_labels_)
}
private:
double Alloc() {
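// Time allocation+memset, then subtract the time of a second identical memset
// so that only the allocation cost remains.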
perf_.start();
d_img_labels_.create(d_img_.x, d_img_.y, d_img_.z, CV_32SC1);
hipMemset(d_img_labels_.data, 0, d_img_labels_.stepz * d_img_labels_.z);
hipDeviceSynchronize();
double t = perf_.stop();
perf_.start();
hipMemset(d_img_labels_.data, 0, d_img_labels_.stepz * d_img_labels_.z);
hipDeviceSynchronize();
t -= perf_.stop();
return t;
}
double Dealloc() {
perf_.start();
perf_.stop();
return perf_.last();
}
double MemoryTransferHostToDevice() {
perf_.start();
d_img_.upload(img_);
perf_.stop();
return perf_.last();
}
void MemoryTransferDeviceToHost() {
d_img_labels_.download(img_labels_);
}
void LocalScan() {
grid_size_ = dim3((d_img_.x + BLOCK_X - 1) / BLOCK_X, (d_img_.y + BLOCK_Y - 1) / BLOCK_Y, (d_img_.z + BLOCK_Z - 1) / BLOCK_Z);
block_size_ = dim3(BLOCK_X, BLOCK_Y, BLOCK_Z);
Initialization << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
hipDeviceSynchronize();
}
void GlobalScan() {
Merge << <grid_size_, block_size_ >> > (d_img_labels_);
PathCompression << <grid_size_, block_size_ >> > (d_img_labels_);
hipDeviceSynchronize();
}
public:
void PerformLabelingWithSteps()
{
double alloc_timing = Alloc();
perf_.start();
LocalScan();
GlobalScan();
perf_.stop();
perf_.store(Step(StepType::ALL_SCANS), perf_.last());
double dealloc_timing = Dealloc();
perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing);
}
};
REGISTER_LABELING(UF_3D);
REGISTER_KERNELS(UF_3D, Initialization, Merge, PathCompression)
| 66f9d8815805181ca538d3a962a30fe0be882bc3.cu | // Copyright (c) 2020, the YACCLAB contributors, as
// shown by the AUTHORS file. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include <opencv2/cudafeatures2d.hpp>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "labeling_algorithms.h"
#include "register.h"
// 3D Oliveira 3D (we think we are the first)
#define BLOCK_X 4
#define BLOCK_Y 4
#define BLOCK_Z 4
using namespace cv;
namespace {
// Returns the root index of the UFTree
__device__ unsigned Find(const int *s_buf, unsigned n) {
// Warning: do not call Find on a background pixel
unsigned label = s_buf[n];
assert(label > 0);
while (label - 1 != n) {
n = label - 1;
label = s_buf[n];
assert(label > 0);
}
return n;
}
// Merges the UFTrees of a and b, linking one root to the other
__device__ void Union(int *s_buf, unsigned a, unsigned b) {
bool done;
do {
a = Find(s_buf, a);
b = Find(s_buf, b);
if (a < b) {
int old = atomicMin(s_buf + b, a + 1);
done = (old == b + 1);
b = old - 1;
}
else if (b < a) {
int old = atomicMin(s_buf + a, b + 1);
done = (old == a + 1);
a = old - 1;
}
else {
done = true;
}
} while (!done);
}
__global__ void Initialization(const cuda::PtrStepSz3b img, cuda::PtrStepSz3i labels) {
unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned z = blockIdx.z * blockDim.z + threadIdx.z;
unsigned img_index = z * (img.stepz / img.elem_size) + y * (img.stepy / img.elem_size) + x;
unsigned labels_index = z * (labels.stepz / labels.elem_size) + y * (labels.stepy / labels.elem_size) + x;
if (x < labels.x && y < labels.y && z < labels.z) {
if (img[img_index]) {
labels[labels_index] = labels_index + 1;
}
else {
labels[labels_index] = 0;
}
}
}
__global__ void Merge(cuda::PtrStepSz3i labels) {
unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned z = blockIdx.z * blockDim.z + threadIdx.z;
unsigned labels_index = z * (labels.stepz / labels.elem_size) + y * (labels.stepy / labels.elem_size) + x;
if (x < labels.x && y < labels.y && z < labels.z) {
if (labels[labels_index]) {
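// Union with the 13 neighbours already visited in raster order: the full 3x3
// block of the z-1 plane, the y-1 row of the current plane, and the x-1 voxel.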
if (z > 0) {
unsigned current_plane = labels_index - (labels.stepz / labels.elem_size);
if (y > 0) {
unsigned current_row = current_plane - (labels.stepy / labels.elem_size);
if (x > 0 && labels[current_row - 1]) {
Union(labels.data, labels_index, current_row - 1);
}
if (labels[current_row]) {
Union(labels.data, labels_index, current_row);
}
if (x + 1 < labels.x && labels[current_row + 1]) {
Union(labels.data, labels_index, current_row + 1);
}
}
{
unsigned current_row = current_plane;
if (x > 0 && labels[current_row - 1]) {
Union(labels.data, labels_index, current_row - 1);
}
if (labels[current_row]) {
Union(labels.data, labels_index, current_row);
}
if (x + 1 < labels.x && labels[current_row + 1]) {
Union(labels.data, labels_index, current_row + 1);
}
}
if (y + 1 < labels.y) {
unsigned current_row = current_plane + (labels.stepy / labels.elem_size);
if (x > 0 && labels[current_row - 1]) {
Union(labels.data, labels_index, current_row - 1);
}
if (labels[current_row]) {
Union(labels.data, labels_index, current_row);
}
if (x + 1 < labels.x && labels[current_row + 1]) {
Union(labels.data, labels_index, current_row + 1);
}
}
}
{
if (y > 0) {
unsigned current_row = labels_index - (labels.stepy / labels.elem_size);
if (x > 0 && labels[current_row - 1]) {
Union(labels.data, labels_index, current_row - 1);
}
if (labels[current_row]) {
Union(labels.data, labels_index, current_row);
}
if (x + 1 < labels.x && labels[current_row + 1]) {
Union(labels.data, labels_index, current_row + 1);
}
}
{
if (x > 0 && labels[labels_index - 1]) {
Union(labels.data, labels_index, labels_index - 1);
}
}
}
}
}
}
__global__ void PathCompression(cuda::PtrStepSz3i labels) {
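// Flatten each foreground label to the root of its union-find tree
// (+1 keeps 0 reserved for background).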
unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned z = blockIdx.z * blockDim.z + threadIdx.z;
unsigned labels_index = z * (labels.stepz / labels.elem_size) + y * (labels.stepy / labels.elem_size) + x;
if (x < labels.x && y < labels.y && z < labels.z) {
unsigned int val = labels[labels_index];
if (val) {
labels[labels_index] = Find(labels.data, labels_index) + 1;
}
}
}
}
class UF_3D : public GpuLabeling3D<Connectivity3D::CONN_26> {
private:
dim3 grid_size_;
dim3 block_size_;
public:
UF_3D() {}
void PerformLabeling() {
d_img_labels_.create(d_img_.x, d_img_.y, d_img_.z, CV_32SC1);
grid_size_ = dim3((d_img_.x + BLOCK_X - 1) / BLOCK_X, (d_img_.y + BLOCK_Y - 1) / BLOCK_Y, (d_img_.z + BLOCK_Z - 1) / BLOCK_Z);
block_size_ = dim3(BLOCK_X, BLOCK_Y, BLOCK_Z);
//cuda::PtrStep3b ptr_step_prima(d_img_labels_);
// Phase 1
Initialization << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
//cuda::PtrStepSz3i ptr_step_size(d_img_labels_);
// Debug image of the first phase
//cuda::GpuMat d_local_labels;
//d_img_labels_.copyTo(d_local_labels);
//PathCompression << <grid_size_, block_size_ >> > (d_img_, d_local_labels);
//// ZeroBackground << <grid_size_, block_size_ >> > (d_img_, d_local_labels);
//Mat1i local_labels(img_.size());
//d_local_labels.download(local_labels);
// Phase 2
Merge << <grid_size_, block_size_ >> > (d_img_labels_);
// Debug image of the second phase
//cuda::GpuMat d_global_labels;
//d_img_labels_.copyTo(d_global_labels);
//PathCompression << <grid_size_, block_size_ >> > (d_img_, d_global_labels);
//// ZeroBackground << <grid_size_, block_size_ >> > (d_img_, d_global_labels);
//Mat1i global_labels(img_.size());
//d_global_labels.download(global_labels);
// Phase 3
PathCompression << <grid_size_, block_size_ >> > (d_img_labels_);
cudaDeviceSynchronize();
//d_img_labels_.download(img_labels_);
//Mat errors;
//bool correct = CheckLabeledVolume(img_, img_labels_, errors);
//volwrite("C:\\Users\\Stefano\\Desktop\\debug\\UF_errors", errors);
}
void PerformLabelingBlocksize(int x, int y, int z) override {
d_img_labels_.create(d_img_.x, d_img_.y, d_img_.z, CV_32SC1);
grid_size_ = dim3((d_img_.x + x - 1) / x, (d_img_.y + y - 1) / y, (d_img_.z + z - 1) / z);
block_size_ = dim3(x, y, z);
// Phase 1
BLOCKSIZE_KERNEL(Initialization, grid_size_, block_size_, 0, d_img_, d_img_labels_)
// Phase 2
BLOCKSIZE_KERNEL(Merge, grid_size_, block_size_, 0, d_img_labels_)
// Phase 3
BLOCKSIZE_KERNEL(PathCompression, grid_size_, block_size_, 0, d_img_labels_)
}
private:
double Alloc() {
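// Time allocation+memset, then subtract the time of a second identical memset
// so that only the allocation cost remains.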
perf_.start();
d_img_labels_.create(d_img_.x, d_img_.y, d_img_.z, CV_32SC1);
cudaMemset(d_img_labels_.data, 0, d_img_labels_.stepz * d_img_labels_.z);
cudaDeviceSynchronize();
double t = perf_.stop();
perf_.start();
cudaMemset(d_img_labels_.data, 0, d_img_labels_.stepz * d_img_labels_.z);
cudaDeviceSynchronize();
t -= perf_.stop();
return t;
}
double Dealloc() {
perf_.start();
perf_.stop();
return perf_.last();
}
double MemoryTransferHostToDevice() {
perf_.start();
d_img_.upload(img_);
perf_.stop();
return perf_.last();
}
void MemoryTransferDeviceToHost() {
d_img_labels_.download(img_labels_);
}
void LocalScan() {
grid_size_ = dim3((d_img_.x + BLOCK_X - 1) / BLOCK_X, (d_img_.y + BLOCK_Y - 1) / BLOCK_Y, (d_img_.z + BLOCK_Z - 1) / BLOCK_Z);
block_size_ = dim3(BLOCK_X, BLOCK_Y, BLOCK_Z);
Initialization << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
cudaDeviceSynchronize();
}
void GlobalScan() {
Merge << <grid_size_, block_size_ >> > (d_img_labels_);
PathCompression << <grid_size_, block_size_ >> > (d_img_labels_);
cudaDeviceSynchronize();
}
public:
void PerformLabelingWithSteps()
{
double alloc_timing = Alloc();
perf_.start();
LocalScan();
GlobalScan();
perf_.stop();
perf_.store(Step(StepType::ALL_SCANS), perf_.last());
double dealloc_timing = Dealloc();
perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing);
}
};
REGISTER_LABELING(UF_3D);
REGISTER_KERNELS(UF_3D, Initialization, Merge, PathCompression)
|
ad0d885d37216f59c780a21916236ca62a5b59da.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// --------------------------------------------------------
// Fast R-CNN
// Copyright (c) Microsoft. All rights reserved.
// Written by Ross Girshick, 2015.
// Licensed under the BSD 2-clause "Simplified" license.
// See LICENSE in the Fast R-CNN project root for license
// information.
// --------------------------------------------------------
#include "caffe/FRCNN/roi_pooling_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ROIPoolForward(const int nthreads, const Dtype* bottom_data,
const Dtype spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
int roi_start_w = round(bottom_rois[1] * spatial_scale);
int roi_start_h = round(bottom_rois[2] * spatial_scale);
int roi_end_w = round(bottom_rois[3] * spatial_scale);
int roi_end_h = round(bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int hstart = static_cast<int>(::floor(static_cast<Dtype>(ph)
* bin_size_h));
int wstart = static_cast<int>(::floor(static_cast<Dtype>(pw)
* bin_size_w));
int hend = static_cast<int>(::ceil(static_cast<Dtype>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(::ceil(static_cast<Dtype>(pw + 1)
* bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
Dtype maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
bottom_data += (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (bottom_data[bottom_index] > maxval) {
maxval = bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
argmax_data[index] = maxidx;
}
}
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int* argmax_data = max_idx_.mutable_gpu_data();
int count = top[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ROIPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void ROIPoolBackward(const int nthreads, const Dtype* top_diff,
const int* argmax_data, const int num_rois, const Dtype spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, Dtype* bottom_diff,
const Dtype* bottom_rois) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, h, w) coords in bottom data
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
Dtype gradient = 0;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Skip if ROI's batch index doesn't match n
if (n != roi_batch_ind) {
continue;
}
int roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
int roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
int roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
int roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Skip if ROI doesn't include (h, w)
const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
h >= roi_start_h && h <= roi_end_h);
if (!in_roi) {
continue;
}
int offset = (roi_n * channels + c) * pooled_height * pooled_width;
const Dtype* offset_top_diff = top_diff + offset;
const int* offset_argmax_data = argmax_data + offset;
// Compute feasible set of pooled units that could have pooled
// this bottom unit
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h);
int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h);
int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w);
int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w);
phstart = min(max(phstart, 0), pooled_height);
phend = min(max(phend, 0), pooled_height);
pwstart = min(max(pwstart, 0), pooled_width);
pwend = min(max(pwend, 0), pooled_width);
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (offset_argmax_data[ph * pooled_width + pw] == (h * width + w)) {
gradient += offset_top_diff[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
const int* argmax_data = max_idx_.gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ROIPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, argmax_data, top[0]->num(), spatial_scale_, channels_,
height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(ROIPoolingLayer);
} // namespace caffe
| ad0d885d37216f59c780a21916236ca62a5b59da.cu | // --------------------------------------------------------
// Fast R-CNN
// Copyright (c) Microsoft. All rights reserved.
// Written by Ross Girshick, 2015.
// Licensed under the BSD 2-clause "Simplified" license.
// See LICENSE in the Fast R-CNN project root for license
// information.
// --------------------------------------------------------
#include "caffe/FRCNN/roi_pooling_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ROIPoolForward(const int nthreads, const Dtype* bottom_data,
const Dtype spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
int roi_start_w = round(bottom_rois[1] * spatial_scale);
int roi_start_h = round(bottom_rois[2] * spatial_scale);
int roi_end_w = round(bottom_rois[3] * spatial_scale);
int roi_end_h = round(bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int hstart = static_cast<int>(std::floor(static_cast<Dtype>(ph)
* bin_size_h));
int wstart = static_cast<int>(std::floor(static_cast<Dtype>(pw)
* bin_size_w));
int hend = static_cast<int>(std::ceil(static_cast<Dtype>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(std::ceil(static_cast<Dtype>(pw + 1)
* bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
Dtype maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
bottom_data += (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (bottom_data[bottom_index] > maxval) {
maxval = bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
argmax_data[index] = maxidx;
}
}
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int* argmax_data = max_idx_.mutable_gpu_data();
int count = top[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
ROIPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void ROIPoolBackward(const int nthreads, const Dtype* top_diff,
const int* argmax_data, const int num_rois, const Dtype spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, Dtype* bottom_diff,
const Dtype* bottom_rois) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, h, w) coords in bottom data
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
Dtype gradient = 0;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Skip if ROI's batch index doesn't match n
if (n != roi_batch_ind) {
continue;
}
int roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
int roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
int roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
int roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Skip if ROI doesn't include (h, w)
const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
h >= roi_start_h && h <= roi_end_h);
if (!in_roi) {
continue;
}
int offset = (roi_n * channels + c) * pooled_height * pooled_width;
const Dtype* offset_top_diff = top_diff + offset;
const int* offset_argmax_data = argmax_data + offset;
// Compute feasible set of pooled units that could have pooled
// this bottom unit
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h);
int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h);
int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w);
int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w);
phstart = min(max(phstart, 0), pooled_height);
phend = min(max(phend, 0), pooled_height);
pwstart = min(max(pwstart, 0), pooled_width);
pwend = min(max(pwend, 0), pooled_width);
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (offset_argmax_data[ph * pooled_width + pw] == (h * width + w)) {
gradient += offset_top_diff[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
const int* argmax_data = max_idx_.gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
ROIPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, argmax_data, top[0]->num(), spatial_scale_, channels_,
height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(ROIPoolingLayer);
} // namespace caffe
|
c854ce5c0c720427017d4f2fe77a1ec3f8a413a8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
extern "C" {
#include <stdio.h>
#include <time.h>
#include <assert.h>
#include "network.h"
#include "image.h"
#include "data.h"
#include "utils.h"
#include "parser.h"
#include "crop_layer.h"
#include "connected_layer.h"
#include "rnn_layer.h"
#include "gru_layer.h"
#include "crnn_layer.h"
#include "detection_layer.h"
#include "region_layer.h"
#include "convolutional_layer.h"
#include "activation_layer.h"
#include "maxpool_layer.h"
#include "reorg_layer.h"
#include "avgpool_layer.h"
#include "normalization_layer.h"
#include "batchnorm_layer.h"
#include "cost_layer.h"
#include "local_layer.h"
#include "softmax_layer.h"
#include "dropout_layer.h"
#include "route_layer.h"
#include "shortcut_layer.h"
#include "blas.h"
}
float * get_network_output_gpu_layer(network net, int i);
float * get_network_delta_gpu_layer(network net, int i);
float * get_network_output_gpu(network net);
float * get_network_output_layer_gpu(network net, int i);
void forward_network_gpu(network net, network_state state)
{
state.workspace = net.workspace;
int i;
for(i = 0; i < net.n; ++i){
state.index = i;
layer l = net.layers[i];
if(l.delta_gpu){
fill_ongpu(l.outputs * l.batch, 0, l.delta_gpu, 1);
}
l.forward_gpu(l, state);
state.input = l.output_gpu;
}
}
void backward_network_gpu(network net, network_state state)
{
state.workspace = net.workspace;
int i;
float * original_input = state.input;
float * original_delta = state.delta;
for(i = net.n-1; i >= 0; --i){
state.index = i;
layer l = net.layers[i];
if(i == 0){
state.input = original_input;
state.delta = original_delta;
}else{
layer prev = net.layers[i-1];
state.input = prev.output_gpu;
state.delta = prev.delta_gpu;
}
l.backward_gpu(l, state);
}
}
void update_network_gpu(network net)
{
int i;
int update_batch = net.batch*net.subdivisions;
float rate = get_current_rate(net);
for(i = 0; i < net.n; ++i){
layer l = net.layers[i];
l.t = get_current_batch(net);
if(l.update_gpu){
l.update_gpu(l, update_batch, rate, net.momentum, net.decay);
}
}
}
void forward_network_gpu_use_flag(network net, network_state state, int* flag, int isTrain)
{
state.workspace = net.workspace;
int i;
//get upper confidence threshold
float upper = net.upperbound;
if(isTrain)
{
float epoch = (float)(*net.seen) / net.N;
float percentage = epoch / net.nclasses / 100;
float prob_rand = 1.0 / net.nclasses;
upper = (net.upperbound - prob_rand) * percentage + prob_rand;
upper = upper > 1.0 ? 1.0 : upper;
if (int(epoch) % (net.nclasses + 1) == 1)
{
upper = 1;
printf("upper force to be %f", upper);
}
else
{
printf("upper change to be %f", upper);
}
if (net.print2console)
printf("\t\tEpoch: %f\t\tpercentage: %f\n",epoch, percentage);
}
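// Forward pass with early exits: flag[i] records which layers actually ran so the
// backward/update passes can skip the layers that were bypassed.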
for(i = 0; i < net.n; ++i){
state.index = i;
layer l = net.layers[i];
if(l.delta_gpu){
fill_ongpu(l.outputs * l.batch, 0, l.delta_gpu, 1);
}
l.forward_gpu(l, state);
state.input = l.output_gpu;
flag[i] = 1;
if (l.type == COST)
{
// float* out = (float*)calloc(net.layers[i - 1].outputs*net.layers[i - 1].batch, sizeof(float));
// cuda_copy_array(net.layers[i - 1].output_gpu, out, net.layers[i - 1].outputs*net.layers[i - 1].batch);
if (net.early_stop)
{
//if training, use voting across the batch to decide whether to stop
//else use one sample.
float* out = get_network_output_layer_gpu(net, i - 1);
int outputs = net.layers[i - 1].outputs;
int batch_size = net.batch;
int indexes;
int b;
int early_stop_number = 0;
float mean_prob = 0;
for (b = 0; b < batch_size; b++)
{
top_k(out, outputs, 1, &indexes);
if(out[indexes + outputs * b] >= upper)
{
early_stop_number++;
mean_prob += out[indexes + outputs * b];
}
}
if (net.print2console)
{
if (batch_size == 1)
printf("Cost layer AT %d with probability %.6f of type %d", i, out[indexes], indexes);
else
printf("Cost layer AT %d, mean probability %.6f of %d samples",
i, mean_prob /(early_stop_number + 0.000001), early_stop_number);
}
if(early_stop_number > batch_size / 2)
{
if (net.print2console)
printf("----------------------------STOP!\n");
break;
}
}
if (i != net.n - 1)
{
if (net.early_stop && net.print2console)
printf("----------------------------DOESN'T STOP!\n");
int i_forward = i;
//Cost layer set to be false
flag[i_forward--] = 0;
while(net.layers[i_forward].type != CONVOLUTIONAL)
flag[i_forward--] = 0;
//last fully convolutional layer set to be false
flag[i_forward--] = 0;
state.input = net.layers[i_forward].output_gpu;
}
else
{
if (net.early_stop && net.print2console)
printf("----------------------------STOP!\n");
}
}
}
if (net.early_stop && net.print2console)
{
printf("Layer");
int total_ignored = 0;
for (i = 0; i < net.n; i++)
if (!flag[i])
{
printf(" %d", i);
total_ignored++;
}
if (total_ignored)
printf(" is ignored!\n");
else
printf("None is ignored!\n");
}
}
int get_previous_layer_index_by_flag(int* flag, int index)
{
while(!flag[--index]);
return index;
}
void backward_network_gpu_use_flag(network net, network_state state, int* flag)
{
state.workspace = net.workspace;
int i;
float * original_input = state.input;
float * original_delta = state.delta;
int last_layer, first_layer;
for (i = net.n - 1; i >= 0; i--)
if (flag[i]) break;
last_layer = i;
for(i = last_layer; i >= 0; i--)
if (!flag[i]) break;
first_layer = i;
printf("Backward and Update layer:");
for(i = last_layer; i > first_layer; --i){
printf(" %d", i);
state.index = i;
layer l = net.layers[i];
if(i == 0){
state.input = original_input;
state.delta = original_delta;
}else{
int prev_index = get_previous_layer_index_by_flag(flag, i);
if (net.print2console)
printf("<-%d", prev_index);
layer prev = net.layers[prev_index];
state.input = prev.output_gpu;
state.delta = prev.delta_gpu;
}
l.backward_gpu(l, state);
}
printf("\n");
}
void update_network_gpu_use_flag(network net, int* flag)
{
int i;
int update_batch = net.batch*net.subdivisions;
float rate = get_current_rate(net);
int last_layer, first_layer;
for (i = net.n - 1; i >= 0; i--)
if (flag[i]) break;
last_layer = i;
for(i = last_layer; i >= 0; i--)
if (!flag[i]) break;
first_layer = i;
for(i = first_layer + 1; i <= last_layer; ++i){
layer l = net.layers[i];
l.t = get_current_batch(net);
if(l.update_gpu){
l.update_gpu(l, update_batch, rate, net.momentum, net.decay);
}
}
}
void forward_backward_network_gpu(network net, float *x, float *y)
{
network_state state;
state.index = 0;
state.net = net;
int x_size = get_network_input_size(net)*net.batch;
int y_size = get_network_output_size(net)*net.batch;
if(net.layers[net.n-1].truths) y_size = net.layers[net.n-1].truths*net.batch;
if(!*net.input_gpu){
*net.input_gpu = cuda_make_array(x, x_size);
*net.truth_gpu = cuda_make_array(y, y_size);
}else{
cuda_push_array(*net.input_gpu, x, x_size);
cuda_push_array(*net.truth_gpu, y, y_size);
}
state.input = *net.input_gpu;
state.delta = 0;
state.truth = *net.truth_gpu;
state.train = 1;
forward_network_gpu(net, state);
backward_network_gpu(net, state);
}
void forward_backward_network_gpu_use_flag(network net, float *x, float *y, int* flag)
{
network_state state;
state.index = 0;
state.net = net;
int x_size = get_network_input_size(net)*net.batch;
int y_size = get_network_output_size(net)*net.batch;
if(net.layers[net.n-1].truths) y_size = net.layers[net.n-1].truths*net.batch;
if(!*net.input_gpu){
*net.input_gpu = cuda_make_array(x, x_size);
*net.truth_gpu = cuda_make_array(y, y_size);
}else{
cuda_push_array(*net.input_gpu, x, x_size);
cuda_push_array(*net.truth_gpu, y, y_size);
}
state.input = *net.input_gpu;
state.delta = 0;
state.truth = *net.truth_gpu;
state.train = 1;
forward_network_gpu_use_flag(net, state, flag, 1);
backward_network_gpu_use_flag(net, state, flag);
}
float train_network_datum_gpu(network net, float *x, float *y)
{
int* flag = (int*)calloc(net.n, sizeof(int));
*net.seen += net.batch;
forward_backward_network_gpu_use_flag(net, x, y, flag);
float error = get_network_cost(net);
if (((*net.seen) / net.batch) % net.subdivisions == 0) update_network_gpu_use_flag(net, flag);
free(flag);
return error;
}
typedef struct {
network net;
data d;
float *err;
} train_args;
void *train_thread(void *ptr)
{
train_args args = *(train_args*)ptr;
free(ptr);
cuda_set_device(args.net.gpu_index);
*args.err = train_network(args.net, args.d);
return 0;
}
pthread_t train_network_in_thread(network net, data d, float *err)
{
pthread_t thread;
train_args *ptr = (train_args *)calloc(1, sizeof(train_args));
ptr->net = net;
ptr->d = d;
ptr->err = err;
if(pthread_create(&thread, 0, train_thread, ptr)) error("Thread creation failed");
return thread;
}
void pull_updates(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.n);
cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.n*l.size*l.size*l.c);
if(l.scale_updates) cuda_pull_array(l.scale_updates_gpu, l.scale_updates, l.n);
} else if(l.type == CONNECTED){
cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs);
}
}
void push_updates(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n);
cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.n*l.size*l.size*l.c);
if(l.scale_updates) cuda_push_array(l.scale_updates_gpu, l.scale_updates, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs);
}
}
void update_layer(layer l, network net)
{
int update_batch = net.batch*net.subdivisions;
float rate = get_current_rate(net);
l.t = get_current_batch(net);
if(l.update_gpu){
l.update_gpu(l, update_batch, rate, net.momentum, net.decay);
}
}
void merge_weights(layer l, layer base)
{
if (l.type == CONVOLUTIONAL) {
axpy_cpu(l.n, 1, l.biases, 1, base.biases, 1);
axpy_cpu(l.n*l.size*l.size*l.c, 1, l.weights, 1, base.weights, 1);
if (l.scales) {
axpy_cpu(l.n, 1, l.scales, 1, base.scales, 1);
}
} else if(l.type == CONNECTED) {
axpy_cpu(l.outputs, 1, l.biases, 1, base.biases, 1);
axpy_cpu(l.outputs*l.inputs, 1, l.weights, 1, base.weights, 1);
}
}
void scale_weights(layer l, float s)
{
if (l.type == CONVOLUTIONAL) {
scal_cpu(l.n, s, l.biases, 1);
scal_cpu(l.n*l.size*l.size*l.c, s, l.weights, 1);
if (l.scales) {
scal_cpu(l.n, s, l.scales, 1);
}
} else if(l.type == CONNECTED) {
scal_cpu(l.outputs, s, l.biases, 1);
scal_cpu(l.outputs*l.inputs, s, l.weights, 1);
}
}
void pull_weights(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_pull_array(l.biases_gpu, l.biases, l.n);
cuda_pull_array(l.weights_gpu, l.weights, l.n*l.size*l.size*l.c);
if(l.scales) cuda_pull_array(l.scales_gpu, l.scales, l.n);
} else if(l.type == CONNECTED){
cuda_pull_array(l.biases_gpu, l.biases, l.outputs);
cuda_pull_array(l.weights_gpu, l.weights, l.outputs*l.inputs);
}
}
void push_weights(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.biases_gpu, l.biases, l.n);
cuda_push_array(l.weights_gpu, l.weights, l.n*l.size*l.size*l.c);
if(l.scales) cuda_push_array(l.scales_gpu, l.scales, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.biases_gpu, l.biases, l.outputs);
cuda_push_array(l.weights_gpu, l.weights, l.outputs*l.inputs);
}
}
void distribute_weights(layer l, layer base)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.biases_gpu, base.biases, l.n);
cuda_push_array(l.weights_gpu, base.weights, l.n*l.size*l.size*l.c);
if(base.scales) cuda_push_array(l.scales_gpu, base.scales, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.biases_gpu, base.biases, l.outputs);
cuda_push_array(l.weights_gpu, base.weights, l.outputs*l.inputs);
}
}
void merge_updates(layer l, layer base)
{
if (l.type == CONVOLUTIONAL) {
axpy_cpu(l.n, 1, l.bias_updates, 1, base.bias_updates, 1);
axpy_cpu(l.n*l.size*l.size*l.c, 1, l.weight_updates, 1, base.weight_updates, 1);
if (l.scale_updates) {
axpy_cpu(l.n, 1, l.scale_updates, 1, base.scale_updates, 1);
}
} else if(l.type == CONNECTED) {
axpy_cpu(l.outputs, 1, l.bias_updates, 1, base.bias_updates, 1);
axpy_cpu(l.outputs*l.inputs, 1, l.weight_updates, 1, base.weight_updates, 1);
}
}
void distribute_updates(layer l, layer base)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.n);
cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.n*l.size*l.size*l.c);
if(base.scale_updates) cuda_push_array(l.scale_updates_gpu, base.scale_updates, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.outputs);
cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.outputs*l.inputs);
}
}
void sync_layer(network *nets, int n, int j)
{
//printf("Syncing layer %d\n", j);
int i;
network net = nets[0];
layer base = net.layers[j];
cuda_set_device(net.gpu_index);
pull_weights(base);
for (i = 1; i < n; ++i) {
cuda_set_device(nets[i].gpu_index);
layer l = nets[i].layers[j];
pull_weights(l);
merge_weights(l, base);
}
scale_weights(base, 1./n);
for (i = 0; i < n; ++i) {
cuda_set_device(nets[i].gpu_index);
layer l = nets[i].layers[j];
distribute_weights(l, base);
}
//printf("Done syncing layer %d\n", j);
}
typedef struct{
network *nets;
int n;
int j;
} sync_args;
void *sync_layer_thread(void *ptr)
{
sync_args args = *(sync_args*)ptr;
sync_layer(args.nets, args.n, args.j);
free(ptr);
return 0;
}
pthread_t sync_layer_in_thread(network *nets, int n, int j)
{
pthread_t thread;
sync_args *ptr = (sync_args *)calloc(1, sizeof(sync_args));
ptr->nets = nets;
ptr->n = n;
ptr->j = j;
if(pthread_create(&thread, 0, sync_layer_thread, ptr)) error("Thread creation failed");
return thread;
}
void sync_nets(network *nets, int n, int interval)
{
int j;
int layers = nets[0].n;
pthread_t *threads = (pthread_t *) calloc(layers, sizeof(pthread_t));
*nets[0].seen += interval * (n-1) * nets[0].batch * nets[0].subdivisions;
for (j = 0; j < n; ++j){
*nets[j].seen = *nets[0].seen;
}
for (j = 0; j < layers; ++j) {
threads[j] = sync_layer_in_thread(nets, n, j);
}
for (j = 0; j < layers; ++j) {
pthread_join(threads[j], 0);
}
free(threads);
}
float train_networks(network *nets, int n, data d, int interval)
{
int i;
int batch = nets[0].batch;
int subdivisions = nets[0].subdivisions;
assert(batch * subdivisions * n == d.X.rows);
pthread_t *threads = (pthread_t *) calloc(n, sizeof(pthread_t));
float *errors = (float *) calloc(n, sizeof(float));
float sum = 0;
for(i = 0; i < n; ++i){
data p = get_data_part(d, i, n);
threads[i] = train_network_in_thread(nets[i], p, errors + i);
}
for(i = 0; i < n; ++i){
pthread_join(threads[i], 0);
//printf("%f\n", errors[i]);
sum += errors[i];
}
//hipDeviceSynchronize();
if (get_current_batch(nets[0]) % interval == 0) {
printf("Syncing... ");
fflush(stdout);
sync_nets(nets, n, interval);
printf("Done!\n");
}
//hipDeviceSynchronize();
free(threads);
free(errors);
return (float)sum/(n);
}
float *get_network_output_layer_gpu(network net, int i)
{
layer l = net.layers[i];
cuda_pull_array(l.output_gpu, l.output, l.outputs*l.batch);
return l.output;
}
float *get_network_output_gpu(network net)
{
int i;
for(i = net.n-1; i > 0; --i) if(net.layers[i].type != COST) break;
return get_network_output_layer_gpu(net, i);
}
float *get_network_output_gpu_from_index(network net, int index)
{
int i;
for(i = index; i > 0; --i) if(net.layers[i].type != COST) break;
return get_network_output_layer_gpu(net, i);
}
float *network_predict_gpu(network net, float *input)
{
cuda_set_device(net.gpu_index);
int size = get_network_input_size(net) * net.batch;
int i, lindex = 0;
network_state state;
state.index = 0;
state.net = net;
state.input = cuda_make_array(input, size);
state.truth = 0;
state.train = 0;
state.delta = 0;
int* flag = (int*)calloc(net.n, sizeof(int));
forward_network_gpu_use_flag(net, state, flag, 0);
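// lindex ends up at the last layer that actually ran (normally the cost layer of the
// exit that stopped the pass); the prediction is the output of the layer just before it.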
for (i = 0; i < net.n; i++)
if (flag[i]) lindex = i;
float *out = get_network_output_gpu_from_index(net, lindex - 1);
cuda_free(state.input);
free(flag);
return out;
}
| c854ce5c0c720427017d4f2fe77a1ec3f8a413a8.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
extern "C" {
#include <stdio.h>
#include <time.h>
#include <assert.h>
#include "network.h"
#include "image.h"
#include "data.h"
#include "utils.h"
#include "parser.h"
#include "crop_layer.h"
#include "connected_layer.h"
#include "rnn_layer.h"
#include "gru_layer.h"
#include "crnn_layer.h"
#include "detection_layer.h"
#include "region_layer.h"
#include "convolutional_layer.h"
#include "activation_layer.h"
#include "maxpool_layer.h"
#include "reorg_layer.h"
#include "avgpool_layer.h"
#include "normalization_layer.h"
#include "batchnorm_layer.h"
#include "cost_layer.h"
#include "local_layer.h"
#include "softmax_layer.h"
#include "dropout_layer.h"
#include "route_layer.h"
#include "shortcut_layer.h"
#include "blas.h"
}
float * get_network_output_gpu_layer(network net, int i);
float * get_network_delta_gpu_layer(network net, int i);
float * get_network_output_gpu(network net);
float * get_network_output_layer_gpu(network net, int i);
void forward_network_gpu(network net, network_state state)
{
state.workspace = net.workspace;
int i;
for(i = 0; i < net.n; ++i){
state.index = i;
layer l = net.layers[i];
if(l.delta_gpu){
fill_ongpu(l.outputs * l.batch, 0, l.delta_gpu, 1);
}
l.forward_gpu(l, state);
state.input = l.output_gpu;
}
}
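// Standard backward pass: walk the layers in reverse; each layer reads its
// predecessor's output and writes gradients into the predecessor's delta buffer.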
void backward_network_gpu(network net, network_state state)
{
state.workspace = net.workspace;
int i;
float * original_input = state.input;
float * original_delta = state.delta;
for(i = net.n-1; i >= 0; --i){
state.index = i;
layer l = net.layers[i];
if(i == 0){
state.input = original_input;
state.delta = original_delta;
}else{
layer prev = net.layers[i-1];
state.input = prev.output_gpu;
state.delta = prev.delta_gpu;
}
l.backward_gpu(l, state);
}
}
void update_network_gpu(network net)
{
int i;
int update_batch = net.batch*net.subdivisions;
float rate = get_current_rate(net);
for(i = 0; i < net.n; ++i){
layer l = net.layers[i];
l.t = get_current_batch(net);
if(l.update_gpu){
l.update_gpu(l, update_batch, rate, net.momentum, net.decay);
}
}
}
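// Forward pass with early exit: flag[i] records which layers actually ran.
// At each intermediate COST layer the batch's top-1 probabilities are compared
// against an upper threshold (net.upperbound at inference; during training it is
// annealed from 1/nclasses toward net.upperbound as epochs accumulate). If more
// than half of the batch exceeds the threshold the pass stops there; otherwise
// the cost branch (cost layer back through its convolutional classifier) is
// flagged as skipped and the pass resumes from the layer just before it.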
void forward_network_gpu_use_flag(network net, network_state state, int* flag, int isTrain)
{
state.workspace = net.workspace;
int i;
//get upper threshold
float upper = net.upperbound;
if(isTrain)
{
float epoch = (float)(*net.seen) / net.N;
float percentage = epoch / net.nclasses / 100;
float prob_rand = 1.0 / net.nclasses;
upper = (net.upperbound - prob_rand) * percentage + prob_rand;
upper = upper > 1.0 ? 1.0 : upper;
if (int(epoch) % (net.nclasses + 1) == 1)
{
upper = 1;
printf("upper force to be %f", upper);
}
else
{
printf("upper change to be %f", upper);
}
if (net.print2console)
printf("\t\tEpoch: %f\t\tpercentage: %f\n",epoch, percentage);
}
for(i = 0; i < net.n; ++i){
state.index = i;
layer l = net.layers[i];
if(l.delta_gpu){
fill_ongpu(l.outputs * l.batch, 0, l.delta_gpu, 1);
}
l.forward_gpu(l, state);
state.input = l.output_gpu;
flag[i] = 1;
if (l.type == COST)
{
// float* out = (float*)calloc(net.layers[i - 1].outputs*net.layers[i - 1].batch, sizeof(float));
// cuda_copy_array(net.layers[i - 1].output_gpu, out, net.layers[i - 1].outputs*net.layers[i - 1].batch);
if (net.early_stop)
{
//if train use voting to deciside whether to stop
//else use one sample.
float* out = get_network_output_layer_gpu(net, i - 1);
int outputs = net.layers[i - 1].outputs;
int batch_size = net.batch;
int indexes;
int b;
int early_stop_number = 0;
float mean_prob = 0;
for (b = 0; b < batch_size; b++)
{
top_k(out, outputs, 1, &indexes);
if(out[indexes + outputs * b] >= upper)
{
early_stop_number++;
mean_prob += out[indexes + outputs * b];
}
}
if (net.print2console)
{
if (batch_size == 1)
printf("Cost layer AT %d with probability %.6f of type %d", i, out[indexes], indexes);
else
printf("Cost layer AT %d, mean probability %.6f of %d samples",
i, mean_prob /(early_stop_number + 0.000001), early_stop_number);
}
if(early_stop_number > batch_size / 2)
{
if (net.print2console)
printf("----------------------------STOP!\n");
break;
}
}
if (i != net.n - 1)
{
if (net.early_stop && net.print2console)
printf("----------------------------DOESN'T STOP!\n");
int i_forward = i;
//Cost layer set to be false
flag[i_forward--] = 0;
while(net.layers[i_forward].type != CONVOLUTIONAL)
flag[i_forward--] = 0;
//last fully convolutional layer set to be false
flag[i_forward--] = 0;
state.input = net.layers[i_forward].output_gpu;
}
else
{
if (net.early_stop && net.print2console)
printf("----------------------------STOP!\n");
}
}
}
if (net.early_stop && net.print2console)
{
printf("Layer");
int total_ignored = 0;
for (i = 0; i < net.n; i++)
if (!flag[i])
{
printf(" %d", i);
total_ignored++;
}
if (total_ignored)
printf(" is ignored!\n");
else
printf("None is ignored!\n");
}
}
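// Step backwards from `index` to the nearest layer whose flag is set, i.e. the
// closest layer that was actually executed during the forward pass.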
int get_previous_layer_index_by_flag(int* flag, int index)
{
while(!flag[--index]);
return index;
}
void backward_network_gpu_use_flag(network net, network_state state, int* flag)
{
state.workspace = net.workspace;
int i;
float * original_input = state.input;
float * original_delta = state.delta;
int last_layer, first_layer;
for (i = net.n - 1; i >= 0; i--)
if (flag[i]) break;
last_layer = i;
for(i = last_layer; i >= 0; i--)
if (!flag[i]) break;
first_layer = i;
printf("Backward and Update layer:");
for(i = last_layer; i > first_layer; --i){
printf(" %d", i);
state.index = i;
layer l = net.layers[i];
if(i == 0){
state.input = original_input;
state.delta = original_delta;
}else{
int prev_index = get_previous_layer_index_by_flag(flag, i);
if (net.print2console)
printf("<-%d", prev_index);
layer prev = net.layers[prev_index];
state.input = prev.output_gpu;
state.delta = prev.delta_gpu;
}
l.backward_gpu(l, state);
}
printf("\n");
}
void update_network_gpu_use_flag(network net, int* flag)
{
int i;
int update_batch = net.batch*net.subdivisions;
float rate = get_current_rate(net);
int last_layer, first_layer;
for (i = net.n - 1; i >= 0; i--)
if (flag[i]) break;
last_layer = i;
for(i = last_layer; i >= 0; i--)
if (!flag[i]) break;
first_layer = i;
for(i = first_layer + 1; i <= last_layer; ++i){
layer l = net.layers[i];
l.t = get_current_batch(net);
if(l.update_gpu){
l.update_gpu(l, update_batch, rate, net.momentum, net.decay);
}
}
}
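// Copy one (input, truth) mini-batch into persistent device buffers (allocated
// on first use) and run a full forward/backward pass.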
void forward_backward_network_gpu(network net, float *x, float *y)
{
network_state state;
state.index = 0;
state.net = net;
int x_size = get_network_input_size(net)*net.batch;
int y_size = get_network_output_size(net)*net.batch;
if(net.layers[net.n-1].truths) y_size = net.layers[net.n-1].truths*net.batch;
if(!*net.input_gpu){
*net.input_gpu = cuda_make_array(x, x_size);
*net.truth_gpu = cuda_make_array(y, y_size);
}else{
cuda_push_array(*net.input_gpu, x, x_size);
cuda_push_array(*net.truth_gpu, y, y_size);
}
state.input = *net.input_gpu;
state.delta = 0;
state.truth = *net.truth_gpu;
state.train = 1;
forward_network_gpu(net, state);
backward_network_gpu(net, state);
}
void forward_backward_network_gpu_use_flag(network net, float *x, float *y, int* flag)
{
network_state state;
state.index = 0;
state.net = net;
int x_size = get_network_input_size(net)*net.batch;
int y_size = get_network_output_size(net)*net.batch;
if(net.layers[net.n-1].truths) y_size = net.layers[net.n-1].truths*net.batch;
if(!*net.input_gpu){
*net.input_gpu = cuda_make_array(x, x_size);
*net.truth_gpu = cuda_make_array(y, y_size);
}else{
cuda_push_array(*net.input_gpu, x, x_size);
cuda_push_array(*net.truth_gpu, y, y_size);
}
state.input = *net.input_gpu;
state.delta = 0;
state.truth = *net.truth_gpu;
state.train = 1;
forward_network_gpu_use_flag(net, state, flag, 1);
backward_network_gpu_use_flag(net, state, flag);
}
float train_network_datum_gpu(network net, float *x, float *y)
{
int* flag = (int*)calloc(net.n, sizeof(int));
*net.seen += net.batch;
forward_backward_network_gpu_use_flag(net, x, y, flag);
float error = get_network_cost(net);
if (((*net.seen) / net.batch) % net.subdivisions == 0) update_network_gpu_use_flag(net, flag);
free(flag);
return error;
}
typedef struct {
network net;
data d;
float *err;
} train_args;
void *train_thread(void *ptr)
{
train_args args = *(train_args*)ptr;
free(ptr);
cuda_set_device(args.net.gpu_index);
*args.err = train_network(args.net, args.d);
return 0;
}
pthread_t train_network_in_thread(network net, data d, float *err)
{
pthread_t thread;
train_args *ptr = (train_args *)calloc(1, sizeof(train_args));
ptr->net = net;
ptr->d = d;
ptr->err = err;
if(pthread_create(&thread, 0, train_thread, ptr)) error("Thread creation failed");
return thread;
}
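// The helpers below move per-layer weights and accumulated updates between host
// and device; they are the building blocks of the multi-GPU synchronization code.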
void pull_updates(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.n);
cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.n*l.size*l.size*l.c);
if(l.scale_updates) cuda_pull_array(l.scale_updates_gpu, l.scale_updates, l.n);
} else if(l.type == CONNECTED){
cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs);
}
}
void push_updates(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n);
cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.n*l.size*l.size*l.c);
if(l.scale_updates) cuda_push_array(l.scale_updates_gpu, l.scale_updates, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs);
}
}
void update_layer(layer l, network net)
{
int update_batch = net.batch*net.subdivisions;
float rate = get_current_rate(net);
l.t = get_current_batch(net);
if(l.update_gpu){
l.update_gpu(l, update_batch, rate, net.momentum, net.decay);
}
}
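// Accumulate layer l's weights into base (axpy with alpha = 1); together with
// scale_weights(base, 1./n) this averages the weights across n replicas.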
void merge_weights(layer l, layer base)
{
if (l.type == CONVOLUTIONAL) {
axpy_cpu(l.n, 1, l.biases, 1, base.biases, 1);
axpy_cpu(l.n*l.size*l.size*l.c, 1, l.weights, 1, base.weights, 1);
if (l.scales) {
axpy_cpu(l.n, 1, l.scales, 1, base.scales, 1);
}
} else if(l.type == CONNECTED) {
axpy_cpu(l.outputs, 1, l.biases, 1, base.biases, 1);
axpy_cpu(l.outputs*l.inputs, 1, l.weights, 1, base.weights, 1);
}
}
void scale_weights(layer l, float s)
{
if (l.type == CONVOLUTIONAL) {
scal_cpu(l.n, s, l.biases, 1);
scal_cpu(l.n*l.size*l.size*l.c, s, l.weights, 1);
if (l.scales) {
scal_cpu(l.n, s, l.scales, 1);
}
} else if(l.type == CONNECTED) {
scal_cpu(l.outputs, s, l.biases, 1);
scal_cpu(l.outputs*l.inputs, s, l.weights, 1);
}
}
void pull_weights(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_pull_array(l.biases_gpu, l.biases, l.n);
cuda_pull_array(l.weights_gpu, l.weights, l.n*l.size*l.size*l.c);
if(l.scales) cuda_pull_array(l.scales_gpu, l.scales, l.n);
} else if(l.type == CONNECTED){
cuda_pull_array(l.biases_gpu, l.biases, l.outputs);
cuda_pull_array(l.weights_gpu, l.weights, l.outputs*l.inputs);
}
}
void push_weights(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.biases_gpu, l.biases, l.n);
cuda_push_array(l.weights_gpu, l.weights, l.n*l.size*l.size*l.c);
if(l.scales) cuda_push_array(l.scales_gpu, l.scales, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.biases_gpu, l.biases, l.outputs);
cuda_push_array(l.weights_gpu, l.weights, l.outputs*l.inputs);
}
}
void distribute_weights(layer l, layer base)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.biases_gpu, base.biases, l.n);
cuda_push_array(l.weights_gpu, base.weights, l.n*l.size*l.size*l.c);
if(base.scales) cuda_push_array(l.scales_gpu, base.scales, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.biases_gpu, base.biases, l.outputs);
cuda_push_array(l.weights_gpu, base.weights, l.outputs*l.inputs);
}
}
void merge_updates(layer l, layer base)
{
if (l.type == CONVOLUTIONAL) {
axpy_cpu(l.n, 1, l.bias_updates, 1, base.bias_updates, 1);
axpy_cpu(l.n*l.size*l.size*l.c, 1, l.weight_updates, 1, base.weight_updates, 1);
if (l.scale_updates) {
axpy_cpu(l.n, 1, l.scale_updates, 1, base.scale_updates, 1);
}
} else if(l.type == CONNECTED) {
axpy_cpu(l.outputs, 1, l.bias_updates, 1, base.bias_updates, 1);
axpy_cpu(l.outputs*l.inputs, 1, l.weight_updates, 1, base.weight_updates, 1);
}
}
void distribute_updates(layer l, layer base)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.n);
cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.n*l.size*l.size*l.c);
if(base.scale_updates) cuda_push_array(l.scale_updates_gpu, base.scale_updates, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.outputs);
cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.outputs*l.inputs);
}
}
void sync_layer(network *nets, int n, int j)
{
//printf("Syncing layer %d\n", j);
int i;
network net = nets[0];
layer base = net.layers[j];
cuda_set_device(net.gpu_index);
pull_weights(base);
for (i = 1; i < n; ++i) {
cuda_set_device(nets[i].gpu_index);
layer l = nets[i].layers[j];
pull_weights(l);
merge_weights(l, base);
}
scale_weights(base, 1./n);
for (i = 0; i < n; ++i) {
cuda_set_device(nets[i].gpu_index);
layer l = nets[i].layers[j];
distribute_weights(l, base);
}
//printf("Done syncing layer %d\n", j);
}
typedef struct{
network *nets;
int n;
int j;
} sync_args;
void *sync_layer_thread(void *ptr)
{
sync_args args = *(sync_args*)ptr;
sync_layer(args.nets, args.n, args.j);
free(ptr);
return 0;
}
pthread_t sync_layer_in_thread(network *nets, int n, int j)
{
pthread_t thread;
sync_args *ptr = (sync_args *)calloc(1, sizeof(sync_args));
ptr->nets = nets;
ptr->n = n;
ptr->j = j;
if(pthread_create(&thread, 0, sync_layer_thread, ptr)) error("Thread creation failed");
return thread;
}
void sync_nets(network *nets, int n, int interval)
{
int j;
int layers = nets[0].n;
pthread_t *threads = (pthread_t *) calloc(layers, sizeof(pthread_t));
*nets[0].seen += interval * (n-1) * nets[0].batch * nets[0].subdivisions;
for (j = 0; j < n; ++j){
*nets[j].seen = *nets[0].seen;
}
for (j = 0; j < layers; ++j) {
threads[j] = sync_layer_in_thread(nets, n, j);
}
for (j = 0; j < layers; ++j) {
pthread_join(threads[j], 0);
}
free(threads);
}
float train_networks(network *nets, int n, data d, int interval)
{
int i;
int batch = nets[0].batch;
int subdivisions = nets[0].subdivisions;
assert(batch * subdivisions * n == d.X.rows);
pthread_t *threads = (pthread_t *) calloc(n, sizeof(pthread_t));
float *errors = (float *) calloc(n, sizeof(float));
float sum = 0;
for(i = 0; i < n; ++i){
data p = get_data_part(d, i, n);
threads[i] = train_network_in_thread(nets[i], p, errors + i);
}
for(i = 0; i < n; ++i){
pthread_join(threads[i], 0);
//printf("%f\n", errors[i]);
sum += errors[i];
}
//cudaDeviceSynchronize();
if (get_current_batch(nets[0]) % interval == 0) {
printf("Syncing... ");
fflush(stdout);
sync_nets(nets, n, interval);
printf("Done!\n");
}
//cudaDeviceSynchronize();
free(threads);
free(errors);
return (float)sum/(n);
}
float *get_network_output_layer_gpu(network net, int i)
{
layer l = net.layers[i];
cuda_pull_array(l.output_gpu, l.output, l.outputs*l.batch);
return l.output;
}
float *get_network_output_gpu(network net)
{
int i;
for(i = net.n-1; i > 0; --i) if(net.layers[i].type != COST) break;
return get_network_output_layer_gpu(net, i);
}
float *get_network_output_gpu_from_index(network net, int index)
{
int i;
for(i = index; i > 0; --i) if(net.layers[i].type != COST) break;
return get_network_output_layer_gpu(net, i);
}
float *network_predict_gpu(network net, float *input)
{
cuda_set_device(net.gpu_index);
int size = get_network_input_size(net) * net.batch;
int i, lindex = 0;
network_state state;
state.index = 0;
state.net = net;
state.input = cuda_make_array(input, size);
state.truth = 0;
state.train = 0;
state.delta = 0;
int* flag = (int*)calloc(net.n, sizeof(int));
forward_network_gpu_use_flag(net, state, flag, 0);
for (i = 0; i < net.n; i++)
if (flag[i]) lindex = i;
float *out = get_network_output_gpu_from_index(net, lindex - 1);
cuda_free(state.input);
free(flag);
return out;
}
|
d33fd84a6f9e91139dd5a0182b5469ce36476ca1.hip | // !!! This is a file automatically generated by hipify!!!
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// includes
#include <helper_functions.h> // helper for shared functions common to CUDA Samples
#include <helper_cuda.h> // helper functions for CUDA error checking and initialization
extern "C"
{
#include <hip/hip_runtime.h>
}
#define MEMSIZE 30
__global__ void kern_compute_string(char *res, char *a, char *b, char *c, int length)
{
int i;
i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < length)
{
res[i] = a[i] + b[i] + c[i];
}
}
/* Function computing the final string to print */
void compute_string(char *res, char *a, char *b, char *c, int length)
{
int i;
for (i = 0; i < length; i++)
{
res[i] = a[i] + b[i] + c[i];
}
}
extern "C" int nameOfFunction()
{
char *res;
char a[30] = {40, 70, 70, 70, 80, 0, 50, 80, 80, 70, 70, 0, 40, 80, 79,
70, 0, 40, 50, 50, 0, 70, 80, 0, 30, 50, 30, 30, 0, 0};
char b[30] = {10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 0, 0};
char c[30] = {22, 21, 28, 28, 21, 22, 27, 21, 24, 28, 20, 22, 20, 24, 22,
29, 22, 21, 20, 25, 22, 25, 20, 22, 27, 25, 28, 25, 0, 0};
res = (char *)malloc(30 * sizeof(char));
/* This function call should be programmed in CUDA */
/* -> need to allocate and transfer data to/from the device */
char *d_a, *d_b, *d_c, *d_res;
dim3 gridDim;
gridDim.x = 8;
dim3 blockDim;
blockDim.x = 8;
checkCudaErrors(hipMalloc((void **)&d_a, MEMSIZE * sizeof(char)));
checkCudaErrors(hipMalloc((void **)&d_b, MEMSIZE * sizeof(char)));
checkCudaErrors(hipMalloc((void **)&d_c, MEMSIZE * sizeof(char)));
checkCudaErrors(hipMalloc((void **)&d_res, MEMSIZE * sizeof(char)));
//initialize the device memory
checkCudaErrors(hipMemcpy(d_a, a, MEMSIZE, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_b, b, MEMSIZE, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_c, c, MEMSIZE, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( kern_compute_string), dim3(gridDim), dim3(blockDim), 0, 0, d_res, d_a, d_b, d_c, MEMSIZE);
checkCudaErrors(hipMemcpy(res, d_res, MEMSIZE, hipMemcpyDeviceToHost));
// compute_string(res, a, b, c, MEMSIZE);
printf("%s\n", res);
return 0;
}
| d33fd84a6f9e91139dd5a0182b5469ce36476ca1.cu | #include <string.h>
#include <stdio.h>
#include <stdlib.h>
// CUDA runtime
#include <cuda_runtime.h>
// includes
#include <helper_functions.h> // helper for shared functions common to CUDA Samples
#include <helper_cuda.h> // helper functions for CUDA error checking and initialization
extern "C"
{
#include <cuda.h>
}
#define MEMSIZE 30
__global__ void kern_compute_string(char *res, char *a, char *b, char *c, int length)
{
int i;
i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < length)
{
res[i] = a[i] + b[i] + c[i];
}
}
/* Function computing the final string to print */
void compute_string(char *res, char *a, char *b, char *c, int length)
{
int i;
for (i = 0; i < length; i++)
{
res[i] = a[i] + b[i] + c[i];
}
}
extern "C" int nameOfFunction()
{
char *res;
char a[30] = {40, 70, 70, 70, 80, 0, 50, 80, 80, 70, 70, 0, 40, 80, 79,
70, 0, 40, 50, 50, 0, 70, 80, 0, 30, 50, 30, 30, 0, 0};
char b[30] = {10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 0, 0};
char c[30] = {22, 21, 28, 28, 21, 22, 27, 21, 24, 28, 20, 22, 20, 24, 22,
29, 22, 21, 20, 25, 22, 25, 20, 22, 27, 25, 28, 25, 0, 0};
res = (char *)malloc(30 * sizeof(char));
/* This function call should be programmed in CUDA */
/* -> need to allocate and transfer data to/from the device */
char *d_a, *d_b, *d_c, *d_res;
dim3 gridDim;
gridDim.x = 8;
dim3 blockDim;
blockDim.x = 8;
checkCudaErrors(cudaMalloc((void **)&d_a, MEMSIZE * sizeof(char)));
checkCudaErrors(cudaMalloc((void **)&d_b, MEMSIZE * sizeof(char)));
checkCudaErrors(cudaMalloc((void **)&d_c, MEMSIZE * sizeof(char)));
checkCudaErrors(cudaMalloc((void **)&d_res, MEMSIZE * sizeof(char)));
//initialize the device memory
checkCudaErrors(cudaMemcpy(d_a, a, MEMSIZE, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_b, b, MEMSIZE, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_c, c, MEMSIZE, cudaMemcpyHostToDevice));
kern_compute_string<<<gridDim, blockDim>>>(d_res, d_a, d_b, d_c, MEMSIZE);
checkCudaErrors(cudaMemcpy(res, d_res, MEMSIZE, cudaMemcpyDeviceToHost));
// compute_string(res, a, b, c, MEMSIZE);
printf("%s\n", res);
return 0;
}
|
2c7cbea6800c6967d60f818d617f8420891f41f7.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#include <mshadow/tensor.h>
#include "./index_array-inl.h"
namespace mxnet {
namespace op {
using namespace mshadow::cuda;
void IndexArrayForwardGPU(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
const TBlob& in_data = inputs[0];
const TBlob& out_data = outputs[0];
const IndexArrayParam& param = nnvm::get<IndexArrayParam>(attrs.parsed);
const TShape inshape = in_data.shape_;
const int ndim = inshape.ndim();
Stream<gpu> *stream = ctx.get_stream<gpu>();
using namespace mxnet_op;
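// Two code paths: when specific axes are requested, a small device workspace
// (two entries per requested axis, built on the host) lets the kernel emit
// coordinates for just those axes; otherwise the full shape is copied over and
// every dimension of each flat index is decoded.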
if (param.axes.has_value()) {
const mxnet::Tuple<int>& axes = param.axes.value();
const int naxes = axes.ndim();
std::vector<int64_t> index_products = IndexArrayComputeIndexProducts(inshape);
std::vector<int64_t> cpu_workspace(2 * naxes);
IndexArrayBuildSelectedAxesWorkspace(axes, index_products, cpu_workspace.data(), ndim);
Tensor<gpu, 1, int64_t> workspace =
ctx.requested[0].get_space_typed<gpu, 1, int64_t>(Shape1(2 * naxes), stream);
CUDA_CALL(hipMemcpy(workspace.dptr_, cpu_workspace.data(), sizeof(int64_t) * (2 * naxes),
hipMemcpyHostToDevice));
MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
Kernel<IndexArrayKernel<req_type>, gpu>::Launch(stream, in_data.Size(),
out_data.dptr<int64_t>(), naxes, workspace.dptr_);
});
} else {
Tensor<gpu, 1, dim_t> workspace =
ctx.requested[0].get_space_typed<gpu, 1, dim_t>(Shape1(ndim), stream);
CUDA_CALL(hipMemcpy(workspace.dptr_, inshape.data(), sizeof(dim_t) * ndim,
hipMemcpyHostToDevice));
MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
Kernel<IndexArrayDefaultKernel<req_type>, gpu>::Launch(stream, in_data.Size(),
out_data.dptr<int64_t>(), ndim, workspace.dptr_);
});
}
}
NNVM_REGISTER_OP(_contrib_index_array)
.set_attr<FCompute>("FCompute<gpu>", IndexArrayForwardGPU);
} // namespace op
} // namespace mxnet
| 2c7cbea6800c6967d60f818d617f8420891f41f7.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#include <mshadow/tensor.h>
#include "./index_array-inl.h"
namespace mxnet {
namespace op {
using namespace mshadow::cuda;
void IndexArrayForwardGPU(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
const TBlob& in_data = inputs[0];
const TBlob& out_data = outputs[0];
const IndexArrayParam& param = nnvm::get<IndexArrayParam>(attrs.parsed);
const TShape inshape = in_data.shape_;
const int ndim = inshape.ndim();
Stream<gpu> *stream = ctx.get_stream<gpu>();
using namespace mxnet_op;
if (param.axes.has_value()) {
const mxnet::Tuple<int>& axes = param.axes.value();
const int naxes = axes.ndim();
std::vector<int64_t> index_products = IndexArrayComputeIndexProducts(inshape);
std::vector<int64_t> cpu_workspace(2 * naxes);
IndexArrayBuildSelectedAxesWorkspace(axes, index_products, cpu_workspace.data(), ndim);
Tensor<gpu, 1, int64_t> workspace =
ctx.requested[0].get_space_typed<gpu, 1, int64_t>(Shape1(2 * naxes), stream);
CUDA_CALL(cudaMemcpy(workspace.dptr_, cpu_workspace.data(), sizeof(int64_t) * (2 * naxes),
cudaMemcpyHostToDevice));
MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
Kernel<IndexArrayKernel<req_type>, gpu>::Launch(stream, in_data.Size(),
out_data.dptr<int64_t>(), naxes, workspace.dptr_);
});
} else {
Tensor<gpu, 1, dim_t> workspace =
ctx.requested[0].get_space_typed<gpu, 1, dim_t>(Shape1(ndim), stream);
CUDA_CALL(cudaMemcpy(workspace.dptr_, inshape.data(), sizeof(dim_t) * ndim,
cudaMemcpyHostToDevice));
MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
Kernel<IndexArrayDefaultKernel<req_type>, gpu>::Launch(stream, in_data.Size(),
out_data.dptr<int64_t>(), ndim, workspace.dptr_);
});
}
}
NNVM_REGISTER_OP(_contrib_index_array)
.set_attr<FCompute>("FCompute<gpu>", IndexArrayForwardGPU);
} // namespace op
} // namespace mxnet
|
d07830e251f950fa58f5401cbea5c842ba5e643a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "lin_gpu.h"
size_t block_size = 128;
size_t max_grid_x = 65535;
// first version of matrix multiplication
// use nr_C*nc_C threads to compute A*B
extern "C" void gmm1(float *gA, size_t nr_A, size_t nc_A,
float *gB, size_t nr_B, size_t nc_B,
float *gC, size_t nr_C, size_t nc_C) {
// compute how many threads and blocks are needed
size_t num_cell = nr_A*nc_B;
size_t num_block = (num_cell-1)/block_size+1;
// compute grid dimension
size_t num_grid_y = (num_block-1)/max_grid_x+1;
size_t num_grid_x = num_block < max_grid_x ? num_block : max_grid_x;
// launch kernel
dim3 dimBlock(block_size, 1);
dim3 dimGrid(num_grid_x, num_grid_y);
hipLaunchKernelGGL(( gmm1_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, gA, nr_A, nc_A,
gB, nr_B, nc_B, gC, nr_C, nc_C);
// check kernel result
hipDeviceSynchronize();
hipError_t crc = hipGetLastError();
if(crc) {
printf("emptyKernel error=%d:%s\n", crc, hipGetErrorString(crc));
exit(1);
}
}
// kernel function for gmm1: one thread computes one element of C
__global__ void gmm1_kernel(float *gA, size_t nr_A, size_t nc_A,
float *gB, size_t nr_B, size_t nc_B,
float *gC, size_t nr_C, size_t nc_C) {
// get absoluate idx of thread
size_t j = threadIdx.x+blockDim.x*(blockIdx.x+gridDim.x*blockIdx.y);
// check if j is within range
if(j < nr_C*nc_C) {
// obtain row and column of the cell thread j to compute
size_t r = j / nc_C;
size_t c = j % nc_C;
// compute the inner product of r-th row of A and c-th column of B
float val = 0.0;
for(size_t i=0; i<nc_A; i++) {
val += gA[r*nc_A+i]*gB[i*nc_B+c];
}
// save results
gC[j] = val;
}
}
// compute A*B using shared-memory tiling
extern "C" void gmm2(float *gA, size_t nr_A, size_t nc_A,
float *gB, size_t nr_B, size_t nc_B,
float *gC, size_t nr_C, size_t nc_C) {
// define tile size
size_t tilesize_r = 32;
size_t tilesize_c = 32;
size_t tilesize_m = 32;
// check for small matrix conditions
tilesize_r = tilesize_r>nr_C ? nr_C : tilesize_r;
tilesize_c = tilesize_c>nc_C ? nc_C : tilesize_c;
tilesize_m = tilesize_m>nc_A ? nc_A : tilesize_m;
// compute the number of tiles
size_t ntiles_r = (nr_C-1)/tilesize_r+1;
size_t ntiles_c = (nc_C-1)/tilesize_c+1;
size_t ntiles = ntiles_r*ntiles_c;
// compute number of col/row tiles for A/B
size_t ntiles_c_A = (nc_A-1)/tilesize_m+1;
size_t ntiles_c_B = (nc_B-1)/tilesize_c+1;
// number of cells in a tile
size_t ntcells_C = tilesize_r*tilesize_c;
size_t ntcells_A = tilesize_r*tilesize_m;
size_t ntcells_B = tilesize_m*tilesize_c;
// number of cells
size_t ncells_C = nr_C*nc_C;
size_t ncells_A = nr_A*nc_A;
size_t ncells_B = nr_B*nc_B;
// allocate shared memory
size_t nshmem = ntcells_C+ntcells_A+ntcells_B;
nshmem *= sizeof(float);
// compute how many threads and blocks are needed
size_t num_block = ntiles;
// compute grid dimension
size_t num_grid_y = (num_block-1)/max_grid_x+1;
size_t num_grid_x = num_block<max_grid_x ? num_block : max_grid_x;
// launch kernel
dim3 dimBlock(block_size, 1);
dim3 dimGrid(num_grid_x, num_grid_y);
hipLaunchKernelGGL(( gmm2_kernel), dim3(dimGrid),dim3(dimBlock),nshmem, 0,
gA, nr_A, nc_A,
gB, nr_B, nc_B, gC, nr_C, nc_C,
ntcells_A, ntcells_B, ntcells_C,
tilesize_r, tilesize_c, tilesize_m,
ntiles_c, ntiles_c_A, ntiles_c_B,
ncells_A, ncells_B, ncells_C
);
// check kernel result
hipDeviceSynchronize();
hipError_t crc = hipGetLastError();
if(crc) {
printf("emptyKernel error=%d:%s\n", crc, hipGetErrorString(crc));
exit(1);
}
}
// kernel function for gmm2: shared-memory tiled matrix multiply
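// Each thread block owns one tilesize_r x tilesize_c tile of C. It walks the
// shared dimension in steps of tilesize_m, staging matching tiles of A and B in
// shared memory (ptra/ptrb), accumulating partial products into a shared tile of
// C (ptrc), and finally writing that tile back to global memory.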
__global__ void gmm2_kernel(float *gA, size_t nr_A, size_t nc_A,
float *gB, size_t nr_B, size_t nc_B,
float *gC, size_t nr_C, size_t nc_C,
size_t ntcells_A, size_t ntcells_B, size_t ntcells_C,
size_t tilesize_r, size_t tilesize_c, size_t tilesize_m,
size_t ntiles_c, size_t ntiles_c_A, size_t ntiles_c_B,
size_t ncells_A, size_t ncells_B, size_t ncells_C) {
// get index for tile
size_t i = blockIdx.x+gridDim.x*blockIdx.y;
// get block and thread information
size_t blksz = blockDim.x;
size_t tidx = threadIdx.x;
// get shared memory
extern __shared__ float shmem[];
float *ptra = &shmem[0];
float *ptrb = &shmem[ntcells_A];
float *ptrc = &shmem[ntcells_A+ntcells_B];
// initialize shared memory
#pragma unroll
for(size_t ii=tidx; ii<ntcells_C; ii+=blksz) {
ptrc[ii] = 0.0;
}
// get tile r and tile c
size_t ir = i/ntiles_c;
size_t ic = i%ntiles_c;
// check boundary condition
size_t ntcbm1 = ntiles_c_B-1;
size_t ncolb = ic==ntcbm1 ? (nc_B-ntcbm1*tilesize_c) : tilesize_c;
size_t ncolc = ncolb;
// iterate through corresponding tiles for A and B
for(size_t j=0; j<ntiles_c_A; j++) {
// check boundary condition
size_t ntcam1 = ntiles_c_A-1;
size_t ncola = j==ntcam1 ? (nc_A-ntcam1*tilesize_m) : tilesize_m;
// load tiles for A into shared memory
for(size_t ii=tidx; ii<ntcells_A; ii+=blksz) {
// within cell idx
size_t tr = ii/ncola;
size_t tc = ii%ncola;
// absolute A index
size_t aidx = (ir*tilesize_r+tr)*nc_A;
aidx += j*tilesize_m+tc;
// load to shared memory
if(aidx < ncells_A) {
ptra[tr*ncola+tc] = gA[aidx];
}
}
// load tiles for B into shared memory
for(size_t ii=tidx; ii<ntcells_B; ii+=blksz) {
// within tile idx
size_t tr = ii/ncolb;
size_t tc = ii%ncolb;
// absolute B index
size_t bidx = (j*tilesize_m+tr)*nc_B;
bidx += ic*tilesize_c+tc;
// load to shared memory
if(bidx < ncells_B) {
ptrb[tr*ncolb+tc] = gB[bidx];
}
}
__syncthreads();
// make mat mult in shared memory
for(size_t ii=tidx; ii<tilesize_r*ncolc; ii+=blksz) {
// within tile idx
size_t tr = ii/ncolc;
size_t tc = ii%ncolc;
// vector dot product
#pragma unroll
for(size_t jj=0; jj<ncola; jj++){
float prod = ptra[tr*ncola+jj]*ptrb[jj*ncolb+tc];
ptrc[tr*ncolc+tc] += prod;
}
}
}
// write from shared memory to global memory
#pragma unroll
for(size_t ii=tidx; ii<tilesize_r*ncolc; ii+=blksz) {
// within tile idx
size_t tr = ii/ncolc;
size_t tc = ii%ncolc;
// absolute C index
size_t cidx = (ir*tilesize_r+tr)*nc_C;
cidx += ic*tilesize_c+tc;
// write to global memory
if(cidx < ncells_C) {
gC[cidx] = ptrc[tr*ncolc+tc];
}
}
}
| d07830e251f950fa58f5401cbea5c842ba5e643a.cu | #include "lin_gpu.h"
size_t block_size = 128;
size_t max_grid_x = 65535;
// first version of matrix multiplication
// use nr_C*nc_C threads to compute A*B
extern "C" void gmm1(float *gA, size_t nr_A, size_t nc_A,
float *gB, size_t nr_B, size_t nc_B,
float *gC, size_t nr_C, size_t nc_C) {
// compute how many threads and blocks are needed
size_t num_cell = nr_A*nc_B;
size_t num_block = (num_cell-1)/block_size+1;
// compute grid dimension
size_t num_grid_y = (num_block-1)/max_grid_x+1;
size_t num_grid_x = num_block < max_grid_x ? num_block : max_grid_x;
// launch kernel
dim3 dimBlock(block_size, 1);
dim3 dimGrid(num_grid_x, num_grid_y);
gmm1_kernel<<<dimGrid,dimBlock>>>(gA, nr_A, nc_A,
gB, nr_B, nc_B, gC, nr_C, nc_C);
// check kernel result
cudaThreadSynchronize();
cudaError_t crc = cudaGetLastError();
if(crc) {
printf("emptyKernel error=%d:%s\n", crc, cudaGetErrorString(crc));
exit(1);
}
}
// kernel function for gmm1: one thread computes one element of C
__global__ void gmm1_kernel(float *gA, size_t nr_A, size_t nc_A,
float *gB, size_t nr_B, size_t nc_B,
float *gC, size_t nr_C, size_t nc_C) {
// get absoluate idx of thread
size_t j = threadIdx.x+blockDim.x*(blockIdx.x+gridDim.x*blockIdx.y);
// check if j is within range
if(j < nr_C*nc_C) {
// obtain row and column of the cell thread j to compute
size_t r = j / nc_C;
size_t c = j % nc_C;
// compute the inner product of r-th row of A and c-th column of B
float val = 0.0;
for(size_t i=0; i<nc_A; i++) {
val += gA[r*nc_A+i]*gB[i*nc_B+c];
}
// save results
gC[j] = val;
}
}
// compute A*B using shared-memory tiling
extern "C" void gmm2(float *gA, size_t nr_A, size_t nc_A,
float *gB, size_t nr_B, size_t nc_B,
float *gC, size_t nr_C, size_t nc_C) {
// define tile size
size_t tilesize_r = 32;
size_t tilesize_c = 32;
size_t tilesize_m = 32;
// check for small matrix conditions
tilesize_r = tilesize_r>nr_C ? nr_C : tilesize_r;
tilesize_c = tilesize_c>nc_C ? nc_C : tilesize_c;
tilesize_m = tilesize_m>nc_A ? nc_A : tilesize_m;
// compute the number of tiles
size_t ntiles_r = (nr_C-1)/tilesize_r+1;
size_t ntiles_c = (nc_C-1)/tilesize_c+1;
size_t ntiles = ntiles_r*ntiles_c;
// compute number of col/row tiles for A/B
size_t ntiles_c_A = (nc_A-1)/tilesize_m+1;
size_t ntiles_c_B = (nc_B-1)/tilesize_c+1;
// number of cells in a tile
size_t ntcells_C = tilesize_r*tilesize_c;
size_t ntcells_A = tilesize_r*tilesize_m;
size_t ntcells_B = tilesize_m*tilesize_c;
// number of cells
size_t ncells_C = nr_C*nc_C;
size_t ncells_A = nr_A*nc_A;
size_t ncells_B = nr_B*nc_B;
// allocate shared memory
size_t nshmem = ntcells_C+ntcells_A+ntcells_B;
nshmem *= sizeof(float);
// compute how many threads and blocks are needed
size_t num_block = ntiles;
// compute grid dimension
size_t num_grid_y = (num_block-1)/max_grid_x+1;
size_t num_grid_x = num_block<max_grid_x ? num_block : max_grid_x;
// launch kernel
dim3 dimBlock(block_size, 1);
dim3 dimGrid(num_grid_x, num_grid_y);
gmm2_kernel<<<dimGrid,dimBlock,nshmem>>>(
gA, nr_A, nc_A,
gB, nr_B, nc_B, gC, nr_C, nc_C,
ntcells_A, ntcells_B, ntcells_C,
tilesize_r, tilesize_c, tilesize_m,
ntiles_c, ntiles_c_A, ntiles_c_B,
ncells_A, ncells_B, ncells_C
);
// check kernel result
cudaThreadSynchronize();
cudaError_t crc = cudaGetLastError();
if(crc) {
printf("emptyKernel error=%d:%s\n", crc, cudaGetErrorString(crc));
exit(1);
}
}
// kernel function for gmm2: shared-memory tiled matrix multiply
__global__ void gmm2_kernel(float *gA, size_t nr_A, size_t nc_A,
float *gB, size_t nr_B, size_t nc_B,
float *gC, size_t nr_C, size_t nc_C,
size_t ntcells_A, size_t ntcells_B, size_t ntcells_C,
size_t tilesize_r, size_t tilesize_c, size_t tilesize_m,
size_t ntiles_c, size_t ntiles_c_A, size_t ntiles_c_B,
size_t ncells_A, size_t ncells_B, size_t ncells_C) {
// get index for tile
size_t i = blockIdx.x+gridDim.x*blockIdx.y;
// get block and thread information
size_t blksz = blockDim.x;
size_t tidx = threadIdx.x;
// get shared memory
extern __shared__ float shmem[];
float *ptra = &shmem[0];
float *ptrb = &shmem[ntcells_A];
float *ptrc = &shmem[ntcells_A+ntcells_B];
// initialize shared memory
#pragma unroll
for(size_t ii=tidx; ii<ntcells_C; ii+=blksz) {
ptrc[ii] = 0.0;
}
// get tile r and tile c
size_t ir = i/ntiles_c;
size_t ic = i%ntiles_c;
// check boundary condition
size_t ntcbm1 = ntiles_c_B-1;
size_t ncolb = ic==ntcbm1 ? (nc_B-ntcbm1*tilesize_c) : tilesize_c;
size_t ncolc = ncolb;
// iterate through corresponding tiles for A and B
for(size_t j=0; j<ntiles_c_A; j++) {
// check boundary condition
size_t ntcam1 = ntiles_c_A-1;
size_t ncola = j==ntcam1 ? (nc_A-ntcam1*tilesize_m) : tilesize_m;
// load tiles for A into shared memory
for(size_t ii=tidx; ii<ntcells_A; ii+=blksz) {
// within cell idx
size_t tr = ii/ncola;
size_t tc = ii%ncola;
// absolute A index
size_t aidx = (ir*tilesize_r+tr)*nc_A;
aidx += j*tilesize_m+tc;
// load to shared memory
if(aidx < ncells_A) {
ptra[tr*ncola+tc] = gA[aidx];
}
}
// load tiles for B into shared memory
for(size_t ii=tidx; ii<ntcells_B; ii+=blksz) {
// within tile idx
size_t tr = ii/ncolb;
size_t tc = ii%ncolb;
// absolute B index
size_t bidx = (j*tilesize_m+tr)*nc_B;
bidx += ic*tilesize_c+tc;
// load to shared memory
if(bidx < ncells_B) {
ptrb[tr*ncolb+tc] = gB[bidx];
}
}
__syncthreads();
// make mat mult in shared memory
for(size_t ii=tidx; ii<tilesize_r*ncolc; ii+=blksz) {
// within tile idx
size_t tr = ii/ncolc;
size_t tc = ii%ncolc;
// vector dot product
#pragma unroll
for(size_t jj=0; jj<ncola; jj++){
float prod = ptra[tr*ncola+jj]*ptrb[jj*ncolb+tc];
ptrc[tr*ncolc+tc] += prod;
}
}
}
// write from shared memory to global memory
#pragma unroll
for(size_t ii=tidx; ii<tilesize_r*ncolc; ii+=blksz) {
// within tile idx
size_t tr = ii/ncolc;
size_t tc = ii%ncolc;
// absolute C index
size_t cidx = (ir*tilesize_r+tr)*nc_C;
cidx += ic*tilesize_c+tc;
// write to global memory
if(cidx < ncells_C) {
gC[cidx] = ptrc[tr*ncolc+tc];
}
}
}
|
42646da01895992a37fd9ab57e87e40b74e2ed4f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/mem_eff_attention/kernel_backward.h>
using namespace PyTorchMemEffAttention;
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 128, 64, 96>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 128, 64, 96>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_128x64_k96_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 128, 64, 96>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 128, 64, 96>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_128x64_k96_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
| 42646da01895992a37fd9ab57e87e40b74e2ed4f.cu | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
using namespace PyTorchMemEffAttention;
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 128, 64, 96>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 128, 64, 96>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_128x64_k96_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 128, 64, 96>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 128, 64, 96>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_128x64_k96_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
|
95d7b8f76b694eafd13a2a7f2fb7ccfb253b5274.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/entropy_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
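// Forward kernel: for each spatial position, accumulate the entropy
// H = -sum_c p_c * log(max(p_c, FLT_MIN)) over the softmax channels.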
template <typename Dtype>
__global__ void EntropyLossForwardGPU(const int nthreads,
const Dtype* prob_data, Dtype* entropy, const int num, const int dim,
const int spatial_dim) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
for (int c = 0; c < channels; ++c) {
const Dtype p = prob_data[n * dim + c * spatial_dim + s];
entropy[n * spatial_dim + s] -= p * log(max(p, Dtype(FLT_MIN)));
}
}
}
template <typename Dtype>
void EntropyLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
// The forward pass computes the softmax prob values.
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
Dtype* entropy_data = entropy_.mutable_gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Init buffers
caffe_gpu_set(nthreads, Dtype(0), entropy_data);
// Compute element-wise entropy
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( EntropyLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, entropy_data, outer_num_,
dim, inner_num_);
Dtype loss = 0;
caffe_gpu_asum(nthreads, entropy_data, &loss);
top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_);
}
template <typename Dtype>
__global__ void EntropyLossBackwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* entropy_data, Dtype* bottom_diff,
const int num, const int dim, const int spatial_dim) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const Dtype H = entropy_data[n * spatial_dim + s];
for (int c = 0; c < channels; ++c) {
const Dtype p = prob_data[n * dim + c * spatial_dim + s];
bottom_diff[n * dim + c * spatial_dim + s] = Dtype(-1.) * p * (
H + log(max(p, Dtype(FLT_MIN))));
}
}
}
template <typename Dtype>
void EntropyLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* entropy_data = entropy_.gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( EntropyLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, entropy_data,
bottom_diff, outer_num_, dim, inner_num_);
// Scale gradient
Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_);
caffe_gpu_scal(prob_.count(), loss_weight, bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(EntropyLossLayer);
} // namespace caffe
| 95d7b8f76b694eafd13a2a7f2fb7ccfb253b5274.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/entropy_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void EntropyLossForwardGPU(const int nthreads,
const Dtype* prob_data, Dtype* entropy, const int num, const int dim,
const int spatial_dim) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
for (int c = 0; c < channels; ++c) {
const Dtype p = prob_data[n * dim + c * spatial_dim + s];
entropy[n * spatial_dim + s] -= p * log(max(p, Dtype(FLT_MIN)));
}
}
}
template <typename Dtype>
void EntropyLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
// The forward pass computes the softmax prob values.
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
Dtype* entropy_data = entropy_.mutable_gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Init buffers
caffe_gpu_set(nthreads, Dtype(0), entropy_data);
// Compute element-wise entropy
// NOLINT_NEXT_LINE(whitespace/operators)
EntropyLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, entropy_data, outer_num_,
dim, inner_num_);
Dtype loss = 0;
caffe_gpu_asum(nthreads, entropy_data, &loss);
top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_);
}
template <typename Dtype>
__global__ void EntropyLossBackwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* entropy_data, Dtype* bottom_diff,
const int num, const int dim, const int spatial_dim) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const Dtype H = entropy_data[n * spatial_dim + s];
for (int c = 0; c < channels; ++c) {
const Dtype p = prob_data[n * dim + c * spatial_dim + s];
bottom_diff[n * dim + c * spatial_dim + s] = Dtype(-1.) * p * (
H + log(max(p, Dtype(FLT_MIN))));
}
}
}
template <typename Dtype>
void EntropyLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* entropy_data = entropy_.gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// NOLINT_NEXT_LINE(whitespace/operators)
EntropyLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, entropy_data,
bottom_diff, outer_num_, dim, inner_num_);
// Scale gradient
Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_);
caffe_gpu_scal(prob_.count(), loss_weight, bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(EntropyLossLayer);
} // namespace caffe
|
682d0e9b36fc46d3ab42139bf95cf544ed438b79.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "reduce.h"
__device__ double merge(double old,double opOutput,double *extraParams) {
return opOutput + old;
}
__device__ double update(double old,double opOutput,double *extraParams) {
return opOutput + old;
}
__device__ double op(double d1,double *extraParams) {
return d1;
}
__device__ double postProcess(double reduction,int n,int xOffset,double *dx,int incx,double *extraParams,double *result) {
return reduction / (double) n;
}
extern "C"
__global__ void mean_strided_double(int n, int xOffset,double *dx,int incx,double *extraParams,double *result) {
transform(n,xOffset,dx,incx,extraParams,result);
}
| 682d0e9b36fc46d3ab42139bf95cf544ed438b79.cu | #include "reduce.h"
__device__ double merge(double old,double opOutput,double *extraParams) {
return opOutput + old;
}
__device__ double update(double old,double opOutput,double *extraParams) {
return opOutput + old;
}
__device__ double op(double d1,double *extraParams) {
return d1;
}
__device__ double postProcess(double reduction,int n,int xOffset,double *dx,int incx,double *extraParams,double *result) {
return reduction / (double) n;
}
extern "C"
__global__ void mean_strided_double(int n, int xOffset,double *dx,int incx,double *extraParams,double *result) {
transform(n,xOffset,dx,incx,extraParams,result);
}
|
3a9085ac6d0e1325f0f4d3ae917c741a251460e3.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "gpuMatMul.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
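// Benchmark driver: for every matrix size and launch configuration above, round
// the grid up to a multiple of the block size, warm up with 10 launches, then
// time 1000 launches and print [elapsed_us, (block dims), (matrix dims)]. Note
// there is no device synchronize inside the timed region, so the figure largely
// reflects launch/queueing cost.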
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
float *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
float *c = NULL;
hipMalloc(&c, XSIZE*YSIZE);
int m = 2;
int n = XSIZE*YSIZE;
int p = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(( gpuMatMul), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c,m,n,p);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(( gpuMatMul), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c,m,n,p);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(( gpuMatMul), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c,m,n,p);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 3a9085ac6d0e1325f0f4d3ae917c741a251460e3.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "gpuMatMul.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
float *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
float *c = NULL;
cudaMalloc(&c, XSIZE*YSIZE);
int m = 2;
int n = XSIZE*YSIZE;
int p = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
gpuMatMul<<<gridBlock,threadBlock>>>(a,b,c,m,n,p);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
gpuMatMul<<<gridBlock,threadBlock>>>(a,b,c,m,n,p);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
gpuMatMul<<<gridBlock,threadBlock>>>(a,b,c,m,n,p);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
516bd49339514d7955aeb0a86b17615f0a6889ec.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
/* Computation of eigenvalues of a large symmetric, tridiagonal matrix */
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
// includes, project
#include "cutil_inline.h"
#include "config.h"
#include "structs.h"
#include "util.h"
#include "matlab.h"
#include "bisect_large.cuh"
// includes, kernels
#include "bisect_kernel_large.cu"
#include "bisect_kernel_large_onei.cu"
#include "bisect_kernel_large_multi.cu"
////////////////////////////////////////////////////////////////////////////////
//! Initialize variables and memory for result
//! @param result handles to memory
//! @param mat_size size of the matrix
////////////////////////////////////////////////////////////////////////////////
void
initResultDataLargeMatrix( ResultDataLarge& result, const unsigned int mat_size) {
// helper variables to initialize memory
unsigned int zero = 0;
unsigned int mat_size_f = sizeof(float) * mat_size;
unsigned int mat_size_ui = sizeof(unsigned int) * mat_size;
float* tempf = (float*) malloc( mat_size_f);
unsigned int* tempui = (unsigned int*) malloc( mat_size_ui);
for( unsigned int i = 0; i < mat_size; ++i) {
tempf[i] = 0.0f;
tempui[i] = 0;
}
// number of intervals containing only one eigenvalue after the first step
cutilSafeCall( hipMalloc( (void**) &result.g_num_one,
sizeof( unsigned int)) );
cutilSafeCall( hipMemcpy( result.g_num_one, &zero, sizeof(unsigned int),
hipMemcpyHostToDevice));
// number of (thread) blocks of intervals with multiple eigenvalues after
// the first iteration
cutilSafeCall( hipMalloc( (void**) &result.g_num_blocks_mult,
sizeof(unsigned int)));
cutilSafeCall( hipMemcpy( result.g_num_blocks_mult, &zero,
sizeof(unsigned int),
hipMemcpyHostToDevice ));
cutilSafeCall( hipMalloc( (void**) &result.g_left_one, mat_size_f));
cutilSafeCall( hipMalloc( (void**) &result.g_right_one, mat_size_f));
cutilSafeCall( hipMalloc( (void**) &result.g_pos_one, mat_size_ui));
cutilSafeCall( hipMalloc( (void**) &result.g_left_mult, mat_size_f));
cutilSafeCall( hipMalloc( (void**) &result.g_right_mult, mat_size_f));
cutilSafeCall( hipMalloc( (void**) &result.g_left_count_mult,
mat_size_ui));
cutilSafeCall( hipMalloc( (void**) &result.g_right_count_mult,
mat_size_ui));
cutilSafeCall( hipMemcpy( result.g_left_one, tempf, mat_size_f,
hipMemcpyHostToDevice));
cutilSafeCall( hipMemcpy( result.g_right_one, tempf, mat_size_f,
hipMemcpyHostToDevice));
cutilSafeCall( hipMemcpy( result.g_pos_one, tempui, mat_size_ui,
hipMemcpyHostToDevice));
cutilSafeCall( hipMemcpy( result.g_left_mult, tempf, mat_size_f,
hipMemcpyHostToDevice));
cutilSafeCall( hipMemcpy( result.g_right_mult, tempf, mat_size_f,
hipMemcpyHostToDevice));
cutilSafeCall( hipMemcpy( result.g_left_count_mult, tempui, mat_size_ui,
hipMemcpyHostToDevice));
cutilSafeCall( hipMemcpy( result.g_right_count_mult, tempui, mat_size_ui,
hipMemcpyHostToDevice));
cutilSafeCall( hipMalloc( (void**) &result.g_blocks_mult, mat_size_ui));
cutilSafeCall( hipMemcpy( result.g_blocks_mult, tempui, mat_size_ui,
hipMemcpyHostToDevice ));
cutilSafeCall(hipMalloc((void**) &result.g_blocks_mult_sum, mat_size_ui));
cutilSafeCall( hipMemcpy( result.g_blocks_mult_sum, tempui, mat_size_ui,
hipMemcpyHostToDevice ));
cutilSafeCall( hipMalloc( (void**) &result.g_lambda_mult, mat_size_f));
cutilSafeCall( hipMemcpy( result.g_lambda_mult, tempf, mat_size_f,
hipMemcpyHostToDevice ));
cutilSafeCall( hipMalloc( (void**) &result.g_pos_mult, mat_size_ui));
cutilSafeCall( hipMemcpy( result.g_pos_mult, tempf, mat_size_ui,
hipMemcpyHostToDevice ));
}
////////////////////////////////////////////////////////////////////////////////
//! Cleanup result memory
//! @param result handles to memory
////////////////////////////////////////////////////////////////////////////////
void
cleanupResultDataLargeMatrix( ResultDataLarge& result) {
cutilSafeCall( hipFree( result.g_num_one));
cutilSafeCall( hipFree( result.g_num_blocks_mult));
cutilSafeCall( hipFree( result.g_left_one));
cutilSafeCall( hipFree( result.g_right_one));
cutilSafeCall( hipFree( result.g_pos_one));
cutilSafeCall( hipFree( result.g_left_mult));
cutilSafeCall( hipFree( result.g_right_mult));
cutilSafeCall( hipFree( result.g_left_count_mult));
cutilSafeCall( hipFree( result.g_right_count_mult));
cutilSafeCall( hipFree( result.g_blocks_mult));
cutilSafeCall( hipFree( result.g_blocks_mult_sum));
cutilSafeCall( hipFree( result.g_lambda_mult));
cutilSafeCall( hipFree( result.g_pos_mult));
}
////////////////////////////////////////////////////////////////////////////////
//! Run the kernels to compute the eigenvalues for large matrices
//! @param input handles to input data
//! @param result handles to result data
//! @param mat_size matrix size
//! @param precision desired precision of eigenvalues
//! @param lg lower limit of Gerschgorin interval
//! @param ug upper limit of Gerschgorin interval
//! @param iterations number of iterations (for timing)
////////////////////////////////////////////////////////////////////////////////
void
computeEigenvaluesLargeMatrix( const InputData& input, const ResultDataLarge& result,
const unsigned int mat_size, const float precision,
const float lg, const float ug,
const unsigned int iterations )
{
dim3 blocks( 1, 1, 1);
dim3 threads( MAX_THREADS_BLOCK, 1, 1);
unsigned int timer_step1 = 0;
unsigned int timer_step2_one = 0;
unsigned int timer_step2_mult = 0;
unsigned int timer_total = 0;
cutilCheckError( cutCreateTimer( &timer_step1));
cutilCheckError( cutCreateTimer( &timer_step2_one));
cutilCheckError( cutCreateTimer( &timer_step2_mult));
cutilCheckError( cutCreateTimer( &timer_total));
cutilCheckError( cutStartTimer( timer_total));
// do for multiple iterations to improve timing accuracy
for( unsigned int iter = 0; iter < iterations; ++iter) {
cutilCheckError( cutStartTimer( timer_step1));
hipLaunchKernelGGL(( bisectKernelLarge), dim3(blocks), dim3(threads) , 0, 0,
input.g_a, input.g_b, mat_size,
lg, ug, 0, mat_size, precision,
result.g_num_one, result.g_num_blocks_mult,
result.g_left_one, result.g_right_one, result.g_pos_one,
result.g_left_mult, result.g_right_mult,
result.g_left_count_mult, result.g_right_count_mult,
result.g_blocks_mult, result.g_blocks_mult_sum
);
cutilSafeCall( hipDeviceSynchronize());
cutilCheckError( cutStopTimer( timer_step1));
cutilCheckMsg( "Kernel launch failed.");
// get the number of intervals containing one eigenvalue after the first
// processing step
unsigned int num_one_intervals;
cutilSafeCall( hipMemcpy( &num_one_intervals, result.g_num_one,
sizeof(unsigned int),
hipMemcpyDeviceToHost));
dim3 grid_onei;
grid_onei.x = getNumBlocksLinear( num_one_intervals, MAX_THREADS_BLOCK);
dim3 threads_onei;
        // always use the maximum number of available threads to better balance the
        // load times for the matrix data
threads_onei.x = MAX_THREADS_BLOCK;
// compute eigenvalues for intervals that contained only one eigenvalue
// after the first processing step
cutilCheckError( cutStartTimer( timer_step2_one));
hipLaunchKernelGGL(( bisectKernelLarge_OneIntervals), dim3(grid_onei) , dim3(threads_onei) , 0, 0,
input.g_a, input.g_b, mat_size, num_one_intervals,
result.g_left_one, result.g_right_one, result.g_pos_one,
precision
);
cutilSafeCall( hipDeviceSynchronize());
cutilCheckError( cutStopTimer( timer_step2_one));
// process intervals that contained more than one eigenvalue after
// the first processing step
        // get the number of blocks of intervals that require, once every interval
        // contains only a single eigenvalue, not more than MAX_THREADS_BLOCK
        // threads in total
unsigned int num_blocks_mult = 0;
cutilSafeCall( hipMemcpy( &num_blocks_mult, result.g_num_blocks_mult,
sizeof( unsigned int),
hipMemcpyDeviceToHost));
// setup the execution environment
dim3 grid_mult( num_blocks_mult, 1, 1);
dim3 threads_mult( MAX_THREADS_BLOCK, 1, 1);
cutilCheckError( cutStartTimer( timer_step2_mult));
hipLaunchKernelGGL(( bisectKernelLarge_MultIntervals), dim3(grid_mult), dim3(threads_mult) , 0, 0,
input.g_a, input.g_b, mat_size,
result.g_blocks_mult, result.g_blocks_mult_sum,
result.g_left_mult, result.g_right_mult,
result.g_left_count_mult, result.g_right_count_mult,
result.g_lambda_mult, result.g_pos_mult,
precision
);
cutilCheckError( cutStopTimer( timer_step2_mult));
cutilCheckMsg( "bisectKernelLarge_MultIntervals() FAILED.");
}
cutilCheckError( cutStopTimer( timer_total));
printf( "Average time step 1: %f ms\n",
cutGetTimerValue( timer_step1) / (float) iterations );
printf( "Average time step 2, one intervals: %f ms\n",
cutGetTimerValue( timer_step2_one) / (float) iterations );
printf( "Average time step 2, mult intervals: %f ms\n",
cutGetTimerValue( timer_step2_mult) / (float) iterations );
printf( "Average time TOTAL: %f ms\n",
cutGetTimerValue( timer_total) / (float) iterations );
cutilCheckError( cutDeleteTimer( timer_step1));
cutilCheckError( cutDeleteTimer( timer_step2_one));
cutilCheckError( cutDeleteTimer( timer_step2_mult));
cutilCheckError( cutDeleteTimer( timer_total));
}
////////////////////////////////////////////////////////////////////////////////
//! Process the result, that is obtain result from device and do simple sanity
//! checking
//! @param input handles to input data
//! @param result handles to result data
//! @param mat_size matrix size
//! @param filename output filename
////////////////////////////////////////////////////////////////////////////////
void
processResultDataLargeMatrix( const InputData& input, const ResultDataLarge& result,
const unsigned int mat_size,
const char* filename,
const unsigned int user_defined, char* exec_path)
{
const unsigned int mat_size_ui = sizeof(unsigned int) * mat_size;
const unsigned int mat_size_f = sizeof(float) * mat_size;
// copy data from intervals that contained more than one eigenvalue after
// the first processing step
float* lambda_mult = (float*) malloc( sizeof(float) * mat_size);
cutilSafeCall( hipMemcpy( lambda_mult, result.g_lambda_mult,
sizeof(float) * mat_size,
hipMemcpyDeviceToHost ));
unsigned int* pos_mult =
(unsigned int*) malloc( sizeof(unsigned int) * mat_size);
cutilSafeCall( hipMemcpy( pos_mult, result.g_pos_mult,
sizeof(unsigned int) * mat_size,
hipMemcpyDeviceToHost ));
unsigned int* blocks_mult_sum =
(unsigned int*) malloc( sizeof(unsigned int) * mat_size);
cutilSafeCall( hipMemcpy( blocks_mult_sum, result.g_blocks_mult_sum,
sizeof( unsigned int) * mat_size,
hipMemcpyDeviceToHost ));
unsigned int num_one_intervals;
cutilSafeCall( hipMemcpy( &num_one_intervals, result.g_num_one,
sizeof(unsigned int),
hipMemcpyDeviceToHost));
unsigned int sum_blocks_mult = mat_size - num_one_intervals;
// copy data for intervals that contained one eigenvalue after the first
// processing step
float* left_one = (float*) malloc( mat_size_f);
float* right_one = (float*) malloc( mat_size_f);
unsigned int* pos_one = (unsigned int*) malloc( mat_size_ui);
cutilSafeCall( hipMemcpy( left_one, result.g_left_one, mat_size_f,
hipMemcpyDeviceToHost) );
cutilSafeCall( hipMemcpy( right_one, result.g_right_one, mat_size_f,
hipMemcpyDeviceToHost) );
cutilSafeCall( hipMemcpy( pos_one, result.g_pos_one, mat_size_ui,
hipMemcpyDeviceToHost) );
// extract eigenvalues
float* eigenvals = (float*) malloc( mat_size_f);
// singleton intervals generated in the second step
for( unsigned int i = 0; i < sum_blocks_mult; ++i) {
eigenvals[pos_mult[i] - 1] = lambda_mult[i];
}
// singleton intervals generated in the first step
unsigned int index = 0;
for( unsigned int i = 0; i < num_one_intervals; ++i, ++index) {
eigenvals[pos_one[i] - 1] = left_one[i];
}
if( 1 == user_defined) {
// store result
writeTridiagSymMatlab( filename, input.a, input.b+1, eigenvals, mat_size);
// cutilCheckError( cutWriteFilef( filename, eigenvals, mat_size, 0.0f));
}
else {
// compare with reference solution
float* reference = NULL;
unsigned int input_data_size = 0;
char* ref_path = cutFindFilePath( "reference.dat", exec_path);
cutilCondition( 0 != ref_path);
cutilCheckError( cutReadFilef( ref_path, &reference, &input_data_size));
cutilCondition( input_data_size == mat_size);
        // there's an imprecision in the Sturm count computation which makes an
// additional offset necessary
float tolerance = 1.0e-5f + 5.0e-6f;
if( CUTTrue == cutComparefe( reference, eigenvals, mat_size, tolerance)) {
printf( "\nPASSED.\n");
}
else {
printf( "FAILED.\n");
}
cutFree( ref_path);
cutFree( reference);
}
freePtr( eigenvals);
freePtr( lambda_mult);
freePtr( pos_mult);
freePtr( blocks_mult_sum);
freePtr( left_one);
freePtr( right_one);
freePtr( pos_one);
}
| 516bd49339514d7955aeb0a86b17615f0a6889ec.cu | /*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
/* Computation of eigenvalues of a large symmetric, tridiagonal matrix */
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
// includes, project
#include "cutil_inline.h"
#include "config.h"
#include "structs.h"
#include "util.h"
#include "matlab.h"
#include "bisect_large.cuh"
// includes, kernels
#include "bisect_kernel_large.cu"
#include "bisect_kernel_large_onei.cu"
#include "bisect_kernel_large_multi.cu"
////////////////////////////////////////////////////////////////////////////////
//! Initialize variables and memory for result
//! @param result handles to memory
//! @param matrix_size size of the matrix
////////////////////////////////////////////////////////////////////////////////
void
initResultDataLargeMatrix( ResultDataLarge& result, const unsigned int mat_size) {
// helper variables to initialize memory
unsigned int zero = 0;
unsigned int mat_size_f = sizeof(float) * mat_size;
unsigned int mat_size_ui = sizeof(unsigned int) * mat_size;
float* tempf = (float*) malloc( mat_size_f);
unsigned int* tempui = (unsigned int*) malloc( mat_size_ui);
for( unsigned int i = 0; i < mat_size; ++i) {
tempf[i] = 0.0f;
tempui[i] = 0;
}
// number of intervals containing only one eigenvalue after the first step
cutilSafeCall( cudaMalloc( (void**) &result.g_num_one,
sizeof( unsigned int)) );
cutilSafeCall( cudaMemcpy( result.g_num_one, &zero, sizeof(unsigned int),
cudaMemcpyHostToDevice));
// number of (thread) blocks of intervals with multiple eigenvalues after
// the first iteration
cutilSafeCall( cudaMalloc( (void**) &result.g_num_blocks_mult,
sizeof(unsigned int)));
cutilSafeCall( cudaMemcpy( result.g_num_blocks_mult, &zero,
sizeof(unsigned int),
cudaMemcpyHostToDevice ));
cutilSafeCall( cudaMalloc( (void**) &result.g_left_one, mat_size_f));
cutilSafeCall( cudaMalloc( (void**) &result.g_right_one, mat_size_f));
cutilSafeCall( cudaMalloc( (void**) &result.g_pos_one, mat_size_ui));
cutilSafeCall( cudaMalloc( (void**) &result.g_left_mult, mat_size_f));
cutilSafeCall( cudaMalloc( (void**) &result.g_right_mult, mat_size_f));
cutilSafeCall( cudaMalloc( (void**) &result.g_left_count_mult,
mat_size_ui));
cutilSafeCall( cudaMalloc( (void**) &result.g_right_count_mult,
mat_size_ui));
cutilSafeCall( cudaMemcpy( result.g_left_one, tempf, mat_size_f,
cudaMemcpyHostToDevice));
cutilSafeCall( cudaMemcpy( result.g_right_one, tempf, mat_size_f,
cudaMemcpyHostToDevice));
cutilSafeCall( cudaMemcpy( result.g_pos_one, tempui, mat_size_ui,
cudaMemcpyHostToDevice));
cutilSafeCall( cudaMemcpy( result.g_left_mult, tempf, mat_size_f,
cudaMemcpyHostToDevice));
cutilSafeCall( cudaMemcpy( result.g_right_mult, tempf, mat_size_f,
cudaMemcpyHostToDevice));
cutilSafeCall( cudaMemcpy( result.g_left_count_mult, tempui, mat_size_ui,
cudaMemcpyHostToDevice));
cutilSafeCall( cudaMemcpy( result.g_right_count_mult, tempui, mat_size_ui,
cudaMemcpyHostToDevice));
cutilSafeCall( cudaMalloc( (void**) &result.g_blocks_mult, mat_size_ui));
cutilSafeCall( cudaMemcpy( result.g_blocks_mult, tempui, mat_size_ui,
cudaMemcpyHostToDevice ));
cutilSafeCall(cudaMalloc((void**) &result.g_blocks_mult_sum, mat_size_ui));
cutilSafeCall( cudaMemcpy( result.g_blocks_mult_sum, tempui, mat_size_ui,
cudaMemcpyHostToDevice ));
cutilSafeCall( cudaMalloc( (void**) &result.g_lambda_mult, mat_size_f));
cutilSafeCall( cudaMemcpy( result.g_lambda_mult, tempf, mat_size_f,
cudaMemcpyHostToDevice ));
cutilSafeCall( cudaMalloc( (void**) &result.g_pos_mult, mat_size_ui));
cutilSafeCall( cudaMemcpy( result.g_pos_mult, tempf, mat_size_ui,
cudaMemcpyHostToDevice ));
}
////////////////////////////////////////////////////////////////////////////////
//! Cleanup result memory
//! @param result handles to memory
////////////////////////////////////////////////////////////////////////////////
void
cleanupResultDataLargeMatrix( ResultDataLarge& result) {
cutilSafeCall( cudaFree( result.g_num_one));
cutilSafeCall( cudaFree( result.g_num_blocks_mult));
cutilSafeCall( cudaFree( result.g_left_one));
cutilSafeCall( cudaFree( result.g_right_one));
cutilSafeCall( cudaFree( result.g_pos_one));
cutilSafeCall( cudaFree( result.g_left_mult));
cutilSafeCall( cudaFree( result.g_right_mult));
cutilSafeCall( cudaFree( result.g_left_count_mult));
cutilSafeCall( cudaFree( result.g_right_count_mult));
cutilSafeCall( cudaFree( result.g_blocks_mult));
cutilSafeCall( cudaFree( result.g_blocks_mult_sum));
cutilSafeCall( cudaFree( result.g_lambda_mult));
cutilSafeCall( cudaFree( result.g_pos_mult));
}
////////////////////////////////////////////////////////////////////////////////
//! Run the kernels to compute the eigenvalues for large matrices
//! @param input handles to input data
//! @param result handles to result data
//! @param mat_size matrix size
//! @param precision desired precision of eigenvalues
//! @param lg lower limit of Gerschgorin interval
//! @param ug upper limit of Gerschgorin interval
//! @param iterations number of iterations (for timing)
////////////////////////////////////////////////////////////////////////////////
void
computeEigenvaluesLargeMatrix( const InputData& input, const ResultDataLarge& result,
const unsigned int mat_size, const float precision,
const float lg, const float ug,
const unsigned int iterations )
{
dim3 blocks( 1, 1, 1);
dim3 threads( MAX_THREADS_BLOCK, 1, 1);
unsigned int timer_step1 = 0;
unsigned int timer_step2_one = 0;
unsigned int timer_step2_mult = 0;
unsigned int timer_total = 0;
cutilCheckError( cutCreateTimer( &timer_step1));
cutilCheckError( cutCreateTimer( &timer_step2_one));
cutilCheckError( cutCreateTimer( &timer_step2_mult));
cutilCheckError( cutCreateTimer( &timer_total));
cutilCheckError( cutStartTimer( timer_total));
// do for multiple iterations to improve timing accuracy
for( unsigned int iter = 0; iter < iterations; ++iter) {
cutilCheckError( cutStartTimer( timer_step1));
bisectKernelLarge<<< blocks, threads >>>
( input.g_a, input.g_b, mat_size,
lg, ug, 0, mat_size, precision,
result.g_num_one, result.g_num_blocks_mult,
result.g_left_one, result.g_right_one, result.g_pos_one,
result.g_left_mult, result.g_right_mult,
result.g_left_count_mult, result.g_right_count_mult,
result.g_blocks_mult, result.g_blocks_mult_sum
);
cutilSafeCall( cudaThreadSynchronize());
cutilCheckError( cutStopTimer( timer_step1));
cutilCheckMsg( "Kernel launch failed.");
// get the number of intervals containing one eigenvalue after the first
// processing step
unsigned int num_one_intervals;
cutilSafeCall( cudaMemcpy( &num_one_intervals, result.g_num_one,
sizeof(unsigned int),
cudaMemcpyDeviceToHost));
dim3 grid_onei;
grid_onei.x = getNumBlocksLinear( num_one_intervals, MAX_THREADS_BLOCK);
dim3 threads_onei;
        // always use the maximum number of available threads to better balance the
        // load times for the matrix data
threads_onei.x = MAX_THREADS_BLOCK;
// compute eigenvalues for intervals that contained only one eigenvalue
// after the first processing step
cutilCheckError( cutStartTimer( timer_step2_one));
bisectKernelLarge_OneIntervals<<< grid_onei , threads_onei >>>
( input.g_a, input.g_b, mat_size, num_one_intervals,
result.g_left_one, result.g_right_one, result.g_pos_one,
precision
);
cutilSafeCall( cudaThreadSynchronize());
cutilCheckError( cutStopTimer( timer_step2_one));
// process intervals that contained more than one eigenvalue after
// the first processing step
        // get the number of blocks of intervals that require, once every interval
        // contains only a single eigenvalue, not more than MAX_THREADS_BLOCK
        // threads in total
unsigned int num_blocks_mult = 0;
cutilSafeCall( cudaMemcpy( &num_blocks_mult, result.g_num_blocks_mult,
sizeof( unsigned int),
cudaMemcpyDeviceToHost));
// setup the execution environment
dim3 grid_mult( num_blocks_mult, 1, 1);
dim3 threads_mult( MAX_THREADS_BLOCK, 1, 1);
cutilCheckError( cutStartTimer( timer_step2_mult));
bisectKernelLarge_MultIntervals<<< grid_mult, threads_mult >>>
( input.g_a, input.g_b, mat_size,
result.g_blocks_mult, result.g_blocks_mult_sum,
result.g_left_mult, result.g_right_mult,
result.g_left_count_mult, result.g_right_count_mult,
result.g_lambda_mult, result.g_pos_mult,
precision
);
cutilCheckError( cutStopTimer( timer_step2_mult));
cutilCheckMsg( "bisectKernelLarge_MultIntervals() FAILED.");
}
cutilCheckError( cutStopTimer( timer_total));
printf( "Average time step 1: %f ms\n",
cutGetTimerValue( timer_step1) / (float) iterations );
printf( "Average time step 2, one intervals: %f ms\n",
cutGetTimerValue( timer_step2_one) / (float) iterations );
printf( "Average time step 2, mult intervals: %f ms\n",
cutGetTimerValue( timer_step2_mult) / (float) iterations );
printf( "Average time TOTAL: %f ms\n",
cutGetTimerValue( timer_total) / (float) iterations );
cutilCheckError( cutDeleteTimer( timer_step1));
cutilCheckError( cutDeleteTimer( timer_step2_one));
cutilCheckError( cutDeleteTimer( timer_step2_mult));
cutilCheckError( cutDeleteTimer( timer_total));
}
////////////////////////////////////////////////////////////////////////////////
//! Process the result, that is obtain result from device and do simple sanity
//! checking
//! @param input handles to input data
//! @param result handles to result data
//! @param mat_size matrix size
//! @param filename output filename
////////////////////////////////////////////////////////////////////////////////
void
processResultDataLargeMatrix( const InputData& input, const ResultDataLarge& result,
const unsigned int mat_size,
const char* filename,
const unsigned int user_defined, char* exec_path)
{
const unsigned int mat_size_ui = sizeof(unsigned int) * mat_size;
const unsigned int mat_size_f = sizeof(float) * mat_size;
// copy data from intervals that contained more than one eigenvalue after
// the first processing step
float* lambda_mult = (float*) malloc( sizeof(float) * mat_size);
cutilSafeCall( cudaMemcpy( lambda_mult, result.g_lambda_mult,
sizeof(float) * mat_size,
cudaMemcpyDeviceToHost ));
unsigned int* pos_mult =
(unsigned int*) malloc( sizeof(unsigned int) * mat_size);
cutilSafeCall( cudaMemcpy( pos_mult, result.g_pos_mult,
sizeof(unsigned int) * mat_size,
cudaMemcpyDeviceToHost ));
unsigned int* blocks_mult_sum =
(unsigned int*) malloc( sizeof(unsigned int) * mat_size);
cutilSafeCall( cudaMemcpy( blocks_mult_sum, result.g_blocks_mult_sum,
sizeof( unsigned int) * mat_size,
cudaMemcpyDeviceToHost ));
unsigned int num_one_intervals;
cutilSafeCall( cudaMemcpy( &num_one_intervals, result.g_num_one,
sizeof(unsigned int),
cudaMemcpyDeviceToHost));
unsigned int sum_blocks_mult = mat_size - num_one_intervals;
// copy data for intervals that contained one eigenvalue after the first
// processing step
float* left_one = (float*) malloc( mat_size_f);
float* right_one = (float*) malloc( mat_size_f);
unsigned int* pos_one = (unsigned int*) malloc( mat_size_ui);
cutilSafeCall( cudaMemcpy( left_one, result.g_left_one, mat_size_f,
cudaMemcpyDeviceToHost) );
cutilSafeCall( cudaMemcpy( right_one, result.g_right_one, mat_size_f,
cudaMemcpyDeviceToHost) );
cutilSafeCall( cudaMemcpy( pos_one, result.g_pos_one, mat_size_ui,
cudaMemcpyDeviceToHost) );
// extract eigenvalues
float* eigenvals = (float*) malloc( mat_size_f);
// singleton intervals generated in the second step
for( unsigned int i = 0; i < sum_blocks_mult; ++i) {
eigenvals[pos_mult[i] - 1] = lambda_mult[i];
}
// singleton intervals generated in the first step
unsigned int index = 0;
for( unsigned int i = 0; i < num_one_intervals; ++i, ++index) {
eigenvals[pos_one[i] - 1] = left_one[i];
}
if( 1 == user_defined) {
// store result
writeTridiagSymMatlab( filename, input.a, input.b+1, eigenvals, mat_size);
// cutilCheckError( cutWriteFilef( filename, eigenvals, mat_size, 0.0f));
}
else {
// compare with reference solution
float* reference = NULL;
unsigned int input_data_size = 0;
char* ref_path = cutFindFilePath( "reference.dat", exec_path);
cutilCondition( 0 != ref_path);
cutilCheckError( cutReadFilef( ref_path, &reference, &input_data_size));
cutilCondition( input_data_size == mat_size);
        // there's an imprecision in the Sturm count computation which makes an
// additional offset necessary
float tolerance = 1.0e-5f + 5.0e-6f;
if( CUTTrue == cutComparefe( reference, eigenvals, mat_size, tolerance)) {
printf( "\nPASSED.\n");
}
else {
printf( "FAILED.\n");
}
cutFree( ref_path);
cutFree( reference);
}
freePtr( eigenvals);
freePtr( lambda_mult);
freePtr( pos_mult);
freePtr( blocks_mult_sum);
freePtr( left_one);
freePtr( right_one);
freePtr( pos_one);
}
|
9b2cb851310f653aed7fea6cebd4cf181248d32f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <graph_contracting_visitor.hxx>
namespace nvgraph
{
//------------------------- Graph Contraction: ----------------------
//
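// contract_graph_csr_max contracts the input CSR graph according to the
// vertex-to-aggregate mapping pV (presumably one aggregate id per vertex,
// n entries). VCombine/VReduce and ECombine/EReduce select the semiring
// functors applied to vertex and edge data; this entry point fixes the
// functor type to the Max semiring over double.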
CsrGraph<int>* contract_graph_csr_max(CsrGraph<int>& graph,
int* pV, size_t n,
hipStream_t stream,
const int& VCombine,
const int& VReduce,
const int& ECombine,
const int& EReduce)
{
return contract_from_aggregates_t<int, double, SemiRingFctrSelector<Max, double>::FctrType >(graph, pV, n, stream,
static_cast<SemiRingFunctorTypes>(VCombine),
static_cast<SemiRingFunctorTypes>(VReduce),
static_cast<SemiRingFunctorTypes>(ECombine),
static_cast<SemiRingFunctorTypes>(EReduce));
}
}
| 9b2cb851310f653aed7fea6cebd4cf181248d32f.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <graph_contracting_visitor.hxx>
namespace nvgraph
{
//------------------------- Graph Contraction: ----------------------
//
CsrGraph<int>* contract_graph_csr_max(CsrGraph<int>& graph,
int* pV, size_t n,
cudaStream_t stream,
const int& VCombine,
const int& VReduce,
const int& ECombine,
const int& EReduce)
{
return contract_from_aggregates_t<int, double, SemiRingFctrSelector<Max, double>::FctrType >(graph, pV, n, stream,
static_cast<SemiRingFunctorTypes>(VCombine),
static_cast<SemiRingFunctorTypes>(VReduce),
static_cast<SemiRingFunctorTypes>(ECombine),
static_cast<SemiRingFunctorTypes>(EReduce));
}
}
|
3f6210bf532628f55bbd1d75c411b6313c3a1c3e.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
__device__ int diverge_gpu(float c_re, float c_im, int max) {
float z_re = c_re, z_im = c_im;
int i;
for (i = 0; i < max; ++i) {
if (z_re * z_re + z_im * z_im > 4.f)
break;
float new_re = z_re * z_re - z_im * z_im;
float new_im = 2.f * z_re * z_im;
z_re = c_re + new_re;
z_im = c_im + new_im;
}
return i;
}
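// diverge_gpu is the classic Mandelbrot escape-time test: starting from
// z = c, iterate z <- z^2 + c and return the number of iterations performed
// before |z|^2 exceeds 4 (or `max` if the point never escapes).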
__global__ void mandelKernel(int *c, float lowerX, float lowerY, float stepX, float stepY, int resX, int resY, int maxIterations) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int thisX = index % resX;
int thisY = index / resX;
//int *partc = (int *) ((char *) c + thisX * pitch);
float x = lowerX + thisX * stepX;
float y = lowerY + thisY * stepY;
c[index] = diverge_gpu(x, y, maxIterations);
}
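// The kernel uses a flat 1D launch: each thread maps its global index back to
// a pixel (thisX, thisY) of the resX x resY image. There is no bounds check,
// so the launch in hostFE below assumes resX * resY is a multiple of the
// block size.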
// Host front-end function that allocates the memory and launches the GPU kernel
void hostFE (float upperX, float upperY, float lowerX, float lowerY, int* img, int resX, int resY, int maxIterations)
{
float stepX = (upperX - lowerX) / resX;
float stepY = (upperY - lowerY) / resY;
int size = resX * resY;
int *h_c;
int *d_c;
size_t pitch;
//h_c = (int *)malloc(size * sizeof(int));
hipHostMalloc(&h_c, sizeof(int)*size, hipHostMallocMapped);
hipHostGetDevicePointer(&d_c, h_c, 0);
//hipMalloc(&d_c, size * sizeof(int));
hipMallocPitch((void **)&d_c, &pitch, sizeof(int)*resX, resY);
//hipMemcpy2D(d_c, pitch, h_c, sizeof(int)*resX, sizeof(int)*resX, resY, hipMemcpyHostToDevice);
int block_size = 1024;
//int block_size = 32;
//int grid_size = 1;
//dim3 dimBlock(block_size, block_size);
//dim3 dimGrid(1, 1);
hipLaunchKernelGGL(( mandelKernel), dim3(size/block_size), dim3(block_size), 0, 0, d_c, lowerX, lowerY, stepX, stepY, resX, resY, maxIterations);
hipDeviceSynchronize();
//hipMemcpy2D(h_c, sizeof(int)*resX, d_c, pitch, sizeof(int)*resX, resY, hipMemcpyDeviceToHost);
hipMemcpy(h_c, d_c, size*sizeof(int), hipMemcpyDeviceToHost);
memcpy(img, h_c, size * sizeof(int));
hipFree(d_c);
//free(h_c);
hipHostFree(h_c);
}
| 3f6210bf532628f55bbd1d75c411b6313c3a1c3e.cu | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
__device__ int diverge_gpu(float c_re, float c_im, int max) {
float z_re = c_re, z_im = c_im;
int i;
for (i = 0; i < max; ++i) {
if (z_re * z_re + z_im * z_im > 4.f)
break;
float new_re = z_re * z_re - z_im * z_im;
float new_im = 2.f * z_re * z_im;
z_re = c_re + new_re;
z_im = c_im + new_im;
}
return i;
}
__global__ void mandelKernel(int *c, float lowerX, float lowerY, float stepX, float stepY, int resX, int resY, int maxIterations) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int thisX = index % resX;
int thisY = index / resX;
//int *partc = (int *) ((char *) c + thisX * pitch);
float x = lowerX + thisX * stepX;
float y = lowerY + thisY * stepY;
c[index] = diverge_gpu(x, y, maxIterations);
}
// Host front-end function that allocates the memory and launches the GPU kernel
void hostFE (float upperX, float upperY, float lowerX, float lowerY, int* img, int resX, int resY, int maxIterations)
{
float stepX = (upperX - lowerX) / resX;
float stepY = (upperY - lowerY) / resY;
int size = resX * resY;
int *h_c;
int *d_c;
size_t pitch;
//h_c = (int *)malloc(size * sizeof(int));
cudaHostAlloc(&h_c, sizeof(int)*size, cudaHostAllocMapped);
cudaHostGetDevicePointer(&d_c, h_c, 0);
//cudaMalloc(&d_c, size * sizeof(int));
cudaMallocPitch((void **)&d_c, &pitch, sizeof(int)*resX, resY);
//cudaMemcpy2D(d_c, pitch, h_c, sizeof(int)*resX, sizeof(int)*resX, resY, cudaMemcpyHostToDevice);
int block_size = 1024;
//int block_size = 32;
//int grid_size = 1;
//dim3 dimBlock(block_size, block_size);
//dim3 dimGrid(1, 1);
mandelKernel<<<size/block_size, block_size>>>(d_c, lowerX, lowerY, stepX, stepY, resX, resY, maxIterations);
cudaDeviceSynchronize();
//cudaMemcpy2D(h_c, sizeof(int)*resX, d_c, pitch, sizeof(int)*resX, resY, cudaMemcpyDeviceToHost);
cudaMemcpy(h_c, d_c, size*sizeof(int), cudaMemcpyDeviceToHost);
memcpy(img, h_c, size * sizeof(int));
cudaFree(d_c);
//free(h_c);
cudaFreeHost(h_c);
}
|
b1dece6842fbf78560cd7b7903b592c7325347eb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <functional>
#include <map>
#include <vector>
#ifdef USE_ROCM
#include "thrust/functional.h"
#include "thrust/sort.h"
#endif //USE_ROCM
#include "caffe/common.hpp"
#include "caffe/device.hpp"
#include "caffe/util/bbox_util.hpp"
namespace caffe {
#ifdef USE_ROCM
template <typename Dtype>
__host__ __device__ Dtype BBoxSizeGPU(const Dtype* bbox,
const bool normalized) {
if (bbox[2] < bbox[0] || bbox[3] < bbox[1]) {
// If bbox is invalid (e.g. xmax < xmin or ymax < ymin), return 0.
return Dtype(0.);
} else {
const Dtype width = bbox[2] - bbox[0];
const Dtype height = bbox[3] - bbox[1];
if (normalized) {
return width * height;
} else {
// If bbox is not within range [0, 1].
return (width + 1) * (height + 1);
}
}
}
template __host__ __device__ float BBoxSizeGPU(const float* bbox,
const bool normalized);
template __host__ __device__ double BBoxSizeGPU(const double* bbox,
const bool normalized);
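// JaccardOverlapGPU computes the intersection-over-union (IoU) of two boxes
// stored as [xmin, ymin, xmax, ymax]: the intersection area divided by the
// union (sum of both box areas minus the intersection).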
template <typename Dtype>
__host__ __device__ Dtype JaccardOverlapGPU(const Dtype* bbox1,
const Dtype* bbox2) {
if (bbox2[0] > bbox1[2] || bbox2[2] < bbox1[0] ||
bbox2[1] > bbox1[3] || bbox2[3] < bbox1[1]) {
return Dtype(0.);
} else {
const Dtype inter_xmin = max(bbox1[0], bbox2[0]);
const Dtype inter_ymin = max(bbox1[1], bbox2[1]);
const Dtype inter_xmax = min(bbox1[2], bbox2[2]);
const Dtype inter_ymax = min(bbox1[3], bbox2[3]);
const Dtype inter_width = inter_xmax - inter_xmin;
const Dtype inter_height = inter_ymax - inter_ymin;
const Dtype inter_size = inter_width * inter_height;
const Dtype bbox1_size = BBoxSizeGPU(bbox1);
const Dtype bbox2_size = BBoxSizeGPU(bbox2);
return inter_size / (bbox1_size + bbox2_size - inter_size);
}
}
template __host__ __device__ float JaccardOverlapGPU(const float* bbox1,
const float* bbox2);
template __host__ __device__ double JaccardOverlapGPU(const double* bbox1,
const double* bbox2);
template <typename Dtype>
__device__ Dtype Min(const Dtype x, const Dtype y) {
return x < y ? x : y;
}
template <typename Dtype>
__device__ Dtype Max(const Dtype x, const Dtype y) {
return x > y ? x : y;
}
template <typename Dtype>
__device__ void ClipBBoxGPU(const Dtype* bbox, Dtype* clip_bbox) {
for (int i = 0; i < 4; ++i) {
clip_bbox[i] = Max(Min(bbox[i], Dtype(1.)), Dtype(0.));
}
}
template __device__ void ClipBBoxGPU(const float* bbox, float* clip_bbox);
template __device__ void ClipBBoxGPU(const double* bbox, double* clip_bbox);
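// DecodeBBoxesKernel turns raw location predictions into boxes, one
// coordinate per thread. Three encodings are handled: CORNER (offsets added
// to the prior's corners), CENTER_SIZE (offsets relative to the prior's
// center/size, with exp() applied to the size terms) and CORNER_SIZE (corner
// offsets scaled by the prior's width/height). If the variance is not encoded
// in the targets, each offset is additionally scaled by the prior's variance
// entries stored after the prior coordinates.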
template <typename Dtype>
__global__ void DecodeBBoxesKernel(const int nthreads,
const Dtype* loc_data, const Dtype* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, Dtype* bbox_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int i = index % 4;
const int c = (index / 4) % num_loc_classes;
const int d = (index / 4 / num_loc_classes) % num_priors;
if (!share_location && c == background_label_id) {
// Ignore background class if not share_location.
return;
}
const int pi = d * 4;
const int vi = pi + num_priors * 4;
if (code_type == PriorBoxParameter_CodeType_CORNER) {
if (variance_encoded_in_target) {
// variance is encoded in target, we simply need to add the offset
// predictions.
bbox_data[index] = prior_data[pi + i] + loc_data[index];
} else {
// variance is encoded in bbox, we need to scale the offset accordingly.
bbox_data[index] =
prior_data[pi + i] + loc_data[index] * prior_data[vi + i];
}
} else if (code_type == PriorBoxParameter_CodeType_CENTER_SIZE) {
const Dtype p_xmin = prior_data[pi];
const Dtype p_ymin = prior_data[pi + 1];
const Dtype p_xmax = prior_data[pi + 2];
const Dtype p_ymax = prior_data[pi + 3];
const Dtype prior_width = p_xmax - p_xmin;
const Dtype prior_height = p_ymax - p_ymin;
const Dtype prior_center_x = (p_xmin + p_xmax) / 2.;
const Dtype prior_center_y = (p_ymin + p_ymax) / 2.;
const Dtype xmin = loc_data[index - i];
const Dtype ymin = loc_data[index - i + 1];
const Dtype xmax = loc_data[index - i + 2];
const Dtype ymax = loc_data[index - i + 3];
Dtype decode_bbox_center_x, decode_bbox_center_y;
Dtype decode_bbox_width, decode_bbox_height;
if (variance_encoded_in_target) {
        // variance is encoded in target, we simply need to restore the offset
// predictions.
decode_bbox_center_x = xmin * prior_width + prior_center_x;
decode_bbox_center_y = ymin * prior_height + prior_center_y;
decode_bbox_width = exp(xmax) * prior_width;
decode_bbox_height = exp(ymax) * prior_height;
} else {
// variance is encoded in bbox, we need to scale the offset accordingly.
decode_bbox_center_x =
prior_data[vi] * xmin * prior_width + prior_center_x;
decode_bbox_center_y =
prior_data[vi + 1] * ymin * prior_height + prior_center_y;
decode_bbox_width =
exp(prior_data[vi + 2] * xmax) * prior_width;
decode_bbox_height =
exp(prior_data[vi + 3] * ymax) * prior_height;
}
switch (i) {
case 0:
bbox_data[index] = decode_bbox_center_x - decode_bbox_width / 2.;
break;
case 1:
bbox_data[index] = decode_bbox_center_y - decode_bbox_height / 2.;
break;
case 2:
bbox_data[index] = decode_bbox_center_x + decode_bbox_width / 2.;
break;
case 3:
bbox_data[index] = decode_bbox_center_y + decode_bbox_height / 2.;
break;
}
} else if (code_type == PriorBoxParameter_CodeType_CORNER_SIZE) {
const Dtype p_xmin = prior_data[pi];
const Dtype p_ymin = prior_data[pi + 1];
const Dtype p_xmax = prior_data[pi + 2];
const Dtype p_ymax = prior_data[pi + 3];
const Dtype prior_width = p_xmax - p_xmin;
const Dtype prior_height = p_ymax - p_ymin;
Dtype p_size;
if (i == 0 || i == 2) {
p_size = prior_width;
} else {
p_size = prior_height;
}
if (variance_encoded_in_target) {
// variance is encoded in target, we simply need to add the offset
// predictions.
bbox_data[index] = prior_data[pi + i] + loc_data[index] * p_size;
} else {
// variance is encoded in bbox, we need to scale the offset accordingly.
bbox_data[index] =
prior_data[pi + i] + loc_data[index] * prior_data[vi + i] * p_size;
}
} else {
// Unknown code type.
}
if (clip_bbox) {
bbox_data[index] = max(min(bbox_data[index], Dtype(1.)), Dtype(0.));
}
}
}
#endif //USE_ROCM
template <typename Dtype>
void DecodeBBoxesGPU(const int nthreads,
const Dtype* loc_data, const Dtype* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, Dtype* bbox_data) {
if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( DecodeBBoxesKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, loc_data, prior_data, code_type,
variance_encoded_in_target, num_priors, share_location, num_loc_classes,
background_label_id, clip_bbox, bbox_data);
CUDA_POST_KERNEL_CHECK;
#endif //USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
Caffe::GetDefaultDevice()->id());
viennacl::ocl::program &program = Caffe::GetDefaultDevice()->program();
std::string kernel_name{};
switch(code_type) {
case PriorBoxParameter_CodeType_CORNER:
kernel_name = CL_KERNEL_SELECT("DecodeBBoxesCORNER");
break;
case PriorBoxParameter_CodeType_CENTER_SIZE:
kernel_name = CL_KERNEL_SELECT("DecodeBBoxesCENTER_SIZE");
break;
case PriorBoxParameter_CodeType_CORNER_SIZE:
kernel_name = CL_KERNEL_SELECT("DecodeBBoxesCORNER_SIZE");
break;
default:
break;
}
viennacl::ocl::kernel &oclk_debbox = program.get_kernel(kernel_name);
viennacl::ocl::enqueue(
oclk_debbox(nthreads,
WrapHandle((cl_mem)loc_data, &ctx),
WrapHandle((cl_mem)prior_data, &ctx),
(int)variance_encoded_in_target,
num_priors,
(int)share_location,
num_loc_classes,
background_label_id,
(int)clip_bbox,
WrapHandle((cl_mem)bbox_data, &ctx)),
ctx.get_queue());
#endif //USE_GREENTEA
}
}
#ifdef HAS_HALF_SUPPORT
template void DecodeBBoxesGPU(const int nthreads,
const half* loc_data, const half* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, half* bbox_data);
#endif
template void DecodeBBoxesGPU(const int nthreads,
const float* loc_data, const float* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, float* bbox_data);
template void DecodeBBoxesGPU(const int nthreads,
const double* loc_data, const double* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, double* bbox_data);
#ifdef USE_ROCM
template <typename Dtype>
__global__ void PermuteDataKernel(const int nthreads,
const Dtype* data, const int num_classes, const int num_data,
const int num_dim, Dtype* new_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int i = index % num_dim;
const int c = (index / num_dim) % num_classes;
const int d = (index / num_dim / num_classes) % num_data;
const int n = index / num_dim / num_classes / num_data;
const int new_index = ((n * num_classes + c) * num_data + d) * num_dim + i;
new_data[new_index] = data[index];
}
}
#endif //USE_ROCM
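// PermuteDataGPU reorders the predictions from
// [num, num_data, num_classes, num_dim] to [num, num_classes, num_data,
// num_dim] layout, so that all values belonging to one class are contiguous.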
template <typename Dtype>
void PermuteDataGPU(const int nthreads,
const Dtype* data, const int num_classes, const int num_data,
const int num_dim, Dtype* new_data) {
if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( PermuteDataKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, data, num_classes, num_data,
num_dim, new_data);
CUDA_POST_KERNEL_CHECK;
#endif //USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
Caffe::GetDefaultDevice()->id());
viennacl::ocl::program &program = Caffe::GetDefaultDevice()->program();
viennacl::ocl::kernel &oclk_permute_data =
program.get_kernel(CL_KERNEL_SELECT("PermuteData"));
viennacl::ocl::enqueue(
oclk_permute_data(nthreads,
WrapHandle((cl_mem)data, &ctx),
num_classes,
num_data,
num_dim,
WrapHandle((cl_mem)new_data, &ctx)),
ctx.get_queue());
#endif //USE_GREENTEA
}
}
#ifdef HAS_HALF_SUPPORT
template void PermuteDataGPU(const int nthreads,
const half* data, const int num_classes, const int num_data,
const int num_dim, half* new_data);
#endif
template void PermuteDataGPU(const int nthreads,
const float* data, const int num_classes, const int num_data,
const int num_dim, float* new_data);
template void PermuteDataGPU(const int nthreads,
const double* data, const int num_classes, const int num_data,
const int num_dim, double* new_data);
#ifdef USE_ROCM
template <typename Dtype>
__global__ void PermuteData24Kernel(const int nthreads,
const Dtype* data, const int num_channels, const int num_height,
const int num_width, Dtype* new_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int c = index % num_channels;
const int w = (index / num_channels) % num_width;
const int h = (index / num_channels / num_width) % num_height;
const int n = index / num_width / num_height / num_channels;
const int new_index = ((n * num_channels + c) * num_height + h) * num_width + w;
new_data[index] = data[new_index];
}
}
#endif //USE_ROCM
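// PermuteData24GPU is the NCHW -> NHWC variant: for every output element in
// [num, height, width, channels] order it gathers the corresponding value
// from the NCHW-ordered input.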
template <typename Dtype>
void PermuteData24GPU(const int nthreads,
const Dtype* data, const int num_channels, const int num_height,
const int num_width, Dtype* new_data) {
if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( PermuteData24Kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, data, num_channels, num_height,
num_width, new_data);
CUDA_POST_KERNEL_CHECK;
#endif //USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
Caffe::GetDefaultDevice()->id());
viennacl::ocl::program &program = Caffe::GetDefaultDevice()->program();
viennacl::ocl::kernel &oclk_permute_data =
program.get_kernel(CL_KERNEL_SELECT("PermuteData24"));
viennacl::ocl::enqueue(
oclk_permute_data(nthreads,
WrapHandle((cl_mem)data, &ctx),
num_channels,
num_height,
num_width,
WrapHandle((cl_mem)new_data, &ctx)),
ctx.get_queue());
#endif //USE_GREENTEA
}
}
#ifdef HAS_HALF_SUPPORT
template void PermuteData24GPU(const int nthreads,
const half* data, const int num_classes, const int num_data,
const int num_dim, half* new_data);
#endif
template void PermuteData24GPU(const int nthreads,
const float* data, const int num_classes, const int num_data,
const int num_dim, float* new_data);
template void PermuteData24GPU(const int nthreads,
const double* data, const int num_classes, const int num_data,
const int num_dim, double* new_data);
#ifdef USE_ROCM
template <typename Dtype>
__global__ void kernel_channel_max(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype maxval = -FLT_MAX;
for (int c = 0; c < channels; ++c) {
maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);
}
out[index] = maxval;
}
}
template <typename Dtype>
__global__ void kernel_channel_subtract(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_data, const Dtype* channel_max,
Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] = channel_data[index] - channel_max[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
out[index] = exp(data[index]);
}
}
template <typename Dtype>
__global__ void kernel_channel_sum(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* channel_sum) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype sum = 0;
for (int c = 0; c < channels; ++c) {
sum += data[(n * channels + c) * spatial_dim + s];
}
channel_sum[index] = sum;
}
}
template <typename Dtype>
__global__ void kernel_channel_div(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_sum, Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] /= channel_sum[n * spatial_dim + s];
}
}
#endif //USE_ROCM
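// SoftMaxGPU applies a numerically stable softmax across the channel axis:
// per-position channel max -> subtract -> exp -> per-position channel sum ->
// divide. Only the BACKEND_CUDA branch is implemented here; the
// GreenTea/OpenCL branch falls through to NOT_IMPLEMENTED.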
template <typename Dtype>
void SoftMaxGPU(const Dtype* data, const int outer_num,
const int channels, const int inner_num, Dtype* prob) {
vector<int> shape(4, 1);
shape[0] = outer_num;
shape[1] = channels;
shape[2] = inner_num;
Blob<Dtype> scale(shape);
Dtype* scale_data = scale.mutable_gpu_data();
int count = outer_num * channels * inner_num;
if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
// We need to subtract the max to avoid numerical issues, compute the exp,
// and then normalize.
// compute max
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_max<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num * inner_num)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num, channels, inner_num, data,
scale_data);
// subtract
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_subtract<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, outer_num, channels, inner_num,
data, scale_data, prob);
// exponentiate
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_exp<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, prob, prob);
// sum after exp
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_sum<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num * inner_num)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num, channels, inner_num, prob,
scale_data);
// divide
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_div<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, outer_num, channels, inner_num,
scale_data, prob);
#endif //USE_ROCM
} else {
#ifdef USE_GREENTEA
NOT_IMPLEMENTED;
#endif //USE_GREENTEA
}
}
template void SoftMaxGPU(const float* data, const int outer_num,
const int channels, const int inner_num, float* prob);
template void SoftMaxGPU(const double* data, const int outer_num,
const int channels, const int inner_num, double* prob);
#ifdef USE_ROCM
template <typename Dtype>
__global__ void ComputeOverlappedKernel(const int nthreads,
const Dtype* bbox_data, const int num_bboxes, const int num_classes,
const Dtype overlap_threshold, bool* overlapped_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int j = index % num_bboxes;
const int i = (index / num_bboxes) % num_bboxes;
if (i == j) {
// Ignore same bbox.
return;
}
const int c = (index / num_bboxes / num_bboxes) % num_classes;
const int n = index / num_bboxes / num_bboxes / num_classes;
// Compute overlap between i-th bbox and j-th bbox.
const int start_loc_i = ((n * num_bboxes + i) * num_classes + c) * 4;
const int start_loc_j = ((n * num_bboxes + j) * num_classes + c) * 4;
const Dtype overlap = JaccardOverlapGPU<Dtype>(bbox_data + start_loc_i,
bbox_data + start_loc_j);
if (overlap > overlap_threshold) {
overlapped_data[index] = true;
}
}
}
#endif //USE_ROCM
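// ComputeOverlappedGPU fills a boolean matrix of shape
// [num, num_classes, num_bboxes, num_bboxes] marking every pair of boxes
// (per image and class) whose Jaccard overlap exceeds overlap_threshold.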
template <typename Dtype>
void ComputeOverlappedGPU(const int nthreads,
const Dtype* bbox_data, const int num_bboxes, const int num_classes,
const Dtype overlap_threshold, bool* overlapped_data) {
if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ComputeOverlappedKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, bbox_data, num_bboxes, num_classes,
overlap_threshold, overlapped_data);
CUDA_POST_KERNEL_CHECK;
#endif //USE_ROCM
} else {
#ifdef USE_GREENTEA
NOT_IMPLEMENTED;
#endif //USE_GREENTEA
}
}
template void ComputeOverlappedGPU(const int nthreads,
const float* bbox_data, const int num_bboxes, const int num_classes,
const float overlap_threshold, bool* overlapped_data);
template void ComputeOverlappedGPU(const int nthreads,
const double* bbox_data, const int num_bboxes, const int num_classes,
const double overlap_threshold, bool* overlapped_data);
#ifdef USE_ROCM
template <typename Dtype>
__global__ void ComputeOverlappedByIdxKernel(const int nthreads,
const Dtype* bbox_data, const Dtype overlap_threshold,
const int* idx, const int num_idx, bool* overlapped_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int j = index % num_idx;
const int i = (index / num_idx);
if (i == j) {
// Ignore same bbox.
return;
}
// Compute overlap between i-th bbox and j-th bbox.
const int start_loc_i = idx[i] * 4;
const int start_loc_j = idx[j] * 4;
const Dtype overlap = JaccardOverlapGPU<Dtype>(bbox_data + start_loc_i,
bbox_data + start_loc_j);
if (overlap > overlap_threshold) {
overlapped_data[index] = true;
}
}
}
#endif //USE_ROCM
template <typename Dtype>
void ComputeOverlappedByIdxGPU(const int nthreads,
const Dtype* bbox_data, const Dtype overlap_threshold,
const int* idx, const int num_idx, bool* overlapped_data) {
if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ComputeOverlappedByIdxKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, bbox_data, overlap_threshold,
idx, num_idx, overlapped_data);
CUDA_POST_KERNEL_CHECK;
#endif //USE_ROCM
} else {
#ifdef USE_GREENTEA
NOT_IMPLEMENTED;
#endif //USE_GREENTEA
}
}
template void ComputeOverlappedByIdxGPU(const int nthreads,
const float* bbox_data, const float overlap_threshold,
const int* idx, const int num_idx, bool* overlapped_data);
template void ComputeOverlappedByIdxGPU(const int nthreads,
const double* bbox_data, const double overlap_threshold,
const int* idx, const int num_idx, bool* overlapped_data);
#ifdef USE_ROCM
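// ApplyNMSGPU: keep detections whose score exceeds confidence_threshold,
// sort them by score with thrust, optionally truncate to top_k, compute the
// pairwise overlap matrix on the GPU, and finally run the greedy
// non-maximum suppression (ApplyNMS) on the host over that matrix.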
template <typename Dtype>
void ApplyNMSGPU(const Dtype* bbox_data, const Dtype* conf_data,
const int num_bboxes, const float confidence_threshold,
const int top_k, const float nms_threshold, vector<int>* indices) {
// Keep part of detections whose scores are higher than confidence threshold.
vector<int> idx;
vector<Dtype> confidences;
for (int i = 0; i < num_bboxes; ++i) {
if (conf_data[i] > confidence_threshold) {
idx.push_back(i);
confidences.push_back(conf_data[i]);
}
}
int num_remain = confidences.size();
if (num_remain == 0) {
return;
}
// Sort detections based on score.
thrust::sort_by_key(&confidences[0], &confidences[0] + num_remain, &idx[0],
thrust::greater<Dtype>());
if (top_k > -1 && top_k < num_remain) {
num_remain = top_k;
}
// Compute overlap between remaining detections.
Blob<int> idx_blob(1, 1, 1, num_remain);
int* idx_data = idx_blob.mutable_cpu_data();
std::copy(idx.begin(), idx.begin() + num_remain, idx_data);
Blob<bool> overlapped(1, 1, num_remain, num_remain);
const int total_bboxes = overlapped.count();
bool* overlapped_data = overlapped.mutable_gpu_data();
ComputeOverlappedByIdxGPU<Dtype>(total_bboxes, bbox_data, nms_threshold,
idx_blob.gpu_data(), num_remain, overlapped_data);
// Do non-maximum suppression based on overlapped results.
const bool* overlapped_results = overlapped.cpu_data();
vector<int> selected_indices;
ApplyNMS(overlapped_results, num_remain, &selected_indices);
// Put back the selected information.
for (int i = 0; i < selected_indices.size(); ++i) {
indices->push_back(idx[selected_indices[i]]);
}
}
template
void ApplyNMSGPU(const float* bbox_data, const float* conf_data,
const int num_bboxes, const float confidence_threshold,
const int top_k, const float nms_threshold, vector<int>* indices);
template
void ApplyNMSGPU(const double* bbox_data, const double* conf_data,
const int num_bboxes, const float confidence_threshold,
const int top_k, const float nms_threshold, vector<int>* indices);
#endif
#ifdef USE_ROCM
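// GetDetectionsGPU (and its kernel below) packs the selected detections into
// rows of 7 values: [image_id, label, confidence, xmin, ymin, xmax, ymax],
// optionally clipping the box coordinates to [0, 1].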
template <typename Dtype>
__global__ void GetDetectionsKernel(const int nthreads,
const Dtype* bbox_data, const Dtype* conf_data, const int image_id,
const int label, const int* indices, const bool clip_bbox,
Dtype* detection_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int det_idx = indices[index];
detection_data[index * 7] = image_id;
detection_data[index * 7 + 1] = label;
detection_data[index * 7 + 2] = conf_data[det_idx];
if (clip_bbox) {
ClipBBoxGPU(&(bbox_data[det_idx * 4]), &(detection_data[index * 7 + 3]));
} else {
for (int i = 0; i < 4; ++i) {
detection_data[index * 7 + 3 + i] = bbox_data[det_idx * 4 + i];
}
}
}
}
template <typename Dtype>
void GetDetectionsGPU(const Dtype* bbox_data, const Dtype* conf_data,
const int image_id, const int label, const vector<int>& indices,
const bool clip_bbox, Blob<Dtype>* detection_blob) {
// Store selected indices in array.
int num_det = indices.size();
if (num_det == 0) {
return;
}
Blob<int> idx_blob(1, 1, 1, num_det);
int* idx_data = idx_blob.mutable_cpu_data();
std::copy(indices.begin(), indices.end(), idx_data);
// Prepare detection_blob.
detection_blob->Reshape(1, 1, num_det, 7);
Dtype* detection_data = detection_blob->mutable_gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( GetDetectionsKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_det)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_det, bbox_data, conf_data, image_id, label,
idx_blob.gpu_data(), clip_bbox, detection_data);
CUDA_POST_KERNEL_CHECK;
}
template void GetDetectionsGPU(const float* bbox_data, const float* conf_data,
const int image_id, const int label, const vector<int>& indices,
const bool clip_bbox, Blob<float>* detection_blob);
template void GetDetectionsGPU(const double* bbox_data, const double* conf_data,
const int image_id, const int label, const vector<int>& indices,
const bool clip_bbox, Blob<double>* detection_blob);
#endif
#ifdef USE_ROCM
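// ComputeConfLossGPU (and its kernel below) evaluates, for every prior, the
// confidence loss it would incur given its matched ground-truth label
// (background if unmatched): the negative log of the softmax probability for
// SOFTMAX, or the sum of per-class binary logistic losses for LOGISTIC.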
template <typename Dtype>
__global__ void ComputeConfLossKernel(const int nthreads,
const Dtype* conf_data, const int num_preds_per_class,
const int num_classes, const ConfLossType loss_type,
const Dtype* match_data, Dtype* conf_loss_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int label = match_data[index];
int num = index / num_preds_per_class;
int p = index % num_preds_per_class;
int start_idx = (num * num_preds_per_class + p) * num_classes;
Dtype loss = 0;
if (loss_type == MultiBoxLossParameter_ConfLossType_SOFTMAX) {
// Compute softmax probability.
Dtype prob = conf_data[start_idx + label];
loss = -log(Max(prob, Dtype(FLT_MIN)));
} else if (loss_type == MultiBoxLossParameter_ConfLossType_LOGISTIC) {
int target = 0;
for (int c = 0; c < num_classes; ++c) {
if (c == label) {
target = 1;
} else {
target = 0;
}
Dtype input = conf_data[start_idx + c];
loss -= input * (target - (input >= 0)) -
log(1 + exp(input - 2 * input * (input >= 0)));
}
}
conf_loss_data[index] = loss;
}
}
template <typename Dtype>
void ComputeConfLossGPU(const Blob<Dtype>& conf_blob, const int num,
const int num_preds_per_class, const int num_classes,
const int background_label_id, const ConfLossType loss_type,
const vector<map<int, vector<int> > >& all_match_indices,
const map<int, vector<NormalizedBBox> >& all_gt_bboxes,
vector<vector<float> >* all_conf_loss) {
CHECK_LT(background_label_id, num_classes);
Blob<Dtype> match_blob(num, num_preds_per_class, 1, 1);
Dtype* match_data = match_blob.mutable_cpu_data();
for (int i = 0; i < num; ++i) {
const map<int, vector<int> >& match_indices = all_match_indices[i];
for (int p = 0; p < num_preds_per_class; ++p) {
// Get the label index.
int label = background_label_id;
for (map<int, vector<int> >::const_iterator it =
match_indices.begin(); it != match_indices.end(); ++it) {
const vector<int>& match_index = it->second;
CHECK_EQ(match_index.size(), num_preds_per_class);
if (match_index[p] > -1) {
CHECK(all_gt_bboxes.find(i) != all_gt_bboxes.end());
const vector<NormalizedBBox>& gt_bboxes =
all_gt_bboxes.find(i)->second;
CHECK_LT(match_index[p], gt_bboxes.size());
label = gt_bboxes[match_index[p]].label();
CHECK_GE(label, 0);
CHECK_NE(label, background_label_id);
CHECK_LT(label, num_classes);
// A prior can only be matched to one gt bbox.
break;
}
}
match_data[i * num_preds_per_class + p] = label;
}
}
// Get probability data.
const Dtype* conf_gpu_data = conf_blob.gpu_data();
Blob<Dtype> prob_blob;
prob_blob.ReshapeLike(conf_blob);
if (loss_type == MultiBoxLossParameter_ConfLossType_SOFTMAX) {
Dtype* prob_gpu_data = prob_blob.mutable_gpu_data();
SoftMaxGPU(conf_blob.gpu_data(), num * num_preds_per_class, num_classes, 1,
prob_gpu_data);
conf_gpu_data = prob_blob.gpu_data();
}
// Compute the loss.
Blob<Dtype> conf_loss_blob(num, num_preds_per_class, 1, 1);
Dtype* conf_loss_gpu_data = conf_loss_blob.mutable_gpu_data();
const int num_threads = num * num_preds_per_class;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ComputeConfLossKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_threads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_threads, conf_gpu_data, num_preds_per_class,
num_classes, loss_type, match_blob.gpu_data(), conf_loss_gpu_data);
// Save the loss.
all_conf_loss->clear();
const Dtype* loss_data = conf_loss_blob.cpu_data();
for (int i = 0; i < num; ++i) {
vector<float> conf_loss(loss_data, loss_data + num_preds_per_class);
all_conf_loss->push_back(conf_loss);
loss_data += num_preds_per_class;
}
}
// Explicit instantiation.
template void ComputeConfLossGPU(const Blob<float>& conf_data, const int num,
const int num_preds_per_class, const int num_classes,
const int background_label_id, const ConfLossType loss_type,
const vector<map<int, vector<int> > >& all_match_indices,
const map<int, vector<NormalizedBBox> >& all_gt_bboxes,
vector<vector<float> >* all_conf_loss);
template void ComputeConfLossGPU(const Blob<double>& conf_data, const int num,
const int num_preds_per_class, const int num_classes,
const int background_label_id, const ConfLossType loss_type,
const vector<map<int, vector<int> > >& all_match_indices,
const map<int, vector<NormalizedBBox> >& all_gt_bboxes,
vector<vector<float> >* all_conf_loss);
#endif //USE_ROCM
} // namespace caffe
| b1dece6842fbf78560cd7b7903b592c7325347eb.cu | #include <algorithm>
#include <functional>
#include <map>
#include <vector>
#ifdef USE_CUDA
#include "thrust/functional.h"
#include "thrust/sort.h"
#endif //USE_CUDA
#include "caffe/common.hpp"
#include "caffe/device.hpp"
#include "caffe/util/bbox_util.hpp"
namespace caffe {
#ifdef USE_CUDA
template <typename Dtype>
__host__ __device__ Dtype BBoxSizeGPU(const Dtype* bbox,
const bool normalized) {
if (bbox[2] < bbox[0] || bbox[3] < bbox[1]) {
// If bbox is invalid (e.g. xmax < xmin or ymax < ymin), return 0.
return Dtype(0.);
} else {
const Dtype width = bbox[2] - bbox[0];
const Dtype height = bbox[3] - bbox[1];
if (normalized) {
return width * height;
} else {
// If bbox is not within range [0, 1].
return (width + 1) * (height + 1);
}
}
}
template __host__ __device__ float BBoxSizeGPU(const float* bbox,
const bool normalized);
template __host__ __device__ double BBoxSizeGPU(const double* bbox,
const bool normalized);
template <typename Dtype>
__host__ __device__ Dtype JaccardOverlapGPU(const Dtype* bbox1,
const Dtype* bbox2) {
if (bbox2[0] > bbox1[2] || bbox2[2] < bbox1[0] ||
bbox2[1] > bbox1[3] || bbox2[3] < bbox1[1]) {
return Dtype(0.);
} else {
const Dtype inter_xmin = max(bbox1[0], bbox2[0]);
const Dtype inter_ymin = max(bbox1[1], bbox2[1]);
const Dtype inter_xmax = min(bbox1[2], bbox2[2]);
const Dtype inter_ymax = min(bbox1[3], bbox2[3]);
const Dtype inter_width = inter_xmax - inter_xmin;
const Dtype inter_height = inter_ymax - inter_ymin;
const Dtype inter_size = inter_width * inter_height;
const Dtype bbox1_size = BBoxSizeGPU(bbox1);
const Dtype bbox2_size = BBoxSizeGPU(bbox2);
return inter_size / (bbox1_size + bbox2_size - inter_size);
}
}
template __host__ __device__ float JaccardOverlapGPU(const float* bbox1,
const float* bbox2);
template __host__ __device__ double JaccardOverlapGPU(const double* bbox1,
const double* bbox2);
template <typename Dtype>
__device__ Dtype Min(const Dtype x, const Dtype y) {
return x < y ? x : y;
}
template <typename Dtype>
__device__ Dtype Max(const Dtype x, const Dtype y) {
return x > y ? x : y;
}
template <typename Dtype>
__device__ void ClipBBoxGPU(const Dtype* bbox, Dtype* clip_bbox) {
for (int i = 0; i < 4; ++i) {
clip_bbox[i] = Max(Min(bbox[i], Dtype(1.)), Dtype(0.));
}
}
template __device__ void ClipBBoxGPU(const float* bbox, float* clip_bbox);
template __device__ void ClipBBoxGPU(const double* bbox, double* clip_bbox);
template <typename Dtype>
__global__ void DecodeBBoxesKernel(const int nthreads,
const Dtype* loc_data, const Dtype* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, Dtype* bbox_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int i = index % 4;
const int c = (index / 4) % num_loc_classes;
const int d = (index / 4 / num_loc_classes) % num_priors;
if (!share_location && c == background_label_id) {
// Ignore background class if not share_location.
return;
}
const int pi = d * 4;
const int vi = pi + num_priors * 4;
if (code_type == PriorBoxParameter_CodeType_CORNER) {
if (variance_encoded_in_target) {
// variance is encoded in target, we simply need to add the offset
// predictions.
bbox_data[index] = prior_data[pi + i] + loc_data[index];
} else {
// variance is encoded in bbox, we need to scale the offset accordingly.
bbox_data[index] =
prior_data[pi + i] + loc_data[index] * prior_data[vi + i];
}
} else if (code_type == PriorBoxParameter_CodeType_CENTER_SIZE) {
const Dtype p_xmin = prior_data[pi];
const Dtype p_ymin = prior_data[pi + 1];
const Dtype p_xmax = prior_data[pi + 2];
const Dtype p_ymax = prior_data[pi + 3];
const Dtype prior_width = p_xmax - p_xmin;
const Dtype prior_height = p_ymax - p_ymin;
const Dtype prior_center_x = (p_xmin + p_xmax) / 2.;
const Dtype prior_center_y = (p_ymin + p_ymax) / 2.;
const Dtype xmin = loc_data[index - i];
const Dtype ymin = loc_data[index - i + 1];
const Dtype xmax = loc_data[index - i + 2];
const Dtype ymax = loc_data[index - i + 3];
Dtype decode_bbox_center_x, decode_bbox_center_y;
Dtype decode_bbox_width, decode_bbox_height;
if (variance_encoded_in_target) {
// variance is encoded in target, we simply need to restore the offset
// predictions.
decode_bbox_center_x = xmin * prior_width + prior_center_x;
decode_bbox_center_y = ymin * prior_height + prior_center_y;
decode_bbox_width = exp(xmax) * prior_width;
decode_bbox_height = exp(ymax) * prior_height;
} else {
// variance is encoded in bbox, we need to scale the offset accordingly.
decode_bbox_center_x =
prior_data[vi] * xmin * prior_width + prior_center_x;
decode_bbox_center_y =
prior_data[vi + 1] * ymin * prior_height + prior_center_y;
decode_bbox_width =
exp(prior_data[vi + 2] * xmax) * prior_width;
decode_bbox_height =
exp(prior_data[vi + 3] * ymax) * prior_height;
}
switch (i) {
case 0:
bbox_data[index] = decode_bbox_center_x - decode_bbox_width / 2.;
break;
case 1:
bbox_data[index] = decode_bbox_center_y - decode_bbox_height / 2.;
break;
case 2:
bbox_data[index] = decode_bbox_center_x + decode_bbox_width / 2.;
break;
case 3:
bbox_data[index] = decode_bbox_center_y + decode_bbox_height / 2.;
break;
}
} else if (code_type == PriorBoxParameter_CodeType_CORNER_SIZE) {
const Dtype p_xmin = prior_data[pi];
const Dtype p_ymin = prior_data[pi + 1];
const Dtype p_xmax = prior_data[pi + 2];
const Dtype p_ymax = prior_data[pi + 3];
const Dtype prior_width = p_xmax - p_xmin;
const Dtype prior_height = p_ymax - p_ymin;
Dtype p_size;
if (i == 0 || i == 2) {
p_size = prior_width;
} else {
p_size = prior_height;
}
if (variance_encoded_in_target) {
// variance is encoded in target, we simply need to add the offset
// predictions.
bbox_data[index] = prior_data[pi + i] + loc_data[index] * p_size;
} else {
// variance is encoded in bbox, we need to scale the offset accordingly.
bbox_data[index] =
prior_data[pi + i] + loc_data[index] * prior_data[vi + i] * p_size;
}
} else {
// Unknown code type.
}
if (clip_bbox) {
bbox_data[index] = max(min(bbox_data[index], Dtype(1.)), Dtype(0.));
}
}
}
#endif //USE_CUDA
template <typename Dtype>
void DecodeBBoxesGPU(const int nthreads,
const Dtype* loc_data, const Dtype* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, Dtype* bbox_data) {
if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
// NOLINT_NEXT_LINE(whitespace/operators)
DecodeBBoxesKernel<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, loc_data, prior_data, code_type,
variance_encoded_in_target, num_priors, share_location, num_loc_classes,
background_label_id, clip_bbox, bbox_data);
CUDA_POST_KERNEL_CHECK;
#endif //USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
Caffe::GetDefaultDevice()->id());
viennacl::ocl::program &program = Caffe::GetDefaultDevice()->program();
std::string kernel_name{};
switch(code_type) {
case PriorBoxParameter_CodeType_CORNER:
kernel_name = CL_KERNEL_SELECT("DecodeBBoxesCORNER");
break;
case PriorBoxParameter_CodeType_CENTER_SIZE:
kernel_name = CL_KERNEL_SELECT("DecodeBBoxesCENTER_SIZE");
break;
case PriorBoxParameter_CodeType_CORNER_SIZE:
kernel_name = CL_KERNEL_SELECT("DecodeBBoxesCORNER_SIZE");
break;
default:
break;
}
viennacl::ocl::kernel &oclk_debbox = program.get_kernel(kernel_name);
viennacl::ocl::enqueue(
oclk_debbox(nthreads,
WrapHandle((cl_mem)loc_data, &ctx),
WrapHandle((cl_mem)prior_data, &ctx),
(int)variance_encoded_in_target,
num_priors,
(int)share_location,
num_loc_classes,
background_label_id,
(int)clip_bbox,
WrapHandle((cl_mem)bbox_data, &ctx)),
ctx.get_queue());
#endif //USE_GREENTEA
}
}
#ifdef HAS_HALF_SUPPORT
template void DecodeBBoxesGPU(const int nthreads,
const half* loc_data, const half* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, half* bbox_data);
#endif
template void DecodeBBoxesGPU(const int nthreads,
const float* loc_data, const float* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, float* bbox_data);
template void DecodeBBoxesGPU(const int nthreads,
const double* loc_data, const double* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, double* bbox_data);
#ifdef USE_CUDA
template <typename Dtype>
__global__ void PermuteDataKernel(const int nthreads,
const Dtype* data, const int num_classes, const int num_data,
const int num_dim, Dtype* new_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int i = index % num_dim;
const int c = (index / num_dim) % num_classes;
const int d = (index / num_dim / num_classes) % num_data;
const int n = index / num_dim / num_classes / num_data;
const int new_index = ((n * num_classes + c) * num_data + d) * num_dim + i;
new_data[new_index] = data[index];
}
}
#endif //USE_CUDA
template <typename Dtype>
void PermuteDataGPU(const int nthreads,
const Dtype* data, const int num_classes, const int num_data,
const int num_dim, Dtype* new_data) {
if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
// NOLINT_NEXT_LINE(whitespace/operators)
PermuteDataKernel<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, data, num_classes, num_data,
num_dim, new_data);
CUDA_POST_KERNEL_CHECK;
#endif //USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
Caffe::GetDefaultDevice()->id());
viennacl::ocl::program &program = Caffe::GetDefaultDevice()->program();
viennacl::ocl::kernel &oclk_permute_data =
program.get_kernel(CL_KERNEL_SELECT("PermuteData"));
viennacl::ocl::enqueue(
oclk_permute_data(nthreads,
WrapHandle((cl_mem)data, &ctx),
num_classes,
num_data,
num_dim,
WrapHandle((cl_mem)new_data, &ctx)),
ctx.get_queue());
#endif //USE_GREENTEA
}
}
#ifdef HAS_HALF_SUPPORT
template void PermuteDataGPU(const int nthreads,
const half* data, const int num_classes, const int num_data,
const int num_dim, half* new_data);
#endif
template void PermuteDataGPU(const int nthreads,
const float* data, const int num_classes, const int num_data,
const int num_dim, float* new_data);
template void PermuteDataGPU(const int nthreads,
const double* data, const int num_classes, const int num_data,
const int num_dim, double* new_data);
#ifdef USE_CUDA
template <typename Dtype>
__global__ void PermuteData24Kernel(const int nthreads,
const Dtype* data, const int num_channels, const int num_height,
const int num_width, Dtype* new_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int c = index % num_channels;
const int w = (index / num_channels) % num_width;
const int h = (index / num_channels / num_width) % num_height;
const int n = index / num_width / num_height / num_channels;
const int new_index = ((n * num_channels + c) * num_height + h) * num_width + w;
new_data[index] = data[new_index];
}
}
#endif //USE_CUDA
template <typename Dtype>
void PermuteData24GPU(const int nthreads,
const Dtype* data, const int num_channels, const int num_height,
const int num_width, Dtype* new_data) {
if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
// NOLINT_NEXT_LINE(whitespace/operators)
PermuteData24Kernel<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, data, num_channels, num_height,
num_width, new_data);
CUDA_POST_KERNEL_CHECK;
#endif //USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
Caffe::GetDefaultDevice()->id());
viennacl::ocl::program &program = Caffe::GetDefaultDevice()->program();
viennacl::ocl::kernel &oclk_permute_data =
program.get_kernel(CL_KERNEL_SELECT("PermuteData24"));
viennacl::ocl::enqueue(
oclk_permute_data(nthreads,
WrapHandle((cl_mem)data, &ctx),
num_channels,
num_height,
num_width,
WrapHandle((cl_mem)new_data, &ctx)),
ctx.get_queue());
#endif //USE_GREENTEA
}
}
#ifdef HAS_HALF_SUPPORT
template void PermuteData24GPU(const int nthreads,
const half* data, const int num_classes, const int num_data,
const int num_dim, half* new_data);
#endif
template void PermuteData24GPU(const int nthreads,
const float* data, const int num_classes, const int num_data,
const int num_dim, float* new_data);
template void PermuteData24GPU(const int nthreads,
const double* data, const int num_classes, const int num_data,
const int num_dim, double* new_data);
#ifdef USE_CUDA
template <typename Dtype>
__global__ void kernel_channel_max(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype maxval = -FLT_MAX;
for (int c = 0; c < channels; ++c) {
maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);
}
out[index] = maxval;
}
}
template <typename Dtype>
__global__ void kernel_channel_subtract(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_data, const Dtype* channel_max,
Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] = channel_data[index] - channel_max[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
out[index] = exp(data[index]);
}
}
template <typename Dtype>
__global__ void kernel_channel_sum(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* channel_sum) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype sum = 0;
for (int c = 0; c < channels; ++c) {
sum += data[(n * channels + c) * spatial_dim + s];
}
channel_sum[index] = sum;
}
}
template <typename Dtype>
__global__ void kernel_channel_div(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_sum, Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] /= channel_sum[n * spatial_dim + s];
}
}
#endif //USE_CUDA
template <typename Dtype>
void SoftMaxGPU(const Dtype* data, const int outer_num,
const int channels, const int inner_num, Dtype* prob) {
vector<int> shape(4, 1);
shape[0] = outer_num;
shape[1] = channels;
shape[2] = inner_num;
Blob<Dtype> scale(shape);
Dtype* scale_data = scale.mutable_gpu_data();
int count = outer_num * channels * inner_num;
if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
// We need to subtract the max to avoid numerical issues, compute the exp,
// and then normalize.
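// For example, a row [1, 2, 3]: subtract the max 3 -> [-2, -1, 0],
// exponentiate -> [0.135, 0.368, 1.0], then divide by the sum 1.503
// to obtain the probabilities [0.090, 0.245, 0.665].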
// compute max
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_max<Dtype><<<CAFFE_GET_BLOCKS(outer_num * inner_num),
CAFFE_CUDA_NUM_THREADS>>>(outer_num, channels, inner_num, data,
scale_data);
// subtract
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_subtract<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(count, outer_num, channels, inner_num,
data, scale_data, prob);
// exponentiate
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_exp<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, prob, prob);
// sum after exp
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_sum<Dtype><<<CAFFE_GET_BLOCKS(outer_num * inner_num),
CAFFE_CUDA_NUM_THREADS>>>(outer_num, channels, inner_num, prob,
scale_data);
// divide
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_div<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(count, outer_num, channels, inner_num,
scale_data, prob);
#endif //USE_CUDA
} else {
#ifdef USE_GREENTEA
NOT_IMPLEMENTED;
#endif //USE_GREENTEA
}
}
template void SoftMaxGPU(const float* data, const int outer_num,
const int channels, const int inner_num, float* prob);
template void SoftMaxGPU(const double* data, const int outer_num,
const int channels, const int inner_num, double* prob);
#ifdef USE_CUDA
template <typename Dtype>
__global__ void ComputeOverlappedKernel(const int nthreads,
const Dtype* bbox_data, const int num_bboxes, const int num_classes,
const Dtype overlap_threshold, bool* overlapped_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int j = index % num_bboxes;
const int i = (index / num_bboxes) % num_bboxes;
if (i == j) {
// Ignore same bbox.
return;
}
const int c = (index / num_bboxes / num_bboxes) % num_classes;
const int n = index / num_bboxes / num_bboxes / num_classes;
// Compute overlap between i-th bbox and j-th bbox.
const int start_loc_i = ((n * num_bboxes + i) * num_classes + c) * 4;
const int start_loc_j = ((n * num_bboxes + j) * num_classes + c) * 4;
const Dtype overlap = JaccardOverlapGPU<Dtype>(bbox_data + start_loc_i,
bbox_data + start_loc_j);
if (overlap > overlap_threshold) {
overlapped_data[index] = true;
}
}
}
#endif //USE_CUDA
template <typename Dtype>
void ComputeOverlappedGPU(const int nthreads,
const Dtype* bbox_data, const int num_bboxes, const int num_classes,
const Dtype overlap_threshold, bool* overlapped_data) {
if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
// NOLINT_NEXT_LINE(whitespace/operators)
ComputeOverlappedKernel<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, bbox_data, num_bboxes, num_classes,
overlap_threshold, overlapped_data);
CUDA_POST_KERNEL_CHECK;
#endif //USE_CUDA
} else {
#ifdef USE_GREENTEA
NOT_IMPLEMENTED;
#endif //USE_GREENTEA
}
}
template void ComputeOverlappedGPU(const int nthreads,
const float* bbox_data, const int num_bboxes, const int num_classes,
const float overlap_threshold, bool* overlapped_data);
template void ComputeOverlappedGPU(const int nthreads,
const double* bbox_data, const int num_bboxes, const int num_classes,
const double overlap_threshold, bool* overlapped_data);
#ifdef USE_CUDA
template <typename Dtype>
__global__ void ComputeOverlappedByIdxKernel(const int nthreads,
const Dtype* bbox_data, const Dtype overlap_threshold,
const int* idx, const int num_idx, bool* overlapped_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int j = index % num_idx;
const int i = (index / num_idx);
if (i == j) {
// Ignore same bbox.
return;
}
// Compute overlap between i-th bbox and j-th bbox.
const int start_loc_i = idx[i] * 4;
const int start_loc_j = idx[j] * 4;
const Dtype overlap = JaccardOverlapGPU<Dtype>(bbox_data + start_loc_i,
bbox_data + start_loc_j);
if (overlap > overlap_threshold) {
overlapped_data[index] = true;
}
}
}
#endif //USE_CUDA
template <typename Dtype>
void ComputeOverlappedByIdxGPU(const int nthreads,
const Dtype* bbox_data, const Dtype overlap_threshold,
const int* idx, const int num_idx, bool* overlapped_data) {
if (Caffe::GetDefaultDevice()->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
// NOLINT_NEXT_LINE(whitespace/operators)
ComputeOverlappedByIdxKernel<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, bbox_data, overlap_threshold,
idx, num_idx, overlapped_data);
CUDA_POST_KERNEL_CHECK;
#endif //USE_CUDA
} else {
#ifdef USE_GREENTEA
NOT_IMPLEMENTED;
#endif //USE_GREENTEA
}
}
template void ComputeOverlappedByIdxGPU(const int nthreads,
const float* bbox_data, const float overlap_threshold,
const int* idx, const int num_idx, bool* overlapped_data);
template void ComputeOverlappedByIdxGPU(const int nthreads,
const double* bbox_data, const double overlap_threshold,
const int* idx, const int num_idx, bool* overlapped_data);
#ifdef USE_CUDA
template <typename Dtype>
void ApplyNMSGPU(const Dtype* bbox_data, const Dtype* conf_data,
const int num_bboxes, const float confidence_threshold,
const int top_k, const float nms_threshold, vector<int>* indices) {
// Keep part of detections whose scores are higher than confidence threshold.
vector<int> idx;
vector<Dtype> confidences;
for (int i = 0; i < num_bboxes; ++i) {
if (conf_data[i] > confidence_threshold) {
idx.push_back(i);
confidences.push_back(conf_data[i]);
}
}
int num_remain = confidences.size();
if (num_remain == 0) {
return;
}
// Sort detections based on score.
thrust::sort_by_key(&confidences[0], &confidences[0] + num_remain, &idx[0],
thrust::greater<Dtype>());
if (top_k > -1 && top_k < num_remain) {
num_remain = top_k;
}
// Compute overlap between remaining detections.
Blob<int> idx_blob(1, 1, 1, num_remain);
int* idx_data = idx_blob.mutable_cpu_data();
std::copy(idx.begin(), idx.begin() + num_remain, idx_data);
Blob<bool> overlapped(1, 1, num_remain, num_remain);
const int total_bboxes = overlapped.count();
bool* overlapped_data = overlapped.mutable_gpu_data();
ComputeOverlappedByIdxGPU<Dtype>(total_bboxes, bbox_data, nms_threshold,
idx_blob.gpu_data(), num_remain, overlapped_data);
// Do non-maximum suppression based on overlapped results.
const bool* overlapped_results = overlapped.cpu_data();
vector<int> selected_indices;
ApplyNMS(overlapped_results, num_remain, &selected_indices);
// Put back the selected information.
for (int i = 0; i < selected_indices.size(); ++i) {
indices->push_back(idx[selected_indices[i]]);
}
}
template
void ApplyNMSGPU(const float* bbox_data, const float* conf_data,
const int num_bboxes, const float confidence_threshold,
const int top_k, const float nms_threshold, vector<int>* indices);
template
void ApplyNMSGPU(const double* bbox_data, const double* conf_data,
const int num_bboxes, const float confidence_threshold,
const int top_k, const float nms_threshold, vector<int>* indices);
#endif
#ifdef USE_CUDA
template <typename Dtype>
__global__ void GetDetectionsKernel(const int nthreads,
const Dtype* bbox_data, const Dtype* conf_data, const int image_id,
const int label, const int* indices, const bool clip_bbox,
Dtype* detection_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int det_idx = indices[index];
detection_data[index * 7] = image_id;
detection_data[index * 7 + 1] = label;
detection_data[index * 7 + 2] = conf_data[det_idx];
if (clip_bbox) {
ClipBBoxGPU(&(bbox_data[det_idx * 4]), &(detection_data[index * 7 + 3]));
} else {
for (int i = 0; i < 4; ++i) {
detection_data[index * 7 + 3 + i] = bbox_data[det_idx * 4 + i];
}
}
}
}
template <typename Dtype>
void GetDetectionsGPU(const Dtype* bbox_data, const Dtype* conf_data,
const int image_id, const int label, const vector<int>& indices,
const bool clip_bbox, Blob<Dtype>* detection_blob) {
// Store selected indices in array.
int num_det = indices.size();
if (num_det == 0) {
return;
}
Blob<int> idx_blob(1, 1, 1, num_det);
int* idx_data = idx_blob.mutable_cpu_data();
std::copy(indices.begin(), indices.end(), idx_data);
// Prepare detection_blob.
detection_blob->Reshape(1, 1, num_det, 7);
Dtype* detection_data = detection_blob->mutable_gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
GetDetectionsKernel<Dtype><<<CAFFE_GET_BLOCKS(num_det),
CAFFE_CUDA_NUM_THREADS>>>(num_det, bbox_data, conf_data, image_id, label,
idx_blob.gpu_data(), clip_bbox, detection_data);
CUDA_POST_KERNEL_CHECK;
}
template void GetDetectionsGPU(const float* bbox_data, const float* conf_data,
const int image_id, const int label, const vector<int>& indices,
const bool clip_bbox, Blob<float>* detection_blob);
template void GetDetectionsGPU(const double* bbox_data, const double* conf_data,
const int image_id, const int label, const vector<int>& indices,
const bool clip_bbox, Blob<double>* detection_blob);
#endif
#ifdef USE_CUDA
template <typename Dtype>
__global__ void ComputeConfLossKernel(const int nthreads,
const Dtype* conf_data, const int num_preds_per_class,
const int num_classes, const ConfLossType loss_type,
const Dtype* match_data, Dtype* conf_loss_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int label = match_data[index];
int num = index / num_preds_per_class;
int p = index % num_preds_per_class;
int start_idx = (num * num_preds_per_class + p) * num_classes;
Dtype loss = 0;
if (loss_type == MultiBoxLossParameter_ConfLossType_SOFTMAX) {
// Compute softmax probability.
Dtype prob = conf_data[start_idx + label];
loss = -log(Max(prob, Dtype(FLT_MIN)));
} else if (loss_type == MultiBoxLossParameter_ConfLossType_LOGISTIC) {
int target = 0;
for (int c = 0; c < num_classes; ++c) {
if (c == label) {
target = 1;
} else {
target = 0;
}
Dtype input = conf_data[start_idx + c];
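// The update below adds the numerically stable sigmoid cross-entropy,
// algebraically equal to max(input,0) - input*target + log(1 + exp(-|input|)).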
loss -= input * (target - (input >= 0)) -
log(1 + exp(input - 2 * input * (input >= 0)));
}
}
conf_loss_data[index] = loss;
}
}
template <typename Dtype>
void ComputeConfLossGPU(const Blob<Dtype>& conf_blob, const int num,
const int num_preds_per_class, const int num_classes,
const int background_label_id, const ConfLossType loss_type,
const vector<map<int, vector<int> > >& all_match_indices,
const map<int, vector<NormalizedBBox> >& all_gt_bboxes,
vector<vector<float> >* all_conf_loss) {
CHECK_LT(background_label_id, num_classes);
Blob<Dtype> match_blob(num, num_preds_per_class, 1, 1);
Dtype* match_data = match_blob.mutable_cpu_data();
for (int i = 0; i < num; ++i) {
const map<int, vector<int> >& match_indices = all_match_indices[i];
for (int p = 0; p < num_preds_per_class; ++p) {
// Get the label index.
int label = background_label_id;
for (map<int, vector<int> >::const_iterator it =
match_indices.begin(); it != match_indices.end(); ++it) {
const vector<int>& match_index = it->second;
CHECK_EQ(match_index.size(), num_preds_per_class);
if (match_index[p] > -1) {
CHECK(all_gt_bboxes.find(i) != all_gt_bboxes.end());
const vector<NormalizedBBox>& gt_bboxes =
all_gt_bboxes.find(i)->second;
CHECK_LT(match_index[p], gt_bboxes.size());
label = gt_bboxes[match_index[p]].label();
CHECK_GE(label, 0);
CHECK_NE(label, background_label_id);
CHECK_LT(label, num_classes);
// A prior can only be matched to one gt bbox.
break;
}
}
match_data[i * num_preds_per_class + p] = label;
}
}
// Get probability data.
const Dtype* conf_gpu_data = conf_blob.gpu_data();
Blob<Dtype> prob_blob;
prob_blob.ReshapeLike(conf_blob);
if (loss_type == MultiBoxLossParameter_ConfLossType_SOFTMAX) {
Dtype* prob_gpu_data = prob_blob.mutable_gpu_data();
SoftMaxGPU(conf_blob.gpu_data(), num * num_preds_per_class, num_classes, 1,
prob_gpu_data);
conf_gpu_data = prob_blob.gpu_data();
}
// Compute the loss.
Blob<Dtype> conf_loss_blob(num, num_preds_per_class, 1, 1);
Dtype* conf_loss_gpu_data = conf_loss_blob.mutable_gpu_data();
const int num_threads = num * num_preds_per_class;
// NOLINT_NEXT_LINE(whitespace/operators)
ComputeConfLossKernel<Dtype><<<CAFFE_GET_BLOCKS(num_threads),
CAFFE_CUDA_NUM_THREADS>>>(num_threads, conf_gpu_data, num_preds_per_class,
num_classes, loss_type, match_blob.gpu_data(), conf_loss_gpu_data);
// Save the loss.
all_conf_loss->clear();
const Dtype* loss_data = conf_loss_blob.cpu_data();
for (int i = 0; i < num; ++i) {
vector<float> conf_loss(loss_data, loss_data + num_preds_per_class);
all_conf_loss->push_back(conf_loss);
loss_data += num_preds_per_class;
}
}
// Explicit instantiation.
template void ComputeConfLossGPU(const Blob<float>& conf_data, const int num,
const int num_preds_per_class, const int num_classes,
const int background_label_id, const ConfLossType loss_type,
const vector<map<int, vector<int> > >& all_match_indices,
const map<int, vector<NormalizedBBox> >& all_gt_bboxes,
vector<vector<float> >* all_conf_loss);
template void ComputeConfLossGPU(const Blob<double>& conf_data, const int num,
const int num_preds_per_class, const int num_classes,
const int background_label_id, const ConfLossType loss_type,
const vector<map<int, vector<int> > >& all_match_indices,
const map<int, vector<NormalizedBBox> >& all_gt_bboxes,
vector<vector<float> >* all_conf_loss);
#endif //USE_CUDA
} // namespace caffe
|
d796bd5898be79463bcf3c3cef5b0b3293a5fa17.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from sparse-iter/blas/zmergeidr.cu normal z -> s, Tue Feb 9 16:05:43 2016
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_s
// These routines merge multiple kernels from sidr into one.
/* -------------------------------------------------------------------------- */
__global__ void
magma_sidr_smoothing_1_kernel(
int num_rows,
int num_cols,
float *drs,
float *dr,
float *dt )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
dt[ i+j*num_rows ] = drs[ i+j*num_rows ] - dr[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
Merges multiple operations into one kernel:
dt = drs - dr
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
drs magmaFloat_ptr
vector
@param[in]
dr magmaFloat_ptr
vector
@param[in,out]
dt magmaFloat_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_sidr_smoothing_1(
magma_int_t num_rows,
magma_int_t num_cols,
magmaFloat_ptr drs,
magmaFloat_ptr dr,
magmaFloat_ptr dt,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_sidr_smoothing_1_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, drs, dr, dt );
return MAGMA_SUCCESS;
}
__global__ void
magma_sidr_smoothing_2_kernel(
int num_rows,
int num_cols,
float omega,
float *dx,
float *dxs )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
dxs[ i+j*num_rows ] = dxs[ i+j*num_rows ] + omega * dxs[ i+j*num_rows ]
- omega * dx[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
Merges multiple operations into one kernel:
dxs = dxs - gamma*(dxs-dx)
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
omega float
scalar
@param[in]
dx magmaFloat_ptr
vector
@param[in,out]
dxs magmaFloat_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_sidr_smoothing_2(
magma_int_t num_rows,
magma_int_t num_cols,
float omega,
magmaFloat_ptr dx,
magmaFloat_ptr dxs,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_sidr_smoothing_2_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, omega, dx, dxs);
return MAGMA_SUCCESS;
}
| d796bd5898be79463bcf3c3cef5b0b3293a5fa17.cu | /*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from sparse-iter/blas/zmergeidr.cu normal z -> s, Tue Feb 9 16:05:43 2016
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_s
// These routines merge multiple kernels from sidr into one.
/* -------------------------------------------------------------------------- */
__global__ void
magma_sidr_smoothing_1_kernel(
int num_rows,
int num_cols,
float *drs,
float *dr,
float *dt )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
dt[ i+j*num_rows ] = drs[ i+j*num_rows ] - dr[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
Merges multiple operations into one kernel:
dt = drs - dr
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
drs magmaFloat_ptr
vector
@param[in]
dr magmaFloat_ptr
vector
@param[in,out]
dt magmaFloat_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_sidr_smoothing_1(
magma_int_t num_rows,
magma_int_t num_cols,
magmaFloat_ptr drs,
magmaFloat_ptr dr,
magmaFloat_ptr dt,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_sidr_smoothing_1_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, drs, dr, dt );
return MAGMA_SUCCESS;
}
__global__ void
magma_sidr_smoothing_2_kernel(
int num_rows,
int num_cols,
float omega,
float *dx,
float *dxs )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
dxs[ i+j*num_rows ] = dxs[ i+j*num_rows ] + omega * dxs[ i+j*num_rows ]
- omega * dx[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
Merges multiple operations into one kernel:
dxs = dxs - gamma*(dxs-dx)
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
omega float
scalar
@param[in]
dx magmaFloat_ptr
vector
@param[in,out]
dxs magmaFloat_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_sidr_smoothing_2(
magma_int_t num_rows,
magma_int_t num_cols,
float omega,
magmaFloat_ptr dx,
magmaFloat_ptr dxs,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_sidr_smoothing_2_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, omega, dx, dxs);
return MAGMA_SUCCESS;
}
|
cd4060d84b364cc2042809165ac5063a3071e407.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hoomd/ParticleData.cuh>
#include <hoomd/HOOMDMath.h>
extern __shared__ unsigned int coords[];
__global__ void gpu_update_grid_kernel(unsigned int num_elements,
unsigned int *lengths,
unsigned int dim,
Scalar *current_val,
Scalar *grid,
Scalar *cv_min,
Scalar *cv_max,
Scalar *cv_sigma_inv,
Scalar scal,
Scalar W,
Scalar T)
{
unsigned int grid_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (grid_idx >= num_elements) return;
// obtain d-dimensional coordinates
unsigned int factor = 1;
for (int j = 1; j < dim; j++)
factor *= lengths[j-1];
unsigned int rest = grid_idx;
for (int i = dim-1; i >= 0; i--)
{
unsigned int c = rest/factor;
coords[i+dim*threadIdx.x] = c;
rest -= c*factor;
if (i >0) factor /= lengths[i-1];
}
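// Illustration: with dim = 2 and lengths = {4, 5}, grid_idx = 13 decomposes
// into coords = {1, 3}, since 13 = 1 + 3*4 (the first collective variable
// varies fastest in the flattened grid).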
Scalar gauss_exp = Scalar(0.0);
// evaluate Gaussian on grid point
for (unsigned int cv_i = 0; cv_i < dim; ++cv_i)
{
Scalar min_i = cv_min[cv_i];
Scalar max_i = cv_max[cv_i];
Scalar delta_i = (max_i - min_i)/(Scalar)(lengths[cv_i] - 1);
Scalar val_i = min_i + (Scalar)coords[cv_i+dim*threadIdx.x]*delta_i;
Scalar d_i = val_i - current_val[cv_i];
for (unsigned int cv_j = 0; cv_j < dim; ++cv_j)
{
Scalar min_j = cv_min[cv_j];
Scalar max_j = cv_max[cv_j];
Scalar delta_j = (max_j - min_j)/(Scalar)(lengths[cv_j] - 1);
Scalar val_j = min_j + (Scalar)coords[cv_j+dim*threadIdx.x]*delta_j;
Scalar d_j = val_j - current_val[cv_j];
Scalar sigma_inv_ij = cv_sigma_inv[cv_i*dim+cv_j];
gauss_exp -= d_i*d_j*Scalar(1.0/2.0)*(sigma_inv_ij*sigma_inv_ij);
}
}
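// At this point gauss_exp = -1/2 * sum_{i,j} d_i * d_j * cv_sigma_inv[i*dim+j]^2,
// i.e. the exponent of the Gaussian hill evaluated at this grid point.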
Scalar gauss = expf(gauss_exp);
// add Gaussian to grid
grid[grid_idx] += W*scal*gauss;
}
hipError_t gpu_update_grid(unsigned int num_elements,
unsigned int *d_lengths,
unsigned int dim,
Scalar *d_current_val,
Scalar *d_grid,
Scalar *d_cv_min,
Scalar *d_cv_max,
Scalar *d_cv_sigma_inv,
Scalar scal,
Scalar W,
Scalar T)
{
unsigned int block_size = 512;
unsigned int smem_size = dim*sizeof(unsigned int)*block_size;
hipLaunchKernelGGL(( gpu_update_grid_kernel), dim3(num_elements/block_size+1), dim3(block_size), smem_size, 0, num_elements,
d_lengths,
dim,
d_current_val,
d_grid,
d_cv_min,
d_cv_max,
d_cv_sigma_inv,
scal,
W,
T);
return hipSuccess;
}
__global__ void gpu_update_histograms_kernel(Scalar val,
Scalar cv_min,
Scalar delta,
unsigned int num_points,
Scalar sigma,
bool state,
Scalar *histogram,
Scalar *histogram_plus)
{
unsigned int grid_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (grid_idx >= num_points) return;
Scalar norm = Scalar(1.0)/sqrtf(Scalar(2.0*M_PI)*sigma*sigma);
Scalar val_grid = cv_min + grid_idx*delta;
Scalar d = val - val_grid;
Scalar gauss_exp = d*d/Scalar(2.0)/sigma/sigma;
Scalar gauss = norm*expf(-gauss_exp);
// add Gaussian to grid
histogram[grid_idx] += gauss;
if (state == true)
histogram_plus[grid_idx] += gauss;
}
hipError_t gpu_update_histograms(Scalar val,
Scalar cv_min,
Scalar delta,
unsigned int num_points,
Scalar sigma,
bool state,
Scalar *d_histogram,
Scalar *d_histogram_plus)
{
unsigned int block_size = 512;
hipLaunchKernelGGL(( gpu_update_histograms_kernel), dim3(num_points/block_size+1), dim3(block_size), 0, 0, val,
cv_min,
delta,
num_points,
sigma,
state,
d_histogram,
d_histogram_plus);
return hipSuccess;
}
| cd4060d84b364cc2042809165ac5063a3071e407.cu | #include <hoomd/ParticleData.cuh>
#include <hoomd/HOOMDMath.h>
extern __shared__ unsigned int coords[];
__global__ void gpu_update_grid_kernel(unsigned int num_elements,
unsigned int *lengths,
unsigned int dim,
Scalar *current_val,
Scalar *grid,
Scalar *cv_min,
Scalar *cv_max,
Scalar *cv_sigma_inv,
Scalar scal,
Scalar W,
Scalar T)
{
unsigned int grid_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (grid_idx >= num_elements) return;
// obtain d-dimensional coordinates
unsigned int factor = 1;
for (int j = 1; j < dim; j++)
factor *= lengths[j-1];
unsigned int rest = grid_idx;
for (int i = dim-1; i >= 0; i--)
{
unsigned int c = rest/factor;
coords[i+dim*threadIdx.x] = c;
rest -= c*factor;
if (i >0) factor /= lengths[i-1];
}
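// Illustration: with dim = 2 and lengths = {4, 5}, grid_idx = 13 decomposes
// into coords = {1, 3}, since 13 = 1 + 3*4 (the first collective variable
// varies fastest in the flattened grid).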
Scalar gauss_exp = Scalar(0.0);
// evaluate Gaussian on grid point
for (unsigned int cv_i = 0; cv_i < dim; ++cv_i)
{
Scalar min_i = cv_min[cv_i];
Scalar max_i = cv_max[cv_i];
Scalar delta_i = (max_i - min_i)/(Scalar)(lengths[cv_i] - 1);
Scalar val_i = min_i + (Scalar)coords[cv_i+dim*threadIdx.x]*delta_i;
Scalar d_i = val_i - current_val[cv_i];
for (unsigned int cv_j = 0; cv_j < dim; ++cv_j)
{
Scalar min_j = cv_min[cv_j];
Scalar max_j = cv_max[cv_j];
Scalar delta_j = (max_j - min_j)/(Scalar)(lengths[cv_j] - 1);
Scalar val_j = min_j + (Scalar)coords[cv_j+dim*threadIdx.x]*delta_j;
Scalar d_j = val_j - current_val[cv_j];
Scalar sigma_inv_ij = cv_sigma_inv[cv_i*dim+cv_j];
gauss_exp -= d_i*d_j*Scalar(1.0/2.0)*(sigma_inv_ij*sigma_inv_ij);
}
}
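// At this point gauss_exp = -1/2 * sum_{i,j} d_i * d_j * cv_sigma_inv[i*dim+j]^2,
// i.e. the exponent of the Gaussian hill evaluated at this grid point.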
Scalar gauss = expf(gauss_exp);
// add Gaussian to grid
grid[grid_idx] += W*scal*gauss;
}
cudaError_t gpu_update_grid(unsigned int num_elements,
unsigned int *d_lengths,
unsigned int dim,
Scalar *d_current_val,
Scalar *d_grid,
Scalar *d_cv_min,
Scalar *d_cv_max,
Scalar *d_cv_sigma_inv,
Scalar scal,
Scalar W,
Scalar T)
{
unsigned int block_size = 512;
unsigned int smem_size = dim*sizeof(unsigned int)*block_size;
gpu_update_grid_kernel<<<num_elements/block_size+1, block_size, smem_size>>>(num_elements,
d_lengths,
dim,
d_current_val,
d_grid,
d_cv_min,
d_cv_max,
d_cv_sigma_inv,
scal,
W,
T);
return cudaSuccess;
}
__global__ void gpu_update_histograms_kernel(Scalar val,
Scalar cv_min,
Scalar delta,
unsigned int num_points,
Scalar sigma,
bool state,
Scalar *histogram,
Scalar *histogram_plus)
{
unsigned int grid_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (grid_idx >= num_points) return;
Scalar norm = Scalar(1.0)/sqrtf(Scalar(2.0*M_PI)*sigma*sigma);
Scalar val_grid = cv_min + grid_idx*delta;
Scalar d = val - val_grid;
Scalar gauss_exp = d*d/Scalar(2.0)/sigma/sigma;
Scalar gauss = norm*expf(-gauss_exp);
// add Gaussian to grid
histogram[grid_idx] += gauss;
if (state == true)
histogram_plus[grid_idx] += gauss;
}
cudaError_t gpu_update_histograms(Scalar val,
Scalar cv_min,
Scalar delta,
unsigned int num_points,
Scalar sigma,
bool state,
Scalar *d_histogram,
Scalar *d_histogram_plus)
{
unsigned int block_size = 512;
gpu_update_histograms_kernel<<<num_points/block_size+1, block_size>>>(val,
cv_min,
delta,
num_points,
sigma,
state,
d_histogram,
d_histogram_plus);
return cudaSuccess;
}
|
37bdd7860090dafa1c796587b837292f79611863.hip | // !!! This is a file automatically generated by hipify!!!
// Includes
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 640
// Variables
texture<float,1,hipReadModeElementType> texmem1;
texture<float,1,hipReadModeElementType> texmem2;
texture<float,1,hipReadModeElementType> texmem3;
texture<float,1,hipReadModeElementType> texmem4;
__constant__ float ConstArray1[THREADS_PER_BLOCK];
__constant__ float ConstArray2[THREADS_PER_BLOCK];
// Functions
void CleanupResources(void);
void RandomInit_int(float*, int);
void RandomInit_fp(float*, int);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal1(float *A, float *B, int N, int iterations)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
float Value1=0;
float Value2=0;
__device__ __shared__ float I1[THREADS_PER_BLOCK];
__device__ __shared__ float I2[THREADS_PER_BLOCK];
I1[tid%THREADS_PER_BLOCK] = A[tid];
I2[tid%THREADS_PER_BLOCK] = B[tid];
__syncthreads();
float sum = 0.0;
if(tid < N){
for(unsigned i=0; i<iterations; ++i){
sum = tex1Dfetch(texmem1,tid)+B[tid];
for(unsigned j=1; j<=2; ++j){
sum+=tex1Dfetch(texmem1,tid*j);
Value1 +=I1[(i+j)%THREADS_PER_BLOCK];
Value2 += I2[(i+j)%THREADS_PER_BLOCK]* I1[(i+j)%THREADS_PER_BLOCK]+Value1;
sum+=tex1Dfetch(texmem2,tid*j)+B[tid]+Value2;
Value1 += sqrt(abs(sum))+A[tid];
Value2 += ConstArray2[(tid)%THREADS_PER_BLOCK]* I2[(i+j)%THREADS_PER_BLOCK];
sum/=tex1Dfetch(texmem4,tid*j);
}
A[tid*2] = sum+Value1;
B[tid] = A[tid*2]+A[tid];
}
}
}
__global__ void PowerKernalEmpty(unsigned* C, int iterations)
{
unsigned id = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
__syncthreads();
// Excessive Mod/Div Operations
for(unsigned long k=0; k<iterations*(blockDim.x + 299);k++) {
//Value1=(I1)+k;
//Value2=(I2)+k;
//Value3=(Value2)+k;
//Value2=(Value1)+k;
/*
__asm volatile (
"B0: bra.uni B1;\n\t"
"B1: bra.uni B2;\n\t"
"B2: bra.uni B3;\n\t"
"B3: bra.uni B4;\n\t"
"B4: bra.uni B5;\n\t"
"B5: bra.uni B6;\n\t"
"B6: bra.uni B7;\n\t"
"B7: bra.uni B8;\n\t"
"B8: bra.uni B9;\n\t"
"B9: bra.uni B10;\n\t"
"B10: bra.uni B11;\n\t"
"B11: bra.uni B12;\n\t"
"B12: bra.uni B13;\n\t"
"B13: bra.uni B14;\n\t"
"B14: bra.uni B15;\n\t"
"B15: bra.uni B16;\n\t"
"B16: bra.uni B17;\n\t"
"B17: bra.uni B18;\n\t"
"B18: bra.uni B19;\n\t"
"B19: bra.uni B20;\n\t"
"B20: bra.uni B21;\n\t"
"B21: bra.uni B22;\n\t"
"B22: bra.uni B23;\n\t"
"B23: bra.uni B24;\n\t"
"B24: bra.uni B25;\n\t"
"B25: bra.uni B26;\n\t"
"B26: bra.uni B27;\n\t"
"B27: bra.uni B28;\n\t"
"B28: bra.uni B29;\n\t"
"B29: bra.uni B30;\n\t"
"B30: bra.uni B31;\n\t"
"B31: bra.uni LOOP;\n\t"
"LOOP:"
);
*/
}
C[id]=id;
__syncthreads();
}
// Host code
float *h_A1, *h_A2, *h_A3;
float *d_A1, *d_A2, *d_A3;
int main(int argc, char** argv)
{
int iterations;
if (argc != 2){
fprintf(stderr,"usage: %s #iterations\n",argv[0]);
exit(1);
}
else{
iterations = atoi(argv[1]);
}
printf("Power Microbenchmark with %d iterations\n",iterations);
float array1[THREADS_PER_BLOCK];
srand(time(0));
for(int i=0; i<THREADS_PER_BLOCK;i++){
array1[i] = (float)rand() / (float)RAND_MAX;
}
float array2[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
array2[i] = (float)rand() / (float)RAND_MAX;
}
hipMemcpyToSymbol(ConstArray1, array1, sizeof(float) * THREADS_PER_BLOCK );
hipMemcpyToSymbol(ConstArray2, array2, sizeof(float) * THREADS_PER_BLOCK );
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS*2;
// Allocate input vectors h_A and h_B in host memory
size_t size1 = N * sizeof(float);
h_A1 = (float*)malloc(size1);
if (h_A1 == 0) CleanupResources();
h_A2 = (float*)malloc(size1);
if (h_A2 == 0) CleanupResources();
float *host_texture1 = (float*) malloc(size1);
for (int i=0; i< N; i++) {
host_texture1[i] = i;
}
float *device_texture1;
float *device_texture2;
float *device_texture3;
float *device_texture4;
hipMalloc((void**) &device_texture1, size1);
hipMalloc((void**) &device_texture2, size1);
hipMalloc((void**) &device_texture3, size1);
hipMalloc((void**) &device_texture4, size1);
hipMemcpy(device_texture1, host_texture1, size1, hipMemcpyHostToDevice);
hipMemcpy(device_texture2, host_texture1, size1, hipMemcpyHostToDevice);
hipMemcpy(device_texture3, host_texture1, size1, hipMemcpyHostToDevice);
hipMemcpy(device_texture4, host_texture1, size1, hipMemcpyHostToDevice);
hipBindTexture(0, texmem1, device_texture1, size1);
hipBindTexture(0, texmem2, device_texture2, size1);
hipBindTexture(0, texmem3, device_texture3, size1);
hipBindTexture(0, texmem4, device_texture4, size1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
// Initialize input vectors
RandomInit_fp(h_A1, N);
RandomInit_fp(h_A2, N);
// Allocate vectors in device memory
checkCudaErrors( hipMalloc((void**)&d_A1, size1) );
checkCudaErrors( hipMalloc((void**)&d_A2, size1) );
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A1, h_A1, size1, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_A2, h_A2, size1, hipMemcpyHostToDevice) );
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
hipEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
hipDeviceSynchronize();
checkCudaErrors(hipEventRecord(start));
hipLaunchKernelGGL(( PowerKernal1), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A1, d_A2, N, iterations);
checkCudaErrors(hipEventRecord(stop));
checkCudaErrors(hipEventSynchronize(stop));
checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop));
printf("gpu execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
hipDeviceSynchronize();
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A1)
hipFree(d_A1);
if (d_A2)
hipFree(d_A2);
if (d_A3)
hipFree(d_A3);
// Free host memory
if (h_A1)
free(h_A1);
if (h_A2)
free(h_A2);
if (h_A3)
free(h_A3);
}
// Fills an array with random float entries.
void RandomInit_int(float* data, int n)
{
srand((unsigned)time(0));
for (int i = 0; i < n; ++i){
data[i] = (float)rand() / (float)RAND_MAX;
}
}
void RandomInit_fp(float* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = (float)rand() / (float)RAND_MAX;
}
} | 37bdd7860090dafa1c796587b837292f79611863.cu | // Includes
#include <stdio.h>
#include <stdlib.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 640
// Variables
texture<float,1,cudaReadModeElementType> texmem1;
texture<float,1,cudaReadModeElementType> texmem2;
texture<float,1,cudaReadModeElementType> texmem3;
texture<float,1,cudaReadModeElementType> texmem4;
__constant__ float ConstArray1[THREADS_PER_BLOCK];
__constant__ float ConstArray2[THREADS_PER_BLOCK];
// Functions
void CleanupResources(void);
void RandomInit_int(float*, int);
void RandomInit_fp(float*, int);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal1(float *A, float *B, int N, int iterations)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
float Value1=0;
float Value2=0;
__device__ __shared__ float I1[THREADS_PER_BLOCK];
__device__ __shared__ float I2[THREADS_PER_BLOCK];
I1[tid%THREADS_PER_BLOCK] = A[tid];
I2[tid%THREADS_PER_BLOCK] = B[tid];
__syncthreads();
float sum = 0.0;
if(tid < N){
for(unsigned i=0; i<iterations; ++i){
sum = tex1Dfetch(texmem1,tid)+B[tid];
for(unsigned j=1; j<=2; ++j){
sum+=tex1Dfetch(texmem1,tid*j);
Value1 +=I1[(i+j)%THREADS_PER_BLOCK];
Value2 += I2[(i+j)%THREADS_PER_BLOCK]* I1[(i+j)%THREADS_PER_BLOCK]+Value1;
sum+=tex1Dfetch(texmem2,tid*j)+B[tid]+Value2;
Value1 += sqrt(abs(sum))+A[tid];
Value2 += ConstArray2[(tid)%THREADS_PER_BLOCK]* I2[(i+j)%THREADS_PER_BLOCK];
sum/=tex1Dfetch(texmem4,tid*j);
}
A[tid*2] = sum+Value1;
B[tid] = A[tid*2]+A[tid];
}
}
}
__global__ void PowerKernalEmpty(unsigned* C, int iterations)
{
unsigned id = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
__syncthreads();
// Excessive Mod/Div Operations
for(unsigned long k=0; k<iterations*(blockDim.x + 299);k++) {
//Value1=(I1)+k;
//Value2=(I2)+k;
//Value3=(Value2)+k;
//Value2=(Value1)+k;
/*
__asm volatile (
"B0: bra.uni B1;\n\t"
"B1: bra.uni B2;\n\t"
"B2: bra.uni B3;\n\t"
"B3: bra.uni B4;\n\t"
"B4: bra.uni B5;\n\t"
"B5: bra.uni B6;\n\t"
"B6: bra.uni B7;\n\t"
"B7: bra.uni B8;\n\t"
"B8: bra.uni B9;\n\t"
"B9: bra.uni B10;\n\t"
"B10: bra.uni B11;\n\t"
"B11: bra.uni B12;\n\t"
"B12: bra.uni B13;\n\t"
"B13: bra.uni B14;\n\t"
"B14: bra.uni B15;\n\t"
"B15: bra.uni B16;\n\t"
"B16: bra.uni B17;\n\t"
"B17: bra.uni B18;\n\t"
"B18: bra.uni B19;\n\t"
"B19: bra.uni B20;\n\t"
"B20: bra.uni B21;\n\t"
"B21: bra.uni B22;\n\t"
"B22: bra.uni B23;\n\t"
"B23: bra.uni B24;\n\t"
"B24: bra.uni B25;\n\t"
"B25: bra.uni B26;\n\t"
"B26: bra.uni B27;\n\t"
"B27: bra.uni B28;\n\t"
"B28: bra.uni B29;\n\t"
"B29: bra.uni B30;\n\t"
"B30: bra.uni B31;\n\t"
"B31: bra.uni LOOP;\n\t"
"LOOP:"
);
*/
}
C[id]=id;
__syncthreads();
}
// Host code
float *h_A1, *h_A2, *h_A3;
float *d_A1, *d_A2, *d_A3;
int main(int argc, char** argv)
{
int iterations;
if (argc != 2){
fprintf(stderr,"usage: %s #iterations\n",argv[0]);
exit(1);
}
else{
iterations = atoi(argv[1]);
}
printf("Power Microbenchmark with %d iterations\n",iterations);
float array1[THREADS_PER_BLOCK];
srand(time(0));
for(int i=0; i<THREADS_PER_BLOCK;i++){
array1[i] = (float)rand() / (float)RAND_MAX;
}
float array2[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
array2[i] = (float)rand() / (float)RAND_MAX;
}
cudaMemcpyToSymbol(ConstArray1, array1, sizeof(float) * THREADS_PER_BLOCK );
cudaMemcpyToSymbol(ConstArray2, array2, sizeof(float) * THREADS_PER_BLOCK );
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS*2;
// Allocate input vectors h_A and h_B in host memory
size_t size1 = N * sizeof(float);
h_A1 = (float*)malloc(size1);
if (h_A1 == 0) CleanupResources();
h_A2 = (float*)malloc(size1);
if (h_A2 == 0) CleanupResources();
float *host_texture1 = (float*) malloc(size1);
for (int i=0; i< N; i++) {
host_texture1[i] = i;
}
float *device_texture1;
float *device_texture2;
float *device_texture3;
float *device_texture4;
cudaMalloc((void**) &device_texture1, size1);
cudaMalloc((void**) &device_texture2, size1);
cudaMalloc((void**) &device_texture3, size1);
cudaMalloc((void**) &device_texture4, size1);
cudaMemcpy(device_texture1, host_texture1, size1, cudaMemcpyHostToDevice);
cudaMemcpy(device_texture2, host_texture1, size1, cudaMemcpyHostToDevice);
cudaMemcpy(device_texture3, host_texture1, size1, cudaMemcpyHostToDevice);
cudaMemcpy(device_texture4, host_texture1, size1, cudaMemcpyHostToDevice);
cudaBindTexture(0, texmem1, device_texture1, size1);
cudaBindTexture(0, texmem2, device_texture2, size1);
cudaBindTexture(0, texmem3, device_texture3, size1);
cudaBindTexture(0, texmem4, device_texture4, size1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
// Initialize input vectors
RandomInit_fp(h_A1, N);
RandomInit_fp(h_A2, N);
// Allocate vectors in device memory
checkCudaErrors( cudaMalloc((void**)&d_A1, size1) );
checkCudaErrors( cudaMalloc((void**)&d_A2, size1) );
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A1, h_A1, size1, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_A2, h_A2, size1, cudaMemcpyHostToDevice) );
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
cudaEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
cudaThreadSynchronize();
checkCudaErrors(cudaEventRecord(start));
PowerKernal1<<<dimGrid,dimBlock>>>(d_A1, d_A2, N, iterations);
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop));
printf("gpu execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
cudaThreadSynchronize();
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A1)
cudaFree(d_A1);
if (d_A2)
cudaFree(d_A2);
if (d_A3)
cudaFree(d_A3);
// Free host memory
if (h_A1)
free(h_A1);
if (h_A2)
free(h_A2);
if (h_A3)
free(h_A3);
}
// Fills an array with random float entries.
void RandomInit_int(float* data, int n)
{
srand((unsigned)time(0));
for (int i = 0; i < n; ++i){
data[i] = (float)rand() / (float)RAND_MAX;
}
}
void RandomInit_fp(float* data, int n)
{
for (int i = 0; i < n; ++i){
	data[i] = (float)rand() / RAND_MAX;	// cast avoids integer division, which would always yield 0
}
} |
c99adb1764577c92e632303f22111aedc63464dd.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <hip/hip_runtime.h>
//#define SHARED_SIZE_LIMIT 1024
#define NUM_THREADS 1024
#define NUM_BLOCKS 32768
#define NUM_VALS NUM_THREADS*NUM_BLOCKS
#define SHARED_SIZE_LIMIT 1024
int random_float() {
return (int)rand()/(int)RAND_MAX;
}
void array_print(int *arr, int length) {
int i;
for (i = 0; i < length; ++i) {
printf("%d ", arr[i]);
}
printf("\n");
}
void array_fill(int *v) {
int i;
for (i = 0; i < NUM_VALS; i++) {
v[i] = rand();
}
}
void test (int *v) {
int i;
int val = v[0];
for (i = 1; i < NUM_VALS; ++i) {
if (val < v[i]) {
printf("val: %d, v[%d]: %d.\n", val, i, v[i]);
printf("TEST FAIL\n\n");
return;
} else {
printf("val: %d, v[%d]: %d.\n", val, i, v[i]);
val = v[i];
}
}
printf("TEST OK\n\n");
}
/*
void array_fill(int *arr, int length) {
srand(time(NULL));
int i;
for (i = 0; i < length; ++i) {
arr[i] = length-i;//random_float();
}
}*/
void array_copy(int *dst, int *src, int length) {
int i;
for (i=0; i<length; ++i) {
dst[i] = src[i];
}
}
// Compare two elements and, if they are in decreasing order, swap them.
__device__ inline void comparator(int &A, int &B, uint dir) {
int temp;
if ((A <= B) == dir) {
temp = A;
A = B;
B = temp;
}
}
/* The point of this bitonic sort is that we use shared memory.
So, even though the main idea is the same, what we do is
compare elements held in the different shared-memory locations so that,
in the end, the vector comes out fully sorted.
*/
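/* Index math used by the compare-exchange loops below: each of the
   SHARED_SIZE_LIMIT/2 threads owns one pair (pos, pos + stride), where
       pos = 2*tx - (tx & (stride - 1));
   e.g. for stride = 4, threads 0..3 handle the pairs (0,4)(1,5)(2,6)(3,7) and
   threads 4..7 handle (8,12)(9,13)(10,14)(11,15), so each consecutive group of
   stride threads covers one contiguous block of width 2*stride. */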
__global__ void bitonicSortShared(int *dev_values)
{
int tx = threadIdx.x;
int bx = blockIdx.x;
int index = blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
__shared__ int sh_values[SHARED_SIZE_LIMIT];
sh_values[tx] = dev_values[index];
sh_values[tx + (SHARED_SIZE_LIMIT/2)] = dev_values[index + (SHARED_SIZE_LIMIT/2)];
for (uint size = 2; size < SHARED_SIZE_LIMIT; size <<= 1) {
uint ddd = (tx & (size / 2)) == 0;//direction: ascending or descending
for (uint stride = size/2; stride > 0; stride >>= 1) {
__syncthreads();
uint pos = 2 * tx - (tx & (stride - 1));
comparator(sh_values[pos], sh_values[pos + stride], ddd);
}
}
uint ddd = ((bx&1) == 0); // uint ddd = ((bx&1)==0);
{
for (uint stride = SHARED_SIZE_LIMIT/2; stride > 0; stride >>= 1) {
__syncthreads();
uint pos = 2 * tx - (tx & (stride - 1));
comparator(sh_values[pos + 0], sh_values[pos + stride], ddd);
}
}
__syncthreads();
dev_values[index] = sh_values[tx];
dev_values[index+(SHARED_SIZE_LIMIT/2)] = sh_values[tx+(SHARED_SIZE_LIMIT/2)];
}
void bitonic_sort(int *values)
{
int *dev_values;
size_t size = NUM_VALS * sizeof(int);
hipMalloc((void**) &dev_values, size);
hipMemcpy(dev_values, values, size, hipMemcpyHostToDevice);
dim3 numBlocks(NUM_BLOCKS, 1);
dim3 numThreads(NUM_THREADS, 1);
hipDeviceSynchronize();
uint blockCount = NUM_VALS / SHARED_SIZE_LIMIT;
uint threadCount = SHARED_SIZE_LIMIT / 2;
printf("blockCount=%d, threadCount=%d, SHARED_SIZE_LIMIT=%d\n", blockCount, threadCount, SHARED_SIZE_LIMIT);
hipLaunchKernelGGL(( bitonicSortShared), dim3(blockCount), dim3(threadCount), 0, 0, dev_values);
hipDeviceSynchronize();
hipMemcpy(values, dev_values, size, hipMemcpyDeviceToHost);
hipFree(dev_values);
}
int main(void)
{
//int *values = (int*) malloc( NUM_VALS * sizeof(int));
//int *ref = (int*) malloc( NUM_VALS * sizeof(int));
int *host_values;
hipHostMalloc( &host_values, NUM_VALS * sizeof(int));
// hipHostMalloc( &original_values, numBytes);
float TiempoKernel;
hipEvent_t E1, E2;
hipEventCreate(&E1);
hipEventCreate(&E2);
array_fill(host_values);
hipEventRecord(E1, 0);
hipEventSynchronize(E1);
hipFuncSetCacheConfig(bitonicSortShared, hipFuncCachePreferL1);
bitonic_sort(host_values);
test(host_values);
hipEventRecord(E2, 0);
hipEventSynchronize(E2);
hipEventElapsedTime(&TiempoKernel, E1, E2);
printf("Tiempo Kernel: %4.6f milseg\n", TiempoKernel);
	hipHostFree(host_values);	// pinned memory from hipHostMalloc must be released with hipHostFree
}
| c99adb1764577c92e632303f22111aedc63464dd.cu | #include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <cuda.h>
//#define SHARED_SIZE_LIMIT 1024
#define NUM_THREADS 1024
#define NUM_BLOCKS 32768
#define NUM_VALS NUM_THREADS*NUM_BLOCKS
#define SHARED_SIZE_LIMIT 1024
int random_float() {
return (int)rand()/(int)RAND_MAX;
}
void array_print(int *arr, int length) {
int i;
for (i = 0; i < length; ++i) {
printf("%d ", arr[i]);
}
printf("\n");
}
void array_fill(int *v) {
int i;
for (i = 0; i < NUM_VALS; i++) {
v[i] = rand();
}
}
void test (int *v) {
int i;
int val = v[0];
for (i = 1; i < NUM_VALS; ++i) {
if (val < v[i]) {
printf("val: %d, v[%d]: %d.\n", val, i, v[i]);
printf("TEST FAIL\n\n");
return;
} else {
printf("val: %d, v[%d]: %d.\n", val, i, v[i]);
val = v[i];
}
}
printf("TEST OK\n\n");
}
/*
void array_fill(int *arr, int length) {
srand(time(NULL));
int i;
for (i = 0; i < length; ++i) {
arr[i] = length-i;//random_float();
}
}*/
void array_copy(int *dst, int *src, int length) {
int i;
for (i=0; i<length; ++i) {
dst[i] = src[i];
}
}
// Compare two elements and, if they are in decreasing order, swap them.
__device__ inline void comparator(int &A, int &B, uint dir) {
int temp;
if ((A <= B) == dir) {
temp = A;
A = B;
B = temp;
}
}
/* The point of this bitonic sort is that we use shared memory.
So, even though the main idea is the same, what we do is
compare elements held in the different shared-memory locations so that,
in the end, the vector comes out fully sorted.
*/
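/* Index math used by the compare-exchange loops below: each of the
   SHARED_SIZE_LIMIT/2 threads owns one pair (pos, pos + stride), where
       pos = 2*tx - (tx & (stride - 1));
   e.g. for stride = 4, threads 0..3 handle the pairs (0,4)(1,5)(2,6)(3,7) and
   threads 4..7 handle (8,12)(9,13)(10,14)(11,15), so each consecutive group of
   stride threads covers one contiguous block of width 2*stride. */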
__global__ void bitonicSortShared(int *dev_values)
{
int tx = threadIdx.x;
int bx = blockIdx.x;
int index = blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
__shared__ int sh_values[SHARED_SIZE_LIMIT];
sh_values[tx] = dev_values[index];
sh_values[tx + (SHARED_SIZE_LIMIT/2)] = dev_values[index + (SHARED_SIZE_LIMIT/2)];
for (uint size = 2; size < SHARED_SIZE_LIMIT; size <<= 1) {
uint ddd = (tx & (size / 2)) == 0;//direction: ascending or descending
for (uint stride = size/2; stride > 0; stride >>= 1) {
__syncthreads();
uint pos = 2 * tx - (tx & (stride - 1));
comparator(sh_values[pos], sh_values[pos + stride], ddd);
}
}
uint ddd = ((bx&1) == 0); // uint ddd = ((bx&1)==0);
{
for (uint stride = SHARED_SIZE_LIMIT/2; stride > 0; stride >>= 1) {
__syncthreads();
uint pos = 2 * tx - (tx & (stride - 1));
comparator(sh_values[pos + 0], sh_values[pos + stride], ddd);
}
}
__syncthreads();
dev_values[index] = sh_values[tx];
dev_values[index+(SHARED_SIZE_LIMIT/2)] = sh_values[tx+(SHARED_SIZE_LIMIT/2)];
}
void bitonic_sort(int *values)
{
int *dev_values;
size_t size = NUM_VALS * sizeof(int);
cudaMalloc((void**) &dev_values, size);
cudaMemcpy(dev_values, values, size, cudaMemcpyHostToDevice);
dim3 numBlocks(NUM_BLOCKS, 1);
dim3 numThreads(NUM_THREADS, 1);
cudaDeviceSynchronize();
uint blockCount = NUM_VALS / SHARED_SIZE_LIMIT;
uint threadCount = SHARED_SIZE_LIMIT / 2;
printf("blockCount=%d, threadCount=%d, SHARED_SIZE_LIMIT=%d\n", blockCount, threadCount, SHARED_SIZE_LIMIT);
bitonicSortShared<<<blockCount, threadCount>>>(dev_values);
cudaDeviceSynchronize();
cudaMemcpy(values, dev_values, size, cudaMemcpyDeviceToHost);
cudaFree(dev_values);
}
int main(void)
{
//int *values = (int*) malloc( NUM_VALS * sizeof(int));
//int *ref = (int*) malloc( NUM_VALS * sizeof(int));
int *host_values;
cudaMallocHost( &host_values, NUM_VALS * sizeof(int));
// cudaMallocHost( &original_values, numBytes);
float TiempoKernel;
cudaEvent_t E1, E2;
cudaEventCreate(&E1);
cudaEventCreate(&E2);
array_fill(host_values);
cudaEventRecord(E1, 0);
cudaEventSynchronize(E1);
cudaFuncSetCacheConfig(bitonicSortShared, cudaFuncCachePreferL1);
bitonic_sort(host_values);
test(host_values);
cudaEventRecord(E2, 0);
cudaEventSynchronize(E2);
cudaEventElapsedTime(&TiempoKernel, E1, E2);
printf("Tiempo Kernel: %4.6f milseg\n", TiempoKernel);
cudaFree(host_values);
}
|
8ea5adc23d593e08fea754336079b1ef6386079c.hip | // !!! This is a file automatically generated by hipify!!!
#include "pch.h"
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
#include "hip/hip_fp16.h"
#include "cuda_fp16.hpp"
#include "hip/hip_runtime.h"
extern "C" {
#include "convolutional_layer.h"
#include "batchnorm_layer.h"
#include "gemm.h"
#include "blas.h"
#include "im2col.h"
#include "col2im.h"
#include "utils.h"
#include "hip/hip_runtime.h"
}
half* publicMemory[2] = {0,0};
int pMSize[2] = {0,0};
extern "C" cudnnDataType_t GetDataType();
#ifdef TESTPROGRESS16
float* tempBuffer=0;
float* tempWeight = 0;
int iMaxSize=0;
#endif
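// Grows (never shrinks) two persistent half-precision scratch buffers shared by all layers:
// slot 0 holds layer inputs converted from FP32, slot 1 holds layer outputs before they are
// converted back to FP32.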
void MakeHalfMaxSize(int iGiveSize,int iOutSize)
{
size_t size[2] = {iGiveSize,iOutSize};
for (int cnum = 0; cnum < 2; cnum++)
{
if (pMSize[cnum] < size[cnum])
{
if (publicMemory[cnum])
{
DecGenerateMemory(pMSize[cnum] * sizeof(half));
cuda_free_allType(publicMemory[cnum]);
}
pMSize[cnum] = size[cnum];
publicMemory[cnum]=(half *)cuda_make_short_array(pMSize[cnum]);
}
#ifdef TESTPROGRESS16
if (iMaxSize < pMSize[cnum])
{
iMaxSize = pMSize[cnum];
if (tempBuffer) cuda_free(tempBuffer);
tempBuffer = cuda_make_array(0, iMaxSize);
if (tempWeight) cuda_free_allType(tempWeight);
tempWeight = cuda_make_array(0, iMaxSize);
}
#endif
}
}
__global__ void cuda_f32_to_f16(float* input_f32, size_t size, half* output_f16)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) output_f16[idx] = __float2half(input_f32[idx]);
//if (idx < size) output_f16[idx] = __float2half_rn(input_f32[idx]); // can't be compiled on Linux without casting
// __float2half_ru, __float2half_rd, __float2half_rz, __float2half_rn
//if (idx < size) *((unsigned short *)output_f16 + idx) = __float2half(input_f32[idx]);
}
void cuda_convert_f32_to_f16(float* input_f32, size_t size, half* output_f16) {
cuda_f32_to_f16 << < cuda_gridsize(size), BLOCK,0,get_cuda_stream() >> > (input_f32, size, (half*)output_f16);
check_error(hipPeekAtLastError());
}
__global__ void cuda_f16_to_f32(half* input_f16, size_t size, float* output_f32)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) output_f32[idx] = __half2float(input_f16[idx]);
//if (idx < size) output_f32[idx] = __half2float(*((unsigned short *)input_f16 + idx));
}
void cuda_convert_f16_to_f32(half* input_f16, size_t size, float* output_f32) {
cuda_f16_to_f32 << < cuda_gridsize(size), BLOCK,0,get_cuda_stream() >> > ((half*)input_f16, size, output_f32);
check_error(hipPeekAtLastError());
}
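// Converts the layer's FP32 weights to FP16 on the GPU (and the biases too, unless they must
// stay in FP32), frees the original FP32 buffers, and repoints weights_gpu/biases_gpu at the
// half-precision copies.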
void DealWeightBuffer(convolutional_layer* l)
{
//return;
#ifdef GETDATATYPE
if (GetDataType() != CUDNN_DATA_HALF) return;
#endif
#ifdef DEALWEIGHTBUFFER
	OutPutGPUMemory(l->weights_gpu, l->nweights, 0);
#endif
half* halfWeights = 0;
halfWeights=(half *)cuda_make_short_array(l->nweights);
cuda_convert_f32_to_f16(l->weights_gpu, l->nweights, halfWeights);
#ifdef DEALWEIGHTBUFFER
float* fResult=0;
	check_error(hipMalloc((void**)&fResult, l->nweights * sizeof(float)));
	cuda_convert_f16_to_f32(halfWeights, l->nweights, fResult);
	OutPutGPUMemory(fResult, l->nweights, 0);
#endif
cuda_free(l->weights_gpu);
DecGenerateMemory(l->nweights * sizeof(float));
l->weights_gpu = (float *)halfWeights;
LAYERDATA* layerdata = (LAYERDATA *)l->layerdata;
CONVPROP* prop=(CONVPROP *)layerdata->layerData;
if (prop->bUnSupportBias) return;
half* bias = (half*)cuda_make_short_array(l->n);
cuda_convert_f32_to_f16(l->biases_gpu, l->n, bias);
cuda_free(l->biases_gpu);
DecGenerateMemory(l->n * sizeof(float));
l->biases_gpu = (float*)bias;
}
#ifdef GPUHALFABILITY
__global__ void add_bias_half_kernel(half* output, half* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset >= size) return;
half a = output[(batch * n + filter) * size + offset];
output[(batch * n + filter) * size + offset] =__hadd(a, biases[filter]);
}
void add_bias_half_gpu(half* output, half* biases, int batch, int n, int size)
{
dim3 dimGrid((size - 1) / BLOCK + 1, n, batch);
dim3 dimBlock(BLOCK, 1, 1);
add_bias_half_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
check_error(hipPeekAtLastError());
}
__global__ void activate_array_hardtan_halfadd_kernel(half* output, half* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset >= size) return;
int iOutDex = (batch * n + filter) * size + offset;
half a = output[iOutDex];
half b = __hadd(a, biases[filter]);
	if (__hlt(b, half(-1.0f))) b = half(-1.0f);
	if (__hgt(b, half(1.0f))) b = half(1.0f);
	output[iOutDex] = b;	// clamp to [-1, 1] before storing
//int index = blockIdx.x * blockDim.x + threadIdx.x;
//if (index < n) {
// float a = x[index];
// if (a < -1) a = -1;
// if (a > 1) a = 1;
// x[index] = a;//hardtan_activate_kernel(x[index]);
//}
}
__global__ void activate_array_relu_halfadd_kernel(half* output, half* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset >= size) return;
int iOutDex = (batch * n + filter) * size + offset;
half a = output[iOutDex];
half b = __hadd(a, biases[filter]);
if (__hgt(b, half(0.0f))) output[iOutDex] = b;
else output[iOutDex] = half(0.0f);
//output[iOutDex] = b;
//int index = blockIdx.x * blockDim.x + threadIdx.x;
//if (index < n) {
// float a = x[index];
// x[index] = a * (a > 0);// relu_activate_kernel(x[index]);
//}
}
__global__ void activate_array_leaky_halfadd_kernel(half* output, half* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset >= size) return;
int iOutDex = (batch * n + filter) * size + offset;
half a = output[iOutDex];
half b = __hadd(a, biases[filter]);
if (__hgt(b, half(0.0f))) output[iOutDex] = b;
else output[iOutDex] =__hmul(half(0.1f),b);
//int index = blockIdx.x * blockDim.x + threadIdx.x;
//if (index < n) {
// float a = x[index];
// x[index] = (a > 0) ? a : .1f * a; //leaky_activate_kernel(x[index]);
//}
}
//__global__ void activate_array_selu_halfadd_kernel(half* output, half* biases, int n, int size)
//{
// int offset = blockIdx.x * blockDim.x + threadIdx.x;
// int filter = blockIdx.y;
// int batch = blockIdx.z;
// if (offset >= size) return;
// int iOutDex = (batch * n + filter) * size + offset;
// half a = output[iOutDex];
// half b = __hadd(a, biases[filter]);
// if (__hgt(b, half(0.0f))) output[iOutDex] = b;
// else output[iOutDex] = __hmul(half(0.1f), b);
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// if (index < n) {
// float a = x[index];
// x[index] = (a >= 0) * 1.0507f * a + (a < 0) * 1.0507f * 1.6732f * (expf(a) - 1);
// }
//}
//
//__global__ void activate_array_logistic_halfadd_kernel(half* output, half* biases, int n, int size)
//{
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// if (index < n) {
// float a = x[index];
// x[index] = 1.f / (1.f + expf(-a));
// }
//}
//
//__global__ void activate_array_tanh_halfadd_kernel(half* output, half* biases, int n, int size)
//{
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// if (index < n) {
// float a = x[index];
// x[index] = (2.f / (1 + expf(-2 * a)) - 1);
// }
//}
#endif
void OutPutHalfMemory(half* data, int iSize, char* txt)
{
float* fnewData = cuda_make_array(0, iSize);
cuda_convert_f16_to_f32(data, iSize, fnewData);
OutPutGPUMemory(fnewData, iSize, txt);
}
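// Fused bias-add + activation in half precision: returns immediately when the bias must be
// applied later in FP32, falls back to a plain bias-add when the activation has no fused
// kernel, and otherwise dispatches one of the fused kernels above.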
void add_bias_activation_half_gpu(half* output, half* biases, int batch, int n, int size
,ACTIVATION act,int bUnSupportAct,int bUnsportBias)
{
#ifdef GPUHALFABILITY
if (bUnsportBias) return;
if (bUnSupportAct)
{
add_bias_half_gpu(output, biases, batch, n, size);
return;
}
dim3 dimGrid((size - 1) / BLOCK + 1, n, batch);
dim3 dimBlock(BLOCK, 1, 1);
switch (act)
{
case RELU:
activate_array_relu_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
case LINEAR:
break;
case LEAKY:
activate_array_leaky_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
case HARDTAN:
activate_array_hardtan_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
/* case SELU:
activate_array_selu_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
case LOGISTIC:
activate_array_logistic_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
case TANH:
activate_array_tanh_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;*/
}
check_error(hipPeekAtLastError());
#endif
}
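// Inference-only forward pass intended to run the cuDNN convolution in half precision: the
// input is converted to FP16 when the previous layer produced FP32 (bIn32), the result is
// written to the shared half buffer and converted back to FP32 when the next layer expects
// FP32 (bOut32) or left in half precision in l.output_gpu otherwise, and bias/activation are
// applied in half or in FP32 depending on the layer's support flags.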
void forward_convolutional_layer_gpu_predict_Float16(convolutional_layer l, network net)
{
if (l.binary) {
binarize_weights_gpu(l.weights_gpu, l.n, l.c / l.groups * l.size * l.size, l.binary_weights_gpu);
swap_binary(&l);
}
if (l.xnor) {
binarize_weights_gpu(l.weights_gpu, l.n, l.c / l.groups * l.size * l.size, l.binary_weights_gpu);
swap_binary(&l);
binarize_gpu(net.input_gpu, l.c * l.h * l.w * l.batch, l.binary_input_gpu);
net.input_gpu = l.binary_input_gpu;
}
float one = 1.0f,zero=0.0f;
#ifdef MEMORYDEBUG
printf("gpuInput:0x%x,gpuOutput:0x%x bin:%d,xnor:%d\n", (unsigned int)net.input_gpu, (unsigned int)l.output_gpu, l.binary, l.xnor);
printf("workspace:0x%x,size:%d,", (unsigned int)net.workspace, l.workspace_size);
printf("inputsize:%d,outputSize:%d\n", net.inputs, l.outputs);
#endif
#ifdef FORWARD_CONVOLUTIONAL_LAYER_GPUHALF
OutPutGPUMemory(net.input_gpu, net.inputs,0);
#endif
LAYERDATA* data = (LAYERDATA *)l.layerdata;
CONVPROP* prop = (CONVPROP*)data->layerData;
void* input=0;
void* output = 0;
if (prop->bIn32)
{
cuda_convert_f32_to_f16(net.input_gpu, net.inputs, publicMemory[0]);
input = publicMemory[0];
}
else
{
input = net.input_gpu;
}
if (prop->bOut32)
{
output = publicMemory[1];
}
else
{
output = l.output_gpu;
}
#ifdef GETDATATYPE
float* fa, *fw;
fa = cuda_make_array(0, net.inputs);
fw = cuda_make_array(0, l.nweights);
cuda_convert_f16_to_f32(publicMemory[0], net.inputs, fa);
cuda_convert_f16_to_f32((half *)l.weights_gpu, l.nweights, fw);
OutPutGPUMemory(fa, net.inputs, 0);
OutPutGPUMemory(fw, l.nweights, 0);
#endif
cudnnStatus_t stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
input,
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&zero,
l.dstTensorDesc,
output);
checkcudnnerror(stat);
#ifdef GETDATATYPE
/*if (GetDataType() == CUDNN_DATA_FLOAT)
{
OutPutGPUMemory((float *)publicMemory[1], l.outputs, 0);
cudnnStatus_t stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
net.input_gpu,
publicMemory[0],
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&one,
l.dstTensorDesc,
l.output_gpu);
publicMemory[0]);
checkcudnnerror(stat);
OutPutGPUMemory((float*)publicMemory[0], l.outputs, 0);
stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
net.input_gpu,
publicMemory[0],
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&one,
l.dstTensorDesc,
l.output_gpu);
publicMemory[0]);
checkcudnnerror(stat);
OutPutGPUMemory((float*)l.output_gpu, l.outputs, 0);
cuda_convert_f32_to_f16((float *)publicMemory[1], l.outputs, (half*)publicMemory[0]);
hipError_t stats = hipMemcpy(publicMemory[1], publicMemory[0], l.outputs * sizeof(float), hipMemcpyDeviceToDevice);
}*/
#endif
#ifdef TESTPROGRESS16
if (output == l.output_gpu)
{
hipMemcpy(publicMemory[1], l.output_gpu, l.outputs * sizeof(half), hipMemcpyDeviceToDevice);
}
cuda_convert_f16_to_f32((half*)publicMemory[1], l.outputs, tempBuffer);
cuda_convert_f16_to_f32((half*)l.biases_gpu, l.n, tempWeight);
add_bias_gpu(tempBuffer, tempWeight, l.batch, l.n, l.out_w * l.out_h);
activate_array_ongpu(tempBuffer, l.outputs * l.batch, l.activation);
OutPutGPUMemory(tempBuffer, l.outputs, 0);
exit(0);
cuda_convert_f32_to_f16(tempBuffer, l.outputs, publicMemory[1]);
if (output == l.output_gpu)
{
hipMemcpy(l.output_gpu, publicMemory[1], l.outputs * sizeof(half),hipMemcpyDeviceToDevice);
}
#else
add_bias_activation_half_gpu((half*)output, (half*)l.biases_gpu, l.batch, l.n, l.out_w* l.out_h,l.activation
,prop->bUnSupportActivate,prop->bUnSupportBias);
OutPutHalfMemory((half *)output, l.outputs, "memory.txt");
exit(0);
#endif
if (prop->bOut32)
{
cuda_convert_f16_to_f32((half*)output, l.outputs, l.output_gpu);
}
#ifdef MEMORYDEBUG
printf("End Forword Cudnn\n");
//if (prop->bUnSupportActivate) OutPutGPUMemory(l.output_gpu, l.outputs, 0);
#endif
if(prop->bUnSupportBias) add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w * l.out_h);
if(prop->bUnSupportActivate) activate_array_ongpu(l.output_gpu, l.outputs * l.batch, l.activation);
if (l.binary || l.xnor) swap_binary(&l);
} | 8ea5adc23d593e08fea754336079b1ef6386079c.cu | #include "pch.h"
#include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
#include "cuda_fp16.h"
#include "cuda_fp16.hpp"
#include "cuda.h"
extern "C" {
#include "convolutional_layer.h"
#include "batchnorm_layer.h"
#include "gemm.h"
#include "blas.h"
#include "im2col.h"
#include "col2im.h"
#include "utils.h"
#include "cuda.h"
}
half* publicMemory[2] = {0,0};
int pMSize[2] = {0,0};
extern "C" cudnnDataType_t GetDataType();
#ifdef TESTPROGRESS16
float* tempBuffer=0;
float* tempWeight = 0;
int iMaxSize=0;
#endif
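// Grows (never shrinks) two persistent half-precision scratch buffers shared by all layers:
// slot 0 holds layer inputs converted from FP32, slot 1 holds layer outputs before they are
// converted back to FP32.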
void MakeHalfMaxSize(int iGiveSize,int iOutSize)
{
size_t size[2] = {iGiveSize,iOutSize};
for (int cnum = 0; cnum < 2; cnum++)
{
if (pMSize[cnum] < size[cnum])
{
if (publicMemory[cnum])
{
DecGenerateMemory(pMSize[cnum] * sizeof(half));
cuda_free_allType(publicMemory[cnum]);
}
pMSize[cnum] = size[cnum];
publicMemory[cnum]=(half *)cuda_make_short_array(pMSize[cnum]);
}
#ifdef TESTPROGRESS16
if (iMaxSize < pMSize[cnum])
{
iMaxSize = pMSize[cnum];
if (tempBuffer) cuda_free(tempBuffer);
tempBuffer = cuda_make_array(0, iMaxSize);
if (tempWeight) cuda_free_allType(tempWeight);
tempWeight = cuda_make_array(0, iMaxSize);
}
#endif
}
}
__global__ void cuda_f32_to_f16(float* input_f32, size_t size, half* output_f16)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) output_f16[idx] = __float2half(input_f32[idx]);
//if (idx < size) output_f16[idx] = __float2half_rn(input_f32[idx]); // can't be compiled on Linux without casting
// __float2half_ru, __float2half_rd, __float2half_rz, __float2half_rn
//if (idx < size) *((unsigned short *)output_f16 + idx) = __float2half(input_f32[idx]);
}
void cuda_convert_f32_to_f16(float* input_f32, size_t size, half* output_f16) {
cuda_f32_to_f16 << < cuda_gridsize(size), BLOCK,0,get_cuda_stream() >> > (input_f32, size, (half*)output_f16);
check_error(cudaPeekAtLastError());
}
__global__ void cuda_f16_to_f32(half* input_f16, size_t size, float* output_f32)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) output_f32[idx] = __half2float(input_f16[idx]);
//if (idx < size) output_f32[idx] = __half2float(*((unsigned short *)input_f16 + idx));
}
void cuda_convert_f16_to_f32(half* input_f16, size_t size, float* output_f32) {
cuda_f16_to_f32 << < cuda_gridsize(size), BLOCK,0,get_cuda_stream() >> > ((half*)input_f16, size, output_f32);
check_error(cudaPeekAtLastError());
}
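// Converts the layer's FP32 weights to FP16 on the GPU (and the biases too, unless they must
// stay in FP32), frees the original FP32 buffers, and repoints weights_gpu/biases_gpu at the
// half-precision copies.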
void DealWeightBuffer(convolutional_layer* l)
{
//return;
#ifdef GETDATATYPE
if (GetDataType() != CUDNN_DATA_HALF) return;
#endif
#ifdef DEALWEIGHTBUFFER
	OutPutGPUMemory(l->weights_gpu, l->nweights, 0);
#endif
half* halfWeights = 0;
halfWeights=(half *)cuda_make_short_array(l->nweights);
cuda_convert_f32_to_f16(l->weights_gpu, l->nweights, halfWeights);
#ifdef DEALWEIGHTBUFFER
float* fResult=0;
	check_error(cudaMalloc((void**)&fResult, l->nweights * sizeof(float)));
	cuda_convert_f16_to_f32(halfWeights, l->nweights, fResult);
	OutPutGPUMemory(fResult, l->nweights, 0);
#endif
cuda_free(l->weights_gpu);
DecGenerateMemory(l->nweights * sizeof(float));
l->weights_gpu = (float *)halfWeights;
LAYERDATA* layerdata = (LAYERDATA *)l->layerdata;
CONVPROP* prop=(CONVPROP *)layerdata->layerData;
if (prop->bUnSupportBias) return;
half* bias = (half*)cuda_make_short_array(l->n);
cuda_convert_f32_to_f16(l->biases_gpu, l->n, bias);
cuda_free(l->biases_gpu);
DecGenerateMemory(l->n * sizeof(float));
l->biases_gpu = (float*)bias;
}
#ifdef GPUHALFABILITY
__global__ void add_bias_half_kernel(half* output, half* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset >= size) return;
half a = output[(batch * n + filter) * size + offset];
output[(batch * n + filter) * size + offset] =__hadd(a, biases[filter]);
}
void add_bias_half_gpu(half* output, half* biases, int batch, int n, int size)
{
dim3 dimGrid((size - 1) / BLOCK + 1, n, batch);
dim3 dimBlock(BLOCK, 1, 1);
add_bias_half_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
check_error(cudaPeekAtLastError());
}
__global__ void activate_array_hardtan_halfadd_kernel(half* output, half* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset >= size) return;
int iOutDex = (batch * n + filter) * size + offset;
half a = output[iOutDex];
half b = __hadd(a, biases[filter]);
	if (__hlt(b, half(-1.0f))) b = half(-1.0f);
	if (__hgt(b, half(1.0f))) b = half(1.0f);
	output[iOutDex] = b;	// clamp to [-1, 1] before storing
//int index = blockIdx.x * blockDim.x + threadIdx.x;
//if (index < n) {
// float a = x[index];
// if (a < -1) a = -1;
// if (a > 1) a = 1;
// x[index] = a;//hardtan_activate_kernel(x[index]);
//}
}
__global__ void activate_array_relu_halfadd_kernel(half* output, half* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset >= size) return;
int iOutDex = (batch * n + filter) * size + offset;
half a = output[iOutDex];
half b = __hadd(a, biases[filter]);
if (__hgt(b, half(0.0f))) output[iOutDex] = b;
else output[iOutDex] = half(0.0f);
//output[iOutDex] = b;
//int index = blockIdx.x * blockDim.x + threadIdx.x;
//if (index < n) {
// float a = x[index];
// x[index] = a * (a > 0);// relu_activate_kernel(x[index]);
//}
}
__global__ void activate_array_leaky_halfadd_kernel(half* output, half* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset >= size) return;
int iOutDex = (batch * n + filter) * size + offset;
half a = output[iOutDex];
half b = __hadd(a, biases[filter]);
if (__hgt(b, half(0.0f))) output[iOutDex] = b;
else output[iOutDex] =__hmul(half(0.1f),b);
//int index = blockIdx.x * blockDim.x + threadIdx.x;
//if (index < n) {
// float a = x[index];
// x[index] = (a > 0) ? a : .1f * a; //leaky_activate_kernel(x[index]);
//}
}
//__global__ void activate_array_selu_halfadd_kernel(half* output, half* biases, int n, int size)
//{
// int offset = blockIdx.x * blockDim.x + threadIdx.x;
// int filter = blockIdx.y;
// int batch = blockIdx.z;
// if (offset >= size) return;
// int iOutDex = (batch * n + filter) * size + offset;
// half a = output[iOutDex];
// half b = __hadd(a, biases[filter]);
// if (__hgt(b, half(0.0f))) output[iOutDex] = b;
// else output[iOutDex] = __hmul(half(0.1f), b);
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// if (index < n) {
// float a = x[index];
// x[index] = (a >= 0) * 1.0507f * a + (a < 0) * 1.0507f * 1.6732f * (expf(a) - 1);
// }
//}
//
//__global__ void activate_array_logistic_halfadd_kernel(half* output, half* biases, int n, int size)
//{
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// if (index < n) {
// float a = x[index];
// x[index] = 1.f / (1.f + expf(-a));
// }
//}
//
//__global__ void activate_array_tanh_halfadd_kernel(half* output, half* biases, int n, int size)
//{
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// if (index < n) {
// float a = x[index];
// x[index] = (2.f / (1 + expf(-2 * a)) - 1);
// }
//}
#endif
void OutPutHalfMemory(half* data, int iSize, char* txt)
{
float* fnewData = cuda_make_array(0, iSize);
cuda_convert_f16_to_f32(data, iSize, fnewData);
OutPutGPUMemory(fnewData, iSize, txt);
}
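// Fused bias-add + activation in half precision: returns immediately when the bias must be
// applied later in FP32, falls back to a plain bias-add when the activation has no fused
// kernel, and otherwise dispatches one of the fused kernels above.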
void add_bias_activation_half_gpu(half* output, half* biases, int batch, int n, int size
,ACTIVATION act,int bUnSupportAct,int bUnsportBias)
{
#ifdef GPUHALFABILITY
if (bUnsportBias) return;
if (bUnSupportAct)
{
add_bias_half_gpu(output, biases, batch, n, size);
return;
}
dim3 dimGrid((size - 1) / BLOCK + 1, n, batch);
dim3 dimBlock(BLOCK, 1, 1);
switch (act)
{
case RELU:
activate_array_relu_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
case LINEAR:
break;
case LEAKY:
activate_array_leaky_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
case HARDTAN:
activate_array_hardtan_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
/* case SELU:
activate_array_selu_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
case LOGISTIC:
activate_array_logistic_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
case TANH:
activate_array_tanh_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;*/
}
check_error(cudaPeekAtLastError());
#endif
}
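// Inference-only forward pass intended to run the cuDNN convolution in half precision: the
// input is converted to FP16 when the previous layer produced FP32 (bIn32), the result is
// written to the shared half buffer and converted back to FP32 when the next layer expects
// FP32 (bOut32) or left in half precision in l.output_gpu otherwise, and bias/activation are
// applied in half or in FP32 depending on the layer's support flags.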
void forward_convolutional_layer_gpu_predict_Float16(convolutional_layer l, network net)
{
if (l.binary) {
binarize_weights_gpu(l.weights_gpu, l.n, l.c / l.groups * l.size * l.size, l.binary_weights_gpu);
swap_binary(&l);
}
if (l.xnor) {
binarize_weights_gpu(l.weights_gpu, l.n, l.c / l.groups * l.size * l.size, l.binary_weights_gpu);
swap_binary(&l);
binarize_gpu(net.input_gpu, l.c * l.h * l.w * l.batch, l.binary_input_gpu);
net.input_gpu = l.binary_input_gpu;
}
float one = 1.0f,zero=0.0f;
#ifdef MEMORYDEBUG
printf("gpuInput:0x%x,gpuOutput:0x%x bin:%d,xnor:%d\n", (unsigned int)net.input_gpu, (unsigned int)l.output_gpu, l.binary, l.xnor);
printf("workspace:0x%x,size:%d,", (unsigned int)net.workspace, l.workspace_size);
printf("inputsize:%d,outputSize:%d\n", net.inputs, l.outputs);
#endif
#ifdef FORWARD_CONVOLUTIONAL_LAYER_GPUHALF
OutPutGPUMemory(net.input_gpu, net.inputs,0);
#endif
LAYERDATA* data = (LAYERDATA *)l.layerdata;
CONVPROP* prop = (CONVPROP*)data->layerData;
void* input=0;
void* output = 0;
if (prop->bIn32)
{
cuda_convert_f32_to_f16(net.input_gpu, net.inputs, publicMemory[0]);
input = publicMemory[0];
}
else
{
input = net.input_gpu;
}
if (prop->bOut32)
{
output = publicMemory[1];
}
else
{
output = l.output_gpu;
}
#ifdef GETDATATYPE
float* fa, *fw;
fa = cuda_make_array(0, net.inputs);
fw = cuda_make_array(0, l.nweights);
cuda_convert_f16_to_f32(publicMemory[0], net.inputs, fa);
cuda_convert_f16_to_f32((half *)l.weights_gpu, l.nweights, fw);
OutPutGPUMemory(fa, net.inputs, 0);
OutPutGPUMemory(fw, l.nweights, 0);
#endif
cudnnStatus_t stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
input,
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&zero,
l.dstTensorDesc,
output);
checkcudnnerror(stat);
#ifdef GETDATATYPE
/*if (GetDataType() == CUDNN_DATA_FLOAT)
{
OutPutGPUMemory((float *)publicMemory[1], l.outputs, 0);
cudnnStatus_t stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
net.input_gpu,
publicMemory[0],
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&one,
l.dstTensorDesc,
l.output_gpu);
publicMemory[0]);
checkcudnnerror(stat);
OutPutGPUMemory((float*)publicMemory[0], l.outputs, 0);
stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
net.input_gpu,
publicMemory[0],
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&one,
l.dstTensorDesc,
l.output_gpu);
publicMemory[0]);
checkcudnnerror(stat);
OutPutGPUMemory((float*)l.output_gpu, l.outputs, 0);
cuda_convert_f32_to_f16((float *)publicMemory[1], l.outputs, (half*)publicMemory[0]);
cudaError_t stats = cudaMemcpy(publicMemory[1], publicMemory[0], l.outputs * sizeof(float), cudaMemcpyDeviceToDevice);
}*/
#endif
#ifdef TESTPROGRESS16
if (output == l.output_gpu)
{
cudaMemcpy(publicMemory[1], l.output_gpu, l.outputs * sizeof(half), cudaMemcpyDeviceToDevice);
}
cuda_convert_f16_to_f32((half*)publicMemory[1], l.outputs, tempBuffer);
cuda_convert_f16_to_f32((half*)l.biases_gpu, l.n, tempWeight);
add_bias_gpu(tempBuffer, tempWeight, l.batch, l.n, l.out_w * l.out_h);
activate_array_ongpu(tempBuffer, l.outputs * l.batch, l.activation);
OutPutGPUMemory(tempBuffer, l.outputs, 0);
exit(0);
cuda_convert_f32_to_f16(tempBuffer, l.outputs, publicMemory[1]);
if (output == l.output_gpu)
{
cudaMemcpy(l.output_gpu, publicMemory[1], l.outputs * sizeof(half),cudaMemcpyDeviceToDevice);
}
#else
add_bias_activation_half_gpu((half*)output, (half*)l.biases_gpu, l.batch, l.n, l.out_w* l.out_h,l.activation
,prop->bUnSupportActivate,prop->bUnSupportBias);
OutPutHalfMemory((half *)output, l.outputs, "memory.txt");
exit(0);
#endif
if (prop->bOut32)
{
cuda_convert_f16_to_f32((half*)output, l.outputs, l.output_gpu);
}
#ifdef MEMORYDEBUG
printf("End Forword Cudnn\n");
//if (prop->bUnSupportActivate) OutPutGPUMemory(l.output_gpu, l.outputs, 0);
#endif
if(prop->bUnSupportBias) add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w * l.out_h);
if(prop->bUnSupportActivate) activate_array_ongpu(l.output_gpu, l.outputs * l.batch, l.activation);
if (l.binary || l.xnor) swap_binary(&l);
} |
e3e2fe49dee10c67de91fd30981f4c8741ff2cc5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) 2017 Darius Rckert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#include "saiga/cuda/tests/test.h"
#include "saiga/cuda/tests/test_helper.h"
#include "saiga/cuda/thread_info.h"
#include "saiga/cuda/cudaHelper.h"
#include "saiga/time/timer.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/extrema.h>
namespace Saiga {
namespace CUDA {
__global__ static
void addFive(float *g_idata, float *g_odata)
{
g_odata[threadIdx.x] = g_idata[threadIdx.x]+5;
}
void testCuda(){
CUDA_SYNC_CHECK_ERROR();
unsigned int num_threads = 32;
unsigned int mem_size = sizeof(float) * num_threads;
// allocate host memory
float *h_idata = (float *) malloc(mem_size);
    // initialize the memory
for (unsigned int i = 0; i < num_threads; ++i)
{
h_idata[i] = (float) i;
}
// allocate device memory
float *d_idata;
hipMalloc((void **) &d_idata, mem_size);
CUDA_SYNC_CHECK_ERROR();
// copy host memory to device
hipMemcpy(d_idata, h_idata, mem_size,hipMemcpyHostToDevice);
CUDA_SYNC_CHECK_ERROR();
// allocate device memory for result
float *d_odata;
hipMalloc((void **) &d_odata, mem_size);
CUDA_SYNC_CHECK_ERROR();
// execute the kernel
hipLaunchKernelGGL(( addFive), dim3(1), dim3(num_threads) , 0, 0, d_idata, d_odata);
CUDA_SYNC_CHECK_ERROR();
// allocate mem for the result on host side
float *h_odata = (float *) malloc(mem_size);
// copy result from device to host
hipMemcpy(h_odata, d_odata, sizeof(float) * num_threads,hipMemcpyDeviceToHost);
CUDA_SYNC_CHECK_ERROR();
bool result = true;
for (unsigned int i = 0; i < num_threads; ++i)
{
if(h_odata[i]!=i+5)
result = false;
}
if(result){
std::cout<<"CUDA test: SUCCESS!"<<std::endl;
}else{
std::cout<<"CUDA test: FAILED!"<<std::endl;
SAIGA_ASSERT(0);
}
// cleanup memory
free(h_idata);
free(h_odata);
hipFree(d_idata);
hipFree(d_odata);
CUDA_SYNC_CHECK_ERROR();
}
struct MySortStruct{
int value;
float key;
__host__ __device__ MySortStruct(){}
__host__ __device__ MySortStruct(int v, float k):value(v),key(k){}
};
__host__ __device__ bool operator<(const MySortStruct& a, const MySortStruct& b){
return a.key < b.key;
}
__host__ __device__ bool operator==(const MySortStruct& a, const MySortStruct& b){
return a.key == b.key && a.value == b.value;
}
struct ReduceMySortStructOp{
__host__ __device__ MySortStruct operator()(const MySortStruct& a, const MySortStruct& b){
MySortStruct res;
res.key = a.key + b.key;
res.value = a.value + b.value;
return res;
}
};
void testThrust(){
CUDA_SYNC_CHECK_ERROR();
{
// simple sort test
thrust::host_vector<int> H(4);
H[0] = 38;
H[1] = 20;
H[2] = 42;
H[3] = 5;
thrust::device_vector<int> D = H;
thrust::sort(H.begin(),H.end());
thrust::sort(D.begin(),D.end());
CUDA_SYNC_CHECK_ERROR();
SAIGA_ASSERT(H==D);
}
{
//sort of custom struct test
thrust::host_vector<MySortStruct> H(4);
H[0] = {1,2.0f};
H[1] = {2,1.0f};
H[2] = {3,573.0f};
H[3] = {4,-934.0f};
thrust::device_vector<MySortStruct> D = H;
thrust::sort(H.begin(),H.end());
thrust::sort(D.begin(),D.end());
CUDA_SYNC_CHECK_ERROR();
SAIGA_ASSERT(H==D);
}
{
//find maximum test
thrust::host_vector<MySortStruct> H(4);
H[0] = {1,2.0f};
H[1] = {2,1.0f};
H[2] = {3,573.0f};
H[3] = {4,-934.0f};
thrust::device_vector<MySortStruct> D = H;
auto max = thrust::max_element(D.begin(),D.end());
MySortStruct maxel = *max;
CUDA_SYNC_CHECK_ERROR();
SAIGA_ASSERT(maxel.key == 573.0f);
}
{
//reduce test
thrust::host_vector<MySortStruct> H(4);
H[0] = {1,2.0f};
H[1] = {2,1.0f};
H[2] = {3,573.0f};
H[3] = {4,-934.0f};
thrust::device_vector<MySortStruct> D = H;
auto sum = thrust::reduce(D.begin(),D.end(),MySortStruct(0,0),ReduceMySortStructOp());
CUDA_SYNC_CHECK_ERROR();
SAIGA_ASSERT(sum.value == 10 && sum.key == -358.0f);
}
std::cout<<"Thrust test: SUCCESS!"<<std::endl;
CUDA_SYNC_CHECK_ERROR();
}
}
}
| e3e2fe49dee10c67de91fd30981f4c8741ff2cc5.cu | /**
* Copyright (c) 2017 Darius Rückert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#include "saiga/cuda/tests/test.h"
#include "saiga/cuda/tests/test_helper.h"
#include "saiga/cuda/thread_info.h"
#include "saiga/cuda/cudaHelper.h"
#include "saiga/time/timer.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/extrema.h>
namespace Saiga {
namespace CUDA {
__global__ static
void addFive(float *g_idata, float *g_odata)
{
g_odata[threadIdx.x] = g_idata[threadIdx.x]+5;
}
void testCuda(){
CUDA_SYNC_CHECK_ERROR();
unsigned int num_threads = 32;
unsigned int mem_size = sizeof(float) * num_threads;
// allocate host memory
float *h_idata = (float *) malloc(mem_size);
    // initialize the memory
for (unsigned int i = 0; i < num_threads; ++i)
{
h_idata[i] = (float) i;
}
// allocate device memory
float *d_idata;
cudaMalloc((void **) &d_idata, mem_size);
CUDA_SYNC_CHECK_ERROR();
// copy host memory to device
cudaMemcpy(d_idata, h_idata, mem_size,cudaMemcpyHostToDevice);
CUDA_SYNC_CHECK_ERROR();
// allocate device memory for result
float *d_odata;
cudaMalloc((void **) &d_odata, mem_size);
CUDA_SYNC_CHECK_ERROR();
// execute the kernel
addFive<<< 1, num_threads >>>(d_idata, d_odata);
CUDA_SYNC_CHECK_ERROR();
// allocate mem for the result on host side
float *h_odata = (float *) malloc(mem_size);
// copy result from device to host
cudaMemcpy(h_odata, d_odata, sizeof(float) * num_threads,cudaMemcpyDeviceToHost);
CUDA_SYNC_CHECK_ERROR();
bool result = true;
for (unsigned int i = 0; i < num_threads; ++i)
{
if(h_odata[i]!=i+5)
result = false;
}
if(result){
std::cout<<"CUDA test: SUCCESS!"<<std::endl;
}else{
std::cout<<"CUDA test: FAILED!"<<std::endl;
SAIGA_ASSERT(0);
}
// cleanup memory
free(h_idata);
free(h_odata);
cudaFree(d_idata);
cudaFree(d_odata);
CUDA_SYNC_CHECK_ERROR();
}
struct MySortStruct{
int value;
float key;
__host__ __device__ MySortStruct(){}
__host__ __device__ MySortStruct(int v, float k):value(v),key(k){}
};
__host__ __device__ bool operator<(const MySortStruct& a, const MySortStruct& b){
return a.key < b.key;
}
__host__ __device__ bool operator==(const MySortStruct& a, const MySortStruct& b){
return a.key == b.key && a.value == b.value;
}
struct ReduceMySortStructOp{
__host__ __device__ MySortStruct operator()(const MySortStruct& a, const MySortStruct& b){
MySortStruct res;
res.key = a.key + b.key;
res.value = a.value + b.value;
return res;
}
};
void testThrust(){
CUDA_SYNC_CHECK_ERROR();
{
// simple sort test
thrust::host_vector<int> H(4);
H[0] = 38;
H[1] = 20;
H[2] = 42;
H[3] = 5;
thrust::device_vector<int> D = H;
thrust::sort(H.begin(),H.end());
thrust::sort(D.begin(),D.end());
CUDA_SYNC_CHECK_ERROR();
SAIGA_ASSERT(H==D);
}
{
//sort of custom struct test
thrust::host_vector<MySortStruct> H(4);
H[0] = {1,2.0f};
H[1] = {2,1.0f};
H[2] = {3,573.0f};
H[3] = {4,-934.0f};
thrust::device_vector<MySortStruct> D = H;
thrust::sort(H.begin(),H.end());
thrust::sort(D.begin(),D.end());
CUDA_SYNC_CHECK_ERROR();
SAIGA_ASSERT(H==D);
}
{
//find maximum test
thrust::host_vector<MySortStruct> H(4);
H[0] = {1,2.0f};
H[1] = {2,1.0f};
H[2] = {3,573.0f};
H[3] = {4,-934.0f};
thrust::device_vector<MySortStruct> D = H;
auto max = thrust::max_element(D.begin(),D.end());
MySortStruct maxel = *max;
CUDA_SYNC_CHECK_ERROR();
SAIGA_ASSERT(maxel.key == 573.0f);
}
{
//reduce test
thrust::host_vector<MySortStruct> H(4);
H[0] = {1,2.0f};
H[1] = {2,1.0f};
H[2] = {3,573.0f};
H[3] = {4,-934.0f};
thrust::device_vector<MySortStruct> D = H;
auto sum = thrust::reduce(D.begin(),D.end(),MySortStruct(0,0),ReduceMySortStructOp());
CUDA_SYNC_CHECK_ERROR();
SAIGA_ASSERT(sum.value == 10 && sum.key == -358.0f);
}
std::cout<<"Thrust test: SUCCESS!"<<std::endl;
CUDA_SYNC_CHECK_ERROR();
}
}
}
|
9f89425d386698c7fcb28d3d748b085a4b75c95d.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2014 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "rectified_linear_layer_hessian_cuda.h"
#include <hip/hip_runtime.h>
#include "util_cuda.h"
__global__ void rectified_linear_hess_kernel(
const float4 * __restrict input,
float4 * __restrict output,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
{
float4 val = input[elem_id];
val.x = max(0.0F, val.x);
val.y = max(0.0F, val.y);
val.z = max(0.0F, val.z);
val.w = max(0.0F, val.w);
output[elem_id] = val;
}
}
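// Backprop for the Hessian pass: the ReLU derivative is 1 where the forward output is
// positive and 0 where it was clamped, so incoming errors are simply zeroed at clamped
// positions.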
__global__ void rectified_linear_square_deriviative_hess_kernel(
float4 * __restrict errors,
const float4 * __restrict output_neurons,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
{
float4 val = output_neurons[elem_id];
float4 current_error = errors[elem_id];
if (val.x == 0.0F)
current_error.x = 0.0F;
if (val.y == 0.0F)
current_error.y = 0.0F;
if (val.z == 0.0F)
current_error.z = 0.0F;
if (val.w == 0.0F)
current_error.w = 0.0F;
errors[elem_id] = current_error;
}
}
namespace nnforge
{
namespace cuda
{
rectified_linear_layer_hessian_cuda::rectified_linear_layer_hessian_cuda()
{
}
rectified_linear_layer_hessian_cuda::~rectified_linear_layer_hessian_cuda()
{
}
void rectified_linear_layer_hessian_cuda::enqueue_test(
hipStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data_custom,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
unsigned int entry_count)
{
int elem_count = (input_elem_count_per_entry * entry_count + 3) / 4;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
elem_count);
hipLaunchKernelGGL(( rectified_linear_hess_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
*input_neurons_buffer,
*output_neurons_buffer,
elem_count);
}
void rectified_linear_layer_hessian_cuda::enqueue_backprop(
hipStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data_custom,
const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
cuda_linear_buffer_device_smart_ptr input_errors_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
unsigned int entry_count)
{
int elem_count = (input_elem_count_per_entry * entry_count + 3) / 4;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
elem_count);
hipLaunchKernelGGL(( rectified_linear_square_deriviative_hess_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
*output_errors_buffer,
*output_neurons_buffer,
elem_count);
}
bool rectified_linear_layer_hessian_cuda::is_in_place_backprop() const
{
return true;
}
}
}
| 9f89425d386698c7fcb28d3d748b085a4b75c95d.cu | /*
* Copyright 2011-2014 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "rectified_linear_layer_hessian_cuda.h"
#include <cuda_runtime.h>
#include "util_cuda.h"
__global__ void rectified_linear_hess_kernel(
const float4 * __restrict input,
float4 * __restrict output,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
{
float4 val = input[elem_id];
val.x = max(0.0F, val.x);
val.y = max(0.0F, val.y);
val.z = max(0.0F, val.z);
val.w = max(0.0F, val.w);
output[elem_id] = val;
}
}
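// Backprop for the Hessian pass: the ReLU derivative is 1 where the forward output is
// positive and 0 where it was clamped, so incoming errors are simply zeroed at clamped
// positions.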
__global__ void rectified_linear_square_deriviative_hess_kernel(
float4 * __restrict errors,
const float4 * __restrict output_neurons,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
{
float4 val = output_neurons[elem_id];
float4 current_error = errors[elem_id];
if (val.x == 0.0F)
current_error.x = 0.0F;
if (val.y == 0.0F)
current_error.y = 0.0F;
if (val.z == 0.0F)
current_error.z = 0.0F;
if (val.w == 0.0F)
current_error.w = 0.0F;
errors[elem_id] = current_error;
}
}
namespace nnforge
{
namespace cuda
{
rectified_linear_layer_hessian_cuda::rectified_linear_layer_hessian_cuda()
{
}
rectified_linear_layer_hessian_cuda::~rectified_linear_layer_hessian_cuda()
{
}
void rectified_linear_layer_hessian_cuda::enqueue_test(
cudaStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data_custom,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
unsigned int entry_count)
{
int elem_count = (input_elem_count_per_entry * entry_count + 3) / 4;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
elem_count);
rectified_linear_hess_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
*input_neurons_buffer,
*output_neurons_buffer,
elem_count);
}
void rectified_linear_layer_hessian_cuda::enqueue_backprop(
cudaStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data_custom,
const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
cuda_linear_buffer_device_smart_ptr input_errors_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
unsigned int entry_count)
{
int elem_count = (input_elem_count_per_entry * entry_count + 3) / 4;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
elem_count);
rectified_linear_square_deriviative_hess_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
*output_errors_buffer,
*output_neurons_buffer,
elem_count);
}
bool rectified_linear_layer_hessian_cuda::is_in_place_backprop() const
{
return true;
}
}
}
|
bb867b09acf572644828899e7b22a7bdc4ca9086.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@precisions normal z -> s d c
@author Mark Gates
*/
#include "common_magma.h"
#define NB 64
/* =====================================================================
    Batches zgeadd of multiple arrays;
y-dimension of grid is different arrays,
x-dimension of grid is blocks for each array.
Matrix is m x n, and is divided into block rows, each NB x n.
Each CUDA block has NB threads to handle one block row.
Each thread adds one row, iterating across all columns.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
TODO. Block in both directions, for large matrices.
E.g., each block does 64x64 tile, instead of 64xN tile.
*/
__global__ void
zgeadd_batched_kernel(
int m, int n,
magmaDoubleComplex alpha,
const magmaDoubleComplex * const *dAarray, int ldda,
magmaDoubleComplex **dBarray, int lddb )
{
// dA and dB iterate across row i
const magmaDoubleComplex *dA = dAarray[ blockIdx.y ];
magmaDoubleComplex *dB = dBarray[ blockIdx.y ];
int i = blockIdx.x*blockDim.x + threadIdx.x;
if ( i < m ) {
dA += i;
dB += i;
const magmaDoubleComplex *dAend = dA + n*ldda;
while( dA < dAend ) {
*dB = alpha*(*dA) + (*dB);
dA += ldda;
dB += lddb;
}
}
}
/* ===================================================================== */
/**
Purpose
-------
ZGEADD adds two sets of matrices, dAarray[i] = alpha*dAarray[i] + dBarray[i],
for i = 0, ..., batchCount-1.
Arguments
---------
@param[in]
m INTEGER
The number of rows of each matrix dAarray[i]. M >= 0.
@param[in]
n INTEGER
The number of columns of each matrix dAarray[i]. N >= 0.
@param[in]
alpha COMPLEX_16
The scalar alpha.
@param[in]
dAarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a COMPLEX_16 array, dimension (LDDA,N)
The m by n matrices dAarray[i].
@param[in]
ldda INTEGER
The leading dimension of each array dAarray[i]. LDDA >= max(1,M).
@param[in,out]
dBarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a COMPLEX_16 array, dimension (LDDB,N)
The m by n matrices dBarray[i].
@param[in]
lddb INTEGER
The leading dimension of each array dBarray[i]. LDDB >= max(1,M).
@param[in]
batchCount INTEGER
The number of matrices to add; length of dAarray and dBarray.
batchCount >= 0.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zgeadd_batched_q(
magma_int_t m, magma_int_t n,
magmaDoubleComplex alpha,
const magmaDoubleComplex * const *dAarray, magma_int_t ldda,
magmaDoubleComplex **dBarray, magma_int_t lddb,
magma_int_t batchCount,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
else if ( batchCount < 0 )
info = -8;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || n == 0 || batchCount == 0 )
return;
dim3 threads( NB );
dim3 grid( (m + NB - 1)/NB, batchCount );
hipLaunchKernelGGL(( zgeadd_batched_kernel), dim3(grid), dim3(threads), 0, queue ,
m, n, alpha, dAarray, ldda, dBarray, lddb );
}
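/*  Illustrative host-side sketch (m, n, alpha, ldda, lddb and batchCount as documented above;
    hA_ptrs and hB_ptrs are assumed to be host arrays already holding the batchCount device
    pointers to the matrices A_i and B_i):

        magmaDoubleComplex **dAarray, **dBarray;
        hipMalloc( (void**)&dAarray, batchCount*sizeof(magmaDoubleComplex*) );
        hipMalloc( (void**)&dBarray, batchCount*sizeof(magmaDoubleComplex*) );
        hipMemcpy( dAarray, hA_ptrs, batchCount*sizeof(magmaDoubleComplex*), hipMemcpyHostToDevice );
        hipMemcpy( dBarray, hB_ptrs, batchCount*sizeof(magmaDoubleComplex*), hipMemcpyHostToDevice );
        magmablas_zgeadd_batched( m, n, alpha, dAarray, ldda, dBarray, lddb, batchCount );
*/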
/**
@see magmablas_zgeadd_batched_q
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zgeadd_batched(
magma_int_t m, magma_int_t n,
magmaDoubleComplex alpha,
const magmaDoubleComplex * const *dAarray, magma_int_t ldda,
magmaDoubleComplex **dBarray, magma_int_t lddb,
magma_int_t batchCount )
{
magmablas_zgeadd_batched_q(
m, n, alpha, dAarray, ldda, dBarray, lddb, batchCount, magma_stream );
}
| bb867b09acf572644828899e7b22a7bdc4ca9086.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@precisions normal z -> s d c
@author Mark Gates
*/
#include "common_magma.h"
#define NB 64
/* =====================================================================
Batches zlacpy of multiple arrays;
y-dimension of grid is different arrays,
x-dimension of grid is blocks for each array.
Matrix is m x n, and is divided into block rows, each NB x n.
Each CUDA block has NB threads to handle one block row.
Each thread adds one row, iterating across all columns.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
TODO. Block in both directions, for large matrices.
E.g., each block does 64x64 tile, instead of 64xN tile.
*/
__global__ void
zgeadd_batched_kernel(
int m, int n,
magmaDoubleComplex alpha,
const magmaDoubleComplex * const *dAarray, int ldda,
magmaDoubleComplex **dBarray, int lddb )
{
// dA and dB iterate across row i
const magmaDoubleComplex *dA = dAarray[ blockIdx.y ];
magmaDoubleComplex *dB = dBarray[ blockIdx.y ];
int i = blockIdx.x*blockDim.x + threadIdx.x;
if ( i < m ) {
dA += i;
dB += i;
const magmaDoubleComplex *dAend = dA + n*ldda;
while( dA < dAend ) {
*dB = alpha*(*dA) + (*dB);
dA += ldda;
dB += lddb;
}
}
}
/* ===================================================================== */
/**
Purpose
-------
ZGEADD adds two sets of matrices, dAarray[i] = alpha*dAarray[i] + dBarray[i],
for i = 0, ..., batchCount-1.
Arguments
---------
@param[in]
m INTEGER
The number of rows of each matrix dAarray[i]. M >= 0.
@param[in]
n INTEGER
The number of columns of each matrix dAarray[i]. N >= 0.
@param[in]
alpha COMPLEX_16
The scalar alpha.
@param[in]
dAarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a COMPLEX_16 array, dimension (LDDA,N)
The m by n matrices dAarray[i].
@param[in]
ldda INTEGER
The leading dimension of each array dAarray[i]. LDDA >= max(1,M).
@param[in,out]
dBarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a COMPLEX_16 array, dimension (LDDB,N)
The m by n matrices dBarray[i].
@param[in]
lddb INTEGER
The leading dimension of each array dBarray[i]. LDDB >= max(1,M).
@param[in]
batchCount INTEGER
The number of matrices to add; length of dAarray and dBarray.
batchCount >= 0.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zgeadd_batched_q(
magma_int_t m, magma_int_t n,
magmaDoubleComplex alpha,
const magmaDoubleComplex * const *dAarray, magma_int_t ldda,
magmaDoubleComplex **dBarray, magma_int_t lddb,
magma_int_t batchCount,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
else if ( batchCount < 0 )
info = -8;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || n == 0 || batchCount == 0 )
return;
dim3 threads( NB );
dim3 grid( (m + NB - 1)/NB, batchCount );
zgeadd_batched_kernel<<< grid, threads, 0, queue >>>(
m, n, alpha, dAarray, ldda, dBarray, lddb );
}
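/*  Illustrative host-side sketch (m, n, alpha, ldda, lddb and batchCount as documented above;
    hA_ptrs and hB_ptrs are assumed to be host arrays already holding the batchCount device
    pointers to the matrices A_i and B_i):

        magmaDoubleComplex **dAarray, **dBarray;
        cudaMalloc( (void**)&dAarray, batchCount*sizeof(magmaDoubleComplex*) );
        cudaMalloc( (void**)&dBarray, batchCount*sizeof(magmaDoubleComplex*) );
        cudaMemcpy( dAarray, hA_ptrs, batchCount*sizeof(magmaDoubleComplex*), cudaMemcpyHostToDevice );
        cudaMemcpy( dBarray, hB_ptrs, batchCount*sizeof(magmaDoubleComplex*), cudaMemcpyHostToDevice );
        magmablas_zgeadd_batched( m, n, alpha, dAarray, ldda, dBarray, lddb, batchCount );
*/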
/**
@see magmablas_zgeadd_batched_q
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zgeadd_batched(
magma_int_t m, magma_int_t n,
magmaDoubleComplex alpha,
const magmaDoubleComplex * const *dAarray, magma_int_t ldda,
magmaDoubleComplex **dBarray, magma_int_t lddb,
magma_int_t batchCount )
{
magmablas_zgeadd_batched_q(
m, n, alpha, dAarray, ldda, dBarray, lddb, batchCount, magma_stream );
}
|
96f5631f1870c38bdda39bb9349a7bab8ffc936f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void update_disp_veloc_kernel(float * displ, float * veloc, float * accel, const int size, const float deltat, const float deltatsqover2, const float deltatover2){
int id;
id = threadIdx.x + (blockIdx.x) * (blockDim.x) + (blockIdx.y) * ((gridDim.x) * (blockDim.x));
if (id < size) {
displ[id] = displ[id] + (deltat) * (veloc[id]) + (deltatsqover2) * (accel[id]);
veloc[id] = veloc[id] + (deltatover2) * (accel[id]);
accel[id] = 0.0f;
}
} | 96f5631f1870c38bdda39bb9349a7bab8ffc936f.cu | #include "includes.h"
__global__ void update_disp_veloc_kernel(float * displ, float * veloc, float * accel, const int size, const float deltat, const float deltatsqover2, const float deltatover2){
int id;
id = threadIdx.x + (blockIdx.x) * (blockDim.x) + (blockIdx.y) * ((gridDim.x) * (blockDim.x));
if (id < size) {
displ[id] = displ[id] + (deltat) * (veloc[id]) + (deltatsqover2) * (accel[id]);
veloc[id] = veloc[id] + (deltatover2) * (accel[id]);
accel[id] = 0.0f;
}
} |
a143b22e5f562565cd10ca28ac30a07aa65ef684.hip | // !!! This is a file automatically generated by hipify!!!
//!< Initialize GPU device.
#include "device_function.cuh"
#include "gpu_info.h"
inline bool IsGPUCapableP2P(hipDeviceProp_t *pProp)
{
#ifdef _WIN32
return (bool)(pProp->tccDriver ? true : false);
#else
return (bool)(pProp->major >= 2);
#endif
}
extern void init_cuda(MPI_info *mpi_info,GPU_info *gpu_info,int display){
int gpu_count;
int i,j;
hipDeviceProp_t prop[64];
int *gpuid;
int can_access_peer_0_1;
gpu_count=0;
gpuid=(int*)malloc(sizeof(int));
if(gpu_info->GPU_N==0){
//checkCudaErrors(hipGetDeviceCount(&gpu_info->GPU_N));
if(gpu_info->GPU_N==8)
gpu_info->GPU_N=1;
/*for(int i=0;i<gpu_info->GPU_N;i++)
gpu_info->whichGPUs[i]=i; //!Define on these GPU to calculate
*/
}
//gpu_info->whichGPUs=(int*)malloc(sizeof(int)*(gpu_info->GPU_N));
gpu_info->stream=(hipStream_t*)malloc(sizeof(hipStream_t)*gpu_info->GPU_N);
gpu_info->state.resize(gpu_info->GPU_N);
printf("CPU %d:",mpi_info->current_node);
for(i=0;i<(gpu_info->GPU_N);i++){
checkCudaErrors(hipSetDevice(gpu_info->whichGPUs[i]));
checkCudaErrors(hipStreamCreate ( &gpu_info->stream[i])) ;
printf("%d ",i);//gpu_info->whichGPUs[i]
}
printf("is avilable \n");
for (i=0; i < gpu_info->GPU_N; i++){
checkCudaErrors(hipGetDeviceProperties(&gpu_info->prop[i], gpu_info->whichGPUs[i]));
// Only boards based on Fermi can support P2P
gpuid[gpu_count++] = gpu_info->whichGPUs[i];
if(display==1){
printf("> GPU%d = \"%15s\" %s capable of Peer-to-Peer (P2P)\n", i, gpu_info->prop[i].name, (IsGPUCapableP2P(&prop[i]) ? "IS " : "NOT"));
printf("maxThreadsDim %d %d %d\n",gpu_info->prop[i].maxThreadsDim[0],gpu_info->prop[i].maxThreadsDim[1],gpu_info->prop[i].maxThreadsDim[2]);
printf("maxThreadsPerBlock %d\n",gpu_info->prop[i].maxThreadsPerBlock);
printf("> GPU%d = \"%15s\" %s capable of Peer-to-Peer (P2P)\n", i, prop[i].name, (IsGPUCapableP2P(&prop[i]) ? "IS " : "NOT"));
printf("> %s (GPU%d) supports UVA: %s\n", gpu_info->prop[i].name, i, (gpu_info->prop[i].unifiedAddressing ? "Yes" : "No"));
}
for(j=0;j<gpu_info->GPU_N;j++){
if(i!=j){
checkCudaErrors(hipDeviceCanAccessPeer(&can_access_peer_0_1, gpu_info->whichGPUs[i], gpu_info->whichGPUs[j]));
if(can_access_peer_0_1) {
//checkCudaErrors(hipDeviceEnablePeerAccess(gpu_info->whichGPUs[j], 0));
        }// if can_access
}//if i!=j
}// for j
}// for i
free(gpuid);
}//end routine
| a143b22e5f562565cd10ca28ac30a07aa65ef684.cu | //!< Initialize GPU device.
#include "device_function.cuh"
#include "gpu_info.h"
inline bool IsGPUCapableP2P(cudaDeviceProp *pProp)
{
#ifdef _WIN32
return (bool)(pProp->tccDriver ? true : false);
#else
return (bool)(pProp->major >= 2);
#endif
}
extern void init_cuda(MPI_info *mpi_info,GPU_info *gpu_info,int display){
int gpu_count;
int i,j;
cudaDeviceProp prop[64];
int *gpuid;
int can_access_peer_0_1;
gpu_count=0;
gpuid=(int*)malloc(sizeof(int));
if(gpu_info->GPU_N==0){
//checkCudaErrors(cudaGetDeviceCount(&gpu_info->GPU_N));
if(gpu_info->GPU_N==8)
gpu_info->GPU_N=1;
/*for(int i=0;i<gpu_info->GPU_N;i++)
gpu_info->whichGPUs[i]=i; //!Define on these GPU to calculate
*/
}
//gpu_info->whichGPUs=(int*)malloc(sizeof(int)*(gpu_info->GPU_N));
gpu_info->stream=(cudaStream_t*)malloc(sizeof(cudaStream_t)*gpu_info->GPU_N);
gpu_info->state.resize(gpu_info->GPU_N);
printf("CPU %d:",mpi_info->current_node);
for(i=0;i<(gpu_info->GPU_N);i++){
checkCudaErrors(cudaSetDevice(gpu_info->whichGPUs[i]));
checkCudaErrors(cudaStreamCreate ( &gpu_info->stream[i])) ;
printf("%d ",i);//gpu_info->whichGPUs[i]
}
printf("is avilable \n");
for (i=0; i < gpu_info->GPU_N; i++){
checkCudaErrors(cudaGetDeviceProperties(&gpu_info->prop[i], gpu_info->whichGPUs[i]));
// Only boards based on Fermi can support P2P
gpuid[gpu_count++] = gpu_info->whichGPUs[i];
if(display==1){
printf("> GPU%d = \"%15s\" %s capable of Peer-to-Peer (P2P)\n", i, gpu_info->prop[i].name, (IsGPUCapableP2P(&prop[i]) ? "IS " : "NOT"));
printf("maxThreadsDim %d %d %d\n",gpu_info->prop[i].maxThreadsDim[0],gpu_info->prop[i].maxThreadsDim[1],gpu_info->prop[i].maxThreadsDim[2]);
printf("maxThreadsPerBlock %d\n",gpu_info->prop[i].maxThreadsPerBlock);
printf("> GPU%d = \"%15s\" %s capable of Peer-to-Peer (P2P)\n", i, prop[i].name, (IsGPUCapableP2P(&prop[i]) ? "IS " : "NOT"));
printf("> %s (GPU%d) supports UVA: %s\n", gpu_info->prop[i].name, i, (gpu_info->prop[i].unifiedAddressing ? "Yes" : "No"));
}
for(j=0;j<gpu_info->GPU_N;j++){
if(i!=j){
checkCudaErrors(cudaDeviceCanAccessPeer(&can_access_peer_0_1, gpu_info->whichGPUs[i], gpu_info->whichGPUs[j]));
if(can_access_peer_0_1) {
//checkCudaErrors(cudaDeviceEnablePeerAccess(gpu_info->whichGPUs[j], 0));
        }// if can_access
}//if i!=j
}// for j
}// for i
free(gpuid);
}//end routine
|
4c7fdc1c3c0360869ea2283031d7f7941cee5ac4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <chrono>
#include "array2d.h"
#include "cuda_helper.h"
#include "mat_bench.h"
#define value_t float
#define index_t int
__global__ void kernel(index_t Nx, index_t Ny, value_t *x, value_t *y)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int gid = i * Ny + j;
if (i < Nx && j < Ny)
y[gid] = x[gid];
}
struct mat_copy : public mat_bench<value_t, index_t>
{
void benchmark()
{
print_bench();
std::cout << "\nSimulation info: 2d mat copy\n";
value_t **x = create_array2d<value_t, index_t>(side_size, side_size);
value_t **y = create_array2d<value_t, index_t>(side_size, side_size);
#pragma omp parallel for
for (index_t i = 0; i < side_size; i++)
{
for (index_t j = 0; j < side_size; j++)
{
x[i][j] = 1.0;
y[i][j] = 0.0;
}
}
value_t *d_x, *d_y;
value_t *h_x = x[0], *h_y = y[0];
checkCudaErrors(hipMalloc(&d_x, total_size * sizeof(value_t)));
checkCudaErrors(hipMalloc(&d_y, total_size * sizeof(value_t)));
hipEvent_t start, stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
checkCudaErrors(hipMemcpy(d_x, h_x, total_size * sizeof(value_t), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_y, h_y, total_size * sizeof(value_t), hipMemcpyHostToDevice));
dim3 blockd3 = dim3(block0, block1, 1);
dim3 grid = calc_grid2d(blockd3, side_size, side_size);
std::cout << " Block: " << blockd3.x << "(x) X " << blockd3.y << "(y)\n"
<< " Grid size: " << grid.x << "(x) X " << grid.y << "(y)\n\n";
loops = 0;
auto startcpu = std::chrono::high_resolution_clock::now();
checkCudaErrors(hipEventRecord(start));
while ((std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::high_resolution_clock::now() - startcpu)
.count()) < 1000.0 * benchtime)
{
hipLaunchKernelGGL(( kernel), dim3(grid), dim3(blockd3), 0, 0, side_size, side_size, d_x, d_y);
checkCudaErrorsAfterKernels;
loops++;
}
checkCudaErrors(hipEventRecord(stop));
checkCudaErrors(hipEventSynchronize(stop));
float du = 0;
checkCudaErrors(hipEventElapsedTime(&du, start, stop));
duration = 1.0e-3 * du;
checkCudaErrors(hipMemcpy(h_y, d_y, total_size * sizeof(value_t), hipMemcpyDeviceToHost));
test_result(y, value_t(total_size));
print_performance();
delete[] x;
delete[] y;
checkCudaErrors(hipFree(d_x));
checkCudaErrors(hipFree(d_y));
}
mat_copy(int narg, char **arg) : mat_bench<value_t, index_t>(narg, arg)
{
memory_transfer_per_loop = 2.0 * sizeof(value_t) * double(total_size) /
(1024.0 * 1024.0 * 1024.0);
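        // One read of x plus one write of y per element per kernel launch,
        // expressed in GiB (hence the division by 1024^3).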
}
};
int main(int narg, char **arg)
{
check_cuda_device();
mat_copy test(narg, arg);
test.benchmark();
} | 4c7fdc1c3c0360869ea2283031d7f7941cee5ac4.cu | #include <chrono>
#include "array2d.h"
#include "cuda_helper.h"
#include "mat_bench.h"
#define value_t float
#define index_t int
__global__ void kernel(index_t Nx, index_t Ny, value_t *x, value_t *y)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int gid = i * Ny + j;
if (i < Nx && j < Ny)
y[gid] = x[gid];
}
struct mat_copy : public mat_bench<value_t, index_t>
{
void benchmark()
{
print_bench();
std::cout << "\nSimulation info: 2d mat copy\n";
value_t **x = create_array2d<value_t, index_t>(side_size, side_size);
value_t **y = create_array2d<value_t, index_t>(side_size, side_size);
#pragma omp parallel for
for (index_t i = 0; i < side_size; i++)
{
for (index_t j = 0; j < side_size; j++)
{
x[i][j] = 1.0;
y[i][j] = 0.0;
}
}
value_t *d_x, *d_y;
value_t *h_x = x[0], *h_y = y[0];
checkCudaErrors(cudaMalloc(&d_x, total_size * sizeof(value_t)));
checkCudaErrors(cudaMalloc(&d_y, total_size * sizeof(value_t)));
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
checkCudaErrors(cudaMemcpy(d_x, h_x, total_size * sizeof(value_t), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_y, h_y, total_size * sizeof(value_t), cudaMemcpyHostToDevice));
dim3 blockd3 = dim3(block0, block1, 1);
dim3 grid = calc_grid2d(blockd3, side_size, side_size);
std::cout << " Block: " << blockd3.x << "(x) X " << blockd3.y << "(y)\n"
<< " Grid size: " << grid.x << "(x) X " << grid.y << "(y)\n\n";
loops = 0;
auto startcpu = std::chrono::high_resolution_clock::now();
checkCudaErrors(cudaEventRecord(start));
while ((std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::high_resolution_clock::now() - startcpu)
.count()) < 1000.0 * benchtime)
{
kernel<<<grid, blockd3>>>(side_size, side_size, d_x, d_y);
checkCudaErrorsAfterKernels;
loops++;
}
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaEventSynchronize(stop));
float du = 0;
checkCudaErrors(cudaEventElapsedTime(&du, start, stop));
duration = 1.0e-3 * du;
checkCudaErrors(cudaMemcpy(h_y, d_y, total_size * sizeof(value_t), cudaMemcpyDeviceToHost));
test_result(y, value_t(total_size));
print_performance();
delete[] x;
delete[] y;
checkCudaErrors(cudaFree(d_x));
checkCudaErrors(cudaFree(d_y));
}
mat_copy(int narg, char **arg) : mat_bench<value_t, index_t>(narg, arg)
{
memory_transfer_per_loop = 2.0 * sizeof(value_t) * double(total_size) /
(1024.0 * 1024.0 * 1024.0);
}
};
int main(int narg, char **arg)
{
check_cuda_device();
mat_copy test(narg, arg);
test.benchmark();
} |
87575e50efe97dc7a82e3ed8f280773e848ac9bb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
/* Computation of eigenvalues of a small symmetric, tridiagonal matrix */
#include <prof.h>
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
// includes, project
#include "cutil_inline.h"
#include "config.h"
#include "structs.h"
#include "matlab.h"
// includes, kernels
#include "bisect_kernel_small.cu"
// includes, file
#include "bisect_small.cuh"
////////////////////////////////////////////////////////////////////////////////
//! Determine eigenvalues for matrices smaller than MAX_SMALL_MATRIX
//! @param TimingIterations number of iterations for timing
//! @param input handles to input data of kernel
//! @param result handles to result of kernel
//! @param mat_size matrix size
//! @param lg lower limit of Gerschgorin interval
//! @param ug upper limit of Gerschgorin interval
//! @param precision desired precision of eigenvalues
//! @param iterations number of iterations for timing
////////////////////////////////////////////////////////////////////////////////
void
computeEigenvaluesSmallMatrix( const InputData& input, ResultDataSmall& result,
const unsigned int mat_size,
const float lg, const float ug,
const float precision,
const unsigned int iterations )
{
unsigned int timer = 0;
cutilCheckError( cutCreateTimer( &timer));
cutilCheckError( cutStartTimer( timer));
for( unsigned int i = 0; i < iterations; ++i) {
dim3 blocks( 1, 1, 1);
dim3 threads( MAX_THREADS_BLOCK_SMALL_MATRIX, 1, 1);
GpuProfiling::prepareProfiling( blocks, threads );
hipLaunchKernelGGL(( bisectKernel), dim3(blocks), dim3(threads) , 0, 0, input.g_a, input.g_b, mat_size,
result.g_left, result.g_right,
result.g_left_count,
result.g_right_count,
lg, ug, 0, mat_size,
precision
);
GpuProfiling::addResults("bisectKernel");
}
cutilSafeCall( hipDeviceSynchronize());
cutilCheckError( cutStopTimer( timer));
cutilCheckMsg( "Kernel launch failed");
printf( "Average time: %f ms (%i iterations)\n",
cutGetTimerValue( timer) / (float) iterations, iterations );
cutilCheckError( cutDeleteTimer( timer));
}
////////////////////////////////////////////////////////////////////////////////
//! Initialize variables and memory for the result for small matrices
//! @param result handles to the necessary memory
//! @param mat_size matrix_size
////////////////////////////////////////////////////////////////////////////////
void
initResultSmallMatrix( ResultDataSmall& result, const unsigned int mat_size) {
result.mat_size_f = sizeof(float) * mat_size;
result.mat_size_ui = sizeof(unsigned int) * mat_size;
result.eigenvalues = (float*) malloc( result.mat_size_f);
// helper variables
result.zero_f = (float*) malloc( result.mat_size_f);
result.zero_ui = (unsigned int*) malloc( result.mat_size_ui);
for( unsigned int i = 0; i < mat_size; ++i) {
result.zero_f[i] = 0.0f;
result.zero_ui[i] = 0;
result.eigenvalues[i] = 0.0f;
}
cutilSafeCall( hipMalloc( (void**) &result.g_left, result.mat_size_f));
cutilSafeCall( hipMalloc( (void**) &result.g_right, result.mat_size_f));
cutilSafeCall( hipMalloc( (void**) &result.g_left_count,
result.mat_size_ui));
cutilSafeCall( hipMalloc( (void**) &result.g_right_count,
result.mat_size_ui));
// initialize result memory
cutilSafeCall( hipMemcpy( result.g_left, result.zero_f, result.mat_size_f,
hipMemcpyHostToDevice));
cutilSafeCall( hipMemcpy( result.g_right, result.zero_f, result.mat_size_f,
hipMemcpyHostToDevice));
cutilSafeCall( hipMemcpy( result.g_right_count, result.zero_ui,
result.mat_size_ui,
hipMemcpyHostToDevice));
cutilSafeCall( hipMemcpy( result.g_left_count, result.zero_ui,
result.mat_size_ui,
hipMemcpyHostToDevice));
}
////////////////////////////////////////////////////////////////////////////////
//! Cleanup memory and variables for result for small matrices
//! @param result handle to variables
////////////////////////////////////////////////////////////////////////////////
void
cleanupResultSmallMatrix( ResultDataSmall& result) {
freePtr( result.eigenvalues);
freePtr( result.zero_f);
freePtr( result.zero_ui);
cutilSafeCall( hipFree( result.g_left));
cutilSafeCall( hipFree( result.g_right));
cutilSafeCall( hipFree( result.g_left_count));
cutilSafeCall( hipFree( result.g_right_count));
}
////////////////////////////////////////////////////////////////////////////////
//! Process the result obtained on the device, that is transfer to host and
//! perform basic sanity checking
//! @param input handles to input data
//! @param result handles to result data
//! @param mat_size matrix size
//! @param filename output filename
////////////////////////////////////////////////////////////////////////////////
void
processResultSmallMatrix( const InputData& input, const ResultDataSmall& result,
const unsigned int mat_size,
const char* filename ) {
const unsigned int mat_size_f = sizeof(float) * mat_size;
const unsigned int mat_size_ui = sizeof(unsigned int) * mat_size;
// copy data back to host
float* left = (float*) malloc( mat_size_f);
unsigned int* left_count = (unsigned int*) malloc( mat_size_ui);
cutilSafeCall( hipMemcpy( left, result.g_left, mat_size_f,
hipMemcpyDeviceToHost));
cutilSafeCall( hipMemcpy( left_count, result.g_left_count, mat_size_ui,
hipMemcpyDeviceToHost));
float* eigenvalues = (float*) malloc( mat_size_f);
for( unsigned int i = 0; i < mat_size; ++i) {
eigenvalues[left_count[i]] = left[i];
}
// save result in matlab format
writeTridiagSymMatlab( filename, input.a, input.b+1, eigenvalues, mat_size);
// GpuProfiling::printResults();
freePtr( left);
freePtr( left_count);
freePtr( eigenvalues);
}
| 87575e50efe97dc7a82e3ed8f280773e848ac9bb.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
/* Computation of eigenvalues of a small symmetric, tridiagonal matrix */
#include <prof.h>
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
// includes, project
#include "cutil_inline.h"
#include "config.h"
#include "structs.h"
#include "matlab.h"
// includes, kernels
#include "bisect_kernel_small.cu"
// includes, file
#include "bisect_small.cuh"
////////////////////////////////////////////////////////////////////////////////
//! Determine eigenvalues for matrices smaller than MAX_SMALL_MATRIX
//! @param TimingIterations number of iterations for timing
//! @param input handles to input data of kernel
//! @param result handles to result of kernel
//! @param mat_size matrix size
//! @param lg lower limit of Gerschgorin interval
//! @param ug upper limit of Gerschgorin interval
//! @param precision desired precision of eigenvalues
//! @param iterations number of iterations for timing
////////////////////////////////////////////////////////////////////////////////
void
computeEigenvaluesSmallMatrix( const InputData& input, ResultDataSmall& result,
const unsigned int mat_size,
const float lg, const float ug,
const float precision,
const unsigned int iterations )
{
unsigned int timer = 0;
cutilCheckError( cutCreateTimer( &timer));
cutilCheckError( cutStartTimer( timer));
for( unsigned int i = 0; i < iterations; ++i) {
dim3 blocks( 1, 1, 1);
dim3 threads( MAX_THREADS_BLOCK_SMALL_MATRIX, 1, 1);
GpuProfiling::prepareProfiling( blocks, threads );
bisectKernel<<< blocks, threads >>>( input.g_a, input.g_b, mat_size,
result.g_left, result.g_right,
result.g_left_count,
result.g_right_count,
lg, ug, 0, mat_size,
precision
);
GpuProfiling::addResults("bisectKernel");
}
cutilSafeCall( cudaThreadSynchronize());
cutilCheckError( cutStopTimer( timer));
cutilCheckMsg( "Kernel launch failed");
printf( "Average time: %f ms (%i iterations)\n",
cutGetTimerValue( timer) / (float) iterations, iterations );
cutilCheckError( cutDeleteTimer( timer));
}
////////////////////////////////////////////////////////////////////////////////
//! Initialize variables and memory for the result for small matrices
//! @param result handles to the necessary memory
//! @param mat_size matrix_size
////////////////////////////////////////////////////////////////////////////////
void
initResultSmallMatrix( ResultDataSmall& result, const unsigned int mat_size) {
result.mat_size_f = sizeof(float) * mat_size;
result.mat_size_ui = sizeof(unsigned int) * mat_size;
result.eigenvalues = (float*) malloc( result.mat_size_f);
// helper variables
result.zero_f = (float*) malloc( result.mat_size_f);
result.zero_ui = (unsigned int*) malloc( result.mat_size_ui);
for( unsigned int i = 0; i < mat_size; ++i) {
result.zero_f[i] = 0.0f;
result.zero_ui[i] = 0;
result.eigenvalues[i] = 0.0f;
}
cutilSafeCall( cudaMalloc( (void**) &result.g_left, result.mat_size_f));
cutilSafeCall( cudaMalloc( (void**) &result.g_right, result.mat_size_f));
cutilSafeCall( cudaMalloc( (void**) &result.g_left_count,
result.mat_size_ui));
cutilSafeCall( cudaMalloc( (void**) &result.g_right_count,
result.mat_size_ui));
// initialize result memory
cutilSafeCall( cudaMemcpy( result.g_left, result.zero_f, result.mat_size_f,
cudaMemcpyHostToDevice));
cutilSafeCall( cudaMemcpy( result.g_right, result.zero_f, result.mat_size_f,
cudaMemcpyHostToDevice));
cutilSafeCall( cudaMemcpy( result.g_right_count, result.zero_ui,
result.mat_size_ui,
cudaMemcpyHostToDevice));
cutilSafeCall( cudaMemcpy( result.g_left_count, result.zero_ui,
result.mat_size_ui,
cudaMemcpyHostToDevice));
}
////////////////////////////////////////////////////////////////////////////////
//! Cleanup memory and variables for result for small matrices
//! @param result handle to variables
////////////////////////////////////////////////////////////////////////////////
void
cleanupResultSmallMatrix( ResultDataSmall& result) {
freePtr( result.eigenvalues);
freePtr( result.zero_f);
freePtr( result.zero_ui);
cutilSafeCall( cudaFree( result.g_left));
cutilSafeCall( cudaFree( result.g_right));
cutilSafeCall( cudaFree( result.g_left_count));
cutilSafeCall( cudaFree( result.g_right_count));
}
////////////////////////////////////////////////////////////////////////////////
//! Process the result obtained on the device, that is transfer to host and
//! perform basic sanity checking
//! @param input handles to input data
//! @param result handles to result data
//! @param mat_size matrix size
//! @param filename output filename
////////////////////////////////////////////////////////////////////////////////
void
processResultSmallMatrix( const InputData& input, const ResultDataSmall& result,
const unsigned int mat_size,
const char* filename ) {
const unsigned int mat_size_f = sizeof(float) * mat_size;
const unsigned int mat_size_ui = sizeof(unsigned int) * mat_size;
// copy data back to host
float* left = (float*) malloc( mat_size_f);
unsigned int* left_count = (unsigned int*) malloc( mat_size_ui);
cutilSafeCall( cudaMemcpy( left, result.g_left, mat_size_f,
cudaMemcpyDeviceToHost));
cutilSafeCall( cudaMemcpy( left_count, result.g_left_count, mat_size_ui,
cudaMemcpyDeviceToHost));
float* eigenvalues = (float*) malloc( mat_size_f);
for( unsigned int i = 0; i < mat_size; ++i) {
eigenvalues[left_count[i]] = left[i];
}
// save result in matlab format
writeTridiagSymMatlab( filename, input.a, input.b+1, eigenvalues, mat_size);
// GpuProfiling::printResults();
freePtr( left);
freePtr( left_count);
freePtr( eigenvalues);
}
|
181e22e8eb2c355b63151f5f253a5a8044135ba1.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdint>
#include <cstdlib>
#include "SyncedMemory.h"
#include "lab1.h"
using namespace std;
#define CHECK {\
auto e = hipDeviceSynchronize();\
if (e != hipSuccess) {\
printf("At " __FILE__ ":%d, %s\n", __LINE__, hipGetErrorString(e));\
abort();\
}\
}
int main()
{
Lab1VideoGenerator g;
Lab1VideoInfo i;
	#pragma region get video info
g.get_info(i);
if (i.w == 0 || i.h == 0 || i.n_frame == 0 || i.fps_n == 0 || i.fps_d == 0)
{
puts("Cannot be zero");
abort();
}
else if (i.w % 2 != 0 || i.h % 2 != 0) {
puts("Only even frame size is supported");
abort();
}
#pragma endregion
unsigned FRAME_SIZE = i.w * i.h * 3 / 2; // y4m subsample W/2 x H/2
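	// Worked out for 4:2:0: Y plane = w*h bytes, U and V planes are (w/2)*(h/2)
	// bytes each, so one frame is w*h + 2*(w*h/4) = w*h*3/2 bytes.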
MemoryBuffer<uint8_t> frameb(FRAME_SIZE);
auto frames = frameb.CreateSync(FRAME_SIZE);
FILE *fp = fopen("result.y4m", "wb");
fprintf(fp, "YUV4MPEG2 W%d H%d F%d:%d Ip A1:1 C420\n", i.w, i.h, i.fps_n, i.fps_d);
for (unsigned j = 0; j < i.n_frame; ++j) {
fputs("FRAME\n", fp);
g.Generate(frames.get_gpu_wo());
fwrite(frames.get_cpu_ro(), sizeof(int8_t), FRAME_SIZE, fp);
}
fclose(fp);
return 0;
}
| 181e22e8eb2c355b63151f5f253a5a8044135ba1.cu | #include <cstdio>
#include <cstdint>
#include <cstdlib>
#include "SyncedMemory.h"
#include "lab1.h"
using namespace std;
#define CHECK {\
auto e = cudaDeviceSynchronize();\
if (e != cudaSuccess) {\
printf("At " __FILE__ ":%d, %s\n", __LINE__, cudaGetErrorString(e));\
abort();\
}\
}
int main()
{
Lab1VideoGenerator g;
Lab1VideoInfo i;
	#pragma region get video info
g.get_info(i);
if (i.w == 0 || i.h == 0 || i.n_frame == 0 || i.fps_n == 0 || i.fps_d == 0)
{
puts("Cannot be zero");
abort();
}
else if (i.w % 2 != 0 || i.h % 2 != 0) {
puts("Only even frame size is supported");
abort();
}
#pragma endregion
	unsigned FRAME_SIZE = i.w * i.h * 3 / 2; // because y4m requires 4:2:0 subsampling (W/2 x H/2 chroma)
MemoryBuffer<uint8_t> frameb(FRAME_SIZE);
auto frames = frameb.CreateSync(FRAME_SIZE);
FILE *fp = fopen("result.y4m", "wb");
fprintf(fp, "YUV4MPEG2 W%d H%d F%d:%d Ip A1:1 C420\n", i.w, i.h, i.fps_n, i.fps_d);
for (unsigned j = 0; j < i.n_frame; ++j) {
fputs("FRAME\n", fp);
g.Generate(frames.get_gpu_wo());
fwrite(frames.get_cpu_ro(), sizeof(int8_t), FRAME_SIZE, fp);
}
fclose(fp);
return 0;
}
|
782743810122905895beb8b0e02e6cebfa7327b6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <gptl.h>
#include <gptl_cuda.h>
__global__ void runit (float, float, int *, int *);
__global__ void setup_handles (int *, int *);
int *runit_handle;
int *percall_handle;
int main ()
{
int blocksize;
int warpsize = -1;
int khz = -1;
int devnum = -1;
int smcount = -1;
int cores_per_sm = -1;
int cores_per_gpu = -1;
int oversub = -1;
int nwarps;
int nthreads;
int nblocks;
int ret;
float sleep_tot;
float sleep_percall;
ret = GPTLget_gpu_props (&khz, &warpsize, &devnum, &smcount, &cores_per_sm, &cores_per_gpu);
printf ("Enter oversubsubscription factor\n");
scanf ("%d", &oversub);
printf ("oversub=%d\n", oversub);
nwarps = (cores_per_gpu * oversub) / warpsize;
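  /* Example (values assumed purely for illustration): cores_per_gpu=2048,
     oversub=4, warpsize=32 gives nwarps=256, i.e. 8192 GPU threads. */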
printf ("nwarps=%d\n", nwarps);
if (nwarps * warpsize != cores_per_gpu * oversub)
printf ("NOTE: warpsize=%d does not divide evenly into cores_per_gpu(%d) * oversub(%d)=%d\n",
warpsize, cores_per_gpu, oversub, cores_per_gpu * oversub);
ret = GPTLsetoption (GPTLmaxwarps_gpu, nwarps);
printf ("Enter sleep_tot sleep_percall (both in floating point seconds)\n");
scanf ("%f%f", &sleep_tot, &sleep_percall);
printf ("sleep_tot=%f sec sleep_percall=%f sec\n", sleep_tot, sleep_percall);
ret = GPTLinitialize ();
(void) (hipMallocManaged (&runit_handle, sizeof (int)));
(void) (hipMallocManaged (&percall_handle, sizeof (int)));
hipLaunchKernelGGL(( setup_handles) , dim3(1),dim3(1), 0, 0, runit_handle, percall_handle);
hipDeviceSynchronize ();
blocksize = cores_per_sm;
nthreads = nwarps * warpsize;
nblocks = nthreads / blocksize;
printf ("nblocks=%d blocksize=%d\n", nblocks, blocksize);
ret = GPTLstart ("total");
hipLaunchKernelGGL(( runit) , dim3(nblocks),dim3(blocksize), 0, 0, sleep_tot, sleep_percall, runit_handle, percall_handle);
hipDeviceSynchronize ();
ret = GPTLstop ("total");
ret = GPTLpr (0);
return 0;
}
__global__ void setup_handles (int *runit_handle, int *percall_handle)
{
int ret;
ret = GPTLinit_handle_gpu ("runit", runit_handle);
ret = GPTLinit_handle_gpu ("percall", percall_handle);
}
__global__ void runit (float sleep_tot, float sleep_percall, int *runit_handle, int *percall_handle)
{
int ret;
double slept = 0.;
ret = GPTLstart_gpu (*runit_handle);
while (slept < sleep_tot) {
ret = GPTLstart_gpu (*percall_handle);
ret = GPTLmy_sleep (sleep_percall);
ret = GPTLstop_gpu (*percall_handle);
slept += sleep_percall;
}
ret = GPTLstop_gpu (*runit_handle);
}
| 782743810122905895beb8b0e02e6cebfa7327b6.cu | #include <stdio.h>
#include <cuda.h>
#include <gptl.h>
#include <gptl_cuda.h>
__global__ void runit (float, float, int *, int *);
__global__ void setup_handles (int *, int *);
int *runit_handle;
int *percall_handle;
int main ()
{
int blocksize;
int warpsize = -1;
int khz = -1;
int devnum = -1;
int smcount = -1;
int cores_per_sm = -1;
int cores_per_gpu = -1;
int oversub = -1;
int nwarps;
int nthreads;
int nblocks;
int ret;
float sleep_tot;
float sleep_percall;
ret = GPTLget_gpu_props (&khz, &warpsize, &devnum, &smcount, &cores_per_sm, &cores_per_gpu);
printf ("Enter oversubsubscription factor\n");
scanf ("%d", &oversub);
printf ("oversub=%d\n", oversub);
nwarps = (cores_per_gpu * oversub) / warpsize;
printf ("nwarps=%d\n", nwarps);
if (nwarps * warpsize != cores_per_gpu * oversub)
printf ("NOTE: warpsize=%d does not divide evenly into cores_per_gpu(%d) * oversub(%d)=%d\n",
warpsize, cores_per_gpu, oversub, cores_per_gpu * oversub);
ret = GPTLsetoption (GPTLmaxwarps_gpu, nwarps);
printf ("Enter sleep_tot sleep_percall (both in floating point seconds)\n");
scanf ("%f%f", &sleep_tot, &sleep_percall);
printf ("sleep_tot=%f sec sleep_percall=%f sec\n", sleep_tot, sleep_percall);
ret = GPTLinitialize ();
(void) (cudaMallocManaged (&runit_handle, sizeof (int)));
(void) (cudaMallocManaged (&percall_handle, sizeof (int)));
setup_handles <<<1,1>>> (runit_handle, percall_handle);
cudaDeviceSynchronize ();
blocksize = cores_per_sm;
nthreads = nwarps * warpsize;
nblocks = nthreads / blocksize;
printf ("nblocks=%d blocksize=%d\n", nblocks, blocksize);
ret = GPTLstart ("total");
runit <<<nblocks,blocksize>>> (sleep_tot, sleep_percall, runit_handle, percall_handle);
cudaDeviceSynchronize ();
ret = GPTLstop ("total");
ret = GPTLpr (0);
return 0;
}
__global__ void setup_handles (int *runit_handle, int *percall_handle)
{
int ret;
ret = GPTLinit_handle_gpu ("runit", runit_handle);
ret = GPTLinit_handle_gpu ("percall", percall_handle);
}
__global__ void runit (float sleep_tot, float sleep_percall, int *runit_handle, int *percall_handle)
{
int ret;
double slept = 0.;
ret = GPTLstart_gpu (*runit_handle);
while (slept < sleep_tot) {
ret = GPTLstart_gpu (*percall_handle);
ret = GPTLmy_sleep (sleep_percall);
ret = GPTLstop_gpu (*percall_handle);
slept += sleep_percall;
}
ret = GPTLstop_gpu (*runit_handle);
}
|
561ba48e436f93c7d298a0ddc2f41e70d79b7bcd.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright (c) MONAI Consortium
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=========================================================================
Adapted from https://github.com/faebstn96/trainable-joint-bilateral-filter-source
which has the following license...
https://github.com/faebstn96/trainable-joint-bilateral-filter-source/blob/main/LICENSE
Copyright 2022 Fabian Wagner, Pattern Recognition Lab, FAU Erlangen-Nuernberg, Erlangen, Germany
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "trainable_joint_bilateral.h"
//#include "../utils/cuda_error_check.h"
#include "utils/meta_macros.h"
#include "utils/tensor_description.h"
__constant__ int cBatchStride;
__constant__ int cColorStride;
__constant__ int cSizes[3];
__constant__ int cStrides[3];
__constant__ int cKernelSizes[3];
__constant__ int cHalfWindowSize_arr[3];
__constant__ float cGaussianKernel_x[256];
__constant__ float cGaussianKernel_y[256];
__constant__ float cGaussianKernel_z[256];
__constant__ float cXDistanceSquared[256];
__constant__ float cYDistanceSquared[256];
__constant__ float cZDistanceSquared[256];
__constant__ float cColorExponentConstant;
__constant__ float cSigma_x;
__constant__ float cSigma_y;
__constant__ float cSigma_z;
__constant__ float cColorSigma;
template <typename scalar_t, int C>
__global__ void JointBilateralFilterCudaKernel3DForward(
scalar_t* input,
scalar_t* guidance,
scalar_t* output,
scalar_t* outputWeightsTensor,
scalar_t* dO_dz_ki,
scalar_t* dO_dsig_r,
scalar_t* dO_dsig_x,
scalar_t* dO_dsig_y,
scalar_t* dO_dsig_z) {
int homeOffset = blockIdx.x * blockDim.x + threadIdx.x;
int batchOffset = blockIdx.y * cBatchStride;
if (homeOffset >= cColorStride)
return;
int homeX = homeOffset / cStrides[0];
int homeY = (homeOffset - homeX * cStrides[0]) / cStrides[1];
int homeZ = (homeOffset - homeX * cStrides[0] - homeY * cStrides[1]) / cStrides[2];
int homeIndex[] = {homeX, homeY, homeZ};
// Zero kernel aggregates.
scalar_t valueSum = 0;
scalar_t dw_dz_ki = 0;
scalar_t dfilter_dz_ki = 0;
scalar_t colorSum_w = 0;
scalar_t colorSum_alpha = 0;
scalar_t xSum_w = 0;
scalar_t xSum_alpha = 0;
scalar_t ySum_w = 0;
scalar_t ySum_alpha = 0;
scalar_t zSum_w = 0;
scalar_t zSum_alpha = 0;
scalar_t weightSum = 0;
for (int kernelX = 0; kernelX < cKernelSizes[0]; kernelX++) {
int neighbourX = max(0, min(homeX + (kernelX - cHalfWindowSize_arr[0]), cSizes[0] - 1));
scalar_t gaussianX = cGaussianKernel_x[kernelX];
for (int kernelY = 0; kernelY < cKernelSizes[1]; kernelY++) {
int neighbourY = max(0, min(homeY + (kernelY - cHalfWindowSize_arr[1]), cSizes[1] - 1));
scalar_t gaussianY = cGaussianKernel_y[kernelY];
for (int kernelZ = 0; kernelZ < cKernelSizes[2]; kernelZ++) {
int neighbourZ = max(0, min(homeZ + (kernelZ - cHalfWindowSize_arr[2]), cSizes[2] - 1));
scalar_t gaussianZ = cGaussianKernel_z[kernelZ];
int neighbourOffset = neighbourX * cStrides[0] + neighbourY * cStrides[1] + neighbourZ;
bool flagNotClamped = true;
int kernelIndex[] = {kernelX, kernelY, kernelZ};
int dimensions = 3; // Must equal the number of spatial dimensions.
for (int i = 0; i < dimensions; i++) {
int HalfWindowSizeBack = cHalfWindowSize_arr[i]; // Define constant memory as new variable here (!!),
// otherwise: hipErrorMisalignedAddress
int neighbourIndex = homeIndex[i] + kernelIndex[i] - HalfWindowSizeBack;
int neighbourIndexClamped = min(cSizes[i] - 1, max(0, neighbourIndex));
if (neighbourIndex != neighbourIndexClamped) {
flagNotClamped = false;
}
}
scalar_t colorDistance = 0;
scalar_t colorDistanceSquared = 0;
#pragma unroll
for (int c = 0; c < C; c++) {
scalar_t a = guidance[batchOffset + homeOffset + c * cColorStride];
scalar_t b = guidance[batchOffset + neighbourOffset + c * cColorStride]; // Home - neighbor (!!) in backward
// the other way around !!
scalar_t diff = a - b;
colorDistance += diff; // Do not take the absolute value here. Be careful with the signs.
colorDistanceSquared += diff * diff;
}
scalar_t spatialWeight = gaussianX * gaussianY * gaussianZ;
scalar_t colorWeight = exp(cColorExponentConstant * colorDistanceSquared);
scalar_t totalWeight = spatialWeight * colorWeight;
// Aggregating values. Only do this if flagNotClamped: Pixels outside the image are disregarded.
if (flagNotClamped) {
#pragma unroll
for (int c = 0; c < C; c++) {
valueSum += input[batchOffset + neighbourOffset + c * cColorStride] * totalWeight;
// Derivative of weights with respect to X_i while i=k.
dw_dz_ki += (-1) * totalWeight * colorDistance / (cColorSigma * cColorSigma);
// Derivative of convolved image with respect to X_i while i=k.
dfilter_dz_ki += (-1) * totalWeight * input[batchOffset + neighbourOffset + c * cColorStride] *
colorDistance /
(cColorSigma *
cColorSigma); // Be careful, the +1 is missing here -> Added before filling dfilter_dx_kiData
colorSum_w += totalWeight * colorDistanceSquared / std::abs(cColorSigma * cColorSigma * cColorSigma);
colorSum_alpha += totalWeight * input[batchOffset + neighbourOffset + c * cColorStride] *
colorDistanceSquared / std::abs(cColorSigma * cColorSigma * cColorSigma);
xSum_w += totalWeight * cXDistanceSquared[kernelX] / std::abs(cSigma_x * cSigma_x * cSigma_x);
xSum_alpha += totalWeight * input[batchOffset + neighbourOffset + c * cColorStride] *
cXDistanceSquared[kernelX] / std::abs(cSigma_x * cSigma_x * cSigma_x);
ySum_w += totalWeight * cYDistanceSquared[kernelY] / std::abs(cSigma_y * cSigma_y * cSigma_y);
ySum_alpha += totalWeight * input[batchOffset + neighbourOffset + c * cColorStride] *
cYDistanceSquared[kernelY] / std::abs(cSigma_y * cSigma_y * cSigma_y);
zSum_w += totalWeight * cZDistanceSquared[kernelZ] / std::abs(cSigma_z * cSigma_z * cSigma_z);
zSum_alpha += totalWeight * input[batchOffset + neighbourOffset + c * cColorStride] *
cZDistanceSquared[kernelZ] / std::abs(cSigma_z * cSigma_z * cSigma_z);
}
weightSum += totalWeight;
}
}
}
}
#pragma unroll
for (int c = 0; c < C; c++) {
// output[batchOffset + homeOffset + c * cColorStride] /= weightSum;
output[batchOffset + homeOffset + c * cColorStride] = valueSum / weightSum;
// Pre-computations for the backward pass:
outputWeightsTensor[batchOffset + homeOffset + c * cColorStride] = weightSum;
dO_dz_ki[batchOffset + homeOffset + c * cColorStride] = -(1 / weightSum) * (valueSum / weightSum) * dw_dz_ki +
(1 / weightSum) * (dfilter_dz_ki); // no +1 for dfilter_dz_ki for JBF added here!
dO_dsig_r[batchOffset + homeOffset + c * cColorStride] =
-(1 / weightSum) * (valueSum / weightSum) * colorSum_w + (1 / weightSum) * colorSum_alpha;
dO_dsig_x[batchOffset + homeOffset + c * cColorStride] =
-(1 / weightSum) * (valueSum / weightSum) * xSum_w + (1 / weightSum) * xSum_alpha;
dO_dsig_y[batchOffset + homeOffset + c * cColorStride] =
-(1 / weightSum) * (valueSum / weightSum) * ySum_w + (1 / weightSum) * ySum_alpha;
dO_dsig_z[batchOffset + homeOffset + c * cColorStride] =
-(1 / weightSum) * (valueSum / weightSum) * zSum_w + (1 / weightSum) * zSum_alpha;
}
}
template <int C, int D>
void JointBilateralFilterCudaForwardFunction(
torch::Tensor inputTensor,
torch::Tensor guidanceTensor,
torch::Tensor outputTensor,
torch::Tensor outputWeightsTensor,
torch::Tensor dO_dz_ki,
torch::Tensor dO_dsig_r,
torch::Tensor dO_dsig_x,
torch::Tensor dO_dsig_y,
torch::Tensor dO_dsig_z,
float sigma_x,
float sigma_y,
float sigma_z,
float colorSigma) {
// Getting tensor description.
TensorDescription desc = TensorDescription(inputTensor);
// Pre-calculating gaussian kernel.
int windowSize_x = ::max(((int)ceil(5.0f * sigma_x) | 1), 5); // ORing last bit to ensure odd window size
int windowSize_y = ::max(((int)ceil(5.0f * sigma_y) | 1), 5); // ORing last bit to ensure odd window size
int windowSize_z = ::max(((int)ceil(5.0f * sigma_z) | 1), 5); // ORing last bit to ensure odd window size
int halfWindowSize_x = floor(0.5f * windowSize_x);
int halfWindowSize_y = floor(0.5f * windowSize_y);
int halfWindowSize_z = floor(0.5f * windowSize_z);
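  // Example (illustrative): sigma_x = 1.5 gives ceil(5*1.5)=8, 8|1=9, so a 9-wide
  // window with halfWindowSize_x = 4; very small sigmas are clamped to width 5.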
int halfWindowSize_arr[] = {halfWindowSize_x, halfWindowSize_y, halfWindowSize_z};
float spatialExpConstant_x = -1.0f / (2 * sigma_x * sigma_x);
float spatialExpConstant_y = -1.0f / (2 * sigma_y * sigma_y);
float spatialExpConstant_z = -1.0f / (2 * sigma_z * sigma_z);
float colorExpConstant = -1.0f / (2 * colorSigma * colorSigma);
int* kernelSizes = new int[desc.dimensions];
kernelSizes[0] = windowSize_x;
kernelSizes[1] = windowSize_y;
kernelSizes[2] = windowSize_z;
auto* gaussianKernel_x = new float[windowSize_x];
auto* gaussianKernel_y = new float[windowSize_y];
auto* gaussianKernel_z = new float[windowSize_z];
auto* xDistanceSquared = new float[windowSize_x];
auto* yDistanceSquared = new float[windowSize_y];
auto* zDistanceSquared = new float[windowSize_z];
for (int i = 0; i < windowSize_x; i++) {
int distance = i - halfWindowSize_x;
gaussianKernel_x[i] = exp(distance * distance * spatialExpConstant_x);
xDistanceSquared[i] = distance * distance;
}
for (int i = 0; i < windowSize_y; i++) {
int distance = i - halfWindowSize_y;
gaussianKernel_y[i] = exp(distance * distance * spatialExpConstant_y);
yDistanceSquared[i] = distance * distance;
}
for (int i = 0; i < windowSize_z; i++) {
int distance = i - halfWindowSize_z;
gaussianKernel_z[i] = exp(distance * distance * spatialExpConstant_z);
zDistanceSquared[i] = distance * distance;
}
// Writing constant memory.
hipMemcpyToSymbol(cBatchStride, &desc.batchStride, sizeof(int));
hipMemcpyToSymbol(cColorStride, &desc.channelStride, sizeof(int));
hipMemcpyToSymbol(cSizes, desc.sizes, sizeof(int) * 3);
hipMemcpyToSymbol(cStrides, desc.strides, sizeof(int) * 3);
hipMemcpyToSymbol(cKernelSizes, kernelSizes, sizeof(int) * desc.dimensions);
hipMemcpyToSymbol(cHalfWindowSize_arr, halfWindowSize_arr, sizeof(int) * desc.dimensions);
hipMemcpyToSymbol(cGaussianKernel_x, gaussianKernel_x, sizeof(float) * windowSize_x);
hipMemcpyToSymbol(cGaussianKernel_y, gaussianKernel_y, sizeof(float) * windowSize_y);
hipMemcpyToSymbol(cGaussianKernel_z, gaussianKernel_z, sizeof(float) * windowSize_z);
hipMemcpyToSymbol(cXDistanceSquared, xDistanceSquared, sizeof(float) * windowSize_x);
hipMemcpyToSymbol(cYDistanceSquared, yDistanceSquared, sizeof(float) * windowSize_y);
hipMemcpyToSymbol(cZDistanceSquared, zDistanceSquared, sizeof(float) * windowSize_z);
hipMemcpyToSymbol(cColorExponentConstant, &colorExpConstant, sizeof(float));
hipMemcpyToSymbol(cSigma_x, &sigma_x, sizeof(float));
hipMemcpyToSymbol(cSigma_y, &sigma_y, sizeof(float));
hipMemcpyToSymbol(cSigma_z, &sigma_z, sizeof(float));
hipMemcpyToSymbol(cColorSigma, &colorSigma, sizeof(float));
// cuda_error_check("Cuda check before kernel call.");
#define BLOCK_SIZE 32
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
inputTensor.scalar_type(), "JointBilateralFilterCudaKernel3DForward", ([&] {
hipLaunchKernelGGL(( JointBilateralFilterCudaKernel3DForward<scalar_t, C>)
, dim3(dim3(int(desc.channelStride / BLOCK_SIZE) + 1, desc.batchCount)), dim3(dim3(BLOCK_SIZE, 1)), 0, 0,
inputTensor.data_ptr<scalar_t>(),
guidanceTensor.data_ptr<scalar_t>(),
outputTensor.data_ptr<scalar_t>(),
outputWeightsTensor.data_ptr<scalar_t>(),
dO_dz_ki.data_ptr<scalar_t>(),
dO_dsig_r.data_ptr<scalar_t>(),
dO_dsig_x.data_ptr<scalar_t>(),
dO_dsig_y.data_ptr<scalar_t>(),
dO_dsig_z.data_ptr<scalar_t>());
}));
// cuda_error_check("Cuda check after kernel call.");
// delete[] kernel;
delete[] kernelSizes;
delete[] gaussianKernel_x;
delete[] gaussianKernel_y;
delete[] gaussianKernel_z;
delete[] xDistanceSquared;
delete[] yDistanceSquared;
delete[] zDistanceSquared;
}
// Function to choose template implementation based on dynamic, channels and dimensions
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor>
JointBilateralFilterCudaForward(
torch::Tensor inputTensor,
torch::Tensor guidanceTensor,
float sigma_x,
float sigma_y,
float sigma_z,
float colorSigma) {
torch::Tensor outputTensor = torch::zeros_like(inputTensor);
torch::Tensor outputWeightsTensor = torch::zeros_like(inputTensor);
torch::Tensor dO_dz_ki = torch::zeros_like(inputTensor);
torch::Tensor dO_dsig_r = torch::zeros_like(inputTensor);
torch::Tensor dO_dsig_x = torch::zeros_like(inputTensor);
torch::Tensor dO_dsig_y = torch::zeros_like(inputTensor);
torch::Tensor dO_dsig_z = torch::zeros_like(inputTensor);
// cuda_error_check("beginning");
#define CASE(c, d) \
JointBilateralFilterCudaForwardFunction<c, d>( \
inputTensor, \
guidanceTensor, \
outputTensor, \
outputWeightsTensor, \
dO_dz_ki, \
dO_dsig_r, \
dO_dsig_x, \
dO_dsig_y, \
dO_dsig_z, \
sigma_x, \
sigma_y, \
sigma_z, \
colorSigma);
SWITCH_AB(CASE, BF_CUDA_MAX_CHANNELS, BF_CUDA_MAX_SPATIAL_DIMENSION, inputTensor.size(1), inputTensor.dim() - 2);
return {outputTensor, outputWeightsTensor, dO_dz_ki, dO_dsig_r, dO_dsig_x, dO_dsig_y, dO_dsig_z};
}
| 561ba48e436f93c7d298a0ddc2f41e70d79b7bcd.cu | /*
Copyright (c) MONAI Consortium
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=========================================================================
Adapted from https://github.com/faebstn96/trainable-joint-bilateral-filter-source
which has the following license...
https://github.com/faebstn96/trainable-joint-bilateral-filter-source/blob/main/LICENSE
Copyright 2022 Fabian Wagner, Pattern Recognition Lab, FAU Erlangen-Nuernberg, Erlangen, Germany
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <cuda.h>
#include <cuda_runtime.h>
#include "trainable_joint_bilateral.h"
//#include "../utils/cuda_error_check.h"
#include "utils/meta_macros.h"
#include "utils/tensor_description.h"
__constant__ int cBatchStride;
__constant__ int cColorStride;
__constant__ int cSizes[3];
__constant__ int cStrides[3];
__constant__ int cKernelSizes[3];
__constant__ int cHalfWindowSize_arr[3];
__constant__ float cGaussianKernel_x[256];
__constant__ float cGaussianKernel_y[256];
__constant__ float cGaussianKernel_z[256];
__constant__ float cXDistanceSquared[256];
__constant__ float cYDistanceSquared[256];
__constant__ float cZDistanceSquared[256];
__constant__ float cColorExponentConstant;
__constant__ float cSigma_x;
__constant__ float cSigma_y;
__constant__ float cSigma_z;
__constant__ float cColorSigma;
template <typename scalar_t, int C>
__global__ void JointBilateralFilterCudaKernel3DForward(
scalar_t* input,
scalar_t* guidance,
scalar_t* output,
scalar_t* outputWeightsTensor,
scalar_t* dO_dz_ki,
scalar_t* dO_dsig_r,
scalar_t* dO_dsig_x,
scalar_t* dO_dsig_y,
scalar_t* dO_dsig_z) {
int homeOffset = blockIdx.x * blockDim.x + threadIdx.x;
int batchOffset = blockIdx.y * cBatchStride;
if (homeOffset >= cColorStride)
return;
int homeX = homeOffset / cStrides[0];
int homeY = (homeOffset - homeX * cStrides[0]) / cStrides[1];
int homeZ = (homeOffset - homeX * cStrides[0] - homeY * cStrides[1]) / cStrides[2];
int homeIndex[] = {homeX, homeY, homeZ};
// Zero kernel aggregates.
scalar_t valueSum = 0;
scalar_t dw_dz_ki = 0;
scalar_t dfilter_dz_ki = 0;
scalar_t colorSum_w = 0;
scalar_t colorSum_alpha = 0;
scalar_t xSum_w = 0;
scalar_t xSum_alpha = 0;
scalar_t ySum_w = 0;
scalar_t ySum_alpha = 0;
scalar_t zSum_w = 0;
scalar_t zSum_alpha = 0;
scalar_t weightSum = 0;
for (int kernelX = 0; kernelX < cKernelSizes[0]; kernelX++) {
int neighbourX = max(0, min(homeX + (kernelX - cHalfWindowSize_arr[0]), cSizes[0] - 1));
scalar_t gaussianX = cGaussianKernel_x[kernelX];
for (int kernelY = 0; kernelY < cKernelSizes[1]; kernelY++) {
int neighbourY = max(0, min(homeY + (kernelY - cHalfWindowSize_arr[1]), cSizes[1] - 1));
scalar_t gaussianY = cGaussianKernel_y[kernelY];
for (int kernelZ = 0; kernelZ < cKernelSizes[2]; kernelZ++) {
int neighbourZ = max(0, min(homeZ + (kernelZ - cHalfWindowSize_arr[2]), cSizes[2] - 1));
scalar_t gaussianZ = cGaussianKernel_z[kernelZ];
int neighbourOffset = neighbourX * cStrides[0] + neighbourY * cStrides[1] + neighbourZ;
bool flagNotClamped = true;
int kernelIndex[] = {kernelX, kernelY, kernelZ};
int dimensions = 3; // Must equal the number of spatial dimensions.
for (int i = 0; i < dimensions; i++) {
int HalfWindowSizeBack = cHalfWindowSize_arr[i]; // Define constant memory as new variable here (!!),
// otherwise: cudaErrorMisalignedAddress
int neighbourIndex = homeIndex[i] + kernelIndex[i] - HalfWindowSizeBack;
int neighbourIndexClamped = min(cSizes[i] - 1, max(0, neighbourIndex));
if (neighbourIndex != neighbourIndexClamped) {
flagNotClamped = false;
}
}
scalar_t colorDistance = 0;
scalar_t colorDistanceSquared = 0;
#pragma unroll
for (int c = 0; c < C; c++) {
scalar_t a = guidance[batchOffset + homeOffset + c * cColorStride];
scalar_t b = guidance[batchOffset + neighbourOffset + c * cColorStride]; // Home - neighbor (!!) in backward
// the other way around !!
scalar_t diff = a - b;
colorDistance += diff; // Do not take the absolute value here. Be careful with the signs.
colorDistanceSquared += diff * diff;
}
scalar_t spatialWeight = gaussianX * gaussianY * gaussianZ;
scalar_t colorWeight = exp(cColorExponentConstant * colorDistanceSquared);
scalar_t totalWeight = spatialWeight * colorWeight;
// Aggregating values. Only do this if flagNotClamped: Pixels outside the image are disregarded.
if (flagNotClamped) {
#pragma unroll
for (int c = 0; c < C; c++) {
valueSum += input[batchOffset + neighbourOffset + c * cColorStride] * totalWeight;
// Derivative of weights with respect to X_i while i=k.
dw_dz_ki += (-1) * totalWeight * colorDistance / (cColorSigma * cColorSigma);
// Derivative of convolved image with respect to X_i while i=k.
dfilter_dz_ki += (-1) * totalWeight * input[batchOffset + neighbourOffset + c * cColorStride] *
colorDistance /
(cColorSigma *
cColorSigma); // Be careful, the +1 is missing here -> Added before filling dfilter_dx_kiData
colorSum_w += totalWeight * colorDistanceSquared / std::abs(cColorSigma * cColorSigma * cColorSigma);
colorSum_alpha += totalWeight * input[batchOffset + neighbourOffset + c * cColorStride] *
colorDistanceSquared / std::abs(cColorSigma * cColorSigma * cColorSigma);
xSum_w += totalWeight * cXDistanceSquared[kernelX] / std::abs(cSigma_x * cSigma_x * cSigma_x);
xSum_alpha += totalWeight * input[batchOffset + neighbourOffset + c * cColorStride] *
cXDistanceSquared[kernelX] / std::abs(cSigma_x * cSigma_x * cSigma_x);
ySum_w += totalWeight * cYDistanceSquared[kernelY] / std::abs(cSigma_y * cSigma_y * cSigma_y);
ySum_alpha += totalWeight * input[batchOffset + neighbourOffset + c * cColorStride] *
cYDistanceSquared[kernelY] / std::abs(cSigma_y * cSigma_y * cSigma_y);
zSum_w += totalWeight * cZDistanceSquared[kernelZ] / std::abs(cSigma_z * cSigma_z * cSigma_z);
zSum_alpha += totalWeight * input[batchOffset + neighbourOffset + c * cColorStride] *
cZDistanceSquared[kernelZ] / std::abs(cSigma_z * cSigma_z * cSigma_z);
}
weightSum += totalWeight;
}
}
}
}
#pragma unroll
for (int c = 0; c < C; c++) {
// output[batchOffset + homeOffset + c * cColorStride] /= weightSum;
output[batchOffset + homeOffset + c * cColorStride] = valueSum / weightSum;
// Pre-computations for the backward pass:
outputWeightsTensor[batchOffset + homeOffset + c * cColorStride] = weightSum;
dO_dz_ki[batchOffset + homeOffset + c * cColorStride] = -(1 / weightSum) * (valueSum / weightSum) * dw_dz_ki +
(1 / weightSum) * (dfilter_dz_ki); // no +1 for dfilter_dz_ki for JBF added here!
dO_dsig_r[batchOffset + homeOffset + c * cColorStride] =
-(1 / weightSum) * (valueSum / weightSum) * colorSum_w + (1 / weightSum) * colorSum_alpha;
dO_dsig_x[batchOffset + homeOffset + c * cColorStride] =
-(1 / weightSum) * (valueSum / weightSum) * xSum_w + (1 / weightSum) * xSum_alpha;
dO_dsig_y[batchOffset + homeOffset + c * cColorStride] =
-(1 / weightSum) * (valueSum / weightSum) * ySum_w + (1 / weightSum) * ySum_alpha;
dO_dsig_z[batchOffset + homeOffset + c * cColorStride] =
-(1 / weightSum) * (valueSum / weightSum) * zSum_w + (1 / weightSum) * zSum_alpha;
}
}
template <int C, int D>
void JointBilateralFilterCudaForwardFunction(
torch::Tensor inputTensor,
torch::Tensor guidanceTensor,
torch::Tensor outputTensor,
torch::Tensor outputWeightsTensor,
torch::Tensor dO_dz_ki,
torch::Tensor dO_dsig_r,
torch::Tensor dO_dsig_x,
torch::Tensor dO_dsig_y,
torch::Tensor dO_dsig_z,
float sigma_x,
float sigma_y,
float sigma_z,
float colorSigma) {
// Getting tensor description.
TensorDescription desc = TensorDescription(inputTensor);
// Pre-calculating gaussian kernel.
int windowSize_x = std::max(((int)ceil(5.0f * sigma_x) | 1), 5); // ORing last bit to ensure odd window size
int windowSize_y = std::max(((int)ceil(5.0f * sigma_y) | 1), 5); // ORing last bit to ensure odd window size
int windowSize_z = std::max(((int)ceil(5.0f * sigma_z) | 1), 5); // ORing last bit to ensure odd window size
int halfWindowSize_x = floor(0.5f * windowSize_x);
int halfWindowSize_y = floor(0.5f * windowSize_y);
int halfWindowSize_z = floor(0.5f * windowSize_z);
int halfWindowSize_arr[] = {halfWindowSize_x, halfWindowSize_y, halfWindowSize_z};
float spatialExpConstant_x = -1.0f / (2 * sigma_x * sigma_x);
float spatialExpConstant_y = -1.0f / (2 * sigma_y * sigma_y);
float spatialExpConstant_z = -1.0f / (2 * sigma_z * sigma_z);
float colorExpConstant = -1.0f / (2 * colorSigma * colorSigma);
int* kernelSizes = new int[desc.dimensions];
kernelSizes[0] = windowSize_x;
kernelSizes[1] = windowSize_y;
kernelSizes[2] = windowSize_z;
auto* gaussianKernel_x = new float[windowSize_x];
auto* gaussianKernel_y = new float[windowSize_y];
auto* gaussianKernel_z = new float[windowSize_z];
auto* xDistanceSquared = new float[windowSize_x];
auto* yDistanceSquared = new float[windowSize_y];
auto* zDistanceSquared = new float[windowSize_z];
for (int i = 0; i < windowSize_x; i++) {
int distance = i - halfWindowSize_x;
gaussianKernel_x[i] = exp(distance * distance * spatialExpConstant_x);
xDistanceSquared[i] = distance * distance;
}
for (int i = 0; i < windowSize_y; i++) {
int distance = i - halfWindowSize_y;
gaussianKernel_y[i] = exp(distance * distance * spatialExpConstant_y);
yDistanceSquared[i] = distance * distance;
}
for (int i = 0; i < windowSize_z; i++) {
int distance = i - halfWindowSize_z;
gaussianKernel_z[i] = exp(distance * distance * spatialExpConstant_z);
zDistanceSquared[i] = distance * distance;
}
// Writing constant memory.
cudaMemcpyToSymbol(cBatchStride, &desc.batchStride, sizeof(int));
cudaMemcpyToSymbol(cColorStride, &desc.channelStride, sizeof(int));
cudaMemcpyToSymbol(cSizes, desc.sizes, sizeof(int) * 3);
cudaMemcpyToSymbol(cStrides, desc.strides, sizeof(int) * 3);
cudaMemcpyToSymbol(cKernelSizes, kernelSizes, sizeof(int) * desc.dimensions);
cudaMemcpyToSymbol(cHalfWindowSize_arr, halfWindowSize_arr, sizeof(int) * desc.dimensions);
cudaMemcpyToSymbol(cGaussianKernel_x, gaussianKernel_x, sizeof(float) * windowSize_x);
cudaMemcpyToSymbol(cGaussianKernel_y, gaussianKernel_y, sizeof(float) * windowSize_y);
cudaMemcpyToSymbol(cGaussianKernel_z, gaussianKernel_z, sizeof(float) * windowSize_z);
cudaMemcpyToSymbol(cXDistanceSquared, xDistanceSquared, sizeof(float) * windowSize_x);
cudaMemcpyToSymbol(cYDistanceSquared, yDistanceSquared, sizeof(float) * windowSize_y);
cudaMemcpyToSymbol(cZDistanceSquared, zDistanceSquared, sizeof(float) * windowSize_z);
cudaMemcpyToSymbol(cColorExponentConstant, &colorExpConstant, sizeof(float));
cudaMemcpyToSymbol(cSigma_x, &sigma_x, sizeof(float));
cudaMemcpyToSymbol(cSigma_y, &sigma_y, sizeof(float));
cudaMemcpyToSymbol(cSigma_z, &sigma_z, sizeof(float));
cudaMemcpyToSymbol(cColorSigma, &colorSigma, sizeof(float));
// cuda_error_check("Cuda check before kernel call.");
#define BLOCK_SIZE 32
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
inputTensor.scalar_type(), "JointBilateralFilterCudaKernel3DForward", ([&] {
JointBilateralFilterCudaKernel3DForward<scalar_t, C>
<<<dim3(int(desc.channelStride / BLOCK_SIZE) + 1, desc.batchCount), dim3(BLOCK_SIZE, 1)>>>(
inputTensor.data_ptr<scalar_t>(),
guidanceTensor.data_ptr<scalar_t>(),
outputTensor.data_ptr<scalar_t>(),
outputWeightsTensor.data_ptr<scalar_t>(),
dO_dz_ki.data_ptr<scalar_t>(),
dO_dsig_r.data_ptr<scalar_t>(),
dO_dsig_x.data_ptr<scalar_t>(),
dO_dsig_y.data_ptr<scalar_t>(),
dO_dsig_z.data_ptr<scalar_t>());
}));
// cuda_error_check("Cuda check after kernel call.");
// delete[] kernel;
delete[] kernelSizes;
delete[] gaussianKernel_x;
delete[] gaussianKernel_y;
delete[] gaussianKernel_z;
delete[] xDistanceSquared;
delete[] yDistanceSquared;
delete[] zDistanceSquared;
}
// Function to choose template implementation based on dynamic, channels and dimensions
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor>
JointBilateralFilterCudaForward(
torch::Tensor inputTensor,
torch::Tensor guidanceTensor,
float sigma_x,
float sigma_y,
float sigma_z,
float colorSigma) {
torch::Tensor outputTensor = torch::zeros_like(inputTensor);
torch::Tensor outputWeightsTensor = torch::zeros_like(inputTensor);
torch::Tensor dO_dz_ki = torch::zeros_like(inputTensor);
torch::Tensor dO_dsig_r = torch::zeros_like(inputTensor);
torch::Tensor dO_dsig_x = torch::zeros_like(inputTensor);
torch::Tensor dO_dsig_y = torch::zeros_like(inputTensor);
torch::Tensor dO_dsig_z = torch::zeros_like(inputTensor);
// cuda_error_check("beginning");
#define CASE(c, d) \
JointBilateralFilterCudaForwardFunction<c, d>( \
inputTensor, \
guidanceTensor, \
outputTensor, \
outputWeightsTensor, \
dO_dz_ki, \
dO_dsig_r, \
dO_dsig_x, \
dO_dsig_y, \
dO_dsig_z, \
sigma_x, \
sigma_y, \
sigma_z, \
colorSigma);
SWITCH_AB(CASE, BF_CUDA_MAX_CHANNELS, BF_CUDA_MAX_SPATIAL_DIMENSION, inputTensor.size(1), inputTensor.dim() - 2);
return {outputTensor, outputWeightsTensor, dO_dz_ki, dO_dsig_r, dO_dsig_x, dO_dsig_y, dO_dsig_z};
}
|
7a8bc7bfe19fde23fde52cd108b5b417758dad4a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__device__ int translate_idx_inv(int ii, int d1, int d2, int d3, int scale_factor, int off_x, int off_y)
{
int x, y, z, w;
w = ii % d3;
ii = ii/d3;
z = ii % d2;
ii = ii/d2;
y = ii % d1;
ii = ii/d1;
x = ii;
w = w*scale_factor+off_x;
z = z*scale_factor+off_y;
d2 *= scale_factor;
d3 *= scale_factor;
return (((x*d1+y)*d2)+z)*d3+w;
}
__device__ int translate_idx(int ii, int d1, int d2, int d3, int scale_factor)
{
int x, y, z, w;
w = ii % d3;
ii = ii/d3;
z = ii % d2;
ii = ii/d2;
y = ii % d1;
ii = ii/d1;
x = ii;
w = w/scale_factor;
z = z/scale_factor;
d2 /= scale_factor;
d3 /= scale_factor;
return (((x*d1+y)*d2)+z)*d3+w;
}
__global__ void downscale(float *gradInput_data, float *gradOutput_data, long no_elements, int scale_factor, int d1, int d2, int d3)
{
// output offset:
long ii = threadIdx.x + blockDim.x * blockIdx.x;
ii += threadIdx.y + blockDim.y * (blockDim.x * gridDim.x) * blockIdx.y;
if (ii >= no_elements) return;
for (int i=0; i < scale_factor; i++){
for(int j=0; j < scale_factor; j++){
int ipidx = translate_idx_inv(ii, d1, d2, d3, scale_factor, i, j);
gradInput_data[ii] += gradOutput_data[ipidx];
}
}
} | 7a8bc7bfe19fde23fde52cd108b5b417758dad4a.cu | #include "includes.h"
__device__ int translate_idx_inv(int ii, int d1, int d2, int d3, int scale_factor, int off_x, int off_y)
{
int x, y, z, w;
w = ii % d3;
ii = ii/d3;
z = ii % d2;
ii = ii/d2;
y = ii % d1;
ii = ii/d1;
x = ii;
w = w*scale_factor+off_x;
z = z*scale_factor+off_y;
d2 *= scale_factor;
d3 *= scale_factor;
return (((x*d1+y)*d2)+z)*d3+w;
}
__device__ int translate_idx(int ii, int d1, int d2, int d3, int scale_factor)
{
int x, y, z, w;
w = ii % d3;
ii = ii/d3;
z = ii % d2;
ii = ii/d2;
y = ii % d1;
ii = ii/d1;
x = ii;
w = w/scale_factor;
z = z/scale_factor;
d2 /= scale_factor;
d3 /= scale_factor;
return (((x*d1+y)*d2)+z)*d3+w;
}
__global__ void downscale(float *gradInput_data, float *gradOutput_data, long no_elements, int scale_factor, int d1, int d2, int d3)
{
// output offset:
long ii = threadIdx.x + blockDim.x * blockIdx.x;
ii += threadIdx.y + blockDim.y * (blockDim.x * gridDim.x) * blockIdx.y;
if (ii >= no_elements) return;
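// Sum the gradients of every fine-grid element in the scale_factor x scale_factor block that
// maps to this coarse-grid element -- effectively the backward pass of nearest-neighbour upsampling.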
for (int i=0; i < scale_factor; i++){
for(int j=0; j < scale_factor; j++){
int ipidx = translate_idx_inv(ii, d1, d2, d3, scale_factor, i, j);
gradInput_data[ii] += gradOutput_data[ipidx];
}
}
} |
bcefb4904ad66e8407efdfdc08a2ac0c5b49eee5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
%%writefile testdims.cu
#include <cstdlib>
#include <stdio.h>
#include <sys/time.h>
#include <math.h>
#include "wave.h"
__global__
void gerarFrames(int width, int frames, unsigned char* pic){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
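// Grid-stride loop over rows: each thread renders every row whose index is congruent to its
// global thread index modulo the total number of launched threads.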
for (int frame = 0; frame < frames; frame++) {
for (int row = index; row < width; row += stride) {
for (int col = 0; col < width; col++) {
float fx = col - 1024/2;
float fy = row - 1024/2;
float d = sqrtf( fx * fx + fy * fy );
unsigned char color = (unsigned char) (160.0f + 127.0f *
cos(d/10.0f - frame/7.0f) /
(d/50.0f + 1.0f));
pic[frame * width * width + row * width + col] = (unsigned char) color;
}
}
}
}
int main(int argc, char *argv[])
{
if (argc != 3) {
printf("ERRO: usar %s largura_frame num_frames\n", argv[0]);
exit(-1);
}
int width = atoi(argv[1]);
if (width < 100) {
printf("ERRO: largura_frame deve ser maior igual a 100\n");
exit(-1);
}
int frames = atoi(argv[2]);
if (frames < 1) {
printf("ERRO: num_frames deve ser pelo menos 1\n");
exit(-1);
}
printf("Computando %d frames de dimenso %d por %d\n", frames, width, width);
unsigned char* pic;
hipMallocManaged(&pic, frames*width*width*sizeof(unsigned char));
// start the timer
timeval start, end;
gettimeofday(&start, NULL);
int blockSize = 512;
int numBlocks = (width + blockSize) / blockSize;
hipLaunchKernelGGL(( gerarFrames), dim3(numBlocks), dim3(blockSize), 0, 0, width,frames, pic);
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
// stop the timer
gettimeofday(&end, NULL);
double runtime = end.tv_sec + end.tv_usec / 1000000.0 - start.tv_sec - start.tv_usec / 1000000.0;
printf("compute time: %.4f s\n", runtime);
hipFree(pic);
return 0;
} | bcefb4904ad66e8407efdfdc08a2ac0c5b49eee5.cu | %%writefile testdims.cu
#include <cstdlib>
#include <stdio.h>
#include <sys/time.h>
#include <math.h>
#include "wave.h"
__global__
void gerarFrames(int width, int frames, unsigned char* pic){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int frame = 0; frame < frames; frame++) {
for (int row = index; row < width; row += stride) {
for (int col = 0; col < width; col++) {
float fx = col - 1024/2;
float fy = row - 1024/2;
float d = sqrtf( fx * fx + fy * fy );
unsigned char color = (unsigned char) (160.0f + 127.0f *
cos(d/10.0f - frame/7.0f) /
(d/50.0f + 1.0f));
pic[frame * width * width + row * width + col] = (unsigned char) color;
}
}
}
}
int main(int argc, char *argv[])
{
if (argc != 3) {
printf("ERRO: usar %s largura_frame num_frames\n", argv[0]);
exit(-1);
}
int width = atoi(argv[1]);
if (width < 100) {
printf("ERRO: largura_frame deve ser maior igual a 100\n");
exit(-1);
}
int frames = atoi(argv[2]);
if (frames < 1) {
printf("ERRO: num_frames deve ser pelo menos 1\n");
exit(-1);
}
printf("Computando %d frames de dimensão %d por %d\n", frames, width, width);
unsigned char* pic;
cudaMallocManaged(&pic, frames*width*width*sizeof(unsigned char));
// start the timer
timeval start, end;
gettimeofday(&start, NULL);
int blockSize = 512;
int numBlocks = (width + blockSize) / blockSize;
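// Rounds the block count up; when width is an exact multiple of blockSize this launches one
// extra block, which is harmless because the kernel guards with row < width.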
gerarFrames<<<numBlocks, blockSize>>>(width,frames, pic);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// stop the timer
gettimeofday(&end, NULL);
double runtime = end.tv_sec + end.tv_usec / 1000000.0 - start.tv_sec - start.tv_usec / 1000000.0;
printf("compute time: %.4f s\n", runtime);
cudaFree(pic);
return 0;
} |
ed0d597428ce2a49a360a41ea72053a04fc84794.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*****************************************************************************
Example : cuda-vector-vector-multiplication-mGPU.cu
Objective : Write a CUDA Program to perform Vector Vector multiplication
using global memory implementation to be executed on multiple GPUs.
Input : None
Output : Execution time in seconds , Gflops achieved
Created : Aug 2011
E-mail : RarchK
****************************************************************************/
#include<stdio.h>
#include<cuda.h>
#include<pthread.h>
#include<error.h>
#define EPS 1.0e-12
#define GRIDSIZE 10
#define BLOCKSIZE 16
#define SIZE 128
int blocksPerGrid;
int gridsPerBlock;
struct Data
{
int deviceId;
int size;
double* a;
double* b;
double retVal;
double Tsec;
};
__global__ void vvmul(int len,double* A,double* B,double *C)
{
int tid= blockIdx.x*blockDim.x*blockDim.y + threadIdx.x +threadIdx.y * blockDim.x;
while(tid < len)
{
C[tid] = A[tid] * B[tid];
tid += blockDim.x * gridDim.x;
}
}
/* Check for safe return of all calls to the device */
void CUDA_SAFE_CALL(hipError_t call)
{
hipError_t ret = call;
//printf("RETURN FROM THE CUDA CALL:%d\t:",ret);
switch(ret)
{
case hipSuccess:
// printf("Success\n");
break;
/* case hipErrorInvalidValue:
{
printf("ERROR: InvalidValue:%i.\n",__LINE__);
exit(-1);
break;
}
case hipErrorInvalidDevicePointer:
{
printf("ERROR:Invalid Device pointeri:%i.\n",__LINE__);
exit(-1);
break;
}
case hipErrorInvalidMemcpyDirection:
{
printf("ERROR:Invalid memcpy direction:%i.\n",__LINE__);
exit(-1);
break;
} */
default:
{
printf(" ERROR at line :%i.%d' ' %s\n",__LINE__,ret,hipGetErrorString(ret));
exit(-1);
break;
}
}
}
/* Get the number of GPU devices present on the host */
int get_DeviceCount()
{
int count;
hipGetDeviceCount(&count);
return count;
}
/* Function for vector multiplication on host*/
void host_vvmul(double* A,double* B,int len,double &C)
{
int i;
for(i = 0;i <len;i++)
C += A[i]*B[i];
}
/* Function to calculate Gflops */
double calculate_gflops(double &Tsec)
{
//printf("time taken is %.8lf\n",Tsec);
double gflops=(1.0e-9 * (( 2.0 * SIZE )/Tsec));
//printf("Gflops is \t%f\n",gflops);
return gflops;
}
/* Function to display output */
void display(double* arr,int size)
{
int i;
for(i=0;i<size;i++)
printf("%f ",arr[i]);
printf("\t%d\n",i);
}
/*Function doing device related computations */
void* routine(void* givendata)
{
Data *data = (Data*)givendata;
int len = data->size;
double *a,*b,*part_c;
double *d_a,*d_b,*d_part_c;
double c = 0.0; // partial sum accumulated below, must start at zero
hipEvent_t start,stop;
float elapsedTime;
a=data->a;
b=data->b;
part_c = (double*)malloc(len*sizeof(double));
CUDA_SAFE_CALL(hipSetDevice(data->deviceId));
CUDA_SAFE_CALL(hipEventCreate(&start));
CUDA_SAFE_CALL(hipEventCreate(&stop));
CUDA_SAFE_CALL(hipMalloc((void**)&d_a,len*sizeof(double)));
CUDA_SAFE_CALL(hipMalloc((void**)&d_b,len*sizeof(double)));
CUDA_SAFE_CALL(hipMalloc((void**)&d_part_c,len*sizeof(double)));
CUDA_SAFE_CALL(hipMemcpy(d_a,a,len*sizeof(double),hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(d_b,b,len*sizeof(double),hipMemcpyHostToDevice));
dim3 threadsPerBlock(16,16);
int numBlocks;
if( len /256 == 0)
numBlocks=1;
else
numBlocks = len/100;
dim3 blocksPerGrid(numBlocks ,1);
CUDA_SAFE_CALL(hipEventRecord(start,0));
hipLaunchKernelGGL(( vvmul), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, len,d_a,d_b,d_part_c);
if(hipPeekAtLastError())
printf("KERNEL ERROR: %s\t on device:%d\n",hipGetErrorString(hipPeekAtLastError()),data->deviceId);
CUDA_SAFE_CALL(hipEventRecord(stop,0));
CUDA_SAFE_CALL(hipEventSynchronize(stop));
CUDA_SAFE_CALL(hipMemcpy(part_c,d_part_c,len*sizeof(double),hipMemcpyDeviceToHost));
int ind;
for(ind=0;ind<len;ind++)
c += part_c[ind];
CUDA_SAFE_CALL(hipEventElapsedTime(&elapsedTime,start,stop));
data->Tsec=elapsedTime*(1.0e-3);
CUDA_SAFE_CALL(hipFree(d_a));
CUDA_SAFE_CALL(hipFree(d_b));
CUDA_SAFE_CALL(hipFree(d_part_c));
free(part_c);
data->retVal=c;
return 0;
}
void relError(double* dRes,double* hRes,int size)
{
double relativeError=0.0,errorNorm=0.0;
int flag=0;
int i;
for( i = 0; i < size; ++i) {
if (fabs(hRes[i]) > fabs(dRes[i]))
relativeError = fabs((hRes[i] - dRes[i]) / hRes[i]);
else
relativeError = fabs((dRes[i] - hRes[i]) / dRes[i]);
if (relativeError > EPS && relativeError != 0.0e+00 )
{
if(errorNorm < relativeError)
{
errorNorm = relativeError;
flag=1;
}
}
}
if( flag == 1)
{
printf(" \n Results verfication : Failed");
printf(" \n Considered machine precision : %e", EPS);
printf(" \n Relative Error : %e\n", errorNorm);
}
else
printf("\n Results verfication : Success\n");
}
/* prints the result in screen */
void print_on_screen(char * program_name,float tsec,double gflops,int size,int flag)//flag=1 if gflops has been calculated else flag =0
{
printf("\n---------------%s----------------\n",program_name);
printf("\tSIZE\t TIME_SEC\t Gflops\n");
if(flag==1)
printf("\t%d\t%f\t%lf\t",size,tsec,gflops);
else
printf("\t%d\t%lf\t%lf\t",size,"---","---");
}
int main(int argc,char** argv)
{
int devCount;
CUDA_SAFE_CALL(hipGetDeviceCount(&devCount));
if(devCount < 2)
{
printf("Atleast 2 GPU's are needed :%d\n",devCount);
exit(0);
}
double *hVectA,*hVectB,hRes,host_hRes=0.0; // host_hRes is accumulated into by host_vvmul, so it must start at zero
int vlen=SIZE;
int ind;
hVectA=(double*)malloc(vlen*sizeof(double));
hVectB=(double*)malloc(vlen*sizeof(double));
for(ind=0;ind < vlen;ind++)
{
hVectA[ind]=2.00;
hVectB[ind]=2.00;
}
Data vector[2];
vector[0].deviceId = 0;
vector[0].size =vlen/2;
vector[0].a =hVectA;
vector[0].b =hVectB;
vector[1].deviceId = 1;
vector[1].size =vlen/2;
vector[1].a =hVectA + vlen/2 ;
vector[1].b =hVectB + vlen/2 ;
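// Split the work in half: a worker thread processes the first half on device 0 while the
// main thread processes the second half on device 1.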
pthread_t thread;
if(pthread_create(&thread,NULL,routine,(void*)&vector[0]) != 0)
perror("Thread creation error\n");
routine(&vector[1]);
pthread_join(thread,NULL);
hRes=vector[0].retVal + vector[1].retVal;
/* ---------Check result with host CPU result ---------*/
host_vvmul(hVectA,hVectB,vlen,host_hRes);
relError(&hRes,&host_hRes,1);
print_on_screen("MatMatMult_mGPU",vector[0].Tsec,calculate_gflops(vector[0].Tsec),vlen,1);
print_on_screen("MatMatMult_mGPU",vector[1].Tsec,calculate_gflops(vector[1].Tsec),vlen,1);
free(hVectA);
free(hVectB);
return 0;
}
| ed0d597428ce2a49a360a41ea72053a04fc84794.cu |
/*****************************************************************************
Example : cuda-vector-vector-multiplication-mGPU.cu
Objective : Write a CUDA Program to perform Vector Vector multiplication
using global memory implementation to be executed on multiple GPUs.
Input : None
Output : Execution time in seconds , Gflops achieved
Created : Aug 2011
E-mail : RarchK
****************************************************************************/
#include<stdio.h>
#include<cuda.h>
#include<pthread.h>
#include<error.h>
#define EPS 1.0e-12
#define GRIDSIZE 10
#define BLOCKSIZE 16
#define SIZE 128
int blocksPerGrid;
int gridsPerBlock;
struct Data
{
int deviceId;
int size;
double* a;
double* b;
double retVal;
double Tsec;
};
__global__ void vvmul(int len,double* A,double* B,double *C)
{
int tid= blockIdx.x*blockDim.x*blockDim.y + threadIdx.x +threadIdx.y * blockDim.x;
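// NOTE: the stride below is blockDim.x * gridDim.x, which ignores blockDim.y of the 16x16 block,
// so some elements are visited by more than one thread. The result stays correct because every
// visiting thread stores the same product; blockDim.x * blockDim.y * gridDim.x would avoid the
// redundant work.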
while(tid < len)
{
C[tid] = A[tid] * B[tid];
tid += blockDim.x * gridDim.x;
}
}
/* Check for safe return of all calls to the device */
void CUDA_SAFE_CALL(cudaError_t call)
{
cudaError_t ret = call;
//printf("RETURN FROM THE CUDA CALL:%d\t:",ret);
switch(ret)
{
case cudaSuccess:
// printf("Success\n");
break;
/* case cudaErrorInvalidValue:
{
printf("ERROR: InvalidValue:%i.\n",__LINE__);
exit(-1);
break;
}
case cudaErrorInvalidDevicePointer:
{
printf("ERROR:Invalid Device pointeri:%i.\n",__LINE__);
exit(-1);
break;
}
case cudaErrorInvalidMemcpyDirection:
{
printf("ERROR:Invalid memcpy direction:%i.\n",__LINE__);
exit(-1);
break;
} */
default:
{
printf(" ERROR at line :%i.%d' ' %s\n",__LINE__,ret,cudaGetErrorString(ret));
exit(-1);
break;
}
}
}
/* Get the number of GPU devices present on the host */
int get_DeviceCount()
{
int count;
cudaGetDeviceCount(&count);
return count;
}
/* Function for vector multiplication on host*/
void host_vvmul(double* A,double* B,int len,double &C)
{
int i;
for(i = 0;i <len;i++)
C += A[i]*B[i];
}
/* Function to calculate Gflops */
double calculate_gflops(double &Tsec)
{
//printf("time taken is %.8lf\n",Tsec);
double gflops=(1.0e-9 * (( 2.0 * SIZE )/Tsec));
//printf("Gflops is \t%f\n",gflops);
return gflops;
}
/* Function to display output */
void display(double* arr,int size)
{
int i;
for(i=0;i<size;i++)
printf("%f ",arr[i]);
printf("\t%d\n",i);
}
/*Function doing device related computations */
void* routine(void* givendata)
{
Data *data = (Data*)givendata;
int len = data->size;
double *a,*b,*part_c;
double *d_a,*d_b,*d_part_c;
double c = 0.0; // partial sum accumulated below, must start at zero
cudaEvent_t start,stop;
float elapsedTime;
a=data->a;
b=data->b;
part_c = (double*)malloc(len*sizeof(double));
CUDA_SAFE_CALL(cudaSetDevice(data->deviceId));
CUDA_SAFE_CALL(cudaEventCreate(&start));
CUDA_SAFE_CALL(cudaEventCreate(&stop));
CUDA_SAFE_CALL(cudaMalloc((void**)&d_a,len*sizeof(double)));
CUDA_SAFE_CALL(cudaMalloc((void**)&d_b,len*sizeof(double)));
CUDA_SAFE_CALL(cudaMalloc((void**)&d_part_c,len*sizeof(double)));
CUDA_SAFE_CALL(cudaMemcpy(d_a,a,len*sizeof(double),cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(d_b,b,len*sizeof(double),cudaMemcpyHostToDevice));
dim3 threadsPerBlock(16,16);
int numBlocks;
if( len /256 == 0)
numBlocks=1;
else
numBlocks = len/100;
dim3 blocksPerGrid(numBlocks ,1);
CUDA_SAFE_CALL(cudaEventRecord(start,0));
vvmul<<<blocksPerGrid,threadsPerBlock>>>(len,d_a,d_b,d_part_c);
if(cudaPeekAtLastError())
printf("KERNEL ERROR: %s\t on device:%d\n",cudaGetErrorString(cudaPeekAtLastError()),data->deviceId);
CUDA_SAFE_CALL(cudaEventRecord(stop,0));
CUDA_SAFE_CALL(cudaEventSynchronize(stop));
CUDA_SAFE_CALL(cudaMemcpy(part_c,d_part_c,len*sizeof(double),cudaMemcpyDeviceToHost));
int ind;
for(ind=0;ind<len;ind++)
c += part_c[ind];
CUDA_SAFE_CALL(cudaEventElapsedTime(&elapsedTime,start,stop));
data->Tsec=elapsedTime*(1.0e-3);
CUDA_SAFE_CALL(cudaFree(d_a));
CUDA_SAFE_CALL(cudaFree(d_b));
CUDA_SAFE_CALL(cudaFree(d_part_c));
free(part_c);
data->retVal=c;
return 0;
}
void relError(double* dRes,double* hRes,int size)
{
double relativeError=0.0,errorNorm=0.0;
int flag=0;
int i;
for( i = 0; i < size; ++i) {
if (fabs(hRes[i]) > fabs(dRes[i]))
relativeError = fabs((hRes[i] - dRes[i]) / hRes[i]);
else
relativeError = fabs((dRes[i] - hRes[i]) / dRes[i]);
if (relativeError > EPS && relativeError != 0.0e+00 )
{
if(errorNorm < relativeError)
{
errorNorm = relativeError;
flag=1;
}
}
}
if( flag == 1)
{
printf(" \n Results verfication : Failed");
printf(" \n Considered machine precision : %e", EPS);
printf(" \n Relative Error : %e\n", errorNorm);
}
else
printf("\n Results verfication : Success\n");
}
/* prints the result in screen */
void print_on_screen(char * program_name,float tsec,double gflops,int size,int flag)//flag=1 if gflops has been calculated else flag =0
{
printf("\n---------------%s----------------\n",program_name);
printf("\tSIZE\t TIME_SEC\t Gflops\n");
if(flag==1)
printf("\t%d\t%f\t%lf\t",size,tsec,gflops);
else
printf("\t%d\t%lf\t%lf\t",size,"---","---");
}
int main(int argc,char** argv)
{
int devCount;
CUDA_SAFE_CALL(cudaGetDeviceCount(&devCount));
if(devCount < 2)
{
printf("Atleast 2 GPU's are needed :%d\n",devCount);
exit(0);
}
double *hVectA,*hVectB,hRes,host_hRes=0.0; // host_hRes is accumulated into by host_vvmul, so it must start at zero
int vlen=SIZE;
int ind;
hVectA=(double*)malloc(vlen*sizeof(double));
hVectB=(double*)malloc(vlen*sizeof(double));
for(ind=0;ind < vlen;ind++)
{
hVectA[ind]=2.00;
hVectB[ind]=2.00;
}
Data vector[2];
vector[0].deviceId = 0;
vector[0].size =vlen/2;
vector[0].a =hVectA;
vector[0].b =hVectB;
vector[1].deviceId = 1;
vector[1].size =vlen/2;
vector[1].a =hVectA + vlen/2 ;
vector[1].b =hVectB + vlen/2 ;
pthread_t thread;
if(pthread_create(&thread,NULL,routine,(void*)&vector[0]) != 0)
perror("Thread creation error\n");
routine(&vector[1]);
pthread_join(thread,NULL);
hRes=vector[0].retVal + vector[1].retVal;
/* ---------Check result with host CPU result ---------*/
host_vvmul(hVectA,hVectB,vlen,host_hRes);
relError(&hRes,&host_hRes,1);
print_on_screen("MatMatMult_mGPU",vector[0].Tsec,calculate_gflops(vector[0].Tsec),vlen,1);
print_on_screen("MatMatMult_mGPU",vector[1].Tsec,calculate_gflops(vector[1].Tsec),vlen,1);
free(hVectA);
free(hVectB);
return 0;
}
|
5440646cc3b96a32dd4ab310c43c4f1c79fbff32.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void naive_histo(int *d_bins, const int *d_in, const int BIN_COUNT)
{
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int myItem = d_in[myId];
int myBin = myItem % BIN_COUNT;
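// NOTE: this increment is not atomic, so threads that land in the same bin race with each other
// and counts can be lost -- hence the "naive" in the name.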
d_bins[myBin]++;
} | 5440646cc3b96a32dd4ab310c43c4f1c79fbff32.cu | #include "includes.h"
__global__ void naive_histo(int *d_bins, const int *d_in, const int BIN_COUNT)
{
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int myItem = d_in[myId];
int myBin = myItem % BIN_COUNT;
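// A correct parallel histogram would use atomicAdd(&d_bins[myBin], 1); the plain increment below
// loses counts whenever two threads collide on the same bin.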
d_bins[myBin]++;
} |
b03d1c7a0f44784e8474308d2752577ab491d49a.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include <stdint.h>
#include <math.h>
#include <unistd.h>
#include <omp.h>
#include<limits>
#define MEM_OFFSET gridDim.x*blockDim.x //the number of threads in the grid
#define BACK(x,y) back[startPosA[blockThread] + ( ( ((y) + 8) / 8) * rowWidth + (x) + 1 ) * MEM_OFFSET]
#define Y_STEPS 8
#define BLOCK_SIZE 128 //128 //blockDim.x
#define INT_INT -2147483647
double diff(timespec start, timespec end)
{
double a=0;
if((end.tv_nsec-start.tv_nsec)<0)
{
a=end.tv_sec-start.tv_sec-1;
a+=(1000000000+end.tv_nsec-start.tv_nsec)/1000000000.0;
}
else
{
a=end.tv_sec-start.tv_sec+(end.tv_nsec-start.tv_nsec)/1000000000.0;
}
return a;
}
struct NUM_ADD
{
short2 read_reference_number;
int address_array;
};
//calculate_cigar( int size, char * data, NUM_ADD *num_add,int4 * result, int * direction)
//rowWidth=640
__global__ void Calculate_ben(int size, char *data, NUM_ADD * num_add, short4 *result, int2* AF_maxXY, unsigned int* back, short rowWidth)
{
int offset=blockIdx.x*blockDim.x+threadIdx.x;
short2 lengthXY;
char * read_base_array;
char4 * reference_base_array;
int mismatch;
int match;
int gapOp;
int gapEx;
__shared__ int startPosA[128]; //blockDim.x
while(offset<size)
{
int blockThread=threadIdx.x;
//if(offset>=2943) printf("id=%d\n",offset);
match=200;
mismatch=-150;
gapOp=-260;
gapEx=-11;
lengthXY=num_add[offset].read_reference_number;
read_base_array=(char *) (data+num_add[offset].address_array);
reference_base_array=(char4 *) (read_base_array+(lengthXY.x+127)/128*128);
// direction_index=(short2 *)(direction+offset*640*1100);
startPosA[threadIdx.x] = offset; // startPosA: what is stored here?
//startPosA in the original program is the thread's index within the whole grid
// printf("%d %d\n", lengthXY.x, lengthXY.y);
//initialization below
//initialization of the -1 row in A matrix
// - 2 bytes for element of A matrix
// - 2 bytes for element of F matrix
//still not sure what AF_maxXY stores
for(short x = 0; x < lengthXY.x; x++)
{
int2 tmp;
//(x + 1) because the first element should be -gapEx
tmp.x = 0;
tmp.y = INT_INT - gapEx;
AF_maxXY[startPosA[threadIdx.x] + x * MEM_OFFSET ] = tmp; //because of this operation, the total number of threads in the grid should be greater than size. Otherwise, AF would re-written.
//MEM_OFFSET in the original program is the total number of threads in the grid
//AF_maxXY[startPosA[blockThread] + x * MEM_OFFSET] = tmp;
//fill the -1 row of "back" array
BACK(x,-1) = 9; //0000 0000 0000 0000 0000 0000 0000 1001 == 9
}
// if(offset>=2943) printf("id=%d\n",offset);
// printf("%d %d\n", lengthXY.x, lengthXY.y);
//fill the -1 column of "back" array
for(short y = 0; y < lengthXY.y; y+=Y_STEPS)
{
// if(offset>=2943) printf("id=%d %d %d\n",offset,y,startPosA[threadIdx.x] + ( ( ((y) + 8) / 8) * rowWidth + (-1) + 1 ) * MEM_OFFSET);
BACK(-1,y) = 1717986918; //0110 0110 0110 0110 0110 0110 0110 0110 = 1717986918
}
BACK(-1,-1) = 0; //stop element
//one element of AE_shared consist of:
// - one A element
// - one E element
__shared__ int2 AE_shared[Y_STEPS][BLOCK_SIZE];
//elements of Y sequence go to sharedYSeq
__shared__ char4 sharedYSeq[Y_STEPS/4][BLOCK_SIZE];
int2 AF_current;
AF_current.x = 0;
__shared__ int2 ymin_score[BLOCK_SIZE]; //stores ymin and score
ymin_score[threadIdx.x].y = 0;
__shared__ short4 maxXY[BLOCK_SIZE];
maxXY[threadIdx.x].x = lengthXY.x - 1;
maxXY[threadIdx.x].y = 0;
maxXY[threadIdx.x].z = 0;
// |
// |
// |
// V
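// Process the reference in strips of Y_STEPS (8) rows: the strip's A/E values and packed
// reference bases live in shared memory, while the A/F values of the strip boundary row are
// carried between strips through global memory (AF_maxXY).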
for (short y = 0; y < lengthXY.y; y += Y_STEPS)
{
//printf("%d\n",y);
int2 A_init_upleft;
A_init_upleft.x = 0;
//initialization of the -1 column in A matrix
// - one element of A matrix
// - one element of E matrix
for (short i = 0; i < Y_STEPS; i++)
{
int2 tmp;
tmp.x = 0;
tmp.y = INT_INT - gapEx;
AE_shared[i][threadIdx.x] = tmp;
}
//we read elements of the Y sequence
for (short i = 0; i < Y_STEPS/4; i++)
{
sharedYSeq[i][threadIdx.x] = reference_base_array[y/4+i];
//PACK_BYTES(tex1Dfetch(texSeqsY, startY + y + i*4 + 0),
// tex1Dfetch(texSeqsY, startY + y + i*4 + 1),
// tex1Dfetch(texSeqsY, startY + y + i*4 + 2),
// tex1Dfetch(texSeqsY, startY + y + i*4 + 3));
//printf("%c %c %c %c\n", sharedYSeq[i][threadIdx.x].x,sharedYSeq[i][threadIdx.x].y,sharedYSeq[i][threadIdx.x].z,sharedYSeq[i][threadIdx.x].w);
}
ymin_score[threadIdx.x].x = min(Y_STEPS, lengthXY.y - y); //(i < Y_STEPS) && (i + y < lengthY)
//------>
for (short x = 0; x < lengthXY.x; x++)
{
//actual up_left gets a value of recent read value from the global memory
//and actual read value is stored in first two bites of A_upleft
A_init_upleft.y = A_init_upleft.x;
char2 XYSeq;
XYSeq.x = read_base_array[x];
// if(y==0) printf("%c\n",XYSeq.x);
//read from global memory
int2 AF_up = AF_maxXY[startPosA[threadIdx.x] + x * MEM_OFFSET];
//A_init -> up element read in previous iteration from global memory (up-left)
A_init_upleft.x = AF_up.x;
int2 AE_left;
int E_current;
int similarity;
unsigned int back8 = 0;
// | /| /|
// | / | / |
// |/ |/ V
// | /| /|
// | / | / |
// |/ |/ V
for(short i = 0; i < ymin_score[threadIdx.x].x; i++)
{
AE_left = AE_shared[i][threadIdx.x];
// XYSeq.y = sharedYSeq[i/4][threadIdx.x].x,y,z,w;
if(i%4==0)
XYSeq.y = sharedYSeq[i/4][threadIdx.x].x;
if(i%4==1)
XYSeq.y = sharedYSeq[i/4][threadIdx.x].y;
if(i%4==2)
XYSeq.y = sharedYSeq[i/4][threadIdx.x].z;
if(i%4==3)
XYSeq.y = sharedYSeq[i/4][threadIdx.x].w;
//(sharedYSeq[i/4][threadIdx.x] >> (((15-i)%4) * 8)) & 0xFF;
//similarity = substitutionMatrix[XYSeq.y*lettersCount + XYSeq.x];
similarity = (XYSeq.x==XYSeq.y? match:mismatch);
similarity += A_init_upleft.y;
E_current = max(AE_left.y + gapEx, AE_left.x + gapOp);
AF_current.y = max(AF_up.y + gapEx, AF_up.x + gapOp);
AF_current.x = max(E_current, AF_current.y);
AF_current.x = max(AF_current.x, similarity);
//"back" array
back8 <<= 1;
//back8 |= ((AF_current.x==E_current) && (AF_current.x!=AF_current.y)) || (AF_current.x==similarity); //if go left
back8 |= (AF_current.x==E_current) || (AF_current.x==similarity); //if go left
back8 <<= 1;
//back8 |= (AF_current.x==AF_current.y) || (AF_current.x==similarity); //if go up
back8 |=( (AF_current.x==AF_current.y)&& (AF_current.x!=E_current)) || (AF_current.x==similarity); //if go up
back8 <<= 1;
back8 |= (AF_current.y == (AF_up.y + gapEx)); //if continue up
back8 <<= 1;
back8 |= (E_current == (AE_left.y + gapEx)); //if continue left
//initialize variables for next iterations
int2 AE_tmp;
AE_tmp.x = AF_current.x;
AE_tmp.y = E_current;
AE_shared[i][threadIdx.x] = AE_tmp;
A_init_upleft.y = AE_left.x;
AF_up = AF_current;
// printf("%d ",AF_current.x);
} //end of i
//printf("\n");
//we want the last row of back8 to be completed
back8 <<= 4 * (Y_STEPS - ymin_score[threadIdx.x].x);
//write variables to global memory for next loop
AF_maxXY[startPosA[threadIdx.x] + x * MEM_OFFSET] = AF_current;
BACK(x,y) = back8;
//looking for max element in the last row
if( (y + ymin_score[threadIdx.x].x) == lengthXY.y )
{
if (AF_current.x > ymin_score[threadIdx.x].y)
{
maxXY[threadIdx.x].x = x;
maxXY[threadIdx.x].y = y + ymin_score[threadIdx.x].x - 1; //why minus 1???? Because 0+8=8,it should be 7.
maxXY[threadIdx.x].z=lengthXY.x-1-x;
}
//if result== last row
//result4.x=read_reference_number.y-1;
//result4.y=result_row_index;///result_row_index is the threadIdx.x,which is x.
//result4.z=read_reference_number.x-1-result_row_index;
ymin_score[threadIdx.x].y = max(ymin_score[threadIdx.x].y, AF_current.x);
}
} //end of x
//looking for max element in the last column
for(short i = 0; i < ymin_score[threadIdx.x].x; i++)
{
if (AE_shared[i][threadIdx.x].x > ymin_score[threadIdx.x].y||AE_shared[i][threadIdx.x].x==ymin_score[threadIdx.x].y&& maxXY[threadIdx.x].z>(lengthXY.y-(y+i)-1))
{
maxXY[threadIdx.x].x = lengthXY.x - 1; //
maxXY[threadIdx.x].y = y + i;
maxXY[threadIdx.x].z=0;
}
//result4.x=result_col_index; //result_col_index is the y.
//result4.y=read_reference_number.x-1;
//result4.z=0;
ymin_score[threadIdx.x].y = max(ymin_score[threadIdx.x].y, AE_shared[i][threadIdx.x].x);
}
}//end of y
// maxXY[threadIdx.x].w=ymin_score[threadIdx.x].y;
//here write result (AF_current) to global memory
// scores[startPosA[blockThread]] = ymin_score[blockThread].y;
// AF_maxXY[startPosA[threadIdx.x]] = maxXY[threadIdx.x];
result[offset]=maxXY[threadIdx.x];
// printf("%d %d %d %d %d\n",offset,result[offset].x,result[offset].y,result[offset].z,result[offset].w);
offset+=gridDim.x*blockDim.x;
}
}
#undef BACK
#define BACK(x,y) back[startPosA + ( ( ((y) + 8) / 8) * rowWidth + (x) + 1 ) * MEM_OFFSET]
#define STOP 0
#define UP 4
#define LEFT 8
#define CROSSWISE 12
#define DIRECTION 12
#define CONTIN_UP 2
#define CONTIN_LEFT 1
#define ELEMENT 15
#define ININTI 3
//calculate_cigar_2( int size, int4 * result, char * cigar,int * cigar_int,int * direction) //,
__global__ void Calculate_ben_back(int size, short4 * result, char * cigar, int *cigar_int, unsigned int* back, short rowWidth)
{
int offset=blockIdx.x*blockDim.x+threadIdx.x;
while(offset<size)
{
char * cigar_store;
int * cigar_int_store;
cigar_store=(char *) (cigar+offset*sizeof(char)*128);
cigar_int_store=(int *) (cigar_int+offset*128);
int segment_length;
//startPosA == thread number within whole grid
int startPosA = offset;
short4 myMaxXY = result[startPosA];
short2 indexXY;
indexXY.x=myMaxXY.x;
indexXY.y=myMaxXY.y;
int cigar_index=0;
if(myMaxXY.z>0)
{
cigar_store[cigar_index]='S';
cigar_int_store[cigar_index]=myMaxXY.z;
cigar_index++;
}
segment_length=0;
unsigned int back8 = BACK(indexXY.x, indexXY.y);
back8 >>= ((8 - ((indexXY.y + 1) % 8)) % 8) * 4;
unsigned char back1 = back8 & ELEMENT; //current element of back array
back8 >>= 4;
unsigned char prevDirection = ININTI;// 1100 == 12 =>crosswise
unsigned todo;
//back 1 is the current element of back array
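// The continue-up / continue-left bits recorded for the previous cell force the traceback to keep
// extending an already opened gap before the current cell's own direction bits are consulted,
// matching the affine-gap scoring used in the forward pass.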
while(indexXY.x>=0 && indexXY.y>=0)//(back1 & DIRECTION) //while(direction != STOP)
{
if( ((prevDirection & DIRECTION) == UP) && (prevDirection & CONTIN_UP) )
{
todo = UP;
}
else if( ((prevDirection & DIRECTION) == LEFT) && (prevDirection & CONTIN_LEFT) )
{
todo = LEFT;
}
else if ((back1 & DIRECTION) == UP)
{
todo = UP;
}
else if ((back1 & DIRECTION) == LEFT)
{
todo = LEFT;
}
else //if (back1 & DIRECTION == CROSSWISE)
{
todo = CROSSWISE;
}
if(prevDirection==ININTI) prevDirection=todo;
if((prevDirection & DIRECTION)==todo)
{
segment_length++;
}
else
{
//printf(" prevDirectio= %d todo=%d\n",prevDirection,todo);
//if(prevDirection==LEFT);
cigar_store[cigar_index]=(prevDirection & DIRECTION);//'D'; //I D M????????
//if(prevDirection==UP)
// cigar_store[cigar_index]=UP;//'I'; //I D M????????
//if(prevDirection==CROSSWISE)
// cigar_store[cigar_index]=CROSSWISE;//'M'; //I D M????????
cigar_int_store[cigar_index]=segment_length;
cigar_index++;
segment_length=1;
prevDirection=todo;
}
if (todo == LEFT)
{
indexXY.x--;
back8 = BACK(indexXY.x, indexXY.y);
back8 >>= ((8 - ((indexXY.y + 1) % 8)) % 8) * 4; //because of the last row of back array
}
else if (todo == UP)
{
indexXY.y--;
if((indexXY.y % 8) == 7)
back8 = BACK(indexXY.x, indexXY.y); //since up direction, 8 elements stored in the same int.
}
else //if (todo == CROSSWISE)
{
indexXY.x--;
indexXY.y--;
back8 = BACK(indexXY.x, indexXY.y);
back8 >>= ((8 - ((indexXY.y + 1) % 8)) % 8) * 4; //because of the last row of back array
}
prevDirection = todo | back1&3; //Here, back1 is used to calculate preDirection.
//printf("prevDirection=%d %d %d \n",prevDirection,indexXY.x,indexXY.y);
back1 = back8 & ELEMENT;
back8 >>= 4;
}
//maybe S
//**********
cigar_store[cigar_index]=todo;
cigar_int_store[cigar_index]=segment_length;
cigar_index++;
// printf("%d\n",cigar_index);
if(indexXY.x>=0)
{
cigar_store[cigar_index]='S';
cigar_int_store[cigar_index]=indexXY.x+1;
cigar_index++;
}
myMaxXY.z=indexXY.x+1;
myMaxXY.w=cigar_index;
result[offset]=myMaxXY;
offset+=blockDim.x*gridDim.x;
}
}
struct InputData
{
char read_base[600];
char reference_base[600];
};
int main(int artc, char* args[])
{
uint64_t total_size=0;
FILE * file;
file=fopen(args[1],"r");
int size;
double computation_time=0;//total_time=0;
timespec start,finish;
char data[200][1000]; //* Here, we read in 100 sequences.
for(int i=0;i<1;i++)
{
fscanf(file,"%s ", data[i]);
}
int row=atoi(args[2]);
int col=atoi(args[3]);
size=row*col;
for(int ww=0;ww<1;ww++)
{
//Here is the sequences pairs.
int index=0;
InputData * inputdata=(InputData* )malloc(100*(sizeof(InputData)));
for(int i=0;i<1;i++)
for(int j=0;j<1;j++)
{
strcpy(inputdata[index].reference_base,data[i]);
strcpy(inputdata[index].read_base,data[j]);
// printf("%s\n",inputdata[index].reference_base);
// printf("%s\n",inputdata[index].read_base);
index++;
}
for(int j=1;j<99;j++)
{
strcpy(inputdata[j].reference_base,inputdata[0].reference_base);
strcpy(inputdata[j].read_base,inputdata[0].read_base);
}
size=100;
//data preparation.
//we put all the sequence pairs into a char* array
char * data_h_total=(char*)malloc(size * 640* sizeof (char)*2+(size*sizeof(NUM_ADD)+127)/128*128);
NUM_ADD * data_num_add=(NUM_ADD *) (data_h_total);
char * data_h=data_h_total+(size*sizeof(NUM_ADD)+127)/128*128; // thus we do not need to worry about alignment
int data_size=0;
char * data_d_total;
hipMalloc( (char **) &data_d_total, (size*sizeof(NUM_ADD)+127)/128*128+size *( 640 )* sizeof (char)*2+sizeof(int)*size*4);
// printf("total size=%d\n",(size*sizeof(NUM_ADD)+127)/128*128+size *( 640 )* sizeof (char)*2+sizeof(int)*size*4);
short * result_h=(short*) malloc(sizeof(short)*size*4);
// printf("%d\n",sizeof(short)*size*4);
char * cigar_h=(char *) malloc(sizeof(char)*size*128); //Here the length of alignment is 128
int * cigar_int_h=(int *) malloc(sizeof(int)*size*128); //Here the length of alignment is 128
for(int i=0;i<size;i++)
{
char4 reference_tep[150];
int read_len=strlen(inputdata[i].read_base);
int ref_len=strlen(inputdata[i].reference_base);
int new_len=(ref_len+4-1)/4;
total_size+=ref_len*read_len;
//printf("i=%d total_size=%d",i,total_size);
for(int j=0;j<new_len;j++)
{
reference_tep[j].x=inputdata[i].reference_base[j*4];
if(j*4+1<ref_len)
reference_tep[j].y=inputdata[i].reference_base[j*4+1];
if(j*4+2<ref_len)
reference_tep[j].z=inputdata[i].reference_base[j*4+2];
if(j*4+3<ref_len)
reference_tep[j].w=inputdata[i].reference_base[j*4+3];
}
data_num_add[i].read_reference_number.x=read_len;
data_num_add[i].read_reference_number.y=ref_len;
data_num_add[i].address_array=data_size;
memcpy(data_h,inputdata[i].read_base,read_len);
data_h+=(read_len+128-1)/128*128;
data_size+=(read_len+128-1)/128*128;
memcpy(data_h,reference_tep,sizeof(char4)* new_len);
data_h+=(new_len*sizeof(char4)+127)/128*128;
data_size+=(new_len*sizeof(char4)+127)/128*128;
}
hipError_t err;
int data_size_to_copy=data_size+(size*sizeof(NUM_ADD)+127)/128*128;
hipMemcpy(data_d_total,data_h_total,data_size_to_copy,hipMemcpyHostToDevice);
NUM_ADD * num_add_d=(NUM_ADD *) (data_d_total);
char * data_d=data_d_total+(size*sizeof(NUM_ADD)+127)/128*128;
short4 * result_d=(short4 *) (data_d_total+data_size_to_copy);
//printf("data size to copy=%d\n",data_size_to_copy);
int blocksize=128;
dim3 block(blocksize);
dim3 grid((size+blocksize-1)/blocksize); //size/blocksize
char * cigar;
err=hipMalloc( (char **) &cigar, size * (128* sizeof (char)+128*sizeof(int)));
if (err != hipSuccess)
{
printf("1 1 1 1 %s", hipGetErrorString(err));
}
int * cigar_int=(int *) (cigar+size*128*sizeof(char));
unsigned int * direction;
int2 * AF_maxXY;
err=hipMalloc((int2 **)& AF_maxXY, 640*sizeof(int2)*(size+blocksize-1)/blocksize*blocksize);// vector
if (err != hipSuccess)
{
printf("2 23 %s", hipGetErrorString(err));
}
// hipMalloc( (unsigned int **) & direction, size * (640*640* sizeof (unsigned int)));
// hipMalloc( (unsigned int **) & direction, 640*640* sizeof (unsigned int)*(size+blocksize-1)/blocksize*blocksize);
err=hipMalloc( (unsigned int **) & direction, 640*(640/8)* sizeof (unsigned int)*(size+blocksize-1)/blocksize*blocksize);
if (err != hipSuccess)
{
printf("3 %s", hipGetErrorString(err));
}
hipLaunchKernelGGL(( Calculate_ben), dim3(grid),dim3(block), 0, 0, size,data_d,num_add_d,result_d,AF_maxXY, direction, 640); //result
//Calculate_ben(int size, char *data, NUM_ADD * num_add, short4 *result, int2* AF_maxXY, unsigned int* back, short rowWidth)
hipDeviceSynchronize();
// Calculate_ben_back(int size, short4 * result, char * cigar, int *cigar_int, unsigned int* back, short rowWidth)
clock_gettime(CLOCK_MONOTONIC_RAW,&start);
hipLaunchKernelGGL(( Calculate_ben_back), dim3(grid),dim3(block), 0, 0, size,result_d,cigar,cigar_int,direction,640); //result
// printf("%d\n", size*sizeof(short4));
hipDeviceSynchronize();
// hipMemcpy(result_h,result_d,size*sizeof(short4),hipMemcpyDeviceToHost);
// hipMemcpy(cigar_h,cigar,128*sizeof(char)*size, hipMemcpyDeviceToHost);
// hipMemcpy(cigar_int_h,cigar_int,128*sizeof(int)*size,hipMemcpyDeviceToHost);
clock_gettime(CLOCK_MONOTONIC_RAW,&finish);
computation_time+=diff(start,finish);
/* for(int i=0;i<size;i++)
{
// printf("%d\n",result_h[i*4]);
// printf("%d\n",result_h[i*4+1]);
// printf("%d\n",result_h[i*4+2]);
// printf("%d\n",result_h[i*4+3]);
printf("[");
for(int j=result_h[i*4+3]-1;j>=0;j--)
{
printf("%d",cigar_int_h[128*i+j]);
if(cigar_h[128*i+j]==UP)
printf("%c",'D');
if(cigar_h[128*i+j]==LEFT)
printf("%c",'I');
if(cigar_h[128*i+j]==CROSSWISE)
printf("%c",'M');
if(cigar_h[128*i+j]=='S')
printf("%c",'S');
if(j!=0) printf(", ");
}
printf("]\n");
}
*/
hipFree(AF_maxXY);
hipFree(direction);
free(data_h_total);
hipFree(data_d_total);
free(inputdata);
hipFree(cigar);
free(cigar_int_h);
free(cigar_h);
// fscanf(file,"%d",&size);
}
// printf(" computation_time= %e total_time=%e \n",computation_time,0);
printf(" computation_time= %e %d GCUPs=%lf\n",computation_time,total_size,( total_size)/computation_time/1000000000);
return 0;
}
#undef STOP
#undef UP
#undef LEFT
#undef CROSSWISE
#undef DIRECTION
#undef CONTIN_UP
#undef CONTIN_LEFT
#undef ELEMENT
| b03d1c7a0f44784e8474308d2752577ab491d49a.cu | #include <iostream>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <cuda.h>
#include <stdint.h>
#include <math.h>
#include <unistd.h>
#include <omp.h>
#include<limits>
#define MEM_OFFSET gridDim.x*blockDim.x //the number of threads in the grid
#define BACK(x,y) back[startPosA[blockThread] + ( ( ((y) + 8) / 8) * rowWidth + (x) + 1 ) * MEM_OFFSET]
#define Y_STEPS 8
#define BLOCK_SIZE 128 //128 //blockDim.x
#define INT_INT -2147483647
double diff(timespec start, timespec end)
{
double a=0;
if((end.tv_nsec-start.tv_nsec)<0)
{
a=end.tv_sec-start.tv_sec-1;
a+=(1000000000+end.tv_nsec-start.tv_nsec)/1000000000.0;
}
else
{
a=end.tv_sec-start.tv_sec+(end.tv_nsec-start.tv_nsec)/1000000000.0;
}
return a;
}
struct NUM_ADD
{
short2 read_reference_number;
int address_array;
};
//calculate_cigar( int size, char * data, NUM_ADD *num_add,int4 * result, int * direction)
//rowWidth=640
__global__ void Calculate_ben(int size, char *data, NUM_ADD * num_add, short4 *result, int2* AF_maxXY, unsigned int* back, short rowWidth)
{
int offset=blockIdx.x*blockDim.x+threadIdx.x;
short2 lengthXY;
char * read_base_array;
char4 * reference_base_array;
int mismatch;
int match;
int gapOp;
int gapEx;
__shared__ int startPosA[128]; //blockDim.x
while(offset<size)
{
int blockThread=threadIdx.x;
//if(offset>=2943) printf("id=%d\n",offset);
match=200;
mismatch=-150;
gapOp=-260;
gapEx=-11;
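// Fixed integer scoring scheme: match reward, mismatch penalty, and affine gap open /
// gap extension penalties.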
lengthXY=num_add[offset].read_reference_number;
read_base_array=(char *) (data+num_add[offset].address_array);
reference_base_array=(char4 *) (read_base_array+(lengthXY.x+127)/128*128);
// direction_index=(short2 *)(direction+offset*640*1100);
startPosA[threadIdx.x] = offset; // startPosA: what is stored here?
//startPosA in the original program is the thread's index within the whole grid
// printf("%d %d\n", lengthXY.x, lengthXY.y);
//initialization below
//initialization of the -1 row in A matrix
// - 2 bytes for element of A matrix
// - 2 bytes for element of F matrix
//still not sure what AF_maxXY stores
for(short x = 0; x < lengthXY.x; x++)
{
int2 tmp;
//(x + 1) because the first element should be -gapEx
tmp.x = 0;
tmp.y = INT_INT - gapEx;
AF_maxXY[startPosA[threadIdx.x] + x * MEM_OFFSET ] = tmp; //because of this operation, the total number of threads in the grid should be greater than size. Otherwise, AF would re-written.
//MEM_OFFSET in the original program is the total number of threads in the grid
//AF_maxXY[startPosA[blockThread] + x * MEM_OFFSET] = tmp;
//fill the -1 row of "back" array
BACK(x,-1) = 9; //0000 0000 0000 0000 0000 0000 0000 1001 == 9
}
// if(offset>=2943) printf("id=%d\n",offset);
// printf("%d %d\n", lengthXY.x, lengthXY.y);
//fill the -1 column of "back" array
for(short y = 0; y < lengthXY.y; y+=Y_STEPS)
{
// if(offset>=2943) printf("id=%d %d %d\n",offset,y,startPosA[threadIdx.x] + ( ( ((y) + 8) / 8) * rowWidth + (-1) + 1 ) * MEM_OFFSET);
BACK(-1,y) = 1717986918; //0110 0110 0110 0110 0110 0110 0110 0110 = 1717986918
}
BACK(-1,-1) = 0; //stop element
//one element of AE_shared consist of:
// - one A element
// - one E element
__shared__ int2 AE_shared[Y_STEPS][BLOCK_SIZE];
//elements of Y sequence go to sharedYSeq
__shared__ char4 sharedYSeq[Y_STEPS/4][BLOCK_SIZE];
int2 AF_current;
AF_current.x = 0;
__shared__ int2 ymin_score[BLOCK_SIZE]; //stores ymin and score
ymin_score[threadIdx.x].y = 0;
__shared__ short4 maxXY[BLOCK_SIZE];
maxXY[threadIdx.x].x = lengthXY.x - 1;
maxXY[threadIdx.x].y = 0;
maxXY[threadIdx.x].z = 0;
// |
// |
// |
// V
for (short y = 0; y < lengthXY.y; y += Y_STEPS)
{
//printf("%d\n",y);
int2 A_init_upleft;
A_init_upleft.x = 0;
//initialization of the -1 column in A matrix
// - one element of A matrix
// - one element of E matrix
for (short i = 0; i < Y_STEPS; i++)
{
int2 tmp;
tmp.x = 0;
tmp.y = INT_INT - gapEx;
AE_shared[i][threadIdx.x] = tmp;
}
//we read elements of the Y sequence
for (short i = 0; i < Y_STEPS/4; i++)
{
sharedYSeq[i][threadIdx.x] = reference_base_array[y/4+i];
//PACK_BYTES(tex1Dfetch(texSeqsY, startY + y + i*4 + 0),
// tex1Dfetch(texSeqsY, startY + y + i*4 + 1),
// tex1Dfetch(texSeqsY, startY + y + i*4 + 2),
// tex1Dfetch(texSeqsY, startY + y + i*4 + 3));
//printf("%c %c %c %c\n", sharedYSeq[i][threadIdx.x].x,sharedYSeq[i][threadIdx.x].y,sharedYSeq[i][threadIdx.x].z,sharedYSeq[i][threadIdx.x].w);
}
ymin_score[threadIdx.x].x = min(Y_STEPS, lengthXY.y - y); //(i < Y_STEPS) && (i + y < lengthY)
//------>
for (short x = 0; x < lengthXY.x; x++)
{
//actual up_left gets a value of recent read value from the global memory
//and actual read value is stored in first two bites of A_upleft
A_init_upleft.y = A_init_upleft.x;
char2 XYSeq;
XYSeq.x = read_base_array[x];
// if(y==0) printf("%c\n",XYSeq.x);
//read from global memory
int2 AF_up = AF_maxXY[startPosA[threadIdx.x] + x * MEM_OFFSET];
//A_init -> up element read in previous iteration from global memory (up-left)
A_init_upleft.x = AF_up.x;
int2 AE_left;
int E_current;
int similarity;
unsigned int back8 = 0;
// | /| /|
// | / | / |
// |/ |/ V
// | /| /|
// | / | / |
// |/ |/ V
for(short i = 0; i < ymin_score[threadIdx.x].x; i++)
{
AE_left = AE_shared[i][threadIdx.x];
// XYSeq.y = sharedYSeq[i/4][threadIdx.x].x,y,z,w;
if(i%4==0)
XYSeq.y = sharedYSeq[i/4][threadIdx.x].x;
if(i%4==1)
XYSeq.y = sharedYSeq[i/4][threadIdx.x].y;
if(i%4==2)
XYSeq.y = sharedYSeq[i/4][threadIdx.x].z;
if(i%4==3)
XYSeq.y = sharedYSeq[i/4][threadIdx.x].w;
//(sharedYSeq[i/4][threadIdx.x] >> (((15-i)%4) * 8)) & 0xFF;
//similarity = substitutionMatrix[XYSeq.y*lettersCount + XYSeq.x];
similarity = (XYSeq.x==XYSeq.y? match:mismatch);
similarity += A_init_upleft.y;
E_current = max(AE_left.y + gapEx, AE_left.x + gapOp);
AF_current.y = max(AF_up.y + gapEx, AF_up.x + gapOp);
AF_current.x = max(E_current, AF_current.y);
AF_current.x = max(AF_current.x, similarity);
//"back" array
back8 <<= 1;
//back8 |= ((AF_current.x==E_current) && (AF_current.x!=AF_current.y)) || (AF_current.x==similarity); //if go left
back8 |= (AF_current.x==E_current) || (AF_current.x==similarity); //if go left
back8 <<= 1;
//back8 |= (AF_current.x==AF_current.y) || (AF_current.x==similarity); //if go up
back8 |=( (AF_current.x==AF_current.y)&& (AF_current.x!=E_current)) || (AF_current.x==similarity); //if go up
back8 <<= 1;
back8 |= (AF_current.y == (AF_up.y + gapEx)); //if continue up
back8 <<= 1;
back8 |= (E_current == (AE_left.y + gapEx)); //if continue left
//initialize variables for next iterations
int2 AE_tmp;
AE_tmp.x = AF_current.x;
AE_tmp.y = E_current;
AE_shared[i][threadIdx.x] = AE_tmp;
A_init_upleft.y = AE_left.x;
AF_up = AF_current;
// printf("%d ",AF_current.x);
} //end of i
//printf("\n");
//we want the last row of back8 to be completed
back8 <<= 4 * (Y_STEPS - ymin_score[threadIdx.x].x);
//write variables to global memory for next loop
AF_maxXY[startPosA[threadIdx.x] + x * MEM_OFFSET] = AF_current;
BACK(x,y) = back8;
//looking for max element in the last row
if( (y + ymin_score[threadIdx.x].x) == lengthXY.y )
{
if (AF_current.x > ymin_score[threadIdx.x].y)
{
maxXY[threadIdx.x].x = x;
maxXY[threadIdx.x].y = y + ymin_score[threadIdx.x].x - 1; //why minus 1???? Because 0+8=8,it should be 7.
maxXY[threadIdx.x].z=lengthXY.x-1-x;
}
//if result== last row
//result4.x=read_reference_number.y-1;
//result4.y=result_row_index;///result_row_index is the threadIdx.x,which is x.
//result4.z=read_reference_number.x-1-result_row_index;
ymin_score[threadIdx.x].y = max(ymin_score[threadIdx.x].y, AF_current.x);
}
} //end of x
//looking for max element in the last column
for(short i = 0; i < ymin_score[threadIdx.x].x; i++)
{
if (AE_shared[i][threadIdx.x].x > ymin_score[threadIdx.x].y||AE_shared[i][threadIdx.x].x==ymin_score[threadIdx.x].y&& maxXY[threadIdx.x].z>(lengthXY.y-(y+i)-1))
{
maxXY[threadIdx.x].x = lengthXY.x - 1; //
maxXY[threadIdx.x].y = y + i;
maxXY[threadIdx.x].z=0;
}
//result4.x=result_col_index; //result_col_index is the y.
//result4.y=read_reference_number.x-1;
//result4.z=0;
ymin_score[threadIdx.x].y = max(ymin_score[threadIdx.x].y, AE_shared[i][threadIdx.x].x);
}
}//end of y
// maxXY[threadIdx.x].w=ymin_score[threadIdx.x].y;
//here write result (AF_current) to global memory
// scores[startPosA[blockThread]] = ymin_score[blockThread].y;
// AF_maxXY[startPosA[threadIdx.x]] = maxXY[threadIdx.x];
result[offset]=maxXY[threadIdx.x];
// printf("%d %d %d %d %d\n",offset,result[offset].x,result[offset].y,result[offset].z,result[offset].w);
offset+=gridDim.x*blockDim.x;
}
}
#undef BACK
#define BACK(x,y) back[startPosA + ( ( ((y) + 8) / 8) * rowWidth + (x) + 1 ) * MEM_OFFSET]
#define STOP 0
#define UP 4
#define LEFT 8
#define CROSSWISE 12
#define DIRECTION 12
#define CONTIN_UP 2
#define CONTIN_LEFT 1
#define ELEMENT 15
#define ININTI 3
//calculate_cigar_2( int size, int4 * result, char * cigar,int * cigar_int,int * direction) //,
__global__ void Calculate_ben_back(int size, short4 * result, char * cigar, int *cigar_int, unsigned int* back, short rowWidth)
{
int offset=blockIdx.x*blockDim.x+threadIdx.x;
while(offset<size)
{
char * cigar_store;
int * cigar_int_store;
cigar_store=(char *) (cigar+offset*sizeof(char)*128);
cigar_int_store=(int *) (cigar_int+offset*128);
int segment_length;
//startPosA == thread number within whole grid
int startPosA = offset;
short4 myMaxXY = result[startPosA];
short2 indexXY;
indexXY.x=myMaxXY.x;
indexXY.y=myMaxXY.y;
int cigar_index=0;
if(myMaxXY.z>0)
{
cigar_store[cigar_index]='S';
cigar_int_store[cigar_index]=myMaxXY.z;
cigar_index++;
}
segment_length=0;
unsigned int back8 = BACK(indexXY.x, indexXY.y);
back8 >>= ((8 - ((indexXY.y + 1) % 8)) % 8) * 4;
unsigned char back1 = back8 & ELEMENT; //current element of back array
back8 >>= 4;
unsigned char prevDirection = ININTI;// 1100 == 12 =>crosswise
unsigned todo;
//back 1 is the current element of back array
while(indexXY.x>=0 && indexXY.y>=0)//(back1 & DIRECTION) //while(direction != STOP)
{
if( ((prevDirection & DIRECTION) == UP) && (prevDirection & CONTIN_UP) )
{
todo = UP;
}
else if( ((prevDirection & DIRECTION) == LEFT) && (prevDirection & CONTIN_LEFT) )
{
todo = LEFT;
}
else if ((back1 & DIRECTION) == UP)
{
todo = UP;
}
else if ((back1 & DIRECTION) == LEFT)
{
todo = LEFT;
}
else //if (back1 & DIRECTION == CROSSWISE)
{
todo = CROSSWISE;
}
if(prevDirection==ININTI) prevDirection=todo;
if((prevDirection & DIRECTION)==todo)
{
segment_length++;
}
else
{
//printf(" prevDirectio= %d todo=%d\n",prevDirection,todo);
//if(prevDirection==LEFT);
cigar_store[cigar_index]=(prevDirection & DIRECTION);//'D'; //I D M????????
//if(prevDirection==UP)
// cigar_store[cigar_index]=UP;//'I'; //I D M????????
//if(prevDirection==CROSSWISE)
// cigar_store[cigar_index]=CROSSWISE;//'M'; //I D M????????
cigar_int_store[cigar_index]=segment_length;
cigar_index++;
segment_length=1;
prevDirection=todo;
}
if (todo == LEFT)
{
indexXY.x--;
back8 = BACK(indexXY.x, indexXY.y);
back8 >>= ((8 - ((indexXY.y + 1) % 8)) % 8) * 4; //because of the last row of back array
}
else if (todo == UP)
{
indexXY.y--;
if((indexXY.y % 8) == 7)
back8 = BACK(indexXY.x, indexXY.y); //since up direction, 8 elements stored in the same int.
}
else //if (todo == CROSSWISE)
{
indexXY.x--;
indexXY.y--;
back8 = BACK(indexXY.x, indexXY.y);
back8 >>= ((8 - ((indexXY.y + 1) % 8)) % 8) * 4; //because of the last row of back array
}
prevDirection = todo | back1&3; //Here, back1 is used to calculate preDirection.
//printf("prevDirection=%d %d %d \n",prevDirection,indexXY.x,indexXY.y);
back1 = back8 & ELEMENT;
back8 >>= 4;
}
//maybe S
//**********
cigar_store[cigar_index]=todo;
cigar_int_store[cigar_index]=segment_length;
cigar_index++;
// printf("%d\n",cigar_index);
if(indexXY.x>=0)
{
cigar_store[cigar_index]='S';
cigar_int_store[cigar_index]=indexXY.x+1;
cigar_index++;
}
myMaxXY.z=indexXY.x+1;
myMaxXY.w=cigar_index;
result[offset]=myMaxXY;
offset+=blockDim.x*gridDim.x;
}
}
struct InputData
{
char read_base[600];
char reference_base[600];
};
int main(int artc, char* args[])
{
uint64_t total_size=0;
FILE * file;
file=fopen(args[1],"r");
int size;
double computation_time=0;//total_time=0;
timespec start,finish;
char data[200][1000]; //* Here, we read in 100 sequences.
for(int i=0;i<1;i++)
{
fscanf(file,"%s ", data[i]);
}
int row=atoi(args[2]);
int col=atoi(args[3]);
size=row*col;
for(int ww=0;ww<1;ww++)
{
//Here is the sequences pairs.
int index=0;
InputData * inputdata=(InputData* )malloc(100*(sizeof(InputData)));
for(int i=0;i<1;i++)
for(int j=0;j<1;j++)
{
strcpy(inputdata[index].reference_base,data[i]);
strcpy(inputdata[index].read_base,data[j]);
// printf("%s\n",inputdata[index].reference_base);
// printf("%s\n",inputdata[index].read_base);
index++;
}
for(int j=1;j<99;j++)
{
strcpy(inputdata[j].reference_base,inputdata[0].reference_base);
strcpy(inputdata[j].read_base,inputdata[0].read_base);
}
size=100;
//data preparation.
//we put all the sequence pairs into a char* array
char * data_h_total=(char*)malloc(size * 640* sizeof (char)*2+(size*sizeof(NUM_ADD)+127)/128*128);
NUM_ADD * data_num_add=(NUM_ADD *) (data_h_total);
char * data_h=data_h_total+(size*sizeof(NUM_ADD)+127)/128*128; // thus we do not need to worry about alignment
int data_size=0;
char * data_d_total;
cudaMalloc( (char **) &data_d_total, (size*sizeof(NUM_ADD)+127)/128*128+size *( 640 )* sizeof (char)*2+sizeof(int)*size*4);
// printf("total size=%d\n",(size*sizeof(NUM_ADD)+127)/128*128+size *( 640 )* sizeof (char)*2+sizeof(int)*size*4);
short * result_h=(short*) malloc(sizeof(short)*size*4);
// printf("%d\n",sizeof(short)*size*4);
char * cigar_h=(char *) malloc(sizeof(char)*size*128); //Here the length of alignment is 128
int * cigar_int_h=(int *) malloc(sizeof(int)*size*128); //Here the length of alignment is 128
for(int i=0;i<size;i++)
{
char4 reference_tep[150];
int read_len=strlen(inputdata[i].read_base);
int ref_len=strlen(inputdata[i].reference_base);
int new_len=(ref_len+4-1)/4;
total_size+=ref_len*read_len;
//printf("i=%d total_size=%d",i,total_size);
for(int j=0;j<new_len;j++)
{
reference_tep[j].x=inputdata[i].reference_base[j*4];
if(j*4+1<ref_len)
reference_tep[j].y=inputdata[i].reference_base[j*4+1];
if(j*4+2<ref_len)
reference_tep[j].z=inputdata[i].reference_base[j*4+2];
if(j*4+3<ref_len)
reference_tep[j].w=inputdata[i].reference_base[j*4+3];
}
data_num_add[i].read_reference_number.x=read_len;
data_num_add[i].read_reference_number.y=ref_len;
data_num_add[i].address_array=data_size;
memcpy(data_h,inputdata[i].read_base,read_len);
data_h+=(read_len+128-1)/128*128;
data_size+=(read_len+128-1)/128*128;
memcpy(data_h,reference_tep,sizeof(char4)* new_len);
data_h+=(new_len*sizeof(char4)+127)/128*128;
data_size+=(new_len*sizeof(char4)+127)/128*128;
}
cudaError_t err;
int data_size_to_copy=data_size+(size*sizeof(NUM_ADD)+127)/128*128;
cudaMemcpy(data_d_total,data_h_total,data_size_to_copy,cudaMemcpyHostToDevice);
NUM_ADD * num_add_d=(NUM_ADD *) (data_d_total);
char * data_d=data_d_total+(size*sizeof(NUM_ADD)+127)/128*128;
short4 * result_d=(short4 *) (data_d_total+data_size_to_copy);
//printf("data size to copy=%d\n",data_size_to_copy);
int blocksize=128;
dim3 block(blocksize);
dim3 grid((size+blocksize-1)/blocksize); //size/blocksize
char * cigar;
err=cudaMalloc( (char **) &cigar, size * (128* sizeof (char)+128*sizeof(int)));
if (err != cudaSuccess)
{
printf("1 1 1 1 %s", cudaGetErrorString(err));
}
int * cigar_int=(int *) (cigar+size*128*sizeof(char));
unsigned int * direction;
int2 * AF_maxXY;
err=cudaMalloc((int2 **)& AF_maxXY, 640*sizeof(int2)*(size+blocksize-1)/blocksize*blocksize);// vector
if (err != cudaSuccess)
{
printf("2 23 %s", cudaGetErrorString(err));
}
// cudaMalloc( (unsigned int **) & direction, size * (640*640* sizeof (unsigned int)));
// cudaMalloc( (unsigned int **) & direction, 640*640* sizeof (unsigned int)*(size+blocksize-1)/blocksize*blocksize);
err=cudaMalloc( (unsigned int **) & direction, 640*(640/8)* sizeof (unsigned int)*(size+blocksize-1)/blocksize*blocksize);
if (err != cudaSuccess)
{
printf("3 %s", cudaGetErrorString(err));
}
Calculate_ben<<<grid,block>>> (size,data_d,num_add_d,result_d,AF_maxXY, direction, 640); //result
//Calculate_ben(int size, char *data, NUM_ADD * num_add, short4 *result, int2* AF_maxXY, unsigned int* back, short rowWidth)
cudaDeviceSynchronize();
// Calculate_ben_back(int size, short4 * result, char * cigar, int *cigar_int, unsigned int* back, short rowWidth)
clock_gettime(CLOCK_MONOTONIC_RAW,&start);
Calculate_ben_back<<<grid,block>>> (size,result_d,cigar,cigar_int,direction,640); //result
// printf("%d\n", size*sizeof(short4));
cudaDeviceSynchronize();
// cudaMemcpy(result_h,result_d,size*sizeof(short4),cudaMemcpyDeviceToHost);
// cudaMemcpy(cigar_h,cigar,128*sizeof(char)*size, cudaMemcpyDeviceToHost);
// cudaMemcpy(cigar_int_h,cigar_int,128*sizeof(int)*size,cudaMemcpyDeviceToHost);
clock_gettime(CLOCK_MONOTONIC_RAW,&finish);
computation_time+=diff(start,finish);
/* for(int i=0;i<size;i++)
{
// printf("%d\n",result_h[i*4]);
// printf("%d\n",result_h[i*4+1]);
// printf("%d\n",result_h[i*4+2]);
// printf("%d\n",result_h[i*4+3]);
printf("[");
for(int j=result_h[i*4+3]-1;j>=0;j--)
{
printf("%d",cigar_int_h[128*i+j]);
if(cigar_h[128*i+j]==UP)
printf("%c",'D');
if(cigar_h[128*i+j]==LEFT)
printf("%c",'I');
if(cigar_h[128*i+j]==CROSSWISE)
printf("%c",'M');
if(cigar_h[128*i+j]=='S')
printf("%c",'S');
if(j!=0) printf(", ");
}
printf("]\n");
}
*/
cudaFree(AF_maxXY);
cudaFree(direction);
free(data_h_total);
cudaFree(data_d_total);
free(inputdata);
cudaFree(cigar);
free(cigar_int_h);
free(cigar_h);
// fscanf(file,"%d",&size);
}
// printf(" computation_time= %e total_time=%e \n",computation_time,0);
printf(" computation_time= %e %d GCUPs=%lf\n",computation_time,total_size,( total_size)/computation_time/1000000000);
return 0;
}
#undef STOP
#undef UP
#undef LEFT
#undef CROSSWISE
#undef DIRECTION
#undef CONTIN_UP
#undef CONTIN_LEFT
#undef ELEMENT
|
61571303b6c7444f60e0f6a13cf7397af078ca64.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/resize_layer.hpp"
using std::max;
using std::min;
namespace caffe {
template <typename Dtype>
__global__ void ROIPoolForward(const int nthreads, const Dtype* bottom_data,
const Dtype spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
Dtype roi_start_w = 0.0; roi_start_w *= spatial_scale;
Dtype roi_start_h = 0.0;roi_start_h*= spatial_scale;
Dtype roi_end_w = static_cast<Dtype>(width); roi_end_w*= spatial_scale;
Dtype roi_end_h = static_cast<Dtype>(height); roi_end_h*= spatial_scale;
const Dtype* bottom_data_cur = bottom_data + (n*channels+c)* height * width;
// Force malformed ROIs to be 1x1
Dtype roi_width = max(roi_end_w - roi_start_w , 1.0);
Dtype roi_height = max(roi_end_h - roi_start_h , 1.0);
Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height);
Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width);
Dtype h_cur = roi_start_h + static_cast<Dtype>(ph) * bin_size_h;
Dtype w_cur = roi_start_w + static_cast<Dtype>(pw) * bin_size_w;
/*
//int hstart = static_cast<int>(floor( h_cur));
//int wstart = static_cast<int>(floor(w_cur));
//int hend = static_cast<int>(ceil(h_cur));
//int wend = static_cast<int>(ceil(w_cur));
// Add roi offsets and clip to input boundaries
hstart = min(max(static_cast<Dtype>(hstart ), 0.0), static_cast<Dtype>(height));
hend = min(max(static_cast<Dtype>(hend) , 0.0), static_cast<Dtype>(height));
wstart = min(max(static_cast<Dtype>(wstart), 0.0), static_cast<Dtype>(width));
wend = min(max(static_cast<Dtype>(wend),0.0),static_cast<Dtype>( width));
bool is_empty = (hend < hstart) || (wend < wstart);
// Define an empty pooling region to be zero
Dtype maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
Dtype u = h_cur - hstart;
Dtype v = w_cur - wstart;
bottom_data += (c) * height * width;
int bottom_index1 = hstart* width + wstart;
int bottom_index2 = hstart* width + wend;
int bottom_index3 = hend* width + wstart;
int bottom_index4 = hend* width + wend;
*/
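// Bilinear interpolation: locate the 2x2 input neighbourhood around (h_cur, w_cur)
// and blend the four samples with weights u (vertical) and v (horizontal).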
int hend = ceil(h_cur - 0.5);
//int pwstart = floor(pw_cur);
int wend = ceil(w_cur - 0.5);
//phstart = min(max(static_cast<Dtype>(phstart), 0.0), static_cast<Dtype>(pooled_height - 1.0));
hend = min(max(static_cast<Dtype>(hend), 1.0), static_cast<Dtype>(height - 1.0));
//pwstart = min(max(static_cast<Dtype>(pwstart), 0.0), static_cast<Dtype>(pooled_width - 1.0));
wend = min(max(static_cast<Dtype>(wend), 1.0), static_cast<Dtype>(width - 1.0));
int hstart = hend - 1;
int wstart = wend - 1;
int bottom_index1 = hstart* width + wstart;
int bottom_index2 = hstart* width + wend;
int bottom_index3 = hend* width + wstart;
int bottom_index4 = hend* width + wend;
Dtype u = h_cur - hstart - 0.5 ;
Dtype v = w_cur - wstart - 0.5 ;
//printf("%d,%d\n",bottom_index1,bottom_index4);
top_data[index] = (1 - u)*(1 - v)*bottom_data_cur[bottom_index1] + (1 - u)*(v)*bottom_data_cur[bottom_index2]+(u)*(1 - v)*bottom_data_cur[bottom_index3] + u*v*bottom_data_cur[bottom_index4];
}
}
template <typename Dtype>
void ResizeLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
// printf("resize Forward_gpu\n");
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
// printf("resize Forward_gpu\n");
int count = top[0]->count();
printf("%d\n",count);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ROIPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_, top_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void ROIPoolBackward(const int nthreads, const Dtype* top_diff,
const int num_rois, const Dtype spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, h, w) coords in bottom data
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
Dtype roi_start_w = 0.0; roi_start_w *= spatial_scale;
Dtype roi_start_h = 0.0;roi_start_h*= spatial_scale;
Dtype roi_end_w = static_cast<Dtype>(width); roi_end_w*= spatial_scale;
Dtype roi_end_h = static_cast<Dtype>(height); roi_end_h*= spatial_scale;
int offset = (roi_n * channels + c) * pooled_height * pooled_width;
const Dtype* offset_top_diff = top_diff + offset;
// Force malformed ROIs to be 1x1
Dtype roi_width = max(roi_end_w - roi_start_w , 1.0);
Dtype roi_height = max(roi_end_h - roi_start_h , 1.0);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
Dtype ph_cur = static_cast<Dtype>(h - roi_start_h) / bin_size_h;
Dtype pw_cur = static_cast<Dtype>(w - roi_start_w) / bin_size_w;
//int phstart = floor(ph_cur);
int phend = ceil(ph_cur - 0.5);
//int pwstart = floor(pw_cur);
int pwend = ceil(pw_cur - 0.5);
//phstart = min(max(static_cast<Dtype>(phstart), 0.0), static_cast<Dtype>(pooled_height - 1.0));
phend = min(max(static_cast<Dtype>(phend), 1.0), static_cast<Dtype>(pooled_height - 1.0));
//pwstart = min(max(static_cast<Dtype>(pwstart), 0.0), static_cast<Dtype>(pooled_width - 1.0));
pwend = min(max(static_cast<Dtype>(pwend), 1.0), static_cast<Dtype>(pooled_width - 1.0)); // clamp to >= 1 so pwstart = pwend - 1 stays in range, mirroring phend above
int phstart = phend - 1;
int pwstart = pwend - 1;
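// Gather gradient contributions from the four nearest output cells using bilinear weights u and v.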
int top_index1 = phstart* pooled_width + pwstart;
int top_index2 = phstart* pooled_width + pwend;
int top_index3 = phend* pooled_width + pwstart;
int top_index4 = phend* pooled_width + pwend;
Dtype u = ph_cur - phstart - 0.5 ;
Dtype v = pw_cur - pwstart - 0.5 ;
bottom_diff[index] += (1 - u)*(1 - v)*offset_top_diff[top_index1] + (1 - u)*(v)*offset_top_diff[top_index2]+(u)*(1 - v)*offset_top_diff[top_index3] + u*v*offset_top_diff[top_index4];
}
}
}
template <typename Dtype>
void ResizeLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
// printf("resize back \n");
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ROIPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, top[0]->num(), spatial_scale_, channels_,
height_, width_, pooled_height_, pooled_width_, bottom_diff);
CUDA_POST_KERNEL_CHECK;
// printf("resize back \n");
}
INSTANTIATE_LAYER_GPU_FUNCS(ResizeLayer);
} // namespace caffe
| 61571303b6c7444f60e0f6a13cf7397af078ca64.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/resize_layer.hpp"
using std::max;
using std::min;
namespace caffe {
template <typename Dtype>
__global__ void ROIPoolForward(const int nthreads, const Dtype* bottom_data,
const Dtype spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
Dtype roi_start_w = 0.0; roi_start_w *= spatial_scale;
Dtype roi_start_h = 0.0;roi_start_h*= spatial_scale;
Dtype roi_end_w = static_cast<Dtype>(width); roi_end_w*= spatial_scale;
Dtype roi_end_h = static_cast<Dtype>(height); roi_end_h*= spatial_scale;
const Dtype* bottom_data_cur = bottom_data + (n*channels+c)* height * width;
// Force malformed ROIs to be 1x1
Dtype roi_width = max(roi_end_w - roi_start_w , 1.0);
Dtype roi_height = max(roi_end_h - roi_start_h , 1.0);
Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height);
Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width);
Dtype h_cur = roi_start_h + static_cast<Dtype>(ph) * bin_size_h;
Dtype w_cur = roi_start_w + static_cast<Dtype>(pw) * bin_size_w;
/*
//int hstart = static_cast<int>(floor( h_cur));
//int wstart = static_cast<int>(floor(w_cur));
//int hend = static_cast<int>(ceil(h_cur));
//int wend = static_cast<int>(ceil(w_cur));
// Add roi offsets and clip to input boundaries
hstart = min(max(static_cast<Dtype>(hstart ), 0.0), static_cast<Dtype>(height));
hend = min(max(static_cast<Dtype>(hend) , 0.0), static_cast<Dtype>(height));
wstart = min(max(static_cast<Dtype>(wstart), 0.0), static_cast<Dtype>(width));
wend = min(max(static_cast<Dtype>(wend),0.0),static_cast<Dtype>( width));
bool is_empty = (hend < hstart) || (wend < wstart);
// Define an empty pooling region to be zero
Dtype maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
Dtype u = h_cur - hstart;
Dtype v = w_cur - wstart;
bottom_data += (c) * height * width;
int bottom_index1 = hstart* width + wstart;
int bottom_index2 = hstart* width + wend;
int bottom_index3 = hend* width + wstart;
int bottom_index4 = hend* width + wend;
*/
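// Bilinear interpolation: locate the 2x2 input neighbourhood around (h_cur, w_cur)
// and blend the four samples with weights u (vertical) and v (horizontal).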
int hend = ceil(h_cur - 0.5);
//int pwstart = floor(pw_cur);
int wend = ceil(w_cur - 0.5);
//phstart = min(max(static_cast<Dtype>(phstart), 0.0), static_cast<Dtype>(pooled_height - 1.0));
hend = min(max(static_cast<Dtype>(hend), 1.0), static_cast<Dtype>(height - 1.0));
//pwstart = min(max(static_cast<Dtype>(pwstart), 0.0), static_cast<Dtype>(pooled_width - 1.0));
wend = min(max(static_cast<Dtype>(wend), 1.0), static_cast<Dtype>(width - 1.0));
int hstart = hend - 1;
int wstart = wend - 1;
int bottom_index1 = hstart* width + wstart;
int bottom_index2 = hstart* width + wend;
int bottom_index3 = hend* width + wstart;
int bottom_index4 = hend* width + wend;
Dtype u = h_cur - hstart - 0.5 ;
Dtype v = w_cur - wstart - 0.5 ;
//printf("%d,%d\n",bottom_index1,bottom_index4);
top_data[index] = (1 - u)*(1 - v)*bottom_data_cur[bottom_index1] + (1 - u)*(v)*bottom_data_cur[bottom_index2]+(u)*(1 - v)*bottom_data_cur[bottom_index3] + u*v*bottom_data_cur[bottom_index4];
}
}
template <typename Dtype>
void ResizeLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
// printf("resize Forward_gpu\n");
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
// printf("resize Forward_gpu\n");
int count = top[0]->count();
printf("%d\n",count);
// NOLINT_NEXT_LINE(whitespace/operators)
ROIPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_, top_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void ROIPoolBackward(const int nthreads, const Dtype* top_diff,
const int num_rois, const Dtype spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, h, w) coords in bottom data
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
Dtype roi_start_w = 0.0; roi_start_w *= spatial_scale;
Dtype roi_start_h = 0.0;roi_start_h*= spatial_scale;
Dtype roi_end_w = static_cast<Dtype>(width); roi_end_w*= spatial_scale;
Dtype roi_end_h = static_cast<Dtype>(height); roi_end_h*= spatial_scale;
int offset = (roi_n * channels + c) * pooled_height * pooled_width;
const Dtype* offset_top_diff = top_diff + offset;
// Force malformed ROIs to be 1x1
Dtype roi_width = max(roi_end_w - roi_start_w , 1.0);
Dtype roi_height = max(roi_end_h - roi_start_h , 1.0);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
Dtype ph_cur = static_cast<Dtype>(h - roi_start_h) / bin_size_h;
Dtype pw_cur = static_cast<Dtype>(w - roi_start_w) / bin_size_w;
//int phstart = floor(ph_cur);
int phend = ceil(ph_cur - 0.5);
//int pwstart = floor(pw_cur);
int pwend = ceil(pw_cur - 0.5);
//phstart = min(max(static_cast<Dtype>(phstart), 0.0), static_cast<Dtype>(pooled_height - 1.0));
phend = min(max(static_cast<Dtype>(phend), 1.0), static_cast<Dtype>(pooled_height - 1.0));
//pwstart = min(max(static_cast<Dtype>(pwstart), 0.0), static_cast<Dtype>(pooled_width - 1.0));
pwend = min(max(static_cast<Dtype>(pwend), 1.0), static_cast<Dtype>(pooled_width - 1.0)); // clamp to >= 1 so pwstart = pwend - 1 stays in range, mirroring phend above
int phstart = phend - 1;
int pwstart = pwend - 1;
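// Gather gradient contributions from the four nearest output cells using bilinear weights u and v.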
int top_index1 = phstart* pooled_width + pwstart;
int top_index2 = phstart* pooled_width + pwend;
int top_index3 = phend* pooled_width + pwstart;
int top_index4 = phend* pooled_width + pwend;
Dtype u = ph_cur - phstart - 0.5 ;
Dtype v = pw_cur - pwstart - 0.5 ;
bottom_diff[index] += (1 - u)*(1 - v)*offset_top_diff[top_index1] + (1 - u)*(v)*offset_top_diff[top_index2]+(u)*(1 - v)*offset_top_diff[top_index3] + u*v*offset_top_diff[top_index4];
}
}
}
template <typename Dtype>
void ResizeLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
// printf("resize back \n");
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
// NOLINT_NEXT_LINE(whitespace/operators)
ROIPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, top[0]->num(), spatial_scale_, channels_,
height_, width_, pooled_height_, pooled_width_, bottom_diff);
CUDA_POST_KERNEL_CHECK;
// printf("resize back \n");
}
INSTANTIATE_LAYER_GPU_FUNCS(ResizeLayer);
} // namespace caffe
|
f4d7fa2b1f7ac6d27058c8b0069ddf1a12396e73.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<ctime>
#include"head.h"
using namespace std;
#define R
template<typename T>
void __global__ transpose(const T* src,T *after,unsigned int row, unsigned int column) {
unsigned int idx = threadIdx.x + blockDim.x * blockIdx.x * 4;
unsigned int idy = threadIdx.y + blockDim.y * blockIdx.y;
if(idx + 3 * blockDim.x < row && idy < column) { // idx already includes the block offset; guard all four strided accesses below
//read merge
#ifdef R
unsigned int fo = idx * column + idy;
unsigned int to = idy * row + idx;
//read column write row
after[to] = src[fo];
after[to + blockDim.x] = src[fo + blockDim.x];
after[to + blockDim.x*2] = src[fo + blockDim.x*2];
after[to + blockDim.x*3] = src[fo + blockDim.x*3];
#elif defined(C)
//read row write column
after[idx * column + idy] = src[idy * row + idx];
//after[idx * column + idy] = src[idy * row + idx];
#endif
}
}
template<typename T>
void transposeHost(const T* src, T* after, unsigned int row, unsigned int column) {
for (int i = 0;i < row;i++) {
for(int j = 0;j < column;j++) {
after[j * row + i] = src[i * column + j];
}
}
}
int main(int argc,char *argv[]) {
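// First a small 3x4 CPU sanity check of transposeHost, then a timed 512x512 transpose on the GPU.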
int Row = 32;
int Column = 16;
int nx = 1 << 9;
int ny = 1 << 9;
int a[] = {0,1,2,3,4,5,6,7,8,9,10,11};
int b[12];
memset(b,0x0,sizeof(b));
transposeHost(a,b,3,4);
for (int i = 0;i < 12;i++) {
cout << b[i] <<" ";
}
cout <<endl;
int a_[ny * nx],*a_dev,*b_dev;
dim3 block(Row,Column);
dim3 grid( (nx + Row*4 - 1)/(Row*4), (Column + ny - 1)/Column);
hipMalloc((void**)&a_dev,sizeof(a_));
hipMalloc((void**)&b_dev,sizeof(a_));
cout << "brea 1" << sizeof(a_) <<endl;
hipMemcpy(a_dev,a_,sizeof(a_),hipMemcpyHostToDevice);
clock_t start, end;
start = clock();
hipLaunchKernelGGL(( transpose), dim3(grid),dim3(block), 0, 0, a_dev,b_dev,nx,ny);
hipDeviceSynchronize();
end = clock();
cout << 1000.0 * (end - start) / CLOCKS_PER_SEC << "ms" << endl;
return 0;
}
| f4d7fa2b1f7ac6d27058c8b0069ddf1a12396e73.cu | #include<iostream>
#include<ctime>
#include"head.h"
using namespace std;
#define R
template<typename T>
void __global__ transpose(const T* src,T *after,unsigned int row, unsigned int column) {
unsigned int idx = threadIdx.x + blockDim.x * blockIdx.x * 4;
unsigned int idy = threadIdx.y + blockDim.y * blockIdx.y;
if(idx + 3 * blockDim.x < row && idy < column) { // idx already includes the block offset; guard all four strided accesses below
//read merge
#ifdef R
unsigned int fo = idx * column + idy;
unsigned int to = idy * row + idx;
//read column write row
after[to] = src[fo];
after[to + blockDim.x] = src[fo + blockDim.x];
after[to + blockDim.x*2] = src[fo + blockDim.x*2];
after[to + blockDim.x*3] = src[fo + blockDim.x*3];
#elif defined(C)
//read row write column
after[idx * column + idy] = src[idy * row + idx];
//after[idx * column + idy] = src[idy * row + idx];
#endif
}
}
template<typename T>
void transposeHost(const T* src, T* after, unsigned int row, unsigned int column) {
for (int i = 0;i < row;i++) {
for(int j = 0;j < column;j++) {
after[j * row + i] = src[i * column + j];
}
}
}
int main(int argc,char *argv[]) {
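// First a small 3x4 CPU sanity check of transposeHost, then a timed 512x512 transpose on the GPU.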
int Row = 32;
int Column = 16;
int nx = 1 << 9;
int ny = 1 << 9;
int a[] = {0,1,2,3,4,5,6,7,8,9,10,11};
int b[12];
memset(b,0x0,sizeof(b));
transposeHost(a,b,3,4);
for (int i = 0;i < 12;i++) {
cout << b[i] <<" ";
}
cout <<endl;
int a_[ny * nx],*a_dev,*b_dev;
dim3 block(Row,Column);
dim3 grid( (nx + Row*4 - 1)/(Row*4), (Column + ny - 1)/Column);
cudaMalloc((void**)&a_dev,sizeof(a_));
cudaMalloc((void**)&b_dev,sizeof(a_));
cout << "brea 1" << sizeof(a_) <<endl;
cudaMemcpy(a_dev,a_,sizeof(a_),cudaMemcpyHostToDevice);
clock_t start, end;
start = clock();
transpose<<<grid,block>>>(a_dev,b_dev,nx,ny);
cudaDeviceSynchronize();
end = clock();
cout << 1000.0 * (end - start) / CLOCKS_PER_SEC << "ms" << endl;
return 0;
}
|
21d393a26cdf30ba7887f47ec8f9a6a395fc96e2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <hiprand/hiprand_kernel.h>
#include "voxel_query_gpu.h"
#include "cuda_utils.h"
__global__ void voxel_query_kernel_stack(int M, int R1, int R2, int R3, int nsample,
float radius, int z_range, int y_range, int x_range, const float *new_xyz,
const float *xyz, const int *new_coords, const int *point_indices, int *idx) {
// :param new_coords: (M1 + M2 ..., 4) centers of the ball query
// :param point_indices: (B, Z, Y, X)
// output:
// idx: (M1 + M2, nsample)
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (pt_idx >= M) return;
new_xyz += pt_idx * 3;
new_coords += pt_idx * 4;
idx += pt_idx * nsample;
hiprandState_t state;
hiprand_init(pt_idx, 0, 0, &state);
float radius2 = radius * radius;
float new_x = new_xyz[0];
float new_y = new_xyz[1];
float new_z = new_xyz[2];
int batch_idx = new_coords[0];
int new_coords_z = new_coords[1];
int new_coords_y = new_coords[2];
int new_coords_x = new_coords[3];
int cnt = 0;
int cnt2 = 0;
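// Scan the voxel neighbourhood within +/- z_range, +/- y_range, +/- x_range of the
// query's voxel coordinate, keeping up to nsample in-radius points.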
// for (int dz = -1*z_range; dz <= z_range; ++dz) {
for (int dz = -1*z_range; dz <= z_range; ++dz) {
int z_coord = new_coords_z + dz;
if (z_coord < 0 || z_coord >= R1) continue;
for (int dy = -1*y_range; dy <= y_range; ++dy) {
int y_coord = new_coords_y + dy;
if (y_coord < 0 || y_coord >= R2) continue;
for (int dx = -1*x_range; dx <= x_range; ++dx) {
int x_coord = new_coords_x + dx;
if (x_coord < 0 || x_coord >= R3) continue;
int index = batch_idx * R1 * R2 * R3 + \
z_coord * R2 * R3 + \
y_coord * R3 + \
x_coord;
int neighbor_idx = point_indices[index];
if (neighbor_idx < 0) continue;
float x_per = xyz[neighbor_idx*3 + 0];
float y_per = xyz[neighbor_idx*3 + 1];
float z_per = xyz[neighbor_idx*3 + 2];
float dist2 = (x_per - new_x) * (x_per - new_x) + (y_per - new_y) * (y_per - new_y) + (z_per - new_z) * (z_per - new_z);
if (dist2 > radius2) continue;
++cnt2;
if (cnt < nsample) {
if (cnt == 0) {
for (int l = 0; l < nsample; ++l) {
idx[l] = neighbor_idx;
}
}
idx[cnt] = neighbor_idx;
++cnt;
}
// else {
// float rnd = hiprand_uniform(&state);
// if (rnd < (float(nsample) / cnt2)) {
// int insertidx = ceilf(hiprand_uniform(&state) * nsample) - 1;
// idx[insertidx] = neighbor_idx;
// }
// }
}
}
}
if (cnt == 0) idx[0] = -1;
}
void voxel_query_kernel_launcher_stack(int M, int R1, int R2, int R3, int nsample,
float radius, int z_range, int y_range, int x_range, const float *new_xyz,
const float *xyz, const int *new_coords, const int *point_indices, int *idx) {
// :param new_coords: (M1 + M2 ..., 4) centers of the voxel query
// :param point_indices: (B, Z, Y, X)
// output:
// idx: (M1 + M2, nsample)
hipError_t err;
dim3 blocks(DIVUP(M, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
hipLaunchKernelGGL(( voxel_query_kernel_stack), dim3(blocks), dim3(threads), 0, 0, M, R1, R2, R3, nsample, radius, z_range, y_range, x_range, new_xyz, xyz, new_coords, point_indices, idx);
// hipDeviceSynchronize(); // for using printf in kernel function
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
}
| 21d393a26cdf30ba7887f47ec8f9a6a395fc96e2.cu | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <curand_kernel.h>
#include "voxel_query_gpu.h"
#include "cuda_utils.h"
__global__ void voxel_query_kernel_stack(int M, int R1, int R2, int R3, int nsample,
float radius, int z_range, int y_range, int x_range, const float *new_xyz,
const float *xyz, const int *new_coords, const int *point_indices, int *idx) {
// :param new_coords: (M1 + M2 ..., 4) centers of the ball query
// :param point_indices: (B, Z, Y, X)
// output:
// idx: (M1 + M2, nsample)
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (pt_idx >= M) return;
new_xyz += pt_idx * 3;
new_coords += pt_idx * 4;
idx += pt_idx * nsample;
curandState state;
curand_init(pt_idx, 0, 0, &state);
float radius2 = radius * radius;
float new_x = new_xyz[0];
float new_y = new_xyz[1];
float new_z = new_xyz[2];
int batch_idx = new_coords[0];
int new_coords_z = new_coords[1];
int new_coords_y = new_coords[2];
int new_coords_x = new_coords[3];
int cnt = 0;
int cnt2 = 0;
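// Scan the voxel neighbourhood within +/- z_range, +/- y_range, +/- x_range of the
// query's voxel coordinate, keeping up to nsample in-radius points.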
// for (int dz = -1*z_range; dz <= z_range; ++dz) {
for (int dz = -1*z_range; dz <= z_range; ++dz) {
int z_coord = new_coords_z + dz;
if (z_coord < 0 || z_coord >= R1) continue;
for (int dy = -1*y_range; dy <= y_range; ++dy) {
int y_coord = new_coords_y + dy;
if (y_coord < 0 || y_coord >= R2) continue;
for (int dx = -1*x_range; dx <= x_range; ++dx) {
int x_coord = new_coords_x + dx;
if (x_coord < 0 || x_coord >= R3) continue;
int index = batch_idx * R1 * R2 * R3 + \
z_coord * R2 * R3 + \
y_coord * R3 + \
x_coord;
int neighbor_idx = point_indices[index];
if (neighbor_idx < 0) continue;
float x_per = xyz[neighbor_idx*3 + 0];
float y_per = xyz[neighbor_idx*3 + 1];
float z_per = xyz[neighbor_idx*3 + 2];
float dist2 = (x_per - new_x) * (x_per - new_x) + (y_per - new_y) * (y_per - new_y) + (z_per - new_z) * (z_per - new_z);
if (dist2 > radius2) continue;
++cnt2;
if (cnt < nsample) {
if (cnt == 0) {
for (int l = 0; l < nsample; ++l) {
idx[l] = neighbor_idx;
}
}
idx[cnt] = neighbor_idx;
++cnt;
}
// else {
// float rnd = curand_uniform(&state);
// if (rnd < (float(nsample) / cnt2)) {
// int insertidx = ceilf(curand_uniform(&state) * nsample) - 1;
// idx[insertidx] = neighbor_idx;
// }
// }
}
}
}
if (cnt == 0) idx[0] = -1;
}
void voxel_query_kernel_launcher_stack(int M, int R1, int R2, int R3, int nsample,
float radius, int z_range, int y_range, int x_range, const float *new_xyz,
const float *xyz, const int *new_coords, const int *point_indices, int *idx) {
// :param new_coords: (M1 + M2 ..., 4) centers of the voxel query
// :param point_indices: (B, Z, Y, X)
// output:
// idx: (M1 + M2, nsample)
cudaError_t err;
dim3 blocks(DIVUP(M, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
voxel_query_kernel_stack<<<blocks, threads>>>(M, R1, R2, R3, nsample, radius, z_range, y_range, x_range, new_xyz, xyz, new_coords, point_indices, idx);
// cudaDeviceSynchronize(); // for using printf in kernel function
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
|
18e435d97c78e2e18abbee01050709b59d23b93c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/InitialTensorOptions.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/HIPContext.h>
#include <ATen/native/TensorFactories.h>
#include <ATen/native/hip/Resize.cuh>
#include <c10/util/Exception.h>
#include <THH/THHGeneral.h>
#include <THH/THHThrustAllocator.cuh>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/sequence.h>
#include <algorithm>
#include <cstddef>
#include <cmath>
namespace at {
namespace native {
Tensor& eye_out_cuda(Tensor& result, int64_t n) {
return at::native::eye_out_cuda(result, n, /*m=*/-1);
}
Tensor& eye_out_cuda(Tensor& result, int64_t n, int64_t m) {
AT_CHECK(n >= 0, "n must be greater or equal to 0, got ", n);
if(m < 0) {
m = n;
}
result.resize_({n, m});
result.zero_();
int64_t sz = std::min<int64_t>(n, m);
int64_t stride = result.stride(0) + result.stride(1);
Tensor diag = result.as_strided({sz}, {stride});
diag.fill_(1);
return result;
}
Tensor empty_cuda(IntArrayRef size, const TensorOptions& options) {
AT_ASSERT(options.backend() == at::Backend::CUDA);
AT_ASSERT(!options.is_variable()); // is_variable should have been 'unpacked' // TODO: remove this when Variable and Tensor are merged
check_size_nonnegative(size);
auto* allocator = at::cuda::getCUDADeviceAllocator();
int64_t nelements = prod_intlist(size);
auto dtype = options.dtype();
auto storage_impl = c10::make_intrusive<StorageImpl>(
dtype,
nelements,
allocator->allocate(nelements * dtype.itemsize()),
allocator,
/*resizeable=*/true);
auto tensor = detail::make_tensor<TensorImpl>(storage_impl, CUDATensorId());
// Default TensorImpl has size [0]
if (size.size() != 1 || size[0] != 0) {
tensor.unsafeGetTensorImpl()->set_sizes_contiguous(size);
}
return tensor;
}
Tensor empty_strided_cuda(IntArrayRef size, IntArrayRef stride, const TensorOptions& options) {
auto t = at::native::empty_cuda({0}, options);
at::native::resize_impl_cuda_(t.unsafeGetTensorImpl(), size, stride);
return t;
}
Tensor& randperm_out_cuda(Tensor& result, int64_t n, Generator* generator) {
AT_CHECK(n >= 0, "n must be non-negative, got", n);
AT_CHECK(at::scalar_tensor(n, result.options()).defined(),
"n is too large for result tensor type: '", result.type().toString(), "'");
result.resize_({n});
if (result.scalar_type() == at::ScalarType::Half) {
auto result_float = at::empty({n}, initialTensorOptions().device(Device(DeviceType::CUDA)));
result.copy_(randperm_out_cuda(result_float, n, generator));
} else {
if (n < 30000) { // For small inputs, we offload it to CPU instead.
auto result_cpu = at::empty({n}, result.options().device(kCPU));
randperm_out(result_cpu, n, generator);
result.copy_(result_cpu);
} else {
// Generate random values for the keys array
AT_DISPATCH_ALL_TYPES(
result.scalar_type(), "randperm_out_cuda", [&] {
auto keys = at::empty(result.sizes(), result.options()).random_(generator);
auto result_data = thrust::device_ptr<scalar_t>(result.data<scalar_t>());
auto keys_data = thrust::device_ptr<scalar_t>(keys.data<scalar_t>());
auto state = globalContext().getTHCState();
THCThrustAllocator thrustAlloc(state);
auto policy = thrust::hip::par(thrustAlloc).on(at::hip::getCurrentHIPStreamMasqueradingAsCUDA());
thrust::sequence(policy, result_data, result_data + n);
// Use the sorted order of keys to rearrange the result array
thrust::sort_by_key(policy, keys_data, keys_data + n, result_data);
}
);
}
}
return result;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
namespace {
// To find the max integer that does not exceed the root of an int64_t variable,
// we could use a loop to test one bit at a time, which takes up to 31
// iterations. This would give the accurate result, but is relatively slow and
// is an overkill for most cases where double's precision suffice.
//
// If we directly use sqrt to calculate the root, the conversion from int64_t
// to double would lose 11 bits precision.
//
// The following solution uses sqrt directly for most cases, and would only
// special handle it if there is indeed precision loss.
__device__
inline int64_t resolve_root_int(
int64_t b, int64_t cX4, int64_t x, int32_t sign) {
int64_t bXb_cX4 = b*b - cX4;
// potential precision loss could occur here when casting int64_t (63 bits
// precision) to double (52 bits precision)
double sr = ::sqrt((double)bXb_cX4);
int64_t res = ::__double2ll_rd((-b + sign * sr)/2);
// have to cast double to int64_t, otherwise it would only compare up to the
// precision of a double variable, ignoring the precision loss
if (bXb_cX4 != (int64_t) (sr * sr)) {
// handle precision loss by using binary search
int64_t llsr = ::__double2ll_rd(sr);
// Use the following math to reduce search space.
// Suppose z is the accurate result of sqrt(bXb_cX4) without precision loss
// let d = abs(bXb_cX4 - llsr * llsr), then we have:
// z = sqrt(bXb_cX4) <= sqrt(llsr * llsr + d) <= llsr + sqrt(d)
// z = sqrt(bXb_cX4) >= sqrt(llsr * llsr - d) >= llsr - sqrt(d)
// Hence, it is sufficient to search range [llsr - sqrt(d), llsr + sqrt(d)).
// And the true value of row would also be within range,
// [res - sqrt(d), res + sqrt(d) + 1)
// as the denominator would only reduce the precision penalty.
int64_t diff =
::__double2ll_ru(::sqrt(::fabs((double)(bXb_cX4 - llsr * llsr))));
// l never exceeds (could equal to) the target row index
auto l = res > diff ? res - diff : 0;
// r is always larger than the target row index
auto r = res + diff + 1;
// binary search for the correct answer
x <<= 1; // the loop always compares with 2x, so do it once here
while (l + 1 < r) {
auto m = (l + r) >> 1;
// for tril:
// b = 2f - 1, sign = 1, hence (2f + m - 1) * m / 2
// for triu:
// b = -2f - 1, sign = -1, hence (2f - m + 1) * m / 2
if (sign * (b + m) * m > x) {
r = m;
} else {
l = m;
}
}
res = l;
}
return res;
}
// f: the number of elements in the first row of the trapezoid.
// x: the index of the target coordinates ordered by row and then column.
//
// View the tril as a top trapezoid stacked on a bottom rectangle. Assume x
// corresponds to the coordinate (row, col) in the trapezoid, where the row and
// the col both start from 0, then we have:
//
// (f + f + row - 1) * row / 2 <= x [1]
// (f + f + row) * (row + 1) / 2 > x [2]
//
// Therefore, row is the maximum integer satisfying the following inequality:
//
// (row + 2f - 1)row <= 2x
// row^2 + (2f-1)row - 2x <= 0. [3]
//
// Based on inequality [3], we have the following coefficients for formula of
// root:
// a = 1
// b = 2f - 1
// c = -2x
// There are two roots, and we should use the largest integer that does not
// exceed the root on the right. Intuitively, it is because:
// i) the valid solution range of row is between two roots, as it is <= 0;
// ii) as we count in more rows, the total # of elements should always
// increase, hence so does the left-hand side row^2 + (2f-1)row - 2x.
// Therefore, the valid range of row lies in between the nadir point and
// the larger root on the right.
// Full proof can be derived from inequality [2]. So, we calculate the result
// coordinate as:
//
// row = floor((-b + sqrt(b^2 - 4c)) / 2)
// col = x - (f + f + row - 1) * row / 2
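//
// Worked example: with f = 3 and x = 7, b = 2f - 1 = 5 and c = -2x = -14, so
// sqrt(b^2 - 4c) = sqrt(81) = 9 and row = floor((-5 + 9) / 2) = 2,
// col = 7 - (3 + 3 + 2 - 1) * 2 / 2 = 0, i.e. the first element of the third row.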
__device__
inline void get_coordinate_in_tril_trapezoid(
int64_t f, int64_t x, int64_t & row, int64_t & col) {
f <<= 1; // all statements use 2f, so only calculate it once here.
auto b = f - 1;
auto cX4 = - (x << 3); // 4 * c = 4 * (-2x) = -8x;
row = resolve_root_int(b, cX4, x, 1);
col = x - ((f + row - 1) * row >> 1);
}
// f: the number of elements in the first row of the bottom trapezoid.
// x: the index of the target coordinates ordered by row and then column.
//
// View the triu as a top rectangle stacked on a bottom trapezoid, where the
// trapezoid is upside down. Assume x corresponds to the coordinate (row, col)
// in the bottom trapezoid, where the row and the col start from 0, then we
// have:
//
// (f + f - row + 1) * row / 2 <= x [1]
// (f + f - row) * (row + 1) / 2 > x [2]
//
// Therefore, row is the maximum integer satisfying the following inequality:
//
// (-row + 2f + 1)row <= 2x
// row^2 - (2f+1)row + 2x >= 0. [3]
//
// Based on inequality [3], we have the following coefficients for formula of
// root:
// a = 1
// b = -1 - 2f
// c = 2x
// There are two roots, and we should use the largest integer that does not
// exceed the root on the left. Intuitively, it is because:
// i) the valid solution range of row is outside of the two roots, as it is <
// > 0;
// ii) as we count in more rows, the total # of elements should always
// increase, hence so does the left-hand side row^2 - (2f+1)row + 2x.
// Therefore, the valid range of row lies to the left of the smaller root
// on the left.
// Full proof can be derived from inequality [2]. So, we calculate the result
// coordinate as:
//
// row = floor((-b - sqrt(b^2 - 4c)) / 2)
// col = x - (f + f - row + 1) * row / 2
__device__
inline void get_coordinate_in_triu_trapezoid(
int64_t f, int64_t x, int64_t & row, int64_t & col) {
f <<= 1; // all statements use 2f, so only calculate it once here.
auto b = -1 - f;
auto cX4 = x << 3; // 4 * c = 4 * (2x) = 8x;
row = resolve_root_int(b, cX4, x, -1);
col = x - ((f - row + 1) * row >> 1) + row;
}
} // namespace
template <typename scalar_t>
__global__
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
void tril_indices_kernel(scalar_t * tensor,
int64_t row_offset,
int64_t m_first_row,
int64_t col,
int64_t trapezoid_size,
int64_t tril_size) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index < tril_size) {
int64_t r, c;
if (linear_index < trapezoid_size) {
// the coordinate is within the top trapezoid
get_coordinate_in_tril_trapezoid(m_first_row, linear_index, r, c);
} else {
// the coordinate falls in the bottom rectangle
auto surplus = linear_index - trapezoid_size;
// add the height of trapezoid: m_last_row (col) - m_first_row + 1
r = surplus / col + col - m_first_row + 1;
c = surplus % col;
}
r += row_offset;
tensor[linear_index] = r;
tensor[linear_index + tril_size] = c;
}
}
// Some Large test cases for the fallback binary search path is disabled by
// default to speed up CI tests and to avoid OOM error. When modifying the
// implementation, please enable them in test/test_cuda.py and make sure they
// pass on your local server.
Tensor tril_indices_cuda(
int64_t row, int64_t col, int64_t offset, const TensorOptions& options) {
check_args(row, col, options);
auto tril_size = get_tril_size(row, col, offset);
auto tensor = empty_cuda({2, tril_size}, options);
if (tril_size > 0) {
auto m_first_row = offset > 0 ?
std::min<int64_t>(col, 1 + offset) : // upper bounded by col
row + offset > 0; // either 0 or 1
auto trapezoid_row_offset = std::max<int64_t>(0, -offset);
auto rectangle_row_offset = trapezoid_row_offset + col - m_first_row + 1;
int64_t rectangle_size = 0;
if (rectangle_row_offset < row) {
rectangle_size = (row - rectangle_row_offset) * col;
}
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid;
// using tril_size instead of tensor.numel(), as each thread takes care of
// two elements in the tensor.
AT_CHECK(
cuda::getApplyGrid(tril_size, dim_grid, tensor.get_device()),
"unable to get dim grid");
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, tensor.scalar_type(), "tril_indices_cuda", [&] {
hipLaunchKernelGGL(( tril_indices_kernel),
dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
tensor.data<scalar_t>(),
trapezoid_row_offset,
m_first_row,
col,
tril_size - rectangle_size,
tril_size);
});
}
return tensor;
}
template <typename scalar_t>
__global__
void triu_indices_kernel(scalar_t * tensor,
int64_t col_offset,
int64_t m_first_row,
int64_t col,
int64_t rectangle_size,
int64_t triu_size) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index < triu_size) {
int64_t r, c;
if (linear_index < rectangle_size) {
// the coordinate is within the top rectangle
r = linear_index / col;
c = linear_index % col;
} else {
// the coordinate falls in the bottom trapezoid
get_coordinate_in_triu_trapezoid(
m_first_row, linear_index - rectangle_size, r, c);
r += rectangle_size / col;
}
c += col_offset;
tensor[linear_index] = r;
tensor[linear_index + triu_size] = c;
}
}
// Some Large test cases for the fallback binary search path is disabled by
// default to speed up CI tests and to avoid OOM error. When modifying the
// implementation, please enable them in test/test_cuda.py and make sure they
// pass on your local server.
Tensor triu_indices_cuda(
int64_t row, int64_t col, int64_t offset, const TensorOptions& options) {
check_args(row, col, options);
auto triu_size = row * col - get_tril_size(row, col, offset - 1);
auto tensor = empty_cuda({2, triu_size}, options);
if (triu_size > 0) {
// # of triu elements in the first row
auto m_first_row = offset > 0 ?
std::max<int64_t>(col - offset, 0) : // upper bounded by col
col;
// size of the top rectangle
int64_t rectangle_size = 0;
if (offset < 0) {
rectangle_size = std::min<int64_t>(row, -offset) * col;
}
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid;
// using triu_size instead of tensor.numel(), as each thread takes care of
// two elements in the tensor.
AT_CHECK(
cuda::getApplyGrid(triu_size, dim_grid, tensor.get_device()),
"unable to get dim grid");
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, tensor.scalar_type(), "triu_indices_cuda", [&] {
hipLaunchKernelGGL(( triu_indices_kernel),
dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
tensor.data<scalar_t>(),
std::max<int64_t>(0, offset),
m_first_row,
col,
rectangle_size,
triu_size);
});
}
return tensor;
}
}} // namespace at::native
| 18e435d97c78e2e18abbee01050709b59d23b93c.cu | #include <ATen/ATen.h>
#include <ATen/InitialTensorOptions.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/native/TensorFactories.h>
#include <ATen/native/cuda/Resize.cuh>
#include <c10/util/Exception.h>
#include <THC/THCGeneral.h>
#include <THC/THCThrustAllocator.cuh>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/sequence.h>
#include <algorithm>
#include <cstddef>
#include <cmath>
namespace at {
namespace native {
Tensor& eye_out_cuda(Tensor& result, int64_t n) {
return at::native::eye_out_cuda(result, n, /*m=*/-1);
}
Tensor& eye_out_cuda(Tensor& result, int64_t n, int64_t m) {
AT_CHECK(n >= 0, "n must be greater or equal to 0, got ", n);
if(m < 0) {
m = n;
}
result.resize_({n, m});
result.zero_();
int64_t sz = std::min<int64_t>(n, m);
int64_t stride = result.stride(0) + result.stride(1);
Tensor diag = result.as_strided({sz}, {stride});
diag.fill_(1);
return result;
}
Tensor empty_cuda(IntArrayRef size, const TensorOptions& options) {
AT_ASSERT(options.backend() == at::Backend::CUDA);
AT_ASSERT(!options.is_variable()); // is_variable should have been 'unpacked' // TODO: remove this when Variable and Tensor are merged
check_size_nonnegative(size);
auto* allocator = at::cuda::getCUDADeviceAllocator();
int64_t nelements = prod_intlist(size);
auto dtype = options.dtype();
auto storage_impl = c10::make_intrusive<StorageImpl>(
dtype,
nelements,
allocator->allocate(nelements * dtype.itemsize()),
allocator,
/*resizeable=*/true);
auto tensor = detail::make_tensor<TensorImpl>(storage_impl, CUDATensorId());
// Default TensorImpl has size [0]
if (size.size() != 1 || size[0] != 0) {
tensor.unsafeGetTensorImpl()->set_sizes_contiguous(size);
}
return tensor;
}
Tensor empty_strided_cuda(IntArrayRef size, IntArrayRef stride, const TensorOptions& options) {
auto t = at::native::empty_cuda({0}, options);
at::native::resize_impl_cuda_(t.unsafeGetTensorImpl(), size, stride);
return t;
}
Tensor& randperm_out_cuda(Tensor& result, int64_t n, Generator* generator) {
AT_CHECK(n >= 0, "n must be non-negative, got", n);
AT_CHECK(at::scalar_tensor(n, result.options()).defined(),
"n is too large for result tensor type: '", result.type().toString(), "'");
result.resize_({n});
if (result.scalar_type() == at::ScalarType::Half) {
auto result_float = at::empty({n}, initialTensorOptions().device(Device(DeviceType::CUDA)));
result.copy_(randperm_out_cuda(result_float, n, generator));
} else {
if (n < 30000) { // For small inputs, we offload it to CPU instead.
auto result_cpu = at::empty({n}, result.options().device(kCPU));
randperm_out(result_cpu, n, generator);
result.copy_(result_cpu);
} else {
// Generate random values for the keys array
AT_DISPATCH_ALL_TYPES(
result.scalar_type(), "randperm_out_cuda", [&] {
auto keys = at::empty(result.sizes(), result.options()).random_(generator);
auto result_data = thrust::device_ptr<scalar_t>(result.data<scalar_t>());
auto keys_data = thrust::device_ptr<scalar_t>(keys.data<scalar_t>());
auto state = globalContext().getTHCState();
THCThrustAllocator thrustAlloc(state);
auto policy = thrust::cuda::par(thrustAlloc).on(at::cuda::getCurrentCUDAStream());
thrust::sequence(policy, result_data, result_data + n);
// Use the sorted order of keys to rearrange the result array
thrust::sort_by_key(policy, keys_data, keys_data + n, result_data);
}
);
}
}
return result;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
namespace {
// To find the max integer that does not exceed the root of an int64_t variable,
// we could use a loop to test one bit at a time, which takes up to 31
// iterations. This would give the accurate result, but is relatively slow and
// is an overkill for most cases where double's precision suffice.
//
// If we directly use sqrt to calculate the root, the conversion from int64_t
// to double would lose 11 bits precision.
//
// The following solution uses sqrt directly for most cases, and would only
// special handle it if there is indeed precision loss.
__device__
inline int64_t resolve_root_int(
int64_t b, int64_t cX4, int64_t x, int32_t sign) {
int64_t bXb_cX4 = b*b - cX4;
// potential precision loss could occur here when casting int64_t (63 bits
// precision) to double (52 bits precision)
double sr = ::sqrt((double)bXb_cX4);
int64_t res = ::__double2ll_rd((-b + sign * sr)/2);
// have to cast double to int64_t, otherwise it would only compare up to the
// precision of a double variable, ignoring the precision loss
if (bXb_cX4 != (int64_t) (sr * sr)) {
// handle precision loss by using binary search
int64_t llsr = ::__double2ll_rd(sr);
// Use the following math to reduce search space.
// Suppose z is the accurate result of sqrt(bXb_cX4) without precision loss
// let d = abs(bXb_cX4 - llsr * llsr), then we have:
// z = sqrt(bXb_cX4) <= sqrt(llsr * llsr + d) <= llsr + sqrt(d)
// z = sqrt(bXb_cX4) >= sqrt(llsr * llsr - d) >= llsr - sqrt(d)
// Hence, it is sufficient to search range [llsr - sqrt(d), llsr + sqrt(d)).
// And the true value of row would also be within range,
// [res - sqrt(d), res + sqrt(d) + 1)
// as the denominator would only reduce the precision penalty.
int64_t diff =
::__double2ll_ru(::sqrt(::fabs((double)(bXb_cX4 - llsr * llsr))));
// l never exceeds (could equal to) the target row index
auto l = res > diff ? res - diff : 0;
// r is always larger than the target row index
auto r = res + diff + 1;
// binary search for the correct answer
x <<= 1; // the loop always compares with 2x, so do it once here
while (l + 1 < r) {
auto m = (l + r) >> 1;
// for tril:
// b = 2f - 1, sign = 1, hence (2f + m - 1) * m / 2
// for triu:
// b = -2f - 1, sign = -1, hence (2f - m + 1) * m / 2
if (sign * (b + m) * m > x) {
r = m;
} else {
l = m;
}
}
res = l;
}
return res;
}
// f: the number of elements in the first row of the trapezoid.
// x: the index of the target coordinates ordered by row and then column.
//
// View the tril as a top trapezoid stacked on a bottom rectangle. Assume x
// corresponds to the coordinate (row, col) in the trapezoid, where the row and
// the col both start from 0, then we have:
//
// (f + f + row - 1) * row / 2 <= x [1]
// (f + f + row) * (row + 1) / 2 > x [2]
//
// Therefore, row is the maximum integer satisfying the following inequality:
//
// (row + 2f - 1)row <= 2x
// row^2 + (2f-1)row - 2x <= 0. [3]
//
// Based on inequality [3], we have the following coefficients for formula of
// root:
// a = 1
// b = 2f - 1
// c = -2x
// There are two roots, and we should use the largest integer that does not
// exceed the root on the right. Intuitively, it is because:
// i) the valid solution range of row is between two roots, as it is <= 0;
// ii) as we count in more rows, the total # of elements should always
// increase, hence so does the left-hand side row^2 + (2f-1)row - 2x.
// Therefore, the valid range of row lies in between the nadir point and
// the larger root on the right.
// Full proof can be derived from inequality [2]. So, we calculate the result
// coordinate as:
//
// row = floor((-b + sqrt(b^2 - 4c)) / 2)
// col = x - (f + f + row - 1) * row / 2
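//
// Worked example: with f = 3 and x = 7, b = 2f - 1 = 5 and c = -2x = -14, so
// sqrt(b^2 - 4c) = sqrt(81) = 9 and row = floor((-5 + 9) / 2) = 2,
// col = 7 - (3 + 3 + 2 - 1) * 2 / 2 = 0, i.e. the first element of the third row.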
__device__
inline void get_coordinate_in_tril_trapezoid(
int64_t f, int64_t x, int64_t & row, int64_t & col) {
f <<= 1; // all statements use 2f, so only calculate it once here.
auto b = f - 1;
auto cX4 = - (x << 3); // 4 * c = 4 * (-2x) = -8x;
row = resolve_root_int(b, cX4, x, 1);
col = x - ((f + row - 1) * row >> 1);
}
// f: the number of elements in the first row of the bottom trapezoid.
// x: the index of the target coordinates ordered by row and then column.
//
// View the triu as a top rectangle stacked on a bottom trapezoid, where the
// trapezoid is upside down. Assume x corresponds to the coordinate (row, col)
// in the bottom trapezoid, where the row and the col start from 0, then we
// have:
//
// (f + f - row + 1) * row / 2 <= x [1]
// (f + f - row) * (row + 1) / 2 > x [2]
//
// Therefore, row is the maximum integer satisfying the following inequality:
//
// (-row + 2f + 1)row <= 2x
// row^2 - (2f+1)row + 2x >= 0. [3]
//
// Based on inequality [3], we have the following coefficients for formula of
// root:
// a = 1
// b = -1 - 2f
// c = 2x
// There are two roots, and we should use the largest integer that does not
// exceed the root on the left. Intuitively, it is because:
// i) the valid solution range of row is outside of the two roots, as it is <
// > 0;
// ii) as we count in more rows, the total # of elements should always
// increase, hence so does the left-hand side row^2 - (2f+1)row + 2x.
// Therefore, the valid range of row lies to the left of the smaller root
// on the left.
// Full proof can be derived from inequality [2]. So, we calculate the result
// coordinate as:
//
// row = floor((-b - sqrt(b^2 - 4c)) / 2)
// col = x - (f + f - row + 1) * row / 2
__device__
inline void get_coordinate_in_triu_trapezoid(
int64_t f, int64_t x, int64_t & row, int64_t & col) {
f <<= 1; // all statements use 2f, so only calculate it once here.
auto b = -1 - f;
auto cX4 = x << 3; // 4 * c = 4 * (2x) = 8x;
row = resolve_root_int(b, cX4, x, -1);
col = x - ((f - row + 1) * row >> 1) + row;
}
} // namespace
template <typename scalar_t>
__global__
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
void tril_indices_kernel(scalar_t * tensor,
int64_t row_offset,
int64_t m_first_row,
int64_t col,
int64_t trapezoid_size,
int64_t tril_size) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index < tril_size) {
int64_t r, c;
if (linear_index < trapezoid_size) {
// the coordinate is within the top trapezoid
get_coordinate_in_tril_trapezoid(m_first_row, linear_index, r, c);
} else {
// the coordinate falls in the bottom rectangle
auto surplus = linear_index - trapezoid_size;
// add the height of trapezoid: m_last_row (col) - m_first_row + 1
r = surplus / col + col - m_first_row + 1;
c = surplus % col;
}
r += row_offset;
tensor[linear_index] = r;
tensor[linear_index + tril_size] = c;
}
}
// Some Large test cases for the fallback binary search path is disabled by
// default to speed up CI tests and to avoid OOM error. When modifying the
// implementation, please enable them in test/test_cuda.py and make sure they
// pass on your local server.
Tensor tril_indices_cuda(
int64_t row, int64_t col, int64_t offset, const TensorOptions& options) {
check_args(row, col, options);
auto tril_size = get_tril_size(row, col, offset);
auto tensor = empty_cuda({2, tril_size}, options);
if (tril_size > 0) {
auto m_first_row = offset > 0 ?
std::min<int64_t>(col, 1 + offset) : // upper bounded by col
row + offset > 0; // either 0 or 1
auto trapezoid_row_offset = std::max<int64_t>(0, -offset);
auto rectangle_row_offset = trapezoid_row_offset + col - m_first_row + 1;
int64_t rectangle_size = 0;
if (rectangle_row_offset < row) {
rectangle_size = (row - rectangle_row_offset) * col;
}
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid;
// using tril_size instead of tensor.numel(), as each thread takes care of
// two elements in the tensor.
AT_CHECK(
cuda::getApplyGrid(tril_size, dim_grid, tensor.get_device()),
"unable to get dim grid");
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, tensor.scalar_type(), "tril_indices_cuda", [&] {
tril_indices_kernel<<<
dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
tensor.data<scalar_t>(),
trapezoid_row_offset,
m_first_row,
col,
tril_size - rectangle_size,
tril_size);
});
}
return tensor;
}
template <typename scalar_t>
__global__
void triu_indices_kernel(scalar_t * tensor,
int64_t col_offset,
int64_t m_first_row,
int64_t col,
int64_t rectangle_size,
int64_t triu_size) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index < triu_size) {
int64_t r, c;
if (linear_index < rectangle_size) {
// the coordinate is within the top rectangle
r = linear_index / col;
c = linear_index % col;
} else {
// the coordinate falls in the bottom trapezoid
get_coordinate_in_triu_trapezoid(
m_first_row, linear_index - rectangle_size, r, c);
r += rectangle_size / col;
}
c += col_offset;
tensor[linear_index] = r;
tensor[linear_index + triu_size] = c;
}
}
// Some Large test cases for the fallback binary search path is disabled by
// default to speed up CI tests and to avoid OOM error. When modifying the
// implementation, please enable them in test/test_cuda.py and make sure they
// pass on your local server.
Tensor triu_indices_cuda(
int64_t row, int64_t col, int64_t offset, const TensorOptions& options) {
check_args(row, col, options);
auto triu_size = row * col - get_tril_size(row, col, offset - 1);
auto tensor = empty_cuda({2, triu_size}, options);
if (triu_size > 0) {
// # of triu elements in the first row
auto m_first_row = offset > 0 ?
std::max<int64_t>(col - offset, 0) : // upper bounded by col
col;
// size of the top rectangle
int64_t rectangle_size = 0;
if (offset < 0) {
rectangle_size = std::min<int64_t>(row, -offset) * col;
}
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid;
// using triu_size instead of tensor.numel(), as each thread takes care of
// two elements in the tensor.
AT_CHECK(
cuda::getApplyGrid(triu_size, dim_grid, tensor.get_device()),
"unable to get dim grid");
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, tensor.scalar_type(), "triu_indices_cuda", [&] {
triu_indices_kernel<<<
dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
tensor.data<scalar_t>(),
std::max<int64_t>(0, offset),
m_first_row,
col,
rectangle_size,
triu_size);
});
}
return tensor;
}
}} // namespace at::native
|
c7f160e8486ef75267af05328acd6f9b2bb40d72.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "KerBitReversalMatrixRow.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
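// For the first argv[1] matrix sizes, sweep all 20 block shapes; each configuration
// is warmed up with 10 launches and then timed over 1000 launches.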
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_lpDstRe = NULL;
hipMalloc(&d_lpDstRe, XSIZE*YSIZE);
float *d_lpDstIm = NULL;
hipMalloc(&d_lpDstIm, XSIZE*YSIZE);
float *d_lpSrcRe = NULL;
hipMalloc(&d_lpSrcRe, XSIZE*YSIZE);
float *d_lpSrcIm = NULL;
hipMalloc(&d_lpSrcIm, XSIZE*YSIZE);
int width = XSIZE;
int log2x = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(( KerBitReversalMatrixRow), dim3(gridBlock), dim3(threadBlock), 0, 0, d_lpDstRe, d_lpDstIm, d_lpSrcRe, d_lpSrcIm, width, log2x);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(( KerBitReversalMatrixRow), dim3(gridBlock), dim3(threadBlock), 0, 0, d_lpDstRe, d_lpDstIm, d_lpSrcRe, d_lpSrcIm, width, log2x);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(( KerBitReversalMatrixRow), dim3(gridBlock), dim3(threadBlock), 0, 0, d_lpDstRe, d_lpDstIm, d_lpSrcRe, d_lpSrcIm, width, log2x);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | c7f160e8486ef75267af05328acd6f9b2bb40d72.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "KerBitReversalMatrixRow.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
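// For the first argv[1] matrix sizes, sweep all 20 block shapes; each configuration
// is warmed up with 10 launches and then timed over 1000 launches.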
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_lpDstRe = NULL;
cudaMalloc(&d_lpDstRe, XSIZE*YSIZE);
float *d_lpDstIm = NULL;
cudaMalloc(&d_lpDstIm, XSIZE*YSIZE);
float *d_lpSrcRe = NULL;
cudaMalloc(&d_lpSrcRe, XSIZE*YSIZE);
float *d_lpSrcIm = NULL;
cudaMalloc(&d_lpSrcIm, XSIZE*YSIZE);
int width = XSIZE;
int log2x = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
KerBitReversalMatrixRow<<<gridBlock,threadBlock>>>(d_lpDstRe,d_lpDstIm,d_lpSrcRe,d_lpSrcIm,width,log2x);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
KerBitReversalMatrixRow<<<gridBlock,threadBlock>>>(d_lpDstRe,d_lpDstIm,d_lpSrcRe,d_lpSrcIm,width,log2x);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
KerBitReversalMatrixRow<<<gridBlock,threadBlock>>>(d_lpDstRe,d_lpDstIm,d_lpSrcRe,d_lpSrcIm,width,log2x);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
467acf2909cb03f0d4221f5f05b7c3fa30199566.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/scan.h>
#include <thrust/execution_policy.h>
int checkResults(float*res, float* cudaRes,int length)
{
int nDiffs=0;
const float smallVal = .3f; // Keeping this extra high as we have repetitive addition and sequence matters
for(int i=0; i<length; i++)
if(fabs(cudaRes[i]-res[i])>smallVal)
{nDiffs++;
//printf("%f %f\n",cudaRes[i],res[i]);
}
return nDiffs;
}
void initializeArray(FILE* fp,float* arr, int nElements)
{
for( int i=0; i<nElements; i++){
int r=fscanf(fp,"%f",&arr[i]);
if(r == EOF){
rewind(fp);
}
arr[i]-=5; // This is to make the data zero mean. Otherwise we reach large numbers and lose precision
}
}
void inclusiveScan_SEQ(float *in, float *out,int length) {
float sum=0.f;
for (int i =0; i < length; i++) {
sum+=in[i];
out[i]=sum;
}
}
int main(int argc, char* argv[]) {
if(argc!=2){
printf("Usage %s N\n",argv[0]);
return 1;
}
int N = atoi(argv[1]);
FILE *fp = fopen("problem1.inp","r");
int size = N * sizeof(float);
float *in = (float *)malloc(size);
float *out = (float *)malloc(size);
float *cuda_out = (float *)malloc(size);
float time = 0.f;
hipEvent_t startEvent_inc, stopEvent_inc;
hipEventCreate(&startEvent_inc);
hipEventCreate(&stopEvent_inc);
initializeArray(fp,in, N);
thrust::host_vector<float> H(N);
for (int i = 0; i < N; i++)
{
H[i] = in[i];
}
hipEventRecord(startEvent_inc,0); // starting timing for inclusive
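// Everything between the two event records is timed: the host-to-device copy (device_vector construction from H), the inclusive scan, and the copy back into cuda_out.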
thrust::device_vector<float> D = H;
thrust::inclusive_scan(D.begin(), D.end(), D.begin());
thrust::copy(D.begin(), D.end(), cuda_out);
hipEventRecord(stopEvent_inc,0); //ending timing for inclusive
hipEventSynchronize(stopEvent_inc);
hipEventElapsedTime(&time, startEvent_inc, stopEvent_inc);
inclusiveScan_SEQ(in, out, N);
int nDiffs = checkResults(out, cuda_out,N);
if(nDiffs)printf("Test Failed\n"); // This should never print
printf("%d\n%f\n%f\n",N,cuda_out[N-1],time);
// Release host buffers, the input file handle, and the timing events before exiting.
fclose(fp);
free(in); free(out); free(cuda_out);
hipEventDestroy(startEvent_inc); hipEventDestroy(stopEvent_inc);
return(0);
} | 467acf2909cb03f0d4221f5f05b7c3fa30199566.cu | #include <iostream>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/scan.h>
#include <thrust/execution_policy.h>
int checkResults(float*res, float* cudaRes,int length)
{
int nDiffs=0;
const float smallVal = .3f; // Keeping this extra high as we have repetitive addition and sequence matters
for(int i=0; i<length; i++)
if(fabs(cudaRes[i]-res[i])>smallVal)
{nDiffs++;
//printf("%f %f\n",cudaRes[i],res[i]);
}
return nDiffs;
}
void initializeArray(FILE* fp,float* arr, int nElements)
{
for( int i=0; i<nElements; i++){
int r=fscanf(fp,"%f",&arr[i]);
if(r == EOF){
rewind(fp);
}
arr[i]-=5; // This is to make the data zero mean. Otherwise we reach large numbers and lose precision
}
}
void inclusiveScan_SEQ(float *in, float *out,int length) {
float sum=0.f;
for (int i =0; i < length; i++) {
sum+=in[i];
out[i]=sum;
}
}
int main(int argc, char* argv[]) {
if(argc!=2){
printf("Usage %s N\n",argv[0]);
return 1;
}
int N = atoi(argv[1]);
FILE *fp = fopen("problem1.inp","r");
int size = N * sizeof(float);
float *in = (float *)malloc(size);
float *out = (float *)malloc(size);
float *cuda_out = (float *)malloc(size);
float time = 0.f;
cudaEvent_t startEvent_inc, stopEvent_inc;
cudaEventCreate(&startEvent_inc);
cudaEventCreate(&stopEvent_inc);
initializeArray(fp,in, N);
thrust::host_vector<float> H(N);
for (int i = 0; i < N; i++)
{
H[i] = in[i];
}
cudaEventRecord(startEvent_inc,0); // starting timing for inclusive
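// Everything between the two event records is timed: the host-to-device copy (device_vector construction from H), the inclusive scan, and the copy back into cuda_out.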
thrust::device_vector<float> D = H;
thrust::inclusive_scan(D.begin(), D.end(), D.begin());
thrust::copy(D.begin(), D.end(), cuda_out);
cudaEventRecord(stopEvent_inc,0); //ending timing for inclusive
cudaEventSynchronize(stopEvent_inc);
cudaEventElapsedTime(&time, startEvent_inc, stopEvent_inc);
inclusiveScan_SEQ(in, out, N);
int nDiffs = checkResults(out, cuda_out,N);
if(nDiffs)printf("Test Failed\n"); // This should never print
printf("%d\n%f\n%f\n",N,cuda_out[N-1],time);
// Release host buffers, the input file handle, and the timing events before exiting.
fclose(fp);
free(in); free(out); free(cuda_out);
cudaEventDestroy(startEvent_inc); cudaEventDestroy(stopEvent_inc);
return(0);
} |
478b716d036a2097473cb68d88330b1f83169fdd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
#include <stdio.h>
#include <stdlib.h>
#include <cutil_inline.h>
////////////////////////////////////////////////////////////////////////////////
// Global types and parameters
////////////////////////////////////////////////////////////////////////////////
#define VOTE_DATA_GROUP 4
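// Number of warps' worth of test data generated for kernels 1 and 2; also used as the grid size when launching them.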
////////////////////////////////////////////////////////////////////////////////
// CUDA Voting Kernel functions
////////////////////////////////////////////////////////////////////////////////
#include "simpleVote_kernel.hip"
// Generate the test pattern for Tests 1 and 2
void genVoteTestPattern(unsigned int *VOTE_PATTERN, int size)
{
// For testing VOTE.Any (all of these threads will return 0)
for (int i=0; i < size/4; i++)
VOTE_PATTERN[i] = 0x00000000;
// For testing VOTE.Any (1/2 these threads will return 1)
for (int i=2*size/8; i < 4*size/8; i++)
VOTE_PATTERN[i] = (i&0x01) ? i : 0;
// For testing VOTE.all (1/2 of these threads will return 0)
for (int i=2*size/4; i < 3*size/4; i++)
VOTE_PATTERN[i] = (i&0x01) ? 0 : i;
// For testing VOTE.all (all of these threads will return 1)
for (int i=3*size/4; i < 4*size/4; i++)
VOTE_PATTERN[i] = 0xffffffff;
}
int checkErrors1(unsigned int *h_result, int start, int end, int warp_size, char *voteType)
{
int i, sum=0;
for (sum=0, i=start; i < end; i++) {
sum += h_result[i];
}
if (sum > 0) {
printf("\t<%s>[%d - %d] = ", voteType, start, end-1);
for (i=start; i < end; i++) {
printf("%d", h_result[i]);
}
printf(" - FAILED!\n");
}
return (sum>0);
}
int checkErrors2(unsigned int *h_result, int start, int end, int warp_size, char *voteType)
{
int i, sum=0;
for (sum=0, i=start; i < end; i++) {
sum += h_result[i];
}
if (sum!=warp_size) {
printf("\t<%s>[%d - %d] = ", voteType, start, end-1);
for (i=start; i < end; i++) {
printf("%d", h_result[i]);
}
printf(" - FAILED!\n");
}
return (sum!=warp_size);
}
// Verification code for Kernel #1
int checkResultsVoteAnyKernel1(unsigned int *h_result, int size, int warp_size)
{
int error_count = 0;
error_count += checkErrors1(h_result, 0, VOTE_DATA_GROUP*warp_size/4, warp_size, "Vote.Any");
error_count += checkErrors2(h_result, VOTE_DATA_GROUP*warp_size/4, 2*VOTE_DATA_GROUP*warp_size/4, warp_size, "Vote.Any");
error_count += checkErrors2(h_result, 2*VOTE_DATA_GROUP*warp_size/4, 3*VOTE_DATA_GROUP*warp_size/4, warp_size, "Vote.Any");
error_count += checkErrors2(h_result, 3*VOTE_DATA_GROUP*warp_size/4, 4*VOTE_DATA_GROUP*warp_size/4, warp_size, "Vote.Any");
return error_count;
}
// Verification code for Kernel #2
int checkResultsVoteAllKernel2(unsigned int *h_result, int size, int warp_size)
{
int error_count = 0;
#if 1 // results behave differently because DEVICE_EMULATION has WARP size of 1
error_count += checkErrors1(h_result, 0, VOTE_DATA_GROUP*warp_size/4, warp_size, "Vote.All");
error_count += checkErrors2(h_result, VOTE_DATA_GROUP*warp_size/4, 2*VOTE_DATA_GROUP*warp_size/4, warp_size, "Vote.All");
error_count += checkErrors2(h_result, 2*VOTE_DATA_GROUP*warp_size/4, 3*VOTE_DATA_GROUP*warp_size/4, warp_size, "Vote.All");
error_count += checkErrors2(h_result, 3*VOTE_DATA_GROUP*warp_size/4, 4*VOTE_DATA_GROUP*warp_size/4, warp_size, "Vote.All");
#else
error_count += checkErrors1(h_result, 0, VOTE_DATA_GROUP*warp_size/4, warp_size, "Vote.All");
error_count += checkErrors1(h_result, VOTE_DATA_GROUP*warp_size/4, 2*VOTE_DATA_GROUP*warp_size/4, warp_size, "Vote.All");
error_count += checkErrors1(h_result, 2*VOTE_DATA_GROUP*warp_size/4, 3*VOTE_DATA_GROUP*warp_size/4, warp_size, "Vote.All");
error_count += checkErrors2(h_result, 3*VOTE_DATA_GROUP*warp_size/4, 4*VOTE_DATA_GROUP*warp_size/4, warp_size, "Vote.All");
#endif
return error_count;
}
// Verification code for Kernel #3
int checkResultsVoteAnyKernel3(bool *hinfo, int size)
{
int i, error_count = 0;
#if 1 // comparison is different due to DEVICE_EMULATION having a WARP size of 1
for (i = 0; i < size * 3; i++) {
// Expected pattern: the first 'size' flags are false, the remaining 2*size flags are true (warp size is forced to 1 here).
if (i >= 0 && i < size) {
if (hinfo[i] != false) {
error_count++;
}
} else if (i >= size && i < 2*size) {
if (hinfo[i] != true) {
error_count++;
}
} else if (i >= 2*size && i < 3*size) {
if (hinfo[i] != true) {
error_count++;
}
}
}
#else
for (i = 0; i < size * 3; i++) {
switch(i % 3) {
case 0:
// First warp should be all zeros.
if (hinfo[i] != (i >= size * 1)) {
error_count++;
}
break;
case 1:
// First warp and half of second should be all zeros.
if (hinfo[i] != (i >= size * 3 / 2)) {
error_count++;
}
break;
case 2:
// First two warps should be all zeros.
if (hinfo[i] != (i >= size * 2)) {
error_count++;
}
break;
}
}
#endif
printf((error_count == 0) ? "\tPASSED!\n" : "\tFAILED!\n");
return error_count;
}
int main(int argc, char **argv)
{
unsigned int *h_input, *h_result;
unsigned int *d_input, *d_result;
bool *dinfo = NULL, *hinfo = NULL;
int error_count[3] = { 0, 0, 0 };
hipDeviceProp_t deviceProp;
int dev, warp_size;
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") )
cutilDeviceInit(argc, argv);
else
hipSetDevice( dev = cutGetMaxGflopsDeviceId() );
cutilSafeCall(hipChooseDevice(&dev, &deviceProp));
cutilSafeCall(hipGetDeviceProperties(&deviceProp, 0));
#if 1
warp_size = 1;
#else
if ((deviceProp.major > 1 || deviceProp.minor >= 2))
{
printf("simpleVoteIntrinsics: Using Device %d: \"%s\"\n", dev, deviceProp.name);
} else
{
printf("simpleVoteIntrinsics: requires Compute Capability 1.2 or higher\n");
printf("Aborting test\n");
printf("TEST PASSED\n");
cutilExit(argc,argv);
}
warp_size = deviceProp.warpSize;
#endif
h_input = (unsigned int *)malloc( VOTE_DATA_GROUP*warp_size * sizeof(unsigned int));
h_result = (unsigned int *)malloc( VOTE_DATA_GROUP*warp_size * sizeof(unsigned int));
cutilSafeCall( hipMalloc((void **)&d_input, VOTE_DATA_GROUP*warp_size * sizeof(unsigned int)) );
cutilSafeCall( hipMalloc((void **)&d_result, VOTE_DATA_GROUP*warp_size * sizeof(unsigned int)) );
genVoteTestPattern(h_input, VOTE_DATA_GROUP*warp_size);
cutilSafeCall( hipMemcpy(d_input, h_input, VOTE_DATA_GROUP*warp_size * sizeof(unsigned int), hipMemcpyHostToDevice) );
// Start of Vote Any Test Kernel #1
printf("[VOTE Kernel Test 1/3]\n");
printf("\tRunning <<Vote.Any>> kernel1 ...\n");
{
cutilSafeCall( hipDeviceSynchronize() );
dim3 gridBlock(VOTE_DATA_GROUP, 1);
dim3 threadBlock(warp_size, 1);
hipLaunchKernelGGL(( VoteAnyKernel1), dim3(gridBlock), dim3(threadBlock), 0, 0, d_input, d_result, VOTE_DATA_GROUP*warp_size);
cutilCheckMsg("VoteAnyKernel() execution failed\n");
cutilSafeCall( hipDeviceSynchronize() );
}
cutilSafeCall( hipMemcpy(h_result, d_result, VOTE_DATA_GROUP*warp_size * sizeof(unsigned int), hipMemcpyDeviceToHost) );
error_count[0] += checkResultsVoteAnyKernel1(h_result, VOTE_DATA_GROUP*warp_size, warp_size);
// Start of Vote All Test Kernel #2
printf("\n[VOTE Kernel Test 2/3]\n");
printf("\tRunning <<Vote.All>> kernel2 ...\n");
{
cutilSafeCall( hipDeviceSynchronize() );
dim3 gridBlock(VOTE_DATA_GROUP, 1);
dim3 threadBlock(warp_size, 1);
hipLaunchKernelGGL(( VoteAllKernel2), dim3(gridBlock), dim3(threadBlock), 0, 0, d_input, d_result, VOTE_DATA_GROUP*warp_size);
cutilCheckMsg("VoteAllKernel() execution failed\n");
cutilSafeCall( hipDeviceSynchronize() );
}
cutilSafeCall( hipMemcpy(h_result, d_result, VOTE_DATA_GROUP*warp_size * sizeof(unsigned int), hipMemcpyDeviceToHost) );
error_count[1] += checkResultsVoteAllKernel2(h_result, VOTE_DATA_GROUP*warp_size, warp_size);
// Second Vote Kernel Test #3 (both Any/All)
hinfo = (bool *)calloc(warp_size * 3 * 3, sizeof(bool));
hipMalloc((void**)&dinfo, warp_size * 3 * 3 * sizeof(bool));
hipMemcpy(dinfo, hinfo, warp_size * 3 * 3 * sizeof(bool),
hipMemcpyHostToDevice);
printf("\n[VOTE Kernel Test 3/3]\n");
printf("\tRunning <<Vote.Any>> kernel3 ...\n");
{
cutilSafeCall( hipDeviceSynchronize() );
hipLaunchKernelGGL(( VoteAnyKernel3), dim3(3), dim3(warp_size), 0, 0, dinfo, warp_size);
cutilSafeCall( hipDeviceSynchronize() );
}
hipMemcpy(hinfo, dinfo, warp_size * 3 * 3 * sizeof(bool),
hipMemcpyDeviceToHost);
error_count[2] = checkResultsVoteAnyKernel3(hinfo, warp_size * 3);
printf("\n");
if (error_count[0] == 0 &&
error_count[1] == 0 &&
error_count[2] == 0)
{
printf("TEST PASSED\n");
}
else {
printf("TEST FAILED\n");
}
// Now free these resources for Test #1,2
cutilSafeCall( hipFree(d_input) );
cutilSafeCall( hipFree(d_result) );
free(h_input);
free(h_result);
// Free resources from Test #3
free(hinfo); hipFree(dinfo);
printf("\tShutting down...\n");
hipDeviceReset();
cutilExit(argc, argv);
}
| 478b716d036a2097473cb68d88330b1f83169fdd.cu | /*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
#include <stdio.h>
#include <stdlib.h>
#include <cutil_inline.h>
////////////////////////////////////////////////////////////////////////////////
// Global types and parameters
////////////////////////////////////////////////////////////////////////////////
#define VOTE_DATA_GROUP 4
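// Number of warps' worth of test data generated for kernels 1 and 2; also used as the grid size when launching them.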
////////////////////////////////////////////////////////////////////////////////
// CUDA Voting Kernel functions
////////////////////////////////////////////////////////////////////////////////
#include "simpleVote_kernel.cu"
// Generate the test pattern for Tests 1 and 2
void genVoteTestPattern(unsigned int *VOTE_PATTERN, int size)
{
// For testing VOTE.Any (all of these threads will return 0)
for (int i=0; i < size/4; i++)
VOTE_PATTERN[i] = 0x00000000;
// For testing VOTE.Any (1/2 these threads will return 1)
for (int i=2*size/8; i < 4*size/8; i++)
VOTE_PATTERN[i] = (i&0x01) ? i : 0;
// For testing VOTE.all (1/2 of these threads will return 0)
for (int i=2*size/4; i < 3*size/4; i++)
VOTE_PATTERN[i] = (i&0x01) ? 0 : i;
// For testing VOTE.all (all of these threads will return 1)
for (int i=3*size/4; i < 4*size/4; i++)
VOTE_PATTERN[i] = 0xffffffff;
}
int checkErrors1(unsigned int *h_result, int start, int end, int warp_size, char *voteType)
{
int i, sum=0;
for (sum=0, i=start; i < end; i++) {
sum += h_result[i];
}
if (sum > 0) {
printf("\t<%s>[%d - %d] = ", voteType, start, end-1);
for (i=start; i < end; i++) {
printf("%d", h_result[i]);
}
printf(" - FAILED!\n");
}
return (sum>0);
}
int checkErrors2(unsigned int *h_result, int start, int end, int warp_size, char *voteType)
{
int i, sum=0;
for (sum=0, i=start; i < end; i++) {
sum += h_result[i];
}
if (sum!=warp_size) {
printf("\t<%s>[%d - %d] = ", voteType, start, end-1);
for (i=start; i < end; i++) {
printf("%d", h_result[i]);
}
printf(" - FAILED!\n");
}
return (sum!=warp_size);
}
// Verification code for Kernel #1
int checkResultsVoteAnyKernel1(unsigned int *h_result, int size, int warp_size)
{
int error_count = 0;
error_count += checkErrors1(h_result, 0, VOTE_DATA_GROUP*warp_size/4, warp_size, "Vote.Any");
error_count += checkErrors2(h_result, VOTE_DATA_GROUP*warp_size/4, 2*VOTE_DATA_GROUP*warp_size/4, warp_size, "Vote.Any");
error_count += checkErrors2(h_result, 2*VOTE_DATA_GROUP*warp_size/4, 3*VOTE_DATA_GROUP*warp_size/4, warp_size, "Vote.Any");
error_count += checkErrors2(h_result, 3*VOTE_DATA_GROUP*warp_size/4, 4*VOTE_DATA_GROUP*warp_size/4, warp_size, "Vote.Any");
return error_count;
}
// Verification code for Kernel #2
int checkResultsVoteAllKernel2(unsigned int *h_result, int size, int warp_size)
{
int error_count = 0;
#if 1 // results behave differently because DEVICE_EMULATION has WARP size of 1
error_count += checkErrors1(h_result, 0, VOTE_DATA_GROUP*warp_size/4, warp_size, "Vote.All");
error_count += checkErrors2(h_result, VOTE_DATA_GROUP*warp_size/4, 2*VOTE_DATA_GROUP*warp_size/4, warp_size, "Vote.All");
error_count += checkErrors2(h_result, 2*VOTE_DATA_GROUP*warp_size/4, 3*VOTE_DATA_GROUP*warp_size/4, warp_size, "Vote.All");
error_count += checkErrors2(h_result, 3*VOTE_DATA_GROUP*warp_size/4, 4*VOTE_DATA_GROUP*warp_size/4, warp_size, "Vote.All");
#else
error_count += checkErrors1(h_result, 0, VOTE_DATA_GROUP*warp_size/4, warp_size, "Vote.All");
error_count += checkErrors1(h_result, VOTE_DATA_GROUP*warp_size/4, 2*VOTE_DATA_GROUP*warp_size/4, warp_size, "Vote.All");
error_count += checkErrors1(h_result, 2*VOTE_DATA_GROUP*warp_size/4, 3*VOTE_DATA_GROUP*warp_size/4, warp_size, "Vote.All");
error_count += checkErrors2(h_result, 3*VOTE_DATA_GROUP*warp_size/4, 4*VOTE_DATA_GROUP*warp_size/4, warp_size, "Vote.All");
#endif
return error_count;
}
// Verification code for Kernel #3
int checkResultsVoteAnyKernel3(bool *hinfo, int size)
{
int i, error_count = 0;
#if 1 // comparison is different due to DEVICE_EMULATION having a WARP size of 1
for (i = 0; i < size * 3; i++) {
// Expected pattern: the first 'size' flags are false, the remaining 2*size flags are true (warp size is forced to 1 here).
if (i >= 0 && i < size) {
if (hinfo[i] != false) {
error_count++;
}
} else if (i >= size && i < 2*size) {
if (hinfo[i] != true) {
error_count++;
}
} else if (i >= 2*size && i < 3*size) {
if (hinfo[i] != true) {
error_count++;
}
}
}
#else
for (i = 0; i < size * 3; i++) {
switch(i % 3) {
case 0:
// First warp should be all zeros.
if (hinfo[i] != (i >= size * 1)) {
error_count++;
}
break;
case 1:
// First warp and half of second should be all zeros.
if (hinfo[i] != (i >= size * 3 / 2)) {
error_count++;
}
break;
case 2:
// First two warps should be all zeros.
if (hinfo[i] != (i >= size * 2)) {
error_count++;
}
break;
}
}
#endif
printf((error_count == 0) ? "\tPASSED!\n" : "\tFAILED!\n");
return error_count;
}
int main(int argc, char **argv)
{
unsigned int *h_input, *h_result;
unsigned int *d_input, *d_result;
bool *dinfo = NULL, *hinfo = NULL;
int error_count[3] = { 0, 0, 0 };
cudaDeviceProp deviceProp;
int dev, warp_size;
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") )
cutilDeviceInit(argc, argv);
else
cudaSetDevice( dev = cutGetMaxGflopsDeviceId() );
cutilSafeCall(cudaChooseDevice(&dev, &deviceProp));
cutilSafeCall(cudaGetDeviceProperties(&deviceProp, 0));
#if 1
warp_size = 1;
#else
if ((deviceProp.major > 1 || deviceProp.minor >= 2))
{
printf("simpleVoteIntrinsics: Using Device %d: \"%s\"\n", dev, deviceProp.name);
} else
{
printf("simpleVoteIntrinsics: requires Compute Capability 1.2 or higher\n");
printf("Aborting test\n");
printf("TEST PASSED\n");
cutilExit(argc,argv);
}
warp_size = deviceProp.warpSize;
#endif
h_input = (unsigned int *)malloc( VOTE_DATA_GROUP*warp_size * sizeof(unsigned int));
h_result = (unsigned int *)malloc( VOTE_DATA_GROUP*warp_size * sizeof(unsigned int));
cutilSafeCall( cudaMalloc((void **)&d_input, VOTE_DATA_GROUP*warp_size * sizeof(unsigned int)) );
cutilSafeCall( cudaMalloc((void **)&d_result, VOTE_DATA_GROUP*warp_size * sizeof(unsigned int)) );
genVoteTestPattern(h_input, VOTE_DATA_GROUP*warp_size);
cutilSafeCall( cudaMemcpy(d_input, h_input, VOTE_DATA_GROUP*warp_size * sizeof(unsigned int), cudaMemcpyHostToDevice) );
// Start of Vote Any Test Kernel #1
printf("[VOTE Kernel Test 1/3]\n");
printf("\tRunning <<Vote.Any>> kernel1 ...\n");
{
cutilSafeCall( cudaThreadSynchronize() );
dim3 gridBlock(VOTE_DATA_GROUP, 1);
dim3 threadBlock(warp_size, 1);
VoteAnyKernel1<<<gridBlock, threadBlock>>>(d_input, d_result, VOTE_DATA_GROUP*warp_size);
cutilCheckMsg("VoteAnyKernel() execution failed\n");
cutilSafeCall( cudaThreadSynchronize() );
}
cutilSafeCall( cudaMemcpy(h_result, d_result, VOTE_DATA_GROUP*warp_size * sizeof(unsigned int), cudaMemcpyDeviceToHost) );
error_count[0] += checkResultsVoteAnyKernel1(h_result, VOTE_DATA_GROUP*warp_size, warp_size);
// Start of Vote All Test Kernel #2
printf("\n[VOTE Kernel Test 2/3]\n");
printf("\tRunning <<Vote.All>> kernel2 ...\n");
{
cutilSafeCall( cudaThreadSynchronize() );
dim3 gridBlock(VOTE_DATA_GROUP, 1);
dim3 threadBlock(warp_size, 1);
VoteAllKernel2<<<gridBlock, threadBlock>>>(d_input, d_result, VOTE_DATA_GROUP*warp_size);
cutilCheckMsg("VoteAllKernel() execution failed\n");
cutilSafeCall( cudaThreadSynchronize() );
}
cutilSafeCall( cudaMemcpy(h_result, d_result, VOTE_DATA_GROUP*warp_size * sizeof(unsigned int), cudaMemcpyDeviceToHost) );
error_count[1] += checkResultsVoteAllKernel2(h_result, VOTE_DATA_GROUP*warp_size, warp_size);
// Second Vote Kernel Test #3 (both Any/All)
hinfo = (bool *)calloc(warp_size * 3 * 3, sizeof(bool));
cudaMalloc((void**)&dinfo, warp_size * 3 * 3 * sizeof(bool));
cudaMemcpy(dinfo, hinfo, warp_size * 3 * 3 * sizeof(bool),
cudaMemcpyHostToDevice);
printf("\n[VOTE Kernel Test 3/3]\n");
printf("\tRunning <<Vote.Any>> kernel3 ...\n");
{
cutilSafeCall( cudaThreadSynchronize() );
VoteAnyKernel3<<<3, warp_size>>>(dinfo, warp_size);
cutilSafeCall( cudaThreadSynchronize() );
}
cudaMemcpy(hinfo, dinfo, warp_size * 3 * 3 * sizeof(bool),
cudaMemcpyDeviceToHost);
error_count[2] = checkResultsVoteAnyKernel3(hinfo, warp_size * 3);
printf("\n");
if (error_count[0] == 0 &&
error_count[1] == 0 &&
error_count[2] == 0)
{
printf("TEST PASSED\n");
}
else {
printf("TEST FAILED\n");
}
// Now free these resources for Test #1,2
cutilSafeCall( cudaFree(d_input) );
cutilSafeCall( cudaFree(d_result) );
free(h_input);
free(h_result);
// Free resources from Test #3
free(hinfo); cudaFree(dinfo);
printf("\tShutting down...\n");
cudaThreadExit();
cutilExit(argc, argv);
}
|
b47920b23a7c0c9e20de2aacd2b6e7bce7d7fcaf.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "unary_ops.cuh"
#include <cudf/unary.hpp>
#include <cudf/legacy/copying.hpp>
#include <cudf/utilities/legacy/type_dispatcher.hpp>
#include <cmath>
#include <algorithm>
#include <type_traits>
namespace cudf {
namespace detail {
// trig functions
struct DeviceSin {
template<typename T>
__device__
T apply(T data) {
return std::sin(data);
}
};
struct DeviceCos {
template<typename T>
__device__
T apply(T data) {
return std::cos(data);
}
};
struct DeviceTan {
template<typename T>
__device__
T apply(T data) {
return std::tan(data);
}
};
struct DeviceArcSin {
template<typename T>
__device__
T apply(T data) {
return std::asin(data);
}
};
struct DeviceArcCos {
template<typename T>
__device__
T apply(T data) {
return std::acos(data);
}
};
struct DeviceArcTan {
template<typename T>
__device__
T apply(T data) {
return std::atan(data);
}
};
// exponential functions
struct DeviceExp {
template<typename T>
__device__
T apply(T data) {
return ::exp(data);
}
};
struct DeviceLog {
template<typename T>
__device__
T apply(T data) {
return ::log(data);
}
};
struct DeviceSqrt {
template<typename T>
__device__
T apply(T data) {
return std::sqrt(data);
}
};
// rounding functions
struct DeviceCeil {
template<typename T>
__device__
T apply(T data) {
return ::ceil(data);
}
};
struct DeviceFloor {
template<typename T>
__device__
T apply(T data) {
return ::floor(data);
}
};
struct DeviceAbs {
template<typename T>
__device__
T apply(T data) {
return std::abs(data);
}
};
// bitwise op
struct DeviceInvert {
// TODO: maybe sfinae overload this for cudf::bool8
template<typename T>
__device__
T apply(T data) {
return ~data;
}
};
// logical op
struct DeviceNot {
template<typename T>
__device__
cudf::bool8 apply(T data) {
return static_cast<cudf::bool8>( !data );
}
};
template<typename T, typename F>
static void launch(gdf_column const* input, gdf_column *output) {
cudf::unary::Launcher<T, T, F>::launch(input, output);
}
template <typename F>
struct MathOpDispatcher {
template <typename T>
typename std::enable_if_t<std::is_arithmetic<T>::value, void>
operator()(gdf_column const* input, gdf_column *output) {
launch<T, F>(input, output);
}
template <typename T>
typename std::enable_if_t<!std::is_arithmetic<T>::value, void>
operator()(gdf_column const* input, gdf_column *output) {
CUDF_FAIL("Unsupported datatype for operation");
}
};
template <typename F>
struct BitwiseOpDispatcher {
template <typename T>
typename std::enable_if_t<std::is_integral<T>::value, void>
operator()(gdf_column const* input, gdf_column *output) {
launch<T, F>(input, output);
}
template <typename T>
typename std::enable_if_t<!std::is_integral<T>::value, void>
operator()(gdf_column const* input, gdf_column *output) {
CUDF_FAIL("Unsupported datatype for operation");
}
};
template <typename F>
struct LogicalOpDispatcher {
private:
template <typename T>
static constexpr bool is_supported() {
return std::is_arithmetic<T>::value ||
std::is_same<T, cudf::bool8>::value;
// TODO: try using member detector
// std::is_member_function_pointer<decltype(&T::operator!)>::value;
}
public:
template <typename T>
typename std::enable_if_t<is_supported<T>(), void>
operator()(gdf_column const* input, gdf_column *output) {
cudf::unary::Launcher<T, cudf::bool8, F>::launch(input, output);
}
template <typename T>
typename std::enable_if_t<!is_supported<T>(), void>
operator()(gdf_column const* input, gdf_column *output) {
CUDF_FAIL("Unsupported datatype for operation");
}
};
} // namespace detail
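// unary_operation allocates the output column (GDF_BOOL8 for NOT, otherwise the input's type), checks and propagates the validity mask, then type-dispatches to the matching device functor above.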
gdf_column unary_operation(gdf_column const& input, unary_op op) {
gdf_column output{};
if (op == unary_op::NOT)
{
// TODO: replace this with a proper column constructor once
// cudf::column is implemented
bool allocate_mask = (input.valid != nullptr);
output = cudf::allocate_column(GDF_BOOL8, input.size, allocate_mask);
}
else
output = cudf::allocate_like(input);
if (input.size == 0) return output;
cudf::unary::handleChecksAndValidity(input, output);
switch(op){
case unary_op::SIN:
cudf::type_dispatcher(
input.dtype,
detail::MathOpDispatcher<detail::DeviceSin>{},
&input, &output);
break;
case unary_op::COS:
cudf::type_dispatcher(
input.dtype,
detail::MathOpDispatcher<detail::DeviceCos>{},
&input, &output);
break;
case unary_op::TAN:
cudf::type_dispatcher(
input.dtype,
detail::MathOpDispatcher<detail::DeviceTan>{},
&input, &output);
break;
case unary_op::ARCSIN:
cudf::type_dispatcher(
input.dtype,
detail::MathOpDispatcher<detail::DeviceArcSin>{},
&input, &output);
break;
case unary_op::ARCCOS:
cudf::type_dispatcher(
input.dtype,
detail::MathOpDispatcher<detail::DeviceArcCos>{},
&input, &output);
break;
case unary_op::ARCTAN:
cudf::type_dispatcher(
input.dtype,
detail::MathOpDispatcher<detail::DeviceArcTan>{},
&input, &output);
break;
case unary_op::EXP:
cudf::type_dispatcher(
input.dtype,
detail::MathOpDispatcher<detail::DeviceExp>{},
&input, &output);
break;
case unary_op::LOG:
cudf::type_dispatcher(
input.dtype,
detail::MathOpDispatcher<detail::DeviceLog>{},
&input, &output);
break;
case unary_op::SQRT:
cudf::type_dispatcher(
input.dtype,
detail::MathOpDispatcher<detail::DeviceSqrt>{},
&input, &output);
break;
case unary_op::CEIL:
cudf::type_dispatcher(
input.dtype,
detail::MathOpDispatcher<detail::DeviceCeil>{},
&input, &output);
break;
case unary_op::FLOOR:
cudf::type_dispatcher(
input.dtype,
detail::MathOpDispatcher<detail::DeviceFloor>{},
&input, &output);
break;
case unary_op::ABS:
cudf::type_dispatcher(
input.dtype,
detail::MathOpDispatcher<detail::DeviceAbs>{},
&input, &output);
break;
case unary_op::BIT_INVERT:
cudf::type_dispatcher(
input.dtype,
detail::BitwiseOpDispatcher<detail::DeviceInvert>{},
&input, &output);
break;
case unary_op::NOT:
cudf::type_dispatcher(
input.dtype,
detail::LogicalOpDispatcher<detail::DeviceNot>{},
&input, &output);
break;
default:
CUDF_FAIL("Undefined unary operation");
}
return output;
}
} // namespace cudf
| b47920b23a7c0c9e20de2aacd2b6e7bce7d7fcaf.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "unary_ops.cuh"
#include <cudf/unary.hpp>
#include <cudf/legacy/copying.hpp>
#include <cudf/utilities/legacy/type_dispatcher.hpp>
#include <cmath>
#include <algorithm>
#include <type_traits>
namespace cudf {
namespace detail {
// trig functions
struct DeviceSin {
template<typename T>
__device__
T apply(T data) {
return std::sin(data);
}
};
struct DeviceCos {
template<typename T>
__device__
T apply(T data) {
return std::cos(data);
}
};
struct DeviceTan {
template<typename T>
__device__
T apply(T data) {
return std::tan(data);
}
};
struct DeviceArcSin {
template<typename T>
__device__
T apply(T data) {
return std::asin(data);
}
};
struct DeviceArcCos {
template<typename T>
__device__
T apply(T data) {
return std::acos(data);
}
};
struct DeviceArcTan {
template<typename T>
__device__
T apply(T data) {
return std::atan(data);
}
};
// exponential functions
struct DeviceExp {
template<typename T>
__device__
T apply(T data) {
return std::exp(data);
}
};
struct DeviceLog {
template<typename T>
__device__
T apply(T data) {
return std::log(data);
}
};
struct DeviceSqrt {
template<typename T>
__device__
T apply(T data) {
return std::sqrt(data);
}
};
// rounding functions
struct DeviceCeil {
template<typename T>
__device__
T apply(T data) {
return std::ceil(data);
}
};
struct DeviceFloor {
template<typename T>
__device__
T apply(T data) {
return std::floor(data);
}
};
struct DeviceAbs {
template<typename T>
__device__
T apply(T data) {
return std::abs(data);
}
};
// bitwise op
struct DeviceInvert {
// TODO: maybe sfinae overload this for cudf::bool8
template<typename T>
__device__
T apply(T data) {
return ~data;
}
};
// logical op
struct DeviceNot {
template<typename T>
__device__
cudf::bool8 apply(T data) {
return static_cast<cudf::bool8>( !data );
}
};
template<typename T, typename F>
static void launch(gdf_column const* input, gdf_column *output) {
cudf::unary::Launcher<T, T, F>::launch(input, output);
}
template <typename F>
struct MathOpDispatcher {
template <typename T>
typename std::enable_if_t<std::is_arithmetic<T>::value, void>
operator()(gdf_column const* input, gdf_column *output) {
launch<T, F>(input, output);
}
template <typename T>
typename std::enable_if_t<!std::is_arithmetic<T>::value, void>
operator()(gdf_column const* input, gdf_column *output) {
CUDF_FAIL("Unsupported datatype for operation");
}
};
template <typename F>
struct BitwiseOpDispatcher {
template <typename T>
typename std::enable_if_t<std::is_integral<T>::value, void>
operator()(gdf_column const* input, gdf_column *output) {
launch<T, F>(input, output);
}
template <typename T>
typename std::enable_if_t<!std::is_integral<T>::value, void>
operator()(gdf_column const* input, gdf_column *output) {
CUDF_FAIL("Unsupported datatype for operation");
}
};
template <typename F>
struct LogicalOpDispatcher {
private:
template <typename T>
static constexpr bool is_supported() {
return std::is_arithmetic<T>::value ||
std::is_same<T, cudf::bool8>::value;
// TODO: try using member detector
// std::is_member_function_pointer<decltype(&T::operator!)>::value;
}
public:
template <typename T>
typename std::enable_if_t<is_supported<T>(), void>
operator()(gdf_column const* input, gdf_column *output) {
cudf::unary::Launcher<T, cudf::bool8, F>::launch(input, output);
}
template <typename T>
typename std::enable_if_t<!is_supported<T>(), void>
operator()(gdf_column const* input, gdf_column *output) {
CUDF_FAIL("Unsupported datatype for operation");
}
};
} // namespace detail
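// unary_operation allocates the output column (GDF_BOOL8 for NOT, otherwise the input's type), checks and propagates the validity mask, then type-dispatches to the matching device functor above.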
gdf_column unary_operation(gdf_column const& input, unary_op op) {
gdf_column output{};
if (op == unary_op::NOT)
{
// TODO: replace this with a proper column constructor once
// cudf::column is implemented
bool allocate_mask = (input.valid != nullptr);
output = cudf::allocate_column(GDF_BOOL8, input.size, allocate_mask);
}
else
output = cudf::allocate_like(input);
if (input.size == 0) return output;
cudf::unary::handleChecksAndValidity(input, output);
switch(op){
case unary_op::SIN:
cudf::type_dispatcher(
input.dtype,
detail::MathOpDispatcher<detail::DeviceSin>{},
&input, &output);
break;
case unary_op::COS:
cudf::type_dispatcher(
input.dtype,
detail::MathOpDispatcher<detail::DeviceCos>{},
&input, &output);
break;
case unary_op::TAN:
cudf::type_dispatcher(
input.dtype,
detail::MathOpDispatcher<detail::DeviceTan>{},
&input, &output);
break;
case unary_op::ARCSIN:
cudf::type_dispatcher(
input.dtype,
detail::MathOpDispatcher<detail::DeviceArcSin>{},
&input, &output);
break;
case unary_op::ARCCOS:
cudf::type_dispatcher(
input.dtype,
detail::MathOpDispatcher<detail::DeviceArcCos>{},
&input, &output);
break;
case unary_op::ARCTAN:
cudf::type_dispatcher(
input.dtype,
detail::MathOpDispatcher<detail::DeviceArcTan>{},
&input, &output);
break;
case unary_op::EXP:
cudf::type_dispatcher(
input.dtype,
detail::MathOpDispatcher<detail::DeviceExp>{},
&input, &output);
break;
case unary_op::LOG:
cudf::type_dispatcher(
input.dtype,
detail::MathOpDispatcher<detail::DeviceLog>{},
&input, &output);
break;
case unary_op::SQRT:
cudf::type_dispatcher(
input.dtype,
detail::MathOpDispatcher<detail::DeviceSqrt>{},
&input, &output);
break;
case unary_op::CEIL:
cudf::type_dispatcher(
input.dtype,
detail::MathOpDispatcher<detail::DeviceCeil>{},
&input, &output);
break;
case unary_op::FLOOR:
cudf::type_dispatcher(
input.dtype,
detail::MathOpDispatcher<detail::DeviceFloor>{},
&input, &output);
break;
case unary_op::ABS:
cudf::type_dispatcher(
input.dtype,
detail::MathOpDispatcher<detail::DeviceAbs>{},
&input, &output);
break;
case unary_op::BIT_INVERT:
cudf::type_dispatcher(
input.dtype,
detail::BitwiseOpDispatcher<detail::DeviceInvert>{},
&input, &output);
break;
case unary_op::NOT:
cudf::type_dispatcher(
input.dtype,
detail::LogicalOpDispatcher<detail::DeviceNot>{},
&input, &output);
break;
default:
CUDF_FAIL("Undefined unary operation");
}
return output;
}
} // namespace cudf
|
1f7202086c322086be6a085c87b174e02c6591e5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ACADEMIC INTEGRITY PLEDGE */
/* */
/* - I have not used source code obtained from another student nor */
/* any other unauthorized source, either modified or unmodified. */
/* */
/* - All source code and documentation used in my program is either */
/* my original work or was derived by me from the source code */
/* published in the textbook for this course or presented in */
/* class. */
/* */
/* - I have not discussed coding details about this project with */
/* anyone other than my instructor. I understand that I may discuss */
/* the concepts of this program with other students and that another */
/* student may help me debug my program so long as neither of us */
/* writes anything during the discussion or modifies any computer */
/* file during the discussion. */
/* */
/* - I have violated neither the spirit nor letter of these restrictions. */
/* */
/* */
/* */
/* Signed: Garrett McDonnell | Date:12/7/18 */
/* */
/* */
/* 3460:677 CUDA Image Processing lab, Version 1.02, Fall 2016. */
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <stdlib.h>
#include "cpu_bitmap.h"
#include "bitmap_help.h"
__global__ void grayScale(unsigned char *device_arr);
__host__ void imgProc(unsigned char *map, int size, int width, int height);
int main(void) {
char fname[50];
FILE* infile;
unsigned short ftype;
tagBMFH bitHead;
tagBMIH bitInfoHead;
tagRGBQ *pRgb;
printf("Please enter the .bmp file name: ");
scanf("%s", fname);
strcat(fname,".bmp");
infile = fopen(fname, "rb");
if (infile != NULL) {
printf("File open successful.\n");
fread(&ftype, 1, sizeof(unsigned short), infile);
if (ftype != 0x4d42)
{
printf("File not .bmp format.\n");
return 1;
}
fread(&bitHead, 1, sizeof(tagBMFH), infile);
fread(&bitInfoHead, 1, sizeof(tagBMIH), infile);
}
else {
printf("File open fail.\n");
return 1;
}
if (bitInfoHead.biBitCount < 24) {
long nPlateNum = long(pow(2, double(bitInfoHead.biBitCount)));
pRgb = (tagRGBQ *)malloc(nPlateNum * sizeof(tagRGBQ));
memset(pRgb, 0, nPlateNum * sizeof(tagRGBQ));
int num = fread(pRgb, 4, nPlateNum, infile);
}
int width = bitInfoHead.biWidth;
int height = bitInfoHead.biHeight;
int l_width = 4 * ((width * bitInfoHead.biBitCount + 31) / 32);
long nData = height * l_width;
unsigned char *pColorData = (unsigned char *)malloc(nData);
memset(pColorData, 0, nData);
fread(pColorData, 1, nData, infile);
fclose(infile);
CPUBitmap dataOfBmp(width, height);
unsigned char *map = dataOfBmp.get_ptr();
if (bitInfoHead.biBitCount < 24) {
int k, index = 0;
if (bitInfoHead.biBitCount == 1) {
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++) {
unsigned char mixIndex = 0;
k = i * l_width + j / 8;
mixIndex = pColorData[k];
if (j % 8 < 7) mixIndex = mixIndex << (7 - (j % 8));
mixIndex = mixIndex >> 7;
map[index * 4 + 0] = pRgb[mixIndex].rgbRed;
map[index * 4 + 1] = pRgb[mixIndex].rgbGreen;
map[index * 4 + 2] = pRgb[mixIndex].rgbBlue;
map[index * 4 + 3] = pRgb[mixIndex].rgbReserved;
index++;
}
}
else if (bitInfoHead.biBitCount == 2) {
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++) {
unsigned char mixIndex = 0;
k = i * l_width + j / 4;
mixIndex = pColorData[k];
if (j % 4 < 3) mixIndex = mixIndex << (6 - 2 * (j % 4));
mixIndex = mixIndex >> 6;
map[index * 4 + 0] = pRgb[mixIndex].rgbRed;
map[index * 4 + 1] = pRgb[mixIndex].rgbGreen;
map[index * 4 + 2] = pRgb[mixIndex].rgbBlue;
map[index * 4 + 3] = pRgb[mixIndex].rgbReserved;
index++;
}
}
else if (bitInfoHead.biBitCount == 4) {
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++) {
unsigned char mixIndex = 0;
k = i * l_width + j / 2;
mixIndex = pColorData[k];
if (j % 2 == 0) mixIndex = mixIndex << 4;
mixIndex = mixIndex >> 4;
map[index * 4 + 0] = pRgb[mixIndex].rgbRed;
map[index * 4 + 1] = pRgb[mixIndex].rgbGreen;
map[index * 4 + 2] = pRgb[mixIndex].rgbBlue;
map[index * 4 + 3] = pRgb[mixIndex].rgbReserved;
index++;
}
}
else if (bitInfoHead.biBitCount == 8) {
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++) {
unsigned char mixIndex = 0;
k = i * l_width + j;
mixIndex = pColorData[k];
map[index * 4 + 0] = pRgb[mixIndex].rgbRed;
map[index * 4 + 1] = pRgb[mixIndex].rgbGreen;
map[index * 4 + 2] = pRgb[mixIndex].rgbBlue;
map[index * 4 + 3] = pRgb[mixIndex].rgbReserved;
index++;
}
}
else if (bitInfoHead.biBitCount == 16) {
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++) {
unsigned short mixIndex = 0; // 16-bit palette index; an unsigned char here would drop the high byte
k = i * l_width + j * 2;
unsigned short shortTemp = pColorData[k + 1] << 8;
mixIndex = pColorData[k] + shortTemp;
map[index * 4 + 0] = pRgb[mixIndex].rgbRed;
map[index * 4 + 1] = pRgb[mixIndex].rgbGreen;
map[index * 4 + 2] = pRgb[mixIndex].rgbBlue;
map[index * 4 + 3] = pRgb[mixIndex].rgbReserved;
index++;
}
}
}
else {
int k, index = 0;
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++) {
k = i * l_width + j * 3;
map[index * 4 + 0] = pColorData[k + 2];
map[index * 4 + 1] = pColorData[k + 1];
map[index * 4 + 2] = pColorData[k];
index++;
}
}
imgProc(map, dataOfBmp.image_size(), width, height);
dataOfBmp.display_and_exit();
return 0;
}
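// One thread per pixel: imgProc launches a width x height grid of single-thread blocks. Each thread applies a luminosity-style weighting (~0.21 R + 0.71 G + 0.07 B) and writes the result to all four RGBA bytes, so the alpha channel is overwritten as well.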
__global__ void grayScale(unsigned char *device_arr){
int pixel = (blockIdx.x + blockIdx.y * gridDim.x) * 4;
unsigned char red = device_arr[pixel];
unsigned char green = device_arr[pixel + 1];
unsigned char blue = device_arr[pixel + 2];
unsigned gray = 0.21 * red + 0.71 * green + 0.07 * blue; // renamed to avoid shadowing the kernel name
device_arr[pixel] = device_arr[pixel + 1] = device_arr[pixel + 2] = device_arr[pixel + 3] = gray;
}
__host__ void imgProc(unsigned char *map, int size, int width, int height) {
unsigned char *device_arr;
hipMalloc((void**) &device_arr, size);
hipMemcpy(device_arr, map, size, hipMemcpyHostToDevice);
dim3 dimGrid(width, height);
dim3 dimBlock(1);
hipLaunchKernelGGL(( grayScale), dim3(dimGrid), dim3(dimBlock), 0, 0, device_arr);
hipMemcpy(map, device_arr, size, hipMemcpyDeviceToHost);
hipFree(device_arr);
return;
} | 1f7202086c322086be6a085c87b174e02c6591e5.cu | /* ACADEMIC INTEGRITY PLEDGE */
/* */
/* - I have not used source code obtained from another student nor */
/* any other unauthorized source, either modified or unmodified. */
/* */
/* - All source code and documentation used in my program is either */
/* my original work or was derived by me from the source code */
/* published in the textbook for this course or presented in */
/* class. */
/* */
/* - I have not discussed coding details about this project with */
/* anyone other than my instructor. I understand that I may discuss */
/* the concepts of this program with other students and that another */
/* student may help me debug my program so long as neither of us */
/* writes anything during the discussion or modifies any computer */
/* file during the discussion. */
/* */
/* - I have violated neither the spirit nor letter of these restrictions. */
/* */
/* */
/* */
/* Signed: Garrett McDonnell | Date:12/7/18 */
/* */
/* */
/* 3460:677 CUDA Image Processing lab, Version 1.02, Fall 2016. */
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <stdlib.h>
#include "cpu_bitmap.h"
#include "bitmap_help.h"
__global__ void grayScale(unsigned char *device_arr);
__host__ void imgProc(unsigned char *map, int size, int width, int height);
int main(void) {
char fname[50];
FILE* infile;
unsigned short ftype;
tagBMFH bitHead;
tagBMIH bitInfoHead;
tagRGBQ *pRgb;
printf("Please enter the .bmp file name: ");
scanf("%s", fname);
strcat(fname,".bmp");
infile = fopen(fname, "rb");
if (infile != NULL) {
printf("File open successful.\n");
fread(&ftype, 1, sizeof(unsigned short), infile);
if (ftype != 0x4d42)
{
printf("File not .bmp format.\n");
return 1;
}
fread(&bitHead, 1, sizeof(tagBMFH), infile);
fread(&bitInfoHead, 1, sizeof(tagBMIH), infile);
}
else {
printf("File open fail.\n");
return 1;
}
if (bitInfoHead.biBitCount < 24) {
long nPlateNum = long(pow(2, double(bitInfoHead.biBitCount)));
pRgb = (tagRGBQ *)malloc(nPlateNum * sizeof(tagRGBQ));
memset(pRgb, 0, nPlateNum * sizeof(tagRGBQ));
int num = fread(pRgb, 4, nPlateNum, infile);
}
int width = bitInfoHead.biWidth;
int height = bitInfoHead.biHeight;
int l_width = 4 * ((width * bitInfoHead.biBitCount + 31) / 32);
long nData = height * l_width;
unsigned char *pColorData = (unsigned char *)malloc(nData);
memset(pColorData, 0, nData);
fread(pColorData, 1, nData, infile);
fclose(infile);
CPUBitmap dataOfBmp(width, height);
unsigned char *map = dataOfBmp.get_ptr();
if (bitInfoHead.biBitCount < 24) {
int k, index = 0;
if (bitInfoHead.biBitCount == 1) {
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++) {
unsigned char mixIndex = 0;
k = i * l_width + j / 8;
mixIndex = pColorData[k];
if (j % 8 < 7) mixIndex = mixIndex << (7 - (j % 8));
mixIndex = mixIndex >> 7;
map[index * 4 + 0] = pRgb[mixIndex].rgbRed;
map[index * 4 + 1] = pRgb[mixIndex].rgbGreen;
map[index * 4 + 2] = pRgb[mixIndex].rgbBlue;
map[index * 4 + 3] = pRgb[mixIndex].rgbReserved;
index++;
}
}
else if (bitInfoHead.biBitCount == 2) {
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++) {
unsigned char mixIndex = 0;
k = i * l_width + j / 4;
mixIndex = pColorData[k];
if (j % 4 < 3) mixIndex = mixIndex << (6 - 2 * (j % 4));
mixIndex = mixIndex >> 6;
map[index * 4 + 0] = pRgb[mixIndex].rgbRed;
map[index * 4 + 1] = pRgb[mixIndex].rgbGreen;
map[index * 4 + 2] = pRgb[mixIndex].rgbBlue;
map[index * 4 + 3] = pRgb[mixIndex].rgbReserved;
index++;
}
}
else if (bitInfoHead.biBitCount == 4) {
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++) {
unsigned char mixIndex = 0;
k = i * l_width + j / 2;
mixIndex = pColorData[k];
if (j % 2 == 0) mixIndex = mixIndex << 4;
mixIndex = mixIndex >> 4;
map[index * 4 + 0] = pRgb[mixIndex].rgbRed;
map[index * 4 + 1] = pRgb[mixIndex].rgbGreen;
map[index * 4 + 2] = pRgb[mixIndex].rgbBlue;
map[index * 4 + 3] = pRgb[mixIndex].rgbReserved;
index++;
}
}
else if (bitInfoHead.biBitCount == 8) {
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++) {
unsigned char mixIndex = 0;
k = i * l_width + j;
mixIndex = pColorData[k];
map[index * 4 + 0] = pRgb[mixIndex].rgbRed;
map[index * 4 + 1] = pRgb[mixIndex].rgbGreen;
map[index * 4 + 2] = pRgb[mixIndex].rgbBlue;
map[index * 4 + 3] = pRgb[mixIndex].rgbReserved;
index++;
}
}
else if (bitInfoHead.biBitCount == 16) {
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++) {
unsigned short mixIndex = 0; // 16-bit palette index; an unsigned char here would drop the high byte
k = i * l_width + j * 2;
unsigned short shortTemp = pColorData[k + 1] << 8;
mixIndex = pColorData[k] + shortTemp;
map[index * 4 + 0] = pRgb[mixIndex].rgbRed;
map[index * 4 + 1] = pRgb[mixIndex].rgbGreen;
map[index * 4 + 2] = pRgb[mixIndex].rgbBlue;
map[index * 4 + 3] = pRgb[mixIndex].rgbReserved;
index++;
}
}
}
else {
int k, index = 0;
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++) {
k = i * l_width + j * 3;
map[index * 4 + 0] = pColorData[k + 2];
map[index * 4 + 1] = pColorData[k + 1];
map[index * 4 + 2] = pColorData[k];
index++;
}
}
imgProc(map, dataOfBmp.image_size(), width, height);
dataOfBmp.display_and_exit();
return 0;
}
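// One thread per pixel: imgProc launches a width x height grid of single-thread blocks. Each thread applies a luminosity-style weighting (~0.21 R + 0.71 G + 0.07 B) and writes the result to all four RGBA bytes, so the alpha channel is overwritten as well.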
__global__ void grayScale(unsigned char *device_arr){
int pixel = (blockIdx.x + blockIdx.y * gridDim.x) * 4;
unsigned char red = device_arr[pixel];
unsigned char green = device_arr[pixel + 1];
unsigned char blue = device_arr[pixel + 2];
unsigned gray = 0.21 * red + 0.71 * green + 0.07 * blue; // renamed to avoid shadowing the kernel name
device_arr[pixel] = device_arr[pixel + 1] = device_arr[pixel + 2] = device_arr[pixel + 3] = gray;
}
__host__ void imgProc(unsigned char *map, int size, int width, int height) {
unsigned char *device_arr;
cudaMalloc((void**) &device_arr, size);
cudaMemcpy(device_arr, map, size, cudaMemcpyHostToDevice);
dim3 dimGrid(width, height);
dim3 dimBlock(1);
grayScale<<<dimGrid, dimBlock>>>(device_arr);
cudaMemcpy(map, device_arr, size, cudaMemcpyDeviceToHost);
cudaFree(device_arr);
return;
} |
8cd94e3e8022b3e98028e7fe1c69fd743b710ec2.hip | // !!! This is a file automatically generated by hipify!!!
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <stdio.h>
#include <assert.h>
#include <string>
#include <hip/hip_runtime.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <iostream>
#include <vector>
#include <winsock2.h>
#include <ws2tcpip.h>
#pragma comment (lib, "Ws2_32.lib")
#define DEFAULT_BUFLEN 512
#define DEFAULT_PORT "27015"
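// Blocking, single-client TCP echo server on DEFAULT_PORT; the GPU runtime headers included above are not used anywhere in this file.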
int __cdecl init_Server(void)
{
WSADATA wsaData;
int iResult;
SOCKET ListenSocket = INVALID_SOCKET;
SOCKET ClientSocket = INVALID_SOCKET;
struct addrinfo *result = NULL;
struct addrinfo hints;
int iSendResult;
char recvbuf[DEFAULT_BUFLEN];
int recvbuflen = DEFAULT_BUFLEN;
// Initialize Winsock
iResult = WSAStartup(MAKEWORD(2,2), &wsaData);
if (iResult != 0) {
printf("WSAStartup failed with error: %d\n", iResult);
return 1;
}
ZeroMemory(&hints, sizeof(hints));
hints.ai_family = AF_INET;
hints.ai_socktype = SOCK_STREAM;
hints.ai_protocol = IPPROTO_TCP;
hints.ai_flags = AI_PASSIVE;
// Resolve the server address and port
iResult = getaddrinfo(NULL, DEFAULT_PORT, &hints, &result);
if ( iResult != 0 ) {
printf("getaddrinfo failed with error: %d\n", iResult);
WSACleanup();
return 1;
}
// Create a SOCKET for connecting to server
ListenSocket = socket(result->ai_family, result->ai_socktype, result->ai_protocol);
if (ListenSocket == INVALID_SOCKET) {
printf("socket failed with error: %ld\n", WSAGetLastError());
freeaddrinfo(result);
WSACleanup();
return 1;
}
// Setup the TCP listening socket
iResult = bind( ListenSocket, result->ai_addr, (int)result->ai_addrlen);
if (iResult == SOCKET_ERROR) {
printf("bind failed with error: %d\n", WSAGetLastError());
freeaddrinfo(result);
closesocket(ListenSocket);
WSACleanup();
return 1;
}
freeaddrinfo(result);
iResult = listen(ListenSocket, SOMAXCONN);
if (iResult == SOCKET_ERROR) {
printf("listen failed with error: %d\n", WSAGetLastError());
closesocket(ListenSocket);
WSACleanup();
return 1;
}
// Accept a client socket
ClientSocket = accept(ListenSocket, NULL, NULL);
if (ClientSocket == INVALID_SOCKET) {
printf("accept failed with error: %d\n", WSAGetLastError());
closesocket(ListenSocket);
WSACleanup();
return 1;
}
// No longer need server socket
closesocket(ListenSocket);
// Receive until the peer shuts down the connection
do {
iResult = recv(ClientSocket, recvbuf, recvbuflen, 0);
if (iResult > 0) {
printf("Bytes received: %d\n", iResult);
// Echo the buffer back to the sender
iSendResult = send( ClientSocket, recvbuf, iResult, 0 );
if (iSendResult == SOCKET_ERROR) {
printf("send failed with error: %d\n", WSAGetLastError());
closesocket(ClientSocket);
WSACleanup();
return 1;
}
printf("Bytes sent: %d\n", iSendResult);
}
else if (iResult == 0)
printf("Connection closing...\n");
else {
printf("recv failed with error: %d\n", WSAGetLastError());
closesocket(ClientSocket);
WSACleanup();
return 1;
}
} while (iResult > 0);
// shutdown the connection since we're done
iResult = shutdown(ClientSocket, SD_SEND);
if (iResult == SOCKET_ERROR) {
printf("shutdown failed with error: %d\n", WSAGetLastError());
closesocket(ClientSocket);
WSACleanup();
return 1;
}
// cleanup
closesocket(ClientSocket);
WSACleanup();
return 0;
}
// Main Function.
int main(int argc, const char *argv[]) {
init_Server();
}
| 8cd94e3e8022b3e98028e7fe1c69fd743b710ec2.cu | #define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <stdio.h>
#include <assert.h>
#include <string>
#include <cuda_runtime.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <iostream>
#include <vector>
#include <winsock2.h>
#include <ws2tcpip.h>
#pragma comment (lib, "Ws2_32.lib")
#define DEFAULT_BUFLEN 512
#define DEFAULT_PORT "27015"
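// Blocking, single-client TCP echo server on DEFAULT_PORT; the CUDA headers included above are not used anywhere in this file.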
int __cdecl init_Server(void)
{
WSADATA wsaData;
int iResult;
SOCKET ListenSocket = INVALID_SOCKET;
SOCKET ClientSocket = INVALID_SOCKET;
struct addrinfo *result = NULL;
struct addrinfo hints;
int iSendResult;
char recvbuf[DEFAULT_BUFLEN];
int recvbuflen = DEFAULT_BUFLEN;
// Initialize Winsock
iResult = WSAStartup(MAKEWORD(2,2), &wsaData);
if (iResult != 0) {
printf("WSAStartup failed with error: %d\n", iResult);
return 1;
}
ZeroMemory(&hints, sizeof(hints));
hints.ai_family = AF_INET;
hints.ai_socktype = SOCK_STREAM;
hints.ai_protocol = IPPROTO_TCP;
hints.ai_flags = AI_PASSIVE;
// Resolve the server address and port
iResult = getaddrinfo(NULL, DEFAULT_PORT, &hints, &result);
if ( iResult != 0 ) {
printf("getaddrinfo failed with error: %d\n", iResult);
WSACleanup();
return 1;
}
// Create a SOCKET for connecting to server
ListenSocket = socket(result->ai_family, result->ai_socktype, result->ai_protocol);
if (ListenSocket == INVALID_SOCKET) {
printf("socket failed with error: %ld\n", WSAGetLastError());
freeaddrinfo(result);
WSACleanup();
return 1;
}
// Setup the TCP listening socket
iResult = bind( ListenSocket, result->ai_addr, (int)result->ai_addrlen);
if (iResult == SOCKET_ERROR) {
printf("bind failed with error: %d\n", WSAGetLastError());
freeaddrinfo(result);
closesocket(ListenSocket);
WSACleanup();
return 1;
}
freeaddrinfo(result);
iResult = listen(ListenSocket, SOMAXCONN);
if (iResult == SOCKET_ERROR) {
printf("listen failed with error: %d\n", WSAGetLastError());
closesocket(ListenSocket);
WSACleanup();
return 1;
}
// Accept a client socket
ClientSocket = accept(ListenSocket, NULL, NULL);
if (ClientSocket == INVALID_SOCKET) {
printf("accept failed with error: %d\n", WSAGetLastError());
closesocket(ListenSocket);
WSACleanup();
return 1;
}
// No longer need server socket
closesocket(ListenSocket);
// Receive until the peer shuts down the connection
do {
iResult = recv(ClientSocket, recvbuf, recvbuflen, 0);
if (iResult > 0) {
printf("Bytes received: %d\n", iResult);
// Echo the buffer back to the sender
iSendResult = send( ClientSocket, recvbuf, iResult, 0 );
if (iSendResult == SOCKET_ERROR) {
printf("send failed with error: %d\n", WSAGetLastError());
closesocket(ClientSocket);
WSACleanup();
return 1;
}
printf("Bytes sent: %d\n", iSendResult);
}
else if (iResult == 0)
printf("Connection closing...\n");
else {
printf("recv failed with error: %d\n", WSAGetLastError());
closesocket(ClientSocket);
WSACleanup();
return 1;
}
} while (iResult > 0);
// shutdown the connection since we're done
iResult = shutdown(ClientSocket, SD_SEND);
if (iResult == SOCKET_ERROR) {
printf("shutdown failed with error: %d\n", WSAGetLastError());
closesocket(ClientSocket);
WSACleanup();
return 1;
}
// cleanup
closesocket(ClientSocket);
WSACleanup();
return 0;
}
// Main Function.
int main(int argc, const char *argv[]) {
init_Server();
}
|
36266b4e320b40f1f3b628e94ba3cbd5414b2fcf.hip | // !!! This is a file automatically generated by hipify!!!
// bench.cu
//
// Benchmark program for the different operator implementations.
//
#include <sys/time.h>
#include <vector>
#include "common.h"
#include "device.h"
#include "verify_ops.h"
#include "opt_parser.h"
// Set up "problem" values needed by the CUDA kernels.
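// The domain is [0, 2*pi] in each direction, discretized on an NX x NY x NZ grid with endpoints included, giving the spacings dx, dy, dz below.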
real x_0 = 0.0;
real y_0 = 0.0;
real z_0 = 0.0;
real dx = (2 * M_PI - 0.0) / (NX - 1);
real dy = (2 * M_PI - 0.0) / (NY - 1);
real dz = (2 * M_PI - 0.0) / (NZ - 1);
struct timings {
float low, high, avg;
};
typedef timings(*bench_op_type)(int);
// Collect timings for 'count' invocations of an operation.
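// Each iteration synchronizes the device before and after the timed call, so low/high/avg reflect full kernel execution time, not just launch overhead.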
template<void(*call_op)()> timings
bench_op(int count)
{
float elapsedMs;
timings timings;
double start, stop;
timings.avg = 0;
for (int i = 0; i < count; i++) {
check_cuda(hipDeviceSynchronize());
start = read_time_ms();
call_op();
check_cuda(hipPeekAtLastError());
check_cuda(hipDeviceSynchronize());
stop = read_time_ms();
elapsedMs = stop - start;
if (i == 0)
timings.low = timings.high = elapsedMs;
else if (elapsedMs < timings.low)
timings.low = elapsedMs;
else if (elapsedMs > timings.high)
timings.high = elapsedMs;
timings.avg += elapsedMs;
}
timings.avg /= count;
return timings;
}
// Run benchmarks for two seconds so that the GPU
// is not idling.
void
warm_up(bench_op_type bench)
{
puts("Warm up.");
float ttot = 0.0;
while (ttot < 2000.0) {
timings timings = bench(10);
ttot += timings.avg * 10.0;
}
puts("Done.");
}
void
print_timings(std::string name, timings timings)
{
printf("%s: %.20f %.20f %.20f\n", name.c_str(), timings.low, timings.high, timings.avg);
}
// Verify results for operators with one result (div, del2, etc...)
void
verify1(vf3dgpu &gpufield, real (*check)(vf3dhost &), bool relErr = true)
{
vf3dhost h(gpufield.varcount());
gpufield.copy_to_host(h);
real maxErr = check(h);
h.free();
printf("maxErr: (%.20f).\n", maxErr);
if (relErr)
printf("relErr: (%.20f).\n", maxErr/pow(max(max(dx,dy),dz),6));
}
// Verify results for operators with three results (grad, curl, etc...)
void
verify3(vf3dgpu &gpufield, real3 (*check)(vf3dhost &), bool relErr = true)
{
vf3dhost h(gpufield.varcount());
gpufield.copy_to_host(h);
real3 maxErr = check(h);
h.free();
printf("maxErr: (%.20f, %.20f, %.20f).\n", maxErr.x, maxErr.y, maxErr.z);
if (relErr)
printf("relErr: (%.20f, %.20f, %.20f).\n", maxErr.x/pow(dx,6), maxErr.y/pow(dy,6), maxErr.z/pow(dz,6));
}
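// Note on the relErr values printed by verify1/verify3: the absolute error is divided by
// the grid spacing raised to the sixth power. This presumably normalises the error against
// the O(h^6) truncation term of sixth-order finite-difference stencils -- an assumption
// about the operators under test, not something stated in this file.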
// Variables for grad, div, curl and del2.
vf3dgpu f, u, omega, d2f;
void
run_bench_grad(std::string name, bench_op_type bench, int count, bool verify)
{
timings timings;
if (verify)
clear_gpu_mem(u.mem(), vfmemsize(3)/sizeof(real));
timings = bench(count);
print_timings(name, timings);
if (verify)
verify3(u, check_grad);
}
template<void (*call_grad)(vf3dgpu &, vf3dgpu &)>
inline void grad_op() { call_grad(f, u); }
void
bench_grad(std::string name, int count, bool verify)
{
printf("Test gradient:\n");
printf("f memory: %.2f MB.\n", vfmemsize(1)/(1024.0*1024.0));
printf("u memory: %.2f MB.\n", vfmemsize(3)/(1024.0*1024.0));
puts("Initalizing function values.");
f = vf3dgpu(1);
u = vf3dgpu(3);
init_field(f, TEST_TRIG_INIT);
apply_periodic_bc(f);
puts("Done.");
warm_up(bench_op<grad_op<grad_default> >);
if (name == "default")
run_bench_grad("default", bench_op<grad_op<grad_default> >, count, verify);
else if (name == "old_order")
run_bench_grad("old_order", bench_op<grad_op<grad_old_order> >, count, verify);
else if (name == "simple")
run_bench_grad("simple", bench_op<grad_op<grad_simple> >, count, verify);
else if (name == "noshared")
run_bench_grad("noshared", bench_op<grad_op<grad_noshared> >, count, verify);
else if (name == "flags")
run_bench_grad("flags", bench_op<grad_op<grad_flags> >, count, verify);
else if (name == "x_load")
run_bench_grad("x_load", bench_op<grad_op<grad_x_load> >, count, verify);
else if (name == "y_load")
run_bench_grad("y_load", bench_op<grad_op<grad_y_load> >, count, verify);
else if (name == "linear_load")
run_bench_grad("linear_load", bench_op<grad_op<grad_linear_load> >, count, verify);
else if (name == "three")
run_bench_grad("three", bench_op<grad_op<grad_three> >, count, verify);
else if (name == "all") {
run_bench_grad("default", bench_op<grad_op<grad_default> >, count, verify);
run_bench_grad("old_order", bench_op<grad_op<grad_old_order> >, count, verify);
run_bench_grad("simple", bench_op<grad_op<grad_simple> >, count, verify);
run_bench_grad("noshared", bench_op<grad_op<grad_noshared> >, count, verify);
run_bench_grad("flags", bench_op<grad_op<grad_flags> >, count, verify);
run_bench_grad("x_load", bench_op<grad_op<grad_x_load> >, count, verify);
run_bench_grad("y_load", bench_op<grad_op<grad_y_load> >, count, verify);
run_bench_grad("linear_load", bench_op<grad_op<grad_linear_load> >, count, verify);
run_bench_grad("three", bench_op<grad_op<grad_three> >, count, verify);
}
else {
printf("No grad kernel named '%s'\n", name.c_str());
return;
}
f.free();
u.free();
}
void
run_bench_div(std::string name, bench_op_type bench, int count, bool verify)
{
timings timings;
if (verify)
clear_gpu_mem(f.mem(), vfmemsize(1)/sizeof(real));
timings = bench(count);
print_timings(name, timings);
if (verify)
verify1(f, check_div);
}
template<void (*call_div)(vf3dgpu &, vf3dgpu &)>
inline void div_op() { call_div(u, f); }
void
bench_div(std::string name, int count, bool verify)
{
puts("Test divergence:");
printf("u memory: %.2f MB.\n", vfmemsize(3)/(1024.0*1024.0));
printf("f memory: %.2f MB.\n", vfmemsize(1)/(1024.0*1024.0));
puts("Initalizing vector field.");
u = vf3dgpu(3);
f = vf3dgpu(1);
init_field(u, TEST_TRIG_INIT);
apply_periodic_bc(u);
puts("Done.");
warm_up(bench_op<div_op<div_default> >);
if (name == "default")
run_bench_div("default", bench_op<div_op<div_default> >, count, verify);
else if (name == "same")
run_bench_div("same", bench_op<div_op<div_same> >, count, verify);
else if (name == "three")
run_bench_div("three", bench_op<div_op<div_three> >, count, verify);
else if (name == "all") {
run_bench_div("default", bench_op<div_op<div_default> >, count, verify);
run_bench_div("same", bench_op<div_op<div_same> >, count, verify);
run_bench_div("three", bench_op<div_op<div_three> >, count, verify);
}
else {
printf("No div kernel named '%s'\n", name.c_str());
return;
}
f.free();
u.free();
}
void
run_bench_curl(std::string name, bench_op_type bench, int count, bool verify)
{
timings timings;
if (verify)
clear_gpu_mem(omega.mem(), vfmemsize(3)/sizeof(real));
timings = bench(count);
print_timings(name, timings);
if (verify)
verify3(omega, check_curl);
}
template<void (*call_curl)(vf3dgpu &, vf3dgpu &)>
inline void curl_op() { call_curl(u, omega); }
void
bench_curl(std::string name, int count, bool verify)
{
puts("Test curl:");
printf("u memory: %.2f MB.\n", vfmemsize(3)/(1024.0*1024.0));
printf("omega memory: %.2f MB.\n", vfmemsize(3)/(1024.0*1024.0));
puts("Initalizing vector field.");
u = vf3dgpu(3);
omega = vf3dgpu(3);
init_field(u, TEST_TRIG_INIT);
apply_periodic_bc(u);
puts("Done.");
warm_up(bench_op<curl_op<curl_default> >);
if (name == "default")
run_bench_curl("default", bench_op<curl_op<curl_default> >, count, verify);
else if (name == "lb")
run_bench_curl("lb", bench_op<curl_op<curl_lb> >, count, verify);
else if (name == "all") {
run_bench_curl("default", bench_op<curl_op<curl_default> >, count, verify);
run_bench_curl("lb", bench_op<curl_op<curl_lb> >, count, verify);
}
else {
printf("No curl kernel named '%s'\n", name.c_str());
return;
}
omega.free();
u.free();
}
void
run_bench_del2(std::string name, bench_op_type bench, int count, bool verify)
{
timings timings;
if (verify)
clear_gpu_mem(d2f.mem(), vfmemsize(1)/sizeof(real));
timings = bench(count);
print_timings(name, timings);
if (verify)
verify1(d2f, check_del2);
}
template<void (*call_del2)(vf3dgpu &, vf3dgpu &)>
inline void del2_op() { call_del2(f, d2f); }
void
bench_del2(std::string name, int count, bool verify)
{
puts("Test del2:");
printf("f memory: %.2f MB.\n", vfmemsize(1)/(1024.0*1024.0));
printf("d2f memory: %.2f MB.\n", vfmemsize(1)/(1024.0*1024.0));
puts("Initalizing vector field.");
f = vf3dgpu(1);
d2f = vf3dgpu(1);
init_field(f, TEST_TRIG_INIT);
apply_periodic_bc(f);
puts("Done.");
warm_up(bench_op<del2_op<del2_default> >);
if (name == "default")
run_bench_del2("default", bench_op<del2_op<del2_default> >, count, verify);
else if (name == "same")
run_bench_del2("same", bench_op<del2_op<del2_same> >, count, verify);
else if (name == "all") {
run_bench_del2("default", bench_op<del2_op<del2_default> >, count, verify);
run_bench_del2("same", bench_op<del2_op<del2_same> >, count, verify);
}
else {
printf("No del2 kernel named '%s'\n", name.c_str());
return;
}
d2f.free();
f.free();
}
// Variables for add2, add3 and dotmul.
vf3dgpu a, b;
void call_add2() { add2(-1, a, 5, a, b); }
void call_add3() { add3(-1, a, 2, a, 3, a, b); }
void call_dotmul() { dotmul3(a, a, b);}
void
bench_add2(std::string name, int count, bool verify)
{
puts("Test add2:");
puts("Initalizing fields.");
a = vf3dgpu(1);
b = vf3dgpu(1);
init_field(a, TEST_TRIG_INIT);
puts("Done.");
timings timings;
if (verify)
clear_gpu_mem(b.mem(), vfmemsize(1)/sizeof(real));
timings = bench_op<call_add2>(count);
print_timings("add2", timings);
if (verify)
verify1(b, check_add2, false);
a.free();
b.free();
}
void
bench_add3(std::string name, int count, bool verify)
{
puts("Test add3:");
puts("Initalizing fields.");
a = vf3dgpu(1);
b = vf3dgpu(1);
init_field(a, TEST_TRIG_INIT);
puts("Done.");
timings timings;
if (verify)
clear_gpu_mem(b.mem(), vfmemsize(1)/sizeof(real));
timings = bench_op<call_add3>(count);
print_timings("add3", timings);
if (verify)
verify1(b, check_add2, false);
a.free();
b.free();
}
void
bench_dotmul(std::string name, int count, bool verify)
{
puts("Test dotmul:");
puts("Initalizing fields.");
a = vf3dgpu(3);
b = vf3dgpu(1);
init_field(a, TEST_TRIG_INIT);
puts("Done.");
timings timings;
if (verify)
clear_gpu_mem(b.mem(), vfmemsize(1)/sizeof(real));
timings = bench_op<call_dotmul>(count);
print_timings("dotmul", timings);
if (verify)
verify1(b, check_dotmul, false);
a.free();
b.free();
}
struct bench_opts {
int count;
std::string op;
std::string name;
bool verify;
bool help;
bench_opts(): count(100), op("all"), name("all"), verify(false), help(false) { }
void usage() {
puts(
"\n"
"Usage: bench [opt=val | verify | help] ...\n"
"\n"
"Options with defaults:\n"
"count=100 - number of iterations\n"
"op=all - [all|grad|div|curl|...]\n"
"name=all - [all|name of op]\n"
"verify - test the op for correctness\n"
"help - only show this message\n"
"\n"
"Example: bench count=200 op=grad name=default\n");
exit(1);
}
void parse(int argc, char *argv[]) {
int pstop;
opt_parser optp;
optp.int_opt("count", &count);
optp.string_opt("op", &op);
optp.string_opt("name", &name);
optp.toggle_opt("verify", &verify);
optp.toggle_opt("help", &help);
if ((pstop = optp.parse(argc, argv)) != argc) {
printf("Unknow option: '%s'\n", argv[pstop]);
usage();
}
if (help)
usage();
}
};
int
main(int argc, char *argv[])
{
bench_opts opts;
opts.parse(argc, argv);
if (opts.op == "all") {
bench_grad("all", opts.count, opts.verify);
bench_div("all", opts.count, opts.verify);
bench_curl("all", opts.count, opts.verify);
bench_del2("all", opts.count, opts.verify);
}
else {
if (opts.op == "grad")
bench_grad(opts.name, opts.count, opts.verify);
else if (opts.op == "div")
bench_div(opts.name, opts.count, opts.verify);
else if (opts.op == "curl")
bench_curl(opts.name, opts.count, opts.verify);
else if (opts.op == "del2")
bench_del2(opts.name, opts.count, opts.verify);
else if (opts.op == "add2")
bench_add2(opts.name, opts.count, opts.verify);
else if (opts.op == "add3")
bench_add3(opts.name, opts.count, opts.verify);
else if (opts.op == "dotmul")
bench_dotmul(opts.name, opts.count, opts.verify);
else
opts.usage();
}
printf("Reported timer resolution: %ld ns\n", time_resolution_ns());
return 0;
}
| 36266b4e320b40f1f3b628e94ba3cbd5414b2fcf.cu | // bench.cu
//
// Benchmark program for the different operator implementations.
//
#include <sys/time.h>
#include <vector>
#include "common.h"
#include "device.h"
#include "verify_ops.h"
#include "opt_parser.h"
// Set up "problem" values needed by the CUDA kernels.
real x_0 = 0.0;
real y_0 = 0.0;
real z_0 = 0.0;
real dx = (2 * M_PI - 0.0) / (NX - 1);
real dy = (2 * M_PI - 0.0) / (NY - 1);
real dz = (2 * M_PI - 0.0) / (NZ - 1);
struct timings {
float low, high, avg;
};
typedef timings(*bench_op_type)(int);
// Collect timings for 'count' invocations of an operation.
template<void(*call_op)()> timings
bench_op(int count)
{
float elapsedMs;
timings timings;
double start, stop;
timings.avg = 0;
for (int i = 0; i < count; i++) {
check_cuda(cudaDeviceSynchronize());
start = read_time_ms();
call_op();
check_cuda(cudaPeekAtLastError());
check_cuda(cudaDeviceSynchronize());
stop = read_time_ms();
elapsedMs = stop - start;
if (i == 0)
timings.low = timings.high = elapsedMs;
else if (elapsedMs < timings.low)
timings.low = elapsedMs;
else if (elapsedMs > timings.high)
timings.high = elapsedMs;
timings.avg += elapsedMs;
}
timings.avg /= count;
return timings;
}
// Run benchmarks for two seconds so that the GPU
// is not idling.
void
warm_up(bench_op_type bench)
{
puts("Warm up.");
float ttot = 0.0;
while (ttot < 2000.0) {
timings timings = bench(10);
ttot += timings.avg * 10.0;
}
puts("Done.");
}
void
print_timings(std::string name, timings timings)
{
printf("%s: %.20f %.20f %.20f\n", name.c_str(), timings.low, timings.high, timings.avg);
}
// Verify results for operators with one result (div, del2, etc...)
void
verify1(vf3dgpu &gpufield, real (*check)(vf3dhost &), bool relErr = true)
{
vf3dhost h(gpufield.varcount());
gpufield.copy_to_host(h);
real maxErr = check(h);
h.free();
printf("maxErr: (%.20f).\n", maxErr);
if (relErr)
printf("relErr: (%.20f).\n", maxErr/pow(max(max(dx,dy),dz),6));
}
// Verify results for operators with three results (grad, curl, etc...)
void
verify3(vf3dgpu &gpufield, real3 (*check)(vf3dhost &), bool relErr = true)
{
vf3dhost h(gpufield.varcount());
gpufield.copy_to_host(h);
real3 maxErr = check(h);
h.free();
printf("maxErr: (%.20f, %.20f, %.20f).\n", maxErr.x, maxErr.y, maxErr.z);
if (relErr)
printf("relErr: (%.20f, %.20f, %.20f).\n", maxErr.x/pow(dx,6), maxErr.y/pow(dy,6), maxErr.z/pow(dz,6));
}
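// Note on the relErr values printed by verify1/verify3: the absolute error is divided by
// the grid spacing raised to the sixth power. This presumably normalises the error against
// the O(h^6) truncation term of sixth-order finite-difference stencils -- an assumption
// about the operators under test, not something stated in this file.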
// Variables for grad, div, curl and del2.
vf3dgpu f, u, omega, d2f;
void
run_bench_grad(std::string name, bench_op_type bench, int count, bool verify)
{
timings timings;
if (verify)
clear_gpu_mem(u.mem(), vfmemsize(3)/sizeof(real));
timings = bench(count);
print_timings(name, timings);
if (verify)
verify3(u, check_grad);
}
template<void (*call_grad)(vf3dgpu &, vf3dgpu &)>
inline void grad_op() { call_grad(f, u); }
void
bench_grad(std::string name, int count, bool verify)
{
printf("Test gradient:\n");
printf("f memory: %.2f MB.\n", vfmemsize(1)/(1024.0*1024.0));
printf("u memory: %.2f MB.\n", vfmemsize(3)/(1024.0*1024.0));
puts("Initalizing function values.");
f = vf3dgpu(1);
u = vf3dgpu(3);
init_field(f, TEST_TRIG_INIT);
apply_periodic_bc(f);
puts("Done.");
warm_up(bench_op<grad_op<grad_default> >);
if (name == "default")
run_bench_grad("default", bench_op<grad_op<grad_default> >, count, verify);
else if (name == "old_order")
run_bench_grad("old_order", bench_op<grad_op<grad_old_order> >, count, verify);
else if (name == "simple")
run_bench_grad("simple", bench_op<grad_op<grad_simple> >, count, verify);
else if (name == "noshared")
run_bench_grad("noshared", bench_op<grad_op<grad_noshared> >, count, verify);
else if (name == "flags")
run_bench_grad("flags", bench_op<grad_op<grad_flags> >, count, verify);
else if (name == "x_load")
run_bench_grad("x_load", bench_op<grad_op<grad_x_load> >, count, verify);
else if (name == "y_load")
run_bench_grad("y_load", bench_op<grad_op<grad_y_load> >, count, verify);
else if (name == "linear_load")
run_bench_grad("linear_load", bench_op<grad_op<grad_linear_load> >, count, verify);
else if (name == "three")
run_bench_grad("three", bench_op<grad_op<grad_three> >, count, verify);
else if (name == "all") {
run_bench_grad("default", bench_op<grad_op<grad_default> >, count, verify);
run_bench_grad("old_order", bench_op<grad_op<grad_old_order> >, count, verify);
run_bench_grad("simple", bench_op<grad_op<grad_simple> >, count, verify);
run_bench_grad("noshared", bench_op<grad_op<grad_noshared> >, count, verify);
run_bench_grad("flags", bench_op<grad_op<grad_flags> >, count, verify);
run_bench_grad("x_load", bench_op<grad_op<grad_x_load> >, count, verify);
run_bench_grad("y_load", bench_op<grad_op<grad_y_load> >, count, verify);
run_bench_grad("linear_load", bench_op<grad_op<grad_linear_load> >, count, verify);
run_bench_grad("three", bench_op<grad_op<grad_three> >, count, verify);
}
else {
printf("No grad kernel named '%s'\n", name.c_str());
return;
}
f.free();
u.free();
}
void
run_bench_div(std::string name, bench_op_type bench, int count, bool verify)
{
timings timings;
if (verify)
clear_gpu_mem(f.mem(), vfmemsize(1)/sizeof(real));
timings = bench(count);
print_timings(name, timings);
if (verify)
verify1(f, check_div);
}
template<void (*call_div)(vf3dgpu &, vf3dgpu &)>
inline void div_op() { call_div(u, f); }
void
bench_div(std::string name, int count, bool verify)
{
puts("Test divergence:");
printf("u memory: %.2f MB.\n", vfmemsize(3)/(1024.0*1024.0));
printf("f memory: %.2f MB.\n", vfmemsize(1)/(1024.0*1024.0));
puts("Initalizing vector field.");
u = vf3dgpu(3);
f = vf3dgpu(1);
init_field(u, TEST_TRIG_INIT);
apply_periodic_bc(u);
puts("Done.");
warm_up(bench_op<div_op<div_default> >);
if (name == "default")
run_bench_div("default", bench_op<div_op<div_default> >, count, verify);
else if (name == "same")
run_bench_div("same", bench_op<div_op<div_same> >, count, verify);
else if (name == "three")
run_bench_div("three", bench_op<div_op<div_three> >, count, verify);
else if (name == "all") {
run_bench_div("default", bench_op<div_op<div_default> >, count, verify);
run_bench_div("same", bench_op<div_op<div_same> >, count, verify);
run_bench_div("three", bench_op<div_op<div_three> >, count, verify);
}
else {
printf("No div kernel named '%s'\n", name.c_str());
return;
}
f.free();
u.free();
}
void
run_bench_curl(std::string name, bench_op_type bench, int count, bool verify)
{
timings timings;
if (verify)
clear_gpu_mem(omega.mem(), vfmemsize(3)/sizeof(real));
timings = bench(count);
print_timings(name, timings);
if (verify)
verify3(omega, check_curl);
}
template<void (*call_curl)(vf3dgpu &, vf3dgpu &)>
inline void curl_op() { call_curl(u, omega); }
void
bench_curl(std::string name, int count, bool verify)
{
puts("Test curl:");
printf("u memory: %.2f MB.\n", vfmemsize(3)/(1024.0*1024.0));
printf("omega memory: %.2f MB.\n", vfmemsize(3)/(1024.0*1024.0));
puts("Initalizing vector field.");
u = vf3dgpu(3);
omega = vf3dgpu(3);
init_field(u, TEST_TRIG_INIT);
apply_periodic_bc(u);
puts("Done.");
warm_up(bench_op<curl_op<curl_default> >);
if (name == "default")
run_bench_curl("default", bench_op<curl_op<curl_default> >, count, verify);
else if (name == "lb")
run_bench_curl("lb", bench_op<curl_op<curl_lb> >, count, verify);
else if (name == "all") {
run_bench_curl("default", bench_op<curl_op<curl_default> >, count, verify);
run_bench_curl("lb", bench_op<curl_op<curl_lb> >, count, verify);
}
else {
printf("No curl kernel named '%s'\n", name.c_str());
return;
}
omega.free();
u.free();
}
void
run_bench_del2(std::string name, bench_op_type bench, int count, bool verify)
{
timings timings;
if (verify)
clear_gpu_mem(d2f.mem(), vfmemsize(1)/sizeof(real));
timings = bench(count);
print_timings(name, timings);
if (verify)
verify1(d2f, check_del2);
}
template<void (*call_del2)(vf3dgpu &, vf3dgpu &)>
inline void del2_op() { call_del2(f, d2f); }
void
bench_del2(std::string name, int count, bool verify)
{
puts("Test del2:");
printf("f memory: %.2f MB.\n", vfmemsize(1)/(1024.0*1024.0));
printf("d2f memory: %.2f MB.\n", vfmemsize(1)/(1024.0*1024.0));
puts("Initalizing vector field.");
f = vf3dgpu(1);
d2f = vf3dgpu(1);
init_field(f, TEST_TRIG_INIT);
apply_periodic_bc(f);
puts("Done.");
warm_up(bench_op<del2_op<del2_default> >);
if (name == "default")
run_bench_del2("default", bench_op<del2_op<del2_default> >, count, verify);
else if (name == "same")
run_bench_del2("same", bench_op<del2_op<del2_same> >, count, verify);
else if (name == "all") {
run_bench_del2("default", bench_op<del2_op<del2_default> >, count, verify);
run_bench_del2("same", bench_op<del2_op<del2_same> >, count, verify);
}
else {
printf("No del2 kernel named '%s'\n", name.c_str());
return;
}
d2f.free();
f.free();
}
// Variables for add2, add3 and dotmul.
vf3dgpu a, b;
void call_add2() { add2(-1, a, 5, a, b); }
void call_add3() { add3(-1, a, 2, a, 3, a, b); }
void call_dotmul() { dotmul3(a, a, b);}
void
bench_add2(std::string name, int count, bool verify)
{
puts("Test add2:");
puts("Initalizing fields.");
a = vf3dgpu(1);
b = vf3dgpu(1);
init_field(a, TEST_TRIG_INIT);
puts("Done.");
timings timings;
if (verify)
clear_gpu_mem(b.mem(), vfmemsize(1)/sizeof(real));
timings = bench_op<call_add2>(count);
print_timings("add2", timings);
if (verify)
verify1(b, check_add2, false);
a.free();
b.free();
}
void
bench_add3(std::string name, int count, bool verify)
{
puts("Test add3:");
puts("Initalizing fields.");
a = vf3dgpu(1);
b = vf3dgpu(1);
init_field(a, TEST_TRIG_INIT);
puts("Done.");
timings timings;
if (verify)
clear_gpu_mem(b.mem(), vfmemsize(1)/sizeof(real));
timings = bench_op<call_add3>(count);
print_timings("add3", timings);
if (verify)
verify1(b, check_add2, false);
a.free();
b.free();
}
void
bench_dotmul(std::string name, int count, bool verify)
{
puts("Test dotmul:");
puts("Initalizing fields.");
a = vf3dgpu(3);
b = vf3dgpu(1);
init_field(a, TEST_TRIG_INIT);
puts("Done.");
timings timings;
if (verify)
clear_gpu_mem(b.mem(), vfmemsize(1)/sizeof(real));
timings = bench_op<call_dotmul>(count);
print_timings("dotmul", timings);
if (verify)
verify1(b, check_dotmul, false);
a.free();
b.free();
}
struct bench_opts {
int count;
std::string op;
std::string name;
bool verify;
bool help;
bench_opts(): count(100), op("all"), name("all"), verify(false), help(false) { }
void usage() {
puts(
"\n"
"Usage: bench [opt=val | verify | help] ...\n"
"\n"
"Options with defaults:\n"
"count=100 - number of iterations\n"
"op=all - [all|grad|div|curl|...]\n"
"name=all - [all|name of op]\n"
"verify - test the op for correctness\n"
"help - only show this message\n"
"\n"
"Example: bench count=200 op=grad name=default\n");
exit(1);
}
void parse(int argc, char *argv[]) {
int pstop;
opt_parser optp;
optp.int_opt("count", &count);
optp.string_opt("op", &op);
optp.string_opt("name", &name);
optp.toggle_opt("verify", &verify);
optp.toggle_opt("help", &help);
if ((pstop = optp.parse(argc, argv)) != argc) {
printf("Unknow option: '%s'\n", argv[pstop]);
usage();
}
if (help)
usage();
}
};
int
main(int argc, char *argv[])
{
bench_opts opts;
opts.parse(argc, argv);
if (opts.op == "all") {
bench_grad("all", opts.count, opts.verify);
bench_div("all", opts.count, opts.verify);
bench_curl("all", opts.count, opts.verify);
bench_del2("all", opts.count, opts.verify);
}
else {
if (opts.op == "grad")
bench_grad(opts.name, opts.count, opts.verify);
else if (opts.op == "div")
bench_div(opts.name, opts.count, opts.verify);
else if (opts.op == "curl")
bench_curl(opts.name, opts.count, opts.verify);
else if (opts.op == "del2")
bench_del2(opts.name, opts.count, opts.verify);
else if (opts.op == "add2")
bench_add2(opts.name, opts.count, opts.verify);
else if (opts.op == "add3")
bench_add3(opts.name, opts.count, opts.verify);
else if (opts.op == "dotmul")
bench_dotmul(opts.name, opts.count, opts.verify);
else
opts.usage();
}
printf("Reported timer resolution: %ld ns\n", time_resolution_ns());
return 0;
}
|
f0a14089016b83723916d63e742e52a75f32c1cb.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file writer_impl.cu
* @brief cuDF-IO parquet writer class implementation
*/
#include "writer_impl.hpp"
#include "compact_protocol_reader.hpp"
#include "compact_protocol_writer.hpp"
#include <io/statistics/column_statistics.cuh>
#include <io/utilities/column_utils.cuh>
#include <io/utilities/config_utils.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/utilities/column.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/table/table_device_view.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <nvcomp/snappy.h>
#include <thrust/binary_search.h>
#include <thrust/for_each.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/transform.h>
#include <thrust/tuple.h>
#include <algorithm>
#include <cstring>
#include <numeric>
#include <utility>
namespace cudf {
namespace io {
namespace detail {
namespace parquet {
using namespace cudf::io::parquet;
using namespace cudf::io;
namespace {
/**
* @brief Helper for pinned host memory
*/
template <typename T>
using pinned_buffer = std::unique_ptr<T, decltype(&hipHostFree)>;
/**
* @brief Function that translates GDF compression to parquet compression
*/
parquet::Compression to_parquet_compression(compression_type compression)
{
switch (compression) {
case compression_type::AUTO:
case compression_type::SNAPPY: return parquet::Compression::SNAPPY;
case compression_type::NONE: return parquet::Compression::UNCOMPRESSED;
default: CUDF_FAIL("Unsupported compression type");
}
}
} // namespace
struct aggregate_writer_metadata {
aggregate_writer_metadata(std::vector<partition_info> const& partitions,
size_type num_columns,
std::vector<SchemaElement> schema,
statistics_freq stats_granularity,
std::vector<std::map<std::string, std::string>> const& kv_md)
: version(1), schema(std::move(schema)), files(partitions.size())
{
for (size_t i = 0; i < partitions.size(); ++i) {
this->files[i].num_rows = partitions[i].num_rows;
}
this->column_order_listsize =
(stats_granularity != statistics_freq::STATISTICS_NONE) ? num_columns : 0;
for (size_t p = 0; p < kv_md.size(); ++p) {
std::transform(kv_md[p].begin(),
kv_md[p].end(),
std::back_inserter(this->files[p].key_value_metadata),
[](auto const& kv) {
return KeyValue{kv.first, kv.second};
});
}
}
void update_files(std::vector<partition_info> const& partitions)
{
CUDF_EXPECTS(partitions.size() == this->files.size(),
"New partitions must be same size as previously passed number of partitions");
for (size_t i = 0; i < partitions.size(); ++i) {
this->files[i].num_rows += partitions[i].num_rows;
}
}
FileMetaData get_metadata(size_t part)
{
CUDF_EXPECTS(part < files.size(), "Invalid part index queried");
FileMetaData meta{};
meta.version = this->version;
meta.schema = this->schema;
meta.num_rows = this->files[part].num_rows;
meta.row_groups = this->files[part].row_groups;
meta.key_value_metadata = this->files[part].key_value_metadata;
meta.created_by = this->created_by;
meta.column_order_listsize = this->column_order_listsize;
return meta;
}
void set_file_paths(std::vector<std::string> const& column_chunks_file_path)
{
for (size_t p = 0; p < this->files.size(); ++p) {
auto& file = this->files[p];
auto const& file_path = column_chunks_file_path[p];
for (auto& rowgroup : file.row_groups) {
for (auto& col : rowgroup.columns) {
col.file_path = file_path;
}
}
}
}
FileMetaData get_merged_metadata()
{
FileMetaData merged_md;
for (size_t p = 0; p < this->files.size(); ++p) {
auto& file = this->files[p];
if (p == 0) {
merged_md = this->get_metadata(0);
} else {
merged_md.row_groups.insert(merged_md.row_groups.end(),
std::make_move_iterator(file.row_groups.begin()),
std::make_move_iterator(file.row_groups.end()));
merged_md.num_rows += file.num_rows;
}
}
return merged_md;
}
std::vector<size_t> num_row_groups_per_file()
{
std::vector<size_t> global_rowgroup_base;
std::transform(this->files.begin(),
this->files.end(),
std::back_inserter(global_rowgroup_base),
[](auto const& part) { return part.row_groups.size(); });
return global_rowgroup_base;
}
[[nodiscard]] bool schema_matches(std::vector<SchemaElement> const& schema) const
{
return this->schema == schema;
}
auto& file(size_t p) { return files[p]; }
[[nodiscard]] size_t num_files() const { return files.size(); }
private:
int32_t version = 0;
std::vector<SchemaElement> schema;
struct per_file_metadata {
int64_t num_rows = 0;
std::vector<RowGroup> row_groups;
std::vector<KeyValue> key_value_metadata;
};
std::vector<per_file_metadata> files;
std::string created_by = "";
uint32_t column_order_listsize = 0;
};
/**
* @brief Extends SchemaElement to add members required in constructing parquet_column_view
*
* Added members are:
* 1. leaf_column: Pointer to leaf linked_column_view which points to the corresponding data stream
* of a leaf schema node. For non-leaf struct node, this is nullptr.
* 2. stats_dtype: datatype for statistics calculation required for the data stream of a leaf node.
* 3. ts_scale: scale to multiply or divide timestamp by in order to convert timestamp to parquet
* supported types
*/
struct schema_tree_node : public SchemaElement {
cudf::detail::LinkedColPtr leaf_column;
statistics_dtype stats_dtype;
int32_t ts_scale;
// TODO(fut): Think about making schema a class that holds a vector of schema_tree_nodes. The
// function construct_schema_tree could be its constructor. It can have method to get the per
// column nullability given a schema node index corresponding to a leaf schema. Much easier than
// that is a method to get path in schema, given a leaf node
};
struct leaf_schema_fn {
schema_tree_node& col_schema;
cudf::detail::LinkedColPtr const& col;
column_in_metadata const& col_meta;
bool timestamp_is_int96;
template <typename T>
std::enable_if_t<std::is_same_v<T, bool>, void> operator()()
{
col_schema.type = Type::BOOLEAN;
col_schema.stats_dtype = statistics_dtype::dtype_bool;
}
template <typename T>
std::enable_if_t<std::is_same_v<T, int8_t>, void> operator()()
{
col_schema.type = Type::INT32;
col_schema.converted_type = ConvertedType::INT_8;
col_schema.stats_dtype = statistics_dtype::dtype_int8;
}
template <typename T>
std::enable_if_t<std::is_same_v<T, int16_t>, void> operator()()
{
col_schema.type = Type::INT32;
col_schema.converted_type = ConvertedType::INT_16;
col_schema.stats_dtype = statistics_dtype::dtype_int16;
}
template <typename T>
std::enable_if_t<std::is_same_v<T, int32_t>, void> operator()()
{
col_schema.type = Type::INT32;
col_schema.stats_dtype = statistics_dtype::dtype_int32;
}
template <typename T>
std::enable_if_t<std::is_same_v<T, int64_t>, void> operator()()
{
col_schema.type = Type::INT64;
col_schema.stats_dtype = statistics_dtype::dtype_int64;
}
template <typename T>
std::enable_if_t<std::is_same_v<T, uint8_t>, void> operator()()
{
col_schema.type = Type::INT32;
col_schema.converted_type = ConvertedType::UINT_8;
col_schema.stats_dtype = statistics_dtype::dtype_int8;
}
template <typename T>
std::enable_if_t<std::is_same_v<T, uint16_t>, void> operator()()
{
col_schema.type = Type::INT32;
col_schema.converted_type = ConvertedType::UINT_16;
col_schema.stats_dtype = statistics_dtype::dtype_int16;
}
template <typename T>
std::enable_if_t<std::is_same_v<T, uint32_t>, void> operator()()
{
col_schema.type = Type::INT32;
col_schema.converted_type = ConvertedType::UINT_32;
col_schema.stats_dtype = statistics_dtype::dtype_int32;
}
template <typename T>
std::enable_if_t<std::is_same_v<T, uint64_t>, void> operator()()
{
col_schema.type = Type::INT64;
col_schema.converted_type = ConvertedType::UINT_64;
col_schema.stats_dtype = statistics_dtype::dtype_int64;
}
template <typename T>
std::enable_if_t<std::is_same_v<T, float>, void> operator()()
{
col_schema.type = Type::FLOAT;
col_schema.stats_dtype = statistics_dtype::dtype_float32;
}
template <typename T>
std::enable_if_t<std::is_same_v<T, double>, void> operator()()
{
col_schema.type = Type::DOUBLE;
col_schema.stats_dtype = statistics_dtype::dtype_float64;
}
template <typename T>
std::enable_if_t<std::is_same_v<T, cudf::string_view>, void> operator()()
{
col_schema.type = Type::BYTE_ARRAY;
col_schema.converted_type = ConvertedType::UTF8;
col_schema.stats_dtype = statistics_dtype::dtype_string;
}
template <typename T>
std::enable_if_t<std::is_same_v<T, cudf::timestamp_D>, void> operator()()
{
col_schema.type = Type::INT32;
col_schema.converted_type = ConvertedType::DATE;
col_schema.stats_dtype = statistics_dtype::dtype_int32;
}
template <typename T>
std::enable_if_t<std::is_same_v<T, cudf::timestamp_s>, void> operator()()
{
col_schema.type = (timestamp_is_int96) ? Type::INT96 : Type::INT64;
col_schema.converted_type =
(timestamp_is_int96) ? ConvertedType::UNKNOWN : ConvertedType::TIMESTAMP_MILLIS;
col_schema.stats_dtype = statistics_dtype::dtype_timestamp64;
col_schema.ts_scale = 1000;
}
template <typename T>
std::enable_if_t<std::is_same_v<T, cudf::timestamp_ms>, void> operator()()
{
col_schema.type = (timestamp_is_int96) ? Type::INT96 : Type::INT64;
col_schema.converted_type =
(timestamp_is_int96) ? ConvertedType::UNKNOWN : ConvertedType::TIMESTAMP_MILLIS;
col_schema.stats_dtype = statistics_dtype::dtype_timestamp64;
}
template <typename T>
std::enable_if_t<std::is_same_v<T, cudf::timestamp_us>, void> operator()()
{
col_schema.type = (timestamp_is_int96) ? Type::INT96 : Type::INT64;
col_schema.converted_type =
(timestamp_is_int96) ? ConvertedType::UNKNOWN : ConvertedType::TIMESTAMP_MICROS;
col_schema.stats_dtype = statistics_dtype::dtype_timestamp64;
}
template <typename T>
std::enable_if_t<std::is_same_v<T, cudf::timestamp_ns>, void> operator()()
{
col_schema.type = (timestamp_is_int96) ? Type::INT96 : Type::INT64;
col_schema.converted_type = ConvertedType::UNKNOWN;
col_schema.stats_dtype = statistics_dtype::dtype_timestamp64;
if (timestamp_is_int96) {
col_schema.ts_scale = -1000; // negative value indicates division by absolute value
}
// set logical type if it's not int96
else {
col_schema.logical_type.isset.TIMESTAMP = true;
col_schema.logical_type.TIMESTAMP.unit.isset.NANOS = true;
}
}
// unsupported outside cudf for parquet 1.0.
template <typename T>
std::enable_if_t<std::is_same_v<T, cudf::duration_D>, void> operator()()
{
col_schema.type = Type::INT32;
col_schema.converted_type = ConvertedType::TIME_MILLIS;
col_schema.stats_dtype = statistics_dtype::dtype_int64;
}
template <typename T>
std::enable_if_t<std::is_same_v<T, cudf::duration_s>, void> operator()()
{
col_schema.type = Type::INT64;
col_schema.converted_type = ConvertedType::TIME_MILLIS;
col_schema.stats_dtype = statistics_dtype::dtype_int64;
col_schema.ts_scale = 1000;
}
template <typename T>
std::enable_if_t<std::is_same_v<T, cudf::duration_ms>, void> operator()()
{
col_schema.type = Type::INT64;
col_schema.converted_type = ConvertedType::TIME_MILLIS;
col_schema.stats_dtype = statistics_dtype::dtype_int64;
}
template <typename T>
std::enable_if_t<std::is_same_v<T, cudf::duration_us>, void> operator()()
{
col_schema.type = Type::INT64;
col_schema.converted_type = ConvertedType::TIME_MICROS;
col_schema.stats_dtype = statistics_dtype::dtype_int64;
}
// unsupported outside cudf for parquet 1.0.
template <typename T>
std::enable_if_t<std::is_same_v<T, cudf::duration_ns>, void> operator()()
{
col_schema.type = Type::INT64;
col_schema.converted_type = ConvertedType::TIME_MICROS;
col_schema.stats_dtype = statistics_dtype::dtype_int64;
col_schema.ts_scale = -1000; // negative value indicates division by absolute value
}
template <typename T>
std::enable_if_t<cudf::is_fixed_point<T>(), void> operator()()
{
if (std::is_same_v<T, numeric::decimal32>) {
col_schema.type = Type::INT32;
col_schema.stats_dtype = statistics_dtype::dtype_int32;
col_schema.decimal_precision = 9;
} else if (std::is_same_v<T, numeric::decimal64>) {
col_schema.type = Type::INT64;
col_schema.stats_dtype = statistics_dtype::dtype_decimal64;
col_schema.decimal_precision = 18;
} else if (std::is_same_v<T, numeric::decimal128>) {
col_schema.type = Type::FIXED_LEN_BYTE_ARRAY;
col_schema.type_length = sizeof(__int128_t);
col_schema.stats_dtype = statistics_dtype::dtype_decimal128;
col_schema.decimal_precision = 38;
} else {
CUDF_FAIL("Unsupported fixed point type for parquet writer");
}
col_schema.converted_type = ConvertedType::DECIMAL;
col_schema.decimal_scale = -col->type().scale(); // parquet and cudf disagree about scale signs
if (col_meta.is_decimal_precision_set()) {
CUDF_EXPECTS(col_meta.get_decimal_precision() >= col_schema.decimal_scale,
"Precision must be equal to or greater than scale!");
col_schema.decimal_precision = col_meta.get_decimal_precision();
}
}
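// Illustrative example (not from the original source): a cudf decimal64 column whose type
// has scale -2 maps to Type::INT64 with parquet decimal_scale = 2 and a default
// decimal_precision of 18; if the column metadata sets a precision explicitly, the
// CUDF_EXPECTS above requires that precision to be >= 2 in this case.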
template <typename T>
std::enable_if_t<cudf::is_nested<T>(), void> operator()()
{
CUDF_FAIL("This functor is only meant for physical data types");
}
template <typename T>
std::enable_if_t<cudf::is_dictionary<T>(), void> operator()()
{
CUDF_FAIL("Dictionary columns are not supported for writing");
}
};
inline bool is_col_nullable(cudf::detail::LinkedColPtr const& col,
column_in_metadata const& col_meta,
bool single_write_mode)
{
if (single_write_mode) {
return col->nullable();
} else {
if (col_meta.is_nullability_defined()) {
CUDF_EXPECTS(col_meta.nullable() || !col->nullable(),
"Mismatch in metadata prescribed nullability and input column nullability. "
"Metadata for nullable input column cannot prescribe nullability = false");
return col_meta.nullable();
} else {
// For chunked write, when not provided nullability, we assume the worst case scenario
// that all columns are nullable.
return true;
}
}
}
/**
* @brief Construct schema from input columns and per-column input options
*
* Recursively traverses through linked_columns and corresponding metadata to construct schema tree.
* The resulting schema tree is stored in a vector in pre-order traversal order (an
* illustrative layout is sketched in the comment block after this function).
*/
std::vector<schema_tree_node> construct_schema_tree(
cudf::detail::LinkedColVector const& linked_columns,
table_input_metadata& metadata,
bool single_write_mode,
bool int96_timestamps)
{
std::vector<schema_tree_node> schema;
schema_tree_node root{};
root.type = UNDEFINED_TYPE;
root.repetition_type = NO_REPETITION_TYPE;
root.name = "schema";
root.num_children = linked_columns.size();
root.parent_idx = -1; // root schema has no parent
schema.push_back(std::move(root));
std::function<void(cudf::detail::LinkedColPtr const&, column_in_metadata&, size_t)> add_schema =
[&](cudf::detail::LinkedColPtr const& col, column_in_metadata& col_meta, size_t parent_idx) {
bool col_nullable = is_col_nullable(col, col_meta, single_write_mode);
auto set_field_id = [&schema, parent_idx](schema_tree_node& s,
column_in_metadata const& col_meta) {
if (schema[parent_idx].name != "list" and col_meta.is_parquet_field_id_set()) {
s.field_id = col_meta.get_parquet_field_id();
}
};
if (col->type().id() == type_id::STRUCT) {
// if struct, add current and recursively call for all children
schema_tree_node struct_schema{};
struct_schema.repetition_type =
col_nullable ? FieldRepetitionType::OPTIONAL : FieldRepetitionType::REQUIRED;
struct_schema.name = (schema[parent_idx].name == "list") ? "element" : col_meta.get_name();
struct_schema.num_children = col->children.size();
struct_schema.parent_idx = parent_idx;
set_field_id(struct_schema, col_meta);
schema.push_back(std::move(struct_schema));
auto struct_node_index = schema.size() - 1;
// for (auto child_it = col->children.begin(); child_it < col->children.end(); child_it++) {
// add_schema(*child_it, struct_node_index);
// }
CUDF_EXPECTS(col->children.size() == static_cast<size_t>(col_meta.num_children()),
"Mismatch in number of child columns between input table and metadata");
for (size_t i = 0; i < col->children.size(); ++i) {
add_schema(col->children[i], col_meta.child(i), struct_node_index);
}
} else if (col->type().id() == type_id::LIST && !col_meta.is_map()) {
// List schema is denoted by two levels for each nesting level and one final level for leaf.
// The top level is the same name as the column name.
// So e.g. List<List<int>> is denoted in the schema by
// "col_name" : { "list" : { "element" : { "list" : { "element" } } } }
schema_tree_node list_schema_1{};
list_schema_1.converted_type = ConvertedType::LIST;
list_schema_1.repetition_type =
col_nullable ? FieldRepetitionType::OPTIONAL : FieldRepetitionType::REQUIRED;
list_schema_1.name = (schema[parent_idx].name == "list") ? "element" : col_meta.get_name();
list_schema_1.num_children = 1;
list_schema_1.parent_idx = parent_idx;
set_field_id(list_schema_1, col_meta);
schema.push_back(std::move(list_schema_1));
schema_tree_node list_schema_2{};
list_schema_2.repetition_type = FieldRepetitionType::REPEATED;
list_schema_2.name = "list";
list_schema_2.num_children = 1;
list_schema_2.parent_idx = schema.size() - 1; // Parent is list_schema_1, last added.
schema.push_back(std::move(list_schema_2));
CUDF_EXPECTS(col_meta.num_children() == 2,
"List column's metadata should have exactly two children");
add_schema(col->children[lists_column_view::child_column_index],
col_meta.child(lists_column_view::child_column_index),
schema.size() - 1);
} else if (col->type().id() == type_id::LIST && col_meta.is_map()) {
// Map schema is denoted by a list of struct
// e.g. List<Struct<String,String>> will be
// "col_name" : { "key_value" : { "key", "value" } }
// verify the List child structure is a struct<left_child, right_child>
column_view struct_col = *col->children[lists_column_view::child_column_index];
CUDF_EXPECTS(struct_col.type().id() == type_id::STRUCT, "Map should be a List of struct");
CUDF_EXPECTS(struct_col.num_children() == 2,
"Map should be a List of struct with two children only but found " +
std::to_string(struct_col.num_children()));
schema_tree_node map_schema{};
map_schema.converted_type = ConvertedType::MAP;
map_schema.repetition_type =
col_nullable ? FieldRepetitionType::OPTIONAL : FieldRepetitionType::REQUIRED;
map_schema.name = col_meta.get_name();
if (col_meta.is_parquet_field_id_set()) {
map_schema.field_id = col_meta.get_parquet_field_id();
}
map_schema.num_children = 1;
map_schema.parent_idx = parent_idx;
schema.push_back(std::move(map_schema));
schema_tree_node repeat_group{};
repeat_group.repetition_type = FieldRepetitionType::REPEATED;
repeat_group.name = "key_value";
repeat_group.num_children = 2;
repeat_group.parent_idx = schema.size() - 1; // Parent is map_schema, last added.
schema.push_back(std::move(repeat_group));
CUDF_EXPECTS(col_meta.num_children() == 2,
"List column's metadata should have exactly two children");
CUDF_EXPECTS(col_meta.child(lists_column_view::child_column_index).num_children() == 2,
"Map struct column should have exactly two children");
// verify the col meta of children of the struct have name key and value
auto& left_child_meta = col_meta.child(lists_column_view::child_column_index).child(0);
left_child_meta.set_name("key");
left_child_meta.set_nullability(false);
auto& right_child_meta = col_meta.child(lists_column_view::child_column_index).child(1);
right_child_meta.set_name("value");
// check the repetition type of key is required i.e. the col should be non-nullable
auto key_col = col->children[lists_column_view::child_column_index]->children[0];
CUDF_EXPECTS(!is_col_nullable(key_col, left_child_meta, single_write_mode),
"key column cannot be nullable. For chunked writing, explicitly set the "
"nullability to false in metadata");
// process key
size_type struct_col_index = schema.size() - 1;
add_schema(key_col, left_child_meta, struct_col_index);
// process value
add_schema(col->children[lists_column_view::child_column_index]->children[1],
right_child_meta,
struct_col_index);
} else {
// if leaf, add current
if (col->type().id() == type_id::STRING) {
CUDF_EXPECTS(col_meta.num_children() == 2 or col_meta.num_children() == 0,
"String column's corresponding metadata should have zero or two children");
} else {
CUDF_EXPECTS(col_meta.num_children() == 0,
"Leaf column's corresponding metadata cannot have children");
}
schema_tree_node col_schema{};
bool timestamp_is_int96 = int96_timestamps or col_meta.is_enabled_int96_timestamps();
cudf::type_dispatcher(col->type(),
leaf_schema_fn{col_schema, col, col_meta, timestamp_is_int96});
col_schema.repetition_type = col_nullable ? OPTIONAL : REQUIRED;
col_schema.name = (schema[parent_idx].name == "list") ? "element" : col_meta.get_name();
col_schema.parent_idx = parent_idx;
col_schema.leaf_column = col;
set_field_id(col_schema, col_meta);
schema.push_back(col_schema);
}
};
CUDF_EXPECTS(metadata.column_metadata.size() == linked_columns.size(),
"Mismatch in the number of columns and the corresponding metadata elements");
// Add all linked_columns to schema using parent_idx = 0 (root)
for (size_t i = 0; i < linked_columns.size(); ++i) {
add_schema(linked_columns[i], metadata.column_metadata[i], 0);
}
return schema;
}
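// Illustrative pre-order layout produced by construct_schema_tree above for a single
// nullable LIST<INT32> column named "col" (a sketch, not taken from the original source):
// schema[0]: "schema" root, num_children = 1, parent_idx = -1
// schema[1]: "col", ConvertedType::LIST, OPTIONAL, num_children = 1, parent_idx = 0
// schema[2]: "list", REPEATED, num_children = 1, parent_idx = 1
// schema[3]: "element", Type::INT32 leaf (OPTIONAL or REQUIRED depending on element
// nullability), parent_idx = 2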
/**
* @brief Class to store parquet specific information for one data stream.
*
* Contains information about a single data stream. In case of struct columns, a data stream is one
* of the child leaf columns that contains data.
* e.g. A column Struct<int, List<float>> contains 2 data streams:
* - Struct<int>
* - Struct<List<float>>
*
*/
struct parquet_column_view {
parquet_column_view(schema_tree_node const& schema_node,
std::vector<schema_tree_node> const& schema_tree,
rmm::cuda_stream_view stream);
[[nodiscard]] column_view leaf_column_view() const;
[[nodiscard]] gpu::parquet_column_device_view get_device_view(rmm::cuda_stream_view stream) const;
[[nodiscard]] column_view cudf_column_view() const { return cudf_col; }
[[nodiscard]] parquet::Type physical_type() const { return schema_node.type; }
std::vector<std::string> const& get_path_in_schema() { return path_in_schema; }
// LIST related member functions
[[nodiscard]] uint8_t max_def_level() const noexcept { return _max_def_level; }
[[nodiscard]] uint8_t max_rep_level() const noexcept { return _max_rep_level; }
[[nodiscard]] bool is_list() const noexcept { return _is_list; }
private:
// Schema related members
schema_tree_node schema_node;
std::vector<std::string> path_in_schema;
uint8_t _max_def_level = 0;
uint8_t _max_rep_level = 0;
rmm::device_uvector<uint8_t> _d_nullability;
column_view cudf_col;
// List-related members
bool _is_list;
rmm::device_uvector<size_type>
_dremel_offsets; ///< For each row, the absolute offset into the repetition and definition
///< level vectors. O(num rows)
rmm::device_uvector<uint8_t> _rep_level;
rmm::device_uvector<uint8_t> _def_level;
std::vector<uint8_t> _nullability;
size_type _data_count = 0;
};
parquet_column_view::parquet_column_view(schema_tree_node const& schema_node,
std::vector<schema_tree_node> const& schema_tree,
rmm::cuda_stream_view stream)
: schema_node(schema_node),
_d_nullability(0, stream),
_dremel_offsets(0, stream),
_rep_level(0, stream),
_def_level(0, stream)
{
// Construct single inheritance column_view from linked_column_view
auto curr_col = schema_node.leaf_column.get();
column_view single_inheritance_cudf_col = *curr_col;
while (curr_col->parent) {
auto const& parent = *curr_col->parent;
// For list columns, we still need to retain the offset child column.
auto children =
(parent.type().id() == type_id::LIST)
? std::vector<column_view>{*parent.children[lists_column_view::offsets_column_index],
single_inheritance_cudf_col}
: std::vector<column_view>{single_inheritance_cudf_col};
single_inheritance_cudf_col = column_view(parent.type(),
parent.size(),
parent.head(),
parent.null_mask(),
UNKNOWN_NULL_COUNT,
parent.offset(),
children);
curr_col = curr_col->parent;
}
cudf_col = single_inheritance_cudf_col;
// Construct path_in_schema by travelling up in the schema_tree
std::vector<std::string> path;
auto curr_schema_node = schema_node;
do {
path.push_back(curr_schema_node.name);
if (curr_schema_node.parent_idx != -1) {
curr_schema_node = schema_tree[curr_schema_node.parent_idx];
}
} while (curr_schema_node.parent_idx != -1);
path_in_schema = std::vector<std::string>(path.crbegin(), path.crend());
// Calculate max definition level by counting the number of levels that are optional (nullable)
// and max repetition level by counting the number of REPEATED levels in this column's hierarchy
uint16_t max_def_level = 0;
uint16_t max_rep_level = 0;
curr_schema_node = schema_node;
while (curr_schema_node.parent_idx != -1) {
if (curr_schema_node.repetition_type == parquet::REPEATED or
curr_schema_node.repetition_type == parquet::OPTIONAL) {
++max_def_level;
}
if (curr_schema_node.repetition_type == parquet::REPEATED) { ++max_rep_level; }
curr_schema_node = schema_tree[curr_schema_node.parent_idx];
}
CUDF_EXPECTS(max_def_level < 256, "Definition levels above 255 are not supported");
CUDF_EXPECTS(max_rep_level < 256, "Repetition levels above 255 are not supported");
_max_def_level = max_def_level;
_max_rep_level = max_rep_level;
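// Worked example (illustrative only): for a nullable LIST column with nullable INT32
// elements, the walk from the leaf visits "element" (OPTIONAL: def +1), "list"
// (REPEATED: def +1, rep +1) and the top-level column node (OPTIONAL: def +1),
// giving _max_def_level = 3 and _max_rep_level = 1.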
// Construct nullability vector using repetition_type from schema.
std::vector<uint8_t> r_nullability;
curr_schema_node = schema_node;
while (curr_schema_node.parent_idx != -1) {
if (not curr_schema_node.is_stub()) {
r_nullability.push_back(curr_schema_node.repetition_type == FieldRepetitionType::OPTIONAL);
}
curr_schema_node = schema_tree[curr_schema_node.parent_idx];
}
_nullability = std::vector<uint8_t>(r_nullability.crbegin(), r_nullability.crend());
// TODO(cp): Explore doing this for all columns in a single go outside this ctor. Maybe using
// hostdevice_vector. Currently this involves a hipMemcpyAsync for each column.
_d_nullability = cudf::detail::make_device_uvector_async(_nullability, stream);
_is_list = (_max_rep_level > 0);
if (cudf_col.size() == 0) { return; }
if (_is_list) {
// Top level column's offsets are not applied to all children. Get the effective offset and
// size of the leaf column
// Calculate row offset into dremel data (repetition/definition values) and the respective
// definition and repetition levels
gpu::dremel_data dremel = gpu::get_dremel_data(cudf_col, _d_nullability, _nullability, stream);
_dremel_offsets = std::move(dremel.dremel_offsets);
_rep_level = std::move(dremel.rep_level);
_def_level = std::move(dremel.def_level);
_data_count = dremel.leaf_data_size; // Needed for knowing what size dictionary to allocate
stream.synchronize();
} else {
// For non-list struct, the size of the root column is the same as the size of the leaf column
_data_count = cudf_col.size();
}
}
column_view parquet_column_view::leaf_column_view() const
{
auto col = cudf_col;
while (cudf::is_nested(col.type())) {
if (col.type().id() == type_id::LIST) {
col = col.child(lists_column_view::child_column_index);
} else if (col.type().id() == type_id::STRUCT) {
col = col.child(0); // Stored cudf_col has only one child if struct
}
}
return col;
}
gpu::parquet_column_device_view parquet_column_view::get_device_view(
rmm::cuda_stream_view stream) const
{
column_view col = leaf_column_view();
auto desc = gpu::parquet_column_device_view{}; // Zero out all fields
desc.stats_dtype = schema_node.stats_dtype;
desc.ts_scale = schema_node.ts_scale;
if (is_list()) {
desc.level_offsets = _dremel_offsets.data();
desc.rep_values = _rep_level.data();
desc.def_values = _def_level.data();
}
desc.num_rows = cudf_col.size();
desc.physical_type = physical_type();
desc.level_bits = CompactProtocolReader::NumRequiredBits(max_rep_level()) << 4 |
CompactProtocolReader::NumRequiredBits(max_def_level());
desc.nullability = _d_nullability.data();
return desc;
}
void writer::impl::init_page_fragments(cudf::detail::hostdevice_2dvector<gpu::PageFragment>& frag,
device_span<gpu::parquet_column_device_view const> col_desc,
host_span<partition_info const> partitions,
device_span<int const> part_frag_offset,
uint32_t fragment_size)
{
auto d_partitions = cudf::detail::make_device_uvector_async(partitions, stream);
gpu::InitPageFragments(frag, col_desc, d_partitions, part_frag_offset, fragment_size, stream);
frag.device_to_host(stream, true);
}
void writer::impl::gather_fragment_statistics(
device_2dspan<statistics_chunk> frag_stats_chunk,
device_2dspan<gpu::PageFragment const> frag,
device_span<gpu::parquet_column_device_view const> col_desc,
uint32_t num_fragments)
{
auto num_columns = col_desc.size();
rmm::device_uvector<statistics_group> frag_stats_group(num_fragments * num_columns, stream);
auto frag_stats_group_2dview =
device_2dspan<statistics_group>(frag_stats_group.data(), num_columns, num_fragments);
gpu::InitFragmentStatistics(frag_stats_group_2dview, frag, col_desc, stream);
detail::calculate_group_statistics<detail::io_file_format::PARQUET>(frag_stats_chunk.data(),
frag_stats_group.data(),
num_fragments * num_columns,
stream,
int96_timestamps);
stream.synchronize();
}
void writer::impl::init_page_sizes(hostdevice_2dvector<gpu::EncColumnChunk>& chunks,
device_span<gpu::parquet_column_device_view const> col_desc,
uint32_t num_columns)
{
chunks.host_to_device(stream);
gpu::InitEncoderPages(chunks,
{},
col_desc,
num_columns,
max_page_size_bytes,
max_page_size_rows,
nullptr,
nullptr,
0,
stream);
chunks.device_to_host(stream, true);
}
auto build_chunk_dictionaries(hostdevice_2dvector<gpu::EncColumnChunk>& chunks,
host_span<gpu::parquet_column_device_view const> col_desc,
device_2dspan<gpu::PageFragment const> frags,
rmm::cuda_stream_view stream)
{
// At this point, we know all chunks and their sizes. We want to allocate dictionaries for each
// chunk that can have dictionary
auto h_chunks = chunks.host_view().flat_view();
std::vector<rmm::device_uvector<size_type>> dict_data;
std::vector<rmm::device_uvector<uint16_t>> dict_index;
if (h_chunks.size() == 0) { return std::pair(std::move(dict_data), std::move(dict_index)); }
// Allocate slots for each chunk
std::vector<rmm::device_uvector<gpu::slot_type>> hash_maps_storage;
hash_maps_storage.reserve(h_chunks.size());
for (auto& chunk : h_chunks) {
if (col_desc[chunk.col_desc_id].physical_type == Type::BOOLEAN) {
chunk.use_dictionary = false;
} else {
chunk.use_dictionary = true;
auto& inserted_map = hash_maps_storage.emplace_back(chunk.num_values, stream);
chunk.dict_map_slots = inserted_map.data();
chunk.dict_map_size = inserted_map.size();
}
}
chunks.host_to_device(stream);
gpu::initialize_chunk_hash_maps(chunks.device_view().flat_view(), stream);
gpu::populate_chunk_hash_maps(frags, stream);
chunks.device_to_host(stream, true);
// Make decision about which chunks have dictionary
for (auto& ck : h_chunks) {
if (not ck.use_dictionary) { continue; }
std::tie(ck.use_dictionary, ck.dict_rle_bits) = [&]() {
// calculate size of chunk if dictionary is used
// If we have N unique values then the idx for the last value is N - 1 and nbits is the number
// of bits required to encode indices into the dictionary
auto max_dict_index = (ck.num_dict_entries > 0) ? ck.num_dict_entries - 1 : 0;
auto nbits = CompactProtocolReader::NumRequiredBits(max_dict_index);
// We don't use dictionary if the indices are > 16 bits because that's the maximum bitpacking
// bitsize we efficiently support
if (nbits > 16) { return std::pair(false, 0); }
// Only these bit sizes are allowed for RLE encoding because it's compute optimized
constexpr auto allowed_bitsizes = std::array<size_type, 6>{1, 2, 4, 8, 12, 16};
// ceil to (1/2/4/8/12/16)
auto rle_bits = *std::lower_bound(allowed_bitsizes.begin(), allowed_bitsizes.end(), nbits);
auto rle_byte_size = util::div_rounding_up_safe(ck.num_values * rle_bits, 8);
auto dict_enc_size = ck.uniq_data_size + rle_byte_size;
bool use_dict = (ck.plain_data_size > dict_enc_size);
if (not use_dict) { rle_bits = 0; }
return std::pair(use_dict, rle_bits);
}();
}
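// Worked example of the sizing above (illustrative numbers only): a chunk with 1000
// dictionary entries has max_dict_index = 999, so nbits = 10 and rle_bits rounds up to 12.
// With 100000 values the RLE-encoded indices take ceil(100000 * 12 / 8) = 150000 bytes; if
// uniq_data_size is 4000 bytes (1000 unique INT32 values), dict_enc_size = 154000, so the
// dictionary is used whenever plain_data_size exceeds that (e.g. 400000 bytes of plain
// INT32 data).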
// TODO: (enh) Deallocate hash map storage for chunks that don't use dict and clear pointers.
dict_data.reserve(h_chunks.size());
dict_index.reserve(h_chunks.size());
for (auto& chunk : h_chunks) {
if (not chunk.use_dictionary) { continue; }
size_t dict_data_size = ::min(MAX_DICT_SIZE, chunk.dict_map_size);
auto& inserted_dict_data = dict_data.emplace_back(dict_data_size, stream);
auto& inserted_dict_index = dict_index.emplace_back(chunk.num_values, stream);
chunk.dict_data = inserted_dict_data.data();
chunk.dict_index = inserted_dict_index.data();
}
chunks.host_to_device(stream);
gpu::collect_map_entries(chunks.device_view().flat_view(), stream);
gpu::get_dictionary_indices(frags, stream);
return std::pair(std::move(dict_data), std::move(dict_index));
}
void writer::impl::init_encoder_pages(hostdevice_2dvector<gpu::EncColumnChunk>& chunks,
device_span<gpu::parquet_column_device_view const> col_desc,
device_span<gpu::EncPage> pages,
statistics_chunk* page_stats,
statistics_chunk* frag_stats,
size_t max_page_comp_data_size,
uint32_t num_columns,
uint32_t num_pages,
uint32_t num_stats_bfr)
{
rmm::device_uvector<statistics_merge_group> page_stats_mrg(num_stats_bfr, stream);
chunks.host_to_device(stream);
InitEncoderPages(chunks,
pages,
col_desc,
num_columns,
max_page_size_bytes,
max_page_size_rows,
(num_stats_bfr) ? page_stats_mrg.data() : nullptr,
(num_stats_bfr > num_pages) ? page_stats_mrg.data() + num_pages : nullptr,
max_page_comp_data_size,
stream);
if (num_stats_bfr > 0) {
detail::merge_group_statistics<detail::io_file_format::PARQUET>(
page_stats, frag_stats, page_stats_mrg.data(), num_pages, stream);
if (num_stats_bfr > num_pages) {
detail::merge_group_statistics<detail::io_file_format::PARQUET>(
page_stats + num_pages,
page_stats,
page_stats_mrg.data() + num_pages,
num_stats_bfr - num_pages,
stream);
}
}
stream.synchronize();
}
void snappy_compress(device_span<device_span<uint8_t const> const> comp_in,
device_span<device_span<uint8_t> const> comp_out,
device_span<decompress_status> comp_stats,
size_t max_page_uncomp_data_size,
rmm::cuda_stream_view stream)
{
size_t num_comp_pages = comp_in.size();
try {
size_t temp_size;
nvcompStatus_t nvcomp_status = nvcompBatchedSnappyCompressGetTempSize(
num_comp_pages, max_page_uncomp_data_size, nvcompBatchedSnappyDefaultOpts, &temp_size);
CUDF_EXPECTS(nvcomp_status == nvcompStatus_t::nvcompSuccess,
"Error in getting snappy compression scratch size");
    // Not needed now, but the nvcomp API makes no promises about the future
rmm::device_buffer scratch(temp_size, stream);
// Analogous to comp_in.srcDevice
rmm::device_uvector<void const*> uncompressed_data_ptrs(num_comp_pages, stream);
// Analogous to comp_in.srcSize
rmm::device_uvector<size_t> uncompressed_data_sizes(num_comp_pages, stream);
// Analogous to comp_in.dstDevice
rmm::device_uvector<void*> compressed_data_ptrs(num_comp_pages, stream);
// Analogous to comp_stat.bytes_written
rmm::device_uvector<size_t> compressed_bytes_written(num_comp_pages, stream);
// nvcomp does not currently use comp_in.dstSize. Cannot assume that the output will fit in
// the space allocated unless one uses the API nvcompBatchedSnappyCompressGetOutputSize()
// Prepare the vectors
auto comp_it =
thrust::make_zip_iterator(uncompressed_data_ptrs.begin(), uncompressed_data_sizes.begin());
thrust::transform(
rmm::exec_policy(stream),
comp_in.begin(),
comp_in.end(),
comp_it,
[] __device__(auto const& in) { return thrust::make_tuple(in.data(), in.size()); });
thrust::transform(rmm::exec_policy(stream),
comp_out.begin(),
comp_out.end(),
compressed_data_ptrs.begin(),
[] __device__(auto const& out) { return out.data(); });
nvcomp_status = nvcompBatchedSnappyCompressAsync(uncompressed_data_ptrs.data(),
uncompressed_data_sizes.data(),
max_page_uncomp_data_size,
num_comp_pages,
scratch.data(), // Not needed rn but future
scratch.size(),
compressed_data_ptrs.data(),
compressed_bytes_written.data(),
nvcompBatchedSnappyDefaultOpts,
stream.value());
CUDF_EXPECTS(nvcomp_status == nvcompStatus_t::nvcompSuccess, "Error in snappy compression");
    // nvcomp also doesn't use comp_out.status. It guarantees that, given enough output space,
    // compression will succeed.
    // The other `comp_out` field is `reserved`, which is for internal cuIO debugging and can be 0.
thrust::transform(rmm::exec_policy(stream),
compressed_bytes_written.begin(),
compressed_bytes_written.end(),
comp_stats.begin(),
[] __device__(size_t size) {
decompress_status status{};
status.bytes_written = size;
return status;
});
return;
} catch (...) {
    // If we reach this point then compression failed, so set an error status for each page
thrust::for_each(rmm::exec_policy(stream),
comp_stats.begin(),
comp_stats.end(),
[] __device__(decompress_status & stat) { stat.status = 1; });
  }
}
void writer::impl::encode_pages(hostdevice_2dvector<gpu::EncColumnChunk>& chunks,
device_span<gpu::EncPage> pages,
size_t max_page_uncomp_data_size,
uint32_t pages_in_batch,
uint32_t first_page_in_batch,
uint32_t rowgroups_in_batch,
uint32_t first_rowgroup,
const statistics_chunk* page_stats,
const statistics_chunk* chunk_stats)
{
auto batch_pages = pages.subspan(first_page_in_batch, pages_in_batch);
auto batch_pages_stats =
(page_stats != nullptr)
? device_span<statistics_chunk const>(page_stats + first_page_in_batch, pages_in_batch)
: device_span<statistics_chunk const>();
uint32_t max_comp_pages =
(compression_ != parquet::Compression::UNCOMPRESSED) ? pages_in_batch : 0;
rmm::device_uvector<device_span<uint8_t const>> comp_in(max_comp_pages, stream);
rmm::device_uvector<device_span<uint8_t>> comp_out(max_comp_pages, stream);
rmm::device_uvector<decompress_status> comp_stats(max_comp_pages, stream);
gpu::EncodePages(batch_pages, comp_in, comp_out, comp_stats, stream);
switch (compression_) {
case parquet::Compression::SNAPPY:
if (nvcomp_integration::is_stable_enabled()) {
snappy_compress(comp_in, comp_out, comp_stats, max_page_uncomp_data_size, stream);
} else {
gpu_snap(comp_in, comp_out, comp_stats, stream);
}
break;
default: break;
}
// TBD: Not clear if the official spec actually allows dynamically turning off compression at the
// chunk-level
auto d_chunks_in_batch = chunks.device_view().subspan(first_rowgroup, rowgroups_in_batch);
DecideCompression(d_chunks_in_batch.flat_view(), stream);
EncodePageHeaders(batch_pages, comp_stats, batch_pages_stats, chunk_stats, stream);
GatherPages(d_chunks_in_batch.flat_view(), pages, stream);
auto h_chunks_in_batch = chunks.host_view().subspan(first_rowgroup, rowgroups_in_batch);
CUDF_CUDA_TRY(hipMemcpyAsync(h_chunks_in_batch.data(),
d_chunks_in_batch.data(),
d_chunks_in_batch.flat_view().size_bytes(),
hipMemcpyDeviceToHost,
stream.value()));
stream.synchronize();
}
writer::impl::impl(std::vector<std::unique_ptr<data_sink>> sinks,
parquet_writer_options const& options,
SingleWriteMode mode,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
: _mr(mr),
stream(stream),
max_row_group_size{options.get_row_group_size_bytes()},
max_row_group_rows{options.get_row_group_size_rows()},
max_page_size_bytes(options.get_max_page_size_bytes()),
max_page_size_rows(options.get_max_page_size_rows()),
compression_(to_parquet_compression(options.get_compression())),
stats_granularity_(options.get_stats_level()),
int96_timestamps(options.is_enabled_int96_timestamps()),
kv_md(options.get_key_value_metadata()),
single_write_mode(mode == SingleWriteMode::YES),
out_sink_(std::move(sinks))
{
if (options.get_metadata()) {
table_meta = std::make_unique<table_input_metadata>(*options.get_metadata());
}
init_state();
}
writer::impl::impl(std::vector<std::unique_ptr<data_sink>> sinks,
chunked_parquet_writer_options const& options,
SingleWriteMode mode,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
: _mr(mr),
stream(stream),
max_row_group_size{options.get_row_group_size_bytes()},
max_row_group_rows{options.get_row_group_size_rows()},
max_page_size_bytes(options.get_max_page_size_bytes()),
max_page_size_rows(options.get_max_page_size_rows()),
compression_(to_parquet_compression(options.get_compression())),
stats_granularity_(options.get_stats_level()),
int96_timestamps(options.is_enabled_int96_timestamps()),
kv_md(options.get_key_value_metadata()),
single_write_mode(mode == SingleWriteMode::YES),
out_sink_(std::move(sinks))
{
if (options.get_metadata()) {
table_meta = std::make_unique<table_input_metadata>(*options.get_metadata());
}
init_state();
}
writer::impl::~impl() { close(); }
void writer::impl::init_state()
{
current_chunk_offset.resize(out_sink_.size());
// Write file header
file_header_s fhdr;
fhdr.magic = parquet_magic;
for (auto& sink : out_sink_) {
sink->host_write(&fhdr, sizeof(fhdr));
}
std::fill_n(current_chunk_offset.begin(), current_chunk_offset.size(), sizeof(file_header_s));
}
void writer::impl::write(table_view const& table, std::vector<partition_info> const& partitions)
{
last_write_successful = false;
CUDF_EXPECTS(not closed, "Data has already been flushed to out and closed");
if (not table_meta) { table_meta = std::make_unique<table_input_metadata>(table); }
// Fill unnamed columns' names in table_meta
std::function<void(column_in_metadata&, std::string)> add_default_name =
[&](column_in_metadata& col_meta, std::string default_name) {
if (col_meta.get_name().empty()) col_meta.set_name(default_name);
for (size_type i = 0; i < col_meta.num_children(); ++i) {
add_default_name(col_meta.child(i), col_meta.get_name() + "_" + std::to_string(i));
}
};
for (size_t i = 0; i < table_meta->column_metadata.size(); ++i) {
add_default_name(table_meta->column_metadata[i], "_col" + std::to_string(i));
}
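  // e.g. an unnamed third column is named "_col2"; its unnamed children become "_col2_0",
  // "_col2_1", and so on, recursively.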
auto vec = table_to_linked_columns(table);
auto schema_tree = construct_schema_tree(vec, *table_meta, single_write_mode, int96_timestamps);
// Construct parquet_column_views from the schema tree leaf nodes.
std::vector<parquet_column_view> parquet_columns;
for (schema_tree_node const& schema_node : schema_tree) {
if (schema_node.leaf_column) { parquet_columns.emplace_back(schema_node, schema_tree, stream); }
}
// Mass allocation of column_device_views for each parquet_column_view
std::vector<column_view> cudf_cols;
cudf_cols.reserve(parquet_columns.size());
for (auto const& parq_col : parquet_columns) {
cudf_cols.push_back(parq_col.cudf_column_view());
}
table_view single_streams_table(cudf_cols);
size_type num_columns = single_streams_table.num_columns();
std::vector<SchemaElement> this_table_schema(schema_tree.begin(), schema_tree.end());
if (!md) {
md = std::make_unique<aggregate_writer_metadata>(
partitions, num_columns, std::move(this_table_schema), stats_granularity_, kv_md);
} else {
// verify the user isn't passing mismatched tables
CUDF_EXPECTS(md->schema_matches(this_table_schema),
"Mismatch in schema between multiple calls to write_chunk");
md->update_files(partitions);
}
// Create table_device_view so that corresponding column_device_view data
// can be written into col_desc members
auto parent_column_table_device_view = table_device_view::create(single_streams_table, stream);
rmm::device_uvector<column_device_view> leaf_column_views(0, stream);
// Initialize column description
hostdevice_vector<gpu::parquet_column_device_view> col_desc(parquet_columns.size(), stream);
std::transform(
parquet_columns.begin(), parquet_columns.end(), col_desc.host_ptr(), [&](auto const& pcol) {
return pcol.get_device_view(stream);
});
// Init page fragments
  // A fragment size of 5000 rows is good enough for strings of up to ~200 characters. Longer
  // strings will start producing fragments larger than the desired page size.
  // TODO: keep track of the max fragment size, and iteratively reduce this value if the largest
  // fragment exceeds the max page size limit (we ideally want the page size to be below 1MB so as
  // to have enough pages to get good compression/decompression performance).
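  // Illustrative arithmetic: at 5000 rows per fragment and ~200 bytes per row, a fragment holds
  // roughly 5000 * 200 = 1,000,000 bytes, i.e. about the 1MB page-size target mentioned above.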
using cudf::io::parquet::gpu::max_page_fragment_size;
std::vector<int> num_frag_in_part;
std::transform(partitions.begin(),
partitions.end(),
std::back_inserter(num_frag_in_part),
[](auto const& part) {
return util::div_rounding_up_unsafe(part.num_rows, max_page_fragment_size);
});
size_type num_fragments = std::reduce(num_frag_in_part.begin(), num_frag_in_part.end());
std::vector<int> part_frag_offset; // Store the idx of the first fragment in each partition
std::exclusive_scan(
num_frag_in_part.begin(), num_frag_in_part.end(), std::back_inserter(part_frag_offset), 0);
part_frag_offset.push_back(part_frag_offset.back() + num_frag_in_part.back());
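  // Illustrative example: partitions of {10000, 2500} rows with the 5000-row fragment size above
  // give num_frag_in_part = {2, 1} and part_frag_offset = {0, 2, 3} (the last entry is the total).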
auto d_part_frag_offset = cudf::detail::make_device_uvector_async(part_frag_offset, stream);
cudf::detail::hostdevice_2dvector<gpu::PageFragment> fragments(
num_columns, num_fragments, stream);
if (num_fragments != 0) {
// Move column info to device
col_desc.host_to_device(stream);
leaf_column_views = create_leaf_column_device_views<gpu::parquet_column_device_view>(
col_desc, *parent_column_table_device_view, stream);
init_page_fragments(
fragments, col_desc, partitions, d_part_frag_offset, max_page_fragment_size);
}
std::vector<size_t> const global_rowgroup_base = md->num_row_groups_per_file();
// Decide row group boundaries based on uncompressed data size
int num_rowgroups = 0;
std::vector<int> num_rg_in_part(partitions.size());
for (size_t p = 0; p < partitions.size(); ++p) {
size_type curr_rg_num_rows = 0;
size_t curr_rg_data_size = 0;
int first_frag_in_rg = part_frag_offset[p];
int last_frag_in_part = part_frag_offset[p + 1] - 1;
for (auto f = first_frag_in_rg; f <= last_frag_in_part; ++f) {
size_t fragment_data_size = 0;
for (auto c = 0; c < num_columns; c++) {
fragment_data_size += fragments[c][f].fragment_data_size;
}
size_type fragment_num_rows = fragments[0][f].num_rows;
// If the fragment size gets larger than rg limit then break off a rg
if (f > first_frag_in_rg && // There has to be at least one fragment in row group
(curr_rg_data_size + fragment_data_size > max_row_group_size ||
curr_rg_num_rows + fragment_num_rows > max_row_group_rows)) {
auto& rg = md->file(p).row_groups.emplace_back();
rg.num_rows = curr_rg_num_rows;
num_rowgroups++;
num_rg_in_part[p]++;
curr_rg_num_rows = 0;
curr_rg_data_size = 0;
first_frag_in_rg = f;
}
curr_rg_num_rows += fragment_num_rows;
curr_rg_data_size += fragment_data_size;
// TODO: (wishful) refactor to consolidate with above if block
if (f == last_frag_in_part) {
auto& rg = md->file(p).row_groups.emplace_back();
rg.num_rows = curr_rg_num_rows;
num_rowgroups++;
num_rg_in_part[p]++;
}
}
}
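  // Note: a row group is closed when appending the next fragment would exceed either
  // max_row_group_size bytes or max_row_group_rows rows, and always at the last fragment of a
  // partition, so row groups never span partition boundaries.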
// Allocate column chunks and gather fragment statistics
rmm::device_uvector<statistics_chunk> frag_stats(0, stream);
if (stats_granularity_ != statistics_freq::STATISTICS_NONE) {
frag_stats.resize(num_fragments * num_columns, stream);
if (not frag_stats.is_empty()) {
auto frag_stats_2dview =
device_2dspan<statistics_chunk>(frag_stats.data(), num_columns, num_fragments);
gather_fragment_statistics(frag_stats_2dview, fragments, col_desc, num_fragments);
}
}
std::vector<int> first_rg_in_part;
std::exclusive_scan(
num_rg_in_part.begin(), num_rg_in_part.end(), std::back_inserter(first_rg_in_part), 0);
// Initialize row groups and column chunks
auto const num_chunks = num_rowgroups * num_columns;
hostdevice_2dvector<gpu::EncColumnChunk> chunks(num_rowgroups, num_columns, stream);
for (size_t p = 0; p < partitions.size(); ++p) {
int f = part_frag_offset[p];
size_type start_row = partitions[p].start_row;
for (int r = 0; r < num_rg_in_part[p]; r++) {
      size_t global_r = global_rowgroup_base[p] + r;  // offset by rowgroups already in this file
auto& row_group = md->file(p).row_groups[global_r];
uint32_t fragments_in_chunk =
util::div_rounding_up_unsafe(row_group.num_rows, max_page_fragment_size);
row_group.total_byte_size = 0;
row_group.columns.resize(num_columns);
for (int c = 0; c < num_columns; c++) {
gpu::EncColumnChunk& ck = chunks[r + first_rg_in_part[p]][c];
ck = {};
ck.col_desc = col_desc.device_ptr() + c;
ck.col_desc_id = c;
ck.fragments = &fragments.device_view()[c][f];
ck.stats =
(not frag_stats.is_empty()) ? frag_stats.data() + c * num_fragments + f : nullptr;
ck.start_row = start_row;
ck.num_rows = (uint32_t)row_group.num_rows;
ck.first_fragment = c * num_fragments + f;
auto chunk_fragments = fragments[c].subspan(f, fragments_in_chunk);
        // Set each fragment's back-pointer to the chunk it belongs to
for (auto& frag : chunk_fragments) {
frag.chunk = &chunks.device_view()[r + first_rg_in_part[p]][c];
}
ck.num_values = std::accumulate(
chunk_fragments.begin(), chunk_fragments.end(), 0, [](uint32_t l, auto r) {
return l + r.num_values;
});
ck.plain_data_size = std::accumulate(
chunk_fragments.begin(), chunk_fragments.end(), 0, [](int sum, gpu::PageFragment frag) {
return sum + frag.fragment_data_size;
});
auto& column_chunk_meta = row_group.columns[c].meta_data;
column_chunk_meta.type = parquet_columns[c].physical_type();
column_chunk_meta.encodings = {Encoding::PLAIN, Encoding::RLE};
column_chunk_meta.path_in_schema = parquet_columns[c].get_path_in_schema();
column_chunk_meta.codec = UNCOMPRESSED;
column_chunk_meta.num_values = ck.num_values;
}
f += fragments_in_chunk;
start_row += (uint32_t)row_group.num_rows;
}
}
fragments.host_to_device(stream);
auto dict_info_owner = build_chunk_dictionaries(chunks, col_desc, fragments, stream);
for (size_t p = 0; p < partitions.size(); p++) {
for (int rg = 0; rg < num_rg_in_part[p]; rg++) {
size_t global_rg = global_rowgroup_base[p] + rg;
for (int col = 0; col < num_columns; col++) {
if (chunks.host_view()[rg][col].use_dictionary) {
md->file(p).row_groups[global_rg].columns[col].meta_data.encodings.push_back(
Encoding::PLAIN_DICTIONARY);
}
}
}
}
// Build chunk dictionaries and count pages
if (num_chunks != 0) { init_page_sizes(chunks, col_desc, num_columns); }
// Get the maximum page size across all chunks
size_type max_page_uncomp_data_size =
std::accumulate(chunks.host_view().flat_view().begin(),
chunks.host_view().flat_view().end(),
0,
[](uint32_t max_page_size, gpu::EncColumnChunk const& chunk) {
return ::max(max_page_size, chunk.max_page_data_size);
});
size_t max_page_comp_data_size = 0;
if (compression_ != parquet::Compression::UNCOMPRESSED) {
auto status = nvcompBatchedSnappyCompressGetMaxOutputChunkSize(
max_page_uncomp_data_size, nvcompBatchedSnappyDefaultOpts, &max_page_comp_data_size);
CUDF_EXPECTS(status == nvcompStatus_t::nvcompSuccess,
"Error in getting compressed size from nvcomp");
}
// Find which partition a rg belongs to
std::vector<int> rg_to_part;
for (size_t p = 0; p < num_rg_in_part.size(); ++p) {
std::fill_n(std::back_inserter(rg_to_part), num_rg_in_part[p], p);
}
// Initialize batches of rowgroups to encode (mainly to limit peak memory usage)
std::vector<size_type> batch_list;
size_type num_pages = 0;
size_t max_bytes_in_batch = 1024 * 1024 * 1024; // 1GB - TODO: Tune this
size_t max_uncomp_bfr_size = 0;
size_t max_comp_bfr_size = 0;
size_t max_chunk_bfr_size = 0;
size_type max_pages_in_batch = 0;
size_t bytes_in_batch = 0;
size_t comp_bytes_in_batch = 0;
for (size_type r = 0, groups_in_batch = 0, pages_in_batch = 0; r <= num_rowgroups; r++) {
size_t rowgroup_size = 0;
size_t comp_rowgroup_size = 0;
if (r < num_rowgroups) {
for (int i = 0; i < num_columns; i++) {
gpu::EncColumnChunk* ck = &chunks[r][i];
ck->first_page = num_pages;
num_pages += ck->num_pages;
pages_in_batch += ck->num_pages;
rowgroup_size += ck->bfr_size;
ck->compressed_size =
ck->ck_stat_size + ck->page_headers_size + max_page_comp_data_size * ck->num_pages;
comp_rowgroup_size += ck->compressed_size;
max_chunk_bfr_size =
::max(max_chunk_bfr_size, (size_t)::max(ck->bfr_size, ck->compressed_size));
}
}
// TBD: We may want to also shorten the batch if we have enough pages (not just based on size)
if ((r == num_rowgroups) ||
(groups_in_batch != 0 && bytes_in_batch + rowgroup_size > max_bytes_in_batch)) {
max_uncomp_bfr_size = ::max(max_uncomp_bfr_size, bytes_in_batch);
max_comp_bfr_size = ::max(max_comp_bfr_size, comp_bytes_in_batch);
max_pages_in_batch = ::max(max_pages_in_batch, pages_in_batch);
if (groups_in_batch != 0) {
batch_list.push_back(groups_in_batch);
groups_in_batch = 0;
}
bytes_in_batch = 0;
comp_bytes_in_batch = 0;
pages_in_batch = 0;
}
bytes_in_batch += rowgroup_size;
comp_bytes_in_batch += comp_rowgroup_size;
groups_in_batch++;
}
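  // Example of the batching above: if three consecutive row groups hold 400MB, 500MB and 300MB of
  // uncompressed page data, the first two fit in one 1GB batch and the third starts a new one,
  // so batch_list would be {2, 1}.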
// Clear compressed buffer size if compression has been turned off
if (compression_ == parquet::Compression::UNCOMPRESSED) { max_comp_bfr_size = 0; }
// Initialize data pointers in batch
uint32_t num_stats_bfr =
(stats_granularity_ != statistics_freq::STATISTICS_NONE) ? num_pages + num_chunks : 0;
rmm::device_buffer uncomp_bfr(max_uncomp_bfr_size, stream);
rmm::device_buffer comp_bfr(max_comp_bfr_size, stream);
rmm::device_uvector<gpu::EncPage> pages(num_pages, stream);
// This contains stats for both the pages and the rowgroups. TODO: make them separate.
rmm::device_uvector<statistics_chunk> page_stats(num_stats_bfr, stream);
for (auto b = 0, r = 0; b < static_cast<size_type>(batch_list.size()); b++) {
auto bfr = static_cast<uint8_t*>(uncomp_bfr.data());
auto bfr_c = static_cast<uint8_t*>(comp_bfr.data());
for (auto j = 0; j < batch_list[b]; j++, r++) {
for (auto i = 0; i < num_columns; i++) {
gpu::EncColumnChunk& ck = chunks[r][i];
ck.uncompressed_bfr = bfr;
ck.compressed_bfr = bfr_c;
bfr += ck.bfr_size;
bfr_c += ck.compressed_size;
}
}
}
if (num_pages != 0) {
init_encoder_pages(chunks,
col_desc,
{pages.data(), pages.size()},
(num_stats_bfr) ? page_stats.data() : nullptr,
(num_stats_bfr) ? frag_stats.data() : nullptr,
max_page_comp_data_size,
num_columns,
num_pages,
num_stats_bfr);
}
pinned_buffer<uint8_t> host_bfr{nullptr, hipHostFree};
// Encode row groups in batches
for (auto b = 0, r = 0; b < static_cast<size_type>(batch_list.size()); b++) {
// Count pages in this batch
auto const rnext = r + batch_list[b];
auto const first_page_in_batch = chunks[r][0].first_page;
auto const first_page_in_next_batch =
(rnext < num_rowgroups) ? chunks[rnext][0].first_page : num_pages;
auto const pages_in_batch = first_page_in_next_batch - first_page_in_batch;
// device_span<gpu::EncPage> batch_pages{pages.data() + first_page_in_batch, }
encode_pages(
chunks,
{pages.data(), pages.size()},
max_page_uncomp_data_size,
pages_in_batch,
first_page_in_batch,
batch_list[b],
r,
(stats_granularity_ == statistics_freq::STATISTICS_PAGE) ? page_stats.data() : nullptr,
(stats_granularity_ != statistics_freq::STATISTICS_NONE) ? page_stats.data() + num_pages
: nullptr);
std::vector<std::future<void>> write_tasks;
for (; r < rnext; r++) {
int p = rg_to_part[r];
int global_r = global_rowgroup_base[p] + r - first_rg_in_part[p];
auto& row_group = md->file(p).row_groups[global_r];
for (auto i = 0; i < num_columns; i++) {
gpu::EncColumnChunk& ck = chunks[r][i];
auto& column_chunk_meta = row_group.columns[i].meta_data;
uint8_t* dev_bfr;
if (ck.is_compressed) {
column_chunk_meta.codec = compression_;
dev_bfr = ck.compressed_bfr;
} else {
dev_bfr = ck.uncompressed_bfr;
}
if (out_sink_[p]->is_device_write_preferred(ck.compressed_size)) {
// let the writer do what it wants to retrieve the data from the gpu.
write_tasks.push_back(out_sink_[p]->device_write_async(
dev_bfr + ck.ck_stat_size, ck.compressed_size, stream));
// we still need to do a (much smaller) memcpy for the statistics.
if (ck.ck_stat_size != 0) {
column_chunk_meta.statistics_blob.resize(ck.ck_stat_size);
CUDF_CUDA_TRY(hipMemcpyAsync(column_chunk_meta.statistics_blob.data(),
dev_bfr,
ck.ck_stat_size,
hipMemcpyDeviceToHost,
stream.value()));
stream.synchronize();
}
} else {
if (!host_bfr) {
host_bfr = pinned_buffer<uint8_t>{[](size_t size) {
uint8_t* ptr = nullptr;
CUDF_CUDA_TRY(hipHostMalloc(&ptr, size));
return ptr;
}(max_chunk_bfr_size),
hipHostFree};
}
// copy the full data
CUDF_CUDA_TRY(hipMemcpyAsync(host_bfr.get(),
dev_bfr,
ck.ck_stat_size + ck.compressed_size,
hipMemcpyDeviceToHost,
stream.value()));
stream.synchronize();
out_sink_[p]->host_write(host_bfr.get() + ck.ck_stat_size, ck.compressed_size);
if (ck.ck_stat_size != 0) {
column_chunk_meta.statistics_blob.resize(ck.ck_stat_size);
memcpy(column_chunk_meta.statistics_blob.data(), host_bfr.get(), ck.ck_stat_size);
}
}
row_group.total_byte_size += ck.compressed_size;
column_chunk_meta.data_page_offset =
current_chunk_offset[p] + ((ck.use_dictionary) ? ck.dictionary_size : 0);
column_chunk_meta.dictionary_page_offset =
(ck.use_dictionary) ? current_chunk_offset[p] : 0;
column_chunk_meta.total_uncompressed_size = ck.bfr_size;
column_chunk_meta.total_compressed_size = ck.compressed_size;
current_chunk_offset[p] += ck.compressed_size;
}
}
for (auto const& task : write_tasks) {
task.wait();
}
}
last_write_successful = true;
}
std::unique_ptr<std::vector<uint8_t>> writer::impl::close(
std::vector<std::string> const& column_chunks_file_path)
{
if (closed) { return nullptr; }
closed = true;
if (not last_write_successful) { return nullptr; }
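  // Each sink is finalized with the standard parquet footer: the thrift-encoded FileMetaData,
  // followed by file_ender_s (the 4-byte footer length and the "PAR1" magic).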
for (size_t p = 0; p < out_sink_.size(); p++) {
std::vector<uint8_t> buffer;
CompactProtocolWriter cpw(&buffer);
file_ender_s fendr;
buffer.resize(0);
fendr.footer_len = static_cast<uint32_t>(cpw.write(md->get_metadata(p)));
fendr.magic = parquet_magic;
out_sink_[p]->host_write(buffer.data(), buffer.size());
out_sink_[p]->host_write(&fendr, sizeof(fendr));
out_sink_[p]->flush();
}
// Optionally output raw file metadata with the specified column chunk file path
if (column_chunks_file_path.size() > 0) {
CUDF_EXPECTS(column_chunks_file_path.size() == md->num_files(),
"Expected one column chunk path per output file");
md->set_file_paths(column_chunks_file_path);
file_header_s fhdr = {parquet_magic};
std::vector<uint8_t> buffer;
CompactProtocolWriter cpw(&buffer);
buffer.insert(buffer.end(),
reinterpret_cast<const uint8_t*>(&fhdr),
reinterpret_cast<const uint8_t*>(&fhdr) + sizeof(fhdr));
file_ender_s fendr;
fendr.magic = parquet_magic;
fendr.footer_len = static_cast<uint32_t>(cpw.write(md->get_merged_metadata()));
buffer.insert(buffer.end(),
reinterpret_cast<const uint8_t*>(&fendr),
reinterpret_cast<const uint8_t*>(&fendr) + sizeof(fendr));
return std::make_unique<std::vector<uint8_t>>(std::move(buffer));
} else {
return {nullptr};
}
return nullptr;
}
// Forward to implementation
writer::writer(std::vector<std::unique_ptr<data_sink>> sinks,
parquet_writer_options const& options,
SingleWriteMode mode,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
: _impl(std::make_unique<impl>(std::move(sinks), options, mode, stream, mr))
{
}
writer::writer(std::vector<std::unique_ptr<data_sink>> sinks,
chunked_parquet_writer_options const& options,
SingleWriteMode mode,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
: _impl(std::make_unique<impl>(std::move(sinks), options, mode, stream, mr))
{
}
// Destructor within this translation unit
writer::~writer() = default;
// Forward to implementation
void writer::write(table_view const& table, std::vector<partition_info> const& partitions)
{
_impl->write(
table, partitions.empty() ? std::vector<partition_info>{{0, table.num_rows()}} : partitions);
}
// Forward to implementation
std::unique_ptr<std::vector<uint8_t>> writer::close(
std::vector<std::string> const& column_chunks_file_path)
{
return _impl->close(column_chunks_file_path);
}
std::unique_ptr<std::vector<uint8_t>> writer::merge_row_group_metadata(
std::vector<std::unique_ptr<std::vector<uint8_t>>> const& metadata_list)
{
std::vector<uint8_t> output;
CompactProtocolWriter cpw(&output);
FileMetaData md;
md.row_groups.reserve(metadata_list.size());
for (const auto& blob : metadata_list) {
CompactProtocolReader cpreader(
blob.get()->data(),
std::max<size_t>(blob.get()->size(), sizeof(file_ender_s)) - sizeof(file_ender_s));
cpreader.skip_bytes(sizeof(file_header_s)); // Skip over file header
if (md.num_rows == 0) {
cpreader.read(&md);
} else {
FileMetaData tmp;
cpreader.read(&tmp);
md.row_groups.insert(md.row_groups.end(),
std::make_move_iterator(tmp.row_groups.begin()),
std::make_move_iterator(tmp.row_groups.end()));
md.num_rows += tmp.num_rows;
}
}
// Reader doesn't currently populate column_order, so infer it here
if (md.row_groups.size() != 0) {
uint32_t num_columns = static_cast<uint32_t>(md.row_groups[0].columns.size());
md.column_order_listsize =
(num_columns > 0 && md.row_groups[0].columns[0].meta_data.statistics_blob.size())
? num_columns
: 0;
}
// Thrift-encode the resulting output
file_header_s fhdr;
file_ender_s fendr;
fhdr.magic = parquet_magic;
output.insert(output.end(),
reinterpret_cast<const uint8_t*>(&fhdr),
reinterpret_cast<const uint8_t*>(&fhdr) + sizeof(fhdr));
fendr.footer_len = static_cast<uint32_t>(cpw.write(md));
fendr.magic = parquet_magic;
output.insert(output.end(),
reinterpret_cast<const uint8_t*>(&fendr),
reinterpret_cast<const uint8_t*>(&fendr) + sizeof(fendr));
return std::make_unique<std::vector<uint8_t>>(std::move(output));
}
} // namespace parquet
} // namespace detail
} // namespace io
} // namespace cudf
| f0a14089016b83723916d63e742e52a75f32c1cb.cu | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file writer_impl.cu
* @brief cuDF-IO parquet writer class implementation
*/
#include "writer_impl.hpp"
#include "compact_protocol_reader.hpp"
#include "compact_protocol_writer.hpp"
#include <io/statistics/column_statistics.cuh>
#include <io/utilities/column_utils.cuh>
#include <io/utilities/config_utils.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/utilities/column.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/table/table_device_view.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <nvcomp/snappy.h>
#include <thrust/binary_search.h>
#include <thrust/for_each.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/transform.h>
#include <thrust/tuple.h>
#include <algorithm>
#include <cstring>
#include <numeric>
#include <utility>
namespace cudf {
namespace io {
namespace detail {
namespace parquet {
using namespace cudf::io::parquet;
using namespace cudf::io;
namespace {
/**
* @brief Helper for pinned host memory
*/
template <typename T>
using pinned_buffer = std::unique_ptr<T, decltype(&cudaFreeHost)>;
/**
* @brief Function that translates GDF compression to parquet compression
*/
parquet::Compression to_parquet_compression(compression_type compression)
{
switch (compression) {
case compression_type::AUTO:
case compression_type::SNAPPY: return parquet::Compression::SNAPPY;
case compression_type::NONE: return parquet::Compression::UNCOMPRESSED;
default: CUDF_FAIL("Unsupported compression type");
}
}
} // namespace
struct aggregate_writer_metadata {
aggregate_writer_metadata(std::vector<partition_info> const& partitions,
size_type num_columns,
std::vector<SchemaElement> schema,
statistics_freq stats_granularity,
std::vector<std::map<std::string, std::string>> const& kv_md)
: version(1), schema(std::move(schema)), files(partitions.size())
{
for (size_t i = 0; i < partitions.size(); ++i) {
this->files[i].num_rows = partitions[i].num_rows;
}
this->column_order_listsize =
(stats_granularity != statistics_freq::STATISTICS_NONE) ? num_columns : 0;
for (size_t p = 0; p < kv_md.size(); ++p) {
std::transform(kv_md[p].begin(),
kv_md[p].end(),
std::back_inserter(this->files[p].key_value_metadata),
[](auto const& kv) {
return KeyValue{kv.first, kv.second};
});
}
}
void update_files(std::vector<partition_info> const& partitions)
{
CUDF_EXPECTS(partitions.size() == this->files.size(),
"New partitions must be same size as previously passed number of partitions");
for (size_t i = 0; i < partitions.size(); ++i) {
this->files[i].num_rows += partitions[i].num_rows;
}
}
FileMetaData get_metadata(size_t part)
{
CUDF_EXPECTS(part < files.size(), "Invalid part index queried");
FileMetaData meta{};
meta.version = this->version;
meta.schema = this->schema;
meta.num_rows = this->files[part].num_rows;
meta.row_groups = this->files[part].row_groups;
meta.key_value_metadata = this->files[part].key_value_metadata;
meta.created_by = this->created_by;
meta.column_order_listsize = this->column_order_listsize;
return meta;
}
void set_file_paths(std::vector<std::string> const& column_chunks_file_path)
{
for (size_t p = 0; p < this->files.size(); ++p) {
auto& file = this->files[p];
auto const& file_path = column_chunks_file_path[p];
for (auto& rowgroup : file.row_groups) {
for (auto& col : rowgroup.columns) {
col.file_path = file_path;
}
}
}
}
FileMetaData get_merged_metadata()
{
FileMetaData merged_md;
for (size_t p = 0; p < this->files.size(); ++p) {
auto& file = this->files[p];
if (p == 0) {
merged_md = this->get_metadata(0);
} else {
merged_md.row_groups.insert(merged_md.row_groups.end(),
std::make_move_iterator(file.row_groups.begin()),
std::make_move_iterator(file.row_groups.end()));
merged_md.num_rows += file.num_rows;
}
}
return merged_md;
}
std::vector<size_t> num_row_groups_per_file()
{
std::vector<size_t> global_rowgroup_base;
std::transform(this->files.begin(),
this->files.end(),
std::back_inserter(global_rowgroup_base),
[](auto const& part) { return part.row_groups.size(); });
return global_rowgroup_base;
}
[[nodiscard]] bool schema_matches(std::vector<SchemaElement> const& schema) const
{
return this->schema == schema;
}
auto& file(size_t p) { return files[p]; }
[[nodiscard]] size_t num_files() const { return files.size(); }
private:
int32_t version = 0;
std::vector<SchemaElement> schema;
struct per_file_metadata {
int64_t num_rows = 0;
std::vector<RowGroup> row_groups;
std::vector<KeyValue> key_value_metadata;
};
std::vector<per_file_metadata> files;
std::string created_by = "";
uint32_t column_order_listsize = 0;
};
/**
* @brief Extends SchemaElement to add members required in constructing parquet_column_view
*
* Added members are:
* 1. leaf_column: Pointer to leaf linked_column_view which points to the corresponding data stream
* of a leaf schema node. For non-leaf struct node, this is nullptr.
* 2. stats_dtype: datatype for statistics calculation required for the data stream of a leaf node.
* 3. ts_scale: scale to multiply or divide timestamp by in order to convert timestamp to parquet
* supported types
*/
struct schema_tree_node : public SchemaElement {
cudf::detail::LinkedColPtr leaf_column;
statistics_dtype stats_dtype;
int32_t ts_scale;
  // TODO(fut): Think about making schema a class that holds a vector of schema_tree_nodes. The
  // function construct_schema_tree could be its constructor. It could have a method to get the
  // per-column nullability given a schema node index corresponding to a leaf schema. Even simpler
  // would be a method to get the path in schema, given a leaf node.
};
struct leaf_schema_fn {
schema_tree_node& col_schema;
cudf::detail::LinkedColPtr const& col;
column_in_metadata const& col_meta;
bool timestamp_is_int96;
template <typename T>
std::enable_if_t<std::is_same_v<T, bool>, void> operator()()
{
col_schema.type = Type::BOOLEAN;
col_schema.stats_dtype = statistics_dtype::dtype_bool;
}
template <typename T>
std::enable_if_t<std::is_same_v<T, int8_t>, void> operator()()
{
col_schema.type = Type::INT32;
col_schema.converted_type = ConvertedType::INT_8;
col_schema.stats_dtype = statistics_dtype::dtype_int8;
}
template <typename T>
std::enable_if_t<std::is_same_v<T, int16_t>, void> operator()()
{
col_schema.type = Type::INT32;
col_schema.converted_type = ConvertedType::INT_16;
col_schema.stats_dtype = statistics_dtype::dtype_int16;
}
template <typename T>
std::enable_if_t<std::is_same_v<T, int32_t>, void> operator()()
{
col_schema.type = Type::INT32;
col_schema.stats_dtype = statistics_dtype::dtype_int32;
}
template <typename T>
std::enable_if_t<std::is_same_v<T, int64_t>, void> operator()()
{
col_schema.type = Type::INT64;
col_schema.stats_dtype = statistics_dtype::dtype_int64;
}
template <typename T>
std::enable_if_t<std::is_same_v<T, uint8_t>, void> operator()()
{
col_schema.type = Type::INT32;
col_schema.converted_type = ConvertedType::UINT_8;
col_schema.stats_dtype = statistics_dtype::dtype_int8;
}
template <typename T>
std::enable_if_t<std::is_same_v<T, uint16_t>, void> operator()()
{
col_schema.type = Type::INT32;
col_schema.converted_type = ConvertedType::UINT_16;
col_schema.stats_dtype = statistics_dtype::dtype_int16;
}
template <typename T>
std::enable_if_t<std::is_same_v<T, uint32_t>, void> operator()()
{
col_schema.type = Type::INT32;
col_schema.converted_type = ConvertedType::UINT_32;
col_schema.stats_dtype = statistics_dtype::dtype_int32;
}
template <typename T>
std::enable_if_t<std::is_same_v<T, uint64_t>, void> operator()()
{
col_schema.type = Type::INT64;
col_schema.converted_type = ConvertedType::UINT_64;
col_schema.stats_dtype = statistics_dtype::dtype_int64;
}
template <typename T>
std::enable_if_t<std::is_same_v<T, float>, void> operator()()
{
col_schema.type = Type::FLOAT;
col_schema.stats_dtype = statistics_dtype::dtype_float32;
}
template <typename T>
std::enable_if_t<std::is_same_v<T, double>, void> operator()()
{
col_schema.type = Type::DOUBLE;
col_schema.stats_dtype = statistics_dtype::dtype_float64;
}
template <typename T>
std::enable_if_t<std::is_same_v<T, cudf::string_view>, void> operator()()
{
col_schema.type = Type::BYTE_ARRAY;
col_schema.converted_type = ConvertedType::UTF8;
col_schema.stats_dtype = statistics_dtype::dtype_string;
}
template <typename T>
std::enable_if_t<std::is_same_v<T, cudf::timestamp_D>, void> operator()()
{
col_schema.type = Type::INT32;
col_schema.converted_type = ConvertedType::DATE;
col_schema.stats_dtype = statistics_dtype::dtype_int32;
}
template <typename T>
std::enable_if_t<std::is_same_v<T, cudf::timestamp_s>, void> operator()()
{
col_schema.type = (timestamp_is_int96) ? Type::INT96 : Type::INT64;
col_schema.converted_type =
(timestamp_is_int96) ? ConvertedType::UNKNOWN : ConvertedType::TIMESTAMP_MILLIS;
col_schema.stats_dtype = statistics_dtype::dtype_timestamp64;
col_schema.ts_scale = 1000;
}
template <typename T>
std::enable_if_t<std::is_same_v<T, cudf::timestamp_ms>, void> operator()()
{
col_schema.type = (timestamp_is_int96) ? Type::INT96 : Type::INT64;
col_schema.converted_type =
(timestamp_is_int96) ? ConvertedType::UNKNOWN : ConvertedType::TIMESTAMP_MILLIS;
col_schema.stats_dtype = statistics_dtype::dtype_timestamp64;
}
template <typename T>
std::enable_if_t<std::is_same_v<T, cudf::timestamp_us>, void> operator()()
{
col_schema.type = (timestamp_is_int96) ? Type::INT96 : Type::INT64;
col_schema.converted_type =
(timestamp_is_int96) ? ConvertedType::UNKNOWN : ConvertedType::TIMESTAMP_MICROS;
col_schema.stats_dtype = statistics_dtype::dtype_timestamp64;
}
template <typename T>
std::enable_if_t<std::is_same_v<T, cudf::timestamp_ns>, void> operator()()
{
col_schema.type = (timestamp_is_int96) ? Type::INT96 : Type::INT64;
col_schema.converted_type = ConvertedType::UNKNOWN;
col_schema.stats_dtype = statistics_dtype::dtype_timestamp64;
if (timestamp_is_int96) {
col_schema.ts_scale = -1000; // negative value indicates division by absolute value
}
// set logical type if it's not int96
else {
col_schema.logical_type.isset.TIMESTAMP = true;
col_schema.logical_type.TIMESTAMP.unit.isset.NANOS = true;
}
}
// unsupported outside cudf for parquet 1.0.
template <typename T>
std::enable_if_t<std::is_same_v<T, cudf::duration_D>, void> operator()()
{
col_schema.type = Type::INT32;
col_schema.converted_type = ConvertedType::TIME_MILLIS;
col_schema.stats_dtype = statistics_dtype::dtype_int64;
}
template <typename T>
std::enable_if_t<std::is_same_v<T, cudf::duration_s>, void> operator()()
{
col_schema.type = Type::INT64;
col_schema.converted_type = ConvertedType::TIME_MILLIS;
col_schema.stats_dtype = statistics_dtype::dtype_int64;
col_schema.ts_scale = 1000;
}
template <typename T>
std::enable_if_t<std::is_same_v<T, cudf::duration_ms>, void> operator()()
{
col_schema.type = Type::INT64;
col_schema.converted_type = ConvertedType::TIME_MILLIS;
col_schema.stats_dtype = statistics_dtype::dtype_int64;
}
template <typename T>
std::enable_if_t<std::is_same_v<T, cudf::duration_us>, void> operator()()
{
col_schema.type = Type::INT64;
col_schema.converted_type = ConvertedType::TIME_MICROS;
col_schema.stats_dtype = statistics_dtype::dtype_int64;
}
// unsupported outside cudf for parquet 1.0.
template <typename T>
std::enable_if_t<std::is_same_v<T, cudf::duration_ns>, void> operator()()
{
col_schema.type = Type::INT64;
col_schema.converted_type = ConvertedType::TIME_MICROS;
col_schema.stats_dtype = statistics_dtype::dtype_int64;
col_schema.ts_scale = -1000; // negative value indicates division by absolute value
}
template <typename T>
std::enable_if_t<cudf::is_fixed_point<T>(), void> operator()()
{
if (std::is_same_v<T, numeric::decimal32>) {
col_schema.type = Type::INT32;
col_schema.stats_dtype = statistics_dtype::dtype_int32;
col_schema.decimal_precision = 9;
} else if (std::is_same_v<T, numeric::decimal64>) {
col_schema.type = Type::INT64;
col_schema.stats_dtype = statistics_dtype::dtype_decimal64;
col_schema.decimal_precision = 18;
} else if (std::is_same_v<T, numeric::decimal128>) {
col_schema.type = Type::FIXED_LEN_BYTE_ARRAY;
col_schema.type_length = sizeof(__int128_t);
col_schema.stats_dtype = statistics_dtype::dtype_decimal128;
col_schema.decimal_precision = 38;
} else {
CUDF_FAIL("Unsupported fixed point type for parquet writer");
}
col_schema.converted_type = ConvertedType::DECIMAL;
col_schema.decimal_scale = -col->type().scale(); // parquet and cudf disagree about scale signs
if (col_meta.is_decimal_precision_set()) {
CUDF_EXPECTS(col_meta.get_decimal_precision() >= col_schema.decimal_scale,
"Precision must be equal to or greater than scale!");
col_schema.decimal_precision = col_meta.get_decimal_precision();
}
}
template <typename T>
std::enable_if_t<cudf::is_nested<T>(), void> operator()()
{
CUDF_FAIL("This functor is only meant for physical data types");
}
template <typename T>
std::enable_if_t<cudf::is_dictionary<T>(), void> operator()()
{
CUDF_FAIL("Dictionary columns are not supported for writing");
}
};
inline bool is_col_nullable(cudf::detail::LinkedColPtr const& col,
column_in_metadata const& col_meta,
bool single_write_mode)
{
if (single_write_mode) {
return col->nullable();
} else {
if (col_meta.is_nullability_defined()) {
CUDF_EXPECTS(col_meta.nullable() || !col->nullable(),
"Mismatch in metadata prescribed nullability and input column nullability. "
"Metadata for nullable input column cannot prescribe nullability = false");
return col_meta.nullable();
} else {
      // For chunked write, when nullability is not provided, we assume the worst case scenario:
      // all columns are nullable.
return true;
}
}
}
/**
* @brief Construct schema from input columns and per-column input options
*
* Recursively traverses through linked_columns and corresponding metadata to construct schema tree.
* The resulting schema tree is stored in a vector in pre-order traversal order.
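 *
 * Example (illustrative): a table with columns {INT32 "a", LIST<INT32> "b"} produces, in order:
 * the root ("schema"), "a", "b" (annotated as LIST), "b"."list" (REPEATED), and
 * "b"."list"."element" (the leaf).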
*/
std::vector<schema_tree_node> construct_schema_tree(
cudf::detail::LinkedColVector const& linked_columns,
table_input_metadata& metadata,
bool single_write_mode,
bool int96_timestamps)
{
std::vector<schema_tree_node> schema;
schema_tree_node root{};
root.type = UNDEFINED_TYPE;
root.repetition_type = NO_REPETITION_TYPE;
root.name = "schema";
root.num_children = linked_columns.size();
root.parent_idx = -1; // root schema has no parent
schema.push_back(std::move(root));
std::function<void(cudf::detail::LinkedColPtr const&, column_in_metadata&, size_t)> add_schema =
[&](cudf::detail::LinkedColPtr const& col, column_in_metadata& col_meta, size_t parent_idx) {
bool col_nullable = is_col_nullable(col, col_meta, single_write_mode);
auto set_field_id = [&schema, parent_idx](schema_tree_node& s,
column_in_metadata const& col_meta) {
if (schema[parent_idx].name != "list" and col_meta.is_parquet_field_id_set()) {
s.field_id = col_meta.get_parquet_field_id();
}
};
if (col->type().id() == type_id::STRUCT) {
// if struct, add current and recursively call for all children
schema_tree_node struct_schema{};
struct_schema.repetition_type =
col_nullable ? FieldRepetitionType::OPTIONAL : FieldRepetitionType::REQUIRED;
struct_schema.name = (schema[parent_idx].name == "list") ? "element" : col_meta.get_name();
struct_schema.num_children = col->children.size();
struct_schema.parent_idx = parent_idx;
set_field_id(struct_schema, col_meta);
schema.push_back(std::move(struct_schema));
auto struct_node_index = schema.size() - 1;
// for (auto child_it = col->children.begin(); child_it < col->children.end(); child_it++) {
// add_schema(*child_it, struct_node_index);
// }
CUDF_EXPECTS(col->children.size() == static_cast<size_t>(col_meta.num_children()),
"Mismatch in number of child columns between input table and metadata");
for (size_t i = 0; i < col->children.size(); ++i) {
add_schema(col->children[i], col_meta.child(i), struct_node_index);
}
} else if (col->type().id() == type_id::LIST && !col_meta.is_map()) {
// List schema is denoted by two levels for each nesting level and one final level for leaf.
// The top level is the same name as the column name.
// So e.g. List<List<int>> is denoted in the schema by
// "col_name" : { "list" : { "element" : { "list" : { "element" } } } }
schema_tree_node list_schema_1{};
list_schema_1.converted_type = ConvertedType::LIST;
list_schema_1.repetition_type =
col_nullable ? FieldRepetitionType::OPTIONAL : FieldRepetitionType::REQUIRED;
list_schema_1.name = (schema[parent_idx].name == "list") ? "element" : col_meta.get_name();
list_schema_1.num_children = 1;
list_schema_1.parent_idx = parent_idx;
set_field_id(list_schema_1, col_meta);
schema.push_back(std::move(list_schema_1));
schema_tree_node list_schema_2{};
list_schema_2.repetition_type = FieldRepetitionType::REPEATED;
list_schema_2.name = "list";
list_schema_2.num_children = 1;
list_schema_2.parent_idx = schema.size() - 1; // Parent is list_schema_1, last added.
schema.push_back(std::move(list_schema_2));
CUDF_EXPECTS(col_meta.num_children() == 2,
"List column's metadata should have exactly two children");
add_schema(col->children[lists_column_view::child_column_index],
col_meta.child(lists_column_view::child_column_index),
schema.size() - 1);
} else if (col->type().id() == type_id::LIST && col_meta.is_map()) {
// Map schema is denoted by a list of struct
// e.g. List<Struct<String,String>> will be
// "col_name" : { "key_value" : { "key", "value" } }
// verify the List child structure is a struct<left_child, right_child>
column_view struct_col = *col->children[lists_column_view::child_column_index];
CUDF_EXPECTS(struct_col.type().id() == type_id::STRUCT, "Map should be a List of struct");
CUDF_EXPECTS(struct_col.num_children() == 2,
"Map should be a List of struct with two children only but found " +
std::to_string(struct_col.num_children()));
schema_tree_node map_schema{};
map_schema.converted_type = ConvertedType::MAP;
map_schema.repetition_type =
col_nullable ? FieldRepetitionType::OPTIONAL : FieldRepetitionType::REQUIRED;
map_schema.name = col_meta.get_name();
if (col_meta.is_parquet_field_id_set()) {
map_schema.field_id = col_meta.get_parquet_field_id();
}
map_schema.num_children = 1;
map_schema.parent_idx = parent_idx;
schema.push_back(std::move(map_schema));
schema_tree_node repeat_group{};
repeat_group.repetition_type = FieldRepetitionType::REPEATED;
repeat_group.name = "key_value";
repeat_group.num_children = 2;
repeat_group.parent_idx = schema.size() - 1; // Parent is map_schema, last added.
schema.push_back(std::move(repeat_group));
CUDF_EXPECTS(col_meta.num_children() == 2,
"List column's metadata should have exactly two children");
CUDF_EXPECTS(col_meta.child(lists_column_view::child_column_index).num_children() == 2,
"Map struct column should have exactly two children");
// verify the col meta of children of the struct have name key and value
auto& left_child_meta = col_meta.child(lists_column_view::child_column_index).child(0);
left_child_meta.set_name("key");
left_child_meta.set_nullability(false);
auto& right_child_meta = col_meta.child(lists_column_view::child_column_index).child(1);
right_child_meta.set_name("value");
// check the repetition type of key is required i.e. the col should be non-nullable
auto key_col = col->children[lists_column_view::child_column_index]->children[0];
CUDF_EXPECTS(!is_col_nullable(key_col, left_child_meta, single_write_mode),
"key column cannot be nullable. For chunked writing, explicitly set the "
"nullability to false in metadata");
// process key
size_type struct_col_index = schema.size() - 1;
add_schema(key_col, left_child_meta, struct_col_index);
// process value
add_schema(col->children[lists_column_view::child_column_index]->children[1],
right_child_meta,
struct_col_index);
} else {
// if leaf, add current
if (col->type().id() == type_id::STRING) {
CUDF_EXPECTS(col_meta.num_children() == 2 or col_meta.num_children() == 0,
"String column's corresponding metadata should have zero or two children");
} else {
CUDF_EXPECTS(col_meta.num_children() == 0,
"Leaf column's corresponding metadata cannot have children");
}
schema_tree_node col_schema{};
bool timestamp_is_int96 = int96_timestamps or col_meta.is_enabled_int96_timestamps();
cudf::type_dispatcher(col->type(),
leaf_schema_fn{col_schema, col, col_meta, timestamp_is_int96});
col_schema.repetition_type = col_nullable ? OPTIONAL : REQUIRED;
col_schema.name = (schema[parent_idx].name == "list") ? "element" : col_meta.get_name();
col_schema.parent_idx = parent_idx;
col_schema.leaf_column = col;
set_field_id(col_schema, col_meta);
schema.push_back(col_schema);
}
};
CUDF_EXPECTS(metadata.column_metadata.size() == linked_columns.size(),
"Mismatch in the number of columns and the corresponding metadata elements");
// Add all linked_columns to schema using parent_idx = 0 (root)
for (size_t i = 0; i < linked_columns.size(); ++i) {
add_schema(linked_columns[i], metadata.column_metadata[i], 0);
}
return schema;
}
/**
* @brief Class to store parquet specific information for one data stream.
*
* Contains information about a single data stream. In case of struct columns, a data stream is one
* of the child leaf columns that contains data.
* e.g. A column Struct<int, List<float>> contains 2 data streams:
* - Struct<int>
* - Struct<List<float>>
*
*/
struct parquet_column_view {
parquet_column_view(schema_tree_node const& schema_node,
std::vector<schema_tree_node> const& schema_tree,
rmm::cuda_stream_view stream);
[[nodiscard]] column_view leaf_column_view() const;
[[nodiscard]] gpu::parquet_column_device_view get_device_view(rmm::cuda_stream_view stream) const;
[[nodiscard]] column_view cudf_column_view() const { return cudf_col; }
[[nodiscard]] parquet::Type physical_type() const { return schema_node.type; }
std::vector<std::string> const& get_path_in_schema() { return path_in_schema; }
// LIST related member functions
[[nodiscard]] uint8_t max_def_level() const noexcept { return _max_def_level; }
[[nodiscard]] uint8_t max_rep_level() const noexcept { return _max_rep_level; }
[[nodiscard]] bool is_list() const noexcept { return _is_list; }
private:
// Schema related members
schema_tree_node schema_node;
std::vector<std::string> path_in_schema;
uint8_t _max_def_level = 0;
uint8_t _max_rep_level = 0;
rmm::device_uvector<uint8_t> _d_nullability;
column_view cudf_col;
// List-related members
bool _is_list;
rmm::device_uvector<size_type>
_dremel_offsets; ///< For each row, the absolute offset into the repetition and definition
///< level vectors. O(num rows)
rmm::device_uvector<uint8_t> _rep_level;
rmm::device_uvector<uint8_t> _def_level;
std::vector<uint8_t> _nullability;
size_type _data_count = 0;
};
parquet_column_view::parquet_column_view(schema_tree_node const& schema_node,
std::vector<schema_tree_node> const& schema_tree,
rmm::cuda_stream_view stream)
: schema_node(schema_node),
_d_nullability(0, stream),
_dremel_offsets(0, stream),
_rep_level(0, stream),
_def_level(0, stream)
{
// Construct single inheritance column_view from linked_column_view
auto curr_col = schema_node.leaf_column.get();
column_view single_inheritance_cudf_col = *curr_col;
while (curr_col->parent) {
auto const& parent = *curr_col->parent;
// For list columns, we still need to retain the offset child column.
auto children =
(parent.type().id() == type_id::LIST)
? std::vector<column_view>{*parent.children[lists_column_view::offsets_column_index],
single_inheritance_cudf_col}
: std::vector<column_view>{single_inheritance_cudf_col};
single_inheritance_cudf_col = column_view(parent.type(),
parent.size(),
parent.head(),
parent.null_mask(),
UNKNOWN_NULL_COUNT,
parent.offset(),
children);
curr_col = curr_col->parent;
}
cudf_col = single_inheritance_cudf_col;
// Construct path_in_schema by travelling up in the schema_tree
std::vector<std::string> path;
auto curr_schema_node = schema_node;
do {
path.push_back(curr_schema_node.name);
if (curr_schema_node.parent_idx != -1) {
curr_schema_node = schema_tree[curr_schema_node.parent_idx];
}
} while (curr_schema_node.parent_idx != -1);
path_in_schema = std::vector<std::string>(path.crbegin(), path.crend());
// Calculate max definition level by counting the number of levels that are optional (nullable)
// and max repetition level by counting the number of REPEATED levels in this column's hierarchy
uint16_t max_def_level = 0;
uint16_t max_rep_level = 0;
curr_schema_node = schema_node;
while (curr_schema_node.parent_idx != -1) {
if (curr_schema_node.repetition_type == parquet::REPEATED or
curr_schema_node.repetition_type == parquet::OPTIONAL) {
++max_def_level;
}
if (curr_schema_node.repetition_type == parquet::REPEATED) { ++max_rep_level; }
curr_schema_node = schema_tree[curr_schema_node.parent_idx];
}
CUDF_EXPECTS(max_def_level < 256, "Definition levels above 255 are not supported");
  CUDF_EXPECTS(max_rep_level < 256, "Repetition levels above 255 are not supported");
_max_def_level = max_def_level;
_max_rep_level = max_rep_level;
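  // Example: a nullable LIST of nullable INT32 has three non-root levels (OPTIONAL list column,
  // REPEATED "list" node, OPTIONAL "element"), giving _max_def_level = 3 and _max_rep_level = 1.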
// Construct nullability vector using repetition_type from schema.
std::vector<uint8_t> r_nullability;
curr_schema_node = schema_node;
while (curr_schema_node.parent_idx != -1) {
if (not curr_schema_node.is_stub()) {
r_nullability.push_back(curr_schema_node.repetition_type == FieldRepetitionType::OPTIONAL);
}
curr_schema_node = schema_tree[curr_schema_node.parent_idx];
}
_nullability = std::vector<uint8_t>(r_nullability.crbegin(), r_nullability.crend());
// TODO(cp): Explore doing this for all columns in a single go outside this ctor. Maybe using
// hostdevice_vector. Currently this involves a cudaMemcpyAsync for each column.
_d_nullability = cudf::detail::make_device_uvector_async(_nullability, stream);
_is_list = (_max_rep_level > 0);
if (cudf_col.size() == 0) { return; }
if (_is_list) {
// Top level column's offsets are not applied to all children. Get the effective offset and
// size of the leaf column
// Calculate row offset into dremel data (repetition/definition values) and the respective
// definition and repetition levels
gpu::dremel_data dremel = gpu::get_dremel_data(cudf_col, _d_nullability, _nullability, stream);
_dremel_offsets = std::move(dremel.dremel_offsets);
_rep_level = std::move(dremel.rep_level);
_def_level = std::move(dremel.def_level);
_data_count = dremel.leaf_data_size; // Needed for knowing what size dictionary to allocate
stream.synchronize();
} else {
// For non-list struct, the size of the root column is the same as the size of the leaf column
_data_count = cudf_col.size();
}
}
column_view parquet_column_view::leaf_column_view() const
{
auto col = cudf_col;
while (cudf::is_nested(col.type())) {
if (col.type().id() == type_id::LIST) {
col = col.child(lists_column_view::child_column_index);
} else if (col.type().id() == type_id::STRUCT) {
col = col.child(0); // Stored cudf_col has only one child if struct
}
}
return col;
}
gpu::parquet_column_device_view parquet_column_view::get_device_view(
rmm::cuda_stream_view stream) const
{
column_view col = leaf_column_view();
auto desc = gpu::parquet_column_device_view{}; // Zero out all fields
desc.stats_dtype = schema_node.stats_dtype;
desc.ts_scale = schema_node.ts_scale;
if (is_list()) {
desc.level_offsets = _dremel_offsets.data();
desc.rep_values = _rep_level.data();
desc.def_values = _def_level.data();
}
desc.num_rows = cudf_col.size();
desc.physical_type = physical_type();
desc.level_bits = CompactProtocolReader::NumRequiredBits(max_rep_level()) << 4 |
CompactProtocolReader::NumRequiredBits(max_def_level());
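  // e.g. with max_rep_level() == 1 and max_def_level() == 3 this packs NumRequiredBits(1) = 1
  // into the upper nibble and NumRequiredBits(3) = 2 into the lower nibble, i.e. 0x12.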
desc.nullability = _d_nullability.data();
return desc;
}
void writer::impl::init_page_fragments(cudf::detail::hostdevice_2dvector<gpu::PageFragment>& frag,
device_span<gpu::parquet_column_device_view const> col_desc,
host_span<partition_info const> partitions,
device_span<int const> part_frag_offset,
uint32_t fragment_size)
{
auto d_partitions = cudf::detail::make_device_uvector_async(partitions, stream);
gpu::InitPageFragments(frag, col_desc, d_partitions, part_frag_offset, fragment_size, stream);
frag.device_to_host(stream, true);
}
void writer::impl::gather_fragment_statistics(
device_2dspan<statistics_chunk> frag_stats_chunk,
device_2dspan<gpu::PageFragment const> frag,
device_span<gpu::parquet_column_device_view const> col_desc,
uint32_t num_fragments)
{
auto num_columns = col_desc.size();
rmm::device_uvector<statistics_group> frag_stats_group(num_fragments * num_columns, stream);
auto frag_stats_group_2dview =
device_2dspan<statistics_group>(frag_stats_group.data(), num_columns, num_fragments);
gpu::InitFragmentStatistics(frag_stats_group_2dview, frag, col_desc, stream);
detail::calculate_group_statistics<detail::io_file_format::PARQUET>(frag_stats_chunk.data(),
frag_stats_group.data(),
num_fragments * num_columns,
stream,
int96_timestamps);
stream.synchronize();
}
void writer::impl::init_page_sizes(hostdevice_2dvector<gpu::EncColumnChunk>& chunks,
device_span<gpu::parquet_column_device_view const> col_desc,
uint32_t num_columns)
{
chunks.host_to_device(stream);
gpu::InitEncoderPages(chunks,
{},
col_desc,
num_columns,
max_page_size_bytes,
max_page_size_rows,
nullptr,
nullptr,
0,
stream);
chunks.device_to_host(stream, true);
}
auto build_chunk_dictionaries(hostdevice_2dvector<gpu::EncColumnChunk>& chunks,
host_span<gpu::parquet_column_device_view const> col_desc,
device_2dspan<gpu::PageFragment const> frags,
rmm::cuda_stream_view stream)
{
// At this point, we know all chunks and their sizes. We want to allocate dictionaries for each
  // chunk that can use a dictionary.
auto h_chunks = chunks.host_view().flat_view();
std::vector<rmm::device_uvector<size_type>> dict_data;
std::vector<rmm::device_uvector<uint16_t>> dict_index;
if (h_chunks.size() == 0) { return std::pair(std::move(dict_data), std::move(dict_index)); }
// Allocate slots for each chunk
std::vector<rmm::device_uvector<gpu::slot_type>> hash_maps_storage;
hash_maps_storage.reserve(h_chunks.size());
for (auto& chunk : h_chunks) {
if (col_desc[chunk.col_desc_id].physical_type == Type::BOOLEAN) {
chunk.use_dictionary = false;
} else {
chunk.use_dictionary = true;
auto& inserted_map = hash_maps_storage.emplace_back(chunk.num_values, stream);
chunk.dict_map_slots = inserted_map.data();
chunk.dict_map_size = inserted_map.size();
}
}
chunks.host_to_device(stream);
gpu::initialize_chunk_hash_maps(chunks.device_view().flat_view(), stream);
gpu::populate_chunk_hash_maps(frags, stream);
chunks.device_to_host(stream, true);
// Make decision about which chunks have dictionary
for (auto& ck : h_chunks) {
if (not ck.use_dictionary) { continue; }
std::tie(ck.use_dictionary, ck.dict_rle_bits) = [&]() {
// calculate size of chunk if dictionary is used
// If we have N unique values then the idx for the last value is N - 1 and nbits is the number
// of bits required to encode indices into the dictionary
auto max_dict_index = (ck.num_dict_entries > 0) ? ck.num_dict_entries - 1 : 0;
auto nbits = CompactProtocolReader::NumRequiredBits(max_dict_index);
// We don't use dictionary if the indices are > 16 bits because that's the maximum bitpacking
// bitsize we efficiently support
if (nbits > 16) { return std::pair(false, 0); }
// Only these bit sizes are allowed for RLE encoding because it's compute optimized
constexpr auto allowed_bitsizes = std::array<size_type, 6>{1, 2, 4, 8, 12, 16};
// ceil to (1/2/4/8/12/16)
auto rle_bits = *std::lower_bound(allowed_bitsizes.begin(), allowed_bitsizes.end(), nbits);
auto rle_byte_size = util::div_rounding_up_safe(ck.num_values * rle_bits, 8);
auto dict_enc_size = ck.uniq_data_size + rle_byte_size;
bool use_dict = (ck.plain_data_size > dict_enc_size);
if (not use_dict) { rle_bits = 0; }
return std::pair(use_dict, rle_bits);
}();
}
// TODO: (enh) Deallocate hash map storage for chunks that don't use dict and clear pointers.
dict_data.reserve(h_chunks.size());
dict_index.reserve(h_chunks.size());
for (auto& chunk : h_chunks) {
if (not chunk.use_dictionary) { continue; }
size_t dict_data_size = std::min(MAX_DICT_SIZE, chunk.dict_map_size);
auto& inserted_dict_data = dict_data.emplace_back(dict_data_size, stream);
auto& inserted_dict_index = dict_index.emplace_back(chunk.num_values, stream);
chunk.dict_data = inserted_dict_data.data();
chunk.dict_index = inserted_dict_index.data();
}
chunks.host_to_device(stream);
gpu::collect_map_entries(chunks.device_view().flat_view(), stream);
gpu::get_dictionary_indices(frags, stream);
return std::pair(std::move(dict_data), std::move(dict_index));
}
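namespace {
// Illustrative sketch (not referenced elsewhere): a standalone form of the RLE
// bit-width rounding used in the dictionary decision above. It assumes the
// caller has already rejected index widths above 16 bits.
[[maybe_unused]] size_type ceil_to_rle_bitsize(size_type nbits)
{
  constexpr auto allowed_bitsizes = std::array<size_type, 6>{1, 2, 4, 8, 12, 16};
  return *std::lower_bound(allowed_bitsizes.begin(), allowed_bitsizes.end(), nbits);
}
// e.g. a chunk with 1000 unique values needs 10 index bits, which rounds up to
// ceil_to_rle_bitsize(10) == 12; the dictionary is kept only if
// uniq_data_size + ceil(num_values * 12 / 8) beats plain_data_size.
}  // namespace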
void writer::impl::init_encoder_pages(hostdevice_2dvector<gpu::EncColumnChunk>& chunks,
device_span<gpu::parquet_column_device_view const> col_desc,
device_span<gpu::EncPage> pages,
statistics_chunk* page_stats,
statistics_chunk* frag_stats,
size_t max_page_comp_data_size,
uint32_t num_columns,
uint32_t num_pages,
uint32_t num_stats_bfr)
{
rmm::device_uvector<statistics_merge_group> page_stats_mrg(num_stats_bfr, stream);
chunks.host_to_device(stream);
InitEncoderPages(chunks,
pages,
col_desc,
num_columns,
max_page_size_bytes,
max_page_size_rows,
(num_stats_bfr) ? page_stats_mrg.data() : nullptr,
(num_stats_bfr > num_pages) ? page_stats_mrg.data() + num_pages : nullptr,
max_page_comp_data_size,
stream);
if (num_stats_bfr > 0) {
detail::merge_group_statistics<detail::io_file_format::PARQUET>(
page_stats, frag_stats, page_stats_mrg.data(), num_pages, stream);
if (num_stats_bfr > num_pages) {
detail::merge_group_statistics<detail::io_file_format::PARQUET>(
page_stats + num_pages,
page_stats,
page_stats_mrg.data() + num_pages,
num_stats_bfr - num_pages,
stream);
}
}
stream.synchronize();
}
void snappy_compress(device_span<device_span<uint8_t const> const> comp_in,
device_span<device_span<uint8_t> const> comp_out,
device_span<decompress_status> comp_stats,
size_t max_page_uncomp_data_size,
rmm::cuda_stream_view stream)
{
size_t num_comp_pages = comp_in.size();
try {
size_t temp_size;
nvcompStatus_t nvcomp_status = nvcompBatchedSnappyCompressGetTempSize(
num_comp_pages, max_page_uncomp_data_size, nvcompBatchedSnappyDefaultOpts, &temp_size);
CUDF_EXPECTS(nvcomp_status == nvcompStatus_t::nvcompSuccess,
"Error in getting snappy compression scratch size");
// Not needed now, but the nvcomp API makes no promises about the future
rmm::device_buffer scratch(temp_size, stream);
// Analogous to comp_in.srcDevice
rmm::device_uvector<void const*> uncompressed_data_ptrs(num_comp_pages, stream);
// Analogous to comp_in.srcSize
rmm::device_uvector<size_t> uncompressed_data_sizes(num_comp_pages, stream);
// Analogous to comp_in.dstDevice
rmm::device_uvector<void*> compressed_data_ptrs(num_comp_pages, stream);
// Analogous to comp_stat.bytes_written
rmm::device_uvector<size_t> compressed_bytes_written(num_comp_pages, stream);
// nvcomp does not currently use comp_in.dstSize. Cannot assume that the output will fit in
// the space allocated unless one uses the API nvcompBatchedSnappyCompressGetOutputSize()
// Prepare the vectors
auto comp_it =
thrust::make_zip_iterator(uncompressed_data_ptrs.begin(), uncompressed_data_sizes.begin());
thrust::transform(
rmm::exec_policy(stream),
comp_in.begin(),
comp_in.end(),
comp_it,
[] __device__(auto const& in) { return thrust::make_tuple(in.data(), in.size()); });
thrust::transform(rmm::exec_policy(stream),
comp_out.begin(),
comp_out.end(),
compressed_data_ptrs.begin(),
[] __device__(auto const& out) { return out.data(); });
nvcomp_status = nvcompBatchedSnappyCompressAsync(uncompressed_data_ptrs.data(),
uncompressed_data_sizes.data(),
max_page_uncomp_data_size,
num_comp_pages,
scratch.data(), // Not needed right now, but may be in the future
scratch.size(),
compressed_data_ptrs.data(),
compressed_bytes_written.data(),
nvcompBatchedSnappyDefaultOpts,
stream.value());
CUDF_EXPECTS(nvcomp_status == nvcompStatus_t::nvcompSuccess, "Error in snappy compression");
// nvcomp also doesn't use comp_out.status. It guarantees that given enough output space,
// compression will succeed.
// The other `comp_out` field is `reserved` which is for internal cuIO debugging and can be 0.
thrust::transform(rmm::exec_policy(stream),
compressed_bytes_written.begin(),
compressed_bytes_written.end(),
comp_stats.begin(),
[] __device__(size_t size) {
decompress_status status{};
status.bytes_written = size;
return status;
});
return;
} catch (...) {
// If we reach this then there was an error in compressing so set an error status for each page
thrust::for_each(rmm::exec_policy(stream),
comp_stats.begin(),
comp_stats.end(),
[] __device__(decompress_status & stat) { stat.status = 1; });
};
}
void writer::impl::encode_pages(hostdevice_2dvector<gpu::EncColumnChunk>& chunks,
device_span<gpu::EncPage> pages,
size_t max_page_uncomp_data_size,
uint32_t pages_in_batch,
uint32_t first_page_in_batch,
uint32_t rowgroups_in_batch,
uint32_t first_rowgroup,
const statistics_chunk* page_stats,
const statistics_chunk* chunk_stats)
{
auto batch_pages = pages.subspan(first_page_in_batch, pages_in_batch);
auto batch_pages_stats =
(page_stats != nullptr)
? device_span<statistics_chunk const>(page_stats + first_page_in_batch, pages_in_batch)
: device_span<statistics_chunk const>();
uint32_t max_comp_pages =
(compression_ != parquet::Compression::UNCOMPRESSED) ? pages_in_batch : 0;
rmm::device_uvector<device_span<uint8_t const>> comp_in(max_comp_pages, stream);
rmm::device_uvector<device_span<uint8_t>> comp_out(max_comp_pages, stream);
rmm::device_uvector<decompress_status> comp_stats(max_comp_pages, stream);
gpu::EncodePages(batch_pages, comp_in, comp_out, comp_stats, stream);
switch (compression_) {
case parquet::Compression::SNAPPY:
if (nvcomp_integration::is_stable_enabled()) {
snappy_compress(comp_in, comp_out, comp_stats, max_page_uncomp_data_size, stream);
} else {
gpu_snap(comp_in, comp_out, comp_stats, stream);
}
break;
default: break;
}
// TBD: Not clear if the official spec actually allows dynamically turning off compression at the
// chunk-level
auto d_chunks_in_batch = chunks.device_view().subspan(first_rowgroup, rowgroups_in_batch);
DecideCompression(d_chunks_in_batch.flat_view(), stream);
EncodePageHeaders(batch_pages, comp_stats, batch_pages_stats, chunk_stats, stream);
GatherPages(d_chunks_in_batch.flat_view(), pages, stream);
auto h_chunks_in_batch = chunks.host_view().subspan(first_rowgroup, rowgroups_in_batch);
CUDF_CUDA_TRY(cudaMemcpyAsync(h_chunks_in_batch.data(),
d_chunks_in_batch.data(),
d_chunks_in_batch.flat_view().size_bytes(),
cudaMemcpyDeviceToHost,
stream.value()));
stream.synchronize();
}
writer::impl::impl(std::vector<std::unique_ptr<data_sink>> sinks,
parquet_writer_options const& options,
SingleWriteMode mode,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
: _mr(mr),
stream(stream),
max_row_group_size{options.get_row_group_size_bytes()},
max_row_group_rows{options.get_row_group_size_rows()},
max_page_size_bytes(options.get_max_page_size_bytes()),
max_page_size_rows(options.get_max_page_size_rows()),
compression_(to_parquet_compression(options.get_compression())),
stats_granularity_(options.get_stats_level()),
int96_timestamps(options.is_enabled_int96_timestamps()),
kv_md(options.get_key_value_metadata()),
single_write_mode(mode == SingleWriteMode::YES),
out_sink_(std::move(sinks))
{
if (options.get_metadata()) {
table_meta = std::make_unique<table_input_metadata>(*options.get_metadata());
}
init_state();
}
writer::impl::impl(std::vector<std::unique_ptr<data_sink>> sinks,
chunked_parquet_writer_options const& options,
SingleWriteMode mode,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
: _mr(mr),
stream(stream),
max_row_group_size{options.get_row_group_size_bytes()},
max_row_group_rows{options.get_row_group_size_rows()},
max_page_size_bytes(options.get_max_page_size_bytes()),
max_page_size_rows(options.get_max_page_size_rows()),
compression_(to_parquet_compression(options.get_compression())),
stats_granularity_(options.get_stats_level()),
int96_timestamps(options.is_enabled_int96_timestamps()),
kv_md(options.get_key_value_metadata()),
single_write_mode(mode == SingleWriteMode::YES),
out_sink_(std::move(sinks))
{
if (options.get_metadata()) {
table_meta = std::make_unique<table_input_metadata>(*options.get_metadata());
}
init_state();
}
writer::impl::~impl() { close(); }
void writer::impl::init_state()
{
current_chunk_offset.resize(out_sink_.size());
// Write file header
file_header_s fhdr;
fhdr.magic = parquet_magic;
for (auto& sink : out_sink_) {
sink->host_write(&fhdr, sizeof(fhdr));
}
std::fill_n(current_chunk_offset.begin(), current_chunk_offset.size(), sizeof(file_header_s));
}
void writer::impl::write(table_view const& table, std::vector<partition_info> const& partitions)
{
last_write_successful = false;
CUDF_EXPECTS(not closed, "Data has already been flushed to out and closed");
if (not table_meta) { table_meta = std::make_unique<table_input_metadata>(table); }
// Fill unnamed columns' names in table_meta
std::function<void(column_in_metadata&, std::string)> add_default_name =
[&](column_in_metadata& col_meta, std::string default_name) {
if (col_meta.get_name().empty()) col_meta.set_name(default_name);
for (size_type i = 0; i < col_meta.num_children(); ++i) {
add_default_name(col_meta.child(i), col_meta.get_name() + "_" + std::to_string(i));
}
};
for (size_t i = 0; i < table_meta->column_metadata.size(); ++i) {
add_default_name(table_meta->column_metadata[i], "_col" + std::to_string(i));
}
auto vec = table_to_linked_columns(table);
auto schema_tree = construct_schema_tree(vec, *table_meta, single_write_mode, int96_timestamps);
// Construct parquet_column_views from the schema tree leaf nodes.
std::vector<parquet_column_view> parquet_columns;
for (schema_tree_node const& schema_node : schema_tree) {
if (schema_node.leaf_column) { parquet_columns.emplace_back(schema_node, schema_tree, stream); }
}
// Mass allocation of column_device_views for each parquet_column_view
std::vector<column_view> cudf_cols;
cudf_cols.reserve(parquet_columns.size());
for (auto const& parq_col : parquet_columns) {
cudf_cols.push_back(parq_col.cudf_column_view());
}
table_view single_streams_table(cudf_cols);
size_type num_columns = single_streams_table.num_columns();
std::vector<SchemaElement> this_table_schema(schema_tree.begin(), schema_tree.end());
if (!md) {
md = std::make_unique<aggregate_writer_metadata>(
partitions, num_columns, std::move(this_table_schema), stats_granularity_, kv_md);
} else {
// verify the user isn't passing mismatched tables
CUDF_EXPECTS(md->schema_matches(this_table_schema),
"Mismatch in schema between multiple calls to write_chunk");
md->update_files(partitions);
}
// Create table_device_view so that corresponding column_device_view data
// can be written into col_desc members
auto parent_column_table_device_view = table_device_view::create(single_streams_table, stream);
rmm::device_uvector<column_device_view> leaf_column_views(0, stream);
// Initialize column description
hostdevice_vector<gpu::parquet_column_device_view> col_desc(parquet_columns.size(), stream);
std::transform(
parquet_columns.begin(), parquet_columns.end(), col_desc.host_ptr(), [&](auto const& pcol) {
return pcol.get_device_view(stream);
});
// Init page fragments
// 5000 is good enough for up to ~200-character strings. Longer strings will start producing
// fragments larger than the desired page size -> TODO: keep track of the max fragment size, and
// iteratively reduce this value if the largest fragment exceeds the max page size limit (we
// ideally want the page size to be below 1MB so as to have enough pages to get good
// compression/decompression performance).
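// For illustration of the arithmetic below: a partition of 1,000,000 rows with
// max_page_fragment_size = 5000 yields div_rounding_up(1000000, 5000) = 200
// fragments, and the fragments table holds one such row of fragments per column.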
using cudf::io::parquet::gpu::max_page_fragment_size;
std::vector<int> num_frag_in_part;
std::transform(partitions.begin(),
partitions.end(),
std::back_inserter(num_frag_in_part),
[](auto const& part) {
return util::div_rounding_up_unsafe(part.num_rows, max_page_fragment_size);
});
size_type num_fragments = std::reduce(num_frag_in_part.begin(), num_frag_in_part.end());
std::vector<int> part_frag_offset; // Store the idx of the first fragment in each partition
std::exclusive_scan(
num_frag_in_part.begin(), num_frag_in_part.end(), std::back_inserter(part_frag_offset), 0);
part_frag_offset.push_back(part_frag_offset.back() + num_frag_in_part.back());
auto d_part_frag_offset = cudf::detail::make_device_uvector_async(part_frag_offset, stream);
cudf::detail::hostdevice_2dvector<gpu::PageFragment> fragments(
num_columns, num_fragments, stream);
if (num_fragments != 0) {
// Move column info to device
col_desc.host_to_device(stream);
leaf_column_views = create_leaf_column_device_views<gpu::parquet_column_device_view>(
col_desc, *parent_column_table_device_view, stream);
init_page_fragments(
fragments, col_desc, partitions, d_part_frag_offset, max_page_fragment_size);
}
std::vector<size_t> const global_rowgroup_base = md->num_row_groups_per_file();
// Decide row group boundaries based on uncompressed data size
int num_rowgroups = 0;
std::vector<int> num_rg_in_part(partitions.size());
for (size_t p = 0; p < partitions.size(); ++p) {
size_type curr_rg_num_rows = 0;
size_t curr_rg_data_size = 0;
int first_frag_in_rg = part_frag_offset[p];
int last_frag_in_part = part_frag_offset[p + 1] - 1;
for (auto f = first_frag_in_rg; f <= last_frag_in_part; ++f) {
size_t fragment_data_size = 0;
for (auto c = 0; c < num_columns; c++) {
fragment_data_size += fragments[c][f].fragment_data_size;
}
size_type fragment_num_rows = fragments[0][f].num_rows;
// If the fragment size gets larger than rg limit then break off a rg
if (f > first_frag_in_rg && // There has to be at least one fragment in row group
(curr_rg_data_size + fragment_data_size > max_row_group_size ||
curr_rg_num_rows + fragment_num_rows > max_row_group_rows)) {
auto& rg = md->file(p).row_groups.emplace_back();
rg.num_rows = curr_rg_num_rows;
num_rowgroups++;
num_rg_in_part[p]++;
curr_rg_num_rows = 0;
curr_rg_data_size = 0;
first_frag_in_rg = f;
}
curr_rg_num_rows += fragment_num_rows;
curr_rg_data_size += fragment_data_size;
// TODO: (wishful) refactor to consolidate with above if block
if (f == last_frag_in_part) {
auto& rg = md->file(p).row_groups.emplace_back();
rg.num_rows = curr_rg_num_rows;
num_rowgroups++;
num_rg_in_part[p]++;
}
}
}
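// For illustration (hypothetical numbers): if max_row_group_size were 128 MiB and
// each fragment contributed ~1 MiB summed over all columns, a row group would
// close after ~128 fragments; the max_row_group_rows limit works the same way,
// and the f > first_frag_in_rg guard keeps at least one fragment per row group.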
// Allocate column chunks and gather fragment statistics
rmm::device_uvector<statistics_chunk> frag_stats(0, stream);
if (stats_granularity_ != statistics_freq::STATISTICS_NONE) {
frag_stats.resize(num_fragments * num_columns, stream);
if (not frag_stats.is_empty()) {
auto frag_stats_2dview =
device_2dspan<statistics_chunk>(frag_stats.data(), num_columns, num_fragments);
gather_fragment_statistics(frag_stats_2dview, fragments, col_desc, num_fragments);
}
}
std::vector<int> first_rg_in_part;
std::exclusive_scan(
num_rg_in_part.begin(), num_rg_in_part.end(), std::back_inserter(first_rg_in_part), 0);
// Initialize row groups and column chunks
auto const num_chunks = num_rowgroups * num_columns;
hostdevice_2dvector<gpu::EncColumnChunk> chunks(num_rowgroups, num_columns, stream);
for (size_t p = 0; p < partitions.size(); ++p) {
int f = part_frag_offset[p];
size_type start_row = partitions[p].start_row;
for (int r = 0; r < num_rg_in_part[p]; r++) {
size_t global_r = global_rowgroup_base[p] + r; // Number of rowgroups already in file/part
auto& row_group = md->file(p).row_groups[global_r];
uint32_t fragments_in_chunk =
util::div_rounding_up_unsafe(row_group.num_rows, max_page_fragment_size);
row_group.total_byte_size = 0;
row_group.columns.resize(num_columns);
for (int c = 0; c < num_columns; c++) {
gpu::EncColumnChunk& ck = chunks[r + first_rg_in_part[p]][c];
ck = {};
ck.col_desc = col_desc.device_ptr() + c;
ck.col_desc_id = c;
ck.fragments = &fragments.device_view()[c][f];
ck.stats =
(not frag_stats.is_empty()) ? frag_stats.data() + c * num_fragments + f : nullptr;
ck.start_row = start_row;
ck.num_rows = (uint32_t)row_group.num_rows;
ck.first_fragment = c * num_fragments + f;
auto chunk_fragments = fragments[c].subspan(f, fragments_in_chunk);
// In fragment struct, add a pointer to the chunk it belongs to
// In each fragment in chunk_fragments, update the chunk pointer here.
for (auto& frag : chunk_fragments) {
frag.chunk = &chunks.device_view()[r + first_rg_in_part[p]][c];
}
ck.num_values = std::accumulate(
chunk_fragments.begin(), chunk_fragments.end(), 0, [](uint32_t l, auto r) {
return l + r.num_values;
});
ck.plain_data_size = std::accumulate(
chunk_fragments.begin(), chunk_fragments.end(), 0, [](int sum, gpu::PageFragment frag) {
return sum + frag.fragment_data_size;
});
auto& column_chunk_meta = row_group.columns[c].meta_data;
column_chunk_meta.type = parquet_columns[c].physical_type();
column_chunk_meta.encodings = {Encoding::PLAIN, Encoding::RLE};
column_chunk_meta.path_in_schema = parquet_columns[c].get_path_in_schema();
column_chunk_meta.codec = UNCOMPRESSED;
column_chunk_meta.num_values = ck.num_values;
}
f += fragments_in_chunk;
start_row += (uint32_t)row_group.num_rows;
}
}
fragments.host_to_device(stream);
auto dict_info_owner = build_chunk_dictionaries(chunks, col_desc, fragments, stream);
for (size_t p = 0; p < partitions.size(); p++) {
for (int rg = 0; rg < num_rg_in_part[p]; rg++) {
size_t global_rg = global_rowgroup_base[p] + rg;
for (int col = 0; col < num_columns; col++) {
if (chunks.host_view()[rg][col].use_dictionary) {
md->file(p).row_groups[global_rg].columns[col].meta_data.encodings.push_back(
Encoding::PLAIN_DICTIONARY);
}
}
}
}
// Build chunk dictionaries and count pages
if (num_chunks != 0) { init_page_sizes(chunks, col_desc, num_columns); }
// Get the maximum page size across all chunks
size_type max_page_uncomp_data_size =
std::accumulate(chunks.host_view().flat_view().begin(),
chunks.host_view().flat_view().end(),
0,
[](uint32_t max_page_size, gpu::EncColumnChunk const& chunk) {
return std::max(max_page_size, chunk.max_page_data_size);
});
size_t max_page_comp_data_size = 0;
if (compression_ != parquet::Compression::UNCOMPRESSED) {
auto status = nvcompBatchedSnappyCompressGetMaxOutputChunkSize(
max_page_uncomp_data_size, nvcompBatchedSnappyDefaultOpts, &max_page_comp_data_size);
CUDF_EXPECTS(status == nvcompStatus_t::nvcompSuccess,
"Error in getting compressed size from nvcomp");
}
// Find which partition a rg belongs to
std::vector<int> rg_to_part;
for (size_t p = 0; p < num_rg_in_part.size(); ++p) {
std::fill_n(std::back_inserter(rg_to_part), num_rg_in_part[p], p);
}
// Initialize batches of rowgroups to encode (mainly to limit peak memory usage)
std::vector<size_type> batch_list;
size_type num_pages = 0;
size_t max_bytes_in_batch = 1024 * 1024 * 1024; // 1GB - TODO: Tune this
size_t max_uncomp_bfr_size = 0;
size_t max_comp_bfr_size = 0;
size_t max_chunk_bfr_size = 0;
size_type max_pages_in_batch = 0;
size_t bytes_in_batch = 0;
size_t comp_bytes_in_batch = 0;
for (size_type r = 0, groups_in_batch = 0, pages_in_batch = 0; r <= num_rowgroups; r++) {
size_t rowgroup_size = 0;
size_t comp_rowgroup_size = 0;
if (r < num_rowgroups) {
for (int i = 0; i < num_columns; i++) {
gpu::EncColumnChunk* ck = &chunks[r][i];
ck->first_page = num_pages;
num_pages += ck->num_pages;
pages_in_batch += ck->num_pages;
rowgroup_size += ck->bfr_size;
ck->compressed_size =
ck->ck_stat_size + ck->page_headers_size + max_page_comp_data_size * ck->num_pages;
comp_rowgroup_size += ck->compressed_size;
max_chunk_bfr_size =
std::max(max_chunk_bfr_size, (size_t)std::max(ck->bfr_size, ck->compressed_size));
}
}
// TBD: We may want to also shorten the batch if we have enough pages (not just based on size)
if ((r == num_rowgroups) ||
(groups_in_batch != 0 && bytes_in_batch + rowgroup_size > max_bytes_in_batch)) {
max_uncomp_bfr_size = std::max(max_uncomp_bfr_size, bytes_in_batch);
max_comp_bfr_size = std::max(max_comp_bfr_size, comp_bytes_in_batch);
max_pages_in_batch = std::max(max_pages_in_batch, pages_in_batch);
if (groups_in_batch != 0) {
batch_list.push_back(groups_in_batch);
groups_in_batch = 0;
}
bytes_in_batch = 0;
comp_bytes_in_batch = 0;
pages_in_batch = 0;
}
bytes_in_batch += rowgroup_size;
comp_bytes_in_batch += comp_rowgroup_size;
groups_in_batch++;
}
// Clear compressed buffer size if compression has been turned off
if (compression_ == parquet::Compression::UNCOMPRESSED) { max_comp_bfr_size = 0; }
// Initialize data pointers in batch
uint32_t num_stats_bfr =
(stats_granularity_ != statistics_freq::STATISTICS_NONE) ? num_pages + num_chunks : 0;
rmm::device_buffer uncomp_bfr(max_uncomp_bfr_size, stream);
rmm::device_buffer comp_bfr(max_comp_bfr_size, stream);
rmm::device_uvector<gpu::EncPage> pages(num_pages, stream);
// This contains stats for both the pages and the rowgroups. TODO: make them separate.
rmm::device_uvector<statistics_chunk> page_stats(num_stats_bfr, stream);
for (auto b = 0, r = 0; b < static_cast<size_type>(batch_list.size()); b++) {
auto bfr = static_cast<uint8_t*>(uncomp_bfr.data());
auto bfr_c = static_cast<uint8_t*>(comp_bfr.data());
for (auto j = 0; j < batch_list[b]; j++, r++) {
for (auto i = 0; i < num_columns; i++) {
gpu::EncColumnChunk& ck = chunks[r][i];
ck.uncompressed_bfr = bfr;
ck.compressed_bfr = bfr_c;
bfr += ck.bfr_size;
bfr_c += ck.compressed_size;
}
}
}
if (num_pages != 0) {
init_encoder_pages(chunks,
col_desc,
{pages.data(), pages.size()},
(num_stats_bfr) ? page_stats.data() : nullptr,
(num_stats_bfr) ? frag_stats.data() : nullptr,
max_page_comp_data_size,
num_columns,
num_pages,
num_stats_bfr);
}
pinned_buffer<uint8_t> host_bfr{nullptr, cudaFreeHost};
// Encode row groups in batches
for (auto b = 0, r = 0; b < static_cast<size_type>(batch_list.size()); b++) {
// Count pages in this batch
auto const rnext = r + batch_list[b];
auto const first_page_in_batch = chunks[r][0].first_page;
auto const first_page_in_next_batch =
(rnext < num_rowgroups) ? chunks[rnext][0].first_page : num_pages;
auto const pages_in_batch = first_page_in_next_batch - first_page_in_batch;
// device_span<gpu::EncPage> batch_pages{pages.data() + first_page_in_batch, }
encode_pages(
chunks,
{pages.data(), pages.size()},
max_page_uncomp_data_size,
pages_in_batch,
first_page_in_batch,
batch_list[b],
r,
(stats_granularity_ == statistics_freq::STATISTICS_PAGE) ? page_stats.data() : nullptr,
(stats_granularity_ != statistics_freq::STATISTICS_NONE) ? page_stats.data() + num_pages
: nullptr);
std::vector<std::future<void>> write_tasks;
for (; r < rnext; r++) {
int p = rg_to_part[r];
int global_r = global_rowgroup_base[p] + r - first_rg_in_part[p];
auto& row_group = md->file(p).row_groups[global_r];
for (auto i = 0; i < num_columns; i++) {
gpu::EncColumnChunk& ck = chunks[r][i];
auto& column_chunk_meta = row_group.columns[i].meta_data;
uint8_t* dev_bfr;
if (ck.is_compressed) {
column_chunk_meta.codec = compression_;
dev_bfr = ck.compressed_bfr;
} else {
dev_bfr = ck.uncompressed_bfr;
}
if (out_sink_[p]->is_device_write_preferred(ck.compressed_size)) {
// let the writer do what it wants to retrieve the data from the gpu.
write_tasks.push_back(out_sink_[p]->device_write_async(
dev_bfr + ck.ck_stat_size, ck.compressed_size, stream));
// we still need to do a (much smaller) memcpy for the statistics.
if (ck.ck_stat_size != 0) {
column_chunk_meta.statistics_blob.resize(ck.ck_stat_size);
CUDF_CUDA_TRY(cudaMemcpyAsync(column_chunk_meta.statistics_blob.data(),
dev_bfr,
ck.ck_stat_size,
cudaMemcpyDeviceToHost,
stream.value()));
stream.synchronize();
}
} else {
if (!host_bfr) {
host_bfr = pinned_buffer<uint8_t>{[](size_t size) {
uint8_t* ptr = nullptr;
CUDF_CUDA_TRY(cudaMallocHost(&ptr, size));
return ptr;
}(max_chunk_bfr_size),
cudaFreeHost};
}
// copy the full data
CUDF_CUDA_TRY(cudaMemcpyAsync(host_bfr.get(),
dev_bfr,
ck.ck_stat_size + ck.compressed_size,
cudaMemcpyDeviceToHost,
stream.value()));
stream.synchronize();
out_sink_[p]->host_write(host_bfr.get() + ck.ck_stat_size, ck.compressed_size);
if (ck.ck_stat_size != 0) {
column_chunk_meta.statistics_blob.resize(ck.ck_stat_size);
memcpy(column_chunk_meta.statistics_blob.data(), host_bfr.get(), ck.ck_stat_size);
}
}
row_group.total_byte_size += ck.compressed_size;
column_chunk_meta.data_page_offset =
current_chunk_offset[p] + ((ck.use_dictionary) ? ck.dictionary_size : 0);
column_chunk_meta.dictionary_page_offset =
(ck.use_dictionary) ? current_chunk_offset[p] : 0;
column_chunk_meta.total_uncompressed_size = ck.bfr_size;
column_chunk_meta.total_compressed_size = ck.compressed_size;
current_chunk_offset[p] += ck.compressed_size;
}
}
for (auto const& task : write_tasks) {
task.wait();
}
}
last_write_successful = true;
}
std::unique_ptr<std::vector<uint8_t>> writer::impl::close(
std::vector<std::string> const& column_chunks_file_path)
{
if (closed) { return nullptr; }
closed = true;
if (not last_write_successful) { return nullptr; }
for (size_t p = 0; p < out_sink_.size(); p++) {
std::vector<uint8_t> buffer;
CompactProtocolWriter cpw(&buffer);
file_ender_s fendr;
buffer.resize(0);
fendr.footer_len = static_cast<uint32_t>(cpw.write(md->get_metadata(p)));
fendr.magic = parquet_magic;
out_sink_[p]->host_write(buffer.data(), buffer.size());
out_sink_[p]->host_write(&fendr, sizeof(fendr));
out_sink_[p]->flush();
}
// Optionally output raw file metadata with the specified column chunk file path
if (column_chunks_file_path.size() > 0) {
CUDF_EXPECTS(column_chunks_file_path.size() == md->num_files(),
"Expected one column chunk path per output file");
md->set_file_paths(column_chunks_file_path);
file_header_s fhdr = {parquet_magic};
std::vector<uint8_t> buffer;
CompactProtocolWriter cpw(&buffer);
buffer.insert(buffer.end(),
reinterpret_cast<const uint8_t*>(&fhdr),
reinterpret_cast<const uint8_t*>(&fhdr) + sizeof(fhdr));
file_ender_s fendr;
fendr.magic = parquet_magic;
fendr.footer_len = static_cast<uint32_t>(cpw.write(md->get_merged_metadata()));
buffer.insert(buffer.end(),
reinterpret_cast<const uint8_t*>(&fendr),
reinterpret_cast<const uint8_t*>(&fendr) + sizeof(fendr));
return std::make_unique<std::vector<uint8_t>>(std::move(buffer));
} else {
return {nullptr};
}
return nullptr;
}
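// Note: the bytes each sink receives across init_state()/write()/close() follow
// the Parquet file layout:
//
// "PAR1" | column chunk data ... | thrift FileMetaData | footer length (4 bytes) | "PAR1"
//
// close() contributes the last three pieces: `buffer` holds the thrift-encoded
// metadata and `fendr` supplies the footer length plus trailing magic. The
// optional blob returned when column_chunks_file_path is given is a data-less
// variant (header magic + metadata + footer) intended to be combined later via
// merge_row_group_metadata().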
// Forward to implementation
writer::writer(std::vector<std::unique_ptr<data_sink>> sinks,
parquet_writer_options const& options,
SingleWriteMode mode,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
: _impl(std::make_unique<impl>(std::move(sinks), options, mode, stream, mr))
{
}
writer::writer(std::vector<std::unique_ptr<data_sink>> sinks,
chunked_parquet_writer_options const& options,
SingleWriteMode mode,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
: _impl(std::make_unique<impl>(std::move(sinks), options, mode, stream, mr))
{
}
// Destructor within this translation unit
writer::~writer() = default;
// Forward to implementation
void writer::write(table_view const& table, std::vector<partition_info> const& partitions)
{
_impl->write(
table, partitions.empty() ? std::vector<partition_info>{{0, table.num_rows()}} : partitions);
}
// Forward to implementation
std::unique_ptr<std::vector<uint8_t>> writer::close(
std::vector<std::string> const& column_chunks_file_path)
{
return _impl->close(column_chunks_file_path);
}
std::unique_ptr<std::vector<uint8_t>> writer::merge_row_group_metadata(
std::vector<std::unique_ptr<std::vector<uint8_t>>> const& metadata_list)
{
std::vector<uint8_t> output;
CompactProtocolWriter cpw(&output);
FileMetaData md;
md.row_groups.reserve(metadata_list.size());
for (const auto& blob : metadata_list) {
CompactProtocolReader cpreader(
blob.get()->data(),
std::max<size_t>(blob.get()->size(), sizeof(file_ender_s)) - sizeof(file_ender_s));
cpreader.skip_bytes(sizeof(file_header_s)); // Skip over file header
if (md.num_rows == 0) {
cpreader.read(&md);
} else {
FileMetaData tmp;
cpreader.read(&tmp);
md.row_groups.insert(md.row_groups.end(),
std::make_move_iterator(tmp.row_groups.begin()),
std::make_move_iterator(tmp.row_groups.end()));
md.num_rows += tmp.num_rows;
}
}
// Reader doesn't currently populate column_order, so infer it here
if (md.row_groups.size() != 0) {
uint32_t num_columns = static_cast<uint32_t>(md.row_groups[0].columns.size());
md.column_order_listsize =
(num_columns > 0 && md.row_groups[0].columns[0].meta_data.statistics_blob.size())
? num_columns
: 0;
}
// Thrift-encode the resulting output
file_header_s fhdr;
file_ender_s fendr;
fhdr.magic = parquet_magic;
output.insert(output.end(),
reinterpret_cast<const uint8_t*>(&fhdr),
reinterpret_cast<const uint8_t*>(&fhdr) + sizeof(fhdr));
fendr.footer_len = static_cast<uint32_t>(cpw.write(md));
fendr.magic = parquet_magic;
output.insert(output.end(),
reinterpret_cast<const uint8_t*>(&fendr),
reinterpret_cast<const uint8_t*>(&fendr) + sizeof(fendr));
return std::make_unique<std::vector<uint8_t>>(std::move(output));
}
} // namespace parquet
} // namespace detail
} // namespace io
} // namespace cudf
|
cfd3d36d045b2005156313ae3c5a4587548397c5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/im2col.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
#include <cmath>
namespace caffe {
// The constant NUM_THREADS should be equal to the value in CCMomentCalc
template <typename Dtype>
__global__ void CCMomentCollect(const int n, const Dtype* wb, const Dtype* mask,
Dtype* mu, Dtype* std, unsigned int* count ) {
const int NUM_THREADS = 512;
__shared__ Dtype param [4*NUM_THREADS];
__shared__ unsigned int tcount [2*NUM_THREADS];
unsigned int t = threadIdx.x;
unsigned int s = 2 * blockIdx.x * NUM_THREADS;
if (s+t < n){
param[t] = fabs(mask[s+t]*wb[s+t]);
param[t+2*NUM_THREADS] = mask[s+t]*wb[s+t]*wb[s+t];
if(mask[s+t]*wb[s+t]!=0) tcount[t] = 1;
else tcount[t] = 0;
}
else{
param[t] = 0;param[t+2*NUM_THREADS] = 0;tcount[t] = 0;
}
if (s+t+NUM_THREADS < n){
param[t+NUM_THREADS] = fabs(mask[s+t+NUM_THREADS]*wb[s+t+NUM_THREADS]);
param[t+3*NUM_THREADS] = mask[s+t+NUM_THREADS]*wb[s+t+NUM_THREADS]*wb[s+t+NUM_THREADS];
if(mask[s+t+NUM_THREADS]*wb[s+t+NUM_THREADS]!=0) tcount[t+NUM_THREADS] = 1;
else tcount[t+NUM_THREADS] = 0;
}
else{
param[t+NUM_THREADS] = 0;param[t+3*NUM_THREADS] = 0;tcount[t+NUM_THREADS] = 0;
}
__syncthreads();
for(unsigned int stride = NUM_THREADS; stride >= 1; stride >>= 1) {
if (t < stride ){
param[t] += param[t+stride];
param[t+2*NUM_THREADS] += param[t+2*NUM_THREADS+stride];
tcount[t] += tcount[t+stride];
}
__syncthreads();
}
if (t == 0){
mu [blockIdx.x] = param[0];
std [blockIdx.x] = param[2*NUM_THREADS];
count[blockIdx.x] = tcount[0];
}
}
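// Note: each block of CCMomentCollect reduces 2*NUM_THREADS elements (two loads
// per thread before the tree reduction), so the host side launches
//
// num_p = (n + 2*NUM_THREADS - 1) / (2*NUM_THREADS)
//
// blocks and then sums the num_p per-block partials on the CPU, which is exactly
// what CCMomentCalc below does.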
// The constant NUM_THREADS should be equal to the value in CCMomentCalc
template <typename Dtype>
__global__ void CCNzeroCollect(const int n, const Dtype* mask, unsigned int* count ) {
const int NUM_THREADS = 512;
__shared__ unsigned int tcount [2*NUM_THREADS];
unsigned int t = threadIdx.x;
unsigned int s = 2 * blockIdx.x * NUM_THREADS;
tcount[t] = 0;
if (s+t < n && mask[s+t]!=0){
tcount[t] = 1;
}
tcount[t+NUM_THREADS] = 0;
if (s+t+NUM_THREADS < n && mask[s+t+NUM_THREADS]!=0){
tcount[t+NUM_THREADS] = 1;
}
__syncthreads();
for(unsigned int stride = NUM_THREADS; stride >= 1; stride >>= 1) {
if (t < stride ){
tcount[t] += tcount[t+stride];
}
__syncthreads();
}
if (t == 0){
count[blockIdx.x] = tcount[0];
}
}
template <typename Dtype>
__global__ void CCMaskCalc(const int n, const Dtype* wb,
Dtype* mask, Dtype mu, Dtype std, Dtype r) {
CUDA_KERNEL_LOOP(index, n) {
// added by Guiying Li
//if (mask[index] > 1)
// mask[index] = 1;
//else if (mask[index] <0)
// mask[index] = 0;
// ----Guiying Li-----
if (mask[index]==1 && fabs(wb[index])<=0.9*max(mu+r*std,Dtype(0)))
mask[index] = 0;
else if (mask[index]==0 && fabs(wb[index])>1.1*max(mu+r*std,Dtype(0)))
mask[index] = 1;
}
}
template <typename Dtype>
__global__ void CCcRateCalc(const int n,
Dtype* mask) {
CUDA_KERNEL_LOOP(index, n) {
if (index == 0){
if (mask[0] > 1){
mask[0] = 1;
} else if (mask[0] < 0){
mask[0] = 0;
}
}
}
}
template <typename Dtype>
__global__ void CCMaskApply(const int n, const Dtype* wb,
const Dtype* mask, Dtype* wb_t) {
CUDA_KERNEL_LOOP(index, n) {
wb_t[index] = wb[index] * mask[index];
}
}
template <typename Dtype>
void CCMomentCalc(const int n, const Dtype* wb, const Dtype* mask, Dtype* mu, Dtype* std, unsigned int* ncount){
const unsigned int NUM_THREADS = 512;
Dtype* pmu_g; Dtype* pstd_g; unsigned int* pncount_g;
Dtype* pmu_c; Dtype* pstd_c; unsigned int* pncount_c;
int num_p = (n+(NUM_THREADS<<1)-1)/(NUM_THREADS<<1);
hipMalloc(&pmu_g, sizeof(Dtype) * num_p);
hipMalloc(&pstd_g, sizeof(Dtype) * num_p);
hipMalloc(&pncount_g, sizeof(unsigned int) * num_p);
pmu_c = (Dtype*) malloc(num_p * sizeof(Dtype));
pstd_c = (Dtype*) malloc(num_p * sizeof(Dtype));
pncount_c = (unsigned int*) malloc(num_p * sizeof(unsigned int));
hipLaunchKernelGGL(( CCMomentCollect<Dtype>), dim3(num_p),dim3(NUM_THREADS), 0, 0, n, wb, mask, pmu_g, pstd_g, pncount_g);
CUDA_POST_KERNEL_CHECK;
hipMemcpy(pmu_c, pmu_g, sizeof(Dtype) * num_p, hipMemcpyDeviceToHost);
hipMemcpy(pstd_c, pstd_g, sizeof(Dtype) * num_p, hipMemcpyDeviceToHost);
hipMemcpy(pncount_c, pncount_g, sizeof(unsigned int) * num_p, hipMemcpyDeviceToHost);
for (int i = 0; i < num_p; i++) {
*mu += pmu_c[i];*std += pstd_c[i];*ncount += pncount_c[i];
}
hipFree(pmu_g);hipFree(pstd_g);hipFree(pncount_g);
free(pmu_c);free(pstd_c);free(pncount_c);
}
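// Note: CCMomentCalc returns the accumulated sums S1 = sum(|w_i|), S2 = sum(w_i^2)
// and the nonzero count N; Forward_gpu then recovers
//
// mu = S1 / N
// std = sqrt(S2 / N - mu^2)
//
// i.e. the mean absolute weight and the variance of |w| in the usual
// E[x^2] - E[x]^2 form, taken over the unmasked weights.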
template <typename Dtype>
void CCNZeroCalc(const int n, const Dtype* mask, unsigned int* ncount ){
const unsigned int NUM_THREADS = 512;
unsigned int* pncount_g;
unsigned int* pncount_c;
int num_p = (n+(NUM_THREADS<<1)-1)/(NUM_THREADS<<1);
hipMalloc(&pncount_g, sizeof(unsigned int) * num_p);
pncount_c = (unsigned int*) malloc(num_p * sizeof(unsigned int));
hipLaunchKernelGGL(( CCNzeroCollect<Dtype>), dim3(num_p),dim3(NUM_THREADS), 0, 0, n, mask, pncount_g);
CUDA_POST_KERNEL_CHECK;
hipMemcpy(pncount_c, pncount_g, sizeof(unsigned int) * num_p, hipMemcpyDeviceToHost);
for (int i = 0; i < num_p; i++) {
*ncount += pncount_c[i];
}
hipFree(pncount_g);
free(pncount_c);
}
template <typename Dtype>
void CConvolutionLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* weight = this->blobs_[0]->mutable_gpu_data();
Dtype* weightMask = this->blobs_[2]->mutable_gpu_data();
Dtype* weightTmp = this->weight_tmp_.mutable_gpu_data();
const Dtype* bias = NULL;
Dtype* biasMask = NULL;
Dtype* biasTmp = NULL;
if (this->bias_term_) {
bias = this->blobs_[1]->mutable_gpu_data();
biasMask = this->blobs_[3]->mutable_gpu_data();
biasTmp = this->bias_tmp_.mutable_gpu_data();
}
// added by Guiying Li
bool _update = false;
Dtype* tmp_weightMask = this->blobs_[2]->mutable_cpu_data();
if (tmp_weightMask[0] > 1){
_update = true;
this->crate = tmp_weightMask[0] - 1;
tmp_weightMask[0] = 1;
} else if (tmp_weightMask[0] < 0){
_update = true;
this->crate = -tmp_weightMask[0];
tmp_weightMask[0] = 0;
}
weightMask = this->blobs_[2]->mutable_gpu_data();//update data
// -------Guiying------
if (this->phase_ == TRAIN){
// Calculate the mean and standard deviation of learnable parameters
if (this->std==0 && this->iter_==0){
unsigned int ncount = 0;
CCMomentCalc(this->blobs_[0]->count(), weight, weightMask, &mu, &std, &ncount);
if (this->bias_term_) {
CCMomentCalc(this->blobs_[1]->count(), bias, biasMask, &mu, &std, &ncount);
}
this->mu /= ncount; this->std -= ncount*mu*mu;
this->std /= ncount; this->std = sqrt(std);
LOG(INFO)<<mu<<" "<<std<<" "<<ncount<<"\n";
}
// Demonstrate the sparsity of compressed convolutional layer
/********************************************************/
/*if(this->iter_%1000==0){
unsigned int ncount = 0;
CCNZeroCalc(this->blobs_[0]->count(), weightMask, &ncount);
if (this->bias_term_) {
CCNZeroCalc(this->blobs_[1]->count(), biasMask, &ncount);
}
LOG(INFO)<<ncount<<"\n";
}*/
/********************************************************/
// Calculate the weight mask and bias mask with probability
// Edited by Guiying Li
//Dtype r = static_cast<Dtype>(rand())/static_cast<Dtype>(RAND_MAX);
//if (pow(1+(this->gamma)*(this->iter_),-(this->power))>r && (this->iter_)<(this->iter_stop_)) {
// CCcRateCalc<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[0]->count()), CAFFE_CUDA_NUM_THREADS>>>(this->blobs_[0]->count(), weightMask);
// CUDA_POST_KERNEL_CHECK;
if (_update) {
hipLaunchKernelGGL(( CCMaskCalc<Dtype>), dim3(CAFFE_GET_BLOCKS(this->blobs_[0]->count())),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, this->blobs_[0]->count(), weight,
weightMask, this->mu, this->std, this->crate);
CUDA_POST_KERNEL_CHECK;
if (this->bias_term_) {
hipLaunchKernelGGL(( CCMaskCalc<Dtype>), dim3(CAFFE_GET_BLOCKS(this->blobs_[1]->count())),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, this->blobs_[1]->count(), bias,
biasMask, this->mu, this->std, this->crate);
CUDA_POST_KERNEL_CHECK;
}
_update = false;
}
// ------Guiying---------
}
// Calculate the current (masked) weight and bias
hipLaunchKernelGGL(( CCMaskApply<Dtype>), dim3(CAFFE_GET_BLOCKS(this->blobs_[0]->count())),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, this->blobs_[0]->count(), weight, weightMask, weightTmp);
CUDA_POST_KERNEL_CHECK;
if (this->bias_term_) {
hipLaunchKernelGGL(( CCMaskApply<Dtype>), dim3(CAFFE_GET_BLOCKS(this->blobs_[1]->count())),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, this->blobs_[1]->count(), bias, biasMask, biasTmp);
CUDA_POST_KERNEL_CHECK;
}
// Forward calculation with (masked) weight and bias
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = top[i]->mutable_gpu_data();
for (int n = 0; n < this->num_; ++n) {
this->forward_gpu_gemm(bottom_data + bottom[i]->offset(n), weightTmp,
top_data + top[i]->offset(n));
if (this->bias_term_) {
this->forward_gpu_bias(top_data + top[i]->offset(n), biasTmp);
}
}
}
}
template <typename Dtype>
void CConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* weightTmp = this->weight_tmp_.gpu_data();
const Dtype* weightMask = this->blobs_[2]->gpu_data();
Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
// Bias gradient, if necessary.
if (this->bias_term_ && this->param_propagate_down_[1]) {
const Dtype* biasMask = this->blobs_[3]->gpu_data();
Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff();
hipLaunchKernelGGL(( CCMaskApply<Dtype>), dim3(CAFFE_GET_BLOCKS(this->blobs_[3]->count())),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, this->blobs_[3]->count(), bias_diff, biasMask, bias_diff);
CUDA_POST_KERNEL_CHECK;
for (int n = 0; n < this->num_; ++n) {
this->backward_gpu_bias(bias_diff, top_diff + top[i]->offset(n));
}
}
if (this->param_propagate_down_[0] || propagate_down[i]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
hipLaunchKernelGGL(( CCMaskApply<Dtype>), dim3(CAFFE_GET_BLOCKS(this->blobs_[2]->count())),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, this->blobs_[2]->count(), weight_diff, weightMask, weight_diff);
CUDA_POST_KERNEL_CHECK;
for (int n = 0; n < this->num_; ++n) {
// gradient w.r.t. weight. Note that we will accumulate diffs.
if (this->param_propagate_down_[0]) {
this->weight_gpu_gemm(bottom_data + bottom[i]->offset(n),
top_diff + top[i]->offset(n), weight_diff);
}
// gradient w.r.t. bottom data, if necessary.
if (propagate_down[i]) {
this->backward_gpu_gemm(top_diff + top[i]->offset(n), weightTmp,
bottom_diff + bottom[i]->offset(n));
}
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CConvolutionLayer);
} // namespace caffe
| cfd3d36d045b2005156313ae3c5a4587548397c5.cu | #include <vector>
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/im2col.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
#include <cmath>
namespace caffe {
// The constant NUM_THREADS should be equal to the value in CCMomentCalc
template <typename Dtype>
__global__ void CCMomentCollect(const int n, const Dtype* wb, const Dtype* mask,
Dtype* mu, Dtype* std, unsigned int* count ) {
const int NUM_THREADS = 512;
__shared__ Dtype param [4*NUM_THREADS];
__shared__ unsigned int tcount [2*NUM_THREADS];
unsigned int t = threadIdx.x;
unsigned int s = 2 * blockIdx.x * NUM_THREADS;
if (s+t < n){
param[t] = fabs(mask[s+t]*wb[s+t]);
param[t+2*NUM_THREADS] = mask[s+t]*wb[s+t]*wb[s+t];
if(mask[s+t]*wb[s+t]!=0) tcount[t] = 1;
else tcount[t] = 0;
}
else{
param[t] = 0;param[t+2*NUM_THREADS] = 0;tcount[t] = 0;
}
if (s+t+NUM_THREADS < n){
param[t+NUM_THREADS] = fabs(mask[s+t+NUM_THREADS]*wb[s+t+NUM_THREADS]);
param[t+3*NUM_THREADS] = mask[s+t+NUM_THREADS]*wb[s+t+NUM_THREADS]*wb[s+t+NUM_THREADS];
if(mask[s+t+NUM_THREADS]*wb[s+t+NUM_THREADS]!=0) tcount[t+NUM_THREADS] = 1;
else tcount[t+NUM_THREADS] = 0;
}
else{
param[t+NUM_THREADS] = 0;param[t+3*NUM_THREADS] = 0;tcount[t+NUM_THREADS] = 0;
}
__syncthreads();
for(unsigned int stride = NUM_THREADS; stride >= 1; stride >>= 1) {
if (t < stride ){
param[t] += param[t+stride];
param[t+2*NUM_THREADS] += param[t+2*NUM_THREADS+stride];
tcount[t] += tcount[t+stride];
}
__syncthreads();
}
if (t == 0){
mu [blockIdx.x] = param[0];
std [blockIdx.x] = param[2*NUM_THREADS];
count[blockIdx.x] = tcount[0];
}
}
// The constant NUM_THREADS should be equal to the value in CCMomentCalc
template <typename Dtype>
__global__ void CCNzeroCollect(const int n, const Dtype* mask, unsigned int* count ) {
const int NUM_THREADS = 512;
__shared__ unsigned int tcount [2*NUM_THREADS];
unsigned int t = threadIdx.x;
unsigned int s = 2 * blockIdx.x * NUM_THREADS;
tcount[t] = 0;
if (s+t < n && mask[s+t]!=0){
tcount[t] = 1;
}
tcount[t+NUM_THREADS] = 0;
if (s+t+NUM_THREADS < n && mask[s+t+NUM_THREADS]!=0){
tcount[t+NUM_THREADS] = 1;
}
__syncthreads();
for(unsigned int stride = NUM_THREADS; stride >= 1; stride >>= 1) {
if (t < stride ){
tcount[t] += tcount[t+stride];
}
__syncthreads();
}
if (t == 0){
count[blockIdx.x] = tcount[0];
}
}
template <typename Dtype>
__global__ void CCMaskCalc(const int n, const Dtype* wb,
Dtype* mask, Dtype mu, Dtype std, Dtype r) {
CUDA_KERNEL_LOOP(index, n) {
// added by Guiying Li
//if (mask[index] > 1)
// mask[index] = 1;
//else if (mask[index] <0)
// mask[index] = 0;
// ----Guiying Li-----
if (mask[index]==1 && fabs(wb[index])<=0.9*max(mu+r*std,Dtype(0)))
mask[index] = 0;
else if (mask[index]==0 && fabs(wb[index])>1.1*max(mu+r*std,Dtype(0)))
mask[index] = 1;
}
}
template <typename Dtype>
__global__ void CCcRateCalc(const int n,
Dtype* mask) {
CUDA_KERNEL_LOOP(index, n) {
if (index == 0){
if (mask[0] > 1){
mask[0] = 1;
} else if (mask[0] < 0){
mask[0] = 0;
}
}
}
}
template <typename Dtype>
__global__ void CCMaskApply(const int n, const Dtype* wb,
const Dtype* mask, Dtype* wb_t) {
CUDA_KERNEL_LOOP(index, n) {
wb_t[index] = wb[index] * mask[index];
}
}
template <typename Dtype>
void CCMomentCalc(const int n, const Dtype* wb, const Dtype* mask, Dtype* mu, Dtype* std, unsigned int* ncount){
const unsigned int NUM_THREADS = 512;
Dtype* pmu_g; Dtype* pstd_g; unsigned int* pncount_g;
Dtype* pmu_c; Dtype* pstd_c; unsigned int* pncount_c;
int num_p = (n+(NUM_THREADS<<1)-1)/(NUM_THREADS<<1);
cudaMalloc(&pmu_g, sizeof(Dtype) * num_p);
cudaMalloc(&pstd_g, sizeof(Dtype) * num_p);
cudaMalloc(&pncount_g, sizeof(unsigned int) * num_p);
pmu_c = (Dtype*) malloc(num_p * sizeof(Dtype));
pstd_c = (Dtype*) malloc(num_p * sizeof(Dtype));
pncount_c = (unsigned int*) malloc(num_p * sizeof(unsigned int));
CCMomentCollect<Dtype><<<num_p,NUM_THREADS>>>(n, wb, mask, pmu_g, pstd_g, pncount_g);
CUDA_POST_KERNEL_CHECK;
cudaMemcpy(pmu_c, pmu_g, sizeof(Dtype) * num_p, cudaMemcpyDeviceToHost);
cudaMemcpy(pstd_c, pstd_g, sizeof(Dtype) * num_p, cudaMemcpyDeviceToHost);
cudaMemcpy(pncount_c, pncount_g, sizeof(unsigned int) * num_p, cudaMemcpyDeviceToHost);
for (int i = 0; i < num_p; i++) {
*mu += pmu_c[i];*std += pstd_c[i];*ncount += pncount_c[i];
}
cudaFree(pmu_g);cudaFree(pstd_g);cudaFree(pncount_g);
free(pmu_c);free(pstd_c);free(pncount_c);
}
template <typename Dtype>
void CCNZeroCalc(const int n, const Dtype* mask, unsigned int* ncount ){
const unsigned int NUM_THREADS = 512;
unsigned int* pncount_g;
unsigned int* pncount_c;
int num_p = (n+(NUM_THREADS<<1)-1)/(NUM_THREADS<<1);
cudaMalloc(&pncount_g, sizeof(unsigned int) * num_p);
pncount_c = (unsigned int*) malloc(num_p * sizeof(unsigned int));
CCNzeroCollect<Dtype><<<num_p,NUM_THREADS>>>(n, mask, pncount_g);
CUDA_POST_KERNEL_CHECK;
cudaMemcpy(pncount_c, pncount_g, sizeof(unsigned int) * num_p, cudaMemcpyDeviceToHost);
for (int i = 0; i < num_p; i++) {
*ncount += pncount_c[i];
}
cudaFree(pncount_g);
free(pncount_c);
}
template <typename Dtype>
void CConvolutionLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* weight = this->blobs_[0]->mutable_gpu_data();
Dtype* weightMask = this->blobs_[2]->mutable_gpu_data();
Dtype* weightTmp = this->weight_tmp_.mutable_gpu_data();
const Dtype* bias = NULL;
Dtype* biasMask = NULL;
Dtype* biasTmp = NULL;
if (this->bias_term_) {
bias = this->blobs_[1]->mutable_gpu_data();
biasMask = this->blobs_[3]->mutable_gpu_data();
biasTmp = this->bias_tmp_.mutable_gpu_data();
}
// added by Guiying Li
bool _update = false;
Dtype* tmp_weightMask = this->blobs_[2]->mutable_cpu_data();
if (tmp_weightMask[0] > 1){
_update = true;
this->crate = tmp_weightMask[0] - 1;
tmp_weightMask[0] = 1;
} else if (tmp_weightMask[0] < 0){
_update = true;
this->crate = -tmp_weightMask[0];
tmp_weightMask[0] = 0;
}
weightMask = this->blobs_[2]->mutable_gpu_data();//update data
// -------Guiying------
if (this->phase_ == TRAIN){
// Calculate the mean and standard deviation of learnable parameters
if (this->std==0 && this->iter_==0){
unsigned int ncount = 0;
CCMomentCalc(this->blobs_[0]->count(), weight, weightMask, &mu, &std, &ncount);
if (this->bias_term_) {
CCMomentCalc(this->blobs_[1]->count(), bias, biasMask, &mu, &std, &ncount);
}
this->mu /= ncount; this->std -= ncount*mu*mu;
this->std /= ncount; this->std = sqrt(std);
LOG(INFO)<<mu<<" "<<std<<" "<<ncount<<"\n";
}
// Demonstrate the sparsity of compressed convolutional layer
/********************************************************/
/*if(this->iter_%1000==0){
unsigned int ncount = 0;
CCNZeroCalc(this->blobs_[0]->count(), weightMask, &ncount);
if (this->bias_term_) {
CCNZeroCalc(this->blobs_[1]->count(), biasMask, &ncount);
}
LOG(INFO)<<ncount<<"\n";
}*/
/********************************************************/
// Calculate the weight mask and bias mask with probability
// Edited by Guiying Li
//Dtype r = static_cast<Dtype>(rand())/static_cast<Dtype>(RAND_MAX);
//if (pow(1+(this->gamma)*(this->iter_),-(this->power))>r && (this->iter_)<(this->iter_stop_)) {
// CCcRateCalc<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[0]->count()), CAFFE_CUDA_NUM_THREADS>>>(this->blobs_[0]->count(), weightMask);
// CUDA_POST_KERNEL_CHECK;
if (_update) {
CCMaskCalc<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[0]->count()),
CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[0]->count(), weight,
weightMask, this->mu, this->std, this->crate);
CUDA_POST_KERNEL_CHECK;
if (this->bias_term_) {
CCMaskCalc<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[1]->count()),
CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[1]->count(), bias,
biasMask, this->mu, this->std, this->crate);
CUDA_POST_KERNEL_CHECK;
}
_update = false;
}
// ------Guiying---------
}
// Calculate the current (masked) weight and bias
CCMaskApply<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[0]->count()),
CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[0]->count(), weight, weightMask, weightTmp);
CUDA_POST_KERNEL_CHECK;
if (this->bias_term_) {
CCMaskApply<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[1]->count()),
CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[1]->count(), bias, biasMask, biasTmp);
CUDA_POST_KERNEL_CHECK;
}
// Forward calculation with (masked) weight and bias
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = top[i]->mutable_gpu_data();
for (int n = 0; n < this->num_; ++n) {
this->forward_gpu_gemm(bottom_data + bottom[i]->offset(n), weightTmp,
top_data + top[i]->offset(n));
if (this->bias_term_) {
this->forward_gpu_bias(top_data + top[i]->offset(n), biasTmp);
}
}
}
}
template <typename Dtype>
void CConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* weightTmp = this->weight_tmp_.gpu_data();
const Dtype* weightMask = this->blobs_[2]->gpu_data();
Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
// Bias gradient, if necessary.
if (this->bias_term_ && this->param_propagate_down_[1]) {
const Dtype* biasMask = this->blobs_[3]->gpu_data();
Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff();
CCMaskApply<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[3]->count()),
CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[3]->count(), bias_diff, biasMask, bias_diff);
CUDA_POST_KERNEL_CHECK;
for (int n = 0; n < this->num_; ++n) {
this->backward_gpu_bias(bias_diff, top_diff + top[i]->offset(n));
}
}
if (this->param_propagate_down_[0] || propagate_down[i]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
CCMaskApply<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[2]->count()),
CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[2]->count(), weight_diff, weightMask, weight_diff);
CUDA_POST_KERNEL_CHECK;
for (int n = 0; n < this->num_; ++n) {
// gradient w.r.t. weight. Note that we will accumulate diffs.
if (this->param_propagate_down_[0]) {
this->weight_gpu_gemm(bottom_data + bottom[i]->offset(n),
top_diff + top[i]->offset(n), weight_diff);
}
// gradient w.r.t. bottom data, if necessary.
if (propagate_down[i]) {
this->backward_gpu_gemm(top_diff + top[i]->offset(n), weightTmp,
bottom_diff + bottom[i]->offset(n));
}
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CConvolutionLayer);
} // namespace caffe
|
77f234478e7ea52cb06e709f2dbd524035d15f5c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <assert.h>
#include "mean.h"
#define NTHREADS 512
#define NBLOCKS 512
int main() {
int npix = 512;
int Ntot = npix*npix;
int size = Ntot*sizeof(float);
// allocate host array
float* arr = (float *) malloc(size);
float* h_sum = (float *) malloc(sizeof(float));
*h_sum = 0; //init to zero
// assign random numbers
for (int i=0; i<Ntot; ++i) {
arr[i] = (float) (rand() % 255);
}
// allocate memory on device
float* d_arr, *d_sum;
hipMalloc((void**) &d_arr, size);
hipMalloc((void**) &d_sum, sizeof(float));
// copy host data to device
hipMemcpy(d_arr,arr, size, hipMemcpyHostToDevice);
hipMemcpy(d_sum,h_sum,sizeof(float), hipMemcpyHostToDevice);
// calculate the sum on the CPU for comparison
float sum = 0;
for (int i=0; i<Ntot; ++i) {
sum += arr[i];
}
// launch the CUDA sum kernel, which uses atomics and a grid-stride loop
hipLaunchKernelGGL(( cu_sum_atomic), dim3(NBLOCKS),dim3(NTHREADS), 0, 0, d_arr, d_sum, Ntot);
// copy result back to host
hipMemcpy(h_sum,d_sum,sizeof(float), hipMemcpyDeviceToHost);
// compare results
printf("Results are cpu: %f , gpu: %f \n", sum, *h_sum);
// assert(*h_sum == sum);
// free memory
free(arr); free(h_sum);
hipFree(d_arr); hipFree(d_sum);
return 0;
}
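// mean.h is not shown in this listing, so the body of cu_sum_atomic is unknown.
// A kernel matching the launch above, a grid-stride loop that accumulates into a
// single float with atomics, could plausibly look like this sketch (named
// differently so it does not clash with the real declaration); the actual
// implementation may instead do a per-block reduction before one atomicAdd:
__global__ void cu_sum_atomic_sketch(const float* in, float* out, int n) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    atomicAdd(out, in[i]); // one atomic per element: fine for a demo, slow at scale
  }
}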
| 77f234478e7ea52cb06e709f2dbd524035d15f5c.cu | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <assert.h>
#include "mean.h"
#define NTHREADS 512
#define NBLOCKS 512
int main() {
int npix = 512;
int Ntot = npix*npix;
int size = Ntot*sizeof(float);
// allocate host array
float* arr = (float *) malloc(size);
float* h_sum = (float *) malloc(sizeof(float));
*h_sum = 0; //init to zero
// assign random numbers
for (int i=0; i<Ntot; ++i) {
arr[i] = (float) (rand() % 255);
}
// allocate memory on device
float* d_arr, *d_sum;
cudaMalloc((void**) &d_arr, size);
cudaMalloc((void**) &d_sum, sizeof(float));
// copy host data to device
cudaMemcpy(d_arr,arr, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_sum,h_sum,sizeof(float), cudaMemcpyHostToDevice);
// calculate the sum on the CPU for comparison
float sum = 0;
for (int i=0; i<Ntot; ++i) {
sum += arr[i];
}
// launch the CUDA sum kernel, which uses atomics and a grid-stride loop
cu_sum_atomic<<<NBLOCKS,NTHREADS>>>(d_arr, d_sum, Ntot);
// copy result back to host
cudaMemcpy(h_sum,d_sum,sizeof(float), cudaMemcpyDeviceToHost);
// compare results
printf("Results are cpu: %f , gpu: %f \n", sum, *h_sum);
// assert(*h_sum == sum);
// free memory
free(arr); free(h_sum);
cudaFree(d_arr); cudaFree(d_sum);
return 0;
}
|
9d9ea1f33144c126179f30579871f9a261f493c7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <device_launch_parameters.h>
#include "caffe/layers/softmax_loss_layer.hpp"
#include "caffe/util/gpu_math_functions.cuh"
#include "caffe/net.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
min_dtype<Dtype>()));
counts[index] = 1;
}
}
}
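// Note: for each evaluated position the kernel above computes the cross-entropy
// term
//
// loss_i = -log(max(p[i, label_i], min_dtype))
//
// where p is the softmax output; the clamp avoids -inf when the predicted
// probability underflows to zero. Forward_gpu below reduces the per-element
// losses (and the valid-sample counts) with asum and applies the normalizer.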
template <>
__global__ void SoftmaxLossForwardGPU<half>(const int nthreads,
const half* prob_data, const half* label, half* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
half* counts) {
const float minh = __half2float(min_dtype<half>());
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(__half2float(label[n * spatial_dim + s]));
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index].setx(0U);
counts[index].setx(0U);
} else {
loss[index] = float2half_clip(- log(max(__half2float(
prob_data[n * dim + label_value * spatial_dim + s]), minh)));
counts[index].setx(0x3c00U); // set to 1
}
}
}
template <typename Ftype, typename Btype>
void SoftmaxWithLossLayer<Ftype, Btype>::Forward_gpu(
const vector<Blob*>& bottom, const vector<Blob*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Ftype* prob_data = prob_->template gpu_data<Ftype>();
const Ftype* label = bottom[1]->gpu_data<Ftype>();
const int dim = prob_->count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Ftype* loss_data = bottom[0]->mutable_gpu_diff<Ftype>();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Ftype* counts = prob_->template mutable_gpu_diff<Ftype>();
hipStream_t stream = Caffe::thread_stream();
if (tp<Ftype>() == FLOAT16) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SoftmaxLossForwardGPU<half>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, nthreads, reinterpret_cast<const half*>(prob_data),
reinterpret_cast<const half*>(label), reinterpret_cast<half*>(loss_data),
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_,
reinterpret_cast<half*>(counts));
} else {
hipLaunchKernelGGL(( SoftmaxLossForwardGPU), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
}
CUDA_CHECK(hipStreamSynchronize(stream));
float loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
float valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
top[0]->mutable_cpu_data<Ftype>()[0] = loss / get_normalizer(normalization_, valid_count);
if (top.size() == 2) {
top[1]->ShareData(*prob_);
}
}
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
counts[index] = 1;
}
}
}
template <>
__global__ void SoftmaxLossBackwardGPU<half>(const int nthreads, const half* top,
const half* label, half* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, half* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(__half2float(label[n * spatial_dim + s]));
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s].setx(0U);
}
counts[index].setx(0U);
} else {
const int idx = n * dim + label_value * spatial_dim + s;
bottom_diff[idx] = float2half_clip(__half2float(bottom_diff[idx]) - 1.F);
counts[index].setx(0x3c00U); // 1.
}
}
}
template <typename Ftype, typename Btype>
void SoftmaxWithLossLayer<Ftype, Btype>::Backward_gpu(const vector<Blob*>& top,
const vector<bool>& propagate_down, const vector<Blob*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Btype* bottom_diff = bottom[0]->mutable_gpu_diff<Btype>();
const Btype* prob_data = prob_->template gpu_data<Btype>();
const Btype* top_data = top[0]->gpu_data<Btype>();
caffe_gpu_memcpy(prob_->count() * sizeof(Btype), prob_data, bottom_diff);
const Btype* label = bottom[1]->gpu_data<Btype>();
const int dim = prob_->count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is never used for anything else,
// we use it to avoid allocating new GPU memory.
Btype* counts = prob_->template mutable_gpu_diff<Btype>();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SoftmaxLossBackwardGPU), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, Caffe::thread_stream(), nthreads, top_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
CUDA_CHECK(hipStreamSynchronize(Caffe::thread_stream()));
int valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) {
float float_count;
caffe_gpu_asum(nthreads, counts, &float_count);
valid_count = int(float_count);
}
float loss_weight = float(top[0]->cpu_diff<Btype>()[0]) /
get_normalizer(normalization_, valid_count);
if (this->parent_net() != NULL) {
loss_weight *= this->parent_net()->global_grad_scale();
}
caffe_gpu_scal<Btype>(prob_->count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS_FB(SoftmaxWithLossLayer);
} // namespace caffe
| 9d9ea1f33144c126179f30579871f9a261f493c7.cu | #include <algorithm>
#include <device_launch_parameters.h>
#include "caffe/layers/softmax_loss_layer.hpp"
#include "caffe/util/gpu_math_functions.cuh"
#include "caffe/net.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
min_dtype<Dtype>()));
counts[index] = 1;
}
}
}
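// Specialization for half-precision storage: the arithmetic is done in float and results are
// written back through raw half bit patterns (0x3c00U is 1.0 in half precision).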
template <>
__global__ void SoftmaxLossForwardGPU<half>(const int nthreads,
const half* prob_data, const half* label, half* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
half* counts) {
const float minh = __half2float(min_dtype<half>());
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(__half2float(label[n * spatial_dim + s]));
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index].setx(0U);
counts[index].setx(0U);
} else {
loss[index] = float2half_clip(- log(max(__half2float(
prob_data[n * dim + label_value * spatial_dim + s]), minh)));
counts[index].setx(0x3c00U); // set to 1
}
}
}
template <typename Ftype, typename Btype>
void SoftmaxWithLossLayer<Ftype, Btype>::Forward_gpu(
const vector<Blob*>& bottom, const vector<Blob*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Ftype* prob_data = prob_->template gpu_data<Ftype>();
const Ftype* label = bottom[1]->gpu_data<Ftype>();
const int dim = prob_->count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Ftype* loss_data = bottom[0]->mutable_gpu_diff<Ftype>();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Ftype* counts = prob_->template mutable_gpu_diff<Ftype>();
cudaStream_t stream = Caffe::thread_stream();
if (tp<Ftype>() == FLOAT16) {
// NOLINT_NEXT_LINE(whitespace/operators)
SoftmaxLossForwardGPU<half><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS, 0, stream>>>(nthreads, reinterpret_cast<const half*>(prob_data),
reinterpret_cast<const half*>(label), reinterpret_cast<half*>(loss_data),
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_,
reinterpret_cast<half*>(counts));
} else {
SoftmaxLossForwardGPU<<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS, 0, stream>>> (nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
}
CUDA_CHECK(cudaStreamSynchronize(stream));
float loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
float valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
top[0]->mutable_cpu_data<Ftype>()[0] = loss / get_normalizer(normalization_, valid_count);
if (top.size() == 2) {
top[1]->ShareData(*prob_);
}
}
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
counts[index] = 1;
}
}
}
template <>
__global__ void SoftmaxLossBackwardGPU<half>(const int nthreads, const half* top,
const half* label, half* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, half* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(__half2float(label[n * spatial_dim + s]));
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s].setx(0U);
}
counts[index].setx(0U);
} else {
const int idx = n * dim + label_value * spatial_dim + s;
bottom_diff[idx] = float2half_clip(__half2float(bottom_diff[idx]) - 1.F);
counts[index].setx(0x3c00U); // 1.
}
}
}
template <typename Ftype, typename Btype>
void SoftmaxWithLossLayer<Ftype, Btype>::Backward_gpu(const vector<Blob*>& top,
const vector<bool>& propagate_down, const vector<Blob*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Btype* bottom_diff = bottom[0]->mutable_gpu_diff<Btype>();
const Btype* prob_data = prob_->template gpu_data<Btype>();
const Btype* top_data = top[0]->gpu_data<Btype>();
caffe_gpu_memcpy(prob_->count() * sizeof(Btype), prob_data, bottom_diff);
const Btype* label = bottom[1]->gpu_data<Btype>();
const int dim = prob_->count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is never used for anything else,
// we use it to avoid allocating new GPU memory.
Btype* counts = prob_->template mutable_gpu_diff<Btype>();
// NOLINT_NEXT_LINE(whitespace/operators)
SoftmaxLossBackwardGPU<<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS, 0, Caffe::thread_stream()>>>(nthreads, top_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
CUDA_CHECK(cudaStreamSynchronize(Caffe::thread_stream()));
int valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) {
float float_count;
caffe_gpu_asum(nthreads, counts, &float_count);
valid_count = int(float_count);
}
float loss_weight = float(top[0]->cpu_diff<Btype>()[0]) /
get_normalizer(normalization_, valid_count);
if (this->parent_net() != NULL) {
loss_weight *= this->parent_net()->global_grad_scale();
}
caffe_gpu_scal<Btype>(prob_->count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS_FB(SoftmaxWithLossLayer);
} // namespace caffe
|
37c3fc7d5f592ac5e04b11720c7bab4f3f80f75e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "BinaryExecution.hpp"
#include "Raster.cuh"
namespace MNN {
namespace CUDA {
template <typename T>
__global__ void ATAN2(const T *input0, const T* input1, T *output, size_t count, size_t s0, size_t s1) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
T x = input0[i * s0];
T y = input1[i * s1];
output[i] = atan2(x, y);
}
return;
}
template <typename T>
__global__ void MOD(const T *input0, const T* input1, T *output, size_t count, size_t s0, size_t s1) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
T x = input0[i * s0];
T y = input1[i * s1];
output[i] = x - x / y;
}
return;
}
template <typename T>
__global__ void LOGICALOR(const T *input0, const T* input1, T *output, size_t count, size_t s0, size_t s1) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
T x = input0[i * s0];
T y = input1[i * s1];
output[i] = (x || y) ? 1 : 0;
}
return;
}
BinaryExecution::BinaryExecution(int opType, Backend *backend) : Execution(backend) {
mType = opType;
}
BinaryExecution::~BinaryExecution(){
// Do nothing
}
ErrorCode BinaryExecution::onExecute(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
auto count = CUDABackend::realSize(outputs[0]);
auto inputS0 = CUDABackend::realSize(inputs[0]);
auto inputS1 = CUDABackend::realSize(inputs[1]);
int s0 = inputS0 == 1 ? 0 : 1;
int s1 = inputS1 == 1 ? 0 : 1;
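// A stride of 0 makes BinaryBlit reuse the single element of a scalar input for every output
// position, i.e. that operand is broadcast against the other one.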
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
//printf("%d - %d\n", block_num, threads_num);
int size[3] = {1, 1, count};
int stride0[3] = {0, 0, s0};
int stride1[3] = {0, 0, s1};
int stride2[3] = {0, 0, 1};
auto computeFunction = [&](Tensor* input0T, Tensor* input1T, Tensor* outputT) {
auto input0 = (uint8_t*)input0T->deviceId();
auto input1 = (uint8_t*)input1T->deviceId();
auto output = (uint8_t*)outputT->deviceId();
BinaryBlit(output, input0, input1, size, stride0, stride1, stride2, outputT->getType(), runtime, mType);
};
computeFunction(inputs[0], inputs[1], outputs[0]);
for (int i=2; i<inputs.size(); ++i) {
computeFunction(outputs[0], inputs[i], outputs[0]);
}
return NO_ERROR;
}
class BinaryCreator : public CUDABackend::Creator {
public:
virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
const MNN::Op* op, Backend* backend) const override {
if (op->type() == OpType_BinaryOp) {
return new BinaryExecution(op->main_as_BinaryOp()->opType(), backend);
}
if (op->type() == OpType_Eltwise) {
switch (op->main_as_Eltwise()->type()) {
case EltwiseType_SUM:
return new BinaryExecution(BinaryOpOperation_ADD, backend);
case EltwiseType_PROD:
return new BinaryExecution(BinaryOpOperation_MUL, backend);
case EltwiseType_MAXIMUM:
return new BinaryExecution(BinaryOpOperation_MAXIMUM, backend);
default:
break;
}
}
return nullptr;
}
};
static CUDACreatorRegister<BinaryCreator> __init(OpType_BinaryOp);
static CUDACreatorRegister<BinaryCreator> __init2(OpType_Eltwise);
}
} | 37c3fc7d5f592ac5e04b11720c7bab4f3f80f75e.cu | #include "BinaryExecution.hpp"
#include "Raster.cuh"
namespace MNN {
namespace CUDA {
template <typename T>
__global__ void ATAN2(const T *input0, const T* input1, T *output, size_t count, size_t s0, size_t s1) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
T x = input0[i * s0];
T y = input1[i * s1];
output[i] = atan2(x, y);
}
return;
}
template <typename T>
__global__ void MOD(const T *input0, const T* input1, T *output, size_t count, size_t s0, size_t s1) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
T x = input0[i * s0];
T y = input1[i * s1];
output[i] = x - x / y;
}
return;
}
template <typename T>
__global__ void LOGICALOR(const T *input0, const T* input1, T *output, size_t count, size_t s0, size_t s1) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
T x = input0[i * s0];
T y = input1[i * s1];
output[i] = (x || y) ? 1 : 0;
}
return;
}
BinaryExecution::BinaryExecution(int opType, Backend *backend) : Execution(backend) {
mType = opType;
}
BinaryExecution::~BinaryExecution(){
// Do nothing
}
ErrorCode BinaryExecution::onExecute(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
auto count = CUDABackend::realSize(outputs[0]);
auto inputS0 = CUDABackend::realSize(inputs[0]);
auto inputS1 = CUDABackend::realSize(inputs[1]);
int s0 = inputS0 == 1 ? 0 : 1;
int s1 = inputS1 == 1 ? 0 : 1;
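// A stride of 0 makes BinaryBlit reuse the single element of a scalar input for every output
// position, i.e. that operand is broadcast against the other one.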
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
//printf("%d - %d\n", block_num, threads_num);
int size[3] = {1, 1, count};
int stride0[3] = {0, 0, s0};
int stride1[3] = {0, 0, s1};
int stride2[3] = {0, 0, 1};
auto computeFunction = [&](Tensor* input0T, Tensor* input1T, Tensor* outputT) {
auto input0 = (uint8_t*)input0T->deviceId();
auto input1 = (uint8_t*)input1T->deviceId();
auto output = (uint8_t*)outputT->deviceId();
BinaryBlit(output, input0, input1, size, stride0, stride1, stride2, outputT->getType(), runtime, mType);
};
computeFunction(inputs[0], inputs[1], outputs[0]);
for (int i=2; i<inputs.size(); ++i) {
computeFunction(outputs[0], inputs[i], outputs[0]);
}
return NO_ERROR;
}
class BinaryCreator : public CUDABackend::Creator {
public:
virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
const MNN::Op* op, Backend* backend) const override {
if (op->type() == OpType_BinaryOp) {
return new BinaryExecution(op->main_as_BinaryOp()->opType(), backend);
}
if (op->type() == OpType_Eltwise) {
switch (op->main_as_Eltwise()->type()) {
case EltwiseType_SUM:
return new BinaryExecution(BinaryOpOperation_ADD, backend);
case EltwiseType_PROD:
return new BinaryExecution(BinaryOpOperation_MUL, backend);
case EltwiseType_MAXIMUM:
return new BinaryExecution(BinaryOpOperation_MAXIMUM, backend);
default:
break;
}
}
return nullptr;
}
};
static CUDACreatorRegister<BinaryCreator> __init(OpType_BinaryOp);
static CUDACreatorRegister<BinaryCreator> __init2(OpType_Eltwise);
}
} |
32bcc03b65d8ba8578c3f27a2ddac2b5a2c73202.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <iostream>
#include <cassert>
#if __CUDA_ARCH__ < 350
template <typename T>
__forceinline__ __device__ T __ldg(const T* data) {
return data[0];
}
#endif
__forceinline__ __device__ float Sigmoid(float x) {
return 1.0f / (1.0f + expf(-x));
}
__forceinline__ __device__ float SigmoidDer(float x) {
const float p = 1.0f / (1.0f + expf(-x));
return p * (1.0f - p);
}
// ExpProb Polynom {{{
__global__ void ExpProbPolynomProbsImpl(
const float* features,
int batchSize,
const int* splits,
const float* conditions,
const int* polynomOffsets,
int polynomCount,
float lambda,
float* probs) {
if (threadIdx.x < batchSize) {
int polynomId = blockIdx.x;
features += threadIdx.x;
probs += threadIdx.x;
while (polynomId < polynomCount) {
int offset = polynomOffsets[polynomId];
int nextOffset = polynomOffsets[polynomId + 1];
const int depth = nextOffset - offset;
float logProb = 0;
bool zeroProb = false;
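// Each monom's probability is the product of per-feature gates (1 - exp(-lambda * x)); it is
// accumulated in log space for numerical stability and collapsed to zero as soon as any gate's
// log is not finite (i.e. the gate itself is <= 0).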
for (int i = 0; i < depth; ++i) {
if (zeroProb) {
continue;
}
const int f = __ldg(splits + offset + i);
const float c = __ldg(conditions + offset + i);
const float x = __ldg(features + f * batchSize);
const float val = -lambda * x;
const float expVal = 1.0f - expf(val);
if (isfinite(log(expVal))) {
logProb += log(expVal);
} else {
zeroProb = true;
}
}
float prob = 0.0f;
if (!zeroProb) {
prob = expf(logProb);
}
probs[polynomId * batchSize] = prob;
polynomId += gridDim.x;
}
}
}
//batch size should be equal to BlockSize
//we need to reduce polynoms for each output dim
__global__ void ExpProbPolynomForwardImpl(
const float* probs,
int batchSize,
const float* values,
int polynomCount,
int outputDim,
float* out) {
//out: batch_elem0 dim0, dim1, dimk batch_elem1 dim0 dim1 dimk
//so threads
int polynomId = blockIdx.x;
const int dimId = blockIdx.y;
int tid = threadIdx.x;
if (tid >= batchSize) {
return;
}
float sum = 0;
probs += threadIdx.x;
values += dimId;
while (polynomId < polynomCount) {
const float polynomProb = __ldg(probs + polynomId * batchSize);
const float out = __ldg(values + polynomId * outputDim);
sum += polynomProb * out;
polynomId += gridDim.x;
}
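// Blocks stride over the polynoms, so each block only contributes a partial sum; the result is
// accumulated with atomicAdd and the output buffer is expected to be zero-initialized by the caller.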
atomicAdd(out + dimId * batchSize + threadIdx.x, sum);
}
void ExpProbPolynomForward(
const float lambda,
const float* features,
int fCount,
int batchSize,
const int* splits,
const float* conditions,
const int* polynomOffsets,
const float* values,
int polynomCount,
int outDim,
float* tempProbs,
float* output
) {
const int blockSize = batchSize;
const int numBlocks = min(polynomCount, 1000);
assert(batchSize < 2048);
assert(numBlocks);
ExpProbPolynomProbsImpl << < numBlocks, blockSize >>> (features, batchSize, splits, conditions, polynomOffsets, polynomCount, lambda, tempProbs);
dim3 forwardBlocks;
forwardBlocks.z = 1;
forwardBlocks.y = outDim;
forwardBlocks.x = min(polynomCount, 512);
ExpProbPolynomForwardImpl << < forwardBlocks, batchSize >> > (tempProbs, batchSize, values, polynomCount, outDim, output);
}
/*
* Here layout is not the same as in forward pass
* BlockSize = 256, MaxDepth = 8, K = 24
* should give 50% occupancy, this should be enough
*/
template <int MaxDepth, int BlockSize, int K>
__global__ void ExpProbPolynomBackwardImpl(float lambda,
const float* features,
int featuresCount,
const float* outDer,
int outputDim,
const int* featureIds,
const float* conditions,
const float* values,
const int* polynomOffsets,
int polynomCount,
float* featuresDer) {
const int sampleId = blockIdx.y;
features += sampleId * featuresCount;
featuresDer += sampleId * featuresCount;
outDer += sampleId * outputDim;
//out: batch_elem0 dim0, dim1, dimk batch_elem1 dim0 dim1 dimk
//so threads
__shared__ float localFeaturesDer[BlockSize * K];
for (int i = threadIdx.x; i < BlockSize * K; i += BlockSize) {
localFeaturesDer[i] = 0;
}
__syncthreads();
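// The shared buffer holds `memoryBlocks` copies of the (block-aligned) feature-derivative vector,
// interleaved per feature; each thread accumulates into the copy selected by memoryBlockId, which
// spreads the shared-memory atomics across copies, and the copies are summed at the end.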
const int alignedFeaturesCount = ((featuresCount + BlockSize - 1) / BlockSize) * BlockSize;
const int memoryBlocks = (BlockSize * K) / alignedFeaturesCount;
const int memoryBlockId = threadIdx.x % memoryBlocks;
int polynomId = blockIdx.x * blockDim.x + threadIdx.x;
while (polynomId < polynomCount) {
const int offset = polynomOffsets[polynomId];
const int nextOffset = polynomOffsets[polynomId + 1];
const int depth = nextOffset - offset;
if (depth != 0) {
float logProbs[MaxDepth];
float vals[MaxDepth];
short fids[MaxDepth];
float totalLogProb = 0;
bool zeroProb = false;
#pragma unroll
for (int i = 0; i < MaxDepth; ++i) {
if (i < depth) {
const int f = __ldg(featureIds + i + offset);
fids[i] = f;
const float c = __ldg(conditions + i + offset);
const float x = __ldg(features + f);
vals[i] = -lambda * x;
const float expVal = 1.0f - exp(vals[i]);
logProbs[i] = log(expVal);
if (isfinite(logProbs[i])) {
totalLogProb += logProbs[i];
} else {
zeroProb = true;
}
}
}
//featureDerivative is outputDer * total value before monom * monom derivative
float derMultiplier = 0;
#pragma unroll 10
for (int dim = 0; dim < outputDim; ++dim) {
derMultiplier += __ldg(values + polynomId * outputDim + dim) * __ldg(outDer + dim);
}
#pragma unroll
for (int i = 0; i < MaxDepth; ++i) {
if (i < depth) {
const int f = fids[i];
// XXX for zero feature it actually shouldn't be zero, but it's not propagated through relu anyways.
float featureDer = 0.0f;
if (!zeroProb) {
// (1 - e^{-lambda * x})' = lambda * e^{-lambda * x}
//
// dp / dx_i = p / (1 - e^{-l * x_i}) * (l * e^{-l * x_i})
// ln( p / (1 - e^{-l * x_i}) * (l * e^{-l * x_i}) ) = ln(p) - ln(1 - e^{-l * x_i}) + ln(l) - l * x_i
const float monomDer = exp(totalLogProb - logProbs[i] + log(lambda) + vals[i]);
featureDer = monomDer * derMultiplier;
}
//atomics in shared memory, pretty fast on pascal+ hardware
atomicAdd(localFeaturesDer + memoryBlocks * f + memoryBlockId, featureDer);
}
}
}
polynomId += gridDim.x * blockDim.x;
}
__syncthreads();
//outputDim = 1024 => memoryBlocks = 6
for (int f = threadIdx.x; f < featuresCount; f += BlockSize) {
float der = 0;
#pragma unroll
for (int k = 0; k < memoryBlocks; ++k) {
der += localFeaturesDer[f * memoryBlocks + k];
}
atomicAdd(featuresDer + f, der);
}
}
void ExpProbPolynomBackward(int batchSize,
float lambda,
const float* features,
int featuresCount,
const float* outDer,
int outputDim,
const int* featureIds,
const float* conditions,
const float* values,
const int* polynomOffsets,
int polynomCount,
float* featuresDer) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.z = 1;
numBlocks.y = batchSize;
//should be ≈ smCount * 6 / batchSize
numBlocks.x = min((polynomCount + blockSize - 1) * outputDim / blockSize, 160);
const int maxDepth = 12;
const int K = 16;
hipLaunchKernelGGL(( ExpProbPolynomBackwardImpl<maxDepth, blockSize, K>) , dim3(numBlocks), dim3(blockSize), 0, 0, lambda, features, featuresCount, outDer, outputDim, featureIds, conditions, values, polynomOffsets, polynomCount, featuresDer);
}
// }}}
// SigmoidProb Polynom {{{
__global__ void SigmoidProbPolynomProbsImpl(
const float* features,
int batchSize,
const int* splits,
const float* conditions,
const int* polynomOffsets,
int polynomCount,
float lambda,
float* probs) {
if (threadIdx.x < batchSize) {
int polynomId = blockIdx.x;
features += threadIdx.x;
probs += threadIdx.x;
while (polynomId < polynomCount) {
int offset = polynomOffsets[polynomId];
int nextOffset = polynomOffsets[polynomId + 1];
const int depth = nextOffset - offset;
// bool isTrue = true;
float logProb = 0;
for (int i = 0; i < depth; ++i) {
const int f = __ldg(splits + offset + i);
const float c = __ldg(conditions + offset + i);
const float x = __ldg(features + f * batchSize);
const float val = -lambda * (x - c);
// isTrue = x <= c? false : isTrue;
const float expVal = 1.0f + expf(val);
// p(split = 1) = 1.0 / (1.0 + exp(-lambda * (x - c)))
// e.g. c = 0, x -> -inf => exp(-lambda * (x - c)) -> inf => p -> 0
// log(p) = -log(1.0 + exp(-lambda * (x - c)))
const float isTrueLogProb = isfinite(expVal) ? log(expVal) : val;
logProb -= isTrueLogProb;
}
const float prob = expf(logProb);
// const float prob = isTrue ? 1 : 0;//exp(logProb);
probs[polynomId * batchSize] = prob;
polynomId += gridDim.x;
}
}
}
//batch size should be equal to BlockSize
//we need to reduce polynoms for each output dim
__global__ void SigmoidProbPolynomForwardImpl(
const float* probs,
int batchSize,
const float* values,
int polynomCount,
int outputDim,
float* out) {
//out: batch_elem0 dim0, dim1, dimk batch_elem1 dim0 dim1 dimk
//so threads
int polynomId = blockIdx.x;
const int dimId = blockIdx.y;
int tid = threadIdx.x;
if (tid >= batchSize) {
return;
}
float sum = 0;
probs += threadIdx.x;
values += dimId;
while (polynomId < polynomCount) {
const float polynomProb = __ldg(probs + polynomId * batchSize);
const float out = __ldg(values + polynomId * outputDim);
sum += polynomProb * out;
polynomId += gridDim.x;
}
atomicAdd(out + dimId * batchSize + threadIdx.x, sum);
}
void SigmoidProbPolynomForward(
const float lambda,
const float* features,
int fCount,
int batchSize,
const int* splits,
const float* conditions,
const int* polynomOffsets,
const float* values,
int polynomCount,
int outDim,
float* tempProbs,
float* output
) {
const int blockSize = batchSize;
const int numBlocks = min(polynomCount, 1000);
assert(batchSize < 2048);
assert(numBlocks);
SigmoidProbPolynomProbsImpl << < numBlocks, blockSize >>> (features, batchSize, splits, conditions, polynomOffsets, polynomCount, lambda, tempProbs);
dim3 forwardBlocks;
forwardBlocks.z = 1;
forwardBlocks.y = outDim;
forwardBlocks.x = min(polynomCount, 512);
SigmoidProbPolynomForwardImpl << < forwardBlocks, batchSize >> > (tempProbs, batchSize, values, polynomCount, outDim, output);
}
/*
* Here layout is not the same as in forward pass
* BlockSize = 256, MaxDepth = 8, K = 24
* should give 50% occupancy, this should be enough
*/
template <int MaxDepth, int BlockSize, int K>
__global__ void SigmoidProbPolynomBackwardImpl(float lambda,
const float* features,
int featuresCount,
const float* outDer,
int outputDim,
const int* featureIds,
const float* conditions,
const float* values,
const int* polynomOffsets,
int polynomCount,
float* featuresDer) {
const int sampleId = blockIdx.y;
features += sampleId * featuresCount;
featuresDer += sampleId * featuresCount;
outDer += sampleId * outputDim;
//out: batch_elem0 dim0, dim1, dimk batch_elem1 dim0 dim1 dimk
//so threads
__shared__ float localFeaturesDer[BlockSize * K];
for (int i = threadIdx.x; i < BlockSize * K; i += BlockSize) {
localFeaturesDer[i] = 0;
}
__syncthreads();
const int alignedFeaturesCount = ((featuresCount + BlockSize - 1) / BlockSize) * BlockSize;
const int memoryBlocks = (BlockSize * K) / alignedFeaturesCount;
const int memoryBlockId = threadIdx.x % memoryBlocks;
int polynomId = blockIdx.x * blockDim.x + threadIdx.x;
while (polynomId < polynomCount) {
const int offset = polynomOffsets[polynomId];
const int nextOffset = polynomOffsets[polynomId + 1];
const int depth = nextOffset - offset;
if (depth != 0) {
float logProbs[MaxDepth];
short fids[MaxDepth];
float totalLogProb = 0;
#pragma unroll
for (int i = 0; i < MaxDepth; ++i) {
if (i < depth) {
const int f = __ldg(featureIds + i + offset);
fids[i] = f;
const float c = __ldg(conditions + i + offset);
const float x = __ldg(features + f);
const float val = -lambda * (x - c);
const float expVal = 1.0f + exp(val);
logProbs[i] = -(isfinite(expVal) ? log(expVal) : val);
totalLogProb += logProbs[i];
}
}
const float p = exp(totalLogProb);
//featureDerivative is outputDer * total value before monom * monom derivative
float derMultiplier = 0;
#pragma unroll 10
for (int dim = 0; dim < outputDim; ++dim) {
derMultiplier += __ldg(values + polynomId * outputDim + dim) * __ldg(outDer + dim);
}
#pragma unroll
for (int i = 0; i < MaxDepth; ++i) {
if (i < depth) {
const int f = fids[i];
const float featureProb = exp(logProbs[i]);
const float monomDer = p * (1.0 - featureProb);
const float featureDer = monomDer * derMultiplier;
//atomics in shared memory, pretty fast on pascal+ hardware
atomicAdd(localFeaturesDer + memoryBlocks * f + memoryBlockId, featureDer);
}
}
}
polynomId += gridDim.x * blockDim.x;
}
__syncthreads();
//outputDim = 1024 => memoryBlocks = 6
for (int f = threadIdx.x; f < featuresCount; f += BlockSize) {
float der = 0;
#pragma unroll
for (int k = 0; k < memoryBlocks; ++k) {
der += localFeaturesDer[f * memoryBlocks + k];
}
atomicAdd(featuresDer + f, der);
}
}
void SigmoidProbPolynomBackward(int batchSize,
float lambda,
const float* features,
int featuresCount,
const float* outDer,
int outputDim,
const int* featureIds,
const float* conditions,
const float* values,
const int* polynomOffsets,
int polynomCount,
float* featuresDer) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.z = 1;
numBlocks.y = batchSize;
//should be ≈ smCount * 6 / batchSize
numBlocks.x = min((polynomCount + blockSize - 1) * outputDim / blockSize, 160);
const int maxDepth = 12;
const int K = 16;
hipLaunchKernelGGL(( SigmoidProbPolynomBackwardImpl<maxDepth, blockSize, K>) , dim3(numBlocks), dim3(blockSize), 0, 0, lambda, features, featuresCount, outDer, outputDim, featureIds, conditions, values, polynomOffsets, polynomCount, featuresDer);
}
// }}}
| 32bcc03b65d8ba8578c3f27a2ddac2b5a2c73202.cu | #include <cuda_runtime.h>
#include <iostream>
#include <cassert>
#if __CUDA_ARCH__ < 350
template <typename T>
__forceinline__ __device__ T __ldg(const T* data) {
return data[0];
}
#endif
__forceinline__ __device__ float Sigmoid(float x) {
return 1.0f / (1.0f + expf(-x));
}
__forceinline__ __device__ float SigmoidDer(float x) {
const float p = 1.0f / (1.0f + expf(-x));
return p * (1.0f - p);
}
// ExpProb Polynom {{{
__global__ void ExpProbPolynomProbsImpl(
const float* features,
int batchSize,
const int* splits,
const float* conditions,
const int* polynomOffsets,
int polynomCount,
float lambda,
float* probs) {
if (threadIdx.x < batchSize) {
int polynomId = blockIdx.x;
features += threadIdx.x;
probs += threadIdx.x;
while (polynomId < polynomCount) {
int offset = polynomOffsets[polynomId];
int nextOffset = polynomOffsets[polynomId + 1];
const int depth = nextOffset - offset;
float logProb = 0;
bool zeroProb = false;
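// Each monom's probability is the product of per-feature gates (1 - exp(-lambda * x)); it is
// accumulated in log space for numerical stability and collapsed to zero as soon as any gate's
// log is not finite (i.e. the gate itself is <= 0).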
for (int i = 0; i < depth; ++i) {
if (zeroProb) {
continue;
}
const int f = __ldg(splits + offset + i);
const float c = __ldg(conditions + offset + i);
const float x = __ldg(features + f * batchSize);
const float val = -lambda * x;
const float expVal = 1.0f - expf(val);
if (isfinite(log(expVal))) {
logProb += log(expVal);
} else {
zeroProb = true;
}
}
float prob = 0.0f;
if (!zeroProb) {
prob = expf(logProb);
}
probs[polynomId * batchSize] = prob;
polynomId += gridDim.x;
}
}
}
//batch size should be equal to BlockSize
//we need to reduce polynoms for each output dim
__global__ void ExpProbPolynomForwardImpl(
const float* probs,
int batchSize,
const float* values,
int polynomCount,
int outputDim,
float* out) {
//out: batch_elem0 dim0, dim1, dimk batch_elem1 dim0 dim1 dimk
//so threads
int polynomId = blockIdx.x;
const int dimId = blockIdx.y;
int tid = threadIdx.x;
if (tid >= batchSize) {
return;
}
float sum = 0;
probs += threadIdx.x;
values += dimId;
while (polynomId < polynomCount) {
const float polynomProb = __ldg(probs + polynomId * batchSize);
const float out = __ldg(values + polynomId * outputDim);
sum += polynomProb * out;
polynomId += gridDim.x;
}
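// Blocks stride over the polynoms, so each block only contributes a partial sum; the result is
// accumulated with atomicAdd and the output buffer is expected to be zero-initialized by the caller.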
atomicAdd(out + dimId * batchSize + threadIdx.x, sum);
}
void ExpProbPolynomForward(
const float lambda,
const float* features,
int fCount,
int batchSize,
const int* splits,
const float* conditions,
const int* polynomOffsets,
const float* values,
int polynomCount,
int outDim,
float* tempProbs,
float* output
) {
const int blockSize = batchSize;
const int numBlocks = min(polynomCount, 1000);
assert(batchSize < 2048);
assert(numBlocks);
ExpProbPolynomProbsImpl << < numBlocks, blockSize >>> (features, batchSize, splits, conditions, polynomOffsets, polynomCount, lambda, tempProbs);
dim3 forwardBlocks;
forwardBlocks.z = 1;
forwardBlocks.y = outDim;
forwardBlocks.x = min(polynomCount, 512);
ExpProbPolynomForwardImpl << < forwardBlocks, batchSize >> > (tempProbs, batchSize, values, polynomCount, outDim, output);
}
/*
* Here layout is not the same as in forward pass
* BlockSize = 256, MaxDepth = 8, K = 24
* should give 50% occupancy, this should be enough
*/
template <int MaxDepth, int BlockSize, int K>
__global__ void ExpProbPolynomBackwardImpl(float lambda,
const float* features,
int featuresCount,
const float* outDer,
int outputDim,
const int* featureIds,
const float* conditions,
const float* values,
const int* polynomOffsets,
int polynomCount,
float* featuresDer) {
const int sampleId = blockIdx.y;
features += sampleId * featuresCount;
featuresDer += sampleId * featuresCount;
outDer += sampleId * outputDim;
//out: batch_elem0 dim0, dim1, dimk batch_elem1 dim0 dim1 dimk
//so threads
__shared__ float localFeaturesDer[BlockSize * K];
for (int i = threadIdx.x; i < BlockSize * K; i += BlockSize) {
localFeaturesDer[i] = 0;
}
__syncthreads();
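// The shared buffer holds `memoryBlocks` copies of the (block-aligned) feature-derivative vector,
// interleaved per feature; each thread accumulates into the copy selected by memoryBlockId, which
// spreads the shared-memory atomics across copies, and the copies are summed at the end.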
const int alignedFeaturesCount = ((featuresCount + BlockSize - 1) / BlockSize) * BlockSize;
const int memoryBlocks = (BlockSize * K) / alignedFeaturesCount;
const int memoryBlockId = threadIdx.x % memoryBlocks;
int polynomId = blockIdx.x * blockDim.x + threadIdx.x;
while (polynomId < polynomCount) {
const int offset = polynomOffsets[polynomId];
const int nextOffset = polynomOffsets[polynomId + 1];
const int depth = nextOffset - offset;
if (depth != 0) {
float logProbs[MaxDepth];
float vals[MaxDepth];
short fids[MaxDepth];
float totalLogProb = 0;
bool zeroProb = false;
#pragma unroll
for (int i = 0; i < MaxDepth; ++i) {
if (i < depth) {
const int f = __ldg(featureIds + i + offset);
fids[i] = f;
const float c = __ldg(conditions + i + offset);
const float x = __ldg(features + f);
vals[i] = -lambda * x;
const float expVal = 1.0f - exp(vals[i]);
logProbs[i] = log(expVal);
if (isfinite(logProbs[i])) {
totalLogProb += logProbs[i];
} else {
zeroProb = true;
}
}
}
//featureDerivative is outputDer * total value before monom * monom derivative
float derMultiplier = 0;
#pragma unroll 10
for (int dim = 0; dim < outputDim; ++dim) {
derMultiplier += __ldg(values + polynomId * outputDim + dim) * __ldg(outDer + dim);
}
#pragma unroll
for (int i = 0; i < MaxDepth; ++i) {
if (i < depth) {
const int f = fids[i];
// XXX for zero feature it actually shouldn't be zero, but it's not propagated through relu anyways.
float featureDer = 0.0f;
if (!zeroProb) {
// (1 - e^{-lambda * x})' = lambda * e^{-lambda * x}
//
// dp / dx_i = p / (1 - e^{-l * x_i}) * (l * e^{-l * x_i})
// ln( p / (1 - e^{-l * x_i}) * (l * e^{-l * x_i}) ) = ln(p) - ln(1 - e^{-l * x_i}) + ln(l) - l * x_i
const float monomDer = exp(totalLogProb - logProbs[i] + log(lambda) + vals[i]);
featureDer = monomDer * derMultiplier;
}
//atomics in shared memory, pretty fast on pascal+ hardware
atomicAdd(localFeaturesDer + memoryBlocks * f + memoryBlockId, featureDer);
}
}
}
polynomId += gridDim.x * blockDim.x;
}
__syncthreads();
//outputDim = 1024 => memoryBlocks = 6
for (int f = threadIdx.x; f < featuresCount; f += BlockSize) {
float der = 0;
#pragma unroll
for (int k = 0; k < memoryBlocks; ++k) {
der += localFeaturesDer[f * memoryBlocks + k];
}
atomicAdd(featuresDer + f, der);
}
}
void ExpProbPolynomBackward(int batchSize,
float lambda,
const float* features,
int featuresCount,
const float* outDer,
int outputDim,
const int* featureIds,
const float* conditions,
const float* values,
const int* polynomOffsets,
int polynomCount,
float* featuresDer) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.z = 1;
numBlocks.y = batchSize;
//should be ≈ smCount * 6 / batchSize
numBlocks.x = min((polynomCount + blockSize - 1) * outputDim / blockSize, 160);
const int maxDepth = 12;
const int K = 16;
ExpProbPolynomBackwardImpl<maxDepth, blockSize, K> <<<numBlocks, blockSize>>>(lambda, features, featuresCount, outDer, outputDim, featureIds, conditions, values, polynomOffsets, polynomCount, featuresDer);
}
// }}}
// SigmoidProb Polynom {{{
__global__ void SigmoidProbPolynomProbsImpl(
const float* features,
int batchSize,
const int* splits,
const float* conditions,
const int* polynomOffsets,
int polynomCount,
float lambda,
float* probs) {
if (threadIdx.x < batchSize) {
int polynomId = blockIdx.x;
features += threadIdx.x;
probs += threadIdx.x;
while (polynomId < polynomCount) {
int offset = polynomOffsets[polynomId];
int nextOffset = polynomOffsets[polynomId + 1];
const int depth = nextOffset - offset;
// bool isTrue = true;
float logProb = 0;
for (int i = 0; i < depth; ++i) {
const int f = __ldg(splits + offset + i);
const float c = __ldg(conditions + offset + i);
const float x = __ldg(features + f * batchSize);
const float val = -lambda * (x - c);
// isTrue = x <= c? false : isTrue;
const float expVal = 1.0f + expf(val);
// p(split = 1) = 1.0 / (1.0 + exp(-lambda * (x - c)))
// e.g. c = 0, x -> -inf => exp(-lambda * (x - c)) -> inf => p -> 0
// log(p) = -log(1.0 + exp(-lambda * (x - c)))
const float isTrueLogProb = isfinite(expVal) ? log(expVal) : val;
logProb -= isTrueLogProb;
}
const float prob = expf(logProb);
// const float prob = isTrue ? 1 : 0;//exp(logProb);
probs[polynomId * batchSize] = prob;
polynomId += gridDim.x;
}
}
}
//batch size should be equal to BlockSize
//we need to reduce polynoms for each output dim
__global__ void SigmoidProbPolynomForwardImpl(
const float* probs,
int batchSize,
const float* values,
int polynomCount,
int outputDim,
float* out) {
//out: batch_elem0 dim0, dim1, dimk batch_elem1 dim0 dim1 dimk
//so threads
int polynomId = blockIdx.x;
const int dimId = blockIdx.y;
int tid = threadIdx.x;
if (tid >= batchSize) {
return;
}
float sum = 0;
probs += threadIdx.x;
values += dimId;
while (polynomId < polynomCount) {
const float polynomProb = __ldg(probs + polynomId * batchSize);
const float out = __ldg(values + polynomId * outputDim);
sum += polynomProb * out;
polynomId += gridDim.x;
}
atomicAdd(out + dimId * batchSize + threadIdx.x, sum);
}
void SigmoidProbPolynomForward(
const float lambda,
const float* features,
int fCount,
int batchSize,
const int* splits,
const float* conditions,
const int* polynomOffsets,
const float* values,
int polynomCount,
int outDim,
float* tempProbs,
float* output
) {
const int blockSize = batchSize;
const int numBlocks = min(polynomCount, 1000);
assert(batchSize < 2048);
assert(numBlocks);
SigmoidProbPolynomProbsImpl << < numBlocks, blockSize >>> (features, batchSize, splits, conditions, polynomOffsets, polynomCount, lambda, tempProbs);
dim3 forwardBlocks;
forwardBlocks.z = 1;
forwardBlocks.y = outDim;
forwardBlocks.x = min(polynomCount, 512);
SigmoidProbPolynomForwardImpl << < forwardBlocks, batchSize >> > (tempProbs, batchSize, values, polynomCount, outDim, output);
}
/*
* Here layout is not the same as in forward pass
* BlockSize = 256, MaxDepth = 8, K = 24
* should give 50% occupancy, this should be enough
*/
template <int MaxDepth, int BlockSize, int K>
__global__ void SigmoidProbPolynomBackwardImpl(float lambda,
const float* features,
int featuresCount,
const float* outDer,
int outputDim,
const int* featureIds,
const float* conditions,
const float* values,
const int* polynomOffsets,
int polynomCount,
float* featuresDer) {
const int sampleId = blockIdx.y;
features += sampleId * featuresCount;
featuresDer += sampleId * featuresCount;
outDer += sampleId * outputDim;
//out: batch_elem0 dim0, dim1, dimk batch_elem1 dim0 dim1 dimk
//so threads
__shared__ float localFeaturesDer[BlockSize * K];
for (int i = threadIdx.x; i < BlockSize * K; i += BlockSize) {
localFeaturesDer[i] = 0;
}
__syncthreads();
const int alignedFeaturesCount = ((featuresCount + BlockSize - 1) / BlockSize) * BlockSize;
const int memoryBlocks = (BlockSize * K) / alignedFeaturesCount;
const int memoryBlockId = threadIdx.x % memoryBlocks;
int polynomId = blockIdx.x * blockDim.x + threadIdx.x;
while (polynomId < polynomCount) {
const int offset = polynomOffsets[polynomId];
const int nextOffset = polynomOffsets[polynomId + 1];
const int depth = nextOffset - offset;
if (depth != 0) {
float logProbs[MaxDepth];
short fids[MaxDepth];
float totalLogProb = 0;
#pragma unroll
for (int i = 0; i < MaxDepth; ++i) {
if (i < depth) {
const int f = __ldg(featureIds + i + offset);
fids[i] = f;
const float c = __ldg(conditions + i + offset);
const float x = __ldg(features + f);
const float val = -lambda * (x - c);
const float expVal = 1.0f + exp(val);
logProbs[i] = -(isfinite(expVal) ? log(expVal) : val);
totalLogProb += logProbs[i];
}
}
const float p = exp(totalLogProb);
//featureDerivative is outputDer * total value before monom * monom derivative
float derMultiplier = 0;
#pragma unroll 10
for (int dim = 0; dim < outputDim; ++dim) {
derMultiplier += __ldg(values + polynomId * outputDim + dim) * __ldg(outDer + dim);
}
#pragma unroll
for (int i = 0; i < MaxDepth; ++i) {
if (i < depth) {
const int f = fids[i];
const float featureProb = exp(logProbs[i]);
const float monomDer = p * (1.0 - featureProb);
const float featureDer = monomDer * derMultiplier;
//atomics in shared memory, pretty fast on pascal+ hardware
atomicAdd(localFeaturesDer + memoryBlocks * f + memoryBlockId, featureDer);
}
}
}
polynomId += gridDim.x * blockDim.x;
}
__syncthreads();
//outputDim = 1024 => memoryBlocks = 6
for (int f = threadIdx.x; f < featuresCount; f += BlockSize) {
float der = 0;
#pragma unroll
for (int k = 0; k < memoryBlocks; ++k) {
der += localFeaturesDer[f * memoryBlocks + k];
}
atomicAdd(featuresDer + f, der);
}
}
void SigmoidProbPolynomBackward(int batchSize,
float lambda,
const float* features,
int featuresCount,
const float* outDer,
int outputDim,
const int* featureIds,
const float* conditions,
const float* values,
const int* polynomOffsets,
int polynomCount,
float* featuresDer) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.z = 1;
numBlocks.y = batchSize;
//should be ≈ smCount * 6 / batchSize
numBlocks.x = min((polynomCount + blockSize - 1) * outputDim / blockSize, 160);
const int maxDepth = 12;
const int K = 16;
SigmoidProbPolynomBackwardImpl<maxDepth, blockSize, K> <<<numBlocks, blockSize>>>(lambda, features, featuresCount, outDer, outputDim, featureIds, conditions, values, polynomOffsets, polynomCount, featuresDer);
}
// }}}
|
668b3cb6d45d2fee2c095ae6e0f4d4b644535302.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/emulation.hpp"
#include "opencv2/core/cuda/transform.hpp"
using namespace cv::gpu;
using namespace cv::gpu::cudev;
namespace hist
{
__global__ void histogram256Kernel(const uchar* src, int cols, int rows, size_t step, int* hist)
{
__shared__ int shist[256];
const int y = blockIdx.x * blockDim.y + threadIdx.y;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
shist[tid] = 0;
__syncthreads();
if (y < rows)
{
const unsigned int* rowPtr = (const unsigned int*) (src + y * step);
const int cols_4 = cols / 4;
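// Each thread walks the row as packed 32-bit words, histogramming four 8-bit pixels per load into
// the per-block shared histogram; the tail of rows whose width is not a multiple of 4 is handled below.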
for (int x = threadIdx.x; x < cols_4; x += blockDim.x)
{
unsigned int data = rowPtr[x];
Emulation::smem::atomicAdd(&shist[(data >> 0) & 0xFFU], 1);
Emulation::smem::atomicAdd(&shist[(data >> 8) & 0xFFU], 1);
Emulation::smem::atomicAdd(&shist[(data >> 16) & 0xFFU], 1);
Emulation::smem::atomicAdd(&shist[(data >> 24) & 0xFFU], 1);
}
if (cols % 4 != 0 && threadIdx.x == 0)
{
for (int x = cols_4 * 4; x < cols; ++x)
{
unsigned int data = ((const uchar*)rowPtr)[x];
Emulation::smem::atomicAdd(&shist[data], 1);
}
}
}
__syncthreads();
const int histVal = shist[tid];
if (histVal > 0)
::atomicAdd(hist + tid, histVal);
}
void histogram256(PtrStepSzb src, int* hist, hipStream_t stream)
{
const dim3 block(32, 8);
const dim3 grid(divUp(src.rows, block.y));
hipLaunchKernelGGL(( histogram256Kernel), dim3(grid), dim3(block), 0, stream, src.data, src.cols, src.rows, src.step, hist);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
}
/////////////////////////////////////////////////////////////////////////
namespace hist
{
__constant__ int c_lut[256];
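// c_lut is expected to hold the cumulative histogram; EqualizeHist rescales each entry by
// 255 / (cols * rows) to obtain the equalized 8-bit intensity.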
struct EqualizeHist : unary_function<uchar, uchar>
{
float scale;
__host__ EqualizeHist(float _scale) : scale(_scale) {}
__device__ __forceinline__ uchar operator ()(uchar val) const
{
const int lut = c_lut[val];
return __float2int_rn(scale * lut);
}
};
}
namespace cv { namespace gpu { namespace cudev
{
template <> struct TransformFunctorTraits<hist::EqualizeHist> : DefaultTransformFunctorTraits<hist::EqualizeHist>
{
enum { smart_shift = 4 };
};
}}}
namespace hist
{
void equalizeHist(PtrStepSzb src, PtrStepSzb dst, const int* lut, hipStream_t stream)
{
if (stream == 0)
cudaSafeCall( hipMemcpyToSymbol(c_lut, lut, 256 * sizeof(int), 0, hipMemcpyDeviceToDevice) );
else
cudaSafeCall( hipMemcpyToSymbolAsync(c_lut, lut, 256 * sizeof(int), 0, hipMemcpyDeviceToDevice, stream) );
const float scale = 255.0f / (src.cols * src.rows);
cudev::transform(src, dst, EqualizeHist(scale), WithOutMask(), stream);
}
}
#endif /* CUDA_DISABLER */
| 668b3cb6d45d2fee2c095ae6e0f4d4b644535302.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/emulation.hpp"
#include "opencv2/core/cuda/transform.hpp"
using namespace cv::gpu;
using namespace cv::gpu::cudev;
namespace hist
{
__global__ void histogram256Kernel(const uchar* src, int cols, int rows, size_t step, int* hist)
{
__shared__ int shist[256];
const int y = blockIdx.x * blockDim.y + threadIdx.y;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
shist[tid] = 0;
__syncthreads();
if (y < rows)
{
const unsigned int* rowPtr = (const unsigned int*) (src + y * step);
const int cols_4 = cols / 4;
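// Each thread walks the row as packed 32-bit words, histogramming four 8-bit pixels per load into
// the per-block shared histogram; the tail of rows whose width is not a multiple of 4 is handled below.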
for (int x = threadIdx.x; x < cols_4; x += blockDim.x)
{
unsigned int data = rowPtr[x];
Emulation::smem::atomicAdd(&shist[(data >> 0) & 0xFFU], 1);
Emulation::smem::atomicAdd(&shist[(data >> 8) & 0xFFU], 1);
Emulation::smem::atomicAdd(&shist[(data >> 16) & 0xFFU], 1);
Emulation::smem::atomicAdd(&shist[(data >> 24) & 0xFFU], 1);
}
if (cols % 4 != 0 && threadIdx.x == 0)
{
for (int x = cols_4 * 4; x < cols; ++x)
{
unsigned int data = ((const uchar*)rowPtr)[x];
Emulation::smem::atomicAdd(&shist[data], 1);
}
}
}
__syncthreads();
const int histVal = shist[tid];
if (histVal > 0)
::atomicAdd(hist + tid, histVal);
}
void histogram256(PtrStepSzb src, int* hist, cudaStream_t stream)
{
const dim3 block(32, 8);
const dim3 grid(divUp(src.rows, block.y));
histogram256Kernel<<<grid, block, 0, stream>>>(src.data, src.cols, src.rows, src.step, hist);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
}
/////////////////////////////////////////////////////////////////////////
namespace hist
{
__constant__ int c_lut[256];
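// c_lut is expected to hold the cumulative histogram; EqualizeHist rescales each entry by
// 255 / (cols * rows) to obtain the equalized 8-bit intensity.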
struct EqualizeHist : unary_function<uchar, uchar>
{
float scale;
__host__ EqualizeHist(float _scale) : scale(_scale) {}
__device__ __forceinline__ uchar operator ()(uchar val) const
{
const int lut = c_lut[val];
return __float2int_rn(scale * lut);
}
};
}
namespace cv { namespace gpu { namespace cudev
{
template <> struct TransformFunctorTraits<hist::EqualizeHist> : DefaultTransformFunctorTraits<hist::EqualizeHist>
{
enum { smart_shift = 4 };
};
}}}
namespace hist
{
void equalizeHist(PtrStepSzb src, PtrStepSzb dst, const int* lut, cudaStream_t stream)
{
if (stream == 0)
cudaSafeCall( cudaMemcpyToSymbol(c_lut, lut, 256 * sizeof(int), 0, cudaMemcpyDeviceToDevice) );
else
cudaSafeCall( cudaMemcpyToSymbolAsync(c_lut, lut, 256 * sizeof(int), 0, cudaMemcpyDeviceToDevice, stream) );
const float scale = 255.0f / (src.cols * src.rows);
cudev::transform(src, dst, EqualizeHist(scale), WithOutMask(), stream);
}
}
#endif /* CUDA_DISABLER */
|
6bd8c2b77c5e98e181a7839c377b7146d43f42ac.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "timer.h"
#define TILE_WIDTH 16
// Compute C = A * B
__global__ void matrixMultiplyShared(float * A, float * B, float * C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns) {
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int r = by * blockDim.y + ty;
int c = bx * blockDim.x + tx;
int dimC = numAColumns;
__shared__ float ds_A[TILE_WIDTH][TILE_WIDTH];
__shared__ float ds_B[TILE_WIDTH][TILE_WIDTH];
float value = 0.0;
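// March over the shared dimension in TILE_WIDTH-wide tiles: each iteration stages one tile of A and
// one tile of B in shared memory (zero-padding out-of-range elements) before the partial dot product.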
for (int i=0; i < (dimC-1)/TILE_WIDTH +1; i++){
if ((r < numCRows) && ((i*TILE_WIDTH + tx)< dimC)){
ds_A[ty][tx]=A[r*dimC + i*TILE_WIDTH + tx];
} else {
ds_A[ty][tx]=0.0;
}
if ((c < numCColumns) && ((i*TILE_WIDTH + ty)< dimC)){
ds_B[ty][tx]=B[(i*TILE_WIDTH + ty)*numBColumns + c];
} else {
ds_B[ty][tx]=0.0;
}
__syncthreads();
for (int j=0; j<TILE_WIDTH; j++){
value += ds_A[ty][j] * ds_B[j][tx];
}
__syncthreads();
}
if ((r < numCRows) && (c < numCColumns)){
C[r*numCColumns+c] = value;
}
}
void constantInit(float *data, int size, float val)
{
for (int i = 0; i < size; ++i)
{
data[i] = val;
}
}
int main(int argc, char ** argv) {
GpuTimer timer;
float * hostA; // The A matrix
float * hostB; // The B matrix
float * hostC; // The output C matrix
float * deviceA;
float * deviceB;
float * deviceC;
int numARows; // number of rows in the matrix A
int numAColumns; // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows; // number of rows in the matrix C (you have to set this)
int numCColumns; // number of columns in the matrix C (you have to set this)
// Conseguir matrices de entrada. Random
if (argc != 5){
fprintf(stderr,"%s numrowsA numcolumnsA numrowsB numcolumnsB\n", argv[0]);
return 1;
}
numARows = atoi(argv[1]);
numAColumns = atoi(argv[2]);
numBRows = atoi(argv[3]);
numBColumns = atoi(argv[4]);
//@@ Set numCRows and numCColumns
if (numAColumns == numBRows){
numCRows = numARows;
numCColumns = numBColumns;
} else {
fprintf(stderr, "The multiplication can not be made because %d columns of matrix A is not equal to %d rows of matrix B\n", numAColumns, numBRows);
return 1;
}
// Initialize host memory
hipHostMalloc((void **) &hostA, numARows*numAColumns*sizeof(float), hipHostMallocDefault);
hipHostMalloc((void **) &hostB, numBRows*numBColumns*sizeof(float), hipHostMallocDefault);
const float valB = 0.01f;
constantInit(hostA, numARows*numAColumns, 1.0f);
constantInit(hostB, numBRows*numBColumns, valB);
//@@ Allocate the hostC matrix
hipHostMalloc((void **) &hostC, numCRows*numCColumns*sizeof(float), hipHostMallocDefault);
//@@ Allocate GPU memory here
hipMalloc((void **) &deviceA,numARows*numAColumns*sizeof(float));
hipMalloc((void **) &deviceB,numBRows*numBColumns*sizeof(float));
hipMalloc((void **) &deviceC,numCRows*numCColumns*sizeof(float));
//@@ Copy memory to the GPU here
hipMemcpy(deviceA,hostA,numARows*numAColumns*sizeof(float),hipMemcpyHostToDevice);
hipMemcpy(deviceB,hostB,numBRows*numBColumns*sizeof(float),hipMemcpyHostToDevice);
//@@ Initialize the grid and block dimensions here
dim3 gridSize((numCColumns-1)/TILE_WIDTH +1, (numCRows-1)/TILE_WIDTH +1, 1);
dim3 blockSize(TILE_WIDTH, TILE_WIDTH, 1);
timer.Start();
//@@ Launch the GPU Kernel here
hipLaunchKernelGGL(( matrixMultiplyShared), dim3(gridSize), dim3(blockSize), 0, 0, deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns,
numCRows, numCColumns);
hipDeviceSynchronize();
timer.Stop();
//@@ Copy the GPU memory back to the CPU here
hipMemcpy(hostC, deviceC,numCRows*numCColumns*sizeof(float),hipMemcpyDeviceToHost);
//@@ Free the GPU memory here
hipFree(deviceA);
hipFree(deviceB);
hipFree(deviceC);
printf("Checking computed result for correctness: ");
bool correct = true;
// test relative error by the formula
// |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps
double eps = 1.e-6 ; // machine zero
for (int i = 0; i < (int)(numCRows * numCColumns); i++)
{
double abs_err = fabs(hostC[i] - (numAColumns * valB));
double dot_length = numAColumns;
double abs_val = fabs(hostC[i]);
double rel_err = abs_err/abs_val/dot_length ;
if (rel_err > eps)
{
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, hostC[i], numAColumns*valB, eps);
correct = false;
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
hipHostFree(hostA);
hipHostFree(hostB);
hipHostFree(hostC);
return 0;
}
| 6bd8c2b77c5e98e181a7839c377b7146d43f42ac.cu | #include <stdio.h>
#include "timer.h"
#define TILE_WIDTH 16
// Compute C = A * B
__global__ void matrixMultiplyShared(float * A, float * B, float * C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns) {
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int r = by * blockDim.y + ty;
int c = bx * blockDim.x + tx;
int dimC = numAColumns;
__shared__ float ds_A[TILE_WIDTH][TILE_WIDTH];
__shared__ float ds_B[TILE_WIDTH][TILE_WIDTH];
float value = 0.0;
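// Walk the shared K dimension in TILE_WIDTH-wide tiles: each iteration stages one tile of A
// and one tile of B in shared memory (zero-padding out-of-range elements), synchronises, and
// accumulates the partial dot product for this thread's C element.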
for (int i=0; i < (dimC-1)/TILE_WIDTH +1; i++){
if ((r < numCRows) && ((i*TILE_WIDTH + tx)< dimC)){
ds_A[ty][tx]=A[r*dimC + i*TILE_WIDTH + tx];
} else {
ds_A[ty][tx]=0.0;
}
if ((c < numCColumns) && ((i*TILE_WIDTH + ty)< dimC)){
ds_B[ty][tx]=B[(i*TILE_WIDTH + ty)*numBColumns + c];
} else {
ds_B[ty][tx]=0.0;
}
__syncthreads();
for (int j=0; j<TILE_WIDTH; j++){
value += ds_A[ty][j] * ds_B[j][tx];
}
__syncthreads();
}
if ((r < numCRows) && (c < numCColumns)){
C[r*numCColumns+c] = value;
}
}
void constantInit(float *data, int size, float val)
{
for (int i = 0; i < size; ++i)
{
data[i] = val;
}
}
int main(int argc, char ** argv) {
GpuTimer timer;
float * hostA; // The A matrix
float * hostB; // The B matrix
float * hostC; // The output C matrix
float * deviceA;
float * deviceB;
float * deviceC;
int numARows; // number of rows in the matrix A
int numAColumns; // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows; // number of rows in the matrix C (you have to set this)
int numCColumns; // number of columns in the matrix C (you have to set this)
// Get the input matrices. Random
if (argc != 5){
fprintf(stderr,"%s numrowsA numcolumnsA numrowsB numcolumnsB\n", argv[0]);
return 1;
}
numARows = atoi(argv[1]);
numAColumns = atoi(argv[2]);
numBRows = atoi(argv[3]);
numBColumns = atoi(argv[4]);
//@@ Set numCRows and numCColumns
if (numAColumns == numBRows){
numCRows = numARows;
numCColumns = numBColumns;
} else {
fprintf(stderr, "The multiplication can not be made because %d columns of matrix A is not equal to %d rows of matrix B\n", numAColumns, numBRows);
return 1;
}
// Initialize host memory
cudaHostAlloc((void **) &hostA, numARows*numAColumns*sizeof(float), cudaHostAllocDefault);
cudaHostAlloc((void **) &hostB, numBRows*numBColumns*sizeof(float), cudaHostAllocDefault);
const float valB = 0.01f;
constantInit(hostA, numARows*numAColumns, 1.0f);
constantInit(hostB, numBRows*numBColumns, valB);
//@@ Allocate the hostC matrix
cudaHostAlloc((void **) &hostC, numCRows*numCColumns*sizeof(float), cudaHostAllocDefault);
//@@ Allocate GPU memory here
cudaMalloc((void **) &deviceA,numARows*numAColumns*sizeof(float));
cudaMalloc((void **) &deviceB,numBRows*numBColumns*sizeof(float));
cudaMalloc((void **) &deviceC,numCRows*numCColumns*sizeof(float));
//@@ Copy memory to the GPU here
cudaMemcpy(deviceA,hostA,numARows*numAColumns*sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(deviceB,hostB,numBRows*numBColumns*sizeof(float),cudaMemcpyHostToDevice);
//@@ Initialize the grid and block dimensions here
dim3 gridSize((numCColumns-1)/TILE_WIDTH +1, (numCRows-1)/TILE_WIDTH +1, 1);
dim3 blockSize(TILE_WIDTH, TILE_WIDTH, 1);
timer.Start();
//@@ Launch the GPU Kernel here
matrixMultiplyShared<<<gridSize, blockSize>>>(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns,
numCRows, numCColumns);
cudaDeviceSynchronize();
timer.Stop();
//@@ Copy the GPU memory back to the CPU here
cudaMemcpy(hostC, deviceC,numCRows*numCColumns*sizeof(float),cudaMemcpyDeviceToHost);
//@@ Free the GPU memory here
cudaFree(deviceA);
cudaFree(deviceB);
cudaFree(deviceC);
printf("Checking computed result for correctness: ");
bool correct = true;
// test relative error by the formula
// |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps
double eps = 1.e-6 ; // machine zero
for (int i = 0; i < (int)(numCRows * numCColumns); i++)
{
double abs_err = fabs(hostC[i] - (numAColumns * valB));
double dot_length = numAColumns;
double abs_val = fabs(hostC[i]);
double rel_err = abs_err/abs_val/dot_length ;
if (rel_err > eps)
{
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, hostC[i], numAColumns*valB, eps);
correct = false;
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
cudaFreeHost(hostA);
cudaFreeHost(hostB);
cudaFreeHost(hostC);
return 0;
}
|
c1d05e5d377fd443d2704c8779a99e7df1a7232c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "minutia.cuh"
#include "area.cuh"
#include "constants.cuh"
#include "util.cuh"
#include "errors.h"
#include "debug.h"
using namespace std;
bool initialized = false;
int numCellsInCylinder = 0;
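// initialize() counts how many of the NS x NS cells of the cylinder cross-section have centres
// inside the circle of radius R; buildCylinder later uses this count to normalise the ratio of
// valid cells.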
__host__ void initialize() {
if (initialized) return;
initialized = true;
numCellsInCylinder = 0;
float temp = DELTA_S/2;
for (int i = 0; i < NS; ++i) {
float x = DELTA_S * i + temp;
float dx = x-R;
for (int j = 0; j < NS; ++j) {
float y = DELTA_S * j + temp;
float dy = y-R;
if (dx*dx + dy*dy <= R_SQR) ++numCellsInCylinder;
}
}
}
__host__ __device__ __inline__
float spatialContribution(
int mt_x, int mt_y, int pi, int pj) {
auto gaussian = [&](int t_sqr) -> float {
return I_2_SIGMA_S_SQRT_PI * expf(-t_sqr * I_2_SIGMA_S_SQR);
};
return gaussian(sqrDistance(mt_x, mt_y, pi, pj));
}
__host__ __device__ __inline__
float directionalContribution(
float m_theta, float mt_theta, float dphik) {
// http://www.wolframalpha.com/input/?i=integrate+(e%5E(-(t%5E2)%2F(2(x%5E2)))+dt)
auto integrate = [&](float val) -> float {
return SQRT_PI_2_SIGMA_D * erff(val * I_SQRT_2_SIGMA_D);
};
auto gaussian = [&](float val) -> float {
return I_SQRT_2_PI_SIGMA_D *
(integrate(val+DELTA_D_2)-integrate(val-DELTA_D_2));
};
return gaussian(
angle(dphik, angle(m_theta, mt_theta)));
}
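// buildCylinder: one thread block per minutia (blockIdx.x selects the reference minutia).
// Each (threadIdx.x, threadIdx.y) thread owns one spatial cell of the NS x NS cross-section and
// iterates over the ND angular sections, accumulating spatial and directional contributions from
// neighbouring minutiae that were first staged in shared memory.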
__global__
void buildCylinder(
Minutia *minutiae,
int width, int height,
char *validArea,
int numCellsInCylinder,
char *cylinderValidities,
char *cellValidities,
char *cellValues) {
extern __shared__ int shared[];
const int N = gridDim.x;
Minutia *sharedMinutiae = (Minutia*)shared;
int idxMinutia = blockIdx.x;
int idxThread = threadIdx.y * blockDim.x + threadIdx.x;
int contributed = 0;
if (idxThread < N) {
sharedMinutiae[idxThread] = minutiae[idxThread];
if (idxThread != idxMinutia) {
auto dist = sqrDistance(
sharedMinutiae[idxThread].x, sharedMinutiae[idxThread].y,
minutiae[idxMinutia].x, minutiae[idxMinutia].y);
contributed = dist <= (R+SIGMA_3S)*(R+SIGMA_3S);
}
}
int sumContributed = __syncthreads_count(contributed);
Minutia m = sharedMinutiae[idxMinutia];
float halfNS = (NS + 1) / 2.0f;
float halfNSi = (threadIdx.x+1) - halfNS;
float halfNSj = (threadIdx.y+1) - halfNS;
float sint, cost;
sincosf(m.theta, &sint, &cost);
int pi = m.x + roundf(DELTA_S * (cost * halfNSi + sint * halfNSj));
int pj = m.y + roundf(DELTA_S * (-sint * halfNSi + cost * halfNSj));
char validity = pi >= 0 && pi < width && pj >= 0 && pj < height
&& validArea[pj * width + pi]
&& sqrDistance(m.x, m.y, pi, pj) <= R_SQR;
int idx = idxMinutia * NC + threadIdx.y * NS * ND + threadIdx.x * ND;
for (int k = 0; k < ND; ++k) {
char value = 0;
if (validity) {
float dphik = -M_PI + (k + 0.5f) * DELTA_D;
float sum = 0.0f;
for (int l = 0; l < N; ++l) {
if (l == idxMinutia)
continue;
Minutia mt(sharedMinutiae[l]);
if (sqrDistance(mt.x, mt.y, pi, pj) > SIGMA_9S_SQR)
continue;
float sContrib = spatialContribution(mt.x, mt.y, pi, pj);
float dContrib = directionalContribution(m.theta, mt.theta, dphik);
sum += sContrib * dContrib;
}
if (sum >= MU_PSI)
value = 1;
}
cellValidities[idx+k] = validity;
cellValues[idx+k] = value;
}
int sumValidities = __syncthreads_count(validity);
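// A cylinder is kept only if at least MIN_M neighbouring minutiae contributed and the fraction
// of valid in-circle cells reaches MIN_VC.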
if (threadIdx.x == 0 && threadIdx.y == 0) {
cylinderValidities[idxMinutia] = sumContributed >= MIN_M &&
(float)sumValidities/(numCellsInCylinder) >= MIN_VC;
devDebug("Minutia %2d VC: ((%3d/%d) = %.5f) >= %.2f, M: %2d >= %d\n",
idxMinutia,
sumValidities, numCellsInCylinder,
(float)sumValidities/(numCellsInCylinder), MIN_VC,
sumContributed, MIN_M);
}
}
__host__
void devBuildTemplate(
Minutia *devMinutiae, const int n,
char *devArea, const int width, const int height,
char *devCylinderValidities,
char *devCellValidities,
char *devCellValues) {
initialize();
dim3 blockDim(NS, NS);
int sharedSize = n * sizeof(Minutia);
hipLaunchKernelGGL(( buildCylinder), dim3(n), dim3(blockDim), sharedSize, 0,
devMinutiae, width, height, devArea, numCellsInCylinder,
devCylinderValidities, devCellValidities, devCellValues);
}
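// Host-side wrapper: builds the valid-area mask, copies minutiae and mask to the device, runs
// devBuildTemplate, and copies the cylinder/cell results back into the output vectors.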
__host__
void buildTemplate(
const vector<Minutia>& minutiae,
const int width, const int height,
vector<char>& cylinderValidities,
vector<char>& cellValidities,
vector<char>& cellValues) {
auto area = buildValidArea(minutiae, width, height);
Minutia *devMinutiae;
char *devArea;
char *devCylinderValidities, *devCellValidities, *devCellValues;
size_t devMinutiaeSize = minutiae.size() * sizeof(Minutia);
size_t devAreaSize = width * height * sizeof(char);
size_t devCylinderValiditiesSize = minutiae.size() * sizeof(char);
size_t devCellValiditiesSize = minutiae.size() * NC * sizeof(char);
size_t devCellValuesSize = minutiae.size() * NC * sizeof(char);
handleError(
hipMalloc(&devMinutiae, devMinutiaeSize));
handleError(
hipMemcpy(devMinutiae, minutiae.data(), devMinutiaeSize, hipMemcpyHostToDevice));
handleError(
hipMalloc(&devArea, devAreaSize));
handleError(
hipMemcpy(devArea, area.data(), devAreaSize, hipMemcpyHostToDevice));
handleError(
hipMalloc(&devCylinderValidities, devCylinderValiditiesSize));
handleError(
hipMalloc(&devCellValues, devCellValuesSize));
handleError(
hipMalloc(&devCellValidities, devCellValiditiesSize));
devBuildTemplate(
devMinutiae, minutiae.size(),
devArea, width, height,
devCylinderValidities,
devCellValidities,
devCellValues);
cylinderValidities.resize(minutiae.size());
cellValidities.resize(minutiae.size() * NC);
cellValues.resize(minutiae.size() * NC);
handleError(
hipMemcpy(cylinderValidities.data(), devCylinderValidities, devCylinderValiditiesSize, hipMemcpyDeviceToHost));
handleError(
hipMemcpy(cellValidities.data(), devCellValidities, devCellValiditiesSize, hipMemcpyDeviceToHost));
handleError(
hipMemcpy(cellValues.data(), devCellValues, devCellValuesSize, hipMemcpyDeviceToHost));
hipFree(devMinutiae);
hipFree(devArea);
hipFree(devCylinderValidities);
hipFree(devCellValidities);
hipFree(devCellValues);
}
| c1d05e5d377fd443d2704c8779a99e7df1a7232c.cu | #include <vector>
#include "minutia.cuh"
#include "area.cuh"
#include "constants.cuh"
#include "util.cuh"
#include "errors.h"
#include "debug.h"
using namespace std;
bool initialized = false;
int numCellsInCylinder = 0;
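// initialize() counts how many of the NS x NS cells of the cylinder cross-section have centres
// inside the circle of radius R; buildCylinder later uses this count to normalise the ratio of
// valid cells.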
__host__ void initialize() {
if (initialized) return;
initialized = true;
numCellsInCylinder = 0;
float temp = DELTA_S/2;
for (int i = 0; i < NS; ++i) {
float x = DELTA_S * i + temp;
float dx = x-R;
for (int j = 0; j < NS; ++j) {
float y = DELTA_S * j + temp;
float dy = y-R;
if (dx*dx + dy*dy <= R_SQR) ++numCellsInCylinder;
}
}
}
__host__ __device__ __inline__
float spatialContribution(
int mt_x, int mt_y, int pi, int pj) {
auto gaussian = [&](int t_sqr) -> float {
return I_2_SIGMA_S_SQRT_PI * expf(-t_sqr * I_2_SIGMA_S_SQR);
};
return gaussian(sqrDistance(mt_x, mt_y, pi, pj));
}
__host__ __device__ __inline__
float directionalContribution(
float m_theta, float mt_theta, float dphik) {
// http://www.wolframalpha.com/input/?i=integrate+(e%5E(-(t%5E2)%2F(2(x%5E2)))+dt)
auto integrate = [&](float val) -> float {
return SQRT_PI_2_SIGMA_D * erff(val * I_SQRT_2_SIGMA_D);
};
auto gaussian = [&](float val) -> float {
return I_SQRT_2_PI_SIGMA_D *
(integrate(val+DELTA_D_2)-integrate(val-DELTA_D_2));
};
return gaussian(
angle(dphik, angle(m_theta, mt_theta)));
}
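// buildCylinder: one thread block per minutia (blockIdx.x selects the reference minutia).
// Each (threadIdx.x, threadIdx.y) thread owns one spatial cell of the NS x NS cross-section and
// iterates over the ND angular sections, accumulating spatial and directional contributions from
// neighbouring minutiae that were first staged in shared memory.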
__global__
void buildCylinder(
Minutia *minutiae,
int width, int height,
char *validArea,
int numCellsInCylinder,
char *cylinderValidities,
char *cellValidities,
char *cellValues) {
extern __shared__ int shared[];
const int N = gridDim.x;
Minutia *sharedMinutiae = (Minutia*)shared;
int idxMinutia = blockIdx.x;
int idxThread = threadIdx.y * blockDim.x + threadIdx.x;
int contributed = 0;
if (idxThread < N) {
sharedMinutiae[idxThread] = minutiae[idxThread];
if (idxThread != idxMinutia) {
auto dist = sqrDistance(
sharedMinutiae[idxThread].x, sharedMinutiae[idxThread].y,
minutiae[idxMinutia].x, minutiae[idxMinutia].y);
contributed = dist <= (R+SIGMA_3S)*(R+SIGMA_3S);
}
}
int sumContributed = __syncthreads_count(contributed);
Minutia m = sharedMinutiae[idxMinutia];
float halfNS = (NS + 1) / 2.0f;
float halfNSi = (threadIdx.x+1) - halfNS;
float halfNSj = (threadIdx.y+1) - halfNS;
float sint, cost;
sincosf(m.theta, &sint, &cost);
int pi = m.x + roundf(DELTA_S * (cost * halfNSi + sint * halfNSj));
int pj = m.y + roundf(DELTA_S * (-sint * halfNSi + cost * halfNSj));
char validity = pi >= 0 && pi < width && pj >= 0 && pj < height
&& validArea[pj * width + pi]
&& sqrDistance(m.x, m.y, pi, pj) <= R_SQR;
int idx = idxMinutia * NC + threadIdx.y * NS * ND + threadIdx.x * ND;
for (int k = 0; k < ND; ++k) {
char value = 0;
if (validity) {
float dphik = -M_PI + (k + 0.5f) * DELTA_D;
float sum = 0.0f;
for (int l = 0; l < N; ++l) {
if (l == idxMinutia)
continue;
Minutia mt(sharedMinutiae[l]);
if (sqrDistance(mt.x, mt.y, pi, pj) > SIGMA_9S_SQR)
continue;
float sContrib = spatialContribution(mt.x, mt.y, pi, pj);
float dContrib = directionalContribution(m.theta, mt.theta, dphik);
sum += sContrib * dContrib;
}
if (sum >= MU_PSI)
value = 1;
}
cellValidities[idx+k] = validity;
cellValues[idx+k] = value;
}
int sumValidities = __syncthreads_count(validity);
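// A cylinder is kept only if at least MIN_M neighbouring minutiae contributed and the fraction
// of valid in-circle cells reaches MIN_VC.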
if (threadIdx.x == 0 && threadIdx.y == 0) {
cylinderValidities[idxMinutia] = sumContributed >= MIN_M &&
(float)sumValidities/(numCellsInCylinder) >= MIN_VC;
devDebug("Minutia %2d VC: ((%3d/%d) = %.5f) >= %.2f, M: %2d >= %d\n",
idxMinutia,
sumValidities, numCellsInCylinder,
(float)sumValidities/(numCellsInCylinder), MIN_VC,
sumContributed, MIN_M);
}
}
__host__
void devBuildTemplate(
Minutia *devMinutiae, const int n,
char *devArea, const int width, const int height,
char *devCylinderValidities,
char *devCellValidities,
char *devCellValues) {
initialize();
dim3 blockDim(NS, NS);
int sharedSize = n * sizeof(Minutia);
buildCylinder<<<n, blockDim, sharedSize>>>(
devMinutiae, width, height, devArea, numCellsInCylinder,
devCylinderValidities, devCellValidities, devCellValues);
}
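// Host-side wrapper: builds the valid-area mask, copies minutiae and mask to the device, runs
// devBuildTemplate, and copies the cylinder/cell results back into the output vectors.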
__host__
void buildTemplate(
const vector<Minutia>& minutiae,
const int width, const int height,
vector<char>& cylinderValidities,
vector<char>& cellValidities,
vector<char>& cellValues) {
auto area = buildValidArea(minutiae, width, height);
Minutia *devMinutiae;
char *devArea;
char *devCylinderValidities, *devCellValidities, *devCellValues;
size_t devMinutiaeSize = minutiae.size() * sizeof(Minutia);
size_t devAreaSize = width * height * sizeof(char);
size_t devCylinderValiditiesSize = minutiae.size() * sizeof(char);
size_t devCellValiditiesSize = minutiae.size() * NC * sizeof(char);
size_t devCellValuesSize = minutiae.size() * NC * sizeof(char);
handleError(
cudaMalloc(&devMinutiae, devMinutiaeSize));
handleError(
cudaMemcpy(devMinutiae, minutiae.data(), devMinutiaeSize, cudaMemcpyHostToDevice));
handleError(
cudaMalloc(&devArea, devAreaSize));
handleError(
cudaMemcpy(devArea, area.data(), devAreaSize, cudaMemcpyHostToDevice));
handleError(
cudaMalloc(&devCylinderValidities, devCylinderValiditiesSize));
handleError(
cudaMalloc(&devCellValues, devCellValuesSize));
handleError(
cudaMalloc(&devCellValidities, devCellValiditiesSize));
devBuildTemplate(
devMinutiae, minutiae.size(),
devArea, width, height,
devCylinderValidities,
devCellValidities,
devCellValues);
cylinderValidities.resize(minutiae.size());
cellValidities.resize(minutiae.size() * NC);
cellValues.resize(minutiae.size() * NC);
handleError(
cudaMemcpy(cylinderValidities.data(), devCylinderValidities, devCylinderValiditiesSize, cudaMemcpyDeviceToHost));
handleError(
cudaMemcpy(cellValidities.data(), devCellValidities, devCellValiditiesSize, cudaMemcpyDeviceToHost));
handleError(
cudaMemcpy(cellValues.data(), devCellValues, devCellValuesSize, cudaMemcpyDeviceToHost));
cudaFree(devMinutiae);
cudaFree(devArea);
cudaFree(devCylinderValidities);
cudaFree(devCellValidities);
cudaFree(devCellValues);
}
|
fa6f3aa29062eac04ba05ebe8a51d57a035877a7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Implements the math functions for GPU.
#include "caffe2/utils/math.h"
#include <cstring>
#include <limits>
#include <numeric>
#include <vector>
#include <hipcub/hipcub.hpp>
#include <hipcub/hipcub.hpp>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/conversions.h"
#include "caffe2/utils/fixed_divisor.h"
// TODO: Move this to fixed_divisor.h
#ifdef __HIPCC__
#define FIXED_DIVISOR int32_t
#define FIXED_DIVISOR_DIV(d, n) (n / d)
#define FIXED_DIVISOR_MOD(d, n) (n % d)
#define FIXED_DIVISOR_DIV_MOD(d, n, q, r) \
do { \
const auto n_copy = n; \
*q = n_copy / d; \
*r = n_copy % d; \
} while (0)
#else // __HIPCC__
#define FIXED_DIVISOR FixedDivisor<int32_t>
#define FIXED_DIVISOR_DIV(d, n) (d.Div(n))
#define FIXED_DIVISOR_MOD(d, n) (d.Mod(n))
#define FIXED_DIVISOR_DIV_MOD(d, n, q, r) (d.DivMod(n, q, r))
#endif // __HIPCC__
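// Under HIP the FIXED_DIVISOR macros fall back to plain integer division/modulo; on CUDA,
// FixedDivisor<int32_t> precomputes a fast reciprocal so the per-element div/mod in the
// broadcast kernels avoids hardware integer division.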
#include "caffe2/utils/math_utils.h"
#if THRUST_VERSION >= 100800
#define THRUST_SUPPORTS_PER_THREAD
#endif // THRUST_VERSION >= 100800
namespace caffe2 {
namespace math {
namespace {
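// The macro below generates the Add/Sub/Mul/Div functors; the at::Half specialization routes
// the arithmetic through float via convert::To rather than operating on half values directly.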
#define DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Func, expr) \
template <typename T> \
struct Func##Functor { \
inline __host__ __device__ T \
operator()(const T& lhs, const T& rhs) const { \
return lhs expr rhs; \
} \
}; \
template <> \
struct Func##Functor<at::Half> { \
inline __host__ __device__ at::Half operator()( \
const at::Half& lhs, \
const at::Half& rhs) const { \
return convert::To<float, at::Half>(convert::To<at::Half, float>( \
lhs) expr convert::To<at::Half, float>(rhs)); \
} \
};
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Add, +)
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Sub, -)
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Mul, *)
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Div, /)
#undef DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR
template <typename T>
__global__ void SinCosCUDAKernel(const int N, const T* X, T* S, T* C) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
c10::hip::compat::sincos(__ldg(X + i), S + i, C + i);
#else
c10::hip::compat::sincos(X[i], S + i, C + i);
#endif
}
}
template <typename TIn, typename TOut, class BinaryOperator>
__global__ void SimpleBinaryOpCUDAKernel(
const int N,
const BinaryOperator op,
const TIn* A,
const TIn* B,
TOut* C) {
CUDA_1D_KERNEL_LOOP(i, N) {
C[i] = op(A[i], B[i]);
}
}
template <typename TIn, typename TOut, class BinaryOperator, bool broadcast_1st>
__global__ void RowwiseBinaryOpCUDAKenel(
const int size,
const FIXED_DIVISOR cols,
const BinaryOperator op,
const TIn* A,
const TIn* B,
TOut* C) {
CUDA_1D_KERNEL_LOOP(C_index, size) {
const int j = FIXED_DIVISOR_MOD(cols, C_index);
const int A_index = broadcast_1st ? j : C_index;
const int B_index = broadcast_1st ? C_index : j;
C[C_index] = op(A[A_index], B[B_index]);
}
}
template <typename TIn, typename TOut, class BinaryOperator, bool broadcast_1st>
__global__ void ColwiseBinaryOpCUDAKenel(
const int size,
const FIXED_DIVISOR cols,
const BinaryOperator op,
const TIn* A,
const TIn* B,
TOut* C) {
CUDA_1D_KERNEL_LOOP(C_index, size) {
const int i = FIXED_DIVISOR_DIV(cols, C_index);
const int A_index = broadcast_1st ? i : C_index;
const int B_index = broadcast_1st ? C_index : i;
C[C_index] = op(A[A_index], B[B_index]);
}
}
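// General N-D broadcast kernel: each output index is decomposed digit by digit with
// FIXED_DIVISOR div/mod, and broadcast dimensions (extent 1) are given stride 0 so the same
// input element is reused along that axis.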
template <typename TIn, typename TOut, class BinaryOperator, int D>
__global__ void BroadcastBinaryOpCUDAKernel(
const int size,
const SimpleArray<int, D> A_strides,
const SimpleArray<int, D> B_strides,
const SimpleArray<FIXED_DIVISOR, D> C_dims,
const BinaryOperator op,
const TIn* A,
const TIn* B,
TOut* C) {
CUDA_1D_KERNEL_LOOP(C_index, size) {
int A_index = 0;
int B_index = 0;
int C_index_val = C_index;
#pragma unroll
for (int i = D - 1; i >= 0; --i) {
int d;
FIXED_DIVISOR_DIV_MOD(C_dims.data[i], C_index_val, &C_index_val, &d);
A_index += d * A_strides.data[i];
B_index += d * B_strides.data[i];
}
C[C_index] = op(A[A_index], B[B_index]);
}
}
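// 2-D fast path: when broadcasting reduces to a rows x cols pattern, dispatch to the rowwise or
// colwise kernel, broadcasting either the first or the second operand.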
template <typename TIn, typename TOut, class BinaryOperator>
CAFFE2_CUDA_EXPORT void BinaryOpWith2DBroadcasting(
const int rows,
const int cols,
const bool rowwise_broadcast,
const bool broadcast_1st,
const BinaryOperator& op,
const TIn* A,
const TIn* B,
TOut* C,
CUDAContext* context) {
if (rows == 0 || cols == 0) {
return;
}
const int size = rows * cols;
const FIXED_DIVISOR cols_div(cols);
if (rowwise_broadcast) {
if (broadcast_1st) {
hipLaunchKernelGGL(( RowwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, true>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), size, cols_div, op, A, B, C);
} else {
hipLaunchKernelGGL(( RowwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, false>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), size, cols_div, op, A, B, C);
}
} else {
if (broadcast_1st) {
hipLaunchKernelGGL(( ColwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, true>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), size, cols_div, op, A, B, C);
} else {
hipLaunchKernelGGL(( ColwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, false>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), size, cols_div, op, A, B, C);
}
}
}
template <typename TIn, typename TOut, class BinaryOperator, int D>
CAFFE2_CUDA_EXPORT void BroadcastBinaryOpImpl(
const int* A_dims,
const int* B_dims,
const int* C_dims,
const BinaryOperator& op,
const TIn* A,
const TIn* B,
TOut* C,
CUDAContext* context) {
SimpleArray<int, D> A_strides_array;
SimpleArray<int, D> B_strides_array;
SimpleArray<FIXED_DIVISOR, D> C_dims_array;
int A_stride = 1;
int B_stride = 1;
for (int i = D - 1; i >= 0; --i) {
if (C_dims[i] == 0) {
return;
}
A_strides_array.data[i] = A_dims[i] == 1 ? 0 : A_stride;
B_strides_array.data[i] = B_dims[i] == 1 ? 0 : B_stride;
A_stride *= A_dims[i];
B_stride *= B_dims[i];
C_dims_array.data[i] = FIXED_DIVISOR(C_dims[i]);
}
const int size =
std::accumulate(C_dims, C_dims + D, 1, std::multiplies<int>());
hipLaunchKernelGGL(( BroadcastBinaryOpCUDAKernel<TIn, TOut, BinaryOperator, D>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
size, A_strides_array, B_strides_array, C_dims_array, op, A, B, C);
}
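// Top-level dispatcher: pads both shapes to a common rank, then picks the cheapest kernel --
// plain elementwise when the padded shapes already match, the 2-D rowwise/colwise path when
// possible, and the general N-D kernel otherwise.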
template <typename TIn, typename TOut, class BinaryOperator>
CAFFE2_CUDA_EXPORT void BroadcastBinaryOp(
const int A_ndim,
const int* A_dims,
const int B_ndim,
const int* B_dims,
const BinaryOperator& op,
const TIn* A,
const TIn* B,
TOut* C,
CUDAContext* context) {
const int ndim = ::max(A_ndim, B_ndim);
std::vector<int> A_dims_array(ndim);
std::vector<int> B_dims_array(ndim);
std::vector<int> C_dims_array(ndim);
utils::ComputeBroadcastBinaryOpDims(
A_ndim,
A_dims,
B_ndim,
B_dims,
A_dims_array.data(),
B_dims_array.data(),
C_dims_array.data());
if (A_dims_array == B_dims_array) {
const int size = std::accumulate(
C_dims_array.cbegin(), C_dims_array.cend(), 1, std::multiplies<int>());
hipLaunchKernelGGL(( SimpleBinaryOpCUDAKernel<TIn, TOut, BinaryOperator>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), size, op, A, B, C);
return;
}
int rows;
int cols;
bool broadcast_1st;
if (utils::IsRowwiseBroadcastBinaryOp(
ndim,
A_dims_array.data(),
B_dims_array.data(),
&rows,
&cols,
&broadcast_1st)) {
BinaryOpWith2DBroadcasting<TIn, TOut, BinaryOperator>(
rows, cols, true, broadcast_1st, op, A, B, C, context);
return;
}
if (utils::IsColwiseBroadcastBinaryOp(
ndim,
A_dims_array.data(),
B_dims_array.data(),
&rows,
&cols,
&broadcast_1st)) {
BinaryOpWith2DBroadcasting<TIn, TOut, BinaryOperator>(
rows, cols, false, broadcast_1st, op, A, B, C, context);
return;
}
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_3(
ndim,
BroadcastBinaryOpImpl,
TIn,
TOut,
BinaryOperator,
A_dims_array.data(),
B_dims_array.data(),
C_dims_array.data(),
op,
A,
B,
C,
context);
}
} // namespace
#define DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(T, Func, op) \
__global__ void Func##CUDAKernel(const int N, const T* X, T* Y) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
Y[i] = op(X[i]); \
} \
} \
template <> \
CAFFE2_CUDA_EXPORT void Func<T, CUDAContext>( \
const int N, const T* x, T* y, CUDAContext* context) { \
hipLaunchKernelGGL(( Func##CUDAKernel), \
CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), N, x, y); \
}
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Exp, expf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Log, logf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cos, cosf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Acos, acosf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sin, sinf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Asin, asinf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Tan, tanf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Atan, atanf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sinh, sinhf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cosh, coshf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Tanh, tanhf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Abs, fabsf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqr, utils::Square<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqrt, sqrtf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Rsqrt, rsqrtf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cbrt, cbrtf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cube, utils::Cube<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Cube, utils::Cube<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int32_t,
Cube,
utils::Cube<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int64_t,
Cube,
utils::Cube<std::int64_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(bool, Not, utils::Not)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Neg, utils::Negate<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Neg, utils::Negate<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int32_t,
Neg,
utils::Negate<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int64_t,
Neg,
utils::Negate<std::int64_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sign, utils::Sign<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Sign, utils::Sign<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int32_t,
Sign,
utils::Sign<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int64_t,
Sign,
utils::Sign<std::int64_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Inv, utils::Inv<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Inv, utils::Inv<double>)
#undef DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION
#define CAFFE2_SPECIALIZED_CUDA_SINCOS(T) \
template <> \
CAFFE2_CUDA_EXPORT void SinCos<T, CUDAContext>( \
const int N, const T* x, T* ys, T* yc, CUDAContext* context) { \
hipLaunchKernelGGL(( SinCosCUDAKernel), \
CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), N, x, ys, yc); \
}
CAFFE2_SPECIALIZED_CUDA_SINCOS(float)
CAFFE2_SPECIALIZED_CUDA_SINCOS(double)
#undef CAFFE2_SPECIALIZED_CUDA_SINCOS
#define DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op) \
template <> \
CAFFE2_CUDA_EXPORT void Func<TIn, CUDAContext>( \
const int N, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
hipLaunchKernelGGL(( SimpleBinaryOpCUDAKernel<TIn, TOut, Op<TIn>>) \
, dim3(CAFFE_GET_BLOCKS(N)), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), N, Op<TIn>(), A, B, C); \
}
#define DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, bool, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, bool, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, Func, Op)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(LT, thrust::less)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(GT, thrust::greater)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal)
#undef DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION
#define DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int32_t, std::int32_t, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int64_t, std::int64_t, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, float, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, double, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(at::Half, at::Half, Func, Op)
DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Add, AddFunctor)
DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Sub, SubFunctor)
DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Mul, MulFunctor)
DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Div, DivFunctor)
#undef DEFINE_SIMPLE_CUDA_BINARY_FUNCTION
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, Xor, thrust::bit_xor)
#define DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION(Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int32_t, std::int32_t, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int64_t, std::int64_t, Func, Op)
DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and)
DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or)
DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor)
#undef DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
float,
float,
ElemwiseMax,
thrust::maximum);
#undef DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION
#define DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op) \
template <> \
CAFFE2_CUDA_EXPORT void Rowwise##Func<TIn, CUDAContext, true>( \
const int rows, \
const int cols, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
if (rows == 0 || cols == 0) { \
return; \
} \
const int size = rows * cols; \
const FIXED_DIVISOR cols_div(cols); \
hipLaunchKernelGGL(( RowwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, true>) \
, dim3(CAFFE_GET_BLOCKS(size)), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), size, cols_div, Op<TIn>(), A, B, C); \
} \
template <> \
CAFFE2_CUDA_EXPORT void Rowwise##Func<TIn, CUDAContext, false>( \
const int rows, \
const int cols, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
if (rows == 0 || cols == 0) { \
return; \
} \
const int size = rows * cols; \
const FIXED_DIVISOR cols_div(cols); \
hipLaunchKernelGGL(( RowwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, false>) \
, dim3(CAFFE_GET_BLOCKS(size)), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), size, cols_div, Op<TIn>(), A, B, C); \
} \
template <> \
CAFFE2_CUDA_EXPORT void Colwise##Func<TIn, CUDAContext, true>( \
const int rows, \
const int cols, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
if (rows == 0 || cols == 0) { \
return; \
} \
const int size = rows * cols; \
const FIXED_DIVISOR cols_div(cols); \
hipLaunchKernelGGL(( ColwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, true>) \
, dim3(CAFFE_GET_BLOCKS(size)), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), size, cols_div, Op<TIn>(), A, B, C); \
} \
template <> \
CAFFE2_CUDA_EXPORT void Colwise##Func<TIn, CUDAContext, false>( \
const int rows, \
const int cols, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
if (rows == 0 || cols == 0) { \
return; \
} \
const int size = rows * cols; \
const FIXED_DIVISOR cols_div(cols); \
hipLaunchKernelGGL(( ColwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, false>) \
, dim3(CAFFE_GET_BLOCKS(size)), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), size, cols_div, Op<TIn>(), A, B, C); \
}
#define DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(float, bool, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(double, bool, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(LT, thrust::less)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(GT, thrust::greater)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal)
#undef DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION
#define DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int32_t, std::int32_t, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int64_t, std::int64_t, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(float, float, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(double, double, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(at::Half, at::Half, Func, Op)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Add, AddFunctor)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Sub, SubFunctor)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Mul, MulFunctor)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Div, DivFunctor)
#undef DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and)
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or)
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Xor, thrust::bit_xor)
#define DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int32_t, std::int32_t, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int64_t, std::int64_t, Func, Op)
DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and)
DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or)
DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor)
#undef DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION
#undef DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION
#define DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op) \
template <> \
CAFFE2_CUDA_EXPORT void Func<TIn, CUDAContext>( \
const int A_ndim, \
const int* A_dims, \
const int B_ndim, \
const int* B_dims, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
BroadcastBinaryOp<TIn, TOut, Op<TIn>>( \
A_ndim, A_dims, B_ndim, B_dims, Op<TIn>(), A, B, C, context); \
}
#define DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(float, bool, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(double, bool, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(LT, thrust::less)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(GT, thrust::greater)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal)
#undef DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION
#define DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int32_t, std::int32_t, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int64_t, std::int64_t, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(float, float, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(double, double, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(at::Half, at::Half, Func, Op)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Add, AddFunctor)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Sub, SubFunctor)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Mul, MulFunctor)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Div, DivFunctor)
#undef DEFINE_BROADCAST_CUDA_BINARY_FUNCTION
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and)
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or)
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Xor, thrust::bit_xor)
#define DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int32_t, std::int32_t, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, std::int64_t, Func, Op)
DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and)
DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or)
DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor)
#undef DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION
#undef DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION
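// The hipcub reduction is issued twice: the first call with a null workspace only queries the
// required temporary storage size, and the second call runs the reduction using the resized
// scratch tensor.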
#define DELEGATE_REDUCTION_FUNCTION(T, Funcname, func) \
template <> \
CAFFE2_CUDA_EXPORT void Funcname<T, CUDAContext>( \
const int N, \
const T* src, \
T* dst, \
Tensor* scratch_ptr, \
CUDAContext* context) { \
size_t memRequired = 0; \
hipcub::DeviceReduce::func( \
nullptr, memRequired, src, dst, N, context->cuda_stream()); \
auto buffer_size = \
static_cast<int64_t>((memRequired + sizeof(T) - 1) / sizeof(T)); \
scratch_ptr->Resize(std::vector<int64_t>{buffer_size}); \
hipcub::DeviceReduce::func( \
static_cast<void*>(scratch_ptr->mutable_data<T>()), \
memRequired, \
src, \
dst, \
N, \
context->cuda_stream()); \
}
DELEGATE_REDUCTION_FUNCTION(float, ReduceMin, Min)
DELEGATE_REDUCTION_FUNCTION(float, ReduceMax, Max)
DELEGATE_REDUCTION_FUNCTION(int32_t, ReduceMax, Max)
DELEGATE_REDUCTION_FUNCTION(int64_t, ReduceMax, Max)
#undef DELEGATE_REDUCTION_FUNCTION
// Caffe2 gemm provides a simpler interface to the gemm functions, with the
// limitation that the data has to be contiguous in memory.
template <>
CAFFE2_CUDA_EXPORT void Gemm<float, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const float* B,
const float beta,
float* C,
CUDAContext* context,
TensorProto::DataType math_type) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
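// Passing B before A (and swapping M and N) computes C^T = B^T * A^T in column-major terms,
// which is exactly C = A * B for the row-major layout used by Caffe2.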
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const hipblasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
const hipblasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_ENFORCE(
hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasSgemm(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B,
ldb,
A,
lda,
&beta,
C,
N));
}
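// A minimal caller-side sketch (hypothetical names d_A, d_B, d_C, context; math_type left at
// its header default) for row-major C(M x N) = A(M x K) * B(K x N) using the wrapper above:
//   math::Gemm<float, CUDAContext>(CblasNoTrans, CblasNoTrans, M, N, K,
//                                  1.0f, d_A, d_B, 0.0f, d_C, &context);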
template <>
CAFFE2_CUDA_EXPORT void Gemm<at::Half, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int M,
const int N,
const int K,
const float alpha,
const at::Half* A,
const at::Half* B,
const float beta,
at::Half* C,
CUDAContext* context,
TensorProto::DataType math_type) {
#if defined(__HIPCC__) && !ROCBLAS_FP16
CAFFE_THROW("HIP currently does not support FP16 yet.");
#else
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const hipblasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
const hipblasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
if (math_type == TensorProto_DataType_FLOAT) {
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasSgemmEx(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B,
HIP_R_16F,
ldb,
A,
HIP_R_16F,
lda,
&beta,
C,
HIP_R_16F,
N));
} else if (math_type == TensorProto_DataType_FLOAT16) {
// convert alpha, beta from float -> __half
const __half alpha_fp16 = at::Half(alpha);
const __half beta_fp16 = at::Half(beta);
// call hipblasHgemm
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasHgemm(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha_fp16,
(const __half*)B,
ldb,
(const __half*)A,
lda,
&beta_fp16,
(__half*)C,
N));
} else {
// fail
CAFFE_THROW("Unsupported math type");
}
#endif
}
template <>
CAFFE2_CUDA_EXPORT void BiasCHW<float, CUDAContext>(
const float* bias,
const float* bias_multiplier,
const int bias_channels,
const int image_size,
float* image,
CUDAContext* context) {
Gemm<float, CUDAContext>(
CblasNoTrans,
CblasNoTrans,
bias_channels,
image_size,
1,
1,
bias,
bias_multiplier,
1,
image,
context);
}
template <>
CAFFE2_CUDA_EXPORT void GemmBatched<float, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float** A,
const float** B,
const float beta,
float** C,
CUDAContext* context,
TensorProto::DataType math_type) {
#if __CUDACC_VER_MAJOR__ < 8 || defined(__HIPCC__)
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
Gemm<float, CUDAContext>(
trans_A,
trans_B,
M,
N,
K,
alpha,
A[i],
B[i],
beta,
C[i],
context,
math_type);
}
#else
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const int ldc = N;
const hipblasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
const hipblasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
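// The batched BLAS call expects device-resident arrays of matrix pointers, so the host pointer
// arrays are staged through thrust::device_vector first.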
thrust::device_vector<const float*> A_device(A, A + batch_size);
thrust::device_vector<const float*> B_device(B, B + batch_size);
thrust::device_vector<float*> C_device(C, C + batch_size);
CUBLAS_ENFORCE(
hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasSgemmBatched(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B_device.data().get(),
ldb,
A_device.data().get(),
lda,
&beta,
C_device.data().get(),
ldc,
batch_size));
#endif
}
template <>
CAFFE2_CUDA_EXPORT void GemmStridedBatched<float, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const int A_stride,
const float* B,
const int B_stride,
const float beta,
float* C,
const int C_stride,
CUDAContext* context,
TensorProto::DataType math_type) {
#if __CUDACC_VER_MAJOR__ < 8 && !defined(__HIPCC__)
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
Gemm<float, CUDAContext>(
trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type);
A += A_stride;
B += B_stride;
C += C_stride;
}
#else
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const int ldc = N;
const hipblasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
const hipblasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_ENFORCE(
hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasSgemmStridedBatched(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B,
ldb,
B_stride,
A,
lda,
A_stride,
&beta,
C,
ldc,
C_stride,
batch_size));
#endif
}
template <>
CAFFE2_CUDA_EXPORT void GemmBatched<at::Half, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const at::Half** A,
const at::Half** B,
const float beta,
at::Half** C,
CUDAContext* context,
TensorProto::DataType math_type) {
#if defined(__HIPCC__) && !ROCBLAS_FP16
CAFFE_THROW("HIP currently does not support FP16 yet.");
#else
#if __CUDACC_VER_MAJOR__ < 9
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
Gemm<at::Half, CUDAContext>(
trans_A,
trans_B,
M,
N,
K,
alpha,
A[i],
B[i],
beta,
C[i],
context,
math_type);
}
#else
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const int ldc = N;
const hipblasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
const hipblasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
if (math_type == TensorProto_DataType_FLOAT) {
#if TORCH_HIP_VERSION < 9010
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
Gemm<at::Half, CUDAContext>(
trans_A,
trans_B,
M,
N,
K,
alpha,
A[i],
B[i],
beta,
C[i],
context,
math_type);
}
#else
thrust::device_vector<const void*> A_device(A, A + batch_size);
thrust::device_vector<const void*> B_device(B, B + batch_size);
thrust::device_vector<void*> C_device(C, C + batch_size);
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasGemmBatchedEx(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B_device.data().get(),
HIP_R_16F,
ldb,
A_device.data().get(),
HIP_R_16F,
lda,
&beta,
C_device.data().get(),
HIP_R_16F,
ldc,
batch_size,
HIP_R_32F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
#endif
} else if (math_type == TensorProto_DataType_FLOAT16) {
// Convert alpha, beta from float -> __half
const __half alpha_fp16 = at::Half(alpha);
const __half beta_fp16 = at::Half(beta);
std::vector<const __half*> A_array(batch_size);
std::vector<const __half*> B_array(batch_size);
std::vector<__half*> C_array(batch_size);
for (int i = 0; i < batch_size; ++i) {
A_array[i] = reinterpret_cast<const __half*>(A[i]);
B_array[i] = reinterpret_cast<const __half*>(B[i]);
C_array[i] = reinterpret_cast<__half*>(C[i]);
}
thrust::device_vector<const __half*> A_device(
A_array.cbegin(), A_array.cend());
thrust::device_vector<const __half*> B_device(
B_array.cbegin(), B_array.cend());
thrust::device_vector<__half*> C_device(C_array.cbegin(), C_array.cend());
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasHgemmBatched(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha_fp16,
B_device.data().get(),
ldb,
A_device.data().get(),
lda,
&beta_fp16,
C_device.data().get(),
ldc,
batch_size));
} else {
CAFFE_THROW("Unsupported math type");
}
#endif
#endif
}
template <>
CAFFE2_CUDA_EXPORT void GemmStridedBatched<at::Half, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const at::Half* A,
const int A_stride,
const at::Half* B,
const int B_stride,
const float beta,
at::Half* C,
const int C_stride,
CUDAContext* context,
TensorProto::DataType math_type) {
#if defined(__HIPCC__) && !ROCBLAS_FP16
CAFFE_THROW("HIP currently does not support FP16 yet.");
#else
#if __CUDACC_VER_MAJOR__ < 8
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
Gemm<at::Half, CUDAContext>(
trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type);
A += A_stride;
B += B_stride;
C += C_stride;
}
#else
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const int ldc = N;
const hipblasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
const hipblasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
if (math_type == TensorProto_DataType_FLOAT) {
#if TORCH_HIP_VERSION < 9010
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
Gemm<at::Half, CUDAContext>(
trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type);
A += A_stride;
B += B_stride;
C += C_stride;
}
#else
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasGemmStridedBatchedEx(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B,
HIP_R_16F,
ldb,
B_stride,
A,
HIP_R_16F,
lda,
A_stride,
&beta,
C,
HIP_R_16F,
ldc,
C_stride,
batch_size,
HIP_R_32F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
#endif
} else if (math_type == TensorProto_DataType_FLOAT16) {
// Convert alpha, beta from float -> __half
const __half alpha_fp16 = at::Half(alpha);
const __half beta_fp16 = at::Half(beta);
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasHgemmStridedBatched(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha_fp16,
(const __half*)B,
ldb,
B_stride,
(const __half*)A,
lda,
A_stride,
&beta_fp16,
(__half*)C,
ldc,
C_stride,
batch_size));
} else {
CAFFE_THROW("Unsupported math type");
}
#endif
#endif
}
#if TORCH_HIP_VERSION >= 9000
// No change, but required. Defer to default CUDA engine
template <>
CAFFE2_CUDA_EXPORT void Gemm<float, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const float* B,
const float beta,
float* C,
CUDAContext* context,
TensorProto::DataType math_type) {
return Gemm<float, CUDAContext>(
trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type);
}
template <>
CAFFE2_CUDA_EXPORT void Gemm<at::Half, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int M,
const int N,
const int K,
const float alpha,
const at::Half* A,
const at::Half* B,
const float beta,
at::Half* C,
CUDAContext* context,
TensorProto::DataType math_type) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const hipblasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
const hipblasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
// enable TensorCore for this call on this handle
if (TensorCoreAvailable()) {
CUBLAS_ENFORCE(
cublasSetMathMode(context->cublas_handle(), CUBLAS_TENSOR_OP_MATH));
}
CUBLAS_ENFORCE(
hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasGemmEx(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B,
HIP_R_16F,
ldb,
A,
HIP_R_16F,
lda,
&beta,
C,
HIP_R_16F,
N,
HIP_R_32F,
CUBLAS_GEMM_DFALT_TENSOR_OP));
// Now disable TensorCore math for subsequent calls to this handle
if (TensorCoreAvailable()) {
CUBLAS_ENFORCE(
cublasSetMathMode(context->cublas_handle(), CUBLAS_DEFAULT_MATH));
}
}
template <>
CAFFE2_CUDA_EXPORT void
GemmStridedBatched<float, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const int A_stride,
const float* B,
const int B_stride,
const float beta,
float* C,
const int C_stride,
CUDAContext* context,
TensorProto::DataType math_type) {
return GemmStridedBatched<float, CUDAContext, DefaultEngine>(
trans_A,
trans_B,
batch_size,
M,
N,
K,
alpha,
A,
A_stride,
B,
B_stride,
beta,
C,
C_stride,
context,
math_type);
}
template <>
CAFFE2_CUDA_EXPORT void
GemmStridedBatched<at::Half, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const at::Half* A,
const int A_stride,
const at::Half* B,
const int B_stride,
const float beta,
at::Half* C,
const int C_stride,
CUDAContext* context,
TensorProto::DataType math_type) {
return GemmStridedBatched<at::Half, CUDAContext, DefaultEngine>(
trans_A,
trans_B,
batch_size,
M,
N,
K,
alpha,
A,
A_stride,
B,
B_stride,
beta,
C,
C_stride,
context,
math_type);
}
#endif // TORCH_HIP_VERSION >= 9000
template <>
CAFFE2_CUDA_EXPORT void GemmEx<float, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const int lda,
const float* B,
const int ldb,
const float beta,
float* C,
const int ldc,
CUDAContext* context) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const hipblasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
const hipblasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_ENFORCE(
hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasSgemm(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B,
ldb,
A,
lda,
&beta,
C,
ldc));
}
template <>
CAFFE2_CUDA_EXPORT void Gemv<float, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const int M,
const int N,
const float alpha,
const float* A,
const float* x,
const float beta,
float* y,
CUDAContext* context,
TensorProto::DataType math_type) {
const hipblasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_ENFORCE(
hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasSgemv(
context->cublas_handle(),
cu_trans_A,
N,
M,
&alpha,
A,
N,
x,
1,
&beta,
y,
1));
}
// Batched Add variants
namespace {
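// AddStripedBatchKernel accumulates `batch` slabs of length N, spaced `stripe` elements apart in
// `first`, into Y; sums are formed in float so at::Half inputs do not lose precision while
// accumulating.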
template <typename T>
__global__ void AddStripedBatchKernel(
const int N,
const T* first,
T* Y,
const int stripe,
const int batch) {
for (int j = 0; j < batch; j++) {
const T* x = first + j * stripe;
CUDA_1D_KERNEL_LOOP(i, N) {
float tmpY = convert::To<T, float>(Y[i]);
tmpY += convert::To<T, float>(x[i]);
Y[i] = convert::To<float, T>(tmpY);
}
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(T) \
template <> \
CAFFE2_CUDA_EXPORT void AddStripedBatch<T, CUDAContext>( \
const int N, \
const T* first, \
T* Y, \
const int stripe, \
const int batch, \
CUDAContext* context) { \
hipLaunchKernelGGL(( AddStripedBatchKernel<T>) \
, dim3(CAFFE_GET_BLOCKS(N)), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), N, first, Y, stripe, batch); \
}
CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(float);
CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(at::Half);
#undef CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH
template <>
CAFFE2_CUDA_EXPORT void Gemv<at::Half, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const int M,
const int N,
const float alpha,
const at::Half* A,
const at::Half* x,
const float beta,
at::Half* y,
CUDAContext* context,
TensorProto::DataType math_type) {
#if defined(__HIPCC__) && !ROCBLAS_FP16
CAFFE_THROW("HIP currently does not support FP16 yet.");
#else
const hipblasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
// Sort out the dimensions needed to call cublasSgemmEx / hipblasHgemm below.
const int m = (cu_trans_A == HIPBLAS_OP_N) ? N : M;
const int k = (cu_trans_A == HIPBLAS_OP_N) ? M : N;
const int lda = (cu_trans_A == HIPBLAS_OP_N) ? m : k;
const int ldc = m;
if (math_type == TensorProto_DataType_FLOAT) {
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasSgemmEx(
context->cublas_handle(),
cu_trans_A,
HIPBLAS_OP_N,
m,
1,
k,
&alpha,
A,
HIP_R_16F,
lda,
x,
HIP_R_16F,
k,
&beta,
y,
HIP_R_16F,
ldc));
} else if (math_type == TensorProto_DataType_FLOAT16) {
const __half alpha_fp16 = at::Half(alpha);
const __half beta_fp16 = at::Half(beta);
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasHgemm(
context->cublas_handle(),
cu_trans_A,
HIPBLAS_OP_N,
m,
1,
k,
&alpha_fp16,
(const __half*)A,
lda,
(const __half*)x,
k,
&beta_fp16,
(__half*)y,
ldc));
} else {
// Unsupported math_type; only FLOAT and FLOAT16 are handled.
CAFFE_THROW("Unsupported math type");
}
#endif
}
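// cuBLAS does not expose a GEMV with FP16 inputs, so the FP16 GEMV above is
// expressed as a GEMM with a single right-hand-side column (n = 1): the
// m/k/lda values computed above describe A in column-major terms for the
// transposed problem, and x is treated as a k x 1 matrix. Minimal caller-side
// sketch (illustrative only; `A16`, `x16`, `y16` and `context` are assumed to
// exist, and the default math_type is assumed):
//
//   // y16 (M) = A16 (M x N, row-major) * x16 (N), accumulating in FP32.
//   math::Gemv<at::Half, CUDAContext>(
//       CblasNoTrans, M, N, 1.0f, A16, x16, 0.0f, y16, context);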
namespace {
template <typename T>
__global__ void SetKernel(const int N, const T alpha, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = alpha;
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_SET(T) \
template <> \
CAFFE2_CUDA_API void Set<T, CUDAContext>( \
const size_t N, const T alpha, T* Y, CUDAContext* context) { \
if (N == 0) { \
return; \
} \
if (alpha == T(0)) { \
hipMemsetAsync(Y, 0, sizeof(T) * N, context->cuda_stream()); \
} else { \
hipLaunchKernelGGL(( SetKernel<T>) \
, dim3(CAFFE_GET_BLOCKS(N)), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), N, alpha, Y); \
} \
}
CAFFE2_SPECIALIZED_CUDA_SET(float);
CAFFE2_SPECIALIZED_CUDA_SET(double);
CAFFE2_SPECIALIZED_CUDA_SET(bool);
CAFFE2_SPECIALIZED_CUDA_SET(int8_t);
CAFFE2_SPECIALIZED_CUDA_SET(int16_t);
CAFFE2_SPECIALIZED_CUDA_SET(int);
CAFFE2_SPECIALIZED_CUDA_SET(int64_t);
CAFFE2_SPECIALIZED_CUDA_SET(char);
CAFFE2_SPECIALIZED_CUDA_SET(uint8_t);
CAFFE2_SPECIALIZED_CUDA_SET(uint16_t);
#undef CAFFE2_SPECIALIZED_CUDA_SET
template <>
CAFFE2_CUDA_EXPORT void Set<at::Half, CUDAContext>(
const size_t N,
const at::Half alpha,
at::Half* Y,
CUDAContext* context) {
if (N > 0) {
hipLaunchKernelGGL(( SetKernel<at::Half>)
, dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), N, alpha, Y);
}
}
namespace {
template <typename T>
__global__ void
UniformShift(const size_t N, const float min, const float max, T* x) {
float scale = max - min;
CUDA_1D_KERNEL_LOOP(i, N) {
x[i] = convert::To<float, T>(convert::To<T, float>(x[i]) * scale + min);
}
}
__global__ void
UniformIntFit(const size_t N, const int min, const int max, unsigned int* x) {
int* x_int = reinterpret_cast<int*>(x);
int range = (max - min + 1);
CUDA_1D_KERNEL_LOOP(i, N) {
x_int[i] = min + static_cast<int>(x[i] % range);
}
}
} // namespace
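// hiprandGenerateUniform produces floats in (0, 1], so UniformShift maps them
// affinely to (min, max]; UniformIntFit reinterprets raw 32-bit draws and
// folds them into [min, max] with a modulo (which carries a small modulo bias
// whenever (max - min + 1) does not divide 2^32). Worked example of the float
// mapping: a draw of 0.25 with min = -2 and max = 2 becomes
// 0.25 * (2 - (-2)) + (-2) = -1.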
template <>
CAFFE2_CUDA_EXPORT void RandUniform<float, CUDAContext>(
const size_t n,
const float min,
const float max,
float* r,
CUDAContext* context) {
CURAND_ENFORCE(hiprandGenerateUniform(context->curand_generator(), r, n));
hipLaunchKernelGGL(( UniformShift<float>)
, dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), n, min, max, r);
}
template <>
CAFFE2_CUDA_EXPORT void RandUniform<double, CUDAContext>(
const size_t n,
const double min,
const double max,
double* r,
CUDAContext* context) {
CURAND_ENFORCE(
hiprandGenerateUniformDouble(context->curand_generator(), r, n));
hipLaunchKernelGGL(( UniformShift<double>)
, dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), n, min, max, r);
}
template <>
CAFFE2_CUDA_EXPORT void RandUniform<int, CUDAContext>(
const size_t n,
const int min,
const int max,
int* r,
CUDAContext* context) {
CURAND_ENFORCE(hiprandGenerate(
context->curand_generator(), reinterpret_cast<unsigned int*>(r), n));
hipLaunchKernelGGL(( UniformIntFit),
dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
n, min, max, reinterpret_cast<unsigned int*>(r));
}
template <typename T>
size_t HandleOddLengthRandGaussian(
const size_t n,
const T mean,
const T std,
T* r,
CUDAContext* context) {
if (n % 2 == 1) {
std::default_random_engine generator;
std::normal_distribution<T> distribution(mean, std);
const T random_value = distribution(generator);
Set<T, CUDAContext>(1, random_value, r + (n - 1), context);
return n - 1;
}
return n;
}
template <>
CAFFE2_CUDA_EXPORT void RandGaussian<float, CUDAContext>(
const size_t n,
const float mean,
const float std,
float* r,
CUDAContext* context) {
// If n is odd, we add a random Gaussian value at the end manually
// and generate n-1 random values using hiprandGenerateNormal.
// hiprandGenerateNormal requires n to be even.
const size_t even_n =
HandleOddLengthRandGaussian<float>(n, mean, std, r, context);
CURAND_ENFORCE(
hiprandGenerateNormal(context->curand_generator(), r, even_n, mean, std));
}
template <>
CAFFE2_CUDA_EXPORT void RandGaussian<double, CUDAContext>(
const size_t n,
const double mean,
const double std,
double* r,
CUDAContext* context) {
const size_t even_n =
HandleOddLengthRandGaussian<double>(n, mean, std, r, context);
CURAND_ENFORCE(hiprandGenerateNormalDouble(
context->curand_generator(), r, even_n, mean, std));
}
template <>
CAFFE2_CUDA_EXPORT void Dot<float, CUDAContext>(
const int n,
const float* a,
const float* b,
float* y,
CUDAContext* context) {
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE));
CUBLAS_ENFORCE(hipblasSdot(context->cublas_handle(), n, a, 1, b, 1, y));
}
template <>
CAFFE2_CUDA_EXPORT void Dot<at::Half, CUDAContext>(
const int n,
const at::Half* a,
const at::Half* b,
at::Half* y,
CUDAContext* context) {
#if defined(__HIPCC__) && !ROCBLAS_FP16
CAFFE_THROW("HIP currently does not support FP16 yet.");
#else
// execute with 32-bit math
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE));
CUBLAS_ENFORCE(hipblasDotEx_v2(
context->cublas_handle(),
n,
a,
HIP_R_16F,
1,
b,
HIP_R_16F,
1,
y,
HIP_R_16F,
HIP_R_32F));
#endif
}
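// Both Dot specializations switch cuBLAS to HIPBLAS_POINTER_MODE_DEVICE, so
// the scalar result is written straight into device memory (`y` must be a
// device pointer) without forcing a host synchronization; the FP16 variant
// accumulates in FP32 (execution type HIP_R_32F). Minimal sketch, assuming a
// one-element device buffer `d_result` and valid `a`, `b`, `n`, `context`:
//
//   float* d_result;  // e.g. backed by a workspace Tensor
//   math::Dot<float, CUDAContext>(n, a, b, d_result, context);
//   // d_result now holds the dot product; copy it back if a host value is needed.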
// A previous version of caffe2 used Thrust but it turns out that thrust
// reduction has an implicit scratch space allocation and deallocation, which
// may interfere with NCCL and create a deadlock. Hence we are using a custom
// reduction here.
#define SUM_KERNEL_NTHREADS 128
template <typename T>
__global__ void SumKernel(const int N, const T* X, T* Y, bool square) {
const int idx = threadIdx.x;
__shared__ float reduction_buffer[SUM_KERNEL_NTHREADS];
reduction_buffer[idx] = 0;
// A multilevel reduction.
// N -> 128
if (!square) {
for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) {
reduction_buffer[idx] += convert::To<T, float>(X[i]);
}
} else {
for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) {
float Xi = convert::To<T, float>(X[i]);
reduction_buffer[idx] += Xi * Xi;
}
}
__syncthreads();
// 128 -> 32
if (idx < 32) {
reduction_buffer[idx] += reduction_buffer[idx + 32] +
reduction_buffer[idx + 64] + reduction_buffer[idx + 96];
}
__syncthreads();
// 32 -> 1
if (idx == 0) {
float tmp = 0;
for (int i = 0; i < 32; ++i) {
tmp += reduction_buffer[i];
}
*Y = convert::To<float, T>(tmp);
}
}
// According to the benchmarks script
// caffe2/caffe2/experiments/python/device_reduce_sum_bench.py,
// device reduce is slower for N <= 10000.
#define DEVICE_REDUCE_SIZE_THRESHOLD 10000
namespace {
template <typename T>
__global__ void SumConvertKernel(float* sum, T* dest) {
*dest = convert::To<float, T>(*sum);
}
template <typename T, typename IterT>
CAFFE2_CUDA_EXPORT void SumGenericIter(
const int N,
IterT it,
T*& dest,
CUDAContext* context,
Tensor* scratch_ptr) {
size_t memRequired = 0;
hipcub::DeviceReduce::Sum(
nullptr, memRequired, it, dest, N, context->cuda_stream());
auto buffer_size =
static_cast<int64_t>((memRequired + sizeof(T) - 1) / sizeof(T));
if (!dest) {
// allocate one more T at the end of scratch for dest
scratch_ptr->Resize(std::vector<int64_t>{buffer_size + 1});
dest = scratch_ptr->template mutable_data<T>() + buffer_size;
} else {
scratch_ptr->Resize(std::vector<int64_t>{buffer_size});
}
hipcub::DeviceReduce::Sum(
static_cast<void*>(scratch_ptr->template mutable_data<T>()),
memRequired,
it,
dest,
N,
context->cuda_stream());
}
} // namespace
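// SumGenericIter follows the standard two-pass hipcub::DeviceReduce protocol:
// the first call with a null workspace only reports the temporary-storage
// size, the scratch tensor is resized to hold it (plus one extra T appended to
// hold the result when `dest` is null), and the second call performs the
// reduction on the provided stream. The same pattern in isolation
// (illustrative only; `d_in`, `d_out`, `N` and `stream` are assumed to exist):
//
//   size_t temp_bytes = 0;
//   hipcub::DeviceReduce::Sum(nullptr, temp_bytes, d_in, d_out, N, stream);
//   void* d_temp = nullptr;
//   hipMalloc(&d_temp, temp_bytes);  // the library reuses scratch_ptr instead
//   hipcub::DeviceReduce::Sum(d_temp, temp_bytes, d_in, d_out, N, stream);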
template <>
CAFFE2_CUDA_EXPORT void Sum<float, CUDAContext>(
const int N,
const float* x,
float* y,
CUDAContext* context,
Tensor* scratch_ptr) {
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
SumGenericIter<float>(N, x, y, context, scratch_ptr);
} else {
hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(),
N, x, y, false);
}
}
template <>
CAFFE2_CUDA_EXPORT void Sum<int32_t, CUDAContext>(
const int N,
const int32_t* x,
int32_t* y,
CUDAContext* context,
Tensor* scratch_ptr) {
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
SumGenericIter<int32_t>(N, x, y, context, scratch_ptr);
} else {
hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(),
N, x, y, false);
}
}
namespace {
template <typename T>
struct FloatTransform {
inline __host__ __device__ float operator()(const T v) const {
return convert::To<T, float>(v);
}
};
} // namespace
#define CAFFE2_MATH_SUM_FUNC(T) \
template <> \
CAFFE2_CUDA_EXPORT void Sum<T, CUDAContext>( \
const int N, \
const T* x, \
T* y, \
CUDAContext* context, \
Tensor* scratch_ptr) { \
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { \
FloatTransform<T> transform; \
hipcub::TransformInputIterator<float, FloatTransform<T>, const T*> it( \
x, transform); \
float* sum = nullptr; \
SumGenericIter<float>(N, it, sum, context, scratch_ptr); \
hipLaunchKernelGGL(( SumConvertKernel), dim3(1), dim3(1), 0, context->cuda_stream(), sum, y); \
} else { \
hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(), \
N, x, y, false); \
} \
}
CAFFE2_MATH_SUM_FUNC(at::Half)
#undef CAFFE2_MATH_SUM_FUNC
namespace {
template <typename T>
struct SqrTransform {
inline __host__ __device__ T operator()(const T v) const {
return v * v;
}
};
} // namespace
template <>
CAFFE2_CUDA_EXPORT void SumSqr<float, CUDAContext>(
const int N,
const float* x,
float* y,
CUDAContext* context,
Tensor* scratch_ptr) {
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
SqrTransform<float> transform;
hipcub::TransformInputIterator<float, SqrTransform<float>, const float*> it(
x, transform);
SumGenericIter<float>(N, it, y, context, scratch_ptr);
} else {
hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(),
N, x, y, true);
}
}
#define CAFFE2_MATH_SUMSQR_FUNC(T) \
template <> \
CAFFE2_CUDA_EXPORT void SumSqr<T, CUDAContext>( \
const int N, \
const T* x, \
T* y, \
CUDAContext* context, \
Tensor* scratch_ptr) { \
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { \
FloatTransform<T> float_transform; \
hipcub::TransformInputIterator<float, FloatTransform<T>, const T*> \
float_it(x, float_transform); \
SqrTransform<float> sqr_transform; \
hipcub::TransformInputIterator< \
float, \
SqrTransform<float>, \
decltype(float_it)> \
it(float_it, sqr_transform); \
float* sum = nullptr; \
SumGenericIter<float>(N, it, sum, context, scratch_ptr); \
hipLaunchKernelGGL(( SumConvertKernel), dim3(1), dim3(1), 0, context->cuda_stream(), sum, y); \
} else { \
hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(), \
N, x, y, true); \
} \
}
CAFFE2_MATH_SUMSQR_FUNC(at::Half)
#undef CAFFE2_MATH_SUMSQR_FUNC
#undef DEVICE_REDUCE_SIZE_THRESHOLD
namespace {
template <typename T>
__global__ void
SelectKernel(const int N, const int D, const T* x, const int* idx, T* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = x[i * D + idx[i]];
}
}
} // namespace
template <>
CAFFE2_CUDA_EXPORT void Select<float, CUDAContext>(
const int N,
const int D,
const float* x,
const int* idx,
float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( SelectKernel<float>)
, dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), N, D, x, idx, y);
}
template <>
CAFFE2_CUDA_EXPORT void Select<at::Half, CUDAContext>(
const int N,
const int D,
const at::Half* x,
const int* idx,
at::Half* y,
CUDAContext* context) {
hipLaunchKernelGGL(( SelectKernel<at::Half>)
, dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), N, D, x, idx, y);
}
namespace {
template <typename TAlpha, typename TData>
__global__ void
ScaleCUDAKernel(const int n, const TAlpha alpha, const TData* x, TData* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
#if __CUDA_ARCH__ >= 350 || defined(__HIPCC__)
y[i] = __ldg(x + i) * static_cast<TData>(alpha);
#else
y[i] = x[i] * static_cast<TData>(alpha);
#endif
}
}
template <typename TAlpha, typename TData>
__global__ void
ScaleCUDAKernel(const int n, const TAlpha* alpha, const TData* x, TData* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
#if __CUDA_ARCH__ >= 350 || defined(__HIPCC__)
y[i] = __ldg(x + i) * static_cast<TData>(__ldg(alpha));
#else
y[i] = x[i] * static_cast<TData>(*alpha);
#endif
}
}
template <typename T>
__global__ void PowKernel(const int n, const T* x, const T exponent, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = powf(x[i], exponent);
}
}
} // namespace
template <>
CAFFE2_CUDA_EXPORT void Powx<float, CUDAContext>(
const int N,
const float* a,
const float b,
float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( PowKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), N, a, b, y);
}
#define DELEGATE_CUBLAS_SCALE_FUNCTION(TAlpha, TData, CuBLASFunc) \
template <> \
CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>( \
const int N, \
const TAlpha alpha, \
const TData* x, \
TData* y, \
CUDAContext* context) { \
if (N == 0) { \
return; \
} \
if (x != y) { \
hipMemcpyAsync( \
y, \
x, \
sizeof(TData) * N, \
hipMemcpyDeviceToDevice, \
context->cuda_stream()); \
} \
if (alpha != TAlpha(1)) { \
CUBLAS_ENFORCE(hipblasSetPointerMode( \
context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST)); \
CUBLAS_ENFORCE(CuBLASFunc(context->cublas_handle(), N, &alpha, y, 1)); \
} \
} \
template <> \
CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>( \
const int N, \
const TAlpha* alpha, \
const TData* x, \
TData* y, \
CUDAContext* context) { \
if (N == 0) { \
return; \
} \
if (x != y) { \
hipMemcpyAsync( \
y, \
x, \
sizeof(TData) * N, \
hipMemcpyDeviceToDevice, \
context->cuda_stream()); \
} \
CUBLAS_ENFORCE(hipblasSetPointerMode( \
context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE)); \
CUBLAS_ENFORCE(CuBLASFunc(context->cublas_handle(), N, alpha, y, 1)); \
}
DELEGATE_CUBLAS_SCALE_FUNCTION(float, float, hipblasSscal)
DELEGATE_CUBLAS_SCALE_FUNCTION(double, double, hipblasDscal)
#undef DELEGATE_CUBLAS_SCALE_FUNCTION
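// The two overloads generated above differ only in where the scale factor
// lives: the by-value `alpha` uses HIPBLAS_POINTER_MODE_HOST, while the
// pointer form uses HIPBLAS_POINTER_MODE_DEVICE so alpha can be produced on
// the GPU (e.g. by another kernel) and consumed without a device-to-host copy
// or synchronization. Usage sketch (illustrative only; `d_alpha` is an
// assumed device pointer holding one float):
//
//   math::Scale<float, float, CUDAContext>(N, 2.0f, x, y, context);     // host alpha
//   math::Scale<float, float, CUDAContext>(N, d_alpha, x, y, context);  // device alpha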
#define CAFFE2_SPECIALIZED_CUDA_SCALE(TAlpha, TData) \
template <> \
CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>( \
const int N, \
const TAlpha alpha, \
const TData* x, \
TData* y, \
CUDAContext* context) { \
if (N == 0) { \
return; \
} \
if (alpha == TAlpha(1)) { \
if (x != y) { \
hipMemcpyAsync( \
y, \
x, \
sizeof(TData) * N, \
hipMemcpyDeviceToDevice, \
context->cuda_stream()); \
} \
return; \
} \
hipLaunchKernelGGL(( ScaleCUDAKernel<TAlpha, TData>) \
, dim3(CAFFE_GET_BLOCKS(N)), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), N, alpha, x, y); \
} \
template <> \
CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>( \
const int N, \
const TAlpha* alpha, \
const TData* x, \
TData* y, \
CUDAContext* context) { \
if (N == 0) { \
return; \
} \
hipLaunchKernelGGL(( ScaleCUDAKernel<TAlpha, TData>) \
, dim3(CAFFE_GET_BLOCKS(N)), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), N, alpha, x, y); \
}
CAFFE2_SPECIALIZED_CUDA_SCALE(std::int32_t, std::int32_t)
CAFFE2_SPECIALIZED_CUDA_SCALE(std::int64_t, std::int64_t)
#ifndef __HIPCC__
template <>
CAFFE2_CUDA_EXPORT void Scale<at::Half, at::Half, CUDAContext>(
const int N,
const at::Half alpha,
const at::Half* x,
at::Half* y,
CUDAContext* context) {
if (N == 0) {
return;
}
if (x != y) {
hipMemcpyAsync(
y,
x,
sizeof(at::Half) * N,
hipMemcpyDeviceToDevice,
context->cuda_stream());
}
CUBLAS_ENFORCE(
hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasScalEx_v2(
context->cublas_handle(),
N,
&alpha,
HIP_R_16F,
y,
HIP_R_16F,
1,
HIP_R_32F));
}
template <>
CAFFE2_CUDA_EXPORT void Scale<at::Half, at::Half, CUDAContext>(
const int N,
const at::Half* alpha,
const at::Half* x,
at::Half* y,
CUDAContext* context) {
if (N == 0) {
return;
}
if (x != y) {
hipMemcpyAsync(
y,
x,
sizeof(at::Half) * N,
hipMemcpyDeviceToDevice,
context->cuda_stream());
}
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE));
CUBLAS_ENFORCE(hipblasScalEx_v2(
context->cublas_handle(),
N,
alpha,
HIP_R_16F,
y,
HIP_R_16F,
1,
HIP_R_32F));
}
template <>
CAFFE2_CUDA_EXPORT void Scale<float, at::Half, CUDAContext>(
const int N,
const float alpha,
const at::Half* x,
at::Half* y,
CUDAContext* context) {
if (N == 0) {
return;
}
if (x != y) {
hipMemcpyAsync(
y,
x,
sizeof(at::Half) * N,
hipMemcpyDeviceToDevice,
context->cuda_stream());
}
if (alpha != 1.0f) {
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasScalEx_v2(
context->cublas_handle(),
N,
&alpha,
HIP_R_32F,
y,
HIP_R_16F,
1,
HIP_R_32F));
}
}
template <>
CAFFE2_CUDA_EXPORT void Scale<float, at::Half, CUDAContext>(
const int N,
const float* alpha,
const at::Half* x,
at::Half* y,
CUDAContext* context) {
if (N == 0) {
return;
}
if (x != y) {
hipMemcpyAsync(
y,
x,
sizeof(at::Half) * N,
hipMemcpyDeviceToDevice,
context->cuda_stream());
}
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE));
CUBLAS_ENFORCE(hipblasScalEx_v2(
context->cublas_handle(),
N,
alpha,
HIP_R_32F,
y,
HIP_R_16F,
1,
HIP_R_32F));
}
#else // __HIPCC__
namespace {
template <>
__global__ void ScaleCUDAKernel<at::Half, at::Half>(
const int n,
const at::Half alpha,
const at::Half* x,
at::Half* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = convert::To<float, at::Half>(
convert::To<at::Half, float>(x[i]) *
convert::To<at::Half, float>(alpha));
}
}
template <>
__global__ void ScaleCUDAKernel<at::Half, at::Half>(
const int n,
const at::Half* alpha,
const at::Half* x,
at::Half* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = convert::To<float, at::Half>(
convert::To<at::Half, float>(x[i]) *
convert::To<at::Half, float>(*alpha));
}
}
template <>
__global__ void ScaleCUDAKernel<float, at::Half>(
const int n,
const float alpha,
const at::Half* x,
at::Half* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = convert::To<float, at::Half>(
convert::To<at::Half, float>(x[i]) * alpha);
}
}
template <>
__global__ void ScaleCUDAKernel<float, at::Half>(
const int n,
const float* alpha,
const at::Half* x,
at::Half* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = convert::To<float, at::Half>(
convert::To<at::Half, float>(x[i]) * (*alpha));
}
}
} // namespace
CAFFE2_SPECIALIZED_CUDA_SCALE(at::Half, at::Half)
CAFFE2_SPECIALIZED_CUDA_SCALE(float, at::Half)
#endif // __HIPCC__
#undef CAFFE2_SPECIALIZED_CUDA_SCALE
template <>
CAFFE2_CUDA_EXPORT void Axpy<float, CUDAContext>(
const int N,
const float alpha,
const float* X,
float* Y,
CUDAContext* context) {
CUBLAS_ENFORCE(
hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasSaxpy(context->cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
CAFFE2_CUDA_EXPORT void Axpy<double, CUDAContext>(
const int N,
const float alpha,
const double* X,
double* Y,
CUDAContext* context) {
double alpha_d{alpha};
CUBLAS_ENFORCE(
hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(
hipblasDaxpy(context->cublas_handle(), N, &alpha_d, X, 1, Y, 1));
}
template <>
CAFFE2_CUDA_EXPORT void Axpy<at::Half, CUDAContext>(
const int N,
const float alpha,
const at::Half* X,
at::Half* Y,
CUDAContext* context) {
#if defined(__HIPCC__) && !ROCBLAS_FP16
CAFFE_THROW("HIP currently does not support FP16 yet.");
#else
CUBLAS_ENFORCE(
hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasAxpyEx_v2(
context->cublas_handle(),
N,
&alpha,
HIP_R_32F,
X,
HIP_R_16F,
1,
Y,
HIP_R_16F,
1,
HIP_R_32F));
#endif
}
template <>
CAFFE2_CUDA_EXPORT void Axpy<float, CUDAContext>(
const int N,
const float* alpha,
const float* X,
float* Y,
CUDAContext* context) {
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE));
CUBLAS_ENFORCE(hipblasSaxpy(context->cublas_handle(), N, alpha, X, 1, Y, 1));
}
template <>
CAFFE2_CUDA_EXPORT void Axpy<at::Half, CUDAContext>(
const int N,
const float* alpha,
const at::Half* X,
at::Half* Y,
CUDAContext* context) {
#if defined(__HIPCC__) && !ROCBLAS_FP16
CAFFE_THROW("HIP currently does not support FP16 yet.");
#else
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE));
CUBLAS_ENFORCE(hipblasAxpyEx_v2(
context->cublas_handle(),
N,
alpha,
HIP_R_32F,
X,
HIP_R_16F,
1,
Y,
HIP_R_16F,
1,
HIP_R_32F));
#endif
}
namespace {
template <typename TCoeff, typename TData>
__global__ void AxpbyCUDAKernel(
const int N,
const TCoeff a,
const TData* x,
const TCoeff b,
TData* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
y[i] = __ldg(x + i) * a + y[i] * b;
#else
y[i] = x[i] * a + y[i] * b;
#endif
}
}
template <>
__global__ void AxpbyCUDAKernel<float, at::Half>(
const int N,
const float a,
const at::Half* x,
const float b,
at::Half* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = convert::To<float, at::Half>(
convert::To<at::Half, float>(x[i]) * a +
convert::To<at::Half, float>(y[i]) * b);
}
}
template <typename TCoeff, typename TData>
__global__ void AxpbyCUDAKernel(
const int N,
const TCoeff* a,
const TData* x,
const TCoeff* b,
TData* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
y[i] = __ldg(x + i) * __ldg(a) + y[i] * __ldg(b);
#else
y[i] = x[i] * *a + y[i] * *b;
#endif
}
}
template <>
__global__ void AxpbyCUDAKernel<float, at::Half>(
const int N,
const float* a,
const at::Half* x,
const float* b,
at::Half* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
y[i] = convert::To<float, at::Half>(
convert::To<at::Half, float>(x[i]) * __ldg(a) +
convert::To<at::Half, float>(y[i]) * __ldg(b));
#else
y[i] = convert::To<float, at::Half>(
convert::To<at::Half, float>(x[i]) * *a +
convert::To<at::Half, float>(y[i]) * *b);
#endif
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_AXPBY(TCoeff, TData) \
template <> \
CAFFE2_CUDA_EXPORT void Axpby<TCoeff, TData, CUDAContext>( \
const int n, \
const TCoeff a, \
const TData* x, \
const TCoeff b, \
TData* y, \
CUDAContext* context) { \
hipLaunchKernelGGL(( AxpbyCUDAKernel<TCoeff, TData>) \
, dim3(CAFFE_GET_BLOCKS(n)), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), n, a, x, b, y); \
} \
template <> \
CAFFE2_CUDA_EXPORT void Axpby<TCoeff, TData, CUDAContext>( \
const int n, \
const TCoeff* a, \
const TData* x, \
const TCoeff* b, \
TData* y, \
CUDAContext* context) { \
hipLaunchKernelGGL(( AxpbyCUDAKernel<TCoeff, TData>) \
, dim3(CAFFE_GET_BLOCKS(n)), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), n, a, x, b, y); \
}
CAFFE2_SPECIALIZED_CUDA_AXPBY(float, float)
CAFFE2_SPECIALIZED_CUDA_AXPBY(float, at::Half)
#undef CAFFE2_SPECIALIZED_CUDA_AXPBY
namespace {
template <typename T>
__global__ void Im2ColNCHWCUDAKernel(
const int n,
const int input_h,
const int input_w,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int stride_h,
const int stride_w,
const int output_h,
const int output_w,
const T* img_data,
T* col_data) {
CUDA_1D_KERNEL_LOOP(index, n) {
const int w_out = index % output_w;
const int h_index = index / output_w;
const int h_out = h_index % output_h;
const int channel_in = h_index / output_h;
const int channel_out = channel_in * kernel_h * kernel_w;
const int h_in = h_out * stride_h - pad_t;
const int w_in = w_out * stride_w - pad_l;
const int output_size = output_h * output_w;
T* col_data_ptr =
col_data + (channel_out * output_h + h_out) * output_w + w_out;
const T* img_data_ptr =
img_data + (channel_in * input_h + h_in) * input_w + w_in;
int dh = 0;
for (int i = 0; i < kernel_h; ++i) {
int dw = 0;
for (int j = 0; j < kernel_w; ++j) {
const int h = h_in + dh;
const int w = w_in + dw;
#if __CUDA_ARCH__ >= 350 || defined(__HIPCC__)
*col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) &&
utils::IsAGeZeroAndALtB(w, input_w)
? __ldg(img_data_ptr + dh * input_w + dw)
: 0;
#else
*col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) &&
utils::IsAGeZeroAndALtB(w, input_w)
? img_data_ptr[dh * input_w + dw]
: 0;
#endif
col_data_ptr += output_size;
dw += dilation_w;
}
dh += dilation_h;
}
}
}
template <typename T>
__global__ void Im2ColNHWCCUDAKernel(
const int n,
const int input_h,
const int input_w,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int stride_h,
const int stride_w,
const int output_w,
const int channels,
const T* img_data,
T* col_data) {
CUDA_1D_KERNEL_LOOP(index, n) {
const int channel_in = index % channels;
const int w_out = index / channels % output_w;
const int h_out = index / channels / output_w;
const int h_in = h_out * stride_h - pad_t;
const int w_in = w_out * stride_w - pad_l;
T* col_data_ptr = col_data +
(h_out * output_w + w_out) * channels * kernel_h * kernel_w +
channel_in;
int dh = 0;
for (int i = 0; i < kernel_h; ++i) {
int dw = 0;
for (int j = 0; j < kernel_w; ++j) {
const int h = h_in + dh;
const int w = w_in + dw;
#if __CUDA_ARCH__ >= 350 || defined(__HIPCC__)
*col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) &&
utils::IsAGeZeroAndALtB(w, input_w)
? __ldg(img_data + (h * input_w + w) * channels + channel_in)
: 0;
#else
*col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) &&
utils::IsAGeZeroAndALtB(w, input_w)
? img_data[(h * input_w + w) * channels + channel_in]
: 0;
#endif
col_data_ptr += channels;
dw += dilation_w;
}
dh += dilation_h;
}
}
}
template <typename T>
__global__ void Col2ImNCHWCUDAKernel(
const int n,
const int input_h,
const int input_w,
const int patch_h,
const int patch_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int stride_h,
const int stride_w,
const int output_h,
const int output_w,
const T* col_data,
T* img_data) {
const int dpatch_h = dilation_h * (patch_h - 1) + 1;
const int dpatch_w = dilation_w * (patch_w - 1) + 1;
CUDA_1D_KERNEL_LOOP(index, n) {
T val = 0;
const int w = index % input_w + pad_l;
const int h = index / input_w % input_h + pad_t;
const int c = index / (input_h * input_w);
// compute the start and end of the output
const int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1;
const int w_col_end = min(w / stride_w + 1, output_w);
const int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1;
const int h_col_end = min(h / stride_h + 1, output_h);
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
int h_k = (h - h_col * stride_h);
int w_k = (w - w_col * stride_w);
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
const int col_data_index =
(((c * patch_h + h_k) * patch_w + w_k) * output_h + h_col) *
output_w +
w_col;
#if __CUDA_ARCH__ >= 350 || defined(__HIPCC__)
val += __ldg(col_data + col_data_index);
#else
val += col_data[col_data_index];
#endif
}
}
}
img_data[index] = val;
}
}
template <typename T>
__global__ void Col2ImNHWCCUDAKernel(
const int n,
const int input_w,
const int channels,
const int patch_h,
const int patch_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int stride_h,
const int stride_w,
const int output_h,
const int output_w,
const T* col_data,
T* img_data) {
const int dpatch_h = dilation_h * (patch_h - 1) + 1;
const int dpatch_w = dilation_w * (patch_w - 1) + 1;
CUDA_1D_KERNEL_LOOP(index, n) {
T val = 0;
const int c = index % channels;
const int w = index / channels % input_w + pad_l;
const int h = index / channels / input_w + pad_t;
// compute the start and end of the output
const int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1;
const int w_col_end = min(w / stride_w + 1, output_w);
const int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1;
const int h_col_end = min(h / stride_h + 1, output_h);
const int channels_col = patch_h * patch_w * channels;
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
int h_k = h - h_col * stride_h;
int w_k = w - w_col * stride_w;
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
const int c_col = (h_k * patch_w + w_k) * channels + c;
#if __CUDA_ARCH__ >= 350 || defined(__HIPCC__)
val += __ldg(
col_data + (h_col * output_w + w_col) * channels_col + c_col);
#else
val += col_data[(h_col * output_w + w_col) * channels_col + c_col];
#endif
}
}
}
img_data[index] = val;
}
}
template <typename T, int N, bool kCol2Im>
__global__ void Im2ColNdNCHWCUDAKernel(
const int outer_size,
const int inner_size,
const int kernel_size,
SimpleArray<int, N + 1> img_shape,
SimpleArray<int, N + 1> col_shape,
SimpleArray<int, N> kernel_shape,
SimpleArray<int, N> stride,
SimpleArray<int, N> dilation,
SimpleArray<int, N> pad,
const T* X_data,
T* Y_data) {
int d_offset[N];
int d_iter[N];
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
int offset_i = i;
#pragma unroll
for (int d_i = N - 1; d_i >= 0; --d_i) {
d_offset[d_i] = offset_i % kernel_shape.data[d_i];
offset_i /= kernel_shape.data[d_i];
}
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
int offset_j = j;
#pragma unroll
for (int d_i = N - 1; d_i >= 0; --d_i) {
d_iter[d_i] = offset_j % col_shape.data[d_i + 1];
offset_j /= col_shape.data[d_i + 1];
}
const int col_index = i * inner_size + j;
int img_index = i / kernel_size;
bool is_padding = false;
#pragma unroll
for (int d_i = 0; d_i < N; ++d_i) {
const int d_img = d_iter[d_i] * stride.data[d_i] - pad.data[d_i] +
d_offset[d_i] * dilation.data[d_i];
is_padding |= !utils::IsAGeZeroAndALtB(d_img, img_shape.data[d_i + 1]);
img_index = img_index * img_shape.data[d_i + 1] + d_img;
}
#if __CUDA_ARCH__ >= 350 || defined(__HIPCC__)
if (!kCol2Im) {
Y_data[col_index] = is_padding ? 0 : __ldg(X_data + img_index);
} else if (!is_padding) {
atomicAdd(Y_data + img_index, __ldg(X_data + col_index));
}
#else
if (!kCol2Im) {
Y_data[col_index] = is_padding ? 0 : X_data[img_index];
} else if (!is_padding) {
atomicAdd(Y_data + img_index, X_data[col_index]);
}
#endif
}
}
}
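// A single kernel covers both directions via the kCol2Im flag: with
// kCol2Im == false it gathers image values (or zero for padding) into the
// column buffer, and with kCol2Im == true it scatters column values back onto
// the image with atomicAdd, which is why Col2ImNdNCHWCUDAImpl below first
// zero-fills the image before launching the kernel.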
template <typename T, int N>
CAFFE2_CUDA_EXPORT void Im2ColNdNCHWCUDAImpl(
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* img_data,
float* col_data,
CUDAContext* context) {
const int outer_size = col_shape[0];
const int inner_size = col_size / outer_size;
const int kernel_size = std::accumulate(
kernel_shape, kernel_shape + N, 1, std::multiplies<int>());
SimpleArray<int, N + 1> img_shape_array;
SimpleArray<int, N + 1> col_shape_array;
SimpleArray<int, N> kernel_shape_array;
SimpleArray<int, N> stride_array;
SimpleArray<int, N> dilation_array;
SimpleArray<int, N> pad_array;
std::memcpy(img_shape_array.data, img_shape, (N + 1) * sizeof(int));
std::memcpy(col_shape_array.data, col_shape, (N + 1) * sizeof(int));
std::memcpy(kernel_shape_array.data, kernel_shape, N * sizeof(int));
std::memcpy(stride_array.data, stride, N * sizeof(int));
std::memcpy(dilation_array.data, dilation, N * sizeof(int));
std::memcpy(pad_array.data, pad, N * sizeof(int));
hipLaunchKernelGGL(( Im2ColNdNCHWCUDAKernel<T, N, false>)
, dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
outer_size,
inner_size,
kernel_size,
img_shape_array,
col_shape_array,
kernel_shape_array,
stride_array,
dilation_array,
pad_array,
img_data,
col_data);
}
template <typename T, int N>
CAFFE2_CUDA_EXPORT void Col2ImNdNCHWCUDAImpl(
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* col_data,
float* img_data,
CUDAContext* context) {
const int outer_size = col_shape[0];
const int inner_size = col_size / outer_size;
const int kernel_size = std::accumulate(
kernel_shape, kernel_shape + N, 1, std::multiplies<int>());
SimpleArray<int, N + 1> img_shape_array;
SimpleArray<int, N + 1> col_shape_array;
SimpleArray<int, N> kernel_shape_array;
SimpleArray<int, N> stride_array;
SimpleArray<int, N> dilation_array;
SimpleArray<int, N> pad_array;
std::memcpy(img_shape_array.data, img_shape, (N + 1) * sizeof(int));
std::memcpy(col_shape_array.data, col_shape, (N + 1) * sizeof(int));
std::memcpy(kernel_shape_array.data, kernel_shape, N * sizeof(int));
std::memcpy(stride_array.data, stride, N * sizeof(int));
std::memcpy(dilation_array.data, dilation, N * sizeof(int));
std::memcpy(pad_array.data, pad, N * sizeof(int));
Set<T, CUDAContext>(img_size, 0, img_data, context);
hipLaunchKernelGGL(( Im2ColNdNCHWCUDAKernel<T, N, true>)
, dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
outer_size,
inner_size,
kernel_size,
img_shape_array,
col_shape_array,
kernel_shape_array,
stride_array,
dilation_array,
pad_array,
col_data,
img_data);
}
} // namespace
template <>
CAFFE2_CUDA_EXPORT void Im2Col<float, CUDAContext, StorageOrder::NCHW>(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const float* img_data,
float* col_data,
CUDAContext* context,
const int /* groups */) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
const int num_kernels = channels * output_h * output_w;
hipLaunchKernelGGL(( Im2ColNCHWCUDAKernel<float>)
, dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
num_kernels,
height,
width,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
pad_t,
pad_l,
stride_h,
stride_w,
output_h,
output_w,
img_data,
col_data);
}
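// Worked size example for the NCHW launch above (assumed numbers, for
// illustration only): height = width = 5, kernel 3x3, pad 1 on every side,
// stride 2, dilation 1 gives dkernel_h = dkernel_w = 3 and
// output_h = output_w = (5 + 1 + 1 - 3) / 2 + 1 = 3, so the column buffer has
// channels * 3 * 3 rows of output_h * output_w = 9 elements and the kernel is
// launched over channels * 9 units of work.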
template <>
CAFFE2_CUDA_EXPORT void Im2Col<float, CUDAContext, StorageOrder::NHWC>(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const float* img_data,
float* col_data,
CUDAContext* context,
const int groups) {
CAFFE_ENFORCE_EQ(groups, 1, "groups must be 1 for GPU NHWC Im2Col");
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
const int num_kernels = output_h * output_w * channels;
hipLaunchKernelGGL(( Im2ColNHWCCUDAKernel<float>)
, dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
num_kernels,
height,
width,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
pad_t,
pad_l,
stride_h,
stride_w,
output_w,
channels,
img_data,
col_data);
}
template <>
CAFFE2_CUDA_EXPORT void Col2Im<float, CUDAContext, StorageOrder::NCHW>(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const float* col_data,
float* img_data,
CUDAContext* context,
const int /* groups */) {
// In NCHW, the number of groups doesn't affect Col2Im.
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
const int num_kernels = channels * height * width;
hipLaunchKernelGGL(( Col2ImNCHWCUDAKernel<float>)
, dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
num_kernels,
height,
width,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
pad_t,
pad_l,
stride_h,
stride_w,
output_h,
output_w,
col_data,
img_data);
}
template <>
CAFFE2_CUDA_EXPORT void Col2Im<float, CUDAContext, StorageOrder::NHWC>(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const float* col_data,
float* img_data,
CUDAContext* context,
const int groups) {
CAFFE_ENFORCE_EQ(groups, 1, "groups must be 1 for GPU NHWC Col2Im");
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
const int num_kernels = height * width * channels;
hipLaunchKernelGGL(( Col2ImNHWCCUDAKernel<float>)
, dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
num_kernels,
width,
channels,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
pad_t,
pad_l,
stride_h,
stride_w,
output_h,
output_w,
col_data,
img_data);
}
template <>
CAFFE2_CUDA_EXPORT void Im2ColNd<float, CUDAContext, StorageOrder::NCHW>(
const int N,
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* img_data,
float* col_data,
CUDAContext* context,
const int /* groups */) {
// In NCHW, the number of groups doesn't affect Im2Col.
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
N,
Im2ColNdNCHWCUDAImpl,
float,
img_size,
col_size,
img_shape,
col_shape,
kernel_shape,
stride,
dilation,
pad,
img_data,
col_data,
context);
}
template <>
CAFFE2_CUDA_EXPORT void Im2ColNd<float, CUDAContext, StorageOrder::NHWC>(
const int N,
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* img_data,
float* col_data,
CUDAContext* context,
const int groups) {
CAFFE_NOT_IMPLEMENTED;
}
template <>
CAFFE2_CUDA_EXPORT void Col2ImNd<float, CUDAContext, StorageOrder::NCHW>(
const int N,
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* col_data,
float* img_data,
CUDAContext* context,
int /* groups */) {
// In NCHW, the number of groups doesn't affect Col2Im.
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
N,
Col2ImNdNCHWCUDAImpl,
float,
img_size,
col_size,
img_shape,
col_shape,
kernel_shape,
stride,
dilation,
pad,
col_data,
img_data,
context);
}
template <>
CAFFE2_CUDA_EXPORT void Col2ImNd<float, CUDAContext, StorageOrder::NHWC>(
const int N,
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* col_data,
float* img_data,
CUDAContext* context,
int groups) {
CAFFE_NOT_IMPLEMENTED;
}
template <>
CAFFE2_CUDA_EXPORT void CopyMatrix<CUDAContext>(
const size_t itemsize,
const int M,
const int N,
const void* A,
const int lda,
void* B,
const int ldb,
CUDAContext* context,
TypeMeta::Copy copy) {
CAFFE_ENFORCE(!copy, "Copy constructor is not supported in CUDA context");
hipMemcpy2DAsync(
B,
ldb * itemsize,
A,
lda * itemsize,
N * itemsize,
M,
hipMemcpyDeviceToDevice,
context->cuda_stream());
}
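// hipMemcpy2DAsync takes pitches and widths in bytes: the destination and
// source pitches are ldb/lda scaled by itemsize, the copied width is
// N * itemsize bytes and the height is M rows, so this extracts an M x N
// submatrix out of larger row-major matrices with leading dimensions lda and
// ldb. Sketch (illustrative only; copies a 4 x 3 float block out of an
// 8-column source into a tightly packed 3-column destination):
//
//   math::CopyMatrix<CUDAContext>(
//       sizeof(float), 4, 3, src, 8, dst, 3, context, nullptr);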
#define CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(T) \
template <> \
void CopyMatrix<T, CUDAContext>( \
const int M, \
const int N, \
const T* A, \
const int lda, \
T* B, \
const int ldb, \
CUDAContext* context) { \
if (M == 0 || N == 0) { \
return; \
} \
hipMemcpy2DAsync( \
B, \
sizeof(T) * ldb, \
A, \
sizeof(T) * lda, \
sizeof(T) * N, \
M, \
hipMemcpyDeviceToDevice, \
context->cuda_stream()); \
}
CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(float)
CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(double)
CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(int)
CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(int64_t)
#undef CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX
template <>
CAFFE2_CUDA_EXPORT void CopyVector<float, CUDAContext>(
const int N,
const float* src,
float* dst,
CUDAContext* context) {
if (src != dst && N > 0) {
hipMemcpyAsync(
dst,
src,
sizeof(float) * N,
hipMemcpyDeviceToDevice,
context->cuda_stream());
}
}
namespace {
template <typename T>
using BlockReduce = hipcub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>;
template <typename T, class Reducer>
__global__ void RowwiseReduceKernel(
const int rows,
const int cols,
const Reducer reducer,
const T init,
const T alpha,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
for (int i = blockIdx.x; i < rows; i += gridDim.x) {
T val = init;
for (int j = threadIdx.x; j < cols; j += blockDim.x) {
val = reducer(X[i * cols + j], val);
}
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[i] = val * alpha;
}
__syncthreads();
}
}
template <typename T, class Reducer>
__global__ void ColwiseReduceKernel(
const int rows,
const int cols,
const Reducer reducer,
const T init,
const T alpha,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
for (int i = blockIdx.x; i < cols; i += gridDim.x) {
T val = init;
for (int j = threadIdx.x; j < rows; j += blockDim.x) {
val = reducer(X[j * cols + i], val);
}
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[i] = val * alpha;
}
__syncthreads();
}
}
} // namespace
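// Both reduction kernels above follow the same hipcub::BlockReduce pattern:
// one block is assigned per output row (or column), every thread accumulates
// a strided partial result, BlockReduce combines the partials through shared
// memory, and only thread 0 writes the scaled output; the trailing
// __syncthreads() makes it safe to reuse the shared temp storage on the next
// grid-stride iteration.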
#define CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX(T) \
template <> \
CAFFE2_CUDA_EXPORT void RowwiseMax<T, CUDAContext>( \
const int N, const int D, const T* x, T* y, CUDAContext* context) { \
hipLaunchKernelGGL(( RowwiseReduceKernel), \
::min(N, CAFFE_MAXIMUM_NUM_BLOCKS), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), \
N, D, hipcub::Max(), std::numeric_limits<T>::lowest(), T(1), x, y); \
}
CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX(float)
#undef CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX
#define CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX(T) \
template <> \
CAFFE2_CUDA_EXPORT void ColwiseMax<T, CUDAContext>( \
const int N, const int D, const T* x, T* y, CUDAContext* context) { \
hipLaunchKernelGGL(( ColwiseReduceKernel), \
::min(D, CAFFE_MAXIMUM_NUM_BLOCKS), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), \
N, D, hipcub::Max(), std::numeric_limits<T>::lowest(), T(1), x, y); \
}
CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX(float)
#undef CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX
namespace {
__global__ void
maximum_kernel(const int N, const float alpha, const float* x, float* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = fmaxf(x[i], alpha);
}
}
} // namespace
template <>
CAFFE2_CUDA_EXPORT void Maximum(
const int N,
const float alpha,
const float* x,
float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( maximum_kernel),
dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), N, alpha, x, y);
}
namespace {
template <typename T, class Reducer, int D>
__global__ void ReduceTensorCUDAKernel(
const int outer_size,
const int inner_size,
SimpleArray<int, D> X_strides,
SimpleArray<FIXED_DIVISOR, D> Y_dims,
const Reducer reducer,
const T init,
const T alpha,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
T val = init;
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
int X_index = 0;
int Y_index = i * inner_size + j;
#pragma unroll
for (int d = D - 1; d >= 0; --d) {
int r;
FIXED_DIVISOR_DIV_MOD(Y_dims.data[d], Y_index, &Y_index, &r);
X_index += r * X_strides.data[d];
}
#if __CUDA_ARCH__ >= 350
val = reducer(val, __ldg(X + X_index));
#else
val = reducer(val, X[X_index]);
#endif
}
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[i] = val * alpha;
}
__syncthreads();
}
}
template <typename T, class Reducer, int D>
CAFFE2_CUDA_EXPORT void ReduceTensorCUDAImpl(
const int outer_size,
const int inner_size,
const int* dims,
const int* axes,
const Reducer& reducer,
const T init,
const T alpha,
const T* X,
T* Y,
CUDAContext* context) {
SimpleArray<int, D> X_strides;
SimpleArray<FIXED_DIVISOR, D> Y_dims;
utils::ComputeTransposedStrides(D, dims, axes, X_strides.data);
for (int i = 0; i < D; ++i) {
Y_dims.data[i] = FIXED_DIVISOR(dims[axes[i]]);
}
hipLaunchKernelGGL(( ReduceTensorCUDAKernel<T, Reducer, D>)
, dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
outer_size,
inner_size,
X_strides,
Y_dims,
reducer,
init,
alpha,
X,
Y);
}
template <typename T, class Reducer>
CAFFE2_CUDA_EXPORT void ReduceTensorCUDA(
const int num_dims,
const int* dims,
const int num_axes,
const int* axes,
const Reducer& reducer,
const T init,
const T alpha,
const T* X,
T* Y,
CUDAContext* context) {
CAFFE_ENFORCE_LE(num_axes, num_dims);
std::vector<int> Y_dims_vector(dims, dims + num_dims);
for (int i = 0; i < num_axes; ++i) {
Y_dims_vector[axes[i]] = 1;
}
const int* X_dims = dims;
const int* Y_dims = Y_dims_vector.data();
const int X_size =
std::accumulate(X_dims, X_dims + num_dims, 1, std::multiplies<int>());
const int Y_size =
std::accumulate(Y_dims, Y_dims + num_dims, 1, std::multiplies<int>());
if (X_size == 0) {
Set<T, CUDAContext>(Y_size, alpha * init, Y, context);
return;
}
if (alpha == T(0)) {
Set<T, CUDAContext>(Y_size, T(0), Y, context);
return;
}
if (std::equal(X_dims, X_dims + num_dims, Y_dims)) {
Scale<T, T, CUDAContext>(X_size, alpha, X, Y, context);
return;
}
int rows;
int cols;
if (utils::IsRowwiseReduce(num_dims, X_dims, Y_dims, &rows, &cols)) {
hipLaunchKernelGGL(( RowwiseReduceKernel<T>)
, dim3(::min(rows, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), rows, cols, reducer, init, alpha, X, Y);
return;
}
if (utils::IsColwiseReduce(num_dims, X_dims, Y_dims, &rows, &cols)) {
hipLaunchKernelGGL(( ColwiseReduceKernel<T>)
, dim3(::min(rows, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), rows, cols, reducer, init, alpha, X, Y);
return;
}
std::vector<int> transpose_axes(num_dims);
utils::ComputeTransposeAxesForReduceOp(
num_dims, num_axes, axes, transpose_axes.data());
const int outer_size = Y_size;
const int inner_size = X_size / Y_size;
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_2(
num_dims,
ReduceTensorCUDAImpl,
T,
Reducer,
outer_size,
inner_size,
dims,
transpose_axes.data(),
reducer,
init,
alpha,
X,
Y,
context);
}
} // namespace
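// ReduceTensorCUDA (above) picks the cheapest available strategy: an empty
// input degenerates to Set with the reducer's identity, a zero alpha
// degenerates to Set(0), a reduction whose output shape equals the input
// shape degenerates to Scale, reductions over a contiguous inner or outer
// block go through the rowwise/colwise kernels, and everything else runs the
// generic kernel, which walks Y indices and maps them back to X through
// transposed strides decoded with FIXED_DIVISOR. The macros below differ only
// in the reducer and its identity element (Min/max(), Max/lowest(), Sum/0).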
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(T) \
template <> \
CAFFE2_CUDA_EXPORT void ReduceMin<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T alpha, \
const T* X, \
T* Y, \
CUDAContext* context) { \
ReduceTensorCUDA( \
num_dims, \
dims, \
num_axes, \
axes, \
hipcub::Min(), \
std::numeric_limits<T>::max(), \
alpha, \
X, \
Y, \
context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(float)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(double)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(T) \
template <> \
CAFFE2_CUDA_EXPORT void ReduceMax<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T alpha, \
const T* X, \
T* Y, \
CUDAContext* context) { \
ReduceTensorCUDA( \
num_dims, \
dims, \
num_axes, \
axes, \
hipcub::Max(), \
std::numeric_limits<T>::lowest(), \
alpha, \
X, \
Y, \
context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(float)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(double)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(T) \
template <> \
CAFFE2_CUDA_EXPORT void ReduceSum<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T alpha, \
const T* X, \
T* Y, \
CUDAContext* context) { \
ReduceTensorCUDA( \
num_dims, \
dims, \
num_axes, \
axes, \
hipcub::Sum(), \
T(0), \
alpha, \
X, \
Y, \
context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(float)
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(double)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN(T) \
template <> \
CAFFE2_CUDA_EXPORT void ReduceMean<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T alpha, \
const T* X, \
T* Y, \
CUDAContext* context) { \
int scale = 1; \
for (int i = 0; i < num_axes; ++i) { \
scale *= dims[axes[i]]; \
} \
ReduceTensorCUDA( \
num_dims, \
dims, \
num_axes, \
axes, \
hipcub::Sum(), \
T(0), \
alpha / static_cast<T>(scale), \
X, \
Y, \
context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN(float)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN
namespace {
template <typename T, int D>
__global__ void BroadcastCUDAKernel(
const int Y_size,
const SimpleArray<int, D> X_strides,
const SimpleArray<FIXED_DIVISOR, D> Y_dims,
const T alpha,
const T* X,
T* Y) {
CUDA_1D_KERNEL_LOOP(Y_index, Y_size) {
int X_index = 0;
int Y_index_val = Y_index;
#pragma unroll
for (int i = D - 1; i >= 0; --i) {
int d;
FIXED_DIVISOR_DIV_MOD(Y_dims.data[i], Y_index_val, &Y_index_val, &d);
X_index += d * X_strides.data[i];
}
#if __CUDA_ARCH__ >= 350 || defined(__HIPCC__)
Y[Y_index] = __ldg(X + X_index) * alpha;
#else
Y[Y_index] = X[X_index] * alpha;
#endif
}
}
template <typename T, int D>
CAFFE2_CUDA_EXPORT void BroadcastCUDAImpl(
const int X_ndim,
const int* X_dims,
const int* Y_dims,
const T alpha,
const T* X,
T* Y,
CUDAContext* context) {
SimpleArray<int, D> X_strides_array;
SimpleArray<FIXED_DIVISOR, D> Y_dims_array;
const int d = D - X_ndim;
std::fill(X_strides_array.data, X_strides_array.data + d, 0);
int cur_stride = 1;
for (int i = D - 1; i >= d; --i) {
CAFFE_ENFORCE(X_dims[i - d] == 1 || X_dims[i - d] == Y_dims[i]);
X_strides_array.data[i] = X_dims[i - d] == 1 ? 0 : cur_stride;
cur_stride *= X_dims[i - d];
}
for (int i = 0; i < D; ++i) {
if (Y_dims[i] == 0) {
return;
}
Y_dims_array.data[i] = FIXED_DIVISOR(Y_dims[i]);
}
const int Y_size =
std::accumulate(Y_dims, Y_dims + D, 1, std::multiplies<int>());
hipLaunchKernelGGL(( BroadcastCUDAKernel<T, D>)
, dim3(CAFFE_GET_BLOCKS(Y_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
Y_size, X_strides_array, Y_dims_array, alpha, X, Y);
}
} // namespace
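// Broadcasting is implemented purely with strides: X is right-aligned against
// Y, every missing leading dimension and every X dimension of size 1 gets
// stride 0, so all Y positions along a broadcast dimension read the same X
// element. Example (assumed shapes, for illustration): X of shape [3, 1]
// broadcast to Y of shape [2, 3, 4] yields X_strides = {0, 1, 0}, i.e. the
// Y index (n, i, j) reads X[i].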
#define CAFFE2_SPECIALIZED_CUDA_BROADCAST(T) \
template <> \
CAFFE2_CUDA_EXPORT void Broadcast<T, CUDAContext>( \
const int X_ndim, \
const int* X_dims, \
const int Y_ndim, \
const int* Y_dims, \
const T alpha, \
const T* X, \
T* Y, \
CUDAContext* context) { \
CAFFE_ENFORCE_LE(X_ndim, Y_ndim); \
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1( \
Y_ndim, \
BroadcastCUDAImpl, \
T, \
X_ndim, \
X_dims, \
Y_dims, \
alpha, \
X, \
Y, \
context); \
}
CAFFE2_SPECIALIZED_CUDA_BROADCAST(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(float)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(double)
#undef CAFFE2_SPECIALIZED_CUDA_BROADCAST
namespace {
template <typename T>
__global__ void RowwiseMomentsCUDAKernel(
const int rows,
const int cols,
const T* X,
T* mean,
T* variance) {
__shared__ typename BlockReduce<T>::TempStorage m_storage;
__shared__ typename BlockReduce<T>::TempStorage v_storage;
const T scale = T(1) / static_cast<T>(cols);
for (int i = blockIdx.x; i < rows; i += gridDim.x) {
T m_val = 0;
T v_val = 0;
for (int j = threadIdx.x; j < cols; j += blockDim.x) {
const int X_index = i * cols + j;
#if __CUDA_ARCH__ >= 350 || defined(__HIPCC__)
m_val += __ldg(X + X_index);
v_val += __ldg(X + X_index) * __ldg(X + X_index);
#else
m_val += X[X_index];
v_val += X[X_index] * X[X_index];
#endif
}
m_val = BlockReduce<T>(m_storage).Sum(m_val);
v_val = BlockReduce<T>(v_storage).Sum(v_val);
if (threadIdx.x == 0) {
const T mu = m_val * scale;
mean[i] = mu;
variance[i] = v_val * scale - mu * mu;
}
__syncthreads();
}
}
template <typename T>
__global__ void ColwiseMomentsCUDAKernel(
const int rows,
const int cols,
const T* X,
T* mean,
T* variance) {
__shared__ typename BlockReduce<T>::TempStorage m_storage;
__shared__ typename BlockReduce<T>::TempStorage v_storage;
const T scale = T(1) / static_cast<T>(rows);
for (int i = blockIdx.x; i < cols; i += gridDim.x) {
T m_val = 0;
T v_val = 0;
for (int j = threadIdx.x; j < rows; j += blockDim.x) {
const int X_index = j * cols + i;
#if __CUDA_ARCH__ >= 350 || defined(__HIPCC__)
m_val += __ldg(X + X_index);
v_val += __ldg(X + X_index) * __ldg(X + X_index);
#else
m_val += X[X_index];
v_val += X[X_index] * X[X_index];
#endif
}
m_val = BlockReduce<T>(m_storage).Sum(m_val);
v_val = BlockReduce<T>(v_storage).Sum(v_val);
if (threadIdx.x == 0) {
const T mu = m_val * scale;
mean[i] = mu;
variance[i] = v_val * scale - mu * mu;
}
__syncthreads();
}
}
template <typename T, int D>
__global__ void MomentsCUDAKernel(
const int outer_size,
const int inner_size,
SimpleArray<int, D> X_strides,
SimpleArray<FIXED_DIVISOR, D> Y_dims,
const T* X,
T* mean,
T* variance) {
__shared__ typename BlockReduce<T>::TempStorage m_storage;
__shared__ typename BlockReduce<T>::TempStorage v_storage;
const T scale = T(1) / static_cast<T>(inner_size);
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
T m_val = 0;
T v_val = 0;
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
int X_index = 0;
int Y_index = i * inner_size + j;
#pragma unroll
for (int d = D - 1; d >= 0; --d) {
int r;
FIXED_DIVISOR_DIV_MOD(Y_dims.data[d], Y_index, &Y_index, &r);
X_index += r * X_strides.data[d];
}
#if __CUDA_ARCH__ >= 350 || defined(__HIPCC__)
m_val += __ldg(X + X_index);
v_val += __ldg(X + X_index) * __ldg(X + X_index);
#else
m_val += X[X_index];
v_val += X[X_index] * X[X_index];
#endif
}
m_val = BlockReduce<T>(m_storage).Sum(m_val);
v_val = BlockReduce<T>(v_storage).Sum(v_val);
if (threadIdx.x == 0) {
const T mu = m_val * scale;
mean[i] = mu;
variance[i] = v_val * scale - mu * mu;
}
__syncthreads();
}
}
template <typename T, int D>
CAFFE2_CUDA_EXPORT void MomentsCUDAImpl(
const int outer_size,
const int inner_size,
const int* dims,
const int* axes,
const T* X,
T* mean,
T* variance,
CUDAContext* context) {
SimpleArray<int, D> X_strides;
SimpleArray<FIXED_DIVISOR, D> Y_dims;
utils::ComputeTransposedStrides(D, dims, axes, X_strides.data);
for (int i = 0; i < D; ++i) {
Y_dims.data[i] = FIXED_DIVISOR(dims[axes[i]]);
}
hipLaunchKernelGGL(( MomentsCUDAKernel<T, D>)
, dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
outer_size, inner_size, X_strides, Y_dims, X, mean, variance);
}
template <typename T>
CAFFE2_CUDA_EXPORT void MomentsCUDA(
const int num_dims,
const int* dims,
const int num_axes,
const int* axes,
const T* X,
T* mean,
T* variance,
CUDAContext* context) {
CAFFE_ENFORCE_LE(num_axes, num_dims);
std::vector<int> Y_dims_vector(dims, dims + num_dims);
for (int i = 0; i < num_axes; ++i) {
Y_dims_vector[axes[i]] = 1;
}
const int* X_dims = dims;
const int* Y_dims = Y_dims_vector.data();
const int X_size =
std::accumulate(X_dims, X_dims + num_dims, 1, std::multiplies<int>());
const int Y_size =
std::accumulate(Y_dims, Y_dims + num_dims, 1, std::multiplies<int>());
if (X_size == 0) {
Set<T, CUDAContext>(Y_size, T(0), mean, context);
Set<T, CUDAContext>(Y_size, T(0), variance, context);
return;
}
if (std::equal(X_dims, X_dims + num_dims, Y_dims)) {
hipMemcpyAsync(
mean,
X,
sizeof(T) * X_size,
hipMemcpyDeviceToDevice,
context->cuda_stream());
Set<T, CUDAContext>(Y_size, T(0), variance, context);
return;
}
int rows;
int cols;
if (utils::IsRowwiseReduce(num_dims, X_dims, Y_dims, &rows, &cols)) {
hipLaunchKernelGGL(( RowwiseMomentsCUDAKernel<T>)
, dim3(::min(rows, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), rows, cols, X, mean, variance);
return;
}
if (utils::IsColwiseReduce(num_dims, X_dims, Y_dims, &rows, &cols)) {
hipLaunchKernelGGL(( ColwiseMomentsCUDAKernel<T>)
, dim3(::min(rows, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), rows, cols, X, mean, variance);
return;
}
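  // Generic path: logically permute the axes so the reduced dimensions come
  // last, then dispatch the rank-templated kernel.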
std::vector<int> transpose_axes(num_dims);
utils::ComputeTransposeAxesForReduceOp(
num_dims, num_axes, axes, transpose_axes.data());
const int pivot = num_dims - num_axes;
int outer_size = 1;
for (int i = 0; i < pivot; ++i) {
outer_size *= dims[transpose_axes[i]];
}
int inner_size = 1;
for (int i = pivot; i < num_dims; ++i) {
inner_size *= dims[transpose_axes[i]];
}
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
num_dims,
MomentsCUDAImpl,
T,
outer_size,
inner_size,
dims,
transpose_axes.data(),
X,
mean,
variance,
context);
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_MOMENTS(T) \
template <> \
CAFFE2_CUDA_EXPORT void Moments<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T* X, \
T* mean, \
T* variance, \
CUDAContext* context) { \
MomentsCUDA<T>( \
num_dims, dims, num_axes, axes, X, mean, variance, context); \
}
CAFFE2_SPECIALIZED_CUDA_MOMENTS(float)
#undef CAFFE2_SPECIALIZED_CUDA_MOMENTS
namespace {
template <typename T>
__global__ void
InvStdCUDAKernel(const int N, const T epsilon, const T* var, T* inv_std);
#define DELEGATE_INV_STD_KERNEL_FUNCTION(T, Func) \
template <> \
__global__ void InvStdCUDAKernel<T>( \
const int N, const T epsilon, const T* var, T* inv_std) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
inv_std[i] = Func(var[i] + epsilon); \
} \
}
DELEGATE_INV_STD_KERNEL_FUNCTION(float, rsqrtf)
#undef DELEGATE_INV_STD_KERNEL_FUNCTION
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_INV_STD(T) \
template <> \
CAFFE2_CUDA_EXPORT void InvStd<T, CUDAContext>( \
const int N, \
const T epsilon, \
const T* var, \
T* inv_std, \
CUDAContext* context) { \
hipLaunchKernelGGL(( InvStdCUDAKernel<T>) \
, dim3(CAFFE_GET_BLOCKS(N)), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), N, epsilon, var, inv_std); \
}
CAFFE2_SPECIALIZED_CUDA_INV_STD(float)
#undef CAFFE2_SPECIALIZED_CUDA_INV_STD
namespace {
constexpr int kTileDim = 32;
constexpr int kBlockRows = 8;
// Splits the original matrix into 32 x 32 submatrices (tiles).
// Each block transposes one tile by staging it in shared memory.
// Reference: https://devblogs.nvidia.com/efficient-matrix-transpose-cuda-cc/
template <typename T>
__global__ void BatchTranspose2DCUDAKernel(
const int N,
const int H,
const int W,
const T* X,
T* Y) {
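  // The +1 padding on the inner dimension avoids shared-memory bank conflicts.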
__shared__ T tile[kTileDim][kTileDim + 1];
const int h = (H + kTileDim - 1) / kTileDim;
const int w = (W + kTileDim - 1) / kTileDim;
const int outer_size = N * h * w;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
const int n = i / (h * w);
const int k = i % (h * w);
const int r = k / w;
const int c = k % w;
const int offset = n * H * W;
int x = c * kTileDim + threadIdx.x;
int y = r * kTileDim + threadIdx.y;
if (x < W) {
for (int j = 0; j < kTileDim && y + j < H; j += kBlockRows) {
#if __CUDA_ARCH__ >= 350 || defined(__HIPCC__)
tile[threadIdx.y + j][threadIdx.x] =
__ldg(X + offset + (y + j) * W + x);
#else
tile[threadIdx.y + j][threadIdx.x] = X[offset + (y + j) * W + x];
#endif
}
}
__syncthreads();
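    // Write the tile back with the row/column tile indices swapped, reading the
    // shared tile transposed.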
x = r * kTileDim + threadIdx.x;
y = c * kTileDim + threadIdx.y;
if (x < H) {
for (int j = 0; j < kTileDim && y + j < W; j += kBlockRows) {
Y[offset + (y + j) * H + x] = tile[threadIdx.x][threadIdx.y + j];
}
}
__syncthreads();
}
}
template <typename T, int D>
__global__ void TransposeCUDAKernel(
const int size,
const SimpleArray<int, D> X_strides,
const SimpleArray<FIXED_DIVISOR, D> Y_dims,
const T* X,
T* Y) {
CUDA_1D_KERNEL_LOOP(Y_index, size) {
int X_index = 0;
int Y_index_val = Y_index;
#pragma unroll
for (int i = D - 1; i >= 0; --i) {
int d;
FIXED_DIVISOR_DIV_MOD(Y_dims.data[i], Y_index_val, &Y_index_val, &d);
X_index += d * X_strides.data[i];
}
#if __CUDA_ARCH__ >= 350 || defined(__HIPCC__)
Y[Y_index] = __ldg(X + X_index);
#else
Y[Y_index] = X[X_index];
#endif
}
}
template <typename T, int D>
CAFFE2_CUDA_EXPORT void TransposeCUDAImpl(
const int* dims,
const int* axes,
const T* X,
T* Y,
CUDAContext* context) {
SimpleArray<int, D> X_strides;
SimpleArray<FIXED_DIVISOR, D> Y_dims;
utils::ComputeTransposedStrides(D, dims, axes, X_strides.data);
int size = 1;
for (int i = 0; i < D; ++i) {
Y_dims.data[i] = FIXED_DIVISOR(dims[axes[i]]);
size *= dims[i];
}
hipLaunchKernelGGL(( TransposeCUDAKernel<T, D>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), size, X_strides, Y_dims, X, Y);
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(T) \
template <> \
CAFFE2_CUDA_EXPORT void Transpose<T, CUDAContext>( \
const int ndim, \
const int* dims, \
const int* axes, \
const T* X, \
T* Y, \
CUDAContext* context) { \
if (utils::IsIdentityPermutation(ndim, axes)) { \
const int size = \
std::accumulate(dims, dims + ndim, 1, std::multiplies<int>()); \
context->template CopySameDevice<T>(size, X, Y); \
return; \
} \
if (utils::IsBatchTranspose2D(ndim, axes)) { \
const int N = \
std::accumulate(dims, dims + ndim - 2, 1, std::multiplies<int>()); \
const int H = dims[ndim - 2]; \
const int W = dims[ndim - 1]; \
const int h = (H + kTileDim - 1) / kTileDim; \
const int w = (W + kTileDim - 1) / kTileDim; \
const int outer_size = N * h * w; \
const dim3 dim_block(kTileDim, kBlockRows, 1); \
hipLaunchKernelGGL(( BatchTranspose2DCUDAKernel<T>) \
, dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)), \
dim_block, \
0, \
context->cuda_stream(), N, H, W, X, Y); \
return; \
} \
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1( \
ndim, TransposeCUDAImpl, T, dims, axes, X, Y, context); \
}
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(float)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(double)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(int)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(int64_t)
#undef CAFFE2_SPECIALIZED_CUDA_TRANSPOSE
namespace {
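// AffineChannel computes Y = scale[c] * X + bias[c]; how the channel index c is
// derived from the flat index depends on the storage order (NCHW vs NHWC).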
template <typename T, StorageOrder kOrder>
__global__ void AffineChannelCUDAKernel(
const int size,
const int C,
const int HxW,
const T* X,
const T* scale,
const T* bias,
T* Y) {
CUDA_1D_KERNEL_LOOP(i, size) {
const int c = kOrder == StorageOrder::NCHW ? i / HxW % C : i % C;
#if __CUDA_ARCH__ >= 350 || defined(__HIPCC__)
Y[i] = __ldg(scale + c) * __ldg(X + i) + __ldg(bias + c);
#else
Y[i] = scale[c] * X[i] + bias[c];
#endif
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_AFFINE_CHANNEL(T, kOrder) \
template <> \
CAFFE2_CUDA_EXPORT void AffineChannel<T, CUDAContext, kOrder>( \
const int N, \
const int C, \
const int HxW, \
const T* X, \
const T* scale, \
const T* bias, \
T* Y, \
CUDAContext* context) { \
const int size = N * C * HxW; \
hipLaunchKernelGGL(( AffineChannelCUDAKernel<T, kOrder>) \
, dim3(CAFFE_GET_BLOCKS(size)), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), size, C, HxW, X, scale, bias, Y); \
}
CAFFE2_SPECIALIZED_CUDA_AFFINE_CHANNEL(float, StorageOrder::NCHW)
CAFFE2_SPECIALIZED_CUDA_AFFINE_CHANNEL(float, StorageOrder::NHWC)
#undef CAFFE2_SPECIALIZED_CUDA_AFFINE_CHANNEL
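// NCHW <-> NHWC conversions are expressed as batched 2D transposes of the
// C x HxW (or HxW x C) matrix of each of the N images.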
#define CAFFE2_SPECIALIZED_CUDA_NCHW2NHWC(T) \
template <> \
CAFFE2_CUDA_EXPORT void NCHW2NHWC<T, CUDAContext>( \
const int N, \
const int C, \
const int HxW, \
const T* X, \
T* Y, \
CUDAContext* context) { \
const int h = (C + kTileDim - 1) / kTileDim; \
const int w = (HxW + kTileDim - 1) / kTileDim; \
const int outer_size = N * h * w; \
const dim3 dim_block(kTileDim, kBlockRows, 1); \
hipLaunchKernelGGL(( BatchTranspose2DCUDAKernel<T>) \
, dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)), \
dim_block, \
0, \
context->cuda_stream(), N, C, HxW, X, Y); \
}
CAFFE2_SPECIALIZED_CUDA_NCHW2NHWC(float)
#undef CAFFE2_SPECIALIZED_CUDA_NCHW2NHWC
#define CAFFE2_SPECIALIZED_CUDA_NHWC2NCHW(T) \
template <> \
CAFFE2_CUDA_EXPORT void NHWC2NCHW<T, CUDAContext>( \
const int N, \
const int C, \
const int HxW, \
const T* X, \
T* Y, \
CUDAContext* context) { \
const int h = (HxW + kTileDim - 1) / kTileDim; \
const int w = (C + kTileDim - 1) / kTileDim; \
const int outer_size = N * h * w; \
const dim3 dim_block(kTileDim, kBlockRows, 1); \
hipLaunchKernelGGL(( BatchTranspose2DCUDAKernel<T>) \
, dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)), \
dim_block, \
0, \
context->cuda_stream(), N, HxW, C, X, Y); \
}
CAFFE2_SPECIALIZED_CUDA_NHWC2NCHW(float)
#undef CAFFE2_SPECIALIZED_CUDA_NHWC2NCHW
} // namespace math
} // namespace caffe2
| fa6f3aa29062eac04ba05ebe8a51d57a035877a7.cu | // Implements the math functions for GPU.
#include "caffe2/utils/math.h"
#include <cstring>
#include <limits>
#include <numeric>
#include <vector>
#include <cub/block/block_reduce.cuh>
#include <cub/cub.cuh>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/conversions.h"
#include "caffe2/utils/fixed_divisor.h"
// TODO: Move this to fixed_divisor.h
#ifdef __HIPCC__
#define FIXED_DIVISOR int32_t
#define FIXED_DIVISOR_DIV(d, n) (n / d)
#define FIXED_DIVISOR_MOD(d, n) (n % d)
#define FIXED_DIVISOR_DIV_MOD(d, n, q, r) \
do { \
const auto n_copy = n; \
*q = n_copy / d; \
*r = n_copy % d; \
} while (0)
#else // __HIPCC__
#define FIXED_DIVISOR FixedDivisor<int32_t>
#define FIXED_DIVISOR_DIV(d, n) (d.Div(n))
#define FIXED_DIVISOR_MOD(d, n) (d.Mod(n))
#define FIXED_DIVISOR_DIV_MOD(d, n, q, r) (d.DivMod(n, q, r))
#endif // __HIPCC__
#include "caffe2/utils/math_utils.h"
#if THRUST_VERSION >= 100800
#define THRUST_SUPPORTS_PER_THREAD
#endif // THRUST_VERSION >= 100800
namespace caffe2 {
namespace math {
namespace {
#define DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Func, expr) \
template <typename T> \
struct Func##Functor { \
inline __host__ __device__ T \
operator()(const T& lhs, const T& rhs) const { \
return lhs expr rhs; \
} \
}; \
template <> \
struct Func##Functor<at::Half> { \
inline __host__ __device__ at::Half operator()( \
const at::Half& lhs, \
const at::Half& rhs) const { \
return convert::To<float, at::Half>(convert::To<at::Half, float>( \
lhs) expr convert::To<at::Half, float>(rhs)); \
} \
};
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Add, +)
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Sub, -)
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Mul, *)
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Div, /)
#undef DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR
template <typename T>
__global__ void SinCosCUDAKernel(const int N, const T* X, T* S, T* C) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
c10::cuda::compat::sincos(__ldg(X + i), S + i, C + i);
#else
c10::cuda::compat::sincos(X[i], S + i, C + i);
#endif
}
}
template <typename TIn, typename TOut, class BinaryOperator>
__global__ void SimpleBinaryOpCUDAKernel(
const int N,
const BinaryOperator op,
const TIn* A,
const TIn* B,
TOut* C) {
CUDA_1D_KERNEL_LOOP(i, N) {
C[i] = op(A[i], B[i]);
}
}
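// In the rowwise/colwise kernels below, broadcast_1st indicates whether the
// first operand A (true) or the second operand B (false) is the broadcast one.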
template <typename TIn, typename TOut, class BinaryOperator, bool broadcast_1st>
__global__ void RowwiseBinaryOpCUDAKenel(
const int size,
const FIXED_DIVISOR cols,
const BinaryOperator op,
const TIn* A,
const TIn* B,
TOut* C) {
CUDA_1D_KERNEL_LOOP(C_index, size) {
const int j = FIXED_DIVISOR_MOD(cols, C_index);
const int A_index = broadcast_1st ? j : C_index;
const int B_index = broadcast_1st ? C_index : j;
C[C_index] = op(A[A_index], B[B_index]);
}
}
template <typename TIn, typename TOut, class BinaryOperator, bool broadcast_1st>
__global__ void ColwiseBinaryOpCUDAKenel(
const int size,
const FIXED_DIVISOR cols,
const BinaryOperator op,
const TIn* A,
const TIn* B,
TOut* C) {
CUDA_1D_KERNEL_LOOP(C_index, size) {
const int i = FIXED_DIVISOR_DIV(cols, C_index);
const int A_index = broadcast_1st ? i : C_index;
const int B_index = broadcast_1st ? C_index : i;
C[C_index] = op(A[A_index], B[B_index]);
}
}
template <typename TIn, typename TOut, class BinaryOperator, int D>
__global__ void BroadcastBinaryOpCUDAKernel(
const int size,
const SimpleArray<int, D> A_strides,
const SimpleArray<int, D> B_strides,
const SimpleArray<FIXED_DIVISOR, D> C_dims,
const BinaryOperator op,
const TIn* A,
const TIn* B,
TOut* C) {
CUDA_1D_KERNEL_LOOP(C_index, size) {
int A_index = 0;
int B_index = 0;
int C_index_val = C_index;
#pragma unroll
for (int i = D - 1; i >= 0; --i) {
int d;
FIXED_DIVISOR_DIV_MOD(C_dims.data[i], C_index_val, &C_index_val, &d);
A_index += d * A_strides.data[i];
B_index += d * B_strides.data[i];
}
C[C_index] = op(A[A_index], B[B_index]);
}
}
template <typename TIn, typename TOut, class BinaryOperator>
CAFFE2_CUDA_EXPORT void BinaryOpWith2DBroadcasting(
const int rows,
const int cols,
const bool rowwise_broadcast,
const bool broadcast_1st,
const BinaryOperator& op,
const TIn* A,
const TIn* B,
TOut* C,
CUDAContext* context) {
if (rows == 0 || cols == 0) {
return;
}
const int size = rows * cols;
const FIXED_DIVISOR cols_div(cols);
if (rowwise_broadcast) {
if (broadcast_1st) {
RowwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, true>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, cols_div, op, A, B, C);
} else {
RowwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, false>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, cols_div, op, A, B, C);
}
} else {
if (broadcast_1st) {
ColwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, true>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, cols_div, op, A, B, C);
} else {
ColwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, false>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, cols_div, op, A, B, C);
}
}
}
template <typename TIn, typename TOut, class BinaryOperator, int D>
CAFFE2_CUDA_EXPORT void BroadcastBinaryOpImpl(
const int* A_dims,
const int* B_dims,
const int* C_dims,
const BinaryOperator& op,
const TIn* A,
const TIn* B,
TOut* C,
CUDAContext* context) {
SimpleArray<int, D> A_strides_array;
SimpleArray<int, D> B_strides_array;
SimpleArray<FIXED_DIVISOR, D> C_dims_array;
int A_stride = 1;
int B_stride = 1;
for (int i = D - 1; i >= 0; --i) {
if (C_dims[i] == 0) {
return;
}
A_strides_array.data[i] = A_dims[i] == 1 ? 0 : A_stride;
B_strides_array.data[i] = B_dims[i] == 1 ? 0 : B_stride;
A_stride *= A_dims[i];
B_stride *= B_dims[i];
C_dims_array.data[i] = FIXED_DIVISOR(C_dims[i]);
}
const int size =
std::accumulate(C_dims, C_dims + D, 1, std::multiplies<int>());
BroadcastBinaryOpCUDAKernel<TIn, TOut, BinaryOperator, D>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
size, A_strides_array, B_strides_array, C_dims_array, op, A, B, C);
}
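// Dispatch strategy: identical broadcast-expanded shapes use the plain
// elementwise kernel, rowwise/colwise broadcasts use the specialized 2D
// kernels, and everything else falls back to the generic N-D kernel.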
template <typename TIn, typename TOut, class BinaryOperator>
CAFFE2_CUDA_EXPORT void BroadcastBinaryOp(
const int A_ndim,
const int* A_dims,
const int B_ndim,
const int* B_dims,
const BinaryOperator& op,
const TIn* A,
const TIn* B,
TOut* C,
CUDAContext* context) {
const int ndim = std::max(A_ndim, B_ndim);
std::vector<int> A_dims_array(ndim);
std::vector<int> B_dims_array(ndim);
std::vector<int> C_dims_array(ndim);
utils::ComputeBroadcastBinaryOpDims(
A_ndim,
A_dims,
B_ndim,
B_dims,
A_dims_array.data(),
B_dims_array.data(),
C_dims_array.data());
if (A_dims_array == B_dims_array) {
const int size = std::accumulate(
C_dims_array.cbegin(), C_dims_array.cend(), 1, std::multiplies<int>());
SimpleBinaryOpCUDAKernel<TIn, TOut, BinaryOperator>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, op, A, B, C);
return;
}
int rows;
int cols;
bool broadcast_1st;
if (utils::IsRowwiseBroadcastBinaryOp(
ndim,
A_dims_array.data(),
B_dims_array.data(),
&rows,
&cols,
&broadcast_1st)) {
BinaryOpWith2DBroadcasting<TIn, TOut, BinaryOperator>(
rows, cols, true, broadcast_1st, op, A, B, C, context);
return;
}
if (utils::IsColwiseBroadcastBinaryOp(
ndim,
A_dims_array.data(),
B_dims_array.data(),
&rows,
&cols,
&broadcast_1st)) {
BinaryOpWith2DBroadcasting<TIn, TOut, BinaryOperator>(
rows, cols, false, broadcast_1st, op, A, B, C, context);
return;
}
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_3(
ndim,
BroadcastBinaryOpImpl,
TIn,
TOut,
BinaryOperator,
A_dims_array.data(),
B_dims_array.data(),
C_dims_array.data(),
op,
A,
B,
C,
context);
}
} // namespace
#define DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(T, Func, op) \
__global__ void Func##CUDAKernel(const int N, const T* X, T* Y) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
Y[i] = op(X[i]); \
} \
} \
template <> \
CAFFE2_CUDA_EXPORT void Func<T, CUDAContext>( \
const int N, const T* x, T* y, CUDAContext* context) { \
Func##CUDAKernel<<< \
CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, x, y); \
}
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Exp, expf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Log, logf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cos, cosf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Acos, acosf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sin, sinf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Asin, asinf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Tan, tanf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Atan, atanf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sinh, sinhf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cosh, coshf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Tanh, tanhf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Abs, fabsf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqr, utils::Square<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqrt, sqrtf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Rsqrt, rsqrtf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cbrt, cbrtf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cube, utils::Cube<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Cube, utils::Cube<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int32_t,
Cube,
utils::Cube<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int64_t,
Cube,
utils::Cube<std::int64_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(bool, Not, utils::Not)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Neg, utils::Negate<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Neg, utils::Negate<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int32_t,
Neg,
utils::Negate<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int64_t,
Neg,
utils::Negate<std::int64_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sign, utils::Sign<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Sign, utils::Sign<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int32_t,
Sign,
utils::Sign<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int64_t,
Sign,
utils::Sign<std::int64_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Inv, utils::Inv<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Inv, utils::Inv<double>)
#undef DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION
#define CAFFE2_SPECIALIZED_CUDA_SINCOS(T) \
template <> \
CAFFE2_CUDA_EXPORT void SinCos<T, CUDAContext>( \
const int N, const T* x, T* ys, T* yc, CUDAContext* context) { \
SinCosCUDAKernel<<< \
CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, x, ys, yc); \
}
CAFFE2_SPECIALIZED_CUDA_SINCOS(float)
CAFFE2_SPECIALIZED_CUDA_SINCOS(double)
#undef CAFFE2_SPECIALIZED_CUDA_SINCOS
#define DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op) \
template <> \
CAFFE2_CUDA_EXPORT void Func<TIn, CUDAContext>( \
const int N, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
SimpleBinaryOpCUDAKernel<TIn, TOut, Op<TIn>> \
<<<CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, Op<TIn>(), A, B, C); \
}
#define DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, bool, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, bool, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, Func, Op)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(LT, thrust::less)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(GT, thrust::greater)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal)
#undef DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION
#define DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int32_t, std::int32_t, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int64_t, std::int64_t, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, float, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, double, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(at::Half, at::Half, Func, Op)
DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Add, AddFunctor)
DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Sub, SubFunctor)
DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Mul, MulFunctor)
DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Div, DivFunctor)
#undef DEFINE_SIMPLE_CUDA_BINARY_FUNCTION
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, Xor, thrust::bit_xor)
#define DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION(Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int32_t, std::int32_t, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int64_t, std::int64_t, Func, Op)
DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and)
DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or)
DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor)
#undef DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
float,
float,
ElemwiseMax,
thrust::maximum);
#undef DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION
#define DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op) \
template <> \
CAFFE2_CUDA_EXPORT void Rowwise##Func<TIn, CUDAContext, true>( \
const int rows, \
const int cols, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
if (rows == 0 || cols == 0) { \
return; \
} \
const int size = rows * cols; \
const FIXED_DIVISOR cols_div(cols); \
RowwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, true> \
<<<CAFFE_GET_BLOCKS(size), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(size, cols_div, Op<TIn>(), A, B, C); \
} \
template <> \
CAFFE2_CUDA_EXPORT void Rowwise##Func<TIn, CUDAContext, false>( \
const int rows, \
const int cols, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
if (rows == 0 || cols == 0) { \
return; \
} \
const int size = rows * cols; \
const FIXED_DIVISOR cols_div(cols); \
RowwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, false> \
<<<CAFFE_GET_BLOCKS(size), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(size, cols_div, Op<TIn>(), A, B, C); \
} \
template <> \
CAFFE2_CUDA_EXPORT void Colwise##Func<TIn, CUDAContext, true>( \
const int rows, \
const int cols, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
if (rows == 0 || cols == 0) { \
return; \
} \
const int size = rows * cols; \
const FIXED_DIVISOR cols_div(cols); \
ColwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, true> \
<<<CAFFE_GET_BLOCKS(size), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(size, cols_div, Op<TIn>(), A, B, C); \
} \
template <> \
CAFFE2_CUDA_EXPORT void Colwise##Func<TIn, CUDAContext, false>( \
const int rows, \
const int cols, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
if (rows == 0 || cols == 0) { \
return; \
} \
const int size = rows * cols; \
const FIXED_DIVISOR cols_div(cols); \
ColwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, false> \
<<<CAFFE_GET_BLOCKS(size), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(size, cols_div, Op<TIn>(), A, B, C); \
}
#define DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(float, bool, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(double, bool, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(LT, thrust::less)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(GT, thrust::greater)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal)
#undef DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION
#define DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int32_t, std::int32_t, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int64_t, std::int64_t, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(float, float, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(double, double, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(at::Half, at::Half, Func, Op)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Add, AddFunctor)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Sub, SubFunctor)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Mul, MulFunctor)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Div, DivFunctor)
#undef DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and)
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or)
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Xor, thrust::bit_xor)
#define DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int32_t, std::int32_t, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int64_t, std::int64_t, Func, Op)
DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and)
DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or)
DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor)
#undef DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION
#undef DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION
#define DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op) \
template <> \
CAFFE2_CUDA_EXPORT void Func<TIn, CUDAContext>( \
const int A_ndim, \
const int* A_dims, \
const int B_ndim, \
const int* B_dims, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
BroadcastBinaryOp<TIn, TOut, Op<TIn>>( \
A_ndim, A_dims, B_ndim, B_dims, Op<TIn>(), A, B, C, context); \
}
#define DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(float, bool, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(double, bool, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(LT, thrust::less)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(GT, thrust::greater)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal)
#undef DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION
#define DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int32_t, std::int32_t, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int64_t, std::int64_t, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(float, float, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(double, double, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(at::Half, at::Half, Func, Op)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Add, AddFunctor)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Sub, SubFunctor)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Mul, MulFunctor)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Div, DivFunctor)
#undef DEFINE_BROADCAST_CUDA_BINARY_FUNCTION
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and)
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or)
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Xor, thrust::bit_xor)
#define DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int32_t, std::int32_t, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, std::int64_t, Func, Op)
DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and)
DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or)
DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor)
#undef DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION
#undef DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION
#define DELEGATE_REDUCTION_FUNCTION(T, Funcname, func) \
template <> \
CAFFE2_CUDA_EXPORT void Funcname<T, CUDAContext>( \
const int N, \
const T* src, \
T* dst, \
Tensor* scratch_ptr, \
CUDAContext* context) { \
size_t memRequired = 0; \
cub::DeviceReduce::func( \
nullptr, memRequired, src, dst, N, context->cuda_stream()); \
auto buffer_size = \
static_cast<int64_t>((memRequired + sizeof(T) - 1) / sizeof(T)); \
scratch_ptr->Resize(std::vector<int64_t>{buffer_size}); \
cub::DeviceReduce::func( \
static_cast<void*>(scratch_ptr->mutable_data<T>()), \
memRequired, \
src, \
dst, \
N, \
context->cuda_stream()); \
}
DELEGATE_REDUCTION_FUNCTION(float, ReduceMin, Min)
DELEGATE_REDUCTION_FUNCTION(float, ReduceMax, Max)
DELEGATE_REDUCTION_FUNCTION(int32_t, ReduceMax, Max)
DELEGATE_REDUCTION_FUNCTION(int64_t, ReduceMax, Max)
#undef DELEGATE_REDUCTION_FUNCTION
// Caffe2 gemm provides a simpler interface to the gemm functions, with the
// limitation that the data has to be contiguous in memory.
template <>
CAFFE2_CUDA_EXPORT void Gemm<float, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const float* B,
const float beta,
float* C,
CUDAContext* context,
TensorProto::DataType math_type) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
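  // Swapping the operands (and M/N) computes C^T = B^T * A^T in column-major
  // storage, which is exactly the row-major C = A * B the caller expects.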
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const cublasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
const cublasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_ENFORCE(
cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasSgemm(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B,
ldb,
A,
lda,
&beta,
C,
N));
}
template <>
CAFFE2_CUDA_EXPORT void Gemm<at::Half, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int M,
const int N,
const int K,
const float alpha,
const at::Half* A,
const at::Half* B,
const float beta,
at::Half* C,
CUDAContext* context,
TensorProto::DataType math_type) {
#if defined(__HIPCC__) && !ROCBLAS_FP16
CAFFE_THROW("HIP currently does not support FP16 yet.");
#else
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const cublasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
const cublasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
if (math_type == TensorProto_DataType_FLOAT) {
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasSgemmEx(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B,
CUDA_R_16F,
ldb,
A,
CUDA_R_16F,
lda,
&beta,
C,
CUDA_R_16F,
N));
} else if (math_type == TensorProto_DataType_FLOAT16) {
// convert alpha, beta from float -> __half
const __half alpha_fp16 = at::Half(alpha);
const __half beta_fp16 = at::Half(beta);
// call cublasHgemm
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasHgemm(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha_fp16,
(const __half*)B,
ldb,
(const __half*)A,
lda,
&beta_fp16,
(__half*)C,
N));
} else {
// fail
CAFFE_THROW("Unsupported math type");
}
#endif
}
template <>
CAFFE2_CUDA_EXPORT void BiasCHW<float, CUDAContext>(
const float* bias,
const float* bias_multiplier,
const int bias_channels,
const int image_size,
float* image,
CUDAContext* context) {
Gemm<float, CUDAContext>(
CblasNoTrans,
CblasNoTrans,
bias_channels,
image_size,
1,
1,
bias,
bias_multiplier,
1,
image,
context);
}
template <>
CAFFE2_CUDA_EXPORT void GemmBatched<float, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float** A,
const float** B,
const float beta,
float** C,
CUDAContext* context,
TensorProto::DataType math_type) {
#if __CUDACC_VER_MAJOR__ < 8 || defined(__HIPCC__)
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
Gemm<float, CUDAContext>(
trans_A,
trans_B,
M,
N,
K,
alpha,
A[i],
B[i],
beta,
C[i],
context,
math_type);
}
#else
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const int ldc = N;
const cublasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
const cublasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
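  // cublasSgemmBatched expects the arrays of matrix pointers to reside in
  // device memory, so copy the host pointer arrays to the device first.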
thrust::device_vector<const float*> A_device(A, A + batch_size);
thrust::device_vector<const float*> B_device(B, B + batch_size);
thrust::device_vector<float*> C_device(C, C + batch_size);
CUBLAS_ENFORCE(
cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasSgemmBatched(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B_device.data().get(),
ldb,
A_device.data().get(),
lda,
&beta,
C_device.data().get(),
ldc,
batch_size));
#endif
}
template <>
CAFFE2_CUDA_EXPORT void GemmStridedBatched<float, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const int A_stride,
const float* B,
const int B_stride,
const float beta,
float* C,
const int C_stride,
CUDAContext* context,
TensorProto::DataType math_type) {
#if __CUDACC_VER_MAJOR__ < 8 && !defined(__HIPCC__)
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
Gemm<float, CUDAContext>(
trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type);
A += A_stride;
B += B_stride;
C += C_stride;
}
#else
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const int ldc = N;
const cublasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
const cublasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_ENFORCE(
cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasSgemmStridedBatched(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B,
ldb,
B_stride,
A,
lda,
A_stride,
&beta,
C,
ldc,
C_stride,
batch_size));
#endif
}
template <>
CAFFE2_CUDA_EXPORT void GemmBatched<at::Half, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const at::Half** A,
const at::Half** B,
const float beta,
at::Half** C,
CUDAContext* context,
TensorProto::DataType math_type) {
#if defined(__HIPCC__) && !ROCBLAS_FP16
CAFFE_THROW("HIP currently does not support FP16 yet.");
#else
#if __CUDACC_VER_MAJOR__ < 9
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
Gemm<at::Half, CUDAContext>(
trans_A,
trans_B,
M,
N,
K,
alpha,
A[i],
B[i],
beta,
C[i],
context,
math_type);
}
#else
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const int ldc = N;
const cublasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
const cublasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
if (math_type == TensorProto_DataType_FLOAT) {
#if CUDA_VERSION < 9010
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
Gemm<at::Half, CUDAContext>(
trans_A,
trans_B,
M,
N,
K,
alpha,
A[i],
B[i],
beta,
C[i],
context,
math_type);
}
#else
thrust::device_vector<const void*> A_device(A, A + batch_size);
thrust::device_vector<const void*> B_device(B, B + batch_size);
thrust::device_vector<void*> C_device(C, C + batch_size);
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasGemmBatchedEx(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B_device.data().get(),
CUDA_R_16F,
ldb,
A_device.data().get(),
CUDA_R_16F,
lda,
&beta,
C_device.data().get(),
CUDA_R_16F,
ldc,
batch_size,
CUDA_R_32F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
#endif
} else if (math_type == TensorProto_DataType_FLOAT16) {
// Convert alpha, beta from float -> __half
const __half alpha_fp16 = at::Half(alpha);
const __half beta_fp16 = at::Half(beta);
std::vector<const __half*> A_array(batch_size);
std::vector<const __half*> B_array(batch_size);
std::vector<__half*> C_array(batch_size);
for (int i = 0; i < batch_size; ++i) {
A_array[i] = reinterpret_cast<const __half*>(A[i]);
B_array[i] = reinterpret_cast<const __half*>(B[i]);
C_array[i] = reinterpret_cast<__half*>(C[i]);
}
thrust::device_vector<const __half*> A_device(
A_array.cbegin(), A_array.cend());
thrust::device_vector<const __half*> B_device(
B_array.cbegin(), B_array.cend());
thrust::device_vector<__half*> C_device(C_array.cbegin(), C_array.cend());
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasHgemmBatched(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha_fp16,
B_device.data().get(),
ldb,
A_device.data().get(),
lda,
&beta_fp16,
C_device.data().get(),
ldc,
batch_size));
} else {
CAFFE_THROW("Unsupported math type");
}
#endif
#endif
}
template <>
CAFFE2_CUDA_EXPORT void GemmStridedBatched<at::Half, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const at::Half* A,
const int A_stride,
const at::Half* B,
const int B_stride,
const float beta,
at::Half* C,
const int C_stride,
CUDAContext* context,
TensorProto::DataType math_type) {
#if defined(__HIPCC__) && !ROCBLAS_FP16
CAFFE_THROW("HIP currently does not support FP16 yet.");
#else
#if __CUDACC_VER_MAJOR__ < 8
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
Gemm<at::Half, CUDAContext>(
trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type);
A += A_stride;
B += B_stride;
C += C_stride;
}
#else
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const int ldc = N;
const cublasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
const cublasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
if (math_type == TensorProto_DataType_FLOAT) {
#if CUDA_VERSION < 9010
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
Gemm<at::Half, CUDAContext>(
trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type);
A += A_stride;
B += B_stride;
C += C_stride;
}
#else
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasGemmStridedBatchedEx(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B,
CUDA_R_16F,
ldb,
B_stride,
A,
CUDA_R_16F,
lda,
A_stride,
&beta,
C,
CUDA_R_16F,
ldc,
C_stride,
batch_size,
CUDA_R_32F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
#endif
} else if (math_type == TensorProto_DataType_FLOAT16) {
// Convert alpha, beta from float -> __half
const __half alpha_fp16 = at::Half(alpha);
const __half beta_fp16 = at::Half(beta);
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasHgemmStridedBatched(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha_fp16,
(const __half*)B,
ldb,
B_stride,
(const __half*)A,
lda,
A_stride,
&beta_fp16,
(__half*)C,
ldc,
C_stride,
batch_size));
} else {
CAFFE_THROW("Unsupported math type");
}
#endif
#endif
}
#if CUDA_VERSION >= 9000
// No TensorCore-specific handling for FP32, but the specialization is required;
// defer to the default CUDA engine.
template <>
CAFFE2_CUDA_EXPORT void Gemm<float, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const float* B,
const float beta,
float* C,
CUDAContext* context,
TensorProto::DataType math_type) {
return Gemm<float, CUDAContext>(
trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type);
}
template <>
CAFFE2_CUDA_EXPORT void Gemm<at::Half, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int M,
const int N,
const int K,
const float alpha,
const at::Half* A,
const at::Half* B,
const float beta,
at::Half* C,
CUDAContext* context,
TensorProto::DataType math_type) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const cublasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
const cublasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
// enable TensorCore for this call on this handle
if (TensorCoreAvailable()) {
CUBLAS_ENFORCE(
cublasSetMathMode(context->cublas_handle(), CUBLAS_TENSOR_OP_MATH));
}
CUBLAS_ENFORCE(
cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasGemmEx(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B,
CUDA_R_16F,
ldb,
A,
CUDA_R_16F,
lda,
&beta,
C,
CUDA_R_16F,
N,
CUDA_R_32F,
CUBLAS_GEMM_DFALT_TENSOR_OP));
// Now disable TensorCore math for subsequent calls to this handle
if (TensorCoreAvailable()) {
CUBLAS_ENFORCE(
cublasSetMathMode(context->cublas_handle(), CUBLAS_DEFAULT_MATH));
}
}
template <>
CAFFE2_CUDA_EXPORT void
GemmStridedBatched<float, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const int A_stride,
const float* B,
const int B_stride,
const float beta,
float* C,
const int C_stride,
CUDAContext* context,
TensorProto::DataType math_type) {
return GemmStridedBatched<float, CUDAContext, DefaultEngine>(
trans_A,
trans_B,
batch_size,
M,
N,
K,
alpha,
A,
A_stride,
B,
B_stride,
beta,
C,
C_stride,
context,
math_type);
}
template <>
CAFFE2_CUDA_EXPORT void
GemmStridedBatched<at::Half, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const at::Half* A,
const int A_stride,
const at::Half* B,
const int B_stride,
const float beta,
at::Half* C,
const int C_stride,
CUDAContext* context,
TensorProto::DataType math_type) {
return GemmStridedBatched<at::Half, CUDAContext, DefaultEngine>(
trans_A,
trans_B,
batch_size,
M,
N,
K,
alpha,
A,
A_stride,
B,
B_stride,
beta,
C,
C_stride,
context,
math_type);
}
#endif // CUDA_VERSION >= 9000
template <>
CAFFE2_CUDA_EXPORT void GemmEx<float, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const int lda,
const float* B,
const int ldb,
const float beta,
float* C,
const int ldc,
CUDAContext* context) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const cublasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
const cublasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_ENFORCE(
cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasSgemm(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B,
ldb,
A,
lda,
&beta,
C,
ldc));
}
template <>
CAFFE2_CUDA_EXPORT void Gemv<float, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const int M,
const int N,
const float alpha,
const float* A,
const float* x,
const float beta,
float* y,
CUDAContext* context,
TensorProto::DataType math_type) {
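  // The row-major A is seen by cuBLAS as its transpose, so the transpose flag
  // is flipped relative to the CBLAS argument.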
const cublasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_ENFORCE(
cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasSgemv(
context->cublas_handle(),
cu_trans_A,
N,
M,
&alpha,
A,
N,
x,
1,
&beta,
y,
1));
}
// Batched Add variants
namespace {
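// Accumulates `batch` stripes of length N (spaced `stripe` elements apart in
// `first`) into Y, accumulating through float for low-precision types.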
template <typename T>
__global__ void AddStripedBatchKernel(
const int N,
const T* first,
T* Y,
const int stripe,
const int batch) {
for (int j = 0; j < batch; j++) {
const T* x = first + j * stripe;
CUDA_1D_KERNEL_LOOP(i, N) {
float tmpY = convert::To<T, float>(Y[i]);
tmpY += convert::To<T, float>(x[i]);
Y[i] = convert::To<float, T>(tmpY);
}
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(T) \
template <> \
CAFFE2_CUDA_EXPORT void AddStripedBatch<T, CUDAContext>( \
const int N, \
const T* first, \
T* Y, \
const int stripe, \
const int batch, \
CUDAContext* context) { \
AddStripedBatchKernel<T> \
<<<CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, first, Y, stripe, batch); \
}
CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(float);
CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(at::Half);
#undef CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH
template <>
CAFFE2_CUDA_EXPORT void Gemv<at::Half, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const int M,
const int N,
const float alpha,
const at::Half* A,
const at::Half* x,
const float beta,
at::Half* y,
CUDAContext* context,
TensorProto::DataType math_type) {
#if defined(__HIPCC__) && !ROCBLAS_FP16
CAFFE_THROW("HIP currently does not support FP16 yet.");
#else
const cublasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
  // Express the GEMV as a single-column GEMM so we can call
  // cublasSgemmEx / cublasHgemm.
const int m = (cu_trans_A == CUBLAS_OP_N) ? N : M;
const int k = (cu_trans_A == CUBLAS_OP_N) ? M : N;
const int lda = (cu_trans_A == CUBLAS_OP_N) ? m : k;
const int ldc = m;
if (math_type == TensorProto_DataType_FLOAT) {
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasSgemmEx(
context->cublas_handle(),
cu_trans_A,
CUBLAS_OP_N,
m,
1,
k,
&alpha,
A,
CUDA_R_16F,
lda,
x,
CUDA_R_16F,
k,
&beta,
y,
CUDA_R_16F,
ldc));
} else if (math_type == TensorProto_DataType_FLOAT16) {
const __half alpha_fp16 = at::Half(alpha);
const __half beta_fp16 = at::Half(beta);
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasHgemm(
context->cublas_handle(),
cu_trans_A,
CUBLAS_OP_N,
m,
1,
k,
&alpha_fp16,
(const __half*)A,
lda,
(const __half*)x,
k,
&beta_fp16,
(__half*)y,
ldc));
} else {
// fail
CAFFE_THROW("Unsupported math type");
}
#endif
}
namespace {
template <typename T>
__global__ void SetKernel(const int N, const T alpha, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = alpha;
}
}
} // namespace
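// Set uses cudaMemsetAsync as a fast path when alpha == 0 and falls back to a
// simple fill kernel otherwise.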
#define CAFFE2_SPECIALIZED_CUDA_SET(T) \
template <> \
CAFFE2_CUDA_API void Set<T, CUDAContext>( \
const size_t N, const T alpha, T* Y, CUDAContext* context) { \
if (N == 0) { \
return; \
} \
if (alpha == T(0)) { \
cudaMemsetAsync(Y, 0, sizeof(T) * N, context->cuda_stream()); \
} else { \
SetKernel<T> \
<<<CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, alpha, Y); \
} \
}
CAFFE2_SPECIALIZED_CUDA_SET(float);
CAFFE2_SPECIALIZED_CUDA_SET(double);
CAFFE2_SPECIALIZED_CUDA_SET(bool);
CAFFE2_SPECIALIZED_CUDA_SET(int8_t);
CAFFE2_SPECIALIZED_CUDA_SET(int16_t);
CAFFE2_SPECIALIZED_CUDA_SET(int);
CAFFE2_SPECIALIZED_CUDA_SET(int64_t);
CAFFE2_SPECIALIZED_CUDA_SET(char);
CAFFE2_SPECIALIZED_CUDA_SET(uint8_t);
CAFFE2_SPECIALIZED_CUDA_SET(uint16_t);
#undef CAFFE2_SPECIALIZED_CUDA_SET
template <>
CAFFE2_CUDA_EXPORT void Set<at::Half, CUDAContext>(
const size_t N,
const at::Half alpha,
at::Half* Y,
CUDAContext* context) {
if (N > 0) {
SetKernel<at::Half>
<<<CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, alpha, Y);
}
}
namespace {
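// curand's uniform generators produce samples in (0.0, 1.0]; UniformShift
// linearly rescales them into the caller's requested range.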
template <typename T>
__global__ void
UniformShift(const size_t N, const float min, const float max, T* x) {
float scale = max - min;
CUDA_1D_KERNEL_LOOP(i, N) {
x[i] = convert::To<float, T>(convert::To<T, float>(x[i]) * scale + min);
}
}
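// Maps raw 32-bit random integers into [min, max] via modulo; this is slightly
// biased whenever the range does not evenly divide 2^32.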
__global__ void
UniformIntFit(const size_t N, const int min, const int max, unsigned int* x) {
int* x_int = reinterpret_cast<int*>(x);
int range = (max - min + 1);
CUDA_1D_KERNEL_LOOP(i, N) {
x_int[i] = min + static_cast<int>(x[i] % range);
}
}
} // namespace
template <>
CAFFE2_CUDA_EXPORT void RandUniform<float, CUDAContext>(
const size_t n,
const float min,
const float max,
float* r,
CUDAContext* context) {
CURAND_ENFORCE(curandGenerateUniform(context->curand_generator(), r, n));
UniformShift<float>
<<<CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(n, min, max, r);
}
template <>
CAFFE2_CUDA_EXPORT void RandUniform<double, CUDAContext>(
const size_t n,
const double min,
const double max,
double* r,
CUDAContext* context) {
CURAND_ENFORCE(
curandGenerateUniformDouble(context->curand_generator(), r, n));
UniformShift<double>
<<<CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(n, min, max, r);
}
template <>
CAFFE2_CUDA_EXPORT void RandUniform<int, CUDAContext>(
const size_t n,
const int min,
const int max,
int* r,
CUDAContext* context) {
CURAND_ENFORCE(curandGenerate(
context->curand_generator(), reinterpret_cast<unsigned int*>(r), n));
UniformIntFit<<<
CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
n, min, max, reinterpret_cast<unsigned int*>(r));
}
template <typename T>
size_t HandleOddLengthRandGaussian(
const size_t n,
const T mean,
const T std,
T* r,
CUDAContext* context) {
if (n % 2 == 1) {
std::default_random_engine generator;
std::normal_distribution<T> distribution(mean, std);
const T random_value = distribution(generator);
Set<T, CUDAContext>(1, random_value, r + (n - 1), context);
return n - 1;
}
return n;
}
template <>
CAFFE2_CUDA_EXPORT void RandGaussian<float, CUDAContext>(
const size_t n,
const float mean,
const float std,
float* r,
CUDAContext* context) {
// If n is odd, we add a random Gaussian value at the end manually
// and generate n-1 random values using curandGenerateNormal.
// curandGenerateNormal requires n to be even.
const size_t even_n =
HandleOddLengthRandGaussian<float>(n, mean, std, r, context);
CURAND_ENFORCE(
curandGenerateNormal(context->curand_generator(), r, even_n, mean, std));
}
template <>
CAFFE2_CUDA_EXPORT void RandGaussian<double, CUDAContext>(
const size_t n,
const double mean,
const double std,
double* r,
CUDAContext* context) {
const size_t even_n =
HandleOddLengthRandGaussian<double>(n, mean, std, r, context);
CURAND_ENFORCE(curandGenerateNormalDouble(
context->curand_generator(), r, even_n, mean, std));
}
template <>
CAFFE2_CUDA_EXPORT void Dot<float, CUDAContext>(
const int n,
const float* a,
const float* b,
float* y,
CUDAContext* context) {
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE));
CUBLAS_ENFORCE(cublasSdot(context->cublas_handle(), n, a, 1, b, 1, y));
}
template <>
CAFFE2_CUDA_EXPORT void Dot<at::Half, CUDAContext>(
const int n,
const at::Half* a,
const at::Half* b,
at::Half* y,
CUDAContext* context) {
#if defined(__HIPCC__) && !ROCBLAS_FP16
CAFFE_THROW("HIP currently does not support FP16 yet.");
#else
// execute with 32-bit math
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE));
CUBLAS_ENFORCE(cublasDotEx(
context->cublas_handle(),
n,
a,
CUDA_R_16F,
1,
b,
CUDA_R_16F,
1,
y,
CUDA_R_16F,
CUDA_R_32F));
#endif
}
// A previous version of caffe2 used Thrust but it turns out that thrust
// reduction has an implicit scratch space allocation and deallocation, which
// may interfere with NCCL and create a deadlock. Hence we are using a custom
// reduction here.
#define SUM_KERNEL_NTHREADS 128
template <typename T>
__global__ void SumKernel(const int N, const T* X, T* Y, bool square) {
const int idx = threadIdx.x;
__shared__ float reduction_buffer[SUM_KERNEL_NTHREADS];
reduction_buffer[idx] = 0;
// A multilevel reduction.
// N -> 128
if (!square) {
for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) {
reduction_buffer[idx] += convert::To<T, float>(X[i]);
}
} else {
for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) {
float Xi = convert::To<T, float>(X[i]);
reduction_buffer[idx] += Xi * Xi;
}
}
__syncthreads();
// 128 -> 32
if (idx < 32) {
reduction_buffer[idx] += reduction_buffer[idx + 32] +
reduction_buffer[idx + 64] + reduction_buffer[idx + 96];
}
__syncthreads();
// 32 -> 1
if (idx == 0) {
float tmp = 0;
for (int i = 0; i < 32; ++i) {
tmp += reduction_buffer[i];
}
*Y = convert::To<float, T>(tmp);
}
}
// According to the benchmarks script
// caffe2/caffe2/experiments/python/device_reduce_sum_bench.py,
// device reduce is slower for N <= 10000.
#define DEVICE_REDUCE_SIZE_THRESHOLD 10000
namespace {
template <typename T>
__global__ void SumConvertKernel(float* sum, T* dest) {
*dest = convert::To<float, T>(*sum);
}
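// Calls cub::DeviceReduce::Sum twice: once with a null workspace to query the
// required temporary storage size, then again to run the actual reduction.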
template <typename T, typename IterT>
CAFFE2_CUDA_EXPORT void SumGenericIter(
const int N,
IterT it,
T*& dest,
CUDAContext* context,
Tensor* scratch_ptr) {
size_t memRequired = 0;
cub::DeviceReduce::Sum(
nullptr, memRequired, it, dest, N, context->cuda_stream());
auto buffer_size =
static_cast<int64_t>((memRequired + sizeof(T) - 1) / sizeof(T));
if (!dest) {
// allocate one more T at the end of scratch for dest
scratch_ptr->Resize(std::vector<int64_t>{buffer_size + 1});
dest = scratch_ptr->template mutable_data<T>() + buffer_size;
} else {
scratch_ptr->Resize(std::vector<int64_t>{buffer_size});
}
cub::DeviceReduce::Sum(
static_cast<void*>(scratch_ptr->template mutable_data<T>()),
memRequired,
it,
dest,
N,
context->cuda_stream());
}
} // namespace
template <>
CAFFE2_CUDA_EXPORT void Sum<float, CUDAContext>(
const int N,
const float* x,
float* y,
CUDAContext* context,
Tensor* scratch_ptr) {
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
SumGenericIter<float>(N, x, y, context, scratch_ptr);
} else {
SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>(
N, x, y, false);
}
}
template <>
CAFFE2_CUDA_EXPORT void Sum<int32_t, CUDAContext>(
const int N,
const int32_t* x,
int32_t* y,
CUDAContext* context,
Tensor* scratch_ptr) {
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
SumGenericIter<int32_t>(N, x, y, context, scratch_ptr);
} else {
SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>(
N, x, y, false);
}
}
namespace {
template <typename T>
struct FloatTransform {
inline __host__ __device__ float operator()(const T v) const {
return convert::To<T, float>(v);
}
};
} // namespace
#define CAFFE2_MATH_SUM_FUNC(T) \
template <> \
CAFFE2_CUDA_EXPORT void Sum<T, CUDAContext>( \
const int N, \
const T* x, \
T* y, \
CUDAContext* context, \
Tensor* scratch_ptr) { \
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { \
FloatTransform<T> transform; \
cub::TransformInputIterator<float, FloatTransform<T>, const T*> it( \
x, transform); \
float* sum = nullptr; \
SumGenericIter<float>(N, it, sum, context, scratch_ptr); \
SumConvertKernel<<<1, 1, 0, context->cuda_stream()>>>(sum, y); \
} else { \
SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>( \
N, x, y, false); \
} \
}
CAFFE2_MATH_SUM_FUNC(at::Half)
#undef CAFFE2_MATH_SUM_FUNC
namespace {
template <typename T>
struct SqrTransform {
inline __host__ __device__ T operator()(const T v) const {
return v * v;
}
};
} // namespace
template <>
CAFFE2_CUDA_EXPORT void SumSqr<float, CUDAContext>(
const int N,
const float* x,
float* y,
CUDAContext* context,
Tensor* scratch_ptr) {
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
SqrTransform<float> transform;
cub::TransformInputIterator<float, SqrTransform<float>, const float*> it(
x, transform);
SumGenericIter<float>(N, it, y, context, scratch_ptr);
} else {
SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>(
N, x, y, true);
}
}
#define CAFFE2_MATH_SUMSQR_FUNC(T) \
template <> \
CAFFE2_CUDA_EXPORT void SumSqr<T, CUDAContext>( \
const int N, \
const T* x, \
T* y, \
CUDAContext* context, \
Tensor* scratch_ptr) { \
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { \
FloatTransform<T> float_transform; \
cub::TransformInputIterator<float, FloatTransform<T>, const T*> \
float_it(x, float_transform); \
SqrTransform<float> sqr_transform; \
cub::TransformInputIterator< \
float, \
SqrTransform<float>, \
decltype(float_it)> \
it(float_it, sqr_transform); \
float* sum = nullptr; \
SumGenericIter<float>(N, it, sum, context, scratch_ptr); \
SumConvertKernel<<<1, 1, 0, context->cuda_stream()>>>(sum, y); \
} else { \
SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>( \
N, x, y, true); \
} \
}
CAFFE2_MATH_SUMSQR_FUNC(at::Half)
#undef CAFFE2_MATH_SUMSQR_FUNC
#undef DEVICE_REDUCE_SIZE_THRESHOLD
namespace {
template <typename T>
__global__ void
SelectKernel(const int N, const int D, const T* x, const int* idx, T* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = x[i * D + idx[i]];
}
}
} // namespace
template <>
CAFFE2_CUDA_EXPORT void Select<float, CUDAContext>(
const int N,
const int D,
const float* x,
const int* idx,
float* y,
CUDAContext* context) {
SelectKernel<float>
<<<CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, D, x, idx, y);
}
template <>
CAFFE2_CUDA_EXPORT void Select<at::Half, CUDAContext>(
const int N,
const int D,
const at::Half* x,
const int* idx,
at::Half* y,
CUDAContext* context) {
SelectKernel<at::Half>
<<<CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, D, x, idx, y);
}
namespace {
template <typename TAlpha, typename TData>
__global__ void
ScaleCUDAKernel(const int n, const TAlpha alpha, const TData* x, TData* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
#if __CUDA_ARCH__ >= 350 || defined(__HIPCC__)
y[i] = __ldg(x + i) * static_cast<TData>(alpha);
#else
y[i] = x[i] * static_cast<TData>(alpha);
#endif
}
}
template <typename TAlpha, typename TData>
__global__ void
ScaleCUDAKernel(const int n, const TAlpha* alpha, const TData* x, TData* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
#if __CUDA_ARCH__ >= 350 || defined(__HIPCC__)
y[i] = __ldg(x + i) * static_cast<TData>(__ldg(alpha));
#else
y[i] = x[i] * static_cast<TData>(*alpha);
#endif
}
}
template <typename T>
__global__ void PowKernel(const int n, const T* x, const T exponent, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = powf(x[i], exponent);
}
}
} // namespace
template <>
CAFFE2_CUDA_EXPORT void Powx<float, CUDAContext>(
const int N,
const float* a,
const float b,
float* y,
CUDAContext* context) {
PowKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, a, b, y);
}
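// The Scale implementations come in two flavors: one takes alpha by value on
// the host (CUBLAS_POINTER_MODE_HOST) and one takes a device pointer to alpha
// (CUBLAS_POINTER_MODE_DEVICE), so the scaling factor may itself live on the
// GPU and be produced by an earlier kernel without a host synchronization.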
#define DELEGATE_CUBLAS_SCALE_FUNCTION(TAlpha, TData, CuBLASFunc) \
template <> \
CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>( \
const int N, \
const TAlpha alpha, \
const TData* x, \
TData* y, \
CUDAContext* context) { \
if (N == 0) { \
return; \
} \
if (x != y) { \
cudaMemcpyAsync( \
y, \
x, \
sizeof(TData) * N, \
cudaMemcpyDeviceToDevice, \
context->cuda_stream()); \
} \
if (alpha != TAlpha(1)) { \
CUBLAS_ENFORCE(cublasSetPointerMode( \
context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); \
CUBLAS_ENFORCE(CuBLASFunc(context->cublas_handle(), N, &alpha, y, 1)); \
} \
} \
template <> \
CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>( \
const int N, \
const TAlpha* alpha, \
const TData* x, \
TData* y, \
CUDAContext* context) { \
if (N == 0) { \
return; \
} \
if (x != y) { \
cudaMemcpyAsync( \
y, \
x, \
sizeof(TData) * N, \
cudaMemcpyDeviceToDevice, \
context->cuda_stream()); \
} \
CUBLAS_ENFORCE(cublasSetPointerMode( \
context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE)); \
CUBLAS_ENFORCE(CuBLASFunc(context->cublas_handle(), N, alpha, y, 1)); \
}
DELEGATE_CUBLAS_SCALE_FUNCTION(float, float, cublasSscal)
DELEGATE_CUBLAS_SCALE_FUNCTION(double, double, cublasDscal)
#undef DELEGATE_CUBLAS_SCALE_FUNCTION
#define CAFFE2_SPECIALIZED_CUDA_SCALE(TAlpha, TData) \
template <> \
CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>( \
const int N, \
const TAlpha alpha, \
const TData* x, \
TData* y, \
CUDAContext* context) { \
if (N == 0) { \
return; \
} \
if (alpha == TAlpha(1)) { \
if (x != y) { \
cudaMemcpyAsync( \
y, \
x, \
sizeof(TData) * N, \
cudaMemcpyDeviceToDevice, \
context->cuda_stream()); \
} \
return; \
} \
ScaleCUDAKernel<TAlpha, TData> \
<<<CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, alpha, x, y); \
} \
template <> \
CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>( \
const int N, \
const TAlpha* alpha, \
const TData* x, \
TData* y, \
CUDAContext* context) { \
if (N == 0) { \
return; \
} \
ScaleCUDAKernel<TAlpha, TData> \
<<<CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, alpha, x, y); \
}
CAFFE2_SPECIALIZED_CUDA_SCALE(std::int32_t, std::int32_t)
CAFFE2_SPECIALIZED_CUDA_SCALE(std::int64_t, std::int64_t)
#ifndef __HIPCC__
template <>
CAFFE2_CUDA_EXPORT void Scale<at::Half, at::Half, CUDAContext>(
const int N,
const at::Half alpha,
const at::Half* x,
at::Half* y,
CUDAContext* context) {
if (N == 0) {
return;
}
if (x != y) {
cudaMemcpyAsync(
y,
x,
sizeof(at::Half) * N,
cudaMemcpyDeviceToDevice,
context->cuda_stream());
}
CUBLAS_ENFORCE(
cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasScalEx(
context->cublas_handle(),
N,
&alpha,
CUDA_R_16F,
y,
CUDA_R_16F,
1,
CUDA_R_32F));
}
template <>
CAFFE2_CUDA_EXPORT void Scale<at::Half, at::Half, CUDAContext>(
const int N,
const at::Half* alpha,
const at::Half* x,
at::Half* y,
CUDAContext* context) {
if (N == 0) {
return;
}
if (x != y) {
cudaMemcpyAsync(
y,
x,
sizeof(at::Half) * N,
cudaMemcpyDeviceToDevice,
context->cuda_stream());
}
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE));
CUBLAS_ENFORCE(cublasScalEx(
context->cublas_handle(),
N,
alpha,
CUDA_R_16F,
y,
CUDA_R_16F,
1,
CUDA_R_32F));
}
template <>
CAFFE2_CUDA_EXPORT void Scale<float, at::Half, CUDAContext>(
const int N,
const float alpha,
const at::Half* x,
at::Half* y,
CUDAContext* context) {
if (N == 0) {
return;
}
if (x != y) {
cudaMemcpyAsync(
y,
x,
sizeof(at::Half) * N,
cudaMemcpyDeviceToDevice,
context->cuda_stream());
}
if (alpha != 1.0f) {
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasScalEx(
context->cublas_handle(),
N,
&alpha,
CUDA_R_32F,
y,
CUDA_R_16F,
1,
CUDA_R_32F));
}
}
template <>
CAFFE2_CUDA_EXPORT void Scale<float, at::Half, CUDAContext>(
const int N,
const float* alpha,
const at::Half* x,
at::Half* y,
CUDAContext* context) {
if (N == 0) {
return;
}
if (x != y) {
cudaMemcpyAsync(
y,
x,
sizeof(at::Half) * N,
cudaMemcpyDeviceToDevice,
context->cuda_stream());
}
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE));
CUBLAS_ENFORCE(cublasScalEx(
context->cublas_handle(),
N,
alpha,
CUDA_R_32F,
y,
CUDA_R_16F,
1,
CUDA_R_32F));
}
#else // __HIPCC__
namespace {
template <>
__global__ void ScaleCUDAKernel<at::Half, at::Half>(
const int n,
const at::Half alpha,
const at::Half* x,
at::Half* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = convert::To<float, at::Half>(
convert::To<at::Half, float>(x[i]) *
convert::To<at::Half, float>(alpha));
}
}
template <>
__global__ void ScaleCUDAKernel<at::Half, at::Half>(
const int n,
const at::Half* alpha,
const at::Half* x,
at::Half* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = convert::To<float, at::Half>(
convert::To<at::Half, float>(x[i]) *
convert::To<at::Half, float>(*alpha));
}
}
template <>
__global__ void ScaleCUDAKernel<float, at::Half>(
const int n,
const float alpha,
const at::Half* x,
at::Half* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = convert::To<float, at::Half>(
convert::To<at::Half, float>(x[i]) * alpha);
}
}
template <>
__global__ void ScaleCUDAKernel<float, at::Half>(
const int n,
const float* alpha,
const at::Half* x,
at::Half* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = convert::To<float, at::Half>(
convert::To<at::Half, float>(x[i]) * (*alpha));
}
}
} // namespace
CAFFE2_SPECIALIZED_HIP_SCALE(at::Half, at::Half)
CAFFE2_SPECIALIZED_HIP_SCALE(float, at::Half)
#endif // __HIPCC__
#undef CAFFE2_SPECIALIZED_CUDA_SCALE
template <>
CAFFE2_CUDA_EXPORT void Axpy<float, CUDAContext>(
const int N,
const float alpha,
const float* X,
float* Y,
CUDAContext* context) {
CUBLAS_ENFORCE(
cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasSaxpy(context->cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
CAFFE2_CUDA_EXPORT void Axpy<double, CUDAContext>(
const int N,
const float alpha,
const double* X,
double* Y,
CUDAContext* context) {
double alpha_d{alpha};
CUBLAS_ENFORCE(
cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(
cublasDaxpy(context->cublas_handle(), N, &alpha_d, X, 1, Y, 1));
}
template <>
CAFFE2_CUDA_EXPORT void Axpy<at::Half, CUDAContext>(
const int N,
const float alpha,
const at::Half* X,
at::Half* Y,
CUDAContext* context) {
#if defined(__HIPCC__) && !ROCBLAS_FP16
CAFFE_THROW("HIP currently does not support FP16 yet.");
#else
CUBLAS_ENFORCE(
cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasAxpyEx(
context->cublas_handle(),
N,
&alpha,
CUDA_R_32F,
X,
CUDA_R_16F,
1,
Y,
CUDA_R_16F,
1,
CUDA_R_32F));
#endif
}
template <>
CAFFE2_CUDA_EXPORT void Axpy<float, CUDAContext>(
const int N,
const float* alpha,
const float* X,
float* Y,
CUDAContext* context) {
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE));
CUBLAS_ENFORCE(cublasSaxpy(context->cublas_handle(), N, alpha, X, 1, Y, 1));
}
template <>
CAFFE2_CUDA_EXPORT void Axpy<at::Half, CUDAContext>(
const int N,
const float* alpha,
const at::Half* X,
at::Half* Y,
CUDAContext* context) {
#if defined(__HIPCC__) && !ROCBLAS_FP16
CAFFE_THROW("HIP currently does not support FP16 yet.");
#else
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE));
CUBLAS_ENFORCE(cublasAxpyEx(
context->cublas_handle(),
N,
alpha,
CUDA_R_32F,
X,
CUDA_R_16F,
1,
Y,
CUDA_R_16F,
1,
CUDA_R_32F));
#endif
}
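// Axpby computes y = a * x + b * y elementwise. As with Scale, the
// coefficients can be passed by value or as device pointers, and the at::Half
// data specializations convert the operands to float for the arithmetic.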
namespace {
template <typename TCoeff, typename TData>
__global__ void AxpbyCUDAKernel(
const int N,
const TCoeff a,
const TData* x,
const TCoeff b,
TData* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
y[i] = __ldg(x + i) * a + y[i] * b;
#else
y[i] = x[i] * a + y[i] * b;
#endif
}
}
template <>
__global__ void AxpbyCUDAKernel<float, at::Half>(
const int N,
const float a,
const at::Half* x,
const float b,
at::Half* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = convert::To<float, at::Half>(
convert::To<at::Half, float>(x[i]) * a +
convert::To<at::Half, float>(y[i]) * b);
}
}
template <typename TCoeff, typename TData>
__global__ void AxpbyCUDAKernel(
const int N,
const TCoeff* a,
const TData* x,
const TCoeff* b,
TData* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
y[i] = __ldg(x + i) * __ldg(a) + y[i] * __ldg(b);
#else
y[i] = x[i] * *a + y[i] * *b;
#endif
}
}
template <>
__global__ void AxpbyCUDAKernel<float, at::Half>(
const int N,
const float* a,
const at::Half* x,
const float* b,
at::Half* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
y[i] = convert::To<float, at::Half>(
convert::To<at::Half, float>(x[i]) * __ldg(a) +
convert::To<at::Half, float>(y[i]) * __ldg(b));
#else
y[i] = convert::To<float, at::Half>(
convert::To<at::Half, float>(x[i]) * *a +
convert::To<at::Half, float>(y[i]) * *b);
#endif
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_AXPBY(TCoeff, TData) \
template <> \
CAFFE2_CUDA_EXPORT void Axpby<TCoeff, TData, CUDAContext>( \
const int n, \
const TCoeff a, \
const TData* x, \
const TCoeff b, \
TData* y, \
CUDAContext* context) { \
AxpbyCUDAKernel<TCoeff, TData> \
<<<CAFFE_GET_BLOCKS(n), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(n, a, x, b, y); \
} \
template <> \
CAFFE2_CUDA_EXPORT void Axpby<TCoeff, TData, CUDAContext>( \
const int n, \
const TCoeff* a, \
const TData* x, \
const TCoeff* b, \
TData* y, \
CUDAContext* context) { \
AxpbyCUDAKernel<TCoeff, TData> \
<<<CAFFE_GET_BLOCKS(n), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(n, a, x, b, y); \
}
CAFFE2_SPECIALIZED_CUDA_AXPBY(float, float)
CAFFE2_SPECIALIZED_CUDA_AXPBY(float, at::Half)
#undef CAFFE2_SPECIALIZED_CUDA_AXPBY
namespace {
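// Im2ColNCHWCUDAKernel: each thread handles one (channel, output_y, output_x)
// location and copies its kernel_h x kernel_w receptive field into the column
// buffer, writing zeros for positions that fall into the padding region.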
template <typename T>
__global__ void Im2ColNCHWCUDAKernel(
const int n,
const int input_h,
const int input_w,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int stride_h,
const int stride_w,
const int output_h,
const int output_w,
const T* img_data,
T* col_data) {
CUDA_1D_KERNEL_LOOP(index, n) {
const int w_out = index % output_w;
const int h_index = index / output_w;
const int h_out = h_index % output_h;
const int channel_in = h_index / output_h;
const int channel_out = channel_in * kernel_h * kernel_w;
const int h_in = h_out * stride_h - pad_t;
const int w_in = w_out * stride_w - pad_l;
const int output_size = output_h * output_w;
T* col_data_ptr =
col_data + (channel_out * output_h + h_out) * output_w + w_out;
const T* img_data_ptr =
img_data + (channel_in * input_h + h_in) * input_w + w_in;
int dh = 0;
for (int i = 0; i < kernel_h; ++i) {
int dw = 0;
for (int j = 0; j < kernel_w; ++j) {
const int h = h_in + dh;
const int w = w_in + dw;
#if __CUDA_ARCH__ >= 350 || defined(__HIPCC__)
*col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) &&
utils::IsAGeZeroAndALtB(w, input_w)
? __ldg(img_data_ptr + dh * input_w + dw)
: 0;
#else
*col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) &&
utils::IsAGeZeroAndALtB(w, input_w)
? img_data_ptr[dh * input_w + dw]
: 0;
#endif
col_data_ptr += output_size;
dw += dilation_w;
}
dh += dilation_h;
}
}
}
template <typename T>
__global__ void Im2ColNHWCCUDAKernel(
const int n,
const int input_h,
const int input_w,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int stride_h,
const int stride_w,
const int output_w,
const int channels,
const T* img_data,
T* col_data) {
CUDA_1D_KERNEL_LOOP(index, n) {
const int channel_in = index % channels;
const int w_out = index / channels % output_w;
const int h_out = index / channels / output_w;
const int h_in = h_out * stride_h - pad_t;
const int w_in = w_out * stride_w - pad_l;
T* col_data_ptr = col_data +
(h_out * output_w + w_out) * channels * kernel_h * kernel_w +
channel_in;
int dh = 0;
for (int i = 0; i < kernel_h; ++i) {
int dw = 0;
for (int j = 0; j < kernel_w; ++j) {
const int h = h_in + dh;
const int w = w_in + dw;
#if __CUDA_ARCH__ >= 350 || defined(__HIPCC__)
*col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) &&
utils::IsAGeZeroAndALtB(w, input_w)
? __ldg(img_data + (h * input_w + w) * channels + channel_in)
: 0;
#else
*col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) &&
utils::IsAGeZeroAndALtB(w, input_w)
? img_data[(h * input_w + w) * channels + channel_in]
: 0;
#endif
col_data_ptr += channels;
dw += dilation_w;
}
dh += dilation_h;
}
}
}
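// Col2ImNCHWCUDAKernel performs the gather-style inverse of im2col: for every
// image pixel it visits all output positions whose dilated receptive field
// covers it, keeps only offsets divisible by the dilation, and accumulates the
// corresponding column entries into a single value.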
template <typename T>
__global__ void Col2ImNCHWCUDAKernel(
const int n,
const int input_h,
const int input_w,
const int patch_h,
const int patch_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int stride_h,
const int stride_w,
const int output_h,
const int output_w,
const T* col_data,
T* img_data) {
const int dpatch_h = dilation_h * (patch_h - 1) + 1;
const int dpatch_w = dilation_w * (patch_w - 1) + 1;
CUDA_1D_KERNEL_LOOP(index, n) {
T val = 0;
const int w = index % input_w + pad_l;
const int h = index / input_w % input_h + pad_t;
const int c = index / (input_h * input_w);
// compute the start and end of the output
const int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1;
const int w_col_end = min(w / stride_w + 1, output_w);
const int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1;
const int h_col_end = min(h / stride_h + 1, output_h);
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
int h_k = (h - h_col * stride_h);
int w_k = (w - w_col * stride_w);
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
const int col_data_index =
(((c * patch_h + h_k) * patch_w + w_k) * output_h + h_col) *
output_w +
w_col;
#if __CUDA_ARCH__ >= 350 || defined(__HIPCC__)
val += __ldg(col_data + col_data_index);
#else
val += col_data[col_data_index];
#endif
}
}
}
img_data[index] = val;
}
}
template <typename T>
__global__ void Col2ImNHWCCUDAKernel(
const int n,
const int input_w,
const int channels,
const int patch_h,
const int patch_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int stride_h,
const int stride_w,
const int output_h,
const int output_w,
const T* col_data,
T* img_data) {
const int dpatch_h = dilation_h * (patch_h - 1) + 1;
const int dpatch_w = dilation_w * (patch_w - 1) + 1;
CUDA_1D_KERNEL_LOOP(index, n) {
T val = 0;
const int c = index % channels;
const int w = index / channels % input_w + pad_l;
const int h = index / channels / input_w + pad_t;
// compute the start and end of the output
const int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1;
const int w_col_end = min(w / stride_w + 1, output_w);
const int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1;
const int h_col_end = min(h / stride_h + 1, output_h);
const int channels_col = patch_h * patch_w * channels;
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
int h_k = h - h_col * stride_h;
int w_k = w - w_col * stride_w;
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
const int c_col = (h_k * patch_w + w_k) * channels + c;
#if __CUDA_ARCH__ >= 350 || defined(__HIPCC__)
val += __ldg(
col_data + (h_col * output_w + w_col) * channels_col + c_col);
#else
val += col_data[(h_col * output_w + w_col) * channels_col + c_col];
#endif
}
}
}
img_data[index] = val;
}
}
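// Im2ColNdNCHWCUDAKernel handles the N-dimensional case and serves both
// directions via the kCol2Im flag: the flat kernel/column indices are
// decomposed into per-dimension offsets, and the col2im path accumulates with
// atomicAdd because several column entries can map to the same image element.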
template <typename T, int N, bool kCol2Im>
__global__ void Im2ColNdNCHWCUDAKernel(
const int outer_size,
const int inner_size,
const int kernel_size,
SimpleArray<int, N + 1> img_shape,
SimpleArray<int, N + 1> col_shape,
SimpleArray<int, N> kernel_shape,
SimpleArray<int, N> stride,
SimpleArray<int, N> dilation,
SimpleArray<int, N> pad,
const T* X_data,
T* Y_data) {
int d_offset[N];
int d_iter[N];
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
int offset_i = i;
#pragma unroll
for (int d_i = N - 1; d_i >= 0; --d_i) {
d_offset[d_i] = offset_i % kernel_shape.data[d_i];
offset_i /= kernel_shape.data[d_i];
}
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
int offset_j = j;
#pragma unroll
for (int d_i = N - 1; d_i >= 0; --d_i) {
d_iter[d_i] = offset_j % col_shape.data[d_i + 1];
offset_j /= col_shape.data[d_i + 1];
}
const int col_index = i * inner_size + j;
int img_index = i / kernel_size;
bool is_padding = false;
#pragma unroll
for (int d_i = 0; d_i < N; ++d_i) {
const int d_img = d_iter[d_i] * stride.data[d_i] - pad.data[d_i] +
d_offset[d_i] * dilation.data[d_i];
is_padding |= !utils::IsAGeZeroAndALtB(d_img, img_shape.data[d_i + 1]);
img_index = img_index * img_shape.data[d_i + 1] + d_img;
}
#if __CUDA_ARCH__ >= 350 || defined(__HIPCC__)
if (!kCol2Im) {
Y_data[col_index] = is_padding ? 0 : __ldg(X_data + img_index);
} else if (!is_padding) {
atomicAdd(Y_data + img_index, __ldg(X_data + col_index));
}
#else
if (!kCol2Im) {
Y_data[col_index] = is_padding ? 0 : X_data[img_index];
} else if (!is_padding) {
atomicAdd(Y_data + img_index, X_data[col_index]);
}
#endif
}
}
}
template <typename T, int N>
CAFFE2_CUDA_EXPORT void Im2ColNdNCHWCUDAImpl(
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* img_data,
float* col_data,
CUDAContext* context) {
const int outer_size = col_shape[0];
const int inner_size = col_size / outer_size;
const int kernel_size = std::accumulate(
kernel_shape, kernel_shape + N, 1, std::multiplies<int>());
SimpleArray<int, N + 1> img_shape_array;
SimpleArray<int, N + 1> col_shape_array;
SimpleArray<int, N> kernel_shape_array;
SimpleArray<int, N> stride_array;
SimpleArray<int, N> dilation_array;
SimpleArray<int, N> pad_array;
std::memcpy(img_shape_array.data, img_shape, (N + 1) * sizeof(int));
std::memcpy(col_shape_array.data, col_shape, (N + 1) * sizeof(int));
std::memcpy(kernel_shape_array.data, kernel_shape, N * sizeof(int));
std::memcpy(stride_array.data, stride, N * sizeof(int));
std::memcpy(dilation_array.data, dilation, N * sizeof(int));
std::memcpy(pad_array.data, pad, N * sizeof(int));
Im2ColNdNCHWCUDAKernel<T, N, false>
<<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
outer_size,
inner_size,
kernel_size,
img_shape_array,
col_shape_array,
kernel_shape_array,
stride_array,
dilation_array,
pad_array,
img_data,
col_data);
}
template <typename T, int N>
CAFFE2_CUDA_EXPORT void Col2ImNdNCHWCUDAImpl(
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* col_data,
float* img_data,
CUDAContext* context) {
const int outer_size = col_shape[0];
const int inner_size = col_size / outer_size;
const int kernel_size = std::accumulate(
kernel_shape, kernel_shape + N, 1, std::multiplies<int>());
SimpleArray<int, N + 1> img_shape_array;
SimpleArray<int, N + 1> col_shape_array;
SimpleArray<int, N> kernel_shape_array;
SimpleArray<int, N> stride_array;
SimpleArray<int, N> dilation_array;
SimpleArray<int, N> pad_array;
std::memcpy(img_shape_array.data, img_shape, (N + 1) * sizeof(int));
std::memcpy(col_shape_array.data, col_shape, (N + 1) * sizeof(int));
std::memcpy(kernel_shape_array.data, kernel_shape, N * sizeof(int));
std::memcpy(stride_array.data, stride, N * sizeof(int));
std::memcpy(dilation_array.data, dilation, N * sizeof(int));
std::memcpy(pad_array.data, pad, N * sizeof(int));
Set<T, CUDAContext>(img_size, 0, img_data, context);
Im2ColNdNCHWCUDAKernel<T, N, true>
<<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
outer_size,
inner_size,
kernel_size,
img_shape_array,
col_shape_array,
kernel_shape_array,
stride_array,
dilation_array,
pad_array,
col_data,
img_data);
}
} // namespace
template <>
CAFFE2_CUDA_EXPORT void Im2Col<float, CUDAContext, StorageOrder::NCHW>(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const float* img_data,
float* col_data,
CUDAContext* context,
const int /* groups */) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
const int num_kernels = channels * output_h * output_w;
Im2ColNCHWCUDAKernel<float>
<<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
num_kernels,
height,
width,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
pad_t,
pad_l,
stride_h,
stride_w,
output_h,
output_w,
img_data,
col_data);
}
template <>
CAFFE2_CUDA_EXPORT void Im2Col<float, CUDAContext, StorageOrder::NHWC>(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const float* img_data,
float* col_data,
CUDAContext* context,
const int groups) {
CAFFE_ENFORCE_EQ(groups, 1, "groups must be 1 for GPU NHWC Im2Col");
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
const int num_kernels = output_h * output_w * channels;
Im2ColNHWCCUDAKernel<float>
<<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
num_kernels,
height,
width,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
pad_t,
pad_l,
stride_h,
stride_w,
output_w,
channels,
img_data,
col_data);
}
template <>
CAFFE2_CUDA_EXPORT void Col2Im<float, CUDAContext, StorageOrder::NCHW>(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const float* col_data,
float* img_data,
CUDAContext* context,
const int /* groups */) {
// In NCHW, the number of groups doesn't affect Col2Im.
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
const int num_kernels = channels * height * width;
Col2ImNCHWCUDAKernel<float>
<<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
num_kernels,
height,
width,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
pad_t,
pad_l,
stride_h,
stride_w,
output_h,
output_w,
col_data,
img_data);
}
template <>
CAFFE2_CUDA_EXPORT void Col2Im<float, CUDAContext, StorageOrder::NHWC>(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const float* col_data,
float* img_data,
CUDAContext* context,
const int groups) {
CAFFE_ENFORCE_EQ(groups, 1, "groups must be 1 for GPU NHWC Col2Im");
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
const int num_kernels = height * width * channels;
Col2ImNHWCCUDAKernel<float>
<<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
num_kernels,
width,
channels,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
pad_t,
pad_l,
stride_h,
stride_w,
output_h,
output_w,
col_data,
img_data);
}
template <>
CAFFE2_CUDA_EXPORT void Im2ColNd<float, CUDAContext, StorageOrder::NCHW>(
const int N,
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* img_data,
float* col_data,
CUDAContext* context,
const int /* groups */) {
// In NCHW, the number of groups doesn't affect Im2Col.
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
N,
Im2ColNdNCHWCUDAImpl,
float,
img_size,
col_size,
img_shape,
col_shape,
kernel_shape,
stride,
dilation,
pad,
img_data,
col_data,
context);
}
template <>
CAFFE2_CUDA_EXPORT void Im2ColNd<float, CUDAContext, StorageOrder::NHWC>(
const int N,
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* img_data,
float* col_data,
CUDAContext* context,
const int groups) {
CAFFE_NOT_IMPLEMENTED;
}
template <>
CAFFE2_CUDA_EXPORT void Col2ImNd<float, CUDAContext, StorageOrder::NCHW>(
const int N,
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* col_data,
float* img_data,
CUDAContext* context,
int /* groups */) {
// In NCHW, the number of groups doesn't affect Col2Im.
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
N,
Col2ImNdNCHWCUDAImpl,
float,
img_size,
col_size,
img_shape,
col_shape,
kernel_shape,
stride,
dilation,
pad,
col_data,
img_data,
context);
}
template <>
CAFFE2_CUDA_EXPORT void Col2ImNd<float, CUDAContext, StorageOrder::NHWC>(
const int N,
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* col_data,
float* img_data,
CUDAContext* context,
int groups) {
CAFFE_NOT_IMPLEMENTED;
}
template <>
CAFFE2_CUDA_EXPORT void CopyMatrix<CUDAContext>(
const size_t itemsize,
const int M,
const int N,
const void* A,
const int lda,
void* B,
const int ldb,
CUDAContext* context,
TypeMeta::Copy copy) {
CAFFE_ENFORCE(!copy, "Copy constructor is not supported in CUDA context");
cudaMemcpy2DAsync(
B,
ldb * itemsize,
A,
lda * itemsize,
N * itemsize,
M,
cudaMemcpyDeviceToDevice,
context->cuda_stream());
}
#define CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(T) \
template <> \
void CopyMatrix<T, CUDAContext>( \
const int M, \
const int N, \
const T* A, \
const int lda, \
T* B, \
const int ldb, \
CUDAContext* context) { \
if (M == 0 || N == 0) { \
return; \
} \
cudaMemcpy2DAsync( \
B, \
sizeof(T) * ldb, \
A, \
sizeof(T) * lda, \
sizeof(T) * N, \
M, \
cudaMemcpyDeviceToDevice, \
context->cuda_stream()); \
}
CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(float)
CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(double)
CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(int)
CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(int64_t)
#undef CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX
template <>
CAFFE2_CUDA_EXPORT void CopyVector<float, CUDAContext>(
const int N,
const float* src,
float* dst,
CUDAContext* context) {
if (src != dst && N > 0) {
cudaMemcpyAsync(
dst,
src,
sizeof(float) * N,
cudaMemcpyDeviceToDevice,
context->cuda_stream());
}
}
namespace {
template <typename T>
using BlockReduce = cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>;
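// RowwiseReduceKernel / ColwiseReduceKernel assign one thread block per output
// element: threads stride over the row (or column), combine their partial
// values with cub::BlockReduce, and thread 0 writes the scaled result. The
// trailing __syncthreads() guards the shared temp storage before reuse.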
template <typename T, class Reducer>
__global__ void RowwiseReduceKernel(
const int rows,
const int cols,
const Reducer reducer,
const T init,
const T alpha,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
for (int i = blockIdx.x; i < rows; i += gridDim.x) {
T val = init;
for (int j = threadIdx.x; j < cols; j += blockDim.x) {
val = reducer(X[i * cols + j], val);
}
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[i] = val * alpha;
}
__syncthreads();
}
}
template <typename T, class Reducer>
__global__ void ColwiseReduceKernel(
const int rows,
const int cols,
const Reducer reducer,
const T init,
const T alpha,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
for (int i = blockIdx.x; i < cols; i += gridDim.x) {
T val = init;
for (int j = threadIdx.x; j < rows; j += blockDim.x) {
val = reducer(X[j * cols + i], val);
}
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[i] = val * alpha;
}
__syncthreads();
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX(T) \
template <> \
CAFFE2_CUDA_EXPORT void RowwiseMax<T, CUDAContext>( \
const int N, const int D, const T* x, T* y, CUDAContext* context) { \
RowwiseReduceKernel<<< \
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>( \
N, D, cub::Max(), std::numeric_limits<T>::lowest(), T(1), x, y); \
}
CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX(float)
#undef CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX
#define CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX(T) \
template <> \
CAFFE2_CUDA_EXPORT void ColwiseMax<T, CUDAContext>( \
const int N, const int D, const T* x, T* y, CUDAContext* context) { \
ColwiseReduceKernel<<< \
std::min(D, CAFFE_MAXIMUM_NUM_BLOCKS), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>( \
N, D, cub::Max(), std::numeric_limits<T>::lowest(), T(1), x, y); \
}
CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX(float)
#undef CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX
namespace {
__global__ void
maximum_kernel(const int N, const float alpha, const float* x, float* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = fmaxf(x[i], alpha);
}
}
} // namespace
template <>
CAFFE2_CUDA_EXPORT void Maximum(
const int N,
const float alpha,
const float* x,
float* y,
CUDAContext* context) {
maximum_kernel<<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, alpha, x, y);
}
namespace {
template <typename T, class Reducer, int D>
__global__ void ReduceTensorCUDAKernel(
const int outer_size,
const int inner_size,
SimpleArray<int, D> X_strides,
SimpleArray<FIXED_DIVISOR, D> Y_dims,
const Reducer reducer,
const T init,
const T alpha,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
T val = init;
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
int X_index = 0;
int Y_index = i * inner_size + j;
#pragma unroll
for (int d = D - 1; d >= 0; --d) {
int r;
FIXED_DIVISOR_DIV_MOD(Y_dims.data[d], Y_index, &Y_index, &r);
X_index += r * X_strides.data[d];
}
#if __CUDA_ARCH__ >= 350
val = reducer(val, __ldg(X + X_index));
#else
val = reducer(val, X[X_index]);
#endif
}
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[i] = val * alpha;
}
__syncthreads();
}
}
template <typename T, class Reducer, int D>
CAFFE2_CUDA_EXPORT void ReduceTensorCUDAImpl(
const int outer_size,
const int inner_size,
const int* dims,
const int* axes,
const Reducer& reducer,
const T init,
const T alpha,
const T* X,
T* Y,
CUDAContext* context) {
SimpleArray<int, D> X_strides;
SimpleArray<FIXED_DIVISOR, D> Y_dims;
utils::ComputeTransposedStrides(D, dims, axes, X_strides.data);
for (int i = 0; i < D; ++i) {
Y_dims.data[i] = FIXED_DIVISOR(dims[axes[i]]);
}
ReduceTensorCUDAKernel<T, Reducer, D>
<<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
outer_size,
inner_size,
X_strides,
Y_dims,
reducer,
init,
alpha,
X,
Y);
}
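// ReduceTensorCUDA picks the cheapest available path: a plain Set/Scale when
// the reduction is trivial, the rowwise/colwise kernels when the reduced axes
// are contiguous at one end, and otherwise the generic kernel above, which
// maps each output index back to an input offset via transposed strides and
// FIXED_DIVISOR arithmetic.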
template <typename T, class Reducer>
CAFFE2_CUDA_EXPORT void ReduceTensorCUDA(
const int num_dims,
const int* dims,
const int num_axes,
const int* axes,
const Reducer& reducer,
const T init,
const T alpha,
const T* X,
T* Y,
CUDAContext* context) {
CAFFE_ENFORCE_LE(num_axes, num_dims);
std::vector<int> Y_dims_vector(dims, dims + num_dims);
for (int i = 0; i < num_axes; ++i) {
Y_dims_vector[axes[i]] = 1;
}
const int* X_dims = dims;
const int* Y_dims = Y_dims_vector.data();
const int X_size =
std::accumulate(X_dims, X_dims + num_dims, 1, std::multiplies<int>());
const int Y_size =
std::accumulate(Y_dims, Y_dims + num_dims, 1, std::multiplies<int>());
if (X_size == 0) {
Set<T, CUDAContext>(Y_size, alpha * init, Y, context);
return;
}
if (alpha == T(0)) {
Set<T, CUDAContext>(Y_size, T(0), Y, context);
return;
}
if (std::equal(X_dims, X_dims + num_dims, Y_dims)) {
Scale<T, T, CUDAContext>(X_size, alpha, X, Y, context);
return;
}
int rows;
int cols;
if (utils::IsRowwiseReduce(num_dims, X_dims, Y_dims, &rows, &cols)) {
RowwiseReduceKernel<T>
<<<std::min(rows, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(rows, cols, reducer, init, alpha, X, Y);
return;
}
if (utils::IsColwiseReduce(num_dims, X_dims, Y_dims, &rows, &cols)) {
ColwiseReduceKernel<T>
<<<std::min(rows, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(rows, cols, reducer, init, alpha, X, Y);
return;
}
std::vector<int> transpose_axes(num_dims);
utils::ComputeTransposeAxesForReduceOp(
num_dims, num_axes, axes, transpose_axes.data());
const int outer_size = Y_size;
const int inner_size = X_size / Y_size;
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_2(
num_dims,
ReduceTensorCUDAImpl,
T,
Reducer,
outer_size,
inner_size,
dims,
transpose_axes.data(),
reducer,
init,
alpha,
X,
Y,
context);
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(T) \
template <> \
CAFFE2_CUDA_EXPORT void ReduceMin<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T alpha, \
const T* X, \
T* Y, \
CUDAContext* context) { \
ReduceTensorCUDA( \
num_dims, \
dims, \
num_axes, \
axes, \
cub::Min(), \
std::numeric_limits<T>::max(), \
alpha, \
X, \
Y, \
context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(float)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(double)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(T) \
template <> \
CAFFE2_CUDA_EXPORT void ReduceMax<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T alpha, \
const T* X, \
T* Y, \
CUDAContext* context) { \
ReduceTensorCUDA( \
num_dims, \
dims, \
num_axes, \
axes, \
cub::Max(), \
std::numeric_limits<T>::lowest(), \
alpha, \
X, \
Y, \
context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(float)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(double)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(T) \
template <> \
CAFFE2_CUDA_EXPORT void ReduceSum<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T alpha, \
const T* X, \
T* Y, \
CUDAContext* context) { \
ReduceTensorCUDA( \
num_dims, \
dims, \
num_axes, \
axes, \
cub::Sum(), \
T(0), \
alpha, \
X, \
Y, \
context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(float)
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(double)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN(T) \
template <> \
CAFFE2_CUDA_EXPORT void ReduceMean<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T alpha, \
const T* X, \
T* Y, \
CUDAContext* context) { \
int scale = 1; \
for (int i = 0; i < num_axes; ++i) { \
scale *= dims[axes[i]]; \
} \
ReduceTensorCUDA( \
num_dims, \
dims, \
num_axes, \
axes, \
cub::Sum(), \
T(0), \
alpha / static_cast<T>(scale), \
X, \
Y, \
context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN(float)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN
namespace {
template <typename T, int D>
__global__ void BroadcastCUDAKernel(
const int Y_size,
const SimpleArray<int, D> X_strides,
const SimpleArray<FIXED_DIVISOR, D> Y_dims,
const T alpha,
const T* X,
T* Y) {
CUDA_1D_KERNEL_LOOP(Y_index, Y_size) {
int X_index = 0;
int Y_index_val = Y_index;
#pragma unroll
for (int i = D - 1; i >= 0; --i) {
int d;
FIXED_DIVISOR_DIV_MOD(Y_dims.data[i], Y_index_val, &Y_index_val, &d);
X_index += d * X_strides.data[i];
}
#if __CUDA_ARCH__ >= 350 || defined(__HIPCC__)
Y[Y_index] = __ldg(X + X_index) * alpha;
#else
Y[Y_index] = X[X_index] * alpha;
#endif
}
}
template <typename T, int D>
CAFFE2_CUDA_EXPORT void BroadcastCUDAImpl(
const int X_ndim,
const int* X_dims,
const int* Y_dims,
const T alpha,
const T* X,
T* Y,
CUDAContext* context) {
SimpleArray<int, D> X_strides_array;
SimpleArray<FIXED_DIVISOR, D> Y_dims_array;
const int d = D - X_ndim;
std::fill(X_strides_array.data, X_strides_array.data + d, 0);
int cur_stride = 1;
for (int i = D - 1; i >= d; --i) {
CAFFE_ENFORCE(X_dims[i - d] == 1 || X_dims[i - d] == Y_dims[i]);
X_strides_array.data[i] = X_dims[i - d] == 1 ? 0 : cur_stride;
cur_stride *= X_dims[i - d];
}
for (int i = 0; i < D; ++i) {
if (Y_dims[i] == 0) {
return;
}
Y_dims_array.data[i] = FIXED_DIVISOR(Y_dims[i]);
}
const int Y_size =
std::accumulate(Y_dims, Y_dims + D, 1, std::multiplies<int>());
BroadcastCUDAKernel<T, D>
<<<CAFFE_GET_BLOCKS(Y_size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
Y_size, X_strides_array, Y_dims_array, alpha, X, Y);
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_BROADCAST(T) \
template <> \
CAFFE2_CUDA_EXPORT void Broadcast<T, CUDAContext>( \
const int X_ndim, \
const int* X_dims, \
const int Y_ndim, \
const int* Y_dims, \
const T alpha, \
const T* X, \
T* Y, \
CUDAContext* context) { \
CAFFE_ENFORCE_LE(X_ndim, Y_ndim); \
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1( \
Y_ndim, \
BroadcastCUDAImpl, \
T, \
X_ndim, \
X_dims, \
Y_dims, \
alpha, \
X, \
Y, \
context); \
}
CAFFE2_SPECIALIZED_CUDA_BROADCAST(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(float)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(double)
#undef CAFFE2_SPECIALIZED_CUDA_BROADCAST
namespace {
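// The moments kernels compute mean and variance in a single pass by
// accumulating sum(x) and sum(x^2) per output element and using
// var = E[x^2] - mean^2 (the straightforward, non-Welford formulation).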
template <typename T>
__global__ void RowwiseMomentsCUDAKernel(
const int rows,
const int cols,
const T* X,
T* mean,
T* variance) {
__shared__ typename BlockReduce<T>::TempStorage m_storage;
__shared__ typename BlockReduce<T>::TempStorage v_storage;
const T scale = T(1) / static_cast<T>(cols);
for (int i = blockIdx.x; i < rows; i += gridDim.x) {
T m_val = 0;
T v_val = 0;
for (int j = threadIdx.x; j < cols; j += blockDim.x) {
const int X_index = i * cols + j;
#if __CUDA_ARCH__ >= 350 || defined(__HIPCC__)
m_val += __ldg(X + X_index);
v_val += __ldg(X + X_index) * __ldg(X + X_index);
#else
m_val += X[X_index];
v_val += X[X_index] * X[X_index];
#endif
}
m_val = BlockReduce<T>(m_storage).Sum(m_val);
v_val = BlockReduce<T>(v_storage).Sum(v_val);
if (threadIdx.x == 0) {
const T mu = m_val * scale;
mean[i] = mu;
variance[i] = v_val * scale - mu * mu;
}
__syncthreads();
}
}
template <typename T>
__global__ void ColwiseMomentsCUDAKernel(
const int rows,
const int cols,
const T* X,
T* mean,
T* variance) {
__shared__ typename BlockReduce<T>::TempStorage m_storage;
__shared__ typename BlockReduce<T>::TempStorage v_storage;
const T scale = T(1) / static_cast<T>(rows);
for (int i = blockIdx.x; i < cols; i += gridDim.x) {
T m_val = 0;
T v_val = 0;
for (int j = threadIdx.x; j < rows; j += blockDim.x) {
const int X_index = j * cols + i;
#if __CUDA_ARCH__ >= 350 || defined(__HIPCC__)
m_val += __ldg(X + X_index);
v_val += __ldg(X + X_index) * __ldg(X + X_index);
#else
m_val += X[X_index];
v_val += X[X_index] * X[X_index];
#endif
}
m_val = BlockReduce<T>(m_storage).Sum(m_val);
v_val = BlockReduce<T>(v_storage).Sum(v_val);
if (threadIdx.x == 0) {
const T mu = m_val * scale;
mean[i] = mu;
variance[i] = v_val * scale - mu * mu;
}
__syncthreads();
}
}
template <typename T, int D>
__global__ void MomentsCUDAKernel(
const int outer_size,
const int inner_size,
SimpleArray<int, D> X_strides,
SimpleArray<FIXED_DIVISOR, D> Y_dims,
const T* X,
T* mean,
T* variance) {
__shared__ typename BlockReduce<T>::TempStorage m_storage;
__shared__ typename BlockReduce<T>::TempStorage v_storage;
const T scale = T(1) / static_cast<T>(inner_size);
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
T m_val = 0;
T v_val = 0;
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
int X_index = 0;
int Y_index = i * inner_size + j;
#pragma unroll
for (int d = D - 1; d >= 0; --d) {
int r;
FIXED_DIVISOR_DIV_MOD(Y_dims.data[d], Y_index, &Y_index, &r);
X_index += r * X_strides.data[d];
}
#if __CUDA_ARCH__ >= 350 || defined(__HIPCC__)
m_val += __ldg(X + X_index);
v_val += __ldg(X + X_index) * __ldg(X + X_index);
#else
m_val += X[X_index];
v_val += X[X_index] * X[X_index];
#endif
}
m_val = BlockReduce<T>(m_storage).Sum(m_val);
v_val = BlockReduce<T>(v_storage).Sum(v_val);
if (threadIdx.x == 0) {
const T mu = m_val * scale;
mean[i] = mu;
variance[i] = v_val * scale - mu * mu;
}
__syncthreads();
}
}
template <typename T, int D>
CAFFE2_CUDA_EXPORT void MomentsCUDAImpl(
const int outer_size,
const int inner_size,
const int* dims,
const int* axes,
const T* X,
T* mean,
T* variance,
CUDAContext* context) {
SimpleArray<int, D> X_strides;
SimpleArray<FIXED_DIVISOR, D> Y_dims;
utils::ComputeTransposedStrides(D, dims, axes, X_strides.data);
for (int i = 0; i < D; ++i) {
Y_dims.data[i] = FIXED_DIVISOR(dims[axes[i]]);
}
MomentsCUDAKernel<T, D>
<<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
outer_size, inner_size, X_strides, Y_dims, X, mean, variance);
}
template <typename T>
CAFFE2_CUDA_EXPORT void MomentsCUDA(
const int num_dims,
const int* dims,
const int num_axes,
const int* axes,
const T* X,
T* mean,
T* variance,
CUDAContext* context) {
CAFFE_ENFORCE_LE(num_axes, num_dims);
std::vector<int> Y_dims_vector(dims, dims + num_dims);
for (int i = 0; i < num_axes; ++i) {
Y_dims_vector[axes[i]] = 1;
}
const int* X_dims = dims;
const int* Y_dims = Y_dims_vector.data();
const int X_size =
std::accumulate(X_dims, X_dims + num_dims, 1, std::multiplies<int>());
const int Y_size =
std::accumulate(Y_dims, Y_dims + num_dims, 1, std::multiplies<int>());
if (X_size == 0) {
Set<T, CUDAContext>(Y_size, T(0), mean, context);
Set<T, CUDAContext>(Y_size, T(0), variance, context);
return;
}
if (std::equal(X_dims, X_dims + num_dims, Y_dims)) {
cudaMemcpyAsync(
mean,
X,
sizeof(T) * X_size,
cudaMemcpyDeviceToDevice,
context->cuda_stream());
Set<T, CUDAContext>(Y_size, T(0), variance, context);
return;
}
int rows;
int cols;
if (utils::IsRowwiseReduce(num_dims, X_dims, Y_dims, &rows, &cols)) {
RowwiseMomentsCUDAKernel<T>
<<<std::min(rows, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(rows, cols, X, mean, variance);
return;
}
if (utils::IsColwiseReduce(num_dims, X_dims, Y_dims, &rows, &cols)) {
ColwiseMomentsCUDAKernel<T>
<<<std::min(rows, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(rows, cols, X, mean, variance);
return;
}
std::vector<int> transpose_axes(num_dims);
utils::ComputeTransposeAxesForReduceOp(
num_dims, num_axes, axes, transpose_axes.data());
const int pivot = num_dims - num_axes;
int outer_size = 1;
for (int i = 0; i < pivot; ++i) {
outer_size *= dims[transpose_axes[i]];
}
int inner_size = 1;
for (int i = pivot; i < num_dims; ++i) {
inner_size *= dims[transpose_axes[i]];
}
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
num_dims,
MomentsCUDAImpl,
T,
outer_size,
inner_size,
dims,
transpose_axes.data(),
X,
mean,
variance,
context);
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_MOMENTS(T) \
template <> \
CAFFE2_CUDA_EXPORT void Moments<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T* X, \
T* mean, \
T* variance, \
CUDAContext* context) { \
MomentsCUDA<T>( \
num_dims, dims, num_axes, axes, X, mean, variance, context); \
}
CAFFE2_SPECIALIZED_CUDA_MOMENTS(float)
#undef CAFFE2_SPECIALIZED_CUDA_MOMENTS
namespace {
template <typename T>
__global__ void
InvStdCUDAKernel(const int N, const T epsilon, const T* var, T* inv_std);
#define DELEGATE_INV_STD_KERNEL_FUNCTION(T, Func) \
template <> \
__global__ void InvStdCUDAKernel<T>( \
const int N, const T epsilon, const T* var, T* inv_std) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
inv_std[i] = Func(var[i] + epsilon); \
} \
}
DELEGATE_INV_STD_KERNEL_FUNCTION(float, rsqrtf)
#undef DELEGATE_INV_STD_KERNEL_FUNCTION
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_INV_STD(T) \
template <> \
CAFFE2_CUDA_EXPORT void InvStd<T, CUDAContext>( \
const int N, \
const T epsilon, \
const T* var, \
T* inv_std, \
CUDAContext* context) { \
InvStdCUDAKernel<T> \
<<<CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, epsilon, var, inv_std); \
}
CAFFE2_SPECIALIZED_CUDA_INV_STD(float)
#undef CAFFE2_SPECIALIZED_CUDA_INV_STD
namespace {
constexpr int kTileDim = 32;
constexpr int kBlockRows = 8;
// Splits the original matrix into kTileDim x kTileDim (32 x 32) submatrices.
// Each block transposes one submatrix by staging it in shared memory.
// Reference: https://devblogs.nvidia.com/efficient-matrix-transpose-cuda-cc/
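// The extra column in the tile (kTileDim + 1) offsets successive rows so that
// reads along a tile column hit different shared-memory banks, avoiding bank
// conflicts during the transposed write-out.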
template <typename T>
__global__ void BatchTranspose2DCUDAKernel(
const int N,
const int H,
const int W,
const T* X,
T* Y) {
__shared__ T tile[kTileDim][kTileDim + 1];
const int h = (H + kTileDim - 1) / kTileDim;
const int w = (W + kTileDim - 1) / kTileDim;
const int outer_size = N * h * w;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
const int n = i / (h * w);
const int k = i % (h * w);
const int r = k / w;
const int c = k % w;
const int offset = n * H * W;
int x = c * kTileDim + threadIdx.x;
int y = r * kTileDim + threadIdx.y;
if (x < W) {
for (int j = 0; j < kTileDim && y + j < H; j += kBlockRows) {
#if __CUDA_ARCH__ >= 350 || defined(__HIPCC__)
tile[threadIdx.y + j][threadIdx.x] =
__ldg(X + offset + (y + j) * W + x);
#else
tile[threadIdx.y + j][threadIdx.x] = X[offset + (y + j) * W + x];
#endif
}
}
__syncthreads();
x = r * kTileDim + threadIdx.x;
y = c * kTileDim + threadIdx.y;
if (x < H) {
for (int j = 0; j < kTileDim && y + j < W; j += kBlockRows) {
Y[offset + (y + j) * H + x] = tile[threadIdx.x][threadIdx.y + j];
}
}
__syncthreads();
}
}
template <typename T, int D>
__global__ void TransposeCUDAKernel(
const int size,
const SimpleArray<int, D> X_strides,
const SimpleArray<FIXED_DIVISOR, D> Y_dims,
const T* X,
T* Y) {
CUDA_1D_KERNEL_LOOP(Y_index, size) {
int X_index = 0;
int Y_index_val = Y_index;
#pragma unroll
for (int i = D - 1; i >= 0; --i) {
int d;
FIXED_DIVISOR_DIV_MOD(Y_dims.data[i], Y_index_val, &Y_index_val, &d);
X_index += d * X_strides.data[i];
}
#if __CUDA_ARCH__ >= 350 || defined(__HIPCC__)
Y[Y_index] = __ldg(X + X_index);
#else
Y[Y_index] = X[X_index];
#endif
}
}
template <typename T, int D>
CAFFE2_CUDA_EXPORT void TransposeCUDAImpl(
const int* dims,
const int* axes,
const T* X,
T* Y,
CUDAContext* context) {
SimpleArray<int, D> X_strides;
SimpleArray<FIXED_DIVISOR, D> Y_dims;
utils::ComputeTransposedStrides(D, dims, axes, X_strides.data);
int size = 1;
for (int i = 0; i < D; ++i) {
Y_dims.data[i] = FIXED_DIVISOR(dims[axes[i]]);
size *= dims[i];
}
TransposeCUDAKernel<T, D>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, X_strides, Y_dims, X, Y);
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(T) \
template <> \
CAFFE2_CUDA_EXPORT void Transpose<T, CUDAContext>( \
const int ndim, \
const int* dims, \
const int* axes, \
const T* X, \
T* Y, \
CUDAContext* context) { \
if (utils::IsIdentityPermutation(ndim, axes)) { \
const int size = \
std::accumulate(dims, dims + ndim, 1, std::multiplies<int>()); \
context->template CopySameDevice<T>(size, X, Y); \
return; \
} \
if (utils::IsBatchTranspose2D(ndim, axes)) { \
const int N = \
std::accumulate(dims, dims + ndim - 2, 1, std::multiplies<int>()); \
const int H = dims[ndim - 2]; \
const int W = dims[ndim - 1]; \
const int h = (H + kTileDim - 1) / kTileDim; \
const int w = (W + kTileDim - 1) / kTileDim; \
const int outer_size = N * h * w; \
const dim3 dim_block(kTileDim, kBlockRows, 1); \
BatchTranspose2DCUDAKernel<T> \
<<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS), \
dim_block, \
0, \
context->cuda_stream()>>>(N, H, W, X, Y); \
return; \
} \
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1( \
ndim, TransposeCUDAImpl, T, dims, axes, X, Y, context); \
}
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(float)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(double)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(int)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(int64_t)
#undef CAFFE2_SPECIALIZED_CUDA_TRANSPOSE
namespace {
template <typename T, StorageOrder kOrder>
__global__ void AffineChannelCUDAKernel(
const int size,
const int C,
const int HxW,
const T* X,
const T* scale,
const T* bias,
T* Y) {
CUDA_1D_KERNEL_LOOP(i, size) {
const int c = kOrder == StorageOrder::NCHW ? i / HxW % C : i % C;
#if __CUDA_ARCH__ >= 350 || defined(__HIPCC__)
Y[i] = __ldg(scale + c) * __ldg(X + i) + __ldg(bias + c);
#else
Y[i] = scale[c] * X[i] + bias[c];
#endif
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_AFFINE_CHANNEL(T, kOrder) \
template <> \
CAFFE2_CUDA_EXPORT void AffineChannel<T, CUDAContext, kOrder>( \
const int N, \
const int C, \
const int HxW, \
const T* X, \
const T* scale, \
const T* bias, \
T* Y, \
CUDAContext* context) { \
const int size = N * C * HxW; \
AffineChannelCUDAKernel<T, kOrder> \
<<<CAFFE_GET_BLOCKS(size), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(size, C, HxW, X, scale, bias, Y); \
}
CAFFE2_SPECIALIZED_CUDA_AFFINE_CHANNEL(float, StorageOrder::NCHW)
CAFFE2_SPECIALIZED_CUDA_AFFINE_CHANNEL(float, StorageOrder::NHWC)
#undef CAFFE2_SPECIALIZED_CUDA_AFFINE_CHANNEL
#define CAFFE2_SPECIALIZED_CUDA_NCHW2NHWC(T) \
template <> \
CAFFE2_CUDA_EXPORT void NCHW2NHWC<T, CUDAContext>( \
const int N, \
const int C, \
const int HxW, \
const T* X, \
T* Y, \
CUDAContext* context) { \
const int h = (C + kTileDim - 1) / kTileDim; \
const int w = (HxW + kTileDim - 1) / kTileDim; \
const int outer_size = N * h * w; \
const dim3 dim_block(kTileDim, kBlockRows, 1); \
BatchTranspose2DCUDAKernel<T> \
<<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS), \
dim_block, \
0, \
context->cuda_stream()>>>(N, C, HxW, X, Y); \
}
CAFFE2_SPECIALIZED_CUDA_NCHW2NHWC(float)
#undef CAFFE2_SPECIALIZED_CUDA_NCHW2NHWC
#define CAFFE2_SPECIALIZED_CUDA_NHWC2NCHW(T) \
template <> \
CAFFE2_CUDA_EXPORT void NHWC2NCHW<T, CUDAContext>( \
const int N, \
const int C, \
const int HxW, \
const T* X, \
T* Y, \
CUDAContext* context) { \
const int h = (HxW + kTileDim - 1) / kTileDim; \
const int w = (C + kTileDim - 1) / kTileDim; \
const int outer_size = N * h * w; \
const dim3 dim_block(kTileDim, kBlockRows, 1); \
BatchTranspose2DCUDAKernel<T> \
<<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS), \
dim_block, \
0, \
context->cuda_stream()>>>(N, HxW, C, X, Y); \
}
CAFFE2_SPECIALIZED_CUDA_NHWC2NCHW(float)
#undef CAFFE2_SPECIALIZED_CUDA_NHWC2NCHW
} // namespace math
} // namespace caffe2
|
7e1fe0ca463a6e09d75af4696031a7e6f5a45b93.hip | // !!! This is a file automatically generated by hipify!!!
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <THH/THHAtomics.cuh>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
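// approxmatch computes an approximate matching between two point sets, as used
// for the Earth Mover's Distance loss (see the "emd" error message below).
// Each point carries 6 values: xyz plus a second 3-vector (e.g. a normal,
// judging by the xn/yn/zn names). When the sets differ in size, points of the
// smaller set are given proportionally more capacity (multiL/multiR).
// The loop j = 7..-2 uses level = -4^j: the exp(level * squared_distance)
// weights start out extremely peaked (close to nearest-neighbour assignment)
// and are gradually flattened, ending with a uniform pass (level = 0) that
// places any leftover mass. Each pass normalizes the weights per point and
// transfers only as much mass as the remaining capacities on both sides allow,
// so the match matrix approaches a feasible transport plan.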
__global__ void approxmatch(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,float * __restrict__ match,float * temp){
float * remainL=temp+blockIdx.x*(n+m)*2, * remainR=temp+blockIdx.x*(n+m)*2+n,*ratioL=temp+blockIdx.x*(n+m)*2+n+m,*ratioR=temp+blockIdx.x*(n+m)*2+n+m+n;
float multiL,multiR;
if (n>=m){
multiL=1;
multiR=n/m;
}else{
multiL=m/n;
multiR=1;
}
const int Block=1024;
__shared__ float buf[Block*7];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=threadIdx.x;j<n*m;j+=blockDim.x)
match[i*n*m+j]=0;
for (int j=threadIdx.x;j<n;j+=blockDim.x)
remainL[j]=multiL;
for (int j=threadIdx.x;j<m;j+=blockDim.x)
remainR[j]=multiR;
__syncthreads();
for (int j=7;j>=-2;j--){
float level=-powf(4.0f,j);
if (j==-2){
level=0;
}
for (int k0=0;k0<n;k0+=blockDim.x){
int k=k0+threadIdx.x;
float x1=0,y1=0,z1=0, xn1=0,yn1=0,zn1=0;
if (k<n){
x1=xyz1[i*n*6+k*6+0];
y1=xyz1[i*n*6+k*6+1];
z1=xyz1[i*n*6+k*6+2];
xn1=xyz1[i*n*6+k*6+3];
yn1=xyz1[i*n*6+k*6+4];
zn1=xyz1[i*n*6+k*6+5];
}
float suml=1e-9f;
for (int l0=0;l0<m;l0+=Block){
int lend=min(m,l0+Block)-l0;
for (int l=threadIdx.x;l<lend;l+=blockDim.x){
float x2=xyz2[i*m*6+l0*6+l*6+0];
float y2=xyz2[i*m*6+l0*6+l*6+1];
float z2=xyz2[i*m*6+l0*6+l*6+2];
float xn2=xyz2[i*m*6+l0*6+l*6+3];
float yn2=xyz2[i*m*6+l0*6+l*6+4];
float zn2=xyz2[i*m*6+l0*6+l*6+5];
buf[l*7+0]=x2;
buf[l*7+1]=y2;
buf[l*7+2]=z2;
buf[l*7+3]=xn2;
buf[l*7+4]=yn2;
buf[l*7+5]=zn2;
buf[l*7+6]=remainR[l0+l];
}
__syncthreads();
for (int l=0;l<lend;l++){
float x2=buf[l*7+0];
float y2=buf[l*7+1];
float z2=buf[l*7+2];
float xn2=buf[l*7+3];
float yn2=buf[l*7+4];
float zn2=buf[l*7+5];
float d=level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)+(xn1-xn2)*(xn1-xn2)+(yn1-yn2)*(yn1-yn2)+(zn1-zn2)*(zn1-zn2));
float w=__expf(d)*buf[l*7+6];
suml+=w;
}
__syncthreads();
}
if (k<n)
ratioL[k]=remainL[k]/suml;
}
__syncthreads();
for (int l0=0;l0<m;l0+=blockDim.x){
int l=l0+threadIdx.x;
float x2=0,y2=0,z2=0,xn2=0,yn2=0,zn2=0;
if (l<m){
x2=xyz2[i*m*6+l*6+0];
y2=xyz2[i*m*6+l*6+1];
z2=xyz2[i*m*6+l*6+2];
xn2=xyz2[i*m*6+l*6+3];
yn2=xyz2[i*m*6+l*6+4];
zn2=xyz2[i*m*6+l*6+5];
}
float sumr=0;
for (int k0=0;k0<n;k0+=Block){
int kend=min(n,k0+Block)-k0;
for (int k=threadIdx.x;k<kend;k+=blockDim.x){
buf[k*7+0]=xyz1[i*n*6+k0*6+k*6+0];
buf[k*7+1]=xyz1[i*n*6+k0*6+k*6+1];
buf[k*7+2]=xyz1[i*n*6+k0*6+k*6+2];
buf[k*7+3]=xyz1[i*n*6+k0*6+k*6+3];
buf[k*7+4]=xyz1[i*n*6+k0*6+k*6+4];
buf[k*7+5]=xyz1[i*n*6+k0*6+k*6+5];
buf[k*7+6]=ratioL[k0+k];
}
__syncthreads();
for (int k=0;k<kend;k++){
float x1=buf[k*7+0];
float y1=buf[k*7+1];
float z1=buf[k*7+2];
float xn1=buf[k*7+3];
float yn1=buf[k*7+4];
float zn1=buf[k*7+5];
float w=__expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)+(xn1-xn2)*(xn1-xn2)+(yn1-yn2)*(yn1-yn2)+(zn1-zn2)*(zn1-zn2)))*buf[k*7+6];
sumr+=w;
}
__syncthreads();
}
if (l<m){
sumr*=remainR[l];
float consumption=fminf(remainR[l]/(sumr+1e-9f),1.0f);
ratioR[l]=consumption*remainR[l];
remainR[l]=fmaxf(0.0f,remainR[l]-sumr);
}
}
__syncthreads();
for (int k0=0;k0<n;k0+=blockDim.x){
int k=k0+threadIdx.x;
float x1=0,y1=0,z1=0,xn1=0,yn1=0,zn1=0;
if (k<n){
x1=xyz1[i*n*6+k*6+0];
y1=xyz1[i*n*6+k*6+1];
z1=xyz1[i*n*6+k*6+2];
xn1=xyz1[i*n*6+k*6+3];
yn1=xyz1[i*n*6+k*6+4];
zn1=xyz1[i*n*6+k*6+5];
}
float suml=0;
for (int l0=0;l0<m;l0+=Block){
int lend=min(m,l0+Block)-l0;
for (int l=threadIdx.x;l<lend;l+=blockDim.x){
buf[l*7+0]=xyz2[i*m*6+l0*6+l*6+0];
buf[l*7+1]=xyz2[i*m*6+l0*6+l*6+1];
buf[l*7+2]=xyz2[i*m*6+l0*6+l*6+2];
buf[l*7+3]=xyz2[i*m*6+l0*6+l*6+3];
buf[l*7+4]=xyz2[i*m*6+l0*6+l*6+4];
buf[l*7+5]=xyz2[i*m*6+l0*6+l*6+5];
buf[l*7+6]=ratioR[l0+l];
}
__syncthreads();
float rl=ratioL[k];
if (k<n){
for (int l=0;l<lend;l++){
float x2=buf[l*7+0];
float y2=buf[l*7+1];
float z2=buf[l*7+2];
float xn2=buf[l*7+3];
float yn2=buf[l*7+4];
float zn2=buf[l*7+5];
float w=__expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)+(xn1-xn2)*(xn1-xn2)+(yn1-yn2)*(yn1-yn2)+(zn1-zn2)*(zn1-zn2)))*rl*buf[l*7+6];
match[i*n*m+(l0+l)*n+k]+=w;
suml+=w;
}
}
__syncthreads();
}
if (k<n)
remainL[k]=fmaxf(0.0f,remainL[k]-suml);
}
__syncthreads();
}
}
}
int approxmatch_cuda_forward(int b,int n,int m, const float * xyz1, const float * xyz2, float * match, float * temp){
hipLaunchKernelGGL(( approxmatch), dim3(32),dim3(512), 0, 0, b,n,m,xyz1,xyz2,match,temp);
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in matching: %s\n", hipGetErrorString(err));
return 0;
}
return 1;
}
__global__ void matchcost(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * __restrict__ out){
__shared__ float allsum[512];
const int Block=1024;
__shared__ float buf[Block*6];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
float subsum=0;
for (int k0=0;k0<n;k0+=blockDim.x){
int k=k0+threadIdx.x;
float x1=0,y1=0,z1=0, xn1=0,yn1=0,zn1=0;
if (k<n){
x1=xyz1[i*n*6+k*6+0];
y1=xyz1[i*n*6+k*6+1];
z1=xyz1[i*n*6+k*6+2];
xn1=xyz1[i*n*6+k*6+3];
yn1=xyz1[i*n*6+k*6+4];
zn1=xyz1[i*n*6+k*6+5];
}
for (int l0=0;l0<m;l0+=Block){
int lend=min(m,l0+Block)-l0;
for (int l=threadIdx.x;l<lend*6;l+=blockDim.x)
buf[l]=xyz2[i*m*6+l0*6+l];
__syncthreads();
if (k<n){
for (int l=0;l<lend;l++){
float x2=buf[l*6+0];
float y2=buf[l*6+1];
float z2=buf[l*6+2];
float xn2=buf[l*6+3];
float yn2=buf[l*6+4];
float zn2=buf[l*6+5];
float d=sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)+(xn1-xn2)*(xn1-xn2)+(yn1-yn2)*(yn1-yn2)+(zn1-zn2)*(zn1-zn2));
subsum+=d*match[i*n*m+(l0+l)*n+k];
}
}
__syncthreads();
}
}
allsum[threadIdx.x]=subsum;
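		// Tree reduction across the block: after log2(blockDim.x) passes, allsum[0] holds the total cost for this batch element.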
for (int j=1;j<blockDim.x;j<<=1){
__syncthreads();
if ((threadIdx.x&j)==0 && threadIdx.x+j<blockDim.x){
allsum[threadIdx.x]+=allsum[threadIdx.x+j];
}
}
if (threadIdx.x==0)
out[i]=allsum[0];
__syncthreads();
}
}
int matchcost_cuda_forward(int b,int n,int m,const float * xyz1,const float * xyz2,const float * match,float * out){
hipLaunchKernelGGL(( matchcost), dim3(32),dim3(512), 0, 0, b,n,m,xyz1,xyz2,match,out);
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in emd updateOutput: %s\n", hipGetErrorString(err));
//THError("aborting");
return 0;
}
return 1;
}
__global__ void matchcostgrad2(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * __restrict__ grad2){
__shared__ float sum_grad[256*6];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
int kbeg=m*blockIdx.y/gridDim.y;
int kend=m*(blockIdx.y+1)/gridDim.y;
for (int k=kbeg;k<kend;k++){
float x2=xyz2[(i*m+k)*6+0];
float y2=xyz2[(i*m+k)*6+1];
float z2=xyz2[(i*m+k)*6+2];
float xn2=xyz2[(i*m+k)*6+3];
float yn2=xyz2[(i*m+k)*6+4];
float zn2=xyz2[(i*m+k)*6+5];
float subsumx=0,subsumy=0,subsumz=0,subsumxn=0,subsumyn=0,subsumzn=0;
for (int j=threadIdx.x;j<n;j+=blockDim.x){
float x1=x2-xyz1[(i*n+j)*6+0];
float y1=y2-xyz1[(i*n+j)*6+1];
float z1=z2-xyz1[(i*n+j)*6+2];
float xn1=xn2-xyz1[(i*n+j)*6+3];
float yn1=yn2-xyz1[(i*n+j)*6+4];
float zn1=zn2-xyz1[(i*n+j)*6+5];
float d=match[i*n*m+k*n+j]*rsqrtf(fmaxf(x1*x1+y1*y1+z1*z1+xn1*xn1+yn1*yn1+zn1*zn1,1e-20f));
subsumx+=x1*d;
subsumy+=y1*d;
subsumz+=z1*d;
subsumxn+=xn1*d;
subsumyn+=yn1*d;
subsumzn+=zn1*d;
}
sum_grad[threadIdx.x*6+0]=subsumx;
sum_grad[threadIdx.x*6+1]=subsumy;
sum_grad[threadIdx.x*6+2]=subsumz;
sum_grad[threadIdx.x*6+3]=subsumxn;
sum_grad[threadIdx.x*6+4]=subsumyn;
sum_grad[threadIdx.x*6+5]=subsumzn;
for (int j=1;j<blockDim.x;j<<=1){
__syncthreads();
int j1=threadIdx.x;
int j2=threadIdx.x+j;
if ((j1&j)==0 && j2<blockDim.x){
sum_grad[j1*6+0]+=sum_grad[j2*6+0];
sum_grad[j1*6+1]+=sum_grad[j2*6+1];
sum_grad[j1*6+2]+=sum_grad[j2*6+2];
sum_grad[j1*6+3]+=sum_grad[j2*6+3];
sum_grad[j1*6+4]+=sum_grad[j2*6+4];
sum_grad[j1*6+5]+=sum_grad[j2*6+5];
}
}
if (threadIdx.x==0){
grad2[(i*m+k)*6+0]=sum_grad[0];
grad2[(i*m+k)*6+1]=sum_grad[1];
grad2[(i*m+k)*6+2]=sum_grad[2];
grad2[(i*m+k)*6+3]=sum_grad[3];
grad2[(i*m+k)*6+4]=sum_grad[4];
grad2[(i*m+k)*6+5]=sum_grad[5];
}
__syncthreads();
}
}
}
__global__ void matchcostgrad1(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * __restrict__ grad1){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int l=threadIdx.x;l<n;l+=blockDim.x){
float x1=xyz1[i*n*6+l*6+0];
float y1=xyz1[i*n*6+l*6+1];
float z1=xyz1[i*n*6+l*6+2];
float xn1=xyz1[i*n*6+l*6+3];
float yn1=xyz1[i*n*6+l*6+4];
float zn1=xyz1[i*n*6+l*6+5];
float dx=0,dy=0,dz=0,dxn=0,dyn=0,dzn=0;
for (int k=0;k<m;k++){
float x2=xyz2[i*m*6+k*6+0];
float y2=xyz2[i*m*6+k*6+1];
float z2=xyz2[i*m*6+k*6+2];
float xn2=xyz2[i*m*6+k*6+3];
float yn2=xyz2[i*m*6+k*6+4];
float zn2=xyz2[i*m*6+k*6+5];
float d=match[i*n*m+k*n+l]*rsqrtf(fmaxf((x1-x2)*(x1-x2)+(y1-y2)*(y1-y2)+(z1-z2)*(z1-z2)+(xn1-xn2)*(xn1-xn2)+(yn1-yn2)*(yn1-yn2)+(zn1-zn2)*(zn1-zn2),1e-20f));
dx+=(x1-x2)*d;
dy+=(y1-y2)*d;
dz+=(z1-z2)*d;
dxn+=(xn1-xn2)*d;
dyn+=(yn1-yn2)*d;
dzn+=(zn1-zn2)*d;
}
grad1[i*n*6+l*6+0]=dx;
grad1[i*n*6+l*6+1]=dy;
grad1[i*n*6+l*6+2]=dz;
grad1[i*n*6+l*6+3]=dxn;
grad1[i*n*6+l*6+4]=dyn;
grad1[i*n*6+l*6+5]=dzn;
}
}
}
int matchcost_cuda_backward(int b,int n,int m,const float * xyz1,const float * xyz2,const float * match,float * grad1,float * grad2){
hipLaunchKernelGGL(( matchcostgrad1), dim3(32),dim3(512), 0, 0, b,n,m,xyz1,xyz2,match,grad1);
hipLaunchKernelGGL(( matchcostgrad2), dim3(dim3(32,32)),dim3(256), 0, 0, b,n,m,xyz1,xyz2,match,grad2);
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in emd backward: %s\n", hipGetErrorString(err));
//THError("aborting");
return 0;
}
return 1;
}
| 7e1fe0ca463a6e09d75af4696031a7e6f5a45b93.cu | #include <torch/extension.h>
#include <ATen/ATen.h>
#include <THC/THCAtomics.cuh>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
__global__ void approxmatch(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,float * __restrict__ match,float * temp){
float * remainL=temp+blockIdx.x*(n+m)*2, * remainR=temp+blockIdx.x*(n+m)*2+n,*ratioL=temp+blockIdx.x*(n+m)*2+n+m,*ratioR=temp+blockIdx.x*(n+m)*2+n+m+n;
float multiL,multiR;
if (n>=m){
multiL=1;
multiR=n/m;
}else{
multiL=m/n;
multiR=1;
}
const int Block=1024;
__shared__ float buf[Block*7];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=threadIdx.x;j<n*m;j+=blockDim.x)
match[i*n*m+j]=0;
for (int j=threadIdx.x;j<n;j+=blockDim.x)
remainL[j]=multiL;
for (int j=threadIdx.x;j<m;j+=blockDim.x)
remainR[j]=multiR;
__syncthreads();
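		// Fixed temperature schedule: level = -4^j for j = 7..-1, then level = 0 on the final pass; the matching weights below are exp(level * squared distance).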
for (int j=7;j>=-2;j--){
float level=-powf(4.0f,j);
if (j==-2){
level=0;
}
for (int k0=0;k0<n;k0+=blockDim.x){
int k=k0+threadIdx.x;
float x1=0,y1=0,z1=0, xn1=0,yn1=0,zn1=0;
if (k<n){
x1=xyz1[i*n*6+k*6+0];
y1=xyz1[i*n*6+k*6+1];
z1=xyz1[i*n*6+k*6+2];
xn1=xyz1[i*n*6+k*6+3];
yn1=xyz1[i*n*6+k*6+4];
zn1=xyz1[i*n*6+k*6+5];
}
float suml=1e-9f;
for (int l0=0;l0<m;l0+=Block){
int lend=min(m,l0+Block)-l0;
for (int l=threadIdx.x;l<lend;l+=blockDim.x){
float x2=xyz2[i*m*6+l0*6+l*6+0];
float y2=xyz2[i*m*6+l0*6+l*6+1];
float z2=xyz2[i*m*6+l0*6+l*6+2];
float xn2=xyz2[i*m*6+l0*6+l*6+3];
float yn2=xyz2[i*m*6+l0*6+l*6+4];
float zn2=xyz2[i*m*6+l0*6+l*6+5];
buf[l*7+0]=x2;
buf[l*7+1]=y2;
buf[l*7+2]=z2;
buf[l*7+3]=xn2;
buf[l*7+4]=yn2;
buf[l*7+5]=zn2;
buf[l*7+6]=remainR[l0+l];
}
__syncthreads();
for (int l=0;l<lend;l++){
float x2=buf[l*7+0];
float y2=buf[l*7+1];
float z2=buf[l*7+2];
float xn2=buf[l*7+3];
float yn2=buf[l*7+4];
float zn2=buf[l*7+5];
float d=level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)+(xn1-xn2)*(xn1-xn2)+(yn1-yn2)*(yn1-yn2)+(zn1-zn2)*(zn1-zn2));
float w=__expf(d)*buf[l*7+6];
suml+=w;
}
__syncthreads();
}
if (k<n)
ratioL[k]=remainL[k]/suml;
}
__syncthreads();
for (int l0=0;l0<m;l0+=blockDim.x){
int l=l0+threadIdx.x;
float x2=0,y2=0,z2=0,xn2=0,yn2=0,zn2=0;
if (l<m){
x2=xyz2[i*m*6+l*6+0];
y2=xyz2[i*m*6+l*6+1];
z2=xyz2[i*m*6+l*6+2];
xn2=xyz2[i*m*6+l*6+3];
yn2=xyz2[i*m*6+l*6+4];
zn2=xyz2[i*m*6+l*6+5];
}
float sumr=0;
for (int k0=0;k0<n;k0+=Block){
int kend=min(n,k0+Block)-k0;
for (int k=threadIdx.x;k<kend;k+=blockDim.x){
buf[k*7+0]=xyz1[i*n*6+k0*6+k*6+0];
buf[k*7+1]=xyz1[i*n*6+k0*6+k*6+1];
buf[k*7+2]=xyz1[i*n*6+k0*6+k*6+2];
buf[k*7+3]=xyz1[i*n*6+k0*6+k*6+3];
buf[k*7+4]=xyz1[i*n*6+k0*6+k*6+4];
buf[k*7+5]=xyz1[i*n*6+k0*6+k*6+5];
buf[k*7+6]=ratioL[k0+k];
}
__syncthreads();
for (int k=0;k<kend;k++){
float x1=buf[k*7+0];
float y1=buf[k*7+1];
float z1=buf[k*7+2];
float xn1=buf[k*7+3];
float yn1=buf[k*7+4];
float zn1=buf[k*7+5];
float w=__expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)+(xn1-xn2)*(xn1-xn2)+(yn1-yn2)*(yn1-yn2)+(zn1-zn2)*(zn1-zn2)))*buf[k*7+6];
sumr+=w;
}
__syncthreads();
}
if (l<m){
sumr*=remainR[l];
float consumption=fminf(remainR[l]/(sumr+1e-9f),1.0f);
ratioR[l]=consumption*remainR[l];
remainR[l]=fmaxf(0.0f,remainR[l]-sumr);
}
}
__syncthreads();
for (int k0=0;k0<n;k0+=blockDim.x){
int k=k0+threadIdx.x;
float x1=0,y1=0,z1=0,xn1=0,yn1=0,zn1=0;
if (k<n){
x1=xyz1[i*n*6+k*6+0];
y1=xyz1[i*n*6+k*6+1];
z1=xyz1[i*n*6+k*6+2];
xn1=xyz1[i*n*6+k*6+3];
yn1=xyz1[i*n*6+k*6+4];
zn1=xyz1[i*n*6+k*6+5];
}
float suml=0;
for (int l0=0;l0<m;l0+=Block){
int lend=min(m,l0+Block)-l0;
for (int l=threadIdx.x;l<lend;l+=blockDim.x){
buf[l*7+0]=xyz2[i*m*6+l0*6+l*6+0];
buf[l*7+1]=xyz2[i*m*6+l0*6+l*6+1];
buf[l*7+2]=xyz2[i*m*6+l0*6+l*6+2];
buf[l*7+3]=xyz2[i*m*6+l0*6+l*6+3];
buf[l*7+4]=xyz2[i*m*6+l0*6+l*6+4];
buf[l*7+5]=xyz2[i*m*6+l0*6+l*6+5];
buf[l*7+6]=ratioR[l0+l];
}
__syncthreads();
float rl=ratioL[k];
if (k<n){
for (int l=0;l<lend;l++){
float x2=buf[l*7+0];
float y2=buf[l*7+1];
float z2=buf[l*7+2];
float xn2=buf[l*7+3];
float yn2=buf[l*7+4];
float zn2=buf[l*7+5];
float w=__expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)+(xn1-xn2)*(xn1-xn2)+(yn1-yn2)*(yn1-yn2)+(zn1-zn2)*(zn1-zn2)))*rl*buf[l*7+6];
match[i*n*m+(l0+l)*n+k]+=w;
suml+=w;
}
}
__syncthreads();
}
if (k<n)
remainL[k]=fmaxf(0.0f,remainL[k]-suml);
}
__syncthreads();
}
}
}
int approxmatch_cuda_forward(int b,int n,int m, const float * xyz1, const float * xyz2, float * match, float * temp){
approxmatch<<<32,512>>>(b,n,m,xyz1,xyz2,match,temp);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in matching: %s\n", cudaGetErrorString(err));
return 0;
}
return 1;
}
__global__ void matchcost(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * __restrict__ out){
__shared__ float allsum[512];
const int Block=1024;
__shared__ float buf[Block*6];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
float subsum=0;
for (int k0=0;k0<n;k0+=blockDim.x){
int k=k0+threadIdx.x;
float x1=0,y1=0,z1=0, xn1=0,yn1=0,zn1=0;
if (k<n){
x1=xyz1[i*n*6+k*6+0];
y1=xyz1[i*n*6+k*6+1];
z1=xyz1[i*n*6+k*6+2];
xn1=xyz1[i*n*6+k*6+3];
yn1=xyz1[i*n*6+k*6+4];
zn1=xyz1[i*n*6+k*6+5];
}
for (int l0=0;l0<m;l0+=Block){
int lend=min(m,l0+Block)-l0;
for (int l=threadIdx.x;l<lend*6;l+=blockDim.x)
buf[l]=xyz2[i*m*6+l0*6+l];
__syncthreads();
if (k<n){
for (int l=0;l<lend;l++){
float x2=buf[l*6+0];
float y2=buf[l*6+1];
float z2=buf[l*6+2];
float xn2=buf[l*6+3];
float yn2=buf[l*6+4];
float zn2=buf[l*6+5];
float d=sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)+(xn1-xn2)*(xn1-xn2)+(yn1-yn2)*(yn1-yn2)+(zn1-zn2)*(zn1-zn2));
subsum+=d*match[i*n*m+(l0+l)*n+k];
}
}
__syncthreads();
}
}
allsum[threadIdx.x]=subsum;
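		// Tree reduction across the block: after log2(blockDim.x) passes, allsum[0] holds the total cost for this batch element.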
for (int j=1;j<blockDim.x;j<<=1){
__syncthreads();
if ((threadIdx.x&j)==0 && threadIdx.x+j<blockDim.x){
allsum[threadIdx.x]+=allsum[threadIdx.x+j];
}
}
if (threadIdx.x==0)
out[i]=allsum[0];
__syncthreads();
}
}
int matchcost_cuda_forward(int b,int n,int m,const float * xyz1,const float * xyz2,const float * match,float * out){
matchcost<<<32,512>>>(b,n,m,xyz1,xyz2,match,out);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in emd updateOutput: %s\n", cudaGetErrorString(err));
//THError("aborting");
return 0;
}
return 1;
}
__global__ void matchcostgrad2(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * __restrict__ grad2){
__shared__ float sum_grad[256*6];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
int kbeg=m*blockIdx.y/gridDim.y;
int kend=m*(blockIdx.y+1)/gridDim.y;
for (int k=kbeg;k<kend;k++){
float x2=xyz2[(i*m+k)*6+0];
float y2=xyz2[(i*m+k)*6+1];
float z2=xyz2[(i*m+k)*6+2];
float xn2=xyz2[(i*m+k)*6+3];
float yn2=xyz2[(i*m+k)*6+4];
float zn2=xyz2[(i*m+k)*6+5];
float subsumx=0,subsumy=0,subsumz=0,subsumxn=0,subsumyn=0,subsumzn=0;
for (int j=threadIdx.x;j<n;j+=blockDim.x){
float x1=x2-xyz1[(i*n+j)*6+0];
float y1=y2-xyz1[(i*n+j)*6+1];
float z1=z2-xyz1[(i*n+j)*6+2];
float xn1=xn2-xyz1[(i*n+j)*6+3];
float yn1=yn2-xyz1[(i*n+j)*6+4];
float zn1=zn2-xyz1[(i*n+j)*6+5];
float d=match[i*n*m+k*n+j]*rsqrtf(fmaxf(x1*x1+y1*y1+z1*z1+xn1*xn1+yn1*yn1+zn1*zn1,1e-20f));
subsumx+=x1*d;
subsumy+=y1*d;
subsumz+=z1*d;
subsumxn+=xn1*d;
subsumyn+=yn1*d;
subsumzn+=zn1*d;
}
sum_grad[threadIdx.x*6+0]=subsumx;
sum_grad[threadIdx.x*6+1]=subsumy;
sum_grad[threadIdx.x*6+2]=subsumz;
sum_grad[threadIdx.x*6+3]=subsumxn;
sum_grad[threadIdx.x*6+4]=subsumyn;
sum_grad[threadIdx.x*6+5]=subsumzn;
for (int j=1;j<blockDim.x;j<<=1){
__syncthreads();
int j1=threadIdx.x;
int j2=threadIdx.x+j;
if ((j1&j)==0 && j2<blockDim.x){
sum_grad[j1*6+0]+=sum_grad[j2*6+0];
sum_grad[j1*6+1]+=sum_grad[j2*6+1];
sum_grad[j1*6+2]+=sum_grad[j2*6+2];
sum_grad[j1*6+3]+=sum_grad[j2*6+3];
sum_grad[j1*6+4]+=sum_grad[j2*6+4];
sum_grad[j1*6+5]+=sum_grad[j2*6+5];
}
}
if (threadIdx.x==0){
grad2[(i*m+k)*6+0]=sum_grad[0];
grad2[(i*m+k)*6+1]=sum_grad[1];
grad2[(i*m+k)*6+2]=sum_grad[2];
grad2[(i*m+k)*6+3]=sum_grad[3];
grad2[(i*m+k)*6+4]=sum_grad[4];
grad2[(i*m+k)*6+5]=sum_grad[5];
}
__syncthreads();
}
}
}
__global__ void matchcostgrad1(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * __restrict__ grad1){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int l=threadIdx.x;l<n;l+=blockDim.x){
float x1=xyz1[i*n*6+l*6+0];
float y1=xyz1[i*n*6+l*6+1];
float z1=xyz1[i*n*6+l*6+2];
float xn1=xyz1[i*n*6+l*6+3];
float yn1=xyz1[i*n*6+l*6+4];
float zn1=xyz1[i*n*6+l*6+5];
float dx=0,dy=0,dz=0,dxn=0,dyn=0,dzn=0;
for (int k=0;k<m;k++){
float x2=xyz2[i*m*6+k*6+0];
float y2=xyz2[i*m*6+k*6+1];
float z2=xyz2[i*m*6+k*6+2];
float xn2=xyz2[i*m*6+k*6+3];
float yn2=xyz2[i*m*6+k*6+4];
float zn2=xyz2[i*m*6+k*6+5];
float d=match[i*n*m+k*n+l]*rsqrtf(fmaxf((x1-x2)*(x1-x2)+(y1-y2)*(y1-y2)+(z1-z2)*(z1-z2)+(xn1-xn2)*(xn1-xn2)+(yn1-yn2)*(yn1-yn2)+(zn1-zn2)*(zn1-zn2),1e-20f));
dx+=(x1-x2)*d;
dy+=(y1-y2)*d;
dz+=(z1-z2)*d;
dxn+=(xn1-xn2)*d;
dyn+=(yn1-yn2)*d;
dzn+=(zn1-zn2)*d;
}
grad1[i*n*6+l*6+0]=dx;
grad1[i*n*6+l*6+1]=dy;
grad1[i*n*6+l*6+2]=dz;
grad1[i*n*6+l*6+3]=dxn;
grad1[i*n*6+l*6+4]=dyn;
grad1[i*n*6+l*6+5]=dzn;
}
}
}
int matchcost_cuda_backward(int b,int n,int m,const float * xyz1,const float * xyz2,const float * match,float * grad1,float * grad2){
matchcostgrad1<<<32,512>>>(b,n,m,xyz1,xyz2,match,grad1);
matchcostgrad2<<<dim3(32,32),256>>>(b,n,m,xyz1,xyz2,match,grad2);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in emd backward: %s\n", cudaGetErrorString(err));
//THError("aborting");
return 0;
}
return 1;
}
|
6afb0a46643141d35225003b3ce3757546a8d944.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/TensorUtils.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/detail/KernelUtils.h>
#include <c10/util/Exception.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/max_unpool2d_native.h>
#include <ATen/ops/max_unpool2d_backward_native.h>
#include <ATen/ops/max_unpool3d_native.h>
#include <ATen/ops/max_unpool3d_backward_native.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/empty_like.h>
#endif
namespace at {
namespace native {
using namespace at::cuda::detail;
template <typename T>
__host__ __device__ __forceinline__ T ceilDiv(T a, T b) {
return (a + b - 1) / b;
}
template <typename T>
__global__ void max_unpooling2d_forward_kernel(
const int64_t numInputElements,
const T* input,
const int64_t* indices,
const int64_t numChannels,
const int64_t inputHeight,
const int64_t inputWidth,
const int64_t outputHeight,
const int64_t outputWidth,
T* output) {
CUDA_KERNEL_LOOP(linearIndex, numInputElements) {
int c = (linearIndex / inputWidth / inputHeight) % numChannels;
int n = linearIndex / inputWidth / inputHeight / numChannels;
output += (n * numChannels + c) * outputHeight * outputWidth;
int maxind = indices[linearIndex];
output[maxind] = input[linearIndex];
}
}
template <typename T>
__global__ void max_unpooling3d_forward_kernel(
PackedTensorAccessor64<T, 4> input,
PackedTensorAccessor64<int64_t, 4> indices,
T* output,
const int64_t oT,
const int64_t oH,
const int64_t oW,
const int64_t offsetZ) {
int64_t iColumn = blockIdx.x * blockDim.x + threadIdx.x;
int64_t iRow = blockIdx.y * blockDim.y + threadIdx.y;
int64_t iFrame = (blockIdx.z + offsetZ) % input.size(1); // input frame/time
int64_t slice = (blockIdx.z + offsetZ) / input.size(1); // input slice/feature
if (iRow < input.size(2) && iColumn < input.size(3)) {
T val = input[slice][iFrame][iRow][iColumn];
int64_t index = indices[slice][iFrame][iRow][iColumn];
output[slice * oT * oH * oW + index] = val;
}
}
template <typename T>
__global__ void max_unpooling2d_backward_kernel(
const int64_t numInputElements,
const T* input,
const int64_t* indices,
const int64_t numChannels,
const int64_t inputHeight,
const int64_t inputWidth,
const int64_t outputHeight,
const int64_t outputWidth,
T* output) {
CUDA_KERNEL_LOOP(linearIndex, numInputElements) {
int c = (linearIndex / inputWidth / inputHeight) % numChannels;
int n = linearIndex / inputWidth / inputHeight / numChannels;
input += (n * numChannels + c) * outputHeight * outputWidth;
int maxind = indices[linearIndex];
output[linearIndex] = input[maxind];
}
}
template <typename T>
__global__ void max_unpooling3d_backward_kernel(
T* gradOutputData,
int64_t oT,
int64_t oH,
int64_t oW,
PackedTensorAccessor64<int64_t, 4> indices,
PackedTensorAccessor64<T, 4> gradInput,
int offsetZ) {
int iColumn = blockIdx.x * blockDim.x + threadIdx.x;
int iRow = blockIdx.y * blockDim.y + threadIdx.y;
int iFrame = (blockIdx.z + offsetZ) % gradInput.size(1); // output frame/time
int slice =
(blockIdx.z + offsetZ) / gradInput.size(1); // output slice/feature
if (iRow < gradInput.size(2) && iColumn < gradInput.size(3)) {
int64_t index = indices[slice][iFrame][iRow][iColumn];
T grad_val = gradOutputData[slice * oT * oH * oW + index];
gradInput[slice][iFrame][iRow][iColumn] = grad_val;
}
}
Tensor& max_unpooling2d_forward_out_cuda(const Tensor& self_,
const Tensor& indices_,
IntArrayRef output_size,
Tensor& output) {
TORCH_CHECK(output.is_contiguous(), "output must be contiguous");
TORCH_CHECK(
indices_.scalar_type() == at::ScalarType::Long,
"elements in indices should be type int64 but got: ", indices_.scalar_type());
auto oheight = output_size[0];
auto owidth = output_size[1];
TensorArg output_arg{output, "output", 1}, self_arg{self_, "self_", 2},
indices_arg{indices_, "indices_", 3};
checkAllSameGPU(
"max_unpooling2d_forward_out_cuda", {output_arg, self_arg, indices_arg});
for (int64_t i = 1; i < self_.ndimension(); ++i) {
TORCH_CHECK(self_.size(i) > 0, "max_unpooling2d_forward_out_cuda(): ",
"Expected input to have non-zero size for non-batch dimensions, but got ",
self_.sizes(), " with dimension ", i , " being empty.");
}
TORCH_CHECK(
(self_.ndimension() == 3 || self_.ndimension() == 4),
"Input to max_unpooling2d should be a 3d or 4d Tensor, but got tensor with dimension: ", self_.ndimension());
TORCH_CHECK(
self_.sizes() == indices_.sizes(),
"Expected shape of indices to be: ", self_.sizes(), " but got: ", indices_.sizes());
TORCH_CHECK(
output_size.size() == 2,
"There should be exactly two elements (width, height) in output_size, but got ", output_size.size(), " elements.");
int64_t dimw = 2;
int64_t dimh = 1;
int64_t numBatch = 1;
int64_t numChannels;
int64_t inputHeight;
int64_t inputWidth;
auto self = self_.contiguous();
auto indices = indices_.contiguous();
if (self.ndimension() == 4) {
numBatch = self.size(0);
dimw++;
dimh++;
}
numChannels = self.size(dimh - 1);
inputHeight = self.size(dimh);
inputWidth = self.size(dimw);
output.resize_({numBatch, numChannels, oheight, owidth});
output.zero_();
auto count = self.numel();
if (count != 0) {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half,
self.scalar_type(), "max_unpooling2d_forward_kernel", ([&] {
hipLaunchKernelGGL(( max_unpooling2d_forward_kernel),
dim3(GET_BLOCKS(count)),
dim3(CUDA_NUM_THREADS),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
self.numel(),
self.data_ptr<scalar_t>(),
indices.data_ptr<int64_t>(),
numChannels,
inputHeight,
inputWidth,
oheight,
owidth,
output.data_ptr<scalar_t>());
C10_HIP_KERNEL_LAUNCH_CHECK();
}));
}
if (self.ndimension() == 3) {
output.resize_({numChannels, oheight, owidth});
}
return output;
}
Tensor max_unpooling2d_forward_cuda(
const Tensor& self,
const Tensor& indices,
IntArrayRef output_size) {
auto output = at::empty({0}, self.options());
at::native::max_unpooling2d_forward_out_cuda(self, indices, output_size, output);
return output;
}
static void max_unpooling3d_shape_check(
const Tensor& input,
const Tensor& gradOutput,
const Tensor& indices,
IntArrayRef output_size,
IntArrayRef stride,
IntArrayRef padding,
const char *fn_name) {
int64_t oT = output_size[0];
int64_t oH = output_size[1];
int64_t oW = output_size[2];
TORCH_CHECK(
indices.scalar_type() == at::ScalarType::Long,
"elements in indices should be type int64 but got: ", indices.scalar_type());
TORCH_CHECK(
(input.ndimension() == 4 || input.ndimension() == 5),
"Input to max_unpooling3d should be a 4d or 5d Tensor, but got a tensor with dim ", input.ndimension());
TORCH_CHECK(
output_size.size() == 3,
"There should be exactly three elements (depth, height, width) in output_size, but got ", output_size.size(), " elements.");
TORCH_CHECK(
stride.size() == 3,
"There should be exactly three elements (depth, height, width) in stride, but got: ", stride.size(), " elements.");
TORCH_CHECK(
padding.size() == 3,
"There should be exactly three elements (depth, height, width) in padding, but got: ", padding.size(), " elements.");
TORCH_CHECK(
input.sizes() == indices.sizes(),
"Expected shape of indices to be: ", input.sizes(), " but got: ", indices.sizes());
for (int64_t i = 1; i < input.ndimension(); ++i) {
TORCH_CHECK(input.size(i) > 0, fn_name,
": Expected input to have non-zero size for non-batch dimensions, but got ",
input.sizes(), " with dimension ", i , " being empty.");
}
TORCH_CHECK(
stride[0] > 0 && stride[1] > 0 && stride[2] > 0,
"strides should be greater than zero, but got stride: ",
stride);
int dimw = 3;
int dimh = 2;
int dimt = 1;
int dimn = 0;
if (input.ndimension() == 5) {
dimw++;
dimh++;
dimt++;
dimn++;
}
int nslices = input.size(dimn);
if (gradOutput.defined()) {
if (oT != gradOutput.size(dimt) || oH != gradOutput.size(dimh) ||
oW != gradOutput.size(dimw)) {
AT_ERROR(
"Inconsistent gradOutput size. oT= ",
oT,
", oH= ",
oH,
", oW= ",
oW,
". gradOutput: ",
gradOutput.size(dimt),
"x",
gradOutput.size(dimh),
"x",
gradOutput.size(dimw));
}
TORCH_CHECK(
gradOutput.ndimension() == input.ndimension() &&
gradOutput.size(dimn) == nslices,
"gradOutput and input Tensors should have same number of dimensions and also the same number of channels/slices");
}
}
Tensor& max_unpooling3d_forward_out_cuda(const Tensor& self_,
const Tensor& indices_,
IntArrayRef output_size,
IntArrayRef stride,
IntArrayRef padding,
Tensor& output) {
TORCH_CHECK(output.is_contiguous(), "output must be contiguous");
max_unpooling3d_shape_check(
self_, Tensor(), indices_, output_size, stride, padding, "max_unpooling3d_forward_out_cuda()");
int64_t oT = output_size[0];
int64_t oH = output_size[1];
int64_t oW = output_size[2];
TensorArg output_arg{output, "output", 1}, self_arg{self_, "self_", 2},
indices_arg{indices_, "indices_", 3};
checkAllSameGPU(
"max_unpooling3d_forward_out_cuda", {output_arg, self_arg, indices_arg});
auto self = self_.contiguous();
auto indices = indices_.contiguous();
int64_t batchSize;
int64_t inputSlices;
int64_t inputTime;
int64_t inputHeight;
int64_t inputWidth;
if (self.ndimension() == 4) {
batchSize = 1;
inputSlices = self.size(0);
inputTime = self.size(1);
inputHeight = self.size(2);
inputWidth = self.size(3);
output.resize_({inputSlices, oT, oH, oW});
} else {
batchSize = self.size(0);
inputSlices = self.size(1);
inputTime = self.size(2);
inputHeight = self.size(3);
inputWidth = self.size(4);
output.resize_({batchSize, inputSlices, oT, oH, oW});
}
output.zero_();
// Collapse batch and feature dimensions if needed
if (self.ndimension() == 5) {
self = self.reshape({self.size(0) * self.size(1),
self.size(2),
self.size(3),
self.size(4)});
indices = indices.reshape({indices.size(0) * indices.size(1),
indices.size(2),
indices.size(3),
indices.size(4)});
}
if (self.numel() == 0) {
return output;
}
int totalZ = inputTime * inputSlices * batchSize;
int offsetZ = 0;
dim3 block(32, 8);
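  // gridDim.z is limited to 65535, so the kernel is launched repeatedly over chunks of slices, advancing offsetZ each pass.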
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half,
self.scalar_type(), "max_unpooling3d_forward_kernel", ([&] {
while (totalZ > 0) {
dim3 grid(
ceilDiv(inputWidth, static_cast<int64_t>(block.x)),
ceilDiv(inputHeight, static_cast<int64_t>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
hipLaunchKernelGGL(( max_unpooling3d_forward_kernel),
dim3(grid),
dim3(block),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
self.packed_accessor64<scalar_t, 4>(),
indices.packed_accessor64<int64_t, 4>(),
output.data_ptr<scalar_t>(),
oT,
oH,
oW,
offsetZ);
C10_HIP_KERNEL_LAUNCH_CHECK();
totalZ -= 65535;
offsetZ += 65535;
}
}));
return output;
}
Tensor max_unpooling3d_forward_cuda(
const Tensor& self,
const Tensor& indices,
IntArrayRef output_size,
IntArrayRef stride,
IntArrayRef padding) {
auto output = at::empty({0}, self.options());
at::native::max_unpooling3d_forward_out_cuda(
self, indices, output_size, stride, padding, output);
return output;
}
at::Tensor& max_unpooling2d_backward_out_cuda(const Tensor& grad_output_,
const Tensor& self_,
const Tensor& indices_,
IntArrayRef output_size,
Tensor& grad_input) {
int64_t oheight = output_size[0];
int64_t owidth = output_size[1];
TORCH_CHECK(grad_input.is_contiguous(), "grad_input must be contiguous");
TORCH_CHECK(
indices_.scalar_type() == at::ScalarType::Long,
"elements in indices should be type int64 but got type: ", indices_.scalar_type());
TensorArg grad_input_arg{grad_input, "grad_input", 1},
grad_output_arg{grad_output_, "grad_output_", 2},
self_arg{self_, "self_", 3}, indices_arg{indices_, "indices_", 4};
checkAllSameGPU(
"max_unpooling2d_backward_out_cuda",
{grad_input_arg, grad_output_arg, self_arg, indices_arg});
TORCH_CHECK(
(self_.ndimension() == 3 || self_.ndimension() == 4),
"Input to max_unpooling2d should be a 3d or 4d Tensor, instead got: ",
self_);
TORCH_CHECK(
self_.sizes() == indices_.sizes(),
"Expected shape of indices to be: ", self_.sizes(), " but got: ", indices_.sizes());
TORCH_CHECK(output_size.size() == 2, "output_size must have two elements, got size: ", output_size.size());
int64_t nInputCols, nInputRows, nInputPlane;
int dimw = 2;
int dimh = 1;
auto self = self_.contiguous();
auto indices = indices_.contiguous();
auto grad_output = grad_output_.contiguous();
if (self.ndimension() == 3) {
nInputPlane = self.size(0);
} else {
++dimw;
++dimh;
nInputPlane = self.size(1);
}
nInputCols = self.size(dimw);
nInputRows = self.size(dimh);
if (oheight != grad_output.size(dimh) || owidth != grad_output.size(dimw)) {
AT_ERROR(
"Inconsistent gradOutput size. output height: ",
oheight,
", output width= ",
owidth,
", gradOutput: ",
grad_output.size(dimh),
"x",
grad_output.size(dimw));
}
grad_input.resize_as_(self);
grad_input.zero_();
int64_t count = self.numel();
if (count == 0) {
return grad_input;
}
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half,
self.scalar_type(), "max_unpooling2d_backward_kernel", ([&] {
hipLaunchKernelGGL(( max_unpooling2d_backward_kernel),
dim3(GET_BLOCKS(count)),
dim3(CUDA_NUM_THREADS),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
count,
grad_output.data_ptr<scalar_t>(),
indices.data_ptr<int64_t>(),
nInputPlane,
nInputRows,
nInputCols,
oheight,
owidth,
grad_input.data_ptr<scalar_t>());
C10_HIP_KERNEL_LAUNCH_CHECK();
}));
return grad_input;
}
at::Tensor max_unpooling2d_backward_cuda(
const Tensor& grad_output,
const Tensor& self,
const Tensor& indices,
IntArrayRef output_size) {
auto grad_input = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
at::native::max_unpooling2d_backward_out_cuda(
grad_output, self, indices, output_size, grad_input);
return grad_input;
}
at::Tensor& max_unpooling3d_backward_out_cuda(const Tensor& grad_output_,
const Tensor& self_,
const Tensor& indices_,
IntArrayRef output_size,
IntArrayRef stride,
IntArrayRef padding,
Tensor& grad_input) {
TORCH_CHECK(grad_input.is_contiguous(), "grad_input must be contiguous");
int64_t oT = output_size[0];
int64_t oH = output_size[1];
int64_t oW = output_size[2];
max_unpooling3d_shape_check(
self_, grad_output_, indices_, output_size, stride, padding, "max_unpooling3d_backward_out_cuda()");
int batchSize = 0;
int inputSlices = 0;
int inputTime = 0;
int64_t inputHeight = 0;
int64_t inputWidth = 0;
TensorArg self_arg{self_, "self_", 1}, indices_arg{indices_, "indices_", 2},
grad_output_arg{grad_output_, "grad_output_", 3},
grad_input_arg{grad_input, "grad_input", 4};
checkAllSameGPU(
"max_unpooling3d_backward_out_cuda",
{self_arg, indices_arg, grad_output_arg, grad_input_arg});
auto self = self_.contiguous();
auto indices = indices_.contiguous();
auto grad_output = grad_output_.contiguous();
if (self.ndimension() == 4) {
batchSize = 1;
inputSlices = self.size(0);
inputTime = self.size(1);
inputHeight = self.size(2);
inputWidth = self.size(3);
} else {
batchSize = self.size(0);
inputSlices = self.size(1);
inputTime = self.size(2);
inputHeight = self.size(3);
inputWidth = self.size(4);
}
grad_input.resize_as_(self);
grad_input.zero_();
// Collapse batch and feature dimensions if needed
auto grad_input_reshaped = grad_input;
if (grad_input.ndimension() == 5) {
grad_input_reshaped =
grad_input.reshape({grad_input.size(0) * grad_input.size(1),
grad_input.size(2),
grad_input.size(3),
grad_input.size(4)});
indices = indices.reshape({indices.size(0) * indices.size(1),
indices.size(2),
indices.size(3),
indices.size(4)});
}
if (grad_input.numel() == 0) {
return grad_input;
}
int totalZ = inputTime * inputSlices * batchSize;
int offsetZ = 0;
dim3 block(32, 8);
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half,
self.scalar_type(), "max_unpooling3d_backward_kernel", ([&] {
while (totalZ > 0) {
dim3 grid(
ceilDiv(inputWidth, static_cast<int64_t>(block.x)),
ceilDiv(inputHeight, static_cast<int64_t>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
hipLaunchKernelGGL(( max_unpooling3d_backward_kernel),
dim3(grid),
dim3(block),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
grad_output.data_ptr<scalar_t>(),
oT,
oH,
oW,
indices.packed_accessor64<int64_t, 4>(),
grad_input_reshaped.packed_accessor64<scalar_t, 4>(),
offsetZ);
C10_HIP_KERNEL_LAUNCH_CHECK();
totalZ -= 65535;
offsetZ += 65535;
}
}));
return grad_input;
}
at::Tensor max_unpooling3d_backward_cuda(
const Tensor& grad_output,
const Tensor& self,
const Tensor& indices,
IntArrayRef output_size,
IntArrayRef stride,
IntArrayRef padding) {
auto grad_input = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
at::native::max_unpooling3d_backward_out_cuda(
grad_output, self, indices, output_size, stride, padding, grad_input);
return grad_input;
}
} // namespace native
} // namespace at
| 6afb0a46643141d35225003b3ce3757546a8d944.cu | #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/TensorUtils.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/detail/KernelUtils.h>
#include <c10/util/Exception.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/max_unpool2d_native.h>
#include <ATen/ops/max_unpool2d_backward_native.h>
#include <ATen/ops/max_unpool3d_native.h>
#include <ATen/ops/max_unpool3d_backward_native.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/empty_like.h>
#endif
namespace at {
namespace native {
using namespace at::cuda::detail;
template <typename T>
__host__ __device__ __forceinline__ T ceilDiv(T a, T b) {
return (a + b - 1) / b;
}
template <typename T>
__global__ void max_unpooling2d_forward_kernel(
const int64_t numInputElements,
const T* input,
const int64_t* indices,
const int64_t numChannels,
const int64_t inputHeight,
const int64_t inputWidth,
const int64_t outputHeight,
const int64_t outputWidth,
T* output) {
CUDA_KERNEL_LOOP(linearIndex, numInputElements) {
int c = (linearIndex / inputWidth / inputHeight) % numChannels;
int n = linearIndex / inputWidth / inputHeight / numChannels;
output += (n * numChannels + c) * outputHeight * outputWidth;
int maxind = indices[linearIndex];
output[maxind] = input[linearIndex];
}
}
template <typename T>
__global__ void max_unpooling3d_forward_kernel(
PackedTensorAccessor64<T, 4> input,
PackedTensorAccessor64<int64_t, 4> indices,
T* output,
const int64_t oT,
const int64_t oH,
const int64_t oW,
const int64_t offsetZ) {
int64_t iColumn = blockIdx.x * blockDim.x + threadIdx.x;
int64_t iRow = blockIdx.y * blockDim.y + threadIdx.y;
int64_t iFrame = (blockIdx.z + offsetZ) % input.size(1); // input frame/time
int64_t slice = (blockIdx.z + offsetZ) / input.size(1); // input slice/feature
if (iRow < input.size(2) && iColumn < input.size(3)) {
T val = input[slice][iFrame][iRow][iColumn];
int64_t index = indices[slice][iFrame][iRow][iColumn];
output[slice * oT * oH * oW + index] = val;
}
}
template <typename T>
__global__ void max_unpooling2d_backward_kernel(
const int64_t numInputElements,
const T* input,
const int64_t* indices,
const int64_t numChannels,
const int64_t inputHeight,
const int64_t inputWidth,
const int64_t outputHeight,
const int64_t outputWidth,
T* output) {
CUDA_KERNEL_LOOP(linearIndex, numInputElements) {
int c = (linearIndex / inputWidth / inputHeight) % numChannels;
int n = linearIndex / inputWidth / inputHeight / numChannels;
input += (n * numChannels + c) * outputHeight * outputWidth;
int maxind = indices[linearIndex];
output[linearIndex] = input[maxind];
}
}
template <typename T>
__global__ void max_unpooling3d_backward_kernel(
T* gradOutputData,
int64_t oT,
int64_t oH,
int64_t oW,
PackedTensorAccessor64<int64_t, 4> indices,
PackedTensorAccessor64<T, 4> gradInput,
int offsetZ) {
int iColumn = blockIdx.x * blockDim.x + threadIdx.x;
int iRow = blockIdx.y * blockDim.y + threadIdx.y;
int iFrame = (blockIdx.z + offsetZ) % gradInput.size(1); // output frame/time
int slice =
(blockIdx.z + offsetZ) / gradInput.size(1); // output slice/feature
if (iRow < gradInput.size(2) && iColumn < gradInput.size(3)) {
int64_t index = indices[slice][iFrame][iRow][iColumn];
T grad_val = gradOutputData[slice * oT * oH * oW + index];
gradInput[slice][iFrame][iRow][iColumn] = grad_val;
}
}
Tensor& max_unpooling2d_forward_out_cuda(const Tensor& self_,
const Tensor& indices_,
IntArrayRef output_size,
Tensor& output) {
TORCH_CHECK(output.is_contiguous(), "output must be contiguous");
TORCH_CHECK(
indices_.scalar_type() == at::ScalarType::Long,
"elements in indices should be type int64 but got: ", indices_.scalar_type());
auto oheight = output_size[0];
auto owidth = output_size[1];
TensorArg output_arg{output, "output", 1}, self_arg{self_, "self_", 2},
indices_arg{indices_, "indices_", 3};
checkAllSameGPU(
"max_unpooling2d_forward_out_cuda", {output_arg, self_arg, indices_arg});
for (int64_t i = 1; i < self_.ndimension(); ++i) {
TORCH_CHECK(self_.size(i) > 0, "max_unpooling2d_forward_out_cuda(): ",
"Expected input to have non-zero size for non-batch dimensions, but got ",
self_.sizes(), " with dimension ", i , " being empty.");
}
TORCH_CHECK(
(self_.ndimension() == 3 || self_.ndimension() == 4),
"Input to max_unpooling2d should be a 3d or 4d Tensor, but got tensor with dimension: ", self_.ndimension());
TORCH_CHECK(
self_.sizes() == indices_.sizes(),
"Expected shape of indices to be: ", self_.sizes(), " but got: ", indices_.sizes());
TORCH_CHECK(
output_size.size() == 2,
"There should be exactly two elements (width, height) in output_size, but got ", output_size.size(), " elements.");
int64_t dimw = 2;
int64_t dimh = 1;
int64_t numBatch = 1;
int64_t numChannels;
int64_t inputHeight;
int64_t inputWidth;
auto self = self_.contiguous();
auto indices = indices_.contiguous();
if (self.ndimension() == 4) {
numBatch = self.size(0);
dimw++;
dimh++;
}
numChannels = self.size(dimh - 1);
inputHeight = self.size(dimh);
inputWidth = self.size(dimw);
output.resize_({numBatch, numChannels, oheight, owidth});
output.zero_();
auto count = self.numel();
if (count != 0) {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half,
self.scalar_type(), "max_unpooling2d_forward_kernel", ([&] {
max_unpooling2d_forward_kernel<<<
GET_BLOCKS(count),
CUDA_NUM_THREADS,
0,
at::cuda::getCurrentCUDAStream()>>>(
self.numel(),
self.data_ptr<scalar_t>(),
indices.data_ptr<int64_t>(),
numChannels,
inputHeight,
inputWidth,
oheight,
owidth,
output.data_ptr<scalar_t>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
}));
}
if (self.ndimension() == 3) {
output.resize_({numChannels, oheight, owidth});
}
return output;
}
Tensor max_unpooling2d_forward_cuda(
const Tensor& self,
const Tensor& indices,
IntArrayRef output_size) {
auto output = at::empty({0}, self.options());
at::native::max_unpooling2d_forward_out_cuda(self, indices, output_size, output);
return output;
}
static void max_unpooling3d_shape_check(
const Tensor& input,
const Tensor& gradOutput,
const Tensor& indices,
IntArrayRef output_size,
IntArrayRef stride,
IntArrayRef padding,
const char *fn_name) {
int64_t oT = output_size[0];
int64_t oH = output_size[1];
int64_t oW = output_size[2];
TORCH_CHECK(
indices.scalar_type() == at::ScalarType::Long,
"elements in indices should be type int64 but got: ", indices.scalar_type());
TORCH_CHECK(
(input.ndimension() == 4 || input.ndimension() == 5),
"Input to max_unpooling3d should be a 4d or 5d Tensor, but got a tensor with dim ", input.ndimension());
TORCH_CHECK(
output_size.size() == 3,
"There should be exactly three elements (depth, height, width) in output_size, but got ", output_size.size(), " elements.");
TORCH_CHECK(
stride.size() == 3,
"There should be exactly three elements (depth, height, width) in stride, but got: ", stride.size(), " elements.");
TORCH_CHECK(
padding.size() == 3,
"There should be exactly three elements (depth, height, width) in padding, but got: ", padding.size(), " elements.");
TORCH_CHECK(
input.sizes() == indices.sizes(),
"Expected shape of indices to be: ", input.sizes(), " but got: ", indices.sizes());
for (int64_t i = 1; i < input.ndimension(); ++i) {
TORCH_CHECK(input.size(i) > 0, fn_name,
": Expected input to have non-zero size for non-batch dimensions, but got ",
input.sizes(), " with dimension ", i , " being empty.");
}
TORCH_CHECK(
stride[0] > 0 && stride[1] > 0 && stride[2] > 0,
"strides should be greater than zero, but got stride: ",
stride);
int dimw = 3;
int dimh = 2;
int dimt = 1;
int dimn = 0;
if (input.ndimension() == 5) {
dimw++;
dimh++;
dimt++;
dimn++;
}
int nslices = input.size(dimn);
if (gradOutput.defined()) {
if (oT != gradOutput.size(dimt) || oH != gradOutput.size(dimh) ||
oW != gradOutput.size(dimw)) {
AT_ERROR(
"Inconsistent gradOutput size. oT= ",
oT,
", oH= ",
oH,
", oW= ",
oW,
". gradOutput: ",
gradOutput.size(dimt),
"x",
gradOutput.size(dimh),
"x",
gradOutput.size(dimw));
}
TORCH_CHECK(
gradOutput.ndimension() == input.ndimension() &&
gradOutput.size(dimn) == nslices,
"gradOutput and input Tensors should have same number of dimensions and also the same number of channels/slices");
}
}
Tensor& max_unpooling3d_forward_out_cuda(const Tensor& self_,
const Tensor& indices_,
IntArrayRef output_size,
IntArrayRef stride,
IntArrayRef padding,
Tensor& output) {
TORCH_CHECK(output.is_contiguous(), "output must be contiguous");
max_unpooling3d_shape_check(
self_, Tensor(), indices_, output_size, stride, padding, "max_unpooling3d_forward_out_cuda()");
int64_t oT = output_size[0];
int64_t oH = output_size[1];
int64_t oW = output_size[2];
TensorArg output_arg{output, "output", 1}, self_arg{self_, "self_", 2},
indices_arg{indices_, "indices_", 3};
checkAllSameGPU(
"max_unpooling3d_forward_out_cuda", {output_arg, self_arg, indices_arg});
auto self = self_.contiguous();
auto indices = indices_.contiguous();
int64_t batchSize;
int64_t inputSlices;
int64_t inputTime;
int64_t inputHeight;
int64_t inputWidth;
if (self.ndimension() == 4) {
batchSize = 1;
inputSlices = self.size(0);
inputTime = self.size(1);
inputHeight = self.size(2);
inputWidth = self.size(3);
output.resize_({inputSlices, oT, oH, oW});
} else {
batchSize = self.size(0);
inputSlices = self.size(1);
inputTime = self.size(2);
inputHeight = self.size(3);
inputWidth = self.size(4);
output.resize_({batchSize, inputSlices, oT, oH, oW});
}
output.zero_();
// Collapse batch and feature dimensions if needed
if (self.ndimension() == 5) {
self = self.reshape({self.size(0) * self.size(1),
self.size(2),
self.size(3),
self.size(4)});
indices = indices.reshape({indices.size(0) * indices.size(1),
indices.size(2),
indices.size(3),
indices.size(4)});
}
if (self.numel() == 0) {
return output;
}
int totalZ = inputTime * inputSlices * batchSize;
int offsetZ = 0;
dim3 block(32, 8);
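  // gridDim.z is limited to 65535, so the kernel is launched repeatedly over chunks of slices, advancing offsetZ each pass.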
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half,
self.scalar_type(), "max_unpooling3d_forward_kernel", ([&] {
while (totalZ > 0) {
dim3 grid(
ceilDiv(inputWidth, static_cast<int64_t>(block.x)),
ceilDiv(inputHeight, static_cast<int64_t>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
max_unpooling3d_forward_kernel<<<
grid,
block,
0,
at::cuda::getCurrentCUDAStream()>>>(
self.packed_accessor64<scalar_t, 4>(),
indices.packed_accessor64<int64_t, 4>(),
output.data_ptr<scalar_t>(),
oT,
oH,
oW,
offsetZ);
C10_CUDA_KERNEL_LAUNCH_CHECK();
totalZ -= 65535;
offsetZ += 65535;
}
}));
return output;
}
Tensor max_unpooling3d_forward_cuda(
const Tensor& self,
const Tensor& indices,
IntArrayRef output_size,
IntArrayRef stride,
IntArrayRef padding) {
auto output = at::empty({0}, self.options());
at::native::max_unpooling3d_forward_out_cuda(
self, indices, output_size, stride, padding, output);
return output;
}
at::Tensor& max_unpooling2d_backward_out_cuda(const Tensor& grad_output_,
const Tensor& self_,
const Tensor& indices_,
IntArrayRef output_size,
Tensor& grad_input) {
int64_t oheight = output_size[0];
int64_t owidth = output_size[1];
TORCH_CHECK(grad_input.is_contiguous(), "grad_input must be contiguous");
TORCH_CHECK(
indices_.scalar_type() == at::ScalarType::Long,
"elements in indices should be type int64 but got type: ", indices_.scalar_type());
TensorArg grad_input_arg{grad_input, "grad_input", 1},
grad_output_arg{grad_output_, "grad_output_", 2},
self_arg{self_, "self_", 3}, indices_arg{indices_, "indices_", 4};
checkAllSameGPU(
"max_unpooling2d_backward_out_cuda",
{grad_input_arg, grad_output_arg, self_arg, indices_arg});
TORCH_CHECK(
(self_.ndimension() == 3 || self_.ndimension() == 4),
"Input to max_unpooling2d should be a 3d or 4d Tensor, instead got: ",
self_);
TORCH_CHECK(
self_.sizes() == indices_.sizes(),
"Expected shape of indices to be: ", self_.sizes(), " but got: ", indices_.sizes());
TORCH_CHECK(output_size.size() == 2, "output_size must have two elements, got size: ", output_size.size());
int64_t nInputCols, nInputRows, nInputPlane;
int dimw = 2;
int dimh = 1;
auto self = self_.contiguous();
auto indices = indices_.contiguous();
auto grad_output = grad_output_.contiguous();
if (self.ndimension() == 3) {
nInputPlane = self.size(0);
} else {
++dimw;
++dimh;
nInputPlane = self.size(1);
}
nInputCols = self.size(dimw);
nInputRows = self.size(dimh);
if (oheight != grad_output.size(dimh) || owidth != grad_output.size(dimw)) {
AT_ERROR(
"Inconsistent gradOutput size. output height: ",
oheight,
", output width= ",
owidth,
", gradOutput: ",
grad_output.size(dimh),
"x",
grad_output.size(dimw));
}
grad_input.resize_as_(self);
grad_input.zero_();
int64_t count = self.numel();
if (count == 0) {
return grad_input;
}
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half,
self.scalar_type(), "max_unpooling2d_backward_kernel", ([&] {
max_unpooling2d_backward_kernel<<<
GET_BLOCKS(count),
CUDA_NUM_THREADS,
0,
at::cuda::getCurrentCUDAStream()>>>(
count,
grad_output.data_ptr<scalar_t>(),
indices.data_ptr<int64_t>(),
nInputPlane,
nInputRows,
nInputCols,
oheight,
owidth,
grad_input.data_ptr<scalar_t>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
}));
return grad_input;
}
at::Tensor max_unpooling2d_backward_cuda(
const Tensor& grad_output,
const Tensor& self,
const Tensor& indices,
IntArrayRef output_size) {
auto grad_input = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
at::native::max_unpooling2d_backward_out_cuda(
grad_output, self, indices, output_size, grad_input);
return grad_input;
}
at::Tensor& max_unpooling3d_backward_out_cuda(const Tensor& grad_output_,
const Tensor& self_,
const Tensor& indices_,
IntArrayRef output_size,
IntArrayRef stride,
IntArrayRef padding,
Tensor& grad_input) {
TORCH_CHECK(grad_input.is_contiguous(), "grad_input must be contiguous");
int64_t oT = output_size[0];
int64_t oH = output_size[1];
int64_t oW = output_size[2];
max_unpooling3d_shape_check(
self_, grad_output_, indices_, output_size, stride, padding, "max_unpooling3d_backward_out_cuda()");
int batchSize = 0;
int inputSlices = 0;
int inputTime = 0;
int64_t inputHeight = 0;
int64_t inputWidth = 0;
TensorArg self_arg{self_, "self_", 1}, indices_arg{indices_, "indices_", 2},
grad_output_arg{grad_output_, "grad_output_", 3},
grad_input_arg{grad_input, "grad_input", 4};
checkAllSameGPU(
"max_unpooling3d_backward_out_cuda",
{self_arg, indices_arg, grad_output_arg, grad_input_arg});
auto self = self_.contiguous();
auto indices = indices_.contiguous();
auto grad_output = grad_output_.contiguous();
if (self.ndimension() == 4) {
batchSize = 1;
inputSlices = self.size(0);
inputTime = self.size(1);
inputHeight = self.size(2);
inputWidth = self.size(3);
} else {
batchSize = self.size(0);
inputSlices = self.size(1);
inputTime = self.size(2);
inputHeight = self.size(3);
inputWidth = self.size(4);
}
grad_input.resize_as_(self);
grad_input.zero_();
// Collapse batch and feature dimensions if needed
auto grad_input_reshaped = grad_input;
if (grad_input.ndimension() == 5) {
grad_input_reshaped =
grad_input.reshape({grad_input.size(0) * grad_input.size(1),
grad_input.size(2),
grad_input.size(3),
grad_input.size(4)});
indices = indices.reshape({indices.size(0) * indices.size(1),
indices.size(2),
indices.size(3),
indices.size(4)});
}
if (grad_input.numel() == 0) {
return grad_input;
}
int totalZ = inputTime * inputSlices * batchSize;
int offsetZ = 0;
dim3 block(32, 8);
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half,
self.scalar_type(), "max_unpooling3d_backward_kernel", ([&] {
while (totalZ > 0) {
dim3 grid(
ceilDiv(inputWidth, static_cast<int64_t>(block.x)),
ceilDiv(inputHeight, static_cast<int64_t>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
max_unpooling3d_backward_kernel<<<
grid,
block,
0,
at::cuda::getCurrentCUDAStream()>>>(
grad_output.data_ptr<scalar_t>(),
oT,
oH,
oW,
indices.packed_accessor64<int64_t, 4>(),
grad_input_reshaped.packed_accessor64<scalar_t, 4>(),
offsetZ);
C10_CUDA_KERNEL_LAUNCH_CHECK();
totalZ -= 65535;
offsetZ += 65535;
}
}));
return grad_input;
}
at::Tensor max_unpooling3d_backward_cuda(
const Tensor& grad_output,
const Tensor& self,
const Tensor& indices,
IntArrayRef output_size,
IntArrayRef stride,
IntArrayRef padding) {
auto grad_input = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
at::native::max_unpooling3d_backward_out_cuda(
grad_output, self, indices, output_size, stride, padding, grad_input);
return grad_input;
}
} // namespace native
} // namespace at
|
VectorAddtionStreams.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Vector Addition with Streams (Extra Credit)
// Hard deadline : Thu 26 Mar 2015 6:00 AM CST
#include <wb.h>
#define wbCheck(stmt) do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \
return -1; \
} \
} while(0)
__global__ void vecAdd(float * in1, float * in2, float * out, int len) {
//@@ Insert code to implement vector addition here
int i=blockIdx.x*blockDim.x+threadIdx.x;
if(i<len) out[i]=in1[i]+in2[i];
}
int main(int argc, char ** argv) {
// multi-stream host code
hipStream_t stream0,stream1,stream2,stream3;
hipStreamCreate(&stream0);
hipStreamCreate(&stream1);
hipStreamCreate(&stream2);
hipStreamCreate(&stream3);
wbArg_t args;
int inputLength;
float *h_A,*h_B,*h_C;
float *d_A0,*d_B0,*d_C0; // stream 0
float *d_A1,*d_B1,*d_C1; // 1
float *d_A2,*d_B2,*d_C2; // stream 2
float *d_A3,*d_B3,*d_C3; // 3
int n;
int size;
int SegSize;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
h_A = (float *) wbImport(wbArg_getInputFile(args, 0), &inputLength);
h_B = (float *) wbImport(wbArg_getInputFile(args, 1), &inputLength);
h_C = (float *) malloc(inputLength * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
n=inputLength;
SegSize=inputLength/4;
size=n*sizeof(float);
wbCheck(hipMalloc((void **) &d_A0, size));
wbCheck(hipMalloc((void **) &d_B0, size));
wbCheck(hipMalloc((void **) &d_C0, size));
wbCheck(hipMalloc((void **) &d_A1, size));
wbCheck(hipMalloc((void **) &d_B1, size));
wbCheck(hipMalloc((void **) &d_C1, size));
wbCheck(hipMalloc((void **) &d_A2, size));
wbCheck(hipMalloc((void **) &d_B2, size));
wbCheck(hipMalloc((void **) &d_C2, size));
wbCheck(hipMalloc((void **) &d_A3, size));
wbCheck(hipMalloc((void **) &d_B3, size));
wbCheck(hipMalloc((void **) &d_C3, size));
// dim
dim3 DimGrid((n-1)/256+1,1,1);
dim3 DimBlock(256,1,1);
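	// Each iteration handles four consecutive segments, one per stream, so host-device copies and kernel launches can overlap across streams.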
for(int i=0;i<n;i+=SegSize*4)
{
hipMemcpyAsync(d_A0,h_A+i,SegSize*sizeof(float),hipMemcpyHostToDevice,stream0);
hipMemcpyAsync(d_B0,h_B+i,SegSize*sizeof(float),hipMemcpyHostToDevice,stream0);
hipMemcpyAsync(d_A1+i,h_A+i+SegSize,SegSize*sizeof(float),hipMemcpyHostToDevice,stream1);
hipMemcpyAsync(d_B1+i,h_B+i+SegSize,SegSize*sizeof(float),hipMemcpyHostToDevice,stream1);
hipMemcpyAsync(d_A2,h_A+i+2*SegSize,SegSize*sizeof(float),hipMemcpyHostToDevice,stream2);
hipMemcpyAsync(d_B2,h_B+i+2*SegSize,SegSize*sizeof(float),hipMemcpyHostToDevice,stream2);
hipMemcpyAsync(d_A3+i,h_A+i+3*SegSize,SegSize*sizeof(float),hipMemcpyHostToDevice,stream3);
hipMemcpyAsync(d_B3+i,h_B+i+3*SegSize,SegSize*sizeof(float),hipMemcpyHostToDevice,stream3);
hipLaunchKernelGGL(( vecAdd), dim3(DimGrid),dim3(256),0,stream0, d_A0,d_B0,d_C0,n);
hipLaunchKernelGGL(( vecAdd), dim3(DimGrid),dim3(256),0,stream1, d_A1,d_B1,d_C1,n);
hipLaunchKernelGGL(( vecAdd), dim3(DimGrid),dim3(256),0,stream2, d_A2,d_B2,d_C2,n);
hipLaunchKernelGGL(( vecAdd), dim3(DimGrid),dim3(256),0,stream3, d_A3,d_B3,d_C3,n);
hipMemcpyAsync(h_C+i,d_C0,SegSize*sizeof(float),hipMemcpyDeviceToHost,stream0);
hipMemcpyAsync(h_C+i+SegSize,d_C1,SegSize*sizeof(float),hipMemcpyDeviceToHost,stream1);
hipMemcpyAsync(h_C+i+2*SegSize,d_C2,SegSize*sizeof(float),hipMemcpyDeviceToHost,stream2);
hipMemcpyAsync(h_C+i+3*SegSize,d_C3,SegSize*sizeof(float),hipMemcpyDeviceToHost,stream3);
}
hipFree(d_A0);
hipFree(d_B0);
hipFree(d_C0);
hipFree(d_A1);
hipFree(d_B1);
hipFree(d_C1);
hipFree(d_A2);
hipFree(d_B2);
hipFree(d_C2);
hipFree(d_A3);
hipFree(d_B3);
hipFree(d_C3);
wbSolution(args, h_C, inputLength);
free(h_A);
free(h_B);
free(h_C);
return 0;
}
| VectorAddtionStreams.cu | // Vector Addition with Streams (Extra Credit)
// Hard deadline : Thu 26 Mar 2015 6:00 AM CST
#include <wb.h>
#define wbCheck(stmt) do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \
return -1; \
} \
} while(0)
__global__ void vecAdd(float * in1, float * in2, float * out, int len) {
//@@ Insert code to implement vector addition here
int i=blockIdx.x*blockDim.x+threadIdx.x;
if(i<len) out[i]=in1[i]+in2[i];
}
int main(int argc, char ** argv) {
// multi-stream host code
cudaStream_t stream0,stream1,stream2,stream3;
cudaStreamCreate(&stream0);
cudaStreamCreate(&stream1);
cudaStreamCreate(&stream2);
cudaStreamCreate(&stream3);
wbArg_t args;
int inputLength;
float *h_A,*h_B,*h_C;
float *d_A0,*d_B0,*d_C0; // stream 0
float *d_A1,*d_B1,*d_C1; // 1
float *d_A2,*d_B2,*d_C2; // stream 2
float *d_A3,*d_B3,*d_C3; // 3
int n;
int size;
int SegSize;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
h_A = (float *) wbImport(wbArg_getInputFile(args, 0), &inputLength);
h_B = (float *) wbImport(wbArg_getInputFile(args, 1), &inputLength);
h_C = (float *) malloc(inputLength * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
n=inputLength;
SegSize=inputLength/4;
size=n*sizeof(float);
wbCheck(cudaMalloc((void **) &d_A0, size));
wbCheck(cudaMalloc((void **) &d_B0, size));
wbCheck(cudaMalloc((void **) &d_C0, size));
wbCheck(cudaMalloc((void **) &d_A1, size));
wbCheck(cudaMalloc((void **) &d_B1, size));
wbCheck(cudaMalloc((void **) &d_C1, size));
wbCheck(cudaMalloc((void **) &d_A2, size));
wbCheck(cudaMalloc((void **) &d_B2, size));
wbCheck(cudaMalloc((void **) &d_C2, size));
wbCheck(cudaMalloc((void **) &d_A3, size));
wbCheck(cudaMalloc((void **) &d_B3, size));
wbCheck(cudaMalloc((void **) &d_C3, size));
// dim
dim3 DimGrid((n-1)/256+1,1,1);
dim3 DimBlock(256,1,1);
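	// Each iteration handles four consecutive segments, one per stream, so host-device copies and kernel launches can overlap across streams.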
for(int i=0;i<n;i+=SegSize*4)
{
cudaMemcpyAsync(d_A0,h_A+i,SegSize*sizeof(float),cudaMemcpyHostToDevice,stream0);
cudaMemcpyAsync(d_B0,h_B+i,SegSize*sizeof(float),cudaMemcpyHostToDevice,stream0);
cudaMemcpyAsync(d_A1+i,h_A+i+SegSize,SegSize*sizeof(float),cudaMemcpyHostToDevice,stream1);
cudaMemcpyAsync(d_B1+i,h_B+i+SegSize,SegSize*sizeof(float),cudaMemcpyHostToDevice,stream1);
cudaMemcpyAsync(d_A2,h_A+i+2*SegSize,SegSize*sizeof(float),cudaMemcpyHostToDevice,stream2);
cudaMemcpyAsync(d_B2,h_B+i+2*SegSize,SegSize*sizeof(float),cudaMemcpyHostToDevice,stream2);
cudaMemcpyAsync(d_A3+i,h_A+i+3*SegSize,SegSize*sizeof(float),cudaMemcpyHostToDevice,stream3);
cudaMemcpyAsync(d_B3+i,h_B+i+3*SegSize,SegSize*sizeof(float),cudaMemcpyHostToDevice,stream3);
vecAdd<<<DimGrid,256,0,stream0>>>(d_A0,d_B0,d_C0,n);
vecAdd<<<DimGrid,256,0,stream1>>>(d_A1,d_B1,d_C1,n);
vecAdd<<<DimGrid,256,0,stream2>>>(d_A2,d_B2,d_C2,n);
vecAdd<<<DimGrid,256,0,stream3>>>(d_A3,d_B3,d_C3,n);
cudaMemcpyAsync(h_C+i,d_C0,SegSize*sizeof(float),cudaMemcpyDeviceToHost,stream0);
cudaMemcpyAsync(h_C+i+SegSize,d_C1,SegSize*sizeof(float),cudaMemcpyDeviceToHost,stream1);
cudaMemcpyAsync(h_C+i+2*SegSize,d_C2,SegSize*sizeof(float),cudaMemcpyDeviceToHost,stream2);
cudaMemcpyAsync(h_C+i+3*SegSize,d_C3,SegSize*sizeof(float),cudaMemcpyDeviceToHost,stream3);
}
cudaFree(d_A0);
cudaFree(d_B0);
cudaFree(d_C0);
cudaFree(d_A1);
cudaFree(d_B1);
cudaFree(d_C1);
cudaFree(d_A2);
cudaFree(d_B2);
cudaFree(d_C2);
cudaFree(d_A3);
cudaFree(d_B3);
cudaFree(d_C3);
wbSolution(args, h_C, inputLength);
free(h_A);
free(h_B);
free(h_C);
return 0;
}
|
5528ff4aa03d9364ad22f518cfcf482bd0fd7a08.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <vector>
#include <cmath>
#include <algorithm>
#include <cstdlib>
#define N 2048
#define THREADS 32
#define BLOCKS (N / THREADS)
#define SHARED_MEM_SIZE 2048
__global__ void matrixMul(const int *a, const int *b, int *c, size_t n) {
size_t row = blockIdx.y * blockDim.y + threadIdx.y;
size_t col = blockIdx.x * blockDim.x + threadIdx.x;
// Allocate shared (cache) memory
__shared__ int s_a[SHARED_MEM_SIZE];
__shared__ int s_b[SHARED_MEM_SIZE];
/* Shared (L1 Cache) memory capacity is very limited, so in order not
* to get over its boundaries we will divide our multiplication on tiles.
*
* In other words, let us have two vectors:
* a1 = [2, 4, 3, 1]
* b1 = [2, 4, 3, 1]T (where T means Transposed)
* We divide these vectors into 2 tiles, [2, 4], [3, 1] and [2, 4]T, [3, 1]T
* Then we load these first tiles of vectors A and B
* ([2, 4] and [2, 4]T) into s_a and s_b respectively,
* and then do the multiplication [2, 4] * [2, 4]T that will be written in temp. */
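// Index check (with blockDim.x == blockDim.y): in iteration `tile`, thread (ty, tx) loads
// a[row*N + tile + tx] and b[(tile + ty)*N + col], so the k-loop below accumulates
// sum_k a[row][tile+k] * b[tile+k][col] into temp.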
int temp = 0;
for (size_t tile = 0; tile < N; tile += blockDim.x) {
// Every single thread will load a single element there
s_a[threadIdx.y * blockDim.x + threadIdx.x] = a[row * N + tile + threadIdx.x];
s_b[threadIdx.y * blockDim.x + threadIdx.x] = b[tile * N + threadIdx.y * N + col];
__syncthreads();
for (int k = 0; k < blockDim.x; k++) {
// Accumulate results for a single element
temp += s_a[threadIdx.y * blockDim.x + k] * s_b[k * blockDim.x + threadIdx.x];
}
__syncthreads();
}
c[row * N + col] = temp;
}
void matrixMulHost(const int *a, const int *b, int *c, size_t n) {
for (size_t row_a = 0; row_a < N; row_a++) {
for (size_t col_b = 0; col_b < N; col_b++) {
c[row_a * N + col_b] = 0;
for (size_t pair = 0; pair < N; pair++) {
c[row_a * N + col_b] += a[row_a * N + pair] * b[pair * N + col_b];
}
}
}
}
int main() {
int *a, *b, *c;
size_t size = N * N * sizeof(int);
// Allocate host memory
a = (int *)malloc(size);
b = (int *)malloc(size);
c = (int *)malloc(size);
// Initialize matrices
std::generate(a, a + N * N, []() { return rand() % 100; });
std::generate(b, b + N * N, []() { return rand() % 100; });
// Allocate device memory
int *d_a, *d_b, *d_c;
hipMalloc(&d_a, size);
hipMalloc(&d_b, size);
hipMalloc(&d_c, size);
// Copy matrices to device memory
hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
// dim3 struct is used for grids (as we have a 2D matrices)
dim3 thread_grid(THREADS, THREADS);
dim3 block_grid(BLOCKS, BLOCKS);
// Run matrix multiplication on device
hipLaunchKernelGGL(( matrixMul), dim3(block_grid), dim3(thread_grid), 0, 0, d_a, d_b, d_c, N);
// Copy the data back to the host
hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);
// Free memory on device
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
// Free memory on host
free(a);
free(b);
free(c);
return 0;
}
| 5528ff4aa03d9364ad22f518cfcf482bd0fd7a08.cu | #include <iostream>
#include <vector>
#include <cmath>
#define N 2048
#define THREADS 32
#define BLOCKS (N / THREADS)
#define SHARED_MEM_SIZE 2048
__global__ void matrixMul(const int *a, const int *b, int *c, size_t n) {
size_t row = blockIdx.y * blockDim.y + threadIdx.y;
size_t col = blockIdx.x * blockDim.x + threadIdx.x;
// Allocate shared (cache) memory
__shared__ int s_a[SHARED_MEM_SIZE];
__shared__ int s_b[SHARED_MEM_SIZE];
/* Shared (L1 Cache) memory capacity is very limited, so in order not
* to get over its boundaries we will divide our multiplication on tiles.
*
* In other words, let us have two vectors:
* a1 = [2, 4, 3, 1]
* b1 = [2, 4, 3, 1]T (where T means Transposed)
* We divide these vectors into 2 tiles, [2, 4], [3, 1] and [2, 4]T, [3, 1]T
* Then we load these first tiles of vectors A and B
* ([2, 4] and [2, 4]T) into s_a and s_b respectively,
* and then do the multiplication [2, 4] * [2, 4]T that will be written in temp. */
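// Index check (with blockDim.x == blockDim.y): in iteration `tile`, thread (ty, tx) loads
// a[row*N + tile + tx] and b[(tile + ty)*N + col], so the k-loop below accumulates
// sum_k a[row][tile+k] * b[tile+k][col] into temp.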
int temp = 0;
for (size_t tile = 0; tile < N; tile += blockDim.x) {
// Every single thread will load a single element there
s_a[threadIdx.y * blockDim.x + threadIdx.x] = a[row * N + tile + threadIdx.x];
s_b[threadIdx.y * blockDim.x + threadIdx.x] = b[tile * N + threadIdx.y * N + col];
__syncthreads();
for (int k = 0; k < blockDim.x; k++) {
// Accumulate results for a single element
temp += s_a[threadIdx.y * blockDim.x + k] * s_b[k * blockDim.x + threadIdx.x];
}
__syncthreads();
}
c[row * N + col] = temp;
}
void matrixMulHost(const int *a, const int *b, int *c, size_t n) {
for (size_t row_a = 0; row_a < N; row_a++) {
for (size_t col_b = 0; col_b < N; col_b++) {
c[row_a * N + col_b] = 0;
for (size_t pair = 0; pair < N; pair++) {
c[row_a * N + col_b] += a[row_a * N + pair] * b[pair * N + col_b];
}
}
}
}
int main() {
int *a, *b, *c;
size_t size = N * N * sizeof(int);
// Allocate host memory
a = (int *)malloc(size);
b = (int *)malloc(size);
c = (int *)malloc(size);
// Initialize matrices
std::generate(a, a + N * N, []() { return rand() % 100; });
std::generate(b, b + N * N, []() { return rand() % 100; });
// Allocate device memory
int *d_a, *d_b, *d_c;
cudaMalloc(&d_a, size);
cudaMalloc(&d_b, size);
cudaMalloc(&d_c, size);
// Copy matrices to device memory
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
// dim3 struct is used for grids (as we have a 2D matrices)
dim3 thread_grid(THREADS, THREADS);
dim3 block_grid(BLOCKS, BLOCKS);
// Run matrix multiplication on device
matrixMul<<<block_grid, thread_grid>>>(d_a, d_b, d_c, N);
// Copy the data back to the host
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
// Free memory on device
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
// Free memory on host
free(a);
free(b);
free(c);
return 0;
}
|
d4aaf6898bba4fa327c9bec6931fbcf7789ab47d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <ncs/cuda/CUDA.h>
#include <ncs/sim/CUDA.h>
#include "Izhikevich.cuh"
namespace cuda {
__device__ float dvdt(float v, float u, float current) {
return 0.04f * v * v + 5.0f * v + 140.0f - u + current;
}
__device__ float dudt(float v, float u, float a, float b) {
return a * (b * v - u);
}
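// Izhikevich (2003) model: dv/dt = 0.04 v^2 + 5 v + 140 - u + I and du/dt = a (b v - u);
// the spike reset v -> c, u -> u + d is applied in the update kernel below.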
__global__ void updateNeuronsKernel(const float* as,
const float* bs,
const float* cs,
const float* ds,
const float* thresholds,
const float* synaptic_current,
const float* input_current,
const float* old_u,
const float* old_v,
float* new_u,
float* new_v,
ncs::sim::Bit::Word* fire_vector,
float step_dt,
unsigned int num_neurons) {
extern __shared__ ncs::sim::Bit::Word shared_fire_vector[];
unsigned int& warp_result = shared_fire_vector[threadIdx.x];
unsigned int* result_vector_base = shared_fire_vector + warp::index() * 32;
unsigned int warp_thread = warp::thread();
unsigned int limit = math::ceiling(num_neurons, 32);
unsigned int mask = bit::mask(warp_thread);
for (size_t i = grid::thread(); i < limit; i += grid::stride()) {
warp_result = 0;
if (i < num_neurons) {
float a = as[i];
float b = bs[i];
float c = cs[i];
float d = ds[i];
float u = old_u[i];
float v = old_v[i];
float threshold = thresholds[i];
float current = input_current[i] + synaptic_current[i];
if (v >= threshold) {
v = c;
u += d;
warp_result = mask;
}
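// Advance the membrane state with two explicit Euler substeps of size step_dt before
// writing the result back.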
float step_v = v + step_dt * dvdt(v, u, current);
float step_u = u + step_dt * dudt(v, u, a, b);
v = step_v;
u = step_u;
step_v = v + step_dt * dvdt(v, u, current);
step_u = u + step_dt * dudt(v, u, a, b);
v = step_v;
u = step_u;
if (v >= threshold) {
v = threshold;
}
new_v[i] = v;
new_u[i] = u;
}
warp::reduceOr(result_vector_base, warp_thread);
if (warp::leader()) {
fire_vector[bit::word(i)] = warp_result;
}
}
}
bool updateNeurons(const float* as,
const float* bs,
const float* cs,
const float* ds,
const float* thresholds,
const float* synaptic_current,
const float* input_current,
const float* old_u,
const float* old_v,
float* new_u,
float* new_v,
ncs::sim::Bit::Word* fire_vector,
float step_dt,
unsigned int num_neurons) {
using ncs::sim::CUDA;
unsigned int threads_per_block = CUDA::getThreadsPerBlock(num_neurons);
unsigned int num_blocks = CUDA::getNumberOfBlocks(num_neurons);
unsigned int shared_memory = threads_per_block * sizeof(ncs::sim::Bit::Word);
hipLaunchKernelGGL(( updateNeuronsKernel), dim3(num_blocks),
dim3(threads_per_block),
shared_memory,
CUDA::getStream(), as,
bs,
cs,
ds,
thresholds,
synaptic_current,
input_current,
old_u,
old_v,
new_u,
new_v,
fire_vector,
step_dt,
num_neurons);
return CUDA::synchronize();
}
} // namespace cuda
| d4aaf6898bba4fa327c9bec6931fbcf7789ab47d.cu | #include <stdio.h>
#include <ncs/cuda/CUDA.h>
#include <ncs/sim/CUDA.h>
#include "Izhikevich.cuh"
namespace cuda {
__device__ float dvdt(float v, float u, float current) {
return 0.04f * v * v + 5.0f * v + 140.0f - u + current;
}
__device__ float dudt(float v, float u, float a, float b) {
return a * (b * v - u);
}
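// Izhikevich (2003) model: dv/dt = 0.04 v^2 + 5 v + 140 - u + I and du/dt = a (b v - u);
// the spike reset v -> c, u -> u + d is applied in the update kernel below.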
__global__ void updateNeuronsKernel(const float* as,
const float* bs,
const float* cs,
const float* ds,
const float* thresholds,
const float* synaptic_current,
const float* input_current,
const float* old_u,
const float* old_v,
float* new_u,
float* new_v,
ncs::sim::Bit::Word* fire_vector,
float step_dt,
unsigned int num_neurons) {
extern __shared__ ncs::sim::Bit::Word shared_fire_vector[];
unsigned int& warp_result = shared_fire_vector[threadIdx.x];
unsigned int* result_vector_base = shared_fire_vector + warp::index() * 32;
unsigned int warp_thread = warp::thread();
unsigned int limit = math::ceiling(num_neurons, 32);
unsigned int mask = bit::mask(warp_thread);
for (size_t i = grid::thread(); i < limit; i += grid::stride()) {
warp_result = 0;
if (i < num_neurons) {
float a = as[i];
float b = bs[i];
float c = cs[i];
float d = ds[i];
float u = old_u[i];
float v = old_v[i];
float threshold = thresholds[i];
float current = input_current[i] + synaptic_current[i];
if (v >= threshold) {
v = c;
u += d;
warp_result = mask;
}
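// Advance the membrane state with two explicit Euler substeps of size step_dt before
// writing the result back.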
float step_v = v + step_dt * dvdt(v, u, current);
float step_u = u + step_dt * dudt(v, u, a, b);
v = step_v;
u = step_u;
step_v = v + step_dt * dvdt(v, u, current);
step_u = u + step_dt * dudt(v, u, a, b);
v = step_v;
u = step_u;
if (v >= threshold) {
v = threshold;
}
new_v[i] = v;
new_u[i] = u;
}
warp::reduceOr(result_vector_base, warp_thread);
if (warp::leader()) {
fire_vector[bit::word(i)] = warp_result;
}
}
}
bool updateNeurons(const float* as,
const float* bs,
const float* cs,
const float* ds,
const float* thresholds,
const float* synaptic_current,
const float* input_current,
const float* old_u,
const float* old_v,
float* new_u,
float* new_v,
ncs::sim::Bit::Word* fire_vector,
float step_dt,
unsigned int num_neurons) {
using ncs::sim::CUDA;
unsigned int threads_per_block = CUDA::getThreadsPerBlock(num_neurons);
unsigned int num_blocks = CUDA::getNumberOfBlocks(num_neurons);
unsigned int shared_memory = threads_per_block * sizeof(ncs::sim::Bit::Word);
updateNeuronsKernel<<<num_blocks,
threads_per_block,
shared_memory,
CUDA::getStream()>>>(as,
bs,
cs,
ds,
thresholds,
synaptic_current,
input_current,
old_u,
old_v,
new_u,
new_v,
fire_vector,
step_dt,
num_neurons);
return CUDA::synchronize();
}
} // namespace cuda
|
fd831702d0e8ea287f1fe97c394c021fb29d5b2d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// #include <stdio.h>
// #include <hiprand/hiprand.h>
// #include <hiprand/hiprand_kernel.h>
// #include <hipfft.h>
// #include <iostream>
// using namespace std;
//
// #define ELECTRON_MASS 9.10938356e-31
// #define PROTON_MASS 1.6726219e-27
// #define ELECTRON_CHARGE 1
// // NOTE: setting electron charge to the default SI 1.6e-19 value breaks interpolation
// #define EPSILON_ZERO 8.854e-12
//
// //TODO: THIS HERE TIMESTEP I AM NOT COMPLETELY CERTAIN ABOUT
// #define NT 1000
// #define N_grid 16
#define N_particles_1_axis 64
#define N_particles (N_particles_1_axis*N_particles_1_axis*N_particles_1_axis)
#define L 1e-4
#define dt 1e-25
#define N_grid_all (N_grid *N_grid * N_grid)
#define dx (L/float(N_grid))
#define dy dx
#define dz dx
dim3 particleThreads(512);
dim3 particleBlocks((N_particles+particleThreads.x - 1)/particleThreads.x);
dim3 gridThreads(8,8,8);
dim3 gridBlocks((N_grid+gridThreads.x-1)/gridThreads.x, (N_grid + gridThreads.y - 1)/gridThreads.y, (N_grid+gridThreads.z-1)/gridThreads.z);
static void CUDA_ERROR( hipError_t err){
if (err != hipSuccess) {
printf("CUDA ERROR: %s, exiting\n", hipGetErrorString(err));
exit(-1);
}
}
struct Grid{
float *rho;
float *Ex;
float *Ey;
float *Ez;
float *d_rho;
float *d_Ex;
float *d_Ey;
float *d_Ez;
//fourier transformed versions of grid quantities, for fields solver
hipfftComplex *d_fourier_rho;
hipfftComplex *d_fourier_Ex;
hipfftComplex *d_fourier_Ey;
hipfftComplex *d_fourier_Ez;
//instructions for cuFFT
hipfftHandle plan_forward;
hipfftHandle plan_backward;
//the wave vector, for the field solver
float *kv;
float *d_kv;
};
struct Particle{
//keeps information about the position of one particle in (6D) phase space (positions, velocities)
float x;
float y;
float z;
float vx;
float vy;
float vz;
};
struct Species{
//keeps information about one distinct group of particles
float m; //mass
float q; //charge
//number of particles in group: not fully used yet
long int N;
Particle *particles;
Particle *d_particles;
};
__global__ void solve_poisson(float *d_kv, hipfftComplex *d_fourier_rho, hipfftComplex *d_fourier_Ex, hipfftComplex *d_fourier_Ey, hipfftComplex *d_fourier_Ez){
/*solve poisson equation
d_kv: wave vector
d_fourier_rho: complex array of fourier transformed charge densities
d_fourier_E(i):
*/
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
int k = blockIdx.z*blockDim.z + threadIdx.z;
int index = k*N_grid*N_grid + j*N_grid + i;
if(i<N_grid && j<N_grid && k<N_grid){
//wave vector magnitude squared
float k2 = d_kv[i]*d_kv[i] + d_kv[j]*d_kv[j] + d_kv[k]*d_kv[k];
if (i==0 && j==0 && k ==0) {
k2 = 1.0f; //dodge a bullet with a division by zero
}
//see: Birdsall Langdon, Plasma Physics via Computer Simulation, page 19
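// Component-wise spectral solve: each transformed field component is scaled as
// E_hat = -k_component * rho_hat / (k^2 * EPSILON_ZERO), applied to the real (.x) and
// imaginary (.y) parts of rho_hat separately.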
d_fourier_Ex[index].x = -d_kv[i]*d_fourier_rho[index].x/k2/EPSILON_ZERO;
d_fourier_Ex[index].y = -d_kv[i]*d_fourier_rho[index].y/k2/EPSILON_ZERO;
d_fourier_Ey[index].x = -d_kv[j]*d_fourier_rho[index].x/k2/EPSILON_ZERO;
d_fourier_Ey[index].y = -d_kv[j]*d_fourier_rho[index].y/k2/EPSILON_ZERO;
d_fourier_Ez[index].x = -d_kv[k]*d_fourier_rho[index].x/k2/EPSILON_ZERO;
d_fourier_Ez[index].y = -d_kv[k]*d_fourier_rho[index].y/k2/EPSILON_ZERO;
}
}
__global__ void real2complex(float *input, hipfftComplex *output){
//converts array of floats to array of real complex numbers
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
int k = blockIdx.z*blockDim.z + threadIdx.z;
int index = k*N_grid*N_grid + j*N_grid + i;
if(i<N_grid && j<N_grid && k<N_grid) {
output[index].x = input[index];
output[index].y = 0.0f;
}
}
__global__ void complex2real(hipfftComplex *input, float *output){
//converts array of complex inputs to floats (discards)
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
int k = blockIdx.z*blockDim.z + threadIdx.z;
int index = k*N_grid*N_grid + j*N_grid + i;
if(i<N_grid && j<N_grid && k<N_grid){
output[index] = input[index].x/float(N_grid_all);
}
}
__global__ void scale_down_after_fft(float *d_Ex, float *d_Ey, float *d_Ez){
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
int k = blockIdx.z*blockDim.z + threadIdx.z;
int index = k*N_grid*N_grid + j*N_grid + i;
if(i<N_grid && j<N_grid && k<N_grid){
d_Ex[index] /= float(N_grid_all);
d_Ey[index] /= float(N_grid_all);
d_Ez[index] /= float(N_grid_all);
}
}
__global__ void set_grid_array_to_value(float *arr, float value){
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
int k = blockIdx.z*blockDim.z + threadIdx.z;
int index = k*N_grid*N_grid + j*N_grid + i;
if((i<N_grid) && (j<N_grid) && (k<N_grid)){
arr[index] = value;
}
}
void init_grid(Grid *g){
g->rho = new float[N_grid_all];
g->Ex = new float[N_grid_all];
g->Ey = new float[N_grid_all];
g->Ez = new float[N_grid_all];
g->kv = new float[N_grid];
for (int i =0; i<=N_grid/2; i++)
{
g->kv[i] = i*2*M_PI;
}
for (int i = N_grid/2 + 1; i < N_grid; i++)
{
g->kv[i] = (i-N_grid)*2*M_PI;
}
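// kv now holds the discrete wave numbers in standard FFT order: 0, 1, ..., N_grid/2 followed
// by the negative frequencies -(N_grid/2 - 1), ..., -1, each scaled by 2*pi.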
CUDA_ERROR(hipMalloc((void**)&(g->d_kv), sizeof(float)*N_grid));
CUDA_ERROR(hipMemcpy(g->d_kv, g->kv, sizeof(float)*N_grid, hipMemcpyHostToDevice));
CUDA_ERROR(hipMalloc((void**)&(g->d_fourier_rho), sizeof(hipfftComplex)*N_grid_all));
CUDA_ERROR(hipMalloc((void**)&(g->d_fourier_Ex), sizeof(hipfftComplex)*N_grid_all));
CUDA_ERROR(hipMalloc((void**)&(g->d_fourier_Ey), sizeof(hipfftComplex)*N_grid_all));
CUDA_ERROR(hipMalloc((void**)&(g->d_fourier_Ez), sizeof(hipfftComplex)*N_grid_all));
CUDA_ERROR(hipMalloc((void**)&(g->d_rho), sizeof(float)*N_grid_all));
CUDA_ERROR(hipMemcpy(g->d_rho, g->rho, sizeof(float)*N_grid_all, hipMemcpyHostToDevice));
CUDA_ERROR(hipMalloc((void**)&(g->d_Ex), sizeof(float)*N_grid_all));
CUDA_ERROR(hipMemcpy(g->d_Ex, g->Ex, sizeof(float)*N_grid_all, hipMemcpyHostToDevice));
CUDA_ERROR(hipMalloc((void**)&(g->d_Ey), sizeof(float)*N_grid_all));
CUDA_ERROR(hipMemcpy(g->d_Ey, g->Ey, sizeof(float)*N_grid_all, hipMemcpyHostToDevice));
CUDA_ERROR(hipMalloc((void**)&(g->d_Ez), sizeof(float)*N_grid_all));
CUDA_ERROR(hipMemcpy(g->d_Ez, g->Ez, sizeof(float)*N_grid_all, hipMemcpyHostToDevice));
hipfftPlan3d(&(g->plan_forward), N_grid, N_grid, N_grid, HIPFFT_R2C);
hipfftPlan3d(&(g->plan_backward), N_grid, N_grid, N_grid, HIPFFT_C2R);
}
void debug_field_solver_uniform(Grid *g){
float* linear_field_x = new float[N_grid_all];
float* linear_field_y = new float[N_grid_all];
float* linear_field_z = new float[N_grid_all];
for(int i = 0; i<N_grid; i++){
for(int j = 0; j<N_grid; j++){
for(int k = 0; k<N_grid; k++){
int index = i*N_grid*N_grid + j*N_grid + k;
linear_field_x[index] = 1000;
linear_field_y[index] = 0;
linear_field_z[index] = 0;
// printf("%d %f %f %f\n", index, linear_field_x[index], linear_field_y[index],linear_field_z[index]);
}
}
}
// cout << "if happy and know it clap your hands" << endl;
hipMemcpy(g->d_Ex, linear_field_x, sizeof(float)*N_grid_all, hipMemcpyHostToDevice);
hipMemcpy(g->d_Ey, linear_field_y, sizeof(float)*N_grid_all, hipMemcpyHostToDevice);
hipMemcpy(g->d_Ez, linear_field_z, sizeof(float)*N_grid_all, hipMemcpyHostToDevice);
}
void debug_field_solver_sine(Grid *g)
{
float* linear_field_x = new float[N_grid_all];
float* linear_field_y = new float[N_grid_all];
float* linear_field_z = new float[N_grid_all];
for(int i = 0; i<N_grid; i++){
for(int j = 0; j<N_grid; j++){
for(int k = 0; k<N_grid; k++){
int index = i*N_grid*N_grid + j*N_grid + k;
linear_field_x[index] = 1000*sin(2*M_PI*((float)k/(float)N_grid));
linear_field_y[index] = 1000*sin(2*M_PI*((float)j/(float)N_grid));
linear_field_z[index] = 1000*sin(2*M_PI*((float)i/(float)N_grid));
}
}
}
hipMemcpy(g->d_Ex, linear_field_x, sizeof(float)*N_grid_all, hipMemcpyHostToDevice);
hipMemcpy(g->d_Ey, linear_field_y, sizeof(float)*N_grid_all, hipMemcpyHostToDevice);
hipMemcpy(g->d_Ez, linear_field_z, sizeof(float)*N_grid_all, hipMemcpyHostToDevice);
}
// void debug_field_solver_quadratic(Grid *g)
// {
// float* linear_field_x = new float[N_grid_all];
// float* linear_field_y = new float[N_grid_all];
// float* linear_field_z = new float[N_grid_all];
// for(int i = 0; i<N_grid; i++){
// for(int j = 0; j<N_grid; j++){
// for(int k = 0; k<N_grid; k++){
// int index = i*N_grid*N_grid + j*N_grid + k;
// linear_field_x[index] = (dx*i)*(dx*i);
// linear_field_y[index] = (dx*j)*(dx*j);
// linear_field_z[index] = (dx*k)*(dx*k);
// }
// }
// }
// hipMemcpy(g->d_Ex, linear_field_x, sizeof(float)*N_grid_all, hipMemcpyHostToDevice);
// hipMemcpy(g->d_Ey, linear_field_y, sizeof(float)*N_grid_all, hipMemcpyHostToDevice);
// hipMemcpy(g->d_Ez, linear_field_z, sizeof(float)*N_grid_all, hipMemcpyHostToDevice);
// }
void field_solver(Grid *g){
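// Pipeline: forward real-to-complex FFT of the charge density, per-mode Poisson solve in
// k-space, three inverse FFTs for Ex/Ey/Ez, then a rescale by 1/N_grid_all because the
// hipFFT/cuFFT transforms are unnormalized.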
hipfftExecR2C(g->plan_forward, g->d_rho, g->d_fourier_rho);
CUDA_ERROR(hipDeviceSynchronize());
hipLaunchKernelGGL(( solve_poisson), dim3(gridBlocks), dim3(gridThreads), 0, 0, g->d_kv, g->d_fourier_rho, g->d_fourier_Ex, g->d_fourier_Ey, g->d_fourier_Ez);
CUDA_ERROR(hipDeviceSynchronize());
hipfftExecC2R(g->plan_backward, g->d_fourier_Ex, g->d_Ex);
hipfftExecC2R(g->plan_backward, g->d_fourier_Ey, g->d_Ey);
hipfftExecC2R(g->plan_backward, g->d_fourier_Ez, g->d_Ez);
hipLaunchKernelGGL(( scale_down_after_fft), dim3(gridBlocks), dim3(gridThreads), 0, 0, g->d_Ex, g->d_Ey, g->d_Ez);
CUDA_ERROR(hipDeviceSynchronize());
}
__device__ int position_to_grid_index(float X){
return int(X/dx);
}
__device__ float position_in_cell(float x){
int grid_index = position_to_grid_index(x);
return x - grid_index*dx;
}
__global__ void scatter_charge(Particle *d_P, float q, float* d_rho){
int n = blockIdx.x*blockDim.x + threadIdx.x;
float x = d_P[n].x;
float y = d_P[n].y;
float z = d_P[n].z;
int i = position_to_grid_index(x);
int j = position_to_grid_index(y);
int k = position_to_grid_index(z);
float Xr = position_in_cell(x)/dx;
float Xl = 1 - Xr;
float Yr = position_in_cell(y)/dy;
float Yl = 1 - Yr;
float Zr = position_in_cell(z)/dz;
float Zl = 1 - Zr;
//this part is literally hitler - not just unreadable but slow af
//TODO: redo this using a reduce
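// Cloud-in-cell (trilinear) deposit: the eight weights Xl/Xr * Yl/Yr * Zl/Zr sum to 1, so the
// particle's charge q is conserved over the surrounding grid nodes; the % N_grid wraps give
// periodic boundaries and atomicAdd serializes colliding updates from different particles.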
atomicAdd(&(d_rho[N_grid * N_grid * ((k)%N_grid) + N_grid * ((j)%N_grid) + ((i)%N_grid)]), q*Xl*Yl*Zl);
atomicAdd(&(d_rho[N_grid * N_grid * ((k)%N_grid) + N_grid * ((j)%N_grid) + ((i+1)%N_grid)]), q*Xr*Yl*Zl);
atomicAdd(&(d_rho[N_grid * N_grid * ((k)%N_grid) + N_grid * ((j+1)%N_grid) + ((i)%N_grid)]), q*Xl*Yr*Zl);
atomicAdd(&(d_rho[N_grid * N_grid * ((k+1)%N_grid) + N_grid * ((j)%N_grid) + ((i)%N_grid)]), q*Xl*Yl*Zr);
atomicAdd(&(d_rho[N_grid * N_grid * ((k)%N_grid) + N_grid * ((j+1)%N_grid) + ((i+1)%N_grid)]), q*Xr*Yr*Zl);
atomicAdd(&(d_rho[N_grid * N_grid * ((k+1)%N_grid) + N_grid * ((j)%N_grid) + ((i+1)%N_grid)]), q*Xr*Yl*Zr);
atomicAdd(&(d_rho[N_grid * N_grid * ((k+1)%N_grid) + N_grid * ((j+1)%N_grid) + ((i)%N_grid)]), q*Xl*Yr*Zr);
atomicAdd(&(d_rho[N_grid * N_grid * ((k+1)%N_grid) + N_grid * ((j+1)%N_grid) + ((i+1)%N_grid)]), q*Xr*Yr*Zr);
}
__device__ float gather_grid_to_particle(Particle *p, float *grid){
float x = p->x;
float y = p->y;
float z = p->z;
int i = position_to_grid_index(x);
int j = position_to_grid_index(y);
int k = position_to_grid_index(z);
float Xr = position_in_cell(x)/dx;
float Xl = 1 - Xr;
float Yr = position_in_cell(y)/dy;
float Yl = 1 - Yr;
float Zr = position_in_cell(z)/dz;
float Zl = 1 - Zr;
float interpolated_scalar = 0.0f;
//this part is also hitler but not as much
//TODO: turn this block into a function
interpolated_scalar += grid[N_grid * N_grid * ((k)%N_grid) + N_grid * ((j)%N_grid) + ((i)%N_grid)]*Xl*Yl*Zl;
interpolated_scalar += grid[N_grid * N_grid * ((k)%N_grid) + N_grid * ((j)%N_grid) + ((i+1)%N_grid)]*Xr*Yl*Zl;
interpolated_scalar += grid[N_grid * N_grid * ((k)%N_grid) + N_grid * ((j+1)%N_grid) + ((i)%N_grid)]*Xl*Yr*Zl;
interpolated_scalar += grid[N_grid * N_grid * ((k+1)%N_grid) + N_grid * ((j)%N_grid) + ((i)%N_grid)]*Xl*Yl*Zr;
interpolated_scalar += grid[N_grid * N_grid * ((k)%N_grid) + N_grid * ((j+1)%N_grid) + ((i+1)%N_grid)]*Xr*Yr*Zl;
interpolated_scalar += grid[N_grid * N_grid * ((k+1)%N_grid) + N_grid * ((j)%N_grid) + ((i+1)%N_grid)]*Xr*Yl*Zr;
interpolated_scalar += grid[N_grid * N_grid * ((k+1)%N_grid) + N_grid * ((j+1)%N_grid) + ((i)%N_grid)]*Xl*Yr*Zr;
interpolated_scalar += grid[N_grid * N_grid * ((k+1)%N_grid) + N_grid * ((j+1)%N_grid) + ((i+1)%N_grid)]*Xr*Yr*Zr;
return interpolated_scalar;
}
__global__ void InitParticleArrays(Particle *d_p, float shiftx, float shifty, float shiftz){
int n = blockDim.x * blockIdx.x + threadIdx.x;
if (n<N_particles){
Particle *p = &(d_p[n]);
int i = n / (int)(N_particles_1_axis*N_particles_1_axis);
int j = (int) (n/N_particles_1_axis) % N_particles_1_axis;
int k = n % N_particles_1_axis;
p->x = L/float(N_particles_1_axis) * i + shiftx;
p->x = p->x - floor(p->x/L)*L;
p->y = L/float(N_particles_1_axis) * j + shifty;
p->y = p->y - floor(p->y/L)*L;
p->z = L/float(N_particles_1_axis) * k + shiftz;
p->z = p->z - floor(p->z/L)*L;
p->vx = 0.0f;
p->vy = 0.0f;
p->vz = 0.0f;
}
}
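// Leapfrog setup: the kernel below pushes velocities back by half a time step so positions
// (integer steps) and velocities (half steps) stay staggered during the main loop.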
__global__ void InitialVelocityStep(Particle *d_p, float q, float m, float *d_Ex, float *d_Ey, float *d_Ez){
int n = blockDim.x * blockIdx.x + threadIdx.x;
{
Particle *p = &(d_p[n]);
//gather electric field
float Ex = gather_grid_to_particle(p, d_Ex);
float Ey = gather_grid_to_particle(p, d_Ey);
float Ez = gather_grid_to_particle(p, d_Ez);
//use electric field to accelerate particles
p->vx -= 0.5f*dt*q/m*Ex;
p->vy -= 0.5f*dt*q/m*Ey;
p->vz -= 0.5f*dt*q/m*Ez;
}
}
__global__ void ParticleKernel(Particle *d_p, float q, float m, float *d_Ex, float *d_Ey, float *d_Ez){
int n = blockDim.x * blockIdx.x + threadIdx.x;
if(n<N_particles)
{
Particle *p = &(d_p[n]);
//push positions, enforce periodic boundary conditions
p->x = p->x + p->vx*dt;
p->x = p->x - floor(p->x/L)*L;
p->y = p->y + p->vy*dt;
p->y = p->y - floor(p->y/L)*L;
p->z = p->z + p->vz*dt;
p->z = p->z - floor(p->z/L)*L;
//gather electric field
float Ex = gather_grid_to_particle(p, d_Ex);
float Ey = gather_grid_to_particle(p, d_Ey);
float Ez = gather_grid_to_particle(p, d_Ez);
//use electric field to accelerate particles
p->vx += dt*q/m*Ex;
p->vy += dt*q/m*Ey;
p->vz += dt*q/m*Ez;
}
}
void init_species(Species *s, float shiftx, float shifty, float shiftz){
s->particles = new Particle[N_particles];
CUDA_ERROR(hipMalloc((void**)&(s->d_particles), sizeof(Particle)*N_particles));
cout << "initializing particles" << endl;
hipLaunchKernelGGL(( InitParticleArrays), dim3(particleBlocks), dim3(particleThreads), 0, 0, s->d_particles, shiftx, shifty, shiftz);
}
void dump_density_data(Grid *g, char* name){
cout << "dumping" << endl;
CUDA_ERROR(hipMemcpy(g->rho, g->d_rho, sizeof(float)*N_grid_all, hipMemcpyDeviceToHost));
CUDA_ERROR(hipMemcpy(g->Ex, g->d_Ex, sizeof(float)*N_grid_all, hipMemcpyDeviceToHost));
CUDA_ERROR(hipMemcpy(g->Ey, g->d_Ey, sizeof(float)*N_grid_all, hipMemcpyDeviceToHost));
CUDA_ERROR(hipMemcpy(g->Ez, g->d_Ez, sizeof(float)*N_grid_all, hipMemcpyDeviceToHost));
FILE *density_data = fopen(name, "w");
float rho_total = 0.0f;
for (int n = 0; n < N_grid_all; n++)
{
fprintf(density_data, "%f %.0f %.0f %.0f\n", g->rho[n], g->Ex[n], g->Ey[n], g->Ez[n]);
// printf("%d %f %f %f %f\n", n, g->rho[n], g->Ex[n], g->Ey[n], g->Ez[n]);
rho_total += g->rho[n];
}
printf("rho total: %f\n", rho_total);
}
void dump_running_density_data(Grid *g, char* name){
CUDA_ERROR(hipMemcpy(g->rho, g->d_rho, sizeof(float)*N_grid_all, hipMemcpyDeviceToHost));
CUDA_ERROR(hipMemcpy(g->Ex, g->d_Ex, sizeof(float)*N_grid_all, hipMemcpyDeviceToHost));
CUDA_ERROR(hipMemcpy(g->Ey, g->d_Ey, sizeof(float)*N_grid_all, hipMemcpyDeviceToHost));
CUDA_ERROR(hipMemcpy(g->Ez, g->d_Ez, sizeof(float)*N_grid_all, hipMemcpyDeviceToHost));
FILE *density_data = fopen(name, "w");
for (int n = 0; n < N_grid_all; n++)
{
fprintf(density_data, "\n%f %f %f %f", g->rho[n], g->Ex[n], g->Ey[n], g->Ez[n]);
}
fclose(density_data);
}
void dump_position_data(Species *s, char* name){
cout << "Copying particles from GPU to device"<< endl;
CUDA_ERROR(hipMemcpy(s->particles, s->d_particles, sizeof(Particle)*N_particles, hipMemcpyDeviceToHost));
cout << "Copied particles from GPU to device"<< endl;
FILE *initial_position_data = fopen(name, "w");
for (int i =0; i<N_particles; i++)
{
Particle *p = &(s->particles[i]);
fprintf(initial_position_data, "%f %f %f %f %f %f\n", p->x, p->y, p->z, p->vx, p->vy, p->vz);
}
// free(s->particles);
fclose(initial_position_data);
}
void init_timestep(Grid *g, Species *electrons, Species *ions){
hipLaunchKernelGGL(( set_grid_array_to_value), dim3(gridBlocks), dim3(gridThreads), 0, 0, g->d_rho, 0);
CUDA_ERROR(hipDeviceSynchronize());
hipLaunchKernelGGL(( scatter_charge), dim3(particleBlocks), dim3(particleThreads), 0, 0, electrons->d_particles, electrons->q, g->d_rho);
CUDA_ERROR(hipDeviceSynchronize());
hipLaunchKernelGGL(( scatter_charge), dim3(particleBlocks), dim3(particleThreads), 0, 0, ions->d_particles, ions->q, g->d_rho);
CUDA_ERROR(hipDeviceSynchronize());
// debug_field_solver_sine(g);
field_solver(g);
CUDA_ERROR(hipDeviceSynchronize());
hipLaunchKernelGGL(( InitialVelocityStep), dim3(particleBlocks), dim3(particleThreads), 0, 0, electrons->d_particles, electrons->q, electrons->m, g->d_Ex, g->d_Ey, g->d_Ez);
hipLaunchKernelGGL(( InitialVelocityStep), dim3(particleBlocks), dim3(particleThreads), 0, 0, ions->d_particles, ions->q, ions->m, g->d_Ex, g->d_Ey, g->d_Ez);
CUDA_ERROR(hipDeviceSynchronize());
}
void timestep(Grid *g, Species *electrons, Species *ions){
//1. move particles, gather electric fields at their locations, accelerate particles
hipLaunchKernelGGL(( ParticleKernel), dim3(particleBlocks), dim3(particleThreads), 0, 0, electrons->d_particles, electrons->q, electrons->m, g->d_Ex, g->d_Ey, g->d_Ez);
hipLaunchKernelGGL(( ParticleKernel), dim3(particleBlocks), dim3(particleThreads), 0, 0, ions->d_particles, ions->q, ions->m, g->d_Ex, g->d_Ey, g->d_Ez);
//potential TODO: sort particles?????
//2. clear charge density for scattering fields to particles charge
hipLaunchKernelGGL(( set_grid_array_to_value), dim3(gridBlocks), dim3(gridThreads), 0, 0, g->d_rho, 0);
CUDA_ERROR(hipDeviceSynchronize());
//3. gather charge from new particle position to grid
//TODO: note that I may need to cudaSyncThreads between these steps
hipLaunchKernelGGL(( scatter_charge), dim3(particleBlocks), dim3(particleThreads), 0, 0, electrons->d_particles, electrons->q, g->d_rho);
CUDA_ERROR(hipDeviceSynchronize());
hipLaunchKernelGGL(( scatter_charge), dim3(particleBlocks), dim3(particleThreads), 0, 0, ions->d_particles, ions->q, g->d_rho);
CUDA_ERROR(hipDeviceSynchronize());
//4. use charge density to calculate field
field_solver(g);
CUDA_ERROR(hipDeviceSynchronize());
}
int main(void){
printf("N_grid Threads per block Blocks\n");
printf("%7d %17d %6d\n", N_grid_all, gridThreads.x, gridBlocks.x);
hipEvent_t startLoop, endLoop;
hipEventCreate(&startLoop);
hipEventCreate(&endLoop);
Grid g;
init_grid(&g);
Species electrons;
electrons.q = -ELECTRON_CHARGE;
electrons.m = ELECTRON_MASS;
electrons.N = N_particles;
init_species(&electrons, L/100.0f, 0, 0);
Species ions;
ions.q = +ELECTRON_CHARGE;
ions.m = PROTON_MASS;
ions.N = N_particles;
init_species(&ions, 0, 0, 0);
//TODO: initialize for two stream instability
init_timestep(&g, &electrons, &ions);
CUDA_ERROR(hipGetLastError());
hipEventRecord(startLoop);
hipEventSynchronize(startLoop);
for(int i =0; i<NT; i++){
timestep(&g, &electrons, &ions);
}
hipDeviceSynchronize();
hipEventRecord(endLoop);
hipEventSynchronize(endLoop);
cout << endl << "finished time loop" << endl;
float loopRuntimeMS = 0;
hipEventElapsedTime(&loopRuntimeMS, startLoop, endLoop);
printf("N_grid Threads per block Blocks\tRuntime\n");
printf("%7d %17d %6d %f\n", N_grid_all, gridThreads.x, gridBlocks.x, loopRuntimeMS);
if (loopRuntimeMS > 0.0001)
{
char* filename = new char[100];
sprintf(filename, "benchmark/pb_%d_%d_%d.bdat", N_grid, gridThreads.x, gridBlocks.x);
FILE *benchmark = fopen(filename, "w");
fprintf(benchmark, "N_grid Threads per block Blocks\tRuntime\n");
fprintf(benchmark, "%7d %17d %6d %f\n", N_grid_all, gridThreads.x, gridBlocks.x, loopRuntimeMS);
fclose(benchmark);
}
else
{
printf("Not saved!\n");
}
CUDA_ERROR(hipFree(electrons.d_particles));
CUDA_ERROR(hipFree(g.d_rho));
CUDA_ERROR(hipFree(g.d_Ex));
CUDA_ERROR(hipFree(g.d_Ey));
CUDA_ERROR(hipFree(g.d_Ez));
CUDA_ERROR(hipFree(g.d_fourier_Ex));
CUDA_ERROR(hipFree(g.d_fourier_Ey));
CUDA_ERROR(hipFree(g.d_fourier_Ez));
CUDA_ERROR(hipFree(g.d_fourier_rho));
}
| fd831702d0e8ea287f1fe97c394c021fb29d5b2d.cu | // #include <stdio.h>
// #include <curand.h>
// #include <curand_kernel.h>
// #include <cufft.h>
// #include <iostream>
// using namespace std;
//
// #define ELECTRON_MASS 9.10938356e-31
// #define PROTON_MASS 1.6726219e-27
// #define ELECTRON_CHARGE 1
// // NOTE: setting electron charge to the default SI 1.6e-19 value breaks interpolation
// #define EPSILON_ZERO 8.854e-12
//
// //TODO: THIS HERE TIMESTEP I AM NOT COMPLETELY CERTAIN ABOUT
// #define NT 1000
// #define N_grid 16
#define N_particles_1_axis 64
#define N_particles (N_particles_1_axis*N_particles_1_axis*N_particles_1_axis)
#define L 1e-4
#define dt 1e-25
#define N_grid_all (N_grid *N_grid * N_grid)
#define dx (L/float(N_grid))
#define dy dx
#define dz dx
dim3 particleThreads(512);
dim3 particleBlocks((N_particles+particleThreads.x - 1)/particleThreads.x);
dim3 gridThreads(8,8,8);
dim3 gridBlocks((N_grid+gridThreads.x-1)/gridThreads.x, (N_grid + gridThreads.y - 1)/gridThreads.y, (N_grid+gridThreads.z-1)/gridThreads.z);
static void CUDA_ERROR( cudaError_t err){
if (err != cudaSuccess) {
printf("CUDA ERROR: %s, exiting\n", cudaGetErrorString(err));
exit(-1);
}
}
struct Grid{
float *rho;
float *Ex;
float *Ey;
float *Ez;
float *d_rho;
float *d_Ex;
float *d_Ey;
float *d_Ez;
//fourier transformed versions of grid quantities, for fields solver
cufftComplex *d_fourier_rho;
cufftComplex *d_fourier_Ex;
cufftComplex *d_fourier_Ey;
cufftComplex *d_fourier_Ez;
//instructions for cuFFT
cufftHandle plan_forward;
cufftHandle plan_backward;
//the wave vector, for the field solver
float *kv;
float *d_kv;
};
struct Particle{
//keeps information about the position of one particle in (6D) phase space (positions, velocities)
float x;
float y;
float z;
float vx;
float vy;
float vz;
};
struct Species{
//keeps information about one distinct group of particles
float m; //mass
float q; //charge
//number of particles in group: not fully used yet
long int N;
Particle *particles;
Particle *d_particles;
};
__global__ void solve_poisson(float *d_kv, cufftComplex *d_fourier_rho, cufftComplex *d_fourier_Ex, cufftComplex *d_fourier_Ey, cufftComplex *d_fourier_Ez){
/*solve poisson equation
d_kv: wave vector
d_fourier_rho: complex array of fourier transformed charge densities
d_fourier_E(i):
*/
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
int k = blockIdx.z*blockDim.z + threadIdx.z;
int index = k*N_grid*N_grid + j*N_grid + i;
if(i<N_grid && j<N_grid && k<N_grid){
//wave vector magnitude squared
float k2 = d_kv[i]*d_kv[i] + d_kv[j]*d_kv[j] + d_kv[k]*d_kv[k];
if (i==0 && j==0 && k ==0) {
k2 = 1.0f; //dodge a bullet with a division by zero
}
//see: Birdsall Langdon, Plasma Physics via Computer Simulation, page 19
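// Component-wise spectral solve: each transformed field component is scaled as
// E_hat = -k_component * rho_hat / (k^2 * EPSILON_ZERO), applied to the real (.x) and
// imaginary (.y) parts of rho_hat separately.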
d_fourier_Ex[index].x = -d_kv[i]*d_fourier_rho[index].x/k2/EPSILON_ZERO;
d_fourier_Ex[index].y = -d_kv[i]*d_fourier_rho[index].y/k2/EPSILON_ZERO;
d_fourier_Ey[index].x = -d_kv[j]*d_fourier_rho[index].x/k2/EPSILON_ZERO;
d_fourier_Ey[index].y = -d_kv[j]*d_fourier_rho[index].y/k2/EPSILON_ZERO;
d_fourier_Ez[index].x = -d_kv[k]*d_fourier_rho[index].x/k2/EPSILON_ZERO;
d_fourier_Ez[index].y = -d_kv[k]*d_fourier_rho[index].y/k2/EPSILON_ZERO;
}
}
__global__ void real2complex(float *input, cufftComplex *output){
//converts array of floats to array of real complex numbers
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
int k = blockIdx.z*blockDim.z + threadIdx.z;
int index = k*N_grid*N_grid + j*N_grid + i;
if(i<N_grid && j<N_grid && k<N_grid) {
output[index].x = input[index];
output[index].y = 0.0f;
}
}
__global__ void complex2real(cufftComplex *input, float *output){
//converts array of complex inputs to floats (discards)
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
int k = blockIdx.z*blockDim.z + threadIdx.z;
int index = k*N_grid*N_grid + j*N_grid + i;
if(i<N_grid && j<N_grid && k<N_grid){
output[index] = input[index].x/float(N_grid_all);
}
}
__global__ void scale_down_after_fft(float *d_Ex, float *d_Ey, float *d_Ez){
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
int k = blockIdx.z*blockDim.z + threadIdx.z;
int index = k*N_grid*N_grid + j*N_grid + i;
if(i<N_grid && j<N_grid && k<N_grid){
d_Ex[index] /= float(N_grid_all);
d_Ey[index] /= float(N_grid_all);
d_Ez[index] /= float(N_grid_all);
}
}
__global__ void set_grid_array_to_value(float *arr, float value){
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
int k = blockIdx.z*blockDim.z + threadIdx.z;
int index = k*N_grid*N_grid + j*N_grid + i;
if((i<N_grid) && (j<N_grid) && (k<N_grid)){
arr[index] = value;
}
}
void init_grid(Grid *g){
g->rho = new float[N_grid_all];
g->Ex = new float[N_grid_all];
g->Ey = new float[N_grid_all];
g->Ez = new float[N_grid_all];
g->kv = new float[N_grid];
for (int i =0; i<=N_grid/2; i++)
{
g->kv[i] = i*2*M_PI;
}
for (int i = N_grid/2 + 1; i < N_grid; i++)
{
g->kv[i] = (i-N_grid)*2*M_PI;
}
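// kv now holds the discrete wave numbers in standard FFT order: 0, 1, ..., N_grid/2 followed
// by the negative frequencies -(N_grid/2 - 1), ..., -1, each scaled by 2*pi.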
CUDA_ERROR(cudaMalloc((void**)&(g->d_kv), sizeof(float)*N_grid));
CUDA_ERROR(cudaMemcpy(g->d_kv, g->kv, sizeof(float)*N_grid, cudaMemcpyHostToDevice));
CUDA_ERROR(cudaMalloc((void**)&(g->d_fourier_rho), sizeof(cufftComplex)*N_grid_all));
CUDA_ERROR(cudaMalloc((void**)&(g->d_fourier_Ex), sizeof(cufftComplex)*N_grid_all));
CUDA_ERROR(cudaMalloc((void**)&(g->d_fourier_Ey), sizeof(cufftComplex)*N_grid_all));
CUDA_ERROR(cudaMalloc((void**)&(g->d_fourier_Ez), sizeof(cufftComplex)*N_grid_all));
CUDA_ERROR(cudaMalloc((void**)&(g->d_rho), sizeof(float)*N_grid_all));
CUDA_ERROR(cudaMemcpy(g->d_rho, g->rho, sizeof(float)*N_grid_all, cudaMemcpyHostToDevice));
CUDA_ERROR(cudaMalloc((void**)&(g->d_Ex), sizeof(float)*N_grid_all));
CUDA_ERROR(cudaMemcpy(g->d_Ex, g->Ex, sizeof(float)*N_grid_all, cudaMemcpyHostToDevice));
CUDA_ERROR(cudaMalloc((void**)&(g->d_Ey), sizeof(float)*N_grid_all));
CUDA_ERROR(cudaMemcpy(g->d_Ey, g->Ey, sizeof(float)*N_grid_all, cudaMemcpyHostToDevice));
CUDA_ERROR(cudaMalloc((void**)&(g->d_Ez), sizeof(float)*N_grid_all));
CUDA_ERROR(cudaMemcpy(g->d_Ez, g->Ez, sizeof(float)*N_grid_all, cudaMemcpyHostToDevice));
cufftPlan3d(&(g->plan_forward), N_grid, N_grid, N_grid, CUFFT_R2C);
cufftPlan3d(&(g->plan_backward), N_grid, N_grid, N_grid, CUFFT_C2R);
}
void debug_field_solver_uniform(Grid *g){
float* linear_field_x = new float[N_grid_all];
float* linear_field_y = new float[N_grid_all];
float* linear_field_z = new float[N_grid_all];
for(int i = 0; i<N_grid; i++){
for(int j = 0; j<N_grid; j++){
for(int k = 0; k<N_grid; k++){
int index = i*N_grid*N_grid + j*N_grid + k;
linear_field_x[index] = 1000;
linear_field_y[index] = 0;
linear_field_z[index] = 0;
// printf("%d %f %f %f\n", index, linear_field_x[index], linear_field_y[index],linear_field_z[index]);
}
}
}
// cout << "if happy and know it clap your hands" << endl;
cudaMemcpy(g->d_Ex, linear_field_x, sizeof(float)*N_grid_all, cudaMemcpyHostToDevice);
cudaMemcpy(g->d_Ey, linear_field_y, sizeof(float)*N_grid_all, cudaMemcpyHostToDevice);
cudaMemcpy(g->d_Ez, linear_field_z, sizeof(float)*N_grid_all, cudaMemcpyHostToDevice);
}
void debug_field_solver_sine(Grid *g)
{
float* linear_field_x = new float[N_grid_all];
float* linear_field_y = new float[N_grid_all];
float* linear_field_z = new float[N_grid_all];
for(int i = 0; i<N_grid; i++){
for(int j = 0; j<N_grid; j++){
for(int k = 0; k<N_grid; k++){
int index = i*N_grid*N_grid + j*N_grid + k;
linear_field_x[index] = 1000*sin(2*M_PI*((float)k/(float)N_grid));
linear_field_y[index] = 1000*sin(2*M_PI*((float)j/(float)N_grid));
linear_field_z[index] = 1000*sin(2*M_PI*((float)i/(float)N_grid));
}
}
}
cudaMemcpy(g->d_Ex, linear_field_x, sizeof(float)*N_grid_all, cudaMemcpyHostToDevice);
cudaMemcpy(g->d_Ey, linear_field_y, sizeof(float)*N_grid_all, cudaMemcpyHostToDevice);
cudaMemcpy(g->d_Ez, linear_field_z, sizeof(float)*N_grid_all, cudaMemcpyHostToDevice);
}
// void debug_field_solver_quadratic(Grid *g)
// {
// float* linear_field_x = new float[N_grid_all];
// float* linear_field_y = new float[N_grid_all];
// float* linear_field_z = new float[N_grid_all];
// for(int i = 0; i<N_grid; i++){
// for(int j = 0; j<N_grid; j++){
// for(int k = 0; k<N_grid; k++){
// int index = i*N_grid*N_grid + j*N_grid + k;
// linear_field_x[index] = (dx*i)*(dx*i);
// linear_field_y[index] = (dx*j)*(dx*j);
// linear_field_z[index] = (dx*k)*(dx*k);
// }
// }
// }
// cudaMemcpy(g->d_Ex, linear_field_x, sizeof(float)*N_grid_all, cudaMemcpyHostToDevice);
// cudaMemcpy(g->d_Ey, linear_field_y, sizeof(float)*N_grid_all, cudaMemcpyHostToDevice);
// cudaMemcpy(g->d_Ez, linear_field_z, sizeof(float)*N_grid_all, cudaMemcpyHostToDevice);
// }
void field_solver(Grid *g){
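// Pipeline: forward real-to-complex FFT of the charge density, per-mode Poisson solve in
// k-space, three inverse FFTs for Ex/Ey/Ez, then a rescale by 1/N_grid_all because the
// cuFFT transforms are unnormalized.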
cufftExecR2C(g->plan_forward, g->d_rho, g->d_fourier_rho);
CUDA_ERROR(cudaDeviceSynchronize());
solve_poisson<<<gridBlocks, gridThreads>>>(g->d_kv, g->d_fourier_rho, g->d_fourier_Ex, g->d_fourier_Ey, g->d_fourier_Ez);
CUDA_ERROR(cudaDeviceSynchronize());
cufftExecC2R(g->plan_backward, g->d_fourier_Ex, g->d_Ex);
cufftExecC2R(g->plan_backward, g->d_fourier_Ey, g->d_Ey);
cufftExecC2R(g->plan_backward, g->d_fourier_Ez, g->d_Ez);
scale_down_after_fft<<<gridBlocks, gridThreads>>>(g->d_Ex, g->d_Ey, g->d_Ez);
CUDA_ERROR(cudaDeviceSynchronize());
}
__device__ int position_to_grid_index(float X){
return int(X/dx);
}
__device__ float position_in_cell(float x){
int grid_index = position_to_grid_index(x);
return x - grid_index*dx;
}
__global__ void scatter_charge(Particle *d_P, float q, float* d_rho){
int n = blockIdx.x*blockDim.x + threadIdx.x;
float x = d_P[n].x;
float y = d_P[n].y;
float z = d_P[n].z;
int i = position_to_grid_index(x);
int j = position_to_grid_index(y);
int k = position_to_grid_index(z);
float Xr = position_in_cell(x)/dx;
float Xl = 1 - Xr;
float Yr = position_in_cell(y)/dy;
float Yl = 1 - Yr;
float Zr = position_in_cell(z)/dz;
float Zl = 1 - Zr;
//this part is literally hitler - not just unreadable but slow af
//TODO: redo this using a reduce
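// Cloud-in-cell (trilinear) deposit: the eight weights Xl/Xr * Yl/Yr * Zl/Zr sum to 1, so the
// particle's charge q is conserved over the surrounding grid nodes; the % N_grid wraps give
// periodic boundaries and atomicAdd serializes colliding updates from different particles.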
atomicAdd(&(d_rho[N_grid * N_grid * ((k)%N_grid) + N_grid * ((j)%N_grid) + ((i)%N_grid)]), q*Xl*Yl*Zl);
atomicAdd(&(d_rho[N_grid * N_grid * ((k)%N_grid) + N_grid * ((j)%N_grid) + ((i+1)%N_grid)]), q*Xr*Yl*Zl);
atomicAdd(&(d_rho[N_grid * N_grid * ((k)%N_grid) + N_grid * ((j+1)%N_grid) + ((i)%N_grid)]), q*Xl*Yr*Zl);
atomicAdd(&(d_rho[N_grid * N_grid * ((k+1)%N_grid) + N_grid * ((j)%N_grid) + ((i)%N_grid)]), q*Xl*Yl*Zr);
atomicAdd(&(d_rho[N_grid * N_grid * ((k)%N_grid) + N_grid * ((j+1)%N_grid) + ((i+1)%N_grid)]), q*Xr*Yr*Zl);
atomicAdd(&(d_rho[N_grid * N_grid * ((k+1)%N_grid) + N_grid * ((j)%N_grid) + ((i+1)%N_grid)]), q*Xr*Yl*Zr);
atomicAdd(&(d_rho[N_grid * N_grid * ((k+1)%N_grid) + N_grid * ((j+1)%N_grid) + ((i)%N_grid)]), q*Xl*Yr*Zr);
atomicAdd(&(d_rho[N_grid * N_grid * ((k+1)%N_grid) + N_grid * ((j+1)%N_grid) + ((i+1)%N_grid)]), q*Xr*Yr*Zr);
}
__device__ float gather_grid_to_particle(Particle *p, float *grid){
float x = p->x;
float y = p->y;
float z = p->z;
int i = position_to_grid_index(x);
int j = position_to_grid_index(y);
int k = position_to_grid_index(z);
float Xr = position_in_cell(x)/dx;
float Xl = 1 - Xr;
float Yr = position_in_cell(y)/dy;
float Yl = 1 - Yr;
float Zr = position_in_cell(z)/dz;
float Zl = 1 - Zr;
float interpolated_scalar = 0.0f;
//this part is also hitler but not as much
//TODO: turn this block into a function
interpolated_scalar += grid[N_grid * N_grid * ((k)%N_grid) + N_grid * ((j)%N_grid) + ((i)%N_grid)]*Xl*Yl*Zl;
interpolated_scalar += grid[N_grid * N_grid * ((k)%N_grid) + N_grid * ((j)%N_grid) + ((i+1)%N_grid)]*Xr*Yl*Zl;
interpolated_scalar += grid[N_grid * N_grid * ((k)%N_grid) + N_grid * ((j+1)%N_grid) + ((i)%N_grid)]*Xl*Yr*Zl;
interpolated_scalar += grid[N_grid * N_grid * ((k+1)%N_grid) + N_grid * ((j)%N_grid) + ((i)%N_grid)]*Xl*Yl*Zr;
interpolated_scalar += grid[N_grid * N_grid * ((k)%N_grid) + N_grid * ((j+1)%N_grid) + ((i+1)%N_grid)]*Xr*Yr*Zl;
interpolated_scalar += grid[N_grid * N_grid * ((k+1)%N_grid) + N_grid * ((j)%N_grid) + ((i+1)%N_grid)]*Xr*Yl*Zr;
interpolated_scalar += grid[N_grid * N_grid * ((k+1)%N_grid) + N_grid * ((j+1)%N_grid) + ((i)%N_grid)]*Xl*Yr*Zr;
interpolated_scalar += grid[N_grid * N_grid * ((k+1)%N_grid) + N_grid * ((j+1)%N_grid) + ((i+1)%N_grid)]*Xr*Yr*Zr;
return interpolated_scalar;
}
__global__ void InitParticleArrays(Particle *d_p, float shiftx, float shifty, float shiftz){
int n = blockDim.x * blockIdx.x + threadIdx.x;
if (n<N_particles){
Particle *p = &(d_p[n]);
int i = n / (int)(N_particles_1_axis*N_particles_1_axis);
int j = (int) (n/N_particles_1_axis) % N_particles_1_axis;
int k = n % N_particles_1_axis;
p->x = L/float(N_particles_1_axis) * i + shiftx;
p->x = p->x - floor(p->x/L)*L;
p->y = L/float(N_particles_1_axis) * j + shifty;
p->y = p->y - floor(p->y/L)*L;
p->z = L/float(N_particles_1_axis) * k + shiftz;
p->z = p->z - floor(p->z/L)*L;
p->vx = 0.0f;
p->vy = 0.0f;
p->vz = 0.0f;
}
}
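// Leapfrog setup: the kernel below pushes velocities back by half a time step so positions
// (integer steps) and velocities (half steps) stay staggered during the main loop.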
__global__ void InitialVelocityStep(Particle *d_p, float q, float m, float *d_Ex, float *d_Ey, float *d_Ez){
int n = blockDim.x * blockIdx.x + threadIdx.x;
{
Particle *p = &(d_p[n]);
//gather electric field
float Ex = gather_grid_to_particle(p, d_Ex);
float Ey = gather_grid_to_particle(p, d_Ey);
float Ez = gather_grid_to_particle(p, d_Ez);
//use electric field to accelerate particles
p->vx -= 0.5f*dt*q/m*Ex;
p->vy -= 0.5f*dt*q/m*Ey;
p->vz -= 0.5f*dt*q/m*Ez;
}
}
__global__ void ParticleKernel(Particle *d_p, float q, float m, float *d_Ex, float *d_Ey, float *d_Ez){
int n = blockDim.x * blockIdx.x + threadIdx.x;
if(n<N_particles)
{
Particle *p = &(d_p[n]);
//push positions, enforce periodic boundary conditions
p->x = p->x + p->vx*dt;
p->x = p->x - floor(p->x/L)*L;
p->y = p->y + p->vy*dt;
p->y = p->y - floor(p->y/L)*L;
p->z = p->z + p->vz*dt;
p->z = p->z - floor(p->z/L)*L;
//gather electric field
float Ex = gather_grid_to_particle(p, d_Ex);
float Ey = gather_grid_to_particle(p, d_Ey);
float Ez = gather_grid_to_particle(p, d_Ez);
//use electric field to accelerate particles
p->vx += dt*q/m*Ex;
p->vy += dt*q/m*Ey;
p->vz += dt*q/m*Ez;
}
}
void init_species(Species *s, float shiftx, float shifty, float shiftz){
s->particles = new Particle[N_particles];
CUDA_ERROR(cudaMalloc((void**)&(s->d_particles), sizeof(Particle)*N_particles));
cout << "initializing particles" << endl;
InitParticleArrays<<<particleBlocks, particleThreads>>>(s->d_particles, shiftx, shifty, shiftz);
}
void dump_density_data(Grid *g, char* name){
cout << "dumping" << endl;
CUDA_ERROR(cudaMemcpy(g->rho, g->d_rho, sizeof(float)*N_grid_all, cudaMemcpyDeviceToHost));
CUDA_ERROR(cudaMemcpy(g->Ex, g->d_Ex, sizeof(float)*N_grid_all, cudaMemcpyDeviceToHost));
CUDA_ERROR(cudaMemcpy(g->Ey, g->d_Ey, sizeof(float)*N_grid_all, cudaMemcpyDeviceToHost));
CUDA_ERROR(cudaMemcpy(g->Ez, g->d_Ez, sizeof(float)*N_grid_all, cudaMemcpyDeviceToHost));
FILE *density_data = fopen(name, "w");
float rho_total = 0.0f;
for (int n = 0; n < N_grid_all; n++)
{
fprintf(density_data, "%f %.0f %.0f %.0f\n", g->rho[n], g->Ex[n], g->Ey[n], g->Ez[n]);
// printf("%d %f %f %f %f\n", n, g->rho[n], g->Ex[n], g->Ey[n], g->Ez[n]);
rho_total += g->rho[n];
}
printf("rho total: %f\n", rho_total);
}
void dump_running_density_data(Grid *g, char* name){
CUDA_ERROR(cudaMemcpy(g->rho, g->d_rho, sizeof(float)*N_grid_all, cudaMemcpyDeviceToHost));
CUDA_ERROR(cudaMemcpy(g->Ex, g->d_Ex, sizeof(float)*N_grid_all, cudaMemcpyDeviceToHost));
CUDA_ERROR(cudaMemcpy(g->Ey, g->d_Ey, sizeof(float)*N_grid_all, cudaMemcpyDeviceToHost));
CUDA_ERROR(cudaMemcpy(g->Ez, g->d_Ez, sizeof(float)*N_grid_all, cudaMemcpyDeviceToHost));
FILE *density_data = fopen(name, "w");
for (int n = 0; n < N_grid_all; n++)
{
fprintf(density_data, "\n%f %f %f %f", g->rho[n], g->Ex[n], g->Ey[n], g->Ez[n]);
}
fclose(density_data);
}
void dump_position_data(Species *s, char* name){
cout << "Copying particles from GPU to device"<< endl;
CUDA_ERROR(cudaMemcpy(s->particles, s->d_particles, sizeof(Particle)*N_particles, cudaMemcpyDeviceToHost));
cout << "Copied particles from GPU to device"<< endl;
FILE *initial_position_data = fopen(name, "w");
for (int i =0; i<N_particles; i++)
{
Particle *p = &(s->particles[i]);
fprintf(initial_position_data, "%f %f %f %f %f %f\n", p->x, p->y, p->z, p->vx, p->vy, p->vz);
}
// free(s->particles);
fclose(initial_position_data);
}
void init_timestep(Grid *g, Species *electrons, Species *ions){
set_grid_array_to_value<<<gridBlocks, gridThreads>>>(g->d_rho, 0);
CUDA_ERROR(cudaDeviceSynchronize());
scatter_charge<<<particleBlocks, particleThreads>>>(electrons->d_particles, electrons->q, g->d_rho);
CUDA_ERROR(cudaDeviceSynchronize());
scatter_charge<<<particleBlocks, particleThreads>>>(ions->d_particles, ions->q, g->d_rho);
CUDA_ERROR(cudaDeviceSynchronize());
// debug_field_solver_sine(g);
field_solver(g);
CUDA_ERROR(cudaDeviceSynchronize());
InitialVelocityStep<<<particleBlocks, particleThreads>>>(electrons->d_particles, electrons->q, electrons->m, g->d_Ex, g->d_Ey, g->d_Ez);
InitialVelocityStep<<<particleBlocks, particleThreads>>>(ions->d_particles, ions->q, ions->m, g->d_Ex, g->d_Ey, g->d_Ez);
CUDA_ERROR(cudaDeviceSynchronize());
}
void timestep(Grid *g, Species *electrons, Species *ions){
//1. move particles, gather electric fields at their locations, accelerate particles
ParticleKernel<<<particleBlocks, particleThreads>>>(electrons->d_particles, electrons->q, electrons->m, g->d_Ex, g->d_Ey, g->d_Ez);
ParticleKernel<<<particleBlocks, particleThreads>>>(ions->d_particles, ions->q, ions->m, g->d_Ex, g->d_Ey, g->d_Ez);
//potential TODO: sort particles?????
//2. clear charge density for scattering fields to particles charge
set_grid_array_to_value<<<gridBlocks, gridThreads>>>(g->d_rho, 0);
CUDA_ERROR(cudaDeviceSynchronize());
//3. gather charge from new particle position to grid
//TODO: note that I may need to cudaSyncThreads between these steps
scatter_charge<<<particleBlocks, particleThreads>>>(electrons->d_particles, electrons->q, g->d_rho);
CUDA_ERROR(cudaDeviceSynchronize());
scatter_charge<<<particleBlocks, particleThreads>>>(ions->d_particles, ions->q, g->d_rho);
CUDA_ERROR(cudaDeviceSynchronize());
//4. use charge density to calculate field
field_solver(g);
CUDA_ERROR(cudaDeviceSynchronize());
}
int main(void){
printf("N_grid Threads per block Blocks\n");
printf("%7d %17d %6d\n", N_grid_all, gridThreads.x, gridBlocks.x);
cudaEvent_t startLoop, endLoop;
cudaEventCreate(&startLoop);
cudaEventCreate(&endLoop);
Grid g;
init_grid(&g);
Species electrons;
electrons.q = -ELECTRON_CHARGE;
electrons.m = ELECTRON_MASS;
electrons.N = N_particles;
init_species(&electrons, L/100.0f, 0, 0);
Species ions;
ions.q = +ELECTRON_CHARGE;
ions.m = PROTON_MASS;
ions.N = N_particles;
init_species(&ions, 0, 0, 0);
//TODO: initialize for two stream instability
init_timestep(&g, &electrons, &ions);
CUDA_ERROR(cudaGetLastError());
cudaEventRecord(startLoop);
cudaEventSynchronize(startLoop);
for(int i =0; i<NT; i++){
timestep(&g, &electrons, &ions);
}
cudaDeviceSynchronize();
cudaEventRecord(endLoop);
cudaEventSynchronize(endLoop);
cout << endl << "finished time loop" << endl;
float loopRuntimeMS = 0;
cudaEventElapsedTime(&loopRuntimeMS, startLoop, endLoop);
printf("N_grid Threads per block Blocks\tRuntime\n");
printf("%7d %17d %6d %f\n", N_grid_all, gridThreads.x, gridBlocks.x, loopRuntimeMS);
if (loopRuntimeMS > 0.0001)
{
char* filename = new char[100];
sprintf(filename, "benchmark/pb_%d_%d_%d.bdat", N_grid, gridThreads.x, gridBlocks.x);
FILE *benchmark = fopen(filename, "w");
fprintf(benchmark, "N_grid Threads per block Blocks\tRuntime\n");
fprintf(benchmark, "%7d %17d %6d %f\n", N_grid_all, gridThreads.x, gridBlocks.x, loopRuntimeMS);
fclose(benchmark);
}
else
{
printf("Not saved!\n");
}
CUDA_ERROR(cudaFree(electrons.d_particles));
CUDA_ERROR(cudaFree(g.d_rho));
CUDA_ERROR(cudaFree(g.d_Ex));
CUDA_ERROR(cudaFree(g.d_Ey));
CUDA_ERROR(cudaFree(g.d_Ez));
CUDA_ERROR(cudaFree(g.d_fourier_Ex));
CUDA_ERROR(cudaFree(g.d_fourier_Ey));
CUDA_ERROR(cudaFree(g.d_fourier_Ez));
CUDA_ERROR(cudaFree(g.d_fourier_rho));
}
|
bca41555a924d4f394b093d5a58a8188b87ef7c6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
__global__ void
zmgeellmv_kernel(
int num_rows,
int num_cols,
int num_vecs,
int num_cols_per_row,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magmaDoubleComplex * dx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
extern __shared__ magmaDoubleComplex dot[];
if(row < num_rows ){
for( int i=0; i<num_vecs; i++)
dot[ threadIdx.x + i*blockDim.x ] = MAGMA_Z_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row; n++ ) {
int col = dcolind [ num_cols_per_row * row + n ];
magmaDoubleComplex val = dval [ num_cols_per_row * row + n ];
if( val != 0){
for( int i=0; i<num_vecs; i++)
dot[ threadIdx.x + i*blockDim.x ] +=
val * dx[col + i * num_cols ];
}
}
for( int i=0; i<num_vecs; i++)
dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ]
* alpha + beta * dy [ row + i * num_cols ];
}
}
/**
Purpose
-------
This routine computes Y = alpha * A * X + beta * Y for X and Y sets of
num_vec vectors on the GPU. Input format is ELLPACK.
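For illustration, ELLPACK pads every row to nnz_per_row entries stored row by row together
with their column indices; padded slots carry a zero value and are skipped by the kernel's
val != 0 test (e.g. a row [5 0 3 0] with nnz_per_row = 3 could be stored as values {5, 3, 0}
and column indices {0, 2, 0}).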
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
num_vecs magma_int_t
number of vectors
@param[in]
nnz_per_row magma_int_t
number of elements in the longest row
@param[in]
alpha magmaDoubleComplex
scalar multiplier
@param[in]
dval magmaDoubleComplex_ptr
array containing values of A in ELLPACK
@param[in]
dcolind magmaIndex_ptr
column indices of A in ELLPACK
@param[in]
dx magmaDoubleComplex_ptr
input vector x
@param[in]
beta magmaDoubleComplex
scalar multiplier
@param[out]
dy magmaDoubleComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zblas
********************************************************************/
extern "C" magma_int_t
magma_zmgeellmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t num_vecs,
magma_int_t nnz_per_row,
magmaDoubleComplex alpha,
magmaDoubleComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaDoubleComplex_ptr dx,
magmaDoubleComplex beta,
magmaDoubleComplex_ptr dy,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE
* sizeof( magmaDoubleComplex ); // num_vecs vectors
hipLaunchKernelGGL(( zmgeellmv_kernel), dim3(grid), dim3(threads), MEM_SIZE, queue->cuda_stream() ,
m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy );
return MAGMA_SUCCESS;
}
| bca41555a924d4f394b093d5a58a8188b87ef7c6.cu | /*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
__global__ void
zmgeellmv_kernel(
int num_rows,
int num_cols,
int num_vecs,
int num_cols_per_row,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magmaDoubleComplex * dx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
extern __shared__ magmaDoubleComplex dot[];
if(row < num_rows ){
for( int i=0; i<num_vecs; i++)
dot[ threadIdx.x + i*blockDim.x ] = MAGMA_Z_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row; n++ ) {
int col = dcolind [ num_cols_per_row * row + n ];
magmaDoubleComplex val = dval [ num_cols_per_row * row + n ];
if( val != 0){
for( int i=0; i<num_vecs; i++)
dot[ threadIdx.x + i*blockDim.x ] +=
val * dx[col + i * num_cols ];
}
}
for( int i=0; i<num_vecs; i++)
dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ]
* alpha + beta * dy [ row + i * num_cols ];
}
}
/**
Purpose
-------
This routine computes Y = alpha * A * X + beta * Y for X and Y sets of
num_vec vectors on the GPU. Input format is ELLPACK.
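For illustration, ELLPACK pads every row to nnz_per_row entries stored row by row together
with their column indices; padded slots carry a zero value and are skipped by the kernel's
val != 0 test (e.g. a row [5 0 3 0] with nnz_per_row = 3 could be stored as values {5, 3, 0}
and column indices {0, 2, 0}).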
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
num_vecs magma_int_t
number of vectors
@param[in]
nnz_per_row magma_int_t
number of elements in the longest row
@param[in]
alpha magmaDoubleComplex
scalar multiplier
@param[in]
dval magmaDoubleComplex_ptr
array containing values of A in ELLPACK
@param[in]
dcolind magmaIndex_ptr
column indices of A in ELLPACK
@param[in]
dx magmaDoubleComplex_ptr
input vector x
@param[in]
beta magmaDoubleComplex
scalar multiplier
@param[out]
dy magmaDoubleComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zblas
********************************************************************/
extern "C" magma_int_t
magma_zmgeellmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t num_vecs,
magma_int_t nnz_per_row,
magmaDoubleComplex alpha,
magmaDoubleComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaDoubleComplex_ptr dx,
magmaDoubleComplex beta,
magmaDoubleComplex_ptr dy,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE
* sizeof( magmaDoubleComplex ); // num_vecs vectors
zmgeellmv_kernel<<< grid, threads, MEM_SIZE, queue->cuda_stream() >>>
( m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy );
return MAGMA_SUCCESS;
}
|
daa6a905b18e9f7941fabf1300c847b9dc4c1a73.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//====================================================================================================100
// DEFINE / INCLUDE
//====================================================================================================100
#include "kernel_fin_2.cu"
#include "kernel_ecc_2.cu"
#include "kernel_cam_2.cu"
#include "kernel_2.cu"
#include "embedded_fehlberg_7_8_2.cu"
#include "solver_2.cu"
//====================================================================================================100
// MAIN FUNCTION
//====================================================================================================100
int work_2( int xmax,
int workload){
//================================================================================80
// VARIABLES
//================================================================================80
//============================================================60
// TIME
//============================================================60
long long time0;
long long time1;
long long time2;
long long time3;
long long time4;
long long time5;
long long time6;
time0 = get_time();
//============================================================60
// COUNTERS, POINTERS
//============================================================60
long memory;
int i;
int pointer;
//============================================================60
// X/Y INPUTS/OUTPUTS, PARAMS INPUTS
//============================================================60
fp* y;
fp* d_y;
long y_mem;
fp* x;
fp* d_x;
long x_mem;
fp* params;
fp* d_params;
int params_mem;
//============================================================60
// TEMPORARY SOLVER VARIABLES
//============================================================60
fp* d_com;
int com_mem;
fp* d_err;
int err_mem;
fp* d_scale;
int scale_mem;
fp* d_yy;
int yy_mem;
fp* d_initvalu_temp;
int initvalu_temp_mem;
fp* d_finavalu_temp;
int finavalu_temp_mem;
//============================================================60
// CUDA KERNELS EXECUTION PARAMETERS
//============================================================60
dim3 threads;
dim3 blocks;
int blocks_x;
time1 = get_time();
//================================================================================80
// ALLOCATE MEMORY
//================================================================================80
//============================================================60
// MEMORY CHECK
//============================================================60
memory = workload*(xmax+1)*EQUATIONS*4;
if(memory>1000000000){
printf("ERROR: trying to allocate more than 1.0GB of memory, decrease workload and span parameters or change memory parameter\n");
return 0;
}
//============================================================60
// ALLOCATE ARRAYS
//============================================================60
//========================================40
// X/Y INPUTS/OUTPUTS, PARAMS INPUTS
//========================================40
y_mem = workload * (xmax+1) * EQUATIONS * sizeof(fp);
y= (fp *) malloc(y_mem);
hipMalloc((void **)&d_y, y_mem);
x_mem = workload * (xmax+1) * sizeof(fp);
x= (fp *) malloc(x_mem);
hipMalloc((void **)&d_x, x_mem);
params_mem = workload * PARAMETERS * sizeof(fp);
params= (fp *) malloc(params_mem);
hipMalloc((void **)&d_params, params_mem);
//========================================40
// TEMPORARY SOLVER VARIABLES
//========================================40
com_mem = workload * 3 * sizeof(fp);
hipMalloc((void **)&d_com, com_mem);
err_mem = workload * EQUATIONS * sizeof(fp);
hipMalloc((void **)&d_err, err_mem);
scale_mem = workload * EQUATIONS * sizeof(fp);
hipMalloc((void **)&d_scale, scale_mem);
yy_mem = workload * EQUATIONS * sizeof(fp);
hipMalloc((void **)&d_yy, yy_mem);
initvalu_temp_mem = workload * EQUATIONS * sizeof(fp);
hipMalloc((void **)&d_initvalu_temp, initvalu_temp_mem);
finavalu_temp_mem = workload * 13* EQUATIONS * sizeof(fp);
hipMalloc((void **)&d_finavalu_temp, finavalu_temp_mem);
time2 = get_time();
//================================================================================80
// READ FROM FILES OR SET INITIAL VALUES
//================================================================================80
//========================================40
// X
//========================================40
for(i=0; i<workload; i++){
pointer = i * (xmax+1) + 0;
x[pointer] = 0;
}
hipMemcpy(d_x, x, x_mem, hipMemcpyHostToDevice);
//========================================40
// Y
//========================================40
for(i=0; i<workload; i++){
pointer = i*((xmax+1)*EQUATIONS) + 0*(EQUATIONS);
read("../data/y.txt",
&y[pointer],
91,
1,
0);
}
hipMemcpy(d_y, y, y_mem, hipMemcpyHostToDevice);
//========================================40
// PARAMS
//========================================40
for(i=0; i<workload; i++){
pointer = i*PARAMETERS;
read("../data/params.txt",
&params[pointer],
18,
1,
0);
}
hipMemcpy(d_params, params, params_mem, hipMemcpyHostToDevice);
time3 = get_time();
//================================================================================80
// EXECUTION IF THERE ARE MANY WORKLOADS
//================================================================================80
if(workload == 1){
threads.x = 32; // define the number of threads in the block
threads.y = 1;
blocks.x = 4; // define the number of blocks in the grid
blocks.y = 1;
}
else{
threads.x = NUMBER_THREADS; // define the number of threads in the block
threads.y = 1;
blocks_x = workload/threads.x;
if (workload % threads.x != 0){ // compensate for division remainder above by adding one grid
blocks_x = blocks_x + 1;
}
blocks.x = blocks_x; // define the number of blocks in the grid
blocks.y = 1;
}
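// Illustrative note (editorial assumption, not from the original code): the
// block count above is a ceiling division. E.g. if NUMBER_THREADS were 256 and
// workload were 1000, blocks_x = 1000/256 = 3 with a nonzero remainder, so it
// is bumped to 4 blocks (1024 threads) and the kernel is expected to ignore
// the 24 surplus threads.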
hipLaunchKernelGGL(( solver_2), dim3(blocks), dim3(threads), 0, 0, workload,
xmax,
d_x,
d_y,
d_params,
d_com,
d_err,
d_scale,
d_yy,
d_initvalu_temp,
d_finavalu_temp);
// hipDeviceSynchronize();
// printf("CUDA error: %s\n", hipGetErrorString(hipGetLastError()));
time4 = get_time();
//================================================================================80
// COPY DATA BACK TO CPU
//================================================================================80
hipMemcpy(x, d_x, x_mem, hipMemcpyDeviceToHost);
hipMemcpy(y, d_y, y_mem, hipMemcpyDeviceToHost);
time5 = get_time();
//================================================================================80
// PRINT RESULTS (ENABLE SELECTIVELY FOR TESTING ONLY)
//================================================================================80
// int j, k;
// for(i=0; i<workload; i++){
// printf("WORKLOAD %d:\n", i);
// for(j=0; j<(xmax+1); j++){
// printf("\tTIME %d:\n", j);
// for(k=0; k<EQUATIONS; k++){
// printf("\t\ty[%d][%d][%d]=%13.10f\n", i, j, k, y[i*((xmax+1)*EQUATIONS) + j*(EQUATIONS)+k]);
// }
// }
// }
// for(i=0; i<workload; i++){
// printf("WORKLOAD %d:\n", i);
// for(j=0; j<(xmax+1); j++){
// printf("\tTIME %d:\n", j);
// printf("\t\tx[%d][%d]=%13.10f\n", i, j, x[i * (xmax+1) + j]);
// }
// }
//================================================================================80
// DEALLOCATION
//================================================================================80
//============================================================60
// X/Y INPUTS/OUTPUTS, PARAMS INPUTS
//============================================================60
free(y);
hipFree(d_y);
free(x);
hipFree(d_x);
free(params);
hipFree(d_params);
//============================================================60
// TEMPORARY SOLVER VARIABLES
//============================================================60
hipFree(d_com);
hipFree(d_err);
hipFree(d_scale);
hipFree(d_yy);
hipFree(d_initvalu_temp);
hipFree(d_finavalu_temp);
time6= get_time();
//================================================================================80
// DISPLAY TIMING
//================================================================================80
printf("Time spent in different stages of the application:\n");
printf("%.12f s, %.12f % : SETUP VARIABLES\n", (float) (time1-time0) / 1000000, (float) (time1-time0) / (float) (time6-time0) * 100);
printf("%.12f s, %.12f % : ALLOCATE CPU MEMORY AND GPU MEMORY\n", (float) (time2-time1) / 1000000, (float) (time2-time1) / (float) (time6-time0) * 100);
printf("%.12f s, %.12f % : READ DATA FROM FILES, COPY TO GPU MEMORY\n", (float) (time3-time2) / 1000000, (float) (time3-time2) / (float) (time6-time0) * 100);
printf("%.12f s, %.12f % : RUN GPU KERNEL\n", (float) (time4-time3) / 1000000, (float) (time4-time3) / (float) (time6-time0) * 100);
printf("%.12f s, %.12f % : COPY GPU DATA TO CPU MEMORY\n", (float) (time5-time4) / 1000000, (float) (time5-time4) / (float) (time6-time0) * 100);
printf("%.12f s, %.12f % : FREE MEMORY\n", (float) (time6-time5) / 1000000, (float) (time6-time5) / (float) (time6-time0) * 100);
printf("Total time:\n");
printf("%.12f s\n", (float) (time6-time0) / 1000000);
//====================================================================================================100
// END OF FILE
//====================================================================================================100
return 0;
}
| daa6a905b18e9f7941fabf1300c847b9dc4c1a73.cu | //====================================================================================================100
// DEFINE / INCLUDE
//====================================================================================================100
#include "kernel_fin_2.cu"
#include "kernel_ecc_2.cu"
#include "kernel_cam_2.cu"
#include "kernel_2.cu"
#include "embedded_fehlberg_7_8_2.cu"
#include "solver_2.cu"
//====================================================================================================100
// MAIN FUNCTION
//====================================================================================================100
int work_2( int xmax,
int workload){
//================================================================================80
// VARIABLES
//================================================================================80
//============================================================60
// TIME
//============================================================60
long long time0;
long long time1;
long long time2;
long long time3;
long long time4;
long long time5;
long long time6;
time0 = get_time();
//============================================================60
// COUNTERS, POINTERS
//============================================================60
long memory;
int i;
int pointer;
//============================================================60
// X/Y INPUTS/OUTPUTS, PARAMS INPUTS
//============================================================60
fp* y;
fp* d_y;
long y_mem;
fp* x;
fp* d_x;
long x_mem;
fp* params;
fp* d_params;
int params_mem;
//============================================================60
// TEMPORARY SOLVER VARIABLES
//============================================================60
fp* d_com;
int com_mem;
fp* d_err;
int err_mem;
fp* d_scale;
int scale_mem;
fp* d_yy;
int yy_mem;
fp* d_initvalu_temp;
int initvalu_temp_mem;
fp* d_finavalu_temp;
int finavalu_temp_mem;
//============================================================60
// CUDA KERNELS EXECUTION PARAMETERS
//============================================================60
dim3 threads;
dim3 blocks;
int blocks_x;
time1 = get_time();
//================================================================================80
// ALLOCATE MEMORY
//================================================================================80
//============================================================60
// MEMORY CHECK
//============================================================60
memory = workload*(xmax+1)*EQUATIONS*4;
if(memory>1000000000){
printf("ERROR: trying to allocate more than 1.0GB of memory, decrease workload and span parameters or change memory parameter\n");
return 0;
}
//============================================================60
// ALLOCATE ARRAYS
//============================================================60
//========================================40
// X/Y INPUTS/OUTPUTS, PARAMS INPUTS
//========================================40
y_mem = workload * (xmax+1) * EQUATIONS * sizeof(fp);
y= (fp *) malloc(y_mem);
cudaMalloc((void **)&d_y, y_mem);
x_mem = workload * (xmax+1) * sizeof(fp);
x= (fp *) malloc(x_mem);
cudaMalloc((void **)&d_x, x_mem);
params_mem = workload * PARAMETERS * sizeof(fp);
params= (fp *) malloc(params_mem);
cudaMalloc((void **)&d_params, params_mem);
//========================================40
// TEMPORARY SOLVER VARIABLES
//========================================40
com_mem = workload * 3 * sizeof(fp);
cudaMalloc((void **)&d_com, com_mem);
err_mem = workload * EQUATIONS * sizeof(fp);
cudaMalloc((void **)&d_err, err_mem);
scale_mem = workload * EQUATIONS * sizeof(fp);
cudaMalloc((void **)&d_scale, scale_mem);
yy_mem = workload * EQUATIONS * sizeof(fp);
cudaMalloc((void **)&d_yy, yy_mem);
initvalu_temp_mem = workload * EQUATIONS * sizeof(fp);
cudaMalloc((void **)&d_initvalu_temp, initvalu_temp_mem);
finavalu_temp_mem = workload * 13* EQUATIONS * sizeof(fp);
cudaMalloc((void **)&d_finavalu_temp, finavalu_temp_mem);
time2 = get_time();
//================================================================================80
// READ FROM FILES OR SET INITIAL VALUES
//================================================================================80
//========================================40
// X
//========================================40
for(i=0; i<workload; i++){
pointer = i * (xmax+1) + 0;
x[pointer] = 0;
}
cudaMemcpy(d_x, x, x_mem, cudaMemcpyHostToDevice);
//========================================40
// Y
//========================================40
for(i=0; i<workload; i++){
pointer = i*((xmax+1)*EQUATIONS) + 0*(EQUATIONS);
read("../data/y.txt",
&y[pointer],
91,
1,
0);
}
cudaMemcpy(d_y, y, y_mem, cudaMemcpyHostToDevice);
//========================================40
// PARAMS
//========================================40
for(i=0; i<workload; i++){
pointer = i*PARAMETERS;
read("../data/params.txt",
&params[pointer],
18,
1,
0);
}
cudaMemcpy(d_params, params, params_mem, cudaMemcpyHostToDevice);
time3 = get_time();
//================================================================================80
// EXECUTION IF THERE ARE MANY WORKLOADS
//================================================================================80
if(workload == 1){
threads.x = 32; // define the number of threads in the block
threads.y = 1;
blocks.x = 4; // define the number of blocks in the grid
blocks.y = 1;
}
else{
threads.x = NUMBER_THREADS; // define the number of threads in the block
threads.y = 1;
blocks_x = workload/threads.x;
if (workload % threads.x != 0){ // compensate for division remainder above by adding one grid
blocks_x = blocks_x + 1;
}
blocks.x = blocks_x; // define the number of blocks in the grid
blocks.y = 1;
}
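// Illustrative note (editorial assumption, not from the original code): the
// block count above is a ceiling division. E.g. if NUMBER_THREADS were 256 and
// workload were 1000, blocks_x = 1000/256 = 3 with a nonzero remainder, so it
// is bumped to 4 blocks (1024 threads) and the kernel is expected to ignore
// the 24 surplus threads.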
solver_2<<<blocks, threads>>>( workload,
xmax,
d_x,
d_y,
d_params,
d_com,
d_err,
d_scale,
d_yy,
d_initvalu_temp,
d_finavalu_temp);
// cudaThreadSynchronize();
// printf("CUDA error: %s\n", cudaGetErrorString(cudaGetLastError()));
time4 = get_time();
//================================================================================80
// COPY DATA BACK TO CPU
//================================================================================80
cudaMemcpy(x, d_x, x_mem, cudaMemcpyDeviceToHost);
cudaMemcpy(y, d_y, y_mem, cudaMemcpyDeviceToHost);
time5 = get_time();
//================================================================================80
// PRINT RESULTS (ENABLE SELECTIVELY FOR TESTING ONLY)
//================================================================================80
// int j, k;
// for(i=0; i<workload; i++){
// printf("WORKLOAD %d:\n", i);
// for(j=0; j<(xmax+1); j++){
// printf("\tTIME %d:\n", j);
// for(k=0; k<EQUATIONS; k++){
// printf("\t\ty[%d][%d][%d]=%13.10f\n", i, j, k, y[i*((xmax+1)*EQUATIONS) + j*(EQUATIONS)+k]);
// }
// }
// }
// for(i=0; i<workload; i++){
// printf("WORKLOAD %d:\n", i);
// for(j=0; j<(xmax+1); j++){
// printf("\tTIME %d:\n", j);
// printf("\t\tx[%d][%d]=%13.10f\n", i, j, x[i * (xmax+1) + j]);
// }
// }
//================================================================================80
// DEALLOCATION
//================================================================================80
//============================================================60
// X/Y INPUTS/OUTPUTS, PARAMS INPUTS
//============================================================60
free(y);
cudaFree(d_y);
free(x);
cudaFree(d_x);
free(params);
cudaFree(d_params);
//============================================================60
// TEMPORARY SOLVER VARIABLES
//============================================================60
cudaFree(d_com);
cudaFree(d_err);
cudaFree(d_scale);
cudaFree(d_yy);
cudaFree(d_initvalu_temp);
cudaFree(d_finavalu_temp);
time6= get_time();
//================================================================================80
// DISPLAY TIMING
//================================================================================80
printf("Time spent in different stages of the application:\n");
printf("%.12f s, %.12f % : SETUP VARIABLES\n", (float) (time1-time0) / 1000000, (float) (time1-time0) / (float) (time6-time0) * 100);
printf("%.12f s, %.12f % : ALLOCATE CPU MEMORY AND GPU MEMORY\n", (float) (time2-time1) / 1000000, (float) (time2-time1) / (float) (time6-time0) * 100);
printf("%.12f s, %.12f % : READ DATA FROM FILES, COPY TO GPU MEMORY\n", (float) (time3-time2) / 1000000, (float) (time3-time2) / (float) (time6-time0) * 100);
printf("%.12f s, %.12f % : RUN GPU KERNEL\n", (float) (time4-time3) / 1000000, (float) (time4-time3) / (float) (time6-time0) * 100);
printf("%.12f s, %.12f % : COPY GPU DATA TO CPU MEMORY\n", (float) (time5-time4) / 1000000, (float) (time5-time4) / (float) (time6-time0) * 100);
printf("%.12f s, %.12f % : FREE MEMORY\n", (float) (time6-time5) / 1000000, (float) (time6-time5) / (float) (time6-time0) * 100);
printf("Total time:\n");
printf("%.12f s\n", (float) (time6-time0) / 1000000);
//====================================================================================================100
// END OF FILE
//====================================================================================================100
return 0;
}
|
convolution.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file convolution.cu
*
* @author simon.marcin
*
* @brief Messy convolution algorithm to test all possible designs and parameters to
* evaluate the best setup for GPU-accelerated operators.
* Can run synchronous or asynchronous copies, with or without multiple streams.
* Writes a lot of debug messages.
* An illustrative usage sketch is appended at the end of this file.
*
*
*/
#include <stdio.h>
#include "convolution.h"
#include <iostream>
#include <float.h>
#define DSIZE 1024
#define DVAL 10
#define nTPB 256
#define hipHostRegisterPortable 0x01
#define ROWS_BLOCKDIM_X 16
#define ROWS_BLOCKDIM_Y 16
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, hipGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
template <class type> __global__ void convolutionKernel(float* d_Output,type* d_Input, size_t chunckWidth,
int overlap, size_t kernelSize, float* d_Kernel, int repetitions)
{
// define shared memory
const int globalWidth = chunckWidth+(2*overlap);
//__shared__ float s_Data[34][34];
extern __shared__ float s_Data[];
// offset to overlaps (halo elements)
const int globalX = (blockIdx.x * ROWS_BLOCKDIM_X) + threadIdx.x;
const int globalY = (blockIdx.y * ROWS_BLOCKDIM_Y) + threadIdx.y;
// load inner chunk elements
s_Data[(threadIdx.y+overlap)*(ROWS_BLOCKDIM_X+2*overlap) + threadIdx.x+overlap] = d_Input[((globalY+overlap)*globalWidth)+globalX+overlap];
//printf("thread %d,%d, input=%d to=%d,%d\n", threadIdx.y,threadIdx.x, d_Input[((globalY+overlap)*globalWidth)+globalX+overlap], threadIdx.y+overlap,threadIdx.x+overlap);
// // load upper halo elements
if (threadIdx.y < overlap){
s_Data[(threadIdx.y)*(ROWS_BLOCKDIM_X+2*overlap) + threadIdx.x+overlap]=d_Input[globalY*globalWidth + globalX+overlap];
}
//load bottom halo elements
if (threadIdx.y >= ROWS_BLOCKDIM_Y-overlap){
s_Data[(threadIdx.y+2*overlap)*(ROWS_BLOCKDIM_X+2*overlap) + threadIdx.x+overlap]=d_Input[(globalY+2*overlap)*globalWidth + globalX+overlap];
}
// load left halo elements
if (threadIdx.x < overlap){
s_Data[(threadIdx.y+overlap)*(ROWS_BLOCKDIM_X+2*overlap) + threadIdx.x]=d_Input[(globalY+overlap)*globalWidth + globalX];
}
// load right halo elements
if (threadIdx.x >= ROWS_BLOCKDIM_X-overlap){
s_Data[(threadIdx.y+overlap)*(ROWS_BLOCKDIM_X+2*overlap) + threadIdx.x+2*overlap]=d_Input[(globalY+overlap)*globalWidth + globalX+2*overlap];
}
// left upper edges
if (threadIdx.x < overlap and threadIdx.y < overlap){
s_Data[(threadIdx.y)*(ROWS_BLOCKDIM_X+2*overlap) + threadIdx.x]=d_Input[(globalY)*globalWidth + globalX];
}
// right upper edges
if (threadIdx.x >= ROWS_BLOCKDIM_X-overlap and threadIdx.y < overlap){
s_Data[(threadIdx.y)*(ROWS_BLOCKDIM_X+2*overlap) + threadIdx.x+2*overlap]=d_Input[(globalY)*globalWidth + globalX+2*overlap];
}
// left bottom edges
if (threadIdx.x < overlap and threadIdx.y >= ROWS_BLOCKDIM_Y-overlap){
s_Data[(threadIdx.y+2*overlap)*(ROWS_BLOCKDIM_X+2*overlap) + threadIdx.x]=d_Input[(globalY+2*overlap)*globalWidth + globalX];
}
// right bottom edges
if (threadIdx.x >= ROWS_BLOCKDIM_X-overlap and threadIdx.y >= ROWS_BLOCKDIM_Y-overlap){
s_Data[(threadIdx.y+2*overlap)*(ROWS_BLOCKDIM_X+2*overlap) + threadIdx.x+overlap]=d_Input[(globalY+2*overlap)*globalWidth + globalX+2*overlap];
}
//Compute and store results
__syncthreads();
float sum = 0;
for (int reps=0;reps<repetitions;reps++) {
#pragma unroll
for (int j = -overlap; j <= overlap; j++){
#pragma unroll
for (int i = -overlap; i <= overlap; i++){
//sum += s_Data[(threadIdx.y+j+overlap)*(ROWS_BLOCKDIM_X+2*overlap) + threadIdx.x+i+overlap];
sum += d_Kernel[ ((int)kernelSize/2) +(j*( 2*overlap+1 ))+i] * s_Data[(threadIdx.y+j+overlap)*(ROWS_BLOCKDIM_X+2*overlap) + threadIdx.x+i+overlap];
}
}
}
d_Output[globalY*chunckWidth+globalX] = sum;
}
// init the template function as the CUDA compiler doesn't know anything about SciDB
template __global__ void convolutionKernel<int16_t>(float*,int16_t*, size_t,int, size_t, float*, int);
template __global__ void convolutionKernel<int32_t>(float*,int32_t*, size_t,int, size_t, float*, int);
template <class type> void GPUHandle<type>::copyChunkValues(int i){
// transfer data to device
if(debug){LOG4CXX_INFO(logger, "start memory transfer!");}
if(useStreams){
if(async){
hipMemcpyAsync(d_Input[i], chunkValues[i], valueBytes, hipMemcpyHostToDevice,(streams[i]));
cudaCheckErrors("hipMemcpyAsync (async): chunkValues");
}else{
hipMemcpyAsync(d_Input[0], chunkValues[0], valueBytes, hipMemcpyHostToDevice,(streams[0]));
cudaCheckErrors("hipMemcpyAsync (streams): chunkValues");
}
}else{
hipMemcpy(d_Input[0], chunkValues[0], valueBytes, hipMemcpyHostToDevice);
hipDeviceSynchronize();
cudaCheckErrors("hipMemcpy: chunkValues");
}
if(debug){LOG4CXX_INFO(logger, "memory transfer done!");}
}
template <class type> void GPUHandle<type>::waitEvent(int i){
hipEventSynchronize(syncEvents[i]);
cudaCheckErrors("hipEventSynchronize");
}
template <class type> void GPUHandle<type>::copyResultValues(int i){
// transfer data to host
if(debug){LOG4CXX_INFO(logger, "start memory transfer!");}
if(useStreams){
if(async){
hipMemcpyAsync(resultValues[i], d_Output[i], resultBytes, hipMemcpyDeviceToHost, (streams[i]));
// Inject a sync Event as we need to be sure that we have the result
hipEventRecord(syncEvents[i],streams[i]);
cudaCheckErrors("hipMemcpyAsync (async): d_Output");
}else{
hipMemcpyAsync(resultValues[0], d_Output[0], resultBytes, hipMemcpyDeviceToHost, (streams[0]));
hipStreamSynchronize((streams[0]));
cudaCheckErrors("hipMemcpyAsync (streams): d_Output");
}
}else{
hipMemcpy(resultValues[0], d_Output[0], resultBytes, hipMemcpyDeviceToHost);
cudaCheckErrors("hipMemcpy: d_Output");
}
if(debug){LOG4CXX_INFO(logger, "memory transfer done!");}
}
template <class type> void GPUHandle<type>::runConvolution(int i, int repetitions){
// define the kernel grid (2D)
dim3 blocks(max(chunckWidth / ROWS_BLOCKDIM_X,size_t(1)), max(chunckWidth / ROWS_BLOCKDIM_Y,size_t(1)));
dim3 threads(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y);
// run kernel
if(debug){LOG4CXX_INFO(logger, chunckWidth<< " "<<overlap<< " "<<kernelSize);}
if(useStreams){
if(async){
hipLaunchKernelGGL(( convolutionKernel), dim3(blocks), dim3(threads), smBytes, (streams[i]), d_Output[i],d_Input[i],chunckWidth,overlap,kernelSize, d_Kernel, repetitions);
cudaCheckErrors("convolutionRowsKernel() (streams)");
}else{
hipLaunchKernelGGL(( convolutionKernel), dim3(blocks), dim3(threads), smBytes, (streams[0]), d_Output[0],d_Input[0],chunckWidth,overlap,kernelSize, d_Kernel, repetitions);
cudaCheckErrors("convolutionRowsKernel() (streams)");
}
}else{
if(debug){LOG4CXX_INFO(logger, "blocks: " << chunckWidth / ROWS_BLOCKDIM_X << "," << chunckWidth / ROWS_BLOCKDIM_Y << "," << smBytes);}
hipLaunchKernelGGL(( convolutionKernel), dim3(blocks), dim3(threads), smBytes, 0, d_Output[0],d_Input[0],chunckWidth,overlap,kernelSize, d_Kernel, repetitions);
hipDeviceSynchronize();
cudaCheckErrors("convolutionRowsKernel()");
}
if(debug){LOG4CXX_INFO(logger, "kernel done!");}
}
template <class type> GPUHandle<type>::~GPUHandle(){
// unpin memory
hipHostUnregister(chunkValues[0]);
cudaCheckErrors("cudaHostUnRegister: chunkValues");
hipHostUnregister(resultValues[0]);
cudaCheckErrors("cudaHostUnRegister: resultValues");
if(async){
hipHostUnregister(chunkValues[1]);
cudaCheckErrors("cudaHostUnRegister: chunkValues[1]");
hipHostUnregister(resultValues[1]);
cudaCheckErrors("cudaHostUnRegister: resultValues[1]");
}
// free memory on device
hipFree(d_Input[0]);
cudaCheckErrors("hipFree: d_Input");
hipFree(d_Output[0]);
cudaCheckErrors("hipFree: d_Output");
hipFree(d_Kernel);
cudaCheckErrors("hipFree: d_Kernel");
if(async){
hipFree(d_Input[1]);
cudaCheckErrors("hipFree async: d_Input");
hipFree(d_Output[1]);
cudaCheckErrors("hipFree async: d_Output");
}
// destroy streams
if(useStreams){
hipStreamDestroy(streams[0]);
hipStreamDestroy(streams[1]);
}
// destroy sync events
if(async){
hipEventDestroy(syncEvents[0]);
cudaCheckErrors("hipEventDestroy: syncEvents[0]");
hipEventDestroy(syncEvents[1]);
cudaCheckErrors("hipEventDestroy: syncEvents[1]");
}
}
template <class type> GPUHandle<type>::GPUHandle(float* in_kernel, size_t in_kernelBytes,type* in_chunkValues[],
size_t in_valueBytes, float* in_resultValues[], size_t in_resultBytes,
log4cxx::LoggerPtr in_logger, size_t in_chunckWidth, size_t in_kernelSize, bool in_debug,
bool in_useStreams, bool in_async){
// save values
kernel = in_kernel;
kernelBytes = in_kernelBytes;
chunkValues[0] = in_chunkValues[0];
chunkValues[1] = in_chunkValues[1];
valueBytes = in_valueBytes;
resultValues[0] = in_resultValues[0];
resultValues[1] = in_resultValues[1];
resultBytes = in_resultBytes;
logger = in_logger;
overlap = (int) sqrt(in_kernelSize)/2;
kernelSize = in_kernelSize;
chunckWidth = in_chunckWidth;
smBytes = (ROWS_BLOCKDIM_X+2*overlap)*(ROWS_BLOCKDIM_Y+2*overlap)*sizeof(float);
debug = in_debug;
useStreams = in_useStreams;
async = in_async;
// create streams
if(useStreams){
streams[0] = hipStream_t();
hipStreamCreateWithFlags(&(streams[0]),hipStreamNonBlocking);
cudaCheckErrors("hipStreamCreateWithFlags: streams[0]");
streams[1] = hipStream_t();
hipStreamCreateWithFlags(&(streams[1]),hipStreamNonBlocking);
cudaCheckErrors("hipStreamCreateWithFlags: streams[1]");
}
// create sync events
if(async){
syncEvents[0] = hipEvent_t();
syncEvents[1] = hipEvent_t();
hipEventCreateWithFlags(&(syncEvents[0]), hipEventDisableTiming);
cudaCheckErrors("hipEventCreateWithFlags: syncEvents[0]");
hipEventCreateWithFlags(&(syncEvents[1]), hipEventDisableTiming);
cudaCheckErrors("hipEventCreateWithFlags: syncEvents[1]");
}
// pin host memory (this is slow but we only do it once to allow async transfers)
hipHostRegister(chunkValues[0], valueBytes, hipHostRegisterPortable);
cudaCheckErrors("hipHostRegister: chunkValues[0]");
hipHostRegister(resultValues[0], resultBytes, hipHostRegisterPortable);
cudaCheckErrors("hipHostRegister: resultValues[0]");
if(async){
hipHostRegister(chunkValues[1], valueBytes, hipHostRegisterPortable);
cudaCheckErrors("hipHostRegister: chunkValues[1]");
hipHostRegister(resultValues[1], resultBytes, hipHostRegisterPortable);
cudaCheckErrors("hipHostRegister: resultValues[1]");
}
// prepare Memory on device
hipMalloc((void **)&(d_Input[0]), valueBytes);
cudaCheckErrors("hipMalloc: d_Input");
hipMalloc((void **)&(d_Output[0]), resultBytes);
cudaCheckErrors("hipMalloc: d_Output");
hipMalloc((void **)&d_Kernel, kernelBytes);
cudaCheckErrors("hipMalloc: d_Kernel");
if(async){
hipMalloc((void **)&(d_Input[1]), valueBytes);
cudaCheckErrors("hipMalloc async: d_Input");
hipMalloc((void **)&(d_Output[1]), resultBytes);
cudaCheckErrors("hipMalloc async: d_Output");
}
// copy kernel to constant memory
//hipMemcpyToSymbol(c_Kernel, kernel, kernelBytes);
// copy kernel to global memory (as the size is not known at compilation time)
if(useStreams){
hipMemcpyAsync(d_Kernel, kernel, kernelBytes, hipMemcpyHostToDevice, streams[0]);
cudaCheckErrors("hipMemcpyAsync (streams): d_Kernel");
}else{
hipMemcpy(d_Kernel, kernel, kernelBytes, hipMemcpyHostToDevice);
cudaCheckErrors("hipMemcpy: d_Kernel");
}
}
// init the template classes as nvcc doesn't know the used types
template class GPUHandle<int16_t>;
template class GPUHandle<int32_t>;
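// A hedged usage sketch (not part of the original source). The HIP build keeps
// the same GPUHandle interface as the CUDA original, so a caller would drive it
// with the same double-buffered sequence per chunk; the buffer names and sizes
// below are placeholders:
//
//   GPUHandle<int16_t> gpu(kernel, kernelBytes, chunkValues, valueBytes,
//                          resultValues, resultBytes, logger,
//                          chunckWidth, kernelSize, /*debug=*/false,
//                          /*useStreams=*/true, /*async=*/true);
//   for (int c = 0; c < numChunks; ++c) {
//       int buf = c % 2;              // ping-pong between the two host buffers
//       gpu.copyChunkValues(buf);     // host -> device (hipMemcpyAsync on streams[buf])
//       gpu.runConvolution(buf, 1);   // launch convolutionKernel on streams[buf]
//       gpu.copyResultValues(buf);    // device -> host + hipEventRecord
//       gpu.waitEvent(buf);           // hipEventSynchronize before reading resultValues[buf]
//   }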
| convolution.cu | /**
* @file convolution.cu
*
* @author simon.marcin
*
* @brief Messy convolution algorithm to test all possible designs and parameters to
* evaluate the best setup for GPU accelerated Operators.
* Can run synchronous copys or asynchronous, can run multiple streams or not.
* Writes a lot of debug messages.
*
*
*/
#include <stdio.h>
#include "convolution.h"
#include <iostream>
#include <float.h>
#define DSIZE 1024
#define DVAL 10
#define nTPB 256
#define cudaHostRegisterPortable 0x01
#define ROWS_BLOCKDIM_X 16
#define ROWS_BLOCKDIM_Y 16
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
template <class type> __global__ void convolutionKernel(float* d_Output,type* d_Input, size_t chunckWidth,
int overlap, size_t kernelSize, float* d_Kernel, int repetitions)
{
// define shared memory
const int globalWidth = chunckWidth+(2*overlap);
//__shared__ float s_Data[34][34];
extern __shared__ float s_Data[];
// offset to overlaps (halo elements)
const int globalX = (blockIdx.x * ROWS_BLOCKDIM_X) + threadIdx.x;
const int globalY = (blockIdx.y * ROWS_BLOCKDIM_Y) + threadIdx.y;
// load inner chunk elements
s_Data[(threadIdx.y+overlap)*(ROWS_BLOCKDIM_X+2*overlap) + threadIdx.x+overlap] = d_Input[((globalY+overlap)*globalWidth)+globalX+overlap];
//printf("thread %d,%d, input=%d to=%d,%d\n", threadIdx.y,threadIdx.x, d_Input[((globalY+overlap)*globalWidth)+globalX+overlap], threadIdx.y+overlap,threadIdx.x+overlap);
// // load upper halo elements
if (threadIdx.y < overlap){
s_Data[(threadIdx.y)*(ROWS_BLOCKDIM_X+2*overlap) + threadIdx.x+overlap]=d_Input[globalY*globalWidth + globalX+overlap];
}
//load bottom halo elements
if (threadIdx.y >= ROWS_BLOCKDIM_Y-overlap){
s_Data[(threadIdx.y+2*overlap)*(ROWS_BLOCKDIM_X+2*overlap) + threadIdx.x+overlap]=d_Input[(globalY+2*overlap)*globalWidth + globalX+overlap];
}
// load left halo elements
if (threadIdx.x < overlap){
s_Data[(threadIdx.y+overlap)*(ROWS_BLOCKDIM_X+2*overlap) + threadIdx.x]=d_Input[(globalY+overlap)*globalWidth + globalX];
}
// load right halo elements
if (threadIdx.x >= ROWS_BLOCKDIM_X-overlap){
s_Data[(threadIdx.y+overlap)*(ROWS_BLOCKDIM_X+2*overlap) + threadIdx.x+2*overlap]=d_Input[(globalY+overlap)*globalWidth + globalX+2*overlap];
}
// left upper edges
if (threadIdx.x < overlap and threadIdx.y < overlap){
s_Data[(threadIdx.y)*(ROWS_BLOCKDIM_X+2*overlap) + threadIdx.x]=d_Input[(globalY)*globalWidth + globalX];
}
// right upper edges
if (threadIdx.x >= ROWS_BLOCKDIM_X-overlap and threadIdx.y < overlap){
s_Data[(threadIdx.y)*(ROWS_BLOCKDIM_X+2*overlap) + threadIdx.x+2*overlap]=d_Input[(globalY)*globalWidth + globalX+2*overlap];
}
// left bottom edges
if (threadIdx.x < overlap and threadIdx.y >= ROWS_BLOCKDIM_Y-overlap){
s_Data[(threadIdx.y+2*overlap)*(ROWS_BLOCKDIM_X+2*overlap) + threadIdx.x]=d_Input[(globalY+2*overlap)*globalWidth + globalX];
}
// right bottom edges
if (threadIdx.x >= ROWS_BLOCKDIM_X-overlap and threadIdx.y >= ROWS_BLOCKDIM_Y-overlap){
s_Data[(threadIdx.y+2*overlap)*(ROWS_BLOCKDIM_X+2*overlap) + threadIdx.x+overlap]=d_Input[(globalY+2*overlap)*globalWidth + globalX+2*overlap];
}
//Compute and store results
__syncthreads();
float sum = 0;
for (int reps=0;reps<repetitions;reps++) {
#pragma unroll
for (int j = -overlap; j <= overlap; j++){
#pragma unroll
for (int i = -overlap; i <= overlap; i++){
//sum += s_Data[(threadIdx.y+j+overlap)*(ROWS_BLOCKDIM_X+2*overlap) + threadIdx.x+i+overlap];
sum += d_Kernel[ ((int)kernelSize/2) +(j*( 2*overlap+1 ))+i] * s_Data[(threadIdx.y+j+overlap)*(ROWS_BLOCKDIM_X+2*overlap) + threadIdx.x+i+overlap];
}
}
}
d_Output[globalY*chunckWidth+globalX] = sum;
}
// init the template function as the CUDA compiler doesn't know anything about SciDB
template __global__ void convolutionKernel<int16_t>(float*,int16_t*, size_t,int, size_t, float*, int);
template __global__ void convolutionKernel<int32_t>(float*,int32_t*, size_t,int, size_t, float*, int);
template <class type> void GPUHandle<type>::copyChunkValues(int i){
// transfer data to device
if(debug){LOG4CXX_INFO(logger, "start memory transfer!");}
if(useStreams){
if(async){
cudaMemcpyAsync(d_Input[i], chunkValues[i], valueBytes, cudaMemcpyHostToDevice,(streams[i]));
cudaCheckErrors("cudaMemcpyAsync (async): chunkValues");
}else{
cudaMemcpyAsync(d_Input[0], chunkValues[0], valueBytes, cudaMemcpyHostToDevice,(streams[0]));
cudaCheckErrors("cudaMemcpyAsync (streams): chunkValues");
}
}else{
cudaMemcpy(d_Input[0], chunkValues[0], valueBytes, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
cudaCheckErrors("cudaMemcpy: chunkValues");
}
if(debug){LOG4CXX_INFO(logger, "memory transfer done!");}
}
template <class type> void GPUHandle<type>::waitEvent(int i){
cudaEventSynchronize(syncEvents[i]);
cudaCheckErrors("cudaEventSynchronize");
}
template <class type> void GPUHandle<type>::copyResultValues(int i){
// transfer data to host
if(debug){LOG4CXX_INFO(logger, "start memory transfer!");}
if(useStreams){
if(async){
cudaMemcpyAsync(resultValues[i], d_Output[i], resultBytes, cudaMemcpyDeviceToHost, (streams[i]));
// Inject a sync Event as we need to be sure that we have the result
cudaEventRecord(syncEvents[i],streams[i]);
cudaCheckErrors("cudaMemcpyAsync (async): d_Output");
}else{
cudaMemcpyAsync(resultValues[0], d_Output[0], resultBytes, cudaMemcpyDeviceToHost, (streams[0]));
cudaStreamSynchronize((streams[0]));
cudaCheckErrors("cudaMemcpyAsync (streams): d_Output");
}
}else{
cudaMemcpy(resultValues[0], d_Output[0], resultBytes, cudaMemcpyDeviceToHost);
cudaCheckErrors("cudaMemcpy: d_Output");
}
if(debug){LOG4CXX_INFO(logger, "memory transfer done!");}
}
template <class type> void GPUHandle<type>::runConvolution(int i, int repetitions){
// define the kernel grid (2D)
dim3 blocks(max(chunckWidth / ROWS_BLOCKDIM_X,size_t(1)), max(chunckWidth / ROWS_BLOCKDIM_Y,size_t(1)));
dim3 threads(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y);
// run kernel
if(debug){LOG4CXX_INFO(logger, chunckWidth<< " "<<overlap<< " "<<kernelSize);}
if(useStreams){
if(async){
convolutionKernel<<<blocks, threads, smBytes, (streams[i])>>>(d_Output[i],d_Input[i],chunckWidth,overlap,kernelSize, d_Kernel, repetitions);
cudaCheckErrors("convolutionRowsKernel() (streams)");
}else{
convolutionKernel<<<blocks, threads, smBytes, (streams[0])>>>(d_Output[0],d_Input[0],chunckWidth,overlap,kernelSize, d_Kernel, repetitions);
cudaCheckErrors("convolutionRowsKernel() (streams)");
}
}else{
if(debug){LOG4CXX_INFO(logger, "blocks: " << chunckWidth / ROWS_BLOCKDIM_X << "," << chunckWidth / ROWS_BLOCKDIM_Y << "," << smBytes);}
convolutionKernel<<<blocks, threads, smBytes>>>(d_Output[0],d_Input[0],chunckWidth,overlap,kernelSize, d_Kernel, repetitions);
cudaDeviceSynchronize();
cudaCheckErrors("convolutionRowsKernel()");
}
if(debug){LOG4CXX_INFO(logger, "kernel done!");}
}
template <class type> GPUHandle<type>::~GPUHandle(){
// unpin memory
cudaHostUnregister(chunkValues[0]);
cudaCheckErrors("cudaHostUnRegister: chunkValues");
cudaHostUnregister(resultValues[0]);
cudaCheckErrors("cudaHostUnRegister: resultValues");
if(async){
cudaHostUnregister(chunkValues[1]);
cudaCheckErrors("cudaHostUnRegister: chunkValues[1]");
cudaHostUnregister(resultValues[1]);
cudaCheckErrors("cudaHostUnRegister: resultValues[1]");
}
// free memory on device
cudaFree(d_Input[0]);
cudaCheckErrors("cudaFree: d_Input");
cudaFree(d_Output[0]);
cudaCheckErrors("cudaFree: d_Output");
cudaFree(d_Kernel);
cudaCheckErrors("cudaFree: d_Kernel");
if(async){
cudaFree(d_Input[1]);
cudaCheckErrors("cudaFree async: d_Input");
cudaFree(d_Output[1]);
cudaCheckErrors("cudaFree async: d_Output");
}
// destroy streams
if(useStreams){
cudaStreamDestroy(streams[0]);
cudaStreamDestroy(streams[1]);
}
// destroy sync events
if(async){
cudaEventDestroy(syncEvents[0]);
cudaCheckErrors("cudaEventDestroy: syncEvents[0]");
cudaEventDestroy(syncEvents[1]);
cudaCheckErrors("cudaEventDestroy: syncEvents[1]");
}
}
template <class type> GPUHandle<type>::GPUHandle(float* in_kernel, size_t in_kernelBytes,type* in_chunkValues[],
size_t in_valueBytes, float* in_resultValues[], size_t in_resultBytes,
log4cxx::LoggerPtr in_logger, size_t in_chunckWidth, size_t in_kernelSize, bool in_debug,
bool in_useStreams, bool in_async){
// save values
kernel = in_kernel;
kernelBytes = in_kernelBytes;
chunkValues[0] = in_chunkValues[0];
chunkValues[1] = in_chunkValues[1];
valueBytes = in_valueBytes;
resultValues[0] = in_resultValues[0];
resultValues[1] = in_resultValues[1];
resultBytes = in_resultBytes;
logger = in_logger;
overlap = (int) sqrt(in_kernelSize)/2;
kernelSize = in_kernelSize;
chunckWidth = in_chunckWidth;
smBytes = (ROWS_BLOCKDIM_X+2*overlap)*(ROWS_BLOCKDIM_Y+2*overlap)*sizeof(float);
debug = in_debug;
useStreams = in_useStreams;
async = in_async;
// create streams
if(useStreams){
streams[0] = cudaStream_t();
cudaStreamCreateWithFlags(&(streams[0]),cudaStreamNonBlocking);
cudaCheckErrors("cudaStreamCreateWithFlags: streams[0]");
streams[1] = cudaStream_t();
cudaStreamCreateWithFlags(&(streams[1]),cudaStreamNonBlocking);
cudaCheckErrors("cudaStreamCreateWithFlags: streams[1]");
}
// create sync events
if(async){
syncEvents[0] = cudaEvent_t();
syncEvents[1] = cudaEvent_t();
cudaEventCreateWithFlags(&(syncEvents[0]), cudaEventDisableTiming);
cudaCheckErrors("cudaEventCreateWithFlags: syncEvents[0]");
cudaEventCreateWithFlags(&(syncEvents[1]), cudaEventDisableTiming);
cudaCheckErrors("cudaEventCreateWithFlags: syncEvents[1]");
}
// pin host memory (this is slow but we only do it once to allow async transfers)
cudaHostRegister(chunkValues[0], valueBytes, cudaHostRegisterPortable);
cudaCheckErrors("cudaHostRegister: chunkValues[0]");
cudaHostRegister(resultValues[0], resultBytes, cudaHostRegisterPortable);
cudaCheckErrors("cudaHostRegister: resultValues[0]");
if(async){
cudaHostRegister(chunkValues[1], valueBytes, cudaHostRegisterPortable);
cudaCheckErrors("cudaHostRegister: chunkValues[1]");
cudaHostRegister(resultValues[1], resultBytes, cudaHostRegisterPortable);
cudaCheckErrors("cudaHostRegister: resultValues[1]");
}
// prepare Memory on device
cudaMalloc((void **)&(d_Input[0]), valueBytes);
cudaCheckErrors("cudaMalloc: d_Input");
cudaMalloc((void **)&(d_Output[0]), resultBytes);
cudaCheckErrors("cudaMalloc: d_Output");
cudaMalloc((void **)&d_Kernel, kernelBytes);
cudaCheckErrors("cudaMalloc: d_Kernel");
if(async){
cudaMalloc((void **)&(d_Input[1]), valueBytes);
cudaCheckErrors("cudaMalloc async: d_Input");
cudaMalloc((void **)&(d_Output[1]), resultBytes);
cudaCheckErrors("cudaMalloc async: d_Output");
}
// copy kernel to constant memory
//cudaMemcpyToSymbol(c_Kernel, kernel, kernelBytes);
// copy kernel to global memory (as the size is not known at compilation time)
if(useStreams){
cudaMemcpyAsync(d_Kernel, kernel, kernelBytes, cudaMemcpyHostToDevice, streams[0]);
cudaCheckErrors("cudaMemcpyAsync (streams): d_Kernel");
}else{
cudaMemcpy(d_Kernel, kernel, kernelBytes, cudaMemcpyHostToDevice);
cudaCheckErrors("cudaMemcpy: d_Kernel");
}
}
// init the template classes as nvcc doesn't know the used types
template class GPUHandle<int16_t>;
template class GPUHandle<int32_t>;
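// A hedged usage sketch (not part of the original source). Buffer names, sizes,
// the SciDB chunk layout and the log4cxx logger are placeholders; the call
// sequence simply follows the methods defined above. With async == true the two
// host buffers (pinned by the constructor) act as a double buffer, one CUDA
// stream per buffer.
//
//   float kernel[9] = { /* 3x3 weights */ };
//   int16_t* chunkValues[2]  = { hostIn0,  hostIn1  };   // host input buffers
//   float*   resultValues[2] = { hostOut0, hostOut1 };   // host output buffers
//   GPUHandle<int16_t> gpu(kernel, sizeof(kernel), chunkValues, valueBytes,
//                          resultValues, resultBytes, logger,
//                          /*chunckWidth=*/512, /*kernelSize=*/9,
//                          /*debug=*/false, /*useStreams=*/true, /*async=*/true);
//   for (int c = 0; c < numChunks; ++c) {
//       int buf = c % 2;                       // ping-pong between buffers/streams
//       gpu.copyChunkValues(buf);              // H2D copy on streams[buf]
//       gpu.runConvolution(buf, /*repetitions=*/1);
//       gpu.copyResultValues(buf);             // D2H copy + sync event
//       gpu.waitEvent(buf);                    // wait until this buffer's result is on the host
//   }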
|
convolution.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file convolution.cu
*
* @author simon.marcin
*
* @brief Messy convolution algorithm to test all possible designs and parameters to
* evaluate the best setup for GPU-accelerated operators.
* Can run synchronous or asynchronous copies, with or without multiple streams.
* Writes a lot of debug messages.
*
*
*/
#include <stdio.h>
#include "convolution.h"
#include <iostream>
#include <float.h>
#define DSIZE 1024
#define DVAL 10
#define nTPB 256
#define hipHostRegisterPortable 0x01
#define ROWS_BLOCKDIM_X 16
#define ROWS_BLOCKDIM_Y 16
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, hipGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
template <class type> __global__ void convolutionKernel(float* d_Output,type* d_Input, size_t chunckWidth,
int overlap, size_t kernelSize, float* d_Kernel, int repetitions)
{
// define shared memory
const int globalWidth = chunckWidth+(2*overlap);
//__shared__ float s_Data[34][34];
extern __shared__ float s_Data[];
// offset to overlaps (halo elements)
const int globalX = (blockIdx.x * ROWS_BLOCKDIM_X) + threadIdx.x;
const int globalY = (blockIdx.y * ROWS_BLOCKDIM_Y) + threadIdx.y;
// load inner chunk elements
s_Data[(threadIdx.y+overlap)*(ROWS_BLOCKDIM_X+2*overlap) + threadIdx.x+overlap] = d_Input[((globalY+overlap)*globalWidth)+globalX+overlap];
//printf("thread %d,%d, input=%d to=%d,%d\n", threadIdx.y,threadIdx.x, d_Input[((globalY+overlap)*globalWidth)+globalX+overlap], threadIdx.y+overlap,threadIdx.x+overlap);
// // load upper halo elements
if (threadIdx.y < overlap){
s_Data[(threadIdx.y)*(ROWS_BLOCKDIM_X+2*overlap) + threadIdx.x+overlap]=d_Input[globalY*globalWidth + globalX+overlap];
}
//load bottom halo elements
if (threadIdx.y >= ROWS_BLOCKDIM_Y-overlap){
s_Data[(threadIdx.y+2*overlap)*(ROWS_BLOCKDIM_X+2*overlap) + threadIdx.x+overlap]=d_Input[(globalY+2*overlap)*globalWidth + globalX+overlap];
}
// load left halo elements
if (threadIdx.x < overlap){
s_Data[(threadIdx.y+overlap)*(ROWS_BLOCKDIM_X+2*overlap) + threadIdx.x]=d_Input[(globalY+overlap)*globalWidth + globalX];
}
// load right halo elements
if (threadIdx.x >= ROWS_BLOCKDIM_X-overlap){
s_Data[(threadIdx.y+overlap)*(ROWS_BLOCKDIM_X+2*overlap) + threadIdx.x+2*overlap]=d_Input[(globalY+overlap)*globalWidth + globalX+2*overlap];
}
// left upper edges
if (threadIdx.x < overlap and threadIdx.y < overlap){
s_Data[(threadIdx.y)*(ROWS_BLOCKDIM_X+2*overlap) + threadIdx.x]=d_Input[(globalY)*globalWidth + globalX];
}
// right upper edges
if (threadIdx.x >= ROWS_BLOCKDIM_X-overlap and threadIdx.y < overlap){
s_Data[(threadIdx.y)*(ROWS_BLOCKDIM_X+2*overlap) + threadIdx.x+2*overlap]=d_Input[(globalY)*globalWidth + globalX+2*overlap];
}
// left bottom edges
if (threadIdx.x < overlap and threadIdx.y >= ROWS_BLOCKDIM_Y-overlap){
s_Data[(threadIdx.y+2*overlap)*(ROWS_BLOCKDIM_X+2*overlap) + threadIdx.x]=d_Input[(globalY+2*overlap)*globalWidth + globalX];
}
// right bottom edges
if (threadIdx.x >= ROWS_BLOCKDIM_X-overlap and threadIdx.y >= ROWS_BLOCKDIM_Y-overlap){
s_Data[(threadIdx.y+2*overlap)*(ROWS_BLOCKDIM_X+2*overlap) + threadIdx.x+overlap]=d_Input[(globalY+2*overlap)*globalWidth + globalX+2*overlap];
}
//Compute and store results
__syncthreads();
float sum = 0;
for (int reps=0;reps<repetitions;reps++) {
#pragma unroll
for (int j = -overlap; j <= overlap; j++){
#pragma unroll
for (int i = -overlap; i <= overlap; i++){
//sum += s_Data[(threadIdx.y+j+overlap)*(ROWS_BLOCKDIM_X+2*overlap) + threadIdx.x+i+overlap];
sum += d_Kernel[ ((int)kernelSize/2) +(j*( 2*overlap+1 ))+i] * s_Data[(threadIdx.y+j+overlap)*(ROWS_BLOCKDIM_X+2*overlap) + threadIdx.x+i+overlap];
}
}
}
d_Output[globalY*chunckWidth+globalX] = sum;
}
// init the template function as the CUDA compiler doesn't know anything about SciDB
template __global__ void convolutionKernel<int16_t>(float*,int16_t*, size_t,int, size_t, float*, int);
template __global__ void convolutionKernel<int32_t>(float*,int32_t*, size_t,int, size_t, float*, int);
template <class type> void GPUHandle<type>::copyChunkValues(int i){
// transfer data to device
if(debug){LOG4CXX_INFO(logger, "start memory transfer!");}
if(useStreams){
if(async){
hipMemcpyAsync(d_Input[i], chunkValues[i], valueBytes, hipMemcpyHostToDevice,(streams[i]));
cudaCheckErrors("hipMemcpyAsync (async): chunkValues");
}else{
hipMemcpyAsync(d_Input[0], chunkValues[0], valueBytes, hipMemcpyHostToDevice,(streams[0]));
cudaCheckErrors("hipMemcpyAsync (streams): chunkValues");
}
}else{
hipMemcpy(d_Input[0], chunkValues[0], valueBytes, hipMemcpyHostToDevice);
hipDeviceSynchronize();
cudaCheckErrors("hipMemcpy: chunkValues");
}
if(debug){LOG4CXX_INFO(logger, "memory transfer done!");}
}
template <class type> void GPUHandle<type>::waitEvent(int i){
hipEventSynchronize(syncEvents[i]);
cudaCheckErrors("hipEventSynchronize");
}
template <class type> void GPUHandle<type>::copyResultValues(int i){
// transfer data to host
if(debug){LOG4CXX_INFO(logger, "start memory transfer!");}
if(useStreams){
if(async){
hipMemcpyAsync(resultValues[i], d_Output[i], resultBytes, hipMemcpyDeviceToHost, (streams[i]));
// Inject a sync Event as we need to be sure that we have the result
hipEventRecord(syncEvents[i],streams[i]);
cudaCheckErrors("hipMemcpyAsync (async): d_Output");
}else{
hipMemcpyAsync(resultValues[0], d_Output[0], resultBytes, hipMemcpyDeviceToHost, (streams[0]));
hipStreamSynchronize((streams[0]));
cudaCheckErrors("hipMemcpyAsync (streams): d_Output");
}
}else{
hipMemcpy(resultValues[0], d_Output[0], resultBytes, hipMemcpyDeviceToHost);
cudaCheckErrors("hipMemcpy: d_Output");
}
if(debug){LOG4CXX_INFO(logger, "memory transfer done!");}
}
template <class type> void GPUHandle<type>::runConvolution(int i, int repetitions){
// define the kernel grid (2D)
dim3 blocks(max(chunckWidth / ROWS_BLOCKDIM_X,size_t(1)), max(chunckWidth / ROWS_BLOCKDIM_Y,size_t(1)));
dim3 threads(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y);
// run kernel
if(debug){LOG4CXX_INFO(logger, chunckWidth<< " "<<overlap<< " "<<kernelSize);}
if(useStreams){
if(async){
hipLaunchKernelGGL(( convolutionKernel), dim3(blocks), dim3(threads), smBytes, (streams[i]), d_Output[i],d_Input[i],chunckWidth,overlap,kernelSize, d_Kernel, repetitions);
cudaCheckErrors("convolutionRowsKernel() (streams)");
}else{
hipLaunchKernelGGL(( convolutionKernel), dim3(blocks), dim3(threads), smBytes, (streams[0]), d_Output[0],d_Input[0],chunckWidth,overlap,kernelSize, d_Kernel, repetitions);
cudaCheckErrors("convolutionRowsKernel() (streams)");
}
}else{
if(debug){LOG4CXX_INFO(logger, "blocks: " << chunckWidth / ROWS_BLOCKDIM_X << "," << chunckWidth / ROWS_BLOCKDIM_Y << "," << smBytes);}
hipLaunchKernelGGL(( convolutionKernel), dim3(blocks), dim3(threads), smBytes, 0, d_Output[0],d_Input[0],chunckWidth,overlap,kernelSize, d_Kernel, repetitions);
hipDeviceSynchronize();
cudaCheckErrors("convolutionRowsKernel()");
}
if(debug){LOG4CXX_INFO(logger, "kernel done!");}
}
template <class type> GPUHandle<type>::~GPUHandle(){
// unpin memory
hipHostUnregister(chunkValues[0]);
cudaCheckErrors("cudaHostUnRegister: chunkValues");
hipHostUnregister(resultValues[0]);
cudaCheckErrors("cudaHostUnRegister: resultValues");
if(async){
hipHostUnregister(chunkValues[1]);
cudaCheckErrors("cudaHostUnRegister: chunkValues[1]");
hipHostUnregister(resultValues[1]);
cudaCheckErrors("cudaHostUnRegister: resultValues[1]");
}
// free memory on device
hipFree(d_Input[0]);
cudaCheckErrors("hipFree: d_Input");
hipFree(d_Output[0]);
cudaCheckErrors("hipFree: d_Output");
hipFree(d_Kernel);
cudaCheckErrors("hipFree: d_Kernel");
if(async){
hipFree(d_Input[1]);
cudaCheckErrors("hipFree async: d_Input");
hipFree(d_Output[1]);
cudaCheckErrors("hipFree async: d_Output");
}
// destroy streams
if(useStreams){
hipStreamDestroy(streams[0]);
hipStreamDestroy(streams[1]);
}
// destroy sync events
if(async){
hipEventDestroy(syncEvents[0]);
cudaCheckErrors("hipEventDestroy: syncEvents[0]");
hipEventDestroy(syncEvents[1]);
cudaCheckErrors("hipEventDestroy: syncEvents[1]");
}
}
template <class type> GPUHandle<type>::GPUHandle(float* in_kernel, size_t in_kernelBytes,type* in_chunkValues[],
size_t in_valueBytes, float* in_resultValues[], size_t in_resultBytes,
log4cxx::LoggerPtr in_logger, size_t in_chunckWidth, size_t in_kernelSize, bool in_debug,
bool in_useStreams, bool in_async){
// save values
kernel = in_kernel;
kernelBytes = in_kernelBytes;
chunkValues[0] = in_chunkValues[0];
chunkValues[1] = in_chunkValues[1];
valueBytes = in_valueBytes;
resultValues[0] = in_resultValues[0];
resultValues[1] = in_resultValues[1];
resultBytes = in_resultBytes;
logger = in_logger;
overlap = (int) sqrt(in_kernelSize)/2;
kernelSize = in_kernelSize;
chunckWidth = in_chunckWidth;
smBytes = (ROWS_BLOCKDIM_X+2*overlap)*(ROWS_BLOCKDIM_Y+2*overlap)*sizeof(float);
debug = in_debug;
useStreams = in_useStreams;
async = in_async;
// create streams
if(useStreams){
streams[0] = hipStream_t();
hipStreamCreateWithFlags(&(streams[0]),hipStreamNonBlocking);
cudaCheckErrors("hipStreamCreateWithFlags: streams[0]");
streams[1] = hipStream_t();
hipStreamCreateWithFlags(&(streams[1]),hipStreamNonBlocking);
cudaCheckErrors("hipStreamCreateWithFlags: streams[1]");
}
// create sync events
if(async){
syncEvents[0] = hipEvent_t();
syncEvents[1] = hipEvent_t();
hipEventCreateWithFlags(&(syncEvents[0]), hipEventDisableTiming);
cudaCheckErrors("hipEventCreateWithFlags: syncEvents[0]");
hipEventCreateWithFlags(&(syncEvents[1]), hipEventDisableTiming);
cudaCheckErrors("hipEventCreateWithFlags: syncEvents[1]");
}
// pin host memory (this is slow but we only do it once to allow async transfers)
hipHostRegister(chunkValues[0], valueBytes, hipHostRegisterPortable);
cudaCheckErrors("hipHostRegister: chunkValues[0]");
hipHostRegister(resultValues[0], resultBytes, hipHostRegisterPortable);
cudaCheckErrors("hipHostRegister: resultValues[0]");
if(async){
hipHostRegister(chunkValues[1], valueBytes, hipHostRegisterPortable);
cudaCheckErrors("hipHostRegister: chunkValues[1]");
hipHostRegister(resultValues[1], resultBytes, hipHostRegisterPortable);
cudaCheckErrors("hipHostRegister: resultValues[1]");
}
// prepare Memory on device
hipMalloc((void **)&(d_Input[0]), valueBytes);
cudaCheckErrors("hipMalloc: d_Input");
hipMalloc((void **)&(d_Output[0]), resultBytes);
cudaCheckErrors("hipMalloc: d_Output");
hipMalloc((void **)&d_Kernel, kernelBytes);
cudaCheckErrors("hipMalloc: d_Kernel");
if(async){
hipMalloc((void **)&(d_Input[1]), valueBytes);
cudaCheckErrors("hipMalloc async: d_Input");
hipMalloc((void **)&(d_Output[1]), resultBytes);
cudaCheckErrors("hipMalloc async: d_Output");
}
// copy kernel to constant memory
//hipMemcpyToSymbol(c_Kernel, kernel, kernelBytes);
// copy kernel to global memory (as the size is not known at compilation time)
if(useStreams){
hipMemcpyAsync(d_Kernel, kernel, kernelBytes, hipMemcpyHostToDevice, streams[0]);
cudaCheckErrors("hipMemcpyAsync (streams): d_Kernel");
}else{
hipMemcpy(d_Kernel, kernel, kernelBytes, hipMemcpyHostToDevice);
cudaCheckErrors("hipMemcpy: d_Kernel");
}
}
// init the template classes as nvcc doesn't know the used types
template class GPUHandle<int16_t>;
template class GPUHandle<int32_t>;
| convolution.cuh | #include "common.h"
typedef float FLOAT;
template <typename T>
__global__ void global_conv(T *dev_x,T *dev_z,T *dev_kernel,int height,int width,int kernel_size)
{
/** blocks are the basic unit of computation, e.g. a <<<2,256>>> launch */
int idx = threadIdx.x+blockDim.x*blockIdx.x;
if(idx>=height*width) return; // out of bounds
// recover the image row and column from idx
int row=idx/width;
int col=idx%width;
FLOAT img_value=0;
int cur_row=0;
int cur_col=0;
for(int i=0;i<kernel_size;++i)
{
for(int j=0;j<kernel_size;++j)
{
// locate the pixel under this kernel position (offset from the kernel's top-left corner)
cur_row=row-kernel_size/2+i;
cur_col=col-kernel_size/2+j;
if(cur_row<0 || cur_col<0 || cur_row>=height || cur_col>=width)
{
img_value=0;
}
else
{
// map back to the corresponding global coordinate
img_value=dev_x[cur_row*width+cur_col];
}
dev_z[idx]+=img_value*dev_kernel[j+i*kernel_size]; // multiply by the matching kernel weight and accumulate
}
}
}
// shared-memory version (recommended)
template <int SIZE>
__global__ void shared_conv(FLOAT *dev_x,FLOAT *dev_z,FLOAT *dev_kernel,int height,int width,int kernel_size)
{
__shared__ FLOAT imgData[SIZE];
__shared__ FLOAT wData[SIZE];
__shared__ FLOAT outData[1]; // each block keeps only the accumulated result, i.e. a single value
/** blocks are the basic unit of computation, e.g. a <<<2,256>>> launch */
// int idx = threadIdx.x+blockDim.x*blockIdx.x;
int tid = threadIdx.x;
int bid = blockIdx.x;
// if(bid>=height*width) return; // out of bounds
// recover the image row and column from bid
int row=bid/width;
int col=bid%width;
FLOAT img_value=0;
int cur_row=0;
int cur_col=0;
// global memory ---> shared memory
for(int i=0;i<kernel_size;++i)
{
for(int j=0;j<kernel_size;++j)
{
// locate the pixel under this kernel position (offset from the kernel's top-left corner)
cur_row=row-kernel_size/2+i;
cur_col=col-kernel_size/2+j;
if(cur_row<0 || cur_col<0 || cur_row>=height || cur_col>=width)
{
img_value=0;
}
else
{
// map back to the corresponding global coordinate
img_value=dev_x[cur_row*width+cur_col];
}
imgData[j+i*kernel_size]=img_value;
wData[j+i*kernel_size]=dev_kernel[j+i*kernel_size];
}
}
/*
if (tid==0)
{
for(int i=0;i<SIZE;++i)
{
// atomicAdd(&outData[0],imgData[i]*wData[i]);
outData[0]+=imgData[i]*wData[i]; // done by a single thread, so there is no write conflict
}
}
//*/
if (tid==0) outData[0]=0; // initialize the shared accumulator (shared memory is not zeroed automatically)
__syncthreads();
atomicAdd(&outData[0],imgData[tid]*wData[tid]); // every thread in the block adds its product at once; atomicAdd avoids write conflicts
__syncthreads(); // make sure all partial sums have landed before thread 0 writes the result
if (tid==0)
// shared memory to global memory
dev_z[bid] = outData[0];
}
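// A hedged launch sketch (not part of the original header). It assumes a 3x3
// kernel, i.e. kernel_size = 3 and SIZE = kernel_size*kernel_size = 9, and that
// dev_x, dev_z and dev_kernel are device buffers with dev_z zero-initialized.
//
//   // global-memory version: one thread per output pixel
//   int threads = 256;
//   int blocks  = (height * width + threads - 1) / threads;
//   global_conv<FLOAT><<<blocks, threads>>>(dev_x, dev_z, dev_kernel, height, width, 3);
//
//   // shared-memory version: one block per output pixel, one thread per kernel tap
//   shared_conv<9><<<height * width, 9>>>(dev_x, dev_z, dev_kernel, height, width, 3);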
// convolution recast as matrix multiplication, processing one 32x9 tile at a time
template <int SIZE1,int SIZE2>
__global__ void shared_conv2(FLOAT *dev_x,FLOAT *dev_z,FLOAT *dev_kernel,int height,int width,int kernel_size)
{
__shared__ FLOAT imgData[SIZE1][SIZE2]; // 32x9
__shared__ FLOAT wData[SIZE1][SIZE2];
__shared__ FLOAT outData[SIZE1];
// if(bid>=height*width) return; // out of bounds
int tx = threadIdx.x;
int ty = threadIdx.y;
int bx = blockIdx.x;
int by = blockIdx.y;
int bid = bx + by*gridDim.x;
// recover the image row and column from bid
int row=bid/width;
int col=bid%width;
FLOAT img_value=0;
int cur_row=0;
int cur_col=0;
// global memory ---> shared memory
for(int i=0;i<kernel_size;++i)
{
for(int j=0;j<kernel_size;++j)
{
// locate the pixel under this kernel position (offset from the kernel's top-left corner)
cur_row=row-kernel_size/2+i;
cur_col=col-kernel_size/2+j;
if(cur_row<0 || cur_col<0 || cur_row>=height || cur_col>=width)
{
img_value=0;
}
else
{
// map back to the corresponding global coordinate
img_value=dev_x[cur_row*width+cur_col];
}
imgData[ty][j+i*kernel_size]=img_value;
wData[ty][j+i*kernel_size]=dev_kernel[j+i*kernel_size];
}
}
FLOAT csum=0;
for(int i=0;i<SIZE2;++i) // serial accumulation, single thread
outData[ty]+=imgData[ty][i]*wData[ty][i]; // executed within one thread, so no write conflict
// or
// csum+=imgData[ty][i]*wData[ty][i];
/*
// having multiple threads execute this at the same time would conflict
// csum+=imgData[ty][tx]*wData[ty][tx];
// outData[ty]+=imgData[ty][tx]*wData[ty][tx];
// use an atomic add to resolve the conflict
atomicAdd(&outData[ty],imgData[ty][tx]*wData[ty][tx]); // using the atomic actually turned out to be even slower
*/
// shared memory to global memory
dev_z[bid] = outData[ty];
// dev_z[bid] = csum;
} |
ae87a72e9164b0cbe532a77dff169f8950c365e5.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
void initialize (int N, float *a, float *b, float *c){
for (int i = 0; i < N; i++){
if (i < N){
c[i] = 0;
a[i] = 1 + i;
b[i] = 1 - i;
}
}
}
void addVectors (int N, float *a, float *b, float *c){
for (int i = 0; i < N; i++){
if (i < N){
c[i] = a[i] + b[i];
}
}
}
int main (int argc, char **argv){
if (argc != 2) exit (1);
int N = atoi(argv[1]);
float *a, *b, *c;
a = (float *) malloc(N*sizeof(float));
b = (float *) malloc(N*sizeof(float));
c = (float *) malloc(N*sizeof(float));
initialize(N,a,b,c);
addVectors(N,a,b,c);
for (int i = 0; i < 5; i++) {
printf("%f\n", c[i]);
}
free(a);
free(b);
free(c);
}
| ae87a72e9164b0cbe532a77dff169f8950c365e5.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
void initialize (int N, float *a, float *b, float *c){
for (int i = 0; i < N; i++){
if (i < N){
c[i] = 0;
a[i] = 1 + i;
b[i] = 1 - i;
}
}
}
void addVectors (int N, float *a, float *b, float *c){
for (int i = 0; i < N; i++){
if (i < N){
c[i] = a[i] + b[i];
}
}
}
int main (int argc, char **argv){
if (argc != 2) exit (1);
int N = atoi(argv[1]);
float *a, *b, *c;
a = (float *) malloc(N*sizeof(float));
b = (float *) malloc(N*sizeof(float));
c = (float *) malloc(N*sizeof(float));
initialize(N,a,b,c);
addVectors(N,a,b,c);
for (int i = 0; i < 5; i++) {
printf("%f\n", c[i]);
}
free(a);
free(b);
free(c);
}
|
46e1a92367d11f5f8b23ddaa0b6a02870fcb4a10.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/flash_attn/flash_fwd_launch_template.h>
namespace pytorch_flash{
template<>
void run_mha_fwd_<cutlass::half_t, 224>(Flash_fwd_params ¶ms, hipStream_t stream) {
run_mha_fwd_hdim224<cutlass::half_t>(params, stream);
}
} // namespace pytorch_flash
| 46e1a92367d11f5f8b23ddaa0b6a02870fcb4a10.cu |
// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/flash_attn/flash_fwd_launch_template.h>
namespace pytorch_flash{
template<>
void run_mha_fwd_<cutlass::half_t, 224>(Flash_fwd_params ¶ms, cudaStream_t stream) {
run_mha_fwd_hdim224<cutlass::half_t>(params, stream);
}
} // namespace pytorch_flash
|
66825c6332c1d0a32697fdd367c8557cd78e3c77.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <algorithm>
#include <hip/hip_runtime.h>
#include "rroi.h"
#include "rotate_rect_ops.h"
#include "cuda_utils.h"
template <typename T>
__device__ inline void get_rotated_rect_bounding_box(const T* pts, int& leftMost, int& topMost,
int& rightMost, int& bottomMost, const int width, const int height)
{
// const T* P = pts;
// leftMost = int(max(round(min(min(P[0],P[2]),min(P[4],P[6]))),0.0));
// rightMost= int(min(round(max(max(P[0],P[2]),max(P[4],P[6]))),width-1.0));
// topMost= int(max(round(min(min(P[1],P[3]),min(P[5],P[7]))),0.0));
// bottomMost= int(min(round(max(max(P[1],P[3]),max(P[5],P[7]))),height-1.0));
leftMost = int(max(min(min(pts[0], pts[2]), min(pts[4], pts[6])), 0.0));
topMost = int(max(min(min(pts[1], pts[3]), min(pts[5], pts[7])), 0.0));
rightMost = int(min(max(max(pts[0], pts[2]), max(pts[4], pts[6])) + 1, width - 1.0));
bottomMost = int(min(max(max(pts[1], pts[3]), max(pts[5], pts[7])) + 1, height - 1.0));
}
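// Rotated-ROI max-pooling forward pass: each thread handles one output cell (n, c, ph, pw),
// scans the axis-aligned bounding box of its rotated pooling cell, and keeps the maximum input
// value among the pixels that fall inside the rotated rectangle (the dot-product test below).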
template <typename T>
__global__ void RRoIPoolFForward(const int nthreads, const T* bottom_data,
const float spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const T* bottom_rois, T* top_data//, int* argmax_data
) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 6;
int roi_batch_ind = offset_bottom_rois[0];
T P[8];
compute_roi_pool_pts(offset_bottom_rois, P, spatial_scale, pooled_height, pooled_width, ph, pw);
int leftMost, topMost, rightMost, bottomMost;
get_rotated_rect_bounding_box(P, leftMost, topMost, rightMost, bottomMost, width, height);
T maxval = 0;
int maxidx = -1;
const T* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width;
T AB[2];
AB[0] = P[0] - P[2];
AB[1] = P[1] - P[3];
T ABAB = AB[0]*AB[0] +AB[1]*AB[1];
T AC[2];
AC[0] = P[4] - P[2];
AC[1] = P[5] - P[3];
T ACAC = AC[0]*AC[0] + AC[1]*AC[1];
for (int hh = topMost; hh < bottomMost+1; ++hh) {
for (int ww = leftMost; ww < rightMost+1; ++ww) {
T AP[2];
AP[0] = ww - P[2];
AP[1] = hh - P[3];
T ABAP = AB[0]*AP[0] + AB[1]*AP[1];
T ACAP = AC[0]*AP[0] + AC[1]*AP[1];
if ( ABAP >= 1e-3 && (ABAB - ABAP) > -1e-3 && ACAP >= 1e-3 && (ACAC - ACAP) > -1e-3 )
{
int bottom_index = hh * width + ww;
if (offset_bottom_data[bottom_index] > maxval)
{
maxval = offset_bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
}
top_data[index] = maxval;
// argmax_data[index] = maxidx;
}
}
void RROIPool_forward_golden(
int batch_size,
int num_rois,
int channels,
int height,
int width,
int pooled_height,
int pooled_width,
float spatial_scale,
float* bottom_data_d,
float* rois_d,
float* top_data_d,
hipStream_t stream)
{
auto top_data_size = num_rois * channels * pooled_height * pooled_width;
dim3 grid(::min(static_cast<long>(::ceil(top_data_size * 1.0 / 512L)), 4096L));
dim3 block(512);
hipLaunchKernelGGL(( RRoIPoolFForward<float>), dim3(grid), dim3(block), 0, stream,
// output_size,
// input.contiguous().data<float>(),
// spatial_scale,
// channels,
// height,
// width,
// pooled_height,
// pooled_width,
// rois.contiguous().data<float>(),
// output.data<float>(),
// argmax.data<int>()
top_data_size,
bottom_data_d,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
rois_d,
top_data_d
);
}
| 66825c6332c1d0a32697fdd367c8557cd78e3c77.cu | #include <cstdio>
#include <algorithm>
#include <cuda.h>
#include "rroi.h"
#include "rotate_rect_ops.h"
#include "cuda_utils.h"
template <typename T>
__device__ inline void get_rotated_rect_bounding_box(const T* pts, int& leftMost, int& topMost,
int& rightMost, int& bottomMost, const int width, const int height)
{
// const T* P = pts;
// leftMost = int(max(round(min(min(P[0],P[2]),min(P[4],P[6]))),0.0));
// rightMost= int(min(round(max(max(P[0],P[2]),max(P[4],P[6]))),width-1.0));
// topMost= int(max(round(min(min(P[1],P[3]),min(P[5],P[7]))),0.0));
// bottomMost= int(min(round(max(max(P[1],P[3]),max(P[5],P[7]))),height-1.0));
leftMost = int(max(min(min(pts[0], pts[2]), min(pts[4], pts[6])), 0.0));
topMost = int(max(min(min(pts[1], pts[3]), min(pts[5], pts[7])), 0.0));
rightMost = int(min(max(max(pts[0], pts[2]), max(pts[4], pts[6])) + 1, width - 1.0));
bottomMost = int(min(max(max(pts[1], pts[3]), max(pts[5], pts[7])) + 1, height - 1.0));
}
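// Rotated-ROI max-pooling forward pass: each thread handles one output cell (n, c, ph, pw),
// scans the axis-aligned bounding box of its rotated pooling cell, and keeps the maximum input
// value among the pixels that fall inside the rotated rectangle (the dot-product test below).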
template <typename T>
__global__ void RRoIPoolFForward(const int nthreads, const T* bottom_data,
const float spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const T* bottom_rois, T* top_data//, int* argmax_data
) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 6;
int roi_batch_ind = offset_bottom_rois[0];
T P[8];
compute_roi_pool_pts(offset_bottom_rois, P, spatial_scale, pooled_height, pooled_width, ph, pw);
int leftMost, topMost, rightMost, bottomMost;
get_rotated_rect_bounding_box(P, leftMost, topMost, rightMost, bottomMost, width, height);
T maxval = 0;
int maxidx = -1;
const T* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width;
T AB[2];
AB[0] = P[0] - P[2];
AB[1] = P[1] - P[3];
T ABAB = AB[0]*AB[0] +AB[1]*AB[1];
T AC[2];
AC[0] = P[4] - P[2];
AC[1] = P[5] - P[3];
T ACAC = AC[0]*AC[0] + AC[1]*AC[1];
for (int hh = topMost; hh < bottomMost+1; ++hh) {
for (int ww = leftMost; ww < rightMost+1; ++ww) {
T AP[2];
AP[0] = ww - P[2];
AP[1] = hh - P[3];
T ABAP = AB[0]*AP[0] + AB[1]*AP[1];
T ACAP = AC[0]*AP[0] + AC[1]*AP[1];
if ( ABAP >= 1e-3 && (ABAB - ABAP) > -1e-3 && ACAP >= 1e-3 && (ACAC - ACAP) > -1e-3 )
{
int bottom_index = hh * width + ww;
if (offset_bottom_data[bottom_index] > maxval)
{
maxval = offset_bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
}
top_data[index] = maxval;
// argmax_data[index] = maxidx;
}
}
void RROIPool_forward_golden(
int batch_size,
int num_rois,
int channels,
int height,
int width,
int pooled_height,
int pooled_width,
float spatial_scale,
float* bottom_data_d,
float* rois_d,
float* top_data_d,
cudaStream_t stream)
{
auto top_data_size = num_rois * channels * pooled_height * pooled_width;
dim3 grid(std::min(static_cast<long>(std::ceil(top_data_size * 1.0 / 512L)), 4096L));
dim3 block(512);
RRoIPoolFForward<float><<<grid, block, 0, stream>>>(
// output_size,
// input.contiguous().data<float>(),
// spatial_scale,
// channels,
// height,
// width,
// pooled_height,
// pooled_width,
// rois.contiguous().data<float>(),
// output.data<float>(),
// argmax.data<int>()
top_data_size,
bottom_data_d,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
rois_d,
top_data_d
);
}
|
afb3d6030077c13201642699584885be9c871c65.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <hip/hip_runtime.h>
#include "aes.h"
__device__ unsigned char dob[256] =
{
0x00,0x02,0x04,0x06,0x08,0x0a,0x0c,0x0e,0x10,0x12,0x14,0x16,0x18,0x1a,0x1c,0x1e,
0x20,0x22,0x24,0x26,0x28,0x2a,0x2c,0x2e,0x30,0x32,0x34,0x36,0x38,0x3a,0x3c,0x3e,
0x40,0x42,0x44,0x46,0x48,0x4a,0x4c,0x4e,0x50,0x52,0x54,0x56,0x58,0x5a,0x5c,0x5e,
0x60,0x62,0x64,0x66,0x68,0x6a,0x6c,0x6e,0x70,0x72,0x74,0x76,0x78,0x7a,0x7c,0x7e,
0x80,0x82,0x84,0x86,0x88,0x8a,0x8c,0x8e,0x90,0x92,0x94,0x96,0x98,0x9a,0x9c,0x9e,
0xa0,0xa2,0xa4,0xa6,0xa8,0xaa,0xac,0xae,0xb0,0xb2,0xb4,0xb6,0xb8,0xba,0xbc,0xbe,
0xc0,0xc2,0xc4,0xc6,0xc8,0xca,0xcc,0xce,0xd0,0xd2,0xd4,0xd6,0xd8,0xda,0xdc,0xde,
0xe0,0xe2,0xe4,0xe6,0xe8,0xea,0xec,0xee,0xf0,0xf2,0xf4,0xf6,0xf8,0xfa,0xfc,0xfe,
0x1b,0x19,0x1f,0x1d,0x13,0x11,0x17,0x15,0x0b,0x09,0x0f,0x0d,0x03,0x01,0x07,0x05,
0x3b,0x39,0x3f,0x3d,0x33,0x31,0x37,0x35,0x2b,0x29,0x2f,0x2d,0x23,0x21,0x27,0x25,
0x5b,0x59,0x5f,0x5d,0x53,0x51,0x57,0x55,0x4b,0x49,0x4f,0x4d,0x43,0x41,0x47,0x45,
0x7b,0x79,0x7f,0x7d,0x73,0x71,0x77,0x75,0x6b,0x69,0x6f,0x6d,0x63,0x61,0x67,0x65,
0x9b,0x99,0x9f,0x9d,0x93,0x91,0x97,0x95,0x8b,0x89,0x8f,0x8d,0x83,0x81,0x87,0x85,
0xbb,0xb9,0xbf,0xbd,0xb3,0xb1,0xb7,0xb5,0xab,0xa9,0xaf,0xad,0xa3,0xa1,0xa7,0xa5,
0xdb,0xd9,0xdf,0xdd,0xd3,0xd1,0xd7,0xd5,0xcb,0xc9,0xcf,0xcd,0xc3,0xc1,0xc7,0xc5,
0xfb,0xf9,0xff,0xfd,0xf3,0xf1,0xf7,0xf5,0xeb,0xe9,0xef,0xed,0xe3,0xe1,0xe7,0xe5
};
__device__ unsigned char triple[256] ={
0x00,0x03,0x06,0x05,0x0c,0x0f,0x0a,0x09,0x18,0x1b,0x1e,0x1d,0x14,0x17,0x12,0x11,
0x30,0x33,0x36,0x35,0x3c,0x3f,0x3a,0x39,0x28,0x2b,0x2e,0x2d,0x24,0x27,0x22,0x21,
0x60,0x63,0x66,0x65,0x6c,0x6f,0x6a,0x69,0x78,0x7b,0x7e,0x7d,0x74,0x77,0x72,0x71,
0x50,0x53,0x56,0x55,0x5c,0x5f,0x5a,0x59,0x48,0x4b,0x4e,0x4d,0x44,0x47,0x42,0x41,
0xc0,0xc3,0xc6,0xc5,0xcc,0xcf,0xca,0xc9,0xd8,0xdb,0xde,0xdd,0xd4,0xd7,0xd2,0xd1,
0xf0,0xf3,0xf6,0xf5,0xfc,0xff,0xfa,0xf9,0xe8,0xeb,0xee,0xed,0xe4,0xe7,0xe2,0xe1,
0xa0,0xa3,0xa6,0xa5,0xac,0xaf,0xaa,0xa9,0xb8,0xbb,0xbe,0xbd,0xb4,0xb7,0xb2,0xb1,
0x90,0x93,0x96,0x95,0x9c,0x9f,0x9a,0x99,0x88,0x8b,0x8e,0x8d,0x84,0x87,0x82,0x81,
0x9b,0x98,0x9d,0x9e,0x97,0x94,0x91,0x92,0x83,0x80,0x85,0x86,0x8f,0x8c,0x89,0x8a,
0xab,0xa8,0xad,0xae,0xa7,0xa4,0xa1,0xa2,0xb3,0xb0,0xb5,0xb6,0xbf,0xbc,0xb9,0xba,
0xfb,0xf8,0xfd,0xfe,0xf7,0xf4,0xf1,0xf2,0xe3,0xe0,0xe5,0xe6,0xef,0xec,0xe9,0xea,
0xcb,0xc8,0xcd,0xce,0xc7,0xc4,0xc1,0xc2,0xd3,0xd0,0xd5,0xd6,0xdf,0xdc,0xd9,0xda,
0x5b,0x58,0x5d,0x5e,0x57,0x54,0x51,0x52,0x43,0x40,0x45,0x46,0x4f,0x4c,0x49,0x4a,
0x6b,0x68,0x6d,0x6e,0x67,0x64,0x61,0x62,0x73,0x70,0x75,0x76,0x7f,0x7c,0x79,0x7a,
0x3b,0x38,0x3d,0x3e,0x37,0x34,0x31,0x32,0x23,0x20,0x25,0x26,0x2f,0x2c,0x29,0x2a,
0x0b,0x08,0x0d,0x0e,0x07,0x04,0x01,0x02,0x13,0x10,0x15,0x16,0x1f,0x1c,0x19,0x1a
};
__device__ unsigned char IV[16]={
0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0
};
__device__ void print_bytes(unsigned char* str, int len){
for(int i=0;i<len;i++)
printf("%02x",str[i]);
printf("\n");
}
// encryption function (the kernel)
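// Each thread handles one 16-byte block in a counter (CTR-like) fashion: it forms a per-block
// counter by adding the block's byte offset to IV, encrypts that counter with AES-128 using the
// shared-memory copies of the S-box and round keys, and XORs the result with the plaintext block.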
__global__ void aes_encript(unsigned char *mat, unsigned char *result, unsigned char *sbox, unsigned char *keys, unsigned int width){
int id = (blockDim.x * blockIdx.x + threadIdx.x) * 16;
__shared__ unsigned char shared_sbox[256]; // the s-box matrix in shared memory
__shared__ unsigned char shared_keys[176]; // the sub-keys in shared memory
if (threadIdx.x == 0){
for (int i = 0; i != 256; ++i){
shared_sbox[i] = sbox[i];
if (i < 176) shared_keys[i] = keys[i];
}
}
__syncthreads(); // barrier
short idx, c = 0;
unsigned char iv_new[16];
unsigned char temp[16];
#pragma unroll
for (int i = 0; i < 16; ++i){
temp[i] = mat[id + i];
        iv_new[i]=IV[i];
}
// build new initialization vector
for (idx = 15; idx >= 0; idx--){
short shift = (16 - (idx + 1)) * 8;
unsigned char op1 = IV[idx];
unsigned char op2 = ((id &(0xff << shift)) >> shift);
iv_new[idx] = op1 + op2 + c;
c = (iv_new[idx] > op1 && iv_new[idx] > op2) ? 0 : 1;
}
// AES algorithm
xor_key(iv_new, shared_keys, 0);
#pragma unroll
for(int i = 1; i < 10; i++){
byte_sub(iv_new,shared_sbox);
shift_rows(iv_new);
mix_columns(iv_new);
xor_key(iv_new, shared_keys, i);
}
byte_sub(iv_new, shared_sbox);
shift_rows(iv_new);
xor_key(iv_new, shared_keys, 10);
//XOR with plain block
#pragma unroll
for (int i = 0; i < 16; ++i){
unsigned char res = iv_new[i] ^ temp[i];
iv_new[i] = res;
}
#pragma unroll
for (int i = 0; i < 16; ++i)
result[id + i] = iv_new[i];
result[width] = 0x0;
}
// Key Addition Kernel
__device__ void xor_key(unsigned char *mat, unsigned char *key, const unsigned int &round){
#pragma unroll
for (int i = 0; i < 16; ++i)
mat[i] ^= key[(16 * round) + i];
}
// byte substitution (S-Boxes)
__device__ void byte_sub(unsigned char *mat,unsigned char* s_sbox){
#pragma unroll
for (int i = 0; i < 16; ++i)
mat[i] = s_sbox[mat[i]];
}
// Shift rows
__device__ void shift_rows(unsigned char *mat){
int k,tmp;
#pragma unroll
for(int i = 1; i < 4; i++){
for(int j = 0; j < i; j++){
tmp = mat[i*4];
for(k = 0; k < 3; k++)
mat[i*4+k] = mat[i*4+k+1];
mat[i*4+k] = tmp;
}
}
}
// Mix column
__device__ void mix_columns(unsigned char* text_mat){
unsigned char b0, b1, b2, b3;
unsigned char result[16];
int i;
#pragma unroll
for (i = 0; i < 4; i ++){
b0 = text_mat[i];
b1 = text_mat[i + 4];
b2 = text_mat[i + 8];
b3 = text_mat[i + 12];
result[i] = dob[b0] ^ triple[b1] ^ b2 ^ b3;
result[i + 4] = b0 ^ dob[b1] ^ triple[b2] ^ b3;
result[i + 8] = b0 ^ b1 ^ dob[b2] ^ triple[b3];
result[i + 12] = triple[b0] ^ b1 ^ b2 ^ dob[b3];
}
#pragma unroll
for(i = 0; i < 16; i++)
text_mat[i]=result[i];
}
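// AES-128 key expansion (host side): derives the per-round sub-keys from the user key;
// rc[] and get_sbox_value() are assumed to be provided by aes.h.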
void build_subkeys(unsigned char* key,unsigned char* sub_keys, int totsize,int rounds){
for(int i = 0; i < rounds; i++){
if(i == 0){
for(int j = 0; j < totsize; j++){
sub_keys[j] = (key[j] == 0x0)?0x0:key[j];
}
}else{
sub_keys[i*totsize] = (sub_keys[(i-1)*totsize]^(get_sbox_value(sub_keys[(i-1)*totsize+13]) ^rc[i-1]));
sub_keys[i*totsize+1] = (sub_keys[(i-1)*totsize+1]^get_sbox_value(sub_keys[(i-1)*totsize+14]));
sub_keys[i*totsize+2] = (sub_keys[(i-1)*totsize+2]^get_sbox_value(sub_keys[(i-1)*totsize+15]));
sub_keys[i*totsize+3] = (sub_keys[(i-1)*totsize+3]^get_sbox_value(sub_keys[(i-1)*totsize+12]));
for(int j = 4; j < totsize; j+=4){
sub_keys[i*totsize+j] = (sub_keys[(i*totsize)+j-4]^sub_keys[((i-1)*totsize)+j]);
sub_keys[i*totsize+(j+1)] = (sub_keys[(i*totsize)+j-3]^sub_keys[((i-1)*totsize)+j+1]);
sub_keys[i*totsize+(j+2)] = (sub_keys[(i*totsize)+j-2]^sub_keys[((i-1)*totsize)+j+2]);
sub_keys[i*totsize+(j+3)] = (sub_keys[(i*totsize)+j-1]^sub_keys[((i-1)*totsize)+j+3]);
}
}
}
}
| afb3d6030077c13201642699584885be9c871c65.cu | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cuda.h>
#include "aes.h"
__device__ unsigned char dob[256] =
{
0x00,0x02,0x04,0x06,0x08,0x0a,0x0c,0x0e,0x10,0x12,0x14,0x16,0x18,0x1a,0x1c,0x1e,
0x20,0x22,0x24,0x26,0x28,0x2a,0x2c,0x2e,0x30,0x32,0x34,0x36,0x38,0x3a,0x3c,0x3e,
0x40,0x42,0x44,0x46,0x48,0x4a,0x4c,0x4e,0x50,0x52,0x54,0x56,0x58,0x5a,0x5c,0x5e,
0x60,0x62,0x64,0x66,0x68,0x6a,0x6c,0x6e,0x70,0x72,0x74,0x76,0x78,0x7a,0x7c,0x7e,
0x80,0x82,0x84,0x86,0x88,0x8a,0x8c,0x8e,0x90,0x92,0x94,0x96,0x98,0x9a,0x9c,0x9e,
0xa0,0xa2,0xa4,0xa6,0xa8,0xaa,0xac,0xae,0xb0,0xb2,0xb4,0xb6,0xb8,0xba,0xbc,0xbe,
0xc0,0xc2,0xc4,0xc6,0xc8,0xca,0xcc,0xce,0xd0,0xd2,0xd4,0xd6,0xd8,0xda,0xdc,0xde,
0xe0,0xe2,0xe4,0xe6,0xe8,0xea,0xec,0xee,0xf0,0xf2,0xf4,0xf6,0xf8,0xfa,0xfc,0xfe,
0x1b,0x19,0x1f,0x1d,0x13,0x11,0x17,0x15,0x0b,0x09,0x0f,0x0d,0x03,0x01,0x07,0x05,
0x3b,0x39,0x3f,0x3d,0x33,0x31,0x37,0x35,0x2b,0x29,0x2f,0x2d,0x23,0x21,0x27,0x25,
0x5b,0x59,0x5f,0x5d,0x53,0x51,0x57,0x55,0x4b,0x49,0x4f,0x4d,0x43,0x41,0x47,0x45,
0x7b,0x79,0x7f,0x7d,0x73,0x71,0x77,0x75,0x6b,0x69,0x6f,0x6d,0x63,0x61,0x67,0x65,
0x9b,0x99,0x9f,0x9d,0x93,0x91,0x97,0x95,0x8b,0x89,0x8f,0x8d,0x83,0x81,0x87,0x85,
0xbb,0xb9,0xbf,0xbd,0xb3,0xb1,0xb7,0xb5,0xab,0xa9,0xaf,0xad,0xa3,0xa1,0xa7,0xa5,
0xdb,0xd9,0xdf,0xdd,0xd3,0xd1,0xd7,0xd5,0xcb,0xc9,0xcf,0xcd,0xc3,0xc1,0xc7,0xc5,
0xfb,0xf9,0xff,0xfd,0xf3,0xf1,0xf7,0xf5,0xeb,0xe9,0xef,0xed,0xe3,0xe1,0xe7,0xe5
};
__device__ unsigned char triple[256] ={
0x00,0x03,0x06,0x05,0x0c,0x0f,0x0a,0x09,0x18,0x1b,0x1e,0x1d,0x14,0x17,0x12,0x11,
0x30,0x33,0x36,0x35,0x3c,0x3f,0x3a,0x39,0x28,0x2b,0x2e,0x2d,0x24,0x27,0x22,0x21,
0x60,0x63,0x66,0x65,0x6c,0x6f,0x6a,0x69,0x78,0x7b,0x7e,0x7d,0x74,0x77,0x72,0x71,
0x50,0x53,0x56,0x55,0x5c,0x5f,0x5a,0x59,0x48,0x4b,0x4e,0x4d,0x44,0x47,0x42,0x41,
0xc0,0xc3,0xc6,0xc5,0xcc,0xcf,0xca,0xc9,0xd8,0xdb,0xde,0xdd,0xd4,0xd7,0xd2,0xd1,
0xf0,0xf3,0xf6,0xf5,0xfc,0xff,0xfa,0xf9,0xe8,0xeb,0xee,0xed,0xe4,0xe7,0xe2,0xe1,
0xa0,0xa3,0xa6,0xa5,0xac,0xaf,0xaa,0xa9,0xb8,0xbb,0xbe,0xbd,0xb4,0xb7,0xb2,0xb1,
0x90,0x93,0x96,0x95,0x9c,0x9f,0x9a,0x99,0x88,0x8b,0x8e,0x8d,0x84,0x87,0x82,0x81,
0x9b,0x98,0x9d,0x9e,0x97,0x94,0x91,0x92,0x83,0x80,0x85,0x86,0x8f,0x8c,0x89,0x8a,
0xab,0xa8,0xad,0xae,0xa7,0xa4,0xa1,0xa2,0xb3,0xb0,0xb5,0xb6,0xbf,0xbc,0xb9,0xba,
0xfb,0xf8,0xfd,0xfe,0xf7,0xf4,0xf1,0xf2,0xe3,0xe0,0xe5,0xe6,0xef,0xec,0xe9,0xea,
0xcb,0xc8,0xcd,0xce,0xc7,0xc4,0xc1,0xc2,0xd3,0xd0,0xd5,0xd6,0xdf,0xdc,0xd9,0xda,
0x5b,0x58,0x5d,0x5e,0x57,0x54,0x51,0x52,0x43,0x40,0x45,0x46,0x4f,0x4c,0x49,0x4a,
0x6b,0x68,0x6d,0x6e,0x67,0x64,0x61,0x62,0x73,0x70,0x75,0x76,0x7f,0x7c,0x79,0x7a,
0x3b,0x38,0x3d,0x3e,0x37,0x34,0x31,0x32,0x23,0x20,0x25,0x26,0x2f,0x2c,0x29,0x2a,
0x0b,0x08,0x0d,0x0e,0x07,0x04,0x01,0x02,0x13,0x10,0x15,0x16,0x1f,0x1c,0x19,0x1a
};
__device__ unsigned char IV[16]={
0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0
};
__device__ void print_bytes(unsigned char* str, int len){
for(int i=0;i<len;i++)
printf("%02x",str[i]);
printf("\n");
}
// encryption function (the kernel)
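// Each thread handles one 16-byte block in a counter (CTR-like) fashion: it forms a per-block
// counter by adding the block's byte offset to IV, encrypts that counter with AES-128 using the
// shared-memory copies of the S-box and round keys, and XORs the result with the plaintext block.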
__global__ void aes_encript(unsigned char *mat, unsigned char *result, unsigned char *sbox, unsigned char *keys, unsigned int width){
int id = (blockDim.x * blockIdx.x + threadIdx.x) * 16;
__shared__ unsigned char shared_sbox[256]; // the s-box matrix in shared memory
__shared__ unsigned char shared_keys[176]; // the sub-keys in shared memory
if (threadIdx.x == 0){
for (int i = 0; i != 256; ++i){
shared_sbox[i] = sbox[i];
if (i < 176) shared_keys[i] = keys[i];
}
}
__syncthreads(); // barrier
short idx, c = 0;
unsigned char iv_new[16];
unsigned char temp[16];
#pragma unroll
for (int i = 0; i < 16; ++i){
temp[i] = mat[id + i];
        iv_new[i]=IV[i];
}
// build new initialization vector
for (idx = 15; idx >= 0; idx--){
short shift = (16 - (idx + 1)) * 8;
unsigned char op1 = IV[idx];
unsigned char op2 = ((id &(0xff << shift)) >> shift);
iv_new[idx] = op1 + op2 + c;
c = (iv_new[idx] > op1 && iv_new[idx] > op2) ? 0 : 1;
}
// AES algorithm
xor_key(iv_new, shared_keys, 0);
#pragma unroll
for(int i = 1; i < 10; i++){
byte_sub(iv_new,shared_sbox);
shift_rows(iv_new);
mix_columns(iv_new);
xor_key(iv_new, shared_keys, i);
}
byte_sub(iv_new, shared_sbox);
shift_rows(iv_new);
xor_key(iv_new, shared_keys, 10);
//XOR with plain block
#pragma unroll
for (int i = 0; i < 16; ++i){
unsigned char res = iv_new[i] ^ temp[i];
iv_new[i] = res;
}
#pragma unroll
for (int i = 0; i < 16; ++i)
result[id + i] = iv_new[i];
result[width] = 0x0;
}
// Key Addition Kernel
__device__ void xor_key(unsigned char *mat, unsigned char *key, const unsigned int &round){
#pragma unroll
for (int i = 0; i < 16; ++i)
mat[i] ^= key[(16 * round) + i];
}
// byte substitution (S-Boxes)
__device__ void byte_sub(unsigned char *mat,unsigned char* s_sbox){
#pragma unroll
for (int i = 0; i < 16; ++i)
mat[i] = s_sbox[mat[i]];
}
// Shift rows
__device__ void shift_rows(unsigned char *mat){
int k,tmp;
#pragma unroll
for(int i = 1; i < 4; i++){
for(int j = 0; j < i; j++){
tmp = mat[i*4];
for(k = 0; k < 3; k++)
mat[i*4+k] = mat[i*4+k+1];
mat[i*4+k] = tmp;
}
}
}
// Mix column
__device__ void mix_columns(unsigned char* text_mat){
unsigned char b0, b1, b2, b3;
unsigned char result[16];
int i;
#pragma unroll
for (i = 0; i < 4; i ++){
b0 = text_mat[i];
b1 = text_mat[i + 4];
b2 = text_mat[i + 8];
b3 = text_mat[i + 12];
result[i] = dob[b0] ^ triple[b1] ^ b2 ^ b3;
result[i + 4] = b0 ^ dob[b1] ^ triple[b2] ^ b3;
result[i + 8] = b0 ^ b1 ^ dob[b2] ^ triple[b3];
result[i + 12] = triple[b0] ^ b1 ^ b2 ^ dob[b3];
}
#pragma unroll
for(i = 0; i < 16; i++)
text_mat[i]=result[i];
}
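// AES-128 key expansion (host side): derives the per-round sub-keys from the user key;
// rc[] and get_sbox_value() are assumed to be provided by aes.h.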
void build_subkeys(unsigned char* key,unsigned char* sub_keys, int totsize,int rounds){
for(int i = 0; i < rounds; i++){
if(i == 0){
for(int j = 0; j < totsize; j++){
sub_keys[j] = (key[j] == 0x0)?0x0:key[j];
}
}else{
sub_keys[i*totsize] = (sub_keys[(i-1)*totsize]^(get_sbox_value(sub_keys[(i-1)*totsize+13]) ^rc[i-1]));
sub_keys[i*totsize+1] = (sub_keys[(i-1)*totsize+1]^get_sbox_value(sub_keys[(i-1)*totsize+14]));
sub_keys[i*totsize+2] = (sub_keys[(i-1)*totsize+2]^get_sbox_value(sub_keys[(i-1)*totsize+15]));
sub_keys[i*totsize+3] = (sub_keys[(i-1)*totsize+3]^get_sbox_value(sub_keys[(i-1)*totsize+12]));
for(int j = 4; j < totsize; j+=4){
sub_keys[i*totsize+j] = (sub_keys[(i*totsize)+j-4]^sub_keys[((i-1)*totsize)+j]);
sub_keys[i*totsize+(j+1)] = (sub_keys[(i*totsize)+j-3]^sub_keys[((i-1)*totsize)+j+1]);
sub_keys[i*totsize+(j+2)] = (sub_keys[(i*totsize)+j-2]^sub_keys[((i-1)*totsize)+j+2]);
sub_keys[i*totsize+(j+3)] = (sub_keys[(i*totsize)+j-1]^sub_keys[((i-1)*totsize)+j+3]);
}
}
}
}
|
fe28337f3ddc2b6620883e489735c8057eea762c.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2019 BlazingDB, Inc.
* Copyright 2019 Felipe Aramburu <[email protected]>
* Copyright 2018 Rommel Quintanilla <[email protected]>
* Copyright 2019 William Scott Malpica <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <cudf/legacy/binaryop.hpp>
#include <cudf/cudf.h>
#include <utilities/legacy/cudf_utils.h>
#include <cudf/legacy/join.hpp>
#include <cudf/legacy/functions.h>
#include <cudf/types.h>
#include <cudf/legacy/copying.hpp>
#include <utilities/legacy/column_utils.hpp>
#include <iostream>
#include <random>
#include <nvstrings/NVCategory.h>
#include <nvstrings/NVStrings.h>
#include <rmm/rmm.h>
#include <cstring>
#include <tests/utilities/legacy/cudf_test_utils.cuh>
#include <tests/utilities/legacy/cudf_test_fixtures.h>
#include <tests/utilities/legacy/nvcategory_utils.cuh>
#include <bitmask/legacy/bit_mask.cuh>
// See this header for all of the handling of valids' vectors
#include <tests/utilities/legacy/valid_vectors.h>
#include <cudf/utilities/legacy/nvcategory_util.hpp>
gdf_column * create_column_ints(int32_t* host_data, cudf::size_type num_rows){
gdf_column * column = new gdf_column{};
int32_t * data;
EXPECT_EQ(RMM_ALLOC(&data, num_rows * sizeof(int32_t) , 0), RMM_SUCCESS);
CUDA_TRY( hipMemcpy(data, host_data, sizeof(int32_t) * num_rows, hipMemcpyHostToDevice) );
bit_mask::bit_mask_t * valid;
bit_mask::create_bit_mask(&valid, num_rows,1);
gdf_error err = gdf_column_view(column,
(void *) data,
(cudf::valid_type *)valid,
num_rows,
GDF_INT32);
return column;
}
gdf_column * create_column_constant(cudf::size_type num_rows, int value){
gdf_column * column = new gdf_column{};
int * data;
bit_mask::bit_mask_t * valid;
bit_mask::create_bit_mask(&valid, num_rows,1);
EXPECT_EQ(RMM_ALLOC(&data, num_rows * sizeof(int) , 0), RMM_SUCCESS);
hipMemset(data,value,sizeof(int) * num_rows);
gdf_error err = gdf_column_view(column,
(void *) data,
(cudf::valid_type *) valid,
num_rows,
GDF_INT32);
return column;
}
int32_t* generate_int_data(cudf::size_type num_rows, size_t max_value, bool print=false){
int32_t* host_data = new int32_t[num_rows];
for(cudf::size_type row_index = 0; row_index < num_rows; row_index++){
host_data[row_index] = std::rand() % max_value;
if(print)
std::cout<<host_data[row_index]<<"\t";
}
if(print)
std::cout<<std::endl;
return host_data;
}
struct NVCategoryTest : public GdfTest
{
gdf_column * create_boolean_column(cudf::size_type num_rows){
gdf_column * column = new gdf_column{};
int * data;
bit_mask::bit_mask_t * valid;
bit_mask::create_bit_mask(&valid, num_rows,1);
EXPECT_EQ(RMM_ALLOC(&data, num_rows * sizeof(int8_t) , 0), RMM_SUCCESS);
gdf_error err = gdf_column_view(column,
(void *) data,
(cudf::valid_type *) valid,
num_rows,
GDF_INT8);
return column;
}
gdf_column * create_indices_column(cudf::size_type num_rows){
gdf_column * column = new gdf_column{};
int * data;
bit_mask::bit_mask_t * valid;
bit_mask::create_bit_mask(&valid, num_rows,1);
EXPECT_EQ(RMM_ALLOC(&data, num_rows * sizeof(int) , 0), RMM_SUCCESS);
gdf_error err = gdf_column_view(column,
(void *) data,
(cudf::valid_type *) valid,
num_rows,
GDF_INT32);
return column;
}
};
//todo refactor tests
TEST_F(NVCategoryTest, TEST_NVCATEGORY_SORTING)
{
bool print = false;
const int rows_size = 64;
const int length = 2;
const char ** string_data = cudf::test::generate_string_data(rows_size, length, print);
gdf_column * column = cudf::test::create_nv_category_column_strings(string_data, rows_size);
gdf_column * output_column = create_indices_column(rows_size);
gdf_column ** input_columns = new gdf_column *[1];
input_columns[0] = column;
if(print){
print_gdf_column(input_columns[0]);
}
int8_t *asc_desc;
EXPECT_EQ(RMM_ALLOC(&asc_desc, 1, 0), RMM_SUCCESS);
int8_t minus_one = -1; //desc
hipMemset(asc_desc, minus_one, 1);
gdf_context context;
context.flag_null_sort_behavior = GDF_NULL_AS_LARGEST;
    // doesn't output nvcategory type columns, so it works as is
gdf_error err = gdf_order_by(input_columns, asc_desc, 1, output_column, &context);
EXPECT_EQ(GDF_SUCCESS, err);
if(print){
print_gdf_column(output_column);
}
int* host_data = new int[rows_size];
CUDA_TRY( hipMemcpy(
host_data,
output_column->data,
sizeof(int) * output_column->size,
hipMemcpyDeviceToHost) );
std::vector<std::string> strings_vector(string_data, string_data + rows_size);
for(size_t i = 0; i < rows_size - 1; i++){
EXPECT_TRUE(strings_vector[host_data[i]] >= strings_vector[host_data[i+1]]);
}
}
// Selects the kind of aggregation operation that is performed
enum struct agg_op
{
MIN,//0
MAX,//1
SUM,//2
CNT,//3
AVG //4
};
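// Generic aggregation functor: the primary template is a placeholder that returns 0;
// the specializations below implement MIN, MAX, SUM and CNT.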
template <agg_op op>
struct AggOp {
template <typename T>
T operator()(const T a, const T b) {
return static_cast<T>(0);
}
template <typename T>
T operator()(const T a) {
return static_cast<T>(0);
}
};
template<>
struct AggOp<agg_op::MIN> {
template <typename T>
T operator()(const T a, const T b) {
return (a < b)? a : b;
}
template <typename T>
T operator()(const T a) {
return a;
}
};
template<>
struct AggOp<agg_op::MAX> {
template <typename T>
T operator()(const T a, const T b) {
return (a > b)? a : b;
}
template <typename T>
T operator()(const T a) {
return a;
}
};
template<>
struct AggOp<agg_op::SUM> {
template <typename T>
T operator()(const T a, const T b) {
return a + b;
}
template <typename T>
T operator()(const T a) {
return a;
}
};
template<>
struct AggOp<agg_op::CNT> {
size_t count{0};
template <typename T>
T operator()(const T a, const T b) {
count = a+1;
return count;
}
template <typename T>
T operator()(const T a) {
count = 1;
return count;
}
};
TEST_F(NVCategoryTest, TEST_NVCATEGORY_COMPARISON)
{
bool print = false;
const int rows_size = 64;
const size_t length = 1;
const char ** left_string_data = cudf::test::generate_string_data(rows_size, length, print);
const char ** right_string_data = cudf::test::generate_string_data(rows_size, length, print);
std::vector<std::string> left_host_column (left_string_data, left_string_data + rows_size);
std::vector<std::string> right_host_column (right_string_data, right_string_data + rows_size);
gdf_column * left_column = cudf::test::create_nv_category_column_strings(left_string_data, rows_size);
gdf_column * right_column = cudf::test::create_nv_category_column_strings(right_string_data, rows_size);
gdf_column * output_column = create_boolean_column(rows_size);
NVStrings * temp_string = static_cast<NVCategory *>(right_column->dtype_info.category)->to_strings();
NVCategory * new_category = static_cast<NVCategory *>(left_column->dtype_info.category)->add_strings(
*temp_string);
unsigned int * indices;
EXPECT_EQ(RMM_ALLOC(&indices, sizeof(unsigned int) * new_category->size(), 0), RMM_SUCCESS);
//now reset data
new_category->get_values( (int*)indices, true);
CUDA_TRY( hipMemcpy(left_column->data,indices,sizeof(unsigned int) * left_column->size,hipMemcpyDeviceToDevice) );
CUDA_TRY( hipMemcpy(right_column->data,indices + left_column->size,sizeof(unsigned int) * right_column->size,hipMemcpyDeviceToDevice) );
if(print){
print_gdf_column(left_column);
print_gdf_column(right_column);
}
left_column->dtype_info.category = new_category;
right_column->dtype_info.category = new_category;
CUDF_EXPECT_NO_THROW(cudf::binary_operation(output_column, left_column, right_column, gdf_binary_operator::GDF_EQUAL));
int8_t * data = new int8_t[rows_size];
CUDA_TRY( hipMemcpy(data, output_column->data, sizeof(int8_t) * rows_size, hipMemcpyDeviceToHost) );
for(size_t i = 0; i < rows_size; ++i){
EXPECT_EQ((bool)data[i], left_host_column[i] == right_host_column[i]);
}
    delete[] data;  // matches the new[] allocation above
}
struct NVCategoryConcatTest : public GdfTest
{
std::vector<gdf_column *> concat_columns;
gdf_column * concat_out;
const int length = 2;
std::vector<std::string> compute_gdf_result(bool print = false){
size_t keys_size = 0;
for(size_t i=0;i<concat_columns.size();i++)
keys_size+=concat_columns[i]->size;
concat_out = cudf::test::create_nv_category_column(keys_size, true);
gdf_error err = gdf_column_concat(concat_out, concat_columns.data(), concat_columns.size());
EXPECT_EQ(GDF_SUCCESS, err);
if(print){
print_gdf_column(concat_out);
}
NVStrings * temp_strings = static_cast<NVCategory *>(concat_out->dtype_info.category)->gather_strings(
(nv_category_index_type *) concat_out->data, keys_size, DEVICE_ALLOCATED );
char** host_strings = new char*[keys_size];
for(size_t i=0;i<keys_size;i++){
host_strings[i]=new char[length+1];
}
temp_strings->to_host(host_strings, 0, keys_size);
for(size_t i=0;i<keys_size;i++){
host_strings[i][length]=0;
}
std::vector<std::string> strings_vector(host_strings, host_strings + keys_size);
NVStrings::destroy(temp_strings);
for(size_t i = 0; i < keys_size; i++){
            delete[] host_strings[i];
}
        delete[] host_strings;
return strings_vector;
}
};
TEST_F(NVCategoryConcatTest, concat_test){
bool print = false;
const int rows_size = 64;
const char *** string_data = new const char**[2];
string_data[0] = cudf::test::generate_string_data(rows_size, length, print);
string_data[1] = cudf::test::generate_string_data(rows_size, length, print);
concat_columns.resize(2);
concat_columns[0] = cudf::test::create_nv_category_column_strings(string_data[0], rows_size);
concat_columns[1] = cudf::test::create_nv_category_column_strings(string_data[1], rows_size);
std::vector<std::string> reference_result;
reference_result.insert(reference_result.end(), string_data[0], string_data[0] + rows_size);
reference_result.insert(reference_result.end(), string_data[1], string_data[1] + rows_size);
if(print){
print_gdf_column(concat_columns[0]);
print_gdf_column(concat_columns[1]);
}
std::vector<std::string> gdf_result = this->compute_gdf_result();
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
}
// Selects the kind of join operation that is performed
enum struct join_op
{
INNER,
LEFT,
FULL
};
// Each element of the result will be an index into the left and right columns where
// left_columns[left_index] == right_columns[right_index]
using result_type = typename std::pair<int, int>;
// Define stream operator for a std::pair for convenience of printing results.
// Needs to be in the std namespace to work with std::copy
namespace std{
template <typename first_t, typename second_t>
std::ostream& operator<<(std::ostream& os, std::pair<first_t, second_t> const & p)
{
os << p.first << "\t" << p.second;
std::cout << "\n";
return os;
}
}
struct NVCategoryJoinTest : public GdfTest
{
// Containers for the raw pointers to the gdf_columns that will be used as
// input to the gdf_join functions
std::vector<gdf_column*> gdf_raw_left_columns;
std::vector<gdf_column*> gdf_raw_right_columns;
std::vector<gdf_column*> gdf_raw_result_columns;
std::vector<std::string> left_string_column;
std::vector<std::string> right_string_column;
gdf_context ctxt{0, GDF_HASH, 0};
/* --------------------------------------------------------------------------*/
/**
* @Synopsis Computes a reference solution for joining the left and right sets of columns
*
* @Param print Option to print the solution for debug
* @Param sort Option to sort the solution. This is necessary for comparison against the gdf solution
*
* @Returns A vector of 'result_type' where result_type is a structure with a left_index, right_index
* where left_columns[left_index] == right_columns[right_index]
*/
/* ----------------------------------------------------------------------------*/
std::vector<result_type> compute_reference_solution(join_op op, bool print = false, bool sort = true)
{
using key_type = std::string;
using value_type = size_t;
// Multimap used to compute the reference solution
std::multimap<key_type, value_type> the_map;
// Build hash table that maps the first right columns' values to their row index in the column
std::vector<key_type> const & build_column = right_string_column;
for(size_t right_index = 0; right_index < build_column.size(); ++right_index){
the_map.insert(std::make_pair(build_column[right_index], right_index));
}
std::vector<result_type> reference_result;
// Probe hash table with first left column
std::vector<key_type> const & probe_column = left_string_column;
for(size_t left_index = 0; left_index < probe_column.size(); ++left_index)
{
bool match{false};
// Find all keys that match probe_key
const auto probe_key = probe_column[left_index];
auto range = the_map.equal_range(probe_key);
// Every element in the returned range identifies a row in the first right column that
// matches the probe_key. Need to check if all other columns also match
for(auto i = range.first; i != range.second; ++i)
{
const auto right_index = i->second;
if(left_string_column[left_index] == right_string_column[right_index]){
reference_result.emplace_back(left_index, right_index);
match = true;
}
}
// For left joins, insert a NULL if no match is found
if((false == match) && ((op == join_op::LEFT) || (op == join_op::FULL))){
constexpr int JoinNullValue{-1};
reference_result.emplace_back(left_index, JoinNullValue);
}
}
if (op == join_op::FULL)
{
the_map.clear();
// Build hash table that maps the first left columns' values to their row index in the column
for(size_t left_index = 0; left_index < probe_column.size(); ++left_index){
the_map.insert(std::make_pair(probe_column[left_index], left_index));
}
// Probe the hash table with first right column
// Add rows where a match for the right column does not exist
for(size_t right_index = 0; right_index < build_column.size(); ++right_index)
{
const auto probe_key = build_column[right_index];
auto search = the_map.find(probe_key);
if ((search == the_map.end()))
{
constexpr int JoinNullValue{-1};
reference_result.emplace_back(JoinNullValue, right_index);
}
}
}
// Sort the result
if(sort)
{
std::sort(reference_result.begin(), reference_result.end());
}
if(print)
{
std::cout << "\nReference result size: " << reference_result.size() << std::endl;
std::cout << "left index, right index" << std::endl;
std::copy(reference_result.begin(), reference_result.end(), std::ostream_iterator<result_type>(std::cout, ""));
std::cout << "\n";
}
return reference_result;
}
/* --------------------------------------------------------------------------*/
/**
* @Synopsis Computes the result of joining the left and right sets of columns with the libgdf functions
*
* @Param op The join operator
* @Param left_join_idx The vector of column indexes to join from left dataframe
* @Param right_join_idx The vector of column indexes to join from right dataframe
* @Param print Option to print the result computed by the libgdf function
* @Param sort Option to sort the result. This is required to compare the result against the reference solution
*/
/* ----------------------------------------------------------------------------*/
std::vector<result_type> compute_gdf_result(join_op op, std::vector<int> left_join_idx, std::vector<int> right_join_idx, bool print = false, bool sort = true, gdf_error expected_result = GDF_SUCCESS)
{
EXPECT_EQ(gdf_raw_left_columns.size(), gdf_raw_right_columns.size()) << "Mismatch columns size";
EXPECT_EQ(left_join_idx.size(), right_join_idx.size()) << "Mismatch join indexes size";
gdf_column left_result{};
gdf_column right_result{};
left_result.size = 0;
right_result.size = 0;
size_t num_columns = gdf_raw_left_columns.size();
size_t result_num_cols = gdf_raw_left_columns.size() + gdf_raw_right_columns.size() - left_join_idx.size();
gdf_column * result_col = new gdf_column{};
// Number of join cols should be same in both left and right
size_t num_join_cols = left_join_idx.size();
std::vector <std::pair <int, int>> columns_in_common (num_join_cols);
for (unsigned int i = 0; i < num_join_cols; ++i)
{
columns_in_common[i].first = left_join_idx[i];
columns_in_common[i].second= right_join_idx[i];
}
// Same set of column set for both, as we are assuming that they have same name
std::vector <gdf_column *> result_idx_cols = {&left_result, &right_result};
cudf::table left_gdf_columns (gdf_raw_left_columns);
cudf::table right_gdf_columns (gdf_raw_right_columns);
cudf::table result_idx_table (result_idx_cols);
cudf::table result;
switch(op)
{
case join_op::LEFT:
{
result = cudf::left_join(
left_gdf_columns, right_gdf_columns,
left_join_idx, right_join_idx, columns_in_common,
&result_idx_table, &ctxt);
break;
}
case join_op::INNER:
{
result = cudf::inner_join(
left_gdf_columns, right_gdf_columns,
left_join_idx, right_join_idx, columns_in_common,
&result_idx_table, &ctxt);
break;
}
case join_op::FULL:
{
result = cudf::full_join(
left_gdf_columns, right_gdf_columns,
left_join_idx, right_join_idx, columns_in_common,
&result_idx_table, &ctxt);
break;
}
default:
std::cout << "Invalid join method" << std::endl;
EXPECT_TRUE(false);
}
EXPECT_EQ(left_result.size, right_result.size) << "Join output size mismatch";
// Copy result of gdf join to local result
EXPECT_EQ(RMM_ALLOC(&result_col->data, result.get_column(0)->size * cudf::size_of(result.get_column(0)->dtype), 0), RMM_SUCCESS);
EXPECT_EQ(RMM_ALLOC(&result_col->valid, gdf_valid_allocation_size(result.get_column(0)->size) * sizeof(cudf::valid_type), 0), RMM_SUCCESS);
result_col->size = result.get_column(0)->size;
result_col->dtype = result.get_column(0)->dtype;
result_col->null_count = 0;
result_col->dtype_info = result.get_column(0)->dtype_info;
EXPECT_EQ(hipMemcpy(result_col->data, result.get_column(0)->data,
result.get_column(0)->size * cudf::size_of(result.get_column(0)->dtype), hipMemcpyDeviceToDevice), hipSuccess);
EXPECT_EQ(hipMemcpy(result_col->valid, result.get_column(0)->valid,
gdf_valid_allocation_size(result.get_column(0)->size) * sizeof(cudf::valid_type), hipMemcpyDeviceToDevice), hipSuccess);
gdf_raw_result_columns.push_back (result_col);
// The output is an array of size `n` where the first n/2 elements are the
// left_indices and the last n/2 elements are the right indices
size_t total_pairs = left_result.size;
size_t output_size = total_pairs*2;
int * l_join_output = static_cast<int*>(left_result.data);
int * r_join_output = static_cast<int*>(right_result.data);
// Host vector to hold gdf join output
std::vector<int> host_result(output_size);
// Copy result of gdf join to the host
EXPECT_EQ(hipMemcpy(host_result.data(),
l_join_output, total_pairs * sizeof(int), hipMemcpyDeviceToHost), hipSuccess);
EXPECT_EQ(hipMemcpy(host_result.data() + total_pairs,
r_join_output, total_pairs * sizeof(int), hipMemcpyDeviceToHost), hipSuccess);
// Free the original join result
if(output_size > 0){
gdf_column_free(&left_result);
gdf_column_free(&right_result);
}
// Host vector of result_type pairs to hold final result for comparison to reference solution
std::vector<result_type> host_pair_result(total_pairs);
// Copy raw output into corresponding result_type pair
for(size_t i = 0; i < total_pairs; ++i){
host_pair_result[i].first = host_result[i];
host_pair_result[i].second = host_result[i + total_pairs];
}
// Sort the output for comparison to reference solution
if(sort){
std::sort(host_pair_result.begin(), host_pair_result.end());
}
if(print){
std::cout << "\nGDF result size: " << host_pair_result.size() << std::endl;
std::cout << "left index\tright index" << std::endl;
std::copy(host_pair_result.begin(), host_pair_result.end(), std::ostream_iterator<result_type>(std::cout, ""));
std::cout << "\n";
}
return host_pair_result;
}
void check_output(join_op op, std::vector<result_type>& reference_result, size_t length, bool print=false, bool sort=true){
gdf_column* result_column = gdf_raw_result_columns[0];
if(print){
std::cout<<"Raw string result:\n";
print_gdf_column(result_column);
}
size_t result_size = result_column->size;
if(result_size>0){
NVStrings * temp_strings = static_cast<NVCategory *>(result_column->dtype_info.category)->gather_strings(
(nv_category_index_type *) result_column->data, result_size , DEVICE_ALLOCATED );
char** host_strings = new char*[result_size];
for(size_t i=0;i<result_size;i++){
host_strings[i]=new char[length+1];
}
temp_strings->to_host(host_strings, 0, result_size);
for(size_t i=0;i<result_size;i++){
host_strings[i][length]=0;
}
std::vector<std::string> result_output = std::vector<std::string>(host_strings, host_strings + result_size);
std::vector<std::string> reference_output;
for(size_t i=0; i<result_size; i++){
if(reference_result[i].first != -1)
reference_output.push_back(left_string_column[reference_result[i].first]);
else
reference_output.push_back(right_string_column[reference_result[i].second]);
}
EXPECT_EQ(reference_output.size(), result_size);
if(sort){
std::sort(result_output.begin(), result_output.end());
std::sort(reference_output.begin(), reference_output.end());
}
if(print){
for(auto str : result_output){
std::cout<<str<<"\t";
}
std::cout<<std::endl;
}
NVStrings::destroy(temp_strings);
for(size_t i = 0; i < result_size; i++){
                delete[] host_strings[i];
}
            delete[] host_strings;
for(size_t i=0; i<result_size; i++){
EXPECT_EQ(reference_output[i], result_output[i]);
}
}
}
};
TEST_F(NVCategoryJoinTest, join_test){
bool print = false;
size_t rows_size = 64;
join_op op = join_op::INNER;
size_t length = 1;
const char ** left_string_data = cudf::test::generate_string_data(rows_size, length, print);
const char ** right_string_data = cudf::test::generate_string_data(rows_size, length, print);
left_string_column = std::vector<std::string> (left_string_data, left_string_data + rows_size);
right_string_column = std::vector<std::string> (right_string_data, right_string_data + rows_size);
gdf_column * left_column = cudf::test::create_nv_category_column_strings(left_string_data, rows_size);
gdf_column * right_column = cudf::test::create_nv_category_column_strings(right_string_data, rows_size);
if(print){
std::cout<<"Raw string indexes:\n";
print_gdf_column(left_column);
print_gdf_column(right_column);
}
gdf_raw_left_columns.push_back(left_column);
gdf_raw_right_columns.push_back(right_column);
std::vector<result_type> reference_result = this->compute_reference_solution(op, print);
std::vector<int> left_join_idx={0};
std::vector<int> right_join_idx={0};
std::vector<result_type> gdf_result = this->compute_gdf_result(op, left_join_idx, right_join_idx, print);
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
this->check_output(op, reference_result, length, print);
}
TEST_F(NVCategoryJoinTest, join_test_nulls){
bool print = false;
size_t rows_size = 16;
join_op op = join_op::INNER;
size_t length = 1;
const char ** left_string_data = cudf::test::generate_string_data(rows_size, length, print);
const char ** right_string_data = cudf::test::generate_string_data(rows_size, length, print);
left_string_column = std::vector<std::string> (left_string_data, left_string_data + rows_size);
right_string_column = std::vector<std::string> (right_string_data, right_string_data + rows_size);
gdf_column * left_column = cudf::test::create_nv_category_column_strings(left_string_data, rows_size);
gdf_column * right_column = cudf::test::create_nv_category_column_strings(right_string_data, rows_size);
left_column->valid = nullptr;
right_column->valid = nullptr;
if(print){
std::cout<<"Raw string indexes:\n";
print_gdf_column(left_column);
print_gdf_column(right_column);
}
gdf_raw_left_columns.push_back(left_column);
gdf_raw_right_columns.push_back(right_column);
std::vector<result_type> reference_result = this->compute_reference_solution(op, print);
std::vector<int> left_join_idx={0};
std::vector<int> right_join_idx={0};
std::vector<result_type> gdf_result = this->compute_gdf_result(op, left_join_idx, right_join_idx, print);
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
}
TEST_F(NVCategoryJoinTest, join_test_bug){
bool print = false;
join_op op = join_op::LEFT;
const size_t left_size = 3;
const char *column_left_b[] = {"one ", "two ", "NO MATCH"};
int column_left_a[] = { 5, 14, 8 };
const size_t right_size = 2;
const char *column_right_b[] = {"two ", "one "};
int column_left_c[] = { 0, 1 };
left_string_column = std::vector<std::string> (column_left_b, column_left_b + left_size);
right_string_column = std::vector<std::string> (column_right_b, column_right_b + right_size);
gdf_column * left_column = cudf::test::create_nv_category_column_strings(column_left_b, left_size);
left_column->valid = nullptr;
gdf_column * left_non_join_column = create_column_ints(column_left_a, left_size);
left_non_join_column ->valid = nullptr;
gdf_column * right_column = cudf::test::create_nv_category_column_strings(column_right_b, right_size);
right_column->valid = nullptr;
gdf_column * right_non_join_column = create_column_ints(column_left_c, right_size);
right_non_join_column->valid = nullptr;
left_column->valid = nullptr;
right_column->valid = nullptr;
if(print){
std::cout<<"Raw string indexes:\n";
print_gdf_column(left_column);
print_gdf_column(right_column);
}
gdf_raw_left_columns.push_back(left_non_join_column);
gdf_raw_left_columns.push_back(left_column);
gdf_raw_right_columns.push_back(right_non_join_column);
gdf_raw_right_columns.push_back(right_column);
gdf_column * result_column_nonjoin_left = create_column_ints(column_left_a, left_size);
gdf_column * result_column_nonjoin_right = create_column_ints(column_left_a, left_size);
gdf_column * result_column_joined = cudf::test::create_nv_category_column_strings(column_left_b, left_size);
gdf_raw_result_columns.push_back(result_column_nonjoin_left);
gdf_raw_result_columns.push_back(result_column_joined);
gdf_raw_result_columns.push_back(result_column_nonjoin_right);
std::vector<result_type> reference_result = this->compute_reference_solution(op, print);
std::vector<int> left_join_idx={1};
std::vector<int> right_join_idx={1};
std::vector<result_type> gdf_result = this->compute_gdf_result(op, left_join_idx, right_join_idx, print);
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
if(print){
std::cout<<"Output columns:\n";
for(size_t i=0; i<gdf_raw_result_columns.size(); i++){
print_gdf_column(gdf_raw_result_columns[i]);
std::cout<<"\n-----\n";
}
}
}
| fe28337f3ddc2b6620883e489735c8057eea762c.cu | /*
* Copyright 2019 BlazingDB, Inc.
* Copyright 2019 Felipe Aramburu <[email protected]>
* Copyright 2018 Rommel Quintanilla <[email protected]>
* Copyright 2019 William Scott Malpica <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <cudf/legacy/binaryop.hpp>
#include <cudf/cudf.h>
#include <utilities/legacy/cudf_utils.h>
#include <cudf/legacy/join.hpp>
#include <cudf/legacy/functions.h>
#include <cudf/types.h>
#include <cudf/legacy/copying.hpp>
#include <utilities/legacy/column_utils.hpp>
#include <iostream>
#include <random>
#include <nvstrings/NVCategory.h>
#include <nvstrings/NVStrings.h>
#include <rmm/rmm.h>
#include <cstring>
#include <tests/utilities/legacy/cudf_test_utils.cuh>
#include <tests/utilities/legacy/cudf_test_fixtures.h>
#include <tests/utilities/legacy/nvcategory_utils.cuh>
#include <bitmask/legacy/bit_mask.cuh>
// See this header for all of the handling of valids' vectors
#include <tests/utilities/legacy/valid_vectors.h>
#include <cudf/utilities/legacy/nvcategory_util.hpp>
gdf_column * create_column_ints(int32_t* host_data, cudf::size_type num_rows){
gdf_column * column = new gdf_column{};
int32_t * data;
EXPECT_EQ(RMM_ALLOC(&data, num_rows * sizeof(int32_t) , 0), RMM_SUCCESS);
CUDA_TRY( cudaMemcpy(data, host_data, sizeof(int32_t) * num_rows, cudaMemcpyHostToDevice) );
bit_mask::bit_mask_t * valid;
bit_mask::create_bit_mask(&valid, num_rows,1);
gdf_error err = gdf_column_view(column,
(void *) data,
(cudf::valid_type *)valid,
num_rows,
GDF_INT32);
return column;
}
gdf_column * create_column_constant(cudf::size_type num_rows, int value){
gdf_column * column = new gdf_column{};
int * data;
bit_mask::bit_mask_t * valid;
bit_mask::create_bit_mask(&valid, num_rows,1);
EXPECT_EQ(RMM_ALLOC(&data, num_rows * sizeof(int) , 0), RMM_SUCCESS);
cudaMemset(data,value,sizeof(int) * num_rows);
gdf_error err = gdf_column_view(column,
(void *) data,
(cudf::valid_type *) valid,
num_rows,
GDF_INT32);
return column;
}
int32_t* generate_int_data(cudf::size_type num_rows, size_t max_value, bool print=false){
int32_t* host_data = new int32_t[num_rows];
for(cudf::size_type row_index = 0; row_index < num_rows; row_index++){
host_data[row_index] = std::rand() % max_value;
if(print)
std::cout<<host_data[row_index]<<"\t";
}
if(print)
std::cout<<std::endl;
return host_data;
}
struct NVCategoryTest : public GdfTest
{
gdf_column * create_boolean_column(cudf::size_type num_rows){
gdf_column * column = new gdf_column{};
int * data;
bit_mask::bit_mask_t * valid;
bit_mask::create_bit_mask(&valid, num_rows,1);
EXPECT_EQ(RMM_ALLOC(&data, num_rows * sizeof(int8_t) , 0), RMM_SUCCESS);
gdf_error err = gdf_column_view(column,
(void *) data,
(cudf::valid_type *) valid,
num_rows,
GDF_INT8);
return column;
}
gdf_column * create_indices_column(cudf::size_type num_rows){
gdf_column * column = new gdf_column{};
int * data;
bit_mask::bit_mask_t * valid;
bit_mask::create_bit_mask(&valid, num_rows,1);
EXPECT_EQ(RMM_ALLOC(&data, num_rows * sizeof(int) , 0), RMM_SUCCESS);
gdf_error err = gdf_column_view(column,
(void *) data,
(cudf::valid_type *) valid,
num_rows,
GDF_INT32);
return column;
}
};
//todo refactor tests
TEST_F(NVCategoryTest, TEST_NVCATEGORY_SORTING)
{
bool print = false;
const int rows_size = 64;
const int length = 2;
const char ** string_data = cudf::test::generate_string_data(rows_size, length, print);
gdf_column * column = cudf::test::create_nv_category_column_strings(string_data, rows_size);
gdf_column * output_column = create_indices_column(rows_size);
gdf_column ** input_columns = new gdf_column *[1];
input_columns[0] = column;
if(print){
print_gdf_column(input_columns[0]);
}
int8_t *asc_desc;
EXPECT_EQ(RMM_ALLOC(&asc_desc, 1, 0), RMM_SUCCESS);
int8_t minus_one = -1; //desc
cudaMemset(asc_desc, minus_one, 1);
gdf_context context;
context.flag_null_sort_behavior = GDF_NULL_AS_LARGEST;
    // doesn't output nvcategory type columns, so it works as is
gdf_error err = gdf_order_by(input_columns, asc_desc, 1, output_column, &context);
EXPECT_EQ(GDF_SUCCESS, err);
if(print){
print_gdf_column(output_column);
}
int* host_data = new int[rows_size];
CUDA_TRY( cudaMemcpy(
host_data,
output_column->data,
sizeof(int) * output_column->size,
cudaMemcpyDeviceToHost) );
std::vector<std::string> strings_vector(string_data, string_data + rows_size);
for(size_t i = 0; i < rows_size - 1; i++){
EXPECT_TRUE(strings_vector[host_data[i]] >= strings_vector[host_data[i+1]]);
}
}
// Selects the kind of aggregation operation that is performed
enum struct agg_op
{
MIN,//0
MAX,//1
SUM,//2
CNT,//3
AVG //4
};
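// Generic aggregation functor: the primary template is a placeholder that returns 0;
// the specializations below implement MIN, MAX, SUM and CNT.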
template <agg_op op>
struct AggOp {
template <typename T>
T operator()(const T a, const T b) {
return static_cast<T>(0);
}
template <typename T>
T operator()(const T a) {
return static_cast<T>(0);
}
};
template<>
struct AggOp<agg_op::MIN> {
template <typename T>
T operator()(const T a, const T b) {
return (a < b)? a : b;
}
template <typename T>
T operator()(const T a) {
return a;
}
};
template<>
struct AggOp<agg_op::MAX> {
template <typename T>
T operator()(const T a, const T b) {
return (a > b)? a : b;
}
template <typename T>
T operator()(const T a) {
return a;
}
};
template<>
struct AggOp<agg_op::SUM> {
template <typename T>
T operator()(const T a, const T b) {
return a + b;
}
template <typename T>
T operator()(const T a) {
return a;
}
};
template<>
struct AggOp<agg_op::CNT> {
size_t count{0};
template <typename T>
T operator()(const T a, const T b) {
count = a+1;
return count;
}
template <typename T>
T operator()(const T a) {
count = 1;
return count;
}
};
TEST_F(NVCategoryTest, TEST_NVCATEGORY_COMPARISON)
{
bool print = false;
const int rows_size = 64;
const size_t length = 1;
const char ** left_string_data = cudf::test::generate_string_data(rows_size, length, print);
const char ** right_string_data = cudf::test::generate_string_data(rows_size, length, print);
std::vector<std::string> left_host_column (left_string_data, left_string_data + rows_size);
std::vector<std::string> right_host_column (right_string_data, right_string_data + rows_size);
gdf_column * left_column = cudf::test::create_nv_category_column_strings(left_string_data, rows_size);
gdf_column * right_column = cudf::test::create_nv_category_column_strings(right_string_data, rows_size);
gdf_column * output_column = create_boolean_column(rows_size);
NVStrings * temp_string = static_cast<NVCategory *>(right_column->dtype_info.category)->to_strings();
NVCategory * new_category = static_cast<NVCategory *>(left_column->dtype_info.category)->add_strings(
*temp_string);
unsigned int * indices;
EXPECT_EQ(RMM_ALLOC(&indices, sizeof(unsigned int) * new_category->size(), 0), RMM_SUCCESS);
//now reset data
new_category->get_values( (int*)indices, true);
CUDA_TRY( cudaMemcpy(left_column->data,indices,sizeof(unsigned int) * left_column->size,cudaMemcpyDeviceToDevice) );
CUDA_TRY( cudaMemcpy(right_column->data,indices + left_column->size,sizeof(unsigned int) * right_column->size,cudaMemcpyDeviceToDevice) );
if(print){
print_gdf_column(left_column);
print_gdf_column(right_column);
}
left_column->dtype_info.category = new_category;
right_column->dtype_info.category = new_category;
CUDF_EXPECT_NO_THROW(cudf::binary_operation(output_column, left_column, right_column, gdf_binary_operator::GDF_EQUAL));
int8_t * data = new int8_t[rows_size];
CUDA_TRY( cudaMemcpy(data, output_column->data, sizeof(int8_t) * rows_size, cudaMemcpyDeviceToHost) );
for(size_t i = 0; i < rows_size; ++i){
EXPECT_EQ((bool)data[i], left_host_column[i] == right_host_column[i]);
}
delete[] data;
}
struct NVCategoryConcatTest : public GdfTest
{
std::vector<gdf_column *> concat_columns;
gdf_column * concat_out;
const int length = 2;
std::vector<std::string> compute_gdf_result(bool print = false){
size_t keys_size = 0;
for(size_t i=0;i<concat_columns.size();i++)
keys_size+=concat_columns[i]->size;
concat_out = cudf::test::create_nv_category_column(keys_size, true);
gdf_error err = gdf_column_concat(concat_out, concat_columns.data(), concat_columns.size());
EXPECT_EQ(GDF_SUCCESS, err);
if(print){
print_gdf_column(concat_out);
}
NVStrings * temp_strings = static_cast<NVCategory *>(concat_out->dtype_info.category)->gather_strings(
(nv_category_index_type *) concat_out->data, keys_size, DEVICE_ALLOCATED );
char** host_strings = new char*[keys_size];
for(size_t i=0;i<keys_size;i++){
host_strings[i]=new char[length+1];
}
temp_strings->to_host(host_strings, 0, keys_size);
for(size_t i=0;i<keys_size;i++){
host_strings[i][length]=0;
}
std::vector<std::string> strings_vector(host_strings, host_strings + keys_size);
NVStrings::destroy(temp_strings);
for(size_t i = 0; i < keys_size; i++){
delete[] host_strings[i];
}
delete[] host_strings;
return strings_vector;
}
};
TEST_F(NVCategoryConcatTest, concat_test){
bool print = false;
const int rows_size = 64;
const char *** string_data = new const char**[2];
string_data[0] = cudf::test::generate_string_data(rows_size, length, print);
string_data[1] = cudf::test::generate_string_data(rows_size, length, print);
concat_columns.resize(2);
concat_columns[0] = cudf::test::create_nv_category_column_strings(string_data[0], rows_size);
concat_columns[1] = cudf::test::create_nv_category_column_strings(string_data[1], rows_size);
std::vector<std::string> reference_result;
reference_result.insert(reference_result.end(), string_data[0], string_data[0] + rows_size);
reference_result.insert(reference_result.end(), string_data[1], string_data[1] + rows_size);
if(print){
print_gdf_column(concat_columns[0]);
print_gdf_column(concat_columns[1]);
}
std::vector<std::string> gdf_result = this->compute_gdf_result();
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
}
// Selects the kind of join operation that is performed
enum struct join_op
{
INNER,
LEFT,
FULL
};
// Each element of the result will be an index into the left and right columns where
// left_columns[left_index] == right_columns[right_index]
using result_type = typename std::pair<int, int>;
// Define stream operator for a std::pair for convenience of printing results.
// Needs to be in the std namespace to work with std::copy
namespace std{
template <typename first_t, typename second_t>
std::ostream& operator<<(std::ostream& os, std::pair<first_t, second_t> const & p)
{
os << p.first << "\t" << p.second;
std::cout << "\n";
return os;
}
}
struct NVCategoryJoinTest : public GdfTest
{
// Containers for the raw pointers to the gdf_columns that will be used as
// input to the gdf_join functions
std::vector<gdf_column*> gdf_raw_left_columns;
std::vector<gdf_column*> gdf_raw_right_columns;
std::vector<gdf_column*> gdf_raw_result_columns;
std::vector<std::string> left_string_column;
std::vector<std::string> right_string_column;
gdf_context ctxt{0, GDF_HASH, 0};
/* --------------------------------------------------------------------------*/
/**
* @Synopsis Computes a reference solution for joining the left and right sets of columns
*
* @Param print Option to print the solution for debug
* @Param sort Option to sort the solution. This is necessary for comparison against the gdf solution
*
* @Returns A vector of 'result_type' where result_type is a structure with a left_index, right_index
* where left_columns[left_index] == right_columns[right_index]
*/
/* ----------------------------------------------------------------------------*/
std::vector<result_type> compute_reference_solution(join_op op, bool print = false, bool sort = true)
{
using key_type = std::string;
using value_type = size_t;
// Multimap used to compute the reference solution
std::multimap<key_type, value_type> the_map;
// Build hash table that maps the first right columns' values to their row index in the column
std::vector<key_type> const & build_column = right_string_column;
for(size_t right_index = 0; right_index < build_column.size(); ++right_index){
the_map.insert(std::make_pair(build_column[right_index], right_index));
}
std::vector<result_type> reference_result;
// Probe hash table with first left column
std::vector<key_type> const & probe_column = left_string_column;
for(size_t left_index = 0; left_index < probe_column.size(); ++left_index)
{
bool match{false};
// Find all keys that match probe_key
const auto probe_key = probe_column[left_index];
auto range = the_map.equal_range(probe_key);
// Every element in the returned range identifies a row in the first right column that
// matches the probe_key. Need to check if all other columns also match
for(auto i = range.first; i != range.second; ++i)
{
const auto right_index = i->second;
if(left_string_column[left_index] == right_string_column[right_index]){
reference_result.emplace_back(left_index, right_index);
match = true;
}
}
// For left joins, insert a NULL if no match is found
if((false == match) && ((op == join_op::LEFT) || (op == join_op::FULL))){
constexpr int JoinNullValue{-1};
reference_result.emplace_back(left_index, JoinNullValue);
}
}
if (op == join_op::FULL)
{
the_map.clear();
// Build hash table that maps the first left columns' values to their row index in the column
for(size_t left_index = 0; left_index < probe_column.size(); ++left_index){
the_map.insert(std::make_pair(probe_column[left_index], left_index));
}
// Probe the hash table with first right column
// Add rows where a match for the right column does not exist
for(size_t right_index = 0; right_index < build_column.size(); ++right_index)
{
const auto probe_key = build_column[right_index];
auto search = the_map.find(probe_key);
if ((search == the_map.end()))
{
constexpr int JoinNullValue{-1};
reference_result.emplace_back(JoinNullValue, right_index);
}
}
}
// Sort the result
if(sort)
{
std::sort(reference_result.begin(), reference_result.end());
}
if(print)
{
std::cout << "\nReference result size: " << reference_result.size() << std::endl;
std::cout << "left index, right index" << std::endl;
std::copy(reference_result.begin(), reference_result.end(), std::ostream_iterator<result_type>(std::cout, ""));
std::cout << "\n";
}
return reference_result;
}
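// Illustrative example (hypothetical data): with left = {"a","b","c"} and
// right = {"b","a","b"}, the reference solution above yields the pairs
// (0,1), (1,0), (1,2) for an INNER join, plus (2,-1) for a LEFT or FULL join
// because "c" has no match on the right.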
/* --------------------------------------------------------------------------*/
/**
* @Synopsis Computes the result of joining the left and right sets of columns with the libgdf functions
*
* @Param op The join operator
* @Param left_join_idx The vector of column indexes to join from left dataframe
* @Param right_join_idx The vector of column indexes to join from right dataframe
* @Param print Option to print the result computed by the libgdf function
* @Param sort Option to sort the result. This is required to compare the result against the reference solution
*/
/* ----------------------------------------------------------------------------*/
std::vector<result_type> compute_gdf_result(join_op op, std::vector<int> left_join_idx, std::vector<int> right_join_idx, bool print = false, bool sort = true, gdf_error expected_result = GDF_SUCCESS)
{
EXPECT_EQ(gdf_raw_left_columns.size(), gdf_raw_right_columns.size()) << "Mismatch columns size";
EXPECT_EQ(left_join_idx.size(), right_join_idx.size()) << "Mismatch join indexes size";
gdf_column left_result{};
gdf_column right_result{};
left_result.size = 0;
right_result.size = 0;
size_t num_columns = gdf_raw_left_columns.size();
size_t result_num_cols = gdf_raw_left_columns.size() + gdf_raw_right_columns.size() - left_join_idx.size();
gdf_column * result_col = new gdf_column{};
// Number of join cols should be same in both left and right
size_t num_join_cols = left_join_idx.size();
std::vector <std::pair <int, int>> columns_in_common (num_join_cols);
for (unsigned int i = 0; i < num_join_cols; ++i)
{
columns_in_common[i].first = left_join_idx[i];
columns_in_common[i].second= right_join_idx[i];
}
// Same set of columns for both, as we are assuming that they have the same names
std::vector <gdf_column *> result_idx_cols = {&left_result, &right_result};
cudf::table left_gdf_columns (gdf_raw_left_columns);
cudf::table right_gdf_columns (gdf_raw_right_columns);
cudf::table result_idx_table (result_idx_cols);
cudf::table result;
switch(op)
{
case join_op::LEFT:
{
result = cudf::left_join(
left_gdf_columns, right_gdf_columns,
left_join_idx, right_join_idx, columns_in_common,
&result_idx_table, &ctxt);
break;
}
case join_op::INNER:
{
result = cudf::inner_join(
left_gdf_columns, right_gdf_columns,
left_join_idx, right_join_idx, columns_in_common,
&result_idx_table, &ctxt);
break;
}
case join_op::FULL:
{
result = cudf::full_join(
left_gdf_columns, right_gdf_columns,
left_join_idx, right_join_idx, columns_in_common,
&result_idx_table, &ctxt);
break;
}
default:
std::cout << "Invalid join method" << std::endl;
EXPECT_TRUE(false);
}
EXPECT_EQ(left_result.size, right_result.size) << "Join output size mismatch";
// Copy result of gdf join to local result
EXPECT_EQ(RMM_ALLOC(&result_col->data, result.get_column(0)->size * cudf::size_of(result.get_column(0)->dtype), 0), RMM_SUCCESS);
EXPECT_EQ(RMM_ALLOC(&result_col->valid, gdf_valid_allocation_size(result.get_column(0)->size) * sizeof(cudf::valid_type), 0), RMM_SUCCESS);
result_col->size = result.get_column(0)->size;
result_col->dtype = result.get_column(0)->dtype;
result_col->null_count = 0;
result_col->dtype_info = result.get_column(0)->dtype_info;
EXPECT_EQ(cudaMemcpy(result_col->data, result.get_column(0)->data,
result.get_column(0)->size * cudf::size_of(result.get_column(0)->dtype), cudaMemcpyDeviceToDevice), cudaSuccess);
EXPECT_EQ(cudaMemcpy(result_col->valid, result.get_column(0)->valid,
gdf_valid_allocation_size(result.get_column(0)->size) * sizeof(cudf::valid_type), cudaMemcpyDeviceToDevice), cudaSuccess);
gdf_raw_result_columns.push_back (result_col);
// The output is an array of size `n` where the first n/2 elements are the
// left_indices and the last n/2 elements are the right indices
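// e.g. with total_pairs == 3, host_result is laid out as [l0, l1, l2, r0, r1, r2]
// (illustrative indices only)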
size_t total_pairs = left_result.size;
size_t output_size = total_pairs*2;
int * l_join_output = static_cast<int*>(left_result.data);
int * r_join_output = static_cast<int*>(right_result.data);
// Host vector to hold gdf join output
std::vector<int> host_result(output_size);
// Copy result of gdf join to the host
EXPECT_EQ(cudaMemcpy(host_result.data(),
l_join_output, total_pairs * sizeof(int), cudaMemcpyDeviceToHost), cudaSuccess);
EXPECT_EQ(cudaMemcpy(host_result.data() + total_pairs,
r_join_output, total_pairs * sizeof(int), cudaMemcpyDeviceToHost), cudaSuccess);
// Free the original join result
if(output_size > 0){
gdf_column_free(&left_result);
gdf_column_free(&right_result);
}
// Host vector of result_type pairs to hold final result for comparison to reference solution
std::vector<result_type> host_pair_result(total_pairs);
// Copy raw output into corresponding result_type pair
for(size_t i = 0; i < total_pairs; ++i){
host_pair_result[i].first = host_result[i];
host_pair_result[i].second = host_result[i + total_pairs];
}
// Sort the output for comparison to reference solution
if(sort){
std::sort(host_pair_result.begin(), host_pair_result.end());
}
if(print){
std::cout << "\nGDF result size: " << host_pair_result.size() << std::endl;
std::cout << "left index\tright index" << std::endl;
std::copy(host_pair_result.begin(), host_pair_result.end(), std::ostream_iterator<result_type>(std::cout, ""));
std::cout << "\n";
}
return host_pair_result;
}
void check_output(join_op op, std::vector<result_type>& reference_result, size_t length, bool print=false, bool sort=true){
gdf_column* result_column = gdf_raw_result_columns[0];
if(print){
std::cout<<"Raw string result:\n";
print_gdf_column(result_column);
}
size_t result_size = result_column->size;
if(result_size>0){
NVStrings * temp_strings = static_cast<NVCategory *>(result_column->dtype_info.category)->gather_strings(
(nv_category_index_type *) result_column->data, result_size , DEVICE_ALLOCATED );
char** host_strings = new char*[result_size];
for(size_t i=0;i<result_size;i++){
host_strings[i]=new char[length+1];
}
temp_strings->to_host(host_strings, 0, result_size);
for(size_t i=0;i<result_size;i++){
host_strings[i][length]=0;
}
std::vector<std::string> result_output = std::vector<std::string>(host_strings, host_strings + result_size);
std::vector<std::string> reference_output;
for(size_t i=0; i<result_size; i++){
if(reference_result[i].first != -1)
reference_output.push_back(left_string_column[reference_result[i].first]);
else
reference_output.push_back(right_string_column[reference_result[i].second]);
}
EXPECT_EQ(reference_output.size(), result_size);
if(sort){
std::sort(result_output.begin(), result_output.end());
std::sort(reference_output.begin(), reference_output.end());
}
if(print){
for(auto str : result_output){
std::cout<<str<<"\t";
}
std::cout<<std::endl;
}
NVStrings::destroy(temp_strings);
for(size_t i = 0; i < result_size; i++){
delete[] host_strings[i];
}
delete[] host_strings;
for(size_t i=0; i<result_size; i++){
EXPECT_EQ(reference_output[i], result_output[i]);
}
}
}
};
TEST_F(NVCategoryJoinTest, join_test){
bool print = false;
size_t rows_size = 64;
join_op op = join_op::INNER;
size_t length = 1;
const char ** left_string_data = cudf::test::generate_string_data(rows_size, length, print);
const char ** right_string_data = cudf::test::generate_string_data(rows_size, length, print);
left_string_column = std::vector<std::string> (left_string_data, left_string_data + rows_size);
right_string_column = std::vector<std::string> (right_string_data, right_string_data + rows_size);
gdf_column * left_column = cudf::test::create_nv_category_column_strings(left_string_data, rows_size);
gdf_column * right_column = cudf::test::create_nv_category_column_strings(right_string_data, rows_size);
if(print){
std::cout<<"Raw string indexes:\n";
print_gdf_column(left_column);
print_gdf_column(right_column);
}
gdf_raw_left_columns.push_back(left_column);
gdf_raw_right_columns.push_back(right_column);
std::vector<result_type> reference_result = this->compute_reference_solution(op, print);
std::vector<int> left_join_idx={0};
std::vector<int> right_join_idx={0};
std::vector<result_type> gdf_result = this->compute_gdf_result(op, left_join_idx, right_join_idx, print);
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
this->check_output(op, reference_result, length, print);
}
TEST_F(NVCategoryJoinTest, join_test_nulls){
bool print = false;
size_t rows_size = 16;
join_op op = join_op::INNER;
size_t length = 1;
const char ** left_string_data = cudf::test::generate_string_data(rows_size, length, print);
const char ** right_string_data = cudf::test::generate_string_data(rows_size, length, print);
left_string_column = std::vector<std::string> (left_string_data, left_string_data + rows_size);
right_string_column = std::vector<std::string> (right_string_data, right_string_data + rows_size);
gdf_column * left_column = cudf::test::create_nv_category_column_strings(left_string_data, rows_size);
gdf_column * right_column = cudf::test::create_nv_category_column_strings(right_string_data, rows_size);
left_column->valid = nullptr;
right_column->valid = nullptr;
if(print){
std::cout<<"Raw string indexes:\n";
print_gdf_column(left_column);
print_gdf_column(right_column);
}
gdf_raw_left_columns.push_back(left_column);
gdf_raw_right_columns.push_back(right_column);
std::vector<result_type> reference_result = this->compute_reference_solution(op, print);
std::vector<int> left_join_idx={0};
std::vector<int> right_join_idx={0};
std::vector<result_type> gdf_result = this->compute_gdf_result(op, left_join_idx, right_join_idx, print);
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
}
TEST_F(NVCategoryJoinTest, join_test_bug){
bool print = false;
join_op op = join_op::LEFT;
const size_t left_size = 3;
const char *column_left_b[] = {"one ", "two ", "NO MATCH"};
int column_left_a[] = { 5, 14, 8 };
const size_t right_size = 2;
const char *column_right_b[] = {"two ", "one "};
int column_left_c[] = { 0, 1 };
left_string_column = std::vector<std::string> (column_left_b, column_left_b + left_size);
right_string_column = std::vector<std::string> (column_right_b, column_right_b + right_size);
gdf_column * left_column = cudf::test::create_nv_category_column_strings(column_left_b, left_size);
left_column->valid = nullptr;
gdf_column * left_non_join_column = create_column_ints(column_left_a, left_size);
left_non_join_column ->valid = nullptr;
gdf_column * right_column = cudf::test::create_nv_category_column_strings(column_right_b, right_size);
right_column->valid = nullptr;
gdf_column * right_non_join_column = create_column_ints(column_left_c, right_size);
right_non_join_column->valid = nullptr;
left_column->valid = nullptr;
right_column->valid = nullptr;
if(print){
std::cout<<"Raw string indexes:\n";
print_gdf_column(left_column);
print_gdf_column(right_column);
}
gdf_raw_left_columns.push_back(left_non_join_column);
gdf_raw_left_columns.push_back(left_column);
gdf_raw_right_columns.push_back(right_non_join_column);
gdf_raw_right_columns.push_back(right_column);
gdf_column * result_column_nonjoin_left = create_column_ints(column_left_a, left_size);
gdf_column * result_column_nonjoin_right = create_column_ints(column_left_a, left_size);
gdf_column * result_column_joined = cudf::test::create_nv_category_column_strings(column_left_b, left_size);
gdf_raw_result_columns.push_back(result_column_nonjoin_left);
gdf_raw_result_columns.push_back(result_column_joined);
gdf_raw_result_columns.push_back(result_column_nonjoin_right);
std::vector<result_type> reference_result = this->compute_reference_solution(op, print);
std::vector<int> left_join_idx={1};
std::vector<int> right_join_idx={1};
std::vector<result_type> gdf_result = this->compute_gdf_result(op, left_join_idx, right_join_idx, print);
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
if(print){
std::cout<<"Output columns:\n";
for(size_t i=0; i<gdf_raw_result_columns.size(); i++){
print_gdf_column(gdf_raw_result_columns[i]);
std::cout<<"\n-----\n";
}
}
}
|
f397cdb7963e13a8af28fdc7fdb320a4182ebad8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "rgb2yuvKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *imgr = NULL;
hipMalloc(&imgr, XSIZE*YSIZE);
int *imgg = NULL;
hipMalloc(&imgg, XSIZE*YSIZE);
int *imgb = NULL;
hipMalloc(&imgb, XSIZE*YSIZE);
int *imgy = NULL;
hipMalloc(&imgy, XSIZE*YSIZE);
int *imgcb = NULL;
hipMalloc(&imgcb, XSIZE*YSIZE);
int *imgcr = NULL;
hipMalloc(&imgcr, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
rgb2yuvKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, imgr,imgg,imgb,imgy,imgcb,imgcr,n);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
rgb2yuvKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, imgr,imgg,imgb,imgy,imgcb,imgcr,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
rgb2yuvKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, imgr,imgg,imgb,imgy,imgcb,imgcr,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | f397cdb7963e13a8af28fdc7fdb320a4182ebad8.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "rgb2yuvKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *imgr = NULL;
cudaMalloc(&imgr, XSIZE*YSIZE);
int *imgg = NULL;
cudaMalloc(&imgg, XSIZE*YSIZE);
int *imgb = NULL;
cudaMalloc(&imgb, XSIZE*YSIZE);
int *imgy = NULL;
cudaMalloc(&imgy, XSIZE*YSIZE);
int *imgcb = NULL;
cudaMalloc(&imgcb, XSIZE*YSIZE);
int *imgcr = NULL;
cudaMalloc(&imgcr, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
rgb2yuvKernel<<<gridBlock,threadBlock>>>(imgr,imgg,imgb,imgy,imgcb,imgcr,n);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
rgb2yuvKernel<<<gridBlock,threadBlock>>>(imgr,imgg,imgb,imgy,imgcb,imgcr,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
rgb2yuvKernel<<<gridBlock,threadBlock>>>(imgr,imgg,imgb,imgy,imgcb,imgcr,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
2b25c08435f511dff1ab55a2221002738cf30a7c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <string.h>
#define TIMER_CREATE(t) \
hipEvent_t t##_start, t##_end; \
hipEventCreate(&t##_start); \
hipEventCreate(&t##_end);
#define TIMER_START(t) \
hipEventRecord(t##_start); \
hipEventSynchronize(t##_start);
#define TIMER_END(t) \
hipEventRecord(t##_end); \
hipEventSynchronize(t##_end); \
hipEventElapsedTime(&t, t##_start, t##_end); \
hipEventDestroy(t##_start); \
hipEventDestroy(t##_end);
//Function to check for errors
inline hipError_t checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
exit(-1);
}
#endif
return result;
}
//Number of Vertices
#define vertices 10000
//Number of Edges per Vertex
#define Edge_per_node 4000
//Used to define the weight of each edge
#define Maximum_weight 5
//Setting a value to infinity
#define infinity 10000000
//Kernel Call to initialize the values of the node weights to infinity except for the Source Node which is set to 0
//We mark the source Node to be settled after that and all other nodes unsettled
__global__ void Initializing(int *node_weight_array, int *mask_array, int Source) // CUDA kernel
{
int id = blockIdx.x*blockDim.x+threadIdx.x; // Get global thread ID
if(id<vertices)
{
if(id==Source)
{
node_weight_array[id]=0;
mask_array[id]=0;
}
else
{
node_weight_array[id]=infinity;
mask_array[id]=0;
}
}
}
//Kernel Call to choose the node with minimum node weight whose edges are to be relaxed
__global__ void Minimum(int *mask_array,int *vertex_array,int *node_weight_array, int *edge_array, int *edge_weight_array, int *min)
{
int id = blockIdx.x*blockDim.x+threadIdx.x; // Get global thread ID
if(id<vertices)
{
if(mask_array[id]!=1 && node_weight_array[id]<infinity)
{
atomicMin(&min[0],node_weight_array[id]);
}
}
}
//Kernel call to relax all the edges of a node
__global__ void Relax(int *mask_array,int *vertex_array,int *node_weight_array, int *edge_array, int *edge_weight_array, int *min)
{
int id = blockIdx.x*blockDim.x+threadIdx.x; // Get global thread ID
//Iterative variable
int m,n;
if(id<vertices)
{
if(mask_array[id]!=1 && node_weight_array[id]==min[0])
{
mask_array[id]=1;
for(m=id*Edge_per_node;m<id*Edge_per_node+Edge_per_node;m++)
{
n=edge_array[m];
atomicMin(&node_weight_array[n],node_weight_array[id]+edge_weight_array[m]);
}
}
}
}
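//Illustrative example (hypothetical values): if the node being relaxed currently
//has node weight 5 and one of its outgoing edges has weight 2, the atomicMin above
//sets the neighbour's node weight to min(current neighbour weight, 5 + 2).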
int main( int argc, char* argv[] )
{
//Size of the Vertex array
size_t vertex_array_size = vertices*sizeof(int);
//Size of the edge array and edge_weight array
size_t edge_array_size = vertices*Edge_per_node*sizeof(int);
//Intializing the vertex array
int *vertex_array = (int*)malloc(vertex_array_size);
//Initializing a copy of the vertex array
int *vertex_copy = (int*)malloc(vertex_array_size);
//Intializing the edge array
int *edge_array=(int*)malloc(edge_array_size);
//Initializing edge_weight_array which stores the weights of each edge
int *edge_weight_array = (int*)malloc(edge_array_size);
//Initializing Node weight array which stores the value for the current weight to reach the node
int *node_weight_array = (int*)malloc(vertex_array_size);
//Array to mark if a node is settled or not
int *mask_array = (int*)malloc(vertex_array_size);
//Iterative operator
int i,j,k;
printf("Initializing Verte Array...\n");
//Setting node number in vertex_array
for(i=0;i<vertices;i++)
{
vertex_array[i]=i;
}
//Setting the RNG seed to system clock
srand(time(NULL));
//temp variable
int temp;
printf("Initializing Edge Array...\n");
//Adding random edges to each node
memcpy(vertex_copy,vertex_array,vertex_array_size);
for(i=0;i<vertices;i++)
{
for(j=vertices-1;j>0;j--)
{
k=rand()%(j+1);
temp = vertex_copy[j];
vertex_copy[j]=vertex_copy[k];
vertex_copy[k]=temp;
}
for(j=0;j<Edge_per_node;j++)
{
if(vertex_copy[j]==i)
{
j=j+1;
edge_array[i*Edge_per_node+(j-1)]= vertex_copy[j];
}
else
{
edge_array[i*Edge_per_node+j]= vertex_copy[j];
}
}
}
printf("Initializing weights of each edge...\n");
//Adding weights to the edge_weight array
for(i=0;i<vertices;i++)
{
int a = rand()%Maximum_weight+1;
int b = rand()%Maximum_weight+1;
for(j=0;j<Edge_per_node;j++)
{
edge_weight_array[i*Edge_per_node+j]=a+j*b;
}
}
//Initializing gpu variables
int *gpu_vertex_array;
int *gpu_edge_array;
int *gpu_edge_weight_array;
int *gpu_node_weight_array;
int *gpu_mask_array;
checkCuda(hipMalloc(&gpu_vertex_array,vertex_array_size));
checkCuda(hipMalloc(&gpu_node_weight_array,vertex_array_size));
checkCuda(hipMalloc(&gpu_mask_array,vertex_array_size));
checkCuda(hipMalloc(&gpu_edge_array,edge_array_size));
checkCuda(hipMalloc(&gpu_edge_weight_array,edge_array_size));
//Copying memory from Host to Device
checkCuda(hipMemcpy(gpu_vertex_array,vertex_array,vertex_array_size,hipMemcpyHostToDevice));
checkCuda(hipMemcpy(gpu_node_weight_array,node_weight_array,vertex_array_size,hipMemcpyHostToDevice));
checkCuda(hipMemcpy(gpu_mask_array,mask_array,vertex_array_size,hipMemcpyHostToDevice));
checkCuda(hipMemcpy(gpu_edge_array,edge_array,edge_array_size,hipMemcpyHostToDevice));
checkCuda(hipMemcpy(gpu_edge_weight_array,edge_weight_array,edge_array_size,hipMemcpyHostToDevice));
//Declaring the block and grid size
int blockSize, gridSize;
blockSize=1024;
gridSize = (int)ceil((float)vertices/blockSize); // Number of thread blocks in grid
//Start Timer
float start_time;
TIMER_CREATE(start_time);
TIMER_START(start_time);
//Kernel Call for initializating of the node weights array
hipLaunchKernelGGL(( Initializing), dim3(gridSize), dim3(blockSize), 0, 0, gpu_node_weight_array,gpu_mask_array, 0);
hipError_t err = hipGetLastError();
if (err != hipSuccess)
{
printf("Error: %s\n", hipGetErrorString(err));
}
//Initial value of min which stores the minimum node weight in each iteration of relax.
int *min=(int*)malloc(2*sizeof(int));
min[0]=0;
min[1]=0;
//GPU variable to store the minimum value
int *gpu_min;
checkCuda(hipMalloc((void**)&gpu_min,2*sizeof(int)));
while(min[0]<infinity)
{
min[0] = infinity;
checkCuda(hipMemcpy(gpu_min,min,sizeof(int),hipMemcpyHostToDevice));
hipLaunchKernelGGL(( Minimum), dim3(gridSize), dim3(blockSize), 0, 0, gpu_mask_array,gpu_vertex_array,gpu_node_weight_array,gpu_edge_array,gpu_edge_weight_array,gpu_min);
err = hipGetLastError();
if (err != hipSuccess)
{
printf("Error: %s\n", hipGetErrorString(err));
}
hipLaunchKernelGGL(( Relax), dim3(gridSize), dim3(blockSize), 0, 0, gpu_mask_array,gpu_vertex_array,gpu_node_weight_array,gpu_edge_array,gpu_edge_weight_array,gpu_min);
err = hipGetLastError();
if (err != hipSuccess)
{
printf("Error: %s\n", hipGetErrorString(err));
}
checkCuda(hipMemcpy(node_weight_array,gpu_node_weight_array,vertex_array_size,hipMemcpyDeviceToHost));
checkCuda(hipMemcpy(min,gpu_min,2*sizeof(int),hipMemcpyDeviceToHost));
}
//copying the final node weights from device to host
checkCuda(hipMemcpy(node_weight_array,gpu_node_weight_array,vertex_array_size,hipMemcpyDeviceToHost));
//Stop Timer
TIMER_END(start_time);
printf("Kernel Execution Time: %f ms\n", start_time);
hipFree(gpu_vertex_array);
hipFree(gpu_node_weight_array);
hipFree(gpu_edge_array);
hipFree(gpu_edge_weight_array);
hipFree(gpu_mask_array);
free(vertex_array);
free(node_weight_array);
free(edge_array);
free(edge_weight_array);
free(mask_array);
return 0;
}
| 2b25c08435f511dff1ab55a2221002738cf30a7c.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <string.h>
#define TIMER_CREATE(t) \
cudaEvent_t t##_start, t##_end; \
cudaEventCreate(&t##_start); \
cudaEventCreate(&t##_end);
#define TIMER_START(t) \
cudaEventRecord(t##_start); \
cudaEventSynchronize(t##_start);
#define TIMER_END(t) \
cudaEventRecord(t##_end); \
cudaEventSynchronize(t##_end); \
cudaEventElapsedTime(&t, t##_start, t##_end); \
cudaEventDestroy(t##_start); \
cudaEventDestroy(t##_end);
//Function to check for errors
inline cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
exit(-1);
}
#endif
return result;
}
//Number of Vertices
#define vertices 10000
//Number of Edges per Vertex
#define Edge_per_node 4000
//Used to define the weight of each edge
#define Maximum_weight 5
//Setting a value to infinity
#define infinity 10000000
//Kernel Call to initialize the values of the node weights to infinity except for the Source Node which is set to 0
//We mark the source Node to be settled after that and all other nodes unsettled
__global__ void Initializing(int *node_weight_array, int *mask_array, int Source) // CUDA kernel
{
int id = blockIdx.x*blockDim.x+threadIdx.x; // Get global thread ID
if(id<vertices)
{
if(id==Source)
{
node_weight_array[id]=0;
mask_array[id]=0;
}
else
{
node_weight_array[id]=infinity;
mask_array[id]=0;
}
}
}
//Kernel Call to choose the node with minimum node weight whose edges are to be relaxed
__global__ void Minimum(int *mask_array,int *vertex_array,int *node_weight_array, int *edge_array, int *edge_weight_array, int *min)
{
int id = blockIdx.x*blockDim.x+threadIdx.x; // Get global thread ID
if(id<vertices)
{
if(mask_array[id]!=1 && node_weight_array[id]<infinity)
{
atomicMin(&min[0],node_weight_array[id]);
}
}
}
//Kernel call to relax all the edges of a node
__global__ void Relax(int *mask_array,int *vertex_array,int *node_weight_array, int *edge_array, int *edge_weight_array, int *min)
{
int id = blockIdx.x*blockDim.x+threadIdx.x; // Get global thread ID
//Iterative variable
int m,n;
if(id<vertices)
{
if(mask_array[id]!=1 && node_weight_array[id]==min[0])
{
mask_array[id]=1;
for(m=id*Edge_per_node;m<id*Edge_per_node+Edge_per_node;m++)
{
n=edge_array[m];
atomicMin(&node_weight_array[n],node_weight_array[id]+edge_weight_array[m]);
}
}
}
}
int main( int argc, char* argv[] )
{
//Size of the Vertex array
size_t vertex_array_size = vertices*sizeof(int);
//Size of the edge array and edge_weight array
size_t edge_array_size = vertices*Edge_per_node*sizeof(int);
//Intializing the vertex array
int *vertex_array = (int*)malloc(vertex_array_size);
//Initializing a copy of the vertex array
int *vertex_copy = (int*)malloc(vertex_array_size);
//Intializing the edge array
int *edge_array=(int*)malloc(edge_array_size);
//Initializing edge_weight_array which stores the weights of each edge
int *edge_weight_array = (int*)malloc(edge_array_size);
//Initializing Node weight array which stores the value for the current weight to reach the node
int *node_weight_array = (int*)malloc(vertex_array_size);
//Array to mark if a node is settled or not
int *mask_array = (int*)malloc(vertex_array_size);
//Iterative operator
int i,j,k;
printf("Initializing Verte Array...\n");
//Setting node number in vertex_array
for(i=0;i<vertices;i++)
{
vertex_array[i]=i;
}
//Setting the RNG seed to system clock
srand(time(NULL));
//temp variable
int temp;
printf("Initializing Edge Array...\n");
//Adding random edges to each node
memcpy(vertex_copy,vertex_array,vertex_array_size);
for(i=0;i<vertices;i++)
{
for(j=vertices-1;j>0;j--)
{
k=rand()%(j+1);
temp = vertex_copy[j];
vertex_copy[j]=vertex_copy[k];
vertex_copy[k]=temp;
}
for(j=0;j<Edge_per_node;j++)
{
if(vertex_copy[j]==i)
{
j=j+1;
edge_array[i*Edge_per_node+(j-1)]= vertex_copy[j];
}
else
{
edge_array[i*Edge_per_node+j]= vertex_copy[j];
}
}
}
printf("Initializing weights of each edge...\n");
//Adding weights to the edge_weight array
for(i=0;i<vertices;i++)
{
int a = rand()%Maximum_weight+1;
int b = rand()%Maximum_weight+1;
for(j=0;j<Edge_per_node;j++)
{
edge_weight_array[i*Edge_per_node+j]=a+j*b;
}
}
//Initializing gpu variables
int *gpu_vertex_array;
int *gpu_edge_array;
int *gpu_edge_weight_array;
int *gpu_node_weight_array;
int *gpu_mask_array;
checkCuda(cudaMalloc(&gpu_vertex_array,vertex_array_size));
checkCuda(cudaMalloc(&gpu_node_weight_array,vertex_array_size));
checkCuda(cudaMalloc(&gpu_mask_array,vertex_array_size));
checkCuda(cudaMalloc(&gpu_edge_array,edge_array_size));
checkCuda(cudaMalloc(&gpu_edge_weight_array,edge_array_size));
//Copying memory from Host to Device
checkCuda(cudaMemcpy(gpu_vertex_array,vertex_array,vertex_array_size,cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(gpu_node_weight_array,node_weight_array,vertex_array_size,cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(gpu_mask_array,mask_array,vertex_array_size,cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(gpu_edge_array,edge_array,edge_array_size,cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(gpu_edge_weight_array,edge_weight_array,edge_array_size,cudaMemcpyHostToDevice));
//Declaring the block and grid size
int blockSize, gridSize;
blockSize=1024;
gridSize = (int)ceil((float)vertices/blockSize); // Number of thread blocks in grid
//Start Timer
float start_time;
TIMER_CREATE(start_time);
TIMER_START(start_time);
//Kernel Call for initializating of the node weights array
Initializing<<<gridSize, blockSize>>>(gpu_node_weight_array,gpu_mask_array, 0);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("Error: %s\n", cudaGetErrorString(err));
}
//Initial value of min which stores the minimum node weight in each iteration of relax.
int *min=(int*)malloc(2*sizeof(int));
min[0]=0;
min[1]=0;
//GPU variable to store the minimum value
int *gpu_min;
checkCuda(cudaMalloc((void**)&gpu_min,2*sizeof(int)));
while(min[0]<infinity)
{
min[0] = infinity;
checkCuda(cudaMemcpy(gpu_min,min,sizeof(int),cudaMemcpyHostToDevice));
Minimum<<<gridSize, blockSize>>>(gpu_mask_array,gpu_vertex_array,gpu_node_weight_array,gpu_edge_array,gpu_edge_weight_array,gpu_min);
err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("Error: %s\n", cudaGetErrorString(err));
}
Relax<<<gridSize, blockSize>>>(gpu_mask_array,gpu_vertex_array,gpu_node_weight_array,gpu_edge_array,gpu_edge_weight_array,gpu_min);
err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("Error: %s\n", cudaGetErrorString(err));
}
checkCuda(cudaMemcpy(node_weight_array,gpu_node_weight_array,vertex_array_size,cudaMemcpyDeviceToHost));
checkCuda(cudaMemcpy(min,gpu_min,2*sizeof(int),cudaMemcpyDeviceToHost));
}
//copying the final node weights from device to host
checkCuda(cudaMemcpy(node_weight_array,gpu_node_weight_array,vertex_array_size,cudaMemcpyDeviceToHost));
//Stop Timer
TIMER_END(start_time);
printf("Kernel Execution Time: %f ms\n", start_time);
cudaFree(gpu_vertex_array);
cudaFree(gpu_node_weight_array);
cudaFree(gpu_edge_array);
cudaFree(gpu_edge_weight_array);
cudaFree(gpu_mask_array);
free(vertex_array);
free(node_weight_array);
free(edge_array);
free(edge_weight_array);
free(mask_array);
return 0;
}
|
1e832189eafce0fe29b0b9b834a8a0c9dcb8d365.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "real.h"
#include "constant.cuh"
#include "macros.h"
/*
* ============================ bc =====================
* BC sets the boundary conditions
* ATMS 502 / CSE 566, Spring 2016
*
* Arguments:
*
* q1 real array values at current time step
* iW,iE integers indices bounding array data
* nx integer main array size, not including
* extra 'ghost' zones/points
*/
//u = u[idx3D(level,row,col,nydim,nxdim+1)]
//v = v[idx3D(level,row,col,nydim+1,nxdim)]
//w = w[idx3D(level,row,col,nydim,nxdim)]
//t = t[idx3D(level,row,col,nydim,nxdim)]
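//Note: idx3D itself is defined in macros.h (not shown here). Presumably it computes a
//row-major linear offset, e.g. idx3D(level,row,col,ny,nx) == (level*ny + row)*nx + col,
//but that formula is an assumption based on how the arrays are dimensioned above.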
__global__
void bcTB(real *u,real *v,real *w, real*t1, real *t2)
{
const int col = blockIdx.x*blockDim.x+threadIdx.x + iW;
const int row = blockIdx.y*blockDim.y+threadIdx.y + jS;
real temp1,temp2;
// Top and bottom faces for u //
if (col <= iE+1 && row <=jN) {
temp1=u[idx3D(kB,row,col,nydim,nxdim+1)];
temp2=u[idx3D(kT,row,col,nydim,nxdim+1)];
for (int k=1; k<=bcw; ++k) {
u[idx3D(kB-k,row,col,nydim,nxdim+1)] = temp1;
u[idx3D(kT+k,row,col,nydim,nxdim+1)] = temp2;
} // end for //
} // end if //
// Top and bottom faces for v //
if (col <= iE && row <=jN+1) {
temp1=v[idx3D(kB,row,col,nydim+1,nxdim)];// v[kB][row][col];
temp2=v[idx3D(kT,row,col,nydim+1,nxdim)]; //v[kT][row][col];
for (int k=1; k<=bcw; ++k) {
v[idx3D(kB-k,row,col,nydim+1,nxdim)]= temp1;
v[idx3D(kT+k,row,col,nydim+1,nxdim)]= temp2;
} // end for //
} // end if //
// Top and bottom faces for w //
if (col <= iE && row <=jN) {
temp1 = t1[idx3D(kB,row,col,nydim,nxdim)];
temp2 = t1[idx3D(kT,row,col,nydim,nxdim)];
for (int k=0; k<=bcw; ++k) {
w[idx3D(kB-k,row,col,nydim,nxdim)] = 0.0;
w[idx3D(kT+1+k,row,col,nydim,nxdim)] = 0.0;
t2[idx3D(kB-k,row,col,nydim,nxdim)] = t1[idx3D(kB-k,row,col,nydim,nxdim)] = temp1;
t2[idx3D(kT+k,row,col,nydim,nxdim)] = t1[idx3D(kT+k,row,col,nydim,nxdim)] = temp2;
} // end for //
} // end if //
} // end of bcTB() //
| 1e832189eafce0fe29b0b9b834a8a0c9dcb8d365.cu | #include "real.h"
#include "constant.cuh"
#include "macros.h"
/*
* ============================ bc =====================
* BC sets the boundary conditions
* ATMS 502 / CSE 566, Spring 2016
*
* Arguments:
*
* q1 real array values at current time step
* iW,iE integers indices bounding array data
* nx integer main array size, not including
* extra 'ghost' zones/points
*/
//u = u[idx3D(level,row,col,nydim,nxdim+1)]
//v = v[idx3D(level,row,col,nydim+1,nxdim)]
//w = w[idx3D(level,row,col,nydim,nxdim)]
//t = t[idx3D(level,row,col,nydim,nxdim)]
__global__
void bcTB(real *u,real *v,real *w, real*t1, real *t2)
{
const int col = blockIdx.x*blockDim.x+threadIdx.x + iW;
const int row = blockIdx.y*blockDim.y+threadIdx.y + jS;
real temp1,temp2;
// Top and bottom faces for u //
if (col <= iE+1 && row <=jN) {
temp1=u[idx3D(kB,row,col,nydim,nxdim+1)];
temp2=u[idx3D(kT,row,col,nydim,nxdim+1)];
for (int k=1; k<=bcw; ++k) {
u[idx3D(kB-k,row,col,nydim,nxdim+1)] = temp1;
u[idx3D(kT+k,row,col,nydim,nxdim+1)] = temp2;
} // end for //
} // end if //
// Top and bottom faces for v //
if (col <= iE && row <=jN+1) {
temp1=v[idx3D(kB,row,col,nydim+1,nxdim)];// v[kB][row][col];
temp2=v[idx3D(kT,row,col,nydim+1,nxdim)]; //v[kT][row][col];
for (int k=1; k<=bcw; ++k) {
v[idx3D(kB-k,row,col,nydim+1,nxdim)]= temp1;
v[idx3D(kT+k,row,col,nydim+1,nxdim)]= temp2;
} // end for //
} // end if //
// Top and bottom faces for w //
if (col <= iE && row <=jN) {
temp1 = t1[idx3D(kB,row,col,nydim,nxdim)];
temp2 = t1[idx3D(kT,row,col,nydim,nxdim)];
for (int k=0; k<=bcw; ++k) {
w[idx3D(kB-k,row,col,nydim,nxdim)] = 0.0;
w[idx3D(kT+1+k,row,col,nydim,nxdim)] = 0.0;
t2[idx3D(kB-k,row,col,nydim,nxdim)] = t1[idx3D(kB-k,row,col,nydim,nxdim)] = temp1;
t2[idx3D(kT+k,row,col,nydim,nxdim)] = t1[idx3D(kT+k,row,col,nydim,nxdim)] = temp2;
} // end for //
} // end if //
} // end of bcTB() //
|
e0e3e48578f3bcff6c49dc8a30598f2f1d254074.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/NativeFunctions.h>
#include <ATen/SparseTensorUtils.h>
#include <ATen/native/sparse/hip/SparseHIPApplyUtils.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <THH/THHThrustAllocator.cuh>
#include <THH/THHTensorSort.cuh>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/gather.h>
#include <thrust/generate.h>
#include <thrust/scan.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#include <thrust/unique.h>
#if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
#include <thrust/system/hip/execution_policy.h>
#endif
namespace at { namespace native {
using namespace at::sparse;
SparseTensor coalesce_sparse_cuda(const SparseTensor& self) {
int64_t nnz = self._nnz();
if (self.is_coalesced()) {
return self;
}
// NOTE: Since `coalesce` is not an in-place operation when `is_coalesced` is false,
// we should keep the original tensor intact and do coalesce on a copy of the tensor
if (nnz < 2) {
SparseTensor dst = self.clone();
dst._coalesced_(true);
return dst;
}
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
// Replace instances with
// For indices, a simple sort + unique suffices
// For values, we use a custom kernel for segmented reduction (can't use Thrust due to indirection).
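// For example, flattened indices [3, 1, 3] with values [a, b, c] coalesce to
// indices [1, 3] and values [b, a + c]; duplicate entries are summed by the
// segmented reduction kernel below (illustrative values only).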
Tensor values = self._values();
int64_t sparse_dim = self.sparse_dim();
// indices will be modified by Thrust, so we have to clone or use new storage
// here.
LongTensor indices1D = flatten_indices(self._indices(), self.sizes(), true);
LongTensor origIndices = at::empty({nnz}, self._indices().options());
LongTensor uniqueOffsets = at::empty({nnz}, self._indices().options());
typedef thrust::device_ptr<int64_t> thrust_ptr;
thrust_ptr indicesIter(indices1D.data<int64_t>());
thrust_ptr origIndicesIter(origIndices.data<int64_t>());
thrust_ptr uniqueOffsetsIter(uniqueOffsets.data<int64_t>());
// Fill sortedOrigIndices with sequential indices
thrust::counting_iterator<int64_t> countIterI(0);
thrust::counting_iterator<int64_t> countIterO(0);
thrust::copy(policy, countIterI, countIterI + nnz, origIndicesIter);
thrust::copy(policy, countIterO, countIterO + nnz, uniqueOffsetsIter);
thrust::sort_by_key(policy,
indicesIter, indicesIter + nnz,
origIndicesIter, ThrustLTOp<int64_t>()
);
// this forces device-host synchronization!
thrust::pair<thrust_ptr, thrust_ptr> newEnd = thrust::unique_by_key(policy,
indicesIter, indicesIter + nnz,
uniqueOffsetsIter
);
int64_t newNnz = newEnd.first - indicesIter;
indices1D.resize_({1, newNnz});
auto newValues_size = values.sizes().vec();
newValues_size[0] = newNnz;
Tensor newValues = at::empty(newValues_size, values.options());
// If there is no values to copy, save running the kernel.
if (newValues.numel() > 0) {
values = values.contiguous();
int64_t stride = at::prod_intlist(values.sizes().slice(1));
dim3 grid(THCCeilDiv(newNnz, (int64_t) 4), THCCeilDiv(stride, (int64_t) 128));
dim3 block(32, 4);
AT_DISPATCH_ALL_TYPES_AND_HALF(
values.type(), "coalesce_sparse_cuda", [&] {
using cuda_accscalar_t = acc_type<scalar_t, /* is_cuda */ true>;
hipLaunchKernelGGL(( apply::coalesceValuesKernel<scalar_t, cuda_accscalar_t>), dim3(grid), dim3(block), 0, stream,
uniqueOffsets.data<int64_t>(),
origIndices.data<int64_t>(),
values.data<scalar_t>(),
newValues.data<scalar_t>(),
nnz,
newNnz,
stride
);
});
}
// this grid-strided version is slower but probably more flexible
// to different sizes
// int64_t blockX = min(stride, (int64_t) 512);
// dim3 block(blockX, 512 / blockX);
// int64_t grid = min((int64_t) 1024, THCCeilDiv((int64_t) newNnz * stride, (int64_t) block.x * block.y));
// THCSTensor_coalesceValuesKernel_gridStrided<real, accreal><<<grid, block, 0, stream>>>(
// THCIndexTensor_(data)(state, uniqueOffsets),
// THCIndexTensor_(data)(state, origIndices),
// THCTensor_(data)(state, values),
// THCTensor_(data)(state, newValues),
// nnz,
// newNnz,
// stride
// );
////////////////////////////////////////////////////////////
// unflatten indices if necessary
LongTensor newIndices;
if (sparse_dim == 1) {
newIndices = indices1D;
} else {
newIndices = at::empty({sparse_dim, newNnz}, origIndices.options());
for (int64_t d = sparse_dim - 1; d >= 0; d--) {
// NB: Not a select, so I can preserve the outer dimension
LongTensor indicesSlice = newIndices.narrow(0, d, 1);
// Note for the porting guide: THCTensor_(copy) does NOT do normal
// broadcasting logic; instead, it will blast the elements from one
// to the other so long as the numel is the same
indicesSlice.copy_(indices1D);
indices1D.div_(self.size(d));
indicesSlice.add_(indices1D, -self.size(d));
}
}
////////////////////////////////////////////////////////////
SparseTensor dst = ::at::native::sparse_coo_tensor(newIndices, newValues, self.sizes())._coalesced_(true);
THCudaCheck(hipGetLastError());
return dst;
}
}} // namespace at::native
| e0e3e48578f3bcff6c49dc8a30598f2f1d254074.cu | #include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/NativeFunctions.h>
#include <ATen/SparseTensorUtils.h>
#include <ATen/native/sparse/cuda/SparseCUDAApplyUtils.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <THC/THCThrustAllocator.cuh>
#include <THC/THCTensorSort.cuh>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/gather.h>
#include <thrust/generate.h>
#include <thrust/scan.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#include <thrust/unique.h>
#if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
#include <thrust/system/cuda/execution_policy.h>
#endif
namespace at { namespace native {
using namespace at::sparse;
SparseTensor coalesce_sparse_cuda(const SparseTensor& self) {
int64_t nnz = self._nnz();
if (self.is_coalesced()) {
return self;
}
// NOTE: Since `coalesce` is not an in-place operation when `is_coalesced` is false,
// we should keep the original tensor intact and do coalesce on a copy of the tensor
if (nnz < 2) {
SparseTensor dst = self.clone();
dst._coalesced_(true);
return dst;
}
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
// Replace instances with
// For indices, a simple sort + unique suffices
// For values, we use a custom kernel for segmented reduction (can't use Thrust due to indirection).
Tensor values = self._values();
int64_t sparse_dim = self.sparse_dim();
// indices will be modified by Thrust, so we have to clone or use new storage
// here.
LongTensor indices1D = flatten_indices(self._indices(), self.sizes(), true);
LongTensor origIndices = at::empty({nnz}, self._indices().options());
LongTensor uniqueOffsets = at::empty({nnz}, self._indices().options());
typedef thrust::device_ptr<int64_t> thrust_ptr;
thrust_ptr indicesIter(indices1D.data<int64_t>());
thrust_ptr origIndicesIter(origIndices.data<int64_t>());
thrust_ptr uniqueOffsetsIter(uniqueOffsets.data<int64_t>());
// Fill sortedOrigIndices with sequential indices
thrust::counting_iterator<int64_t> countIterI(0);
thrust::counting_iterator<int64_t> countIterO(0);
thrust::copy(policy, countIterI, countIterI + nnz, origIndicesIter);
thrust::copy(policy, countIterO, countIterO + nnz, uniqueOffsetsIter);
thrust::sort_by_key(policy,
indicesIter, indicesIter + nnz,
origIndicesIter, ThrustLTOp<int64_t>()
);
// this forces device-host synchronization!
thrust::pair<thrust_ptr, thrust_ptr> newEnd = thrust::unique_by_key(policy,
indicesIter, indicesIter + nnz,
uniqueOffsetsIter
);
int64_t newNnz = newEnd.first - indicesIter;
indices1D.resize_({1, newNnz});
auto newValues_size = values.sizes().vec();
newValues_size[0] = newNnz;
Tensor newValues = at::empty(newValues_size, values.options());
// If there is no values to copy, save running the kernel.
if (newValues.numel() > 0) {
values = values.contiguous();
int64_t stride = at::prod_intlist(values.sizes().slice(1));
dim3 grid(THCCeilDiv(newNnz, (int64_t) 4), THCCeilDiv(stride, (int64_t) 128));
dim3 block(32, 4);
AT_DISPATCH_ALL_TYPES_AND_HALF(
values.type(), "coalesce_sparse_cuda", [&] {
using cuda_accscalar_t = acc_type<scalar_t, /* is_cuda */ true>;
apply::coalesceValuesKernel<scalar_t, cuda_accscalar_t><<<grid, block, 0, stream>>>(
uniqueOffsets.data<int64_t>(),
origIndices.data<int64_t>(),
values.data<scalar_t>(),
newValues.data<scalar_t>(),
nnz,
newNnz,
stride
);
});
}
// this grid-strided version is slower but probably more flexible
// to different sizes
// int64_t blockX = min(stride, (int64_t) 512);
// dim3 block(blockX, 512 / blockX);
// int64_t grid = min((int64_t) 1024, THCCeilDiv((int64_t) newNnz * stride, (int64_t) block.x * block.y));
// THCSTensor_coalesceValuesKernel_gridStrided<real, accreal><<<grid, block, 0, stream>>>(
// THCIndexTensor_(data)(state, uniqueOffsets),
// THCIndexTensor_(data)(state, origIndices),
// THCTensor_(data)(state, values),
// THCTensor_(data)(state, newValues),
// nnz,
// newNnz,
// stride
// );
////////////////////////////////////////////////////////////
// unflatten indices if necessary
LongTensor newIndices;
if (sparse_dim == 1) {
newIndices = indices1D;
} else {
newIndices = at::empty({sparse_dim, newNnz}, origIndices.options());
for (int64_t d = sparse_dim - 1; d >= 0; d--) {
// NB: Not a select, so I can preserve the outer dimension
LongTensor indicesSlice = newIndices.narrow(0, d, 1);
// Note for the porting guide: THCTensor_(copy) does NOT do normal
// broadcasting logic; instead, it will blast the elements from one
// to the other so long as the numel is the same
indicesSlice.copy_(indices1D);
indices1D.div_(self.size(d));
indicesSlice.add_(indices1D, -self.size(d));
}
}
////////////////////////////////////////////////////////////
SparseTensor dst = ::at::native::sparse_coo_tensor(newIndices, newValues, self.sizes())._coalesced_(true);
THCudaCheck(cudaGetLastError());
return dst;
}
}} // namespace at::native
|
4568fd8c62960a49e832281b2a446aeedb130f0e.hip | // !!! This is a file automatically generated by hipify!!!
/*********************************************************************************** \
* Copyright (c) 2015, NVIDIA open source projects
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* - Neither the name of SASSI nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* This is a SASSI instrumentation library for gathering branch statistics. It
* corresponds to Case Study I in,
*
* "Flexible Software Profiling of GPU Architectures"
* Stephenson et al., ISCA 2015.
*
* The application code the user instruments should be instrumented with the
* following SASSI flag: -Xptxas --sassi-inst-before="cond-branches" \
* -Xptxas --sassi-before-args="cond-branch-info".
*
* In addition, be sure to link your application with flags necessary to
* hijack "main" and "exit". You can trivially do this using GNU tools with
*
* -Xlinker "--wrap=main" -Xlinker "--wrap=exit"
*
* This will cause calls to main and exit to be replaced by calls to
* __wrap_exit(int status) and __wrap_main(int argc, char **argv), which we have
* defined below. This allows us to do initialization and finalization without
* having to worry about object constructor and destructor orders.
*
* This version of the library also lets us correlate SASS location to the
* corresponding CUDA source locations. To use this feature, you must
* compile your application with the "-lineinfo" option.
*
 * See the branch example in example/Makefile for all the flags you should use.
*
\***********************************************************************************/
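/*
 * For concreteness, one possible compile line combining the flags mentioned above.
 * The file and path names here are placeholders rather than part of the original
 * build; see example/Makefile for the authoritative recipe:
 *
 *   nvcc -lineinfo \
 *        -Xptxas --sassi-inst-before="cond-branches" \
 *        -Xptxas --sassi-before-args="cond-branch-info" \
 *        -Xlinker "--wrap=main" -Xlinker "--wrap=exit" \
 *        <your_app>.cu <this_library>.o -o <your_app>
 */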
#define __STDC_FORMAT_MACROS
#include <assert.h>
#include <cupti.h>
#include <inttypes.h>
#include <map>
#include <stdint.h>
#include <stdio.h>
#include <string>
#include <unistd.h>
#include "sassi_intrinsics.h"
#include "sassi_dictionary.hpp"
#include "sassi_srcmap.hpp"
#include <sassi/sassi-core.hpp>
#include <sassi/sassi-branch.hpp>
struct BranchCounter {
uint64_t address;
int32_t branchType; // The branch type.
int32_t taggedUnanimous; // Branch had .U modifier, so compiler knows...
unsigned long long totalBranches;
unsigned long long takenThreads;
unsigned long long takenNotThreads;
unsigned long long divergentBranches; // Not all branches go the same way.
unsigned long long activeThreads; // Number of active threads.
};
static sassi::src_mapper *sassiMapper;
// The actual dictionary of counters, where the key is a branch's PC, and
// the value is the set of counters associated with it.
static __managed__ sassi::dictionary<uint64_t, BranchCounter> *sassiStats;
// Convert the SASSIBranchType to a string that we can print. See the
// CUDA binary utilities webpage for more information about these types.
const char *SASSIBranchTypeAsString[] = {
"BRX", "BRA", "RET", "EXIT", "SYNC", "OTHER"
};
///////////////////////////////////////////////////////////////////////////////////
///
/// Collect the stats and print them out before the device counters are reset.
///
///////////////////////////////////////////////////////////////////////////////////
static void sassi_finalize(__attribute__((unused)) sassi::cupti_wrapper *wrapper,
__attribute__((unused)) const CUpti_CallbackData *cb)
{
// This function will be called either when 1) the device is reset, or 2) the
// the program is about to exit. Let's check to see whether the sassiStats
// map is still valid. For instance, the user could have reset the device
// before the program exited, which would essentially invalidate all device
  // data. (In fact, explicitly resetting the device before program exit is
// considered best practice.)
if (sassiMapper->is_device_state_valid())
{
FILE *fRes = fopen("sassi-branch.txt", "w");
fprintf(fRes, "%-16.16s %-10.10s %-10.10s %-10.10s %-10.10s %-10.10s %-8.8s %-8.8s Location\n",
"Address", "Total/32", "Dvrge/32", "Active", "Taken", "NTaken",
"Type", ".U");
// Get the SASS PUPC to source code line mapping.
auto const locMapper = sassiMapper->get_location_map();
sassiStats->map([fRes,&locMapper](uint64_t& pupc, BranchCounter& val) {
assert(val.address == pupc);
fprintf(fRes, "%-16.16" PRIx64
" %-10.llu %-10.llu %-10.llu %-10.llu %-10.llu %-8.4s %-8.d ",
pupc,
val.totalBranches,
val.divergentBranches,
val.activeThreads,
val.takenThreads,
val.takenNotThreads,
SASSIBranchTypeAsString[val.branchType],
val.taggedUnanimous
);
// See if there is a source code mapping for this PUPC. If you
// compiled your code with "-lineinfo" there should be a valid
// mapping.
auto it = locMapper.find(pupc);
if (it != locMapper.end()) {
fprintf(fRes, "%s, line %d\n", it->second.file_name->c_str(), it->second.line_num);
} else {
fprintf(fRes, "\n");
}
});
fclose(fRes);
}
}
///////////////////////////////////////////////////////////////////////////////////
///
/// We will compile our application using ld's --wrap option, which in this
/// case lets us replace calls to "exit" with calls to "__wrap_exit". See
/// the make target "ophist-fermi" in ./example/Makefile to see how this
/// is done.
///
/// This should allow us to perform CUDA operations before the CUDA runtime
/// starts shutting down. In particular, we want to copy our
/// "dynamic_instr_counts" off the device. If we used UVM, this would happen
/// automatically for us. But since we don't have the luxury of using UVM
/// for Fermi, we have to make sure that the CUDA runtime is still up and
/// running before trying to issue a hipMemcpy. Hence these shenanigans.
///
///////////////////////////////////////////////////////////////////////////////////
extern "C" void __real_exit(int status);
extern "C" void __wrap_exit(int status)
{
sassi_finalize(NULL, NULL);
__real_exit(status);
}
///////////////////////////////////////////////////////////////////////////////////
///
/// For programs that don't call exit explicitly, let's catch the fallthrough.
///
///////////////////////////////////////////////////////////////////////////////////
extern "C" int __real_main(int argc, char **argv);
extern "C" int __wrap_main(int argc, char **argv)
{
// Initialize a src_mapper to give us SASS PC->CUDA line mappings.
sassiMapper = new sassi::src_mapper();
// Initialize a hashmap to keep track of statistics of branches. The key
// is the PC, the value is a BranchCounter.
sassiStats = new sassi::dictionary<uint64_t, BranchCounter>();
// Whenever the device is reset, be sure to print out the counters before
// they are clobbered.
sassiMapper->register_callback(sassi::cupti_wrapper::event_type::DEVICE_RESET,
sassi::cupti_wrapper::callback_before,
sassi_finalize);
int ret = __real_main(argc, argv);
sassi_finalize(NULL, NULL);
return ret;
}
///////////////////////////////////////////////////////////////////////////////////
//
/// This function will be inserted before every conditional branch instruction.
//
///////////////////////////////////////////////////////////////////////////////////
__device__ void sassi_before_handler(SASSIBeforeParams *bp, SASSICondBranchParams *brp)
{
// Find out thread index within the warp.
int threadIdxInWarp = get_laneid();
// Get masks and counts of 1) active threads in this warp,
// 2) threads that take the branch, and
// 3) threads that do not take the branch.
int active = __ballot(1);
bool dir = brp->GetDirection();
int taken = __ballot(dir == true);
int ntaken = __ballot(dir == false);
int numActive = __popc(active);
int numTaken = __popc(taken);
int numNotTaken = __popc(ntaken);
bool divergent = (numTaken != numActive && numNotTaken != numActive);
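  // Worked example with illustrative numbers (not from any particular run):
  // if numActive = 32, numTaken = 20 and numNotTaken = 12, neither count equals
  // numActive, so 'divergent' is true and this warp is counted as a divergent
  // branch below.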
// The first active thread in each warp gets to write results.
if ((__ffs(active)-1) == threadIdxInWarp) {
// Get the address, we'll use it for hashing.
uint64_t instAddr = bp->GetPUPC();
// Looks up the counters associated with 'instAddr', but if no such entry
    // exists, initialize the counters in the lambda.
BranchCounter *stats = (*sassiStats).getOrInit(instAddr, [instAddr,brp](BranchCounter* v) {
v->address = instAddr;
v->branchType = brp->GetType();
v->taggedUnanimous = brp->IsUnanimous();
});
// Why not sanity check the hash map?
assert(stats->address == instAddr);
assert(numTaken + numNotTaken == numActive);
// Increment the various counters that are associated
// with this instruction appropriately.
atomicAdd(&(stats->totalBranches), 1ULL);
atomicAdd(&(stats->activeThreads), numActive);
atomicAdd(&(stats->takenThreads), numTaken);
atomicAdd(&(stats->takenNotThreads), numNotTaken);
atomicAdd(&(stats->divergentBranches), divergent);
}
}
| 4568fd8c62960a49e832281b2a446aeedb130f0e.cu | /*********************************************************************************** \
* Copyright (c) 2015, NVIDIA open source projects
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* - Neither the name of SASSI nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* This is a SASSI instrumentation library for gathering branch statistics. It
* corresponds to Case Study I in,
*
* "Flexible Software Profiling of GPU Architectures"
* Stephenson et al., ISCA 2015.
*
* The application code the user instruments should be instrumented with the
* following SASSI flag: -Xptxas --sassi-inst-before="cond-branches" \
* -Xptxas --sassi-before-args="cond-branch-info".
*
* In addition, be sure to link your application with flags necessary to
* hijack "main" and "exit". You can trivially do this using GNU tools with
*
* -Xlinker "--wrap=main" -Xlinker "--wrap=exit"
*
* This will cause calls to main and exit to be replaced by calls to
* __wrap_exit(int status) and __wrap_main(int argc, char **argv), which we have
* defined below. This allows us to do initialization and finalization without
* having to worry about object constructor and destructor orders.
*
* This version of the library also lets us correlate SASS location to the
* corresponding CUDA source locations. To use this feature, you must
* compile your application with the "-lineinfo" option.
*
 * See the branch example in example/Makefile for all the flags you should use.
*
\***********************************************************************************/
#define __STDC_FORMAT_MACROS
#include <assert.h>
#include <cupti.h>
#include <inttypes.h>
#include <map>
#include <stdint.h>
#include <stdio.h>
#include <string>
#include <unistd.h>
#include "sassi_intrinsics.h"
#include "sassi_dictionary.hpp"
#include "sassi_srcmap.hpp"
#include <sassi/sassi-core.hpp>
#include <sassi/sassi-branch.hpp>
struct BranchCounter {
uint64_t address;
int32_t branchType; // The branch type.
int32_t taggedUnanimous; // Branch had .U modifier, so compiler knows...
unsigned long long totalBranches;
unsigned long long takenThreads;
unsigned long long takenNotThreads;
unsigned long long divergentBranches; // Not all branches go the same way.
unsigned long long activeThreads; // Number of active threads.
};
static sassi::src_mapper *sassiMapper;
// The actual dictionary of counters, where the key is a branch's PC, and
// the value is the set of counters associated with it.
static __managed__ sassi::dictionary<uint64_t, BranchCounter> *sassiStats;
// Convert the SASSIBranchType to a string that we can print. See the
// CUDA binary utilities webpage for more information about these types.
const char *SASSIBranchTypeAsString[] = {
"BRX", "BRA", "RET", "EXIT", "SYNC", "OTHER"
};
///////////////////////////////////////////////////////////////////////////////////
///
/// Collect the stats and print them out before the device counters are reset.
///
///////////////////////////////////////////////////////////////////////////////////
static void sassi_finalize(__attribute__((unused)) sassi::cupti_wrapper *wrapper,
__attribute__((unused)) const CUpti_CallbackData *cb)
{
// This function will be called either when 1) the device is reset, or 2) the
// the program is about to exit. Let's check to see whether the sassiStats
// map is still valid. For instance, the user could have reset the device
// before the program exited, which would essentially invalidate all device
  // data. (In fact, explicitly resetting the device before program exit is
// considered best practice.)
if (sassiMapper->is_device_state_valid())
{
FILE *fRes = fopen("sassi-branch.txt", "w");
fprintf(fRes, "%-16.16s %-10.10s %-10.10s %-10.10s %-10.10s %-10.10s %-8.8s %-8.8s Location\n",
"Address", "Total/32", "Dvrge/32", "Active", "Taken", "NTaken",
"Type", ".U");
// Get the SASS PUPC to source code line mapping.
auto const locMapper = sassiMapper->get_location_map();
sassiStats->map([fRes,&locMapper](uint64_t& pupc, BranchCounter& val) {
assert(val.address == pupc);
fprintf(fRes, "%-16.16" PRIx64
" %-10.llu %-10.llu %-10.llu %-10.llu %-10.llu %-8.4s %-8.d ",
pupc,
val.totalBranches,
val.divergentBranches,
val.activeThreads,
val.takenThreads,
val.takenNotThreads,
SASSIBranchTypeAsString[val.branchType],
val.taggedUnanimous
);
// See if there is a source code mapping for this PUPC. If you
// compiled your code with "-lineinfo" there should be a valid
// mapping.
auto it = locMapper.find(pupc);
if (it != locMapper.end()) {
fprintf(fRes, "%s, line %d\n", it->second.file_name->c_str(), it->second.line_num);
} else {
fprintf(fRes, "\n");
}
});
fclose(fRes);
}
}
///////////////////////////////////////////////////////////////////////////////////
///
/// We will compile our application using ld's --wrap option, which in this
/// case lets us replace calls to "exit" with calls to "__wrap_exit". See
/// the make target "ophist-fermi" in ./example/Makefile to see how this
/// is done.
///
/// This should allow us to perform CUDA operations before the CUDA runtime
/// starts shutting down. In particular, we want to copy our
/// "dynamic_instr_counts" off the device. If we used UVM, this would happen
/// automatically for us. But since we don't have the luxury of using UVM
/// for Fermi, we have to make sure that the CUDA runtime is still up and
/// running before trying to issue a cudaMemcpy. Hence these shenanigans.
///
///////////////////////////////////////////////////////////////////////////////////
extern "C" void __real_exit(int status);
extern "C" void __wrap_exit(int status)
{
sassi_finalize(NULL, NULL);
__real_exit(status);
}
///////////////////////////////////////////////////////////////////////////////////
///
/// For programs that don't call exit explicitly, let's catch the fallthrough.
///
///////////////////////////////////////////////////////////////////////////////////
extern "C" int __real_main(int argc, char **argv);
extern "C" int __wrap_main(int argc, char **argv)
{
// Initialize a src_mapper to give us SASS PC->CUDA line mappings.
sassiMapper = new sassi::src_mapper();
// Initialize a hashmap to keep track of statistics of branches. The key
// is the PC, the value is a BranchCounter.
sassiStats = new sassi::dictionary<uint64_t, BranchCounter>();
// Whenever the device is reset, be sure to print out the counters before
// they are clobbered.
sassiMapper->register_callback(sassi::cupti_wrapper::event_type::DEVICE_RESET,
sassi::cupti_wrapper::callback_before,
sassi_finalize);
int ret = __real_main(argc, argv);
sassi_finalize(NULL, NULL);
return ret;
}
///////////////////////////////////////////////////////////////////////////////////
//
/// This function will be inserted before every conditional branch instruction.
//
///////////////////////////////////////////////////////////////////////////////////
__device__ void sassi_before_handler(SASSIBeforeParams *bp, SASSICondBranchParams *brp)
{
// Find out thread index within the warp.
int threadIdxInWarp = get_laneid();
// Get masks and counts of 1) active threads in this warp,
// 2) threads that take the branch, and
// 3) threads that do not take the branch.
int active = __ballot(1);
bool dir = brp->GetDirection();
int taken = __ballot(dir == true);
int ntaken = __ballot(dir == false);
int numActive = __popc(active);
int numTaken = __popc(taken);
int numNotTaken = __popc(ntaken);
bool divergent = (numTaken != numActive && numNotTaken != numActive);
// The first active thread in each warp gets to write results.
if ((__ffs(active)-1) == threadIdxInWarp) {
// Get the address, we'll use it for hashing.
uint64_t instAddr = bp->GetPUPC();
// Looks up the counters associated with 'instAddr', but if no such entry
    // exists, initialize the counters in the lambda.
BranchCounter *stats = (*sassiStats).getOrInit(instAddr, [instAddr,brp](BranchCounter* v) {
v->address = instAddr;
v->branchType = brp->GetType();
v->taggedUnanimous = brp->IsUnanimous();
});
// Why not sanity check the hash map?
assert(stats->address == instAddr);
assert(numTaken + numNotTaken == numActive);
// Increment the various counters that are associated
// with this instruction appropriately.
atomicAdd(&(stats->totalBranches), 1ULL);
atomicAdd(&(stats->activeThreads), numActive);
atomicAdd(&(stats->takenThreads), numTaken);
atomicAdd(&(stats->takenNotThreads), numNotTaken);
atomicAdd(&(stats->divergentBranches), divergent);
}
}
|
3cd77d2dd05159f13529967513cfd1a7050b5630.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include "SFML\Graphics.hpp"
#define N 64
int onedim(int n, int i, int j) { return n*i + j; }
void Update(int w, int h, int *map, int *stats) {
for (int i = 1; i < w - 1; i++) {
for (int j = 1; j < h - 1; j++) {
int a = map[onedim(h, i - 1, j + 1)] + map[onedim(h, i, j + 1)] + map[onedim(h, i + 1, j + 1)] +
map[onedim(h, i - 1, j)] + 0 + map[onedim(h, i + 1, j)] +
map[onedim(h, i - 1, j - 1)] + map[onedim(h, i, j - 1)] + map[onedim(h, i + 1, j - 1)];
if (map[onedim(h, i, j)] == 1) {
stats[onedim(h, i, j)] = (int)((a == 2 || a == 3) ? 1 : 0);
}
else {
stats[onedim(h, i, j)] = (int)(a == 3 ? 1 : 0);
}
}
}
}
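// Both Update() above and Update_CUDA() below encode Conway's Game of Life:
// a live cell survives with 2 or 3 live neighbours (a == 2 || a == 3), a dead
// cell becomes alive with exactly 3 neighbours (a == 3), and every other cell
// is dead in the next generation.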
__global__ void Update_CUDA(int w, int h, int *map, int *stats) {
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
int stride_x = gridDim.x * blockDim.x;
int stride_y = gridDim.y * blockDim.y;
if (row > 0 && col > 0 && row < w - 1 && col < h - 1) {
for (int i = row; i < w - 1; i += stride_x) {
for (int j = col; j < h - 1; j += stride_y) {
int a = map[h*(i - 1)+j + 1] + map[h*i+j + 1] + map[h*(i + 1)+j + 1] +
map[h*(i - 1)+j] + 0 + map[h*(i + 1)+j] +
map[h*(i - 1)+j - 1] + map[h*i+j - 1] + map[h*(i + 1)+j - 1];
if (map[h*i+j] == 1) {
stats[h*i+j] = (int)((a == 2 || a == 3) ? 1 : 0);
}
else {
stats[h*i+j] = (int)(a == 3 ? 1 : 0);
}
}
}
}
}
int main() {
const int w = 1300;
const int h = 766;
const float wf = (float)w;
const float hf = (float)h;
int *map, *stats;
sf::Uint8 *pixels=new sf::Uint8[w * h * 4];
sf::RenderWindow window(sf::VideoMode(w, h), "SFML works!");
sf::Clock clock;
sf::Time time;
sf::RectangleShape player(sf::Vector2f(wf, hf));
player.setPosition(0.f, 0.f);
sf::Texture player_texture;
sf::Image image;
sf::Image *imo;
image.create(w, h);
hipMallocManaged(&map, w*h*sizeof(int));
hipMallocManaged(&stats, w*h*sizeof(int));
	dim3 threads_per_block(32, 32, 1); // A 32 x 32 block of threads
dim3 number_of_blocks((w / threads_per_block.x) + 1, (h / threads_per_block.y) + 1, 1);
for (int i = 0; i < w; i++) {
for (int j = 0; j < h; j++) {
map[onedim(h,i,j)] = 0;
if (((float)rand() / (RAND_MAX + 1.0)) < 0.75f) {
image.setPixel(i, j, sf::Color::Blue);
map[onedim(h,i,j)] = 1;
}
}
}
player_texture.loadFromImage(image);
player.setTexture(&player_texture);
while (window.isOpen())
{
time = clock.getElapsedTime();
clock.restart().asSeconds();
float fps = 1.0f / time.asSeconds();
printf("FPS:: %f \r", fps);
sf::Event event;
while (window.pollEvent(event))
{
switch (event.type)
{
case sf::Event::Closed:
window.close();
break;
case sf::Event::Resized:
printf("Windows size : %d, %d \n", event.size.height, event.size.width);
//window.setView(sf::View(sf::FloatRect(0, 0, event.size.width, event.size.height)));
break;
case sf::Event::TextEntered:
if (event.text.unicode < 128) {
printf("%c", event.text.unicode);
}
break;
}
}
		// Update(w, h, map, stats); // for test (CPU reference)
Update_CUDA << < number_of_blocks, threads_per_block >> > (w, h, map, stats);
hipDeviceSynchronize();
for (int i = 0; i < w; i++) {
for (int j = 0; j < h; j++) {
map[onedim(h, i, j)] = stats[onedim(h, i, j)];
if (map[onedim(h, i, j)] == 1) {
image.setPixel(i, j, sf::Color::Blue);
}
else {
image.setPixel(i, j, sf::Color::Black);
}
}
}
player_texture.loadFromImage(image);
player.setTexture(&player_texture);
window.clear();
window.draw(player);
window.display();
}
hipFree(map); hipFree(stats);
return 0;
} | 3cd77d2dd05159f13529967513cfd1a7050b5630.cu | #include <stdio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include "SFML\Graphics.hpp"
#define N 64
int onedim(int n, int i, int j) { return n*i + j; }
void Update(int w, int h, int *map, int *stats) {
for (int i = 1; i < w - 1; i++) {
for (int j = 1; j < h - 1; j++) {
int a = map[onedim(h, i - 1, j + 1)] + map[onedim(h, i, j + 1)] + map[onedim(h, i + 1, j + 1)] +
map[onedim(h, i - 1, j)] + 0 + map[onedim(h, i + 1, j)] +
map[onedim(h, i - 1, j - 1)] + map[onedim(h, i, j - 1)] + map[onedim(h, i + 1, j - 1)];
if (map[onedim(h, i, j)] == 1) {
stats[onedim(h, i, j)] = (int)((a == 2 || a == 3) ? 1 : 0);
}
else {
stats[onedim(h, i, j)] = (int)(a == 3 ? 1 : 0);
}
}
}
}
__global__ void Update_CUDA(int w, int h, int *map, int *stats) {
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
int stride_x = gridDim.x * blockDim.x;
int stride_y = gridDim.y * blockDim.y;
if (row > 0 && col > 0 && row < w - 1 && col < h - 1) {
for (int i = row; i < w - 1; i += stride_x) {
for (int j = col; j < h - 1; j += stride_y) {
int a = map[h*(i - 1)+j + 1] + map[h*i+j + 1] + map[h*(i + 1)+j + 1] +
map[h*(i - 1)+j] + 0 + map[h*(i + 1)+j] +
map[h*(i - 1)+j - 1] + map[h*i+j - 1] + map[h*(i + 1)+j - 1];
if (map[h*i+j] == 1) {
stats[h*i+j] = (int)((a == 2 || a == 3) ? 1 : 0);
}
else {
stats[h*i+j] = (int)(a == 3 ? 1 : 0);
}
}
}
}
}
int main() {
const int w = 1300;
const int h = 766;
const float wf = (float)w;
const float hf = (float)h;
int *map, *stats;
sf::Uint8 *pixels=new sf::Uint8[w * h * 4];
sf::RenderWindow window(sf::VideoMode(w, h), "SFML works!");
sf::Clock clock;
sf::Time time;
sf::RectangleShape player(sf::Vector2f(wf, hf));
player.setPosition(0.f, 0.f);
sf::Texture player_texture;
sf::Image image;
sf::Image *imo;
image.create(w, h);
cudaMallocManaged(&map, w*h*sizeof(int));
cudaMallocManaged(&stats, w*h*sizeof(int));
	dim3 threads_per_block(32, 32, 1); // A 32 x 32 block of threads
dim3 number_of_blocks((w / threads_per_block.x) + 1, (h / threads_per_block.y) + 1, 1);
for (int i = 0; i < w; i++) {
for (int j = 0; j < h; j++) {
map[onedim(h,i,j)] = 0;
if (((float)rand() / (RAND_MAX + 1.0)) < 0.75f) {
image.setPixel(i, j, sf::Color::Blue);
map[onedim(h,i,j)] = 1;
}
}
}
player_texture.loadFromImage(image);
player.setTexture(&player_texture);
while (window.isOpen())
{
time = clock.getElapsedTime();
clock.restart().asSeconds();
float fps = 1.0f / time.asSeconds();
printf("FPS:: %f \r", fps);
sf::Event event;
while (window.pollEvent(event))
{
switch (event.type)
{
case sf::Event::Closed:
window.close();
break;
case sf::Event::Resized:
printf("Windows size : %d, %d \n", event.size.height, event.size.width);
//window.setView(sf::View(sf::FloatRect(0, 0, event.size.width, event.size.height)));
break;
case sf::Event::TextEntered:
if (event.text.unicode < 128) {
printf("%c", event.text.unicode);
}
break;
}
}
		// Update(w, h, map, stats); // for test (CPU reference)
Update_CUDA << < number_of_blocks, threads_per_block >> > (w, h, map, stats);
cudaDeviceSynchronize();
for (int i = 0; i < w; i++) {
for (int j = 0; j < h; j++) {
map[onedim(h, i, j)] = stats[onedim(h, i, j)];
if (map[onedim(h, i, j)] == 1) {
image.setPixel(i, j, sf::Color::Blue);
}
else {
image.setPixel(i, j, sf::Color::Black);
}
}
}
player_texture.loadFromImage(image);
player.setTexture(&player_texture);
window.clear();
window.draw(player);
window.display();
}
cudaFree(map); cudaFree(stats);
return 0;
} |
4a7824f2c73132a81d05878c9261c85ea37dc0b9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright (c) 2012-2013 The Ohio State University.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <string.h>
#include <unistd.h>
#include <time.h>
#include "scanImpl.cu"
#include "../include/common.h"
#include "../include/gpuCudaLib.h"
//#define TEST 1
__global__ void static equal(int * a, int n, unsigned int constC){
int offset = blockIdx.x*blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i=offset; i<n; i+=stride){
a[i] = constC;
}
}
__global__ void static genScanFilter_int_lth_bit(int * col,int n, unsigned int constC,int * lt, int * eq){
int offset = blockIdx.x*blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i=offset; i < n; i+=stride){
lt[i] = lt[i] | (eq[i] & ~constC & col[i]);
eq[i] = eq[i] & ~(col[i] ^ constC);
//printf(" %d %u %u %u\n",i,lt[i],eq[i],col[i]);
}
}
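// Descriptive note on the kernel above: it processes one bit position per call,
// from the most significant bit down. 'constC' is bit j of the search constant
// broadcast across a full word, and 'col' holds bit j of 32 packed elements.
// eq[i] keeps the lanes whose bits have matched constC so far, and lt[i] is set
// for lanes that were still equal and now have a 1 where the constant has a 0.
// main() later treats lanes with neither flag set as passing the predicate.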
inline
hipError_t checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n",
hipGetErrorString(result));
assert(result == hipSuccess);
}
#endif
return result;
}
inline int bit_constC(int where,int j){
int constC = (((1U << (31 - j )) & where)>>(31 - j));
if(constC != 0)
constC = (1LL << 32) - 1;
return constC;
}
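// Example with made-up values: if bit (31 - j) of 'where' is set, constC is
// promoted to all ones (0xFFFFFFFF after truncation to int); otherwise it stays
// 0. The result is the constant's bit broadcast across a 32-bit word for use in
// genScanFilter_int_lth_bit above.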
void profilebitscan(int *h_a,
int *h_b,
int *d,
int *lt,
int *eq,
int n,
unsigned int where,
char *desc,
unsigned int loopTotal)
{
dim3 block(256);
dim3 grid(2048);
float time,stime;
// events for timing
hipEvent_t startEvent, stopEvent;
checkCuda( hipEventCreate(&startEvent) );
checkCuda( hipEventCreate(&stopEvent) );
stime=0;
int bytes=n * sizeof(int);
for(int loop = 1; loop <= loopTotal; loop++){
checkCuda( hipEventRecord(startEvent, 0) );
checkCuda( hipMemcpy(d, h_a, bytes, hipMemcpyHostToDevice) );
checkCuda(hipDeviceSynchronize());
unsigned int c = 0;
for(int i = 0;i < 32;i++) c += (1u << i);
hipLaunchKernelGGL(( equal), dim3(grid),dim3(block), 0, 0, lt, n/32, 0) ;
hipLaunchKernelGGL(( equal), dim3(grid),dim3(block), 0, 0, eq, n/32, c) ;
checkCuda(hipDeviceSynchronize());
for(int j = 0; j < 32; ++j){
int constC =bit_constC(where,j);
// printf("%u %u\n",j,((((1U << (31 - j )) & where)>>(31-j))<< k));
hipLaunchKernelGGL(( genScanFilter_int_lth_bit), dim3(grid),dim3(block), 0, 0, d + j * (n / 32), n / 32, constC, lt, eq);
checkCuda(hipDeviceSynchronize());
}
checkCuda( hipMemcpy(h_b, lt, n / 32 * 4, hipMemcpyDeviceToHost) );
checkCuda( hipMemcpy(h_b + n / 32 , eq, n / 32 * 4, hipMemcpyDeviceToHost) );
checkCuda( hipEventRecord(stopEvent, 0) );
checkCuda( hipEventSynchronize(stopEvent) );
checkCuda( hipEventElapsedTime(&time, startEvent, stopEvent) );
stime += time;
printf("time=%f\n",stime);
}
printf("%f\n" ,bytes * 1e-6/(stime / loopTotal));
checkCuda( hipEventDestroy(startEvent) );
checkCuda( hipEventDestroy(stopEvent) );
}
int main(int argc, char ** argv)
{
#ifdef TEST
freopen("scan.in","r",stdin);
freopen("scan_bit.out","w",stdout);
#endif
dim3 block(256);
dim3 grid(2048);
int inputN;
sscanf(argv[1],"%d",&inputN);
unsigned int nElements = inputN;
const unsigned int bytes = nElements * sizeof(int);
#ifdef TEST
scanf("%d",&nElements);
#endif
// host arrays
int *h_aPageable, *h_bPageable,*h_bitPageable;
int *h_aPinned, *h_bPinned;
// device array
int *d_a,*lt,*eq;
// allocate and initialize
h_aPageable = (int*)malloc(bytes );
h_bPageable = (int*)malloc(bytes );
	h_bitPageable =(int *)calloc(nElements, sizeof(int)); // host pageable, zeroed because the packing loop below uses +=
checkCuda( hipHostMalloc((void**)&h_aPinned, bytes ) ); // host pinned
checkCuda( hipHostMalloc((void**)&h_bPinned, bytes ) );
checkCuda( hipMalloc((void**)&d_a, bytes ) ); // device
checkCuda( hipMalloc((void**)<, bytes ) ); // device return
checkCuda( hipMalloc((void**)&eq, bytes ) );
srand(time(0));
for (int i = 0; i < nElements; ++i) h_aPageable[i] = rand()%(1U<<31);
#ifdef TEST
for (int i = 0; i < nElements; ++i) scanf("%d",h_aPageable + i);
#endif
for (int i = 0; i < nElements; ++i)
for(int j = 31; j >= 0; --j){
h_bitPageable[i / 32 + (31-j)*(nElements/32)] += (((h_aPageable[i] &(1<<j))>>j)<<(31 - i % 32));
//h_bitPageable[i / 32 + (31-j)*(nElements/32)] += 0;
}
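	// Layout note: h_bitPageable now holds 32 bit planes of nElements/32 words
	// each; plane (31 - j) stores bit j of every element, with element i packed
	// into bit (31 - i % 32) of word i / 32 of that plane. profilebitscan()
	// below scans one plane per kernel pass, most significant bit first.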
//for(int i = 0;i < nElements; i++) h_bitPageable[i] = rand()%(1<<31);
memcpy(h_aPinned, h_aPageable, bytes );
memset(h_bPageable, 0, bytes);
memset(h_bPinned, 0, bytes);
// output device info and transfer size
hipDeviceProp_t prop;
checkCuda( hipGetDeviceProperties(&prop, 0) );
// printf("\nDevice: %s\n", prop.name);
// if(bytes< 1024){
// printf("scan size (B): %d\n", bytes);
// }else if (bytes < 1024 * 1024)
// {
// printf("scan size (KB): %d\n", bytes / (1024));
// }else{
// printf("scan size (MB): %d\n", bytes / (1024 * 1024));
// }
int constC = rand()%(1U<<31);
#ifdef TEST
scanf("%d",&constC);
#endif
// perform scan eq
// profilescan(h_aPageable, h_bPageable, d_a, filter, nElements, constC,"Pageable",20);
//profilescan(h_aPinned, h_bPinned, d_a, filter,nElements, constC,"Pinned",20);
profilebitscan(h_bitPageable, h_bPageable, d_a, lt, eq, nElements, constC,"Pageable",1);
// for(int i = 0; i < nElements; i++) printf("%3u ",h_aPageable[i]);printf("\n");
// for(int i = 0; i < nElements; i++) printf("%3u ",((h_bPageable[i/32] & (1u << (31 - i % 32)))>> (31 - i % 32)));printf("\n");
// for(int i = 0; i < nElements; i++) printf("%3u ",((h_bPageable[i/32 + nElements/32] & (1u << (31 - i % 32)))>> (31 - i % 32)));printf("\n");
// for(int i = 0; i < nElements; i++) printf("%3u ",h_bitPageable[i]);printf("\n");
#ifdef TEST
for(int i = 0; i < nElements; i++) {
int x =(h_bPageable[i/32] & (1u << (31 - i % 32)))>> (31 - i % 32);
int y =(h_bPageable[i/32 + nElements/32] & (1u << (31 - i % 32)))>> (31 - i % 32);
if(x ==0 && y == 0) printf("%d\n",1);
else printf("%d\n", 0);
// printf("%d|%d\n",x,y);
}
#endif
// cleanup
hipFree(lt);
hipFree(eq);
hipHostFree(h_aPinned);
hipHostFree(h_bPinned);
free(h_aPageable);
}
| 4a7824f2c73132a81d05878c9261c85ea37dc0b9.cu | /*
Copyright (c) 2012-2013 The Ohio State University.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <string.h>
#include <unistd.h>
#include <time.h>
#include "scanImpl.cu"
#include "../include/common.h"
#include "../include/gpuCudaLib.h"
//#define TEST 1
__global__ void static equal(int * a, int n, unsigned int constC){
int offset = blockIdx.x*blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i=offset; i<n; i+=stride){
a[i] = constC;
}
}
__global__ void static genScanFilter_int_lth_bit(int * col,int n, unsigned int constC,int * lt, int * eq){
int offset = blockIdx.x*blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i=offset; i < n; i+=stride){
lt[i] = lt[i] | (eq[i] & ~constC & col[i]);
eq[i] = eq[i] & ~(col[i] ^ constC);
//printf(" %d %u %u %u\n",i,lt[i],eq[i],col[i]);
}
}
inline
cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n",
cudaGetErrorString(result));
assert(result == cudaSuccess);
}
#endif
return result;
}
inline int bit_constC(int where,int j){
int constC = (((1U << (31 - j )) & where)>>(31 - j));
if(constC != 0)
constC = (1LL << 32) - 1;
return constC;
}
void profilebitscan(int *h_a,
int *h_b,
int *d,
int *lt,
int *eq,
int n,
unsigned int where,
char *desc,
unsigned int loopTotal)
{
dim3 block(256);
dim3 grid(2048);
float time,stime;
// events for timing
cudaEvent_t startEvent, stopEvent;
checkCuda( cudaEventCreate(&startEvent) );
checkCuda( cudaEventCreate(&stopEvent) );
stime=0;
int bytes=n * sizeof(int);
for(int loop = 1; loop <= loopTotal; loop++){
checkCuda( cudaEventRecord(startEvent, 0) );
checkCuda( cudaMemcpy(d, h_a, bytes, cudaMemcpyHostToDevice) );
checkCuda(cudaThreadSynchronize());
unsigned int c = 0;
for(int i = 0;i < 32;i++) c += (1u << i);
equal<<<grid,block>>>(lt, n/32, 0) ;
equal<<<grid,block>>>(eq, n/32, c) ;
checkCuda(cudaThreadSynchronize());
for(int j = 0; j < 32; ++j){
int constC =bit_constC(where,j);
// printf("%u %u\n",j,((((1U << (31 - j )) & where)>>(31-j))<< k));
genScanFilter_int_lth_bit<<<grid,block>>>(d + j * (n / 32), n / 32, constC, lt, eq);
checkCuda(cudaThreadSynchronize());
}
checkCuda( cudaMemcpy(h_b, lt, n / 32 * 4, cudaMemcpyDeviceToHost) );
checkCuda( cudaMemcpy(h_b + n / 32 , eq, n / 32 * 4, cudaMemcpyDeviceToHost) );
checkCuda( cudaEventRecord(stopEvent, 0) );
checkCuda( cudaEventSynchronize(stopEvent) );
checkCuda( cudaEventElapsedTime(&time, startEvent, stopEvent) );
stime += time;
printf("time=%f\n",stime);
}
printf("%f\n" ,bytes * 1e-6/(stime / loopTotal));
checkCuda( cudaEventDestroy(startEvent) );
checkCuda( cudaEventDestroy(stopEvent) );
}
int main(int argc, char ** argv)
{
#ifdef TEST
freopen("scan.in","r",stdin);
freopen("scan_bit.out","w",stdout);
#endif
dim3 block(256);
dim3 grid(2048);
int inputN;
sscanf(argv[1],"%d",&inputN);
unsigned int nElements = inputN;
const unsigned int bytes = nElements * sizeof(int);
#ifdef TEST
scanf("%d",&nElements);
#endif
// host arrays
int *h_aPageable, *h_bPageable,*h_bitPageable;
int *h_aPinned, *h_bPinned;
// device array
int *d_a,*lt,*eq;
// allocate and initialize
h_aPageable = (int*)malloc(bytes );
h_bPageable = (int*)malloc(bytes );
	h_bitPageable =(int *)calloc(nElements, sizeof(int)); // host pageable, zeroed because the packing loop below uses +=
checkCuda( cudaMallocHost((void**)&h_aPinned, bytes ) ); // host pinned
checkCuda( cudaMallocHost((void**)&h_bPinned, bytes ) );
checkCuda( cudaMalloc((void**)&d_a, bytes ) ); // device
checkCuda( cudaMalloc((void**)<, bytes ) ); // device return
checkCuda( cudaMalloc((void**)&eq, bytes ) );
srand(time(0));
for (int i = 0; i < nElements; ++i) h_aPageable[i] = rand()%(1U<<31);
#ifdef TEST
for (int i = 0; i < nElements; ++i) scanf("%d",h_aPageable + i);
#endif
for (int i = 0; i < nElements; ++i)
for(int j = 31; j >= 0; --j){
h_bitPageable[i / 32 + (31-j)*(nElements/32)] += (((h_aPageable[i] &(1<<j))>>j)<<(31 - i % 32));
//h_bitPageable[i / 32 + (31-j)*(nElements/32)] += 0;
}
//for(int i = 0;i < nElements; i++) h_bitPageable[i] = rand()%(1<<31);
memcpy(h_aPinned, h_aPageable, bytes );
memset(h_bPageable, 0, bytes);
memset(h_bPinned, 0, bytes);
// output device info and transfer size
cudaDeviceProp prop;
checkCuda( cudaGetDeviceProperties(&prop, 0) );
// printf("\nDevice: %s\n", prop.name);
// if(bytes< 1024){
// printf("scan size (B): %d\n", bytes);
// }else if (bytes < 1024 * 1024)
// {
// printf("scan size (KB): %d\n", bytes / (1024));
// }else{
// printf("scan size (MB): %d\n", bytes / (1024 * 1024));
// }
int constC = rand()%(1U<<31);
#ifdef TEST
scanf("%d",&constC);
#endif
// perform scan eq
// profilescan(h_aPageable, h_bPageable, d_a, filter, nElements, constC,"Pageable",20);
//profilescan(h_aPinned, h_bPinned, d_a, filter,nElements, constC,"Pinned",20);
profilebitscan(h_bitPageable, h_bPageable, d_a, lt, eq, nElements, constC,"Pageable",1);
// for(int i = 0; i < nElements; i++) printf("%3u ",h_aPageable[i]);printf("\n");
// for(int i = 0; i < nElements; i++) printf("%3u ",((h_bPageable[i/32] & (1u << (31 - i % 32)))>> (31 - i % 32)));printf("\n");
// for(int i = 0; i < nElements; i++) printf("%3u ",((h_bPageable[i/32 + nElements/32] & (1u << (31 - i % 32)))>> (31 - i % 32)));printf("\n");
// for(int i = 0; i < nElements; i++) printf("%3u ",h_bitPageable[i]);printf("\n");
#ifdef TEST
for(int i = 0; i < nElements; i++) {
int x =(h_bPageable[i/32] & (1u << (31 - i % 32)))>> (31 - i % 32);
int y =(h_bPageable[i/32 + nElements/32] & (1u << (31 - i % 32)))>> (31 - i % 32);
if(x ==0 && y == 0) printf("%d\n",1);
else printf("%d\n", 0);
// printf("%d|%d\n",x,y);
}
#endif
// cleanup
cudaFree(lt);
cudaFree(eq);
cudaFreeHost(h_aPinned);
cudaFreeHost(h_bPinned);
free(h_aPageable);
}
|
89950b075bdcd457df8769cb4c028860d769b39e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2018 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: mphoward
/*!
* \file mpcd/CellThermoComputeGPU.cu
* \brief Explicitly instantiates reduction operators and declares kernel drivers
* for mpcd::CellThermoComputeGPU.
*/
#include "CellThermoComputeGPU.cuh"
#include "CellThermoTypes.h"
#include "CellCommunicator.cuh"
#include "ReductionOperators.h"
#include "hoomd/extern/cub/hipcub/hipcub.hpp"
namespace mpcd
{
namespace gpu
{
//! Shuffle-based warp reduction
/*!
* \param val Value to be reduced
*
* \tparam LOGICAL_WARP_SIZE Number of threads in a "logical" warp to reduce, must be a power-of-two
* and less than the hardware warp size.
* \tparam T Type of value to be reduced (inferred).
*
* \returns Reduced value.
*
* The value \a val is reduced into the 0-th lane of the "logical" warp using
* shuffle-based intrinsics. This allows for the summation of quantities when
* using multiple threads per object within a kernel.
*/
template<int LOGICAL_WARP_SIZE, typename T>
__device__ static T warp_reduce(T val)
{
static_assert(LOGICAL_WARP_SIZE <= CUB_PTX_WARP_THREADS, "Logical warp size cannot exceed hardware warp size");
static_assert(LOGICAL_WARP_SIZE && !(LOGICAL_WARP_SIZE & (LOGICAL_WARP_SIZE-1)), "Logical warp size must be a power of 2");
#pragma unroll
for (int dest_count = LOGICAL_WARP_SIZE/2; dest_count >= 1; dest_count /= 2)
{
val += cub::ShuffleDown(val, dest_count);
}
return val;
}
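// Usage sketch (illustrative, not taken from the kernels below): with tpp = 4,
// each group of four consecutive lanes accumulates a partial value and then
// calls
//     partial = warp_reduce<4>(partial);
// after which the group's total sits in its 0-th lane, ready to be written out
// by that lane alone.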
namespace kernel
{
//! Begins the cell thermo compute by summing cell quantities on outer cells
/*!
* \param d_cell_vel Velocity and mass per cell (output)
* \param d_cell_energy Energy, temperature, number of particles per cell (output)
* \param d_cells Cell indexes to compute
* \param d_cell_np Number of particles per cell
* \param d_cell_list MPCD cell list
* \param cli Indexer into the cell list
* \param d_vel MPCD particle velocities
* \param N_mpcd Number of MPCD particles
* \param mpcd_mass Mass of MPCD particle
* \param d_embed_vel Embedded particle velocity
* \param d_embed_idx Embedded particle indexes
* \param num_cells Number of cells to compute for
*
* \tparam need_energy If true, compute the cell-level energy properties
* \tparam tpp Number of threads to use per cell
*
* \b Implementation details:
* Using \a tpp threads per cell, the cell properties are accumulated into \a d_cell_vel
* and \a d_cell_energy. Shuffle-based intrinsics are used to reduce the accumulated
* properties per-cell, and the first thread for each cell writes the result into
* global memory.
*/
template<bool need_energy, unsigned int tpp>
__global__ void begin_cell_thermo(double4 *d_cell_vel,
double3 *d_cell_energy,
const unsigned int *d_cells,
const unsigned int *d_cell_np,
const unsigned int *d_cell_list,
const Index2D cli,
const Scalar4 *d_vel,
const unsigned int N_mpcd,
const Scalar mpcd_mass,
const Scalar4 *d_embed_vel,
const unsigned int *d_embed_idx,
const unsigned int num_cells)
{
// tpp threads per cell
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= tpp * num_cells)
return;
const unsigned int cell_id = d_cells[idx / tpp];
const unsigned int np = d_cell_np[cell_id];
double4 momentum = make_double4(0.0, 0.0, 0.0, 0.0);
double ke(0.0);
for (unsigned int offset = (idx % tpp); offset < np; offset += tpp)
{
// Load particle data
const unsigned int cur_p = d_cell_list[cli(offset, cell_id)];
double3 vel_i;
double mass_i;
if (cur_p < N_mpcd)
{
Scalar4 vel_cell = d_vel[cur_p];
vel_i = make_double3(vel_cell.x, vel_cell.y, vel_cell.z);
mass_i = mpcd_mass;
}
else
{
Scalar4 vel_m = d_embed_vel[d_embed_idx[cur_p - N_mpcd]];
vel_i = make_double3(vel_m.x, vel_m.y, vel_m.z);
mass_i = vel_m.w;
}
// add momentum
momentum.x += mass_i * vel_i.x;
momentum.y += mass_i * vel_i.y;
momentum.z += mass_i * vel_i.z;
momentum.w += mass_i;
// also compute ke of the particle
if (need_energy)
ke += (double)(0.5) * mass_i * (vel_i.x * vel_i.x + vel_i.y * vel_i.y + vel_i.z * vel_i.z);
}
// reduce quantities down into the 0-th lane per logical warp
if (tpp > 1)
{
momentum.x = warp_reduce<tpp>(momentum.x);
momentum.y = warp_reduce<tpp>(momentum.y);
momentum.z = warp_reduce<tpp>(momentum.z);
momentum.w = warp_reduce<tpp>(momentum.w);
if (need_energy)
ke = warp_reduce<tpp>(ke);
}
// 0-th lane in each warp writes the result
if (idx % tpp == 0)
{
d_cell_vel[cell_id] = make_double4(momentum.x, momentum.y, momentum.z, momentum.w);
if (need_energy)
d_cell_energy[cell_id] = make_double3(ke, 0.0, __int_as_double(np));
}
}
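// Indexing sketch for the kernel above (illustrative numbers): with tpp = 2 and
// num_cells = 3, threads 0..5 map to d_cells[0], d_cells[0], d_cells[1],
// d_cells[1], d_cells[2], d_cells[2] via idx / tpp, and within a cell each
// thread walks the cell list starting at offset idx % tpp with stride tpp.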
//! Finalizes the cell thermo compute by properly averaging cell quantities
/*!
* \param d_cell_vel Cell velocity and masses
* \param d_cell_energy Cell energy and temperature
* \param d_cells Cells to compute for
* \param Ncell Number of cells
* \param n_dimensions Number of dimensions in system
*
* \tparam need_energy If true, compute the cell-level energy properties.
*
* \b Implementation details:
* Using one thread per cell, the properties are averaged by mass, number of particles,
* etc. The temperature is computed from the cell kinetic energy.
*/
template<bool need_energy>
__global__ void end_cell_thermo(double4 *d_cell_vel,
double3 *d_cell_energy,
const unsigned int *d_cells,
const unsigned int Ncell,
const unsigned int n_dimensions)
{
// one thread per cell
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= Ncell)
return;
const unsigned int cell_id = d_cells[idx];
// average cell properties if the cell has mass
const double4 cell_vel = d_cell_vel[cell_id];
double3 vel_cm = make_double3(cell_vel.x, cell_vel.y, cell_vel.z);
const double mass = cell_vel.w;
if (mass > 0.)
{
// average velocity is only defined when there is some mass in the cell
vel_cm.x /= mass; vel_cm.y /= mass; vel_cm.z /= mass;
}
d_cell_vel[cell_id] = make_double4(vel_cm.x, vel_cm.y, vel_cm.z, mass);
if (need_energy)
{
const double3 cell_energy = d_cell_energy[cell_id];
const double ke = cell_energy.x;
double temp(0.0);
const unsigned int np = __double_as_int(cell_energy.z);
// temperature is only defined for 2 or more particles
if (np > 1)
{
const double ke_cm = 0.5 * mass * (vel_cm.x*vel_cm.x + vel_cm.y*vel_cm.y + vel_cm.z*vel_cm.z);
temp = 2. * (ke - ke_cm) / (n_dimensions * (np-1));
}
d_cell_energy[cell_id] = make_double3(ke, temp, __int_as_double(np));
}
}
//! Computes the cell thermo for inner cells
/*!
* \param d_cell_vel Velocity and mass per cell (output)
* \param d_cell_energy Energy, temperature, number of particles per cell (output)
* \param ci Cell indexer
* \param inner_ci Cell indexer for the inner cells
* \param offset Offset of \a inner_ci from \a ci
* \param d_cell_np Number of particles per cell
* \param d_cell_list MPCD cell list
* \param cli Indexer into the cell list
* \param d_vel MPCD particle velocities
* \param N_mpcd Number of MPCD particles
* \param mpcd_mass Mass of MPCD particle
* \param d_embed_vel Embedded particle velocity
* \param d_embed_idx Embedded particle indexes
* \param n_dimensions System dimensionality
*
* \tparam need_energy If true, compute the cell-level energy properties.
* \tparam tpp Number of threads to use per cell
*
* \b Implementation details:
* Using \a tpp threads per cell, the cell properties are accumulated into \a d_cell_vel
* and \a d_cell_energy. Shuffle-based intrinsics are used to reduce the accumulated
* properties per-cell, and the first thread for each cell writes the result into
* global memory. The properties are properly normalized
*
* See mpcd::gpu::kernel::begin_cell_thermo for an almost identical implementation
* without the normalization at the end, which is used for the outer cells.
*/
template<bool need_energy, unsigned int tpp>
__global__ void inner_cell_thermo(double4 *d_cell_vel,
double3 *d_cell_energy,
const Index3D ci,
const Index3D inner_ci,
const uint3 offset,
const unsigned int *d_cell_np,
const unsigned int *d_cell_list,
const Index2D cli,
const Scalar4 *d_vel,
const unsigned int N_mpcd,
const Scalar mpcd_mass,
const Scalar4 *d_embed_vel,
const unsigned int *d_embed_idx,
const unsigned int n_dimensions)
{
// tpp threads per cell
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= tpp * inner_ci.getNumElements())
return;
// reinterpret the thread id as a cell by first mapping the thread into the inner indexer,
// shifting by the offset of the inner indexer from the full indexer, and then compressing
// back into a 1D cell id
const uint3 inner_cell = inner_ci.getTriple(idx/tpp);
const uint3 cell = make_uint3(inner_cell.x + offset.x, inner_cell.y + offset.y, inner_cell.z + offset.z);
const unsigned int cell_id = ci(cell.x, cell.y, cell.z);
const unsigned int np = d_cell_np[cell_id];
double4 momentum = make_double4(0.0, 0.0, 0.0, 0.0);
double ke(0.0);
for (unsigned int offset = (idx % tpp); offset < np; offset += tpp)
{
// Load particle data
const unsigned int cur_p = d_cell_list[cli(offset, cell_id)];
double3 vel_i;
double mass_i;
if (cur_p < N_mpcd)
{
Scalar4 vel_cell = d_vel[cur_p];
vel_i = make_double3(vel_cell.x, vel_cell.y, vel_cell.z);
mass_i = mpcd_mass;
}
else
{
Scalar4 vel_m = d_embed_vel[d_embed_idx[cur_p - N_mpcd]];
vel_i = make_double3(vel_m.x, vel_m.y, vel_m.z);
mass_i = vel_m.w;
}
// add momentum
momentum.x += mass_i * vel_i.x;
momentum.y += mass_i * vel_i.y;
momentum.z += mass_i * vel_i.z;
momentum.w += mass_i;
// also compute ke of the particle
if (need_energy)
ke += 0.5 * mass_i * (vel_i.x * vel_i.x + vel_i.y * vel_i.y + vel_i.z * vel_i.z);
}
// reduce quantities down into the 0-th lane per logical warp
if (tpp > 1)
{
momentum.x = warp_reduce<tpp>(momentum.x);
momentum.y = warp_reduce<tpp>(momentum.y);
momentum.z = warp_reduce<tpp>(momentum.z);
momentum.w = warp_reduce<tpp>(momentum.w);
if (need_energy)
ke = warp_reduce<tpp>(ke);
}
// 0-th lane in each warp writes the result
if (idx % tpp == 0)
{
const double mass = momentum.w;
double3 vel_cm = make_double3(0.0,0.0,0.0);
if (mass > 0.)
{
vel_cm.x = momentum.x / mass;
vel_cm.y = momentum.y / mass;
vel_cm.z = momentum.z / mass;
}
d_cell_vel[cell_id] = make_double4(vel_cm.x, vel_cm.y, vel_cm.z, mass);
if (need_energy)
{
double temp(0.0);
if (np > 1)
{
const double ke_cm = 0.5 * mass * (vel_cm.x*vel_cm.x + vel_cm.y*vel_cm.y + vel_cm.z*vel_cm.z);
temp = 2. * (ke - ke_cm) / (n_dimensions * (np-1));
}
d_cell_energy[cell_id] = make_double3(ke, temp, __int_as_double(np));
}
}
}
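// Mapping sketch for the kernel above (illustrative sizes): with a 6x6x6 full
// cell grid, a 4x4x4 inner grid and offset = (1,1,1), inner cell (0,0,0) maps
// to global cell ci(1,1,1); cells outside the inner region are handled
// separately through the outer-cell path (begin_cell_thermo / end_cell_thermo).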
/*!
* \param d_tmp_thermo Temporary cell packed thermo element
* \param d_cell_vel Cell velocity to reduce
* \param d_cell_energy Cell energy to reduce
* \param tmp_ci Temporary cell indexer for cells undergoing reduction
* \param ci Cell indexer Regular cell list indexer
*
* \tparam need_energy If true, compute the cell-level energy properties.
*
* \b Implementation details:
* Using one thread per \a temporary cell, the cell properties are normalized
* in a way suitable for reduction of net properties, e.g. the cell velocities
 * are converted to momentum. The cell energy and temperature are copied through,
 * and a flag is set to 1 or 0 to indicate whether this cell holds a temperature
 * that should be included when averaging the total temperature.
*/
template<bool need_energy>
__global__ void stage_net_cell_thermo(mpcd::detail::cell_thermo_element *d_tmp_thermo,
const double4 *d_cell_vel,
const double3 *d_cell_energy,
const Index3D tmp_ci,
const Index3D ci)
{
// one thread per cell
unsigned int tmp_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (tmp_idx >= tmp_ci.getNumElements())
return;
// use the temporary cell indexer to map to a cell, then use the real cell indexer to
// get the read index
uint3 cell = tmp_ci.getTriple(tmp_idx);
const unsigned int idx = ci(cell.x, cell.y, cell.z);
const double4 vel_mass = d_cell_vel[idx];
const double3 vel = make_double3(vel_mass.x, vel_mass.y, vel_mass.z);
const double mass = vel_mass.w;
mpcd::detail::cell_thermo_element thermo;
thermo.momentum = make_double3(mass * vel.x,
mass * vel.y,
mass * vel.z);
if (need_energy)
{
const double3 cell_energy = d_cell_energy[idx];
thermo.energy = cell_energy.x;
if (__double_as_int(cell_energy.z) > 1)
{
thermo.temperature = cell_energy.y;
thermo.flag = 1;
}
else
{
thermo.temperature = 0.0;
thermo.flag = 0;
}
}
else
{
thermo.energy = 0.; thermo.temperature = 0.; thermo.flag = 0;
}
d_tmp_thermo[tmp_idx] = thermo;
}
} // end namespace kernel
//! Templated launcher for multiple threads-per-cell kernel for outer cells
/*
* \param args Common arguments to thermo kernels
* \param d_cells Cell indexes to compute
* \param num_cells Number of cells to compute for
* \param block_size Number of threads per block
* \param tpp Number of threads to use per-cell
*
* \tparam cur_tpp Number of threads-per-cell for this template instantiation
*
* Launchers are recursively instantiated at compile-time in order to match the
* correct number of threads at runtime. If the templated number of threads matches
* the runtime number of threads, then the kernel is launched. Otherwise, the
* next template (with threads reduced by a factor of 2) is launched. This
* recursion is broken by a specialized template for 0 threads, which does no
* work.
*/
template<unsigned int cur_tpp>
inline void launch_begin_cell_thermo(const mpcd::detail::thermo_args_t& args,
const unsigned int *d_cells,
const unsigned int num_cells,
const unsigned int block_size,
const unsigned int tpp)
{
if (cur_tpp == tpp)
{
if (args.need_energy)
{
static unsigned int max_block_size_energy = UINT_MAX;
if (max_block_size_energy == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::begin_cell_thermo<true,cur_tpp>);
max_block_size_energy = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size_energy);
dim3 grid(cur_tpp*num_cells / run_block_size + 1);
hipLaunchKernelGGL(( mpcd::gpu::kernel::begin_cell_thermo<true,cur_tpp>), dim3(grid), dim3(run_block_size), 0, 0, args.cell_vel,
args.cell_energy,
d_cells,
args.cell_np,
args.cell_list,
args.cli,
args.vel,
args.N_mpcd,
args.mass,
args.embed_vel,
args.embed_idx,
num_cells);
}
else
{
static unsigned int max_block_size_noenergy = UINT_MAX;
if (max_block_size_noenergy == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::begin_cell_thermo<false,cur_tpp>);
max_block_size_noenergy = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size_noenergy);
dim3 grid(cur_tpp*num_cells / run_block_size + 1);
hipLaunchKernelGGL(( mpcd::gpu::kernel::begin_cell_thermo<false,cur_tpp>), dim3(grid), dim3(run_block_size), 0, 0, args.cell_vel,
args.cell_energy,
d_cells,
args.cell_np,
args.cell_list,
args.cli,
args.vel,
args.N_mpcd,
args.mass,
args.embed_vel,
args.embed_idx,
num_cells);
}
}
else
{
launch_begin_cell_thermo<cur_tpp/2>(args,
d_cells,
num_cells,
block_size,
tpp);
}
}
//! Template specialization to break recursion
template<>
inline void launch_begin_cell_thermo<0>(const mpcd::detail::thermo_args_t& args,
const unsigned int *d_cells,
const unsigned int num_cells,
const unsigned int block_size,
const unsigned int tpp)
{ }
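//! Illustrative expansion of the launcher recursion (numbers chosen for the
//! example): launch_begin_cell_thermo<32>(args, ..., tpp = 8) compares 32 != 8
//! and recurses to <16>, then to <8>, which matches and launches the kernel;
//! the <0> specialization above exists only to terminate the compile-time
//! recursion.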
/*
* \param args Common arguments to thermo kernels
* \param d_cells Cell indexes to compute
* \param num_cells Number of cells to compute for
* \param block_size Number of threads per block
* \param tpp Number of threads per cell
*
* \returns hipSuccess on completion
*
* \sa mpcd::gpu::launch_begin_cell_thermo
* \sa mpcd::gpu::kernel::begin_cell_thermo
*/
hipError_t begin_cell_thermo(const mpcd::detail::thermo_args_t& args,
const unsigned int *d_cells,
const unsigned int num_cells,
const unsigned int block_size,
const unsigned int tpp)
{
if (num_cells == 0) return hipSuccess;
launch_begin_cell_thermo<32>(args,
d_cells,
num_cells,
block_size,
tpp);
return hipSuccess;
}
/*!
* \param d_cell_vel Cell velocity and masses
* \param d_cell_energy Cell energy and temperature
* \param d_cells Cells to compute for
* \param Ncell Number of cells
* \param n_dimensions Number of dimensions in system
* \param need_energy If true, compute the cell-level energy properties
*
* \returns hipSuccess on completion
*
* \sa mpcd::gpu::kernel::end_cell_thermo
*/
hipError_t end_cell_thermo(double4 *d_cell_vel,
double3 *d_cell_energy,
const unsigned int *d_cells,
const unsigned int Ncell,
const unsigned int n_dimensions,
const bool need_energy,
const unsigned int block_size)
{
if (Ncell == 0) return hipSuccess;
if (need_energy)
{
static unsigned int max_block_size_energy = UINT_MAX;
if (max_block_size_energy == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::end_cell_thermo<true>);
max_block_size_energy = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size_energy);
dim3 grid(Ncell / run_block_size + 1);
hipLaunchKernelGGL(( mpcd::gpu::kernel::end_cell_thermo<true>), dim3(grid), dim3(run_block_size), 0, 0, d_cell_vel,
d_cell_energy,
d_cells,
Ncell,
n_dimensions);
}
else
{
static unsigned int max_block_size_noenergy = UINT_MAX;
if (max_block_size_noenergy == UINT_MAX)
{
hipFuncAttributes attr;
            hipFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::end_cell_thermo<false>);
max_block_size_noenergy = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size_noenergy);
dim3 grid(Ncell / run_block_size + 1);
hipLaunchKernelGGL(( mpcd::gpu::kernel::end_cell_thermo<false>), dim3(grid), dim3(run_block_size), 0, 0, d_cell_vel,
d_cell_energy,
d_cells,
Ncell,
n_dimensions);
}
return hipSuccess;
}
//! Templated launcher for multiple threads-per-cell kernel for inner cells
/*
* \param args Common arguments to thermo kernels
* \param ci Cell indexer
* \param inner_ci Cell indexer for the inner cells
* \param offset Offset of \a inner_ci from \a ci
* \param n_dimensions System dimensionality
* \param block_size Number of threads per block
* \param tpp Number of threads per cell
*
* \tparam cur_tpp Number of threads-per-cell for this template instantiation
*
* Launchers are recursively instantiated at compile-time in order to match the
* correct number of threads at runtime. If the templated number of threads matches
* the runtime number of threads, then the kernel is launched. Otherwise, the
* next template (with threads reduced by a factor of 2) is launched. This
* recursion is broken by a specialized template for 0 threads, which does no
* work.
*/
template<unsigned int cur_tpp>
inline void launch_inner_cell_thermo(const mpcd::detail::thermo_args_t& args,
const Index3D& ci,
const Index3D& inner_ci,
const uint3& offset,
const unsigned int n_dimensions,
const unsigned int block_size,
const unsigned int tpp)
{
if (cur_tpp == tpp)
{
if (args.need_energy)
{
static unsigned int max_block_size_energy = UINT_MAX;
if (max_block_size_energy == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::inner_cell_thermo<true,cur_tpp>);
max_block_size_energy = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size_energy);
dim3 grid(cur_tpp*ci.getNumElements() / run_block_size + 1);
hipLaunchKernelGGL(( mpcd::gpu::kernel::inner_cell_thermo<true,cur_tpp>), dim3(grid), dim3(run_block_size), 0, 0, args.cell_vel,
args.cell_energy,
ci,
inner_ci,
offset,
args.cell_np,
args.cell_list,
args.cli,
args.vel,
args.N_mpcd,
args.mass,
args.embed_vel,
args.embed_idx,
n_dimensions);
}
else
{
static unsigned int max_block_size_noenergy = UINT_MAX;
if (max_block_size_noenergy == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::inner_cell_thermo<false,cur_tpp>);
max_block_size_noenergy = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size_noenergy);
dim3 grid(cur_tpp*ci.getNumElements() / run_block_size + 1);
hipLaunchKernelGGL(( mpcd::gpu::kernel::inner_cell_thermo<false,cur_tpp>), dim3(grid), dim3(run_block_size), 0, 0, args.cell_vel,
args.cell_energy,
ci,
inner_ci,
offset,
args.cell_np,
args.cell_list,
args.cli,
args.vel,
args.N_mpcd,
args.mass,
args.embed_vel,
args.embed_idx,
n_dimensions);
}
}
else
{
launch_inner_cell_thermo<cur_tpp/2>(args,
ci,
inner_ci,
offset,
n_dimensions,
block_size,
tpp);
}
}
//! Template specialization to break recursion
template<>
inline void launch_inner_cell_thermo<0>(const mpcd::detail::thermo_args_t& args,
const Index3D& ci,
const Index3D& inner_ci,
const uint3& offset,
const unsigned int n_dimensions,
const unsigned int block_size,
const unsigned int tpp)
{ }
/*!
* \param args Common arguments for cell thermo compute
* \param ci Cell indexer
* \param inner_ci Cell indexer for the inner cells
* \param offset Offset of \a inner_ci from \a ci
* \param n_dimensions System dimensionality
* \param block_size Number of threads per block
* \param tpp Number of threads per cell
*
* \returns hipSuccess on completion
*
* \sa mpcd::gpu::launch_inner_cell_thermo
* \sa mpcd::gpu::kernel::inner_cell_thermo
*/
hipError_t inner_cell_thermo(const mpcd::detail::thermo_args_t& args,
const Index3D& ci,
const Index3D& inner_ci,
const uint3& offset,
const unsigned int n_dimensions,
const unsigned int block_size,
const unsigned int tpp)
{
if (inner_ci.getNumElements() == 0) return hipSuccess;
launch_inner_cell_thermo<32>(args,
ci,
inner_ci,
offset,
n_dimensions,
block_size,
tpp);
return hipSuccess;
}
/*!
* \param d_tmp_thermo Temporary cell packed thermo element
* \param d_cell_vel Cell velocity to reduce
* \param d_cell_energy Cell energy to reduce
* \param tmp_ci Temporary cell indexer for cells undergoing reduction
 * \param ci Regular cell list indexer
* \param need_energy If true, compute the cell-level energy properties
* \param block_size Number of threads per block
*
* \returns hipSuccess on completion
*
* \sa mpcd::gpu::kernel::stage_net_cell_thermo
*/
hipError_t stage_net_cell_thermo(mpcd::detail::cell_thermo_element *d_tmp_thermo,
const double4 *d_cell_vel,
const double3 *d_cell_energy,
const Index3D& tmp_ci,
const Index3D& ci,
bool need_energy,
const unsigned int block_size)
{
if (need_energy)
{
static unsigned int max_block_size_energy = UINT_MAX;
if (max_block_size_energy == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::stage_net_cell_thermo<true>);
max_block_size_energy = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size_energy);
dim3 grid(tmp_ci.getNumElements() / run_block_size + 1);
hipLaunchKernelGGL(( mpcd::gpu::kernel::stage_net_cell_thermo<true>), dim3(grid), dim3(run_block_size), 0, 0, d_tmp_thermo,
d_cell_vel,
d_cell_energy,
tmp_ci,
ci);
}
else
{
static unsigned int max_block_size_noenergy = UINT_MAX;
if (max_block_size_noenergy == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::stage_net_cell_thermo<false>);
max_block_size_noenergy = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size_noenergy);
dim3 grid(tmp_ci.getNumElements() / run_block_size + 1);
hipLaunchKernelGGL(( mpcd::gpu::kernel::stage_net_cell_thermo<false>), dim3(grid), dim3(run_block_size), 0, 0, d_tmp_thermo,
d_cell_vel,
d_cell_energy,
tmp_ci,
ci);
}
return hipSuccess;
}
/*!
* \param d_reduced Cell thermo properties reduced across all cells (output on second call)
* \param d_tmp Temporary storage for reduction (output on first call)
* \param tmp_bytes Number of bytes allocated for temporary storage (output on first call)
* \param d_tmp_thermo Cell thermo properties to reduce
* \param Ncell The number of cells to reduce across
*
* \returns hipSuccess on completion
*
* \b Implementation details:
* CUB DeviceReduce is used to perform the reduction. Hence, this function requires
* two calls to perform the reduction. The first call sizes the temporary storage,
* which is returned in \a d_tmp and \a tmp_bytes. The caller must then allocate
* the required bytes, and call the function a second time. This performs the
 * reduction and returns the result in \a d_reduced.
*/
hipError_t reduce_net_cell_thermo(mpcd::detail::cell_thermo_element *d_reduced,
void *d_tmp,
size_t& tmp_bytes,
const mpcd::detail::cell_thermo_element *d_tmp_thermo,
const unsigned int Ncell)
{
hipcub::DeviceReduce::Sum(d_tmp, tmp_bytes, d_tmp_thermo, d_reduced, Ncell);
return hipSuccess;
}
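
//! Illustrative usage sketch (added example, not part of the original file): drives the
//! two-pass CUB reduction performed by reduce_net_cell_thermo() above. Managing the temporary
//! buffer with plain hipMalloc / hipFree is an assumption made for brevity; the real
//! CellThermoComputeGPU caller is expected to handle this storage through its own device arrays.
inline hipError_t example_reduce_net_cell_thermo(mpcd::detail::cell_thermo_element *d_reduced,
                                                 const mpcd::detail::cell_thermo_element *d_tmp_thermo,
                                                 const unsigned int Ncell)
    {
    // first call: a NULL workspace pointer makes CUB only report the required size in tmp_bytes
    size_t tmp_bytes = 0;
    reduce_net_cell_thermo(d_reduced, NULL, tmp_bytes, d_tmp_thermo, Ncell);
    // allocate the requested temporary storage on the device
    void *d_tmp = NULL;
    hipError_t error = hipMalloc(&d_tmp, tmp_bytes);
    if (error != hipSuccess)
        return error;
    // second call: perform the actual reduction, writing the net cell properties into d_reduced
    reduce_net_cell_thermo(d_reduced, d_tmp, tmp_bytes, d_tmp_thermo, Ncell);
    return hipFree(d_tmp);
    }
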
//! Explicit template instantiation of pack for cell velocity
template hipError_t pack_cell_buffer(typename mpcd::detail::CellVelocityPackOp::element *d_send_buf,
const double4 *d_props,
const unsigned int *d_send_idx,
const mpcd::detail::CellVelocityPackOp op,
const unsigned int num_send,
unsigned int block_size);
//! Explicit template instantiation of pack for cell energy
template hipError_t pack_cell_buffer(typename mpcd::detail::CellEnergyPackOp::element *d_send_buf,
const double3 *d_props,
const unsigned int *d_send_idx,
const mpcd::detail::CellEnergyPackOp op,
const unsigned int num_send,
unsigned int block_size);
//! Explicit template instantiation of unpack for cell velocity
template hipError_t unpack_cell_buffer(double4 *d_props,
const unsigned int *d_cells,
const unsigned int *d_recv,
const unsigned int *d_recv_begin,
const unsigned int *d_recv_end,
const typename mpcd::detail::CellVelocityPackOp::element *d_recv_buf,
const mpcd::detail::CellVelocityPackOp op,
const unsigned int num_cells,
const unsigned int block_size);
//! Explicit template instantiation of unpack for cell energy
template hipError_t unpack_cell_buffer(double3 *d_props,
const unsigned int *d_cells,
const unsigned int *d_recv,
const unsigned int *d_recv_begin,
const unsigned int *d_recv_end,
const typename mpcd::detail::CellEnergyPackOp::element *d_recv_buf,
const mpcd::detail::CellEnergyPackOp op,
const unsigned int num_cells,
const unsigned int block_size);
} // end namespace gpu
} // end namespace mpcd
| 89950b075bdcd457df8769cb4c028860d769b39e.cu | // Copyright (c) 2009-2018 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: mphoward
/*!
* \file mpcd/CellThermoComputeGPU.cu
* \brief Explicitly instantiates reduction operators and declares kernel drivers
* for mpcd::CellThermoComputeGPU.
*/
#include "CellThermoComputeGPU.cuh"
#include "CellThermoTypes.h"
#include "CellCommunicator.cuh"
#include "ReductionOperators.h"
#include "hoomd/extern/cub/cub/cub.cuh"
namespace mpcd
{
namespace gpu
{
//! Shuffle-based warp reduction
/*!
* \param val Value to be reduced
*
* \tparam LOGICAL_WARP_SIZE Number of threads in a "logical" warp to reduce, must be a power-of-two
* and less than the hardware warp size.
* \tparam T Type of value to be reduced (inferred).
*
* \returns Reduced value.
*
* The value \a val is reduced into the 0-th lane of the "logical" warp using
* shuffle-based intrinsics. This allows for the summation of quantities when
* using multiple threads per object within a kernel.
*/
template<int LOGICAL_WARP_SIZE, typename T>
__device__ static T warp_reduce(T val)
{
static_assert(LOGICAL_WARP_SIZE <= CUB_PTX_WARP_THREADS, "Logical warp size cannot exceed hardware warp size");
static_assert(LOGICAL_WARP_SIZE && !(LOGICAL_WARP_SIZE & (LOGICAL_WARP_SIZE-1)), "Logical warp size must be a power of 2");
#pragma unroll
for (int dest_count = LOGICAL_WARP_SIZE/2; dest_count >= 1; dest_count /= 2)
{
val += cub::ShuffleDown(val, dest_count);
}
return val;
}
namespace kernel
{
//! Begins the cell thermo compute by summing cell quantities on outer cells
/*!
* \param d_cell_vel Velocity and mass per cell (output)
* \param d_cell_energy Energy, temperature, number of particles per cell (output)
* \param d_cells Cell indexes to compute
* \param d_cell_np Number of particles per cell
* \param d_cell_list MPCD cell list
* \param cli Indexer into the cell list
* \param d_vel MPCD particle velocities
* \param N_mpcd Number of MPCD particles
* \param mpcd_mass Mass of MPCD particle
* \param d_embed_vel Embedded particle velocity
* \param d_embed_idx Embedded particle indexes
* \param num_cells Number of cells to compute for
*
* \tparam need_energy If true, compute the cell-level energy properties
* \tparam tpp Number of threads to use per cell
*
* \b Implementation details:
* Using \a tpp threads per cell, the cell properties are accumulated into \a d_cell_vel
* and \a d_cell_energy. Shuffle-based intrinsics are used to reduce the accumulated
* properties per-cell, and the first thread for each cell writes the result into
* global memory.
*/
template<bool need_energy, unsigned int tpp>
__global__ void begin_cell_thermo(double4 *d_cell_vel,
double3 *d_cell_energy,
const unsigned int *d_cells,
const unsigned int *d_cell_np,
const unsigned int *d_cell_list,
const Index2D cli,
const Scalar4 *d_vel,
const unsigned int N_mpcd,
const Scalar mpcd_mass,
const Scalar4 *d_embed_vel,
const unsigned int *d_embed_idx,
const unsigned int num_cells)
{
// tpp threads per cell
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= tpp * num_cells)
return;
const unsigned int cell_id = d_cells[idx / tpp];
const unsigned int np = d_cell_np[cell_id];
double4 momentum = make_double4(0.0, 0.0, 0.0, 0.0);
double ke(0.0);
for (unsigned int offset = (idx % tpp); offset < np; offset += tpp)
{
// Load particle data
const unsigned int cur_p = d_cell_list[cli(offset, cell_id)];
double3 vel_i;
double mass_i;
if (cur_p < N_mpcd)
{
Scalar4 vel_cell = d_vel[cur_p];
vel_i = make_double3(vel_cell.x, vel_cell.y, vel_cell.z);
mass_i = mpcd_mass;
}
else
{
Scalar4 vel_m = d_embed_vel[d_embed_idx[cur_p - N_mpcd]];
vel_i = make_double3(vel_m.x, vel_m.y, vel_m.z);
mass_i = vel_m.w;
}
// add momentum
momentum.x += mass_i * vel_i.x;
momentum.y += mass_i * vel_i.y;
momentum.z += mass_i * vel_i.z;
momentum.w += mass_i;
// also compute ke of the particle
if (need_energy)
ke += (double)(0.5) * mass_i * (vel_i.x * vel_i.x + vel_i.y * vel_i.y + vel_i.z * vel_i.z);
}
// reduce quantities down into the 0-th lane per logical warp
if (tpp > 1)
{
momentum.x = warp_reduce<tpp>(momentum.x);
momentum.y = warp_reduce<tpp>(momentum.y);
momentum.z = warp_reduce<tpp>(momentum.z);
momentum.w = warp_reduce<tpp>(momentum.w);
if (need_energy)
ke = warp_reduce<tpp>(ke);
}
// 0-th lane in each warp writes the result
if (idx % tpp == 0)
{
d_cell_vel[cell_id] = make_double4(momentum.x, momentum.y, momentum.z, momentum.w);
if (need_energy)
d_cell_energy[cell_id] = make_double3(ke, 0.0, __int_as_double(np));
}
}
//! Finalizes the cell thermo compute by properly averaging cell quantities
/*!
* \param d_cell_vel Cell velocity and masses
* \param d_cell_energy Cell energy and temperature
* \param d_cells Cells to compute for
* \param Ncell Number of cells
* \param n_dimensions Number of dimensions in system
*
* \tparam need_energy If true, compute the cell-level energy properties.
*
* \b Implementation details:
* Using one thread per cell, the properties are averaged by mass, number of particles,
* etc. The temperature is computed from the cell kinetic energy.
*/
template<bool need_energy>
__global__ void end_cell_thermo(double4 *d_cell_vel,
double3 *d_cell_energy,
const unsigned int *d_cells,
const unsigned int Ncell,
const unsigned int n_dimensions)
{
// one thread per cell
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= Ncell)
return;
const unsigned int cell_id = d_cells[idx];
// average cell properties if the cell has mass
const double4 cell_vel = d_cell_vel[cell_id];
double3 vel_cm = make_double3(cell_vel.x, cell_vel.y, cell_vel.z);
const double mass = cell_vel.w;
if (mass > 0.)
{
// average velocity is only defined when there is some mass in the cell
vel_cm.x /= mass; vel_cm.y /= mass; vel_cm.z /= mass;
}
d_cell_vel[cell_id] = make_double4(vel_cm.x, vel_cm.y, vel_cm.z, mass);
if (need_energy)
{
const double3 cell_energy = d_cell_energy[cell_id];
const double ke = cell_energy.x;
double temp(0.0);
const unsigned int np = __double_as_int(cell_energy.z);
// temperature is only defined for 2 or more particles
if (np > 1)
{
const double ke_cm = 0.5 * mass * (vel_cm.x*vel_cm.x + vel_cm.y*vel_cm.y + vel_cm.z*vel_cm.z);
temp = 2. * (ke - ke_cm) / (n_dimensions * (np-1));
}
d_cell_energy[cell_id] = make_double3(ke, temp, __int_as_double(np));
}
}
//! Computes the cell thermo for inner cells
/*!
* \param d_cell_vel Velocity and mass per cell (output)
* \param d_cell_energy Energy, temperature, number of particles per cell (output)
* \param ci Cell indexer
* \param inner_ci Cell indexer for the inner cells
* \param offset Offset of \a inner_ci from \a ci
* \param d_cell_np Number of particles per cell
* \param d_cell_list MPCD cell list
* \param cli Indexer into the cell list
* \param d_vel MPCD particle velocities
* \param N_mpcd Number of MPCD particles
* \param mpcd_mass Mass of MPCD particle
* \param d_embed_vel Embedded particle velocity
* \param d_embed_idx Embedded particle indexes
* \param n_dimensions System dimensionality
*
* \tparam need_energy If true, compute the cell-level energy properties.
* \tparam tpp Number of threads to use per cell
*
* \b Implementation details:
* Using \a tpp threads per cell, the cell properties are accumulated into \a d_cell_vel
* and \a d_cell_energy. Shuffle-based intrinsics are used to reduce the accumulated
* properties per-cell, and the first thread for each cell writes the result into
* global memory. The properties are properly normalized
*
* See mpcd::gpu::kernel::begin_cell_thermo for an almost identical implementation
* without the normalization at the end, which is used for the outer cells.
*/
template<bool need_energy, unsigned int tpp>
__global__ void inner_cell_thermo(double4 *d_cell_vel,
double3 *d_cell_energy,
const Index3D ci,
const Index3D inner_ci,
const uint3 offset,
const unsigned int *d_cell_np,
const unsigned int *d_cell_list,
const Index2D cli,
const Scalar4 *d_vel,
const unsigned int N_mpcd,
const Scalar mpcd_mass,
const Scalar4 *d_embed_vel,
const unsigned int *d_embed_idx,
const unsigned int n_dimensions)
{
// tpp threads per cell
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= tpp * inner_ci.getNumElements())
return;
// reinterpret the thread id as a cell by first mapping the thread into the inner indexer,
// shifting by the offset of the inner indexer from the full indexer, and then compressing
// back into a 1D cell id
const uint3 inner_cell = inner_ci.getTriple(idx/tpp);
const uint3 cell = make_uint3(inner_cell.x + offset.x, inner_cell.y + offset.y, inner_cell.z + offset.z);
const unsigned int cell_id = ci(cell.x, cell.y, cell.z);
const unsigned int np = d_cell_np[cell_id];
double4 momentum = make_double4(0.0, 0.0, 0.0, 0.0);
double ke(0.0);
for (unsigned int offset = (idx % tpp); offset < np; offset += tpp)
{
// Load particle data
const unsigned int cur_p = d_cell_list[cli(offset, cell_id)];
double3 vel_i;
double mass_i;
if (cur_p < N_mpcd)
{
Scalar4 vel_cell = d_vel[cur_p];
vel_i = make_double3(vel_cell.x, vel_cell.y, vel_cell.z);
mass_i = mpcd_mass;
}
else
{
Scalar4 vel_m = d_embed_vel[d_embed_idx[cur_p - N_mpcd]];
vel_i = make_double3(vel_m.x, vel_m.y, vel_m.z);
mass_i = vel_m.w;
}
// add momentum
momentum.x += mass_i * vel_i.x;
momentum.y += mass_i * vel_i.y;
momentum.z += mass_i * vel_i.z;
momentum.w += mass_i;
// also compute ke of the particle
if (need_energy)
ke += 0.5 * mass_i * (vel_i.x * vel_i.x + vel_i.y * vel_i.y + vel_i.z * vel_i.z);
}
// reduce quantities down into the 0-th lane per logical warp
if (tpp > 1)
{
momentum.x = warp_reduce<tpp>(momentum.x);
momentum.y = warp_reduce<tpp>(momentum.y);
momentum.z = warp_reduce<tpp>(momentum.z);
momentum.w = warp_reduce<tpp>(momentum.w);
if (need_energy)
ke = warp_reduce<tpp>(ke);
}
// 0-th lane in each warp writes the result
if (idx % tpp == 0)
{
const double mass = momentum.w;
double3 vel_cm = make_double3(0.0,0.0,0.0);
if (mass > 0.)
{
vel_cm.x = momentum.x / mass;
vel_cm.y = momentum.y / mass;
vel_cm.z = momentum.z / mass;
}
d_cell_vel[cell_id] = make_double4(vel_cm.x, vel_cm.y, vel_cm.z, mass);
if (need_energy)
{
double temp(0.0);
if (np > 1)
{
const double ke_cm = 0.5 * mass * (vel_cm.x*vel_cm.x + vel_cm.y*vel_cm.y + vel_cm.z*vel_cm.z);
temp = 2. * (ke - ke_cm) / (n_dimensions * (np-1));
}
d_cell_energy[cell_id] = make_double3(ke, temp, __int_as_double(np));
}
}
}
/*!
* \param d_tmp_thermo Temporary cell packed thermo element
* \param d_cell_vel Cell velocity to reduce
* \param d_cell_energy Cell energy to reduce
* \param tmp_ci Temporary cell indexer for cells undergoing reduction
 * \param ci Regular cell list indexer
*
* \tparam need_energy If true, compute the cell-level energy properties.
*
* \b Implementation details:
* Using one thread per \a temporary cell, the cell properties are normalized
* in a way suitable for reduction of net properties, e.g. the cell velocities
* are converted to momentum. The temperature is set to the cell energy, and a
* flag is set to 1 or 0 to indicate whether this cell has an energy that should
* be used in averaging the total temperature.
*/
template<bool need_energy>
__global__ void stage_net_cell_thermo(mpcd::detail::cell_thermo_element *d_tmp_thermo,
const double4 *d_cell_vel,
const double3 *d_cell_energy,
const Index3D tmp_ci,
const Index3D ci)
{
// one thread per cell
unsigned int tmp_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (tmp_idx >= tmp_ci.getNumElements())
return;
// use the temporary cell indexer to map to a cell, then use the real cell indexer to
// get the read index
uint3 cell = tmp_ci.getTriple(tmp_idx);
const unsigned int idx = ci(cell.x, cell.y, cell.z);
const double4 vel_mass = d_cell_vel[idx];
const double3 vel = make_double3(vel_mass.x, vel_mass.y, vel_mass.z);
const double mass = vel_mass.w;
mpcd::detail::cell_thermo_element thermo;
thermo.momentum = make_double3(mass * vel.x,
mass * vel.y,
mass * vel.z);
if (need_energy)
{
const double3 cell_energy = d_cell_energy[idx];
thermo.energy = cell_energy.x;
if (__double_as_int(cell_energy.z) > 1)
{
thermo.temperature = cell_energy.y;
thermo.flag = 1;
}
else
{
thermo.temperature = 0.0;
thermo.flag = 0;
}
}
else
{
thermo.energy = 0.; thermo.temperature = 0.; thermo.flag = 0;
}
d_tmp_thermo[tmp_idx] = thermo;
}
} // end namespace kernel
//! Templated launcher for multiple threads-per-cell kernel for outer cells
/*
* \param args Common arguments to thermo kernels
* \param d_cells Cell indexes to compute
* \param num_cells Number of cells to compute for
* \param block_size Number of threads per block
* \param tpp Number of threads to use per-cell
*
* \tparam cur_tpp Number of threads-per-cell for this template instantiation
*
* Launchers are recursively instantiated at compile-time in order to match the
* correct number of threads at runtime. If the templated number of threads matches
* the runtime number of threads, then the kernel is launched. Otherwise, the
* next template (with threads reduced by a factor of 2) is launched. This
* recursion is broken by a specialized template for 0 threads, which does no
* work.
*/
template<unsigned int cur_tpp>
inline void launch_begin_cell_thermo(const mpcd::detail::thermo_args_t& args,
const unsigned int *d_cells,
const unsigned int num_cells,
const unsigned int block_size,
const unsigned int tpp)
{
if (cur_tpp == tpp)
{
if (args.need_energy)
{
static unsigned int max_block_size_energy = UINT_MAX;
if (max_block_size_energy == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::begin_cell_thermo<true,cur_tpp>);
max_block_size_energy = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size_energy);
dim3 grid(cur_tpp*num_cells / run_block_size + 1);
mpcd::gpu::kernel::begin_cell_thermo<true,cur_tpp><<<grid, run_block_size>>>(args.cell_vel,
args.cell_energy,
d_cells,
args.cell_np,
args.cell_list,
args.cli,
args.vel,
args.N_mpcd,
args.mass,
args.embed_vel,
args.embed_idx,
num_cells);
}
else
{
static unsigned int max_block_size_noenergy = UINT_MAX;
if (max_block_size_noenergy == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::begin_cell_thermo<false,cur_tpp>);
max_block_size_noenergy = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size_noenergy);
dim3 grid(cur_tpp*num_cells / run_block_size + 1);
mpcd::gpu::kernel::begin_cell_thermo<false,cur_tpp><<<grid, run_block_size>>>(args.cell_vel,
args.cell_energy,
d_cells,
args.cell_np,
args.cell_list,
args.cli,
args.vel,
args.N_mpcd,
args.mass,
args.embed_vel,
args.embed_idx,
num_cells);
}
}
else
{
launch_begin_cell_thermo<cur_tpp/2>(args,
d_cells,
num_cells,
block_size,
tpp);
}
}
//! Template specialization to break recursion
template<>
inline void launch_begin_cell_thermo<0>(const mpcd::detail::thermo_args_t& args,
const unsigned int *d_cells,
const unsigned int num_cells,
const unsigned int block_size,
const unsigned int tpp)
{ }
/*
* \param args Common arguments to thermo kernels
* \param d_cells Cell indexes to compute
* \param num_cells Number of cells to compute for
* \param block_size Number of threads per block
* \param tpp Number of threads per cell
*
* \returns cudaSuccess on completion
*
* \sa mpcd::gpu::launch_begin_cell_thermo
* \sa mpcd::gpu::kernel::begin_cell_thermo
*/
cudaError_t begin_cell_thermo(const mpcd::detail::thermo_args_t& args,
const unsigned int *d_cells,
const unsigned int num_cells,
const unsigned int block_size,
const unsigned int tpp)
{
if (num_cells == 0) return cudaSuccess;
launch_begin_cell_thermo<32>(args,
d_cells,
num_cells,
block_size,
tpp);
return cudaSuccess;
}
/*!
* \param d_cell_vel Cell velocity and masses
* \param d_cell_energy Cell energy and temperature
* \param d_cells Cells to compute for
* \param Ncell Number of cells
* \param n_dimensions Number of dimensions in system
* \param need_energy If true, compute the cell-level energy properties
*
* \returns cudaSuccess on completion
*
* \sa mpcd::gpu::kernel::end_cell_thermo
*/
cudaError_t end_cell_thermo(double4 *d_cell_vel,
double3 *d_cell_energy,
const unsigned int *d_cells,
const unsigned int Ncell,
const unsigned int n_dimensions,
const bool need_energy,
const unsigned int block_size)
{
if (Ncell == 0) return cudaSuccess;
if (need_energy)
{
static unsigned int max_block_size_energy = UINT_MAX;
if (max_block_size_energy == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::end_cell_thermo<true>);
max_block_size_energy = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size_energy);
dim3 grid(Ncell / run_block_size + 1);
mpcd::gpu::kernel::end_cell_thermo<true><<<grid, run_block_size>>>(d_cell_vel,
d_cell_energy,
d_cells,
Ncell,
n_dimensions);
}
else
{
static unsigned int max_block_size_noenergy = UINT_MAX;
if (max_block_size_noenergy == UINT_MAX)
{
cudaFuncAttributes attr;
        cudaFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::end_cell_thermo<false>);
max_block_size_noenergy = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size_noenergy);
dim3 grid(Ncell / run_block_size + 1);
mpcd::gpu::kernel::end_cell_thermo<false><<<grid, run_block_size>>>(d_cell_vel,
d_cell_energy,
d_cells,
Ncell,
n_dimensions);
}
return cudaSuccess;
}
//! Templated launcher for multiple threads-per-cell kernel for inner cells
/*
* \param args Common arguments to thermo kernels
* \param ci Cell indexer
* \param inner_ci Cell indexer for the inner cells
* \param offset Offset of \a inner_ci from \a ci
* \param n_dimensions System dimensionality
* \param block_size Number of threads per block
* \param tpp Number of threads per cell
*
* \tparam cur_tpp Number of threads-per-cell for this template instantiation
*
* Launchers are recursively instantiated at compile-time in order to match the
* correct number of threads at runtime. If the templated number of threads matches
* the runtime number of threads, then the kernel is launched. Otherwise, the
* next template (with threads reduced by a factor of 2) is launched. This
* recursion is broken by a specialized template for 0 threads, which does no
* work.
*/
template<unsigned int cur_tpp>
inline void launch_inner_cell_thermo(const mpcd::detail::thermo_args_t& args,
const Index3D& ci,
const Index3D& inner_ci,
const uint3& offset,
const unsigned int n_dimensions,
const unsigned int block_size,
const unsigned int tpp)
{
if (cur_tpp == tpp)
{
if (args.need_energy)
{
static unsigned int max_block_size_energy = UINT_MAX;
if (max_block_size_energy == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::inner_cell_thermo<true,cur_tpp>);
max_block_size_energy = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size_energy);
dim3 grid(cur_tpp*ci.getNumElements() / run_block_size + 1);
mpcd::gpu::kernel::inner_cell_thermo<true,cur_tpp><<<grid, run_block_size>>>(args.cell_vel,
args.cell_energy,
ci,
inner_ci,
offset,
args.cell_np,
args.cell_list,
args.cli,
args.vel,
args.N_mpcd,
args.mass,
args.embed_vel,
args.embed_idx,
n_dimensions);
}
else
{
static unsigned int max_block_size_noenergy = UINT_MAX;
if (max_block_size_noenergy == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::inner_cell_thermo<false,cur_tpp>);
max_block_size_noenergy = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size_noenergy);
dim3 grid(cur_tpp*ci.getNumElements() / run_block_size + 1);
mpcd::gpu::kernel::inner_cell_thermo<false,cur_tpp><<<grid, run_block_size>>>(args.cell_vel,
args.cell_energy,
ci,
inner_ci,
offset,
args.cell_np,
args.cell_list,
args.cli,
args.vel,
args.N_mpcd,
args.mass,
args.embed_vel,
args.embed_idx,
n_dimensions);
}
}
else
{
launch_inner_cell_thermo<cur_tpp/2>(args,
ci,
inner_ci,
offset,
n_dimensions,
block_size,
tpp);
}
}
//! Template specialization to break recursion
template<>
inline void launch_inner_cell_thermo<0>(const mpcd::detail::thermo_args_t& args,
const Index3D& ci,
const Index3D& inner_ci,
const uint3& offset,
const unsigned int n_dimensions,
const unsigned int block_size,
const unsigned int tpp)
{ }
/*!
* \param args Common arguments for cell thermo compute
* \param ci Cell indexer
* \param inner_ci Cell indexer for the inner cells
* \param offset Offset of \a inner_ci from \a ci
* \param n_dimensions System dimensionality
* \param block_size Number of threads per block
* \param tpp Number of threads per cell
*
* \returns cudaSuccess on completion
*
* \sa mpcd::gpu::launch_inner_cell_thermo
* \sa mpcd::gpu::kernel::inner_cell_thermo
*/
cudaError_t inner_cell_thermo(const mpcd::detail::thermo_args_t& args,
const Index3D& ci,
const Index3D& inner_ci,
const uint3& offset,
const unsigned int n_dimensions,
const unsigned int block_size,
const unsigned int tpp)
{
if (inner_ci.getNumElements() == 0) return cudaSuccess;
launch_inner_cell_thermo<32>(args,
ci,
inner_ci,
offset,
n_dimensions,
block_size,
tpp);
return cudaSuccess;
}
/*!
* \param d_tmp_thermo Temporary cell packed thermo element
* \param d_cell_vel Cell velocity to reduce
* \param d_cell_energy Cell energy to reduce
* \param tmp_ci Temporary cell indexer for cells undergoing reduction
 * \param ci Regular cell list indexer
* \param need_energy If true, compute the cell-level energy properties
* \param block_size Number of threads per block
*
* \returns cudaSuccess on completion
*
* \sa mpcd::gpu::kernel::stage_net_cell_thermo
*/
cudaError_t stage_net_cell_thermo(mpcd::detail::cell_thermo_element *d_tmp_thermo,
const double4 *d_cell_vel,
const double3 *d_cell_energy,
const Index3D& tmp_ci,
const Index3D& ci,
bool need_energy,
const unsigned int block_size)
{
if (need_energy)
{
static unsigned int max_block_size_energy = UINT_MAX;
if (max_block_size_energy == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::stage_net_cell_thermo<true>);
max_block_size_energy = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size_energy);
dim3 grid(tmp_ci.getNumElements() / run_block_size + 1);
mpcd::gpu::kernel::stage_net_cell_thermo<true><<<grid, run_block_size>>>(d_tmp_thermo,
d_cell_vel,
d_cell_energy,
tmp_ci,
ci);
}
else
{
static unsigned int max_block_size_noenergy = UINT_MAX;
if (max_block_size_noenergy == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::stage_net_cell_thermo<false>);
max_block_size_noenergy = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size_noenergy);
dim3 grid(tmp_ci.getNumElements() / run_block_size + 1);
mpcd::gpu::kernel::stage_net_cell_thermo<false><<<grid, run_block_size>>>(d_tmp_thermo,
d_cell_vel,
d_cell_energy,
tmp_ci,
ci);
}
return cudaSuccess;
}
/*!
* \param d_reduced Cell thermo properties reduced across all cells (output on second call)
* \param d_tmp Temporary storage for reduction (output on first call)
* \param tmp_bytes Number of bytes allocated for temporary storage (output on first call)
* \param d_tmp_thermo Cell thermo properties to reduce
* \param Ncell The number of cells to reduce across
*
* \returns cudaSuccess on completion
*
* \b Implementation details:
* CUB DeviceReduce is used to perform the reduction. Hence, this function requires
* two calls to perform the reduction. The first call sizes the temporary storage,
* which is returned in \a d_tmp and \a tmp_bytes. The caller must then allocate
* the required bytes, and call the function a second time. This performs the
 * reduction and returns the result in \a d_reduced.
*/
cudaError_t reduce_net_cell_thermo(mpcd::detail::cell_thermo_element *d_reduced,
void *d_tmp,
size_t& tmp_bytes,
const mpcd::detail::cell_thermo_element *d_tmp_thermo,
const unsigned int Ncell)
{
cub::DeviceReduce::Sum(d_tmp, tmp_bytes, d_tmp_thermo, d_reduced, Ncell);
return cudaSuccess;
}
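
//! Illustrative usage sketch (added example, not part of the original file): drives the
//! two-pass CUB reduction performed by reduce_net_cell_thermo() above. Managing the temporary
//! buffer with plain cudaMalloc / cudaFree is an assumption made for brevity; the real
//! CellThermoComputeGPU caller is expected to handle this storage through its own device arrays.
inline cudaError_t example_reduce_net_cell_thermo(mpcd::detail::cell_thermo_element *d_reduced,
                                                  const mpcd::detail::cell_thermo_element *d_tmp_thermo,
                                                  const unsigned int Ncell)
    {
    // first call: a NULL workspace pointer makes CUB only report the required size in tmp_bytes
    size_t tmp_bytes = 0;
    reduce_net_cell_thermo(d_reduced, NULL, tmp_bytes, d_tmp_thermo, Ncell);
    // allocate the requested temporary storage on the device
    void *d_tmp = NULL;
    cudaError_t error = cudaMalloc(&d_tmp, tmp_bytes);
    if (error != cudaSuccess)
        return error;
    // second call: perform the actual reduction, writing the net cell properties into d_reduced
    reduce_net_cell_thermo(d_reduced, d_tmp, tmp_bytes, d_tmp_thermo, Ncell);
    return cudaFree(d_tmp);
    }
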
//! Explicit template instantiation of pack for cell velocity
template cudaError_t pack_cell_buffer(typename mpcd::detail::CellVelocityPackOp::element *d_send_buf,
const double4 *d_props,
const unsigned int *d_send_idx,
const mpcd::detail::CellVelocityPackOp op,
const unsigned int num_send,
unsigned int block_size);
//! Explicit template instantiation of pack for cell energy
template cudaError_t pack_cell_buffer(typename mpcd::detail::CellEnergyPackOp::element *d_send_buf,
const double3 *d_props,
const unsigned int *d_send_idx,
const mpcd::detail::CellEnergyPackOp op,
const unsigned int num_send,
unsigned int block_size);
//! Explicit template instantiation of unpack for cell velocity
template cudaError_t unpack_cell_buffer(double4 *d_props,
const unsigned int *d_cells,
const unsigned int *d_recv,
const unsigned int *d_recv_begin,
const unsigned int *d_recv_end,
const typename mpcd::detail::CellVelocityPackOp::element *d_recv_buf,
const mpcd::detail::CellVelocityPackOp op,
const unsigned int num_cells,
const unsigned int block_size);
//! Explicit template instantiation of unpack for cell energy
template cudaError_t unpack_cell_buffer(double3 *d_props,
const unsigned int *d_cells,
const unsigned int *d_recv,
const unsigned int *d_recv_begin,
const unsigned int *d_recv_end,
const typename mpcd::detail::CellEnergyPackOp::element *d_recv_buf,
const mpcd::detail::CellEnergyPackOp op,
const unsigned int num_cells,
const unsigned int block_size);
} // end namespace gpu
} // end namespace mpcd
|
18fba99afc4860626d4a8f816c5b398c982d8874.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ** CHANGE LIST ** See below \section sec_changes
// !!DBTv1.5!! (June 28, 2017)
// -- Improved detector model and fluorescence
// -- Added electronic noise and Swank factor for detected charges output
// -- Anti-scatter grid based on Day and Dance, Phys Med Biol 28, pp. 1429-1433 (1983)
// !!DBTv1.4!!
// -- Improved detector model with input thickness, attenuation, fluorescence escape.
// -- User-defined rotation axis for the tomography scan
// -- Enabled the simulation of tomosynthesis scans: half cone source, emission angle offset, etc
// -- Enable translation of the voxelized geometry and of the image sensor within the detector plane
// -- Code upgraded from CUDA 4 to CUDA 5, after cutil_inline.h has been eliminated from the SDK:
// CUDA 5.0: Using "getLastCudaError" instead of "cutilCheckMsg"
// CUDA 5.0: Using "checkCudaErrors" instead of "cutilSafeCall" or "cutilCheckError"
// CUDA 5.0: Using "gpuGetMaxGflopsDeviceId instead of "cutGetMaxGflopsDeviceId"
// CUDA 5.0: Substitute all the CUDA timer functions (cutResetTimer, cutStartTimer, cutStopTimer, cutGetTimerValue) for standard C clock() calls.
////////////////////////////////////////////////////////////////////////////////////////
//
// ****************************
// *** MC-GPU , version 1.5 ***
// ****************************
//
/**
* \mainpage MC-GPU v1.5_DBT
*
* \code
*
* Andreu Badal, PhD (Andreu.Badal-Soler{at}fda.hhs.gov)
*
* Division of Imaging and Applied Mathematics
* Office of Science and Engineering Laboratories
* Center for Devices and Radiological Health
* U.S. Food and Drug Administration
*
* Code release date: 2012/12/12
*
*
*
* \endcode
*
*
*
* \b MC-GPU [1-4] is a Monte Carlo simulation code that can generate synthetic radiographic
* images and computed tomography (CT) scans of realistic models of the human anatomy using the
* computational power of commodity Graphics Processing Unit (GPU) cards.
* The code implements a massively multi-threaded Monte Carlo simulation algorithm
* for the transport of x rays in a voxelized geometry. The x ray interaction models and material
* properties have been adapted from \b PENELOPE \b 2006 [5].
*
*
* \b MC-GPU was developed using the \b CUDA programming model from \b NVIDIA [6] to achieve
* maximum performance on NVIDIA GPUs. The code can also be compiled with a standard C compiler
* to be executed in a regular CPU.
* In a typical medical imaging simulation, the use of GPU computing with MC-GPU has been shown
* to provide a speed up of between 20 and 40 times, compared to the execution on a single CPU core.
*
* The MC-GPU code has been described in different scientific publications [1-4].
* The main reference of this work, which the users should cite, is the following [1]:
* \code
* Andreu Badal and Aldo Badano, "Accelerating Monte Carlo simulations of
* photon transport in a voxelized geometry using a massively parallel
 *        Graphics Processing Unit", Medical Physics 36, pp. 4878-4880 (2009)
* \endcode
* The main developer of MC-GPU is \b Andreu \b Badal, working at the U.S. \b Food \b and
* \b Drug \b Administration (Center for Devices and Radiological Health, Office of Science
* and Engineering Laboratories, Division of Imaging and Applied Mathematics).
* The source code of MC-GPU is free and open software in the public domain, as explained
* in the Disclaimer section below.
* The source code of MC-GPU and its auxiliary files are distributed from the website: http://code.google.com/.
*
*
* This documentation has been automatically generated by \b Doxygen parsing the comments in
* the MC-GPU source code.
* This code is still in development, please report to the author any issue/bug
* that you may encounter. Feel free to suggest improvements to the code too!
*
*
*
* \section sec_changes List of modifications in different versions of the code
*
* \subsection sec_changes_v13 Version 1.3 (release date: 2012/12/12)
*
* - Code upgraded to CUDA 5.0 (not compatible with previous versions of CUDA!).
* - Removed limit on the amount of projection images that can be simulated per CT scan (source and
* detector parameters now stored in global memory and transferring to shared memory at run time
* to avoid using the limited constant memory).
* - New material dose tally implemented to estimate the dose deposited in each material independently
* of the voxel dose tally (the voxel dose tally measures the dose in each material adding the energy
* deposited in each voxel of that material within the defined voxelized region-of-interest).
* - Interaction loop re-organized to maximize performance (virtual interactions simulated before real ones).
* - Improvements and small corrections in the source sampling and tally routines.
* - Allow input of material and voxel geometry files compressed with gzip (zlib library now required for compilation).
*
*
*
* \subsection sec_changes_v12 Version 1.2 (release date: 2011/10/25)
*
* - Implemented a voxel dose tally.
* - Polyenergetic source model.
* - MPI support for simulating individual projections.
* - Simulation by time limit.
* - Improved flexibility of the CT trajectories, helical scans.
*
*
*
* \section sec_disc Disclaimer
*
* This software and documentation (the "Software") were developed at the Food and
* Drug Administration (FDA) by employees of the Federal Government in the course
* of their official duties. Pursuant to Title 17, Section 105 of the United States
* Code, this work is not subject to copyright protection and is in the public
* domain. Permission is hereby granted, free of charge, to any person obtaining a
* copy of the Software, to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish, distribute,
* sublicense, or sell copies of the Software or derivatives, and to permit persons
* to whom the Software is furnished to do so. FDA assumes no responsibility
* whatsoever for use by other parties of the Software, its source code,
* documentation or compiled executables, and makes no guarantees, expressed or
* implied, about its quality, reliability, or any other characteristic. Further,
* use of this code in no way implies endorsement by the FDA or confers any
* advantage in regulatory decisions. Although this software can be redistributed
* and/or modified freely, we ask that any derivative works bear some notice that
* they are derived from it, and any modified versions bear some notice that they
* have been modified.
*
*
*
* \section sec_Intro Code features
*
* In this section we provide a brief description of the features of the MC-GPU code. A
* more complete description of the code can be found in our published articles.
 * Important information regarding the operation of the code is provided as comments in the
* input files of the sample simulations provided with the MC-GPU package.
* Detailed information on each function of the code can be found in the complete Doxygen
 * documentation of the source code.
*
* The basic operation of the code consists in adapting the simulation input file
* to describe the location and characteristics of the x ray source, define the CT trajectory
* (if any), list the materials to be used in the simulation, define the geometry of
* the x ray detector and, finally, specify the voxelized object file to be
* used as the simulation material universe.
* In the first line of the input file, the user can fix the total number of x rays that have
* to be simulated (> 1e5 histories) or the total simulation time (maximum 1e5 seconds).
*
*
* The coordinate system of the simulated world is determined by the input voxelized geometry.
* The origin of coordinates is assumed to be located at the lower-back corner of the voxelized
 * volume, and the axes are located on the vertices of the voxelized volume.
* This means that the lower-back corner of the first voxel is on the origin and the
* following voxels are located along the positive X, Y and Z axis (first quadrant).
*
*
* To simulate the atomic interactions, MC-GPU uses a database of material properties based on the
* database from PENELOPE. A PENELOPE 2006 material file can be converted into an MC-GPU material
* file using the auxiliary utility "MC-GPU_create_material_data.f" provided with the MC-GPU
* package. Pre-defined material files for a set of materials typically used in medical imaging
* simulations are already provided in the folder "MC-GPU_material_files".
*
*
* The code includes two tally options: an \b image \b tally that creates projection x-ray images,
* and a radiation \b dose \b tally that estimates the dose deposited inside the patient model.
* MC-GPU does not currently simulate the transport of electrons and therefore the dose
 * deposition tally (strictly speaking, a KERMA tally) will not be accurate for high energies or near
* material interfaces and small voxels.
* In the image tally the images are formed by counting the energy that enters a user-defined 2D
* grid of pixels, which is a simple approximation to a noise-free flat-panel detector with
* 100% detection efficiency. The pixel values have units of eV/cm^2.
* Four different images are reported at the end of the simulation, corresponding
* to the signal produced by x rays that did not interact between the source and the detector
* (non-scattered), x rays that suffered a single Compton (inelastic) interaction, a single
* Rayleigh (elastic) interaction, and multi-scattered x rays.
* The dose tally counts the energy deposited by each x ray track inside each voxel of the
* geometry, within a user-defined volumetric region-of-interest (ROI). The average dose deposited
* inside each voxel and in each material (and the associated statistical uncertainties) are reported
* at the end of the simulation.
*
*
* MC-GPU can simulate a single projection image or a full CT scan.
* The CT is simulated generating many projection images around the static
* voxelized geometry. Currently, the code is limited to perform a simple
* CT trajectory rotating around the Z axis. The user can specify the angular shift and longitudinal
* translation (pitch) of the source between each projection and also the distance between the
* source and the axis of rotation (the axis is assumed to be parallel to the Z axis).
* By now, the code does not simulate some relevant components of a CT scanner such as the
* anti-scatter grid, a bow-tie filter or a curved detector (flat-panel detector only).
*
*
* The x ray source is defined as a point source emitting x rays with an energy randomly sampled
* from the user-provided energy spectrum. The polyenergetic spectrum is efficiently sampled
* using the Walker aliasing algorithm. The emitted cone beam is computationally
* collimated to produce a rectangular field on the detector plane, within the azimuthal and
* polar angles specified by the user.
* The detector plane is automatically located at the specified distance right in front of the
* source focal spot, with the collimated cone beam pointing towards the geometric center of the detector.
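 *
 * As a rough illustration of the Walker aliasing idea, the sampling step reduces to the few
 * lines below (this sketch was added to the documentation and is not the actual kernel code;
 * the arrays cutoff[] and alias[] are assumed to be pre-computed from the input spectrum
 * during the initialization, and u1, u2 are uniform random numbers in [0,1)):
 * \code
 *    int sample_Walker_alias(const float *cutoff, const int *alias, int num_bins, float u1, float u2)
 *    {
 *      int bin = (int)(u1*num_bins);                   // select one energy bin uniformly at random
 *      if (bin >= num_bins) bin = num_bins-1;          // guard against u1 == 1.0
 *      return (u2 < cutoff[bin]) ? bin : alias[bin];   // keep the bin, or jump to its alias
 *    }
 * \endcode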
*
*
* In order to optimize the particle tracking algorithm (ray-tracing) and minimize
* the accesses to the slow GPU main memory, the photon trajectories across the voxels
* are computed using the Woodcock tracking algorithm.
* With this technique the photons perceive the geometry as a uniform medium
* composed of the material of the most attenuating voxel.
* In this way, the voxel boundaries do not have to be explicitly calculated and
* multiple voxels can be crossed in a single step.
* To keep the simulation unbiased, some of the interactions are considered
* "virtual" (i.e., do not change the photon energy or direction of movement),
* depending on the x ray energy and the actual material at the interaction site.
* In typical medical imaging simulations where the most attenuating material is cortical bone,
 * the Woodcock tracking algorithm gives a speed up of almost one order of magnitude compared
* to computing voxel boundaries all the time. However, if the geometry includes a high
* density voxel, such as a metallic implant, the performance of the code can be severely
* reduced because a large fraction of the sampled interactions will be virtual.
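 *
 * The following fragment sketches a single Woodcock (delta-tracking) flight; it is an
 * illustration added to this documentation only, and the helper names (locate_material, mfp,
 * rand_uniform) are placeholders for the actual kernel routines:
 * \code
 *    // mfp_min = shortest mean free path in the geometry (most attenuating voxel) at this energy
 *    do
 *    {
 *      float step = -mfp_min*logf(rand_uniform());   // path length sampled with the majorant cross section
 *      position.x += step*direction.x;  position.y += step*direction.y;  position.z += step*direction.z;
 *      material = locate_material(position);         // material at the tentative interaction site
 *    }
 *    while (rand_uniform() > mfp_min/mfp(material, energy));   // real interaction with probability mfp_min/mfp
 * \endcode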
*
*
* The random number generator used in PENELOPE [5], RANECU, is also used in the GPU
* program. To ensure that the simulated tracks are not correlated, each thread initializes
* the generator to a unique position in the random sequence, far enough from the
* other threads, using the algorithm implemented in the seedsMLCG code [7].
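 *
 * For reference, one step of the RANECU generator with the standard L'Ecuyer constants is
 * sketched below (illustration added to this documentation; the two integer seeds s1 and s2
 * are the per-thread state updated in place):
 * \code
 *    int i1 = s1/53668;
 *    s1 = 40014*(s1-i1*53668) - i1*12211;    if (s1 < 0) s1 += 2147483563;
 *    int i2 = s2/52774;
 *    s2 = 40692*(s2-i2*52774) - i2*3791;     if (s2 < 0) s2 += 2147483399;
 *    int iz = s1 - s2;                       if (iz < 1) iz += 2147483562;
 *    float u = ((float)iz)*4.65661306e-10f;  // uniform random number in (0,1)
 * \endcode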
*
*
* In a typical simulation, several thousand threads are launched simultaneously in
* the GPU, each one of them simulating a batch of several x ray tracks.
* If the code is compiled with MPI support (see below), multiple GPUs can be used in parallel.
* The code will perform a short speed test to estimate the relative speed of each GPU used
* in the simulation and then distribute the number of particles among the available GPUs correspondingly.
* If the user specified a time limit in the simulation, all the GPUs will simulate in parallel
* for the allowed time. Since the code is already optimized to scale well in
* thousands of GPU threads, it scales almost linearly with the number of GPUs in most
* situations, with only a few seconds of overhead in the initialization of the multiple GPUs
* and in the reduction of the final results.
*
*
*
*
* \section sec_output Code output
*
* At the end of the simulation the code reports the tallied 3D dose distribution and the
* final simulated images in RAW binary form, as 32-bits float values. The image data is provided
* as a collection of five consecutive images corresponding to: total image (scatter+primaries),
* primary particles, Compton, Rayleigh and multi-scatter.
* The dose data is reported as two RAW files with the mean dose and twice the standard deviation
* of the dose in each voxel of the geometry respectively, within the input ROI.
* The average dose deposited in each material of the geometry is also reported to the standard output.
* Organ doses can be obtained by post-processing the output dose file, knowing which voxel
* corresponds to each organ.
* The pixel and voxel dose data values are stored with the X coordinate incrementing first, the Y
* coordinate incrementing second, and the Z coordinate incrementing last.
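 *
 * For example, the projection image file can be read back with a few lines of standard C
 * (illustration added to this documentation; the file name and the pixel counts nx, ny are
 * placeholders that must match the values given in the simulation input file):
 * \code
 *    float *img = (float*) malloc(5*nx*ny*sizeof(float));   // 5 consecutive images of nx*ny pixels
 *    FILE *fp = fopen("mcgpu_image.raw", "rb");
 *    fread(img, sizeof(float), 5*nx*ny, fp);
 *    fclose(fp);
 *    // Pixel (i,j) of image k (0=total, 1=primary, 2=Compton, 3=Rayleigh, 4=multi-scatter):
 *    float pixel = img[k*nx*ny + j*nx + i];                  // X (i) increments first, then Y (j)
 * \endcode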
*
* The program also reports the simulated images and the dose at the Z plane at the level of the x ray
* source as ASCII text files. The ASCII output can be readily visualized with the GNUPLOT scripts
* distributed with MC-GPU. The header section at the beginning of these text files provides the
* information required to easily read the RAW binary files with IMAGEJ, OCTAVE or other programs.
*
*
*
* \section sec_compilation Code compilation and execution
*
* MC-GPU has been developed and tested only in the Linux operating system.
* A Makefile script is provided to compile the MC-GPU code in Linux.
* The CUDA libraries and the GNU GCC compiler must be previously installed.
* The Makefile may have to be edited to modify the library path.
* The code requires the "zlib.h" library to be able to open gzipped input files.
*
*
* MC-GPU uses CUDA to access NVIDIA GPUs but all the actual computations are coded
* in standard C and the CUDA-specific commands are enclosed within preprocessor
* "if" statements. Defining the pre-processor variable "USING_CUDA" (i.e.,
* compiling with "-DUSING_CUDA") the particle transport routines are compiled to simulate
* many x ray histories in parallel in an NVIDIA GPU using CUDA. Otherwise, the code is
* sequentially executed in the CPU.
* The same coding approach has been used to allow the use of multiple GPUs.
* Defining the pre-processor variable "USING_MPI" (i.e., compiling with
* "-DUSING_MPI"), Message Passing Interface (MPI) library calls are used to share information
* between multiple CPU threads in different computers.
* Each MPI thread gets a unique id in the CPU and addresses a unique GPU.
* At the end of the simulation the images and doses tallied by the different GPUs are
 * reduced to form a single output file equivalent to a sequential simulation of the same
* number of particles.
*
* The code can be easily compiled executing the command "make" or running the provided
* "./make.sh" script.
* Optionally, the code can be executed from the command line with a command like this
* (example using CUDA and MPI, openMPI library in this case):
* \code
* nvcc -DUSING_CUDA -DUSING_MPI MC-GPU_v1.3.cu -o MC-GPU_v1.3.x -O3
* -use_fast_math -L/usr/lib/ -I. -I/usr/local/cuda/include
* -I/usr/local/cuda/samples/common/inc -I/usr/local/cuda/samples/shared/inc/
* -I/usr/include/openmpi -lmpi -lz --ptxas-options=-v
* -gencode=arch=compute_20,code=sm_20 -gencode=arch=compute_30,code=sm_30
* \endcode
*
* The same source code can also be compiled for a regular CPU using:
* \code
* gcc -x c -O3 MC-GPU_v1.3.cu -o MC-GPU_v1.3_CPU.x -I./ -lm -lz
* \endcode
*
* To run a simulation (and keep the information reported to the standard
* output in an external file) the compiled code can be executed as:
* \code
* ./MC-GPU_v1.3.x MC-GPU_v1.3.in | tee MC-GPU_v1.3.out
* \endcode
*
* All simulation can be executed in the same way using the code compiled for the CPU
* or the GPU (however, the number of histories should be reduced for the CPU to finish
* the simulation in a reasonable time).
* To run the simulation in parallel with MPI in multiple GPUs (or CPU cores) in the
* current computer the user can execute:
* \code
* mpirun -n 4 ./MC-GPU_v1.3.x MC-GPU_v1.3.in
* \endcode
*
* To use GPUs in different computers, the user must make sure all computers can access the simulation
* files and that the libraries are correctly set up in all nodes.
* To execute a simulation (with verbose MPI information being reported):
* \code
* mpirun --tag-output -v -x LD_LIBRARY_PATH -hostfile myhostfile.txt -n 8
* /fullPath/MC-GPU_v1.3.x /fullPath/MC-GPU_v1.3.in | tee MC-GPU_v1.3.out
* \endcode
*
* The text file 'hostfile' lists the IP addresses and number of computing slots (GPUs) of the
* computers collaborating in the simulation. This file is not necessary when using multiple
* GPUs in a single workstation. When using multiple computers, the simulation files should
* be located in a shared drive to make sure every node can access the input data.
* The different workstations must have different host names in order to be differentiated by
* the MPI threads. The multiple threads communicate to each other to make sure they don't
* use the same GPU in the same workstation.
*
*
*
* \section sec_issues Known issues
*
* In extremely long simulations, it is theoretically possible to cause an overflow of the counters
 * estimating the mean and standard deviation of the material or voxel doses. If this happens, the
* results will be incorrect and even negative or nan values can be reported.
*
*
*
*
* \section sec_ref References
*
* -# A. Badal and A. Badano, Accelerating Monte Carlo simulations of photon transport in a voxelized geometry using a massively parallel Graphics Processing Unit, Med. Phys. 36, p. 4878-4880 (2009)
* -# A. Badal and A. Badano, Monte Carlo Simulation of X-Ray Imaging Using a Graphics Processing Unit, IEEE NSC-MIC, Conference Record , HP31, p. 4081-4084 (2009)
* -# A. Badal, I. Kyprianou, D. Sharma and A. Badano, Fast cardiac CT simulation using a Graphics Processing Unit-accelerated Monte Carlo code, Proc. SPIE Medical Imaging Conference 7622, p. 762231 (2010)
* -# A. Badal and A. Badano, Fast Simulation of Radiographic Images Using a Monte Carlo X-Ray Transport Algorithm Implemented in CUDA, Chapter 50 of GPU Computing Gems (Emerald Edition), p. 813-830, editor Wen-mei W. Hwu, publisher Morgan Kaufmann (Elsevier), Burlington MA, 2010
 *   -# F. Salvat, J. M. Fernandez-Varea and J. Sempau, PENELOPE - A code system for Monte Carlo simulation of electron and photon transport, NEA-OECD, Issy-les-Moulineaux, available at www.nea.fr/html/dbprog/peneloperef.html (2006)
* -# NVIDIA Corporation, NVIDIA CUDA(TM) Programming Guide, Technical Report available at www.nvidia.com/cuda (2011)
* -# A. Badal and J. Sempau, A package of Linux scripts for the parallelization of Monte Carlo simulations, Comput. Phys. Commun. 175 (6), p. 440-450 (2006)
*
*
*
* @file MC-GPU_v1.5.cu
* @author Andreu Badal ([email protected])
* @date 2012/12/12
* -- MC-GPU v.1.4_DBT: 2016/02/02
* -- MC-GPU v.1.3: 2012/12/12
* -- MC-GPU v.1.2: 2011/10/25
* -- MC-GPU v.1.1: 2010/06/25
* -- MC-GPU v.1.0: 2009/03/17
*/
////////////////////////////////////////////////////////////////////////////////////////
// *** Include header file with the structures and functions declarations
#include <MC-GPU_v1.5b.h>
// *** Include the computing kernel:
#include <MC-GPU_kernel_v1.5b.cu>
////////////////////////////////////////////////////////////////////////////////
//! Main program of MC-GPU: initialize the simulation environment, launch the GPU
//! kernels that perform the x ray transport and report the final results.
//! This function reads the description of the simulation from an external file
//! given in the command line. This input file defines the number of particles to
//! simulate, the characteristics of the x-ray source and the detector, the number
//! and spacing of the projections (if simulating a CT), the location of the
//! material files containing the interaction mean free paths, and the location
//! of the voxelized geometry file.
//!
//! @author Andreu Badal
//!
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
// -- Start time counter:
time_t current_time = time(NULL); // Get current time (in seconds)
clock_t clock_start, clock_end, clock_start_beginning; // (requires standard header <time.h>)
clock_start = clock(); // Get current clock counter
clock_start_beginning = clock_start;
#ifdef USING_MPI
// -- Using MPI to access multiple GPUs to simulate the x-ray projection image:
int myID = -88, numprocs = -99, return_reduce = -1;
MPI_Init(&argc, &argv); // Init MPI and get the current thread ID
MPI_Comm_rank(MPI_COMM_WORLD, &myID);
MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
char MPI_processor_name[81];
int resultlen = -1;
MPI_Get_processor_name(MPI_processor_name, &resultlen);
    char* char_time = ctime(&current_time); char_time[19] = '\0';   // The time is located between the characters 11 and 19.
printf(" >> MPI run (myId=%d, numprocs=%d) on processor \"%s\" (time: %s) <<\n", myID, numprocs, MPI_processor_name, &char_time[11]);
fflush(stdout); // Clear the screen output buffer
MPI_Barrier(MPI_COMM_WORLD); // Synchronize MPI threads
MASTER_THREAD printf(" -- Time spent initializing the MPI world (MPI_Barrier): %.3f s\n", ((double)(clock()-clock_start))/CLOCKS_PER_SEC);
#else
int myID = 0, numprocs = 1; // Only one CPU thread used when MPI is not activated (multiple projections will be simulated sequentially).
#endif
MASTER_THREAD
{
printf("\n\n *****************************************************************************\n");
printf( " *** MC-GPU, version 1.4_DBT (http://code.google.com/p/mcgpu/) ***\n");
printf( " *** ***\n");
printf( " *** A. Badal and A. Badano, \"Accelerating Monte Carlo simulations of *** \n");
printf( " *** photon transport in a voxelized geometry using a massively parallel *** \n");
    printf( " ***         Graphics Processing Unit\", Medical Physics 36, pp. 4878-4880 (2009)   ***\n");
printf( " *** ***\n");
printf( " *** Andreu Badal ([email protected]) ***\n");
printf( " *****************************************************************************\n\n");
    printf("****** Code execution started on: %s\n\n", ctime(&current_time));
fflush(stdout);
}
#ifdef USING_CUDA
// The "MASTER_THREAD" macro prints the messages just once when using MPI threads (it has no effect if MPI is not used): MASTER_THREAD == "if(0==myID)"
MASTER_THREAD printf ("\n *** CUDA SIMULATION IN THE GPU ***\n");
#else
MASTER_THREAD printf ("\n *** SIMULATION IN THE CPU ***\n");
#endif
MASTER_THREAD printf("\n -- INITIALIZATION phase:\n");
MASTER_THREAD fflush(stdout); // Clear the screen output buffer for the master thread
///////////////////////////////////////////////////////////////////////////////////////////////////
// *** Declare the arrays and structures that will contain the simulation data:
struct voxel_struct voxel_data; // Define the geometric constants of the voxel file
struct detector_struct detector_data[MAX_NUM_PROJECTIONS+1]; // Define an x ray detector (for each projection)
struct source_struct source_data[MAX_NUM_PROJECTIONS+1]; // Define the particles source (for each projection)
struct source_energy_struct source_energy_data; // Define the source energy spectrum
struct linear_interp mfp_table_data; // Constant data for the linear interpolation
struct compton_struct compton_table; // Structure containing Compton sampling data (to be copied to CONSTANT memory)
struct rayleigh_struct rayleigh_table; // Structure containing Rayleigh sampling data (to be copied to CONSTANT memory)
// float2 *voxel_mat_dens = NULL;
// char *voxel_mat_dens = NULL; // Pointer where voxels array will be allocated //!!FixedDensity_DBT!! Density taken from function "density_LOT"
int *voxel_mat_dens = NULL; //!!bitree!! v1.5 --> using an integer value to be able to store both the material number (positive) or pointers to the binary tree branches (negative)
long long int voxel_mat_dens_bytes = 0; // Size (in bytes) of the voxels array (using unsigned int to allocate up to 4.2GBytes)
char *bitree = NULL; // Array storing the binary tree structures for each non-uniform low resolution voxel //!!bitree!! v1.5b
unsigned int bitree_bytes = 0; // Size (in bytes) of the bitree array
  int *voxel_geometry_LowRes = NULL;     // Array to temporarily store the low resolution version of the geometry when the binary tree is created
unsigned int voxel_geometry_LowRes_bytes = 0;
float density_max[MAX_MATERIALS];
float density_nominal[MAX_MATERIALS];
  unsigned long long int *image = NULL;  // Pointer where image array will be allocated
int image_bytes = -1; // Size of the image array
int mfp_table_bytes = -1, mfp_Woodcock_table_bytes = -1; // Size of the table arrays
float2 *mfp_Woodcock_table = NULL; // Linear interpolation data for the Woodcock mean free path [cm]
float3 *mfp_table_a = NULL, *mfp_table_b = NULL; // Linear interpolation data for 3 different interactions:
// (1) inverse total mean free path (divided by density, cm^2/g)
// (2) inverse Compton mean free path (divided by density, cm^2/g)
// (3) inverse Rayleigh mean free path (divided by density, cm^2/g)
short int dose_ROI_x_min, dose_ROI_x_max, dose_ROI_y_min, dose_ROI_y_max, dose_ROI_z_min, dose_ROI_z_max; // Coordinates of the dose region of interest (ROI)
ulonglong2 *voxels_Edep = NULL; // Pointer where the voxel energy deposition array will be allocated
int voxels_Edep_bytes = 0; // Size of the voxel Edep array
ulonglong2 materials_dose[MAX_MATERIALS]; // Array for tally_materials_dose. !!tally_materials_dose!!
int kk;
for(kk=0;kk<MAX_MATERIALS;kk++)
{
materials_dose[kk].x = 0; // Initializing data !!tally_materials_dose!!
materials_dose[kk].y = 0;
density_nominal[kk] =-1.0f;
}
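// NOTE (added for clarity): the two 64-bit counters per material are only zeroed here on the host; the GPU kernel
// accumulates into the device copy ("materials_dose_device"), and the values are copied back, reduced across MPI
// ranks, and passed to "report_materials_dose" before the program ends.   !!tally_materials_dose!!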
clock_t clock_kernel; // Using only cpu timers after CUDA 5.0
double time_elapsed_MC_loop = 0.0, time_total_MC_simulation = 0.0, time_total_MC_init_report = 0.0;
unsigned long long int total_histories;
int histories_per_thread, seed_input, num_threads_per_block, gpu_id, num_projections;
int flag_material_dose=-2;
bool flag_simulateMammoAfterDBT=false, flag_detectorFixed=false; // !!DBTv1.4!!;
double SRotAxisD=-1.0, translation_helical=0.0;
char file_name_voxels[250], file_name_materials[MAX_MATERIALS][250], file_name_output[250], file_dose_output[250], file_name_espc[250];
// *** Read the input file given in the command line and return the significant data:
read_input(argc, argv, myID, &total_histories, &seed_input, &gpu_id, &num_threads_per_block, &histories_per_thread, detector_data, &image, &image_bytes, source_data, &source_energy_data, &voxel_data, file_name_voxels, file_name_materials, file_name_output, file_name_espc, &num_projections, &voxels_Edep, &voxels_Edep_bytes, file_dose_output, &dose_ROI_x_min, &dose_ROI_x_max, &dose_ROI_y_min, &dose_ROI_y_max, &dose_ROI_z_min, &dose_ROI_z_max, &SRotAxisD, &translation_helical, &flag_material_dose, &flag_simulateMammoAfterDBT, &flag_detectorFixed);
// *** Read the energy spectrum and initialize its sampling with the Walker aliasing method:
MASTER_THREAD printf(" -- Reading the energy spectrum and initializing the Walker aliasing sampling algorithm.\n");
float mean_energy_spectrum = 0.0f;
init_energy_spectrum(file_name_espc, &source_energy_data, &mean_energy_spectrum);
// *** Output some of the data read to make sure everything was correctly read:
MASTER_THREAD
{
printf("\n -- Data read from the input file:\n");
if (total_histories < (unsigned long long int)(100000))
printf(" simulation time = %lld s\n", total_histories);
else
printf(" x-ray tracks to simulate = %lld\n", total_histories);
printf(" initial random seed = %d\n", seed_input);
double phi0 = ((double)source_data[0].D_phi)*RAD2DEG;
double theta0 = 2.0*(90.0 - acos(((double)source_data[0].cos_theta_low))*RAD2DEG);
    if (source_data[0].flag_halfConeX)
    {
      theta0 = 0.5*theta0;
      printf(" NOTE: sampling only upper half of collimated cone beam, with beam offset to edge of the image (eg, mammo).\n"); // !!DBT!! !!HalfBeam!! !!DBTv1.4!!
    }
printf(" azimuthal (phi), polar apertures = %.6f , %.6f degrees\n", phi0, theta0);
printf(" (max_height_at_y1cm = %f , max_width_at_y1cm = %f)\n", source_data[0].max_height_at_y1cm, source_data[0].max_width_at_y1cm); // !!DBTv1.4!! !!DeBuG!!
printf(" source direction = (%f, %f, %f)\n", source_data[0].direction.x, source_data[0].direction.y, source_data[0].direction.z);
printf(" focal spot position = (%f, %f, %f)\n", source_data[0].position.x, source_data[0].position.y, source_data[0].position.z);
printf(" focal spot Gaussian blur FWHM = %f (3D Gaussian dist. cropped at 2*sigma)\n", source_data[0].focal_spot_FWHM); // !!DBTv1.4!!
if (num_projections!=1 && flag_simulateMammoAfterDBT==true)
printf(" focal spot rotation blur = %f degrees (disabled for the first single projection at 0 deg)\n", source_data[0].rotation_blur*RAD2DEG); // !!DBTv1.5!!
else
printf(" focal spot rotation blur = %f degrees\n", source_data[0].rotation_blur*RAD2DEG); // !!DBTv1.5!!
printf(" source-detector distance = %f cm\n", detector_data[0].sdd);
printf(" detector center position = (%f, %f, %f)\n", detector_data[0].center.x, detector_data[0].center.y, detector_data[0].center.z);
printf(" image offset from beam at center = (%f, %f)\n", detector_data[0].offset.x, detector_data[0].offset.y); // !!DBTv1.4!!
printf(" detector layer thickness = %f cm (=%.2f micron)\n", detector_data[0].scintillator_thickness, 1.0e4f*detector_data[0].scintillator_thickness); // !!DBTv1.4!!
printf(" detector material average MFP = %f cm\n", detector_data[0].scintillator_MFP);
printf(" detector material K-edge energy = %f eV\n", detector_data[0].kedge_energy);
printf(" fluorescence energy and yield = %f eV , %f\n", detector_data[0].fluorescence_energy, detector_data[0].fluorescence_yield);
printf(" MFP at fluorescence energy = %f cm\n", detector_data[0].fluorescence_MFP);
if (detector_data[0].gain_W>0.001f)
{
printf(" detector gain and Swank factor = %f eV/detected_charge, %f (%f relative std_dev)\n", detector_data[0].gain_W, 1.0f/(1.0f+detector_data[0].Swank_rel_std*detector_data[0].Swank_rel_std), detector_data[0].Swank_rel_std); // Swank_factor = mean^2/(mean^2 + std_dev^2) --> (std_dev/mean) = sqrt(1/Swank_factor - 1)
printf(" electronic noise per pixel = %f electrons\n", detector_data[0].electronic_noise);
}
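    // Illustrative example (values not taken from the input file): Swank_rel_std = 0.2 gives a Swank factor of
    // 1/(1+0.2*0.2) = 1/1.04 ~ 0.962, consistent with the relation printed above.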
printf(" detector cover thickness = %f cm\n", detector_data[0].cover_thickness); // !!DBTv1.5!!
printf(" cover average mean free path = %f cm\n", detector_data[0].cover_MFP);
if (detector_data[0].grid_freq > 0.0f)
{
printf(" Antiscatter grid ratio = %f\n", fabsf(detector_data[0].grid_ratio)); // !!DBTv1.5!!
printf(" Antiscatter grid frequency = %f lines per cm\n", detector_data[0].grid_freq);
printf(" Antiscatter grid strip thickness = %f cm (=%.2f micron)\n", detector_data[0].grid_strip_thickness, 1.0e4*detector_data[0].grid_strip_thickness);
float h = fabsf(detector_data[0].grid_ratio)*(1.0f/detector_data[0].grid_freq - detector_data[0].grid_strip_thickness); // Height of the grid, according to input grid ratio, freq, and strip thickness
printf(" Computed antiscatter grid height = %f cm (=%.2f micron)\n", h, 1.0e4*h);
printf(" strips average mean free path = %f cm\n", 1.0f/detector_data[0].grid_strip_mu);
printf(" interspace average mean free path = %f cm\n", 1.0f/detector_data[0].grid_interspace_mu);
if (detector_data[0].grid_ratio<0.0f)
printf(" Antiscatter grid orientation = 0 --> 1D collimated grid with strips perpendicular to lateral direction (mammo style)\n");
else
printf(" Antiscatter grid orientation = 1 --> 1D collimated grid with strips parallel to lateral direction (DBT style)\n");
}
else
printf("\n Antiscatter grid: DISABLED!\n\n"); // !!DBTv1.5!!
printf(" number of pixels image = %dx%d = %d\n", detector_data[0].num_pixels.x, detector_data[0].num_pixels.y, detector_data[0].total_num_pixels);
printf(" pixel size = %.5fx%.5f cm\n", 1.0f/detector_data[0].inv_pixel_size_X, 1.0f/detector_data[0].inv_pixel_size_Z);
printf(" detector size = %.5fx%.5f cm\n", detector_data[0].width_X, detector_data[0].height_Z);
printf(" number of projections = %d\n", num_projections);
if (num_projections!=1 || source_data[0].rotation_blur>0.000001f) // Report these data also when blur is used, because the rotation blur is applied around the source-rotation axis
{
printf(" source-rotation axis-distance = %lf cm\n", SRotAxisD);
printf(" angle between projections = %lf\n", source_data[0].angle_per_projection*RAD2DEG);
printf(" initial angle offset = %lf\n", source_data[0].angle_offset*RAD2DEG); // !!DBTv1.4!!
printf(" rotation point = (%f, %f, %f)\n", source_data[0].rotation_point.x, source_data[0].rotation_point.y, source_data[0].rotation_point.z); // !!DBTv1.4!!
printf(" axis of rotation = (%f, %f, %f)\n", source_data[0].axis_of_rotation.x, source_data[0].axis_of_rotation.y, source_data[0].axis_of_rotation.z); // !!DBTv1.4!!
printf(" translation between proj = %lf\n", translation_helical);
}
printf(" output image file = %s\n", file_name_output);
printf(" input voxel file = %s\n", file_name_voxels);
printf(" voxel geometry offset = (%f, %f, %f) cm\n", voxel_data.offset.x, voxel_data.offset.y, voxel_data.offset.z); // !!DBTv1.4!!
printf(" size coarse voxels for binary trees = %d x %d x %d\n", (int)voxel_data.num_voxels_coarse.x, (int)voxel_data.num_voxels_coarse.y, (int)voxel_data.num_voxels_coarse.z); // !!bitree!! v1.5b
if (dose_ROI_x_max>-1)
{
printf(" output dose file = %s\n", file_dose_output);
printf(" input region of interest dose = X[%d,%d], Y[%d,%d], Z[%d,%d]\n", dose_ROI_x_min+1, dose_ROI_x_max+1, dose_ROI_y_min+1, dose_ROI_y_max+1, dose_ROI_z_min+1, dose_ROI_z_max+1); // Show ROI with index=1 for the first voxel instead of 0.
}
printf("\n energy spectrum file = %s\n", file_name_espc);
printf( " number of energy bins read = %d\n", source_energy_data.num_bins_espc);
printf( " minimum, maximum energies = %.3f, %.3f keV\n", 0.001f*source_energy_data.espc[0], 0.001f*source_energy_data.espc[source_energy_data.num_bins_espc]);
printf( " mean energy spectrum = %.3f keV\n\n", 0.001f*mean_energy_spectrum);
fflush(stdout);
}
// *** Set the detectors and sources for the CT trajectory (if needed, ie, for more than one projection):
if (num_projections != 1)
{
set_CT_trajectory(myID, num_projections, source_data, detector_data, translation_helical, flag_detectorFixed);
}
fflush(stdout);
double mass_materials[MAX_MATERIALS];
// !!bitree!! If the binary tree is used, read the geometry only with the master thread, and then broadcast the new data:
// if the tree is not used, every thread reads the input geometry at the same time.
if (0==myID || (voxel_data.num_voxels_coarse.x)==0)
{
// *** Read the voxel data and allocate the density map matrix. Return the maximum density:
if (voxel_data.num_voxels.x<1)
{
// -- Read ASCII format geometry: geometric parameters will be read from the header file !!DBTv1.4!!
load_voxels(myID, file_name_voxels, density_max, &voxel_data, &voxel_mat_dens, &voxel_mat_dens_bytes, &dose_ROI_x_max, &dose_ROI_y_max, &dose_ROI_z_max);
}
else
{
// -- Read binary RAW format geometry: geometric parameters given in input file !!DBTv1.4!!
load_voxels_binary_VICTRE(myID, file_name_voxels, density_max, &voxel_data, &voxel_mat_dens, &voxel_mat_dens_bytes, &dose_ROI_x_max, &dose_ROI_y_max, &dose_ROI_z_max); //!!DBT!! // !!DBTv1.4!!
}
// -- Pre-compute the total mass of each material present in the voxel phantom (to be used in "report_materials_dose"):
double voxel_volume = 1.0 / ( ((double)voxel_data.inv_voxel_size.x) * ((double)voxel_data.inv_voxel_size.y) * ((double)voxel_data.inv_voxel_size.z) );
for(kk=0; kk<MAX_MATERIALS; kk++)
mass_materials[kk] = 0.0;
long long int llk;
for(llk=0; llk<((long long int)voxel_data.num_voxels.x*(long long int)voxel_data.num_voxels.y*(long long int)voxel_data.num_voxels.z); llk++) // For each voxel in the geometry
{
// mass_materials[((int)voxel_mat_dens[llk].x)-1] += ((double)voxel_mat_dens[llk].y)*voxel_volume; // Add material mass = density*volume
mass_materials[((int)voxel_mat_dens[llk])] += ((double)density_LUT((int)voxel_mat_dens[llk]))*voxel_volume; // Add material mass = density*volume (first material==0) //!!FixedDensity_DBT!! Density taken from function "density_LUT"
}
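    // Illustrative check of the mass tally above (hypothetical values, not from the input geometry):
    // for 50 micron cubic voxels, voxel_volume = (0.005 cm)^3 = 1.25e-7 cm^3, so one voxel of a material with
    // density 1.06 g/cm^3 contributes 1.06*1.25e-7 = 1.325e-7 g to the mass of that material.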
// ** Create the low resolution version of the phantom and the binary tree structures, if requested in the input file and dose dep tally disabled: //!!bitree!! v1.5b
if ((voxel_data.num_voxels_coarse.x)!=0)
{
if (dose_ROI_x_max>0)
{
MASTER_THREAD printf("\n\n !!ERROR!! Sorry, the voxel dose deposition tally cannot be used when the binary tree is active. Please, disable the binary tree.\n\n");
exit(-1);
}
MASTER_THREAD printf("\n !!bitree!! Creating a binary tree structure to minimize memory use.\n"); // !!bitree!! v1.5b
#ifdef DISABLE_CANON
MASTER_THREAD printf(" !!bitree!! Tree branch canonicalization was disabled by defining the pre-processor parameter \"DISABLE_CANON\"\n"); // !!bitree!! v1.5b !!DeBuG!!
#endif
create_bitree(myID, &voxel_data, voxel_mat_dens, &bitree, &bitree_bytes, &voxel_geometry_LowRes, &voxel_geometry_LowRes_bytes); //!!bitree!! v1.5b
MASTER_THREAD printf(" >> RAM memory allocation: original voxelized geometry = %f MBytes; low resolution voxelized geometry = %f MBytes;\n", voxel_mat_dens_bytes/(1024.f*1024.f), voxel_geometry_LowRes_bytes/(1024.f*1024.f));
MASTER_THREAD printf(" binary tree = %f MBytes; image vector = %f MBytes; data structures = %f Mbytes\n", bitree_bytes/(1024.f*1024.f), image_bytes/(1024.f*1024.f), (sizeof(struct voxel_struct)+sizeof(struct source_struct)+sizeof(struct detector_struct)+sizeof(struct linear_interp)+2*mfp_table_bytes+sizeof(struct rayleigh_struct)+sizeof(struct compton_struct))/(1024.f*1024.f));
MASTER_THREAD printf(" (reduction in memory use with bitree: [low res voxels + binary tree]-[high res voxels] = %f MBytes = %.3f%%)\n", (voxel_geometry_LowRes_bytes+bitree_bytes-voxel_mat_dens_bytes)/(1024.f*1024.f), 100.f*(voxel_geometry_LowRes_bytes+bitree_bytes-voxel_mat_dens_bytes)/voxel_mat_dens_bytes);
// -- Replace the high resolution version of the geometry by the low resolution version: !!DeBuG!! voxel dose tally can't be used now!!
free(voxel_mat_dens); //!!bitree!! v1.5b
voxel_mat_dens = voxel_geometry_LowRes; //!!bitree!! v1.5b
voxel_mat_dens_bytes = voxel_geometry_LowRes_bytes; //!!bitree!! v1.5b
}
else
{
MASTER_THREAD printf("\n !!bitree!! Binary tree structure disabled: standard voxelized geometry in use.\n\n"); // !!bitree!! v1.5b
MASTER_THREAD printf(" >> RAM memory allocation: voxelized geometry = %f MBytes; image vector = %f MBytes; data structures = %f Mbytes\n", voxel_mat_dens_bytes/(1024.f*1024.f), image_bytes/(1024.f*1024.f), (sizeof(struct voxel_struct)+sizeof(struct source_struct)+sizeof(struct detector_struct)+sizeof(struct linear_interp)+2*mfp_table_bytes+sizeof(struct rayleigh_struct)+sizeof(struct compton_struct))/(1024.f*1024.f));
}
}
fflush(stdout);
// !!bitree!! If the binary tree is used, broadcast the tree data and all auxiliary data from master to every other thread: !!DeBuG!!
if (numprocs>1 && (voxel_data.num_voxels_coarse.x)!=0)
{
MPI_Barrier(MPI_COMM_WORLD); // Synchronize MPI threads
// Send all the geometric data that has been read or changed by the root node:
MPI_Bcast(&voxel_data.num_voxels.x, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(&voxel_data.num_voxels.y, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(&voxel_data.num_voxels.z, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(&voxel_data.size_bbox.x, 1, MPI_FLOAT, 0, MPI_COMM_WORLD); MPI_Bcast(&voxel_data.size_bbox.y, 1, MPI_FLOAT, 0, MPI_COMM_WORLD); MPI_Bcast(&voxel_data.size_bbox.z, 1, MPI_FLOAT, 0, MPI_COMM_WORLD);
voxel_data.voxel_size_HiRes.x = voxel_data.voxel_size.x; voxel_data.voxel_size_HiRes.y = voxel_data.voxel_size.y; voxel_data.voxel_size_HiRes.z = voxel_data.voxel_size.z; // Save the original high resolution voxel size
MPI_Bcast(&voxel_data.voxel_size.x, 1, MPI_FLOAT, 0, MPI_COMM_WORLD); MPI_Bcast(&voxel_data.voxel_size.y, 1, MPI_FLOAT, 0, MPI_COMM_WORLD); MPI_Bcast(&voxel_data.voxel_size.z, 1, MPI_FLOAT, 0, MPI_COMM_WORLD);
voxel_data.inv_voxel_size.x = 1.0/voxel_data.voxel_size.x; voxel_data.inv_voxel_size.y = 1.0/voxel_data.voxel_size.y; voxel_data.inv_voxel_size.z = 1.0/voxel_data.voxel_size.z;
MPI_Bcast(density_max, MAX_MATERIALS, MPI_FLOAT, 0, MPI_COMM_WORLD);
MPI_Bcast(mass_materials, MAX_MATERIALS, MPI_DOUBLE, 0, MPI_COMM_WORLD);
// Allocate memory (except root) and transmit voxel+binary tree links data:
MPI_Bcast(&voxel_geometry_LowRes_bytes, 1, MPI_UNSIGNED, 0, MPI_COMM_WORLD);
MPI_Bcast(&bitree_bytes, 1, MPI_UNSIGNED, 0, MPI_COMM_WORLD);
if(0!=myID)
{
voxel_mat_dens_bytes = voxel_geometry_LowRes_bytes;
voxel_mat_dens = (int*) malloc(voxel_mat_dens_bytes); // Allocate voxels (low resolution)
bitree = (char*) malloc(bitree_bytes); // Allocate binary tree elements
}
MPI_Bcast(voxel_mat_dens, ((unsigned long long int)voxel_data.num_voxels.x)*(voxel_data.num_voxels.y*voxel_data.num_voxels.z), MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(bitree, bitree_bytes, MPI_CHAR, 0, MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
}
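  // NOTE (added for clarity): with the binary tree enabled only the MPI root reads the full-resolution phantom and
  // builds the tree; the broadcasts above send the low resolution voxel array and the packed tree to every other
  // rank, so the large high resolution geometry never has to be allocated outside the root node.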
// *** Read the material mean free paths and set the interaction table in a "linear_interp" structure:
load_material(myID, file_name_materials, density_max, density_nominal, &mfp_table_data, &mfp_Woodcock_table, &mfp_Woodcock_table_bytes, &mfp_table_a, &mfp_table_b, &mfp_table_bytes, &rayleigh_table, &compton_table);
// -- Check that the input material tables and the x-ray source are consistent:
if ( (source_energy_data.espc[0] < mfp_table_data.e0) || (source_energy_data.espc[source_energy_data.num_bins_espc] > (mfp_table_data.e0 + (mfp_table_data.num_values-1)/mfp_table_data.ide)) )
{
MASTER_THREAD
{
printf("\n\n\n !!ERROR!! The input x-ray source energy spectrum minimum (%.3f eV) and maximum (%.3f eV) energy values\n", source_energy_data.espc[0], source_energy_data.espc[source_energy_data.num_bins_espc]);
printf( " are outside the tabulated energy interval for the material properties tables (from %.3f to %.3f eV)!!\n", mfp_table_data.e0, (mfp_table_data.e0+(mfp_table_data.num_values-1)/mfp_table_data.ide));
printf( " Please, modify the input energy spectra to fit the tabulated limits or create new tables.\n\n");
}
#ifdef USING_MPI
MPI_Finalize();
#endif
exit(-1);
}
// *** Initialize the GPU using the NVIDIA CUDA libraries, if USING_CUDA parameter defined at compile time:
#ifdef USING_CUDA
// -- Declare the pointers to the device global memory, when using the GPU:
// float2 *voxel_mat_dens_device = NULL,
// *mfp_Woodcock_table_device = NULL;
// char *voxel_mat_dens_device = NULL; //!!FixedDensity_DBT!! Allocate material vector as char
int *voxel_mat_dens_device = NULL; //!!bitree!! v1.5 --> using an integer value to be able to store both the material number (positive) or pointers to the binary tree branches (negative)
char *bitree_device = NULL; //!!bitree!! v1.5b
float2 *mfp_Woodcock_table_device = NULL; //!!FixedDensity_DBT!!
float3 *mfp_table_a_device = NULL,
*mfp_table_b_device = NULL;
unsigned long long int *image_device = NULL;
struct rayleigh_struct *rayleigh_table_device = NULL;
struct compton_struct *compton_table_device = NULL;
ulonglong2 *voxels_Edep_device = NULL;
struct detector_struct *detector_data_device = NULL;
struct source_struct *source_data_device = NULL;
ulonglong2 *materials_dose_device = NULL; // !!tally_materials_dose!!
int* seed_input_device = NULL; // Store latest random seed used in GPU in global memory to continue random sequence in consecutive projections. !!DBTv1.4!!
// -- Sets the CUDA enabled GPU that will be used in the simulation, and allocate and copies the simulation data in the GPU global and constant memories.
init_CUDA_device(&gpu_id, myID, numprocs, &voxel_data, source_data, &source_energy_data, detector_data, &mfp_table_data, /*Variables GPU constant memory*/
voxel_mat_dens, &voxel_mat_dens_device, voxel_mat_dens_bytes, /*Variables GPU global memory*/
bitree, &bitree_device, bitree_bytes, //!!bitree!! v1.5b
image, &image_device, image_bytes,
mfp_Woodcock_table, &mfp_Woodcock_table_device, mfp_Woodcock_table_bytes,
mfp_table_a, mfp_table_b, &mfp_table_a_device, &mfp_table_b_device, mfp_table_bytes,
&rayleigh_table, &rayleigh_table_device,
&compton_table, &compton_table_device, &detector_data_device, &source_data_device,
voxels_Edep, &voxels_Edep_device, voxels_Edep_bytes, &dose_ROI_x_min, &dose_ROI_x_max, &dose_ROI_y_min, &dose_ROI_y_max, &dose_ROI_z_min, &dose_ROI_z_max,
materials_dose, &materials_dose_device, flag_material_dose, &seed_input_device, &seed_input, (num_projections+1));
// !!DBTv1.4!! Allocate space for one extra projection (num_projections+1) for case flag_simulateMammoAfterDBT==true !!DBTv1.4!!
// -- Constant data already moved to the GPU: clean up unnecessary RAM memory
free(mfp_Woodcock_table);
free(mfp_table_a);
free(mfp_table_b);
if (0!=myID) // Keep the geometry data for the MPI root because the voxel densities are still needed to compute the final doses
free(voxel_mat_dens);
#endif
MASTER_THREAD
{
current_time=time(NULL);
printf("\n -- INITIALIZATION finished: elapsed time = %.3f s. \n\n", ((double)(clock()-clock_start))/CLOCKS_PER_SEC);
}
#ifdef USING_MPI
fflush(stdout);
MPI_Barrier(MPI_COMM_WORLD); // Synchronize MPI threads before starting the MC phase.
#endif
///////////////////////////////////////////////////////////////////////////////////////////////////
MASTER_THREAD
{
current_time=time(NULL);
printf("\n\n -- MONTE CARLO LOOP phase. Time: %s\n\n", ctime(¤t_time));
fflush(stdout);
}
// -- A number of histories smaller than 24 hours in sec (3600*24=86400) means that the user wants to simulate for the input number of seconds in each GPU, not a fix number of histories:
unsigned long long int total_histories_INPUT = total_histories; // Save the original input values to be re-used for multiple projections
int doing_speed_test = -1, simulating_by_time = 0; // 0==false
if (total_histories < (unsigned long long int)(95000))
simulating_by_time = 1; // 1=true
int num_blocks_speed_test = 0;
unsigned long long int histories_speed_test = (unsigned long long int)0, total_histories_speed_test = (unsigned long long int)0;
float node_speed = -1.0f, total_speed = 1.0f;
double current_angle = -999;
int num_p; // == current projection number
// *************************************************************************************
// *** CT simulation loop (including speed test, if simulating by time or multi-GPU) ***
// *************************************************************************************
int num_projections_loop = num_projections;
if (num_projections>1)
{
num_projections_loop++; // Add an extra projection because [0] always corresponds to the 0 deg projection (first tomographic image starts at [1]) !!DBTv1.4!!
}
for (num_p=0; num_p<num_projections_loop; num_p++)
{
// --Re-load the num histories for each new projection !!DBTv1.4!!
total_histories = total_histories_INPUT;
// --Skip the initial 0 deg projection if we are simulating a tomographic scan and we don't want a separate projection with the full dose: !!DBTv1.4!!
if (0==num_p && num_projections>1 && flag_simulateMammoAfterDBT==false)
continue;
if (flag_simulateMammoAfterDBT && 0==num_p)
{
// -- !!DBT!! Simulate the first 0 deg projection (mammo) with almost as many histories (SCALE_MAMMO_DBT factor) as the whole tomographic scan to follow: !!DBTv1.4!!
total_histories = total_histories_INPUT * num_projections * SCALE_MAMMO_DBT; //!!DeBuG!! !!DBTv1.4!! Scaling dose a factor SCALE_MAMMO_DBT (eg, factor 2/3 for 1 mGy mammo for 1.5 mGy DBT)
MASTER_THREAD
{
printf("\n\n !!DBT!! Simulating first a 0 degree projection with %.4f times the number of histories as the complete scan with %d projections = %lld histories\n", SCALE_MAMMO_DBT, num_projections, total_histories);
printf( " Afterwards, simulate the tomo acquisition starting at most negative angle and ending at most positive angle.\n"); // !!DBT!! !!DBTv1.4!!
printf( " If defined, motion blur is disabled and anti-scatter grid enabled only for the single projection.\n\n");
}
}
else if (flag_simulateMammoAfterDBT && 1==num_p)
MASTER_THREAD printf("\n\n !!DBT!! After the first full simulation (eg, mammo), simulate a DBT acquisition (starting at neg angle) with the input number histories per projections.\n\n");
if (0==num_p)
current_angle = 0.0;
else
current_angle = source_data[0].angle_offset + (num_p-1) * source_data[0].angle_per_projection;
MASTER_THREAD
if (num_projections!=1)
if (flag_simulateMammoAfterDBT && 0==num_p)
printf("\n\n\n\n << Simulating a 0 degree projection (mammography) with %d * %f as many histories as each tomographic projection >>\n\n", num_projections, SCALE_MAMMO_DBT);
else
printf("\n\n\n\n << Simulating tomographic projection %d of %d >> Angle: %lf degrees.\n\n", num_p, num_projections, current_angle*RAD2DEG);
clock_start = clock(); // Start the CPU clock
#ifdef USING_CUDA
// *** Simulate in the GPUs the input amount of time or amount of particles:
// -- Estimate GPU speed to use a total simulation time or multiple GPUs:
if ( simulating_by_time==0 && // Simulating a fixed number of particles, not a fixed time (so performing the speed test only once)
node_speed>0.0f && // Speed test already performed for a previous projection in this simulation (node_speed and total_speed variables set)
numprocs>1) // Using multiple GPUs (ie, multiple MPI threads)
{
// -- Simulating successive projections after the first one with a fix number of particles, with multiple MPI threads: re-use the speed test results from the first projection image:
total_histories = (unsigned long long int)(0.5 + ((double)total_histories) * (((double)node_speed)/total_speed));
doing_speed_test = 0; // No speed test for this projection.
}
else if ( simulating_by_time==1 || numprocs>1)
{
// -- Simulating with a time limit OR multiple MPI threads for the first time (num_p==0): run a speed test to calculate the speed of the current GPU and distribute the number of particles to the multiple GPUs or estimate the total number of particles required to run the input amount of time:
// Note that this ELSE IF block will be skipped if we are using a single MPI thread and a fix number of particles.
doing_speed_test = 1; // Remember that we are performing the speed test to make sure we add the test histories to the total before the tally reports.
if (node_speed<0.0f) // Speed test not performed before (first projection being simulated): set num_blocks_speed_test and histories_speed_test.
{
num_blocks_speed_test = guestimate_GPU_performance(gpu_id); // Guestimating a good number of blocks to estimate the speed of different generations of GPUs. Slower GPUs will simulate less particles and hopefully the fastest GPUs will not have to wait much.
}
histories_speed_test = (unsigned long long int)(num_blocks_speed_test*num_threads_per_block)*(unsigned long long int)(histories_per_thread);
dim3 blocks_speed_test(num_blocks_speed_test, 1);
dim3 threads_speed_test(num_threads_per_block, 1);
#ifdef USING_MPI
// -- Init the current random number generator seed to avoid overlapping sequences with other MPI threads:
if (simulating_by_time == 1)
// Simulating by time: set an arbitrary huge number of particles to skip.
update_seed_PRNG((myID + num_p*numprocs), (unsigned long long int)(123456789012), &seed_input); // Set the random number seed far from any other MPI thread (myID) and away from the seeds used in the previous projections (num_p*numprocs).
else
// Simulating by histories (default):
update_seed_PRNG(myID, total_histories_INPUT*num_projections, &seed_input); // Init the random seed for each MPI thread as far away from the previous thread as if all "total_histories*num_projections" histories were simulated by each thread --> guarantees that each thread has an uncorrelated sequence of random values (at least for the first seed of RANECU). !!DBTv1.4!! !!DeBuG!! !!DeBuG!!
checkCudaErrors(hipMemcpy(seed_input_device, &seed_input, sizeof(int), hipMemcpyHostToDevice)); // Upload initial seed value to GPU memory. !!DBTv1.4!!
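      // NOTE (assumption, added for clarity): "update_seed_PRNG" is expected to jump the RANECU sequence ahead by the
      // given number of histories (a skip-ahead of the two MLCGs), which is why each MPI rank and each projection can
      // start from a seed that does not overlap the substreams used by the other ranks.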
printf(" ==> CUDA (MPI process #%d in \"%s\"): estimate GPU speed executing %d blocks of %d threads, %d histories per thread: %lld histories in total (random seed=%d).\n", myID, MPI_processor_name, num_blocks_speed_test, num_threads_per_block, histories_per_thread, histories_speed_test, seed_input);
#else
printf(" ==> CUDA: Estimating the GPU speed executing %d blocks of %d threads, %d histories per thread: %lld histories in total.\n", num_blocks_speed_test, num_threads_per_block, histories_per_thread, histories_speed_test);
#endif
fflush(stdout);
clock_kernel = clock();
// -- Launch Monte Carlo simulation kernel for the speed test:
hipLaunchKernelGGL(( track_particles), dim3(blocks_speed_test),dim3(threads_speed_test), 0, 0, histories_per_thread, (short int)num_p, seed_input_device, image_device, voxels_Edep_device, voxel_mat_dens_device, bitree_device, mfp_Woodcock_table_device, mfp_table_a_device, mfp_table_b_device, rayleigh_table_device, compton_table_device, detector_data_device, source_data_device, materials_dose_device);
#ifdef USING_MPI
// Find out the total number of histories simulated in the speed test by all the GPUs. Note that this MPI call will be executed in parallel with the GPU kernel because it is located before the hipDeviceSynchronize command!
return_reduce = MPI_Allreduce(&histories_speed_test, &total_histories_speed_test, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD);
if (MPI_SUCCESS != return_reduce)
printf("\n\n !!ERROR!! Error reducing (MPI_Allreduce) the total number of histories in the speed test test??? return_reduce = %d for thread %d\n\n\n", return_reduce, myID);
else
#else
total_histories_speed_test = histories_speed_test;
#endif
fflush(stdout);
hipDeviceSynchronize(); // Force the runtime to wait until GPU kernel has completed
getLastCudaError("\n\n !!Kernel execution failed while simulating particle tracks!! "); // Check if the CUDA function returned any error
float speed_test_time = float(clock()-clock_kernel)/CLOCKS_PER_SEC;
node_speed = (float) (((double)histories_speed_test)/speed_test_time);
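      // Example with illustrative numbers: if the speed test ran 6,553,600 histories in 2.0 s, then
      // node_speed = 6,553,600/2.0 = 3,276,800 hist/s; this value is reused below to split the remaining workload.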
#ifdef USING_MPI
printf(" (MPI process #%d): Estimated GPU speed = %lld hist / %.4f s = %.3f hist/s\n", myID, histories_speed_test, speed_test_time, node_speed);
#else
printf(" Estimated GPU speed = %lld hist / %.3f s = %.3f hist/s\n", histories_speed_test, speed_test_time, node_speed);
#endif
// !!DBTv1.4!! !!DeBuG!! No need to update the seed in the main program bc each GPU continues its series!
// // -- Init random number generator seed to avoid repeating the random numbers used in the speed test:
// update_seed_PRNG(1, histories_speed_test, &seed_input);
if (simulating_by_time==1)
{
// -- Set number of histories for each GPU when simulating by time:
if (total_histories > speed_test_time)
total_histories = (total_histories - speed_test_time)*node_speed; // Calculate the total number of remaining histories by "GPU speed" * "remaining time"
else
total_histories = 1; // Enough particles simulated already, simulate just one more history (block) and report (kernel call would fail if total_histories < or == 0).
}
else
{
#ifdef USING_MPI
// -- Simulating a fix number of histories divided between all GPUs (execution time variable):
// Compute the fraction of the total speed that accounts for the current MPI thread:
return_reduce = MPI_Allreduce(&node_speed, &total_speed, 1, MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD); // Sum all the times and send result to all processes
if (MPI_SUCCESS != return_reduce)
printf("\n\n !!ERROR!! Error reducing (MPI_Allreduce) the speed test results??? return_reduce = %d for thread %d\n\n\n", return_reduce, myID);
else
MASTER_THREAD
{
printf("\n -- Total speed for all GPUs (MPI_Allreduce) = %.3f hist/s; total histories simulated in the speed test (MPI_Allreduce) = %lld.\n", total_speed, total_histories_speed_test);
printf(" The master thread will simulate %.2f%% of the x rays in the simulation.\n", 100.0f*node_speed/total_speed);
}
#else
total_speed = node_speed;
#endif
// - Divide the remaining histories among the MPI threads (GPUs) according to their fraction of the total speed (rounding up).
if (total_histories_speed_test < total_histories)
total_histories = (unsigned long long int)(0.5 + ((double)(total_histories-total_histories_speed_test)) * ((double)(node_speed/total_speed)));
else
total_histories = numprocs; // Enough particles simulated already, simulate just one more history (block) and report (kernel call would fail if total_histories < or == 0).
}
} // [Done with case of simulating projections by time or first projection by number of particles]
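    // Example of the proportional split above (illustrative numbers): for 1.0e9 requested histories, 2.0e7 histories
    // already simulated in the combined speed test, and node_speed/total_speed = 0.25, this GPU is assigned
    // 0.5 + (1.0e9 - 2.0e7)*0.25 ~ 2.45e8 histories.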
// else ==> if using only 1 GPU and a fixed number of histories the whole speed test is skipped. The random seed will be different for each projection because it is updated after calling the kernel below.
// fflush(stdout);
// MPI_Barrier(MPI_COMM_WORLD); // Synchronize MPI threads here if we want to have a better organized output text at the expense of losing some performance
// *** Perform the MC simulation itself (the speed test would be skipped for a single CPU thread using a fix number of histories):
// -- Compute the number of CUDA blocks to simulate, rounding up and making sure it is below the limit of 65535 blocks.
// The total number of particles simulated will be increased to the nearest multiple "histories_per_thread".
double total_threads = ceil(((double)total_histories)/((double)histories_per_thread)); // Divide the histories among GPU threads, rounding up and avoiding overflow // New in MC-GPU v1.4 (Mina's bug)
int total_threads_blocks = (int)(((double)total_threads)/((double)num_threads_per_block) + 0.9990); // Divide the GPU threads among CUDA blocks, rounding up
if (total_threads_blocks>65535)
{
#ifdef USING_MPI
printf(" WARNING (MPI process #%d): %d hist per thread would produce %d CUDA blocks (>65535 maximum).", myID, histories_per_thread, total_threads_blocks);
#else
printf("\n WARNING: %d hist per thread would produce %d CUDA blocks, more than the maximum value of 65535.", histories_per_thread, total_threads_blocks);
#endif
total_threads_blocks = 65000; // Increase the histories per thread to have exactly 65000 blocks.
histories_per_thread = (int) ( ((double)total_histories)/((double)(total_threads_blocks*num_threads_per_block)) + 0.9990 );
printf(" Increasing to %d hist to run exactly %d blocks in the GPU.\n", histories_per_thread, total_threads_blocks);
}
else if (total_threads_blocks<1)
{
total_threads_blocks = 1; // Make sure we have at least 1 block to run
}
total_histories = ((unsigned long long int)(total_threads_blocks*num_threads_per_block))*histories_per_thread; // Total histories will be equal or higher than the input value due to the rounding up in the division of the histories
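    // Worked example of the rounding above (illustrative numbers): total_histories=1.0e8, histories_per_thread=1000
    //   --> total_threads = 100000; with num_threads_per_block=128 --> total_threads_blocks = ceil(100000/128) = 782;
    //   the number of histories actually simulated becomes 782*128*1000 = 100,096,000 (slightly above the request).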
float total_histories_current_kernel_float = (float)total_histories; // Keep a float approx of the num histories for the timing below
checkCudaErrors(hipMemcpy(&seed_input, seed_input_device, sizeof(int), hipMemcpyDeviceToHost)); // Download latest seed value used in the GPU. !!DBTv1.4!!
fflush(stdout);
#ifdef USING_MPI
MASTER_THREAD printf("\n\n");
printf(" ==> CUDA (MPI process #%d in \"%s\"): Executing %d blocks of %d threads, with %d histories in each thread: %lld histories in total (random seed=%d, num_p=%d).\n", myID, MPI_processor_name, total_threads_blocks, num_threads_per_block, histories_per_thread, total_histories, seed_input, num_p);
MPI_Barrier(MPI_COMM_WORLD); // Synchronize MPI threads to better organize output
#else
printf("\n ==> CUDA: Executing %d blocks of %d threads, with %d histories in each thread: %lld histories in total (random seed=%d, num_p=%d).\n", total_threads_blocks, num_threads_per_block, histories_per_thread, total_histories, seed_input, num_p);
#endif
fflush(stdout);
// -- Setup the execution parameters (Max number threads per block: 512, Max sizes each dimension of grid: 65535x65535x1)
dim3 blocks(total_threads_blocks, 1);
dim3 threads(num_threads_per_block, 1);
clock_kernel = clock();
// *** Execute the x-ray transport kernel in the GPU ***
hipLaunchKernelGGL(( track_particles), dim3(blocks),dim3(threads), 0, 0, histories_per_thread, (short int)num_p, seed_input_device, image_device, voxels_Edep_device, voxel_mat_dens_device, bitree_device, mfp_Woodcock_table_device, mfp_table_a_device, mfp_table_b_device, rayleigh_table_device, compton_table_device, detector_data_device, source_data_device, materials_dose_device);
#ifdef USING_MPI
if (numprocs>1) // Using more than 1 MPI thread:
{
// -- Compute the total number of histories simulated with all MPI threads, including the speed test (histories_speed_test==0 if the speed test was skipped).
//    These MPI messages are sent concurrently with the GPU kernel computation for maximum efficiency.
unsigned long long int current_GPU_histories = total_histories;
return_reduce = MPI_Reduce(¤t_GPU_histories, &total_histories, 1, MPI_UNSIGNED_LONG, MPI_SUM, 0, MPI_COMM_WORLD); // Sum all the simulated particles and send to thread 0
}
#endif
if (1==doing_speed_test)
total_histories += total_histories_speed_test; // Speed test was done: compute the total number of histories including the particles simulated in the speed test
#ifdef USING_MPI
if (numprocs>1) // Using more than 1 MPI thread:
{
MASTER_THREAD
{
if (MPI_SUCCESS != return_reduce)
printf("\n\n !!ERROR!! Error getting the total number of particles simulated in all the GPUs (MPI_Reduce). return_reduce = %d.\n\n\n", return_reduce);
if (1==simulating_by_time || 1==doing_speed_test)
{
printf("\n -- Total number of histories being simulated in all the GPUs for the current projection (including speed test)= %.3lld.\n\n", total_histories);
}
}
}
#endif
fflush(stdout);
hipDeviceSynchronize(); // Force the runtime to wait until the GPU kernel is completed
getLastCudaError("\n\n !!Kernel execution failed while simulating particle tracks!! "); // Check if kernel execution generated any error
float real_GPU_speed = total_histories_current_kernel_float/(float(clock()-clock_kernel)/CLOCKS_PER_SEC); // GPU speed for all the image simulation, not just the speed test.
// #ifdef USING_MPI
// printf(" ==> CUDA (MPI process #%d in \"%s\"): GPU kernel execution time: %.4f s (%.3f hist/s)\n", myID, MPI_processor_name, time_kernel, total_histories_current_kernel_float/time_kernel);
// #else
// printf(" ==> CUDA: Kernel execution time: %.4f s\n", time_kernel);
// #endif
// -- Copy the simulated image from the GPU memory to the CPU:
checkCudaErrors(hipMemcpy(image, image_device, image_bytes, hipMemcpyDeviceToHost) ); // Copy final results to host
///////////////////////////////////////////////////////////////////////////////////////////////////
#else
// *** Executing the kernel in the CPU:
// If using more than one MPI thread, the number of particles is equally divided among the threads.
// !!DeBuG!! --> NOT USING SPEED TEST IN THE CPU!! Not possible to limit the execution by time in the CPU.
int total_threads = (int)(((double)total_histories)/((double)histories_per_thread*numprocs) + 0.9990); // Divide the histories among MPI threads, rounding up
unsigned long long int total_histories_per_thread = ((unsigned long long int)(total_threads))*histories_per_thread;
total_histories = total_histories_per_thread*numprocs; // Total histories will be equal or higher than the input value due to the rounding up in the division of the histories
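    // Example of the division above (illustrative numbers): total_histories=1.0e7, histories_per_thread=5000 and
    // numprocs=4  -->  total_threads = ceil(1e7/(5000*4)) = 500 batches per MPI thread, ie, 2.5e6 histories per
    // thread and 1.0e7 histories in total.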
if (numprocs>1)
{
#ifdef USING_MPI
update_seed_PRNG(myID, total_histories, &seed_input); // Compute the initial random seed for each MPI threads, avoiding overlapping of the random sequences
printf(" Executing %d history batches in the CPU, with %d histories in each batch (thread %d of %d at \'%s\'): %lld histories (random seed=%d).\n", total_threads, histories_per_thread, myID+1, numprocs, MPI_processor_name, total_histories_per_thread, seed_input);
MASTER_THREAD printf(" Simulating %lld histories in total for the %d MPI threads.\n\n", total_histories, numprocs);
#endif
}
else
{
printf(" Executing %d history batches in the CPU, with %d histories in each batch: %lld histories in total.\n\n", total_threads, histories_per_thread, total_histories);
}
fflush(stdout);
// -- Copy local structures to global struct variables accessible from "track_particles" (__constant__ variables in the GPU):
source_energy_data_CONST = source_energy_data;
voxel_data_CONST = voxel_data;
mfp_table_data_CONST = mfp_table_data;
dose_ROI_x_min_CONST = dose_ROI_x_min;
dose_ROI_x_max_CONST = dose_ROI_x_max;
dose_ROI_y_min_CONST = dose_ROI_y_min;
dose_ROI_y_max_CONST = dose_ROI_y_max;
dose_ROI_z_min_CONST = dose_ROI_z_min;
dose_ROI_z_max_CONST = dose_ROI_z_max;
int CPU_batch;
for(CPU_batch=0; CPU_batch<total_threads; CPU_batch++)
{
// -- Simulate a particle track initializing the PRNG with the particle number 'n':
track_particles(CPU_batch, histories_per_thread, (short int)num_p, seed_input_device, image, voxels_Edep, voxel_mat_dens, bitree_device, mfp_Woodcock_table, mfp_table_a, mfp_table_b, &rayleigh_table, &compton_table, detector_data, source_data, materials_dose);
}
#endif
// Get current time and calculate execution time in the MC loop:
time_elapsed_MC_loop = ((double)(clock()-clock_start))/CLOCKS_PER_SEC;
time_total_MC_simulation += time_elapsed_MC_loop; // Count total time (in seconds).
// printf("\n -- MONTE CARLO LOOP finished: time tallied in MAIN program: %.3f s\n\n", time_elapsed_MC_loop);
///////////////////////////////////////////////////////////////////////////////////////////////////
// *** Move the images simulated in the GPU (or multiple CPU cores) to the host memory space:
#ifdef USING_MPI
if (numprocs>1) // Using more than 1 MPI thread
{
// -- Add the images simulated in all the MPI threads:
MASTER_THREAD printf("\n >> Synchronize the MPI threads and accumulate the simulated images (MPI_Reduce).\n\n");
// Allocate the memory for the final image in the master thread:
unsigned long long int *image_MPI = NULL;
MASTER_THREAD image_MPI = (unsigned long long int*) malloc(image_bytes);
MASTER_THREAD if (image_MPI==NULL)
{
printf("\n\n !!malloc ERROR!! Problem allocating the total MPI image. Out of memory??\n\n");
exit(-4);
}
// !!DeBuG!! To know how much time the threads lose waiting for other threads in the MPI_Reduce, I have to use an explicit barrier here. It may be more efficient to let the threads advance to the MPI_Reduce directly.
clock_start = clock();
MPI_Barrier(MPI_COMM_WORLD); // Synchronize MPI threads
current_time=time(NULL);
char_time = ctime(&current_time); char_time[19] = '\0'; // The time is located between the characters 11 and 19.
#ifdef USING_CUDA
if (1==doing_speed_test) // This message will be shown only for the first projection simulated in the GPU.
printf(" ==> CUDA (MPI process #%d in \"%s\"): GPU speed = %.4f hist/s. Time spent at MPI_Barrier waiting to add the partial images: %.6f s (time: %8s)\n", myID, MPI_processor_name, real_GPU_speed, ((double)(clock()-clock_start))/CLOCKS_PER_SEC, &char_time[11]);
#else
if (-1==doing_speed_test)
{
printf(" ==> CUDA (MPI process #%d in \"%s\"): Time spent at MPI_Barrier waiting to add the partial images: %.6f s (time: %8s)\n", myID, MPI_processor_name, ((double)(clock()-clock_start))/CLOCKS_PER_SEC, &char_time[11]);
doing_speed_test = 0;
}
#endif
fflush(stdout);
MASTER_THREAD clock_start = clock();
// -- Sum the pixel values from the different simulated images and send to thread 0.
// MPI_Reduce will act as a synchronization barrier for all the MPI threads.
int num_pixels_image = image_bytes/((int)sizeof(unsigned long long int)); // Number of elements allocated in the "image" array.
return_reduce = MPI_Reduce(image, image_MPI, num_pixels_image, MPI_UNSIGNED_LONG, MPI_SUM, 0, MPI_COMM_WORLD);
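      // NOTE (added for clarity): the image elements are "unsigned long long int" but the reduction uses
      // MPI_UNSIGNED_LONG; the two types have the same 64-bit size only on LP64 platforms (as assumed elsewhere in
      // this code for 64-bit systems); MPI_UNSIGNED_LONG_LONG would be the fully portable choice.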
if (MPI_SUCCESS != return_reduce)
{
printf("\n\n !!ERROR!! Possible error reducing (MPI_SUM) the image results??? Returned value MPI_Reduce = %d\n\n\n", return_reduce);
}
// -- Exchange the image simulated in thread 0 for the final image from all threads, in the master thread:
MASTER_THREAD
{
free(image);
image = image_MPI; // point the image pointer to the new image in host memory
image_MPI = NULL;
printf("\n -- Time reducing the images simulated by all the MPI threads (MPI_Reduce) according to the master thread = %.6f s.\n", ((double)(clock()-clock_start))/CLOCKS_PER_SEC);
}
}
#endif
// *** Report the final results:
char file_name_output_num_p[253];
// if (1==num_projections || (flag_simulateMammoAfterDBT && 0==num_p)) // !!DBTv1.4!!
if (1==num_projections)
strcpy(file_name_output_num_p, file_name_output); // Use the input name for single projection
else
sprintf(file_name_output_num_p, "%s_%04d", file_name_output, num_p); // Create the output file name with the input name + projection number (4 digits, padding with 0)
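  // Example of the name built above (illustrative output name): for file_name_output="mc-gpu_image.dat" and num_p=3
  // the projection is reported to "mc-gpu_image.dat_0003" (4-digit, zero-padded projection number).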
if (num_p>0)
{
MASTER_THREAD report_image(file_name_output_num_p, detector_data, source_data, mean_energy_spectrum, image, time_elapsed_MC_loop, total_histories, num_p, num_projections, myID, numprocs, current_angle, &seed_input);
}
else
{
// Projection 0 happens only when num_projections==1 or when flag_simulateMammoAfterDBT==true:
MASTER_THREAD report_image(file_name_output_num_p, detector_data, source_data, mean_energy_spectrum, image, time_elapsed_MC_loop, total_histories, 0, 1, myID, numprocs, current_angle, &seed_input);
}
// *** Clear the image after reporting, unless this is the last projection to simulate:
if (num_p<num_projections)
{
int pixels_per_image = detector_data[0].num_pixels.x * detector_data[0].num_pixels.y;
#ifdef USING_CUDA
MASTER_THREAD printf(" ==> CUDA: Launching kernel to reset the device image to 0: number of blocks = %d, threads per block = 128\n", (int)(ceil(pixels_per_image/128.0f)+0.01f) );
hipLaunchKernelGGL(( init_image_array_GPU), dim3((int)(ceil(pixels_per_image/128.0f)+0.01f)),dim3(128), 0, 0, image_device, pixels_per_image);
fflush(stdout);
hipDeviceSynchronize();
getLastCudaError("\n\n !!Kernel execution failed initializing the image array!! "); // Check if kernel execution generated any error:
#else
memset(image, 0, image_bytes); // Init memory space to 0. (see http://www.lainoox.com/c-memset-examples/)
#endif
}
if (num_p==0 && flag_material_dose==1 && flag_simulateMammoAfterDBT) // !!DBTv1.4!!
{
// --Report "tally_materials_dose" for the first projection corresponding to a mammo acquisition, and reset dose counter. The dose for the DBT scan only will be reported at the end: !!mammo-DBT!!
MASTER_THREAD printf("\n\n !!DBT Reporting \"tally_materials_dose\" for the first 0 deg projection, and reseting material and voxel dose counters.\n");
checkCudaErrors( hipMemcpy( materials_dose, materials_dose_device, MAX_MATERIALS*sizeof(ulonglong2), hipMemcpyDeviceToHost) ); // Copy materials dose results to host
#ifdef USING_MPI
ulonglong2 materials_dose_total[MAX_MATERIALS];
return_reduce = MPI_Reduce(materials_dose, materials_dose_total, 2*MAX_MATERIALS, MPI_UNSIGNED_LONG_LONG, MPI_SUM, 0, MPI_COMM_WORLD); // !!tally_materials_dose!!
#else
ulonglong2 *materials_dose_total = materials_dose; // Create a dummy pointer to the materials_dose data
#endif
MASTER_THREAD report_materials_dose(1, total_histories, density_nominal, materials_dose_total, mass_materials, file_name_materials); // Report the material dose for the mammo image only !!tally_materials_dose!!
int kk;
for(kk=0;kk<MAX_MATERIALS;kk++) // Reset dose in CPU and GPU memory
{
materials_dose[kk].x = 0;
materials_dose[kk].y = 0;
}
checkCudaErrors(hipMemcpy(materials_dose_device, materials_dose, MAX_MATERIALS*sizeof(ulonglong2), hipMemcpyHostToDevice)); // !!tally_materials_dose!!
// --Reject the voxel doses tallied for this first projection: re-copy the empty host data to GPU // !!DeBuG!! (It would be more efficient to disable the tally in kernel directly for the first projection...)
if (dose_ROI_x_max > -1)
checkCudaErrors(hipMemcpy(voxels_Edep_device, voxels_Edep, voxels_Edep_bytes, hipMemcpyHostToDevice) ); // Copy the zeroed host array itself (not the address of the host pointer) to reset the device dose tally. !!DBTv1.4!!
}
} // [Projection loop end: iterate for next CT projection angle]
///////////////////////////////////////////////////////////////////////////////////////////////////
// *** Simulation finished! Report dose and timings and clean up.
#ifdef USING_CUDA
if (dose_ROI_x_max > -1)
{
MASTER_THREAD clock_kernel = clock();
checkCudaErrors( hipMemcpy( voxels_Edep, voxels_Edep_device, voxels_Edep_bytes, hipMemcpyDeviceToHost) ); // Copy final dose results to host (for every MPI threads)
MASTER_THREAD printf(" ==> CUDA: Time copying dose results from device to host: %.6f s\n", float(clock()-clock_kernel)/CLOCKS_PER_SEC);
}
if (flag_material_dose==1)
checkCudaErrors( hipMemcpy( materials_dose, materials_dose_device, MAX_MATERIALS*sizeof(ulonglong2), hipMemcpyDeviceToHost) ); // Copy materials dose results to host, if tally enabled in input file. !!tally_materials_dose!!
// -- Clean up GPU device memory:
clock_kernel = clock();
hipFree(voxel_mat_dens_device);
hipFree(image_device);
hipFree(mfp_Woodcock_table_device);
hipFree(mfp_table_a_device);
hipFree(mfp_table_b_device);
hipFree(voxels_Edep_device);
checkCudaErrors( hipDeviceReset() );
MASTER_THREAD printf(" ==> CUDA: Time freeing the device memory and ending the GPU threads: %.6f s\n", float(clock()-clock_kernel)/CLOCKS_PER_SEC);
#endif
#ifdef USING_MPI
current_time=time(NULL); // Get current time (in seconds)
char_time = ctime(&current_time); char_time[19] = '\0'; // The time is located between the characters 11 and 19.
printf(" >> MPI thread %d in \"%s\" done! (local time: %s)\n", myID, MPI_processor_name, &char_time[11]);
fflush(stdout); // Clear the screen output buffer
#endif
// *** Report the total dose for all the projections, if the tally is not disabled (must be done after MPI_Barrier to have all the MPI threads synchronized):
MASTER_THREAD clock_start = clock();
if (dose_ROI_x_max > -1)
{
#ifdef USING_MPI
if (numprocs>1)
{
// -- Use MPI_Reduce to accumulate the dose from all projections:
// Allocate memory in the root node to combine the dose results with MPI_REDUCE:
int num_voxels_ROI = voxels_Edep_bytes/((int)sizeof(ulonglong2)); // Number of elements allocated in the "dose" array.
ulonglong2 *voxels_Edep_total = (ulonglong2*) malloc(voxels_Edep_bytes);
if (voxels_Edep_total==NULL)
{
printf("\n\n !!malloc ERROR!! Not enough memory to allocate %d voxels by the MPI root node for the total deposited dose (and uncertainty) array (%f Mbytes)!!\n\n", num_voxels_ROI, voxels_Edep_bytes/(1024.f*1024.f));
exit(-2);
}
else
{
MASTER_THREAD
{
printf("\n >> Array for the total deposited dose correctly allocated by the MPI root node (%f Mbytes).\n", voxels_Edep_bytes/(1024.f*1024.f));
printf( " Waiting at MPI_Barrier for thread synchronization.\n");
}
}
MASTER_THREAD printf("\n >> Calling MPI_Reduce to accumulate the dose from all projections...\n\n");
return_reduce = MPI_Reduce(voxels_Edep, voxels_Edep_total, 2*num_voxels_ROI, MPI_UNSIGNED_LONG_LONG, MPI_SUM, 0, MPI_COMM_WORLD); // Sum all the doses in "voxels_Edep_total" at thread 0.
// !!DeBuG!! I am sending a "ulonglong2" array as if it was composed of 2 "ulonglong" variables per element. There could be problems if the alignment in the structure includes some extra padding space (but it seems ok for a 64-bit computer).
if (MPI_SUCCESS != return_reduce)
{
printf("\n\n !!ERROR!! Possible error reducing (MPI_SUM) the dose results??? return_reduce = %d for thread %d\n\n\n", return_reduce, myID);
}
// -- Exchange the dose simulated in thread 0 for the final dose from all threads
MASTER_THREAD
{
free(voxels_Edep);
voxels_Edep = voxels_Edep_total; // point the voxels_Edep pointer to the final voxels_Edep array in host memory
voxels_Edep_total = NULL; // This pointer is not needed by now
}
}
#endif
// -- Report the total dose for all the projections:
MASTER_THREAD report_voxels_dose(file_dose_output, num_projections, &voxel_data, voxel_mat_dens, voxels_Edep, time_total_MC_simulation, total_histories, dose_ROI_x_min, dose_ROI_x_max, dose_ROI_y_min, dose_ROI_y_max, dose_ROI_z_min, dose_ROI_z_max, source_data);
}
// -- Report "tally_materials_dose" with data from all MPI threads, if tally enabled:
if (flag_material_dose==1)
{
#ifdef USING_MPI
ulonglong2 materials_dose_total[MAX_MATERIALS];
return_reduce = MPI_Reduce(materials_dose, materials_dose_total, 2*MAX_MATERIALS, MPI_UNSIGNED_LONG_LONG, MPI_SUM, 0, MPI_COMM_WORLD); // !!tally_materials_dose!!
#else
ulonglong2 *materials_dose_total = materials_dose; // Create a dummy pointer to the materials_dose data
#endif
MASTER_THREAD report_materials_dose(num_projections, total_histories, density_nominal, materials_dose_total, mass_materials, file_name_materials); // Report the material dose !!tally_materials_dose!!
}
MASTER_THREAD clock_end = clock();
MASTER_THREAD printf("\n\n ==> CUDA: Time reporting the dose data: %.6f s\n", ((double)(clock_end-clock_start))/CLOCKS_PER_SEC);
// *** Clean up RAM memory. If CUDA was used, the geometry and table data were already cleaned for MPI threads other than root after copying data to the GPU:
free(voxels_Edep);
free(image);
#ifdef USING_CUDA
MASTER_THREAD free(voxel_mat_dens);
#else
free(voxel_mat_dens);
free(mfp_Woodcock_table);
free(mfp_table_a);
free(mfp_table_b);
#endif
MASTER_THREAD
{
printf("\n\n\n -- SIMULATION FINISHED!\n");
time_total_MC_init_report = ((double)(clock()-clock_start_beginning))/CLOCKS_PER_SEC;
unsigned long long int total_histories_final = total_histories*((unsigned long long int)num_projections);
if (flag_simulateMammoAfterDBT)
total_histories_final = total_histories_final + total_histories_final*SCALE_MAMMO_DBT; // Add the histories for both the tomo and the 0 deg projection
// -- Report total performance:
printf("\n\n ****** TOTAL SIMULATION PERFORMANCE (including initialization and reporting) ******\n\n");
printf( " >>> Execution time including initialization, transport and report: %.3f s.\n", time_total_MC_init_report);
printf( " >>> Time spent in the Monte Carlo transport only: %.3f s.\n", time_total_MC_simulation);
printf( " >>> Time spent in initialization, reporting and clean up: %.3f s.\n", (time_total_MC_init_report-time_total_MC_simulation));
printf( " >>> Total number of simulated x rays: %lld\n", total_histories_final);
if (time_total_MC_init_report>0.000001)
printf( " >>> Total speed (using %d thread, including transport, initialization and report times) [x-rays/s]: %.2f\n", numprocs, (double)(total_histories_final/time_total_MC_init_report));
printf( " >>> Total speed Monte Carlo transport only (using %d thread) [x-rays/s]: %.2f\n\n", numprocs, (double)(total_histories_final/time_total_MC_simulation));
current_time=time(NULL); // Get current time (in seconds)
printf("\n****** Code execution finished on: %s\n\n", ctime(¤t_time));
}
#ifdef USING_CUDA
hipDeviceReset(); // Destroy the CUDA context before ending program (flush visual debugger data).
#endif
#ifdef USING_MPI
MPI_Finalize(); // Finalize MPI library: no more MPI calls allowed below.
#endif
return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Read the input file given in the command line and return the significant data.
//! Example input file:
//!
//! 1000000 [Total number of histories to simulate]
//! geometry.vox [Voxelized geometry file name]
//! material.mat [Material data file name]
//!
//! @param[in] argc Command line parameters
//! @param[in] argv Command line parameters: name of input file
//! @param[out] total_histories Total number of particles to simulate
//! @param[out] seed_input Input random number generator seed
//! @param[out] num_threads_per_block Number of CUDA threads for each GPU block
//! @param[out] detector_data
//! @param[out] image
//! @param[out] source_data
//! @param[out] file_name_voxels
//! @param[out] file_name_materials
//! @param[out] file_name_output
////////////////////////////////////////////////////////////////////////////////
void read_input(int argc, char** argv, int myID, unsigned long long int* total_histories, int* seed_input, int* gpu_id, int* num_threads_per_block, int* histories_per_thread, struct detector_struct* detector_data, unsigned long long int** image_ptr, int* image_bytes, struct source_struct* source_data, struct source_energy_struct* source_energy_data, struct voxel_struct* voxel_data, char* file_name_voxels, char file_name_materials[MAX_MATERIALS][250] , char* file_name_output, char* file_name_espc, int* num_projections, ulonglong2** voxels_Edep_ptr, int* voxels_Edep_bytes, char* file_dose_output, short int* dose_ROI_x_min, short int* dose_ROI_x_max, short int* dose_ROI_y_min, short int* dose_ROI_y_max, short int* dose_ROI_z_min, short int* dose_ROI_z_max, double* SRotAxisD, double* translation_helical, int* flag_material_dose, bool* flag_simulateMammoAfterDBT, bool* flag_detectorFixed)
{
FILE* file_ptr = NULL;
char new_line[250];
char *new_line_ptr = NULL;
double dummy_double;
// -- Read the input file name from command line, if given (otherwise keep default value):
if (2==argc)
{
file_ptr = fopen(argv[1], "r");
if (NULL==file_ptr)
{
printf("\n\n !!read_input ERROR!! Input file not found or not readable. Input file name: \'%s\'\n\n", argv[1]);
// Not finalizing MPI here because we want the execution to fail if there is a problem with any MPI thread!!! MPI_Finalize(); // Finalize MPI library: no more MPI calls allowed below.
exit(-1);
}
}
else if (argc>2)
{
MASTER_THREAD printf("\n\n !!read_input ERROR!! Too many input parameter (argc=%d)!! Provide only the input file name.\n\n", argc);
// Finalizing MPI because all threads will detect the same problem and fail together.
#ifdef USING_MPI
MPI_Finalize();
#endif
exit(-1);
}
else
{
MASTER_THREAD printf("\n\n !!read_input ERROR!! Input file name not given as an execution parameter!! Try again...\n\n");
#ifdef USING_MPI
MPI_Finalize();
#endif
exit(-1);
}
MASTER_THREAD printf("\n -- Reading the input file \'%s\':\n", argv[1]);
/////////////////////////////////////////////////////////////////////////////
// -- Init. [SECTION SIMULATION CONFIG v.2009-05-12]:
do
{
new_line_ptr = fgets(new_line, 250, file_ptr); // Read full line (max. 250 characters).
if (new_line_ptr==NULL)
{
printf("\n\n !!read_input ERROR!! Input file is not readable or does not contain the string \'SECTION SIMULATION CONFIG v.2009-05-12\'!!\n");
exit(-2);
}
}
while(strstr(new_line,"SECTION SIMULATION CONFIG v.2009-05-12")==NULL); // Skip comments and empty lines until the section begins
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%lf", &dummy_double);
*total_histories = (unsigned long long int) (dummy_double+0.0001); // Maximum unsigned long long value: 18446744073709551615
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%d", seed_input); // Set the RANECU PRNG seed (the same seed will be used to init the 2 MLCGs in RANECU)
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%d", gpu_id); // GPU NUMBER WHERE SIMULATION WILL RUN
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%d", num_threads_per_block); // GPU THREADS PER CUDA BLOCK
#ifdef USING_CUDA
if ((*num_threads_per_block%32)!=0)
{
MASTER_THREAD printf("\n\n !!read_input ERROR!! The input number of GPU threads per CUDA block must be a multiple of 32 (warp size). Input value: %d !!\n\n", *num_threads_per_block);
#ifdef USING_MPI
MPI_Finalize();
#endif
exit(-2);
}
#endif
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%d", histories_per_thread); // HISTORIES PER GPU THREAD
/////////////////////////////////////////////////////////////////////////////
// -- Init. [SECTION SOURCE v.2009-05-12]: !!DBTv1.4!! ; [SECTION SOURCE v.2011-07-12] ; [SECTION SOURCE v.2009-05-12]
do
{
new_line_ptr = fgets(new_line, 250, file_ptr);
if (new_line_ptr==NULL)
{
printf("\n\n !!read_input ERROR!! Input file is not readable or does not contain the string \'SECTION SOURCE v.2016-12-02\'!!\n"); // !!DBTv1.4!!
exit(-2);
}
}
while(strstr(new_line,"SECTION SOURCE v.2016-12-02")==NULL); // Skip comments and empty lines until the section begins
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); // X-RAY ENERGY SPECTRUM FILE
trim_name(new_line, file_name_espc);
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%f %f %f", &source_data[0].position.x, &source_data[0].position.y, &source_data[0].position.z); // SOURCE POSITION: X Y Z [cm]
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%f %f %f", &source_data[0].direction.x, &source_data[0].direction.y, &source_data[0].direction.z); // SOURCE DIRECTION COSINES: U V W
// -- Normalize the input beam direction to 1:
dummy_double = 1.0/sqrt((double)(source_data[0].direction.x*source_data[0].direction.x + source_data[0].direction.y*source_data[0].direction.y + source_data[0].direction.z*source_data[0].direction.z));
source_data[0].direction.x = (float)(((double)source_data[0].direction.x)*dummy_double);
source_data[0].direction.y = (float)(((double)source_data[0].direction.y)*dummy_double);
source_data[0].direction.z = (float)(((double)source_data[0].direction.z)*dummy_double);
// Read input fan beam polar (theta) and azimuthal (phi) aperture angles (deg):
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
double phi_aperture, theta_aperture;
sscanf(new_line, "%lf %lf", &phi_aperture, &theta_aperture);
if (0.5*theta_aperture > 180.0)
{
MASTER_THREAD printf("\n\n !!read_input ERROR!! Input polar semi-aperture must be in [0,180] deg.!\n");
MASTER_THREAD printf(" 0.5*theta_aperture = %lf, 0.5*phi_aperture = %lf\n", 0.5*theta_aperture, 0.5*phi_aperture);
#ifdef USING_MPI
MPI_Finalize();
#endif
exit(-2);
}
if (0.5*phi_aperture > 360.0)
{
MASTER_THREAD printf("\n\n !!read_input ERROR!! Input azimuthal semi-aperture must be in [0,360] deg.!\n");
MASTER_THREAD printf(" 0.5*theta_aperture = %lf, 0.5*phi_aperture = %lf\n", 0.5*theta_aperture, 0.5*phi_aperture);
#ifdef USING_MPI
MPI_Finalize();
#endif
exit(-2);
}
// Read the source rotation: necessary to define which direction is azimuthal (width) and polar (height) in the rotated source emission: !!DBTv1.4!!
double rotZ1, rotY2, rotZ3;
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%lf %lf %lf", &rotZ1, &rotY2, &rotZ3); // EULER ANGLES (RzRyRz) TO ROTATE RECTANGULAR BEAM FROM DEFAULT POSITION AT Y=0, NORMAL=(0,-1,0) //!!DBTv1.4!!
// *** Init the fan beam source model:
source_data[0].rot_fan[0] = 1; source_data[0].rot_fan[1] = 0; source_data[0].rot_fan[2] = 0; // Init rotation matrix to identity
source_data[0].rot_fan[3] = 0; source_data[0].rot_fan[4] = 1; source_data[0].rot_fan[5] = 0;
source_data[0].rot_fan[6] = 0; source_data[0].rot_fan[7] = 0; source_data[0].rot_fan[8] = 1;
create_rotation_matrix_around_axis(rotZ1*DEG2RAD, 0, 0, 1, source_data[0].rot_fan); // 1st rotation around Z !!DBTv1.4!!
create_rotation_matrix_around_axis(rotY2*DEG2RAD, 0, 1, 0, source_data[0].rot_fan); // 2nd rotation around Y !!DBTv1.4!!
create_rotation_matrix_around_axis(rotZ3*DEG2RAD, 0, 0, 1, source_data[0].rot_fan); // 3rd rotation around Z !!DBTv1.4!!
MASTER_THREAD printf(" Input Euler angles to rotate the source from (0,1,0) to the input direction [deg]: rotZ1=%f , rotY2=%f , rotZ3=%f\n", rotZ1, rotY2, rotZ3); // !!DBTv1.4!! !!VERBOSE!!
// printf("\n [%f %f %f]\n",source_data[0].rot_fan[0],source_data[0].rot_fan[1],source_data[0].rot_fan[2]);
// printf( " Rotation matrix: Rodrigues = |%f %f %f|\n",source_data[0].rot_fan[3],source_data[0].rot_fan[4],source_data[0].rot_fan[5]); // !!DBTv1.4!! !!VERBOSE!!
// printf( " [%f %f %f]\n\n",source_data[0].rot_fan[6],source_data[0].rot_fan[7],source_data[0].rot_fan[8]);
float3 default_direction;
default_direction.x = 0.0f; default_direction.y = 1.0f; default_direction.z = 0.0f;
apply_rotation(&default_direction, source_data[0].rot_fan);
if ( fabsf(default_direction.x-source_data[0].direction.x)>1e-5f || fabsf(default_direction.y-source_data[0].direction.y)>1e-5f || fabsf(default_direction.z-source_data[0].direction.z)>1e-5f )
{
MASTER_THREAD
{
printf("\n\n!!WARNING!! The input Euler rotation angles for the source are incorrect!!!!!\n"); // !!DBTv1.4!! !!DeBuG!!
printf( " The Euler angles are defined as a rotation around Z axis, then Y, then Z again; positive rotations are counter-clock (eg, to move the detector from Y=0 to Z=0, input: 90.0, -90.0, 180.0).\n");
printf( " The input angles would rotate the default source direction (0,1,0) towards direction (%f,%f,%f), but the input direction was (%f,%f,%f) instead.\n\n", default_direction.x, default_direction.y, default_direction.z, source_data[0].direction.x, source_data[0].direction.y, source_data[0].direction.z);
printf( " Please, provide a consistent set of source direction and Euler angle rotation or the code can't determine the orientation of the square field and detector. \n\n\n"); // !!DBTv1.4!! !!DeBuG!!
}
#ifdef USING_MPI
MPI_Finalize();
#endif
exit(-1); // !!DBTv1.4!! !!DeBuG!!
}
// Read parameters for the non-ideal focal spot: !!DBTv1.4!!
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%f", &source_data[0].focal_spot_FWHM); // SOURCE GAUSSIAN FOCAL SPOT FWHM [cm]
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%f", &source_data[0].rotation_blur); // ANGULAR BLUR DUE TO MOVEMENT ([exposure_time]*[angular_speed]) [degrees]
source_data[0].rotation_blur = fabsf(source_data[0].rotation_blur*DEG2RAD);
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); // COLLIMATE BEAM TOWARDS POSITIVE X ANGLES ONLY? (ie, cone-beam center aligned with chest wall in mammography) [YES/NO]
if (0==strncmp("YE",new_line,2) || 0==strncmp("Ye",new_line,2) || 0==strncmp("ye",new_line,2))
source_data[0].flag_halfConeX = true;
// MASTER_THREAD printf(" \'flag_halfConeX = true\': sampling only upper half beam for mammo geometry; beam centered at image edge.\n"); // !!DBT!! !!HalfBeam!! !!DBTv1.4!!
else if (0==strncmp("NO",new_line,2) || 0==strncmp("No",new_line,2) || 0==strncmp("no",new_line,2))
source_data[0].flag_halfConeX = false;
else
{
MASTER_THREAD printf("\n\n !!read_input ERROR!! Answer YES or NO in the beam collimation question in \'SECTION SOURCE\'.\n Input text: %s\n\n",new_line);
#ifdef USING_MPI
MPI_Finalize();
#endif
exit(-2);
}
/////////////////////////////////////////////////////////////////////////////
// -- Init. [SECTION IMAGE DETECTOR v.2017-06-20]: !!DBTv1.5!!
do
{
new_line_ptr = fgets(new_line, 250, file_ptr);
if (new_line_ptr==NULL)
{
printf("\n\n !!read_input ERROR!! Input file is not readable or does not contain the string \'SECTION IMAGE DETECTOR v.2017-06-20]\'!!\n");
exit(-2);
}
}
while(strstr(new_line,"SECTION IMAGE DETECTOR v.2017-06-20")==NULL); // Skip comments and empty lines until the section begins !!DBTv1.5!!
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
trim_name(new_line, file_name_output); // OUTPUT IMAGE FILE NAME (no spaces)
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
float dummy_num_pixels_x, dummy_num_pixels_y; // Read input pixel number as float and truncated to integer
sscanf(new_line, "%f %f", &dummy_num_pixels_x, &dummy_num_pixels_y); // NUMBER OF PIXELS IN THE IMAGE: Nx Nz
detector_data[0].num_pixels.x = (int)(dummy_num_pixels_x+0.001f);
detector_data[0].num_pixels.y = (int)(dummy_num_pixels_y+0.001f);
detector_data[0].total_num_pixels = detector_data[0].num_pixels.x * detector_data[0].num_pixels.y;
if (detector_data[0].total_num_pixels < 1 || detector_data[0].total_num_pixels > 99999999 )
{
MASTER_THREAD printf("\n\n !!read_input ERROR!! The input number of pixels is incorrect. Input: X_pix = %d, Y_pix = %d, total_num_pix = %d!!\n\n", detector_data[0].num_pixels.x, detector_data[0].num_pixels.y, detector_data[0].total_num_pixels);
#ifdef USING_MPI
MPI_Finalize();
#endif
exit(-2);
}
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%f %f", &detector_data[0].width_X, &detector_data[0].height_Z); // IMAGE SIZE (width, height): Dx Dz [cm]
detector_data[0].inv_pixel_size_X = detector_data[0].num_pixels.x / detector_data[0].width_X;
detector_data[0].inv_pixel_size_Z = detector_data[0].num_pixels.y / detector_data[0].height_Z;
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%f", &detector_data[0].sdd); // SOURCE-TO-DETECTOR DISTANCE [cm] (detector set in front of the source, normal to the input direction)
if ((detector_data[0].sdd)<1.0e-6)
{
MASTER_THREAD printf("\n\n !!read_input ERROR!! The source-to-detector distance must be positive. Input: sdd=%f!!\n\n", detector_data[0].sdd);
#ifdef USING_MPI
MPI_Finalize();
#endif
exit(-2);
}
// Input parameters for the improved detector model: !!DBTv1.4!!
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%f %f", &detector_data[0].offset.x, &detector_data[0].offset.y); // IMAGE OFFSET ON DETECTOR PLANE IN WIDTH AND HEIGHT DIRECTIONS (BY DEFAULT BEAM CENTERED AT IMAGE CENTER) [cm] !!DBTv1.4!!
if (source_data[0].flag_halfConeX)
detector_data[0].offset.y = detector_data[0].offset.y + 0.5*detector_data[0].height_Z; // Center the cone beam at the edge of the image with a halfCone (mammo). !!DBT!! !!HalfBeam!! !!DBTv1.4!!
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%f", &detector_data[0].scintillator_thickness); // DETECTOR THICKNESS [cm] !!DBTv1.4!!
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%f", &detector_data[0].scintillator_MFP); // DETECTOR MATERIAL AVERAGE MEAN FREE PATH [1/cm] !!DBTv1.4!!
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%f %f %f %f", &detector_data[0].kedge_energy, &detector_data[0].fluorescence_energy, &detector_data[0].fluorescence_yield, &detector_data[0].fluorescence_MFP); // DETECTOR K-EDGE ENERGY [eV], K-FLUORESCENCE ENERGY [eV], K-FLUORESCENCE YIELD, MFP AT FLUORESCENCE ENERGY [cm]
// NOTE: K-EDGE ENERGY, K-FLUORESCENCE ENERGY and K-FLUORESCENCE YIELD are tabulated in the XRAYLIB and other tables !!DBTv1.4!!
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
float Swank_factor = -1.0f;
sscanf(new_line, "%f %f", &detector_data[0].gain_W, &Swank_factor); // EFECTIVE DETECTOR GAIN, W_+- [eV/ehp], SWANK FACTOR (input 0 to report ideal energy fluence)
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%f", &detector_data[0].electronic_noise); // ELECTRONIC NOISE LEVEL (electrons/pixel)
if (detector_data[0].gain_W<0.001f || Swank_factor<0.001f)
{
MASTER_THREAD printf("\n !!read_input!! Negative gain or Swank factor input: reporting pixel values as energy fluence, disabling conversion to detected charges and electronic noise.\n\n");
detector_data[0].gain_W = 0.0f;
detector_data[0].Swank_rel_std = 0.0f;
detector_data[0].electronic_noise = 0.0f;
}
else
{
if (Swank_factor > 0.9999995f)
detector_data[0].Swank_rel_std = 0.0f; // Swank_rel_std = 0 ==> Swank factor = 1 ==> no variability in the amount of ehp generated
else
detector_data[0].Swank_rel_std = sqrtf(1.0f/Swank_factor - 1.0f);
}
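// Worked numbers (illustrative only): the relation above, Swank_rel_std = sqrt(1/Swank - 1), gives for example
// Swank=0.95 -> rel_std~0.23 and Swank=0.90 -> rel_std~0.33; a Swank factor above ~0.9999995 is treated as exactly 1
// (no variability in the number of generated electron-hole pairs).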
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%f %f", &detector_data[0].cover_thickness, &detector_data[0].cover_MFP); // PROTECTIVE COVER THICKNESS (detector and grid) [cm], MEAN FREE PATH AT AVERAGE ENERGY [cm] !!DBTv1.5!!
float grid_strip_MFP=-1.0f, grid_interspace_MFP=-1.0f;
int grid_orientation=99;
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%f %f %f", &detector_data[0].grid_ratio, &detector_data[0].grid_freq, &detector_data[0].grid_strip_thickness); // ANTISCATTER GRID RATIO, FREQUENCY, STRIP THICKNESS [X:1, lp/cm, cm] !!DBTv1.5!!
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%f %f", &grid_strip_MFP, &grid_interspace_MFP); // ANTISCATTER STRIPS AND INTERSPACE MEAN FREE PATHS AT MEAN ENERGY [cm] !!DBTv1.5!!
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%d", &grid_orientation); // ORIENTATION 1D FOCUSED ANTISCATTER GRID LINES: 0==STRIPS PERPENDICULAR LATERAL DIRECTION (mammo style); 1==STRIPS PARALLEL LATERAL DIRECTION (DBT style) !!DBTv1.5!!
detector_data[0].grid_strip_mu = 1.0f/grid_strip_MFP; // Store the coefficients of attenuation for the attenuating strips and the interspace material [1/cm]
detector_data[0].grid_interspace_mu = 1.0f/grid_interspace_MFP;
if (detector_data[0].grid_ratio<1e-7f || detector_data[0].grid_freq<1e-7f || detector_data[0].grid_strip_thickness<2e-8f)
{
detector_data[0].grid_freq = -1.0f; // Signal that the grid is disabled
}
if (0==grid_orientation)
detector_data[0].grid_ratio = -1.0f*detector_data[0].grid_ratio; // A negative grid ratio will signal orientation 0 !!DBTv1.5!!
else if (grid_orientation!=0 && grid_orientation!=1)
{
if (detector_data[0].grid_freq>0.0f)
{
MASTER_THREAD printf("\n\n !!read_input ERROR!! Incorrect grid orientation value: input 0 for strips perpendicular to image width (lateral direction) as in mammography, or 1 for strips parallel to image widtht. Input: orientation=%d!!\n\n", grid_orientation);
#ifdef USING_MPI
MPI_Finalize();
#endif
exit(-2); // !!DBTv1.5!!
}
}
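// Example (illustrative input values, not defaults): "5.0 31.0 0.0030" with orientation 1 describes a 5:1 grid with
// 31 lp/cm and 30 micron thick strips parallel to the lateral (width) direction (DBT style); orientation 0 is flagged
// internally by storing grid_ratio with a negative sign, and entering zeros in the ratio/frequency/thickness line
// disables the antiscatter grid altogether.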
if ( (theta_aperture < -1.0e-7) || (phi_aperture < -1.0e-7) )
{
// Negative angle input: set total fan beam angle to exactly cover the detector surface.
theta_aperture= 2.0 * atan(0.5*detector_data[0].height_Z/(detector_data[0].sdd)) * RAD2DEG;
phi_aperture = 2.0 * atan(0.5*detector_data[0].width_X/(detector_data[0].sdd)) * RAD2DEG;
}
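// Worked example (illustrative values only): for height_Z=30 cm, width_X=24 cm and sdd=65 cm the automatic setting
// above gives theta_aperture = 2*atan(15/65) ~ 26.0 deg and phi_aperture = 2*atan(12/65) ~ 20.9 deg.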
if (source_data[0].flag_halfConeX)
theta_aperture= 2.0*theta_aperture; // Double the input aperture towards the nipple to send beam only towards positive angles (+X) !!DBT!! !!HalfBeam!! !!DBTv1.4!!
// *** RECTANGULAR BEAM INITIALIZATION: aperture initially centered at (0,1,0), ie, THETA_0=90, PHI_0=90
// Using the algorithm used in PENMAIN.f, from penelope 2008 (by F. Salvat).
source_data[0].cos_theta_low = (float)( cos((90.0 - 0.5*theta_aperture)*DEG2RAD) );
source_data[0].D_cos_theta = (float)( -2.0*source_data[0].cos_theta_low ); // Theta aperture is symmetric above and below 90 deg
source_data[0].phi_low = (float)( (90.0 - 0.5*phi_aperture)*DEG2RAD );
source_data[0].D_phi = (float)( phi_aperture*DEG2RAD );
// Particular case of pencil beam input: convert the 0 angle to a very small square beam to avoid precision errors in sampling
if (abs(theta_aperture) < 1.0e-7)
{
theta_aperture = +1.00e-8;
source_data[0].cos_theta_low = 0.0f; // = cos(90-0)
source_data[0].D_cos_theta = 0.0f;
}
if (abs(phi_aperture) < 1.0e-7)
{
phi_aperture = +1.00e-8;
source_data[0].phi_low = (float)( 90.0*DEG2RAD );
source_data[0].D_phi = 0.0f;
}
source_data[0].max_height_at_y1cm = (float) ( tan(0.5*theta_aperture*DEG2RAD) ); // !!DBTv1.4!!
source_data[0].max_width_at_y1cm = (float) ( tan(0.5*phi_aperture*DEG2RAD) ); // Collimate in both directions when using the non-point focal spot. !!DBTv1.4!!
if (source_data[0].flag_halfConeX) // Sampling only half beam towards +X for mammo geometry! !!DBT!! !!HalfBeam!! !!DBTv1.4!!
source_data[0].D_cos_theta = 0.5f*source_data[0].D_cos_theta; // !!DBT!! !!HalfBeam!! !!DBTv1.4!!
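// Sketch of how these parameters are typically used to sample an emission direction in the default frame centered at
// (0,1,0) (PENMAIN-style; the actual GPU kernel may differ -- illustrative only, rand1/rand2 are uniform in [0,1)):
//    cos_theta = cos_theta_low + rand1*D_cos_theta;               // uniform in cos(theta) inside the aperture
//    phi       = phi_low       + rand2*D_phi;                     // uniform in phi
//    sin_theta = sqrt(1.0 - cos_theta*cos_theta);
//    dir = (sin_theta*cos(phi), sin_theta*sin(phi), cos_theta);   // later rotated with rot_fan[] and blurred by the focal spot model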
/////////////////////////////////////////////////////////////////////////////
// -- Init. [SECTION TOMOGRAPHIC TRAJECTORY v.2016-12-02] (OLD NAME SECTION: [SECTION CT SCAN v.2011-10-25]) !!DBTv1.4!!
do
{
new_line_ptr = fgets(new_line, 250, file_ptr);
if (new_line_ptr==NULL)
{
printf("\n\n !!read_input ERROR!! Input file is not readable or does not contain the string \'SECTION TOMOGRAPHIC TRAJECTORY v.2016-12-02\'!!\n");
exit(-2);
}
}
while(strstr(new_line,"SECTION TOMOGRAPHIC TRAJECTORY v.2016-12-02")==NULL); // Skip comments and empty lines until the section begins
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%d", num_projections); // NUMBER OF PROJECTIONS (set to 1 or less for a single projection)
if (*num_projections<1)
*num_projections = 1; // Zero projections have the same effect as 1 projection (ie, no CT scan rotation).
if ( fabs(*num_projections) > MAX_NUM_PROJECTIONS )
{
MASTER_THREAD printf("\n\n !!read_input ERROR!! The input number of projections is too large. Increase parameter MAX_NUM_PROJECTIONS=%d in the header file and recompile.\n", MAX_NUM_PROJECTIONS);
MASTER_THREAD printf( " There is no limit in the number of projections to be simulated because the source, detector data for each projection is stored in global memory and transfered to shared memory for each projection.\n\n");
#ifdef USING_MPI
MPI_Finalize();
#endif
exit(-2);
}
if (*num_projections==1)
{
// -- Skip rest of the section if simulating a single projection:
source_data[0].angle_per_projection = 0.0f; // Init variables to default values that will not be used anyway
source_data[0].angle_offset = 0.0f;
source_data[0].axis_of_rotation.x = 1.0f; source_data[0].axis_of_rotation.y = source_data[0].axis_of_rotation.z = 0.0f;
source_data[0].rotation_point.x = source_data[0].rotation_point.y = source_data[0].rotation_point.z = 0.0f;
}
else
{
// -- Tomographic scan with multiple projections:
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%lf", SRotAxisD); // SOURCE-TO-ROTATION AXIS DISTANCE
if (*SRotAxisD<0.0 || *SRotAxisD>detector_data[0].sdd)
{
MASTER_THREAD printf("\n\n !!read_input ERROR!! Invalid source-to-rotation axis distance! Input: %f (sdd=%f).\n\n\n", *SRotAxisD, detector_data[0].sdd);
#ifdef USING_MPI
MPI_Finalize();
#endif
exit(-2);
}
source_data[0].rotation_point.x = source_data[0].position.x + source_data[0].direction.x * (*SRotAxisD); // Store the rotation point to apply rotation in the kernel !!DBTv1.4!!
source_data[0].rotation_point.y = source_data[0].position.y + source_data[0].direction.y * (*SRotAxisD);
source_data[0].rotation_point.z = source_data[0].position.z + source_data[0].direction.z * (*SRotAxisD);
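// Illustrative example: with sdd=65 cm and SOURCE-TO-ROTATION AXIS DISTANCE=60 cm the rotation point lies on the beam
// axis 60 cm from the focal spot, i.e. 5 cm in front of the detector center that is computed further below.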
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%f", &source_data[0].angle_per_projection); // ANGLE BETWEEN PROJECTIONS [degrees] (360/num_projections for full CT) !!DBTv1.4!!
source_data[0].angle_per_projection = source_data[0].angle_per_projection*DEG2RAD; // store the angle in radians
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%f", &source_data[0].angle_offset); // ANGULAR ROTATION TO FIRST PROJECTION (USEFUL FOR DBT, INPUT SOURCE DIRECTION CONSIDERED AS 0 DEGREES) [degrees] !!DBTv1.4!!
source_data[0].angle_offset = source_data[0].angle_offset*DEG2RAD;
double wx,wy,wz,norm;
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%lf %lf %lf", &wx, &wy, &wz); // AXIS OF ROTATION !!DBTv1.4!!
norm = 1.0/sqrt(wx*wx+wy*wy+wz*wz);
source_data[0].axis_of_rotation.x = (float) wx*norm;
source_data[0].axis_of_rotation.y = (float) wy*norm;
source_data[0].axis_of_rotation.z = (float) wz*norm;
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%lf", translation_helical); // TRANSLATION ALONG ROTATION AXIS BETWEEN PROJECTIONS (HELICAL SCAN) [cm]
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); // KEEP DETECTOR FIXED AT 0 DEGREES FOR DBT? [YES/NO]
if (0==strncmp("YE",new_line,2) || 0==strncmp("Ye",new_line,2) || 0==strncmp("ye",new_line,2))
*flag_detectorFixed = true;
else if (0==strncmp("NO",new_line,2) || 0==strncmp("No",new_line,2) || 0==strncmp("no",new_line,2))
*flag_detectorFixed = false;
else
{
MASTER_THREAD printf("\n\n !!read_input ERROR!! Answer YES or NO to KEEP DETECTOR FIXED AT 0 DEGREES FOR DBT.\n Input text: %s\n\n",new_line);
#ifdef USING_MPI
MPI_Finalize();
#endif
exit(-2);
}
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); // SIMULATE BOTH 0 deg PROJECTION AND TOMOGRAPHIC SCAN (WITHOUT GRID) WITH 2/3 TOTAL NUM HIST IN 1st PROJ (eg, DBT+mammo)? [YES/NO] !!DBTv1.4!!
if (0==strncmp("YE",new_line,2) || 0==strncmp("Ye",new_line,2) || 0==strncmp("ye",new_line,2))
*flag_simulateMammoAfterDBT = true;
else if (0==strncmp("NO",new_line,2) || 0==strncmp("No",new_line,2) || 0==strncmp("no",new_line,2))
*flag_simulateMammoAfterDBT = false;
else
{
MASTER_THREAD printf("\n\n !!read_input ERROR!! Answer YES or NO to SIMULATE BOTH FIRST PROJECTION AND TOMOGRAPHIC SCAN (WITHOUT GRID) WITH SAME NUM HIST (eg, DBT+mammo).\n Input text: %s\n\n",new_line);
#ifdef USING_MPI
MPI_Finalize();
#endif
exit(-2);
}
if (*num_projections==1)
*flag_simulateMammoAfterDBT=false; // Make sure the flag is always false if simulating a single projection
}
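// Example configuration (illustrative values only, not defaults): a 25-projection DBT scan spanning +-24 deg around the
// input source direction could use NUMBER OF PROJECTIONS=25, ANGLE BETWEEN PROJECTIONS=2.0 deg and ANGULAR ROTATION TO
// FIRST PROJECTION=-24.0 deg (first projection at -24 deg, last at -24+24*2=+24 deg), rotating around an axis parallel
// to the detector plane.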
/////////////////////////////////////////////////////////////////////////////
// -- Init. [SECTION DOSE DEPOSITION v.2012-12-12] (MC-GPU v.1.3):
// Electrons are not transported and therefore we are approximating that the dose is equal to the KERMA (energy released by the photons alone).
// This approximation is acceptable when there is electronic equilibrium and when the range of the secondary electrons is shorter than the voxel size.
// Usually the doses will be acceptable for photon energies below 1 MeV. The dose estimates may not be accurate at the interface of low density volumes.
do
{
new_line_ptr = fgets(new_line, 250, file_ptr);
if (new_line_ptr==NULL)
{
printf("\n\n !!read_input ERROR!! Input file is not readable or does not contain the string \'SECTION DOSE DEPOSITION v.2012-12-12\'!!\n");
exit(-2);
}
if (strstr(new_line,"SECTION DOSE DEPOSITION v.2011-02-18")!=NULL) // Detect previous version of input file
{
MASTER_THREAD printf("\n\n !!read_input ERROR!! Please update the input file to the new version of MC-GPU (v1.3)!!\n\n You simply have to change the input file text line:\n [SECTION DOSE DEPOSITION v.2011-02-18]\n\n for these two lines:\n [SECTION DOSE DEPOSITION v.2012-12-12]\n NO # TALLY MATERIAL DOSE? [YES/NO]\n\n");
exit(-2);
}
}
while(strstr(new_line,"SECTION DOSE DEPOSITION v.2012-12-12")==NULL); // Skip comments and empty lines until the section begins
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); // TALLY MATERIAL DOSE? [YES/NO] --> turn on/off the material dose tallied adding the Edep in each material, independently of the voxels.
if (0==strncmp("YE",new_line,2) || 0==strncmp("Ye",new_line,2) || 0==strncmp("ye",new_line,2))
{
*flag_material_dose = 1;
MASTER_THREAD printf(" Material dose deposition tally ENABLED.\n");
}
else if (0==strncmp("NO",new_line,2) || 0==strncmp("No",new_line,2) || 0==strncmp("no",new_line,2))
{
*flag_material_dose = 0; // -- NO: disabling tally
MASTER_THREAD printf(" Material dose deposition tally DISABLED.\n");
}
else
{
MASTER_THREAD printf("\n\n !!read_input ERROR!! Answer YES or NO in the first two line of \'SECTION DOSE DEPOSITION\' to enable or disable the material dose and 3D voxel dose tallies.\n Input text: %s\n\n",new_line);
#ifdef USING_MPI
MPI_Finalize();
#endif
exit(-2);
}
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); // TALLY 3D VOXEL DOSE? [YES/NO]
if (0==strncmp("YE",new_line,2) || 0==strncmp("Ye",new_line,2) || 0==strncmp("ye",new_line,2))
{
// -- YES: using the tally
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); trim_name(new_line, file_dose_output); // OUTPUT DOSE FILE NAME (no spaces)
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); sscanf(new_line, "%hd %hd", dose_ROI_x_min, dose_ROI_x_max); // # VOXELS TO TALLY DOSE: X-index min max (first voxel has index 1)
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); sscanf(new_line, "%hd %hd", dose_ROI_y_min, dose_ROI_y_max); // # VOXELS TO TALLY DOSE: Y-index min max
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); sscanf(new_line, "%hd %hd", dose_ROI_z_min, dose_ROI_z_max); // # VOXELS TO TALLY DOSE: Z-index min max
*dose_ROI_x_min -= 1; *dose_ROI_x_max -= 1; // Re-scale input coordinates to have index=0 for the first voxel instead of 1.
*dose_ROI_y_min -= 1; *dose_ROI_y_max -= 1;
*dose_ROI_z_min -= 1; *dose_ROI_z_max -= 1;
MASTER_THREAD printf(" 3D voxel dose deposition tally ENABLED.\n");
if ( ((*dose_ROI_x_min)>(*dose_ROI_x_max)) || ((*dose_ROI_y_min)>(*dose_ROI_y_max)) || ((*dose_ROI_z_min)>(*dose_ROI_z_max)) ||
(*dose_ROI_x_min)<0 || (*dose_ROI_y_min)<0 || (*dose_ROI_z_min)<0 )
{
MASTER_THREAD printf("\n\n !!read_input ERROR!! The input region-of-interst in \'SECTION DOSE DEPOSITION\' is not valid: the minimum voxel index may not be zero or larger than the maximum index.\n");
MASTER_THREAD printf( " Input data = X[%d,%d], Y[%d,%d], Z[%d,%d]\n\n", *dose_ROI_x_min+1, *dose_ROI_x_max+1, *dose_ROI_y_min+1, *dose_ROI_y_max+1, *dose_ROI_z_min+1, *dose_ROI_z_max+1); // Show ROI with index=1 for the first voxel instead of 0.
#ifdef USING_MPI
MPI_Finalize();
#endif
exit(-2);
}
if ( ((*dose_ROI_x_min)==(*dose_ROI_x_max)) && ((*dose_ROI_y_min)==(*dose_ROI_y_max)) && ((*dose_ROI_z_min)==(*dose_ROI_z_max)) )
{
MASTER_THREAD printf("\n\n !!read_input!! According to the input region-of-interest in \'SECTION DOSE DEPOSITION\', only the dose in the voxel (%d,%d,%d) will be tallied.\n\n",*dose_ROI_x_min,*dose_ROI_y_min,*dose_ROI_z_min);
}
}
else if (0==strncmp("NO",new_line,2) || 0==strncmp("No",new_line,2) || 0==strncmp("no",new_line,2))
{
// -- NO: disabling tally
MASTER_THREAD printf(" 3D voxel dose deposition tally DISABLED.\n");
*dose_ROI_x_min = (short int) 32500; *dose_ROI_x_max = (short int) -32500; // Set absurd values for the ROI to make sure we never get any dose tallied
*dose_ROI_y_min = (short int) 32500; *dose_ROI_y_max = (short int) -32500; // (short int values range from -32768 to +32767)
*dose_ROI_z_min = (short int) 32500; *dose_ROI_z_max = (short int) -32500;
}
else
{
MASTER_THREAD printf("\n\n !!read_input ERROR!! Answer YES or NO in the first two line of \'SECTION DOSE DEPOSITION\' to enable or disable the material dose and 3D voxel dose tallies.\n Input text: %s\n\n",new_line);
#ifdef USING_MPI
MPI_Finalize();
#endif
exit(-2);
}
MASTER_THREAD printf("\n");
/////////////////////////////////////////////////////////////////////////////
// -- Init. [SECTION VOXELIZED GEOMETRY FILE v.2017-07-26] // !!v1.5bitree!! // 2016-12-02] // !!DBTv1.4!! // Previous version: v.2009-11-30
do
{
new_line_ptr = fgets(new_line, 250, file_ptr);
if (new_line_ptr==NULL)
{
printf("\n\n !!read_input ERROR!! Input file is not readable or does not contain the string \'SECTION VOXELIZED GEOMETRY FILE v.2017-07-26\'!!\n");
exit(-2);
}
}
while(strstr(new_line,"SECTION VOXELIZED GEOMETRY FILE v.2017-07-26")==NULL); // Skip comments and empty lines until the section begins
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
trim_name(new_line, file_name_voxels); // VOXEL GEOMETRY FILE (penEasy 2008 format)
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%f %f %f", &voxel_data->offset.x, &voxel_data->offset.y, &voxel_data->offset.z); // OFFSET OF THE VOXEL GEOMETRY (DEFAULT ORIGIN AT LOWER BACK CORNER) [cm] !!DBTv1.4!!
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%d %d %d", &voxel_data->num_voxels.x, &voxel_data->num_voxels.y, &voxel_data->num_voxels.z); // NUMBER OF VOXELS: INPUT A "0" TO READ ASCII FORMAT WITH HEADER SECTION, RAW VOXELS WILL BE READ OTHERWISE !!DBTv1.4!!
if (voxel_data->num_voxels.x<1 || voxel_data->num_voxels.y<1 || voxel_data->num_voxels.z<1)
voxel_data->num_voxels.x = -1; // Indicate to read ASCII format geometry: geometric parameters will be read from the header file !!DBTv1.4!! !!DeBuG!!
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%f %f %f", &voxel_data->voxel_size.x, &voxel_data->voxel_size.y, &voxel_data->voxel_size.z); // VOXEL SIZES [cm] !!DBTv1.4!!
voxel_data->inv_voxel_size.x = 1.0f/voxel_data->voxel_size.x;
voxel_data->inv_voxel_size.y = 1.0f/voxel_data->voxel_size.y;
voxel_data->inv_voxel_size.z = 1.0f/voxel_data->voxel_size.z;
int split_x=-1, split_y=-1, split_z=-1;
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); //!!bitree!! v1.5b
sscanf(new_line, "%d %d %d", &split_x, &split_y, &split_z); // SIZE OF LOW RESOLUTION VOXELS THAT WILL BE DESCRIBED BY A BINARY TREE, GIVEN AS POWERS OF TWO (eg, 2 2 3 = 2^2x2^2x2^3 = 128 input voxels per low res voxel; 0 0 0 disables tree) //!!bitre!! v1.5b
if( (split_x+split_y+split_z)==0 || split_x<0 || split_y<0 || split_z<0)
{
// Disable bitree generation if a negative value or all zeros are entered:
voxel_data->num_voxels_coarse.x = voxel_data->num_voxels_coarse.y = voxel_data->num_voxels_coarse.z = (unsigned char) 0;
}
else
{
voxel_data->num_voxels_coarse.x = (unsigned char) min_value(pow(2, split_x), 256); // Limit the maximum coarse voxel size and avoid overflow // !!bitree!! v1.5b
voxel_data->num_voxels_coarse.y = (unsigned char) min_value(pow(2, split_y), 256);
voxel_data->num_voxels_coarse.z = (unsigned char) min_value(pow(2, split_z), 256);
// voxel_data->num_voxels_LowRes.x = (int)((float)voxel_data->num_voxels.x/(float)voxel_data->num_voxels_coarse.x + 0.99f); // !!bitree!! v1.5b
// voxel_data->num_voxels_LowRes.y = (int)((float)voxel_data->num_voxels.y/(float)voxel_data->num_voxels_coarse.y + 0.99f);
// voxel_data->num_voxels_LowRes.z = (int)((float)voxel_data->num_voxels.z/(float)voxel_data->num_voxels_coarse.z + 0.99f);
}
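// Example (restating the input description above): split values "2 2 3" group 2^2 x 2^2 x 2^3 = 4x4x8 = 128 input
// voxels into each low-resolution (coarse) voxel of the binary tree; entering all zeros or any negative value disables
// the binary tree and the full-resolution voxel geometry is used directly.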
/////////////////////////////////////////////////////////////////////////////
// -- Init. [SECTION MATERIAL FILE LIST v.2009-11-30]
do
{
new_line_ptr = fgets(new_line, 250, file_ptr);
if (new_line_ptr==NULL)
{
printf("\n\n !!read_input ERROR!! Input file is not readable or does not contain the string \'SECTION MATERIAL FILE LIST v.2009-11-30\'!!\n");
exit(-2);
}
}
while(strstr(new_line,"SECTION MATERIAL FILE LIST v.2009-11-30")==NULL); // Skip comments and empty lines until the section begins
int i;
for (i=0; i<MAX_MATERIALS; i++)
{
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
if (new_line_ptr==NULL)
file_name_materials[i][0]='\n'; // The input file is allowed to finish without defining all the materials
else
trim_name(new_line, file_name_materials[i]);
}
// [Finish reading input file]
/////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////
// *** Set the detector center in front of the source: !!DBTv1.4!!
detector_data[0].center.x = source_data[0].position.x + source_data[0].direction.x * detector_data[0].sdd;
detector_data[0].center.y = source_data[0].position.y + source_data[0].direction.y * detector_data[0].sdd;
detector_data[0].center.z = source_data[0].position.z + source_data[0].direction.z * detector_data[0].sdd;
// *** Set the rotation that will bring the particles from the actual detector plane to the default detector plane at +Y=(0,+1,0) where pixelization is simpler: !!DBTv1.4!!
// Essentially I just need to invert the Euler angles and the order of the rotations given for the source, and move the detector a distance SDD in the direction of the cone beam:
detector_data[0].rot_inv[0] = 1; detector_data[0].rot_inv[1] = 0; detector_data[0].rot_inv[2] = 0; // Init rotation matrix to identity
detector_data[0].rot_inv[3] = 0; detector_data[0].rot_inv[4] = 1; detector_data[0].rot_inv[5] = 0;
detector_data[0].rot_inv[6] = 0; detector_data[0].rot_inv[7] = 0; detector_data[0].rot_inv[8] = 1;
create_rotation_matrix_around_axis(-rotZ3*DEG2RAD, 0, 0, 1, detector_data[0].rot_inv); // Inverse 3rd rotation around Z !!DBTv1.4!!
create_rotation_matrix_around_axis(-rotY2*DEG2RAD, 0, 1, 0, detector_data[0].rot_inv); // Inverse 2nd rotation around Y !!DBTv1.4!!
create_rotation_matrix_around_axis(-rotZ1*DEG2RAD, 0, 0, 1, detector_data[0].rot_inv); // Inverse 1st rotation around Z !!DBTv1.4!!
MASTER_THREAD printf(" Rotations from the detector plane to default detector plane at Y=0 [deg]: rotZ=%f , rotY=%f , rotZ=%f\n", -rotZ3, -rotY2, -rotZ1);
/////////////////////////////////////////////////////////////////////////////
// *** Allocate array for the 4 detected images (non-scattered, Compton, Rayleigh, multiple-scatter):
int pixels_per_image = detector_data[0].num_pixels.x * detector_data[0].num_pixels.y;
*image_bytes = 4 * pixels_per_image * sizeof(unsigned long long int);
(*image_ptr) = (unsigned long long int*) malloc(*image_bytes);
if (*image_ptr==NULL)
{
printf("\n\n !!malloc ERROR!! Not enough memory to allocate %d pixels for the 4 scatter images (%f Mbytes)!!\n\n", pixels_per_image, (*image_bytes)/(1024.f*1024.f));
exit(-2);
}
else
{
MASTER_THREAD printf(" Array for 4 scatter images correctly allocated (%d pixels, %f Mbytes)\n", pixels_per_image, (*image_bytes)/(1024.f*1024.f));
}
// *** Initialize the images to 0 in the CPU. The CUDA code will init it to 0 in the GPU global memory later, using kernel "init_image_array_GPU".
memset(*image_ptr, 0, (*image_bytes)); // Init memory space to 0.
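// Memory footprint example (illustrative detector size): a 3000x1500 pixel detector needs 4 images * 4.5e6 pixels *
// 8 bytes (unsigned long long int) = 144 MB, reported as ~137 Mbytes by the printf above (which divides by 1024^2).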
// *** Allocate dose and dose^2 array if tally active:
int num_voxels_ROI = ((int)(*dose_ROI_x_max - *dose_ROI_x_min + 1)) * ((int)(*dose_ROI_y_max - *dose_ROI_y_min + 1)) * ((int)(*dose_ROI_z_max - *dose_ROI_z_min + 1));
if ((*dose_ROI_x_max)>-1)
{
*voxels_Edep_bytes = num_voxels_ROI * sizeof(ulonglong2);
(*voxels_Edep_ptr) = (ulonglong2*) malloc(*voxels_Edep_bytes);
if (*voxels_Edep_ptr==NULL)
{
printf("\n\n !!malloc ERROR!! Not enough memory to allocate %d voxels for the deposited dose (and uncertainty) array (%f Mbytes)!!\n\n", num_voxels_ROI, (*voxels_Edep_bytes)/(1024.f*1024.f));
exit(-2);
}
else
{
MASTER_THREAD printf(" Array for the deposited dose ROI (and uncertainty) correctly allocated (%d voxels, %f Mbytes)\n", num_voxels_ROI, (*voxels_Edep_bytes)/(1024.f*1024.f));
}
}
else
{
(*voxels_Edep_bytes) = 0;
}
// *** Initialize the voxel dose to 0 in the CPU. Not necessary for the CUDA code if dose matrix init. in the GPU global memory using a GPU kernel, but needed if using hipMemcpy.
if ((*dose_ROI_x_max)>-1)
{
memset(*voxels_Edep_ptr, 0, (*voxels_Edep_bytes)); // Init memory space to 0.
}
return;
}
////////////////////////////////////////////////////////////////////////////////
//! Extract a file name from an input text line, trimming the initial blanks,
//! trailing comment (#) and stopping at the first blank (the file name should
//! not contain blanks).
//!
//! @param[in] input_line Input sentence with blanks and a trailing comment
//! @param[out] file_name Trimmed file name
////////////////////////////////////////////////////////////////////////////////
void trim_name(char* input_line, char* file_name)
{
int a=0, b=0;
// Discard initial blanks:
while(' '==input_line[a])
{
a++;
}
// Read file name until a blank or a comment symbol (#) is found:
while ((' '!=input_line[a])&&('#'!=input_line[a]))
{
file_name[b] = input_line[a];
b++;
a++;
}
file_name[b] = '\0'; // Terminate output string
}
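// Usage example (illustrative): trim_name("   phantom.vox   # voxelized breast", name) leaves name=="phantom.vox".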
////////////////////////////////////////////////////////////////////////////////
//! Read a line of text and trim initial blanks and trailing comments (#).
//!
//! @param[in] num Characters to read
//! @param[in] file_ptr Pointer to the input file stream
//! @param[out] trimmed_line Trimmed line from input file, skipping empty lines and comments
////////////////////////////////////////////////////////////////////////////////
char* fgets_trimmed(char* trimmed_line, int num, FILE* file_ptr)
{
char new_line[250];
char *new_line_ptr = NULL;
int a=0, b=0;
trimmed_line[0] = '\0'; // Init with a mark that means no file input
do
{
a=0; b=0;
new_line_ptr = fgets(new_line, num, file_ptr); // Read new line
if (new_line_ptr != NULL)
{
// Discard initial blanks:
while(' '==new_line[a])
{
a++;
}
// Read the line until a comment symbol (#) or the end-of-line is found:
while (('\n'!=new_line[a])&&('#'!=new_line[a]))
{
trimmed_line[b] = new_line[a];
b++;
a++;
}
}
} while(new_line_ptr!=NULL && '\0'==trimmed_line[0]); // Keep reading lines until end-of-file or a line that is not empty or only comment is found
trimmed_line[b] = '\0'; // Terminate output string
return new_line_ptr;
}
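// Usage example (illustrative): for an input line "   50000.0   # histories" the returned trimmed_line is "50000.0   "
// (leading blanks and the trailing comment are removed); empty lines and comment-only lines are skipped entirely.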
////////////////////////////////////////////////////////////////////////////////
//! Read the voxel data and allocate the material and density matrix.
//! Also find and report the maximum density defined in the geometry.
//!
// -- Sample voxel geometry file:
//
// # (comment lines...)
// #
// # Voxel order: X runs first, then Y, then Z.
// #
// [SECTION VOXELS HEADER v.2008-04-13]
// 411 190 113 No. OF VOXELS IN X,Y,Z
// 5.000e-02 5.000e-02 5.000e-02 VOXEL SIZE (cm) ALONG X,Y,Z
// 1 COLUMN NUMBER WHERE MATERIAL ID IS LOCATED
// 2 COLUMN NUMBER WHERE THE MASS DENSITY IS LOCATED
// 1 BLANK LINES AT END OF X,Y-CYCLES (1=YES,0=NO)
// [END OF VXH SECTION]
// 1 0.00120479
// 1 0.00120479
// ...
//
//! @param[in] file_name_voxels Name of the voxelized geometry file.
//! @param[out] density_max Array with the maximum density for each material in the voxels.
//! @param[out] voxel_data Pointer to a structure containing the voxel number and size.
//! @param[out] voxel_mat_dens_ptr Pointer to the vector with the voxel materials and densities.
//! @param[in] dose_ROI_x/y/z_max Size of the dose ROI: can not be larger than the total number of voxels in the geometry.
////////////////////////////////////////////////////////////////////////////////
// void load_voxels(int myID, char* file_name_voxels, float* density_max, struct voxel_struct* voxel_data, float2** voxel_mat_dens_ptr, unsigned int* voxel_mat_dens_bytes, short int* dose_ROI_x_max, short int* dose_ROI_y_max, short int* dose_ROI_z_max)
void load_voxels(int myID, char* file_name_voxels, float* density_max, struct voxel_struct* voxel_data, int** voxel_mat_dens_ptr, long long int* voxel_mat_dens_bytes, short int* dose_ROI_x_max, short int* dose_ROI_y_max, short int* dose_ROI_z_max) //!!FixedDensity_DBT!! Allocating "voxel_mat_dens" as "int" instead of "float2"
{
char new_line[250];
char *new_line_ptr = NULL;
MASTER_THREAD if (strstr(file_name_voxels,".zip")!=NULL)
printf("\n\n -- WARNING load_voxels! The input voxel file name has the extension \'.zip\'. Only \'.gz\' compression is allowed!!\n\n"); // !!zlib!!
gzFile file_ptr = gzopen(file_name_voxels, "rb"); // Open the file with zlib: the file can be compressed with gzip or uncompressed. !!zlib!!
if (file_ptr==NULL)
{
printf("\n\n !! fopen ERROR load_voxels!! File %s does not exist!!\n", file_name_voxels);
exit(-2);
}
MASTER_THREAD
{
printf("\n -- Reading voxel file \'%s\':\n",file_name_voxels);
if (strstr(file_name_voxels,".gz")==NULL)
printf(" (note that MC-GPU can also read voxel and material files compressed with gzip)\n"); // !!zlib!!
fflush(stdout);
}
do
{
new_line_ptr = gzgets(file_ptr, new_line, 250); // !!zlib!!
if (new_line_ptr==NULL)
{
MASTER_THREAD printf("\n\n !!Reading ERROR load_voxels!! File is not readable or does not contain the string \'[SECTION VOXELS HEADER\'!!\n");
exit(-2);
}
}
while(strstr(new_line,"[SECTION VOXELS")==NULL); // Skip comments and empty lines until the header begins
new_line_ptr = gzgets(file_ptr, new_line, 250); // !!zlib!! // Read full line (max. 250 characters).
sscanf(new_line, "%d %d %d",&voxel_data->num_voxels.x, &voxel_data->num_voxels.y, &voxel_data->num_voxels.z);
new_line_ptr = gzgets(file_ptr, new_line, 250); // !!zlib!!
sscanf(new_line, "%f %f %f", &voxel_data->voxel_size.x, &voxel_data->voxel_size.y, &voxel_data->voxel_size.z);
do
{
new_line_ptr = gzgets(file_ptr, new_line, 250); // !!zlib!!
if (new_line_ptr==NULL)
{
MASTER_THREAD printf("\n\n !!Reading ERROR load_voxels!! File is not readable or does not contain the string \'[END OF VXH SECTION]\'!!\n");
exit(-2);
}
}
while(strstr(new_line,"[END OF VXH SECTION")==NULL); // Skip rest of the header
// -- Store the size of the voxel bounding box (used in the source function):
voxel_data->size_bbox.x = voxel_data->num_voxels.x * voxel_data->voxel_size.x;
voxel_data->size_bbox.y = voxel_data->num_voxels.y * voxel_data->voxel_size.y;
voxel_data->size_bbox.z = voxel_data->num_voxels.z * voxel_data->voxel_size.z;
MASTER_THREAD
{
printf(" Number of voxels in the input geometry file: %d x %d x %d = %d\n", voxel_data->num_voxels.x, voxel_data->num_voxels.y, voxel_data->num_voxels.z, (voxel_data->num_voxels.x*voxel_data->num_voxels.y*voxel_data->num_voxels.z));
printf(" Size of the input voxels: %f x %f x %f cm (voxel volume=%f cm^3)\n", voxel_data->voxel_size.x, voxel_data->voxel_size.y, voxel_data->voxel_size.z, voxel_data->voxel_size.x*voxel_data->voxel_size.y*voxel_data->voxel_size.z);
printf(" Voxel bounding box size: %f x %f x %f cm\n", voxel_data->size_bbox.x, voxel_data->size_bbox.y, voxel_data->size_bbox.z);
printf(" Voxel geometry offset: %f, %f, %f cm\n", voxel_data->offset.x, voxel_data->offset.y, voxel_data->offset.z); // !!DBTv1.4!!
}
if (*dose_ROI_x_max > -1) // Check if tally not disabled
{
// -- Make sure the input number of voxels in the vox file is compatible with the input dose ROI (ROI assumes first voxel is index 0):
if ( (*dose_ROI_x_max+1)>(voxel_data->num_voxels.x) || (*dose_ROI_y_max+1)>(voxel_data->num_voxels.y) || (*dose_ROI_z_max+1)>(voxel_data->num_voxels.z) )
{
MASTER_THREAD printf("\n The input region of interest for the dose deposition is larger than the size of the voxelized geometry:\n");
*dose_ROI_x_max = min_value(voxel_data->num_voxels.x-1, *dose_ROI_x_max);
*dose_ROI_y_max = min_value(voxel_data->num_voxels.y-1, *dose_ROI_y_max);
*dose_ROI_z_max = min_value(voxel_data->num_voxels.z-1, *dose_ROI_z_max);
MASTER_THREAD printf( " updating the ROI max limits to fit the geometry -> dose_ROI_max=(%d, %d, %d)\n", *dose_ROI_x_max+1, *dose_ROI_y_max+1, *dose_ROI_z_max+1); // Allowing the input of an ROI larger than the voxel volume: in this case some of the allocated memory will be wasted but the program will run ok.
}
if ( (*dose_ROI_x_max+1)==(voxel_data->num_voxels.x) && (*dose_ROI_y_max+1)==(voxel_data->num_voxels.y) && (*dose_ROI_z_max+1)==(voxel_data->num_voxels.z) )
MASTER_THREAD printf(" The voxel dose tally ROI covers the entire voxelized phantom: the dose to every voxel will be tallied.\n");
else
MASTER_THREAD printf(" The voxel dose tally ROI covers only a fraction of the voxelized phantom: the dose to voxels outside the ROI will not be tallied.\n");
}
// -- Store the inverse of the pixel sides (in cm) to speed up the particle location in voxels.
voxel_data->inv_voxel_size.x = 1.0f/(voxel_data->voxel_size.x);
voxel_data->inv_voxel_size.y = 1.0f/(voxel_data->voxel_size.y);
voxel_data->inv_voxel_size.z = 1.0f/(voxel_data->voxel_size.z);
// -- Allocate the voxel matrix and store array size:
// *voxel_mat_dens_bytes = sizeof(float2)*(voxel_data->num_voxels.x)*(voxel_data->num_voxels.y)*(voxel_data->num_voxels.z);
// *voxel_mat_dens_ptr = (float2*) malloc(*voxel_mat_dens_bytes);
*voxel_mat_dens_bytes = sizeof(int)*(voxel_data->num_voxels.x)*(voxel_data->num_voxels.y)*(voxel_data->num_voxels.z); //!!FixedDensity_DBT!! Allocating "voxel_mat_dens" as "int" instead of "float2"
*voxel_mat_dens_ptr = (int*) malloc(*voxel_mat_dens_bytes); //!!FixedDensity_DBT!! Allocating "voxel_mat_dens" as "int" instead of "float2"
if (*voxel_mat_dens_ptr==NULL)
{
printf("\n\n !!malloc ERROR load_voxels!! Not enough memory to allocate %d voxels (%f Mbytes)!!\n\n", (voxel_data->num_voxels.x*voxel_data->num_voxels.y*voxel_data->num_voxels.z), (*voxel_mat_dens_bytes)/(1024.f*1024.f));
exit(-2);
}
MASTER_THREAD printf("\n\n!!WARNING!! HARDCODED CONVERSION TABLE FROM MATERIAL NUMBER TO DENSITY (kernel function \"density_LUT\") !!DeBuG!!\n"); //!!DeBuG!! !!DeBuG!!
MASTER_THREAD printf( " The densities given in the input .vox file are not used in the actual simulation.\n\n"); //!!DeBuG!! !!DeBuG!!
MASTER_THREAD printf(" -- Initializing the voxel material vector (%f Mbytes). Each voxel density is fixed by its material number using a look up table; individual densities disregarded !!FixedDensity_DBT!!\n\n", (*voxel_mat_dens_bytes)/(1024.f*1024.f)); //!!FixedDensity_DBT!!
MASTER_THREAD fflush(stdout);
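// For reference (illustrative sketch only; the real table lives in the device function "density_LUT" mentioned in the
// warning above and may use different values or a switch statement): one simple implementation is a constant array
// indexed by the 0-based material id stored in each voxel, e.g.:
//    __constant__ float density_LUT_example[MAX_MATERIALS] = { 0.00120f /*air*/, 1.00f /*water*/ /*, ...*/ };
//    density = density_LUT_example[voxel_material_id];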
// -- Read the voxel densities:
// MASTER_THREAD printf(" Reading the voxel densities... ");
int i, j, k, read_lines=0, dummy_material, read_items = -99;
float dummy_density;
// float2 *voxels_ptr = *voxel_mat_dens_ptr;
int *voxels_ptr = *voxel_mat_dens_ptr; //!!FixedDensity_DBT!! Allocating "voxel_mat_dens" as "int" instead of "float2"
for (k=0; k<MAX_MATERIALS; k++)
density_max[k] = -999.0f; // Init array with an impossible low density value
for(k=0; k<(voxel_data->num_voxels.z); k++)
{
for(j=0; j<(voxel_data->num_voxels.y); j++)
{
for(i=0; i<(voxel_data->num_voxels.x); i++)
{
do
{
new_line_ptr = gzgets(file_ptr, new_line, 250); // !!zlib!!
}
while (('\n'==new_line[0])||('\n'==new_line[1])||('#'==new_line[0])||('#'==new_line[1])); // Skip empty lines and comments.
read_items = sscanf(new_line, "%d %f", &dummy_material, &dummy_density); // Read the next 2 numbers
if (read_items!=2)
printf("\n !!WARNING load_voxels!! Expecting to read 2 items (material and density). read_items=%d, read_lines=%d \n", read_items, read_lines);
if (dummy_material>MAX_MATERIALS)
{
printf("\n\n !!ERROR load_voxels!! Voxel material number too high!! #mat=%d, MAX_MATERIALS=%d, voxel number=%d\n\n", dummy_material, MAX_MATERIALS, read_lines+1);
exit(-2);
}
if (dummy_material<1)
{
printf("\n\n !!ERROR load_voxels!! Voxel material number can not be zero or negative!! #mat=%d, voxel number=%dd\n\n", dummy_material, read_lines+1);
exit(-2);
}
if (dummy_density < 1.0e-9f)
{
printf("\n\n !!ERROR load_voxels!! Voxel density can not be 0 or negative: #mat=%d, density=%f, voxel number=%d\n\n", dummy_material, dummy_density, read_lines+1);
exit(-2);
}
if (dummy_density > density_max[dummy_material-1])
density_max[dummy_material-1] = dummy_density; // Store maximum density for each material
// (*voxels_ptr).x = (float)(dummy_material)+0.0001f; // Assign material value as float (the integer value will be recovered by truncation)
// (*voxels_ptr).y = dummy_density; // Assign density values
(*voxels_ptr) = (int)(dummy_material-1); // Assign material value as int, starting at 0 //!!FixedDensity_DBT!! Allocating "voxel_mat_dens" as "int" instead of "float2"; density taken from look up table
voxels_ptr++; // Move to next voxel
read_lines++;
}
}
}
MASTER_THREAD printf(" Total number of voxels read: %d\n",read_lines);
gzclose(file_ptr); // Close input file !!zlib!!
}
////////////////////////////////////////////////////////////////////////////////
//! Read the material input files and set the mean free paths and the "linear_interp" structures.
//! Find the material nominal density. Set the Woodcock trick data.
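//
// -- Reminder of the Woodcock (delta-scattering) trick set up here (sketch only; see the transport kernel for the
//    actual implementation): free flights are sampled using the minimum total MFP over all materials at their maximum
//    density, s = -mfp_min*log(rand); at the tentative interaction site the collision is accepted as real with
//    probability mfp_min/mfp_local, otherwise it is a virtual interaction and the flight continues. This avoids
//    computing voxel boundary crossings during the photon transport.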
//
// -- Sample material data file (data obtained from the PENELOPE 2006 database and models):
//
// [MATERIAL NAME]
// Water
// [NOMINAL DENSITY (g/cm^3)]
// 1.000
// [NUMBER OF DATA VALUES]
// 4096
// [MEAN FREE PATHS :: Energy (eV) || Rayleigh | Compton | Photoelectric | Pair-production | TOTAL (cm)]
// 1.00000E+03 7.27451E-01 9.43363E+01 2.45451E-04 1.00000E+35 2.45367E-04
// 5.00000E+03 1.80004E+00 8.35996E+00 2.38881E-02 1.00000E+35 2.35089E-02
// 1.00000E+04 4.34941E+00 6.26746E+00 2.02568E-01 1.00000E+35 1.87755E-01
// ...
// #[RAYLEIGH INTERACTIONS (RITA sampling of atomic form factor from EPDL database)]
// ...
// #[COMPTON INTERACTIONS (relativistic impulse model with approximated one-electron analytical profiles)]
// ...
//
//! @param[in] file_name_materials Array with the names of the material files.
//! @param[in] density_max maximum density in the geometry (needed to set Woodcock trick)
//! @param[out] density_nominal Array with the nominal density of the materials read
//! @param[out] mfp_table_data Constant values for the linear interpolation
//! @param[out] mfp_table_a_ptr First element for the linear interpolation.
//! @param[out] mfp_table_b_ptr Second element for the linear interpolation.
////////////////////////////////////////////////////////////////////////////////
void load_material(int myID, char file_name_materials[MAX_MATERIALS][250], float* density_max, float* density_nominal, struct linear_interp* mfp_table_data, float2** mfp_Woodcock_table_ptr, int* mfp_Woodcock_table_bytes, float3** mfp_table_a_ptr, float3** mfp_table_b_ptr, int* mfp_table_bytes, struct rayleigh_struct *rayleigh_table_ptr, struct compton_struct *compton_table_ptr)
{
char new_line[250];
char *new_line_ptr = NULL;
int mat, i, bin, input_num_values = 0, input_rayleigh_values = 0, input_num_shells = 0;
double delta_e=-99999.0;
// -- Init the number of shells to 0 for all materials
for (mat=0; mat<MAX_MATERIALS; mat++)
compton_table_ptr->noscco[mat] = 0;
// --Read the material data files:
MASTER_THREAD printf("\n -- Reading the material data files (MAX_MATERIALS=%d):\n", MAX_MATERIALS);
for (mat=0; mat<MAX_MATERIALS; mat++)
{
if ((file_name_materials[mat][0]=='\0') || (file_name_materials[mat][0]=='\n')) // Empty file name
continue; // Re-start loop for next material
MASTER_THREAD printf(" Mat %d: File \'%s\'\n", mat+1, file_name_materials[mat]);
// printf(" -- Reading material file #%d: \'%s\'\n", mat, file_name_materials[mat]);
gzFile file_ptr = gzopen(file_name_materials[mat], "rb"); // !!zlib!!
if (file_ptr==NULL)
{
printf("\n\n !!fopen ERROR!! File %d \'%s\' does not exist!!\n", mat, file_name_materials[mat]);
exit(-2);
}
do
{
new_line_ptr = gzgets(file_ptr, new_line, 250); // Read full line (max. 250 characters). // !!zlib!!
if (new_line_ptr==NULL)
{
printf("\n\n !!Reading ERROR!! File is not readable or does not contain the string \'[NOMINAL DENSITY\'!!\n");
exit(-2);
}
}
while(strstr(new_line,"[NOMINAL DENSITY")==NULL); // Skip rest of the header
// Read the material nominal density:
new_line_ptr = gzgets(file_ptr, new_line, 250); // !!zlib!!
sscanf(new_line, "# %f", &density_nominal[mat]);
if (density_max[mat]>0) // Material found in the voxels
{
MASTER_THREAD printf(" Nominal density = %f g/cm^3; Max density in voxels = %f g/cm^3\n", density_nominal[mat], density_max[mat]);
}
else // Material NOT found in the voxels
{
MASTER_THREAD printf(" This material is not used in any voxel.\n");
// Do not lose time reading the data for materials not found in the voxels, except for the first one (needed to determine the size of the input data).
if (0 == mat)
density_max[mat] = 0.01f*density_nominal[mat]; // Assign a small but positive density; this material will not be used anyway.
else
continue; // Move on to next material
}
// --For the first material, set the number of energy values and allocate table arrays:
new_line_ptr = gzgets(file_ptr, new_line, 250); // !!zlib!!
new_line_ptr = gzgets(file_ptr, new_line, 250); // !!zlib!!
sscanf(new_line, "# %d", &input_num_values);
if (0==mat)
{
mfp_table_data->num_values = input_num_values;
MASTER_THREAD printf(" Number of energy values in the mean free path database: %d.\n", input_num_values);
// Allocate memory for the linear interpolation arrays:
*mfp_Woodcock_table_bytes = sizeof(float2)*input_num_values;
*mfp_Woodcock_table_ptr = (float2*) malloc(*mfp_Woodcock_table_bytes); // Allocate space for the 2 parameter table
*mfp_table_bytes = sizeof(float3)*input_num_values*MAX_MATERIALS;
*mfp_table_a_ptr = (float3*) malloc(*mfp_table_bytes); // Allocate space for the 4 MFP tables
*mfp_table_b_ptr = (float3*) malloc(*mfp_table_bytes);
*mfp_table_bytes = sizeof(float3)*input_num_values*MAX_MATERIALS;
if (input_num_values>MAX_ENERGYBINS_RAYLEIGH)
{
printf("\n\n !!load_material ERROR!! Too many energy bins (Input bins=%d): increase parameter MAX_ENERGYBINS_RAYLEIGH=%d!!\n\n", input_num_values, MAX_ENERGYBINS_RAYLEIGH);
exit(-2);
}
if ((NULL==*mfp_Woodcock_table_ptr)||(NULL==*mfp_table_a_ptr)||(NULL==*mfp_table_b_ptr))
{
printf("\n\n !!malloc ERROR!! Not enough memory to allocate the linear interpolation data: %d bytes!!\n\n", (*mfp_Woodcock_table_bytes+2*(*mfp_table_bytes)));
exit(-2);
}
else
{
MASTER_THREAD printf(" Linear interpolation data correctly allocated (%f Mbytes)\n", (*mfp_Woodcock_table_bytes+2*(*mfp_table_bytes))/(1024.f*1024.f));
}
for (i=0; i<input_num_values; i++)
{
(*mfp_Woodcock_table_ptr)[i].x = 99999999.99f; // Init this array with a huge MFP, the minimum values are calculated below
}
}
else // Materials after first
{
if (input_num_values != mfp_table_data->num_values)
{
printf("\n\n !!load_material ERROR!! Incorrect number of energy values given in material \'%s\': input=%d, expected=%d\n",file_name_materials[mat], input_num_values, mfp_table_data->num_values);
exit(-2);
}
}
// -- Read the mean free paths (and Rayleigh cumulative prob):
new_line_ptr = gzgets(file_ptr, new_line, 250); // !!zlib!!
new_line_ptr = gzgets(file_ptr, new_line, 250); // !!zlib!!
double d_energy, d_rayleigh, d_compton, d_photelectric, d_total_mfp, d_pmax, e_last=-1.0;
for (i=0; i<input_num_values; i++)
{
new_line_ptr = gzgets(file_ptr, new_line, 250); // !!zlib!!
sscanf(new_line," %le %le %le %le %le %le", &d_energy, &d_rayleigh, &d_compton, &d_photelectric, &d_total_mfp, &d_pmax);
// Find and store the minimum total MFP at the current energy, for every material's maximum density:
float temp_mfp = d_total_mfp*(density_nominal[mat])/(density_max[mat]);
if (temp_mfp < (*mfp_Woodcock_table_ptr)[i].x)
(*mfp_Woodcock_table_ptr)[i].x = temp_mfp; // Store minimum total mfp [cm]
// Store the inverse MFP data points with [num_values rows]*[MAX_MATERIALS columns]
// Scaling the table to the nominal density so that I can re-scale in the kernel to the actual local density:
(*mfp_table_a_ptr)[i*(MAX_MATERIALS)+mat].x = 1.0/(d_total_mfp*density_nominal[mat]); // inverse TOTAL mfp * nominal density
(*mfp_table_a_ptr)[i*(MAX_MATERIALS)+mat].y = 1.0/(d_compton *density_nominal[mat]); // inverse Compton mfp * nominal density
(*mfp_table_a_ptr)[i*(MAX_MATERIALS)+mat].z = 1.0/(d_rayleigh *density_nominal[mat]); // inverse Rayleigh mfp * nominal density
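// NOTE (illustrative): since the stored values are 1/(mfp*density_nominal), the kernel can recover the local inverse
// MFP simply as (1/mfp_local) = table_value * local_voxel_density, i.e. the mass attenuation coefficient of a material
// is reused for any density assigned to that material.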
rayleigh_table_ptr->pmax[i*(MAX_MATERIALS)+mat] = d_pmax; // Store the maximum cumulative probability of the atomic form factor F^2 for this energy bin and material (used in the Rayleigh sampling).
if (0==i && 0==mat)
{
mfp_table_data->e0 = d_energy; // Store the first energy of the first material
}
if (0==i)
{
if (fabs(d_energy-mfp_table_data->e0)>1.0e-9)
{
printf("\n\n !!load_material ERROR!! Incorrect first energy value given in material \'%s\': input=%f, expected=%f\n", file_name_materials[mat], d_energy, mfp_table_data->e0);
exit(-2);
}
}
else if (1==i)
{
delta_e = d_energy-e_last;
}
else if (i>1)
{
if (((fabs((d_energy-e_last)-delta_e))/delta_e)>0.001) // Tolerate up to a 0.1% relative variation in the delta e (for each bin) to account for possible precision errors reading the energy values
{
printf(" !!ERROR reading material data!! The energy step between mean free path values is not constant!!\n (maybe not enough decimals given for the energy values)\n #value = %d, First delta: %f , New delta: %f, Energy: %f ; Rel.Dif=%f\n", i, delta_e, (d_energy-e_last), d_energy,((fabs((d_energy-e_last)-delta_e))/delta_e));
exit(-2);
}
}
e_last = d_energy;
}
if (0==mat) MASTER_THREAD printf(" Lowest energy first bin = %f eV, last bin = %f eV; bin width = %f eV\n", (mfp_table_data->e0), e_last, delta_e);
// -- Store the inverse of delta energy:
mfp_table_data->ide = 1.0f/delta_e;
// -- Store MFP data slope 'b' (.y for Woodcock):
for (i=0; i<(input_num_values-1); i++)
{
bin = i*MAX_MATERIALS+mat; // Set current bin, skipping MAX_MATERIALS columns
(*mfp_table_b_ptr)[bin].x = ((*mfp_table_a_ptr)[bin+MAX_MATERIALS].x - (*mfp_table_a_ptr)[bin].x) / delta_e;
(*mfp_table_b_ptr)[bin].y = ((*mfp_table_a_ptr)[bin+MAX_MATERIALS].y - (*mfp_table_a_ptr)[bin].y) / delta_e;
(*mfp_table_b_ptr)[bin].z = ((*mfp_table_a_ptr)[bin+MAX_MATERIALS].z - (*mfp_table_a_ptr)[bin].z) / delta_e;
}
// After maximum energy (last bin), assume constant slope:
(*mfp_table_b_ptr)[(input_num_values-1)*MAX_MATERIALS+mat] = (*mfp_table_b_ptr)[(input_num_values-2)*MAX_MATERIALS+mat];
// -- Rescale the 'a' parameter (.x for Woodcock) as if the bin started at energy = 0: we will not have to rescale to the bin minimum energy every time
for (i=0; i<input_num_values; i++)
{
d_energy = mfp_table_data->e0 + i*delta_e; // Set current bin lowest energy value
bin = i*MAX_MATERIALS+mat; // Set current bin, skipping MAX_MATERIALS columns
(*mfp_table_a_ptr)[bin].x = (*mfp_table_a_ptr)[bin].x - d_energy*(*mfp_table_b_ptr)[bin].x;
(*mfp_table_a_ptr)[bin].y = (*mfp_table_a_ptr)[bin].y - d_energy*(*mfp_table_b_ptr)[bin].y;
(*mfp_table_a_ptr)[bin].z = (*mfp_table_a_ptr)[bin].z - d_energy*(*mfp_table_b_ptr)[bin].z;
}
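// Illustrative sketch (not part of the original code, hypothetical variable names): after the rescaling above,
// the interpolated inverse MFP at an arbitrary energy E can presumably be evaluated in the kernel without any
// bin-offset correction, e.g.:
//    int bin = (int)((E - mfp_table_data->e0) * mfp_table_data->ide);   // ide = 1/delta_e
//    float inv_mfp = mfp_table_a[bin*MAX_MATERIALS+mat].x + E*mfp_table_b[bin*MAX_MATERIALS+mat].x;   // then rescaled by the local density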
// -- Reading data for RAYLEIGH INTERACTIONS (RITA sampling of atomic form factor from EPDL database):
do
{
new_line_ptr = gzgets(file_ptr, new_line, 250); // !!zlib!!
if (gzeof(file_ptr)!=0) // !!zlib!!
{
printf("\n\n !!End-of-file ERROR!! Rayleigh data not found: \"#[DATA VALUES...\" in file \'%s\'. Last line read: %s\n\n", file_name_materials[mat], new_line);
exit(-2);
}
}
while(strstr(new_line,"[DATA VALUES")==NULL); // Skip all lines until this text is found
new_line_ptr = gzgets(file_ptr, new_line, 250); // Read the number of data points in Rayleigh // !!zlib!!
sscanf(new_line, "# %d", &input_rayleigh_values);
if (input_rayleigh_values != NP_RAYLEIGH)
{
printf("\n\n !!ERROR!! The number of values for Rayleigh sampling is different than the allocated space: input=%d, NP_RAYLEIGH=%d. File=\'%s\'\n", input_rayleigh_values, NP_RAYLEIGH, file_name_materials[mat]);
exit(-2);
}
new_line_ptr = gzgets(file_ptr, new_line, 250); // Comment line: #[SAMPLING DATA FROM COMMON/CGRA/: X, P, A, B, ITL, ITU] // !!zlib!!
for (i=0; i<input_rayleigh_values; i++)
{
int itlco_tmp, ituco_tmp;
bin = NP_RAYLEIGH*mat + i;
new_line_ptr = gzgets(file_ptr, new_line, 250); // !!zlib!!
sscanf(new_line," %e %e %e %e %d %d", &(rayleigh_table_ptr->xco[bin]), &(rayleigh_table_ptr->pco[bin]),
&(rayleigh_table_ptr->aco[bin]), &(rayleigh_table_ptr->bco[bin]),
&itlco_tmp, &ituco_tmp);
rayleigh_table_ptr->itlco[bin] = (unsigned char) itlco_tmp;
rayleigh_table_ptr->ituco[bin] = (unsigned char) ituco_tmp;
}
// printf(" -- Rayleigh sampling data read. Input values = %d\n",input_rayleigh_values);
// -- Reading COMPTON INTERACTIONS data (relativistic impulse model with approximated one-electron analytical profiles):
do
{
new_line_ptr = gzgets(file_ptr, new_line, 250); // !!zlib!!
if (gzeof(file_ptr)!=0) // !!zlib!!
{
printf("\n\n !!End-of-file ERROR!! Compton data not found: \"[NUMBER OF SHELLS]\" in file \'%s\'. Last line read: %s\n\n", file_name_materials[mat], new_line);
exit(-2);
}
}
while(strstr(new_line,"[NUMBER OF SHELLS")==NULL); // Skip all lines until this text is found
new_line_ptr = gzgets(file_ptr, new_line, 250);
sscanf(new_line, "# %d", &input_num_shells); // Read the NUMBER OF SHELLS
if (input_num_shells>MAX_SHELLS)
{
printf("\n\n !!ERROR!! Too many shells for Compton interactions in file \'%s\': input=%d, MAX_SHELLS=%d\n", file_name_materials[mat], input_num_shells, MAX_SHELLS);
exit(-2);
}
compton_table_ptr->noscco[mat] = input_num_shells; // Store number of shells for this material in structure
new_line_ptr = gzgets(file_ptr, new_line, 250); // Comment line: #[SHELL INFORMATION FROM COMMON/CGCO/: FCO, UICO, FJ0, KZCO, KSCO]
int kzco_dummy, ksco_dummy;
for (i=0; i<input_num_shells; i++)
{
bin = mat + i*MAX_MATERIALS;
new_line_ptr = gzgets(file_ptr, new_line, 250); // !!zlib!!
sscanf(new_line," %e %e %e %d %d", &(compton_table_ptr->fco[bin]), &(compton_table_ptr->uico[bin]),
&(compton_table_ptr->fj0[bin]), &kzco_dummy, &ksco_dummy);
}
gzclose(file_ptr); // Material data read. Close the current material input file. // !!zlib!!
} // ["for" loop: continue with next material]
// -- Store Woodcock MFP slope in component '.y':
for (i=0; i<(mfp_table_data->num_values-1); i++)
(*mfp_Woodcock_table_ptr)[i].y = ((*mfp_Woodcock_table_ptr)[i+1].x - (*mfp_Woodcock_table_ptr)[i].x)/delta_e;
// -- Rescale the first parameter in component .x for Woodcock
for (i=0; i<mfp_table_data->num_values; i++)
{
(*mfp_Woodcock_table_ptr)[i].x = (*mfp_Woodcock_table_ptr)[i].x - (mfp_table_data->e0 + i*delta_e)*(*mfp_Woodcock_table_ptr)[i].y;
}
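// Illustrative note (not part of the original code): after this rescaling, the minimum (Woodcock) MFP at an
// arbitrary energy E can presumably be recovered in the kernel simply as mfp_min = table[bin].x + E*table[bin].y,
// without subtracting the bin's lowest energy.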
}
////////////////////////////////////////////////////////////////////////////////
#ifdef USING_CUDA
////////////////////////////////////////////////////////////////////////////////
//! Select and initialize the CUDA-enabled GPU that will be used in the simulation.
//! Allocates and copies the simulation data in the GPU global and constant memories.
//!
////////////////////////////////////////////////////////////////////////////////
void init_CUDA_device( int* gpu_id, int myID, int numprocs,
/*Variables to GPU constant memory:*/ struct voxel_struct* voxel_data, struct source_struct* source_data, struct source_energy_struct* source_energy_data, struct detector_struct* detector_data, struct linear_interp* mfp_table_data,
// /*Variables to GPU global memory:*/ float2* voxel_mat_dens, float2** voxel_mat_dens_device, unsigned int voxel_mat_dens_bytes,
/*Variables to GPU global memory:*/ int* voxel_mat_dens, int** voxel_mat_dens_device, long long int voxel_mat_dens_bytes, //!!FixedDensity_DBT!!
char* bitree, char** bitree_device, unsigned int bitree_bytes, //!!bitree!! v1.5b
unsigned long long int* image, unsigned long long int** image_device, int image_bytes,
float2* mfp_Woodcock_table, float2** mfp_Woodcock_table_device, int mfp_Woodcock_table_bytes,
float3* mfp_table_a, float3* mfp_table_b, float3** mfp_table_a_device, float3** mfp_table_b_device, int mfp_table_bytes,
struct rayleigh_struct* rayleigh_table, struct rayleigh_struct** rayleigh_table_device,
struct compton_struct* compton_table, struct compton_struct** compton_table_device,
struct detector_struct** detector_data_device, struct source_struct** source_data_device,
ulonglong2* voxels_Edep, ulonglong2** voxels_Edep_device, int voxels_Edep_bytes, short int* dose_ROI_x_min, short int* dose_ROI_x_max, short int* dose_ROI_y_min, short int* dose_ROI_y_max, short int* dose_ROI_z_min, short int* dose_ROI_z_max,
ulonglong2* materials_dose, ulonglong2** materials_dose_device, int flag_material_dose, int** seed_input_device, int* seed_input, int num_projections)
{
hipDeviceProp_t deviceProp;
int deviceCount;
checkCudaErrors(hipGetDeviceCount(&deviceCount));
if (0==deviceCount)
{
printf("\n !!ERROR!! No CUDA enabled GPU detected by thread #%d!!\n\n", myID);
exit(-1);
}
#ifdef USING_MPI
if (numprocs>1)
{
// *** Select the appropriate GPUs in the different workstations in the MPI hostfile:
// The idea is that each thread will wait for the previous thread to send a message with its processor name and GPU id,
// then it will assign the current GPU, and finally it will notify the following thread:
const int NODE_NAME_LENGTH = 31;
char processor_name[NODE_NAME_LENGTH+1], previous_processor_name[NODE_NAME_LENGTH+1];
int resultlen = -1;
MPI_Get_processor_name(processor_name, &resultlen);
MPI_Status status;
int gpu_id_to_avoid = *gpu_id;
clock_t clock_start;
if (myID == (numprocs-1))
clock_start = clock();
// Unless we are the first thread, wait for a message from the previous thread:
// The MPI_Recv command will block the execution of the code until the previous threads have communicated and shared the appropriate information.
if (0!=myID)
{
MPI_Recv(previous_processor_name, NODE_NAME_LENGTH, MPI_CHAR, myID-1, 111, MPI_COMM_WORLD, &status); // Receive the processor name and gpu_id from the previous thread
// printf("\n -> MPI_Recv thread %d: gpu_id=%d, %s\n", myID, (int)previous_processor_name[NODE_NAME_LENGTH-1], previous_processor_name); fflush(stdout); //!!Verbose!!
}
// Compare the first 30 characters of the 2 names to see if we changed the node, except for the first thread that always gets GPU 0:
if ((0==myID) || (0!=strncmp(processor_name, previous_processor_name, NODE_NAME_LENGTH-1)))
{
*gpu_id = 0; // Thread in a new node: assign to GPU 0:
}
else
{
// Current thread in the same node as the previous one: assign next GPU (previous GPU id given in element NODE_NAME_LENGTH-1 of the array)
*gpu_id = (int)previous_processor_name[NODE_NAME_LENGTH-1] + 1;
}
// Set the following GPU if this is the one to be skipped (given in the input file):
if (*gpu_id == gpu_id_to_avoid)
{
*gpu_id = *gpu_id + 1;
printf(" Skipping GPU %d in thread %d (%s), as selected in the input file: gpu_id=%d\n", gpu_id_to_avoid, myID, processor_name, *gpu_id); fflush(stdout);
}
//!!DeBuG!! MC-GPU_v1.4!! Skip GPUs connected to a monitor, if more GPUs available:
checkCudaErrors(hipGetDeviceProperties(&deviceProp, *gpu_id));
if (0!=deviceProp.kernelExecTimeoutEnabled) //!!DeBuG!!
{
if((*gpu_id)<(deviceCount-1)) //!!DeBuG!!
{
printf("\n ==> CUDA: GPU #%d is connected to a display and the CUDA driver would limit the kernel run time. Skipping this GPU!!\n", *gpu_id); //!!DeBuG!!
*gpu_id = (*gpu_id)+1; //!!DeBuG!!
}
}
// Send the processor and GPU id to the following thread, unless we are the last thread:
if (myID != (numprocs-1))
{
processor_name[NODE_NAME_LENGTH-1] = (char)(*gpu_id); // Store GPU number in the last element of the array
// printf(" <- MPI_Send thread %d: gpu_id=%d, %s\n", myID, (int)processor_name[NODE_NAME_LENGTH-1], processor_name); fflush(stdout); //!!Verbose!!
MPI_Send(processor_name, NODE_NAME_LENGTH, MPI_CHAR, myID+1, 111, MPI_COMM_WORLD); // Send processor name and gpu_id to the following thread (tag is the current thread id)
}
else
{
printf(" -- Time spent communicating between threads to determine the GPU id to use in each thread: %.6f s\n", ((double)(clock()-clock_start))/CLOCKS_PER_SEC); fflush(stdout);
}
}
#endif
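// Worked example of the GPU assignment above (illustrative, not part of the original code): with an MPI hostfile
// listing nodeA,nodeA,nodeB,nodeB, and assuming neither the GPU number given in the input file nor the
// display-timeout check forces a skip, ranks 0..3 get gpu_id 0,1,0,1: a rank that starts a new node resets to
// GPU 0, and a rank sharing a node with the previous rank takes the previous gpu_id + 1.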
if (*gpu_id>=deviceCount)
{
printf("\n\n !!WARNING!! The selected GPU number is too high, this device number does not exist!! GPU_id (starting at 0)=%d, deviceCount=%d\n", (*gpu_id), deviceCount); fflush(stdout);
if (numprocs==1)
{
*gpu_id = gpuGetMaxGflopsDeviceId();
printf(" Selecting the fastest GPU available using gpuGetMaxGflopsDeviceId(): GPU_id = %d\n\n", (*gpu_id)); fflush(stdout);
}
else
{
exit(-1);
}
}
checkCudaErrors(hipGetDeviceProperties(&deviceProp, *gpu_id)); // Re-load card properties in case we changed gpu_id
if (deviceProp.major>99 || deviceProp.minor>99)
{
printf("\n\n\n !!ERROR!! The selected GPU device does not support CUDA!! GPU_id=%d, deviceCount=%d, compute capability=%d.%d\n\n\n", (*gpu_id), deviceCount, deviceProp.major,deviceProp.minor);
exit(-1);
}
checkCudaErrors(hipSetDevice(*gpu_id)); // Set the GPU device. (optionally use: cutGetMaxGflopsDeviceId())
if (deviceProp.major>1)
{
#ifdef LARGE_CACHE
// -- Compute capability > 1: set a large L1 cache for the global memory, reducing the size of the shared memory:
// hipFuncCachePreferShared: shared memory is 48 KB
// hipFuncCachePreferL1: shared memory is 16 KB
// hipFuncCachePreferNone: no preference
printf("\n ==> CUDA: LARGE_CACHE defined --> setting a large global memory cache (L1) and a small shared memory (hipFuncCachePreferL1).\n");
hipFuncSetCacheConfig(track_particles, hipFuncCachePreferL1); // -- Set a large cache instead of a large shared memory.
// #else
// -- Using default:
// printf("\n ==> CUDA: LARGE_CACHE not defined --> setting a large shared memory and a small global memory cache (hipFuncCachePreferShared).\n");
// hipFuncSetCacheConfig(track_particles, hipFuncCachePreferShared); // !!DeBuG!! Setting size of shared memory/global cache
#endif
}
// DISCONTINUED CUDA FUNCTION! register int GPU_cores = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount; // CUDA SDK function to get the number of GPU cores
// -- Reading the device properties:
#ifdef USING_MPI
printf("\n ==> CUDA (MPI process #%d): %d CUDA enabled GPU detected! Using device #%d: \"%s\"\n", myID, deviceCount, (*gpu_id), deviceProp.name);
#else
printf("\n ==> CUDA: %d CUDA enabled GPU detected! Using device #%d: \"%s\"\n", deviceCount, (*gpu_id), deviceProp.name);
#endif
// printf(" Compute capability: %d.%d, Number multiprocessors: %d, Number cores: %d\n", deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount, GPU_cores);
printf(" Compute capability: %d.%d, Number multiprocessors: %d\n", deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
printf(" Clock rate: %.2f GHz, Global memory: %.3f Mbyte, Constant memory: %.2f kbyte\n", deviceProp.clockRate*1.0e-6f, deviceProp.totalGlobalMem/(1024.f*1024.f), deviceProp.totalConstMem/1024.f);
printf(" Shared memory per block: %.2f kbyte, Registers per block: %.2f kbyte\n", deviceProp.sharedMemPerBlock/1024.f, deviceProp.regsPerBlock/1024.f);
int driverVersion = 0, runtimeVersion = 0;
hipDriverGetVersion(&driverVersion);
hipRuntimeGetVersion(&runtimeVersion);
printf(" CUDA Driver Version: %d.%d, Runtime Version: %d.%d\n\n", driverVersion/1000, driverVersion%100, runtimeVersion/1000, runtimeVersion%100);
if (0!=deviceProp.kernelExecTimeoutEnabled)
{
printf("\n\n\n !!WARNING!! The selected GPU is connected to a display and therefore CUDA driver will limit the kernel run time to 5 seconds and the simulation will likely fail!!\n");
printf( " You can fix this by executing the simulation in a different GPU (select number in the input file) or by turning off the window manager and using the text-only Linux shell.\n\n\n");
// exit(-1);
}
fflush(stdout);
clock_t clock_init = clock();
// -- Allocate the constant variables in the device:
checkCudaErrors(hipMemcpyToSymbol(voxel_data_CONST, voxel_data, sizeof(struct voxel_struct)));
checkCudaErrors(hipMemcpyToSymbol(source_energy_data_CONST, source_energy_data, sizeof(struct source_energy_struct)));
// Source, detector data now copied to global memory and transfered to shared memory in the kernel. OLD CODE: checkCudaErrors(hipMemcpyToSymbol(detector_data_CONST, detector_data, sizeof(struct detector_struct)));
checkCudaErrors(hipMemcpyToSymbol(mfp_table_data_CONST, mfp_table_data, sizeof(struct linear_interp)));
checkCudaErrors(hipMemcpyToSymbol(dose_ROI_x_min_CONST, dose_ROI_x_min, sizeof(short int)));
checkCudaErrors(hipMemcpyToSymbol(dose_ROI_x_max_CONST, dose_ROI_x_max, sizeof(short int)));
checkCudaErrors(hipMemcpyToSymbol(dose_ROI_y_min_CONST, dose_ROI_y_min, sizeof(short int)));
checkCudaErrors(hipMemcpyToSymbol(dose_ROI_y_max_CONST, dose_ROI_y_max, sizeof(short int)));
checkCudaErrors(hipMemcpyToSymbol(dose_ROI_z_min_CONST, dose_ROI_z_min, sizeof(short int)));
checkCudaErrors(hipMemcpyToSymbol(dose_ROI_z_max_CONST, dose_ROI_z_max, sizeof(short int)));
double total_mem = sizeof(struct voxel_struct)+sizeof(struct source_struct)+sizeof(struct detector_struct)+sizeof(struct linear_interp) + 6*sizeof(short int);
MASTER_THREAD printf(" ==> CUDA: Constant data successfully copied to the device. CONSTANT memory used: %lf kbytes (%.1lf%%)\n", total_mem/1024.0, 100.0*total_mem/deviceProp.totalConstMem);
// -- Allocate the device global memory:
if (*dose_ROI_x_max > -1) // Allocate dose array only if the tally is not disabled
{
checkCudaErrors(hipMalloc((void**) voxels_Edep_device, voxels_Edep_bytes));
if (*voxels_Edep_device==NULL)
{
printf("\n hipMalloc ERROR!! Error allocating the dose array on the device global memory!! (%lf Mbytes)\n", voxels_Edep_bytes/(1024.0*1024.0));
exit(-1);
}
}
checkCudaErrors(hipMalloc((void**) voxel_mat_dens_device, voxel_mat_dens_bytes));
checkCudaErrors(hipMalloc((void**) bitree_device, bitree_bytes)); //!!bitree!! v1.5b
checkCudaErrors(hipMalloc((void**) image_device, image_bytes));
checkCudaErrors(hipMalloc((void**) mfp_Woodcock_table_device, mfp_Woodcock_table_bytes));
checkCudaErrors(hipMalloc((void**) mfp_table_a_device, mfp_table_bytes));
checkCudaErrors(hipMalloc((void**) mfp_table_b_device, mfp_table_bytes));
checkCudaErrors(hipMalloc((void**) rayleigh_table_device, sizeof(struct rayleigh_struct)));
checkCudaErrors(hipMalloc((void**) compton_table_device, sizeof(struct compton_struct)));
checkCudaErrors(hipMalloc((void**) detector_data_device, num_projections*sizeof(struct detector_struct)));
checkCudaErrors(hipMalloc((void**) source_data_device, num_projections*sizeof(struct source_struct))); // The array of detectors, sources has "MAX_NUM_PROJECTIONS" elements but I am allocating only the used "num_projections" elements to the GPU
checkCudaErrors(hipMalloc((void**) seed_input_device, sizeof(int))); // Store latest random seed used in GPU in global memory to continue random sequence in consecutive projections. !!DBTv1.4!!
if (flag_material_dose==1)
checkCudaErrors(hipMalloc((void**) materials_dose_device, MAX_MATERIALS*sizeof(ulonglong2))); // !!tally_materials_dose!!
total_mem = voxels_Edep_bytes + voxel_mat_dens_bytes + image_bytes + mfp_Woodcock_table_bytes + 2*mfp_table_bytes + sizeof(struct compton_struct) + sizeof(struct rayleigh_struct) + num_projections*(sizeof(struct detector_struct) + sizeof(struct source_struct)) + bitree_bytes;
if (*voxel_mat_dens_device==NULL || *image_device==NULL || *mfp_Woodcock_table_device==NULL || *mfp_table_a_device==NULL ||
*mfp_table_b_device==NULL || *rayleigh_table_device==NULL || *compton_table_device==NULL || *detector_data_device==NULL || *source_data_device==NULL)
{
printf("\n hipMalloc ERROR!! Device global memory not correctly allocated!! (%lf Mbytes)\n", total_mem/(1024.0*1024.0));
exit(-1);
}
else
{
MASTER_THREAD printf(" ==> CUDA: Device global memory correctly allocated. GLOBAL memory used: %lf Mbytes (%.1lf%%)\n", total_mem/(1024.0*1024.0), 100.0*total_mem/deviceProp.totalGlobalMem);
}
// --Copy the host memory to the device:
checkCudaErrors(hipMemcpy(*voxel_mat_dens_device, voxel_mat_dens, voxel_mat_dens_bytes, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(*bitree_device, bitree, bitree_bytes, hipMemcpyHostToDevice)); //!!bitree!! v1.5b
checkCudaErrors(hipMemcpy(*mfp_Woodcock_table_device, mfp_Woodcock_table, mfp_Woodcock_table_bytes, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(*mfp_table_a_device, mfp_table_a, mfp_table_bytes, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(*mfp_table_b_device, mfp_table_b, mfp_table_bytes, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(*rayleigh_table_device, rayleigh_table, sizeof(struct rayleigh_struct), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(*compton_table_device, compton_table, sizeof(struct compton_struct), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(*detector_data_device, detector_data, num_projections*sizeof(struct detector_struct),hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(*source_data_device, source_data, num_projections*sizeof(struct source_struct), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(*seed_input_device, seed_input, sizeof(int), hipMemcpyHostToDevice)); // Upload initial seed value. !!DBTv1.4!!
// --Init the image array to 0 using a GPU kernel instead of hipMemcpy:
// Simple version: checkCudaErrors( hipMemcpy( image_device, image, image_bytes, hipMemcpyHostToDevice) );
int pixels_per_image = detector_data[0].num_pixels.x * detector_data[0].num_pixels.y;
MASTER_THREAD printf(" ==> CUDA: Launching kernel to initialize the device image to 0: number of blocks = %d, threads per block = 128\n", (int)(ceil(pixels_per_image/128.0f)+0.01f) );
hipLaunchKernelGGL(( init_image_array_GPU), dim3((int)(ceil(pixels_per_image/128.0f)+0.01f)),dim3(128), 0, 0, *image_device, pixels_per_image);
fflush(stdout);
hipDeviceSynchronize(); // Force the runtime to wait until all device tasks have completed
getLastCudaError("\n\n !!Kernel execution failed initializing the image array!! "); // Check if kernel execution generated any error:
// --Init the dose array to 0 using a GPU kernel, if the tally is not disabled:
if (*dose_ROI_x_max > -1)
{
MASTER_THREAD printf(" ==> CUDA: Initialize the device dose deposition to 0 using hipMemcpy.\n");
checkCudaErrors(hipMemcpy(*voxels_Edep_device, voxels_Edep, voxels_Edep_bytes, hipMemcpyHostToDevice) );
/* // -- OPTIONAL CODE: Launch kernel to initialize the device dose deposition to 0 (MAY FAIL IF DOSE MATRIX IS TOO BIG!) !!DeBuG!!
int num_voxels_dose = voxels_Edep_bytes/sizeof(ulonglong2); // Calculate the number of voxels in the dose array
int num_blocks, num_threads_block = 0;
// Select the number of threads per block making sure we don't try to launch more blocks than CUDA's maximum value:
do
{
num_threads_block += 64;
num_blocks = (int)(ceil(((double)num_voxels_dose)/((double)num_threads_block))+0.001);
}
while (num_blocks > 65500);
MASTER_THREAD printf(" ==> CUDA: Launching kernel to initialize the device dose deposition to 0: number of blocks = %d, threads per block = %d\n", num_blocks, num_threads_block);
init_dose_array_GPU<<<num_blocks,num_threads_block>>>(*voxels_Edep_device, num_voxels_dose);
hipDeviceSynchronize();
getLastCudaError("\n\n !!Kernel execution failed initializing the dose array!! "); // Check if kernel execution generated any error:
*/
}
// Init materials_dose array in GPU with 0 (same as host):
if (flag_material_dose==1)
checkCudaErrors(hipMemcpy(*materials_dose_device, materials_dose, MAX_MATERIALS*sizeof(ulonglong2), hipMemcpyHostToDevice)); // !!tally_materials_dose!!
MASTER_THREAD printf(" Time spent allocating and copying memory to the device: %.6f s\n", float(clock()-clock_init)/CLOCKS_PER_SEC);
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//! Guestimate a good number of blocks to estimate the speed of different generations
//! of GPUs. Slower GPUs will simulate fewer particles and hopefully the fastest GPUs
//! will not have to wait much. If the speed is not accurately estimated in the speed test
//! some GPUs will simulate longer than others and valuable simulation time will be wasted
//! in the idle GPUs.
//!
//! In this function the "optimum" number of blocks for the speed test is heuristically
//! computed from three GPU characteristics:
//! [0.5] * [approximate number of GPU cores] * [core frequency, GHz] * [major CUDA compute capability] + [64]
//!
//! The number of cores is approximated as 128 per multiprocessor (the exact count can no longer be
//! queried from the CUDA runtime), and the result is capped at 1024 blocks to keep the speed test short.
//! The scaling factor 0.5 is arbitrary and can be modified depending on the case (for short
//! simulations this value may have to be reduced or the speed test will take longer than the
//! whole simulation). The constant 64 blocks are added to try to get enough blocks for a reliable
//! timing of slow GPUs. Faster GPUs (more cores, higher clock rate, newer architecture) therefore
//! get proportionally more blocks in the speed test, up to the 1024-block cap.
//!
//! @param[in] gpu_id GPU number
//! @param[out] num_blocks Returns a number of blocks related to the expected GPU speed
////////////////////////////////////////////////////////////////////////////////
int guestimate_GPU_performance(int gpu_id)
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, gpu_id);
// DISCONTINUED CUDA FUNCTION! float num_cores = (float) _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount;
float num_cores_aprox = 128 * deviceProp.multiProcessorCount; // I can't get the exact number of cores anymore; assume 128 per multiprocessor
float comp_capability = (float) deviceProp.major;
float frequency = deviceProp.clockRate*1.0e-6f;
int guestimated_value = (int)(0.5f*num_cores_aprox*frequency*comp_capability + 64.0f);
return min_value(guestimated_value, 1024); // Limit the returned number of blocks to prevent too long speed tests !!DBT!!
}
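// Illustrative example with assumed hardware (not from the original code): a GPU with 16 multiprocessors
// running at 1.0 GHz with compute capability 3 gives 0.5*(128*16)*1.0*3 + 64 = 3136 blocks, which the
// min_value() call above caps at 1024 blocks for the speed test.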
#endif
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//! Report the tallied image in ASCII and binary form (32-bit floats).
//! Separate images for primary and scatter radiation are generated.
//!
//!
//! @param[in] file_name_output File where tallied image is reported
//! @param[in] detector_data Detector description read from the input file (pointer to detector_struct)
//! @param[in] image Tallied image (in meV per pixel)
//! @param[in] time_elapsed Time elapsed during the main loop execution (in seconds)
//! @param[in] total_histories Total number of x-rays simulated
////////////////////////////////////////////////////////////////////////////////
int report_image(char* file_name_output, struct detector_struct* detector_data, struct source_struct* source_data, float mean_energy_spectrum, unsigned long long int* image, double time_elapsed, unsigned long long int total_histories, int current_projection, int num_projections, int myID, int numprocs, double current_angle, int* seed_input)
{
int pixels_per_image = (detector_data[0].num_pixels.x*detector_data[0].num_pixels.y), pixel=0;
float* energy_noScatter_array = (float*) malloc(pixels_per_image*sizeof(float)); // Allocate temporary space to report the binary image at the end
float* energy_compton_array = (float*) malloc(pixels_per_image*sizeof(float));
float* energy_rayleigh_array = (float*) malloc(pixels_per_image*sizeof(float));
float* energy_multiscatter_array = (float*) malloc(pixels_per_image*sizeof(float));
// -- Report data:
printf("\n\n *** IMAGE TALLY PERFORMANCE REPORT ***\n");
if(num_projections!=1) // Output the projection angle when simulating a CT:
{
printf(" Tomographic projection %d of %d: acquisition angle = %lf \n", current_projection, num_projections, current_angle*RAD2DEG);
}
printf(" Simulated x rays: %lld\n", total_histories);
printf(" Simulation time [s]: %.2f\n", time_elapsed);
if (time_elapsed>0.000001)
printf(" Speed [x-rays/s]: %.2f\n\n", ((double)total_histories)/time_elapsed);
FILE* file_ptr = fopen(file_name_output, "w");
if (file_ptr==NULL)
{
printf("\n\n !!fopen ERROR report_image!! File %s can not be opened!!\n", file_name_output);
exit(-3);
}
// -- Report the whole image in ASCII text only if the file name contains ".dat". Otherwise, save disk space by writing only the header in ASCII and the data in raw format: // !!DBTv1.4!!
bool flag_report_ASCII = false;
if (strstr(file_name_output,".dat")!=NULL)
flag_report_ASCII = true; // !!DBTv1.4!!
fprintf(file_ptr, "# \n");
fprintf(file_ptr, "# *****************************************************************************\n");
fprintf(file_ptr, "# *** MC-GPU, version 1.4_DBT (http://code.google.com/p/mcgpu/) ***\n");
fprintf(file_ptr, "# *** ***\n");
fprintf(file_ptr, "# *** Andreu Badal ([email protected]) ***\n");
fprintf(file_ptr, "# *****************************************************************************\n");
fprintf(file_ptr, "# \n");
#ifdef USING_CUDA
fprintf(file_ptr, "# *** SIMULATION IN THE GPU USING CUDA ***\n");
#else
fprintf(file_ptr, "# *** SIMULATION IN THE CPU ***\n");
#endif
fprintf(file_ptr, "#\n");
fprintf(file_ptr, "# Image created counting the energy arriving at each pixel: ideal energy integrating detector.\n");
int2 seed;
if (detector_data[0].gain_W<0.001f)
{
// Reporting the image in ideal energy fluence units:
fprintf(file_ptr, "# Pixel value units: eV/cm^2 per history (energy fluence).\n");
}
else
{
// Reporting the image in charge units (electrons) after sampling a number of electron-hole-pairs generated by the tallied energy deposition according to the input detector gain and Swank:
fprintf(file_ptr, "# Pixel value units: electrons/cm^2 per history (detected charge).\n"); //!!DETECTOR_RESPONSE!!
fprintf(file_ptr, "# Detector gain W_+- = %f eV/detected_charge\n", detector_data[0].gain_W);
fprintf(file_ptr, "# Swank factor = %f -> relative std_dev of gain = %f\n", 1.0f/(1.0f+detector_data[0].Swank_rel_std*detector_data[0].Swank_rel_std), detector_data[0].Swank_rel_std); // Swank_factor = mean^2/(mean^2 + std_dev^2) --> (std_dev/mean) = sqrt(1/Swank_factor - 1)
fprintf(file_ptr, "# Mean electronic noise per pixel = %f electrons\n", detector_data[0].electronic_noise);
fprintf(file_ptr, "# Conversion from the energy deposited in each pixel to charge is done sampling a Gaussian distribution with:\n");
fprintf(file_ptr, "# mean = E_deposited/gain_W + electronic_noise\n");
fprintf(file_ptr, "# std_dev^2 = mean*sqrt(1/Swank_factor - 1) + sqrt(electronic_noise)\n");
// Init random seed for the Gaussian sampling:
seed.x = *seed_input;
seed.y = *seed_input;
int l;
for (l=0; l<1234; l++)
{
double dummy = ranecu_double_CPU(&seed); // Waste a few thousand random values to make sure that we don't have problems by using the same seed for both MLCGs
}
}
fprintf(file_ptr, "#\n");
if(num_projections!=1) // Output the projection angle when simulating a CT:
{
// !!DBTv1.4!! First tomo projection is [1], proj [0] is 0 deg if flag_simulateMammoAfterDBT==true
fprintf(file_ptr, "# Tomographic projection %d of %d: acquisition angle = %lf \n", current_projection, num_projections, current_angle*RAD2DEG);
}
fprintf(file_ptr, "# Focal spot position = (%.8f,%.8f,%.8f), cone beam direction = (%.8f,%.8f,%.8f)\n", source_data[current_projection].position.x, source_data[current_projection].position.y, source_data[current_projection].position.z, source_data[current_projection].direction.x, source_data[current_projection].direction.y, source_data[current_projection].direction.z);
fprintf(file_ptr, "# Focal spot Gaussian blur FWHM = %f\n", source_data[current_projection].focal_spot_FWHM);
fprintf(file_ptr, "# Focal spot rotation blur = %f degrees\n", source_data[current_projection].rotation_blur*RAD2DEG);
if (detector_data[current_projection].grid_freq>0.0f)
fprintf(file_ptr, "# Antiscatter grid ratio = %f ; grid frequency = %f lines per cm\n", fabsf(detector_data[current_projection].grid_ratio), detector_data[current_projection].grid_freq); // !!DBTv1.5!!
else
fprintf(file_ptr, "# Antiscatter grid not used.\n");
fprintf(file_ptr, "# \n");
fprintf(file_ptr, "# Pixel size: %lf x %lf = %lf cm^2\n", 1.0/(double)(detector_data[0].inv_pixel_size_X), 1.0/(double)(detector_data[0].inv_pixel_size_Z), 1.0/(double)(detector_data[0].inv_pixel_size_X*detector_data[0].inv_pixel_size_Z));
fprintf(file_ptr, "# Number of pixels in X and Z: %d %d\n", detector_data[0].num_pixels.x, detector_data[0].num_pixels.y);
fprintf(file_ptr, "# \n");
fprintf(file_ptr, "# The image data is reported in binary format in the .raw files (each pixel given as 32-bit float, little-endian order).\n");
// SEPARATE SCATTER: fprintf(file_ptr, "# Five images are reported one after the other in each .raw file: all signal combined, non-scattered x-rays, Compton scatter, Rayleigh scatter, multiple-scatter.\n");
// NOT SEPARATING SCATTER: January 2018
fprintf(file_ptr, "# Two images are reported one after the other in each .raw file: all SCATTER and PRIMARIES combined and primary x-rays only (which includes additive electronic noise)\n");
fprintf(file_ptr, "# \n");
if (source_data[0].flag_halfConeX) // Sampling only half beam towards +X for mammo geometry! !!DBT!! !!HalfBeam!! !!DBTv1.4!!
{
fprintf(file_ptr, "# NOTE: \"flag_halfCone==true\" --> Image acquired with only half cone beam towards positive azimuthal angles, with beam offset to the edge of the image.\n"); // !!DBT!! !!HalfBeam!! !!DBTv1.4!!
}
if (flag_report_ASCII)
{
fprintf(file_ptr, "# Order of pixel data below: X rows (width) first, blank line separates the different Z rows (height).\n");
fprintf(file_ptr, "# \n");
fprintf(file_ptr, "# [NON-SCATTERED] [COMPTON] [RAYLEIGH] [MULTIPLE-SCATTING]\n");
fprintf(file_ptr, "# ==========================================================\n");
}
else
{
fprintf(file_ptr, "# Pixel data is provided only in the .raw files. To report the data in ASCII format, include the string \".dat\" in the input image file name.\n");
fprintf(file_ptr, "# \n");
}
// -- Prepare binary output:
char file_binary[250];
strncpy (file_binary, file_name_output, 250);
strcat(file_binary,".raw"); // !!BINARY!!
FILE* file_binary_ptr = fopen(file_binary, "w"); // !!BINARY!!
if (file_binary_ptr==NULL)
{
printf("\n\n !!fopen ERROR report_image!! Binary file %s can not be opened for writing!!\n", file_binary);
exit(-3);
}
const double invSCALE = 1.0/SCALE_eV; // conversion to eV using the inverse of the constant used in the "tally_image" kernel function (defined in the header file)
const double invHIST = 1.0 / ((double)total_histories); // ==> [eV per history]
const double invAREA = detector_data[0].inv_pixel_size_X * detector_data[0].inv_pixel_size_Z; // ==> [eV/cm^2 per history]
double energy_noScatter, energy_compton, energy_rayleigh, energy_multiscatter;
double energy_integral = 0.0; // Integrate (add) the energy in the image pixels [meV]
double maximum_energy_pixel = -100.0; // Find maximum pixel signal
int maximum_energy_pixel_x=0, maximum_energy_pixel_y=0, maximum_energy_pixel_number=0;
int i, j;
for(j=0; j<detector_data[0].num_pixels.y; j++)
{
for(i=0; i<detector_data[0].num_pixels.x; i++)
{
pixel = i + j*detector_data[0].num_pixels.x; // Set current pixel
energy_noScatter = invSCALE*(double)(image[pixel]);
energy_compton = invSCALE*(double)(image[pixel + pixels_per_image]);
energy_rayleigh = invSCALE*(double)(image[pixel + 2*pixels_per_image]);
energy_multiscatter = invSCALE*(double)(image[pixel + 3*pixels_per_image]);
if (detector_data[0].gain_W<0.001f)
{
// Normalize detected energy by number of histories and pixel area ==> energy fluence per hist.
energy_noScatter = energy_noScatter * invHIST * invAREA;
energy_compton = energy_compton * invHIST * invAREA;
energy_rayleigh = energy_rayleigh * invHIST * invAREA;
energy_multiscatter = energy_multiscatter * invHIST * invAREA;
// -- Write the results in an external file; the image corresponding to all particles is not written: it has to be inferred by adding all the images
if (flag_report_ASCII)
fprintf(file_ptr, "%.8lf %.8lf %.8lf %.8lf\n", energy_noScatter, energy_compton, energy_rayleigh, energy_multiscatter); // Report energy fluence per history
// Keep the data in arrays to be output at the end in binary form:
energy_noScatter_array[pixel] = (float)energy_noScatter;
energy_compton_array[pixel] = (float)energy_compton;
energy_rayleigh_array[pixel] = (float)energy_rayleigh;
energy_multiscatter_array[pixel]= (float)energy_multiscatter;
}
else
{
//!!DETECTOR_RESPONSE!! Convert the detected energy to a random number of charges using a Gaussian distribution with variance == mean:
//!!DETECTOR_RESPONSE!! Additive electronic noise is added only to the No Scatter results. It can't be included in each scatter image because it would be counted 4 times in the final image.
// Sample 4 gaussian distributed random variables (mean=0, std_dev=1):
double g1=0.0, g2=0.0, g3=0.0, g4=0.0;
gausspdf_double_CPU(&g1, &g2, &seed);
gausspdf_double_CPU(&g3, &g4, &seed);
// Conversion from the energy deposited in each pixel to charge is done sampling a Gaussian distribution with:
// mean = E_deposited/gain_W + electronic_noise
// std_dev^2 = mean*sqrt(1/Swank_factor - 1) + sqrt(electronic_noise)
double mean = energy_noScatter/detector_data[0].gain_W;
double std_dev = sqrt(mean*detector_data[0].Swank_rel_std); // Swank_rel_std = sqrt(1.0/Swank-1.0)
mean = mean + detector_data[0].electronic_noise; // Adding additive electronic noise
std_dev = std_dev + sqrt(detector_data[0].electronic_noise);
energy_noScatter = g1*std_dev + mean;
if (energy_noScatter<0.0) energy_noScatter = 0.0; // Prevent negative pixel values
mean = energy_compton/detector_data[0].gain_W;
std_dev = sqrt(mean*detector_data[0].Swank_rel_std);
energy_compton = g2*std_dev + mean;
if (energy_compton<0.0) energy_compton = 0.0;
mean = energy_rayleigh/detector_data[0].gain_W;
std_dev = sqrt(mean*detector_data[0].Swank_rel_std);
energy_rayleigh = g3*std_dev + mean;
if (energy_rayleigh<0.0) energy_rayleigh = 0.0;
mean = energy_multiscatter/detector_data[0].gain_W;
std_dev = sqrt(mean*detector_data[0].Swank_rel_std);
energy_multiscatter = g4*std_dev + mean;
if (energy_multiscatter<0.0) energy_multiscatter = 0.0;
if (flag_report_ASCII)
fprintf(file_ptr, "%d %d %d %d\n", (int)(energy_noScatter+0.5), (int)(energy_compton+0.5), (int)(energy_rayleigh+0.5), (int)(energy_multiscatter+0.5)); // Report collected charge
// Keep the data in arrays to be output at the end in binary form:
energy_noScatter_array[pixel] = (float)round(energy_noScatter); // Round the floating point value into an integral number of charges
energy_compton_array[pixel] = (float)round(energy_compton);
energy_rayleigh_array[pixel] = (float)round(energy_rayleigh);
energy_multiscatter_array[pixel]= (float)round(energy_multiscatter);
}
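// Worked example of the conversion above (illustrative input values, not from the original code): with
// gain_W = 50 eV per detected charge, Swank_rel_std = 0.05 and electronic_noise = 1000 electrons, a pixel
// whose non-scattered signal is 1.0e6 eV gives mean = 1.0e6/50 + 1000 = 21000 electrons and
// std_dev = sqrt(20000*0.05) + sqrt(1000) ~ 31.6 + 31.6 ~ 63.2 electrons before the Gaussian sampling.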
double total_energy_pixel = (double)(image[pixel] + image[pixel + pixels_per_image] + image[pixel + 2*pixels_per_image] + image[pixel + 3*pixels_per_image]); // Find and report the pixel with maximum signal
if (total_energy_pixel>maximum_energy_pixel)
{
maximum_energy_pixel = total_energy_pixel;
maximum_energy_pixel_x = i;
maximum_energy_pixel_y = j;
maximum_energy_pixel_number = pixel;
}
energy_integral += total_energy_pixel; // Count total energy in the whole image
}
if (flag_report_ASCII)
fprintf(file_ptr, "\n"); // Separate rows with an empty line for visualization with gnuplot.
}
fprintf(file_ptr, "#\n");
fprintf(file_ptr, "# *** Simulation REPORT: ***\n");
fprintf(file_ptr, "# Fraction of source energy detected (over the mean energy of the spectrum): %.3lf%%\n", 100.0*invSCALE*(energy_integral/(double)(total_histories))/(double)(mean_energy_spectrum));
fprintf(file_ptr, "# Maximum energy fluence detected in pixel %i: (x,y)=(%i,%i) -> pixel value = %lf eV/cm^2 per hist.\n", maximum_energy_pixel_number, maximum_energy_pixel_x, maximum_energy_pixel_y, maximum_energy_pixel*invSCALE*invAREA*invHIST);
fprintf(file_ptr, "# Simulated x rays: %lld\n", total_histories);
fprintf(file_ptr, "# Simulation time [s]: %.2f\n", time_elapsed);
if (time_elapsed>0.000001)
fprintf(file_ptr, "# Speed [x-rays/sec]: %.2f\n\n", ((double)total_histories)/time_elapsed);
fclose(file_ptr); // Close output file and flush stream
printf(" Fraction of initial energy arriving at the detector (over the mean energy of the spectrum): %.3lf%%\n", 100.0*invSCALE*(energy_integral/(double)(total_histories))/(double)(mean_energy_spectrum));
printf(" Maximum energy fluence detected in pixel %i: (x,y)=(%i,%i). Maximum pixel value = %lf eV/cm^2 per hist.\n\n", maximum_energy_pixel_number, maximum_energy_pixel_x, maximum_energy_pixel_y, maximum_energy_pixel*invSCALE*invAREA*invHIST);
fflush(stdout);
// Report binary data as consecutive images in a binary file:
for(i=0; i<pixels_per_image; i++)
{
float energy_float = energy_noScatter_array[i] + energy_compton_array[i] + energy_rayleigh_array[i] + energy_multiscatter_array[i];
fwrite(&energy_float, sizeof(float), 1, file_binary_ptr); // Total image (scatter + primary)
}
for(i=0; i<pixels_per_image; i++)
fwrite(&energy_noScatter_array[i], sizeof(float), 1, file_binary_ptr); // Non-scattered image
// NOT SEPARATING SCATTER: January 2018
/*
for(i=0; i<pixels_per_image; i++)
fwrite(&energy_compton_array[i], sizeof(float), 1, file_binary_ptr); // Compton image
for(i=0; i<pixels_per_image; i++)
fwrite(&energy_rayleigh_array[i], sizeof(float), 1, file_binary_ptr); // Rayleigh image
for(i=0; i<pixels_per_image; i++)
fwrite(&energy_multiscatter_array[i], sizeof(float), 1, file_binary_ptr); // Multiple-scatter image
*/
// //!!DeBuG!! REPORT THE PIXEL ENERGY PRIMARY AFTER THE SAMPLED CHARGES FOR DEBUGGING; NOT NORMALIZED!
// for(i=0; i<pixels_per_image; i++)
// {
// float energy_float = (float)(image[i]*invSCALE); //*invHIST*invAREA);
// fwrite(&energy_float, sizeof(float), 1, file_binary_ptr); //!!DeBuG!! !!DeBuG!!
// }
// for(i=0; i<pixels_per_image; i++)
// {
// float energy_float = (float)((image[i] + image[i + pixels_per_image] + image[i + 2*pixels_per_image] + image[i + 3*pixels_per_image])*invSCALE); //*invHIST*invAREA);
// fwrite(&energy_float, sizeof(float), 1, file_binary_ptr); //!!DeBuG!! !!DeBuG!!
// }
fclose(file_binary_ptr);
free(energy_noScatter_array);
free(energy_compton_array);
free(energy_rayleigh_array);
free(energy_multiscatter_array);
return 0; // The report function could return a non-zero value to continue the simulation...
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
//! Report the total tallied 3D voxel dose deposition for all projections.
//! The voxel doses in the input ROI and their respective uncertainties are reported
//! in binary form (32-bit floats) in two separate .raw files.
//! The dose in a single plane at the level of the focal spot is also reported in
//! ASCII format for simple visualization with GNUPLOT.
//! The total dose deposited in each different material is reported to the standard output.
//! The material dose is calculated adding the energy deposited in the individual voxels
//! within the dose ROI, and dividing by the total mass of the material in the ROI.
//!
//! @param[in] file_dose_output File where the tallied dose distribution is reported
//! @param[in] num_projections Number of projections simulated
//! @param[in] voxel_data Voxelized geometry description (number of voxels and voxel size)
//! @param[in] voxel_mat_dens Voxel material composition array
//! @param[in] voxels_Edep Tallied energy deposition (and its square) in each voxel of the ROI
//! @param[in] time_total_MC_init_report Time elapsed during the complete simulation (in seconds)
//! @param[in] total_histories Total number of x-rays simulated per projection
//! @param[in] source_data Data required to compute the voxel plane to report in ASCII format: Z at the level of the source, 1st projection
////////////////////////////////////////////////////////////////////////////////
// int report_voxels_dose(char* file_dose_output, int num_projections, struct voxel_struct* voxel_data, float2* voxel_mat_dens, ulonglong2* voxels_Edep, double time_total_MC_init_report, unsigned long long int total_histories, short int dose_ROI_x_min, short int dose_ROI_x_max, short int dose_ROI_y_min, short int dose_ROI_y_max, short int dose_ROI_z_min, short int dose_ROI_z_max, struct source_struct* source_data)
int report_voxels_dose(char* file_dose_output, int num_projections, struct voxel_struct* voxel_data, int* voxel_mat_dens, ulonglong2* voxels_Edep, double time_total_MC_init_report, unsigned long long int total_histories, short int dose_ROI_x_min, short int dose_ROI_x_max, short int dose_ROI_y_min, short int dose_ROI_y_max, short int dose_ROI_z_min, short int dose_ROI_z_max, struct source_struct* source_data) //!!FixedDensity_DBT!!
{
printf("\n\n *** VOXEL ROI DOSE TALLY REPORT ***\n\n");
FILE* file_ptr = fopen(file_dose_output, "w");
if (file_ptr==NULL)
{
printf("\n\n !!fopen ERROR report_voxels_dose!! File %s can not be opened!!\n", file_dose_output);
exit(-3);
}
// -- Binary output: // !!BINARY!!
char file_binary_mean[250], file_binary_sigma[250];
strncpy (file_binary_mean, file_dose_output, 250);
strcat(file_binary_mean,".raw");
strncpy (file_binary_sigma, file_dose_output, 250);
strcat(file_binary_sigma,"_PercentRelError2sigma.raw");
FILE* file_binary_mean_ptr = fopen(file_binary_mean, "w"); // !!BINARY!!
FILE* file_binary_sigma_ptr = fopen(file_binary_sigma, "w"); // !!BINARY!!
if (file_binary_mean_ptr==NULL)
{
printf("\n\n !!fopen ERROR report_voxels_dose!! Binary file %s can not be opened!!\n", file_dose_output);
exit(-3);
}
int DX = dose_ROI_x_max - dose_ROI_x_min + 1,
DY = dose_ROI_y_max - dose_ROI_y_min + 1,
DZ = dose_ROI_z_max - dose_ROI_z_min + 1;
// -- Calculate the dose plane that will be output as ASCII text:
int z_plane_dose = (int)(source_data[0].position.z * voxel_data->inv_voxel_size.z + 0.00001f); // Select the voxel plane at the level of the source, 1st projection
if ( (z_plane_dose<dose_ROI_z_min) || (z_plane_dose>dose_ROI_z_max) )
z_plane_dose = (dose_ROI_z_max+dose_ROI_z_min)/2;
int z_plane_dose_ROI = z_plane_dose - dose_ROI_z_min;
printf(" Reporting the 3D voxel dose distribution as binary floats in the .raw file, and the 2D dose for Z plane %d as ASCII text.\n", z_plane_dose);
// printf(" Also reporting the dose to each material inside the input ROI adding the energy deposited in each individual voxel\n");
// printf(" (these material dose results will be equal to the materials dose tally below if the ROI covers all the voxels).\n");
fprintf(file_ptr, "# \n");
fprintf(file_ptr, "# *****************************************************************************\n");
fprintf(file_ptr, "# *** MC-GPU, version 1.4_DBT (http://code.google.com/p/mcgpu/) ***\n");
fprintf(file_ptr, "# *** ***\n");
fprintf(file_ptr, "# *** Andreu Badal ([email protected]) ***\n");
fprintf(file_ptr, "# *****************************************************************************\n");
fprintf(file_ptr, "# \n");
#ifdef USING_CUDA
fprintf(file_ptr, "# *** SIMULATION IN THE GPU USING CUDA ***\n");
#else
fprintf(file_ptr, "# *** SIMULATION IN THE CPU ***\n");
#endif
fprintf(file_ptr, "#\n");
// Report only one dose plane in ASCII, all the other data in binary only:
fprintf(file_ptr, "#\n");
fprintf(file_ptr, "# 3D dose deposition map (and dose uncertainty) created tallying the energy deposited by photons inside each voxel of the input geometry.\n");
fprintf(file_ptr, "# Electrons were not transported and therefore we are approximating that the dose is equal to the KERMA (energy released by the photons alone).\n");
fprintf(file_ptr, "# This approximation is acceptable when there is electronic equilibrium and when the range of the secondary electrons is shorter than the voxel size.\n");
fprintf(file_ptr, "# Usually the doses will be acceptable for photon energies below 1 MeV. The dose estimates may not be accurate at the interface of low density volumes.\n");
fprintf(file_ptr, "#\n");
fprintf(file_ptr, "# The 3D dose deposition is reported in binary form in the .raw files (data given as 32-bit floats). \n");
fprintf(file_ptr, "# The %% relative error in the voxel dose at 2 standard deviations [=100*2*sigma/voxel_dose] is reported in the *_PercentRelError2sigma.raw file (32-bit floats). \n"); // !!SPIE2013!! Report relative error
fprintf(file_ptr, "# To reduce the memory use and the reporting time this text output reports only the 2D dose at the Z plane at the level\n");
fprintf(file_ptr, "# of the source focal spot: z_coord = %d (z_coord in ROI = %d)\n", z_plane_dose, z_plane_dose_ROI);
fprintf(file_ptr, "#\n");
fprintf(file_ptr, "# The total dose deposited in each different material is reported to the standard output.\n");
fprintf(file_ptr, "# The dose is calculated adding the energy deposited in the individual voxels within the dose ROI and dividing by the total mass of the material in the ROI.\n");
fprintf(file_ptr, "#\n");
fprintf(file_ptr, "#\n");
fprintf(file_ptr, "# Voxel size: %lf x %lf x %lf = %lf cm^3\n", 1.0/(double)(voxel_data->inv_voxel_size.x), 1.0/(double)(voxel_data->inv_voxel_size.y), 1.0/(double)(voxel_data->inv_voxel_size.z), 1.0/(double)(voxel_data->inv_voxel_size.x*voxel_data->inv_voxel_size.y*voxel_data->inv_voxel_size.z));
fprintf(file_ptr, "# Number of voxels in the reported region of interest (ROI) X, Y and Z:\n");
fprintf(file_ptr, "# %d %d %d\n", DX, DY, DZ);
fprintf(file_ptr, "# Coordinates of the ROI inside the voxel volume = X[%d,%d], Y[%d,%d], Z[%d,%d]\n", dose_ROI_x_min+1, dose_ROI_x_max+1, dose_ROI_y_min+1, dose_ROI_y_max+1, dose_ROI_z_min+1, dose_ROI_z_max+1); // Show ROI with index=1 for the first voxel instead of 0.
fprintf(file_ptr, "#\n");
fprintf(file_ptr, "# Voxel dose units: eV/g per history\n");
fprintf(file_ptr, "# X rows given first, then Y, then Z. One blank line separates the different Y, and two blanks the Z values (GNUPLOT format).\n");
fprintf(file_ptr, "# The dose distribution is also reported with binary FLOAT values (.raw file) for easy visualization in ImageJ.\n");
fprintf(file_ptr, "# \n");
fprintf(file_ptr, "# [DOSE] [2*standard_deviation]\n");
fprintf(file_ptr, "# =====================================\n");
fflush(file_ptr);
double voxel_dose, max_voxel_dose[MAX_MATERIALS], max_voxel_dose_std_dev[MAX_MATERIALS], max_voxel_dose_all_mat=0.0, max_voxel_dose_std_dev_all_mat=0.0;
int max_voxel_dose_x[MAX_MATERIALS], max_voxel_dose_y[MAX_MATERIALS], max_voxel_dose_z[MAX_MATERIALS];
unsigned long long int total_energy_deposited = 0;
double inv_SCALE_eV = 1.0 / SCALE_eV, // conversion to eV using the inverse of the constant used in the tally function (defined in the header file).
inv_N = 1.0 / (double)(total_histories*((unsigned long long int)num_projections));
register int i, j, k, voxel=0;
double mat_Edep[MAX_MATERIALS], mat_Edep2[MAX_MATERIALS], mat_mass_ROI[MAX_MATERIALS]; // Arrays with the total energy, energy squared and mass of each material inside the ROI (mass and dose outside the ROI were not tallied).
unsigned int mat_voxels[MAX_MATERIALS];
for(i=0; i<MAX_MATERIALS; i++)
{
mat_Edep[i] = 0.0;
mat_Edep2[i] = 0.0;
mat_mass_ROI[i] = 0.0;
mat_voxels[i]= 0;
max_voxel_dose[i] =-1.0;
max_voxel_dose_std_dev[i]= 1.0e-15;
max_voxel_dose_x[i] = 0;
max_voxel_dose_y[i] = 0;
max_voxel_dose_z[i] = 0;
}
double voxel_volume = 1.0 / ( ((double)voxel_data->inv_voxel_size.x) * ((double)voxel_data->inv_voxel_size.y) * ((double)voxel_data->inv_voxel_size.z) );
for(k=0; k<DZ; k++)
{
for(j=0; j<DY; j++)
{
for(i=0; i<DX; i++)
{
register int voxel_geometry = (i+dose_ROI_x_min) + (j+dose_ROI_y_min)*voxel_data->num_voxels.x + (k+dose_ROI_z_min)*voxel_data->num_voxels.x*voxel_data->num_voxels.y;
// register double inv_voxel_mass = 1.0 / (voxel_mat_dens[voxel_geometry].y*voxel_volume);
// register int mat_number = (int)(voxel_mat_dens[voxel_geometry].x) - 1 ; // Material number, starting at 0.
// mat_mass_ROI[mat_number] += voxel_mat_dens[voxel_geometry].y*voxel_volume; // Estimate mass and energy deposited in this material
register double inv_voxel_mass = 1.0 / (density_LUT((int)voxel_mat_dens[voxel_geometry])*voxel_volume); //!!FixedDensity_DBT!! Density taken from the function "density_LUT"
register int mat_number = (int)(voxel_mat_dens[voxel_geometry]); // Material number, starting at 0. //!!FixedDensity_DBT!!
mat_mass_ROI[mat_number] += density_LUT((int)voxel_mat_dens[voxel_geometry])*voxel_volume; // Estimate the mass and energy deposited in this material //!!FixedDensity_DBT!! Density taken from the function "density_LUT"
mat_Edep[mat_number] += (double)voxels_Edep[voxel].x; // Using doubles to avoid overflow
mat_Edep2[mat_number] += (double)voxels_Edep[voxel].y;
mat_voxels[mat_number]++; // Count voxels made of this material
// Optional code to eliminate dose deposited in air (first material). Sometimes useful for visualization (dose to air irrelevant, noisy)
// if (voxel_mat_dens[voxel_geometry].x < 1.1f)
// {
// voxels_Edep[voxel].x = 0.0f;
// voxels_Edep[voxel].y = 0.0f;
// }
// -- Convert total energy deposited to dose [eV/gram] per history:
// !!DeBuG!! BUG in first version MC-GPU v1.3, corrected for v1.4 [2013-01-31]. Edep2 is NOT scaled by SCALE_eV!! Also, division by voxel_mass must be done at the end!
// !!DeBuG!! Wrong: voxel_dose = ((double)voxels_Edep[voxel].x) * inv_N * inv_voxel_mass * inv_SCALE_eV;
// !!DeBuG!! Wrong: register double voxel_std_dev = (((double)voxels_Edep[voxel].y) * inv_N * inv_SCALE_eV * inv_voxel_mass - voxel_dose*voxel_dose) * inv_N;
voxel_dose = ((double)voxels_Edep[voxel].x) * inv_N * inv_SCALE_eV; // [<Edep> == Edep / N_hist /scaling_factor ; dose == <Edep> / mass]
total_energy_deposited += voxels_Edep[voxel].x;
register double voxel_std_dev = (((double)voxels_Edep[voxel].y) * inv_N - voxel_dose*voxel_dose) * inv_N * inv_voxel_mass; // [sigma_Edep^2 = (<Edep^2> - <Edep>^2) / N_hist] ; [sigma_dose^2 = sigma_Edep/mass] (not using SCALE_eV for std_dev to prevent overflow)
if (voxel_std_dev>0.0)
voxel_std_dev = sqrt(voxel_std_dev);
voxel_dose *= inv_voxel_mass; // [dose == <Edep> / mass]
if (voxel_dose > max_voxel_dose[mat_number]) // Tally peak dose for each material!
{
// Find the voxel that has the maximum dose:
max_voxel_dose[mat_number] = voxel_dose;
max_voxel_dose_std_dev[mat_number] = voxel_std_dev;
max_voxel_dose_x[mat_number] = i+dose_ROI_x_min;
max_voxel_dose_y[mat_number] = j+dose_ROI_y_min;
max_voxel_dose_z[mat_number] = k+dose_ROI_z_min;
if (voxel_dose > max_voxel_dose_all_mat)
{
max_voxel_dose_all_mat = voxel_dose;
max_voxel_dose_std_dev_all_mat = voxel_std_dev;
}
}
// Report only one dose plane in ASCII:
if (k == z_plane_dose_ROI)
fprintf(file_ptr, "%.6lf %.6lf\n", voxel_dose, 2.0*voxel_std_dev);
float voxel_dose_float = (float)voxel_dose; // After dividing by the number of histories I can report FLOAT bc the number of significant digits will be low.
fwrite(&voxel_dose_float, sizeof(float), 1, file_binary_mean_ptr); // Write dose data in a binary file that can be easily opened in ImageJ. !!BINARY!!
// !!DeBuG!! OLD version, reporting sigma: float voxel_sigma_float = 2.0f * (float)(voxel_std_dev); fwrite(&voxel_sigma_float, sizeof(float), 1, file_binary_sigma_ptr);
float voxel_relErr_float = 0.0f;
if (voxel_dose > 0.0)
voxel_relErr_float = 200.0f*(float)(voxel_std_dev/voxel_dose); // New in MC-GPU v1.4: Report relative error for 2*sigma, in % (avoid dividing by 0)
fwrite(&voxel_relErr_float, sizeof(float), 1, file_binary_sigma_ptr);
voxel++;
}
if (k == z_plane_dose_ROI)
fprintf(file_ptr, "\n"); // Separate Ys with an empty line for visualization with gnuplot.
}
if (k == z_plane_dose_ROI)
fprintf(file_ptr, "\n"); // Separate Zs.
}
fprintf(file_ptr, "# ****** DOSE REPORT: TOTAL SIMULATION PERFORMANCE FOR ALL PROJECTIONS ******\n");
fprintf(file_ptr, "# Total number of simulated x rays: %lld\n", total_histories*((unsigned long long int)num_projections));
fprintf(file_ptr, "# Simulated x rays per projection: %lld\n", total_histories);
fprintf(file_ptr, "# Total simulation time [s]: %.2f\n", time_total_MC_init_report);
if (time_total_MC_init_report>0.000001)
fprintf(file_ptr, "# Total speed [x-rays/s]: %.2f\n", (double)(total_histories*((unsigned long long int)num_projections))/time_total_MC_init_report);
fprintf(file_ptr, "\n# Total energy absorved inside the dose ROI: %.5lf keV/hist\n\n", 0.001*((double)total_energy_deposited)*inv_N*inv_SCALE_eV);
// Output data to standard input:
printf("\n Total energy absorved inside the dose deposition ROI: %.5lf keV/hist\n", 0.001*((double)total_energy_deposited)*inv_N*inv_SCALE_eV);
printf( " Maximum voxel dose (+-2 sigma): %lf +- %lf eV/g per history.\n", max_voxel_dose_all_mat, max_voxel_dose_std_dev_all_mat);
// -- Report dose deposited in each material:
printf(" Dose deposited in the different materials inside the input ROI computed post-processing the 3D voxel dose results:\n\n");
// OLD reporting without peak dose (v1.3): printf(" [MATERIAL] [DOSE_ROI, eV/g/hist] [2*std_dev] [Rel error 2*std_dev, %%] [E_dep [eV/hist] [MASS_ROI, g] [NUM_VOXELS_ROI]\n");
printf(" [MAT] [DOSE_ROI eV/g/hist] [2*std_dev] [Rel error %%] [Peak voxel dose] [2*std_dev] [Rel error %%] [Peak voxel coord] [E_dep eV/hist] [MASS_ROI g] [NUM_VOXELS_ROI]\n");
printf(" ===============================================================================================================================================================================\n");
for(i=0; i<MAX_MATERIALS; i++)
{
if(mat_voxels[i]>0) // Report only for materials found at least in 1 voxel of the input geometry (prevent dividing by 0 mass).
{
double Edep = mat_Edep[i] * inv_N * inv_SCALE_eV; // [dose == Edep/Mass/N_hist]
// !!DeBuG!! BUG in version 1.2: I have to divide by mass after computing the mean and sigma!!!
// !!DeBuG!! WRONG code: double material_dose = mat_Edep[i] * inv_N * inv_SCALE_eV / mat_mass_ROI[i]; // [dose == Edep/Mass/N_hist]
// !!DeBuG!! WRONG code: double material_std_dev = (mat_Edep2[i] * inv_N * inv_SCALE_eV / mat_mass_ROI[i] - material_dose*material_dose) * inv_N; // [sigma^2 = (<Edep^2> - <Edep>^2) / N_hist]
double material_std_dev = (mat_Edep2[i] * inv_N - Edep*Edep) * inv_N; // [sigma^2 = (<Edep^2> - <Edep>^2) / N_hist] (mat_Edep2 not scaled by SCALE_eV in kernel to prevent overflow)
if (material_std_dev>0.0)
material_std_dev = sqrt(material_std_dev);
double material_dose = Edep / mat_mass_ROI[i];
material_std_dev = material_std_dev / mat_mass_ROI[i];
double rel_diff=0.0, rel_diff_peak=0.0;
if (material_dose>0.0)
{
rel_diff = material_std_dev/material_dose;
rel_diff_peak = max_voxel_dose_std_dev[i]/max_voxel_dose[i];
}
printf("\t%d\t%.5lf\t%.5lf\t%.3lf\t\t%.5lf\t%.5lf\t%.3lf\t(%d,%d,%d)\t\t%.5lf\t%.5lf\t%u\n", (i+1), material_dose, 2.0*material_std_dev, (200.0*rel_diff), max_voxel_dose[i], 2.0*max_voxel_dose_std_dev[i], (200.0*rel_diff_peak), max_voxel_dose_x[i], max_voxel_dose_y[i], max_voxel_dose_z[i], Edep, mat_mass_ROI[i], mat_voxels[i]);
// OLD reporting without peak dose (v1.3): printf("\t%d\t%.5lf\t\t%.5lf\t\t%.2lf\t\t%.2lf\t\t%.5lf\t%u\n", (i+1), material_dose, 2.0*material_std_dev, (2.0*100.0*rel_diff), Edep, mat_mass_ROI[i], mat_voxels[i]);
}
}
printf("\n");
fflush(stdout);
fclose(file_ptr); // Close output file and flush stream
fclose(file_binary_mean_ptr);
fclose(file_binary_sigma_ptr);
return 0; // The report function could return a non-zero value to signal that the simulation should continue...
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
//! Report the tallied dose to each material number, accounting for different
//! densities in different regions with the same material number.
//!
//! @param[in] num_projections Number of projections simulated
//! @param[in] total_histories Total number of x-rays simulated per projection
//! @param[out] density_nominal Array with the nominal densities of materials given in the input file; -1 for materials not defined. Used to report only defined materials.
//! @param[in] materials_dose Tallied dose and dose^2 arrays
////////////////////////////////////////////////////////////////////////////////
int report_materials_dose(int num_projections, unsigned long long int total_histories, float *density_nominal, ulonglong2 *materials_dose, double *mass_materials, char file_name_materials[MAX_MATERIALS][250]) // !!tally_materials_dose!!
{
printf("\n\n *** MATERIALS TOTAL DOSE TALLY REPORT ***\n\n");
printf(" Dose deposited in each material defined in the input file (tallied directly per material, not per voxel):\n");
printf(" The results of this tally should be equal to the voxel tally doses for an ROI covering all voxels.\n");
printf(" Total number of simulated x rays: %lld\n", total_histories*((unsigned long long int)num_projections)); // !!DBT!!
if (num_projections>1)
printf(" Simulated x rays for each of %d projections: %lld\n\n", num_projections, total_histories);
printf("\t [MAT] [DOSE eV/g/hist] [2*std_dev] [Rel_error 2*std_dev, %%] [E_dep eV/hist] [DOSE mGy] [Material mass g] [Material file name]\n");
printf("\t======================================================================================================================================\n");
double dose, Edep, std_dev, rel_diff, inv_N = 1.0 / (double)(total_histories*((unsigned long long int)num_projections));
int i, flag=0, max_mat=0;
for(i=0; i<MAX_MATERIALS; i++)
{
if (density_nominal[i]<0.0f)
break; // Skip report for materials not defined in the input file
// Report the material file names removing the absolute file system path for clarity:
char file_name_material_without_path[250];
char* last_slash = strrchr(file_name_materials[i],'/'); // Return a pointer to the last character '/' in the input name, or NULL if not found
if (last_slash==NULL)
strcpy(file_name_material_without_path, file_name_materials[i]);
else
strcpy(file_name_material_without_path, (last_slash+1));
Edep = ((double)materials_dose[i].x) / SCALE_eV * inv_N;
std_dev = sqrt( (((double)materials_dose[i].y)*inv_N - Edep*Edep) * inv_N ); // [sigma^2 = (<Edep^2> - <Edep>^2) / N_hist] (not scaling "materials_dose[i].y" by SCALE_eV in kernel to prevent overflow).
if (Edep>0.0)
rel_diff = std_dev/Edep;
else
rel_diff = 0.0;
dose = Edep / max_value(mass_materials[i], 0.00001); // Prevent division by 0
std_dev = std_dev / max_value(mass_materials[i], 0.00001);
printf("\t%d\t%.5lf\t\t%.5lf\t\t%.2lf\t\t%.2lf\t\t%.5lf\t\t%.5lf\t\t%s\n", (i+1), dose, 2.0*std_dev, 2.0*100.0*rel_diff, Edep, ((double)materials_dose[i].x)/SCALE_eV/max_value(mass_materials[i], 0.00001)*(1.0e3/6.2415e15), mass_materials[i], file_name_material_without_path);
if (materials_dose[i].x>1e16 || dose!=abs(dose) || std_dev!=abs(std_dev)) // !!DeBuG!! Try to detect a possible overflow in any material: large counter or negative, nan value
{
flag = 1;
if (materials_dose[i].x>materials_dose[max_mat].x)
max_mat = i;
}
}
if (flag!=0) // !!DeBuG!! Try to detect a possible overflow: large counter or negative, nan value. The value of SCALE_eV can be reduced to prevent this overflow in some cases.
{
printf("\n WARNING: it is possible that the unsigned long long int counter used to tally the standard deviation overflowed (>2^64).\n"); // !!DeBuG!!
printf(" The standard deviation may be incorrectly measured, but it will surely be very small (<< 1%%).\n");
printf(" Max counter (mat=%d): E_dep = %llu , E_dep^2 = %llu\n\n", max_mat+1, materials_dose[max_mat].x, materials_dose[max_mat].y);
}
fflush(stdout);
return 0;
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
//! Sets the tomographic acquisition trajectory: store in memory the source and detector
//! positions and rotation matrices that are needed to simulate the multiple projections.
//! The first projection ([0] = 0 degrees) was previously initialized in function "read_input".
//! The antiscatter grid is disabled after projection [0]; the motion blur is disabled for projection [0].
///////////////////////////////////////////////////////////////////////////////
void set_CT_trajectory(int myID, int num_projections, struct source_struct* source_data, struct detector_struct* detector_data, double translation_helical, bool flag_detectorFixed) // !!DBTv1.4!! NEW VERSION with general rotations!!
{
MASTER_THREAD
{
printf("\n -- Setting the parameters of the sources and detectors for the %d tomographic projections (MAX_NUM_PROJECTIONS=%d):\n\n", num_projections, MAX_NUM_PROJECTIONS);
if (flag_detectorFixed)
{
printf(" \"flag_detectorFixed==true\": the detector will stay fixed at 0 deg position (as in DBT acquisition).\n\n");
}
}
int i;
for (i=1; i<=num_projections; i++) // The first projection (i=0) was initialized in function "read_input"
{
double angle = source_data[0].angle_offset + (i-1)*source_data[0].angle_per_projection;
// --Initialize the source and detector structures to the values in projection 0:
source_data[i] = source_data[0];
detector_data[i] = detector_data[0];
// --Set "source_data[i]->rot_fan" multiplying the rotation matrix for the original 0 degrees with the new rotation of "angle" degrees around "axis_of_rotation". // !!DBTv1.4!!
// The new matrix multiplies "from the left side" (will be applied last; non-commutative):
float m[9];
m[0] = 1; m[1] = 0; m[2] = 0; // Init rotation matrix to identity
m[3] = 0; m[4] = 1; m[5] = 0;
m[6] = 0; m[7] = 0; m[8] = 1;
create_rotation_matrix_around_axis(angle, source_data[0].axis_of_rotation.x, source_data[0].axis_of_rotation.y, source_data[0].axis_of_rotation.z, m);
multiply_3x3(source_data[i].rot_fan, m, source_data[0].rot_fan);
// --Set "source_data[i].direction" multiplying by the rotation matrix:
apply_rotation(&source_data[i].direction, m);
// --Translate the source position and rotation_point according to the input translation along the axis_of_rotation for an helical scan:
source_data[i].position.x += (i-1)*translation_helical*source_data[0].axis_of_rotation.x;
source_data[i].position.y += (i-1)*translation_helical*source_data[0].axis_of_rotation.y;
source_data[i].position.z += (i-1)*translation_helical*source_data[0].axis_of_rotation.z;
source_data[i].rotation_point.x += (i-1)*translation_helical*source_data[0].axis_of_rotation.x;
source_data[i].rotation_point.y += (i-1)*translation_helical*source_data[0].axis_of_rotation.y;
source_data[i].rotation_point.z += (i-1)*translation_helical*source_data[0].axis_of_rotation.z;
// --Set "source_data[i].position" multiplying by the rotation matrix in the reference system where the "rotation_point" is the origin of coordinates:
source_data[i].position.x -= source_data[i].rotation_point.x; source_data[i].position.y -= source_data[i].rotation_point.y; source_data[i].position.z -= source_data[i].rotation_point.z;
apply_rotation(&source_data[i].position, m);
source_data[i].position.x += source_data[i].rotation_point.x; source_data[i].position.y += source_data[i].rotation_point.y; source_data[i].position.z += source_data[i].rotation_point.z;
if (flag_detectorFixed==false) // Check if the detector rotates with the source or if it stays static as in DBT
{
// --Set "detector_data[i].center" multiplying by the rotation matrix in the reference system where the "rotation_point" is the origin of coordinates:
detector_data[i].center.x -= source_data[i].rotation_point.x; detector_data[i].center.y -= source_data[i].rotation_point.y; detector_data[i].center.z -= source_data[i].rotation_point.z;
apply_rotation(&detector_data[i].center, m);
detector_data[i].center.x += source_data[i].rotation_point.x; detector_data[i].center.y += source_data[i].rotation_point.y; detector_data[i].center.z += source_data[i].rotation_point.z;
// --Set "detector_data[i].rot_inv" multiplying the inverse rotation matrix by "-angle". The new matrix multiplies "from the right side" (will be applied first): // !!DBTv1.4!!
m[0] = 1; m[1] = 0; m[2] = 0; // Init rotation matrix to identity
m[3] = 0; m[4] = 1; m[5] = 0;
m[6] = 0; m[7] = 0; m[8] = 1;
create_rotation_matrix_around_axis(-angle, source_data[0].axis_of_rotation.x, source_data[0].axis_of_rotation.y, source_data[0].axis_of_rotation.z, m);
multiply_3x3(detector_data[i].rot_inv, detector_data[0].rot_inv, m);
}
MASTER_THREAD printf(" << Projection #%d >>\t Angle=%.5f degrees\n", i, angle*RAD2DEG);
MASTER_THREAD printf(" \t Source position=(%.8f,%.8f,%.8f), direction=(%.8f,%.8f,%.8f)\n", source_data[i].position.x,source_data[i].position.y,source_data[i].position.z, source_data[i].direction.x,source_data[i].direction.y,source_data[i].direction.z);
MASTER_THREAD printf(" \t Detector center=(%.8f,%.8f,%.8f)\n", detector_data[i].center.x, detector_data[i].center.y, detector_data[i].center.z);
if (detector_data[0].grid_freq>0.0f)
detector_data[i].grid_freq = -detector_data[0].grid_freq; // Disable grid after projection [0], if used !!DBTv1.5!!
}
source_data[0].rotation_blur = 0.0f; // Motion blur disabled for the 0 deg projection (eg, mammo). // !!DBTv1.4!!
}
///////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//! Initialize the first seed of the pseudo-random number generator (PRNG)
//! RANECU to a position far away from the previous history (leap frog technique).
//! This function is equivalent to "init_PRNG" but only updates one of the seeds.
//!
//! Note that if we use the same seed number to initialize the 2 MLCGs of the PRNG
//!       we can only guarantee that the first MLCG will be uncorrelated for each value
//!       generated by "update_seed_PRNG". There is a tiny chance that the final PRNs will
//!       be correlated because the leap frog on the first MLCG will probably go over the
//!       repetition cycle of the MLCG, which is much smaller than the full RANECU. But any
//!       correlation is extremely unlikely. Function "init_PRNG" doesn't have this issue.
//!
//! @param[in] batch_number Elements to skip (eg, MPI thread_number).
//! @param[in] total_histories Histories to skip.
//! @param[in,out] seed Initial PRNG seeds; returns the updated seed.
////////////////////////////////////////////////////////////////////////////////
inline void update_seed_PRNG(int batch_number, unsigned long long int total_histories, int* seed)
{
if (0==batch_number)
return;
unsigned long long int leap = total_histories * (batch_number * LEAP_DISTANCE);
int y = 1;
int z = a1_RANECU;
// -- Calculate the modulo power '(a^leap)MOD(m)' using a divide-and-conquer algorithm adapted to modulo arithmetic
for(;;)
{
// (A2) Halve n, and store the integer part and the residue
if (0!=(leap&01)) // (bit-wise operation for MOD(leap,2), or leap%2 ==> proceed if leap is an odd number) Equivalent: t=(short)(leap%2);
{
leap >>= 1; // Halve n moving the bits 1 position right. Equivalent to: leap=(leap/2);
y = abMODm(m1_RANECU,z,y); // (A3) Multiply y by z: y = [z*y] MOD m
if (0==leap) break; // (A4) leap==0? ==> finish
}
else // (leap is even)
{
leap>>= 1; // Halve leap moving the bits 1 position right. Equivalent to: leap=(leap/2);
}
z = abMODm(m1_RANECU,z,z); // (A5) Square z: z = [z*z] MOD m
}
// AjMODm1 = y; // Exponentiation finished: AjMODm = expMOD = y = a^j
// -- Compute and display the seeds S(i+j), from the present seed S(i), using the previously calculated value of (a^j)MOD(m):
// S(i+j) = [(a**j MOD m)*S(i)] MOD m
// S_i = abMODm(m,S_i,AjMODm)
*seed = abMODm(m1_RANECU, *seed, y);
}
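
// Illustrative usage sketch (commented out; "myID" and "total_histories" are assumed to be the MPI thread id
// and the number of histories assigned to each thread). Every thread starts from the same input seed and
// leaps its first RANECU seed to a disjoint portion of the random sequence:
//      int seed = seed_input;
//      update_seed_PRNG(myID, total_histories, &seed);     // Leap proportional to the thread id (no leap for thread 0)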
///////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//! Read the energy spectrum file and initialize the Walker aliasing sampling.
//!
//! @param[in] file_name_espc File containing the energy spectrum (lower energy value in each bin and its emission probability).
//! @param[in,out] source_energy_data Energy spectrum and other source data. The Walker alias and cutoffs are initialized in this function.
//! @param[out] mean_energy_spectrum Mean energy in the input x-ray energy spectrum.
////////////////////////////////////////////////////////////////////////////////
void init_energy_spectrum(char* file_name_espc, struct source_energy_struct* source_energy_data, float *mean_energy_spectrum)
{
char *new_line_ptr = NULL, new_line[250];
float lower_energy_bin, prob;
float prob_espc_bin[MAX_ENERGY_BINS]; // The input probabilities of each energy bin will be discarded after Walker is initialized
// -- Read spectrum from file:
FILE* file_ptr = fopen(file_name_espc, "r");
if (NULL==file_ptr)
{
printf("\n\n !!init_energy_spectrum ERROR!! Error trying to read the energy spectrum input file \"%s\".\n\n", file_name_espc);
exit(-1);
}
int current_bin = -1;
do
{
current_bin++; // Update bin counter
if (current_bin >= MAX_ENERGY_BINS)
{
printf("\n !!init_energy_spectrum ERROR!!: too many energy bins in the input spectrum. Increase the value of MAX_ENERGY_BINS=%d.\n", MAX_ENERGY_BINS);
printf( " A negative probability marks the end of the spectrum.\n\n");
exit(-1);
}
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); // Read the following line of text skipping comments and extra spaces
if (new_line_ptr==NULL)
{
printf("\n\n !!init_energy_spectrum ERROR!! The input file for the x ray spectrum (%s) is not readable or incomplete (a negative probability marks the end of the spectrum).\n", file_name_espc);
exit(-1);
}
prob = -123456789.0f;
sscanf(new_line, "%f %f", &lower_energy_bin, &prob); // Extract the lowest energy in the bin and the corresponding emission probability from the line read
prob_espc_bin[current_bin] = prob;
source_energy_data->espc[current_bin] = lower_energy_bin;
if (prob == -123456789.0f)
{
printf("\n !!init_energy_spectrum ERROR!!: invalid energy bin number %d?\n\n", current_bin);
exit(-1);
}
else if (lower_energy_bin < source_energy_data->espc[max_value(current_bin-1,0)]) // (Avoid a negative index using the macro "max_value" defined in the header file)
{
printf("\n !!init_energy_spectrum ERROR!!: input energy bins with decreasing energy? espc(%d)=%f, espc(%d)=%f\n\n", current_bin-1, source_energy_data->espc[max_value(current_bin-1,0)], current_bin, lower_energy_bin);
exit(-1);
}
}
while (prob > -1.0e-11f); // A negative probability marks the end of the spectrum
// Store the number of bins read from the input energy spectrum file:
source_energy_data->num_bins_espc = current_bin;
// Init the remaining bins (which will not be used) with the last energy read (will be assumed as the highest energy in the last bin) and 0 probability of emission.
register int i;
for (i=current_bin; i<MAX_ENERGY_BINS; i++)
{
source_energy_data->espc[i] = lower_energy_bin;
prob_espc_bin[i] = 0.0f;
}
// Compute the mean energy in the spectrum, taking into account the energy and prob of each bin:
float all_energy = 0.0f;
float all_prob = 0.0f;
for(i=0; i<source_energy_data->num_bins_espc; i++)
{
all_energy += 0.5f*(source_energy_data->espc[i]+source_energy_data->espc[i+1])*prob_espc_bin[i];
all_prob += prob_espc_bin[i];
}
*mean_energy_spectrum = all_energy/all_prob;
// -- Init the Walker aliasing sampling method (as it is done in PENELOPE):
IRND0(prob_espc_bin, source_energy_data->espc_cutoff, source_energy_data->espc_alias, source_energy_data->num_bins_espc); //!!Walker!! Calling PENELOPE's function to init the Walker method
// !!Verbose!! Test sampling
// Sampling the x ray energy using the Walker aliasing algorithm from PENELOPE:
// int sampled_bin = seeki_walker(source_energy_data->espc_cutoff, source_energy_data->espc_alias, 0.5, source_energy_data->num_bins_espc);
// float e = source_energy_data->espc[sampled_bin] + ranecu(seed) * (source_energy_data->espc[sampled_bin+1] - source_energy_data->espc[sampled_bin]); // Linear interpolation of the final energy within the sampled energy bin
// printf("\n\n !!Walker!! Energy center bin %d = %f keV\n", sampled_bin, 0.001f*e);
}
//********************************************************************
//! Finds the interval (x(i),x(i+1)] containing the input value
//! using Walker's aliasing method.
//!
//! Input:
//! cutoff(1..n) -> interval cutoff values for the Walker method
//!       alias(1..n)  -> alias values for the upper part of each interval
//! randno -> point to be located
//! n -> no. of data points
//! Output:
//! index i of the semiopen interval where randno lies
//! Comments:
//! -> The cutoff and alias values have to be previously
//! initialised calling the penelope subroutine IRND0.
//!
//!
//! Algorithm implementation based on the PENELOPE code developed
//! by Francesc Salvat at the University of Barcelona. For more
//! info: www.oecd-nea.org/science/pubs/2009/nea6416-penelope.pdf
//!
//CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
//C PENELOPE/PENGEOM (version 2006) C
//C Copyright (c) 2001-2006 C
//C Universitat de Barcelona C
//C C
//C Permission to use, copy, modify, distribute and sell this software C
//C and its documentation for any purpose is hereby granted without C
//C fee, provided that the above copyright notice appears in all C
//C copies and that both that copyright notice and this permission C
//C notice appear in all supporting documentation. The Universitat de C
//C Barcelona makes no representations about the suitability of this C
//C software for any purpose. It is provided "as is" without express C
//C or implied warranty. C
//CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
inline int seeki_walker(float *cutoff, short int *alias, float randno, int n)
{
float RN = randno * n; // Find initial interval (array starting at 0):
int int_part = (int)(RN); // -- Integer part
float fraction_part = RN - ((float)int_part); // -- Fractional part
if (fraction_part < cutoff[int_part]) // Check if we are in the aliased part
return int_part; // Below the cutoff: return current value
else
return (int)alias[int_part]; // Above the cutoff: return alias
}
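
// Illustrative sampling sketch (commented out), mirroring the test in "init_energy_spectrum": the cutoff and
// alias tables are assumed to have been initialized by "IRND0", and "ranecu" returns a uniform number in [0,1):
//      int   bin = seeki_walker(source_energy_data.espc_cutoff, source_energy_data.espc_alias, ranecu(seed), source_energy_data.num_bins_espc);
//      float E   = source_energy_data.espc[bin] + ranecu(seed)*(source_energy_data.espc[bin+1] - source_energy_data.espc[bin]);   // Interpolate within the sampled bin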
//********************************************************************
//* SUBROUTINE IRND0 *
//********************************************************************
//*
//! Initialisation of Walker's aliasing algorithm for random
//! sampling from discrete probability distributions.
//!
//! Input arguments:
//! N ........ number of different values of the random variable.
//! W(1:N) ... corresponding point probabilities (not necessarily
//! normalised to unity).
//! Output arguments:
//! F(1:N) ... cutoff values.
//! K(1:N) ... alias values.
//!
//!
//! This subroutine is part of the PENELOPE 2006 code developed
//! by Francesc Salvat at the University of Barcelona. For more
//! info: www.oecd-nea.org/science/pubs/2009/nea6416-penelope.pdf
//*
//CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
//C PENELOPE/PENGEOM (version 2006) C
//C Copyright (c) 2001-2006 C
//C Universitat de Barcelona C
//C C
//C Permission to use, copy, modify, distribute and sell this software C
//C and its documentation for any purpose is hereby granted without C
//C fee, provided that the above copyright notice appears in all C
//C copies and that both that copyright notice and this permission C
//C notice appear in all supporting documentation. The Universitat de C
//C Barcelona makes no representations about the suitability of this C
//C software for any purpose. It is provided "as is" without express C
//C or implied warranty. C
//CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
void IRND0(float *W, float *F, short int *K, int N)
{
register int I;
// **** Renormalisation.
double WS=0.0;
for (I=0; I<N; I++)
{
if(W[I] < 0.0f)
{
printf("\n\n !!ERROR!! IRND0: Walker sampling initialization. Negative point probability? W(%d)=%f\n\n", I, W[I]);
exit(-1);
}
WS = WS + W[I];
}
WS = ((double)N) / WS;
for (I=0; I<N; I++)
{
K[I] = I;
F[I] = W[I] * WS;
}
if (N==1)
return;
// **** Cutoff and alias values.
float HLOW, HIGH;
int ILOW, IHIGH, J;
for (I=0; I<N-1; I++)
{
HLOW = 1.0f;
HIGH = 1.0f;
ILOW = -1;
IHIGH= -1;
for (J=0; J<N; J++)
{
if(K[J]==J)
{
if(F[J]<HLOW)
{
HLOW = F[J];
ILOW = J;
}
else if(F[J]>HIGH)
{
HIGH = F[J];
IHIGH = J;
}
}
}
if((ILOW==-1) || (IHIGH==-1))
return;
K[ILOW] = IHIGH;
F[IHIGH]= HIGH + HLOW - 1.0f;
}
return;
}
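
// Illustrative initialization sketch (commented out; a hypothetical discrete distribution with 3 values whose
// probabilities do not need to be normalized):
//      float W[3] = {1.0f, 2.0f, 1.0f};     // Point probabilities of each value
//      float F[3];  short int K[3];
//      IRND0(W, F, K, 3);                   // Fills the cutoffs F and aliases K used later by "seeki_walker"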
///////////////////////////////////////////////////////////////////////////////
// !!DBTv1.4!!
//!* Create the rotation matrix that will rotate a vector the input angle around the input axis
//!* (using Rodrigues' formula: http://mathworld.wolfram.com/RodriguesRotationFormula.html)
//!* The new rotation matrix is multiplied with the input matrix m[9] to create compounded rotations
//!* (input the identity matrix I[3x3] if no previous rotation exists!).
void create_rotation_matrix_around_axis(float angle, float wx, float wy, float wz, float *m)
{
float mm[9];
float nn[9];
int i, flag=0;
for (i=0; i<9; i++)
{
mm[i] = m[i]; // Create temporary copy of input matrix
if (fabsf(m[i])>1.0e-7f)
flag += 1; // Detect non-null elements in the matrix
}
if (0==flag)
{
printf("\n\n !!WARNING!! Null rotation matrix input to \'create_rotation_matrix_around_axis\'???\n");
printf( "             Resetting the matrix to identity to preserve the following rotations.\n\n\n");
mm[0] = mm[4] = mm[8] = 1.0f;
mm[1] = mm[2] = mm[3] = mm[5] = mm[6] = mm[7] = 0.0f;
}
double c = cos(angle);
double s = sin(angle);
nn[0] = (float) c+wx*wx*(1.0-c);
nn[3] = (float) wz*s+wx*wy*(1.0-c);
nn[6] = (float)-wy*s+wx*wz*(1.0-c);
nn[1] = (float)-wz*s+wx*wy*(1.0-c);
nn[4] = (float) c+wy*wy*(1.0-c);
nn[7] = (float) wx*s+wy*wz*(1.0-c);
nn[2] = (float) wy*s+wx*wz*(1.0-c);
nn[5] = (float)-wx*s+wy*wz*(1.0-c);
nn[8] = (float) c+wz*wz*(1.0-c);
multiply_3x3(m, nn, mm); // Multiply new rotation matrix with the input one (which should be the identity if no previous rotations)
}
//! Multiply two input 3x3 matrices: m_out[9] = n[9] x m[9]
void multiply_3x3(float *m_out, float *n, float *m)
{
if (m_out==m)
printf("\n\n !!ERROR in \"multiply_3x3\"!! Input and output pointers are the same; the matrix multiplication will be wrong!!??\n\n");
m_out[0] = n[0]*m[0] + n[1]*m[3] + n[2]*m[6];
m_out[1] = n[0]*m[1] + n[1]*m[4] + n[2]*m[7];
m_out[2] = n[0]*m[2] + n[1]*m[5] + n[2]*m[8];
m_out[3] = n[3]*m[0] + n[4]*m[3] + n[5]*m[6];
m_out[4] = n[3]*m[1] + n[4]*m[4] + n[5]*m[7];
m_out[5] = n[3]*m[2] + n[4]*m[5] + n[5]*m[8];
m_out[6] = n[6]*m[0] + n[7]*m[3] + n[8]*m[6];
m_out[7] = n[6]*m[1] + n[7]*m[4] + n[8]*m[7];
m_out[8] = n[6]*m[2] + n[7]*m[5] + n[8]*m[8];
}
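
// Illustrative usage sketch (commented out; "v" is a hypothetical float3 vector and "apply_rotation" is the
// helper defined in the kernel file): compose a rotation of 90 degrees around the Z axis and apply it to "v":
//      float m[9] = {1,0,0, 0,1,0, 0,0,1};                                   // Start from the identity matrix
//      create_rotation_matrix_around_axis(1.5707963f, 0.0f, 0.0f, 1.0f, m);  // Angle given in radians; axis = (0,0,1)
//      apply_rotation(&v, m);                                                // Rotate the vector in place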
///////////////////////////////////////////////////////////////////////////////
| 18fba99afc4860626d4a8f816c5b398c982d8874.cu |
// ** CHANGE LIST ** See below \section sec_changes
// !!DBTv1.5!! (June 28, 2017)
// -- Improved detector model and fluorescence
// -- Added electronic noise and Swank factor for detected charges output
// -- Anti-scatter grid based on Day and Dance, Phys Med Biol 28, pp. 1429-1433 (1983)
// !!DBTv1.4!!
// -- Improved detector model with input thickness, attenuation, fluorescence escape.
// -- User-defined rotation axis for the tomography scan
// -- Enabled the simulation of tomosynthesis scans: half cone source, emission angle offset, etc
// -- Enable translation of the voxelized geometry and of the image sensor within the detector plane
// -- Code upgraded from CUDA 4 to CUDA 5, after cutil_inline.h has been eliminated from the SDK:
// CUDA 5.0: Using "getLastCudaError" instead of "cutilCheckMsg"
// CUDA 5.0: Using "checkCudaErrors" instead of "cutilSafeCall" or "cutilCheckError"
// CUDA 5.0: Using "gpuGetMaxGflopsDeviceId instead of "cutGetMaxGflopsDeviceId"
// CUDA 5.0: Substitute all the CUDA timer functions (cutResetTimer, cutStartTimer, cutStopTimer, cutGetTimerValue) for standard C clock() calls.
////////////////////////////////////////////////////////////////////////////////////////
//
// ****************************
// *** MC-GPU , version 1.5 ***
// ****************************
//
/**
* \mainpage MC-GPU v1.5_DBT
*
* \code
*
* Andreu Badal, PhD (Andreu.Badal-Soler{at}fda.hhs.gov)
*
* Division of Imaging and Applied Mathematics
* Office of Science and Engineering Laboratories
* Center for Devices and Radiological Health
* U.S. Food and Drug Administration
*
* Code release date: 2012/12/12
*
*
*
* \endcode
*
*
*
* \b MC-GPU [1-4] is a Monte Carlo simulation code that can generate synthetic radiographic
* images and computed tomography (CT) scans of realistic models of the human anatomy using the
* computational power of commodity Graphics Processing Unit (GPU) cards.
* The code implements a massively multi-threaded Monte Carlo simulation algorithm
* for the transport of x rays in a voxelized geometry. The x ray interaction models and material
* properties have been adapted from \b PENELOPE \b 2006 [5].
*
*
* \b MC-GPU was developed using the \b CUDA programming model from \b NVIDIA [6] to achieve
* maximum performance on NVIDIA GPUs. The code can also be compiled with a standard C compiler
* to be executed in a regular CPU.
* In a typical medical imaging simulation, the use of GPU computing with MC-GPU has been shown
* to provide a speed up of between 20 and 40 times, compared to the execution on a single CPU core.
*
* The MC-GPU code has been described in different scientific publications [1-4].
* The main reference of this work, which the users should cite, is the following [1]:
* \code
* Andreu Badal and Aldo Badano, "Accelerating Monte Carlo simulations of
* photon transport in a voxelized geometry using a massively parallel
* Graphics Processing Unit", Medical Physics 36, pp. 4878–4880 (2009)
* \endcode
* The main developer of MC-GPU is \b Andreu \b Badal, working at the U.S. \b Food \b and
* \b Drug \b Administration (Center for Devices and Radiological Health, Office of Science
* and Engineering Laboratories, Division of Imaging and Applied Mathematics).
* The source code of MC-GPU is free and open software in the public domain, as explained
* in the Disclaimer section below.
* The source code of MC-GPU and its auxiliary files are distributed from the website: http://code.google.com/.
*
*
* This documentation has been automatically generated by \b Doxygen parsing the comments in
* the MC-GPU source code.
* This code is still in development, please report to the author any issue/bug
* that you may encounter. Feel free to suggest improvements to the code too!
*
*
*
* \section sec_changes List of modifications in different versions of the code
*
* \subsection sec_changes_v13 Version 1.3 (release date: 2012/12/12)
*
* - Code upgraded to CUDA 5.0 (not compatible with previous versions of CUDA!).
* - Removed limit on the amount of projection images that can be simulated per CT scan (source and
* detector parameters now stored in global memory and transferring to shared memory at run time
* to avoid using the limited constant memory).
* - New material dose tally implemented to estimate the dose deposited in each material independently
* of the voxel dose tally (the voxel dose tally measures the dose in each material adding the energy
* deposited in each voxel of that material within the defined voxelized region-of-interest).
* - Interaction loop re-organized to maximize performance (virtual interactions simulated before real ones).
* - Improvements and small corrections in the source sampling and tally routines.
* - Allow input of material and voxel geometry files compressed with gzip (zlib library now required for compilation).
*
*
*
* \subsection sec_changes_v12 Version 1.2 (release date: 2011/10/25)
*
* - Implemented a voxel dose tally.
* - Polyenergetic source model.
* - MPI support for simulating individual projections.
* - Simulation by time limit.
* - Improved flexibility of the CT trajectories, helical scans.
*
*
*
* \section sec_disc Disclaimer
*
* This software and documentation (the "Software") were developed at the Food and
* Drug Administration (FDA) by employees of the Federal Government in the course
* of their official duties. Pursuant to Title 17, Section 105 of the United States
* Code, this work is not subject to copyright protection and is in the public
* domain. Permission is hereby granted, free of charge, to any person obtaining a
* copy of the Software, to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish, distribute,
* sublicense, or sell copies of the Software or derivatives, and to permit persons
* to whom the Software is furnished to do so. FDA assumes no responsibility
* whatsoever for use by other parties of the Software, its source code,
* documentation or compiled executables, and makes no guarantees, expressed or
* implied, about its quality, reliability, or any other characteristic. Further,
* use of this code in no way implies endorsement by the FDA or confers any
* advantage in regulatory decisions. Although this software can be redistributed
* and/or modified freely, we ask that any derivative works bear some notice that
* they are derived from it, and any modified versions bear some notice that they
* have been modified.
*
*
*
* \section sec_Intro Code features
*
* In this section we provide a brief description of the features of the MC-GPU code. A
* more complete description of the code can be found in our published articles.
* important information regarding the operation of the code is provided as comments in the
* input files of the sample simulations provided with the MC-GPU package.
* Detailed information on each function of the code can be found in the complete Doxygen
* documentation of the source code
*
* The basic operation of the code consists in adapting the simulation input file
* to describe the location and characteristics of the x ray source, define the CT trajectory
* (if any), list the materials to be used in the simulation, define the geometry of
* the x ray detector and, finally, specify the voxelized object file to be
* used as the simulation material universe.
* In the first line of the input file, the user can fix the total number of x rays that have
* to be simulated (> 1e5 histories) or the total simulation time (maximum 1e5 seconds).
*
*
* The coordinate system of the simulated world is determined by the input voxelized geometry.
* The origin of coordinates is assumed to be located at the lower-back corner of the voxelized
* volume, and the axis are located on the vertices of the voxelized volume.
* This means that the lower-back corner of the first voxel is on the origin and the
* following voxels are located along the positive X, Y and Z axis (first quadrant).
*
*
* To simulate the atomic interactions, MC-GPU uses a database of material properties based on the
* database from PENELOPE. A PENELOPE 2006 material file can be converted into an MC-GPU material
* file using the auxiliary utility "MC-GPU_create_material_data.f" provided with the MC-GPU
* package. Pre-defined material files for a set of materials typically used in medical imaging
* simulations are already provided in the folder "MC-GPU_material_files".
*
*
* The code includes two tally options: an \b image \b tally that creates projection x-ray images,
* and a radiation \b dose \b tally that estimates the dose deposited inside the patient model.
* MC-GPU does not currently simulate the transport of electrons and therefore the dose
* deposition tally (KERMA tally rigorously) will not be accurate for high energies or near
* material interfaces and small voxels.
* In the image tally the images are formed by counting the energy that enters a user-defined 2D
* grid of pixels, which is a simple approximation to a noise-free flat-panel detector with
* 100% detection efficiency. The pixel values have units of eV/cm^2.
* Four different images are reported at the end of the simulation, corresponding
* to the signal produced by x rays that did not interact between the source and the detector
* (non-scattered), x rays that suffered a single Compton (inelastic) interaction, a single
* Rayleigh (elastic) interaction, and multi-scattered x rays.
* The dose tally counts the energy deposited by each x ray track inside each voxel of the
* geometry, within a user-defined volumetric region-of-interest (ROI). The average dose deposited
* inside each voxel and in each material (and the associated statistical uncertainties) are reported
* at the end of the simulation.
*
*
* MC-GPU can simulate a single projection image or a full CT scan.
* The CT is simulated generating many projection images around the static
* voxelized geometry. Currently, the code is limited to perform a simple
* CT trajectory rotating around the Z axis. The user can specify the angular shift and longitudinal
* translation (pitch) of the source between each projection and also the distance between the
* source and the axis of rotation (the axis is assumed to be parallel to the Z axis).
* By now, the code does not simulate some relevant components of a CT scanner such as the
* anti-scatter grid, a bow-tie filter or a curved detector (flat-panel detector only).
*
*
* The x ray source is defined as a point source emitting x rays with an energy randomly sampled
* from the user-provided energy spectrum. The polyenergetic spectrum is efficiently sampled
* using the Walker aliasing algorithm. The emitted cone beam is computationally
* collimated to produce a rectangular field on the detector plane, within the azimuthal and
* polar angles specified by the user.
* The detector plane is automatically located at the specified distance right in front of the
* source focal spot, with the collimated cone beam pointing towards the geometric center of the detector.
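 *
 * A sampled photon energy is obtained from the tabulated spectrum in two steps, sketched below with
 * placeholder variable names (the cutoff and alias tables are initialized by the PENELOPE routine IRND0):
 * \code
 *    bin = seeki_walker(espc_cutoff, espc_alias, ranecu(seed), num_bins_espc);   // Walker aliasing: select an energy bin
 *    E   = espc[bin] + ranecu(seed)*(espc[bin+1] - espc[bin]);                   // Uniform sampling within the selected bin
 * \endcode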
*
*
* In order to optimize the particle tracking algorithm (ray-tracing) and minimize
* the accesses to the slow GPU main memory, the photon trajectories across the voxels
* are computed using the Woodcock tracking algorithm.
* With this technique the photons perceive the geometry as a uniform medium
* composed of the material of the most attenuating voxel.
* In this way, the voxel boundaries do not have to be explicitly calculated and
* multiple voxels can be crossed in a single step.
* To keep the simulation unbiased, some of the interactions are considered
* "virtual" (i.e., do not change the photon energy or direction of movement),
* depending on the x ray energy and the actual material at the interaction site.
* In typical medical imaging simulations where the most attenuating material is cortical bone,
* the Woodcock tracking algorithm gives an speed up of almost one order of magnitude compared
* to computing voxel boundaries all the time. However, if the geometry includes a high
* density voxel, such as a metallic implant, the performance of the code can be severely
* reduced because a large fraction of the sampled interactions will be virtual.
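 *
 * A minimal sketch of one Woodcock tracking step is shown below (illustrative pseudocode only;
 * names such as "mfp_woodcock", "voxel_material" and "mfp" stand for the interpolation tables
 * and helper routines actually used in the simulation kernel):
 * \code
 *    do
 *    {
 *      s = -mfp_woodcock * logf(ranecu(seed));         // Step length sampled with the shortest MFP (most attenuating voxel)
 *      position = position + s*direction;              // Jump without computing any voxel boundary
 *      mfp_real = mfp(voxel_material(position), E);    // MFP of the material actually found at the new location
 *    }
 *    while ( ranecu(seed)*mfp_real > mfp_woodcock );   // Virtual interaction: keep flying with probability 1 - mfp_woodcock/mfp_real
 * \endcode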
*
*
* The random number generator used in PENELOPE [5], RANECU, is also used in the GPU
* program. To ensure that the simulated tracks are not correlated, each thread initializes
* the generator to a unique position in the random sequence, far enough from the
* other threads, using the algorithm implemented in the seedsMLCG code [7].
*
*
* In a typical simulation, several thousand threads are launched simultaneously in
* the GPU, each one of them simulating a batch of several x ray tracks.
* If the code is compiled with MPI support (see below), multiple GPUs can be used in parallel.
* The code will perform a short speed test to estimate the relative speed of each GPU used
* in the simulation and then distribute the number of particles among the available GPUs correspondingly.
* If the user specified a time limit in the simulation, all the GPUs will simulate in parallel
* for the allowed time. Since the code is already optimized to scale well in
* thousands of GPU threads, it scales almost linearly with the number of GPUs in most
* situations, with only a few seconds of overhead in the initialization of the multiple GPUs
* and in the reduction of the final results.
*
*
*
*
* \section sec_output Code output
*
* At the end of the simulation the code reports the tallied 3D dose distribution and the
* final simulated images in RAW binary form, as 32-bits float values. The image data is provided
* as a collection of five consecutive images corresponding to: total image (scatter+primaries),
* primary particles, Compton, Rayleigh and multi-scatter.
* The dose data is reported as two RAW files with the mean dose and twice the standard deviation
* of the dose in each voxel of the geometry respectively, within the input ROI.
* The average dose deposited in each material of the geometry is also reported to the standard output.
* Organ doses can be obtained by post-processing the output dose file, knowing which voxel
* corresponds to each organ.
* The pixel and voxel dose data values are stored with the X coordinate incrementing first, the Y
* coordinate incrementing second, and the Z coordinate incrementing last.
*
* The program also reports the simulated images and the dose at the Z plane at the level of the x ray
* source as ASCII text files. The ASCII output can be readily visualized with the GNUPLOT scripts
* distributed with MC-GPU. The header section at the beginning of these text files provides the
* information required to easily read the RAW binary files with IMAGEJ, OCTAVE or other programs.
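 *
 * As an illustration, the RAW image output could be read back for post-processing with a few lines
 * of standard C (the file name and pixel counts below are hypothetical and must match the simulated detector):
 * \code
 *    #include <stdio.h>
 *    #include <stdlib.h>
 *    int main(void)
 *    {
 *      const int nx = 3000, ny = 1500;                          // Number of pixels used in the simulation
 *      float *img = (float*) malloc(5*nx*ny*sizeof(float));     // 5 consecutive images: total, primary, Compton, Rayleigh, multi-scatter
 *      FILE *f = fopen("mc-gpu_image.raw", "rb");
 *      if (f==NULL || fread(img, sizeof(float), 5*nx*ny, f) != (size_t)(5*nx*ny)) return 1;
 *      fclose(f);
 *      // Pixel (i,j) of the total image is img[i + j*nx]: the X coordinate increments first, then Y.
 *      free(img);
 *      return 0;
 *    }
 * \endcode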
*
*
*
* \section sec_compilation Code compilation and execution
*
* MC-GPU has been developed and tested only in the Linux operating system.
* A Makefile script is provided to compile the MC-GPU code in Linux.
* The CUDA libraries and the GNU GCC compiler must be previously installed.
* The Makefile may have to be edited to modify the library path.
* The code requires the "zlib.h" library to be able to open gzipped input files.
*
*
* MC-GPU uses CUDA to access NVIDIA GPUs but all the actual computations are coded
* in standard C and the CUDA-specific commands are enclosed within preprocessor
* "if" statements. Defining the pre-processor variable "USING_CUDA" (i.e.,
* compiling with "-DUSING_CUDA") the particle transport routines are compiled to simulate
* many x ray histories in parallel in an NVIDIA GPU using CUDA. Otherwise, the code is
* sequentially executed in the CPU.
* The same coding approach has been used to allow the use of multiple GPUs.
* Defining the pre-processor variable "USING_MPI" (i.e., compiling with
* "-DUSING_MPI"), Message Passing Interface (MPI) library calls are used to share information
* between multiple CPU threads in different computers.
* Each MPI thread gets a unique id in the CPU and addresses a unique GPU.
* At the end of the simulation the images and doses tallied by the different GPUs are
 * reduced to form a single output file equivalent to a sequential simulation of the same
* number of particles.
*
* The code can be easily compiled executing the command "make" or running the provided
* "./make.sh" script.
* Optionally, the code can be executed from the command line with a command like this
* (example using CUDA and MPI, openMPI library in this case):
* \code
* nvcc -DUSING_CUDA -DUSING_MPI MC-GPU_v1.3.cu -o MC-GPU_v1.3.x -O3
* -use_fast_math -L/usr/lib/ -I. -I/usr/local/cuda/include
* -I/usr/local/cuda/samples/common/inc -I/usr/local/cuda/samples/shared/inc/
* -I/usr/include/openmpi -lmpi -lz --ptxas-options=-v
* -gencode=arch=compute_20,code=sm_20 -gencode=arch=compute_30,code=sm_30
* \endcode
*
* The same source code can also be compiled for a regular CPU using:
* \code
* gcc -x c -O3 MC-GPU_v1.3.cu -o MC-GPU_v1.3_CPU.x -I./ -lm -lz
* \endcode
*
* To run a simulation (and keep the information reported to the standard
* output in an external file) the compiled code can be executed as:
* \code
* ./MC-GPU_v1.3.x MC-GPU_v1.3.in | tee MC-GPU_v1.3.out
* \endcode
*
 * All simulations can be executed in the same way using the code compiled for the CPU
* or the GPU (however, the number of histories should be reduced for the CPU to finish
* the simulation in a reasonable time).
* To run the simulation in parallel with MPI in multiple GPUs (or CPU cores) in the
* current computer the user can execute:
* \code
* mpirun -n 4 ./MC-GPU_v1.3.x MC-GPU_v1.3.in
* \endcode
*
* To use GPUs in different computers, the user must make sure all computers can access the simulation
* files and that the libraries are correctly set up in all nodes.
* To execute a simulation (with verbose MPI information being reported):
* \code
* mpirun --tag-output -v -x LD_LIBRARY_PATH -hostfile myhostfile.txt -n 8
* /fullPath/MC-GPU_v1.3.x /fullPath/MC-GPU_v1.3.in | tee MC-GPU_v1.3.out
* \endcode
*
* The text file 'hostfile' lists the IP addresses and number of computing slots (GPUs) of the
* computers collaborating in the simulation. This file is not necessary when using multiple
* GPUs in a single workstation. When using multiple computers, the simulation files should
* be located in a shared drive to make sure every node can access the input data.
* The different workstations must have different host names in order to be differentiated by
* the MPI threads. The multiple threads communicate to each other to make sure they don't
* use the same GPU in the same workstation.
*
*
*
* \section sec_issues Known issues
*
* In extremely long simulations, it is theoretically possible to cause an overflow of the counters
 * estimating the mean and standard deviation of the material or voxel doses. If this happens, the
* results will be incorrect and even negative or nan values can be reported.
*
*
*
*
* \section sec_ref References
*
* -# A. Badal and A. Badano, Accelerating Monte Carlo simulations of photon transport in a voxelized geometry using a massively parallel Graphics Processing Unit, Med. Phys. 36, p. 4878-4880 (2009)
* -# A. Badal and A. Badano, Monte Carlo Simulation of X-Ray Imaging Using a Graphics Processing Unit, IEEE NSC-MIC, Conference Record , HP3–1, p. 4081-4084 (2009)
* -# A. Badal, I. Kyprianou, D. Sharma and A. Badano, Fast cardiac CT simulation using a Graphics Processing Unit-accelerated Monte Carlo code, Proc. SPIE Medical Imaging Conference 7622, p. 762231 (2010)
* -# A. Badal and A. Badano, Fast Simulation of Radiographic Images Using a Monte Carlo X-Ray Transport Algorithm Implemented in CUDA, Chapter 50 of GPU Computing Gems (Emerald Edition), p. 813-830, editor Wen-mei W. Hwu, publisher Morgan Kaufmann (Elsevier), Burlington MA, 2010
* -# F. Salvat, J. M. Fernandez-Varea and J. Sempau, PENELOPE – A code system for Monte Carlo simulation of electron and photon transport, NEA-OECD, Issy-les-Moulineaux, available at www.nea.fr/html/dbprog/peneloperef.html (2006)
* -# NVIDIA Corporation, NVIDIA CUDA(TM) Programming Guide, Technical Report available at www.nvidia.com/cuda (2011)
* -# A. Badal and J. Sempau, A package of Linux scripts for the parallelization of Monte Carlo simulations, Comput. Phys. Commun. 175 (6), p. 440-450 (2006)
*
*
*
* @file MC-GPU_v1.5.cu
* @author Andreu Badal ([email protected])
* @date 2012/12/12
* -- MC-GPU v.1.4_DBT: 2016/02/02
* -- MC-GPU v.1.3: 2012/12/12
* -- MC-GPU v.1.2: 2011/10/25
* -- MC-GPU v.1.1: 2010/06/25
* -- MC-GPU v.1.0: 2009/03/17
*/
////////////////////////////////////////////////////////////////////////////////////////
// *** Include header file with the structures and functions declarations
#include <MC-GPU_v1.5b.h>
// *** Include the computing kernel:
#include <MC-GPU_kernel_v1.5b.cu>
////////////////////////////////////////////////////////////////////////////////
//! Main program of MC-GPU: initialize the simulation environment, launch the GPU
//! kernels that perform the x ray transport and report the final results.
//! This function reads the description of the simulation from an external file
//! given in the command line. This input file defines the number of particles to
//! simulate, the characteristics of the x-ray source and the detector, the number
//! and spacing of the projections (if simulating a CT), the location of the
//! material files containing the interaction mean free paths, and the location
//! of the voxelized geometry file.
//!
//! @author Andreu Badal
//!
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
// -- Start time counter:
time_t current_time = time(NULL); // Get current time (in seconds)
clock_t clock_start, clock_end, clock_start_beginning; // (requires standard header <time.h>)
clock_start = clock(); // Get current clock counter
clock_start_beginning = clock_start;
#ifdef USING_MPI
// -- Using MPI to access multiple GPUs to simulate the x-ray projection image:
int myID = -88, numprocs = -99, return_reduce = -1;
MPI_Init(&argc, &argv); // Init MPI and get the current thread ID
MPI_Comm_rank(MPI_COMM_WORLD, &myID);
MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
char MPI_processor_name[81];
int resultlen = -1;
MPI_Get_processor_name(MPI_processor_name, &resultlen);
char* char_time = ctime(&current_time); char_time[19] = '\0'; // The time is located between the characters 11 and 19.
printf(" >> MPI run (myId=%d, numprocs=%d) on processor \"%s\" (time: %s) <<\n", myID, numprocs, MPI_processor_name, &char_time[11]);
fflush(stdout); // Clear the screen output buffer
MPI_Barrier(MPI_COMM_WORLD); // Synchronize MPI threads
MASTER_THREAD printf(" -- Time spent initializing the MPI world (MPI_Barrier): %.3f s\n", ((double)(clock()-clock_start))/CLOCKS_PER_SEC);
#else
int myID = 0, numprocs = 1; // Only one CPU thread used when MPI is not activated (multiple projections will be simulated sequentially).
#endif
MASTER_THREAD
{
printf("\n\n *****************************************************************************\n");
printf( " *** MC-GPU, version 1.4_DBT (http://code.google.com/p/mcgpu/) ***\n");
printf( " *** ***\n");
printf( " *** A. Badal and A. Badano, \"Accelerating Monte Carlo simulations of *** \n");
printf( " *** photon transport in a voxelized geometry using a massively parallel *** \n");
printf( " *** Graphics Processing Unit\", Medical Physics 36, pp. 4878–4880 (2009) ***\n");
printf( " *** ***\n");
printf( " *** Andreu Badal ([email protected]) ***\n");
printf( " *****************************************************************************\n\n");
printf("****** Code execution started on: %s\n\n", ctime(¤t_time));
fflush(stdout);
}
#ifdef USING_CUDA
// The "MASTER_THREAD" macro prints the messages just once when using MPI threads (it has no effect if MPI is not used): MASTER_THREAD == "if(0==myID)"
MASTER_THREAD printf ("\n *** CUDA SIMULATION IN THE GPU ***\n");
#else
MASTER_THREAD printf ("\n *** SIMULATION IN THE CPU ***\n");
#endif
MASTER_THREAD printf("\n -- INITIALIZATION phase:\n");
MASTER_THREAD fflush(stdout); // Clear the screen output buffer for the master thread
///////////////////////////////////////////////////////////////////////////////////////////////////
// *** Declare the arrays and structures that will contain the simulation data:
struct voxel_struct voxel_data; // Define the geometric constants of the voxel file
struct detector_struct detector_data[MAX_NUM_PROJECTIONS+1]; // Define an x ray detector (for each projection)
struct source_struct source_data[MAX_NUM_PROJECTIONS+1]; // Define the particles source (for each projection)
struct source_energy_struct source_energy_data; // Define the source energy spectrum
struct linear_interp mfp_table_data; // Constant data for the linear interpolation
struct compton_struct compton_table; // Structure containing Compton sampling data (to be copied to CONSTANT memory)
struct rayleigh_struct rayleigh_table; // Structure containing Rayleigh sampling data (to be copied to CONSTANT memory)
// float2 *voxel_mat_dens = NULL;
// char *voxel_mat_dens = NULL; // Pointer where voxels array will be allocated //!!FixedDensity_DBT!! Density taken from function "density_LOT"
int *voxel_mat_dens = NULL; //!!bitree!! v1.5 --> using an integer value to be able to store both the material number (positive) or pointers to the binary tree branches (negative)
long long int voxel_mat_dens_bytes = 0; // Size (in bytes) of the voxels array (using unsigned int to allocate up to 4.2GBytes)
char *bitree = NULL; // Array storing the binary tree structures for each non-uniform low resolution voxel //!!bitree!! v1.5b
unsigned int bitree_bytes = 0; // Size (in bytes) of the bitree array
int *voxel_geometry_LowRes = NULL; // Array to temporary store the low resolution version of the geometry when the binary tree is created
unsigned int voxel_geometry_LowRes_bytes = 0;
float density_max[MAX_MATERIALS];
float density_nominal[MAX_MATERIALS];
unsigned long long int *image = NULL; // Pointer where image array will be allocated
int image_bytes = -1; // Size of the image array
int mfp_table_bytes = -1, mfp_Woodcock_table_bytes = -1; // Size of the table arrays
float2 *mfp_Woodcock_table = NULL; // Linear interpolation data for the Woodcock mean free path [cm]
float3 *mfp_table_a = NULL, *mfp_table_b = NULL; // Linear interpolation data for 3 different interactions:
// (1) inverse total mean free path (divided by density, cm^2/g)
// (2) inverse Compton mean free path (divided by density, cm^2/g)
// (3) inverse Rayleigh mean free path (divided by density, cm^2/g)
short int dose_ROI_x_min, dose_ROI_x_max, dose_ROI_y_min, dose_ROI_y_max, dose_ROI_z_min, dose_ROI_z_max; // Coordinates of the dose region of interest (ROI)
ulonglong2 *voxels_Edep = NULL; // Pointer where the voxel energy deposition array will be allocated
int voxels_Edep_bytes = 0; // Size of the voxel Edep array
ulonglong2 materials_dose[MAX_MATERIALS]; // Array for tally_materials_dose. !!tally_materials_dose!!
int kk;
for(kk=0;kk<MAX_MATERIALS;kk++)
{
materials_dose[kk].x = 0; // Initializing data !!tally_materials_dose!!
materials_dose[kk].y = 0;
density_nominal[kk] =-1.0f;
}
clock_t clock_kernel; // Using only cpu timers after CUDA 5.0
double time_elapsed_MC_loop = 0.0, time_total_MC_simulation = 0.0, time_total_MC_init_report = 0.0;
unsigned long long int total_histories;
int histories_per_thread, seed_input, num_threads_per_block, gpu_id, num_projections;
int flag_material_dose=-2;
bool flag_simulateMammoAfterDBT=false, flag_detectorFixed=false; // !!DBTv1.4!!;
double SRotAxisD=-1.0, translation_helical=0.0;
char file_name_voxels[250], file_name_materials[MAX_MATERIALS][250], file_name_output[250], file_dose_output[250], file_name_espc[250];
// *** Read the input file given in the command line and return the significant data:
read_input(argc, argv, myID, &total_histories, &seed_input, &gpu_id, &num_threads_per_block, &histories_per_thread, detector_data, &image, &image_bytes, source_data, &source_energy_data, &voxel_data, file_name_voxels, file_name_materials, file_name_output, file_name_espc, &num_projections, &voxels_Edep, &voxels_Edep_bytes, file_dose_output, &dose_ROI_x_min, &dose_ROI_x_max, &dose_ROI_y_min, &dose_ROI_y_max, &dose_ROI_z_min, &dose_ROI_z_max, &SRotAxisD, &translation_helical, &flag_material_dose, &flag_simulateMammoAfterDBT, &flag_detectorFixed);
// *** Read the energy spectrum and initialize its sampling with the Walker aliasing method:
MASTER_THREAD printf(" -- Reading the energy spectrum and initializing the Walker aliasing sampling algorithm.\n");
float mean_energy_spectrum = 0.0f;
init_energy_spectrum(file_name_espc, &source_energy_data, &mean_energy_spectrum);
// *** Output some of the data read to make sure everything was correctly read:
MASTER_THREAD
{
printf("\n -- Data read from the input file:\n");
if (total_histories < (unsigned long long int)(100000))
printf(" simulation time = %lld s\n", total_histories);
else
printf(" x-ray tracks to simulate = %lld\n", total_histories);
printf(" initial random seed = %d\n", seed_input);
double phi0 = ((double)source_data[0].D_phi)*RAD2DEG;
double theta0 = 2.0*(90.0 - acos(((double)source_data[0].cos_theta_low))*RAD2DEG);
if (source_data[0].flag_halfConeX)
{
theta0 = 0.5*theta0;
printf(" NOTE: sampling only upper half of collimated cone beam, with beam offset to edge of the image (eg, mammo).\n"); // !!DBT!! !!HalfBeam!! !!DBTv1.4!!
}
printf(" azimuthal (phi), polar apertures = %.6f , %.6f degrees\n", phi0, theta0);
printf(" (max_height_at_y1cm = %f , max_width_at_y1cm = %f)\n", source_data[0].max_height_at_y1cm, source_data[0].max_width_at_y1cm); // !!DBTv1.4!! !!DeBuG!!
printf(" source direction = (%f, %f, %f)\n", source_data[0].direction.x, source_data[0].direction.y, source_data[0].direction.z);
printf(" focal spot position = (%f, %f, %f)\n", source_data[0].position.x, source_data[0].position.y, source_data[0].position.z);
printf(" focal spot Gaussian blur FWHM = %f (3D Gaussian dist. cropped at 2*sigma)\n", source_data[0].focal_spot_FWHM); // !!DBTv1.4!!
if (num_projections!=1 && flag_simulateMammoAfterDBT==true)
printf(" focal spot rotation blur = %f degrees (disabled for the first single projection at 0 deg)\n", source_data[0].rotation_blur*RAD2DEG); // !!DBTv1.5!!
else
printf(" focal spot rotation blur = %f degrees\n", source_data[0].rotation_blur*RAD2DEG); // !!DBTv1.5!!
printf(" source-detector distance = %f cm\n", detector_data[0].sdd);
printf(" detector center position = (%f, %f, %f)\n", detector_data[0].center.x, detector_data[0].center.y, detector_data[0].center.z);
printf(" image offset from beam at center = (%f, %f)\n", detector_data[0].offset.x, detector_data[0].offset.y); // !!DBTv1.4!!
printf(" detector layer thickness = %f cm (=%.2f micron)\n", detector_data[0].scintillator_thickness, 1.0e4f*detector_data[0].scintillator_thickness); // !!DBTv1.4!!
printf(" detector material average MFP = %f cm\n", detector_data[0].scintillator_MFP);
printf(" detector material K-edge energy = %f eV\n", detector_data[0].kedge_energy);
printf(" fluorescence energy and yield = %f eV , %f\n", detector_data[0].fluorescence_energy, detector_data[0].fluorescence_yield);
printf(" MFP at fluorescence energy = %f cm\n", detector_data[0].fluorescence_MFP);
if (detector_data[0].gain_W>0.001f)
{
printf(" detector gain and Swank factor = %f eV/detected_charge, %f (%f relative std_dev)\n", detector_data[0].gain_W, 1.0f/(1.0f+detector_data[0].Swank_rel_std*detector_data[0].Swank_rel_std), detector_data[0].Swank_rel_std); // Swank_factor = mean^2/(mean^2 + std_dev^2) --> (std_dev/mean) = sqrt(1/Swank_factor - 1)
printf(" electronic noise per pixel = %f electrons\n", detector_data[0].electronic_noise);
}
printf(" detector cover thickness = %f cm\n", detector_data[0].cover_thickness); // !!DBTv1.5!!
printf(" cover average mean free path = %f cm\n", detector_data[0].cover_MFP);
if (detector_data[0].grid_freq > 0.0f)
{
printf(" Antiscatter grid ratio = %f\n", fabsf(detector_data[0].grid_ratio)); // !!DBTv1.5!!
printf(" Antiscatter grid frequency = %f lines per cm\n", detector_data[0].grid_freq);
printf(" Antiscatter grid strip thickness = %f cm (=%.2f micron)\n", detector_data[0].grid_strip_thickness, 1.0e4*detector_data[0].grid_strip_thickness);
float h = fabsf(detector_data[0].grid_ratio)*(1.0f/detector_data[0].grid_freq - detector_data[0].grid_strip_thickness); // Height of the grid, according to input grid ratio, freq, and strip thickness
printf(" Computed antiscatter grid height = %f cm (=%.2f micron)\n", h, 1.0e4*h);
printf(" strips average mean free path = %f cm\n", 1.0f/detector_data[0].grid_strip_mu);
printf(" interspace average mean free path = %f cm\n", 1.0f/detector_data[0].grid_interspace_mu);
if (detector_data[0].grid_ratio<0.0f)
printf(" Antiscatter grid orientation = 0 --> 1D collimated grid with strips perpendicular to lateral direction (mammo style)\n");
else
printf(" Antiscatter grid orientation = 1 --> 1D collimated grid with strips parallel to lateral direction (DBT style)\n");
}
else
printf("\n Antiscatter grid: DISABLED!\n\n"); // !!DBTv1.5!!
printf(" number of pixels image = %dx%d = %d\n", detector_data[0].num_pixels.x, detector_data[0].num_pixels.y, detector_data[0].total_num_pixels);
printf(" pixel size = %.5fx%.5f cm\n", 1.0f/detector_data[0].inv_pixel_size_X, 1.0f/detector_data[0].inv_pixel_size_Z);
printf(" detector size = %.5fx%.5f cm\n", detector_data[0].width_X, detector_data[0].height_Z);
printf(" number of projections = %d\n", num_projections);
if (num_projections!=1 || source_data[0].rotation_blur>0.000001f) // Report these data also when only rotation blur is used, because the blur rotation is defined around the source-rotation axis
{
printf(" source-rotation axis-distance = %lf cm\n", SRotAxisD);
printf(" angle between projections = %lf\n", source_data[0].angle_per_projection*RAD2DEG);
printf(" initial angle offset = %lf\n", source_data[0].angle_offset*RAD2DEG); // !!DBTv1.4!!
printf(" rotation point = (%f, %f, %f)\n", source_data[0].rotation_point.x, source_data[0].rotation_point.y, source_data[0].rotation_point.z); // !!DBTv1.4!!
printf(" axis of rotation = (%f, %f, %f)\n", source_data[0].axis_of_rotation.x, source_data[0].axis_of_rotation.y, source_data[0].axis_of_rotation.z); // !!DBTv1.4!!
printf(" translation between proj = %lf\n", translation_helical);
}
printf(" output image file = %s\n", file_name_output);
printf(" input voxel file = %s\n", file_name_voxels);
printf(" voxel geometry offset = (%f, %f, %f) cm\n", voxel_data.offset.x, voxel_data.offset.y, voxel_data.offset.z); // !!DBTv1.4!!
printf(" size coarse voxels for binary trees = %d x %d x %d\n", (int)voxel_data.num_voxels_coarse.x, (int)voxel_data.num_voxels_coarse.y, (int)voxel_data.num_voxels_coarse.z); // !!bitree!! v1.5b
if (dose_ROI_x_max>-1)
{
printf(" output dose file = %s\n", file_dose_output);
printf(" input region of interest dose = X[%d,%d], Y[%d,%d], Z[%d,%d]\n", dose_ROI_x_min+1, dose_ROI_x_max+1, dose_ROI_y_min+1, dose_ROI_y_max+1, dose_ROI_z_min+1, dose_ROI_z_max+1); // Show ROI with index=1 for the first voxel instead of 0.
}
printf("\n energy spectrum file = %s\n", file_name_espc);
printf( " number of energy bins read = %d\n", source_energy_data.num_bins_espc);
printf( " minimum, maximum energies = %.3f, %.3f keV\n", 0.001f*source_energy_data.espc[0], 0.001f*source_energy_data.espc[source_energy_data.num_bins_espc]);
printf( " mean energy spectrum = %.3f keV\n\n", 0.001f*mean_energy_spectrum);
fflush(stdout);
}
// *** Set the detectors and sources for the CT trajectory (if needed, ie, for more than one projection):
if (num_projections != 1)
{
set_CT_trajectory(myID, num_projections, source_data, detector_data, translation_helical, flag_detectorFixed);
}
fflush(stdout);
double mass_materials[MAX_MATERIALS];
// !!bitree!! If the binary tree is used, read the geometry only with the master thread, and then broadcast the new data:
// if the tree is not used, every thread reads the input geometry at the same time.
if (0==myID || (voxel_data.num_voxels_coarse.x)==0)
{
// *** Read the voxel data and allocate the density map matrix. Return the maximum density:
if (voxel_data.num_voxels.x<1)
{
// -- Read ASCII format geometry: geometric parameters will be read from the header file !!DBTv1.4!!
load_voxels(myID, file_name_voxels, density_max, &voxel_data, &voxel_mat_dens, &voxel_mat_dens_bytes, &dose_ROI_x_max, &dose_ROI_y_max, &dose_ROI_z_max);
}
else
{
// -- Read binary RAW format geometry: geometric parameters given in input file !!DBTv1.4!!
load_voxels_binary_VICTRE(myID, file_name_voxels, density_max, &voxel_data, &voxel_mat_dens, &voxel_mat_dens_bytes, &dose_ROI_x_max, &dose_ROI_y_max, &dose_ROI_z_max); //!!DBT!! // !!DBTv1.4!!
}
// -- Pre-compute the total mass of each material present in the voxel phantom (to be used in "report_materials_dose"):
double voxel_volume = 1.0 / ( ((double)voxel_data.inv_voxel_size.x) * ((double)voxel_data.inv_voxel_size.y) * ((double)voxel_data.inv_voxel_size.z) );
for(kk=0; kk<MAX_MATERIALS; kk++)
mass_materials[kk] = 0.0;
long long int llk;
for(llk=0; llk<((long long int)voxel_data.num_voxels.x*(long long int)voxel_data.num_voxels.y*(long long int)voxel_data.num_voxels.z); llk++) // For each voxel in the geometry
{
// mass_materials[((int)voxel_mat_dens[llk].x)-1] += ((double)voxel_mat_dens[llk].y)*voxel_volume; // Add material mass = density*volume
mass_materials[((int)voxel_mat_dens[llk])] += ((double)density_LUT((int)voxel_mat_dens[llk]))*voxel_volume; // Add material mass = density*volume (first material==0) //!!FixedDensity_DBT!! Density taken from function "density_LUT"
}
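// Note: assuming the usual units (densities in g/cm^3 returned by density_LUT and voxel sizes in cm), mass_materials[]
// accumulates the mass of each material in grams: every voxel of material m contributes density_LUT(m)*voxel_volume.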
// ** Create the low resolution version of the phantom and the binary tree structures, if requested in the input file and dose dep tally disabled: //!!bitree!! v1.5b
if ((voxel_data.num_voxels_coarse.x)!=0)
{
if (dose_ROI_x_max>0)
{
MASTER_THREAD printf("\n\n !!ERROR!! Sorry, the voxel dose deposition tally cannot be used when the binary tree is active. Please, disable the binary tree.\n\n");
exit(-1);
}
MASTER_THREAD printf("\n !!bitree!! Creating a binary tree structure to minimize memory use.\n"); // !!bitree!! v1.5b
#ifdef DISABLE_CANON
MASTER_THREAD printf(" !!bitree!! Tree branch canonicalization was disabled by defining the pre-processor parameter \"DISABLE_CANON\"\n"); // !!bitree!! v1.5b !!DeBuG!!
#endif
create_bitree(myID, &voxel_data, voxel_mat_dens, &bitree, &bitree_bytes, &voxel_geometry_LowRes, &voxel_geometry_LowRes_bytes); //!!bitree!! v1.5b
MASTER_THREAD printf(" >> RAM memory allocation: original voxelized geometry = %f MBytes; low resolution voxelized geometry = %f MBytes;\n", voxel_mat_dens_bytes/(1024.f*1024.f), voxel_geometry_LowRes_bytes/(1024.f*1024.f));
MASTER_THREAD printf(" binary tree = %f MBytes; image vector = %f MBytes; data structures = %f Mbytes\n", bitree_bytes/(1024.f*1024.f), image_bytes/(1024.f*1024.f), (sizeof(struct voxel_struct)+sizeof(struct source_struct)+sizeof(struct detector_struct)+sizeof(struct linear_interp)+2*mfp_table_bytes+sizeof(struct rayleigh_struct)+sizeof(struct compton_struct))/(1024.f*1024.f));
MASTER_THREAD printf(" (reduction in memory use with bitree: [low res voxels + binary tree]-[high res voxels] = %f MBytes = %.3f%%)\n", (voxel_geometry_LowRes_bytes+bitree_bytes-voxel_mat_dens_bytes)/(1024.f*1024.f), 100.f*(voxel_geometry_LowRes_bytes+bitree_bytes-voxel_mat_dens_bytes)/voxel_mat_dens_bytes);
// -- Replace the high resolution version of the geometry by the low resolution version: !!DeBuG!! voxel dose tally can't be used now!!
free(voxel_mat_dens); //!!bitree!! v1.5b
voxel_mat_dens = voxel_geometry_LowRes; //!!bitree!! v1.5b
voxel_mat_dens_bytes = voxel_geometry_LowRes_bytes; //!!bitree!! v1.5b
}
else
{
MASTER_THREAD printf("\n !!bitree!! Binary tree structure disabled: standard voxelized geometry in use.\n\n"); // !!bitree!! v1.5b
MASTER_THREAD printf(" >> RAM memory allocation: voxelized geometry = %f MBytes; image vector = %f MBytes; data structures = %f Mbytes\n", voxel_mat_dens_bytes/(1024.f*1024.f), image_bytes/(1024.f*1024.f), (sizeof(struct voxel_struct)+sizeof(struct source_struct)+sizeof(struct detector_struct)+sizeof(struct linear_interp)+2*mfp_table_bytes+sizeof(struct rayleigh_struct)+sizeof(struct compton_struct))/(1024.f*1024.f));
}
}
fflush(stdout);
// !!bitree!! If the binary tree is used, broadcast the tree data and all auxiliary data from master to every other thread: !!DeBuG!!
if (numprocs>1 && (voxel_data.num_voxels_coarse.x)!=0)
{
MPI_Barrier(MPI_COMM_WORLD); // Synchronize MPI threads
// Send all the geometric data that has been read or changed by the root node:
MPI_Bcast(&voxel_data.num_voxels.x, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(&voxel_data.num_voxels.y, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(&voxel_data.num_voxels.z, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(&voxel_data.size_bbox.x, 1, MPI_FLOAT, 0, MPI_COMM_WORLD); MPI_Bcast(&voxel_data.size_bbox.y, 1, MPI_FLOAT, 0, MPI_COMM_WORLD); MPI_Bcast(&voxel_data.size_bbox.z, 1, MPI_FLOAT, 0, MPI_COMM_WORLD);
voxel_data.voxel_size_HiRes.x = voxel_data.voxel_size.x; voxel_data.voxel_size_HiRes.y = voxel_data.voxel_size.y; voxel_data.voxel_size_HiRes.z = voxel_data.voxel_size.z; // Save the original high resolution voxel size
MPI_Bcast(&voxel_data.voxel_size.x, 1, MPI_FLOAT, 0, MPI_COMM_WORLD); MPI_Bcast(&voxel_data.voxel_size.y, 1, MPI_FLOAT, 0, MPI_COMM_WORLD); MPI_Bcast(&voxel_data.voxel_size.z, 1, MPI_FLOAT, 0, MPI_COMM_WORLD);
voxel_data.inv_voxel_size.x = 1.0/voxel_data.voxel_size.x; voxel_data.inv_voxel_size.y = 1.0/voxel_data.voxel_size.y; voxel_data.inv_voxel_size.z = 1.0/voxel_data.voxel_size.z;
MPI_Bcast(density_max, MAX_MATERIALS, MPI_FLOAT, 0, MPI_COMM_WORLD);
MPI_Bcast(mass_materials, MAX_MATERIALS, MPI_DOUBLE, 0, MPI_COMM_WORLD);
// Allocate memory (except root) and transmit voxel+binary tree links data:
MPI_Bcast(&voxel_geometry_LowRes_bytes, 1, MPI_UNSIGNED, 0, MPI_COMM_WORLD);
MPI_Bcast(&bitree_bytes, 1, MPI_UNSIGNED, 0, MPI_COMM_WORLD);
if(0!=myID)
{
voxel_mat_dens_bytes = voxel_geometry_LowRes_bytes;
voxel_mat_dens = (int*) malloc(voxel_mat_dens_bytes); // Allocate voxels (low resolution)
bitree = (char*) malloc(bitree_bytes); // Allocate binary tree elements
}
MPI_Bcast(voxel_mat_dens, ((unsigned long long int)voxel_data.num_voxels.x)*(voxel_data.num_voxels.y*voxel_data.num_voxels.z), MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(bitree, bitree_bytes, MPI_CHAR, 0, MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
}
// *** Read the material mean free paths and set the interaction table in a "linear_interp" structure:
load_material(myID, file_name_materials, density_max, density_nominal, &mfp_table_data, &mfp_Woodcock_table, &mfp_Woodcock_table_bytes, &mfp_table_a, &mfp_table_b, &mfp_table_bytes, &rayleigh_table, &compton_table);
// -- Check that the input material tables and the x-ray source are consistent:
if ( (source_energy_data.espc[0] < mfp_table_data.e0) || (source_energy_data.espc[source_energy_data.num_bins_espc] > (mfp_table_data.e0 + (mfp_table_data.num_values-1)/mfp_table_data.ide)) )
{
MASTER_THREAD
{
printf("\n\n\n !!ERROR!! The input x-ray source energy spectrum minimum (%.3f eV) and maximum (%.3f eV) energy values\n", source_energy_data.espc[0], source_energy_data.espc[source_energy_data.num_bins_espc]);
printf( " are outside the tabulated energy interval for the material properties tables (from %.3f to %.3f eV)!!\n", mfp_table_data.e0, (mfp_table_data.e0+(mfp_table_data.num_values-1)/mfp_table_data.ide));
printf( " Please, modify the input energy spectra to fit the tabulated limits or create new tables.\n\n");
}
#ifdef USING_MPI
MPI_Finalize();
#endif
exit(-1);
}
// *** Initialize the GPU using the NVIDIA CUDA libraries, if USING_CUDA parameter defined at compile time:
#ifdef USING_CUDA
// -- Declare the pointers to the device global memory, when using the GPU:
// float2 *voxel_mat_dens_device = NULL,
// *mfp_Woodcock_table_device = NULL;
// char *voxel_mat_dens_device = NULL; //!!FixedDensity_DBT!! Allocate material vector as char
int *voxel_mat_dens_device = NULL; //!!bitree!! v1.5 --> using an integer value to be able to store both the material number (positive) or pointers to the binary tree branches (negative)
char *bitree_device = NULL; //!!bitree!! v1.5b
float2 *mfp_Woodcock_table_device = NULL; //!!FixedDensity_DBT!!
float3 *mfp_table_a_device = NULL,
*mfp_table_b_device = NULL;
unsigned long long int *image_device = NULL;
struct rayleigh_struct *rayleigh_table_device = NULL;
struct compton_struct *compton_table_device = NULL;
ulonglong2 *voxels_Edep_device = NULL;
struct detector_struct *detector_data_device = NULL;
struct source_struct *source_data_device = NULL;
ulonglong2 *materials_dose_device = NULL; // !!tally_materials_dose!!
int* seed_input_device = NULL; // Store latest random seed used in GPU in global memory to continue random sequence in consecutive projections. !!DBTv1.4!!
// -- Sets the CUDA enabled GPU that will be used in the simulation, and allocate and copies the simulation data in the GPU global and constant memories.
init_CUDA_device(&gpu_id, myID, numprocs, &voxel_data, source_data, &source_energy_data, detector_data, &mfp_table_data, /*Variables GPU constant memory*/
voxel_mat_dens, &voxel_mat_dens_device, voxel_mat_dens_bytes, /*Variables GPU global memory*/
bitree, &bitree_device, bitree_bytes, //!!bitree!! v1.5b
image, &image_device, image_bytes,
mfp_Woodcock_table, &mfp_Woodcock_table_device, mfp_Woodcock_table_bytes,
mfp_table_a, mfp_table_b, &mfp_table_a_device, &mfp_table_b_device, mfp_table_bytes,
&rayleigh_table, &rayleigh_table_device,
&compton_table, &compton_table_device, &detector_data_device, &source_data_device,
voxels_Edep, &voxels_Edep_device, voxels_Edep_bytes, &dose_ROI_x_min, &dose_ROI_x_max, &dose_ROI_y_min, &dose_ROI_y_max, &dose_ROI_z_min, &dose_ROI_z_max,
materials_dose, &materials_dose_device, flag_material_dose, &seed_input_device, &seed_input, (num_projections+1));
// !!DBTv1.4!! Allocate space for one extra projection (num_projections+1) for case flag_simulateMammoAfterDBT==true !!DBTv1.4!!
// -- Constant data already moved to the GPU: clean up unnecessary RAM memory
free(mfp_Woodcock_table);
free(mfp_table_a);
free(mfp_table_b);
if (0!=myID) // Keep the geometry data for the MPI root because the voxel densities are still needed to compute the final doses
free(voxel_mat_dens);
#endif
MASTER_THREAD
{
current_time=time(NULL);
printf("\n -- INITIALIZATION finished: elapsed time = %.3f s. \n\n", ((double)(clock()-clock_start))/CLOCKS_PER_SEC);
}
#ifdef USING_MPI
fflush(stdout);
MPI_Barrier(MPI_COMM_WORLD); // Synchronize MPI threads before starting the MC phase.
#endif
///////////////////////////////////////////////////////////////////////////////////////////////////
MASTER_THREAD
{
current_time=time(NULL);
printf("\n\n -- MONTE CARLO LOOP phase. Time: %s\n\n", ctime(¤t_time));
fflush(stdout);
}
// -- An input number of histories smaller than roughly one day expressed in seconds means that the user wants to simulate for that amount of time in each GPU (the code uses 95000 s as the threshold below), not a fixed number of histories:
unsigned long long int total_histories_INPUT = total_histories; // Save the original input values to be re-used for multiple projections
int doing_speed_test = -1, simulating_by_time = 0; // 0==false
if (total_histories < (unsigned long long int)(95000))
simulating_by_time = 1; // 1=true
int num_blocks_speed_test = 0;
unsigned long long int histories_speed_test = (unsigned long long int)0, total_histories_speed_test = (unsigned long long int)0;
float node_speed = -1.0f, total_speed = 1.0f;
double current_angle = -999;
int num_p; // == current projection number
// *************************************************************************************
// *** CT simulation loop (including speed test, if simulating by time or multi-GPU) ***
// *************************************************************************************
int num_projections_loop = num_projections;
if (num_projections>1)
{
num_projections_loop++; // Add an extra projection because [0] always corresponds to the 0 deg projection (first tomographic image starts at [1]) !!DBTv1.4!!
}
for (num_p=0; num_p<num_projections_loop; num_p++)
{
// --Re-load the num histories for each new projection !!DBTv1.4!!
total_histories = total_histories_INPUT;
// --Skip the initial 0 deg projection if we are simulating a tomographic scan and we don't want a separate projection with the full dose: !!DBTv1.4!!
if (0==num_p && num_projections>1 && flag_simulateMammoAfterDBT==false)
continue;
if (flag_simulateMammoAfterDBT && 0==num_p)
{
// -- !!DBT!! Simulate the first 0 deg projection (mammo) with almost as many histories (SCALE_MAMMO_DBT factor) as the whole tomographic scan to follow: !!DBTv1.4!!
total_histories = total_histories_INPUT * num_projections * SCALE_MAMMO_DBT; //!!DeBuG!! !!DBTv1.4!! Scaling dose a factor SCALE_MAMMO_DBT (eg, factor 2/3 for 1 mGy mammo for 1.5 mGy DBT)
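// Illustrative example (hypothetical input values): 1.0e7 histories per projection, 25 projections and SCALE_MAMMO_DBT = 2/3
// would give a single 0 deg acquisition of 1.0e7*25*(2/3) ~= 1.67e8 histories.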
MASTER_THREAD
{
printf("\n\n !!DBT!! Simulating first a 0 degree projection with %.4f times the number of histories as the complete scan with %d projections = %lld histories\n", SCALE_MAMMO_DBT, num_projections, total_histories);
printf( " Afterwards, simulate the tomo acquisition starting at most negative angle and ending at most positive angle.\n"); // !!DBT!! !!DBTv1.4!!
printf( " If defined, motion blur is disabled and anti-scatter grid enabled only for the single projection.\n\n");
}
}
else if (flag_simulateMammoAfterDBT && 1==num_p)
MASTER_THREAD printf("\n\n !!DBT!! After the first full simulation (eg, mammo), simulate a DBT acquisition (starting at neg angle) with the input number histories per projections.\n\n");
if (0==num_p)
current_angle = 0.0;
else
current_angle = source_data[0].angle_offset + (num_p-1) * source_data[0].angle_per_projection;
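// Illustrative example (hypothetical scan, not taken from any input file): with angle_offset = -25 deg and
// angle_per_projection = 25/12 deg, projections num_p = 1..25 sweep from -25 deg to +25 deg, crossing 0 deg at num_p = 13.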
MASTER_THREAD
if (num_projections!=1)
if (flag_simulateMammoAfterDBT && 0==num_p)
printf("\n\n\n\n << Simulating a 0 degree projection (mammography) with %d * %f as many histories as each tomographic projection >>\n\n", num_projections, SCALE_MAMMO_DBT);
else
printf("\n\n\n\n << Simulating tomographic projection %d of %d >> Angle: %lf degrees.\n\n", num_p, num_projections, current_angle*RAD2DEG);
clock_start = clock(); // Start the CPU clock
#ifdef USING_CUDA
// *** Simulate in the GPUs the input amount of time or amount of particles:
// -- Estimate GPU speed to use a total simulation time or multiple GPUs:
if ( simulating_by_time==0 && // Simulating a fixed number of particles, not a fixed time (so performing the speed test only once)
node_speed>0.0f && // Speed test already performed for a previous projection in this simulation (node_speed and total_speed variables set)
numprocs>1) // Using multiple GPUs (ie, multiple MPI threads)
{
// -- Simulating successive projections after the first one with a fixed number of particles, with multiple MPI threads: re-use the speed test results from the first projection image:
total_histories = (unsigned long long int)(0.5 + ((double)total_histories) * (((double)node_speed)/total_speed));
doing_speed_test = 0; // No speed test for this projection.
}
else if ( simulating_by_time==1 || numprocs>1)
{
// -- Simulating with a time limit OR multiple MPI threads for the first time (num_p==0): run a speed test to calculate the speed of the current GPU and distribute the number of particles to the multiple GPUs or estimate the total number of particles required to run the input amount of time:
// Note that this ELSE IF block will be skipped if we are using a single MPI thread and a fixed number of particles.
doing_speed_test = 1; // Remember that we are performing the speed test to make sure we add the test histories to the total before the tally reports.
if (node_speed<0.0f) // Speed test not performed before (first projection being simulated): set num_blocks_speed_test and histories_speed_test.
{
num_blocks_speed_test = guestimate_GPU_performance(gpu_id); // Guestimating a good number of blocks to estimate the speed of different generations of GPUs. Slower GPUs will simulate less particles and hopefully the fastest GPUs will not have to wait much.
}
histories_speed_test = (unsigned long long int)(num_blocks_speed_test*num_threads_per_block)*(unsigned long long int)(histories_per_thread);
dim3 blocks_speed_test(num_blocks_speed_test, 1);
dim3 threads_speed_test(num_threads_per_block, 1);
#ifdef USING_MPI
// -- Init the current random number generator seed to avoid overlapping sequences with other MPI threads:
if (simulating_by_time == 1)
// Simulating by time: set an arbitrary huge number of particles to skip.
update_seed_PRNG((myID + num_p*numprocs), (unsigned long long int)(123456789012), &seed_input); // Set the random number seed far from any other MPI thread (myID) and away from the seeds used in the previous projections (num_p*numprocs).
else
// Simulating by histories (default):
update_seed_PRNG(myID, total_histories_INPUT*num_projections, &seed_input); // Init the random seed for each MPI thread as far away from the previous thread as if all "total_histories*num_projections" histories were simulated by each thread --> guarantees that each thread has an uncorrelated sequence of random values (at least for the first seed of RANECU). !!DBTv1.4!! !!DeBuG!!
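// Sketch of the seed leapfrog used above (as described in the comment): MPI thread i starts its RANECU stream offset as if
// i*total_histories_INPUT*num_projections histories had already been simulated, so consecutive threads are separated by that
// many histories' worth of random numbers and their streams should not overlap.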
checkCudaErrors(cudaMemcpy(seed_input_device, &seed_input, sizeof(int), cudaMemcpyHostToDevice)); // Upload initial seed value to GPU memory. !!DBTv1.4!!
printf(" ==> CUDA (MPI process #%d in \"%s\"): estimate GPU speed executing %d blocks of %d threads, %d histories per thread: %lld histories in total (random seed=%d).\n", myID, MPI_processor_name, num_blocks_speed_test, num_threads_per_block, histories_per_thread, histories_speed_test, seed_input);
#else
printf(" ==> CUDA: Estimating the GPU speed executing %d blocks of %d threads, %d histories per thread: %lld histories in total.\n", num_blocks_speed_test, num_threads_per_block, histories_per_thread, histories_speed_test);
#endif
fflush(stdout);
clock_kernel = clock();
// -- Launch Monte Carlo simulation kernel for the speed test:
track_particles<<<blocks_speed_test,threads_speed_test>>>(histories_per_thread, (short int)num_p, seed_input_device, image_device, voxels_Edep_device, voxel_mat_dens_device, bitree_device, mfp_Woodcock_table_device, mfp_table_a_device, mfp_table_b_device, rayleigh_table_device, compton_table_device, detector_data_device, source_data_device, materials_dose_device);
#ifdef USING_MPI
// Find out the total number of histories simulated in the speed test by all the GPUs. Note that this MPI call will be executed in parallel with the GPU kernel because it is located before the cudaThreadSynchronize command!
return_reduce = MPI_Allreduce(&histories_speed_test, &total_histories_speed_test, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD);
if (MPI_SUCCESS != return_reduce)
printf("\n\n !!ERROR!! Error reducing (MPI_Allreduce) the total number of histories in the speed test test??? return_reduce = %d for thread %d\n\n\n", return_reduce, myID);
else
#else
total_histories_speed_test = histories_speed_test;
#endif
fflush(stdout);
cudaThreadSynchronize(); // Force the runtime to wait until GPU kernel has completed
getLastCudaError("\n\n !!Kernel execution failed while simulating particle tracks!! "); // Check if the CUDA function returned any error
float speed_test_time = float(clock()-clock_kernel)/CLOCKS_PER_SEC;
node_speed = (float) (((double)histories_speed_test)/speed_test_time);
#ifdef USING_MPI
printf(" (MPI process #%d): Estimated GPU speed = %lld hist / %.4f s = %.3f hist/s\n", myID, histories_speed_test, speed_test_time, node_speed);
#else
printf(" Estimated GPU speed = %lld hist / %.3f s = %.3f hist/s\n", histories_speed_test, speed_test_time, node_speed);
#endif
// !!DBTv1.4!! !!DeBuG!! No need to update the seed in the main program bc each GPU continues its series!
// // -- Init random number generator seed to avoid repeating the random numbers used in the speed test:
// update_seed_PRNG(1, histories_speed_test, &seed_input);
if (simulating_by_time==1)
{
// -- Set number of histories for each GPU when simulating by time:
if (total_histories > speed_test_time)
total_histories = (total_histories - speed_test_time)*node_speed; // Calculate the total number of remaining histories by "GPU speed" * "remaining time"
else
total_histories = 1; // Enough particles simulated already, simulate just one more history (block) and report (kernel call would fail if total_histories < or == 0).
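// Note: in the "simulate by time" mode total_histories initially holds the requested run time in seconds
// (see the 95000 threshold check above), so (remaining seconds)*(measured hist/s) yields the remaining number of histories.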
}
else
{
#ifdef USING_MPI
// -- Simulating a fixed number of histories divided among all GPUs (execution time variable):
// Compute the fraction of the total speed that accounts for the current MPI thread:
return_reduce = MPI_Allreduce(&node_speed, &total_speed, 1, MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD); // Sum all the times and send result to all processes
if (MPI_SUCCESS != return_reduce)
printf("\n\n !!ERROR!! Error reducing (MPI_Allreduce) the speed test results??? return_reduce = %d for thread %d\n\n\n", return_reduce, myID);
else
MASTER_THREAD
{
printf("\n -- Total speed for all GPUs (MPI_Allreduce) = %.3f hist/s; total histories simulated in the speed test (MPI_Allreduce) = %lld.\n", total_speed, total_histories_speed_test);
printf(" The master thread will simulate %.2f%% of the x rays in the simulation.\n", 100.0f*node_speed/total_speed);
}
#else
total_speed = node_speed;
#endif
// - Divide the remaining histories among the MPI threads (GPUs) according to their fraction of the total speed (rounding up).
if (total_histories_speed_test < total_histories)
total_histories = (unsigned long long int)(0.5 + ((double)(total_histories-total_histories_speed_test)) * ((double)(node_speed/total_speed)));
else
total_histories = numprocs; // Enough particles simulated already, simulate just one more history (block) and report (kernel call would fail if total_histories < or == 0).
}
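// Illustrative example (hypothetical speeds): if this GPU measured node_speed = 5000 hist/s and the MPI_Allreduce total is
// total_speed = 20000 hist/s, this MPI thread is assigned ~25% of the remaining histories.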
} // [Done with case of simulating projections by time or first projection by number of particles]
// else ==> if using only 1 GPU and a fixed number of histories the whole speed test is skipped. The random seed will be different for each projection because it is updated after calling the kernel below.
// fflush(stdout);
// MPI_Barrier(MPI_COMM_WORLD); // Synchronize MPI threads here if we want to have a better organized output text at the expense of losing some performance
// *** Perform the MC simulation itself (the speed test would be skipped for a single CPU thread using a fixed number of histories):
// -- Compute the number of CUDA blocks to simulate, rounding up and making sure it is below the limit of 65535 blocks.
// The total number of particles simulated will be increased to the nearest multiple "histories_per_thread".
double total_threads = ceil(((double)total_histories)/((double)histories_per_thread)); // Divide the histories among GPU threads, rounding up and avoiding overflow // New in MC-GPU v1.4 (Mina's bug)
int total_threads_blocks = (int)(((double)total_threads)/((double)num_threads_per_block) + 0.9990); // Divide the GPU threads among CUDA blocks, rounding up
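// Illustrative example (hypothetical values): total_histories = 1.0e9 and histories_per_thread = 5000 give total_threads = 200000;
// with num_threads_per_block = 128 this rounds up to 1563 CUDA blocks.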
if (total_threads_blocks>65535)
{
#ifdef USING_MPI
printf(" WARNING (MPI process #%d): %d hist per thread would produce %d CUDA blocks (>65535 maximum).", myID, histories_per_thread, total_threads_blocks);
#else
printf("\n WARNING: %d hist per thread would produce %d CUDA blocks, more than the maximum value of 65535.", histories_per_thread, total_threads_blocks);
#endif
total_threads_blocks = 65000; // Increase the histories per thread to have exactly 65000 blocks.
histories_per_thread = (int) ( ((double)total_histories)/((double)(total_threads_blocks*num_threads_per_block)) + 0.9990 );
printf(" Increasing to %d hist to run exactly %d blocks in the GPU.\n", histories_per_thread, total_threads_blocks);
}
else if (total_threads_blocks<1)
{
total_threads_blocks = 1; // Make sure we have at least 1 block to run
}
total_histories = ((unsigned long long int)(total_threads_blocks*num_threads_per_block))*histories_per_thread; // Total histories will be equal or higher than the input value due to the rounding up in the division of the histories
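// Note: because of the two round-ups above, the number of histories actually simulated can exceed the requested value
// by at most about one CUDA block's worth of histories (num_threads_per_block*histories_per_thread).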
float total_histories_current_kernel_float = (float)total_histories; // Keep a float approx of the num histories for the timing below
checkCudaErrors(cudaMemcpy(&seed_input, seed_input_device, sizeof(int), cudaMemcpyDeviceToHost)); // Download latest seed value used in the GPU. !!DBTv1.4!!
fflush(stdout);
#ifdef USING_MPI
MASTER_THREAD printf("\n\n");
printf(" ==> CUDA (MPI process #%d in \"%s\"): Executing %d blocks of %d threads, with %d histories in each thread: %lld histories in total (random seed=%d, num_p=%d).\n", myID, MPI_processor_name, total_threads_blocks, num_threads_per_block, histories_per_thread, total_histories, seed_input, num_p);
MPI_Barrier(MPI_COMM_WORLD); // Synchronize MPI threads to better organize output
#else
printf("\n ==> CUDA: Executing %d blocks of %d threads, with %d histories in each thread: %lld histories in total (random seed=%d, num_p=%d).\n", total_threads_blocks, num_threads_per_block, histories_per_thread, total_histories, seed_input, num_p);
#endif
fflush(stdout);
// -- Setup the execution parameters (Max number threads per block: 512, Max sizes each dimension of grid: 65535x65535x1)
dim3 blocks(total_threads_blocks, 1);
dim3 threads(num_threads_per_block, 1);
clock_kernel = clock();
// *** Execute the x-ray transport kernel in the GPU ***
track_particles<<<blocks,threads>>>(histories_per_thread, (short int)num_p, seed_input_device, image_device, voxels_Edep_device, voxel_mat_dens_device, bitree_device, mfp_Woodcock_table_device, mfp_table_a_device, mfp_table_b_device, rayleigh_table_device, compton_table_device, detector_data_device, source_data_device, materials_dose_device);
#ifdef USING_MPI
if (numprocs>1) // Using more than 1 MPI thread:
{
// -- Compute the total number of histories simulated by all MPI threads, including the speed test (histories_speed_test==0 if the speed test was skipped).
// These MPI messages are sent concurrently with the GPU kernel computation for maximum efficiency.
unsigned long long int current_GPU_histories = total_histories;
return_reduce = MPI_Reduce(&current_GPU_histories, &total_histories, 1, MPI_UNSIGNED_LONG, MPI_SUM, 0, MPI_COMM_WORLD); // Sum all the simulated particles and send to thread 0
}
#endif
if (1==doing_speed_test)
total_histories += total_histories_speed_test; // Speed test was done: compute the total number of histories including the particles simulated in the speed test
#ifdef USING_MPI
if (numprocs>1) // Using more than 1 MPI thread:
{
MASTER_THREAD
{
if (MPI_SUCCESS != return_reduce)
printf("\n\n !!ERROR!! Error getting the total number of particles simulated in all the GPUs (MPI_Reduce). return_reduce = %d.\n\n\n", return_reduce);
if (1==simulating_by_time || 1==doing_speed_test)
{
printf("\n -- Total number of histories being simulated in all the GPUs for the current projection (including speed test)= %.3lld.\n\n", total_histories);
}
}
}
#endif
fflush(stdout);
cudaThreadSynchronize(); // Force the runtime to wait until the GPU kernel is completed
getLastCudaError("\n\n !!Kernel execution failed while simulating particle tracks!! "); // Check if kernel execution generated any error
float real_GPU_speed = total_histories_current_kernel_float/(float(clock()-clock_kernel)/CLOCKS_PER_SEC); // GPU speed for all the image simulation, not just the speed test.
// #ifdef USING_MPI
// printf(" ==> CUDA (MPI process #%d in \"%s\"): GPU kernel execution time: %.4f s (%.3f hist/s)\n", myID, MPI_processor_name, time_kernel, total_histories_current_kernel_float/time_kernel);
// #else
// printf(" ==> CUDA: Kernel execution time: %.4f s\n", time_kernel);
// #endif
// -- Copy the simulated image from the GPU memory to the CPU:
checkCudaErrors(cudaMemcpy(image, image_device, image_bytes, cudaMemcpyDeviceToHost) ); // Copy final results to host
///////////////////////////////////////////////////////////////////////////////////////////////////
#else
// *** Executing the kernel in the CPU:
// If using more than one MPI thread, the number of particles is equally divided among the threads.
// !!DeBuG!! --> NOT USING SPEED TEST IN THE CPU!! Not possible to limit the execution by time in the CPU.
int total_threads = (int)(((double)total_histories)/((double)histories_per_thread*numprocs) + 0.9990); // Divide the histories among MPI threads, rounding up
unsigned long long int total_histories_per_thread = ((unsigned long long int)(total_threads))*histories_per_thread;
total_histories = total_histories_per_thread*numprocs; // Total histories will be equal or higher than the input value due to the rounding up in the division of the histories
if (numprocs>1)
{
#ifdef USING_MPI
update_seed_PRNG(myID, total_histories, &seed_input); // Compute the initial random seed for each MPI threads, avoiding overlapping of the random sequences
printf(" Executing %d history batches in the CPU, with %d histories in each batch (thread %d of %d at \'%s\'): %lld histories (random seed=%d).\n", total_threads, histories_per_thread, myID+1, numprocs, MPI_processor_name, total_histories_per_thread, seed_input);
MASTER_THREAD printf(" Simulating %lld histories in total for the %d MPI threads.\n\n", total_histories, numprocs);
#endif
}
else
{
printf(" Executing %d history batches in the CPU, with %d histories in each batch: %lld histories in total.\n\n", total_threads, histories_per_thread, total_histories);
}
fflush(stdout);
// -- Copy local structures to global struct variables accessible from "track_particles" (__constant__ variables in the GPU):
source_energy_data_CONST = source_energy_data;
voxel_data_CONST = voxel_data;
mfp_table_data_CONST = mfp_table_data;
dose_ROI_x_min_CONST = dose_ROI_x_min;
dose_ROI_x_max_CONST = dose_ROI_x_max;
dose_ROI_y_min_CONST = dose_ROI_y_min;
dose_ROI_y_max_CONST = dose_ROI_y_max;
dose_ROI_z_min_CONST = dose_ROI_z_min;
dose_ROI_z_max_CONST = dose_ROI_z_max;
int CPU_batch;
for(CPU_batch=0; CPU_batch<total_threads; CPU_batch++)
{
// -- Simulate a particle track initializing the PRNG with the particle number 'n':
track_particles(CPU_batch, histories_per_thread, (short int)num_p, seed_input_device, image, voxels_Edep, voxel_mat_dens, bitree_device, mfp_Woodcock_table, mfp_table_a, mfp_table_b, &rayleigh_table, &compton_table, detector_data, source_data, materials_dose);
}
#endif
// Get current time and calculate execution time in the MC loop:
time_elapsed_MC_loop = ((double)(clock()-clock_start))/CLOCKS_PER_SEC;
time_total_MC_simulation += time_elapsed_MC_loop; // Count total time (in seconds).
// printf("\n -- MONTE CARLO LOOP finished: time tallied in MAIN program: %.3f s\n\n", time_elapsed_MC_loop);
///////////////////////////////////////////////////////////////////////////////////////////////////
// *** Move the images simulated in the GPU (or multiple CPU cores) to the host memory space:
#ifdef USING_MPI
if (numprocs>1) // Using more than 1 MPI thread
{
// -- Add the images simulated in all the MPI threads:
MASTER_THREAD printf("\n >> Synchronize the MPI threads and accumulate the simulated images (MPI_Reduce).\n\n");
// Allocate the memory for the final image in the master thread:
unsigned long long int *image_MPI = NULL;
MASTER_THREAD image_MPI = (unsigned long long int*) malloc(image_bytes);
MASTER_THREAD if (image_MPI==NULL)
{
printf("\n\n !!malloc ERROR!! Problem allocating the total MPI image. Out of memory??\n\n");
exit(-4);
}
// !!DeBuG!! To know how much time the threads lose waiting for other threads in the MPI_Reduce, I have to use an explicit barrier here. It may be more efficient to let the threads advance to the MPI_Reduce directly.
clock_start = clock();
MPI_Barrier(MPI_COMM_WORLD); // Synchronize MPI threads
current_time=time(NULL);
char_time = ctime(&current_time); char_time[19] = '\0'; // The time is located between the characters 11 and 19.
#ifdef USING_CUDA
if (1==doing_speed_test) // This message will be shown only for the first projection simulated in the GPU.
printf(" ==> CUDA (MPI process #%d in \"%s\"): GPU speed = %.4f hist/s. Time spent at MPI_Barrier waiting to add the partial images: %.6f s (time: %8s)\n", myID, MPI_processor_name, real_GPU_speed, ((double)(clock()-clock_start))/CLOCKS_PER_SEC, &char_time[11]);
#else
if (-1==doing_speed_test)
{
printf(" ==> CUDA (MPI process #%d in \"%s\"): Time spent at MPI_Barrier waiting to add the partial images: %.6f s (time: %8s)\n", myID, MPI_processor_name, ((double)(clock()-clock_start))/CLOCKS_PER_SEC, &char_time[11]);
doing_speed_test = 0;
}
#endif
fflush(stdout);
MASTER_THREAD clock_start = clock();
// -- Sum the pixel values from the different simulated images and send to thread 0.
// MPI_Reduce will act as a synchronization barrier for all the MPI threads.
int num_pixels_image = image_bytes/((int)sizeof(unsigned long long int)); // Number of elements allocated in the "image" array.
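// Note: num_pixels_image is really the number of unsigned long long elements in the image buffer (image_bytes/sizeof(element)),
// which is the element count MPI_Reduce needs; if the buffer holds several scatter-class sub-images per pixel, all of them are summed here.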
return_reduce = MPI_Reduce(image, image_MPI, num_pixels_image, MPI_UNSIGNED_LONG, MPI_SUM, 0, MPI_COMM_WORLD);
if (MPI_SUCCESS != return_reduce)
{
printf("\n\n !!ERROR!! Possible error reducing (MPI_SUM) the image results??? Returned value MPI_Reduce = %d\n\n\n", return_reduce);
}
// -- Exchange the image simulated in thread 0 for the final image from all threads, in the master thread:
MASTER_THREAD
{
free(image);
image = image_MPI; // point the image pointer to the new image in host memory
image_MPI = NULL;
printf("\n -- Time reducing the images simulated by all the MPI threads (MPI_Reduce) according to the master thread = %.6f s.\n", ((double)(clock()-clock_start))/CLOCKS_PER_SEC);
}
}
#endif
// *** Report the final results:
char file_name_output_num_p[253];
// if (1==num_projections || (flag_simulateMammoAfterDBT && 0==num_p)) // !!DBTv1.4!!
if (1==num_projections)
strcpy(file_name_output_num_p, file_name_output); // Use the input name for single projection
else
sprintf(file_name_output_num_p, "%s_%04d", file_name_output, num_p); // Create the output file name with the input name + projection number (4 digits, padding with 0)
if (num_p>0)
{
MASTER_THREAD report_image(file_name_output_num_p, detector_data, source_data, mean_energy_spectrum, image, time_elapsed_MC_loop, total_histories, num_p, num_projections, myID, numprocs, current_angle, &seed_input);
}
else
{
// Projection 0 happens only when num_projections==1 or when flag_simulateMammoAfterDBT==true:
MASTER_THREAD report_image(file_name_output_num_p, detector_data, source_data, mean_energy_spectrum, image, time_elapsed_MC_loop, total_histories, 0, 1, myID, numprocs, current_angle, &seed_input);
}
// *** Clear the image after reporting, unless this is the last projection to simulate:
if (num_p<num_projections)
{
int pixels_per_image = detector_data[0].num_pixels.x * detector_data[0].num_pixels.y;
#ifdef USING_CUDA
MASTER_THREAD printf(" ==> CUDA: Launching kernel to reset the device image to 0: number of blocks = %d, threads per block = 128\n", (int)(ceil(pixels_per_image/128.0f)+0.01f) );
init_image_array_GPU<<<(int)(ceil(pixels_per_image/128.0f)+0.01f),128>>>(image_device, pixels_per_image);
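// Illustrative example (hypothetical detector): a 3000x1500 pixel image would need ceil(4500000/128.0) = 35157 blocks of 128 threads.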
fflush(stdout);
cudaThreadSynchronize();
getLastCudaError("\n\n !!Kernel execution failed initializing the image array!! "); // Check if kernel execution generated any error:
#else
memset(image, 0, image_bytes); // Init memory space to 0. (see http://www.lainoox.com/c-memset-examples/)
#endif
}
if (num_p==0 && flag_material_dose==1 && flag_simulateMammoAfterDBT) // !!DBTv1.4!!
{
// --Report "tally_materials_dose" for the first projection corresponding to a mammo acquisition, and reset dose counter. The dose for the DBT scan only will be reported at the end: !!mammo-DBT!!
MASTER_THREAD printf("\n\n   !!DBT!! Reporting \"tally_materials_dose\" for the first 0 deg projection, and resetting the material and voxel dose counters.\n");
checkCudaErrors( cudaMemcpy( materials_dose, materials_dose_device, MAX_MATERIALS*sizeof(ulonglong2), cudaMemcpyDeviceToHost) ); // Copy materials dose results to host
#ifdef USING_MPI
ulonglong2 materials_dose_total[MAX_MATERIALS];
return_reduce = MPI_Reduce(materials_dose, materials_dose_total, 2*MAX_MATERIALS, MPI_UNSIGNED_LONG_LONG, MPI_SUM, 0, MPI_COMM_WORLD); // !!tally_materials_dose!!
#else
ulonglong2 *materials_dose_total = materials_dose; // Create a dummy pointer to the materials_dose data
#endif
MASTER_THREAD report_materials_dose(1, total_histories, density_nominal, materials_dose_total, mass_materials, file_name_materials); // Report the material dose for the mammo image only !!tally_materials_dose!!
int kk;
for(kk=0;kk<MAX_MATERIALS;kk++) // Reset dose in CPU and GPU memory
{
materials_dose[kk].x = 0;
materials_dose[kk].y = 0;
}
checkCudaErrors(cudaMemcpy(materials_dose_device, materials_dose, MAX_MATERIALS*sizeof(ulonglong2), cudaMemcpyHostToDevice)); // !!tally_materials_dose!!
// --Reject the voxel doses tallied for this first projection: re-copy the empty host data to GPU // !!DeBuG!! (It would be more efficient to disable the tally in kernel directly for the first projection...)
if (dose_ROI_x_max > -1)
checkCudaErrors(cudaMemcpy(voxels_Edep_device, &voxels_Edep, voxels_Edep_bytes, cudaMemcpyHostToDevice) ); // !!DBTv1.4!!
}
} // [Projection loop end: iterate for next CT projection angle]
///////////////////////////////////////////////////////////////////////////////////////////////////
// *** Simulation finished! Report dose and timings and clean up.
#ifdef USING_CUDA
if (dose_ROI_x_max > -1)
{
MASTER_THREAD clock_kernel = clock();
checkCudaErrors( cudaMemcpy( voxels_Edep, voxels_Edep_device, voxels_Edep_bytes, cudaMemcpyDeviceToHost) ); // Copy final dose results to host (for every MPI threads)
MASTER_THREAD printf(" ==> CUDA: Time copying dose results from device to host: %.6f s\n", float(clock()-clock_kernel)/CLOCKS_PER_SEC);
}
if (flag_material_dose==1)
checkCudaErrors( cudaMemcpy( materials_dose, materials_dose_device, MAX_MATERIALS*sizeof(ulonglong2), cudaMemcpyDeviceToHost) ); // Copy materials dose results to host, if tally enabled in input file. !!tally_materials_dose!!
// -- Clean up GPU device memory:
clock_kernel = clock();
cudaFree(voxel_mat_dens_device);
cudaFree(image_device);
cudaFree(mfp_Woodcock_table_device);
cudaFree(mfp_table_a_device);
cudaFree(mfp_table_b_device);
cudaFree(voxels_Edep_device);
checkCudaErrors( cudaThreadExit() );
MASTER_THREAD printf(" ==> CUDA: Time freeing the device memory and ending the GPU threads: %.6f s\n", float(clock()-clock_kernel)/CLOCKS_PER_SEC);
#endif
#ifdef USING_MPI
current_time=time(NULL); // Get current time (in seconds)
char_time = ctime(&current_time); char_time[19] = '\0'; // The time is located between the characters 11 and 19.
printf(" >> MPI thread %d in \"%s\" done! (local time: %s)\n", myID, MPI_processor_name, &char_time[11]);
fflush(stdout); // Clear the screen output buffer
#endif
// *** Report the total dose for all the projections, if the tally is not disabled (must be done after MPI_Barrier to have all the MPI threads synchronized):
MASTER_THREAD clock_start = clock();
if (dose_ROI_x_max > -1)
{
#ifdef USING_MPI
if (numprocs>1)
{
// -- Use MPI_Reduce to accumulate the dose from all projections:
// Allocate memory in the root node to combine the dose results with MPI_REDUCE:
int num_voxels_ROI = voxels_Edep_bytes/((int)sizeof(ulonglong2)); // Number of elements allocated in the "dose" array.
ulonglong2 *voxels_Edep_total = (ulonglong2*) malloc(voxels_Edep_bytes);
if (voxels_Edep_total==NULL)
{
printf("\n\n !!malloc ERROR!! Not enough memory to allocate %d voxels by the MPI root node for the total deposited dose (and uncertainty) array (%f Mbytes)!!\n\n", num_voxels_ROI, voxels_Edep_bytes/(1024.f*1024.f));
exit(-2);
}
else
{
MASTER_THREAD
{
printf("\n >> Array for the total deposited dose correctly allocated by the MPI root node (%f Mbytes).\n", voxels_Edep_bytes/(1024.f*1024.f));
printf( " Waiting at MPI_Barrier for thread synchronization.\n");
}
}
MASTER_THREAD printf("\n >> Calling MPI_Reduce to accumulate the dose from all projections...\n\n");
return_reduce = MPI_Reduce(voxels_Edep, voxels_Edep_total, 2*num_voxels_ROI, MPI_UNSIGNED_LONG_LONG, MPI_SUM, 0, MPI_COMM_WORLD); // Sum all the doses in "voxels_Edep_total" at thread 0.
// !!DeBuG!! I am sending a "ulonglong2" array as if it was composed of 2 "ulonglong" variables per element. There could be problems if the alignment in the structure includes some extra padding space (but it seems ok for a 64-bit computer).
if (MPI_SUCCESS != return_reduce)
{
printf("\n\n !!ERROR!! Possible error reducing (MPI_SUM) the dose results??? return_reduce = %d for thread %d\n\n\n", return_reduce, myID);
}
// -- Exchange the dose simulated in thread 0 for the final dose from all threads
MASTER_THREAD
{
free(voxels_Edep);
voxels_Edep = voxels_Edep_total; // point the voxels_Edep pointer to the final voxels_Edep array in host memory
voxels_Edep_total = NULL; // This pointer is not needed by now
}
}
#endif
// -- Report the total dose for all the projections:
MASTER_THREAD report_voxels_dose(file_dose_output, num_projections, &voxel_data, voxel_mat_dens, voxels_Edep, time_total_MC_simulation, total_histories, dose_ROI_x_min, dose_ROI_x_max, dose_ROI_y_min, dose_ROI_y_max, dose_ROI_z_min, dose_ROI_z_max, source_data);
}
// -- Report "tally_materials_dose" with data from all MPI threads, if tally enabled:
if (flag_material_dose==1)
{
#ifdef USING_MPI
ulonglong2 materials_dose_total[MAX_MATERIALS];
return_reduce = MPI_Reduce(materials_dose, materials_dose_total, 2*MAX_MATERIALS, MPI_UNSIGNED_LONG_LONG, MPI_SUM, 0, MPI_COMM_WORLD); // !!tally_materials_dose!!
#else
ulonglong2 *materials_dose_total = materials_dose; // Create a dummy pointer to the materials_dose data
#endif
MASTER_THREAD report_materials_dose(num_projections, total_histories, density_nominal, materials_dose_total, mass_materials, file_name_materials); // Report the material dose !!tally_materials_dose!!
}
MASTER_THREAD clock_end = clock();
MASTER_THREAD printf("\n\n ==> CUDA: Time reporting the dose data: %.6f s\n", ((double)(clock_end-clock_start))/CLOCKS_PER_SEC);
// *** Clean up RAM memory. If CUDA was used, the geometry and table data were already cleaned for MPI threads other than root after copying data to the GPU:
free(voxels_Edep);
free(image);
#ifdef USING_CUDA
MASTER_THREAD free(voxel_mat_dens);
#else
free(voxel_mat_dens);
free(mfp_Woodcock_table);
free(mfp_table_a);
free(mfp_table_b);
#endif
MASTER_THREAD
{
printf("\n\n\n -- SIMULATION FINISHED!\n");
time_total_MC_init_report = ((double)(clock()-clock_start_beginning))/CLOCKS_PER_SEC;
unsigned long long int total_histories_final = total_histories*((unsigned long long int)num_projections);
if (flag_simulateMammoAfterDBT)
total_histories_final = total_histories_final + total_histories_final*SCALE_MAMMO_DBT; // Add the histories for both the tomo and the 0 deg projection
// -- Report total performance:
printf("\n\n ****** TOTAL SIMULATION PERFORMANCE (including initialization and reporting) ******\n\n");
printf( " >>> Execution time including initialization, transport and report: %.3f s.\n", time_total_MC_init_report);
printf( " >>> Time spent in the Monte Carlo transport only: %.3f s.\n", time_total_MC_simulation);
printf( " >>> Time spent in initialization, reporting and clean up: %.3f s.\n", (time_total_MC_init_report-time_total_MC_simulation));
printf( " >>> Total number of simulated x rays: %lld\n", total_histories_final);
if (time_total_MC_init_report>0.000001)
printf( " >>> Total speed (using %d thread, including transport, initialization and report times) [x-rays/s]: %.2f\n", numprocs, (double)(total_histories_final/time_total_MC_init_report));
printf( " >>> Total speed Monte Carlo transport only (using %d thread) [x-rays/s]: %.2f\n\n", numprocs, (double)(total_histories_final/time_total_MC_simulation));
current_time=time(NULL); // Get current time (in seconds)
printf("\n****** Code execution finished on: %s\n\n", ctime(¤t_time));
}
#ifdef USING_CUDA
cudaDeviceReset(); // Destroy the CUDA context before ending program (flush visual debugger data).
#endif
#ifdef USING_MPI
MPI_Finalize(); // Finalize MPI library: no more MPI calls allowed below.
#endif
return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Read the input file given in the command line and return the significant data.
//! Example input file:
//!
//! 1000000 [Total number of histories to simulate]
//! geometry.vox [Voxelized geometry file name]
//! material.mat [Material data file name]
//!
//! @param[in] argc Command line parameters
//! @param[in] argv Command line parameters: name of input file
//! @param[out] total_histories Total number of particles to simulate
//! @param[out] seed_input Input random number generator seed
//! @param[out] num_threads_per_block Number of CUDA threads for each GPU block
//! @param[out] detector_data
//! @param[out] image
//! @param[out] source_data
//! @param[out] file_name_voxels
//! @param[out] file_name_materials
//! @param[out] file_name_output
////////////////////////////////////////////////////////////////////////////////
void read_input(int argc, char** argv, int myID, unsigned long long int* total_histories, int* seed_input, int* gpu_id, int* num_threads_per_block, int* histories_per_thread, struct detector_struct* detector_data, unsigned long long int** image_ptr, int* image_bytes, struct source_struct* source_data, struct source_energy_struct* source_energy_data, struct voxel_struct* voxel_data, char* file_name_voxels, char file_name_materials[MAX_MATERIALS][250] , char* file_name_output, char* file_name_espc, int* num_projections, ulonglong2** voxels_Edep_ptr, int* voxels_Edep_bytes, char* file_dose_output, short int* dose_ROI_x_min, short int* dose_ROI_x_max, short int* dose_ROI_y_min, short int* dose_ROI_y_max, short int* dose_ROI_z_min, short int* dose_ROI_z_max, double* SRotAxisD, double* translation_helical, int* flag_material_dose, bool* flag_simulateMammoAfterDBT, bool* flag_detectorFixed)
{
FILE* file_ptr = NULL;
char new_line[250];
char *new_line_ptr = NULL;
double dummy_double;
// -- Read the input file name from command line, if given (otherwise keep default value):
if (2==argc)
{
file_ptr = fopen(argv[1], "r");
if (NULL==file_ptr)
{
printf("\n\n !!read_input ERROR!! Input file not found or not readable. Input file name: \'%s\'\n\n", argv[1]);
// Not finalizing MPI here because we want the execution to fail if there is a problem with any MPI thread!!! MPI_Finalize(); // Finalize MPI library: no more MPI calls allowed below.
exit(-1);
}
}
else if (argc>2)
{
MASTER_THREAD printf("\n\n !!read_input ERROR!! Too many input parameters (argc=%d)!! Provide only the input file name.\n\n", argc);
// Finalizing MPI because all threads will detect the same problem and fail together.
#ifdef USING_MPI
MPI_Finalize();
#endif
exit(-1);
}
else
{
MASTER_THREAD printf("\n\n !!read_input ERROR!! Input file name not given as an execution parameter!! Try again...\n\n");
#ifdef USING_MPI
MPI_Finalize();
#endif
exit(-1);
}
MASTER_THREAD printf("\n -- Reading the input file \'%s\':\n", argv[1]);
/////////////////////////////////////////////////////////////////////////////
// -- Init. [SECTION SIMULATION CONFIG v.2009-05-12]:
do
{
new_line_ptr = fgets(new_line, 250, file_ptr); // Read full line (max. 250 characters).
if (new_line_ptr==NULL)
{
printf("\n\n !!read_input ERROR!! Input file is not readable or does not contain the string \'SECTION SIMULATION CONFIG v.2009-05-12\'!!\n");
exit(-2);
}
}
while(strstr(new_line,"SECTION SIMULATION CONFIG v.2009-05-12")==NULL); // Skip comments and empty lines until the section begins
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%lf", &dummy_double);
*total_histories = (unsigned long long int) (dummy_double+0.0001); // Maximum unsigned long long value: 18446744073709551615
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%d", seed_input); // Set the RANECU PRNG seed (the same seed will be used to init the 2 MLCGs in RANECU)
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%d", gpu_id); // GPU NUMBER WHERE SIMULATION WILL RUN
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%d", num_threads_per_block); // GPU THREADS PER CUDA BLOCK
#ifdef USING_CUDA
if ((*num_threads_per_block%32)!=0)
{
MASTER_THREAD printf("\n\n !!read_input ERROR!! The input number of GPU threads per CUDA block must be a multiple of 32 (warp size). Input value: %d !!\n\n", *num_threads_per_block);
#ifdef USING_MPI
MPI_Finalize();
#endif
exit(-2);
}
#endif
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%d", histories_per_thread); // HISTORIES PER GPU THREAD
/////////////////////////////////////////////////////////////////////////////
// -- Init. [SECTION SOURCE v.2016-12-02]: !!DBTv1.4!! (replaces the older formats [SECTION SOURCE v.2011-07-12] and [SECTION SOURCE v.2009-05-12])
do
{
new_line_ptr = fgets(new_line, 250, file_ptr);
if (new_line_ptr==NULL)
{
printf("\n\n !!read_input ERROR!! Input file is not readable or does not contain the string \'SECTION SOURCE v.2016-12-02\'!!\n"); // !!DBTv1.4!!
exit(-2);
}
}
while(strstr(new_line,"SECTION SOURCE v.2016-12-02")==NULL); // Skip comments and empty lines until the section begins
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); // X-RAY ENERGY SPECTRUM FILE
trim_name(new_line, file_name_espc);
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%f %f %f", &source_data[0].position.x, &source_data[0].position.y, &source_data[0].position.z); // SOURCE POSITION: X Y Z [cm]
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%f %f %f", &source_data[0].direction.x, &source_data[0].direction.y, &source_data[0].direction.z); // SOURCE DIRECTION COSINES: U V W
// -- Normalize the input beam direction to 1:
dummy_double = 1.0/sqrt((double)(source_data[0].direction.x*source_data[0].direction.x + source_data[0].direction.y*source_data[0].direction.y + source_data[0].direction.z*source_data[0].direction.z));
source_data[0].direction.x = (float)(((double)source_data[0].direction.x)*dummy_double);
source_data[0].direction.y = (float)(((double)source_data[0].direction.y)*dummy_double);
source_data[0].direction.z = (float)(((double)source_data[0].direction.z)*dummy_double);
// Read input fan beam polar (theta) and azimuthal (phi) aperture angles (deg):
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
double phi_aperture, theta_aperture;
sscanf(new_line, "%lf %lf", &phi_aperture, &theta_aperture);
if (0.5*theta_aperture > 180.0)
{
MASTER_THREAD printf("\n\n !!read_input ERROR!! Input polar semi-aperture must be in [0,180] deg.!\n");
MASTER_THREAD printf(" 0.5*theta_aperture = %lf, 0.5*phi_aperture = %lf\n", 0.5*theta_aperture, 0.5*phi_aperture);
#ifdef USING_MPI
MPI_Finalize();
#endif
exit(-2);
}
if (0.5*phi_aperture > 360.0)
{
MASTER_THREAD printf("\n\n !!read_input ERROR!! Input azimuthal semi-aperture must be in [0,360] deg.!\n");
MASTER_THREAD printf(" 0.5*theta_aperture = %lf, 0.5*phi_aperture = %lf\n", 0.5*theta_aperture, 0.5*phi_aperture);
#ifdef USING_MPI
MPI_Finalize();
#endif
exit(-2);
}
// Read the source rotation: necessary to define which direction is azimuthal (width) and polar (height) in the rotated source emission: !!DBTv1.4!!
double rotZ1, rotY2, rotZ3;
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%lf %lf %lf", &rotZ1, &rotY2, &rotZ3); // EULER ANGLES (RzRyRz) TO ROTATE RECTANGULAR BEAM FROM DEFAULT POSITION AT Y=0, NORMAL=(0,-1,0) //!!DBTv1.4!!
// *** Init the fan beam source model:
source_data[0].rot_fan[0] = 1; source_data[0].rot_fan[1] = 0; source_data[0].rot_fan[2] = 0; // Init rotation matrix to identity
source_data[0].rot_fan[3] = 0; source_data[0].rot_fan[4] = 1; source_data[0].rot_fan[5] = 0;
source_data[0].rot_fan[6] = 0; source_data[0].rot_fan[7] = 0; source_data[0].rot_fan[8] = 1;
create_rotation_matrix_around_axis(rotZ1*DEG2RAD, 0, 0, 1, source_data[0].rot_fan); // 1st rotation around Z !!DBTv1.4!!
create_rotation_matrix_around_axis(rotY2*DEG2RAD, 0, 1, 0, source_data[0].rot_fan); // 2nd rotation around Y !!DBTv1.4!!
create_rotation_matrix_around_axis(rotZ3*DEG2RAD, 0, 0, 1, source_data[0].rot_fan); // 3rd rotation around Z !!DBTv1.4!!
MASTER_THREAD printf(" Input Euler angles to rotate the source from (0,1,0) to the input direction [deg]: rotZ1=%f , rotY2=%f , rotZ3=%f\n", rotZ1, rotY2, rotZ3); // !!DBTv1.4!! !!VERBOSE!!
// printf("\n [%f %f %f]\n",source_data[0].rot_fan[0],source_data[0].rot_fan[1],source_data[0].rot_fan[2]);
// printf( " Rotation matrix: Rodrigues = |%f %f %f|\n",source_data[0].rot_fan[3],source_data[0].rot_fan[4],source_data[0].rot_fan[5]); // !!DBTv1.4!! !!VERBOSE!!
// printf( " [%f %f %f]\n\n",source_data[0].rot_fan[6],source_data[0].rot_fan[7],source_data[0].rot_fan[8]);
float3 default_direction;
default_direction.x = 0.0f; default_direction.y = 1.0f; default_direction.z = 0.0f;
apply_rotation(&default_direction, source_data[0].rot_fan);
if ( fabsf(default_direction.x-source_data[0].direction.x)>1e-5f || fabsf(default_direction.y-source_data[0].direction.y)>1e-5f || fabsf(default_direction.z-source_data[0].direction.z)>1e-5f )
{
MASTER_THREAD
{
printf("\n\n!!WARNING!! The input Euler rotation angles for the source are incorrect!!!!!\n"); // !!DBTv1.4!! !!DeBuG!!
printf( " The Euler angles are defined as a rotation around Z axis, then Y, then Z again; positive rotations are counter-clock (eg, to move the detector from Y=0 to Z=0, input: 90.0, -90.0, 180.0).\n");
printf( " The input angles would rotate the default source direction (0,1,0) towards direction (%f,%f,%f), but the input direction was (%f,%f,%f) instead.\n\n", default_direction.x, default_direction.y, default_direction.z, source_data[0].direction.x, source_data[0].direction.y, source_data[0].direction.z);
printf( " Please, provide a consistent set of source direction and Euler angle rotation or the code can't determine the orientation of the square field and detector. \n\n\n"); // !!DBTv1.4!! !!DeBuG!!
}
#ifdef USING_MPI
MPI_Finalize();
#endif
exit(-1); // !!DBTv1.4!! !!DeBuG!!
}
// Read parameters for the non-ideal focal spot: !!DBTv1.4!!
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%f", &source_data[0].focal_spot_FWHM); // SOURCE GAUSSIAN FOCAL SPOT FWHM [cm]
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%f", &source_data[0].rotation_blur); // ANGULAR BLUR DUE TO MOVEMENT ([exposure_time]*[angular_speed]) [degrees]
source_data[0].rotation_blur = fabsf(source_data[0].rotation_blur*DEG2RAD);
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); // COLLIMATE BEAM TOWARDS POSITIVE X ANGLES ONLY? (ie, cone-beam center aligned with chest wall in mammography) [YES/NO]
if (0==strncmp("YE",new_line,2) || 0==strncmp("Ye",new_line,2) || 0==strncmp("ye",new_line,2))
source_data[0].flag_halfConeX = true;
// MASTER_THREAD printf(" \'flag_halfConeX = true\': sampling only upper half beam for mammo geometry; beam centered at image edge.\n"); // !!DBT!! !!HalfBeam!! !!DBTv1.4!!
else if (0==strncmp("NO",new_line,2) || 0==strncmp("No",new_line,2) || 0==strncmp("no",new_line,2))
source_data[0].flag_halfConeX = false;
else
{
MASTER_THREAD printf("\n\n !!read_input ERROR!! Answer YES or NO in the beam collimation question in \'SECTION SOURCE\'.\n Input text: %s\n\n",new_line);
#ifdef USING_MPI
MPI_Finalize();
#endif
exit(-2);
}
/////////////////////////////////////////////////////////////////////////////
// -- Init. [SECTION IMAGE DETECTOR v.2017-06-20]:   !!DBTv1.5!!
do
{
new_line_ptr = fgets(new_line, 250, file_ptr);
if (new_line_ptr==NULL)
{
printf("\n\n !!read_input ERROR!! Input file is not readable or does not contain the string \'SECTION IMAGE DETECTOR v.2017-06-20]\'!!\n");
exit(-2);
}
}
while(strstr(new_line,"SECTION IMAGE DETECTOR v.2017-06-20")==NULL); // Skip comments and empty lines until the section begins !!DBTv1.5!!
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
trim_name(new_line, file_name_output); // OUTPUT IMAGE FILE NAME (no spaces)
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
float dummy_num_pixels_x, dummy_num_pixels_y;   // Read the input pixel numbers as float and truncate to integer
sscanf(new_line, "%f %f", &dummy_num_pixels_x, &dummy_num_pixels_y); // NUMBER OF PIXELS IN THE IMAGE: Nx Nz
detector_data[0].num_pixels.x = (int)(dummy_num_pixels_x+0.001f);
detector_data[0].num_pixels.y = (int)(dummy_num_pixels_y+0.001f);
detector_data[0].total_num_pixels = detector_data[0].num_pixels.x * detector_data[0].num_pixels.y;
if (detector_data[0].total_num_pixels < 1 || detector_data[0].total_num_pixels > 99999999 )
{
MASTER_THREAD printf("\n\n !!read_input ERROR!! The input number of pixels is incorrect. Input: X_pix = %d, Y_pix = %d, total_num_pix = %d!!\n\n", detector_data[0].num_pixels.x, detector_data[0].num_pixels.y, detector_data[0].total_num_pixels);
#ifdef USING_MPI
MPI_Finalize();
#endif
exit(-2);
}
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%f %f", &detector_data[0].width_X, &detector_data[0].height_Z); // IMAGE SIZE (width, height): Dx Dz [cm]
detector_data[0].inv_pixel_size_X = detector_data[0].num_pixels.x / detector_data[0].width_X;
detector_data[0].inv_pixel_size_Z = detector_data[0].num_pixels.y / detector_data[0].height_Z;
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%f", &detector_data[0].sdd); // SOURCE-TO-DETECTOR DISTANCE [cm] (detector set in front of the source, normal to the input direction)
if ((detector_data[0].sdd)<1.0e-6)
{
MASTER_THREAD printf("\n\n !!read_input ERROR!! The source-to-detector distance must be positive. Input: sdd=%f!!\n\n", detector_data[0].sdd);
#ifdef USING_MPI
MPI_Finalize();
#endif
exit(-2);
}
// Input parameters for the improved detector model: !!DBTv1.4!!
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%f %f", &detector_data[0].offset.x, &detector_data[0].offset.y); // IMAGE OFFSET ON DETECTOR PLANE IN WIDTH AND HEIGHT DIRECTIONS (BY DEFAULT BEAM CENTERED AT IMAGE CENTER) [cm] !!DBTv1.4!!
if (source_data[0].flag_halfConeX)
detector_data[0].offset.y = detector_data[0].offset.y + 0.5*detector_data[0].height_Z; // Center the cone beam at the edge of the image with a halfCone (mammo). !!DBT!! !!HalfBeam!! !!DBTv1.4!!
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%f", &detector_data[0].scintillator_thickness); // DETECTOR THICKNESS [cm] !!DBTv1.4!!
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%f", &detector_data[0].scintillator_MFP); // DETECTOR MATERIAL AVERAGE MEAN FREE PATH [1/cm] !!DBTv1.4!!
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%f %f %f %f", &detector_data[0].kedge_energy, &detector_data[0].fluorescence_energy, &detector_data[0].fluorescence_yield, &detector_data[0].fluorescence_MFP); // DETECTOR K-EDGE ENERGY [eV], K-FLUORESCENCE ENERGY [eV], K-FLUORESCENCE YIELD, MFP AT FLUORESCENCE ENERGY [cm]
// NOTE: K-EDGE ENERGY, K-FLUORESCENCE ENERGY and K-FLUORESCENCE YIELD are tabulated in the XRAYLIB and other tables !!DBTv1.4!!
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
float Swank_factor = -1.0f;
sscanf(new_line, "%f %f", &detector_data[0].gain_W, &Swank_factor); // EFECTIVE DETECTOR GAIN, W_+- [eV/ehp], SWANK FACTOR (input 0 to report ideal energy fluence)
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%f", &detector_data[0].electronic_noise); // ELECTRONIC NOISE LEVEL (electrons/pixel)
if (detector_data[0].gain_W<0.001f || Swank_factor<0.001f)
{
MASTER_THREAD printf("\n !!read_input!! Negative gain or Swank factor input: reporting pixel values as energy fluence, disabling conversion to detected charges and electronic noise.\n\n");
detector_data[0].gain_W = 0.0f;
detector_data[0].Swank_rel_std = 0.0f;
detector_data[0].electronic_noise = 0.0f;
}
else
{
if (Swank_factor > 0.9999995f)
detector_data[0].Swank_rel_std = 0.0f; // Swank_rel_std = 0 ==> Swank factor = 1 ==> no variability in the amount of ehp generated
else
detector_data[0].Swank_rel_std = sqrtf(1.0f/Swank_factor - 1.0f);
}
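// Illustrative example of the Swank factor conversion above: an input Swank factor of 0.9 gives
//    Swank_rel_std = sqrtf(1.0f/0.9f - 1.0f) = sqrtf(0.111...) ~ 0.333
// ie, the number of electron-hole pairs generated per detected x ray fluctuates with a ~33% relative standard deviation,
// while a Swank factor of 1 gives Swank_rel_std=0 (no fluctuation), consistent with the special case handled above.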
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%f %f", &detector_data[0].cover_thickness, &detector_data[0].cover_MFP); // PROTECTIVE COVER THICKNESS (detector and grid) [cm], MEAN FREE PATH AT AVERAGE ENERGY [cm] !!DBTv1.5!!
float grid_strip_MFP=-1.0f, grid_interspace_MFP=-1.0f;
int grid_orientation=99;
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%f %f %f", &detector_data[0].grid_ratio, &detector_data[0].grid_freq, &detector_data[0].grid_strip_thickness); // ANTISCATTER GRID RATIO, FREQUENCY, STRIP THICKNESS [X:1, lp/cm, cm] !!DBTv1.5!!
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%f %f", &grid_strip_MFP, &grid_interspace_MFP); // ANTISCATTER STRIPS AND INTERSPACE MEAN FREE PATHS AT MEAN ENERGY [cm] !!DBTv1.5!!
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%d", &grid_orientation); // ORIENTATION 1D FOCUSED ANTISCATTER GRID LINES: 0==STRIPS PERPENDICULAR LATERAL DIRECTION (mammo style); 1==STRIPS PARALLEL LATERAL DIRECTION (DBT style) !!DBTv1.5!!
detector_data[0].grid_strip_mu = 1.0f/grid_strip_MFP; // Store the coefficients of attenuation for the attenuating strips and the interspace material [1/cm]
detector_data[0].grid_interspace_mu = 1.0f/grid_interspace_MFP;
if (detector_data[0].grid_ratio<1e-7f || detector_data[0].grid_freq<1e-7f || detector_data[0].grid_strip_thickness<2e-8f)
{
detector_data[0].grid_freq = -1.0f; // Signal that the grid is disabled
}
if (0==grid_orientation)
detector_data[0].grid_ratio = -1.0f*detector_data[0].grid_ratio; // A negative grid ratio will signal orientation 0 !!DBTv1.5!!
else if (grid_orientation!=0 && grid_orientation!=1)
{
if (detector_data[0].grid_freq>0.0f)
{
MASTER_THREAD printf("\n\n !!read_input ERROR!! Incorrect grid orientation value: input 0 for strips perpendicular to image width (lateral direction) as in mammography, or 1 for strips parallel to image widtht. Input: orientation=%d!!\n\n", grid_orientation);
#ifdef USING_MPI
MPI_Finalize();
#endif
exit(-2); // !!DBTv1.5!!
}
}
if ( (theta_aperture < -1.0e-7) || (phi_aperture < -1.0e-7) )
{
// Negative angle input: set total fan beam angle to exactly cover the detector surface.
theta_aperture= 2.0 * atan(0.5*detector_data[0].height_Z/(detector_data[0].sdd)) * RAD2DEG;
phi_aperture = 2.0 * atan(0.5*detector_data[0].width_X/(detector_data[0].sdd)) * RAD2DEG;
}
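// Illustrative example of the automatic aperture above (hypothetical detector: sdd=60 cm, 30 cm x 24 cm active area):
//    theta_aperture = 2.0*atan(0.5*24.0/60.0)*RAD2DEG ~ 22.6 deg ,  phi_aperture = 2.0*atan(0.5*30.0/60.0)*RAD2DEG ~ 28.1 deg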
if (source_data[0].flag_halfConeX)
theta_aperture= 2.0*theta_aperture; // Double the input aperture towards the nipple to send beam only towards positive angles (+X) !!DBT!! !!HalfBeam!! !!DBTv1.4!!
// *** RECTANGULAR BEAM INITIALIZATION: aperture initially centered at (0,1,0), ie, THETA_0=90, PHI_0=90
// Using the algorithm used in PENMAIN.f, from penelope 2008 (by F. Salvat).
source_data[0].cos_theta_low = (float)( cos((90.0 - 0.5*theta_aperture)*DEG2RAD) );
source_data[0].D_cos_theta = (float)( -2.0*source_data[0].cos_theta_low ); // Theta aperture is symmetric above and below 90 deg
source_data[0].phi_low = (float)( (90.0 - 0.5*phi_aperture)*DEG2RAD );
source_data[0].D_phi = (float)( phi_aperture*DEG2RAD );
// Particular case of pencil beam input: convert the 0 angle to a very small square beam to avoid precision errors in sampling
if (abs(theta_aperture) < 1.0e-7)
{
theta_aperture = +1.00e-8;
source_data[0].cos_theta_low = 0.0f; // = cos(90-0)
source_data[0].D_cos_theta = 0.0f;
}
if (abs(phi_aperture) < 1.0e-7)
{
phi_aperture = +1.00e-8;
source_data[0].phi_low = (float)( 90.0*DEG2RAD );
source_data[0].D_phi = 0.0f;
}
source_data[0].max_height_at_y1cm = (float) ( tan(0.5*theta_aperture*DEG2RAD) ); // !!DBTv1.4!!
source_data[0].max_width_at_y1cm = (float) ( tan(0.5*phi_aperture*DEG2RAD) ); // Collimate in both directions when using the non-point focal spot. !!DBTv1.4!!
if (source_data[0].flag_halfConeX) // Sampling only half beam towards +X for mammo geometry! !!DBT!! !!HalfBeam!! !!DBTv1.4!!
source_data[0].D_cos_theta = 0.5f*source_data[0].D_cos_theta; // !!DBT!! !!HalfBeam!! !!DBTv1.4!!
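// Sketch of how the four sampling constants initialized above are expected to be used in the GPU kernel to sample a
// direction uniformly distributed inside the rectangular aperture, with the default beam centered at (0,1,0)
// (illustrative only; the actual sampling, including the focal spot blur and half-cone collimation, is done in the kernel):
//    float cos_theta = source_data[0].cos_theta_low + ranecu(&seed)*source_data[0].D_cos_theta;
//    float phi       = source_data[0].phi_low       + ranecu(&seed)*source_data[0].D_phi;
//    float sin_theta = sqrtf(1.0f - cos_theta*cos_theta);
//    direction.x = sin_theta*cosf(phi);   direction.y = sin_theta*sinf(phi);   direction.z = cos_theta;   // = (0,1,0) for theta=phi=90 deg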
/////////////////////////////////////////////////////////////////////////////
// -- Init. [SECTION TOMOGRAPHIC TRAJECTORY v.2016-12-02] (OLD NAME SECTION: [SECTION CT SCAN v.2011-10-25]) !!DBTv1.4!!
do
{
new_line_ptr = fgets(new_line, 250, file_ptr);
if (new_line_ptr==NULL)
{
printf("\n\n !!read_input ERROR!! Input file is not readable or does not contain the string \'SECTION TOMOGRAPHIC TRAJECTORY v.2016-12-02\'!!\n");
exit(-2);
}
}
while(strstr(new_line,"SECTION TOMOGRAPHIC TRAJECTORY v.2016-12-02")==NULL); // Skip comments and empty lines until the section begins
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%d", num_projections); // NUMBER OF PROJECTIONS (set to 1 or less for a single projection)
if (*num_projections<1)
*num_projections = 1; // Zero projections has the same effect as 1 projection (ie, no CT scan rotation).
if ( fabs(*num_projections) > MAX_NUM_PROJECTIONS )
{
MASTER_THREAD printf("\n\n !!read_input ERROR!! The input number of projections is too large. Increase parameter MAX_NUM_PROJECTIONS=%d in the header file and recompile.\n", MAX_NUM_PROJECTIONS);
MASTER_THREAD printf( " There is no limit in the number of projections to be simulated because the source, detector data for each projection is stored in global memory and transfered to shared memory for each projection.\n\n");
#ifdef USING_MPI
MPI_Finalize();
#endif
exit(-2);
}
if (*num_projections==1)
{
// -- Skip rest of the section if simulating a single projection:
source_data[0].angle_per_projection = 0.0f; // Init variables to default values that will not be used anyway
source_data[0].angle_offset = 0.0f;
source_data[0].axis_of_rotation.x = 1.0f; source_data[0].axis_of_rotation.y = source_data[0].axis_of_rotation.z = 0.0f;
source_data[0].rotation_point.x = source_data[0].rotation_point.y = source_data[0].rotation_point.z = 0.0f;
}
else
{
// -- Tomographic scan with multiple projections:
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%lf", SRotAxisD); // SOURCE-TO-ROTATION AXIS DISTANCE
if (*SRotAxisD<0.0 || *SRotAxisD>detector_data[0].sdd)
{
MASTER_THREAD printf("\n\n !!read_input ERROR!! Invalid source-to-rotation axis distance! Input: %f (sdd=%f).\n\n\n", *SRotAxisD, detector_data[0].sdd);
#ifdef USING_MPI
MPI_Finalize();
#endif
exit(-2);
}
source_data[0].rotation_point.x = source_data[0].position.x + source_data[0].direction.x * (*SRotAxisD); // Store the rotation point to apply rotation in the kernel !!DBTv1.4!!
source_data[0].rotation_point.y = source_data[0].position.y + source_data[0].direction.y * (*SRotAxisD);
source_data[0].rotation_point.z = source_data[0].position.z + source_data[0].direction.z * (*SRotAxisD);
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%f", &source_data[0].angle_per_projection); // ANGLE BETWEEN PROJECTIONS [degrees] (360/num_projections for full CT) !!DBTv1.4!!
source_data[0].angle_per_projection = source_data[0].angle_per_projection*DEG2RAD; // store the angle in radians
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%f", &source_data[0].angle_offset); // ANGULAR ROTATION TO FIRST PROJECTION (USEFUL FOR DBT, INPUT SOURCE DIRECTION CONSIDERED AS 0 DEGREES) [degrees] !!DBTv1.4!!
source_data[0].angle_offset = source_data[0].angle_offset*DEG2RAD;
double wx,wy,wz,norm;
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%lf %lf %lf", &wx, &wy, &wz); // AXIS OF ROTATION !!DBTv1.4!!
norm = 1.0/sqrt(wx*wx+wy*wy+wz*wz);
source_data[0].axis_of_rotation.x = (float) wx*norm;
source_data[0].axis_of_rotation.y = (float) wy*norm;
source_data[0].axis_of_rotation.z = (float) wz*norm;
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%lf", translation_helical); // TRANSLATION ALONG ROTATION AXIS BETWEEN PROJECTIONS (HELICAL SCAN) [cm]
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); // KEEP DETECTOR FIXED AT 0 DEGREES FOR DBT? [YES/NO]
if (0==strncmp("YE",new_line,2) || 0==strncmp("Ye",new_line,2) || 0==strncmp("ye",new_line,2))
*flag_detectorFixed = true;
else if (0==strncmp("NO",new_line,2) || 0==strncmp("No",new_line,2) || 0==strncmp("no",new_line,2))
*flag_detectorFixed = false;
else
{
MASTER_THREAD printf("\n\n !!read_input ERROR!! Answer YES or NO to KEEP DETECTOR FIXED AT 0 DEGREES FOR DBT.\n Input text: %s\n\n",new_line);
#ifdef USING_MPI
MPI_Finalize();
#endif
exit(-2);
}
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); // SIMULATE BOTH 0 deg PROJECTION AND TOMOGRAPHIC SCAN (WITHOUT GRID) WITH 2/3 TOTAL NUM HIST IN 1st PROJ (eg, DBT+mammo)? [YES/NO] !!DBTv1.4!!
if (0==strncmp("YE",new_line,2) || 0==strncmp("Ye",new_line,2) || 0==strncmp("ye",new_line,2))
*flag_simulateMammoAfterDBT = true;
else if (0==strncmp("NO",new_line,2) || 0==strncmp("No",new_line,2) || 0==strncmp("no",new_line,2))
*flag_simulateMammoAfterDBT = false;
else
{
MASTER_THREAD printf("\n\n !!read_input ERROR!! Answer YES or NO to SIMULATE BOTH FIRST PROJECTION AND TOMOGRAPHIC SCAN (WITHOUT GRID) WITH SAME NUM HIST (eg, DBT+mammo).\n Input text: %s\n\n",new_line);
#ifdef USING_MPI
MPI_Finalize();
#endif
exit(-2);
}
if (*num_projections==1)
*flag_simulateMammoAfterDBT=false; // Make sure the flag is always false if simulating a single projection
}
/////////////////////////////////////////////////////////////////////////////
// -- Init. [SECTION DOSE DEPOSITION v.2012-12-12] (MC-GPU v.1.3):
// Electrons are not transported and therefore we are approximating that the dose is equal to the KERMA (energy released by the photons alone).
// This approximation is acceptable when there is electronic equilibrium and when the range of the secondary electrons is shorter than the voxel size.
// Usually the doses will be acceptable for photon energies below 1 MeV. The dose estimates may not be accurate at the interface of low density volumes.
do
{
new_line_ptr = fgets(new_line, 250, file_ptr);
if (new_line_ptr==NULL)
{
printf("\n\n !!read_input ERROR!! Input file is not readable or does not contain the string \'SECTION DOSE DEPOSITION v.2012-12-12\'!!\n");
exit(-2);
}
if (strstr(new_line,"SECTION DOSE DEPOSITION v.2011-02-18")!=NULL) // Detect previous version of input file
{
MASTER_THREAD printf("\n\n !!read_input ERROR!! Please update the input file to the new version of MC-GPU (v1.3)!!\n\n You simply have to change the input file text line:\n [SECTION DOSE DEPOSITION v.2011-02-18]\n\n for these two lines:\n [SECTION DOSE DEPOSITION v.2012-12-12]\n NO # TALLY MATERIAL DOSE? [YES/NO]\n\n");
exit(-2);
}
}
while(strstr(new_line,"SECTION DOSE DEPOSITION v.2012-12-12")==NULL); // Skip comments and empty lines until the section begins
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); // TALLY MATERIAL DOSE? [YES/NO] --> turn on/off the material dose tallied adding the Edep in each material, independently of the voxels.
if (0==strncmp("YE",new_line,2) || 0==strncmp("Ye",new_line,2) || 0==strncmp("ye",new_line,2))
{
*flag_material_dose = 1;
MASTER_THREAD printf(" Material dose deposition tally ENABLED.\n");
}
else if (0==strncmp("NO",new_line,2) || 0==strncmp("No",new_line,2) || 0==strncmp("no",new_line,2))
{
*flag_material_dose = 0; // -- NO: disabling tally
MASTER_THREAD printf(" Material dose deposition tally DISABLED.\n");
}
else
{
MASTER_THREAD printf("\n\n !!read_input ERROR!! Answer YES or NO in the first two line of \'SECTION DOSE DEPOSITION\' to enable or disable the material dose and 3D voxel dose tallies.\n Input text: %s\n\n",new_line);
#ifdef USING_MPI
MPI_Finalize();
#endif
exit(-2);
}
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); // TALLY 3D VOXEL DOSE? [YES/NO]
if (0==strncmp("YE",new_line,2) || 0==strncmp("Ye",new_line,2) || 0==strncmp("ye",new_line,2))
{
// -- YES: using the tally
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); trim_name(new_line, file_dose_output); // OUTPUT DOSE FILE NAME (no spaces)
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); sscanf(new_line, "%hd %hd", dose_ROI_x_min, dose_ROI_x_max); // # VOXELS TO TALLY DOSE: X-index min max (first voxel has index 1)
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); sscanf(new_line, "%hd %hd", dose_ROI_y_min, dose_ROI_y_max); // # VOXELS TO TALLY DOSE: Y-index min max
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); sscanf(new_line, "%hd %hd", dose_ROI_z_min, dose_ROI_z_max); // # VOXELS TO TALLY DOSE: Z-index min max
*dose_ROI_x_min -= 1; *dose_ROI_x_max -= 1; // Re-scale input coordinates to have index=0 for the first voxel instead of 1.
*dose_ROI_y_min -= 1; *dose_ROI_y_max -= 1;
*dose_ROI_z_min -= 1; *dose_ROI_z_max -= 1;
MASTER_THREAD printf(" 3D voxel dose deposition tally ENABLED.\n");
if ( ((*dose_ROI_x_min)>(*dose_ROI_x_max)) || ((*dose_ROI_y_min)>(*dose_ROI_y_max)) || ((*dose_ROI_z_min)>(*dose_ROI_z_max)) ||
(*dose_ROI_x_min)<0 || (*dose_ROI_y_min)<0 || (*dose_ROI_z_min)<0 )
{
MASTER_THREAD printf("\n\n !!read_input ERROR!! The input region-of-interst in \'SECTION DOSE DEPOSITION\' is not valid: the minimum voxel index may not be zero or larger than the maximum index.\n");
MASTER_THREAD printf( " Input data = X[%d,%d], Y[%d,%d], Z[%d,%d]\n\n", *dose_ROI_x_min+1, *dose_ROI_x_max+1, *dose_ROI_y_min+1, *dose_ROI_y_max+1, *dose_ROI_z_min+1, *dose_ROI_z_max+1); // Show ROI with index=1 for the first voxel instead of 0.
#ifdef USING_MPI
MPI_Finalize();
#endif
exit(-2);
}
if ( ((*dose_ROI_x_min)==(*dose_ROI_x_max)) && ((*dose_ROI_y_min)==(*dose_ROI_y_max)) && ((*dose_ROI_z_min)==(*dose_ROI_z_max)) )
{
MASTER_THREAD printf("\n\n !!read_input!! According to the input region-of-interest in \'SECTION DOSE DEPOSITION\', only the dose in the voxel (%d,%d,%d) will be tallied.\n\n",*dose_ROI_x_min,*dose_ROI_y_min,*dose_ROI_z_min);
}
}
else if (0==strncmp("NO",new_line,2) || 0==strncmp("No",new_line,2) || 0==strncmp("no",new_line,2))
{
// -- NO: disabling tally
MASTER_THREAD printf(" 3D voxel dose deposition tally DISABLED.\n");
*dose_ROI_x_min = (short int) 32500; *dose_ROI_x_max = (short int) -32500; // Set absurd values for the ROI to make sure we never get any dose tallied
*dose_ROI_y_min = (short int) 32500; *dose_ROI_y_max = (short int) -32500; // (short int values range from -32768 to +32767)
*dose_ROI_z_min = (short int) 32500; *dose_ROI_z_max = (short int) -32500;
}
else
{
MASTER_THREAD printf("\n\n !!read_input ERROR!! Answer YES or NO in the first two line of \'SECTION DOSE DEPOSITION\' to enable or disable the material dose and 3D voxel dose tallies.\n Input text: %s\n\n",new_line);
#ifdef USING_MPI
MPI_Finalize();
#endif
exit(-2);
}
MASTER_THREAD printf("\n");
/////////////////////////////////////////////////////////////////////////////
// -- Init. [SECTION VOXELIZED GEOMETRY FILE v.2017-07-26] // !!v1.5bitree!! // 2016-12-02] // !!DBTv1.4!! // Previous version: v.2009-11-30
do
{
new_line_ptr = fgets(new_line, 250, file_ptr);
if (new_line_ptr==NULL)
{
printf("\n\n !!read_input ERROR!! Input file is not readable or does not contain the string \'SECTION VOXELIZED GEOMETRY FILE v.2017-07-26\'!!\n");
exit(-2);
}
}
while(strstr(new_line,"SECTION VOXELIZED GEOMETRY FILE v.2017-07-26")==NULL); // Skip comments and empty lines until the section begins
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
trim_name(new_line, file_name_voxels); // VOXEL GEOMETRY FILE (penEasy 2008 format)
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%f %f %f", &voxel_data->offset.x, &voxel_data->offset.y, &voxel_data->offset.z); // OFFSET OF THE VOXEL GEOMETRY (DEFAULT ORIGIN AT LOWER BACK CORNER) [cm] !!DBTv1.4!!
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%d %d %d", &voxel_data->num_voxels.x, &voxel_data->num_voxels.y, &voxel_data->num_voxels.z); // NUMBER OF VOXELS: INPUT A "0" TO READ ASCII FORMAT WITH HEADER SECTION, RAW VOXELS WILL BE READ OTHERWISE !!DBTv1.4!!
if (voxel_data->num_voxels.x<1 || voxel_data->num_voxels.y<1 || voxel_data->num_voxels.z<1)
voxel_data->num_voxels.x = -1; // Indicate to read ASCII format geometry: geometric parameters will be read from the header file !!DBTv1.4!! !!DeBuG!!
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
sscanf(new_line, "%f %f %f", &voxel_data->voxel_size.x, &voxel_data->voxel_size.y, &voxel_data->voxel_size.z); // VOXEL SIZES [cm] !!DBTv1.4!!
voxel_data->inv_voxel_size.x = 1.0f/voxel_data->voxel_size.x;
voxel_data->inv_voxel_size.y = 1.0f/voxel_data->voxel_size.y;
voxel_data->inv_voxel_size.z = 1.0f/voxel_data->voxel_size.z;
int split_x=-1, split_y=-1, split_z=-1;
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); //!!bitree!! v1.5b
sscanf(new_line, "%d %d %d", &split_x, &split_y, &split_z); // SIZE OF LOW RESOLUTION VOXELS THAT WILL BE DESCRIBED BY A BINARY TREE, GIVEN AS POWERS OF TWO (eg, 2 2 3 = 2^2x2^2x2^3 = 128 input voxels per low res voxel; 0 0 0 disables tree) //!!bitre!! v1.5b
if( (split_x+split_y+split_z)==0 || split_x<0 || split_y<0 || split_z<0)
{
// Disable bitree generation if a negative value or all zeros are entered:
voxel_data->num_voxels_coarse.x = voxel_data->num_voxels_coarse.y = voxel_data->num_voxels_coarse.z = (unsigned char) 0;
}
else
{
voxel_data->num_voxels_coarse.x = (unsigned char) min_value(pow(2, split_x), 256); // Limit max size coarse voxel and void overflow // !!bitree!! v1.5b
voxel_data->num_voxels_coarse.y = (unsigned char) min_value(pow(2, split_y), 256);
voxel_data->num_voxels_coarse.z = (unsigned char) min_value(pow(2, split_z), 256);
// voxel_data->num_voxels_LowRes.x = (int)((float)voxel_data->num_voxels.x/(float)voxel_data->num_voxels_coarse.x + 0.99f); // !!bitree!! v1.5b
// voxel_data->num_voxels_LowRes.y = (int)((float)voxel_data->num_voxels.y/(float)voxel_data->num_voxels_coarse.y + 0.99f);
// voxel_data->num_voxels_LowRes.z = (int)((float)voxel_data->num_voxels.z/(float)voxel_data->num_voxels_coarse.z + 0.99f);
}
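// Illustrative example of the clamping above (hypothetical input "9 1 1"): pow(2,9)=512 does not fit in the unsigned char
// fields, so the coarse voxel size is limited to min_value(512,256)=256 fine voxels along X, and 2^1=2 along Y and Z.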
/////////////////////////////////////////////////////////////////////////////
// -- Init. [SECTION MATERIAL FILE LIST v.2009-11-30]
do
{
new_line_ptr = fgets(new_line, 250, file_ptr);
if (new_line_ptr==NULL)
{
printf("\n\n !!read_input ERROR!! Input file is not readable or does not contain the string \'SECTION MATERIAL FILE LIST v.2009-11-30\'!!\n");
exit(-2);
}
}
while(strstr(new_line,"SECTION MATERIAL FILE LIST v.2009-11-30")==NULL); // Skip comments and empty lines until the section begins
int i;
for (i=0; i<MAX_MATERIALS; i++)
{
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr);
if (new_line_ptr==NULL)
file_name_materials[i][0]='\n'; // The input file is allowed to finish without defining all the materials
else
trim_name(new_line, file_name_materials[i]);
}
// [Finish reading input file]
/////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////
// *** Set the detector center in front of the source: !!DBTv1.4!!
detector_data[0].center.x = source_data[0].position.x + source_data[0].direction.x * detector_data[0].sdd;
detector_data[0].center.y = source_data[0].position.y + source_data[0].direction.y * detector_data[0].sdd;
detector_data[0].center.z = source_data[0].position.z + source_data[0].direction.z * detector_data[0].sdd;
// *** Set the rotation that will bring the particles from the actual detector plane to the default detector plane at +Y=(0,+1,0) where pixelization is simpler: !!DBTv1.4!!
// Essentially I just need to invert the Euler angles and the order of the rotations given for the source, and move the detector a distance SDD in the direction of the cone beam:
detector_data[0].rot_inv[0] = 1; detector_data[0].rot_inv[1] = 0; detector_data[0].rot_inv[2] = 0; // Init rotation matrix to identity
detector_data[0].rot_inv[3] = 0; detector_data[0].rot_inv[4] = 1; detector_data[0].rot_inv[5] = 0;
detector_data[0].rot_inv[6] = 0; detector_data[0].rot_inv[7] = 0; detector_data[0].rot_inv[8] = 1;
create_rotation_matrix_around_axis(-rotZ3*DEG2RAD, 0, 0, 1, detector_data[0].rot_inv); // Inverse 3rd rotation around Z !!DBTv1.4!!
create_rotation_matrix_around_axis(-rotY2*DEG2RAD, 0, 1, 0, detector_data[0].rot_inv); // Inverse 2nd rotation around Y !!DBTv1.4!!
create_rotation_matrix_around_axis(-rotZ1*DEG2RAD, 0, 0, 1, detector_data[0].rot_inv); // Inverse 1st rotation around Z !!DBTv1.4!!
MASTER_THREAD printf(" Rotations from the detector plane to default detector plane at Y=0 [deg]: rotZ=%f , rotY=%f , rotZ=%f\n", -rotZ3, -rotY2, -rotZ1);
/////////////////////////////////////////////////////////////////////////////
// *** Allocate array for the 4 detected images (non-scattered, Compton, Rayleigh, multiple-scatter):
int pixels_per_image = detector_data[0].num_pixels.x * detector_data[0].num_pixels.y;
*image_bytes = 4 * pixels_per_image * sizeof(unsigned long long int);
(*image_ptr) = (unsigned long long int*) malloc(*image_bytes);
if (*image_ptr==NULL)
{
printf("\n\n !!malloc ERROR!! Not enough memory to allocate %d pixels for the 4 scatter images (%f Mbytes)!!\n\n", pixels_per_image, (*image_bytes)/(1024.f*1024.f));
exit(-2);
}
else
{
MASTER_THREAD printf(" Array for 4 scatter images correctly allocated (%d pixels, %f Mbytes)\n", pixels_per_image, (*image_bytes)/(1024.f*1024.f));
}
// *** Initialize the images to 0 in the CPU. The CUDA code will init it to 0 in the GPU global memory later, using kernel "init_image_array_GPU".
memset(*image_ptr, 0, (*image_bytes)); // Init memory space to 0.
// *** Allocate dose and dose^2 array if tally active:
int num_voxels_ROI = ((int)(*dose_ROI_x_max - *dose_ROI_x_min + 1)) * ((int)(*dose_ROI_y_max - *dose_ROI_y_min + 1)) * ((int)(*dose_ROI_z_max - *dose_ROI_z_min + 1));
if ((*dose_ROI_x_max)>-1)
{
*voxels_Edep_bytes = num_voxels_ROI * sizeof(ulonglong2);
(*voxels_Edep_ptr) = (ulonglong2*) malloc(*voxels_Edep_bytes);
if (*voxels_Edep_ptr==NULL)
{
printf("\n\n !!malloc ERROR!! Not enough memory to allocate %d voxels for the deposited dose (and uncertainty) array (%f Mbytes)!!\n\n", num_voxels_ROI, (*voxels_Edep_bytes)/(1024.f*1024.f));
exit(-2);
}
else
{
MASTER_THREAD printf(" Array for the deposited dose ROI (and uncertainty) correctly allocated (%d voxels, %f Mbytes)\n", num_voxels_ROI, (*voxels_Edep_bytes)/(1024.f*1024.f));
}
}
else
{
(*voxels_Edep_bytes) = 0;
}
// *** Initialize the voxel dose to 0 in the CPU. Not necessary for the CUDA code if dose matrix init. in the GPU global memory using a GPU kernel, but needed if using cudaMemcpy.
if ((*dose_ROI_x_max)>-1)
{
memset(*voxels_Edep_ptr, 0, (*voxels_Edep_bytes)); // Init memory space to 0.
}
return;
}
////////////////////////////////////////////////////////////////////////////////
//! Extract a file name from an input text line, trimming the initial blanks,
//! trailing comment (#) and stopping at the first blank (the file name should
//! not contain blanks).
//!
//! @param[in] input_line Input sentence with blanks and a trailing comment
//! @param[out] file_name Trimmed file name
////////////////////////////////////////////////////////////////////////////////
void trim_name(char* input_line, char* file_name)
{
int a=0, b=0;
// Discard initial blanks:
while(' '==input_line[a])
{
a++;
}
// Read file name until a blank or a comment symbol (#) is found:
while ((' '!=input_line[a])&&('#'!=input_line[a]))
{
file_name[b] = input_line[a];
b++;
a++;
}
file_name[b] = '\0'; // Terminate output string
}
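// Usage example for trim_name (illustrative only, never called by the code):
//    char line[250] = "   my_phantom.vox    # voxelized geometry", name[250];
//    trim_name(line, name);     // --> name == "my_phantom.vox"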
////////////////////////////////////////////////////////////////////////////////
//! Read a line of text and trim initial blanks and trailing comments (#).
//!
//! @param[in] num Characters to read
//! @param[in] file_ptr Pointer to the input file stream
//! @param[out] trimmed_line Trimmed line from input file, skipping empty lines and comments
////////////////////////////////////////////////////////////////////////////////
char* fgets_trimmed(char* trimmed_line, int num, FILE* file_ptr)
{
char new_line[250];
char *new_line_ptr = NULL;
int a=0, b=0;
trimmed_line[0] = '\0'; // Init with a mark that means no file input
do
{
a=0; b=0;
new_line_ptr = fgets(new_line, num, file_ptr); // Read new line
if (new_line_ptr != NULL)
{
// Discard initial blanks:
while(' '==new_line[a])
{
a++;
}
// Read file until a comment symbol (#) or end-of-line is found:
while (('\n'!=new_line[a])&&('#'!=new_line[a]))
{
trimmed_line[b] = new_line[a];
b++;
a++;
}
}
} while(new_line_ptr!=NULL && '\0'==trimmed_line[0]); // Keep reading lines until end-of-file or a line that is not empty or only comment is found
trimmed_line[b] = '\0'; // Terminate output string
return new_line_ptr;
}
////////////////////////////////////////////////////////////////////////////////
//! Read the voxel data and allocate the material and density matrix.
//! Also find and report the maximum density defined in the geometry.
//!
// -- Sample voxel geometry file:
//
// # (comment lines...)
// #
// # Voxel order: X runs first, then Y, then Z.
// #
// [SECTION VOXELS HEADER v.2008-04-13]
// 411 190 113 No. OF VOXELS IN X,Y,Z
// 5.000e-02 5.000e-02 5.000e-02 VOXEL SIZE (cm) ALONG X,Y,Z
// 1 COLUMN NUMBER WHERE MATERIAL ID IS LOCATED
// 2 COLUMN NUMBER WHERE THE MASS DENSITY IS LOCATED
// 1 BLANK LINES AT END OF X,Y-CYCLES (1=YES,0=NO)
// [END OF VXH SECTION]
// 1 0.00120479
// 1 0.00120479
// ...
//
//! @param[in] file_name_voxels Name of the voxelized geometry file.
//! @param[out] density_max Array with the maximum density for each material in the voxels.
//! @param[out] voxel_data Pointer to a structure containing the voxel number and size.
//! @param[out] voxel_mat_dens_ptr Pointer to the vector with the voxel materials and densities.
//! @param[in] dose_ROI_x/y/z_max Size of the dose ROI: can not be larger than the total number of voxels in the geometry.
////////////////////////////////////////////////////////////////////////////////
// void load_voxels(int myID, char* file_name_voxels, float* density_max, struct voxel_struct* voxel_data, float2** voxel_mat_dens_ptr, unsigned int* voxel_mat_dens_bytes, short int* dose_ROI_x_max, short int* dose_ROI_y_max, short int* dose_ROI_z_max)
void load_voxels(int myID, char* file_name_voxels, float* density_max, struct voxel_struct* voxel_data, int** voxel_mat_dens_ptr, long long int* voxel_mat_dens_bytes, short int* dose_ROI_x_max, short int* dose_ROI_y_max, short int* dose_ROI_z_max) //!!FixedDensity_DBT!! Allocating "voxel_mat_dens" as "char" instead of "float2"
{
char new_line[250];
char *new_line_ptr = NULL;
MASTER_THREAD if (strstr(file_name_voxels,".zip")!=NULL)
printf("\n\n -- WARNING load_voxels! The input voxel file name has the extension \'.zip\'. Only \'.gz\' compression is allowed!!\n\n"); // !!zlib!!
gzFile file_ptr = gzopen(file_name_voxels, "rb"); // Open the file with zlib: the file can be compressed with gzip or uncompressed. !!zlib!!
if (file_ptr==NULL)
{
printf("\n\n !! fopen ERROR load_voxels!! File %s does not exist!!\n", file_name_voxels);
exit(-2);
}
MASTER_THREAD
{
printf("\n -- Reading voxel file \'%s\':\n",file_name_voxels);
if (strstr(file_name_voxels,".gz")==NULL)
printf(" (note that MC-GPU can also read voxel and material files compressed with gzip)\n"); // !!zlib!!
fflush(stdout);
}
do
{
new_line_ptr = gzgets(file_ptr, new_line, 250); // !!zlib!!
if (new_line_ptr==NULL)
{
MASTER_THREAD printf("\n\n !!Reading ERROR load_voxels!! File is not readable or does not contain the string \'[SECTION VOXELS HEADER\'!!\n");
exit(-2);
}
}
while(strstr(new_line,"[SECTION VOXELS")==NULL); // Skip comments and empty lines until the header begins
new_line_ptr = gzgets(file_ptr, new_line, 250); // !!zlib!! // Read full line (max. 250 characters).
sscanf(new_line, "%d %d %d",&voxel_data->num_voxels.x, &voxel_data->num_voxels.y, &voxel_data->num_voxels.z);
new_line_ptr = gzgets(file_ptr, new_line, 250); // !!zlib!!
sscanf(new_line, "%f %f %f", &voxel_data->voxel_size.x, &voxel_data->voxel_size.y, &voxel_data->voxel_size.z);
do
{
new_line_ptr = gzgets(file_ptr, new_line, 250); // !!zlib!!
if (new_line_ptr==NULL)
{
MASTER_THREAD printf("\n\n !!Reading ERROR load_voxels!! File is not readable or does not contain the string \'[END OF VXH SECTION]\'!!\n");
exit(-2);
}
}
while(strstr(new_line,"[END OF VXH SECTION")==NULL); // Skip rest of the header
// -- Store the size of the voxel bounding box (used in the source function):
voxel_data->size_bbox.x = voxel_data->num_voxels.x * voxel_data->voxel_size.x;
voxel_data->size_bbox.y = voxel_data->num_voxels.y * voxel_data->voxel_size.y;
voxel_data->size_bbox.z = voxel_data->num_voxels.z * voxel_data->voxel_size.z;
MASTER_THREAD
{
printf(" Number of voxels in the input geometry file: %d x %d x %d = %d\n", voxel_data->num_voxels.x, voxel_data->num_voxels.y, voxel_data->num_voxels.z, (voxel_data->num_voxels.x*voxel_data->num_voxels.y*voxel_data->num_voxels.z));
printf(" Size of the input voxels: %f x %f x %f cm (voxel volume=%f cm^3)\n", voxel_data->voxel_size.x, voxel_data->voxel_size.y, voxel_data->voxel_size.z, voxel_data->voxel_size.x*voxel_data->voxel_size.y*voxel_data->voxel_size.z);
printf(" Voxel bounding box size: %f x %f x %f cm\n", voxel_data->size_bbox.x, voxel_data->size_bbox.y, voxel_data->size_bbox.z);
printf(" Voxel geometry offset: %f, %f, %f cm\n", voxel_data->offset.x, voxel_data->offset.y, voxel_data->offset.z); // !!DBTv1.4!!
}
if (*dose_ROI_x_max > -1) // Check if tally not disabled
{
// -- Make sure the input number of voxels in the vox file is compatible with the input dose ROI (ROI assumes first voxel is index 0):
if ( (*dose_ROI_x_max+1)>(voxel_data->num_voxels.x) || (*dose_ROI_y_max+1)>(voxel_data->num_voxels.y) || (*dose_ROI_z_max+1)>(voxel_data->num_voxels.z) )
{
MASTER_THREAD printf("\n The input region of interest for the dose deposition is larger than the size of the voxelized geometry:\n");
*dose_ROI_x_max = min_value(voxel_data->num_voxels.x-1, *dose_ROI_x_max);
*dose_ROI_y_max = min_value(voxel_data->num_voxels.y-1, *dose_ROI_y_max);
*dose_ROI_z_max = min_value(voxel_data->num_voxels.z-1, *dose_ROI_z_max);
MASTER_THREAD printf( " updating the ROI max limits to fit the geometry -> dose_ROI_max=(%d, %d, %d)\n", *dose_ROI_x_max+1, *dose_ROI_y_max+1, *dose_ROI_z_max+1); // Allowing the input of an ROI larger than the voxel volume: in this case some of the allocated memory will be wasted but the program will run ok.
}
if ( (*dose_ROI_x_max+1)==(voxel_data->num_voxels.x) && (*dose_ROI_y_max+1)==(voxel_data->num_voxels.y) && (*dose_ROI_z_max+1)==(voxel_data->num_voxels.z) )
MASTER_THREAD printf(" The voxel dose tally ROI covers the entire voxelized phantom: the dose to every voxel will be tallied.\n");
else
MASTER_THREAD printf(" The voxel dose tally ROI covers only a fraction of the voxelized phantom: the dose to voxels outside the ROI will not be tallied.\n");
}
// -- Store the inverse of the pixel sides (in cm) to speed up the particle location in voxels.
voxel_data->inv_voxel_size.x = 1.0f/(voxel_data->voxel_size.x);
voxel_data->inv_voxel_size.y = 1.0f/(voxel_data->voxel_size.y);
voxel_data->inv_voxel_size.z = 1.0f/(voxel_data->voxel_size.z);
// -- Allocate the voxel matrix and store array size:
// *voxel_mat_dens_bytes = sizeof(float2)*(voxel_data->num_voxels.x)*(voxel_data->num_voxels.y)*(voxel_data->num_voxels.z);
// *voxel_mat_dens_ptr = (float2*) malloc(*voxel_mat_dens_bytes);
*voxel_mat_dens_bytes = sizeof(int)*(voxel_data->num_voxels.x)*(voxel_data->num_voxels.y)*(voxel_data->num_voxels.z); //!!FixedDensity_DBT!! Allocating "voxel_mat_dens" as "char" instead of "float2"
*voxel_mat_dens_ptr = (int*) malloc(*voxel_mat_dens_bytes); //!!FixedDensity_DBT!! Allocating "voxel_mat_dens" as "char" instead of "float2"
if (*voxel_mat_dens_ptr==NULL)
{
printf("\n\n !!malloc ERROR load_voxels!! Not enough memory to allocate %d voxels (%f Mbytes)!!\n\n", (voxel_data->num_voxels.x*voxel_data->num_voxels.y*voxel_data->num_voxels.z), (*voxel_mat_dens_bytes)/(1024.f*1024.f));
exit(-2);
}
MASTER_THREAD printf("\n\n!!WARNING!! HARDCODED CONVERSION TABLE FROM MATERIAL NUMBER TO DENSITY (kernel function \"density_LUT\") !!DeBuG!!\n"); //!!DeBuG!! !!DeBuG!!
MASTER_THREAD printf( " The densities given in the input .vox file are not used in the actual simulation.\n\n"); //!!DeBuG!! !!DeBuG!!
MASTER_THREAD printf(" -- Initializing the voxel material vector (%f Mbytes). Each voxel density is fixed by its material number using a look up table; individual densities disregarded !!FixedDensity_DBT!!\n\n", (*voxel_mat_dens_bytes)/(1024.f*1024.f)); //!!FixedDensity_DBT!!
MASTER_THREAD fflush(stdout);
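// A minimal sketch of the kind of hardcoded material-to-density conversion referred to above (illustrative only;
// the actual "density_LUT" device function and its material densities are defined elsewhere in the kernel source):
//    __device__ __host__ inline float density_LUT_sketch(int material)
//    {
//      const float density[MAX_MATERIALS] = {0.0012f, 1.00f, 0.92f /*, ... hypothetical densities in g/cm^3 */};
//      return density[material];
//    }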
// -- Read the voxel densities:
// MASTER_THREAD printf(" Reading the voxel densities... ");
int i, j, k, read_lines=0, dummy_material, read_items = -99;
float dummy_density;
// float2 *voxels_ptr = *voxel_mat_dens_ptr;
int *voxels_ptr = *voxel_mat_dens_ptr; //!!FixedDensity_DBT!! Allocating "voxel_mat_dens" as "char" instead of "float2"
for (k=0; k<MAX_MATERIALS; k++)
density_max[k] = -999.0f; // Init array with an impossible low density value
for(k=0; k<(voxel_data->num_voxels.z); k++)
{
for(j=0; j<(voxel_data->num_voxels.y); j++)
{
for(i=0; i<(voxel_data->num_voxels.x); i++)
{
do
{
new_line_ptr = gzgets(file_ptr, new_line, 250); // !!zlib!!
}
while (('\n'==new_line[0])||('\n'==new_line[1])||('#'==new_line[0])||('#'==new_line[1])); // Skip empty lines and comments.
read_items = sscanf(new_line, "%d %f", &dummy_material, &dummy_density); // Read the next 2 numbers
if (read_items!=2)
printf("\n !!WARNING load_voxels!! Expecting to read 2 items (material and density). read_items=%d, read_lines=%d \n", read_items, read_lines);
if (dummy_material>MAX_MATERIALS)
{
printf("\n\n !!ERROR load_voxels!! Voxel material number too high!! #mat=%d, MAX_MATERIALS=%d, voxel number=%d\n\n", dummy_material, MAX_MATERIALS, read_lines+1);
exit(-2);
}
if (dummy_material<1)
{
printf("\n\n !!ERROR load_voxels!! Voxel material number can not be zero or negative!! #mat=%d, voxel number=%dd\n\n", dummy_material, read_lines+1);
exit(-2);
}
if (dummy_density < 1.0e-9f)
{
printf("\n\n !!ERROR load_voxels!! Voxel density can not be 0 or negative: #mat=%d, density=%f, voxel number=%d\n\n", dummy_material, dummy_density, read_lines+1);
exit(-2);
}
if (dummy_density > density_max[dummy_material-1])
density_max[dummy_material-1] = dummy_density; // Store maximum density for each material
// (*voxels_ptr).x = (float)(dummy_material)+0.0001f; // Assign material value as float (the integer value will be recovered by truncation)
// (*voxels_ptr).y = dummy_density; // Assign density values
(*voxels_ptr) = (int)(dummy_material-1); // Assign the material value as an integer index starting at 0 //!!FixedDensity_DBT!! Storing only the material number instead of "float2"; density taken from look up table
voxels_ptr++; // Move to next voxel
read_lines++;
}
}
}
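// Note: the triple loop above stores the voxels with X running fastest, then Y, then Z (matching the .vox file format),
// so a voxel with indices (i,j,k) can later be located in the flat array as (illustrative sketch):
//    long long int abs_voxel = i + voxel_data->num_voxels.x * ( j + (long long int)voxel_data->num_voxels.y * k );
//    int material = (*voxel_mat_dens_ptr)[abs_voxel];     // material index starting at 0, as assigned above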
MASTER_THREAD printf(" Total number of voxels read: %d\n",read_lines);
gzclose(file_ptr); // Close input file !!zlib!!
}
////////////////////////////////////////////////////////////////////////////////
//! Read the material input files and set the mean free paths and the "linear_interp" structures.
//! Find the material nominal density. Set the Woodcock trick data.
//
// -- Sample material data file (data obtained from the PENELOPE 2006 database and models):
//
// [MATERIAL NAME]
// Water
// [NOMINAL DENSITY (g/cm^3)]
// 1.000
// [NUMBER OF DATA VALUES]
// 4096
// [MEAN FREE PATHS :: Energy (eV) || Rayleigh | Compton | Photoelectric | Pair-production | TOTAL (cm)]
// 1.00000E+03 7.27451E-01 9.43363E+01 2.45451E-04 1.00000E+35 2.45367E-04
// 5.00000E+03 1.80004E+00 8.35996E+00 2.38881E-02 1.00000E+35 2.35089E-02
// 1.00000E+04 4.34941E+00 6.26746E+00 2.02568E-01 1.00000E+35 1.87755E-01
// ...
// #[RAYLEIGH INTERACTIONS (RITA sampling of atomic form factor from EPDL database)]
// ...
// #[COMPTON INTERACTIONS (relativistic impulse model with approximated one-electron analytical profiles)]
// ...
//
//! @param[in] file_name_materials Array with the names of the material files.
//! @param[in] density_max maximum density in the geometry (needed to set Woodcock trick)
//! @param[out] density_nominal Array with the nominal density of the materials read
//! @param[out] mfp_table_data Constant values for the linear interpolation
//! @param[out] mfp_table_a_ptr First element for the linear interpolation.
//! @param[out] mfp_table_b_ptr Second element for the linear interpolation.
////////////////////////////////////////////////////////////////////////////////
void load_material(int myID, char file_name_materials[MAX_MATERIALS][250], float* density_max, float* density_nominal, struct linear_interp* mfp_table_data, float2** mfp_Woodcock_table_ptr, int* mfp_Woodcock_table_bytes, float3** mfp_table_a_ptr, float3** mfp_table_b_ptr, int* mfp_table_bytes, struct rayleigh_struct *rayleigh_table_ptr, struct compton_struct *compton_table_ptr)
{
char new_line[250];
char *new_line_ptr = NULL;
int mat, i, bin, input_num_values = 0, input_rayleigh_values = 0, input_num_shells = 0;
double delta_e=-99999.0;
// -- Init the number of shells to 0 for all materials
for (mat=0; mat<MAX_MATERIALS; mat++)
compton_table_ptr->noscco[mat] = 0;
// --Read the material data files:
MASTER_THREAD printf("\n -- Reading the material data files (MAX_MATERIALS=%d):\n", MAX_MATERIALS);
for (mat=0; mat<MAX_MATERIALS; mat++)
{
if ((file_name_materials[mat][0]=='\0') || (file_name_materials[mat][0]=='\n')) // Empty file name
continue; // Re-start loop for next material
MASTER_THREAD printf(" Mat %d: File \'%s\'\n", mat+1, file_name_materials[mat]);
// printf(" -- Reading material file #%d: \'%s\'\n", mat, file_name_materials[mat]);
gzFile file_ptr = gzopen(file_name_materials[mat], "rb"); // !!zlib!!
if (file_ptr==NULL)
{
printf("\n\n !!fopen ERROR!! File %d \'%s\' does not exist!!\n", mat, file_name_materials[mat]);
exit(-2);
}
do
{
new_line_ptr = gzgets(file_ptr, new_line, 250); // Read full line (max. 250 characters). // !!zlib!!
if (new_line_ptr==NULL)
{
printf("\n\n !!Reading ERROR!! File is not readable or does not contain the string \'[NOMINAL DENSITY\'!!\n");
exit(-2);
}
}
while(strstr(new_line,"[NOMINAL DENSITY")==NULL); // Skip rest of the header
// Read the material nominal density:
new_line_ptr = gzgets(file_ptr, new_line, 250); // !!zlib!!
sscanf(new_line, "# %f", &density_nominal[mat]);
if (density_max[mat]>0) // Material found in the voxels
{
MASTER_THREAD printf(" Nominal density = %f g/cm^3; Max density in voxels = %f g/cm^3\n", density_nominal[mat], density_max[mat]);
}
else // Material NOT found in the voxels
{
MASTER_THREAD printf(" This material is not used in any voxel.\n");
// Do not lose time reading the data for materials not found in the voxels, except for the first one (needed to determine the size of the input data).
if (0 == mat)
density_max[mat] = 0.01f*density_nominal[mat]; // Assign a small but positive density; this material will not be used anyway.
else
continue; // Move on to next material
}
// --For the first material, set the number of energy values and allocate table arrays:
new_line_ptr = gzgets(file_ptr, new_line, 250); // !!zlib!!
new_line_ptr = gzgets(file_ptr, new_line, 250); // !!zlib!!
sscanf(new_line, "# %d", &input_num_values);
if (0==mat)
{
mfp_table_data->num_values = input_num_values;
MASTER_THREAD printf(" Number of energy values in the mean free path database: %d.\n", input_num_values);
// Allocate memory for the linear interpolation arrays:
*mfp_Woodcock_table_bytes = sizeof(float2)*input_num_values;
*mfp_Woodcock_table_ptr = (float2*) malloc(*mfp_Woodcock_table_bytes); // Allocate space for the 2 parameter table
*mfp_table_bytes = sizeof(float3)*input_num_values*MAX_MATERIALS;
*mfp_table_a_ptr = (float3*) malloc(*mfp_table_bytes); // Allocate space for the 4 MFP tables
*mfp_table_b_ptr = (float3*) malloc(*mfp_table_bytes);
*mfp_table_bytes = sizeof(float3)*input_num_values*MAX_MATERIALS;
if (input_num_values>MAX_ENERGYBINS_RAYLEIGH)
{
printf("\n\n !!load_material ERROR!! Too many energy bins (Input bins=%d): increase parameter MAX_ENERGYBINS_RAYLEIGH=%d!!\n\n", input_num_values, MAX_ENERGYBINS_RAYLEIGH);
exit(-2);
}
if ((NULL==*mfp_Woodcock_table_ptr)||(NULL==*mfp_table_a_ptr)||(NULL==*mfp_table_b_ptr))
{
printf("\n\n !!malloc ERROR!! Not enough memory to allocate the linear interpolation data: %d bytes!!\n\n", (*mfp_Woodcock_table_bytes+2*(*mfp_table_bytes)));
exit(-2);
}
else
{
MASTER_THREAD printf(" Linear interpolation data correctly allocated (%f Mbytes)\n", (*mfp_Woodcock_table_bytes+2*(*mfp_table_bytes))/(1024.f*1024.f));
}
for (i=0; i<input_num_values; i++)
{
(*mfp_Woodcock_table_ptr)[i].x = 99999999.99f; // Init this array with a huge MFP, the minimum values are calculated below
}
}
else // Materials after first
{
if (input_num_values != mfp_table_data->num_values)
{
printf("\n\n !!load_material ERROR!! Incorrect number of energy values given in material \'%s\': input=%d, expected=%d\n",file_name_materials[mat], input_num_values, mfp_table_data->num_values);
exit(-2);
}
}
// -- Read the mean free paths (and Rayleigh cumulative prob):
new_line_ptr = gzgets(file_ptr, new_line, 250); // !!zlib!!
new_line_ptr = gzgets(file_ptr, new_line, 250); // !!zlib!!
double d_energy, d_rayleigh, d_compton, d_photelectric, d_total_mfp, d_pmax, e_last=-1.0;
for (i=0; i<input_num_values; i++)
{
new_line_ptr = gzgets(file_ptr, new_line, 250); // !!zlib!!
sscanf(new_line," %le %le %le %le %le %le", &d_energy, &d_rayleigh, &d_compton, &d_photelectric, &d_total_mfp, &d_pmax);
// Find and store the minimum total MFP at the current energy, for every material's maximum density:
float temp_mfp = d_total_mfp*(density_nominal[mat])/(density_max[mat]);
if (temp_mfp < (*mfp_Woodcock_table_ptr)[i].x)
(*mfp_Woodcock_table_ptr)[i].x = temp_mfp; // Store minimum total mfp [cm]
// Store the inverse MFP data points with [num_values rows]*[MAX_MATERIALS columns]
// Scaling the table to the nominal density so that I can re-scale in the kernel to the actual local density:
(*mfp_table_a_ptr)[i*(MAX_MATERIALS)+mat].x = 1.0/(d_total_mfp*density_nominal[mat]); // inverse TOTAL mfp * nominal density
(*mfp_table_a_ptr)[i*(MAX_MATERIALS)+mat].y = 1.0/(d_compton *density_nominal[mat]); // inverse Compton mfp * nominal density
(*mfp_table_a_ptr)[i*(MAX_MATERIALS)+mat].z = 1.0/(d_rayleigh *density_nominal[mat]); // inverse Rayleigh mfp * nominal density
rayleigh_table_ptr->pmax[i*(MAX_MATERIALS)+mat] = d_pmax; // Store the maximum cumulative probability of atomic form factor F^2 for
if (0==i && 0==mat)
{
mfp_table_data->e0 = d_energy; // Store the first energy of the first material
}
if (0==i)
{
if (fabs(d_energy-mfp_table_data->e0)>1.0e-9)
{
printf("\n\n !!load_material ERROR!! Incorrect first energy value given in material \'%s\': input=%f, expected=%f\n", file_name_materials[mat], d_energy, mfp_table_data->e0);
exit(-2);
}
}
else if (1==i)
{
delta_e = d_energy-e_last;
}
else if (i>1)
{
if (((fabs((d_energy-e_last)-delta_e))/delta_e)>0.001) // Tolerate up to a 0.1% relative variation in the delta e (for each bin) to account for possible precision errors reading the energy values
{
printf(" !!ERROR reading material data!! The energy step between mean free path values is not constant!!\n (maybe not enough decimals given for the energy values)\n #value = %d, First delta: %f , New delta: %f, Energy: %f ; Rel.Dif=%f\n", i, delta_e, (d_energy-e_last), d_energy,((fabs((d_energy-e_last)-delta_e))/delta_e));
exit(-2);
}
}
e_last = d_energy;
}
if (0==mat) MASTER_THREAD printf(" Lowest energy first bin = %f eV, last bin = %f eV; bin width = %f eV\n", (mfp_table_data->e0), e_last, delta_e);
// -- Store the inverse of delta energy:
mfp_table_data->ide = 1.0f/delta_e;
// -- Store MFP data slope 'b' (.y for Woodcock):
for (i=0; i<(input_num_values-1); i++)
{
bin = i*MAX_MATERIALS+mat; // Set current bin, skipping MAX_MATERIALS columns
(*mfp_table_b_ptr)[bin].x = ((*mfp_table_a_ptr)[bin+MAX_MATERIALS].x - (*mfp_table_a_ptr)[bin].x) / delta_e;
(*mfp_table_b_ptr)[bin].y = ((*mfp_table_a_ptr)[bin+MAX_MATERIALS].y - (*mfp_table_a_ptr)[bin].y) / delta_e;
(*mfp_table_b_ptr)[bin].z = ((*mfp_table_a_ptr)[bin+MAX_MATERIALS].z - (*mfp_table_a_ptr)[bin].z) / delta_e;
}
// After maximum energy (last bin), assume constant slope:
(*mfp_table_b_ptr)[(input_num_values-1)*MAX_MATERIALS+mat] = (*mfp_table_b_ptr)[(input_num_values-2)*MAX_MATERIALS+mat];
// -- Rescale the 'a' parameter (.x for Woodcock) as if the bin started at energy = 0: we will not have to rescale to the bin minimum energy every time
for (i=0; i<input_num_values; i++)
{
d_energy = mfp_table_data->e0 + i*delta_e; // Set current bin lowest energy value
bin = i*MAX_MATERIALS+mat; // Set current bin, skipping MAX_MATERIALS columns
(*mfp_table_a_ptr)[bin].x = (*mfp_table_a_ptr)[bin].x - d_energy*(*mfp_table_b_ptr)[bin].x;
(*mfp_table_a_ptr)[bin].y = (*mfp_table_a_ptr)[bin].y - d_energy*(*mfp_table_b_ptr)[bin].y;
(*mfp_table_a_ptr)[bin].z = (*mfp_table_a_ptr)[bin].z - d_energy*(*mfp_table_b_ptr)[bin].z;
}
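// After this rescaling the interpolated inverse mean free paths can be evaluated in the kernel with a single multiply-add,
// without subtracting the bin's lowest energy (sketch of the expected use; the exact code lives in the transport kernel):
//    int   bin         = (int)((energy - mfp_table_data->e0) * mfp_table_data->ide);
//    float mu_over_rho = (*mfp_table_a_ptr)[bin*MAX_MATERIALS+mat].x + energy*(*mfp_table_b_ptr)[bin*MAX_MATERIALS+mat].x;  // = 1/(total MFP * nominal density)
//    float mfp_local   = 1.0f/(mu_over_rho*local_density);      // total mean free path at the local density [cm]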
// -- Reading data for RAYLEIGH INTERACTIONS (RITA sampling of atomic form factor from EPDL database):
do
{
new_line_ptr = gzgets(file_ptr, new_line, 250); // !!zlib!!
if (gzeof(file_ptr)!=0) // !!zlib!!
{
printf("\n\n !!End-of-file ERROR!! Rayleigh data not found: \"#[DATA VALUES...\" in file \'%s\'. Last line read: %s\n\n", file_name_materials[mat], new_line);
exit(-2);
}
}
while(strstr(new_line,"[DATA VALUES")==NULL); // Skip all lines until this text is found
new_line_ptr = gzgets(file_ptr, new_line, 250); // Read the number of data points in Rayleigh // !!zlib!!
sscanf(new_line, "# %d", &input_rayleigh_values);
if (input_rayleigh_values != NP_RAYLEIGH)
{
printf("\n\n !!ERROR!! The number of values for Rayleigh sampling is different than the allocated space: input=%d, NP_RAYLEIGH=%d. File=\'%s\'\n", input_rayleigh_values, NP_RAYLEIGH, file_name_materials[mat]);
exit(-2);
}
new_line_ptr = gzgets(file_ptr, new_line, 250); // Comment line: #[SAMPLING DATA FROM COMMON/CGRA/: X, P, A, B, ITL, ITU] // !!zlib!!
for (i=0; i<input_rayleigh_values; i++)
{
int itlco_tmp, ituco_tmp;
bin = NP_RAYLEIGH*mat + i;
new_line_ptr = gzgets(file_ptr, new_line, 250); // !!zlib!!
sscanf(new_line," %e %e %e %e %d %d", &(rayleigh_table_ptr->xco[bin]), &(rayleigh_table_ptr->pco[bin]),
&(rayleigh_table_ptr->aco[bin]), &(rayleigh_table_ptr->bco[bin]),
&itlco_tmp, &ituco_tmp);
rayleigh_table_ptr->itlco[bin] = (unsigned char) itlco_tmp;
rayleigh_table_ptr->ituco[bin] = (unsigned char) ituco_tmp;
}
// printf(" -- Rayleigh sampling data read. Input values = %d\n",input_rayleigh_values);
// -- Reading COMPTON INTERACTIONS data (relativistic impulse model with approximated one-electron analytical profiles):
do
{
new_line_ptr = gzgets(file_ptr, new_line, 250); // !!zlib!!
if (gzeof(file_ptr)!=0) // !!zlib!!
{
printf("\n\n !!End-of-file ERROR!! Compton data not found: \"[NUMBER OF SHELLS]\" in file \'%s\'. Last line read: %s\n\n", file_name_materials[mat], new_line);
exit(-2);
}
}
while(strstr(new_line,"[NUMBER OF SHELLS")==NULL); // Skip all lines until this text is found
new_line_ptr = gzgets(file_ptr, new_line, 250);
sscanf(new_line, "# %d", &input_num_shells); // Read the NUMBER OF SHELLS
if (input_num_shells>MAX_SHELLS)
{
printf("\n\n !!ERROR!! Too many shells for Compton interactions in file \'%s\': input=%d, MAX_SHELLS=%d\n", file_name_materials[mat], input_num_shells, MAX_SHELLS);
exit(-2);
}
compton_table_ptr->noscco[mat] = input_num_shells; // Store number of shells for this material in structure
new_line_ptr = gzgets(file_ptr, new_line, 250); // Comment line: #[SHELL INFORMATION FROM COMMON/CGCO/: FCO, UICO, FJ0, KZCO, KSCO]
int kzco_dummy, ksco_dummy;
for (i=0; i<input_num_shells; i++)
{
bin = mat + i*MAX_MATERIALS;
new_line_ptr = gzgets(file_ptr, new_line, 250); // !!zlib!!
sscanf(new_line," %e %e %e %d %d", &(compton_table_ptr->fco[bin]), &(compton_table_ptr->uico[bin]),
&(compton_table_ptr->fj0[bin]), &kzco_dummy, &ksco_dummy);
}
gzclose(file_ptr); // Material data read. Close the current material input file. // !!zlib!!
} // ["for" loop: continue with next material]
// -- Store Woodcock MFP slope in component '.y':
for (i=0; i<(mfp_table_data->num_values-1); i++)
(*mfp_Woodcock_table_ptr)[i].y = ((*mfp_Woodcock_table_ptr)[i+1].x - (*mfp_Woodcock_table_ptr)[i].x)/delta_e;
// -- Rescale the first parameter in component .x for Woodcock
for (i=0; i<mfp_table_data->num_values; i++)
{
(*mfp_Woodcock_table_ptr)[i].x = (*mfp_Woodcock_table_ptr)[i].x - (mfp_table_data->e0 + i*delta_e)*(*mfp_Woodcock_table_ptr)[i].y;
}
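// Sketch of how the Woodcock table built above is expected to be used for delta-scattering tracking in the kernel
// (illustrative only): the flight distance is sampled with the minimum MFP over all materials at their maximum density,
// so photons can be moved without checking voxel boundaries, and real interactions are then accepted with probability
// mfp_min/mfp(E, local material):
//    float mfp_min = (*mfp_Woodcock_table_ptr)[bin].x + energy*(*mfp_Woodcock_table_ptr)[bin].y;
//    float flight  = -mfp_min*logf(ranecu(&seed));      // sampled free flight [cm]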
}
////////////////////////////////////////////////////////////////////////////////
#ifdef USING_CUDA
////////////////////////////////////////////////////////////////////////////////
//! Select and initialize the CUDA-enabled GPU that will be used in the simulation.
//! Allocates and copies the simulation data in the GPU global and constant memories.
//!
////////////////////////////////////////////////////////////////////////////////
void init_CUDA_device( int* gpu_id, int myID, int numprocs,
/*Variables to GPU constant memory:*/ struct voxel_struct* voxel_data, struct source_struct* source_data, struct source_energy_struct* source_energy_data, struct detector_struct* detector_data, struct linear_interp* mfp_table_data,
// /*Variables to GPU global memory:*/ float2* voxel_mat_dens, float2** voxel_mat_dens_device, unsigned int voxel_mat_dens_bytes,
/*Variables to GPU global memory:*/ int* voxel_mat_dens, int** voxel_mat_dens_device, long long int voxel_mat_dens_bytes, //!!FixedDensity_DBT!!
char* bitree, char** bitree_device, unsigned int bitree_bytes, //!!bitree!! v1.5b
unsigned long long int* image, unsigned long long int** image_device, int image_bytes,
float2* mfp_Woodcock_table, float2** mfp_Woodcock_table_device, int mfp_Woodcock_table_bytes,
float3* mfp_table_a, float3* mfp_table_b, float3** mfp_table_a_device, float3** mfp_table_b_device, int mfp_table_bytes,
struct rayleigh_struct* rayleigh_table, struct rayleigh_struct** rayleigh_table_device,
struct compton_struct* compton_table, struct compton_struct** compton_table_device,
struct detector_struct** detector_data_device, struct source_struct** source_data_device,
ulonglong2* voxels_Edep, ulonglong2** voxels_Edep_device, int voxels_Edep_bytes, short int* dose_ROI_x_min, short int* dose_ROI_x_max, short int* dose_ROI_y_min, short int* dose_ROI_y_max, short int* dose_ROI_z_min, short int* dose_ROI_z_max,
ulonglong2* materials_dose, ulonglong2** materials_dose_device, int flag_material_dose, int** seed_input_device, int* seed_input, int num_projections)
{
cudaDeviceProp deviceProp;
int deviceCount;
checkCudaErrors(cudaGetDeviceCount(&deviceCount));
if (0==deviceCount)
{
printf("\n !!ERROR!! No CUDA enabled GPU detected by thread #%d!!\n\n", myID);
exit(-1);
}
#ifdef USING_MPI
if (numprocs>1)
{
// *** Select the appropriate GPUs in the different workstations in the MPI hostfile:
// The idea is that each thread will wait for the previous thread to send a message with its processor name and GPU id,
// then it will assign the current GPU, and finally it will notify the following thread:
const int NODE_NAME_LENGTH = 31;
char processor_name[NODE_NAME_LENGTH+1], previous_processor_name[NODE_NAME_LENGTH+1];
int resultlen = -1;
MPI_Get_processor_name(processor_name, &resultlen);
MPI_Status status;
int gpu_id_to_avoid = *gpu_id;
clock_t clock_start;
if (myID == (numprocs-1))
clock_start = clock();
// Unless we are the first thread, wait for a message from the previous thread:
// The MPI_Recv command will block the execution of the code until the previous threads have communicated and shared the appropriate information.
if (0!=myID)
{
MPI_Recv(previous_processor_name, NODE_NAME_LENGTH, MPI_CHAR, myID-1, 111, MPI_COMM_WORLD, &status); // Receive the processor name and gpu_id from the previous thread
// printf("\n -> MPI_Recv thread %d: gpu_id=%d, %s\n", myID, (int)previous_processor_name[NODE_NAME_LENGTH-1], previous_processor_name); fflush(stdout); //!!Verbose!!
}
// Compare the first 30 characters of the 2 names to see if we changed the node, except for the first thread, which always gets GPU 0:
if ((0==myID) || (0!=strncmp(processor_name, previous_processor_name, NODE_NAME_LENGTH-1)))
{
*gpu_id = 0; // Thread in a new node: assign to GPU 0:
}
else
{
// Current thread in the same node as the previous one: assign next GPU (previous GPU id given in element NODE_NAME_LENGTH-1 of the array)
*gpu_id = (int)previous_processor_name[NODE_NAME_LENGTH-1] + 1;
}
// Set the following GPU if this is the one to be skipped (given in the input file):
if (*gpu_id == gpu_id_to_avoid)
{
*gpu_id = *gpu_id + 1;
printf(" Skipping GPU %d in thread %d (%s), as selected in the input file: gpu_id=%d\n", gpu_id_to_avoid, myID, processor_name, *gpu_id); fflush(stdout);
}
//!!DeBuG!! MC-GPU_v1.4!! Skip GPUs connected to a monitor, if more GPUs available:
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, *gpu_id));
if (0!=deviceProp.kernelExecTimeoutEnabled) //!!DeBuG!!
{
if((*gpu_id)<(deviceCount-1)) //!!DeBuG!!
{
printf("\n ==> CUDA: GPU #%d is connected to a display and the CUDA driver would limit the kernel run time. Skipping this GPU!!\n", *gpu_id); //!!DeBuG!!
*gpu_id = (*gpu_id)+1; //!!DeBuG!!
}
}
// Send the processor and GPU id to the following thread, unless we are the last thread:
if (myID != (numprocs-1))
{
processor_name[NODE_NAME_LENGTH-1] = (char)(*gpu_id); // Store GPU number in the last element of the array
// printf(" <- MPI_Send thread %d: gpu_id=%d, %s\n", myID, (int)processor_name[NODE_NAME_LENGTH-1], processor_name); fflush(stdout); //!!Verbose!!
MPI_Send(processor_name, NODE_NAME_LENGTH, MPI_CHAR, myID+1, 111, MPI_COMM_WORLD); // Send processor name and gpu_id to the following thread (tag is the current thread id)
}
else
{
printf(" -- Time spent communicating between threads to determine the GPU id to use in each thread: %.6f s\n", ((double)(clock()-clock_start))/CLOCKS_PER_SEC); fflush(stdout);
}
}
#endif
if (*gpu_id>=deviceCount)
{
printf("\n\n !!WARNING!! The selected GPU number is too high, this device number does not exist!! GPU_id (starting at 0)=%d, deviceCount=%d\n", (*gpu_id), deviceCount); fflush(stdout);
if (numprocs==1)
{
*gpu_id = gpuGetMaxGflopsDeviceId();
printf(" Selecting the fastest GPU available using gpuGetMaxGflopsDeviceId(): GPU_id = %d\n\n", (*gpu_id)); fflush(stdout);
}
else
{
exit(-1);
}
}
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, *gpu_id)); // Re-load card properties in case we changed gpu_id
if (deviceProp.major>99 || deviceProp.minor>99)
{
printf("\n\n\n !!ERROR!! The selected GPU device does not support CUDA!! GPU_id=%d, deviceCount=%d, compute capability=%d.%d\n\n\n", (*gpu_id), deviceCount, deviceProp.major,deviceProp.minor);
exit(-1);
}
checkCudaErrors(cudaSetDevice(*gpu_id)); // Set the GPU device. (optionally use: cutGetMaxGflopsDeviceId())
if (deviceProp.major>1)
{
#ifdef LARGE_CACHE
// -- Compute capability > 1: set a large L1 cache for the global memory, reducing the size of the shared memory:
// cudaFuncCachePreferShared: shared memory is 48 KB
// cudaFuncCachePreferL1: shared memory is 16 KB
// cudaFuncCachePreferNone: no preference
printf("\n ==> CUDA: LARGE_CACHE defined --> setting a large global memory cache (L1) and a small shared memory (cudaFuncCachePreferL1).\n");
cudaFuncSetCacheConfig(track_particles, cudaFuncCachePreferL1); // -- Set a large cache instead of a large shared memory.
// #else
// -- Using default:
// printf("\n ==> CUDA: LARGE_CACHE not defined --> setting a large shared memory and a small global memory cache (cudaFuncCachePreferShared).\n");
// cudaFuncSetCacheConfig(track_particles, cudaFuncCachePreferShared); // !!DeBuG!! Setting size of shared memory/global cache
#endif
}
// DISCONTINUED CUDA FUNCTION! register int GPU_cores = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount; // CUDA SDK function to get the number of GPU cores
// -- Reading the device properties:
#ifdef USING_MPI
printf("\n ==> CUDA (MPI process #%d): %d CUDA enabled GPU detected! Using device #%d: \"%s\"\n", myID, deviceCount, (*gpu_id), deviceProp.name);
#else
printf("\n ==> CUDA: %d CUDA enabled GPU detected! Using device #%d: \"%s\"\n", deviceCount, (*gpu_id), deviceProp.name);
#endif
// printf(" Compute capability: %d.%d, Number multiprocessors: %d, Number cores: %d\n", deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount, GPU_cores);
printf(" Compute capability: %d.%d, Number multiprocessors: %d\n", deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
printf(" Clock rate: %.2f GHz, Global memory: %.3f Mbyte, Constant memory: %.2f kbyte\n", deviceProp.clockRate*1.0e-6f, deviceProp.totalGlobalMem/(1024.f*1024.f), deviceProp.totalConstMem/1024.f);
printf(" Shared memory per block: %.2f kbyte, Registers per block: %.2f kbyte\n", deviceProp.sharedMemPerBlock/1024.f, deviceProp.regsPerBlock/1024.f);
int driverVersion = 0, runtimeVersion = 0;
cudaDriverGetVersion(&driverVersion);
cudaRuntimeGetVersion(&runtimeVersion);
printf(" CUDA Driver Version: %d.%d, Runtime Version: %d.%d\n\n", driverVersion/1000, driverVersion%100, runtimeVersion/1000, runtimeVersion%100);
if (0!=deviceProp.kernelExecTimeoutEnabled)
{
printf("\n\n\n !!WARNING!! The selected GPU is connected to a display and therefore CUDA driver will limit the kernel run time to 5 seconds and the simulation will likely fail!!\n");
printf( " You can fix this by executing the simulation in a different GPU (select number in the input file) or by turning off the window manager and using the text-only Linux shell.\n\n\n");
// exit(-1);
}
fflush(stdout);
clock_t clock_init = clock();
// -- Allocate the constant variables in the device:
checkCudaErrors(cudaMemcpyToSymbol(voxel_data_CONST, voxel_data, sizeof(struct voxel_struct)));
checkCudaErrors(cudaMemcpyToSymbol(source_energy_data_CONST, source_energy_data, sizeof(struct source_energy_struct)));
// Source, detector data now copied to global memory and transfered to shared memory in the kernel. OLD CODE: checkCudaErrors(cudaMemcpyToSymbol(detector_data_CONST, detector_data, sizeof(struct detector_struct)));
checkCudaErrors(cudaMemcpyToSymbol(mfp_table_data_CONST, mfp_table_data, sizeof(struct linear_interp)));
checkCudaErrors(cudaMemcpyToSymbol(dose_ROI_x_min_CONST, dose_ROI_x_min, sizeof(short int)));
checkCudaErrors(cudaMemcpyToSymbol(dose_ROI_x_max_CONST, dose_ROI_x_max, sizeof(short int)));
checkCudaErrors(cudaMemcpyToSymbol(dose_ROI_y_min_CONST, dose_ROI_y_min, sizeof(short int)));
checkCudaErrors(cudaMemcpyToSymbol(dose_ROI_y_max_CONST, dose_ROI_y_max, sizeof(short int)));
checkCudaErrors(cudaMemcpyToSymbol(dose_ROI_z_min_CONST, dose_ROI_z_min, sizeof(short int)));
checkCudaErrors(cudaMemcpyToSymbol(dose_ROI_z_max_CONST, dose_ROI_z_max, sizeof(short int)));
double total_mem = sizeof(struct voxel_struct)+sizeof(struct source_struct)+sizeof(struct detector_struct)+sizeof(struct linear_interp) + 6*sizeof(short int);
MASTER_THREAD printf(" ==> CUDA: Constant data successfully copied to the device. CONSTANT memory used: %lf kbytes (%.1lf%%)\n", total_mem/1024.0, 100.0*total_mem/deviceProp.totalConstMem);
// -- Allocate the device global memory:
if (*dose_ROI_x_max > -1) // Allocate dose array only if the tally is not disabled
{
checkCudaErrors(cudaMalloc((void**) voxels_Edep_device, voxels_Edep_bytes));
if (*voxels_Edep_device==NULL)
{
printf("\n cudaMalloc ERROR!! Error allocating the dose array on the device global memory!! (%lf Mbytes)\n", voxels_Edep_bytes/(1024.0*1024.0));
exit(-1);
}
}
checkCudaErrors(cudaMalloc((void**) voxel_mat_dens_device, voxel_mat_dens_bytes));
checkCudaErrors(cudaMalloc((void**) bitree_device, bitree_bytes)); //!!bitree!! v1.5b
checkCudaErrors(cudaMalloc((void**) image_device, image_bytes));
checkCudaErrors(cudaMalloc((void**) mfp_Woodcock_table_device, mfp_Woodcock_table_bytes));
checkCudaErrors(cudaMalloc((void**) mfp_table_a_device, mfp_table_bytes));
checkCudaErrors(cudaMalloc((void**) mfp_table_b_device, mfp_table_bytes));
checkCudaErrors(cudaMalloc((void**) rayleigh_table_device, sizeof(struct rayleigh_struct)));
checkCudaErrors(cudaMalloc((void**) compton_table_device, sizeof(struct compton_struct)));
checkCudaErrors(cudaMalloc((void**) detector_data_device, num_projections*sizeof(struct detector_struct)));
checkCudaErrors(cudaMalloc((void**) source_data_device, num_projections*sizeof(struct source_struct))); // The detector and source arrays have "MAX_NUM_PROJECTIONS" elements, but only the "num_projections" elements actually used are allocated in the GPU
checkCudaErrors(cudaMalloc((void**) seed_input_device, sizeof(int))); // Store latest random seed used in GPU in global memory to continue random sequence in consecutive projections. !!DBTv1.4!!
if (flag_material_dose==1)
checkCudaErrors(cudaMalloc((void**) materials_dose_device, MAX_MATERIALS*sizeof(ulonglong2))); // !!tally_materials_dose!!
total_mem = voxels_Edep_bytes + voxel_mat_dens_bytes + image_bytes + mfp_Woodcock_table_bytes + 2*mfp_table_bytes + sizeof(struct compton_struct) + sizeof(struct rayleigh_struct) + num_projections*(sizeof(struct detector_struct) + sizeof(struct source_struct)) + bitree_bytes;
if (*voxel_mat_dens_device==NULL || *image_device==NULL || *mfp_Woodcock_table_device==NULL || *mfp_table_a_device==NULL ||
*mfp_table_b_device==NULL || *rayleigh_table_device==NULL || *compton_table_device==NULL || *detector_data_device==NULL || *source_data_device==NULL)
{
printf("\n cudaMalloc ERROR!! Device global memory not correctly allocated!! (%lf Mbytes)\n", total_mem/(1024.0*1024.0));
exit(-1);
}
else
{
MASTER_THREAD printf(" ==> CUDA: Device global memory correctly allocated. GLOBAL memory used: %lf Mbytes (%.1lf%%)\n", total_mem/(1024.0*1024.0), 100.0*total_mem/deviceProp.totalGlobalMem);
}
// --Copy the host memory to the device:
checkCudaErrors(cudaMemcpy(*voxel_mat_dens_device, voxel_mat_dens, voxel_mat_dens_bytes, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(*bitree_device, bitree, bitree_bytes, cudaMemcpyHostToDevice)); //!!bitree!! v1.5b
checkCudaErrors(cudaMemcpy(*mfp_Woodcock_table_device, mfp_Woodcock_table, mfp_Woodcock_table_bytes, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(*mfp_table_a_device, mfp_table_a, mfp_table_bytes, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(*mfp_table_b_device, mfp_table_b, mfp_table_bytes, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(*rayleigh_table_device, rayleigh_table, sizeof(struct rayleigh_struct), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(*compton_table_device, compton_table, sizeof(struct compton_struct), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(*detector_data_device, detector_data, num_projections*sizeof(struct detector_struct),cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(*source_data_device, source_data, num_projections*sizeof(struct source_struct), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(*seed_input_device, seed_input, sizeof(int), cudaMemcpyHostToDevice)); // Upload initial seed value. !!DBTv1.4!!
// --Init the image array to 0 using a GPU kernel instead of cudaMemcpy:
// Simple version: checkCudaErrors( cudaMemcpy( image_device, image, image_bytes, cudaMemcpyHostToDevice) );
int pixels_per_image = detector_data[0].num_pixels.x * detector_data[0].num_pixels.y;
MASTER_THREAD printf(" ==> CUDA: Launching kernel to initialize the device image to 0: number of blocks = %d, threads per block = 128\n", (int)(ceil(pixels_per_image/128.0f)+0.01f) );
init_image_array_GPU<<<(int)(ceil(pixels_per_image/128.0f)+0.01f),128>>>(*image_device, pixels_per_image);
fflush(stdout);
cudaThreadSynchronize(); // Force the runtime to wait until all device tasks have completed
getLastCudaError("\n\n !!Kernel execution failed initializing the image array!! "); // Check if kernel execution generated any error:
// --Init the dose array to 0 using a GPU kernel, if the tally is not disabled:
if (*dose_ROI_x_max > -1)
{
MASTER_THREAD printf(" ==> CUDA: Initialize the device dose deposition to 0 using cudaMemcpy.\n");
checkCudaErrors(cudaMemcpy(*voxels_Edep_device, voxels_Edep, voxels_Edep_bytes, cudaMemcpyHostToDevice) );
/* // -- OPTIONAL CODE: Launch kernel to initialize the device dose deposition to 0 (MAY FAIL IF DOSE MATRIX IS TOO BIG!) !!DeBuG!!
int num_voxels_dose = voxels_Edep_bytes/sizeof(ulonglong2); // Calculate the number of voxels in the dose array
int num_blocks, num_threads_block = 0;
// Select the number of threads per block making sure we don't try to launch more blocks than CUDA's maximum value:
do
{
num_threads_block += 64;
num_blocks = (int)(ceil(((double)num_voxels_dose)/((double)num_threads_block))+0.001);
}
while (num_blocks > 65500);
MASTER_THREAD printf(" ==> CUDA: Launching kernel to initialize the device dose deposition to 0: number of blocks = %d, threads per block = %d\n", num_blocks, num_threads_block);
init_dose_array_GPU<<<num_blocks,num_threads_block>>>(*voxels_Edep_device, num_voxels_dose);
cudaThreadSynchronize();
getLastCudaError("\n\n !!Kernel execution failed initializing the dose array!! "); // Check if kernel execution generated any error:
*/
}
// Init materials_dose array in GPU with 0 (same as host):
if (flag_material_dose==1)
checkCudaErrors(cudaMemcpy(*materials_dose_device, materials_dose, MAX_MATERIALS*sizeof(ulonglong2), cudaMemcpyHostToDevice)); // !!tally_materials_dose!!
MASTER_THREAD printf(" Time spent allocating and copying memory to the device: %.6f s\n", float(clock()-clock_init)/CLOCKS_PER_SEC);
}
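// Illustrative sketch, not called by the simulation: the generic allocate/initialize/copy/check pattern
// used repeatedly in init_CUDA_device() above, applied to a hypothetical float buffer. It relies on the
// checkCudaErrors() and getLastCudaError() helpers already used above; the function and kernel names
// below are illustrative only. cudaDeviceSynchronize() is used instead of the deprecated cudaThreadSynchronize().
__global__ void init_float_array_example_GPU(float* array, int n)      // Zero a device array, one element per thread
{
  int i = blockIdx.x*blockDim.x + threadIdx.x;
  if (i < n)
    array[i] = 0.0f;
}
static void device_buffer_pattern_example(const float* host_data, int n)
{
  float* device_data = NULL;
  checkCudaErrors(cudaMalloc((void**)&device_data, n*sizeof(float)));                             // Allocate device global memory
  init_float_array_example_GPU<<<(n+127)/128, 128>>>(device_data, n);                             // Initialize with a small kernel (as done for the image array)
  checkCudaErrors(cudaDeviceSynchronize());                                                       // Wait for the kernel to finish
  getLastCudaError("Kernel execution failed in device_buffer_pattern_example");                   // Check for launch errors
  checkCudaErrors(cudaMemcpy(device_data, host_data, n*sizeof(float), cudaMemcpyHostToDevice));   // Or copy host data instead (as done for the material tables)
  checkCudaErrors(cudaFree(device_data));
}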
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//! Guestimate a good number of blocks to estimate the speed of different generations
//! of GPUs. Slower GPUs will simulate fewer particles and hopefully the fastest GPUs
//! will not have to wait much. If the speed is not accurately estimated in the speed test
//! some GPUs will simulate longer than others and valuable simulation time will be wasted
//! in the idle GPUs.
//!
//! In this function the "optimum" number of blocks for the speed test is heuristically
//! computed as the product of three GPU characteristics:
//!   [scaling factor] * [number of GPU cores] * [core frequency] * [major CUDA compute capability] + [additive constant]
//!
//! The scaling factor and the additive constant are arbitrary and can be modified depending on
//! the case (for short simulations the factor may have to be reduced or the speed test will take
//! longer than the whole simulation); the additive constant tries to provide enough blocks for a
//! reliable timing of slow GPUs. The implementation below uses a factor of 0.5 and a constant of
//! 64 blocks, approximates 128 cores per multiprocessor (the exact core count can no longer be
//! queried), and caps the result at 1024 blocks.
//!
//! For example, with the original constants (2.0 and 100 blocks) an NVIDIA GeForce 290 got:
//!   2.0 * 240 (cores) * 1.24 (GHz) * 1 (major compute capability) + 100 = 695.2 ~ 695 blocks
//! and an NVIDIA GeForce 580 got:
//!   2.0 * 512 (cores) * 1.54 (GHz) * 2 (major compute capability) + 100 = 3253.9 ~ 3254 blocks
//! so the 580 got about 4.7 times as many blocks as the 290.
//!
//! @param[in] gpu_id GPU number
//! @param[out] num_blocks Returns a number of blocks related to the expected GPU speed
////////////////////////////////////////////////////////////////////////////////
int guestimate_GPU_performance(int gpu_id)
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, gpu_id);
// DISCONTINUED CUDA FUNCTION! float num_cores = (float) _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount;
float num_cores_aprox = 128 * deviceProp.multiProcessorCount; // I can't get the exact number of cores anymore; assume 128 per multiprocessor
float comp_capability = (float) deviceProp.major;
float frequency = deviceProp.clockRate*1.0e-6f;
int guestimated_value = (int)(0.5f*num_cores_aprox*frequency*comp_capability + 64.0f);
return min_value(guestimated_value, 1024); // Limit the returned number of blocks to prevent too long speed tests !!DBT!!
}
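// Worked example of the heuristic above for a hypothetical GPU with 28 multiprocessors, a 1.50 GHz
// clock and compute capability 7 (illustrative values, not a measured device):
//   num_cores_aprox = 128*28 = 3584
//   0.5 * 3584 * 1.50 * 7 + 64 = 18880  -->  capped to 1024 blocks by min_value()
// For recent GPUs the returned value is therefore usually limited by the 1024-block cap.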
#endif
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//! Report the tallied image in ASCII and binary form (32-bit floats).
//! Separate images for primary and scatter radiation are generated.
//!
//!
//! @param[in] file_name_output File where tallied image is reported
//! @param[in] detector_data Detector description read from the input file (pointer to detector_struct)
//! @param[in] image Tallied image (in meV per pixel)
//! @param[in] time_elapsed Time elapsed during the main loop execution (in seconds)
//! @param[in] total_histories Total number of x-rays simulated
////////////////////////////////////////////////////////////////////////////////
int report_image(char* file_name_output, struct detector_struct* detector_data, struct source_struct* source_data, float mean_energy_spectrum, unsigned long long int* image, double time_elapsed, unsigned long long int total_histories, int current_projection, int num_projections, int myID, int numprocs, double current_angle, int* seed_input)
{
int pixels_per_image = (detector_data[0].num_pixels.x*detector_data[0].num_pixels.y), pixel=0;
float* energy_noScatter_array = (float*) malloc(pixels_per_image*sizeof(float)); // Allocate temporary space to report the binary image at the end
float* energy_compton_array = (float*) malloc(pixels_per_image*sizeof(float));
float* energy_rayleigh_array = (float*) malloc(pixels_per_image*sizeof(float));
float* energy_multiscatter_array = (float*) malloc(pixels_per_image*sizeof(float));
// -- Report data:
printf("\n\n *** IMAGE TALLY PERFORMANCE REPORT ***\n");
if(num_projections!=1) // Output the projection angle when simulating a CT:
{
printf(" Tomographic projection %d of %d: acquisition angle = %lf \n", current_projection, num_projections, current_angle*RAD2DEG);
}
printf(" Simulated x rays: %lld\n", total_histories);
printf(" Simulation time [s]: %.2f\n", time_elapsed);
if (time_elapsed>0.000001)
printf(" Speed [x-rays/s]: %.2f\n\n", ((double)total_histories)/time_elapsed);
FILE* file_ptr = fopen(file_name_output, "w");
if (file_ptr==NULL)
{
printf("\n\n !!fopen ERROR report_image!! File %s can not be opened!!\n", file_name_output);
exit(-3);
}
// -- Report the whole image in ASCII text only if the file name contains ".dat". Otherwise, save disk space by writing only the header in ASCII and the data in raw format: // !!DBTv1.4!!
bool flag_report_ASCII = false;
if (strstr(file_name_output,".dat")!=NULL)
flag_report_ASCII = true; // !!DBTv1.4!!
fprintf(file_ptr, "# \n");
fprintf(file_ptr, "# *****************************************************************************\n");
fprintf(file_ptr, "# *** MC-GPU, version 1.4_DBT (http://code.google.com/p/mcgpu/) ***\n");
fprintf(file_ptr, "# *** ***\n");
fprintf(file_ptr, "# *** Andreu Badal ([email protected]) ***\n");
fprintf(file_ptr, "# *****************************************************************************\n");
fprintf(file_ptr, "# \n");
#ifdef USING_CUDA
fprintf(file_ptr, "# *** SIMULATION IN THE GPU USING CUDA ***\n");
#else
fprintf(file_ptr, "# *** SIMULATION IN THE CPU ***\n");
#endif
fprintf(file_ptr, "#\n");
fprintf(file_ptr, "# Image created counting the energy arriving at each pixel: ideal energy integrating detector.\n");
int2 seed;
if (detector_data[0].gain_W<0.001f)
{
// Reporting the image in ideal energy fluence units:
fprintf(file_ptr, "# Pixel value units: eV/cm^2 per history (energy fluence).\n");
}
else
{
// Reporting the image in charge units (electrons) after sampling a number of electron-hole-pairs generated by the tallied energy deposition according to the input detector gain and Swank:
fprintf(file_ptr, "# Pixel value units: electrons/cm^2 per history (detected charge).\n"); //!!DETECTOR_RESPONSE!!
fprintf(file_ptr, "# Detector gain W_+- = %f eV/detected_charge\n", detector_data[0].gain_W);
fprintf(file_ptr, "# Swank factor = %f -> relative std_dev of gain = %f\n", 1.0f/(1.0f+detector_data[0].Swank_rel_std*detector_data[0].Swank_rel_std), detector_data[0].Swank_rel_std); // Swank_factor = mean^2/(mean^2 + std_dev^2) --> (std_dev/mean) = sqrt(1/Swank_factor - 1)
fprintf(file_ptr, "# Mean electronic noise per pixel = %f electrons\n", detector_data[0].electronic_noise);
fprintf(file_ptr, "# Conversion from the energy deposited in each pixel to charge is done sampling a Gaussian distribution with:\n");
fprintf(file_ptr, "# mean = E_deposited/gain_W + electronic_noise\n");
fprintf(file_ptr, "# std_dev^2 = mean*sqrt(1/Swank_factor - 1) + sqrt(electronic_noise)\n");
// Init random seed for the Gaussian sampling:
seed.x = *seed_input;
seed.y = *seed_input;
int l;
for (l=0; l<1234; l++)
{
double dummy = ranecu_double_CPU(&seed); // Discard over a thousand random values to avoid correlations caused by initializing both MLCGs with the same seed
}
}
fprintf(file_ptr, "#\n");
if(num_projections!=1) // Output the projection angle when simulating a CT:
{
// !!DBTv1.4!! First tomo projection is [1], proj [0] is 0 deg if flag_simulateMammoAfterDBT==true
fprintf(file_ptr, "# Tomographic projection %d of %d: acquisition angle = %lf \n", current_projection, num_projections, current_angle*RAD2DEG);
}
fprintf(file_ptr, "# Focal spot position = (%.8f,%.8f,%.8f), cone beam direction = (%.8f,%.8f,%.8f)\n", source_data[current_projection].position.x, source_data[current_projection].position.y, source_data[current_projection].position.z, source_data[current_projection].direction.x, source_data[current_projection].direction.y, source_data[current_projection].direction.z);
fprintf(file_ptr, "# Focal spot Gaussian blur FWHM = %f\n", source_data[current_projection].focal_spot_FWHM);
fprintf(file_ptr, "# Focal spot rotation blur = %f degrees\n", source_data[current_projection].rotation_blur*RAD2DEG);
if (detector_data[current_projection].grid_freq>0.0f)
fprintf(file_ptr, "# Antiscatter grid ratio = %f ; grid frequency = %f lines per cm\n", fabsf(detector_data[current_projection].grid_ratio), detector_data[current_projection].grid_freq); // !!DBTv1.5!!
else
fprintf(file_ptr, "# Antiscatter grid not used.\n");
fprintf(file_ptr, "# \n");
fprintf(file_ptr, "# Pixel size: %lf x %lf = %lf cm^2\n", 1.0/(double)(detector_data[0].inv_pixel_size_X), 1.0/(double)(detector_data[0].inv_pixel_size_Z), 1.0/(double)(detector_data[0].inv_pixel_size_X*detector_data[0].inv_pixel_size_Z));
fprintf(file_ptr, "# Number of pixels in X and Z: %d %d\n", detector_data[0].num_pixels.x, detector_data[0].num_pixels.y);
fprintf(file_ptr, "# \n");
fprintf(file_ptr, "# The image data is reported in binary format in the .raw files (each pixel given as 32-bit float, little-endian order).\n");
// SEPARATE SCATTER: fprintf(file_ptr, "# Five images are reported one after the other in each .raw file: all signal combined, non-scattered x-rays, Compton scatter, Rayleigh scatter, multiple-scatter.\n");
// NOT SEPARATING SCATTER: January 2018
fprintf(file_ptr, "# Two images are reported one after the other in each .raw file: all SCATTER and PRIMARIES combined and primary x-rays only (which includes additive electronic noise)\n");
fprintf(file_ptr, "# \n");
if (source_data[0].flag_halfConeX) // Sampling only half beam towards +X for mammo geometry! !!DBT!! !!HalfBeam!! !!DBTv1.4!!
{
fprintf(file_ptr, "# NOTE: \"flag_halfCone==true\" --> Image acquired with only half cone beam towards positive azimuthal angles, with beam offset to the edge of the image.\n"); // !!DBT!! !!HalfBeam!! !!DBTv1.4!!
}
if (flag_report_ASCII)
{
fprintf(file_ptr, "# Order of pixel data below: X rows (width) first, blank line separates the different Z rows (height).\n");
fprintf(file_ptr, "# \n");
fprintf(file_ptr, "# [NON-SCATTERED] [COMPTON] [RAYLEIGH] [MULTIPLE-SCATTING]\n");
fprintf(file_ptr, "# ==========================================================\n");
}
else
{
fprintf(file_ptr, "# Pixel data is provided only in the .raw files. To report the data in ASCII format, include the string \".dat\" in the input image file name.\n");
fprintf(file_ptr, "# \n");
}
// -- Prepare binary output:
char file_binary[250];
strncpy (file_binary, file_name_output, 250);
strcat(file_binary,".raw"); // !!BINARY!!
FILE* file_binary_ptr = fopen(file_binary, "w"); // !!BINARY!!
if (file_binary_ptr==NULL)
{
printf("\n\n !!fopen ERROR report_image!! Binary file %s can not be opened for writing!!\n", file_binary);
exit(-3);
}
const double invSCALE = 1.0/SCALE_eV; // conversion to eV using the inverse of the constant used in the "tally_image" kernel function (defined in the header file)
const double invHIST = 1.0 / ((double)total_histories); // ==> [eV per history]
const double invAREA = detector_data[0].inv_pixel_size_X * detector_data[0].inv_pixel_size_Z; // ==> [eV/cm^2 per history]
double energy_noScatter, energy_compton, energy_rayleigh, energy_multiscatter;
double energy_integral = 0.0; // Integrate (add) the energy in the image pixels [meV]
double maximum_energy_pixel = -100.0; // Find maximum pixel signal
int maximum_energy_pixel_x=0, maximum_energy_pixel_y=0, maximum_energy_pixel_number=0;
int i, j;
for(j=0; j<detector_data[0].num_pixels.y; j++)
{
for(i=0; i<detector_data[0].num_pixels.x; i++)
{
pixel = i + j*detector_data[0].num_pixels.x; // Set current pixel
energy_noScatter = invSCALE*(double)(image[pixel]);
energy_compton = invSCALE*(double)(image[pixel + pixels_per_image]);
energy_rayleigh = invSCALE*(double)(image[pixel + 2*pixels_per_image]);
energy_multiscatter = invSCALE*(double)(image[pixel + 3*pixels_per_image]);
if (detector_data[0].gain_W<0.001f)
{
// Normalize detected energy by number of histories and pixel area ==> energy fluence per hist.
energy_noScatter = energy_noScatter * invHIST * invAREA;
energy_compton = energy_compton * invHIST * invAREA;
energy_rayleigh = energy_rayleigh * invHIST * invAREA;
energy_multiscatter = energy_multiscatter * invHIST * invAREA;
// -- Write the results in an external file; the image corresponding to all particles is not written: it can be inferred by adding all the images
if (flag_report_ASCII)
fprintf(file_ptr, "%.8lf %.8lf %.8lf %.8lf\n", energy_noScatter, energy_compton, energy_rayleigh, energy_multiscatter); // Report energy fluence per history
// Keep data as arrays to be output at the end in binary form:
energy_noScatter_array[pixel] = (float)energy_noScatter;
energy_compton_array[pixel] = (float)energy_compton;
energy_rayleigh_array[pixel] = (float)energy_rayleigh;
energy_multiscatter_array[pixel]= (float)energy_multiscatter;
}
else
{
//!!DETECTOR_RESPONSE!! Convert the detected energy to a random number of charges using a Gaussian distribution with variance == mean:
//!!DETECTOR_RESPONSE!! Additive electronic noise is added only to the no-scatter results: it can't be added to each scatter image because it would be counted 4 times in the final image.
// Sample 4 gaussian distributed random variables (mean=0, std_dev=1):
double g1=0.0, g2=0.0, g3=0.0, g4=0.0;
gausspdf_double_CPU(&g1, &g2, &seed);
gausspdf_double_CPU(&g3, &g4, &seed);
// Conversion from the energy deposited in each pixel to charge is done sampling a Gaussian distribution with:
// mean = E_deposited/gain_W + electronic_noise
// std_dev^2 = mean*sqrt(1/Swank_factor - 1) + sqrt(electronic_noise)
double mean = energy_noScatter/detector_data[0].gain_W;
double std_dev = sqrt(mean*detector_data[0].Swank_rel_std); // Swank_rel_std = sqrt(1.0/Swank-1.0)
mean = mean + detector_data[0].electronic_noise; // Adding additive electronic noise
std_dev = std_dev + sqrt(detector_data[0].electronic_noise);
energy_noScatter = g1*std_dev + mean;
if (energy_noScatter<0.0) energy_noScatter = 0.0; // Prevent negative pixel values
mean = energy_compton/detector_data[0].gain_W;
std_dev = sqrt(mean*detector_data[0].Swank_rel_std);
energy_compton = g2*std_dev + mean;
if (energy_compton<0.0) energy_compton = 0.0;
mean = energy_rayleigh/detector_data[0].gain_W;
std_dev = sqrt(mean*detector_data[0].Swank_rel_std);
energy_rayleigh = g3*std_dev + mean;
if (energy_rayleigh<0.0) energy_rayleigh = 0.0;
mean = energy_multiscatter/detector_data[0].gain_W;
std_dev = sqrt(mean*detector_data[0].Swank_rel_std);
energy_multiscatter = g4*std_dev + mean;
if (energy_multiscatter<0.0) energy_multiscatter = 0.0;
if (flag_report_ASCII)
fprintf(file_ptr, "%d %d %d %d\n", (int)(energy_noScatter+0.5), (int)(energy_compton+0.5), (int)(energy_rayleigh+0.5), (int)(energy_multiscatter+0.5)); // Report collected charge
// Keep data as arrays to be output at the end in binary form:
energy_noScatter_array[pixel] = (float)round(energy_noScatter); // Round the floating point value into an integral number of charges
energy_compton_array[pixel] = (float)round(energy_compton);
energy_rayleigh_array[pixel] = (float)round(energy_rayleigh);
energy_multiscatter_array[pixel]= (float)round(energy_multiscatter);
}
double total_energy_pixel = (double)(image[pixel] + image[pixel + pixels_per_image] + image[pixel + 2*pixels_per_image] + image[pixel + 3*pixels_per_image]); // Find and report the pixel with maximum signal
if (total_energy_pixel>maximum_energy_pixel)
{
maximum_energy_pixel = total_energy_pixel;
maximum_energy_pixel_x = i;
maximum_energy_pixel_y = j;
maximum_energy_pixel_number = pixel;
}
energy_integral += total_energy_pixel; // Count total energy in the whole image
}
if (flag_report_ASCII)
fprintf(file_ptr, "\n"); // Separate rows with an empty line for visualization with gnuplot.
}
fprintf(file_ptr, "#\n");
fprintf(file_ptr, "# *** Simulation REPORT: ***\n");
fprintf(file_ptr, "# Fraction of source energy detected (over the mean energy of the spectrum): %.3lf%%\n", 100.0*invSCALE*(energy_integral/(double)(total_histories))/(double)(mean_energy_spectrum));
fprintf(file_ptr, "# Maximum energy fluence detected in pixel %i: (x,y)=(%i,%i) -> pixel value = %lf eV/cm^2 per hist.\n", maximum_energy_pixel_number, maximum_energy_pixel_x, maximum_energy_pixel_y, maximum_energy_pixel*invSCALE*invAREA*invHIST);
fprintf(file_ptr, "# Simulated x rays: %lld\n", total_histories);
fprintf(file_ptr, "# Simulation time [s]: %.2f\n", time_elapsed);
if (time_elapsed>0.000001)
fprintf(file_ptr, "# Speed [x-rays/sec]: %.2f\n\n", ((double)total_histories)/time_elapsed);
fclose(file_ptr); // Close output file and flush stream
printf(" Fraction of initial energy arriving at the detector (over the mean energy of the spectrum): %.3lf%%\n", 100.0*invSCALE*(energy_integral/(double)(total_histories))/(double)(mean_energy_spectrum));
printf(" Maximum energy fluence detected in pixel %i: (x,y)=(%i,%i). Maximum pixel value = %lf eV/cm^2 per hist.\n\n", maximum_energy_pixel_number, maximum_energy_pixel_x, maximum_energy_pixel_y, maximum_energy_pixel*invSCALE*invAREA*invHIST);
fflush(stdout);
// Report binary data as consecutive images in a binary file:
for(i=0; i<pixels_per_image; i++)
{
float energy_float = energy_noScatter_array[i] + energy_compton_array[i] + energy_rayleigh_array[i] + energy_multiscatter_array[i];
fwrite(&energy_float, sizeof(float), 1, file_binary_ptr); // Total image (scatter + primary)
}
for(i=0; i<pixels_per_image; i++)
fwrite(&energy_noScatter_array[i], sizeof(float), 1, file_binary_ptr); // Non-scattered image
// NOT SEPARATING SCATTER: January 2018
/*
for(i=0; i<pixels_per_image; i++)
fwrite(&energy_compton_array[i], sizeof(float), 1, file_binary_ptr); // Compton image
for(i=0; i<pixels_per_image; i++)
fwrite(&energy_rayleigh_array[i], sizeof(float), 1, file_binary_ptr); // Rayleigh image
for(i=0; i<pixels_per_image; i++)
fwrite(&energy_multiscatter_array[i], sizeof(float), 1, file_binary_ptr); // Multiple-scatter image
*/
// //!!DeBuG!! REPORT THE PIXEL ENERGY PRIMARY AFTER THE SAMPLED CHARGES FOR DEBUGGING; NOT NORMALIZED!
// for(i=0; i<pixels_per_image; i++)
// {
// float energy_float = (float)(image[i]*invSCALE); //*invHIST*invAREA);
// fwrite(&energy_float, sizeof(float), 1, file_binary_ptr); //!!DeBuG!! !!DeBuG!!
// }
// for(i=0; i<pixels_per_image; i++)
// {
// float energy_float = (float)((image[i] + image[i + pixels_per_image] + image[i + 2*pixels_per_image] + image[i + 3*pixels_per_image])*invSCALE); //*invHIST*invAREA);
// fwrite(&energy_float, sizeof(float), 1, file_binary_ptr); //!!DeBuG!! !!DeBuG!!
// }
fclose(file_binary_ptr);
free(energy_noScatter_array);
free(energy_compton_array);
free(energy_rayleigh_array);
free(energy_multiscatter_array);
return 0; // The report could return a value other than 0 to continue the simulation...
}
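// Illustrative sketch, not called by the simulation: the energy-to-charge conversion performed in
// report_image() above when a detector gain is defined in the input file. The standard normal
// deviate 'g' is assumed to be sampled externally (eg, with gausspdf_double_CPU), and the detector
// parameters are the same detector_struct fields used above. Note that in the code above the
// additive electronic noise is applied only once, to the primary (no-scatter) image.
static double energy_to_charge_example(double energy_eV, double g, const struct detector_struct* det)
{
  double mean    = energy_eV/det->gain_W;              // Mean number of detected charges for the deposited energy
  double std_dev = sqrt(mean*det->Swank_rel_std);      // Swank gain fluctuations: Swank_rel_std = sqrt(1/Swank_factor - 1)
  mean    = mean + det->electronic_noise;              // Additive electronic noise
  std_dev = std_dev + sqrt(det->electronic_noise);
  double charge = g*std_dev + mean;                    // Gaussian sample of the collected charge
  if (charge < 0.0)
    charge = 0.0;                                      // Prevent negative pixel values
  return round(charge);                                // Round to an integral number of charges
}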
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
//! Report the total tallied 3D voxel dose deposition for all projections.
//! The voxel doses in the input ROI and their respective uncertainties are reported
//! in binary form (32-bit floats) in two separate .raw files.
//! The dose in a single plane at the level of the focal spot is also reported in
//! ASCII format for simple visualization with GNUPLOT.
//! The total dose deposited in each different material is reported to the standard output.
//! The material dose is calculated by adding the energy deposited in the individual voxels
//! within the dose ROI and dividing by the total mass of the material in the ROI.
//!
//! @param[in] file_dose_output File where tallied image is reported
//! @param[in] detector_data Detector description read from the input file (pointer to detector_struct)
//! @param[in] image Tallied image (in meV per pixel)
//! @param[in] time_elapsed Time elapsed during the main loop execution (in seconds)
//! @param[in] total_histories Total number of x-rays simulated
//! @param[in] source_data Data required to compute the voxel plane to report in ASCII format: Z at the level of the source, 1st projection
////////////////////////////////////////////////////////////////////////////////
// int report_voxels_dose(char* file_dose_output, int num_projections, struct voxel_struct* voxel_data, float2* voxel_mat_dens, ulonglong2* voxels_Edep, double time_total_MC_init_report, unsigned long long int total_histories, short int dose_ROI_x_min, short int dose_ROI_x_max, short int dose_ROI_y_min, short int dose_ROI_y_max, short int dose_ROI_z_min, short int dose_ROI_z_max, struct source_struct* source_data)
int report_voxels_dose(char* file_dose_output, int num_projections, struct voxel_struct* voxel_data, int* voxel_mat_dens, ulonglong2* voxels_Edep, double time_total_MC_init_report, unsigned long long int total_histories, short int dose_ROI_x_min, short int dose_ROI_x_max, short int dose_ROI_y_min, short int dose_ROI_y_max, short int dose_ROI_z_min, short int dose_ROI_z_max, struct source_struct* source_data) //!!FixedDensity_DBT!!
{
printf("\n\n *** VOXEL ROI DOSE TALLY REPORT ***\n\n");
FILE* file_ptr = fopen(file_dose_output, "w");
if (file_ptr==NULL)
{
printf("\n\n !!fopen ERROR report_voxels_dose!! File %s can not be opened!!\n", file_dose_output);
exit(-3);
}
// -- Binary output: // !!BINARY!!
char file_binary_mean[250], file_binary_sigma[250];
strncpy (file_binary_mean, file_dose_output, 250);
strcat(file_binary_mean,".raw");
strncpy (file_binary_sigma, file_dose_output, 250);
strcat(file_binary_sigma,"_PercentRelError2sigma.raw");
FILE* file_binary_mean_ptr = fopen(file_binary_mean, "w"); // !!BINARY!!
FILE* file_binary_sigma_ptr = fopen(file_binary_sigma, "w"); // !!BINARY!!
if (file_binary_mean_ptr==NULL || file_binary_sigma_ptr==NULL)
{
printf("\n\n !!fopen ERROR report_voxels_dose!! Binary output files %s / %s can not be opened!!\n", file_binary_mean, file_binary_sigma);
exit(-3);
}
int DX = dose_ROI_x_max - dose_ROI_x_min + 1,
DY = dose_ROI_y_max - dose_ROI_y_min + 1,
DZ = dose_ROI_z_max - dose_ROI_z_min + 1;
// -- Calculate the dose plane that will be output as ASCII text:
int z_plane_dose = (int)(source_data[0].position.z * voxel_data->inv_voxel_size.z + 0.00001f); // Select voxel plane at the level of the source, 1st projections
if ( (z_plane_dose<dose_ROI_z_min) || (z_plane_dose>dose_ROI_z_max) )
z_plane_dose = (dose_ROI_z_max+dose_ROI_z_min)/2;
int z_plane_dose_ROI = z_plane_dose - dose_ROI_z_min;
printf(" Reporting the 3D voxel dose distribution as binary floats in the .raw file, and the 2D dose for Z plane %d as ASCII text.\n", z_plane_dose);
// printf(" Also reporting the dose to each material inside the input ROI adding the energy deposited in each individual voxel\n");
// printf(" (these material dose results will be equal to the materials dose tally below if the ROI covers all the voxels).\n");
fprintf(file_ptr, "# \n");
fprintf(file_ptr, "# *****************************************************************************\n");
fprintf(file_ptr, "# *** MC-GPU, version 1.4_DBT (http://code.google.com/p/mcgpu/) ***\n");
fprintf(file_ptr, "# *** ***\n");
fprintf(file_ptr, "# *** Andreu Badal ([email protected]) ***\n");
fprintf(file_ptr, "# *****************************************************************************\n");
fprintf(file_ptr, "# \n");
#ifdef USING_CUDA
fprintf(file_ptr, "# *** SIMULATION IN THE GPU USING CUDA ***\n");
#else
fprintf(file_ptr, "# *** SIMULATION IN THE CPU ***\n");
#endif
fprintf(file_ptr, "#\n");
// Report only one dose plane in ASCII, all the other data in binary only:
fprintf(file_ptr, "#\n");
fprintf(file_ptr, "# 3D dose deposition map (and dose uncertainty) created tallying the energy deposited by photons inside each voxel of the input geometry.\n");
fprintf(file_ptr, "# Electrons were not transported and therefore we are approximating that the dose is equal to the KERMA (energy released by the photons alone).\n");
fprintf(file_ptr, "# This approximation is acceptable when there is electronic equilibrium and when the range of the secondary electrons is shorter than the voxel size.\n");
fprintf(file_ptr, "# Usually the doses will be acceptable for photon energies below 1 MeV. The dose estimates may not be accurate at the interface of low density volumes.\n");
fprintf(file_ptr, "#\n");
fprintf(file_ptr, "# The 3D dose deposition is reported in binary form in the .raw files (data given as 32-bit floats). \n");
fprintf(file_ptr, "# The %% relative error in the voxel dose at 2 standard deviations [=100*2*sigma/voxel_dose] is reported in the *_PercentRelError2sigma.raw file (32-bit floats). \n"); // !!SPIE2013!! Report relative error
fprintf(file_ptr, "# To reduce the memory use and the reporting time this text output reports only the 2D dose at the Z plane at the level\n");
fprintf(file_ptr, "# of the source focal spot: z_coord = %d (z_coord in ROI = %d)\n", z_plane_dose, z_plane_dose_ROI);
fprintf(file_ptr, "#\n");
fprintf(file_ptr, "# The total dose deposited in each different material is reported to the standard output.\n");
fprintf(file_ptr, "# The dose is calculated adding the energy deposited in the individual voxels within the dose ROI and dividing by the total mass of the material in the ROI.\n");
fprintf(file_ptr, "#\n");
fprintf(file_ptr, "#\n");
fprintf(file_ptr, "# Voxel size: %lf x %lf x %lf = %lf cm^3\n", 1.0/(double)(voxel_data->inv_voxel_size.x), 1.0/(double)(voxel_data->inv_voxel_size.y), 1.0/(double)(voxel_data->inv_voxel_size.z), 1.0/(double)(voxel_data->inv_voxel_size.x*voxel_data->inv_voxel_size.y*voxel_data->inv_voxel_size.z));
fprintf(file_ptr, "# Number of voxels in the reported region of interest (ROI) X, Y and Z:\n");
fprintf(file_ptr, "# %d %d %d\n", DX, DY, DZ);
fprintf(file_ptr, "# Coordinates of the ROI inside the voxel volume = X[%d,%d], Y[%d,%d], Z[%d,%d]\n", dose_ROI_x_min+1, dose_ROI_x_max+1, dose_ROI_y_min+1, dose_ROI_y_max+1, dose_ROI_z_min+1, dose_ROI_z_max+1); // Show ROI with index=1 for the first voxel instead of 0.
fprintf(file_ptr, "#\n");
fprintf(file_ptr, "# Voxel dose units: eV/g per history\n");
fprintf(file_ptr, "# X rows given first, then Y, then Z. One blank line separates the different Y, and two blanks the Z values (GNUPLOT format).\n");
fprintf(file_ptr, "# The dose distribution is also reported with binary FLOAT values (.raw file) for easy visualization in ImageJ.\n");
fprintf(file_ptr, "# \n");
fprintf(file_ptr, "# [DOSE] [2*standard_deviation]\n");
fprintf(file_ptr, "# =====================================\n");
fflush(file_ptr);
double voxel_dose, max_voxel_dose[MAX_MATERIALS], max_voxel_dose_std_dev[MAX_MATERIALS], max_voxel_dose_all_mat=0.0, max_voxel_dose_std_dev_all_mat=0.0;
int max_voxel_dose_x[MAX_MATERIALS], max_voxel_dose_y[MAX_MATERIALS], max_voxel_dose_z[MAX_MATERIALS];
unsigned long long int total_energy_deposited = 0;
double inv_SCALE_eV = 1.0 / SCALE_eV, // conversion to eV using the inverse of the constant used in the tally function (defined in the header file).
inv_N = 1.0 / (double)(total_histories*((unsigned long long int)num_projections));
register int i, j, k, voxel=0;
double mat_Edep[MAX_MATERIALS], mat_Edep2[MAX_MATERIALS], mat_mass_ROI[MAX_MATERIALS]; // Arrays with the total energy, energy squared and mass of each material inside the ROI (mass and dose outside the ROI was not tallied).
unsigned int mat_voxels[MAX_MATERIALS];
for(i=0; i<MAX_MATERIALS; i++)
{
mat_Edep[i] = 0.0;
mat_Edep2[i] = 0.0;
mat_mass_ROI[i] = 0.0;
mat_voxels[i]= 0;
max_voxel_dose[i] =-1.0;
max_voxel_dose_std_dev[i]= 1.0e-15;
max_voxel_dose_x[i] = 0;
max_voxel_dose_y[i] = 0;
max_voxel_dose_z[i] = 0;
}
double voxel_volume = 1.0 / ( ((double)voxel_data->inv_voxel_size.x) * ((double)voxel_data->inv_voxel_size.y) * ((double)voxel_data->inv_voxel_size.z) );
for(k=0; k<DZ; k++)
{
for(j=0; j<DY; j++)
{
for(i=0; i<DX; i++)
{
register int voxel_geometry = (i+dose_ROI_x_min) + (j+dose_ROI_y_min)*voxel_data->num_voxels.x + (k+dose_ROI_z_min)*voxel_data->num_voxels.x*voxel_data->num_voxels.y;
// register double inv_voxel_mass = 1.0 / (voxel_mat_dens[voxel_geometry].y*voxel_volume);
// register int mat_number = (int)(voxel_mat_dens[voxel_geometry].x) - 1 ; // Material number, starting at 0.
// mat_mass_ROI[mat_number] += voxel_mat_dens[voxel_geometry].y*voxel_volume; // Estimate mass and energy deposited in this material
register double inv_voxel_mass = 1.0 / (density_LUT((int)voxel_mat_dens[voxel_geometry])*voxel_volume); //!!FixedDensity_DBT!! Density taken from function "density_LUT"
register int mat_number = (int)(voxel_mat_dens[voxel_geometry]); // Material number, starting at 0. //!!FixedDensity_DBT!!
mat_mass_ROI[mat_number] += density_LUT((int)voxel_mat_dens[voxel_geometry])*voxel_volume; // Estimate mass and energy deposited in this material //!!FixedDensity_DBT!! Density taken from function "density_LUT"
mat_Edep[mat_number] += (double)voxels_Edep[voxel].x; // Using doubles to avoid overflow
mat_Edep2[mat_number] += (double)voxels_Edep[voxel].y;
mat_voxels[mat_number]++; // Count voxels made of this material
// Optional code to eliminate dose deposited in air (first material). Sometimes useful for visualization (dose to air irrelevant, noisy)
// if (voxel_mat_dens[voxel_geometry].x < 1.1f)
// {
// voxels_Edep[voxel].x = 0.0f;
// voxels_Edep[voxel].y = 0.0f;
// }
// -- Convert total energy deposited to dose [eV/gram] per history:
// !!DeBuG!! BUG in first version MC-GPU v1.3, corrected for v1.4 [2013-01-31]. Edep2 is NOT scaled by SCALE_eV!! Also, division by voxel_mass must be done at the end!
// !!DeBuG!! Wrong: voxel_dose = ((double)voxels_Edep[voxel].x) * inv_N * inv_voxel_mass * inv_SCALE_eV;
// !!DeBuG!! Wrong: register double voxel_std_dev = (((double)voxels_Edep[voxel].y) * inv_N * inv_SCALE_eV * inv_voxel_mass - voxel_dose*voxel_dose) * inv_N;
voxel_dose = ((double)voxels_Edep[voxel].x) * inv_N * inv_SCALE_eV; // [<Edep> == Edep / N_hist /scaling_factor ; dose == <Edep> / mass]
total_energy_deposited += voxels_Edep[voxel].x;
register double voxel_std_dev = (((double)voxels_Edep[voxel].y) * inv_N - voxel_dose*voxel_dose) * inv_N * inv_voxel_mass; // [sigma_Edep^2 = (<Edep^2> - <Edep>^2) / N_hist] ; [sigma_dose^2 = sigma_Edep/mass] (not using SCALE_eV for std_dev to prevent overflow)
if (voxel_std_dev>0.0)
voxel_std_dev = sqrt(voxel_std_dev);
voxel_dose *= inv_voxel_mass; // [dose == <Edep> / mass]
if (voxel_dose > max_voxel_dose[mat_number]) // Tally peak dose for each material!
{
// Find the voxel that has the maximum dose:
max_voxel_dose[mat_number] = voxel_dose;
max_voxel_dose_std_dev[mat_number] = voxel_std_dev;
max_voxel_dose_x[mat_number] = i+dose_ROI_x_min;
max_voxel_dose_y[mat_number] = j+dose_ROI_y_min;
max_voxel_dose_z[mat_number] = k+dose_ROI_z_min;
if (voxel_dose > max_voxel_dose_all_mat)
{
max_voxel_dose_all_mat = voxel_dose;
max_voxel_dose_std_dev_all_mat = voxel_std_dev;
}
}
// Report only one dose plane in ASCII:
if (k == z_plane_dose_ROI)
fprintf(file_ptr, "%.6lf %.6lf\n", voxel_dose, 2.0*voxel_std_dev);
float voxel_dose_float = (float)voxel_dose; // After dividing by the number of histories I can report FLOAT because the number of significant digits will be low.
fwrite(&voxel_dose_float, sizeof(float), 1, file_binary_mean_ptr); // Write dose data in a binary file that can be easily opened in ImageJ. !!BINARY!!
// !!DeBuG!! OLD version, reporting sigma: float voxel_sigma_float = 2.0f * (float)(voxel_std_dev); fwrite(&voxel_sigma_float, sizeof(float), 1, file_binary_sigma_ptr);
float voxel_relErr_float = 0.0f;
if (voxel_dose > 0.0)
voxel_relErr_float = 200.0f*(float)(voxel_std_dev/voxel_dose); // New in MC-GPU v1.4: Report relative error for 2*sigma, in % (avoid dividing by 0)
fwrite(&voxel_relErr_float, sizeof(float), 1, file_binary_sigma_ptr);
voxel++;
}
if (k == z_plane_dose_ROI)
fprintf(file_ptr, "\n"); // Separate Ys with an empty line for visualization with gnuplot.
}
if (k == z_plane_dose_ROI)
fprintf(file_ptr, "\n"); // Separate Zs.
}
fprintf(file_ptr, "# ****** DOSE REPORT: TOTAL SIMULATION PERFORMANCE FOR ALL PROJECTIONS ******\n");
fprintf(file_ptr, "# Total number of simulated x rays: %lld\n", total_histories*((unsigned long long int)num_projections));
fprintf(file_ptr, "# Simulated x rays per projection: %lld\n", total_histories);
fprintf(file_ptr, "# Total simulation time [s]: %.2f\n", time_total_MC_init_report);
if (time_total_MC_init_report>0.000001)
fprintf(file_ptr, "# Total speed [x-rays/s]: %.2f\n", (double)(total_histories*((unsigned long long int)num_projections))/time_total_MC_init_report);
fprintf(file_ptr, "\n# Total energy absorved inside the dose ROI: %.5lf keV/hist\n\n", 0.001*((double)total_energy_deposited)*inv_N*inv_SCALE_eV);
// Output data to standard input:
printf("\n Total energy absorved inside the dose deposition ROI: %.5lf keV/hist\n", 0.001*((double)total_energy_deposited)*inv_N*inv_SCALE_eV);
printf( " Maximum voxel dose (+-2 sigma): %lf +- %lf eV/g per history.\n", max_voxel_dose_all_mat, max_voxel_dose_std_dev_all_mat);
// -- Report dose deposited in each material:
printf(" Dose deposited in the different materials inside the input ROI computed post-processing the 3D voxel dose results:\n\n");
// OLD reporting without peak dose (v1.3): printf(" [MATERIAL] [DOSE_ROI, eV/g/hist] [2*std_dev] [Rel error 2*std_dev, %%] [E_dep [eV/hist] [MASS_ROI, g] [NUM_VOXELS_ROI]\n");
printf(" [MAT] [DOSE_ROI eV/g/hist] [2*std_dev] [Rel error %%] [Peak voxel dose] [2*std_dev] [Rel error %%] [Peak voxel coord] [E_dep eV/hist] [MASS_ROI g] [NUM_VOXELS_ROI]\n");
printf(" ===============================================================================================================================================================================\n");
for(i=0; i<MAX_MATERIALS; i++)
{
if(mat_voxels[i]>0) // Report only for materials found at least in 1 voxel of the input geometry (prevent dividing by 0 mass).
{
double Edep = mat_Edep[i] * inv_N * inv_SCALE_eV; // [dose == Edep/Mass/N_hist]
// !!DeBuG!! BUG in version 1.2: I have to divide by mass after computing the mean and sigma!!!
// !!DeBuG!! WRONG code: double material_dose = mat_Edep[i] * inv_N * inv_SCALE_eV / mat_mass_ROI[i]; // [dose == Edep/Mass/N_hist]
// !!DeBuG!! WRONG code: double material_std_dev = (mat_Edep2[i] * inv_N * inv_SCALE_eV / mat_mass_ROI[i] - material_dose*material_dose) * inv_N; // [sigma^2 = (<Edep^2> - <Edep>^2) / N_hist]
double material_std_dev = (mat_Edep2[i] * inv_N - Edep*Edep) * inv_N; // [sigma^2 = (<Edep^2> - <Edep>^2) / N_hist] (mat_Edep2 not scaled by SCALE_eV in kernel to prevent overflow)
if (material_std_dev>0.0)
material_std_dev = sqrt(material_std_dev);
double material_dose = Edep / mat_mass_ROI[i];
material_std_dev = material_std_dev / mat_mass_ROI[i];
double rel_diff=0.0, rel_diff_peak=0.0;
if (material_dose>0.0)
{
rel_diff = material_std_dev/material_dose;
rel_diff_peak = max_voxel_dose_std_dev[i]/max_voxel_dose[i];
}
printf("\t%d\t%.5lf\t%.5lf\t%.3lf\t\t%.5lf\t%.5lf\t%.3lf\t(%d,%d,%d)\t\t%.5lf\t%.5lf\t%u\n", (i+1), material_dose, 2.0*material_std_dev, (200.0*rel_diff), max_voxel_dose[i], 2.0*max_voxel_dose_std_dev[i], (200.0*rel_diff_peak), max_voxel_dose_x[i], max_voxel_dose_y[i], max_voxel_dose_z[i], Edep, mat_mass_ROI[i], mat_voxels[i]);
// OLD reporting without peak dose (v1.3): printf("\t%d\t%.5lf\t\t%.5lf\t\t%.2lf\t\t%.2lf\t\t%.5lf\t%u\n", (i+1), material_dose, 2.0*material_std_dev, (2.0*100.0*rel_diff), Edep, mat_mass_ROI[i], mat_voxels[i]);
}
}
printf("\n");
fflush(stdout);
fclose(file_ptr); // Close output file and flush stream
fclose(file_binary_mean_ptr);
fclose(file_binary_sigma_ptr);
return 0; // The report could return a value other than 0 to continue the simulation...
}
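// Illustrative sketch, not called by the simulation: the index arithmetic used in the triple loop of
// report_voxels_dose() above. The dose counters in voxels_Edep were tallied linearly over the ROI
// (index 'voxel'), while 'voxel_geometry' addresses the same voxel inside the full voxelized volume.
static int ROI_to_geometry_index_example(int i, int j, int k,                                         // Voxel coordinates relative to the ROI origin
                                         int dose_ROI_x_min, int dose_ROI_y_min, int dose_ROI_z_min,  // ROI offset inside the full volume
                                         int num_voxels_x, int num_voxels_y)                          // Full volume dimensions in X and Y
{
  return (i + dose_ROI_x_min) + (j + dose_ROI_y_min)*num_voxels_x + (k + dose_ROI_z_min)*num_voxels_x*num_voxels_y;
}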
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
//! Report the tallied dose to each material number, accounting for different
//! densities in different regions with the same material number.
//!
//! @param[in] num_projections Number of projections simulated
//! @param[in] total_histories Total number of x-rays simulated per projection
//! @param[out] density_nominal Array with the nominal densities of materials given in the input file; -1 for materials not defined. Used to report only defined materials.
//! @param[in] materials_dose Tallied dose and dose^2 arrays
////////////////////////////////////////////////////////////////////////////////
int report_materials_dose(int num_projections, unsigned long long int total_histories, float *density_nominal, ulonglong2 *materials_dose, double *mass_materials, char file_name_materials[MAX_MATERIALS][250]) // !!tally_materials_dose!!
{
printf("\n\n *** MATERIALS TOTAL DOSE TALLY REPORT ***\n\n");
printf(" Dose deposited in each material defined in the input file (tallied directly per material, not per voxel):\n");
printf(" The results of this tally should be equal to the voxel tally doses for an ROI covering all voxels.\n");
printf(" Total number of simulated x rays: %lld\n", total_histories*((unsigned long long int)num_projections)); // !!DBT!!
if (num_projections>1)
printf(" Simulated x rays for each of %d projections: %lld\n\n", num_projections, total_histories);
printf("\t [MAT] [DOSE eV/g/hist] [2*std_dev] [Rel_error 2*std_dev, %%] [E_dep eV/hist] [DOSE mGy] [Material mass g] [Material file name]\n");
printf("\t======================================================================================================================================\n");
double dose, Edep, std_dev, rel_diff, inv_N = 1.0 / (double)(total_histories*((unsigned long long int)num_projections));
int i, flag=0, max_mat=0;
for(i=0; i<MAX_MATERIALS; i++)
{
if (density_nominal[i]<0.0f)
break; // Skip report for materials not defined in the input file
// Report the material file names removing the absolute file system path for clarity:
char file_name_material_without_path[250];
char* last_slash = strrchr(file_name_materials[i],'/'); // Return a pointer to the last character '/' in the input name, or NULL if not found
if (last_slash==NULL)
strcpy(file_name_material_without_path, file_name_materials[i]);
else
strcpy(file_name_material_without_path, (last_slash+1));
Edep = ((double)materials_dose[i].x) / SCALE_eV * inv_N;
std_dev = sqrt( (((double)materials_dose[i].y)*inv_N - Edep*Edep) * inv_N ); // [sigma^2 = (<Edep^2> - <Edep>^2) / N_hist] (not scaling "materials_dose[i].y" by SCALE_eV in kernel to prevent overflow).
if (Edep>0.0)
rel_diff = std_dev/Edep;
else
rel_diff = 0.0;
dose = Edep / max_value(mass_materials[i], 0.00001); // Prevent division by 0
std_dev = std_dev / max_value(mass_materials[i], 0.00001);
printf("\t%d\t%.5lf\t\t%.5lf\t\t%.2lf\t\t%.2lf\t\t%.5lf\t\t%.5lf\t\t%s\n", (i+1), dose, 2.0*std_dev, 2.0*100.0*rel_diff, Edep, ((double)materials_dose[i].x)/SCALE_eV/max_value(mass_materials[i], 0.00001)*(1.0e3/6.2415e15), mass_materials[i], file_name_material_without_path);
if (materials_dose[i].x>1e16 || dose!=fabs(dose) || std_dev!=fabs(std_dev)) // !!DeBuG!! Try to detect a possible overflow in any material: large counter, or a negative or NaN value (fabs used because dose and std_dev are doubles)
{
flag = 1;
if (materials_dose[i].x>materials_dose[max_mat].x)
max_mat = i;
}
}
if (flag!=0) // !!DeBuG!! Try to detect a possible overflow: large counter or negative, nan value. The value of SCALE_eV can be reduced to prevent this overflow in some cases.
{
printf("\n WARNING: it is possible that the unsigned long long int counter used to tally the standard deviation overflowed (>2^64).\n"); // !!DeBuG!!
printf(" The standard deviation may be incorrectly measured, but it will surely be very small (<< 1%%).\n");
printf(" Max counter (mat=%d): E_dep = %llu , E_dep^2 = %llu\n\n", max_mat+1, materials_dose[max_mat].x, materials_dose[max_mat].y);
}
fflush(stdout);
return 0;
}
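// Illustrative helper, not called above: conversion from the absolute deposited energy [eV] and the
// material mass [g] to absolute dose in mGy, as done in the printf above.
// 1 Gy = 1 J/kg = 6.2415e15 eV/g, so 1 eV/g = (1.0e3/6.2415e15) mGy.
static double dose_mGy_example(double total_Edep_eV, double mass_g)
{
  return (total_Edep_eV / mass_g) * (1.0e3/6.2415e15);   // [eV/g] --> [mGy]
}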
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
//! Sets the tomographic acquisition trajectory: store in memory the source and detector
//! positions and rotation matrices that are needed to simulate the multiple projections.
//! The first projection ([0] = 0 degrees) was previously initialized in function "read_input".
//! The antiscatter grid is disabled after projection [0]; the motion blur is disabled for projection [0].
///////////////////////////////////////////////////////////////////////////////
void set_CT_trajectory(int myID, int num_projections, struct source_struct* source_data, struct detector_struct* detector_data, double translation_helical, bool flag_detectorFixed) // !!DBTv1.4!! NEW VERSION with general rotations!!
{
MASTER_THREAD
{
printf("\n -- Setting the parameters of the sources and detectors for the %d tomographic projections (MAX_NUM_PROJECTIONS=%d):\n\n", num_projections, MAX_NUM_PROJECTIONS);
if (flag_detectorFixed)
{
printf(" \"flag_detectorFixed==true\": the detector will stay fixed at 0 deg position (as in DBT acquisition).\n\n");
}
}
int i;
for (i=1; i<=num_projections; i++) // The first projection (i=0) was initialized in function "read_input"
{
double angle = source_data[0].angle_offset + (i-1)*source_data[0].angle_per_projection;
// --Initialize the source and detector structures to the values in projection 0:
source_data[i] = source_data[0];
detector_data[i] = detector_data[0];
// --Set "source_data[i]->rot_fan" multiplying the rotation matrix for the original 0 degrees with the new rotation of "angle" degrees around "axis_of_rotation". // !!DBTv1.4!!
// The new matrix multiplies "from the left side" (will be applied last; non-commutative):
float m[9];
m[0] = 1; m[1] = 0; m[2] = 0; // Init rotation matrix to identity
m[3] = 0; m[4] = 1; m[5] = 0;
m[6] = 0; m[7] = 0; m[8] = 1;
create_rotation_matrix_around_axis(angle, source_data[0].axis_of_rotation.x, source_data[0].axis_of_rotation.y, source_data[0].axis_of_rotation.z, m);
multiply_3x3(source_data[i].rot_fan, m, source_data[0].rot_fan);
// --Set "source_data[i].direction" multiplying by the rotation matrix:
apply_rotation(&source_data[i].direction, m);
// --Translate the source position and rotation_point according to the input translation along the axis_of_rotation for a helical scan:
source_data[i].position.x += (i-1)*translation_helical*source_data[0].axis_of_rotation.x;
source_data[i].position.y += (i-1)*translation_helical*source_data[0].axis_of_rotation.y;
source_data[i].position.z += (i-1)*translation_helical*source_data[0].axis_of_rotation.z;
source_data[i].rotation_point.x += (i-1)*translation_helical*source_data[0].axis_of_rotation.x;
source_data[i].rotation_point.y += (i-1)*translation_helical*source_data[0].axis_of_rotation.y;
source_data[i].rotation_point.z += (i-1)*translation_helical*source_data[0].axis_of_rotation.z;
// --Set "source_data[i].position" multiplying by the rotation matrix in the reference system where the "rotation_point" is the origin of coordinates:
source_data[i].position.x -= source_data[i].rotation_point.x; source_data[i].position.y -= source_data[i].rotation_point.y; source_data[i].position.z -= source_data[i].rotation_point.z;
apply_rotation(&source_data[i].position, m);
source_data[i].position.x += source_data[i].rotation_point.x; source_data[i].position.y += source_data[i].rotation_point.y; source_data[i].position.z += source_data[i].rotation_point.z;
if (flag_detectorFixed==false) // Check if the detector rotates with the source or if it stays static as in DBT
{
// --Set "detector_data[i].center" multiplying by the rotation matrix in the reference system where the "rotation_point" is the origin of coordinates:
detector_data[i].center.x -= source_data[i].rotation_point.x; detector_data[i].center.y -= source_data[i].rotation_point.y; detector_data[i].center.z -= source_data[i].rotation_point.z;
apply_rotation(&detector_data[i].center, m);
detector_data[i].center.x += source_data[i].rotation_point.x; detector_data[i].center.y += source_data[i].rotation_point.y; detector_data[i].center.z += source_data[i].rotation_point.z;
// --Set "detector_data[i].rot_inv" multiplying the inverse rotation matrix by "-angle". The new matrix multiplies "from the right side" (will be applied first): // !!DBTv1.4!!
m[0] = 1; m[1] = 0; m[2] = 0; // Init rotation matrix to identity
m[3] = 0; m[4] = 1; m[5] = 0;
m[6] = 0; m[7] = 0; m[8] = 1;
create_rotation_matrix_around_axis(-angle, source_data[0].axis_of_rotation.x, source_data[0].axis_of_rotation.y, source_data[0].axis_of_rotation.z, m);
multiply_3x3(detector_data[i].rot_inv, detector_data[0].rot_inv, m);
}
MASTER_THREAD printf(" << Projection #%d >>\t Angle=%.5f degrees\n", i, angle*RAD2DEG);
MASTER_THREAD printf(" \t Source position=(%.8f,%.8f,%.8f), direction=(%.8f,%.8f,%.8f)\n", source_data[i].position.x,source_data[i].position.y,source_data[i].position.z, source_data[i].direction.x,source_data[i].direction.y,source_data[i].direction.z);
MASTER_THREAD printf(" \t Detector center=(%.8f,%.8f,%.8f)\n", detector_data[i].center.x, detector_data[i].center.y, detector_data[i].center.z);
if (detector_data[0].grid_freq>0.0f)
detector_data[i].grid_freq = -detector_data[0].grid_freq; // Disable grid after projection [0], if used !!DBTv1.5!!
}
source_data[0].rotation_blur = 0.0f; // Motion blur disabled for the 0 deg projection (eg, mammo). // !!DBTv1.4!!
}
///////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//! Initialize the first seed of the pseudo-random number generator (PRNG)
//! RANECU to a position far away from the previous history (leap frog technique).
//! This function is equivalent to "init_PRNG" but only updates one of the seeds.
//!
//! Note that if we use the same seed number to initialize the 2 MLCGs of the PRNG
//! we can only guarantee that the first MLCG will be uncorrelated for each value
//! generated by "update_seed_PRNG". There is a tiny chance that the final PRNs will
//! be correlated because the leap frog on the first MLCG will probably go over the
//! repetition cycle of the MLCG, which is much smaller than the full RANECU. But any
//! correlation is extremely unlikely. Function "init_PRNG" doesn't have this issue.
//!
//! @param[in] batch_number Elements to skip (eg, MPI thread_number).
//! @param[in] total_histories Histories to skip.
//! @param[in,out] seed Initial PRNG seeds; returns the updated seed.
////////////////////////////////////////////////////////////////////////////////
inline void update_seed_PRNG(int batch_number, unsigned long long int total_histories, int* seed)
{
if (0==batch_number)
return;
unsigned long long int leap = total_histories * (batch_number * LEAP_DISTANCE);
int y = 1;
int z = a1_RANECU;
// -- Calculate the modulo power '(a^leap)MOD(m)' using a divide-and-conquer algorithm adapted to modulo arithmetic
for(;;)
{
// (A2) Halve n, and store the integer part and the residue
if (0!=(leap&01)) // (bit-wise operation for MOD(leap,2), or leap%2 ==> proceed if leap is an odd number) Equivalent: t=(short)(leap%2);
{
leap >>= 1; // Halve n moving the bits 1 position right. Equivalent to: leap=(leap/2);
y = abMODm(m1_RANECU,z,y); // (A3) Multiply y by z: y = [z*y] MOD m
if (0==leap) break; // (A4) leap==0? ==> finish
}
else // (leap is even)
{
leap>>= 1; // Halve leap moving the bits 1 position right. Equivalent to: leap=(leap/2);
}
z = abMODm(m1_RANECU,z,z); // (A5) Square z: z = [z*z] MOD m
}
// AjMODm1 = y; // Exponentiation finished: AjMODm = expMOD = y = a^j
// -- Compute and display the seeds S(i+j), from the present seed S(i), using the previously calculated value of (a^j)MOD(m):
// S(i+j) = [(a**j MOD m)*S(i)] MOD m
// S_i = abMODm(m,S_i,AjMODm)
*seed = abMODm(m1_RANECU, *seed, y);
}
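// Usage sketch (added note, not part of the original call sites; variable names are hypothetical):
// each MPI process or simulation batch would typically copy the master seed and leap-frog its own
// copy before launching the GPU kernel, jumping (mpi_rank * LEAP_DISTANCE * histories) values ahead:
//
// int seed_batch = seed_input; // master RANECU seed read from the input file
// update_seed_PRNG(mpi_rank, total_histories_per_batch, &seed_batch);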
///////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//! Read the energy spectrum file and initialize the Walker aliasing sampling.
//!
//! @param[in] file_name_espc File containing the energy spectrum (lower energy value in each bin and its emission probability).
//! @param[in,out] source_energy_data Energy spectrum and other source data. The Walker alias and cutoffs are initialized in this function.
//! @param[out] mean_energy_spectrum Mean energy in the input x-ray energy spectrum.
////////////////////////////////////////////////////////////////////////////////
void init_energy_spectrum(char* file_name_espc, struct source_energy_struct* source_energy_data, float *mean_energy_spectrum)
{
char *new_line_ptr = NULL, new_line[250];
float lower_energy_bin, prob;
float prob_espc_bin[MAX_ENERGY_BINS]; // The input probabilities of each energy bin will be discarded after Walker is initialized
// -- Read spectrum from file:
FILE* file_ptr = fopen(file_name_espc, "r");
if (NULL==file_ptr)
{
printf("\n\n !!init_energy_spectrum ERROR!! Error trying to read the energy spectrum input file \"%s\".\n\n", file_name_espc);
exit(-1);
}
int current_bin = -1;
do
{
current_bin++; // Update bin counter
if (current_bin >= MAX_ENERGY_BINS)
{
printf("\n !!init_energy_spectrum ERROR!!: too many energy bins in the input spectrum. Increase the value of MAX_ENERGY_BINS=%d.\n", MAX_ENERGY_BINS);
printf( " A negative probability marks the end of the spectrum.\n\n");
exit(-1);
}
new_line_ptr = fgets_trimmed(new_line, 250, file_ptr); // Read the following line of text skipping comments and extra spaces
if (new_line_ptr==NULL)
{
printf("\n\n !!init_energy_spectrum ERROR!! The input file for the x ray spectrum (%s) is not readable or incomplete (a negative probability marks the end of the spectrum).\n", file_name_espc);
exit(-1);
}
prob = -123456789.0f;
sscanf(new_line, "%f %f", &lower_energy_bin, &prob); // Extract the lowest energy in the bin and the corresponding emission probability from the line read
prob_espc_bin[current_bin] = prob;
source_energy_data->espc[current_bin] = lower_energy_bin;
if (prob == -123456789.0f)
{
printf("\n !!init_energy_spectrum ERROR!!: invalid energy bin number %d?\n\n", current_bin);
exit(-1);
}
else if (lower_energy_bin < source_energy_data->espc[max_value(current_bin-1,0)]) // (Avoid a negative index using the macro "max_value" defined in the header file)
{
printf("\n !!init_energy_spectrum ERROR!!: input energy bins with decreasing energy? espc(%d)=%f, espc(%d)=%f\n\n", current_bin-1, source_energy_data->espc[max_value(current_bin-1,0)], current_bin, lower_energy_bin);
exit(-1);
}
}
while (prob > -1.0e-11f); // A negative probability marks the end of the spectrum
// Store the number of bins read from the input energy spectrum file:
source_energy_data->num_bins_espc = current_bin;
// Init the remaining bins (which will not be used) with the last energy read (will be assumed as the highest energy in the last bin) and 0 probability of emission.
register int i;
for (i=current_bin; i<MAX_ENERGY_BINS; i++)
{
source_energy_data->espc[i] = lower_energy_bin;
prob_espc_bin[i] = 0.0f;
}
// Compute the mean energy in the spectrum, taking into account the energy and prob of each bin:
float all_energy = 0.0f;
float all_prob = 0.0f;
for(i=0; i<source_energy_data->num_bins_espc; i++)
{
all_energy += 0.5f*(source_energy_data->espc[i]+source_energy_data->espc[i+1])*prob_espc_bin[i];
all_prob += prob_espc_bin[i];
}
*mean_energy_spectrum = all_energy/all_prob;
// -- Init the Walker aliasing sampling method (as it is done in PENELOPE):
IRND0(prob_espc_bin, source_energy_data->espc_cutoff, source_energy_data->espc_alias, source_energy_data->num_bins_espc); //!!Walker!! Calling PENELOPE's function to init the Walker method
// !!Verbose!! Test sampling
// Sampling the x ray energy using the Walker aliasing algorithm from PENELOPE:
// int sampled_bin = seeki_walker(source_energy_data->espc_cutoff, source_energy_data->espc_alias, 0.5, source_energy_data->num_bins_espc);
// float e = source_energy_data->espc[sampled_bin] + ranecu(seed) * (source_energy_data->espc[sampled_bin+1] - source_energy_data->espc[sampled_bin]); // Linear interpolation of the final energy within the sampled energy bin
// printf("\n\n !!Walker!! Energy center bin %d = %f keV\n", sampled_bin, 0.001f*e);
}
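// Illustrative input layout (added note; hypothetical values): each line of the spectrum file gives
// the lower energy of a bin in eV and its (not necessarily normalized) emission probability; the
// final line, with a negative probability, provides the upper edge of the last bin and ends the list:
//
// 28000.0 0.25
// 29000.0 0.50
// 30000.0 0.25
// 31000.0 -1.0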
//********************************************************************
//! Finds the interval (x(i),x(i+1)] containing the input value
//! using Walker's aliasing method.
//!
//! Input:
//! cutoff(1..n) -> interval cutoff values for the Walker method
//! alias(1..n) -> alias values for the upper part of each interval
//! randno -> point to be located
//! n -> no. of data points
//! Output:
//! index i of the semiopen interval where randno lies
//! Comments:
//! -> The cutoff and alias values have to be previously
//! initialised calling the penelope subroutine IRND0.
//!
//!
//! Algorithm implementation based on the PENELOPE code developed
//! by Francesc Salvat at the University of Barcelona. For more
//! info: www.oecd-nea.org/science/pubs/2009/nea6416-penelope.pdf
//!
//CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
//C PENELOPE/PENGEOM (version 2006) C
//C Copyright (c) 2001-2006 C
//C Universitat de Barcelona C
//C C
//C Permission to use, copy, modify, distribute and sell this software C
//C and its documentation for any purpose is hereby granted without C
//C fee, provided that the above copyright notice appears in all C
//C copies and that both that copyright notice and this permission C
//C notice appear in all supporting documentation. The Universitat de C
//C Barcelona makes no representations about the suitability of this C
//C software for any purpose. It is provided "as is" without express C
//C or implied warranty. C
//CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
inline int seeki_walker(float *cutoff, short int *alias, float randno, int n)
{
float RN = randno * n; // Find initial interval (array starting at 0):
int int_part = (int)(RN); // -- Integer part
float fraction_part = RN - ((float)int_part); // -- Fractional part
if (fraction_part < cutoff[int_part]) // Check if we are in the aliased part
return int_part; // Below the cutoff: return current value
else
return (int)alias[int_part]; // Above the cutoff: return alias
}
//********************************************************************
//* SUBROUTINE IRND0 *
//********************************************************************
//*
//! Initialisation of Walker's aliasing algorithm for random
//! sampling from discrete probability distributions.
//!
//! Input arguments:
//! N ........ number of different values of the random variable.
//! W(1:N) ... corresponding point probabilities (not necessarily
//! normalised to unity).
//! Output arguments:
//! F(1:N) ... cutoff values.
//! K(1:N) ... alias values.
//!
//!
//! This subroutine is part of the PENELOPE 2006 code developed
//! by Francesc Salvat at the University of Barcelona. For more
//! info: www.oecd-nea.org/science/pubs/2009/nea6416-penelope.pdf
//*
//CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
//C PENELOPE/PENGEOM (version 2006) C
//C Copyright (c) 2001-2006 C
//C Universitat de Barcelona C
//C C
//C Permission to use, copy, modify, distribute and sell this software C
//C and its documentation for any purpose is hereby granted without C
//C fee, provided that the above copyright notice appears in all C
//C copies and that both that copyright notice and this permission C
//C notice appear in all supporting documentation. The Universitat de C
//C Barcelona makes no representations about the suitability of this C
//C software for any purpose. It is provided "as is" without express C
//C or implied warranty. C
//CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
void IRND0(float *W, float *F, short int *K, int N)
{
register int I;
// **** Renormalisation.
double WS=0.0;
for (I=0; I<N; I++)
{
if(W[I] < 0.0f)
{
printf("\n\n !!ERROR!! IRND0: Walker sampling initialization. Negative point probability? W(%d)=%f\n\n", I, W[I]);
exit(-1);
}
WS = WS + W[I];
}
WS = ((double)N) / WS;
for (I=0; I<N; I++)
{
K[I] = I;
F[I] = W[I] * WS;
}
if (N==1)
return;
// **** Cutoff and alias values.
float HLOW, HIGH;
int ILOW, IHIGH, J;
for (I=0; I<N-1; I++)
{
HLOW = 1.0f;
HIGH = 1.0f;
ILOW = -1;
IHIGH= -1;
for (J=0; J<N; J++)
{
if(K[J]==J)
{
if(F[J]<HLOW)
{
HLOW = F[J];
ILOW = J;
}
else if(F[J]>HIGH)
{
HIGH = F[J];
IHIGH = J;
}
}
}
if((ILOW==-1) || (IHIGH==-1))
return;
K[ILOW] = IHIGH;
F[IHIGH]= HIGH + HLOW - 1.0f;
}
return;
}
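// Worked example (added note): for N=2 and unnormalized weights W={1,3}, IRND0 first renormalizes
// to F={0.5,1.5}, K={0,1}; the cutoff/alias pass then leaves F={0.5,1.0}, K={1,1}. Passing these
// arrays to seeki_walker with a uniform random number r in [0,1) returns bin 0 only when 2*r < 0.5
// (probability 1/4) and bin 1 otherwise (probability 3/4), reproducing the intended 1:3 point
// probabilities.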
///////////////////////////////////////////////////////////////////////////////
// !!DBTv1.4!!
//!* Create the rotation matrix that rotates a vector by the input angle around the input axis
//!* (using Rodrigues' formula: http://mathworld.wolfram.com/RodriguesRotationFormula.html)
//!* The new rotation matrix is multiplied with the input matrix m[9] to create compounded rotations
//!* (input the identity matrix I[3x3] if no previous rotation exists!).
void create_rotation_matrix_around_axis(float angle, float wx, float wy, float wz, float *m)
{
float mm[9];
float nn[9];
int i, flag=0;
for (i=0; i<9; i++)
{
mm[i] = m[i]; // Create temporary copy of input matrix
if (fabsf(m[i])>1.0e-7f)
flag += 1; // Detect non-null elements in the matrix
}
if (0==flag)
{
printf("\n\n !!WARNING!! Null rotation matrix input to \'create_rotation_matrix_around_axis\'???\n");
printf( " Resetting the matrix to identity to preserve the following rotations.\n\n\n");
mm[0] = mm[4] = mm[8] = 1.0f;
mm[1] = mm[2] = mm[3] = mm[5] = mm[6] = mm[7] = 0.0f;
}
double c = cos(angle);
double s = sin(angle);
nn[0] = (float) c+wx*wx*(1.0-c);
nn[3] = (float) wz*s+wx*wy*(1.0-c);
nn[6] = (float)-wy*s+wx*wz*(1.0-c);
nn[1] = (float)-wz*s+wx*wy*(1.0-c);
nn[4] = (float) c+wy*wy*(1.0-c);
nn[7] = (float) wx*s+wy*wz*(1.0-c);
nn[2] = (float) wy*s+wx*wz*(1.0-c);
nn[5] = (float)-wx*s+wy*wz*(1.0-c);
nn[8] = (float) c+wz*wz*(1.0-c);
multiply_3x3(m, nn, mm); // Multiply new rotation matrix with the input one (which should be the identity if no previous rotations)
}
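// Quick sanity check (added note, assuming the usual PI macro from the main header): starting from
// the identity, a rotation of 0.5*PI radians around the +Z axis yields (up to float rounding)
// m = {0,-1,0, 1,0,0, 0,0,1}, which sends the vector (1,0,0) to (0,1,0) when applied with
// apply_rotation(), i.e. the counter-clockwise rotation expected from Rodrigues' formula:
//
// float m_test[9] = {1.0f,0.0f,0.0f, 0.0f,1.0f,0.0f, 0.0f,0.0f,1.0f};
// create_rotation_matrix_around_axis(0.5f*PI, 0.0f, 0.0f, 1.0f, m_test);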
//! Multiply two input 3x3 matrices: m_out[9] = n[9] x m[9]
void multiply_3x3(float *m_out, float *n, float *m)
{
if (m_out==m)
printf("\n\n !!ERROR in \"multiply_3x3\"!! Input and output pointers are the same; the matrix multiplication will be wrong!!??\n\n");
m_out[0] = n[0]*m[0] + n[1]*m[3] + n[2]*m[6];
m_out[1] = n[0]*m[1] + n[1]*m[4] + n[2]*m[7];
m_out[2] = n[0]*m[2] + n[1]*m[5] + n[2]*m[8];
m_out[3] = n[3]*m[0] + n[4]*m[3] + n[5]*m[6];
m_out[4] = n[3]*m[1] + n[4]*m[4] + n[5]*m[7];
m_out[5] = n[3]*m[2] + n[4]*m[5] + n[5]*m[8];
m_out[6] = n[6]*m[0] + n[7]*m[3] + n[8]*m[6];
m_out[7] = n[6]*m[1] + n[7]*m[4] + n[8]*m[7];
m_out[8] = n[6]*m[2] + n[7]*m[5] + n[8]*m[8];
}
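// Ordering note (added): multiply_3x3(out, n, m) stores the product n*m, so when "out" is later
// applied to a vector the rotation "m" acts first and "n" acts last. This is the convention used
// in set_CT_trajectory above, where the new per-projection rotation multiplies the projection-0
// matrix "from the left".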
///////////////////////////////////////////////////////////////////////////////
|
801cbf4b6fc923a99e2824eadf370eaa431574ec.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2011-2021, CESNET z.s.p.o
* Copyright (c) 2011, Silicon Genome, LLC.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file
* @brief
 * This file contains the postprocessors that convert the common format used by the
 * computational kernels into the raw output image. It also performs color space transformations.
*/
#include "gpujpeg_colorspace.h"
#include "gpujpeg_preprocessor_common.h"
#include "gpujpeg_postprocessor.h"
#include "gpujpeg_util.h"
/**
 * Load value from the component data buffer at the position given by the buffer size and subsampling
*
* @param value
* @param position_x
* @param position_y
* @param comp
*/
template<
uint8_t s_samp_factor_h = GPUJPEG_DYNAMIC,
uint8_t s_samp_factor_v = GPUJPEG_DYNAMIC
>
struct gpujpeg_preprocessor_comp_to_raw_load_comp
{
static __device__ void
perform(uint8_t & value, int position_x, int position_y, struct gpujpeg_preprocessor_data_component & comp)
{
uint8_t samp_factor_h = s_samp_factor_h;
if ( samp_factor_h == GPUJPEG_DYNAMIC ) {
samp_factor_h = comp.sampling_factor.horizontal;
}
uint8_t samp_factor_v = s_samp_factor_v;
if ( samp_factor_v == GPUJPEG_DYNAMIC ) {
samp_factor_v = comp.sampling_factor.vertical;
}
position_x = position_x / samp_factor_h;
position_y = position_y / samp_factor_v;
int data_position = position_y * comp.data_width + position_x;
value = comp.d_data[data_position];
}
};
template<>
struct gpujpeg_preprocessor_comp_to_raw_load_comp<1, 1>
{
static __device__ void
perform(uint8_t & value, int position_x, int position_y, struct gpujpeg_preprocessor_data_component & comp)
{
int data_position = position_y * comp.data_width + position_x;
value = comp.d_data[data_position];
}
};
template<int comp_count,
uint8_t s_comp1_samp_factor_h, uint8_t s_comp1_samp_factor_v,
uint8_t s_comp2_samp_factor_h, uint8_t s_comp2_samp_factor_v,
uint8_t s_comp3_samp_factor_h, uint8_t s_comp3_samp_factor_v,
uint8_t s_comp4_samp_factor_h, uint8_t s_comp4_samp_factor_v
>
struct gpujpeg_preprocessor_comp_to_raw_load {
};
template<
uint8_t s_comp1_samp_factor_h, uint8_t s_comp1_samp_factor_v,
uint8_t s_comp2_samp_factor_h, uint8_t s_comp2_samp_factor_v,
uint8_t s_comp3_samp_factor_h, uint8_t s_comp3_samp_factor_v,
uint8_t s_comp4_samp_factor_h, uint8_t s_comp4_samp_factor_v
>
struct gpujpeg_preprocessor_comp_to_raw_load <3, s_comp1_samp_factor_h, s_comp1_samp_factor_v,
s_comp2_samp_factor_h, s_comp2_samp_factor_v,
s_comp3_samp_factor_h, s_comp3_samp_factor_v,
s_comp4_samp_factor_h, s_comp4_samp_factor_v> {
static __device__ void perform(uchar4 & value, int position_x, int position_y, struct gpujpeg_preprocessor_data & data) {
gpujpeg_preprocessor_comp_to_raw_load_comp<s_comp1_samp_factor_h, s_comp1_samp_factor_v>::perform(value.x, position_x, position_y, data.comp[0]);
gpujpeg_preprocessor_comp_to_raw_load_comp<s_comp2_samp_factor_h, s_comp2_samp_factor_v>::perform(value.y, position_x, position_y, data.comp[1]);
gpujpeg_preprocessor_comp_to_raw_load_comp<s_comp3_samp_factor_h, s_comp3_samp_factor_v>::perform(value.z, position_x, position_y, data.comp[2]);
}
};
template<
uint8_t s_comp1_samp_factor_h, uint8_t s_comp1_samp_factor_v,
uint8_t s_comp2_samp_factor_h, uint8_t s_comp2_samp_factor_v,
uint8_t s_comp3_samp_factor_h, uint8_t s_comp3_samp_factor_v,
uint8_t s_comp4_samp_factor_h, uint8_t s_comp4_samp_factor_v
>
struct gpujpeg_preprocessor_comp_to_raw_load <4, s_comp1_samp_factor_h, s_comp1_samp_factor_v,
s_comp2_samp_factor_h, s_comp2_samp_factor_v,
s_comp3_samp_factor_h, s_comp3_samp_factor_v,
s_comp4_samp_factor_h, s_comp4_samp_factor_v> {
static __device__ void perform(uchar4 & value, int position_x, int position_y, struct gpujpeg_preprocessor_data & data) {
gpujpeg_preprocessor_comp_to_raw_load_comp<s_comp1_samp_factor_h, s_comp1_samp_factor_v>::perform(value.x, position_x, position_y, data.comp[0]);
gpujpeg_preprocessor_comp_to_raw_load_comp<s_comp2_samp_factor_h, s_comp2_samp_factor_v>::perform(value.y, position_x, position_y, data.comp[1]);
gpujpeg_preprocessor_comp_to_raw_load_comp<s_comp3_samp_factor_h, s_comp3_samp_factor_v>::perform(value.z, position_x, position_y, data.comp[2]);
gpujpeg_preprocessor_comp_to_raw_load_comp<s_comp4_samp_factor_h, s_comp4_samp_factor_v>::perform(value.w, position_x, position_y, data.comp[3]);
}
};
template<enum gpujpeg_pixel_format pixel_format>
inline __device__ void gpujpeg_comp_to_raw_store(uint8_t *d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uchar4 &r);
template<>
inline __device__ void gpujpeg_comp_to_raw_store<GPUJPEG_U8>(uint8_t *d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uchar4 &r)
{
d_data_raw[image_position] = r.x;
}
template<>
inline __device__ void gpujpeg_comp_to_raw_store<GPUJPEG_444_U8_P012>(uint8_t *d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uchar4 &r)
{
image_position = image_position * 3;
d_data_raw[image_position + 0] = r.x;
d_data_raw[image_position + 1] = r.y;
d_data_raw[image_position + 2] = r.z;
}
template<>
inline __device__ void gpujpeg_comp_to_raw_store<GPUJPEG_444_U8_P012A>(uint8_t *d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uchar4 &r)
{
image_position = image_position * 4;
d_data_raw[image_position + 0] = r.x;
d_data_raw[image_position + 1] = r.y;
d_data_raw[image_position + 2] = r.z;
d_data_raw[image_position + 3] = r.w;
}
template<>
inline __device__ void gpujpeg_comp_to_raw_store<GPUJPEG_444_U8_P012Z>(uint8_t *d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uchar4 &r)
{
image_position = image_position * 4;
d_data_raw[image_position + 0] = r.x;
d_data_raw[image_position + 1] = r.y;
d_data_raw[image_position + 2] = r.z;
d_data_raw[image_position + 3] = 0x0;
}
template<>
inline __device__ void gpujpeg_comp_to_raw_store<GPUJPEG_444_U8_P0P1P2>(uint8_t *d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uchar4 &r)
{
d_data_raw[image_position] = r.x;
d_data_raw[image_width * image_height + image_position] = r.y;
d_data_raw[2 * image_width * image_height + image_position] = r.z;
}
template<>
inline __device__ void gpujpeg_comp_to_raw_store<GPUJPEG_422_U8_P0P1P2>(uint8_t *d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uchar4 &r)
{
d_data_raw[image_position] = r.x;
if ( (x % 2) == 0 ) {
d_data_raw[image_width * image_height + image_position / 2] = r.y;
d_data_raw[image_width * image_height + image_height * ((image_width + 1) / 2) + image_position / 2] = r.z;
}
}
template<>
inline __device__ void gpujpeg_comp_to_raw_store<GPUJPEG_422_U8_P1020>(uint8_t *d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uchar4 &r)
{
image_position = image_position * 2;
d_data_raw[image_position + 1] = r.x;
if ( (x % 2) == 0 )
d_data_raw[image_position + 0] = r.y;
else
d_data_raw[image_position + 0] = r.z;
}
template<>
inline __device__ void gpujpeg_comp_to_raw_store<GPUJPEG_420_U8_P0P1P2>(uint8_t *d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uchar4 &r)
{
d_data_raw[image_position] = r.x;
if ( (image_position % 2) == 0 && (y % 2) == 0 ) {
d_data_raw[image_width * image_height + y / 2 * ((image_width + 1) / 2) + x / 2] = r.y;
d_data_raw[image_width * image_height + ((image_height + 1) / 2 + y / 2) * ((image_width + 1) / 2) + x / 2] = r.z;
}
}
/**
 * Kernel - copy the separated component buffers into the target raw image data,
 * applying the color transform required by the output pixel format.
 *
 * @param data          Preprocessor data (component buffers and sampling factors)
 * @param d_data_raw    Target raw image data
 * @param image_width   Output image width in pixels
 * @param image_height  Output image height in pixels
* @return void
*/
typedef void (*gpujpeg_preprocessor_decode_kernel)(struct gpujpeg_preprocessor_data data, uint8_t* d_data_raw, int image_width, int image_height);
template<
enum gpujpeg_color_space color_space_internal,
enum gpujpeg_color_space color_space,
enum gpujpeg_pixel_format pixel_format,
int comp_count,
uint8_t s_comp1_samp_factor_h, uint8_t s_comp1_samp_factor_v,
uint8_t s_comp2_samp_factor_h, uint8_t s_comp2_samp_factor_v,
uint8_t s_comp3_samp_factor_h, uint8_t s_comp3_samp_factor_v,
uint8_t s_comp4_samp_factor_h, uint8_t s_comp4_samp_factor_v
>
__global__ void
gpujpeg_preprocessor_comp_to_raw_kernel(struct gpujpeg_preprocessor_data data, uint8_t* d_data_raw, int image_width, int image_height)
{
int x = threadIdx.x;
int gX = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x;
int image_position = gX + x;
if ( image_position >= (image_width * image_height) )
return;
int image_position_x = image_position % image_width;
int image_position_y = image_position / image_width;
// Load
uchar4 r;
gpujpeg_preprocessor_comp_to_raw_load<comp_count, s_comp1_samp_factor_h, s_comp1_samp_factor_v, s_comp2_samp_factor_h, s_comp2_samp_factor_v, s_comp3_samp_factor_h, s_comp3_samp_factor_v, s_comp4_samp_factor_h, s_comp4_samp_factor_v>::perform(r, image_position_x, image_position_y, data);
// Color transform
gpujpeg_color_transform<color_space_internal, color_space>::perform(r);
// Save
if (pixel_format == GPUJPEG_444_U8_P012A && comp_count == 3) {
r.w = 0xFF;
}
gpujpeg_comp_to_raw_store<pixel_format>(d_data_raw, image_width, image_height, image_position, image_position_x, image_position_y, r);
}
/**
* Select preprocessor decode kernel
*
* @param decoder
* @return kernel
*/
template<enum gpujpeg_color_space color_space_internal>
gpujpeg_preprocessor_decode_kernel
gpujpeg_preprocessor_select_decode_kernel(struct gpujpeg_coder* coder)
{
gpujpeg_preprocessor_sampling_factor_t sampling_factor = gpujpeg_preprocessor_make_sampling_factor(
coder->sampling_factor.horizontal / coder->component[0].sampling_factor.horizontal,
coder->sampling_factor.vertical / coder->component[0].sampling_factor.vertical,
coder->sampling_factor.horizontal / coder->component[1].sampling_factor.horizontal,
coder->sampling_factor.vertical / coder->component[1].sampling_factor.vertical,
coder->sampling_factor.horizontal / coder->component[2].sampling_factor.horizontal,
coder->sampling_factor.vertical / coder->component[2].sampling_factor.vertical,
coder->param_image.comp_count == 4 ? coder->sampling_factor.horizontal / coder->component[3].sampling_factor.horizontal : 1,
coder->param_image.comp_count == 4 ? coder->sampling_factor.vertical / coder->component[3].sampling_factor.vertical : 1
);
#define RETURN_KERNEL_SWITCH(PIXEL_FORMAT, COLOR, P1, P2, P3, P4, P5, P6, P7, P8) \
switch ( PIXEL_FORMAT ) { \
case GPUJPEG_U8: return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_U8, 3, P1, P2, P3, P4, P5, P6, P7, P8>; \
case GPUJPEG_444_U8_P012: return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_444_U8_P012, 3, P1, P2, P3, P4, P5, P6, P7, P8>; \
case GPUJPEG_444_U8_P012A: return coder->param_image.comp_count == 4 ? &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_444_U8_P012A, 4, P1, P2, P3, P4, P5, P6, P7, P8> : &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_444_U8_P012A, 3, P1, P2, P3, P4, P5, P6, P7, P8>; \
case GPUJPEG_444_U8_P012Z: return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_444_U8_P012Z, 3, P1, P2, P3, P4, P5, P6, P7, P8>; \
case GPUJPEG_422_U8_P1020: return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_422_U8_P1020, 3, P1, P2, P3, P4, P5, P6, P7, P8>; \
case GPUJPEG_444_U8_P0P1P2: return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_444_U8_P0P1P2, 3, P1, P2, P3, P4, P5, P6, P7, P8>; \
case GPUJPEG_422_U8_P0P1P2: return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_422_U8_P0P1P2, 3, P1, P2, P3, P4, P5, P6, P7, P8>; \
case GPUJPEG_420_U8_P0P1P2: return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_420_U8_P0P1P2, 3, P1, P2, P3, P4, P5, P6, P7, P8>; \
case GPUJPEG_PIXFMT_NONE: GPUJPEG_ASSERT(0 && "Postprocess to GPUJPEG_PIXFMT_NONE not allowed"); \
} \
#define RETURN_KERNEL_IF(PIXEL_FORMAT, COLOR, P1, P2, P3, P4, P5, P6, P7, P8) \
if ( sampling_factor == gpujpeg_preprocessor_make_sampling_factor(P1, P2, P3, P4, P5, P6, P7, P8) ) { \
int max_h = max(P1, max(P3, P5)); \
int max_v = max(P2, max(P4, P6)); \
if ( coder->param.verbose >= 1 ) { \
printf("Using faster kernel for postprocessor (precompiled %dx%d, %dx%d, %dx%d).\n", max_h / P1, max_v / P2, max_h / P3, max_v / P4, max_h / P5, max_v / P6); \
} \
RETURN_KERNEL_SWITCH(PIXEL_FORMAT, COLOR, P1, P2, P3, P4, P5, P6, P7, P8) \
}
#define RETURN_KERNEL(PIXEL_FORMAT, COLOR) \
RETURN_KERNEL_IF(PIXEL_FORMAT, COLOR, 1, 1, 1, 1, 1, 1, 1, 1) /* 4:4:4 */ \
else RETURN_KERNEL_IF(PIXEL_FORMAT, COLOR, 1, 1, 2, 2, 2, 2, 1, 1) /* 4:2:0 */ \
else RETURN_KERNEL_IF(PIXEL_FORMAT, COLOR, 1, 1, 1, 2, 1, 2, 1, 1) /* 4:4:0 */ \
else RETURN_KERNEL_IF(PIXEL_FORMAT, COLOR, 1, 1, 2, 1, 2, 1, 1, 1) /* 4:2:2 */ \
else { \
if ( coder->param.verbose >= 0 ) { \
printf("Using slower kernel for postprocessor (dynamic %dx%d, %dx%d, %dx%d).\n", coder->component[0].sampling_factor.horizontal, coder->component[0].sampling_factor.vertical, coder->component[1].sampling_factor.horizontal, coder->component[1].sampling_factor.vertical, coder->component[2].sampling_factor.horizontal, coder->component[2].sampling_factor.vertical); \
} \
RETURN_KERNEL_SWITCH(PIXEL_FORMAT, COLOR, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC) \
} \
// None color space
if ( coder->param_image.color_space == GPUJPEG_NONE ) {
RETURN_KERNEL(coder->param_image.pixel_format, GPUJPEG_NONE)
}
// RGB color space
else if ( coder->param_image.color_space == GPUJPEG_RGB ) {
RETURN_KERNEL(coder->param_image.pixel_format, GPUJPEG_RGB)
}
// YCbCr color space
else if ( coder->param_image.color_space == GPUJPEG_YCBCR_BT601 ) {
RETURN_KERNEL(coder->param_image.pixel_format, GPUJPEG_YCBCR_BT601)
}
// YCbCr color space
else if ( coder->param_image.color_space == GPUJPEG_YCBCR_BT601_256LVLS ) {
RETURN_KERNEL(coder->param_image.pixel_format, GPUJPEG_YCBCR_BT601_256LVLS)
}
// YCbCr color space
else if ( coder->param_image.color_space == GPUJPEG_YCBCR_BT709 ) {
RETURN_KERNEL(coder->param_image.pixel_format, GPUJPEG_YCBCR_BT709)
}
#ifndef ENABLE_YUV
// YUV color space
else if ( coder->param_image.color_space == GPUJPEG_YUV ) {
RETURN_KERNEL(coder->param_image.pixel_format, GPUJPEG_YUV)
}
#endif
// Unknown color space
else {
assert(false);
}
#undef RETURN_KERNEL_SWITCH
#undef RETURN_KERNEL_IF
#undef RETURN_KERNEL
return NULL;
}
static int gpujpeg_preprocessor_decode_no_transform(struct gpujpeg_coder * coder)
{
if (coder->param_image.comp_count == 3 && coder->param_image.color_space != coder->param.color_space_internal) {
/*fprintf(stderr, "Decoding JPEG to a planar pixel format is supported only when no color transformation is required. "
"JPEG internal color space is set to \"%s\", image is \"%s\".\n",
gpujpeg_color_space_get_name(coder->param.color_space_internal),
gpujpeg_color_space_get_name(coder->param_image.color_space));*/
return 0;
}
const int *sampling_factors = gpujpeg_pixel_format_get_sampling_factor(coder->param_image.pixel_format);
for (int i = 0; i < coder->param_image.comp_count; ++i) {
if (coder->component[i].sampling_factor.horizontal != sampling_factors[i * 2]
|| coder->component[i].sampling_factor.vertical != sampling_factors[i * 2 + 1]) {
//const char *name = gpujpeg_pixel_format_get_name(coder->param_image.pixel_format);
/*fprintf(stderr, "Decoding JPEG to a planar pixel format cannot change subsampling (%s to %s).\n",
gpujpeg_subsampling_get_name(coder->param_image.comp_count, coder->component),
gpujpeg_pixel_format_get_name(coder->param_image.pixel_format));*/
return 0;
}
}
return 1;
}
static int gpujpeg_preprocessor_decode_aligned(struct gpujpeg_coder * coder)
{
for (int i = 0; i < coder->param_image.comp_count; ++i) {
if (coder->component[i].data_width != coder->component[i].width) {
return 0;
}
}
return 1;
}
/* Documented at declaration */
int
gpujpeg_preprocessor_decoder_init(struct gpujpeg_coder* coder)
{
coder->preprocessor = NULL;
if (!gpujpeg_pixel_format_is_interleaved(coder->param_image.pixel_format) &&
gpujpeg_preprocessor_decode_no_transform(coder) &&
gpujpeg_preprocessor_decode_aligned(coder)) {
if ( coder->param.verbose >= 2 ) {
printf("Matching format detected - not using postprocessor, using memcpy instead.");
}
return 0;
}
if (coder->param_image.comp_count == 1 && gpujpeg_pixel_format_get_comp_count(coder->param_image.pixel_format) > 1) {
coder->param.verbose >= 0 && fprintf(stderr, "[GPUJPEG] [Error] Decoding single component JPEG allowed only to single component output format!\n");
return -1;
}
assert(coder->param_image.comp_count == 3 || coder->param_image.comp_count == 4);
if (coder->param.color_space_internal == GPUJPEG_NONE) {
coder->preprocessor = (void*)gpujpeg_preprocessor_select_decode_kernel<GPUJPEG_NONE>(coder);
}
else if (coder->param.color_space_internal == GPUJPEG_RGB) {
coder->preprocessor = (void*)gpujpeg_preprocessor_select_decode_kernel<GPUJPEG_RGB>(coder);
}
else if (coder->param.color_space_internal == GPUJPEG_YCBCR_BT601) {
coder->preprocessor = (void*)gpujpeg_preprocessor_select_decode_kernel<GPUJPEG_YCBCR_BT601>(coder);
}
else if (coder->param.color_space_internal == GPUJPEG_YCBCR_BT601_256LVLS) {
coder->preprocessor = (void*)gpujpeg_preprocessor_select_decode_kernel<GPUJPEG_YCBCR_BT601_256LVLS>(coder);
}
else if (coder->param.color_space_internal == GPUJPEG_YCBCR_BT709) {
coder->preprocessor = (void*)gpujpeg_preprocessor_select_decode_kernel<GPUJPEG_YCBCR_BT709>(coder);
}
else {
assert(false);
}
if (coder->preprocessor == NULL) {
return -1;
}
return 0;
}
/**
* Copies raw data GPU memory without running any postprocessor kernel.
*
* This assumes that the JPEG has same color space as input raw image and
* currently also that the component subsampling correspond between raw and
* JPEG (although at least different horizontal subsampling can be quite
* easily done).
*
* @invariant gpujpeg_preprocessor_decode_no_transform(coder) != 0
*/
static int
gpujpeg_preprocessor_decoder_copy_planar_data(struct gpujpeg_coder * coder, hipStream_t stream)
{
assert(coder->param_image.comp_count == 1 ||
coder->param_image.comp_count == 3);
size_t data_raw_offset = 0;
bool needs_stride = false; // true if width is not divisible by MCU width
for (int i = 0; i < coder->param_image.comp_count; ++i) {
needs_stride = needs_stride || coder->component[i].width != coder->component[i].data_width;
}
if (!needs_stride) {
for (int i = 0; i < coder->param_image.comp_count; ++i) {
size_t component_size = coder->component[i].width * coder->component[i].height;
hipMemcpyAsync(coder->d_data_raw + data_raw_offset, coder->component[i].d_data, component_size, hipMemcpyDeviceToDevice, stream);
data_raw_offset += component_size;
}
} else {
for (int i = 0; i < coder->param_image.comp_count; ++i) {
int spitch = coder->component[i].data_width;
int dpitch = coder->component[i].width;
size_t component_size = spitch * coder->component[i].height;
hipMemcpy2DAsync(coder->d_data_raw + data_raw_offset, dpitch, coder->component[i].d_data, spitch, coder->component[i].width, coder->component[i].height, hipMemcpyDeviceToDevice, stream);
data_raw_offset += component_size;
}
}
gpujpeg_cuda_check_error("Preprocessor copy failed", return -1);
return 0;
}
/* Documented at declaration */
int
gpujpeg_preprocessor_decode(struct gpujpeg_coder* coder, hipStream_t stream)
{
if (!coder->preprocessor) {
return gpujpeg_preprocessor_decoder_copy_planar_data(coder, stream);
}
// Select kernel
gpujpeg_preprocessor_decode_kernel kernel = (gpujpeg_preprocessor_decode_kernel)coder->preprocessor;
assert(kernel != NULL);
int image_width = coder->param_image.width;
int image_height = coder->param_image.height;
// When saving 4:2:2 data of odd width, the data should have even width, so round it
if (coder->param_image.pixel_format == GPUJPEG_422_U8_P1020) {
image_width = gpujpeg_div_and_round_up(coder->param_image.width, 2) * 2;
}
// Prepare unit size
/// @todo this stuff doesn't look correct - we multiply by unitSize and then divide by it
int unitSize = gpujpeg_pixel_format_get_unit_size(coder->param_image.pixel_format);
if (unitSize == 0) {
unitSize = 1;
}
// Prepare kernel
int alignedSize = gpujpeg_div_and_round_up(image_width * image_height, RGB_8BIT_THREADS) * RGB_8BIT_THREADS * unitSize;
dim3 threads (RGB_8BIT_THREADS);
dim3 grid (alignedSize / (RGB_8BIT_THREADS * unitSize));
assert(alignedSize % (RGB_8BIT_THREADS * unitSize) == 0);
if ( grid.x > GPUJPEG_CUDA_MAXIMUM_GRID_SIZE ) {
grid.y = gpujpeg_div_and_round_up(grid.x, GPUJPEG_CUDA_MAXIMUM_GRID_SIZE);
grid.x = GPUJPEG_CUDA_MAXIMUM_GRID_SIZE;
}
// Run kernel
struct gpujpeg_preprocessor_data data;
for ( int comp = 0; comp < coder->param_image.comp_count; comp++ ) {
assert(coder->sampling_factor.horizontal % coder->component[comp].sampling_factor.horizontal == 0);
assert(coder->sampling_factor.vertical % coder->component[comp].sampling_factor.vertical == 0);
data.comp[comp].d_data = coder->component[comp].d_data;
data.comp[comp].sampling_factor.horizontal = coder->sampling_factor.horizontal / coder->component[comp].sampling_factor.horizontal;
data.comp[comp].sampling_factor.vertical = coder->sampling_factor.vertical / coder->component[comp].sampling_factor.vertical;
data.comp[comp].data_width = coder->component[comp].data_width;
}
hipLaunchKernelGGL(( kernel), dim3(grid), dim3(threads), 0, stream,
data,
coder->d_data_raw,
image_width,
image_height
);
gpujpeg_cuda_check_error("Preprocessor encoding failed", return -1);
return 0;
}
/* vi: set expandtab sw=4: */
| 801cbf4b6fc923a99e2824eadf370eaa431574ec.cu | /*
* Copyright (c) 2011-2021, CESNET z.s.p.o
* Copyright (c) 2011, Silicon Genome, LLC.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file
* @brief
 * This file contains the postprocessors that convert the common format used by the
 * computational kernels into the raw output image. It also performs color space transformations.
*/
#include "gpujpeg_colorspace.h"
#include "gpujpeg_preprocessor_common.h"
#include "gpujpeg_postprocessor.h"
#include "gpujpeg_util.h"
/**
 * Load value from the component data buffer at the position given by the buffer size and subsampling
*
* @param value
* @param position_x
* @param position_y
* @param comp
*/
template<
uint8_t s_samp_factor_h = GPUJPEG_DYNAMIC,
uint8_t s_samp_factor_v = GPUJPEG_DYNAMIC
>
struct gpujpeg_preprocessor_comp_to_raw_load_comp
{
static __device__ void
perform(uint8_t & value, int position_x, int position_y, struct gpujpeg_preprocessor_data_component & comp)
{
uint8_t samp_factor_h = s_samp_factor_h;
if ( samp_factor_h == GPUJPEG_DYNAMIC ) {
samp_factor_h = comp.sampling_factor.horizontal;
}
uint8_t samp_factor_v = s_samp_factor_v;
if ( samp_factor_v == GPUJPEG_DYNAMIC ) {
samp_factor_v = comp.sampling_factor.vertical;
}
position_x = position_x / samp_factor_h;
position_y = position_y / samp_factor_v;
int data_position = position_y * comp.data_width + position_x;
value = comp.d_data[data_position];
}
};
template<>
struct gpujpeg_preprocessor_comp_to_raw_load_comp<1, 1>
{
static __device__ void
perform(uint8_t & value, int position_x, int position_y, struct gpujpeg_preprocessor_data_component & comp)
{
int data_position = position_y * comp.data_width + position_x;
value = comp.d_data[data_position];
}
};
template<int comp_count,
uint8_t s_comp1_samp_factor_h, uint8_t s_comp1_samp_factor_v,
uint8_t s_comp2_samp_factor_h, uint8_t s_comp2_samp_factor_v,
uint8_t s_comp3_samp_factor_h, uint8_t s_comp3_samp_factor_v,
uint8_t s_comp4_samp_factor_h, uint8_t s_comp4_samp_factor_v
>
struct gpujpeg_preprocessor_comp_to_raw_load {
};
template<
uint8_t s_comp1_samp_factor_h, uint8_t s_comp1_samp_factor_v,
uint8_t s_comp2_samp_factor_h, uint8_t s_comp2_samp_factor_v,
uint8_t s_comp3_samp_factor_h, uint8_t s_comp3_samp_factor_v,
uint8_t s_comp4_samp_factor_h, uint8_t s_comp4_samp_factor_v
>
struct gpujpeg_preprocessor_comp_to_raw_load <3, s_comp1_samp_factor_h, s_comp1_samp_factor_v,
s_comp2_samp_factor_h, s_comp2_samp_factor_v,
s_comp3_samp_factor_h, s_comp3_samp_factor_v,
s_comp4_samp_factor_h, s_comp4_samp_factor_v> {
static __device__ void perform(uchar4 & value, int position_x, int position_y, struct gpujpeg_preprocessor_data & data) {
gpujpeg_preprocessor_comp_to_raw_load_comp<s_comp1_samp_factor_h, s_comp1_samp_factor_v>::perform(value.x, position_x, position_y, data.comp[0]);
gpujpeg_preprocessor_comp_to_raw_load_comp<s_comp2_samp_factor_h, s_comp2_samp_factor_v>::perform(value.y, position_x, position_y, data.comp[1]);
gpujpeg_preprocessor_comp_to_raw_load_comp<s_comp3_samp_factor_h, s_comp3_samp_factor_v>::perform(value.z, position_x, position_y, data.comp[2]);
}
};
template<
uint8_t s_comp1_samp_factor_h, uint8_t s_comp1_samp_factor_v,
uint8_t s_comp2_samp_factor_h, uint8_t s_comp2_samp_factor_v,
uint8_t s_comp3_samp_factor_h, uint8_t s_comp3_samp_factor_v,
uint8_t s_comp4_samp_factor_h, uint8_t s_comp4_samp_factor_v
>
struct gpujpeg_preprocessor_comp_to_raw_load <4, s_comp1_samp_factor_h, s_comp1_samp_factor_v,
s_comp2_samp_factor_h, s_comp2_samp_factor_v,
s_comp3_samp_factor_h, s_comp3_samp_factor_v,
s_comp4_samp_factor_h, s_comp4_samp_factor_v> {
static __device__ void perform(uchar4 & value, int position_x, int position_y, struct gpujpeg_preprocessor_data & data) {
gpujpeg_preprocessor_comp_to_raw_load_comp<s_comp1_samp_factor_h, s_comp1_samp_factor_v>::perform(value.x, position_x, position_y, data.comp[0]);
gpujpeg_preprocessor_comp_to_raw_load_comp<s_comp2_samp_factor_h, s_comp2_samp_factor_v>::perform(value.y, position_x, position_y, data.comp[1]);
gpujpeg_preprocessor_comp_to_raw_load_comp<s_comp3_samp_factor_h, s_comp3_samp_factor_v>::perform(value.z, position_x, position_y, data.comp[2]);
gpujpeg_preprocessor_comp_to_raw_load_comp<s_comp4_samp_factor_h, s_comp4_samp_factor_v>::perform(value.w, position_x, position_y, data.comp[3]);
}
};
template<enum gpujpeg_pixel_format pixel_format>
inline __device__ void gpujpeg_comp_to_raw_store(uint8_t *d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uchar4 &r);
template<>
inline __device__ void gpujpeg_comp_to_raw_store<GPUJPEG_U8>(uint8_t *d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uchar4 &r)
{
d_data_raw[image_position] = r.x;
}
template<>
inline __device__ void gpujpeg_comp_to_raw_store<GPUJPEG_444_U8_P012>(uint8_t *d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uchar4 &r)
{
image_position = image_position * 3;
d_data_raw[image_position + 0] = r.x;
d_data_raw[image_position + 1] = r.y;
d_data_raw[image_position + 2] = r.z;
}
template<>
inline __device__ void gpujpeg_comp_to_raw_store<GPUJPEG_444_U8_P012A>(uint8_t *d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uchar4 &r)
{
image_position = image_position * 4;
d_data_raw[image_position + 0] = r.x;
d_data_raw[image_position + 1] = r.y;
d_data_raw[image_position + 2] = r.z;
d_data_raw[image_position + 3] = r.w;
}
template<>
inline __device__ void gpujpeg_comp_to_raw_store<GPUJPEG_444_U8_P012Z>(uint8_t *d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uchar4 &r)
{
image_position = image_position * 4;
d_data_raw[image_position + 0] = r.x;
d_data_raw[image_position + 1] = r.y;
d_data_raw[image_position + 2] = r.z;
d_data_raw[image_position + 3] = 0x0;
}
template<>
inline __device__ void gpujpeg_comp_to_raw_store<GPUJPEG_444_U8_P0P1P2>(uint8_t *d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uchar4 &r)
{
d_data_raw[image_position] = r.x;
d_data_raw[image_width * image_height + image_position] = r.y;
d_data_raw[2 * image_width * image_height + image_position] = r.z;
}
template<>
inline __device__ void gpujpeg_comp_to_raw_store<GPUJPEG_422_U8_P0P1P2>(uint8_t *d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uchar4 &r)
{
d_data_raw[image_position] = r.x;
if ( (x % 2) == 0 ) {
d_data_raw[image_width * image_height + image_position / 2] = r.y;
d_data_raw[image_width * image_height + image_height * ((image_width + 1) / 2) + image_position / 2] = r.z;
}
}
template<>
inline __device__ void gpujpeg_comp_to_raw_store<GPUJPEG_422_U8_P1020>(uint8_t *d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uchar4 &r)
{
image_position = image_position * 2;
d_data_raw[image_position + 1] = r.x;
if ( (x % 2) == 0 )
d_data_raw[image_position + 0] = r.y;
else
d_data_raw[image_position + 0] = r.z;
}
template<>
inline __device__ void gpujpeg_comp_to_raw_store<GPUJPEG_420_U8_P0P1P2>(uint8_t *d_data_raw, int &image_width, int &image_height, int &image_position, int &x, int &y, uchar4 &r)
{
d_data_raw[image_position] = r.x;
if ( (image_position % 2) == 0 && (y % 2) == 0 ) {
d_data_raw[image_width * image_height + y / 2 * ((image_width + 1) / 2) + x / 2] = r.y;
d_data_raw[image_width * image_height + ((image_height + 1) / 2 + y / 2) * ((image_width + 1) / 2) + x / 2] = r.z;
}
}
/**
 * Kernel - copy the separated component buffers into the target raw image data,
 * applying the color transform required by the output pixel format.
 *
 * @param data          Preprocessor data (component buffers and sampling factors)
 * @param d_data_raw    Target raw image data
 * @param image_width   Output image width in pixels
 * @param image_height  Output image height in pixels
* @return void
*/
typedef void (*gpujpeg_preprocessor_decode_kernel)(struct gpujpeg_preprocessor_data data, uint8_t* d_data_raw, int image_width, int image_height);
template<
enum gpujpeg_color_space color_space_internal,
enum gpujpeg_color_space color_space,
enum gpujpeg_pixel_format pixel_format,
int comp_count,
uint8_t s_comp1_samp_factor_h, uint8_t s_comp1_samp_factor_v,
uint8_t s_comp2_samp_factor_h, uint8_t s_comp2_samp_factor_v,
uint8_t s_comp3_samp_factor_h, uint8_t s_comp3_samp_factor_v,
uint8_t s_comp4_samp_factor_h, uint8_t s_comp4_samp_factor_v
>
__global__ void
gpujpeg_preprocessor_comp_to_raw_kernel(struct gpujpeg_preprocessor_data data, uint8_t* d_data_raw, int image_width, int image_height)
{
int x = threadIdx.x;
int gX = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x;
int image_position = gX + x;
if ( image_position >= (image_width * image_height) )
return;
int image_position_x = image_position % image_width;
int image_position_y = image_position / image_width;
// Load
uchar4 r;
gpujpeg_preprocessor_comp_to_raw_load<comp_count, s_comp1_samp_factor_h, s_comp1_samp_factor_v, s_comp2_samp_factor_h, s_comp2_samp_factor_v, s_comp3_samp_factor_h, s_comp3_samp_factor_v, s_comp4_samp_factor_h, s_comp4_samp_factor_v>::perform(r, image_position_x, image_position_y, data);
// Color transform
gpujpeg_color_transform<color_space_internal, color_space>::perform(r);
// Save
if (pixel_format == GPUJPEG_444_U8_P012A && comp_count == 3) {
r.w = 0xFF;
}
gpujpeg_comp_to_raw_store<pixel_format>(d_data_raw, image_width, image_height, image_position, image_position_x, image_position_y, r);
}
/**
* Select preprocessor decode kernel
*
* @param decoder
* @return kernel
*/
template<enum gpujpeg_color_space color_space_internal>
gpujpeg_preprocessor_decode_kernel
gpujpeg_preprocessor_select_decode_kernel(struct gpujpeg_coder* coder)
{
gpujpeg_preprocessor_sampling_factor_t sampling_factor = gpujpeg_preprocessor_make_sampling_factor(
coder->sampling_factor.horizontal / coder->component[0].sampling_factor.horizontal,
coder->sampling_factor.vertical / coder->component[0].sampling_factor.vertical,
coder->sampling_factor.horizontal / coder->component[1].sampling_factor.horizontal,
coder->sampling_factor.vertical / coder->component[1].sampling_factor.vertical,
coder->sampling_factor.horizontal / coder->component[2].sampling_factor.horizontal,
coder->sampling_factor.vertical / coder->component[2].sampling_factor.vertical,
coder->param_image.comp_count == 4 ? coder->sampling_factor.horizontal / coder->component[3].sampling_factor.horizontal : 1,
coder->param_image.comp_count == 4 ? coder->sampling_factor.vertical / coder->component[3].sampling_factor.vertical : 1
);
#define RETURN_KERNEL_SWITCH(PIXEL_FORMAT, COLOR, P1, P2, P3, P4, P5, P6, P7, P8) \
switch ( PIXEL_FORMAT ) { \
case GPUJPEG_U8: return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_U8, 3, P1, P2, P3, P4, P5, P6, P7, P8>; \
case GPUJPEG_444_U8_P012: return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_444_U8_P012, 3, P1, P2, P3, P4, P5, P6, P7, P8>; \
case GPUJPEG_444_U8_P012A: return coder->param_image.comp_count == 4 ? &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_444_U8_P012A, 4, P1, P2, P3, P4, P5, P6, P7, P8> : &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_444_U8_P012A, 3, P1, P2, P3, P4, P5, P6, P7, P8>; \
case GPUJPEG_444_U8_P012Z: return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_444_U8_P012Z, 3, P1, P2, P3, P4, P5, P6, P7, P8>; \
case GPUJPEG_422_U8_P1020: return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_422_U8_P1020, 3, P1, P2, P3, P4, P5, P6, P7, P8>; \
case GPUJPEG_444_U8_P0P1P2: return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_444_U8_P0P1P2, 3, P1, P2, P3, P4, P5, P6, P7, P8>; \
case GPUJPEG_422_U8_P0P1P2: return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_422_U8_P0P1P2, 3, P1, P2, P3, P4, P5, P6, P7, P8>; \
case GPUJPEG_420_U8_P0P1P2: return &gpujpeg_preprocessor_comp_to_raw_kernel<color_space_internal, COLOR, GPUJPEG_420_U8_P0P1P2, 3, P1, P2, P3, P4, P5, P6, P7, P8>; \
case GPUJPEG_PIXFMT_NONE: GPUJPEG_ASSERT(0 && "Postprocess to GPUJPEG_PIXFMT_NONE not allowed"); \
} \
#define RETURN_KERNEL_IF(PIXEL_FORMAT, COLOR, P1, P2, P3, P4, P5, P6, P7, P8) \
if ( sampling_factor == gpujpeg_preprocessor_make_sampling_factor(P1, P2, P3, P4, P5, P6, P7, P8) ) { \
int max_h = max(P1, max(P3, P5)); \
int max_v = max(P2, max(P4, P6)); \
if ( coder->param.verbose >= 1 ) { \
printf("Using faster kernel for postprocessor (precompiled %dx%d, %dx%d, %dx%d).\n", max_h / P1, max_v / P2, max_h / P3, max_v / P4, max_h / P5, max_v / P6); \
} \
RETURN_KERNEL_SWITCH(PIXEL_FORMAT, COLOR, P1, P2, P3, P4, P5, P6, P7, P8) \
}
#define RETURN_KERNEL(PIXEL_FORMAT, COLOR) \
RETURN_KERNEL_IF(PIXEL_FORMAT, COLOR, 1, 1, 1, 1, 1, 1, 1, 1) /* 4:4:4 */ \
else RETURN_KERNEL_IF(PIXEL_FORMAT, COLOR, 1, 1, 2, 2, 2, 2, 1, 1) /* 4:2:0 */ \
else RETURN_KERNEL_IF(PIXEL_FORMAT, COLOR, 1, 1, 1, 2, 1, 2, 1, 1) /* 4:4:0 */ \
else RETURN_KERNEL_IF(PIXEL_FORMAT, COLOR, 1, 1, 2, 1, 2, 1, 1, 1) /* 4:2:2 */ \
else { \
if ( coder->param.verbose >= 0 ) { \
printf("Using slower kernel for postprocessor (dynamic %dx%d, %dx%d, %dx%d).\n", coder->component[0].sampling_factor.horizontal, coder->component[0].sampling_factor.vertical, coder->component[1].sampling_factor.horizontal, coder->component[1].sampling_factor.vertical, coder->component[2].sampling_factor.horizontal, coder->component[2].sampling_factor.vertical); \
} \
RETURN_KERNEL_SWITCH(PIXEL_FORMAT, COLOR, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC) \
} \
// None color space
if ( coder->param_image.color_space == GPUJPEG_NONE ) {
RETURN_KERNEL(coder->param_image.pixel_format, GPUJPEG_NONE)
}
// RGB color space
else if ( coder->param_image.color_space == GPUJPEG_RGB ) {
RETURN_KERNEL(coder->param_image.pixel_format, GPUJPEG_RGB)
}
// YCbCr color space
else if ( coder->param_image.color_space == GPUJPEG_YCBCR_BT601 ) {
RETURN_KERNEL(coder->param_image.pixel_format, GPUJPEG_YCBCR_BT601)
}
// YCbCr color space
else if ( coder->param_image.color_space == GPUJPEG_YCBCR_BT601_256LVLS ) {
RETURN_KERNEL(coder->param_image.pixel_format, GPUJPEG_YCBCR_BT601_256LVLS)
}
// YCbCr color space
else if ( coder->param_image.color_space == GPUJPEG_YCBCR_BT709 ) {
RETURN_KERNEL(coder->param_image.pixel_format, GPUJPEG_YCBCR_BT709)
}
#ifndef ENABLE_YUV
// YUV color space
else if ( coder->param_image.color_space == GPUJPEG_YUV ) {
RETURN_KERNEL(coder->param_image.pixel_format, GPUJPEG_YUV)
}
#endif
// Unknown color space
else {
assert(false);
}
#undef RETURN_KERNEL_SWITCH
#undef RETURN_KERNEL_IF
#undef RETURN_KERNEL
return NULL;
}
static int gpujpeg_preprocessor_decode_no_transform(struct gpujpeg_coder * coder)
{
if (coder->param_image.comp_count == 3 && coder->param_image.color_space != coder->param.color_space_internal) {
/*fprintf(stderr, "Decoding JPEG to a planar pixel format is supported only when no color transformation is required. "
"JPEG internal color space is set to \"%s\", image is \"%s\".\n",
gpujpeg_color_space_get_name(coder->param.color_space_internal),
gpujpeg_color_space_get_name(coder->param_image.color_space));*/
return 0;
}
const int *sampling_factors = gpujpeg_pixel_format_get_sampling_factor(coder->param_image.pixel_format);
for (int i = 0; i < coder->param_image.comp_count; ++i) {
if (coder->component[i].sampling_factor.horizontal != sampling_factors[i * 2]
|| coder->component[i].sampling_factor.vertical != sampling_factors[i * 2 + 1]) {
//const char *name = gpujpeg_pixel_format_get_name(coder->param_image.pixel_format);
/*fprintf(stderr, "Decoding JPEG to a planar pixel format cannot change subsampling (%s to %s).\n",
gpujpeg_subsampling_get_name(coder->param_image.comp_count, coder->component),
gpujpeg_pixel_format_get_name(coder->param_image.pixel_format));*/
return 0;
}
}
return 1;
}
static int gpujpeg_preprocessor_decode_aligned(struct gpujpeg_coder * coder)
{
for (int i = 0; i < coder->param_image.comp_count; ++i) {
if (coder->component[i].data_width != coder->component[i].width) {
return 0;
}
}
return 1;
}
/* Documented at declaration */
int
gpujpeg_preprocessor_decoder_init(struct gpujpeg_coder* coder)
{
coder->preprocessor = NULL;
if (!gpujpeg_pixel_format_is_interleaved(coder->param_image.pixel_format) &&
gpujpeg_preprocessor_decode_no_transform(coder) &&
gpujpeg_preprocessor_decode_aligned(coder)) {
if ( coder->param.verbose >= 2 ) {
printf("Matching format detected - not using postprocessor, using memcpy instead.");
}
return 0;
}
if (coder->param_image.comp_count == 1 && gpujpeg_pixel_format_get_comp_count(coder->param_image.pixel_format) > 1) {
coder->param.verbose >= 0 && fprintf(stderr, "[GPUJPEG] [Error] Decoding single component JPEG allowed only to single component output format!\n");
return -1;
}
assert(coder->param_image.comp_count == 3 || coder->param_image.comp_count == 4);
if (coder->param.color_space_internal == GPUJPEG_NONE) {
coder->preprocessor = (void*)gpujpeg_preprocessor_select_decode_kernel<GPUJPEG_NONE>(coder);
}
else if (coder->param.color_space_internal == GPUJPEG_RGB) {
coder->preprocessor = (void*)gpujpeg_preprocessor_select_decode_kernel<GPUJPEG_RGB>(coder);
}
else if (coder->param.color_space_internal == GPUJPEG_YCBCR_BT601) {
coder->preprocessor = (void*)gpujpeg_preprocessor_select_decode_kernel<GPUJPEG_YCBCR_BT601>(coder);
}
else if (coder->param.color_space_internal == GPUJPEG_YCBCR_BT601_256LVLS) {
coder->preprocessor = (void*)gpujpeg_preprocessor_select_decode_kernel<GPUJPEG_YCBCR_BT601_256LVLS>(coder);
}
else if (coder->param.color_space_internal == GPUJPEG_YCBCR_BT709) {
coder->preprocessor = (void*)gpujpeg_preprocessor_select_decode_kernel<GPUJPEG_YCBCR_BT709>(coder);
}
else {
assert(false);
}
if (coder->preprocessor == NULL) {
return -1;
}
return 0;
}
/**
 * Copies raw data within GPU memory without running any postprocessor kernel.
 *
 * This assumes that the JPEG has the same color space as the input raw image and,
 * currently, also that the component subsampling corresponds between the raw image
 * and the JPEG (although at least a different horizontal subsampling could be
 * handled quite easily).
 *
 * @invariant gpujpeg_preprocessor_decode_no_transform(coder) != 0
 */
static int
gpujpeg_preprocessor_decoder_copy_planar_data(struct gpujpeg_coder * coder, cudaStream_t stream)
{
assert(coder->param_image.comp_count == 1 ||
coder->param_image.comp_count == 3);
size_t data_raw_offset = 0;
bool needs_stride = false; // true if width is not divisible by MCU width
for (int i = 0; i < coder->param_image.comp_count; ++i) {
needs_stride = needs_stride || coder->component[i].width != coder->component[i].data_width;
}
if (!needs_stride) {
for (int i = 0; i < coder->param_image.comp_count; ++i) {
size_t component_size = coder->component[i].width * coder->component[i].height;
cudaMemcpyAsync(coder->d_data_raw + data_raw_offset, coder->component[i].d_data, component_size, cudaMemcpyDeviceToDevice, stream);
data_raw_offset += component_size;
}
} else {
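        /* Rows are padded (data_width > width): use a 2D copy so that the per-row
         * padding is dropped while copying into the tightly packed raw buffer. */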
for (int i = 0; i < coder->param_image.comp_count; ++i) {
int spitch = coder->component[i].data_width;
int dpitch = coder->component[i].width;
size_t component_size = spitch * coder->component[i].height;
cudaMemcpy2DAsync(coder->d_data_raw + data_raw_offset, dpitch, coder->component[i].d_data, spitch, coder->component[i].width, coder->component[i].height, cudaMemcpyDeviceToDevice, stream);
data_raw_offset += component_size;
}
}
gpujpeg_cuda_check_error("Preprocessor copy failed", return -1);
return 0;
}
/* Documented at declaration */
int
gpujpeg_preprocessor_decode(struct gpujpeg_coder* coder, cudaStream_t stream)
{
if (!coder->preprocessor) {
return gpujpeg_preprocessor_decoder_copy_planar_data(coder, stream);
}
// Select kernel
gpujpeg_preprocessor_decode_kernel kernel = (gpujpeg_preprocessor_decode_kernel)coder->preprocessor;
assert(kernel != NULL);
int image_width = coder->param_image.width;
int image_height = coder->param_image.height;
// When saving 4:2:2 data of odd width, the data should have even width, so round it
if (coder->param_image.pixel_format == GPUJPEG_422_U8_P1020) {
image_width = gpujpeg_div_and_round_up(coder->param_image.width, 2) * 2;
}
// Prepare unit size
/// @todo this stuff doesn't look correct - we multiply by unitSize and then divide by it
int unitSize = gpujpeg_pixel_format_get_unit_size(coder->param_image.pixel_format);
if (unitSize == 0) {
unitSize = 1;
}
// Prepare kernel
int alignedSize = gpujpeg_div_and_round_up(image_width * image_height, RGB_8BIT_THREADS) * RGB_8BIT_THREADS * unitSize;
dim3 threads (RGB_8BIT_THREADS);
dim3 grid (alignedSize / (RGB_8BIT_THREADS * unitSize));
assert(alignedSize % (RGB_8BIT_THREADS * unitSize) == 0);
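    // The grid x-dimension is limited; fold an oversized 1-D grid into a 2-D grid.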
if ( grid.x > GPUJPEG_CUDA_MAXIMUM_GRID_SIZE ) {
grid.y = gpujpeg_div_and_round_up(grid.x, GPUJPEG_CUDA_MAXIMUM_GRID_SIZE);
grid.x = GPUJPEG_CUDA_MAXIMUM_GRID_SIZE;
}
// Run kernel
struct gpujpeg_preprocessor_data data;
for ( int comp = 0; comp < coder->param_image.comp_count; comp++ ) {
assert(coder->sampling_factor.horizontal % coder->component[comp].sampling_factor.horizontal == 0);
assert(coder->sampling_factor.vertical % coder->component[comp].sampling_factor.vertical == 0);
data.comp[comp].d_data = coder->component[comp].d_data;
data.comp[comp].sampling_factor.horizontal = coder->sampling_factor.horizontal / coder->component[comp].sampling_factor.horizontal;
data.comp[comp].sampling_factor.vertical = coder->sampling_factor.vertical / coder->component[comp].sampling_factor.vertical;
data.comp[comp].data_width = coder->component[comp].data_width;
}
kernel<<<grid, threads, 0, stream>>>(
data,
coder->d_data_raw,
image_width,
image_height
);
gpujpeg_cuda_check_error("Preprocessor encoding failed", return -1);
return 0;
}
/* vi: set expandtab sw=4: */
|
2f34e98ea9ef40315d6a2e0d1966d384e4e13842.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <math.h>
#include <ctime>
#include <time.h>
#include <hip/hip_runtime.h>
#define MAXNUM 10000
// gpu loop rolling
void __global__ loop_kernel_rolling(int *a,int *b,int *c,int n)
{
int index= threadIdx.x + blockIdx.x * blockDim.x;
int gridStride = gridDim.x * blockDim.x;
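    // Grid-stride loop: each thread processes elements index, index + gridStride, ...
    // so a fixed launch configuration covers an array of any length n.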
for(int i=index; i<n; i+=gridStride)
c[i]=a[i]+b[i];
}
// gpu loop unrolling
void __global__ loop_kernel_unrolling(int *a,int *b,int *c,int n)
{
int index= threadIdx.x + blockIdx.x * blockDim.x;
int gridStride = gridDim.x * blockDim.x;
int i = index;
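    // Manually unrolled grid-stride loop: every step below repeats the same
    // bounds check, element-wise add and stride advance explicitly.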
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
    if(i>=n) return; // 10 times
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return; // 100 times
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
    if(i>=n) return; // 10 times
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
    if(i>=n) return; // 10 times
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
    if(i>=n) return; // 10 times
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
    if(i>=n) return; // 10 times
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
    if(i>=n) return; // 10 times
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
    if(i>=n) return; // 10 times
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
    if(i>=n) return; // 10 times
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
    if(i>=n) return; // 10 times
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
    if(i>=n) return; // 10 times
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return; // 1000 times
}
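// Note: a similar unrolling effect could likely be obtained by keeping the rolled
// grid-stride loop and hinting the compiler, e.g. with "#pragma unroll 16" placed
// before the for loop (the unroll factor and any speedup are assumptions that would
// need profiling), instead of expanding every iteration by hand.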
// cpu kernel
void add_cpu(int *a,int *b,int *c,int n)
{
for(int i=0;i<n;i++)
c[i]=a[i]+b[i];
}
// Compare a CPU result array with a GPU result array element by element
bool resultcompare(int *h_c,int *d_c,int n)
{
for(int i=0;i<n;i++)
{
if(h_c[i]!=d_c[i])
{
printf("There is ERROR in c[%d]: cpu:%d gpu:%d!\n",i,h_c[i],d_c[i]);
return false;
}
}
return true;
}
int main()
{
srand(time(0));
// Get array size
    int n;
printf("Input array size:\n");
scanf("%d",&n);
    // Buffers: managed memory for the GPU kernels, plain host memory for the CPU reference result
int *a;
int *b;
int *c_gpu_u;
int *c_gpu_r;
int *c_cpu=new int[n];
int size = n*sizeof(int);
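    // Managed (unified) memory is accessible from both host and device, so no explicit copies are needed.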
hipMallocManaged(&a, size);
hipMallocManaged(&b, size);
hipMallocManaged(&c_gpu_u, size);
hipMallocManaged(&c_gpu_r, size);
    // Fill the input arrays with random numbers
for(int i=0;i<n;i++)
{
a[i]=rand()%MAXNUM;
b[i]=rand()%MAXNUM;
}
// CPU add reference
clock_t begin,end;
double cpu_timer;
begin=clock();
add_cpu(a,b,c_cpu,n);
end=clock();
cpu_timer=(double)(end-begin)/CLOCKS_PER_SEC;
cpu_timer*=1000;
printf("The total cpu run time is %f ms.\n",cpu_timer);
// GPU add runtime
size_t threads_per_block = 256;
size_t number_of_blocks = 4;
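    // Small fixed launch (4 blocks x 256 threads): both kernels stride over the whole array, so any n is covered.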
    // Create events used to time the kernels on the GPU
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// gpu loop rolling runtime
hipEventRecord(start,0);
hipLaunchKernelGGL(( loop_kernel_rolling), dim3(number_of_blocks), dim3(threads_per_block), 0, 0, a,b,c_gpu_r,n);
hipDeviceSynchronize();
hipEventRecord(stop,0);
float gpu_timer1;
hipEventSynchronize(start);
hipEventSynchronize(stop);
hipEventElapsedTime(&gpu_timer1,start,stop);
printf("The total gpu rolling run time is %f ms.\n",gpu_timer1);
// gpu loop unrolling runtime
hipEventRecord(start,0);
hipLaunchKernelGGL(( loop_kernel_unrolling), dim3(number_of_blocks), dim3(threads_per_block), 0, 0, a,b,c_gpu_u,n);
hipDeviceSynchronize();
hipEventRecord(stop,0);
float gpu_timer2;
hipEventSynchronize(start);
hipEventSynchronize(stop);
hipEventElapsedTime(&gpu_timer2,start,stop);
printf("The total gpu unrolling run time is %f ms.\n",gpu_timer2);
// Check result
bool ret=resultcompare(c_cpu,c_gpu_r,n);
if(ret) printf("Test rolling Success!\n");
ret=resultcompare(c_cpu,c_gpu_u,n);
if(ret) printf("Test unrolling Success!\n");
// Free memory
hipFree(a);
hipFree(b);
hipFree(c_gpu_r);
    hipFree(c_gpu_u);
    delete[] c_cpu;
} | 2f34e98ea9ef40315d6a2e0d1966d384e4e13842.cu | #include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <math.h>
#include <ctime>
#include <time.h>
#include <cuda_runtime.h>
#define MAXNUM 10000
// gpu loop rolling
void __global__ loop_kernel_rolling(int *a,int *b,int *c,int n)
{
int index= threadIdx.x + blockIdx.x * blockDim.x;
int gridStride = gridDim.x * blockDim.x;
for(int i=index; i<n; i+=gridStride)
c[i]=a[i]+b[i];
}
// gpu loop unrolling
void __global__ loop_kernel_unrolling(int *a,int *b,int *c,int n)
{
int index= threadIdx.x + blockIdx.x * blockDim.x;
int gridStride = gridDim.x * blockDim.x;
int i = index;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return; // 10 times
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return; // 100 times
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return; // 10 times
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return; // 10 times
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return; // 10 times
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return; // 10 times
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return; // 10 times
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return; // 10 times
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return; // 10 times
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return; // 10 times
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return; // 10 times
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return;
c[i]=a[i]+b[i];
i+=gridStride;
if(i>=n) return; // 1000 times
}
// cpu kernel
void add_cpu(int *a,int *b,int *c,int n)
{
for(int i=0;i<n;i++)
c[i]=a[i]+b[i];
}
// test cpu and gpu array result
bool resultcompare(int *h_c,int *d_c,int n)
{
for(int i=0;i<n;i++)
{
if(h_c[i]!=d_c[i])
{
printf("There is ERROR in c[%d]: cpu:%d gpu:%d!\n",i,h_c[i],d_c[i]);
return false;
}
}
return true;
}
int main()
{
srand(time(0));
// Get array size
int n;
printf("Input array size:\n");
scanf("%d",&n);
// Allocate unified (managed) memory for the GPU arrays and plain host memory for the CPU result
int *a;
int *b;
int *c_gpu_u;
int *c_gpu_r;
int *c_cpu=new int[n];
int size = n*sizeof(int);
cudaMallocManaged(&a, size);
cudaMallocManaged(&b, size);
cudaMallocManaged(&c_gpu_u, size);
cudaMallocManaged(&c_gpu_r, size);
// Fill the input arrays with random numbers
for(int i=0;i<n;i++)
{
a[i]=rand()%MAXNUM;
b[i]=rand()%MAXNUM;
}
// CPU add reference
clock_t begin,end;
double cpu_timer;
begin=clock();
add_cpu(a,b,c_cpu,n);
end=clock();
cpu_timer=(double)(end-begin)/CLOCKS_PER_SEC;
cpu_timer*=1000;
printf("The total cpu run time is %f ms.\n",cpu_timer);
// GPU add runtime
size_t threads_per_block = 256;
size_t number_of_blocks = 4;
// Create events for GPU timing
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// gpu loop rolling runtime
cudaEventRecord(start,0);
loop_kernel_rolling<<<number_of_blocks, threads_per_block>>>(a,b,c_gpu_r,n);
cudaDeviceSynchronize();
cudaEventRecord(stop,0);
float gpu_timer1;
cudaEventSynchronize(start);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&gpu_timer1,start,stop);
printf("The total gpu rolling run time is %f ms.\n",gpu_timer1);
// gpu loop unrolling runtime
cudaEventRecord(start,0);
loop_kernel_unrolling<<<number_of_blocks, threads_per_block>>>(a,b,c_gpu_u,n);
cudaDeviceSynchronize();
cudaEventRecord(stop,0);
float gpu_timer2;
cudaEventSynchronize(start);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&gpu_timer2,start,stop);
printf("The total gpu unrolling run time is %f ms.\n",gpu_timer2);
// Check result
bool ret=resultcompare(c_cpu,c_gpu_r,n);
if(ret) printf("Test rolling Success!\n");
ret=resultcompare(c_cpu,c_gpu_u,n);
if(ret) printf("Test unrolling Success!\n");
// Free memory
cudaFree(a);
cudaFree(b);
cudaFree(c_gpu_r);
cudaFree(c_gpu_u);
cudaEventDestroy(start);
cudaEventDestroy(stop);
delete[] c_cpu;
} |
55d21984a87ebc445c25e1a79bc2075e4907fb69.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation (an illustrative sketch of such a kernel appears right after the
// includes below). You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
#include<device_launch_parameters.h>
#include<hip/device_functions.h>
//#define SHARED // shared is slower?
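// The header comment above asks for an AoS -> SoA separation of the color
// channels before blurring. The sketch below is only illustrative: it assumes
// the interleaved image arrives as uchar4 pixels (x=R, y=G, z=B, w=A), and the
// kernel name and signature are placeholders, not the ones the assignment
// grades. With those assumptions, the separation could look like this:
__global__
void separateChannels_sketch(const uchar4* const inputImageRGBA,
int numRows, int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
const int2 pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
if (pos.x >= numCols || pos.y >= numRows)
return;
const int idx = pos.y * numCols + pos.x;
const uchar4 pixel = inputImageRGBA[idx];
redChannel[idx] = pixel.x; // RRRR...
greenChannel[idx] = pixel.y; // GGGG...
blueChannel[idx] = pixel.z; // BBBB... (the alpha channel is ignored)
}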
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
float result = 0.f;
for (int filter_r = -filterWidth / 2; filter_r <= filterWidth / 2; ++filter_r) {
// Skipping out-of-range neighbors here would leave dark borders; clamp to the nearest in-image pixel instead (image saturation)
//if (filter_r + thread_2D_pos.y < 0 || filter_r + thread_2D_pos.y >= numRows) {
// continue;
//}
for (int filter_c = -filterWidth / 2; filter_c <= filterWidth / 2; ++filter_c) {
//Find the global image position for this filter position
//clamp to boundary of the image
// Same
//if (filter_c + thread_2D_pos.x < 0 || filter_c + thread_2D_pos.x >= numCols) {
// continue;
//}
int image_r = min(max(thread_2D_pos.y + filter_r, 0), static_cast<int>(numRows - 1));
int image_c = min(max(filter_c + thread_2D_pos.x, 0), static_cast<int>(numCols - 1));
float image_value = static_cast<float>(inputChannel[image_r * numCols + image_c]);
float filter_value = filter[(filter_r + filterWidth / 2) * filterWidth + filter_c + filterWidth / 2];
result += image_value * filter_value;
}
}
outputChannel[thread_1D_pos] = result;
}
__global__
void gaussian_blur_shared(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to the sequential reference solution for the exact clamping semantics you should follow.
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
// NOTE: the out-of-bounds check is deferred until after __syncthreads() below, because
// returning here from only some threads of a block would leave the barrier divergent and
// could also skip loading part of the filter into shared memory.
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//does not work: __shared__ float sh_filter[filterWidth*filterWidth]; (the array size must be a compile-time constant)
// First threads in each block copy the filter into shared mem
extern __shared__ float sh_filter[]; // dynamic length of shared mem
if (threadIdx.x < filterWidth && threadIdx.y < filterWidth) {
int lin_index = threadIdx.x * filterWidth + threadIdx.y;
sh_filter[lin_index] = filter[lin_index];
}
__syncthreads();
// Bounds check moved here (from before the shared-memory load) so that every thread in
// the block reaches the barrier above.
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
float result = 0.f;
for (int filter_r = -filterWidth / 2; filter_r <= filterWidth / 2; ++filter_r) {
// Skipping out-of-range neighbors here would leave dark borders; clamp to the nearest in-image pixel instead (image saturation)
//if (filter_r + thread_2D_pos.y < 0 || filter_r + thread_2D_pos.y >= numRows) {
// continue;
//}
for (int filter_c = -filterWidth / 2; filter_c <= filterWidth / 2; ++filter_c) {
//Find the global image position for this filter position
//clamp to boundary of the image
// Same
//if (filter_c + thread_2D_pos.x < 0 || filter_c + thread_2D_pos.x >= numCols) {
// continue;
//}
int image_r = min(max(thread_2D_pos.y + filter_r, 0), static_cast<int>(numRows - 1));
int image_c = min(max(filter_c + thread_2D_pos.x, 0), static_cast<int>(numCols - 1));
float image_value = static_cast<float>(inputChannel[image_r * numCols + image_c]);
float filter_value = sh_filter[(filter_r + filterWidth / 2) * filterWidth + filter_c + filterWidth / 2];
result += image_value * filter_value;
}
}
outputChannel[thread_1D_pos] = result;
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
redChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].x;
greenChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].y;
blueChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char)* numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char)* numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char)* numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with hipMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc
int numBytes = sizeof(float)* filterWidth * filterWidth;
checkCudaErrors(hipMalloc(&d_filter, numBytes));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(hipMemcpy(d_filter, h_filter, numBytes, hipMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
//TODO: Set reasonable block size (i.e., number of threads per block)
const int m = 32;
const dim3 blockSize(m, m, 1);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and block size.
const dim3 gridSize(ceil((float)numCols / m), ceil((float)numRows / m), 1);
//TODO: Launch a kernel for separating the RGBA image into different color channels
separateChannels << <gridSize, blockSize >> >(d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
// Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.
#ifdef SHARED
int shared_size = filterWidth * filterWidth * sizeof(float); // declare first!
gaussian_blur_shared << <gridSize, blockSize, shared_size >> >(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur_shared << <gridSize, blockSize, shared_size >> >(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur_shared << <gridSize, blockSize, shared_size >> >(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
#else
gaussian_blur << <gridSize, blockSize >> >(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur << <gridSize, blockSize >> >(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur << <gridSize, blockSize >> >(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
#endif // SHARED
// Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
recombineChannels << <gridSize, blockSize >> >(d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
checkCudaErrors(hipFree(d_filter));
}
| 55d21984a87ebc445c25e1a79bc2075e4907fb69.cu | // Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
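//
// Purely as an illustration of the AoS -> SoA idea (the names below are illustrative):
//
// uchar4 rgba = d_inputImageRGBA[i]; // AoS: one interleaved pixel
// d_red[i]   = rgba.x;               // SoA: each channel contiguous in memory
// d_green[i] = rgba.y;
// d_blue[i]  = rgba.z;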
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
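//
// Spelling out the arithmetic shown above: the 0.0 weights drop out, so the boxed pixel
// becomes 0.2*(5 + 3 + 6 + 2 + 0) = 3.2, which is the value printed on the right.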
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
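//
// For reference, checkCudaErrors is defined in utils.h; sketched roughly (the real
// definition may differ), it wraps the call like this:
//
// #define checkCudaErrors(call) do { \
//   cudaError_t e = (call); \
//   if (e != cudaSuccess) { \
//     fprintf(stderr, "CUDA error: %s (%s:%d)\n", cudaGetErrorString(e), __FILE__, __LINE__); \
//     exit(1); \
//   } \
// } while (0)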
//****************************************************************************
#include "utils.h"
#include<device_launch_parameters.h>
#include<device_functions.h>
//#define SHARED // shared is slower?
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to the sequential reference solution for the exact clamping semantics you should follow.
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
float result = 0.f;
for (int filter_r = -filterWidth / 2; filter_r <= filterWidth / 2; ++filter_r) {
// Skipping out-of-range neighbors here would leave dark borders; clamp to the nearest in-image pixel instead (image saturation)
//if (filter_r + thread_2D_pos.y < 0 || filter_r + thread_2D_pos.y >= numRows) {
// continue;
//}
for (int filter_c = -filterWidth / 2; filter_c <= filterWidth / 2; ++filter_c) {
//Find the global image position for this filter position
//clamp to boundary of the image
// Same
//if (filter_c + thread_2D_pos.x < 0 || filter_c + thread_2D_pos.x >= numCols) {
// continue;
//}
int image_r = min(max(thread_2D_pos.y + filter_r, 0), static_cast<int>(numRows - 1));
int image_c = min(max(filter_c + thread_2D_pos.x, 0), static_cast<int>(numCols - 1));
float image_value = static_cast<float>(inputChannel[image_r * numCols + image_c]);
float filter_value = filter[(filter_r + filterWidth / 2) * filterWidth + filter_c + filterWidth / 2];
result += image_value * filter_value;
}
}
outputChannel[thread_1D_pos] = result;
}
__global__
void gaussian_blur_shared(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to the sequential reference solution for the exact clamping semantics you should follow.
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
// NOTE: the out-of-bounds check is deferred until after __syncthreads() below, because
// returning here from only some threads of a block would leave the barrier divergent and
// could also skip loading part of the filter into shared memory.
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//does not work: __shared__ float sh_filter[filterWidth*filterWidth]; (the array size must be a compile-time constant)
// First threads in each block copy the filter into shared mem
extern __shared__ float sh_filter[]; // dynamic length of shared mem
if (threadIdx.x < filterWidth && threadIdx.y < filterWidth) {
int lin_index = threadIdx.x * filterWidth + threadIdx.y;
sh_filter[lin_index] = filter[lin_index];
}
__syncthreads();
// Bounds check moved here (from before the shared-memory load) so that every thread in
// the block reaches the barrier above.
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
float result = 0.f;
for (int filter_r = -filterWidth / 2; filter_r <= filterWidth / 2; ++filter_r) {
// Skipping out-of-range neighbors here would leave dark borders; clamp to the nearest in-image pixel instead (image saturation)
//if (filter_r + thread_2D_pos.y < 0 || filter_r + thread_2D_pos.y >= numRows) {
// continue;
//}
for (int filter_c = -filterWidth / 2; filter_c <= filterWidth / 2; ++filter_c) {
//Find the global image position for this filter position
//clamp to boundary of the image
// Same
//if (filter_c + thread_2D_pos.x < 0 || filter_c + thread_2D_pos.x >= numCols) {
// continue;
//}
int image_r = min(max(thread_2D_pos.y + filter_r, 0), static_cast<int>(numRows - 1));
int image_c = min(max(filter_c + thread_2D_pos.x, 0), static_cast<int>(numCols - 1));
float image_value = static_cast<float>(inputChannel[image_r * numCols + image_c]);
float filter_value = sh_filter[(filter_r + filterWidth / 2) * filterWidth + filter_c + filterWidth / 2];
result += image_value * filter_value;
}
}
outputChannel[thread_1D_pos] = result;
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
redChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].x;
greenChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].y;
blueChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char)* numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char)* numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char)* numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with cudaMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc
int numBytes = sizeof(float)* filterWidth * filterWidth;
checkCudaErrors(cudaMalloc(&d_filter, numBytes));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(cudaMemcpy(d_filter, h_filter, numBytes, cudaMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
//TODO: Set reasonable block size (i.e., number of threads per block)
const int m = 32;
const dim3 blockSize(m, m, 1);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and block size.
const dim3 gridSize(ceil((float)numCols / m), ceil((float)numRows / m), 1);
//TODO: Launch a kernel for separating the RGBA image into different color channels
separateChannels << <gridSize, blockSize >> >(d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
// Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.
#ifdef SHARED
int shared_size = filterWidth * filterWidth * sizeof(float); // declare first!
gaussian_blur_shared << <gridSize, blockSize, shared_size >> >(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur_shared << <gridSize, blockSize, shared_size >> >(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur_shared << <gridSize, blockSize, shared_size >> >(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
#else
gaussian_blur << <gridSize, blockSize >> >(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur << <gridSize, blockSize >> >(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur << <gridSize, blockSize >> >(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
#endif // SHARED
// Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
recombineChannels << <gridSize, blockSize >> >(d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
checkCudaErrors(cudaFree(d_filter));
}
|
3f3077c0d628b3df4371d31ad8aee8279516c6ad.hip | // !!! This is a file automatically generated by hipify!!!
//============================================================================
// Name : Main.cpp
// Author : Wenzhao Sun
// Version :
// Copyright : Your copyright notice
// Description : Hello World in C++, Ansi-style
//============================================================================
#include <iostream>
//#include "MeshGen.h"
#include "commonData.h"
#include "CellInitHelper.h"
#include <vector>
#include "SimulationDomainGPU.h"
using namespace std;
GlobalConfigVars globalConfigVars;
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort =
true) {
if (code != hipSuccess) {
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file,
line);
if (abort)
exit(code);
}
}
//test here
void initializeSlurmConfig(int argc, char* argv[]) {
// read configuration.
ConfigParser parser;
std::string configFileNameDefault = "./resources/disc_M.cfg";
globalConfigVars = parser.parseConfigFile(configFileNameDefault);
std::string configFileNameBaseL = "./resources/disc_";
std::string configFileNameBaseR = ".cfg";
// Unknown number of input arguments.
if (argc != 1 && argc != 3) {
std::cout << "ERROR: Incorrect input argument count.\n"
<< "Expect either no command line argument or three arguments"
<< std::endl;
exit(0);
}
// two command-line arguments were given: the first must be "-slurm", the second selects the config file.
else if (argc == 3) {
if (strcmp(argv[1], "-slurm") != 0) {
std::cout
<< "ERROR: one argument received from commandline but it's not recognized.\n"
<< "Currently, the argument value must be -slurm"
<< std::endl;
exit(0);
} else {
std::string configFileNameM(argv[2]);
std::string configFileNameCombined = configFileNameBaseL
+ configFileNameM + configFileNameBaseR;
parser.updateConfigFile(globalConfigVars, configFileNameCombined);
}
}
// no input argument. Take default.
else if (argc == 1) {
// set GPU device.
int myDeviceID =
globalConfigVars.getConfigValue("GPUDeviceNumber").toInt();
gpuErrchk(hipSetDevice(myDeviceID));
}
}
void updateDivThres(double& curDivThred, uint& i, double& curTime, //Ali
double& decayCoeff, double& divThreshold) {
cout<<"The value of initial time stage in updateDivThres is"<<curTime<<endl ;
//double decay = exp(-curTime * decayCoeff);
//curDivThred = 1.0 - (1.0 - divThreshold) * decay;
curDivThred = divThreshold ; // not active for now
}
int main(int argc, char* argv[]) {
// initialize random seed.
srand(time(NULL));
// Slurm is computer-cluster management system.
initializeSlurmConfig(argc, argv);
cout<< "I am in main file after slurm "<<endl;
// initialize simulation control related parameters from config file.
SimulationGlobalParameter mainPara;
cout<< "I am in main file after simulation global parameter "<<endl;
mainPara.initFromConfig();
cout<< "I am in main file before Cell IniHelper instance creation"<<endl;
// initialize simulation initialization helper.
CellInitHelper initHelper;
cout<< "I am in main file after Cell IniHelper instance creation"<<endl;
// initialize simulation domain.
SimulationDomainGPU simuDomain;
cout<< "I am in main file after simulationDomainGPU instance creation"<<endl;
SimulationInitData_V2_M initData = initHelper.initInput_M();
cout<< "I am in main file after initInput_M creation"<<endl;
simuDomain.initialize_v2_M(initData,mainPara.InitTimeStage);
cout<< "I am in main file after initInput_v2_M creation"<<endl;
std::string polyStatFileNameBase = globalConfigVars.getConfigValue(
"PolygonStatFileName").toString();
std::string uniqueSymbol =
globalConfigVars.getConfigValue("UniqueSymbol").toString();
std::string polyStatFileName = polyStatFileNameBase + uniqueSymbol + ".txt";
std::remove(polyStatFileName.c_str());
std::string detailStatFileNameBase = globalConfigVars.getConfigValue(
"DetailStatFileNameBase").toString() + uniqueSymbol;
double divThreshold =
globalConfigVars.getConfigValue("DivThreshold").toDouble();
double decayCoeff =
globalConfigVars.getConfigValue("ProlifDecayCoeff").toDouble();
double curDivThred;
int maxStepTraceBack =
globalConfigVars.getConfigValue("MaxStepTraceBack").toInt();
// preparation.
//Ali
//CellsStatsData polyData ;
//polyData2.FileName1.open("StressStrain.txt");
//polyData2.FileName1<<"Single cell data"<< "\n" ;
//Ali
double Init_Displace=0.0 ;
std::string FileName2 = "StressStrain.CSV";
uint aniFrame = 0;
// main simulation steps.
bool FirstData=false ;
for (uint i = 0; i <= (uint) (mainPara.totalTimeSteps); i++) {
if (i % mainPara.aniAuxVar == 0) {
std::cout << "substep 1 " << std::endl;
std::cout << "substep 1_confirm " << std::flush;
CellsStatsData polyData = simuDomain.outputPolyCountData(); //Ali comment
// CellsStatsData polyData = simuDomain.outputPolyCountData();
double curTime=i*mainPara.dt + mainPara.InitTimeStage; //Ali - Abu
//Ali
cout<<"Th value of initial time stage is"<<mainPara.InitTimeStage<<endl ;
if (FirstData==true) {
Init_Displace=polyData.Cells_Extrem_Loc[1]-polyData.Cells_Extrem_Loc[0] ;
cout << "Init_Displace="<< Init_Displace<< endl ;
FirstData=false ;
}
if (i==0){
polyData.printStressStrain_Ini( FileName2) ;
FirstData=true ;
cout <<"I am in i=0"<< endl;
}
if (i !=0 && FirstData==false){
polyData.printStressStrain( FileName2,curTime,Init_Displace) ;
cout<<"I am in writing and i is equal to"<<i<<endl ;
}
std::cout << "substep 2 " << std::endl;
//////// update division threshold //////
updateDivThres(curDivThred, i, curTime, decayCoeff, //Ali
divThreshold);
std::cout << "substep 3 " << std::endl;
// prints brief polygon counting statistics to file
polyData.printPolyCountToFile(polyStatFileName, curDivThred);
// prints detailed individual cell statistics to file
polyData.printDetailStatsToFile(detailStatFileNameBase, aniFrame);
// prints the animation frames to file. They can be open by Paraview
//std::cout << "substep 4 " << std::endl;
//if (i != 0) {
//simuDomain.processT1Info(maxStepTraceBack, polyData);
//}
std::cout << "substep 5 " << std::endl;
//simuDomain.outputVtkFilesWithCri_M(mainPara.animationNameBase,
// aniFrame, mainPara.aniCri);
//simuDomain.outputVtkColorByCell_T1(mainPara.animationNameBase,
// aniFrame, mainPara.aniCri);
if (i !=0){
simuDomain.outputVtkColorByCell_polySide(mainPara.animationNameBase,
aniFrame, mainPara.aniCri);
// std::cout << "in ani step " << aniFrame << std::endl;
std::cout << "substep 6 " << std::endl;
}
aniFrame++;
}
//start matlab engine here::::::
//Ali simuDomain.runAllLogic_M(mainPara.dt);
simuDomain.runAllLogic_M(mainPara.dt,mainPara.Damp_Coef,mainPara.InitTimeStage); //Ali
}
return 0;
}
| 3f3077c0d628b3df4371d31ad8aee8279516c6ad.cu | //============================================================================
// Name : Main.cpp
// Author : Wenzhao Sun
// Version :
// Copyright : Your copyright notice
// Description : Hello World in C++, Ansi-style
//============================================================================
#include <iostream>
//#include "MeshGen.h"
#include "commonData.h"
#include "CellInitHelper.h"
#include <vector>
#include "SimulationDomainGPU.h"
using namespace std;
GlobalConfigVars globalConfigVars;
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort =
true) {
if (code != cudaSuccess) {
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file,
line);
if (abort)
exit(code);
}
}
//test here
void initializeSlurmConfig(int argc, char* argv[]) {
// read configuration.
ConfigParser parser;
std::string configFileNameDefault = "./resources/disc_M.cfg";
globalConfigVars = parser.parseConfigFile(configFileNameDefault);
std::string configFileNameBaseL = "./resources/disc_";
std::string configFileNameBaseR = ".cfg";
// Unknown number of input arguments.
if (argc != 1 && argc != 3) {
std::cout << "ERROR: Incorrect input argument count.\n"
<< "Expect either no command line argument or three arguments"
<< std::endl;
exit(0);
}
// two command-line arguments were given: the first must be "-slurm", the second selects the config file.
else if (argc == 3) {
if (strcmp(argv[1], "-slurm") != 0) {
std::cout
<< "ERROR: one argument received from commandline but it's not recognized.\n"
<< "Currently, the argument value must be -slurm"
<< std::endl;
exit(0);
} else {
std::string configFileNameM(argv[2]);
std::string configFileNameCombined = configFileNameBaseL
+ configFileNameM + configFileNameBaseR;
parser.updateConfigFile(globalConfigVars, configFileNameCombined);
}
}
// no input argument. Take default.
else if (argc == 1) {
// set GPU device.
int myDeviceID =
globalConfigVars.getConfigValue("GPUDeviceNumber").toInt();
gpuErrchk(cudaSetDevice(myDeviceID));
}
}
void updateDivThres(double& curDivThred, uint& i, double& curTime, //Ali
double& decayCoeff, double& divThreshold) {
cout<<"The value of initial time stage in updateDivThres is"<<curTime<<endl ;
//double decay = exp(-curTime * decayCoeff);
//curDivThred = 1.0 - (1.0 - divThreshold) * decay;
curDivThred = divThreshold ; // not active for now
}
int main(int argc, char* argv[]) {
// initialize random seed.
srand(time(NULL));
// Slurm is computer-cluster management system.
initializeSlurmConfig(argc, argv);
cout<< "I am in main file after slurm "<<endl;
// initialize simulation control related parameters from config file.
SimulationGlobalParameter mainPara;
cout<< "I am in main file after simulation global parameter "<<endl;
mainPara.initFromConfig();
cout<< "I am in main file before Cell IniHelper instance creation"<<endl;
// initialize simulation initialization helper.
CellInitHelper initHelper;
cout<< "I am in main file after Cell IniHelper instance creation"<<endl;
// initialize simulation domain.
SimulationDomainGPU simuDomain;
cout<< "I am in main file after simulationDomainGPU instance creation"<<endl;
SimulationInitData_V2_M initData = initHelper.initInput_M();
cout<< "I am in main file after initInput_M creation"<<endl;
simuDomain.initialize_v2_M(initData,mainPara.InitTimeStage);
cout<< "I am in main file after initInput_v2_M creation"<<endl;
std::string polyStatFileNameBase = globalConfigVars.getConfigValue(
"PolygonStatFileName").toString();
std::string uniqueSymbol =
globalConfigVars.getConfigValue("UniqueSymbol").toString();
std::string polyStatFileName = polyStatFileNameBase + uniqueSymbol + ".txt";
std::remove(polyStatFileName.c_str());
std::string detailStatFileNameBase = globalConfigVars.getConfigValue(
"DetailStatFileNameBase").toString() + uniqueSymbol;
double divThreshold =
globalConfigVars.getConfigValue("DivThreshold").toDouble();
double decayCoeff =
globalConfigVars.getConfigValue("ProlifDecayCoeff").toDouble();
double curDivThred;
int maxStepTraceBack =
globalConfigVars.getConfigValue("MaxStepTraceBack").toInt();
// preparation.
//Ali
//CellsStatsData polyData ;
//polyData2.FileName1.open("StressStrain.txt");
//polyData2.FileName1<<"Single cell data"<< "\n" ;
//Ali
double Init_Displace=0.0 ;
std::string FileName2 = "StressStrain.CSV";
uint aniFrame = 0;
// main simulation steps.
bool FirstData=false ;
for (uint i = 0; i <= (uint) (mainPara.totalTimeSteps); i++) {
if (i % mainPara.aniAuxVar == 0) {
std::cout << "substep 1 " << std::endl;
std::cout << "substep 1_confirm " << std::flush;
CellsStatsData polyData = simuDomain.outputPolyCountData(); //Ali comment
// CellsStatsData polyData = simuDomain.outputPolyCountData();
double curTime=i*mainPara.dt + mainPara.InitTimeStage; //Ali - Abu
//Ali
cout<<"Th value of initial time stage is"<<mainPara.InitTimeStage<<endl ;
if (FirstData==true) {
Init_Displace=polyData.Cells_Extrem_Loc[1]-polyData.Cells_Extrem_Loc[0] ;
cout << "Init_Displace="<< Init_Displace<< endl ;
FirstData=false ;
}
if (i==0){
polyData.printStressStrain_Ini( FileName2) ;
FirstData=true ;
cout <<"I am in i=0"<< endl;
}
if (i !=0 && FirstData==false){
polyData.printStressStrain( FileName2,curTime,Init_Displace) ;
cout<<"I am in writing and i is equal to"<<i<<endl ;
}
std::cout << "substep 2 " << std::endl;
//////// update division threshold //////
updateDivThres(curDivThred, i, curTime, decayCoeff, //Ali
divThreshold);
std::cout << "substep 3 " << std::endl;
// prints brief polygon counting statistics to file
polyData.printPolyCountToFile(polyStatFileName, curDivThred);
// prints detailed individual cell statistics to file
polyData.printDetailStatsToFile(detailStatFileNameBase, aniFrame);
// prints the animation frames to file. They can be open by Paraview
//std::cout << "substep 4 " << std::endl;
//if (i != 0) {
//simuDomain.processT1Info(maxStepTraceBack, polyData);
//}
std::cout << "substep 5 " << std::endl;
//simuDomain.outputVtkFilesWithCri_M(mainPara.animationNameBase,
// aniFrame, mainPara.aniCri);
//simuDomain.outputVtkColorByCell_T1(mainPara.animationNameBase,
// aniFrame, mainPara.aniCri);
if (i !=0){
simuDomain.outputVtkColorByCell_polySide(mainPara.animationNameBase,
aniFrame, mainPara.aniCri);
// std::cout << "in ani step " << aniFrame << std::endl;
std::cout << "substep 6 " << std::endl;
}
aniFrame++;
}
//start matlab engine here::::::
//Ali simuDomain.runAllLogic_M(mainPara.dt);
simuDomain.runAllLogic_M(mainPara.dt,mainPara.Damp_Coef,mainPara.InitTimeStage); //Ali
}
return 0;
}
|
5fa626800896a4cfd606ffd0ab3e27ca966243a1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "definitions.h"
#include "kernel.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
__global__ void kernel_Localization(float *ParamIn, float *ParamNext, float *Convergence, float *FirstDev, float *SecondDev,
int Nfit, int N_int, int FitBoxsize, float lambda, float SampleSpacingXY)
{
const int tx = threadIdx.x;
const int bx = blockIdx.x;
const int BlockSize = blockDim.x;
//Prevent read/write past end of array
int j = BlockSize*bx + tx;
if ((bx*BlockSize + tx) >= Nfit) return;
float stepLimit[NP] = {0.03f, 0.03f, 0.04f, 400, 2}; // x,y,z step limits are in micron; the last two entries limit the intensity and background steps
float x0_next[NPL];
float dL_pos = 0, dL2_pos = 0;
float dL_I, dL2_I; // photon and background
float step[NPL];
float rate = 1/(1 + lambda);
float tmp;
int s, p, k;
// x,y,z
for (p = 0; p < NP; p++)
{
for (s = 0; s < 4; s++)
{
dL_pos += FirstDev[s*NP*Nfit + j*NP + p];
dL2_pos += SecondDev[s*NP*Nfit + j*NP + p];
}
tmp = -1 * dL_pos / dL2_pos * rate;
step[p] = fminf(fmaxf(tmp, -stepLimit[p]), stepLimit[p]);
}
x0_next[0] = ParamIn[NPL*j + 0] + step[0] * (-1 / SampleSpacingXY / N_int);
x0_next[1] = ParamIn[NPL*j + 1] + step[1] * (-1 / SampleSpacingXY / N_int);
for (k = 2; k < NPL; k++)
{
x0_next[k] = ParamIn[NPL*j + k] + step[k];
}
x0_next[3] = (x0_next[3] <= 100 ? 100 : x0_next[3]); // intensity is not less than 100
x0_next[4] = (x0_next[4] <= 0 ? 0.01f : x0_next[4]);// bg is not less than 0
x0_next[0] = fminf(fmaxf(x0_next[0], 4), FitBoxsize - 4);// xy shift is within fitting box
x0_next[1] = fminf(fmaxf(x0_next[1], 4), FitBoxsize - 4);
x0_next[2] = fminf(fmaxf(x0_next[2], -1.4), 1.4);//z position is within -1.4 to 1.4 um
for (k = 0; k < NPL; k++) {
ParamNext[NPL*j + k] = x0_next[k];
Convergence[NPL*j + k] = x0_next[k] - ParamIn[NPL*j + k];
}
}
__global__ void kernel_getdev(float *data, float *gainR, float *PSF, float *dPSFx, float *dPSFy, float *dPSFz, float *I, float *bg, int Nfit, int PSFsize,
float *FirstDev, float *SecondDev)
{
const int tx = threadIdx.x;
const int bx = blockIdx.x;
const int BlockSize = blockDim.x;
//Prevent read/write past end of array
int j = BlockSize*bx + tx;
if ((bx*BlockSize + tx) >= Nfit) return;
float dL[NP], dL2[NP];
float psfI;
int k, i;
for (k = 0; k < NP; k++)
{
dL[k] = 0;
dL2[k] = 0;
}
fundev(&data[j*PSFsize], &gainR[j*PSFsize], &PSF[j*PSFsize], &dPSFx[j*PSFsize], I[j], I[j], bg[j], &dL[0], &dL2[0], PSFsize);
fundev(&data[j*PSFsize], &gainR[j*PSFsize], &PSF[j*PSFsize], &dPSFy[j*PSFsize], I[j], I[j], bg[j], &dL[1], &dL2[1], PSFsize);
fundev(&data[j*PSFsize], &gainR[j*PSFsize], &PSF[j*PSFsize], &dPSFz[j*PSFsize], I[j], I[j], bg[j], &dL[2], &dL2[2], PSFsize);
fundev(&data[j*PSFsize], &gainR[j*PSFsize], &PSF[j*PSFsize], &PSF[j*PSFsize], I[j], 1.0, bg[j], &dL[3], &dL2[3], PSFsize);
for (int i = 0; i < PSFsize; i++)
{
psfI = PSF[j*PSFsize + i] * I[j] + bg[j] + gainR[j*PSFsize + i];
dL[4] += (data[j*PSFsize + i] + gainR[j*PSFsize + i]) / psfI - 1;
dL2[4] += -1 * (data[j*PSFsize + i] + gainR[j*PSFsize + i]) / psfI / psfI;
}
for (int k = 0; k < NP; k++)
{
FirstDev[NP * j + k] = dL[k];
SecondDev[NP * j + k] = dL2[k];
}
}
__device__ void fundev(float *data, float *gainR, float *psf, float *dpsf, float I, float Id, float bg, float *dL, float *dL2, int PSFsize)
{
float psfI;
for (int i = 0; i < PSFsize; i++)
{
psfI = psf[i] * I + bg + gainR[i];
dL[0] += ((data[i] + gainR[i]) / psfI - 1) * dpsf[i] * Id;
dL2[0] += -1 * Id * Id * dpsf[i] * dpsf[i] * (data[i] + gainR[i]) / psfI / psfI;
}
} | 5fa626800896a4cfd606ffd0ab3e27ca966243a1.cu | #include "cuda_runtime.h"
#include "definitions.h"
#include "kernel.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
__global__ void kernel_Localization(float *ParamIn, float *ParamNext, float *Convergence, float *FirstDev, float *SecondDev,
int Nfit, int N_int, int FitBoxsize, float lambda, float SampleSpacingXY)
{
const int tx = threadIdx.x;
const int bx = blockIdx.x;
const int BlockSize = blockDim.x;
//Prevent read/write past end of array
int j = BlockSize*bx + tx;
if ((bx*BlockSize + tx) >= Nfit) return;
float stepLimit[NP] = {0.03f, 0.03f, 0.04f, 400, 2}; // x,y,z step limits are in micron; the last two entries limit the intensity and background steps
float x0_next[NPL];
float dL_pos = 0, dL2_pos = 0;
float dL_I, dL2_I; // photon and background
float step[NPL];
float rate = 1/(1 + lambda);
float tmp;
int s, p, k;
// x,y,z
for (p = 0; p < NP; p++)
{
for (s = 0; s < 4; s++)
{
dL_pos += FirstDev[s*NP*Nfit + j*NP + p];
dL2_pos += SecondDev[s*NP*Nfit + j*NP + p];
}
tmp = -1 * dL_pos / dL2_pos * rate;
step[p] = fminf(fmaxf(tmp, -stepLimit[p]), stepLimit[p]);
}
x0_next[0] = ParamIn[NPL*j + 0] + step[0] * (-1 / SampleSpacingXY / N_int);
x0_next[1] = ParamIn[NPL*j + 1] + step[1] * (-1 / SampleSpacingXY / N_int);
for (k = 2; k < NPL; k++)
{
x0_next[k] = ParamIn[NPL*j + k] + step[k];
}
x0_next[3] = (x0_next[3] <= 100 ? 100 : x0_next[3]); // intensity is not less than 100
x0_next[4] = (x0_next[4] <= 0 ? 0.01f : x0_next[4]);// bg is not less than 0
x0_next[0] = fminf(fmaxf(x0_next[0], 4), FitBoxsize - 4);// xy shift is within fitting box
x0_next[1] = fminf(fmaxf(x0_next[1], 4), FitBoxsize - 4);
x0_next[2] = fminf(fmaxf(x0_next[2], -1.4), 1.4);//z position is within -1.4 to 1.4 um
for (k = 0; k < NPL; k++) {
ParamNext[NPL*j + k] = x0_next[k];
Convergence[NPL*j + k] = x0_next[k] - ParamIn[NPL*j + k];
}
}
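// Our reading of the update above (not a statement from the original authors): each step
// is a damped Newton step, step = -(dL/dL2) / (1 + lambda), clamped to +/- stepLimit[p],
// so 1/(1+lambda) acts as a Levenberg-Marquardt-style damping factor. Note that dL_pos
// and dL2_pos accumulate across the p loop rather than being reset per parameter.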
__global__ void kernel_getdev(float *data, float *gainR, float *PSF, float *dPSFx, float *dPSFy, float *dPSFz, float *I, float *bg, int Nfit, int PSFsize,
float *FirstDev, float *SecondDev)
{
const int tx = threadIdx.x;
const int bx = blockIdx.x;
const int BlockSize = blockDim.x;
//Prevent read/write past end of array
int j = BlockSize*bx + tx;
if ((bx*BlockSize + tx) >= Nfit) return;
float dL[NP], dL2[NP];
float psfI;
int k, i;
for (k = 0; k < NP; k++)
{
dL[k] = 0;
dL2[k] = 0;
}
fundev(&data[j*PSFsize], &gainR[j*PSFsize], &PSF[j*PSFsize], &dPSFx[j*PSFsize], I[j], I[j], bg[j], &dL[0], &dL2[0], PSFsize);
fundev(&data[j*PSFsize], &gainR[j*PSFsize], &PSF[j*PSFsize], &dPSFy[j*PSFsize], I[j], I[j], bg[j], &dL[1], &dL2[1], PSFsize);
fundev(&data[j*PSFsize], &gainR[j*PSFsize], &PSF[j*PSFsize], &dPSFz[j*PSFsize], I[j], I[j], bg[j], &dL[2], &dL2[2], PSFsize);
fundev(&data[j*PSFsize], &gainR[j*PSFsize], &PSF[j*PSFsize], &PSF[j*PSFsize], I[j], 1.0, bg[j], &dL[3], &dL2[3], PSFsize);
for (int i = 0; i < PSFsize; i++)
{
psfI = PSF[j*PSFsize + i] * I[j] + bg[j] + gainR[j*PSFsize + i];
dL[4] += (data[j*PSFsize + i] + gainR[j*PSFsize + i]) / psfI - 1;
dL2[4] += -1 * (data[j*PSFsize + i] + gainR[j*PSFsize + i]) / psfI / psfI;
}
for (int k = 0; k < NP; k++)
{
FirstDev[NP * j + k] = dL[k];
SecondDev[NP * j + k] = dL2[k];
}
}
__device__ void fundev(float *data, float *gainR, float *psf, float *dpsf, float I, float Id, float bg, float *dL, float *dL2, int PSFsize)
{
float psfI;
for (int i = 0; i < PSFsize; i++)
{
psfI = psf[i] * I + bg + gainR[i];
dL[0] += ((data[i] + gainR[i]) / psfI - 1) * dpsf[i] * Id;
dL2[0] += -1 * Id * Id * dpsf[i] * dpsf[i] * (data[i] + gainR[i]) / psfI / psfI;
}
} |
3b960e541e568c591dbb9b824cf35d23f16dd10a.hip | // !!! This is a file automatically generated by hipify!!!
/*
Count the number of matching tuples in each partition for each thread
*/
#include <stdio.h>
#include <stdint.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "tuple.h"
extern "C" {
__global__
void count_partitioning(
TUPLE *t,
uint *L,
int p_num,
int t_num,
int rows_num,
int loop,
int table_type
)
{
int rows_n = rows_num;
int p_n = p_num;
int t_n = t_num;
int x = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ int part[SHARED_MAX];
for(uint i=threadIdx.x; i<SHARED_MAX ; i+=blockDim.x){
part[i] = 0;
}
__syncthreads();
int PER_TH = (table_type==LEFT) ? LEFT_PER_TH:RIGHT_PER_TH;
int DEF = blockIdx.x * blockDim.x * PER_TH;
int Dim = (gridDim.x-1 == blockIdx.x) ? (t_n - blockIdx.x*blockDim.x):blockDim.x;
//int Dim = blockDim.x;
// Matching phase
int hash = 0;
/*
__shared__ uint temp[PART_C_NUM*PART_C_NUM];
for(uint i=0 ; i<PER_TH/Dim ; i++){
for(uint k=0 ; k<Dim; k++){
temp[k+threadIdx.x*Dim] = t[DEF+i*Dim+k*PER_TH+threadIdx.x].val;
}
__syncthreads();
for(uint j=0; j<Dim; j++){
//hash = (t[DEF+i*Dim+j*PER_TH+threadIdx.x].val>>(8*loop)) % p_n;
hash = (temp[j+threadIdx.x*Dim]>>(8*loop)) % p_n;
part[hash*Dim + threadIdx.x]++;
}
}
*/
if(x < t_n){
for(uint i=0; i<PER_TH&&(DEF+threadIdx.x*PER_TH+i)<rows_n; i++){
//hash = (i>>(8*loop)) % p_n;
hash = (t[DEF+threadIdx.x*PER_TH+i].val>>(RADIX*loop)) % p_n;
part[hash*Dim + threadIdx.x]++;
}
for(uint j=0 ; j*Dim+threadIdx.x<p_n*Dim ; j++){
L[t_n*j + blockIdx.x*Dim + threadIdx.x] = part[j*Dim+threadIdx.x];
}
}
}
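// Our reading of the intended flow (the scan itself is not in this file): count_partitioning
// stores per-thread histograms as L[p * t_num + global_thread]; an exclusive prefix scan over
// L is then expected before partitioning() below, which reuses L as its starting write offsets.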
__global__
void partitioning(
TUPLE *t,
TUPLE *pt,
uint *L,
int p_num,
int t_num,
int rows_num,
int loop,
int table_type
)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int rows_n = rows_num;
int p_n = p_num;
int t_n = t_num;
int PER_TH = (table_type==LEFT) ? LEFT_PER_TH:RIGHT_PER_TH;
int DEF = blockIdx.x * blockDim.x * PER_TH;
int Dim = (gridDim.x-1 == blockIdx.x) ? (t_n - blockIdx.x*blockDim.x):blockDim.x;
__shared__ int part[SHARED_MAX];
for(uint j=0 ; j*blockDim.x+threadIdx.x<p_n*blockDim.x ; j++){
part[j*blockDim.x+threadIdx.x]=L[t_n*j+blockIdx.x*blockDim.x+threadIdx.x];
}
__syncthreads();
// Matching phase
int hash = 0;
int temp = 0;
TUPLE tt;
/*
__shared__ int2 ttemp[PART_C_NUM*PART_C_NUM];
for(uint i=0 ; i<PER_TH/Dim ; i++){
for(uint k=0 ; k<Dim; k++){
ttemp[k+threadIdx.x*Dim].x = t[DEF+i*Dim+k*PER_TH+threadIdx.x].x;
ttemp[k+threadIdx.x*Dim].y = t[DEF+i*Dim+k*PER_TH+threadIdx.x].y;
}
__syncthreads();
for(uint j=0; j<Dim; j++){
tt.x = ttemp[j+threadIdx.x*Dim].x;
tt.y = ttemp[j+threadIdx.x*Dim].y;
hash = (tt.y>>(8*loop)) % p_n;
temp = part[hash*Dim + threadIdx.x]++;
pt[temp].x = tt.x;
pt[temp].y = tt.y;
}
}
*/
if(x < t_n){
for(uint i=0; i<PER_TH&&(DEF+threadIdx.x*PER_TH+i)<rows_n; i++){
tt = t[DEF+threadIdx.x*PER_TH+i];
hash = (tt.val>>loop*RADIX) % p_n;
temp = part[hash*Dim + threadIdx.x]++;
pt[temp] = tt;
}
}
}
__global__
void countPartition(
TUPLE *t,
uint *startpos,
int p_num,
int rows_num
)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
if(x<rows_num){
int p = t[x].val%p_num;
atomicAdd(&(startpos[p]),1);
}
if(x==blockIdx.x*blockDim.x-1){
startpos[x+1]=0;
}
}
}
| 3b960e541e568c591dbb9b824cf35d23f16dd10a.cu | /*
Count the number of matching tuples in each partition for each thread
*/
#include <stdio.h>
#include <stdint.h>
#include <cuda.h>
#include <sys/time.h>
#include "tuple.h"
extern "C" {
__global__
void count_partitioning(
TUPLE *t,
uint *L,
int p_num,
int t_num,
int rows_num,
int loop,
int table_type
)
{
int rows_n = rows_num;
int p_n = p_num;
int t_n = t_num;
int x = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ int part[SHARED_MAX];
for(uint i=threadIdx.x; i<SHARED_MAX ; i+=blockDim.x){
part[i] = 0;
}
__syncthreads();
int PER_TH = (table_type==LEFT) ? LEFT_PER_TH:RIGHT_PER_TH;
int DEF = blockIdx.x * blockDim.x * PER_TH;
int Dim = (gridDim.x-1 == blockIdx.x) ? (t_n - blockIdx.x*blockDim.x):blockDim.x;
//int Dim = blockDim.x;
// Matching phase
int hash = 0;
/*
__shared__ uint temp[PART_C_NUM*PART_C_NUM];
for(uint i=0 ; i<PER_TH/Dim ; i++){
for(uint k=0 ; k<Dim; k++){
temp[k+threadIdx.x*Dim] = t[DEF+i*Dim+k*PER_TH+threadIdx.x].val;
}
__syncthreads();
for(uint j=0; j<Dim; j++){
//hash = (t[DEF+i*Dim+j*PER_TH+threadIdx.x].val>>(8*loop)) % p_n;
hash = (temp[j+threadIdx.x*Dim]>>(8*loop)) % p_n;
part[hash*Dim + threadIdx.x]++;
}
}
*/
if(x < t_n){
for(uint i=0; i<PER_TH&&(DEF+threadIdx.x*PER_TH+i)<rows_n; i++){
//hash = (i>>(8*loop)) % p_n;
hash = (t[DEF+threadIdx.x*PER_TH+i].val>>(RADIX*loop)) % p_n;
part[hash*Dim + threadIdx.x]++;
}
for(uint j=0 ; j*Dim+threadIdx.x<p_n*Dim ; j++){
L[t_n*j + blockIdx.x*Dim + threadIdx.x] = part[j*Dim+threadIdx.x];
}
}
}
__global__
void partitioning(
TUPLE *t,
TUPLE *pt,
uint *L,
int p_num,
int t_num,
int rows_num,
int loop,
int table_type
)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int rows_n = rows_num;
int p_n = p_num;
int t_n = t_num;
int PER_TH = (table_type==LEFT) ? LEFT_PER_TH:RIGHT_PER_TH;
int DEF = blockIdx.x * blockDim.x * PER_TH;
int Dim = (gridDim.x-1 == blockIdx.x) ? (t_n - blockIdx.x*blockDim.x):blockDim.x;
__shared__ int part[SHARED_MAX];
for(uint j=0 ; j*blockDim.x+threadIdx.x<p_n*blockDim.x ; j++){
part[j*blockDim.x+threadIdx.x]=L[t_n*j+blockIdx.x*blockDim.x+threadIdx.x];
}
__syncthreads();
// Matching phase
int hash = 0;
int temp = 0;
TUPLE tt;
/*
__shared__ int2 ttemp[PART_C_NUM*PART_C_NUM];
for(uint i=0 ; i<PER_TH/Dim ; i++){
for(uint k=0 ; k<Dim; k++){
ttemp[k+threadIdx.x*Dim].x = t[DEF+i*Dim+k*PER_TH+threadIdx.x].x;
ttemp[k+threadIdx.x*Dim].y = t[DEF+i*Dim+k*PER_TH+threadIdx.x].y;
}
__syncthreads();
for(uint j=0; j<Dim; j++){
tt.x = ttemp[j+threadIdx.x*Dim].x;
tt.y = ttemp[j+threadIdx.x*Dim].y;
hash = (tt.y>>(8*loop)) % p_n;
temp = part[hash*Dim + threadIdx.x]++;
pt[temp].x = tt.x;
pt[temp].y = tt.y;
}
}
*/
if(x < t_n){
for(uint i=0; i<PER_TH&&(DEF+threadIdx.x*PER_TH+i)<rows_n; i++){
tt = t[DEF+threadIdx.x*PER_TH+i];
hash = (tt.val>>loop*RADIX) % p_n;
temp = part[hash*Dim + threadIdx.x]++;
pt[temp] = tt;
}
}
}
__global__
void countPartition(
TUPLE *t,
uint *startpos,
int p_num,
int rows_num
)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
if(x<rows_num){
int p = t[x].val%p_num;
atomicAdd(&(startpos[p]),1);
}
if(x==blockIdx.x*blockDim.x-1){
startpos[x+1]=0;
}
}
}
|
73205fc5da80e1792bf3f5437e74a0f0fda7006b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "gputimer.h"
#define NUM_THREADS 1000000
#define ARRAY_SIZE 100
#define BLOCK_WIDTH 1000
void print_array(int *array, int size)
{
printf("{ ");
for (int i = 0; i < size; i++) { printf("%d ", array[i]); }
printf("}\n");
}
__global__ void increment_naive(int *g)
{
// which thread is this?
int i = blockIdx.x * blockDim.x + threadIdx.x;
// each thread to increment consecutive elements, wrapping at ARRAY_SIZE
i = i % ARRAY_SIZE;
g[i] = g[i] + 1;
}
__global__ void increment_atomic(int *g)
{
// which thread is this?
int i = blockIdx.x * blockDim.x + threadIdx.x;
// each thread to increment consecutive elements, wrapping at ARRAY_SIZE
i = i % ARRAY_SIZE;
atomicAdd(& g[i], 1);
}
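// Why the two kernels differ: g[i] = g[i] + 1 is a separate read, add and write, so
// concurrent threads hitting the same element overwrite each other and increments are
// lost; atomicAdd performs the read-modify-write indivisibly, so every increment lands
// (at the cost of serializing the conflicting updates).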
int main(int argc,char **argv)
{
GpuTimer timer;
printf("%d total threads in %d blocks writing into %d array elements\n",
NUM_THREADS, NUM_THREADS / BLOCK_WIDTH, ARRAY_SIZE);
// declare and allocate host memory
int h_array[ARRAY_SIZE];
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
// declare, allocate, and zero out GPU memory
int * d_array;
hipMalloc((void **) &d_array, ARRAY_BYTES);
hipMemset((void *) d_array, 0, ARRAY_BYTES);
// launch the kernel - comment out one of these
timer.Start();
hipLaunchKernelGGL(( increment_naive), dim3(NUM_THREADS/BLOCK_WIDTH), dim3(BLOCK_WIDTH), 0, 0, d_array);
//increment_atomic<<<NUM_THREADS/BLOCK_WIDTH, BLOCK_WIDTH>>>(d_array);
timer.Stop();
// copy back the array of sums from GPU and print
hipMemcpy(h_array, d_array, ARRAY_BYTES, hipMemcpyDeviceToHost);
print_array(h_array, ARRAY_SIZE);
printf("Time elapsed = %g ms\n", timer.Elapsed());
// free GPU memory allocation and exit
hipFree(d_array);
return 0;
}
| 73205fc5da80e1792bf3f5437e74a0f0fda7006b.cu | #include <stdio.h>
#include "gputimer.h"
#define NUM_THREADS 1000000
#define ARRAY_SIZE 100
#define BLOCK_WIDTH 1000
void print_array(int *array, int size)
{
printf("{ ");
for (int i = 0; i < size; i++) { printf("%d ", array[i]); }
printf("}\n");
}
__global__ void increment_naive(int *g)
{
// which thread is this?
int i = blockIdx.x * blockDim.x + threadIdx.x;
// each thread to increment consecutive elements, wrapping at ARRAY_SIZE
i = i % ARRAY_SIZE;
g[i] = g[i] + 1;
}
__global__ void increment_atomic(int *g)
{
// which thread is this?
int i = blockIdx.x * blockDim.x + threadIdx.x;
// each thread to increment consecutive elements, wrapping at ARRAY_SIZE
i = i % ARRAY_SIZE;
atomicAdd(& g[i], 1);
}
int main(int argc,char **argv)
{
GpuTimer timer;
printf("%d total threads in %d blocks writing into %d array elements\n",
NUM_THREADS, NUM_THREADS / BLOCK_WIDTH, ARRAY_SIZE);
// declare and allocate host memory
int h_array[ARRAY_SIZE];
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
// declare, allocate, and zero out GPU memory
int * d_array;
cudaMalloc((void **) &d_array, ARRAY_BYTES);
cudaMemset((void *) d_array, 0, ARRAY_BYTES);
// launch the kernel - comment out one of these
timer.Start();
increment_naive<<<NUM_THREADS/BLOCK_WIDTH, BLOCK_WIDTH>>>(d_array);
//increment_atomic<<<NUM_THREADS/BLOCK_WIDTH, BLOCK_WIDTH>>>(d_array);
timer.Stop();
// copy back the array of sums from GPU and print
cudaMemcpy(h_array, d_array, ARRAY_BYTES, cudaMemcpyDeviceToHost);
print_array(h_array, ARRAY_SIZE);
printf("Time elapsed = %g ms\n", timer.Elapsed());
// free GPU memory allocation and exit
cudaFree(d_array);
return 0;
}
|
ca1a894ca2d7053c1f8da0ec777ca53ab193ea7b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <chrono>
#include <stdio.h>
#include <assert.h>
#include <inttypes.h>
#include <limits>
#include "../configuration.h"
#include "../soa_no_cell/wator.h"
static const int kNumBlockSize = 256;
static const char kAgentTypeNone = 0;
static const char kAgentTypeFish = 1;
static const char kAgentTypeShark = 2;
using IndexT = int;
struct Cell {
hiprandState_t random_state;
hiprandState_t agent_random_state;
DeviceArray<bool, 5> neighbor_request;
IndexT agent_new_position;
uint32_t agent_egg_counter;
uint32_t agent_energy;
bool agent_active;
char agent_type;
};
__device__ Cell* dev_cells;
__device__ void Cell_prepare(IndexT cell_id) {
for (int i = 0; i < 5; ++i) {
dev_cells[cell_id].neighbor_request[i] = false;
}
}
__device__ IndexT Cell_neighbor(IndexT cell_id, uint8_t nid) {
int x, y;
int self_x = cell_id % kSizeX;
int self_y = cell_id / kSizeX;
if (nid == 0) {
// left
x = self_x == 0 ? kSizeX - 1 : self_x - 1;
y = self_y;
} else if (nid == 1) {
// top
x = self_x;
y = self_y == 0 ? kSizeY - 1 : self_y - 1;
} else if (nid == 2) {
// right
x = self_x == kSizeX - 1 ? 0 : self_x + 1;
y = self_y;
} else if (nid == 3) {
// bottom
x = self_x;
y = self_y == kSizeY - 1 ? 0 : self_y + 1;
} else {
assert(false);
}
return y*kSizeX + x;
}
__device__ void new_Cell(IndexT cell_id) {
dev_cells[cell_id].agent_type = kAgentTypeNone;
dev_cells[cell_id].agent_active = false;
hiprand_init(kSeed, cell_id, 0, &dev_cells[cell_id].random_state);
Cell_prepare(cell_id);
}
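// Pick one random neighbor that satisfies `predicate` and raise the flag in that
// neighbor's neighbor_request array at the slot pointing back to this cell;
// returns false if no neighbor qualifies.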
template<bool(*predicate)(IndexT)>
__device__ bool Cell_request_random_neighbor(
IndexT cell_id, hiprandState_t& random_state) {
uint8_t candidates[4];
uint8_t num_candidates = 0;
for (int i = 0; i < 4; ++i) {
if (predicate(Cell_neighbor(cell_id, i))) {
candidates[num_candidates++] = i;
}
}
if (num_candidates == 0) {
return false;
} else {
uint32_t selected_index = hiprand(&random_state) % num_candidates;
uint8_t selected = candidates[selected_index];
uint8_t neighbor_index = (selected + 2) % 4;
dev_cells[Cell_neighbor(cell_id, selected)].neighbor_request[neighbor_index] = true;
// Check correctness of neighbor calculation.
assert(Cell_neighbor(Cell_neighbor(cell_id, selected), neighbor_index) == cell_id);
return true;
}
}
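// Resolve movement requests: slot 4 means this cell's own agent stays put (it found
// no target); otherwise one requesting neighbor is chosen at random and its agent
// is granted the move onto this cell.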
__device__ void Cell_decide(IndexT cell_id) {
if (dev_cells[cell_id].neighbor_request[4]) {
// This cell has priority.
dev_cells[cell_id].agent_new_position = cell_id;
} else {
uint8_t candidates[4];
uint8_t num_candidates = 0;
for (int i = 0; i < 4; ++i) {
if (dev_cells[cell_id].neighbor_request[i]) {
candidates[num_candidates++] = i;
}
}
if (num_candidates > 0) {
assert(dev_cells[cell_id].agent_type != kAgentTypeShark);
uint32_t selected_index = hiprand(&dev_cells[cell_id].random_state) % num_candidates;
dev_cells[Cell_neighbor(cell_id, candidates[selected_index])].agent_new_position = cell_id;
}
}
}
__device__ void Cell_enter(IndexT cell_id, IndexT agent) {
assert(dev_cells[cell_id].agent_type == kAgentTypeNone);
assert(dev_cells[agent].agent_type != kAgentTypeNone);
// TODO: Assign agent but do not commit yet!
dev_cells[cell_id].agent_random_state = dev_cells[agent].agent_random_state;
dev_cells[cell_id].agent_type = dev_cells[agent].agent_type;
dev_cells[cell_id].agent_energy = dev_cells[agent].agent_energy;
dev_cells[cell_id].agent_egg_counter = dev_cells[agent].agent_egg_counter;
dev_cells[cell_id].agent_new_position = dev_cells[agent].agent_new_position;
}
__device__ void Cell_kill(IndexT cell_id) {
assert(dev_cells[cell_id].agent_type != kAgentTypeNone);
dev_cells[cell_id].agent_type = kAgentTypeNone;
dev_cells[cell_id].agent_active = false;
}
__device__ bool Cell_has_fish(IndexT cell_id) {
return dev_cells[cell_id].agent_type == kAgentTypeFish;
}
__device__ bool Cell_has_shark(IndexT cell_id) {
return dev_cells[cell_id].agent_type == kAgentTypeShark;
}
__device__ bool Cell_is_free(IndexT cell_id) {
return dev_cells[cell_id].agent_type == kAgentTypeNone;
}
__device__ void Cell_leave(IndexT cell_id) {
assert(dev_cells[cell_id].agent_type != kAgentTypeNone);
dev_cells[cell_id].agent_type = kAgentTypeNone;
dev_cells[cell_id].agent_active = false;
}
__device__ void Cell_request_random_fish_neighbor(IndexT cell_id) {
if (!Cell_request_random_neighbor<&Cell_has_fish>(
cell_id, dev_cells[cell_id].agent_random_state)) {
// No fish found. Look for free cell.
if (!Cell_request_random_neighbor<&Cell_is_free>(
cell_id, dev_cells[cell_id].agent_random_state)) {
dev_cells[cell_id].neighbor_request[4] = true;
}
}
}
__device__ void Cell_request_random_free_neighbor(IndexT cell_id) {
if (!Cell_request_random_neighbor<&Cell_is_free>(
cell_id, dev_cells[cell_id].agent_random_state)) {
dev_cells[cell_id].neighbor_request[4] = true;
}
}
__device__ void new_Agent(int cell_id, int seed) {
hiprand_init(seed, 0, 0, &dev_cells[cell_id].agent_random_state);
dev_cells[cell_id].agent_active = false;
}
__device__ void new_Fish(int cell_id, int seed) {
new_Agent(cell_id, seed);
dev_cells[cell_id].agent_type = kAgentTypeFish;
dev_cells[cell_id].agent_egg_counter = seed % kSpawnThreshold;
}
__device__ void Fish_prepare(int cell_id) {
dev_cells[cell_id].agent_egg_counter++;
// Fallback: Stay on current cell.
dev_cells[cell_id].agent_new_position = cell_id;
Cell_request_random_free_neighbor(cell_id);
}
__device__ void Fish_update(int cell_id) {
auto new_pos = dev_cells[cell_id].agent_new_position;
if (cell_id != new_pos) {
Cell_enter(new_pos, cell_id);
Cell_leave(cell_id);
if (kOptionFishSpawn && dev_cells[new_pos].agent_egg_counter > kSpawnThreshold) {
new_Fish(cell_id, hiprand(&dev_cells[new_pos].agent_random_state));
dev_cells[new_pos].agent_egg_counter = 0;
}
}
}
__device__ void new_Shark(int cell_id, int seed) {
new_Agent(cell_id, seed);
dev_cells[cell_id].agent_type = kAgentTypeShark;
dev_cells[cell_id].agent_energy = kEngeryStart;
dev_cells[cell_id].agent_egg_counter = seed % kSpawnThreshold;
}
__device__ void Shark_prepare(int cell_id) {
dev_cells[cell_id].agent_egg_counter++;
dev_cells[cell_id].agent_energy--;
if (kOptionSharkDie && dev_cells[cell_id].agent_energy == 0) {
// Do nothing. Shark will die.
} else {
// Fallback: Stay on current cell.
dev_cells[cell_id].agent_new_position = cell_id;
Cell_request_random_fish_neighbor(cell_id);
}
}
__device__ void Shark_update(int cell_id) {
auto new_pos = dev_cells[cell_id].agent_new_position;
if (kOptionSharkDie && dev_cells[cell_id].agent_energy == 0) {
Cell_kill(cell_id);
} else {
if (cell_id != new_pos) {
if (Cell_has_fish(new_pos)) {
dev_cells[cell_id].agent_energy += kEngeryBoost;
Cell_kill(new_pos);
}
assert(dev_cells[new_pos].agent_type != kAgentTypeFish);
assert(dev_cells[new_pos].agent_type == kAgentTypeNone);
Cell_enter(new_pos, cell_id);
Cell_leave(cell_id);
if (kOptionSharkSpawn && dev_cells[new_pos].agent_egg_counter > kSpawnThreshold) {
new_Shark(cell_id, hiprand(&dev_cells[new_pos].agent_random_state));
dev_cells[new_pos].agent_egg_counter = 0;
}
}
}
}
// ----- KERNELS -----
__device__ int d_checksum;
__device__ void Cell_add_to_checksum(IndexT cell_id) {
if (Cell_has_fish(cell_id)) {
atomicAdd(&d_checksum, 3);
} else if (Cell_has_shark(cell_id)) {
atomicAdd(&d_checksum, 7);
}
}
__global__ void reset_checksum() {
d_checksum = 0;
}
__global__ void create_cells() {
for (int i = threadIdx.x + blockDim.x*blockIdx.x;
i < kSizeX*kSizeY; i += blockDim.x * gridDim.x) {
new_Cell(i);
}
}
__global__ void setup_cells() {
for (int i = threadIdx.x + blockDim.x*blockIdx.x;
i < kSizeX*kSizeY; i += blockDim.x * gridDim.x) {
// Initialize with random agent.
auto& rand_state = dev_cells[i].random_state;
uint32_t agent_type = hiprand(&rand_state) % 4;
if (agent_type == 0) {
new_Fish(i, hiprand(&rand_state));
} else if (agent_type == 1) {
new_Shark(i, hiprand(&rand_state));
} else {
// Free cell.
}
}
}
__global__ void kernel_Cell_add_to_checksum() {
for (int i = threadIdx.x + blockDim.x*blockIdx.x;
i < kSizeX*kSizeY; i += blockDim.x * gridDim.x) {
Cell_add_to_checksum(i);
}
}
__global__ void kernel_Cell_prepare() {
for (int i = threadIdx.x + blockDim.x*blockIdx.x;
i < kSizeX*kSizeY; i += blockDim.x * gridDim.x) {
Cell_prepare(i);
}
}
__global__ void kernel_Cell_decide() {
for (int i = threadIdx.x + blockDim.x*blockIdx.x;
i < kSizeX*kSizeY; i += blockDim.x * gridDim.x) {
Cell_decide(i);
}
}
__global__ void kernel_Agent_set_active() {
for (int i = threadIdx.x + blockDim.x*blockIdx.x;
i < kSizeX*kSizeY; i += blockDim.x * gridDim.x) {
if (dev_cells[i].agent_type != kAgentTypeNone) {
dev_cells[i].agent_active = true;
}
}
}
__global__ void kernel_Fish_prepare() {
for (int i = threadIdx.x + blockDim.x*blockIdx.x;
i < kSizeX*kSizeY; i += blockDim.x * gridDim.x) {
if (dev_cells[i].agent_type == kAgentTypeFish && dev_cells[i].agent_active) {
Fish_prepare(i);
}
}
}
__global__ void kernel_Fish_update() {
for (int i = threadIdx.x + blockDim.x*blockIdx.x;
i < kSizeX*kSizeY; i += blockDim.x * gridDim.x) {
if (dev_cells[i].agent_type == kAgentTypeFish && dev_cells[i].agent_active) {
Fish_update(i);
}
}
}
__global__ void kernel_Shark_prepare() {
for (int i = threadIdx.x + blockDim.x*blockIdx.x;
i < kSizeX*kSizeY; i += blockDim.x * gridDim.x) {
if (dev_cells[i].agent_type == kAgentTypeShark && dev_cells[i].agent_active) {
Shark_prepare(i);
}
}
}
__global__ void kernel_Shark_update() {
for (int i = threadIdx.x + blockDim.x*blockIdx.x;
i < kSizeX*kSizeY; i += blockDim.x * gridDim.x) {
if (dev_cells[i].agent_type == kAgentTypeShark && dev_cells[i].agent_active) {
Shark_update(i);
}
}
}
__global__ void print_checksum() {
printf("%i,%u,%u,%u,%u\n",
d_checksum, 0, 0, 0, 0);
}
void print_stats() {
hipLaunchKernelGGL(( reset_checksum), dim3(1), dim3(1), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( kernel_Cell_add_to_checksum),
dim3((kSizeX*kSizeY + kNumBlockSize - 1) / kNumBlockSize),
dim3(kNumBlockSize), 0, 0, );
  gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( print_checksum), dim3(1), dim3(1), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
}
void step() {
// --- FISH ---
hipLaunchKernelGGL(( kernel_Cell_prepare),
dim3((kSizeX*kSizeY + kNumBlockSize - 1) / kNumBlockSize),
dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( kernel_Fish_prepare),
dim3((kSizeX*kSizeY + kNumBlockSize - 1) / kNumBlockSize),
dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( kernel_Cell_decide),
dim3((kSizeX*kSizeY + kNumBlockSize - 1) / kNumBlockSize),
dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( kernel_Fish_update),
dim3((kSizeX*kSizeY + kNumBlockSize - 1) / kNumBlockSize),
dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( kernel_Agent_set_active),
dim3((kSizeX*kSizeY + kNumBlockSize - 1) / kNumBlockSize),
dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
// --- SHARKS ---
hipLaunchKernelGGL(( kernel_Cell_prepare),
dim3((kSizeX*kSizeY + kNumBlockSize - 1) / kNumBlockSize),
dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( kernel_Shark_prepare),
dim3((kSizeX*kSizeY + kNumBlockSize - 1) / kNumBlockSize),
dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( kernel_Cell_decide),
dim3((kSizeX*kSizeY + kNumBlockSize - 1) / kNumBlockSize),
dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( kernel_Shark_update),
dim3((kSizeX*kSizeY + kNumBlockSize - 1) / kNumBlockSize),
dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( kernel_Agent_set_active),
dim3((kSizeX*kSizeY + kNumBlockSize - 1) / kNumBlockSize),
dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
}
void initialize() {
Cell* h_cells;
hipMalloc(&h_cells, sizeof(Cell)*kSizeX*kSizeY);
hipMemcpyToSymbol(dev_cells, &h_cells,
sizeof(Cell*), 0, hipMemcpyHostToDevice);
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( create_cells), dim3(128), dim3(128), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( setup_cells), dim3(128), dim3(128), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( kernel_Agent_set_active),
dim3((kSizeX*kSizeY + kNumBlockSize - 1) / kNumBlockSize),
dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
}
__device__ uint32_t d_gui_map[kSizeY * kSizeX];
uint32_t gui_map[kSizeY * kSizeX];
__global__ void fill_gui_map() {
int tid = threadIdx.x + blockDim.x*blockIdx.x;
if (tid < kSizeY*kSizeX) {
if (dev_cells[tid].agent_type != kAgentTypeNone) {
d_gui_map[tid] = dev_cells[tid].agent_type;
} else {
d_gui_map[tid] = 0;
}
}
}
void update_gui_map() {
hipLaunchKernelGGL(( fill_gui_map), dim3(kSizeX*kSizeY/1024 + 1), dim3(1024), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipMemcpyFromSymbol(gui_map, d_gui_map, sizeof(uint32_t)*kSizeX*kSizeY,
0, hipMemcpyDeviceToHost);
gpuErrchk(hipDeviceSynchronize());
}
int main(int /*argc*/, char*[] /*arvg[]*/) {
initialize();
int total_time = 0;
auto time_before = std::chrono::system_clock::now();
for (int i = 0; i < kNumIterations; ++i) {
#ifndef NDEBUG
printf("%i\n", i);
#endif // NDEBUG
if (kOptionPrintStats) {
print_stats();
}
step();
}
auto time_after = std::chrono::system_clock::now();
int time_running = std::chrono::duration_cast<std::chrono::milliseconds>(
time_after - time_before).count();
total_time = time_running;
#ifndef NDEBUG
print_stats();
#endif // NDEBUG
printf("%i\n", total_time);
return 0;
}
| ca1a894ca2d7053c1f8da0ec777ca53ab193ea7b.cu | #include <chrono>
#include <stdio.h>
#include <assert.h>
#include <inttypes.h>
#include <limits>
#include "../configuration.h"
#include "../soa_no_cell/wator.h"
static const int kNumBlockSize = 256;
static const char kAgentTypeNone = 0;
static const char kAgentTypeFish = 1;
static const char kAgentTypeShark = 2;
using IndexT = int;
struct Cell {
curandState_t random_state;
curandState_t agent_random_state;
DeviceArray<bool, 5> neighbor_request;
IndexT agent_new_position;
uint32_t agent_egg_counter;
uint32_t agent_energy;
bool agent_active;
char agent_type;
};
__device__ Cell* dev_cells;
__device__ void Cell_prepare(IndexT cell_id) {
for (int i = 0; i < 5; ++i) {
dev_cells[cell_id].neighbor_request[i] = false;
}
}
__device__ IndexT Cell_neighbor(IndexT cell_id, uint8_t nid) {
int x, y;
int self_x = cell_id % kSizeX;
int self_y = cell_id / kSizeX;
if (nid == 0) {
// left
x = self_x == 0 ? kSizeX - 1 : self_x - 1;
y = self_y;
} else if (nid == 1) {
// top
x = self_x;
y = self_y == 0 ? kSizeY - 1 : self_y - 1;
} else if (nid == 2) {
// right
x = self_x == kSizeX - 1 ? 0 : self_x + 1;
y = self_y;
} else if (nid == 3) {
// bottom
x = self_x;
y = self_y == kSizeY - 1 ? 0 : self_y + 1;
} else {
assert(false);
}
return y*kSizeX + x;
}
__device__ void new_Cell(IndexT cell_id) {
dev_cells[cell_id].agent_type = kAgentTypeNone;
dev_cells[cell_id].agent_active = false;
curand_init(kSeed, cell_id, 0, &dev_cells[cell_id].random_state);
Cell_prepare(cell_id);
}
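// Pick one random neighbor that satisfies `predicate` and raise the flag in that
// neighbor's neighbor_request array at the slot pointing back to this cell;
// returns false if no neighbor qualifies.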
template<bool(*predicate)(IndexT)>
__device__ bool Cell_request_random_neighbor(
IndexT cell_id, curandState_t& random_state) {
uint8_t candidates[4];
uint8_t num_candidates = 0;
for (int i = 0; i < 4; ++i) {
if (predicate(Cell_neighbor(cell_id, i))) {
candidates[num_candidates++] = i;
}
}
if (num_candidates == 0) {
return false;
} else {
uint32_t selected_index = curand(&random_state) % num_candidates;
uint8_t selected = candidates[selected_index];
uint8_t neighbor_index = (selected + 2) % 4;
dev_cells[Cell_neighbor(cell_id, selected)].neighbor_request[neighbor_index] = true;
// Check correctness of neighbor calculation.
assert(Cell_neighbor(Cell_neighbor(cell_id, selected), neighbor_index) == cell_id);
return true;
}
}
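// Resolve movement requests: slot 4 means this cell's own agent stays put (it found
// no target); otherwise one requesting neighbor is chosen at random and its agent
// is granted the move onto this cell.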
__device__ void Cell_decide(IndexT cell_id) {
if (dev_cells[cell_id].neighbor_request[4]) {
// This cell has priority.
dev_cells[cell_id].agent_new_position = cell_id;
} else {
uint8_t candidates[4];
uint8_t num_candidates = 0;
for (int i = 0; i < 4; ++i) {
if (dev_cells[cell_id].neighbor_request[i]) {
candidates[num_candidates++] = i;
}
}
if (num_candidates > 0) {
assert(dev_cells[cell_id].agent_type != kAgentTypeShark);
uint32_t selected_index = curand(&dev_cells[cell_id].random_state) % num_candidates;
dev_cells[Cell_neighbor(cell_id, candidates[selected_index])].agent_new_position = cell_id;
}
}
}
__device__ void Cell_enter(IndexT cell_id, IndexT agent) {
assert(dev_cells[cell_id].agent_type == kAgentTypeNone);
assert(dev_cells[agent].agent_type != kAgentTypeNone);
// TODO: Assign agent but do not commit yet!
dev_cells[cell_id].agent_random_state = dev_cells[agent].agent_random_state;
dev_cells[cell_id].agent_type = dev_cells[agent].agent_type;
dev_cells[cell_id].agent_energy = dev_cells[agent].agent_energy;
dev_cells[cell_id].agent_egg_counter = dev_cells[agent].agent_egg_counter;
dev_cells[cell_id].agent_new_position = dev_cells[agent].agent_new_position;
}
__device__ void Cell_kill(IndexT cell_id) {
assert(dev_cells[cell_id].agent_type != kAgentTypeNone);
dev_cells[cell_id].agent_type = kAgentTypeNone;
dev_cells[cell_id].agent_active = false;
}
__device__ bool Cell_has_fish(IndexT cell_id) {
return dev_cells[cell_id].agent_type == kAgentTypeFish;
}
__device__ bool Cell_has_shark(IndexT cell_id) {
return dev_cells[cell_id].agent_type == kAgentTypeShark;
}
__device__ bool Cell_is_free(IndexT cell_id) {
return dev_cells[cell_id].agent_type == kAgentTypeNone;
}
__device__ void Cell_leave(IndexT cell_id) {
assert(dev_cells[cell_id].agent_type != kAgentTypeNone);
dev_cells[cell_id].agent_type = kAgentTypeNone;
dev_cells[cell_id].agent_active = false;
}
__device__ void Cell_request_random_fish_neighbor(IndexT cell_id) {
if (!Cell_request_random_neighbor<&Cell_has_fish>(
cell_id, dev_cells[cell_id].agent_random_state)) {
// No fish found. Look for free cell.
if (!Cell_request_random_neighbor<&Cell_is_free>(
cell_id, dev_cells[cell_id].agent_random_state)) {
dev_cells[cell_id].neighbor_request[4] = true;
}
}
}
__device__ void Cell_request_random_free_neighbor(IndexT cell_id) {
if (!Cell_request_random_neighbor<&Cell_is_free>(
cell_id, dev_cells[cell_id].agent_random_state)) {
dev_cells[cell_id].neighbor_request[4] = true;
}
}
__device__ void new_Agent(int cell_id, int seed) {
curand_init(seed, 0, 0, &dev_cells[cell_id].agent_random_state);
dev_cells[cell_id].agent_active = false;
}
__device__ void new_Fish(int cell_id, int seed) {
new_Agent(cell_id, seed);
dev_cells[cell_id].agent_type = kAgentTypeFish;
dev_cells[cell_id].agent_egg_counter = seed % kSpawnThreshold;
}
__device__ void Fish_prepare(int cell_id) {
dev_cells[cell_id].agent_egg_counter++;
// Fallback: Stay on current cell.
dev_cells[cell_id].agent_new_position = cell_id;
Cell_request_random_free_neighbor(cell_id);
}
__device__ void Fish_update(int cell_id) {
auto new_pos = dev_cells[cell_id].agent_new_position;
if (cell_id != new_pos) {
Cell_enter(new_pos, cell_id);
Cell_leave(cell_id);
if (kOptionFishSpawn && dev_cells[new_pos].agent_egg_counter > kSpawnThreshold) {
new_Fish(cell_id, curand(&dev_cells[new_pos].agent_random_state));
dev_cells[new_pos].agent_egg_counter = 0;
}
}
}
__device__ void new_Shark(int cell_id, int seed) {
new_Agent(cell_id, seed);
dev_cells[cell_id].agent_type = kAgentTypeShark;
dev_cells[cell_id].agent_energy = kEngeryStart;
dev_cells[cell_id].agent_egg_counter = seed % kSpawnThreshold;
}
__device__ void Shark_prepare(int cell_id) {
dev_cells[cell_id].agent_egg_counter++;
dev_cells[cell_id].agent_energy--;
if (kOptionSharkDie && dev_cells[cell_id].agent_energy == 0) {
// Do nothing. Shark will die.
} else {
// Fallback: Stay on current cell.
dev_cells[cell_id].agent_new_position = cell_id;
Cell_request_random_fish_neighbor(cell_id);
}
}
__device__ void Shark_update(int cell_id) {
auto new_pos = dev_cells[cell_id].agent_new_position;
if (kOptionSharkDie && dev_cells[cell_id].agent_energy == 0) {
Cell_kill(cell_id);
} else {
if (cell_id != new_pos) {
if (Cell_has_fish(new_pos)) {
dev_cells[cell_id].agent_energy += kEngeryBoost;
Cell_kill(new_pos);
}
assert(dev_cells[new_pos].agent_type != kAgentTypeFish);
assert(dev_cells[new_pos].agent_type == kAgentTypeNone);
Cell_enter(new_pos, cell_id);
Cell_leave(cell_id);
if (kOptionSharkSpawn && dev_cells[new_pos].agent_egg_counter > kSpawnThreshold) {
new_Shark(cell_id, curand(&dev_cells[new_pos].agent_random_state));
dev_cells[new_pos].agent_egg_counter = 0;
}
}
}
}
// ----- KERNELS -----
__device__ int d_checksum;
__device__ void Cell_add_to_checksum(IndexT cell_id) {
if (Cell_has_fish(cell_id)) {
atomicAdd(&d_checksum, 3);
} else if (Cell_has_shark(cell_id)) {
atomicAdd(&d_checksum, 7);
}
}
__global__ void reset_checksum() {
d_checksum = 0;
}
__global__ void create_cells() {
for (int i = threadIdx.x + blockDim.x*blockIdx.x;
i < kSizeX*kSizeY; i += blockDim.x * gridDim.x) {
new_Cell(i);
}
}
__global__ void setup_cells() {
for (int i = threadIdx.x + blockDim.x*blockIdx.x;
i < kSizeX*kSizeY; i += blockDim.x * gridDim.x) {
// Initialize with random agent.
auto& rand_state = dev_cells[i].random_state;
uint32_t agent_type = curand(&rand_state) % 4;
if (agent_type == 0) {
new_Fish(i, curand(&rand_state));
} else if (agent_type == 1) {
new_Shark(i, curand(&rand_state));
} else {
// Free cell.
}
}
}
__global__ void kernel_Cell_add_to_checksum() {
for (int i = threadIdx.x + blockDim.x*blockIdx.x;
i < kSizeX*kSizeY; i += blockDim.x * gridDim.x) {
Cell_add_to_checksum(i);
}
}
__global__ void kernel_Cell_prepare() {
for (int i = threadIdx.x + blockDim.x*blockIdx.x;
i < kSizeX*kSizeY; i += blockDim.x * gridDim.x) {
Cell_prepare(i);
}
}
__global__ void kernel_Cell_decide() {
for (int i = threadIdx.x + blockDim.x*blockIdx.x;
i < kSizeX*kSizeY; i += blockDim.x * gridDim.x) {
Cell_decide(i);
}
}
__global__ void kernel_Agent_set_active() {
for (int i = threadIdx.x + blockDim.x*blockIdx.x;
i < kSizeX*kSizeY; i += blockDim.x * gridDim.x) {
if (dev_cells[i].agent_type != kAgentTypeNone) {
dev_cells[i].agent_active = true;
}
}
}
__global__ void kernel_Fish_prepare() {
for (int i = threadIdx.x + blockDim.x*blockIdx.x;
i < kSizeX*kSizeY; i += blockDim.x * gridDim.x) {
if (dev_cells[i].agent_type == kAgentTypeFish && dev_cells[i].agent_active) {
Fish_prepare(i);
}
}
}
__global__ void kernel_Fish_update() {
for (int i = threadIdx.x + blockDim.x*blockIdx.x;
i < kSizeX*kSizeY; i += blockDim.x * gridDim.x) {
if (dev_cells[i].agent_type == kAgentTypeFish && dev_cells[i].agent_active) {
Fish_update(i);
}
}
}
__global__ void kernel_Shark_prepare() {
for (int i = threadIdx.x + blockDim.x*blockIdx.x;
i < kSizeX*kSizeY; i += blockDim.x * gridDim.x) {
if (dev_cells[i].agent_type == kAgentTypeShark && dev_cells[i].agent_active) {
Shark_prepare(i);
}
}
}
__global__ void kernel_Shark_update() {
for (int i = threadIdx.x + blockDim.x*blockIdx.x;
i < kSizeX*kSizeY; i += blockDim.x * gridDim.x) {
if (dev_cells[i].agent_type == kAgentTypeShark && dev_cells[i].agent_active) {
Shark_update(i);
}
}
}
__global__ void print_checksum() {
printf("%i,%u,%u,%u,%u\n",
d_checksum, 0, 0, 0, 0);
}
void print_stats() {
reset_checksum<<<1, 1>>>();
gpuErrchk(cudaDeviceSynchronize());
kernel_Cell_add_to_checksum<<<
(kSizeX*kSizeY + kNumBlockSize - 1) / kNumBlockSize,
kNumBlockSize>>>();
  gpuErrchk(cudaDeviceSynchronize());
print_checksum<<<1, 1>>>();
gpuErrchk(cudaDeviceSynchronize());
}
void step() {
// --- FISH ---
kernel_Cell_prepare<<<
(kSizeX*kSizeY + kNumBlockSize - 1) / kNumBlockSize,
kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
kernel_Fish_prepare<<<
(kSizeX*kSizeY + kNumBlockSize - 1) / kNumBlockSize,
kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
kernel_Cell_decide<<<
(kSizeX*kSizeY + kNumBlockSize - 1) / kNumBlockSize,
kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
kernel_Fish_update<<<
(kSizeX*kSizeY + kNumBlockSize - 1) / kNumBlockSize,
kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
kernel_Agent_set_active<<<
(kSizeX*kSizeY + kNumBlockSize - 1) / kNumBlockSize,
kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
// --- SHARKS ---
kernel_Cell_prepare<<<
(kSizeX*kSizeY + kNumBlockSize - 1) / kNumBlockSize,
kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
kernel_Shark_prepare<<<
(kSizeX*kSizeY + kNumBlockSize - 1) / kNumBlockSize,
kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
kernel_Cell_decide<<<
(kSizeX*kSizeY + kNumBlockSize - 1) / kNumBlockSize,
kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
kernel_Shark_update<<<
(kSizeX*kSizeY + kNumBlockSize - 1) / kNumBlockSize,
kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
kernel_Agent_set_active<<<
(kSizeX*kSizeY + kNumBlockSize - 1) / kNumBlockSize,
kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
}
void initialize() {
Cell* h_cells;
cudaMalloc(&h_cells, sizeof(Cell)*kSizeX*kSizeY);
cudaMemcpyToSymbol(dev_cells, &h_cells,
sizeof(Cell*), 0, cudaMemcpyHostToDevice);
gpuErrchk(cudaDeviceSynchronize());
create_cells<<<128, 128>>>();
gpuErrchk(cudaDeviceSynchronize());
setup_cells<<<128, 128>>>();
gpuErrchk(cudaDeviceSynchronize());
kernel_Agent_set_active<<<
(kSizeX*kSizeY + kNumBlockSize - 1) / kNumBlockSize,
kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
}
__device__ uint32_t d_gui_map[kSizeY * kSizeX];
uint32_t gui_map[kSizeY * kSizeX];
__global__ void fill_gui_map() {
int tid = threadIdx.x + blockDim.x*blockIdx.x;
if (tid < kSizeY*kSizeX) {
if (dev_cells[tid].agent_type != kAgentTypeNone) {
d_gui_map[tid] = dev_cells[tid].agent_type;
} else {
d_gui_map[tid] = 0;
}
}
}
void update_gui_map() {
fill_gui_map<<<kSizeX*kSizeY/1024 + 1, 1024>>>();
gpuErrchk(cudaDeviceSynchronize());
cudaMemcpyFromSymbol(gui_map, d_gui_map, sizeof(uint32_t)*kSizeX*kSizeY,
0, cudaMemcpyDeviceToHost);
gpuErrchk(cudaDeviceSynchronize());
}
int main(int /*argc*/, char*[] /*arvg[]*/) {
initialize();
int total_time = 0;
auto time_before = std::chrono::system_clock::now();
for (int i = 0; i < kNumIterations; ++i) {
#ifndef NDEBUG
printf("%i\n", i);
#endif // NDEBUG
if (kOptionPrintStats) {
print_stats();
}
step();
}
auto time_after = std::chrono::system_clock::now();
int time_running = std::chrono::duration_cast<std::chrono::milliseconds>(
time_after - time_before).count();
total_time = time_running;
#ifndef NDEBUG
print_stats();
#endif // NDEBUG
printf("%i\n", total_time);
return 0;
}
|
8a11707d95b63e0be3f2efb156862489e3b9759b.hip | // !!! This is a file automatically generated by hipify!!!
//
// auto-generated by op2.py
//
//global constants
#ifndef MAX_CONST_SIZE
#define MAX_CONST_SIZE 128
#endif
__constant__ double gam_cuda;
__constant__ double bc_mach_cuda;
__constant__ double bc_alpha_cuda;
__constant__ double bc_p_cuda;
__constant__ double bc_r_cuda;
__constant__ double bc_u_cuda;
__constant__ double bc_v_cuda;
__constant__ double bc_e_cuda;
__constant__ double ones_cuda[15];
__constant__ double r_cuda[15];
__constant__ double s_cuda[15];
__constant__ double Dr_cuda[225];
__constant__ double Ds_cuda[225];
__constant__ double Drw_cuda[225];
__constant__ double Dsw_cuda[225];
__constant__ int FMASK_cuda[15];
__constant__ double LIFT_cuda[225];
//header
#include "op_lib_cpp.h"
#include "op_cuda_rt_support.h"
#include "op_cuda_reduction.h"
void op_decl_const_char(int dim, char const *type,
int size, char *dat, char const *name){
if (!OP_hybrid_gpu) return;
if (!strcmp(name,"gam")) {
cutilSafeCall(hipMemcpyToSymbol(gam_cuda, dat, dim*size));
}
else
if (!strcmp(name,"bc_mach")) {
cutilSafeCall(hipMemcpyToSymbol(bc_mach_cuda, dat, dim*size));
}
else
if (!strcmp(name,"bc_alpha")) {
cutilSafeCall(hipMemcpyToSymbol(bc_alpha_cuda, dat, dim*size));
}
else
if (!strcmp(name,"bc_p")) {
cutilSafeCall(hipMemcpyToSymbol(bc_p_cuda, dat, dim*size));
}
else
if (!strcmp(name,"bc_r")) {
cutilSafeCall(hipMemcpyToSymbol(bc_r_cuda, dat, dim*size));
}
else
if (!strcmp(name,"bc_u")) {
cutilSafeCall(hipMemcpyToSymbol(bc_u_cuda, dat, dim*size));
}
else
if (!strcmp(name,"bc_v")) {
cutilSafeCall(hipMemcpyToSymbol(bc_v_cuda, dat, dim*size));
}
else
if (!strcmp(name,"bc_e")) {
cutilSafeCall(hipMemcpyToSymbol(bc_e_cuda, dat, dim*size));
}
else
if (!strcmp(name,"ones")) {
cutilSafeCall(hipMemcpyToSymbol(ones_cuda, dat, dim*size));
}
else
if (!strcmp(name,"r")) {
cutilSafeCall(hipMemcpyToSymbol(r_cuda, dat, dim*size));
}
else
if (!strcmp(name,"s")) {
cutilSafeCall(hipMemcpyToSymbol(s_cuda, dat, dim*size));
}
else
if (!strcmp(name,"Dr")) {
cutilSafeCall(hipMemcpyToSymbol(Dr_cuda, dat, dim*size));
}
else
if (!strcmp(name,"Ds")) {
cutilSafeCall(hipMemcpyToSymbol(Ds_cuda, dat, dim*size));
}
else
if (!strcmp(name,"Drw")) {
cutilSafeCall(hipMemcpyToSymbol(Drw_cuda, dat, dim*size));
}
else
if (!strcmp(name,"Dsw")) {
cutilSafeCall(hipMemcpyToSymbol(Dsw_cuda, dat, dim*size));
}
else
if (!strcmp(name,"FMASK")) {
cutilSafeCall(hipMemcpyToSymbol(FMASK_cuda, dat, dim*size));
}
else
if (!strcmp(name,"LIFT")) {
cutilSafeCall(hipMemcpyToSymbol(LIFT_cuda, dat, dim*size));
}
else
{
printf("error: unknown const name\n"); exit(1);
}
}
//user kernel files
#include "init_grid_kernel.cu"
#include "set_ic_kernel.cu"
#include "neighbour_zero_kernel.cu"
#include "calc_dt_kernel.cu"
#include "get_neighbour_q_kernel.cu"
#include "get_bedge_q_kernel.cu"
#include "euler_rhs_kernel.cu"
#include "set_workingQ_kernel.cu"
#include "update_Q_kernel.cu"
| 8a11707d95b63e0be3f2efb156862489e3b9759b.cu | //
// auto-generated by op2.py
//
//global constants
#ifndef MAX_CONST_SIZE
#define MAX_CONST_SIZE 128
#endif
__constant__ double gam_cuda;
__constant__ double bc_mach_cuda;
__constant__ double bc_alpha_cuda;
__constant__ double bc_p_cuda;
__constant__ double bc_r_cuda;
__constant__ double bc_u_cuda;
__constant__ double bc_v_cuda;
__constant__ double bc_e_cuda;
__constant__ double ones_cuda[15];
__constant__ double r_cuda[15];
__constant__ double s_cuda[15];
__constant__ double Dr_cuda[225];
__constant__ double Ds_cuda[225];
__constant__ double Drw_cuda[225];
__constant__ double Dsw_cuda[225];
__constant__ int FMASK_cuda[15];
__constant__ double LIFT_cuda[225];
//header
#include "op_lib_cpp.h"
#include "op_cuda_rt_support.h"
#include "op_cuda_reduction.h"
void op_decl_const_char(int dim, char const *type,
int size, char *dat, char const *name){
if (!OP_hybrid_gpu) return;
if (!strcmp(name,"gam")) {
cutilSafeCall(cudaMemcpyToSymbol(gam_cuda, dat, dim*size));
}
else
if (!strcmp(name,"bc_mach")) {
cutilSafeCall(cudaMemcpyToSymbol(bc_mach_cuda, dat, dim*size));
}
else
if (!strcmp(name,"bc_alpha")) {
cutilSafeCall(cudaMemcpyToSymbol(bc_alpha_cuda, dat, dim*size));
}
else
if (!strcmp(name,"bc_p")) {
cutilSafeCall(cudaMemcpyToSymbol(bc_p_cuda, dat, dim*size));
}
else
if (!strcmp(name,"bc_r")) {
cutilSafeCall(cudaMemcpyToSymbol(bc_r_cuda, dat, dim*size));
}
else
if (!strcmp(name,"bc_u")) {
cutilSafeCall(cudaMemcpyToSymbol(bc_u_cuda, dat, dim*size));
}
else
if (!strcmp(name,"bc_v")) {
cutilSafeCall(cudaMemcpyToSymbol(bc_v_cuda, dat, dim*size));
}
else
if (!strcmp(name,"bc_e")) {
cutilSafeCall(cudaMemcpyToSymbol(bc_e_cuda, dat, dim*size));
}
else
if (!strcmp(name,"ones")) {
cutilSafeCall(cudaMemcpyToSymbol(ones_cuda, dat, dim*size));
}
else
if (!strcmp(name,"r")) {
cutilSafeCall(cudaMemcpyToSymbol(r_cuda, dat, dim*size));
}
else
if (!strcmp(name,"s")) {
cutilSafeCall(cudaMemcpyToSymbol(s_cuda, dat, dim*size));
}
else
if (!strcmp(name,"Dr")) {
cutilSafeCall(cudaMemcpyToSymbol(Dr_cuda, dat, dim*size));
}
else
if (!strcmp(name,"Ds")) {
cutilSafeCall(cudaMemcpyToSymbol(Ds_cuda, dat, dim*size));
}
else
if (!strcmp(name,"Drw")) {
cutilSafeCall(cudaMemcpyToSymbol(Drw_cuda, dat, dim*size));
}
else
if (!strcmp(name,"Dsw")) {
cutilSafeCall(cudaMemcpyToSymbol(Dsw_cuda, dat, dim*size));
}
else
if (!strcmp(name,"FMASK")) {
cutilSafeCall(cudaMemcpyToSymbol(FMASK_cuda, dat, dim*size));
}
else
if (!strcmp(name,"LIFT")) {
cutilSafeCall(cudaMemcpyToSymbol(LIFT_cuda, dat, dim*size));
}
else
{
printf("error: unknown const name\n"); exit(1);
}
}
//user kernel files
#include "init_grid_kernel.cu"
#include "set_ic_kernel.cu"
#include "neighbour_zero_kernel.cu"
#include "calc_dt_kernel.cu"
#include "get_neighbour_q_kernel.cu"
#include "get_bedge_q_kernel.cu"
#include "euler_rhs_kernel.cu"
#include "set_workingQ_kernel.cu"
#include "update_Q_kernel.cu"
|
8f80d4a119790cc6ce920a0740128f31e8db2fc6.hip | // !!! This is a file automatically generated by hipify!!!
#include <torch/extension.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <vector>
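// One thread per output entry: each thread reduces an n1 x n2 tile of weight
// into one element of norm, the mean of the squared weights in that tile
// (border tiles only average the elements that actually exist).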
template <typename scalar_t>
__global__ void norm_cuda_kernel(torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> weight,
torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> norm,
int n1, int n2,
int Wx,
int Wy,
int Nx,
int Ny )
{
    int tileX = blockIdx.x * blockDim.x + threadIdx.x; // global thread index == output tile index
    int tileY = blockIdx.y * blockDim.y + threadIdx.y;
    int startX = tileX * n1;
    int startY = tileY * n2;
if(startX<Wx && startY<Wy){//valid thread
scalar_t sum=0;
scalar_t num=0;
for(int i=0;i<n1;i++){
for (int j=0;j<n2;j++){
if(startX+i<Wx && startY+j<Wy){
sum+=weight[startX+i][startY+j]*weight[startX+i][startY+j];
num++;
}
}
}
        norm[tileX][tileY]=sum/num;
}
}
void norm_cuda(
torch::Tensor weights,
torch::Tensor out_norm,
int n1,
int n2)
{
const auto WeightsSizeX = weights.size(0);
const auto WeightsSizeY = weights.size(1);
auto normSizeX=(WeightsSizeX+n1-1)/n1;
auto normSizeY=(WeightsSizeY+n2-1)/n2;
dim3 threadDim(8,8);
    dim3 blockDim((normSizeX+7)/8,(normSizeY+7)/8); // ceil(normSize/8) blocks of 8x8 threads
AT_DISPATCH_FLOATING_TYPES(weights.type(), "norm_cuda", ([&] {
hipLaunchKernelGGL(( norm_cuda_kernel<scalar_t>), dim3(blockDim), dim3(threadDim), 0, 0,
weights.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(),
out_norm.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(),
n1,
n2,
WeightsSizeX,
WeightsSizeY,
normSizeX,
normSizeY);
}));
} | 8f80d4a119790cc6ce920a0740128f31e8db2fc6.cu | #include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
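// One thread per output entry: each thread reduces an n1 x n2 tile of weight
// into one element of norm, the mean of the squared weights in that tile
// (border tiles only average the elements that actually exist).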
template <typename scalar_t>
__global__ void norm_cuda_kernel(torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> weight,
torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> norm,
int n1, int n2,
int Wx,
int Wy,
int Nx,
int Ny )
{
    int tileX = blockIdx.x * blockDim.x + threadIdx.x; // global thread index == output tile index
    int tileY = blockIdx.y * blockDim.y + threadIdx.y;
    int startX = tileX * n1;
    int startY = tileY * n2;
if(startX<Wx && startY<Wy){//valid thread
scalar_t sum=0;
scalar_t num=0;
for(int i=0;i<n1;i++){
for (int j=0;j<n2;j++){
if(startX+i<Wx && startY+j<Wy){
sum+=weight[startX+i][startY+j]*weight[startX+i][startY+j];
num++;
}
}
}
        norm[tileX][tileY]=sum/num;
}
}
void norm_cuda(
torch::Tensor weights,
torch::Tensor out_norm,
int n1,
int n2)
{
const auto WeightsSizeX = weights.size(0);
const auto WeightsSizeY = weights.size(1);
auto normSizeX=(WeightsSizeX+n1-1)/n1;
auto normSizeY=(WeightsSizeY+n2-1)/n2;
dim3 threadDim(8,8);
    dim3 blockDim((normSizeX+7)/8,(normSizeY+7)/8); // ceil(normSize/8) blocks of 8x8 threads
AT_DISPATCH_FLOATING_TYPES(weights.type(), "norm_cuda", ([&] {
norm_cuda_kernel<scalar_t><<<blockDim, threadDim>>>(
weights.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(),
out_norm.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(),
n1,
n2,
WeightsSizeX,
WeightsSizeY,
normSizeX,
normSizeY);
}));
} |
a1170757a3378c8bb5aeb6c2e13d8518fedaa9ea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hip/hip_fp16.h>
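// Gather-and-sum the float field over each gather group, write the reduced value back
// to every member of the group, then scatter it (converted to half precision) into the
// interleaved send buffer at the scatter ids.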
extern "C" __global__ void packBuf_halfAdd(
const int N,
const int Nentries,
const int stride,
const int * __restrict__ gatherStarts,
const int * __restrict__ gatherIds,
const int * __restrict__ scatterStarts,
const int * __restrict__ scatterIds,
float * __restrict__ q,
half * __restrict__ qout)
{
const int id = blockDim.x * blockIdx.x + threadIdx.x;
if (id < N * Nentries) {
const int sid = id % N;
const int k = id / N;
const int startGather = gatherStarts[sid];
const int endGather = gatherStarts[sid + 1];
const int startScatter = scatterStarts[sid];
const int endScatter= scatterStarts[sid + 1];
float gq = 0.0f;
for(dlong n=startGather;n<endGather;++n){
const dlong id = gatherIds[n];
gq += q[id+k*stride];
}
for(dlong n=startGather;n<endGather;++n){
const dlong id = gatherIds[n];
q[id+k*stride] = gq;
}
for(dlong n=startScatter;n<endScatter;++n){
const dlong id = scatterIds[n];
qout[id*Nentries+k] = __float2half(gq);
}
}
}
extern "C" __global__ void unpackBuf_halfAdd(
const int N,
const int Nentries,
const int stride,
const int * __restrict__ gatherStarts,
const int * __restrict__ gatherIds,
const int * __restrict__ scatterStarts,
const int * __restrict__ scatterIds,
const half * __restrict__ q,
float * __restrict__ qout)
{
const int id = blockDim.x * blockIdx.x + threadIdx.x;
if (id < N * Nentries) {
const int gid = id % N;
const int k = id / N;
const dlong startGather = gatherStarts[gid];
const dlong endGather = gatherStarts[gid+1];
const dlong startScatter = scatterStarts[gid];
const dlong endScatter = scatterStarts[gid+1];
float gq = 0.0f;
for(dlong n=startGather;n<endGather;++n){
const dlong id = gatherIds[n];
gq += __half2float(q[id*Nentries+k]);
}
for(dlong n=startScatter;n<endScatter;++n){
const dlong id = scatterIds[n];
qout[id+k*stride] += gq;
}
}
}
| a1170757a3378c8bb5aeb6c2e13d8518fedaa9ea.cu | #include <cuda_fp16.h>
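// Gather-and-sum the float field over each gather group, write the reduced value back
// to every member of the group, then scatter it (converted to half precision) into the
// interleaved send buffer at the scatter ids.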
extern "C" __global__ void packBuf_halfAdd(
const int N,
const int Nentries,
const int stride,
const int * __restrict__ gatherStarts,
const int * __restrict__ gatherIds,
const int * __restrict__ scatterStarts,
const int * __restrict__ scatterIds,
float * __restrict__ q,
half * __restrict__ qout)
{
const int id = blockDim.x * blockIdx.x + threadIdx.x;
if (id < N * Nentries) {
const int sid = id % N;
const int k = id / N;
const int startGather = gatherStarts[sid];
const int endGather = gatherStarts[sid + 1];
const int startScatter = scatterStarts[sid];
const int endScatter= scatterStarts[sid + 1];
float gq = 0.0f;
for(dlong n=startGather;n<endGather;++n){
const dlong id = gatherIds[n];
gq += q[id+k*stride];
}
for(dlong n=startGather;n<endGather;++n){
const dlong id = gatherIds[n];
q[id+k*stride] = gq;
}
for(dlong n=startScatter;n<endScatter;++n){
const dlong id = scatterIds[n];
qout[id*Nentries+k] = __float2half(gq);
}
}
}
extern "C" __global__ void unpackBuf_halfAdd(
const int N,
const int Nentries,
const int stride,
const int * __restrict__ gatherStarts,
const int * __restrict__ gatherIds,
const int * __restrict__ scatterStarts,
const int * __restrict__ scatterIds,
const half * __restrict__ q,
float * __restrict__ qout)
{
const int id = blockDim.x * blockIdx.x + threadIdx.x;
if (id < N * Nentries) {
const int gid = id % N;
const int k = id / N;
const dlong startGather = gatherStarts[gid];
const dlong endGather = gatherStarts[gid+1];
const dlong startScatter = scatterStarts[gid];
const dlong endScatter = scatterStarts[gid+1];
float gq = 0.0f;
for(dlong n=startGather;n<endGather;++n){
const dlong id = gatherIds[n];
gq += __half2float(q[id*Nentries+k]);
}
for(dlong n=startScatter;n<endScatter;++n){
const dlong id = scatterIds[n];
qout[id+k*stride] += gq;
}
}
}
|
436f9f74fcd3dbdb43f6a7d0e2ef3becda3f12e9.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Modifications Copyright 2017-2018 H2O.ai, Inc.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <cusolverDn.h>
#include "cml/cml_blas.cuh"
#include "cml/cml_matrix.cuh"
#include "cml/cml_vector.cuh"
#include "equil_helper.cuh"
#include "matrix/matrix.h"
#include "matrix/matrix_dense.h"
#include "util.h"
#include "timer.h"
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/transform_reduce.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <thrust/extrema.h>
#include <thrust/pair.h>
#include <thrust/advance.h>
#include <cmath>
#include <limits>
#include <thrust/fill.h>
#include "../include/cuda_utils.h"
namespace h2o4gpu {
////////////////////////////////////////////////////////////////////////////////
////////////////////////////// Helper Functions ////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
namespace {
// File scoped constants.
const NormTypes kNormEquilibrate = kNorm2;
const NormTypes kNormNormalize = kNormFro;
template<typename T>
struct GpuData {
const T *orig_data; // pointer to data on CPU
hipblasHandle_t handle; // handle for data on GPU
GpuData(const T *orig_data) : orig_data(orig_data) {
hipblasCreate(&handle);
// fprintf(stderr,"HEREstart: %ld\n",handle); fflush(stderr);
DEBUG_CUDA_CHECK_ERR();
}
~GpuData() {
// fprintf(stderr,"HEREend: %ld\n",handle); fflush(stderr);
if(handle!=NULL) hipblasDestroy(handle);
DEBUG_CUDA_CHECK_ERR();
}
};
hipblasOperation_t OpToCublasOp(char trans) {
ASSERT(trans == 'n' || trans == 'N' || trans == 't' || trans == 'T');
return trans == 'n' || trans == 'N' ? HIPBLAS_OP_N : HIPBLAS_OP_T;
}
template <typename T>
T NormEst(hipblasHandle_t hdl, NormTypes norm_type, const MatrixDense<T>& A);
template <typename T>
void MultDiag(const T *d, const T *e, size_t m, size_t n,
typename MatrixDense<T>::Ord ord, T *data);
} // namespace
////////////////////////////////////////////////////////////////////////////////
/////////////////////// MatrixDense Implementation /////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// original MatrixDense where only trainX and no trainY or validX or validY
// Used by elastic_net.cpp to pass CPU data and put on GPU
template <typename T>
MatrixDense<T>::MatrixDense(int sharedA, int wDev, char ord, size_t m, size_t n, const T *data)
: Matrix<T>(m, n, 0), _sharedA(sharedA), _wDev(wDev), _datatype(0), _dopredict(0), _data(0), _de(0) {
checkwDev(_wDev);
CUDACHECK(hipSetDevice(_wDev));
_me=_wDev; // assume thread same as wDev if not given
_datay=NULL;
_vdata=NULL;
_vdatay=NULL;
_weight=NULL;
ASSERT(ord == 'r' || ord == 'R' || ord == 'c' || ord == 'C');
_ord = (ord == 'r' || ord == 'R') ? ROW : COL;
DEBUG_FPRINTF(stderr,"MatrixDense1: ord=%c m=%d n=%d\n",ord,(int)m,(int)n);fflush(stderr);
#ifdef DEBUG
// CUDACHECK(hipSetDeviceFlags(hipDeviceMapHost)); // TODO: MapHostMemory
hipDeviceProp_t props;
CUDACHECK(hipGetDeviceProperties(&props, _wDev));
fprintf(stderr,"Using: Compute %d.%d CUDA device: [%s] with id=%2d\n", props.major, props.minor, props.name,wDev); fflush(stderr);
#endif
// Set GPU specific _info.
PUSH_RANGE("MDnew",MDnew,1);
GpuData<T> *info = new GpuData<T>(data); // new structure (holds pointer to data and GPU handle)
this->_info = reinterpret_cast<void*>(info);
GpuData<T> *infoy = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_infoy = reinterpret_cast<void*>(infoy);
GpuData<T> *vinfo = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_vinfo = reinterpret_cast<void*>(vinfo);
GpuData<T> *vinfoy = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_vinfoy = reinterpret_cast<void*>(vinfoy);
GpuData<T> *weightinfo = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_weightinfo = reinterpret_cast<void*>(weightinfo);
POP_RANGE("MDnew",MDnew,1);
if(!this->_done_alloc){
this->_done_alloc = true;
    // Unlike the CPU case, the input pointer is always host memory, so we always allocate on the GPU here; sharedA-based pointer sharing (as in the CPU case) does not apply.
// Copy Matrix to GPU.
PUSH_RANGE("MDsend",MDsend,1);
// GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info); // cast void -> GpuData
double t0 = timer<double>();
hipMalloc(&_data, this->_m * this->_n * sizeof(T)); // allocate on GPU
double t1 = timer<double>();
hipMemcpy(_data, info->orig_data, this->_m * this->_n * sizeof(T),hipMemcpyHostToDevice); // copy from orig CPU data to GPU
double t2 = timer<double>();
#ifdef DEBUG
printf("Time to allocate the data matrix on the GPU: %f\n", t1-t0);
printf("Time to copy the data matrix to the GPU : %f\n", t2-t1);
#endif
hipMalloc(&_de, (m + n) * sizeof(T));
thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(static_cast<T*>(&_de[0]));
T fill_value=0.0;
thrust::fill(dev_ptr, dev_ptr + (m + n), fill_value);
if(sharedA>0){
Init(); // does nothing right now
Equil(1); // JONTODO: Hack for now. Need to pass equil
}
POP_RANGE("MDsend",MDsend,1);
}
}
template <typename T>
MatrixDense<T>::MatrixDense(char ord, size_t m, size_t n, const T *data)
: MatrixDense<T>(0, 0, ord, m, n, data){} // assume sharedA=0 and thread=wDev=0 if not given
template <typename T>
MatrixDense<T>::MatrixDense(int sharedA, int wDev, int datatype, char ord, size_t m, size_t n, T *data)
: Matrix<T>(m, n, 0), _sharedA(sharedA), _wDev(wDev), _datatype(datatype), _dopredict(0), _data(0),_de(0) {
checkwDev(_wDev);
CUDACHECK(hipSetDevice(_wDev));
_me=_wDev; // assume thread=wDev if not given
_datay=NULL;
_vdata=NULL;
_vdatay=NULL;
_weight=NULL;
ASSERT(ord == 'r' || ord == 'R' || ord == 'c' || ord == 'C');
_ord = (ord == 'r' || ord == 'R') ? ROW : COL;
DEBUG_FPRINTF(stderr,"MatrixDense2: ord=%c m=%d n=%d\n",ord,(int)m,(int)n);fflush(stderr);
#ifdef DEBUG
// CUDACHECK(hipSetDeviceFlags(hipDeviceMapHost)); // TODO: MapHostMemory
hipDeviceProp_t props;
CUDACHECK(hipGetDeviceProperties(&props, _wDev));
fprintf(stderr,"Using: Compute %d.%d CUDA device: [%s] with id=%2d\n", props.major, props.minor, props.name,wDev); fflush(stderr);
#endif
if(datatype==1){
// input data pointer is already on GPU on this wDev, so just copy pointer
// no info->orig_data, so send 0 to GpuData
PUSH_RANGE("MDnew",MDnew,1);
GpuData<T> *info = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_info = reinterpret_cast<void*>(info);
POP_RANGE("MDnew",MDnew,1);
GpuData<T> *infoy = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_infoy = reinterpret_cast<void*>(infoy);
GpuData<T> *vinfo = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_vinfo = reinterpret_cast<void*>(vinfo);
GpuData<T> *vinfoy = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_vinfoy = reinterpret_cast<void*>(vinfoy);
GpuData<T> *weightinfo = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_weightinfo = reinterpret_cast<void*>(weightinfo);
// source pointer is on this GPU
// just copy GPU pointer
_data = data;
if(!this->_done_alloc){
this->_done_alloc = true;
hipMalloc(&_de, (m + n) * sizeof(T));
thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(static_cast<T*>(&_de[0]));
T fill_value=0.0;
thrust::fill(dev_ptr, dev_ptr + (m + n), fill_value);
Init(); // does nothing right now
Equil(1); // JONTODO: Hack for now. Need to pass equil
}
}
else{
PUSH_RANGE("MDnew",MDnew,1);
GpuData<T> *info = new GpuData<T>(data); // new structure (holds pointer to data and GPU handle)
this->_info = reinterpret_cast<void*>(info);
GpuData<T> *infoy = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_infoy = reinterpret_cast<void*>(infoy);
GpuData<T> *vinfo = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_vinfo = reinterpret_cast<void*>(vinfo);
GpuData<T> *vinfoy = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_vinfoy = reinterpret_cast<void*>(vinfoy);
GpuData<T> *weightinfo = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_weightinfo = reinterpret_cast<void*>(weightinfo);
POP_RANGE("MDnew",MDnew,1);
if(!this->_done_alloc){
this->_done_alloc = true;
// Unlike CPU case, can't pointer copy as going from CPU to GPU
// Copy CPU Matrix to GPU.
PUSH_RANGE("MDsend",MDsend,1);
// GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info); // cast void -> GpuData
double t0 = timer<double>();
hipMalloc(&_data, this->_m * this->_n * sizeof(T)); // allocate on GPU
double t1 = timer<double>();
hipMemcpy(_data, info->orig_data, this->_m * this->_n * sizeof(T),hipMemcpyHostToDevice); // copy from orig CPU data to GPU
hipMalloc(&_de, (m + n) * sizeof(T));
thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(static_cast<T*>(&_de[0]));
T fill_value=0.0;
thrust::fill(dev_ptr, dev_ptr + (m + n), fill_value);
if(sharedA>0){
Init(); // does nothing right now
Equil(1); // JONTODO: Hack for now. Need to pass equil
}
double t2 = timer<double>();
#ifdef DEBUG
printf("Time to allocate the data matrix on the GPU: %f\n", t1-t0);
printf("Time to copy the data matrix to the GPU : %f\n", t2-t1);
#endif
POP_RANGE("MDsend",MDsend,1);
}
}
}
// like original MatrixDense, but also feed in CPU data for trainY, validX, and validY
// Used by elastic_net_ptr.cpp to pass CPU data and put on GPU
template <typename T>
MatrixDense<T>::MatrixDense(int sharedA, int me, int wDev, char ord, size_t m, size_t n, size_t mValid, const T *data, const T *datay, const T *vdata, const T *vdatay, const T *weight)
: Matrix<T>(m, n, mValid), _sharedA(sharedA), _me(me), _wDev(wDev), _datatype(0), _dopredict(0), _data(0), _datay(0), _vdata(0), _vdatay(0), _weight(0), _de(0) {
checkwDev(_wDev);
CUDACHECK(hipSetDevice(_wDev));
ASSERT(ord == 'r' || ord == 'R' || ord == 'c' || ord == 'C');
_ord = (ord == 'r' || ord == 'R') ? ROW : COL;
DEBUG_FPRINTF(stderr,"MatrixDense3: ord=%c m=%d n=%d mValid=%d\n",ord,(int)m,(int)n,int(mValid));fflush(stderr);
#ifdef DEBUG
// CUDACHECK(hipSetDeviceFlags(hipDeviceMapHost)); // TODO: MapHostMemory
hipDeviceProp_t props;
CUDACHECK(hipGetDeviceProperties(&props, _wDev));
fprintf(stderr,"Using: Compute %d.%d CUDA device: [%s] with id=%2d\n", props.major, props.minor, props.name,wDev); fflush(stderr);
#endif
// source pointer is on CPU
// Set GPU specific _info.
PUSH_RANGE("MDnew",MDnew,1);
GpuData<T> *info = new GpuData<T>(data); // new structure (holds pointer to data and GPU handle)
GpuData<T> *infoy = new GpuData<T>(datay); // new structure (holds pointer to data and GPU handle)
GpuData<T> *vinfo = new GpuData<T>(vdata); // new structure (holds pointer to data and GPU handle)
GpuData<T> *vinfoy = new GpuData<T>(vdatay); // new structure (holds pointer to data and GPU handle)
GpuData<T> *weightinfo = new GpuData<T>(weight); // new structure (holds pointer to data and GPU handle)
this->_info = reinterpret_cast<void*>(info);
this->_infoy = reinterpret_cast<void*>(infoy);
this->_vinfo = reinterpret_cast<void*>(vinfo);
this->_vinfoy = reinterpret_cast<void*>(vinfoy);
this->_weightinfo = reinterpret_cast<void*>(weightinfo);
POP_RANGE("MDnew",MDnew,1);
if(!this->_done_alloc){
this->_done_alloc = true;
// Unlike CPU case, can't pointer copy even if sharedA!=0
// Copy Matrix to GPU.
PUSH_RANGE("MDsend",MDsend,1);
// GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info); // cast void -> GpuData
double t0 = timer<double>();
hipMalloc(&_data, this->_m * this->_n * sizeof(T)); // allocate on GPU
hipMalloc(&_datay, this->_m * sizeof(T)); // allocate on GPU
hipMalloc(&_vdata, this->_mvalid * this->_n * sizeof(T)); // allocate on GPU
hipMalloc(&_vdatay, this->_mvalid * sizeof(T)); // allocate on GPU
hipMalloc(&_weight, this->_m * sizeof(T)); // allocate on GPU
double t1 = timer<double>();
hipMemcpy(_data, info->orig_data, this->_m * this->_n * sizeof(T),hipMemcpyHostToDevice); // copy from orig CPU data to GPU
if(infoy->orig_data){
hipMemcpy(_datay, infoy->orig_data, this->_m * sizeof(T),hipMemcpyHostToDevice); // copy from orig CPU data to GPU
_dopredict=0;
}
else{
_dopredict=1;
}
if(vinfo->orig_data){
hipMemcpy(_vdata, vinfo->orig_data, this->_mvalid * this->_n * sizeof(T),hipMemcpyHostToDevice); // copy from orig CPU data to GPU
}
else{
if(this->_mvalid>0){ fprintf(stderr,"vinfo->orig_data NULL but this->_mvalid>0\n"); fflush(stderr); exit(1); }
}
if(vinfoy->orig_data){
hipMemcpy(_vdatay, vinfoy->orig_data, this->_mvalid * sizeof(T),hipMemcpyHostToDevice); // copy from orig CPU data to GPU
}
else{
if(this->_mvalid>0){ fprintf(stderr,"vinfoy->orig_data NULL but this->_mvalid>0\n"); fflush(stderr); exit(1); }
}
if(weightinfo->orig_data){
hipMemcpy(_weight, weightinfo->orig_data, this->_m * sizeof(T),hipMemcpyHostToDevice); // copy from orig CPU data to GPU
}
else{// if no weights, set as unity weights
thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(static_cast<T*>(&_weight[0]));
T fill_value=1.0;
thrust::fill(dev_ptr, dev_ptr + m, fill_value);
}
hipMalloc(&_de, (m + n) * sizeof(T));
thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(static_cast<T*>(&_de[0]));
T fill_value=0.0;
thrust::fill(dev_ptr, dev_ptr + (m + n), fill_value);
if(sharedA>0){
Init(); // does nothing right now
Equil(1); // JONTODO: Hack for now. Need to pass equil
}
double t2 = timer<double>();
#ifdef DEBUG
printf("Time to allocate the data matrix on the GPU: %f\n", t1-t0);
printf("Time to copy the data matrix to the GPU : %f\n", t2-t1);
#endif
POP_RANGE("MDsend",MDsend,1);
}
}
template <typename T>
MatrixDense<T>::MatrixDense(int wDev, char ord, size_t m, size_t n, size_t mValid, const T *data, const T *datay, const T *vdata, const T *vdatay, const T *weight)
: MatrixDense<T>(0,wDev,wDev,ord,m,n,mValid,data,datay,vdata,vdatay,weight){} // assume sharedA=0 and source thread=wDev if not given
// like original MatrixDense, but also feed in CPU data for trainY, validX, and validY
// Used by elastic_net_ptr.cpp to pass CPU data and put on GPU
// datatype=0: CPU pointer to data
// datatype=1: GPU pointer to data
template <typename T>
MatrixDense<T>::MatrixDense(int sharedA, int me, int wDev, int datatype, char ord, size_t m, size_t n, size_t mValid, T *data, T *datay, T *vdata, T *vdatay, T *weight)
: Matrix<T>(m, n, mValid), _sharedA(sharedA), _me(me), _wDev(wDev), _datatype(datatype), _dopredict(0), _data(0), _datay(0), _vdata(0), _vdatay(0), _weight(0), _de(0) {
checkwDev(_wDev);
CUDACHECK(hipSetDevice(_wDev));
DEBUG_FPRINTF(stderr,"%d\n", ord == 'r');
DEBUG_FPRINTF(stderr,"%d\n", ord == 'c');
DEBUG_FPRINTF(stderr,"ord=%c m=%d n=%d mValid=%d\n",ord,(int)m,(int)n,int(mValid));
DEBUG_FPRINTF(stderr,"MatrixDense4: ord=%c m=%d n=%d mValid=%d\n",ord,(int)m,(int)n,int(mValid));
ASSERT(ord == 'r' || ord == 'R' || ord == 'c' || ord == 'C');
_ord = (ord == 'r' || ord == 'R') ? ROW : COL;
#ifdef DEBUG
// CUDACHECK(hipSetDeviceFlags(hipDeviceMapHost)); // TODO: MapHostMemory
hipDeviceProp_t props;
CUDACHECK(hipGetDeviceProperties(&props, _wDev));
DEBUG_FPRINTF(stderr,"Using: Compute %d.%d CUDA device: [%s] with id=%2d\n", props.major, props.minor, props.name,wDev);
#endif
if(datatype==1){
// source pointer is on GPU already
// Set GPU specific _info.
PUSH_RANGE("MDnew",MDnew,1);
GpuData<T> *info = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
GpuData<T> *infoy = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
GpuData<T> *vinfo = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
GpuData<T> *vinfoy = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
GpuData<T> *weightinfo = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_info = reinterpret_cast<void*>(info);
this->_infoy = reinterpret_cast<void*>(infoy);
this->_vinfo = reinterpret_cast<void*>(vinfo);
this->_vinfoy = reinterpret_cast<void*>(vinfoy);
this->_weightinfo = reinterpret_cast<void*>(weightinfo);
POP_RANGE("MDnew",MDnew,1);
// Just copy GPU pointer
_data = data;
_datay = datay;
_vdata = vdata;
_vdatay = vdatay;
_weight = weight;
if(_datay) _dopredict=0;
else _dopredict=1;
if(_weight==NULL){
DEBUG_FPRINTF(stderr,"datatype=1: making up unity weights: %d %p\n",m,&_weight);
CUDACHECK(hipMalloc(&_weight, m * sizeof(T))); // allocate on GPU
thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(static_cast<T*>(&_weight[0]));
T fill_value=1.0;
thrust::fill(dev_ptr, dev_ptr + m, fill_value);
}
if(!this->_done_alloc){
CUDACHECK(hipMalloc(&_de, (m + n) * sizeof(T)));
CUDACHECK(hipDeviceSynchronize());
CUDACHECK(hipGetLastError());
const thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(_de);
const T fill_value=0.0;
thrust::fill_n(dev_ptr, m + n, fill_value);
if(sharedA>0){
Init(); // does nothing right now
Equil(1); // JONTODO: Hack for now. Need to pass equil
}
this->_done_alloc = true;
}
}
else{
// source pointer is on CPU
// Set GPU specific _info.
PUSH_RANGE("MDnew",MDnew,1);
GpuData<T> *info = new GpuData<T>(data); // new structure (holds pointer to data and GPU handle)
GpuData<T> *infoy = new GpuData<T>(datay); // new structure (holds pointer to data and GPU handle)
GpuData<T> *vinfo = new GpuData<T>(vdata); // new structure (holds pointer to data and GPU handle)
GpuData<T> *vinfoy = new GpuData<T>(vdatay); // new structure (holds pointer to data and GPU handle)
GpuData<T> *weightinfo = new GpuData<T>(weight); // new structure (holds pointer to data and GPU handle)
this->_info = reinterpret_cast<void*>(info);
this->_infoy = reinterpret_cast<void*>(infoy);
this->_vinfo = reinterpret_cast<void*>(vinfo);
this->_vinfoy = reinterpret_cast<void*>(vinfoy);
this->_weightinfo = reinterpret_cast<void*>(weightinfo);
POP_RANGE("MDnew",MDnew,1);
if(!this->_done_alloc){
this->_done_alloc = true;
// Copy CPU Matrix to GPU.
PUSH_RANGE("MDsend",MDsend,1);
// GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info); // cast void -> GpuData
double t0 = timer<double>();
hipMalloc(&_data, this->_m * this->_n * sizeof(T)); // allocate on GPU
hipMalloc(&_datay, this->_m * sizeof(T)); // allocate on GPU
hipMalloc(&_vdata, this->_mvalid * this->_n * sizeof(T)); // allocate on GPU
hipMalloc(&_vdatay, this->_mvalid * sizeof(T)); // allocate on GPU
hipMalloc(&_weight, this->_m * sizeof(T)); // allocate on GPU
double t1 = timer<double>();
hipMemcpy(_data, info->orig_data, this->_m * this->_n * sizeof(T),hipMemcpyHostToDevice); // copy from orig CPU data to GPU
if(infoy->orig_data){
hipMemcpy(_datay, infoy->orig_data, this->_m * sizeof(T),hipMemcpyHostToDevice); // copy from orig CPU data to GPU
_dopredict=0;
}
else{
_dopredict=1;
}
hipMemcpy(_vdata, vinfo->orig_data, this->_mvalid * this->_n * sizeof(T),hipMemcpyHostToDevice); // copy from orig CPU data to GPU
hipMemcpy(_vdatay, vinfoy->orig_data, this->_mvalid * sizeof(T),hipMemcpyHostToDevice); // copy from orig CPU data to GPU
if(weightinfo->orig_data){
hipMemcpy(_weight, weightinfo->orig_data, this->_m * sizeof(T),hipMemcpyHostToDevice); // copy from orig CPU data to GPU
}
else{
DEBUG_FPRINTF(stderr,"datatype=0: making up unity weights: %d\n",m);
        // _weight was already allocated above; just fill it with unit weights (a second hipMalloc here would leak the first allocation)
thrust::device_ptr<T> dev_ptr=thrust::device_pointer_cast(static_cast<T*>(_weight));
T fill_value=1.0;
thrust::fill(dev_ptr, dev_ptr + this->_m, fill_value);
}
hipMalloc(&_de, (m + n) * sizeof(T));
thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(static_cast<T*>(&_de[0]));
T fill_value=0.0;
thrust::fill(dev_ptr, dev_ptr + (m + n), fill_value);
if(sharedA>0){
Init(); // does nothing right now
Equil(1); // JONTODO: Hack for now. Need to pass equil
}
double t2 = timer<double>();
#ifdef DEBUG
printf("Time to allocate the data matrix on the GPU: %f\n", t1-t0);
printf("Time to copy the data matrix to the GPU : %f\n", t2-t1);
#endif
POP_RANGE("MDsend",MDsend,1);
}
}
}
template <typename T>
MatrixDense<T>::MatrixDense(int wDev, int datatype, char ord, size_t m, size_t n, size_t mValid, T *data, T *datay, T *vdata, T *vdatay, T *weight)
: MatrixDense<T>(0,wDev,wDev,datatype,ord,m,n,mValid,data,datay,vdata,vdatay,weight){} // assume sharedA=0 and thread=wDev if not given
// MatrixDense constructor that takes an existing A object (which already holds all the CPU information) and is used when going from 1 GPU to multiple GPUs
// Used by elastic_net_ptr.cpp inside openmp loop for each core
template <typename T>
MatrixDense<T>::MatrixDense(int sharedA, int me, int wDev, const MatrixDense<T>& A)
: Matrix<T>(A._m, A._n, A._mvalid), _sharedA(sharedA), _me(me), _wDev(wDev), _data(0),_de(0), _ord(A._ord) {
checkwDev(_wDev);
CUDACHECK(hipSetDevice(_wDev));
DEBUG_FPRINTF(stderr,"MatrixDense5: ord=%c m=%d n=%d mValid=%d\n",A._ord,A._m,A._n,A._mvalid);
PUSH_RANGE("MDnew",MDnew,2);
GpuData<T> *info_A = reinterpret_cast<GpuData<T>*>(A._info); // cast from void to GpuData
GpuData<T> *infoy_A = reinterpret_cast<GpuData<T>*>(A._infoy); // cast from void to GpuData
GpuData<T> *vinfo_A = reinterpret_cast<GpuData<T>*>(A._vinfo); // cast from void to GpuData
GpuData<T> *vinfoy_A = reinterpret_cast<GpuData<T>*>(A._vinfoy); // cast from void to GpuData
GpuData<T> *weightinfo_A = reinterpret_cast<GpuData<T>*>(A._weightinfo); // cast from void to GpuData
GpuData<T> *info;
GpuData<T> *infoy;
GpuData<T> *vinfo;
GpuData<T> *vinfoy;
GpuData<T> *weightinfo;
if(info_A->orig_data) info = new GpuData<T>(info_A->orig_data); // create new GpuData structure with point to CPU data
else info = new GpuData<T>(0); // create new GpuData structure with point to CPU data
if(infoy_A->orig_data) infoy = new GpuData<T>(infoy_A->orig_data); // create new GpuData structure with point to CPU data
else infoy = new GpuData<T>(0); // create new GpuData structure with point to CPU data
if(vinfo_A->orig_data) vinfo = new GpuData<T>(vinfo_A->orig_data); // create new GpuData structure with point to CPU data
else vinfo = new GpuData<T>(0); // create new GpuData structure with point to CPU data
if(vinfoy_A->orig_data) vinfoy = new GpuData<T>(vinfoy_A->orig_data); // create new GpuData structure with point to CPU data
else vinfoy = new GpuData<T>(0); // create new GpuData structure with point to CPU data
if(weightinfo_A->orig_data) weightinfo = new GpuData<T>(weightinfo_A->orig_data); // create new GpuData structure with point to CPU data
else weightinfo = new GpuData<T>(0); // create new GpuData structure with point to CPU data
this->_info = reinterpret_cast<void*>(info); // back to cast as void
this->_infoy = reinterpret_cast<void*>(infoy); // back to cast as void
this->_vinfo = reinterpret_cast<void*>(vinfo); // back to cast as void
this->_vinfoy = reinterpret_cast<void*>(vinfoy); // back to cast as void
this->_weightinfo = reinterpret_cast<void*>(weightinfo); // back to cast as void
POP_RANGE("MDnew",MDnew,2);
if(!this->_done_alloc){
this->_done_alloc = true;
if(A._wDev == _wDev && A._me == _me && (A._sharedA==0 || _sharedA==0)){ // if on same device and same thread, just copy pointer
DEBUG_FPRINTF(stderr,"ATYPE%d\n",0);
_data = A._data;
_datay = A._datay;
_vdata = A._vdata;
_vdatay = A._vdatay;
_weight = A._weight;
_de = A._de;
_dopredict = A._dopredict;
// Init();
// this->_done_equil=1;
}
else if(A._wDev == _wDev && A._sharedA!=0 && _sharedA!=0){ // if on same device and sharing memory, then just copy pointer
DEBUG_FPRINTF(stderr,"ATYPE%d\n",1);
_data = A._data;
_datay = A._datay;
_vdata = A._vdata;
_vdatay = A._vdatay;
_weight = A._weight;
_de = A._de;
_dopredict = A._dopredict;
Init();
this->_done_equil=1;
}
else{
DEBUG_FPRINTF(stderr,"ATYPE%d\n",2);
// Copy Matrix to from source GPU to this GPU
PUSH_RANGE("MDcopy",MDcopy,1);
//GpuData<T> *info = reinterpret_cast<GpuData<T>*>(_info); // cast void -> GpuData
double t0 = timer<double>();
if(A._data) hipMalloc(&_data, A._m * A._n * sizeof(T)); // allocate on GPU
if(A._datay) hipMalloc(&_datay, A._m * sizeof(T)); // allocate on GPU
if(A._vdata) hipMalloc(&_vdata, A._mvalid * A._n * sizeof(T)); // allocate on GPU
if(A._vdatay) hipMalloc(&_vdatay, A._mvalid * sizeof(T)); // allocate on GPU
if(A._weight) hipMalloc(&_weight, A._m * sizeof(T)); // allocate on GPU
double t1 = timer<double>();
if(A._data) hipMemcpyPeer(_data, _wDev, A._data, A._wDev, A._m * A._n * sizeof(T)); // dest: _data destid: _wDev source: A._data sourceid: A._wDev
if(A._datay){
hipMemcpyPeer(_datay, _wDev, A._datay, A._wDev, A._m * sizeof(T)); // dest: _data destid: _wDev source: A._data sourceid: A._wDev
_dopredict=0;
}
else{
_dopredict=1;
}
if(A._vdata) hipMemcpyPeer(_vdata, _wDev, A._vdata, A._wDev, A._mvalid * A._n * sizeof(T)); // dest: _data destid: _wDev source: A._data sourceid: A._wDev
if(A._vdatay) hipMemcpyPeer(_vdatay, _wDev, A._vdatay, A._wDev, A._mvalid * sizeof(T)); // dest: _data destid: _wDev source: A._data sourceid: A._wDev
if(A._weight) hipMemcpyPeer(_weight, _wDev, A._weight, A._wDev, A._m * sizeof(T)); // dest: _data destid: _wDev source: A._data sourceid: A._wDev
      if(A._de){ hipMalloc(&_de, (A._m + A._n) * sizeof(T)); hipMemcpyPeer(_de, _wDev, A._de, A._wDev, (A._m + A._n) * sizeof(T)); } // guard both calls: without braces the peer copy ran even when A._de was NULL
if(sharedA>0){
Init();
Equil(1);
}
double t2 = timer<double>();
#ifdef DEBUG
printf("Time to allocate the data matrix on the GPU: %f\n", t1-t0);
printf("Time to copy the data matrix to the GPU : %f\n", t2-t1);
#endif
POP_RANGE("MDcopy",MDcopy,1);
}
}
}
template <typename T>
MatrixDense<T>::MatrixDense(int me, int wDev, const MatrixDense<T>& A)
: MatrixDense<T>(0, me, wDev, A){} // then assume not sharing memory
template <typename T>
MatrixDense<T>::MatrixDense(int wDev, const MatrixDense<T>& A)
: MatrixDense<T>(wDev, wDev, A){} // then assume thread=wDev for the new matrix (i.e. not input A)
template <typename T>
MatrixDense<T>::MatrixDense(const MatrixDense<T>& A)
: MatrixDense<T>(A._wDev, A){} // then assume same device as input A
template <typename T>
MatrixDense<T>::~MatrixDense() {
  // return;//TODO: Some destructor issue FIXME. Segfaults after adding weights. Can't find issue.
checkwDev(_wDev);
CUDACHECK(hipSetDevice(_wDev));
if(1){
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
GpuData<T> *infoy = reinterpret_cast<GpuData<T>*>(this->_infoy);
GpuData<T> *vinfo = reinterpret_cast<GpuData<T>*>(this->_vinfo);
GpuData<T> *vinfoy = reinterpret_cast<GpuData<T>*>(this->_vinfoy);
GpuData<T> *weightinfo = reinterpret_cast<GpuData<T>*>(this->_weightinfo);
    if(info) { delete info; this->_info = 0; }
    if(infoy) { delete infoy; this->_infoy = 0; }
    if(vinfo) { delete vinfo; this->_vinfo = 0; }
    if(vinfoy) { delete vinfoy; this->_vinfoy = 0; }
    if(weightinfo) { delete weightinfo; this->_weightinfo = 0; }
}
// fprintf(stderr,"HERE1\n"); fflush(stderr);
if(0){ // Note that this frees these pointers as soon as MatrixDense constructor goes out of scope,
// and might want more fine-grained control over GPU memory if inside (say) high-level python API
// If 0 is used, then need to ensure user calls a finish() or something to free memory. If 0, also
// allows user to call (say) fit() or fitptr() multiple times
if (this->_done_init && _data) {
// fprintf(stderr,"Freeing _data: %p\n",(void*)_data); fflush(stderr);
hipFree(_data);
this->_data = 0;
DEBUG_CUDA_CHECK_ERR();
}
// fprintf(stderr,"HERE2\n"); fflush(stderr);
if (this->_done_init && _datay) {
// fprintf(stderr,"Freeing _datay: %p\n",(void*)_datay); fflush(stderr);
hipFree(_datay);
this->_datay = 0;
DEBUG_CUDA_CHECK_ERR();
}
// fprintf(stderr,"HERE3\n"); fflush(stderr);
if (this->_done_init && _vdata) {
// fprintf(stderr,"Freeing _vdata: %p\n",(void*)_vdata); fflush(stderr);
hipFree(_vdata);
this->_vdata = 0;
DEBUG_CUDA_CHECK_ERR();
}
// fprintf(stderr,"HERE4\n"); fflush(stderr);
if (this->_done_init && _vdatay) {
// fprintf(stderr,"Freeing _vdatay: %p\n",(void*)_vdatay); fflush(stderr);
hipFree(_vdatay);
this->_vdatay = 0;
DEBUG_CUDA_CHECK_ERR();
}
// fprintf(stderr,"HERE5\n"); fflush(stderr);
if (this->_done_init && _weight) {
// fprintf(stderr,"Freeing _weight: %p\n",(void*)_weight); fflush(stderr);
hipFree(_weight);
this->_weight = 0;
DEBUG_CUDA_CHECK_ERR();
}
}
// Risky, but looks like we don't use code that may become broken
// fprintf(stderr,"HERE6\n"); fflush(stderr);
if(this->_done_init && _de && !_sharedA){ // JONTODO: When sharedA=1, only free on sourceme thread and sourcewDev device (can store sourcethread for-- sourceme -- data and only free if on source thread)
// fprintf(stderr,"Freeing _de: %p\n",(void*)_weight); fflush(stderr);
hipFree(_de);
this->_de=0;
DEBUG_CUDA_CHECK_ERR();
}
}
template <typename T>
int MatrixDense<T>::Init() {
DEBUG_EXPECT(!this->_done_init);
if (this->_done_init)
return 1;
this->_done_init = true;
CUDACHECK(hipSetDevice(_wDev));
PUSH_RANGE("MDinit",MDinit,1);
POP_RANGE("MDinit",MDinit,1);
DEBUG_CUDA_CHECK_ERR();
return 0;
}
template <typename T>
int MatrixDense<T>::GetTrainX(int datatype, size_t size, T**data) const {
CUDACHECK(hipSetDevice(_wDev));
if(_data){
if(datatype==1){
hipMemcpy(*data, _data, size* sizeof(T),hipMemcpyDeviceToHost);
CUDA_CHECK_ERR();
}
else{
std::memcpy(*data, _data, size * sizeof(T));
}
return(0);
}
else return(1);
}
template <typename T>
int MatrixDense<T>::GetTrainY(int datatype, size_t size, T**data) const {
CUDACHECK(hipSetDevice(_wDev));
if(_datay){
if(datatype==1){
hipMemcpy(*data, _datay, size* sizeof(T),hipMemcpyDeviceToHost);
CUDA_CHECK_ERR();
}
else{
std::memcpy(*data, _datay, size * sizeof(T));
}
return(0);
}
else return(1);
}
template <typename T>
int MatrixDense<T>::GetValidX(int datatype, size_t size, T**data) const {
CUDACHECK(hipSetDevice(_wDev));
if(_vdata){
if(datatype==1){
hipMemcpy(*data, _vdata, size* sizeof(T),hipMemcpyDeviceToHost);
CUDA_CHECK_ERR();
}
else{
std::memcpy(*data, _vdata, size * sizeof(T));
}
return(0);
}
else return(1);
}
template <typename T>
int MatrixDense<T>::GetValidY(int datatype, size_t size, T**data) const {
CUDACHECK(hipSetDevice(_wDev));
if(_vdatay){
if(datatype==1){
hipMemcpy(*data, _vdatay, size* sizeof(T),hipMemcpyDeviceToHost);
CUDA_CHECK_ERR();
}
else{
std::memcpy(*data, _vdatay, size * sizeof(T));
}
return(0);
}
else return(1);
}
template <typename T>
int MatrixDense<T>::GetWeight(int datatype, size_t size, T**data) const {
CUDACHECK(hipSetDevice(_wDev));
if(_weight){
if(datatype==1){
hipMemcpy(*data, _weight, size* sizeof(T),hipMemcpyDeviceToHost);
CUDA_CHECK_ERR();
}
else{
std::memcpy(*data, _weight, size * sizeof(T));
}
return(0);
}
else return(1);
}
template <typename T>
int MatrixDense<T>::Mul(char trans, T alpha, const T *x, T beta, T *y) const {
DEBUG_EXPECT(this->_done_init);
if (!this->_done_init)
return 1;
CUDACHECK(hipSetDevice(_wDev));
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
hipblasHandle_t hdl = info->handle;
const cml::vector<T> x_vec = cml::vector_view_array<T>(x, this->_n);
cml::vector<T> y_vec = cml::vector_view_array<T>(y, this->_m);
// Performs the matrix-vector operations y := alpha*A*x + beta*y or y := alpha*A'*x + beta*y where alpha and beta are scalars, x and y are vectors and A is an m by n matrix
// _data is A on GPU
//https://docs.oracle.com/cd/B19306_01/appdev.102/b14258/u_nla.htm#CIAFEAFG
if (_ord == ROW) {
cml::matrix<T, CblasRowMajor> A =
cml::matrix_view_array<T, CblasRowMajor>(_data, this->_m, this->_n);
cml::blas_gemv(hdl, OpToCublasOp(trans), alpha, &A, &x_vec, beta,
&y_vec);
} else {
cml::matrix<T, CblasColMajor> A =
cml::matrix_view_array<T, CblasColMajor>(_data, this->_m, this->_n);
cml::blas_gemv(hdl, OpToCublasOp(trans), alpha, &A, &x_vec, beta, &y_vec);
}
CUDA_CHECK_ERR();
return 0;
}
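// Illustration only (excluded from compilation, like the other #if(0) debug blocks): the
// intended call pattern for Mul(). The helper name and the buffers x/y are hypothetical;
// x must hold n values and y m values on the same GPU as the matrix.
#if(0)
template <typename T>
void example_mul(const MatrixDense<T> &A, const T *x, T *y) {
  // y := 1 * A * x + 0 * y, i.e. a plain GEMV on the equilibrated GPU matrix;
  // passing 't' instead of 'n' requests y := A' * x (see OpToCublasOp).
  A.Mul('n', static_cast<T>(1.), x, static_cast<T>(0.), y);
}
#endif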
template <typename T>
int MatrixDense<T>::Mulvalid(char trans, T alpha, const T *x, T beta, T *y) const {
DEBUG_EXPECT(this->_done_init);
if (!this->_done_init)
return 1;
CUDACHECK(hipSetDevice(_wDev));
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
hipblasHandle_t hdl = info->handle;
const cml::vector<T> x_vec = cml::vector_view_array<T>(x, this->_n);
cml::vector<T> y_vec = cml::vector_view_array<T>(y, this->_mvalid);
// Performs the matrix-vector operations y := alpha*A*x + beta*y or y := alpha*A'*x + beta*y where alpha and beta are scalars, x and y are vectors and A is an m by n matrix
// _vdata is A on GPU
//https://docs.oracle.com/cd/B19306_01/appdev.102/b14258/u_nla.htm#CIAFEAFG
if (_ord == ROW) {
cml::matrix<T, CblasRowMajor> A =
cml::matrix_view_array<T, CblasRowMajor>(_vdata, this->_mvalid, this->_n);
cml::blas_gemv(hdl, OpToCublasOp(trans), alpha, &A, &x_vec, beta,
&y_vec);
} else {
cml::matrix<T, CblasColMajor> A =
cml::matrix_view_array<T, CblasColMajor>(_vdata, this->_mvalid, this->_n);
cml::blas_gemv(hdl, OpToCublasOp(trans), alpha, &A, &x_vec, beta, &y_vec);
}
CUDA_CHECK_ERR();
return 0;
}
// col-major order (fortran) A, but still print as row major
template <typename T>
void printMatrix(int m, int n, const T*A, int lda, const char* name)
{
printf("rows=%d cols=%d lda=%d\n",m,n,lda);
for(int row = 0 ; row < m ; row++){
for(int col = 0 ; col < n ; col++){
T Areg = A[row + col*lda];
printf("%s(%d,%d) = %f\n", name, row+1, col+1, Areg);
}
}
}
// row-major order (c) A printed as row major
template <typename T>
void printMatrix2(int m, int n, const T*A, int lda, const char* name)
{
printf("rows=%d cols=%d lda=%d\n",m,n,lda);
for(int row = 0 ; row < m ; row++){
for(int col = 0 ; col < n ; col++){
T Areg = A[col + row*n];
printf("%s(%d,%d) = %f\n", name, row+1, col+1, Areg);
}
}
}
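// Note on the two debug printers above: printMatrix indexes column-major (Fortran) storage as
// A[row + col*lda], while printMatrix2 indexes row-major (C) storage as A[col + row*n]; both
// print element (row,col) with 1-based indices so the output can be pasted into MATLAB.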
/*
* How to compile (assume cuda is installed at /usr/local/cuda/)
* nvcc -c -I/usr/local/cuda/include svd_example.cpp
* g++ -fopenmp -o a.out svd_example.o -L/usr/local/cuda/lib64 -lcudart -lcublas -lcusolver
*
*/
inline cusolverStatus_t cusolverDngesvd ( hipsolverDnHandle_t handle, signed char jobu, signed char jobvt, int m, int n, float *A, int lda, float *S, float *U, int ldu, float *VT, int ldvt, float *work, int lwork, float *rwork, int *devInfo){
return(hipsolverDnSgesvd(handle, jobu, jobvt, m, n, A, lda, S, U, ldu, VT, ldvt, work, lwork, rwork, devInfo));
}
inline cusolverStatus_t cusolverDngesvd ( hipsolverDnHandle_t handle, signed char jobu, signed char jobvt, int m, int n, double *A, int lda, double *S, double *U, int ldu, double *VT, int ldvt, double *work, int lwork, double *rwork, int *devInfo){
return(hipsolverDnDgesvd(handle, jobu, jobvt, m, n, A, lda, S, U, ldu, VT, ldvt, work, lwork, rwork, devInfo));
}
inline hipblasStatus_t cublasgemm(hipblasHandle_t handle, hipblasOperation_t transa, hipblasOperation_t transb, int m, int n, int k, const float *alpha, const float *A, int lda, const float *B, int ldb, const float *beta, float *C, int ldc){
return(hipblasSgemm(handle, transa, transb, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc));
}
inline hipblasStatus_t cublasgemm(hipblasHandle_t handle, hipblasOperation_t transa, hipblasOperation_t transb, int m, int n, int k, const double *alpha, const double *A, int lda, const double *B, int ldb, const double *beta, double *C, int ldc){
return(hipblasDgemm(handle, transa, transb, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc));
}
inline hipblasStatus_t cublasdgmm(hipblasHandle_t handle,
hipblasSideMode_t mode,
int m,
int n,
const float *A,
int lda,
const float *x,
int incx,
float *C,
int ldc){
return(hipblasSdgmm(handle,
mode,
m,
n,
A,
lda,
x,
incx,
C,
ldc));
}
inline hipblasStatus_t cublasdgmm(hipblasHandle_t handle,
hipblasSideMode_t mode,
int m,
int n,
const double *A,
int lda,
const double *x,
int incx,
double *C,
int ldc){
return(hipblasDdgmm(handle,
mode,
m,
n,
A,
lda,
x,
incx,
C,
ldc));
}
inline hipblasStatus_t cublasnrm2(hipblasHandle_t handle,
int n,
const double *x,
int incx,
double *result){
return(hipblasDnrm2(handle,
n,
x,
incx,
result));
}
inline hipblasStatus_t cublasnrm2(hipblasHandle_t handle,
int n,
const float *x,
int incx,
float *result){
return(hipblasSnrm2(handle,
n,
x,
incx,
result));
}
// // Each block transposes/copies a tile of TILE_DIM x TILE_DIM elements
// // using TILE_DIM x BLOCK_ROWS threads, so that each thread transposes
// // TILE_DIM/BLOCK_ROWS elements. TILE_DIM must be an integral multiple of BLOCK_ROWS
#define TILE_DIM 16
#define BLOCK_ROWS 16
// __global__ void transposeNaive(float *odata, float* idata,
// int width, int height)
// {
// int xIndex = blockIdx.x*TILE_DIM + threadIdx.x;
// int yIndex = blockIdx.y*TILE_DIM + threadIdx.y;
// int index_in = xIndex + width * yIndex;
// int index_out = yIndex + height * xIndex;
// for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
// odata[index_out+i] = idata[index_in+i*width];
// }
// }
// __global__ void transposeNaive(double *odata, double* idata,
// int width, int height)
// {
// int xIndex = blockIdx.x*TILE_DIM + threadIdx.x;
// int yIndex = blockIdx.y*TILE_DIM + threadIdx.y;
// int index_in = xIndex + width * yIndex;
// int index_out = yIndex + height * xIndex;
// for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
// odata[index_out+i] = idata[index_in+i*width];
// }
// }
// __global__ void transposeCoalesced(float *odata,
// float *idata, int width, int height)
// {
// __shared__ float tile[TILE_DIM][TILE_DIM];
// int xIndex = blockIdx.x*TILE_DIM + threadIdx.x;
// int yIndex = blockIdx.y*TILE_DIM + threadIdx.y;
// int index_in = xIndex + (yIndex)*width;
// xIndex = blockIdx.y * TILE_DIM + threadIdx.x;
// yIndex = blockIdx.x * TILE_DIM + threadIdx.y;
// int index_out = xIndex + (yIndex)*height;
// for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
// tile[threadIdx.y+i][threadIdx.x] =
// idata[index_in+i*width];
// }
// __syncthreads();
// for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
// odata[index_out+i*height] =
// tile[threadIdx.x][threadIdx.y+i];
// }
// }
// __global__ void transposeCoalesced(double *odata,
// double *idata, int width, int height)
// {
// __shared__ double tile[TILE_DIM][TILE_DIM];
// int xIndex = blockIdx.x*TILE_DIM + threadIdx.x;
// int yIndex = blockIdx.y*TILE_DIM + threadIdx.y;
// int index_in = xIndex + (yIndex)*width;
// xIndex = blockIdx.y * TILE_DIM + threadIdx.x;
// yIndex = blockIdx.x * TILE_DIM + threadIdx.y;
// int index_out = xIndex + (yIndex)*height;
// for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
// tile[threadIdx.y+i][threadIdx.x] =
// idata[index_in+i*width];
// }
// __syncthreads();
// for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
// odata[index_out+i*height] =
// tile[threadIdx.x][threadIdx.y+i];
// }
// }
// transpose for a row-major device matrix A[m][n]: odata receives A^T computed from idata
// (distinct buffers, so despite the name the operation is effectively out-of-place)
void cudaintranspose(float *odata, float *idata, int m, int n){
hipError_t cudaStat1 = hipSuccess;
cudaStat1 = hipMemcpy(odata, idata, sizeof(float)*m*n, hipMemcpyDeviceToDevice);
assert(hipSuccess == cudaStat1);
float const alpha(1.0);
float const beta(0.0);
hipblasHandle_t handle;
hipblasCreate(&handle);
hipblasSgeam( handle, HIPBLAS_OP_T, HIPBLAS_OP_N, m, n, &alpha, idata, n, &beta, idata, m, odata, m );
hipblasDestroy(handle);
}
void cudaintranspose(double *odata, double *idata, int m, int n){
hipError_t cudaStat1 = hipSuccess;
cudaStat1 = hipMemcpy(odata, idata, sizeof(double)*m*n, hipMemcpyDeviceToDevice);
assert(hipSuccess == cudaStat1);
double const alpha(1.0);
double const beta(0.0);
hipblasHandle_t handle;
hipblasCreate(&handle);
hipblasDgeam( handle, HIPBLAS_OP_T, HIPBLAS_OP_N, m, n, &alpha, idata, n, &beta, idata, m, odata, m );
hipblasDestroy(handle);
}
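// The two overloads above perform the transpose with a single GEAM call,
//   odata := alpha * op(idata) + beta * B  with op = transpose, alpha = 1, beta = 0,
// so odata ends up holding the transpose of the row-major m x n input (the preceding
// device-to-device memcpy appears to be only a pre-fill and is presumably not required).
// Hedged usage sketch, illustration only with hypothetical buffers:
#if(0)
{
  int m = 3, n = 2;
  float *d_in = NULL, *d_out = NULL;
  hipMalloc(&d_in,  sizeof(float) * m * n);
  hipMalloc(&d_out, sizeof(float) * m * n);
  // ... fill d_in with a row-major m x n matrix ...
  cudaintranspose(d_out, d_in, m, n);   // d_out now holds the transpose
  hipFree(d_in); hipFree(d_out);
}
#endif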
#define MIN(a,b) ((a)<(b) ? (a) : (b))
template <typename T>
int MatrixDense<T>::svd1(void) {
fprintf(stderr,"begin svd inside0\n"); fflush(stderr); fflush(stdout);
DEBUG_ASSERT(this->_done_init);
if (!this->_done_init)
Init();
fprintf(stderr,"begin svd inside\n"); fflush(stderr); fflush(stdout);
hipsolverDnHandle_t cusolverH = NULL;
hipblasHandle_t cublasH = NULL;
hipblasStatus_t cublas_status = HIPBLAS_STATUS_SUCCESS;
cusolverStatus_t cusolver_status = CUSOLVER_STATUS_SUCCESS;
hipError_t cudaStat1 = hipSuccess;
hipError_t cudaStat2 = hipSuccess;
hipError_t cudaStat3 = hipSuccess;
hipError_t cudaStat4 = hipSuccess;
hipError_t cudaStat5 = hipSuccess;
hipError_t cudaStat6 = hipSuccess;
int m = this->_m;
int n = this->_n;
// const int m = this->_m;
// const int n = this->_n;
int lda = m;
  /*       | 1 2 |
   *   A = | 4 5 |
   *       | 2 1 |
   */
  unsigned char ord='r'; // TODO: should be passed in as an argument
// original device vector
T *d_A0;
d_A0 = this->_data;
// device vectors
T *d_A = NULL;
T *d_S = NULL;
T *d_U = NULL;
T *d_VT = NULL;
int *devInfo = NULL;
T *d_work = NULL;
T *d_rwork = NULL;
T *d_W = NULL; // W = S*VT
int lwork = 0;
int info_gpu = 0;
const T h_one = 1;
const T h_minus_one = -1;
double t0 = timer<double>();
// step 1: create cusolverDn/cublas handle
cusolver_status = hipsolverDnCreate(&cusolverH);
assert(CUSOLVER_STATUS_SUCCESS == cusolver_status);
cublas_status = hipblasCreate(&cublasH);
assert(HIPBLAS_STATUS_SUCCESS == cublas_status);
fprintf(stderr,"HERE1\n"); fflush(stderr); fflush(stdout);
// step 2: copy A to device
// cudaStat1 = hipMalloc ((void**)&d_A , sizeof(T)*lda*n);
// svd destroys d_A, so make copy for testing error // OPTMARK
cudaStat1 = hipMalloc ((void**)&d_A , sizeof(T)*lda*n);
assert(hipSuccess == cudaStat1);
cudaStat1 = hipMemcpy(d_A, d_A0, sizeof(T)*lda*n, hipMemcpyDeviceToDevice);
assert(hipSuccess == cudaStat1);
cudaStat1 = hipDeviceSynchronize();
assert(hipSuccess == cudaStat1);
int ldu=m; //lda;
int ldureal=n; // actual storage
int ldvt=n;
if(ord=='r'){
// transpose
// execution configuration parameters
//dim3 grid(n/TILE_DIM, lda/TILE_DIM), threads(TILE_DIM,BLOCK_ROWS);
// transposeCoalesced<<<grid, threads>>>(d_A, d_A0, n, lda);
// transposeNaive<<<grid, threads>>>(d_A, d_A0, n, lda);
cudaintranspose(d_A,d_A0,m,n); // OPTMARK
cudaStat1 = hipDeviceSynchronize();
assert(hipSuccess == cudaStat1);
// below debug only for printMatrix2 to view, shouldn't actually swap for use.
if(0){
int temp=m;
m=n;
n=temp;
lda=m;
ldu=m; //lda;
ldureal=n; // actual storage
ldvt=n;
}
}
else{
d_A = d_A0;
}
fprintf(stderr,"HERE PRE\n"); fflush(stderr); fflush(stdout);
// old host side vectors
// T A[lda*n] = { 1.0, 4.0, 2.0, 2.0, 5.0, 1.0};
// GpuData<T> *info_A = reinterpret_cast<GpuData<T>*>(this->_info); // cast from void to GpuData
// T *A = const_cast<T*>(info_A->orig_data);
#if(0)
T A[lda*n]; // for debug
T U[ldureal*m]; // m-by-m unitary matrix
T VT[ldvt*n]; // n-by-n unitary matrix
T S[MIN(n,m)]; // singular value
#endif
// T S_exact[n] = {7.065283497082729, 1.040081297712078};
fprintf(stderr,"HERE POST\n"); fflush(stderr); fflush(stdout);
// now d_A has column-major order matrix
fprintf(stderr,"HERE2\n"); fflush(stderr); fflush(stdout);
#if(0) // debug
cudaStat1 = hipMemcpy(A, d_A, sizeof(T)*lda*n, hipMemcpyDeviceToHost);
assert(hipSuccess == cudaStat1);
cudaStat1 = hipDeviceSynchronize();
assert(hipSuccess == cudaStat1);
printf("A = (matlab base-1)\n");
printMatrix(m, n, A, lda, "A");
printf("=====\n");
printf("A = (matlab base-1)\n");
printMatrix2(m, n, A, lda, "A");
printf("=====\n");
#endif
fprintf(stderr,"HERE3\n"); fflush(stderr); fflush(stdout);
cudaStat2 = hipMalloc ((void**)&d_S , sizeof(T)*MIN(n,m));
cudaStat3 = hipMalloc ((void**)&d_U , sizeof(T)*ldureal*m);
cudaStat4 = hipMalloc ((void**)&d_VT , sizeof(T)*ldvt*n);
cudaStat5 = hipMalloc ((void**)&devInfo, sizeof(int));
cudaStat6 = hipMalloc ((void**)&d_W , sizeof(T)*lda*n);
// assert(hipSuccess == cudaStat1);
assert(hipSuccess == cudaStat2);
assert(hipSuccess == cudaStat3);
assert(hipSuccess == cudaStat4);
assert(hipSuccess == cudaStat5);
assert(hipSuccess == cudaStat6);
// host->device
// cudaStat1 = hipMemcpy(d_A, A, sizeof(T)*lda*n, hipMemcpyHostToDevice);
// assert(hipSuccess == cudaStat1);
  // step 3: query working space of SVD
  // The dense matrices are assumed to be stored in column-major order in memory.
  // Note: the double-precision bufferSize query is used for both float and double T here;
  // the returned element count is assumed to be sufficient for the single-precision path too.
cusolver_status = hipsolverDnDgesvd_bufferSize(
cusolverH,
m,
n,
&lwork );
assert (cusolver_status == CUSOLVER_STATUS_SUCCESS);
cudaStat1 = hipMalloc((void**)&d_work , sizeof(T)*lwork);
assert(hipSuccess == cudaStat1);
double t1 = timer<double>();
fprintf(stderr,"SVD init: %g\n",t1-t0); fflush(stderr); fflush(stdout);
// step 4: compute SVD
double t0c = timer<double>();
signed char jobu = 'A'; // all m columns of U
signed char jobvt = 'A'; // all n columns of VT
cusolver_status = cusolverDngesvd(
cusolverH,
jobu,
jobvt,
m,
n,
d_A,
lda,
d_S,
d_U,
ldu,
d_VT,
ldvt,
d_work,
lwork,
d_rwork,
devInfo);
cudaStat4 = hipMemcpy(&info_gpu, devInfo, sizeof(int), hipMemcpyDeviceToHost);
printf("after gesvd: info_gpu = %d\n", info_gpu); fflush(stdout);
assert(0 == info_gpu);
printf("=====\n"); fflush(stdout);
cudaStat1 = hipDeviceSynchronize();
assert(hipSuccess == cudaStat1);
fprintf(stderr,"BAD: %d\n",cusolver_status); fflush(stderr);
assert(CUSOLVER_STATUS_SUCCESS == cusolver_status);
double t1c = timer<double>();
fprintf(stderr,"SVD compute: %g\n",t1-t0); fflush(stderr); fflush(stdout);
#if(0)
/////////////////////////
// Copy solution device->host
double t0h = timer<double>();
cudaStat1 = hipMemcpy(U , d_U , sizeof(T)*ldureal*m, hipMemcpyDeviceToHost);
cudaStat2 = hipMemcpy(VT, d_VT, sizeof(T)*ldvt*n, hipMemcpyDeviceToHost);
cudaStat3 = hipMemcpy(S , d_S , sizeof(T)*MIN(n,m), hipMemcpyDeviceToHost);
assert(hipSuccess == cudaStat1);
assert(hipSuccess == cudaStat2);
assert(hipSuccess == cudaStat3);
assert(hipSuccess == cudaStat4);
if(0){ // debug
printf("S = (matlab base-1)\n");
printMatrix(n, 1, S, lda, "S");
printf("=====\n");
printf("U = (matlab base-1)\n");
printMatrix(m, m, U, ldureal, "U");
printf("=====\n");
printf("VT = (matlab base-1)\n");
printMatrix(n, n, VT, ldvt, "VT");
printf("=====\n");
/////////////////////////
// measure error of singular value
// T ds_sup = 0;
// for(int j = 0; j < n; j++){
// T err = fabs( S[j] - S_exact[j] );
// ds_sup = (ds_sup > err)? ds_sup : err;
// }
// printf("|S - S_exact| = %E \n", ds_sup);
}
double t1h = timer<double>();
fprintf(stderr,"SVD back to host: %g\n",t1h-t0h); fflush(stderr); fflush(stdout);
#endif
/////////////////////////
// now check
double t0c1 = timer<double>();
// step 5: |A - U*S*VT|
// W = S*VT
cublas_status = cublasdgmm(
cublasH,
HIPBLAS_SIDE_LEFT,
n,
n,
d_VT,
ldvt,
d_S,
1,
d_W,
lda);
assert(HIPBLAS_STATUS_SUCCESS == cublas_status);
double t1c1 = timer<double>();
fprintf(stderr,"SVD check1: %g\n",t1c1-t0c1); fflush(stderr); fflush(stdout);
// A := -U*W + A
double t0c2 = timer<double>();
cudaStat1 = hipMemcpy(d_A, d_A0, sizeof(T)*lda*n, hipMemcpyDeviceToDevice); // copy because original d_A was destroyed
assert(hipSuccess == cudaStat1);
cublas_status = cublasgemm(
cublasH,
HIPBLAS_OP_N, // U
HIPBLAS_OP_N, // W
m, // number of rows of A
n, // number of columns of A
n, // number of columns of U
&h_minus_one, /* host pointer */
d_U, // U
ldu,
d_W, // W
lda,
&h_one, /* hostpointer */
d_A,
lda);
assert(HIPBLAS_STATUS_SUCCESS == cublas_status);
double t1c2 = timer<double>();
fprintf(stderr,"SVD check2: %g\n",t1c2-t0c2); fflush(stderr); fflush(stdout);
double t0c3 = timer<double>();
T dR_fro = 0.0;
cublas_status = cublasnrm2(
cublasH, lda*n, d_A, 1, &dR_fro);
assert(HIPBLAS_STATUS_SUCCESS == cublas_status);
printf("|A - U*S*VT| = %E \n", dR_fro); fflush(stdout);
double t1c3 = timer<double>();
fprintf(stderr,"SVD check3: %g\n",t1c3-t0c3); fflush(stderr); fflush(stdout);
// free resources
double t0f = timer<double>();
//if (d_A ) hipFree(d_A);
if (d_S ) hipFree(d_S);
if (d_U ) hipFree(d_U);
if (d_VT ) hipFree(d_VT);
if (devInfo) hipFree(devInfo);
if (d_work ) hipFree(d_work);
if (d_rwork) hipFree(d_rwork);
if (d_W ) hipFree(d_W);
if (cublasH ) hipblasDestroy(cublasH);
if (cusolverH) hipsolverDnDestroy(cusolverH);
// hipDeviceReset();
double t1f = timer<double>();
fprintf(stderr,"SVD free: %g\n",t1f-t0f); fflush(stderr); fflush(stdout);
fprintf(stderr,"end svd inside\n"); fflush(stderr); fflush(stdout);
return 0;
}
// Equilibration (precondition) matrix using Sinkhorn Knopp method wrapped to allow any norm
// See https://arxiv.org/pdf/1610.03871.pdf for more information
template <typename T>
int MatrixDense<T>::Equil(bool equillocal) {
DEBUG_ASSERT(this->_done_init);
if (!this->_done_init)
return 1;
if (this->_done_equil) return 0;
else this->_done_equil=1;
CUDACHECK(hipSetDevice(_wDev));
// Extract cublas handle from _info.
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
hipblasHandle_t hdl = info->handle;
T *d = _de;
T *e = d + this->_m;
// Number of elements in matrix.
size_t num_el = this->_m * this->_n;
// Create bit-vector with signs of entries in A and then let A = f(A),
// where f = |A| or f = |A|.^2.
unsigned char *sign = NULL;
size_t num_sign_bytes = (num_el + 7) / 8;
hipMalloc(&sign, num_sign_bytes);
CUDA_CHECK_ERR();
size_t num_chars = num_el / 8;
size_t grid_size = cml::calc_grid_dim(num_chars, cml::kBlockSize);
if(equillocal){
// Fill sign bits, assigning each thread a multiple of 8 elements.
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
hipLaunchKernelGGL(( __SetSign), dim3(grid_size), dim3(cml::kBlockSize), 0, 0, _data, sign, num_chars,
SquareF<T>());
} else {
hipLaunchKernelGGL(( __SetSign), dim3(grid_size), dim3(cml::kBlockSize), 0, 0, _data, sign, num_chars,
AbsF<T>());
}
wrapcudaDeviceSynchronize();
CUDA_CHECK_ERR();
// If numel(A) is not a multiple of 8, then we need to set the last couple
// of sign bits too.
if (num_el > num_chars * 8) {
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
hipLaunchKernelGGL(( __SetSignSingle), dim3(1), dim3(1), 0, 0, _data + num_chars * 8, sign + num_chars,
num_el - num_chars * 8, SquareF<T>());
} else {
hipLaunchKernelGGL(( __SetSignSingle), dim3(1), dim3(1), 0, 0, _data + num_chars * 8, sign + num_chars,
num_el - num_chars * 8, AbsF<T>());
}
wrapcudaDeviceSynchronize();
CUDA_CHECK_ERR();
}
}
// Perform Sinkhorn-Knopp equilibration to obtain a doubly stochastic matrix.
SinkhornKnopp(this, d, e, equillocal);
wrapcudaDeviceSynchronize();
if(equillocal){
// Transform A = sign(A) .* sqrt(A) if 2-norm equilibration was performed,
// or A = sign(A) .* A if the 1-norm was equilibrated.
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
hipLaunchKernelGGL(( __UnSetSign), dim3(grid_size), dim3(cml::kBlockSize), 0, 0, _data, sign, num_chars,
SqrtF<T>());
} else {
hipLaunchKernelGGL(( __UnSetSign), dim3(grid_size), dim3(cml::kBlockSize), 0, 0, _data, sign, num_chars,
IdentityF<T>());
}
wrapcudaDeviceSynchronize();
CUDA_CHECK_ERR();
// Deal with last few entries if num_el is not a multiple of 8.
if (num_el > num_chars * 8) {
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
hipLaunchKernelGGL(( __UnSetSignSingle), dim3(1), dim3(1), 0, 0, _data + num_chars * 8, sign + num_chars,
num_el - num_chars * 8, SqrtF<T>());
} else {
hipLaunchKernelGGL(( __UnSetSignSingle), dim3(1), dim3(1), 0, 0, _data + num_chars * 8, sign + num_chars,
num_el - num_chars * 8, IdentityF<T>());
}
wrapcudaDeviceSynchronize();
CUDA_CHECK_ERR();
}
}
// Compute D := sqrt(D), E := sqrt(E), if 2-norm was equilibrated.
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
thrust::transform(thrust::device_pointer_cast(d),
thrust::device_pointer_cast(d + this->_m),
thrust::device_pointer_cast(d), SqrtF<T>());
thrust::transform(thrust::device_pointer_cast(e),
thrust::device_pointer_cast(e + this->_n),
thrust::device_pointer_cast(e), SqrtF<T>());
wrapcudaDeviceSynchronize();
CUDA_CHECK_ERR();
}
// Compute A := D * A * E.
MultDiag(d, e, this->_m, this->_n, _ord, _data);
wrapcudaDeviceSynchronize();
CUDA_CHECK_ERR();
// Scale A to have norm of 1 (in the kNormNormalize norm).
T normA = NormEst(hdl, kNormNormalize, *this);
CUDA_CHECK_ERR();
wrapcudaDeviceSynchronize();
cml::vector<T> a_vec = cml::vector_view_array(_data, num_el);
cml::vector_scale(&a_vec, 1 / normA);
wrapcudaDeviceSynchronize();
// Scale d and e to account for normalization of A.
cml::vector<T> d_vec = cml::vector_view_array<T>(d, this->_m);
cml::vector<T> e_vec = cml::vector_view_array<T>(e, this->_n);
cml::vector_scale(&d_vec, 1 / sqrt(normA));
cml::vector_scale(&e_vec, 1 / sqrt(normA));
wrapcudaDeviceSynchronize();
DEBUG_PRINTF("norm A = %e, normd = %e, norme = %e\n", normA,
cml::blas_nrm2(hdl, &d_vec), cml::blas_nrm2(hdl, &e_vec));
hipFree(sign);
CUDA_CHECK_ERR();
return 0;
}
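// Illustration only (excluded from compilation): the Sinkhorn-Knopp idea that Equil() above
// delegates to SinkhornKnopp() in equil_helper.cuh -- alternately rescale rows and columns of
// |A| (or |A|.^2 for the 2-norm variant) until the matrix is approximately doubly stochastic,
// accumulating the scalings in d and e so the solver effectively works with D*A*E.
// Hypothetical host reference with row-major A[m*n] and d[m], e[n] initialized to 1:
#if(0)
for (int it = 0; it < 10; ++it) {              // a fixed, small number of sweeps
  for (size_t i = 0; i < m; ++i) {             // row pass: d_i = 1 / sum_j |a_ij| * e_j
    double s = 0;
    for (size_t j = 0; j < n; ++j) s += std::fabs(A[i * n + j]) * e[j];
    d[i] = 1 / s;
  }
  for (size_t j = 0; j < n; ++j) {             // column pass: e_j = 1 / sum_i d_i * |a_ij|
    double s = 0;
    for (size_t i = 0; i < m; ++i) s += d[i] * std::fabs(A[i * n + j]);
    e[j] = 1 / s;
  }
}
#endif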
// This example computes several statistical properties of a data
// series in a single reduction. The algorithm is described in detail here:
// http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
//
// Thanks to Joseph Rhoads for contributing this example
// structure used to accumulate the moments and other
// statistical properties encountered so far.
template <typename T>
struct summary_stats_data
{
T n;
T min;
T max;
T mean;
T M2;
T M3;
T M4;
// initialize to the identity element
void initialize()
{
n = mean = M2 = M3 = M4 = 0;
min = std::numeric_limits<T>::max();
    max = std::numeric_limits<T>::lowest(); // min() is the smallest *positive* value for floating-point T; lowest() is the proper identity for a running max
}
T variance() { return M2 / (n - 1); }
T variance_n() { return M2 / n; }
T skewness() { return std::sqrt(n) * M3 / ::pow(M2, (T) 1.5); }
T kurtosis() { return n * M4 / (M2 * M2); }
};
// summary_stats_unary_op is a functor that takes in a value x and
// returns a summary_stats_data whose mean value is initialized to x.
template <typename T>
struct summary_stats_unary_op
{
__host__ __device__
summary_stats_data<T> operator()(const T& x) const
{
summary_stats_data<T> result;
result.n = 1;
result.min = x;
result.max = x;
result.mean = x;
result.M2 = 0;
result.M3 = 0;
result.M4 = 0;
return result;
}
};
// summary_stats_binary_op is a functor that accepts two summary_stats_data
// structs and returns a new summary_stats_data which is an
// approximation to the summary_stats for
// all values that have been aggregated so far
template <typename T>
struct summary_stats_binary_op
: public thrust::binary_function<const summary_stats_data<T>&,
const summary_stats_data<T>&,
summary_stats_data<T> >
{
__host__ __device__
summary_stats_data<T> operator()(const summary_stats_data<T>& x, const summary_stats_data <T>& y) const
{
summary_stats_data<T> result;
// precompute some common subexpressions
T n = x.n + y.n;
T n2 = n * n;
T n3 = n2 * n;
T delta = y.mean - x.mean;
T delta2 = delta * delta;
T delta3 = delta2 * delta;
T delta4 = delta3 * delta;
//Basic number of samples (n), min, and max
result.n = n;
result.min = thrust::min(x.min, y.min);
result.max = thrust::max(x.max, y.max);
result.mean = x.mean + delta * y.n / n;
result.M2 = x.M2 + y.M2;
result.M2 += delta2 * x.n * y.n / n;
result.M3 = x.M3 + y.M3;
result.M3 += delta3 * x.n * y.n * (x.n - y.n) / n2;
result.M3 += (T) 3.0 * delta * (x.n * y.M2 - y.n * x.M2) / n;
result.M4 = x.M4 + y.M4;
result.M4 += delta4 * x.n * y.n * (x.n * x.n - x.n * y.n + y.n * y.n) / n3;
result.M4 += (T) 6.0 * delta2 * (x.n * x.n * y.M2 + y.n * y.n * x.M2) / n2;
result.M4 += (T) 4.0 * delta * (x.n * y.M3 - y.n * x.M3) / n;
return result;
}
};
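// The merge above implements the pairwise ("parallel") update formulas referenced at the top
// of this block: with delta = mean_y - mean_x and n = n_x + n_y,
//   mean = mean_x + delta * n_y / n
//   M2   = M2_x + M2_y + delta^2 * n_x * n_y / n
// plus the analogous higher-order corrections for M3 and M4, which is what lets
// variance/skewness/kurtosis be computed in a single thrust::transform_reduce pass over the
// device data (see Stats() below).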
template <typename Iterator>
void print_range(const std::string& name, Iterator first, Iterator last)
{
typedef typename std::iterator_traits<Iterator>::value_type T;
std::cout << name << ": ";
thrust::copy(first, last, std::ostream_iterator<T>(std::cout, " "));
std::cout << "\n";
}
template<typename T>
struct absolute_value : public thrust::unary_function<T,T>
{
__host__ __device__ T operator()(const T &x) const
{
return x < T(0) ? -x : x;
}
};
// --- Operator for testing nan values
template<typename T>
struct isnan_test {
__host__ __device__ bool operator()(const T a) const {
return isnan(a) || isinf(a);
}
};
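// Minimal usage sketch (illustration only; 'ptr' and 'len' are hypothetical, but this is the
// same pattern Stats() uses below to reject inputs containing nan/inf):
#if(0)
thrust::device_ptr<double> begin = thrust::device_pointer_cast(ptr);
bool bad = thrust::transform_reduce(begin, begin + len, isnan_test<double>(),
                                    false, thrust::plus<bool>());
if (bad) fprintf(stderr, "input contains nan/inf\n");
#endif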
// check properties of input data
template <typename T>
int MatrixDense<T>::Stats(int intercept, T *min, T *max, T *mean, T *var, T *sd, T *skew, T *kurt, T &lambda_max0)
{
CUDACHECK(hipSetDevice(_wDev));
if(_data!=NULL) {// check for nan or inf in data
thrust::device_ptr<T> begin = thrust::device_pointer_cast(_data);
thrust::device_ptr<T> end = thrust::device_pointer_cast(_data+this->_m*this->_n);
bool h_result = thrust::transform_reduce(begin, end, isnan_test<T>(), 0, thrust::plus<bool>());
if(h_result==true){
fprintf(stderr,"Data matrix (trainX) has nan/inf or missing was not encoded\n");
fflush(stderr);
exit(1);
}
}
if(_datay!=NULL) {// check for nan or inf in data
thrust::device_ptr<T> begin = thrust::device_pointer_cast(_datay);
thrust::device_ptr<T> end = thrust::device_pointer_cast(_datay+this->_m);
bool h_result = thrust::transform_reduce(begin, end, isnan_test<T>(), 0, thrust::plus<bool>());
if(h_result==true){
fprintf(stderr,"Data training predictions/labels (trainY) has nan/inf or missing was not encoded\n");
fflush(stderr);
exit(1);
}
}
if(_vdata!=NULL) {// check for nan or inf in data
thrust::device_ptr<T> begin = thrust::device_pointer_cast(_vdata);
thrust::device_ptr<T> end = thrust::device_pointer_cast(_vdata+this->_mvalid*this->_n);
bool h_result = thrust::transform_reduce(begin, end, isnan_test<T>(), 0, thrust::plus<bool>());
if(h_result==true){
fprintf(stderr,"Validation Data matrix (validX) has nan/inf or missing was not encoded\n");
fflush(stderr);
exit(1);
}
}
if(_vdatay!=NULL) {// check for nan or inf in data
thrust::device_ptr<T> begin = thrust::device_pointer_cast(_vdatay);
thrust::device_ptr<T> end = thrust::device_pointer_cast(_vdatay+this->_mvalid);
bool h_result = thrust::transform_reduce(begin, end, isnan_test<T>(), 0, thrust::plus<bool>());
if(h_result==true){
fprintf(stderr,"Validation Data training predictions/labels (validY) has nan/inf or missing was not encoded\n");
fflush(stderr);
exit(1);
}
}
if(_weight!=NULL) {// check for nan or inf in data
thrust::device_ptr<T> begin = thrust::device_pointer_cast(_weight);
thrust::device_ptr<T> end = thrust::device_pointer_cast(_weight+this->_m);
bool h_result = thrust::transform_reduce(begin, end, isnan_test<T>(), 0, thrust::plus<bool>());
if(h_result==true){
fprintf(stderr,"Weight Training Data has nan/inf or missing was not encoded\n");
fflush(stderr);
exit(1);
}
}
// nothing else to do if _datay==NULL
if(_datay==NULL) return(0);
// setup arguments
summary_stats_unary_op<T> unary_op;
summary_stats_binary_op<T> binary_op;
summary_stats_data<T> init;
init.initialize();
int len=0;
// cast GPU pointer as thrust pointer
thrust::device_ptr<T> dataybegin=thrust::device_pointer_cast(_datay);
len=this->_m;
thrust::device_ptr<T> datayend=thrust::device_pointer_cast(_datay+len);
// compute summary statistics
summary_stats_data<T> resulty = thrust::transform_reduce(dataybegin, datayend, unary_op, init, binary_op);
min[0]=resulty.min;
max[0]=resulty.max;
mean[0]=resulty.mean;
var[0]=resulty.variance();
sd[0]=std::sqrt(resulty.variance_n());
skew[0]=resulty.skewness();
kurt[0]=resulty.kurtosis();
#ifdef DEBUG
std::cout <<"******Summary Statistics of Response Train*****"<<std::endl;
// print_range("The data", dataybegin, datayend);
std::cout <<"Count : "<< resulty.n << std::endl;
std::cout <<"Minimum : "<< min[0]<<std::endl;
std::cout <<"Maximum : "<< max[0]<<std::endl;
std::cout <<"Mean : "<< mean[0]<< std::endl;
std::cout <<"Variance : "<< var[0]<< std::endl;
std::cout <<"Standard Deviation : "<< sd[0]<< std::endl;
std::cout <<"Skewness : "<< skew[0]<< std::endl;
std::cout <<"Kurtosis : "<< kurt[0]<< std::endl;
#endif
// cast GPU pointer as thrust pointer
thrust::device_ptr<T> vdataybegin=thrust::device_pointer_cast(_vdatay);
len=this->_mvalid;
thrust::device_ptr<T> vdatayend=thrust::device_pointer_cast(_vdatay+len);
// compute summary statistics
summary_stats_data<T> vresulty = thrust::transform_reduce(vdataybegin, vdatayend, unary_op, init, binary_op);
min[1]=vresulty.min;
max[1]=vresulty.max;
mean[1]=vresulty.mean;
var[1]=vresulty.variance();
sd[1]=std::sqrt(vresulty.variance_n());
skew[1]=vresulty.skewness();
kurt[1]=vresulty.kurtosis();
#ifdef DEBUG
std::cout <<"******Summary Statistics of Response Valid*****"<<std::endl;
// print_range("The data", vdataybegin, vdatayend);
std::cout <<"Count : "<< vresulty.n << std::endl;
std::cout <<"Minimum : "<< min[1]<<std::endl;
std::cout <<"Maximum : "<< max[1]<<std::endl;
std::cout <<"Mean : "<< mean[1]<< std::endl;
std::cout <<"Variance : "<< var[1]<< std::endl;
std::cout <<"Standard Deviation : "<< sd[1]<< std::endl;
std::cout <<"Skewness : "<< skew[1]<< std::endl;
std::cout <<"Kurtosis : "<< kurt[1]<< std::endl;
#endif
if(1){ // normal usage
// Get Cublas handle
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
hipblasHandle_t hdl = info->handle;
// Set up views for raw vectors.
cml::vector<T> y_vec = cml::vector_view_array(_datay, this->_m); // b
cml::vector<T> weight_vec;
auto free_weight_vec = false;
if(_weight) weight_vec = cml::vector_view_array(_weight, this->_m); // weight
else{
weight_vec = cml::vector_calloc<T>(this->_m); // weight make up
free_weight_vec = true;
cml::vector_add_constant(&weight_vec, static_cast<T>(1.0)); // make unity weights
}
cml::vector<T> ytemp = cml::vector_calloc<T>(this->_m); // b
cml::vector<T> xtemp = cml::vector_calloc<T>(this->_n); // x
cml::vector_memcpy(&ytemp, &y_vec); // y_vec->ytemp
cml::vector_add_constant(&ytemp, -static_cast<T>(intercept)*mean[0]); // ytemp -> ytemp - intercept*mean[0]
cml::vector_mul(&ytemp,&weight_vec); // ytemp*weight -> ytemp
// Compute A^T . b
if (_ord == MatrixDense<T>::ROW) {
const cml::matrix<T, CblasRowMajor> A = cml::matrix_view_array<T, CblasRowMajor>(_data, this->_m, this->_n); // just view
cml::blas_gemv(hdl, HIPBLAS_OP_T, static_cast<T>(1.), &A, &ytemp, static_cast<T>(0.), &xtemp); // A.ytemp -> xtemp
}
else{
const cml::matrix<T, CblasColMajor> A = cml::matrix_view_array<T, CblasColMajor>(_data, this->_m, this->_n); // just view
cml::blas_gemv(hdl, HIPBLAS_OP_T, static_cast<T>(1.), &A, &ytemp, static_cast<T>(0.), &xtemp); // A.ytemp -> xtemp
}
thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(&xtemp.data[0]);
lambda_max0 = thrust::transform_reduce(thrust::device,
dev_ptr, dev_ptr + this->_n-intercept,
absolute_value<T>(),
static_cast<T>(0.0),
thrust::maximum<T>());
cml::vector_free(&ytemp);
cml::vector_free(&xtemp);
if(free_weight_vec) cml::vector_free(&weight_vec);
}
else{
lambda_max0 = 7000; // test
}
CUDA_CHECK_ERR();
return 0;
}
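// Note on lambda_max0 above: with ytilde = weight .* (y - intercept * mean(y)), the estimate is
// lambda_max0 = max_j |(A^T ytilde)_j| over the non-intercept columns -- the usual largest value
// of an L1 regularization path, above which the all-zero coefficient vector is already optimal.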
////////////////////////////////////////////////////////////////////////////////
/////////////////////// Equilibration Helpers //////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
namespace {
// Estimates norm of A. norm_type should either be kNorm2 or kNormFro.
template <typename T>
T NormEst(hipblasHandle_t hdl, NormTypes norm_type, const MatrixDense<T>& A) {
switch (norm_type) {
case kNorm2: {
return Norm2Est(hdl, &A);
}
case kNormFro: {
const cml::vector<T> a = cml::vector_view_array(A.Data(),
A.Rows() * A.Cols());
      return cml::blas_nrm2(hdl, &a) / std::sqrt(std::min(A.Rows(), A.Cols()));
}
case kNorm1:
      // 1-norm normalization doesn't make sense since it treats rows and
      // columns differently.
default:
ASSERT(false);
return static_cast<T>(0.);
}
}
// Performs A := D * A * E for A in row major
template <typename T>
void __global__ __MultRow(size_t m, size_t n, const T *d, const T *e, T *data) {
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
for (size_t t = tid; t < m * n; t += gridDim.x * blockDim.x)
data[t] *= d[t / n] * e[t % n];
}
// Performs A := D * A * E for A in col major
template <typename T>
void __global__ __MultCol(size_t m, size_t n, const T *d, const T *e, T *data) {
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
for (size_t t = tid; t < m * n; t += gridDim.x * blockDim.x)
data[t] *= d[t % m] * e[t / m];
}
template <typename T>
void MultDiag(const T *d, const T *e, size_t m, size_t n,
typename MatrixDense<T>::Ord ord, T *data) {
if (ord == MatrixDense<T>::ROW) {
size_t grid_dim_row = cml::calc_grid_dim(m * n, cml::kBlockSize);
hipLaunchKernelGGL(( __MultRow), dim3(grid_dim_row), dim3(cml::kBlockSize), 0, 0, m, n, d, e, data);
} else {
size_t grid_dim_row = cml::calc_grid_dim(m * n, cml::kBlockSize);
hipLaunchKernelGGL(( __MultCol), dim3(grid_dim_row), dim3(cml::kBlockSize), 0, 0, m, n, d, e, data);
}
}
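// Both kernels above apply A := D * A * E elementwise: entry a_ij is scaled by d[i] * e[j];
// the index arithmetic (t / n, t % n versus t % m, t / m) recovers the row and column for
// row-major and column-major storage respectively.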
} // namespace
// Explicit template instantiation.
#if !defined(H2O4GPU_DOUBLE) || H2O4GPU_DOUBLE==1
template class MatrixDense<double>;
#endif
#if !defined(H2O4GPU_SINGLE) || H2O4GPU_SINGLE==1
template class MatrixDense<float>;
#endif
// upload data function. Uploads to a single GPU.
// mimics the otherwise-similar MatrixDense constructor, but does not take ownership of the uploaded data pointers (freeing is left to the caller, e.g. via modelFree1)
template <typename T>
int makePtr_dense(int sharedA, int me, int wDev, size_t m, size_t n, size_t mValid, const char ord, const T *data, const T *datay, const T *vdata, const T *vdatay, const T *weight, T **_data, T **_datay, T **_vdata, T **_vdatay, T **_weight){
checkwDev(wDev);
CUDACHECK(hipSetDevice(wDev));
DEBUG_FPRINTF(stderr,"makePtr_dense: %d\n",0);
#ifdef DEBUG
// CUDACHECK(hipSetDeviceFlags(hipDeviceMapHost)); // TODO: MapHostMemory
hipDeviceProp_t props;
CUDACHECK(hipGetDeviceProperties(&props, wDev));
DEBUG_FPRINTF(stderr,"Using: Compute %d.%d CUDA device: [%s] with id=%2d\n", props.major, props.minor, props.name,wDev);
#endif
// Copy Matrix to GPU (unlike CPU case, cannot copy just pointer because always assume input is CPU and output is GPU)
double t0 = timer<double>();
PUSH_RANGE("MDsendsource",MDsendsource,1);
if(data){
CUDACHECK(hipMalloc(_data, m * n * sizeof(T))); // allocate on GPU
CUDACHECK(hipMemcpy(*_data, data, m * n * sizeof(T),hipMemcpyHostToDevice)); // copy from orig CPU data to GPU
// fprintf(stderr,"_data: %p\n",(void*)*_data); fflush(stderr);
}
else *_data=NULL;
if(datay){
CUDACHECK(hipMalloc(_datay, m * sizeof(T))); // allocate on GPU
CUDACHECK(hipMemcpy(*_datay, datay, m * sizeof(T),hipMemcpyHostToDevice)); // copy from orig CPU data to GPU
// fprintf(stderr,"_datay: %p\n",(void*)*_datay); fflush(stderr);
}
else *_datay=NULL;
if(vdata){
CUDACHECK(hipMalloc(_vdata, mValid * n * sizeof(T))); // allocate on GPU
CUDACHECK(hipMemcpy(*_vdata, vdata, mValid * n * sizeof(T),hipMemcpyHostToDevice)); // copy from orig CPU data to GPU
// fprintf(stderr,"_vdata: %p\n",(void*)*_vdata); fflush(stderr);
}
else *_vdata=NULL;
if(vdatay){
CUDACHECK(hipMalloc(_vdatay, mValid * sizeof(T))); // allocate on GPU
CUDACHECK(hipMemcpy(*_vdatay, vdatay, mValid * sizeof(T),hipMemcpyHostToDevice)); // copy from orig CPU data to GPU
// fprintf(stderr,"_vdatay: %p\n",(void*)*_vdatay); fflush(stderr);
}
else *_vdatay=NULL;
// fprintf(stderr,"weight=%p\n",weight); fflush(stderr);
if(weight){
CUDACHECK(hipMalloc(_weight, m * sizeof(T))); // allocate on GPU
CUDACHECK(hipMemcpy(*_weight, weight, m * sizeof(T),hipMemcpyHostToDevice)); // copy from orig CPU data to GPU
}
else{
DEBUG_FPRINTF(stderr,"making up unity weights: %d\n",m);
CUDACHECK(hipMalloc(_weight, m * sizeof(T))); // allocate on GPU
thrust::device_ptr<T> dev_ptr=thrust::device_pointer_cast(static_cast<T*>(*_weight));
T fill_value=1.0;
thrust::fill(dev_ptr, dev_ptr + m, fill_value);
// fprintf(stderr,"_weight: %p\n",(void*)*_weight); fflush(stderr);
}
POP_RANGE("MDsendsource",MDsendsource,1);
double t2 = timer<double>();
DEBUG_FPRINTF(stdout,"Time to allocate and copy the data matrix on the GPU: %f\n", t2-t0);
hipDeviceSynchronize();
DEBUG_FPRINTF(stderr,"pointer data %p\n",(void*)*_data);
DEBUG_FPRINTF(stderr,"pointer datay %p\n",(void*)*_datay);
DEBUG_FPRINTF(stderr,"pointer vdata %p\n",(void*)*_vdata);
DEBUG_FPRINTF(stderr,"pointer vdaty %p\n",(void*)*_vdatay);
DEBUG_FPRINTF(stderr,"pointer weight %p\n",(void*)*_weight);
return(0);
}
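// Hedged usage sketch for the uploader above (illustration only; mTrain/n/mValid and the host
// arrays trainX/trainY/validX/validY are assumed to be set up by the caller). The returned
// device pointers are not freed by any MatrixDense destructor, so the caller releases them,
// e.g. via modelFree1 below:
#if(0)
float *d_x, *d_y, *d_vx, *d_vy, *d_w;
makePtr_dense<float>(0 /*sharedA*/, 0 /*me*/, 0 /*wDev*/, mTrain, n, mValid, 'r',
                     trainX, trainY, validX, validY, NULL /* NULL weight -> unit weights */,
                     &d_x, &d_y, &d_vx, &d_vy, &d_w);
#endif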
template int makePtr_dense<double>(int sharedA, int me, int wDev, size_t m, size_t n, size_t mValid, const char ord,
const double *data, const double *datay, const double *vdata, const double *vdatay, const double *weight,
double **_data, double **_datay, double **_vdata, double **_vdatay, double **_weight);
template int makePtr_dense<float>(int sharedA, int me, int wDev, size_t m, size_t n, size_t mValid, const char ord,
const float *data, const float *datay, const float *vdata, const float *vdatay, const float *weight,
float **_data, float **_datay, float **_vdata, float **_vdatay, float **_weight);
template <typename T>
int modelFree1(T *aptr){
if(aptr!=NULL){
// TODO: use T** instead everywhere to prevent a scenario when we keep an address of allocated memory
// TODO: flush cpu cache as it can be invoked by background GC thread
CUDACHECK(hipFree(aptr));
}
return(0);
}
template int modelFree1<float>(float *aptr);
template int modelFree1<double>(double *aptr);
} // namespace h2o4gpu
int modelfree1_double(double *aptr){
return h2o4gpu::modelFree1<double>(aptr);
}
int modelfree1_float(float *aptr){
return h2o4gpu::modelFree1<float>(aptr);
}
int make_ptr_double(int sharedA, int sourceme, int sourceDev, size_t mTrain, size_t n, size_t mValid, const char ord,
const double* trainX, const double* trainY, const double* validX, const double* validY, const double *weight,
double**a, double**b, double**c, double**d, double **e) {
return h2o4gpu::makePtr_dense<double>(sharedA, sourceme, sourceDev, mTrain, n, mValid, ord, trainX, trainY, validX, validY, weight, a, b, c, d, e);
}
int make_ptr_float(int sharedA, int sourceme, int sourceDev, size_t mTrain, size_t n, size_t mValid, const char ord,
const float* trainX, const float* trainY, const float* validX, const float* validY, const float *weight,
float**a, float**b, float**c, float**d, float **e) {
return h2o4gpu::makePtr_dense<float>(sharedA, sourceme, sourceDev, mTrain, n, mValid, ord, trainX, trainY, validX, validY, weight, a, b, c, d, e);
}
| 436f9f74fcd3dbdb43f6a7d0e2ef3becda3f12e9.cu | /*!
* Modifications Copyright 2017-2018 H2O.ai, Inc.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cusolverDn.h>
#include "cml/cml_blas.cuh"
#include "cml/cml_matrix.cuh"
#include "cml/cml_vector.cuh"
#include "equil_helper.cuh"
#include "matrix/matrix.h"
#include "matrix/matrix_dense.h"
#include "util.h"
#include "timer.h"
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/transform_reduce.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <thrust/extrema.h>
#include <thrust/pair.h>
#include <thrust/advance.h>
#include <cmath>
#include <limits>
#include <thrust/fill.h>
#include "../include/cuda_utils.h"
namespace h2o4gpu {
////////////////////////////////////////////////////////////////////////////////
////////////////////////////// Helper Functions ////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
namespace {
// File scoped constants.
const NormTypes kNormEquilibrate = kNorm2;
const NormTypes kNormNormalize = kNormFro;
template<typename T>
struct GpuData {
const T *orig_data; // pointer to data on CPU
cublasHandle_t handle; // handle for data on GPU
GpuData(const T *orig_data) : orig_data(orig_data) {
cublasCreate(&handle);
// fprintf(stderr,"HEREstart: %ld\n",handle); fflush(stderr);
DEBUG_CUDA_CHECK_ERR();
}
~GpuData() {
// fprintf(stderr,"HEREend: %ld\n",handle); fflush(stderr);
if(handle!=NULL) cublasDestroy(handle);
DEBUG_CUDA_CHECK_ERR();
}
};
cublasOperation_t OpToCublasOp(char trans) {
ASSERT(trans == 'n' || trans == 'N' || trans == 't' || trans == 'T');
return trans == 'n' || trans == 'N' ? CUBLAS_OP_N : CUBLAS_OP_T;
}
template <typename T>
T NormEst(cublasHandle_t hdl, NormTypes norm_type, const MatrixDense<T>& A);
template <typename T>
void MultDiag(const T *d, const T *e, size_t m, size_t n,
typename MatrixDense<T>::Ord ord, T *data);
} // namespace
////////////////////////////////////////////////////////////////////////////////
/////////////////////// MatrixDense Implementation /////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// original MatrixDense where only trainX and no trainY or validX or validY
// Used by elastic_net.cpp to pass CPU data and put on GPU
template <typename T>
MatrixDense<T>::MatrixDense(int sharedA, int wDev, char ord, size_t m, size_t n, const T *data)
: Matrix<T>(m, n, 0), _sharedA(sharedA), _wDev(wDev), _datatype(0), _dopredict(0), _data(0), _de(0) {
checkwDev(_wDev);
CUDACHECK(cudaSetDevice(_wDev));
_me=_wDev; // assume thread same as wDev if not given
_datay=NULL;
_vdata=NULL;
_vdatay=NULL;
_weight=NULL;
ASSERT(ord == 'r' || ord == 'R' || ord == 'c' || ord == 'C');
_ord = (ord == 'r' || ord == 'R') ? ROW : COL;
DEBUG_FPRINTF(stderr,"MatrixDense1: ord=%c m=%d n=%d\n",ord,(int)m,(int)n);fflush(stderr);
#ifdef DEBUG
// CUDACHECK(cudaSetDeviceFlags(cudaDeviceMapHost)); // TODO: MapHostMemory
cudaDeviceProp props;
CUDACHECK(cudaGetDeviceProperties(&props, _wDev));
fprintf(stderr,"Using: Compute %d.%d CUDA device: [%s] with id=%2d\n", props.major, props.minor, props.name,wDev); fflush(stderr);
#endif
// Set GPU specific _info.
PUSH_RANGE("MDnew",MDnew,1);
GpuData<T> *info = new GpuData<T>(data); // new structure (holds pointer to data and GPU handle)
this->_info = reinterpret_cast<void*>(info);
GpuData<T> *infoy = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_infoy = reinterpret_cast<void*>(infoy);
GpuData<T> *vinfo = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_vinfo = reinterpret_cast<void*>(vinfo);
GpuData<T> *vinfoy = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_vinfoy = reinterpret_cast<void*>(vinfoy);
GpuData<T> *weightinfo = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_weightinfo = reinterpret_cast<void*>(weightinfo);
POP_RANGE("MDnew",MDnew,1);
if(!this->_done_alloc){
this->_done_alloc = true;
    // unlike the CPU case, the input pointer is always on the CPU, so this function always has to allocate on the GPU; sharedA therefore cannot be used to share the pointer and skip the copy as in the CPU case
// Copy Matrix to GPU.
PUSH_RANGE("MDsend",MDsend,1);
// GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info); // cast void -> GpuData
double t0 = timer<double>();
cudaMalloc(&_data, this->_m * this->_n * sizeof(T)); // allocate on GPU
double t1 = timer<double>();
cudaMemcpy(_data, info->orig_data, this->_m * this->_n * sizeof(T),cudaMemcpyHostToDevice); // copy from orig CPU data to GPU
double t2 = timer<double>();
#ifdef DEBUG
printf("Time to allocate the data matrix on the GPU: %f\n", t1-t0);
printf("Time to copy the data matrix to the GPU : %f\n", t2-t1);
#endif
cudaMalloc(&_de, (m + n) * sizeof(T));
thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(static_cast<T*>(&_de[0]));
T fill_value=0.0;
thrust::fill(dev_ptr, dev_ptr + (m + n), fill_value);
if(sharedA>0){
Init(); // does nothing right now
Equil(1); // JONTODO: Hack for now. Need to pass equil
}
POP_RANGE("MDsend",MDsend,1);
}
}
template <typename T>
MatrixDense<T>::MatrixDense(char ord, size_t m, size_t n, const T *data)
: MatrixDense<T>(0, 0, ord, m, n, data){} // assume sharedA=0 and thread=wDev=0 if not given
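// Minimal usage sketch for the simple constructor above (illustration only; m, n and the
// row-major host buffer h_data are hypothetical and assumed to be set up by the caller):
#if(0)
std::vector<float> h_data(m * n);                 // training matrix, row-major
MatrixDense<float> A('r', m, n, h_data.data());   // copies the data onto GPU 0
#endif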
template <typename T>
MatrixDense<T>::MatrixDense(int sharedA, int wDev, int datatype, char ord, size_t m, size_t n, T *data)
: Matrix<T>(m, n, 0), _sharedA(sharedA), _wDev(wDev), _datatype(datatype), _dopredict(0), _data(0),_de(0) {
checkwDev(_wDev);
CUDACHECK(cudaSetDevice(_wDev));
_me=_wDev; // assume thread=wDev if not given
_datay=NULL;
_vdata=NULL;
_vdatay=NULL;
_weight=NULL;
ASSERT(ord == 'r' || ord == 'R' || ord == 'c' || ord == 'C');
_ord = (ord == 'r' || ord == 'R') ? ROW : COL;
DEBUG_FPRINTF(stderr,"MatrixDense2: ord=%c m=%d n=%d\n",ord,(int)m,(int)n);fflush(stderr);
#ifdef DEBUG
// CUDACHECK(cudaSetDeviceFlags(cudaDeviceMapHost)); // TODO: MapHostMemory
cudaDeviceProp props;
CUDACHECK(cudaGetDeviceProperties(&props, _wDev));
fprintf(stderr,"Using: Compute %d.%d CUDA device: [%s] with id=%2d\n", props.major, props.minor, props.name,wDev); fflush(stderr);
#endif
if(datatype==1){
// input data pointer is already on GPU on this wDev, so just copy pointer
// no info->orig_data, so send 0 to GpuData
PUSH_RANGE("MDnew",MDnew,1);
GpuData<T> *info = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_info = reinterpret_cast<void*>(info);
POP_RANGE("MDnew",MDnew,1);
GpuData<T> *infoy = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_infoy = reinterpret_cast<void*>(infoy);
GpuData<T> *vinfo = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_vinfo = reinterpret_cast<void*>(vinfo);
GpuData<T> *vinfoy = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_vinfoy = reinterpret_cast<void*>(vinfoy);
GpuData<T> *weightinfo = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_weightinfo = reinterpret_cast<void*>(weightinfo);
// source pointer is on this GPU
// just copy GPU pointer
_data = data;
if(!this->_done_alloc){
this->_done_alloc = true;
cudaMalloc(&_de, (m + n) * sizeof(T));
thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(static_cast<T*>(&_de[0]));
T fill_value=0.0;
thrust::fill(dev_ptr, dev_ptr + (m + n), fill_value);
Init(); // does nothing right now
Equil(1); // JONTODO: Hack for now. Need to pass equil
}
}
else{
PUSH_RANGE("MDnew",MDnew,1);
GpuData<T> *info = new GpuData<T>(data); // new structure (holds pointer to data and GPU handle)
this->_info = reinterpret_cast<void*>(info);
GpuData<T> *infoy = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_infoy = reinterpret_cast<void*>(infoy);
GpuData<T> *vinfo = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_vinfo = reinterpret_cast<void*>(vinfo);
GpuData<T> *vinfoy = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_vinfoy = reinterpret_cast<void*>(vinfoy);
GpuData<T> *weightinfo = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_weightinfo = reinterpret_cast<void*>(weightinfo);
POP_RANGE("MDnew",MDnew,1);
if(!this->_done_alloc){
this->_done_alloc = true;
// Unlike CPU case, can't pointer copy as going from CPU to GPU
// Copy CPU Matrix to GPU.
PUSH_RANGE("MDsend",MDsend,1);
// GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info); // cast void -> GpuData
double t0 = timer<double>();
cudaMalloc(&_data, this->_m * this->_n * sizeof(T)); // allocate on GPU
double t1 = timer<double>();
cudaMemcpy(_data, info->orig_data, this->_m * this->_n * sizeof(T),cudaMemcpyHostToDevice); // copy from orig CPU data to GPU
cudaMalloc(&_de, (m + n) * sizeof(T));
thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(static_cast<T*>(&_de[0]));
T fill_value=0.0;
thrust::fill(dev_ptr, dev_ptr + (m + n), fill_value);
if(sharedA>0){
Init(); // does nothing right now
Equil(1); // JONTODO: Hack for now. Need to pass equil
}
double t2 = timer<double>();
#ifdef DEBUG
printf("Time to allocate the data matrix on the GPU: %f\n", t1-t0);
printf("Time to copy the data matrix to the GPU : %f\n", t2-t1);
#endif
POP_RANGE("MDsend",MDsend,1);
}
}
}
// like original MatrixDense, but also feed in CPU data for trainY, validX, and validY
// Used by elastic_net_ptr.cpp to pass CPU data and put on GPU
template <typename T>
MatrixDense<T>::MatrixDense(int sharedA, int me, int wDev, char ord, size_t m, size_t n, size_t mValid, const T *data, const T *datay, const T *vdata, const T *vdatay, const T *weight)
: Matrix<T>(m, n, mValid), _sharedA(sharedA), _me(me), _wDev(wDev), _datatype(0), _dopredict(0), _data(0), _datay(0), _vdata(0), _vdatay(0), _weight(0), _de(0) {
checkwDev(_wDev);
CUDACHECK(cudaSetDevice(_wDev));
ASSERT(ord == 'r' || ord == 'R' || ord == 'c' || ord == 'C');
_ord = (ord == 'r' || ord == 'R') ? ROW : COL;
DEBUG_FPRINTF(stderr,"MatrixDense3: ord=%c m=%d n=%d mValid=%d\n",ord,(int)m,(int)n,int(mValid));fflush(stderr);
#ifdef DEBUG
// CUDACHECK(cudaSetDeviceFlags(cudaDeviceMapHost)); // TODO: MapHostMemory
cudaDeviceProp props;
CUDACHECK(cudaGetDeviceProperties(&props, _wDev));
fprintf(stderr,"Using: Compute %d.%d CUDA device: [%s] with id=%2d\n", props.major, props.minor, props.name,wDev); fflush(stderr);
#endif
// source pointer is on CPU
// Set GPU specific _info.
PUSH_RANGE("MDnew",MDnew,1);
GpuData<T> *info = new GpuData<T>(data); // new structure (holds pointer to data and GPU handle)
GpuData<T> *infoy = new GpuData<T>(datay); // new structure (holds pointer to data and GPU handle)
GpuData<T> *vinfo = new GpuData<T>(vdata); // new structure (holds pointer to data and GPU handle)
GpuData<T> *vinfoy = new GpuData<T>(vdatay); // new structure (holds pointer to data and GPU handle)
GpuData<T> *weightinfo = new GpuData<T>(weight); // new structure (holds pointer to data and GPU handle)
this->_info = reinterpret_cast<void*>(info);
this->_infoy = reinterpret_cast<void*>(infoy);
this->_vinfo = reinterpret_cast<void*>(vinfo);
this->_vinfoy = reinterpret_cast<void*>(vinfoy);
this->_weightinfo = reinterpret_cast<void*>(weightinfo);
POP_RANGE("MDnew",MDnew,1);
if(!this->_done_alloc){
this->_done_alloc = true;
// Unlike CPU case, can't pointer copy even if sharedA!=0
// Copy Matrix to GPU.
PUSH_RANGE("MDsend",MDsend,1);
// GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info); // cast void -> GpuData
double t0 = timer<double>();
cudaMalloc(&_data, this->_m * this->_n * sizeof(T)); // allocate on GPU
cudaMalloc(&_datay, this->_m * sizeof(T)); // allocate on GPU
cudaMalloc(&_vdata, this->_mvalid * this->_n * sizeof(T)); // allocate on GPU
cudaMalloc(&_vdatay, this->_mvalid * sizeof(T)); // allocate on GPU
cudaMalloc(&_weight, this->_m * sizeof(T)); // allocate on GPU
double t1 = timer<double>();
cudaMemcpy(_data, info->orig_data, this->_m * this->_n * sizeof(T),cudaMemcpyHostToDevice); // copy from orig CPU data to GPU
if(infoy->orig_data){
cudaMemcpy(_datay, infoy->orig_data, this->_m * sizeof(T),cudaMemcpyHostToDevice); // copy from orig CPU data to GPU
_dopredict=0;
}
else{
_dopredict=1;
}
if(vinfo->orig_data){
cudaMemcpy(_vdata, vinfo->orig_data, this->_mvalid * this->_n * sizeof(T),cudaMemcpyHostToDevice); // copy from orig CPU data to GPU
}
else{
if(this->_mvalid>0){ fprintf(stderr,"vinfo->orig_data NULL but this->_mvalid>0\n"); fflush(stderr); exit(1); }
}
if(vinfoy->orig_data){
cudaMemcpy(_vdatay, vinfoy->orig_data, this->_mvalid * sizeof(T),cudaMemcpyHostToDevice); // copy from orig CPU data to GPU
}
else{
if(this->_mvalid>0){ fprintf(stderr,"vinfoy->orig_data NULL but this->_mvalid>0\n"); fflush(stderr); exit(1); }
}
if(weightinfo->orig_data){
cudaMemcpy(_weight, weightinfo->orig_data, this->_m * sizeof(T),cudaMemcpyHostToDevice); // copy from orig CPU data to GPU
}
else{// if no weights, set as unity weights
thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(static_cast<T*>(&_weight[0]));
T fill_value=1.0;
thrust::fill(dev_ptr, dev_ptr + m, fill_value);
}
cudaMalloc(&_de, (m + n) * sizeof(T));
thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(static_cast<T*>(&_de[0]));
T fill_value=0.0;
thrust::fill(dev_ptr, dev_ptr + (m + n), fill_value);
if(sharedA>0){
Init(); // does nothing right now
Equil(1); // JONTODO: Hack for now. Need to pass equil
}
double t2 = timer<double>();
#ifdef DEBUG
printf("Time to allocate the data matrix on the GPU: %f\n", t1-t0);
printf("Time to copy the data matrix to the GPU : %f\n", t2-t1);
#endif
POP_RANGE("MDsend",MDsend,1);
}
}
template <typename T>
MatrixDense<T>::MatrixDense(int wDev, char ord, size_t m, size_t n, size_t mValid, const T *data, const T *datay, const T *vdata, const T *vdatay, const T *weight)
: MatrixDense<T>(0,wDev,wDev,ord,m,n,mValid,data,datay,vdata,vdatay,weight){} // assume sharedA=0 and source thread=wDev if not given
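// Example (editorial, illustrative only; the host arrays trainX/trainY/validX/validY
// are assumptions): upload train and validation sets in one call.
//
//   MatrixDense<double> A(/*wDev=*/0, 'r', m, n, mValid,
//                         trainX, trainY, validX, validY, /*weight=*/nullptr);
//   // weight == nullptr makes the constructor fill unity weights on the GPU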
// like original MatrixDense, but also feed in CPU data for trainY, validX, and validY
// Used by elastic_net_ptr.cpp to pass CPU data and put on GPU
// datatype=0: CPU pointer to data
// datatype=1: GPU pointer to data
template <typename T>
MatrixDense<T>::MatrixDense(int sharedA, int me, int wDev, int datatype, char ord, size_t m, size_t n, size_t mValid, T *data, T *datay, T *vdata, T *vdatay, T *weight)
: Matrix<T>(m, n, mValid), _sharedA(sharedA), _me(me), _wDev(wDev), _datatype(datatype), _dopredict(0), _data(0), _datay(0), _vdata(0), _vdatay(0), _weight(0), _de(0) {
checkwDev(_wDev);
CUDACHECK(cudaSetDevice(_wDev));
DEBUG_FPRINTF(stderr,"%d\n", ord == 'r');
DEBUG_FPRINTF(stderr,"%d\n", ord == 'c');
DEBUG_FPRINTF(stderr,"ord=%c m=%d n=%d mValid=%d\n",ord,(int)m,(int)n,int(mValid));
DEBUG_FPRINTF(stderr,"MatrixDense4: ord=%c m=%d n=%d mValid=%d\n",ord,(int)m,(int)n,int(mValid));
ASSERT(ord == 'r' || ord == 'R' || ord == 'c' || ord == 'C');
_ord = (ord == 'r' || ord == 'R') ? ROW : COL;
#ifdef DEBUG
// CUDACHECK(cudaSetDeviceFlags(cudaDeviceMapHost)); // TODO: MapHostMemory
cudaDeviceProp props;
CUDACHECK(cudaGetDeviceProperties(&props, _wDev));
DEBUG_FPRINTF(stderr,"Using: Compute %d.%d CUDA device: [%s] with id=%2d\n", props.major, props.minor, props.name,wDev);
#endif
if(datatype==1){
// source pointer is on GPU already
// Set GPU specific _info.
PUSH_RANGE("MDnew",MDnew,1);
GpuData<T> *info = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
GpuData<T> *infoy = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
GpuData<T> *vinfo = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
GpuData<T> *vinfoy = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
GpuData<T> *weightinfo = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_info = reinterpret_cast<void*>(info);
this->_infoy = reinterpret_cast<void*>(infoy);
this->_vinfo = reinterpret_cast<void*>(vinfo);
this->_vinfoy = reinterpret_cast<void*>(vinfoy);
this->_weightinfo = reinterpret_cast<void*>(weightinfo);
POP_RANGE("MDnew",MDnew,1);
// Just copy GPU pointer
_data = data;
_datay = datay;
_vdata = vdata;
_vdatay = vdatay;
_weight = weight;
if(_datay) _dopredict=0;
else _dopredict=1;
if(_weight==NULL){
DEBUG_FPRINTF(stderr,"datatype=1: making up unity weights: %d %p\n",m,&_weight);
CUDACHECK(cudaMalloc(&_weight, m * sizeof(T))); // allocate on GPU
thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(static_cast<T*>(&_weight[0]));
T fill_value=1.0;
thrust::fill(dev_ptr, dev_ptr + m, fill_value);
}
if(!this->_done_alloc){
CUDACHECK(cudaMalloc(&_de, (m + n) * sizeof(T)));
CUDACHECK(cudaDeviceSynchronize());
CUDACHECK(cudaGetLastError());
const thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(_de);
const T fill_value=0.0;
thrust::fill_n(dev_ptr, m + n, fill_value);
if(sharedA>0){
Init(); // does nothing right now
Equil(1); // JONTODO: Hack for now. Need to pass equil
}
this->_done_alloc = true;
}
}
else{
// source pointer is on CPU
// Set GPU specific _info.
PUSH_RANGE("MDnew",MDnew,1);
GpuData<T> *info = new GpuData<T>(data); // new structure (holds pointer to data and GPU handle)
GpuData<T> *infoy = new GpuData<T>(datay); // new structure (holds pointer to data and GPU handle)
GpuData<T> *vinfo = new GpuData<T>(vdata); // new structure (holds pointer to data and GPU handle)
GpuData<T> *vinfoy = new GpuData<T>(vdatay); // new structure (holds pointer to data and GPU handle)
GpuData<T> *weightinfo = new GpuData<T>(weight); // new structure (holds pointer to data and GPU handle)
this->_info = reinterpret_cast<void*>(info);
this->_infoy = reinterpret_cast<void*>(infoy);
this->_vinfo = reinterpret_cast<void*>(vinfo);
this->_vinfoy = reinterpret_cast<void*>(vinfoy);
this->_weightinfo = reinterpret_cast<void*>(weightinfo);
POP_RANGE("MDnew",MDnew,1);
if(!this->_done_alloc){
this->_done_alloc = true;
// Copy CPU Matrix to GPU.
PUSH_RANGE("MDsend",MDsend,1);
// GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info); // cast void -> GpuData
double t0 = timer<double>();
cudaMalloc(&_data, this->_m * this->_n * sizeof(T)); // allocate on GPU
cudaMalloc(&_datay, this->_m * sizeof(T)); // allocate on GPU
cudaMalloc(&_vdata, this->_mvalid * this->_n * sizeof(T)); // allocate on GPU
cudaMalloc(&_vdatay, this->_mvalid * sizeof(T)); // allocate on GPU
cudaMalloc(&_weight, this->_m * sizeof(T)); // allocate on GPU
double t1 = timer<double>();
cudaMemcpy(_data, info->orig_data, this->_m * this->_n * sizeof(T),cudaMemcpyHostToDevice); // copy from orig CPU data to GPU
if(infoy->orig_data){
cudaMemcpy(_datay, infoy->orig_data, this->_m * sizeof(T),cudaMemcpyHostToDevice); // copy from orig CPU data to GPU
_dopredict=0;
}
else{
_dopredict=1;
}
cudaMemcpy(_vdata, vinfo->orig_data, this->_mvalid * this->_n * sizeof(T),cudaMemcpyHostToDevice); // copy from orig CPU data to GPU
cudaMemcpy(_vdatay, vinfoy->orig_data, this->_mvalid * sizeof(T),cudaMemcpyHostToDevice); // copy from orig CPU data to GPU
if(weightinfo->orig_data){
cudaMemcpy(_weight, weightinfo->orig_data, this->_m * sizeof(T),cudaMemcpyHostToDevice); // copy from orig CPU data to GPU
}
else{
DEBUG_FPRINTF(stderr,"datatype=0: making up unity weights: %d\n",m);
CUDACHECK(cudaMalloc(&_weight, this->_m * sizeof(T))); // allocate on GPU
thrust::device_ptr<T> dev_ptr=thrust::device_pointer_cast(static_cast<T*>(_weight));
T fill_value=1.0;
thrust::fill(dev_ptr, dev_ptr + this->_m, fill_value);
}
cudaMalloc(&_de, (m + n) * sizeof(T));
thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(static_cast<T*>(&_de[0]));
T fill_value=0.0;
thrust::fill(dev_ptr, dev_ptr + (m + n), fill_value);
if(sharedA>0){
Init(); // does nothing right now
Equil(1); // JONTODO: Hack for now. Need to pass equil
}
double t2 = timer<double>();
#ifdef DEBUG
printf("Time to allocate the data matrix on the GPU: %f\n", t1-t0);
printf("Time to copy the data matrix to the GPU : %f\n", t2-t1);
#endif
POP_RANGE("MDsend",MDsend,1);
}
}
}
template <typename T>
MatrixDense<T>::MatrixDense(int wDev, int datatype, char ord, size_t m, size_t n, size_t mValid, T *data, T *datay, T *vdata, T *vdatay, T *weight)
: MatrixDense<T>(0,wDev,wDev,datatype,ord,m,n,mValid,data,datay,vdata,vdatay,weight){} // assume sharedA=0 and thread=wDev if not given
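// Example (editorial, illustrative only; d_trainX etc. are assumed to be device
// pointers already resident on GPU wDev): with datatype==1 the constructor only
// aliases the pointers, with datatype==0 it would treat them as host memory and copy.
//
//   MatrixDense<float> A(/*wDev=*/0, /*datatype=*/1, 'r', m, n, mValid,
//                        d_trainX, d_trainY, d_validX, d_validY, d_weight);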
// Construct a MatrixDense from an existing MatrixDense A (which carries the CPU-side info),
// replicating its data from one GPU onto another GPU as needed.
// Used by elastic_net_ptr.cpp inside the openmp loop for each core
template <typename T>
MatrixDense<T>::MatrixDense(int sharedA, int me, int wDev, const MatrixDense<T>& A)
: Matrix<T>(A._m, A._n, A._mvalid), _sharedA(sharedA), _me(me), _wDev(wDev), _data(0),_de(0), _ord(A._ord) {
checkwDev(_wDev);
CUDACHECK(cudaSetDevice(_wDev));
DEBUG_FPRINTF(stderr,"MatrixDense5: ord=%c m=%d n=%d mValid=%d\n",A._ord,A._m,A._n,A._mvalid);
PUSH_RANGE("MDnew",MDnew,2);
GpuData<T> *info_A = reinterpret_cast<GpuData<T>*>(A._info); // cast from void to GpuData
GpuData<T> *infoy_A = reinterpret_cast<GpuData<T>*>(A._infoy); // cast from void to GpuData
GpuData<T> *vinfo_A = reinterpret_cast<GpuData<T>*>(A._vinfo); // cast from void to GpuData
GpuData<T> *vinfoy_A = reinterpret_cast<GpuData<T>*>(A._vinfoy); // cast from void to GpuData
GpuData<T> *weightinfo_A = reinterpret_cast<GpuData<T>*>(A._weightinfo); // cast from void to GpuData
GpuData<T> *info;
GpuData<T> *infoy;
GpuData<T> *vinfo;
GpuData<T> *vinfoy;
GpuData<T> *weightinfo;
if(info_A->orig_data) info = new GpuData<T>(info_A->orig_data); // create new GpuData structure with point to CPU data
else info = new GpuData<T>(0); // create new GpuData structure with point to CPU data
if(infoy_A->orig_data) infoy = new GpuData<T>(infoy_A->orig_data); // create new GpuData structure with point to CPU data
else infoy = new GpuData<T>(0); // create new GpuData structure with point to CPU data
if(vinfo_A->orig_data) vinfo = new GpuData<T>(vinfo_A->orig_data); // create new GpuData structure with point to CPU data
else vinfo = new GpuData<T>(0); // create new GpuData structure with point to CPU data
if(vinfoy_A->orig_data) vinfoy = new GpuData<T>(vinfoy_A->orig_data); // create new GpuData structure with point to CPU data
else vinfoy = new GpuData<T>(0); // create new GpuData structure with point to CPU data
if(weightinfo_A->orig_data) weightinfo = new GpuData<T>(weightinfo_A->orig_data); // create new GpuData structure with point to CPU data
else weightinfo = new GpuData<T>(0); // create new GpuData structure with point to CPU data
this->_info = reinterpret_cast<void*>(info); // back to cast as void
this->_infoy = reinterpret_cast<void*>(infoy); // back to cast as void
this->_vinfo = reinterpret_cast<void*>(vinfo); // back to cast as void
this->_vinfoy = reinterpret_cast<void*>(vinfoy); // back to cast as void
this->_weightinfo = reinterpret_cast<void*>(weightinfo); // back to cast as void
POP_RANGE("MDnew",MDnew,2);
if(!this->_done_alloc){
this->_done_alloc = true;
if(A._wDev == _wDev && A._me == _me && (A._sharedA==0 || _sharedA==0)){ // if on same device and same thread, just copy pointer
DEBUG_FPRINTF(stderr,"ATYPE%d\n",0);
_data = A._data;
_datay = A._datay;
_vdata = A._vdata;
_vdatay = A._vdatay;
_weight = A._weight;
_de = A._de;
_dopredict = A._dopredict;
// Init();
// this->_done_equil=1;
}
else if(A._wDev == _wDev && A._sharedA!=0 && _sharedA!=0){ // if on same device and sharing memory, then just copy pointer
DEBUG_FPRINTF(stderr,"ATYPE%d\n",1);
_data = A._data;
_datay = A._datay;
_vdata = A._vdata;
_vdatay = A._vdatay;
_weight = A._weight;
_de = A._de;
_dopredict = A._dopredict;
Init();
this->_done_equil=1;
}
else{
DEBUG_FPRINTF(stderr,"ATYPE%d\n",2);
// Copy Matrix to from source GPU to this GPU
PUSH_RANGE("MDcopy",MDcopy,1);
//GpuData<T> *info = reinterpret_cast<GpuData<T>*>(_info); // cast void -> GpuData
double t0 = timer<double>();
if(A._data) cudaMalloc(&_data, A._m * A._n * sizeof(T)); // allocate on GPU
if(A._datay) cudaMalloc(&_datay, A._m * sizeof(T)); // allocate on GPU
if(A._vdata) cudaMalloc(&_vdata, A._mvalid * A._n * sizeof(T)); // allocate on GPU
if(A._vdatay) cudaMalloc(&_vdatay, A._mvalid * sizeof(T)); // allocate on GPU
if(A._weight) cudaMalloc(&_weight, A._m * sizeof(T)); // allocate on GPU
double t1 = timer<double>();
if(A._data) cudaMemcpyPeer(_data, _wDev, A._data, A._wDev, A._m * A._n * sizeof(T)); // dest: _data destid: _wDev source: A._data sourceid: A._wDev
if(A._datay){
cudaMemcpyPeer(_datay, _wDev, A._datay, A._wDev, A._m * sizeof(T)); // dest: _data destid: _wDev source: A._data sourceid: A._wDev
_dopredict=0;
}
else{
_dopredict=1;
}
if(A._vdata) cudaMemcpyPeer(_vdata, _wDev, A._vdata, A._wDev, A._mvalid * A._n * sizeof(T)); // dest: _data destid: _wDev source: A._data sourceid: A._wDev
if(A._vdatay) cudaMemcpyPeer(_vdatay, _wDev, A._vdatay, A._wDev, A._mvalid * sizeof(T)); // dest: _data destid: _wDev source: A._data sourceid: A._wDev
if(A._weight) cudaMemcpyPeer(_weight, _wDev, A._weight, A._wDev, A._m * sizeof(T)); // dest: _data destid: _wDev source: A._data sourceid: A._wDev
      if(A._de){ cudaMalloc(&_de, (A._m + A._n) * sizeof(T)); cudaMemcpyPeer(_de, _wDev, A._de, A._wDev, (A._m + A._n) * sizeof(T)); } // guard both calls: the peer copy must not run when A._de is NULL
if(sharedA>0){
Init();
Equil(1);
}
double t2 = timer<double>();
#ifdef DEBUG
printf("Time to allocate the data matrix on the GPU: %f\n", t1-t0);
printf("Time to copy the data matrix to the GPU : %f\n", t2-t1);
#endif
POP_RANGE("MDcopy",MDcopy,1);
}
}
}
template <typename T>
MatrixDense<T>::MatrixDense(int me, int wDev, const MatrixDense<T>& A)
: MatrixDense<T>(0, me, wDev, A){} // then assume not sharing memory
template <typename T>
MatrixDense<T>::MatrixDense(int wDev, const MatrixDense<T>& A)
: MatrixDense<T>(wDev, wDev, A){} // then assume thread=wDev for the new matrix (i.e. not input A)
template <typename T>
MatrixDense<T>::MatrixDense(const MatrixDense<T>& A)
: MatrixDense<T>(A._wDev, A){} // then assume same device as input A
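// Example (editorial, illustrative only): replicate an existing matrix onto a
// second GPU, e.g. inside an OpenMP loop over devices. Buffers are peer-copied
// with cudaMemcpyPeer unless device, thread and sharedA settings allow aliasing.
//
//   MatrixDense<float> A('r', m, n, hostX);   // lives on GPU 0 (hostX assumed)
//   MatrixDense<float> B(/*wDev=*/1, A);      // copies A's device buffers to GPU 1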
template <typename T>
MatrixDense<T>::~MatrixDense() {
  // return;//TODO: Some destructor issue FIXME. Segfaults after adding weights; cause not yet found.
checkwDev(_wDev);
CUDACHECK(cudaSetDevice(_wDev));
if(1){
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
GpuData<T> *infoy = reinterpret_cast<GpuData<T>*>(this->_infoy);
GpuData<T> *vinfo = reinterpret_cast<GpuData<T>*>(this->_vinfo);
GpuData<T> *vinfoy = reinterpret_cast<GpuData<T>*>(this->_vinfoy);
GpuData<T> *weightinfo = reinterpret_cast<GpuData<T>*>(this->_weightinfo);
    if(info)       { delete info;       this->_info       = 0; }
    if(infoy)      { delete infoy;      this->_infoy      = 0; }
    if(vinfo)      { delete vinfo;      this->_vinfo      = 0; }
    if(vinfoy)     { delete vinfoy;     this->_vinfoy     = 0; }
    if(weightinfo) { delete weightinfo; this->_weightinfo = 0; }
}
// fprintf(stderr,"HERE1\n"); fflush(stderr);
if(0){ // Note that this frees these pointers as soon as MatrixDense constructor goes out of scope,
// and might want more fine-grained control over GPU memory if inside (say) high-level python API
// If 0 is used, then need to ensure user calls a finish() or something to free memory. If 0, also
// allows user to call (say) fit() or fitptr() multiple times
if (this->_done_init && _data) {
// fprintf(stderr,"Freeing _data: %p\n",(void*)_data); fflush(stderr);
cudaFree(_data);
this->_data = 0;
DEBUG_CUDA_CHECK_ERR();
}
// fprintf(stderr,"HERE2\n"); fflush(stderr);
if (this->_done_init && _datay) {
// fprintf(stderr,"Freeing _datay: %p\n",(void*)_datay); fflush(stderr);
cudaFree(_datay);
this->_datay = 0;
DEBUG_CUDA_CHECK_ERR();
}
// fprintf(stderr,"HERE3\n"); fflush(stderr);
if (this->_done_init && _vdata) {
// fprintf(stderr,"Freeing _vdata: %p\n",(void*)_vdata); fflush(stderr);
cudaFree(_vdata);
this->_vdata = 0;
DEBUG_CUDA_CHECK_ERR();
}
// fprintf(stderr,"HERE4\n"); fflush(stderr);
if (this->_done_init && _vdatay) {
// fprintf(stderr,"Freeing _vdatay: %p\n",(void*)_vdatay); fflush(stderr);
cudaFree(_vdatay);
this->_vdatay = 0;
DEBUG_CUDA_CHECK_ERR();
}
// fprintf(stderr,"HERE5\n"); fflush(stderr);
if (this->_done_init && _weight) {
// fprintf(stderr,"Freeing _weight: %p\n",(void*)_weight); fflush(stderr);
cudaFree(_weight);
this->_weight = 0;
DEBUG_CUDA_CHECK_ERR();
}
}
// Risky, but looks like we don't use code that may become broken
// fprintf(stderr,"HERE6\n"); fflush(stderr);
  if(this->_done_init && _de && !_sharedA){ // JONTODO: when sharedA=1, only free on the source thread (_me) and source device (_wDev); could store the source thread alongside the data and free only when running on it
    //    fprintf(stderr,"Freeing _de: %p\n",(void*)_de); fflush(stderr);
cudaFree(_de);
this->_de=0;
DEBUG_CUDA_CHECK_ERR();
}
}
template <typename T>
int MatrixDense<T>::Init() {
DEBUG_EXPECT(!this->_done_init);
if (this->_done_init)
return 1;
this->_done_init = true;
CUDACHECK(cudaSetDevice(_wDev));
PUSH_RANGE("MDinit",MDinit,1);
POP_RANGE("MDinit",MDinit,1);
DEBUG_CUDA_CHECK_ERR();
return 0;
}
template <typename T>
int MatrixDense<T>::GetTrainX(int datatype, size_t size, T**data) const {
CUDACHECK(cudaSetDevice(_wDev));
if(_data){
if(datatype==1){
cudaMemcpy(*data, _data, size* sizeof(T),cudaMemcpyDeviceToHost);
CUDA_CHECK_ERR();
}
else{
std::memcpy(*data, _data, size * sizeof(T));
}
return(0);
}
else return(1);
}
template <typename T>
int MatrixDense<T>::GetTrainY(int datatype, size_t size, T**data) const {
CUDACHECK(cudaSetDevice(_wDev));
if(_datay){
if(datatype==1){
cudaMemcpy(*data, _datay, size* sizeof(T),cudaMemcpyDeviceToHost);
CUDA_CHECK_ERR();
}
else{
std::memcpy(*data, _datay, size * sizeof(T));
}
return(0);
}
else return(1);
}
template <typename T>
int MatrixDense<T>::GetValidX(int datatype, size_t size, T**data) const {
CUDACHECK(cudaSetDevice(_wDev));
if(_vdata){
if(datatype==1){
cudaMemcpy(*data, _vdata, size* sizeof(T),cudaMemcpyDeviceToHost);
CUDA_CHECK_ERR();
}
else{
std::memcpy(*data, _vdata, size * sizeof(T));
}
return(0);
}
else return(1);
}
template <typename T>
int MatrixDense<T>::GetValidY(int datatype, size_t size, T**data) const {
CUDACHECK(cudaSetDevice(_wDev));
if(_vdatay){
if(datatype==1){
cudaMemcpy(*data, _vdatay, size* sizeof(T),cudaMemcpyDeviceToHost);
CUDA_CHECK_ERR();
}
else{
std::memcpy(*data, _vdatay, size * sizeof(T));
}
return(0);
}
else return(1);
}
template <typename T>
int MatrixDense<T>::GetWeight(int datatype, size_t size, T**data) const {
CUDACHECK(cudaSetDevice(_wDev));
if(_weight){
if(datatype==1){
cudaMemcpy(*data, _weight, size* sizeof(T),cudaMemcpyDeviceToHost);
CUDA_CHECK_ERR();
}
else{
std::memcpy(*data, _weight, size * sizeof(T));
}
return(0);
}
else return(1);
}
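// Example (editorial, illustrative only): pull the training matrix back to a host
// buffer. With datatype==1 the accessor issues a device-to-host cudaMemcpy into the
// caller-provided pointer; the vector below is an assumption for the sketch.
//
//   std::vector<float> hostX(m * n);
//   float *dst = hostX.data();
//   int rc = A.GetTrainX(/*datatype=*/1, m * n, &dst);  // 0 on success, 1 if no data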
template <typename T>
int MatrixDense<T>::Mul(char trans, T alpha, const T *x, T beta, T *y) const {
DEBUG_EXPECT(this->_done_init);
if (!this->_done_init)
return 1;
CUDACHECK(cudaSetDevice(_wDev));
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
cublasHandle_t hdl = info->handle;
const cml::vector<T> x_vec = cml::vector_view_array<T>(x, this->_n);
cml::vector<T> y_vec = cml::vector_view_array<T>(y, this->_m);
// Performs the matrix-vector operations y := alpha*A*x + beta*y or y := alpha*A'*x + beta*y where alpha and beta are scalars, x and y are vectors and A is an m by n matrix
// _data is A on GPU
//https://docs.oracle.com/cd/B19306_01/appdev.102/b14258/u_nla.htm#CIAFEAFG
if (_ord == ROW) {
cml::matrix<T, CblasRowMajor> A =
cml::matrix_view_array<T, CblasRowMajor>(_data, this->_m, this->_n);
cml::blas_gemv(hdl, OpToCublasOp(trans), alpha, &A, &x_vec, beta,
&y_vec);
} else {
cml::matrix<T, CblasColMajor> A =
cml::matrix_view_array<T, CblasColMajor>(_data, this->_m, this->_n);
cml::blas_gemv(hdl, OpToCublasOp(trans), alpha, &A, &x_vec, beta, &y_vec);
}
CUDA_CHECK_ERR();
return 0;
}
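// Example (editorial, illustrative only): y := 1*A*x + 0*y. Since x and y are wrapped
// directly into cuBLAS vector views above, they are expected to be device pointers of
// length n and m respectively (d_x, d_y are assumptions for the sketch).
//
//   A.Mul('n', static_cast<float>(1.0), d_x, static_cast<float>(0.0), d_y);
//   // trans='t' selects the transposed product A'*x instead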
template <typename T>
int MatrixDense<T>::Mulvalid(char trans, T alpha, const T *x, T beta, T *y) const {
DEBUG_EXPECT(this->_done_init);
if (!this->_done_init)
return 1;
CUDACHECK(cudaSetDevice(_wDev));
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
cublasHandle_t hdl = info->handle;
const cml::vector<T> x_vec = cml::vector_view_array<T>(x, this->_n);
cml::vector<T> y_vec = cml::vector_view_array<T>(y, this->_mvalid);
// Performs the matrix-vector operations y := alpha*A*x + beta*y or y := alpha*A'*x + beta*y where alpha and beta are scalars, x and y are vectors and A is an m by n matrix
// _vdata is A on GPU
//https://docs.oracle.com/cd/B19306_01/appdev.102/b14258/u_nla.htm#CIAFEAFG
if (_ord == ROW) {
cml::matrix<T, CblasRowMajor> A =
cml::matrix_view_array<T, CblasRowMajor>(_vdata, this->_mvalid, this->_n);
cml::blas_gemv(hdl, OpToCublasOp(trans), alpha, &A, &x_vec, beta,
&y_vec);
} else {
cml::matrix<T, CblasColMajor> A =
cml::matrix_view_array<T, CblasColMajor>(_vdata, this->_mvalid, this->_n);
cml::blas_gemv(hdl, OpToCublasOp(trans), alpha, &A, &x_vec, beta, &y_vec);
}
CUDA_CHECK_ERR();
return 0;
}
// col-major order (fortran) A, but still print as row major
template <typename T>
void printMatrix(int m, int n, const T*A, int lda, const char* name)
{
printf("rows=%d cols=%d lda=%d\n",m,n,lda);
for(int row = 0 ; row < m ; row++){
for(int col = 0 ; col < n ; col++){
T Areg = A[row + col*lda];
printf("%s(%d,%d) = %f\n", name, row+1, col+1, Areg);
}
}
}
// row-major order (c) A printed as row major
template <typename T>
void printMatrix2(int m, int n, const T*A, int lda, const char* name)
{
printf("rows=%d cols=%d lda=%d\n",m,n,lda);
for(int row = 0 ; row < m ; row++){
for(int col = 0 ; col < n ; col++){
T Areg = A[col + row*n];
printf("%s(%d,%d) = %f\n", name, row+1, col+1, Areg);
}
}
}
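// Worked example (editorial): for the 2x3 matrix [[1,2,3],[4,5,6]],
// column-major storage (printMatrix, lda=m=2) holds {1,4, 2,5, 3,6} and element
// (row,col) is A[row + col*lda]; row-major storage (printMatrix2) holds
// {1,2,3, 4,5,6} and element (row,col) is A[col + row*n].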
/*
 * Build notes apparently carried over from the cuSOLVER SVD example
 * (assume cuda is installed at /usr/local/cuda/):
 *   nvcc -c -I/usr/local/cuda/include svd_example.cpp
 *   g++ -fopenmp -o a.out svd_example.o -L/usr/local/cuda/lib64 -lcudart -lcublas -lcusolver
 */
inline cusolverStatus_t cusolverDngesvd ( cusolverDnHandle_t handle, signed char jobu, signed char jobvt, int m, int n, float *A, int lda, float *S, float *U, int ldu, float *VT, int ldvt, float *work, int lwork, float *rwork, int *devInfo){
return(cusolverDnSgesvd(handle, jobu, jobvt, m, n, A, lda, S, U, ldu, VT, ldvt, work, lwork, rwork, devInfo));
}
inline cusolverStatus_t cusolverDngesvd ( cusolverDnHandle_t handle, signed char jobu, signed char jobvt, int m, int n, double *A, int lda, double *S, double *U, int ldu, double *VT, int ldvt, double *work, int lwork, double *rwork, int *devInfo){
return(cusolverDnDgesvd(handle, jobu, jobvt, m, n, A, lda, S, U, ldu, VT, ldvt, work, lwork, rwork, devInfo));
}
inline cublasStatus_t cublasgemm(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, const float *alpha, const float *A, int lda, const float *B, int ldb, const float *beta, float *C, int ldc){
return(cublasSgemm_v2(handle, transa, transb, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc));
}
inline cublasStatus_t cublasgemm(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, const double *alpha, const double *A, int lda, const double *B, int ldb, const double *beta, double *C, int ldc){
return(cublasDgemm_v2(handle, transa, transb, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc));
}
inline cublasStatus_t cublasdgmm(cublasHandle_t handle,
cublasSideMode_t mode,
int m,
int n,
const float *A,
int lda,
const float *x,
int incx,
float *C,
int ldc){
return(cublasSdgmm(handle,
mode,
m,
n,
A,
lda,
x,
incx,
C,
ldc));
}
inline cublasStatus_t cublasdgmm(cublasHandle_t handle,
cublasSideMode_t mode,
int m,
int n,
const double *A,
int lda,
const double *x,
int incx,
double *C,
int ldc){
return(cublasDdgmm(handle,
mode,
m,
n,
A,
lda,
x,
incx,
C,
ldc));
}
inline cublasStatus_t cublasnrm2(cublasHandle_t handle,
int n,
const double *x,
int incx,
double *result){
return(cublasDnrm2_v2(handle,
n,
x,
incx,
result));
}
inline cublasStatus_t cublasnrm2(cublasHandle_t handle,
int n,
const float *x,
int incx,
float *result){
return(cublasSnrm2_v2(handle,
n,
x,
incx,
result));
}
// // Each block transposes/copies a tile of TILE_DIM x TILE_DIM elements
// // using TILE_DIM x BLOCK_ROWS threads, so that each thread transposes
// // TILE_DIM/BLOCK_ROWS elements. TILE_DIM must be an integral multiple of BLOCK_ROWS
#define TILE_DIM 16
#define BLOCK_ROWS 16
// __global__ void transposeNaive(float *odata, float* idata,
// int width, int height)
// {
// int xIndex = blockIdx.x*TILE_DIM + threadIdx.x;
// int yIndex = blockIdx.y*TILE_DIM + threadIdx.y;
// int index_in = xIndex + width * yIndex;
// int index_out = yIndex + height * xIndex;
// for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
// odata[index_out+i] = idata[index_in+i*width];
// }
// }
// __global__ void transposeNaive(double *odata, double* idata,
// int width, int height)
// {
// int xIndex = blockIdx.x*TILE_DIM + threadIdx.x;
// int yIndex = blockIdx.y*TILE_DIM + threadIdx.y;
// int index_in = xIndex + width * yIndex;
// int index_out = yIndex + height * xIndex;
// for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
// odata[index_out+i] = idata[index_in+i*width];
// }
// }
// __global__ void transposeCoalesced(float *odata,
// float *idata, int width, int height)
// {
// __shared__ float tile[TILE_DIM][TILE_DIM];
// int xIndex = blockIdx.x*TILE_DIM + threadIdx.x;
// int yIndex = blockIdx.y*TILE_DIM + threadIdx.y;
// int index_in = xIndex + (yIndex)*width;
// xIndex = blockIdx.y * TILE_DIM + threadIdx.x;
// yIndex = blockIdx.x * TILE_DIM + threadIdx.y;
// int index_out = xIndex + (yIndex)*height;
// for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
// tile[threadIdx.y+i][threadIdx.x] =
// idata[index_in+i*width];
// }
// __syncthreads();
// for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
// odata[index_out+i*height] =
// tile[threadIdx.x][threadIdx.y+i];
// }
// }
// __global__ void transposeCoalesced(double *odata,
// double *idata, int width, int height)
// {
// __shared__ double tile[TILE_DIM][TILE_DIM];
// int xIndex = blockIdx.x*TILE_DIM + threadIdx.x;
// int yIndex = blockIdx.y*TILE_DIM + threadIdx.y;
// int index_in = xIndex + (yIndex)*width;
// xIndex = blockIdx.y * TILE_DIM + threadIdx.x;
// yIndex = blockIdx.x * TILE_DIM + threadIdx.y;
// int index_out = xIndex + (yIndex)*height;
// for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
// tile[threadIdx.y+i][threadIdx.x] =
// idata[index_in+i*width];
// }
// __syncthreads();
// for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
// odata[index_out+i*height] =
// tile[threadIdx.x][threadIdx.y+i];
// }
// }
// transpose a row-major m-by-n device matrix idata into odata (written in column-major order) via cublas geam; despite the name, this writes to a separate output buffer
void cudaintranspose(float *odata, float *idata, int m, int n){
cudaError_t cudaStat1 = cudaSuccess;
cudaStat1 = cudaMemcpy(odata, idata, sizeof(float)*m*n, cudaMemcpyDeviceToDevice);
assert(cudaSuccess == cudaStat1);
float const alpha(1.0);
float const beta(0.0);
cublasHandle_t handle;
cublasCreate(&handle);
cublasSgeam( handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, &alpha, idata, n, &beta, idata, m, odata, m );
cublasDestroy(handle);
}
void cudaintranspose(double *odata, double *idata, int m, int n){
cudaError_t cudaStat1 = cudaSuccess;
cudaStat1 = cudaMemcpy(odata, idata, sizeof(double)*m*n, cudaMemcpyDeviceToDevice);
assert(cudaSuccess == cudaStat1);
double const alpha(1.0);
double const beta(0.0);
cublasHandle_t handle;
cublasCreate(&handle);
cublasDgeam( handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, &alpha, idata, n, &beta, idata, m, odata, m );
cublasDestroy(handle);
}
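// Editorial note on the geam-based transpose above: cublas<S|D>geam computes
// C = alpha*op(A) + beta*op(B). With alpha=1, beta=0, op(A)=A^T and lda=n, a
// row-major m-by-n idata is reinterpreted as a column-major n-by-m matrix and
// written transposed into odata, so odata holds the same logical m-by-n matrix
// in column-major order. Because beta==0 the preceding device-to-device memcpy
// of idata into odata appears redundant (odata is fully overwritten).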
#define MIN(a,b) ((a)<(b) ? (a) : (b))
template <typename T>
int MatrixDense<T>::svd1(void) {
fprintf(stderr,"begin svd inside0\n"); fflush(stderr); fflush(stdout);
DEBUG_ASSERT(this->_done_init);
if (!this->_done_init)
Init();
fprintf(stderr,"begin svd inside\n"); fflush(stderr); fflush(stdout);
cusolverDnHandle_t cusolverH = NULL;
cublasHandle_t cublasH = NULL;
cublasStatus_t cublas_status = CUBLAS_STATUS_SUCCESS;
cusolverStatus_t cusolver_status = CUSOLVER_STATUS_SUCCESS;
cudaError_t cudaStat1 = cudaSuccess;
cudaError_t cudaStat2 = cudaSuccess;
cudaError_t cudaStat3 = cudaSuccess;
cudaError_t cudaStat4 = cudaSuccess;
cudaError_t cudaStat5 = cudaSuccess;
cudaError_t cudaStat6 = cudaSuccess;
int m = this->_m;
int n = this->_n;
// const int m = this->_m;
// const int n = this->_n;
int lda = m;
  /* (example layout carried over from the original cuSOLVER gesvd sample:
   *  a 3-by-2 matrix stored column-major)
   *       | 1 2 |
   *   A = | 4 5 |
   *       | 2 1 |
   */
  unsigned char ord='r'; // TODO: should come from this->_ord rather than being hard-coded
// original device vector
T *d_A0;
d_A0 = this->_data;
// device vectors
T *d_A = NULL;
T *d_S = NULL;
T *d_U = NULL;
T *d_VT = NULL;
int *devInfo = NULL;
T *d_work = NULL;
T *d_rwork = NULL;
T *d_W = NULL; // W = S*VT
int lwork = 0;
int info_gpu = 0;
const T h_one = 1;
const T h_minus_one = -1;
double t0 = timer<double>();
// step 1: create cusolverDn/cublas handle
cusolver_status = cusolverDnCreate(&cusolverH);
assert(CUSOLVER_STATUS_SUCCESS == cusolver_status);
cublas_status = cublasCreate(&cublasH);
assert(CUBLAS_STATUS_SUCCESS == cublas_status);
fprintf(stderr,"HERE1\n"); fflush(stderr); fflush(stdout);
// step 2: copy A to device
  // gesvd destroys its input, so work on a copy of d_A0 (the original is
  // reused later for the reconstruction-error check) // OPTMARK
  cudaStat1 = cudaMalloc ((void**)&d_A , sizeof(T)*lda*n);
assert(cudaSuccess == cudaStat1);
cudaStat1 = cudaMemcpy(d_A, d_A0, sizeof(T)*lda*n, cudaMemcpyDeviceToDevice);
assert(cudaSuccess == cudaStat1);
cudaStat1 = cudaDeviceSynchronize();
assert(cudaSuccess == cudaStat1);
int ldu=m; //lda;
int ldureal=n; // actual storage
int ldvt=n;
if(ord=='r'){
// transpose
// execution configuration parameters
//dim3 grid(n/TILE_DIM, lda/TILE_DIM), threads(TILE_DIM,BLOCK_ROWS);
// transposeCoalesced<<<grid, threads>>>(d_A, d_A0, n, lda);
// transposeNaive<<<grid, threads>>>(d_A, d_A0, n, lda);
cudaintranspose(d_A,d_A0,m,n); // OPTMARK
cudaStat1 = cudaDeviceSynchronize();
assert(cudaSuccess == cudaStat1);
    // debug only: swap m/n so printMatrix2 views the transposed layout; must stay disabled (0) for real use.
if(0){
int temp=m;
m=n;
n=temp;
lda=m;
ldu=m; //lda;
ldureal=n; // actual storage
ldvt=n;
}
}
else{
d_A = d_A0;
}
fprintf(stderr,"HERE PRE\n"); fflush(stderr); fflush(stdout);
// old host side vectors
// T A[lda*n] = { 1.0, 4.0, 2.0, 2.0, 5.0, 1.0};
// GpuData<T> *info_A = reinterpret_cast<GpuData<T>*>(this->_info); // cast from void to GpuData
// T *A = const_cast<T*>(info_A->orig_data);
#if(0)
T A[lda*n]; // for debug
T U[ldureal*m]; // m-by-m unitary matrix
T VT[ldvt*n]; // n-by-n unitary matrix
T S[MIN(n,m)]; // singular value
#endif
// T S_exact[n] = {7.065283497082729, 1.040081297712078};
fprintf(stderr,"HERE POST\n"); fflush(stderr); fflush(stdout);
// now d_A has column-major order matrix
fprintf(stderr,"HERE2\n"); fflush(stderr); fflush(stdout);
#if(0) // debug
cudaStat1 = cudaMemcpy(A, d_A, sizeof(T)*lda*n, cudaMemcpyDeviceToHost);
assert(cudaSuccess == cudaStat1);
cudaStat1 = cudaDeviceSynchronize();
assert(cudaSuccess == cudaStat1);
printf("A = (matlab base-1)\n");
printMatrix(m, n, A, lda, "A");
printf("=====\n");
printf("A = (matlab base-1)\n");
printMatrix2(m, n, A, lda, "A");
printf("=====\n");
#endif
fprintf(stderr,"HERE3\n"); fflush(stderr); fflush(stdout);
cudaStat2 = cudaMalloc ((void**)&d_S , sizeof(T)*MIN(n,m));
cudaStat3 = cudaMalloc ((void**)&d_U , sizeof(T)*ldureal*m);
cudaStat4 = cudaMalloc ((void**)&d_VT , sizeof(T)*ldvt*n);
cudaStat5 = cudaMalloc ((void**)&devInfo, sizeof(int));
cudaStat6 = cudaMalloc ((void**)&d_W , sizeof(T)*lda*n);
// assert(cudaSuccess == cudaStat1);
assert(cudaSuccess == cudaStat2);
assert(cudaSuccess == cudaStat3);
assert(cudaSuccess == cudaStat4);
assert(cudaSuccess == cudaStat5);
assert(cudaSuccess == cudaStat6);
// host->device
// cudaStat1 = cudaMemcpy(d_A, A, sizeof(T)*lda*n, cudaMemcpyHostToDevice);
// assert(cudaSuccess == cudaStat1);
// step 3: query working space of SVD
//The dense matrices are assumed to be stored in column-major order in memory.
  // NOTE: the double-precision buffer-size query is used for both T=float and
  // T=double (the two *_bufferSize routines share this signature); a
  // type-dispatched wrapper mirroring cusolverDngesvd above would be more precise.
  cusolver_status = cusolverDnDgesvd_bufferSize(
    cusolverH,
    m,
    n,
    &lwork );
assert (cusolver_status == CUSOLVER_STATUS_SUCCESS);
cudaStat1 = cudaMalloc((void**)&d_work , sizeof(T)*lwork);
assert(cudaSuccess == cudaStat1);
double t1 = timer<double>();
fprintf(stderr,"SVD init: %g\n",t1-t0); fflush(stderr); fflush(stdout);
// step 4: compute SVD
double t0c = timer<double>();
signed char jobu = 'A'; // all m columns of U
signed char jobvt = 'A'; // all n columns of VT
cusolver_status = cusolverDngesvd(
cusolverH,
jobu,
jobvt,
m,
n,
d_A,
lda,
d_S,
d_U,
ldu,
d_VT,
ldvt,
d_work,
lwork,
d_rwork,
devInfo);
cudaStat4 = cudaMemcpy(&info_gpu, devInfo, sizeof(int), cudaMemcpyDeviceToHost);
printf("after gesvd: info_gpu = %d\n", info_gpu); fflush(stdout);
assert(0 == info_gpu);
printf("=====\n"); fflush(stdout);
cudaStat1 = cudaDeviceSynchronize();
assert(cudaSuccess == cudaStat1);
fprintf(stderr,"BAD: %d\n",cusolver_status); fflush(stderr);
assert(CUSOLVER_STATUS_SUCCESS == cusolver_status);
double t1c = timer<double>();
fprintf(stderr,"SVD compute: %g\n",t1-t0); fflush(stderr); fflush(stdout);
#if(0)
/////////////////////////
// Copy solution device->host
double t0h = timer<double>();
cudaStat1 = cudaMemcpy(U , d_U , sizeof(T)*ldureal*m, cudaMemcpyDeviceToHost);
cudaStat2 = cudaMemcpy(VT, d_VT, sizeof(T)*ldvt*n, cudaMemcpyDeviceToHost);
cudaStat3 = cudaMemcpy(S , d_S , sizeof(T)*MIN(n,m), cudaMemcpyDeviceToHost);
assert(cudaSuccess == cudaStat1);
assert(cudaSuccess == cudaStat2);
assert(cudaSuccess == cudaStat3);
assert(cudaSuccess == cudaStat4);
if(0){ // debug
printf("S = (matlab base-1)\n");
printMatrix(n, 1, S, lda, "S");
printf("=====\n");
printf("U = (matlab base-1)\n");
printMatrix(m, m, U, ldureal, "U");
printf("=====\n");
printf("VT = (matlab base-1)\n");
printMatrix(n, n, VT, ldvt, "VT");
printf("=====\n");
/////////////////////////
// measure error of singular value
// T ds_sup = 0;
// for(int j = 0; j < n; j++){
// T err = fabs( S[j] - S_exact[j] );
// ds_sup = (ds_sup > err)? ds_sup : err;
// }
// printf("|S - S_exact| = %E \n", ds_sup);
}
double t1h = timer<double>();
fprintf(stderr,"SVD back to host: %g\n",t1h-t0h); fflush(stderr); fflush(stdout);
#endif
/////////////////////////
// now check
double t0c1 = timer<double>();
// step 5: |A - U*S*VT|
// W = S*VT
cublas_status = cublasdgmm(
cublasH,
CUBLAS_SIDE_LEFT,
n,
n,
d_VT,
ldvt,
d_S,
1,
d_W,
lda);
assert(CUBLAS_STATUS_SUCCESS == cublas_status);
double t1c1 = timer<double>();
fprintf(stderr,"SVD check1: %g\n",t1c1-t0c1); fflush(stderr); fflush(stdout);
// A := -U*W + A
double t0c2 = timer<double>();
cudaStat1 = cudaMemcpy(d_A, d_A0, sizeof(T)*lda*n, cudaMemcpyDeviceToDevice); // copy because original d_A was destroyed
assert(cudaSuccess == cudaStat1);
cublas_status = cublasgemm(
cublasH,
CUBLAS_OP_N, // U
CUBLAS_OP_N, // W
m, // number of rows of A
n, // number of columns of A
n, // number of columns of U
&h_minus_one, /* host pointer */
d_U, // U
ldu,
d_W, // W
lda,
&h_one, /* hostpointer */
d_A,
lda);
assert(CUBLAS_STATUS_SUCCESS == cublas_status);
double t1c2 = timer<double>();
fprintf(stderr,"SVD check2: %g\n",t1c2-t0c2); fflush(stderr); fflush(stdout);
double t0c3 = timer<double>();
T dR_fro = 0.0;
cublas_status = cublasnrm2(
cublasH, lda*n, d_A, 1, &dR_fro);
assert(CUBLAS_STATUS_SUCCESS == cublas_status);
printf("|A - U*S*VT| = %E \n", dR_fro); fflush(stdout);
double t1c3 = timer<double>();
fprintf(stderr,"SVD check3: %g\n",t1c3-t0c3); fflush(stderr); fflush(stdout);
// free resources
double t0f = timer<double>();
//if (d_A ) cudaFree(d_A);
if (d_S ) cudaFree(d_S);
if (d_U ) cudaFree(d_U);
if (d_VT ) cudaFree(d_VT);
if (devInfo) cudaFree(devInfo);
if (d_work ) cudaFree(d_work);
if (d_rwork) cudaFree(d_rwork);
if (d_W ) cudaFree(d_W);
if (cublasH ) cublasDestroy(cublasH);
if (cusolverH) cusolverDnDestroy(cusolverH);
// cudaDeviceReset();
double t1f = timer<double>();
fprintf(stderr,"SVD free: %g\n",t1f-t0f); fflush(stderr); fflush(stdout);
fprintf(stderr,"end svd inside\n"); fflush(stderr); fflush(stdout);
return 0;
}
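// Editorial note (illustrative): the check above forms W = diag(S)*VT with dgmm and
// then A := A - U*W with gemm, so the printed value is the Euclidean norm of the
// flattened residual, i.e. ||A - U*S*VT||_F, which for a successful factorization
// should be on the order of machine epsilon times ||A||_F. Typical (assumed) usage:
//
//   MatrixDense<double> A('c', m, n, hostX);
//   A.svd1();   // prints timings and |A - U*S*VT|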
// Equilibrate (precondition) the matrix using the Sinkhorn-Knopp method, wrapped to allow any norm
// See https://arxiv.org/pdf/1610.03871.pdf for more information
template <typename T>
int MatrixDense<T>::Equil(bool equillocal) {
DEBUG_ASSERT(this->_done_init);
if (!this->_done_init)
return 1;
if (this->_done_equil) return 0;
else this->_done_equil=1;
CUDACHECK(cudaSetDevice(_wDev));
// Extract cublas handle from _info.
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
cublasHandle_t hdl = info->handle;
T *d = _de;
T *e = d + this->_m;
// Number of elements in matrix.
size_t num_el = this->_m * this->_n;
// Create bit-vector with signs of entries in A and then let A = f(A),
// where f = |A| or f = |A|.^2.
unsigned char *sign = NULL;
size_t num_sign_bytes = (num_el + 7) / 8;
cudaMalloc(&sign, num_sign_bytes);
CUDA_CHECK_ERR();
size_t num_chars = num_el / 8;
size_t grid_size = cml::calc_grid_dim(num_chars, cml::kBlockSize);
if(equillocal){
// Fill sign bits, assigning each thread a multiple of 8 elements.
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
__SetSign<<<grid_size, cml::kBlockSize>>>(_data, sign, num_chars,
SquareF<T>());
} else {
__SetSign<<<grid_size, cml::kBlockSize>>>(_data, sign, num_chars,
AbsF<T>());
}
wrapcudaDeviceSynchronize();
CUDA_CHECK_ERR();
// If numel(A) is not a multiple of 8, then we need to set the last couple
// of sign bits too.
if (num_el > num_chars * 8) {
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
__SetSignSingle<<<1, 1>>>(_data + num_chars * 8, sign + num_chars,
num_el - num_chars * 8, SquareF<T>());
} else {
__SetSignSingle<<<1, 1>>>(_data + num_chars * 8, sign + num_chars,
num_el - num_chars * 8, AbsF<T>());
}
wrapcudaDeviceSynchronize();
CUDA_CHECK_ERR();
}
}
// Perform Sinkhorn-Knopp equilibration to obtain a doubly stochastic matrix.
SinkhornKnopp(this, d, e, equillocal);
wrapcudaDeviceSynchronize();
if(equillocal){
// Transform A = sign(A) .* sqrt(A) if 2-norm equilibration was performed,
// or A = sign(A) .* A if the 1-norm was equilibrated.
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
__UnSetSign<<<grid_size, cml::kBlockSize>>>(_data, sign, num_chars,
SqrtF<T>());
} else {
__UnSetSign<<<grid_size, cml::kBlockSize>>>(_data, sign, num_chars,
IdentityF<T>());
}
wrapcudaDeviceSynchronize();
CUDA_CHECK_ERR();
// Deal with last few entries if num_el is not a multiple of 8.
if (num_el > num_chars * 8) {
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
__UnSetSignSingle<<<1, 1>>>(_data + num_chars * 8, sign + num_chars,
num_el - num_chars * 8, SqrtF<T>());
} else {
__UnSetSignSingle<<<1, 1>>>(_data + num_chars * 8, sign + num_chars,
num_el - num_chars * 8, IdentityF<T>());
}
wrapcudaDeviceSynchronize();
CUDA_CHECK_ERR();
}
}
// Compute D := sqrt(D), E := sqrt(E), if 2-norm was equilibrated.
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
thrust::transform(thrust::device_pointer_cast(d),
thrust::device_pointer_cast(d + this->_m),
thrust::device_pointer_cast(d), SqrtF<T>());
thrust::transform(thrust::device_pointer_cast(e),
thrust::device_pointer_cast(e + this->_n),
thrust::device_pointer_cast(e), SqrtF<T>());
wrapcudaDeviceSynchronize();
CUDA_CHECK_ERR();
}
// Compute A := D * A * E.
MultDiag(d, e, this->_m, this->_n, _ord, _data);
wrapcudaDeviceSynchronize();
CUDA_CHECK_ERR();
// Scale A to have norm of 1 (in the kNormNormalize norm).
T normA = NormEst(hdl, kNormNormalize, *this);
CUDA_CHECK_ERR();
wrapcudaDeviceSynchronize();
cml::vector<T> a_vec = cml::vector_view_array(_data, num_el);
cml::vector_scale(&a_vec, 1 / normA);
wrapcudaDeviceSynchronize();
// Scale d and e to account for normalization of A.
cml::vector<T> d_vec = cml::vector_view_array<T>(d, this->_m);
cml::vector<T> e_vec = cml::vector_view_array<T>(e, this->_n);
cml::vector_scale(&d_vec, 1 / sqrt(normA));
cml::vector_scale(&e_vec, 1 / sqrt(normA));
wrapcudaDeviceSynchronize();
DEBUG_PRINTF("norm A = %e, normd = %e, norme = %e\n", normA,
cml::blas_nrm2(hdl, &d_vec), cml::blas_nrm2(hdl, &e_vec));
cudaFree(sign);
CUDA_CHECK_ERR();
return 0;
}
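// Editorial sketch (illustrative only, host-side pseudocode): the basic
// Sinkhorn-Knopp iteration on the nonnegative matrix B = f(A) alternately
// rescales columns and rows until the scaled matrix is (approximately) doubly
// stochastic:
//
//   // d[0..m-1] = 1, e[0..n-1] = 1
//   // repeat for a fixed number of iterations:
//   //   for each column j: e[j] = 1 / (sum_i d[i] * B[i][j])
//   //   for each row    i: d[i] = 1 / (sum_j B[i][j] * e[j])
//   // finally A := diag(d) * A * diag(e)   (the MultDiag call in Equil above applies this on the GPU)
//
// The SinkhornKnopp() routine invoked above runs this on the device and may use
// different normalization constants; the sketch only documents the idea.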
// This example computes several statistical properties of a data
// series in a single reduction. The algorithm is described in detail here:
// http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
//
// Thanks to Joseph Rhoads for contributing this example
// structure used to accumulate the moments and other
// statistical properties encountered so far.
template <typename T>
struct summary_stats_data
{
T n;
T min;
T max;
T mean;
T M2;
T M3;
T M4;
// initialize to the identity element
void initialize()
{
n = mean = M2 = M3 = M4 = 0;
min = std::numeric_limits<T>::max();
    max = std::numeric_limits<T>::lowest(); // note: ::min() is the smallest positive value for floating-point T, which would break the max reduction for all-negative data
}
T variance() { return M2 / (n - 1); }
T variance_n() { return M2 / n; }
T skewness() { return std::sqrt(n) * M3 / std::pow(M2, (T) 1.5); }
T kurtosis() { return n * M4 / (M2 * M2); }
};
// summary_stats_unary_op is a functor that takes in a value x and
// returns a summary_stats_data representing the single sample x (n = 1, mean = x)
template <typename T>
struct summary_stats_unary_op
{
__host__ __device__
summary_stats_data<T> operator()(const T& x) const
{
summary_stats_data<T> result;
result.n = 1;
result.min = x;
result.max = x;
result.mean = x;
result.M2 = 0;
result.M3 = 0;
result.M4 = 0;
return result;
}
};
// summary_stats_binary_op is a functor that accepts two summary_stats_data
// structs and returns a new summary_stats_data that summarizes
// all values aggregated so far
template <typename T>
struct summary_stats_binary_op
: public thrust::binary_function<const summary_stats_data<T>&,
const summary_stats_data<T>&,
summary_stats_data<T> >
{
__host__ __device__
summary_stats_data<T> operator()(const summary_stats_data<T>& x, const summary_stats_data <T>& y) const
{
summary_stats_data<T> result;
// precompute some common subexpressions
T n = x.n + y.n;
T n2 = n * n;
T n3 = n2 * n;
T delta = y.mean - x.mean;
T delta2 = delta * delta;
T delta3 = delta2 * delta;
T delta4 = delta3 * delta;
//Basic number of samples (n), min, and max
result.n = n;
result.min = thrust::min(x.min, y.min);
result.max = thrust::max(x.max, y.max);
result.mean = x.mean + delta * y.n / n;
result.M2 = x.M2 + y.M2;
result.M2 += delta2 * x.n * y.n / n;
result.M3 = x.M3 + y.M3;
result.M3 += delta3 * x.n * y.n * (x.n - y.n) / n2;
result.M3 += (T) 3.0 * delta * (x.n * y.M2 - y.n * x.M2) / n;
result.M4 = x.M4 + y.M4;
result.M4 += delta4 * x.n * y.n * (x.n * x.n - x.n * y.n + y.n * y.n) / n3;
result.M4 += (T) 6.0 * delta2 * (x.n * x.n * y.M2 + y.n * y.n * x.M2) / n2;
result.M4 += (T) 4.0 * delta * (x.n * y.M3 - y.n * x.M3) / n;
return result;
}
};
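// Editorial note: the merge above follows the pairwise-update formulas of Chan et al.
// (see the Wikipedia link above). With nx, ny the two counts and delta = mean_y - mean_x:
//   n    = nx + ny
//   mean = mean_x + delta * ny / n
//   M2   = M2_x + M2_y + delta^2 * nx * ny / n
// with analogous higher-order terms for M3 and M4, which is exactly what the
// operator() body accumulates term by term.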
template <typename Iterator>
void print_range(const std::string& name, Iterator first, Iterator last)
{
typedef typename std::iterator_traits<Iterator>::value_type T;
std::cout << name << ": ";
thrust::copy(first, last, std::ostream_iterator<T>(std::cout, " "));
std::cout << "\n";
}
template<typename T>
struct absolute_value : public thrust::unary_function<T,T>
{
__host__ __device__ T operator()(const T &x) const
{
return x < T(0) ? -x : x;
}
};
// --- Operator for testing nan values
template<typename T>
struct isnan_test {
__host__ __device__ bool operator()(const T a) const {
return isnan(a) || isinf(a);
}
};
// check properties of input data
template <typename T>
int MatrixDense<T>::Stats(int intercept, T *min, T *max, T *mean, T *var, T *sd, T *skew, T *kurt, T &lambda_max0)
{
CUDACHECK(cudaSetDevice(_wDev));
if(_data!=NULL) {// check for nan or inf in data
thrust::device_ptr<T> begin = thrust::device_pointer_cast(_data);
thrust::device_ptr<T> end = thrust::device_pointer_cast(_data+this->_m*this->_n);
bool h_result = thrust::transform_reduce(begin, end, isnan_test<T>(), 0, thrust::plus<bool>());
if(h_result==true){
fprintf(stderr,"Data matrix (trainX) has nan/inf or missing was not encoded\n");
fflush(stderr);
exit(1);
}
}
if(_datay!=NULL) {// check for nan or inf in data
thrust::device_ptr<T> begin = thrust::device_pointer_cast(_datay);
thrust::device_ptr<T> end = thrust::device_pointer_cast(_datay+this->_m);
bool h_result = thrust::transform_reduce(begin, end, isnan_test<T>(), 0, thrust::plus<bool>());
if(h_result==true){
fprintf(stderr,"Data training predictions/labels (trainY) has nan/inf or missing was not encoded\n");
fflush(stderr);
exit(1);
}
}
if(_vdata!=NULL) {// check for nan or inf in data
thrust::device_ptr<T> begin = thrust::device_pointer_cast(_vdata);
thrust::device_ptr<T> end = thrust::device_pointer_cast(_vdata+this->_mvalid*this->_n);
bool h_result = thrust::transform_reduce(begin, end, isnan_test<T>(), 0, thrust::plus<bool>());
if(h_result==true){
fprintf(stderr,"Validation Data matrix (validX) has nan/inf or missing was not encoded\n");
fflush(stderr);
exit(1);
}
}
if(_vdatay!=NULL) {// check for nan or inf in data
thrust::device_ptr<T> begin = thrust::device_pointer_cast(_vdatay);
thrust::device_ptr<T> end = thrust::device_pointer_cast(_vdatay+this->_mvalid);
bool h_result = thrust::transform_reduce(begin, end, isnan_test<T>(), 0, thrust::plus<bool>());
if(h_result==true){
fprintf(stderr,"Validation Data training predictions/labels (validY) has nan/inf or missing was not encoded\n");
fflush(stderr);
exit(1);
}
}
if(_weight!=NULL) {// check for nan or inf in data
thrust::device_ptr<T> begin = thrust::device_pointer_cast(_weight);
thrust::device_ptr<T> end = thrust::device_pointer_cast(_weight+this->_m);
bool h_result = thrust::transform_reduce(begin, end, isnan_test<T>(), 0, thrust::plus<bool>());
if(h_result==true){
fprintf(stderr,"Weight Training Data has nan/inf or missing was not encoded\n");
fflush(stderr);
exit(1);
}
}
// nothing else to do if _datay==NULL
if(_datay==NULL) return(0);
// setup arguments
summary_stats_unary_op<T> unary_op;
summary_stats_binary_op<T> binary_op;
summary_stats_data<T> init;
init.initialize();
int len=0;
// cast GPU pointer as thrust pointer
thrust::device_ptr<T> dataybegin=thrust::device_pointer_cast(_datay);
len=this->_m;
thrust::device_ptr<T> datayend=thrust::device_pointer_cast(_datay+len);
// compute summary statistics
summary_stats_data<T> resulty = thrust::transform_reduce(dataybegin, datayend, unary_op, init, binary_op);
min[0]=resulty.min;
max[0]=resulty.max;
mean[0]=resulty.mean;
var[0]=resulty.variance();
sd[0]=std::sqrt(resulty.variance_n());
skew[0]=resulty.skewness();
kurt[0]=resulty.kurtosis();
#ifdef DEBUG
std::cout <<"******Summary Statistics of Response Train*****"<<std::endl;
// print_range("The data", dataybegin, datayend);
std::cout <<"Count : "<< resulty.n << std::endl;
std::cout <<"Minimum : "<< min[0]<<std::endl;
std::cout <<"Maximum : "<< max[0]<<std::endl;
std::cout <<"Mean : "<< mean[0]<< std::endl;
std::cout <<"Variance : "<< var[0]<< std::endl;
std::cout <<"Standard Deviation : "<< sd[0]<< std::endl;
std::cout <<"Skewness : "<< skew[0]<< std::endl;
std::cout <<"Kurtosis : "<< kurt[0]<< std::endl;
#endif
// cast GPU pointer as thrust pointer
thrust::device_ptr<T> vdataybegin=thrust::device_pointer_cast(_vdatay);
len=this->_mvalid;
thrust::device_ptr<T> vdatayend=thrust::device_pointer_cast(_vdatay+len);
// compute summary statistics
summary_stats_data<T> vresulty = thrust::transform_reduce(vdataybegin, vdatayend, unary_op, init, binary_op);
min[1]=vresulty.min;
max[1]=vresulty.max;
mean[1]=vresulty.mean;
var[1]=vresulty.variance();
sd[1]=std::sqrt(vresulty.variance_n());
skew[1]=vresulty.skewness();
kurt[1]=vresulty.kurtosis();
#ifdef DEBUG
std::cout <<"******Summary Statistics of Response Valid*****"<<std::endl;
// print_range("The data", vdataybegin, vdatayend);
std::cout <<"Count : "<< vresulty.n << std::endl;
std::cout <<"Minimum : "<< min[1]<<std::endl;
std::cout <<"Maximum : "<< max[1]<<std::endl;
std::cout <<"Mean : "<< mean[1]<< std::endl;
std::cout <<"Variance : "<< var[1]<< std::endl;
std::cout <<"Standard Deviation : "<< sd[1]<< std::endl;
std::cout <<"Skewness : "<< skew[1]<< std::endl;
std::cout <<"Kurtosis : "<< kurt[1]<< std::endl;
#endif
if(1){ // normal usage
// Get Cublas handle
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
cublasHandle_t hdl = info->handle;
// Set up views for raw vectors.
cml::vector<T> y_vec = cml::vector_view_array(_datay, this->_m); // b
cml::vector<T> weight_vec;
auto free_weight_vec = false;
if(_weight) weight_vec = cml::vector_view_array(_weight, this->_m); // weight
else{
      weight_vec = cml::vector_calloc<T>(this->_m); // no weights given: make up unity weights
free_weight_vec = true;
cml::vector_add_constant(&weight_vec, static_cast<T>(1.0)); // make unity weights
}
cml::vector<T> ytemp = cml::vector_calloc<T>(this->_m); // b
cml::vector<T> xtemp = cml::vector_calloc<T>(this->_n); // x
cml::vector_memcpy(&ytemp, &y_vec); // y_vec->ytemp
cml::vector_add_constant(&ytemp, -static_cast<T>(intercept)*mean[0]); // ytemp -> ytemp - intercept*mean[0]
cml::vector_mul(&ytemp,&weight_vec); // ytemp*weight -> ytemp
// Compute A^T . b
if (_ord == MatrixDense<T>::ROW) {
const cml::matrix<T, CblasRowMajor> A = cml::matrix_view_array<T, CblasRowMajor>(_data, this->_m, this->_n); // just view
cml::blas_gemv(hdl, CUBLAS_OP_T, static_cast<T>(1.), &A, &ytemp, static_cast<T>(0.), &xtemp); // A.ytemp -> xtemp
}
else{
const cml::matrix<T, CblasColMajor> A = cml::matrix_view_array<T, CblasColMajor>(_data, this->_m, this->_n); // just view
cml::blas_gemv(hdl, CUBLAS_OP_T, static_cast<T>(1.), &A, &ytemp, static_cast<T>(0.), &xtemp); // A.ytemp -> xtemp
}
thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(&xtemp.data[0]);
lambda_max0 = thrust::transform_reduce(thrust::device,
dev_ptr, dev_ptr + this->_n-intercept,
absolute_value<T>(),
static_cast<T>(0.0),
thrust::maximum<T>());
cml::vector_free(&ytemp);
cml::vector_free(&xtemp);
if(free_weight_vec) cml::vector_free(&weight_vec);
}
else{
lambda_max0 = 7000; // test
}
CUDA_CHECK_ERR();
return 0;
}
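// Editorial note: with w the (possibly unity) weights and ybar = mean[0], the block
// above computes
//   lambda_max0 = || A^T ( w .* (y - intercept*ybar) ) ||_inf
// taken over the first n - intercept entries of the result, i.e. the trailing
// intercept column (when present) is excluded from the max.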
////////////////////////////////////////////////////////////////////////////////
/////////////////////// Equilibration Helpers //////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
namespace {
// Estimates norm of A. norm_type should either be kNorm2 or kNormFro.
template <typename T>
T NormEst(cublasHandle_t hdl, NormTypes norm_type, const MatrixDense<T>& A) {
switch (norm_type) {
case kNorm2: {
return Norm2Est(hdl, &A);
}
case kNormFro: {
const cml::vector<T> a = cml::vector_view_array(A.Data(),
A.Rows() * A.Cols());
return cml::blas_nrm2(hdl, &a) / std::sqrt(std::min(A.Rows(), A.Cols()));
}
case kNorm1:
      // 1-norm normalization doesn't make sense since it treats rows and
      // columns differently.
default:
ASSERT(false);
return static_cast<T>(0.);
}
}
// Performs A := D * A * E for A in row major
template <typename T>
void __global__ __MultRow(size_t m, size_t n, const T *d, const T *e, T *data) {
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
for (size_t t = tid; t < m * n; t += gridDim.x * blockDim.x)
data[t] *= d[t / n] * e[t % n];
}
// Performs A := D * A * E for A in col major
template <typename T>
void __global__ __MultCol(size_t m, size_t n, const T *d, const T *e, T *data) {
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
for (size_t t = tid; t < m * n; t += gridDim.x * blockDim.x)
data[t] *= d[t % m] * e[t / m];
}
template <typename T>
void MultDiag(const T *d, const T *e, size_t m, size_t n,
typename MatrixDense<T>::Ord ord, T *data) {
if (ord == MatrixDense<T>::ROW) {
size_t grid_dim_row = cml::calc_grid_dim(m * n, cml::kBlockSize);
__MultRow<<<grid_dim_row, cml::kBlockSize>>>(m, n, d, e, data);
} else {
size_t grid_dim_row = cml::calc_grid_dim(m * n, cml::kBlockSize);
__MultCol<<<grid_dim_row, cml::kBlockSize>>>(m, n, d, e, data);
}
}
} // namespace
// Explicit template instantiation.
#if !defined(H2O4GPU_DOUBLE) || H2O4GPU_DOUBLE==1
template class MatrixDense<double>;
#endif
#if !defined(H2O4GPU_SINGLE) || H2O4GPU_SINGLE==1
template class MatrixDense<float>;
#endif
// upload data function. Uploads to a single GPU.
// mimics the otherwise similar MatrixDense constructor, but never frees the uploaded data pointers
template <typename T>
int makePtr_dense(int sharedA, int me, int wDev, size_t m, size_t n, size_t mValid, const char ord, const T *data, const T *datay, const T *vdata, const T *vdatay, const T *weight, T **_data, T **_datay, T **_vdata, T **_vdatay, T **_weight){
checkwDev(wDev);
CUDACHECK(cudaSetDevice(wDev));
DEBUG_FPRINTF(stderr,"makePtr_dense: %d\n",0);
#ifdef DEBUG
// CUDACHECK(cudaSetDeviceFlags(cudaDeviceMapHost)); // TODO: MapHostMemory
cudaDeviceProp props;
CUDACHECK(cudaGetDeviceProperties(&props, wDev));
DEBUG_FPRINTF(stderr,"Using: Compute %d.%d CUDA device: [%s] with id=%2d\n", props.major, props.minor, props.name,wDev);
#endif
// Copy Matrix to GPU (unlike CPU case, cannot copy just pointer because always assume input is CPU and output is GPU)
double t0 = timer<double>();
PUSH_RANGE("MDsendsource",MDsendsource,1);
if(data){
CUDACHECK(cudaMalloc(_data, m * n * sizeof(T))); // allocate on GPU
CUDACHECK(cudaMemcpy(*_data, data, m * n * sizeof(T),cudaMemcpyHostToDevice)); // copy from orig CPU data to GPU
// fprintf(stderr,"_data: %p\n",(void*)*_data); fflush(stderr);
}
else *_data=NULL;
if(datay){
CUDACHECK(cudaMalloc(_datay, m * sizeof(T))); // allocate on GPU
CUDACHECK(cudaMemcpy(*_datay, datay, m * sizeof(T),cudaMemcpyHostToDevice)); // copy from orig CPU data to GPU
// fprintf(stderr,"_datay: %p\n",(void*)*_datay); fflush(stderr);
}
else *_datay=NULL;
if(vdata){
CUDACHECK(cudaMalloc(_vdata, mValid * n * sizeof(T))); // allocate on GPU
CUDACHECK(cudaMemcpy(*_vdata, vdata, mValid * n * sizeof(T),cudaMemcpyHostToDevice)); // copy from orig CPU data to GPU
// fprintf(stderr,"_vdata: %p\n",(void*)*_vdata); fflush(stderr);
}
else *_vdata=NULL;
if(vdatay){
CUDACHECK(cudaMalloc(_vdatay, mValid * sizeof(T))); // allocate on GPU
CUDACHECK(cudaMemcpy(*_vdatay, vdatay, mValid * sizeof(T),cudaMemcpyHostToDevice)); // copy from orig CPU data to GPU
// fprintf(stderr,"_vdatay: %p\n",(void*)*_vdatay); fflush(stderr);
}
else *_vdatay=NULL;
// fprintf(stderr,"weight=%p\n",weight); fflush(stderr);
if(weight){
CUDACHECK(cudaMalloc(_weight, m * sizeof(T))); // allocate on GPU
CUDACHECK(cudaMemcpy(*_weight, weight, m * sizeof(T),cudaMemcpyHostToDevice)); // copy from orig CPU data to GPU
}
else{
DEBUG_FPRINTF(stderr,"making up unity weights: %zu\n",m); // m is size_t
CUDACHECK(cudaMalloc(_weight, m * sizeof(T))); // allocate on GPU
thrust::device_ptr<T> dev_ptr=thrust::device_pointer_cast(static_cast<T*>(*_weight));
T fill_value=1.0;
thrust::fill(dev_ptr, dev_ptr + m, fill_value);
// fprintf(stderr,"_weight: %p\n",(void*)*_weight); fflush(stderr);
}
POP_RANGE("MDsendsource",MDsendsource,1);
double t2 = timer<double>();
DEBUG_FPRINTF(stdout,"Time to allocate and copy the data matrix on the GPU: %f\n", t2-t0);
cudaDeviceSynchronize();
DEBUG_FPRINTF(stderr,"pointer data %p\n",(void*)*_data);
DEBUG_FPRINTF(stderr,"pointer datay %p\n",(void*)*_datay);
DEBUG_FPRINTF(stderr,"pointer vdata %p\n",(void*)*_vdata);
DEBUG_FPRINTF(stderr,"pointer vdatay %p\n",(void*)*_vdatay);
DEBUG_FPRINTF(stderr,"pointer weight %p\n",(void*)*_weight);
return(0);
}
template int makePtr_dense<double>(int sharedA, int me, int wDev, size_t m, size_t n, size_t mValid, const char ord,
const double *data, const double *datay, const double *vdata, const double *vdatay, const double *weight,
double **_data, double **_datay, double **_vdata, double **_vdatay, double **_weight);
template int makePtr_dense<float>(int sharedA, int me, int wDev, size_t m, size_t n, size_t mValid, const char ord,
const float *data, const float *datay, const float *vdata, const float *vdatay, const float *weight,
float **_data, float **_datay, float **_vdata, float **_vdatay, float **_weight);
template <typename T>
int modelFree1(T *aptr){
if(aptr!=NULL){
// TODO: use T** instead everywhere to prevent a scenario when we keep an address of allocated memory
// TODO: flush cpu cache as it can be invoked by background GC thread
CUDACHECK(cudaFree(aptr));
}
return(0);
}
template int modelFree1<float>(float *aptr);
template int modelFree1<double>(double *aptr);
} // namespace h2o4gpu
int modelfree1_double(double *aptr){
return h2o4gpu::modelFree1<double>(aptr);
}
int modelfree1_float(float *aptr){
return h2o4gpu::modelFree1<float>(aptr);
}
int make_ptr_double(int sharedA, int sourceme, int sourceDev, size_t mTrain, size_t n, size_t mValid, const char ord,
const double* trainX, const double* trainY, const double* validX, const double* validY, const double *weight,
double**a, double**b, double**c, double**d, double **e) {
return h2o4gpu::makePtr_dense<double>(sharedA, sourceme, sourceDev, mTrain, n, mValid, ord, trainX, trainY, validX, validY, weight, a, b, c, d, e);
}
int make_ptr_float(int sharedA, int sourceme, int sourceDev, size_t mTrain, size_t n, size_t mValid, const char ord,
const float* trainX, const float* trainY, const float* validX, const float* validY, const float *weight,
float**a, float**b, float**c, float**d, float **e) {
return h2o4gpu::makePtr_dense<float>(sharedA, sourceme, sourceDev, mTrain, n, mValid, ord, trainX, trainY, validX, validY, weight, a, b, c, d, e);
}
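// ---------------------------------------------------------------------------
// [Editor's sketch - not part of the original source] Hypothetical call sequence
// for the make_ptr_float wrapper defined above; the sizes, values and the 'r'
// ordering character are illustrative assumptions, and error handling is omitted.
#include <cstddef>
#include <vector>

int make_ptr_float(int sharedA, int sourceme, int sourceDev, size_t mTrain, size_t n, size_t mValid, const char ord,
                   const float* trainX, const float* trainY, const float* validX, const float* validY, const float *weight,
                   float**a, float**b, float**c, float**d, float **e);  // declared as defined above

int upload_example() {
  const size_t mTrain = 4, n = 3, mValid = 2;
  std::vector<float> X(mTrain * n, 1.0f), y(mTrain, 0.5f);      // training matrix and response
  std::vector<float> Xv(mValid * n, 1.0f), yv(mValid, 0.5f);    // validation matrix and response
  float *dX = 0, *dy = 0, *dXv = 0, *dyv = 0, *dw = 0;          // device pointers filled by the call
  // weight == NULL: makePtr_dense fills a vector of unit weights on the GPU
  return make_ptr_float(0 /*sharedA*/, 0 /*sourceme*/, 0 /*sourceDev = GPU id*/,
                        mTrain, n, mValid, 'r' /*assumed row-major tag*/,
                        X.data(), y.data(), Xv.data(), yv.data(), NULL,
                        &dX, &dy, &dXv, &dyv, &dw);
}
// ---------------------------------------------------------------------------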
|
7fca0bbe17f71eae3a33a9049316f2b33c5d204b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@author Mark Gates
@generated from zswap.cu normal z -> c, Fri Jan 30 19:00:09 2015
*/
#include "common_magma.h"
#define NB 64
/* Vector is divided into ceil(n/nb) blocks.
Each thread swaps one element, x[tid] <---> y[tid].
*/
__global__ void cswap_kernel(
int n,
magmaFloatComplex *x, int incx,
magmaFloatComplex *y, int incy )
{
magmaFloatComplex tmp;
int ind = threadIdx.x + blockDim.x*blockIdx.x;
if ( ind < n ) {
x += ind*incx;
y += ind*incy;
tmp = *x;
*x = *y;
*y = tmp;
}
}
/**
Purpose:
=============
Swap vector x and y; \f$ x <-> y \f$.
@param[in]
n Number of elements in vector x and y. n >= 0.
@param[in,out]
dx COMPLEX array on GPU device.
The n element vector x of dimension (1 + (n-1)*incx).
@param[in]
incx Stride between consecutive elements of dx. incx != 0.
@param[in,out]
dy COMPLEX array on GPU device.
The n element vector y of dimension (1 + (n-1)*incy).
@param[in]
incy Stride between consecutive elements of dy. incy != 0.
@ingroup magma_cblas1
********************************************************************/
extern "C" void
magmablas_cswap_q(
magma_int_t n,
magmaFloatComplex_ptr dx, magma_int_t incx,
magmaFloatComplex_ptr dy, magma_int_t incy,
magma_queue_t queue )
{
dim3 grid( (n+NB-1) / NB );
dim3 threads( NB );
hipLaunchKernelGGL(( cswap_kernel), dim3(grid), dim3(threads), 0, queue , n, dx, incx, dy, incy );
}
/**
@see magmablas_cswap_q
@ingroup magma_cblas1
********************************************************************/
extern "C" void
magmablas_cswap(
magma_int_t n,
magmaFloatComplex_ptr dx, magma_int_t incx,
magmaFloatComplex_ptr dy, magma_int_t incy)
{
magmablas_cswap_q( n, dx, incx, dy, incy, magma_stream );
}
| 7fca0bbe17f71eae3a33a9049316f2b33c5d204b.cu | /*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@author Mark Gates
@generated from zswap.cu normal z -> c, Fri Jan 30 19:00:09 2015
*/
#include "common_magma.h"
#define NB 64
/* Vector is divided into ceil(n/nb) blocks.
Each thread swaps one element, x[tid] <---> y[tid].
*/
__global__ void cswap_kernel(
int n,
magmaFloatComplex *x, int incx,
magmaFloatComplex *y, int incy )
{
magmaFloatComplex tmp;
int ind = threadIdx.x + blockDim.x*blockIdx.x;
if ( ind < n ) {
x += ind*incx;
y += ind*incy;
tmp = *x;
*x = *y;
*y = tmp;
}
}
/**
Purpose:
=============
Swap vector x and y; \f$ x <-> y \f$.
@param[in]
n Number of elements in vector x and y. n >= 0.
@param[in,out]
dx COMPLEX array on GPU device.
The n element vector x of dimension (1 + (n-1)*incx).
@param[in]
incx Stride between consecutive elements of dx. incx != 0.
@param[in,out]
dy COMPLEX array on GPU device.
The n element vector y of dimension (1 + (n-1)*incy).
@param[in]
incy Stride between consecutive elements of dy. incy != 0.
@ingroup magma_cblas1
********************************************************************/
extern "C" void
magmablas_cswap_q(
magma_int_t n,
magmaFloatComplex_ptr dx, magma_int_t incx,
magmaFloatComplex_ptr dy, magma_int_t incy,
magma_queue_t queue )
{
dim3 grid( (n+NB-1) / NB );
dim3 threads( NB );
cswap_kernel<<< grid, threads, 0, queue >>>( n, dx, incx, dy, incy );
}
/**
@see magmablas_cswap_q
@ingroup magma_cblas1
********************************************************************/
extern "C" void
magmablas_cswap(
magma_int_t n,
magmaFloatComplex_ptr dx, magma_int_t incx,
magmaFloatComplex_ptr dy, magma_int_t incy)
{
magmablas_cswap_q( n, dx, incx, dy, incy, magma_stream );
}
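// ---------------------------------------------------------------------------
// [Editor's sketch - not part of MAGMA] The same "one thread swaps one element"
// pattern as cswap_kernel above, reduced to a standalone float program using the
// grid shape of magmablas_cswap_q; all names below are made up.
#include <cuda_runtime.h>
#include <cstdio>

#define SWAP_NB 64

__global__ void swap_sketch(int n, float *x, int incx, float *y, int incy) {
  int ind = threadIdx.x + blockDim.x * blockIdx.x;
  if (ind < n) {
    float tmp     = x[ind * incx];
    x[ind * incx] = y[ind * incy];
    y[ind * incy] = tmp;
  }
}

int main() {
  const int n = 5;
  float hx[n] = {0, 1, 2, 3, 4}, hy[n] = {10, 11, 12, 13, 14};
  float *dx, *dy;
  cudaMalloc(&dx, n * sizeof(float));
  cudaMalloc(&dy, n * sizeof(float));
  cudaMemcpy(dx, hx, n * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(dy, hy, n * sizeof(float), cudaMemcpyHostToDevice);
  swap_sketch<<<(n + SWAP_NB - 1) / SWAP_NB, SWAP_NB>>>(n, dx, 1, dy, 1);
  cudaMemcpy(hx, dx, n * sizeof(float), cudaMemcpyDeviceToHost);
  cudaMemcpy(hy, dy, n * sizeof(float), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n; ++i) std::printf("x[%d]=%g  y[%d]=%g\n", i, hx[i], i, hy[i]);
  cudaFree(dx);
  cudaFree(dy);
  return 0;
}
// ---------------------------------------------------------------------------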
|
3127009893bdb8e2002c99f17c07b0690cd1ee6a.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************************************
* Copyright (c) 2014, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <sparse.hpp>
#include <arith.hpp>
#include <common/cast.hpp>
#include <common/err_common.hpp>
#include <complex.hpp>
#include <copy.hpp>
#include <hipDataType.hpp>
#include <cusparse.hpp>
#include <cusparseModule.hpp>
#include <cusparse_descriptor_helpers.hpp>
#include <handle.hpp>
#include <kernel/sparse.hpp>
#include <lookup.hpp>
#include <math.hpp>
#include <platform.hpp>
#include <where.hpp>
#include <stdexcept>
#include <string>
namespace arrayfire {
namespace cuda {
using namespace common;
// hipsparseStatus_t hipsparseZdense2csr(hipsparseHandle_t handle,
// int m, int n,
// const hipsparseMatDescr_t descrA,
// const hipDoubleComplex *A, int lda,
// const int *nnzPerRow,
// hipDoubleComplex *csrValA,
// int *csrRowPtrA, int *csrColIndA)
template<typename T>
struct dense2csr_func_def_t {
typedef hipsparseStatus_t (*dense2csr_func_def)(hipsparseHandle_t, int, int,
const hipsparseMatDescr_t,
const T *, int, const int *,
T *, int *, int *);
};
// hipsparseStatus_t hipsparseZdense2csc(hipsparseHandle_t handle,
// int m, int n,
// const hipsparseMatDescr_t descrA,
// const hipDoubleComplex *A, int lda,
// const int *nnzPerCol,
// hipDoubleComplex *cscValA,
// int *cscRowIndA, int *cscColPtrA)
template<typename T>
struct dense2csc_func_def_t {
typedef hipsparseStatus_t (*dense2csc_func_def)(hipsparseHandle_t, int, int,
const hipsparseMatDescr_t,
const T *, int, const int *,
T *, int *, int *);
};
// hipsparseStatus_t hipsparseZcsr2dense(hipsparseHandle_t handle,
// int m, int n,
// const hipsparseMatDescr_t descrA,
// const hipDoubleComplex *csrValA,
// const int *csrRowPtrA,
// const int *csrColIndA,
// hipDoubleComplex *A, int lda)
template<typename T>
struct csr2dense_func_def_t {
typedef hipsparseStatus_t (*csr2dense_func_def)(hipsparseHandle_t, int, int,
const hipsparseMatDescr_t,
const T *, const int *,
const int *, T *, int);
};
// hipsparseStatus_t hipsparseZcsc2dense(hipsparseHandle_t handle,
// int m, int n,
// const hipsparseMatDescr_t descrA,
// const hipDoubleComplex *cscValA,
// const int *cscRowIndA,
// const int *cscColPtrA,
// hipDoubleComplex *A, int lda)
template<typename T>
struct csc2dense_func_def_t {
typedef hipsparseStatus_t (*csc2dense_func_def)(hipsparseHandle_t, int, int,
const hipsparseMatDescr_t,
const T *, const int *,
const int *, T *, int);
};
// hipsparseStatus_t hipsparseZnnz(hipsparseHandle_t handle,
// hipsparseDirection_t dirA,
// int m, int n,
// const hipsparseMatDescr_t descrA,
// const hipDoubleComplex *A, int lda,
// int *nnzPerRowColumn,
// int *nnzTotalDevHostPtr)
template<typename T>
struct nnz_func_def_t {
typedef hipsparseStatus_t (*nnz_func_def)(hipsparseHandle_t,
hipsparseDirection_t, int, int,
const hipsparseMatDescr_t,
const T *, int, int *, int *);
};
// hipsparseStatus_t hipsparseZgthr(hipsparseHandle_t handle,
// int nnz,
// const hipDoubleComplex *y,
// hipDoubleComplex *xVal, const int *xInd,
// hipsparseIndexBase_t idxBase)
template<typename T>
struct gthr_func_def_t {
typedef hipsparseStatus_t (*gthr_func_def)(hipsparseHandle_t, int, const T *,
T *, const int *,
hipsparseIndexBase_t);
};
#define SPARSE_FUNC_DEF(FUNC) \
template<typename T> \
typename FUNC##_func_def_t<T>::FUNC##_func_def FUNC##_func();
#define SPARSE_FUNC(FUNC, TYPE, PREFIX) \
template<> \
typename FUNC##_func_def_t<TYPE>::FUNC##_func_def FUNC##_func<TYPE>() { \
cusparseModule &_ = getCusparsePlugin(); \
return (FUNC##_func_def_t<TYPE>::FUNC##_func_def)( \
_.cusparse##PREFIX##FUNC); \
}
/// Newer versions of cusparse use matrix descriptor instead of types encoded in
/// their names
#if CUSPARSE_VERSION < 11300
SPARSE_FUNC_DEF(dense2csr)
SPARSE_FUNC(dense2csr, float, S)
SPARSE_FUNC(dense2csr, double, D)
SPARSE_FUNC(dense2csr, cfloat, C)
SPARSE_FUNC(dense2csr, cdouble, Z)
SPARSE_FUNC_DEF(dense2csc)
SPARSE_FUNC(dense2csc, float, S)
SPARSE_FUNC(dense2csc, double, D)
SPARSE_FUNC(dense2csc, cfloat, C)
SPARSE_FUNC(dense2csc, cdouble, Z)
SPARSE_FUNC_DEF(csr2dense)
SPARSE_FUNC(csr2dense, float, S)
SPARSE_FUNC(csr2dense, double, D)
SPARSE_FUNC(csr2dense, cfloat, C)
SPARSE_FUNC(csr2dense, cdouble, Z)
SPARSE_FUNC_DEF(csc2dense)
SPARSE_FUNC(csc2dense, float, S)
SPARSE_FUNC(csc2dense, double, D)
SPARSE_FUNC(csc2dense, cfloat, C)
SPARSE_FUNC(csc2dense, cdouble, Z)
SPARSE_FUNC_DEF(gthr)
SPARSE_FUNC(gthr, float, S)
SPARSE_FUNC(gthr, double, D)
SPARSE_FUNC(gthr, cfloat, C)
SPARSE_FUNC(gthr, cdouble, Z)
#endif
SPARSE_FUNC_DEF(nnz)
SPARSE_FUNC(nnz, float, S)
SPARSE_FUNC(nnz, double, D)
SPARSE_FUNC(nnz, cfloat, C)
SPARSE_FUNC(nnz, cdouble, Z)
#undef SPARSE_FUNC
#undef SPARSE_FUNC_DEF
// Partial template specialization of sparseConvertDenseToStorage for COO
// However, template specialization is not allowed
template<typename T>
SparseArray<T> sparseConvertDenseToCOO(const Array<T> &in) {
Array<uint> nonZeroIdx_ = where<T>(in);
Array<int> nonZeroIdx = cast<int, uint>(nonZeroIdx_);
dim_t nNZ = nonZeroIdx.elements();
Array<int> constDim = createValueArray<int>(dim4(nNZ), in.dims()[0]);
Array<int> rowIdx =
arithOp<int, af_mod_t>(nonZeroIdx, constDim, nonZeroIdx.dims());
Array<int> colIdx =
arithOp<int, af_div_t>(nonZeroIdx, constDim, nonZeroIdx.dims());
Array<T> values = copyArray<T>(in);
values.modDims(dim4(values.elements()));
values = lookup<T, int>(values, nonZeroIdx, 0);
return createArrayDataSparseArray<T>(in.dims(), values, rowIdx, colIdx,
AF_STORAGE_COO);
}
template<typename T, af_storage stype>
SparseArray<T> sparseConvertDenseToStorage(const Array<T> &in) {
const int M = in.dims()[0];
const int N = in.dims()[1];
cusparseModule &_ = getCusparsePlugin();
#if CUSPARSE_VERSION < 11300
// Create Sparse Matrix Descriptor
hipsparseMatDescr_t descr = 0;
CUSPARSE_CHECK(_.hipsparseCreateMatDescr(&descr));
_.hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL);
_.hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO);
int d = -1;
hipsparseDirection_t dir = HIPSPARSE_DIRECTION_ROW;
if (stype == AF_STORAGE_CSR) {
d = M;
dir = HIPSPARSE_DIRECTION_ROW;
} else {
d = N;
dir = HIPSPARSE_DIRECTION_COLUMN;
}
Array<int> nnzPerDir = createEmptyArray<int>(dim4(d));
int nNZ = -1;
CUSPARSE_CHECK(nnz_func<T>()(sparseHandle(), dir, M, N, descr, in.get(),
in.strides()[1], nnzPerDir.get(), &nNZ));
Array<int> rowIdx = createEmptyArray<int>(dim4());
Array<int> colIdx = createEmptyArray<int>(dim4());
if (stype == AF_STORAGE_CSR) {
rowIdx = createEmptyArray<int>(dim4(M + 1));
colIdx = createEmptyArray<int>(dim4(nNZ));
} else {
rowIdx = createEmptyArray<int>(dim4(nNZ));
colIdx = createEmptyArray<int>(dim4(N + 1));
}
Array<T> values = createEmptyArray<T>(dim4(nNZ));
if (stype == AF_STORAGE_CSR) {
CUSPARSE_CHECK(dense2csr_func<T>()(
sparseHandle(), M, N, descr, in.get(), in.strides()[1],
nnzPerDir.get(), values.get(), rowIdx.get(), colIdx.get()));
} else {
CUSPARSE_CHECK(dense2csc_func<T>()(
sparseHandle(), M, N, descr, in.get(), in.strides()[1],
nnzPerDir.get(), values.get(), rowIdx.get(), colIdx.get()));
}
// Destroy Sparse Matrix Descriptor
CUSPARSE_CHECK(_.hipsparseDestroyMatDescr(descr));
return createArrayDataSparseArray<T>(in.dims(), values, rowIdx, colIdx,
stype);
#else
auto matA = denMatDescriptor(in);
hipsparseSpMatDescr_t matB;
Array<int> d_offsets = createEmptyArray<int>(0);
if (stype == AF_STORAGE_CSR) {
d_offsets = createEmptyArray<int>(M + 1);
// Create sparse matrix B in CSR format
CUSPARSE_CHECK(
_.hipsparseCreateCsr(&matB, M, N, 0, d_offsets.get(), nullptr,
nullptr, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I,
HIPSPARSE_INDEX_BASE_ZERO, getType<T>()));
} else {
d_offsets = createEmptyArray<int>(N + 1);
CUSPARSE_CHECK(
_.hipsparseCreateCsc(&matB, M, N, 0, d_offsets.get(), nullptr,
nullptr, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I,
HIPSPARSE_INDEX_BASE_ZERO, getType<T>()));
}
// allocate an external buffer if needed
size_t bufferSize;
CUSPARSE_CHECK(_.hipsparseDenseToSparse_bufferSize(
sparseHandle(), matA, matB, HIPSPARSE_DENSETOSPARSE_ALG_DEFAULT,
&bufferSize));
auto dBuffer = memAlloc<char>(bufferSize);
// run the dense-to-sparse analysis phase
CUSPARSE_CHECK(_.hipsparseDenseToSparse_analysis(
sparseHandle(), matA, matB, HIPSPARSE_DENSETOSPARSE_ALG_DEFAULT,
dBuffer.get()));
// get number of non-zero elements
int64_t num_rows_tmp, num_cols_tmp, nnz;
CUSPARSE_CHECK(
_.hipsparseSpMatGetSize(matB, &num_rows_tmp, &num_cols_tmp, &nnz));
auto d_ind = createEmptyArray<int>(nnz);
auto d_values = createEmptyArray<T>(nnz);
// allocate CSR column indices and values
// reset offsets, column indices, and values pointers
if (stype == AF_STORAGE_CSR) {
// Create sparse matrix B in CSR format
// reset offsets, column indices, and values pointers
CUSPARSE_CHECK(_.hipsparseCsrSetPointers(matB, d_offsets.get(),
d_ind.get(), d_values.get()));
} else {
// reset offsets, column indices, and values pointers
CUSPARSE_CHECK(_.cusparseCscSetPointers(matB, d_offsets.get(),
d_ind.get(), d_values.get()));
}
// execute the dense-to-sparse conversion
CUSPARSE_CHECK(_.hipsparseDenseToSparse_convert(
sparseHandle(), matA, matB, HIPSPARSE_DENSETOSPARSE_ALG_DEFAULT,
dBuffer.get()));
if (stype == AF_STORAGE_CSR) {
size_t pBufferSizeInBytes = 0;
auto desc = make_handle<hipsparseMatDescr_t>();
CUSPARSE_CHECK(_.hipsparseXcsrsort_bufferSizeExt(
sparseHandle(), M, N, nnz, d_offsets.get(), d_ind.get(),
&pBufferSizeInBytes));
auto pBuffer = memAlloc<char>(pBufferSizeInBytes);
Array<int> P = createEmptyArray<int>(nnz);
CUSPARSE_CHECK(
_.hipsparseCreateIdentityPermutation(sparseHandle(), nnz, P.get()));
CUSPARSE_CHECK(_.hipsparseXcsrsort(
sparseHandle(), M, N, nnz, desc, (int *)d_offsets.get(),
(int *)d_ind.get(), P.get(), pBuffer.get()));
d_values = lookup(d_values, P, 0);
return createArrayDataSparseArray<T>(in.dims(), d_values, d_offsets,
d_ind, stype, false);
} else {
return createArrayDataSparseArray<T>(in.dims(), d_values, d_ind,
d_offsets, stype, false);
}
#endif
}
// Partial template specialization of sparseConvertStorageToDense for COO
// However, template specialization is not allowed
template<typename T>
Array<T> sparseConvertCOOToDense(const SparseArray<T> &in) {
Array<T> dense = createValueArray<T>(in.dims(), scalar<T>(0));
const Array<T> values = in.getValues();
const Array<int> rowIdx = in.getRowIdx();
const Array<int> colIdx = in.getColIdx();
kernel::coo2dense<T>(dense, values, rowIdx, colIdx);
return dense;
}
template<typename T, af_storage stype>
Array<T> sparseConvertStorageToDense(const SparseArray<T> &in) {
// Create Sparse Matrix Descriptor
cusparseModule &_ = getCusparsePlugin();
#if CUSPARSE_VERSION < 11300
hipsparseMatDescr_t descr = 0;
CUSPARSE_CHECK(_.hipsparseCreateMatDescr(&descr));
_.hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL);
_.hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO);
int M = in.dims()[0];
int N = in.dims()[1];
Array<T> dense = createValueArray<T>(in.dims(), scalar<T>(0));
int d_strides1 = dense.strides()[1];
if (stype == AF_STORAGE_CSR) {
CUSPARSE_CHECK(
csr2dense_func<T>()(sparseHandle(), M, N, descr,
in.getValues().get(), in.getRowIdx().get(),
in.getColIdx().get(), dense.get(), d_strides1));
} else {
CUSPARSE_CHECK(
csc2dense_func<T>()(sparseHandle(), M, N, descr,
in.getValues().get(), in.getRowIdx().get(),
in.getColIdx().get(), dense.get(), d_strides1));
}
// Destroy Sparse Matrix Descriptor
CUSPARSE_CHECK(_.hipsparseDestroyMatDescr(descr));
#else
unique_handle<hipsparseSpMatDescr_t> inhandle = cusparseDescriptor(in);
Array<T> dense = createEmptyArray<T>(in.dims());
unique_handle<hipsparseDnMatDescr_t> outhandle = denMatDescriptor(dense);
size_t bufferSize = 0;
_.hipsparseSparseToDense_bufferSize(sparseHandle(), inhandle, outhandle,
HIPSPARSE_SPARSETODENSE_ALG_DEFAULT,
&bufferSize);
auto dBuffer = memAlloc<char>(bufferSize);
_.hipsparseSparseToDense(sparseHandle(), inhandle, outhandle,
HIPSPARSE_SPARSETODENSE_ALG_DEFAULT, dBuffer.get());
#endif
return dense;
}
template<typename T, af_storage dest, af_storage src>
SparseArray<T> sparseConvertStorageToStorage(const SparseArray<T> &in) {
using std::shared_ptr;
in.eval();
int nNZ = in.getNNZ();
SparseArray<T> converted = createEmptySparseArray<T>(in.dims(), nNZ, dest);
cusparseModule &_ = getCusparsePlugin();
if (src == AF_STORAGE_CSR && dest == AF_STORAGE_COO) {
// Copy colIdx as is
CUDA_CHECK(
hipMemcpyAsync(converted.getColIdx().get(), in.getColIdx().get(),
in.getColIdx().elements() * sizeof(int),
hipMemcpyDeviceToDevice, getActiveStream()));
// cusparse function to expand compressed row into coordinate
CUSPARSE_CHECK(_.hipsparseXcsr2coo(
sparseHandle(), in.getRowIdx().get(), nNZ, in.dims()[0],
converted.getRowIdx().get(), HIPSPARSE_INDEX_BASE_ZERO));
// Call sort
size_t pBufferSizeInBytes = 0;
CUSPARSE_CHECK(_.hipsparseXcoosort_bufferSizeExt(
sparseHandle(), in.dims()[0], in.dims()[1], nNZ,
converted.getRowIdx().get(), converted.getColIdx().get(),
&pBufferSizeInBytes));
auto pBuffer = memAlloc<char>(pBufferSizeInBytes);
// shared_ptr<int> P(memAlloc<int>(nNZ).release(), memFree<int>);
Array<int> P = createEmptyArray<int>(nNZ);
CUSPARSE_CHECK(
_.hipsparseCreateIdentityPermutation(sparseHandle(), nNZ, P.get()));
CUSPARSE_CHECK(_.hipsparseXcoosortByRow(
sparseHandle(), in.dims()[0], in.dims()[1], nNZ,
converted.getRowIdx().get(), converted.getColIdx().get(), P.get(),
pBuffer.get()));
converted.getValues() = lookup<T, int>(in.getValues(), P, 0);
} else if (src == AF_STORAGE_COO && dest == AF_STORAGE_CSR) {
// The cusparse csr sort function is not behaving correctly.
// So the work around is to convert the COO into row major and then
// convert it to CSR
int M = in.dims()[0];
int N = in.dims()[1];
// Deep copy input into temporary COO Row Major
SparseArray<T> cooT = createArrayDataSparseArray<T>(
in.dims(), in.getValues(), in.getRowIdx(), in.getColIdx(),
in.getStorage(), true);
// Call sort to convert column major to row major
{
size_t pBufferSizeInBytes = 0;
CUSPARSE_CHECK(_.hipsparseXcoosort_bufferSizeExt(
sparseHandle(), M, N, nNZ, cooT.getRowIdx().get(),
cooT.getColIdx().get(), &pBufferSizeInBytes));
auto pBuffer = memAlloc<char>(pBufferSizeInBytes);
Array<int> P = createEmptyArray<int>(nNZ);
CUSPARSE_CHECK(_.hipsparseCreateIdentityPermutation(sparseHandle(),
nNZ, P.get()));
CUSPARSE_CHECK(_.hipsparseXcoosortByRow(
sparseHandle(), M, N, nNZ, cooT.getRowIdx().get(),
cooT.getColIdx().get(), P.get(), pBuffer.get()));
converted.getValues() = lookup<T, int>(in.getValues(), P, 0);
}
// Copy values and colIdx as is
copyArray<int, int>(converted.getColIdx(), cooT.getColIdx());
// cusparse function to compress row from coordinate
CUSPARSE_CHECK(_.hipsparseXcoo2csr(
sparseHandle(), cooT.getRowIdx().get(), nNZ, M,
converted.getRowIdx().get(), HIPSPARSE_INDEX_BASE_ZERO));
// No need to call CSRSORT
} else {
// Should never come here
AF_ERROR("CUDA Backend invalid conversion combination",
AF_ERR_NOT_SUPPORTED);
}
return converted;
}
#define INSTANTIATE_TO_STORAGE(T, S) \
template SparseArray<T> \
sparseConvertStorageToStorage<T, S, AF_STORAGE_CSR>( \
const SparseArray<T> &in); \
template SparseArray<T> \
sparseConvertStorageToStorage<T, S, AF_STORAGE_CSC>( \
const SparseArray<T> &in); \
template SparseArray<T> \
sparseConvertStorageToStorage<T, S, AF_STORAGE_COO>( \
const SparseArray<T> &in);
#define INSTANTIATE_COO_SPECIAL(T) \
template<> \
SparseArray<T> sparseConvertDenseToStorage<T, AF_STORAGE_COO>( \
const Array<T> &in) { \
return sparseConvertDenseToCOO<T>(in); \
} \
template<> \
Array<T> sparseConvertStorageToDense<T, AF_STORAGE_COO>( \
const SparseArray<T> &in) { \
return sparseConvertCOOToDense<T>(in); \
}
#define INSTANTIATE_SPARSE(T) \
template SparseArray<T> sparseConvertDenseToStorage<T, AF_STORAGE_CSR>( \
const Array<T> &in); \
template SparseArray<T> sparseConvertDenseToStorage<T, AF_STORAGE_CSC>( \
const Array<T> &in); \
\
template Array<T> sparseConvertStorageToDense<T, AF_STORAGE_CSR>( \
const SparseArray<T> &in); \
template Array<T> sparseConvertStorageToDense<T, AF_STORAGE_CSC>( \
const SparseArray<T> &in); \
\
INSTANTIATE_COO_SPECIAL(T) \
\
INSTANTIATE_TO_STORAGE(T, AF_STORAGE_CSR) \
INSTANTIATE_TO_STORAGE(T, AF_STORAGE_CSC) \
INSTANTIATE_TO_STORAGE(T, AF_STORAGE_COO)
INSTANTIATE_SPARSE(float)
INSTANTIATE_SPARSE(double)
INSTANTIATE_SPARSE(cfloat)
INSTANTIATE_SPARSE(cdouble)
#undef INSTANTIATE_TO_STORAGE
#undef INSTANTIATE_COO_SPECIAL
#undef INSTANTIATE_SPARSE
} // namespace cuda
} // namespace arrayfire
| 3127009893bdb8e2002c99f17c07b0690cd1ee6a.cu | /*******************************************************
* Copyright (c) 2014, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <sparse.hpp>
#include <arith.hpp>
#include <common/cast.hpp>
#include <common/err_common.hpp>
#include <complex.hpp>
#include <copy.hpp>
#include <cudaDataType.hpp>
#include <cusparse.hpp>
#include <cusparseModule.hpp>
#include <cusparse_descriptor_helpers.hpp>
#include <handle.hpp>
#include <kernel/sparse.hpp>
#include <lookup.hpp>
#include <math.hpp>
#include <platform.hpp>
#include <where.hpp>
#include <stdexcept>
#include <string>
namespace arrayfire {
namespace cuda {
using namespace common;
// cusparseStatus_t cusparseZdense2csr(cusparseHandle_t handle,
// int m, int n,
// const cusparseMatDescr_t descrA,
// const cuDoubleComplex *A, int lda,
// const int *nnzPerRow,
// cuDoubleComplex *csrValA,
// int *csrRowPtrA, int *csrColIndA)
template<typename T>
struct dense2csr_func_def_t {
typedef cusparseStatus_t (*dense2csr_func_def)(cusparseHandle_t, int, int,
const cusparseMatDescr_t,
const T *, int, const int *,
T *, int *, int *);
};
// cusparseStatus_t cusparseZdense2csc(cusparseHandle_t handle,
// int m, int n,
// const cusparseMatDescr_t descrA,
// const cuDoubleComplex *A, int lda,
// const int *nnzPerCol,
// cuDoubleComplex *cscValA,
// int *cscRowIndA, int *cscColPtrA)
template<typename T>
struct dense2csc_func_def_t {
typedef cusparseStatus_t (*dense2csc_func_def)(cusparseHandle_t, int, int,
const cusparseMatDescr_t,
const T *, int, const int *,
T *, int *, int *);
};
// cusparseStatus_t cusparseZcsr2dense(cusparseHandle_t handle,
// int m, int n,
// const cusparseMatDescr_t descrA,
// const cuDoubleComplex *csrValA,
// const int *csrRowPtrA,
// const int *csrColIndA,
// cuDoubleComplex *A, int lda)
template<typename T>
struct csr2dense_func_def_t {
typedef cusparseStatus_t (*csr2dense_func_def)(cusparseHandle_t, int, int,
const cusparseMatDescr_t,
const T *, const int *,
const int *, T *, int);
};
// cusparseStatus_t cusparseZcsc2dense(cusparseHandle_t handle,
// int m, int n,
// const cusparseMatDescr_t descrA,
// const cuDoubleComplex *cscValA,
// const int *cscRowIndA,
// const int *cscColPtrA,
// cuDoubleComplex *A, int lda)
template<typename T>
struct csc2dense_func_def_t {
typedef cusparseStatus_t (*csc2dense_func_def)(cusparseHandle_t, int, int,
const cusparseMatDescr_t,
const T *, const int *,
const int *, T *, int);
};
// cusparseStatus_t cusparseZnnz(cusparseHandle_t handle,
// cusparseDirection_t dirA,
// int m, int n,
// const cusparseMatDescr_t descrA,
// const cuDoubleComplex *A, int lda,
// int *nnzPerRowColumn,
// int *nnzTotalDevHostPtr)
template<typename T>
struct nnz_func_def_t {
typedef cusparseStatus_t (*nnz_func_def)(cusparseHandle_t,
cusparseDirection_t, int, int,
const cusparseMatDescr_t,
const T *, int, int *, int *);
};
// cusparseStatus_t cusparseZgthr(cusparseHandle_t handle,
// int nnz,
// const cuDoubleComplex *y,
// cuDoubleComplex *xVal, const int *xInd,
// cusparseIndexBase_t idxBase)
template<typename T>
struct gthr_func_def_t {
typedef cusparseStatus_t (*gthr_func_def)(cusparseHandle_t, int, const T *,
T *, const int *,
cusparseIndexBase_t);
};
#define SPARSE_FUNC_DEF(FUNC) \
template<typename T> \
typename FUNC##_func_def_t<T>::FUNC##_func_def FUNC##_func();
#define SPARSE_FUNC(FUNC, TYPE, PREFIX) \
template<> \
typename FUNC##_func_def_t<TYPE>::FUNC##_func_def FUNC##_func<TYPE>() { \
cusparseModule &_ = getCusparsePlugin(); \
return (FUNC##_func_def_t<TYPE>::FUNC##_func_def)( \
_.cusparse##PREFIX##FUNC); \
}
/// Newer versions of cusparse use matrix descriptor instead of types encoded in
/// their names
#if CUSPARSE_VERSION < 11300
SPARSE_FUNC_DEF(dense2csr)
SPARSE_FUNC(dense2csr, float, S)
SPARSE_FUNC(dense2csr, double, D)
SPARSE_FUNC(dense2csr, cfloat, C)
SPARSE_FUNC(dense2csr, cdouble, Z)
SPARSE_FUNC_DEF(dense2csc)
SPARSE_FUNC(dense2csc, float, S)
SPARSE_FUNC(dense2csc, double, D)
SPARSE_FUNC(dense2csc, cfloat, C)
SPARSE_FUNC(dense2csc, cdouble, Z)
SPARSE_FUNC_DEF(csr2dense)
SPARSE_FUNC(csr2dense, float, S)
SPARSE_FUNC(csr2dense, double, D)
SPARSE_FUNC(csr2dense, cfloat, C)
SPARSE_FUNC(csr2dense, cdouble, Z)
SPARSE_FUNC_DEF(csc2dense)
SPARSE_FUNC(csc2dense, float, S)
SPARSE_FUNC(csc2dense, double, D)
SPARSE_FUNC(csc2dense, cfloat, C)
SPARSE_FUNC(csc2dense, cdouble, Z)
SPARSE_FUNC_DEF(gthr)
SPARSE_FUNC(gthr, float, S)
SPARSE_FUNC(gthr, double, D)
SPARSE_FUNC(gthr, cfloat, C)
SPARSE_FUNC(gthr, cdouble, Z)
#endif
SPARSE_FUNC_DEF(nnz)
SPARSE_FUNC(nnz, float, S)
SPARSE_FUNC(nnz, double, D)
SPARSE_FUNC(nnz, cfloat, C)
SPARSE_FUNC(nnz, cdouble, Z)
#undef SPARSE_FUNC
#undef SPARSE_FUNC_DEF
// Partial template specialization of sparseConvertDenseToStorage for COO
// However, template specialization is not allowed
template<typename T>
SparseArray<T> sparseConvertDenseToCOO(const Array<T> &in) {
Array<uint> nonZeroIdx_ = where<T>(in);
Array<int> nonZeroIdx = cast<int, uint>(nonZeroIdx_);
dim_t nNZ = nonZeroIdx.elements();
Array<int> constDim = createValueArray<int>(dim4(nNZ), in.dims()[0]);
Array<int> rowIdx =
arithOp<int, af_mod_t>(nonZeroIdx, constDim, nonZeroIdx.dims());
Array<int> colIdx =
arithOp<int, af_div_t>(nonZeroIdx, constDim, nonZeroIdx.dims());
Array<T> values = copyArray<T>(in);
values.modDims(dim4(values.elements()));
values = lookup<T, int>(values, nonZeroIdx, 0);
return createArrayDataSparseArray<T>(in.dims(), values, rowIdx, colIdx,
AF_STORAGE_COO);
}
template<typename T, af_storage stype>
SparseArray<T> sparseConvertDenseToStorage(const Array<T> &in) {
const int M = in.dims()[0];
const int N = in.dims()[1];
cusparseModule &_ = getCusparsePlugin();
#if CUSPARSE_VERSION < 11300
// Create Sparse Matrix Descriptor
cusparseMatDescr_t descr = 0;
CUSPARSE_CHECK(_.cusparseCreateMatDescr(&descr));
_.cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL);
_.cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO);
int d = -1;
cusparseDirection_t dir = CUSPARSE_DIRECTION_ROW;
if (stype == AF_STORAGE_CSR) {
d = M;
dir = CUSPARSE_DIRECTION_ROW;
} else {
d = N;
dir = CUSPARSE_DIRECTION_COLUMN;
}
Array<int> nnzPerDir = createEmptyArray<int>(dim4(d));
int nNZ = -1;
CUSPARSE_CHECK(nnz_func<T>()(sparseHandle(), dir, M, N, descr, in.get(),
in.strides()[1], nnzPerDir.get(), &nNZ));
Array<int> rowIdx = createEmptyArray<int>(dim4());
Array<int> colIdx = createEmptyArray<int>(dim4());
if (stype == AF_STORAGE_CSR) {
rowIdx = createEmptyArray<int>(dim4(M + 1));
colIdx = createEmptyArray<int>(dim4(nNZ));
} else {
rowIdx = createEmptyArray<int>(dim4(nNZ));
colIdx = createEmptyArray<int>(dim4(N + 1));
}
Array<T> values = createEmptyArray<T>(dim4(nNZ));
if (stype == AF_STORAGE_CSR) {
CUSPARSE_CHECK(dense2csr_func<T>()(
sparseHandle(), M, N, descr, in.get(), in.strides()[1],
nnzPerDir.get(), values.get(), rowIdx.get(), colIdx.get()));
} else {
CUSPARSE_CHECK(dense2csc_func<T>()(
sparseHandle(), M, N, descr, in.get(), in.strides()[1],
nnzPerDir.get(), values.get(), rowIdx.get(), colIdx.get()));
}
// Destroy Sparse Matrix Descriptor
CUSPARSE_CHECK(_.cusparseDestroyMatDescr(descr));
return createArrayDataSparseArray<T>(in.dims(), values, rowIdx, colIdx,
stype);
#else
auto matA = denMatDescriptor(in);
cusparseSpMatDescr_t matB;
Array<int> d_offsets = createEmptyArray<int>(0);
if (stype == AF_STORAGE_CSR) {
d_offsets = createEmptyArray<int>(M + 1);
// Create sparse matrix B in CSR format
CUSPARSE_CHECK(
_.cusparseCreateCsr(&matB, M, N, 0, d_offsets.get(), nullptr,
nullptr, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
CUSPARSE_INDEX_BASE_ZERO, getType<T>()));
} else {
d_offsets = createEmptyArray<int>(N + 1);
CUSPARSE_CHECK(
_.cusparseCreateCsc(&matB, M, N, 0, d_offsets.get(), nullptr,
nullptr, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
CUSPARSE_INDEX_BASE_ZERO, getType<T>()));
}
// allocate an external buffer if needed
size_t bufferSize;
CUSPARSE_CHECK(_.cusparseDenseToSparse_bufferSize(
sparseHandle(), matA, matB, CUSPARSE_DENSETOSPARSE_ALG_DEFAULT,
&bufferSize));
auto dBuffer = memAlloc<char>(bufferSize);
// run the dense-to-sparse analysis phase
CUSPARSE_CHECK(_.cusparseDenseToSparse_analysis(
sparseHandle(), matA, matB, CUSPARSE_DENSETOSPARSE_ALG_DEFAULT,
dBuffer.get()));
// get number of non-zero elements
int64_t num_rows_tmp, num_cols_tmp, nnz;
CUSPARSE_CHECK(
_.cusparseSpMatGetSize(matB, &num_rows_tmp, &num_cols_tmp, &nnz));
auto d_ind = createEmptyArray<int>(nnz);
auto d_values = createEmptyArray<T>(nnz);
// allocate CSR column indices and values
// reset offsets, column indices, and values pointers
if (stype == AF_STORAGE_CSR) {
// Create sparse matrix B in CSR format
// reset offsets, column indices, and values pointers
CUSPARSE_CHECK(_.cusparseCsrSetPointers(matB, d_offsets.get(),
d_ind.get(), d_values.get()));
} else {
// reset offsets, column indices, and values pointers
CUSPARSE_CHECK(_.cusparseCscSetPointers(matB, d_offsets.get(),
d_ind.get(), d_values.get()));
}
// execute the dense-to-sparse conversion
CUSPARSE_CHECK(_.cusparseDenseToSparse_convert(
sparseHandle(), matA, matB, CUSPARSE_DENSETOSPARSE_ALG_DEFAULT,
dBuffer.get()));
if (stype == AF_STORAGE_CSR) {
size_t pBufferSizeInBytes = 0;
auto desc = make_handle<cusparseMatDescr_t>();
CUSPARSE_CHECK(_.cusparseXcsrsort_bufferSizeExt(
sparseHandle(), M, N, nnz, d_offsets.get(), d_ind.get(),
&pBufferSizeInBytes));
auto pBuffer = memAlloc<char>(pBufferSizeInBytes);
Array<int> P = createEmptyArray<int>(nnz);
CUSPARSE_CHECK(
_.cusparseCreateIdentityPermutation(sparseHandle(), nnz, P.get()));
CUSPARSE_CHECK(_.cusparseXcsrsort(
sparseHandle(), M, N, nnz, desc, (int *)d_offsets.get(),
(int *)d_ind.get(), P.get(), pBuffer.get()));
d_values = lookup(d_values, P, 0);
return createArrayDataSparseArray<T>(in.dims(), d_values, d_offsets,
d_ind, stype, false);
} else {
return createArrayDataSparseArray<T>(in.dims(), d_values, d_ind,
d_offsets, stype, false);
}
#endif
}
// Partial template specialization of sparseConvertStorageToDense for COO
// However, template specialization is not allowed
template<typename T>
Array<T> sparseConvertCOOToDense(const SparseArray<T> &in) {
Array<T> dense = createValueArray<T>(in.dims(), scalar<T>(0));
const Array<T> values = in.getValues();
const Array<int> rowIdx = in.getRowIdx();
const Array<int> colIdx = in.getColIdx();
kernel::coo2dense<T>(dense, values, rowIdx, colIdx);
return dense;
}
template<typename T, af_storage stype>
Array<T> sparseConvertStorageToDense(const SparseArray<T> &in) {
// Create Sparse Matrix Descriptor
cusparseModule &_ = getCusparsePlugin();
#if CUSPARSE_VERSION < 11300
cusparseMatDescr_t descr = 0;
CUSPARSE_CHECK(_.cusparseCreateMatDescr(&descr));
_.cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL);
_.cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO);
int M = in.dims()[0];
int N = in.dims()[1];
Array<T> dense = createValueArray<T>(in.dims(), scalar<T>(0));
int d_strides1 = dense.strides()[1];
if (stype == AF_STORAGE_CSR) {
CUSPARSE_CHECK(
csr2dense_func<T>()(sparseHandle(), M, N, descr,
in.getValues().get(), in.getRowIdx().get(),
in.getColIdx().get(), dense.get(), d_strides1));
} else {
CUSPARSE_CHECK(
csc2dense_func<T>()(sparseHandle(), M, N, descr,
in.getValues().get(), in.getRowIdx().get(),
in.getColIdx().get(), dense.get(), d_strides1));
}
// Destroy Sparse Matrix Descriptor
CUSPARSE_CHECK(_.cusparseDestroyMatDescr(descr));
#else
unique_handle<cusparseSpMatDescr_t> inhandle = cusparseDescriptor(in);
Array<T> dense = createEmptyArray<T>(in.dims());
unique_handle<cusparseDnMatDescr_t> outhandle = denMatDescriptor(dense);
size_t bufferSize = 0;
_.cusparseSparseToDense_bufferSize(sparseHandle(), inhandle, outhandle,
CUSPARSE_SPARSETODENSE_ALG_DEFAULT,
&bufferSize);
auto dBuffer = memAlloc<char>(bufferSize);
_.cusparseSparseToDense(sparseHandle(), inhandle, outhandle,
CUSPARSE_SPARSETODENSE_ALG_DEFAULT, dBuffer.get());
#endif
return dense;
}
template<typename T, af_storage dest, af_storage src>
SparseArray<T> sparseConvertStorageToStorage(const SparseArray<T> &in) {
using std::shared_ptr;
in.eval();
int nNZ = in.getNNZ();
SparseArray<T> converted = createEmptySparseArray<T>(in.dims(), nNZ, dest);
cusparseModule &_ = getCusparsePlugin();
if (src == AF_STORAGE_CSR && dest == AF_STORAGE_COO) {
// Copy colIdx as is
CUDA_CHECK(
cudaMemcpyAsync(converted.getColIdx().get(), in.getColIdx().get(),
in.getColIdx().elements() * sizeof(int),
cudaMemcpyDeviceToDevice, getActiveStream()));
// cusparse function to expand compressed row into coordinate
CUSPARSE_CHECK(_.cusparseXcsr2coo(
sparseHandle(), in.getRowIdx().get(), nNZ, in.dims()[0],
converted.getRowIdx().get(), CUSPARSE_INDEX_BASE_ZERO));
// Call sort
size_t pBufferSizeInBytes = 0;
CUSPARSE_CHECK(_.cusparseXcoosort_bufferSizeExt(
sparseHandle(), in.dims()[0], in.dims()[1], nNZ,
converted.getRowIdx().get(), converted.getColIdx().get(),
&pBufferSizeInBytes));
auto pBuffer = memAlloc<char>(pBufferSizeInBytes);
// shared_ptr<int> P(memAlloc<int>(nNZ).release(), memFree<int>);
Array<int> P = createEmptyArray<int>(nNZ);
CUSPARSE_CHECK(
_.cusparseCreateIdentityPermutation(sparseHandle(), nNZ, P.get()));
CUSPARSE_CHECK(_.cusparseXcoosortByRow(
sparseHandle(), in.dims()[0], in.dims()[1], nNZ,
converted.getRowIdx().get(), converted.getColIdx().get(), P.get(),
pBuffer.get()));
converted.getValues() = lookup<T, int>(in.getValues(), P, 0);
} else if (src == AF_STORAGE_COO && dest == AF_STORAGE_CSR) {
// The cusparse csr sort function is not behaving correctly.
// So the work around is to convert the COO into row major and then
// convert it to CSR
int M = in.dims()[0];
int N = in.dims()[1];
// Deep copy input into temporary COO Row Major
SparseArray<T> cooT = createArrayDataSparseArray<T>(
in.dims(), in.getValues(), in.getRowIdx(), in.getColIdx(),
in.getStorage(), true);
// Call sort to convert column major to row major
{
size_t pBufferSizeInBytes = 0;
CUSPARSE_CHECK(_.cusparseXcoosort_bufferSizeExt(
sparseHandle(), M, N, nNZ, cooT.getRowIdx().get(),
cooT.getColIdx().get(), &pBufferSizeInBytes));
auto pBuffer = memAlloc<char>(pBufferSizeInBytes);
Array<int> P = createEmptyArray<int>(nNZ);
CUSPARSE_CHECK(_.cusparseCreateIdentityPermutation(sparseHandle(),
nNZ, P.get()));
CUSPARSE_CHECK(_.cusparseXcoosortByRow(
sparseHandle(), M, N, nNZ, cooT.getRowIdx().get(),
cooT.getColIdx().get(), P.get(), pBuffer.get()));
converted.getValues() = lookup<T, int>(in.getValues(), P, 0);
}
// Copy values and colIdx as is
copyArray<int, int>(converted.getColIdx(), cooT.getColIdx());
// cusparse function to compress row from coordinate
CUSPARSE_CHECK(_.cusparseXcoo2csr(
sparseHandle(), cooT.getRowIdx().get(), nNZ, M,
converted.getRowIdx().get(), CUSPARSE_INDEX_BASE_ZERO));
// No need to call CSRSORT
} else {
// Should never come here
AF_ERROR("CUDA Backend invalid conversion combination",
AF_ERR_NOT_SUPPORTED);
}
return converted;
}
#define INSTANTIATE_TO_STORAGE(T, S) \
template SparseArray<T> \
sparseConvertStorageToStorage<T, S, AF_STORAGE_CSR>( \
const SparseArray<T> &in); \
template SparseArray<T> \
sparseConvertStorageToStorage<T, S, AF_STORAGE_CSC>( \
const SparseArray<T> &in); \
template SparseArray<T> \
sparseConvertStorageToStorage<T, S, AF_STORAGE_COO>( \
const SparseArray<T> &in);
#define INSTANTIATE_COO_SPECIAL(T) \
template<> \
SparseArray<T> sparseConvertDenseToStorage<T, AF_STORAGE_COO>( \
const Array<T> &in) { \
return sparseConvertDenseToCOO<T>(in); \
} \
template<> \
Array<T> sparseConvertStorageToDense<T, AF_STORAGE_COO>( \
const SparseArray<T> &in) { \
return sparseConvertCOOToDense<T>(in); \
}
#define INSTANTIATE_SPARSE(T) \
template SparseArray<T> sparseConvertDenseToStorage<T, AF_STORAGE_CSR>( \
const Array<T> &in); \
template SparseArray<T> sparseConvertDenseToStorage<T, AF_STORAGE_CSC>( \
const Array<T> &in); \
\
template Array<T> sparseConvertStorageToDense<T, AF_STORAGE_CSR>( \
const SparseArray<T> &in); \
template Array<T> sparseConvertStorageToDense<T, AF_STORAGE_CSC>( \
const SparseArray<T> &in); \
\
INSTANTIATE_COO_SPECIAL(T) \
\
INSTANTIATE_TO_STORAGE(T, AF_STORAGE_CSR) \
INSTANTIATE_TO_STORAGE(T, AF_STORAGE_CSC) \
INSTANTIATE_TO_STORAGE(T, AF_STORAGE_COO)
INSTANTIATE_SPARSE(float)
INSTANTIATE_SPARSE(double)
INSTANTIATE_SPARSE(cfloat)
INSTANTIATE_SPARSE(cdouble)
#undef INSTANTIATE_TO_STORAGE
#undef INSTANTIATE_COO_SPECIAL
#undef INSTANTIATE_SPARSE
} // namespace cuda
} // namespace arrayfire
|
13604156c2700e28b54e57229db5854de0f07e46.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
/* Time */
#include <sys/time.h>
#include <sys/resource.h>
static struct timeval tv0;
double getMicroSeconds()
{
double t;
gettimeofday(&tv0, (struct timezone*)0);
t = ((tv0.tv_usec) + (tv0.tv_sec)*1000000);
return (t);
}
void init_seed()
{
int seedi=1;
FILE *fd;
/* Generated random values between 0.00 - 1.00 */
fd = fopen("/dev/urandom", "r");
fread( &seedi, sizeof(int), 1, fd);
fclose (fd);
srand( seedi );
}
void init2Drand(float **buffer, int n)
{
int i, j;
for (i=0; i<n; i++)
for(j=0; j<n; j++)
buffer[i][j] = 500.0*(float(rand())/RAND_MAX)-500.0; /* [-500 500]*/
}
float *getmemory1D( int nx )
{
int i;
float *buffer;
if( (buffer=(float *)malloc(nx*sizeof(float *)))== NULL )
{
fprintf( stderr, "ERROR in memory allocation\n" );
return( NULL );
}
for( i=0; i<nx; i++ )
buffer[i] = 0.0;
return( buffer );
}
float **getmemory2D(int nx, int ny)
{
int i,j;
float **buffer;
if( (buffer=(float **)malloc(nx*sizeof(float *)))== NULL )
{
fprintf( stderr, "ERROR in memory allocation\n" );
return( NULL );
}
if( (buffer[0]=(float *)malloc(nx*ny*sizeof(float)))==NULL )
{
fprintf( stderr, "ERROR in memory allocation\n" );
free( buffer );
return( NULL );
}
for( i=1; i<nx; i++ )
{
buffer[i] = buffer[i-1] + ny;
}
for( i=0; i<nx; i++ )
for( j=0; j<ny; j++ )
{
buffer[i][j] = 0.0;
}
return( buffer );
}
/********************************************************************************/
/********************************************************************************/
/*
 * Transpose 2D version
*/
void transpose2D(float **in, float **out, int n)
{
int i, j;
for(j=0; j < n; j++)
for(i=0; i < n; i++)
out[j][i] = in[i][j];
}
/*
 * Transpose 1D version
*/
void transpose1D(float *in, float *out, int n)
{
int i, j;
for(j=0; j < n; j++)
for(i=0; i < n; i++)
out[j*n+i] = in[i*n+j];
}
/*
 * Transpose CUDA version
*/
#define NTHREADS1D 256
#define TILE_DIM 16
__global__ void transpose_device_v2(float *in, float *out, int rows, int cols)
{
// Aligned or misaligned accesses??
int i, j;
i = blockIdx.x * blockDim.x + threadIdx.x;
j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < rows && j < cols)
out [ i * cols + j ] = in [ j * rows + i ];
}
int check(float *GPU, float **CPU, int n)
{
int i,j;
for (i=0; i<n; i++){
for(j = 0; j < n; j++){
if(GPU[i * n + j]!=CPU[i][j]) return(1);
}
}
return(0);
}
void print_matrix(float *M, int hM, int wM)
{
int i,j;
for (i=0; i<hM; i++){
// printf("Line %i: ", i);
for (j=0; j<wM; j++)
printf("%4.1f ", M[i*wM+j]);
printf("\n");
}
}
int main(int argc, char **argv)
{
int n;
float **array2D, **array2D_trans;
float *array2D_trans_GPU;
double t0;
float size_block = 16;
if (argc==2)
n = atoi(argv[1]);
else {
n = 4096;
printf("./exec n (by default n=%i)\n", n);
}
/* Initialization */
init_seed();
array2D = getmemory2D(n,n);
array2D_trans = getmemory2D(n,n);
init2Drand(array2D, n);
/* Transpose 2D version */
t0 = getMicroSeconds();
transpose2D(array2D, array2D_trans, n);
printf("Transpose version 2D: %f MB/s\n", n*n*sizeof(float)/((getMicroSeconds()-t0)/1000000)/1024/1024);
/* CUDA version */
float *darray2D, *darray2D_trans;
hipMalloc((void**)&darray2D, n*n*sizeof(float));
hipMemcpy(darray2D, array2D[0], n*n*sizeof(float), hipMemcpyHostToDevice); /* copy the contiguous data block, not the array of row pointers */
hipMalloc((void**)&darray2D_trans, n*n*sizeof(float));
dim3 dimBlock(size_block,size_block);
int blocks = ceil(n/size_block);
dim3 dimGrid(blocks,blocks);
t0 = getMicroSeconds();
hipLaunchKernelGGL(( transpose_device_v2), dim3(dimGrid),dim3(dimBlock), 0, 0, darray2D, darray2D_trans, n, n);
array2D_trans_GPU = getmemory1D(n*n); /* holds the full n x n result copied back below */
hipMemcpy(array2D_trans_GPU, darray2D_trans, n*n*sizeof(float), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
printf("Transpose kernel version: %f MB/s\n", n*n*sizeof(float)/((getMicroSeconds()-t0)/1000000)/1024/1024);
printf("GPU matrix:\n");
print_matrix(array2D_trans_GPU,n,n);
printf("CPU matrix:\n");
for(int i = 0; i < n; i++){
for(int j = 0; j < n; j++){
printf("%4.1f ",array2D_trans[i][j]);
}
printf("\n");
}
if (check(array2D_trans_GPU, array2D_trans, n)) {  /* check() expects the matrix dimension n, not n*n */
	printf("Transpose CPU-GPU differs!!\n");
	return(1);
}
return(0);
}
| 13604156c2700e28b54e57229db5854de0f07e46.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
/* Time */
#include <sys/time.h>
#include <sys/resource.h>
static struct timeval tv0;
double getMicroSeconds()
{
double t;
gettimeofday(&tv0, (struct timezone*)0);
t = ((tv0.tv_usec) + (tv0.tv_sec)*1000000);
return (t);
}
void init_seed()
{
int seedi=1;
FILE *fd;
/* Generated random values between 0.00 - 1.00 */
fd = fopen("/dev/urandom", "r");
fread( &seedi, sizeof(int), 1, fd);
fclose (fd);
srand( seedi );
}
void init2Drand(float **buffer, int n)
{
int i, j;
for (i=0; i<n; i++)
for(j=0; j<n; j++)
buffer[i][j] = 500.0*(float(rand())/RAND_MAX)-500.0; /* [-500 500]*/
}
float *getmemory1D( int nx )
{
int i;
float *buffer;
if( (buffer=(float *)malloc(nx*sizeof(float *)))== NULL )
{
fprintf( stderr, "ERROR in memory allocation\n" );
return( NULL );
}
for( i=0; i<nx; i++ )
buffer[i] = 0.0;
return( buffer );
}
float **getmemory2D(int nx, int ny)
{
int i,j;
float **buffer;
if( (buffer=(float **)malloc(nx*sizeof(float *)))== NULL )
{
fprintf( stderr, "ERROR in memory allocation\n" );
return( NULL );
}
if( (buffer[0]=(float *)malloc(nx*ny*sizeof(float)))==NULL )
{
fprintf( stderr, "ERROR in memory allocation\n" );
free( buffer );
return( NULL );
}
for( i=1; i<nx; i++ )
{
buffer[i] = buffer[i-1] + ny;
}
for( i=0; i<nx; i++ )
for( j=0; j<ny; j++ )
{
buffer[i][j] = 0.0;
}
return( buffer );
}
/********************************************************************************/
/********************************************************************************/
/*
 * Transpose 2D version
*/
void transpose2D(float **in, float **out, int n)
{
int i, j;
for(j=0; j < n; j++)
for(i=0; i < n; i++)
out[j][i] = in[i][j];
}
/*
 * Transpose 1D version
*/
void transpose1D(float *in, float *out, int n)
{
int i, j;
for(j=0; j < n; j++)
for(i=0; i < n; i++)
out[j*n+i] = in[i*n+j];
}
/*
 * Transpose CUDA version
*/
#define NTHREADS1D 256
#define TILE_DIM 16
__global__ void transpose_device_v2(float *in, float *out, int rows, int cols)
{
// Aligned or misaligned accesses??
int i, j;
i = blockIdx.x * blockDim.x + threadIdx.x;
j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < rows && j < cols)
out [ i * cols + j ] = in [ j * rows + i ];
}
int check(float *GPU, float **CPU, int n)
{
int i,j;
for (i=0; i<n; i++){
for(j = 0; j < n; j++){
if(GPU[i * n + j]!=CPU[i][j]) return(1);
}
}
return(0);
}
void print_matrix(float *M, int hM, int wM)
{
int i,j;
for (i=0; i<hM; i++){
// printf("Line %i: ", i);
for (j=0; j<wM; j++)
printf("%4.1f ", M[i*wM+j]);
printf("\n");
}
}
int main(int argc, char **argv)
{
int n;
float **array2D, **array2D_trans;
float *array2D_trans_GPU;
double t0;
float size_block = 16;
if (argc==2)
n = atoi(argv[1]);
else {
n = 4096;
printf("./exec n (by default n=%i)\n", n);
}
/* Initialization */
init_seed();
array2D = getmemory2D(n,n);
array2D_trans = getmemory2D(n,n);
init2Drand(array2D, n);
/* Transpose 2D version */
t0 = getMicroSeconds();
transpose2D(array2D, array2D_trans, n);
printf("Transpose version 2D: %f MB/s\n", n*n*sizeof(float)/((getMicroSeconds()-t0)/1000000)/1024/1024);
/* CUDA version */
float *darray2D, *darray2D_trans;
cudaMalloc((void**)&darray2D, n*n*sizeof(float));
cudaMemcpy(darray2D, array2D[0], n*n*sizeof(float), cudaMemcpyHostToDevice); /* copy the contiguous data block, not the array of row pointers */
cudaMalloc((void**)&darray2D_trans, n*n*sizeof(float));
dim3 dimBlock(size_block,size_block);
int blocks = ceil(n/size_block);
dim3 dimGrid(blocks,blocks);
t0 = getMicroSeconds();
transpose_device_v2<<<dimGrid,dimBlock>>>(darray2D, darray2D_trans, n, n);
array2D_trans_GPU = getmemory1D(n*n); /* holds the full n x n result copied back below */
cudaMemcpy(array2D_trans_GPU, darray2D_trans, n*n*sizeof(float), cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
printf("Transpose kernel version: %f MB/s\n", n*n*sizeof(float)/((getMicroSeconds()-t0)/1000000)/1024/1024);
printf("GPU matrix:\n");
print_matrix(array2D_trans_GPU,n,n);
printf("CPU matrix:\n");
for(int i = 0; i < n; i++){
for(int j = 0; j < n; j++){
printf("%4.1f ",array2D_trans[i][j]);
}
printf("\n");
}
if (check(array2D_trans_GPU, array2D_trans, n)) {  /* check() expects the matrix dimension n, not n*n */
	printf("Transpose CPU-GPU differs!!\n");
	return(1);
}
return(0);
}
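// ---------------------------------------------------------------------------
// [Editor's sketch - not part of the original exercise] TILE_DIM is defined above
// but never used; a natural follow-up is a shared-memory tiled transpose. Minimal
// standalone version (names made up), correct for arbitrary rows x cols:
#include <cuda_runtime.h>
#include <cstdio>

#define TILE 16

__global__ void transpose_tiled_sketch(const float *in, float *out, int rows, int cols) {
  __shared__ float tile[TILE][TILE + 1];     // +1 column avoids shared-memory bank conflicts
  int x = blockIdx.x * TILE + threadIdx.x;   // column in the input
  int y = blockIdx.y * TILE + threadIdx.y;   // row in the input
  if (x < cols && y < rows)
    tile[threadIdx.y][threadIdx.x] = in[y * cols + x];
  __syncthreads();
  x = blockIdx.y * TILE + threadIdx.x;       // column in the output
  y = blockIdx.x * TILE + threadIdx.y;       // row in the output
  if (x < rows && y < cols)
    out[y * rows + x] = tile[threadIdx.x][threadIdx.y];
}

int main() {
  const int n = 3;
  float h_in[n * n] = {1, 2, 3, 4, 5, 6, 7, 8, 9}, h_out[n * n];
  float *d_in, *d_out;
  cudaMalloc(&d_in, sizeof(h_in));
  cudaMalloc(&d_out, sizeof(h_out));
  cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);
  dim3 block(TILE, TILE);
  dim3 grid((n + TILE - 1) / TILE, (n + TILE - 1) / TILE);
  transpose_tiled_sketch<<<grid, block>>>(d_in, d_out, n, n);
  cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n; ++i) {              // expect rows: 1 4 7 / 2 5 8 / 3 6 9
    for (int j = 0; j < n; ++j) std::printf("%4.1f ", h_out[i * n + j]);
    std::printf("\n");
  }
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}
// ---------------------------------------------------------------------------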
|